Java Code Examples for org.apache.hadoop.hbase.client.Admin#disableTable()
The following examples show how to use org.apache.hadoop.hbase.client.Admin#disableTable().
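Across the examples, disableTable() appears as the mandatory first step of a delete or modify sequence: HBase refuses to delete a table (and, with the older descriptor-based API, to modify one) while it is enabled. A minimal, self-contained sketch of that recurring pattern follows; the table name "demo" and the default configuration are hypothetical stand-ins, not taken from any example on this page.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableTableSketch {
  public static void main(String[] args) throws Exception {
    // Hypothetical table name, used only for illustration.
    TableName table = TableName.valueOf("demo");
    try (Connection connection =
             ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = connection.getAdmin()) {
      if (admin.tableExists(table)) {
        // A table must be disabled before it can be deleted; the
        // isTableEnabled() guard avoids a TableNotEnabledException
        // if it is already disabled.
        if (admin.isTableEnabled(table)) {
          admin.disableTable(table);
        }
        admin.deleteTable(table);
      }
    }
  }
}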
Example 1
Source File: TestHelloHBase.java From hbase with Apache License 2.0
@Test
public void testCreateNamespaceAndTable() throws Exception {
  Admin admin = TEST_UTIL.getAdmin();
  HelloHBase.createNamespaceAndTable(admin);

  boolean namespaceExists =
      HelloHBase.namespaceExists(admin, HelloHBase.MY_NAMESPACE_NAME);
  assertEquals("#createNamespaceAndTable failed to create namespace.",
      true, namespaceExists);

  boolean tableExists = admin.tableExists(HelloHBase.MY_TABLE_NAME);
  assertEquals("#createNamespaceAndTable failed to create table.",
      true, tableExists);

  admin.disableTable(HelloHBase.MY_TABLE_NAME);
  admin.deleteTable(HelloHBase.MY_TABLE_NAME);
  admin.deleteNamespace(HelloHBase.MY_NAMESPACE_NAME);
}
Example 2
Source File: TestTableDescriptorModificationFromClient.java From hbase with Apache License 2.0
@Test
public void testDeleteColumn() throws IOException {
  Admin admin = TEST_UTIL.getAdmin();
  // Create a table with two families
  TableDescriptorBuilder.ModifyableTableDescriptor tableDescriptor =
      new TableDescriptorBuilder.ModifyableTableDescriptor(TABLE_NAME);
  tableDescriptor.setColumnFamily(
      new ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor(FAMILY_0));
  tableDescriptor.setColumnFamily(
      new ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor(FAMILY_1));
  admin.createTable(tableDescriptor);
  admin.disableTable(TABLE_NAME);
  try {
    // Verify the table descriptor
    verifyTableDescriptor(TABLE_NAME, FAMILY_0, FAMILY_1);

    // Modify the table removing one family and verify the descriptor
    admin.deleteColumnFamily(TABLE_NAME, FAMILY_1);
    verifyTableDescriptor(TABLE_NAME, FAMILY_0);
  } finally {
    admin.deleteTable(TABLE_NAME);
  }
}
Example 3
Source File: TestCoprocessorWhitelistMasterObserver.java From hbase with Apache License 2.0
/**
 * Test a table modification adding a coprocessor path which is whitelisted.
 * The coprocessor should be added to the table descriptor successfully.
 * @param whitelistedPaths A String array of paths to add in for the
 *          whitelisting configuration
 * @param coprocessorPath A String to use as the path for a mock coprocessor
 */
private static void negativeTestCase(String[] whitelistedPaths,
    String coprocessorPath) throws Exception {
  Configuration conf = UTIL.getConfiguration();
  // set retries low to raise exception quickly
  conf.setInt("hbase.client.retries.number", 5);
  // load coprocessor under test
  conf.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY,
      CoprocessorWhitelistMasterObserver.class.getName());
  // set a coprocessor whitelist path for test
  conf.setStrings(
      CoprocessorWhitelistMasterObserver.CP_COPROCESSOR_WHITELIST_PATHS_KEY,
      whitelistedPaths);
  UTIL.startMiniCluster();
  UTIL.createTable(TEST_TABLE, new byte[][] { TEST_FAMILY });
  UTIL.waitUntilAllRegionsAssigned(TEST_TABLE);
  Connection connection = ConnectionFactory.createConnection(conf);
  Admin admin = connection.getAdmin();
  // disable the table so we do not actually try loading the non-existent
  // coprocessor file
  admin.disableTable(TEST_TABLE);
  Table t = connection.getTable(TEST_TABLE);
  HTableDescriptor htd = new HTableDescriptor(t.getDescriptor());
  htd.addCoprocessor("net.clayb.hbase.coprocessor.Whitelisted",
      new Path(coprocessorPath), Coprocessor.PRIORITY_USER, null);
  LOG.info("Modifying Table");
  admin.modifyTable(htd);
  assertEquals(1, t.getDescriptor().getCoprocessorDescriptors().size());
  LOG.info("Done Modifying Table");
}
Example 4
Source File: BaseTest.java From phoenix with Apache License 2.0
/**
 * Synchronously split the table at the given split point.
 */
protected static void splitRegion(TableName fullTableName, byte[] splitPoint)
    throws SQLException, IOException, InterruptedException {
  Admin admin =
      driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES).getAdmin();
  admin.split(fullTableName, splitPoint);
  // make sure the split finishes (there's no synchronous splitting before HBase 2.x)
  admin.disableTable(fullTableName);
  admin.enableTable(fullTableName);
}
Example 5
Source File: TestTableDescriptorModificationFromClient.java From hbase with Apache License 2.0
@Test
public void testDeleteSameColumnFamilyTwice() throws IOException {
  Admin admin = TEST_UTIL.getAdmin();
  // Create a table with two families
  TableDescriptorBuilder.ModifyableTableDescriptor tableDescriptor =
      new TableDescriptorBuilder.ModifyableTableDescriptor(TABLE_NAME);
  tableDescriptor.setColumnFamily(
      new ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor(FAMILY_0));
  tableDescriptor.setColumnFamily(
      new ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor(FAMILY_1));
  admin.createTable(tableDescriptor);
  admin.disableTable(TABLE_NAME);
  try {
    // Verify the table descriptor
    verifyTableDescriptor(TABLE_NAME, FAMILY_0, FAMILY_1);

    // Modify the table removing one family and verify the descriptor
    admin.deleteColumnFamily(TABLE_NAME, FAMILY_1);
    verifyTableDescriptor(TABLE_NAME, FAMILY_0);

    try {
      // Delete again - expect failure
      admin.deleteColumnFamily(TABLE_NAME, FAMILY_1);
      Assert.fail("Deleting a non-existent column family should fail");
    } catch (Exception e) {
      // Expected.
    }
  } finally {
    admin.deleteTable(TABLE_NAME);
  }
}
Example 6
Source File: TestSchemaResource.java From hbase with Apache License 2.0
@After
public void tearDown() throws Exception {
  Admin admin = TEST_UTIL.getAdmin();
  for (String table : new String[] { TABLE1, TABLE2 }) {
    TableName t = TableName.valueOf(table);
    if (admin.tableExists(t)) {
      admin.disableTable(t);
      admin.deleteTable(t);
    }
  }
  conf.set("hbase.rest.readonly", "false");
}
Example 7
Source File: CubeHTableUtil.java From kylin with Apache License 2.0
public static void deleteHTable(TableName tableName) throws IOException {
  Admin admin =
      HBaseConnection.get(KylinConfig.getInstanceFromEnv().getStorageUrl()).getAdmin();
  try {
    if (admin.tableExists(tableName)) {
      logger.info("disabling hbase table " + tableName);
      admin.disableTable(tableName);
      logger.info("deleting hbase table " + tableName);
      admin.deleteTable(tableName);
    }
  } finally {
    IOUtils.closeQuietly(admin);
  }
}
Example 8
Source File: ITAclTableMigrationToolTest.java From kylin with Apache License 2.0
private void dropTestHTables() throws IOException {
  Configuration conf = HBaseConnection.getCurrentHBaseConfiguration();
  Admin hbaseAdmin = new HBaseAdmin(conf);
  if (hbaseAdmin.tableExists(aclTable)) {
    if (hbaseAdmin.isTableEnabled(aclTable)) {
      hbaseAdmin.disableTable(aclTable);
    }
    hbaseAdmin.deleteTable(aclTable);
  }
  if (hbaseAdmin.tableExists(userTable)) {
    if (hbaseAdmin.isTableEnabled(userTable)) {
      hbaseAdmin.disableTable(userTable);
    }
    hbaseAdmin.deleteTable(userTable);
  }
  hbaseAdmin.close();
}
Example 9
Source File: EnrichmentCoprocessorIntegrationTest.java From metron with Apache License 2.0
private static void addCoprocessor(TableName tableName) throws IOException {
  // https://hbase.apache.org/1.1/book.html#cp_loading
  Admin hbaseAdmin = testUtil.getConnection().getAdmin();
  hbaseAdmin.disableTable(tableName);
  HTableDescriptor htd = new HTableDescriptor(tableName);
  htd.addFamily(new HColumnDescriptor(COLUMN_FAMILY));
  htd.addCoprocessor(EnrichmentCoprocessor.class.getCanonicalName());
  hbaseAdmin.modifyTable(tableName, htd);
  hbaseAdmin.enableTable(tableName);
}
Example 10
Source File: TestSCVFWithMiniCluster.java From hbase with Apache License 2.0
private static void destroy(Admin admin, TableName tableName) throws IOException {
  try {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  } catch (TableNotFoundException tnfe) {
    /* Ignore */
  }
}
Example 11
Source File: IndexToolIT.java From phoenix with Apache License 2.0
protected static void dropIndexToolTables(Connection conn) throws Exception {
  Admin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin();
  TableName indexToolOutputTable =
      TableName.valueOf(IndexVerificationOutputRepository.OUTPUT_TABLE_NAME_BYTES);
  admin.disableTable(indexToolOutputTable);
  admin.deleteTable(indexToolOutputTable);
  TableName indexToolResultTable =
      TableName.valueOf(IndexVerificationResultRepository.RESULT_TABLE_NAME_BYTES);
  admin.disableTable(indexToolResultTable);
  admin.deleteTable(indexToolResultTable);
}
Example 12
Source File: HBaseTestingUtility.java From hbase with Apache License 2.0
/**
 * Set the number of Region replicas.
 */
public static void setReplicas(Admin admin, TableName table, int replicaCount)
    throws IOException, InterruptedException {
  admin.disableTable(table);
  HTableDescriptor desc = new HTableDescriptor(admin.getDescriptor(table));
  desc.setRegionReplication(replicaCount);
  admin.modifyTable(desc);
  admin.enableTable(table);
}
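For context, a hypothetical call site for this helper; the TEST_UTIL handle and the table name "demo" are assumptions for illustration, not part of the source above.

  Admin admin = TEST_UTIL.getAdmin();
  // Disables the table, raises region replication to 3, then re-enables it.
  HBaseTestingUtility.setReplicas(admin, TableName.valueOf("demo"), 3);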
Example 13
Source File: TestCoprocessorTableEndpoint.java From hbase with Apache License 2.0
private static void updateTable(
    final TableDescriptorBuilder.ModifyableTableDescriptor tableDescriptor)
    throws Exception {
  Admin admin = TEST_UTIL.getAdmin();
  admin.disableTable(tableDescriptor.getTableName());
  admin.modifyTable(tableDescriptor);
  admin.enableTable(tableDescriptor.getTableName());
}
Example 14
Source File: TestRegionObserverBypass.java From hbase with Apache License 2.0
@Before
public void setUp() throws Exception {
  Admin admin = util.getAdmin();
  if (admin.tableExists(tableName)) {
    if (admin.isTableEnabled(tableName)) {
      admin.disableTable(tableName);
    }
    admin.deleteTable(tableName);
  }
  util.createTable(tableName, new byte[][] { dummy, test });
  TestCoprocessor.PREPUT_BYPASSES.set(0);
  TestCoprocessor.PREPUT_INVOCATIONS.set(0);
}
Example 15
Source File: TestMasterObserverPostCalls.java From hbase with Apache License 2.0
@Test
public void testPostDeleteNamespace() throws IOException {
  final Admin admin = UTIL.getAdmin();
  final String ns = "postdeletens";
  final TableName tn1 = TableName.valueOf(ns, "table1");

  admin.createNamespace(NamespaceDescriptor.create(ns).build());
  admin.createTable(TableDescriptorBuilder.newBuilder(tn1)
      .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("f1")).build())
      .build());

  HMaster master = UTIL.getMiniHBaseCluster().getMaster();
  MasterObserverForTest observer = master.getMasterCoprocessorHost().findCoprocessor(
      MasterObserverForTest.class);
  int preCount = observer.postHookCalls.get();
  try {
    admin.deleteNamespace(ns);
    fail("Deleting a non-empty namespace should be disallowed");
  } catch (IOException e) {
    // Pass
  }
  int postCount = observer.postHookCalls.get();
  assertEquals("Expected no invocations of postDeleteNamespace when the operation fails",
      preCount, postCount);

  // Disable and delete the table so that we can delete the NS.
  admin.disableTable(tn1);
  admin.deleteTable(tn1);

  // Validate that the postDeleteNamespace hook is invoked
  preCount = observer.postHookCalls.get();
  admin.deleteNamespace(ns);
  postCount = observer.postHookCalls.get();
  assertEquals("Expected 1 invocation of postDeleteNamespace", preCount + 1, postCount);
}
Example 16
Source File: IndexToolForNonTxGlobalIndexIT.java From phoenix with Apache License 2.0
@Test
public void testIndexToolVerifyWithExpiredIndexRows() throws Exception {
  String schemaName = generateUniqueName();
  String dataTableName = generateUniqueName();
  String dataTableFullName = SchemaUtil.getTableName(schemaName, dataTableName);
  String indexTableName = generateUniqueName();
  String indexTableFullName = SchemaUtil.getTableName(schemaName, indexTableName);
  Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
  try (Connection conn = DriverManager.getConnection(getUrl(), props)) {
    conn.createStatement().execute("CREATE TABLE " + dataTableFullName
        + " (ID INTEGER NOT NULL PRIMARY KEY, NAME VARCHAR, CODE VARCHAR) COLUMN_ENCODED_BYTES=0");
    // Insert a row
    conn.createStatement()
        .execute("upsert into " + dataTableFullName + " values (1, 'Phoenix', 'A')");
    conn.commit();
    conn.createStatement()
        .execute(String.format("CREATE INDEX %s ON %s (NAME) INCLUDE (CODE) ASYNC",
            indexTableName, dataTableFullName));
    IndexToolIT.runIndexTool(directApi, useSnapshot, schemaName, dataTableName,
        indexTableName, null, 0, IndexTool.IndexVerifyType.ONLY);
    Cell cell = IndexToolIT.getErrorMessageFromIndexToolOutputTable(conn,
        dataTableFullName, indexTableFullName);
    try {
      String expectedErrorMsg = IndexRebuildRegionScanner.ERROR_MESSAGE_MISSING_INDEX_ROW;
      String actualErrorMsg = Bytes.toString(cell.getValueArray(),
          cell.getValueOffset(), cell.getValueLength());
      assertTrue(expectedErrorMsg.equals(actualErrorMsg));
    } catch (Exception ex) {
      Assert.fail("Failed to parse the error message from IndexToolOutputTable");
    }
    // Run the index tool to populate the index while verifying rows
    IndexToolIT.runIndexTool(directApi, useSnapshot, schemaName, dataTableName,
        indexTableName, null, 0, IndexTool.IndexVerifyType.AFTER);

    // Set the TTL of the index table ridiculously low so that all data is expired
    Admin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin();
    TableName indexTable = TableName.valueOf(indexTableFullName);
    ColumnFamilyDescriptor desc = admin.getDescriptor(indexTable).getColumnFamilies()[0];
    ColumnFamilyDescriptorBuilder builder =
        ColumnFamilyDescriptorBuilder.newBuilder(desc).setTimeToLive(1);
    Future<Void> future = admin.modifyColumnFamilyAsync(indexTable, builder.build());
    Thread.sleep(1000);
    future.get(40, TimeUnit.SECONDS);

    TableName indexToolOutputTable =
        TableName.valueOf(IndexVerificationOutputRepository.OUTPUT_TABLE_NAME_BYTES);
    admin.disableTable(indexToolOutputTable);
    admin.deleteTable(indexToolOutputTable);
    // Run the index tool with the only-verify option and verify it reports no mismatch
    IndexToolIT.runIndexTool(directApi, useSnapshot, schemaName, dataTableName,
        indexTableName, null, 0, IndexTool.IndexVerifyType.ONLY);
    Scan scan = new Scan();
    Table hIndexToolTable = conn.unwrap(PhoenixConnection.class).getQueryServices()
        .getTable(indexToolOutputTable.getName());
    Result r = hIndexToolTable.getScanner(scan).next();
    assertTrue(r == null);
  }
}
Example 17
Source File: MutableIndexReplicationIT.java From phoenix with Apache License 2.0
@Test
public void testReplicationWithMutableIndexes() throws Exception {
  Connection conn = getConnection();

  // create the primary and index tables
  conn.createStatement().execute("CREATE TABLE " + DATA_TABLE_FULL_NAME
      + " (k VARCHAR NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR)");
  conn.createStatement().execute(
      "CREATE INDEX " + INDEX_TABLE_NAME + " ON " + DATA_TABLE_FULL_NAME + " (v1)");

  // make sure that the tables are empty, but reachable
  String query = "SELECT * FROM " + DATA_TABLE_FULL_NAME;
  ResultSet rs = conn.createStatement().executeQuery(query);
  assertFalse(rs.next());
  // make sure there is no data in the table
  query = "SELECT * FROM " + INDEX_TABLE_FULL_NAME;
  rs = conn.createStatement().executeQuery(query);
  assertFalse(rs.next());

  // make sure the data tables are created on the remote cluster
  Admin admin = utility1.getAdmin();
  Admin admin2 = utility2.getAdmin();

  List<String> dataTables = new ArrayList<String>();
  dataTables.add(DATA_TABLE_FULL_NAME);
  dataTables.add(INDEX_TABLE_FULL_NAME);
  for (String tableName : dataTables) {
    TableDescriptor desc = admin.getDescriptor(TableName.valueOf(tableName));

    // create it as-is on the remote cluster
    admin2.createTable(desc);

    LOGGER.info("Enabling replication on source table: " + tableName);
    ColumnFamilyDescriptor[] cols = desc.getColumnFamilies();
    assertEquals(1, cols.length);
    // add the replication scope to the column
    ColumnFamilyDescriptor col = ColumnFamilyDescriptorBuilder.newBuilder(cols[0].getName())
        .setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build();
    desc = TableDescriptorBuilder.newBuilder(desc).removeColumnFamily(cols[0].getName())
        .addColumnFamily(col).build();
    // disable/modify/enable the table so it has replication enabled
    admin.disableTable(desc.getTableName());
    admin.modifyTable(desc);
    admin.enableTable(desc.getTableName());
    LOGGER.info("Replication enabled on source table: " + tableName);
  }

  // load some data into the source cluster table
  PreparedStatement stmt =
      conn.prepareStatement("UPSERT INTO " + DATA_TABLE_FULL_NAME + " VALUES(?,?,?)");
  stmt.setString(1, "a"); // k
  stmt.setString(2, "x"); // v1 <- has index
  stmt.setString(3, "1"); // v2
  stmt.execute();
  conn.commit();

  // make sure the index is working as expected
  query = "SELECT * FROM " + INDEX_TABLE_FULL_NAME;
  rs = conn.createStatement().executeQuery(query);
  assertTrue(rs.next());
  assertEquals("x", rs.getString(1));
  assertFalse(rs.next());
  conn.close();

  /* Validate that we have replicated the rows to the remote cluster */
  // the other table can't be reached through Phoenix right now - that would require
  // changing how we look up tables, so for now we just go through an HTable
  LOGGER.info("Looking up tables in replication target");
  TableName[] tables = admin2.listTableNames();
  org.apache.hadoop.hbase.client.Connection hbaseConn =
      ConnectionFactory.createConnection(utility2.getConfiguration());
  Table remoteTable = hbaseConn.getTable(tables[0]);
  for (int i = 0; i < REPLICATION_RETRIES; i++) {
    if (i >= REPLICATION_RETRIES - 1) {
      fail("Waited too long for put replication on table "
          + remoteTable.getDescriptor().getTableName());
    }
    if (ensureAnyRows(remoteTable)) {
      break;
    }
    LOGGER.info("Sleeping for " + REPLICATION_WAIT_TIME_MILLIS
        + " for edits to get replicated");
    Thread.sleep(REPLICATION_WAIT_TIME_MILLIS);
  }
  remoteTable.close();
}
Example 18
Source File: WALReplayWithIndexWritesAndCompressedWALIT.java From phoenix with Apache License 2.0
/**
 * Test writing edits into a region, closing it, splitting logs, and opening the
 * region again. Verify seqids.
 * @throws Exception on failure
 */
@Test
public void testReplayEditsWrittenViaHRegion() throws Exception {
  final String tableNameStr = "testReplayEditsWrittenViaHRegion";
  final RegionInfo hri = RegionInfoBuilder
      .newBuilder(org.apache.hadoop.hbase.TableName.valueOf(tableNameStr))
      .setSplit(false).build();
  final Path basedir = FSUtils.getTableDir(hbaseRootDir,
      org.apache.hadoop.hbase.TableName.valueOf(tableNameStr));
  deleteDir(basedir);
  final TableDescriptor htd = createBasic3FamilyHTD(tableNameStr);

  // setup basic indexing for the table
  // enable indexing to a non-existent index table
  byte[] family = new byte[] { 'a' };
  ColumnGroup fam1 = new ColumnGroup(INDEX_TABLE_NAME);
  fam1.add(new CoveredColumn(family, CoveredColumn.ALL_QUALIFIERS));
  CoveredColumnIndexSpecifierBuilder builder = new CoveredColumnIndexSpecifierBuilder();
  builder.addIndexGroup(fam1);
  builder.build(htd);
  WALFactory walFactory = new WALFactory(this.conf, "localhost,1234");

  WAL wal = createWAL(this.conf, walFactory);
  // create the region + its WAL
  HRegion region0 = HRegion.createHRegion(hri, hbaseRootDir, this.conf, htd, wal); // FIXME: Uses private type
  region0.close();
  region0.getWAL().close();

  HRegionServer mockRS = Mockito.mock(HRegionServer.class);
  // mock out some of the internals of the RSS, so we can run CPs
  when(mockRS.getWAL(null)).thenReturn(wal);
  RegionServerAccounting rsa = Mockito.mock(RegionServerAccounting.class);
  when(mockRS.getRegionServerAccounting()).thenReturn(rsa);
  ServerName mockServerName = Mockito.mock(ServerName.class);
  when(mockServerName.getServerName()).thenReturn(tableNameStr + ",1234");
  when(mockRS.getServerName()).thenReturn(mockServerName);
  HRegion region = spy(new HRegion(basedir, wal, this.fs, this.conf, hri, htd, mockRS));
  region.initialize();

  // make an attempted write to the primary that should also be indexed
  byte[] rowkey = Bytes.toBytes("indexed_row_key");
  Put p = new Put(rowkey);
  p.addColumn(family, Bytes.toBytes("qual"), Bytes.toBytes("value"));
  region.put(p);

  // we should then see the server go down
  Mockito.verify(mockRS, Mockito.times(1)).abort(Mockito.anyString(),
      Mockito.any(Exception.class));

  // then create the index table so we are successful on WAL replay
  TestIndexManagementUtil.createIndexTable(UTIL.getAdmin(), INDEX_TABLE_NAME);

  // run the WAL split and setup the region
  runWALSplit(this.conf, walFactory);
  WAL wal2 = createWAL(this.conf, walFactory);
  HRegion region1 = new HRegion(basedir, wal2, this.fs, this.conf, hri, htd, mockRS);

  // initialize the region - this should replay the WALEdits from the WAL
  region1.initialize();
  org.apache.hadoop.hbase.client.Connection hbaseConn =
      ConnectionFactory.createConnection(UTIL.getConfiguration());

  // now check to ensure that we wrote to the index table
  Table index = hbaseConn.getTable(org.apache.hadoop.hbase.TableName.valueOf(INDEX_TABLE_NAME));
  int indexSize = getKeyValueCount(index);
  assertEquals("Index wasn't properly updated from WAL replay!", 1, indexSize);
  Get g = new Get(rowkey);
  final Result result = region1.get(g);
  assertEquals("Primary region wasn't updated from WAL replay!", 1, result.size());

  // cleanup the index table
  Admin admin = UTIL.getAdmin();
  admin.disableTable(TableName.valueOf(INDEX_TABLE_NAME));
  admin.deleteTable(TableName.valueOf(INDEX_TABLE_NAME));
  admin.close();
}
Example 19
Source File: TestHFileArchiving.java From hbase with Apache License 2.0
@Test
public void testRemoveRegionDirOnArchive() throws Exception {
  final TableName tableName = TableName.valueOf(name.getMethodName());
  UTIL.createTable(tableName, TEST_FAM);

  final Admin admin = UTIL.getAdmin();

  // get the current store files for the region
  List<HRegion> servingRegions = UTIL.getHBaseCluster().getRegions(tableName);
  // make sure we only have 1 region serving this table
  assertEquals(1, servingRegions.size());
  HRegion region = servingRegions.get(0);

  // and load the table
  UTIL.loadRegion(region, TEST_FAM);

  // shutdown the table so we can manipulate the files
  admin.disableTable(tableName);

  FileSystem fs = UTIL.getTestFileSystem();

  // now attempt to depose the region
  Path rootDir = region.getRegionFileSystem().getTableDir().getParent();
  Path regionDir = FSUtils.getRegionDirFromRootDir(rootDir, region.getRegionInfo());

  HFileArchiver.archiveRegion(UTIL.getConfiguration(), fs, region.getRegionInfo());

  // check for the existence of the archive directory and some files in it
  Path archiveDir = HFileArchiveTestingUtil.getRegionArchiveDir(UTIL.getConfiguration(), region);
  assertTrue(fs.exists(archiveDir));

  // check to make sure the store directory was copied
  FileStatus[] stores = fs.listStatus(archiveDir, new PathFilter() {
    @Override
    public boolean accept(Path p) {
      return !p.getName().contains(HConstants.RECOVERED_EDITS_DIR);
    }
  });
  assertTrue(stores.length == 1);

  // make sure we archived the store files
  FileStatus[] storeFiles = fs.listStatus(stores[0].getPath());
  assertTrue(storeFiles.length > 0);

  // then ensure the region's directory isn't present
  assertFalse(fs.exists(regionDir));

  UTIL.deleteTable(tableName);
}