Java Code Examples for org.apache.hadoop.hbase.client.Table#delete()
The following examples show how to use org.apache.hadoop.hbase.client.Table#delete().
The examples are drawn from open-source projects; the source file, project, and license are noted above each example.
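Before the project-specific examples, here is a minimal, self-contained sketch of the basic pattern most of them follow: obtain a Table from a Connection, build a Delete keyed on the row, optionally narrow it to a column family or column, and call Table#delete(). This is an illustrative sketch only; the table name "my_table", row keys, family "cf", and qualifier "q" are placeholders, not taken from any of the projects below.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class DeleteExample {

  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    // Placeholder table name, family, and qualifier for illustration only.
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("my_table"))) {

      // Delete an entire row.
      table.delete(new Delete(Bytes.toBytes("row1")));

      // Delete a whole column family of a row.
      Delete familyDelete = new Delete(Bytes.toBytes("row2"));
      familyDelete.addFamily(Bytes.toBytes("cf"));
      table.delete(familyDelete);

      // Delete all versions of a single column of a row.
      Delete columnDelete = new Delete(Bytes.toBytes("row3"));
      columnDelete.addColumns(Bytes.toBytes("cf"), Bytes.toBytes("q"));
      table.delete(columnDelete);
    }
  }
}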
Example 1
Source File: HBaseResourceStore.java From kylin-on-parquet-v2 with Apache License 2.0
@Override
protected void deleteResourceImpl(String resPath) throws IOException {
    Table table = getConnection().getTable(TableName.valueOf(tableName));
    try {
        boolean hdfsResourceExist = isHdfsResourceExist(table, resPath);
        Delete del = new Delete(Bytes.toBytes(resPath));
        table.delete(del);

        if (hdfsResourceExist) { // remove hdfs cell value
            deletePushdown(resPath);
        }
    } finally {
        IOUtils.closeQuietly(table);
    }
}
Example 2
Source File: HBCKActions.java From hbase-operator-tools with Apache License 2.0
/**
 * Deletes the middle region from the regions of the given table from Meta table
 * Removes whole of the "info" column family
 */
private void deleteRegionFromMeta(String tname) throws IOException, InterruptedException {
  TableName tn = TableName.valueOf(tname);
  try (Connection connection = ConnectionFactory.createConnection(conf)) {
    Table metaTable = connection.getTable(TableName.valueOf("hbase:meta"));
    List<RegionInfo> ris = HBCKMetaTableAccessor.getTableRegions(connection, tn);
    System.out.println(String.format("Current Regions of the table " + tn.getNameAsString()
        + " in Meta before deletion of the region are: " + ris));
    RegionInfo ri = ris.get(ris.size() / 2);
    System.out.println("Deleting Region " + ri.getRegionNameAsString());
    byte[] key = HBCKMetaTableAccessor.getMetaKeyForRegion(ri);

    Delete delete = new Delete(key);
    delete.addFamily(Bytes.toBytes("info"));
    metaTable.delete(delete);

    Thread.sleep(500);

    ris = HBCKMetaTableAccessor.getTableRegions(connection, tn);
    System.out.println("Current Regions of the table " + tn.getNameAsString()
        + " in Meta after deletion of the region are: " + ris);
  }
}
Example 3
Source File: HBaseResourceStore.java From kylin with Apache License 2.0
@Override
protected void deleteResourceImpl(String resPath, long timestamp) throws IOException {
    Table table = getConnection().getTable(TableName.valueOf(tableName));
    try {
        boolean hdfsResourceExist = isHdfsResourceExist(table, resPath);
        long origLastModified = getResourceLastModified(table, resPath);
        if (checkTimeStampBeforeDelete(origLastModified, timestamp)) {
            Delete del = new Delete(Bytes.toBytes(resPath));
            table.delete(del);

            if (hdfsResourceExist) { // remove hdfs cell value
                deletePushdown(resPath);
            }
        } else {
            throw new IOException("Resource " + resPath + " timestamp not match, [originLastModified: "
                    + origLastModified + ", timestampToDelete: " + timestamp + "]");
        }
    } finally {
        IOUtils.closeQuietly(table);
    }
}
Example 4
Source File: ThriftHBaseServiceHandler.java From hbase with Apache License 2.0
@Override
public void deleteAllRowTs(ByteBuffer tableName, ByteBuffer row, long timestamp,
    Map<ByteBuffer, ByteBuffer> attributes) throws IOError {
  Table table = null;
  try {
    table = getTable(tableName);
    Delete delete = new Delete(getBytes(row), timestamp);
    addAttributes(delete, attributes);
    table.delete(delete);
  } catch (IOException e) {
    LOG.warn(e.getMessage(), e);
    throw getIOError(e);
  } finally {
    closeTable(table);
  }
}
Example 5
Source File: TestWALFiltering.java From hbase with Apache License 2.0
private void fillTable() throws IOException, InterruptedException {
  Table table = TEST_UTIL.createTable(TABLE_NAME, FAMILIES, 3,
      Bytes.toBytes("row0"), Bytes.toBytes("row99"), NUM_RS);
  Random rand = new Random(19387129L);
  for (int iStoreFile = 0; iStoreFile < 4; ++iStoreFile) {
    for (int iRow = 0; iRow < 100; ++iRow) {
      final byte[] row = Bytes.toBytes("row" + iRow);
      Put put = new Put(row);
      Delete del = new Delete(row);
      for (int iCol = 0; iCol < 10; ++iCol) {
        final byte[] cf = rand.nextBoolean() ? CF1 : CF2;
        final long ts = Math.abs(rand.nextInt());
        final byte[] qual = Bytes.toBytes("col" + iCol);
        if (rand.nextBoolean()) {
          final byte[] value = Bytes.toBytes("value_for_row_" + iRow + "_cf_" +
              Bytes.toStringBinary(cf) + "_col_" + iCol + "_ts_" + ts +
              "_random_" + rand.nextLong());
          put.addColumn(cf, qual, ts, value);
        } else if (rand.nextDouble() < 0.8) {
          del.addColumn(cf, qual, ts);
        } else {
          del.addColumn(cf, qual, ts);
        }
      }
      table.put(put);
      table.delete(del);
    }
  }
  TEST_UTIL.waitUntilAllRegionsAssigned(TABLE_NAME);
}
Example 6
Source File: HBaseUtil.java From java-study with Apache License 2.0
/**
 * Delete data
 * @param tableName table name
 * @param rowKey row key
 * @param family column family
 * @param qualifier column qualifier
 */
public static void delete(String tableName, String rowKey, String family, String qualifier) {
    if (null == tableName || tableName.length() == 0) {
        return;
    }
    if (null == rowKey || rowKey.length() == 0) {
        return;
    }
    Table t = null;
    try {
        t = getConnection().getTable(TableName.valueOf(tableName));
        Delete del = new Delete(Bytes.toBytes(rowKey));
        // If the column family is not empty
        if (null != family && family.length() > 0) {
            // If the column qualifier is not empty
            if (null != qualifier && qualifier.length() > 0) {
                del.addColumn(Bytes.toBytes(family), Bytes.toBytes(qualifier));
            } else {
                del.addFamily(Bytes.toBytes(family));
            }
        }
        t.delete(del);
    } catch (IOException e) {
        System.out.println("Delete failed!");
        e.printStackTrace();
    } finally {
        close();
    }
}
Example 7
Source File: PermissionStorage.java From hbase with Apache License 2.0
/**
 * Remove specified table from the _acl_ table.
 */
static void removeTablePermissions(Configuration conf, TableName tableName, Table t)
    throws IOException {
  Delete d = new Delete(tableName.getName());
  d.addFamily(ACL_LIST_FAMILY);

  if (LOG.isDebugEnabled()) {
    LOG.debug("Removing permissions of removed table " + tableName);
  }
  try {
    t.delete(d);
  } finally {
    t.close();
  }
}
Example 8
Source File: FlowQueueService.java From hraven with Apache License 2.0
/**
 * Moves a flow_queue record from one row key to another. All Cells in the
 * existing row will be written to the new row. This would primarily be used
 * for transitioning a flow's data from one status to another.
 *
 * @param oldKey the existing row key to move
 * @param newKey the new row key to move to
 * @throws IOException
 */
public void moveFlow(FlowQueueKey oldKey, FlowQueueKey newKey)
    throws DataException, IOException {
  byte[] oldRowKey = queueKeyConverter.toBytes(oldKey);
  Get get = new Get(oldRowKey);
  Table flowQueueTable = null;
  try {
    flowQueueTable = hbaseConnection
        .getTable(TableName.valueOf(Constants.FLOW_QUEUE_TABLE));
    Result result = flowQueueTable.get(get);
    if (result == null || result.isEmpty()) {
      // no existing row
      throw new DataException(
          "No row for key " + Bytes.toStringBinary(oldRowKey));
    }
    // copy the existing row to the new key
    Put p = new Put(queueKeyConverter.toBytes(newKey));
    for (Cell c : result.rawCells()) {
      p.addColumn(CellUtil.cloneFamily(c), CellUtil.cloneQualifier(c),
          CellUtil.cloneValue(c));
    }
    flowQueueTable.put(p);
    // delete the old row
    Delete d = new Delete(oldRowKey);
    flowQueueTable.delete(d);
  } finally {
    if (flowQueueTable != null) {
      flowQueueTable.close();
    }
  }
}
Example 9
Source File: HelloHBase.java From hbase with Apache License 2.0
/**
 * Invokes Table#delete to delete test data (i.e. the row)
 *
 * @param table Standard Table object
 * @throws IOException If IO problem is encountered
 */
static void deleteRow(final Table table) throws IOException {
  System.out.println("Deleting row [" + Bytes.toString(MY_ROW_ID)
      + "] from Table [" + table.getName().getNameAsString() + "].");
  table.delete(new Delete(MY_ROW_ID));
}
Example 10
Source File: TestNamespaceReplication.java From hbase with Apache License 2.0
private void delete(Table source, byte[] row, byte[]... families) throws Exception {
  for (byte[] fam : families) {
    Delete del = new Delete(row);
    del.addFamily(fam);
    source.delete(del);
  }
}
Example 11
Source File: DataJanitorState.java From phoenix-tephra with Apache License 2.0
@VisibleForTesting
void deleteFromScan(Table stateTable, Scan scan) throws IOException {
  try (ResultScanner scanner = stateTable.getScanner(scan)) {
    Result next;
    while ((next = scanner.next()) != null) {
      stateTable.delete(new Delete(next.getRow()));
    }
  }
}
Example 12
Source File: PermissionStorage.java From hbase with Apache License 2.0
static private void removeTablePermissions(TableName tableName, byte[] column, Table table,
    boolean closeTable) throws IOException {
  Scan scan = new Scan();
  scan.addFamily(ACL_LIST_FAMILY);

  String columnName = Bytes.toString(column);
  scan.setFilter(new QualifierFilter(CompareOperator.EQUAL,
      new RegexStringComparator(String.format("(%s%s%s)|(%s%s)$", ACL_KEY_DELIMITER, columnName,
          ACL_KEY_DELIMITER, ACL_KEY_DELIMITER, columnName))));

  Set<byte[]> qualifierSet = new TreeSet<>(Bytes.BYTES_COMPARATOR);
  ResultScanner scanner = null;
  try {
    scanner = table.getScanner(scan);
    for (Result res : scanner) {
      for (byte[] q : res.getFamilyMap(ACL_LIST_FAMILY).navigableKeySet()) {
        qualifierSet.add(q);
      }
    }

    if (qualifierSet.size() > 0) {
      Delete d = new Delete(tableName.getName());
      for (byte[] qualifier : qualifierSet) {
        d.addColumns(ACL_LIST_FAMILY, qualifier);
      }
      table.delete(d);
    }
  } finally {
    if (scanner != null) {
      scanner.close();
    }
    if (closeTable) {
      table.close();
    }
  }
}
Example 13
Source File: TestCleanupCompactedFileOnRegionClose.java From hbase with Apache License 2.0
@Test
public void testCleanupOnClose() throws Exception {
  TableName tableName = TableName.valueOf("testCleanupOnClose");
  String familyName = "f";
  byte[] familyNameBytes = Bytes.toBytes(familyName);
  util.createTable(tableName, familyName);

  Admin hBaseAdmin = util.getAdmin();
  Table table = util.getConnection().getTable(tableName);

  HRegionServer rs = util.getRSForFirstRegionInTable(tableName);
  Region region = rs.getRegions(tableName).get(0);

  int refSFCount = 4;
  for (int i = 0; i < refSFCount; i++) {
    for (int j = 0; j < refSFCount; j++) {
      Put put = new Put(Bytes.toBytes(j));
      put.addColumn(familyNameBytes, Bytes.toBytes(i), Bytes.toBytes(j));
      table.put(put);
    }
    util.flush(tableName);
  }
  assertEquals(refSFCount, region.getStoreFileList(new byte[][]{familyNameBytes}).size());

  // add a delete, to test whether we end up with an inconsistency post region close
  Delete delete = new Delete(Bytes.toBytes(refSFCount-1));
  table.delete(delete);
  util.flush(tableName);
  assertFalse(table.exists(new Get(Bytes.toBytes(refSFCount-1))));

  // Create a scanner and keep it open to add references to StoreFileReaders
  Scan scan = new Scan();
  scan.withStopRow(Bytes.toBytes(refSFCount-2));
  scan.setCaching(1);
  ResultScanner scanner = table.getScanner(scan);
  Result res = scanner.next();
  assertNotNull(res);
  assertEquals(refSFCount, res.getFamilyMap(familyNameBytes).size());

  // Verify the references
  int count = 0;
  for (HStoreFile sf : (Collection<HStoreFile>)region.getStore(familyNameBytes).getStorefiles()) {
    synchronized (sf) {
      if (count < refSFCount) {
        assertTrue(sf.isReferencedInReads());
      } else {
        assertFalse(sf.isReferencedInReads());
      }
    }
    count++;
  }

  // Major compact to produce compacted storefiles that need to be cleaned up
  util.compact(tableName, true);
  assertEquals(1, region.getStoreFileList(new byte[][]{familyNameBytes}).size());
  assertEquals(refSFCount+1,
      ((HStore)region.getStore(familyNameBytes)).getStoreEngine().getStoreFileManager()
          .getCompactedfiles().size());

  // close then open the region to determine whether compacted storefiles get cleaned up on close
  hBaseAdmin.unassign(region.getRegionInfo().getRegionName(), false);
  hBaseAdmin.assign(region.getRegionInfo().getRegionName());
  util.waitUntilNoRegionsInTransition(10000);

  assertFalse("Deleted row should not exist",
      table.exists(new Get(Bytes.toBytes(refSFCount-1))));

  rs = util.getRSForFirstRegionInTable(tableName);
  region = rs.getRegions(tableName).get(0);
  assertEquals(1, region.getStoreFileList(new byte[][]{familyNameBytes}).size());
  assertEquals(0,
      ((HStore)region.getStore(familyNameBytes)).getStoreEngine().getStoreFileManager()
          .getCompactedfiles().size());
}
Example 14
Source File: CubeMigrationCLI.java From kylin with Apache License 2.0
private static void undo(Opt opt) throws IOException, InterruptedException {
    logger.info("Undo operation: " + opt.toString());

    switch (opt.type) {
    case CHANGE_HTABLE_HOST: {
        TableName tableName = TableName.valueOf((String) opt.params[0]);
        HTableDescriptor desc = hbaseAdmin.getTableDescriptor(tableName);
        hbaseAdmin.disableTable(tableName);
        desc.setValue(IRealizationConstants.HTableTag, srcConfig.getMetadataUrlPrefix());
        hbaseAdmin.modifyTable(tableName, desc);
        hbaseAdmin.enableTable(tableName);
        break;
    }
    case COPY_FILE_IN_META: {
        // no harm
        logger.info("Undo for COPY_FILE_IN_META is ignored");
        break;
    }
    case COPY_DICT_OR_SNAPSHOT: {
        // no harm
        logger.info("Undo for COPY_DICT_OR_SNAPSHOT is ignored");
        break;
    }
    case RENAME_FOLDER_IN_HDFS: {
        String srcPath = (String) opt.params[1];
        String dstPath = (String) opt.params[0];

        if (hdfsFS.exists(new Path(srcPath)) && !hdfsFS.exists(new Path(dstPath))) {
            renameHDFSPath(srcPath, dstPath);
            logger.info("HDFS Folder renamed from " + srcPath + " to " + dstPath);
        }
        break;
    }
    case ADD_INTO_PROJECT: {
        logger.info("Undo for ADD_INTO_PROJECT is ignored");
        break;
    }
    case COPY_ACL: {
        String cubeId = (String) opt.params[0];
        String modelId = (String) opt.params[1];
        Table destAclHtable = null;
        try {
            destAclHtable = HBaseConnection.get(dstConfig.getStorageUrl())
                    .getTable(TableName.valueOf(dstConfig.getMetadataUrlPrefix() + ACL_TABLE_NAME));

            destAclHtable.delete(new Delete(Bytes.toBytes(cubeId)));
            destAclHtable.delete(new Delete(Bytes.toBytes(modelId)));
        } finally {
            IOUtils.closeQuietly(destAclHtable);
        }
        break;
    }
    case PURGE_AND_DISABLE: {
        logger.info("Undo for PURGE_AND_DISABLE is not supported");
        break;
    }
    default: {
        // do nothing
        break;
    }
    }
}
Example 15
Source File: AbstractTestWALReplay.java From hbase with Apache License 2.0
/**
 * @throws Exception
 */
@Test
public void testReplayEditsAfterRegionMovedWithMultiCF() throws Exception {
  final TableName tableName =
      TableName.valueOf("testReplayEditsAfterRegionMovedWithMultiCF");
  byte[] family1 = Bytes.toBytes("cf1");
  byte[] family2 = Bytes.toBytes("cf2");
  byte[] qualifier = Bytes.toBytes("q");
  byte[] value = Bytes.toBytes("testV");
  byte[][] familys = { family1, family2 };
  TEST_UTIL.createTable(tableName, familys);
  Table htable = TEST_UTIL.getConnection().getTable(tableName);
  Put put = new Put(Bytes.toBytes("r1"));
  put.addColumn(family1, qualifier, value);
  htable.put(put);
  ResultScanner resultScanner = htable.getScanner(new Scan());
  int count = 0;
  while (resultScanner.next() != null) {
    count++;
  }
  resultScanner.close();
  assertEquals(1, count);

  MiniHBaseCluster hbaseCluster = TEST_UTIL.getMiniHBaseCluster();
  List<HRegion> regions = hbaseCluster.getRegions(tableName);
  assertEquals(1, regions.size());

  // move region to another regionserver
  Region destRegion = regions.get(0);
  int originServerNum = hbaseCluster.getServerWith(destRegion.getRegionInfo().getRegionName());
  assertTrue("Please start more than 1 regionserver",
      hbaseCluster.getRegionServerThreads().size() > 1);
  int destServerNum = 0;
  while (destServerNum == originServerNum) {
    destServerNum++;
  }
  HRegionServer originServer = hbaseCluster.getRegionServer(originServerNum);
  HRegionServer destServer = hbaseCluster.getRegionServer(destServerNum);
  // move region to destination regionserver
  TEST_UTIL.moveRegionAndWait(destRegion.getRegionInfo(), destServer.getServerName());

  // delete the row
  Delete del = new Delete(Bytes.toBytes("r1"));
  htable.delete(del);
  resultScanner = htable.getScanner(new Scan());
  count = 0;
  while (resultScanner.next() != null) {
    count++;
  }
  resultScanner.close();
  assertEquals(0, count);

  // flush region and make major compaction
  HRegion region =
      (HRegion) destServer.getOnlineRegion(destRegion.getRegionInfo().getRegionName());
  region.flush(true);
  // wait to complete major compaction
  for (HStore store : region.getStores()) {
    store.triggerMajorCompaction();
  }
  region.compact(true);

  // move region to origin regionserver
  TEST_UTIL.moveRegionAndWait(destRegion.getRegionInfo(), originServer.getServerName());
  // abort the origin regionserver
  originServer.abort("testing");

  // see what we get
  Result result = htable.get(new Get(Bytes.toBytes("r1")));
  if (result != null) {
    assertTrue("Row is deleted, but we get" + result.toString(),
        (result == null) || result.isEmpty());
  }
  resultScanner.close();
}
Example 16
Source File: TestWALPlayer.java From hbase with Apache License 2.0
/**
 * Simple end-to-end test
 * @throws Exception
 */
@Test
public void testWALPlayer() throws Exception {
  final TableName tableName1 = TableName.valueOf(name.getMethodName() + "1");
  final TableName tableName2 = TableName.valueOf(name.getMethodName() + "2");
  final byte[] FAMILY = Bytes.toBytes("family");
  final byte[] COLUMN1 = Bytes.toBytes("c1");
  final byte[] COLUMN2 = Bytes.toBytes("c2");
  final byte[] ROW = Bytes.toBytes("row");
  Table t1 = TEST_UTIL.createTable(tableName1, FAMILY);
  Table t2 = TEST_UTIL.createTable(tableName2, FAMILY);

  // put a row into the first table
  Put p = new Put(ROW);
  p.addColumn(FAMILY, COLUMN1, COLUMN1);
  p.addColumn(FAMILY, COLUMN2, COLUMN2);
  t1.put(p);
  // delete one column
  Delete d = new Delete(ROW);
  d.addColumns(FAMILY, COLUMN1);
  t1.delete(d);

  // replay the WAL, map table 1 to table 2
  WAL log = cluster.getRegionServer(0).getWAL(null);
  log.rollWriter();
  String walInputDir = new Path(cluster.getMaster().getMasterFileSystem()
      .getWALRootDir(), HConstants.HREGION_LOGDIR_NAME).toString();

  Configuration configuration = TEST_UTIL.getConfiguration();
  WALPlayer player = new WALPlayer(configuration);
  String optionName = "_test_.name";
  configuration.set(optionName, "1000");
  player.setupTime(configuration, optionName);
  assertEquals(1000, configuration.getLong(optionName, 0));
  assertEquals(0, ToolRunner.run(configuration, player,
      new String[] { walInputDir, tableName1.getNameAsString(), tableName2.getNameAsString() }));

  // verify the WAL was played into table 2
  Get g = new Get(ROW);
  Result r = t2.get(g);
  assertEquals(1, r.size());
  assertTrue(CellUtil.matchingQualifier(r.rawCells()[0], COLUMN2));
}
Example 17
Source File: HbaseImpl.java From tephra with MIT License
private void delete(Table table, String id) throws IOException {
    table.delete(new Delete(Bytes.toBytes(id)));
}
Example 18
Source File: CubeMigrationCLI.java From kylin-on-parquet-v2 with Apache License 2.0
private static void undo(Opt opt) throws IOException, InterruptedException {
    logger.info("Undo operation: " + opt.toString());

    switch (opt.type) {
    case CHANGE_HTABLE_HOST: {
        TableName tableName = TableName.valueOf((String) opt.params[0]);
        HTableDescriptor desc = hbaseAdmin.getTableDescriptor(tableName);
        hbaseAdmin.disableTable(tableName);
        desc.setValue(IRealizationConstants.HTableTag, srcConfig.getMetadataUrlPrefix());
        hbaseAdmin.modifyTable(tableName, desc);
        hbaseAdmin.enableTable(tableName);
        break;
    }
    case COPY_FILE_IN_META: {
        // no harm
        logger.info("Undo for COPY_FILE_IN_META is ignored");
        break;
    }
    case COPY_DICT_OR_SNAPSHOT: {
        // no harm
        logger.info("Undo for COPY_DICT_OR_SNAPSHOT is ignored");
        break;
    }
    case RENAME_FOLDER_IN_HDFS: {
        String srcPath = (String) opt.params[1];
        String dstPath = (String) opt.params[0];

        if (hdfsFS.exists(new Path(srcPath)) && !hdfsFS.exists(new Path(dstPath))) {
            renameHDFSPath(srcPath, dstPath);
            logger.info("HDFS Folder renamed from " + srcPath + " to " + dstPath);
        }
        break;
    }
    case ADD_INTO_PROJECT: {
        logger.info("Undo for ADD_INTO_PROJECT is ignored");
        break;
    }
    case COPY_ACL: {
        String cubeId = (String) opt.params[0];
        String modelId = (String) opt.params[1];
        Table destAclHtable = null;
        try {
            destAclHtable = HBaseConnection.get(dstConfig.getStorageUrl())
                    .getTable(TableName.valueOf(dstConfig.getMetadataUrlPrefix() + ACL_TABLE_NAME));

            destAclHtable.delete(new Delete(Bytes.toBytes(cubeId)));
            destAclHtable.delete(new Delete(Bytes.toBytes(modelId)));
        } finally {
            IOUtils.closeQuietly(destAclHtable);
        }
        break;
    }
    case PURGE_AND_DISABLE: {
        logger.info("Undo for PURGE_AND_DISABLE is not supported");
        break;
    }
    default: {
        // do nothing
        break;
    }
    }
}
Example 19
Source File: TestRegionObserverInterface.java From hbase with Apache License 2.0
@Test
public void testRegionObserver() throws IOException {
  final TableName tableName =
      TableName.valueOf(TEST_TABLE.getNameAsString() + "." + name.getMethodName());
  // recreate table every time in order to reset the status of the
  // coprocessor.
  Table table = util.createTable(tableName, new byte[][] { A, B, C });
  try {
    verifyMethodResult(SimpleRegionObserver.class,
      new String[] { "hadPreGet", "hadPostGet", "hadPrePut", "hadPostPut", "hadDelete",
          "hadPostStartRegionOperation", "hadPostCloseRegionOperation",
          "hadPostBatchMutateIndispensably" },
      tableName,
      new Boolean[] { false, false, false, false, false, false, false, false });

    Put put = new Put(ROW);
    put.addColumn(A, A, A);
    put.addColumn(B, B, B);
    put.addColumn(C, C, C);
    table.put(put);

    verifyMethodResult(SimpleRegionObserver.class,
      new String[] { "hadPreGet", "hadPostGet", "hadPrePut", "hadPostPut", "hadPreBatchMutate",
          "hadPostBatchMutate", "hadDelete", "hadPostStartRegionOperation",
          "hadPostCloseRegionOperation", "hadPostBatchMutateIndispensably" },
      TEST_TABLE,
      new Boolean[] { false, false, true, true, true, true, false, true, true, true });

    verifyMethodResult(SimpleRegionObserver.class,
      new String[] { "getCtPreOpen", "getCtPostOpen", "getCtPreClose", "getCtPostClose" },
      tableName, new Integer[] { 1, 1, 0, 0 });

    Get get = new Get(ROW);
    get.addColumn(A, A);
    get.addColumn(B, B);
    get.addColumn(C, C);
    table.get(get);

    verifyMethodResult(SimpleRegionObserver.class,
      new String[] { "hadPreGet", "hadPostGet", "hadPrePut", "hadPostPut", "hadDelete",
          "hadPrePreparedDeleteTS" },
      tableName, new Boolean[] { true, true, true, true, false, false });

    Delete delete = new Delete(ROW);
    delete.addColumn(A, A);
    delete.addColumn(B, B);
    delete.addColumn(C, C);
    table.delete(delete);

    verifyMethodResult(SimpleRegionObserver.class,
      new String[] { "hadPreGet", "hadPostGet", "hadPrePut", "hadPostPut", "hadPreBatchMutate",
          "hadPostBatchMutate", "hadDelete", "hadPrePreparedDeleteTS" },
      tableName, new Boolean[] { true, true, true, true, true, true, true, true });
  } finally {
    util.deleteTable(tableName);
    table.close();
  }

  verifyMethodResult(SimpleRegionObserver.class,
    new String[] { "getCtPreOpen", "getCtPostOpen", "getCtPreClose", "getCtPostClose" },
    tableName, new Integer[] { 1, 1, 1, 1 });
}