Java Code Examples for org.apache.hadoop.hbase.regionserver.HStore#triggerMajorCompaction()
The following examples show how to use
org.apache.hadoop.hbase.regionserver.HStore#triggerMajorCompaction().
You can go to the original project or source file by following the links above each example.
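Before the full examples, here is a minimal sketch of the pattern the tests below share: flush the region, call triggerMajorCompaction() on each HStore to mark it for major compaction, and then drive the compaction on the region. The helper method name forceMajorCompaction and the way the HRegion is obtained are assumptions made for illustration; only the HRegion/HStore calls themselves are taken from the examples below.

import java.io.IOException;

import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HStore;

public class MajorCompactionSketch {

  /**
   * Hypothetical helper: flushes a region and forces a major compaction of
   * every store. The region is assumed to come from a test cluster, as in
   * the examples below.
   */
  static void forceMajorCompaction(HRegion region) throws IOException {
    // persist the memstore first so there are store files to compact
    region.flush(true);
    for (HStore store : region.getStores()) {
      // mark the store so the next selected compaction is a major one
      store.triggerMajorCompaction();
    }
    // run the compaction synchronously on the region
    region.compact(true);
  }
}

Note that triggerMajorCompaction() is a region-server-internal API used here because these tests operate directly on region internals; client code would normally request a major compaction through the Admin interface instead.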
Example 1
Source File: MutableIndexExtendedIT.java From phoenix with Apache License 2.0
@Test(timeout = 120000)
public void testCompactNonPhoenixTable() throws Exception {
  if (localIndex || tableDDLOptions.contains("TRANSACTIONAL=true")) return;
  try (Connection conn = getConnection()) {
    // create a vanilla HBase table (non-Phoenix)
    String randomTable = generateUniqueName();
    TableName hbaseTN = TableName.valueOf(randomTable);
    byte[] famBytes = Bytes.toBytes("fam");
    Table hTable = getUtility().createTable(hbaseTN, famBytes);
    TestUtil.addCoprocessor(conn, randomTable, UngroupedAggregateRegionObserver.class);
    Put put = new Put(Bytes.toBytes("row"));
    byte[] value = new byte[1];
    Bytes.random(value);
    put.addColumn(famBytes, Bytes.toBytes("colQ"), value);
    hTable.put(put);

    // major compaction shouldn't cause a timeout or RS abort
    List<HRegion> regions = getUtility().getHBaseCluster().getRegions(hbaseTN);
    HRegion hRegion = regions.get(0);
    hRegion.flush(true);
    HStore store = hRegion.getStore(famBytes);
    store.triggerMajorCompaction();
    store.compactRecentForTestingAssumingDefaultPolicy(1);

    // we should be able to compact syscat itself as well
    regions = getUtility().getHBaseCluster().getRegions(
      TableName.valueOf(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME));
    hRegion = regions.get(0);
    hRegion.flush(true);
    store = hRegion.getStore(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES);
    store.triggerMajorCompaction();
    store.compactRecentForTestingAssumingDefaultPolicy(1);
  }
}
Example 2
Source File: TestMobStoreCompaction.java From hbase with Apache License 2.0
/**
 * During compaction, the mob threshold size is changed.
 */
@Test
public void testLargerValue() throws Exception {
  init(UTIL.getConfiguration(), 200);
  byte[] dummyData = makeDummyData(300); // larger than mob threshold
  Table loader = new RegionAsTable(region);
  for (int i = 0; i < compactionThreshold; i++) {
    Put p = createPut(i, dummyData);
    loader.put(p);
    region.flush(true);
  }
  assertEquals("Before compaction: store files", compactionThreshold, countStoreFiles());
  assertEquals("Before compaction: mob file count", compactionThreshold, countMobFiles());
  assertEquals("Before compaction: rows", compactionThreshold, UTIL.countRows(region));
  assertEquals("Before compaction: mob rows", compactionThreshold, countMobRows());
  assertEquals("Before compaction: number of mob cells", compactionThreshold,
    countMobCellsInMetadata());
  // Change the threshold larger than the data size
  setMobThreshold(region, COLUMN_FAMILY, 500);
  region.initialize();
  List<HStore> stores = region.getStores();
  for (HStore store : stores) {
    // Force major compaction
    store.triggerMajorCompaction();
    Optional<CompactionContext> context = store.requestCompaction(HStore.PRIORITY_USER,
      CompactionLifeCycleTracker.DUMMY, User.getCurrent());
    if (!context.isPresent()) {
      continue;
    }
    region.compact(context.get(), store, NoLimitThroughputController.INSTANCE,
      User.getCurrent());
  }
  assertEquals("After compaction: store files", 1, countStoreFiles());
  assertEquals("After compaction: mob file count", compactionThreshold, countMobFiles());
  assertEquals("After compaction: referenced mob file count", 0, countReferencedMobFiles());
  assertEquals("After compaction: rows", compactionThreshold, UTIL.countRows(region));
  assertEquals("After compaction: mob rows", 0, countMobRows());
}
Example 3
Source File: AbstractTestWALReplay.java From hbase with Apache License 2.0
/**
 * @throws Exception
 */
@Test
public void testReplayEditsAfterRegionMovedWithMultiCF() throws Exception {
  final TableName tableName = TableName.valueOf("testReplayEditsAfterRegionMovedWithMultiCF");
  byte[] family1 = Bytes.toBytes("cf1");
  byte[] family2 = Bytes.toBytes("cf2");
  byte[] qualifier = Bytes.toBytes("q");
  byte[] value = Bytes.toBytes("testV");
  byte[][] familys = { family1, family2 };
  TEST_UTIL.createTable(tableName, familys);
  Table htable = TEST_UTIL.getConnection().getTable(tableName);
  Put put = new Put(Bytes.toBytes("r1"));
  put.addColumn(family1, qualifier, value);
  htable.put(put);
  ResultScanner resultScanner = htable.getScanner(new Scan());
  int count = 0;
  while (resultScanner.next() != null) {
    count++;
  }
  resultScanner.close();
  assertEquals(1, count);

  MiniHBaseCluster hbaseCluster = TEST_UTIL.getMiniHBaseCluster();
  List<HRegion> regions = hbaseCluster.getRegions(tableName);
  assertEquals(1, regions.size());

  // move region to another regionserver
  Region destRegion = regions.get(0);
  int originServerNum = hbaseCluster.getServerWith(destRegion.getRegionInfo().getRegionName());
  assertTrue("Please start more than 1 regionserver",
    hbaseCluster.getRegionServerThreads().size() > 1);
  int destServerNum = 0;
  while (destServerNum == originServerNum) {
    destServerNum++;
  }
  HRegionServer originServer = hbaseCluster.getRegionServer(originServerNum);
  HRegionServer destServer = hbaseCluster.getRegionServer(destServerNum);
  // move region to destination regionserver
  TEST_UTIL.moveRegionAndWait(destRegion.getRegionInfo(), destServer.getServerName());

  // delete the row
  Delete del = new Delete(Bytes.toBytes("r1"));
  htable.delete(del);
  resultScanner = htable.getScanner(new Scan());
  count = 0;
  while (resultScanner.next() != null) {
    count++;
  }
  resultScanner.close();
  assertEquals(0, count);

  // flush region and make major compaction
  HRegion region =
    (HRegion) destServer.getOnlineRegion(destRegion.getRegionInfo().getRegionName());
  region.flush(true);
  // wait to complete major compaction
  for (HStore store : region.getStores()) {
    store.triggerMajorCompaction();
  }
  region.compact(true);

  // move region to origin regionserver
  TEST_UTIL.moveRegionAndWait(destRegion.getRegionInfo(), originServer.getServerName());
  // abort the origin regionserver
  originServer.abort("testing");

  // see what we get
  Result result = htable.get(new Get(Bytes.toBytes("r1")));
  if (result != null) {
    assertTrue("Row is deleted, but we get" + result.toString(),
      (result == null) || result.isEmpty());
  }
  resultScanner.close();
}