Java Code Examples for org.apache.hadoop.hbase.io.hfile.CacheConfig#setEvictOnClose()
The following examples show how to use org.apache.hadoop.hbase.io.hfile.CacheConfig#setEvictOnClose().
Each example notes its source file, originating project, and license.
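Before the project examples, here is a minimal standalone sketch of the API. It is not taken from any project below; the class name EvictOnCloseSketch is illustrative, and it assumes an HBase 2.x client on the classpath. It builds a CacheConfig from a plain Configuration and flips the evict-on-close flag, which controls whether a store file's cached blocks are evicted from the block cache when the file is closed.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;

public class EvictOnCloseSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // The cluster-wide default comes from hbase.rs.evictblocksonclose
    // (CacheConfig.EVICT_BLOCKS_ON_CLOSE_KEY) and defaults to false.
    CacheConfig cacheConf = new CacheConfig(conf);
    // Override the default for this CacheConfig instance only: when a
    // store file is closed, its cached blocks will be evicted.
    cacheConf.setEvictOnClose(true);
    System.out.println("evictOnClose = " + cacheConf.shouldEvictOnClose());
  }
}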
Example 1
Source File: TestBlockEvictionFromClient.java From hbase with Apache License 2.0 | 5 votes |
private BlockCache setCacheProperties(HRegion region) {
  Iterator<HStore> strItr = region.getStores().iterator();
  BlockCache cache = null;
  while (strItr.hasNext()) {
    HStore store = strItr.next();
    CacheConfig cacheConf = store.getCacheConfig();
    cacheConf.setCacheDataOnWrite(true);
    cacheConf.setEvictOnClose(true);
    // Use the last one
    cache = cacheConf.getBlockCache().get();
  }
  return cache;
}
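A note on this helper: setCacheDataOnWrite(true) makes flushes populate the block cache, and setEvictOnClose(true) makes closing a store file drop its blocks from the cache, which is what lets the surrounding test assert that eviction actually happened. Per the "Use the last one" comment, the helper returns the block cache of the last store it visits.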
Example 2
Source File: TestAvoidCellReferencesIntoShippedBlocks.java From hbase with Apache License 2.0 | 4 votes |
@Test
public void testHBase16372InCompactionWritePath() throws Exception {
  final TableName tableName = TableName.valueOf(name.getMethodName());
  // Create a table with block size as 1024
  final Table table = TEST_UTIL.createTable(tableName, FAMILIES_1, 1, 1024,
    CompactorRegionObserver.class.getName());
  try {
    // get the block cache and region
    RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName);
    String regionName = locator.getAllRegionLocations().get(0).getRegion().getEncodedName();
    HRegion region =
      (HRegion) TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName);
    HStore store = region.getStores().iterator().next();
    CacheConfig cacheConf = store.getCacheConfig();
    cacheConf.setCacheDataOnWrite(true);
    cacheConf.setEvictOnClose(true);
    final BlockCache cache = cacheConf.getBlockCache().get();
    // insert data: 6 rows (ROW through ROW5) are added
    Put put = new Put(ROW);
    put.addColumn(FAMILY, QUALIFIER, data);
    table.put(put);
    put = new Put(ROW);
    put.addColumn(FAMILY, QUALIFIER1, data);
    table.put(put);
    put = new Put(ROW1);
    put.addColumn(FAMILY, QUALIFIER, data);
    table.put(put);
    // data was in memstore so don't expect any changes
    region.flush(true);
    put = new Put(ROW1);
    put.addColumn(FAMILY, QUALIFIER1, data);
    table.put(put);
    put = new Put(ROW2);
    put.addColumn(FAMILY, QUALIFIER, data);
    table.put(put);
    put = new Put(ROW2);
    put.addColumn(FAMILY, QUALIFIER1, data);
    table.put(put);
    // data was in memstore so don't expect any changes
    region.flush(true);
    put = new Put(ROW3);
    put.addColumn(FAMILY, QUALIFIER, data);
    table.put(put);
    put = new Put(ROW3);
    put.addColumn(FAMILY, QUALIFIER1, data);
    table.put(put);
    put = new Put(ROW4);
    put.addColumn(FAMILY, QUALIFIER, data);
    table.put(put);
    // data was in memstore so don't expect any changes
    region.flush(true);
    put = new Put(ROW4);
    put.addColumn(FAMILY, QUALIFIER1, data);
    table.put(put);
    put = new Put(ROW5);
    put.addColumn(FAMILY, QUALIFIER, data);
    table.put(put);
    put = new Put(ROW5);
    put.addColumn(FAMILY, QUALIFIER1, data);
    table.put(put);
    // data was in memstore so don't expect any changes
    region.flush(true);
    // Load cache
    Scan s = new Scan();
    s.setMaxResultSize(1000);
    int count;
    try (ResultScanner scanner = table.getScanner(s)) {
      count = Iterables.size(scanner);
    }
    assertEquals("Count all the rows ", 6, count);
    // all the cache is loaded
    // trigger a major compaction
    ScannerThread scannerThread = new ScannerThread(table, cache);
    scannerThread.start();
    region.compact(true);
    s = new Scan();
    s.setMaxResultSize(1000);
    try (ResultScanner scanner = table.getScanner(s)) {
      count = Iterables.size(scanner);
    }
    assertEquals("Count all the rows ", 6, count);
  } finally {
    table.close();
  }
}
Example 3
Source File: TestBlockEvictionFromClient.java From hbase with Apache License 2.0 | 4 votes |
@Test
public void testGetWithCellsInDifferentFiles() throws IOException, InterruptedException {
  Table table = null;
  try {
    latch = new CountDownLatch(1);
    // Check if get() returns blocks on its close() itself
    getLatch = new CountDownLatch(1);
    final TableName tableName = TableName.valueOf(name.getMethodName());
    // Create KVs that will give you two blocks
    // Create a table with block size as 1024
    table = TEST_UTIL.createTable(tableName, FAMILIES_1, 1, 1024,
      CustomInnerRegionObserver.class.getName());
    // get the block cache and region
    RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName);
    String regionName = locator.getAllRegionLocations().get(0).getRegion().getEncodedName();
    HRegion region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName);
    HStore store = region.getStores().iterator().next();
    CacheConfig cacheConf = store.getCacheConfig();
    cacheConf.setCacheDataOnWrite(true);
    cacheConf.setEvictOnClose(true);
    BlockCache cache = cacheConf.getBlockCache().get();
    Put put = new Put(ROW);
    put.addColumn(FAMILY, QUALIFIER, data);
    table.put(put);
    region.flush(true);
    put = new Put(ROW1);
    put.addColumn(FAMILY, QUALIFIER, data);
    table.put(put);
    region.flush(true);
    byte[] QUALIFIER2 = Bytes.add(QUALIFIER, QUALIFIER);
    put = new Put(ROW);
    put.addColumn(FAMILY, QUALIFIER2, data2);
    table.put(put);
    region.flush(true);
    // flush the data
    System.out.println("Flushing cache");
    // Should create one HFile with 2 blocks
    CustomInnerRegionObserver.waitForGets.set(true);
    // Create three sets of gets
    GetThread[] getThreads = initiateGet(table, false, false);
    Thread.sleep(200);
    CustomInnerRegionObserver.getCdl().get().countDown();
    for (GetThread thread : getThreads) {
      thread.join();
    }
    // Verify whether the gets have returned the blocks that they had
    CustomInnerRegionObserver.waitForGets.set(true);
    // give some time for the block reference count to be decremented
    checkForBlockEviction(cache, true, false);
    getLatch.countDown();
    System.out.println("Gets should have returned the blocks");
  } finally {
    if (table != null) {
      table.close();
    }
  }
}