Java Code Examples for org.apache.hadoop.hbase.regionserver.HRegion#compact()
The following examples show how to use org.apache.hadoop.hbase.regionserver.HRegion#compact().
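Most of the examples follow the same basic pattern: obtain an HRegion (typically from a test mini-cluster), flush the memstore so there are store files on disk, and then call compact(true) for a major compaction or compact(false) for a minor one. As a minimal sketch of that pattern — assuming a running HBaseTestingUtility (here called util) and an existing table, both of which are placeholders rather than part of any example below — a hypothetical helper could look like this:

  import java.io.IOException;
  import java.util.List;
  import org.apache.hadoop.hbase.HBaseTestingUtility;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.regionserver.HRegion;

  // Hypothetical helper, not taken from the examples below: flush and major-compact
  // every region of the given table on a test mini-cluster.
  static void flushAndMajorCompact(HBaseTestingUtility util, TableName tableName)
      throws IOException {
    List<HRegion> regions = util.getMiniHBaseCluster().getRegions(tableName);
    for (HRegion region : regions) {
      region.flush(true);    // flush the memstore so there is something on disk to compact
      region.compact(true);  // true = major compaction; pass false to request a minor compaction
    }
  }

Note that compact(boolean) may skip work if another compaction is already in progress, which is why some of the examples below also wait for the store-file count or the region's compaction state before asserting.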
Example 1
Source File: TestEncodedSeekers.java From hbase with Apache License 2.0
@Test
public void testEncodedSeeker() throws IOException {
  System.err.println("Testing encoded seekers for encoding : " + encoding +
      ", includeTags : " + includeTags + ", compressTags : " + compressTags);
  if (includeTags) {
    testUtil.getConfiguration().setInt(HFile.FORMAT_VERSION_KEY, 3);
  }

  LruBlockCache cache =
      (LruBlockCache) BlockCacheFactory.createBlockCache(testUtil.getConfiguration());
  // Need to disable default row bloom filter for this test to pass.
  ColumnFamilyDescriptor cfd = ColumnFamilyDescriptorBuilder.newBuilder(CF_BYTES)
      .setMaxVersions(MAX_VERSIONS)
      .setDataBlockEncoding(encoding)
      .setBlocksize(BLOCK_SIZE)
      .setBloomFilterType(BloomType.NONE)
      .setCompressTags(compressTags).build();
  HRegion region = testUtil.createTestRegion(TABLE_NAME, cfd, cache);

  // write the data, but leave some in the memstore
  doPuts(region);

  // verify correctness when memstore contains data
  doGets(region);

  // verify correctness again after compacting
  region.compact(false);
  doGets(region);

  Map<DataBlockEncoding, Integer> encodingCounts = cache.getEncodingCountsForTest();

  // Ensure that compactions don't pollute the cache with unencoded blocks
  // in case of in-cache-only encoding.
  System.err.println("encodingCounts=" + encodingCounts);
  assertEquals(1, encodingCounts.size());
  DataBlockEncoding encodingInCache = encodingCounts.keySet().iterator().next();
  assertEquals(encoding, encodingInCache);
  assertTrue(encodingCounts.get(encodingInCache) > 0);
}
Example 2
Source File: MiniHBaseCluster.java From hbase with Apache License 2.0
/**
 * Call compact on all regions on all participating regionservers.
 * @throws IOException
 */
public void compact(boolean major) throws IOException {
  for (JVMClusterUtil.RegionServerThread t : this.hbaseCluster.getRegionServers()) {
    for (HRegion r : t.getRegionServer().getOnlineRegionsLocalContext()) {
      r.compact(major);
    }
  }
}
Example 3
Source File: MiniHBaseCluster.java From hbase with Apache License 2.0
/**
 * Call compact on all regions of the specified table.
 * @throws IOException
 */
public void compact(TableName tableName, boolean major) throws IOException {
  for (JVMClusterUtil.RegionServerThread t : this.hbaseCluster.getRegionServers()) {
    for (HRegion r : t.getRegionServer().getOnlineRegionsLocalContext()) {
      if (r.getTableDescriptor().getTableName().equals(tableName)) {
        r.compact(major);
      }
    }
  }
}
Example 4
Source File: TestFavoredStochasticLoadBalancer.java From hbase with Apache License 2.0
private void compactTable(TableName tableName) throws IOException {
  for (JVMClusterUtil.RegionServerThread t : cluster.getRegionServerThreads()) {
    for (HRegion region : t.getRegionServer().getRegions(tableName)) {
      region.compact(true);
    }
  }
}
Example 5
Source File: TestZooKeeperTableArchiveClient.java From hbase with Apache License 2.0
private void loadFlushAndCompact(HRegion region, byte[] family) throws IOException {
  // create two hfiles in the region
  createHFileInRegion(region, family);
  createHFileInRegion(region, family);

  HStore s = region.getStore(family);
  int count = s.getStorefilesCount();
  assertTrue("Don't have the expected store files, wanted >= 2 store files, but was:" + count,
      count >= 2);

  // compact the two files into one file to get files in the archive
  LOG.debug("Compacting stores");
  region.compact(true);
}
Example 6
Source File: TestRegionObserverInterface.java From hbase with Apache License 2.0
@Test
public void testPreWALAppendNotCalledOnMetaEdit() throws Exception {
  final TableName tableName =
      TableName.valueOf(TEST_TABLE.getNameAsString() + "." + name.getMethodName());
  TableDescriptorBuilder tdBuilder = TableDescriptorBuilder.newBuilder(tableName);
  ColumnFamilyDescriptorBuilder cfBuilder = ColumnFamilyDescriptorBuilder.newBuilder(FAMILY);
  tdBuilder.setColumnFamily(cfBuilder.build());
  tdBuilder.setCoprocessor(SimpleRegionObserver.class.getName());
  TableDescriptor td = tdBuilder.build();
  Table table = util.createTable(td, new byte[][] { A, B, C });

  PreWALAppendWALActionsListener listener = new PreWALAppendWALActionsListener();
  List<HRegion> regions = util.getHBaseCluster().getRegions(tableName);
  // should be only one region
  HRegion region = regions.get(0);

  region.getWAL().registerWALActionsListener(listener);
  // flushing should write to the WAL
  region.flush(true);
  // so should compaction
  region.compact(false);
  // and so should closing the region
  region.close();

  // but we still shouldn't have triggered preWALAppend because no user data was written
  String[] methods = new String[] { "getCtPreWALAppend" };
  Object[] expectedResult = new Integer[] { 0 };
  verifyMethodResult(SimpleRegionObserver.class, methods, tableName, expectedResult);
}
Example 7
Source File: TestCoprocessorInterface.java From hbase with Apache License 2.0
@Test
public void testCoprocessorInterface() throws IOException {
  TableName tableName = TableName.valueOf(name.getMethodName());
  byte[][] families = { fam1, fam2, fam3 };

  Configuration hc = initConfig();
  HRegion region = initHRegion(tableName, name.getMethodName(), hc,
      new Class<?>[] { CoprocessorImpl.class }, families);
  for (int i = 0; i < 3; i++) {
    HTestConst.addContent(region, fam3);
    region.flush(true);
  }

  region.compact(false);

  // HBASE-4197
  Scan s = new Scan();
  RegionScanner scanner = region.getCoprocessorHost().postScannerOpen(s, region.getScanner(s));
  assertTrue(scanner instanceof CustomScanner);
  // this would throw an exception before HBASE-4197
  scanner.next(new ArrayList<>());

  HBaseTestingUtility.closeRegionAndWAL(region);
  Coprocessor c = region.getCoprocessorHost().findCoprocessor(CoprocessorImpl.class);
  assertTrue("Coprocessor not started", ((CoprocessorImpl) c).wasStarted());
  assertTrue("Coprocessor not stopped", ((CoprocessorImpl) c).wasStopped());
  assertTrue(((CoprocessorImpl) c).wasOpened());
  assertTrue(((CoprocessorImpl) c).wasClosed());
  assertTrue(((CoprocessorImpl) c).wasFlushed());
  assertTrue(((CoprocessorImpl) c).wasCompacted());
}
Example 8
Source File: MutableIndexExtendedIT.java From phoenix with Apache License 2.0
@Test
public void testCompactDisabledIndex() throws Exception {
  if (localIndex || tableDDLOptions.contains("TRANSACTIONAL=true")) return;
  try (Connection conn = getConnection()) {
    String schemaName = generateUniqueName();
    String dataTableName = generateUniqueName() + "_DATA";
    String dataTableFullName = SchemaUtil.getTableName(schemaName, dataTableName);
    String indexTableName = generateUniqueName() + "_IDX";
    String indexTableFullName = SchemaUtil.getTableName(schemaName, indexTableName);
    conn.createStatement().execute(
        String.format(PartialScannerResultsDisabledIT.TEST_TABLE_DDL, dataTableFullName));
    conn.createStatement().execute(String.format(PartialScannerResultsDisabledIT.INDEX_1_DDL,
        indexTableName, dataTableFullName));

    // insert a row, and delete it
    PartialScannerResultsDisabledIT.writeSingleBatch(conn, 1, 1, dataTableFullName);
    List<HRegion> regions =
        getUtility().getHBaseCluster().getRegions(TableName.valueOf(dataTableFullName));
    HRegion hRegion = regions.get(0);
    // need to flush here, or else nothing will get written to disk due to the delete
    hRegion.flush(true);
    conn.createStatement().execute("DELETE FROM " + dataTableFullName);
    conn.commit();

    // disable the index, simulating an index write failure
    PhoenixConnection pConn = conn.unwrap(PhoenixConnection.class);
    IndexUtil.updateIndexState(pConn, indexTableFullName, PIndexState.DISABLE,
        EnvironmentEdgeManager.currentTimeMillis());

    // major compaction should not remove the deleted row
    hRegion.flush(true);
    hRegion.compact(true);
    Table dataTable = conn.unwrap(PhoenixConnection.class).getQueryServices()
        .getTable(Bytes.toBytes(dataTableFullName));
    assertEquals(1, TestUtil.getRawRowCount(dataTable));

    // reenable the index
    IndexUtil.updateIndexState(pConn, indexTableFullName, PIndexState.INACTIVE,
        EnvironmentEdgeManager.currentTimeMillis());
    IndexUtil.updateIndexState(pConn, indexTableFullName, PIndexState.ACTIVE, 0L);

    // now major compaction should remove the deleted row
    hRegion.compact(true);
    dataTable = conn.unwrap(PhoenixConnection.class).getQueryServices()
        .getTable(Bytes.toBytes(dataTableFullName));
    assertEquals(0, TestUtil.getRawRowCount(dataTable));
  }
}
Example 9
Source File: TransactionProcessorTest.java From phoenix-tephra with Apache License 2.0
@Test
public void testPreExistingData() throws Exception {
  String tableName = "TestPreExistingData";
  byte[] familyBytes = Bytes.toBytes("f");
  long ttlMillis = TimeUnit.DAYS.toMillis(14);
  HRegion region = createRegion(tableName, familyBytes, ttlMillis);
  try {
    region.initialize();

    // timestamps for pre-existing, non-transactional data
    long now = txVisibilityState.getVisibilityUpperBound() / TxConstants.MAX_TX_PER_MS;
    long older = now - ttlMillis / 2;
    long newer = now - ttlMillis / 3;
    // timestamps for transactional data
    long nowTx = txVisibilityState.getVisibilityUpperBound();
    long olderTx = nowTx - (ttlMillis / 2) * TxConstants.MAX_TX_PER_MS;
    long newerTx = nowTx - (ttlMillis / 3) * TxConstants.MAX_TX_PER_MS;

    Map<byte[], Long> ttls = Maps.newTreeMap(Bytes.BYTES_COMPARATOR);
    ttls.put(familyBytes, ttlMillis);

    List<Cell> cells = new ArrayList<>();
    cells.add(new KeyValue(Bytes.toBytes("r1"), familyBytes, Bytes.toBytes("c1"), older,
        Bytes.toBytes("v11")));
    cells.add(new KeyValue(Bytes.toBytes("r1"), familyBytes, Bytes.toBytes("c2"), newer,
        Bytes.toBytes("v12")));
    cells.add(new KeyValue(Bytes.toBytes("r2"), familyBytes, Bytes.toBytes("c1"), older,
        Bytes.toBytes("v21")));
    cells.add(new KeyValue(Bytes.toBytes("r2"), familyBytes, Bytes.toBytes("c2"), newer,
        Bytes.toBytes("v22")));
    cells.add(new KeyValue(Bytes.toBytes("r3"), familyBytes, Bytes.toBytes("c1"), olderTx,
        Bytes.toBytes("v31")));
    cells.add(new KeyValue(Bytes.toBytes("r3"), familyBytes, Bytes.toBytes("c2"), newerTx,
        Bytes.toBytes("v32")));

    // Write non-transactional and transactional data
    for (Cell c : cells) {
      region.put(new Put(c.getRow()).add(c.getFamily(), c.getQualifier(), c.getTimestamp(),
          c.getValue()));
    }

    Scan rawScan = new Scan();
    rawScan.setMaxVersions();

    Transaction dummyTransaction = TxUtils.createDummyTransaction(txVisibilityState);
    Scan txScan = new Scan();
    txScan.setMaxVersions();
    txScan.setTimeRange(TxUtils.getOldestVisibleTimestamp(ttls, dummyTransaction, true),
        TxUtils.getMaxVisibleTimestamp(dummyTransaction));
    txScan.setFilter(TransactionFilters.getVisibilityFilter(dummyTransaction, ttls, false,
        ScanType.USER_SCAN));

    // read all back with raw scanner
    scanAndAssert(region, cells, rawScan);

    // read all back with transaction filter
    scanAndAssert(region, cells, txScan);

    // force a flush to clear the memstore
    region.flushcache(true, false);
    scanAndAssert(region, cells, txScan);

    // force a major compaction to remove any expired cells
    region.compact(true);
    scanAndAssert(region, cells, txScan);

    // Reduce TTL, this should make cells with timestamps older and olderTx expire
    long newTtl = ttlMillis / 2 - 1;
    region = updateTtl(region, familyBytes, newTtl);
    ttls.put(familyBytes, newTtl);
    txScan.setTimeRange(TxUtils.getOldestVisibleTimestamp(ttls, dummyTransaction, true),
        TxUtils.getMaxVisibleTimestamp(dummyTransaction));
    txScan.setFilter(TransactionFilters.getVisibilityFilter(dummyTransaction, ttls, false,
        ScanType.USER_SCAN));

    // Raw scan should still give all cells
    scanAndAssert(region, cells, rawScan);
    // However, tx scan should not return expired cells
    scanAndAssert(region, select(cells, 1, 3, 5), txScan);

    region.flushcache(true, false);
    scanAndAssert(region, cells, rawScan);

    // force a major compaction to remove any expired cells
    region.compact(true);
    // This time raw scan too should not return expired cells, as they would be dropped
    // during major compaction
    scanAndAssert(region, select(cells, 1, 3, 5), rawScan);

    // Reduce TTL again to 1 ms, this should expire all cells
    newTtl = 1;
    region = updateTtl(region, familyBytes, newTtl);
    ttls.put(familyBytes, newTtl);
    txScan.setTimeRange(TxUtils.getOldestVisibleTimestamp(ttls, dummyTransaction, true),
        TxUtils.getMaxVisibleTimestamp(dummyTransaction));
    txScan.setFilter(TransactionFilters.getVisibilityFilter(dummyTransaction, ttls, false,
        ScanType.USER_SCAN));

    // force a major compaction to remove expired cells
    region.compact(true);
    // This time raw scan should not return any cells, as all cells have expired.
    scanAndAssert(region, Collections.<Cell>emptyList(), rawScan);
  } finally {
    region.close();
  }
}
Example 10
Source File: TestScannerSelectionUsingTTL.java From hbase with Apache License 2.0
@Test
public void testScannerSelection() throws IOException {
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.setBoolean("hbase.store.delete.expired.storefile", false);
  LruBlockCache cache = (LruBlockCache) BlockCacheFactory.createBlockCache(conf);

  TableDescriptor td = TableDescriptorBuilder.newBuilder(TABLE).setColumnFamily(
      ColumnFamilyDescriptorBuilder.newBuilder(FAMILY_BYTES).setMaxVersions(Integer.MAX_VALUE)
          .setTimeToLive(TTL_SECONDS).build()).build();
  RegionInfo info = RegionInfoBuilder.newBuilder(TABLE).build();
  HRegion region = HBaseTestingUtility.createRegionAndWAL(info,
      TEST_UTIL.getDataTestDir(info.getEncodedName()), conf, td, cache);

  long ts = EnvironmentEdgeManager.currentTime();
  long version = 0; // make sure each new set of Put's have a new ts
  for (int iFile = 0; iFile < totalNumFiles; ++iFile) {
    if (iFile == NUM_EXPIRED_FILES) {
      Threads.sleepWithoutInterrupt(TTL_MS);
      version += TTL_MS;
    }

    for (int iRow = 0; iRow < NUM_ROWS; ++iRow) {
      Put put = new Put(Bytes.toBytes("row" + iRow));
      for (int iCol = 0; iCol < NUM_COLS_PER_ROW; ++iCol) {
        put.addColumn(FAMILY_BYTES, Bytes.toBytes("col" + iCol), ts + version,
            Bytes.toBytes("value" + iFile + "_" + iRow + "_" + iCol));
      }
      region.put(put);
    }
    region.flush(true);
    version++;
  }

  Scan scan = new Scan().readVersions(Integer.MAX_VALUE);
  cache.clearCache();
  InternalScanner scanner = region.getScanner(scan);
  List<Cell> results = new ArrayList<>();
  final int expectedKVsPerRow = numFreshFiles * NUM_COLS_PER_ROW;
  int numReturnedRows = 0;
  LOG.info("Scanning the entire table");
  while (scanner.next(results) || results.size() > 0) {
    assertEquals(expectedKVsPerRow, results.size());
    ++numReturnedRows;
    results.clear();
  }
  assertEquals(NUM_ROWS, numReturnedRows);
  Set<String> accessedFiles = cache.getCachedFileNamesForTest();
  LOG.debug("Files accessed during scan: " + accessedFiles);

  // Exercise both compaction codepaths.
  if (explicitCompaction) {
    HStore store = region.getStore(FAMILY_BYTES);
    store.compactRecentForTestingAssumingDefaultPolicy(totalNumFiles);
  } else {
    region.compact(false);
  }

  HBaseTestingUtility.closeRegionAndWAL(region);
}
Example 11
Source File: TestNamespaceAuditor.java From hbase with Apache License 2.0
@Test
public void testRegionMerge() throws Exception {
  String nsp1 = prefix + "_regiontest";
  final int initialRegions = 3;
  NamespaceDescriptor nspDesc = NamespaceDescriptor.create(nsp1)
      .addConfiguration(TableNamespaceManager.KEY_MAX_REGIONS, "" + initialRegions)
      .addConfiguration(TableNamespaceManager.KEY_MAX_TABLES, "2").build();
  ADMIN.createNamespace(nspDesc);
  final TableName tableTwo = TableName.valueOf(nsp1 + TableName.NAMESPACE_DELIM + "table2");
  byte[] columnFamily = Bytes.toBytes("info");
  TableDescriptorBuilder.ModifyableTableDescriptor tableDescriptor =
      new TableDescriptorBuilder.ModifyableTableDescriptor(tableTwo);
  tableDescriptor.setColumnFamily(
      new ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor(columnFamily));
  ADMIN.createTable(tableDescriptor, Bytes.toBytes("0"), Bytes.toBytes("9"), initialRegions);
  Connection connection = ConnectionFactory.createConnection(UTIL.getConfiguration());
  try (Table table = connection.getTable(tableTwo)) {
    UTIL.loadNumericRows(table, Bytes.toBytes("info"), 1000, 1999);
  }
  ADMIN.flush(tableTwo);
  List<RegionInfo> hris = ADMIN.getRegions(tableTwo);
  assertEquals(initialRegions, hris.size());
  Collections.sort(hris, RegionInfo.COMPARATOR);
  Future<?> f = ADMIN.mergeRegionsAsync(
      hris.get(0).getEncodedNameAsBytes(), hris.get(1).getEncodedNameAsBytes(), false);
  f.get(10, TimeUnit.SECONDS);
  hris = ADMIN.getRegions(tableTwo);
  assertEquals(initialRegions - 1, hris.size());
  Collections.sort(hris, RegionInfo.COMPARATOR);
  byte[] splitKey = Bytes.toBytes("3");
  HRegion regionToSplit = UTIL.getMiniHBaseCluster().getRegions(tableTwo).stream()
      .filter(r -> r.getRegionInfo().containsRow(splitKey)).findFirst().get();
  regionToSplit.compact(true);
  // Waiting for compaction to finish
  UTIL.waitFor(30000, new Waiter.Predicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      return (CompactionState.NONE == ADMIN
          .getCompactionStateForRegion(regionToSplit.getRegionInfo().getRegionName()));
    }
  });
  // Cleaning compacted references for split to proceed
  regionToSplit.getStores().stream().forEach(s -> {
    try {
      s.closeAndArchiveCompactedFiles();
    } catch (IOException e1) {
      LOG.error("Error while cleaning compacted file");
    }
  });
  // the above compact may quit immediately if there is a compaction ongoing, so here we need to
  // wait a while to let the ongoing compaction finish.
  UTIL.waitFor(10000, regionToSplit::isSplittable);
  ADMIN.splitRegionAsync(regionToSplit.getRegionInfo().getRegionName(), splitKey).get(10,
      TimeUnit.SECONDS);
  hris = ADMIN.getRegions(tableTwo);
  assertEquals(initialRegions, hris.size());
  Collections.sort(hris, RegionInfo.COMPARATOR);

  // Fail region merge through Coprocessor hook
  MiniHBaseCluster cluster = UTIL.getHBaseCluster();
  MasterCoprocessorHost cpHost = cluster.getMaster().getMasterCoprocessorHost();
  Coprocessor coprocessor = cpHost.findCoprocessor(CPMasterObserver.class);
  CPMasterObserver masterObserver = (CPMasterObserver) coprocessor;
  masterObserver.failMerge(true);

  f = ADMIN.mergeRegionsAsync(
      hris.get(1).getEncodedNameAsBytes(), hris.get(2).getEncodedNameAsBytes(), false);
  try {
    f.get(10, TimeUnit.SECONDS);
    fail("Merge was supposed to fail!");
  } catch (ExecutionException ee) {
    // Expected.
  }
  hris = ADMIN.getRegions(tableTwo);
  assertEquals(initialRegions, hris.size());
  Collections.sort(hris, RegionInfo.COMPARATOR);
  // verify that we cannot split
  try {
    ADMIN.split(tableTwo, Bytes.toBytes("6"));
    fail();
  } catch (DoNotRetryRegionException e) {
    // Expected
  }
  Thread.sleep(2000);
  assertEquals(initialRegions, ADMIN.getRegions(tableTwo).size());
}
Example 12
Source File: AbstractTestWALReplay.java From hbase with Apache License 2.0
/**
 * @throws Exception
 */
@Test
public void testReplayEditsAfterRegionMovedWithMultiCF() throws Exception {
  final TableName tableName = TableName.valueOf("testReplayEditsAfterRegionMovedWithMultiCF");
  byte[] family1 = Bytes.toBytes("cf1");
  byte[] family2 = Bytes.toBytes("cf2");
  byte[] qualifier = Bytes.toBytes("q");
  byte[] value = Bytes.toBytes("testV");
  byte[][] familys = { family1, family2 };
  TEST_UTIL.createTable(tableName, familys);
  Table htable = TEST_UTIL.getConnection().getTable(tableName);
  Put put = new Put(Bytes.toBytes("r1"));
  put.addColumn(family1, qualifier, value);
  htable.put(put);
  ResultScanner resultScanner = htable.getScanner(new Scan());
  int count = 0;
  while (resultScanner.next() != null) {
    count++;
  }
  resultScanner.close();
  assertEquals(1, count);

  MiniHBaseCluster hbaseCluster = TEST_UTIL.getMiniHBaseCluster();
  List<HRegion> regions = hbaseCluster.getRegions(tableName);
  assertEquals(1, regions.size());

  // move region to another regionserver
  Region destRegion = regions.get(0);
  int originServerNum = hbaseCluster.getServerWith(destRegion.getRegionInfo().getRegionName());
  assertTrue("Please start more than 1 regionserver",
      hbaseCluster.getRegionServerThreads().size() > 1);
  int destServerNum = 0;
  while (destServerNum == originServerNum) {
    destServerNum++;
  }
  HRegionServer originServer = hbaseCluster.getRegionServer(originServerNum);
  HRegionServer destServer = hbaseCluster.getRegionServer(destServerNum);
  // move region to destination regionserver
  TEST_UTIL.moveRegionAndWait(destRegion.getRegionInfo(), destServer.getServerName());

  // delete the row
  Delete del = new Delete(Bytes.toBytes("r1"));
  htable.delete(del);
  resultScanner = htable.getScanner(new Scan());
  count = 0;
  while (resultScanner.next() != null) {
    count++;
  }
  resultScanner.close();
  assertEquals(0, count);

  // flush region and make major compaction
  HRegion region =
      (HRegion) destServer.getOnlineRegion(destRegion.getRegionInfo().getRegionName());
  region.flush(true);
  // wait to complete major compaction
  for (HStore store : region.getStores()) {
    store.triggerMajorCompaction();
  }
  region.compact(true);

  // move region to origin regionserver
  TEST_UTIL.moveRegionAndWait(destRegion.getRegionInfo(), originServer.getServerName());
  // abort the origin regionserver
  originServer.abort("testing");

  // see what we get
  Result result = htable.get(new Get(Bytes.toBytes("r1")));
  if (result != null) {
    assertTrue("Row is deleted, but we get " + result.toString(),
        (result == null) || result.isEmpty());
  }
  resultScanner.close();
}
Example 13
Source File: AbstractTestLogRolling.java From hbase with Apache License 2.0
/**
 * Tests that logs are deleted when some region has a compaction
 * record in WAL and no other records. See HBASE-8597.
 */
@Test
public void testCompactionRecordDoesntBlockRolling() throws Exception {
  Table table = null;

  // When the hbase:meta table can be opened, the region servers are running
  Table t = TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME);
  try {
    table = createTestTable(getName());

    server = TEST_UTIL.getRSForFirstRegionInTable(table.getName());
    HRegion region = server.getRegions(table.getName()).get(0);
    final WAL log = server.getWAL(region.getRegionInfo());
    Store s = region.getStore(HConstants.CATALOG_FAMILY);

    // Put some stuff into table, to make sure we have some files to compact.
    for (int i = 1; i <= 2; ++i) {
      doPut(table, i);
      admin.flush(table.getName());
    }
    doPut(table, 3); // don't flush yet, or compaction might trigger before we roll WAL
    assertEquals("Should have no WAL after initial writes", 0,
        AbstractFSWALProvider.getNumRolledLogFiles(log));
    assertEquals(2, s.getStorefilesCount());

    // Roll the log and compact table, to have compaction record in the 2nd WAL.
    log.rollWriter();
    assertEquals("Should have WAL; one table is not flushed", 1,
        AbstractFSWALProvider.getNumRolledLogFiles(log));
    admin.flush(table.getName());
    region.compact(false);
    // Wait for compaction in case if flush triggered it before us.
    Assert.assertNotNull(s);
    for (int waitTime = 3000; s.getStorefilesCount() > 1 && waitTime > 0; waitTime -= 200) {
      Threads.sleepWithoutInterrupt(200);
    }
    assertEquals("Compaction didn't happen", 1, s.getStorefilesCount());

    // Write some value to the table so the WAL cannot be deleted until table is flushed.
    doPut(table, 0); // Now 2nd WAL will have both compaction and put record for table.
    log.rollWriter(); // 1st WAL deleted, 2nd not deleted yet.
    assertEquals("Should have WAL; one table is not flushed", 1,
        AbstractFSWALProvider.getNumRolledLogFiles(log));

    // Flush table to make latest WAL obsolete; write another record, and roll again.
    admin.flush(table.getName());
    doPut(table, 1);
    log.rollWriter(); // Now 2nd WAL is deleted and 3rd is added.
    assertEquals("Should have 1 WALs at the end", 1,
        AbstractFSWALProvider.getNumRolledLogFiles(log));
  } finally {
    if (t != null) t.close();
    if (table != null) table.close();
  }
}
Example 14
Source File: TestRegionSnapshotTask.java From hbase with Apache License 2.0
/**
 * Tests adding a region to the snapshot manifest while compactions are running on the region.
 * The idea is to slow down the process of adding a store file to the manifest while
 * triggering compactions on the region, allowing the store files to be marked for archival
 * while the snapshot operation is running.
 * This test checks for the correct behavior in such a case that the compacted files should
 * not be moved around if a snapshot operation is in progress.
 * See HBASE-18398
 */
@Test
public void testAddRegionWithCompactions() throws Exception {
  final TableName tableName = TableName.valueOf("test_table");
  Table table = setupTable(tableName);

  List<HRegion> hRegions = TEST_UTIL.getHBaseCluster().getRegions(tableName);

  final SnapshotProtos.SnapshotDescription snapshot =
      SnapshotProtos.SnapshotDescription.newBuilder()
          .setTable(tableName.getNameAsString())
          .setType(SnapshotProtos.SnapshotDescription.Type.FLUSH)
          .setName("test_table_snapshot")
          .setVersion(SnapshotManifestV2.DESCRIPTOR_VERSION)
          .build();
  ForeignExceptionDispatcher monitor = new ForeignExceptionDispatcher(snapshot.getName());

  final HRegion region = spy(hRegions.get(0));

  Path workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(snapshot, rootDir, conf);
  final SnapshotManifest manifest =
      SnapshotManifest.create(conf, fs, workingDir, snapshot, monitor);
  manifest.addTableDescriptor(table.getDescriptor());

  if (!fs.exists(workingDir)) {
    fs.mkdirs(workingDir);
  }
  assertTrue(fs.exists(workingDir));
  SnapshotDescriptionUtils.writeSnapshotInfo(snapshot, workingDir, fs);

  doAnswer(__ -> {
    addRegionToSnapshot(snapshot, region, manifest);
    return null;
  }).when(region).addRegionToSnapshot(snapshot, monitor);

  FlushSnapshotSubprocedure.RegionSnapshotTask snapshotTask =
      new FlushSnapshotSubprocedure.RegionSnapshotTask(region, snapshot, true, monitor);
  ExecutorService executor = Executors.newFixedThreadPool(1);
  Future f = executor.submit(snapshotTask);

  // Trigger major compaction and wait for snapshot operation to finish
  LOG.info("Starting major compaction");
  region.compact(true);
  LOG.info("Finished major compaction");
  f.get();

  // Consolidate region manifests into a single snapshot manifest
  manifest.consolidate();

  // Make sure that the region manifest exists, which means the snapshot operation succeeded
  assertNotNull(manifest.getRegionManifests());
  // Sanity check, there should be only one region
  assertEquals(1, manifest.getRegionManifests().size());

  // Make sure that no files went missing after the snapshot operation
  SnapshotReferenceUtil.verifySnapshot(conf, fs, manifest);
}
Example 15
Source File: TestCoprocessorInterface.java From hbase with Apache License 2.0
@Test
public void testSharedData() throws IOException {
  TableName tableName = TableName.valueOf(name.getMethodName());
  byte[][] families = { fam1, fam2, fam3 };

  Configuration hc = initConfig();
  HRegion region = initHRegion(tableName, name.getMethodName(), hc, new Class<?>[] {}, families);

  for (int i = 0; i < 3; i++) {
    HTestConst.addContent(region, fam3);
    region.flush(true);
  }

  region.compact(false);

  region = reopenRegion(region, CoprocessorImpl.class, CoprocessorII.class);

  Coprocessor c = region.getCoprocessorHost().findCoprocessor(CoprocessorImpl.class);
  Coprocessor c2 = region.getCoprocessorHost().findCoprocessor(CoprocessorII.class);
  Object o = ((CoprocessorImpl) c).getSharedData().get("test1");
  Object o2 = ((CoprocessorII) c2).getSharedData().get("test2");
  assertNotNull(o);
  assertNotNull(o2);
  // the two coprocessors get different sharedDatas
  assertFalse(((CoprocessorImpl) c).getSharedData() == ((CoprocessorII) c2).getSharedData());
  c = region.getCoprocessorHost().findCoprocessor(CoprocessorImpl.class);
  c2 = region.getCoprocessorHost().findCoprocessor(CoprocessorII.class);
  // make sure that all coprocessor of a class have identical sharedDatas
  assertTrue(((CoprocessorImpl) c).getSharedData().get("test1") == o);
  assertTrue(((CoprocessorII) c2).getSharedData().get("test2") == o2);

  // now have all Environments fail
  try {
    byte[] r = region.getRegionInfo().getStartKey();
    if (r == null || r.length <= 0) {
      // Its the start row. Can't ask for null. Ask for minimal key instead.
      r = new byte[] { 0 };
    }
    Get g = new Get(r);
    region.get(g);
    fail();
  } catch (org.apache.hadoop.hbase.DoNotRetryIOException xc) {
  }
  assertNull(region.getCoprocessorHost().findCoprocessor(CoprocessorII.class));
  c = region.getCoprocessorHost().findCoprocessor(CoprocessorImpl.class);
  assertTrue(((CoprocessorImpl) c).getSharedData().get("test1") == o);
  c = c2 = null;
  // perform a GC
  System.gc();
  // reopen the region
  region = reopenRegion(region, CoprocessorImpl.class, CoprocessorII.class);
  c = region.getCoprocessorHost().findCoprocessor(CoprocessorImpl.class);
  // CPimpl is unaffected, still the same reference
  assertTrue(((CoprocessorImpl) c).getSharedData().get("test1") == o);
  c2 = region.getCoprocessorHost().findCoprocessor(CoprocessorII.class);
  // new map and object created, hence the reference is different
  // hence the old entry was indeed removed by the GC and new one has been created
  Object o3 = ((CoprocessorII) c2).getSharedData().get("test2");
  assertFalse(o3 == o2);
  HBaseTestingUtility.closeRegionAndWAL(region);
}
Example 16
Source File: TestAvoidCellReferencesIntoShippedBlocks.java From hbase with Apache License 2.0
@Test
public void testHBase16372InCompactionWritePath() throws Exception {
  final TableName tableName = TableName.valueOf(name.getMethodName());
  // Create a table with block size as 1024
  final Table table = TEST_UTIL.createTable(tableName, FAMILIES_1, 1, 1024,
      CompactorRegionObserver.class.getName());
  try {
    // get the block cache and region
    RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName);
    String regionName = locator.getAllRegionLocations().get(0).getRegion().getEncodedName();
    HRegion region =
        (HRegion) TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName);
    HStore store = region.getStores().iterator().next();
    CacheConfig cacheConf = store.getCacheConfig();
    cacheConf.setCacheDataOnWrite(true);
    cacheConf.setEvictOnClose(true);
    final BlockCache cache = cacheConf.getBlockCache().get();
    // insert data. 5 Rows are added
    Put put = new Put(ROW);
    put.addColumn(FAMILY, QUALIFIER, data);
    table.put(put);
    put = new Put(ROW);
    put.addColumn(FAMILY, QUALIFIER1, data);
    table.put(put);
    put = new Put(ROW1);
    put.addColumn(FAMILY, QUALIFIER, data);
    table.put(put);
    // data was in memstore so don't expect any changes
    region.flush(true);
    put = new Put(ROW1);
    put.addColumn(FAMILY, QUALIFIER1, data);
    table.put(put);
    put = new Put(ROW2);
    put.addColumn(FAMILY, QUALIFIER, data);
    table.put(put);
    put = new Put(ROW2);
    put.addColumn(FAMILY, QUALIFIER1, data);
    table.put(put);
    // data was in memstore so don't expect any changes
    region.flush(true);
    put = new Put(ROW3);
    put.addColumn(FAMILY, QUALIFIER, data);
    table.put(put);
    put = new Put(ROW3);
    put.addColumn(FAMILY, QUALIFIER1, data);
    table.put(put);
    put = new Put(ROW4);
    put.addColumn(FAMILY, QUALIFIER, data);
    table.put(put);
    // data was in memstore so don't expect any changes
    region.flush(true);
    put = new Put(ROW4);
    put.addColumn(FAMILY, QUALIFIER1, data);
    table.put(put);
    put = new Put(ROW5);
    put.addColumn(FAMILY, QUALIFIER, data);
    table.put(put);
    put = new Put(ROW5);
    put.addColumn(FAMILY, QUALIFIER1, data);
    table.put(put);
    // data was in memstore so don't expect any changes
    region.flush(true);
    // Load cache
    Scan s = new Scan();
    s.setMaxResultSize(1000);
    int count;
    try (ResultScanner scanner = table.getScanner(s)) {
      count = Iterables.size(scanner);
    }
    assertEquals("Count all the rows ", 6, count);
    // all the cache is loaded
    // trigger a major compaction
    ScannerThread scannerThread = new ScannerThread(table, cache);
    scannerThread.start();
    region.compact(true);
    s = new Scan();
    s.setMaxResultSize(1000);
    try (ResultScanner scanner = table.getScanner(s)) {
      count = Iterables.size(scanner);
    }
    assertEquals("Count all the rows ", 6, count);
  } finally {
    table.close();
  }
}
Example 17
Source File: TransactionProcessorTest.java From phoenix-tephra with Apache License 2.0
@Test
public void testPreExistingData() throws Exception {
  String tableName = "TestPreExistingData";
  byte[] familyBytes = Bytes.toBytes("f");
  long ttlMillis = TimeUnit.DAYS.toMillis(14);
  HRegion region = createRegion(tableName, familyBytes, ttlMillis);
  try {
    region.initialize();

    // timestamps for pre-existing, non-transactional data
    long now = txVisibilityState.getVisibilityUpperBound() / TxConstants.MAX_TX_PER_MS;
    long older = now - ttlMillis / 2;
    long newer = now - ttlMillis / 3;
    // timestamps for transactional data
    long nowTx = txVisibilityState.getVisibilityUpperBound();
    long olderTx = nowTx - (ttlMillis / 2) * TxConstants.MAX_TX_PER_MS;
    long newerTx = nowTx - (ttlMillis / 3) * TxConstants.MAX_TX_PER_MS;

    Map<byte[], Long> ttls = Maps.newTreeMap(Bytes.BYTES_COMPARATOR);
    ttls.put(familyBytes, ttlMillis);

    List<Cell> cells = new ArrayList<>();
    cells.add(new KeyValue(Bytes.toBytes("r1"), familyBytes, Bytes.toBytes("c1"), older,
        Bytes.toBytes("v11")));
    cells.add(new KeyValue(Bytes.toBytes("r1"), familyBytes, Bytes.toBytes("c2"), newer,
        Bytes.toBytes("v12")));
    cells.add(new KeyValue(Bytes.toBytes("r2"), familyBytes, Bytes.toBytes("c1"), older,
        Bytes.toBytes("v21")));
    cells.add(new KeyValue(Bytes.toBytes("r2"), familyBytes, Bytes.toBytes("c2"), newer,
        Bytes.toBytes("v22")));
    cells.add(new KeyValue(Bytes.toBytes("r3"), familyBytes, Bytes.toBytes("c1"), olderTx,
        Bytes.toBytes("v31")));
    cells.add(new KeyValue(Bytes.toBytes("r3"), familyBytes, Bytes.toBytes("c2"), newerTx,
        Bytes.toBytes("v32")));

    // Write non-transactional and transactional data
    for (Cell c : cells) {
      region.put(new Put(CellUtil.cloneRow(c)).addColumn(CellUtil.cloneFamily(c),
          CellUtil.cloneQualifier(c), c.getTimestamp(), CellUtil.cloneValue(c)));
    }

    Scan rawScan = new Scan();
    rawScan.setMaxVersions();

    Transaction dummyTransaction = TxUtils.createDummyTransaction(txVisibilityState);
    Scan txScan = new Scan();
    txScan.setMaxVersions();
    txScan.setTimeRange(TxUtils.getOldestVisibleTimestamp(ttls, dummyTransaction, true),
        TxUtils.getMaxVisibleTimestamp(dummyTransaction));
    txScan.setFilter(TransactionFilters.getVisibilityFilter(dummyTransaction, ttls, false,
        ScanType.USER_SCAN));

    // read all back with raw scanner
    scanAndAssert(region, cells, rawScan);

    // read all back with transaction filter
    scanAndAssert(region, cells, txScan);

    // force a flush to clear the memstore
    region.flushcache(true, false, new FlushLifeCycleTracker() { });
    scanAndAssert(region, cells, txScan);

    // force a major compaction to remove any expired cells
    region.compact(true);
    scanAndAssert(region, cells, txScan);

    // Reduce TTL, this should make cells with timestamps older and olderTx expire
    long newTtl = ttlMillis / 2 - 1;
    region = updateTtl(region, familyBytes, newTtl);
    ttls.put(familyBytes, newTtl);
    txScan.setTimeRange(TxUtils.getOldestVisibleTimestamp(ttls, dummyTransaction, true),
        TxUtils.getMaxVisibleTimestamp(dummyTransaction));
    txScan.setFilter(TransactionFilters.getVisibilityFilter(dummyTransaction, ttls, false,
        ScanType.USER_SCAN));

    // Raw scan should still give all cells
    scanAndAssert(region, cells, rawScan);
    // However, tx scan should not return expired cells
    scanAndAssert(region, select(cells, 1, 3, 5), txScan);

    region.flushcache(true, false, new FlushLifeCycleTracker() { });
    scanAndAssert(region, cells, rawScan);

    // force a major compaction to remove any expired cells
    region.compact(true);
    // This time raw scan too should not return expired cells, as they would be dropped
    // during major compaction
    scanAndAssert(region, select(cells, 1, 3, 5), rawScan);

    // Reduce TTL again to 1 ms, this should expire all cells
    newTtl = 1;
    region = updateTtl(region, familyBytes, newTtl);
    ttls.put(familyBytes, newTtl);
    txScan.setTimeRange(TxUtils.getOldestVisibleTimestamp(ttls, dummyTransaction, true),
        TxUtils.getMaxVisibleTimestamp(dummyTransaction));
    txScan.setFilter(TransactionFilters.getVisibilityFilter(dummyTransaction, ttls, false,
        ScanType.USER_SCAN));

    // force a major compaction to remove expired cells
    region.compact(true);
    // This time raw scan should not return any cells, as all cells have expired.
    scanAndAssert(region, Collections.<Cell>emptyList(), rawScan);
  } finally {
    region.close();
  }
}