org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker Java Examples
The following examples show how to use
org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker.
Each example notes its source file, the project it comes from, and that project's license.
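Before the examples, here is a minimal sketch of how a custom tracker can be attached to a compaction request in place of the built-in CompactionLifeCycleTracker.DUMMY used in several examples below. It assumes the HBase 2.x APIs that appear in those examples (HStore.requestCompaction and the notExecuted callback shown in Example #3); the LoggingTracker class, its log output, and the requestWithTracker helper are illustrative assumptions, not part of HBase.

import java.io.IOException;
import java.util.Optional;

import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.regionserver.Store;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker;

public class LoggingTracker implements CompactionLifeCycleTracker {

  // Invoked when a requested compaction is not executed, e.g. because the
  // selection was cancelled (see the CompactSplit example below).
  @Override
  public void notExecuted(Store store, String reason) {
    System.out.println("Compaction not executed for " + store + ": " + reason);
  }

  // Hypothetical helper (not HBase API): request a user-priority compaction on a
  // store, passing this tracker where the examples below pass
  // CompactionLifeCycleTracker.DUMMY.
  public static Optional<CompactionContext> requestWithTracker(HStore store) throws IOException {
    return store.requestCompaction(Store.PRIORITY_USER, new LoggingTracker(), null);
  }
}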
Example #1
Source File: RangerAuthorizationCoprocessor.java From ranger with Apache License 2.0 | 6 votes |
@Override
public void postCompact(ObserverContext<RegionCoprocessorEnvironment> c, Store store,
    StoreFile resultFile, CompactionLifeCycleTracker tracker, CompactionRequest request)
    throws IOException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("==> RangerAuthorizationCoprocessor.postCompact()");
  }

  try {
    activatePluginClassLoader();
    implRegionObserver.postCompact(c, store, resultFile, tracker, request);
  } finally {
    deactivatePluginClassLoader();
  }

  if (LOG.isDebugEnabled()) {
    LOG.debug("<== RangerAuthorizationCoprocessor.postCompact()");
  }
}
Example #2
Source File: TestMajorCompaction.java From hbase with Apache License 2.0 | 6 votes |
/**
 * Test for HBASE-5920
 */
@Test
public void testUserMajorCompactionRequest() throws IOException {
  HStore store = r.getStore(COLUMN_FAMILY);
  createStoreFile(r);
  for (int i = 0; i < MAX_FILES_TO_COMPACT + 1; i++) {
    createStoreFile(r);
  }
  store.triggerMajorCompaction();
  CompactionRequestImpl request = store
      .requestCompaction(PRIORITY_USER, CompactionLifeCycleTracker.DUMMY, null).get().getRequest();
  assertNotNull("Expected to receive a compaction request", request);
  assertEquals(
    "User-requested major compaction should always occur, even if there are too many store files",
    true, request.isMajor());
}
Example #3
Source File: CompactSplit.java From hbase with Apache License 2.0 | 6 votes |
private Optional<CompactionContext> selectCompaction(HRegion region, HStore store, int priority,
    CompactionLifeCycleTracker tracker, CompactionCompleteTracker completeTracker, User user)
    throws IOException {
  // don't even select for compaction if disableCompactions is set to true
  if (!isCompactionsEnabled()) {
    LOG.info(String.format("User has disabled compactions"));
    return Optional.empty();
  }
  Optional<CompactionContext> compaction = store.requestCompaction(priority, tracker, user);
  if (!compaction.isPresent() && region.getRegionInfo() != null) {
    String reason = "Not compacting " + region.getRegionInfo().getRegionNameAsString() +
        " because compaction request was cancelled";
    tracker.notExecuted(store, reason);
    completeTracker.completed(store);
    LOG.debug(reason);
  }
  return compaction;
}
Example #4
Source File: RangerAuthorizationCoprocessor.java From ranger with Apache License 2.0 | 6 votes |
@Override
public InternalScanner preCompact(ObserverContext<RegionCoprocessorEnvironment> c, Store store,
    InternalScanner scanner, ScanType scanType, CompactionLifeCycleTracker tracker,
    CompactionRequest request) throws IOException {
  final InternalScanner ret;

  if (LOG.isDebugEnabled()) {
    LOG.debug("==> RangerAuthorizationCoprocessor.preCompact()");
  }

  try {
    activatePluginClassLoader();
    ret = implRegionObserver.preCompact(c, store, scanner, scanType, tracker, request);
  } finally {
    deactivatePluginClassLoader();
  }

  if (LOG.isDebugEnabled()) {
    LOG.debug("<== RangerAuthorizationCoprocessor.preCompact()");
  }

  return ret;
}
Example #5
Source File: RangerAuthorizationCoprocessor.java From ranger with Apache License 2.0 | 6 votes |
@Override
public void postCompactSelection(ObserverContext<RegionCoprocessorEnvironment> c, Store store,
    List<? extends StoreFile> selected, CompactionLifeCycleTracker tracker,
    CompactionRequest request) {
  if (LOG.isDebugEnabled()) {
    LOG.debug("==> RangerAuthorizationCoprocessor.postCompactSelection()");
  }

  try {
    activatePluginClassLoader();
    implRegionObserver.postCompactSelection(c, store, selected, tracker, request);
  } finally {
    deactivatePluginClassLoader();
  }

  if (LOG.isDebugEnabled()) {
    LOG.debug("<== RangerAuthorizationCoprocessor.postCompactSelection()");
  }
}
Example #6
Source File: RangerAuthorizationCoprocessor.java From ranger with Apache License 2.0 | 6 votes |
@Override
public void preCompactSelection(ObserverContext<RegionCoprocessorEnvironment> c, Store store,
    List<? extends StoreFile> candidates, CompactionLifeCycleTracker request) throws IOException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("==> RangerAuthorizationCoprocessor.preCompactSelection()");
  }

  try {
    activatePluginClassLoader();
    implRegionObserver.preCompactSelection(c, store, candidates, request);
  } finally {
    deactivatePluginClassLoader();
  }

  if (LOG.isDebugEnabled()) {
    LOG.debug("<== RangerAuthorizationCoprocessor.preCompactSelection()");
  }
}
Example #7
Source File: MemstoreAwareObserver.java From spliceengine with GNU Affero General Public License v3.0 | 6 votes |
@Override
public InternalScanner preCompact(ObserverContext<RegionCoprocessorEnvironment> c, Store store,
    InternalScanner scanner, ScanType scanType, CompactionLifeCycleTracker tracker,
    CompactionRequest request) throws IOException {
  try {
    BlockingProbe.blockPreCompact();
    if (!(request instanceof SpliceCompactionRequest)) {
      SpliceLogUtils.error(LOG, "Compaction request must be a SpliceCompactionRequest");
      throw new DoNotRetryIOException();
    }
    SpliceCompactionRequest scr = (SpliceCompactionRequest) request;
    // memstoreAware is injected into the request, where the blocking logic lives,
    // and where compaction count will be incremented and decremented.
    scr.setMemstoreAware(memstoreAware);
    HRegion region = (HRegion) c.getEnvironment().getRegion();
    scr.setRegion(region);
    return scanner == null ? DummyScanner.INSTANCE : scanner;
  } catch (Throwable t) {
    throw CoprocessorUtils.getIOException(t);
  }
}
Example #8
Source File: TransactionProcessor.java From phoenix-tephra with Apache License 2.0 | 6 votes |
@Override
public InternalScanner preCompact(
    org.apache.hadoop.hbase.coprocessor.ObserverContext<RegionCoprocessorEnvironment> c,
    Store store, InternalScanner scanner, ScanType scanType,
    org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker tracker,
    CompactionRequest request) throws IOException {
  // Get the latest tx snapshot state for the compaction
  TransactionVisibilityState snapshot = cache.getLatestState();

  // Record tx state before the compaction
  if (compactionState != null) {
    compactionState.record(request, snapshot);
  }

  // Also make sure to use the same snapshot for the compaction
  InternalScanner s =
      createStoreScanner(c.getEnvironment(), "compaction", snapshot, scanner, scanType);
  if (s != null) {
    return s;
  }
  return scanner;
}
Example #9
Source File: TestCompactionLifeCycleTracker.java From hbase with Apache License 2.0 | 5 votes |
@Override
public InternalScanner preCompact(ObserverContext<RegionCoprocessorEnvironment> c, Store store,
    InternalScanner scanner, ScanType scanType, CompactionLifeCycleTracker tracker,
    CompactionRequest request) throws IOException {
  if (TRACKER != null) {
    assertSame(tracker, TRACKER);
  }
  return scanner;
}
Example #10
Source File: TestCompactionLifeCycleTracker.java From hbase with Apache License 2.0 | 5 votes |
@Override
public void preCompactSelection(ObserverContext<RegionCoprocessorEnvironment> c, Store store,
    List<? extends StoreFile> candidates, CompactionLifeCycleTracker tracker) throws IOException {
  if (TRACKER != null) {
    assertSame(tracker, TRACKER);
  }
}
Example #11
Source File: TestCompaction.java From hbase with Apache License 2.0 | 5 votes |
/**
 * Test no new Compaction requests are generated after calling stop compactions
 */
@Test
public void testStopStartCompaction() throws IOException {
  // setup a compact/split thread on a mock server
  HRegionServer mockServer = Mockito.mock(HRegionServer.class);
  Mockito.when(mockServer.getConfiguration()).thenReturn(r.getBaseConf());
  final CompactSplit thread = new CompactSplit(mockServer);
  Mockito.when(mockServer.getCompactSplitThread()).thenReturn(thread);
  // setup a region/store with some files
  HStore store = r.getStore(COLUMN_FAMILY);
  createStoreFile(r);
  for (int i = 0; i < HStore.DEFAULT_BLOCKING_STOREFILE_COUNT - 1; i++) {
    createStoreFile(r);
  }
  thread.switchCompaction(false);
  thread.requestCompaction(r, store, "test", Store.PRIORITY_USER,
    CompactionLifeCycleTracker.DUMMY, null);
  assertFalse(thread.isCompactionsEnabled());
  int longCompactions = thread.getLongCompactions().getActiveCount();
  int shortCompactions = thread.getShortCompactions().getActiveCount();
  assertEquals("longCompactions=" + longCompactions + "," + "shortCompactions=" + shortCompactions,
    0, longCompactions + shortCompactions);
  thread.switchCompaction(true);
  assertTrue(thread.isCompactionsEnabled());
  // Make sure no compactions have run.
  assertEquals(0, thread.getLongCompactions().getCompletedTaskCount() +
    thread.getShortCompactions().getCompletedTaskCount());
  // Request a compaction and make sure it is submitted successfully.
  thread.requestCompaction(r, store, "test", Store.PRIORITY_USER,
    CompactionLifeCycleTracker.DUMMY, null);
  // Wait until the compaction finishes.
  Waiter.waitFor(UTIL.getConfiguration(), 5000,
    (Waiter.Predicate<Exception>) () -> thread.getLongCompactions().getCompletedTaskCount() +
      thread.getShortCompactions().getCompletedTaskCount() == 1);
  // Make sure there are no compactions running.
  assertEquals(0,
    thread.getLongCompactions().getActiveCount() + thread.getShortCompactions().getActiveCount());
}
Example #12
Source File: TestCompactionLifeCycleTracker.java From hbase with Apache License 2.0 | 5 votes |
@Override
public void postCompactSelection(ObserverContext<RegionCoprocessorEnvironment> c, Store store,
    List<? extends StoreFile> selected, CompactionLifeCycleTracker tracker,
    CompactionRequest request) {
  if (TRACKER != null) {
    assertSame(tracker, TRACKER);
  }
}
Example #13
Source File: TransactionProcessor.java From phoenix-tephra with Apache License 2.0 | 5 votes |
@Override
public void preCompactScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c, Store store,
    ScanType scanType, ScanOptions options, CompactionLifeCycleTracker tracker,
    CompactionRequest request) throws IOException {
  if (cache.getLatestState() != null) {
    options.readAllVersions();
  }
}
Example #14
Source File: SIObserver.java From spliceengine with GNU Affero General Public License v3.0 | 5 votes |
@Override
public void postCompact(ObserverContext<RegionCoprocessorEnvironment> c, Store store,
    StoreFile resultFile, CompactionLifeCycleTracker tracker, CompactionRequest request)
    throws IOException {
  try {
    if (tableEnvMatch) {
      Tracer.compact();
    }
  } catch (Throwable t) {
    throw CoprocessorUtils.getIOException(t);
  }
}
Example #15
Source File: SIObserver.java From spliceengine with GNU Affero General Public License v3.0 | 5 votes |
@Override
public InternalScanner preCompact(ObserverContext<RegionCoprocessorEnvironment> c, Store store,
    InternalScanner scanner, ScanType scanType, CompactionLifeCycleTracker tracker,
    CompactionRequest request) throws IOException {
  try {
    // We can't return null, there's a check in org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost.preCompact
    // return a dummy implementation instead
    if (scanner == null || scanner == DummyScanner.INSTANCE)
      return DummyScanner.INSTANCE;

    if (tableEnvMatch) {
      SIDriver driver = SIDriver.driver();
      SimpleCompactionContext context = new SimpleCompactionContext();
      SICompactionState state = new SICompactionState(driver.getTxnSupplier(),
          driver.getConfiguration().getActiveTransactionMaxCacheSize(), context,
          driver.getRejectingExecutorService());
      SConfiguration conf = driver.getConfiguration();
      PurgeConfig purgeConfig;
      if (conf.getOlapCompactionAutomaticallyPurgeDeletedRows()) {
        if (request.isMajor())
          purgeConfig = PurgeConfig.purgeDuringMajorCompactionConfig();
        else
          purgeConfig = PurgeConfig.purgeDuringMinorCompactionConfig();
      } else {
        purgeConfig = PurgeConfig.noPurgeConfig();
      }
      SICompactionScanner siScanner = new SICompactionScanner(
          state, scanner, purgeConfig, conf.getOlapCompactionResolutionShare(),
          conf.getLocalCompactionResolutionBufferSize(), context);
      siScanner.start();
      return siScanner;
    }
    return scanner;
  } catch (Throwable t) {
    throw CoprocessorUtils.getIOException(t);
  }
}
Example #16
Source File: BackupEndpointObserver.java From spliceengine with GNU Affero General Public License v3.0 | 5 votes |
@Override
public InternalScanner preCompact(ObserverContext<RegionCoprocessorEnvironment> c, Store store,
    InternalScanner scanner, ScanType scanType, CompactionLifeCycleTracker tracker,
    CompactionRequest request) throws IOException {
  try {
    if (LOG.isDebugEnabled())
      SpliceLogUtils.debug(LOG, "BackupEndpointObserver.preCompact()");
    BackupUtils.waitForBackupToComplete(tableName, regionName);
    isCompacting.set(true);
    SpliceLogUtils.info(LOG, "setting isCompacting=true for %s:%s", tableName, regionName);
    return scanner == null ? DummyScanner.INSTANCE : scanner;
  } catch (Throwable t) {
    throw CoprocessorUtils.getIOException(t);
  }
}
Example #17
Source File: BackupEndpointObserver.java From spliceengine with GNU Affero General Public License v3.0 | 5 votes |
@Override
public void postCompact(ObserverContext<RegionCoprocessorEnvironment> c, Store store,
    StoreFile resultFile, CompactionLifeCycleTracker tracker, CompactionRequest request)
    throws IOException {
  try {
    isCompacting.set(false);
    SpliceLogUtils.info(LOG, "setting isCompacting=false for %s:%s", tableName, regionName);
    if (LOG.isDebugEnabled()) {
      String filePath = resultFile != null ? resultFile.getPath().toString() : null;
      SpliceLogUtils.debug(LOG, "Compaction result file %s", filePath);
    }
  } catch (Throwable t) {
    throw CoprocessorUtils.getIOException(t);
  }
}
Example #18
Source File: MemstoreAwareObserver.java From spliceengine with GNU Affero General Public License v3.0 | 5 votes |
@Override
public void postCompact(ObserverContext<RegionCoprocessorEnvironment> c, Store store,
    StoreFile resultFile, CompactionLifeCycleTracker tracker, CompactionRequest request)
    throws IOException {
  try {
    BlockingProbe.blockPostCompact();
  } catch (Throwable t) {
    throw CoprocessorUtils.getIOException(t);
  }
}
Example #19
Source File: CompactionTool.java From hbase with Apache License 2.0 | 5 votes |
/**
 * Execute the actual compaction job.
 * If the compact once flag is not specified, execute the compaction until
 * no more compactions are needed. Uses the Configuration settings provided.
 */
private void compactStoreFiles(final Path tableDir, final TableDescriptor htd,
    final RegionInfo hri, final String familyName, final boolean compactOnce, final boolean major)
    throws IOException {
  HStore store = getStore(conf, fs, tableDir, htd, hri, familyName);
  LOG.info("Compact table=" + htd.getTableName() + " region=" + hri.getRegionNameAsString() +
    " family=" + familyName);
  if (major) {
    store.triggerMajorCompaction();
  }
  do {
    Optional<CompactionContext> compaction =
        store.requestCompaction(PRIORITY_USER, CompactionLifeCycleTracker.DUMMY, null);
    if (!compaction.isPresent()) {
      break;
    }
    List<HStoreFile> storeFiles =
        store.compact(compaction.get(), NoLimitThroughputController.INSTANCE, null);
    if (storeFiles != null && !storeFiles.isEmpty()) {
      if (deleteCompacted) {
        for (HStoreFile storeFile : storeFiles) {
          fs.delete(storeFile.getPath(), false);
        }
      }
    }
  } while (store.needsCompaction() && !compactOnce);
  // We need to close the store properly, to make sure it will archive compacted files
  store.close();
}
Example #20
Source File: IndexHalfStoreFileReaderGenerator.java From phoenix with Apache License 2.0 | 5 votes |
@Override
public InternalScanner preCompact(ObserverContext<RegionCoprocessorEnvironment> c, Store store,
    InternalScanner s, ScanType scanType, CompactionLifeCycleTracker tracker,
    CompactionRequest request) throws IOException {
  if (!IndexUtil.isLocalIndexStore(store)) {
    return s;
  }
  if (!store.hasReferences()) {
    InternalScanner repairScanner = null;
    if (request.isMajor() &&
        (!RepairUtil.isLocalIndexStoreFilesConsistent(c.getEnvironment(), store))) {
      LOGGER.info("we have found inconsistent data for local index for region:" +
          c.getEnvironment().getRegion().getRegionInfo());
      if (c.getEnvironment().getConfiguration().getBoolean(LOCAL_INDEX_AUTOMATIC_REPAIR, true)) {
        LOGGER.info("Starting automatic repair of local Index for region:" +
            c.getEnvironment().getRegion().getRegionInfo());
        repairScanner = getRepairScanner(c.getEnvironment(), store);
      }
    }
    if (repairScanner != null) {
      if (s != null) {
        s.close();
      }
      return repairScanner;
    } else {
      return s;
    }
  }
  return s;
}
Example #21
Source File: TestHRegionServerBulkLoad.java From hbase with Apache License 2.0 | 5 votes |
@Override
public InternalScanner preCompact(ObserverContext<RegionCoprocessorEnvironment> e, Store store,
    InternalScanner scanner, ScanType scanType, CompactionLifeCycleTracker tracker,
    CompactionRequest request) throws IOException {
  try {
    Thread.sleep(sleepDuration);
  } catch (InterruptedException ie) {
    IOException ioe = new InterruptedIOException();
    ioe.initCause(ie);
    throw ioe;
  }
  return scanner;
}
Example #22
Source File: TestCompaction.java From hbase with Apache License 2.0 | 5 votes |
@Test
public void testInterruptingRunningCompactions() throws Exception {
  // setup a compact/split thread on a mock server
  conf.set(CompactionThroughputControllerFactory.HBASE_THROUGHPUT_CONTROLLER_KEY,
    WaitThroughPutController.class.getName());
  HRegionServer mockServer = Mockito.mock(HRegionServer.class);
  Mockito.when(mockServer.getConfiguration()).thenReturn(r.getBaseConf());
  CompactSplit thread = new CompactSplit(mockServer);
  Mockito.when(mockServer.getCompactSplitThread()).thenReturn(thread);

  // setup a region/store with some files
  HStore store = r.getStore(COLUMN_FAMILY);
  int jmax = (int) Math.ceil(15.0 / compactionThreshold);
  byte[] pad = new byte[1000]; // 1 KB chunk
  for (int i = 0; i < compactionThreshold; i++) {
    Table loader = new RegionAsTable(r);
    Put p = new Put(Bytes.add(STARTROW, Bytes.toBytes(i)));
    p.setDurability(Durability.SKIP_WAL);
    for (int j = 0; j < jmax; j++) {
      p.addColumn(COLUMN_FAMILY, Bytes.toBytes(j), pad);
    }
    HTestConst.addContent(loader, Bytes.toString(COLUMN_FAMILY));
    loader.put(p);
    r.flush(true);
  }
  HStore s = r.getStore(COLUMN_FAMILY);
  int initialFiles = s.getStorefilesCount();

  thread.requestCompaction(r, store, "test custom comapction", PRIORITY_USER,
    CompactionLifeCycleTracker.DUMMY, null);

  Thread.sleep(3000);
  thread.switchCompaction(false);
  assertEquals(initialFiles, s.getStorefilesCount());
  // don't mess up future tests
  thread.switchCompaction(true);
}
Example #23
Source File: TestCompactionLifeCycleTracker.java From hbase with Apache License 2.0 | 5 votes |
@Override
public void postCompact(ObserverContext<RegionCoprocessorEnvironment> c, Store store,
    StoreFile resultFile, CompactionLifeCycleTracker tracker, CompactionRequest request)
    throws IOException {
  if (TRACKER != null) {
    assertSame(tracker, TRACKER);
  }
}
Example #24
Source File: SimpleRegionObserver.java From hbase with Apache License 2.0 | 5 votes |
@Override
public InternalScanner preCompact(ObserverContext<RegionCoprocessorEnvironment> c, Store store,
    InternalScanner scanner, ScanType scanType, CompactionLifeCycleTracker tracker,
    CompactionRequest request) throws IOException {
  ctPreCompact.incrementAndGet();
  return scanner;
}
Example #25
Source File: TestCoprocessorInterface.java From hbase with Apache License 2.0 | 5 votes |
@Override
public InternalScanner preCompact(ObserverContext<RegionCoprocessorEnvironment> e, Store store,
    InternalScanner scanner, ScanType scanType, CompactionLifeCycleTracker tracker,
    CompactionRequest request) {
  preCompactCalled = true;
  return scanner;
}
Example #26
Source File: TestRegionObserverInterface.java From hbase with Apache License 2.0 | 5 votes |
@Override
public InternalScanner preCompact(ObserverContext<RegionCoprocessorEnvironment> e, Store store,
    InternalScanner scanner, ScanType scanType, CompactionLifeCycleTracker tracker,
    CompactionRequest request) {
  return new InternalScanner() {
    @Override
    public boolean next(List<Cell> results, ScannerContext scannerContext) throws IOException {
      List<Cell> internalResults = new ArrayList<>();
      boolean hasMore;
      do {
        hasMore = scanner.next(internalResults, scannerContext);
        if (!internalResults.isEmpty()) {
          long row = Bytes.toLong(CellUtil.cloneValue(internalResults.get(0)));
          if (row % 2 == 0) {
            // return this row
            break;
          }
          // clear and continue
          internalResults.clear();
        }
      } while (hasMore);
      if (!internalResults.isEmpty()) {
        results.addAll(internalResults);
      }
      return hasMore;
    }

    @Override
    public void close() throws IOException {
      scanner.close();
    }
  };
}
Example #27
Source File: TestRegionCoprocessorHost.java From hbase with Apache License 2.0 | 5 votes |
@Test
public void testPreCompactScannerOpen() throws IOException {
  RegionCoprocessorHost host = new RegionCoprocessorHost(region, rsServices, conf);
  ScanInfo oldScanInfo = getScanInfo();
  HStore store = mock(HStore.class);
  when(store.getScanInfo()).thenReturn(oldScanInfo);
  ScanInfo newScanInfo = host.preCompactScannerOpen(store, ScanType.COMPACT_DROP_DELETES,
    mock(CompactionLifeCycleTracker.class), mock(CompactionRequest.class), mock(User.class));
  verifyScanInfo(newScanInfo);
}
Example #28
Source File: RegionCoprocessorHost.java From hbase with Apache License 2.0 | 5 votes |
/**
 * Called after the store compaction has completed.
 * @param store the store being compacted
 * @param resultFile the new store file written during compaction
 * @param tracker used to track the life cycle of a compaction
 * @param request the compaction request
 * @param user the user
 * @throws IOException
 */
public void postCompact(final HStore store, final HStoreFile resultFile,
    final CompactionLifeCycleTracker tracker, final CompactionRequest request, final User user)
    throws IOException {
  execOperation(coprocEnvironments.isEmpty() ? null :
      new RegionObserverOperationWithoutResult(user) {
    @Override
    public void call(RegionObserver observer) throws IOException {
      observer.postCompact(this, store, resultFile, tracker, request);
    }
  });
}
Example #29
Source File: RegionCoprocessorHost.java From hbase with Apache License 2.0 | 5 votes |
/**
 * Called prior to opening store scanner for compaction.
 */
public ScanInfo preCompactScannerOpen(HStore store, ScanType scanType,
    CompactionLifeCycleTracker tracker, CompactionRequest request, User user) throws IOException {
  if (coprocEnvironments.isEmpty()) {
    return store.getScanInfo();
  }
  CustomizedScanInfoBuilder builder = new CustomizedScanInfoBuilder(store.getScanInfo());
  execOperation(new RegionObserverOperationWithoutResult(user) {
    @Override
    public void call(RegionObserver observer) throws IOException {
      observer.preCompactScannerOpen(this, store, scanType, builder, tracker, request);
    }
  });
  return builder.build();
}
Example #30
Source File: RegionCoprocessorHost.java From hbase with Apache License 2.0 | 5 votes |
/**
 * Called after the {@link HStoreFile}s to be compacted have been selected from the available
 * candidates.
 * @param store The store where compaction is being requested
 * @param selected The store files selected to compact
 * @param tracker used to track the life cycle of a compaction
 * @param request the compaction request
 * @param user the user
 */
public void postCompactSelection(final HStore store, final List<HStoreFile> selected,
    final CompactionLifeCycleTracker tracker, final CompactionRequest request, final User user)
    throws IOException {
  if (coprocEnvironments.isEmpty()) {
    return;
  }
  execOperation(new RegionObserverOperationWithoutResult(user) {
    @Override
    public void call(RegionObserver observer) throws IOException {
      observer.postCompactSelection(this, store, selected, tracker, request);
    }
  });
}