org.apache.hadoop.hbase.regionserver.Region Java Examples
The following examples show how to use
org.apache.hadoop.hbase.regionserver.Region.
Each example notes the project and source file it was taken from, along with that project's license.
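Before the individual examples, the sketch below shows the most common way to get hold of a Region: through the RegionCoprocessorEnvironment passed to a RegionObserver coprocessor. This snippet is not taken from any of the projects listed below; the class name RegionInfoLoggingObserver and the printed message are illustrative assumptions, and the size computation simply mirrors the FileSystemUtilizationChore example further down (Example #9).

import java.util.Optional;

import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;
import org.apache.hadoop.hbase.regionserver.Region;
import org.apache.hadoop.hbase.regionserver.Store;

public class RegionInfoLoggingObserver implements RegionCoprocessor, RegionObserver {

  @Override
  public Optional<RegionObserver> getRegionObserver() {
    return Optional.of(this);
  }

  @Override
  public void postOpen(ObserverContext<RegionCoprocessorEnvironment> c) {
    // The coprocessor environment hands back the Region this observer is attached to.
    Region region = c.getEnvironment().getRegion();
    RegionInfo info = region.getRegionInfo();

    // Sum the on-disk size of all stores, the same way Example #9 below does.
    long hfileBytes = 0L;
    for (Store store : region.getStores()) {
      hfileBytes += store.getHFilesSize();
    }
    System.out.println("Opened region " + info.getRegionNameAsString() + " of table "
        + info.getTable() + ", hfile bytes=" + hfileBytes);
  }
}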
Example #1
Source File: ClientPushbackTestBase.java From hbase with Apache License 2.0
@Test
public void testMutateRowStats() throws IOException {
  HRegionServer rs = UTIL.getHBaseCluster().getRegionServer(0);
  Region region = rs.getRegions(tableName).get(0);

  RowMutations mutations = new RowMutations(Bytes.toBytes("row"));
  Put p = new Put(Bytes.toBytes("row"));
  p.addColumn(family, qualifier, Bytes.toBytes("value2"));
  mutations.add(p);
  mutateRow(mutations);

  ServerStatisticTracker stats = getStatisticsTracker();
  assertNotNull("No stats configured for the client!", stats);
  // get the names so we can query the stats
  ServerName server = rs.getServerName();
  byte[] regionName = region.getRegionInfo().getRegionName();

  // check to see we found some load on the memstore
  ServerStatistics serverStats = stats.getStats(server);
  ServerStatistics.RegionStatistics regionStats = serverStats.getStatsForRegion(regionName);
  assertNotNull(regionStats);
  assertTrue(regionStats.getMemStoreLoadPercent() > 0);
}
Example #2
Source File: TestFileSystemUtilizationChore.java From hbase with Apache License 2.0
private Region mockRegionWithHFileLinks(Collection<Long> storeSizes, Collection<Long> hfileSizes) {
  final Region r = mock(Region.class);
  final RegionInfo info = mock(RegionInfo.class);
  when(r.getRegionInfo()).thenReturn(info);
  List<Store> stores = new ArrayList<>();
  when(r.getStores()).thenReturn((List) stores);
  assertEquals(
      "Logic error, storeSizes and linkSizes must be equal in size", storeSizes.size(),
      hfileSizes.size());
  Iterator<Long> storeSizeIter = storeSizes.iterator();
  Iterator<Long> hfileSizeIter = hfileSizes.iterator();
  while (storeSizeIter.hasNext() && hfileSizeIter.hasNext()) {
    final long storeSize = storeSizeIter.next();
    final long hfileSize = hfileSizeIter.next();
    final Store s = mock(Store.class);
    stores.add(s);
    when(s.getStorefilesSize()).thenReturn(storeSize);
    when(s.getHFilesSize()).thenReturn(hfileSize);
  }
  return r;
}
Example #3
Source File: MiniHBaseCluster.java From hbase with Apache License 2.0
@Override
public ServerName getServerHoldingRegion(final TableName tn, byte[] regionName)
    throws IOException {
  // Assume there is only one master thread which is the active master.
  // If there are multiple master threads, the backup master threads
  // should hold some regions. Please refer to #countServedRegions
  // to see how we find out all regions.
  HMaster master = getMaster();
  Region region = master.getOnlineRegion(regionName);
  if (region != null) {
    return master.getServerName();
  }
  int index = getServerWith(regionName);
  if (index < 0) {
    return null;
  }
  return getRegionServer(index).getServerName();
}
Example #4
Source File: TestFileSystemUtilizationChore.java From hbase with Apache License 2.0
@Test
public void testNonHFilesAreIgnored() {
  final Configuration conf = getDefaultHBaseConfiguration();
  final HRegionServer rs = mockRegionServer(conf);

  // Region r1 has two store files, one hfile link and one hfile
  final List<Long> r1StoreFileSizes = Arrays.asList(1024L, 2048L);
  final List<Long> r1HFileSizes = Arrays.asList(0L, 2048L);
  final long r1HFileSizeSum = sum(r1HFileSizes);
  // Region r2 has one store file which is a hfile link
  final List<Long> r2StoreFileSizes = Arrays.asList(1024L * 1024L);
  final List<Long> r2HFileSizes = Arrays.asList(0L);
  final long r2HFileSizeSum = sum(r2HFileSizes);

  // We expect that only the hfiles would be counted (hfile links are ignored)
  final FileSystemUtilizationChore chore = new FileSystemUtilizationChore(rs);
  doAnswer(new ExpectedRegionSizeSummationAnswer(
      sum(Arrays.asList(r1HFileSizeSum, r2HFileSizeSum))))
      .when(rs).reportRegionSizesForQuotas(any(RegionSizeStore.class));

  final Region r1 = mockRegionWithHFileLinks(r1StoreFileSizes, r1HFileSizes);
  final Region r2 = mockRegionWithHFileLinks(r2StoreFileSizes, r2HFileSizes);
  Mockito.doReturn(Arrays.asList(r1, r2)).when(rs).getRegions();
  chore.chore();
}
Example #5
Source File: UngroupedAggregateRegionObserver.java From phoenix with Apache License 2.0
private void separateLocalAndRemoteMutations(Table targetHTable, Region region,
    List<Mutation> mutations, List<Mutation> localRegionMutations,
    List<Mutation> remoteRegionMutations, boolean isPKChanging) {
  boolean areMutationsInSameTable = areMutationsInSameTable(targetHTable, region);
  // if we're writing to the same table, but the PK can change, that means that some
  // mutations might be in our current region, and others in a different one.
  if (areMutationsInSameTable && isPKChanging) {
    RegionInfo regionInfo = region.getRegionInfo();
    for (Mutation mutation : mutations) {
      if (regionInfo.containsRow(mutation.getRow())) {
        localRegionMutations.add(mutation);
      } else {
        remoteRegionMutations.add(mutation);
      }
    }
  } else if (areMutationsInSameTable && !isPKChanging) {
    localRegionMutations.addAll(mutations);
  } else {
    remoteRegionMutations.addAll(mutations);
  }
}
Example #6
Source File: TestRegionReplicaReplicationEndpointNoMaster.java From hbase with Apache License 2.0
@Test
public void testReplayCallable() throws Exception {
  // tests replaying the edits to a secondary region replica using the Callable directly
  openRegion(HTU, rs0, hriSecondary);

  // load some data to primary
  HTU.loadNumericRows(table, f, 0, 1000);

  Assert.assertEquals(1000, entries.size());
  try (AsyncClusterConnection conn = ClusterConnectionFactory
      .createAsyncClusterConnection(HTU.getConfiguration(), null, User.getCurrent())) {
    // replay the edits to the secondary using replay callable
    replicateUsingCallable(conn, entries);
  }

  Region region = rs0.getRegion(hriSecondary.getEncodedName());
  HTU.verifyNumericRows(region, f, 0, 1000);

  HTU.deleteNumericRows(table, f, 0, 1000);
  closeRegion(HTU, rs0, hriSecondary);
}
Example #7
Source File: TestFileSystemUtilizationChore.java From hbase with Apache License 2.0
@Test
public void testIgnoreSplitParents() {
  final Configuration conf = getDefaultHBaseConfiguration();
  final HRegionServer rs = mockRegionServer(conf);

  // Three regions with multiple store sizes
  final List<Long> r1Sizes = Arrays.asList(1024L, 2048L);
  final long r1Sum = sum(r1Sizes);
  final List<Long> r2Sizes = Arrays.asList(1024L * 1024L);

  final FileSystemUtilizationChore chore = new FileSystemUtilizationChore(rs);
  doAnswer(new ExpectedRegionSizeSummationAnswer(sum(Arrays.asList(r1Sum))))
      .when(rs)
      .reportRegionSizesForQuotas(any(RegionSizeStore.class));

  final Region r1 = mockRegionWithSize(r1Sizes);
  final Region r2 = mockSplitParentRegionWithSize(r2Sizes);
  Mockito.doReturn(Arrays.asList(r1, r2)).when(rs).getRegions();
  chore.chore();
}
Example #8
Source File: HBaseTestingUtility.java From hbase with Apache License 2.0
public int countRows(final Region region, final Scan scan) throws IOException {
  InternalScanner scanner = region.getScanner(scan);
  try {
    return countRows(scanner);
  } finally {
    scanner.close();
  }
}
Example #9
Source File: FileSystemUtilizationChore.java From hbase with Apache License 2.0
/**
 * Computes total FileSystem size for the given {@link Region}.
 *
 * @param r The region
 * @return The size, in bytes, of the Region.
 */
long computeSize(Region r) {
  long regionSize = 0L;
  for (Store store : r.getStores()) {
    regionSize += store.getHFilesSize();
  }
  if (LOG.isTraceEnabled()) {
    LOG.trace("Size of " + r + " is " + regionSize);
  }
  return regionSize;
}
Example #10
Source File: StatisticsScannerTest.java From phoenix with Apache License 2.0
@SuppressWarnings("unchecked") @Test public void testCheckRegionServerStoppedOnException() throws Exception { StatisticsScannerCallable realCallable = mockScanner.new StatisticsScannerCallable(); doThrow(new IOException()).when(statsWriter).deleteStatsForRegion(any(Region.class), any(StatisticsCollector.class), any(ImmutableBytesPtr.class), any(List.class)); when(conn.isClosed()).thenReturn(false); when(conn.isAborted()).thenReturn(true); // Should not throw an exception realCallable.call(); verify(conn).isClosed(); verify(conn).isAborted(); }
Example #11
Source File: IndexUtil.java From phoenix with Apache License 2.0
public static void writeLocalUpdates(Region region, final List<Mutation> mutations,
    boolean skipWAL) throws IOException {
  if (skipWAL) {
    for (Mutation m : mutations) {
      m.setDurability(Durability.SKIP_WAL);
    }
  }
  region.batchMutate(mutations.toArray(new Mutation[mutations.size()]));
}
Example #12
Source File: StatisticsScannerTest.java From phoenix with Apache License 2.0
@Before
public void setupMocks() throws Exception {
  this.config = new Configuration(false);

  // Create all of the mocks
  this.region = mock(Region.class);
  this.rsServices = mock(RegionServerServices.class);
  this.statsWriter = mock(StatisticsWriter.class);
  this.callable = mock(StatisticsScannerCallable.class);
  this.runTracker = mock(StatisticsCollectionRunTracker.class);
  this.mockScanner = mock(StatisticsScanner.class);
  this.tracker = mock(StatisticsCollector.class);
  this.delegate = mock(InternalScanner.class);
  this.regionInfo = mock(RegionInfo.class);
  this.env = mock(RegionCoprocessorEnvironment.class);
  this.conn = mock(Connection.class);

  // Wire up the mocks to the mock StatisticsScanner
  when(mockScanner.getStatisticsWriter()).thenReturn(statsWriter);
  when(mockScanner.createCallable()).thenReturn(callable);
  when(mockScanner.getStatsCollectionRunTracker(any(Configuration.class))).thenReturn(runTracker);
  when(mockScanner.getRegion()).thenReturn(region);
  when(mockScanner.getConfig()).thenReturn(config);
  when(mockScanner.getTracker()).thenReturn(tracker);
  when(mockScanner.getDelegate()).thenReturn(delegate);
  when(env.getConnection()).thenReturn(conn);
  when(mockScanner.getConnection()).thenReturn(conn);

  // Wire up the HRegionInfo mock to the Region mock
  when(region.getRegionInfo()).thenReturn(regionInfo);

  // Always call close() on the mock StatisticsScanner
  doCallRealMethod().when(mockScanner).close();
}
Example #13
Source File: RegionServerRpcQuotaManager.java From hbase with Apache License 2.0
/**
 * Check the quota for the current (rpc-context) user.
 * Returns the OperationQuota used to get the available quota and
 * to report the data/usage of the operation.
 * @param region the region where the operation will be performed
 * @param actions the "multi" actions to perform
 * @return the OperationQuota
 * @throws RpcThrottlingException if the operation cannot be executed due to quota exceeded.
 */
public OperationQuota checkQuota(final Region region, final List<ClientProtos.Action> actions)
    throws IOException, RpcThrottlingException {
  int numWrites = 0;
  int numReads = 0;
  for (final ClientProtos.Action action : actions) {
    if (action.hasMutation()) {
      numWrites++;
    } else if (action.hasGet()) {
      numReads++;
    }
  }
  return checkQuota(region, numWrites, numReads, 0);
}
Example #14
Source File: RegionServerRpcQuotaManager.java From hbase with Apache License 2.0
/**
 * Check the quota for the current (rpc-context) user.
 * Returns the OperationQuota used to get the available quota and
 * to report the data/usage of the operation.
 * @param region the region where the operation will be performed
 * @param type the operation type
 * @return the OperationQuota
 * @throws RpcThrottlingException if the operation cannot be executed due to quota exceeded.
 */
public OperationQuota checkQuota(final Region region, final OperationQuota.OperationType type)
    throws IOException, RpcThrottlingException {
  switch (type) {
    case SCAN:
      return checkQuota(region, 0, 0, 1);
    case GET:
      return checkQuota(region, 0, 1, 0);
    case MUTATE:
      return checkQuota(region, 1, 0, 0);
  }
  throw new RuntimeException("Invalid operation type: " + type);
}
Example #15
Source File: TestFileSystemUtilizationChore.java From hbase with Apache License 2.0
@Test
public void testProcessingNowOfflineLeftoversAreIgnored() {
  final Configuration conf = getDefaultHBaseConfiguration();
  final HRegionServer rs = mockRegionServer(conf);

  // Some leftover regions from a previous chore()
  final List<Long> leftover1Sizes = Arrays.asList(1024L, 4096L);
  final long leftover1Sum = sum(leftover1Sizes);
  final List<Long> leftover2Sizes = Arrays.asList(2048L);

  final Region lr1 = mockRegionWithSize(leftover1Sizes);
  final Region lr2 = mockRegionWithSize(leftover2Sizes);
  final FileSystemUtilizationChore chore = new FileSystemUtilizationChore(rs) {
    @Override
    Iterator<Region> getLeftoverRegions() {
      return Arrays.asList(lr1, lr2).iterator();
    }
  };
  doAnswer(new ExpectedRegionSizeSummationAnswer(sum(Arrays.asList(leftover1Sum))))
      .when(rs)
      .reportRegionSizesForQuotas(any(RegionSizeStore.class));

  // We shouldn't compute all of these region sizes, just the leftovers
  final Region r1 = mockRegionWithSize(Arrays.asList(1024L, 2048L));
  final Region r2 = mockRegionWithSize(Arrays.asList(1024L * 1024L));
  final Region r3 = mockRegionWithSize(Arrays.asList(10L * 1024L * 1024L));
  // lr2 is no longer online, so it should be ignored
  Mockito.doReturn(Arrays.asList(r1, r2, r3, lr1)).when(rs).getRegions();

  chore.chore();
}
Example #16
Source File: NonTxIndexBuilderTest.java From phoenix with Apache License 2.0
/**
 * Test setup so that {@link NonTxIndexBuilder#getIndexUpdate(Mutation, IndexMetaData)} can be
 * called, where any read requests to
 * {@link LocalTable#getCurrentRowState(Mutation, Collection, boolean)} are read from our test
 * field 'currentRowCells'
 */
@Before
public void setup() throws Exception {
  RegionCoprocessorEnvironment env = Mockito.mock(RegionCoprocessorEnvironment.class);
  Configuration conf = new Configuration(false);
  conf.set(NonTxIndexBuilder.CODEC_CLASS_NAME_KEY, PhoenixIndexCodec.class.getName());
  Mockito.when(env.getConfiguration()).thenReturn(conf);

  // the following is used by LocalTable#getCurrentRowState()
  Region mockRegion = Mockito.mock(Region.class);
  Mockito.when(env.getRegion()).thenReturn(mockRegion);
  Mockito.when(mockRegion.getScanner(Mockito.any(Scan.class)))
      .thenAnswer(new Answer<RegionScanner>() {
        @Override
        public RegionScanner answer(InvocationOnMock invocation) throws Throwable {
          Scan sArg = (Scan) invocation.getArguments()[0];
          TimeRange timeRange = sArg.getTimeRange();
          return getMockTimeRangeRegionScanner(timeRange);
        }
      });

  // the following is called by PhoenixIndexCodec#getIndexUpserts(), getIndexDeletes()
  RegionInfo mockRegionInfo = Mockito.mock(RegionInfo.class);
  Mockito.when(env.getRegionInfo()).thenReturn(mockRegionInfo);
  Mockito.when(mockRegion.getRegionInfo()).thenReturn(mockRegionInfo);
  Mockito.when(mockRegionInfo.getStartKey()).thenReturn(Bytes.toBytes("a"));
  Mockito.when(mockRegionInfo.getEndKey()).thenReturn(Bytes.toBytes("z"));
  Mockito.when(mockRegionInfo.getTable()).thenReturn(TableName.valueOf(TEST_TABLE_STRING));

  mockIndexMetaData = Mockito.mock(PhoenixIndexMetaData.class);
  Mockito.when(mockIndexMetaData.requiresPriorRowState((Mutation) Mockito.any())).thenReturn(true);
  Mockito.when(mockIndexMetaData.getReplayWrite()).thenReturn(null);
  Mockito.when(mockIndexMetaData.getIndexMaintainers())
      .thenReturn(Collections.singletonList(getTestIndexMaintainer()));

  indexBuilder = new NonTxIndexBuilder();
  indexBuilder.setup(env);
}
Example #17
Source File: TestFileSystemUtilizationChore.java From hbase with Apache License 2.0
@Test
public void testProcessingLeftoverRegions() {
  final Configuration conf = getDefaultHBaseConfiguration();
  final HRegionServer rs = mockRegionServer(conf);

  // Some leftover regions from a previous chore()
  final List<Long> leftover1Sizes = Arrays.asList(1024L, 4096L);
  final long leftover1Sum = sum(leftover1Sizes);
  final List<Long> leftover2Sizes = Arrays.asList(2048L);
  final long leftover2Sum = sum(leftover2Sizes);

  final Region lr1 = mockRegionWithSize(leftover1Sizes);
  final Region lr2 = mockRegionWithSize(leftover2Sizes);
  final FileSystemUtilizationChore chore = new FileSystemUtilizationChore(rs) {
    @Override
    Iterator<Region> getLeftoverRegions() {
      return Arrays.asList(lr1, lr2).iterator();
    }
  };
  doAnswer(new ExpectedRegionSizeSummationAnswer(sum(Arrays.asList(leftover1Sum, leftover2Sum))))
      .when(rs)
      .reportRegionSizesForQuotas(any(RegionSizeStore.class));

  // We shouldn't compute all of these region sizes, just the leftovers
  final Region r1 = mockRegionWithSize(Arrays.asList(1024L, 2048L));
  final Region r2 = mockRegionWithSize(Arrays.asList(1024L * 1024L));
  final Region r3 = mockRegionWithSize(Arrays.asList(10L * 1024L * 1024L));
  Mockito.doReturn(Arrays.asList(r1, r2, r3, lr1, lr2)).when(rs).getRegions();

  chore.chore();
}
Example #18
Source File: VisibilityController.java From hbase with Apache License 2.0
@Override
public void preScannerOpen(ObserverContext<RegionCoprocessorEnvironment> e, Scan scan)
    throws IOException {
  if (!initialized) {
    throw new VisibilityControllerNotReadyException("VisibilityController not yet initialized!");
  }
  // Nothing to do if authorization is not enabled
  if (!authorizationEnabled) {
    return;
  }
  Region region = e.getEnvironment().getRegion();
  Authorizations authorizations = null;
  try {
    authorizations = scan.getAuthorizations();
  } catch (DeserializationException de) {
    throw new IOException(de);
  }
  if (authorizations == null) {
    // No Authorizations present for this scan/Get!
    // In case of system tables other than "labels" just scan without visibility check and
    // filtering. Checking visibility labels for META and NAMESPACE table is not needed.
    TableName table = region.getRegionInfo().getTable();
    if (table.isSystemTable() && !table.equals(LABELS_TABLE_NAME)) {
      return;
    }
  }
  Filter visibilityLabelFilter =
      VisibilityUtils.createVisibilityLabelFilter(region, authorizations);
  if (visibilityLabelFilter != null) {
    Filter filter = scan.getFilter();
    if (filter != null) {
      scan.setFilter(new FilterList(filter, visibilityLabelFilter));
    } else {
      scan.setFilter(visibilityLabelFilter);
    }
  }
}
Example #19
Source File: AccessController.java From hbase with Apache License 2.0
private TableName getTableName(Region region) {
  RegionInfo regionInfo = region.getRegionInfo();
  if (regionInfo != null) {
    return regionInfo.getTable();
  }
  return null;
}
Example #20
Source File: ServerUtil.java From phoenix with Apache License 2.0
public static boolean isKeyInRegion(byte[] key, Region region) {
  byte[] startKey = region.getRegionInfo().getStartKey();
  byte[] endKey = region.getRegionInfo().getEndKey();
  return (Bytes.compareTo(startKey, key) <= 0
      && (Bytes.compareTo(HConstants.LAST_ROW, endKey) == 0
          || Bytes.compareTo(key, endKey) < 0));
}
Example #21
Source File: GlobalIndexRegionScanner.java From phoenix with Apache License 2.0
public GlobalIndexRegionScanner(RegionScanner innerScanner, final Region region, final Scan scan,
    final RegionCoprocessorEnvironment env) throws IOException {
  super(innerScanner);
  final Configuration config = env.getConfiguration();
  if (scan.getAttribute(BaseScannerRegionObserver.INDEX_REBUILD_PAGING) != null) {
    byte[] pageSizeFromScan = scan.getAttribute(BaseScannerRegionObserver.INDEX_REBUILD_PAGE_ROWS);
    if (pageSizeFromScan != null) {
      pageSizeInRows = Bytes.toLong(pageSizeFromScan);
    } else {
      pageSizeInRows = config.getLong(INDEX_REBUILD_PAGE_SIZE_IN_ROWS,
          QueryServicesOptions.DEFAULT_INDEX_REBUILD_PAGE_SIZE_IN_ROWS);
    }
  }
  maxBatchSize = config.getInt(MUTATE_BATCH_SIZE_ATTRIB,
      QueryServicesOptions.DEFAULT_MUTATE_BATCH_SIZE);
  indexMetaData = scan.getAttribute(PhoenixIndexCodec.INDEX_PROTO_MD);
  if (indexMetaData == null) {
    indexMetaData = scan.getAttribute(PhoenixIndexCodec.INDEX_MD);
  }
  List<IndexMaintainer> maintainers = IndexMaintainer.deserialize(indexMetaData, true);
  indexMaintainer = maintainers.get(0);
  this.scan = scan;
  this.innerScanner = innerScanner;
  this.region = region;
  // Create the following objects only for rebuilds by IndexTool
  hTableFactory = IndexWriterUtils.getDefaultDelegateHTableFactory(env);
  indexHTable = hTableFactory.getTable(new ImmutableBytesPtr(indexMaintainer.getIndexTableName()));
  indexTableTTL = indexHTable.getTableDescriptor().getColumnFamilies()[0].getTimeToLive();
  pool = new WaitForCompletionTaskRunner(ThreadPoolManager.getExecutor(
      new ThreadPoolBuilder("IndexVerify", env.getConfiguration())
          .setMaxThread(NUM_CONCURRENT_INDEX_VERIFY_THREADS_CONF_KEY,
              DEFAULT_CONCURRENT_INDEX_VERIFY_THREADS)
          .setCoreTimeout(INDEX_WRITER_KEEP_ALIVE_TIME_CONF_KEY), env));
  rowCountPerTask = config.getInt(INDEX_VERIFY_ROW_COUNTS_PER_TASK_CONF_KEY,
      DEFAULT_INDEX_VERIFY_ROW_COUNTS_PER_TASK);
}
Example #22
Source File: IndexerRegionScanner.java From phoenix with Apache License 2.0
IndexerRegionScanner(final RegionScanner innerScanner, final Region region, final Scan scan,
    final RegionCoprocessorEnvironment env) throws IOException {
  super(innerScanner, region, scan, env);
  indexKeyToDataPutMap = Maps.newTreeMap(Bytes.BYTES_COMPARATOR);
  verificationResult = new IndexToolVerificationResult(scan);
  verificationResultRepository =
      new IndexVerificationResultRepository(indexMaintainer.getIndexTableName(), hTableFactory);
}
Example #23
Source File: HTestConst.java From hbase with Apache License 2.0
/**
 * Add content to region <code>r</code> on the passed column <code>column</code>. Adds data of
 * the form 'aaa', 'aab', etc where key and value are the same.
 * @return count of what we added.
 */
public static long addContent(final Region r, final byte[] columnFamily, final byte[] column)
    throws IOException {
  byte[] startKey = r.getRegionInfo().getStartKey();
  byte[] endKey = r.getRegionInfo().getEndKey();
  byte[] startKeyBytes = startKey;
  if (startKeyBytes == null || startKeyBytes.length == 0) {
    startKeyBytes = START_KEY_BYTES;
  }
  return addContent(new RegionAsTable(r), Bytes.toString(columnFamily), Bytes.toString(column),
      startKeyBytes, endKey, -1);
}
Example #24
Source File: TestFlushWithThroughputController.java From hbase with Apache License 2.0
private HStore getStoreWithName(TableName tableName) {
  MiniHBaseCluster cluster = hbtu.getMiniHBaseCluster();
  List<JVMClusterUtil.RegionServerThread> rsts = cluster.getRegionServerThreads();
  for (int i = 0; i < cluster.getRegionServerThreads().size(); i++) {
    HRegionServer hrs = rsts.get(i).getRegionServer();
    for (Region region : hrs.getRegions(tableName)) {
      return ((HRegion) region).getStores().iterator().next();
    }
  }
  return null;
}
Example #25
Source File: IndexRegionObserver.java From phoenix with Apache License 2.0
/**
 * We use an Increment to serialize the ON DUPLICATE KEY clause so that the HBase plumbing
 * sets up the necessary locks and mvcc to allow an atomic update. The Increment is not a
 * real increment, though, it's really more of a Put. We translate the Increment into a
 * list of mutations, at most a single Put and Delete that are the changes upon executing
 * the list of ON DUPLICATE KEY clauses for this row.
 */
@Override
public Result preIncrementAfterRowLock(final ObserverContext<RegionCoprocessorEnvironment> e,
    final Increment inc) throws IOException {
  long start = EnvironmentEdgeManager.currentTimeMillis();
  try {
    List<Mutation> mutations = this.builder.executeAtomicOp(inc);
    if (mutations == null) {
      return null;
    }

    // Causes the Increment to be ignored as we're committing the mutations
    // ourselves below.
    e.bypass();

    // ON DUPLICATE KEY IGNORE will return empty list if row already exists
    // as no action is required in that case.
    if (!mutations.isEmpty()) {
      Region region = e.getEnvironment().getRegion();
      // Otherwise, submit the mutations directly here
      region.batchMutate(mutations.toArray(new Mutation[0]));
    }
    return Result.EMPTY_RESULT;
  } catch (Throwable t) {
    throw ServerUtil.createIOException(
        "Unable to process ON DUPLICATE IGNORE for "
            + e.getEnvironment().getRegion().getRegionInfo().getTable().getNameAsString()
            + "(" + Bytes.toStringBinary(inc.getRow()) + ")", t);
  } finally {
    long duration = EnvironmentEdgeManager.currentTimeMillis() - start;
    if (duration >= slowIndexPrepareThreshold) {
      if (LOG.isDebugEnabled()) {
        LOG.debug(getCallTooSlowMessage("preIncrementAfterRowLock", duration,
            slowPreIncrementThreshold));
      }
      metricSource.incrementSlowDuplicateKeyCheckCalls();
    }
    metricSource.updateDuplicateKeyCheckTime(duration);
  }
}
Example #26
Source File: MiniHBaseCluster.java From hbase with Apache License 2.0
public List<HRegion> getRegions(TableName tableName) {
  List<HRegion> ret = new ArrayList<>();
  for (JVMClusterUtil.RegionServerThread rst : getRegionServerThreads()) {
    HRegionServer hrs = rst.getRegionServer();
    for (Region region : hrs.getOnlineRegionsLocalContext()) {
      if (region.getTableDescriptor().getTableName().equals(tableName)) {
        ret.add((HRegion) region);
      }
    }
  }
  return ret;
}
Example #27
Source File: MiniHBaseCluster.java From hbase with Apache License 2.0
/**
 * Get the location of the specified region
 * @param regionName Name of the region in bytes
 * @return Index into List of {@link MiniHBaseCluster#getRegionServerThreads()}
 *         of HRS carrying hbase:meta. Returns -1 if none found.
 */
public int getServerWith(byte[] regionName) {
  int index = 0;
  for (JVMClusterUtil.RegionServerThread rst : getRegionServerThreads()) {
    HRegionServer hrs = rst.getRegionServer();
    if (!hrs.isStopped()) {
      Region region = hrs.getOnlineRegion(regionName);
      if (region != null) {
        return index;
      }
    }
    index++;
  }
  return -1;
}
Example #28
Source File: TestCoprocessorScanPolicy.java From hbase with Apache License 2.0
@Override
public void preScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c, Scan scan)
    throws IOException {
  Region region = c.getEnvironment().getRegion();
  TableName tableName = region.getTableDescriptor().getTableName();
  Long ttl = this.ttls.get(tableName);
  if (ttl != null) {
    scan.setTimeRange(EnvironmentEdgeManager.currentTime() - ttl, scan.getTimeRange().getMax());
  }
  Integer version = this.versions.get(tableName);
  if (version != null) {
    scan.readVersions(version);
  }
}
Example #29
Source File: TestWALFiltering.java From hbase with Apache License 2.0
private List<byte[]> getRegionsByServer(int rsId) throws IOException {
  List<byte[]> regionNames = Lists.newArrayList();
  HRegionServer hrs = getRegionServer(rsId);
  for (Region r : hrs.getRegions(TABLE_NAME)) {
    regionNames.add(r.getRegionInfo().getRegionName());
  }
  return regionNames;
}
Example #30
Source File: TestAsyncRegionAdminApi.java From hbase with Apache License 2.0
private static int countStoreFilesInFamilies(List<Region> regions, final byte[][] families) {
  int count = 0;
  for (Region region : regions) {
    count += region.getStoreFileList(families).size();
  }
  return count;
}