Java Code Examples for org.apache.hadoop.hbase.client.Scan#withStartRow()
The following examples show how to use org.apache.hadoop.hbase.client.Scan#withStartRow().
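As a quick orientation before the project examples, here is a minimal, self-contained sketch of the usual pattern: withStartRow() sets the first row of the scan (inclusive by default), withStopRow() sets the end row (exclusive by default), and the two-argument overload withStartRow(row, inclusive) lets you change the inclusiveness. The table name "my_table", the column family "cf", and the row keys below are placeholders, not taken from any of the projects above.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class WithStartRowSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
        Table table = connection.getTable(TableName.valueOf("my_table"))) { // placeholder table
      Scan scan = new Scan()
          .withStartRow(Bytes.toBytes("row-0100"))  // inclusive by default
          .withStopRow(Bytes.toBytes("row-0200"))   // exclusive by default
          .addFamily(Bytes.toBytes("cf"));          // placeholder family
      // To exclude the start row instead, use the boolean overload:
      // scan.withStartRow(Bytes.toBytes("row-0100"), false);
      try (ResultScanner scanner = table.getScanner(scan)) {
        for (Result result : scanner) {
          System.out.println(Bytes.toStringBinary(result.getRow()));
        }
      }
    }
  }
}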
Example 1
Source File: IntegrationTestBigLinkedList.java From hbase with Apache License 2.0
protected static CINode findStartNode(Table table, byte[] startKey) throws IOException {
  Scan scan = new Scan();
  scan.withStartRow(startKey);
  scan.setBatch(1);
  scan.addColumn(FAMILY_NAME, COLUMN_PREV);

  long t1 = System.currentTimeMillis();
  ResultScanner scanner = table.getScanner(scan);
  Result result = scanner.next();
  long t2 = System.currentTimeMillis();
  scanner.close();

  if (result != null) {
    CINode node = getCINode(result, new CINode());
    System.out.printf("FSR %d %s\n", t2 - t1, Bytes.toStringBinary(node.key));
    return node;
  }

  System.out.println("FSR " + (t2 - t1));
  return null;
}
Example 2
Source File: IndexVerificationResultRepository.java From phoenix with Apache License 2.0
public IndexToolVerificationResult getVerificationResult(Table htable, long ts) throws IOException {
  byte[] startRowKey = Bytes.toBytes(Long.toString(ts));
  byte[] stopRowKey = ByteUtil.calculateTheClosestNextRowKeyForPrefix(startRowKey);
  IndexToolVerificationResult verificationResult = new IndexToolVerificationResult(ts);
  Scan scan = new Scan();
  scan.withStartRow(startRowKey);
  scan.withStopRow(stopRowKey);
  ResultScanner scanner = htable.getScanner(scan);
  for (Result result = scanner.next(); result != null; result = scanner.next()) {
    boolean isFirst = true;
    for (Cell cell : result.rawCells()) {
      if (isFirst) {
        byte[][] rowKeyParts =
            ByteUtil.splitArrayBySeparator(result.getRow(), ROW_KEY_SEPARATOR_BYTE[0]);
        verificationResult.setStartRow(rowKeyParts[3]);
        verificationResult.setStopRow(rowKeyParts[4]);
        isFirst = false;
      }
      verificationResult.update(cell);
    }
  }
  return verificationResult;
}
Example 3
Source File: UngroupedAggregateRegionObserver.java From phoenix with Apache License 2.0
@Override
public void preScannerOpen(
    org.apache.hadoop.hbase.coprocessor.ObserverContext<RegionCoprocessorEnvironment> c,
    Scan scan) throws IOException {
  super.preScannerOpen(c, scan);
  if (ScanUtil.isAnalyzeTable(scan)) {
    // We are setting the start row and stop row such that it covers the entire region. As part
    // of PHOENIX-1263 we are storing the guideposts against the physical table rather than
    // individual tenant specific tables.
    scan.withStartRow(HConstants.EMPTY_START_ROW);
    scan.withStopRow(HConstants.EMPTY_END_ROW);
    scan.setFilter(null);
  }
}
Example 4
Source File: MetaBrowser.java From hbase with Apache License 2.0
private Scan buildScan() {
  final Scan metaScan = new Scan()
      .addFamily(HConstants.CATALOG_FAMILY)
      .readVersions(1)
      .setLimit((scanLimit != null ? scanLimit : SCAN_LIMIT_DEFAULT) + 1);
  if (scanStart != null) {
    metaScan.withStartRow(scanStart, false);
  }
  final Filter filter = buildScanFilter();
  if (filter != null) {
    metaScan.setFilter(filter);
  }
  return metaScan;
}
Example 5
Source File: TestTableInputFormatScanBase.java From hbase with Apache License 2.0
/**
 * Tests a MR scan using specific start and stop rows.
 */
protected void testScan(String start, String stop, String last)
    throws IOException, InterruptedException, ClassNotFoundException {
  String jobName = "Scan" + (start != null ? start.toUpperCase(Locale.ROOT) : "Empty") + "To" +
      (stop != null ? stop.toUpperCase(Locale.ROOT) : "Empty");
  LOG.info("Before map/reduce startup - job " + jobName);
  Configuration c = new Configuration(TEST_UTIL.getConfiguration());

  Scan scan = new Scan();
  scan.addFamily(INPUT_FAMILYS[0]);
  scan.addFamily(INPUT_FAMILYS[1]);
  if (start != null) {
    scan.withStartRow(Bytes.toBytes(start));
  }
  c.set(KEY_STARTROW, start != null ? start : "");
  if (stop != null) {
    scan.withStopRow(Bytes.toBytes(stop));
  }
  c.set(KEY_LASTROW, last != null ? last : "");

  LOG.info("scan before: " + scan);
  Job job = Job.getInstance(c, jobName);
  TableMapReduceUtil.initTableMapperJob(TABLE_NAME, scan, ScanMapper.class,
      ImmutableBytesWritable.class, ImmutableBytesWritable.class, job);
  job.setReducerClass(ScanReducer.class);
  job.setNumReduceTasks(1); // one to get final "first" and "last" key
  FileOutputFormat.setOutputPath(job, new Path(job.getJobName()));
  LOG.info("Started " + job.getJobName());
  assertTrue(job.waitForCompletion(true));
  LOG.info("After map/reduce completion - job " + jobName);
}
Example 6
Source File: RowCounter.java From hbase with Apache License 2.0
/**
 * Sets filter {@link FilterBase} to the {@link Scan} instance.
 * If provided rowRangeList contains more than one element,
 * method sets filter which is instance of {@link MultiRowRangeFilter}.
 * Otherwise, method sets filter which is instance of {@link FirstKeyOnlyFilter}.
 * If rowRangeList contains exactly one element, startRow and stopRow are set to the scan.
 * @param scan
 * @param rowRangeList
 */
private static void setScanFilter(Scan scan, List<MultiRowRangeFilter.RowRange> rowRangeList) {
  final int size = rowRangeList == null ? 0 : rowRangeList.size();
  if (size <= 1) {
    scan.setFilter(new FirstKeyOnlyFilter());
  }
  if (size == 1) {
    MultiRowRangeFilter.RowRange range = rowRangeList.get(0);
    scan.withStartRow(range.getStartRow()); // inclusive
    scan.withStopRow(range.getStopRow());   // exclusive
  } else if (size > 1) {
    scan.setFilter(new MultiRowRangeFilter(rowRangeList));
  }
}
Example 7
Source File: IndexVerificationOutputRepository.java From phoenix with Apache License 2.0
public Iterator<IndexVerificationOutputRow> getOutputRowIterator(long ts, byte[] indexName)
    throws IOException {
  Scan scan = new Scan();
  byte[] partialKey = generatePartialOutputTableRowKey(ts, indexName);
  scan.withStartRow(partialKey);
  scan.withStopRow(ByteUtil.calculateTheClosestNextRowKeyForPrefix(partialKey));
  ResultScanner scanner = outputTable.getScanner(scan);
  return new IndexVerificationOutputRowIterator(scanner.iterator());
}
Example 8
Source File: BackupSystemTable.java From hbase with Apache License 2.0
/**
 * Creates Scan operation to load backup history
 * @return scan operation
 */
private Scan createScanForBackupHistory() {
  Scan scan = new Scan();
  byte[] startRow = Bytes.toBytes(BACKUP_INFO_PREFIX);
  byte[] stopRow = Arrays.copyOf(startRow, startRow.length);
  stopRow[stopRow.length - 1] = (byte) (stopRow[stopRow.length - 1] + 1);
  scan.withStartRow(startRow);
  scan.withStopRow(stopRow);
  scan.addFamily(BackupSystemTable.SESSIONS_FAMILY);
  scan.readVersions(1);
  return scan;
}
Example 9
Source File: TestTableInputFormat.java From hbase with Apache License 2.0
/**
 * Create table data and run tests on specified htable using the
 * o.a.h.hbase.mapreduce API.
 *
 * @param table
 * @throws IOException
 * @throws InterruptedException
 */
static void runTestMapreduce(Table table) throws IOException, InterruptedException {
  org.apache.hadoop.hbase.mapreduce.TableRecordReaderImpl trr =
      new org.apache.hadoop.hbase.mapreduce.TableRecordReaderImpl();
  Scan s = new Scan();
  s.withStartRow(Bytes.toBytes("aaa"));
  s.withStopRow(Bytes.toBytes("zzz"));
  s.addFamily(FAMILY);
  trr.setScan(s);
  trr.setHTable(table);

  trr.initialize(null, null);
  Result r = new Result();
  ImmutableBytesWritable key = new ImmutableBytesWritable();

  boolean more = trr.nextKeyValue();
  assertTrue(more);
  key = trr.getCurrentKey();
  r = trr.getCurrentValue();
  checkResult(r, key, Bytes.toBytes("aaa"), Bytes.toBytes("value aaa"));

  more = trr.nextKeyValue();
  assertTrue(more);
  key = trr.getCurrentKey();
  r = trr.getCurrentValue();
  checkResult(r, key, Bytes.toBytes("bbb"), Bytes.toBytes("value bbb"));

  // no more data
  more = trr.nextKeyValue();
  assertFalse(more);
}
Example 10
Source File: MetaTableAccessor.java From hbase with Apache License 2.0
/**
 * This method creates a Scan object that will only scan catalog rows that belong to the
 * specified table. It doesn't specify any columns. This is a better alternative to just using a
 * start row and scan until it hits a new table since that requires parsing the HRI to get the
 * table name.
 * @param tableName bytes of table's name
 * @return configured Scan object
 * @deprecated This is internal so please remove it when we get a chance.
 */
@Deprecated
public static Scan getScanForTableName(Connection connection, TableName tableName) {
  // Start key is just the table name with delimiters
  byte[] startKey = ClientMetaTableAccessor.getTableStartRowForMeta(tableName, QueryType.REGION);
  // Stop key appends the smallest possible char to the table name
  byte[] stopKey = ClientMetaTableAccessor.getTableStopRowForMeta(tableName, QueryType.REGION);

  Scan scan = getMetaScan(connection, -1);
  scan.withStartRow(startKey);
  scan.withStopRow(stopKey);
  return scan;
}
Example 11
Source File: BackupSystemTable.java From hbase with Apache License 2.0
/**
 * Creates Scan operation to load backup set list
 * @return scan operation
 */
private Scan createScanForBackupSetList() {
  Scan scan = new Scan();
  byte[] startRow = Bytes.toBytes(SET_KEY_PREFIX);
  byte[] stopRow = Arrays.copyOf(startRow, startRow.length);
  stopRow[stopRow.length - 1] = (byte) (stopRow[stopRow.length - 1] + 1);
  scan.withStartRow(startRow);
  scan.withStopRow(stopRow);
  scan.addFamily(BackupSystemTable.META_FAMILY);
  return scan;
}
Example 12
Source File: BackupSystemTable.java From hbase with Apache License 2.0
/**
 * Creates Scan operation to load WALs
 * @param backupRoot path to backup destination
 * @return scan operation
 */
private Scan createScanForGetWALs(String backupRoot) {
  // TODO: support for backupRoot
  Scan scan = new Scan();
  byte[] startRow = Bytes.toBytes(WALS_PREFIX);
  byte[] stopRow = Arrays.copyOf(startRow, startRow.length);
  stopRow[stopRow.length - 1] = (byte) (stopRow[stopRow.length - 1] + 1);
  scan.withStartRow(startRow);
  scan.withStopRow(stopRow);
  scan.addFamily(BackupSystemTable.META_FAMILY);
  return scan;
}
Example 13
Source File: BackupSystemTable.java From hbase with Apache License 2.0
static Scan createScanForBulkLoadedFiles(String backupId) {
  Scan scan = new Scan();
  byte[] startRow = backupId == null ? BULK_LOAD_PREFIX_BYTES
      : rowkey(BULK_LOAD_PREFIX, backupId + BLK_LD_DELIM);
  byte[] stopRow = Arrays.copyOf(startRow, startRow.length);
  stopRow[stopRow.length - 1] = (byte) (stopRow[stopRow.length - 1] + 1);
  scan.withStartRow(startRow);
  scan.withStopRow(stopRow);
  scan.addFamily(BackupSystemTable.META_FAMILY);
  scan.readVersions(1);
  return scan;
}
Example 14
Source File: BackupSystemTable.java From hbase with Apache License 2.0
/**
 * Creates Scan operation to load last RS log roll results
 * @return scan operation
 */
private Scan createScanForReadRegionServerLastLogRollResult(String backupRoot) {
  Scan scan = new Scan();
  byte[] startRow = rowkey(RS_LOG_TS_PREFIX, backupRoot);
  byte[] stopRow = Arrays.copyOf(startRow, startRow.length);
  stopRow[stopRow.length - 1] = (byte) (stopRow[stopRow.length - 1] + 1);
  scan.withStartRow(startRow);
  scan.withStopRow(stopRow);
  scan.addFamily(BackupSystemTable.META_FAMILY);
  scan.readVersions(1);
  return scan;
}
Example 15
Source File: CopyTable.java From hbase with Apache License 2.0
/**
 * Sets up the actual job.
 *
 * @param args The command line parameters.
 * @return The newly created job.
 * @throws IOException When setting up the job fails.
 */
public Job createSubmittableJob(String[] args) throws IOException {
  if (!doCommandLine(args)) {
    return null;
  }

  String jobName = NAME + "_" + (tableName == null ? snapshot : tableName);
  Job job = Job.getInstance(getConf(), getConf().get(JOB_NAME_CONF_KEY, jobName));
  job.setJarByClass(CopyTable.class);
  Scan scan = new Scan();

  scan.setBatch(batch);
  scan.setCacheBlocks(false);

  if (cacheRow > 0) {
    scan.setCaching(cacheRow);
  } else {
    scan.setCaching(getConf().getInt(HConstants.HBASE_CLIENT_SCANNER_CACHING, 100));
  }

  scan.setTimeRange(startTime, endTime);
  if (allCells) {
    scan.setRaw(true);
  }
  if (shuffle) {
    job.getConfiguration().set(TableInputFormat.SHUFFLE_MAPS, "true");
  }
  if (versions >= 0) {
    scan.readVersions(versions);
  }
  if (startRow != null) {
    scan.withStartRow(Bytes.toBytesBinary(startRow));
  }
  if (stopRow != null) {
    scan.withStopRow(Bytes.toBytesBinary(stopRow));
  }

  if (families != null) {
    String[] fams = families.split(",");
    Map<String, String> cfRenameMap = new HashMap<>();
    for (String fam : fams) {
      String sourceCf;
      if (fam.contains(":")) {
        // fam looks like "sourceCfName:destCfName"
        String[] srcAndDest = fam.split(":", 2);
        sourceCf = srcAndDest[0];
        String destCf = srcAndDest[1];
        cfRenameMap.put(sourceCf, destCf);
      } else {
        // fam is just "sourceCf"
        sourceCf = fam;
      }
      scan.addFamily(Bytes.toBytes(sourceCf));
    }
    Import.configureCfRenaming(job.getConfiguration(), cfRenameMap);
  }
  job.setNumReduceTasks(0);

  if (bulkload) {
    initCopyTableMapperReducerJob(job, scan);

    // We need to split the inputs by destination tables so that output of Map can be bulk-loaded.
    TableInputFormat.configureSplitTable(job, TableName.valueOf(dstTableName));

    bulkloadDir = generateUniqTempDir(false);
    LOG.info("HFiles will be stored at " + this.bulkloadDir);
    HFileOutputFormat2.setOutputPath(job, bulkloadDir);
    try (Connection conn = ConnectionFactory.createConnection(getConf());
        Admin admin = conn.getAdmin()) {
      HFileOutputFormat2.configureIncrementalLoadMap(job,
          admin.getDescriptor((TableName.valueOf(dstTableName))));
    }
  } else {
    initCopyTableMapperReducerJob(job, scan);
    TableMapReduceUtil.initTableReducerJob(dstTableName, null, job, null, peerAddress, null, null);
  }

  return job;
}
Example 16
Source File: ThriftUtilities.java From hbase with Apache License 2.0
public static Scan scanFromThrift(TScan in) throws IOException {
  Scan out = new Scan();

  if (in.isSetStartRow()) {
    out.withStartRow(in.getStartRow());
  }
  if (in.isSetStopRow()) {
    out.withStopRow(in.getStopRow());
  }
  if (in.isSetCaching()) {
    out.setCaching(in.getCaching());
  }
  if (in.isSetMaxVersions()) {
    out.readVersions(in.getMaxVersions());
  }

  if (in.isSetColumns()) {
    for (TColumn column : in.getColumns()) {
      if (column.isSetQualifier()) {
        out.addColumn(column.getFamily(), column.getQualifier());
      } else {
        out.addFamily(column.getFamily());
      }
    }
  }

  TTimeRange timeRange = in.getTimeRange();
  if (timeRange != null && timeRange.isSetMinStamp() && timeRange.isSetMaxStamp()) {
    out.setTimeRange(timeRange.getMinStamp(), timeRange.getMaxStamp());
  }

  if (in.isSetBatchSize()) {
    out.setBatch(in.getBatchSize());
  }
  if (in.isSetFilterString()) {
    ParseFilter parseFilter = new ParseFilter();
    out.setFilter(parseFilter.parseFilterString(in.getFilterString()));
  }
  if (in.isSetAttributes()) {
    addAttributes(out, in.getAttributes());
  }
  if (in.isSetAuthorizations()) {
    out.setAuthorizations(new Authorizations(in.getAuthorizations().getLabels()));
  }
  if (in.isSetReversed()) {
    out.setReversed(in.isReversed());
  }
  if (in.isSetCacheBlocks()) {
    out.setCacheBlocks(in.isCacheBlocks());
  }

  if (in.isSetColFamTimeRangeMap()) {
    Map<ByteBuffer, TTimeRange> colFamTimeRangeMap = in.getColFamTimeRangeMap();
    if (MapUtils.isNotEmpty(colFamTimeRangeMap)) {
      for (Map.Entry<ByteBuffer, TTimeRange> entry : colFamTimeRangeMap.entrySet()) {
        out.setColumnFamilyTimeRange(Bytes.toBytes(entry.getKey()),
            entry.getValue().getMinStamp(), entry.getValue().getMaxStamp());
      }
    }
  }

  if (in.isSetReadType()) {
    out.setReadType(readTypeFromThrift(in.getReadType()));
  }
  if (in.isSetLimit()) {
    out.setLimit(in.getLimit());
  }
  if (in.isSetConsistency()) {
    out.setConsistency(consistencyFromThrift(in.getConsistency()));
  }
  if (in.isSetTargetReplicaId()) {
    out.setReplicaId(in.getTargetReplicaId());
  }
  if (in.isSetFilterBytes()) {
    out.setFilter(filterFromThrift(in.getFilterBytes()));
  }

  return out;
}
Example 17
Source File: TestReversibleScanners.java From hbase with Apache License 2.0
@Test
public void testReversibleStoreScanner() throws IOException {
  // write data to one memstore and two store files
  FileSystem fs = TEST_UTIL.getTestFileSystem();
  Path hfilePath = new Path(
      new Path(TEST_UTIL.getDataTestDir("testReversibleStoreScanner"), "regionname"), "familyname");
  CacheConfig cacheConf = new CacheConfig(TEST_UTIL.getConfiguration());
  HFileContextBuilder hcBuilder = new HFileContextBuilder();
  hcBuilder.withBlockSize(2 * 1024);
  HFileContext hFileContext = hcBuilder.build();
  StoreFileWriter writer1 = new StoreFileWriter.Builder(TEST_UTIL.getConfiguration(), cacheConf, fs)
      .withOutputDir(hfilePath).withFileContext(hFileContext).build();
  StoreFileWriter writer2 = new StoreFileWriter.Builder(TEST_UTIL.getConfiguration(), cacheConf, fs)
      .withOutputDir(hfilePath).withFileContext(hFileContext).build();

  MemStore memstore = new DefaultMemStore();
  writeMemstoreAndStoreFiles(memstore, new StoreFileWriter[] { writer1, writer2 });

  HStoreFile sf1 = new HStoreFile(fs, writer1.getPath(), TEST_UTIL.getConfiguration(), cacheConf,
      BloomType.NONE, true);
  HStoreFile sf2 = new HStoreFile(fs, writer2.getPath(), TEST_UTIL.getConfiguration(), cacheConf,
      BloomType.NONE, true);

  ScanInfo scanInfo = new ScanInfo(TEST_UTIL.getConfiguration(), FAMILYNAME, 0, Integer.MAX_VALUE,
      Long.MAX_VALUE, KeepDeletedCells.FALSE, HConstants.DEFAULT_BLOCKSIZE, 0,
      CellComparatorImpl.COMPARATOR, false);

  // Case 1. Test a full reversed scan
  Scan scan = new Scan();
  scan.setReversed(true);
  StoreScanner storeScanner =
      getReversibleStoreScanner(memstore, sf1, sf2, scan, scanInfo, MAXMVCC);
  verifyCountAndOrder(storeScanner, QUALSIZE * ROWSIZE, ROWSIZE, false);

  // Case 2. Test reversed scan with a specified start row
  int startRowNum = ROWSIZE / 2;
  byte[] startRow = ROWS[startRowNum];
  scan.withStartRow(startRow);
  storeScanner = getReversibleStoreScanner(memstore, sf1, sf2, scan, scanInfo, MAXMVCC);
  verifyCountAndOrder(storeScanner, QUALSIZE * (startRowNum + 1), startRowNum + 1, false);

  // Case 3. Test reversed scan with a specified start row and specified qualifiers
  assertTrue(QUALSIZE > 2);
  scan.addColumn(FAMILYNAME, QUALS[0]);
  scan.addColumn(FAMILYNAME, QUALS[2]);
  storeScanner = getReversibleStoreScanner(memstore, sf1, sf2, scan, scanInfo, MAXMVCC);
  verifyCountAndOrder(storeScanner, 2 * (startRowNum + 1), startRowNum + 1, false);

  // Case 4. Test reversed scan with mvcc based on case 3
  for (int readPoint = 0; readPoint < MAXMVCC; readPoint++) {
    LOG.info("Setting read point to " + readPoint);
    storeScanner = getReversibleStoreScanner(memstore, sf1, sf2, scan, scanInfo, readPoint);
    int expectedRowCount = 0;
    int expectedKVCount = 0;
    for (int i = startRowNum; i >= 0; i--) {
      int kvCount = 0;
      if (makeMVCC(i, 0) <= readPoint) {
        kvCount++;
      }
      if (makeMVCC(i, 2) <= readPoint) {
        kvCount++;
      }
      if (kvCount > 0) {
        expectedRowCount++;
        expectedKVCount += kvCount;
      }
    }
    verifyCountAndOrder(storeScanner, expectedKVCount, expectedRowCount, false);
  }
}
Example 18
Source File: VerifyReplication.java From hbase with Apache License 2.0
private static void setStartAndStopRows(Scan scan, byte[] startPrefixRow, byte[] lastPrefixRow) {
  scan.withStartRow(startPrefixRow);
  byte[] stopRow = Bytes.add(Bytes.head(lastPrefixRow, lastPrefixRow.length - 1),
      new byte[] { (byte) (lastPrefixRow[lastPrefixRow.length - 1] + 1) });
  scan.withStopRow(stopRow);
}
Example 19
Source File: AsyncAggregationClient.java From hbase with Apache License 2.0
private static <R, S, P extends Message, Q extends Message, T extends Message> void findMedian(
    CompletableFuture<R> future, AsyncTable<AdvancedScanResultConsumer> table,
    ColumnInterpreter<R, S, P, Q, T> ci, Scan scan, NavigableMap<byte[], S> sumByRegion) {
  double halfSum = ci.divideForAvg(sumByRegion.values().stream().reduce(ci::add).get(), 2L);
  S movingSum = null;
  byte[] startRow = null;
  for (Map.Entry<byte[], S> entry : sumByRegion.entrySet()) {
    startRow = entry.getKey();
    S newMovingSum = ci.add(movingSum, entry.getValue());
    if (ci.divideForAvg(newMovingSum, 1L) > halfSum) {
      break;
    }
    movingSum = newMovingSum;
  }
  if (startRow != null) {
    scan.withStartRow(startRow);
  }
  // we can not pass movingSum directly to an anonymous class as it is not final.
  S baseSum = movingSum;
  byte[] family = scan.getFamilies()[0];
  NavigableSet<byte[]> qualifiers = scan.getFamilyMap().get(family);
  byte[] weightQualifier = qualifiers.last();
  byte[] valueQualifier = qualifiers.first();
  table.scan(scan, new AdvancedScanResultConsumer() {

    private S sum = baseSum;

    private R value = null;

    @Override
    public void onNext(Result[] results, ScanController controller) {
      try {
        for (Result result : results) {
          Cell weightCell = result.getColumnLatestCell(family, weightQualifier);
          R weight = ci.getValue(family, weightQualifier, weightCell);
          sum = ci.add(sum, ci.castToReturnType(weight));
          if (ci.divideForAvg(sum, 1L) > halfSum) {
            if (value != null) {
              future.complete(value);
            } else {
              future.completeExceptionally(new NoSuchElementException());
            }
            controller.terminate();
            return;
          }
          Cell valueCell = result.getColumnLatestCell(family, valueQualifier);
          value = ci.getValue(family, valueQualifier, valueCell);
        }
      } catch (IOException e) {
        future.completeExceptionally(e);
        controller.terminate();
      }
    }

    @Override
    public void onError(Throwable error) {
      future.completeExceptionally(error);
    }

    @Override
    public void onComplete() {
      if (!future.isDone()) {
        // we should not reach here as the future should be completed in onNext.
        future.completeExceptionally(new NoSuchElementException());
      }
    }
  });
}
Example 20
Source File: TableResultIterator.java From phoenix with Apache License 2.0
@Override
public Tuple next() throws SQLException {
  try {
    renewLeaseLock.lock();
    initScanner();
    ImmutableBytesWritable ptr = new ImmutableBytesWritable();
    try {
      lastTuple = scanIterator.next();
      if (lastTuple != null) {
        lastTuple.getKey(ptr);
      }
    } catch (SQLException e) {
      try {
        throw ServerUtil.parseServerException(e);
      } catch (HashJoinCacheNotFoundException e1) {
        if (ScanUtil.isNonAggregateScan(scan)
            && plan.getContext().getAggregationManager().isEmpty()) {
          // For non aggregate queries if we get stale region boundary exception we can
          // continue scanning from the next value of lasted fetched result.
          Scan newScan = ScanUtil.newScan(scan);
          newScan.withStartRow(newScan.getAttribute(SCAN_ACTUAL_START_ROW));
          if (lastTuple != null) {
            lastTuple.getKey(ptr);
            byte[] startRowSuffix = ByteUtil.copyKeyBytesIfNecessary(ptr);
            if (ScanUtil.isLocalIndex(newScan)) {
              // If we just set scan start row suffix then server side we prepare
              // actual scan boundaries by prefixing the region start key.
              newScan.setAttribute(SCAN_START_ROW_SUFFIX, ByteUtil.nextKey(startRowSuffix));
            } else {
              newScan.withStartRow(ByteUtil.nextKey(startRowSuffix));
            }
          }
          plan.getContext().getConnection().getQueryServices()
              .clearTableRegionCache(htable.getName());
          LOGGER.debug(
              "Retrying when Hash Join cache is not found on the server ,by sending the cache again");
          if (retry <= 0) {
            throw e1;
          }
          Long cacheId = e1.getCacheId();
          retry--;
          try {
            ServerCache cache = caches == null ? null
                : caches.get(new ImmutableBytesPtr(Bytes.toBytes(cacheId)));
            if (!hashCacheClient.addHashCacheToServer(newScan.getStartRow(), cache,
                plan.getTableRef().getTable())) {
              throw e1;
            }
            this.scanIterator = ((BaseQueryPlan) plan).iterator(caches, scanGrouper, newScan);
          } catch (Exception ex) {
            throw ServerUtil.parseServerException(ex);
          }
          lastTuple = scanIterator.next();
        } else {
          throw e;
        }
      }
    }
    return lastTuple;
  } finally {
    renewLeaseLock.unlock();
  }
}