Java Code Examples for org.apache.hadoop.hbase.client.Scan#isRaw()
The following examples show how to use org.apache.hadoop.hbase.client.Scan#isRaw().
Each example notes the project and source file it was taken from.
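Before the extracted examples, here is a minimal client-side sketch of the flag that isRaw() reports. It is an illustration rather than code from any of the projects below: the table name my_table is hypothetical, and the sketch assumes a reachable cluster and the HBase 2.x client API, where setRaw(true) is what makes isRaw() return true on the server side.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;

public class RawScanExample {
  public static void main(String[] args) throws Exception {
    Scan scan = new Scan();
    scan.setRaw(true);       // include delete markers and not-yet-compacted versions
    scan.readAllVersions();  // raw scans are usually combined with all versions

    // This is the flag that the server-side code in the examples below checks.
    System.out.println("raw scan? " + scan.isRaw()); // prints true

    try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
        Table table = connection.getTable(TableName.valueOf("my_table")); // hypothetical table
        ResultScanner results = table.getScanner(scan)) {
      for (Result result : results) {
        System.out.println(result);
      }
    }
  }
}

A raw scan returns delete markers and versions that have not yet been purged by compaction, which is why the server-side matchers and scanners in the examples below branch on isRaw().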
Example 1
Source File: UserScanQueryMatcher.java, from the hbase project (Apache License 2.0)
protected UserScanQueryMatcher(Scan scan, ScanInfo scanInfo, ColumnTracker columns,
    boolean hasNullColumn, long oldestUnexpiredTS, long now) {
  super(createStartKey(scan, scanInfo), scanInfo, columns, oldestUnexpiredTS, now);
  this.hasNullColumn = hasNullColumn;
  this.filter = scan.getFilter();
  if (this.filter != null) {
    this.versionsAfterFilter = scan.isRaw() ? scan.getMaxVersions()
        : Math.min(scan.getMaxVersions(), scanInfo.getMaxVersions());
  } else {
    this.versionsAfterFilter = 0;
  }
  this.stopRow = scan.getStopRow();
  TimeRange timeRange = scan.getColumnFamilyTimeRange().get(scanInfo.getFamily());
  if (timeRange == null) {
    this.tr = scan.getTimeRange();
  } else {
    this.tr = timeRange;
  }
}
Example 2
Source File: UserScanQueryMatcher.java, from the hbase project (Apache License 2.0)
public static UserScanQueryMatcher create(Scan scan, ScanInfo scanInfo,
    NavigableSet<byte[]> columns, long oldestUnexpiredTS, long now,
    RegionCoprocessorHost regionCoprocessorHost) throws IOException {
  boolean hasNullColumn =
      !(columns != null && columns.size() != 0 && columns.first().length != 0);
  Pair<DeleteTracker, ColumnTracker> trackers =
      getTrackers(regionCoprocessorHost, columns, scanInfo, oldestUnexpiredTS, scan);
  DeleteTracker deleteTracker = trackers.getFirst();
  ColumnTracker columnTracker = trackers.getSecond();
  if (scan.isRaw()) {
    return RawScanQueryMatcher.create(scan, scanInfo, columnTracker, hasNullColumn,
        oldestUnexpiredTS, now);
  } else {
    return NormalUserScanQueryMatcher.create(scan, scanInfo, columnTracker, deleteTracker,
        hasNullColumn, oldestUnexpiredTS, now);
  }
}
Example 3
Source File: UngroupedAggregateRegionObserver.java, from the phoenix project (Apache License 2.0)
private RegionScanner rebuildIndices(final RegionScanner innerScanner, final Region region,
    final Scan scan, final RegionCoprocessorEnvironment env) throws IOException {
  boolean oldCoproc =
      region.getTableDescriptor().hasCoprocessor(Indexer.class.getCanonicalName());
  byte[] valueBytes = scan.getAttribute(BaseScannerRegionObserver.INDEX_REBUILD_VERIFY_TYPE);
  IndexTool.IndexVerifyType verifyType = (valueBytes != null)
      ? IndexTool.IndexVerifyType.fromValue(valueBytes) : IndexTool.IndexVerifyType.NONE;
  if (oldCoproc && verifyType == IndexTool.IndexVerifyType.ONLY) {
    return new IndexerRegionScanner(innerScanner, region, scan, env);
  }
  if (!scan.isRaw()) {
    Scan rawScan = new Scan(scan);
    rawScan.setRaw(true);
    rawScan.setMaxVersions();
    rawScan.getFamilyMap().clear();
    // For rebuilds we use count (*) as query for regular tables which ends up setting the FKOF on scan
    // This filter doesn't give us all columns and skips to the next row as soon as it finds 1 col
    // For rebuilds we need all columns and all versions
    if (scan.getFilter() instanceof FirstKeyOnlyFilter) {
      rawScan.setFilter(null);
    } else if (scan.getFilter() != null) {
      // Override the filter so that we get all versions
      rawScan.setFilter(new AllVersionsIndexRebuildFilter(scan.getFilter()));
    }
    rawScan.setCacheBlocks(false);
    for (byte[] family : scan.getFamilyMap().keySet()) {
      rawScan.addFamily(family);
    }
    innerScanner.close();
    RegionScanner scanner = region.getScanner(rawScan);
    return new IndexRebuildRegionScanner(scanner, region, scan, env, this);
  }
  return new IndexRebuildRegionScanner(innerScanner, region, scan, env, this);
}
Example 4
Source File: EncodedColumnsUtil.java, from the phoenix project (Apache License 2.0)
public static boolean useEncodedQualifierListOptimization(PTable table, Scan scan) {
  /*
   * HBase doesn't allow raw scans to have columns set. And we need columns to be set
   * explicitly on the scan to use this optimization.
   *
   * Disabling this optimization for tables with more than one column family.
   * See PHOENIX-3890.
   */
  return !scan.isRaw() && table.getColumnFamilies().size() <= 1
      && table.getImmutableStorageScheme() != null
      && table.getImmutableStorageScheme() == ImmutableStorageScheme.ONE_CELL_PER_COLUMN
      && usesEncodedColumnNames(table) && !table.isTransactional()
      && !ScanUtil.hasDynamicColumns(table);
}
Example 5
Source File: StoreScanner.java, from the hbase project (Apache License 2.0)
/** An internal constructor. */
private StoreScanner(HStore store, Scan scan, ScanInfo scanInfo, int numColumns, long readPt,
    boolean cacheBlocks, ScanType scanType) {
  this.readPt = readPt;
  this.store = store;
  this.cacheBlocks = cacheBlocks;
  this.comparator = Preconditions.checkNotNull(scanInfo.getComparator());
  get = scan.isGetScan();
  explicitColumnQuery = numColumns > 0;
  this.scan = scan;
  this.now = EnvironmentEdgeManager.currentTime();
  this.oldestUnexpiredTS = scan.isRaw() ? 0L : now - scanInfo.getTtl();
  this.minVersions = scanInfo.getMinVersions();

  // We look up row-column Bloom filters for multi-column queries as part of
  // the seek operation. However, we also look the row-column Bloom filter
  // for multi-row (non-"get") scans because this is not done in
  // StoreFile.passesBloomFilter(Scan, SortedSet<byte[]>).
  this.useRowColBloom = numColumns > 1 || (!get && numColumns == 1);
  this.maxRowSize = scanInfo.getTableMaxRowSize();
  if (get) {
    this.readType = Scan.ReadType.PREAD;
    this.scanUsePread = true;
  } else if (scanType != ScanType.USER_SCAN) {
    // For compaction scanners never use Pread as already we have stream based scanners on the
    // store files to be compacted
    this.readType = Scan.ReadType.STREAM;
    this.scanUsePread = false;
  } else {
    if (scan.getReadType() == Scan.ReadType.DEFAULT) {
      this.readType = scanInfo.isUsePread() ? Scan.ReadType.PREAD : Scan.ReadType.DEFAULT;
    } else {
      this.readType = scan.getReadType();
    }
    // Always start with pread unless user specific stream. Will change to stream later if
    // readType is default if the scan keeps running for a long time.
    this.scanUsePread = this.readType != Scan.ReadType.STREAM;
  }
  this.preadMaxBytes = scanInfo.getPreadMaxBytes();
  this.cellsPerHeartbeatCheck = scanInfo.getCellsPerTimeoutCheck();
  // Parallel seeking is on if the config allows and more there is more than one store file.
  if (store != null && store.getStorefilesCount() > 1) {
    RegionServerServices rsService = store.getHRegion().getRegionServerServices();
    if (rsService != null && scanInfo.isParallelSeekEnabled()) {
      this.parallelSeekEnabled = true;
      this.executor = rsService.getExecutorService();
    }
  }
}
Example 6
Source File: StoreScanner.java, from the hbase project (Apache License 2.0)
/**
 * Opens a scanner across memstore, snapshot, and all StoreFiles. Assumes we
 * are not in a compaction.
 *
 * @param store who we scan
 * @param scan the spec
 * @param columns which columns we are scanning
 * @throws IOException
 */
public StoreScanner(HStore store, ScanInfo scanInfo, Scan scan, NavigableSet<byte[]> columns,
    long readPt) throws IOException {
  this(store, scan, scanInfo, columns != null ? columns.size() : 0, readPt,
      scan.getCacheBlocks(), ScanType.USER_SCAN);
  if (columns != null && scan.isRaw()) {
    throw new DoNotRetryIOException("Cannot specify any column for a raw scan");
  }
  matcher = UserScanQueryMatcher.create(scan, scanInfo, columns, oldestUnexpiredTS, now,
      store.getCoprocessorHost());

  store.addChangedReaderObserver(this);

  List<KeyValueScanner> scanners = null;
  try {
    // Pass columns to try to filter out unnecessary StoreFiles.
    scanners = selectScannersFrom(store,
        store.getScanners(cacheBlocks, scanUsePread, false, matcher, scan.getStartRow(),
            scan.includeStartRow(), scan.getStopRow(), scan.includeStopRow(), this.readPt));

    // Seek all scanners to the start of the Row (or if the exact matching row
    // key does not exist, then to the start of the next matching Row).
    // Always check bloom filter to optimize the top row seek for delete
    // family marker.
    seekScanners(scanners, matcher.getStartKey(), explicitColumnQuery && lazySeekEnabledGlobally,
        parallelSeekEnabled);

    // set storeLimit
    this.storeLimit = scan.getMaxResultsPerColumnFamily();

    // set rowOffset
    this.storeOffset = scan.getRowOffsetPerColumnFamily();
    addCurrentScanners(scanners);
    // Combine all seeked scanners with a heap
    resetKVHeap(scanners, comparator);
  } catch (IOException e) {
    clearAndClose(scanners);
    // remove us from the HStore#changedReaderObservers here or we'll have no chance to
    // and might cause memory leak
    store.deleteChangedReaderObserver(this);
    throw e;
  }
}
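As a side note on the DoNotRetryIOException above: the combination this guard rejects is a raw scan that also names explicit columns. A minimal client-side sketch of that combination follows; the family cf and qualifier q1 are hypothetical.

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;

// This is the combination the StoreScanner constructor above rejects with
// DoNotRetryIOException("Cannot specify any column for a raw scan") when the scan is opened.
Scan badRawScan = new Scan()
    .setRaw(true)
    .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q1")); // hypothetical family/qualifier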
Example 7
Source File: ScanQueryMatcher.java, from the hbase project (Apache License 2.0)
protected static Pair<DeleteTracker, ColumnTracker> getTrackers(RegionCoprocessorHost host,
    NavigableSet<byte[]> columns, ScanInfo scanInfo, long oldestUnexpiredTS, Scan userScan)
    throws IOException {
  int resultMaxVersion = scanInfo.getMaxVersions();
  int maxVersionToCheck = resultMaxVersion;
  if (userScan != null) {
    if (userScan.isRaw()) {
      resultMaxVersion = userScan.getMaxVersions();
      maxVersionToCheck = userScan.hasFilter() ? Integer.MAX_VALUE : resultMaxVersion;
    } else {
      resultMaxVersion = Math.min(userScan.getMaxVersions(), scanInfo.getMaxVersions());
      maxVersionToCheck = userScan.hasFilter() ? scanInfo.getMaxVersions() : resultMaxVersion;
    }
  }

  DeleteTracker deleteTracker;
  if (scanInfo.isNewVersionBehavior() && (userScan == null || !userScan.isRaw())) {
    deleteTracker = new NewVersionBehaviorTracker(columns, scanInfo.getComparator(),
        scanInfo.getMinVersions(), scanInfo.getMaxVersions(), resultMaxVersion,
        oldestUnexpiredTS);
  } else {
    deleteTracker = new ScanDeleteTracker(scanInfo.getComparator());
  }
  if (host != null) {
    deleteTracker = host.postInstantiateDeleteTracker(deleteTracker);
    if (deleteTracker instanceof VisibilityScanDeleteTracker && scanInfo.isNewVersionBehavior()) {
      deleteTracker = new VisibilityNewVersionBehaivorTracker(columns, scanInfo.getComparator(),
          scanInfo.getMinVersions(), scanInfo.getMaxVersions(), resultMaxVersion,
          oldestUnexpiredTS);
    }
  }

  ColumnTracker columnTracker;
  if (deleteTracker instanceof NewVersionBehaviorTracker) {
    columnTracker = (NewVersionBehaviorTracker) deleteTracker;
  } else if (columns == null || columns.size() == 0) {
    columnTracker = new ScanWildcardColumnTracker(scanInfo.getMinVersions(), maxVersionToCheck,
        oldestUnexpiredTS, scanInfo.getComparator());
  } else {
    columnTracker = new ExplicitColumnTracker(columns, scanInfo.getMinVersions(),
        maxVersionToCheck, oldestUnexpiredTS);
  }
  return new Pair<>(deleteTracker, columnTracker);
}