Java Code Examples for org.apache.hadoop.hbase.regionserver.RegionScanner#getMaxResultSize()
The following examples show how to use org.apache.hadoop.hbase.regionserver.RegionScanner#getMaxResultSize().
Each example links to the original project and source file, so you can review the surrounding code and related API usage in context.
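In the examples below, getMaxResultSize() is simply delegated to an underlying scanner. As a rough illustration of how the returned value can be used, the sketch below reads the advertised limit and stops accumulating cells once a batch reaches that many estimated bytes. This is a hedged sketch, not code from either project: the class and method names are hypothetical, and it assumes an HBase 1.x-era API where CellUtil.estimatedSerializedSizeOf(Cell) is available.

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.regionserver.RegionScanner;

public class MaxResultSizeExample {

    /**
     * Hypothetical helper: accumulates rows from the scanner until the scanner's
     * advertised max result size (in bytes) is reached or the scanner is exhausted.
     */
    static List<Cell> readOneBatch(RegionScanner scanner) throws IOException {
        // Byte limit the client asked for; some versions may report
        // Long.MAX_VALUE (or -1) when no limit was set.
        long maxResultSize = scanner.getMaxResultSize();
        List<Cell> batch = new ArrayList<Cell>();
        long accumulated = 0;
        boolean hasMore = true;
        while (hasMore && accumulated < maxResultSize) {
            List<Cell> row = new ArrayList<Cell>();
            hasMore = scanner.next(row);                                  // one row per call
            for (Cell cell : row) {
                accumulated += CellUtil.estimatedSerializedSizeOf(cell);  // rough per-cell size
            }
            batch.addAll(row);
        }
        return batch;
    }
}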
Example 1
Source File: SpillableGroupByCache.java (from Apache Phoenix, Apache License 2.0)
@Override
public RegionScanner getScanner(final RegionScanner s) {
    final Iterator<Entry<ImmutableBytesWritable, Aggregator[]>> cacheIter = new EntryIterator();

    // scanner using the spillable implementation
    return new BaseRegionScanner() {
        @Override
        public HRegionInfo getRegionInfo() {
            return s.getRegionInfo();
        }

        @Override
        public void close() throws IOException {
            try {
                s.close();
            } finally {
                // Always close gbCache and swallow possible Exceptions
                Closeables.closeQuietly(SpillableGroupByCache.this);
            }
        }

        @Override
        public boolean next(List<Cell> results) throws IOException {
            if (!cacheIter.hasNext()) {
                return false;
            }
            Map.Entry<ImmutableBytesWritable, Aggregator[]> ce = cacheIter.next();
            ImmutableBytesWritable key = ce.getKey();
            Aggregator[] aggs = ce.getValue();
            byte[] value = aggregators.toBytes(aggs);
            if (logger.isDebugEnabled()) {
                logger.debug("Adding new distinct group: "
                        + Bytes.toStringBinary(key.get(), key.getOffset(), key.getLength())
                        + " with aggregators " + aggs.toString()
                        + " value = " + Bytes.toStringBinary(value));
            }
            results.add(KeyValueUtil.newKeyValue(key.get(), key.getOffset(), key.getLength(),
                    SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, value, 0, value.length));
            return cacheIter.hasNext();
        }

        @Override
        public long getMaxResultSize() {
            return s.getMaxResultSize();
        }
    };
}
Example 2
Source File: GroupedAggregateRegionObserver.java (from Apache Phoenix, Apache License 2.0)
@Override
public RegionScanner getScanner(final RegionScanner s) {
    // Compute final allocation
    long estSize = sizeOfUnorderedGroupByMap(aggregateMap.size(), aggregators.getEstimatedByteSize());
    chunk.resize(estSize);

    final List<KeyValue> aggResults = new ArrayList<KeyValue>(aggregateMap.size());

    final Iterator<Map.Entry<ImmutableBytesPtr, Aggregator[]>> cacheIter =
            aggregateMap.entrySet().iterator();
    while (cacheIter.hasNext()) {
        Map.Entry<ImmutableBytesPtr, Aggregator[]> entry = cacheIter.next();
        ImmutableBytesPtr key = entry.getKey();
        Aggregator[] rowAggregators = entry.getValue();
        // Generate byte array of Aggregators and set as value of row
        byte[] value = aggregators.toBytes(rowAggregators);
        if (logger.isDebugEnabled()) {
            logger.debug(LogUtil.addCustomAnnotations("Adding new distinct group: "
                    + Bytes.toStringBinary(key.get(), key.getOffset(), key.getLength())
                    + " with aggregators " + Arrays.asList(rowAggregators).toString()
                    + " value = " + Bytes.toStringBinary(value), customAnnotations));
        }
        KeyValue keyValue = KeyValueUtil.newKeyValue(key.get(), key.getOffset(), key.getLength(),
                SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, value, 0, value.length);
        aggResults.add(keyValue);
    }

    // scanner using the non spillable, memory-only implementation
    return new BaseRegionScanner() {
        private int index = 0;

        @Override
        public HRegionInfo getRegionInfo() {
            return s.getRegionInfo();
        }

        @Override
        public void close() throws IOException {
            try {
                s.close();
            } finally {
                InMemoryGroupByCache.this.close();
            }
        }

        @Override
        public boolean next(List<Cell> results) throws IOException {
            if (index >= aggResults.size()) {
                return false;
            }
            results.add(aggResults.get(index));
            index++;
            return index < aggResults.size();
        }

        @Override
        public long getMaxResultSize() {
            return s.getMaxResultSize();
        }
    };
}