Java Code Examples for org.apache.hadoop.mapred.Merger.Segment#inMemory()
The following examples show how to use
org.apache.hadoop.mapred.Merger.Segment#inMemory().
You can go to the original project or source file by following the links above each example.
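Before the examples, here is a minimal sketch of the call itself: inMemory() reports whether a Segment is backed by an in-memory buffer or by a spill file on disk, so callers typically branch on it when accounting for or releasing segment storage. The SegmentSizeReport class and its sizeByBackingStore method are hypothetical names introduced for illustration; only Segment#inMemory() and Segment#getLength() are taken from the examples below.

import java.util.List;

import org.apache.hadoop.mapred.Merger.Segment;

// Hypothetical helper: splits the total size of a segment list by backing
// store, using only Segment#inMemory() and Segment#getLength() as seen in
// the examples on this page. The segmentList is assumed to have been
// populated elsewhere (e.g. by a merge-side cache such as BackupStore).
public class SegmentSizeReport<K, V> {

  public long[] sizeByBackingStore(List<Segment<K, V>> segmentList) {
    long inMemoryBytes = 0;
    long onDiskBytes = 0;
    for (Segment<K, V> segment : segmentList) {
      if (segment.inMemory()) {
        // Segment is backed by an in-memory buffer.
        inMemoryBytes += segment.getLength();
      } else {
        // Segment is backed by a spill file on disk.
        onDiskBytes += segment.getLength();
      }
    }
    return new long[] { inMemoryBytes, onDiskBytes };
  }
}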
Example 1
Source File: BackupStore.java From hadoop with Apache License 2.0 | 5 votes |
public void reset() throws IOException {

  // Create a new segment for the previously written records only if we
  // are not already in the reset mode
  if (!inReset) {
    if (fileCache.isActive) {
      fileCache.createInDiskSegment();
    } else {
      memCache.createInMemorySegment();
    }
  }

  inReset = true;

  // Reset the segments to the correct position from where the next read
  // should begin.
  for (int i = 0; i < segmentList.size(); i++) {
    Segment<K,V> s = segmentList.get(i);
    if (s.inMemory()) {
      int offset = (i == 0) ? firstSegmentOffset : 0;
      s.getReader().reset(offset);
    } else {
      s.closeReader();
      if (i == 0) {
        s.reinitReader(firstSegmentOffset);
        s.getReader().disableChecksumValidation();
      }
    }
  }

  currentKVOffset = firstSegmentOffset;
  nextKVOffset = -1;
  readSegmentIndex = 0;
  hasMore = false;
  lastSegmentEOF = false;

  LOG.debug("Reset - First segment offset is " + firstSegmentOffset +
      " Segment List Size is " + segmentList.size());
}
Example 2
Source File: BackupStore.java From hadoop with Apache License 2.0 | 5 votes |
private void clearSegmentList() throws IOException {
  for (Segment<K,V> segment: segmentList) {
    long len = segment.getLength();
    segment.close();
    if (segment.inMemory()) {
      memCache.unreserve(len);
    }
  }
  segmentList.clear();
}
Example 3
Source File: BackupStore.java From big-c with Apache License 2.0 | 5 votes |
public void reset() throws IOException {

  // Create a new segment for the previously written records only if we
  // are not already in the reset mode
  if (!inReset) {
    if (fileCache.isActive) {
      fileCache.createInDiskSegment();
    } else {
      memCache.createInMemorySegment();
    }
  }

  inReset = true;

  // Reset the segments to the correct position from where the next read
  // should begin.
  for (int i = 0; i < segmentList.size(); i++) {
    Segment<K,V> s = segmentList.get(i);
    if (s.inMemory()) {
      int offset = (i == 0) ? firstSegmentOffset : 0;
      s.getReader().reset(offset);
    } else {
      s.closeReader();
      if (i == 0) {
        s.reinitReader(firstSegmentOffset);
        s.getReader().disableChecksumValidation();
      }
    }
  }

  currentKVOffset = firstSegmentOffset;
  nextKVOffset = -1;
  readSegmentIndex = 0;
  hasMore = false;
  lastSegmentEOF = false;

  LOG.debug("Reset - First segment offset is " + firstSegmentOffset +
      " Segment List Size is " + segmentList.size());
}
Example 4
Source File: BackupStore.java From big-c with Apache License 2.0 | 5 votes |
private void clearSegmentList() throws IOException {
  for (Segment<K,V> segment: segmentList) {
    long len = segment.getLength();
    segment.close();
    if (segment.inMemory()) {
      memCache.unreserve(len);
    }
  }
  segmentList.clear();
}
Example 5
Source File: BackupStore.java From hadoop with Apache License 2.0 | 4 votes |
public boolean hasNext() throws IOException {

  if (lastSegmentEOF) {
    return false;
  }

  // We read the next KV from the cache to decide if there is any left.
  // Since hasNext can be called several times before the actual call to
  // next(), we use hasMore to avoid extra reads. hasMore is set to false
  // when the user actually consumes this record in next()
  if (hasMore) {
    return true;
  }

  Segment<K,V> seg = segmentList.get(readSegmentIndex);
  // Mark the current position. This would be set to currentKVOffset
  // when the user consumes this record in next().
  nextKVOffset = (int) seg.getActualPosition();
  if (seg.nextRawKey()) {
    currentKey = seg.getKey();
    seg.getValue(currentValue);
    hasMore = true;
    return true;
  } else {
    if (!seg.inMemory()) {
      seg.closeReader();
    }
  }

  // If this is the last segment, mark the lastSegmentEOF flag and return
  if (readSegmentIndex == segmentList.size() - 1) {
    nextKVOffset = -1;
    lastSegmentEOF = true;
    return false;
  }

  nextKVOffset = 0;
  readSegmentIndex++;

  Segment<K,V> nextSegment = segmentList.get(readSegmentIndex);

  // We possibly are moving from a memory segment to a disk segment.
  // Reset so that we do not corrupt the in-memory segment buffer.
  // See HADOOP-5494
  if (!nextSegment.inMemory()) {
    currentValue.reset(currentDiskValue.getData(),
        currentDiskValue.getLength());
    nextSegment.init(null);
  }

  if (nextSegment.nextRawKey()) {
    currentKey = nextSegment.getKey();
    nextSegment.getValue(currentValue);
    hasMore = true;
    return true;
  } else {
    throw new IOException("New segment did not have even one K/V");
  }
}
Example 6
Source File: BackupStore.java From big-c with Apache License 2.0 | 4 votes |
public boolean hasNext() throws IOException {

  if (lastSegmentEOF) {
    return false;
  }

  // We read the next KV from the cache to decide if there is any left.
  // Since hasNext can be called several times before the actual call to
  // next(), we use hasMore to avoid extra reads. hasMore is set to false
  // when the user actually consumes this record in next()
  if (hasMore) {
    return true;
  }

  Segment<K,V> seg = segmentList.get(readSegmentIndex);
  // Mark the current position. This would be set to currentKVOffset
  // when the user consumes this record in next().
  nextKVOffset = (int) seg.getActualPosition();
  if (seg.nextRawKey()) {
    currentKey = seg.getKey();
    seg.getValue(currentValue);
    hasMore = true;
    return true;
  } else {
    if (!seg.inMemory()) {
      seg.closeReader();
    }
  }

  // If this is the last segment, mark the lastSegmentEOF flag and return
  if (readSegmentIndex == segmentList.size() - 1) {
    nextKVOffset = -1;
    lastSegmentEOF = true;
    return false;
  }

  nextKVOffset = 0;
  readSegmentIndex++;

  Segment<K,V> nextSegment = segmentList.get(readSegmentIndex);

  // We possibly are moving from a memory segment to a disk segment.
  // Reset so that we do not corrupt the in-memory segment buffer.
  // See HADOOP-5494
  if (!nextSegment.inMemory()) {
    currentValue.reset(currentDiskValue.getData(),
        currentDiskValue.getLength());
    nextSegment.init(null);
  }

  if (nextSegment.nextRawKey()) {
    currentKey = nextSegment.getKey();
    nextSegment.getValue(currentValue);
    hasMore = true;
    return true;
  } else {
    throw new IOException("New segment did not have even one K/V");
  }
}