org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaInputStreams Java Examples
The following examples show how to use
org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaInputStreams.
Each example notes the source file and project it comes from, together with its license.
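Before the individual examples, the snippet below is a minimal sketch of the common usage pattern seen in Example #1: ask the dataset for a paired data/checksum stream via getTmpInputStreams(), read from both sides, and let try-with-resources close them. The names dataset, block, blockOffset, metaOffset, sizePartialChunk and checksumSize are placeholders assumed for illustration, not part of the API.

  // Hedged sketch of the typical read pattern (placeholder variables, not real fields).
  byte[] data = new byte[sizePartialChunk];
  byte[] crc = new byte[checksumSize];
  try (ReplicaInputStreams instr =
      dataset.getTmpInputStreams(block, blockOffset, metaOffset)) {
    // the data stream reads from the replica's block file ...
    IOUtils.readFully(instr.getDataIn(), data, 0, data.length);
    // ... and the checksum stream reads from the matching meta file
    IOUtils.readFully(instr.getChecksumIn(), crc, 0, crc.length);
  }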
Example #1
Source File: BlockReceiver.java From hadoop with Apache License 2.0
/**
 * reads in the partial crc chunk and computes checksum
 * of pre-existing data in partial chunk.
 */
private Checksum computePartialChunkCrc(long blkoff, long ckoff)
    throws IOException {

  // find offset of the beginning of partial chunk.
  //
  int sizePartialChunk = (int) (blkoff % bytesPerChecksum);
  blkoff = blkoff - sizePartialChunk;
  if (LOG.isDebugEnabled()) {
    LOG.debug("computePartialChunkCrc for " + block
        + ": sizePartialChunk=" + sizePartialChunk
        + ", block offset=" + blkoff
        + ", metafile offset=" + ckoff);
  }

  // create an input stream from the block file
  // and read in partial crc chunk into temporary buffer
  //
  byte[] buf = new byte[sizePartialChunk];
  byte[] crcbuf = new byte[checksumSize];
  try (ReplicaInputStreams instr =
      datanode.data.getTmpInputStreams(block, blkoff, ckoff)) {
    IOUtils.readFully(instr.getDataIn(), buf, 0, sizePartialChunk);

    // open meta file and read in crc value computed earlier
    IOUtils.readFully(instr.getChecksumIn(), crcbuf, 0, crcbuf.length);
  }

  // compute crc of partial chunk from data read in the block file.
  final Checksum partialCrc = DataChecksum.newDataChecksum(
      diskChecksum.getChecksumType(), diskChecksum.getBytesPerChecksum());
  partialCrc.update(buf, 0, sizePartialChunk);
  if (LOG.isDebugEnabled()) {
    LOG.debug("Read in partial CRC chunk from disk for " + block);
  }

  // paranoia! verify that the pre-computed crc matches what we
  // recalculated just now
  if (partialCrc.getValue() != checksum2long(crcbuf)) {
    String msg = "Partial CRC " + partialCrc.getValue()
        + " does not match value computed the "
        + " last time file was closed "
        + checksum2long(crcbuf);
    throw new IOException(msg);
  }
  return partialCrc;
}
Example #2
Source File: SimulatedFSDataset.java From hadoop with Apache License 2.0
/** Not supported */
@Override // FsDatasetSpi
public ReplicaInputStreams getTmpInputStreams(ExtendedBlock b,
    long blkoff, long ckoff) throws IOException {
  throw new IOException("Not supported");
}
Example #3
Source File: ExternalDatasetImpl.java From hadoop with Apache License 2.0
@Override
public ReplicaInputStreams getTmpInputStreams(ExtendedBlock b,
    long blkoff, long ckoff) throws IOException {
  return new ReplicaInputStreams(null, null, null);
}
Example #4
Source File: BlockReceiver.java From big-c with Apache License 2.0
/**
 * reads in the partial crc chunk and computes checksum
 * of pre-existing data in partial chunk.
 */
private Checksum computePartialChunkCrc(long blkoff, long ckoff)
    throws IOException {

  // find offset of the beginning of partial chunk.
  //
  int sizePartialChunk = (int) (blkoff % bytesPerChecksum);
  blkoff = blkoff - sizePartialChunk;
  if (LOG.isDebugEnabled()) {
    LOG.debug("computePartialChunkCrc for " + block
        + ": sizePartialChunk=" + sizePartialChunk
        + ", block offset=" + blkoff
        + ", metafile offset=" + ckoff);
  }

  // create an input stream from the block file
  // and read in partial crc chunk into temporary buffer
  //
  byte[] buf = new byte[sizePartialChunk];
  byte[] crcbuf = new byte[checksumSize];
  try (ReplicaInputStreams instr =
      datanode.data.getTmpInputStreams(block, blkoff, ckoff)) {
    IOUtils.readFully(instr.getDataIn(), buf, 0, sizePartialChunk);

    // open meta file and read in crc value computed earlier
    IOUtils.readFully(instr.getChecksumIn(), crcbuf, 0, crcbuf.length);
  }

  // compute crc of partial chunk from data read in the block file.
  final Checksum partialCrc = DataChecksum.newDataChecksum(
      diskChecksum.getChecksumType(), diskChecksum.getBytesPerChecksum());
  partialCrc.update(buf, 0, sizePartialChunk);
  if (LOG.isDebugEnabled()) {
    LOG.debug("Read in partial CRC chunk from disk for " + block);
  }

  // paranoia! verify that the pre-computed crc matches what we
  // recalculated just now
  if (partialCrc.getValue() != checksum2long(crcbuf)) {
    String msg = "Partial CRC " + partialCrc.getValue()
        + " does not match value computed the "
        + " last time file was closed "
        + checksum2long(crcbuf);
    throw new IOException(msg);
  }
  return partialCrc;
}
Example #5
Source File: SimulatedFSDataset.java From big-c with Apache License 2.0
/** Not supported */
@Override // FsDatasetSpi
public ReplicaInputStreams getTmpInputStreams(ExtendedBlock b,
    long blkoff, long ckoff) throws IOException {
  throw new IOException("Not supported");
}
Example #6
Source File: ExternalDatasetImpl.java From big-c with Apache License 2.0
@Override
public ReplicaInputStreams getTmpInputStreams(ExtendedBlock b,
    long blkoff, long ckoff) throws IOException {
  return new ReplicaInputStreams(null, null, null);
}
Example #7
Source File: BlockPoolSlice.java From lucene-solr with Apache License 2.0
/**
 * Find out the number of bytes in the block that match its crc.
 *
 * This algorithm assumes that data corruption caused by unexpected
 * datanode shutdown occurs only in the last crc chunk. So it checks
 * only the last chunk.
 *
 * @param blockFile the block file
 * @param genStamp generation stamp of the block
 * @return the number of valid bytes
 */
private long validateIntegrityAndSetLength(File blockFile, long genStamp) {
  try {
    final File metaFile = FsDatasetUtil.getMetaFile(blockFile, genStamp);
    long blockFileLen = blockFile.length();
    long metaFileLen = metaFile.length();
    int crcHeaderLen = DataChecksum.getChecksumHeaderSize();
    if (!blockFile.exists() || blockFileLen == 0
        || !metaFile.exists() || metaFileLen < crcHeaderLen) {
      return 0;
    }
    try (DataInputStream checksumIn = new DataInputStream(
        new BufferedInputStream(
            fileIoProvider.getFileInputStream(volume, metaFile),
            ioFileBufferSize))) {
      // read and handle the common header here. For now just a version
      final DataChecksum checksum = BlockMetadataHeader.readDataChecksum(
          checksumIn, metaFile);
      int bytesPerChecksum = checksum.getBytesPerChecksum();
      int checksumSize = checksum.getChecksumSize();
      long numChunks = Math.min(
          (blockFileLen + bytesPerChecksum - 1) / bytesPerChecksum,
          (metaFileLen - crcHeaderLen) / checksumSize);
      if (numChunks == 0) {
        return 0;
      }
      try (InputStream blockIn = fileIoProvider.getFileInputStream(
              volume, blockFile);
          ReplicaInputStreams ris = new ReplicaInputStreams(blockIn,
              checksumIn, volume.obtainReference(), fileIoProvider)) {
        ris.skipChecksumFully((numChunks - 1) * checksumSize);
        long lastChunkStartPos = (numChunks - 1) * bytesPerChecksum;
        ris.skipDataFully(lastChunkStartPos);
        int lastChunkSize = (int) Math.min(
            bytesPerChecksum, blockFileLen - lastChunkStartPos);
        byte[] buf = new byte[lastChunkSize + checksumSize];
        ris.readChecksumFully(buf, lastChunkSize, checksumSize);
        ris.readDataFully(buf, 0, lastChunkSize);
        checksum.update(buf, 0, lastChunkSize);
        long validFileLength;
        if (checksum.compare(buf, lastChunkSize)) {
          // last chunk matches crc
          validFileLength = lastChunkStartPos + lastChunkSize;
        } else {
          // last chunk is corrupt
          validFileLength = lastChunkStartPos;
        }
        // truncate if extra bytes are present without CRC
        if (blockFile.length() > validFileLength) {
          try (RandomAccessFile blockRAF = fileIoProvider.getRandomAccessFile(
              volume, blockFile, "rw")) {
            // truncate blockFile
            blockRAF.setLength(validFileLength);
          }
        }
        return validFileLength;
      }
    }
  } catch (IOException e) {
    FsDatasetImpl.LOG.warn("Getting exception while validating integrity "
        + "and setting length for blockFile", e);
    return 0;
  }
}
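For readers who want the last-chunk validation idea without the DataNode plumbing, here is a simplified, self-contained sketch over plain files. The headerless meta-file layout, the 512-byte chunk size, and big-endian 4-byte CRC32 values are assumptions made for illustration only; the real meta file carries a BlockMetadataHeader and uses DataChecksum, as shown above.

  import java.io.IOException;
  import java.io.RandomAccessFile;
  import java.nio.file.Files;
  import java.nio.file.Path;
  import java.util.zip.CRC32;

  /** Simplified illustration: re-verify only the last chunk, then truncate. */
  public class LastChunkValidator {

    // Assumed layout: the meta file is a flat array of 4-byte CRC32 values,
    // one per 512-byte data chunk, with no header.
    static final int BYTES_PER_CHECKSUM = 512;
    static final int CHECKSUM_SIZE = 4;

    static long validateAndTruncate(Path blockFile, Path metaFile)
        throws IOException {
      long blockLen = Files.size(blockFile);
      long metaLen = Files.size(metaFile);
      long numChunks = Math.min(
          (blockLen + BYTES_PER_CHECKSUM - 1) / BYTES_PER_CHECKSUM,
          metaLen / CHECKSUM_SIZE);
      if (numChunks == 0) {
        return 0;
      }
      long lastChunkStart = (numChunks - 1) * BYTES_PER_CHECKSUM;
      int lastChunkSize = (int) Math.min(BYTES_PER_CHECKSUM,
          blockLen - lastChunkStart);

      // Read the last data chunk and its stored checksum.
      byte[] data = new byte[lastChunkSize];
      byte[] storedCrc = new byte[CHECKSUM_SIZE];
      try (RandomAccessFile blockIn = new RandomAccessFile(blockFile.toFile(), "r");
           RandomAccessFile metaIn = new RandomAccessFile(metaFile.toFile(), "r")) {
        blockIn.seek(lastChunkStart);
        blockIn.readFully(data);
        metaIn.seek((numChunks - 1) * CHECKSUM_SIZE);
        metaIn.readFully(storedCrc);
      }

      // Recompute the CRC of the last chunk and compare with the stored value.
      CRC32 crc = new CRC32();
      crc.update(data, 0, lastChunkSize);
      long stored = ((storedCrc[0] & 0xFFL) << 24) | ((storedCrc[1] & 0xFFL) << 16)
          | ((storedCrc[2] & 0xFFL) << 8) | (storedCrc[3] & 0xFFL);

      long validLength = (crc.getValue() == stored)
          ? lastChunkStart + lastChunkSize   // last chunk intact
          : lastChunkStart;                  // last chunk corrupt, drop it

      // Truncate any trailing bytes not covered by a matching CRC.
      if (blockLen > validLength) {
        try (RandomAccessFile raf = new RandomAccessFile(blockFile.toFile(), "rw")) {
          raf.setLength(validLength);
        }
      }
      return validLength;
    }
  }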