Java Code Examples for org.apache.hadoop.hdfs.protocol.HdfsConstants#IO_FILE_BUFFER_SIZE
The following examples show how to use org.apache.hadoop.hdfs.protocol.HdfsConstants#IO_FILE_BUFFER_SIZE.
Each example notes its original project and source file above the code.
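HdfsConstants.IO_FILE_BUFFER_SIZE holds the cluster-default I/O buffer size, initialized from the io.file.buffer.size configuration key (4096 bytes unless overridden). Before the full examples, here is a minimal sketch of the typical pattern, sizing a buffered stream with the constant; the file path is a placeholder, not taken from the examples below.

import java.io.BufferedInputStream;
import java.io.FileInputStream;
import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.HdfsConstants;

public class BufferSizeExample {
  public static void main(String[] args) throws IOException {
    // Wrap a raw stream in a buffer sized by the cluster-wide default
    // (io.file.buffer.size, 4096 bytes unless overridden).
    try (BufferedInputStream in = new BufferedInputStream(
        new FileInputStream("/tmp/example.dat"),   // hypothetical path
        HdfsConstants.IO_FILE_BUFFER_SIZE)) {
      byte[] buf = new byte[HdfsConstants.IO_FILE_BUFFER_SIZE];
      int n;
      while ((n = in.read(buf)) > 0) {
        // process n bytes of buf here
      }
    }
  }
}

Both examples below follow this same pattern: the constant sizes either the raw copy buffer (Example 1) or a BufferedInputStream (Example 2).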
Example 1
Source File: TransferFsImage.java (from hadoop, Apache License 2.0)
private static void copyFileToStream(OutputStream out, File localfile,
    FileInputStream infile, DataTransferThrottler throttler,
    Canceler canceler) throws IOException {
  byte buf[] = new byte[HdfsConstants.IO_FILE_BUFFER_SIZE];
  try {
    CheckpointFaultInjector.getInstance()
        .aboutToSendFile(localfile);

    if (CheckpointFaultInjector.getInstance()
        .shouldSendShortFile(localfile)) {
      // Test sending image shorter than localfile
      long len = localfile.length();
      buf = new byte[(int)Math.min(len/2, HdfsConstants.IO_FILE_BUFFER_SIZE)];
      // This will read at most half of the image
      // and the rest of the image will be sent over the wire
      infile.read(buf);
    }
    int num = 1;
    while (num > 0) {
      if (canceler != null && canceler.isCancelled()) {
        throw new SaveNamespaceCancelledException(
            canceler.getCancellationReason());
      }
      num = infile.read(buf);
      if (num <= 0) {
        break;
      }
      if (CheckpointFaultInjector.getInstance()
          .shouldCorruptAByte(localfile)) {
        // Simulate a corrupted byte on the wire
        LOG.warn("SIMULATING A CORRUPT BYTE IN IMAGE TRANSFER!");
        buf[0]++;
      }

      out.write(buf, 0, num);
      if (throttler != null) {
        throttler.throttle(num, canceler);
      }
    }
  } catch (EofException e) {
    LOG.info("Connection closed by client");
    out = null; // so we don't close in the finally
  } finally {
    if (out != null) {
      out.close();
    }
  }
}
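copyFileToStream() pairs the IO_FILE_BUFFER_SIZE copy buffer with a DataTransferThrottler so fsimage transfers do not saturate the network. Below is a stripped-down sketch of the same copy loop without the checkpoint fault-injection hooks; the helper name and the 1 MB/s rate in the usage comment are assumptions, not part of the original code.

import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;

import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.util.DataTransferThrottler;

public class ThrottledCopy {
  /** Copy in to out, pausing as needed to respect the throttler's rate. */
  static void copy(InputStream in, OutputStream out,
      DataTransferThrottler throttler) throws IOException {
    byte[] buf = new byte[HdfsConstants.IO_FILE_BUFFER_SIZE];
    int num;
    while ((num = in.read(buf)) > 0) {
      out.write(buf, 0, num);
      if (throttler != null) {
        throttler.throttle(num); // sleeps if the rate budget is exhausted
      }
    }
  }

  // Usage: cap the copy at roughly 1 MB/s (the rate is an assumption).
  // copy(in, out, new DataTransferThrottler(1024 * 1024));
}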
Example 2
Source File: DataXceiver.java (from hadoop, Apache License 2.0)
@Override
public void blockChecksum(final ExtendedBlock block,
    final Token<BlockTokenIdentifier> blockToken) throws IOException {
  final DataOutputStream out = new DataOutputStream(
      getOutputStream());
  checkAccess(out, true, block, blockToken,
      Op.BLOCK_CHECKSUM, BlockTokenSecretManager.AccessMode.READ);
  // client side now can specify a range of the block for checksum
  long requestLength = block.getNumBytes();
  Preconditions.checkArgument(requestLength >= 0);
  long visibleLength = datanode.data.getReplicaVisibleLength(block);
  boolean partialBlk = requestLength < visibleLength;

  updateCurrentThreadName("Reading metadata for block " + block);
  final LengthInputStream metadataIn = datanode.data
      .getMetaDataInputStream(block);

  final DataInputStream checksumIn = new DataInputStream(
      new BufferedInputStream(metadataIn, HdfsConstants.IO_FILE_BUFFER_SIZE));
  updateCurrentThreadName("Getting checksum for block " + block);
  try {
    //read metadata file
    final BlockMetadataHeader header = BlockMetadataHeader
        .readHeader(checksumIn);
    final DataChecksum checksum = header.getChecksum();
    final int csize = checksum.getChecksumSize();
    final int bytesPerCRC = checksum.getBytesPerChecksum();
    final long crcPerBlock = csize <= 0 ? 0 :
        (metadataIn.getLength() - BlockMetadataHeader.getHeaderSize()) / csize;

    final MD5Hash md5 = partialBlk && crcPerBlock > 0 ?
        calcPartialBlockChecksum(block, requestLength, checksum, checksumIn)
        : MD5Hash.digest(checksumIn);
    if (LOG.isDebugEnabled()) {
      LOG.debug("block=" + block + ", bytesPerCRC=" + bytesPerCRC
          + ", crcPerBlock=" + crcPerBlock + ", md5=" + md5);
    }

    //write reply
    BlockOpResponseProto.newBuilder()
        .setStatus(SUCCESS)
        .setChecksumResponse(OpBlockChecksumResponseProto.newBuilder()
            .setBytesPerCrc(bytesPerCRC)
            .setCrcPerBlock(crcPerBlock)
            .setMd5(ByteString.copyFrom(md5.getDigest()))
            .setCrcType(PBHelper.convert(checksum.getChecksumType())))
        .build()
        .writeDelimitedTo(out);
    out.flush();
  } catch (IOException ioe) {
    LOG.info("blockChecksum " + block + " received exception " + ioe);
    incrDatanodeNetworkErrors();
    throw ioe;
  } finally {
    IOUtils.closeStream(out);
    IOUtils.closeStream(checksumIn);
    IOUtils.closeStream(metadataIn);
  }

  //update metrics
  datanode.metrics.addBlockChecksumOp(elapsed());
}
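Here the constant sizes the BufferedInputStream over the block's checksum metadata file, and crcPerBlock falls out of simple arithmetic: everything after the fixed header is one csize-byte checksum per bytesPerCRC data bytes. A minimal sketch of that arithmetic with illustrative numbers (the real values come from BlockMetadataHeader.readHeader() and DataChecksum):

public class CrcPerBlockExample {
  public static void main(String[] args) {
    // Illustrative values: a 4-byte CRC covering 512-byte chunks, with the
    // standard 7-byte metadata header (2-byte version + 1-byte checksum type
    // + 4-byte bytesPerChecksum).
    final int csize = 4;          // checksum.getChecksumSize()
    final int bytesPerCRC = 512;  // checksum.getBytesPerChecksum()
    final long headerSize = 7;    // BlockMetadataHeader.getHeaderSize()
    final long metaFileLength = 7 + 4 * 2048; // header + 2048 CRCs

    // Same expression as the DataXceiver code: guard against csize == 0,
    // then divide the checksum payload by the per-chunk checksum size.
    final long crcPerBlock = csize <= 0 ? 0 :
        (metaFileLength - headerSize) / csize;

    // 2048 checksums cover 2048 * 512 bytes = 1 MiB of block data.
    System.out.println("crcPerBlock = " + crcPerBlock); // prints 2048
  }
}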
The big-c project (Apache License 2.0), a fork of Hadoop, carries verbatim copies of both methods above in the same source files, TransferFsImage.java and DataXceiver.java.