org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader Java Examples
The following examples show how to use
org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader.
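PacketHeader describes a single packet in HDFS's data-transfer protocol: the total packet length on the wire, the packet's byte offset within the block, a sequence number, a last-packet-in-block flag, the data length, and a sync flag. Before the examples, here is a minimal sketch of constructing and serializing a header; it uses only the constructor and methods that appear in the examples below, and the argument values are illustrative rather than taken from any real stream:

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader;

public class PacketHeaderSketch {
  public static void main(String[] args) throws IOException {
    // Constructor argument order, as used throughout the examples below:
    // (packetLen, offsetInBlock, seqno, lastPacketInBlock, dataLen, syncBlock)
    PacketHeader header = new PacketHeader(4, 0L, 100L, true, 0, false);

    // The serialized size varies with the field values, but never exceeds
    // the fixed reservation PKT_MAX_HEADER_LEN.
    int len = header.getSerializedSize();
    System.out.println(len + " <= " + PacketHeader.PKT_MAX_HEADER_LEN);

    // Serialize to a stream, as TestDataTransferProtocol does with sendOut.
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    header.write(new DataOutputStream(bytes));
    System.out.println("wrote " + bytes.size() + " header bytes");
  }
}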
Example #1
Source File: TestDFSPacket.java From hadoop with Apache License 2.0 (identical in big-c)
@Test
public void testPacket() throws Exception {
  Random r = new Random(12345L);
  byte[] data = new byte[chunkSize];
  r.nextBytes(data);
  byte[] checksum = new byte[checksumSize];
  r.nextBytes(checksum);

  DataOutputBuffer os = new DataOutputBuffer(data.length * 2);

  byte[] packetBuf = new byte[data.length * 2];
  DFSPacket p = new DFSPacket(packetBuf, maxChunksPerPacket,
                              0, 0, checksumSize, false);
  p.setSyncBlock(true);
  p.writeData(data, 0, data.length);
  p.writeChecksum(checksum, 0, checksum.length);
  p.writeTo(os);

  // we have set syncBlock to true, so the header has the maximum length
  int headerLen = PacketHeader.PKT_MAX_HEADER_LEN;
  byte[] readBuf = os.getData();

  assertArrayRegionsEqual(readBuf, headerLen, checksum, 0, checksum.length);
  assertArrayRegionsEqual(readBuf, headerLen + checksum.length, data, 0,
                          data.length);
}
Example #2
Source File: TestDataTransferProtocol.java From big-c with Apache License 2.0 (identical in hadoop)
private void writeZeroLengthPacket(ExtendedBlock block, String description)
    throws IOException {
  PacketHeader hdr = new PacketHeader(
      8,                   // size of packet
      block.getNumBytes(), // offset in block
      100,                 // sequence number
      true,                // lastPacketInBlock
      0,                   // chunk length
      false);              // sync block
  hdr.write(sendOut);
  sendOut.writeInt(0); // zero checksum

  // ok finally write a block with 0 len
  sendResponse(Status.SUCCESS, "", null, recvOut);
  new PipelineAck(100, new int[] {
      PipelineAck.combineHeader(PipelineAck.ECN.DISABLED, Status.SUCCESS)
  }).write(recvOut);
  sendRecvData(description, false);
}
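The first constructor argument is the packet's on-the-wire length: 4 bytes for the length field itself, plus the checksum bytes, plus the data bytes (the flushBuffer example further down passes exactly 4 + checksumLen + dataLen). Here the packet carries one 4-byte zero checksum, written by sendOut.writeInt(0), and no data, which is why the size is 8. A sketch of the arithmetic:

// Packet length arithmetic for the zero-length packet above; the formula
// matches the 4 + checksumLen + dataLen construction used in flushBuffer.
int lenFieldBytes = 4; // the 4-byte packet-length field
int checksumLen = 4;   // one zero int written as the checksum
int dataLen = 0;       // zero-length payload
int packetLen = lenFieldBytes + checksumLen + dataLen; // == 8, as passed above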
Example #3
Source File: DFSPacket.java From big-c with Apache License 2.0 (identical in hadoop)
/**
 * Create a new packet.
 *
 * @param buf the buffer storing data and checksums
 * @param chunksPerPkt maximum number of chunks per packet
 * @param offsetInBlock offset in bytes into the HDFS block
 * @param seqno the sequence number of this packet
 * @param checksumSize the size of a checksum
 * @param lastPacketInBlock if this is the last packet
 */
DFSPacket(byte[] buf, int chunksPerPkt, long offsetInBlock, long seqno,
          int checksumSize, boolean lastPacketInBlock) {
  this.lastPacketInBlock = lastPacketInBlock;
  this.numChunks = 0;
  this.offsetInBlock = offsetInBlock;
  this.seqno = seqno;
  this.buf = buf;

  checksumStart = PacketHeader.PKT_MAX_HEADER_LEN;
  checksumPos = checksumStart;
  dataStart = checksumStart + (chunksPerPkt * checksumSize);
  dataPos = dataStart;
  maxChunks = chunksPerPkt;
}
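The constructor pre-partitions the single backing buffer into three fixed regions: PKT_MAX_HEADER_LEN bytes reserved for the header, then one checksum slot per chunk, then the data region. A runnable sketch of the layout with illustrative sizes (the 512-byte chunk and 4-byte checksum are assumptions, not values from the example):

import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader;

public class PacketLayoutSketch {
  public static void main(String[] args) {
    int chunksPerPkt = 126; // illustrative
    int checksumSize = 4;   // assumed 4-byte CRC per chunk
    int chunkSize = 512;    // assumed bytes of data per checksum

    int checksumStart = PacketHeader.PKT_MAX_HEADER_LEN;
    int dataStart = checksumStart + chunksPerPkt * checksumSize;
    int bufEnd = dataStart + chunksPerPkt * chunkSize;

    System.out.printf("header [0,%d) checksums [%d,%d) data [%d,%d)%n",
        checksumStart, checksumStart, dataStart, dataStart, bufEnd);
  }
}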
Example #4
Source File: DFSOutputStream.java From hadoop with Apache License 2.0 (identical in big-c)
/** Use {@link ByteArrayManager} to create buffer for non-heartbeat packets. */
private DFSPacket createPacket(int packetSize, int chunksPerPkt,
    long offsetInBlock, long seqno, boolean lastPacketInBlock)
    throws InterruptedIOException {
  final byte[] buf;
  final int bufferSize = PacketHeader.PKT_MAX_HEADER_LEN + packetSize;

  try {
    buf = byteArrayManager.newByteArray(bufferSize);
  } catch (InterruptedException ie) {
    final InterruptedIOException iioe = new InterruptedIOException(
        "seqno=" + seqno);
    iioe.initCause(ie);
    throw iioe;
  }

  return new DFSPacket(buf, chunksPerPkt, offsetInBlock, seqno,
                       getChecksumSize(), lastPacketInBlock);
}
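Note that the buffer is sized PKT_MAX_HEADER_LEN + packetSize even though the serialized header is usually shorter than the maximum: reserving the full header length keeps the checksum and data offsets fixed no matter how many bytes the header's variable-length fields actually take (the writePacketHeader example below right-aligns the header into that reservation for the same reason).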
Example #5
Source File: FanOutOneBlockAsyncDFSOutput.java From hbase with Apache License 2.0
@Override
public void userEventTriggered(ChannelHandlerContext ctx, Object evt)
    throws Exception {
  if (evt instanceof IdleStateEvent) {
    IdleStateEvent e = (IdleStateEvent) evt;
    if (e.state() == READER_IDLE) {
      failed(ctx.channel(),
          () -> new IOException("Timeout(" + timeoutMs + "ms) waiting for response"));
    } else if (e.state() == WRITER_IDLE) {
      PacketHeader heartbeat = new PacketHeader(4, 0, HEART_BEAT_SEQNO, false, 0, false);
      int len = heartbeat.getSerializedSize();
      ByteBuf buf = alloc.buffer(len);
      heartbeat.putInBuffer(buf.nioBuffer(0, len));
      buf.writerIndex(len);
      ctx.channel().writeAndFlush(buf);
    }
    return;
  }
  super.userEventTriggered(ctx, evt);
}
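putInBuffer targets a plain java.nio.ByteBuffer, which is what lets this Netty code serialize into a ByteBuf through nioBuffer. The same heartbeat can be built without Netty; a minimal sketch, assuming (as the Hadoop client does) that the heartbeat sentinel HEART_BEAT_SEQNO is -1:

import java.nio.ByteBuffer;

import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader;

public class HeartbeatSketch {
  public static void main(String[] args) {
    // A heartbeat is an empty packet: a 4-byte length field, no checksums,
    // no data, and the sentinel sequence number.
    PacketHeader heartbeat = new PacketHeader(4, 0, -1, false, 0, false);

    int len = heartbeat.getSerializedSize();
    ByteBuffer buf = ByteBuffer.allocate(len);
    heartbeat.putInBuffer(buf);
    System.out.println("serialized heartbeat header: " + len + " bytes");
  }
}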
Example #6
Source File: BlockSender.java From hadoop with Apache License 2.0 (identical in big-c)
/**
 * Write the packet header into {@code pkt} and
 * return the length of the header written.
 */
private int writePacketHeader(ByteBuffer pkt, int dataLen, int packetLen) {
  pkt.clear();
  // both syncBlock and syncPacket are false
  PacketHeader header = new PacketHeader(packetLen, offset, seqno,
      (dataLen == 0), dataLen, false);

  int size = header.getSerializedSize();
  pkt.position(PacketHeader.PKT_MAX_HEADER_LEN - size);
  header.putInBuffer(pkt);
  return size;
}
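Since the serialized header can be shorter than PKT_MAX_HEADER_LEN, the sender right-aligns it inside the reservation: positioning the buffer at PKT_MAX_HEADER_LEN - size makes the header end exactly where the checksum and data bytes begin, so the packet goes out as one contiguous run. A standalone sketch of the trick (the header field values are illustrative, not from BlockSender):

import java.nio.ByteBuffer;

import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader;

public class RightAlignSketch {
  public static void main(String[] args) {
    // Illustrative packet: 512 data bytes plus one 4-byte checksum.
    PacketHeader header = new PacketHeader(4 + 4 + 512, 0, 7, false, 512, false);
    ByteBuffer pkt = ByteBuffer.allocate(
        PacketHeader.PKT_MAX_HEADER_LEN + 4 + 512);

    int size = header.getSerializedSize();
    pkt.position(PacketHeader.PKT_MAX_HEADER_LEN - size);
    header.putInBuffer(pkt);
    // The position now sits at PKT_MAX_HEADER_LEN, where the checksum
    // and data bytes would be written next.
    System.out.println("header occupies ["
        + (PacketHeader.PKT_MAX_HEADER_LEN - size) + ", " + pkt.position() + ")");
  }
}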
Example #7
Source File: DFSOutputStream.java From big-c with Apache License 2.0 (identical in hadoop)
private void computePacketChunkSize(int psize, int csize) {
  final int bodySize = psize - PacketHeader.PKT_MAX_HEADER_LEN;
  final int chunkSize = csize + getChecksumSize();
  chunksPerPacket = Math.max(bodySize / chunkSize, 1);
  packetSize = chunkSize * chunksPerPacket;
  if (DFSClient.LOG.isDebugEnabled()) {
    DFSClient.LOG.debug("computePacketChunkSize: src=" + src +
        ", chunkSize=" + chunkSize +
        ", chunksPerPacket=" + chunksPerPacket +
        ", packetSize=" + packetSize);
  }
}
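With the common defaults of a 64 KB write packet, 512 bytes per checksum chunk, and 4-byte checksums (assumed here; the example takes them as parameters), the method fits as many whole chunk-plus-checksum pairs as the packet body allows. A worked sketch:

import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader;

public class ChunkMathSketch {
  public static void main(String[] args) {
    int psize = 64 * 1024; // assumed dfs.client-write-packet-size default
    int csize = 512;       // assumed bytes-per-checksum default
    int checksumSize = 4;  // assumed checksum width

    int bodySize = psize - PacketHeader.PKT_MAX_HEADER_LEN;
    int chunkSize = csize + checksumSize;
    int chunksPerPacket = Math.max(bodySize / chunkSize, 1);
    int packetSize = chunkSize * chunksPerPacket;

    System.out.println("chunksPerPacket=" + chunksPerPacket
        + ", packetSize=" + packetSize);
  }
}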
Example #8
Source File: RemoteBlockReader2.java From big-c with Apache License 2.0 (identical in hadoop)
private void readTrailingEmptyPacket() throws IOException {
  if (LOG.isTraceEnabled()) {
    LOG.trace("Reading empty packet at end of read");
  }

  packetReceiver.receiveNextPacket(in);

  PacketHeader trailer = packetReceiver.getHeader();
  if (!trailer.isLastPacketInBlock() || trailer.getDataLen() != 0) {
    throw new IOException("Expected empty end-of-read packet! Header: " +
        trailer);
  }
}
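Every successful read ends with such a trailer: an empty packet whose header has lastPacketInBlock set and a data length of zero. Checking both fields lets the client distinguish a clean end-of-block from a stream that was truncated mid-read.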
Example #9
Source File: FanOutOneBlockAsyncDFSOutput.java From hbase with Apache License 2.0
private void flushBuffer(CompletableFuture<Long> future, ByteBuf dataBuf,
    long nextPacketOffsetInBlock, boolean syncBlock) {
  int dataLen = dataBuf.readableBytes();
  int chunkLen = summer.getBytesPerChecksum();
  int trailingPartialChunkLen = dataLen % chunkLen;
  int numChecks = dataLen / chunkLen + (trailingPartialChunkLen != 0 ? 1 : 0);
  int checksumLen = numChecks * summer.getChecksumSize();
  ByteBuf checksumBuf = alloc.directBuffer(checksumLen);
  summer.calculateChunkedSums(dataBuf.nioBuffer(),
      checksumBuf.nioBuffer(0, checksumLen));
  checksumBuf.writerIndex(checksumLen);

  PacketHeader header = new PacketHeader(4 + checksumLen + dataLen,
      nextPacketOffsetInBlock, nextPacketSeqno, false, dataLen, syncBlock);
  int headerLen = header.getSerializedSize();
  ByteBuf headerBuf = alloc.buffer(headerLen);
  header.putInBuffer(headerBuf.nioBuffer(0, headerLen));
  headerBuf.writerIndex(headerLen);

  Callback c = new Callback(future, nextPacketOffsetInBlock + dataLen, datanodeList);
  waitingAckQueue.addLast(c);
  // recheck again after we pushed the callback to the queue
  if (state != State.STREAMING && waitingAckQueue.peekFirst() == c) {
    future.completeExceptionally(new IOException("stream already broken"));
    // it's the one we have just pushed or just a no-op
    waitingAckQueue.removeFirst();
    return;
  }
  // TODO: we should perhaps measure time taken per DN here;
  // we could collect statistics per DN, and/or exclude bad nodes in createOutput.
  datanodeList.forEach(ch -> {
    ch.write(headerBuf.retainedDuplicate());
    ch.write(checksumBuf.retainedDuplicate());
    ch.writeAndFlush(dataBuf.retainedDuplicate());
  });
  checksumBuf.release();
  headerBuf.release();
  dataBuf.release();
  nextPacketSeqno++;
}
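The packet goes out as three buffers written back to back, matching the header's 4 + checksumLen + dataLen length field: the serialized header, then the checksums, then the data. Each datanode channel gets retainedDuplicate() views so the three buffers can be shared across the fan-out without copying, and the originals are released once every channel's writes are queued.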
Example #10
Source File: DFSOutputStream.java From big-c with Apache License 2.0 (identical in hadoop)
/**
 * For heartbeat packets, create the buffer directly with new byte[],
 * since heartbeats should not be blocked.
 */
private DFSPacket createHeartbeatPacket() throws InterruptedIOException {
  final byte[] buf = new byte[PacketHeader.PKT_MAX_HEADER_LEN];
  return new DFSPacket(buf, 0, 0, DFSPacket.HEART_BEAT_SEQNO,
                       getChecksumSize(), false);
}
Example #11
Source File: BlockSender.java From big-c with Apache License 2.0 (identical in hadoop)
private long doSendBlock(DataOutputStream out, OutputStream baseStream,
    DataTransferThrottler throttler) throws IOException {
  if (out == null) {
    throw new IOException("out stream is null");
  }
  initialOffset = offset;
  long totalRead = 0;
  OutputStream streamForSendChunks = out;

  lastCacheDropOffset = initialOffset;

  if (isLongRead() && blockInFd != null) {
    // Advise that this file descriptor will be accessed sequentially.
    NativeIO.POSIX.getCacheManipulator().posixFadviseIfPossible(
        block.getBlockName(), blockInFd, 0, 0,
        NativeIO.POSIX.POSIX_FADV_SEQUENTIAL);
  }

  // Trigger readahead of beginning of file if configured.
  manageOsCache();

  final long startTime = ClientTraceLog.isDebugEnabled() ? System.nanoTime() : 0;
  try {
    int maxChunksPerPacket;
    int pktBufSize = PacketHeader.PKT_MAX_HEADER_LEN;
    boolean transferTo = transferToAllowed && !verifyChecksum
        && baseStream instanceof SocketOutputStream
        && blockIn instanceof FileInputStream;
    if (transferTo) {
      FileChannel fileChannel = ((FileInputStream) blockIn).getChannel();
      blockInPosition = fileChannel.position();
      streamForSendChunks = baseStream;
      maxChunksPerPacket = numberOfChunks(TRANSFERTO_BUFFER_SIZE);

      // Smaller packet size to only hold checksum when doing transferTo
      pktBufSize += checksumSize * maxChunksPerPacket;
    } else {
      maxChunksPerPacket = Math.max(1,
          numberOfChunks(HdfsConstants.IO_FILE_BUFFER_SIZE));
      // Packet size includes both checksum and data
      pktBufSize += (chunkSize + checksumSize) * maxChunksPerPacket;
    }

    ByteBuffer pktBuf = ByteBuffer.allocate(pktBufSize);

    while (endOffset > offset && !Thread.currentThread().isInterrupted()) {
      manageOsCache();
      long len = sendPacket(pktBuf, maxChunksPerPacket, streamForSendChunks,
          transferTo, throttler);
      offset += len;
      totalRead += len + (numberOfChunks(len) * checksumSize);
      seqno++;
    }
    // If this thread was interrupted, then it did not send the full block.
    if (!Thread.currentThread().isInterrupted()) {
      try {
        // send an empty packet to mark the end of the block
        sendPacket(pktBuf, maxChunksPerPacket, streamForSendChunks, transferTo,
            throttler);
        out.flush();
      } catch (IOException e) { // socket error
        throw ioeToSocketException(e);
      }
      sentEntireByteRange = true;
    }
  } finally {
    if ((clientTraceFmt != null) && ClientTraceLog.isDebugEnabled()) {
      final long endTime = System.nanoTime();
      ClientTraceLog.debug(String.format(clientTraceFmt, totalRead,
          initialOffset, endTime - startTime));
    }
    close();
  }
  return totalRead;
}
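The packet buffer is sized differently depending on the send path: with transferTo, data bytes go straight from the file channel to the socket, so the buffer only needs room for the header and checksums; on the ordinary path it must also hold the data. Either way the loop finishes by sending an empty packet, the same end-of-block trailer that readTrailingEmptyPacket (Example #8) validates on the receiving side.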