Java Code Examples for org.apache.hadoop.net.NetUtils#getInputStream()
The following examples show how to use org.apache.hadoop.net.NetUtils#getInputStream(). Each example is drawn from an open-source project; its source file and license are noted above the code.
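Before the project examples, here is a minimal, self-contained sketch of the usual pattern (the class name, address, and timeout values below are placeholders, not taken from any project on this page): connect the socket with NetUtils.connect(), set a read timeout, then wrap the socket with NetUtils.getOutputStream()/getInputStream(), which return streams that honor those timeouts for both channel-backed and plain sockets.

import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.net.Socket;
import javax.net.SocketFactory;
import org.apache.hadoop.net.NetUtils;

public class NetUtilsStreamSketch {
  public static void main(String[] args) throws Exception {
    // Placeholder address and timeouts; substitute real values.
    Socket sock = SocketFactory.getDefault().createSocket();
    NetUtils.connect(sock,
        NetUtils.createSocketAddr("datanode.example.com:50010"), 20000);
    sock.setSoTimeout(20000);                  // reads time out after 20s
    DataOutputStream out = new DataOutputStream(
        NetUtils.getOutputStream(sock, 20000)); // writes time out after 20s
    DataInputStream in = new DataInputStream(
        NetUtils.getInputStream(sock));         // uses the socket's soTimeout
    try {
      // ... exchange request/response bytes here ...
    } finally {
      in.close();
      out.close();
      sock.close();
    }
  }
}

The examples that follow all use this same shape, differing mainly in what buffering and protocol framing they layer on top.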
Example 1
Source File: DFSClient.java (from the hadoop project, Apache License 2.0)
Connects to a datanode's data-transfer port and returns an IOStreamPair; NetUtils.getInputStream() supplies the raw input stream that SASL negotiation may wrap for encryption.
/**
 * Connect to the given datanode's data transfer port, and return
 * the resulting IOStreamPair. This includes encryption wrapping, etc.
 */
private IOStreamPair connectToDN(DatanodeInfo dn, int timeout,
    LocatedBlock lb) throws IOException {
  boolean success = false;
  Socket sock = null;
  try {
    sock = socketFactory.createSocket();
    String dnAddr = dn.getXferAddr(getConf().connectToDnViaHostname);
    if (LOG.isDebugEnabled()) {
      LOG.debug("Connecting to datanode " + dnAddr);
    }
    NetUtils.connect(sock, NetUtils.createSocketAddr(dnAddr), timeout);
    sock.setSoTimeout(timeout);

    // Fetch the raw, unbuffered socket streams; SASL negotiation may
    // replace them with encrypting wrappers.
    OutputStream unbufOut = NetUtils.getOutputStream(sock);
    InputStream unbufIn = NetUtils.getInputStream(sock);
    IOStreamPair ret = saslClient.newSocketSend(sock, unbufOut, unbufIn, this,
        lb.getBlockToken(), dn);
    success = true;
    return ret;
  } finally {
    if (!success) {
      IOUtils.closeSocket(sock);
    }
  }
}
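A detail worth copying: judging from the names unbufOut/unbufIn and the Javadoc, the streams handed to saslClient.newSocketSend() are deliberately kept unbuffered, since the handshake may replace them with encrypting wrappers; any buffering belongs on top of whatever pair comes back.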
Example 2
Source File: DFSTestUtil.java (from the hadoop project, Apache License 2.0)
Test helper that transfers a replica-being-written (RBW) block from the first datanode to the second and parses the datanode's BlockOpResponseProto from NetUtils.getInputStream().
/** For {@link TestTransferRbw} */
public static BlockOpResponseProto transferRbw(final ExtendedBlock b,
    final DFSClient dfsClient, final DatanodeInfo... datanodes)
    throws IOException {
  assertEquals(2, datanodes.length);
  final Socket s = DFSOutputStream.createSocketForPipeline(datanodes[0],
      datanodes.length, dfsClient);
  final long writeTimeout = dfsClient.getDatanodeWriteTimeout(datanodes.length);
  final DataOutputStream out = new DataOutputStream(new BufferedOutputStream(
      NetUtils.getOutputStream(s, writeTimeout),
      HdfsConstants.SMALL_BUFFER_SIZE));
  final DataInputStream in = new DataInputStream(NetUtils.getInputStream(s));

  // send the request
  new Sender(out).transferBlock(b, new Token<BlockTokenIdentifier>(),
      dfsClient.clientName, new DatanodeInfo[]{datanodes[1]},
      new StorageType[]{StorageType.DEFAULT});
  out.flush();

  return BlockOpResponseProto.parseDelimitedFrom(in);
}
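Note the asymmetry in how the two directions get their timeouts: NetUtils.getOutputStream(s, writeTimeout) attaches an explicit write timeout, while NetUtils.getInputStream(s) simply honors whatever read timeout is already set on the socket.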
Example 3
Source File: DFSClient.java (from the big-c project, Apache License 2.0)
(The code is identical to Example 1; see above.)
Example 4
Source File: DFSTestUtil.java (from the big-c project, Apache License 2.0)
(The code is identical to Example 2; see above.)
Example 5
Source File: Client.java (from the RDFS project, Apache License 2.0)
Sets up the IPC client's I/O streams, layering a PingInputStream and a BufferedInputStream over NetUtils.getInputStream().
/** Connect to the server and set up the I/O streams. It then sends
 * a header to the server and starts
 * the connection thread that waits for responses.
 */
private synchronized void setupIOstreamsWithInternal() {
  if (socket != null || shouldCloseConnection.get()) {
    return;
  }

  short ioFailures = 0;
  short timeoutFailures = 0;
  try {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Connecting to " + server);
    }
    while (true) {
      try {
        this.socket = socketFactory.createSocket();
        this.socket.setTcpNoDelay(tcpNoDelay);
        // connection time out is 20s by default
        NetUtils.connect(this.socket, remoteId.getAddress(), connectTimeout);
        if (rpcTimeout > 0) {
          pingInterval = rpcTimeout; // rpcTimeout overwrites pingInterval
        }
        this.socket.setSoTimeout(pingInterval);
        break;
      } catch (SocketTimeoutException toe) {
        /* The max number of retries is 45,
         * which amounts to 20s*45 = 15 minutes retries.
         */
        handleConnectionFailure(timeoutFailures++, maxRetries, toe);
      } catch (IOException ie) {
        handleConnectionFailure(ioFailures++, maxRetries, ie);
      }
    }
    this.in = new DataInputStream(new BufferedInputStream(
        new PingInputStream(NetUtils.getInputStream(socket))));
    this.out = new DataOutputStream(
        new BufferedOutputStream(NetUtils.getOutputStream(socket)));
    writeHeader();

    // update last activity time
    touch();

    // start the receiver thread after the socket connection has been set up
    start();
  } catch (IOException e) {
    markClosed(e);
    close();
  }
}
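The read stack here is NetUtils.getInputStream(socket) at the bottom, PingInputStream in the middle, and BufferedInputStream on top. PingInputStream exists so that when a read hits the socket's timeout (pingInterval, or rpcTimeout when set), the client can send a ping to check that the server is still alive instead of immediately failing the call.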
Example 6
Source File: BlockReader.java (from the RDFS project, Apache License 2.0)
Builds a BlockReader: sends an OP_READ_BLOCK request, then reads the status code, checksum header, and first chunk offset from a buffered stream over NetUtils.getInputStream().
public static BlockReader newBlockReader(int dataTransferVersion,
    int namespaceId,
    Socket sock, String file,
    long blockId, long genStamp,
    long startOffset, long len,
    int bufferSize, boolean verifyChecksum,
    String clientName, long minSpeedBps)
    throws IOException {
  // in and out will be closed when sock is closed (by the caller)
  DataOutputStream out = new DataOutputStream(
      new BufferedOutputStream(
          NetUtils.getOutputStream(sock, HdfsConstants.WRITE_TIMEOUT)));

  // write the header.
  ReadBlockHeader readBlockHeader = new ReadBlockHeader(
      dataTransferVersion, namespaceId, blockId, genStamp, startOffset,
      len, clientName);
  readBlockHeader.writeVersionAndOpCode(out);
  readBlockHeader.write(out);
  out.flush();

  //
  // Get bytes in block, set streams
  //
  DataInputStream in = new DataInputStream(
      new BufferedInputStream(NetUtils.getInputStream(sock), bufferSize));

  if (in.readShort() != DataTransferProtocol.OP_STATUS_SUCCESS) {
    throw new IOException("Got error in response to OP_READ_BLOCK " +
        "self=" + sock.getLocalSocketAddress() +
        ", remote=" + sock.getRemoteSocketAddress() +
        " for file " + file +
        " for block " + blockId);
  }
  DataChecksum checksum = DataChecksum.newDataChecksum(in, new PureJavaCrc32());
  // Warning when we get CHECKSUM_NULL?

  // Read the first chunk offset.
  long firstChunkOffset = in.readLong();

  if (firstChunkOffset < 0 || firstChunkOffset > startOffset ||
      firstChunkOffset >= (startOffset + checksum.getBytesPerChecksum())) {
    throw new IOException("BlockReader: error in first chunk offset (" +
        firstChunkOffset + ") startOffset is " +
        startOffset + " for file " + file);
  }

  return new BlockReader(file, blockId, in, checksum, verifyChecksum,
      startOffset, firstChunkOffset, sock, minSpeedBps, dataTransferVersion);
}
Example 7
Source File: BlockReaderAccelerator.java (from the RDFS project, Apache License 2.0)
Reads an entire block range, checksums included, in a single readFully() from NetUtils.getInputStream(), then verifies every checksum chunk.
/**
 * Return all the data [startOffset, length] in one shot!
 */
public ByteBuffer readAll() throws IOException {
  // in and out will be closed when sock is closed (by the caller)
  DataOutputStream out = new DataOutputStream(
      new BufferedOutputStream(
          NetUtils.getOutputStream(sock, HdfsConstants.WRITE_TIMEOUT)));

  // write the header.
  ReadBlockAccelaratorHeader readBlockAccelaratorHeader =
      new ReadBlockAccelaratorHeader(dataTransferVersion, namespaceId,
          blk.getBlock().getBlockId(), blk.getBlock().getGenerationStamp(),
          startOffset, length, clientName);
  readBlockAccelaratorHeader.writeVersionAndOpCode(out);
  readBlockAccelaratorHeader.write(out);
  out.flush();
  if (LOG.isDebugEnabled()) {
    LOG.debug("BlockReaderAccelerator client blkid " +
        blk.getBlock().getBlockId() +
        " offset " + startOffset + " length " + length);
  }

  in = new DataInputStream(NetUtils.getInputStream(sock));

  // read the checksum header.
  // 1 byte of checksum type and 4 bytes of bytes-per-checksum
  byte[] cksumHeader = new byte[DataChecksum.HEADER_LEN];
  in.readFully(cksumHeader);
  DataChecksum dsum = DataChecksum.newDataChecksum(cksumHeader, 0);
  this.bytesPerChecksum = dsum.getBytesPerChecksum();

  // align the startOffset with the previous crc chunk
  long delta = startOffset % bytesPerChecksum;
  long newOffset = startOffset - delta;
  long newlength = length + delta;

  // align the length to encompass the entire last checksum chunk
  long del = newlength % bytesPerChecksum;
  if (del != 0) {
    del = bytesPerChecksum - del;
    newlength += del;
  }

  // find the number of checksum chunks
  long numChunks = newlength / bytesPerChecksum;
  long sizeChecksumData = numChunks * dsum.getChecksumSize();

  // read in all checksums and data in one shot.
  this.dataBuffer = new byte[(int)newlength + (int)sizeChecksumData];
  in.readFully(dataBuffer);
  if (LOG.isDebugEnabled()) {
    LOG.debug("BlockReaderAccelerator client read in " +
        dataBuffer.length + " bytes.");
  }

  // verify checksums of all chunks
  if (this.verifyChecksum) {
    for (int i = 0; i < numChunks; i++) {
      long dataOffset = sizeChecksumData + i * bytesPerChecksum;
      checker.reset();
      checker.update(dataBuffer, (int)dataOffset, bytesPerChecksum);

      int ckOffset = i * dsum.getChecksumSize();
      long expected = FSInputChecker.checksum2long(dataBuffer, ckOffset,
          dsum.getChecksumSize());

      if (expected != checker.getValue()) {
        String msg = "Checksum failure for file " + hdfsfile +
            " block " + blk.getBlock() +
            " at block offset " + (startOffset + i * bytesPerChecksum) +
            " chunk " + i +
            " expected " + expected +
            " got " + checker.getValue();
        LOG.warn(msg);
        throw new ChecksumException(msg, startOffset + i * bytesPerChecksum);
      }
    }
  }

  // The offset in the ByteBuffer skips over the
  // portion that stores the checksums. It also skips over the additional
  // data portion that was read while aligning with the previous chunk boundary
  return ByteBuffer.wrap(dataBuffer,
      (int)(sizeChecksumData + delta),
      (int)length);
}
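To make the alignment arithmetic concrete: with bytesPerChecksum = 512, startOffset = 700, and length = 1000, delta = 700 % 512 = 188, so the request is widened to start at offset 512 with newlength = 1188; since 1188 % 512 = 164, the tail is padded by 512 - 164 = 348, giving newlength = 1536 and numChunks = 3. The returned ByteBuffer then skips sizeChecksumData + delta bytes, so the caller sees exactly the 1000 bytes it asked for.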
Example 8
Source File: Client.java (from the hadoop-gpu project, Apache License 2.0)
An older variant of Example 5's stream setup, with the connect timeout and retry count hard-coded.
/** Connect to the server and set up the I/O streams. It then sends
 * a header to the server and starts
 * the connection thread that waits for responses.
 */
private synchronized void setupIOstreams() {
  if (socket != null || shouldCloseConnection.get()) {
    return;
  }

  short ioFailures = 0;
  short timeoutFailures = 0;
  try {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Connecting to " + server);
    }
    while (true) {
      try {
        this.socket = socketFactory.createSocket();
        this.socket.setTcpNoDelay(tcpNoDelay);
        // connection time out is 20s
        NetUtils.connect(this.socket, remoteId.getAddress(), 20000);
        this.socket.setSoTimeout(pingInterval);
        break;
      } catch (SocketTimeoutException toe) {
        /* The max number of retries is 45,
         * which amounts to 20s*45 = 15 minutes retries.
         */
        handleConnectionFailure(timeoutFailures++, 45, toe);
      } catch (IOException ie) {
        handleConnectionFailure(ioFailures++, maxRetries, ie);
      }
    }
    this.in = new DataInputStream(new BufferedInputStream(
        new PingInputStream(NetUtils.getInputStream(socket))));
    this.out = new DataOutputStream(
        new BufferedOutputStream(NetUtils.getOutputStream(socket)));
    writeHeader();

    // update last activity time
    touch();

    // start the receiver thread after the socket connection has been set up
    start();
  } catch (IOException e) {
    markClosed(e);
    close();
  }
}
Example 9
Source File: DFSClient.java (from the hadoop-gpu project, Apache License 2.0)
An older BlockReader factory that writes the OP_READ_BLOCK header fields inline instead of through a header class (compare Example 6).
public static BlockReader newBlockReader(Socket sock, String file,
    long blockId, long genStamp,
    long startOffset, long len,
    int bufferSize, boolean verifyChecksum,
    String clientName) throws IOException {
  // in and out will be closed when sock is closed (by the caller)
  DataOutputStream out = new DataOutputStream(
      new BufferedOutputStream(
          NetUtils.getOutputStream(sock, HdfsConstants.WRITE_TIMEOUT)));

  // write the header.
  out.writeShort(DataTransferProtocol.DATA_TRANSFER_VERSION);
  out.write(DataTransferProtocol.OP_READ_BLOCK);
  out.writeLong(blockId);
  out.writeLong(genStamp);
  out.writeLong(startOffset);
  out.writeLong(len);
  Text.writeString(out, clientName);
  out.flush();

  //
  // Get bytes in block, set streams
  //
  DataInputStream in = new DataInputStream(
      new BufferedInputStream(NetUtils.getInputStream(sock), bufferSize));

  if (in.readShort() != DataTransferProtocol.OP_STATUS_SUCCESS) {
    throw new IOException("Got error in response to OP_READ_BLOCK " +
        "for file " + file + " for block " + blockId);
  }
  DataChecksum checksum = DataChecksum.newDataChecksum(in);
  // Warning when we get CHECKSUM_NULL?

  // Read the first chunk offset.
  long firstChunkOffset = in.readLong();

  if (firstChunkOffset < 0 || firstChunkOffset > startOffset ||
      firstChunkOffset >= (startOffset + checksum.getBytesPerChecksum())) {
    throw new IOException("BlockReader: error in first chunk offset (" +
        firstChunkOffset + ") startOffset is " +
        startOffset + " for file " + file);
  }

  return new BlockReader(file, blockId, in, checksum, verifyChecksum,
      startOffset, firstChunkOffset, sock);
}
Example 10
Source File: DFSClient.java (from the hadoop-gpu project, Apache License 2.0)
Opens a write pipeline to the first datanode; the unbuffered stream from NetUtils.getInputStream() carries the connect ack.
private boolean createBlockOutputStream(DatanodeInfo[] nodes, String client,
    boolean recoveryFlag) {
  String firstBadLink = "";
  if (LOG.isDebugEnabled()) {
    for (int i = 0; i < nodes.length; i++) {
      LOG.debug("pipeline = " + nodes[i].getName());
    }
  }

  // persist blocks on namenode on next flush
  persistBlocks = true;

  try {
    LOG.debug("Connecting to " + nodes[0].getName());
    InetSocketAddress target = NetUtils.createSocketAddr(nodes[0].getName());
    s = socketFactory.createSocket();
    int timeoutValue = 3000 * nodes.length + socketTimeout;
    NetUtils.connect(s, target, timeoutValue);
    s.setSoTimeout(timeoutValue);
    s.setSendBufferSize(DEFAULT_DATA_SOCKET_SIZE);
    LOG.debug("Send buf size " + s.getSendBufferSize());
    long writeTimeout = HdfsConstants.WRITE_TIMEOUT_EXTENSION * nodes.length +
        datanodeWriteTimeout;

    //
    // Xmit header info to datanode
    //
    DataOutputStream out = new DataOutputStream(
        new BufferedOutputStream(NetUtils.getOutputStream(s, writeTimeout),
            DataNode.SMALL_BUFFER_SIZE));
    blockReplyStream = new DataInputStream(NetUtils.getInputStream(s));

    out.writeShort(DataTransferProtocol.DATA_TRANSFER_VERSION);
    out.write(DataTransferProtocol.OP_WRITE_BLOCK);
    out.writeLong(block.getBlockId());
    out.writeLong(block.getGenerationStamp());
    out.writeInt(nodes.length);
    out.writeBoolean(recoveryFlag);  // recovery flag
    Text.writeString(out, client);
    out.writeBoolean(false);         // Not sending src node information
    out.writeInt(nodes.length - 1);
    for (int i = 1; i < nodes.length; i++) {
      nodes[i].write(out);
    }
    checksum.writeHeader(out);
    out.flush();

    // receive ack for connect
    firstBadLink = Text.readString(blockReplyStream);
    if (firstBadLink.length() != 0) {
      throw new IOException("Bad connect ack with firstBadLink " +
          firstBadLink);
    }

    blockStream = out;
    return true;  // success

  } catch (IOException ie) {
    LOG.info("Exception in createBlockOutputStream " + ie);

    // find the datanode that matches
    if (firstBadLink.length() != 0) {
      for (int i = 0; i < nodes.length; i++) {
        if (nodes[i].getName().equals(firstBadLink)) {
          errorIndex = i;
          break;
        }
      }
    }
    hasError = true;
    setLastException(ie);
    blockReplyStream = null;
    return false;  // error
  }
}
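Two details stand out: the reply stream from NetUtils.getInputStream(s) stays unbuffered, likely because the connect ack is a single short string, and on failure the firstBadLink name returned by the pipeline is matched back to an index in nodes so that errorIndex records which datanode failed.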