Java Code Examples for org.apache.hadoop.hdfs.protocol.LocatedBlock#getBlock()
The following examples show how to use org.apache.hadoop.hdfs.protocol.LocatedBlock#getBlock().
The original project and source file are noted above each example.
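All of the examples below follow the same basic pattern: obtain a LocatedBlock (typically from the NameNode via getBlockLocations or addBlock), call getBlock() to get the block's identity, and call getLocations() for the datanodes holding its replicas. The following is a minimal sketch of that pattern; it is not taken from any of the projects below, it assumes a Hadoop 2.x client, and the NameNode URI, file path, and class name are illustrative placeholders. Note that in older branches such as RDFS, getBlock() returns a Block rather than an ExtendedBlock.

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;

public class LocatedBlockGetBlockSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hypothetical NameNode URI and file path -- adjust for a real cluster.
    DFSClient client = new DFSClient(URI.create("hdfs://localhost:8020"), conf);
    try {
      LocatedBlocks blocks =
          client.getLocatedBlocks("/user/example/file", 0L, Long.MAX_VALUE);
      for (LocatedBlock locatedBlock : blocks.getLocatedBlocks()) {
        // getBlock() yields the block identity (pool id, block id, length, generation stamp).
        ExtendedBlock block = locatedBlock.getBlock();
        // getLocations() yields the datanodes that hold replicas of this block.
        DatanodeInfo[] locations = locatedBlock.getLocations();
        System.out.println(block.getBlockName() + " (" + block.getNumBytes()
            + " bytes) on " + locations.length + " datanode(s)");
      }
    } finally {
      client.close();
    }
  }
}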
Example 1
Source File: FanOutOneBlockAsyncDFSOutput.java From hbase with Apache License 2.0
FanOutOneBlockAsyncDFSOutput(Configuration conf, DistributedFileSystem dfs, DFSClient client,
    ClientProtocol namenode, String clientName, String src, long fileId, LocatedBlock locatedBlock,
    Encryptor encryptor, List<Channel> datanodeList, DataChecksum summer, ByteBufAllocator alloc) {
  this.conf = conf;
  this.dfs = dfs;
  this.client = client;
  this.namenode = namenode;
  this.fileId = fileId;
  this.clientName = clientName;
  this.src = src;
  this.block = locatedBlock.getBlock();
  this.locations = locatedBlock.getLocations();
  this.encryptor = encryptor;
  this.datanodeList = datanodeList;
  this.summer = summer;
  this.maxDataLen = MAX_DATA_LEN - (MAX_DATA_LEN % summer.getBytesPerChecksum());
  this.alloc = alloc;
  this.buf = alloc.directBuffer(sendBufSizePRedictor.initialSize());
  this.state = State.STREAMING;
  setupReceiver(conf.getInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY, READ_TIMEOUT));
}
Example 2
Source File: FastCopy.java From RDFS with Apache License 2.0
/**
 * Copies all the replicas for a single block
 *
 * @param src
 *          the source block
 * @param dst
 *          the destination block
 * @param srcNamespaceId
 *          namespace id of the source block
 * @param dstNamespaceId
 *          namespace id of the destination block
 * @param supportFederation
 *          whether or not federation is supported.
 * @throws Exception
 */
private void copyBlock(LocatedBlock src, LocatedBlock dst, int srcNamespaceId,
    int dstNamespaceId, boolean supportFederation) throws Exception {
  // Sorting source and destination locations so that we don't rely at all
  // on the ordering of the locations that we receive from the NameNode.
  DatanodeInfo[] dstLocs = dst.getLocations();
  DatanodeInfo[] srcLocs = src.getLocations();
  alignDatanodes(dstLocs, srcLocs);

  // We use minimum here, since it's better for the NameNode to handle the
  // extra locations in either list. The locations that match up are the
  // ones we have chosen in our tool so we handle copies for only those.
  short blocksToCopy = (short) Math.min(srcLocs.length, dstLocs.length);

  Block srcBlock = src.getBlock();
  Block dstBlock = dst.getBlock();
  initializeBlockStatus(dstBlock, blocksToCopy);
  for (int i = 0; i < blocksToCopy; i++) {
    blockRPCExecutor.submit(new BlockCopyRPC(srcNamespaceId, srcBlock,
        dstNamespaceId, dstBlock, supportFederation, srcLocs[i], dstLocs[i]));
  }
}
Example 3
Source File: TestClientReportBadBlock.java From hadoop with Apache License 2.0
/**
 * Create a file with one block and corrupt some/all of the block replicas.
 */
private void createAFileWithCorruptedBlockReplicas(Path filePath, short repl,
    int corruptBlockCount) throws IOException, AccessControlException,
    FileNotFoundException, UnresolvedLinkException, InterruptedException,
    TimeoutException {
  DFSTestUtil.createFile(dfs, filePath, BLOCK_SIZE, repl, 0);
  DFSTestUtil.waitReplication(dfs, filePath, repl);
  // Locate the file blocks by asking name node
  final LocatedBlocks locatedblocks = dfs.dfs.getNamenode()
      .getBlockLocations(filePath.toString(), 0L, BLOCK_SIZE);
  Assert.assertEquals(repl, locatedblocks.get(0).getLocations().length);
  // The file only has one block
  LocatedBlock lblock = locatedblocks.get(0);
  DatanodeInfo[] datanodeinfos = lblock.getLocations();
  ExtendedBlock block = lblock.getBlock();
  // corrupt some/all of the block replicas
  for (int i = 0; i < corruptBlockCount; i++) {
    DatanodeInfo dninfo = datanodeinfos[i];
    final DataNode dn = cluster.getDataNode(dninfo.getIpcPort());
    corruptBlock(block, dn);
    LOG.debug("Corrupted block " + block.getBlockName() + " on data node "
        + dninfo);
  }
}
Example 4
Source File: NNThroughputBenchmark.java From big-c with Apache License 2.0
private ExtendedBlock addBlocks(String fileName, String clientName)
    throws IOException {
  ExtendedBlock prevBlock = null;
  for (int jdx = 0; jdx < blocksPerFile; jdx++) {
    LocatedBlock loc = nameNodeProto.addBlock(fileName, clientName,
        prevBlock, null, INodeId.GRANDFATHER_INODE_ID, null);
    prevBlock = loc.getBlock();
    for (DatanodeInfo dnInfo : loc.getLocations()) {
      int dnIdx = Arrays.binarySearch(datanodes, dnInfo.getXferAddr());
      datanodes[dnIdx].addBlock(loc.getBlock().getLocalBlock());
      ReceivedDeletedBlockInfo[] rdBlocks = { new ReceivedDeletedBlockInfo(
          loc.getBlock().getLocalBlock(),
          ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, null) };
      StorageReceivedDeletedBlocks[] report = { new StorageReceivedDeletedBlocks(
          datanodes[dnIdx].storage.getStorageID(), rdBlocks) };
      nameNodeProto.blockReceivedAndDeleted(datanodes[dnIdx].dnRegistration,
          loc.getBlock().getBlockPoolId(), report);
    }
  }
  return prevBlock;
}
Example 5
Source File: TestDFSClientRetries.java From hadoop with Apache License 2.0
private LocatedBlocks makeBadBlockList(LocatedBlocks goodBlockList) {
  LocatedBlock goodLocatedBlock = goodBlockList.get(0);
  LocatedBlock badLocatedBlock = new LocatedBlock(
      goodLocatedBlock.getBlock(),
      new DatanodeInfo[] {
          DFSTestUtil.getDatanodeInfo("1.2.3.4", "bogus", 1234)
      },
      goodLocatedBlock.getStartOffset(),
      false);

  List<LocatedBlock> badBlocks = new ArrayList<LocatedBlock>();
  badBlocks.add(badLocatedBlock);
  return new LocatedBlocks(goodBlockList.getFileLength(), false,
      badBlocks, null, true, null);
}
Example 6
Source File: NNThroughputBenchmark.java From RDFS with Apache License 2.0
private void addBlocks(String fileName, String clientName) throws IOException {
  for (int jdx = 0; jdx < blocksPerFile; jdx++) {
    LocatedBlock loc = nameNode.addBlock(fileName, clientName);
    for (DatanodeInfo dnInfo : loc.getLocations()) {
      int dnIdx = Arrays.binarySearch(datanodes, dnInfo.getName());
      datanodes[dnIdx].addBlock(loc.getBlock());
      Block[] bi = new Block[] { loc.getBlock() };
      nameNode.blockReceivedAndDeleted(datanodes[dnIdx].dnRegistration, bi);
    }
  }
}
Example 7
Source File: MergeSortRowIdMatcher.java From incubator-retired-blur with Apache License 2.0
private static String getFirstBlockId(FileSystem fileSystem, Path realFile)
    throws IOException {
  FileStatus fileStatus = fileSystem.getFileStatus(realFile);
  BlockLocation[] locations = fileSystem.getFileBlockLocations(fileStatus, 0, 1);
  HdfsBlockLocation location = (HdfsBlockLocation) locations[0];
  LocatedBlock locatedBlock = location.getLocatedBlock();
  ExtendedBlock block = locatedBlock.getBlock();
  return toNiceString(block.getBlockId());
}
Example 8
Source File: TestChecksumFile.java From RDFS with Apache License 2.0
private BlockPathInfo getBlockPathInfo(String filename,
    MiniDFSCluster miniCluster, DFSClient dfsclient) throws IOException {
  LocatedBlocks locations = dfsclient.namenode.getBlockLocations(filename, 0,
      Long.MAX_VALUE);
  assertEquals(1, locations.locatedBlockCount());
  LocatedBlock locatedblock = locations.getLocatedBlocks().get(0);
  DataNode datanode = miniCluster.getDataNode(locatedblock.getLocations()[0]
      .getIpcPort());
  assertTrue(datanode != null);

  Block lastblock = locatedblock.getBlock();
  DataNode.LOG.info("newblocks=" + lastblock);

  return datanode.getBlockPathInfo(lastblock);
}
Example 9
Source File: DFSInputStream.java From RDFS with Apache License 2.0
private DNAddrPair chooseDataNode(LocatedBlock block) throws IOException {
  while (true) {
    DatanodeInfo[] nodes = block.getLocations();
    String blockInfo = block.getBlock() + " file=" + src;
    if (block.isCorrupt()) {
      throw new BlockMissingException(src, "Block: " + blockInfo
          + " is corrupt ", block.getStartOffset());
    }
    /*if (nodes.length == 1) {
      long lastContact = System.currentTimeMillis() - nodes[0].getLastUpdate();
      if (lastContact > 9000)
        throw new BlockMissingException(src, "Could not obtain block: "
            + blockInfo, block.getStartOffset());
    }*/

    DatanodeInfo chosenNode = null;
    try {
      chosenNode = dfsClient.bestNode(nodes, deadNodes);
      InetSocketAddress targetAddr =
          NetUtils.createSocketAddr(chosenNode.getName());
      return new DNAddrPair(chosenNode, targetAddr);
    } catch (IOException ie) {
      int failureTimes = DFSClient.dfsInputStreamfailures.get();
      if (failureTimes >= dfsClient.maxBlockAcquireFailures
          || failureTimes >= block.getLocations().length) {
        throw new BlockMissingException(src, "Could not obtain block: "
            + blockInfo, block.getStartOffset());
      }

      if (nodes == null || nodes.length == 0) {
        DFSClient.LOG.info("No node available for block: " + blockInfo);
      }
      DFSClient.LOG.info("Could not obtain block " + block.getBlock()
          + " from node: "
          + (chosenNode == null ? "" : chosenNode.getHostName()) + ie
          + ". Will get new block locations from namenode and retry...");
      try {
        // Introducing a random factor to the wait time before another retry.
        // The wait time is dependent on # of failures and a random factor.
        // At the first time of getting a BlockMissingException, the wait time
        // is a random number between 0..3000 ms. If the first retry
        // still fails, we will wait 3000 ms grace period before the 2nd retry.
        // Also at the second retry, the waiting window is expanded to 6000 ms
        // alleviating the request rate from the server. Similarly the 3rd retry
        // will wait 6000ms grace period before retry and the waiting window is
        // expanded to 9000ms.
        // waitTime = grace period for the last round of attempt +
        //            expanding time window for each failure
        double waitTime = timeWindow * failureTimes
            + timeWindow * (failureTimes + 1) * DFSClient.r.nextDouble();
        DFSClient.LOG.warn("DFS chooseDataNode: got # " + (failureTimes + 1)
            + " IOException, will wait for " + waitTime + " msec.", ie);
        Thread.sleep((long) waitTime);
      } catch (InterruptedException iex) {
      }
      deadNodes.clear(); //2nd option is to remove only nodes[blockId]
      openInfo();
      block = getBlockAt(block.getStartOffset(), false, true);
      DFSClient.dfsInputStreamfailures.set(failureTimes + 1);
      continue;
    }
  }
}
Example 10
Source File: NamenodeFsck.java From big-c with Apache License 2.0
private void copyBlock(final DFSClient dfs, LocatedBlock lblock,
    OutputStream fos) throws Exception {
  int failures = 0;
  InetSocketAddress targetAddr = null;
  TreeSet<DatanodeInfo> deadNodes = new TreeSet<DatanodeInfo>();
  BlockReader blockReader = null;
  ExtendedBlock block = lblock.getBlock();

  while (blockReader == null) {
    DatanodeInfo chosenNode;

    try {
      chosenNode = bestNode(dfs, lblock.getLocations(), deadNodes);
      targetAddr = NetUtils.createSocketAddr(chosenNode.getXferAddr());
    } catch (IOException ie) {
      if (failures >= DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT) {
        throw new IOException("Could not obtain block " + lblock, ie);
      }
      LOG.info("Could not obtain block from any node: " + ie);
      try {
        Thread.sleep(10000);
      } catch (InterruptedException iex) {
      }
      deadNodes.clear();
      failures++;
      continue;
    }
    try {
      String file = BlockReaderFactory.getFileName(targetAddr,
          block.getBlockPoolId(), block.getBlockId());
      blockReader = new BlockReaderFactory(dfs.getConf()).
          setFileName(file).
          setBlock(block).
          setBlockToken(lblock.getBlockToken()).
          setStartOffset(0).
          setLength(-1).
          setVerifyChecksum(true).
          setClientName("fsck").
          setDatanodeInfo(chosenNode).
          setInetSocketAddress(targetAddr).
          setCachingStrategy(CachingStrategy.newDropBehind()).
          setClientCacheContext(dfs.getClientContext()).
          setConfiguration(namenode.conf).
          setRemotePeerFactory(new RemotePeerFactory() {
            @Override
            public Peer newConnectedPeer(InetSocketAddress addr,
                Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
                throws IOException {
              Peer peer = null;
              Socket s = NetUtils.getDefaultSocketFactory(conf).createSocket();
              try {
                s.connect(addr, HdfsServerConstants.READ_TIMEOUT);
                s.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);
                peer = TcpPeerServer.peerFromSocketAndKey(
                    dfs.getSaslDataTransferClient(), s, NamenodeFsck.this,
                    blockToken, datanodeId);
              } finally {
                if (peer == null) {
                  IOUtils.closeQuietly(s);
                }
              }
              return peer;
            }
          }).
          build();
    } catch (IOException ex) {
      // Put chosen node into dead list, continue
      LOG.info("Failed to connect to " + targetAddr + ":" + ex);
      deadNodes.add(chosenNode);
    }
  }
  byte[] buf = new byte[1024];
  int cnt = 0;
  boolean success = true;
  long bytesRead = 0;
  try {
    while ((cnt = blockReader.read(buf, 0, buf.length)) > 0) {
      fos.write(buf, 0, cnt);
      bytesRead += cnt;
    }
    if (bytesRead != block.getNumBytes()) {
      throw new IOException("Recorded block size is " + block.getNumBytes()
          + ", but datanode returned " + bytesRead + " bytes");
    }
  } catch (Exception e) {
    LOG.error("Error reading block", e);
    success = false;
  } finally {
    blockReader.close();
  }
  if (!success) {
    throw new Exception("Could not copy block data for " + lblock.getBlock());
  }
}
Example 11
Source File: DFSOutputStream.java From big-c with Apache License 2.0
/**
 * Construct a data streamer for appending to the last partial block
 * @param lastBlock last block of the file to be appended
 * @param stat status of the file to be appended
 * @param bytesPerChecksum number of bytes per checksum
 * @throws IOException if error occurs
 */
private DataStreamer(LocatedBlock lastBlock, HdfsFileStatus stat,
    int bytesPerChecksum) throws IOException {
  isAppend = true;
  stage = BlockConstructionStage.PIPELINE_SETUP_APPEND;
  block = lastBlock.getBlock();
  bytesSent = block.getNumBytes();
  accessToken = lastBlock.getBlockToken();
  isLazyPersistFile = isLazyPersist(stat);
  long usedInLastBlock = stat.getLen() % blockSize;
  int freeInLastBlock = (int) (blockSize - usedInLastBlock);

  // calculate the amount of free space in the pre-existing
  // last crc chunk
  int usedInCksum = (int) (stat.getLen() % bytesPerChecksum);
  int freeInCksum = bytesPerChecksum - usedInCksum;

  // if there is space in the last block, then we have to
  // append to that block
  if (freeInLastBlock == blockSize) {
    throw new IOException("The last block for file " + src
        + " is full.");
  }

  if (usedInCksum > 0 && freeInCksum > 0) {
    // if there is space in the last partial chunk, then
    // setup in such a way that the next packet will have only
    // one chunk that fills up the partial chunk.
    //
    computePacketChunkSize(0, freeInCksum);
    setChecksumBufSize(freeInCksum);
    appendChunk = true;
  } else {
    // if the remaining space in the block is smaller than
    // the expected size of a packet, then create
    // smaller size packet.
    //
    computePacketChunkSize(
        Math.min(dfsClient.getConf().writePacketSize, freeInLastBlock),
        bytesPerChecksum);
  }

  // setup pipeline to append to the last block XXX retries??
  setPipeline(lastBlock);
  errorIndex = -1;   // no errors yet.
  if (nodes.length < 1) {
    throw new IOException("Unable to retrieve blocks locations " +
        " for last block " + block +
        "of file " + src);
  }
}
Example 12
Source File: TestDataNodeVolumeFailure.java From big-c with Apache License 2.0
/**
 * Try to access a block on a data node. If it fails, an exception is thrown.
 * @param datanode
 * @param lblock
 * @throws IOException
 */
private void accessBlock(DatanodeInfo datanode, LocatedBlock lblock)
    throws IOException {
  InetSocketAddress targetAddr = null;
  ExtendedBlock block = lblock.getBlock();

  targetAddr = NetUtils.createSocketAddr(datanode.getXferAddr());

  BlockReader blockReader = new BlockReaderFactory(new DFSClient.Conf(conf)).
      setInetSocketAddress(targetAddr).
      setBlock(block).
      setFileName(BlockReaderFactory.getFileName(targetAddr,
          "test-blockpoolid", block.getBlockId())).
      setBlockToken(lblock.getBlockToken()).
      setStartOffset(0).
      setLength(-1).
      setVerifyChecksum(true).
      setClientName("TestDataNodeVolumeFailure").
      setDatanodeInfo(datanode).
      setCachingStrategy(CachingStrategy.newDefaultStrategy()).
      setClientCacheContext(ClientContext.getFromConf(conf)).
      setConfiguration(conf).
      setRemotePeerFactory(new RemotePeerFactory() {
        @Override
        public Peer newConnectedPeer(InetSocketAddress addr,
            Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
            throws IOException {
          Peer peer = null;
          Socket sock = NetUtils.getDefaultSocketFactory(conf).createSocket();
          try {
            sock.connect(addr, HdfsServerConstants.READ_TIMEOUT);
            sock.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);
            peer = TcpPeerServer.peerFromSocket(sock);
          } finally {
            if (peer == null) {
              IOUtils.closeSocket(sock);
            }
          }
          return peer;
        }
      }).
      build();
  blockReader.close();
}
Example 13
Source File: BlockReaderTestUtil.java From big-c with Apache License 2.0
/**
 * Get a BlockReader for the given block.
 */
public static BlockReader getBlockReader(MiniDFSCluster cluster,
    LocatedBlock testBlock, int offset, int lenToRead) throws IOException {
  InetSocketAddress targetAddr = null;
  ExtendedBlock block = testBlock.getBlock();
  DatanodeInfo[] nodes = testBlock.getLocations();
  targetAddr = NetUtils.createSocketAddr(nodes[0].getXferAddr());

  final DistributedFileSystem fs = cluster.getFileSystem();
  return new BlockReaderFactory(fs.getClient().getConf()).
      setInetSocketAddress(targetAddr).
      setBlock(block).
      setFileName(targetAddr.toString() + ":" + block.getBlockId()).
      setBlockToken(testBlock.getBlockToken()).
      setStartOffset(offset).
      setLength(lenToRead).
      setVerifyChecksum(true).
      setClientName("BlockReaderTestUtil").
      setDatanodeInfo(nodes[0]).
      setClientCacheContext(ClientContext.getFromConf(fs.getConf())).
      setCachingStrategy(CachingStrategy.newDefaultStrategy()).
      setConfiguration(fs.getConf()).
      setAllowShortCircuitLocalReads(true).
      setRemotePeerFactory(new RemotePeerFactory() {
        @Override
        public Peer newConnectedPeer(InetSocketAddress addr,
            Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
            throws IOException {
          Peer peer = null;
          Socket sock = NetUtils.
              getDefaultSocketFactory(fs.getConf()).createSocket();
          try {
            sock.connect(addr, HdfsServerConstants.READ_TIMEOUT);
            sock.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);
            peer = TcpPeerServer.peerFromSocket(sock);
          } finally {
            if (peer == null) {
              IOUtils.closeQuietly(sock);
            }
          }
          return peer;
        }
      }).
      build();
}
Example 14
Source File: TestInterDatanodeProtocol.java From hadoop with Apache License 2.0
/**
 * The following test first creates a file.
 * It verifies the block information from a datanode.
 * Then, it updates the block with new information and verifies again.
 * @param useDnHostname whether DNs should connect to other DNs by hostname
 */
private void checkBlockMetaDataInfo(boolean useDnHostname) throws Exception {
  MiniDFSCluster cluster = null;

  conf.setBoolean(DFSConfigKeys.DFS_DATANODE_USE_DN_HOSTNAME, useDnHostname);
  if (useDnHostname) {
    // Since the mini cluster only listens on the loopback we have to
    // ensure the hostname used to access DNs maps to the loopback. We
    // do this by telling the DN to advertise localhost as its hostname
    // instead of the default hostname.
    conf.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY, "localhost");
  }

  try {
    cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(3)
        .checkDataNodeHostConfig(true)
        .build();
    cluster.waitActive();

    //create a file
    DistributedFileSystem dfs = cluster.getFileSystem();
    String filestr = "/foo";
    Path filepath = new Path(filestr);
    DFSTestUtil.createFile(dfs, filepath, 1024L, (short) 3, 0L);
    assertTrue(dfs.exists(filepath));

    //get block info
    LocatedBlock locatedblock = getLastLocatedBlock(
        DFSClientAdapter.getDFSClient(dfs).getNamenode(), filestr);
    DatanodeInfo[] datanodeinfo = locatedblock.getLocations();
    assertTrue(datanodeinfo.length > 0);

    //connect to a data node
    DataNode datanode = cluster.getDataNode(datanodeinfo[0].getIpcPort());
    InterDatanodeProtocol idp =
        DataNodeTestUtils.createInterDatanodeProtocolProxy(
            datanode, datanodeinfo[0], conf, useDnHostname);

    // Stop the block scanners.
    datanode.getBlockScanner().removeAllVolumeScanners();

    //verify BlockMetaDataInfo
    ExtendedBlock b = locatedblock.getBlock();
    InterDatanodeProtocol.LOG.info("b=" + b + ", " + b.getClass());
    checkMetaInfo(b, datanode);
    long recoveryId = b.getGenerationStamp() + 1;
    idp.initReplicaRecovery(
        new RecoveringBlock(b, locatedblock.getLocations(), recoveryId));

    //verify updateBlock
    ExtendedBlock newblock = new ExtendedBlock(b.getBlockPoolId(),
        b.getBlockId(), b.getNumBytes() / 2, b.getGenerationStamp() + 1);
    idp.updateReplicaUnderRecovery(b, recoveryId, b.getBlockId(),
        newblock.getNumBytes());
    checkMetaInfo(newblock, datanode);

    // Verify correct null response trying to init recovery for a missing block
    ExtendedBlock badBlock = new ExtendedBlock("fake-pool",
        b.getBlockId(), 0, 0);
    assertNull(idp.initReplicaRecovery(
        new RecoveringBlock(badBlock,
            locatedblock.getLocations(), recoveryId)));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Example 15
Source File: TestCopyBlockAPI.java From RDFS with Apache License 2.0
@Test
public void testCopyBlockAPI() throws Exception {
  // Generate source file and get its locations.
  String filename = "/testCopyBlockAPI";
  DFSTestUtil.createFile(fs, new Path(filename), 1023 * 10, (short) 3,
      (long) 0);
  FileStatus srcFileStatus = fs.getFileStatus(new Path(filename));
  LocatedBlocksWithMetaInfo lbkSrcMetaInfo = cluster.getNameNode()
      .openAndFetchMetaInfo(filename, 0, Long.MAX_VALUE);
  int srcNamespaceId = lbkSrcMetaInfo.getNamespaceID();
  LocatedBlock lbkSrc = lbkSrcMetaInfo.getLocatedBlocks().get(0);
  DatanodeInfo[] srcLocs = lbkSrc.getLocations();

  // Create destination file and add a single block.
  String newFile = "/testCopyBlockAPI_new";
  String clientName = newFile;
  fs.create(new Path(filename + "new"));
  cluster.getNameNode().create(newFile, srcFileStatus.getPermission(),
      clientName, true, true, srcFileStatus.getReplication(),
      srcFileStatus.getBlockSize());
  LocatedBlockWithMetaInfo lbkDstMetaInfo = cluster.getNameNode()
      .addBlockAndFetchMetaInfo(newFile, clientName, null, srcLocs);
  int dstNamespaceId = lbkDstMetaInfo.getNamespaceID();
  LocatedBlock lbkDst = lbkDstMetaInfo;

  // Verify locations of src and destination block.
  DatanodeInfo[] dstLocs = lbkDst.getLocations();
  Arrays.sort(srcLocs);
  Arrays.sort(dstLocs);
  assertEquals(srcLocs.length, dstLocs.length);
  for (int i = 0; i < srcLocs.length; i++) {
    assertEquals(srcLocs[i], dstLocs[i]);
  }

  // Create datanode rpc connections.
  ClientDatanodeProtocol cdp2 = DFSClient.createClientDatanodeProtocolProxy(
      srcLocs[2], conf, 5 * 60 * 1000);

  Block srcBlock = new Block(lbkSrc.getBlock());
  Block dstBlock = new Block(lbkDst.getBlock());
  System.out.println("Copying src : " + srcBlock + " dst : " + dstBlock);

  // Find datanode object.
  DataNode datanode = null;
  for (DataNode dn : cluster.getDataNodes()) {
    DatanodeRegistration registration = dn.getDNRegistrationForNS(srcNamespaceId);
    if (registration.equals(srcLocs[0])) {
      datanode = dn;
      break;
    }
  }

  assertNotNull(datanode);

  // Submit a block transfer to location 2.
  ExecutorService pool = Executors.newSingleThreadExecutor();
  pool.submit(datanode.new DataTransfer(new DatanodeInfo[] { srcLocs[2] },
      srcNamespaceId, srcBlock, dstNamespaceId, dstBlock, datanode));

  try {
    Thread.sleep(5000);
    // Submit another transfer to same location, should receive
    // BlockAlreadyExistsException.
    cdp2.copyBlock(srcNamespaceId, srcBlock, dstNamespaceId, dstBlock,
        srcLocs[2], false);
  } catch (RemoteException re) {
    // pass.
    return;
  } finally {
    // Shutdown RPC connections.
    RPC.stopProxy(cdp2);
  }

  fail("Second RPC did not throw Exception");
}
Example 16
Source File: DFSOutputStream.java From RDFS with Apache License 2.0
/**
 * Open a DataOutputStream to a DataNode so that it can be written to.
 * This happens when a file is created and each time a new block is allocated.
 * Must get block ID and the IDs of the destinations from the namenode.
 * Returns the list of target datanodes.
 */
private DatanodeInfo[] nextBlockOutputStream(String client) throws IOException {
  LocatedBlock lb = null;
  boolean retry = false;
  DatanodeInfo[] nodes;
  ArrayList<DatanodeInfo> excludedNodes = new ArrayList<DatanodeInfo>();
  int count = dfsClient.conf.getInt("dfs.client.block.write.retries", 3);
  boolean success;
  do {
    hasError = false;
    lastException = null;
    errorIndex = 0;
    retry = false;
    nodes = null;
    success = false;

    long startTime = System.currentTimeMillis();

    DatanodeInfo[] excluded = excludedNodes.toArray(new DatanodeInfo[0]);
    lb = locateFollowingBlock(startTime,
        excluded.length > 0 ? excluded : null);
    block = lb.getBlock();
    nodes = lb.getLocations();

    //
    // Connect to first DataNode in the list.
    //
    success = createBlockOutputStream(nodes, dfsClient.clientName, false);

    if (!success) {
      DFSClient.LOG.info("Abandoning block " + block + " for file " + src);
      dfsClient.namenode.abandonBlock(block, src, dfsClient.clientName);

      if (errorIndex < nodes.length) {
        DFSClient.LOG.debug("Excluding datanode " + nodes[errorIndex]);
        excludedNodes.add(nodes[errorIndex]);
      }

      // Connection failed. Let's wait a little bit and retry
      retry = true;
    }
  } while (retry && --count >= 0);

  if (!success) {
    throw new IOException("Unable to create new block.");
  }
  return nodes;
}
Example 17
Source File: DFSInputStream.java From hadoop with Apache License 2.0
private DNAddrPair chooseDataNode(LocatedBlock block,
    Collection<DatanodeInfo> ignoredNodes) throws IOException {
  while (true) {
    try {
      return getBestNodeDNAddrPair(block, ignoredNodes);
    } catch (IOException ie) {
      String errMsg = getBestNodeDNAddrPairErrorString(block.getLocations(),
          deadNodes, ignoredNodes);
      String blockInfo = block.getBlock() + " file=" + src;
      if (failures >= dfsClient.getMaxBlockAcquireFailures()) {
        String description = "Could not obtain block: " + blockInfo;
        DFSClient.LOG.warn(description + errMsg
            + ". Throwing a BlockMissingException");
        throw new BlockMissingException(src, description,
            block.getStartOffset());
      }

      DatanodeInfo[] nodes = block.getLocations();
      if (nodes == null || nodes.length == 0) {
        DFSClient.LOG.info("No node available for " + blockInfo);
      }
      DFSClient.LOG.info("Could not obtain " + block.getBlock()
          + " from any node: " + ie + errMsg
          + ". Will get new block locations from namenode and retry...");
      try {
        // Introducing a random factor to the wait time before another retry.
        // The wait time is dependent on # of failures and a random factor.
        // At the first time of getting a BlockMissingException, the wait time
        // is a random number between 0..3000 ms. If the first retry
        // still fails, we will wait 3000 ms grace period before the 2nd retry.
        // Also at the second retry, the waiting window is expanded to 6000 ms
        // alleviating the request rate from the server. Similarly the 3rd retry
        // will wait 6000ms grace period before retry and the waiting window is
        // expanded to 9000ms.
        final int timeWindow = dfsClient.getConf().timeWindow;
        double waitTime = timeWindow * failures +     // grace period for the last round of attempt
            timeWindow * (failures + 1) * DFSUtil.getRandom().nextDouble(); // expanding time window for each failure
        DFSClient.LOG.warn("DFS chooseDataNode: got # " + (failures + 1)
            + " IOException, will wait for " + waitTime + " msec.");
        Thread.sleep((long) waitTime);
      } catch (InterruptedException iex) {
      }
      deadNodes.clear(); //2nd option is to remove only nodes[blockId]
      openInfo();
      block = getBlockAt(block.getStartOffset());
      failures++;
      continue;
    }
  }
}
Example 18
Source File: DFSOutputStream.java From hadoop with Apache License 2.0
/**
 * Open a DataOutputStream to a DataNode so that it can be written to.
 * This happens when a file is created and each time a new block is allocated.
 * Must get block ID and the IDs of the destinations from the namenode.
 * Returns the list of target datanodes.
 */
private LocatedBlock nextBlockOutputStream() throws IOException {
  LocatedBlock lb = null;
  DatanodeInfo[] nodes = null;
  StorageType[] storageTypes = null;
  int count = dfsClient.getConf().nBlockWriteRetry;
  boolean success = false;
  ExtendedBlock oldBlock = block;
  do {
    hasError = false;
    lastException.set(null);
    errorIndex = -1;
    success = false;

    DatanodeInfo[] excluded =
        excludedNodes.getAllPresent(excludedNodes.asMap().keySet())
            .keySet()
            .toArray(new DatanodeInfo[0]);
    block = oldBlock;
    lb = locateFollowingBlock(excluded.length > 0 ? excluded : null);
    block = lb.getBlock();
    block.setNumBytes(0);
    bytesSent = 0;
    accessToken = lb.getBlockToken();
    nodes = lb.getLocations();
    storageTypes = lb.getStorageTypes();

    //
    // Connect to first DataNode in the list.
    //
    success = createBlockOutputStream(nodes, storageTypes, 0L, false);

    if (!success) {
      DFSClient.LOG.info("Abandoning " + block);
      dfsClient.namenode.abandonBlock(block, fileId, src,
          dfsClient.clientName);
      block = null;
      DFSClient.LOG.info("Excluding datanode " + nodes[errorIndex]);
      excludedNodes.put(nodes[errorIndex], nodes[errorIndex]);
    }
  } while (!success && --count >= 0);

  if (!success) {
    throw new IOException("Unable to create new block.");
  }
  return lb;
}