Java Code Examples for org.apache.hadoop.hdfs.protocol.LocatedBlock#getLocations()
The following examples show how to use
org.apache.hadoop.hdfs.protocol.LocatedBlock#getLocations().
You can go to the original project or source file by following the link above each example.
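Most of the examples below follow the same basic pattern: ask the NameNode for the LocatedBlocks of a file, then call getLocations() on each LocatedBlock to enumerate the DataNodes that hold a replica of that block. Here is a minimal sketch of that pattern for orientation; the file path is a hypothetical placeholder, DFSClient#getLocatedBlocks is used as in Example 5 below, and the exact client API differs between Hadoop versions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;

public class ListBlockLocations {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hypothetical file path; assumes the default filesystem is HDFS.
    Path file = new Path("/user/demo/data.txt");
    DistributedFileSystem dfs =
        (DistributedFileSystem) file.getFileSystem(conf);
    FileStatus stat = dfs.getFileStatus(file);
    // Ask the NameNode for all blocks of the file and their replica locations.
    LocatedBlocks blocks =
        dfs.getClient().getLocatedBlocks(file.toString(), 0, stat.getLen());
    for (LocatedBlock lb : blocks.getLocatedBlocks()) {
      DatanodeInfo[] locs = lb.getLocations(); // DataNodes holding replicas
      System.out.println(lb.getBlock() + " has " + locs.length + " replica(s)");
      for (DatanodeInfo dn : locs) {
        System.out.println("  " + dn.getHostName());
      }
    }
  }
}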
Example 1
Source File: FileFixer.java From RDFS with Apache License 2.0
/**
 * Returns the corrupt blocks in a file.
 */
List<LocatedBlock> corruptBlocksInFile(
    DistributedFileSystem fs, String uriPath, FileStatus stat)
    throws IOException {
  List<LocatedBlock> corrupt = new LinkedList<LocatedBlock>();
  LocatedBlocks locatedBlocks = fs.getClient().namenode.getBlockLocations(
      uriPath, 0, stat.getLen());
  for (LocatedBlock b: locatedBlocks.getLocatedBlocks()) {
    if (b.isCorrupt() ||
        (b.getLocations().length == 0 && b.getBlockSize() > 0)) {
      LOG.info("Adding bad block for file " + uriPath);
      corrupt.add(b);
    }
  }
  return corrupt;
}
Example 2
Source File: FastCopy.java From RDFS with Apache License 2.0
/**
 * Copies all the replicas for a single block
 *
 * @param src
 *          the source block
 * @param dst
 *          the destination block
 * @param srcNamespaceId
 *          namespace id of the source block
 * @param dstNamespaceId
 *          namespace id of the destination block
 * @param supportFederation
 *          whether or not federation is supported.
 * @throws Exception
 */
private void copyBlock(LocatedBlock src, LocatedBlock dst,
    int srcNamespaceId, int dstNamespaceId, boolean supportFederation)
    throws Exception {
  // Sorting source and destination locations so that we don't rely at all
  // on the ordering of the locations that we receive from the NameNode.
  DatanodeInfo[] dstLocs = dst.getLocations();
  DatanodeInfo[] srcLocs = src.getLocations();
  alignDatanodes(dstLocs, srcLocs);

  // We use minimum here, since its better for the NameNode to handle the
  // extra locations in either list. The locations that match up are the
  // ones we have chosen in our tool so we handle copies for only those.
  short blocksToCopy = (short) Math.min(srcLocs.length, dstLocs.length);

  Block srcBlock = src.getBlock();
  Block dstBlock = dst.getBlock();
  initializeBlockStatus(dstBlock, blocksToCopy);
  for (int i = 0; i < blocksToCopy; i++) {
    blockRPCExecutor.submit(new BlockCopyRPC(srcNamespaceId,
        srcBlock, dstNamespaceId, dstBlock, supportFederation,
        srcLocs[i], dstLocs[i]));
  }
}
Example 3
Source File: BlockPlacementPolicyDefault.java From hadoop with Apache License 2.0
@Override
public BlockPlacementStatus verifyBlockPlacement(String srcPath,
    LocatedBlock lBlk, int numberOfReplicas) {
  DatanodeInfo[] locs = lBlk.getLocations();
  if (locs == null)
    locs = DatanodeDescriptor.EMPTY_ARRAY;
  int numRacks = clusterMap.getNumOfRacks();
  if(numRacks <= 1) // only one rack
    return new BlockPlacementStatusDefault(
        Math.min(numRacks, numberOfReplicas), numRacks);
  int minRacks = Math.min(2, numberOfReplicas);
  // 1. Check that all locations are different.
  // 2. Count locations on different racks.
  Set<String> racks = new TreeSet<String>();
  for (DatanodeInfo dn : locs)
    racks.add(dn.getNetworkLocation());
  return new BlockPlacementStatusDefault(racks.size(), minRacks);
}
Example 4
Source File: TestDataNodeVolumeFailure.java From big-c with Apache License 2.0
/**
 * Count datanodes that have copies of the blocks for a file
 * put it into the map
 * @param map
 * @param path
 * @param size
 * @return
 * @throws IOException
 */
private int countNNBlocks(Map<String, BlockLocs> map, String path, long size)
    throws IOException {
  int total = 0;

  NamenodeProtocols nn = cluster.getNameNodeRpc();
  List<LocatedBlock> locatedBlocks =
      nn.getBlockLocations(path, 0, size).getLocatedBlocks();
  //System.out.println("Number of blocks: " + locatedBlocks.size());

  for(LocatedBlock lb : locatedBlocks) {
    String blockId = ""+lb.getBlock().getBlockId();
    //System.out.print(blockId + ": ");
    DatanodeInfo[] dn_locs = lb.getLocations();
    BlockLocs bl = map.get(blockId);
    if(bl == null) {
      bl = new BlockLocs();
    }
    //System.out.print(dn_info.name+",");
    total += dn_locs.length;
    bl.num_locs += dn_locs.length;
    map.put(blockId, bl);
    //System.out.println();
  }
  return total;
}
Example 5
Source File: TestStorageMover.java From hadoop with Apache License 2.0
private void waitForAllReplicas(int expectedReplicaNum, Path file,
    DistributedFileSystem dfs) throws Exception {
  for (int i = 0; i < 5; i++) {
    LocatedBlocks lbs = dfs.getClient().getLocatedBlocks(file.toString(), 0,
        BLOCK_SIZE);
    LocatedBlock lb = lbs.get(0);
    if (lb.getLocations().length >= expectedReplicaNum) {
      return;
    } else {
      Thread.sleep(1000);
    }
  }
}
Example 6
Source File: TestInterDatanodeProtocol.java From RDFS with Apache License 2.0
public static LocatedBlockWithMetaInfo getLastLocatedBlock(
    ClientProtocol namenode, String src) throws IOException {
  //get block info for the last block
  LocatedBlocksWithMetaInfo locations =
      namenode.openAndFetchMetaInfo(src, 0, Long.MAX_VALUE);
  List<LocatedBlock> blocks = locations.getLocatedBlocks();
  DataNode.LOG.info("blocks.size()=" + blocks.size());
  assertTrue(blocks.size() > 0);

  LocatedBlock blk = blocks.get(blocks.size() - 1);
  return new LocatedBlockWithMetaInfo(blk.getBlock(), blk.getLocations(),
      blk.getStartOffset(), locations.getDataProtocolVersion(),
      locations.getNamespaceID(), locations.getMethodFingerPrint());
}
Example 7
Source File: PBHelper.java From big-c with Apache License 2.0
public static LocatedBlockProto convert(LocatedBlock b) {
  if (b == null) return null;
  Builder builder = LocatedBlockProto.newBuilder();
  DatanodeInfo[] locs = b.getLocations();
  List<DatanodeInfo> cachedLocs =
      Lists.newLinkedList(Arrays.asList(b.getCachedLocations()));
  for (int i = 0; i < locs.length; i++) {
    DatanodeInfo loc = locs[i];
    builder.addLocs(i, PBHelper.convert(loc));
    boolean locIsCached = cachedLocs.contains(loc);
    builder.addIsCached(locIsCached);
    if (locIsCached) {
      cachedLocs.remove(loc);
    }
  }
  Preconditions.checkArgument(cachedLocs.size() == 0,
      "Found additional cached replica locations that are not in the set of"
      + " storage-backed locations!");

  StorageType[] storageTypes = b.getStorageTypes();
  if (storageTypes != null) {
    for (int i = 0; i < storageTypes.length; ++i) {
      builder.addStorageTypes(PBHelper.convertStorageType(storageTypes[i]));
    }
  }
  final String[] storageIDs = b.getStorageIDs();
  if (storageIDs != null) {
    builder.addAllStorageIDs(Arrays.asList(storageIDs));
  }

  return builder.setB(PBHelper.convert(b.getBlock()))
      .setBlockToken(PBHelper.convert(b.getBlockToken()))
      .setCorrupt(b.isCorrupt()).setOffset(b.getStartOffset()).build();
}
Example 8
Source File: NNThroughputBenchmark.java From RDFS with Apache License 2.0
private void addBlocks(String fileName, String clientName)
    throws IOException {
  for (int jdx = 0; jdx < blocksPerFile; jdx++) {
    LocatedBlock loc = nameNode.addBlock(fileName, clientName);
    for (DatanodeInfo dnInfo : loc.getLocations()) {
      int dnIdx = Arrays.binarySearch(datanodes, dnInfo.getName());
      datanodes[dnIdx].addBlock(loc.getBlock());
      Block[] bi = new Block[] { loc.getBlock() };
      nameNode.blockReceivedAndDeleted(datanodes[dnIdx].dnRegistration, bi);
    }
  }
}
Example 9
Source File: BlockReconstructor.java From RDFS with Apache License 2.0
boolean isBlockDecom(LocatedBlock block) {
  // Copy this block iff all good copies are being decommissioned
  boolean allDecommissioning = true;
  for (DatanodeInfo i : block.getLocations()) {
    allDecommissioning &= i.isDecommissionInProgress();
  }
  if (allDecommissioning) {
    return true;
  }
  return false;
}
Example 10
Source File: TestAvatarDataNodeRBW.java From RDFS with Apache License 2.0
private int initializeTest(String testName) throws IOException {
  String fileName = testName;
  createRBWFile(fileName);
  // Verify we have 1 RBW block.
  AvatarNode avatar = cluster.getPrimaryAvatar(0).avatar;
  LocatedBlocks lbks = avatar.namesystem.getBlockLocations(fileName, 0,
      Long.MAX_VALUE);
  int blocksBefore = lbks.locatedBlockCount();
  for (LocatedBlock lbk : lbks.getLocatedBlocks()) {
    DatanodeInfo[] locs = lbk.getLocations();
    assertNotNull(locs);
    assertTrue(locs.length != 0);
  }
  return blocksBefore;
}
Example 11
Source File: HFileSystem.java From hbase with Apache License 2.0
@Override
public void reorderBlocks(Configuration conf, LocatedBlocks lbs, String src)
    throws IOException {
  ServerName sn = AbstractFSWALProvider.getServerNameFromWALDirectoryName(conf, src);
  if (sn == null) {
    // It's not an WAL
    return;
  }

  // Ok, so it's an WAL
  String hostName = sn.getHostname();
  if (LOG.isTraceEnabled()) {
    LOG.trace(src + " is an WAL file, so reordering blocks, last hostname will be:"
        + hostName);
  }

  // Just check for all blocks
  for (LocatedBlock lb : lbs.getLocatedBlocks()) {
    DatanodeInfo[] dnis = lb.getLocations();
    if (dnis != null && dnis.length > 1) {
      boolean found = false;
      for (int i = 0; i < dnis.length - 1 && !found; i++) {
        if (hostName.equals(dnis[i].getHostName())) {
          // advance the other locations by one and put this one at the last place.
          DatanodeInfo toLast = dnis[i];
          System.arraycopy(dnis, i + 1, dnis, i, dnis.length - i - 1);
          dnis[dnis.length - 1] = toLast;
          found = true;
        }
      }
    }
  }
}
Example 12
Source File: DFSOutputStream.java From RDFS with Apache License 2.0
/**
 * Create a new output stream to the given DataNode with namespace id.
 */
DFSOutputStream(DFSClient dfsClient, String src, int buffersize,
    Progressable progress, LocatedBlock lastBlock, FileStatus stat,
    int bytesPerChecksum, int namespaceId) throws IOException {
  this(dfsClient, src, stat.getBlockSize(), progress, bytesPerChecksum,
      stat.getReplication(), false, false, null);
  initialFileSize = stat.getLen(); // length of file when opened
  this.namespaceId = namespaceId;

  //
  // The last partial block of the file has to be filled.
  //
  if (lastBlock != null) {
    block = lastBlock.getBlock();
    long usedInLastBlock = stat.getLen() % blockSize;
    int freeInLastBlock = (int)(blockSize - usedInLastBlock);

    // calculate the amount of free space in the pre-existing
    // last crc chunk
    int usedInCksum = (int)(stat.getLen() % bytesPerChecksum);
    int freeInCksum = bytesPerChecksum - usedInCksum;

    // if there is space in the last block, then we have to
    // append to that block
    if (freeInLastBlock > blockSize) {
      throw new IOException("The last block for file " + src + " is full.");
    }

    // indicate that we are appending to an existing block
    bytesCurBlock = lastBlock.getBlockSize();

    if (usedInCksum > 0 && freeInCksum > 0) {
      // if there is space in the last partial chunk, then
      // setup in such a way that the next packet will have only
      // one chunk that fills up the partial chunk.
      //
      computePacketChunkSize(0, freeInCksum);
      resetChecksumChunk(freeInCksum);
      this.appendChunk = true;
    } else {
      // if the remaining space in the block is smaller than
      // that expected size of of a packet, then create
      // smaller size packet.
      //
      computePacketChunkSize(
          Math.min(dfsClient.writePacketSize, freeInLastBlock),
          bytesPerChecksum);
    }

    // setup pipeline to append to the last block
    nodes = lastBlock.getLocations();
    errorIndex = -1;   // no errors yet.
    if (nodes.length < 1) {
      throw new IOException("Unable to retrieve blocks locations" +
          " for append to last block " + block + " of file " + src);
    }
    // keep trying to setup a pipeline until you know all DNs are dead
    while (processDatanodeError(true, true)) {
      try {
        Thread.sleep(1000);
      } catch (InterruptedException e) {
      }
    }
    if (lastException != null) {
      throw lastException;
    }
  } else {
    computePacketChunkSize(dfsClient.writePacketSize, bytesPerChecksum);
  }

  long blockOffset = stat.getLen();
  blockOffset -= blockOffset % blockSize;
  setOffsets(blockOffset);
  streamer.start();
}
Example 13
Source File: TestBlockTokenWithDFS.java From big-c with Apache License 2.0
private static void tryRead(final Configuration conf, LocatedBlock lblock,
    boolean shouldSucceed) {
  InetSocketAddress targetAddr = null;
  IOException ioe = null;
  BlockReader blockReader = null;
  ExtendedBlock block = lblock.getBlock();
  try {
    DatanodeInfo[] nodes = lblock.getLocations();
    targetAddr = NetUtils.createSocketAddr(nodes[0].getXferAddr());

    blockReader = new BlockReaderFactory(new DFSClient.Conf(conf)).
        setFileName(BlockReaderFactory.getFileName(targetAddr,
            "test-blockpoolid", block.getBlockId())).
        setBlock(block).
        setBlockToken(lblock.getBlockToken()).
        setInetSocketAddress(targetAddr).
        setStartOffset(0).
        setLength(-1).
        setVerifyChecksum(true).
        setClientName("TestBlockTokenWithDFS").
        setDatanodeInfo(nodes[0]).
        setCachingStrategy(CachingStrategy.newDefaultStrategy()).
        setClientCacheContext(ClientContext.getFromConf(conf)).
        setConfiguration(conf).
        setRemotePeerFactory(new RemotePeerFactory() {
          @Override
          public Peer newConnectedPeer(InetSocketAddress addr,
              Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
              throws IOException {
            Peer peer = null;
            Socket sock = NetUtils.getDefaultSocketFactory(conf).createSocket();
            try {
              sock.connect(addr, HdfsServerConstants.READ_TIMEOUT);
              sock.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);
              peer = TcpPeerServer.peerFromSocket(sock);
            } finally {
              if (peer == null) {
                IOUtils.closeSocket(sock);
              }
            }
            return peer;
          }
        }).
        build();
  } catch (IOException ex) {
    ioe = ex;
  } finally {
    if (blockReader != null) {
      try {
        blockReader.close();
      } catch (IOException e) {
        throw new RuntimeException(e);
      }
    }
  }
  if (shouldSucceed) {
    Assert.assertNotNull("OP_READ_BLOCK: access token is invalid, "
        + "when it is expected to be valid", blockReader);
  } else {
    Assert.assertNotNull("OP_READ_BLOCK: access token is valid, "
        + "when it is expected to be invalid", ioe);
    Assert.assertTrue(
        "OP_READ_BLOCK failed due to reasons other than access token: ",
        ioe instanceof InvalidBlockTokenException);
  }
}
Example 14
Source File: DFSInputStream.java From RDFS with Apache License 2.0
/** Get block info from a datanode */
private Block getBlockInfo(LocatedBlock locatedblock) throws IOException {
  if (locatedblock == null || locatedblock.getLocations().length == 0) {
    return null;
  }
  int replicaNotFoundCount = locatedblock.getLocations().length;

  for(DatanodeInfo datanode : locatedblock.getLocations()) {
    ProtocolProxy<ClientDatanodeProtocol> cdp = null;
    try {
      cdp = DFSClient.createClientDNProtocolProxy(datanode,
          dfsClient.conf, dfsClient.socketTimeout);

      final Block newBlock;
      if (cdp.isMethodSupported("getBlockInfo", int.class, Block.class)) {
        newBlock = cdp.getProxy().getBlockInfo(
            namespaceId, locatedblock.getBlock());
      } else {
        newBlock = cdp.getProxy().getBlockInfo(locatedblock.getBlock());
      }

      if (newBlock == null) {
        // special case : replica might not be on the DN, treat as 0 length
        replicaNotFoundCount--;
      } else {
        return newBlock;
      }
    } catch(IOException ioe) {
      if (DFSClient.LOG.isDebugEnabled()) {
        DFSClient.LOG.debug("Failed to getBlockInfo from datanode "
            + datanode + " for block " + locatedblock.getBlock(), ioe);
      }
    } finally {
      if (cdp != null) {
        RPC.stopProxy(cdp.getProxy());
      }
    }
  }

  // Namenode told us about these locations, but none know about the replica
  // means that we hit the race between pipeline creation start and end.
  // we require all because some other exception could have happened
  // on a DN that has it. we want to report that error
  if (replicaNotFoundCount == 0) {
    return null;
  }

  throw new IOException("Cannot obtain block info for " + locatedblock);
}
Example 15
Source File: DFSInputStream.java From RDFS with Apache License 2.0
private DNAddrPair chooseDataNode(LocatedBlock block)
    throws IOException {
  while (true) {
    DatanodeInfo[] nodes = block.getLocations();
    String blockInfo = block.getBlock() + " file=" + src;
    if(block.isCorrupt())
      throw new BlockMissingException(src, "Block: " + blockInfo +
          " is corrupt ", block.getStartOffset());
    /*if(nodes.length == 1) {
      long lastContact = System.currentTimeMillis() - nodes[0].getLastUpdate();
      if(lastContact > 9000)
        throw new BlockMissingException(src, "Could not obtain block: " +
            blockInfo, block.getStartOffset());
    }*/
    DatanodeInfo chosenNode = null;
    try {
      chosenNode = dfsClient.bestNode(nodes, deadNodes);
      InetSocketAddress targetAddr =
          NetUtils.createSocketAddr(chosenNode.getName());
      return new DNAddrPair(chosenNode, targetAddr);
    } catch (IOException ie) {
      int failureTimes = DFSClient.dfsInputStreamfailures.get();
      if (failureTimes >= dfsClient.maxBlockAcquireFailures
          || failureTimes >= block.getLocations().length) {
        throw new BlockMissingException(src, "Could not obtain block: "
            + blockInfo, block.getStartOffset());
      }

      if (nodes == null || nodes.length == 0) {
        DFSClient.LOG.info("No node available for block: " + blockInfo);
      }
      DFSClient.LOG.info("Could not obtain block " + block.getBlock() +
          " from node: " +
          (chosenNode == null ? "" : chosenNode.getHostName()) + ie +
          ". Will get new block locations from namenode and retry...");
      try {
        // Introducing a random factor to the wait time before another retry.
        // The wait time is dependent on # of failures and a random factor.
        // At the first time of getting a BlockMissingException, the wait time
        // is a random number between 0..3000 ms. If the first retry
        // still fails, we will wait 3000 ms grace period before the 2nd retry.
        // Also at the second retry, the waiting window is expanded to 6000 ms
        // alleviating the request rate from the server. Similarly the 3rd retry
        // will wait 6000ms grace period before retry and the waiting window is
        // expanded to 9000ms.
        // waitTime = grace period for the last round of attempt +
        //            expanding time window for each failure
        double waitTime = timeWindow * failureTimes +
            timeWindow * (failureTimes + 1) * DFSClient.r.nextDouble();
        DFSClient.LOG.warn("DFS chooseDataNode: got # " + (failureTimes + 1) +
            " IOException, will wait for " + waitTime + " msec.", ie);
        Thread.sleep((long)waitTime);
      } catch (InterruptedException iex) {
      }
      deadNodes.clear(); //2nd option is to remove only nodes[blockId]
      openInfo();
      block = getBlockAt(block.getStartOffset(), false, true);
      DFSClient.dfsInputStreamfailures.set(failureTimes+1);
      continue;
    }
  }
}
Example 16
Source File: BlockReaderTestUtil.java From big-c with Apache License 2.0
/**
 * Get a DataNode that serves our testBlock.
 */
public DataNode getDataNode(LocatedBlock testBlock) {
  DatanodeInfo[] nodes = testBlock.getLocations();
  int ipcport = nodes[0].getIpcPort();
  return cluster.getDataNode(ipcport);
}
Example 17
Source File: BlockReaderTestUtil.java From hadoop with Apache License 2.0
/**
 * Get a DataNode that serves our testBlock.
 */
public DataNode getDataNode(LocatedBlock testBlock) {
  DatanodeInfo[] nodes = testBlock.getLocations();
  int ipcport = nodes[0].getIpcPort();
  return cluster.getDataNode(ipcport);
}
Example 18
Source File: TestPlacementMonitor.java From RDFS with Apache License 2.0
/**
 * Test that {@link PlacementMonitor} moves block correctly
 * @throws Exception
 */
@Test
public void testMoveBlock() throws Exception {
  setupCluster();
  try {
    Path path = new Path("/dir/file");
    DFSTestUtil.createFile(fs, path, 1, (short)1, 0L);
    DFSTestUtil.waitReplication(fs, path, (short)1);
    FileStatus status = fs.getFileStatus(path);
    LocatedBlocksWithMetaInfo blocks = namenode.openAndFetchMetaInfo(
        path.toString(), 0, status.getLen());
    Assert.assertEquals(1, blocks.getLocatedBlocks().size());
    LocatedBlock block = blocks.getLocatedBlocks().get(0);
    Assert.assertEquals(1, block.getLocations().length);
    DatanodeInfo source = block.getLocations()[0];
    Set<DatanodeInfo> excluded = new HashSet<DatanodeInfo>();
    for (DatanodeInfo d : datanodes) {
      excluded.add(d);
    }
    excluded.remove(source);
    DatanodeInfo target = excluded.iterator().next();
    excluded.add(source);
    excluded.remove(target);
    BlockMover.BlockMoveAction action = blockMover.new BlockMoveAction(
        block, source, excluded, 1,
        blocks.getDataProtocolVersion(), blocks.getNamespaceID());
    LOG.info("Start moving block from " + source + " to " + target);
    action.run();
    LOG.info("Done moving block");
    boolean blockMoved = false;
    for (int i = 0; i < 100; ++i) {
      blocks = namenode.openAndFetchMetaInfo(
          path.toString(), 0, status.getLen());
      block = blocks.getLocatedBlocks().get(0);
      if (block.getLocations().length == 1 &&
          block.getLocations()[0].equals((target))) {
        blockMoved = true;
        break;
      }
      Thread.sleep(100L);
    }
    Assert.assertTrue(blockMoved);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
    if (placementMonitor != null) {
      placementMonitor.stop();
    }
  }
}
Example 19
Source File: TestInterDatanodeProtocol.java From hadoop with Apache License 2.0
/**
 * Test for
 * {@link FsDatasetImpl#updateReplicaUnderRecovery(ExtendedBlock, long, long)}
 */
@Test
public void testUpdateReplicaUnderRecovery() throws IOException {
  MiniDFSCluster cluster = null;

  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    cluster.waitActive();
    String bpid = cluster.getNamesystem().getBlockPoolId();

    //create a file
    DistributedFileSystem dfs = cluster.getFileSystem();
    String filestr = "/foo";
    Path filepath = new Path(filestr);
    DFSTestUtil.createFile(dfs, filepath, 1024L, (short)3, 0L);

    //get block info
    final LocatedBlock locatedblock = getLastLocatedBlock(
        DFSClientAdapter.getDFSClient(dfs).getNamenode(), filestr);
    final DatanodeInfo[] datanodeinfo = locatedblock.getLocations();
    Assert.assertTrue(datanodeinfo.length > 0);

    //get DataNode and FSDataset objects
    final DataNode datanode = cluster.getDataNode(datanodeinfo[0].getIpcPort());
    Assert.assertTrue(datanode != null);

    //initReplicaRecovery
    final ExtendedBlock b = locatedblock.getBlock();
    final long recoveryid = b.getGenerationStamp() + 1;
    final long newlength = b.getNumBytes() - 1;
    final FsDatasetSpi<?> fsdataset = DataNodeTestUtils.getFSDataset(datanode);
    final ReplicaRecoveryInfo rri = fsdataset.initReplicaRecovery(
        new RecoveringBlock(b, null, recoveryid));

    //check replica
    final ReplicaInfo replica = FsDatasetTestUtil.fetchReplicaInfo(
        fsdataset, bpid, b.getBlockId());
    Assert.assertEquals(ReplicaState.RUR, replica.getState());

    //check meta data before update
    FsDatasetImpl.checkReplicaFiles(replica);

    //case "THIS IS NOT SUPPOSED TO HAPPEN"
    //with (block length) != (stored replica's on disk length).
    {
      //create a block with same id and gs but different length.
      final ExtendedBlock tmp = new ExtendedBlock(b.getBlockPoolId(),
          rri.getBlockId(), rri.getNumBytes() - 1, rri.getGenerationStamp());
      try {
        //update should fail
        fsdataset.updateReplicaUnderRecovery(tmp, recoveryid,
            tmp.getBlockId(), newlength);
        Assert.fail();
      } catch(IOException ioe) {
        System.out.println("GOOD: getting " + ioe);
      }
    }

    //update
    final String storageID = fsdataset.updateReplicaUnderRecovery(
        new ExtendedBlock(b.getBlockPoolId(), rri), recoveryid,
        rri.getBlockId(), newlength);
    assertTrue(storageID != null);

  } finally {
    if (cluster != null) cluster.shutdown();
  }
}
Example 20
Source File: TestFavoredNodes.java From RDFS with Apache License 2.0
@Test
public void testCrossFileSystemAddBlock() throws Exception {
  // Create source file.
  String fileName = "/testCrossFileSystemAddBlock";
  DFSTestUtil.createFile(cluster.getFileSystem(), new Path(fileName),
      (long) FILE_SIZE, (short) 3, (long) 0);

  // Create RPC connections
  ClientProtocol dstNamenode = DFSClient.createRPCNamenode(
      NameNode.getAddress(remoteCluster.getFileSystem().getUri()
          .getAuthority()), remoteConf,
      UnixUserGroupInformation.login(remoteConf, true)).getProxy();

  ClientProtocol srcNamenode = DFSClient.createRPCNamenode(
      NameNode.getAddress(cluster.getFileSystem().getUri().getAuthority()),
      conf, UnixUserGroupInformation.login(conf, true)).getProxy();

  // Create destination file.
  String dstFile = "/dst" + fileName;
  FileStatus srcStat = cluster.getFileSystem().getFileStatus(
      new Path(fileName));
  String clientName = "testClient";
  dstNamenode.create(dstFile, srcStat.getPermission(), clientName, true,
      true, srcStat.getReplication(), srcStat.getBlockSize());
  FSNamesystem dstNamesystem = remoteCluster.getNameNode().getNamesystem();

  LocatedBlocks lbks = srcNamenode.getBlockLocations(fileName, 0,
      Long.MAX_VALUE);
  for (LocatedBlock lbk : lbks.getLocatedBlocks()) {
    DatanodeInfo[] locs = lbk.getLocations();
    int slice = r.nextInt(locs.length);
    LocatedBlock dstlbk = dstNamenode.addBlock(dstFile, clientName, null,
        Arrays.copyOfRange(locs, 0, slice + 1));
    DatanodeInfo[] dstlocs = dstlbk.getLocations();
    List<String> dstlocHostnames = new ArrayList<String>(dstlocs.length);
    for (DatanodeInfo dstloc : dstlocs) {
      dstlocHostnames.add(dstloc.getHostName());
    }
    assertEquals(conf.getInt("dfs.replication", 3), dstlocs.length);
    for (int i = 0; i <= slice; i++) {
      assertTrue("Expected " + locs[i].getHostName() + " was not found",
          dstlocHostnames.contains(locs[i].getHostName()));
      // Allows us to make the namenode think that these blocks have been
      // successfully written to the datanode, helps us to add the next block
      // without completing the previous block.
      dstNamesystem.blocksMap.addNode(dstlbk.getBlock(),
          dstNamesystem.getDatanode(dstlocs[i]), srcStat.getReplication());
    }
  }
}