Java Code Examples for org.apache.hadoop.hdfs.protocol.LocatedBlocks#getLocatedBlocks()
The following examples show how to use org.apache.hadoop.hdfs.protocol.LocatedBlocks#getLocatedBlocks().
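All of the examples share one basic pattern: obtain a LocatedBlocks object for a file (typically from ClientProtocol#getBlockLocations or DFSClient#getLocatedBlocks), call getLocatedBlocks() to get the per-block list, then inspect each LocatedBlock's size and replica locations. The sketch below shows that pattern in isolation; the class and method names are made up for illustration, and the ClientProtocol proxy is assumed to already exist (the examples that follow show how real tests obtain one).

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;

public class LocatedBlocksSketch {
  /**
   * Minimal sketch: list every block of "src" together with its replicas.
   * "namenode" is assumed to be an already-initialized ClientProtocol proxy.
   */
  static void printBlockLocations(ClientProtocol namenode, String src)
      throws IOException {
    // Ask the NameNode for all blocks of the file.
    LocatedBlocks lbks = namenode.getBlockLocations(src, 0, Long.MAX_VALUE);
    List<LocatedBlock> blocks = lbks.getLocatedBlocks();
    for (LocatedBlock blk : blocks) {
      System.out.println("block " + blk.getBlock() + " size=" + blk.getBlockSize());
      for (DatanodeInfo loc : blk.getLocations()) {
        System.out.println("  replica on " + loc.getName());
      }
    }
  }
}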
Example 1
Source File: TestAvatarDataNodeRBW.java From RDFS with Apache License 2.0
private void verifyResults(int blocksBefore, String fileName) throws IOException {
  // Verify we have RBWs after restart.
  AvatarNode avatarAfter = cluster.getPrimaryAvatar(0).avatar;
  LocatedBlocks lbks = avatarAfter.namesystem
      .getBlockLocations(fileName, 0, Long.MAX_VALUE);
  long blocksAfter = lbks.locatedBlockCount();

  System.out.println("blocksBefore : " + blocksBefore + " blocksAfter : "
      + blocksAfter);

  assertEquals(blocksBefore, blocksAfter);
  for (LocatedBlock lbk : lbks.getLocatedBlocks()) {
    DatanodeInfo[] locs = lbk.getLocations();
    assertNotNull(locs);
    assertTrue(locs.length != 0);
  }
}
Example 2
Source File: DFSInputStream.java From RDFS with Apache License 2.0
private void checkLocatedBlocks(LocatedBlocks locatedBlocks)
    throws IOException {
  if (null == locatedBlocks) {
    return;
  }
  if (!locatedBlocks.isUnderConstruction()) {
    return;
  }
  List<LocatedBlock> lbs = locatedBlocks.getLocatedBlocks();
  if (lbs == null) {
    return;
  }
  for (int i = 0; i < lbs.size() - 1; i++) {
    if (lbs.get(i).getBlockSize() <= 1) {
      throw new IOException(
          "File is under construction and namenode hasn't received the second last block yet.");
    }
  }
}
Example 3
Source File: TestInterDatanodeProtocol.java From big-c with Apache License 2.0
public static LocatedBlock getLastLocatedBlock(
    ClientProtocol namenode, String src) throws IOException {
  // get block info for the last block
  LocatedBlocks locations = namenode.getBlockLocations(src, 0, Long.MAX_VALUE);
  List<LocatedBlock> blocks = locations.getLocatedBlocks();
  DataNode.LOG.info("blocks.size()=" + blocks.size());
  assertTrue(blocks.size() > 0);
  return blocks.get(blocks.size() - 1);
}
Example 4
Source File: LazyPersistTestCase.java From big-c with Apache License 2.0
/**
 * Make sure at least one non-transient volume has a saved copy of the replica.
 * An infinite loop is used to ensure the async lazy persist tasks are completely
 * done before verification. Caller of ensureLazyPersistBlocksAreSaved expects
 * either a successful pass or timeout failure.
 */
protected final void ensureLazyPersistBlocksAreSaved(
    LocatedBlocks locatedBlocks) throws IOException, InterruptedException {
  final String bpid = cluster.getNamesystem().getBlockPoolId();
  List<? extends FsVolumeSpi> volumes =
      cluster.getDataNodes().get(0).getFSDataset().getVolumes();
  final Set<Long> persistedBlockIds = new HashSet<Long>();

  while (persistedBlockIds.size() < locatedBlocks.getLocatedBlocks().size()) {
    // Take 1 second sleep before each verification iteration
    Thread.sleep(1000);

    for (LocatedBlock lb : locatedBlocks.getLocatedBlocks()) {
      for (FsVolumeSpi v : volumes) {
        if (v.isTransientStorage()) {
          continue;
        }

        FsVolumeImpl volume = (FsVolumeImpl) v;
        File lazyPersistDir = volume.getBlockPoolSlice(bpid).getLazypersistDir();

        long blockId = lb.getBlock().getBlockId();
        File targetDir = DatanodeUtil.idToBlockDir(lazyPersistDir, blockId);
        File blockFile = new File(targetDir, lb.getBlock().getBlockName());
        if (blockFile.exists()) {
          // Found a persisted copy for this block and added to the Set
          persistedBlockIds.add(blockId);
        }
      }
    }
  }

  // We should have found a persisted copy for each located block.
  assertThat(persistedBlockIds.size(),
      is(locatedBlocks.getLocatedBlocks().size()));
}
Example 5
Source File: TestLazyPersistFiles.java From big-c with Apache License 2.0
/**
 * File partially fit in RamDisk after eviction.
 * RamDisk can fit 2 blocks. Write a file with 5 blocks.
 * Expect 2 or less blocks are on RamDisk and 3 or more on disk.
 * @throws IOException
 */
@Test
public void testFallbackToDiskPartial()
    throws IOException, InterruptedException {
  startUpCluster(true, 2);
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  Path path = new Path("/" + METHOD_NAME + ".dat");

  makeTestFile(path, BLOCK_SIZE * 5, true);

  // Sleep for a short time to allow the lazy writer thread to do its job
  Thread.sleep(6 * LAZY_WRITER_INTERVAL_SEC * 1000);

  triggerBlockReport();

  int numBlocksOnRamDisk = 0;
  int numBlocksOnDisk = 0;

  long fileLength = client.getFileInfo(path.toString()).getLen();
  LocatedBlocks locatedBlocks =
      client.getLocatedBlocks(path.toString(), 0, fileLength);
  for (LocatedBlock locatedBlock : locatedBlocks.getLocatedBlocks()) {
    if (locatedBlock.getStorageTypes()[0] == RAM_DISK) {
      numBlocksOnRamDisk++;
    } else if (locatedBlock.getStorageTypes()[0] == DEFAULT) {
      numBlocksOnDisk++;
    }
  }

  // Since eviction is asynchronous, depending on the timing of eviction
  // wrt writes, we may get 2 or less blocks on RAM disk.
  assert(numBlocksOnRamDisk <= 2);
  assert(numBlocksOnDisk >= 3);
}
Example 6
Source File: TestBlockUnderConstruction.java From big-c with Apache License 2.0
/**
 * Test NameNode.getBlockLocations(..) on reading un-closed files.
 */
@Test
public void testGetBlockLocations() throws IOException {
  final NamenodeProtocols namenode = cluster.getNameNodeRpc();
  final Path p = new Path(BASE_DIR, "file2.dat");
  final String src = p.toString();
  final FSDataOutputStream out = TestFileCreation.createFile(hdfs, p, 3);

  // write a half block
  int len = BLOCK_SIZE >>> 1;
  writeFile(p, out, len);

  for (int i = 1; i < NUM_BLOCKS; ) {
    // verify consistency
    final LocatedBlocks lb = namenode.getBlockLocations(src, 0, len);
    final List<LocatedBlock> blocks = lb.getLocatedBlocks();
    assertEquals(i, blocks.size());
    final Block b = blocks.get(blocks.size() - 1).getBlock().getLocalBlock();
    assertTrue(b instanceof BlockInfoContiguousUnderConstruction);

    if (++i < NUM_BLOCKS) {
      // write one more block
      writeFile(p, out, BLOCK_SIZE);
      len += BLOCK_SIZE;
    }
  }

  // close file
  out.close();
}
Example 7
Source File: TestINodeFile.java From big-c with Apache License 2.0
private static void checkEquals(LocatedBlocks l1, LocatedBlocks l2) {
  List<LocatedBlock> list1 = l1.getLocatedBlocks();
  List<LocatedBlock> list2 = l2.getLocatedBlocks();
  assertEquals(list1.size(), list2.size());

  for (int i = 0; i < list1.size(); i++) {
    LocatedBlock b1 = list1.get(i);
    LocatedBlock b2 = list2.get(i);
    assertEquals(b1.getBlock(), b2.getBlock());
    assertEquals(b1.getBlockSize(), b2.getBlockSize());
  }
}
Example 8
Source File: TestAvatarDataNodeRBW.java From RDFS with Apache License 2.0
private int initializeTest(String testName) throws IOException {
  String fileName = testName;
  createRBWFile(fileName);
  // Verify we have 1 RBW block.
  AvatarNode avatar = cluster.getPrimaryAvatar(0).avatar;
  LocatedBlocks lbks = avatar.namesystem.getBlockLocations(fileName, 0,
      Long.MAX_VALUE);
  int blocksBefore = lbks.locatedBlockCount();
  for (LocatedBlock lbk : lbks.getLocatedBlocks()) {
    DatanodeInfo[] locs = lbk.getLocations();
    assertNotNull(locs);
    assertTrue(locs.length != 0);
  }
  return blocksBefore;
}
Example 9
Source File: TestInterDatanodeProtocol.java From hadoop-gpu with Apache License 2.0
public static LocatedBlock getLastLocatedBlock(
    ClientProtocol namenode, String src) throws IOException {
  // get block info for the last block
  LocatedBlocks locations = namenode.getBlockLocations(src, 0, Long.MAX_VALUE);
  List<LocatedBlock> blocks = locations.getLocatedBlocks();
  DataNode.LOG.info("blocks.size()=" + blocks.size());
  assertTrue(blocks.size() > 0);
  return blocks.get(blocks.size() - 1);
}
Example 10
Source File: DistributedRaidFileSystem.java From RDFS with Apache License 2.0
private static long getFileSize(LocatedBlocks lbs) throws IOException {
  List<LocatedBlock> locatedBlocks = lbs.getLocatedBlocks();
  long fileSize = 0;
  for (LocatedBlock lb : locatedBlocks) {
    fileSize += lb.getBlockSize();
  }
  if (fileSize != lbs.getFileLength()) {
    throw new IOException("lbs.getFileLength() " + lbs.getFileLength() +
        " does not match sum of block sizes " + fileSize);
  }
  return fileSize;
}
Example 11
Source File: TestINodeFile.java From hadoop with Apache License 2.0
private static void checkEquals(LocatedBlocks l1, LocatedBlocks l2) {
  List<LocatedBlock> list1 = l1.getLocatedBlocks();
  List<LocatedBlock> list2 = l2.getLocatedBlocks();
  assertEquals(list1.size(), list2.size());

  for (int i = 0; i < list1.size(); i++) {
    LocatedBlock b1 = list1.get(i);
    LocatedBlock b2 = list2.get(i);
    assertEquals(b1.getBlock(), b2.getBlock());
    assertEquals(b1.getBlockSize(), b2.getBlockSize());
  }
}
Example 12
Source File: TestDatanodeDeath2.java From RDFS with Apache License 2.0
public void testBlockAbandoned() throws Exception {
  Callback newPipeline = new Callback() {
    @Override
    public void execute() {
      try {
        FSNamesystem namesystem = cluster.getNameNode().getNamesystem();
        LocatedBlocks blocks = namesystem.getBlockLocations(FILE1, 0,
            2 * BLOCK_SIZE);
        List<LocatedBlock> blockList = blocks.getLocatedBlocks();
        String holder =
            ((DistributedFileSystem) fileSystem).getClient().clientName;

        // abandonBlock clears the targets of the INodeFileUnderConstruction
        namesystem.abandonBlock(
            blockList.get(blockList.size() - 1).getBlock(), FILE1, holder);

        // take down the datanode
        DataNode dataNode = cluster.getDataNodes().get(0);

        // get a new block for the same file which we exclude the node from
        Node excludedNode = cluster
            .getNameNode()
            .getNamesystem()
            .getDatanode(dataNode.getDNRegistrationForNS(
                cluster.getNameNode().getNamespaceID()));
        namesystem.getAdditionalBlock(
            FILE1, holder, Arrays.<Node>asList(excludedNode));

        dataNode.shutdown();
      } catch (IOException e) {
        fail("exception: " + StringUtils.stringifyException(e));
      }
    }
  };

  runTestDatanodeRemovedFromPipeline(false, newPipeline);
}
Example 13
Source File: TestFileAppend.java From big-c with Apache License 2.0
/** Tests appending after soft-limit expires. */
@Test
public void testAppend2AfterSoftLimit() throws Exception {
  Configuration conf = new HdfsConfiguration();
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
  // Set small soft-limit for lease
  final long softLimit = 1L;
  final long hardLimit = 9999999L;

  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
      .build();
  cluster.setLeasePeriod(softLimit, hardLimit);
  cluster.waitActive();
  DistributedFileSystem fs = cluster.getFileSystem();
  DistributedFileSystem fs2 = new DistributedFileSystem();
  fs2.initialize(fs.getUri(), conf);

  final Path testPath = new Path("/testAppendAfterSoftLimit");
  final byte[] fileContents = AppendTestUtil.initBuffer(32);

  // create a new file without closing
  FSDataOutputStream out = fs.create(testPath);
  out.write(fileContents);

  // Wait for > soft-limit
  Thread.sleep(250);

  try {
    FSDataOutputStream appendStream2 = fs2.append(testPath,
        EnumSet.of(CreateFlag.APPEND, CreateFlag.NEW_BLOCK), 4096, null);
    appendStream2.write(fileContents);
    appendStream2.close();
    assertEquals(fileContents.length, fs.getFileStatus(testPath).getLen());

    // make sure we now have 1 block since the first writer was revoked
    LocatedBlocks blks = fs.getClient().getLocatedBlocks(
        testPath.toString(), 0L);
    assertEquals(1, blks.getLocatedBlocks().size());
    for (LocatedBlock blk : blks.getLocatedBlocks()) {
      assertEquals(fileContents.length, blk.getBlockSize());
    }
  } finally {
    fs.close();
    fs2.close();
    cluster.shutdown();
  }
}
Example 14
Source File: TestStorageMover.java From big-c with Apache License 2.0
/**
 * Move an open file into archival storage
 */
@Test
public void testMigrateOpenFileToArchival() throws Exception {
  LOG.info("testMigrateOpenFileToArchival");
  final Path fooDir = new Path("/foo");
  Map<Path, BlockStoragePolicy> policyMap = Maps.newHashMap();
  policyMap.put(fooDir, COLD);
  NamespaceScheme nsScheme = new NamespaceScheme(Arrays.asList(fooDir), null,
      BLOCK_SIZE, null, policyMap);
  ClusterScheme clusterScheme = new ClusterScheme(DEFAULT_CONF,
      NUM_DATANODES, REPL, genStorageTypes(NUM_DATANODES), null);
  MigrationTest test = new MigrationTest(clusterScheme, nsScheme);
  test.setupCluster();

  // create an open file
  banner("writing to file /foo/bar");
  final Path barFile = new Path(fooDir, "bar");
  DFSTestUtil.createFile(test.dfs, barFile, BLOCK_SIZE, (short) 1, 0L);
  FSDataOutputStream out = test.dfs.append(barFile);
  out.writeBytes("hello, ");
  ((DFSOutputStream) out.getWrappedStream()).hsync();

  try {
    banner("start data migration");
    test.setStoragePolicy(); // set /foo to COLD
    test.migrate();

    // make sure the under construction block has not been migrated
    LocatedBlocks lbs = test.dfs.getClient().getLocatedBlocks(
        barFile.toString(), BLOCK_SIZE);
    LOG.info("Locations: " + lbs);
    List<LocatedBlock> blks = lbs.getLocatedBlocks();
    Assert.assertEquals(1, blks.size());
    Assert.assertEquals(1, blks.get(0).getLocations().length);

    banner("finish the migration, continue writing");
    // make sure the writing can continue
    out.writeBytes("world!");
    ((DFSOutputStream) out.getWrappedStream()).hsync();
    IOUtils.cleanup(LOG, out);

    lbs = test.dfs.getClient().getLocatedBlocks(
        barFile.toString(), BLOCK_SIZE);
    LOG.info("Locations: " + lbs);
    blks = lbs.getLocatedBlocks();
    Assert.assertEquals(1, blks.size());
    Assert.assertEquals(1, blks.get(0).getLocations().length);

    banner("finish writing, starting reading");
    // check the content of /foo/bar
    FSDataInputStream in = test.dfs.open(barFile);
    byte[] buf = new byte[13];
    // read from offset 1024
    in.readFully(BLOCK_SIZE, buf, 0, buf.length);
    IOUtils.cleanup(LOG, in);
    Assert.assertEquals("hello, world!", new String(buf));
  } finally {
    test.shutdownCluster();
  }
}
Example 15
Source File: TestDFSClientFavoredNodes.java From RDFS with Apache License 2.0
@Test
public void testFavoredNodes() throws Exception {
  // Get the datanodes in the system and choose some favored ones.
  DatanodeInfo[] nodes = cluster.getNameNode().getDatanodeReport(
      FSConstants.DatanodeReportType.ALL);
  InetSocketAddress[] favoredNodes = new InetSocketAddress[REPLICATION];
  for (int i = 0; i < REPLICATION; i++) {
    favoredNodes[i] = new InetSocketAddress(nodes[i].getHost(),
        nodes[i].getPort());
  }
  DatanodeInfo[] favoredNodeInfos = Arrays.copyOfRange(nodes, 0, REPLICATION);
  // Sort the favored nodes for future comparison.
  Arrays.sort(favoredNodeInfos);

  // Write a file, specifying the favored nodes.
  String fileName = "/testFavoredNodes";
  DistributedFileSystem fs = (DistributedFileSystem) cluster.getFileSystem();
  OutputStream out = fs.create(new Path(fileName), FsPermission.getDefault(),
      false, BUFFER_SIZE, REPLICATION, BLOCK_SIZE, BYTES_PER_CHECKSUM, null,
      favoredNodes);
  Random rand = new Random();
  byte[] bytes = new byte[BLOCK_SIZE];
  for (int i = 0; i < BLOCKS; i++) {
    rand.nextBytes(bytes);
    out.write(bytes);
  }
  out.close();

  // Get the locations of every block that was just written, and compare them
  // to the favored nodes.
  LocatedBlocks lbks = cluster.getNameNode().getBlockLocations(fileName, 0,
      Long.MAX_VALUE);
  for (LocatedBlock lbk : lbks.getLocatedBlocks()) {
    DatanodeInfo[] locs = lbk.getLocations();
    // The lists of blocks must be sorted first because nodes are not
    // necessarily listed in the same order (order does not matter anyways).
    // The sorted lists must be equal.
    Arrays.sort(locs);
    for (int i = 0; i < locs.length; i++) {
      assertEquals(locs[i], favoredNodeInfos[i]);
    }
  }
}
Example 16
Source File: TestNodeCount.java From RDFS with Apache License 2.0
public void testInvalidateMultipleReplicas() throws Exception {
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = new MiniDFSCluster(conf, 5, true, null);
  final int FILE_LEN = 123;
  final String pathStr = "/testInvalidateMultipleReplicas";
  try {
    FileSystem fs = cluster.getFileSystem();
    Path path = new Path(pathStr);
    cluster.waitActive();
    // create a small file on 3 nodes
    DFSTestUtil.createFile(fs, path, 123, (short) 3, 0);
    DFSTestUtil.waitReplication(fs, path, (short) 3);
    NameNode nn = cluster.getNameNode();
    LocatedBlocks located = nn.getBlockLocations(pathStr, 0, FILE_LEN);

    // Get the original block locations
    List<LocatedBlock> blocks = located.getLocatedBlocks();
    LocatedBlock firstBlock = blocks.get(0);

    DatanodeInfo[] locations = firstBlock.getLocations();
    assertEquals("Should have 3 good blocks", 3, locations.length);
    nn.getNamesystem().stallReplicationWork();

    DatanodeInfo[] badLocations = new DatanodeInfo[2];
    badLocations[0] = locations[0];
    badLocations[1] = locations[1];

    // Report some blocks corrupt
    LocatedBlock badLBlock = new LocatedBlock(
        firstBlock.getBlock(), badLocations);
    nn.reportBadBlocks(new LocatedBlock[] {badLBlock});

    nn.getNamesystem().restartReplicationWork();

    DFSTestUtil.waitReplication(fs, path, (short) 3);
    NumberReplicas num = nn.getNamesystem().countNodes(
        firstBlock.getBlock());
    assertEquals(0, num.corruptReplicas());
  } finally {
    cluster.shutdown();
  }
}
Example 17
Source File: TestFavoredNodes.java From RDFS with Apache License 2.0
@Test
public void testPartiallySpecifiedFavoredNodes() throws Exception {
  // Create source file.
  String fileName = "/testPartiallySpecifiedFavoredNodes";
  DFSTestUtil.createFile(cluster.getFileSystem(), new Path(fileName),
      (long) FILE_SIZE, (short) 3, (long) 0);

  // Create RPC connections
  ClientProtocol srcNamenode = DFSClient.createRPCNamenode(
      NameNode.getAddress(cluster.getFileSystem().getUri().getAuthority()),
      conf, UnixUserGroupInformation.login(conf, true)).getProxy();

  // Create destination file.
  String dstFile = "/dst" + fileName;
  FileStatus srcStat = cluster.getFileSystem().getFileStatus(
      new Path(fileName));
  String clientName = "testClient";
  srcNamenode.create(dstFile, srcStat.getPermission(), clientName, true,
      true, srcStat.getReplication(), srcStat.getBlockSize());
  FSNamesystem dstNamesystem = cluster.getNameNode().getNamesystem();

  LocatedBlocks lbks = srcNamenode.getBlockLocations(fileName, 0,
      Long.MAX_VALUE);
  for (LocatedBlock lbk : lbks.getLocatedBlocks()) {
    DatanodeInfo[] locs = lbk.getLocations();
    // Partially-specified nodes have only hostname and port.
    DatanodeInfo[] partialLocs = new DatanodeInfo[locs.length];
    for (int i = 0; i < partialLocs.length; i++) {
      partialLocs[i] = new DatanodeInfo(new DatanodeID(locs[i].getName()));
    }
    int slice = r.nextInt(locs.length);
    LocatedBlock dstlbk = srcNamenode.addBlock(dstFile, clientName, null,
        Arrays.copyOfRange(partialLocs, 0, slice + 1));
    List<DatanodeInfo> dstlocs = Arrays.asList(dstlbk.getLocations());
    assertEquals(conf.getInt("dfs.replication", 3), dstlocs.size());
    for (int i = 0; i <= slice; i++) {
      assertTrue("Expected " + locs[i].getName() + " was not found",
          dstlocs.contains(locs[i]));
      // Allows us to make the namenode think that these blocks have been
      // successfully written to the datanode, helps us to add the next block
      // without completing the previous block.
      dstNamesystem.blocksMap.addNode(dstlbk.getBlock(),
          dstNamesystem.getDatanode(dstlocs.get(i)),
          srcStat.getReplication());
    }
  }
}
Example 18
Source File: TestStorageMover.java From hadoop with Apache License 2.0
/**
 * Move an open file into archival storage
 */
@Test
public void testMigrateOpenFileToArchival() throws Exception {
  LOG.info("testMigrateOpenFileToArchival");
  final Path fooDir = new Path("/foo");
  Map<Path, BlockStoragePolicy> policyMap = Maps.newHashMap();
  policyMap.put(fooDir, COLD);
  NamespaceScheme nsScheme = new NamespaceScheme(Arrays.asList(fooDir), null,
      BLOCK_SIZE, null, policyMap);
  ClusterScheme clusterScheme = new ClusterScheme(DEFAULT_CONF,
      NUM_DATANODES, REPL, genStorageTypes(NUM_DATANODES), null);
  MigrationTest test = new MigrationTest(clusterScheme, nsScheme);
  test.setupCluster();

  // create an open file
  banner("writing to file /foo/bar");
  final Path barFile = new Path(fooDir, "bar");
  DFSTestUtil.createFile(test.dfs, barFile, BLOCK_SIZE, (short) 1, 0L);
  FSDataOutputStream out = test.dfs.append(barFile);
  out.writeBytes("hello, ");
  ((DFSOutputStream) out.getWrappedStream()).hsync();

  try {
    banner("start data migration");
    test.setStoragePolicy(); // set /foo to COLD
    test.migrate();

    // make sure the under construction block has not been migrated
    LocatedBlocks lbs = test.dfs.getClient().getLocatedBlocks(
        barFile.toString(), BLOCK_SIZE);
    LOG.info("Locations: " + lbs);
    List<LocatedBlock> blks = lbs.getLocatedBlocks();
    Assert.assertEquals(1, blks.size());
    Assert.assertEquals(1, blks.get(0).getLocations().length);

    banner("finish the migration, continue writing");
    // make sure the writing can continue
    out.writeBytes("world!");
    ((DFSOutputStream) out.getWrappedStream()).hsync();
    IOUtils.cleanup(LOG, out);

    lbs = test.dfs.getClient().getLocatedBlocks(
        barFile.toString(), BLOCK_SIZE);
    LOG.info("Locations: " + lbs);
    blks = lbs.getLocatedBlocks();
    Assert.assertEquals(1, blks.size());
    Assert.assertEquals(1, blks.get(0).getLocations().length);

    banner("finish writing, starting reading");
    // check the content of /foo/bar
    FSDataInputStream in = test.dfs.open(barFile);
    byte[] buf = new byte[13];
    // read from offset 1024
    in.readFully(BLOCK_SIZE, buf, 0, buf.length);
    IOUtils.cleanup(LOG, in);
    Assert.assertEquals("hello, world!", new String(buf));
  } finally {
    test.shutdownCluster();
  }
}
Example 19
Source File: DFSLocatedBlocks.java From RDFS with Apache License 2.0
public DFSLocatedBlocks(LocatedBlocks lbs) {
  super(lbs.getFileLength(), lbs.getLocatedBlocks(), lbs.isUnderConstruction());
  this.fileLength = lbs.getFileLength();
  lock = new ReentrantReadWriteLock(true); // fair
}
Example 20
Source File: TestFavoredNodes.java From RDFS with Apache License 2.0
@Test
public void testCrossFileSystemAddBlock() throws Exception {
  // Create source file.
  String fileName = "/testCrossFileSystemAddBlock";
  DFSTestUtil.createFile(cluster.getFileSystem(), new Path(fileName),
      (long) FILE_SIZE, (short) 3, (long) 0);

  // Create RPC connections
  ClientProtocol dstNamenode = DFSClient.createRPCNamenode(
      NameNode.getAddress(remoteCluster.getFileSystem().getUri()
          .getAuthority()),
      remoteConf, UnixUserGroupInformation.login(remoteConf, true)).getProxy();
  ClientProtocol srcNamenode = DFSClient.createRPCNamenode(
      NameNode.getAddress(cluster.getFileSystem().getUri().getAuthority()),
      conf, UnixUserGroupInformation.login(conf, true)).getProxy();

  // Create destination file.
  String dstFile = "/dst" + fileName;
  FileStatus srcStat = cluster.getFileSystem().getFileStatus(
      new Path(fileName));
  String clientName = "testClient";
  dstNamenode.create(dstFile, srcStat.getPermission(), clientName, true,
      true, srcStat.getReplication(), srcStat.getBlockSize());
  FSNamesystem dstNamesystem = remoteCluster.getNameNode().getNamesystem();

  LocatedBlocks lbks = srcNamenode.getBlockLocations(fileName, 0,
      Long.MAX_VALUE);
  for (LocatedBlock lbk : lbks.getLocatedBlocks()) {
    DatanodeInfo[] locs = lbk.getLocations();
    int slice = r.nextInt(locs.length);
    LocatedBlock dstlbk = dstNamenode.addBlock(dstFile, clientName, null,
        Arrays.copyOfRange(locs, 0, slice + 1));
    DatanodeInfo[] dstlocs = dstlbk.getLocations();
    List<String> dstlocHostnames = new ArrayList<String>(dstlocs.length);
    for (DatanodeInfo dstloc : dstlocs) {
      dstlocHostnames.add(dstloc.getHostName());
    }
    assertEquals(conf.getInt("dfs.replication", 3), dstlocs.length);
    for (int i = 0; i <= slice; i++) {
      assertTrue("Expected " + locs[i].getHostName() + " was not found",
          dstlocHostnames.contains(locs[i].getHostName()));
      // Allows us to make the namenode think that these blocks have been
      // successfully written to the datanode, helps us to add the next block
      // without completing the previous block.
      dstNamesystem.blocksMap.addNode(dstlbk.getBlock(),
          dstNamesystem.getDatanode(dstlocs[i]),
          srcStat.getReplication());
    }
  }
}