Java Code Examples for org.apache.hadoop.hdfs.DFSTestUtil#createFiles()
The following examples show how to use org.apache.hadoop.hdfs.DFSTestUtil#createFiles(). You can vote up the examples you find useful or vote down the ones you don't, and follow the links above each example to the original project or source file.
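As a quick orientation before the full tests, the sketch below distills the call pattern the examples share: build a DFSTestUtil, point createFiles() at a directory on a running MiniDFSCluster, wait for replication, verify with checkFiles(), and clean up. It is a minimal illustration only; the test name, file counts, and the /sketchdat path are placeholders and do not come from any of the projects listed here.

@Test
public void testCreateFilesSketch() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;
  FileSystem fs = null;
  try {
    // Start a small test cluster and get its file system.
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();

    // Build the utility: test name, number of files, directory depth, max file size.
    DFSTestUtil util = new DFSTestUtil.Builder().setName("CreateFilesSketch").
        setNumFiles(5).setMaxLevels(2).setMaxSize(1024).build();

    // Write the files under /sketchdat with replication 3, then verify them.
    util.createFiles(fs, "/sketchdat", (short) 3);
    util.waitReplication(fs, "/sketchdat", (short) 3);
    assertTrue(util.checkFiles(fs, "/sketchdat"));

    util.cleanup(fs, "/sketchdat");
  } finally {
    if (fs != null) { fs.close(); }
    if (cluster != null) { cluster.shutdown(); }
  }
}

Note that createFiles(fs, topdir) uses a default replication factor, while the three-argument form takes an explicit (short) replication factor; both forms appear in the examples below.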
Example 1
Source File: TestFsck.java From hadoop with Apache License 2.0 | 6 votes |
@Test
public void testFsckNonExistent() throws Exception {
  DFSTestUtil util = new DFSTestUtil.Builder().setName("TestFsck").
      setNumFiles(20).build();
  MiniDFSCluster cluster = null;
  FileSystem fs = null;
  try {
    Configuration conf = new HdfsConfiguration();
    conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
    fs = cluster.getFileSystem();
    util.createFiles(fs, "/srcdat");
    util.waitReplication(fs, "/srcdat", (short)3);
    String outStr = runFsck(conf, 0, true, "/non-existent");
    assertEquals(-1, outStr.indexOf(NamenodeFsck.HEALTHY_STATUS));
    System.out.println(outStr);
    util.cleanup(fs, "/srcdat");
  } finally {
    if (fs != null) { try { fs.close(); } catch (Exception e) {} }
    if (cluster != null) { cluster.shutdown(); }
  }
}
Example 2
Source File: TestDatanodeRestart.java From hadoop with Apache License 2.0 | 6 votes |
@Test
public void testFinalizedReplicas() throws Exception {
  // bring up a cluster of 3
  Configuration conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024L);
  conf.setInt(DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, 512);
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
  cluster.waitActive();
  FileSystem fs = cluster.getFileSystem();
  try {
    // test finalized replicas
    final String TopDir = "/test";
    DFSTestUtil util = new DFSTestUtil.Builder().
        setName("TestDatanodeRestart").setNumFiles(2).build();
    util.createFiles(fs, TopDir, (short)3);
    util.waitReplication(fs, TopDir, (short)3);
    util.checkFiles(fs, TopDir);
    cluster.restartDataNodes();
    cluster.waitActive();
    util.checkFiles(fs, TopDir);
  } finally {
    cluster.shutdown();
  }
}
Example 3
Source File: TestDatanodeRestart.java From RDFS with Apache License 2.0 | 6 votes |
public void testFinalizedReplicas() throws Exception {
  // bring up a cluster of 3
  Configuration conf = new Configuration();
  conf.setLong("dfs.block.size", 1024L);
  conf.setInt("dfs.write.packet.size", 512);
  MiniDFSCluster cluster = new MiniDFSCluster(conf, 3, true, null);
  cluster.waitActive();
  FileSystem fs = cluster.getFileSystem();
  try {
    // test finalized replicas
    final String TopDir = "/test";
    DFSTestUtil util = new DFSTestUtil("TestCrcCorruption", 2, 3, 8 * 1024);
    util.createFiles(fs, TopDir, (short) 3);
    util.waitReplication(fs, TopDir, (short) 3);
    util.checkFiles(fs, TopDir);
    cluster.restartDataNodes();
    cluster.waitActive();
    util.checkFiles(fs, TopDir);
  } finally {
    cluster.shutdown();
  }
}
Example 4
Source File: TestStartupDefaultRack.java From RDFS with Apache License 2.0 | 5 votes |
@Test
public void testStartup() throws Exception {
  conf = new Configuration();
  conf.setClass("dfs.block.replicator.classname",
      BlockPlacementPolicyConfigurable.class, BlockPlacementPolicy.class);
  File baseDir = MiniDFSCluster.getBaseDirectory(conf);
  baseDir.mkdirs();
  File hostsFile = new File(baseDir, "hosts");
  FileOutputStream out = new FileOutputStream(hostsFile);
  out.write("h1\n".getBytes());
  out.write("h2\n".getBytes());
  out.write("h3\n".getBytes());
  out.close();
  conf.set("dfs.hosts", hostsFile.getAbsolutePath());
  StaticMapping.addNodeToRack("h1", "/r1");
  StaticMapping.addNodeToRack("h2", "/r2");
  StaticMapping.addNodeToRack("h3", NetworkTopology.DEFAULT_RACK);
  cluster = new MiniDFSCluster(conf, 3,
      new String[] { "/r1", "/r2", NetworkTopology.DEFAULT_RACK },
      new String[] { "h1", "h2", "h3" }, true, false);
  DFSTestUtil util = new DFSTestUtil("/testStartup", 10, 10, 1024);
  util.createFiles(cluster.getFileSystem(), "/");
  util.checkFiles(cluster.getFileSystem(), "/");
  assertEquals(2,
      cluster.getNameNode().getDatanodeReport(DatanodeReportType.LIVE).length);
  cluster.shutdown();
}
Example 5
Source File: TestFTPFileSystem.java From RDFS with Apache License 2.0 | 5 votes |
/**
 * Tests FTPFileSystem, create(), open(), delete(), mkdirs(), rename(),
 * listStatus(), getStatus() APIs.
 *
 * @throws Exception
 */
public void testReadWrite() throws Exception {
  DFSTestUtil util = new DFSTestUtil("TestFTPFileSystem", 20, 3, 1024 * 1024);
  localFs.setWorkingDirectory(workDir);
  Path localData = new Path(workDir, "srcData");
  Path remoteData = new Path("srcData");

  util.createFiles(localFs, localData.toUri().getPath());

  boolean dataConsistency = util.checkFiles(localFs, localData.getName());
  assertTrue("Test data corrupted", dataConsistency);

  // Copy files and directories recursively to FTP file system.
  boolean filesCopied = FileUtil.copy(localFs, localData, ftpFs, remoteData,
      false, defaultConf);
  assertTrue("Copying to FTPFileSystem failed", filesCopied);

  // Rename the remote copy
  Path renamedData = new Path("Renamed");
  boolean renamed = ftpFs.rename(remoteData, renamedData);
  assertTrue("Rename failed", renamed);

  // Copy files and directories from FTP file system and delete remote copy.
  filesCopied = FileUtil.copy(ftpFs, renamedData, localFs, workDir, true,
      defaultConf);
  assertTrue("Copying from FTPFileSystem fails", filesCopied);

  // Check if the data was received completely without any corruption.
  dataConsistency = util.checkFiles(localFs, renamedData.getName());
  assertTrue("Invalid or corrupted data received from FTP Server!",
      dataConsistency);

  // Delete local copies
  boolean deleteSuccess = localFs.delete(renamedData, true)
      & localFs.delete(localData, true);
  assertTrue("Local test data deletion failed", deleteSuccess);
}
Example 6
Source File: TestFsck.java From hadoop with Apache License 2.0 | 5 votes |
/** Test fsck with symlinks in the filesystem */
@Test
public void testFsckSymlink() throws Exception {
  final DFSTestUtil util = new DFSTestUtil.Builder().
      setName(getClass().getSimpleName()).setNumFiles(1).build();
  final Configuration conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);

  MiniDFSCluster cluster = null;
  FileSystem fs = null;
  try {
    final long precision = 1L;
    conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, precision);
    conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
    fs = cluster.getFileSystem();
    final String fileName = "/srcdat";
    util.createFiles(fs, fileName);
    final FileContext fc = FileContext.getFileContext(
        cluster.getConfiguration(0));
    final Path file = new Path(fileName);
    final Path symlink = new Path("/srcdat-symlink");
    fc.createSymlink(file, symlink, false);
    util.waitReplication(fs, fileName, (short)3);
    long aTime = fc.getFileStatus(symlink).getAccessTime();
    Thread.sleep(precision);
    setupAuditLogs();
    String outStr = runFsck(conf, 0, true, "/");
    verifyAuditLogs();
    assertEquals(aTime, fc.getFileStatus(symlink).getAccessTime());
    System.out.println(outStr);
    assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
    assertTrue(outStr.contains("Total symlinks:\t\t1"));
    util.cleanup(fs, fileName);
  } finally {
    if (fs != null) { try { fs.close(); } catch (Exception e) {} }
    if (cluster != null) { cluster.shutdown(); }
  }
}
Example 7
Source File: TestListCorruptFileBlocks.java From big-c with Apache License 2.0 | 4 votes |
/**
 * Test if NN.listCorruptFiles() returns the right number of results.
 * The corrupt blocks are detected by the BlockPoolSliceScanner.
 * Also, test that DFS.listCorruptFileBlocks can make multiple successive
 * calls.
 */
@Test (timeout=300000)
public void testMaxCorruptFiles() throws Exception {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new HdfsConfiguration();
    conf.setInt(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,
        3 * 1000); // datanode sends block reports
    cluster = new MiniDFSCluster.Builder(conf).build();
    FileSystem fs = cluster.getFileSystem();
    final int maxCorruptFileBlocks =
        FSNamesystem.DEFAULT_MAX_CORRUPT_FILEBLOCKS_RETURNED;

    // create 110 files with one block each
    DFSTestUtil util = new DFSTestUtil.Builder().setName("testMaxCorruptFiles").
        setNumFiles(maxCorruptFileBlocks * 3).setMaxLevels(1).setMaxSize(512).
        build();
    util.createFiles(fs, "/srcdat2", (short) 1);
    util.waitReplication(fs, "/srcdat2", (short) 1);

    // verify that there are no bad blocks.
    final NameNode namenode = cluster.getNameNode();
    Collection<FSNamesystem.CorruptFileBlockInfo> badFiles = namenode.
        getNamesystem().listCorruptFileBlocks("/srcdat2", null);
    assertTrue("Namenode has " + badFiles.size()
        + " corrupt files. Expecting none.", badFiles.size() == 0);

    // Now deliberately remove blocks from all files
    final String bpid = cluster.getNamesystem().getBlockPoolId();
    for (int i=0; i<4; i++) {
      for (int j=0; j<=1; j++) {
        File storageDir = cluster.getInstanceStorageDir(i, j);
        File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
        LOG.info("Removing files from " + data_dir);
        List<File> metadataFiles = MiniDFSCluster.getAllBlockMetadataFiles(
            data_dir);
        if (metadataFiles == null)
          continue;
        for (File metadataFile : metadataFiles) {
          File blockFile = Block.metaToBlockFile(metadataFile);
          assertTrue("Cannot remove file.", blockFile.delete());
          assertTrue("Cannot remove file.", metadataFile.delete());
        }
      }
    }

    // Occasionally the BlockPoolSliceScanner can run before we have removed
    // the blocks. Restart the Datanode to trigger the scanner into running
    // once more.
    LOG.info("Restarting Datanode to trigger BlockPoolSliceScanner");
    cluster.restartDataNodes();
    cluster.waitActive();

    badFiles = namenode.getNamesystem().listCorruptFileBlocks("/srcdat2", null);

    while (badFiles.size() < maxCorruptFileBlocks) {
      LOG.info("# of corrupt files is: " + badFiles.size());
      Thread.sleep(10000);
      badFiles = namenode.getNamesystem().
          listCorruptFileBlocks("/srcdat2", null);
    }
    badFiles = namenode.getNamesystem().
        listCorruptFileBlocks("/srcdat2", null);
    LOG.info("Namenode has bad files. " + badFiles.size());
    assertTrue("Namenode has " + badFiles.size() + " bad files. Expecting " +
        maxCorruptFileBlocks + ".",
        badFiles.size() == maxCorruptFileBlocks);

    CorruptFileBlockIterator iter = (CorruptFileBlockIterator)
        fs.listCorruptFileBlocks(new Path("/srcdat2"));
    int corruptPaths = countPaths(iter);
    assertTrue("Expected more than " + maxCorruptFileBlocks +
        " corrupt file blocks but got " + corruptPaths,
        corruptPaths > maxCorruptFileBlocks);
    assertTrue("Iterator should have made more than 1 call but made " +
        iter.getCallsMade(), iter.getCallsMade() > 1);

    util.cleanup(fs, "/srcdat2");
  } finally {
    if (cluster != null) { cluster.shutdown(); }
  }
}
Example 8
Source File: TestNameNodeReconfigure.java From RDFS with Apache License 2.0 | 4 votes |
/**
 * Test that we can change the block placement policy through the
 * reconfigurable API.
 */
@Test
public void testChangeBlockPlacementPolicy()
    throws IOException, ReconfigurationException {
  AtomicInteger callCounter = new AtomicInteger(0);
  MockPlacementPolicy.setCallCounter(callCounter);

  DFSTestUtil util = new DFSTestUtil("", 2, 1, 512);

  // write some files with the default block placement policy
  util.createFiles(fs, "/reconfdat1", (short) 3);
  util.waitReplication(fs, "/reconfdat1", (short) 3);

  assertTrue("calls already made to MockPlacementPolicy",
      callCounter.get() == 0);

  // switch over to the mock placement policy
  cluster.getNameNode().reconfigureProperty("dfs.block.replicator.classname",
      "org.apache.hadoop.hdfs.server." +
      "namenode." +
      "TestNameNodeReconfigure$" +
      "MockPlacementPolicy");

  // write some files with the mock placement policy
  util.createFiles(fs, "/reconfdat2", (short) 3);
  util.waitReplication(fs, "/reconfdat2", (short) 3);

  int callsMade1 = callCounter.get();

  // check that calls were made to mock placement policy
  assertTrue("no calls made to MockPlacementPolicy", callsMade1 > 0);
  LOG.info("" + callsMade1 + " calls made to MockPlacementPolicy");

  // now try to change it to a non-existent class
  try {
    cluster.getNameNode().reconfigureProperty("dfs.block.replicator.classname",
        "does.not.exist");
    fail("ReconfigurationException expected");
  } catch (RuntimeException expected) {
    assertTrue("exception should have cause", expected.getCause() != null);
    assertTrue("exception's cause should have cause",
        expected.getCause().getCause() != null);
    assertTrue("ClassNotFoundException expected but got " +
        expected.getCause().getCause().getClass().getCanonicalName(),
        expected.getCause().getCause() instanceof ClassNotFoundException);
  }

  // write some files, they should still go to the mock placement policy
  util.createFiles(fs, "/reconfdat3", (short) 3);
  util.waitReplication(fs, "/reconfdat3", (short) 3);

  int callsMade2 = callCounter.get();

  // check that more calls were made to mock placement policy
  assertTrue("no calls made to MockPlacementPolicy", callsMade2 > callsMade1);
  LOG.info("" + (callsMade2 - callsMade1) +
      " calls made to MockPlacementPolicy");

  // now revert back to the default policy
  cluster.getNameNode().reconfigureProperty("dfs.block.replicator.classname",
      null);

  // write some files with the default block placement policy
  util.createFiles(fs, "/reconfdat4", (short) 3);
  util.waitReplication(fs, "/reconfdat4", (short) 3);

  // make sure that no more calls were made to mock placement policy
  assertTrue("more calls made to MockPlacementPolicy",
      callCounter.get() == callsMade2);

  util.cleanup(fs, "/reconfdat1");
  util.cleanup(fs, "/reconfdat2");
  util.cleanup(fs, "/reconfdat3");
  util.cleanup(fs, "/reconfdat4");
}
Example 9
Source File: TestFastCopyDeletedBlocks.java From RDFS with Apache License 2.0 | 4 votes |
@Test
public void testDeadDatanodes() throws Exception {
  DFSTestUtil util = new DFSTestUtil("testDeadDatanodes", 1, 1, MAX_FILE_SIZE);
  String topDir = "/testDeadDatanodes";
  util.createFiles(fs, topDir);
  FastCopy fastCopy = new FastCopy(conf);

  // Find the locations for the last block of the file.
  String filename = util.getFileNames(topDir)[0];
  LocatedBlocks lbks = cluster.getNameNode().getBlockLocations(filename, 0,
      Long.MAX_VALUE);
  assertNotNull(lbks);

  int namespaceID = cluster.getNameNode().getNamespaceID();
  DataNode dn = cluster.getDataNodes().get(0);
  DatanodeID dnId = dn.getDNRegistrationForNS(namespaceID);
  List<Block> deleteList = new ArrayList<Block>();
  for (LocatedBlock block : lbks.getLocatedBlocks()) {
    deleteList.add(block.getBlock());
  }

  assertEquals(lbks.locatedBlockCount(),
      dn.getFSDataset().getBlockReport(namespaceID).length);
  DatanodeDescriptor dnDs = cluster.getNameNode().namesystem.getDatanode(dnId);
  dnDs.addBlocksToBeInvalidated(deleteList);

  // Make sure all blocks are deleted.
  while (dn.getFSDataset().getBlockReport(namespaceID).length != 0) {
    Thread.sleep(1000);
  }

  // Now run FastCopy
  try {
    for (String fileName : util.getFileNames(topDir)) {
      fastCopy.copy(fileName, fileName + "dst", (DistributedFileSystem) fs,
          (DistributedFileSystem) fs);
    }
  } finally {
    fastCopy.shutdown();
  }

  // Make sure no errors are reported.
  Map<DatanodeInfo, Integer> dnErrors = fastCopy.getDatanodeErrors();
  assertEquals(0, dnErrors.size());
}
Example 10
Source File: TestFsck.java From big-c with Apache License 2.0 | 4 votes |
/** check if option -list-corruptfiles of fsck command works properly */
@Test
public void testFsckListCorruptFilesBlocks() throws Exception {
  Configuration conf = new Configuration();
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
  conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1);
  FileSystem fs = null;
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
    DFSTestUtil util = new DFSTestUtil.Builder().
        setName("testGetCorruptFiles").setNumFiles(3).setMaxLevels(1).
        setMaxSize(1024).build();
    util.createFiles(fs, "/corruptData", (short) 1);
    util.waitReplication(fs, "/corruptData", (short) 1);

    // String outStr = runFsck(conf, 0, true, "/corruptData", "-list-corruptfileblocks");
    String outStr = runFsck(conf, 0, false, "/corruptData",
        "-list-corruptfileblocks");
    System.out.println("1. good fsck out: " + outStr);
    assertTrue(outStr.contains("has 0 CORRUPT files"));

    // delete the blocks
    final String bpid = cluster.getNamesystem().getBlockPoolId();
    for (int i=0; i<4; i++) {
      for (int j=0; j<=1; j++) {
        File storageDir = cluster.getInstanceStorageDir(i, j);
        File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
        List<File> metadataFiles = MiniDFSCluster.getAllBlockMetadataFiles(
            data_dir);
        if (metadataFiles == null)
          continue;
        for (File metadataFile : metadataFiles) {
          File blockFile = Block.metaToBlockFile(metadataFile);
          assertTrue("Cannot remove file.", blockFile.delete());
          assertTrue("Cannot remove file.", metadataFile.delete());
        }
      }
    }

    // wait for the namenode to see the corruption
    final NamenodeProtocols namenode = cluster.getNameNodeRpc();
    CorruptFileBlocks corruptFileBlocks = namenode
        .listCorruptFileBlocks("/corruptData", null);
    int numCorrupt = corruptFileBlocks.getFiles().length;
    while (numCorrupt == 0) {
      Thread.sleep(1000);
      corruptFileBlocks = namenode
          .listCorruptFileBlocks("/corruptData", null);
      numCorrupt = corruptFileBlocks.getFiles().length;
    }
    outStr = runFsck(conf, -1, true, "/corruptData", "-list-corruptfileblocks");
    System.out.println("2. bad fsck out: " + outStr);
    assertTrue(outStr.contains("has 3 CORRUPT files"));

    // Do a listing on a dir which doesn't have any corrupt blocks and validate
    util.createFiles(fs, "/goodData");
    outStr = runFsck(conf, 0, true, "/goodData", "-list-corruptfileblocks");
    System.out.println("3. good fsck out: " + outStr);
    assertTrue(outStr.contains("has 0 CORRUPT files"));
    util.cleanup(fs, "/corruptData");
    util.cleanup(fs, "/goodData");
  } finally {
    if (cluster != null) { cluster.shutdown(); }
  }
}
Example 11
Source File: TestFsck.java From big-c with Apache License 2.0 | 4 votes |
/** Test fsck with permission set on inodes */
@Test
public void testFsckPermission() throws Exception {
  final DFSTestUtil util = new DFSTestUtil.Builder().
      setName(getClass().getSimpleName()).setNumFiles(20).build();
  final Configuration conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);

  MiniDFSCluster cluster = null;
  try {
    // Create a cluster with the current user, write some files
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
    final MiniDFSCluster c2 = cluster;
    final String dir = "/dfsck";
    final Path dirpath = new Path(dir);
    final FileSystem fs = c2.getFileSystem();

    util.createFiles(fs, dir);
    util.waitReplication(fs, dir, (short) 3);
    fs.setPermission(dirpath, new FsPermission((short) 0700));

    // run DFSck as another user, should fail with permission issue
    UserGroupInformation fakeUGI = UserGroupInformation.createUserForTesting(
        "ProbablyNotARealUserName", new String[] { "ShangriLa" });
    fakeUGI.doAs(new PrivilegedExceptionAction<Object>() {
      @Override
      public Object run() throws Exception {
        System.out.println(runFsck(conf, -1, true, dir));
        return null;
      }
    });

    // set permission and try DFSck again as the fake user, should succeed
    fs.setPermission(dirpath, new FsPermission((short) 0777));
    fakeUGI.doAs(new PrivilegedExceptionAction<Object>() {
      @Override
      public Object run() throws Exception {
        final String outStr = runFsck(conf, 0, true, dir);
        System.out.println(outStr);
        assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
        return null;
      }
    });

    util.cleanup(fs, dir);
  } finally {
    if (cluster != null) { cluster.shutdown(); }
  }
}
Example 12
Source File: TestListCorruptFileBlocks.java From hadoop with Apache License 2.0 | 4 votes |
@Test (timeout=300000)
public void testlistCorruptFileBlocks() throws Exception {
  Configuration conf = new Configuration();
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
  conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY,
      1); // datanode scans directories
  FileSystem fs = null;
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
    DFSTestUtil util = new DFSTestUtil.Builder().
        setName("testGetCorruptFiles").setNumFiles(3).setMaxLevels(1).
        setMaxSize(1024).build();
    util.createFiles(fs, "/corruptData");

    final NameNode namenode = cluster.getNameNode();
    Collection<FSNamesystem.CorruptFileBlockInfo> corruptFileBlocks =
        namenode.getNamesystem().listCorruptFileBlocks("/corruptData", null);
    int numCorrupt = corruptFileBlocks.size();
    assertTrue(numCorrupt == 0);

    // delete the blocks
    String bpid = cluster.getNamesystem().getBlockPoolId();
    for (int i = 0; i < 4; i++) {
      for (int j = 0; j <= 1; j++) {
        File storageDir = cluster.getInstanceStorageDir(i, j);
        File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
        List<File> metadataFiles = MiniDFSCluster.getAllBlockMetadataFiles(
            data_dir);
        if (metadataFiles == null)
          continue;
        // assertTrue("Blocks do not exist in data-dir", (blocks != null) &&
        // (blocks.length > 0));
        for (File metadataFile : metadataFiles) {
          File blockFile = Block.metaToBlockFile(metadataFile);
          LOG.info("Deliberately removing file " + blockFile.getName());
          assertTrue("Cannot remove file.", blockFile.delete());
          LOG.info("Deliberately removing file " + metadataFile.getName());
          assertTrue("Cannot remove file.", metadataFile.delete());
          // break;
        }
      }
    }

    int count = 0;
    corruptFileBlocks = namenode.getNamesystem().
        listCorruptFileBlocks("/corruptData", null);
    numCorrupt = corruptFileBlocks.size();
    while (numCorrupt < 3) {
      Thread.sleep(1000);
      corruptFileBlocks = namenode.getNamesystem()
          .listCorruptFileBlocks("/corruptData", null);
      numCorrupt = corruptFileBlocks.size();
      count++;
      if (count > 30)
        break;
    }

    // Validate we get all the corrupt files
    LOG.info("Namenode has bad files. " + numCorrupt);
    assertTrue(numCorrupt == 3);

    // test the paging here
    FSNamesystem.CorruptFileBlockInfo[] cfb = corruptFileBlocks
        .toArray(new FSNamesystem.CorruptFileBlockInfo[0]);
    // now get the 2nd and 3rd file that is corrupt
    String[] cookie = new String[]{"1"};
    Collection<FSNamesystem.CorruptFileBlockInfo> nextCorruptFileBlocks =
        namenode.getNamesystem()
            .listCorruptFileBlocks("/corruptData", cookie);
    FSNamesystem.CorruptFileBlockInfo[] ncfb = nextCorruptFileBlocks
        .toArray(new FSNamesystem.CorruptFileBlockInfo[0]);
    numCorrupt = nextCorruptFileBlocks.size();
    assertTrue(numCorrupt == 2);
    assertTrue(ncfb[0].block.getBlockName()
        .equalsIgnoreCase(cfb[1].block.getBlockName()));

    corruptFileBlocks = namenode.getNamesystem()
        .listCorruptFileBlocks("/corruptData", cookie);
    numCorrupt = corruptFileBlocks.size();
    assertTrue(numCorrupt == 0);

    // Do a listing on a dir which doesn't have any corrupt blocks and
    // validate
    util.createFiles(fs, "/goodData");
    corruptFileBlocks =
        namenode.getNamesystem().listCorruptFileBlocks("/goodData", null);
    numCorrupt = corruptFileBlocks.size();
    assertTrue(numCorrupt == 0);
    util.cleanup(fs, "/corruptData");
    util.cleanup(fs, "/goodData");
  } finally {
    if (cluster != null) { cluster.shutdown(); }
  }
}
Example 13
Source File: TestListCorruptFileBlocks.java From hadoop with Apache License 2.0 | 4 votes |
/**
 * test listCorruptFileBlocks in DistributedFileSystem
 */
@Test (timeout=300000)
public void testlistCorruptFileBlocksDFS() throws Exception {
  Configuration conf = new Configuration();
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
  conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY,
      1); // datanode scans directories
  FileSystem fs = null;
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    DFSTestUtil util = new DFSTestUtil.Builder().
        setName("testGetCorruptFiles").setNumFiles(3).
        setMaxLevels(1).setMaxSize(1024).build();
    util.createFiles(fs, "/corruptData");

    RemoteIterator<Path> corruptFileBlocks =
        dfs.listCorruptFileBlocks(new Path("/corruptData"));
    int numCorrupt = countPaths(corruptFileBlocks);
    assertTrue(numCorrupt == 0);

    // delete the blocks
    String bpid = cluster.getNamesystem().getBlockPoolId();
    // Loop through the number of data directories per datanode (2)
    for (int i = 0; i < 2; i++) {
      File storageDir = cluster.getInstanceStorageDir(0, i);
      File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
      List<File> metadataFiles = MiniDFSCluster.getAllBlockMetadataFiles(
          data_dir);
      if (metadataFiles == null)
        continue;
      // assertTrue("Blocks do not exist in data-dir", (blocks != null) &&
      // (blocks.length > 0));
      for (File metadataFile : metadataFiles) {
        File blockFile = Block.metaToBlockFile(metadataFile);
        LOG.info("Deliberately removing file " + blockFile.getName());
        assertTrue("Cannot remove file.", blockFile.delete());
        LOG.info("Deliberately removing file " + metadataFile.getName());
        assertTrue("Cannot remove file.", metadataFile.delete());
        // break;
      }
    }

    int count = 0;
    corruptFileBlocks = dfs.listCorruptFileBlocks(new Path("/corruptData"));
    numCorrupt = countPaths(corruptFileBlocks);
    while (numCorrupt < 3) {
      Thread.sleep(1000);
      corruptFileBlocks = dfs.listCorruptFileBlocks(new Path("/corruptData"));
      numCorrupt = countPaths(corruptFileBlocks);
      count++;
      if (count > 30)
        break;
    }

    // Validate we get all the corrupt files
    LOG.info("Namenode has bad files. " + numCorrupt);
    assertTrue(numCorrupt == 3);

    util.cleanup(fs, "/corruptData");
    util.cleanup(fs, "/goodData");
  } finally {
    if (cluster != null) { cluster.shutdown(); }
  }
}