Java Code Examples for org.apache.hadoop.hdfs.MiniDFSCluster#getAllBlockMetadataFiles()
The following examples show how to use org.apache.hadoop.hdfs.MiniDFSCluster#getAllBlockMetadataFiles(). All of them come from the test suite of the named project; the source file and license are noted above each example.
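MiniDFSCluster.getAllBlockMetadataFiles(dir) lists the block metadata (*.meta) files under a finalized block directory of a test cluster, and may return null when the directory holds no finalized blocks. Before the full tests, here is a minimal sketch of the pattern they all share: walking every storage directory and deleting each block file together with its metadata file to simulate on-disk corruption. The cluster variable and the 4x2 datanode/storage-directory layout are assumptions lifted from the tests below, not requirements of the API.

// Minimal sketch: assumes a running MiniDFSCluster named "cluster" with
// four datanodes and two storage directories each, as in the tests below.
String bpid = cluster.getNamesystem().getBlockPoolId();
for (int dn = 0; dn < 4; dn++) {
  for (int dirIdx = 0; dirIdx <= 1; dirIdx++) {
    File storageDir = cluster.getInstanceStorageDir(dn, dirIdx);
    File finalizedDir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
    List<File> metaFiles = MiniDFSCluster.getAllBlockMetadataFiles(finalizedDir);
    if (metaFiles == null) {
      continue; // no finalized blocks on this volume
    }
    for (File metaFile : metaFiles) {
      // Block.metaToBlockFile maps blk_<id>_<genstamp>.meta back to blk_<id>
      File blockFile = Block.metaToBlockFile(metaFile);
      // Deleting both files simulates corruption/loss of the replica
      assertTrue(blockFile.delete());
      assertTrue(metaFile.delete());
    }
  }
}

After a pass like this, the tests poll the NameNode (directly or through fsck) until the directory scanner and block reports surface the damage as corrupt file blocks.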
Example 1
Source File: TestListCorruptFileBlocks.java (from hadoop, Apache License 2.0)
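This test deletes every block file and its metadata file across all datanode storage directories, then verifies that FSNamesystem#listCorruptFileBlocks reports all three corrupt files and that its cookie-based paging returns the remaining results on successive calls.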
@Test (timeout=300000)
public void testlistCorruptFileBlocks() throws Exception {
  Configuration conf = new Configuration();
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
  // datanode scans directories
  conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1);
  FileSystem fs = null;
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
    DFSTestUtil util = new DFSTestUtil.Builder().
        setName("testGetCorruptFiles").setNumFiles(3).setMaxLevels(1).
        setMaxSize(1024).build();
    util.createFiles(fs, "/corruptData");

    final NameNode namenode = cluster.getNameNode();
    Collection<FSNamesystem.CorruptFileBlockInfo> corruptFileBlocks =
        namenode.getNamesystem().listCorruptFileBlocks("/corruptData", null);
    int numCorrupt = corruptFileBlocks.size();
    assertTrue(numCorrupt == 0);

    // delete the blocks
    String bpid = cluster.getNamesystem().getBlockPoolId();
    for (int i = 0; i < 4; i++) {
      for (int j = 0; j <= 1; j++) {
        File storageDir = cluster.getInstanceStorageDir(i, j);
        File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
        List<File> metadataFiles = MiniDFSCluster.getAllBlockMetadataFiles(
            data_dir);
        if (metadataFiles == null)
          continue;
        for (File metadataFile : metadataFiles) {
          File blockFile = Block.metaToBlockFile(metadataFile);
          LOG.info("Deliberately removing file " + blockFile.getName());
          assertTrue("Cannot remove file.", blockFile.delete());
          LOG.info("Deliberately removing file " + metadataFile.getName());
          assertTrue("Cannot remove file.", metadataFile.delete());
        }
      }
    }

    int count = 0;
    corruptFileBlocks = namenode.getNamesystem().
        listCorruptFileBlocks("/corruptData", null);
    numCorrupt = corruptFileBlocks.size();
    while (numCorrupt < 3) {
      Thread.sleep(1000);
      corruptFileBlocks = namenode.getNamesystem()
          .listCorruptFileBlocks("/corruptData", null);
      numCorrupt = corruptFileBlocks.size();
      count++;
      if (count > 30)
        break;
    }
    // Validate we get all the corrupt files
    LOG.info("Namenode has bad files. " + numCorrupt);
    assertTrue(numCorrupt == 3);

    // test the paging here
    FSNamesystem.CorruptFileBlockInfo[] cfb = corruptFileBlocks
        .toArray(new FSNamesystem.CorruptFileBlockInfo[0]);
    // now get the 2nd and 3rd file that is corrupt
    String[] cookie = new String[]{"1"};
    Collection<FSNamesystem.CorruptFileBlockInfo> nextCorruptFileBlocks =
        namenode.getNamesystem()
            .listCorruptFileBlocks("/corruptData", cookie);
    FSNamesystem.CorruptFileBlockInfo[] ncfb = nextCorruptFileBlocks
        .toArray(new FSNamesystem.CorruptFileBlockInfo[0]);
    numCorrupt = nextCorruptFileBlocks.size();
    assertTrue(numCorrupt == 2);
    assertTrue(ncfb[0].block.getBlockName()
        .equalsIgnoreCase(cfb[1].block.getBlockName()));

    corruptFileBlocks = namenode.getNamesystem()
        .listCorruptFileBlocks("/corruptData", cookie);
    numCorrupt = corruptFileBlocks.size();
    assertTrue(numCorrupt == 0);

    // Do a listing on a dir which doesn't have any corrupt blocks and
    // validate
    util.createFiles(fs, "/goodData");
    corruptFileBlocks =
        namenode.getNamesystem().listCorruptFileBlocks("/goodData", null);
    numCorrupt = corruptFileBlocks.size();
    assertTrue(numCorrupt == 0);
    util.cleanup(fs, "/corruptData");
    util.cleanup(fs, "/goodData");
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Example 2
Source File: TestListCorruptFileBlocks.java (from hadoop, Apache License 2.0)
/**
 * Test listCorruptFileBlocks in DistributedFileSystem.
 */
@Test (timeout=300000)
public void testlistCorruptFileBlocksDFS() throws Exception {
  Configuration conf = new Configuration();
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
  // datanode scans directories
  conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1);
  FileSystem fs = null;
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    DFSTestUtil util = new DFSTestUtil.Builder().
        setName("testGetCorruptFiles").setNumFiles(3).
        setMaxLevels(1).setMaxSize(1024).build();
    util.createFiles(fs, "/corruptData");

    RemoteIterator<Path> corruptFileBlocks =
        dfs.listCorruptFileBlocks(new Path("/corruptData"));
    int numCorrupt = countPaths(corruptFileBlocks);
    assertTrue(numCorrupt == 0);

    // delete the blocks
    String bpid = cluster.getNamesystem().getBlockPoolId();
    // Loop over the number of data directories per datanode (2)
    for (int i = 0; i < 2; i++) {
      File storageDir = cluster.getInstanceStorageDir(0, i);
      File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
      List<File> metadataFiles = MiniDFSCluster.getAllBlockMetadataFiles(
          data_dir);
      if (metadataFiles == null)
        continue;
      for (File metadataFile : metadataFiles) {
        File blockFile = Block.metaToBlockFile(metadataFile);
        LOG.info("Deliberately removing file " + blockFile.getName());
        assertTrue("Cannot remove file.", blockFile.delete());
        LOG.info("Deliberately removing file " + metadataFile.getName());
        assertTrue("Cannot remove file.", metadataFile.delete());
      }
    }

    int count = 0;
    corruptFileBlocks = dfs.listCorruptFileBlocks(new Path("/corruptData"));
    numCorrupt = countPaths(corruptFileBlocks);
    while (numCorrupt < 3) {
      Thread.sleep(1000);
      corruptFileBlocks = dfs.listCorruptFileBlocks(new Path("/corruptData"));
      numCorrupt = countPaths(corruptFileBlocks);
      count++;
      if (count > 30)
        break;
    }
    // Validate we get all the corrupt files
    LOG.info("Namenode has bad files. " + numCorrupt);
    assertTrue(numCorrupt == 3);

    util.cleanup(fs, "/corruptData");
    util.cleanup(fs, "/goodData");
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Example 3
Source File: TestListCorruptFileBlocks.java (from hadoop, Apache License 2.0)
/**
 * Test if NN.listCorruptFiles() returns the right number of results.
 * The corrupt blocks are detected by the BlockPoolSliceScanner.
 * Also, test that DFS.listCorruptFileBlocks can make multiple successive
 * calls.
 */
@Test (timeout=300000)
public void testMaxCorruptFiles() throws Exception {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new HdfsConfiguration();
    // datanode sends block reports
    conf.setInt(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 3 * 1000);
    cluster = new MiniDFSCluster.Builder(conf).build();
    FileSystem fs = cluster.getFileSystem();
    final int maxCorruptFileBlocks =
        FSNamesystem.DEFAULT_MAX_CORRUPT_FILEBLOCKS_RETURNED;

    // create 3 * maxCorruptFileBlocks files with one block each
    DFSTestUtil util = new DFSTestUtil.Builder().setName("testMaxCorruptFiles").
        setNumFiles(maxCorruptFileBlocks * 3).setMaxLevels(1).setMaxSize(512).
        build();
    util.createFiles(fs, "/srcdat2", (short) 1);
    util.waitReplication(fs, "/srcdat2", (short) 1);

    // verify that there are no bad blocks.
    final NameNode namenode = cluster.getNameNode();
    Collection<FSNamesystem.CorruptFileBlockInfo> badFiles = namenode.
        getNamesystem().listCorruptFileBlocks("/srcdat2", null);
    assertTrue("Namenode has " + badFiles.size()
        + " corrupt files. Expecting none.", badFiles.size() == 0);

    // Now deliberately remove blocks from all files
    final String bpid = cluster.getNamesystem().getBlockPoolId();
    for (int i = 0; i < 4; i++) {
      for (int j = 0; j <= 1; j++) {
        File storageDir = cluster.getInstanceStorageDir(i, j);
        File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
        LOG.info("Removing files from " + data_dir);
        List<File> metadataFiles = MiniDFSCluster.getAllBlockMetadataFiles(
            data_dir);
        if (metadataFiles == null)
          continue;
        for (File metadataFile : metadataFiles) {
          File blockFile = Block.metaToBlockFile(metadataFile);
          assertTrue("Cannot remove file.", blockFile.delete());
          assertTrue("Cannot remove file.", metadataFile.delete());
        }
      }
    }

    // Occasionally the BlockPoolSliceScanner can run before we have removed
    // the blocks. Restart the Datanode to trigger the scanner into running
    // once more.
    LOG.info("Restarting Datanode to trigger BlockPoolSliceScanner");
    cluster.restartDataNodes();
    cluster.waitActive();

    badFiles = namenode.getNamesystem().listCorruptFileBlocks("/srcdat2", null);
    while (badFiles.size() < maxCorruptFileBlocks) {
      LOG.info("# of corrupt files is: " + badFiles.size());
      Thread.sleep(10000);
      badFiles = namenode.getNamesystem().
          listCorruptFileBlocks("/srcdat2", null);
    }
    badFiles = namenode.getNamesystem().
        listCorruptFileBlocks("/srcdat2", null);
    LOG.info("Namenode has bad files. " + badFiles.size());
    assertTrue("Namenode has " + badFiles.size() + " bad files. Expecting "
        + maxCorruptFileBlocks + ".",
        badFiles.size() == maxCorruptFileBlocks);

    CorruptFileBlockIterator iter = (CorruptFileBlockIterator)
        fs.listCorruptFileBlocks(new Path("/srcdat2"));
    int corruptPaths = countPaths(iter);
    assertTrue("Expected more than " + maxCorruptFileBlocks
        + " corrupt file blocks but got " + corruptPaths,
        corruptPaths > maxCorruptFileBlocks);
    assertTrue("Iterator should have made more than 1 call but made "
        + iter.getCallsMade(), iter.getCallsMade() > 1);

    util.cleanup(fs, "/srcdat2");
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Example 4
Source File: TestFsck.java (from hadoop, Apache License 2.0)
/**
 * Check if the -list-corruptfileblocks option of the fsck command works
 * properly.
 */
@Test
public void testFsckListCorruptFilesBlocks() throws Exception {
  Configuration conf = new Configuration();
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
  conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1);
  FileSystem fs = null;
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
    DFSTestUtil util = new DFSTestUtil.Builder().
        setName("testGetCorruptFiles").setNumFiles(3).setMaxLevels(1).
        setMaxSize(1024).build();
    util.createFiles(fs, "/corruptData", (short) 1);
    util.waitReplication(fs, "/corruptData", (short) 1);

    String outStr = runFsck(conf, 0, false, "/corruptData",
        "-list-corruptfileblocks");
    System.out.println("1. good fsck out: " + outStr);
    assertTrue(outStr.contains("has 0 CORRUPT files"));

    // delete the blocks
    final String bpid = cluster.getNamesystem().getBlockPoolId();
    for (int i = 0; i < 4; i++) {
      for (int j = 0; j <= 1; j++) {
        File storageDir = cluster.getInstanceStorageDir(i, j);
        File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
        List<File> metadataFiles = MiniDFSCluster.getAllBlockMetadataFiles(
            data_dir);
        if (metadataFiles == null)
          continue;
        for (File metadataFile : metadataFiles) {
          File blockFile = Block.metaToBlockFile(metadataFile);
          assertTrue("Cannot remove file.", blockFile.delete());
          assertTrue("Cannot remove file.", metadataFile.delete());
        }
      }
    }

    // wait for the namenode to see the corruption
    final NamenodeProtocols namenode = cluster.getNameNodeRpc();
    CorruptFileBlocks corruptFileBlocks = namenode
        .listCorruptFileBlocks("/corruptData", null);
    int numCorrupt = corruptFileBlocks.getFiles().length;
    while (numCorrupt == 0) {
      Thread.sleep(1000);
      corruptFileBlocks = namenode
          .listCorruptFileBlocks("/corruptData", null);
      numCorrupt = corruptFileBlocks.getFiles().length;
    }
    outStr = runFsck(conf, -1, true, "/corruptData",
        "-list-corruptfileblocks");
    System.out.println("2. bad fsck out: " + outStr);
    assertTrue(outStr.contains("has 3 CORRUPT files"));

    // Do a listing on a dir which doesn't have any corrupt blocks and
    // validate
    util.createFiles(fs, "/goodData");
    outStr = runFsck(conf, 0, true, "/goodData", "-list-corruptfileblocks");
    System.out.println("3. good fsck out: " + outStr);
    assertTrue(outStr.contains("has 0 CORRUPT files"));
    util.cleanup(fs, "/corruptData");
    util.cleanup(fs, "/goodData");
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Example 5
Source File: TestDataNodeVolumeFailure.java (from hadoop, Apache License 2.0)
/**
 * Look for real blocks by counting *.meta files in all the storage dirs.
 * @param map map from block id to its BlockLocs; the per-block file count
 *            is incremented for every metadata file found
 * @return total number of block metadata files found
 */
private int countRealBlocks(Map<String, BlockLocs> map) {
  int total = 0;
  final String bpid = cluster.getNamesystem().getBlockPoolId();
  for (int i = 0; i < dn_num; i++) {
    for (int j = 0; j <= 1; j++) {
      File storageDir = cluster.getInstanceStorageDir(i, j);
      File dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
      if (dir == null) {
        System.out.println("dir is null for dn=" + i + " and data_dir=" + j);
        continue;
      }
      List<File> res = MiniDFSCluster.getAllBlockMetadataFiles(dir);
      if (res == null) {
        System.out.println("res is null for dir = " + dir + " i=" + i
            + " and j=" + j);
        continue;
      }
      for (File f : res) {
        String s = f.getName();
        assertNotNull("Block file name should not be null", s);
        // extract the block id from "blk_<id>_<genstamp>.meta": take the
        // substring between the first and last '_'
        String bid = s.substring(s.indexOf("_") + 1, s.lastIndexOf("_"));
        BlockLocs val = map.get(bid);
        if (val == null) {
          val = new BlockLocs();
        }
        val.num_files++; // one more file for the block
        map.put(bid, val);
      }
      total += res.size();
    }
  }
  return total;
}