Java Code Examples for org.apache.hadoop.hdfs.MiniDFSCluster#getNameDirs()
The following examples show how to use org.apache.hadoop.hdfs.MiniDFSCluster#getNameDirs(). In recent Hadoop versions this method takes a NameNode index and returns the collection of name-directory URIs configured for that NameNode; older versions (see Examples 15 and 16) expose a no-argument variant that returns the directories as File objects. Tests typically use it to inspect or manipulate a NameNode's local storage directories.
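Before the examples proper, here is a minimal sketch of the pattern most of them share: start a MiniDFSCluster, read back a NameNode's name directories with getNameDirs(int), and inspect them as local files. This sketch assumes the Hadoop 2.x test API (MiniDFSCluster.Builder and the index-taking getNameDirs); the class name GetNameDirsSketch and the printed output are illustrative only and do not come from any of the projects below.

import java.io.File;
import java.net.URI;
import java.util.Collection;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class GetNameDirsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = null;
    try {
      // Start a single-NameNode mini cluster with no DataNodes.
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();

      // getNameDirs(0) returns the name-directory URIs of NameNode 0,
      // typically file: URIs under the cluster's base test directory.
      Collection<URI> nameDirs = cluster.getNameDirs(0);
      for (URI uri : nameDirs) {
        File dir = new File(uri.getPath());
        System.out.println("NameNode 0 name dir: " + dir + " exists=" + dir.exists());
      }
    } finally {
      if (cluster != null) {
        cluster.shutdown();
      }
    }
  }
}

The tests below use the same collection to, for example, create Storage.STORAGE_TMP_PREVIOUS subdirectories (simulating an in-progress upgrade) or flip directory permissions (simulating a read-only mount).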
Example 1
Source File: TestDFSUpgradeWithHA.java From hadoop with Apache License 2.0
/**
 * Make sure that an HA NN will start if a previous upgrade was in progress.
 */
@Test
public void testStartingWithUpgradeInProgressSucceeds() throws Exception {
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf)
        .nnTopology(MiniDFSNNTopology.simpleHATopology())
        .numDataNodes(0)
        .build();

    // Simulate an upgrade having started.
    for (int i = 0; i < 2; i++) {
      for (URI uri : cluster.getNameDirs(i)) {
        File prevTmp = new File(new File(uri), Storage.STORAGE_TMP_PREVIOUS);
        LOG.info("creating previous tmp dir: " + prevTmp);
        assertTrue(prevTmp.mkdirs());
      }
    }

    cluster.restartNameNodes();
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Example 2
Source File: TestDFSUpgradeWithHA.java From big-c with Apache License 2.0
/**
 * Make sure that an HA NN will start if a previous upgrade was in progress.
 */
@Test
public void testStartingWithUpgradeInProgressSucceeds() throws Exception {
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf)
        .nnTopology(MiniDFSNNTopology.simpleHATopology())
        .numDataNodes(0)
        .build();

    // Simulate an upgrade having started.
    for (int i = 0; i < 2; i++) {
      for (URI uri : cluster.getNameDirs(i)) {
        File prevTmp = new File(new File(uri), Storage.STORAGE_TMP_PREVIOUS);
        LOG.info("creating previous tmp dir: " + prevTmp);
        assertTrue(prevTmp.mkdirs());
      }
    }

    cluster.restartNameNodes();
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Example 3
Source File: TestDFSUpgradeWithHA.java From hadoop with Apache License 2.0
private static void checkNnPreviousDirExistence(MiniDFSCluster cluster,
    int index, boolean shouldExist) {
  Collection<URI> nameDirs = cluster.getNameDirs(index);
  for (URI nnDir : nameDirs) {
    checkPreviousDirExistence(new File(nnDir), shouldExist);
  }
}
Example 4
Source File: TestCheckpoint.java From hadoop with Apache License 2.0
@Test
public void testNameDirError() throws IOException {
  LOG.info("Starting testNameDirError");
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(0)
      .build();

  Collection<URI> nameDirs = cluster.getNameDirs(0);
  cluster.shutdown();
  cluster = null;

  for (URI nameDirUri : nameDirs) {
    File dir = new File(nameDirUri.getPath());
    try {
      // Simulate the mount going read-only
      FileUtil.setWritable(dir, false);
      cluster = new MiniDFSCluster.Builder(conf)
          .numDataNodes(0)
          .format(false)
          .build();
      fail("NN should have failed to start with " + dir + " set unreadable");
    } catch (IOException ioe) {
      GenericTestUtils.assertExceptionContains(
          "storage directory does not exist or is not accessible", ioe);
    } finally {
      cleanup(cluster);
      cluster = null;
      FileUtil.setWritable(dir, true);
    }
  }
}
Example 5
Source File: FSImageTestUtil.java From hadoop with Apache License 2.0
public static List<File> getNameNodeCurrentDirs(MiniDFSCluster cluster, int nnIdx) {
  List<File> nameDirs = Lists.newArrayList();
  for (URI u : cluster.getNameDirs(nnIdx)) {
    nameDirs.add(new File(u.getPath(), "current"));
  }
  return nameDirs;
}
Example 6
Source File: TestDFSUpgradeWithHA.java From big-c with Apache License 2.0
private static void checkNnPreviousDirExistence(MiniDFSCluster cluster,
    int index, boolean shouldExist) {
  Collection<URI> nameDirs = cluster.getNameDirs(index);
  for (URI nnDir : nameDirs) {
    checkPreviousDirExistence(new File(nnDir), shouldExist);
  }
}
Example 7
Source File: TestCheckpoint.java From big-c with Apache License 2.0
@Test
public void testNameDirError() throws IOException {
  LOG.info("Starting testNameDirError");
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(0)
      .build();

  Collection<URI> nameDirs = cluster.getNameDirs(0);
  cluster.shutdown();
  cluster = null;

  for (URI nameDirUri : nameDirs) {
    File dir = new File(nameDirUri.getPath());
    try {
      // Simulate the mount going read-only
      FileUtil.setWritable(dir, false);
      cluster = new MiniDFSCluster.Builder(conf)
          .numDataNodes(0)
          .format(false)
          .build();
      fail("NN should have failed to start with " + dir + " set unreadable");
    } catch (IOException ioe) {
      GenericTestUtils.assertExceptionContains(
          "storage directory does not exist or is not accessible", ioe);
    } finally {
      cleanup(cluster);
      cluster = null;
      FileUtil.setWritable(dir, true);
    }
  }
}
Example 8
Source File: FSImageTestUtil.java From big-c with Apache License 2.0
public static List<File> getNameNodeCurrentDirs(MiniDFSCluster cluster, int nnIdx) {
  List<File> nameDirs = Lists.newArrayList();
  for (URI u : cluster.getNameDirs(nnIdx)) {
    nameDirs.add(new File(u.getPath(), "current"));
  }
  return nameDirs;
}
Example 9
Source File: TestDFSUpgradeWithHA.java From hadoop with Apache License 2.0
/**
 * Test rollback with NFS shared dir.
 */
@Test
public void testRollbackWithNfs() throws Exception {
  MiniDFSCluster cluster = null;
  FileSystem fs = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf)
        .nnTopology(MiniDFSNNTopology.simpleHATopology())
        .numDataNodes(0)
        .build();

    File sharedDir = new File(cluster.getSharedEditsDir(0, 1));

    // No upgrade is in progress at the moment.
    checkClusterPreviousDirExistence(cluster, false);
    assertCTimesEqual(cluster);
    checkPreviousDirExistence(sharedDir, false);

    // Transition NN0 to active and do some FS ops.
    cluster.transitionToActive(0);
    fs = HATestUtil.configureFailoverFs(cluster, conf);
    assertTrue(fs.mkdirs(new Path("/foo1")));

    // Do the upgrade. Shut down NN1 and then restart NN0 with the upgrade
    // flag.
    cluster.shutdownNameNode(1);
    cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE);
    cluster.restartNameNode(0, false);

    checkNnPreviousDirExistence(cluster, 0, true);
    checkNnPreviousDirExistence(cluster, 1, false);
    checkPreviousDirExistence(sharedDir, true);

    // NN0 should come up in the active state when given the -upgrade option,
    // so no need to transition it to active.
    assertTrue(fs.mkdirs(new Path("/foo2")));

    // Now bootstrap the standby with the upgraded info.
    int rc = BootstrapStandby.run(
        new String[]{"-force"},
        cluster.getConfiguration(1));
    assertEquals(0, rc);

    cluster.restartNameNode(1);

    checkNnPreviousDirExistence(cluster, 0, true);
    checkNnPreviousDirExistence(cluster, 1, true);
    checkPreviousDirExistence(sharedDir, true);
    assertCTimesEqual(cluster);

    // Now shut down the cluster and do the rollback.
    Collection<URI> nn1NameDirs = cluster.getNameDirs(0);
    cluster.shutdown();

    conf.setStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
        Joiner.on(",").join(nn1NameDirs));
    NameNode.doRollback(conf, false);

    // The rollback operation should have rolled back the first NN's local
    // dirs, and the shared dir, but not the other NN's dirs. Those have to be
    // done by bootstrapping the standby.
    checkNnPreviousDirExistence(cluster, 0, false);
    checkPreviousDirExistence(sharedDir, false);
  } finally {
    if (fs != null) {
      fs.close();
    }
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Example 10
Source File: TestDFSUpgradeWithHA.java From hadoop with Apache License 2.0
@Test
public void testRollbackWithJournalNodes() throws IOException,
    URISyntaxException {
  MiniQJMHACluster qjCluster = null;
  FileSystem fs = null;
  try {
    Builder builder = new MiniQJMHACluster.Builder(conf);
    builder.getDfsBuilder()
        .numDataNodes(0);
    qjCluster = builder.build();

    MiniDFSCluster cluster = qjCluster.getDfsCluster();

    // No upgrade is in progress at the moment.
    checkClusterPreviousDirExistence(cluster, false);
    assertCTimesEqual(cluster);
    checkJnPreviousDirExistence(qjCluster, false);

    // Transition NN0 to active and do some FS ops.
    cluster.transitionToActive(0);
    fs = HATestUtil.configureFailoverFs(cluster, conf);
    assertTrue(fs.mkdirs(new Path("/foo1")));

    final long cidBeforeUpgrade = getCommittedTxnIdValue(qjCluster);

    // Do the upgrade. Shut down NN1 and then restart NN0 with the upgrade
    // flag.
    cluster.shutdownNameNode(1);
    cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE);
    cluster.restartNameNode(0, false);

    checkNnPreviousDirExistence(cluster, 0, true);
    checkNnPreviousDirExistence(cluster, 1, false);
    checkJnPreviousDirExistence(qjCluster, true);

    // NN0 should come up in the active state when given the -upgrade option,
    // so no need to transition it to active.
    assertTrue(fs.mkdirs(new Path("/foo2")));

    final long cidDuringUpgrade = getCommittedTxnIdValue(qjCluster);
    assertTrue(cidDuringUpgrade > cidBeforeUpgrade);

    // Now bootstrap the standby with the upgraded info.
    int rc = BootstrapStandby.run(
        new String[]{"-force"},
        cluster.getConfiguration(1));
    assertEquals(0, rc);

    cluster.restartNameNode(1);

    checkNnPreviousDirExistence(cluster, 0, true);
    checkNnPreviousDirExistence(cluster, 1, true);
    checkJnPreviousDirExistence(qjCluster, true);
    assertCTimesEqual(cluster);

    // Shut down the NNs, but deliberately leave the JNs up and running.
    Collection<URI> nn1NameDirs = cluster.getNameDirs(0);
    cluster.shutdown();

    conf.setStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
        Joiner.on(",").join(nn1NameDirs));
    NameNode.doRollback(conf, false);

    final long cidAfterRollback = getCommittedTxnIdValue(qjCluster);
    assertTrue(cidBeforeUpgrade < cidAfterRollback);
    // make sure the committedTxnId has been reset correctly after rollback
    assertTrue(cidDuringUpgrade > cidAfterRollback);

    // The rollback operation should have rolled back the first NN's local
    // dirs, and the shared dir, but not the other NN's dirs. Those have to be
    // done by bootstrapping the standby.
    checkNnPreviousDirExistence(cluster, 0, false);
    checkJnPreviousDirExistence(qjCluster, false);
  } finally {
    if (fs != null) {
      fs.close();
    }
    if (qjCluster != null) {
      qjCluster.shutdown();
    }
  }
}
Example 11
Source File: TestCheckpoint.java From hadoop with Apache License 2.0
/**
 * Test the importCheckpoint startup option. Verifies:
 * 1. if the NN already contains an image, it will not be allowed
 *    to import a checkpoint.
 * 2. if the NN does not contain an image, importing a checkpoint
 *    succeeds and re-saves the image
 */
@Test
public void testImportCheckpoint() throws Exception {
  Configuration conf = new HdfsConfiguration();
  Path testPath = new Path("/testfile");
  SecondaryNameNode snn = null;
  MiniDFSCluster cluster = null;
  Collection<URI> nameDirs = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    nameDirs = cluster.getNameDirs(0);

    // Make an entry in the namespace, used for verifying checkpoint
    // later.
    cluster.getFileSystem().mkdirs(testPath);

    // Take a checkpoint
    snn = startSecondaryNameNode(conf);
    snn.doCheckpoint();
  } finally {
    cleanup(snn);
    cleanup(cluster);
    cluster = null;
  }

  LOG.info("Trying to import checkpoint when the NameNode already " +
      "contains an image. This should fail.");
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).format(false)
        .startupOption(StartupOption.IMPORT).build();
    fail("NameNode did not fail to start when it already contained " +
        "an image");
  } catch (IOException ioe) {
    // Expected
    GenericTestUtils.assertExceptionContains(
        "NameNode already contains an image", ioe);
  } finally {
    cleanup(cluster);
    cluster = null;
  }

  LOG.info("Removing NN storage contents");
  for (URI uri : nameDirs) {
    File dir = new File(uri.getPath());
    LOG.info("Cleaning " + dir);
    removeAndRecreateDir(dir);
  }

  LOG.info("Trying to import checkpoint");
  try {
    cluster = new MiniDFSCluster.Builder(conf).format(false).numDataNodes(0)
        .startupOption(StartupOption.IMPORT).build();

    assertTrue("Path from checkpoint should exist after import",
        cluster.getFileSystem().exists(testPath));

    // Make sure that the image got saved on import
    FSImageTestUtil.assertNNHasCheckpoints(cluster, Ints.asList(3));
  } finally {
    cleanup(cluster);
    cluster = null;
  }
}
Example 12
Source File: TestDFSUpgradeWithHA.java From big-c with Apache License 2.0
/**
 * Test rollback with NFS shared dir.
 */
@Test
public void testRollbackWithNfs() throws Exception {
  MiniDFSCluster cluster = null;
  FileSystem fs = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf)
        .nnTopology(MiniDFSNNTopology.simpleHATopology())
        .numDataNodes(0)
        .build();

    File sharedDir = new File(cluster.getSharedEditsDir(0, 1));

    // No upgrade is in progress at the moment.
    checkClusterPreviousDirExistence(cluster, false);
    assertCTimesEqual(cluster);
    checkPreviousDirExistence(sharedDir, false);

    // Transition NN0 to active and do some FS ops.
    cluster.transitionToActive(0);
    fs = HATestUtil.configureFailoverFs(cluster, conf);
    assertTrue(fs.mkdirs(new Path("/foo1")));

    // Do the upgrade. Shut down NN1 and then restart NN0 with the upgrade
    // flag.
    cluster.shutdownNameNode(1);
    cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE);
    cluster.restartNameNode(0, false);

    checkNnPreviousDirExistence(cluster, 0, true);
    checkNnPreviousDirExistence(cluster, 1, false);
    checkPreviousDirExistence(sharedDir, true);

    // NN0 should come up in the active state when given the -upgrade option,
    // so no need to transition it to active.
    assertTrue(fs.mkdirs(new Path("/foo2")));

    // Now bootstrap the standby with the upgraded info.
    int rc = BootstrapStandby.run(
        new String[]{"-force"},
        cluster.getConfiguration(1));
    assertEquals(0, rc);

    cluster.restartNameNode(1);

    checkNnPreviousDirExistence(cluster, 0, true);
    checkNnPreviousDirExistence(cluster, 1, true);
    checkPreviousDirExistence(sharedDir, true);
    assertCTimesEqual(cluster);

    // Now shut down the cluster and do the rollback.
    Collection<URI> nn1NameDirs = cluster.getNameDirs(0);
    cluster.shutdown();

    conf.setStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
        Joiner.on(",").join(nn1NameDirs));
    NameNode.doRollback(conf, false);

    // The rollback operation should have rolled back the first NN's local
    // dirs, and the shared dir, but not the other NN's dirs. Those have to be
    // done by bootstrapping the standby.
    checkNnPreviousDirExistence(cluster, 0, false);
    checkPreviousDirExistence(sharedDir, false);
  } finally {
    if (fs != null) {
      fs.close();
    }
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Example 13
Source File: TestDFSUpgradeWithHA.java From big-c with Apache License 2.0
@Test
public void testRollbackWithJournalNodes() throws IOException,
    URISyntaxException {
  MiniQJMHACluster qjCluster = null;
  FileSystem fs = null;
  try {
    Builder builder = new MiniQJMHACluster.Builder(conf);
    builder.getDfsBuilder()
        .numDataNodes(0);
    qjCluster = builder.build();

    MiniDFSCluster cluster = qjCluster.getDfsCluster();

    // No upgrade is in progress at the moment.
    checkClusterPreviousDirExistence(cluster, false);
    assertCTimesEqual(cluster);
    checkJnPreviousDirExistence(qjCluster, false);

    // Transition NN0 to active and do some FS ops.
    cluster.transitionToActive(0);
    fs = HATestUtil.configureFailoverFs(cluster, conf);
    assertTrue(fs.mkdirs(new Path("/foo1")));

    final long cidBeforeUpgrade = getCommittedTxnIdValue(qjCluster);

    // Do the upgrade. Shut down NN1 and then restart NN0 with the upgrade
    // flag.
    cluster.shutdownNameNode(1);
    cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE);
    cluster.restartNameNode(0, false);

    checkNnPreviousDirExistence(cluster, 0, true);
    checkNnPreviousDirExistence(cluster, 1, false);
    checkJnPreviousDirExistence(qjCluster, true);

    // NN0 should come up in the active state when given the -upgrade option,
    // so no need to transition it to active.
    assertTrue(fs.mkdirs(new Path("/foo2")));

    final long cidDuringUpgrade = getCommittedTxnIdValue(qjCluster);
    assertTrue(cidDuringUpgrade > cidBeforeUpgrade);

    // Now bootstrap the standby with the upgraded info.
    int rc = BootstrapStandby.run(
        new String[]{"-force"},
        cluster.getConfiguration(1));
    assertEquals(0, rc);

    cluster.restartNameNode(1);

    checkNnPreviousDirExistence(cluster, 0, true);
    checkNnPreviousDirExistence(cluster, 1, true);
    checkJnPreviousDirExistence(qjCluster, true);
    assertCTimesEqual(cluster);

    // Shut down the NNs, but deliberately leave the JNs up and running.
    Collection<URI> nn1NameDirs = cluster.getNameDirs(0);
    cluster.shutdown();

    conf.setStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
        Joiner.on(",").join(nn1NameDirs));
    NameNode.doRollback(conf, false);

    final long cidAfterRollback = getCommittedTxnIdValue(qjCluster);
    assertTrue(cidBeforeUpgrade < cidAfterRollback);
    // make sure the committedTxnId has been reset correctly after rollback
    assertTrue(cidDuringUpgrade > cidAfterRollback);

    // The rollback operation should have rolled back the first NN's local
    // dirs, and the shared dir, but not the other NN's dirs. Those have to be
    // done by bootstrapping the standby.
    checkNnPreviousDirExistence(cluster, 0, false);
    checkJnPreviousDirExistence(qjCluster, false);
  } finally {
    if (fs != null) {
      fs.close();
    }
    if (qjCluster != null) {
      qjCluster.shutdown();
    }
  }
}
Example 14
Source File: TestCheckpoint.java From big-c with Apache License 2.0
/**
 * Test the importCheckpoint startup option. Verifies:
 * 1. if the NN already contains an image, it will not be allowed
 *    to import a checkpoint.
 * 2. if the NN does not contain an image, importing a checkpoint
 *    succeeds and re-saves the image
 */
@Test
public void testImportCheckpoint() throws Exception {
  Configuration conf = new HdfsConfiguration();
  Path testPath = new Path("/testfile");
  SecondaryNameNode snn = null;
  MiniDFSCluster cluster = null;
  Collection<URI> nameDirs = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    nameDirs = cluster.getNameDirs(0);

    // Make an entry in the namespace, used for verifying checkpoint
    // later.
    cluster.getFileSystem().mkdirs(testPath);

    // Take a checkpoint
    snn = startSecondaryNameNode(conf);
    snn.doCheckpoint();
  } finally {
    cleanup(snn);
    cleanup(cluster);
    cluster = null;
  }

  LOG.info("Trying to import checkpoint when the NameNode already " +
      "contains an image. This should fail.");
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).format(false)
        .startupOption(StartupOption.IMPORT).build();
    fail("NameNode did not fail to start when it already contained " +
        "an image");
  } catch (IOException ioe) {
    // Expected
    GenericTestUtils.assertExceptionContains(
        "NameNode already contains an image", ioe);
  } finally {
    cleanup(cluster);
    cluster = null;
  }

  LOG.info("Removing NN storage contents");
  for (URI uri : nameDirs) {
    File dir = new File(uri.getPath());
    LOG.info("Cleaning " + dir);
    removeAndRecreateDir(dir);
  }

  LOG.info("Trying to import checkpoint");
  try {
    cluster = new MiniDFSCluster.Builder(conf).format(false).numDataNodes(0)
        .startupOption(StartupOption.IMPORT).build();

    assertTrue("Path from checkpoint should exist after import",
        cluster.getFileSystem().exists(testPath));

    // Make sure that the image got saved on import
    FSImageTestUtil.assertNNHasCheckpoints(cluster, Ints.asList(3));
  } finally {
    cleanup(cluster);
    cluster = null;
  }
}
Example 15
Source File: TestEditLog.java From RDFS with Apache License 2.0
/**
 * Tests transaction logging in dfs.
 */
public void testEditLog() throws IOException {
  // start a cluster
  Collection<File> namedirs = null;
  Collection<File> editsdirs = null;
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = new MiniDFSCluster(0, conf, numDatanodes,
      true, true, null, null);
  cluster.waitActive();
  FileSystem fileSys = cluster.getFileSystem();
  final FSNamesystem namesystem = cluster.getNameNode().getNamesystem();
  int numdirs = 0;

  try {
    namedirs = cluster.getNameDirs();
    editsdirs = cluster.getNameEditsDirs();
  } finally {
    fileSys.close();
    cluster.shutdown();
  }

  for (Iterator it = namedirs.iterator(); it.hasNext(); ) {
    File dir = (File) it.next();
    System.out.println(dir);
    numdirs++;
  }

  FSImage fsimage = namesystem.getFSImage();
  FSEditLog editLog = fsimage.getEditLog();

  // set small size of flush buffer
  editLog.setBufferCapacity(2048);
  editLog.close();
  editLog.open();

  // Create threads and make them run transactions concurrently.
  Thread threadId[] = new Thread[numThreads];
  for (int i = 0; i < numThreads; i++) {
    Transactions trans = new Transactions(namesystem, numberTransactions);
    threadId[i] = new Thread(trans, "TransactionThread-" + i);
    threadId[i].start();
  }

  // wait for all transactions to get over
  for (int i = 0; i < numThreads; i++) {
    try {
      threadId[i].join();
    } catch (InterruptedException e) {
      i--; // retry
    }
  }

  editLog.close();

  // Verify that we can read in all the transactions that we have written.
  // If there were any corruptions, it is likely that the reading in
  // of these transactions will throw an exception.
  //
  for (Iterator<StorageDirectory> it =
      fsimage.dirIterator(NameNodeDirType.EDITS); it.hasNext();) {
    File editFile = FSImage.getImageFile(it.next(), NameNodeFile.EDITS);
    // Start from 0 when loading edit logs.
    editLog.setStartTransactionId(0);
    System.out.println("Verifying file: " + editFile);
    EditLogInputStream is = new EditLogFileInputStream(editFile);
    FSEditLogLoader loader = new FSEditLogLoader(namesystem);
    int numEdits = loader.loadFSEdits(is,
        namesystem.getEditLog().getCurrentTxId());
    int numLeases = namesystem.leaseManager.countLease();
    System.out.println("Number of outstanding leases " + numLeases);
    assertEquals(0, numLeases);
    assertTrue("Verification for " + editFile + " failed. " +
        "Expected " + (numThreads * 3 * numberTransactions) + " transactions. " +
        "Found " + numEdits + " transactions.",
        numEdits == numThreads * 3 * numberTransactions);
  }
}
Example 16
Source File: TestEditLog.java From hadoop-gpu with Apache License 2.0
/**
 * Tests transaction logging in dfs.
 */
public void testEditLog() throws IOException {
  // start a cluster
  Collection<File> namedirs = null;
  Collection<File> editsdirs = null;
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = new MiniDFSCluster(0, conf, numDatanodes,
      true, true, null, null);
  cluster.waitActive();
  FileSystem fileSys = cluster.getFileSystem();
  int numdirs = 0;

  try {
    namedirs = cluster.getNameDirs();
    editsdirs = cluster.getNameEditsDirs();
  } finally {
    fileSys.close();
    cluster.shutdown();
  }

  for (Iterator it = namedirs.iterator(); it.hasNext(); ) {
    File dir = (File) it.next();
    System.out.println(dir);
    numdirs++;
  }

  FSImage fsimage = new FSImage(namedirs, editsdirs);
  FSEditLog editLog = fsimage.getEditLog();

  // set small size of flush buffer
  editLog.setBufferCapacity(2048);
  editLog.close();
  editLog.open();

  // Create threads and make them run transactions concurrently.
  Thread threadId[] = new Thread[numThreads];
  for (int i = 0; i < numThreads; i++) {
    Transactions trans = new Transactions(editLog, numberTransactions);
    threadId[i] = new Thread(trans, "TransactionThread-" + i);
    threadId[i].start();
  }

  // wait for all transactions to get over
  for (int i = 0; i < numThreads; i++) {
    try {
      threadId[i].join();
    } catch (InterruptedException e) {
      i--; // retry
    }
  }

  editLog.close();

  // Verify that we can read in all the transactions that we have written.
  // If there were any corruptions, it is likely that the reading in
  // of these transactions will throw an exception.
  //
  for (Iterator<StorageDirectory> it =
      fsimage.dirIterator(NameNodeDirType.EDITS); it.hasNext();) {
    File editFile = FSImage.getImageFile(it.next(), NameNodeFile.EDITS);
    System.out.println("Verifying file: " + editFile);
    int numEdits = FSEditLog.loadFSEdits(new EditLogFileInputStream(editFile));
    int numLeases = FSNamesystem.getFSNamesystem().leaseManager.countLease();
    System.out.println("Number of outstanding leases " + numLeases);
    assertEquals(0, numLeases);
    assertTrue("Verification for " + editFile + " failed. " +
        "Expected " + (numThreads * 2 * numberTransactions) + " transactions. " +
        "Found " + numEdits + " transactions.",
        numEdits == numThreads * 2 * numberTransactions);
  }
}