Java Code Examples for org.apache.hadoop.test.GenericTestUtils#assertGlobEquals()
The following examples show how to use org.apache.hadoop.test.GenericTestUtils#assertGlobEquals(). The method lists the files in a directory whose names match a regular expression and asserts that this set is exactly the expected set of file names.
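Before the full test cases, here is a minimal, self-contained sketch of the call pattern. The signature used here, assertGlobEquals(File dir, String regex, String... expectedFileNames), follows the usage in the examples below; the temporary directory and file names are purely illustrative.

import static org.junit.Assert.assertTrue;

import java.io.File;
import java.nio.file.Files;

import org.apache.hadoop.test.GenericTestUtils;
import org.junit.Test;

public class AssertGlobEqualsSketch {

  @Test
  public void testGlobMatchesExpectedFileSet() throws Exception {
    // Hypothetical temporary directory and file names, for illustration only.
    File dir = Files.createTempDirectory("assert-glob-sketch").toFile();
    assertTrue(new File(dir, "edits_0000001-0000002").createNewFile());
    assertTrue(new File(dir, "edits_inprogress_0000003").createNewFile());
    assertTrue(new File(dir, "fsimage_0000000").createNewFile());

    // Passes: the files matching "edits_.*" are exactly the two names listed.
    GenericTestUtils.assertGlobEquals(dir, "edits_.*",
        "edits_0000001-0000002", "edits_inprogress_0000003");

    // Would fail: "fsimage_0000000" matches "fsimage_.*" but is not listed.
    // GenericTestUtils.assertGlobEquals(dir, "fsimage_.*");
  }
}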
Example 1
Source File: TestEditLogsDuringFailover.java From hadoop with Apache License 2.0
/**
 * Check that the given list of edits files are present in the given storage
 * dirs.
 */
private void assertEditFiles(Iterable<URI> dirs, String ... files)
    throws IOException {
  for (URI u : dirs) {
    File editDirRoot = new File(u.getPath());
    File editDir = new File(editDirRoot, "current");
    GenericTestUtils.assertExists(editDir);
    if (files.length == 0) {
      LOG.info("Checking no edit files exist in " + editDir);
    } else {
      LOG.info("Checking for following edit files in " + editDir
          + ": " + Joiner.on(",").join(files));
    }

    GenericTestUtils.assertGlobEquals(editDir, "edits_.*", files);
  }
}
Example 2
Source File: TestQuorumJournalManager.java From hadoop with Apache License 2.0
@Test
public void testSelectInputStreamsNotOnBoundary() throws Exception {
  final int txIdsPerSegment = 10;
  for (int txid = 1; txid <= 5 * txIdsPerSegment; txid += txIdsPerSegment) {
    writeSegment(cluster, qjm, txid, txIdsPerSegment, true);
  }
  File curDir = cluster.getCurrentDir(0, JID);
  GenericTestUtils.assertGlobEquals(curDir, "edits_.*",
      NNStorage.getFinalizedEditsFileName(1, 10),
      NNStorage.getFinalizedEditsFileName(11, 20),
      NNStorage.getFinalizedEditsFileName(21, 30),
      NNStorage.getFinalizedEditsFileName(31, 40),
      NNStorage.getFinalizedEditsFileName(41, 50));

  ArrayList<EditLogInputStream> streams = new ArrayList<EditLogInputStream>();
  qjm.selectInputStreams(streams, 25, false);

  verifyEdits(streams, 25, 50);
}
Example 3
Source File: TestSaveNamespace.java From hadoop with Apache License 2.0
@Test(timeout=20000)
public void testCancelSaveNamespace() throws Exception {
  Configuration conf = getConf();
  NameNode.initMetrics(conf, NamenodeRole.NAMENODE);
  DFSTestUtil.formatNameNode(conf);
  FSNamesystem fsn = FSNamesystem.loadFromDisk(conf);

  // Replace the FSImage with a spy
  final FSImage image = fsn.getFSImage();
  NNStorage storage = image.getStorage();
  storage.close(); // unlock any directories that FSNamesystem's initialization may have locked
  storage.setStorageDirectories(
      FSNamesystem.getNamespaceDirs(conf),
      FSNamesystem.getNamespaceEditsDirs(conf));

  FSNamesystem spyFsn = spy(fsn);
  final FSNamesystem finalFsn = spyFsn;
  DelayAnswer delayer = new GenericTestUtils.DelayAnswer(LOG);
  BlockIdManager bid = spy(spyFsn.getBlockIdManager());
  Whitebox.setInternalState(finalFsn, "blockIdManager", bid);
  doAnswer(delayer).when(bid).getGenerationStampV2();

  ExecutorService pool = Executors.newFixedThreadPool(2);

  try {
    doAnEdit(fsn, 1);
    final Canceler canceler = new Canceler();

    // Save namespace
    fsn.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    try {
      Future<Void> saverFuture = pool.submit(new Callable<Void>() {
        @Override
        public Void call() throws Exception {
          image.saveNamespace(finalFsn, NameNodeFile.IMAGE, canceler);
          return null;
        }
      });

      // Wait until saveNamespace calls getGenerationStamp
      delayer.waitForCall();
      // then cancel the saveNamespace
      Future<Void> cancelFuture = pool.submit(new Callable<Void>() {
        @Override
        public Void call() throws Exception {
          canceler.cancel("cancelled");
          return null;
        }
      });
      // give the cancel call time to run
      Thread.sleep(500);

      // allow saveNamespace to proceed - it should check the cancel flag after
      // this point and throw an exception
      delayer.proceed();

      cancelFuture.get();
      saverFuture.get();
      fail("saveNamespace did not fail even though cancelled!");
    } catch (Throwable t) {
      GenericTestUtils.assertExceptionContains(
          "SaveNamespaceCancelledException", t);
    }
    LOG.info("Successfully cancelled a saveNamespace");

    // Check that we have only the original image and not any
    // cruft left over from half-finished images
    FSImageTestUtil.logStorageContents(LOG, storage);
    for (StorageDirectory sd : storage.dirIterable(null)) {
      File curDir = sd.getCurrentDir();
      GenericTestUtils.assertGlobEquals(curDir, "fsimage_.*",
          NNStorage.getImageFileName(0),
          NNStorage.getImageFileName(0) + MD5FileUtils.MD5_SUFFIX);
    }
  } finally {
    fsn.close();
  }
}
Example 4
Source File: TestEditLog.java From hadoop with Apache License 2.0
/**
 * Test that the NN handles the corruption properly
 * after it crashes just after creating an edit log
 * (ie before writing START_LOG_SEGMENT). In the case
 * that all logs have this problem, it should mark them
 * as corrupt instead of trying to finalize them.
 *
 * @param inBothDirs if true, there will be a truncated log in
 * both of the edits directories. If false, the truncated log
 * will only be in one of the directories. In both cases, the
 * NN should fail to start up, because it's aware that txid 3
 * was reached, but unable to find a non-corrupt log starting there.
 * @param updateTransactionIdFile if true update the seen_txid file.
 * If false, it will not be updated. This will simulate a case where
 * the NN crashed between creating the new segment and updating the
 * seen_txid file.
 * @param shouldSucceed true if the test is expected to succeed.
 */
private void doTestCrashRecoveryEmptyLog(boolean inBothDirs,
    boolean updateTransactionIdFile, boolean shouldSucceed)
    throws Exception {
  // start a cluster
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;
  cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(NUM_DATA_NODES).build();
  cluster.shutdown();

  Collection<URI> editsDirs = cluster.getNameEditsDirs(0);
  for (URI uri : editsDirs) {
    File dir = new File(uri.getPath());
    File currentDir = new File(dir, "current");
    // We should start with only the finalized edits_1-2
    GenericTestUtils.assertGlobEquals(currentDir, "edits_.*",
        NNStorage.getFinalizedEditsFileName(1, 2));
    // Make a truncated edits_3_inprogress
    File log = new File(currentDir,
        NNStorage.getInProgressEditsFileName(3));

    EditLogFileOutputStream stream = new EditLogFileOutputStream(conf, log, 1024);
    try {
      stream.create(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
      if (!inBothDirs) {
        break;
      }

      NNStorage storage = new NNStorage(conf,
          Collections.<URI>emptyList(),
          Lists.newArrayList(uri));

      if (updateTransactionIdFile) {
        storage.writeTransactionIdFileToStorage(3);
      }
      storage.close();
    } finally {
      stream.close();
    }
  }

  try {
    cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(NUM_DATA_NODES).format(false).build();
    if (!shouldSucceed) {
      fail("Should not have succeeded in starting cluster");
    }
  } catch (IOException ioe) {
    if (shouldSucceed) {
      LOG.info("Should have succeeded in starting cluster, but failed", ioe);
      throw ioe;
    } else {
      GenericTestUtils.assertExceptionContains(
          "Gap in transactions. Expected to be able to read up until " +
          "at least txid 3 but unable to find any edit logs containing " +
          "txid 3", ioe);
    }
  } finally {
    cluster.shutdown();
  }
}
Example 5
Source File: TestQuorumJournalManager.java From hadoop with Apache License 2.0
/**
 * Test the case where, at the beginning of a segment, transactions
 * have been written to one JN but not others.
 */
public void doTestOutOfSyncAtBeginningOfSegment(int nodeWithOneTxn)
    throws Exception {
  int nodeWithEmptySegment = (nodeWithOneTxn + 1) % 3;
  int nodeMissingSegment = (nodeWithOneTxn + 2) % 3;

  writeSegment(cluster, qjm, 1, 3, true);
  waitForAllPendingCalls(qjm.getLoggerSetForTests());
  cluster.getJournalNode(nodeMissingSegment).stopAndJoin(0);

  // Open segment on 2/3 nodes
  EditLogOutputStream stm = qjm.startLogSegment(4,
      NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
  try {
    waitForAllPendingCalls(qjm.getLoggerSetForTests());

    // Write transactions to only 1/3 nodes
    failLoggerAtTxn(spies.get(nodeWithEmptySegment), 4);
    try {
      writeTxns(stm, 4, 1);
      fail("Did not fail even though 2/3 failed");
    } catch (QuorumException qe) {
      GenericTestUtils.assertExceptionContains("mock failure", qe);
    }
  } finally {
    stm.abort();
  }

  // Bring back the down JN.
  cluster.restartJournalNode(nodeMissingSegment);

  // Make a new QJM. At this point, the state is as follows:
  // A: nodeWithEmptySegment: 1-3 finalized, 4_inprogress (empty)
  // B: nodeWithOneTxn: 1-3 finalized, 4_inprogress (1 txn)
  // C: nodeMissingSegment: 1-3 finalized
  GenericTestUtils.assertGlobEquals(
      cluster.getCurrentDir(nodeWithEmptySegment, JID),
      "edits_.*",
      NNStorage.getFinalizedEditsFileName(1, 3),
      NNStorage.getInProgressEditsFileName(4));
  GenericTestUtils.assertGlobEquals(
      cluster.getCurrentDir(nodeWithOneTxn, JID),
      "edits_.*",
      NNStorage.getFinalizedEditsFileName(1, 3),
      NNStorage.getInProgressEditsFileName(4));
  GenericTestUtils.assertGlobEquals(
      cluster.getCurrentDir(nodeMissingSegment, JID),
      "edits_.*",
      NNStorage.getFinalizedEditsFileName(1, 3));

  // Stop one of the nodes. Since we run this test three
  // times, rotating the roles of the nodes, we'll test
  // all the permutations.
  cluster.getJournalNode(2).stopAndJoin(0);

  qjm = createSpyingQJM();
  qjm.recoverUnfinalizedSegments();

  if (nodeWithOneTxn == 0 || nodeWithOneTxn == 1) {
    // If the node that had the transaction committed was one of the nodes
    // that responded during recovery, then we should have recovered txid
    // 4.
    checkRecovery(cluster, 4, 4);
    writeSegment(cluster, qjm, 5, 3, true);
  } else {
    // Otherwise, we should have recovered only 1-3 and should be able to
    // start a segment at 4.
    checkRecovery(cluster, 1, 3);
    writeSegment(cluster, qjm, 4, 3, true);
  }
}
Example 6
Source File: TestQuorumJournalManager.java From hadoop with Apache License 2.0
@Test
public void testPurgeLogs() throws Exception {
  for (int txid = 1; txid <= 5; txid++) {
    writeSegment(cluster, qjm, txid, 1, true);
  }
  File curDir = cluster.getCurrentDir(0, JID);
  GenericTestUtils.assertGlobEquals(curDir, "edits_.*",
      NNStorage.getFinalizedEditsFileName(1, 1),
      NNStorage.getFinalizedEditsFileName(2, 2),
      NNStorage.getFinalizedEditsFileName(3, 3),
      NNStorage.getFinalizedEditsFileName(4, 4),
      NNStorage.getFinalizedEditsFileName(5, 5));
  File paxosDir = new File(curDir, "paxos");
  GenericTestUtils.assertExists(paxosDir);

  // Create new files in the paxos directory, which should get purged too.
  assertTrue(new File(paxosDir, "1").createNewFile());
  assertTrue(new File(paxosDir, "3").createNewFile());

  GenericTestUtils.assertGlobEquals(paxosDir, "\\d+", "1", "3");

  // Create some temporary files of the sort that are used during recovery.
  assertTrue(new File(curDir,
      "edits_inprogress_0000000000000000001.epoch=140").createNewFile());
  assertTrue(new File(curDir,
      "edits_inprogress_0000000000000000002.empty").createNewFile());

  qjm.purgeLogsOlderThan(3);

  // Log purging is asynchronous, so we have to wait for the calls
  // to be sent and respond before verifying.
  waitForAllPendingCalls(qjm.getLoggerSetForTests());

  // Older edits should be purged
  GenericTestUtils.assertGlobEquals(curDir, "edits_.*",
      NNStorage.getFinalizedEditsFileName(3, 3),
      NNStorage.getFinalizedEditsFileName(4, 4),
      NNStorage.getFinalizedEditsFileName(5, 5));

  // Older paxos files should be purged
  GenericTestUtils.assertGlobEquals(paxosDir, "\\d+", "3");
}