Java Code Examples for org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter#leaveSafeMode()
The following examples show how to use
org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter#leaveSafeMode() .
You can vote up the examples you like or vote down the ones you don't like,
and you can navigate to the original project or source file by following the links above each example. You may also check out the related API usage on the sidebar.
Example 1
Source File: TestOpenFilesWithSnapshot.java From big-c with Apache License 2.0 | 6 votes |
@Test public void testOpenFilesWithRename() throws Exception { Path path = new Path("/test"); doWriteAndAbort(fs, path); // check for zero sized blocks Path fileWithEmptyBlock = new Path("/test/test/test4"); fs.create(fileWithEmptyBlock); NamenodeProtocols nameNodeRpc = cluster.getNameNodeRpc(); String clientName = fs.getClient().getClientName(); // create one empty block nameNodeRpc.addBlock(fileWithEmptyBlock.toString(), clientName, null, null, INodeId.GRANDFATHER_INODE_ID, null); fs.createSnapshot(path, "s2"); fs.rename(new Path("/test/test"), new Path("/test/test-renamed")); fs.delete(new Path("/test/test-renamed"), true); NameNode nameNode = cluster.getNameNode(); NameNodeAdapter.enterSafeMode(nameNode, false); NameNodeAdapter.saveNamespace(nameNode); NameNodeAdapter.leaveSafeMode(nameNode); cluster.restartNameNode(true); }
Example 2
Source File: TestOpenFilesWithSnapshot.java From big-c with Apache License 2.0 | 6 votes |
/**
 * Takes and removes a snapshot around a delete of open files, optionally
 * persisting a checkpoint first, and restarts the NameNode.
 *
 * @param saveNamespace whether to checkpoint before the restart
 */
private void doTestMultipleSnapshots(boolean saveNamespace)
    throws IOException {
  final Path root = new Path("/test");
  doWriteAndAbort(fs, root);
  fs.createSnapshot(root, "s2");
  fs.delete(new Path("/test/test"), true);
  fs.deleteSnapshot(root, "s2");
  cluster.triggerBlockReports();
  if (saveNamespace) {
    // Force a checkpoint so the restart below loads from a fresh fsimage.
    final NameNode nn = cluster.getNameNode();
    NameNodeAdapter.enterSafeMode(nn, false);
    NameNodeAdapter.saveNamespace(nn);
    NameNodeAdapter.leaveSafeMode(nn);
  }
  cluster.restartNameNode(true);
}
Example 3
Source File: TestHASafeMode.java From hadoop with Apache License 2.0 | 6 votes |
/** * Regression test for a bug experienced while developing * HDFS-2742. The scenario here is: * - image contains some blocks * - edits log contains at least one block addition, followed * by deletion of more blocks than were added. * - When node starts up, some incorrect accounting of block * totals caused an assertion failure. */ @Test public void testBlocksDeletedInEditLog() throws Exception { banner("Starting with NN0 active and NN1 standby, creating some blocks"); // Make 4 blocks persisted in the image. DFSTestUtil.createFile(fs, new Path("/test"), 4*BLOCK_SIZE, (short) 3, 1L); NameNodeAdapter.enterSafeMode(nn0, false); NameNodeAdapter.saveNamespace(nn0); NameNodeAdapter.leaveSafeMode(nn0); // OP_ADD for 2 blocks DFSTestUtil.createFile(fs, new Path("/test2"), 2*BLOCK_SIZE, (short) 3, 1L); // OP_DELETE for 4 blocks fs.delete(new Path("/test"), true); restartActive(); }
Example 4
Source File: TestSnapshotBlocksMap.java From hadoop with Apache License 2.0 | 6 votes |
@Test(timeout = 30000) public void testReadSnapshotFileWithCheckpoint() throws Exception { Path foo = new Path("/foo"); hdfs.mkdirs(foo); hdfs.allowSnapshot(foo); Path bar = new Path("/foo/bar"); DFSTestUtil.createFile(hdfs, bar, 100, (short) 2, 100024L); hdfs.createSnapshot(foo, "s1"); assertTrue(hdfs.delete(bar, true)); // checkpoint NameNode nameNode = cluster.getNameNode(); NameNodeAdapter.enterSafeMode(nameNode, false); NameNodeAdapter.saveNamespace(nameNode); NameNodeAdapter.leaveSafeMode(nameNode); // restart namenode to load snapshot files from fsimage cluster.restartNameNode(true); String snapshotPath = Snapshot.getSnapshotPath(foo.toString(), "s1/bar"); DFSTestUtil.readFile(hdfs, new Path(snapshotPath)); }
Example 5
Source File: TestOpenFilesWithSnapshot.java From big-c with Apache License 2.0 | 6 votes |
@Test public void testFilesDeletionWithCheckpoint() throws Exception { Path path = new Path("/test"); doWriteAndAbort(fs, path); fs.delete(new Path("/test/test/test2"), true); fs.delete(new Path("/test/test/test3"), true); NameNode nameNode = cluster.getNameNode(); NameNodeAdapter.enterSafeMode(nameNode, false); NameNodeAdapter.saveNamespace(nameNode); NameNodeAdapter.leaveSafeMode(nameNode); cluster.restartNameNode(true); // read snapshot file after restart String test2snapshotPath = Snapshot.getSnapshotPath(path.toString(), "s1/test/test2"); DFSTestUtil.readFile(fs, new Path(test2snapshotPath)); String test3snapshotPath = Snapshot.getSnapshotPath(path.toString(), "s1/test/test3"); DFSTestUtil.readFile(fs, new Path(test3snapshotPath)); }
Example 6
Source File: TestOpenFilesWithSnapshot.java From hadoop with Apache License 2.0 | 6 votes |
@Test public void testWithCheckpoint() throws Exception { Path path = new Path("/test"); doWriteAndAbort(fs, path); fs.delete(new Path("/test/test"), true); NameNode nameNode = cluster.getNameNode(); NameNodeAdapter.enterSafeMode(nameNode, false); NameNodeAdapter.saveNamespace(nameNode); NameNodeAdapter.leaveSafeMode(nameNode); cluster.restartNameNode(true); // read snapshot file after restart String test2snapshotPath = Snapshot.getSnapshotPath(path.toString(), "s1/test/test2"); DFSTestUtil.readFile(fs, new Path(test2snapshotPath)); String test3snapshotPath = Snapshot.getSnapshotPath(path.toString(), "s1/test/test3"); DFSTestUtil.readFile(fs, new Path(test3snapshotPath)); }
Example 7
Source File: TestOpenFilesWithSnapshot.java From hadoop with Apache License 2.0 | 6 votes |
@Test public void testFilesDeletionWithCheckpoint() throws Exception { Path path = new Path("/test"); doWriteAndAbort(fs, path); fs.delete(new Path("/test/test/test2"), true); fs.delete(new Path("/test/test/test3"), true); NameNode nameNode = cluster.getNameNode(); NameNodeAdapter.enterSafeMode(nameNode, false); NameNodeAdapter.saveNamespace(nameNode); NameNodeAdapter.leaveSafeMode(nameNode); cluster.restartNameNode(true); // read snapshot file after restart String test2snapshotPath = Snapshot.getSnapshotPath(path.toString(), "s1/test/test2"); DFSTestUtil.readFile(fs, new Path(test2snapshotPath)); String test3snapshotPath = Snapshot.getSnapshotPath(path.toString(), "s1/test/test3"); DFSTestUtil.readFile(fs, new Path(test3snapshotPath)); }
Example 8
Source File: TestOpenFilesWithSnapshot.java From hadoop with Apache License 2.0 | 6 votes |
/**
 * Creates and removes snapshot "s2" around a delete of open files,
 * optionally checkpointing first, then restarts the NameNode.
 *
 * @param saveNamespace whether to persist a checkpoint before restarting
 */
private void doTestMultipleSnapshots(boolean saveNamespace)
    throws IOException {
  final Path root = new Path("/test");
  doWriteAndAbort(fs, root);
  fs.createSnapshot(root, "s2");
  fs.delete(new Path("/test/test"), true);
  fs.deleteSnapshot(root, "s2");
  cluster.triggerBlockReports();
  if (saveNamespace) {
    // Checkpoint so the restart loads state from a fresh fsimage.
    final NameNode nn = cluster.getNameNode();
    NameNodeAdapter.enterSafeMode(nn, false);
    NameNodeAdapter.saveNamespace(nn);
    NameNodeAdapter.leaveSafeMode(nn);
  }
  cluster.restartNameNode(true);
}
Example 9
Source File: TestOpenFilesWithSnapshot.java From hadoop with Apache License 2.0 | 6 votes |
@Test public void testOpenFilesWithRename() throws Exception { Path path = new Path("/test"); doWriteAndAbort(fs, path); // check for zero sized blocks Path fileWithEmptyBlock = new Path("/test/test/test4"); fs.create(fileWithEmptyBlock); NamenodeProtocols nameNodeRpc = cluster.getNameNodeRpc(); String clientName = fs.getClient().getClientName(); // create one empty block nameNodeRpc.addBlock(fileWithEmptyBlock.toString(), clientName, null, null, INodeId.GRANDFATHER_INODE_ID, null); fs.createSnapshot(path, "s2"); fs.rename(new Path("/test/test"), new Path("/test/test-renamed")); fs.delete(new Path("/test/test-renamed"), true); NameNode nameNode = cluster.getNameNode(); NameNodeAdapter.enterSafeMode(nameNode, false); NameNodeAdapter.saveNamespace(nameNode); NameNodeAdapter.leaveSafeMode(nameNode); cluster.restartNameNode(true); }
Example 10
Source File: TestOpenFilesWithSnapshot.java From big-c with Apache License 2.0 | 6 votes |
@Test public void testWithCheckpoint() throws Exception { Path path = new Path("/test"); doWriteAndAbort(fs, path); fs.delete(new Path("/test/test"), true); NameNode nameNode = cluster.getNameNode(); NameNodeAdapter.enterSafeMode(nameNode, false); NameNodeAdapter.saveNamespace(nameNode); NameNodeAdapter.leaveSafeMode(nameNode); cluster.restartNameNode(true); // read snapshot file after restart String test2snapshotPath = Snapshot.getSnapshotPath(path.toString(), "s1/test/test2"); DFSTestUtil.readFile(fs, new Path(test2snapshotPath)); String test3snapshotPath = Snapshot.getSnapshotPath(path.toString(), "s1/test/test3"); DFSTestUtil.readFile(fs, new Path(test3snapshotPath)); }
Example 11
Source File: TestSnapshotBlocksMap.java From big-c with Apache License 2.0 | 6 votes |
@Test(timeout = 30000) public void testReadSnapshotFileWithCheckpoint() throws Exception { Path foo = new Path("/foo"); hdfs.mkdirs(foo); hdfs.allowSnapshot(foo); Path bar = new Path("/foo/bar"); DFSTestUtil.createFile(hdfs, bar, 100, (short) 2, 100024L); hdfs.createSnapshot(foo, "s1"); assertTrue(hdfs.delete(bar, true)); // checkpoint NameNode nameNode = cluster.getNameNode(); NameNodeAdapter.enterSafeMode(nameNode, false); NameNodeAdapter.saveNamespace(nameNode); NameNodeAdapter.leaveSafeMode(nameNode); // restart namenode to load snapshot files from fsimage cluster.restartNameNode(true); String snapshotPath = Snapshot.getSnapshotPath(foo.toString(), "s1/bar"); DFSTestUtil.readFile(hdfs, new Path(snapshotPath)); }
Example 12
Source File: TestHASafeMode.java From big-c with Apache License 2.0 | 6 votes |
/** * Regression test for a bug experienced while developing * HDFS-2742. The scenario here is: * - image contains some blocks * - edits log contains at least one block addition, followed * by deletion of more blocks than were added. * - When node starts up, some incorrect accounting of block * totals caused an assertion failure. */ @Test public void testBlocksDeletedInEditLog() throws Exception { banner("Starting with NN0 active and NN1 standby, creating some blocks"); // Make 4 blocks persisted in the image. DFSTestUtil.createFile(fs, new Path("/test"), 4*BLOCK_SIZE, (short) 3, 1L); NameNodeAdapter.enterSafeMode(nn0, false); NameNodeAdapter.saveNamespace(nn0); NameNodeAdapter.leaveSafeMode(nn0); // OP_ADD for 2 blocks DFSTestUtil.createFile(fs, new Path("/test2"), 2*BLOCK_SIZE, (short) 3, 1L); // OP_DELETE for 4 blocks fs.delete(new Path("/test"), true); restartActive(); }
Example 13
Source File: TestBootstrapStandby.java From hadoop with Apache License 2.0 | 5 votes |
/** * Test for downloading a checkpoint made at a later checkpoint * from the active. */ @Test public void testDownloadingLaterCheckpoint() throws Exception { // Roll edit logs a few times to inflate txid nn0.getRpcServer().rollEditLog(); nn0.getRpcServer().rollEditLog(); // Make checkpoint NameNodeAdapter.enterSafeMode(nn0, false); NameNodeAdapter.saveNamespace(nn0); NameNodeAdapter.leaveSafeMode(nn0); long expectedCheckpointTxId = NameNodeAdapter.getNamesystem(nn0) .getFSImage().getMostRecentCheckpointTxId(); assertEquals(6, expectedCheckpointTxId); int rc = BootstrapStandby.run( new String[]{"-force"}, cluster.getConfiguration(1)); assertEquals(0, rc); // Should have copied over the namespace from the active FSImageTestUtil.assertNNHasCheckpoints(cluster, 1, ImmutableList.of((int)expectedCheckpointTxId)); FSImageTestUtil.assertNNFilesMatch(cluster); // We should now be able to start the standby successfully. cluster.restartNameNode(1); }
Example 14
Source File: TestSnapshotBlocksMap.java From big-c with Apache License 2.0 | 5 votes |
@Test(timeout = 30000) public void testReadRenamedSnapshotFileWithCheckpoint() throws Exception { final Path foo = new Path("/foo"); final Path foo2 = new Path("/foo2"); hdfs.mkdirs(foo); hdfs.mkdirs(foo2); hdfs.allowSnapshot(foo); hdfs.allowSnapshot(foo2); final Path bar = new Path(foo, "bar"); final Path bar2 = new Path(foo2, "bar"); DFSTestUtil.createFile(hdfs, bar, 100, (short) 2, 100024L); hdfs.createSnapshot(foo, "s1"); // rename to another snapshottable directory and take snapshot assertTrue(hdfs.rename(bar, bar2)); hdfs.createSnapshot(foo2, "s2"); // delete the original renamed file to make sure blocks are not updated by // the original file assertTrue(hdfs.delete(bar2, true)); // checkpoint NameNode nameNode = cluster.getNameNode(); NameNodeAdapter.enterSafeMode(nameNode, false); NameNodeAdapter.saveNamespace(nameNode); NameNodeAdapter.leaveSafeMode(nameNode); // restart namenode to load snapshot files from fsimage cluster.restartNameNode(true); // file in first snapshot String barSnapshotPath = Snapshot.getSnapshotPath(foo.toString(), "s1/bar"); DFSTestUtil.readFile(hdfs, new Path(barSnapshotPath)); // file in second snapshot after rename+delete String bar2SnapshotPath = Snapshot.getSnapshotPath(foo2.toString(), "s2/bar"); DFSTestUtil.readFile(hdfs, new Path(bar2SnapshotPath)); }
Example 15
Source File: TestBootstrapStandby.java From big-c with Apache License 2.0 | 5 votes |
/** * Test for downloading a checkpoint made at a later checkpoint * from the active. */ @Test public void testDownloadingLaterCheckpoint() throws Exception { // Roll edit logs a few times to inflate txid nn0.getRpcServer().rollEditLog(); nn0.getRpcServer().rollEditLog(); // Make checkpoint NameNodeAdapter.enterSafeMode(nn0, false); NameNodeAdapter.saveNamespace(nn0); NameNodeAdapter.leaveSafeMode(nn0); long expectedCheckpointTxId = NameNodeAdapter.getNamesystem(nn0) .getFSImage().getMostRecentCheckpointTxId(); assertEquals(6, expectedCheckpointTxId); int rc = BootstrapStandby.run( new String[]{"-force"}, cluster.getConfiguration(1)); assertEquals(0, rc); // Should have copied over the namespace from the active FSImageTestUtil.assertNNHasCheckpoints(cluster, 1, ImmutableList.of((int)expectedCheckpointTxId)); FSImageTestUtil.assertNNFilesMatch(cluster); // We should now be able to start the standby successfully. cluster.restartNameNode(1); }
Example 16
Source File: TestSnapshotBlocksMap.java From hadoop with Apache License 2.0 | 5 votes |
@Test(timeout = 30000) public void testReadRenamedSnapshotFileWithCheckpoint() throws Exception { final Path foo = new Path("/foo"); final Path foo2 = new Path("/foo2"); hdfs.mkdirs(foo); hdfs.mkdirs(foo2); hdfs.allowSnapshot(foo); hdfs.allowSnapshot(foo2); final Path bar = new Path(foo, "bar"); final Path bar2 = new Path(foo2, "bar"); DFSTestUtil.createFile(hdfs, bar, 100, (short) 2, 100024L); hdfs.createSnapshot(foo, "s1"); // rename to another snapshottable directory and take snapshot assertTrue(hdfs.rename(bar, bar2)); hdfs.createSnapshot(foo2, "s2"); // delete the original renamed file to make sure blocks are not updated by // the original file assertTrue(hdfs.delete(bar2, true)); // checkpoint NameNode nameNode = cluster.getNameNode(); NameNodeAdapter.enterSafeMode(nameNode, false); NameNodeAdapter.saveNamespace(nameNode); NameNodeAdapter.leaveSafeMode(nameNode); // restart namenode to load snapshot files from fsimage cluster.restartNameNode(true); // file in first snapshot String barSnapshotPath = Snapshot.getSnapshotPath(foo.toString(), "s1/bar"); DFSTestUtil.readFile(hdfs, new Path(barSnapshotPath)); // file in second snapshot after rename+delete String bar2SnapshotPath = Snapshot.getSnapshotPath(foo2.toString(), "s2/bar"); DFSTestUtil.readFile(hdfs, new Path(bar2SnapshotPath)); }
Example 17
Source File: TestHASafeMode.java From big-c with Apache License 2.0 | 4 votes |
/**
 * Make sure the client retries when the active NN is in safemode
 */
@Test (timeout=300000)
public void testClientRetrySafeMode() throws Exception {
  final Map<Path, Boolean> results = Collections
      .synchronizedMap(new HashMap<Path, Boolean>());
  final Path test = new Path("/test");
  // let nn0 enter safemode
  NameNodeAdapter.enterSafeMode(nn0, false);
  // Widen the safemode extension window so nn0 stays in safemode long enough
  // for the client call below to be issued while it is still in safemode.
  SafeModeInfo safeMode = (SafeModeInfo) Whitebox.getInternalState(
      nn0.getNamesystem(), "safeMode");
  Whitebox.setInternalState(safeMode, "extension", Integer.valueOf(30000));
  LOG.info("enter safemode");
  // Issue the mkdir from a background thread; it records its result under
  // the test-instance monitor and notifies the waiting main thread.
  new Thread() {
    @Override
    public void run() {
      try {
        boolean mkdir = fs.mkdirs(test);
        LOG.info("mkdir finished, result is " + mkdir);
        synchronized (TestHASafeMode.this) {
          results.put(test, mkdir);
          TestHASafeMode.this.notifyAll();
        }
      } catch (Exception e) {
        LOG.info("Got Exception while calling mkdir", e);
      }
    }
  }.start();

  // make sure the client's call has actually been handled by the active NN
  assertFalse("The directory should not be created while NN in safemode",
      fs.exists(test));

  Thread.sleep(1000);
  // let nn0 leave safemode
  NameNodeAdapter.leaveSafeMode(nn0);
  LOG.info("leave safemode");

  // Wait for the background mkdir to publish a result, then check it
  // succeeded once safemode was left.
  synchronized (this) {
    while (!results.containsKey(test)) {
      this.wait();
    }
    assertTrue(results.get(test));
  }
}
Example 18
Source File: TestDelegationToken.java From big-c with Apache License 2.0 | 4 votes |
/** * Test that the delegation token secret manager only runs when the * NN is out of safe mode. This is because the secret manager * has to log to the edit log, which should not be written in * safe mode. Regression test for HDFS-2579. */ @Test public void testDTManagerInSafeMode() throws Exception { cluster.startDataNodes(config, 1, true, StartupOption.REGULAR, null); FileSystem fs = cluster.getFileSystem(); for (int i = 0; i < 5; i++) { DFSTestUtil.createFile(fs, new Path("/test-" + i), 100, (short)1, 1L); } cluster.getConfiguration(0).setInt( DFSConfigKeys.DFS_NAMENODE_DELEGATION_KEY_UPDATE_INTERVAL_KEY, 500); cluster.getConfiguration(0).setInt( DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, 30000); cluster.setWaitSafeMode(false); cluster.restartNameNode(); NameNode nn = cluster.getNameNode(); assertTrue(nn.isInSafeMode()); DelegationTokenSecretManager sm = NameNodeAdapter.getDtSecretManager(nn.getNamesystem()); assertFalse("Secret manager should not run in safe mode", sm.isRunning()); NameNodeAdapter.leaveSafeMode(nn); assertTrue("Secret manager should start when safe mode is exited", sm.isRunning()); LOG.info("========= entering safemode again"); NameNodeAdapter.enterSafeMode(nn, false); assertFalse("Secret manager should stop again when safe mode " + "is manually entered", sm.isRunning()); // Set the cluster to leave safemode quickly on its own. cluster.getConfiguration(0).setInt( DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, 0); cluster.setWaitSafeMode(true); cluster.restartNameNode(); nn = cluster.getNameNode(); sm = NameNodeAdapter.getDtSecretManager(nn.getNamesystem()); assertFalse(nn.isInSafeMode()); assertTrue(sm.isRunning()); }
Example 19
Source File: TestHASafeMode.java From hadoop with Apache License 2.0 | 4 votes |
/**
 * Make sure the client retries when the active NN is in safemode
 */
@Test (timeout=300000)
public void testClientRetrySafeMode() throws Exception {
  final Map<Path, Boolean> results = Collections
      .synchronizedMap(new HashMap<Path, Boolean>());
  final Path test = new Path("/test");
  // let nn0 enter safemode
  NameNodeAdapter.enterSafeMode(nn0, false);
  // Extend safemode via reflection so nn0 remains in safemode long enough
  // for the client's mkdir below to arrive while it is still in safemode.
  SafeModeInfo safeMode = (SafeModeInfo) Whitebox.getInternalState(
      nn0.getNamesystem(), "safeMode");
  Whitebox.setInternalState(safeMode, "extension", Integer.valueOf(30000));
  LOG.info("enter safemode");
  // Run mkdir in a background thread; its result is published under the
  // test-instance monitor, which the main thread waits on below.
  new Thread() {
    @Override
    public void run() {
      try {
        boolean mkdir = fs.mkdirs(test);
        LOG.info("mkdir finished, result is " + mkdir);
        synchronized (TestHASafeMode.this) {
          results.put(test, mkdir);
          TestHASafeMode.this.notifyAll();
        }
      } catch (Exception e) {
        LOG.info("Got Exception while calling mkdir", e);
      }
    }
  }.start();

  // make sure the client's call has actually been handled by the active NN
  assertFalse("The directory should not be created while NN in safemode",
      fs.exists(test));

  Thread.sleep(1000);
  // let nn0 leave safemode
  NameNodeAdapter.leaveSafeMode(nn0);
  LOG.info("leave safemode");

  // Block until the background thread records a result, then verify the
  // mkdir eventually succeeded.
  synchronized (this) {
    while (!results.containsKey(test)) {
      this.wait();
    }
    assertTrue(results.get(test));
  }
}
Example 20
Source File: TestDelegationToken.java From hadoop with Apache License 2.0 | 4 votes |
/** * Test that the delegation token secret manager only runs when the * NN is out of safe mode. This is because the secret manager * has to log to the edit log, which should not be written in * safe mode. Regression test for HDFS-2579. */ @Test public void testDTManagerInSafeMode() throws Exception { cluster.startDataNodes(config, 1, true, StartupOption.REGULAR, null); FileSystem fs = cluster.getFileSystem(); for (int i = 0; i < 5; i++) { DFSTestUtil.createFile(fs, new Path("/test-" + i), 100, (short)1, 1L); } cluster.getConfiguration(0).setInt( DFSConfigKeys.DFS_NAMENODE_DELEGATION_KEY_UPDATE_INTERVAL_KEY, 500); cluster.getConfiguration(0).setInt( DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, 30000); cluster.setWaitSafeMode(false); cluster.restartNameNode(); NameNode nn = cluster.getNameNode(); assertTrue(nn.isInSafeMode()); DelegationTokenSecretManager sm = NameNodeAdapter.getDtSecretManager(nn.getNamesystem()); assertFalse("Secret manager should not run in safe mode", sm.isRunning()); NameNodeAdapter.leaveSafeMode(nn); assertTrue("Secret manager should start when safe mode is exited", sm.isRunning()); LOG.info("========= entering safemode again"); NameNodeAdapter.enterSafeMode(nn, false); assertFalse("Secret manager should stop again when safe mode " + "is manually entered", sm.isRunning()); // Set the cluster to leave safemode quickly on its own. cluster.getConfiguration(0).setInt( DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, 0); cluster.setWaitSafeMode(true); cluster.restartNameNode(); nn = cluster.getNameNode(); sm = NameNodeAdapter.getDtSecretManager(nn.getNamesystem()); assertFalse(nn.isInSafeMode()); assertTrue(sm.isRunning()); }