Java Code Examples for org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster#Builder
The following examples show how to use org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster#Builder. Each example is taken from an open-source project; the source file, originating project, and license are noted above the code.
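Before the examples, here is a minimal sketch of the usage pattern they all share: wrap a Configuration in a MiniQJMHACluster.Builder, tune the embedded MiniDFSCluster through getDfsBuilder(), build the cluster, and shut it down when finished. This is an illustrative sketch distilled from the examples below, not code from any of the listed projects; the class and main-method wrapper are assumed scaffolding.

// Minimal sketch of the MiniQJMHACluster.Builder pattern used in the
// examples below. Assumes the Hadoop HDFS test jars are on the classpath;
// the class and main-method wrapper are illustrative scaffolding only.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster;

public class MiniQJMHAClusterSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    MiniQJMHACluster.Builder builder = new MiniQJMHACluster.Builder(conf);
    // Customize the embedded MiniDFSCluster through the nested builder.
    builder.getDfsBuilder().numDataNodes(0);
    MiniQJMHACluster qjCluster = builder.build();
    try {
      MiniDFSCluster cluster = qjCluster.getDfsCluster();
      cluster.waitActive();          // wait for the HA NameNodes to come up
      cluster.transitionToActive(0); // make NN0 the active NameNode
    } finally {
      qjCluster.shutdown();          // also stops the JournalNodes
    }
  }
}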
Example 1
Source File: TestFailureToReadEdits.java (from hadoop, Apache License 2.0)
@Before
public void setUpCluster() throws Exception {
  conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_NUM_CHECKPOINTS_RETAINED_KEY, 10);
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
  HAUtil.setAllowStandbyReads(conf, true);
  if (clusterType == TestType.SHARED_DIR_HA) {
    MiniDFSNNTopology topology = MiniQJMHACluster.createDefaultTopology(10000);
    cluster = new MiniDFSCluster.Builder(conf)
        .nnTopology(topology)
        .numDataNodes(0)
        .checkExitOnShutdown(false)
        .build();
  } else {
    Builder builder = new MiniQJMHACluster.Builder(conf);
    builder.getDfsBuilder().numDataNodes(0).checkExitOnShutdown(false);
    miniQjmHaCluster = builder.build();
    cluster = miniQjmHaCluster.getDfsCluster();
  }
  cluster.waitActive();
  nn0 = cluster.getNameNode(0);
  nn1 = cluster.getNameNode(1);
  cluster.transitionToActive(0);
  fs = HATestUtil.configureFailoverFs(cluster, conf);
}
Example 2
Source File: TestFailureToReadEdits.java (from big-c, Apache License 2.0)
@Before
public void setUpCluster() throws Exception {
  conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_NUM_CHECKPOINTS_RETAINED_KEY, 10);
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
  HAUtil.setAllowStandbyReads(conf, true);
  if (clusterType == TestType.SHARED_DIR_HA) {
    MiniDFSNNTopology topology = MiniQJMHACluster.createDefaultTopology(10000);
    cluster = new MiniDFSCluster.Builder(conf)
        .nnTopology(topology)
        .numDataNodes(0)
        .checkExitOnShutdown(false)
        .build();
  } else {
    Builder builder = new MiniQJMHACluster.Builder(conf);
    builder.getDfsBuilder().numDataNodes(0).checkExitOnShutdown(false);
    miniQjmHaCluster = builder.build();
    cluster = miniQjmHaCluster.getDfsCluster();
  }
  cluster.waitActive();
  nn0 = cluster.getNameNode(0);
  nn1 = cluster.getNameNode(1);
  cluster.transitionToActive(0);
  fs = HATestUtil.configureFailoverFs(cluster, conf);
}
Example 3
Source File: TestMiniClusterHadoopNNAWithStreamEngine.java (from NNAnalytics, Apache License 2.0)
@BeforeClass
public static void beforeClass() throws Exception {
  RANDOM.nextBytes(TINY_FILE_BYTES);
  RANDOM.nextBytes(SMALL_FILE_BYTES);
  RANDOM.nextBytes(MEDIUM_FILE_BYTES);

  // Speed up editlog tailing.
  CONF.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
  CONF.setInt(DFSConfigKeys.DFS_HA_LOGROLL_PERIOD_KEY, 1);
  CONF.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1);
  CONF.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
  CONF.setBoolean("fs.hdfs.impl.disable.cache", true);
  CONF.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hdfs://" + NAMESERVICE);

  MiniQJMHACluster.Builder qjmBuilder = new MiniQJMHACluster.Builder(CONF);
  qjmBuilder.getDfsBuilder().numDataNodes(NUMDATANODES);
  cluster = qjmBuilder.build();
  cluster.getDfsCluster().waitActive();
  cluster.getDfsCluster().transitionToActive(0);
  HATestUtil.setFailoverConfigurations(cluster.getDfsCluster(), CONF, NAMESERVICE, 0);
  CONF.set("dfs.nameservice.id", NAMESERVICE);

  nna = new HadoopWebServerMain();
  nnaConf = new ApplicationConfiguration();
  nnaConf.set("nna.support.bootstrap.overrides", "true");
  nnaConf.set("ldap.enable", "false");
  nnaConf.set("authorization.enable", "false");
  nnaConf.set("nna.historical", "true");
  nnaConf.set("nna.base.dir", MiniDFSCluster.getBaseDirectory());
  nnaConf.set("nna.web.base.dir", "src/main/resources/webapps/nna");
  nnaConf.set("nna.query.engine.impl", JavaStreamQueryEngine.class.getCanonicalName());
  nna.init(nnaConf, null, CONF);

  hostPort = new HttpHost("localhost", 4567);
  client = new DefaultHttpClient();

  // Fetch NNA Namespace.
  HttpGet fetch = new HttpGet("http://localhost:4567/fetchNamespace");
  HttpResponse fetchRes = client.execute(hostPort, fetch);
  assertThat(fetchRes.getStatusLine().getStatusCode(), is(200));
  IOUtils.readLines(fetchRes.getEntity().getContent());

  // Reload NNA Namespace.
  HttpGet reload = new HttpGet("http://localhost:4567/reloadNamespace");
  HttpResponse reloadRes = client.execute(hostPort, reload);
  assertThat(reloadRes.getStatusLine().getStatusCode(), is(200));
  IOUtils.readLines(reloadRes.getEntity().getContent());
}
Example 4
Source File: TestWithMiniClusterWithStreamEngine.java (from NNAnalytics, Apache License 2.0)
@BeforeClass
public static void beforeClass() throws Exception {
  RANDOM.nextBytes(TINY_FILE_BYTES);
  RANDOM.nextBytes(SMALL_FILE_BYTES);
  RANDOM.nextBytes(MEDIUM_FILE_BYTES);

  // Speed up editlog tailing.
  CONF.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
  CONF.setInt(DFSConfigKeys.DFS_HA_LOGROLL_PERIOD_KEY, 1);
  CONF.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1);
  CONF.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
  CONF.setBoolean("fs.hdfs.impl.disable.cache", true);
  CONF.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hdfs://" + NAMESERVICE);

  MiniQJMHACluster.Builder qjmBuilder = new MiniQJMHACluster.Builder(CONF);
  qjmBuilder.getDfsBuilder().numDataNodes(NUMDATANODES);
  cluster = qjmBuilder.build();
  cluster.getDfsCluster().waitActive();
  cluster.getDfsCluster().transitionToActive(0);
  HATestUtil.setFailoverConfigurations(cluster.getDfsCluster(), CONF, NAMESERVICE, 0);
  CONF.set("dfs.nameservice.id", NAMESERVICE);

  nna = new WebServerMain();
  nnaConf = new ApplicationConfiguration();
  nnaConf.set("nna.support.bootstrap.overrides", "true");
  nnaConf.set("ldap.enable", "false");
  nnaConf.set("authorization.enable", "false");
  nnaConf.set("nna.historical", "true");
  nnaConf.set("nna.base.dir", MiniDFSCluster.getBaseDirectory());
  nnaConf.set("nna.web.base.dir", "src/main/resources/webapps/nna");
  nnaConf.set("nna.query.engine.impl", JavaStreamQueryEngine.class.getCanonicalName());
  nna.init(nnaConf, null, CONF);

  hostPort = new HttpHost("localhost", 4567);
  client = new DefaultHttpClient();

  // Fetch NNA Namespace.
  HttpGet fetch = new HttpGet("http://localhost:4567/fetchNamespace");
  HttpResponse fetchRes = client.execute(hostPort, fetch);
  assertThat(fetchRes.getStatusLine().getStatusCode(), is(200));
  IOUtils.readLines(fetchRes.getEntity().getContent());

  // Reload NNA Namespace.
  HttpGet reload = new HttpGet("http://localhost:4567/reloadNamespace");
  HttpResponse reloadRes = client.execute(hostPort, reload);
  assertThat(reloadRes.getStatusLine().getStatusCode(), is(200));
  IOUtils.readLines(reloadRes.getEntity().getContent());
}
Example 5
Source File: TestDFSUpgradeWithHA.java (from hadoop, Apache License 2.0)
/**
 * Make sure that an HA NN can successfully upgrade when configured using
 * JournalNodes.
 */
@Test
public void testUpgradeWithJournalNodes() throws IOException,
    URISyntaxException {
  MiniQJMHACluster qjCluster = null;
  FileSystem fs = null;
  try {
    Builder builder = new MiniQJMHACluster.Builder(conf);
    builder.getDfsBuilder()
        .numDataNodes(0);
    qjCluster = builder.build();

    MiniDFSCluster cluster = qjCluster.getDfsCluster();

    // No upgrade is in progress at the moment.
    checkJnPreviousDirExistence(qjCluster, false);
    checkClusterPreviousDirExistence(cluster, false);
    assertCTimesEqual(cluster);

    // Transition NN0 to active and do some FS ops.
    cluster.transitionToActive(0);
    fs = HATestUtil.configureFailoverFs(cluster, conf);
    assertTrue(fs.mkdirs(new Path("/foo1")));

    // get the value of the committedTxnId in journal nodes
    final long cidBeforeUpgrade = getCommittedTxnIdValue(qjCluster);

    // Do the upgrade. Shut down NN1 and then restart NN0 with the upgrade
    // flag.
    cluster.shutdownNameNode(1);
    cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE);
    cluster.restartNameNode(0, false);

    checkNnPreviousDirExistence(cluster, 0, true);
    checkNnPreviousDirExistence(cluster, 1, false);
    checkJnPreviousDirExistence(qjCluster, true);
    assertTrue(cidBeforeUpgrade <= getCommittedTxnIdValue(qjCluster));

    // NN0 should come up in the active state when given the -upgrade option,
    // so no need to transition it to active.
    assertTrue(fs.mkdirs(new Path("/foo2")));

    // Restart NN0 without the -upgrade flag, to make sure that works.
    cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.REGULAR);
    cluster.restartNameNode(0, false);

    // Make sure we can still do FS ops after upgrading.
    cluster.transitionToActive(0);
    assertTrue(fs.mkdirs(new Path("/foo3")));

    assertTrue(getCommittedTxnIdValue(qjCluster) > cidBeforeUpgrade);

    // Now bootstrap the standby with the upgraded info.
    int rc = BootstrapStandby.run(
        new String[]{"-force"},
        cluster.getConfiguration(1));
    assertEquals(0, rc);

    // Now restart NN1 and make sure that we can do ops against that as well.
    cluster.restartNameNode(1);
    cluster.transitionToStandby(0);
    cluster.transitionToActive(1);
    assertTrue(fs.mkdirs(new Path("/foo4")));

    assertCTimesEqual(cluster);
  } finally {
    if (fs != null) {
      fs.close();
    }
    if (qjCluster != null) {
      qjCluster.shutdown();
    }
  }
}
Example 6
Source File: TestDFSUpgradeWithHA.java (from hadoop, Apache License 2.0)
@Test
public void testFinalizeWithJournalNodes() throws IOException,
    URISyntaxException {
  MiniQJMHACluster qjCluster = null;
  FileSystem fs = null;
  try {
    Builder builder = new MiniQJMHACluster.Builder(conf);
    builder.getDfsBuilder()
        .numDataNodes(0);
    qjCluster = builder.build();

    MiniDFSCluster cluster = qjCluster.getDfsCluster();

    // No upgrade is in progress at the moment.
    checkJnPreviousDirExistence(qjCluster, false);
    checkClusterPreviousDirExistence(cluster, false);
    assertCTimesEqual(cluster);

    // Transition NN0 to active and do some FS ops.
    cluster.transitionToActive(0);
    fs = HATestUtil.configureFailoverFs(cluster, conf);
    assertTrue(fs.mkdirs(new Path("/foo1")));

    final long cidBeforeUpgrade = getCommittedTxnIdValue(qjCluster);

    // Do the upgrade. Shut down NN1 and then restart NN0 with the upgrade
    // flag.
    cluster.shutdownNameNode(1);
    cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE);
    cluster.restartNameNode(0, false);
    assertTrue(cidBeforeUpgrade <= getCommittedTxnIdValue(qjCluster));
    assertTrue(fs.mkdirs(new Path("/foo2")));

    checkNnPreviousDirExistence(cluster, 0, true);
    checkNnPreviousDirExistence(cluster, 1, false);
    checkJnPreviousDirExistence(qjCluster, true);

    // Now bootstrap the standby with the upgraded info.
    int rc = BootstrapStandby.run(
        new String[]{"-force"},
        cluster.getConfiguration(1));
    assertEquals(0, rc);
    cluster.restartNameNode(1);

    final long cidDuringUpgrade = getCommittedTxnIdValue(qjCluster);
    assertTrue(cidDuringUpgrade > cidBeforeUpgrade);

    runFinalizeCommand(cluster);

    assertEquals(cidDuringUpgrade, getCommittedTxnIdValue(qjCluster));
    checkClusterPreviousDirExistence(cluster, false);
    checkJnPreviousDirExistence(qjCluster, false);
    assertCTimesEqual(cluster);
  } finally {
    if (fs != null) {
      fs.close();
    }
    if (qjCluster != null) {
      qjCluster.shutdown();
    }
  }
}
Example 7
Source File: TestDFSUpgradeWithHA.java (from hadoop, Apache License 2.0)
/**
 * Make sure that even if the NN which initiated the upgrade is in the standby
 * state that we're allowed to finalize.
 */
@Test
public void testFinalizeFromSecondNameNodeWithJournalNodes()
    throws IOException, URISyntaxException {
  MiniQJMHACluster qjCluster = null;
  FileSystem fs = null;
  try {
    Builder builder = new MiniQJMHACluster.Builder(conf);
    builder.getDfsBuilder()
        .numDataNodes(0);
    qjCluster = builder.build();

    MiniDFSCluster cluster = qjCluster.getDfsCluster();

    // No upgrade is in progress at the moment.
    checkJnPreviousDirExistence(qjCluster, false);
    checkClusterPreviousDirExistence(cluster, false);
    assertCTimesEqual(cluster);

    // Transition NN0 to active and do some FS ops.
    cluster.transitionToActive(0);
    fs = HATestUtil.configureFailoverFs(cluster, conf);
    assertTrue(fs.mkdirs(new Path("/foo1")));

    // Do the upgrade. Shut down NN1 and then restart NN0 with the upgrade
    // flag.
    cluster.shutdownNameNode(1);
    cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE);
    cluster.restartNameNode(0, false);

    checkNnPreviousDirExistence(cluster, 0, true);
    checkNnPreviousDirExistence(cluster, 1, false);
    checkJnPreviousDirExistence(qjCluster, true);

    // Now bootstrap the standby with the upgraded info.
    int rc = BootstrapStandby.run(
        new String[]{"-force"},
        cluster.getConfiguration(1));
    assertEquals(0, rc);
    cluster.restartNameNode(1);

    // Make the second NN (not the one that initiated the upgrade) active when
    // the finalize command is run.
    cluster.transitionToStandby(0);
    cluster.transitionToActive(1);

    runFinalizeCommand(cluster);

    checkClusterPreviousDirExistence(cluster, false);
    checkJnPreviousDirExistence(qjCluster, false);
    assertCTimesEqual(cluster);
  } finally {
    if (fs != null) {
      fs.close();
    }
    if (qjCluster != null) {
      qjCluster.shutdown();
    }
  }
}
Example 8
Source File: TestDFSUpgradeWithHA.java (from hadoop, Apache License 2.0)
@Test
public void testRollbackWithJournalNodes() throws IOException,
    URISyntaxException {
  MiniQJMHACluster qjCluster = null;
  FileSystem fs = null;
  try {
    Builder builder = new MiniQJMHACluster.Builder(conf);
    builder.getDfsBuilder()
        .numDataNodes(0);
    qjCluster = builder.build();

    MiniDFSCluster cluster = qjCluster.getDfsCluster();

    // No upgrade is in progress at the moment.
    checkClusterPreviousDirExistence(cluster, false);
    assertCTimesEqual(cluster);
    checkJnPreviousDirExistence(qjCluster, false);

    // Transition NN0 to active and do some FS ops.
    cluster.transitionToActive(0);
    fs = HATestUtil.configureFailoverFs(cluster, conf);
    assertTrue(fs.mkdirs(new Path("/foo1")));

    final long cidBeforeUpgrade = getCommittedTxnIdValue(qjCluster);

    // Do the upgrade. Shut down NN1 and then restart NN0 with the upgrade
    // flag.
    cluster.shutdownNameNode(1);
    cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE);
    cluster.restartNameNode(0, false);

    checkNnPreviousDirExistence(cluster, 0, true);
    checkNnPreviousDirExistence(cluster, 1, false);
    checkJnPreviousDirExistence(qjCluster, true);

    // NN0 should come up in the active state when given the -upgrade option,
    // so no need to transition it to active.
    assertTrue(fs.mkdirs(new Path("/foo2")));

    final long cidDuringUpgrade = getCommittedTxnIdValue(qjCluster);
    assertTrue(cidDuringUpgrade > cidBeforeUpgrade);

    // Now bootstrap the standby with the upgraded info.
    int rc = BootstrapStandby.run(
        new String[]{"-force"},
        cluster.getConfiguration(1));
    assertEquals(0, rc);
    cluster.restartNameNode(1);

    checkNnPreviousDirExistence(cluster, 0, true);
    checkNnPreviousDirExistence(cluster, 1, true);
    checkJnPreviousDirExistence(qjCluster, true);
    assertCTimesEqual(cluster);

    // Shut down the NNs, but deliberately leave the JNs up and running.
    Collection<URI> nn1NameDirs = cluster.getNameDirs(0);
    cluster.shutdown();

    conf.setStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
        Joiner.on(",").join(nn1NameDirs));
    NameNode.doRollback(conf, false);

    final long cidAfterRollback = getCommittedTxnIdValue(qjCluster);
    assertTrue(cidBeforeUpgrade < cidAfterRollback);
    // make sure the committedTxnId has been reset correctly after rollback
    assertTrue(cidDuringUpgrade > cidAfterRollback);

    // The rollback operation should have rolled back the first NN's local
    // dirs, and the shared dir, but not the other NN's dirs. Those have to be
    // done by bootstrapping the standby.
    checkNnPreviousDirExistence(cluster, 0, false);
    checkJnPreviousDirExistence(qjCluster, false);
  } finally {
    if (fs != null) {
      fs.close();
    }
    if (qjCluster != null) {
      qjCluster.shutdown();
    }
  }
}
Example 9
Source File: TestDFSUpgradeWithHA.java (from big-c, Apache License 2.0)
/**
 * Make sure that an HA NN can successfully upgrade when configured using
 * JournalNodes.
 */
@Test
public void testUpgradeWithJournalNodes() throws IOException,
    URISyntaxException {
  MiniQJMHACluster qjCluster = null;
  FileSystem fs = null;
  try {
    Builder builder = new MiniQJMHACluster.Builder(conf);
    builder.getDfsBuilder()
        .numDataNodes(0);
    qjCluster = builder.build();

    MiniDFSCluster cluster = qjCluster.getDfsCluster();

    // No upgrade is in progress at the moment.
    checkJnPreviousDirExistence(qjCluster, false);
    checkClusterPreviousDirExistence(cluster, false);
    assertCTimesEqual(cluster);

    // Transition NN0 to active and do some FS ops.
    cluster.transitionToActive(0);
    fs = HATestUtil.configureFailoverFs(cluster, conf);
    assertTrue(fs.mkdirs(new Path("/foo1")));

    // get the value of the committedTxnId in journal nodes
    final long cidBeforeUpgrade = getCommittedTxnIdValue(qjCluster);

    // Do the upgrade. Shut down NN1 and then restart NN0 with the upgrade
    // flag.
    cluster.shutdownNameNode(1);
    cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE);
    cluster.restartNameNode(0, false);

    checkNnPreviousDirExistence(cluster, 0, true);
    checkNnPreviousDirExistence(cluster, 1, false);
    checkJnPreviousDirExistence(qjCluster, true);
    assertTrue(cidBeforeUpgrade <= getCommittedTxnIdValue(qjCluster));

    // NN0 should come up in the active state when given the -upgrade option,
    // so no need to transition it to active.
    assertTrue(fs.mkdirs(new Path("/foo2")));

    // Restart NN0 without the -upgrade flag, to make sure that works.
    cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.REGULAR);
    cluster.restartNameNode(0, false);

    // Make sure we can still do FS ops after upgrading.
    cluster.transitionToActive(0);
    assertTrue(fs.mkdirs(new Path("/foo3")));

    assertTrue(getCommittedTxnIdValue(qjCluster) > cidBeforeUpgrade);

    // Now bootstrap the standby with the upgraded info.
    int rc = BootstrapStandby.run(
        new String[]{"-force"},
        cluster.getConfiguration(1));
    assertEquals(0, rc);

    // Now restart NN1 and make sure that we can do ops against that as well.
    cluster.restartNameNode(1);
    cluster.transitionToStandby(0);
    cluster.transitionToActive(1);
    assertTrue(fs.mkdirs(new Path("/foo4")));

    assertCTimesEqual(cluster);
  } finally {
    if (fs != null) {
      fs.close();
    }
    if (qjCluster != null) {
      qjCluster.shutdown();
    }
  }
}
Example 10
Source File: TestDFSUpgradeWithHA.java (from big-c, Apache License 2.0)
@Test
public void testFinalizeWithJournalNodes() throws IOException,
    URISyntaxException {
  MiniQJMHACluster qjCluster = null;
  FileSystem fs = null;
  try {
    Builder builder = new MiniQJMHACluster.Builder(conf);
    builder.getDfsBuilder()
        .numDataNodes(0);
    qjCluster = builder.build();

    MiniDFSCluster cluster = qjCluster.getDfsCluster();

    // No upgrade is in progress at the moment.
    checkJnPreviousDirExistence(qjCluster, false);
    checkClusterPreviousDirExistence(cluster, false);
    assertCTimesEqual(cluster);

    // Transition NN0 to active and do some FS ops.
    cluster.transitionToActive(0);
    fs = HATestUtil.configureFailoverFs(cluster, conf);
    assertTrue(fs.mkdirs(new Path("/foo1")));

    final long cidBeforeUpgrade = getCommittedTxnIdValue(qjCluster);

    // Do the upgrade. Shut down NN1 and then restart NN0 with the upgrade
    // flag.
    cluster.shutdownNameNode(1);
    cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE);
    cluster.restartNameNode(0, false);
    assertTrue(cidBeforeUpgrade <= getCommittedTxnIdValue(qjCluster));
    assertTrue(fs.mkdirs(new Path("/foo2")));

    checkNnPreviousDirExistence(cluster, 0, true);
    checkNnPreviousDirExistence(cluster, 1, false);
    checkJnPreviousDirExistence(qjCluster, true);

    // Now bootstrap the standby with the upgraded info.
    int rc = BootstrapStandby.run(
        new String[]{"-force"},
        cluster.getConfiguration(1));
    assertEquals(0, rc);
    cluster.restartNameNode(1);

    final long cidDuringUpgrade = getCommittedTxnIdValue(qjCluster);
    assertTrue(cidDuringUpgrade > cidBeforeUpgrade);

    runFinalizeCommand(cluster);

    assertEquals(cidDuringUpgrade, getCommittedTxnIdValue(qjCluster));
    checkClusterPreviousDirExistence(cluster, false);
    checkJnPreviousDirExistence(qjCluster, false);
    assertCTimesEqual(cluster);
  } finally {
    if (fs != null) {
      fs.close();
    }
    if (qjCluster != null) {
      qjCluster.shutdown();
    }
  }
}
Example 11
Source File: TestDFSUpgradeWithHA.java (from big-c, Apache License 2.0)
/**
 * Make sure that even if the NN which initiated the upgrade is in the standby
 * state that we're allowed to finalize.
 */
@Test
public void testFinalizeFromSecondNameNodeWithJournalNodes()
    throws IOException, URISyntaxException {
  MiniQJMHACluster qjCluster = null;
  FileSystem fs = null;
  try {
    Builder builder = new MiniQJMHACluster.Builder(conf);
    builder.getDfsBuilder()
        .numDataNodes(0);
    qjCluster = builder.build();

    MiniDFSCluster cluster = qjCluster.getDfsCluster();

    // No upgrade is in progress at the moment.
    checkJnPreviousDirExistence(qjCluster, false);
    checkClusterPreviousDirExistence(cluster, false);
    assertCTimesEqual(cluster);

    // Transition NN0 to active and do some FS ops.
    cluster.transitionToActive(0);
    fs = HATestUtil.configureFailoverFs(cluster, conf);
    assertTrue(fs.mkdirs(new Path("/foo1")));

    // Do the upgrade. Shut down NN1 and then restart NN0 with the upgrade
    // flag.
    cluster.shutdownNameNode(1);
    cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE);
    cluster.restartNameNode(0, false);

    checkNnPreviousDirExistence(cluster, 0, true);
    checkNnPreviousDirExistence(cluster, 1, false);
    checkJnPreviousDirExistence(qjCluster, true);

    // Now bootstrap the standby with the upgraded info.
    int rc = BootstrapStandby.run(
        new String[]{"-force"},
        cluster.getConfiguration(1));
    assertEquals(0, rc);
    cluster.restartNameNode(1);

    // Make the second NN (not the one that initiated the upgrade) active when
    // the finalize command is run.
    cluster.transitionToStandby(0);
    cluster.transitionToActive(1);

    runFinalizeCommand(cluster);

    checkClusterPreviousDirExistence(cluster, false);
    checkJnPreviousDirExistence(qjCluster, false);
    assertCTimesEqual(cluster);
  } finally {
    if (fs != null) {
      fs.close();
    }
    if (qjCluster != null) {
      qjCluster.shutdown();
    }
  }
}
Example 12
Source File: TestDFSUpgradeWithHA.java (from big-c, Apache License 2.0)
@Test
public void testRollbackWithJournalNodes() throws IOException,
    URISyntaxException {
  MiniQJMHACluster qjCluster = null;
  FileSystem fs = null;
  try {
    Builder builder = new MiniQJMHACluster.Builder(conf);
    builder.getDfsBuilder()
        .numDataNodes(0);
    qjCluster = builder.build();

    MiniDFSCluster cluster = qjCluster.getDfsCluster();

    // No upgrade is in progress at the moment.
    checkClusterPreviousDirExistence(cluster, false);
    assertCTimesEqual(cluster);
    checkJnPreviousDirExistence(qjCluster, false);

    // Transition NN0 to active and do some FS ops.
    cluster.transitionToActive(0);
    fs = HATestUtil.configureFailoverFs(cluster, conf);
    assertTrue(fs.mkdirs(new Path("/foo1")));

    final long cidBeforeUpgrade = getCommittedTxnIdValue(qjCluster);

    // Do the upgrade. Shut down NN1 and then restart NN0 with the upgrade
    // flag.
    cluster.shutdownNameNode(1);
    cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE);
    cluster.restartNameNode(0, false);

    checkNnPreviousDirExistence(cluster, 0, true);
    checkNnPreviousDirExistence(cluster, 1, false);
    checkJnPreviousDirExistence(qjCluster, true);

    // NN0 should come up in the active state when given the -upgrade option,
    // so no need to transition it to active.
    assertTrue(fs.mkdirs(new Path("/foo2")));

    final long cidDuringUpgrade = getCommittedTxnIdValue(qjCluster);
    assertTrue(cidDuringUpgrade > cidBeforeUpgrade);

    // Now bootstrap the standby with the upgraded info.
    int rc = BootstrapStandby.run(
        new String[]{"-force"},
        cluster.getConfiguration(1));
    assertEquals(0, rc);
    cluster.restartNameNode(1);

    checkNnPreviousDirExistence(cluster, 0, true);
    checkNnPreviousDirExistence(cluster, 1, true);
    checkJnPreviousDirExistence(qjCluster, true);
    assertCTimesEqual(cluster);

    // Shut down the NNs, but deliberately leave the JNs up and running.
    Collection<URI> nn1NameDirs = cluster.getNameDirs(0);
    cluster.shutdown();

    conf.setStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
        Joiner.on(",").join(nn1NameDirs));
    NameNode.doRollback(conf, false);

    final long cidAfterRollback = getCommittedTxnIdValue(qjCluster);
    assertTrue(cidBeforeUpgrade < cidAfterRollback);
    // make sure the committedTxnId has been reset correctly after rollback
    assertTrue(cidDuringUpgrade > cidAfterRollback);

    // The rollback operation should have rolled back the first NN's local
    // dirs, and the shared dir, but not the other NN's dirs. Those have to be
    // done by bootstrapping the standby.
    checkNnPreviousDirExistence(cluster, 0, false);
    checkJnPreviousDirExistence(qjCluster, false);
  } finally {
    if (fs != null) {
      fs.close();
    }
    if (qjCluster != null) {
      qjCluster.shutdown();
    }
  }
}