org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil Java Examples
The following examples show how to use
org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil.
These snippets are drawn from the hadoop, big-c, and lucene-solr projects; each example notes its source file and license.
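Across the examples the same basic pattern recurs: start an HA-enabled MiniDFSCluster, let HATestUtil wire a client Configuration to the cluster's logical HA URI, obtain a failover-aware FileSystem, and then fail over between NameNodes. The sketch below condenses that pattern. It is only assembled from calls that appear in the examples that follow; the method name, path, and comments are illustrative and not taken from any one source file.

// Minimal sketch of the common HATestUtil usage pattern (illustrative only;
// assembled from calls shown in the examples below).
@Test
public void sketchOfFailoverUsage() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleHATopology())  // two NameNodes in one nameservice
      .numDataNodes(1)
      .build();
  try {
    cluster.waitActive();
    cluster.transitionToActive(0);

    // Point the client configuration at the logical HA URI and get a
    // failover-capable FileSystem client.
    FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
    Path p = new Path("/sketch");
    Assert.assertTrue(fs.mkdirs(p));

    // Kill the active NameNode and promote the standby; the client should
    // transparently fail over.
    cluster.shutdownNameNode(0);
    cluster.transitionToActive(1);
    Assert.assertTrue(fs.exists(p));
  } finally {
    cluster.shutdown();
  }
}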
Example #1
Source File: TestDFSClientFailover.java (from hadoop, Apache License 2.0)
/**
 * Make sure that client failover works when an active NN dies and the standby
 * takes over.
 */
@Test
public void testDfsClientFailover() throws IOException, URISyntaxException {
  FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);

  DFSTestUtil.createFile(fs, TEST_FILE, FILE_LENGTH_TO_VERIFY, (short)1, 1L);

  assertEquals(fs.getFileStatus(TEST_FILE).getLen(), FILE_LENGTH_TO_VERIFY);
  cluster.shutdownNameNode(0);
  cluster.transitionToActive(1);
  assertEquals(fs.getFileStatus(TEST_FILE).getLen(), FILE_LENGTH_TO_VERIFY);

  // Check that it functions even if the URL becomes canonicalized
  // to include a port number.
  Path withPort = new Path("hdfs://" +
      HATestUtil.getLogicalHostname(cluster) + ":" +
      NameNode.DEFAULT_PORT + "/" + TEST_FILE.toUri().getPath());
  FileSystem fs2 = withPort.getFileSystem(fs.getConf());
  assertTrue(fs2.exists(withPort));

  fs.close();
}
Example #2
Source File: TestWebHDFSForHA.java (from big-c, Apache License 2.0)
@Test
public void testMultipleNamespacesConfigured() throws Exception {
  Configuration conf = DFSTestUtil.newHAConfiguration(LOGICAL_NAME);
  MiniDFSCluster cluster = null;
  WebHdfsFileSystem fs = null;

  try {
    cluster = new MiniDFSCluster.Builder(conf).nnTopology(topo)
        .numDataNodes(1).build();

    HATestUtil.setFailoverConfigurations(cluster, conf, LOGICAL_NAME);
    cluster.waitActive();

    DFSTestUtil.addHAConfiguration(conf, LOGICAL_NAME + "remote");
    DFSTestUtil.setFakeHttpAddresses(conf, LOGICAL_NAME + "remote");

    fs = (WebHdfsFileSystem)FileSystem.get(WEBHDFS_URI, conf);
    Assert.assertEquals(2, fs.getResolvedNNAddr().length);
  } finally {
    IOUtils.cleanup(null, fs);
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Example #3
Source File: TestDFSClientFailover.java (from big-c, Apache License 2.0)
/**
 * Make sure that client failover works when an active NN dies and the standby
 * takes over.
 */
@Test
public void testDfsClientFailover() throws IOException, URISyntaxException {
  FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);

  DFSTestUtil.createFile(fs, TEST_FILE, FILE_LENGTH_TO_VERIFY, (short)1, 1L);

  assertEquals(fs.getFileStatus(TEST_FILE).getLen(), FILE_LENGTH_TO_VERIFY);
  cluster.shutdownNameNode(0);
  cluster.transitionToActive(1);
  assertEquals(fs.getFileStatus(TEST_FILE).getLen(), FILE_LENGTH_TO_VERIFY);

  // Check that it functions even if the URL becomes canonicalized
  // to include a port number.
  Path withPort = new Path("hdfs://" +
      HATestUtil.getLogicalHostname(cluster) + ":" +
      NameNode.DEFAULT_PORT + "/" + TEST_FILE.toUri().getPath());
  FileSystem fs2 = withPort.getFileSystem(fs.getConf());
  assertTrue(fs2.exists(withPort));

  fs.close();
}
Example #4
Source File: TestNameNodeRetryCacheMetrics.java (from hadoop, Apache License 2.0)
/** Start a cluster */
@Before
public void setup() throws Exception {
  conf = new HdfsConfiguration();
  conf.setBoolean(DFS_NAMENODE_ENABLE_RETRY_CACHE_KEY, true);
  conf.setInt(DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY, 2);
  cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(3)
      .build();
  cluster.waitActive();
  cluster.transitionToActive(namenodeId);
  HATestUtil.setFailoverConfigurations(cluster, conf);
  filesystem = (DistributedFileSystem) HATestUtil.configureFailoverFs(cluster, conf);
  namesystem = cluster.getNamesystem(namenodeId);
  metrics = namesystem.getRetryCache().getMetricsForTests();
}
Example #5
Source File: TestMover.java (from hadoop, Apache License 2.0)
@Test
public void testMoverCliWithHAConf() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  final MiniDFSCluster cluster = new MiniDFSCluster
      .Builder(new HdfsConfiguration())
      .nnTopology(MiniDFSNNTopology.simpleHATopology())
      .numDataNodes(0).build();
  HATestUtil.setFailoverConfigurations(cluster, conf, "MyCluster");
  try {
    Map<URI, List<Path>> movePaths = Mover.Cli.getNameNodePathsToMove(conf,
        "-p", "/foo", "/bar");
    Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
    Assert.assertEquals(1, namenodes.size());
    Assert.assertEquals(1, movePaths.size());
    URI nn = namenodes.iterator().next();
    Assert.assertEquals(new URI("hdfs://MyCluster"), nn);
    Assert.assertTrue(movePaths.containsKey(nn));
    checkMovePaths(movePaths.get(nn), new Path("/foo"), new Path("/bar"));
  } finally {
    cluster.shutdown();
  }
}
Example #6
Source File: TestDFSClientFailover.java (from big-c, Apache License 2.0)
/**
 * Test that even a non-idempotent method will properly fail-over if the
 * first IPC attempt times out trying to connect. Regression test for
 * HDFS-4404.
 */
@Test
public void testFailoverOnConnectTimeout() throws Exception {
  conf.setClass(CommonConfigurationKeysPublic.HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_KEY,
      InjectingSocketFactory.class, SocketFactory.class);
  // Set up the InjectingSocketFactory to throw a ConnectTimeoutException
  // when connecting to the first NN.
  InjectingSocketFactory.portToInjectOn = cluster.getNameNodePort(0);

  FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);

  // Make the second NN the active one.
  cluster.shutdownNameNode(0);
  cluster.transitionToActive(1);

  // Call a non-idempotent method, and ensure the failover of the call proceeds
  // successfully.
  IOUtils.closeStream(fs.create(TEST_FILE));
}
Example #7
Source File: TestDFSClientFailover.java (from hadoop, Apache License 2.0)
/**
 * Test to verify legacy proxy providers are correctly wrapped.
 */
@Test
public void testWrappedFailoverProxyProvider() throws Exception {
  // setup the config with the dummy provider class
  Configuration config = new HdfsConfiguration(conf);
  String logicalName = HATestUtil.getLogicalHostname(cluster);
  HATestUtil.setFailoverConfigurations(cluster, config, logicalName);
  config.set(DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX + "." + logicalName,
      DummyLegacyFailoverProxyProvider.class.getName());
  Path p = new Path("hdfs://" + logicalName + "/");

  // not to use IP address for token service
  SecurityUtil.setTokenServiceUseIp(false);

  // Logical URI should be used.
  assertTrue("Legacy proxy providers should use logical URI.",
      HAUtil.useLogicalUri(config, p.toUri()));
}
Example #8
Source File: TestDFSClientFailover.java (from hadoop, Apache License 2.0)
/**
 * Same test as above, but for FileContext.
 */
@Test
public void testFileContextDoesntDnsResolveLogicalURI() throws Exception {
  FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
  NameService spyNS = spyOnNameService();
  String logicalHost = fs.getUri().getHost();
  Configuration haClientConf = fs.getConf();

  FileContext fc = FileContext.getFileContext(haClientConf);
  Path root = new Path("/");
  fc.listStatus(root);
  fc.listStatus(fc.makeQualified(root));
  fc.getDefaultFileSystem().getCanonicalServiceName();

  // Ensure that the logical hostname was never resolved.
  Mockito.verify(spyNS, Mockito.never()).lookupAllHostAddr(Mockito.eq(logicalHost));
}
Example #9
Source File: TestDFSClientFailover.java (from hadoop, Apache License 2.0)
/**
 * Test that even a non-idempotent method will properly fail-over if the
 * first IPC attempt times out trying to connect. Regression test for
 * HDFS-4404.
 */
@Test
public void testFailoverOnConnectTimeout() throws Exception {
  conf.setClass(CommonConfigurationKeysPublic.HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_KEY,
      InjectingSocketFactory.class, SocketFactory.class);
  // Set up the InjectingSocketFactory to throw a ConnectTimeoutException
  // when connecting to the first NN.
  InjectingSocketFactory.portToInjectOn = cluster.getNameNodePort(0);

  FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);

  // Make the second NN the active one.
  cluster.shutdownNameNode(0);
  cluster.transitionToActive(1);

  // Call a non-idempotent method, and ensure the failover of the call proceeds
  // successfully.
  IOUtils.closeStream(fs.create(TEST_FILE));
}
Example #10
Source File: TestDFSClientFailover.java (from big-c, Apache License 2.0)
/**
 * Same test as above, but for FileContext.
 */
@Test
public void testFileContextDoesntDnsResolveLogicalURI() throws Exception {
  FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
  NameService spyNS = spyOnNameService();
  String logicalHost = fs.getUri().getHost();
  Configuration haClientConf = fs.getConf();

  FileContext fc = FileContext.getFileContext(haClientConf);
  Path root = new Path("/");
  fc.listStatus(root);
  fc.listStatus(fc.makeQualified(root));
  fc.getDefaultFileSystem().getCanonicalServiceName();

  // Ensure that the logical hostname was never resolved.
  Mockito.verify(spyNS, Mockito.never()).lookupAllHostAddr(Mockito.eq(logicalHost));
}
Example #11
Source File: TestWebHDFSForHA.java (from hadoop, Apache License 2.0)
@Test
public void testMultipleNamespacesConfigured() throws Exception {
  Configuration conf = DFSTestUtil.newHAConfiguration(LOGICAL_NAME);
  MiniDFSCluster cluster = null;
  WebHdfsFileSystem fs = null;

  try {
    cluster = new MiniDFSCluster.Builder(conf).nnTopology(topo)
        .numDataNodes(1).build();

    HATestUtil.setFailoverConfigurations(cluster, conf, LOGICAL_NAME);
    cluster.waitActive();

    DFSTestUtil.addHAConfiguration(conf, LOGICAL_NAME + "remote");
    DFSTestUtil.setFakeHttpAddresses(conf, LOGICAL_NAME + "remote");

    fs = (WebHdfsFileSystem)FileSystem.get(WEBHDFS_URI, conf);
    Assert.assertEquals(2, fs.getResolvedNNAddr().length);
  } finally {
    IOUtils.cleanup(null, fs);
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Example #12
Source File: TestDFSClientFailover.java (from big-c, Apache License 2.0)
/**
 * Test to verify legacy proxy providers are correctly wrapped.
 */
@Test
public void testWrappedFailoverProxyProvider() throws Exception {
  // setup the config with the dummy provider class
  Configuration config = new HdfsConfiguration(conf);
  String logicalName = HATestUtil.getLogicalHostname(cluster);
  HATestUtil.setFailoverConfigurations(cluster, config, logicalName);
  config.set(DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX + "." + logicalName,
      DummyLegacyFailoverProxyProvider.class.getName());
  Path p = new Path("hdfs://" + logicalName + "/");

  // not to use IP address for token service
  SecurityUtil.setTokenServiceUseIp(false);

  // Logical URI should be used.
  assertTrue("Legacy proxy providers should use logical URI.",
      HAUtil.useLogicalUri(config, p.toUri()));
}
Example #13
Source File: TestEncryptionZonesWithHA.java (from big-c, Apache License 2.0)
/**
 * Test that encryption zones are properly tracked by the standby.
 */
@Test(timeout = 60000)
public void testEncryptionZonesTrackedOnStandby() throws Exception {
  final int len = 8196;
  final Path dir = new Path("/enc");
  final Path dirChild = new Path(dir, "child");
  final Path dirFile = new Path(dir, "file");
  fs.mkdir(dir, FsPermission.getDirDefault());
  dfsAdmin0.createEncryptionZone(dir, TEST_KEY);
  fs.mkdir(dirChild, FsPermission.getDirDefault());
  DFSTestUtil.createFile(fs, dirFile, len, (short) 1, 0xFEED);
  String contents = DFSTestUtil.readFile(fs, dirFile);

  // Failover the current standby to active.
  HATestUtil.waitForStandbyToCatchUp(nn0, nn1);
  cluster.shutdownNameNode(0);
  cluster.transitionToActive(1);

  Assert.assertEquals("Got unexpected ez path", dir.toString(),
      dfsAdmin1.getEncryptionZoneForPath(dir).getPath().toString());
  Assert.assertEquals("Got unexpected ez path", dir.toString(),
      dfsAdmin1.getEncryptionZoneForPath(dirChild).getPath().toString());
  Assert.assertEquals("File contents after failover were changed",
      contents, DFSTestUtil.readFile(fs, dirFile));
}
Example #14
Source File: TestMover.java (from big-c, Apache License 2.0)
@Test
public void testMoverCliWithHAConf() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  final MiniDFSCluster cluster = new MiniDFSCluster
      .Builder(new HdfsConfiguration())
      .nnTopology(MiniDFSNNTopology.simpleHATopology())
      .numDataNodes(0).build();
  HATestUtil.setFailoverConfigurations(cluster, conf, "MyCluster");
  try {
    Map<URI, List<Path>> movePaths = Mover.Cli.getNameNodePathsToMove(conf,
        "-p", "/foo", "/bar");
    Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
    Assert.assertEquals(1, namenodes.size());
    Assert.assertEquals(1, movePaths.size());
    URI nn = namenodes.iterator().next();
    Assert.assertEquals(new URI("hdfs://MyCluster"), nn);
    Assert.assertTrue(movePaths.containsKey(nn));
    checkMovePaths(movePaths.get(nn), new Path("/foo"), new Path("/bar"));
  } finally {
    cluster.shutdown();
  }
}
Example #15
Source File: TestEncryptionZonesWithHA.java (from hadoop, Apache License 2.0)
/**
 * Test that encryption zones are properly tracked by the standby.
 */
@Test(timeout = 60000)
public void testEncryptionZonesTrackedOnStandby() throws Exception {
  final int len = 8196;
  final Path dir = new Path("/enc");
  final Path dirChild = new Path(dir, "child");
  final Path dirFile = new Path(dir, "file");
  fs.mkdir(dir, FsPermission.getDirDefault());
  dfsAdmin0.createEncryptionZone(dir, TEST_KEY);
  fs.mkdir(dirChild, FsPermission.getDirDefault());
  DFSTestUtil.createFile(fs, dirFile, len, (short) 1, 0xFEED);
  String contents = DFSTestUtil.readFile(fs, dirFile);

  // Failover the current standby to active.
  HATestUtil.waitForStandbyToCatchUp(nn0, nn1);
  cluster.shutdownNameNode(0);
  cluster.transitionToActive(1);

  Assert.assertEquals("Got unexpected ez path", dir.toString(),
      dfsAdmin1.getEncryptionZoneForPath(dir).getPath().toString());
  Assert.assertEquals("Got unexpected ez path", dir.toString(),
      dfsAdmin1.getEncryptionZoneForPath(dirChild).getPath().toString());
  Assert.assertEquals("File contents after failover were changed",
      contents, DFSTestUtil.readFile(fs, dirFile));
}
Example #16
Source File: TestNameNodeRetryCacheMetrics.java (from big-c, Apache License 2.0)
/** Start a cluster */
@Before
public void setup() throws Exception {
  conf = new HdfsConfiguration();
  conf.setBoolean(DFS_NAMENODE_ENABLE_RETRY_CACHE_KEY, true);
  conf.setInt(DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY, 2);
  cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(3)
      .build();
  cluster.waitActive();
  cluster.transitionToActive(namenodeId);
  HATestUtil.setFailoverConfigurations(cluster, conf);
  filesystem = (DistributedFileSystem) HATestUtil.configureFailoverFs(cluster, conf);
  namesystem = cluster.getNamesystem(namenodeId);
  metrics = namesystem.getRetryCache().getMetricsForTests();
}
Example #17
Source File: TestDFSClientFailover.java (from big-c, Apache License 2.0)
/**
 * Regression test for HDFS-2683.
 */
@Test
public void testLogicalUriShouldNotHavePorts() {
  Configuration config = new HdfsConfiguration(conf);
  String logicalName = HATestUtil.getLogicalHostname(cluster);
  HATestUtil.setFailoverConfigurations(cluster, config, logicalName);
  Path p = new Path("hdfs://" + logicalName + ":12345/");
  try {
    p.getFileSystem(config).exists(p);
    fail("Did not fail with fake FS");
  } catch (IOException ioe) {
    GenericTestUtils.assertExceptionContains(
        "does not use port information", ioe);
  }
}
Example #18
Source File: TestWebHDFSForHA.java (from big-c, Apache License 2.0)
@Test
public void testHA() throws IOException {
  Configuration conf = DFSTestUtil.newHAConfiguration(LOGICAL_NAME);
  MiniDFSCluster cluster = null;
  FileSystem fs = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).nnTopology(topo)
        .numDataNodes(0).build();

    HATestUtil.setFailoverConfigurations(cluster, conf, LOGICAL_NAME);
    cluster.waitActive();

    fs = FileSystem.get(WEBHDFS_URI, conf);

    cluster.transitionToActive(0);

    final Path dir = new Path("/test");
    Assert.assertTrue(fs.mkdirs(dir));

    cluster.shutdownNameNode(0);
    cluster.transitionToActive(1);

    final Path dir2 = new Path("/test2");
    Assert.assertTrue(fs.mkdirs(dir2));
  } finally {
    IOUtils.cleanup(null, fs);
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Example #19
Source File: TestWebHDFSForHA.java (from big-c, Apache License 2.0)
@Test
public void testSecureHAToken() throws IOException, InterruptedException {
  Configuration conf = DFSTestUtil.newHAConfiguration(LOGICAL_NAME);
  conf.setBoolean(
      DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);

  MiniDFSCluster cluster = null;
  WebHdfsFileSystem fs = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).nnTopology(topo)
        .numDataNodes(0).build();

    HATestUtil.setFailoverConfigurations(cluster, conf, LOGICAL_NAME);
    cluster.waitActive();

    fs = spy((WebHdfsFileSystem) FileSystem.get(WEBHDFS_URI, conf));
    FileSystemTestHelper.addFileSystemForTesting(WEBHDFS_URI, conf, fs);

    cluster.transitionToActive(0);
    Token<?> token = fs.getDelegationToken(null);

    cluster.shutdownNameNode(0);
    cluster.transitionToActive(1);
    token.renew(conf);
    token.cancel(conf);
    verify(fs).renewDelegationToken(token);
    verify(fs).cancelDelegationToken(token);
  } finally {
    IOUtils.cleanup(null, fs);
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Example #20
Source File: TestEncryptionZonesWithHA.java (from big-c, Apache License 2.0)
@Before
public void setupCluster() throws Exception {
  conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
  HAUtil.setAllowStandbyReads(conf, true);
  fsHelper = new FileSystemTestHelper();
  String testRoot = fsHelper.getTestRootDir();
  testRootDir = new File(testRoot).getAbsoluteFile();
  conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI,
      JavaKeyStoreProvider.SCHEME_NAME + "://file" +
      new Path(testRootDir.toString(), "test.jks").toUri()
  );

  cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleHATopology())
      .numDataNodes(1)
      .build();
  cluster.waitActive();
  cluster.transitionToActive(0);

  fs = (DistributedFileSystem)HATestUtil.configureFailoverFs(cluster, conf);
  DFSTestUtil.createKey(TEST_KEY, cluster, 0, conf);
  DFSTestUtil.createKey(TEST_KEY, cluster, 1, conf);
  nn0 = cluster.getNameNode(0);
  nn1 = cluster.getNameNode(1);
  dfsAdmin0 = new HdfsAdmin(cluster.getURI(0), conf);
  dfsAdmin1 = new HdfsAdmin(cluster.getURI(1), conf);
  KeyProviderCryptoExtension nn0Provider =
      cluster.getNameNode(0).getNamesystem().getProvider();
  fs.getClient().setKeyProvider(nn0Provider);
}
Example #21
Source File: TestBookKeeperAsHASharedDir.java (from big-c, Apache License 2.0)
/**
 * Test simple HA failover usecase with BK
 */
@Test
public void testFailoverWithBK() throws Exception {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new Configuration();
    conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
    conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY,
        BKJMUtil.createJournalURI("/hotfailover").toString());
    BKJMUtil.addJournalManagerDefinition(conf);

    cluster = new MiniDFSCluster.Builder(conf)
        .nnTopology(MiniDFSNNTopology.simpleHATopology())
        .numDataNodes(0)
        .manageNameDfsSharedDirs(false)
        .build();
    NameNode nn1 = cluster.getNameNode(0);
    NameNode nn2 = cluster.getNameNode(1);
    cluster.waitActive();
    cluster.transitionToActive(0);

    Path p = new Path("/testBKJMfailover");

    FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);

    fs.mkdirs(p);
    cluster.shutdownNameNode(0);

    cluster.transitionToActive(1);

    assertTrue(fs.exists(p));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Example #22
Source File: TestWebHDFSForHA.java (from big-c, Apache License 2.0)
@Test
public void testFailoverAfterOpen() throws IOException {
  Configuration conf = DFSTestUtil.newHAConfiguration(LOGICAL_NAME);
  conf.set(FS_DEFAULT_NAME_KEY, HdfsConstants.HDFS_URI_SCHEME +
      "://" + LOGICAL_NAME);
  MiniDFSCluster cluster = null;
  FileSystem fs = null;
  final Path p = new Path("/test");
  final byte[] data = "Hello".getBytes();

  try {
    cluster = new MiniDFSCluster.Builder(conf).nnTopology(topo)
        .numDataNodes(1).build();

    HATestUtil.setFailoverConfigurations(cluster, conf, LOGICAL_NAME);
    cluster.waitActive();

    fs = FileSystem.get(WEBHDFS_URI, conf);

    cluster.transitionToActive(1);

    FSDataOutputStream out = fs.create(p);
    cluster.shutdownNameNode(1);
    cluster.transitionToActive(0);

    out.write(data);
    out.close();

    FSDataInputStream in = fs.open(p);
    byte[] buf = new byte[data.length];
    IOUtils.readFully(in, buf, 0, buf.length);
    Assert.assertArrayEquals(data, buf);
  } finally {
    IOUtils.cleanup(null, fs);
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Example #23
Source File: HdfsTestUtil.java (from lucene-solr, Apache License 2.0)
public static Configuration getClientConfiguration(MiniDFSCluster dfsCluster) {
  Configuration conf = getBasicConfiguration(dfsCluster.getConfiguration(0));
  if (dfsCluster.getNumNameNodes() > 1) {
    HATestUtil.setFailoverConfigurations(dfsCluster, conf);
  }
  return conf;
}
Example #24
Source File: TestDFSClientFailover.java (from big-c, Apache License 2.0)
/**
 * Test that the client doesn't ever try to DNS-resolve the logical URI.
 * Regression test for HADOOP-9150.
 */
@Test
public void testDoesntDnsResolveLogicalURI() throws Exception {
  FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
  NameService spyNS = spyOnNameService();
  String logicalHost = fs.getUri().getHost();
  Path qualifiedRoot = fs.makeQualified(new Path("/"));

  // Make a few calls against the filesystem.
  fs.getCanonicalServiceName();
  fs.listStatus(qualifiedRoot);

  // Ensure that the logical hostname was never resolved.
  Mockito.verify(spyNS, Mockito.never()).lookupAllHostAddr(Mockito.eq(logicalHost));
}
Example #25
Source File: TestDFSInotifyEventInputStream.java (from big-c, Apache License 2.0)
@Test(timeout = 120000)
public void testNNFailover() throws IOException, URISyntaxException,
    MissingEventsException {
  Configuration conf = new HdfsConfiguration();
  MiniQJMHACluster cluster = new MiniQJMHACluster.Builder(conf).build();

  try {
    cluster.getDfsCluster().waitActive();
    cluster.getDfsCluster().transitionToActive(0);
    DFSClient client = ((DistributedFileSystem) HATestUtil
        .configureFailoverFs(cluster.getDfsCluster(), conf)).dfs;
    DFSInotifyEventInputStream eis = client.getInotifyEventStream();
    for (int i = 0; i < 10; i++) {
      client.mkdirs("/dir" + i, null, false);
    }
    cluster.getDfsCluster().shutdownNameNode(0);
    cluster.getDfsCluster().transitionToActive(1);
    EventBatch batch = null;
    // we can read all of the edits logged by the old active from the new
    // active
    for (int i = 0; i < 10; i++) {
      batch = waitForNextEvents(eis);
      Assert.assertEquals(1, batch.getEvents().length);
      Assert.assertTrue(batch.getEvents()[0].getEventType() ==
          Event.EventType.CREATE);
      Assert.assertTrue(((Event.CreateEvent) batch.getEvents()[0]).getPath()
          .equals("/dir" + i));
    }
    Assert.assertTrue(eis.poll() == null);
  } finally {
    cluster.shutdown();
  }
}
Example #26
Source File: TestEditLogAutoroll.java (from big-c, Apache License 2.0)
@Before
public void setUp() throws Exception {
  conf = new Configuration();
  // Stall the standby checkpointer in two ways
  conf.setLong(DFS_NAMENODE_CHECKPOINT_PERIOD_KEY, Long.MAX_VALUE);
  conf.setLong(DFS_NAMENODE_CHECKPOINT_TXNS_KEY, 20);
  // Make it autoroll after 10 edits
  conf.setFloat(DFS_NAMENODE_EDIT_LOG_AUTOROLL_MULTIPLIER_THRESHOLD, 0.5f);
  conf.setInt(DFS_NAMENODE_EDIT_LOG_AUTOROLL_CHECK_INTERVAL_MS, 100);

  int retryCount = 0;
  while (true) {
    try {
      int basePort = 10060 + random.nextInt(100) * 2;
      MiniDFSNNTopology topology = new MiniDFSNNTopology()
          .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
              .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(basePort))
              .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(basePort + 1)));

      cluster = new MiniDFSCluster.Builder(conf)
          .nnTopology(topology)
          .numDataNodes(0)
          .build();
      cluster.waitActive();

      nn0 = cluster.getNameNode(0);
      fs = HATestUtil.configureFailoverFs(cluster, conf);

      cluster.transitionToActive(0);
      fs = cluster.getFileSystem(0);
      editLog = nn0.getNamesystem().getEditLog();
      ++retryCount;
      break;
    } catch (BindException e) {
      LOG.info("Set up MiniDFSCluster failed due to port conflicts, retry "
          + retryCount + " times");
    }
  }
}
Example #27
Source File: TestAllowFormat.java (from big-c, Apache License 2.0)
/**
 * Test to skip format for non file scheme directory configured
 *
 * @throws Exception
 */
@Test
public void testFormatShouldBeIgnoredForNonFileBasedDirs() throws Exception {
  Configuration conf = new HdfsConfiguration();
  String logicalName = "mycluster";

  // DFS_NAMENODE_RPC_ADDRESS_KEY are required to identify the NameNode
  // is configured in HA, then only DFS_NAMENODE_SHARED_EDITS_DIR_KEY
  // is considered.
  String localhost = "127.0.0.1";
  InetSocketAddress nnAddr1 = new InetSocketAddress(localhost, 8020);
  InetSocketAddress nnAddr2 = new InetSocketAddress(localhost, 9020);
  HATestUtil.setFailoverConfigurations(conf, logicalName, nnAddr1, nnAddr2);

  conf.set(DFS_NAMENODE_NAME_DIR_KEY,
      new File(DFS_BASE_DIR, "name").getAbsolutePath());
  conf.setBoolean(DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY, true);
  conf.set(DFSUtil.addKeySuffixes(
      DFSConfigKeys.DFS_NAMENODE_EDITS_PLUGIN_PREFIX, "dummy"),
      DummyJournalManager.class.getName());
  conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY, "dummy://" +
      localhost + ":2181/ledgers");
  conf.set(DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY, "nn1");

  // An internal assert is added to verify the working of test
  NameNode.format(conf);
}
Example #28
Source File: TestBookKeeperAsHASharedDir.java (from hadoop, Apache License 2.0)
private void assertCanStartHANameNodes(MiniDFSCluster cluster,
    Configuration conf, String path) throws ServiceFailedException,
    IOException, URISyntaxException, InterruptedException {
  // Now should be able to start both NNs. Pass "false" here so that we don't
  // try to waitActive on all NNs, since the second NN doesn't exist yet.
  cluster.restartNameNode(0, false);
  cluster.restartNameNode(1, true);

  // Make sure HA is working.
  cluster
      .getNameNode(0)
      .getRpcServer()
      .transitionToActive(
          new StateChangeRequestInfo(RequestSource.REQUEST_BY_USER));
  FileSystem fs = null;
  try {
    Path newPath = new Path(path);
    fs = HATestUtil.configureFailoverFs(cluster, conf);
    assertTrue(fs.mkdirs(newPath));
    HATestUtil.waitForStandbyToCatchUp(cluster.getNameNode(0),
        cluster.getNameNode(1));
    assertTrue(NameNodeAdapter.getFileInfo(cluster.getNameNode(1),
        newPath.toString(), false).isDir());
  } finally {
    if (fs != null) {
      fs.close();
    }
  }
}
Example #29
Source File: TestEncryptionZonesWithHA.java (from hadoop, Apache License 2.0)
@Before
public void setupCluster() throws Exception {
  conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
  HAUtil.setAllowStandbyReads(conf, true);
  fsHelper = new FileSystemTestHelper();
  String testRoot = fsHelper.getTestRootDir();
  testRootDir = new File(testRoot).getAbsoluteFile();
  conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI,
      JavaKeyStoreProvider.SCHEME_NAME + "://file" +
      new Path(testRootDir.toString(), "test.jks").toUri()
  );

  cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleHATopology())
      .numDataNodes(1)
      .build();
  cluster.waitActive();
  cluster.transitionToActive(0);

  fs = (DistributedFileSystem)HATestUtil.configureFailoverFs(cluster, conf);
  DFSTestUtil.createKey(TEST_KEY, cluster, 0, conf);
  DFSTestUtil.createKey(TEST_KEY, cluster, 1, conf);
  nn0 = cluster.getNameNode(0);
  nn1 = cluster.getNameNode(1);
  dfsAdmin0 = new HdfsAdmin(cluster.getURI(0), conf);
  dfsAdmin1 = new HdfsAdmin(cluster.getURI(1), conf);
  KeyProviderCryptoExtension nn0Provider =
      cluster.getNameNode(0).getNamesystem().getProvider();
  fs.getClient().setKeyProvider(nn0Provider);
}
Example #30
Source File: TestWebHDFSForHA.java (from hadoop, Apache License 2.0)
@Test
public void testHA() throws IOException {
  Configuration conf = DFSTestUtil.newHAConfiguration(LOGICAL_NAME);
  MiniDFSCluster cluster = null;
  FileSystem fs = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).nnTopology(topo)
        .numDataNodes(0).build();

    HATestUtil.setFailoverConfigurations(cluster, conf, LOGICAL_NAME);
    cluster.waitActive();

    fs = FileSystem.get(WEBHDFS_URI, conf);

    cluster.transitionToActive(0);

    final Path dir = new Path("/test");
    Assert.assertTrue(fs.mkdirs(dir));

    cluster.shutdownNameNode(0);
    cluster.transitionToActive(1);

    final Path dir2 = new Path("/test2");
    Assert.assertTrue(fs.mkdirs(dir2));
  } finally {
    IOUtils.cleanup(null, fs);
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}