Java Code Examples for org.apache.hadoop.hdfs.DFSTestUtil#newHAConfiguration()
The following examples show how to use org.apache.hadoop.hdfs.DFSTestUtil#newHAConfiguration(). Each example is taken from an open-source project; the source file and license are noted above each example.
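All of the examples below reference a few fixtures declared at the top of the test classes: LOGICAL_NAME (the logical HA nameservice name), topo (a two-NameNode MiniDFSNNTopology), and WEBHDFS_URI (a webhdfs:// URI built from the logical name). A minimal sketch of those declarations, assuming definitions along the lines of the upstream Hadoop test code (the exact values are illustrative):

import java.net.URI;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;

// Logical nameservice name shared by the two NameNodes in the HA pair.
private static final String LOGICAL_NAME = "minidfs";

// WebHDFS URI that addresses the logical nameservice rather than a
// concrete host:port, so requests can fail over between NameNodes.
private static final URI WEBHDFS_URI = URI.create(
    WebHdfsFileSystem.SCHEME + "://" + LOGICAL_NAME);

// Two-NameNode topology used to build the MiniDFSCluster in each test.
private static final MiniDFSNNTopology topo = new MiniDFSNNTopology()
    .addNameservice(new MiniDFSNNTopology.NSConf(LOGICAL_NAME)
        .addNN(new MiniDFSNNTopology.NNConf("nn1"))
        .addNN(new MiniDFSNNTopology.NNConf("nn2")));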
Example 1
Source File: TestWebHDFSForHA.java (hadoop and big-c projects, Apache License 2.0)
// With a second, fake nameservice added to the configuration, the WebHDFS
// client should still resolve exactly the two NameNode addresses of its
// own logical nameservice.
@Test
public void testMultipleNamespacesConfigured() throws Exception {
  Configuration conf = DFSTestUtil.newHAConfiguration(LOGICAL_NAME);
  MiniDFSCluster cluster = null;
  WebHdfsFileSystem fs = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).nnTopology(topo)
        .numDataNodes(1).build();
    HATestUtil.setFailoverConfigurations(cluster, conf, LOGICAL_NAME);
    cluster.waitActive();

    DFSTestUtil.addHAConfiguration(conf, LOGICAL_NAME + "remote");
    DFSTestUtil.setFakeHttpAddresses(conf, LOGICAL_NAME + "remote");

    fs = (WebHdfsFileSystem) FileSystem.get(WEBHDFS_URI, conf);
    Assert.assertEquals(2, fs.getResolvedNNAddr().length);
  } finally {
    IOUtils.cleanup(null, fs);
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Example 2
Source File: TestWebHDFSForHA.java (hadoop and big-c projects, Apache License 2.0)
@Test
public void testHA() throws IOException {
  Configuration conf = DFSTestUtil.newHAConfiguration(LOGICAL_NAME);
  MiniDFSCluster cluster = null;
  FileSystem fs = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).nnTopology(topo)
        .numDataNodes(0).build();
    HATestUtil.setFailoverConfigurations(cluster, conf, LOGICAL_NAME);
    cluster.waitActive();

    fs = FileSystem.get(WEBHDFS_URI, conf);
    cluster.transitionToActive(0);

    final Path dir = new Path("/test");
    Assert.assertTrue(fs.mkdirs(dir));

    // Fail over to the second NameNode; the same FileSystem handle
    // should keep working against the new active NameNode.
    cluster.shutdownNameNode(0);
    cluster.transitionToActive(1);

    final Path dir2 = new Path("/test2");
    Assert.assertTrue(fs.mkdirs(dir2));
  } finally {
    IOUtils.cleanup(null, fs);
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Example 3
Source File: TestWebHDFSForHA.java (hadoop and big-c projects, Apache License 2.0)
@Test
public void testSecureHAToken() throws IOException, InterruptedException {
  Configuration conf = DFSTestUtil.newHAConfiguration(LOGICAL_NAME);
  conf.setBoolean(DFSConfigKeys
      .DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);

  MiniDFSCluster cluster = null;
  WebHdfsFileSystem fs = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).nnTopology(topo)
        .numDataNodes(0).build();
    HATestUtil.setFailoverConfigurations(cluster, conf, LOGICAL_NAME);
    cluster.waitActive();

    fs = spy((WebHdfsFileSystem) FileSystem.get(WEBHDFS_URI, conf));
    FileSystemTestHelper.addFileSystemForTesting(WEBHDFS_URI, conf, fs);

    cluster.transitionToActive(0);
    Token<?> token = fs.getDelegationToken(null);

    // Fail over, then renew and cancel the token obtained from the old
    // active NameNode; both calls must go through the file system.
    cluster.shutdownNameNode(0);
    cluster.transitionToActive(1);
    token.renew(conf);
    token.cancel(conf);
    verify(fs).renewDelegationToken(token);
    verify(fs).cancelDelegationToken(token);
  } finally {
    IOUtils.cleanup(null, fs);
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Example 4
Source File: TestWebHDFSForHA.java (hadoop and big-c projects, Apache License 2.0)
@Test
public void testFailoverAfterOpen() throws IOException {
  Configuration conf = DFSTestUtil.newHAConfiguration(LOGICAL_NAME);
  conf.set(FS_DEFAULT_NAME_KEY, HdfsConstants.HDFS_URI_SCHEME +
      "://" + LOGICAL_NAME);
  MiniDFSCluster cluster = null;
  FileSystem fs = null;
  final Path p = new Path("/test");
  final byte[] data = "Hello".getBytes();

  try {
    cluster = new MiniDFSCluster.Builder(conf).nnTopology(topo)
        .numDataNodes(1).build();
    HATestUtil.setFailoverConfigurations(cluster, conf, LOGICAL_NAME);
    cluster.waitActive();

    fs = FileSystem.get(WEBHDFS_URI, conf);
    cluster.transitionToActive(1);

    // Open the stream on NN1, fail over to NN0 mid-write, then finish
    // the write and read the data back through the new active NameNode.
    FSDataOutputStream out = fs.create(p);
    cluster.shutdownNameNode(1);
    cluster.transitionToActive(0);

    out.write(data);
    out.close();

    FSDataInputStream in = fs.open(p);
    byte[] buf = new byte[data.length];
    IOUtils.readFully(in, buf, 0, buf.length);
    Assert.assertArrayEquals(data, buf);
  } finally {
    IOUtils.cleanup(null, fs);
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Example 5
Source File: TestParameterParser.java (hadoop and big-c projects, Apache License 2.0)
@Test
public void testDeserializeHAToken() throws IOException {
  Configuration conf = DFSTestUtil.newHAConfiguration(LOGICAL_NAME);
  final Token<DelegationTokenIdentifier> token =
      new Token<DelegationTokenIdentifier>();
  // Encode the token into a WebHDFS query string, parse it back, and
  // check that the result is still bound to the logical (HA) URI.
  QueryStringDecoder decoder = new QueryStringDecoder(
      WebHdfsHandler.WEBHDFS_PREFIX + "/?"
      + NamenodeAddressParam.NAME + "=" + LOGICAL_NAME + "&"
      + DelegationParam.NAME + "=" + token.encodeToUrlString());
  ParameterParser testParser = new ParameterParser(decoder, conf);
  final Token<DelegationTokenIdentifier> tok2 = testParser.delegationToken();
  Assert.assertTrue(HAUtil.isTokenForLogicalUri(tok2));
}
Example 6
Source File: TestWebHDFSForHA.java (hadoop and big-c projects, Apache License 2.0)
/**
 * Make sure the WebHdfsFileSystem will retry based on RetriableException when
 * rpcServer is null in NamenodeWebHdfsMethods while NameNode starts up.
 */
@Test (timeout=120000)
public void testRetryWhileNNStartup() throws Exception {
  final Configuration conf = DFSTestUtil.newHAConfiguration(LOGICAL_NAME);
  MiniDFSCluster cluster = null;
  final Map<String, Boolean> resultMap = new HashMap<String, Boolean>();

  try {
    cluster = new MiniDFSCluster.Builder(conf).nnTopology(topo)
        .numDataNodes(0).build();
    HATestUtil.setFailoverConfigurations(cluster, conf, LOGICAL_NAME);
    cluster.waitActive();
    cluster.transitionToActive(0);

    // Simulate a NameNode that is still starting up by hiding its RPC
    // server; WebHDFS calls should block and retry instead of failing.
    final NameNode namenode = cluster.getNameNode(0);
    final NamenodeProtocols rpcServer = namenode.getRpcServer();
    Whitebox.setInternalState(namenode, "rpcServer", null);

    new Thread() {
      @Override
      public void run() {
        boolean result = false;
        FileSystem fs = null;
        try {
          fs = FileSystem.get(WEBHDFS_URI, conf);
          final Path dir = new Path("/test");
          result = fs.mkdirs(dir);
        } catch (IOException e) {
          result = false;
        } finally {
          IOUtils.cleanup(null, fs);
        }
        synchronized (TestWebHDFSForHA.this) {
          resultMap.put("mkdirs", result);
          TestWebHDFSForHA.this.notifyAll();
        }
      }
    }.start();

    Thread.sleep(1000);
    // Restore the RPC server; the retrying mkdirs call should now succeed.
    Whitebox.setInternalState(namenode, "rpcServer", rpcServer);
    synchronized (this) {
      while (!resultMap.containsKey("mkdirs")) {
        this.wait();
      }
      Assert.assertTrue(resultMap.get("mkdirs"));
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
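Taken together, the TestWebHDFSForHA examples above share one skeleton: build a two-NameNode MiniDFSCluster from newHAConfiguration(), point the client configuration at the cluster's failover setup, then exercise a WebHDFS FileSystem across a NameNode failover. A condensed sketch of that recurring pattern, distilled from the examples above rather than taken verbatim from any one of them:

Configuration conf = DFSTestUtil.newHAConfiguration(LOGICAL_NAME);
MiniDFSCluster cluster = null;
FileSystem fs = null;
try {
  // 1. Build a mini cluster with two NameNodes from the shared topology.
  cluster = new MiniDFSCluster.Builder(conf).nnTopology(topo)
      .numDataNodes(0).build();

  // 2. Rewrite the client configuration so the logical name resolves to
  //    the mini cluster's NameNodes through a failover proxy.
  HATestUtil.setFailoverConfigurations(cluster, conf, LOGICAL_NAME);
  cluster.waitActive();

  // 3. Open a WebHDFS FileSystem against the logical URI and make one
  //    NameNode active before issuing requests.
  fs = FileSystem.get(WEBHDFS_URI, conf);
  cluster.transitionToActive(0);

  // 4. Exercise the file system, optionally forcing a failover with
  //    cluster.shutdownNameNode(0) and cluster.transitionToActive(1).
} finally {
  IOUtils.cleanup(null, fs);
  if (cluster != null) {
    cluster.shutdown();
  }
}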