Java Code Examples for org.apache.hadoop.net.NetUtils#getHostPortString()
The following examples show how to use org.apache.hadoop.net.NetUtils#getHostPortString(), which renders an InetSocketAddress as a single "host:port" string. Each example notes its source file, originating project, and license.
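As a quick orientation before the examples, here is a minimal sketch; the class name, host, and port are illustrative and taken from none of the projects below.

import java.net.InetSocketAddress;

import org.apache.hadoop.net.NetUtils;

public class HostPortDemo {
  public static void main(String[] args) {
    // Any InetSocketAddress works the same way; this address is made up.
    InetSocketAddress addr = new InetSocketAddress("localhost", 8020);
    // Expected to print "localhost:8020"
    System.out.println(NetUtils.getHostPortString(addr));
  }
}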
Example 1
Source File: DFSUtil.java From hadoop with Apache License 2.0
/**
 * Returns list of InetSocketAddresses corresponding to namenodes from the
 * configuration.
 *
 * Returns namenode address specifically configured for datanodes (using
 * service ports), if found. If not, regular RPC address configured for other
 * clients is returned.
 *
 * @param conf configuration
 * @return list of InetSocketAddress
 * @throws IOException on error
 */
public static Map<String, Map<String, InetSocketAddress>> getNNServiceRpcAddresses(
    Configuration conf) throws IOException {
  // Use default address as fall back
  String defaultAddress;
  try {
    defaultAddress = NetUtils.getHostPortString(NameNode.getAddress(conf));
  } catch (IllegalArgumentException e) {
    defaultAddress = null;
  }

  Map<String, Map<String, InetSocketAddress>> addressList =
      getAddresses(conf, defaultAddress,
          DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
          DFS_NAMENODE_RPC_ADDRESS_KEY);
  if (addressList.isEmpty()) {
    throw new IOException("Incorrect configuration: namenode address "
        + DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY + " or "
        + DFS_NAMENODE_RPC_ADDRESS_KEY
        + " is not configured.");
  }
  return addressList;
}
Example 2
Source File: DFSUtil.java From big-c with Apache License 2.0
/**
 * Returns list of InetSocketAddresses corresponding to namenodes from the
 * configuration.
 *
 * Returns namenode address specifically configured for datanodes (using
 * service ports), if found. If not, regular RPC address configured for other
 * clients is returned.
 *
 * @param conf configuration
 * @return list of InetSocketAddress
 * @throws IOException on error
 */
public static Map<String, Map<String, InetSocketAddress>> getNNServiceRpcAddresses(
    Configuration conf) throws IOException {
  // Use default address as fall back
  String defaultAddress;
  try {
    defaultAddress = NetUtils.getHostPortString(NameNode.getAddress(conf));
  } catch (IllegalArgumentException e) {
    defaultAddress = null;
  }

  Map<String, Map<String, InetSocketAddress>> addressList =
      getAddresses(conf, defaultAddress,
          DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
          DFS_NAMENODE_RPC_ADDRESS_KEY);
  if (addressList.isEmpty()) {
    throw new IOException("Incorrect configuration: namenode address "
        + DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY + " or "
        + DFS_NAMENODE_RPC_ADDRESS_KEY
        + " is not configured.");
  }
  return addressList;
}
Example 3
Source File: TestHttpServer.java From hadoop with Apache License 2.0
/**
 * Verify the access for /logs, /stacks, /conf, /logLevel and /metrics
 * servlets, when authentication filters are set, but authorization is not
 * enabled.
 * @throws Exception
 */
@Test
public void testDisabledAuthorizationOfDefaultServlets() throws Exception {
  Configuration conf = new Configuration();

  // Authorization is disabled by default
  conf.set(HttpServer2.FILTER_INITIALIZER_PROPERTY,
      DummyFilterInitializer.class.getName());
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,
      MyGroupsProvider.class.getName());
  Groups.getUserToGroupsMappingService(conf);
  MyGroupsProvider.clearMapping();
  MyGroupsProvider.mapping.put("userA", Arrays.asList("groupA"));
  MyGroupsProvider.mapping.put("userB", Arrays.asList("groupB"));

  HttpServer2 myServer = new HttpServer2.Builder().setName("test")
      .addEndpoint(new URI("http://localhost:0")).setFindPort(true).build();
  myServer.setAttribute(HttpServer2.CONF_CONTEXT_ATTRIBUTE, conf);
  myServer.start();
  String serverURL = "http://"
      + NetUtils.getHostPortString(myServer.getConnectorAddress(0)) + "/";
  for (String servlet : new String[] { "conf", "logs", "stacks",
      "logLevel", "metrics" }) {
    for (String user : new String[] { "userA", "userB" }) {
      assertEquals(HttpURLConnection.HTTP_OK, getHttpStatusCode(serverURL
          + servlet, user));
    }
  }
  myServer.stop();
}
Example 4
Source File: TestSSLHttpServer.java From big-c with Apache License 2.0
@BeforeClass
public static void setup() throws Exception {
  conf = new Configuration();
  conf.setInt(HttpServer2.HTTP_MAX_THREADS, 10);

  File base = new File(BASEDIR);
  FileUtil.fullyDelete(base);
  base.mkdirs();
  keystoresDir = new File(BASEDIR).getAbsolutePath();
  sslConfDir = KeyStoreTestUtil.getClasspathDir(TestSSLHttpServer.class);

  KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
  Configuration sslConf = new Configuration(false);
  sslConf.addResource("ssl-server.xml");
  sslConf.addResource("ssl-client.xml");

  clientSslFactory = new SSLFactory(SSLFactory.Mode.CLIENT, sslConf);
  clientSslFactory.init();

  server = new HttpServer2.Builder()
      .setName("test")
      .addEndpoint(new URI("https://localhost"))
      .setConf(conf)
      .keyPassword(sslConf.get("ssl.server.keystore.keypassword"))
      .keyStore(sslConf.get("ssl.server.keystore.location"),
          sslConf.get("ssl.server.keystore.password"),
          sslConf.get("ssl.server.keystore.type", "jks"))
      .trustStore(sslConf.get("ssl.server.truststore.location"),
          sslConf.get("ssl.server.truststore.password"),
          sslConf.get("ssl.server.truststore.type", "jks")).build();
  server.addServlet("echo", "/echo", TestHttpServer.EchoServlet.class);
  server.addServlet("longheader", "/longheader", LongHeaderServlet.class);
  server.start();
  baseUrl = new URL("https://"
      + NetUtils.getHostPortString(server.getConnectorAddress(0)));
  LOG.info("HTTP server started: " + baseUrl);
}
Example 5
Source File: HttpServerFunctionalTest.java From hadoop with Apache License 2.0
/**
 * Pass in a server, return a URL bound to localhost and its port
 * @param server server
 * @return a URL bound to the base of the server
 * @throws MalformedURLException if the URL cannot be created.
 */
public static URL getServerURL(HttpServer2 server)
    throws MalformedURLException {
  assertNotNull("No server", server);
  return new URL("http://"
      + NetUtils.getHostPortString(server.getConnectorAddress(0)));
}
Example 6
Source File: TestHttpServer.java From hbase with Apache License 2.0
/**
 * Verify the administrator access for /logs, /stacks, /conf, /logLevel and
 * /metrics servlets.
 */
@Test
@Ignore
public void testAuthorizationOfDefaultServlets() throws Exception {
  Configuration conf = new Configuration();
  conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION,
      true);
  conf.setBoolean(
      CommonConfigurationKeys.HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN,
      true);
  conf.set(HttpServer.FILTER_INITIALIZERS_PROPERTY,
      DummyFilterInitializer.class.getName());

  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,
      MyGroupsProvider.class.getName());
  Groups.getUserToGroupsMappingService(conf);
  MyGroupsProvider.clearMapping();
  MyGroupsProvider.mapping.put("userA", Collections.singletonList("groupA"));
  MyGroupsProvider.mapping.put("userB", Collections.singletonList("groupB"));
  MyGroupsProvider.mapping.put("userC", Collections.singletonList("groupC"));
  MyGroupsProvider.mapping.put("userD", Collections.singletonList("groupD"));
  MyGroupsProvider.mapping.put("userE", Collections.singletonList("groupE"));

  HttpServer myServer = new HttpServer.Builder().setName("test")
      .addEndpoint(new URI("http://localhost:0")).setFindPort(true).setConf(conf)
      .setACL(new AccessControlList("userA,userB groupC,groupD")).build();
  myServer.setAttribute(HttpServer.CONF_CONTEXT_ATTRIBUTE, conf);
  myServer.start();

  String serverURL = "http://"
      + NetUtils.getHostPortString(myServer.getConnectorAddress(0)) + "/";
  for (String servlet : new String[] { "conf", "logs", "stacks",
      "logLevel", "metrics" }) {
    for (String user : new String[] { "userA", "userB", "userC", "userD" }) {
      assertEquals(HttpURLConnection.HTTP_OK, getHttpStatusCode(serverURL
          + servlet, user));
    }
    assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED, getHttpStatusCode(
        serverURL + servlet, "userE"));
  }
  myServer.stop();
}
Example 7
Source File: HttpServerFunctionalTest.java From hbase with Apache License 2.0
/**
 * Pass in a server, return a URL bound to localhost and its port
 * @param server server
 * @return a URL bound to the base of the server
 * @throws MalformedURLException if the URL cannot be created.
 */
public static URL getServerURL(HttpServer server)
    throws MalformedURLException {
  assertNotNull("No server", server);
  return new URL("http://"
      + NetUtils.getHostPortString(server.getConnectorAddress(0)));
}
Example 8
Source File: TestStorageContainerManagerHttpServer.java From hadoop-ozone with Apache License 2.0
private static boolean canAccess(String scheme, InetSocketAddress addr) {
  if (addr == null) {
    return false;
  }
  try {
    URL url =
        new URL(scheme + "://" + NetUtils.getHostPortString(addr) + "/jmx");
    URLConnection conn = connectionFactory.openConnection(url);
    conn.connect();
    conn.getContent();
  } catch (IOException e) {
    return false;
  }
  return true;
}
Example 9
Source File: TestHttpCookieFlag.java From big-c with Apache License 2.0
@Test
public void testHttpsCookie() throws IOException, GeneralSecurityException {
  URL base = new URL("https://" + NetUtils.getHostPortString(server
      .getConnectorAddress(1)));
  HttpsURLConnection conn = (HttpsURLConnection) new URL(base,
      "/echo").openConnection();
  conn.setSSLSocketFactory(clientSslFactory.createSSLSocketFactory());

  String header = conn.getHeaderField("Set-Cookie");
  List<HttpCookie> cookies = HttpCookie.parse(header);
  Assert.assertTrue(!cookies.isEmpty());
  Assert.assertTrue(header.contains("; HttpOnly"));
  Assert.assertTrue(cookies.get(0).getSecure());
  Assert.assertTrue("token".equals(cookies.get(0).getValue()));
}
Example 10
Source File: DFSUtil.java From big-c with Apache License 2.0
/**
 * Returns list of InetSocketAddresses corresponding to the namenode
 * that manages this cluster. Note this is to be used by datanodes to get
 * the list of namenode addresses to talk to.
 *
 * Returns namenode address specifically configured for datanodes (using
 * service ports), if found. If not, regular RPC address configured for other
 * clients is returned.
 *
 * @param conf configuration
 * @return list of InetSocketAddress
 * @throws IOException on error
 */
public static Map<String, Map<String, InetSocketAddress>>
    getNNServiceRpcAddressesForCluster(Configuration conf) throws IOException {
  // Use default address as fall back
  String defaultAddress;
  try {
    defaultAddress = NetUtils.getHostPortString(NameNode.getAddress(conf));
  } catch (IllegalArgumentException e) {
    defaultAddress = null;
  }

  Collection<String> parentNameServices = conf.getTrimmedStringCollection(
      DFSConfigKeys.DFS_INTERNAL_NAMESERVICES_KEY);

  if (parentNameServices.isEmpty()) {
    parentNameServices = conf.getTrimmedStringCollection(
        DFSConfigKeys.DFS_NAMESERVICES);
  } else {
    // Ensure that the internal service is indeed in the list of all available
    // nameservices.
    Set<String> availableNameServices = Sets.newHashSet(conf
        .getTrimmedStringCollection(DFSConfigKeys.DFS_NAMESERVICES));
    for (String nsId : parentNameServices) {
      if (!availableNameServices.contains(nsId)) {
        throw new IOException("Unknown nameservice: " + nsId);
      }
    }
  }

  Map<String, Map<String, InetSocketAddress>> addressList =
      getAddressesForNsIds(conf, parentNameServices, defaultAddress,
          DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, DFS_NAMENODE_RPC_ADDRESS_KEY);
  if (addressList.isEmpty()) {
    throw new IOException("Incorrect configuration: namenode address "
        + DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY + " or "
        + DFS_NAMENODE_RPC_ADDRESS_KEY
        + " is not configured.");
  }
  return addressList;
}
Example 11
Source File: TestHttpServer.java From big-c with Apache License 2.0
/**
 * Verify the administrator access for /logs, /stacks, /conf, /logLevel and
 * /metrics servlets.
 *
 * @throws Exception
 */
@Test
public void testAuthorizationOfDefaultServlets() throws Exception {
  Configuration conf = new Configuration();
  conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION,
      true);
  conf.setBoolean(
      CommonConfigurationKeys.HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN,
      true);
  conf.set(HttpServer2.FILTER_INITIALIZER_PROPERTY,
      DummyFilterInitializer.class.getName());

  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,
      MyGroupsProvider.class.getName());
  Groups.getUserToGroupsMappingService(conf);
  MyGroupsProvider.clearMapping();
  MyGroupsProvider.mapping.put("userA", Arrays.asList("groupA"));
  MyGroupsProvider.mapping.put("userB", Arrays.asList("groupB"));
  MyGroupsProvider.mapping.put("userC", Arrays.asList("groupC"));
  MyGroupsProvider.mapping.put("userD", Arrays.asList("groupD"));
  MyGroupsProvider.mapping.put("userE", Arrays.asList("groupE"));

  HttpServer2 myServer = new HttpServer2.Builder().setName("test")
      .addEndpoint(new URI("http://localhost:0")).setFindPort(true).setConf(conf)
      .setACL(new AccessControlList("userA,userB groupC,groupD")).build();
  myServer.setAttribute(HttpServer2.CONF_CONTEXT_ATTRIBUTE, conf);
  myServer.start();

  String serverURL = "http://"
      + NetUtils.getHostPortString(myServer.getConnectorAddress(0)) + "/";
  for (String servlet : new String[] { "conf", "logs", "stacks",
      "logLevel", "metrics" }) {
    for (String user : new String[] { "userA", "userB", "userC", "userD" }) {
      assertEquals(HttpURLConnection.HTTP_OK, getHttpStatusCode(serverURL
          + servlet, user));
    }
    assertEquals(HttpURLConnection.HTTP_FORBIDDEN, getHttpStatusCode(
        serverURL + servlet, "userE"));
  }
  myServer.stop();
}
Example 12
Source File: TestSSLHttpServer.java From hadoop with Apache License 2.0
@BeforeClass
public static void setup() throws Exception {
  conf = new Configuration();
  conf.setInt(HttpServer2.HTTP_MAX_THREADS, 10);

  File base = new File(BASEDIR);
  FileUtil.fullyDelete(base);
  base.mkdirs();
  keystoresDir = new File(BASEDIR).getAbsolutePath();
  sslConfDir = KeyStoreTestUtil.getClasspathDir(TestSSLHttpServer.class);

  KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
  Configuration sslConf = new Configuration(false);
  sslConf.addResource("ssl-server.xml");
  sslConf.addResource("ssl-client.xml");

  clientSslFactory = new SSLFactory(SSLFactory.Mode.CLIENT, sslConf);
  clientSslFactory.init();

  server = new HttpServer2.Builder()
      .setName("test")
      .addEndpoint(new URI("https://localhost"))
      .setConf(conf)
      .keyPassword(sslConf.get("ssl.server.keystore.keypassword"))
      .keyStore(sslConf.get("ssl.server.keystore.location"),
          sslConf.get("ssl.server.keystore.password"),
          sslConf.get("ssl.server.keystore.type", "jks"))
      .trustStore(sslConf.get("ssl.server.truststore.location"),
          sslConf.get("ssl.server.truststore.password"),
          sslConf.get("ssl.server.truststore.type", "jks")).build();
  server.addServlet("echo", "/echo", TestHttpServer.EchoServlet.class);
  server.addServlet("longheader", "/longheader", LongHeaderServlet.class);
  server.start();
  baseUrl = new URL("https://"
      + NetUtils.getHostPortString(server.getConnectorAddress(0)));
  LOG.info("HTTP server started: " + baseUrl);
}
Example 13
Source File: SecondaryNameNode.java From big-c with Apache License 2.0
@Override // SecondaryNameNodeInfoMXBean
public String getHostAndPort() {
  return NetUtils.getHostPortString(nameNodeAddr);
}
Example 14
Source File: TestWebHdfsTokens.java From big-c with Apache License 2.0
@SuppressWarnings("unchecked") // for any(Token.class) @Test public void testLazyTokenFetchForSWebhdfs() throws Exception { MiniDFSCluster cluster = null; SWebHdfsFileSystem fs = null; try { final Configuration clusterConf = new HdfsConfiguration(conf); SecurityUtil.setAuthenticationMethod(SIMPLE, clusterConf); clusterConf.setBoolean(DFSConfigKeys .DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true); String BASEDIR = System.getProperty("test.build.dir", "target/test-dir") + "/" + TestWebHdfsTokens.class.getSimpleName(); String keystoresDir; String sslConfDir; clusterConf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true); clusterConf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name()); clusterConf.set(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0"); clusterConf.set(DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY, "localhost:0"); File base = new File(BASEDIR); FileUtil.fullyDelete(base); base.mkdirs(); keystoresDir = new File(BASEDIR).getAbsolutePath(); sslConfDir = KeyStoreTestUtil.getClasspathDir(TestWebHdfsTokens.class); KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, clusterConf, false); // trick the NN into thinking security is enabled w/o it trying // to login from a keytab UserGroupInformation.setConfiguration(clusterConf); cluster = new MiniDFSCluster.Builder(clusterConf).numDataNodes(1).build(); cluster.waitActive(); InetSocketAddress addr = cluster.getNameNode().getHttpsAddress(); String nnAddr = NetUtils.getHostPortString(addr); clusterConf.set(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY, nnAddr); SecurityUtil.setAuthenticationMethod(KERBEROS, clusterConf); UserGroupInformation.setConfiguration(clusterConf); uri = DFSUtil.createUri( "swebhdfs", cluster.getNameNode().getHttpsAddress()); validateLazyTokenFetch(clusterConf); } finally { IOUtils.cleanup(null, fs); if (cluster != null) { cluster.shutdown(); } } }
Example 15
Source File: TestGlobalFilter.java From hadoop with Apache License 2.0
@Test
public void testServletFilter() throws Exception {
  Configuration conf = new Configuration();

  // start a http server with CountingFilter
  conf.set(HttpServer2.FILTER_INITIALIZER_PROPERTY,
      RecordingFilter.Initializer.class.getName());
  HttpServer2 http = createTestServer(conf);
  http.start();

  final String fsckURL = "/fsck";
  final String stacksURL = "/stacks";
  final String ajspURL = "/a.jsp";
  final String listPathsURL = "/listPaths";
  final String dataURL = "/data";
  final String streamFile = "/streamFile";
  final String rootURL = "/";
  final String allURL = "/*";
  final String outURL = "/static/a.out";
  final String logURL = "/logs/a.log";

  final String[] urls = {fsckURL, stacksURL, ajspURL, listPathsURL,
      dataURL, streamFile, rootURL, allURL, outURL, logURL};

  // access the urls
  final String prefix = "http://"
      + NetUtils.getHostPortString(http.getConnectorAddress(0));
  try {
    for (int i = 0; i < urls.length; i++) {
      access(prefix + urls[i]);
    }
  } finally {
    http.stop();
  }

  LOG.info("RECORDS = " + RECORDS);

  // verify records
  for (int i = 0; i < urls.length; i++) {
    assertTrue(RECORDS.remove(urls[i]));
  }
  assertTrue(RECORDS.isEmpty());
}
Example 16
Source File: TestGlobalFilter.java From big-c with Apache License 2.0
@Test
public void testServletFilter() throws Exception {
  Configuration conf = new Configuration();

  // start a http server with CountingFilter
  conf.set(HttpServer2.FILTER_INITIALIZER_PROPERTY,
      RecordingFilter.Initializer.class.getName());
  HttpServer2 http = createTestServer(conf);
  http.start();

  final String fsckURL = "/fsck";
  final String stacksURL = "/stacks";
  final String ajspURL = "/a.jsp";
  final String listPathsURL = "/listPaths";
  final String dataURL = "/data";
  final String streamFile = "/streamFile";
  final String rootURL = "/";
  final String allURL = "/*";
  final String outURL = "/static/a.out";
  final String logURL = "/logs/a.log";

  final String[] urls = {fsckURL, stacksURL, ajspURL, listPathsURL,
      dataURL, streamFile, rootURL, allURL, outURL, logURL};

  // access the urls
  final String prefix = "http://"
      + NetUtils.getHostPortString(http.getConnectorAddress(0));
  try {
    for (int i = 0; i < urls.length; i++) {
      access(prefix + urls[i]);
    }
  } finally {
    http.stop();
  }

  LOG.info("RECORDS = " + RECORDS);

  // verify records
  for (int i = 0; i < urls.length; i++) {
    assertTrue(RECORDS.remove(urls[i]));
  }
  assertTrue(RECORDS.isEmpty());
}
Example 17
Source File: NameNode.java From hadoop with Apache License 2.0
/**
 * Initialize name-node.
 *
 * @param conf the configuration
 */
protected void initialize(Configuration conf) throws IOException {
  if (conf.get(HADOOP_USER_GROUP_METRICS_PERCENTILES_INTERVALS) == null) {
    String intervals = conf.get(DFS_METRICS_PERCENTILES_INTERVALS_KEY);
    if (intervals != null) {
      conf.set(HADOOP_USER_GROUP_METRICS_PERCENTILES_INTERVALS,
          intervals);
    }
  }

  UserGroupInformation.setConfiguration(conf);
  loginAsNameNodeUser(conf);

  NameNode.initMetrics(conf, this.getRole());
  StartupProgressMetrics.register(startupProgress);

  if (NamenodeRole.NAMENODE == role) {
    startHttpServer(conf);
  }

  this.spanReceiverHost = SpanReceiverHost.get(conf,
      DFSConfigKeys.DFS_SERVER_HTRACE_PREFIX);

  loadNamesystem(conf);

  rpcServer = createRpcServer(conf);
  if (clientNamenodeAddress == null) {
    // This is expected for MiniDFSCluster. Set it now using
    // the RPC server's bind address.
    clientNamenodeAddress =
        NetUtils.getHostPortString(rpcServer.getRpcAddress());
    LOG.info("Clients are to use " + clientNamenodeAddress + " to access"
        + " this namenode/service.");
  }
  if (NamenodeRole.NAMENODE == role) {
    httpServer.setNameNodeAddress(getNameNodeAddress());
    httpServer.setFSImage(getFSImage());
  }

  pauseMonitor = new JvmPauseMonitor(conf);
  pauseMonitor.start();
  metrics.getJvmMetrics().setPauseMonitor(pauseMonitor);

  startCommonServices(conf);
}
Example 18
Source File: NameNode.java From big-c with Apache License 2.0
/**
 * @return NameNode RPC address in "host:port" string form
 */
public String getNameNodeAddressHostPortString() {
  return NetUtils.getHostPortString(rpcServer.getRpcAddress());
}
Example 19
Source File: TestWebHdfsTimeouts.java From hadoop with Apache License 2.0
/**
 * Creates an HTTP 307 response with the redirect location set back to the
 * test server's address. HTTP is supposed to terminate newlines with CRLF, so
 * we hard-code that instead of using the line separator property.
 *
 * @return String HTTP 307 response
 */
private String temporaryRedirect() {
  return "HTTP/1.1 307 Temporary Redirect\r\n" +
      "Location: http://" + NetUtils.getHostPortString(nnHttpAddress) + "\r\n" +
      "\r\n";
}
Example 20
Source File: TestWebHdfsTimeouts.java From big-c with Apache License 2.0
/**
 * Creates an HTTP 307 response with the redirect location set back to the
 * test server's address. HTTP is supposed to terminate newlines with CRLF, so
 * we hard-code that instead of using the line separator property.
 *
 * @return String HTTP 307 response
 */
private String temporaryRedirect() {
  return "HTTP/1.1 307 Temporary Redirect\r\n" +
      "Location: http://" + NetUtils.getHostPortString(nnHttpAddress) + "\r\n" +
      "\r\n";
}