Java Code Examples for org.apache.hadoop.hdfs.DFSUtil#createClientDatanodeProtocolProxy()
The following examples show how to use
org.apache.hadoop.hdfs.DFSUtil#createClientDatanodeProtocolProxy().
You can vote up the examples you find useful or vote down those you don't,
and you can reach the original project or source file by following the links above each example. You may also check out the related API usage in the sidebar.
Example 1
Source File: DFSAdmin.java From hadoop with Apache License 2.0 | 6 votes |
private ClientDatanodeProtocol getDataNodeProxy(String datanode) throws IOException { InetSocketAddress datanodeAddr = NetUtils.createSocketAddr(datanode); // Get the current configuration Configuration conf = getConf(); // For datanode proxy the server principal should be DN's one. conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY, conf.get(DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, "")); // Create the client ClientDatanodeProtocol dnProtocol = DFSUtil.createClientDatanodeProtocolProxy(datanodeAddr, getUGI(), conf, NetUtils.getSocketFactory(conf, ClientDatanodeProtocol.class)); return dnProtocol; }
Example 2
Source File: DFSAdmin.java From big-c with Apache License 2.0 | 6 votes |
private ClientDatanodeProtocol getDataNodeProxy(String datanode) throws IOException { InetSocketAddress datanodeAddr = NetUtils.createSocketAddr(datanode); // Get the current configuration Configuration conf = getConf(); // For datanode proxy the server principal should be DN's one. conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY, conf.get(DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, "")); // Create the client ClientDatanodeProtocol dnProtocol = DFSUtil.createClientDatanodeProtocolProxy(datanodeAddr, getUGI(), conf, NetUtils.getSocketFactory(conf, ClientDatanodeProtocol.class)); return dnProtocol; }
Example 3
Source File: TestBlockToken.java From hadoop with Apache License 2.0 | 5 votes |
/**
 * Verifies that a client authenticated only by a block access token can
 * perform an RPC against a (mock) datanode server and that the call
 * round-trips the expected block id.
 */
@Test
public void testBlockTokenRpc() throws Exception {
  Configuration conf = new Configuration();
  conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
  UserGroupInformation.setConfiguration(conf);

  BlockTokenSecretManager secretManager = new BlockTokenSecretManager(
      blockKeyUpdateInterval, blockTokenLifetime, 0, "fake-pool", null);
  Token<BlockTokenIdentifier> token = secretManager.generateToken(block3,
      EnumSet.allOf(BlockTokenSecretManager.AccessMode.class));

  final Server server = createMockDatanode(secretManager, token, conf);
  server.start();

  final InetSocketAddress serverAddr = NetUtils.getConnectAddress(server);
  // The remote user name carries the block id; the token authenticates it.
  final UserGroupInformation ticket =
      UserGroupInformation.createRemoteUser(block3.toString());
  ticket.addToken(token);

  ClientDatanodeProtocol proxy = null;
  try {
    proxy = DFSUtil.createClientDatanodeProtocolProxy(serverAddr, ticket, conf,
        NetUtils.getDefaultSocketFactory(conf));
    assertEquals(block3.getBlockId(), proxy.getReplicaVisibleLength(block3));
  } finally {
    server.stop();
    if (proxy != null) {
      RPC.stopProxy(proxy);
    }
  }
}
Example 4
Source File: TestShortCircuitLocalRead.java From hadoop with Apache License 2.0 | 5 votes |
@Test(timeout=10000) public void testDeprecatedGetBlockLocalPathInfoRpc() throws IOException { final Configuration conf = new Configuration(); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1) .format(true).build(); cluster.waitActive(); FileSystem fs = cluster.getFileSystem(); try { DFSTestUtil.createFile(fs, new Path("/tmp/x"), 16, (short) 1, 23); LocatedBlocks lb = cluster.getNameNode().getRpcServer() .getBlockLocations("/tmp/x", 0, 16); // Create a new block object, because the block inside LocatedBlock at // namenode is of type BlockInfo. ExtendedBlock blk = new ExtendedBlock(lb.get(0).getBlock()); Token<BlockTokenIdentifier> token = lb.get(0).getBlockToken(); final DatanodeInfo dnInfo = lb.get(0).getLocations()[0]; ClientDatanodeProtocol proxy = DFSUtil.createClientDatanodeProtocolProxy(dnInfo, conf, 60000, false); try { proxy.getBlockLocalPathInfo(blk, token); Assert.fail("The call should have failed as this user " + " is not allowed to call getBlockLocalPathInfo"); } catch (IOException ex) { Assert.assertTrue(ex.getMessage().contains( "not allowed to call getBlockLocalPathInfo")); } } finally { fs.close(); cluster.shutdown(); } }
Example 5
Source File: TestBlockToken.java From big-c with Apache License 2.0 | 5 votes |
/**
 * Checks block-token-based RPC authentication end to end: a remote user
 * holding only a block access token talks to a mock datanode server, and
 * the returned replica length matches the block id echoed by the mock.
 */
@Test
public void testBlockTokenRpc() throws Exception {
  Configuration configuration = new Configuration();
  configuration.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
  UserGroupInformation.setConfiguration(configuration);

  BlockTokenSecretManager tokenManager = new BlockTokenSecretManager(
      blockKeyUpdateInterval, blockTokenLifetime, 0, "fake-pool", null);
  Token<BlockTokenIdentifier> blockToken = tokenManager.generateToken(block3,
      EnumSet.allOf(BlockTokenSecretManager.AccessMode.class));

  final Server mockDatanode = createMockDatanode(tokenManager, blockToken,
      configuration);
  mockDatanode.start();
  final InetSocketAddress connectAddr = NetUtils.getConnectAddress(mockDatanode);

  // Identify the caller by the block's string form; the token does the
  // actual authentication.
  final UserGroupInformation caller =
      UserGroupInformation.createRemoteUser(block3.toString());
  caller.addToken(blockToken);

  ClientDatanodeProtocol dnProxy = null;
  try {
    dnProxy = DFSUtil.createClientDatanodeProtocolProxy(connectAddr, caller,
        configuration, NetUtils.getDefaultSocketFactory(configuration));
    assertEquals(block3.getBlockId(), dnProxy.getReplicaVisibleLength(block3));
  } finally {
    mockDatanode.stop();
    if (dnProxy != null) {
      RPC.stopProxy(dnProxy);
    }
  }
}
Example 6
Source File: TestShortCircuitLocalRead.java From big-c with Apache License 2.0 | 5 votes |
@Test(timeout=10000) public void testDeprecatedGetBlockLocalPathInfoRpc() throws IOException { final Configuration conf = new Configuration(); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1) .format(true).build(); cluster.waitActive(); FileSystem fs = cluster.getFileSystem(); try { DFSTestUtil.createFile(fs, new Path("/tmp/x"), 16, (short) 1, 23); LocatedBlocks lb = cluster.getNameNode().getRpcServer() .getBlockLocations("/tmp/x", 0, 16); // Create a new block object, because the block inside LocatedBlock at // namenode is of type BlockInfo. ExtendedBlock blk = new ExtendedBlock(lb.get(0).getBlock()); Token<BlockTokenIdentifier> token = lb.get(0).getBlockToken(); final DatanodeInfo dnInfo = lb.get(0).getLocations()[0]; ClientDatanodeProtocol proxy = DFSUtil.createClientDatanodeProtocolProxy(dnInfo, conf, 60000, false); try { proxy.getBlockLocalPathInfo(blk, token); Assert.fail("The call should have failed as this user " + " is not allowed to call getBlockLocalPathInfo"); } catch (IOException ex) { Assert.assertTrue(ex.getMessage().contains( "not allowed to call getBlockLocalPathInfo")); } } finally { fs.close(); cluster.shutdown(); } }
Example 7
Source File: TestBlockToken.java From hadoop with Apache License 2.0 | 4 votes |
/**
 * Test that fast repeated invocations of createClientDatanodeProtocolProxy
 * will not end up using up thousands of sockets. This is a regression test
 * for HDFS-1965.
 */
@Test
public void testBlockTokenRpcLeak() throws Exception {
  Configuration conf = new Configuration();
  conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
  UserGroupInformation.setConfiguration(conf);

  Assume.assumeTrue(FD_DIR.exists());
  BlockTokenSecretManager sm = new BlockTokenSecretManager(
      blockKeyUpdateInterval, blockTokenLifetime, 0, "fake-pool", null);
  Token<BlockTokenIdentifier> token = sm.generateToken(block3,
      EnumSet.allOf(BlockTokenSecretManager.AccessMode.class));

  final Server server = createMockDatanode(sm, token, conf);
  server.start();

  final InetSocketAddress addr = NetUtils.getConnectAddress(server);
  DatanodeID fakeDnId = DFSTestUtil.getLocalDatanodeID(addr.getPort());

  ExtendedBlock b = new ExtendedBlock("fake-pool", new Block(12345L));
  LocatedBlock fakeBlock = new LocatedBlock(b, new DatanodeInfo[0]);
  fakeBlock.setBlockToken(token);

  // Create another RPC proxy with the same configuration - this will never
  // attempt to connect anywhere -- but it causes the refcount on the
  // RPC "Client" object to stay above 0 such that RPC.stopProxy doesn't
  // actually close the TCP connections to the real target DN.
  ClientDatanodeProtocol proxyToNoWhere = RPC.getProxy(
      ClientDatanodeProtocol.class, ClientDatanodeProtocol.versionID,
      new InetSocketAddress("1.1.1.1", 1),
      UserGroupInformation.createRemoteUser("junk"), conf,
      NetUtils.getDefaultSocketFactory(conf));

  ClientDatanodeProtocol proxy = null;
  int fdsAtStart = countOpenFileDescriptors();
  try {
    long endTime = Time.now() + 3000;
    while (Time.now() < endTime) {
      proxy = DFSUtil.createClientDatanodeProtocolProxy(fakeDnId, conf, 1000,
          false, fakeBlock);
      assertEquals(block3.getBlockId(), proxy.getReplicaVisibleLength(block3));
      // BUGFIX: proxy was just dereferenced, so the original's null check was
      // dead code; stop it unconditionally and clear the reference so the
      // finally block does not stop it twice.
      RPC.stopProxy(proxy);
      proxy = null;
      LOG.info("Num open fds:" + countOpenFileDescriptors());
    }
    int fdsAtEnd = countOpenFileDescriptors();
    if (fdsAtEnd - fdsAtStart > 50) {
      fail("Leaked " + (fdsAtEnd - fdsAtStart) + " fds!");
    }
  } finally {
    server.stop();
    // BUGFIX: release the last proxy if the in-loop assertion failed, and
    // always stop proxyToNoWhere -- the original only stopped it on the
    // success path, leaking it whenever the test body threw.
    if (proxy != null) {
      RPC.stopProxy(proxy);
    }
    RPC.stopProxy(proxyToNoWhere);
  }
}
Example 8
Source File: TestBlockToken.java From big-c with Apache License 2.0 | 4 votes |
/**
 * Test that fast repeated invocations of createClientDatanodeProtocolProxy
 * will not end up using up thousands of sockets. This is a regression test
 * for HDFS-1965.
 */
@Test
public void testBlockTokenRpcLeak() throws Exception {
  Configuration conf = new Configuration();
  conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
  UserGroupInformation.setConfiguration(conf);

  Assume.assumeTrue(FD_DIR.exists());
  BlockTokenSecretManager sm = new BlockTokenSecretManager(
      blockKeyUpdateInterval, blockTokenLifetime, 0, "fake-pool", null);
  Token<BlockTokenIdentifier> token = sm.generateToken(block3,
      EnumSet.allOf(BlockTokenSecretManager.AccessMode.class));

  final Server server = createMockDatanode(sm, token, conf);
  server.start();

  final InetSocketAddress addr = NetUtils.getConnectAddress(server);
  DatanodeID fakeDnId = DFSTestUtil.getLocalDatanodeID(addr.getPort());

  ExtendedBlock b = new ExtendedBlock("fake-pool", new Block(12345L));
  LocatedBlock fakeBlock = new LocatedBlock(b, new DatanodeInfo[0]);
  fakeBlock.setBlockToken(token);

  // Create another RPC proxy with the same configuration - this will never
  // attempt to connect anywhere -- but it causes the refcount on the
  // RPC "Client" object to stay above 0 such that RPC.stopProxy doesn't
  // actually close the TCP connections to the real target DN.
  ClientDatanodeProtocol proxyToNoWhere = RPC.getProxy(
      ClientDatanodeProtocol.class, ClientDatanodeProtocol.versionID,
      new InetSocketAddress("1.1.1.1", 1),
      UserGroupInformation.createRemoteUser("junk"), conf,
      NetUtils.getDefaultSocketFactory(conf));

  ClientDatanodeProtocol proxy = null;
  int fdsAtStart = countOpenFileDescriptors();
  try {
    long endTime = Time.now() + 3000;
    while (Time.now() < endTime) {
      proxy = DFSUtil.createClientDatanodeProtocolProxy(fakeDnId, conf, 1000,
          false, fakeBlock);
      assertEquals(block3.getBlockId(), proxy.getReplicaVisibleLength(block3));
      // BUGFIX: proxy was just dereferenced, so the original's null check was
      // dead code; stop it unconditionally and clear the reference so the
      // finally block does not stop it twice.
      RPC.stopProxy(proxy);
      proxy = null;
      LOG.info("Num open fds:" + countOpenFileDescriptors());
    }
    int fdsAtEnd = countOpenFileDescriptors();
    if (fdsAtEnd - fdsAtStart > 50) {
      fail("Leaked " + (fdsAtEnd - fdsAtStart) + " fds!");
    }
  } finally {
    server.stop();
    // BUGFIX: release the last proxy if the in-loop assertion failed, and
    // always stop proxyToNoWhere -- the original only stopped it on the
    // success path, leaking it whenever the test body threw.
    if (proxy != null) {
      RPC.stopProxy(proxy);
    }
    RPC.stopProxy(proxyToNoWhere);
  }
}