Java Code Examples for org.apache.hadoop.security.SecurityUtil#setPolicy()
The following examples show how to use org.apache.hadoop.security.SecurityUtil#setPolicy(), which installs a service-level authorization policy for Hadoop RPC.
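All of the examples below follow the same pattern: check whether service-level authorization is enabled, obtain a PolicyProvider, wrap it in a ConfiguredPolicy, and install it process-wide with SecurityUtil.setPolicy(). Here is a minimal standalone sketch of that pattern, assuming the pre-0.21 Hadoop security API these projects use; the package names and the HDFSPolicyProvider default are taken from the examples below, and the class and method names of the sketch itself are illustrative only:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HDFSPolicyProvider;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.authorize.ConfiguredPolicy;
import org.apache.hadoop.security.authorize.PolicyProvider;
import org.apache.hadoop.security.authorize.ServiceAuthorizationManager;
import org.apache.hadoop.util.ReflectionUtils;

public class SetPolicyExample {
  public static void installPolicy(Configuration conf) {
    // Only install ACLs when service-level authorization is enabled;
    // this is the same guard used in the NameNode and DataNode examples below.
    if (conf.getBoolean(
        ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false)) {
      // Resolve the PolicyProvider class from configuration, falling back
      // to HDFS's provider, and instantiate it reflectively.
      PolicyProvider policyProvider = (PolicyProvider) ReflectionUtils
          .newInstance(conf.getClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
              HDFSPolicyProvider.class, PolicyProvider.class), conf);
      // Install the configured ACLs JVM-wide; subsequent
      // ServiceAuthorizationManager.authorize() calls consult this policy.
      SecurityUtil.setPolicy(new ConfiguredPolicy(conf, policyProvider));
    }
  }
}

Because setPolicy() installs a static, JVM-wide policy, tests such as Examples 1 and 2 simply call it directly before exercising the RPC or authorization machinery.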
Example 1
Source File: TestConfiguredPolicy.java From RDFS with Apache License 2.0 (an identical copy appears in hadoop-gpu)
public void testConfiguredPolicy() throws Exception {
  Configuration conf = new Configuration();
  conf.set(KEY_1, AccessControlList.WILDCARD_ACL_VALUE);
  conf.set(KEY_2, USER1 + " " + GROUPS1[0]);

  ConfiguredPolicy policy = new ConfiguredPolicy(conf, new TestPolicyProvider());
  SecurityUtil.setPolicy(policy);

  Subject user1 =
      SecurityUtil.getSubject(new UnixUserGroupInformation(USER1, GROUPS1));

  // Should succeed
  ServiceAuthorizationManager.authorize(user1, Protocol1.class);

  // Should fail
  Subject user2 =
      SecurityUtil.getSubject(new UnixUserGroupInformation(USER2, GROUPS2));
  boolean failed = false;
  try {
    ServiceAuthorizationManager.authorize(user2, Protocol2.class);
  } catch (AuthorizationException ae) {
    failed = true;
  }
  assertTrue(failed);
}
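Here KEY_1 and KEY_2 are, presumably, the ACL configuration keys that the test's PolicyProvider associates with Protocol1 and Protocol2. KEY_1 is set to the wildcard ACL, so any user is authorized for Protocol1, while KEY_2 admits only USER1 and its first group; the authorize() call for user2 against Protocol2 is therefore expected to throw AuthorizationException, which the test records and asserts.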
Example 2
Source File: TestRPC.java From RDFS with Apache License 2.0 (an identical copy appears in hadoop-gpu)
private void doRPCs(Configuration conf, boolean expectFailure) throws Exception {
  SecurityUtil.setPolicy(new ConfiguredPolicy(conf, new TestPolicyProvider()));

  Server server = RPC.getServer(new TestImpl(), ADDRESS, 0, 5, true, conf);

  TestProtocol proxy = null;

  server.start();

  InetSocketAddress addr = NetUtils.getConnectAddress(server);
  try {
    proxy = (TestProtocol)RPC.getProxy(
        TestProtocol.class, TestProtocol.versionID, addr, conf);
    proxy.ping();

    if (expectFailure) {
      fail("Expect RPC.getProxy to fail with AuthorizationException!");
    }
  } catch (RemoteException e) {
    if (expectFailure) {
      assertTrue(e.unwrapRemoteException() instanceof AuthorizationException);
    } else {
      throw e;
    }
  } finally {
    server.stop();
    if (proxy != null) {
      RPC.stopProxy(proxy);
    }
  }
}
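Note the ordering: the policy is installed with setPolicy() before the RPC server is created, so the server consults it for every incoming connection. When authorization fails, the server-side AuthorizationException reaches the client wrapped in a RemoteException, and unwrapRemoteException() recovers the original exception type for the assertion.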
Example 3
Source File: NameNode.java From hadoop-gpu with Apache License 2.0
/**
 * Initialize name-node.
 *
 * @param conf the configuration
 */
private void initialize(Configuration conf) throws IOException {
  InetSocketAddress socAddr = NameNode.getAddress(conf);
  int handlerCount = conf.getInt("dfs.namenode.handler.count", 10);

  // set service-level authorization security policy
  if (serviceAuthEnabled =
        conf.getBoolean(
          ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false)) {
    PolicyProvider policyProvider =
      (PolicyProvider)(ReflectionUtils.newInstance(
          conf.getClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
              HDFSPolicyProvider.class, PolicyProvider.class),
          conf));
    SecurityUtil.setPolicy(new ConfiguredPolicy(conf, policyProvider));
  }

  // create rpc server
  this.server = RPC.getServer(this, socAddr.getHostName(), socAddr.getPort(),
                              handlerCount, false, conf);

  // The rpc-server port can be ephemeral... ensure we have the correct info
  this.serverAddress = this.server.getListenerAddress();
  FileSystem.setDefaultUri(conf, getUri(serverAddress));
  LOG.info("Namenode up at: " + this.serverAddress);

  myMetrics = new NameNodeMetrics(conf, this);

  this.namesystem = new FSNamesystem(this, conf);
  startHttpServer(conf);
  this.server.start();  //start RPC server
  startTrashEmptier(conf);
}
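The NameNode only installs a policy when ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG is set to true, remembering the result in the serviceAuthEnabled field, and it calls setPolicy() before RPC.getServer() creates the RPC server, so the policy is in force before any client connection can be accepted.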
Example 4
Source File: NameNode.java From RDFS with Apache License 2.0
/**
 * Initialize name-node.
 */
private void initialize() throws IOException {
  // set service-level authorization security policy
  if (serviceAuthEnabled =
        getConf().getBoolean(
          ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false)) {
    PolicyProvider policyProvider =
      (PolicyProvider)(ReflectionUtils.newInstance(
          getConf().getClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
              HDFSPolicyProvider.class, PolicyProvider.class),
          getConf()));
    SecurityUtil.setPolicy(new ConfiguredPolicy(getConf(), policyProvider));
  }

  // Check that the client port is free: create a socket, bind to it, and
  // throw an exception if the port is busy. This has to be done before
  // reading the Namesystem, so we fail fast instead of wasting time.
  InetSocketAddress clientSocket = NameNode.getAddress(getConf());
  ServerSocket socket = new ServerSocket();
  socket.bind(clientSocket);
  socket.close();

  InetSocketAddress dnSocket = NameNode.getDNProtocolAddress(getConf());
  if (dnSocket != null) {
    socket = new ServerSocket();
    socket.bind(dnSocket);
    socket.close();
    //System.err.println("Tested " + dnSocket);
  }

  long serverVersion = ClientProtocol.versionID;
  this.clientProtocolMethodsFingerprint = ProtocolSignature
      .getMethodsSigFingerPrint(ClientProtocol.class, serverVersion);

  myMetrics = new NameNodeMetrics(getConf(), this);
  this.clusterName = getConf().get("dfs.cluster.name");
  this.namesystem = new FSNamesystem(this, getConf());
  // HACK: from removal of FSNamesystem.getFSNamesystem().
  JspHelper.fsn = this.namesystem;

  this.startDNServer();
  startHttpServer(getConf());
}
Example 5
Source File: DataNode.java From RDFS with Apache License 2.0
private void initConfig(Configuration conf) throws IOException {
  if (conf.get("slave.host.name") != null) {
    machineName = conf.get("slave.host.name");
  }
  if (machineName == null) {
    machineName = DNS.getDefaultHost(
        conf.get("dfs.datanode.dns.interface", "default"),
        conf.get("dfs.datanode.dns.nameserver", "default"));
  }

  // Allow configuration to delay block reports to find bugs
  artificialBlockReceivedDelay = conf.getInt(
      "dfs.datanode.artificialBlockReceivedDelay", 0);

  if (conf.getBoolean(
      ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false)) {
    PolicyProvider policyProvider = (PolicyProvider) (ReflectionUtils
        .newInstance(conf.getClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
            HDFSPolicyProvider.class, PolicyProvider.class), conf));
    SecurityUtil.setPolicy(new ConfiguredPolicy(conf, policyProvider));
  }

  this.socketTimeout = conf.getInt("dfs.socket.timeout",
      HdfsConstants.READ_TIMEOUT);
  this.socketReadExtentionTimeout = conf.getInt(
      HdfsConstants.DFS_DATANODE_READ_EXTENSION,
      HdfsConstants.READ_TIMEOUT_EXTENSION);
  this.socketWriteTimeout = conf.getInt("dfs.datanode.socket.write.timeout",
      HdfsConstants.WRITE_TIMEOUT);
  this.socketWriteExtentionTimeout = conf.getInt(
      HdfsConstants.DFS_DATANODE_WRITE_EXTENTSION,
      HdfsConstants.WRITE_TIMEOUT_EXTENSION);

  /* Based on results on different platforms, we might need to set the
   * default to false on some of them. */
  this.transferToAllowed = conf.getBoolean(
      "dfs.datanode.transferTo.allowed", true);

  // TODO: remove the global setting and change data protocol to support
  // per session setting for this value.
  this.ignoreChecksumWhenRead = conf.getBoolean(
      "dfs.datanode.read.ignore.checksum", false);

  this.writePacketSize = conf.getInt("dfs.write.packet.size", 64*1024);

  this.deletedReportInterval = conf.getLong("dfs.blockreport.intervalMsec",
      BLOCKREPORT_INTERVAL);
  // Calculate the full block report interval
  int fullReportMagnifier = conf.getInt("dfs.fullblockreport.magnifier", 2);
  this.blockReportInterval = fullReportMagnifier * deletedReportInterval;
  this.heartBeatInterval = conf.getLong("dfs.heartbeat.interval",
      HEARTBEAT_INTERVAL) * 1000L;
  long heartbeatRecheckInterval = conf.getInt(
      "heartbeat.recheck.interval", 5 * 60 * 1000); // 5 minutes
  this.heartbeatExpireInterval = 2 * heartbeatRecheckInterval +
      10 * heartBeatInterval;

  this.initialBlockReportDelay = conf.getLong("dfs.blockreport.initialDelay",
      BLOCKREPORT_INITIAL_DELAY) * 1000L;
  if (this.initialBlockReportDelay >= blockReportInterval) {
    this.initialBlockReportDelay = 0;
    LOG.info("dfs.blockreport.initialDelay is greater than " +
        "dfs.blockreport.intervalMsec." +
        " Setting initial delay to 0 msec:");
  }

  // do we need to sync block file contents to disk when blockfile is closed?
  this.syncOnClose = conf.getBoolean("dfs.datanode.synconclose", false);

  this.minDiskCheckIntervalMsec = conf.getLong(
      "dfs.datnode.checkdisk.mininterval",
      FSConstants.MIN_INTERVAL_CHECK_DIR_MSEC);
}
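The DataNode applies the same guarded pattern as the NameNode examples, installing the ConfiguredPolicy during configuration initialization, before any of its server sockets are created; the rest of the method simply reads the datanode's timeout, block-report, and heartbeat settings from the configuration.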