org.apache.hadoop.security.SecurityUtil Java Examples
The following examples show how to use
org.apache.hadoop.security.SecurityUtil.
Each example is drawn from a real open-source project; the source file, project, and license are noted above each snippet.
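Before the project examples, here is a minimal, self-contained sketch (not taken from any project below) of the SecurityUtil calls that recur in them: getServerPrincipal, login, buildTokenService, and doAsLoginUser. The hostnames, the principal, and the two configuration keys are hypothetical placeholders.

import java.io.IOException;
import java.net.InetSocketAddress;
import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.SecurityUtil;

public class SecurityUtilTour {
  public static void main(String[] args) throws Exception {
    final Configuration conf = new Configuration();

    // Substitute _HOST in a Kerberos principal pattern with a concrete
    // hostname. The principal and hostname are illustrative values.
    String principal = SecurityUtil.getServerPrincipal(
        "svc/_HOST@EXAMPLE.COM", "node1.example.com");
    // -> "svc/node1.example.com@EXAMPLE.COM"

    // Log in from a keytab; both configuration keys are hypothetical
    // placeholders for whatever keys your service defines.
    // (No-op unless Kerberos security is enabled in conf.)
    SecurityUtil.login(conf, "my.service.keytab.file",
        "my.service.kerberos.principal");

    // Build a delegation-token service name for an RPC endpoint.
    Text service = SecurityUtil.buildTokenService(
        new InetSocketAddress("node1.example.com", 8020));

    // Run an action as the login (keytab) user instead of the current caller.
    FileSystem fs = SecurityUtil.doAsLoginUser(
        new PrivilegedExceptionAction<FileSystem>() {
          @Override
          public FileSystem run() throws IOException {
            return FileSystem.get(conf);
          }
        });

    System.out.println(principal + " / " + service + " / " + fs.getUri());
  }
}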
Example #1
Source File: IPCLoggerChannel.java From big-c with Apache License 2.0
protected QJournalProtocol createProxy() throws IOException {
  final Configuration confCopy = new Configuration(conf);

  // Need to set NODELAY or else batches larger than MTU can trigger
  // 40ms nagling delays.
  confCopy.setBoolean(
      CommonConfigurationKeysPublic.IPC_CLIENT_TCPNODELAY_KEY,
      true);

  RPC.setProtocolEngine(confCopy,
      QJournalProtocolPB.class, ProtobufRpcEngine.class);
  return SecurityUtil.doAsLoginUser(
      new PrivilegedExceptionAction<QJournalProtocol>() {
        @Override
        public QJournalProtocol run() throws IOException {
          RPC.setProtocolEngine(confCopy,
              QJournalProtocolPB.class, ProtobufRpcEngine.class);
          QJournalProtocolPB pbproxy = RPC.getProxy(
              QJournalProtocolPB.class,
              RPC.getProtocolVersion(QJournalProtocolPB.class),
              addr, confCopy);
          return new QJournalProtocolTranslatorPB(pbproxy);
        }
      });
}
Example #2
Source File: OMKeyRequest.java From hadoop-ozone with Apache License 2.0
private static EncryptedKeyVersion generateEDEK(OzoneManager ozoneManager,
    String ezKeyName) throws IOException {
  if (ezKeyName == null) {
    return null;
  }
  long generateEDEKStartTime = monotonicNow();
  EncryptedKeyVersion edek = SecurityUtil.doAsLoginUser(
      new PrivilegedExceptionAction<EncryptedKeyVersion>() {
        @Override
        public EncryptedKeyVersion run() throws IOException {
          try {
            return ozoneManager.getKmsProvider()
                .generateEncryptedKey(ezKeyName);
          } catch (GeneralSecurityException e) {
            throw new IOException(e);
          }
        }
      });
  long generateEDEKTime = monotonicNow() - generateEDEKStartTime;
  LOG.debug("generateEDEK takes {} ms", generateEDEKTime);
  Preconditions.checkNotNull(edek);
  return edek;
}
Example #3
Source File: KeyManagerImpl.java From hadoop-ozone with Apache License 2.0
private EncryptedKeyVersion generateEDEK(
    final String ezKeyName) throws IOException {
  if (ezKeyName == null) {
    return null;
  }
  long generateEDEKStartTime = monotonicNow();
  EncryptedKeyVersion edek = SecurityUtil.doAsLoginUser(
      new PrivilegedExceptionAction<EncryptedKeyVersion>() {
        @Override
        public EncryptedKeyVersion run() throws IOException {
          try {
            return getKMSProvider().generateEncryptedKey(ezKeyName);
          } catch (GeneralSecurityException e) {
            throw new IOException(e);
          }
        }
      });
  long generateEDEKTime = monotonicNow() - generateEDEKStartTime;
  LOG.debug("generateEDEK takes {} ms", generateEDEKTime);
  Preconditions.checkNotNull(edek);
  return edek;
}
Example #4
Source File: SecureLogin.java From pxf with Apache License 2.0
/**
 * Returns the service principal name from the configuration if available,
 * or defaults to the system property for the default server for backwards
 * compatibility. If the principal name contains the _HOST element, replaces
 * it with the name of the host where the service is running.
 *
 * @param serverName the name of the server
 * @param configuration the hadoop configuration
 * @return the service principal for the given server and configuration
 */
String getServicePrincipal(String serverName, Configuration configuration) {
  // use system property as default for backward compatibility when only 1
  // Kerberized cluster was supported
  String defaultPrincipal = StringUtils.equalsIgnoreCase(serverName, "default")
      ? System.getProperty(CONFIG_KEY_SERVICE_PRINCIPAL)
      : null;
  String principal = configuration.get(CONFIG_KEY_SERVICE_PRINCIPAL, defaultPrincipal);
  try {
    principal = SecurityUtil.getServerPrincipal(principal, getLocalHostName(configuration));
    LOG.debug("Resolved Kerberos principal name to {} for server {}", principal, serverName);
    return principal;
  } catch (Exception e) {
    // String.format uses %s placeholders (the original used SLF4J-style {})
    throw new IllegalStateException(
        String.format("Failed to determine local hostname for server %s : %s",
            serverName, e.getMessage()), e);
  }
}
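As a quick illustration of the _HOST handling described above, a hedged two-line fragment (assumes org.apache.hadoop.security.SecurityUtil is imported; principal and hostname are illustrative values):

// getServerPrincipal replaces the _HOST token with the supplied hostname.
String resolved = SecurityUtil.getServerPrincipal(
    "pxf/_HOST@EXAMPLE.COM", "gw1.example.com");
// resolved == "pxf/gw1.example.com@EXAMPLE.COM"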
Example #5
Source File: RpcProgramMountd.java From big-c with Apache License 2.0
public RpcProgramMountd(NfsConfiguration config,
    DatagramSocket registrationSocket, boolean allowInsecurePorts)
    throws IOException {
  // Note that RPC cache is not enabled
  super("mountd", "localhost", config.getInt(
      NfsConfigKeys.DFS_NFS_MOUNTD_PORT_KEY,
      NfsConfigKeys.DFS_NFS_MOUNTD_PORT_DEFAULT), PROGRAM, VERSION_1,
      VERSION_3, registrationSocket, allowInsecurePorts);
  exports = new ArrayList<String>();
  exports.add(config.get(NfsConfigKeys.DFS_NFS_EXPORT_POINT_KEY,
      NfsConfigKeys.DFS_NFS_EXPORT_POINT_DEFAULT));
  this.hostsMatcher = NfsExports.getInstance(config);
  this.mounts = Collections.synchronizedList(new ArrayList<MountEntry>());
  UserGroupInformation.setConfiguration(config);
  SecurityUtil.login(config, NfsConfigKeys.DFS_NFS_KEYTAB_FILE_KEY,
      NfsConfigKeys.DFS_NFS_KERBEROS_PRINCIPAL_KEY);
  this.dfsClient = new DFSClient(NameNode.getAddress(config), config);
}
Example #6
Source File: TestOzoneS3Util.java From hadoop-ozone with Apache License 2.0
@Test
public void testBuildServiceNameForToken() {
  Collection<String> nodeIDList = OmUtils.getOMNodeIds(configuration,
      serviceID);

  configuration.set(OmUtils.addKeySuffixes(OMConfigKeys.OZONE_OM_ADDRESS_KEY,
      serviceID, "om1"), "om1:9862");
  configuration.set(OmUtils.addKeySuffixes(OMConfigKeys.OZONE_OM_ADDRESS_KEY,
      serviceID, "om2"), "om2:9862");
  configuration.set(OmUtils.addKeySuffixes(OMConfigKeys.OZONE_OM_ADDRESS_KEY,
      serviceID, "om3"), "om3:9862");

  String expectedOmServiceAddress = buildServiceAddress(nodeIDList);

  SecurityUtil.setConfiguration(configuration);
  String omserviceAddr = OzoneS3Util.buildServiceNameForToken(configuration,
      serviceID, nodeIDList);

  Assert.assertEquals(expectedOmServiceAddress, omserviceAddr);
}
Example #7
Source File: StramClientUtils.java From attic-apex-core with Apache License 2.0
private Token<RMDelegationTokenIdentifier> getRMHAToken(
    org.apache.hadoop.yarn.api.records.Token rmDelegationToken) {
  // Build a list of service addresses to form the service name
  ArrayList<String> services = new ArrayList<>();
  for (String rmId : ConfigUtils.getRMHAIds(conf)) {
    LOG.info("Yarn Resource Manager id: {}", rmId);
    // Set RM_ID to get the corresponding RM_ADDRESS
    services.add(SecurityUtil.buildTokenService(getRMHAAddress(rmId)).toString());
  }
  Text rmTokenService = new Text(Joiner.on(',').join(services));

  return new Token<>(
      rmDelegationToken.getIdentifier().array(),
      rmDelegationToken.getPassword().array(),
      new Text(rmDelegationToken.getKind()),
      rmTokenService);
}
Example #8
Source File: SaslDataTransferTestCase.java From big-c with Apache License 2.0
/**
 * Creates configuration for starting a secure cluster.
 *
 * @param dataTransferProtection supported QOPs
 * @return configuration for starting a secure cluster
 * @throws Exception if there is any failure
 */
protected HdfsConfiguration createSecureConfig(
    String dataTransferProtection) throws Exception {
  HdfsConfiguration conf = new HdfsConfiguration();
  SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS, conf);
  conf.set(DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, hdfsPrincipal);
  conf.set(DFS_NAMENODE_KEYTAB_FILE_KEY, keytab);
  conf.set(DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, hdfsPrincipal);
  conf.set(DFS_DATANODE_KEYTAB_FILE_KEY, keytab);
  conf.set(DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY, spnegoPrincipal);
  conf.setBoolean(DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
  conf.set(DFS_DATA_TRANSFER_PROTECTION_KEY, dataTransferProtection);
  conf.set(DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name());
  conf.set(DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0");
  conf.set(DFS_DATANODE_HTTPS_ADDRESS_KEY, "localhost:0");
  conf.setInt(IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_KEY, 10);

  String keystoresDir = baseDir.getAbsolutePath();
  String sslConfDir = KeyStoreTestUtil.getClasspathDir(this.getClass());
  KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
  return conf;
}
Example #9
Source File: ClientRMProxy.java From hadoop with Apache License 2.0
@Unstable
public static Text getTokenService(Configuration conf, String address,
    String defaultAddr, int defaultPort) {
  if (HAUtil.isHAEnabled(conf)) {
    // Build a list of service addresses to form the service name
    ArrayList<String> services = new ArrayList<String>();
    YarnConfiguration yarnConf = new YarnConfiguration(conf);
    for (String rmId : HAUtil.getRMHAIds(conf)) {
      // Set RM_ID to get the corresponding RM_ADDRESS
      yarnConf.set(YarnConfiguration.RM_HA_ID, rmId);
      services.add(SecurityUtil.buildTokenService(
          yarnConf.getSocketAddr(address, defaultAddr, defaultPort))
          .toString());
    }
    return new Text(Joiner.on(',').join(services));
  }

  // Non-HA case - no need to set RM_ID
  return SecurityUtil.buildTokenService(conf.getSocketAddr(address,
      defaultAddr, defaultPort));
}
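The service strings joined above come from buildTokenService; whether each entry is host:port or ip:port depends on the hadoop.security.token.service.use_ip setting, which can also be toggled programmatically (as Example #14 below does). A small fragment with an illustrative address (assumes SecurityUtil, Text, and InetSocketAddress imports):

// Prefer hostnames over resolved IPs in token service names; this mirrors
// setting hadoop.security.token.service.use_ip to false.
SecurityUtil.setTokenServiceUseIp(false);
Text svc = SecurityUtil.buildTokenService(
    new InetSocketAddress("rm1.example.com", 8032));
// svc.toString() == "rm1.example.com:8032"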
Example #10
Source File: TestConfiguredPolicy.java From RDFS with Apache License 2.0
public void testConfiguredPolicy() throws Exception {
  Configuration conf = new Configuration();
  conf.set(KEY_1, AccessControlList.WILDCARD_ACL_VALUE);
  conf.set(KEY_2, USER1 + " " + GROUPS1[0]);

  ConfiguredPolicy policy = new ConfiguredPolicy(conf, new TestPolicyProvider());
  SecurityUtil.setPolicy(policy);

  Subject user1 =
      SecurityUtil.getSubject(new UnixUserGroupInformation(USER1, GROUPS1));
  // Should succeed
  ServiceAuthorizationManager.authorize(user1, Protocol1.class);

  // Should fail
  Subject user2 =
      SecurityUtil.getSubject(new UnixUserGroupInformation(USER2, GROUPS2));
  boolean failed = false;
  try {
    ServiceAuthorizationManager.authorize(user2, Protocol2.class);
  } catch (AuthorizationException ae) {
    failed = true;
  }
  assertTrue(failed);
}
Example #11
Source File: HdfsRepository.java From crate with Apache License 2.0
private static String preparePrincipal(String originalPrincipal) {
  String finalPrincipal = originalPrincipal;
  // Don't worry about host name resolution if they don't have the _HOST
  // pattern in the name.
  if (originalPrincipal.contains("_HOST")) {
    try {
      finalPrincipal = SecurityUtil.getServerPrincipal(originalPrincipal, getHostName());
    } catch (IOException e) {
      throw new UncheckedIOException(e);
    }

    if (originalPrincipal.equals(finalPrincipal) == false) {
      LOGGER.debug("Found service principal. Converted original principal name [{}] to server principal [{}]",
          originalPrincipal, finalPrincipal);
    }
  }
  return finalPrincipal;
}
Example #12
Source File: TestWithSecureMiniDFSCluster.java From streamx with Apache License 2.0
private Configuration createSecureConfig(String dataTransferProtection)
    throws Exception {
  HdfsConfiguration conf = new HdfsConfiguration();
  SecurityUtil.setAuthenticationMethod(
      UserGroupInformation.AuthenticationMethod.KERBEROS, conf);
  conf.set(DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, hdfsPrincipal);
  conf.set(DFS_NAMENODE_KEYTAB_FILE_KEY, keytab);
  conf.set(DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, hdfsPrincipal);
  conf.set(DFS_DATANODE_KEYTAB_FILE_KEY, keytab);
  conf.set(DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY, spnegoPrincipal);
  conf.setBoolean(DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
  conf.set(DFS_DATA_TRANSFER_PROTECTION_KEY, dataTransferProtection);
  conf.set(DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name());
  conf.set(DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0");
  conf.set(DFS_DATANODE_HTTPS_ADDRESS_KEY, "localhost:0");
  conf.setInt(IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_KEY, 10);
  // https://issues.apache.org/jira/browse/HDFS-7431
  conf.set(DFS_ENCRYPT_DATA_TRANSFER_KEY, "true");

  String keystoresDir = baseDir.getAbsolutePath();
  String sslConfDir = KeyStoreTestUtil.getClasspathDir(this.getClass());
  KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
  return conf;
}
Example #13
Source File: SaslDataTransferTestCase.java From hadoop with Apache License 2.0
/**
 * Creates configuration for starting a secure cluster.
 *
 * @param dataTransferProtection supported QOPs
 * @return configuration for starting a secure cluster
 * @throws Exception if there is any failure
 */
protected HdfsConfiguration createSecureConfig(
    String dataTransferProtection) throws Exception {
  HdfsConfiguration conf = new HdfsConfiguration();
  SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS, conf);
  conf.set(DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, hdfsPrincipal);
  conf.set(DFS_NAMENODE_KEYTAB_FILE_KEY, keytab);
  conf.set(DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, hdfsPrincipal);
  conf.set(DFS_DATANODE_KEYTAB_FILE_KEY, keytab);
  conf.set(DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY, spnegoPrincipal);
  conf.setBoolean(DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
  conf.set(DFS_DATA_TRANSFER_PROTECTION_KEY, dataTransferProtection);
  conf.set(DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name());
  conf.set(DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0");
  conf.set(DFS_DATANODE_HTTPS_ADDRESS_KEY, "localhost:0");
  conf.setInt(IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_KEY, 10);

  String keystoresDir = baseDir.getAbsolutePath();
  String sslConfDir = KeyStoreTestUtil.getClasspathDir(this.getClass());
  KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
  return conf;
}
Example #14
Source File: TestRMContainerAllocator.java From hadoop with Apache License 2.0
@Override
public synchronized Allocation allocate(
    ApplicationAttemptId applicationAttemptId, List<ResourceRequest> ask,
    List<ContainerId> release, List<String> blacklistAdditions,
    List<String> blacklistRemovals) {
  List<ResourceRequest> askCopy = new ArrayList<ResourceRequest>();
  for (ResourceRequest req : ask) {
    ResourceRequest reqCopy = ResourceRequest.newInstance(req.getPriority(),
        req.getResourceName(), req.getCapability(), req.getNumContainers(),
        req.getRelaxLocality());
    askCopy.add(reqCopy);
  }
  SecurityUtil.setTokenServiceUseIp(false);
  lastAsk = ask;
  lastRelease = release;
  lastBlacklistAdditions = blacklistAdditions;
  lastBlacklistRemovals = blacklistRemovals;
  return super.allocate(applicationAttemptId, askCopy, release,
      blacklistAdditions, blacklistRemovals);
}
Example #15
Source File: YARNRunner.java From hadoop with Apache License 2.0
@VisibleForTesting
void addHistoryToken(Credentials ts) throws IOException, InterruptedException {
  /* check if we have a hsproxy, if not, no need */
  MRClientProtocol hsProxy = clientCache.getInitializedHSProxy();
  if (UserGroupInformation.isSecurityEnabled() && (hsProxy != null)) {
    /*
     * note that get delegation token was called. Again this is hack for oozie
     * to make sure we add history server delegation tokens to the credentials
     */
    RMDelegationTokenSelector tokenSelector = new RMDelegationTokenSelector();
    Text service = resMgrDelegate.getRMDelegationTokenService();
    if (tokenSelector.selectToken(service, ts.getAllTokens()) != null) {
      Text hsService = SecurityUtil.buildTokenService(hsProxy.getConnectAddress());
      if (ts.getToken(hsService) == null) {
        ts.addToken(hsService, getDelegationTokenFromHS(hsProxy));
      }
    }
  }
}
Example #16
Source File: JournalNode.java From hadoop with Apache License 2.0
/**
 * Start listening for edits via RPC.
 */
public void start() throws IOException {
  Preconditions.checkState(!isStarted(), "JN already running");

  validateAndCreateJournalDir(localDir);

  DefaultMetricsSystem.initialize("JournalNode");
  JvmMetrics.create("JournalNode",
      conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY),
      DefaultMetricsSystem.instance());

  InetSocketAddress socAddr = JournalNodeRpcServer.getAddress(conf);
  SecurityUtil.login(conf, DFSConfigKeys.DFS_JOURNALNODE_KEYTAB_FILE_KEY,
      DFSConfigKeys.DFS_JOURNALNODE_KERBEROS_PRINCIPAL_KEY,
      socAddr.getHostName());

  registerJNMXBean();

  httpServer = new JournalNodeHttpServer(conf, this);
  httpServer.start();

  httpServerURI = httpServer.getServerURI().toString();

  rpcServer = new JournalNodeRpcServer(conf, this);
  rpcServer.start();
}
Example #17
Source File: MRDelegationTokenRenewer.java From hadoop with Apache License 2.0
@Override
public long renew(Token<?> token, Configuration conf) throws IOException,
    InterruptedException {
  org.apache.hadoop.yarn.api.records.Token dToken =
      org.apache.hadoop.yarn.api.records.Token.newInstance(
          token.getIdentifier(), token.getKind().toString(),
          token.getPassword(), token.getService().toString());

  MRClientProtocol histProxy = instantiateHistoryProxy(conf,
      SecurityUtil.getTokenServiceAddr(token));
  try {
    RenewDelegationTokenRequest request = Records
        .newRecord(RenewDelegationTokenRequest.class);
    request.setDelegationToken(dToken);
    return histProxy.renewDelegationToken(request).getNextExpirationTime();
  } finally {
    stopHistoryProxy(histProxy);
  }
}
Example #18
Source File: RpcProgramMountd.java From hadoop with Apache License 2.0
public RpcProgramMountd(NfsConfiguration config,
    DatagramSocket registrationSocket, boolean allowInsecurePorts)
    throws IOException {
  // Note that RPC cache is not enabled
  super("mountd", "localhost", config.getInt(
      NfsConfigKeys.DFS_NFS_MOUNTD_PORT_KEY,
      NfsConfigKeys.DFS_NFS_MOUNTD_PORT_DEFAULT), PROGRAM, VERSION_1,
      VERSION_3, registrationSocket, allowInsecurePorts);
  exports = new ArrayList<String>();
  exports.add(config.get(NfsConfigKeys.DFS_NFS_EXPORT_POINT_KEY,
      NfsConfigKeys.DFS_NFS_EXPORT_POINT_DEFAULT));
  this.hostsMatcher = NfsExports.getInstance(config);
  this.mounts = Collections.synchronizedList(new ArrayList<MountEntry>());
  UserGroupInformation.setConfiguration(config);
  SecurityUtil.login(config, NfsConfigKeys.DFS_NFS_KEYTAB_FILE_KEY,
      NfsConfigKeys.DFS_NFS_KERBEROS_PRINCIPAL_KEY);
  this.dfsClient = new DFSClient(NameNode.getAddress(config), config);
}
Example #19
Source File: HftpFileSystem.java From hadoop with Apache License 2.0
@SuppressWarnings("unchecked") @Override public void cancelDelegationToken(final Token<?> token) throws IOException { UserGroupInformation connectUgi = ugi.getRealUser(); if (connectUgi == null) { connectUgi = ugi; } try { connectUgi.doAs(new PrivilegedExceptionAction<Void>() { @Override public Void run() throws Exception { InetSocketAddress serviceAddr = SecurityUtil .getTokenServiceAddr(token); DelegationTokenFetcher.cancelDelegationToken(connectionFactory, DFSUtil.createUri(getUnderlyingProtocol(), serviceAddr), (Token<DelegationTokenIdentifier>) token); return null; } }); } catch (InterruptedException e) { throw new IOException(e); } }
Example #20
Source File: DelegationTokenSelector.java From hadoop with Apache License 2.0
/**
 * Select the delegation token for hdfs. The port will be rewritten to
 * the port of hdfs.service.host_$nnAddr, or the default rpc namenode port.
 * This method should only be called by non-hdfs filesystems that do not
 * use the rpc port to acquire tokens. Ex. webhdfs, hftp
 * @param nnUri of the remote namenode
 * @param tokens as a collection
 * @param conf hadoop configuration
 * @return Token
 */
public Token<DelegationTokenIdentifier> selectToken(
    final URI nnUri, Collection<Token<?>> tokens,
    final Configuration conf) {
  // this guesses the remote cluster's rpc service port.
  // the current token design assumes it's the same as the local cluster's
  // rpc port unless a config key is set. there should be a way to automatic
  // and correctly determine the value
  Text serviceName = SecurityUtil.buildTokenService(nnUri);
  final String nnServiceName = conf.get(SERVICE_NAME_KEY + serviceName);

  int nnRpcPort = NameNode.DEFAULT_PORT;
  if (nnServiceName != null) {
    nnRpcPort = NetUtils.createSocketAddr(nnServiceName, nnRpcPort).getPort();
  }
  // use original hostname from the uri to avoid unintentional host resolving
  serviceName = SecurityUtil.buildTokenService(
      NetUtils.createSocketAddrForHost(nnUri.getHost(), nnRpcPort));

  return selectToken(serviceName, tokens);
}
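The port-guessing above can be overridden per remote cluster through the config key the javadoc mentions; a hypothetical entry mapping the token service a webhdfs/hftp client sees to the remote cluster's actual RPC endpoint might look like this (addresses are illustrative):

// Hypothetical override, assuming SERVICE_NAME_KEY is "hdfs.service.host_"
// as in this class: token service "nn.example.com:50070" resolves to the
// remote NameNode's RPC address, so the matching token is selected.
conf.set("hdfs.service.host_nn.example.com:50070", "nn.example.com:8020");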
Example #21
Source File: HttpServer2.java From hadoop-ozone with Apache License 2.0
private void initSpnego(ConfigurationSource conf, String hostName,
    String usernameConfKey, String keytabConfKey) throws IOException {
  Map<String, String> params = new HashMap<>();
  String principalInConf = conf.get(usernameConfKey);
  if (principalInConf != null && !principalInConf.isEmpty()) {
    params.put("kerberos.principal", SecurityUtil.getServerPrincipal(
        principalInConf, hostName));
  }
  String httpKeytab = conf.get(keytabConfKey);
  if (httpKeytab != null && !httpKeytab.isEmpty()) {
    params.put("kerberos.keytab", httpKeytab);
  }
  params.put(AuthenticationFilter.AUTH_TYPE, "kerberos");

  defineFilter(webAppContext, SPNEGO_FILTER,
      AuthenticationFilter.class.getName(), params, null);
}
Example #22
Source File: NameNode.java From hadoop with Apache License 2.0
private void startTrashEmptier(final Configuration conf) throws IOException {
  long trashInterval =
      conf.getLong(FS_TRASH_INTERVAL_KEY, FS_TRASH_INTERVAL_DEFAULT);
  if (trashInterval == 0) {
    return;
  } else if (trashInterval < 0) {
    throw new IOException("Cannot start trash emptier with negative interval."
        + " Set " + FS_TRASH_INTERVAL_KEY + " to a positive value.");
  }

  // This may be called from the transitionToActive code path, in which
  // case the current user is the administrator, not the NN. The trash
  // emptier needs to run as the NN. See HDFS-3972.
  FileSystem fs = SecurityUtil.doAsLoginUser(
      new PrivilegedExceptionAction<FileSystem>() {
        @Override
        public FileSystem run() throws IOException {
          return FileSystem.get(conf);
        }
      });
  this.emptier = new Thread(new Trash(fs, conf).getEmptier(), "Trash Emptier");
  this.emptier.setDaemon(true);
  this.emptier.start();
}
Example #23
Source File: NameNodeProxies.java From hadoop with Apache License 2.0
/**
 * Creates the namenode proxy with the passed protocol. This will handle
 * creation of either HA- or non-HA-enabled proxy objects, depending upon
 * if the provided URI is a configured logical URI.
 *
 * @param conf the configuration containing the required IPC
 *        properties, client failover configurations, etc.
 * @param nameNodeUri the URI pointing either to a specific NameNode
 *        or to a logical nameservice.
 * @param xface the IPC interface which should be created
 * @param fallbackToSimpleAuth set to true or false during calls to indicate
 *        if a secure client falls back to simple auth
 * @return an object containing both the proxy and the associated
 *         delegation token service it corresponds to
 * @throws IOException if there is an error creating the proxy
 */
@SuppressWarnings("unchecked")
public static <T> ProxyAndInfo<T> createProxy(Configuration conf,
    URI nameNodeUri, Class<T> xface, AtomicBoolean fallbackToSimpleAuth)
    throws IOException {
  AbstractNNFailoverProxyProvider<T> failoverProxyProvider =
      createFailoverProxyProvider(conf, nameNodeUri, xface, true,
          fallbackToSimpleAuth);

  if (failoverProxyProvider == null) {
    // Non-HA case
    return createNonHAProxy(conf, NameNode.getAddress(nameNodeUri), xface,
        UserGroupInformation.getCurrentUser(), true, fallbackToSimpleAuth);
  } else {
    // HA case
    Conf config = new Conf(conf);
    T proxy = (T) RetryProxy.create(xface, failoverProxyProvider,
        RetryPolicies.failoverOnNetworkException(
            RetryPolicies.TRY_ONCE_THEN_FAIL, config.maxFailoverAttempts,
            config.maxRetryAttempts, config.failoverSleepBaseMillis,
            config.failoverSleepMaxMillis));

    Text dtService;
    if (failoverProxyProvider.useLogicalURI()) {
      dtService = HAUtil.buildTokenServiceForLogicalUri(nameNodeUri,
          HdfsConstants.HDFS_URI_SCHEME);
    } else {
      dtService = SecurityUtil.buildTokenService(
          NameNode.getAddress(nameNodeUri));
    }
    return new ProxyAndInfo<T>(proxy, dtService,
        NameNode.getAddress(nameNodeUri));
  }
}
Example #24
Source File: TokenAspect.java From big-c with Apache License 2.0
private TokenManagementDelegator getInstance(Token<?> token,
    Configuration conf) throws IOException {
  final URI uri;
  final String scheme = getSchemeByKind(token.getKind());
  if (HAUtil.isTokenForLogicalUri(token)) {
    uri = HAUtil.getServiceUriFromToken(scheme, token);
  } else {
    final InetSocketAddress address = SecurityUtil.getTokenServiceAddr(token);
    uri = URI.create(scheme + "://" + NetUtils.getHostPortString(address));
  }
  return (TokenManagementDelegator) FileSystem.get(uri, conf);
}
Example #25
Source File: UserProvider.java From hadoop with Apache License 2.0
@Override
public UserGroupInformation getValue(final HttpContext context) {
  final Configuration conf = (Configuration) servletcontext
      .getAttribute(JspHelper.CURRENT_CONF);
  try {
    return JspHelper.getUGI(servletcontext, request, conf,
        AuthenticationMethod.KERBEROS, false);
  } catch (IOException e) {
    throw new SecurityException(
        SecurityUtil.FAILED_TO_GET_UGI_MSG_HEADER + " " + e, e);
  }
}
Example #26
Source File: KMSClientProvider.java From big-c with Apache License 2.0
private Text getDelegationTokenService() throws IOException {
  URL url = new URL(kmsUrl);
  InetSocketAddress addr = new InetSocketAddress(url.getHost(),
      url.getPort());
  Text dtService = SecurityUtil.buildTokenService(addr);
  return dtService;
}
Example #27
Source File: EditLogTailer.java From hadoop with Apache License 2.0
@Override
public void run() {
  SecurityUtil.doAsLoginUserOrFatal(
      new PrivilegedAction<Object>() {
        @Override
        public Object run() {
          doWork();
          return null;
        }
      });
}
Example #28
Source File: YarnClientImpl.java From hadoop with Apache License 2.0
private static String getTimelineDelegationTokenRenewer(Configuration conf)
    throws IOException, YarnException {
  // Parse the RM daemon user if it exists in the config
  String rmPrincipal = conf.get(YarnConfiguration.RM_PRINCIPAL);
  String renewer = null;
  if (rmPrincipal != null && rmPrincipal.length() > 0) {
    String rmHost = conf.getSocketAddr(
        YarnConfiguration.RM_ADDRESS,
        YarnConfiguration.DEFAULT_RM_ADDRESS,
        YarnConfiguration.DEFAULT_RM_PORT).getHostName();
    renewer = SecurityUtil.getServerPrincipal(rmPrincipal, rmHost);
  }
  return renewer;
}
Example #29
Source File: StandbyCheckpointer.java From hadoop with Apache License 2.0
@Override
public void run() {
  // We have to make sure we're logged in as far as JAAS
  // is concerned, in order to use kerberized SSL properly.
  SecurityUtil.doAsLoginUserOrFatal(
      new PrivilegedAction<Object>() {
        @Override
        public Object run() {
          doWork();
          return null;
        }
      });
}
Example #30
Source File: NameNodeHttpServer.java From hadoop with Apache License 2.0
private Map<String, String> getAuthFilterParams(Configuration conf)
    throws IOException {
  Map<String, String> params = new HashMap<String, String>();
  String principalInConf = conf
      .get(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY);
  if (principalInConf != null && !principalInConf.isEmpty()) {
    params.put(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY,
        SecurityUtil.getServerPrincipal(principalInConf,
            bindAddress.getHostName()));
  } else if (UserGroupInformation.isSecurityEnabled()) {
    HttpServer2.LOG.error(
        "WebHDFS and security are enabled, but configuration property '"
            + DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY
            + "' is not set.");
  }
  String httpKeytab = conf.get(DFSUtil.getSpnegoKeytabKey(conf,
      DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY));
  if (httpKeytab != null && !httpKeytab.isEmpty()) {
    params.put(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY,
        httpKeytab);
  } else if (UserGroupInformation.isSecurityEnabled()) {
    HttpServer2.LOG.error(
        "WebHDFS and security are enabled, but configuration property '"
            + DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY
            + "' is not set.");
  }
  String anonymousAllowed = conf
      .get(DFSConfigKeys.DFS_WEB_AUTHENTICATION_SIMPLE_ANONYMOUS_ALLOWED);
  if (anonymousAllowed != null && !anonymousAllowed.isEmpty()) {
    params.put(DFSConfigKeys.DFS_WEB_AUTHENTICATION_SIMPLE_ANONYMOUS_ALLOWED,
        anonymousAllowed);
  }
  return params;
}