Java Code Examples for org.apache.hadoop.security.SecurityUtil#login()
The following examples show how to use org.apache.hadoop.security.SecurityUtil#login().
You can go to the original project or source file by following the links above each example.
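All of the examples share the same basic pattern: the service's configuration names a keytab file and a Kerberos principal, UserGroupInformation is pointed at that configuration, and SecurityUtil.login() performs the keytab login. The following minimal sketch illustrates that pattern; the keys my.service.keytab.file and my.service.kerberos.principal are hypothetical placeholders, not real Hadoop configuration keys, whereas the real services below define their own keys (DFS_NAMENODE_KEYTAB_FILE_KEY, YarnConfiguration.NM_KEYTAB, and so on).

import java.io.IOException;
import java.net.InetSocketAddress;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;

public class SecureLoginSketch {
  // Hypothetical configuration keys; real services define their own.
  private static final String KEYTAB_KEY = "my.service.keytab.file";
  private static final String PRINCIPAL_KEY = "my.service.kerberos.principal";

  public static void doSecureLogin(Configuration conf,
      InetSocketAddress bindAddr) throws IOException {
    // UGI must see the same configuration before the login call,
    // as several of the examples below do explicitly.
    UserGroupInformation.setConfiguration(conf);
    if (UserGroupInformation.isSecurityEnabled()) {
      // Reads the keytab path and principal from the configuration and
      // performs the Kerberos keytab login; bindAddr's hostname replaces
      // any _HOST token in the principal (e.g. "svc/_HOST@REALM").
      SecurityUtil.login(conf, KEYTAB_KEY, PRINCIPAL_KEY,
          bindAddr.getHostName());
    }
  }
}

The four-argument overload used here substitutes the supplied hostname for a _HOST token in the configured principal, which is why most services below pass the hostname of their RPC or HTTP bind address; the three-argument overload seen in several examples performs the same substitution using the local hostname instead.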
Example 1
Source File: BootstrapStandby.java From big-c with Apache License 2.0
@Override
public int run(String[] args) throws Exception {
  parseArgs(args);
  parseConfAndFindOtherNN();
  NameNode.checkAllowFormat(conf);

  InetSocketAddress myAddr = NameNode.getAddress(conf);
  SecurityUtil.login(conf, DFS_NAMENODE_KEYTAB_FILE_KEY,
      DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, myAddr.getHostName());

  return SecurityUtil.doAsLoginUserOrFatal(new PrivilegedAction<Integer>() {
    @Override
    public Integer run() {
      try {
        return doRun();
      } catch (IOException e) {
        throw new RuntimeException(e);
      }
    }
  });
}
Example 2
Source File: OzoneManager.java From hadoop-ozone with Apache License 2.0
/**
 * Login OM service user if security and Kerberos are enabled.
 *
 * @param conf the Ozone configuration to read credentials from
 * @throws IOException on login failure
 * @throws AuthenticationException if Kerberos is not the configured
 *         authentication method
 */
private static void loginOMUser(OzoneConfiguration conf)
    throws IOException, AuthenticationException {
  if (SecurityUtil.getAuthenticationMethod(conf).equals(
      AuthenticationMethod.KERBEROS)) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Ozone security is enabled. Attempting login for OM user. "
              + "Principal: {}, keytab: {}",
          conf.get(OZONE_OM_KERBEROS_PRINCIPAL_KEY),
          conf.get(OZONE_OM_KERBEROS_KEYTAB_FILE_KEY));
    }

    UserGroupInformation.setConfiguration(conf);

    InetSocketAddress socAddr = OmUtils.getOmAddress(conf);
    SecurityUtil.login(conf, OZONE_OM_KERBEROS_KEYTAB_FILE_KEY,
        OZONE_OM_KERBEROS_PRINCIPAL_KEY, socAddr.getHostName());
  } else {
    throw new AuthenticationException(SecurityUtil.getAuthenticationMethod(
        conf) + " authentication method not supported. OM user login "
        + "failed.");
  }
  LOG.info("Ozone Manager login successful.");
}
Example 3
Source File: RpcProgramMountd.java From big-c with Apache License 2.0
public RpcProgramMountd(NfsConfiguration config,
    DatagramSocket registrationSocket, boolean allowInsecurePorts)
    throws IOException {
  // Note that RPC cache is not enabled
  super("mountd", "localhost", config.getInt(
      NfsConfigKeys.DFS_NFS_MOUNTD_PORT_KEY,
      NfsConfigKeys.DFS_NFS_MOUNTD_PORT_DEFAULT), PROGRAM, VERSION_1,
      VERSION_3, registrationSocket, allowInsecurePorts);
  exports = new ArrayList<String>();
  exports.add(config.get(NfsConfigKeys.DFS_NFS_EXPORT_POINT_KEY,
      NfsConfigKeys.DFS_NFS_EXPORT_POINT_DEFAULT));
  this.hostsMatcher = NfsExports.getInstance(config);
  this.mounts = Collections.synchronizedList(new ArrayList<MountEntry>());
  UserGroupInformation.setConfiguration(config);
  SecurityUtil.login(config, NfsConfigKeys.DFS_NFS_KEYTAB_FILE_KEY,
      NfsConfigKeys.DFS_NFS_KERBEROS_PRINCIPAL_KEY);
  this.dfsClient = new DFSClient(NameNode.getAddress(config), config);
}
Example 4
Source File: HdfsSecurityUtil.java From jstorm with Apache License 2.0
public static void login(Map conf, Configuration hdfsConfig)
    throws IOException {
  // If AutoHDFS is specified, do not attempt to login using keytabs;
  // only kept for backward compatibility.
  if (conf.get(TOPOLOGY_AUTO_CREDENTIALS) == null ||
      (!(((List) conf.get(TOPOLOGY_AUTO_CREDENTIALS))
            .contains(AutoHDFS.class.getName())) &&
       !(((List) conf.get(TOPOLOGY_AUTO_CREDENTIALS))
            .contains(AutoTGT.class.getName())))) {
    if (UserGroupInformation.isSecurityEnabled()) {
      // compareAndSet added because of
      // https://issues.apache.org/jira/browse/STORM-1535
      if (isLoggedIn.compareAndSet(false, true)) {
        LOG.info("Logging in using keytab as AutoHDFS is not specified for "
            + TOPOLOGY_AUTO_CREDENTIALS);
        String keytab = (String) conf.get(STORM_KEYTAB_FILE_KEY);
        if (keytab != null) {
          hdfsConfig.set(STORM_KEYTAB_FILE_KEY, keytab);
        }
        String userName = (String) conf.get(STORM_USER_NAME_KEY);
        if (userName != null) {
          hdfsConfig.set(STORM_USER_NAME_KEY, userName);
        }
        SecurityUtil.login(hdfsConfig, STORM_KEYTAB_FILE_KEY,
            STORM_USER_NAME_KEY);
      }
    }
  }
}
Example 5
Source File: JournalNode.java From big-c with Apache License 2.0
/**
 * Start listening for edits via RPC.
 */
public void start() throws IOException {
  Preconditions.checkState(!isStarted(), "JN already running");

  validateAndCreateJournalDir(localDir);

  DefaultMetricsSystem.initialize("JournalNode");
  JvmMetrics.create("JournalNode",
      conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY),
      DefaultMetricsSystem.instance());

  InetSocketAddress socAddr = JournalNodeRpcServer.getAddress(conf);
  SecurityUtil.login(conf, DFSConfigKeys.DFS_JOURNALNODE_KEYTAB_FILE_KEY,
      DFSConfigKeys.DFS_JOURNALNODE_KERBEROS_PRINCIPAL_KEY,
      socAddr.getHostName());

  registerJNMXBean();

  httpServer = new JournalNodeHttpServer(conf, this);
  httpServer.start();

  httpServerURI = httpServer.getServerURI().toString();

  rpcServer = new JournalNodeRpcServer(conf, this);
  rpcServer.start();
}
Example 6
Source File: TestSaslRPC.java From big-c with Apache License 2.0
static void testKerberosRpc(String principal, String keytab)
    throws Exception {
  final Configuration newConf = new Configuration(conf);
  newConf.set(SERVER_PRINCIPAL_KEY, principal);
  newConf.set(SERVER_KEYTAB_KEY, keytab);
  SecurityUtil.login(newConf, SERVER_KEYTAB_KEY, SERVER_PRINCIPAL_KEY);
  TestUserGroupInformation.verifyLoginMetrics(1, 0);
  UserGroupInformation current = UserGroupInformation.getCurrentUser();
  System.out.println("UGI: " + current);

  Server server = new RPC.Builder(newConf)
      .setProtocol(TestSaslProtocol.class).setInstance(new TestSaslImpl())
      .setBindAddress(ADDRESS).setPort(0).setNumHandlers(5).setVerbose(true)
      .build();
  TestSaslProtocol proxy = null;
  server.start();

  InetSocketAddress addr = NetUtils.getConnectAddress(server);
  try {
    proxy = RPC.getProxy(TestSaslProtocol.class,
        TestSaslProtocol.versionID, addr, newConf);
    proxy.ping();
  } finally {
    server.stop();
    if (proxy != null) {
      RPC.stopProxy(proxy);
    }
  }
  System.out.println("Test is successful.");
}
Example 7
Source File: AutoHDFS.java From jstorm with Apache License 2.0
private void login(Configuration configuration) throws IOException {
  configuration.set(STORM_KEYTAB_FILE_KEY, this.hdfsKeyTab);
  configuration.set(STORM_USER_NAME_KEY, this.hdfsPrincipal);
  SecurityUtil.login(configuration, STORM_KEYTAB_FILE_KEY,
      STORM_USER_NAME_KEY);
  LOG.info("Logged into hdfs with principal {}", this.hdfsPrincipal);
}
Example 8
Source File: DFSZKFailoverController.java From big-c with Apache License 2.0
@Override
public void loginAsFCUser() throws IOException {
  InetSocketAddress socAddr = NameNode.getAddress(conf);
  SecurityUtil.login(conf, DFS_NAMENODE_KEYTAB_FILE_KEY,
      DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, socAddr.getHostName());
}
Example 9
Source File: WebAppProxyServer.java From hadoop with Apache License 2.0
/**
 * Log in as the Kerberos principal designated for the proxy.
 * @param conf the configuration holding this information in it.
 * @throws IOException on any error.
 */
protected void doSecureLogin(Configuration conf) throws IOException {
  InetSocketAddress socAddr = getBindAddress(conf);
  SecurityUtil.login(conf, YarnConfiguration.PROXY_KEYTAB,
      YarnConfiguration.PROXY_PRINCIPAL, socAddr.getHostName());
}
Example 10
Source File: ApplicationHistoryServer.java From hadoop with Apache License 2.0
private void doSecureLogin(Configuration conf) throws IOException {
  InetSocketAddress socAddr = getBindAddress(conf);
  SecurityUtil.login(conf, YarnConfiguration.TIMELINE_SERVICE_KEYTAB,
      YarnConfiguration.TIMELINE_SERVICE_PRINCIPAL, socAddr.getHostName());
}
Example 11
Source File: JobHistoryServer.java From hadoop with Apache License 2.0
protected void doSecureLogin(Configuration conf) throws IOException {
  InetSocketAddress socAddr = getBindAddress(conf);
  SecurityUtil.login(conf, JHAdminConfig.MR_HISTORY_KEYTAB,
      JHAdminConfig.MR_HISTORY_PRINCIPAL, socAddr.getHostName());
}
Example 12
Source File: RpcProgramNfs3.java From big-c with Apache License 2.0
public RpcProgramNfs3(NfsConfiguration config,
    DatagramSocket registrationSocket, boolean allowInsecurePorts)
    throws IOException {
  super("NFS3", "localhost", config.getInt(
      NfsConfigKeys.DFS_NFS_SERVER_PORT_KEY,
      NfsConfigKeys.DFS_NFS_SERVER_PORT_DEFAULT),
      Nfs3Constant.PROGRAM, Nfs3Constant.VERSION, Nfs3Constant.VERSION,
      registrationSocket, allowInsecurePorts);
  this.config = config;
  config.set(FsPermission.UMASK_LABEL, "000");
  iug = new ShellBasedIdMapping(config);

  aixCompatMode = config.getBoolean(
      NfsConfigKeys.AIX_COMPAT_MODE_KEY,
      NfsConfigKeys.AIX_COMPAT_MODE_DEFAULT);
  exports = NfsExports.getInstance(config);
  writeManager = new WriteManager(iug, config, aixCompatMode);
  clientCache = new DFSClientCache(config);
  replication = (short) config.getInt(DFSConfigKeys.DFS_REPLICATION_KEY,
      DFSConfigKeys.DFS_REPLICATION_DEFAULT);
  blockSize = config.getLongBytes(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
      DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT);
  bufferSize = config.getInt(
      CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY,
      CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT);

  writeDumpDir = config.get(NfsConfigKeys.DFS_NFS_FILE_DUMP_DIR_KEY,
      NfsConfigKeys.DFS_NFS_FILE_DUMP_DIR_DEFAULT);
  boolean enableDump = config.getBoolean(NfsConfigKeys.DFS_NFS_FILE_DUMP_KEY,
      NfsConfigKeys.DFS_NFS_FILE_DUMP_DEFAULT);
  UserGroupInformation.setConfiguration(config);
  SecurityUtil.login(config, NfsConfigKeys.DFS_NFS_KEYTAB_FILE_KEY,
      NfsConfigKeys.DFS_NFS_KERBEROS_PRINCIPAL_KEY);
  superuser = config.get(NfsConfigKeys.NFS_SUPERUSER_KEY,
      NfsConfigKeys.NFS_SUPERUSER_DEFAULT);
  LOG.info("Configured HDFS superuser is " + superuser);

  if (!enableDump) {
    writeDumpDir = null;
  } else {
    clearDirectory(writeDumpDir);
  }

  rpcCallCache = new RpcCallCache("NFS3", 256);
  infoServer = new Nfs3HttpServer(config);
}
Example 13
Source File: JobHistoryServer.java From XLearning with Apache License 2.0
protected void doSecureLogin(Configuration conf) throws IOException {
  InetSocketAddress socAddr = getBindAddress(conf);
  SecurityUtil.login(conf, XLearningConfiguration.XLEARNING_HISTORY_KEYTAB,
      XLearningConfiguration.XLEARNING_HISTORY_PRINCIPAL,
      socAddr.getHostName());
}
Example 14
Source File: NameNode.java From hadoop with Apache License 2.0
/**
 * Login as the configured user for the NameNode.
 */
void loginAsNameNodeUser(Configuration conf) throws IOException {
  InetSocketAddress socAddr = getRpcServerAddress(conf);
  SecurityUtil.login(conf, DFS_NAMENODE_KEYTAB_FILE_KEY,
      DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, socAddr.getHostName());
}
Example 15
Source File: NameNode.java From hadoop with Apache License 2.0
/**
 * Verify that configured directories exist, then
 * interactively confirm that formatting is desired
 * for each existing directory and format them.
 *
 * @param conf configuration to use
 * @param force if true, format regardless of whether dirs exist
 * @param isInteractive if true, prompt before formatting existing dirs
 * @return true if formatting was aborted, false otherwise
 * @throws IOException
 */
private static boolean format(Configuration conf, boolean force,
    boolean isInteractive) throws IOException {
  String nsId = DFSUtil.getNamenodeNameServiceId(conf);
  String namenodeId = HAUtil.getNameNodeId(conf, nsId);
  initializeGenericKeys(conf, nsId, namenodeId);
  checkAllowFormat(conf);

  if (UserGroupInformation.isSecurityEnabled()) {
    InetSocketAddress socAddr = getAddress(conf);
    SecurityUtil.login(conf, DFS_NAMENODE_KEYTAB_FILE_KEY,
        DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, socAddr.getHostName());
  }

  Collection<URI> nameDirsToFormat = FSNamesystem.getNamespaceDirs(conf);
  List<URI> sharedDirs = FSNamesystem.getSharedEditsDirs(conf);
  List<URI> dirsToPrompt = new ArrayList<URI>();
  dirsToPrompt.addAll(nameDirsToFormat);
  dirsToPrompt.addAll(sharedDirs);
  List<URI> editDirsToFormat = FSNamesystem.getNamespaceEditsDirs(conf);

  // if clusterID is not provided - see if you can find the current one
  String clusterId = StartupOption.FORMAT.getClusterId();
  if (clusterId == null || clusterId.equals("")) {
    // Generate a new cluster id
    clusterId = NNStorage.newClusterID();
  }
  System.out.println("Formatting using clusterid: " + clusterId);

  FSImage fsImage = new FSImage(conf, nameDirsToFormat, editDirsToFormat);
  try {
    FSNamesystem fsn = new FSNamesystem(conf, fsImage);
    fsImage.getEditLog().initJournalsForWrite();

    if (!fsImage.confirmFormat(force, isInteractive)) {
      return true; // aborted
    }

    fsImage.format(fsn, clusterId);
  } catch (IOException ioe) {
    LOG.warn("Encountered exception during format: ", ioe);
    fsImage.close();
    throw ioe;
  }

  return false;
}
Example 16
Source File: SecondaryNameNode.java From hadoop with Apache License 2.0
/**
 * Initialize SecondaryNameNode.
 */
private void initialize(final Configuration conf,
    CommandLineOpts commandLineOpts) throws IOException {
  final InetSocketAddress infoSocAddr = getHttpAddress(conf);
  final String infoBindAddress = infoSocAddr.getHostName();
  UserGroupInformation.setConfiguration(conf);
  if (UserGroupInformation.isSecurityEnabled()) {
    SecurityUtil.login(conf,
        DFSConfigKeys.DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY,
        DFSConfigKeys.DFS_SECONDARY_NAMENODE_KERBEROS_PRINCIPAL_KEY,
        infoBindAddress);
  }
  // initiate Java VM metrics
  DefaultMetricsSystem.initialize("SecondaryNameNode");
  JvmMetrics.create("SecondaryNameNode",
      conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY),
      DefaultMetricsSystem.instance());

  // Create connection to the namenode.
  shouldRun = true;
  nameNodeAddr = NameNode.getServiceAddress(conf, true);

  this.conf = conf;
  this.namenode = NameNodeProxies.createNonHAProxy(conf, nameNodeAddr,
      NamenodeProtocol.class, UserGroupInformation.getCurrentUser(),
      true).getProxy();

  // initialize checkpoint directories
  fsName = getInfoServer();
  checkpointDirs = FSImage.getCheckpointDirs(conf,
      "/tmp/hadoop/dfs/namesecondary");
  checkpointEditsDirs = FSImage.getCheckpointEditsDirs(conf,
      "/tmp/hadoop/dfs/namesecondary");
  checkpointImage = new CheckpointStorage(conf, checkpointDirs,
      checkpointEditsDirs);
  checkpointImage.recoverCreate(commandLineOpts.shouldFormat());
  checkpointImage.deleteTempEdits();

  namesystem = new FSNamesystem(conf, checkpointImage, true);

  // Disable quota checks
  namesystem.dir.disableQuotaChecks();

  // Initialize other scheduling parameters from the configuration
  checkpointConf = new CheckpointConf(conf);

  final InetSocketAddress httpAddr = infoSocAddr;

  final String httpsAddrString = conf.getTrimmed(
      DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTPS_ADDRESS_KEY,
      DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTPS_ADDRESS_DEFAULT);
  InetSocketAddress httpsAddr = NetUtils.createSocketAddr(httpsAddrString);

  HttpServer2.Builder builder = DFSUtil.httpServerTemplateForNNAndJN(conf,
      httpAddr, httpsAddr, "secondary",
      DFSConfigKeys.DFS_SECONDARY_NAMENODE_KERBEROS_INTERNAL_SPNEGO_PRINCIPAL_KEY,
      DFSConfigKeys.DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY);

  nameNodeStatusBeanName = MBeans.register("SecondaryNameNode",
      "SecondaryNameNodeInfo", this);

  infoServer = builder.build();

  infoServer.setAttribute("secondary.name.node", this);
  infoServer.setAttribute("name.system.image", checkpointImage);
  infoServer.setAttribute(JspHelper.CURRENT_CONF, conf);
  infoServer.addInternalServlet("imagetransfer", ImageServlet.PATH_SPEC,
      ImageServlet.class, true);
  infoServer.start();

  LOG.info("Web server init done");

  HttpConfig.Policy policy = DFSUtil.getHttpPolicy(conf);
  int connIdx = 0;
  if (policy.isHttpEnabled()) {
    InetSocketAddress httpAddress = infoServer.getConnectorAddress(connIdx++);
    conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
        NetUtils.getHostPortString(httpAddress));
  }

  if (policy.isHttpsEnabled()) {
    InetSocketAddress httpsAddress = infoServer.getConnectorAddress(connIdx);
    conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTPS_ADDRESS_KEY,
        NetUtils.getHostPortString(httpsAddress));
  }

  legacyOivImageDir = conf.get(
      DFSConfigKeys.DFS_NAMENODE_LEGACY_OIV_IMAGE_DIR_KEY);

  LOG.info("Checkpoint Period :" + checkpointConf.getPeriod() + " secs "
      + "(" + checkpointConf.getPeriod() / 60 + " min)");
  LOG.info("Log Size Trigger :" + checkpointConf.getTxnCount() + " txns");
}
Example 17
Source File: NodeManager.java From big-c with Apache License 2.0
protected void doSecureLogin() throws IOException {
  SecurityUtil.login(getConfig(), YarnConfiguration.NM_KEYTAB,
      YarnConfiguration.NM_PRINCIPAL);
}