Java Code Examples for org.apache.hadoop.security.SecurityUtil#setTokenService()
The following examples show how to use
org.apache.hadoop.security.SecurityUtil#setTokenService().
They are drawn from open-source projects; the original project and source file for each example are noted in the line above its code.
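Before the individual examples, here is a minimal sketch of the pattern they all share: a freshly obtained token has no service field set, so the client calls SecurityUtil.setTokenService() with the resolved InetSocketAddress of the service before attaching the token to a UserGroupInformation. The helper class, method name, and host/port parameters below are illustrative assumptions, not code from any of the listed projects.

import java.io.IOException;
import java.net.InetSocketAddress;

import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;

// Hypothetical helper illustrating the pattern used throughout the examples below.
public class TokenServiceExample {

  // Bind a token to the address of the service it authenticates against,
  // then attach it to the current user's credentials.
  public static <T extends TokenIdentifier> void attachToken(
      Token<T> token, String host, int port) throws IOException {
    // Resolve the service address; the host and port are caller-supplied placeholders.
    InetSocketAddress serviceAddr = NetUtils.createSocketAddrForHost(host, port);

    // setTokenService() records the service (host:port or IP:port, depending on
    // hadoop.security.token.service.use_ip) in the token, which the RPC client
    // later uses to select this token for connections to that address.
    SecurityUtil.setTokenService(token, serviceAddr);

    UserGroupInformation.getCurrentUser().addToken(token);
  }
}

The project-specific examples below show the same call in context: binding WebHDFS and HDFS delegation tokens, YARN AMRM and RM delegation tokens, and Tez job tokens to their service addresses.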
Example 1
Source File: TestWebHdfsUrl.java From big-c with Apache License 2.0 | 6 votes |
private WebHdfsFileSystem getWebHdfsFileSystem(UserGroupInformation ugi,
    Configuration conf) throws IOException {
  if (UserGroupInformation.isSecurityEnabled()) {
    DelegationTokenIdentifier dtId = new DelegationTokenIdentifier(new Text(
        ugi.getUserName()), null, null);
    FSNamesystem namesystem = mock(FSNamesystem.class);
    DelegationTokenSecretManager dtSecretManager = new DelegationTokenSecretManager(
        86400000, 86400000, 86400000, 86400000, namesystem);
    dtSecretManager.startThreads();
    Token<DelegationTokenIdentifier> token = new Token<DelegationTokenIdentifier>(
        dtId, dtSecretManager);
    SecurityUtil.setTokenService(
        token, NetUtils.createSocketAddr(uri.getAuthority()));
    token.setKind(WebHdfsFileSystem.TOKEN_KIND);
    ugi.addToken(token);
  }
  return (WebHdfsFileSystem) FileSystem.get(uri, conf);
}
Example 2
Source File: TestWebHdfsUrl.java From hadoop with Apache License 2.0 | 6 votes |
private WebHdfsFileSystem getWebHdfsFileSystem(UserGroupInformation ugi,
    Configuration conf) throws IOException {
  if (UserGroupInformation.isSecurityEnabled()) {
    DelegationTokenIdentifier dtId = new DelegationTokenIdentifier(new Text(
        ugi.getUserName()), null, null);
    FSNamesystem namesystem = mock(FSNamesystem.class);
    DelegationTokenSecretManager dtSecretManager = new DelegationTokenSecretManager(
        86400000, 86400000, 86400000, 86400000, namesystem);
    dtSecretManager.startThreads();
    Token<DelegationTokenIdentifier> token = new Token<DelegationTokenIdentifier>(
        dtId, dtSecretManager);
    SecurityUtil.setTokenService(
        token, NetUtils.createSocketAddr(uri.getAuthority()));
    token.setKind(WebHdfsFileSystem.TOKEN_KIND);
    ugi.addToken(token);
  }
  return (WebHdfsFileSystem) FileSystem.get(uri, conf);
}
Example 3
Source File: TestAMAuthorization.java From hadoop with Apache License 2.0 | 5 votes |
@SuppressWarnings("unchecked") public static Token<? extends TokenIdentifier> setupAndReturnAMRMToken( InetSocketAddress rmBindAddress, Collection<Token<? extends TokenIdentifier>> allTokens) { for (Token<? extends TokenIdentifier> token : allTokens) { if (token.getKind().equals(AMRMTokenIdentifier.KIND_NAME)) { SecurityUtil.setTokenService(token, rmBindAddress); return (Token<AMRMTokenIdentifier>) token; } } return null; }
Example 4
Source File: MockRMWithCustomAMLauncher.java From hadoop with Apache License 2.0 | 5 votes |
@Override
protected ApplicationMasterLauncher createAMLauncher() {
  return new ApplicationMasterLauncher(getRMContext()) {
    @Override
    protected Runnable createRunnableLauncher(RMAppAttempt application,
        AMLauncherEventType event) {
      return new AMLauncher(context, application, event, getConfig()) {
        @Override
        protected ContainerManagementProtocol getContainerMgrProxy(
            ContainerId containerId) {
          return containerManager;
        }
        @Override
        protected Token<AMRMTokenIdentifier> createAndSetAMRMToken() {
          Token<AMRMTokenIdentifier> amRmToken = super.createAndSetAMRMToken();
          InetSocketAddress serviceAddr = getConfig().getSocketAddr(
              YarnConfiguration.RM_SCHEDULER_ADDRESS,
              YarnConfiguration.DEFAULT_RM_SCHEDULER_ADDRESS,
              YarnConfiguration.DEFAULT_RM_SCHEDULER_PORT);
          SecurityUtil.setTokenService(amRmToken, serviceAddr);
          return amRmToken;
        }
      };
    }
  };
}
Example 5
Source File: DelegationTokenSecretManager.java From hadoop with Apache License 2.0 | 5 votes |
/** A utility method for creating credentials. */
public static Credentials createCredentials(final NameNode namenode,
    final UserGroupInformation ugi, final String renewer) throws IOException {
  final Token<DelegationTokenIdentifier> token =
      namenode.getRpcServer().getDelegationToken(new Text(renewer));
  if (token == null) {
    return null;
  }
  final InetSocketAddress addr = namenode.getNameNodeAddress();
  SecurityUtil.setTokenService(token, addr);
  final Credentials c = new Credentials();
  c.addToken(new Text(ugi.getShortUserName()), token);
  return c;
}
Example 6
Source File: HAUtil.java From hadoop with Apache License 2.0 | 5 votes |
/**
 * Locate a delegation token associated with the given HA cluster URI, and if
 * one is found, clone it to also represent the underlying namenode address.
 * @param ugi the UGI to modify
 * @param haUri the logical URI for the cluster
 * @param nnAddrs collection of NNs in the cluster to which the token applies
 */
public static void cloneDelegationTokenForLogicalUri(
    UserGroupInformation ugi, URI haUri,
    Collection<InetSocketAddress> nnAddrs) {
  // this cloning logic is only used by hdfs
  Text haService = HAUtil.buildTokenServiceForLogicalUri(haUri,
      HdfsConstants.HDFS_URI_SCHEME);
  Token<DelegationTokenIdentifier> haToken =
      tokenSelector.selectToken(haService, ugi.getTokens());
  if (haToken != null) {
    for (InetSocketAddress singleNNAddr : nnAddrs) {
      // this is a minor hack to prevent physical HA tokens from being
      // exposed to the user via UGI.getCredentials(), otherwise these
      // cloned tokens may be inadvertently propagated to jobs
      Token<DelegationTokenIdentifier> specificToken =
          new Token.PrivateToken<DelegationTokenIdentifier>(haToken);
      SecurityUtil.setTokenService(specificToken, singleNNAddr);
      Text alias = new Text(
          buildTokenServicePrefixForLogicalUri(HdfsConstants.HDFS_URI_SCHEME)
              + "//" + specificToken.getService());
      ugi.addToken(alias, specificToken);
      LOG.debug("Mapped HA service delegation token for logical URI " + haUri
          + " to namenode " + singleNNAddr);
    }
  } else {
    LOG.debug("No HA service delegation token found for logical URI " + haUri);
  }
}
Example 7
Source File: HAUtil.java From big-c with Apache License 2.0 | 5 votes |
/**
 * Locate a delegation token associated with the given HA cluster URI, and if
 * one is found, clone it to also represent the underlying namenode address.
 * @param ugi the UGI to modify
 * @param haUri the logical URI for the cluster
 * @param nnAddrs collection of NNs in the cluster to which the token applies
 */
public static void cloneDelegationTokenForLogicalUri(
    UserGroupInformation ugi, URI haUri,
    Collection<InetSocketAddress> nnAddrs) {
  // this cloning logic is only used by hdfs
  Text haService = HAUtil.buildTokenServiceForLogicalUri(haUri,
      HdfsConstants.HDFS_URI_SCHEME);
  Token<DelegationTokenIdentifier> haToken =
      tokenSelector.selectToken(haService, ugi.getTokens());
  if (haToken != null) {
    for (InetSocketAddress singleNNAddr : nnAddrs) {
      // this is a minor hack to prevent physical HA tokens from being
      // exposed to the user via UGI.getCredentials(), otherwise these
      // cloned tokens may be inadvertently propagated to jobs
      Token<DelegationTokenIdentifier> specificToken =
          new Token.PrivateToken<DelegationTokenIdentifier>(haToken);
      SecurityUtil.setTokenService(specificToken, singleNNAddr);
      Text alias = new Text(
          buildTokenServicePrefixForLogicalUri(HdfsConstants.HDFS_URI_SCHEME)
              + "//" + specificToken.getService());
      ugi.addToken(alias, specificToken);
      LOG.debug("Mapped HA service delegation token for logical URI " + haUri
          + " to namenode " + singleNNAddr);
    }
  } else {
    LOG.debug("No HA service delegation token found for logical URI " + haUri);
  }
}
Example 8
Source File: TestSaslRPC.java From hadoop with Apache License 2.0 | 5 votes |
private void doDigestRpc(Server server, TestTokenSecretManager sm)
    throws Exception {
  server.start();

  final UserGroupInformation current = UserGroupInformation.getCurrentUser();
  final InetSocketAddress addr = NetUtils.getConnectAddress(server);
  TestTokenIdentifier tokenId = new TestTokenIdentifier(new Text(current
      .getUserName()));
  Token<TestTokenIdentifier> token = new Token<TestTokenIdentifier>(tokenId, sm);
  SecurityUtil.setTokenService(token, addr);
  current.addToken(token);

  TestSaslProtocol proxy = null;
  try {
    proxy = RPC.getProxy(TestSaslProtocol.class,
        TestSaslProtocol.versionID, addr, conf);
    AuthMethod authMethod = proxy.getAuthMethod();
    assertEquals(TOKEN, authMethod);
    //QOP must be auth
    assertEquals(expectedQop.saslQop,
        RPC.getConnectionIdForProxy(proxy).getSaslQop());
    proxy.ping();
  } finally {
    server.stop();
    if (proxy != null) {
      RPC.stopProxy(proxy);
    }
  }
}
Example 9
Source File: ConverterUtils.java From hadoop with Apache License 2.0 | 5 votes |
/**
 * Convert a protobuf token into a rpc token and set its service. Supposed
 * to be used for tokens other than RMDelegationToken. For
 * RMDelegationToken, use
 * {@link #convertFromYarn(org.apache.hadoop.yarn.api.records.Token,
 * org.apache.hadoop.io.Text)} instead.
 *
 * @param protoToken the yarn token
 * @param serviceAddr the connect address for the service
 * @return rpc token
 */
public static <T extends TokenIdentifier> Token<T> convertFromYarn(
    org.apache.hadoop.yarn.api.records.Token protoToken,
    InetSocketAddress serviceAddr) {
  Token<T> token = new Token<T>(protoToken.getIdentifier().array(),
      protoToken.getPassword().array(),
      new Text(protoToken.getKind()),
      new Text(protoToken.getService()));
  if (serviceAddr != null) {
    SecurityUtil.setTokenService(token, serviceAddr);
  }
  return token;
}
Example 10
Source File: JspHelper.java From big-c with Apache License 2.0 | 5 votes |
private static UserGroupInformation getTokenUGI(ServletContext context,
                                                HttpServletRequest request,
                                                String tokenString,
                                                Configuration conf)
    throws IOException {
  final Token<DelegationTokenIdentifier> token =
      new Token<DelegationTokenIdentifier>();
  token.decodeFromUrlString(tokenString);
  InetSocketAddress serviceAddress = getNNServiceAddress(context, request);
  if (serviceAddress != null) {
    SecurityUtil.setTokenService(token, serviceAddress);
    token.setKind(DelegationTokenIdentifier.HDFS_DELEGATION_KIND);
  }

  ByteArrayInputStream buf =
      new ByteArrayInputStream(token.getIdentifier());
  DataInputStream in = new DataInputStream(buf);
  DelegationTokenIdentifier id = new DelegationTokenIdentifier();
  id.readFields(in);
  if (context != null) {
    final NameNode nn = NameNodeHttpServer.getNameNodeFromContext(context);
    if (nn != null) {
      // Verify the token.
      nn.getNamesystem().verifyToken(id, token.getPassword());
    }
  }
  UserGroupInformation ugi = id.getUser();
  ugi.addToken(token);
  return ugi;
}
Example 11
Source File: TestSaslRPC.java From big-c with Apache License 2.0 | 5 votes |
private void doDigestRpc(Server server, TestTokenSecretManager sm)
    throws Exception {
  server.start();

  final UserGroupInformation current = UserGroupInformation.getCurrentUser();
  final InetSocketAddress addr = NetUtils.getConnectAddress(server);
  TestTokenIdentifier tokenId = new TestTokenIdentifier(new Text(current
      .getUserName()));
  Token<TestTokenIdentifier> token = new Token<TestTokenIdentifier>(tokenId, sm);
  SecurityUtil.setTokenService(token, addr);
  current.addToken(token);

  TestSaslProtocol proxy = null;
  try {
    proxy = RPC.getProxy(TestSaslProtocol.class,
        TestSaslProtocol.versionID, addr, conf);
    AuthMethod authMethod = proxy.getAuthMethod();
    assertEquals(TOKEN, authMethod);
    //QOP must be auth
    assertEquals(expectedQop.saslQop,
        RPC.getConnectionIdForProxy(proxy).getSaslQop());
    proxy.ping();
  } finally {
    server.stop();
    if (proxy != null) {
      RPC.stopProxy(proxy);
    }
  }
}
Example 12
Source File: DelegationTokenSecretManager.java From big-c with Apache License 2.0 | 5 votes |
/** A utility method for creating credentials. */
public static Credentials createCredentials(final NameNode namenode,
    final UserGroupInformation ugi, final String renewer) throws IOException {
  final Token<DelegationTokenIdentifier> token =
      namenode.getRpcServer().getDelegationToken(new Text(renewer));
  if (token == null) {
    return null;
  }
  final InetSocketAddress addr = namenode.getNameNodeAddress();
  SecurityUtil.setTokenService(token, addr);
  final Credentials c = new Credentials();
  c.addToken(new Text(ugi.getShortUserName()), token);
  return c;
}
Example 13
Source File: MockRMWithCustomAMLauncher.java From big-c with Apache License 2.0 | 5 votes |
@Override
protected ApplicationMasterLauncher createAMLauncher() {
  return new ApplicationMasterLauncher(getRMContext()) {
    @Override
    protected Runnable createRunnableLauncher(RMAppAttempt application,
        AMLauncherEventType event) {
      return new AMLauncher(context, application, event, getConfig()) {
        @Override
        protected ContainerManagementProtocol getContainerMgrProxy(
            ContainerId containerId) {
          return containerManager;
        }
        @Override
        protected Token<AMRMTokenIdentifier> createAndSetAMRMToken() {
          Token<AMRMTokenIdentifier> amRmToken = super.createAndSetAMRMToken();
          InetSocketAddress serviceAddr = getConfig().getSocketAddr(
              YarnConfiguration.RM_SCHEDULER_ADDRESS,
              YarnConfiguration.DEFAULT_RM_SCHEDULER_ADDRESS,
              YarnConfiguration.DEFAULT_RM_SCHEDULER_PORT);
          SecurityUtil.setTokenService(amRmToken, serviceAddr);
          return amRmToken;
        }
      };
    }
  };
}
Example 14
Source File: TestSaslRPC.java From hadoop with Apache License 2.0 | 4 votes |
@Test
public void testPerConnectionConf() throws Exception {
  TestTokenSecretManager sm = new TestTokenSecretManager();
  final Server server = new RPC.Builder(conf)
      .setProtocol(TestSaslProtocol.class).setInstance(new TestSaslImpl())
      .setBindAddress(ADDRESS).setPort(0).setNumHandlers(5).setVerbose(true)
      .setSecretManager(sm).build();
  server.start();
  final UserGroupInformation current = UserGroupInformation.getCurrentUser();
  final InetSocketAddress addr = NetUtils.getConnectAddress(server);
  TestTokenIdentifier tokenId = new TestTokenIdentifier(new Text(current
      .getUserName()));
  Token<TestTokenIdentifier> token = new Token<TestTokenIdentifier>(tokenId, sm);
  SecurityUtil.setTokenService(token, addr);
  current.addToken(token);

  Configuration newConf = new Configuration(conf);
  newConf.set(CommonConfigurationKeysPublic.
      HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_KEY, "");

  Client client = null;
  TestSaslProtocol proxy1 = null;
  TestSaslProtocol proxy2 = null;
  TestSaslProtocol proxy3 = null;
  int timeouts[] = {111222, 3333333};
  try {
    newConf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,
        timeouts[0]);
    proxy1 = RPC.getProxy(TestSaslProtocol.class,
        TestSaslProtocol.versionID, addr, newConf);
    proxy1.getAuthMethod();
    client = WritableRpcEngine.getClient(newConf);
    Set<ConnectionId> conns = client.getConnectionIds();
    assertEquals("number of connections in cache is wrong", 1, conns.size());
    // same conf, connection should be re-used
    proxy2 = RPC.getProxy(TestSaslProtocol.class,
        TestSaslProtocol.versionID, addr, newConf);
    proxy2.getAuthMethod();
    assertEquals("number of connections in cache is wrong", 1, conns.size());
    // different conf, new connection should be set up
    newConf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,
        timeouts[1]);
    proxy3 = RPC.getProxy(TestSaslProtocol.class,
        TestSaslProtocol.versionID, addr, newConf);
    proxy3.getAuthMethod();
    assertEquals("number of connections in cache is wrong", 2, conns.size());
    // now verify the proxies have the correct connection ids and timeouts
    ConnectionId[] connsArray = {
        RPC.getConnectionIdForProxy(proxy1),
        RPC.getConnectionIdForProxy(proxy2),
        RPC.getConnectionIdForProxy(proxy3)};
    assertEquals(connsArray[0], connsArray[1]);
    assertEquals(connsArray[0].getMaxIdleTime(), timeouts[0]);
    assertFalse(connsArray[0].equals(connsArray[2]));
    assertNotSame(connsArray[2].getMaxIdleTime(), timeouts[1]);
  } finally {
    server.stop();
    // this is dirty, but clear out connection cache for next run
    if (client != null) {
      client.getConnectionIds().clear();
    }
    if (proxy1 != null) RPC.stopProxy(proxy1);
    if (proxy2 != null) RPC.stopProxy(proxy2);
    if (proxy3 != null) RPC.stopProxy(proxy3);
  }
}
Example 15
Source File: TestClientProtocolWithDelegationToken.java From big-c with Apache License 2.0 | 4 votes |
@Test
public void testDelegationTokenRpc() throws Exception {
  ClientProtocol mockNN = mock(ClientProtocol.class);
  FSNamesystem mockNameSys = mock(FSNamesystem.class);

  DelegationTokenSecretManager sm = new DelegationTokenSecretManager(
      DFSConfigKeys.DFS_NAMENODE_DELEGATION_KEY_UPDATE_INTERVAL_DEFAULT,
      DFSConfigKeys.DFS_NAMENODE_DELEGATION_KEY_UPDATE_INTERVAL_DEFAULT,
      DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_DEFAULT,
      3600000, mockNameSys);
  sm.startThreads();
  final Server server = new RPC.Builder(conf)
      .setProtocol(ClientProtocol.class).setInstance(mockNN)
      .setBindAddress(ADDRESS).setPort(0).setNumHandlers(5).setVerbose(true)
      .setSecretManager(sm).build();
  server.start();

  final UserGroupInformation current = UserGroupInformation.getCurrentUser();
  final InetSocketAddress addr = NetUtils.getConnectAddress(server);
  String user = current.getUserName();
  Text owner = new Text(user);
  DelegationTokenIdentifier dtId = new DelegationTokenIdentifier(owner, owner, null);
  Token<DelegationTokenIdentifier> token = new Token<DelegationTokenIdentifier>(
      dtId, sm);
  SecurityUtil.setTokenService(token, addr);
  LOG.info("Service for token is " + token.getService());
  current.addToken(token);
  current.doAs(new PrivilegedExceptionAction<Object>() {
    @Override
    public Object run() throws Exception {
      ClientProtocol proxy = null;
      try {
        proxy = RPC.getProxy(ClientProtocol.class,
            ClientProtocol.versionID, addr, conf);
        proxy.getServerDefaults();
      } finally {
        server.stop();
        if (proxy != null) {
          RPC.stopProxy(proxy);
        }
      }
      return null;
    }
  });
}
Example 16
Source File: TestSaslRPC.java From big-c with Apache License 2.0 | 4 votes |
@Test
public void testPerConnectionConf() throws Exception {
  TestTokenSecretManager sm = new TestTokenSecretManager();
  final Server server = new RPC.Builder(conf)
      .setProtocol(TestSaslProtocol.class).setInstance(new TestSaslImpl())
      .setBindAddress(ADDRESS).setPort(0).setNumHandlers(5).setVerbose(true)
      .setSecretManager(sm).build();
  server.start();
  final UserGroupInformation current = UserGroupInformation.getCurrentUser();
  final InetSocketAddress addr = NetUtils.getConnectAddress(server);
  TestTokenIdentifier tokenId = new TestTokenIdentifier(new Text(current
      .getUserName()));
  Token<TestTokenIdentifier> token = new Token<TestTokenIdentifier>(tokenId, sm);
  SecurityUtil.setTokenService(token, addr);
  current.addToken(token);

  Configuration newConf = new Configuration(conf);
  newConf.set(CommonConfigurationKeysPublic.
      HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_KEY, "");

  Client client = null;
  TestSaslProtocol proxy1 = null;
  TestSaslProtocol proxy2 = null;
  TestSaslProtocol proxy3 = null;
  int timeouts[] = {111222, 3333333};
  try {
    newConf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,
        timeouts[0]);
    proxy1 = RPC.getProxy(TestSaslProtocol.class,
        TestSaslProtocol.versionID, addr, newConf);
    proxy1.getAuthMethod();
    client = WritableRpcEngine.getClient(newConf);
    Set<ConnectionId> conns = client.getConnectionIds();
    assertEquals("number of connections in cache is wrong", 1, conns.size());
    // same conf, connection should be re-used
    proxy2 = RPC.getProxy(TestSaslProtocol.class,
        TestSaslProtocol.versionID, addr, newConf);
    proxy2.getAuthMethod();
    assertEquals("number of connections in cache is wrong", 1, conns.size());
    // different conf, new connection should be set up
    newConf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,
        timeouts[1]);
    proxy3 = RPC.getProxy(TestSaslProtocol.class,
        TestSaslProtocol.versionID, addr, newConf);
    proxy3.getAuthMethod();
    assertEquals("number of connections in cache is wrong", 2, conns.size());
    // now verify the proxies have the correct connection ids and timeouts
    ConnectionId[] connsArray = {
        RPC.getConnectionIdForProxy(proxy1),
        RPC.getConnectionIdForProxy(proxy2),
        RPC.getConnectionIdForProxy(proxy3)};
    assertEquals(connsArray[0], connsArray[1]);
    assertEquals(connsArray[0].getMaxIdleTime(), timeouts[0]);
    assertFalse(connsArray[0].equals(connsArray[2]));
    assertNotSame(connsArray[2].getMaxIdleTime(), timeouts[1]);
  } finally {
    server.stop();
    // this is dirty, but clear out connection cache for next run
    if (client != null) {
      client.getConnectionIds().clear();
    }
    if (proxy1 != null) RPC.stopProxy(proxy1);
    if (proxy2 != null) RPC.stopProxy(proxy2);
    if (proxy3 != null) RPC.stopProxy(proxy3);
  }
}
Example 17
Source File: ContainerRunnerImpl.java From tez with Apache License 2.0 | 4 votes |
@Override
public ContainerExecutionResult call() throws Exception {
  // TODO Consolidate this code with TezChild.
  StopWatch sw = new StopWatch().start();
  UserGroupInformation taskUgi = UserGroupInformation.createRemoteUser(request.getUser());
  taskUgi.addCredentials(credentials);

  Token<JobTokenIdentifier> jobToken = TokenCache.getSessionToken(credentials);
  Map<String, ByteBuffer> serviceConsumerMetadata = new HashMap<String, ByteBuffer>();
  String auxiliaryService = conf.get(TezConfiguration.TEZ_AM_SHUFFLE_AUXILIARY_SERVICE_ID,
      TezConfiguration.TEZ_AM_SHUFFLE_AUXILIARY_SERVICE_ID_DEFAULT);
  serviceConsumerMetadata.put(auxiliaryService,
      TezCommonUtils.convertJobTokenToBytes(jobToken));
  Multimap<String, String> startedInputsMap = HashMultimap.create();

  UserGroupInformation taskOwner =
      UserGroupInformation.createRemoteUser(request.getTokenIdentifier());
  final InetSocketAddress address =
      NetUtils.createSocketAddrForHost(request.getAmHost(), request.getAmPort());
  SecurityUtil.setTokenService(jobToken, address);
  taskOwner.addToken(jobToken);
  umbilical = taskOwner.doAs(new PrivilegedExceptionAction<TezTaskUmbilicalProtocol>() {
    @Override
    public TezTaskUmbilicalProtocol run() throws Exception {
      return RPC.getProxy(TezTaskUmbilicalProtocol.class,
          TezTaskUmbilicalProtocol.versionID, address, conf);
    }
  });
  // TODO Stop reading this on each request.
  taskReporter = new TaskReporter(
      umbilical,
      conf.getInt(TezConfiguration.TEZ_TASK_AM_HEARTBEAT_INTERVAL_MS,
          TezConfiguration.TEZ_TASK_AM_HEARTBEAT_INTERVAL_MS_DEFAULT),
      conf.getLong(
          TezConfiguration.TEZ_TASK_AM_HEARTBEAT_COUNTER_INTERVAL_MS,
          TezConfiguration.TEZ_TASK_AM_HEARTBEAT_COUNTER_INTERVAL_MS_DEFAULT),
      conf.getInt(TezConfiguration.TEZ_TASK_MAX_EVENTS_PER_HEARTBEAT,
          TezConfiguration.TEZ_TASK_MAX_EVENTS_PER_HEARTBEAT_DEFAULT),
      new AtomicLong(0),
      request.getContainerIdString());

  TezCommonUtils.logCredentials(LOG, taskUgi.getCredentials(), "taskUgi");
  taskRunner = new TezTaskRunner2(conf, taskUgi, localDirs,
      ProtoConverters.getTaskSpecfromProto(request.getTaskSpec()),
      request.getAppAttemptNumber(),
      serviceConsumerMetadata, envMap, startedInputsMap, taskReporter,
      executor, objectRegistry, pid, executionContext, memoryAvailable,
      false, new DefaultHadoopShim(), sharedExecutor);

  boolean shouldDie;
  try {
    TaskRunner2Result result = taskRunner.run();
    LOG.info("TaskRunner2Result: {}", result);
    shouldDie = result.isContainerShutdownRequested();
    if (shouldDie) {
      LOG.info("Got a shouldDie notification via heartbeats. Shutting down");
      return new ContainerExecutionResult(ContainerExecutionResult.ExitStatus.SUCCESS, null,
          "Asked to die by the AM");
    }
    if (result.getError() != null) {
      Throwable e = result.getError();
      return new ContainerExecutionResult(
          ContainerExecutionResult.ExitStatus.EXECUTION_FAILURE,
          e, "TaskExecutionFailure: " + e.getMessage());
    }
  } finally {
    FileSystem.closeAllForUGI(taskUgi);
  }
  LOG.info("ExecutionTime for Container: " + request.getContainerIdString() + "="
      + sw.stop().now(TimeUnit.MILLISECONDS));
  return new ContainerExecutionResult(ContainerExecutionResult.ExitStatus.SUCCESS,
      null, null);
}
Example 18
Source File: TestClientProtocolWithDelegationToken.java From hadoop with Apache License 2.0 | 4 votes |
@Test
public void testDelegationTokenRpc() throws Exception {
  ClientProtocol mockNN = mock(ClientProtocol.class);
  FSNamesystem mockNameSys = mock(FSNamesystem.class);

  DelegationTokenSecretManager sm = new DelegationTokenSecretManager(
      DFSConfigKeys.DFS_NAMENODE_DELEGATION_KEY_UPDATE_INTERVAL_DEFAULT,
      DFSConfigKeys.DFS_NAMENODE_DELEGATION_KEY_UPDATE_INTERVAL_DEFAULT,
      DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_DEFAULT,
      3600000, mockNameSys);
  sm.startThreads();
  final Server server = new RPC.Builder(conf)
      .setProtocol(ClientProtocol.class).setInstance(mockNN)
      .setBindAddress(ADDRESS).setPort(0).setNumHandlers(5).setVerbose(true)
      .setSecretManager(sm).build();
  server.start();

  final UserGroupInformation current = UserGroupInformation.getCurrentUser();
  final InetSocketAddress addr = NetUtils.getConnectAddress(server);
  String user = current.getUserName();
  Text owner = new Text(user);
  DelegationTokenIdentifier dtId = new DelegationTokenIdentifier(owner, owner, null);
  Token<DelegationTokenIdentifier> token = new Token<DelegationTokenIdentifier>(
      dtId, sm);
  SecurityUtil.setTokenService(token, addr);
  LOG.info("Service for token is " + token.getService());
  current.addToken(token);
  current.doAs(new PrivilegedExceptionAction<Object>() {
    @Override
    public Object run() throws Exception {
      ClientProtocol proxy = null;
      try {
        proxy = RPC.getProxy(ClientProtocol.class,
            ClientProtocol.versionID, addr, conf);
        proxy.getServerDefaults();
      } finally {
        server.stop();
        if (proxy != null) {
          RPC.stopProxy(proxy);
        }
      }
      return null;
    }
  });
}
Example 19
Source File: TezChild.java From incubator-tez with Apache License 2.0 | 4 votes |
public TezChild(Configuration conf, String host, int port, String containerIdentifier,
    String tokenIdentifier, int appAttemptNumber, String[] localDirs,
    ObjectRegistryImpl objectRegistry)
    throws IOException, InterruptedException {
  this.defaultConf = conf;
  this.containerIdString = containerIdentifier;
  this.appAttemptNumber = appAttemptNumber;
  this.localDirs = localDirs;

  getTaskMaxSleepTime = defaultConf.getInt(
      TezConfiguration.TEZ_TASK_GET_TASK_SLEEP_INTERVAL_MS_MAX,
      TezConfiguration.TEZ_TASK_GET_TASK_SLEEP_INTERVAL_MS_MAX_DEFAULT);

  amHeartbeatInterval = defaultConf.getInt(TezConfiguration.TEZ_TASK_AM_HEARTBEAT_INTERVAL_MS,
      TezConfiguration.TEZ_TASK_AM_HEARTBEAT_INTERVAL_MS_DEFAULT);

  sendCounterInterval = defaultConf.getLong(
      TezConfiguration.TEZ_TASK_AM_HEARTBEAT_COUNTER_INTERVAL_MS,
      TezConfiguration.TEZ_TASK_AM_HEARTBEAT_COUNTER_INTERVAL_MS_DEFAULT);

  maxEventsToGet = defaultConf.getInt(TezConfiguration.TEZ_TASK_MAX_EVENTS_PER_HEARTBEAT,
      TezConfiguration.TEZ_TASK_MAX_EVENTS_PER_HEARTBEAT_DEFAULT);

  address = NetUtils.createSocketAddrForHost(host, port);
  ExecutorService executor = Executors.newFixedThreadPool(1,
      new ThreadFactoryBuilder().setDaemon(true).setNameFormat("TezChild").build());
  this.executor = MoreExecutors.listeningDecorator(executor);
  this.objectRegistry = objectRegistry;

  // Security framework already loaded the tokens into current ugi
  Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();
  if (LOG.isDebugEnabled()) {
    LOG.debug("Executing with tokens:");
    for (Token<?> token : credentials.getAllTokens()) {
      LOG.debug(token);
    }
  }

  UserGroupInformation taskOwner = UserGroupInformation.createRemoteUser(tokenIdentifier);
  Token<JobTokenIdentifier> jobToken = TokenCache.getSessionToken(credentials);
  SecurityUtil.setTokenService(jobToken, address);
  taskOwner.addToken(jobToken);

  serviceConsumerMetadata.put(ShuffleUtils.SHUFFLE_HANDLER_SERVICE_ID,
      ShuffleUtils.convertJobTokenToBytes(jobToken));

  umbilical = taskOwner.doAs(new PrivilegedExceptionAction<TezTaskUmbilicalProtocol>() {
    @Override
    public TezTaskUmbilicalProtocol run() throws Exception {
      return (TezTaskUmbilicalProtocol) RPC.getProxy(TezTaskUmbilicalProtocol.class,
          TezTaskUmbilicalProtocol.versionID, address, defaultConf);
    }
  });
}
Example 20
Source File: TestRMRestart.java From hadoop with Apache License 2.0 | 4 votes |
@Test (timeout = 60000)
public void testDelegationTokenRestoredInDelegationTokenRenewer()
    throws Exception {
  conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 2);
  conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
      "kerberos");
  UserGroupInformation.setConfiguration(conf);

  MemoryRMStateStore memStore = new MemoryRMStateStore();
  memStore.init(conf);
  RMState rmState = memStore.getState();
  Map<ApplicationId, ApplicationStateData> rmAppState =
      rmState.getApplicationState();

  MockRM rm1 = new TestSecurityMockRM(conf, memStore);
  rm1.start();

  HashSet<Token<RMDelegationTokenIdentifier>> tokenSet =
      new HashSet<Token<RMDelegationTokenIdentifier>>();

  // create an empty credential
  Credentials ts = new Credentials();

  // create tokens and add into credential
  Text userText1 = new Text("user1");
  RMDelegationTokenIdentifier dtId1 =
      new RMDelegationTokenIdentifier(userText1, new Text("renewer1"), userText1);
  Token<RMDelegationTokenIdentifier> token1 =
      new Token<RMDelegationTokenIdentifier>(dtId1,
          rm1.getRMContext().getRMDelegationTokenSecretManager());
  SecurityUtil.setTokenService(token1, rmAddr);
  ts.addToken(userText1, token1);
  tokenSet.add(token1);

  Text userText2 = new Text("user2");
  RMDelegationTokenIdentifier dtId2 =
      new RMDelegationTokenIdentifier(userText2, new Text("renewer2"), userText2);
  Token<RMDelegationTokenIdentifier> token2 =
      new Token<RMDelegationTokenIdentifier>(dtId2,
          rm1.getRMContext().getRMDelegationTokenSecretManager());
  SecurityUtil.setTokenService(token2, rmAddr);
  ts.addToken(userText2, token2);
  tokenSet.add(token2);

  // submit an app with customized credential
  RMApp app = rm1.submitApp(200, "name", "user",
      new HashMap<ApplicationAccessType, String>(), false, "default", 1, ts);

  // assert app info is saved
  ApplicationStateData appState = rmAppState.get(app.getApplicationId());
  Assert.assertNotNull(appState);

  // assert delegation tokens exist in rm1 DelegationTokenRenewer
  Assert.assertEquals(tokenSet, rm1.getRMContext()
      .getDelegationTokenRenewer().getDelegationTokens());

  // assert delegation tokens are saved
  DataOutputBuffer dob = new DataOutputBuffer();
  ts.writeTokenStorageToStream(dob);
  ByteBuffer securityTokens =
      ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
  securityTokens.rewind();
  Assert.assertEquals(securityTokens, appState
      .getApplicationSubmissionContext().getAMContainerSpec()
      .getTokens());

  // start new RM
  MockRM rm2 = new TestSecurityMockRM(conf, memStore);
  rm2.start();

  // Need to wait for a while as now token renewal happens on another thread
  // and is asynchronous in nature.
  waitForTokensToBeRenewed(rm2);

  // verify tokens are properly populated back to rm2 DelegationTokenRenewer
  Assert.assertEquals(tokenSet, rm2.getRMContext()
      .getDelegationTokenRenewer().getDelegationTokens());
}