Java Code Examples for org.apache.hadoop.security.Credentials#readTokenStorageStream()
The following examples show how to use org.apache.hadoop.security.Credentials#readTokenStorageStream(). Each example is annotated with the source file and project it was taken from.
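Before the project examples, here is a minimal self-contained sketch (not taken from any of the projects below) of the round trip that most of these snippets rely on: Credentials are serialized into a ByteBuffer with writeTokenStorageToStream() and read back with readTokenStorageStream(). The token alias, kind, and service names are placeholders.

import java.nio.ByteBuffer;
import org.apache.hadoop.io.DataInputByteBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.token.Token;

public class CredentialsRoundTrip {
  public static void main(String[] args) throws Exception {
    // Build a Credentials object holding one dummy token (placeholder names).
    Credentials written = new Credentials();
    written.addToken(new Text("example-alias"),
        new Token<>(new byte[4], new byte[4],
            new Text("EXAMPLE_KIND"), new Text("example-service")));

    // Serialize to a ByteBuffer, as done when populating a ContainerLaunchContext.
    DataOutputBuffer dob = new DataOutputBuffer();
    written.writeTokenStorageToStream(dob);
    ByteBuffer tokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());

    // Deserialize with readTokenStorageStream(), as done on the receiving side.
    Credentials read = new Credentials();
    DataInputByteBuffer dibb = new DataInputByteBuffer();
    dibb.reset(tokens);
    read.readTokenStorageStream(dibb);

    System.out.println("Tokens read back: " + read.numberOfTokens());
  }
}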
Example 1
Source File: ContainerManagerImpl.java From big-c with Apache License 2.0

private Credentials parseCredentials(ContainerLaunchContext launchContext)
    throws IOException {
  Credentials credentials = new Credentials();
  // //////////// Parse credentials
  ByteBuffer tokens = launchContext.getTokens();

  if (tokens != null) {
    DataInputByteBuffer buf = new DataInputByteBuffer();
    tokens.rewind();
    buf.reset(tokens);
    credentials.readTokenStorageStream(buf);
    if (LOG.isDebugEnabled()) {
      for (Token<? extends TokenIdentifier> tk : credentials.getAllTokens()) {
        LOG.debug(tk.getService() + " = " + tk.toString());
      }
    }
  }
  // //////////// End of parsing credentials
  return credentials;
}
Example 2
Source File: ContainerManagerImpl.java From hadoop with Apache License 2.0

private Credentials parseCredentials(ContainerLaunchContext launchContext)
    throws IOException {
  Credentials credentials = new Credentials();
  // //////////// Parse credentials
  ByteBuffer tokens = launchContext.getTokens();

  if (tokens != null) {
    DataInputByteBuffer buf = new DataInputByteBuffer();
    tokens.rewind();
    buf.reset(tokens);
    credentials.readTokenStorageStream(buf);
    if (LOG.isDebugEnabled()) {
      for (Token<? extends TokenIdentifier> tk : credentials.getAllTokens()) {
        LOG.debug(tk.getService() + " = " + tk.toString());
      }
    }
  }
  // //////////// End of parsing credentials
  return credentials;
}
Example 3
Source File: UserCredentialSecurityTokenProvider.java From reef with Apache License 2.0

/**
 * Add serialized token to the credentials.
 * @param tokens byte array containing the serialized token.
 */
@Override
public void addTokens(final byte[] tokens) {
  try (DataInputBuffer buf = new DataInputBuffer()) {
    buf.reset(tokens, tokens.length);
    final Credentials credentials = new Credentials();
    credentials.readTokenStorageStream(buf);
    final UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    ugi.addCredentials(credentials);
    LOG.log(Level.FINEST, "Added {0} tokens for user {1}",
        new Object[] {credentials.numberOfTokens(), ugi});
  } catch (final IOException ex) {
    LOG.log(Level.SEVERE, "Could not access tokens in user credentials.", ex);
    throw new RuntimeException(ex);
  }
}
Example 4
Source File: TwillContainerMain.java From twill with Apache License 2.0

private static void loadSecureStore() throws IOException {
  if (!UserGroupInformation.isSecurityEnabled()) {
    return;
  }

  File file = new File(Constants.Files.CREDENTIALS);
  if (file.exists()) {
    Credentials credentials = new Credentials();
    try (DataInputStream input = new DataInputStream(new FileInputStream(file))) {
      credentials.readTokenStorageStream(input);
    }
    UserGroupInformation.getCurrentUser().addCredentials(credentials);
    LOG.info("Secure store updated from {}", file);
  }
}
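Example 4 streams the token file by hand through a DataInputStream. As a hedged aside, Hadoop's Credentials class also ships a static readTokenStorageFile() helper that opens the file and performs the same readTokenStorageStream() deserialization internally. A minimal sketch, assuming a token file at a placeholder path:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.Credentials;

public class ReadTokenFile {
  public static void main(String[] args) throws Exception {
    // Placeholder path; in the Twill example above this would be
    // Constants.Files.CREDENTIALS.
    Path tokenFile = new Path("container_tokens");
    // readTokenStorageFile() wraps the readTokenStorageStream() call shown
    // throughout these examples.
    Credentials credentials =
        Credentials.readTokenStorageFile(tokenFile, new Configuration());
    System.out.println("Loaded " + credentials.numberOfTokens() + " tokens");
  }
}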
Example 5
Source File: YarnClientImpl.java From hadoop with Apache License 2.0

private void addTimelineDelegationToken(
    ContainerLaunchContext clc) throws YarnException, IOException {
  Credentials credentials = new Credentials();
  DataInputByteBuffer dibb = new DataInputByteBuffer();
  ByteBuffer tokens = clc.getTokens();
  if (tokens != null) {
    dibb.reset(tokens);
    credentials.readTokenStorageStream(dibb);
    tokens.rewind();
  }
  // If the timeline delegation token is already in the CLC, no need to add
  // one more
  for (org.apache.hadoop.security.token.Token<? extends TokenIdentifier> token
      : credentials.getAllTokens()) {
    if (token.getKind().equals(TimelineDelegationTokenIdentifier.KIND_NAME)) {
      return;
    }
  }
  org.apache.hadoop.security.token.Token<TimelineDelegationTokenIdentifier>
      timelineDelegationToken = getTimelineDelegationToken();
  if (timelineDelegationToken == null) {
    return;
  }
  credentials.addToken(timelineService, timelineDelegationToken);
  if (LOG.isDebugEnabled()) {
    LOG.debug("Add timeline delegation token into credentials: "
        + timelineDelegationToken);
  }
  DataOutputBuffer dob = new DataOutputBuffer();
  credentials.writeTokenStorageToStream(dob);
  tokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
  clc.setTokens(tokens);
}
Example 6
Source File: RMAppImpl.java From big-c with Apache License 2.0

protected Credentials parseCredentials() throws IOException {
  Credentials credentials = new Credentials();
  DataInputByteBuffer dibb = new DataInputByteBuffer();
  ByteBuffer tokens = submissionContext.getAMContainerSpec().getTokens();
  if (tokens != null) {
    dibb.reset(tokens);
    credentials.readTokenStorageStream(dibb);
    tokens.rewind();
  }
  return credentials;
}
Example 7
Source File: AMLauncher.java From big-c with Apache License 2.0

private void setupTokens(
    ContainerLaunchContext container, ContainerId containerID)
    throws IOException {
  Map<String, String> environment = container.getEnvironment();
  environment.put(ApplicationConstants.APPLICATION_WEB_PROXY_BASE_ENV,
      application.getWebProxyBase());
  // Set AppSubmitTime and MaxAppAttempts to be consumable by the AM.
  ApplicationId applicationId =
      application.getAppAttemptId().getApplicationId();
  environment.put(
      ApplicationConstants.APP_SUBMIT_TIME_ENV,
      String.valueOf(rmContext.getRMApps()
          .get(applicationId)
          .getSubmitTime()));
  environment.put(ApplicationConstants.MAX_APP_ATTEMPTS_ENV,
      String.valueOf(rmContext.getRMApps().get(
          applicationId).getMaxAppAttempts()));

  Credentials credentials = new Credentials();
  DataInputByteBuffer dibb = new DataInputByteBuffer();
  if (container.getTokens() != null) {
    // TODO: Don't do this kind of checks everywhere.
    dibb.reset(container.getTokens());
    credentials.readTokenStorageStream(dibb);
  }

  // Add AMRMToken
  Token<AMRMTokenIdentifier> amrmToken = createAndSetAMRMToken();
  if (amrmToken != null) {
    credentials.addToken(amrmToken.getService(), amrmToken);
  }
  DataOutputBuffer dob = new DataOutputBuffer();
  credentials.writeTokenStorageToStream(dob);
  container.setTokens(ByteBuffer.wrap(dob.getData(), 0, dob.getLength()));
}
Example 8
Source File: ContainerManagerImpl.java From big-c with Apache License 2.0

private void recoverApplication(ContainerManagerApplicationProto p)
    throws IOException {
  ApplicationId appId = new ApplicationIdPBImpl(p.getId());
  Credentials creds = new Credentials();
  creds.readTokenStorageStream(
      new DataInputStream(p.getCredentials().newInput()));

  List<ApplicationACLMapProto> aclProtoList = p.getAclsList();
  Map<ApplicationAccessType, String> acls =
      new HashMap<ApplicationAccessType, String>(aclProtoList.size());
  for (ApplicationACLMapProto aclProto : aclProtoList) {
    acls.put(ProtoUtils.convertFromProtoFormat(aclProto.getAccessType()),
        aclProto.getAcl());
  }

  LogAggregationContext logAggregationContext = null;
  if (p.getLogAggregationContext() != null) {
    logAggregationContext =
        new LogAggregationContextPBImpl(p.getLogAggregationContext());
  }

  LOG.info("Recovering application " + appId);
  ApplicationImpl app = new ApplicationImpl(dispatcher, p.getUser(), appId,
      creds, context);
  context.getApplications().put(appId, app);
  app.handle(new ApplicationInitEvent(appId, acls, logAggregationContext));
}
Example 9
Source File: AbstractYarnTwillService.java From twill with Apache License 2.0

/**
 * Attempts to handle secure store update.
 *
 * @param message The message received
 * @return {@code true} if the message requests for secure store update,
 *         {@code false} otherwise.
 */
protected final boolean handleSecureStoreUpdate(Message message) {
  if (!SystemMessages.SECURE_STORE_UPDATED.equals(message)) {
    return false;
  }

  // If not in secure mode, simply ignore the message.
  if (!UserGroupInformation.isSecurityEnabled()) {
    return true;
  }

  try {
    Credentials credentials = new Credentials();
    Location location = getSecureStoreLocation();

    // If failed to determine the secure store location, simply ignore the message.
    if (location == null) {
      return true;
    }

    try (DataInputStream input = new DataInputStream(
        new BufferedInputStream(location.getInputStream()))) {
      credentials.readTokenStorageStream(input);
    }

    UserGroupInformation.getCurrentUser().addCredentials(credentials);

    // Clone the HDFS tokens for HA NameNode. This is to workaround bug HDFS-9276.
    YarnUtils.cloneHaNnCredentials(config);

    this.credentials = credentials;
    LOG.info("Secure store updated from {}.", location);
  } catch (Throwable t) {
    LOG.error("Failed to update secure store.", t);
  }

  return true;
}
Example 10
Source File: DagTypeConverters.java From incubator-tez with Apache License 2.0

public static Credentials convertByteStringToCredentials(ByteString byteString) {
  if (byteString == null) {
    return null;
  }
  DataInputByteBuffer dib = new DataInputByteBuffer();
  dib.reset(byteString.asReadOnlyByteBuffer());
  Credentials credentials = new Credentials();
  try {
    credentials.readTokenStorageStream(dib);
    return credentials;
  } catch (IOException e) {
    throw new TezUncheckedException("Failed to deserialize Credentials", e);
  }
}
Example 11
Source File: YarnClientImpl.java From big-c with Apache License 2.0

private void addTimelineDelegationToken(
    ContainerLaunchContext clc) throws YarnException, IOException {
  Credentials credentials = new Credentials();
  DataInputByteBuffer dibb = new DataInputByteBuffer();
  ByteBuffer tokens = clc.getTokens();
  if (tokens != null) {
    dibb.reset(tokens);
    credentials.readTokenStorageStream(dibb);
    tokens.rewind();
  }
  // If the timeline delegation token is already in the CLC, no need to add
  // one more
  for (org.apache.hadoop.security.token.Token<? extends TokenIdentifier> token
      : credentials.getAllTokens()) {
    if (token.getKind().equals(TimelineDelegationTokenIdentifier.KIND_NAME)) {
      return;
    }
  }
  org.apache.hadoop.security.token.Token<TimelineDelegationTokenIdentifier>
      timelineDelegationToken = getTimelineDelegationToken();
  if (timelineDelegationToken == null) {
    return;
  }
  credentials.addToken(timelineService, timelineDelegationToken);
  if (LOG.isDebugEnabled()) {
    LOG.debug("Add timeline delegation token into credentials: "
        + timelineDelegationToken);
  }
  DataOutputBuffer dob = new DataOutputBuffer();
  credentials.writeTokenStorageToStream(dob);
  tokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
  clc.setTokens(tokens);
}
Example 12
Source File: RMAppManager.java From hadoop with Apache License 2.0

protected Credentials parseCredentials(
    ApplicationSubmissionContext application) throws IOException {
  Credentials credentials = new Credentials();
  DataInputByteBuffer dibb = new DataInputByteBuffer();
  ByteBuffer tokens = application.getAMContainerSpec().getTokens();
  if (tokens != null) {
    dibb.reset(tokens);
    credentials.readTokenStorageStream(dibb);
    tokens.rewind();
  }
  return credentials;
}
Example 13
Source File: AMLauncher.java From hadoop with Apache License 2.0

private void setupTokens(
    ContainerLaunchContext container, ContainerId containerID)
    throws IOException {
  Map<String, String> environment = container.getEnvironment();
  environment.put(ApplicationConstants.APPLICATION_WEB_PROXY_BASE_ENV,
      application.getWebProxyBase());
  // Set AppSubmitTime and MaxAppAttempts to be consumable by the AM.
  ApplicationId applicationId =
      application.getAppAttemptId().getApplicationId();
  environment.put(
      ApplicationConstants.APP_SUBMIT_TIME_ENV,
      String.valueOf(rmContext.getRMApps()
          .get(applicationId)
          .getSubmitTime()));
  environment.put(ApplicationConstants.MAX_APP_ATTEMPTS_ENV,
      String.valueOf(rmContext.getRMApps().get(
          applicationId).getMaxAppAttempts()));

  Credentials credentials = new Credentials();
  DataInputByteBuffer dibb = new DataInputByteBuffer();
  if (container.getTokens() != null) {
    // TODO: Don't do this kind of checks everywhere.
    dibb.reset(container.getTokens());
    credentials.readTokenStorageStream(dibb);
  }

  // Add AMRMToken
  Token<AMRMTokenIdentifier> amrmToken = createAndSetAMRMToken();
  if (amrmToken != null) {
    credentials.addToken(amrmToken.getService(), amrmToken);
  }
  DataOutputBuffer dob = new DataOutputBuffer();
  credentials.writeTokenStorageToStream(dob);
  container.setTokens(ByteBuffer.wrap(dob.getData(), 0, dob.getLength()));
}
Example 14
Source File: TestAMAuthorization.java From big-c with Apache License 2.0

public Credentials getContainerCredentials() throws IOException {
  Credentials credentials = new Credentials();
  DataInputByteBuffer buf = new DataInputByteBuffer();
  containerTokens.rewind();
  buf.reset(containerTokens);
  credentials.readTokenStorageStream(buf);
  return credentials;
}
Example 15
Source File: UtilsTest.java From Flink-CEPplus with Apache License 2.0

@Test
public void testCreateTaskExecutorCredentials() throws Exception {
  File root = temporaryFolder.getRoot();
  File home = new File(root, "home");
  boolean created = home.mkdir();
  assertTrue(created);

  Configuration flinkConf = new Configuration();
  YarnConfiguration yarnConf = new YarnConfiguration();

  Map<String, String> env = new HashMap<>();
  env.put(YarnConfigKeys.ENV_APP_ID, "foo");
  env.put(YarnConfigKeys.ENV_CLIENT_HOME_DIR, home.getAbsolutePath());
  env.put(YarnConfigKeys.ENV_CLIENT_SHIP_FILES, "");
  env.put(YarnConfigKeys.ENV_FLINK_CLASSPATH, "");
  env.put(YarnConfigKeys.ENV_HADOOP_USER_NAME, "foo");
  env.put(YarnConfigKeys.FLINK_JAR_PATH, root.toURI().toString());
  env = Collections.unmodifiableMap(env);

  File credentialFile = temporaryFolder.newFile("container_tokens");
  final Text amRmTokenKind = AMRMTokenIdentifier.KIND_NAME;
  final Text hdfsDelegationTokenKind = new Text("HDFS_DELEGATION_TOKEN");
  final Text service = new Text("test-service");
  Credentials amCredentials = new Credentials();
  amCredentials.addToken(amRmTokenKind,
      new Token<>(new byte[4], new byte[4], amRmTokenKind, service));
  amCredentials.addToken(hdfsDelegationTokenKind,
      new Token<>(new byte[4], new byte[4], hdfsDelegationTokenKind, service));
  amCredentials.writeTokenStorageFile(
      new org.apache.hadoop.fs.Path(credentialFile.getAbsolutePath()), yarnConf);

  ContaineredTaskManagerParameters tmParams =
      new ContaineredTaskManagerParameters(64, 64, 16, 1, new HashMap<>(1));
  Configuration taskManagerConf = new Configuration();

  String workingDirectory = root.getAbsolutePath();
  Class<?> taskManagerMainClass = YarnTaskExecutorRunner.class;
  ContainerLaunchContext ctx;

  final Map<String, String> originalEnv = System.getenv();
  try {
    Map<String, String> systemEnv = new HashMap<>(originalEnv);
    systemEnv.put("HADOOP_TOKEN_FILE_LOCATION", credentialFile.getAbsolutePath());
    CommonTestUtils.setEnv(systemEnv);
    ctx = Utils.createTaskExecutorContext(flinkConf, yarnConf, env, tmParams,
        taskManagerConf, workingDirectory, taskManagerMainClass, LOG);
  } finally {
    CommonTestUtils.setEnv(originalEnv);
  }

  Credentials credentials = new Credentials();
  try (DataInputStream dis = new DataInputStream(
      new ByteArrayInputStream(ctx.getTokens().array()))) {
    credentials.readTokenStorageStream(dis);
  }
  Collection<Token<? extends TokenIdentifier>> tokens = credentials.getAllTokens();
  boolean hasHdfsDelegationToken = false;
  boolean hasAmRmToken = false;
  for (Token<? extends TokenIdentifier> token : tokens) {
    if (token.getKind().equals(amRmTokenKind)) {
      hasAmRmToken = true;
    } else if (token.getKind().equals(hdfsDelegationTokenKind)) {
      hasHdfsDelegationToken = true;
    }
  }
  assertTrue(hasHdfsDelegationToken);
  assertFalse(hasAmRmToken);
}
Example 16
Source File: TestTaskAttemptContainerRequest.java From hadoop with Apache License 2.0

@Test
public void testAttemptContainerRequest() throws Exception {
  final Text SECRET_KEY_ALIAS = new Text("secretkeyalias");
  final byte[] SECRET_KEY = ("secretkey").getBytes();
  Map<ApplicationAccessType, String> acls =
      new HashMap<ApplicationAccessType, String>(1);
  acls.put(ApplicationAccessType.VIEW_APP, "otheruser");
  ApplicationId appId = ApplicationId.newInstance(1, 1);
  JobId jobId = MRBuilderUtils.newJobId(appId, 1);
  TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
  Path jobFile = mock(Path.class);

  EventHandler eventHandler = mock(EventHandler.class);
  TaskAttemptListener taListener = mock(TaskAttemptListener.class);
  when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost", 0));

  JobConf jobConf = new JobConf();
  jobConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
  jobConf.setBoolean("fs.file.impl.disable.cache", true);
  jobConf.set(JobConf.MAPRED_MAP_TASK_ENV, "");

  // setup UGI for security so tokens and keys are preserved
  jobConf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
  UserGroupInformation.setConfiguration(jobConf);

  Credentials credentials = new Credentials();
  credentials.addSecretKey(SECRET_KEY_ALIAS, SECRET_KEY);
  Token<JobTokenIdentifier> jobToken = new Token<JobTokenIdentifier>(
      ("tokenid").getBytes(), ("tokenpw").getBytes(),
      new Text("tokenkind"), new Text("tokenservice"));

  TaskAttemptImpl taImpl =
      new MapTaskAttemptImpl(taskId, 1, eventHandler, jobFile, 1,
          mock(TaskSplitMetaInfo.class), jobConf, taListener,
          jobToken, credentials,
          new SystemClock(), null);

  jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID, taImpl.getID().toString());

  ContainerLaunchContext launchCtx =
      TaskAttemptImpl.createContainerLaunchContext(acls,
          jobConf, jobToken, taImpl.createRemoteTask(),
          TypeConverter.fromYarn(jobId),
          mock(WrappedJvmID.class), taListener,
          credentials);

  Assert.assertEquals("ACLs mismatch", acls, launchCtx.getApplicationACLs());
  Credentials launchCredentials = new Credentials();

  DataInputByteBuffer dibb = new DataInputByteBuffer();
  dibb.reset(launchCtx.getTokens());
  launchCredentials.readTokenStorageStream(dibb);

  // verify all tokens specified for the task attempt are in the launch context
  for (Token<? extends TokenIdentifier> token : credentials.getAllTokens()) {
    Token<? extends TokenIdentifier> launchToken =
        launchCredentials.getToken(token.getService());
    Assert.assertNotNull("Token " + token.getService() + " is missing",
        launchToken);
    Assert.assertEquals("Token " + token.getService() + " mismatch",
        token, launchToken);
  }

  // verify the secret key is in the launch context
  Assert.assertNotNull("Secret key missing",
      launchCredentials.getSecretKey(SECRET_KEY_ALIAS));
  Assert.assertTrue("Secret key mismatch",
      Arrays.equals(SECRET_KEY, launchCredentials.getSecretKey(SECRET_KEY_ALIAS)));
}
Example 17
Source File: TestDelegationTokenRenewer.java From hadoop with Apache License 2.0

@Test
public void testAppSubmissionWithoutDelegationToken() throws Exception {
  conf.setBoolean(YarnConfiguration.RM_PROXY_USER_PRIVILEGES_ENABLED, true);
  // create token2
  Text userText2 = new Text("user2");
  DelegationTokenIdentifier dtId2 =
      new DelegationTokenIdentifier(new Text("user2"), new Text("renewer2"),
          userText2);
  final Token<DelegationTokenIdentifier> token2 =
      new Token<DelegationTokenIdentifier>(dtId2.getBytes(),
          "password2".getBytes(), dtId2.getKind(), new Text("service2"));

  final MockRM rm = new TestSecurityMockRM(conf, null) {
    @Override
    protected DelegationTokenRenewer createDelegationTokenRenewer() {
      return new DelegationTokenRenewer() {
        @Override
        protected Token<?>[] obtainSystemTokensForUser(String user,
            final Credentials credentials) throws IOException {
          credentials.addToken(token2.getService(), token2);
          return new Token<?>[] { token2 };
        }
      };
    }
  };
  rm.start();

  // submit an app without delegationToken
  RMApp app = rm.submitApp(200);

  // wait for the new retrieved hdfs token.
  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    public Boolean get() {
      return rm.getRMContext().getDelegationTokenRenewer()
          .getDelegationTokens().contains(token2);
    }
  }, 1000, 20000);

  // check nm can retrieve the token
  final MockNM nm1 =
      new MockNM("127.0.0.1:1234", 15120, rm.getResourceTrackerService());
  nm1.registerNode();
  NodeHeartbeatResponse response = nm1.nodeHeartbeat(true);
  ByteBuffer tokenBuffer =
      response.getSystemCredentialsForApps().get(app.getApplicationId());
  Assert.assertNotNull(tokenBuffer);
  Credentials appCredentials = new Credentials();
  DataInputByteBuffer buf = new DataInputByteBuffer();
  tokenBuffer.rewind();
  buf.reset(tokenBuffer);
  appCredentials.readTokenStorageStream(buf);
  Assert.assertTrue(appCredentials.getAllTokens().contains(token2));
}
Example 18
Source File: TestTaskAttemptContainerRequest.java From big-c with Apache License 2.0

@Test
public void testAttemptContainerRequest() throws Exception {
  final Text SECRET_KEY_ALIAS = new Text("secretkeyalias");
  final byte[] SECRET_KEY = ("secretkey").getBytes();
  Map<ApplicationAccessType, String> acls =
      new HashMap<ApplicationAccessType, String>(1);
  acls.put(ApplicationAccessType.VIEW_APP, "otheruser");
  ApplicationId appId = ApplicationId.newInstance(1, 1);
  JobId jobId = MRBuilderUtils.newJobId(appId, 1);
  TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
  Path jobFile = mock(Path.class);

  EventHandler eventHandler = mock(EventHandler.class);
  TaskAttemptListener taListener = mock(TaskAttemptListener.class);
  when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost", 0));

  JobConf jobConf = new JobConf();
  jobConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
  jobConf.setBoolean("fs.file.impl.disable.cache", true);
  jobConf.set(JobConf.MAPRED_MAP_TASK_ENV, "");

  // setup UGI for security so tokens and keys are preserved
  jobConf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
  UserGroupInformation.setConfiguration(jobConf);

  Credentials credentials = new Credentials();
  credentials.addSecretKey(SECRET_KEY_ALIAS, SECRET_KEY);
  Token<JobTokenIdentifier> jobToken = new Token<JobTokenIdentifier>(
      ("tokenid").getBytes(), ("tokenpw").getBytes(),
      new Text("tokenkind"), new Text("tokenservice"));

  TaskAttemptImpl taImpl =
      new MapTaskAttemptImpl(taskId, 1, eventHandler, jobFile, 1,
          mock(TaskSplitMetaInfo.class), jobConf, taListener,
          jobToken, credentials,
          new SystemClock(), null);

  jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID, taImpl.getID().toString());

  ContainerLaunchContext launchCtx =
      TaskAttemptImpl.createContainerLaunchContext(acls,
          jobConf, jobToken, taImpl.createRemoteTask(),
          TypeConverter.fromYarn(jobId),
          mock(WrappedJvmID.class), taListener,
          credentials);

  Assert.assertEquals("ACLs mismatch", acls, launchCtx.getApplicationACLs());
  Credentials launchCredentials = new Credentials();

  DataInputByteBuffer dibb = new DataInputByteBuffer();
  dibb.reset(launchCtx.getTokens());
  launchCredentials.readTokenStorageStream(dibb);

  // verify all tokens specified for the task attempt are in the launch context
  for (Token<? extends TokenIdentifier> token : credentials.getAllTokens()) {
    Token<? extends TokenIdentifier> launchToken =
        launchCredentials.getToken(token.getService());
    Assert.assertNotNull("Token " + token.getService() + " is missing",
        launchToken);
    Assert.assertEquals("Token " + token.getService() + " mismatch",
        token, launchToken);
  }

  // verify the secret key is in the launch context
  Assert.assertNotNull("Secret key missing",
      launchCredentials.getSecretKey(SECRET_KEY_ALIAS));
  Assert.assertTrue("Secret key mismatch",
      Arrays.equals(SECRET_KEY, launchCredentials.getSecretKey(SECRET_KEY_ALIAS)));
}
Example 19
Source File: ContainerLocalizer.java From hadoop with Apache License 2.0

@SuppressWarnings("deprecation")
public int runLocalization(final InetSocketAddress nmAddr)
    throws IOException, InterruptedException {
  // load credentials
  initDirs(conf, user, appId, lfs, localDirs);
  final Credentials creds = new Credentials();
  DataInputStream credFile = null;
  try {
    // assume credentials in cwd
    // TODO: Fix
    Path tokenPath =
        new Path(String.format(TOKEN_FILE_NAME_FMT, localizerId));
    credFile = lfs.open(tokenPath);
    creds.readTokenStorageStream(credFile);
    // Explicitly deleting token file.
    lfs.delete(tokenPath, false);
  } finally {
    if (credFile != null) {
      credFile.close();
    }
  }
  // create localizer context
  UserGroupInformation remoteUser =
      UserGroupInformation.createRemoteUser(user);
  remoteUser.addToken(creds.getToken(LocalizerTokenIdentifier.KIND));
  final LocalizationProtocol nodeManager =
      remoteUser.doAs(new PrivilegedAction<LocalizationProtocol>() {
        @Override
        public LocalizationProtocol run() {
          return getProxy(nmAddr);
        }
      });

  // create user context
  UserGroupInformation ugi =
      UserGroupInformation.createRemoteUser(user);
  for (Token<? extends TokenIdentifier> token : creds.getAllTokens()) {
    ugi.addToken(token);
  }

  ExecutorService exec = null;
  try {
    exec = createDownloadThreadPool();
    CompletionService<Path> ecs = createCompletionService(exec);
    localizeFiles(nodeManager, ecs, ugi);
    return 0;
  } catch (Throwable e) {
    // Print traces to stdout so that they can be logged by the NM address
    // space.
    e.printStackTrace(System.out);
    return -1;
  } finally {
    try {
      if (exec != null) {
        exec.shutdownNow();
      }
      LocalDirAllocator.removeContext(appCacheDirContextName);
    } finally {
      closeFileSystems(ugi);
    }
  }
}
Example 20
Source File: UtilsTest.java From flink with Apache License 2.0

@Test
public void testCreateTaskExecutorCredentials() throws Exception {
  File root = temporaryFolder.getRoot();
  File home = new File(root, "home");
  boolean created = home.mkdir();
  assertTrue(created);

  Configuration flinkConf = new Configuration();
  YarnConfiguration yarnConf = new YarnConfiguration();

  Map<String, String> env = new HashMap<>();
  env.put(YarnConfigKeys.ENV_APP_ID, "foo");
  env.put(YarnConfigKeys.ENV_CLIENT_HOME_DIR, home.getAbsolutePath());
  env.put(YarnConfigKeys.ENV_CLIENT_SHIP_FILES, "");
  env.put(YarnConfigKeys.ENV_FLINK_CLASSPATH, "");
  env.put(YarnConfigKeys.ENV_HADOOP_USER_NAME, "foo");
  env.put(YarnConfigKeys.FLINK_JAR_PATH, root.toURI().toString());
  env = Collections.unmodifiableMap(env);

  File credentialFile = temporaryFolder.newFile("container_tokens");
  final Text amRmTokenKind = AMRMTokenIdentifier.KIND_NAME;
  final Text hdfsDelegationTokenKind = new Text("HDFS_DELEGATION_TOKEN");
  final Text service = new Text("test-service");
  Credentials amCredentials = new Credentials();
  amCredentials.addToken(amRmTokenKind,
      new Token<>(new byte[4], new byte[4], amRmTokenKind, service));
  amCredentials.addToken(hdfsDelegationTokenKind,
      new Token<>(new byte[4], new byte[4], hdfsDelegationTokenKind, service));
  amCredentials.writeTokenStorageFile(
      new org.apache.hadoop.fs.Path(credentialFile.getAbsolutePath()), yarnConf);

  ContaineredTaskManagerParameters tmParams =
      new ContaineredTaskManagerParameters(64, 64, 16, 1, new HashMap<>(1));
  Configuration taskManagerConf = new Configuration();

  String workingDirectory = root.getAbsolutePath();
  Class<?> taskManagerMainClass = YarnTaskExecutorRunner.class;
  ContainerLaunchContext ctx;

  final Map<String, String> originalEnv = System.getenv();
  try {
    Map<String, String> systemEnv = new HashMap<>(originalEnv);
    systemEnv.put("HADOOP_TOKEN_FILE_LOCATION", credentialFile.getAbsolutePath());
    CommonTestUtils.setEnv(systemEnv);
    ctx = Utils.createTaskExecutorContext(flinkConf, yarnConf, env, tmParams,
        taskManagerConf, workingDirectory, taskManagerMainClass, LOG);
  } finally {
    CommonTestUtils.setEnv(originalEnv);
  }

  Credentials credentials = new Credentials();
  try (DataInputStream dis = new DataInputStream(
      new ByteArrayInputStream(ctx.getTokens().array()))) {
    credentials.readTokenStorageStream(dis);
  }
  Collection<Token<? extends TokenIdentifier>> tokens = credentials.getAllTokens();
  boolean hasHdfsDelegationToken = false;
  boolean hasAmRmToken = false;
  for (Token<? extends TokenIdentifier> token : tokens) {
    if (token.getKind().equals(amRmTokenKind)) {
      hasAmRmToken = true;
    } else if (token.getKind().equals(hdfsDelegationTokenKind)) {
      hasHdfsDelegationToken = true;
    }
  }
  assertTrue(hasHdfsDelegationToken);
  assertFalse(hasAmRmToken);
}