Java Code Examples for org.apache.hadoop.mapreduce.security.TokenCache#getShuffleSecretKey()
The following examples show how to use
org.apache.hadoop.mapreduce.security.TokenCache#getShuffleSecretKey() .
Each example is taken from a real project; the header above each snippet gives the source file, the project it comes from, and that project's license.
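Before the examples, here is a minimal self-contained sketch (not taken from any of the projects below) of the round trip this API supports: setShuffleSecretKey(byte[], Credentials) stores the shuffle secret in a Credentials object, and getShuffleSecretKey(Credentials) reads it back, returning null when no secret has been stored.

import org.apache.hadoop.mapreduce.security.TokenCache;
import org.apache.hadoop.security.Credentials;

public class ShuffleSecretRoundTrip {
  public static void main(String[] args) {
    Credentials credentials = new Credentials();

    // Nothing has been stored yet, so the lookup returns null.
    System.out.println(TokenCache.getShuffleSecretKey(credentials)); // null

    // Store an arbitrary secret and read it back.
    byte[] secret = "example-shuffle-secret".getBytes();
    TokenCache.setShuffleSecretKey(secret, credentials);
    byte[] roundTripped = TokenCache.getShuffleSecretKey(credentials);
    System.out.println("secret present: " + (roundTripped != null)); // true
  }
}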
Example 1
Source File: JobImpl.java From hadoop with Apache License 2.0
protected void setup(JobImpl job) throws IOException {
  String oldJobIDString = job.oldJobId.toString();
  String user =
      UserGroupInformation.getCurrentUser().getShortUserName();
  Path path = MRApps.getStagingAreaDir(job.conf, user);
  if (LOG.isDebugEnabled()) {
    LOG.debug("startJobs: parent=" + path + " child=" + oldJobIDString);
  }

  job.remoteJobSubmitDir = FileSystem.get(job.conf).makeQualified(
      new Path(path, oldJobIDString));
  job.remoteJobConfFile =
      new Path(job.remoteJobSubmitDir, MRJobConfig.JOB_CONF_FILE);

  // Prepare the TaskAttemptListener server for authentication of containers;
  // TaskAttemptListener gets the information via jobTokenSecretManager.
  JobTokenIdentifier identifier =
      new JobTokenIdentifier(new Text(oldJobIDString));
  job.jobToken =
      new Token<JobTokenIdentifier>(identifier, job.jobTokenSecretManager);
  job.jobToken.setService(identifier.getJobId());

  // Add it to the jobTokenSecretManager so that the TaskAttemptListener
  // server can authenticate containers (tasks).
  job.jobTokenSecretManager.addTokenForJob(oldJobIDString, job.jobToken);
  LOG.info("Adding job token for " + oldJobIDString
      + " to jobTokenSecretManager");

  // If the job client did not set up the shuffle secret, reuse
  // the job token secret for the shuffle.
  if (TokenCache.getShuffleSecretKey(job.jobCredentials) == null) {
    LOG.warn("Shuffle secret key missing from job credentials."
        + " Using job token secret as shuffle secret.");
    TokenCache.setShuffleSecretKey(job.jobToken.getPassword(),
        job.jobCredentials);
  }
}
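The part relevant to this page is the final if block: when the job client did not put a shuffle secret into the job's Credentials, the application master falls back to the job token's password. Reduced to the bare pattern, with hypothetical local names (credentials, jobToken):

// Fallback sketch: reuse the job token password as the shuffle secret
// when the client did not supply one.
if (TokenCache.getShuffleSecretKey(credentials) == null) {
  TokenCache.setShuffleSecretKey(jobToken.getPassword(), credentials);
}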
Example 2
Source File: MRAppMaster.java From hadoop with Apache License 2.0
private void processRecovery() throws IOException {
  if (appAttemptID.getAttemptId() == 1) {
    return;  // no need to recover on the first attempt
  }

  boolean recoveryEnabled = getConfig().getBoolean(
      MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE,
      MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE_DEFAULT);
  boolean recoverySupportedByCommitter = isRecoverySupported();

  // If a shuffle secret was not provided by the job client then this app
  // attempt will generate one. However that disables recovery if there
  // are reducers as the shuffle secret would be app attempt specific.
  int numReduceTasks = getConfig().getInt(MRJobConfig.NUM_REDUCES, 0);
  boolean shuffleKeyValidForRecovery =
      TokenCache.getShuffleSecretKey(jobCredentials) != null;

  if (recoveryEnabled && recoverySupportedByCommitter
      && (numReduceTasks <= 0 || shuffleKeyValidForRecovery)) {
    LOG.info("Recovery is enabled. "
        + "Will try to recover from previous life on best effort basis.");
    try {
      parsePreviousJobHistory();
    } catch (IOException e) {
      LOG.warn("Unable to parse prior job history, aborting recovery", e);
      // try to get just the AMInfos
      amInfos.addAll(readJustAMInfos());
    }
  } else {
    LOG.info("Will not try to recover. recoveryEnabled: " + recoveryEnabled
        + " recoverySupportedByCommitter: " + recoverySupportedByCommitter
        + " numReduceTasks: " + numReduceTasks
        + " shuffleKeyValidForRecovery: " + shuffleKeyValidForRecovery
        + " ApplicationAttemptID: " + appAttemptID.getAttemptId());
    // Get the amInfos anyways whether recovery is enabled or not
    amInfos.addAll(readJustAMInfos());
  }
}
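Here getShuffleSecretKey is used purely as a presence check: a non-null result means the job client supplied the shuffle secret, so the secret is identical across application attempts and recovery is safe even when reducers exist. The gating predicate, restated on its own with the method's local names:

// Recovery proceeds only if it is enabled, the committer supports it, and
// either there are no reducers or the shuffle key came from the client.
boolean canRecover = recoveryEnabled
    && recoverySupportedByCommitter
    && (numReduceTasks <= 0 || shuffleKeyValidForRecovery);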
Example 3
Source File: JobImpl.java From big-c with Apache License 2.0
protected void setup(JobImpl job) throws IOException {
  String oldJobIDString = job.oldJobId.toString();
  String user =
      UserGroupInformation.getCurrentUser().getShortUserName();
  Path path = MRApps.getStagingAreaDir(job.conf, user);
  if (LOG.isDebugEnabled()) {
    LOG.debug("startJobs: parent=" + path + " child=" + oldJobIDString);
  }

  job.remoteJobSubmitDir = FileSystem.get(job.conf).makeQualified(
      new Path(path, oldJobIDString));
  job.remoteJobConfFile =
      new Path(job.remoteJobSubmitDir, MRJobConfig.JOB_CONF_FILE);

  // Prepare the TaskAttemptListener server for authentication of containers;
  // TaskAttemptListener gets the information via jobTokenSecretManager.
  JobTokenIdentifier identifier =
      new JobTokenIdentifier(new Text(oldJobIDString));
  job.jobToken =
      new Token<JobTokenIdentifier>(identifier, job.jobTokenSecretManager);
  job.jobToken.setService(identifier.getJobId());

  // Add it to the jobTokenSecretManager so that the TaskAttemptListener
  // server can authenticate containers (tasks).
  job.jobTokenSecretManager.addTokenForJob(oldJobIDString, job.jobToken);
  LOG.info("Adding job token for " + oldJobIDString
      + " to jobTokenSecretManager");

  // If the job client did not set up the shuffle secret, reuse
  // the job token secret for the shuffle.
  if (TokenCache.getShuffleSecretKey(job.jobCredentials) == null) {
    LOG.warn("Shuffle secret key missing from job credentials."
        + " Using job token secret as shuffle secret.");
    TokenCache.setShuffleSecretKey(job.jobToken.getPassword(),
        job.jobCredentials);
  }
}
Example 4
Source File: MRAppMaster.java From big-c with Apache License 2.0
private void processRecovery() throws IOException {
  if (appAttemptID.getAttemptId() == 1) {
    return;  // no need to recover on the first attempt
  }

  boolean recoveryEnabled = getConfig().getBoolean(
      MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE,
      MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE_DEFAULT);
  boolean recoverySupportedByCommitter = isRecoverySupported();

  // If a shuffle secret was not provided by the job client then this app
  // attempt will generate one. However that disables recovery if there
  // are reducers as the shuffle secret would be app attempt specific.
  int numReduceTasks = getConfig().getInt(MRJobConfig.NUM_REDUCES, 0);
  boolean shuffleKeyValidForRecovery =
      TokenCache.getShuffleSecretKey(jobCredentials) != null;

  if (recoveryEnabled && recoverySupportedByCommitter
      && (numReduceTasks <= 0 || shuffleKeyValidForRecovery)) {
    LOG.info("Recovery is enabled. "
        + "Will try to recover from previous life on best effort basis.");
    try {
      parsePreviousJobHistory();
    } catch (IOException e) {
      LOG.warn("Unable to parse prior job history, aborting recovery", e);
      // try to get just the AMInfos
      amInfos.addAll(readJustAMInfos());
    }
  } else {
    LOG.info("Will not try to recover. recoveryEnabled: " + recoveryEnabled
        + " recoverySupportedByCommitter: " + recoverySupportedByCommitter
        + " numReduceTasks: " + numReduceTasks
        + " shuffleKeyValidForRecovery: " + shuffleKeyValidForRecovery
        + " ApplicationAttemptID: " + appAttemptID.getAttemptId());
    // Get the amInfos anyways whether recovery is enabled or not
    amInfos.addAll(readJustAMInfos());
  }
}
Example 5
Source File: YarnChild.java From hadoop with Apache License 2.0
private static void configureTask(JobConf job, Task task,
    Credentials credentials, Token<JobTokenIdentifier> jt) throws IOException {
  job.setCredentials(credentials);

  ApplicationAttemptId appAttemptId = ConverterUtils.toContainerId(
      System.getenv(Environment.CONTAINER_ID.name()))
      .getApplicationAttemptId();
  LOG.debug("APPLICATION_ATTEMPT_ID: " + appAttemptId);
  // Set it in conf so that it can be used by the OutputCommitter.
  job.setInt(MRJobConfig.APPLICATION_ATTEMPT_ID,
      appAttemptId.getAttemptId());

  // set tcp nodelay
  job.setBoolean("ipc.client.tcpnodelay", true);
  job.setClass(MRConfig.TASK_LOCAL_OUTPUT_CLASS,
      YarnOutputFiles.class, MapOutputFile.class);

  // set the jobToken and shuffle secrets into the task
  task.setJobTokenSecret(
      JobTokenSecretManager.createSecretKey(jt.getPassword()));
  byte[] shuffleSecret = TokenCache.getShuffleSecretKey(credentials);
  if (shuffleSecret == null) {
    LOG.warn("Shuffle secret missing from task credentials."
        + " Using job token secret as shuffle secret.");
    shuffleSecret = jt.getPassword();
  }
  task.setShuffleSecret(
      JobTokenSecretManager.createSecretKey(shuffleSecret));

  // set up the child's MRConfig.LOCAL_DIR
  configureLocalDirs(task, job);

  // set up the child's attempt directories;
  // do the task-type specific localization
  task.localizeConfiguration(job);

  // Set up the DistributedCache related configs
  MRApps.setupDistributedCacheLocal(job);

  // Overwrite the localized task jobconf which is linked to in the current
  // work-dir.
  Path localTaskFile = new Path(MRJobConfig.JOB_CONF_FILE);
  writeLocalJobFile(localTaskFile, job);
  task.setJobFile(localTaskFile.toString());
  task.setConf(job);
}
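On the task side the raw bytes are not used directly; they are wrapped into a javax.crypto.SecretKey via JobTokenSecretManager.createSecretKey before being handed to the task. A short sketch of just that derivation, assuming a Credentials object and a job token (hypothetical names credentials and jobToken) are already in hand:

import javax.crypto.SecretKey;
import org.apache.hadoop.mapreduce.security.TokenCache;
import org.apache.hadoop.mapreduce.security.token.JobTokenSecretManager;

// Prefer the secret shipped in the credentials; otherwise fall back to
// the job token's password, mirroring the example above.
byte[] shuffleSecret = TokenCache.getShuffleSecretKey(credentials);
if (shuffleSecret == null) {
  shuffleSecret = jobToken.getPassword();
}
SecretKey shuffleKey = JobTokenSecretManager.createSecretKey(shuffleSecret);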
Example 6
Source File: YarnChild.java From big-c with Apache License 2.0
private static void configureTask(JobConf job, Task task,
    Credentials credentials, Token<JobTokenIdentifier> jt) throws IOException {
  job.setCredentials(credentials);

  ApplicationAttemptId appAttemptId = ConverterUtils.toContainerId(
      System.getenv(Environment.CONTAINER_ID.name()))
      .getApplicationAttemptId();
  LOG.debug("APPLICATION_ATTEMPT_ID: " + appAttemptId);
  // Set it in conf so that it can be used by the OutputCommitter.
  job.setInt(MRJobConfig.APPLICATION_ATTEMPT_ID,
      appAttemptId.getAttemptId());

  // set tcp nodelay
  job.setBoolean("ipc.client.tcpnodelay", true);
  job.setClass(MRConfig.TASK_LOCAL_OUTPUT_CLASS,
      YarnOutputFiles.class, MapOutputFile.class);

  // set the jobToken and shuffle secrets into the task
  task.setJobTokenSecret(
      JobTokenSecretManager.createSecretKey(jt.getPassword()));
  byte[] shuffleSecret = TokenCache.getShuffleSecretKey(credentials);
  if (shuffleSecret == null) {
    LOG.warn("Shuffle secret missing from task credentials."
        + " Using job token secret as shuffle secret.");
    shuffleSecret = jt.getPassword();
  }
  task.setShuffleSecret(
      JobTokenSecretManager.createSecretKey(shuffleSecret));

  // set up the child's MRConfig.LOCAL_DIR
  configureLocalDirs(task, job);

  // set up the child's attempt directories;
  // do the task-type specific localization
  task.localizeConfiguration(job);

  // Set up the DistributedCache related configs
  MRApps.setupDistributedCacheLocal(job);

  // Overwrite the localized task jobconf which is linked to in the current
  // work-dir.
  Path localTaskFile = new Path(MRJobConfig.JOB_CONF_FILE);
  writeLocalJobFile(localTaskFile, job);
  task.setJobFile(localTaskFile.toString());
  task.setConf(job);
}