Java Code Examples for org.apache.hadoop.mapreduce.Cluster#getJob()
The following examples show how to use org.apache.hadoop.mapreduce.Cluster#getJob().
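Before the project code, here is a minimal, self-contained sketch of the pattern every example below follows: build a Cluster from a client Configuration, look up a Job by its JobID, and handle the null that getJob() returns when the cluster has no record of that ID. This sketch is not taken from any of the projects below, and the job ID string is a hypothetical placeholder.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Cluster;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobID;

public class GetJobSketch {

  public static void main(String[] args) throws IOException, InterruptedException {
    Configuration conf = new Configuration();
    // Cluster is the client-side view of the MapReduce cluster.
    Cluster cluster = new Cluster(conf);
    try {
      // Placeholder job ID; substitute a real ID from your cluster.
      Job job = cluster.getJob(JobID.forName("job_201103121733_0001"));
      if (job == null) {
        // getJob() returns null when no job with this ID is known.
        System.out.println("No such job");
      } else if (job.isComplete()) {
        System.out.println("Job finished, successful = " + job.isSuccessful());
      } else {
        System.out.println("Job state: " + job.getStatus().getState());
      }
    } finally {
      cluster.close();
    }
  }
}

Note that getJob() and most Job methods declare InterruptedException alongside IOException; the examples below typically wrap the former in an IOException at their API boundary.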
Example 1
Source File: MapReduceBackupCopyJob.java, from hbase (Apache License 2.0)
// Looks up the copy job by its string ID and kills it unless it has already finished.
@Override
public void cancel(String jobId) throws IOException {
  JobID id = JobID.forName(jobId);
  Cluster cluster = new Cluster(this.getConf());
  try {
    Job job = cluster.getJob(id);
    if (job == null) {
      LOG.error("No job found for " + id);
      // should we throw exception
      return;
    }
    if (job.isComplete() || job.isRetired()) {
      return;
    }
    job.killJob();
    LOG.debug("Killed copy job " + id);
  } catch (InterruptedException e) {
    throw new IOException(e);
  }
}
Example 2
Source File: HadoopShims.java, from spork (Apache License 2.0)
public static Iterator<TaskReport> getTaskReports(Job job, TaskType type) throws IOException {
  if (job.getJobConf().getBoolean(PigConfiguration.PIG_NO_TASK_REPORT, false)) {
    LOG.info("TaskReports are disabled for job: " + job.getAssignedJobID());
    return null;
  }
  Cluster cluster = new Cluster(job.getJobConf());
  try {
    org.apache.hadoop.mapreduce.Job mrJob = cluster.getJob(job.getAssignedJobID());
    if (mrJob == null) { // In local mode, mrJob will be null
      mrJob = job.getJob();
    }
    org.apache.hadoop.mapreduce.TaskReport[] reports = mrJob.getTaskReports(type);
    return DowngradeHelper.downgradeTaskReports(reports);
  } catch (InterruptedException ir) {
    throw new IOException(ir);
  }
}
Example 3
Source File: ClusterDriver.java, from incubator-retired-blur (Apache License 2.0)
// Scans every incomplete job on the cluster and kills those whose stored job
// configuration matches this Blur environment.
private void stopAllExistingMRJobs(String blurEnv, Configuration conf)
    throws YarnException, IOException, InterruptedException {
  Cluster cluster = new Cluster(conf);
  JobStatus[] allJobStatuses = cluster.getAllJobStatuses();
  for (JobStatus jobStatus : allJobStatuses) {
    if (jobStatus.isJobComplete()) {
      continue;
    }
    String jobFile = jobStatus.getJobFile();
    JobID jobID = jobStatus.getJobID();
    Job job = cluster.getJob(jobID);
    FileSystem fileSystem = FileSystem.get(job.getConfiguration());
    Configuration configuration = new Configuration(false);
    Path path = new Path(jobFile);
    Path makeQualified = path.makeQualified(fileSystem.getUri(), fileSystem.getWorkingDirectory());
    if (hasReadAccess(fileSystem, makeQualified)) {
      try (FSDataInputStream in = fileSystem.open(makeQualified)) {
        configuration.addResource(copy(in));
      }
      String jobBlurEnv = configuration.get(BLUR_ENV);
      LOG.info("Checking job [{0}] has env [{1}] current env set to [{2}]", jobID, jobBlurEnv, blurEnv);
      if (blurEnv.equals(jobBlurEnv)) {
        LOG.info("Killing running job [{0}]", jobID);
        job.killJob();
      }
    }
  }
}
Example 4
Source File: HadoopShims.java, from spork (Apache License 2.0)
public static Counters getCounters(Job job) throws IOException {
  try {
    Cluster cluster = new Cluster(job.getJobConf());
    org.apache.hadoop.mapreduce.Job mrJob = cluster.getJob(job.getAssignedJobID());
    if (mrJob == null) { // In local mode, mrJob will be null
      mrJob = job.getJob();
    }
    return new Counters(mrJob.getCounters());
  } catch (Exception ir) {
    throw new IOException(ir);
  }
}
Example 5
Source File: IndexRebuildTask.java, from phoenix (Apache License 2.0)
// Checks the outcome of the index-rebuild MR job whose ID is stored in the task record.
@Override
public TaskRegionObserver.TaskResult checkCurrentResult(Task.TaskRecord taskRecord) throws Exception {
  String jobID = getJobID(taskRecord.getData());
  if (jobID != null) {
    Configuration conf = HBaseConfiguration.create(env.getConfiguration());
    Configuration configuration = HBaseConfiguration.addHbaseResources(conf);
    Cluster cluster = new Cluster(configuration);
    Job job = cluster.getJob(org.apache.hadoop.mapreduce.JobID.forName(jobID));
    if (job == null) {
      return new TaskRegionObserver.TaskResult(TaskRegionObserver.TaskResultCode.SKIPPED, "");
    }
    if (job != null && job.isComplete()) {
      if (job.isSuccessful()) {
        LOGGER.warn("IndexRebuildTask checkCurrentResult job is successful " + taskRecord.getTableName());
        return new TaskRegionObserver.TaskResult(TaskRegionObserver.TaskResultCode.SUCCESS, "");
      } else {
        return new TaskRegionObserver.TaskResult(TaskRegionObserver.TaskResultCode.FAIL, "Index is DISABLED");
      }
    }
  }
  return null;
}
Example 6
Source File: TestClientRedirect.java, from hadoop (Apache License 2.0)
@Test
public void testRedirect() throws Exception {
  Configuration conf = new YarnConfiguration();
  conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.YARN_FRAMEWORK_NAME);
  conf.set(YarnConfiguration.RM_ADDRESS, RMADDRESS);
  conf.set(JHAdminConfig.MR_HISTORY_ADDRESS, HSHOSTADDRESS);

  // Start the RM.
  RMService rmService = new RMService("test");
  rmService.init(conf);
  rmService.start();

  // Start the AM.
  AMService amService = new AMService();
  amService.init(conf);
  amService.start(conf);

  // Start the HS.
  HistoryService historyService = new HistoryService();
  historyService.init(conf);
  historyService.start(conf);

  LOG.info("services started");

  Cluster cluster = new Cluster(conf);
  org.apache.hadoop.mapreduce.JobID jobID =
      new org.apache.hadoop.mapred.JobID("201103121733", 1);
  org.apache.hadoop.mapreduce.Counters counters = cluster.getJob(jobID).getCounters();
  validateCounters(counters);
  Assert.assertTrue(amContact);

  LOG.info("Sleeping for 5 seconds before stop for"
      + " the client socket to not get EOF immediately..");
  Thread.sleep(5000);

  // bring down the AM service
  amService.stop();

  LOG.info("Sleeping for 5 seconds after stop for"
      + " the server to exit cleanly..");
  Thread.sleep(5000);

  amRestarting = true;

  // Same client
  // results are returned from fake (not started job)
  counters = cluster.getJob(jobID).getCounters();
  Assert.assertEquals(0, counters.countCounters());
  Job job = cluster.getJob(jobID);
  org.apache.hadoop.mapreduce.TaskID taskId =
      new org.apache.hadoop.mapreduce.TaskID(jobID, TaskType.MAP, 0);
  TaskAttemptID tId = new TaskAttemptID(taskId, 0);

  // invoke all methods to check that no exception is thrown
  job.killJob();
  job.killTask(tId);
  job.failTask(tId);
  job.getTaskCompletionEvents(0, 100);
  job.getStatus();
  job.getTaskDiagnostics(tId);
  job.getTaskReports(TaskType.MAP);
  job.getTrackingURL();

  amRestarting = false;
  amService = new AMService();
  amService.init(conf);
  amService.start(conf);
  amContact = false; // reset

  counters = cluster.getJob(jobID).getCounters();
  validateCounters(counters);
  Assert.assertTrue(amContact);

  // Stop the AM. It is not even restarting. So it should be treated as
  // completed.
  amService.stop();

  // Same client
  counters = cluster.getJob(jobID).getCounters();
  validateCounters(counters);
  Assert.assertTrue(hsContact);

  rmService.stop();
  historyService.stop();
}