Java Code Examples for org.apache.hadoop.mapreduce.JobStatus#isJobComplete()
The following examples show how to use org.apache.hadoop.mapreduce.JobStatus#isJobComplete().
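All five examples hinge on the same check: JobStatus#isJobComplete() returns true once a job has reached a terminal state (SUCCEEDED, FAILED, or KILLED). Before the real-world examples, here is a minimal, self-contained sketch of the pattern; the class name and the five-second poll interval are illustrative assumptions, not part of the Hadoop API.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Cluster;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobID;

public class WaitForJobCompletion {
    public static void main(String[] args) throws Exception {
        Cluster cluster = new Cluster(new Configuration());
        JobID jobId = JobID.forName(args[0]); // e.g. "job_1400000000000_0001"
        Job job = cluster.getJob(jobId);      // null if the cluster does not know the job
        while (job != null && !job.getStatus().isJobComplete()) {
            Thread.sleep(5000); // poll every five seconds
        }
    }
}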
Example 1
Source File: KillJobByRegex.java From datawave with Apache License 2.0
public static void main(String[] args) throws IOException, InterruptedException {
    Configuration conf = new Configuration();
    GenericOptionsParser parser = new GenericOptionsParser(conf, args);
    args = parser.getRemainingArgs();

    if (args.length != 1) {
        System.err.println("usage: KillJobByRegex jobNamePattern");
        System.exit(1);
    }

    NAME_PATTERN = Pattern.compile(args[0]);

    org.apache.hadoop.mapred.JobConf jobConf = new org.apache.hadoop.mapred.JobConf(conf);
    Cluster cluster = new Cluster(jobConf);

    for (JobStatus js : cluster.getAllJobStatuses()) {
        if (!js.isJobComplete()) {
            JOB_KILLER_SVC.execute(new JobKiller(cluster, js));
        }
    }

    try {
        JOB_KILLER_SVC.shutdown(); // signal shutdown
        JOB_KILLER_SVC.awaitTermination(1, TimeUnit.MINUTES); // allow processes to stop
    } catch (InterruptedException e) {
        JOB_KILLER_SVC.shutdownNow();
    }

    System.out.println("Killed " + JOB_KILLED_COUNT.get() + " jobs");
    System.out.println("Failed to kill " + JOB_FAILED_COUNT.get() + " jobs");
    System.exit(0);
}
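Example 1 relies on several members that the snippet does not show: NAME_PATTERN, JOB_KILLER_SVC, JOB_KILLED_COUNT, JOB_FAILED_COUNT, and the JobKiller runnable. The following is a hedged reconstruction inferred only from the call sites above; the actual datawave implementation may differ.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.regex.Pattern;
import org.apache.hadoop.mapreduce.Cluster;
import org.apache.hadoop.mapreduce.JobStatus;

// Hypothetical reconstructions of the helpers used in Example 1.
private static Pattern NAME_PATTERN;
private static final ExecutorService JOB_KILLER_SVC = Executors.newFixedThreadPool(10);
private static final AtomicInteger JOB_KILLED_COUNT = new AtomicInteger(0);
private static final AtomicInteger JOB_FAILED_COUNT = new AtomicInteger(0);

private static class JobKiller implements Runnable {
    private final Cluster cluster;
    private final JobStatus js;

    JobKiller(Cluster cluster, JobStatus js) {
        this.cluster = cluster;
        this.js = js;
    }

    @Override
    public void run() {
        // Only kill jobs whose name matches the requested regex.
        if (!NAME_PATTERN.matcher(js.getJobName()).matches()) {
            return;
        }
        try {
            cluster.getJob(js.getJobID()).killJob();
            JOB_KILLED_COUNT.incrementAndGet();
        } catch (Exception e) {
            JOB_FAILED_COUNT.incrementAndGet();
        }
    }
}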
Example 2
Source File: CLI.java From hadoop with Apache License 2.0
/**
 * Dump a list of currently running jobs.
 * @throws IOException
 */
private void listJobs(Cluster cluster) throws IOException, InterruptedException {
    List<JobStatus> runningJobs = new ArrayList<JobStatus>();
    for (JobStatus job : cluster.getAllJobStatuses()) {
        if (!job.isJobComplete()) {
            runningJobs.add(job);
        }
    }
    displayJobList(runningJobs.toArray(new JobStatus[0]));
}
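The same filter is easy to run standalone. Below is a minimal, self-contained sketch (the class name is an illustrative assumption) that prints the ID and state of every job on the cluster that is not yet complete:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Cluster;
import org.apache.hadoop.mapreduce.JobStatus;

public class ListRunningJobs {
    public static void main(String[] args) throws Exception {
        Cluster cluster = new Cluster(new Configuration());
        for (JobStatus status : cluster.getAllJobStatuses()) {
            if (!status.isJobComplete()) {
                // Tab-separated job ID and current state (e.g. RUNNING, PREP)
                System.out.println(status.getJobID() + "\t" + status.getState());
            }
        }
    }
}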
Example 3
Source File: CLI.java From big-c with Apache License 2.0
/**
 * Dump a list of currently running jobs.
 * @throws IOException
 */
private void listJobs(Cluster cluster) throws IOException, InterruptedException {
    List<JobStatus> runningJobs = new ArrayList<JobStatus>();
    for (JobStatus job : cluster.getAllJobStatuses()) {
        if (!job.isJobComplete()) {
            runningJobs.add(job);
        }
    }
    displayJobList(runningJobs.toArray(new JobStatus[0]));
}
Example 4
Source File: ClusterDriver.java From incubator-retired-blur with Apache License 2.0
private void stopAllExistingMRJobs(String blurEnv, Configuration conf)
        throws YarnException, IOException, InterruptedException {
    Cluster cluster = new Cluster(conf);
    JobStatus[] allJobStatuses = cluster.getAllJobStatuses();
    for (JobStatus jobStatus : allJobStatuses) {
        if (jobStatus.isJobComplete()) {
            continue;
        }
        String jobFile = jobStatus.getJobFile();
        JobID jobID = jobStatus.getJobID();
        Job job = cluster.getJob(jobID);
        FileSystem fileSystem = FileSystem.get(job.getConfiguration());
        Configuration configuration = new Configuration(false);
        Path path = new Path(jobFile);
        Path makeQualified = path.makeQualified(fileSystem.getUri(), fileSystem.getWorkingDirectory());
        if (hasReadAccess(fileSystem, makeQualified)) {
            try (FSDataInputStream in = fileSystem.open(makeQualified)) {
                configuration.addResource(copy(in));
            }
            String jobBlurEnv = configuration.get(BLUR_ENV);
            LOG.info("Checking job [{0}] has env [{1}] current env set to [{2}]", jobID, jobBlurEnv, blurEnv);
            if (blurEnv.equals(jobBlurEnv)) {
                LOG.info("Killing running job [{0}]", jobID);
                job.killJob();
            }
        }
    }
}
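Example 4 calls two private helpers, copy(...) and hasReadAccess(...), that the snippet does not include. Here is a hedged sketch of plausible implementations, inferred from the call sites alone; the real incubator-retired-blur code may differ, and FileSystem#access requires Hadoop 2.6 or later.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.io.IOUtils;

// Buffer the job file into memory so the Configuration can still read it
// after try-with-resources has closed the HDFS stream.
private static InputStream copy(FSDataInputStream in) throws IOException {
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    IOUtils.copyBytes(in, out, 4096, false);
    return new ByteArrayInputStream(out.toByteArray());
}

// Probe read permission without throwing; access() raises a subclass of
// IOException when the caller cannot read the path.
private static boolean hasReadAccess(FileSystem fileSystem, Path path) {
    try {
        fileSystem.access(path, FsAction.READ);
        return true;
    } catch (IOException e) {
        return false;
    }
}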
Example 5
Source File: JobClient.java From sequenceiq-samples with Apache License 2.0
public static void main(String[] args) {
    try {
        JobClient jobClient = new JobClient();
        QueueOrchestrator qo = new QueueOrchestrator();
        HttpClient client = new HttpClient();
        ObjectMapper mapper = new ObjectMapper();
        String schedulerURL = "http://sandbox.hortonworks.com:8088/ws/v1/cluster/scheduler";

        LOGGER.info("Starting YARN Capacity Queue Test");
        LOGGER.info("yarn.scheduler.capacity.root.queues = default,highPriority,lowPriority");
        LOGGER.info("yarn.scheduler.capacity.root.highPriority.capacity = 70");
        LOGGER.info("yarn.scheduler.capacity.root.lowPriority.capacity = 20");
        LOGGER.info("yarn.scheduler.capacity.root.highPriority.default = 10");
        LOGGER.info("Scheduler URL: {}", schedulerURL);

        MRJobStatus mrJobStatus = new MRJobStatus();
        QueueInformation queueInformation = new QueueInformation();

        // Create low priority setup - low priority root queue (capacity-scheduler.xml)
        Path tempDirLow = jobClient.createTempDir("lowPriority");
        // Create high priority setup - high priority root queue (capacity-scheduler.xml)
        Path tempDirHigh = jobClient.createTempDir("highPriority");

        String lowPriorityQueue = "lowPriority";
        String highPriorityQueue = "highPriority";

        // Create a YARNRunner to use for job status listing; which queue backs the
        // configuration does not matter, as the runner only retrieves status info.
        Configuration lowPriorityConf = qo.getConfiguration(lowPriorityQueue);
        YARNRunner yarnRunner = new YARNRunner(lowPriorityConf);

        JobID lowPriorityJobID = qo.submitJobsIntoQueues(lowPriorityQueue, tempDirLow);
        JobID highPriorityJobID = qo.submitJobsIntoQueues(highPriorityQueue, tempDirHigh);

        // List low and high priority job status
        JobStatus lowPriorityJobStatus = mrJobStatus.printJobStatus(yarnRunner, lowPriorityJobID);
        JobStatus highPriorityJobStatus = mrJobStatus.printJobStatus(yarnRunner, highPriorityJobID);

        // List job statuses and queue information until the low priority job completes
        while (!lowPriorityJobStatus.isJobComplete()) {
            highPriorityJobStatus = mrJobStatus.printJobStatus(yarnRunner, highPriorityJobID);
            lowPriorityJobStatus = mrJobStatus.printJobStatus(yarnRunner, lowPriorityJobID);
            queueInformation.printQueueInfo(client, mapper, schedulerURL);
            Thread.sleep(1000);
        }
    } catch (Exception e) {
        LOGGER.error("Exception occurred", e);
    }
}
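As a side note on the poll loop above, Hadoop's Job API also provides a blocking helper that replaces a hand-rolled loop. A brief sketch, assuming a Cluster handle for the same cluster the YARNRunner talks to:

// Assumes 'cluster' is an org.apache.hadoop.mapreduce.Cluster reachable from
// this client; getJob(...) returns null when the job is unknown.
Job job = cluster.getJob(lowPriorityJobID);
if (job != null) {
    // Polls the job, prints progress and counters, and returns true on success.
    job.monitorAndPrintJob();
}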