Java Code Examples for org.apache.hadoop.mapred.JobClient#getJob()
The following examples show how to use org.apache.hadoop.mapred.JobClient#getJob().
You can go to the original project or source file by following the links above each example.
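Before the project examples, here is a minimal, self-contained sketch of the call itself (not taken from any project below): build a JobClient from a JobConf, look a job up by its ID, and inspect the returned RunningJob, which may be null if the cluster no longer knows about that job. The class name and the job ID string are hypothetical placeholders.

import java.io.IOException;

import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.JobID;
import org.apache.hadoop.mapred.RunningJob;

public class GetJobSketch {
  public static void main(String[] args) throws IOException {
    JobConf conf = new JobConf(); // picks up cluster settings from the classpath
    JobClient client = new JobClient(conf);
    try {
      // "job_201901010000_0001" is a placeholder; substitute a real job ID.
      RunningJob job = client.getJob(JobID.forName("job_201901010000_0001"));
      if (job == null) {
        System.out.println("Job not found (it may have been retired).");
      } else {
        System.out.println("complete=" + job.isComplete()
            + ", successful=" + (job.isComplete() && job.isSuccessful()));
      }
    } finally {
      client.close();
    }
  }
}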
Example 1
Source File: DataJoinJob.java From hadoop with Apache License 2.0
/**
 * Submit/run a map/reduce job.
 *
 * @param job
 * @return true for success
 * @throws IOException
 */
public static boolean runJob(JobConf job) throws IOException {
  JobClient jc = new JobClient(job);
  boolean success = true;
  RunningJob running = null;
  try {
    running = jc.submitJob(job);
    JobID jobId = running.getID();
    System.out.println("Job " + jobId + " is submitted");
    while (!running.isComplete()) {
      System.out.println("Job " + jobId + " is still running.");
      try {
        Thread.sleep(60000);
      } catch (InterruptedException e) {
      }
      // Refresh the handle; getJob() returns the latest status for this ID.
      running = jc.getJob(jobId);
    }
    success = running.isSuccessful();
  } finally {
    if (!success && (running != null)) {
      running.killJob();
    }
    jc.close();
  }
  return success;
}
Example 2
Source File: DataJoinJob.java From big-c with Apache License 2.0
/**
 * Submit/run a map/reduce job.
 *
 * @param job
 * @return true for success
 * @throws IOException
 */
public static boolean runJob(JobConf job) throws IOException {
  JobClient jc = new JobClient(job);
  boolean success = true;
  RunningJob running = null;
  try {
    running = jc.submitJob(job);
    JobID jobId = running.getID();
    System.out.println("Job " + jobId + " is submitted");
    while (!running.isComplete()) {
      System.out.println("Job " + jobId + " is still running.");
      try {
        Thread.sleep(60000);
      } catch (InterruptedException e) {
      }
      // Refresh the handle; getJob() returns the latest status for this ID.
      running = jc.getJob(jobId);
    }
    success = running.isSuccessful();
  } finally {
    if (!success && (running != null)) {
      running.killJob();
    }
    jc.close();
  }
  return success;
}
Example 3
Source File: BoaOutputCommitter.java From compiler with Apache License 2.0
@Override
public void abortJob(final JobContext context, final JobStatus.State runState) throws java.io.IOException {
  super.abortJob(context, runState);

  // Reconnect to the job through the old mapred API so task diagnostics can be collected.
  final JobClient jobClient = new JobClient(new JobConf(context.getConfiguration()));
  final RunningJob job = jobClient.getJob(
      (org.apache.hadoop.mapred.JobID) JobID.forName(context.getConfiguration().get("mapred.job.id")));

  String diag = "";
  for (final TaskCompletionEvent event : job.getTaskCompletionEvents(0))
    switch (event.getTaskStatus()) {
      case SUCCEEDED:
        break;
      default:
        diag += "Diagnostics for: " + event.getTaskTrackerHttp() + "\n";
        for (final String s : job.getTaskDiagnostics(event.getTaskAttemptId()))
          diag += s + "\n";
        diag += "\n";
        break;
    }
  updateStatus(diag, context.getConfiguration().getInt("boa.hadoop.jobid", 0));
}
Example 4
Source File: MRPigStatsUtil.java From spork with Apache License 2.0
/**
 * Returns the count for the given counter name in the counter group
 * 'MultiStoreCounters'
 *
 * @param job the MR job
 * @param jobClient the Hadoop job client
 * @param counterName the counter name
 * @return the count of the given counter name
 */
public static long getMultiStoreCount(Job job, JobClient jobClient, String counterName) {
  long value = -1;
  try {
    RunningJob rj = jobClient.getJob(job.getAssignedJobID());
    if (rj != null) {
      Counters.Counter counter = rj.getCounters().getGroup(
          MULTI_STORE_COUNTER_GROUP).getCounterForName(counterName);
      value = counter.getValue();
    }
  } catch (IOException e) {
    LOG.warn("Failed to get the counter for " + counterName, e);
  }
  return value;
}
Example 5
Source File: MapReduceLauncher.java From spork with Apache License 2.0
@Override
public void killJob(String jobID, Configuration conf) throws BackendException {
  try {
    if (conf != null) {
      JobConf jobConf = new JobConf(conf);
      JobClient jc = new JobClient(jobConf);
      JobID id = JobID.forName(jobID);
      RunningJob job = jc.getJob(id);
      if (job == null)
        System.out.println("Job with id " + jobID + " is not active");
      else {
        job.killJob();
        log.info("Kill " + id + " submitted.");
      }
    }
  } catch (IOException e) {
    throw new BackendException(e);
  }
}
Example 6
Source File: DataJoinJob.java From RDFS with Apache License 2.0
/**
 * Submit/run a map/reduce job.
 *
 * @param job
 * @return true for success
 * @throws IOException
 */
public static boolean runJob(JobConf job) throws IOException {
  JobClient jc = new JobClient(job);
  boolean success = true;
  RunningJob running = null;
  try {
    running = jc.submitJob(job);
    JobID jobId = running.getID();
    System.out.println("Job " + jobId + " is submitted");
    while (!running.isComplete()) {
      System.out.println("Job " + jobId + " is still running.");
      try {
        Thread.sleep(60000);
      } catch (InterruptedException e) {
      }
      // Refresh the handle; getJob() returns the latest status for this ID.
      running = jc.getJob(jobId);
    }
    success = running.isSuccessful();
  } finally {
    if (!success && (running != null)) {
      running.killJob();
    }
    jc.close();
  }
  return success;
}
Example 7
Source File: DataJoinJob.java From hadoop-gpu with Apache License 2.0
/**
 * Submit/run a map/reduce job.
 *
 * @param job
 * @return true for success
 * @throws IOException
 */
public static boolean runJob(JobConf job) throws IOException {
  JobClient jc = new JobClient(job);
  boolean success = true;
  RunningJob running = null;
  try {
    running = jc.submitJob(job);
    JobID jobId = running.getID();
    System.out.println("Job " + jobId + " is submitted");
    while (!running.isComplete()) {
      System.out.println("Job " + jobId + " is still running.");
      try {
        Thread.sleep(60000);
      } catch (InterruptedException e) {
      }
      // Refresh the handle; getJob() returns the latest status for this ID.
      running = jc.getJob(jobId);
    }
    success = running.isSuccessful();
  } finally {
    if (!success && (running != null)) {
      running.killJob();
    }
    jc.close();
  }
  return success;
}
Example 8
Source File: PigJobServerImpl.java From oink with Apache License 2.0
@Override
public boolean cancelRequest(String requestId) throws IOException {
  PigRequestStats stats = this.getRequestStats(requestId);
  if (stats.getStatus().equals(Status.SUBMITTED.toString())) {
    List<String> jobs = stats.getJobs();
    for (String job : jobs) {
      job = job.substring(JT_UI.length());
      JobConf jobConf = new JobConf();
      jobConf.set("fs.default.name", PropertyLoader.getInstance().getProperty("fs.default.name"));
      jobConf.set("mapred.job.tracker", PropertyLoader.getInstance().getProperty("jobtracker"));
      try {
        JobClient jobClient = new JobClient(jobConf);
        RunningJob rJob = jobClient.getJob(JobID.forName(job));
        if (!rJob.isComplete()) {
          rJob.killJob();
        }
      } catch (Exception e) {
        throw new IOException("Unable to kill job " + job);
      }
    }
    PigRequestStats requestStats = new PigRequestStats(0, 0, null, jobs.size());
    requestStats.setJobs(jobs);
    requestStats.setStatus(Status.KILLED.toString());
    Path statsPath = new Path(PropertyLoader.getInstance().getProperty(Constants.REQUEST_PATH) + requestId + "/stats");
    PigUtils.writeStatsFile(statsPath, requestStats);
    return true;
  } else {
    return false;
  }
}
Example 9
Source File: StreamJob.java From RDFS with Apache License 2.0
public int submitAndMonitorJob() throws IOException {
  if (jar_ != null && isLocalHadoop()) {
    // getAbs became required when shell and subvm have different working dirs...
    File wd = new File(".").getAbsoluteFile();
    StreamUtil.unJar(new File(jar_), wd);
  }
  // if jobConf_ changes must recreate a JobClient
  jc_ = new JobClient(jobConf_);
  boolean error = true;
  running_ = null;
  String lastReport = null;
  try {
    running_ = jc_.submitJob(jobConf_);
    jobId_ = running_.getJobID();
    LOG.info("getLocalDirs(): " + Arrays.asList(jobConf_.getLocalDirs()));
    LOG.info("Running job: " + jobId_);
    jobInfo();
    while (!running_.isComplete()) {
      try {
        Thread.sleep(1000);
      } catch (InterruptedException e) {
      }
      // Poll for fresh status once a second.
      running_ = jc_.getJob(jobId_);
      String report = " map " + Math.round(running_.mapProgress() * 100)
          + "% reduce " + Math.round(running_.reduceProgress() * 100) + "%";
      if (!report.equals(lastReport)) {
        LOG.info(report);
        lastReport = report;
      }
    }
    if (!running_.isSuccessful()) {
      jobInfo();
      LOG.error("Job not Successful!");
      return 1;
    }
    LOG.info("Job complete: " + jobId_);
    LOG.info("Output: " + output_);
    error = false;
  } catch (FileNotFoundException fe) {
    LOG.error("Error launching job, bad input path: " + fe.getMessage());
    return 2;
  } catch (InvalidJobConfException je) {
    LOG.error("Error launching job, invalid job conf: " + je.getMessage());
    return 3;
  } catch (FileAlreadyExistsException fae) {
    LOG.error("Error launching job, output path already exists: " + fae.getMessage());
    return 4;
  } catch (IOException ioe) {
    LOG.error("Error launching job: " + ioe.getMessage());
    return 5;
  } finally {
    if (error && (running_ != null)) {
      LOG.info("killJob...");
      running_.killJob();
    }
    jc_.close();
  }
  return 0;
}
Example 10
Source File: StreamJob.java From RDFS with Apache License 2.0
public int submitAndMonitorJob() throws IOException {
  if (jar_ != null && isLocalHadoop()) {
    // getAbs became required when shell and subvm have different working dirs...
    File wd = new File(".").getAbsoluteFile();
    StreamUtil.unJar(new File(jar_), wd);
  }
  // if jobConf_ changes must recreate a JobClient
  jc_ = new JobClient(jobConf_);
  boolean error = true;
  running_ = null;
  String lastReport = null;
  try {
    running_ = jc_.submitJob(jobConf_);
    jobId_ = running_.getID();
    LOG.info("getLocalDirs(): " + Arrays.asList(jobConf_.getLocalDirs()));
    LOG.info("Running job: " + jobId_);
    jobInfo();
    while (!running_.isComplete()) {
      try {
        Thread.sleep(1000);
      } catch (InterruptedException e) {
      }
      // Poll for fresh status once a second.
      running_ = jc_.getJob(jobId_);
      String report = " map " + Math.round(running_.mapProgress() * 100)
          + "% reduce " + Math.round(running_.reduceProgress() * 100) + "%";
      if (!report.equals(lastReport)) {
        LOG.info(report);
        lastReport = report;
      }
    }
    if (!running_.isSuccessful()) {
      jobInfo();
      LOG.error("Job not Successful!");
      return 1;
    }
    LOG.info("Job complete: " + jobId_);
    LOG.info("Output: " + output_);
    error = false;
  } catch (FileNotFoundException fe) {
    LOG.error("Error launching job, bad input path: " + fe.getMessage());
    return 2;
  } catch (InvalidJobConfException je) {
    LOG.error("Error launching job, invalid job conf: " + je.getMessage());
    return 3;
  } catch (FileAlreadyExistsException fae) {
    LOG.error("Error launching job, output path already exists: " + fae.getMessage());
    return 4;
  } catch (IOException ioe) {
    LOG.error("Error launching job: " + ioe.getMessage());
    return 5;
  } finally {
    if (error && (running_ != null)) {
      LOG.info("killJob...");
      running_.killJob();
    }
    jc_.close();
  }
  return 0;
}
Example 11
Source File: StreamJob.java From hadoop-gpu with Apache License 2.0
public int submitAndMonitorJob() throws IOException {
  if (jar_ != null && isLocalHadoop()) {
    // getAbs became required when shell and subvm have different working dirs...
    File wd = new File(".").getAbsoluteFile();
    StreamUtil.unJar(new File(jar_), wd);
  }
  // if jobConf_ changes must recreate a JobClient
  jc_ = new JobClient(jobConf_);
  boolean error = true;
  running_ = null;
  String lastReport = null;
  try {
    running_ = jc_.submitJob(jobConf_);
    jobId_ = running_.getID();
    LOG.info("getLocalDirs(): " + Arrays.asList(jobConf_.getLocalDirs()));
    LOG.info("Running job: " + jobId_);
    jobInfo();
    while (!running_.isComplete()) {
      try {
        Thread.sleep(1000);
      } catch (InterruptedException e) {
      }
      // Poll for fresh status once a second.
      running_ = jc_.getJob(jobId_);
      String report = " map " + Math.round(running_.mapProgress() * 100)
          + "% reduce " + Math.round(running_.reduceProgress() * 100) + "%";
      if (!report.equals(lastReport)) {
        LOG.info(report);
        lastReport = report;
      }
    }
    if (!running_.isSuccessful()) {
      jobInfo();
      LOG.error("Job not Successful!");
      return 1;
    }
    LOG.info("Job complete: " + jobId_);
    LOG.info("Output: " + output_);
    error = false;
  } catch (FileNotFoundException fe) {
    LOG.error("Error launching job, bad input path: " + fe.getMessage());
    return 2;
  } catch (InvalidJobConfException je) {
    LOG.error("Error launching job, invalid job conf: " + je.getMessage());
    return 3;
  } catch (FileAlreadyExistsException fae) {
    LOG.error("Error launching job, output path already exists: " + fae.getMessage());
    return 4;
  } catch (IOException ioe) {
    LOG.error("Error launching job: " + ioe.getMessage());
    return 5;
  } finally {
    if (error && (running_ != null)) {
      LOG.info("killJob...");
      running_.killJob();
    }
    jc_.close();
  }
  return 0;
}