Java Code Examples for org.apache.hadoop.mapred.RunningJob#killJob()
The following examples show how to use org.apache.hadoop.mapred.RunningJob#killJob().
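All of the examples below follow the same basic pattern: obtain a JobClient, look up the RunningJob for a given JobID, and call killJob() on it. The following sketch distills that pattern; it is a minimal illustration rather than code from any of the listed projects, and the class name KillJobSketch, the method killIfRunning, and the jobIdString placeholder are invented for this example.

import java.io.IOException;

import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.JobID;
import org.apache.hadoop.mapred.RunningJob;

public class KillJobSketch {

  /**
   * Looks up a job by its textual ID (e.g. "job_201403311234_0001") and
   * kills it if it is still running. Returns true if a kill was requested.
   */
  public static boolean killIfRunning(JobConf conf, String jobIdString) throws IOException {
    JobClient jobClient = new JobClient(conf);
    try {
      RunningJob runningJob = jobClient.getJob(JobID.forName(jobIdString));
      if (runningJob != null && !runningJob.isComplete()) {
        runningJob.killJob(); // ask the framework to kill the running job
        return true;
      }
      return false;
    } finally {
      jobClient.close();
    }
  }
}

As in the project examples below, callers typically guard the call with a null check (getJob() returns null for unknown job IDs) and skip jobs that have already completed.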
Example 1
Source File: DataJoinJob.java From hadoop with Apache License 2.0
/**
 * Submit/run a map/reduce job.
 *
 * @param job
 * @return true for success
 * @throws IOException
 */
public static boolean runJob(JobConf job) throws IOException {
  JobClient jc = new JobClient(job);
  boolean sucess = true;
  RunningJob running = null;
  try {
    running = jc.submitJob(job);
    JobID jobId = running.getID();
    System.out.println("Job " + jobId + " is submitted");
    while (!running.isComplete()) {
      System.out.println("Job " + jobId + " is still running.");
      try {
        Thread.sleep(60000);
      } catch (InterruptedException e) {
      }
      running = jc.getJob(jobId);
    }
    sucess = running.isSuccessful();
  } finally {
    if (!sucess && (running != null)) {
      running.killJob();
    }
    jc.close();
  }
  return sucess;
}
Example 2
Source File: DataJoinJob.java From big-c with Apache License 2.0
/**
 * Submit/run a map/reduce job.
 *
 * @param job
 * @return true for success
 * @throws IOException
 */
public static boolean runJob(JobConf job) throws IOException {
  JobClient jc = new JobClient(job);
  boolean sucess = true;
  RunningJob running = null;
  try {
    running = jc.submitJob(job);
    JobID jobId = running.getID();
    System.out.println("Job " + jobId + " is submitted");
    while (!running.isComplete()) {
      System.out.println("Job " + jobId + " is still running.");
      try {
        Thread.sleep(60000);
      } catch (InterruptedException e) {
      }
      running = jc.getJob(jobId);
    }
    sucess = running.isSuccessful();
  } finally {
    if (!sucess && (running != null)) {
      running.killJob();
    }
    jc.close();
  }
  return sucess;
}
Example 3
Source File: MapReduceLauncher.java From spork with Apache License 2.0
@Override
public void killJob(String jobID, Configuration conf) throws BackendException {
  try {
    if (conf != null) {
      JobConf jobConf = new JobConf(conf);
      JobClient jc = new JobClient(jobConf);
      JobID id = JobID.forName(jobID);
      RunningJob job = jc.getJob(id);
      if (job == null)
        System.out.println("Job with id " + jobID + " is not active");
      else {
        job.killJob();
        log.info("Kill " + id + " submitted.");
      }
    }
  } catch (IOException e) {
    throw new BackendException(e);
  }
}
Example 4
Source File: DataJoinJob.java From RDFS with Apache License 2.0
/**
 * Submit/run a map/reduce job.
 *
 * @param job
 * @return true for success
 * @throws IOException
 */
public static boolean runJob(JobConf job) throws IOException {
  JobClient jc = new JobClient(job);
  boolean sucess = true;
  RunningJob running = null;
  try {
    running = jc.submitJob(job);
    JobID jobId = running.getID();
    System.out.println("Job " + jobId + " is submitted");
    while (!running.isComplete()) {
      System.out.println("Job " + jobId + " is still running.");
      try {
        Thread.sleep(60000);
      } catch (InterruptedException e) {
      }
      running = jc.getJob(jobId);
    }
    sucess = running.isSuccessful();
  } finally {
    if (!sucess && (running != null)) {
      running.killJob();
    }
    jc.close();
  }
  return sucess;
}
Example 5
Source File: DataJoinJob.java From hadoop-gpu with Apache License 2.0
/**
 * Submit/run a map/reduce job.
 *
 * @param job
 * @return true for success
 * @throws IOException
 */
public static boolean runJob(JobConf job) throws IOException {
  JobClient jc = new JobClient(job);
  boolean sucess = true;
  RunningJob running = null;
  try {
    running = jc.submitJob(job);
    JobID jobId = running.getID();
    System.out.println("Job " + jobId + " is submitted");
    while (!running.isComplete()) {
      System.out.println("Job " + jobId + " is still running.");
      try {
        Thread.sleep(60000);
      } catch (InterruptedException e) {
      }
      running = jc.getJob(jobId);
    }
    sucess = running.isSuccessful();
  } finally {
    if (!sucess && (running != null)) {
      running.killJob();
    }
    jc.close();
  }
  return sucess;
}
Example 6
Source File: MapreduceSubmissionEngine.java From sqoop-on-spark with Apache License 2.0
/**
 * {@inheritDoc}
 */
@Override
public void stop(String externalJobId) {
  try {
    RunningJob runningJob = jobClient.getJob(JobID.forName(externalJobId));
    if (runningJob == null) {
      return;
    }

    runningJob.killJob();
  } catch (IOException e) {
    throw new SqoopException(MapreduceSubmissionError.MAPREDUCE_0003, e);
  }
}
Example 7
Source File: PigJobServerImpl.java From oink with Apache License 2.0
@Override
public boolean cancelRequest(String requestId) throws IOException {
  PigRequestStats stats = this.getRequestStats(requestId);

  if (stats.getStatus().equals(Status.SUBMITTED.toString())) {
    List<String> jobs = stats.getJobs();
    for (String job : jobs) {
      job = job.substring(JT_UI.length());
      JobConf jobConf = new JobConf();
      jobConf.set("fs.default.name", PropertyLoader.getInstance().getProperty("fs.default.name"));
      jobConf.set("mapred.job.tracker", PropertyLoader.getInstance().getProperty("jobtracker"));
      try {
        JobClient jobClient = new JobClient(jobConf);
        RunningJob rJob = jobClient.getJob(JobID.forName(job));

        if (!rJob.isComplete()) {
          rJob.killJob();
        }
      } catch (Exception e) {
        throw new IOException("Unable to kill job " + job);
      }
    }
    PigRequestStats requestStats = new PigRequestStats(0, 0, null, jobs.size());
    requestStats.setJobs(jobs);
    requestStats.setStatus(Status.KILLED.toString());
    Path statsPath = new Path(PropertyLoader.getInstance().getProperty(Constants.REQUEST_PATH) + requestId + "/stats");
    PigUtils.writeStatsFile(statsPath, requestStats);
    return true;
  } else {
    return false;
  }
}
Example 8
Source File: HadoopShims.java From spork with Apache License 2.0
public static void killJob(Job job) throws IOException {
  RunningJob runningJob = job.getJobClient().getJob(job.getAssignedJobID());
  if (runningJob != null)
    runningJob.killJob();
}