Java Code Examples for org.apache.hadoop.mapred.RunningJob#isComplete()
The following examples show how to use org.apache.hadoop.mapred.RunningJob#isComplete().
Each example notes the original project and source file it was taken from.
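All of the examples below share the same basic pattern: submit a job, poll RunningJob#isComplete() until the job reaches a terminal state, then check RunningJob#isSuccessful(). The following minimal sketch shows that pattern in isolation. It is an illustrative assumption, not code from any of the projects below: the class name JobCompletionPoller, the 5-second polling interval, and the expectation that the caller has already configured the JobConf are all made up for this sketch.

import java.io.IOException;

import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RunningJob;

// Hypothetical helper; the JobConf passed in is assumed to be fully
// configured (input/output paths, mapper, reducer, formats) by the caller.
public class JobCompletionPoller {

  /**
   * Submits the given job and blocks until RunningJob#isComplete() returns true.
   * The 5-second polling interval is an arbitrary choice for illustration.
   *
   * @return true if the job completed successfully, false otherwise
   */
  public static boolean submitAndWait(JobConf conf)
      throws IOException, InterruptedException {
    JobClient client = new JobClient(conf);
    try {
      RunningJob running = client.submitJob(conf);
      // isComplete() becomes true once the job reaches a terminal state,
      // whether it succeeded, failed, or was killed.
      while (!running.isComplete()) {
        Thread.sleep(5000);
      }
      // Only after completion does isSuccessful() distinguish success from failure.
      return running.isSuccessful();
    } finally {
      client.close();
    }
  }
}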
Example 1
Source File: DataJoinJob.java From hadoop with Apache License 2.0
/**
 * Submit/run a map/reduce job.
 *
 * @param job
 * @return true for success
 * @throws IOException
 */
public static boolean runJob(JobConf job) throws IOException {
  JobClient jc = new JobClient(job);
  boolean sucess = true;
  RunningJob running = null;
  try {
    running = jc.submitJob(job);
    JobID jobId = running.getID();
    System.out.println("Job " + jobId + " is submitted");
    while (!running.isComplete()) {
      System.out.println("Job " + jobId + " is still running.");
      try {
        Thread.sleep(60000);
      } catch (InterruptedException e) {
      }
      running = jc.getJob(jobId);
    }
    sucess = running.isSuccessful();
  } finally {
    if (!sucess && (running != null)) {
      running.killJob();
    }
    jc.close();
  }
  return sucess;
}
Example 2
Source File: DataJoinJob.java From big-c with Apache License 2.0
/**
 * Submit/run a map/reduce job.
 *
 * @param job
 * @return true for success
 * @throws IOException
 */
public static boolean runJob(JobConf job) throws IOException {
  JobClient jc = new JobClient(job);
  boolean sucess = true;
  RunningJob running = null;
  try {
    running = jc.submitJob(job);
    JobID jobId = running.getID();
    System.out.println("Job " + jobId + " is submitted");
    while (!running.isComplete()) {
      System.out.println("Job " + jobId + " is still running.");
      try {
        Thread.sleep(60000);
      } catch (InterruptedException e) {
      }
      running = jc.getJob(jobId);
    }
    sucess = running.isSuccessful();
  } finally {
    if (!sucess && (running != null)) {
      running.killJob();
    }
    jc.close();
  }
  return sucess;
}
Example 3
Source File: DataJoinJob.java From RDFS with Apache License 2.0
/**
 * Submit/run a map/reduce job.
 *
 * @param job
 * @return true for success
 * @throws IOException
 */
public static boolean runJob(JobConf job) throws IOException {
  JobClient jc = new JobClient(job);
  boolean sucess = true;
  RunningJob running = null;
  try {
    running = jc.submitJob(job);
    JobID jobId = running.getID();
    System.out.println("Job " + jobId + " is submitted");
    while (!running.isComplete()) {
      System.out.println("Job " + jobId + " is still running.");
      try {
        Thread.sleep(60000);
      } catch (InterruptedException e) {
      }
      running = jc.getJob(jobId);
    }
    sucess = running.isSuccessful();
  } finally {
    if (!sucess && (running != null)) {
      running.killJob();
    }
    jc.close();
  }
  return sucess;
}
Example 4
Source File: CartesianCommentComparison.java From hadoop-map-reduce-patterns with Apache License 2.0
@Override
public int run(String[] args) throws Exception {
  if (args.length != 2) {
    System.err.println("Usage: CartesianCommentComparison <in> <out>");
    ToolRunner.printGenericCommandUsage(System.err);
    System.exit(2);
  }
  // Configure the join type
  JobConf conf = new JobConf("Cartesian Product");
  conf.setJarByClass(CartesianCommentComparison.class);
  conf.setMapperClass(CartesianMapper.class);
  conf.setNumReduceTasks(0);
  conf.setInputFormat(CartesianInputFormat.class);
  // Configure the input format
  CartesianInputFormat.setLeftInputInfo(conf, TextInputFormat.class, args[0]);
  CartesianInputFormat.setRightInputInfo(conf, TextInputFormat.class, args[0]);
  TextOutputFormat.setOutputPath(conf, new Path(args[1]));
  conf.setOutputKeyClass(Text.class);
  conf.setOutputValueClass(Text.class);
  RunningJob job = JobClient.runJob(conf);
  while (!job.isComplete()) {
    Thread.sleep(1000);
  }
  return job.isSuccessful() ? 0 : 1;
}
Example 5
Source File: DataJoinJob.java From hadoop-gpu with Apache License 2.0
/**
 * Submit/run a map/reduce job.
 *
 * @param job
 * @return true for success
 * @throws IOException
 */
public static boolean runJob(JobConf job) throws IOException {
  JobClient jc = new JobClient(job);
  boolean sucess = true;
  RunningJob running = null;
  try {
    running = jc.submitJob(job);
    JobID jobId = running.getID();
    System.out.println("Job " + jobId + " is submitted");
    while (!running.isComplete()) {
      System.out.println("Job " + jobId + " is still running.");
      try {
        Thread.sleep(60000);
      } catch (InterruptedException e) {
      }
      running = jc.getJob(jobId);
    }
    sucess = running.isSuccessful();
  } finally {
    if (!sucess && (running != null)) {
      running.killJob();
    }
    jc.close();
  }
  return sucess;
}
Example 6
Source File: PigJobServerImpl.java From oink with Apache License 2.0
@Override
public boolean cancelRequest(String requestId) throws IOException {
  PigRequestStats stats = this.getRequestStats(requestId);
  if (stats.getStatus().equals(Status.SUBMITTED.toString())) {
    List<String> jobs = stats.getJobs();
    for (String job : jobs) {
      job = job.substring(JT_UI.length());
      JobConf jobConf = new JobConf();
      jobConf.set("fs.default.name",
          PropertyLoader.getInstance().getProperty("fs.default.name"));
      jobConf.set("mapred.job.tracker",
          PropertyLoader.getInstance().getProperty("jobtracker"));
      try {
        JobClient jobClient = new JobClient(jobConf);
        RunningJob rJob = jobClient.getJob(JobID.forName(job));
        if (!rJob.isComplete()) {
          rJob.killJob();
        }
      } catch (Exception e) {
        throw new IOException("Unable to kill job " + job);
      }
    }
    PigRequestStats requestStats = new PigRequestStats(0, 0, null, jobs.size());
    requestStats.setJobs(jobs);
    requestStats.setStatus(Status.KILLED.toString());
    Path statsPath = new Path(
        PropertyLoader.getInstance().getProperty(Constants.REQUEST_PATH)
        + requestId + "/stats");
    PigUtils.writeStatsFile(statsPath, requestStats);
    return true;
  } else {
    return false;
  }
}
Example 7
Source File: CompositeUserJoin.java From hadoop-map-reduce-patterns with Apache License 2.0
@Override
public int run(String[] args) throws Exception {
  if (args.length != 4) {
    printUsage();
  }
  Path userPath = new Path(args[0]);
  Path commentPath = new Path(args[1]);
  Path outputDir = new Path(args[2]);
  String joinType = args[3];
  JobConf conf = new JobConf("CompositeJoin");
  conf.setJarByClass(CompositeUserJoin.class);
  conf.setMapperClass(CompositeMapper.class);
  conf.setNumReduceTasks(0);
  // Set the input format class to a CompositeInputFormat class.
  // The CompositeInputFormat will parse all of our input files and output
  // records to our mapper.
  conf.setInputFormat(CompositeInputFormat.class);
  // The composite input format join expression will set how the records
  // are going to be read in, and in what input format.
  conf.set("mapred.join.expr", CompositeInputFormat.compose(joinType,
      KeyValueTextInputFormat.class, userPath, commentPath));
  TextOutputFormat.setOutputPath(conf, outputDir);
  conf.setOutputKeyClass(Text.class);
  conf.setOutputValueClass(Text.class);
  RunningJob job = JobClient.runJob(conf);
  while (!job.isComplete()) {
    Thread.sleep(1000);
  }
  return job.isSuccessful() ? 0 : 1;
}
Example 8
Source File: TestKeyFieldBasedComparator.java From hadoop with Apache License 2.0
public void configure(String keySpec, int expect) throws Exception {
  Path testdir = new Path(TEST_DIR.getAbsolutePath());
  Path inDir = new Path(testdir, "in");
  Path outDir = new Path(testdir, "out");
  FileSystem fs = getFileSystem();
  fs.delete(testdir, true);
  conf.setInputFormat(TextInputFormat.class);
  FileInputFormat.setInputPaths(conf, inDir);
  FileOutputFormat.setOutputPath(conf, outDir);
  conf.setOutputKeyClass(Text.class);
  conf.setOutputValueClass(LongWritable.class);
  conf.setNumMapTasks(1);
  conf.setNumReduceTasks(1);
  conf.setOutputFormat(TextOutputFormat.class);
  conf.setOutputKeyComparatorClass(KeyFieldBasedComparator.class);
  conf.setKeyFieldComparatorOptions(keySpec);
  conf.setKeyFieldPartitionerOptions("-k1.1,1.1");
  conf.set(JobContext.MAP_OUTPUT_KEY_FIELD_SEPERATOR, " ");
  conf.setMapperClass(InverseMapper.class);
  conf.setReducerClass(IdentityReducer.class);
  if (!fs.mkdirs(testdir)) {
    throw new IOException("Mkdirs failed to create " + testdir.toString());
  }
  if (!fs.mkdirs(inDir)) {
    throw new IOException("Mkdirs failed to create " + inDir.toString());
  }
  // set up input data in 2 files
  Path inFile = new Path(inDir, "part0");
  FileOutputStream fos = new FileOutputStream(inFile.toString());
  fos.write((line1 + "\n").getBytes());
  fos.write((line2 + "\n").getBytes());
  fos.close();
  JobClient jc = new JobClient(conf);
  RunningJob r_job = jc.submitJob(conf);
  while (!r_job.isComplete()) {
    Thread.sleep(1000);
  }
  if (!r_job.isSuccessful()) {
    fail("Oops! The job broke due to an unexpected error");
  }
  Path[] outputFiles = FileUtil.stat2Paths(
      getFileSystem().listStatus(outDir,
          new Utils.OutputFileUtils.OutputFilesFilter()));
  if (outputFiles.length > 0) {
    InputStream is = getFileSystem().open(outputFiles[0]);
    BufferedReader reader = new BufferedReader(new InputStreamReader(is));
    String line = reader.readLine();
    // make sure we get what we expect as the first line, and also
    // that we have two lines
    if (expect == 1) {
      assertTrue(line.startsWith(line1));
    } else if (expect == 2) {
      assertTrue(line.startsWith(line2));
    }
    line = reader.readLine();
    if (expect == 1) {
      assertTrue(line.startsWith(line2));
    } else if (expect == 2) {
      assertTrue(line.startsWith(line1));
    }
    reader.close();
  }
}
Example 9
Source File: TestKeyFieldBasedComparator.java From big-c with Apache License 2.0
public void configure(String keySpec, int expect) throws Exception {
  Path testdir = new Path(TEST_DIR.getAbsolutePath());
  Path inDir = new Path(testdir, "in");
  Path outDir = new Path(testdir, "out");
  FileSystem fs = getFileSystem();
  fs.delete(testdir, true);
  conf.setInputFormat(TextInputFormat.class);
  FileInputFormat.setInputPaths(conf, inDir);
  FileOutputFormat.setOutputPath(conf, outDir);
  conf.setOutputKeyClass(Text.class);
  conf.setOutputValueClass(LongWritable.class);
  conf.setNumMapTasks(1);
  conf.setNumReduceTasks(1);
  conf.setOutputFormat(TextOutputFormat.class);
  conf.setOutputKeyComparatorClass(KeyFieldBasedComparator.class);
  conf.setKeyFieldComparatorOptions(keySpec);
  conf.setKeyFieldPartitionerOptions("-k1.1,1.1");
  conf.set(JobContext.MAP_OUTPUT_KEY_FIELD_SEPERATOR, " ");
  conf.setMapperClass(InverseMapper.class);
  conf.setReducerClass(IdentityReducer.class);
  if (!fs.mkdirs(testdir)) {
    throw new IOException("Mkdirs failed to create " + testdir.toString());
  }
  if (!fs.mkdirs(inDir)) {
    throw new IOException("Mkdirs failed to create " + inDir.toString());
  }
  // set up input data in 2 files
  Path inFile = new Path(inDir, "part0");
  FileOutputStream fos = new FileOutputStream(inFile.toString());
  fos.write((line1 + "\n").getBytes());
  fos.write((line2 + "\n").getBytes());
  fos.close();
  JobClient jc = new JobClient(conf);
  RunningJob r_job = jc.submitJob(conf);
  while (!r_job.isComplete()) {
    Thread.sleep(1000);
  }
  if (!r_job.isSuccessful()) {
    fail("Oops! The job broke due to an unexpected error");
  }
  Path[] outputFiles = FileUtil.stat2Paths(
      getFileSystem().listStatus(outDir,
          new Utils.OutputFileUtils.OutputFilesFilter()));
  if (outputFiles.length > 0) {
    InputStream is = getFileSystem().open(outputFiles[0]);
    BufferedReader reader = new BufferedReader(new InputStreamReader(is));
    String line = reader.readLine();
    // make sure we get what we expect as the first line, and also
    // that we have two lines
    if (expect == 1) {
      assertTrue(line.startsWith(line1));
    } else if (expect == 2) {
      assertTrue(line.startsWith(line2));
    }
    line = reader.readLine();
    if (expect == 1) {
      assertTrue(line.startsWith(line2));
    } else if (expect == 2) {
      assertTrue(line.startsWith(line1));
    }
    reader.close();
  }
}
Example 10
Source File: TestKeyFieldBasedComparator.java From RDFS with Apache License 2.0
public void configure(String keySpec, int expect) throws Exception {
  Path testdir = new Path("build/test/test.mapred.spill");
  Path inDir = new Path(testdir, "in");
  Path outDir = new Path(testdir, "out");
  FileSystem fs = getFileSystem();
  fs.delete(testdir, true);
  conf.setInputFormat(TextInputFormat.class);
  FileInputFormat.setInputPaths(conf, inDir);
  FileOutputFormat.setOutputPath(conf, outDir);
  conf.setOutputKeyClass(Text.class);
  conf.setOutputValueClass(LongWritable.class);
  conf.setNumMapTasks(1);
  conf.setNumReduceTasks(2);
  conf.setOutputFormat(TextOutputFormat.class);
  conf.setOutputKeyComparatorClass(KeyFieldBasedComparator.class);
  conf.setKeyFieldComparatorOptions(keySpec);
  conf.setKeyFieldPartitionerOptions("-k1.1,1.1");
  conf.set("map.output.key.field.separator", " ");
  conf.setMapperClass(InverseMapper.class);
  conf.setReducerClass(IdentityReducer.class);
  if (!fs.mkdirs(testdir)) {
    throw new IOException("Mkdirs failed to create " + testdir.toString());
  }
  if (!fs.mkdirs(inDir)) {
    throw new IOException("Mkdirs failed to create " + inDir.toString());
  }
  // set up input data in 2 files
  Path inFile = new Path(inDir, "part0");
  FileOutputStream fos = new FileOutputStream(inFile.toString());
  fos.write((line1 + "\n").getBytes());
  fos.write((line2 + "\n").getBytes());
  fos.close();
  JobClient jc = new JobClient(conf);
  RunningJob r_job = jc.submitJob(conf);
  while (!r_job.isComplete()) {
    Thread.sleep(1000);
  }
  if (!r_job.isSuccessful()) {
    fail("Oops! The job broke due to an unexpected error");
  }
  Path[] outputFiles = FileUtil.stat2Paths(
      getFileSystem().listStatus(outDir,
          new Utils.OutputFileUtils.OutputFilesFilter()));
  if (outputFiles.length > 0) {
    InputStream is = getFileSystem().open(outputFiles[0]);
    BufferedReader reader = new BufferedReader(new InputStreamReader(is));
    String line = reader.readLine();
    // make sure we get what we expect as the first line, and also
    // that we have two lines (both the lines must end up in the same
    // reducer since the partitioner takes the same key spec for all
    // lines)
    if (expect == 1) {
      assertTrue(line.startsWith(line1));
    } else if (expect == 2) {
      assertTrue(line.startsWith(line2));
    }
    line = reader.readLine();
    if (expect == 1) {
      assertTrue(line.startsWith(line2));
    } else if (expect == 2) {
      assertTrue(line.startsWith(line1));
    }
    reader.close();
  }
}
Example 11
Source File: TestPipes.java From hadoop-gpu with Apache License 2.0
private void runProgram(MiniMRCluster mr, MiniDFSCluster dfs,
    Path program, Path inputPath, Path outputPath,
    int numMaps, int numReduces,
    String[] expectedResults) throws IOException {
  Path wordExec = new Path("/testing/bin/application");
  JobConf job = mr.createJobConf();
  job.setNumMapTasks(numMaps);
  job.setNumReduceTasks(numReduces);
  {
    FileSystem fs = dfs.getFileSystem();
    fs.delete(wordExec.getParent(), true);
    fs.copyFromLocalFile(program, wordExec);
    Submitter.setExecutable(job, fs.makeQualified(wordExec).toString());
    Submitter.setIsJavaRecordReader(job, true);
    Submitter.setIsJavaRecordWriter(job, true);
    FileInputFormat.setInputPaths(job, inputPath);
    FileOutputFormat.setOutputPath(job, outputPath);
    RunningJob rJob = null;
    if (numReduces == 0) {
      rJob = Submitter.jobSubmit(job);
      while (!rJob.isComplete()) {
        try {
          Thread.sleep(1000);
        } catch (InterruptedException ie) {
          throw new RuntimeException(ie);
        }
      }
    } else {
      rJob = Submitter.runJob(job);
    }
    assertTrue("pipes job failed", rJob.isSuccessful());
    Counters counters = rJob.getCounters();
    Counters.Group wordCountCounters = counters.getGroup("WORDCOUNT");
    int numCounters = 0;
    for (Counter c : wordCountCounters) {
      System.out.println(c);
      ++numCounters;
    }
    assertTrue("No counters found!", (numCounters > 0));
  }
  List<String> results = new ArrayList<String>();
  for (Path p : FileUtil.stat2Paths(dfs.getFileSystem().listStatus(outputPath,
      new OutputLogFilter()))) {
    results.add(TestMiniMRWithDFS.readOutput(p, job));
  }
  assertEquals("number of reduces is wrong",
      expectedResults.length, results.size());
  for (int i = 0; i < results.size(); i++) {
    assertEquals("pipes program " + program + " output " + i + " wrong",
        expectedResults[i], results.get(i));
  }
}
Example 12
Source File: TestKeyFieldBasedComparator.java From hadoop-gpu with Apache License 2.0
public void configure(String keySpec, int expect) throws Exception {
  Path testdir = new Path("build/test/test.mapred.spill");
  Path inDir = new Path(testdir, "in");
  Path outDir = new Path(testdir, "out");
  FileSystem fs = getFileSystem();
  fs.delete(testdir, true);
  conf.setInputFormat(TextInputFormat.class);
  FileInputFormat.setInputPaths(conf, inDir);
  FileOutputFormat.setOutputPath(conf, outDir);
  conf.setOutputKeyClass(Text.class);
  conf.setOutputValueClass(LongWritable.class);
  conf.setNumMapTasks(1);
  conf.setNumReduceTasks(2);
  conf.setOutputFormat(TextOutputFormat.class);
  conf.setOutputKeyComparatorClass(KeyFieldBasedComparator.class);
  conf.setKeyFieldComparatorOptions(keySpec);
  conf.setKeyFieldPartitionerOptions("-k1.1,1.1");
  conf.set("map.output.key.field.separator", " ");
  conf.setMapperClass(InverseMapper.class);
  conf.setReducerClass(IdentityReducer.class);
  if (!fs.mkdirs(testdir)) {
    throw new IOException("Mkdirs failed to create " + testdir.toString());
  }
  if (!fs.mkdirs(inDir)) {
    throw new IOException("Mkdirs failed to create " + inDir.toString());
  }
  // set up input data in 2 files
  Path inFile = new Path(inDir, "part0");
  FileOutputStream fos = new FileOutputStream(inFile.toString());
  fos.write((line1 + "\n").getBytes());
  fos.write((line2 + "\n").getBytes());
  fos.close();
  JobClient jc = new JobClient(conf);
  RunningJob r_job = jc.submitJob(conf);
  while (!r_job.isComplete()) {
    Thread.sleep(1000);
  }
  if (!r_job.isSuccessful()) {
    fail("Oops! The job broke due to an unexpected error");
  }
  Path[] outputFiles = FileUtil.stat2Paths(
      getFileSystem().listStatus(outDir, new OutputLogFilter()));
  if (outputFiles.length > 0) {
    InputStream is = getFileSystem().open(outputFiles[0]);
    BufferedReader reader = new BufferedReader(new InputStreamReader(is));
    String line = reader.readLine();
    // make sure we get what we expect as the first line, and also
    // that we have two lines (both the lines must end up in the same
    // reducer since the partitioner takes the same key spec for all
    // lines)
    if (expect == 1) {
      assertTrue(line.startsWith(line1));
    } else if (expect == 2) {
      assertTrue(line.startsWith(line2));
    }
    line = reader.readLine();
    if (expect == 1) {
      assertTrue(line.startsWith(line2));
    } else if (expect == 2) {
      assertTrue(line.startsWith(line1));
    }
    reader.close();
  }
}
Example 13
Source File: ParallelHadoopJobStep.java From imputationserver with GNU Affero General Public License v3.0
@Override
public void updateProgress() {
  for (HadoopJob job : jobs.values()) {
    int state = WAIT;
    if (job != null) {
      String hadoopJobId = job.getJobId();
      if (hadoopJobId != null) {
        RunningJob hadoopJob = HadoopUtil.getInstance().getJob(hadoopJobId);
        try {
          if (hadoopJob != null) {
            if (hadoopJob.isComplete()) {
              if (hadoopJob.isSuccessful()) {
                state = OK;
              } else {
                state = FAILED;
              }
            } else {
              if (hadoopJob.getJobStatus().mapProgress() > 0) {
                state = RUNNING;
              } else {
                state = WAIT;
              }
            }
          } else {
            state = WAIT;
          }
        } catch (IOException e) {
          state = WAIT;
        }
      }
    }
    states.put(job, state);
  }
}