org.apache.hadoop.mapred.RunningJob Java Examples
The following examples show how to use org.apache.hadoop.mapred.RunningJob. RunningJob is the handle returned by the old mapred-API JobClient for a submitted MapReduce job; it lets client code poll progress and completion, read counters and task diagnostics, fetch the tracking URL, and kill the job. Each example is an excerpt from an open-source project; the source file and license are noted above the code.
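Before the project-specific examples, here is a minimal, self-contained sketch of the submit-and-poll workflow that many of the examples below share: submit a JobConf through JobClient.submitJob(), poll the returned RunningJob for progress and completion, then check isSuccessful(). The job configuration itself (IdentityMapper, input/output paths taken from args) is a placeholder chosen for illustration and does not come from any of the projects listed below.

import java.io.IOException;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RunningJob;
import org.apache.hadoop.mapred.lib.IdentityMapper;

public class RunningJobSketch {
  public static void main(String[] args) throws IOException, InterruptedException {
    // Configure a simple map-only job with the old (org.apache.hadoop.mapred) API.
    JobConf conf = new JobConf(RunningJobSketch.class);
    conf.setJobName("running-job-sketch");
    conf.setMapperClass(IdentityMapper.class);
    conf.setNumReduceTasks(0);
    conf.setOutputKeyClass(LongWritable.class);
    conf.setOutputValueClass(Text.class);
    FileInputFormat.setInputPaths(conf, new Path(args[0]));
    FileOutputFormat.setOutputPath(conf, new Path(args[1]));

    // submitJob() returns immediately with a RunningJob handle.
    JobClient client = new JobClient(conf);
    RunningJob job = client.submitJob(conf);
    System.out.println("Submitted " + job.getID() + ", tracking URL: " + job.getTrackingURL());

    // Poll the handle until the job finishes, reporting map/reduce progress.
    while (!job.isComplete()) {
      System.out.printf("map %.0f%%, reduce %.0f%%%n",
          job.mapProgress() * 100, job.reduceProgress() * 100);
      Thread.sleep(5000);
    }
    if (!job.isSuccessful()) {
      throw new IOException("Job " + job.getID() + " failed");
    }
  }
}

An alternative to the polling loop is RunningJob.waitForCompletion(), which blocks until the job finishes, or JobClient.runJob(conf), which submits, waits, and prints progress in one call; both variants appear in the examples that follow.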
Example #1
Source File: DistCp.java From RDFS with Apache License 2.0

/**
 * Driver to copy srcPath to destPath depending on required protocol.
 * @param args arguments
 */
static void copy(final Configuration conf, final Arguments args) throws IOException {
  DistCopier copier = getCopier(conf, args);

  if (copier != null) {
    try {
      JobClient client = copier.getJobClient();
      RunningJob job = client.submitJob(copier.getJobConf());
      try {
        if (!client.monitorAndPrintJob(copier.getJobConf(), job)) {
          throw new IOException("Job failed!");
        }
      } catch (InterruptedException ie) {
        Thread.currentThread().interrupt();
      }
      copier.finalizeCopiedFiles();
    } finally {
      copier.cleanupJob();
    }
  }
}
Example #2
Source File: BoaOutputCommitter.java From compiler with Apache License 2.0

@Override
public void abortJob(final JobContext context, final JobStatus.State runState) throws java.io.IOException {
  super.abortJob(context, runState);

  final JobClient jobClient = new JobClient(new JobConf(context.getConfiguration()));
  final RunningJob job = jobClient.getJob(
      (org.apache.hadoop.mapred.JobID) JobID.forName(context.getConfiguration().get("mapred.job.id")));

  String diag = "";
  for (final TaskCompletionEvent event : job.getTaskCompletionEvents(0))
    switch (event.getTaskStatus()) {
      case SUCCEEDED:
        break;
      default:
        diag += "Diagnostics for: " + event.getTaskTrackerHttp() + "\n";
        for (final String s : job.getTaskDiagnostics(event.getTaskAttemptId()))
          diag += s + "\n";
        diag += "\n";
        break;
    }
  updateStatus(diag, context.getConfiguration().getInt("boa.hadoop.jobid", 0));
}
Example #3
Source File: TestTableMapReduceUtil.java From hbase with Apache License 2.0

@Test
@SuppressWarnings("deprecation")
public void shoudBeValidMapReduceEvaluation() throws Exception {
  Configuration cfg = UTIL.getConfiguration();
  JobConf jobConf = new JobConf(cfg);
  try {
    jobConf.setJobName("process row task");
    jobConf.setNumReduceTasks(1);
    TableMapReduceUtil.initTableMapJob(TABLE_NAME, new String(COLUMN_FAMILY),
        ClassificatorMapper.class, ImmutableBytesWritable.class, Put.class, jobConf);
    TableMapReduceUtil.initTableReduceJob(TABLE_NAME, ClassificatorRowReduce.class, jobConf);
    RunningJob job = JobClient.runJob(jobConf);
    assertTrue(job.isSuccessful());
  } finally {
    if (jobConf != null)
      FileUtil.fullyDelete(new File(jobConf.get("hadoop.tmp.dir")));
  }
}
Example #4
Source File: TestTableMapReduceUtil.java From hbase with Apache License 2.0

@Test
@SuppressWarnings("deprecation")
public void shoudBeValidMapReduceWithPartitionerEvaluation() throws IOException {
  Configuration cfg = UTIL.getConfiguration();
  JobConf jobConf = new JobConf(cfg);
  try {
    jobConf.setJobName("process row task");
    jobConf.setNumReduceTasks(2);
    TableMapReduceUtil.initTableMapJob(TABLE_NAME, new String(COLUMN_FAMILY),
        ClassificatorMapper.class, ImmutableBytesWritable.class, Put.class, jobConf);
    TableMapReduceUtil.initTableReduceJob(TABLE_NAME, ClassificatorRowReduce.class, jobConf,
        HRegionPartitioner.class);
    RunningJob job = JobClient.runJob(jobConf);
    assertTrue(job.isSuccessful());
  } finally {
    if (jobConf != null)
      FileUtil.fullyDelete(new File(jobConf.get("hadoop.tmp.dir")));
  }
}
Example #5
Source File: BatchStateUpdaterTest.java From hbase-indexer with Apache License 2.0

@Test
public void testRun_Running() throws Exception {
  String jobId = "job_201407251005_0815";
  createDefinition("mytest", jobId);
  RunningJob job = createJob(jobId, JobStatus.RUNNING);
  when(job.getJobState()).thenReturn(JobStatus.RUNNING);
  Assert.assertEquals(0, executorService.getQueue().size());
  checkAllIndexes();

  Assert.assertEquals(1, executorService.getQueue().size());
  verify(model, VerificationModeFactory.times(1)).getIndexer(anyString());
  verify(model, VerificationModeFactory.times(0)).updateIndexerInternal(any(IndexerDefinition.class));

  Thread.sleep(60);
  Assert.assertEquals(1, executorService.getQueue().size());
  verify(model, VerificationModeFactory.times(2)).getIndexer(anyString());
  verify(model, VerificationModeFactory.times(0)).updateIndexerInternal(any(IndexerDefinition.class));

  when(job.getJobState()).thenReturn(JobStatus.SUCCEEDED);
  Thread.sleep(60);
  Assert.assertEquals(0, executorService.getQueue().size());
  verify(model, VerificationModeFactory.times(3)).getIndexer(anyString());
  verify(model, VerificationModeFactory.times(1)).updateIndexerInternal(any(IndexerDefinition.class));
}
Example #6
Source File: DataJoinJob.java From big-c with Apache License 2.0

/**
 * Submit/run a map/reduce job.
 *
 * @param job
 * @return true for success
 * @throws IOException
 */
public static boolean runJob(JobConf job) throws IOException {
  JobClient jc = new JobClient(job);
  boolean sucess = true;
  RunningJob running = null;
  try {
    running = jc.submitJob(job);
    JobID jobId = running.getID();
    System.out.println("Job " + jobId + " is submitted");
    while (!running.isComplete()) {
      System.out.println("Job " + jobId + " is still running.");
      try {
        Thread.sleep(60000);
      } catch (InterruptedException e) {
      }
      running = jc.getJob(jobId);
    }
    sucess = running.isSuccessful();
  } finally {
    if (!sucess && (running != null)) {
      running.killJob();
    }
    jc.close();
  }
  return sucess;
}
Example #7
Source File: TestTableInputFormat.java From hbase with Apache License 2.0

void testInputFormat(Class<? extends InputFormat> clazz) throws IOException {
  Configuration conf = UTIL.getConfiguration();
  final JobConf job = new JobConf(conf);
  job.setInputFormat(clazz);
  job.setOutputFormat(NullOutputFormat.class);
  job.setMapperClass(ExampleVerifier.class);
  job.setNumReduceTasks(0);

  LOG.debug("submitting job.");
  final RunningJob run = JobClient.runJob(job);
  assertTrue("job failed!", run.isSuccessful());
  assertEquals("Saw the wrong number of instances of the filtered-for row.", 2,
      run.getCounters().findCounter(TestTableInputFormat.class.getName() + ":row", "aaa").getCounter());
  assertEquals("Saw any instances of the filtered out row.", 0,
      run.getCounters().findCounter(TestTableInputFormat.class.getName() + ":row", "bbb").getCounter());
  assertEquals("Saw the wrong number of instances of columnA.", 1,
      run.getCounters().findCounter(TestTableInputFormat.class.getName() + ":family", "columnA").getCounter());
  assertEquals("Saw the wrong number of instances of columnB.", 1,
      run.getCounters().findCounter(TestTableInputFormat.class.getName() + ":family", "columnB").getCounter());
  assertEquals("Saw the wrong count of values for the filtered-for row.", 2,
      run.getCounters().findCounter(TestTableInputFormat.class.getName() + ":value", "value aaa").getCounter());
  assertEquals("Saw the wrong count of values for the filtered-out row.", 0,
      run.getCounters().findCounter(TestTableInputFormat.class.getName() + ":value", "value bbb").getCounter());
}
Example #8
Source File: TestMultiTableSnapshotInputFormat.java From hbase with Apache License 2.0

@Override
protected void runJob(String jobName, Configuration c, List<Scan> scans)
    throws IOException, InterruptedException, ClassNotFoundException {
  JobConf job = new JobConf(TEST_UTIL.getConfiguration());

  job.setJobName(jobName);
  job.setMapperClass(Mapper.class);
  job.setReducerClass(Reducer.class);

  TableMapReduceUtil.initMultiTableSnapshotMapperJob(getSnapshotScanMapping(scans), Mapper.class,
      ImmutableBytesWritable.class, ImmutableBytesWritable.class, job, true, restoreDir);

  TableMapReduceUtil.addDependencyJars(job);

  job.setReducerClass(Reducer.class);
  job.setNumReduceTasks(1); // one to get final "first" and "last" key
  FileOutputFormat.setOutputPath(job, new Path(job.getJobName()));
  LOG.info("Started " + job.getJobName());

  RunningJob runningJob = JobClient.runJob(job);
  runningJob.waitForCompletion();
  assertTrue(runningJob.isSuccessful());
  LOG.info("After map/reduce completion - job " + jobName);
}
Example #9
Source File: CartesianCommentComparison.java From hadoop-map-reduce-patterns with Apache License 2.0

@Override
public int run(String[] args) throws Exception {
  if (args.length != 2) {
    System.err.println("Usage: CartesianCommentComparison <in> <out>");
    ToolRunner.printGenericCommandUsage(System.err);
    System.exit(2);
  }
  // Configure the join type
  JobConf conf = new JobConf("Cartesian Product");
  conf.setJarByClass(CartesianCommentComparison.class);
  conf.setMapperClass(CartesianMapper.class);
  conf.setNumReduceTasks(0);
  conf.setInputFormat(CartesianInputFormat.class);
  // Configure the input format
  CartesianInputFormat.setLeftInputInfo(conf, TextInputFormat.class, args[0]);
  CartesianInputFormat.setRightInputInfo(conf, TextInputFormat.class, args[0]);
  TextOutputFormat.setOutputPath(conf, new Path(args[1]));
  conf.setOutputKeyClass(Text.class);
  conf.setOutputValueClass(Text.class);
  RunningJob job = JobClient.runJob(conf);
  while (!job.isComplete()) {
    Thread.sleep(1000);
  }
  return job.isSuccessful() ? 0 : 1;
}
Example #10
Source File: ConfigurationProxyTest.java From pentaho-hadoop-shims with Apache License 2.0

@Test
public void testSubmitWhenUserHasPermissionsToSubmitJobInQueueShouldExecuteSuccessfully()
    throws IOException, InterruptedException, ClassNotFoundException {
  Mockito.spy( YarnQueueAclsVerifier.class );
  ConfigurationProxy configurationProxy = Mockito.mock( ConfigurationProxy.class );
  JobClient jobClient = Mockito.mock( JobClient.class );
  RunningJob runningJob = Mockito.mock( RunningJob.class );

  Mockito.when( configurationProxy.createJobClient() ).thenReturn( jobClient );
  Mockito.when( configurationProxy.submit() ).thenCallRealMethod();
  Mockito.when( jobClient.getQueueAclsForCurrentUser() ).thenReturn( new MockQueueAclsInfo[] {
    new MockQueueAclsInfo( StringUtils.EMPTY, new String[] { "SUBMIT_APPLICATIONS" } ),
    new MockQueueAclsInfo( StringUtils.EMPTY, new String[] {} )
  } );
  Mockito.when( jobClient.submitJob( Mockito.any( JobConf.class ) ) ).thenReturn( runningJob );

  Assert.assertNotNull( configurationProxy.submit() );
}
Example #11
Source File: TestMiniMRProxyUser.java From big-c with Apache License 2.0

private void mrRun() throws Exception {
  FileSystem fs = FileSystem.get(getJobConf());
  Path inputDir = new Path("input");
  fs.mkdirs(inputDir);
  Writer writer = new OutputStreamWriter(fs.create(new Path(inputDir, "data.txt")));
  writer.write("hello");
  writer.close();

  Path outputDir = new Path("output", "output");

  JobConf jobConf = new JobConf(getJobConf());
  jobConf.setInt("mapred.map.tasks", 1);
  jobConf.setInt("mapred.map.max.attempts", 1);
  jobConf.setInt("mapred.reduce.max.attempts", 1);
  jobConf.set("mapred.input.dir", inputDir.toString());
  jobConf.set("mapred.output.dir", outputDir.toString());

  JobClient jobClient = new JobClient(jobConf);
  RunningJob runJob = jobClient.submitJob(jobConf);
  runJob.waitForCompletion();
  assertTrue(runJob.isComplete());
  assertTrue(runJob.isSuccessful());
}
Example #12
Source File: TestZstandardCodec.java From parquet-mr with Apache License 2.0

private RunningJob runMapReduceJob(CompressionCodecName codec, JobConf jobConf, Configuration conf, Path parquetPath)
    throws IOException, ClassNotFoundException, InterruptedException {
  String writeSchema = "message example {\n" +
      "required int32 line;\n" +
      "required binary content;\n" +
      "}";

  FileSystem fileSystem = parquetPath.getFileSystem(conf);
  fileSystem.delete(parquetPath, true);

  jobConf.setInputFormat(TextInputFormat.class);
  TextInputFormat.addInputPath(jobConf, inputPath);
  jobConf.setNumReduceTasks(0);
  jobConf.setOutputFormat(DeprecatedParquetOutputFormat.class);
  DeprecatedParquetOutputFormat.setCompression(jobConf, codec);
  DeprecatedParquetOutputFormat.setOutputPath(jobConf, parquetPath);
  DeprecatedParquetOutputFormat.setWriteSupportClass(jobConf, GroupWriteSupport.class);
  GroupWriteSupport.setSchema(MessageTypeParser.parseMessageType(writeSchema), jobConf);

  jobConf.setMapperClass(TestZstandardCodec.DumpMapper.class);
  return JobClient.runJob(jobConf);
}
Example #13
Source File: MapReduceLauncher.java From spork with Apache License 2.0

@Override
public void killJob(String jobID, Configuration conf) throws BackendException {
  try {
    if (conf != null) {
      JobConf jobConf = new JobConf(conf);
      JobClient jc = new JobClient(jobConf);
      JobID id = JobID.forName(jobID);
      RunningJob job = jc.getJob(id);
      if (job == null)
        System.out.println("Job with id " + jobID + " is not active");
      else {
        job.killJob();
        log.info("Kill " + id + " submitted.");
      }
    }
  } catch (IOException e) {
    throw new BackendException(e);
  }
}
Example #14
Source File: Job.java From RDFS with Apache License 2.0

/**
 * Submit this job to mapred. The state becomes RUNNING if submission
 * is successful, FAILED otherwise.
 */
protected synchronized void submit() {
  try {
    if (theJobConf.getBoolean("create.empty.dir.if.nonexist", false)) {
      FileSystem fs = FileSystem.get(theJobConf);
      Path inputPaths[] = FileInputFormat.getInputPaths(theJobConf);
      for (int i = 0; i < inputPaths.length; i++) {
        if (!fs.exists(inputPaths[i])) {
          try {
            fs.mkdirs(inputPaths[i]);
          } catch (IOException e) {
          }
        }
      }
    }
    RunningJob running = jc.submitJob(theJobConf);
    this.mapredJobID = running.getID();
    this.state = Job.RUNNING;
  } catch (IOException ioe) {
    this.state = Job.FAILED;
    this.message = StringUtils.stringifyException(ioe);
  }
}
Example #15
Source File: TestMiniMRProxyUser.java From hadoop with Apache License 2.0

private void mrRun() throws Exception {
  FileSystem fs = FileSystem.get(getJobConf());
  Path inputDir = new Path("input");
  fs.mkdirs(inputDir);
  Writer writer = new OutputStreamWriter(fs.create(new Path(inputDir, "data.txt")));
  writer.write("hello");
  writer.close();

  Path outputDir = new Path("output", "output");

  JobConf jobConf = new JobConf(getJobConf());
  jobConf.setInt("mapred.map.tasks", 1);
  jobConf.setInt("mapred.map.max.attempts", 1);
  jobConf.setInt("mapred.reduce.max.attempts", 1);
  jobConf.set("mapred.input.dir", inputDir.toString());
  jobConf.set("mapred.output.dir", outputDir.toString());

  JobClient jobClient = new JobClient(jobConf);
  RunningJob runJob = jobClient.submitJob(jobConf);
  runJob.waitForCompletion();
  assertTrue(runJob.isComplete());
  assertTrue(runJob.isSuccessful());
}
Example #16
Source File: DataJoinJob.java From hadoop-gpu with Apache License 2.0

/**
 * Submit/run a map/reduce job.
 *
 * @param job
 * @return true for success
 * @throws IOException
 */
public static boolean runJob(JobConf job) throws IOException {
  JobClient jc = new JobClient(job);
  boolean sucess = true;
  RunningJob running = null;
  try {
    running = jc.submitJob(job);
    JobID jobId = running.getID();
    System.out.println("Job " + jobId + " is submitted");
    while (!running.isComplete()) {
      System.out.println("Job " + jobId + " is still running.");
      try {
        Thread.sleep(60000);
      } catch (InterruptedException e) {
      }
      running = jc.getJob(jobId);
    }
    sucess = running.isSuccessful();
  } finally {
    if (!sucess && (running != null)) {
      running.killJob();
    }
    jc.close();
  }
  return sucess;
}
Example #17
Source File: DataJoinJob.java From RDFS with Apache License 2.0

/**
 * Submit/run a map/reduce job.
 *
 * @param job
 * @return true for success
 * @throws IOException
 */
public static boolean runJob(JobConf job) throws IOException {
  JobClient jc = new JobClient(job);
  boolean sucess = true;
  RunningJob running = null;
  try {
    running = jc.submitJob(job);
    JobID jobId = running.getID();
    System.out.println("Job " + jobId + " is submitted");
    while (!running.isComplete()) {
      System.out.println("Job " + jobId + " is still running.");
      try {
        Thread.sleep(60000);
      } catch (InterruptedException e) {
      }
      running = jc.getJob(jobId);
    }
    sucess = running.isSuccessful();
  } finally {
    if (!sucess && (running != null)) {
      running.killJob();
    }
    jc.close();
  }
  return sucess;
}
Example #18
Source File: Job.java From hadoop-gpu with Apache License 2.0

/**
 * Submit this job to mapred. The state becomes RUNNING if submission
 * is successful, FAILED otherwise.
 */
protected synchronized void submit() {
  try {
    if (theJobConf.getBoolean("create.empty.dir.if.nonexist", false)) {
      FileSystem fs = FileSystem.get(theJobConf);
      Path inputPaths[] = FileInputFormat.getInputPaths(theJobConf);
      for (int i = 0; i < inputPaths.length; i++) {
        if (!fs.exists(inputPaths[i])) {
          try {
            fs.mkdirs(inputPaths[i]);
          } catch (IOException e) {
          }
        }
      }
    }
    RunningJob running = jc.submitJob(theJobConf);
    this.mapredJobID = running.getID();
    this.state = Job.RUNNING;
  } catch (IOException ioe) {
    this.state = Job.FAILED;
    this.message = StringUtils.stringifyException(ioe);
  }
}
Example #19
Source File: DataJoinJob.java From hadoop with Apache License 2.0

/**
 * Submit/run a map/reduce job.
 *
 * @param job
 * @return true for success
 * @throws IOException
 */
public static boolean runJob(JobConf job) throws IOException {
  JobClient jc = new JobClient(job);
  boolean sucess = true;
  RunningJob running = null;
  try {
    running = jc.submitJob(job);
    JobID jobId = running.getID();
    System.out.println("Job " + jobId + " is submitted");
    while (!running.isComplete()) {
      System.out.println("Job " + jobId + " is still running.");
      try {
        Thread.sleep(60000);
      } catch (InterruptedException e) {
      }
      running = jc.getJob(jobId);
    }
    sucess = running.isSuccessful();
  } finally {
    if (!sucess && (running != null)) {
      running.killJob();
    }
    jc.close();
  }
  return sucess;
}
Example #20
Source File: MRPigStatsUtil.java From spork with Apache License 2.0

/**
 * Returns the count for the given counter name in the counter group
 * 'MultiStoreCounters'
 *
 * @param job the MR job
 * @param jobClient the Hadoop job client
 * @param counterName the counter name
 * @return the count of the given counter name
 */
public static long getMultiStoreCount(Job job, JobClient jobClient, String counterName) {
  long value = -1;
  try {
    RunningJob rj = jobClient.getJob(job.getAssignedJobID());
    if (rj != null) {
      Counters.Counter counter = rj.getCounters().getGroup(
          MULTI_STORE_COUNTER_GROUP).getCounterForName(counterName);
      value = counter.getValue();
    }
  } catch (IOException e) {
    LOG.warn("Failed to get the counter for " + counterName, e);
  }
  return value;
}
Example #21
Source File: HadoopShims.java From spork with Apache License 2.0

/**
 * Returns the progress of a Job j which is part of a submitted JobControl
 * object. The progress is for this Job. So it has to be scaled down by the
 * num of jobs that are present in the JobControl.
 *
 * @param j The Job for which progress is required
 * @return Returns the percentage progress of this Job
 * @throws IOException
 */
public static double progressOfRunningJob(Job j) throws IOException {
  RunningJob rj = j.getJobClient().getJob(j.getAssignedJobID());
  if (rj == null && j.getState() == Job.SUCCESS)
    return 1;
  else if (rj == null)
    return 0;
  else {
    return (rj.mapProgress() + rj.reduceProgress()) / 2;
  }
}
Example #22
Source File: JobStatusProvider.java From ankush with GNU Lesser General Public License v3.0

/**
 * Builds a detailed report (counters plus map, reduce, cleanup, and setup task reports) for the given job.
 *
 * @param jobId the ID of the job to report on
 * @return a map holding the detailed job report
 * @throws IOException
 */
private Map<String, Object> getDetailedJobReport(org.apache.hadoop.mapred.JobID jobId) throws IOException {
  Map<String, Object> jobDetailedReport = new HashMap<String, Object>();

  RunningJob job = jobClient.getJob(jobId);
  Counters counters = job.getCounters();
  List counterList = new ArrayList();
  for (Group group : counters) {
    Map<String, Object> counterMap = new HashMap<String, Object>();
    counterMap.put("name", group.getDisplayName());
    List subCounters = new ArrayList();
    for (Counter counter : group) {
      Map subCounter = new HashMap();
      subCounter.put("name", counter.getDisplayName());
      subCounter.put("value", counter.getCounter());
      subCounters.add(subCounter);
    }
    counterMap.put("subCounters", subCounters);
    counterList.add(counterMap);
  }
  jobDetailedReport.put("counters", counterList);
  jobDetailedReport.put("mapReport", getTaskReport(jobClient.getMapTaskReports(jobId)));
  jobDetailedReport.put("reduceReport", getTaskReport(jobClient.getReduceTaskReports(jobId)));
  jobDetailedReport.put("cleanupReport", getTaskReport(jobClient.getCleanupTaskReports(jobId)));
  jobDetailedReport.put("setupReport", getTaskReport(jobClient.getSetupTaskReports(jobId)));
  return jobDetailedReport;
}
Example #23
Source File: IngestJob.java From datawave with Apache License 2.0

protected int jobFailed(Job job, RunningJob runningJob, FileSystem fs, Path workDir) throws IOException {
  log.error("Map Reduce job " + job.getJobName() + " was unsuccessful. Check the logs.");
  log.error("Since job was not successful, deleting work directory: " + workDir);
  boolean deleted = fs.delete(workDir, true);
  if (!deleted) {
    log.error("Unable to remove job working directory: " + workDir);
  }
  if (runningJob.getJobState() == JobStatus.KILLED) {
    log.warn("Job was killed");
    return -2;
  } else {
    log.error("Job failed with a jobstate of " + runningJob.getJobState());
    return -3;
  }
}
Example #24
Source File: TestZstandardCodec.java From parquet-mr with Apache License 2.0

private long runMrWithConf(int level) throws Exception {
  JobConf jobConf = new JobConf();
  Configuration conf = new Configuration();
  jobConf.setInt(ZstandardCodec.PARQUET_COMPRESS_ZSTD_LEVEL, level);
  jobConf.setInt(ZstandardCodec.PARQUET_COMPRESS_ZSTD_WORKERS, 4);
  Path path = new Path(Files.createTempDirectory("zstd" + level).toAbsolutePath().toString());
  RunningJob mapRedJob = runMapReduceJob(CompressionCodecName.ZSTD, jobConf, conf, path);
  Assert.assertTrue(mapRedJob.isSuccessful());
  return getFileSize(path, conf);
}
Example #25
Source File: HadoopJob.java From RDFS with Apache License 2.0

/**
 * Constructor for a Hadoop job representation
 *
 * @param location
 * @param id
 * @param running
 * @param status
 */
public HadoopJob(HadoopServer location, JobID id, RunningJob running, JobStatus status) {
  this.location = location;
  this.jobId = id;
  this.running = running;

  loadJobFile();
  update(status);
}
Example #26
Source File: PigJobServerImpl.java From oink with Apache License 2.0

@Override
public boolean cancelRequest(String requestId) throws IOException {
  PigRequestStats stats = this.getRequestStats(requestId);

  if (stats.getStatus().equals(Status.SUBMITTED.toString())) {
    List<String> jobs = stats.getJobs();
    for (String job : jobs) {
      job = job.substring(JT_UI.length());
      JobConf jobConf = new JobConf();
      jobConf.set("fs.default.name", PropertyLoader.getInstance().getProperty("fs.default.name"));
      jobConf.set("mapred.job.tracker", PropertyLoader.getInstance().getProperty("jobtracker"));
      try {
        JobClient jobClient = new JobClient(jobConf);
        RunningJob rJob = jobClient.getJob(JobID.forName(job));

        if (!rJob.isComplete()) {
          rJob.killJob();
        }
      } catch (Exception e) {
        throw new IOException("Unable to kill job " + job);
      }
    }
    PigRequestStats requestStats = new PigRequestStats(0, 0, null, jobs.size());
    requestStats.setJobs(jobs);
    requestStats.setStatus(Status.KILLED.toString());
    Path statsPath = new Path(PropertyLoader.getInstance().getProperty(Constants.REQUEST_PATH) + requestId + "/stats");
    PigUtils.writeStatsFile(statsPath, requestStats);
    return true;
  } else {
    return false;
  }
}
Example #27
Source File: HadoopAlgoRunner.java From mr4c with Apache License 2.0

private void submitJob() throws IOException {
  // most of this method copies JobClient.runJob()
  // addition here is logging the job URI
  JobClient client = new JobClient(m_jobConf);
  RunningJob job = client.submitJob(m_jobConf);
  m_log.info("Job URL is [{}]", job.getTrackingURL());
  try {
    if (!client.monitorAndPrintJob(m_jobConf, job)) {
      throw new IOException("Job failed!");
    }
  } catch (InterruptedException ie) {
    Thread.currentThread().interrupt();
  }
}
Example #28
Source File: MapreduceSubmissionEngine.java From sqoop-on-spark with Apache License 2.0

private double progress(RunningJob runningJob) {
  try {
    if (runningJob == null) {
      // Return default value
      return -1;
    }
    return (runningJob.mapProgress() + runningJob.reduceProgress()) / 2;
  } catch (IOException e) {
    throw new SqoopException(MapreduceSubmissionError.MAPREDUCE_0003, e);
  }
}
Example #29
Source File: IngestJob.java From hadoop-solr with Apache License 2.0

public void doFinalCommit(JobConf conf, RunningJob job) {
  if (conf.getBoolean("lww.commit.on.close", false) && checkSolrOrZkString(conf)) {
    String jobName = job.getJobName();
    log.info("Performing final commit for job " + jobName);
    // Progress can be null here, because no write operation is performed.
    LucidWorksWriter lww = new LucidWorksWriter(null);
    try {
      lww.open(conf, jobName);
      lww.commit();
    } catch (Exception e) {
      log.error("Error in final job commit", e);
    }
  }
}
Example #30
Source File: HadoopJob.java From hadoop-gpu with Apache License 2.0

/**
 * Constructor for a Hadoop job representation
 *
 * @param location
 * @param id
 * @param running
 * @param status
 */
public HadoopJob(HadoopServer location, JobID id, RunningJob running, JobStatus status) {
  this.location = location;
  this.jobId = id;
  this.running = running;

  loadJobFile();
  update(status);
}