Java Code Examples for org.apache.hadoop.util.StringUtils#formatPercent()
The following examples show how to use org.apache.hadoop.util.StringUtils#formatPercent(). They are drawn from open-source projects; the source file, project, and license are noted above each snippet.
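Before the project examples, here is a minimal standalone sketch of the call itself, assuming the usual Hadoop signature formatPercent(double fraction, int decimalPlaces): the first argument is a fraction between 0 and 1, the second is the number of decimal places in the rendered percentage. The class name and the expected output strings below are illustrative assumptions, not taken from the projects that follow.

import org.apache.hadoop.util.StringUtils;

// Hypothetical demo class, not from any of the example projects below.
public class FormatPercentDemo {
  public static void main(String[] args) {
    double done = 0.8375; // fraction of work completed (83.75%)

    // Render the fraction as a percentage string with 0 and 2 decimal places.
    // Expected output (assumption): "84%" and "83.75%".
    System.out.println(StringUtils.formatPercent(done, 0));
    System.out.println(StringUtils.formatPercent(done, 2));
  }
}

The job-monitoring examples below build progress reports in exactly this way, e.g. " map " + StringUtils.formatPercent(mapProgress(), 0).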
Example 1
Source File: MC.java (from hbase-tools, Apache License 2.0)
private String getRegionInfo(byte[] regionName) {
  return "Table: " + regionTableMap.get(regionName)
      + ", RS: " + regionRSMap.get(regionName)
      + ", Locality: " + (regionLocalityMap.get(regionName) == null
          ? "null"
          : StringUtils.formatPercent(regionLocalityMap.get(regionName), 2))
      + ", SizeMB: " + regionSizeMap.get(regionName);
}
Example 2
Source File: Job.java (from hadoop, Apache License 2.0)
/**
 * Monitor a job and print status in real-time as progress is made and tasks
 * fail.
 * @return true if the job succeeded
 * @throws IOException if communication to the JobTracker fails
 */
public boolean monitorAndPrintJob()
    throws IOException, InterruptedException {
  String lastReport = null;
  Job.TaskStatusFilter filter;
  Configuration clientConf = getConfiguration();
  filter = Job.getTaskOutputFilter(clientConf);
  JobID jobId = getJobID();
  LOG.info("Running job: " + jobId);
  int eventCounter = 0;
  boolean profiling = getProfileEnabled();
  IntegerRanges mapRanges = getProfileTaskRange(true);
  IntegerRanges reduceRanges = getProfileTaskRange(false);
  int progMonitorPollIntervalMillis =
      Job.getProgressPollInterval(clientConf);
  /* make sure to report full progress after the job is done */
  boolean reportedAfterCompletion = false;
  boolean reportedUberMode = false;
  while (!isComplete() || !reportedAfterCompletion) {
    if (isComplete()) {
      reportedAfterCompletion = true;
    } else {
      Thread.sleep(progMonitorPollIntervalMillis);
    }
    if (status.getState() == JobStatus.State.PREP) {
      continue;
    }
    if (!reportedUberMode) {
      reportedUberMode = true;
      LOG.info("Job " + jobId + " running in uber mode : " + isUber());
    }
    String report =
        (" map " + StringUtils.formatPercent(mapProgress(), 0) +
         " reduce " + StringUtils.formatPercent(reduceProgress(), 0));
    if (!report.equals(lastReport)) {
      LOG.info(report);
      lastReport = report;
    }
    TaskCompletionEvent[] events =
        getTaskCompletionEvents(eventCounter, 10);
    eventCounter += events.length;
    printTaskEvents(events, filter, profiling, mapRanges, reduceRanges);
  }
  boolean success = isSuccessful();
  if (success) {
    LOG.info("Job " + jobId + " completed successfully");
  } else {
    LOG.info("Job " + jobId + " failed with state " + status.getState() +
        " due to: " + status.getFailureInfo());
  }
  Counters counters = getCounters();
  if (counters != null) {
    LOG.info(counters.toString());
  }
  return success;
}
Example 3
Source File: Job.java (from big-c, Apache License 2.0)
/**
 * Monitor a job and print status in real-time as progress is made and tasks
 * fail.
 * @return true if the job succeeded
 * @throws IOException if communication to the JobTracker fails
 */
public boolean monitorAndPrintJob()
    throws IOException, InterruptedException {
  String lastReport = null;
  Job.TaskStatusFilter filter;
  Configuration clientConf = getConfiguration();
  filter = Job.getTaskOutputFilter(clientConf);
  JobID jobId = getJobID();
  LOG.info("Running job: " + jobId);
  int eventCounter = 0;
  boolean profiling = getProfileEnabled();
  IntegerRanges mapRanges = getProfileTaskRange(true);
  IntegerRanges reduceRanges = getProfileTaskRange(false);
  int progMonitorPollIntervalMillis =
      Job.getProgressPollInterval(clientConf);
  /* make sure to report full progress after the job is done */
  boolean reportedAfterCompletion = false;
  boolean reportedUberMode = false;
  while (!isComplete() || !reportedAfterCompletion) {
    if (isComplete()) {
      reportedAfterCompletion = true;
    } else {
      Thread.sleep(progMonitorPollIntervalMillis);
    }
    if (status.getState() == JobStatus.State.PREP) {
      continue;
    }
    if (!reportedUberMode) {
      reportedUberMode = true;
      LOG.info("Job " + jobId + " running in uber mode : " + isUber());
    }
    String report =
        (" map " + StringUtils.formatPercent(mapProgress(), 0) +
         " reduce " + StringUtils.formatPercent(reduceProgress(), 0));
    if (!report.equals(lastReport)) {
      LOG.info(report);
      lastReport = report;
    }
    TaskCompletionEvent[] events =
        getTaskCompletionEvents(eventCounter, 10);
    eventCounter += events.length;
    printTaskEvents(events, filter, profiling, mapRanges, reduceRanges);
  }
  boolean success = isSuccessful();
  if (success) {
    LOG.info("Job " + jobId + " completed successfully");
  } else {
    LOG.info("Job " + jobId + " failed with state " + status.getState() +
        " due to: " + status.getFailureInfo());
  }
  Counters counters = getCounters();
  if (counters != null) {
    LOG.info(counters.toString());
  }
  return success;
}
Example 4
Source File: JobClient.java (from RDFS, Apache License 2.0)
/**
 * Monitor a job and print status in real-time as progress is made and tasks
 * fail.
 * @param conf the job's configuration
 * @param job the job to track
 * @return true if the job succeeded
 * @throws IOException if communication to the JobTracker fails
 */
public boolean monitorAndPrintJob(JobConf conf, RunningJob job)
    throws IOException, InterruptedException {
  String lastReport = null;
  TaskStatusFilter filter;
  filter = getTaskOutputFilter(conf);
  JobID jobId = job.getID();
  LOG.info("Running job: " + jobId);
  int eventCounter = 0;
  boolean profiling = conf.getProfileEnabled();
  Configuration.IntegerRanges mapRanges = conf.getProfileTaskRange(true);
  Configuration.IntegerRanges reduceRanges = conf.getProfileTaskRange(false);
  while (!job.isComplete()) {
    Thread.sleep(MAX_JOBPROFILE_AGE);
    String report =
        (" map " + StringUtils.formatPercent(job.mapProgress(), 0) +
         " reduce " + StringUtils.formatPercent(job.reduceProgress(), 0));
    if (!report.equals(lastReport)) {
      LOG.info(report);
      lastReport = report;
    }
    TaskCompletionEvent[] events =
        job.getTaskCompletionEvents(eventCounter);
    eventCounter += events.length;
    for (TaskCompletionEvent event : events) {
      TaskCompletionEvent.Status status = event.getTaskStatus();
      if (profiling &&
          (status == TaskCompletionEvent.Status.SUCCEEDED ||
           status == TaskCompletionEvent.Status.FAILED) &&
          (event.isMap ? mapRanges : reduceRanges).
            isIncluded(event.idWithinJob())) {
        downloadProfile(event);
      }
      switch (filter) {
      case NONE:
        break;
      case SUCCEEDED:
        if (event.getTaskStatus() == TaskCompletionEvent.Status.SUCCEEDED) {
          LOG.info(event.toString());
          displayTaskLogs(event.getTaskAttemptId(), event.getTaskTrackerHttp());
        }
        break;
      case FAILED:
        if (event.getTaskStatus() == TaskCompletionEvent.Status.FAILED) {
          LOG.info(event.toString());
          // Displaying the task diagnostic information
          TaskAttemptID taskId = event.getTaskAttemptId();
          String[] taskDiagnostics = jobSubmitClient.getTaskDiagnostics(taskId);
          if (taskDiagnostics != null) {
            for (String diagnostics : taskDiagnostics) {
              System.err.println(diagnostics);
            }
          }
          // Displaying the task logs
          displayTaskLogs(event.getTaskAttemptId(), event.getTaskTrackerHttp());
        }
        break;
      case KILLED:
        if (event.getTaskStatus() == TaskCompletionEvent.Status.KILLED) {
          LOG.info(event.toString());
        }
        break;
      case ALL:
        LOG.info(event.toString());
        displayTaskLogs(event.getTaskAttemptId(), event.getTaskTrackerHttp());
        break;
      }
    }
  }
  LOG.info("Job complete: " + jobId);
  Counters counters = job.getCounters();
  if (counters != null) {
    counters.log(LOG);
  }
  return job.isSuccessful();
}
Example 5
Source File: DistRaid.java (from RDFS, Apache License 2.0)
/**
 * Checks if the map-reduce job has completed.
 *
 * @return true if the job completed, false otherwise.
 * @throws IOException
 */
public boolean checkComplete() throws IOException {
  JobID jobID = runningJob.getID();
  if (runningJob.isComplete()) {
    // delete job directory
    final String jobdir = jobconf.get(JOB_DIR_LABEL);
    if (jobdir != null) {
      final Path jobpath = new Path(jobdir);
      jobpath.getFileSystem(jobconf).delete(jobpath, true);
    }
    if (runningJob.isSuccessful()) {
      LOG.info("Job Complete(Succeeded): " + jobID);
    } else {
      LOG.info("Job Complete(Failed): " + jobID);
    }
    raidPolicyPathPairList.clear();
    Counters ctrs = runningJob.getCounters();
    if (ctrs != null) {
      RaidNodeMetrics metrics =
          RaidNodeMetrics.getInstance(RaidNodeMetrics.DEFAULT_NAMESPACE_ID);
      if (ctrs.findCounter(Counter.FILES_FAILED) != null) {
        long filesFailed = ctrs.findCounter(Counter.FILES_FAILED).getValue();
        metrics.raidFailures.inc(filesFailed);
      }
      long slotSeconds = ctrs.findCounter(
          JobInProgress.Counter.SLOTS_MILLIS_MAPS).getValue() / 1000;
      metrics.raidSlotSeconds.inc(slotSeconds);
    }
    return true;
  } else {
    String report =
        (" job " + jobID +
         " map " + StringUtils.formatPercent(runningJob.mapProgress(), 0) +
         " reduce " + StringUtils.formatPercent(runningJob.reduceProgress(), 0));
    if (!report.equals(lastReport)) {
      LOG.info(report);
      lastReport = report;
    }
    TaskCompletionEvent[] events =
        runningJob.getTaskCompletionEvents(jobEventCounter);
    jobEventCounter += events.length;
    for (TaskCompletionEvent event : events) {
      if (event.getTaskStatus() == TaskCompletionEvent.Status.FAILED) {
        LOG.info(" Job " + jobID + " " + event.toString());
      }
    }
    return false;
  }
}
Example 6
Source File: JobClient.java (from hadoop-gpu, Apache License 2.0)
/**
 * Monitor a job and print status in real-time as progress is made and tasks
 * fail.
 * @param conf the job's configuration
 * @param job the job to track
 * @return true if the job succeeded
 * @throws IOException if communication to the JobTracker fails
 */
public boolean monitorAndPrintJob(JobConf conf, RunningJob job)
    throws IOException, InterruptedException {
  String lastReport = null;
  TaskStatusFilter filter;
  filter = getTaskOutputFilter(conf);
  JobID jobId = job.getID();
  LOG.info("Running job: " + jobId);
  int eventCounter = 0;
  boolean profiling = conf.getProfileEnabled();
  Configuration.IntegerRanges mapRanges = conf.getProfileTaskRange(true);
  Configuration.IntegerRanges reduceRanges = conf.getProfileTaskRange(false);
  while (!job.isComplete()) {
    Thread.sleep(1000);
    String report =
        (" map " + StringUtils.formatPercent(job.mapProgress(), 0) +
         " reduce " + StringUtils.formatPercent(job.reduceProgress(), 0));
    if (!report.equals(lastReport)) {
      LOG.info(report);
      lastReport = report;
    }
    TaskCompletionEvent[] events =
        job.getTaskCompletionEvents(eventCounter);
    eventCounter += events.length;
    for (TaskCompletionEvent event : events) {
      TaskCompletionEvent.Status status = event.getTaskStatus();
      if (profiling &&
          (status == TaskCompletionEvent.Status.SUCCEEDED ||
           status == TaskCompletionEvent.Status.FAILED) &&
          (event.isMap ? mapRanges : reduceRanges).
            isIncluded(event.idWithinJob())) {
        downloadProfile(event);
      }
      switch (filter) {
      case NONE:
        break;
      case SUCCEEDED:
        if (event.getTaskStatus() == TaskCompletionEvent.Status.SUCCEEDED) {
          LOG.info(event.toString());
          displayTaskLogs(event.getTaskAttemptId(), event.getTaskTrackerHttp());
        }
        break;
      case FAILED:
        if (event.getTaskStatus() == TaskCompletionEvent.Status.FAILED) {
          LOG.info(event.toString());
          // Displaying the task diagnostic information
          TaskAttemptID taskId = event.getTaskAttemptId();
          String[] taskDiagnostics = jobSubmitClient.getTaskDiagnostics(taskId);
          if (taskDiagnostics != null) {
            for (String diagnostics : taskDiagnostics) {
              System.err.println(diagnostics);
            }
          }
          // Displaying the task logs
          displayTaskLogs(event.getTaskAttemptId(), event.getTaskTrackerHttp());
        }
        break;
      case KILLED:
        if (event.getTaskStatus() == TaskCompletionEvent.Status.KILLED) {
          LOG.info(event.toString());
        }
        break;
      case ALL:
        LOG.info(event.toString());
        displayTaskLogs(event.getTaskAttemptId(), event.getTaskTrackerHttp());
        break;
      }
    }
  }
  LOG.info("Job complete: " + jobId);
  job.getCounters().log(LOG);
  return job.isSuccessful();
}