Java Code Examples for org.apache.hadoop.mapred.ClusterStatus#getTaskTrackers()
The following examples show how to use org.apache.hadoop.mapred.ClusterStatus#getTaskTrackers().
Each example is drawn from an open-source project; the source file and license are noted above the code.
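Before the project examples, here is a minimal sketch of the pattern they all share: build a JobClient from a JobConf, fetch a ClusterStatus, and read the task-tracker count. This is a sketch, not code from any of the projects below; the class name TaskTrackerCount is invented here, and it assumes a classic MRv1 cluster (a JobTracker) is reachable through the Hadoop configuration on the classpath.

import java.io.IOException;

import org.apache.hadoop.mapred.ClusterStatus;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;

public class TaskTrackerCount {
  public static void main(String[] args) throws IOException {
    // Connects to the JobTracker named by the configuration on the classpath.
    JobConf conf = new JobConf();
    JobClient client = new JobClient(conf);

    // Passing true requests "detailed" status, which also carries
    // the active tracker names used by some examples below.
    ClusterStatus status = client.getClusterStatus(true);

    // getTaskTrackers() returns the number of task trackers in the cluster.
    System.out.println("Task trackers: " + status.getTaskTrackers());
    System.out.println("Max map tasks: " + status.getMaxMapTasks());
  }
}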
Example 1
Source File: InfrastructureAnalyzer.java From systemds with Apache License 2.0
/**
 * Analyzes properties of hadoop cluster and configuration.
 */
private static void analyzeHadoopCluster() {
  try {
    JobConf job = ConfigurationManager.getCachedJobConf();
    JobClient client = new JobClient(job);
    ClusterStatus stat = client.getClusterStatus();
    if( stat != null ) { //if in cluster mode
      //analyze cluster status
      _remotePar = stat.getTaskTrackers();
      _remoteParMap = stat.getMaxMapTasks();
      _remoteParReduce = stat.getMaxReduceTasks();

      //analyze pure configuration properties
      analyzeHadoopConfiguration();
    }
  }
  catch (IOException e) {
    throw new RuntimeException("Unable to analyze infrastructure.",e);
  }
}
Example 2
Source File: GenerateData.java From hadoop with Apache License 2.0
@Override
public List<InputSplit> getSplits(JobContext jobCtxt) throws IOException {
  final JobClient client =
    new JobClient(new JobConf(jobCtxt.getConfiguration()));
  ClusterStatus stat = client.getClusterStatus(true);
  final long toGen =
    jobCtxt.getConfiguration().getLong(GRIDMIX_GEN_BYTES, -1);
  if (toGen < 0) {
    throw new IOException("Invalid/missing generation bytes: " + toGen);
  }
  final int nTrackers = stat.getTaskTrackers();
  final long bytesPerTracker = toGen / nTrackers;
  final ArrayList<InputSplit> splits = new ArrayList<InputSplit>(nTrackers);
  final Pattern trackerPattern = Pattern.compile("tracker_([^:]*):.*");
  final Matcher m = trackerPattern.matcher("");
  for (String tracker : stat.getActiveTrackerNames()) {
    m.reset(tracker);
    if (!m.find()) {
      System.err.println("Skipping node: " + tracker);
      continue;
    }
    final String name = m.group(1);
    splits.add(new GenSplit(bytesPerTracker, new String[] { name }));
  }
  return splits;
}
Example 3
Source File: StressJobFactory.java From hadoop with Apache License 2.0
/**
 * STRESS Once you get the notification from StatsCollector.Collect the
 * clustermetrics. Update current loadStatus with new load status of JT.
 *
 * @param item
 */
@Override
public void update(Statistics.ClusterStats item) {
  ClusterStatus clusterStatus = item.getStatus();
  try {
    // update the max cluster map/reduce task capacity
    loadStatus.updateMapCapacity(clusterStatus.getMaxMapTasks());
    loadStatus.updateReduceCapacity(clusterStatus.getMaxReduceTasks());

    int numTrackers = clusterStatus.getTaskTrackers();
    int jobLoad =
      (int) (maxJobTrackerRatio * numTrackers) - item.getNumRunningJob();
    loadStatus.updateJobLoad(jobLoad);
  } catch (Exception e) {
    LOG.error("Couldn't get the new Status",e);
  }
}
Example 4
Source File: JobTrackerJspHelper.java From RDFS with Apache License 2.0
/**
 * Generates an XML-formatted block that summarizes the state of the JobTracker.
 */
public void generateSummaryTable(JspWriter out, JobTracker tracker)
    throws IOException {
  ClusterStatus status = tracker.getClusterStatus();
  int maxMapTasks = status.getMaxMapTasks();
  int maxReduceTasks = status.getMaxReduceTasks();
  int numTaskTrackers = status.getTaskTrackers();
  String tasksPerNodeStr;
  if (numTaskTrackers > 0) {
    double tasksPerNodePct =
      (double) (maxMapTasks + maxReduceTasks) / (double) numTaskTrackers;
    tasksPerNodeStr = percentFormat.format(tasksPerNodePct);
  } else {
    tasksPerNodeStr = "-";
  }
  out.print("<maps>" + status.getMapTasks() + "</maps>\n" +
            "<reduces>" + status.getReduceTasks() + "</reduces>\n" +
            "<total_submissions>" + tracker.getTotalSubmissions() +
              "</total_submissions>\n" +
            "<nodes>" + status.getTaskTrackers() + "</nodes>\n" +
            "<map_task_capacity>" + status.getMaxMapTasks() +
              "</map_task_capacity>\n" +
            "<reduce_task_capacity>" + status.getMaxReduceTasks() +
              "</reduce_task_capacity>\n" +
            "<avg_tasks_per_node>" + tasksPerNodeStr +
              "</avg_tasks_per_node>\n");
}
Example 5
Source File: GenerateDistCacheData.java From hadoop with Apache License 2.0
@Override
public List<InputSplit> getSplits(JobContext jobCtxt) throws IOException {
  final JobConf jobConf = new JobConf(jobCtxt.getConfiguration());
  final JobClient client = new JobClient(jobConf);
  ClusterStatus stat = client.getClusterStatus(true);
  int numTrackers = stat.getTaskTrackers();
  final int fileCount = jobConf.getInt(GRIDMIX_DISTCACHE_FILE_COUNT, -1);

  // Total size of distributed cache files to be generated
  final long totalSize = jobConf.getLong(GRIDMIX_DISTCACHE_BYTE_COUNT, -1);
  // Get the path of the special file
  String distCacheFileList = jobConf.get(GRIDMIX_DISTCACHE_FILE_LIST);
  if (fileCount < 0 || totalSize < 0 || distCacheFileList == null) {
    throw new RuntimeException("Invalid metadata: #files (" + fileCount
        + "), total_size (" + totalSize + "), filelisturi ("
        + distCacheFileList + ")");
  }

  Path sequenceFile = new Path(distCacheFileList);
  FileSystem fs = sequenceFile.getFileSystem(jobConf);
  FileStatus srcst = fs.getFileStatus(sequenceFile);
  // Consider the number of TTs * mapSlotsPerTracker as number of mappers.
  int numMapSlotsPerTracker = jobConf.getInt(TTConfig.TT_MAP_SLOTS, 2);
  int numSplits = numTrackers * numMapSlotsPerTracker;
  List<InputSplit> splits = new ArrayList<InputSplit>(numSplits);
  LongWritable key = new LongWritable();
  BytesWritable value = new BytesWritable();

  // Average size of data to be generated by each map task
  final long targetSize = Math.max(totalSize / numSplits,
                            DistributedCacheEmulator.AVG_BYTES_PER_MAP);
  long splitStartPosition = 0L;
  long splitEndPosition = 0L;
  long acc = 0L;
  long bytesRemaining = srcst.getLen();
  SequenceFile.Reader reader = null;
  try {
    reader = new SequenceFile.Reader(fs, sequenceFile, jobConf);
    while (reader.next(key, value)) {

      // If adding this file would put this split past the target size,
      // cut the last split and put this file in the next split.
      if (acc + key.get() > targetSize && acc != 0) {
        long splitSize = splitEndPosition - splitStartPosition;
        splits.add(new FileSplit(
            sequenceFile, splitStartPosition, splitSize, (String[])null));
        bytesRemaining -= splitSize;
        splitStartPosition = splitEndPosition;
        acc = 0L;
      }
      acc += key.get();
      splitEndPosition = reader.getPosition();
    }
  } finally {
    if (reader != null) {
      reader.close();
    }
  }
  if (bytesRemaining != 0) {
    splits.add(new FileSplit(
        sequenceFile, splitStartPosition, bytesRemaining, (String[])null));
  }
  return splits;
}