org.apache.hadoop.mapreduce.TaskID Java Examples
The following examples show how to use org.apache.hadoop.mapreduce.TaskID, drawn from a number of open-source projects. The project, source file, and license are noted above each example.
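As a quick orientation before the examples: a TaskID identifies a task within a job, and its canonical string form can be parsed back with TaskID.forName. The following minimal sketch is not taken from any of the projects below; the job identifier and numbers are arbitrary.

import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.TaskID;
import org.apache.hadoop.mapreduce.TaskType;

public class TaskIdRoundTrip {
  public static void main(String[] args) {
    // Build a TaskID from its parts: job, task type, and task number.
    JobID jobId = new JobID("201301010000", 1); // arbitrary jtIdentifier and job number
    TaskID taskId = new TaskID(jobId, TaskType.MAP, 42);

    // Prints "task_201301010000_0001_m_000042".
    System.out.println(taskId);

    // forName() parses the canonical string form back into a TaskID.
    TaskID parsed = TaskID.forName(taskId.toString());
    System.out.println(parsed.equals(taskId)); // true
  }
}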
Example #1
Source File: Task20LineHistoryEventEmitter.java From big-c with Apache License 2.0

HistoryEvent maybeEmitEvent(ParsedLine line, String taskIDName,
    HistoryEventEmitter thatg) {
  if (taskIDName == null) {
    return null;
  }
  TaskID taskID = TaskID.forName(taskIDName);

  String taskType = line.get("TASK_TYPE");
  String startTime = line.get("START_TIME");
  String splits = line.get("SPLITS");

  if (startTime != null && taskType != null) {
    Task20LineHistoryEventEmitter that =
        (Task20LineHistoryEventEmitter) thatg;

    that.originalStartTime = Long.parseLong(startTime);
    that.originalTaskType =
        Version20LogInterfaceUtils.get20TaskType(taskType);

    return new TaskStartedEvent(taskID, that.originalStartTime,
        that.originalTaskType, splits);
  }

  return null;
}
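Note that the null check above only guards against a missing field; TaskID.forName itself throws IllegalArgumentException when the string is not a well-formed task id, so code that parses untrusted log lines may want a guard like the following hypothetical variant (not part of the original emitter):

TaskID taskID;
try {
  taskID = TaskID.forName(taskIDName);
} catch (IllegalArgumentException e) {
  return null; // malformed task id in the log line
}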
Example #2
Source File: TestEvents.java From hadoop with Apache License 2.0

/**
 * test the getters of TaskAttemptFinishedEvent and TaskAttemptFinished
 *
 * @throws Exception
 */
@Test(timeout = 10000)
public void testTaskAttemptFinishedEvent() throws Exception {

  JobID jid = new JobID("001", 1);
  TaskID tid = new TaskID(jid, TaskType.REDUCE, 2);
  TaskAttemptID taskAttemptId = new TaskAttemptID(tid, 3);
  Counters counters = new Counters();
  TaskAttemptFinishedEvent test = new TaskAttemptFinishedEvent(taskAttemptId,
      TaskType.REDUCE, "TEST", 123L, "RAKNAME", "HOSTNAME", "STATUS",
      counters);
  assertEquals(test.getAttemptId().toString(), taskAttemptId.toString());

  assertEquals(test.getCounters(), counters);
  assertEquals(test.getFinishTime(), 123L);
  assertEquals(test.getHostname(), "HOSTNAME");
  assertEquals(test.getRackName(), "RAKNAME");
  assertEquals(test.getState(), "STATUS");
  assertEquals(test.getTaskId(), tid);
  assertEquals(test.getTaskStatus(), "TEST");
  assertEquals(test.getTaskType(), TaskType.REDUCE);
}
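For reference, the identifiers built in this test have predictable string forms. Assuming the standard id formatting, this small sketch prints them (expected values shown in comments):

JobID jid = new JobID("001", 1);
TaskID tid = new TaskID(jid, TaskType.REDUCE, 2);
TaskAttemptID taskAttemptId = new TaskAttemptID(tid, 3);

System.out.println(jid);           // job_001_0001
System.out.println(tid);           // task_001_0001_r_000002
System.out.println(taskAttemptId); // attempt_001_0001_r_000002_3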
Example #3
Source File: TestCompletedTask.java From hadoop with Apache License 2.0

/**
 * test some methods of CompletedTaskAttempt
 */
@Test(timeout = 5000)
public void testCompletedTaskAttempt() {

  TaskAttemptInfo attemptInfo = mock(TaskAttemptInfo.class);
  when(attemptInfo.getRackname()).thenReturn("Rackname");
  when(attemptInfo.getShuffleFinishTime()).thenReturn(11L);
  when(attemptInfo.getSortFinishTime()).thenReturn(12L);
  when(attemptInfo.getShufflePort()).thenReturn(10);

  JobID jobId = new JobID("12345", 0);
  TaskID taskId = new TaskID(jobId, TaskType.REDUCE, 0);
  TaskAttemptID taskAttemptId = new TaskAttemptID(taskId, 0);
  when(attemptInfo.getAttemptId()).thenReturn(taskAttemptId);

  CompletedTaskAttempt taskAttemt = new CompletedTaskAttempt(null, attemptInfo);
  assertEquals("Rackname", taskAttemt.getNodeRackName());
  assertEquals(Phase.CLEANUP, taskAttemt.getPhase());
  assertTrue(taskAttemt.isFinished());
  assertEquals(11L, taskAttemt.getShuffleFinishTime());
  assertEquals(12L, taskAttemt.getSortFinishTime());
  assertEquals(10, taskAttemt.getShufflePort());
}
Example #4
Source File: TestJobHistoryParsing.java From hadoop with Apache License 2.0

private long computeFinishedMaps(JobInfo jobInfo, int numMaps,
    int numSuccessfulMaps) {
  if (numMaps == numSuccessfulMaps) {
    return jobInfo.getFinishedMaps();
  }

  long numFinishedMaps = 0;
  Map<org.apache.hadoop.mapreduce.TaskID, TaskInfo> taskInfos = jobInfo
      .getAllTasks();
  for (TaskInfo taskInfo : taskInfos.values()) {
    if (TaskState.SUCCEEDED.toString().equals(taskInfo.getTaskStatus())) {
      ++numFinishedMaps;
    }
  }
  return numFinishedMaps;
}
Example #5
Source File: CompletedJob.java From hadoop with Apache License 2.0

private void loadAllTasks() {
  if (tasksLoaded.get()) {
    return;
  }
  tasksLock.lock();
  try {
    if (tasksLoaded.get()) {
      return;
    }
    for (Map.Entry<TaskID, TaskInfo> entry : jobInfo.getAllTasks().entrySet()) {
      TaskId yarnTaskID = TypeConverter.toYarn(entry.getKey());
      TaskInfo taskInfo = entry.getValue();
      Task task = new CompletedTask(yarnTaskID, taskInfo);
      tasks.put(yarnTaskID, task);
      if (task.getType() == TaskType.MAP) {
        mapTasks.put(task.getID(), task);
      } else if (task.getType() == TaskType.REDUCE) {
        reduceTasks.put(task.getID(), task);
      }
    }
    tasksLoaded.set(true);
  } finally {
    tasksLock.unlock();
  }
}
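Here TypeConverter.toYarn bridges the two id worlds: it converts the classic org.apache.hadoop.mapreduce.TaskID parsed from the job history file into the YARN-side org.apache.hadoop.mapreduce.v2.api.records.TaskId that the history server's Task objects are keyed by.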
Example #6
Source File: TestJobHistoryParsing.java From big-c with Apache License 2.0

private long computeFinishedMaps(JobInfo jobInfo, int numMaps,
    int numSuccessfulMaps) {
  if (numMaps == numSuccessfulMaps) {
    return jobInfo.getFinishedMaps();
  }

  long numFinishedMaps = 0;
  Map<org.apache.hadoop.mapreduce.TaskID, TaskInfo> taskInfos = jobInfo
      .getAllTasks();
  for (TaskInfo taskInfo : taskInfos.values()) {
    if (TaskState.SUCCEEDED.toString().equals(taskInfo.getTaskStatus())) {
      ++numFinishedMaps;
    }
  }
  return numFinishedMaps;
}
Example #7
Source File: ConfigurableHDFSFileSink.java From components with Apache License 2.0

@Override
public void open(String uId) throws Exception {
  this.hash = uId.hashCode();
  Job job = ((ConfigurableHDFSFileSink<K, V>) getWriteOperation().getSink()).jobInstance();
  FileOutputFormat.setOutputPath(job, new Path(path));

  // Each Writer is responsible for writing one bundle of elements and is represented by one
  // unique Hadoop task based on uId/hash. All tasks share the same job ID. Since Dataflow
  // handles retrying of failed bundles, each task has one attempt only.
  JobID jobId = job.getJobID();
  TaskID taskId = new TaskID(jobId, TaskType.REDUCE, hash);
  configure(job);
  context = new TaskAttemptContextImpl(job.getConfiguration(), new TaskAttemptID(taskId, 0));

  FileOutputFormat<K, V> outputFormat = formatClass.newInstance();
  recordWriter = outputFormat.getRecordWriter(context);
  outputCommitter = (FileOutputCommitter) outputFormat.getOutputCommitter(context);
}
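The same trick, fabricating a TaskID and TaskAttemptID so an OutputFormat can be driven outside a live MapReduce job, works with the stock formats too. A minimal sketch assuming the default FileSystem and TextOutputFormat; the output path and job identifier are arbitrary, not taken from the example above:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.*;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl;

public class ManualWrite {
  public static void main(String[] args) throws Exception {
    Job job = Job.getInstance(new Configuration());
    FileOutputFormat.setOutputPath(job, new Path("/tmp/manual-out"));

    // One synthetic reduce task, attempt 0 -- mirrors the pattern above.
    JobID jobId = new JobID("local", 0);
    TaskID taskId = new TaskID(jobId, TaskType.REDUCE, 0);
    TaskAttemptContext ctx = new TaskAttemptContextImpl(
        job.getConfiguration(), new TaskAttemptID(taskId, 0));

    TextOutputFormat<NullWritable, Text> format = new TextOutputFormat<>();
    RecordWriter<NullWritable, Text> writer = format.getRecordWriter(ctx);
    writer.write(NullWritable.get(), new Text("hello"));
    writer.close(ctx);
    format.getOutputCommitter(ctx).commitTask(ctx);
  }
}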
Example #8
Source File: TaskFailedEvent.java From hadoop with Apache License 2.0

public void setDatum(Object odatum) {
  this.datum = (TaskFailed) odatum;
  this.id = TaskID.forName(datum.taskid.toString());
  this.taskType = TaskType.valueOf(datum.taskType.toString());
  this.finishTime = datum.finishTime;
  this.error = datum.error.toString();
  this.failedDueToAttempt =
      datum.failedDueToAttempt == null
          ? null
          : TaskAttemptID.forName(datum.failedDueToAttempt.toString());
  this.status = datum.status.toString();
  this.counters = EventReader.fromAvro(datum.counters);
}
Example #9
Source File: HDFSSynchronization.java From beam with Apache License 2.0

@Override
public TaskID acquireTaskIdLock(Configuration conf) {
  JobID jobId = HadoopFormats.getJobId(conf);
  boolean lockAcquired = false;
  int taskIdCandidate = 0;

  while (!lockAcquired) {
    taskIdCandidate = RANDOM_GEN.nextInt(Integer.MAX_VALUE);
    Path path =
        new Path(
            locksDir,
            String.format(LOCKS_DIR_TASK_PATTERN, getJobJtIdentifier(conf), taskIdCandidate));
    lockAcquired = tryCreateFile(conf, path);
  }

  return HadoopFormats.createTaskID(jobId, taskIdCandidate);
}
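The tryCreateFile helper is not shown in this excerpt. A plausible minimal implementation (an assumption, not the Beam source) relies on FileSystem.createNewFile returning false when the path already exists, which is what makes a file usable as a lock:

// Hypothetical sketch of the elided helper.
private static boolean tryCreateFile(Configuration conf, Path path) {
  try {
    FileSystem fs = path.getFileSystem(conf);
    return fs.createNewFile(path); // false if another worker holds the lock
  } catch (IOException e) {
    return false; // treat any failure as "lock not acquired"
  }
}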
Example #10
Source File: TestS3MultipartOutputCommitter.java From s3committer with Apache License 2.0

private static Set<String> runTasks(JobContext job, int numTasks, int numFiles)
    throws IOException {
  Set<String> uploads = Sets.newHashSet();

  for (int taskId = 0; taskId < numTasks; taskId += 1) {
    TaskAttemptID attemptID = new TaskAttemptID(
        new TaskID(JOB_ID, TaskType.REDUCE, taskId),
        (taskId * 37) % numTasks);
    TaskAttemptContext attempt = new TaskAttemptContextImpl(
        new Configuration(job.getConfiguration()), attemptID);
    MockedS3Committer taskCommitter = new MockedS3Committer(
        S3_OUTPUT_PATH, attempt);
    commitTask(taskCommitter, attempt, numFiles);
    uploads.addAll(taskCommitter.results.getUploads());
  }

  return uploads;
}
Example #11
Source File: TestCompletedTask.java From big-c with Apache License 2.0

/**
 * test some methods of CompletedTaskAttempt
 */
@Test(timeout = 5000)
public void testCompletedTaskAttempt() {

  TaskAttemptInfo attemptInfo = mock(TaskAttemptInfo.class);
  when(attemptInfo.getRackname()).thenReturn("Rackname");
  when(attemptInfo.getShuffleFinishTime()).thenReturn(11L);
  when(attemptInfo.getSortFinishTime()).thenReturn(12L);
  when(attemptInfo.getShufflePort()).thenReturn(10);

  JobID jobId = new JobID("12345", 0);
  TaskID taskId = new TaskID(jobId, TaskType.REDUCE, 0);
  TaskAttemptID taskAttemptId = new TaskAttemptID(taskId, 0);
  when(attemptInfo.getAttemptId()).thenReturn(taskAttemptId);

  CompletedTaskAttempt taskAttemt = new CompletedTaskAttempt(null, attemptInfo);
  assertEquals("Rackname", taskAttemt.getNodeRackName());
  assertEquals(Phase.CLEANUP, taskAttemt.getPhase());
  assertTrue(taskAttemt.isFinished());
  assertEquals(11L, taskAttemt.getShuffleFinishTime());
  assertEquals(12L, taskAttemt.getSortFinishTime());
  assertEquals(10, taskAttemt.getShufflePort());
}
Example #12
Source File: FileOutputFormat.java From hadoop with Apache License 2.0

/**
 * Generate a unique filename, based on the task id, name, and extension
 * @param context the task that is calling this
 * @param name the base filename
 * @param extension the filename extension
 * @return a string like $name-[mrsct]-$id$extension
 */
public synchronized static String getUniqueFile(TaskAttemptContext context,
    String name, String extension) {
  TaskID taskId = context.getTaskAttemptID().getTaskID();
  int partition = taskId.getId();
  StringBuilder result = new StringBuilder();
  result.append(name);
  result.append('-');
  result.append(TaskID.getRepresentingCharacter(taskId.getTaskType()));
  result.append('-');
  result.append(NUMBER_FORMAT.format(partition));
  result.append(extension);
  return result.toString();
}
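The bracketed characters in the @return tag are the per-type markers from TaskID.getRepresentingCharacter: m (map), r (reduce), s (job setup), c (job cleanup), t (task cleanup). A small usage sketch, assuming the default five-digit NUMBER_FORMAT:

TaskAttemptID attempt =
    TaskAttemptID.forName("attempt_200707121733_0003_r_000005_0");
TaskAttemptContext ctx =
    new TaskAttemptContextImpl(new Configuration(), attempt);

// Prints "part-r-00005.gz": base name, reduce marker, zero-padded task id.
System.out.println(FileOutputFormat.getUniqueFile(ctx, "part", ".gz"));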
Example #13
Source File: Task20LineHistoryEventEmitter.java From big-c with Apache License 2.0

HistoryEvent maybeEmitEvent(ParsedLine line, String taskIDName,
    HistoryEventEmitter thatg) {
  if (taskIDName == null) {
    return null;
  }
  TaskID taskID = TaskID.forName(taskIDName);

  String finishTime = line.get("FINISH_TIME");

  if (finishTime != null) {
    return new TaskUpdatedEvent(taskID, Long.parseLong(finishTime));
  }

  return null;
}
Example #14
Source File: CompletedJob.java From big-c with Apache License 2.0

private void loadAllTasks() {
  if (tasksLoaded.get()) {
    return;
  }
  tasksLock.lock();
  try {
    if (tasksLoaded.get()) {
      return;
    }
    for (Map.Entry<TaskID, TaskInfo> entry : jobInfo.getAllTasks().entrySet()) {
      TaskId yarnTaskID = TypeConverter.toYarn(entry.getKey());
      TaskInfo taskInfo = entry.getValue();
      Task task = new CompletedTask(yarnTaskID, taskInfo);
      tasks.put(yarnTaskID, task);
      if (task.getType() == TaskType.MAP) {
        mapTasks.put(task.getID(), task);
      } else if (task.getType() == TaskType.REDUCE) {
        reduceTasks.put(task.getID(), task);
      }
    }
    tasksLoaded.set(true);
  } finally {
    tasksLock.unlock();
  }
}
Example #15
Source File: DynamicInputChunk.java From big-c with Apache License 2.0

/**
 * Reassigns the chunk to a specified Map-Task, for consumption.
 * @param taskId The Map-Task to which the chunk is to be reassigned.
 * @throws IOException Exception on failure to reassign.
 */
public void assignTo(TaskID taskId) throws IOException {
  Path newPath = new Path(chunkRootPath, taskId.toString());
  if (!fs.rename(chunkFilePath, newPath)) {
    LOG.warn(chunkFilePath + " could not be assigned to " + taskId);
  }
}
Example #16
Source File: FileOutputFormat.java From RDFS with Apache License 2.0

/**
 * Generate a unique filename, based on the task id, name, and extension
 * @param context the task that is calling this
 * @param name the base filename
 * @param extension the filename extension
 * @return a string like $name-[mr]-$id$extension
 */
public synchronized static String getUniqueFile(TaskAttemptContext context,
    String name, String extension) {
  TaskID taskId = context.getTaskAttemptID().getTaskID();
  int partition = taskId.getId();
  StringBuilder result = new StringBuilder();
  result.append(name);
  result.append('-');
  result.append(taskId.isMap() ? 'm' : 'r');
  result.append('-');
  result.append(NUMBER_FORMAT.format(partition));
  result.append(extension);
  return result.toString();
}
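Compared with Example #12, this older variant distinguishes only map and reduce tasks via taskId.isMap(), so the marker character is limited to m or r rather than the full [mrsct] set produced by TaskID.getRepresentingCharacter.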
Example #17
Source File: TestShuffleScheduler.java From hadoop with Apache License 2.0

@SuppressWarnings("rawtypes")
@Test
public void testTipFailed() throws Exception {
  JobConf job = new JobConf();
  job.setNumMapTasks(2);

  TaskStatus status = new TaskStatus() {
    @Override
    public boolean getIsMap() {
      return false;
    }

    @Override
    public void addFetchFailedMap(TaskAttemptID mapTaskId) {
    }
  };
  Progress progress = new Progress();

  TaskAttemptID reduceId = new TaskAttemptID("314159", 0, TaskType.REDUCE,
      0, 0);
  ShuffleSchedulerImpl scheduler = new ShuffleSchedulerImpl(job, status,
      reduceId, null, progress, null, null, null);

  JobID jobId = new JobID();
  TaskID taskId1 = new TaskID(jobId, TaskType.REDUCE, 1);
  scheduler.tipFailed(taskId1);

  Assert.assertEquals("Progress should be 0.5", 0.5f, progress.getProgress(),
      0.0f);
  Assert.assertFalse(scheduler.waitUntilDone(1));

  TaskID taskId0 = new TaskID(jobId, TaskType.REDUCE, 0);
  scheduler.tipFailed(taskId0);
  Assert.assertEquals("Progress should be 1.0", 1.0f, progress.getProgress(),
      0.0f);
  Assert.assertTrue(scheduler.waitUntilDone(1));
}
Example #18
Source File: TaskFinishedEvent.java From big-c with Apache License 2.0

public void setDatum(Object oDatum) {
  this.datum = (TaskFinished) oDatum;
  this.taskid = TaskID.forName(datum.taskid.toString());
  if (datum.successfulAttemptId != null) {
    this.successfulAttemptId = TaskAttemptID
        .forName(datum.successfulAttemptId.toString());
  }
  this.finishTime = datum.finishTime;
  this.taskType = TaskType.valueOf(datum.taskType.toString());
  this.status = datum.status.toString();
  this.counters = EventReader.fromAvro(datum.counters);
}
Example #19
Source File: Task20LineHistoryEventEmitter.java From hadoop with Apache License 2.0

HistoryEvent maybeEmitEvent(ParsedLine line, String taskIDName,
    HistoryEventEmitter thatg) {
  if (taskIDName == null) {
    return null;
  }
  TaskID taskID = TaskID.forName(taskIDName);

  String status = line.get("TASK_STATUS");
  String finishTime = line.get("FINISH_TIME");
  String error = line.get("ERROR");
  String counters = line.get("COUNTERS");

  if (finishTime != null && error == null
      && (status != null && status.equalsIgnoreCase("success"))) {
    Counters eventCounters = maybeParseCounters(counters);
    Task20LineHistoryEventEmitter that = (Task20LineHistoryEventEmitter) thatg;

    if (that.originalTaskType == null) {
      return null;
    }

    return new TaskFinishedEvent(taskID, null, Long.parseLong(finishTime),
        that.originalTaskType, status, eventCounters);
  }

  return null;
}
Example #20
Source File: MapReduceFSFetcherHadoop2Test.java From dr-elephant with Apache License 2.0

public MockTaskInfo(int id, boolean succeeded) {
  this.taskId = new TaskID("job1", 1, TaskType.MAP, id);
  this.taskType = TaskType.MAP;
  this.succeeded = succeeded;
  this.counters = new Counters();
  this.finishTime = System.currentTimeMillis();
  this.startTime = finishTime - 10000;
  this.failedDueToAttemptId = new TaskAttemptID(taskId, 0);
  this.successfulAttemptId = new TaskAttemptID(taskId, 1);
  this.attemptsMap = new HashMap<TaskAttemptID, JobHistoryParser.TaskAttemptInfo>();
  this.attemptsMap.put(failedDueToAttemptId, new JobHistoryParser.TaskAttemptInfo());
  this.attemptsMap.put(successfulAttemptId, new JobHistoryParser.TaskAttemptInfo());
}
Example #21
Source File: TestJobInfo.java From big-c with Apache License 2.0

@Test(timeout = 5000)
public void testTaskID() throws IOException, InterruptedException {
  JobID jobid = new JobID("1014873536921", 6);
  TaskID tid = new TaskID(jobid, TaskType.MAP, 0);
  org.apache.hadoop.mapred.TaskID tid1 =
      org.apache.hadoop.mapred.TaskID.downgrade(tid);
  org.apache.hadoop.mapred.TaskReport treport =
      new org.apache.hadoop.mapred.TaskReport(tid1, 0.0f,
          State.FAILED.toString(), null, TIPStatus.FAILED, 100, 100,
          new org.apache.hadoop.mapred.Counters());
  Assert.assertEquals(treport.getTaskId(),
      "task_1014873536921_0006_m_000000");
  Assert.assertEquals(treport.getTaskID().toString(),
      "task_1014873536921_0006_m_000000");
}
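The expected string in the assertions illustrates the canonical layout, task_<jtIdentifier>_<jobId>_<type>_<taskId>: the job number is zero-padded to four digits, the task number to six, and the type character is m for map tasks, which yields task_1014873536921_0006_m_000000 here.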
Example #22
Source File: TaskFinishedEvent.java From big-c with Apache License 2.0

/**
 * Create an event to record the successful completion of a task
 * @param id Task ID
 * @param attemptId Task Attempt ID of the successful attempt for this task
 * @param finishTime Finish time of the task
 * @param taskType Type of the task
 * @param status Status string
 * @param counters Counters for the task
 */
public TaskFinishedEvent(TaskID id, TaskAttemptID attemptId, long finishTime,
    TaskType taskType, String status, Counters counters) {
  this.taskid = id;
  this.successfulAttemptId = attemptId;
  this.finishTime = finishTime;
  this.taskType = taskType;
  this.status = status;
  this.counters = counters;
}
Example #23
Source File: PartialFileOutputCommitter.java From big-c with Apache License 2.0

@Override
public void cleanUpPartialOutputForTask(TaskAttemptContext context)
    throws IOException {

  // we double check this is never invoked from a non-preemptable subclass.
  // This should never happen, since the invoking code is checking it too,
  // but it is safer to double check. Errors handling this would produce
  // inconsistent output.
  if (!this.getClass().isAnnotationPresent(Checkpointable.class)) {
    throw new IllegalStateException("Invoking cleanUpPartialOutputForTask() "
        + "from non @Preemptable class");
  }
  FileSystem fs =
      fsFor(getTaskAttemptPath(context), context.getConfiguration());

  LOG.info("cleanUpPartialOutputForTask: removing everything belonging to "
      + context.getTaskAttemptID().getTaskID() + " in: "
      + getCommittedTaskPath(context).getParent());

  final TaskAttemptID taid = context.getTaskAttemptID();
  final TaskID tid = taid.getTaskID();
  Path pCommit = getCommittedTaskPath(context).getParent();
  // remove any committed output
  for (int i = 0; i < taid.getId(); ++i) {
    TaskAttemptID oldId = new TaskAttemptID(tid, i);
    Path pTask = new Path(pCommit, oldId.toString());
    if (fs.exists(pTask) && !fs.delete(pTask, true)) {
      throw new IOException("Failed to delete " + pTask);
    }
  }
}
Example #24
Source File: HistoryViewer.java From big-c with Apache License 2.0

private void printTasks(TaskType taskType, String status) {
  Map<TaskID, JobHistoryParser.TaskInfo> tasks = job.getAllTasks();
  StringBuffer header = new StringBuffer();
  header.append("\n").append(status).append(" ");
  header.append(taskType).append(" task list for ").append(jobId);
  header.append("\nTaskId\t\tStartTime\tFinishTime\tError");
  if (TaskType.MAP.equals(taskType)) {
    header.append("\tInputSplits");
  }
  header.append("\n====================================================");
  StringBuffer taskList = new StringBuffer();
  for (JobHistoryParser.TaskInfo task : tasks.values()) {
    if (taskType.equals(task.getTaskType()) &&
        (status.equals(task.getTaskStatus())
         || status.equalsIgnoreCase("ALL"))) {
      taskList.setLength(0);
      taskList.append(task.getTaskId());
      taskList.append("\t").append(StringUtils.getFormattedTimeWithDiff(
          dateFormat, task.getStartTime(), 0));
      taskList.append("\t").append(StringUtils.getFormattedTimeWithDiff(
          dateFormat, task.getFinishTime(), task.getStartTime()));
      taskList.append("\t").append(task.getError());
      if (TaskType.MAP.equals(taskType)) {
        taskList.append("\t").append(task.getSplitLocations());
      }
      if (taskList != null) {
        System.out.println(header.toString());
        System.out.println(taskList.toString());
      }
    }
  }
}
Example #25
Source File: ZombieJob.java From hadoop with Apache License 2.0

/**
 * Mask the job ID part in a {@link TaskAttemptID}.
 *
 * @param attemptId
 *          raw {@link TaskAttemptID} read from trace
 * @return masked {@link TaskAttemptID} with empty {@link JobID}.
 */
private TaskAttemptID maskAttemptID(TaskAttemptID attemptId) {
  JobID jobId = new JobID();
  TaskType taskType = attemptId.getTaskType();
  TaskID taskId = attemptId.getTaskID();
  return new TaskAttemptID(jobId.getJtIdentifier(), jobId.getId(), taskType,
      taskId.getId(), attemptId.getId());
}
Example #26
Source File: CustomFileNameFileOutputFormat.java From aegisthus with Apache License 2.0

/**
 * Generate a unique filename, based on the task id, name, and extension
 * @param context the task that is calling this
 * @param name the base filename
 * @param extension the filename extension
 * @return a string like $name-$id$extension
 */
protected synchronized String getCustomFileName(TaskAttemptContext context,
    String name, String extension) {
  TaskID taskId = context.getTaskAttemptID().getTaskID();
  int partition = taskId.getId();
  return name + '-' + NUMBER_FORMAT.format(partition) + extension;
}
Example #27
Source File: ShuffleSchedulerImpl.java From hadoop with Apache License 2.0

public synchronized void tipFailed(TaskID taskId) {
  if (!finishedMaps[taskId.getId()]) {
    finishedMaps[taskId.getId()] = true;
    if (--remainingMaps == 0) {
      notifyAll();
    }
    updateStatus();
  }
}
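This is the method exercised by the test in Example #17: each TaskID's numeric id indexes one slot in the finishedMaps array, and the reducer's shuffle progress reaches 1.0 once every map slot has been marked finished.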
Example #28
Source File: HistoryViewer.java From big-c with Apache License 2.0

/** Apply the filter (status) on the parsed job and generate summary */
public FilteredJob(JobInfo job, String status) {

  filter = status;

  Map<TaskID, JobHistoryParser.TaskInfo> tasks = job.getAllTasks();

  for (JobHistoryParser.TaskInfo task : tasks.values()) {
    Map<TaskAttemptID, JobHistoryParser.TaskAttemptInfo> attempts =
        task.getAllTaskAttempts();
    for (JobHistoryParser.TaskAttemptInfo attempt : attempts.values()) {
      if (attempt.getTaskStatus().equals(status)) {
        String hostname = attempt.getHostname();
        TaskID id = attempt.getAttemptId().getTaskID();

        Set<TaskID> set = badNodesToFilteredTasks.get(hostname);

        if (set == null) {
          set = new TreeSet<TaskID>();
          set.add(id);
          badNodesToFilteredTasks.put(hostname, set);
        } else {
          set.add(id);
        }
      }
    }
  }
}
Example #29
Source File: TaskFailedEvent.java From big-c with Apache License 2.0

/**
 * Create an event to record task failure
 * @param id Task ID
 * @param finishTime Finish time of the task
 * @param taskType Type of the task
 * @param error Error String
 * @param status Status
 * @param failedDueToAttempt The attempt id due to which the task failed
 * @param counters Counters for the task
 */
public TaskFailedEvent(TaskID id, long finishTime,
    TaskType taskType, String error, String status,
    TaskAttemptID failedDueToAttempt, Counters counters) {
  this.id = id;
  this.finishTime = finishTime;
  this.taskType = taskType;
  this.error = error;
  this.status = status;
  this.failedDueToAttempt = failedDueToAttempt;
  this.counters = counters;
}
Example #30
Source File: TaskFailedEvent.java From hadoop with Apache License 2.0

/**
 * Create an event to record task failure
 * @param id Task ID
 * @param finishTime Finish time of the task
 * @param taskType Type of the task
 * @param error Error String
 * @param status Status
 * @param failedDueToAttempt The attempt id due to which the task failed
 * @param counters Counters for the task
 */
public TaskFailedEvent(TaskID id, long finishTime,
    TaskType taskType, String error, String status,
    TaskAttemptID failedDueToAttempt, Counters counters) {
  this.id = id;
  this.finishTime = finishTime;
  this.taskType = taskType;
  this.error = error;
  this.status = status;
  this.failedDueToAttempt = failedDueToAttempt;
  this.counters = counters;
}