Examples of org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskAttemptInfo


Examples of org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskAttemptInfo

    @Override
    @SuppressWarnings("unchecked")
    public void dispatch(Event event) {
      if (recoveryMode) {
        if (event.getType() == TaskAttemptEventType.TA_CONTAINER_LAUNCHED) {
          TaskAttemptInfo attInfo = getTaskAttemptInfo(((TaskAttemptEvent) event)
              .getTaskAttemptID());
          LOG.info("Recovered Attempt start time " + attInfo.getStartTime());
          clock.setTime(attInfo.getStartTime());

        } else if (event.getType() == TaskAttemptEventType.TA_DONE
            || event.getType() == TaskAttemptEventType.TA_FAILMSG
            || event.getType() == TaskAttemptEventType.TA_KILL) {
          TaskAttemptInfo attInfo = getTaskAttemptInfo(((TaskAttemptEvent) event)
              .getTaskAttemptID());
          LOG.info("Recovered Attempt finish time " + attInfo.getFinishTime());
          clock.setTime(attInfo.getFinishTime());
        }

        else if (event.getType() == TaskEventType.T_ATTEMPT_FAILED
            || event.getType() == TaskEventType.T_ATTEMPT_KILLED
            || event.getType() == TaskEventType.T_ATTEMPT_SUCCEEDED) {
View Full Code Here

Examples of org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskAttemptInfo

        }
      }

      else if (event.getType() == ContainerAllocator.EventType.CONTAINER_REQ) {
        TaskAttemptId aId = ((ContainerAllocatorEvent) event).getAttemptID();
        TaskAttemptInfo attInfo = getTaskAttemptInfo(aId);
        LOG.debug("CONTAINER_REQ " + aId);
        sendAssignedEvent(aId, attInfo);
        return;
      }

      else if (event.getType() == TaskCleaner.EventType.TASK_CLEAN) {
        TaskAttemptId aId = ((TaskCleanupEvent) event).getAttemptID();
        LOG.debug("TASK_CLEAN");
        actualHandler.handle(new TaskAttemptEvent(aId,
            TaskAttemptEventType.TA_CLEANUP_DONE));
        return;
      }

      else if (event.getType() == ContainerLauncher.EventType.CONTAINER_REMOTE_LAUNCH) {
        TaskAttemptId aId = ((ContainerRemoteLaunchEvent) event)
            .getTaskAttemptID();
        TaskAttemptInfo attInfo = getTaskAttemptInfo(aId);
        actualHandler.handle(new TaskAttemptContainerLaunchedEvent(aId,
            attInfo.getShufflePort()));
        // send the status update event
        sendStatusUpdateEvent(aId, attInfo);

        TaskAttemptState state = TaskAttemptState.valueOf(attInfo.getTaskStatus());
        switch (state) {
        case SUCCEEDED:
          //recover the task output
          TaskAttemptContext taskContext = new TaskAttemptContextImpl(getConfig(),
              attInfo.getAttemptId());
          try {
            TaskType type = taskContext.getTaskAttemptID().getTaskID().getTaskType();
            int numReducers = taskContext.getConfiguration().getInt(MRJobConfig.NUM_REDUCES, 1);
            if(type == TaskType.REDUCE || (type == TaskType.MAP && numReducers <= 0)) {
              committer.recoverTask(taskContext);
              LOG.info("Recovered output from task attempt " + attInfo.getAttemptId());
            } else {
              LOG.info("Will not try to recover output for "
                  + taskContext.getTaskAttemptID());
            }
          } catch (IOException e) {
View Full Code Here

Examples of org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskAttemptInfo

    // Assert at taskAttempt level
    for (TaskInfo taskInfo : allTasks.values()) {
      int taskAttemptCount = taskInfo.getAllTaskAttempts().size();
      Assert
          .assertEquals("total number of task attempts ", 1, taskAttemptCount);
      TaskAttemptInfo taInfo = taskInfo.getAllTaskAttempts().values()
          .iterator().next();
      Assert.assertNotNull(taInfo.getContainerId());
      // Verify the wrong ctor is not being used. Remove after mrv1 is removed.
      Assert.assertFalse(taInfo.getContainerId().equals(fakeCid));
    }

    // Deep compare Job and JobInfo
    for (Task task : job.getTasks().values()) {
      TaskInfo taskInfo = allTasks.get(
          TypeConverter.fromYarn(task.getID()));
      Assert.assertNotNull("TaskInfo not found", taskInfo);
      for (TaskAttempt taskAttempt : task.getAttempts().values()) {
        TaskAttemptInfo taskAttemptInfo = taskInfo.getAllTaskAttempts().get(
            TypeConverter.fromYarn((taskAttempt.getID())));
        Assert.assertNotNull("TaskAttemptInfo not found", taskAttemptInfo);
        Assert.assertEquals("Incorrect shuffle port for task attempt",
            taskAttempt.getShufflePort(), taskAttemptInfo.getShufflePort());
        if (numMaps == numSuccessfulMaps) {
          Assert.assertEquals(MRApp.NM_HOST, taskAttemptInfo.getHostname());
          Assert.assertEquals(MRApp.NM_PORT, taskAttemptInfo.getPort());
         
          // Verify rack-name
          Assert.assertEquals("rack-name is incorrect", taskAttemptInfo
              .getRackname(), RACK_NAME);
        }
      }
    }
View Full Code Here

Examples of org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskAttemptInfo

    int noOffailedAttempts = 0;
    Map<TaskID, TaskInfo> allTasks = jobInfo.getAllTasks();
    for (Task task : job.getTasks().values()) {
      TaskInfo taskInfo = allTasks.get(TypeConverter.fromYarn(task.getID()));
      for (TaskAttempt taskAttempt : task.getAttempts().values()) {
        TaskAttemptInfo taskAttemptInfo = taskInfo.getAllTaskAttempts().get(
            TypeConverter.fromYarn((taskAttempt.getID())));
        // Verify rack-name for all task attempts
        Assert.assertEquals("rack-name is incorrect", taskAttemptInfo
            .getRackname(), RACK_NAME);
        if (taskAttemptInfo.getTaskStatus().equals("FAILED")) {
          noOffailedAttempts++;
        }
      }
    }
    Assert.assertEquals("No of Failed tasks doesn't match.", 2, noOffailedAttempts);
View Full Code Here

Examples of org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskAttemptInfo

    // Assert at taskAttempt level
    for (TaskInfo taskInfo : allTasks.values()) {
      int taskAttemptCount = taskInfo.getAllTaskAttempts().size();
      Assert
          .assertEquals("total number of task attempts ", 1, taskAttemptCount);
      TaskAttemptInfo taInfo = taskInfo.getAllTaskAttempts().values()
          .iterator().next();
      Assert.assertNotNull(taInfo.getContainerId());
      // Verify the wrong ctor is not being used. Remove after mrv1 is removed.
      Assert.assertFalse(taInfo.getContainerId().equals(fakeCid));
    }

    // Deep compare Job and JobInfo
    for (Task task : job.getTasks().values()) {
      TaskInfo taskInfo = allTasks.get(TypeConverter.fromYarn(task.getID()));
      Assert.assertNotNull("TaskInfo not found", taskInfo);
      for (TaskAttempt taskAttempt : task.getAttempts().values()) {
        TaskAttemptInfo taskAttemptInfo = taskInfo.getAllTaskAttempts().get(
            TypeConverter.fromYarn((taskAttempt.getID())));
        Assert.assertNotNull("TaskAttemptInfo not found", taskAttemptInfo);
        Assert.assertEquals("Incorrect shuffle port for task attempt",
            taskAttempt.getShufflePort(), taskAttemptInfo.getShufflePort());
        if (numMaps == numSuccessfulMaps) {
          Assert.assertEquals(MRApp.NM_HOST, taskAttemptInfo.getHostname());
          Assert.assertEquals(MRApp.NM_PORT, taskAttemptInfo.getPort());

          // Verify rack-name
          Assert.assertEquals("rack-name is incorrect",
              taskAttemptInfo.getRackname(), RACK_NAME);
        }
      }
    }

    // test output for HistoryViewer
View Full Code Here

Examples of org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskAttemptInfo

      int noOffailedAttempts = 0;
      Map<TaskID, TaskInfo> allTasks = jobInfo.getAllTasks();
      for (Task task : job.getTasks().values()) {
        TaskInfo taskInfo = allTasks.get(TypeConverter.fromYarn(task.getID()));
        for (TaskAttempt taskAttempt : task.getAttempts().values()) {
          TaskAttemptInfo taskAttemptInfo = taskInfo.getAllTaskAttempts().get(
              TypeConverter.fromYarn((taskAttempt.getID())));
          // Verify rack-name for all task attempts
          Assert.assertEquals("rack-name is incorrect",
              taskAttemptInfo.getRackname(), RACK_NAME);
          if (taskAttemptInfo.getTaskStatus().equals("FAILED")) {
            noOffailedAttempts++;
          }
        }
      }
      Assert.assertEquals("No of Failed tasks doesn't match.", 2,
View Full Code Here

Examples of org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskAttemptInfo

    int noOffailedAttempts = 0;
    Map<TaskID, TaskInfo> allTasks = jobInfo.getAllTasks();
    for (Task task : job.getTasks().values()) {
      TaskInfo taskInfo = allTasks.get(TypeConverter.fromYarn(task.getID()));
      for (TaskAttempt taskAttempt : task.getAttempts().values()) {
        TaskAttemptInfo taskAttemptInfo = taskInfo.getAllTaskAttempts().get(
            TypeConverter.fromYarn((taskAttempt.getID())));
        // Verify rack-name for all task attempts
        Assert.assertEquals("rack-name is incorrect", taskAttemptInfo
            .getRackname(), RACK_NAME);
        if (taskAttemptInfo.getTaskStatus().equals("FAILED")) {
          noOffailedAttempts++;
        }
      }
    }
    Assert.assertEquals("No of Failed tasks doesn't match.", 2, noOffailedAttempts);
View Full Code Here

Examples of org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskAttemptInfo

    // Assert at taskAttempt level
    for (TaskInfo taskInfo : allTasks.values()) {
      int taskAttemptCount = taskInfo.getAllTaskAttempts().size();
      Assert
          .assertEquals("total number of task attempts ", 1, taskAttemptCount);
      TaskAttemptInfo taInfo = taskInfo.getAllTaskAttempts().values()
          .iterator().next();
      Assert.assertNotNull(taInfo.getContainerId());
      // Verify the wrong ctor is not being used. Remove after mrv1 is removed.
      Assert.assertFalse(taInfo.getContainerId().equals(fakeCid));
    }

    // Deep compare Job and JobInfo
    for (Task task : job.getTasks().values()) {
      TaskInfo taskInfo = allTasks.get(
          TypeConverter.fromYarn(task.getID()));
      Assert.assertNotNull("TaskInfo not found", taskInfo);
      for (TaskAttempt taskAttempt : task.getAttempts().values()) {
        TaskAttemptInfo taskAttemptInfo = taskInfo.getAllTaskAttempts().get(
            TypeConverter.fromYarn((taskAttempt.getID())));
        Assert.assertNotNull("TaskAttemptInfo not found", taskAttemptInfo);
        Assert.assertEquals("Incorrect shuffle port for task attempt",
            taskAttempt.getShufflePort(), taskAttemptInfo.getShufflePort());
        if (numMaps == numSuccessfulMaps) {
          Assert.assertEquals(MRApp.NM_HOST, taskAttemptInfo.getHostname());
          Assert.assertEquals(MRApp.NM_PORT, taskAttemptInfo.getPort());
         
          // Verify rack-name
          Assert.assertEquals("rack-name is incorrect", taskAttemptInfo
              .getRackname(), RACK_NAME);
        }
      }
    }
  }
View Full Code Here

Examples of org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskAttemptInfo

//    List<TaskAttemptInfo> successfulReduceAttemptList =
//      new ArrayList<TaskAttemptInfo>();
    for (JobHistoryParser.TaskInfo taskInfo: taskMap.values()) {
      if (taskInfo.getTaskType().equals(TaskType.MAP)) {
        MapTaskStatistics mapT = new MapTaskStatistics();
        TaskAttemptInfo successfulAttempt  = 
          getLastSuccessfulTaskAttempt(taskInfo);
        mapT.setValue(MapTaskKeys.TASK_ID,
            successfulAttempt.getAttemptId().getTaskID().toString());
        mapT.setValue(MapTaskKeys.ATTEMPT_ID,
            successfulAttempt.getAttemptId().toString());
        mapT.setValue(MapTaskKeys.HOSTNAME,
            successfulAttempt.getTrackerName());
        mapT.setValue(MapTaskKeys.TASK_TYPE,
            successfulAttempt.getTaskType().toString());
        mapT.setValue(MapTaskKeys.STATUS,
            successfulAttempt.getTaskStatus().toString());
        mapT.setValue(MapTaskKeys.START_TIME, successfulAttempt.getStartTime());
        mapT.setValue(MapTaskKeys.FINISH_TIME, successfulAttempt.getFinishTime());
        mapT.setValue(MapTaskKeys.SPLITS, taskInfo.getSplitLocations());
        mapT.setValue(MapTaskKeys.TRACKER_NAME, successfulAttempt.getTrackerName());
        mapT.setValue(MapTaskKeys.STATE_STRING, successfulAttempt.getState());
        mapT.setValue(MapTaskKeys.HTTP_PORT, successfulAttempt.getHttpPort());
        mapT.setValue(MapTaskKeys.ERROR, successfulAttempt.getError());
        parseAndAddMapTaskCounters(mapT,
            successfulAttempt.getCounters().toString());
        mapTaskList.add(mapT);

        // Add number of task attempts
        mapT.setValue(MapTaskKeys.NUM_ATTEMPTS,
            (new Integer(taskInfo.getAllTaskAttempts().size())).toString());

        // Add EXECUTION_TIME = FINISH_TIME - START_TIME
        long etime = mapT.getLongValue(MapTaskKeys.FINISH_TIME) -
          mapT.getLongValue(MapTaskKeys.START_TIME);
        mapT.setValue(MapTaskKeys.EXECUTION_TIME, (new Long(etime)).toString());

      }else if (taskInfo.getTaskType().equals(TaskType.REDUCE)) {

        ReduceTaskStatistics reduceT = new ReduceTaskStatistics();
        TaskAttemptInfo successfulAttempt  =
          getLastSuccessfulTaskAttempt(taskInfo);
        reduceT.setValue(ReduceTaskKeys.TASK_ID,
            successfulAttempt.getAttemptId().getTaskID().toString());
        reduceT.setValue(ReduceTaskKeys.ATTEMPT_ID,
            successfulAttempt.getAttemptId().toString());
        reduceT.setValue(ReduceTaskKeys.HOSTNAME,
            successfulAttempt.getTrackerName());
        reduceT.setValue(ReduceTaskKeys.TASK_TYPE,
            successfulAttempt.getTaskType().toString());
        reduceT.setValue(ReduceTaskKeys.STATUS,
            successfulAttempt.getTaskStatus().toString());
        reduceT.setValue(ReduceTaskKeys.START_TIME,
            successfulAttempt.getStartTime());
        reduceT.setValue(ReduceTaskKeys.FINISH_TIME,
            successfulAttempt.getFinishTime());
        reduceT.setValue(ReduceTaskKeys.SHUFFLE_FINISH_TIME,
            successfulAttempt.getShuffleFinishTime());
        reduceT.setValue(ReduceTaskKeys.SORT_FINISH_TIME,
            successfulAttempt.getSortFinishTime());
        reduceT.setValue(ReduceTaskKeys.SPLITS, "");
        reduceT.setValue(ReduceTaskKeys.TRACKER_NAME,
            successfulAttempt.getTrackerName());
        reduceT.setValue(ReduceTaskKeys.STATE_STRING,
            successfulAttempt.getState());
        reduceT.setValue(ReduceTaskKeys.HTTP_PORT,
            successfulAttempt.getHttpPort());
        parseAndAddReduceTaskCounters(reduceT,
            successfulAttempt.getCounters().toString());

        reduceTaskList.add(reduceT);

        // Add number of task attempts
        reduceT.setValue(ReduceTaskKeys.NUM_ATTEMPTS,
View Full Code Here

Examples of org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskAttemptInfo

    @Override
    @SuppressWarnings("unchecked")
    public void dispatch(Event event) {
      if (recoveryMode) {
        if (event.getType() == TaskAttemptEventType.TA_CONTAINER_LAUNCHED) {
          TaskAttemptInfo attInfo = getTaskAttemptInfo(((TaskAttemptEvent) event)
              .getTaskAttemptID());
          LOG.info("Recovered Attempt start time " + attInfo.getStartTime());
          clock.setTime(attInfo.getStartTime());

        } else if (event.getType() == TaskAttemptEventType.TA_DONE
            || event.getType() == TaskAttemptEventType.TA_FAILMSG
            || event.getType() == TaskAttemptEventType.TA_KILL) {
          TaskAttemptInfo attInfo = getTaskAttemptInfo(((TaskAttemptEvent) event)
              .getTaskAttemptID());
          LOG.info("Recovered Attempt finish time " + attInfo.getFinishTime());
          clock.setTime(attInfo.getFinishTime());
        }

        else if (event.getType() == TaskEventType.T_ATTEMPT_FAILED
            || event.getType() == TaskEventType.T_ATTEMPT_KILLED
            || event.getType() == TaskEventType.T_ATTEMPT_SUCCEEDED) {
View Full Code Here
TOP
Copyright © 2018 www.massapi.com. All rights reserved.
All source code are property of their respective owners. Java is a trademark of Sun Microsystems, Inc and owned by ORACLE Inc. Contact coftware#gmail.com.