Examples of TaskAttemptInfo


Examples of org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskAttemptInfo

    int noOffailedAttempts = 0;
    Map<TaskID, TaskInfo> allTasks = jobInfo.getAllTasks();
    for (Task task : job.getTasks().values()) {
      TaskInfo taskInfo = allTasks.get(TypeConverter.fromYarn(task.getID()));
      for (TaskAttempt taskAttempt : task.getAttempts().values()) {
        TaskAttemptInfo taskAttemptInfo = taskInfo.getAllTaskAttempts().get(
            TypeConverter.fromYarn((taskAttempt.getID())));
        // Verify rack-name for all task attempts
        Assert.assertEquals("rack-name is incorrect", taskAttemptInfo
            .getRackname(), RACK_NAME);
        if (taskAttemptInfo.getTaskStatus().equals("FAILED")) {
          noOffailedAttempts++;
        }
      }
    }
    Assert.assertEquals("No of Failed tasks doesn't match.", 2, noOffailedAttempts);
View Full Code Here

Examples of org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskAttemptInfo

//    List<TaskAttemptInfo> successfulReduceAttemptList =
//      new ArrayList<TaskAttemptInfo>();
    for (JobHistoryParser.TaskInfo taskInfo: taskMap.values()) {
      if (taskInfo.getTaskType().equals(TaskType.MAP)) {
        MapTaskStatistics mapT = new MapTaskStatistics();
        TaskAttemptInfo successfulAttempt  = 
          getLastSuccessfulTaskAttempt(taskInfo);
        mapT.setValue(MapTaskKeys.TASK_ID,
            successfulAttempt.getAttemptId().getTaskID().toString());
        mapT.setValue(MapTaskKeys.ATTEMPT_ID,
            successfulAttempt.getAttemptId().toString());
        mapT.setValue(MapTaskKeys.HOSTNAME,
            successfulAttempt.getTrackerName());
        mapT.setValue(MapTaskKeys.TASK_TYPE,
            successfulAttempt.getTaskType().toString());
        mapT.setValue(MapTaskKeys.STATUS,
            successfulAttempt.getTaskStatus().toString());
        mapT.setValue(MapTaskKeys.START_TIME, successfulAttempt.getStartTime());
        mapT.setValue(MapTaskKeys.FINISH_TIME, successfulAttempt.getFinishTime());
        mapT.setValue(MapTaskKeys.SPLITS, taskInfo.getSplitLocations());
        mapT.setValue(MapTaskKeys.TRACKER_NAME, successfulAttempt.getTrackerName());
        mapT.setValue(MapTaskKeys.STATE_STRING, successfulAttempt.getState());
        mapT.setValue(MapTaskKeys.HTTP_PORT, successfulAttempt.getHttpPort());
        mapT.setValue(MapTaskKeys.ERROR, successfulAttempt.getError());
        parseAndAddMapTaskCounters(mapT,
            successfulAttempt.getCounters().toString());
        mapTaskList.add(mapT);

        // Add number of task attempts
        mapT.setValue(MapTaskKeys.NUM_ATTEMPTS,
            (new Integer(taskInfo.getAllTaskAttempts().size())).toString());

        // Add EXECUTION_TIME = FINISH_TIME - START_TIME
        long etime = mapT.getLongValue(MapTaskKeys.FINISH_TIME) -
          mapT.getLongValue(MapTaskKeys.START_TIME);
        mapT.setValue(MapTaskKeys.EXECUTION_TIME, (new Long(etime)).toString());

      }else if (taskInfo.getTaskType().equals(TaskType.REDUCE)) {

        ReduceTaskStatistics reduceT = new ReduceTaskStatistics();
        TaskAttemptInfo successfulAttempt  =
          getLastSuccessfulTaskAttempt(taskInfo);
        reduceT.setValue(ReduceTaskKeys.TASK_ID,
            successfulAttempt.getAttemptId().getTaskID().toString());
        reduceT.setValue(ReduceTaskKeys.ATTEMPT_ID,
            successfulAttempt.getAttemptId().toString());
        reduceT.setValue(ReduceTaskKeys.HOSTNAME,
            successfulAttempt.getTrackerName());
        reduceT.setValue(ReduceTaskKeys.TASK_TYPE,
            successfulAttempt.getTaskType().toString());
        reduceT.setValue(ReduceTaskKeys.STATUS,
            successfulAttempt.getTaskStatus().toString());
        reduceT.setValue(ReduceTaskKeys.START_TIME,
            successfulAttempt.getStartTime());
        reduceT.setValue(ReduceTaskKeys.FINISH_TIME,
            successfulAttempt.getFinishTime());
        reduceT.setValue(ReduceTaskKeys.SHUFFLE_FINISH_TIME,
            successfulAttempt.getShuffleFinishTime());
        reduceT.setValue(ReduceTaskKeys.SORT_FINISH_TIME,
            successfulAttempt.getSortFinishTime());
        reduceT.setValue(ReduceTaskKeys.SPLITS, "");
        reduceT.setValue(ReduceTaskKeys.TRACKER_NAME,
            successfulAttempt.getTrackerName());
        reduceT.setValue(ReduceTaskKeys.STATE_STRING,
            successfulAttempt.getState());
        reduceT.setValue(ReduceTaskKeys.HTTP_PORT,
            successfulAttempt.getHttpPort());
        parseAndAddReduceTaskCounters(reduceT,
            successfulAttempt.getCounters().toString());

        reduceTaskList.add(reduceT);

        // Add number of task attempts
        reduceT.setValue(ReduceTaskKeys.NUM_ATTEMPTS,
View Full Code Here

Examples of org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskAttemptInfo

            th(".tsh", "Finished").
            th(".tsh", "Elapsed").
            th(".note", "Note")._()._().
        tbody();
      for (TaskAttempt attempt : getTaskAttempts()) {
        TaskAttemptInfo ta = new TaskAttemptInfo(attempt, true);
        String taid = ta.getId();
        String progress = percent(ta.getProgress() / 100);
        ContainerId containerId = ta.getAssignedContainerId();

        String nodeHttpAddr = ta.getNode();
        long startTime = ta.getStartTime();
        long finishTime = ta.getFinishTime();
        long elapsed = ta.getElapsedTime();
        String diag = ta.getNote() == null ? "" : ta.getNote();
        TR<TBODY<TABLE<Hamlet>>> row = tbody.tr();
        TD<TR<TBODY<TABLE<Hamlet>>>> nodeTd = row.
          td(".id", taid).
          td(".progress", progress).
          td(".state", ta.getState()).td();
        if (nodeHttpAddr == null) {
          nodeTd._("N/A");
        } else {
          nodeTd.
            a(".nodelink", url(HttpConfig.getSchemePrefix(),
                               nodeHttpAddr), nodeHttpAddr);
        }
        nodeTd._();
        if (containerId != null) {
          String containerIdStr = ta.getAssignedContainerIdStr();
          row.td().
              a(".logslink", url(HttpConfig.getSchemePrefix(),
              nodeHttpAddr, "node", "containerlogs",
              containerIdStr, app.getJob().getUserName()), "logs")._();
        } else {
View Full Code Here

Examples of org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskAttemptInfo

    for (TaskAttempt ta : task.getAttempts().values()) {
      if (ta != null) {
        if (task.getType() == TaskType.REDUCE) {
          attempts.add(new ReduceTaskAttemptInfo(ta, task.getType()));
        } else {
          attempts.add(new TaskAttemptInfo(ta, task.getType(), false));
        }
      }
    }
    return attempts;
  }
View Full Code Here

Examples of org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskAttemptInfo

    TaskAttempt ta = AMWebServices.getTaskAttemptFromTaskAttemptString(attId,
        task);
    if (task.getType() == TaskType.REDUCE) {
      return new ReduceTaskAttemptInfo(ta, task.getType());
    } else {
      return new TaskAttemptInfo(ta, task.getType(), false);
    }
  }
View Full Code Here

Examples of org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskAttemptInfo

    for (TaskAttempt ta : task.getAttempts().values()) {
      if (ta != null) {
        if (task.getType() == TaskType.REDUCE) {
          attempts.add(new ReduceTaskAttemptInfo(ta, task.getType()));
        } else {
          attempts.add(new TaskAttemptInfo(ta, task.getType(), true));
        }
      }
    }
    return attempts;
  }
View Full Code Here

Examples of org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskAttemptInfo

    Task task = getTaskFromTaskIdString(tid, job);
    TaskAttempt ta = getTaskAttemptFromTaskAttemptString(attId, task);
    if (task.getType() == TaskType.REDUCE) {
      return new ReduceTaskAttemptInfo(ta, task.getType());
    } else {
      return new TaskAttemptInfo(ta, task.getType(), true);
    }
  }
View Full Code Here

Examples of org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskAttemptInfo

            th(".tsh", "Finished").
            th(".tsh", "Elapsed").
            th(".note", "Note")._()._().
        tbody();
      for (TaskAttempt attempt : getTaskAttempts()) {
        TaskAttemptInfo ta = new TaskAttemptInfo(attempt, true);
        String taid = ta.getId();
        String progress = percent(ta.getProgress() / 100);
        ContainerId containerId = ta.getAssignedContainerId();

        String nodeHttpAddr = ta.getNode();
        long startTime = ta.getStartTime();
        long finishTime = ta.getFinishTime();
        long elapsed = ta.getElapsedTime();
        String diag = ta.getNote() == null ? "" : ta.getNote();
        TD<TR<TBODY<TABLE<Hamlet>>>> nodeTd = tbody.
          tr().
            td(".id", taid).
            td(".progress", progress).
            td(".state", ta.getState()).
            td().
              a(".nodelink", url("http://", nodeHttpAddr), nodeHttpAddr);
        if (containerId != null) {
          String containerIdStr = ta.getAssignedContainerIdStr();
          nodeTd._(" ").
            a(".logslink", url("http://", nodeHttpAddr, "node", "containerlogs",
              containerIdStr, app.getJob().getUserName()), "logs");
        }
        nodeTd._().
View Full Code Here

Examples of org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskAttemptInfo

      // Write all the data into a JavaScript array of arrays for JQuery
      // DataTables to display
      StringBuilder attemptsTableData = new StringBuilder("[\n");

      for (TaskAttempt attempt : getTaskAttempts()) {
        TaskAttemptInfo ta = new TaskAttemptInfo(attempt, true);
        String progress = percent(ta.getProgress() / 100);

        String nodeHttpAddr = ta.getNode();
        String diag = ta.getNote() == null ? "" : ta.getNote();
        attemptsTableData.append("[\"")
        .append(ta.getId()).append("\",\"")
        .append(progress).append("\",\"")
        .append(ta.getState().toString()).append("\",\"")

        .append(nodeHttpAddr == null ? "N/A" :
          "<a class='nodelink' href='" + MRWebAppUtil.getYARNWebappScheme() + nodeHttpAddr + "'>"
          + nodeHttpAddr + "</a>")
        .append("\",\"")

        .append(ta.getAssignedContainerId() == null ? "N/A" :
          "<a class='logslink' href='" + url(MRWebAppUtil.getYARNWebappScheme(), nodeHttpAddr, "node"
            , "containerlogs", ta.getAssignedContainerIdStr(), app.getJob()
            .getUserName()) + "'>logs</a>")
          .append("\",\"")

        .append(ta.getStartTime()).append("\",\"")
        .append(ta.getFinishTime()).append("\",\"")
        .append(ta.getElapsedTime()).append("\",\"")
        .append(StringEscapeUtils.escapeJavaScript(StringEscapeUtils.escapeHtml(
          diag))).append("\"],\n");
      }
      //Remove the last comma and close off the array of arrays
      if(attemptsTableData.charAt(attemptsTableData.length() - 2) == ',') {
View Full Code Here

Examples of org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskAttemptInfo

      long elapsedSortTime = -1;;
      long elapsedReduceTime = -1;
      long attemptElapsed = -1;
      TaskAttempt successful = info.getSuccessful();
      if(successful != null) {
        TaskAttemptInfo ta;
        if(type == TaskType.REDUCE) {
          ReduceTaskAttemptInfo rta = new ReduceTaskAttemptInfo(successful, type);
          shuffleFinishTime = rta.getShuffleFinishTime();
          sortFinishTime = rta.getMergeFinishTime();
          elapsedShuffleTime = rta.getElapsedShuffleTime();
          elapsedSortTime = rta.getElapsedMergeTime();
          elapsedReduceTime = rta.getElapsedReduceTime();
          ta = rta;
        } else {
          ta = new TaskAttemptInfo(successful, type, false);
        }
        attemptStartTime = ta.getStartTime();
        attemptFinishTime = ta.getFinishTime();
        attemptElapsed = ta.getElapsedTime();
      }

      TR<TBODY<TABLE<Hamlet>>> row = tbody.tr();
      row.
          td().
View Full Code Here
TOP
Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc., now owned by Oracle Inc. Contact coftware#gmail.com.