Package org.apache.hadoop.mapreduce.v2.app.job

Examples of org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt
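The snippets below touch only a handful of TaskAttempt methods. As a reading aid, here is a minimal sketch of just that subset; it is an excerpt reconstructed from the calls in the examples, not the full interface declared in hadoop-mapreduce-client-app.

  // Excerpt of the TaskAttempt methods exercised by the examples below
  // (reconstructed from the calls shown; the real interface declares more).
  public interface TaskAttempt {
    TaskAttemptId getID();
    List<String> getDiagnostics();
    Counters getCounters();
    float getProgress();
    TaskAttemptState getState();
    NodeId getNodeId();                       // node the attempt ran on
    String getNodeHttpAddress();              // "host:port" of the node's web address
    String getAssignedContainerMgrAddress();  // non-null once a container is assigned
    int getShufflePort();                     // port serving this attempt's map output
    long getLaunchTime();
    long getFinishTime();
  }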


    taid.setTaskId(tid);
    taid.setId(i);
    final TaskAttemptReport report = newTaskAttemptReport(taid);
    final List<String> diags = Lists.newArrayList();
    diags.add(DIAGS.next());
    return new TaskAttempt() {
      @Override
      public NodeId getNodeId() throws UnsupportedOperationException {
        throw new UnsupportedOperationException();
      }
     
View Full Code Here
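This first example builds a stub TaskAttempt, apparently for test fixtures: the attempt id, report, and a diagnostic message are pre-populated, and any method the caller does not need, such as getNodeId(), simply throws UnsupportedOperationException.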


        // here we could have simply called Task.getSuccessfulAttempt() but
        // the event that triggers this code is sent before
        // Task.successfulAttempt is set and so there is no guarantee that it
        // will be available now
        Task task = job.tasks.get(taskId);
        TaskAttempt attempt = task.getAttempt(attemptId);
        NodeId nodeId = attempt.getNodeId();
        assert (nodeId != null); // node must exist for a successful event
        List<TaskAttemptId> taskAttemptIdList = job.nodesToSucceededTaskAttempts
            .get(nodeId);
        if (taskAttemptIdList == null) {
          taskAttemptIdList = new ArrayList<TaskAttemptId>();
          job.nodesToSucceededTaskAttempts.put(nodeId, taskAttemptIdList);
        }
        taskAttemptIdList.add(attempt.getID());
      }
    }
View Full Code Here
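The null-check-then-put block above is the standard get-or-create multimap idiom. On Java 8 and later the same bookkeeping can be written with Map.computeIfAbsent; the following is only an equivalent sketch, not what the Hadoop source does:

        // Equivalent get-or-create idiom (Java 8+); behaviour matches the
        // explicit null check above.
        job.nodesToSucceededTaskAttempts
            .computeIfAbsent(nodeId, k -> new ArrayList<TaskAttemptId>())
            .add(attempt.getID());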

      task.failedAttempts++;
      TaskTAttemptEvent castEvent = (TaskTAttemptEvent) event;
      if (castEvent.getTaskAttemptID().equals(task.commitAttempt)) {
        task.commitAttempt = null;
      }
      TaskAttempt attempt = task.attempts.get(castEvent.getTaskAttemptID());
      if (attempt.getAssignedContainerMgrAddress() != null) {
        //container was assigned
        task.eventHandler.handle(new ContainerFailedEvent(attempt.getID(),
            attempt.getAssignedContainerMgrAddress()));
      }
     
      task.finishedAttempts++;
      if (task.failedAttempts < task.maxAttempts) {
        task.handleTaskAttemptCompletion(
            castEvent.getTaskAttemptID(),
            TaskAttemptCompletionEventStatus.FAILED);
        // we don't need a new event if we already have a spare
        if (--task.numberUncompletedAttempts == 0
            && task.successfulAttempt == null) {
          task.addAndScheduleAttempt();
        }
      } else {
        task.handleTaskAttemptCompletion(
            castEvent.getTaskAttemptID(),
            TaskAttemptCompletionEventStatus.TIPFAILED);
        TaskAttemptId taId = castEvent.getTaskAttemptID();

        if (task.historyTaskStartGenerated) {
          TaskFailedEvent taskFailedEvent = createTaskFailedEvent(task,
              attempt.getDiagnostics(), TaskState.FAILED, taId);
          task.eventHandler.handle(new JobHistoryEvent(task.taskId.getJobId(),
              taskFailedEvent));
        } else {
          LOG.debug("Not generating HistoryFinish event since start event not" +
View Full Code Here
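In this failure transition, FAILED marks the completion of a single attempt that the task can still recover from: while failedAttempts is below maxAttempts, a replacement attempt is scheduled if no spare is already running. TIPFAILED, by contrast, means the task itself has exhausted its attempts; only then is a TaskFailedEvent emitted to the job history, and only if the corresponding start event was generated.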

  @Override
  public Counters getCounters() {
    Counters counters = null;
    readLock.lock();
    try {
      TaskAttempt bestAttempt = selectBestAttempt();
      if (bestAttempt != null) {
        counters = bestAttempt.getCounters();
      } else {
        counters = TaskAttemptImpl.EMPTY_COUNTERS;
//        counters.groups = new HashMap<CharSequence, CounterGroup>();
      }
      return counters;
View Full Code Here

  @Override
  public float getProgress() {
    readLock.lock();
    try {
      TaskAttempt bestAttempt = selectBestAttempt();
      if (bestAttempt == null) {
        return 0f;
      }
      return bestAttempt.getProgress();
    } finally {
      readLock.unlock();
    }
  }
View Full Code Here

  // select the attempt with the best progress
  // always called inside the Read Lock
  private TaskAttempt selectBestAttempt() {
    float progress = 0f;
    TaskAttempt result = null;
    for (TaskAttempt at : attempts.values()) {
      switch (at.getState()) {
     
      // ignore all failed task attempts
      case FAIL_CONTAINER_CLEANUP:
View Full Code Here
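The snippet is cut off after the first case label. Judging from the method name, the comment, and the loop structure, selectBestAttempt() skips attempts that failed or were killed and keeps the one reporting the highest progress. A hypothetical completion of the loop, written from that signature and comment rather than copied from the full source:

  // Hypothetical completion of the selection loop (sketch, not verbatim Hadoop code).
  private TaskAttempt selectBestAttempt() {
    float progress = 0f;
    TaskAttempt result = null;
    for (TaskAttempt at : attempts.values()) {
      switch (at.getState()) {
        // ignore attempts that failed or were killed (the real method likely
        // lists further FAIL_/KILL_ cleanup states here as well)
        case FAIL_CONTAINER_CLEANUP:
        case FAILED:
        case KILLED:
          continue;
        default:
          break;
      }
      // keep the attempt with the highest reported progress
      float attemptProgress = at.getProgress();
      if (result == null || attemptProgress > progress) {
        result = at;
        progress = attemptProgress;
      }
    }
    return result;
  }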

    }
  }

  // This is always called in the Write Lock
  private void addAndScheduleAttempt() {
    TaskAttempt attempt = createAttempt();
    if (LOG.isDebugEnabled()) {
      LOG.debug("Created attempt " + attempt.getID());
    }
    switch (attempts.size()) {
      case 0:
        attempts = Collections.singletonMap(attempt.getID(), attempt);
        break;
       
      case 1:
        Map<TaskAttemptId, TaskAttempt> newAttempts
            = new LinkedHashMap<TaskAttemptId, TaskAttempt>(maxAttempts);
        newAttempts.putAll(attempts);
        attempts = newAttempts;
        attempts.put(attempt.getID(), attempt);
        break;

      default:
        attempts.put(attempt.getID(), attempt);
        break;
    }

    // Update nextAttemptNumber
    if (taskAttemptsFromPreviousGeneration.isEmpty()) {
      ++nextAttemptNumber;
    } else {
      // There are still some TaskAttempts from previous generation, use them
      nextAttemptNumber =
          taskAttemptsFromPreviousGeneration.remove(0).getAttemptId().getId();
    }

    ++numberUncompletedAttempts;
    //schedule the nextAttemptNumber
    if (failedAttempts > 0) {
      eventHandler.handle(new TaskAttemptEvent(attempt.getID(),
        TaskAttemptEventType.TA_RESCHEDULE));
    } else {
      eventHandler.handle(new TaskAttemptEvent(attempt.getID(),
          TaskAttemptEventType.TA_SCHEDULE));
    }
  }
View Full Code Here
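The switch on attempts.size() at the top of addAndScheduleAttempt() is a small memory optimization: most tasks only ever get one attempt, so the map starts out as an immutable Collections.singletonMap and is copied into a mutable, insertion-ordered LinkedHashMap only when a second attempt appears. A standalone sketch of the same promote-on-second-insert idiom (illustrative only, not part of Hadoop):

// Illustrative helper, not part of Hadoop: grow a map lazily the way
// addAndScheduleAttempt() grows the attempts map above.
import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.Map;

class GrowableMapExample {
  static <K, V> Map<K, V> put(Map<K, V> map, K key, V value) {
    switch (map.size()) {
      case 0:
        // common case: a single entry, no full map allocated
        return Collections.singletonMap(key, value);
      case 1:
        // second entry: promote the singleton to a mutable map that
        // preserves insertion order
        Map<K, V> grown = new LinkedHashMap<K, V>(map);
        grown.put(key, value);
        return grown;
      default:
        map.put(key, value);
        return map;
    }
  }
}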

  }

  // always called inside a transition, in turn inside the Write Lock
  private void handleTaskAttemptCompletion(TaskAttemptId attemptId,
      TaskAttemptCompletionEventStatus status) {
    TaskAttempt attempt = attempts.get(attemptId);
    //raise the completion event only if the container is assigned
    // to nextAttemptNumber
    if (attempt.getNodeHttpAddress() != null) {
      TaskAttemptCompletionEvent tce = recordFactory
          .newRecordInstance(TaskAttemptCompletionEvent.class);
      tce.setEventId(-1);
      String scheme = (encryptedShuffle) ? "https://" : "http://";
      tce.setMapOutputServerAddress(scheme
         + attempt.getNodeHttpAddress().split(":")[0] + ":"
         + attempt.getShufflePort());
      tce.setStatus(status);
      tce.setAttemptId(attempt.getID());
      int runTime = 0;
      if (attempt.getFinishTime() != 0 && attempt.getLaunchTime() != 0) {
        runTime = (int) (attempt.getFinishTime() - attempt.getLaunchTime());
      }
      tce.setAttemptRunTime(runTime);
     
      //raise the event to job so that it adds the completion event to its
      //data structures
      eventHandler.handle(new JobTaskAttemptCompletedEvent(tce));
View Full Code Here
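As a concrete reading of the address assembly above: with encryptedShuffle false and hypothetical values of "host1:8042" from getNodeHttpAddress() and 13562 from getShufflePort(), the completion event's map output server address becomes "http://host1:13562". The node's HTTP port is deliberately replaced by the shuffle port, since reducers fetch map output from the shuffle service rather than from the node's web address.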

