Package org.apache.hadoop.mapreduce.v2.app.job.event

Examples of org.apache.hadoop.mapreduce.v2.app.job.event.TaskTAttemptEvent
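Each snippet below follows the same pattern: a task attempt (or a test driving one) builds a TaskTAttemptEvent from a TaskAttemptId plus a TaskEventType describing what happened (launched, commit pending, succeeded, failed, killed) and hands it to the application master's event handler, which routes it to the owning Task. As a minimal, self-contained sketch of that pattern (the AttemptNotifier class and its fields are illustrative stand-ins, not Hadoop code):

import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEventType;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskTAttemptEvent;
import org.apache.hadoop.yarn.event.EventHandler;

//Illustrative sketch only: shows how a TaskTAttemptEvent is built and posted.
class AttemptNotifier {
  @SuppressWarnings("rawtypes")
  private final EventHandler eventHandler;
  private final TaskAttemptId attemptId;

  AttemptNotifier(EventHandler eventHandler, TaskAttemptId attemptId) {
    this.eventHandler = eventHandler;
    this.attemptId = attemptId;
  }

  //Tell the owning Task that this attempt finished successfully.
  @SuppressWarnings("unchecked")
  void notifySucceeded() {
    eventHandler.handle(
        new TaskTAttemptEvent(attemptId, TaskEventType.T_ATTEMPT_SUCCEEDED));
  }
}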


        //tell the Speculator that this attempt is going away
        taskAttempt.eventHandler.handle(
            new SpeculatorEvent(taskAttempt.getID().getTaskId(), -1));
      }

      switch(finalState) {
        case FAILED:
          taskAttempt.eventHandler.handle(new TaskTAttemptEvent(
              taskAttempt.attemptId,
              TaskEventType.T_ATTEMPT_FAILED));
          break;
        case KILLED:
          taskAttempt.eventHandler.handle(new TaskTAttemptEvent(
              taskAttempt.attemptId,
              TaskEventType.T_ATTEMPT_KILLED));
          break;
        default:
          LOG.error("Task final state is not FAILED or KILLED: " + finalState);
      }
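On the receiving side, the owning Task keys its own state transitions off the TaskEventType carried in the event. The rough sketch below only illustrates that dispatch; TaskImpl's real logic lives in its state machine, not in a switch like this.

import org.apache.hadoop.mapreduce.v2.app.job.event.TaskTAttemptEvent;

//Illustrative receiver-side sketch; not TaskImpl's actual implementation.
class AttemptEventDispatchSketch {
  void onAttemptEvent(TaskTAttemptEvent event) {
    //TaskTAttemptEvent extends TaskEvent, so getType() returns a TaskEventType
    switch (event.getType()) {
      case T_ATTEMPT_SUCCEEDED:
        //mark the task finished and retire any other running attempts
        break;
      case T_ATTEMPT_FAILED:
        //schedule another attempt, or fail the task once retries are exhausted
        break;
      case T_ATTEMPT_KILLED:
        //a killed attempt normally does not count against the failure limit
        break;
      default:
        break;
    }
  }
}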


      //clear the remoteTask reference since it is no longer needed,
      //freeing the memory it was holding
      taskAttempt.remoteTask = null;

      //tell the Task that the attempt has started
      taskAttempt.eventHandler.handle(new TaskTAttemptEvent(
          taskAttempt.attemptId,
          TaskEventType.T_ATTEMPT_LAUNCHED));
    }

      SingleArcTransition<TaskAttemptImpl, TaskAttemptEvent> {
    @SuppressWarnings("unchecked")
    @Override
    public void transition(TaskAttemptImpl taskAttempt,
        TaskAttemptEvent event) {
      //tell the Task that this attempt wants permission to commit its output
      taskAttempt.eventHandler.handle(new TaskTAttemptEvent(
          taskAttempt.attemptId,
          TaskEventType.T_ATTEMPT_COMMIT_PENDING));
    }

      //set the finish time
      taskAttempt.setFinishTime();
      taskAttempt.eventHandler.handle(
          createJobCounterUpdateEventTASucceeded(taskAttempt));
      taskAttempt.logAttemptFinishedEvent(TaskAttemptStateInternal.SUCCEEDED);
      taskAttempt.eventHandler.handle(new TaskTAttemptEvent(
          taskAttempt.attemptId,
          TaskEventType.T_ATTEMPT_SUCCEEDED));
      taskAttempt.eventHandler.handle(new SpeculatorEvent(
          taskAttempt.reportedStatus, taskAttempt.clock.getTime()));

        // handling failed map/reduce events.
      } else {
        LOG.debug("Not generating HistoryFinish event since start event not " +
            "generated for taskAttempt: " + taskAttempt.getID());
      }
      taskAttempt.eventHandler.handle(new TaskTAttemptEvent(
          taskAttempt.attemptId, TaskEventType.T_ATTEMPT_FAILED));
    }

            taskAttempt.attemptId.getTaskId().getJobId(), tauce));
      } else {
        LOG.debug("Not generating HistoryFinish event since start event not " +
            "generated for taskAttempt: " + taskAttempt.getID());
      }
      taskAttempt.eventHandler.handle(new TaskTAttemptEvent(
          taskAttempt.attemptId, TaskEventType.T_ATTEMPT_FAILED));
    }

          .handle(createJobCounterUpdateEventTAKilled(taskAttempt, true));
      TaskAttemptUnsuccessfulCompletionEvent tauce = createTaskAttemptUnsuccessfulCompletionEvent(
          taskAttempt, TaskAttemptStateInternal.KILLED);
      taskAttempt.eventHandler.handle(new JobHistoryEvent(taskAttempt.attemptId
          .getTaskId().getJobId(), tauce));
      taskAttempt.eventHandler.handle(new TaskTAttemptEvent(
          taskAttempt.attemptId, TaskEventType.T_ATTEMPT_KILLED));
      return TaskAttemptStateInternal.KILLED;
    }

    //FAIL_ABORT state: schedule every task, then fail each of its attempts
    for (Task t : job.tasks.values()) {
      TaskImpl task = (TaskImpl) t;
      task.handle(new TaskEvent(task.getID(), TaskEventType.T_SCHEDULE));
      for (TaskAttempt ta : task.getAttempts().values()) {
        task.handle(new TaskTAttemptEvent(ta.getID(),
            TaskEventType.T_ATTEMPT_FAILED));
      }
    }

    dispatcher.await();

        taskAttempt.addDiagnosticInfo(
            ((TaskAttemptKillEvent) event).getMessage());
      }

      //Not logging Map/Reduce attempts in case of failure.
      //taskAttempt.logAttemptFinishedEvent(TaskAttemptStateInternal.KILLED);
      taskAttempt.eventHandler.handle(new TaskTAttemptEvent(
          taskAttempt.attemptId,
          TaskEventType.T_ATTEMPT_KILLED));
    }

              // KILL_WAIT, then relay the actual kill signal to TA
              super.dispatch(new TaskAttemptEvent(taID,
                TaskAttemptEventType.TA_DONE));
              super.dispatch(new TaskAttemptEvent(taID,
                TaskAttemptEventType.TA_CONTAINER_CLEANED));
              super.dispatch(new TaskTAttemptEvent(taID,
                TaskEventType.T_ATTEMPT_SUCCEEDED));
              this.cachedKillEvent = killEvent;
              return;
            }
          }


