Package org.apache.hadoop.mapreduce.v2.app.job.event

Examples of org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent
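
Every example on this page follows the same basic pattern: construct a TaskAttemptEvent from a TaskAttemptId and a TaskAttemptEventType, then hand it to the event handler obtained from the application context so the attempt's state machine can process the transition. A minimal sketch of that pattern follows; the handler and attempt id are assumed to come from the surrounding code (for example MRApp's AppContext in the tests below), and the class and method names here are chosen purely for illustration.

import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType;
import org.apache.hadoop.yarn.event.EventHandler;

public class TaskAttemptEventExample {
  // Sends TA_DONE so the attempt's state machine treats the attempt as
  // finished. The handler would typically come from AppContext#getEventHandler()
  // and the id from TaskAttempt#getID(), as in the snippets below.
  static void sendDone(EventHandler<TaskAttemptEvent> handler,
                       TaskAttemptId attemptId) {
    handler.handle(
        new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_DONE));
  }
}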


        .size());
    TaskAttempt attempt = attempts.values().iterator().next();
    // wait until the attempt has been assigned a container
    app.waitForInternalState((TaskAttemptImpl) attempt,
        TaskAttemptStateInternal.ASSIGNED);
    // report the container as completed before the attempt has finished its
    // work; the job is expected to fail
    app.getDispatcher().getEventHandler().handle(
        new TaskAttemptEvent(attempt.getID(),
            TaskAttemptEventType.TA_CONTAINER_COMPLETED));
    app.waitForState(job, JobState.FAILED);
  }


          case CONTAINER_REMOTE_LAUNCH:
            super.handle(event); // Unused event and container.
            break;
          case CONTAINER_REMOTE_CLEANUP:
            getContext().getEventHandler().handle(
                new TaskAttemptEvent(event.getTaskAttemptID(),
                    TaskAttemptEventType.TA_CONTAINER_CLEANED));
            break;
          }
        }

    @Override
    protected void attemptLaunched(TaskAttemptId attemptID) {
      if (attemptID.getTaskId().getId() == 0) { // first task
        // send the fail event
        getContext().getEventHandler().handle(
            new TaskAttemptEvent(attemptID,
                TaskAttemptEventType.TA_FAILMSG));
      } else {
        getContext().getEventHandler().handle(
            new TaskAttemptEvent(attemptID,
                TaskAttemptEventType.TA_DONE));
      }
    }

    protected void attemptLaunched(TaskAttemptId attemptID) {
      if (attemptID.getTaskId().getId() == 0 && attemptID.getId() == 0) {
        // first attempt of the first task: send the fail event
        getContext().getEventHandler().handle(
            new TaskAttemptEvent(attemptID,
                TaskAttemptEventType.TA_FAILMSG));
      } else {
        getContext().getEventHandler().handle(
            new TaskAttemptEvent(attemptID,
                TaskAttemptEventType.TA_DONE));
      }
    }

        // declare the map output faulty only when both the absolute number of
        // fetch-failure notifications and the failure fraction cross their
        // thresholds
        boolean isMapFaulty =
            (failureRate >= MAX_ALLOWED_FETCH_FAILURES_FRACTION);
        if (fetchFailures >= MAX_FETCH_FAILURES_NOTIFICATIONS && isMapFaulty) {
          LOG.info("Too many fetch-failures for output of task attempt: " +
              mapId + " ... raising fetch failure to map");
          job.eventHandler.handle(new TaskAttemptEvent(mapId,
              TaskAttemptEventType.TA_TOO_MANY_FETCH_FAILURE));
          job.fetchFailuresMapping.remove(mapId);
        }
      }
    }
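
The fetchFailures count and failureRate used above are maintained by the surrounding transition, which is outside this excerpt. The following is only a rough sketch of that bookkeeping, with hypothetical names, under the assumption that the rate is measured against the reducers still shuffling:

import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;

// Illustrative sketch only, not the actual JobImpl code: each fetch-failure
// notification for a map attempt bumps a per-attempt counter, and the failure
// rate is that count taken relative to the reducers still shuffling.
class FetchFailureBookkeeping {
  private final Map<TaskAttemptId, Integer> fetchFailuresMapping =
      new HashMap<TaskAttemptId, Integer>();

  float recordFetchFailure(TaskAttemptId mapId, int shufflingReducers) {
    Integer seen = fetchFailuresMapping.get(mapId);
    int fetchFailures = (seen == null) ? 1 : seen.intValue() + 1;
    fetchFailuresMapping.put(mapId, fetchFailures);
    return shufflingReducers == 0
        ? 1.0f : (float) fetchFailures / shufflingReducers;
  }
}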

        new HashMap<ApplicationId,List<ContainerStatus>>(1);
    statusUpdate.put(mrApp.getAppID(), contStatus);
    node.nodeHeartbeat(statusUpdate, true);
    rmDispatcher.await();
    mrApp.getContext().getEventHandler().handle(
          new TaskAttemptEvent(attempt.getID(), TaskAttemptEventType.TA_DONE));
    mrApp.waitForState(task, TaskState.SUCCEEDED);
  }

    ContainerStatus abortedStatus = ContainerStatus.newInstance(
        containerId, ContainerState.RUNNING, "",
        ContainerExitStatus.ABORTED);
   
    // a normally completed container maps to TA_CONTAINER_COMPLETED
    TaskAttemptEvent event = allocator.createContainerFinishedEvent(status,
        attemptId);
    Assert.assertEquals(TaskAttemptEventType.TA_CONTAINER_COMPLETED,
        event.getType());

    // a container aborted by the RM maps to TA_KILL
    TaskAttemptEvent abortedEvent = allocator.createContainerFinishedEvent(
        abortedStatus, attemptId);
    Assert.assertEquals(TaskAttemptEventType.TA_KILL, abortedEvent.getType());
   
    ContainerId containerId2 = ContainerId.newInstance(applicationAttemptId, 2);
    ContainerStatus status2 = ContainerStatus.newInstance(containerId2,
        ContainerState.RUNNING, "", 0);

    ContainerStatus preemptedStatus = ContainerStatus.newInstance(containerId2,
        ContainerState.RUNNING, "", ContainerExitStatus.PREEMPTED);

    TaskAttemptEvent event2 = allocator.createContainerFinishedEvent(status2,
        attemptId);
    Assert.assertEquals(TaskAttemptEventType.TA_CONTAINER_COMPLETED,
        event2.getType());

    // a preempted container likewise maps to TA_KILL
    TaskAttemptEvent abortedEvent2 = allocator.createContainerFinishedEvent(
        preemptedStatus, attemptId);
    Assert.assertEquals(TaskAttemptEventType.TA_KILL, abortedEvent2.getType());
  }
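
Taken together, the assertions above pin down a small mapping from the container's exit status to the event type delivered to the attempt. The sketch below is illustrative rather than the actual RMContainerAllocator implementation: containers the RM aborted or preempted produce TA_KILL, so the attempt is rescheduled instead of being charged a failure, while any other completion produces TA_CONTAINER_COMPLETED.

import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType;
import org.apache.hadoop.yarn.api.records.ContainerExitStatus;
import org.apache.hadoop.yarn.api.records.ContainerStatus;

final class ContainerFinishedMapping {
  // Illustrative sketch of the mapping exercised by the assertions above.
  static TaskAttemptEvent toEvent(ContainerStatus status, TaskAttemptId attemptId) {
    int exitStatus = status.getExitStatus();
    if (exitStatus == ContainerExitStatus.ABORTED
        || exitStatus == ContainerExitStatus.PREEMPTED) {
      // Aborted/preempted containers are not the attempt's fault: kill it
      // so it can be rescheduled.
      return new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_KILL);
    }
    return new TaskAttemptEvent(attemptId,
        TaskAttemptEventType.TA_CONTAINER_COMPLETED);
  }
}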

  protected void attemptLaunched(TaskAttemptId attemptID) {
    if (autoComplete) {
      // send the done event
      getContext().getEventHandler().handle(
          new TaskAttemptEvent(attemptID,
              TaskAttemptEventType.TA_DONE));
    }
  }

       
        attemptLaunched(event.getTaskAttemptID());
        break;
      case CONTAINER_REMOTE_CLEANUP:
        getContext().getEventHandler().handle(
            new TaskAttemptEvent(event.getTaskAttemptID(),
                TaskAttemptEventType.TA_CONTAINER_CLEANED));
        break;
      }
    }

    final Dispatcher dispatcher = new AsyncDispatcher() {
      private TaskAttemptEvent cachedKillEvent;
      @Override
      protected void dispatch(Event event) {
        if (event instanceof TaskAttemptEvent) {
          TaskAttemptEvent killEvent = (TaskAttemptEvent) event;
          if (killEvent.getType() == TaskAttemptEventType.TA_KILL) {
            TaskAttemptId taID = killEvent.getTaskAttemptID();
            if (taID.getTaskId().getTaskType() == TaskType.REDUCE
                && taID.getTaskId().getId() == 0 && taID.getId() == 0) {
              // Task is asking the reduce TA to kill itself. 'Create' a race
              // condition. Make the task succeed and then inform the task that
              // TA has succeeded. Once Task gets the TA succeeded event at
              // KILL_WAIT, then relay the actual kill signal to TA
              super.dispatch(new TaskAttemptEvent(taID,
                TaskAttemptEventType.TA_DONE));
              super.dispatch(new TaskAttemptEvent(taID,
                TaskAttemptEventType.TA_CONTAINER_CLEANED));
              super.dispatch(new TaskTAttemptEvent(taID,
                TaskEventType.T_ATTEMPT_SUCCEEDED));
              this.cachedKillEvent = killEvent;
              return;
            }
          }
        } else if (event instanceof TaskEvent) {
          TaskEvent taskEvent = (TaskEvent) event;
          if (taskEvent.getType() == TaskEventType.T_ATTEMPT_SUCCEEDED
              && this.cachedKillEvent != null) {
            // When the TA comes and reports that it is done, send the
            // cachedKillEvent
            super.dispatch(this.cachedKillEvent);
            return;
          }

        }
        super.dispatch(event);
      }
    };
    MRApp app = new MRApp(1, 1, false, this.getClass().getName(), true) {
      @Override
      public Dispatcher createDispatcher() {
        return dispatcher;
      }
    };
    Job job = app.submit(new Configuration());
    JobId jobId = app.getJobId();
    app.waitForState(job, JobState.RUNNING);
    Assert.assertEquals("Num tasks not correct", 2, job.getTasks().size());
    Iterator<Task> it = job.getTasks().values().iterator();
    Task mapTask = it.next();
    Task reduceTask = it.next();
    app.waitForState(mapTask, TaskState.RUNNING);
    app.waitForState(reduceTask, TaskState.RUNNING);
    TaskAttempt mapAttempt = mapTask.getAttempts().values().iterator().next();
    app.waitForState(mapAttempt, TaskAttemptState.RUNNING);
    TaskAttempt reduceAttempt = reduceTask.getAttempts().values().iterator().next();
    app.waitForState(reduceAttempt, TaskAttemptState.RUNNING);

    // Finish map
    app.getContext().getEventHandler().handle(
        new TaskAttemptEvent(
            mapAttempt.getID(),
            TaskAttemptEventType.TA_DONE));
    app.waitForState(mapTask, TaskState.SUCCEEDED);

    // Now kill the job
