Examples of TaskAttemptEvent
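
TaskAttemptEvent is the event that the MapReduce ApplicationMaster's dispatcher delivers to a task attempt's state machine: it pairs a TaskAttemptId with a TaskAttemptEventType such as TA_SCHEDULE, TA_DONE, TA_KILL, TA_FAILMSG or TA_CONTAINER_CLEANED. Every snippet below follows the same pattern: construct the event and hand it to the AppContext's event handler. A minimal sketch of that pattern, using a hypothetical helper method (the AppContext and TaskAttemptId are assumed to be supplied by the caller, as they are in the test snippets below):

    import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
    import org.apache.hadoop.mapreduce.v2.app.AppContext;
    import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent;
    import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType;

    // Minimal dispatch sketch (hypothetical helper, not taken from the examples below):
    // mark a single attempt as done by routing a TA_DONE event through the AM's
    // central event handler, exactly as the test snippets on this page do.
    @SuppressWarnings("unchecked")
    static void signalDone(AppContext context, TaskAttemptId attemptId) {
      context.getEventHandler().handle(
          new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_DONE));
    }
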


Examples of org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent

    Assert.assertEquals("Reduce Task state not correct",
        TaskState.NEW, reduceTask.getReport().getTaskState());
   
    //send the done signal to the 1st map task
    app.getContext().getEventHandler().handle(
        new TaskAttemptEvent(
            mapTask1.getAttempts().values().iterator().next().getID(),
            TaskAttemptEventType.TA_DONE));
   
    //wait for first map task to complete
    app.waitForState(mapTask1, TaskState.SUCCEEDED);

    //Once the first map completes, it will schedule the reduces
    //now reduce must be running
    app.waitForState(reduceTask, TaskState.RUNNING);
   
    //send the done signal to 2nd map and the reduce to complete the job
    app.getContext().getEventHandler().handle(
        new TaskAttemptEvent(
            mapTask2.getAttempts().values().iterator().next().getID(),
            TaskAttemptEventType.TA_DONE));
    app.getContext().getEventHandler().handle(
        new TaskAttemptEvent(
            reduceTask.getAttempts().values().iterator().next().getID(),
            TaskAttemptEventType.TA_DONE));
   
    app.waitForState(job, JobState.SUCCEEDED);
  }

Examples of org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent

    // send the done signal to the task
    app.getContext()
        .getEventHandler()
        .handle(
            new TaskAttemptEvent(task1Attempt.getID(),
                TaskAttemptEventType.TA_DONE));
    app.getContext()
        .getEventHandler()
        .handle(
            new TaskAttemptEvent(task2Attempt.getID(),
                TaskAttemptEventType.TA_DONE));

    // all maps must be succeeded
    app.waitForState(mapTask1, TaskState.SUCCEEDED);
    app.waitForState(mapTask2, TaskState.SUCCEEDED);

    TaskAttemptCompletionEvent[] events = job.getTaskAttemptCompletionEvents(0,
        100);
    Assert.assertEquals("Expecting 2 completion events for success", 2,
        events.length);

    // send updated nodes info
    ArrayList<NodeReport> updatedNodes = new ArrayList<NodeReport>();
    NodeReport nr = RecordFactoryProvider.getRecordFactory(null)
        .newRecordInstance(NodeReport.class);
    nr.setNodeId(node1);
    nr.setNodeState(NodeState.UNHEALTHY);
    updatedNodes.add(nr);
    app.getContext().getEventHandler()
        .handle(new JobUpdatedNodesEvent(job.getID(), updatedNodes));

    app.waitForState(task1Attempt, TaskAttemptState.KILLED);
    app.waitForState(task2Attempt, TaskAttemptState.KILLED);

    events = job.getTaskAttemptCompletionEvents(0, 100);
    Assert.assertEquals("Expecting 2 more completion events for killed", 4,
        events.length);

    // all maps must be back to running
    app.waitForState(mapTask1, TaskState.RUNNING);
    app.waitForState(mapTask2, TaskState.RUNNING);

    Iterator<TaskAttempt> itr = mapTask1.getAttempts().values().iterator();
    itr.next();
    task1Attempt = itr.next();

    // send the done signal to the task
    app.getContext()
        .getEventHandler()
        .handle(
            new TaskAttemptEvent(task1Attempt.getID(),
                TaskAttemptEventType.TA_DONE));

    // map1 must be succeeded. map2 must be running
    app.waitForState(mapTask1, TaskState.SUCCEEDED);
    app.waitForState(mapTask2, TaskState.RUNNING);

    events = job.getTaskAttemptCompletionEvents(0, 100);
    Assert.assertEquals("Expecting 1 more completion events for success", 5,
        events.length);

    // Crash the app again.
    app.stop();

    // rerun
    // in rerun the 1st map will be recovered from previous run
    app = new MRAppWithHistory(2, 2, false, this.getClass().getName(), false,
        ++runCount);
    conf = new Configuration();
    conf.setBoolean(MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE, true);
    conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
    job = app.submit(conf);
    app.waitForState(job, JobState.RUNNING);
    Assert.assertEquals("No of tasks not correct", 4, job.getTasks().size());
    it = job.getTasks().values().iterator();
    mapTask1 = it.next();
    mapTask2 = it.next();
    Task reduceTask1 = it.next();
    Task reduceTask2 = it.next();

    // map 1 will be recovered, no need to send done
    app.waitForState(mapTask1, TaskState.SUCCEEDED);
    app.waitForState(mapTask2, TaskState.RUNNING);

    events = job.getTaskAttemptCompletionEvents(0, 100);
    Assert.assertEquals(
        "Expecting 2 completion events for killed & success of map1", 2,
        events.length);

    task2Attempt = mapTask2.getAttempts().values().iterator().next();
    app.getContext()
        .getEventHandler()
        .handle(
            new TaskAttemptEvent(task2Attempt.getID(),
                TaskAttemptEventType.TA_DONE));
    app.waitForState(mapTask2, TaskState.SUCCEEDED);

    events = job.getTaskAttemptCompletionEvents(0, 100);
    Assert.assertEquals("Expecting 1 more completion events for success", 3,
        events.length);

    app.waitForState(reduceTask1, TaskState.RUNNING);
    app.waitForState(reduceTask2, TaskState.RUNNING);

    TaskAttempt task3Attempt = reduceTask1.getAttempts().values().iterator()
        .next();
    app.getContext()
        .getEventHandler()
        .handle(
            new TaskAttemptEvent(task3Attempt.getID(),
                TaskAttemptEventType.TA_DONE));
    app.waitForState(reduceTask1, TaskState.SUCCEEDED);
    // a TA_KILL sent after the attempt has already succeeded must not change the task state
    app.getContext()
        .getEventHandler()
        .handle(
            new TaskAttemptEvent(task3Attempt.getID(),
                TaskAttemptEventType.TA_KILL));
    app.waitForState(reduceTask1, TaskState.SUCCEEDED);
   
    TaskAttempt task4Attempt = reduceTask2.getAttempts().values().iterator()
        .next();
    app.getContext()
        .getEventHandler()
        .handle(
            new TaskAttemptEvent(task4Attempt.getID(),
                TaskAttemptEventType.TA_DONE));
    app.waitForState(reduceTask2, TaskState.SUCCEEDED);   

    events = job.getTaskAttemptCompletionEvents(0, 100);
    Assert.assertEquals("Expecting 2 more completion events for reduce success",

Examples of org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent

        boolean isMapFaulty =
            (failureRate >= MAX_ALLOWED_FETCH_FAILURES_FRACTION);
        if (fetchFailures >= MAX_FETCH_FAILURES_NOTIFICATIONS && isMapFaulty) {
          LOG.info("Too many fetch-failures for output of task attempt: " +
              mapId + " ... raising fetch failure to map");
          job.eventHandler.handle(new TaskAttemptEvent(mapId,
              TaskAttemptEventType.TA_TOO_MANY_FETCH_FAILURE));
          job.fetchFailuresMapping.remove(mapId);
        }
      }
    }

Examples of org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent

    @SuppressWarnings("unchecked")
    @Override
    protected void attemptLaunched(TaskAttemptId attemptID) {
      if (attemptID.getTaskId().getId() == 0 && attemptID.getId() == 0) {
        getContext().getEventHandler().handle(
            new TaskAttemptEvent(attemptID, TaskAttemptEventType.TA_FAILMSG));
      } else {
        getContext().getEventHandler().handle(
            new TaskAttemptEvent(attemptID, TaskAttemptEventType.TA_DONE));
      }
    }

Examples of org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent

    @SuppressWarnings("unchecked")
    @Override
    protected void attemptLaunched(TaskAttemptId attemptID) {
      if (attemptID.getTaskId().getId() == 0) {
        getContext().getEventHandler().handle(
            new TaskAttemptEvent(attemptID, TaskAttemptEventType.TA_FAILMSG));
      } else {
        getContext().getEventHandler().handle(
            new TaskAttemptEvent(attemptID, TaskAttemptEventType.TA_DONE));
      }
    }

Examples of org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent

    protected void attemptLaunched(TaskAttemptId attemptID) {
      getContext().getEventHandler().handle(
          new TaskAttemptDiagnosticsUpdateEvent(attemptID,
              "Test Diagnostic Event"));
      getContext().getEventHandler().handle(
          new TaskAttemptEvent(attemptID, TaskAttemptEventType.TA_FAILMSG));
    }

Examples of org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent

    TaskSplitMetaInfo splitInfo =
        new TaskSplitMetaInfo(hosts, 0, 128 * 1024 * 1024l);

    TaskAttemptImpl mockTaskAttempt =
        createMapTaskAttemptImplForTest(eventHandler, splitInfo);
    TaskAttemptEvent mockTAEvent = mock(TaskAttemptEvent.class);

    rct.transition(mockTaskAttempt, mockTAEvent);

    ArgumentCaptor<Event> arg = ArgumentCaptor.forClass(Event.class);
    verify(eventHandler, times(2)).handle(arg.capture());

Examples of org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent

        createMapTaskAttemptImplForTest(eventHandler, splitInfo);
    TaskAttemptImpl spyTa = spy(mockTaskAttempt);
    when(spyTa.resolveHost(hosts[0])).thenReturn("host1");
    spyTa.dataLocalHosts = spyTa.resolveHosts(splitInfo.getLocations());

    TaskAttemptEvent mockTAEvent = mock(TaskAttemptEvent.class);
    rct.transition(spyTa, mockTAEvent);
    verify(spyTa).resolveHost(hosts[0]);
    ArgumentCaptor<Event> arg = ArgumentCaptor.forClass(Event.class);
    verify(eventHandler, times(2)).handle(arg.capture());
    if (!(arg.getAllValues().get(1) instanceof ContainerRequestEvent)) {
      Assert.fail("second dispatched event is not a ContainerRequestEvent");
    }

Examples of org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent

    app.waitForState(rta, TaskAttemptState.RUNNING);

    clock.setTime(11);
    app.getContext()
        .getEventHandler()
        .handle(new TaskAttemptEvent(mta.getID(), TaskAttemptEventType.TA_DONE));
    app.getContext()
        .getEventHandler()
        .handle(new TaskAttemptEvent(rta.getID(), TaskAttemptEventType.TA_DONE));
    app.waitForState(job, JobState.SUCCEEDED);
    Assert.assertEquals(mta.getFinishTime(), 11);
    Assert.assertEquals(mta.getLaunchTime(), 10);
    Assert.assertEquals(rta.getFinishTime(), 11);
    Assert.assertEquals(rta.getLaunchTime(), 10);

Examples of org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent

    ContainerId contId = ContainerId.newInstance(appAttemptId, 3);
    Container container = mock(Container.class);
    when(container.getId()).thenReturn(contId);
    when(container.getNodeId()).thenReturn(nid);

    taImpl.handle(new TaskAttemptEvent(attemptId,
        TaskAttemptEventType.TA_SCHEDULE));
    taImpl.handle(new TaskAttemptContainerAssignedEvent(attemptId,
        container, mock(Map.class)));
    taImpl.handle(new TaskAttemptEvent(attemptId,
        TaskAttemptEventType.TA_KILL));
    taImpl.handle(new TaskAttemptEvent(attemptId,
        TaskAttemptEventType.TA_CONTAINER_CLEANED));
    taImpl.handle(new TaskAttemptEvent(attemptId,
        TaskAttemptEventType.TA_CONTAINER_LAUNCH_FAILED));
    assertFalse(eventHandler.internalError);
    assertEquals("Task attempt is not assigned on the local node",
        Locality.NODE_LOCAL, taImpl.getLocality());
  }