Examples of JobCounterUpdateEvent
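
The excerpts below, drawn from the MapReduce v2 application master and its tests, show the lifecycle of a JobCounterUpdateEvent: it is constructed for a job, loaded with one or more counter increments, dispatched through the event handler, and finally consumed by the job's state machine, which applies the increments to the job counters.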


Examples of org.apache.hadoop.mapreduce.v2.app.job.event.JobCounterUpdateEvent
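
A TaskAttemptImpl helper invoked when a task attempt succeeds: it creates an event for the attempt's job and folds in the millis counters via updateMillisCounters().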

  private static JobCounterUpdateEvent createJobCounterUpdateEventTASucceeded(
      TaskAttemptImpl taskAttempt) {
    TaskId taskId = taskAttempt.attemptId.getTaskId();
    JobCounterUpdateEvent jce = new JobCounterUpdateEvent(taskId.getJobId());
    updateMillisCounters(jce, taskAttempt);
    return jce;
  }

Examples of org.apache.hadoop.mapreduce.v2.app.job.event.JobCounterUpdateEvent
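
The failure counterpart: it increments NUM_FAILED_MAPS or NUM_FAILED_REDUCES depending on the task type, and updates the millis counters only if the task had not already completed.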

  private static JobCounterUpdateEvent createJobCounterUpdateEventTAFailed(
      TaskAttemptImpl taskAttempt, boolean taskAlreadyCompleted) {
    TaskType taskType = taskAttempt.getID().getTaskId().getTaskType();
    JobCounterUpdateEvent jce = new JobCounterUpdateEvent(taskAttempt.getID().getTaskId().getJobId());
   
    if (taskType == TaskType.MAP) {
      jce.addCounterUpdate(JobCounter.NUM_FAILED_MAPS, 1);
    } else {
      jce.addCounterUpdate(JobCounter.NUM_FAILED_REDUCES, 1);
    }
    if (!taskAlreadyCompleted) {
      updateMillisCounters(jce, taskAttempt);
    }
    return jce;
  }

Examples of org.apache.hadoop.mapreduce.v2.app.job.event.JobCounterUpdateEvent
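
The kill counterpart, identical in shape but incrementing NUM_KILLED_MAPS or NUM_KILLED_REDUCES.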

  private static JobCounterUpdateEvent createJobCounterUpdateEventTAKilled(
      TaskAttemptImpl taskAttempt, boolean taskAlreadyCompleted) {
    TaskType taskType = taskAttempt.getID().getTaskId().getTaskType();
    JobCounterUpdateEvent jce = new JobCounterUpdateEvent(taskAttempt.getID().getTaskId().getJobId());
   
    if (taskType == TaskType.MAP) {
      jce.addCounterUpdate(JobCounter.NUM_KILLED_MAPS, 1);
    } else {
      jce.addCounterUpdate(JobCounter.NUM_KILLED_REDUCES, 1);
    }
    if (!taskAlreadyCompleted) {
      updateMillisCounters(jce, taskAttempt);
    }
    return jce;
  }

Examples of org.apache.hadoop.mapreduce.v2.app.job.event.JobCounterUpdateEvent
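
When a container has been launched for an attempt, sendLaunchedEvents() increments TOTAL_LAUNCHED_MAPS or TOTAL_LAUNCHED_REDUCES and hands the event to the event handler.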

  @SuppressWarnings("unchecked")
  private void sendLaunchedEvents() {
    JobCounterUpdateEvent jce = new JobCounterUpdateEvent(attemptId.getTaskId()
        .getJobId());
    jce.addCounterUpdate(attemptId.getTaskId().getTaskType() == TaskType.MAP ?
        JobCounter.TOTAL_LAUNCHED_MAPS : JobCounter.TOTAL_LAUNCHED_REDUCES, 1);
    eventHandler.handle(jce);

    LOG.info("TaskAttempt: [" + attemptId
        + "] using containerId: [" + container.getId() + " on NM: ["

Examples of org.apache.hadoop.mapreduce.v2.app.job.event.JobCounterUpdateEvent
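
The consuming side: JobImpl's CounterUpdateTransition iterates the incremental updates carried by the event and applies each one to the job's counters.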

 
  private static class CounterUpdateTransition implements
      SingleArcTransition<JobImpl, JobEvent> {
    @Override
    public void transition(JobImpl job, JobEvent event) {
      JobCounterUpdateEvent jce = (JobCounterUpdateEvent) event;
      for (JobCounterUpdateEvent.CounterIncrementalUpdate ci : jce
          .getCounterUpdates()) {
        job.jobCounters.findCounter(ci.getCounterKey()).increment(
          ci.getIncrementValue());
      }
    }

Examples of org.apache.hadoop.mapreduce.v2.app.job.event.JobCounterUpdateEvent
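
The uber-task path in the MR app master's local launcher: before a subtask runs, TOTAL_LAUNCHED_UBERTASKS plus NUM_UBER_SUBMAPS or NUM_UBER_SUBREDUCES are incremented; a RuntimeException from the subtask is recorded as NUM_FAILED_UBERTASKS.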

      try {
        if (remoteTask.isMapOrReduce()) {
          JobCounterUpdateEvent jce = new JobCounterUpdateEvent(attemptID.getTaskId().getJobId());
          jce.addCounterUpdate(JobCounter.TOTAL_LAUNCHED_UBERTASKS, 1);
          if (remoteTask.isMapTask()) {
            jce.addCounterUpdate(JobCounter.NUM_UBER_SUBMAPS, 1);
          } else {
            jce.addCounterUpdate(JobCounter.NUM_UBER_SUBREDUCES, 1);
          }
          context.getEventHandler().handle(jce);
        }
        runSubtask(remoteTask, ytask.getType(), attemptID, numMapTasks,
                   (numReduceTasks > 0), localMapFiles);
       
      } catch (RuntimeException re) {
        JobCounterUpdateEvent jce = new JobCounterUpdateEvent(attemptID.getTaskId().getJobId());
        jce.addCounterUpdate(JobCounter.NUM_FAILED_UBERTASKS, 1);
        context.getEventHandler().handle(jce);
        // this is our signal that the subtask failed in some way, so
        // simulate a failed JVM/container and send a container-completed
        // event to task attempt (i.e., move state machine from RUNNING
        // to FAIL_CONTAINER_CLEANUP [and ultimately to FAILED])

Examples of org.apache.hadoop.mapreduce.v2.app.job.event.JobCounterUpdateEvent
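
From a test event handler: it inspects the dispatched events and tallies the NUM_FAILED_MAPS and TOTAL_LAUNCHED_MAPS increments so the test can assert on the totals afterwards.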

        assertEquals(expectedJobHistoryEvents.get(0),
            jhe.getHistoryEvent().getEventType());
        expectedJobHistoryEvents.remove(0);
      } else if (current instanceof JobCounterUpdateEvent) {
        JobCounterUpdateEvent jcue = (JobCounterUpdateEvent) current;

        LOG.info("JobCounterUpdateEvent "
            + jcue.getCounterUpdates().get(0).getCounterKey()
            + " " + jcue.getCounterUpdates().get(0).getIncrementValue());
        if (jcue.getCounterUpdates().get(0).getCounterKey() ==
            JobCounter.NUM_FAILED_MAPS) {
          totalFailedMaps += jcue.getCounterUpdates().get(0)
              .getIncrementValue();
        } else if (jcue.getCounterUpdates().get(0).getCounterKey() ==
            JobCounter.TOTAL_LAUNCHED_MAPS) {
          totalLaunchedMaps += jcue.getCounterUpdates().get(0)
              .getIncrementValue();
        }
      } else if (current instanceof JobTaskEvent) {
        JobTaskEvent jte = (JobTaskEvent) current;
        assertEquals(jte.getState(), finalState);

Examples of org.apache.hadoop.mapreduce.v2.app.job.event.JobCounterUpdateEvent
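
In the container allocator, an allocated container is first offered to maps that failed earlier; such an assignment is counted under OTHER_LOCAL_MAPS.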

      ContainerRequest assigned = null;
      while (assigned == null && earlierFailedMaps.size() > 0) {
        TaskAttemptId tId = earlierFailedMaps.removeFirst();     
        if (maps.containsKey(tId)) {
          assigned = maps.remove(tId);
          JobCounterUpdateEvent jce =
            new JobCounterUpdateEvent(assigned.attemptID.getTaskId().getJobId());
          jce.addCounterUpdate(JobCounter.OTHER_LOCAL_MAPS, 1);
          eventHandler.handle(jce);
          LOG.info("Assigned from earlierFailedMaps");
          break;
        }
      }

Examples of org.apache.hadoop.mapreduce.v2.app.job.event.JobCounterUpdateEvent
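
The locality-aware assignment loops: host-local matches are counted as DATA_LOCAL_MAPS, rack-local matches as RACK_LOCAL_MAPS, and whatever remains is assigned to any container and counted as OTHER_LOCAL_MAPS.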

          TaskAttemptId tId = list.removeFirst();
          if (maps.containsKey(tId)) {
            ContainerRequest assigned = maps.remove(tId);
            containerAssigned(allocated, assigned);
            it.remove();
            JobCounterUpdateEvent jce =
              new JobCounterUpdateEvent(assigned.attemptID.getTaskId().getJobId());
            jce.addCounterUpdate(JobCounter.DATA_LOCAL_MAPS, 1);
            eventHandler.handle(jce);
            hostLocalAssigned++;
            if (LOG.isDebugEnabled()) {
              LOG.debug("Assigned based on host match " + host);
            }
            break;
          }
        }
      }
     
      // try to match all rack local
      it = allocatedContainers.iterator();
      while(it.hasNext() && maps.size() > 0){
        Container allocated = it.next();
        Priority priority = allocated.getPriority();
        assert PRIORITY_MAP.equals(priority);
        // "if (maps.containsKey(tId))" below should be almost always true.
        // hence this while loop would almost always have O(1) complexity
        String host = allocated.getNodeId().getHost();
        String rack = RackResolver.resolve(host).getNetworkLocation();
        LinkedList<TaskAttemptId> list = mapsRackMapping.get(rack);
        while (list != null && list.size() > 0) {
          TaskAttemptId tId = list.removeFirst();
          if (maps.containsKey(tId)) {
            ContainerRequest assigned = maps.remove(tId);
            containerAssigned(allocated, assigned);
            it.remove();
            JobCounterUpdateEvent jce =
              new JobCounterUpdateEvent(assigned.attemptID.getTaskId().getJobId());
            jce.addCounterUpdate(JobCounter.RACK_LOCAL_MAPS, 1);
            eventHandler.handle(jce);
            rackLocalAssigned++;
            if (LOG.isDebugEnabled()) {
              LOG.debug("Assigned based on rack match " + rack);
            }
            break;
          }
        }
      }
     
      // assign remaining
      it = allocatedContainers.iterator();
      while(it.hasNext() && maps.size() > 0){
        Container allocated = it.next();
        Priority priority = allocated.getPriority();
        assert PRIORITY_MAP.equals(priority);
        TaskAttemptId tId = maps.keySet().iterator().next();
        ContainerRequest assigned = maps.remove(tId);
        containerAssigned(allocated, assigned);
        it.remove();
        JobCounterUpdateEvent jce =
          new JobCounterUpdateEvent(assigned.attemptID.getTaskId().getJobId());
        jce.addCounterUpdate(JobCounter.OTHER_LOCAL_MAPS, 1);
        eventHandler.handle(jce);
        if (LOG.isDebugEnabled()) {
          LOG.debug("Assigned based on * match");
        }
      }
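
Taken together, the excerpts follow one pattern: build the event with the job's JobId, add counter increments with addCounterUpdate(), dispatch it via the event handler, and let JobImpl apply the increments. Below is a minimal, self-contained sketch of that pattern; it is not taken from the sources above, and the use of ApplicationId.newInstance() and MRBuilderUtils.newJobId() to fabricate a JobId is an assumption made so the example compiles on its own.

import org.apache.hadoop.mapreduce.JobCounter;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobCounterUpdateEvent;
import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;
import org.apache.hadoop.yarn.api.records.ApplicationId;

public class JobCounterUpdateEventSketch {
  public static void main(String[] args) {
    // Fabricate a JobId for the sketch (assumption: MRBuilderUtils is on the
    // classpath, as it is inside the MR app master's own modules).
    ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(), 1);
    JobId jobId = MRBuilderUtils.newJobId(appId, 1);

    // Producer side: accumulate counter increments for the job, as the
    // TaskAttemptImpl helpers and the allocator excerpts above do.
    JobCounterUpdateEvent jce = new JobCounterUpdateEvent(jobId);
    jce.addCounterUpdate(JobCounter.TOTAL_LAUNCHED_MAPS, 1);
    jce.addCounterUpdate(JobCounter.DATA_LOCAL_MAPS, 1);

    // Consumer side: mirror JobImpl.CounterUpdateTransition, which iterates
    // the incremental updates and applies each one to the job counters.
    for (JobCounterUpdateEvent.CounterIncrementalUpdate ci : jce.getCounterUpdates()) {
      System.out.println(ci.getCounterKey() + " += " + ci.getIncrementValue());
    }
  }
}

In the real application master the event would be passed to the dispatcher with eventHandler.handle(jce) rather than printed, and CounterUpdateTransition would perform the increments on the job's counters.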