Examples of JobCounterUpdateEvent


Examples of org.apache.hadoop.mapreduce.v2.app.job.event.JobCounterUpdateEvent

      InetSocketAddress nodeHttpInetAddr =
          NetUtils.createSocketAddr(taskAttempt.nodeHttpAddress); // TODO: Costly?
      taskAttempt.trackerName = nodeHttpInetAddr.getHostName();
      taskAttempt.httpPort = nodeHttpInetAddr.getPort();
      // Record the launch of this attempt in the job-level counters.
      JobCounterUpdateEvent jce =
          new JobCounterUpdateEvent(taskAttempt.attemptId.getTaskId()
              .getJobId());
      jce.addCounterUpdate(
          taskAttempt.attemptId.getTaskId().getTaskType() == TaskType.MAP
              ? JobCounter.TOTAL_LAUNCHED_MAPS
              : JobCounter.TOTAL_LAUNCHED_REDUCES,
          1);
      taskAttempt.eventHandler.handle(jce);
View Full Code Here
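
Every excerpt on this page follows the same three-step pattern: build a JobCounterUpdateEvent keyed by the job's JobId, attach one or more counter increments with addCounterUpdate, and hand the event to the application master's event handler. A minimal sketch of that pattern, factored into a hypothetical helper (bumpJobCounter is not part of Hadoop; the real code inlines these steps at each call site), assuming the Hadoop MapReduce v2 AM classes used in the excerpts:

    import org.apache.hadoop.mapreduce.JobCounter;
    import org.apache.hadoop.mapreduce.v2.api.records.JobId;
    import org.apache.hadoop.mapreduce.v2.app.job.event.JobCounterUpdateEvent;
    import org.apache.hadoop.yarn.event.EventHandler;

    public class JobCounterUpdateSketch {

      // Increment a single job-level counter: build the event, attach the
      // increment, and dispatch it through the AM's event handler.
      static void bumpJobCounter(EventHandler<JobCounterUpdateEvent> eventHandler,
          JobId jobId, JobCounter counter, long increment) {
        JobCounterUpdateEvent jce = new JobCounterUpdateEvent(jobId);
        jce.addCounterUpdate(counter, increment);
        eventHandler.handle(jce);
      }
    }

The first excerpt above, for instance, reduces to bumpJobCounter(taskAttempt.eventHandler, jobId, JobCounter.TOTAL_LAUNCHED_MAPS, 1) for a map attempt (or TOTAL_LAUNCHED_REDUCES for a reduce attempt).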

Examples of org.apache.hadoop.mapreduce.v2.app.job.event.JobCounterUpdateEvent

        TaskAttemptEvent event) {
      // Set the finish time.
      taskAttempt.setFinishTime();
      long slotMillis = computeSlotMillis(taskAttempt);
      TaskId taskId = taskAttempt.attemptId.getTaskId();
      // Charge this attempt's slot-milliseconds to the job-level counters.
      JobCounterUpdateEvent jce = new JobCounterUpdateEvent(taskId.getJobId());
      jce.addCounterUpdate(
        taskId.getTaskType() == TaskType.MAP ?
          JobCounter.SLOTS_MILLIS_MAPS : JobCounter.SLOTS_MILLIS_REDUCES,
          slotMillis);
      taskAttempt.eventHandler.handle(jce);
      taskAttempt.logAttemptFinishedEvent(TaskAttemptState.SUCCEEDED);
View Full Code Here

Examples of org.apache.hadoop.mapreduce.v2.app.job.event.JobCounterUpdateEvent

      ContainerRequest assigned = null;
      // Give priority to maps that failed earlier; any node will do for a
      // retry, so the assignment is credited to OTHER_LOCAL_MAPS.
      while (assigned == null && earlierFailedMaps.size() > 0) {
        TaskAttemptId tId = earlierFailedMaps.removeFirst();
        if (maps.containsKey(tId)) {
          assigned = maps.remove(tId);
          JobCounterUpdateEvent jce =
            new JobCounterUpdateEvent(assigned.attemptID.getTaskId().getJobId());
          jce.addCounterUpdate(JobCounter.OTHER_LOCAL_MAPS, 1);
          eventHandler.handle(jce);
          LOG.info("Assigned from earlierFailedMaps");
          break;
        }
      }
View Full Code Here

Examples of org.apache.hadoop.mapreduce.v2.app.job.event.JobCounterUpdateEvent

            LOG.debug("Host matched to the request list " + host);
          }
          TaskAttemptId tId = list.removeFirst();
          if (maps.containsKey(tId)) {
            assigned = maps.remove(tId);
            JobCounterUpdateEvent jce =
              new JobCounterUpdateEvent(assigned.attemptID.getTaskId().getJobId());
            jce.addCounterUpdate(JobCounter.DATA_LOCAL_MAPS, 1);
            eventHandler.handle(jce);
            hostLocalAssigned++;
            if (LOG.isDebugEnabled()) {
              LOG.debug("Assigned based on host match " + host);
            }
            break;
          }
        }
        if (assigned == null) {
          String rack = RackResolver.resolve(host).getNetworkLocation();
          list = mapsRackMapping.get(rack);
          while (list != null && list.size() > 0) {
            TaskAttemptId tId = list.removeFirst();
            if (maps.containsKey(tId)) {
              assigned = maps.remove(tId);
              JobCounterUpdateEvent jce =
                new JobCounterUpdateEvent(assigned.attemptID.getTaskId().getJobId());
              jce.addCounterUpdate(JobCounter.RACK_LOCAL_MAPS, 1);
              eventHandler.handle(jce);
              rackLocalAssigned++;
              if (LOG.isDebugEnabled()) {
                LOG.debug("Assigned based on rack match " + rack);
              }
              break;
            }
          }
          if (assigned == null && maps.size() > 0) {
            TaskAttemptId tId = maps.keySet().iterator().next();
            assigned = maps.remove(tId);
            JobCounterUpdateEvent jce =
              new JobCounterUpdateEvent(assigned.attemptID.getTaskId().getJobId());
            jce.addCounterUpdate(JobCounter.OTHER_LOCAL_MAPS, 1);
            eventHandler.handle(jce);
            if (LOG.isDebugEnabled()) {
              LOG.debug("Assigned based on * match");
            }
            break;
View Full Code Here
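
The allocator above credits a different job counter depending on how the container was matched to the map: DATA_LOCAL_MAPS on a host match, RACK_LOCAL_MAPS on a rack match, and OTHER_LOCAL_MAPS when neither matched. A hedged sketch of that counter selection (the Locality enum and localityCounter helper are hypothetical; the real allocator picks the counter inline at each of the three assignment sites):

    import org.apache.hadoop.mapreduce.JobCounter;

    class MapLocalitySketch {

      // How an allocated container relates to the map's preferred locations.
      enum Locality { HOST_LOCAL, RACK_LOCAL, OFF_SWITCH }

      // Pick the job counter that an assignment with this locality should bump.
      static JobCounter localityCounter(Locality locality) {
        switch (locality) {
          case HOST_LOCAL: return JobCounter.DATA_LOCAL_MAPS;
          case RACK_LOCAL: return JobCounter.RACK_LOCAL_MAPS;
          default:         return JobCounter.OTHER_LOCAL_MAPS;
        }
      }
    }

Combined with the bumpJobCounter sketch above, each assignment branch reduces to bumpJobCounter(eventHandler, jobId, localityCounter(loc), 1).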

Examples of org.apache.hadoop.mapreduce.v2.app.job.event.JobCounterUpdateEvent

  }

  private static JobCounterUpdateEvent createJobCounterUpdateEventTAFailed(
      TaskAttemptImpl taskAttempt) {
    TaskType taskType = taskAttempt.getID().getTaskId().getTaskType();
    JobCounterUpdateEvent jce = new JobCounterUpdateEvent(taskAttempt.getID().getTaskId().getJobId());
   
    long slotMillisIncrement = computeSlotMillis(taskAttempt);
   
    if (taskType == TaskType.MAP) {
      jce.addCounterUpdate(JobCounter.NUM_FAILED_MAPS, 1);
      jce.addCounterUpdate(JobCounter.SLOTS_MILLIS_MAPS, slotMillisIncrement);
    } else {
      jce.addCounterUpdate(JobCounter.NUM_FAILED_REDUCES, 1);
      jce.addCounterUpdate(JobCounter.SLOTS_MILLIS_REDUCES, slotMillisIncrement);
    }
    return jce;
  }
View Full Code Here

Examples of org.apache.hadoop.mapreduce.v2.app.job.event.JobCounterUpdateEvent

  }
 
  private static JobCounterUpdateEvent createJobCounterUpdateEventTAKilled(
      TaskAttemptImpl taskAttempt) {
    TaskType taskType = taskAttempt.getID().getTaskId().getTaskType();
    JobCounterUpdateEvent jce = new JobCounterUpdateEvent(taskAttempt.getID().getTaskId().getJobId());
   
    long slotMillisIncrement = computeSlotMillis(taskAttempt);
   
    if (taskType == TaskType.MAP) {
      jce.addCounterUpdate(JobCounter.NUM_KILLED_MAPS, 1);
      jce.addCounterUpdate(JobCounter.SLOTS_MILLIS_MAPS, slotMillisIncrement);
    } else {
      jce.addCounterUpdate(JobCounter.NUM_KILLED_REDUCES, 1);
      jce.addCounterUpdate(JobCounter.SLOTS_MILLIS_REDUCES, slotMillisIncrement);
    }
    return jce;
 
View Full Code Here
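
The failed and killed factories above differ only in which per-state counter they bump; both also charge the attempt's slot-milliseconds to the job. A hedged sketch of a shared helper (createTerminalStateUpdate is hypothetical, not part of TaskAttemptImpl) makes that symmetry explicit:

    import org.apache.hadoop.mapreduce.JobCounter;
    import org.apache.hadoop.mapreduce.v2.api.records.JobId;
    import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
    import org.apache.hadoop.mapreduce.v2.app.job.event.JobCounterUpdateEvent;

    class TerminalCounterSketch {

      // Build the counter update for an attempt that ended FAILED or KILLED:
      // bump the matching per-state counter and charge the slot-milliseconds.
      static JobCounterUpdateEvent createTerminalStateUpdate(JobId jobId,
          TaskType taskType, boolean killed, long slotMillis) {
        JobCounterUpdateEvent jce = new JobCounterUpdateEvent(jobId);
        if (taskType == TaskType.MAP) {
          jce.addCounterUpdate(
              killed ? JobCounter.NUM_KILLED_MAPS : JobCounter.NUM_FAILED_MAPS, 1);
          jce.addCounterUpdate(JobCounter.SLOTS_MILLIS_MAPS, slotMillis);
        } else {
          jce.addCounterUpdate(
              killed ? JobCounter.NUM_KILLED_REDUCES : JobCounter.NUM_FAILED_REDUCES, 1);
          jce.addCounterUpdate(JobCounter.SLOTS_MILLIS_REDUCES, slotMillis);
        }
        return jce;
      }
    }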

Examples of org.apache.hadoop.mapreduce.v2.app.job.event.JobCounterUpdateEvent

      container.setContainerToken(null);
      container.setNodeHttpAddress(this.nmHost + ":" + this.nmHttpPort);
      // send the container-assigned event to task attempt

      if (event.getAttemptID().getTaskId().getTaskType() == TaskType.MAP) {
        JobCounterUpdateEvent jce =
            new JobCounterUpdateEvent(event.getAttemptID().getTaskId()
                .getJobId());
        // TODO Setting OTHER_LOCAL_MAPS for now.
        jce.addCounterUpdate(JobCounter.OTHER_LOCAL_MAPS, 1);
        eventHandler.handle(jce);
      }
      eventHandler.handle(new TaskAttemptContainerAssignedEvent(
          event.getAttemptID(), container, applicationACLs));
    }
View Full Code Here

Examples of org.apache.hadoop.mapreduce.v2.app.job.event.JobCounterUpdateEvent

          TaskAttemptId tId = list.removeFirst();
          if (maps.containsKey(tId)) {
            ContainerRequest assigned = maps.remove(tId);
            containerAssigned(allocated, assigned);
            it.remove();
            JobCounterUpdateEvent jce =
              new JobCounterUpdateEvent(assigned.attemptID.getTaskId().getJobId());
            jce.addCounterUpdate(JobCounter.DATA_LOCAL_MAPS, 1);
            eventHandler.handle(jce);
            hostLocalAssigned++;
            if (LOG.isDebugEnabled()) {
              LOG.debug("Assigned based on host match " + host);
            }
            break;
          }
        }
      }
     
      // try to match all rack local
      it = allocatedContainers.iterator();
      while(it.hasNext() && maps.size() > 0){
        Container allocated = it.next();
        Priority priority = allocated.getPriority();
        assert PRIORITY_MAP.equals(priority);
        // "if (maps.containsKey(tId))" below should be almost always true.
        // hence this while loop would almost always have O(1) complexity
        String host = allocated.getNodeId().getHost();
        String rack = RackResolver.resolve(host).getNetworkLocation();
        LinkedList<TaskAttemptId> list = mapsRackMapping.get(rack);
        while (list != null && list.size() > 0) {
          TaskAttemptId tId = list.removeFirst();
          if (maps.containsKey(tId)) {
            ContainerRequest assigned = maps.remove(tId);
            containerAssigned(allocated, assigned);
            it.remove();
            JobCounterUpdateEvent jce =
              new JobCounterUpdateEvent(assigned.attemptID.getTaskId().getJobId());
            jce.addCounterUpdate(JobCounter.RACK_LOCAL_MAPS, 1);
            eventHandler.handle(jce);
            rackLocalAssigned++;
            if (LOG.isDebugEnabled()) {
              LOG.debug("Assigned based on rack match " + rack);
            }
            break;
          }
        }
      }
     
      // assign remaining
      it = allocatedContainers.iterator();
      while(it.hasNext() && maps.size() > 0){
        Container allocated = it.next();
        Priority priority = allocated.getPriority();
        assert PRIORITY_MAP.equals(priority);
        TaskAttemptId tId = maps.keySet().iterator().next();
        ContainerRequest assigned = maps.remove(tId);
        containerAssigned(allocated, assigned);
        it.remove();
        JobCounterUpdateEvent jce =
          new JobCounterUpdateEvent(assigned.attemptID.getTaskId().getJobId());
        jce.addCounterUpdate(JobCounter.OTHER_LOCAL_MAPS, 1);
        eventHandler.handle(jce);
        if (LOG.isDebugEnabled()) {
          LOG.debug("Assigned based on * match");
        }
      }
View Full Code Here

Examples of org.apache.hadoop.mapreduce.v2.app.job.event.JobCounterUpdateEvent

            doneWithMaps = true;
          }

          try {
            if (remoteTask.isMapOrReduce()) {
              JobCounterUpdateEvent jce = new JobCounterUpdateEvent(attemptID.getTaskId().getJobId());
              jce.addCounterUpdate(JobCounter.TOTAL_LAUNCHED_UBERTASKS, 1);
              if (remoteTask.isMapTask()) {
                jce.addCounterUpdate(JobCounter.NUM_UBER_SUBMAPS, 1);
              } else {
                jce.addCounterUpdate(JobCounter.NUM_UBER_SUBREDUCES, 1);
              }
              context.getEventHandler().handle(jce);
            }
            runSubtask(remoteTask, ytask.getType(), attemptID, numMapTasks,
                       (numReduceTasks > 0));
           
          } catch (RuntimeException re) {
            JobCounterUpdateEvent jce = new JobCounterUpdateEvent(attemptID.getTaskId().getJobId());
            jce.addCounterUpdate(JobCounter.NUM_FAILED_UBERTASKS, 1);
            context.getEventHandler().handle(jce);
            // this is our signal that the subtask failed in some way, so
            // simulate a failed JVM/container and send a container-completed
            // event to task attempt (i.e., move state machine from RUNNING
            // to FAIL_CONTAINER_CLEANUP [and ultimately to FAILED])
View Full Code Here