Examples of TaskAttemptKillEvent


Examples of org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptKillEvent

        LOG.info("Ignoring killed event for successful reduce task attempt" +
                  taskAttempt.getID().toString());
        return TaskAttemptStateInternal.SUCCEEDED;
      }
      if(event instanceof TaskAttemptKillEvent) {
        TaskAttemptKillEvent msgEvent = (TaskAttemptKillEvent) event;
        //add to diagnostic
        taskAttempt.addDiagnosticInfo(msgEvent.getMessage());
      }

      // not setting a finish time since it was set on success
      assert (taskAttempt.getFinishTime() != 0);
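
The snippet above stores the kill message into the attempt's diagnostics via getMessage(). The following is a minimal, self-contained sketch (not taken from the Hadoop sources) of the sending side: it builds a synthetic TaskAttemptId with MRBuilderUtils, wraps it in a TaskAttemptKillEvent carrying a diagnostic string, and hands it to a toy EventHandler. The handler and the message text are illustrative stand-ins; in the real application master the event goes through the central dispatcher to the TaskAttemptImpl state machine.

import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptKillEvent;
import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.event.EventHandler;

public class TaskAttemptKillEventSketch {

  public static void main(String[] args) {
    // Build a synthetic attempt id: application 1 -> job 1 -> map task 3 -> attempt 0.
    ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(), 1);
    JobId jobId = MRBuilderUtils.newJobId(appId, 1);
    TaskId taskId = MRBuilderUtils.newTaskId(jobId, 3, TaskType.MAP);
    TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);

    // Toy handler: just prints the id and the diagnostic message it would record.
    EventHandler<TaskAttemptKillEvent> handler = event ->
        System.out.println("Killing " + event.getTaskAttemptID()
            + ", diagnostic: " + event.getMessage());

    handler.handle(new TaskAttemptKillEvent(attemptId,
        "Illustrative kill: node reported unusable"));
  }
}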

Examples of org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptKillEvent

      String mesg = "TaskAttempt killed because it ran on unusable node "
          + nodeId;
      for (TaskAttemptId id : taskAttemptIdList) {
        if (TaskType.MAP == id.getTaskId().getTaskType()) {
          // reschedule only map tasks because their outputs may be unusable
          LOG.info(mesg + ". AttemptId:" + id);
          eventHandler.handle(new TaskAttemptKillEvent(id, mesg));
        }
      }
    }
    // currently running task attempts on unusable nodes are handled in
    // RMContainerAllocator
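
The loop above re-kills only completed map attempts when a node turns unusable, because finished reduce output is already safe in HDFS while map output lives on the lost node's local disks. The sketch below reproduces that filter outside the AM; the attempt ids, the node string, and the collecting list are synthetic stand-ins for illustration, not the job's real bookkeeping.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptKillEvent;
import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;
import org.apache.hadoop.yarn.api.records.ApplicationId;

public class RescheduleMapsOnLostNodeSketch {

  public static void main(String[] args) {
    ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(), 1);
    JobId jobId = MRBuilderUtils.newJobId(appId, 1);

    // Succeeded attempts that ran on the node that just became unusable:
    // one map and one reduce (both ids are synthetic).
    List<TaskAttemptId> succeededOnNode = Arrays.asList(
        MRBuilderUtils.newTaskAttemptId(
            MRBuilderUtils.newTaskId(jobId, 0, TaskType.MAP), 0),
        MRBuilderUtils.newTaskAttemptId(
            MRBuilderUtils.newTaskId(jobId, 1, TaskType.REDUCE), 0));

    String mesg = "TaskAttempt killed because it ran on unusable node host-1:8041";
    List<TaskAttemptKillEvent> toDispatch = new ArrayList<>();
    for (TaskAttemptId id : succeededOnNode) {
      if (TaskType.MAP == id.getTaskId().getTaskType()) {
        // Only map output must be recomputed; completed reduce output is in HDFS.
        toDispatch.add(new TaskAttemptKillEvent(id, mesg));
      }
    }
    System.out.println("Would dispatch " + toDispatch.size() + " kill event(s)");
  }
}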

Examples of org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptKillEvent

     
      for (int i = 0; i < toPreempt && !reduceList.isEmpty(); i++) {
        TaskAttemptId id = reduceList.remove(0); // remove the attempt at the head
        LOG.info("Preempting " + id);
        preemptionWaitingReduces.add(id);
        eventHandler.handle(new TaskAttemptKillEvent(id, RAMPDOWN_DIAGNOSTIC));
      }
    }
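
The preemption loop above pops reduce attempts off a list, remembers them in preemptionWaitingReduces, and fires a kill event carrying the allocator's RAMPDOWN_DIAGNOSTIC text. The sketch below mirrors that shape with synthetic ids; the diagnostic constant and the collections here are illustrative stand-ins, not the RMContainerAllocator's own fields.

import java.util.ArrayList;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Set;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptKillEvent;
import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;
import org.apache.hadoop.yarn.api.records.ApplicationId;

public class ReduceRampDownSketch {

  // Illustrative stand-in for the allocator's ramp-down diagnostic text.
  private static final String RAMPDOWN_DIAGNOSTIC =
      "Reducer preempted to make room for pending map attempts";

  public static void main(String[] args) {
    ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(), 1);
    JobId jobId = MRBuilderUtils.newJobId(appId, 1);

    // Reduce attempts currently holding containers, oldest first.
    LinkedList<TaskAttemptId> reduceList = new LinkedList<>();
    for (int r = 0; r < 3; r++) {
      reduceList.add(MRBuilderUtils.newTaskAttemptId(
          MRBuilderUtils.newTaskId(jobId, r, TaskType.REDUCE), 0));
    }

    int toPreempt = 2;                        // containers needed back for maps
    Set<TaskAttemptId> preemptionWaitingReduces = new HashSet<>();
    List<TaskAttemptKillEvent> events = new ArrayList<>();

    for (int i = 0; i < toPreempt && !reduceList.isEmpty(); i++) {
      TaskAttemptId id = reduceList.remove(0); // pop the attempt at the head
      preemptionWaitingReduces.add(id);        // remember it until it reports killed
      events.add(new TaskAttemptKillEvent(id, RAMPDOWN_DIAGNOSTIC));
    }
    System.out.println("Prepared " + events.size() + " preemption kill event(s)");
  }
}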

Examples of org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptKillEvent

          NodeId taskAttemptNodeId = entry.getValue().getNodeId();
          if (unusableNodes.contains(taskAttemptNodeId)) {
            LOG.info("Killing taskAttempt:" + tid
                + " because it is running on unusable node:"
                + taskAttemptNodeId);
            eventHandler.handle(new TaskAttemptKillEvent(tid,
                "TaskAttempt killed because it ran on unusable node "
                    + taskAttemptNodeId));
          }
        }
      }
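
Here the check runs the other direction: currently running attempts are scanned and killed if the node their container was assigned to is in the unusable set. The sketch below reproduces that membership test with synthetic data; the unusableNodes set and the attempt-to-node map are made-up stand-ins, whereas in the allocator the node comes from the attempt's assigned Container.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptKillEvent;
import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.NodeId;

public class UnusableNodeCheckSketch {

  public static void main(String[] args) {
    ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(), 1);
    JobId jobId = MRBuilderUtils.newJobId(appId, 1);

    NodeId goodNode = NodeId.newInstance("host-1", 8041);
    NodeId badNode = NodeId.newInstance("host-2", 8041);
    Set<NodeId> unusableNodes = new HashSet<>();
    unusableNodes.add(badNode);

    // Running attempts mapped to the nodes their containers were assigned to.
    Map<TaskAttemptId, NodeId> assignedAttempts = new HashMap<>();
    assignedAttempts.put(MRBuilderUtils.newTaskAttemptId(
        MRBuilderUtils.newTaskId(jobId, 0, TaskType.MAP), 0), goodNode);
    assignedAttempts.put(MRBuilderUtils.newTaskAttemptId(
        MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP), 0), badNode);

    List<TaskAttemptKillEvent> toDispatch = new ArrayList<>();
    for (Map.Entry<TaskAttemptId, NodeId> entry : assignedAttempts.entrySet()) {
      if (unusableNodes.contains(entry.getValue())) {
        toDispatch.add(new TaskAttemptKillEvent(entry.getKey(),
            "TaskAttempt killed because it ran on unusable node " + entry.getValue()));
      }
    }
    System.out.println("Would dispatch " + toDispatch.size() + " kill event(s)");
  }
}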

Examples of org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptKillEvent

    @SuppressWarnings("unchecked")
    @Override
    public void transition(TaskAttemptImpl taskAttempt,
        TaskAttemptEvent event) {
      TaskAttemptKillEvent msgEvent = (TaskAttemptKillEvent) event;
      //add to diagnostic
      taskAttempt.addDiagnosticInfo(msgEvent.getMessage());

      // not setting a finish time since it was set on success
      assert (taskAttempt.getFinishTime() != 0);

      assert (taskAttempt.getLaunchTime() != 0);
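
The transition above runs inside TaskAttemptImpl's state machine, whose diagnostics bookkeeping is not accessible from outside the class. The sketch below shows the same cast-and-record shape against a made-up operand type (AttemptStub); only SingleArcTransition, TaskAttemptEvent, and TaskAttemptKillEvent are real Hadoop types here, and the null attempt id is acceptable only because this toy dispatch never reads it.

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptKillEvent;
import org.apache.hadoop.yarn.state.SingleArcTransition;

public class KilledAfterSuccessSketch {

  /** Made-up stand-in for TaskAttemptImpl's diagnostics bookkeeping. */
  static class AttemptStub {
    final List<String> diagnostics = new ArrayList<>();
  }

  /** Mirrors the snippet's shape: check the event type, cast, and record its message. */
  static class RecordKillMessageTransition
      implements SingleArcTransition<AttemptStub, TaskAttemptEvent> {
    @Override
    public void transition(AttemptStub attempt, TaskAttemptEvent event) {
      if (event instanceof TaskAttemptKillEvent) {
        attempt.diagnostics.add(((TaskAttemptKillEvent) event).getMessage());
      }
    }
  }

  public static void main(String[] args) {
    AttemptStub attempt = new AttemptStub();
    new RecordKillMessageTransition().transition(attempt,
        new TaskAttemptKillEvent(null, "Illustrative kill after success"));
    System.out.println(attempt.diagnostics);
  }
}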