Package org.apache.hadoop.mapreduce

Examples of org.apache.hadoop.mapreduce.TaskID$CharTaskTypeMaps

CharTaskTypeMaps is the helper class nested inside TaskID that maps the
single type character embedded in a task id string ('m' for MAP, 'r' for
REDUCE, and so on) to and from TaskType. The snippets below show TaskID in
use across Hadoop and HCatalog code.


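A minimal, self-contained sketch of the TaskID lifecycle; the identifier
string "20180101000" and the class name TaskIDDemo are made up for
illustration:

import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.TaskID;
import org.apache.hadoop.mapreduce.TaskType;

public class TaskIDDemo {
  public static void main(String[] args) {
    // the fourth map task (ids are zero-based) of job 1 on a cluster
    // instance identified by the made-up string "20180101000"
    JobID jobId = new JobID("20180101000", 1);
    TaskID taskId = new TaskID(jobId, TaskType.MAP, 3);

    // toString() renders the canonical form; the single 'm' is the
    // character that CharTaskTypeMaps translates to and from TaskType
    System.out.println(taskId); // task_20180101000_0001_m_000003

    // forName() parses the canonical string form back into a TaskID
    TaskID parsed = TaskID.forName("task_20180101000_0001_m_000003");
    System.out.println(parsed.equals(taskId)); // true
  }
}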
From an HCatalog shim for Hadoop 0.20, which builds a placeholder TaskID
with the no-arg constructor:

  private final class HCatHadoopShims20S implements HCatHadoopShims {
    @Override
    public TaskID createTaskID() {
      return new TaskID();
    }


From the reduce side's final merge, where the TaskID of the first
in-memory map output names the on-disk file that will receive the merged
in-memory segments:

    // segments required to vacate memory
    List<Segment<K, V>> memDiskSegments = new ArrayList<Segment<K, V>>();
    long inMemToDiskBytes = 0;
    boolean mergePhaseFinished = false;
    if (inMemoryMapOutputs.size() > 0) {
      TaskID mapId = inMemoryMapOutputs.get(0).getMapId().getTaskID();
      inMemToDiskBytes = createInMemorySegments(inMemoryMapOutputs,
                                                memDiskSegments,
                                                maxInMemReduce);
      final int numMemDiskSegments = memDiskSegments.size();
      // (truncated in the excerpt: when segments were staged above, they
      // are merged into a single disk file derived from mapId)

From the in-memory merger, where the map's TaskID is recovered from a
TaskAttemptID; empty files are deleted as soon as they are seen in the
merge method:

      // figure out the mapId
      TaskAttemptID mapId = inputs.get(0).getMapId();
      TaskID mapTaskId = mapId.getTaskID();

      List<Segment<K, V>> inMemorySegments = new ArrayList<Segment<K, V>>();
      long mergeOutputSize =
          createInMemorySegments(inputs, inMemorySegments, 0);
      int noInMemorySegments = inMemorySegments.size();
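The getTaskID() call above walks one level up MapReduce's id hierarchy: a
TaskAttemptID contains a TaskID, which in turn contains a JobID. A small
standalone sketch (all identifier values are made up):

import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.TaskID;
import org.apache.hadoop.mapreduce.TaskType;

public class IdHierarchyDemo {
  public static void main(String[] args) {
    // attempt 0 of reduce task 7 of job 2
    TaskAttemptID attempt =
        new TaskAttemptID("20180101000", 2, TaskType.REDUCE, 7, 0);

    TaskID task = attempt.getTaskID();
    JobID job = task.getJobID();

    System.out.println(attempt); // attempt_20180101000_0002_r_000007_0
    System.out.println(task);    // task_20180101000_0002_r_000007
    System.out.println(job);     // job_20180101000_0002
  }
}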

From a job-history test that walks every task of a job and sanity-checks
its recorded times:

    Map<TaskID, TaskInfo> tasks = job.getAllTasks();

    // validate info of each task
    for (TaskInfo task : tasks.values()) {

      TaskID tid = task.getTaskId();
      long startTime = task.getStartTime();
      assertTrue("Invalid Start time", startTime > 0);

      long finishTime = task.getFinishTime();
      assertTrue("Task FINISH_TIME is < START_TIME in history file",
                 startTime <= finishTime); // condition implied by the message

From a job-history test that looks up the first map, first reduce, cleanup
and setup TaskIDs and validates their history entries:

    JobTracker jt = mr.getJobTrackerRunner().getJobTracker();
    JobInProgress jip = jt.getJob(job.getID());

    // Get the 1st map, 1st reduce, cleanup & setup taskIDs and
    // validate their history info
    TaskID mapTaskId = new TaskID(job.getID(), TaskType.MAP, 0);
    TaskID reduceTaskId = new TaskID(job.getID(), TaskType.REDUCE, 0);

    TaskInProgress[] cleanups = jip.cleanup;
    TaskID cleanupTaskId;
    if (cleanups[0].isComplete()) {
      cleanupTaskId = cleanups[0].getTIPId();
    } else {
      cleanupTaskId = cleanups[1].getTIPId();
    }

    TaskInProgress[] setups = jip.setup;
    TaskID setupTaskId;
    if (setups[0].isComplete()) {
      setupTaskId = setups[0].getTIPId();
    } else {
      setupTaskId = setups[1].getTIPId();
    }

    Map<TaskID, TaskInfo> tasks = jobInfo.getAllTasks();

    // validate info of the 4 tasks (cleanup, setup, 1st map, 1st reduce)
    for (TaskInfo task : tasks.values()) {
      TaskID tid = task.getTaskId();

      if (tid.equals(mapTaskId) ||
          tid.equals(reduceTaskId) ||
          tid.equals(cleanupTaskId) ||
          tid.equals(setupTaskId)) {

        // a new-API TaskID must be downgraded to the old mapred TaskID
        // before the JobTracker's in-memory state can be queried
        TaskInProgress tip = jip.getTaskInProgress(
            org.apache.hadoop.mapred.TaskID.downgrade(tid));
        assertTrue("START_TIME of Task " + tid + " obtained from history " +
            "file did not match the expected value",
            // completion assumed: the expected value is the start time
            // recorded in the live TaskInProgress
            task.getStartTime() == tip.getExecStartTime());
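The downgrade call is the standard bridge between the two generations of
the API: org.apache.hadoop.mapred.TaskID extends
org.apache.hadoop.mapreduce.TaskID, so the old type can be used wherever
the new one is expected, and the static downgrade() converts in the other
direction. A minimal sketch (identifier string made up for illustration):

import org.apache.hadoop.mapreduce.TaskType;

public class DowngradeDemo {
  public static void main(String[] args) {
    // a new-API id...
    org.apache.hadoop.mapreduce.TaskID newApiId =
        new org.apache.hadoop.mapreduce.TaskID("20180101000", 1, TaskType.MAP, 0);

    // ...downgraded for old-API consumers such as the JobInProgress above
    org.apache.hadoop.mapred.TaskID oldApiId =
        org.apache.hadoop.mapred.TaskID.downgrade(newApiId);

    // both render the same canonical string
    System.out.println(oldApiId); // task_20180101000_0001_m_000000
  }
}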

From the same family of history tests, validating each attempt of each
task:

    for (TaskInfo task : tasks.values()) {
      // validate info of each attempt
      for (TaskAttemptInfo attempt : task.getAllTaskAttempts().values()) {

        TaskAttemptID attemptId = attempt.getAttemptId();
        TaskID tid = attemptId.getTaskID();

        TaskInProgress tip = jip.getTaskInProgress(
            org.apache.hadoop.mapred.TaskID.downgrade(tid));

        // the attempt id is downgraded the same way before the status is
        // fetched (completion of the truncated call is assumed)
        TaskStatus ts = tip.getTaskStatus(
            org.apache.hadoop.mapred.TaskAttemptID.downgrade(attemptId));

From the HCatalog shim for Hadoop 0.23, where the placeholder TaskID is
built fully specified:

import org.apache.hadoop.net.NetUtils;

public class HCatHadoopShims23 implements HCatHadoopShims {
    @Override
    public TaskID createTaskID() {
        return new TaskID("", 0, TaskType.MAP, 0);
    }

And the matching shim for Hadoop 0.20, again relying on the no-arg
constructor:

import org.apache.hadoop.util.Progressable;

public class HCatHadoopShims20S implements HCatHadoopShims {
    @Override
    public TaskID createTaskID() {
        return new TaskID();
    }
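Taken together, the two shims show the point of the factory: the cheapest
way to obtain a placeholder TaskID differs across Hadoop versions. A
hedged sketch of the same dispatch collapsed into one class (the isHadoop2
flag and class name are made up, the "why" is presumption, and both
constructors compile against Hadoop 2.x):

import org.apache.hadoop.mapreduce.TaskID;
import org.apache.hadoop.mapreduce.TaskType;

public class TaskIdFactory {
  private final boolean isHadoop2; // made-up flag standing in for shim selection

  public TaskIdFactory(boolean isHadoop2) {
    this.isHadoop2 = isHadoop2;
  }

  public TaskID createTaskID() {
    // 0.23+/2.x: a fully specified placeholder, presumably so that code
    // formatting the id never sees unset fields; 0.20: the bare no-arg
    // constructor was sufficient
    return isHadoop2 ? new TaskID("", 0, TaskType.MAP, 0) : new TaskID();
  }
}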

From a custom output format that opens one file per field value, using the
TaskID's numeric id to keep file names unique across tasks:

        private DataOutputStream createOutputStream(String fieldValue) throws IOException {
          Configuration conf = ctx.getConfiguration();
          TaskID taskId = ctx.getTaskAttemptID().getTaskID();

          // Check whether compression is enabled; if so, get the extension
          // and add it to the paths below
          boolean isCompressed = getCompressOutput(ctx);
          CompressionCodec codec = null;
          String extension = "";
          if (isCompressed) {
            Class<? extends CompressionCodec> codecClass =
                getOutputCompressorClass(ctx, GzipCodec.class);
            codec = (CompressionCodec) ReflectionUtils.newInstance(codecClass, conf);
            extension = codec.getDefaultExtension();
          }

          // zero-pad the task id so that file names line up and sort
          NumberFormat nf = NumberFormat.getInstance();
          nf.setMinimumIntegerDigits(4);

          Path path = new Path(fieldValue + extension, fieldValue + '-'
              + nf.format(taskId.getId()) + extension);
          Path workOutputPath = ((FileOutputCommitter) getOutputCommitter(ctx)).getWorkPath();
          Path file = new Path(workOutputPath, path);
          FileSystem fs = file.getFileSystem(conf);
          FSDataOutputStream fileOut = fs.create(file, false);

          // (truncated in the excerpt: when compression is enabled, fileOut
          // is presumably wrapped via codec.createOutputStream before the
          // stream is returned)
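The zero-padding above is the same trick Hadoop itself uses when rendering
ids. A standalone sketch of the naming scheme (the field value "price" and
the ".gz" extension are made up):

import java.text.NumberFormat;

public class PartNameDemo {
  public static void main(String[] args) {
    int taskNumericId = 7; // e.g. TaskID.getId() of the running task

    NumberFormat nf = NumberFormat.getInstance();
    nf.setMinimumIntegerDigits(4);
    nf.setGroupingUsed(false); // avoid "1,234" for ids past 999

    // each task writes its own file, so concurrent tasks never collide
    String fileName = "price-" + nf.format(taskNumericId) + ".gz";
    System.out.println(fileName); // price-0007.gz
  }
}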

