Package org.apache.hadoop.mapreduce.split.JobSplit

Examples of org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitIndex


    Path localMetaSplit =
        new LocalDirAllocator("mapred.local.dir").getLocalPathToRead(
            TaskTracker.getLocalSplitFile(conf.getUser(), taskId.getJobID()
                .toString(), taskId.toString()), conf);
    DataInputStream splitFile = FileSystem.getLocal(conf).open(localMetaSplit);
    TaskSplitIndex splitIndex = new TaskSplitIndex();
    splitIndex.readFields(splitFile);
    splitFile.close();
    Task task =
      new MapTask(jobFilename.toString(), taskId, partition, splitIndex, 1);
    task.setConf(conf);
    task.run(conf, new FakeUmbilical());
View Full Code Here


    super.write(out);
    if (isMapOrReduce()) {
      if (splitMetaInfo != null) {
        splitMetaInfo.write(out);
      } else {
        new TaskSplitIndex().write(out);
      }
      //TODO do we really need to set this to null?
      splitMetaInfo = null;
    }
  }
View Full Code Here

          mrReader = new MRReaderMapred(jobConf, getContext().getCounters(), inputRecordCounter);
        }
      } else {
        TaskSplitMetaInfo[] allMetaInfo = MRInputUtils.readSplits(jobConf);
        TaskSplitMetaInfo thisTaskMetaInfo = allMetaInfo[getContext().getTaskIndex()];
        TaskSplitIndex splitMetaInfo = new TaskSplitIndex(thisTaskMetaInfo.getSplitLocation(),
            thisTaskMetaInfo.getStartOffset());
        if (useNewApi) {
          org.apache.hadoop.mapreduce.InputSplit newInputSplit = MRInputUtils
              .getNewSplitDetailsFromDisk(splitMetaInfo, jobConf, getContext().getCounters()
                  .findCounter(TaskCounter.SPLIT_RAW_BYTES));
View Full Code Here

    super.write(out);
    if (isMapOrReduce()) {
      if (splitMetaInfo != null) {
        splitMetaInfo.write(out);
      } else {
        new TaskSplitIndex().write(out);
      }
      //TODO do we really need to set this to null?
      splitMetaInfo = null;
    }
  }
View Full Code Here

      } else {
        // Read split information.
        TaskSplitMetaInfo[] allMetaInfo = readSplits(jobConf);
        TaskSplitMetaInfo thisTaskMetaInfo = allMetaInfo[inputContext
            .getTaskIndex()];
        this.splitMetaInfo = new TaskSplitIndex(
            thisTaskMetaInfo.getSplitLocation(),
            thisTaskMetaInfo.getStartOffset());
        if (useNewApi) {
          setupNewInputFormat();
          newInputSplit = getNewSplitDetailsFromDisk(splitMetaInfo);
View Full Code Here

      } else {
        // Read split information.
        TaskSplitMetaInfo[] allMetaInfo = readSplits(jobConf);
        TaskSplitMetaInfo thisTaskMetaInfo = allMetaInfo[inputContext
            .getTaskIndex()];
        this.splitMetaInfo = new TaskSplitIndex(
            thisTaskMetaInfo.getSplitLocation(),
            thisTaskMetaInfo.getStartOffset());
        if (useNewApi) {
          setupNewInputFormat();
          newInputSplit = getNewSplitDetailsFromDisk(splitMetaInfo);
View Full Code Here

      } else {
        // Read split information.
        TaskSplitMetaInfo[] allMetaInfo = readSplits(jobConf);
        TaskSplitMetaInfo thisTaskMetaInfo = allMetaInfo[inputContext
            .getTaskIndex()];
        this.splitMetaInfo = new TaskSplitIndex(
            thisTaskMetaInfo.getSplitLocation(),
            thisTaskMetaInfo.getStartOffset());
        if (useNewApi) {
          setupNewInputFormat();
          newInputSplit = getNewSplitDetailsFromDisk(splitMetaInfo);
View Full Code Here

    super.write(out);
    if (isMapOrReduce()) {
      if (splitMetaInfo != null) {
        splitMetaInfo.write(out);
      } else {
        new TaskSplitIndex().write(out);
      }
      //TODO do we really need to set this to null?
      splitMetaInfo = null;
    }
  }
View Full Code Here

    Path localMetaSplit =
        new LocalDirAllocator("mapred.local.dir").getLocalPathToRead(
            TaskTracker.getLocalSplitFile(conf.getUser(), taskId.getJobID()
                .toString(), taskId.toString()), conf);
    DataInputStream splitFile = FileSystem.getLocal(conf).open(localMetaSplit);
    TaskSplitIndex splitIndex = new TaskSplitIndex();
    splitIndex.readFields(splitFile);
    splitFile.close();
    Task task =
      new MapTask(jobFilename.toString(), taskId, partition, splitIndex, 1);
    task.setConf(conf);
    task.run(conf, new FakeUmbilical());
View Full Code Here

        new LocalDirAllocator(MRConfig.LOCAL_DIR).getLocalPathToRead(
            TaskTracker.getLocalSplitMetaFile(conf.getUser(),
              taskId.getJobID().toString(), taskId
                .toString()), conf);
    DataInputStream splitFile = FileSystem.getLocal(conf).open(localMetaSplit);
    TaskSplitIndex splitIndex = new TaskSplitIndex();
    splitIndex.readFields(splitFile);
    splitFile.close();

    Task task =
      new MapTask(jobFilename.toString(), taskId, partition, splitIndex, 1);
    task.setConf(conf);
View Full Code Here

TOP

Related Classes of org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitIndex

Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc. and is owned by Oracle, Inc. Contact coftware@gmail.com.