Package: org.apache.hadoop.mapreduce.split.JobSplit

Examples of org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitMetaInfo


    EventHandler eventHandler = mock(EventHandler.class);
    String[] hosts = new String[3];
    hosts[0] = "host1";
    hosts[1] = "host2";
    hosts[2] = "host3";
    TaskSplitMetaInfo splitInfo =
        new TaskSplitMetaInfo(hosts, 0, 128 * 1024 * 1024l);

    TaskAttemptImpl mockTaskAttempt =
        createMapTaskAttemptImplForTest(eventHandler, splitInfo);
    TaskAttemptEvent mockTAEvent = mock(TaskAttemptEvent.class);

View Full Code Here


    EventHandler eventHandler = mock(EventHandler.class);
    String[] hosts = new String[3];
    hosts[0] = "192.168.1.1";
    hosts[1] = "host2";
    hosts[2] = "host3";
    TaskSplitMetaInfo splitInfo =
        new TaskSplitMetaInfo(hosts, 0, 128 * 1024 * 1024l);

    TaskAttemptImpl mockTaskAttempt =
        createMapTaskAttemptImplForTest(eventHandler, splitInfo);
    TaskAttemptImpl spyTa = spy(mockTaskAttempt);
    when(spyTa.resolveHost(hosts[0])).thenReturn("host1");
View Full Code Here

    // Swap in the stubbed local FileSystem and disable the FS instance cache
    // so this test gets a fresh (stubbed) filesystem rather than a cached one.
    jobConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
    jobConf.setBoolean("fs.file.impl.disable.cache", true);
    // Empty map-task environment; attempt id "10" — presumably to exercise a
    // non-first application attempt. NOTE(review): confirm intent from the
    // full test method.
    jobConf.set(JobConf.MAPRED_MAP_TASK_ENV, "");
    jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID, "10");
   
    // Split metadata is mocked; only getLocations() is stubbed, returning a
    // single loopback location.
    TaskSplitMetaInfo splits = mock(TaskSplitMetaInfo.class);
    when(splits.getLocations()).thenReturn(new String[] {"127.0.0.1"});
   
    TaskAttemptImpl taImpl =
      new MapTaskAttemptImpl(taskId, 1, eventHandler, jobFile, 1,
          splits, jobConf, taListener,
          mock(OutputCommitter.class), mock(Token.class), new Credentials(),
View Full Code Here

  private InitTransition getInitTransition() {
    InitTransition initTransition = new InitTransition() {
      @Override
      protected TaskSplitMetaInfo[] createSplits(JobImpl job, JobId jobId) {
        return new TaskSplitMetaInfo[] { new TaskSplitMetaInfo(),
            new TaskSplitMetaInfo() };
      }
    };
    return initTransition;
  }
View Full Code Here

    // Create the two cleanup tips, one map and one reduce.
    cleanup = new TaskInProgress[2];

    // Cleanup map tip. This map doesn't use any splits. Just assign an empty
    // split.
    TaskSplitMetaInfo emptySplit = JobSplit.EMPTY_TASK_SPLIT;
    cleanup[0] = new TaskInProgress(jobId, jobFile, emptySplit,
            jobtracker, conf, this, numMapTasks, 1);
    // Flag the tip so the framework treats it as the job-cleanup task rather
    // than a regular map task.
    cleanup[0].setJobCleanupTask();

    // cleanup reduce tip.
View Full Code Here

          setupOldInputFormat();
        }
      } else {
        // Read the split meta info for every task, then select this task's
        // entry by its index within the vertex/job.
        TaskSplitMetaInfo[] allMetaInfo = readSplits(jobConf);
        TaskSplitMetaInfo thisTaskMetaInfo = allMetaInfo[inputContext
            .getTaskIndex()];
        // Keep only the (location, offset) pair needed to re-read the real
        // split payload from disk later.
        this.splitMetaInfo = new TaskSplitIndex(
            thisTaskMetaInfo.getSplitLocation(),
            thisTaskMetaInfo.getStartOffset());
        if (useNewApi) {
          setupNewInputFormat();
          // Deserialize the actual InputSplit from the split file on disk.
          newInputSplit = getNewSplitDetailsFromDisk(splitMetaInfo);
          setupNewRecordReader();
        } else {
View Full Code Here

          setupOldInputFormat();
        }
      } else {
        // Read the split meta info for every task, then select this task's
        // entry by its index within the vertex/job.
        TaskSplitMetaInfo[] allMetaInfo = readSplits(jobConf);
        TaskSplitMetaInfo thisTaskMetaInfo = allMetaInfo[inputContext
            .getTaskIndex()];
        // Keep only the (location, offset) pair needed to re-read the real
        // split payload from disk later.
        this.splitMetaInfo = new TaskSplitIndex(
            thisTaskMetaInfo.getSplitLocation(),
            thisTaskMetaInfo.getStartOffset());
        if (useNewApi) {
          setupNewInputFormat();
          // Deserialize the actual InputSplit from the split file on disk.
          newInputSplit = getNewSplitDetailsFromDisk(splitMetaInfo);
          setupNewRecordReader();
        } else {
View Full Code Here

    // Create the two cleanup tips, one map and one reduce.
    cleanup = new TaskInProgress[2];

    // Cleanup map tip. This map doesn't use any splits. Just assign an empty
    // split.
    TaskSplitMetaInfo emptySplit = JobSplit.EMPTY_TASK_SPLIT;
    cleanup[0] = new TaskInProgress(jobId, jobFile, emptySplit,
            jobtracker, conf, this, numMapTasks, 1);
    // Flag the tip so the framework treats it as the job-cleanup task rather
    // than a regular map task.
    cleanup[0].setJobCleanupTask();

    // cleanup reduce tip.
View Full Code Here

      // NOTE(review): the comment below describes a mix of data-local,
      // rack-local and non-local placement, but every split built here lists
      // hosts[0] as its only location — confirm the comment matches intent.
      // one tracker gets data local, one gets rack local and two others
      // get non-local maps
      TaskSplitMetaInfo[] splits = new TaskSplitMetaInfo[numMapTasks];
      String[] splitHosts0 = new String[] { hosts[0] };
      for (int i = 0; i < numMapTasks; i++) {
        // Offset 0, length 0: split size is irrelevant to this test.
        splits[i] = new TaskSplitMetaInfo(splitHosts0, 0, 0);
      }
      return splits;
    }
View Full Code Here

      String[] splitHosts1 = new String[] { allHosts[2] };
      String[] splitHosts2 = new String[] { allHosts[3] };
      for (int i = 0; i < numMaps; i++) {
      if (i == 0 || i == 2 || i == 3) {
          splits[i] = new TaskSplitMetaInfo(splitHosts0, 0, 0);
        } else if (i == 1) {
          splits[i] = new TaskSplitMetaInfo(splitHosts1, 0, 0);
        } else if (i == 4) {
          splits[i] = new TaskSplitMetaInfo(splitHosts2, 0, 0);
        }
      }

      return splits;
    }
View Full Code Here

TOP

Related Classes of org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitMetaInfo

Copyright © 2018 www.massapicom. All rights reserved.
All source code are property of their respective owners. Java is a trademark of Sun Microsystems, Inc and owned by ORACLE Inc. Contact coftware#gmail.com.