Package org.apache.hadoop.tools.rumen

Examples of org.apache.hadoop.tools.rumen.ReduceTaskAttemptInfo

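The excerpts below appear to come from Hadoop's Gridmix and Mumak simulator code, where a ReduceTaskAttemptInfo records how long a successful reduce attempt spent in its shuffle, merge and reduce phases. As a starting point, here is a minimal standalone sketch of constructing one and reading the phase runtimes back; it assumes that State resolves to org.apache.hadoop.mapred.TaskStatus.State and that the three trailing constructor arguments are the shuffle, merge and reduce times, as the excerpts suggest.

// Minimal sketch (not taken from the excerpts): build a ReduceTaskAttemptInfo
// by hand and read back its phase runtimes. The constructor and getter shapes
// are assumed from the usages below; treat this as illustrative only.
import org.apache.hadoop.mapred.TaskStatus.State;
import org.apache.hadoop.tools.rumen.ReduceTaskAttemptInfo;
import org.apache.hadoop.tools.rumen.TaskInfo;

public class ReduceTaskAttemptInfoSketch {
  public static void main(String[] args) {
    // Input/output byte and record counters for the attempt; -1 marks an
    // unknown max-memory value, mirroring the excerpts below.
    TaskInfo counters = new TaskInfo(1 << 20, 1000, 1 << 19, 500, -1);

    // A successful reduce attempt that spent 100 units in each of the
    // shuffle, merge and reduce phases (assumed to be milliseconds).
    ReduceTaskAttemptInfo attempt =
        new ReduceTaskAttemptInfo(State.SUCCEEDED, counters, 100, 100, 100);

    // getRuntime() covers the whole attempt; the phase getters break it down,
    // which is why the sleep-job code below sums getMergeRuntime() and
    // getReduceRuntime() to exclude shuffle time.
    System.out.println("total   = " + attempt.getRuntime());
    System.out.println("shuffle = " + attempt.getShuffleRuntime());
    System.out.println("merge   = " + attempt.getMergeRuntime());
    System.out.println("reduce  = " + attempt.getReduceRuntime());
  }
}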

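The first excerpt, apparently from Gridmix's synthetic sleep job, converts a traced job into SleepSplits: each map split sleeps for its recorded map attempt runtime, and the job's reduce attempts are dealt out across the maps, each contributing its merge time plus reduce time (shuffle time is deliberately excluded) as a per-reducer sleep duration.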
    final int maps = jobdesc.getNumberMaps();
    for (int i = 0; i < maps; ++i) {
      final int nSpec = reds / maps + ((reds % maps) > i ? 1 : 0);
      final long[] redDurations = new long[nSpec];
      for (int j = 0; j < nSpec; ++j) {
        final ReduceTaskAttemptInfo info =
          (ReduceTaskAttemptInfo) getSuccessfulAttemptInfo(
            TaskType.REDUCE, i + j * maps);
        // Include only merge/reduce time
        redDurations[j] = info.getMergeRuntime() + info.getReduceRuntime();
        if (LOG.isDebugEnabled()) {
          LOG.debug(
            String.format(
              "SPEC(%d) %d -> %d %d/%d", id(), i, i + j * maps, redDurations[j],
              info.getRuntime()));
        }
      }
      final TaskAttemptInfo info = getSuccessfulAttemptInfo(TaskType.MAP, i);
      splits.add(
        new SleepSplit(
          i, info.getRuntime(), redDurations, maps, new String[0]));
    }
    pushDescription(id(), splits);
  }

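The i + j * maps indexing deals the reducers out round-robin: with maps = 3 and reds = 7, map 0 is assigned reduces 0, 3 and 6, map 1 gets 1 and 4, and map 2 gets 2 and 5, so every reduce attempt's duration is counted exactly once.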

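The next excerpt is an attempt-record factory for a mock job: depending on the task type it returns a MapTaskAttemptInfo or a ReduceTaskAttemptInfo in the SUCCEEDED state, populated from per-task byte and record counters, with fixed placeholder values of 100 for the runtime fields.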
        case MAP:
          return new MapTaskAttemptInfo(
            State.SUCCEEDED,
            new TaskInfo(
              m_bytesIn[taskNumber], m_recsIn[taskNumber],
              m_bytesOut[taskNumber], m_recsOut[taskNumber], -1),
            100);

        case REDUCE:
          return new ReduceTaskAttemptInfo(
            State.SUCCEEDED,
            new TaskInfo(
              r_bytesIn[taskNumber], r_recsIn[taskNumber],
              r_bytesOut[taskNumber], r_recsOut[taskNumber], -1),
            100, 100, 100);

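This excerpt, apparently from the Mumak simulator tests, fabricates a dummy ReduceTask and a matching ReduceTaskAttemptInfo with zeroed counters and a chosen reduceRuntime, then wraps both in a SimulatorLaunchTaskAction and queues the action on the task tracker's heartbeat scheduled at reduceStart.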
    org.apache.hadoop.mapred.TaskAttemptID taskIdOldApi =
        org.apache.hadoop.mapred.TaskAttemptID.downgrade(taskId);
    Task task = new ReduceTask("dummyjobfile", taskIdOldApi, 0, 0,
                               numSlotsRequired);
    // all byte counters are 0
    TaskInfo taskInfo = new TaskInfo(0, 0, 0, 0, 0);
    ReduceTaskAttemptInfo taskAttemptInfo =
        new ReduceTaskAttemptInfo(State.SUCCEEDED, taskInfo, 0, 0,
                                  reduceRuntime);
    TaskTrackerAction action =
        new SimulatorLaunchTaskAction(task, taskAttemptInfo);   
    heartbeats.get(reduceStart).get(taskTrackerName).addTaskTrackerAction(
        action);

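A later variant of the sleep-split generation adds two refinements: each map and reduce sleep interval is clamped to mapMaxSleepTime or reduceMaxSleepTime, and each split is assigned up to fakeLocations synthetic host names drawn from the selector.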
    final int maps = jobdesc.getNumberMaps();
    for (int i = 0; i < maps; ++i) {
      final int nSpec = reds / maps + ((reds % maps) > i ? 1 : 0);
      final long[] redDurations = new long[nSpec];
      for (int j = 0; j < nSpec; ++j) {
        final ReduceTaskAttemptInfo info =
          (ReduceTaskAttemptInfo) getSuccessfulAttemptInfo(TaskType.REDUCE,
                                                           i + j * maps);
        // Include only merge/reduce time
        redDurations[j] = Math.min(reduceMaxSleepTime, info.getMergeRuntime()
            + info.getReduceRuntime());
        if (LOG.isDebugEnabled()) {
          LOG.debug(
            String.format(
              "SPEC(%d) %d -> %d %d/%d", id(), i, i + j * maps, redDurations[j],
              info.getRuntime()));
        }
      }
      final TaskAttemptInfo info = getSuccessfulAttemptInfo(TaskType.MAP, i);
      ArrayList<String> locations = new ArrayList<String>(fakeLocations);
      if (fakeLocations > 0) {
        selector.reset();
      }
      for (int k=0; k<fakeLocations; ++k) {
        int index = selector.next();
        if (index < 0) break;
        locations.add(hosts[index]);
      }

      splits.add(new SleepSplit(i,
          Math.min(info.getRuntime(), mapMaxSleepTime), redDurations, maps,
          locations.toArray(new String[locations.size()])));
    }
    pushDescription(id(), splits);
  }

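Finally, the same attempt-record factory again in a more compact form, this time including the end of the switch: any task type other than MAP or REDUCE falls through to an UnsupportedOperationException.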
        case MAP:
          return new MapTaskAttemptInfo(
            State.SUCCEEDED, new TaskInfo(
              m_bytesIn[taskNumber], m_recsIn[taskNumber],
              m_bytesOut[taskNumber], m_recsOut[taskNumber], -1),100);

        case REDUCE:
          return new ReduceTaskAttemptInfo(
            State.SUCCEEDED, new TaskInfo(
              r_bytesIn[taskNumber], r_recsIn[taskNumber],
              r_bytesOut[taskNumber], r_recsOut[taskNumber], -1),100,100,100);
      }
      throw new UnsupportedOperationException();