Class org.apache.tez.dag.api.VertexLocationHint (package org.apache.tez.dag.api)

Examples of org.apache.tez.dag.api.VertexLocationHint.TaskLocationHint
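TaskLocationHint is the class nested in VertexLocationHint that describes where a single task would prefer to run, as a set of host names and/or a set of rack names; VertexLocationHint bundles one such hint per task of a vertex. The following is a minimal, self-contained sketch of how the two are typically constructed, based only on the constructor signatures visible in the excerpts below (class and variable names here are illustrative, not part of Tez):

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.Collections;
    import java.util.HashSet;
    import java.util.List;

    import org.apache.tez.dag.api.VertexLocationHint;
    import org.apache.tez.dag.api.VertexLocationHint.TaskLocationHint;

    public class LocationHintSketch {
      public static VertexLocationHint buildHints() {
        List<TaskLocationHint> hints = new ArrayList<TaskLocationHint>();

        // Task 0: prefer specific hosts, no rack preference.
        hints.add(new TaskLocationHint(
            new HashSet<String>(Arrays.asList("host1", "host2")), null));

        // Task 1: prefer a rack, no specific hosts.
        hints.add(new TaskLocationHint(null, Collections.singleton("/rack1")));

        // Task 2: no preference at all ("run anywhere").
        hints.add(new TaskLocationHint(null, null));

        // One hint per task; the first argument is the task count.
        return new VertexLocationHint(hints.size(), hints);
      }
    }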


    // Test-style setup: a TaskLocationHint pinned to host 127.0.0.1 with no rack preference.
    Configuration taskConf = new Configuration();
    taskConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
    taskConf.setBoolean("fs.file.impl.disable.cache", true);

    TaskLocationHint locationHint = new TaskLocationHint(
        new HashSet<String>(Arrays.asList(new String[] {"127.0.0.1"})), null);
    Resource resource = Resource.newInstance(1024, 1);

    NodeId nid = NodeId.newInstance("127.0.0.1", 0);
    ContainerId contId = ContainerId.newInstance(appAttemptId, 3);

        // Build one TaskLocationHint per grouped mapreduce-API split: prefer the rack
        // recorded on the TezGroupedSplit, otherwise fall back to the split's host locations.
        for (org.apache.hadoop.mapreduce.InputSplit split : splits) {
          splitsBuilder.addSplits(MRHelpers.createSplitProto(split,
              serializationFactory));
          String rack =
              ((org.apache.hadoop.mapreduce.split.TezGroupedSplit) split).getRack();
          if (rack == null) {
            if (split.getLocations() != null) {
              locationHints.add(new TaskLocationHint(new HashSet<String>(Arrays
                  .asList(split.getLocations())), null));
            } else {
              locationHints.add(new TaskLocationHint(null, null));
            }
          } else {
            locationHints.add(new TaskLocationHint(null,
                Collections.singleton(rack)));
          }
        }
        inputSplitInfo = new InputSplitInfoMem(splitsBuilder.build(),
            locationHints, splits.length);
      } else {
        LOG.info("Grouping mapred api input splits");
        org.apache.hadoop.mapred.InputSplit[] splits = MRHelpers
            .generateOldSplits(jobConf, realInputFormatName,
                rootInputContext.getNumTasks());
        List<TaskLocationHint> locationHints = Lists
            .newArrayListWithCapacity(splits.length);
        MRSplitsProto.Builder splitsBuilder = MRSplitsProto.newBuilder();
        for (org.apache.hadoop.mapred.InputSplit split : splits) {
          splitsBuilder.addSplits(MRHelpers.createSplitProto(split));
          String rack =
              ((org.apache.hadoop.mapred.split.TezGroupedSplit) split).getRack();
          if (rack == null) {
            if (split.getLocations() != null) {
              locationHints.add(new TaskLocationHint(new HashSet<String>(Arrays
                  .asList(split.getLocations())), null));
            } else {
              locationHints.add(new TaskLocationHint(null, null));
            }
          } else {
            locationHints.add(new TaskLocationHint(null,
                Collections.singleton(rack)));
          }
        }
        inputSplitInfo = new InputSplitInfoMem(splitsBuilder.build(),
            locationHints, splits.length);
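Both API branches above, and the near-identical loops further down this page, repeat the same decision: use the rack recorded on the grouped split if there is one, otherwise the split's host locations, otherwise no preference at all. A small helper capturing that pattern might look like the sketch below (toLocationHint is a hypothetical name, not part of the Tez API):

    import java.util.Arrays;
    import java.util.Collections;
    import java.util.HashSet;

    import org.apache.tez.dag.api.VertexLocationHint.TaskLocationHint;

    final class LocationHintUtil {
      // Prefer the rack when the grouped split recorded one, fall back to the
      // split's host locations, and express no preference otherwise.
      static TaskLocationHint toLocationHint(String rack, String[] locations) {
        if (rack != null) {
          return new TaskLocationHint(null, Collections.singleton(rack));
        }
        if (locations != null) {
          return new TaskLocationHint(
              new HashSet<String>(Arrays.asList(locations)), null);
        }
        return new TaskLocationHint(null, null);
      }
    }

With such a helper, each loop body above could reduce to locationHints.add(LocationHintUtil.toLocationHint(rack, split.getLocations())), with the surrounding exception handling left unchanged.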

    // Test setup with mocked collaborators; the hint itself carries neither hosts nor racks.
    conf = new Configuration();
    taskAttemptListener = mock(TaskAttemptListener.class);
    taskHeartbeatHandler = mock(TaskHeartbeatHandler.class);
    credentials = new Credentials();
    clock = new SystemClock();
    locationHint = new TaskLocationHint(null, null);

    appId = ApplicationId.newInstance(System.currentTimeMillis(), 1);
    dagId = new TezDAGID(appId, 1);
    vertexId = new TezVertexID(dagId, 1);
    appContext = mock(AppContext.class);

    // (excerpt begins mid-statement: split metadata has been read from
    //  new Path(jobSubmitDir) into the splitsInfo array used below)
    int splitsCount = splitsInfo.length;
    List<TaskLocationHint> locationHints =
        new ArrayList<TaskLocationHint>(splitsCount);
    for (int i = 0; i < splitsCount; ++i) {
      TaskLocationHint locationHint =
          new TaskLocationHint(
              new HashSet<String>(
                  Arrays.asList(splitsInfo[i].getLocations())), null);
      locationHints.add(locationHint);
    }
    return locationHints;

      // (excerpt from a DAG-plan converter; the enclosing method receives the
      //  plan's location hints and rebuilds the runtime TaskLocationHints)
      List<PlanTaskLocationHint> locationHints) {

    List<TaskLocationHint> outputList = new ArrayList<TaskLocationHint>();

    for(PlanTaskLocationHint inputHint : locationHints){
      TaskLocationHint outputHint = new TaskLocationHint(
          new HashSet<String>(inputHint.getHostList()),
          new HashSet<String>(inputHint.getRackList()));
      outputList.add(outputHint);
    }
    return new VertexLocationHint(outputList.size(), outputList);

        // (excerpt begins mid-condition: per-task hints are used only when the vertex-level
        //  hint supplies exactly one TaskLocationHint for each of the vertex's tasks)
        && this.vertexLocationHint.getTaskLocationHints().size() ==
            this.numTasks) {
      useNullLocationHint = false;
    }
    for (int i=0; i < this.numTasks; ++i) {
      TaskLocationHint locHint = null;
      if (!useNullLocationHint) {
        locHint = this.vertexLocationHint.getTaskLocationHints().get(i);
      }
      TaskImpl task =
          new TaskImpl(this.getVertexId(), i,
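The guard above only trusts the vertex-level hint when it supplies exactly one entry per task; otherwise every task is created with a null hint. Factored out, that per-task lookup might read as the following sketch (taskLocationHintFor is a hypothetical helper, not Tez API):

    import java.util.List;

    import org.apache.tez.dag.api.VertexLocationHint;
    import org.apache.tez.dag.api.VertexLocationHint.TaskLocationHint;

    final class HintLookup {
      // Return the hint for task taskIndex, or null when the vertex-level hint is
      // missing or does not supply exactly one hint per task.
      static TaskLocationHint taskLocationHintFor(
          VertexLocationHint vertexHint, int numTasks, int taskIndex) {
        if (vertexHint == null) {
          return null;
        }
        List<TaskLocationHint> hints = vertexHint.getTaskLocationHints();
        if (hints == null || hints.size() != numTasks) {
          return null;
        }
        return hints.get(taskIndex);
      }
    }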

        // Same rack-versus-hosts pattern as above, here keeping the split objects in memory
        // and handing them straight to InputSplitInfoMem together with the hints.
        for (org.apache.hadoop.mapreduce.InputSplit split : splits) {
          String rack =
              ((org.apache.hadoop.mapreduce.split.TezGroupedSplit) split).getRack();
          if (rack == null) {
            if (split.getLocations() != null) {
              locationHints.add(new TaskLocationHint(new HashSet<String>(Arrays
                  .asList(split.getLocations())), null));
            } else {
              locationHints.add(new TaskLocationHint(null, null));
            }
          } else {
            locationHints.add(new TaskLocationHint(null,
                Collections.singleton(rack)));
          }
        }
        inputSplitInfo = new InputSplitInfoMem(splits, locationHints, splits.length, null, jobConf);
      } else {
        LOG.info("Grouping mapred api input splits");
        org.apache.hadoop.mapred.InputSplit[] splits = MRHelpers
            .generateOldSplits(jobConf, realInputFormatName, numTasks);
        List<TaskLocationHint> locationHints = Lists
            .newArrayListWithCapacity(splits.length);
        for (org.apache.hadoop.mapred.InputSplit split : splits) {
          String rack =
              ((org.apache.hadoop.mapred.split.TezGroupedSplit) split).getRack();
          if (rack == null) {
            if (split.getLocations() != null) {
              locationHints.add(new TaskLocationHint(new HashSet<String>(Arrays
                  .asList(split.getLocations())), null));
            } else {
              locationHints.add(new TaskLocationHint(null, null));
            }
          } else {
            locationHints.add(new TaskLocationHint(null,
                Collections.singleton(rack)));
          }
        }
        inputSplitInfo = new InputSplitInfoMem(splits, locationHints, splits.length, null, jobConf);
      }

    // Test: rebuild the expected host-based hints from splitsInfo and compare them
    // with the actual list produced by the code under test.
    int splitsCount = splitsInfo.length;
    List<TaskLocationHint> locationHints =
        new ArrayList<TaskLocationHint>(splitsCount);
    for (int i = 0; i < splitsCount; ++i) {
      locationHints.add(
          new TaskLocationHint(new HashSet<String>(
              Arrays.asList(splitsInfo[i].getLocations())), null));
    }

    Assert.assertEquals(locationHints, actual);
  }
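The assertion above compares whole lists of hints, which only holds if TaskLocationHint implements value-based equals() and hashCode(), as the excerpt's use of assertEquals implies. A quick sanity check of that assumption, as a self-contained sketch:

    import java.util.Arrays;
    import java.util.HashSet;

    import org.apache.tez.dag.api.VertexLocationHint.TaskLocationHint;

    public class HintEqualityCheck {
      public static void main(String[] args) {
        // Two hints built from the same host set should compare equal if
        // TaskLocationHint defines value-based equality, which the list
        // assertion above relies on.
        TaskLocationHint a =
            new TaskLocationHint(new HashSet<String>(Arrays.asList("host1")), null);
        TaskLocationHint b =
            new TaskLocationHint(new HashSet<String>(Arrays.asList("host1")), null);
        System.out.println(a.equals(b) && a.hashCode() == b.hashCode());
      }
    }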

