Examples of InputSplit


Examples of org.apache.hadoop.mapreduce.InputSplit

        Class<? extends Mapper<?, ?, ?, ?>> mapper = A.class;
        return split(tag, mapper, length, locations);
    }

    private StageInputSplit split(int tag, Class<? extends Mapper<?, ?, ?, ?>> mapper, long length, String... locations) {
        InputSplit split = new MockInputSplit(tag, length, locations);
        return new StageInputSplit(mapper, Collections.singletonList(new StageInputSplit.Source(split, F.class)));
    }
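
MockInputSplit above is test scaffolding from the surrounding project. For reference, a minimal custom split in the new API has to extend InputSplit and implement Writable, because the framework serializes splits between the client and the tasks. A sketch, with SimpleSplit as an illustrative name:

    import java.io.DataInput;
    import java.io.DataOutput;
    import java.io.IOException;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.io.Writable;
    import org.apache.hadoop.mapreduce.InputSplit;

    public class SimpleSplit extends InputSplit implements Writable {
        private long length;
        private String[] locations;

        public SimpleSplit() {
            // no-arg constructor: the framework instantiates splits
            // reflectively and then calls readFields()
        }

        public SimpleSplit(long length, String... locations) {
            this.length = length;
            this.locations = locations;
        }

        @Override
        public long getLength() {
            return length; // used by the framework to sort splits by size
        }

        @Override
        public String[] getLocations() {
            return locations; // hints for data-local task placement
        }

        @Override
        public void write(DataOutput out) throws IOException {
            out.writeLong(length);
            out.writeInt(locations.length);
            for (String location : locations) {
                Text.writeString(out, location);
            }
        }

        @Override
        public void readFields(DataInput in) throws IOException {
            length = in.readLong();
            locations = new String[in.readInt()];
            for (int i = 0; i < locations.length; i++) {
                locations[i] = Text.readString(in);
            }
        }
    }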

Examples of org.apache.hadoop.mapreduce.InputSplit

    private boolean prepareNext() throws IOException {
        if (splits.isEmpty()) {
            return false;
        }
        InputSplit next = splits.removeFirst();
        try {
            current = format.createRecordReader(next, context);
            current.initialize(next, context);
        } catch (InterruptedException e) {
            throw (InterruptedIOException) new InterruptedIOException().initCause(e);
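
The excerpt stops inside the catch block, but the pattern it sets up is the standard one for consuming a split: create a reader from the InputFormat, initialize it with the same split and context, then iterate. A generic sketch of that read loop (the method name and type parameters are illustrative):

    import java.io.IOException;
    import org.apache.hadoop.mapreduce.InputFormat;
    import org.apache.hadoop.mapreduce.InputSplit;
    import org.apache.hadoop.mapreduce.RecordReader;
    import org.apache.hadoop.mapreduce.TaskAttemptContext;

    static <K, V> void readSplit(InputFormat<K, V> format, InputSplit split,
            TaskAttemptContext context) throws IOException, InterruptedException {
        RecordReader<K, V> reader = format.createRecordReader(split, context);
        try {
            reader.initialize(split, context);
            while (reader.nextKeyValue()) {
                K key = reader.getCurrentKey();
                V value = reader.getCurrentValue();
                // process (key, value) here
            }
        } finally {
            reader.close();
        }
    }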

Examples of org.apache.hadoop.mapreduce.InputSplit

        byte[] splitStart = startRow.length == 0 ||
          Bytes.compareTo(keys.getFirst()[i], startRow) >= 0 ?
            keys.getFirst()[i] : startRow;
        byte[] splitStop = (stopRow.length == 0 ||
          Bytes.compareTo(keys.getSecond()[i], stopRow) <= 0) &&
          keys.getSecond()[i].length > 0 ?
            keys.getSecond()[i] : stopRow;
        InputSplit split = new TableSplit(table.getTableName(),
          splitStart, splitStop, regionLocation);
        splits.add(split);
        if (LOG.isDebugEnabled()) {
          LOG.debug("getSplits: split -> " + i + " -> " + split);
        }
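
The ternaries above clamp the scan's row range to each region's boundaries before building the TableSplit. The same logic unpacked into helpers (a sketch; the method names are illustrative, not HBase API):

    import org.apache.hadoop.hbase.util.Bytes;

    // Later of region start and scan start; an empty scan start means "table start".
    static byte[] clampStart(byte[] regionStart, byte[] scanStart) {
        if (scanStart.length == 0 || Bytes.compareTo(regionStart, scanStart) >= 0) {
            return regionStart;
        }
        return scanStart;
    }

    // Earlier of region end and scan stop; an empty region end means "table end".
    static byte[] clampStop(byte[] regionEnd, byte[] scanStop) {
        if (regionEnd.length > 0
                && (scanStop.length == 0 || Bytes.compareTo(regionEnd, scanStop) <= 0)) {
            return regionEnd;
        }
        return scanStop;
    }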

Examples of org.apache.hadoop.mapreduce.InputSplit

      long seed = r.nextLong();
      r.setSeed(seed);
      LOG.debug("seed: " + seed);
      // shuffle splits
      for (int i = 0; i < splits.size(); ++i) {
        InputSplit tmp = splits.get(i);
        int j = r.nextInt(splits.size());
        splits.set(i, splits.get(j));
        splits.set(j, tmp);
      }
      // our target rate is in terms of the maximum number of sample splits,
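
The loop swaps each position with a uniformly random partner, and the seed is logged first so a sampling run can be reproduced. A shorter equivalent using the JDK (a sketch, not the project's code; splits is assumed to be a java.util.List<InputSplit>):

    long seed = r.nextLong();
    LOG.debug("seed: " + seed);
    java.util.Collections.shuffle(splits, new java.util.Random(seed));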

Examples of org.apache.hadoop.mapreduce.InputSplit

        List<List<InputSplit>> result = new ArrayList<List<InputSplit>>();
        List<Long> resultLengths = new ArrayList<Long>();
        long comparableSplitId = 0;
       
        int size = 0, nSplits = oneInputSplits.size();
        InputSplit lastSplit = null;
        int emptyCnt = 0;
        for (InputSplit split : oneInputSplits) {
            if (split.getLength() == 0) {
                emptyCnt++;
                continue;
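
This is the start of a split-combining pass: zero-length splits are counted and skipped, and the rest are grouped so that many small splits feed one map task. A simplified sketch of grouping non-empty splits under a byte budget (maxCombinedSize is an assumed parameter, not the project's actual configuration):

    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;
    import org.apache.hadoop.mapreduce.InputSplit;

    static List<List<InputSplit>> group(List<InputSplit> oneInputSplits,
            long maxCombinedSize) throws IOException, InterruptedException {
        List<List<InputSplit>> result = new ArrayList<List<InputSplit>>();
        List<InputSplit> current = new ArrayList<InputSplit>();
        long currentSize = 0;
        for (InputSplit split : oneInputSplits) {
            long length = split.getLength();
            if (length == 0) {
                continue; // drop empty splits, as the excerpt above does
            }
            if (!current.isEmpty() && currentSize + length > maxCombinedSize) {
                result.add(current); // close the batch once the budget is exceeded
                current = new ArrayList<InputSplit>();
                currentSize = 0;
            }
            current.add(split);
            currentSize += length;
        }
        if (!current.isEmpty()) {
            result.add(current);
        }
        return result;
    }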

Examples of org.apache.hadoop.mapreduce.InputSplit

      HRegionLocation regLoc = table.getRegionLocation(HConstants.EMPTY_BYTE_ARRAY, false);
      if (null == regLoc) {
        throw new IOException("Expecting at least one region.");
      }
      List<InputSplit> splits = new ArrayList<InputSplit>(1);
      InputSplit split = new TableSplit(table.getName(),
          HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, regLoc
              .getHostnamePort().split(Addressing.HOSTNAME_PORT_SEPARATOR)[0]);
      splits.add(split);
      return splits;
    }
    List<InputSplit> splits = new ArrayList<InputSplit>(keys.getFirst().length);
    for (int i = 0; i < keys.getFirst().length; i++) {
      if ( !includeRegionInSplit(keys.getFirst()[i], keys.getSecond()[i])) {
        continue;
      }
      HRegionLocation location = table.getRegionLocation(keys.getFirst()[i], false);
      // The below InetSocketAddress creation does a name resolution.
      InetSocketAddress isa = new InetSocketAddress(location.getHostname(), location.getPort());
      if (isa.isUnresolved()) {
        LOG.warn("Failed to resolve " + isa);
      }
      InetAddress regionAddress = isa.getAddress();
      String regionLocation;
      try {
        regionLocation = reverseDNS(regionAddress);
      } catch (NamingException e) {
        LOG.error("Cannot resolve the host name for " + regionAddress + " because of " + e);
        regionLocation = location.getHostname();
      }

      byte[] startRow = scan.getStartRow();
      byte[] stopRow = scan.getStopRow();
      // determine if the given start and stop keys fall into the region
      if ((startRow.length == 0 || keys.getSecond()[i].length == 0 ||
          Bytes.compareTo(startRow, keys.getSecond()[i]) < 0) &&
          (stopRow.length == 0 ||
           Bytes.compareTo(stopRow, keys.getFirst()[i]) > 0)) {
        byte[] splitStart = startRow.length == 0 ||
          Bytes.compareTo(keys.getFirst()[i], startRow) >= 0 ?
            keys.getFirst()[i] : startRow;
        byte[] splitStop = (stopRow.length == 0 ||
          Bytes.compareTo(keys.getSecond()[i], stopRow) <= 0) &&
          keys.getSecond()[i].length > 0 ?
            keys.getSecond()[i] : stopRow;
        InputSplit split = new TableSplit(table.getName(),
          splitStart, splitStop, regionLocation);
        splits.add(split);
        if (LOG.isDebugEnabled()) {
          LOG.debug("getSplits: split -> " + i + " -> " + split);
        }
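
To see what a getSplits implementation like this produces, the splits can be listed without running a job. A sketch, assuming inputFormat and jobContext are already configured:

    List<InputSplit> splits = inputFormat.getSplits(jobContext);
    for (InputSplit split : splits) {
        // getLength() sizes the split; getLocations() lists locality hints
        LOG.info(split + ": " + split.getLength() + " bytes at "
            + java.util.Arrays.toString(split.getLocations()));
    }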

Examples of org.apache.hadoop.mapreduce.InputSplit

    }
    return isParent(possibleParent, child.getParent());
  }

  private Path getCurrentFile(Context context) throws IOException {
    InputSplit split = context.getInputSplit();
    if (split != null) {
      FileSplit inputSplit = (FileSplit) split;
      Path path = inputSplit.getPath();
      return path.makeQualified(path.getFileSystem(context.getConfiguration()));
    }
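
The unconditional cast assumes the task is always fed a FileSplit, which holds for plain file-based input formats but not for formats that wrap or replace the split. A defensive variant of the same lookup (a sketch):

    private Path getCurrentFile(Context context) throws IOException {
        InputSplit split = context.getInputSplit();
        if (split instanceof FileSplit) {
            Path path = ((FileSplit) split).getPath();
            FileSystem fs = path.getFileSystem(context.getConfiguration());
            return fs.makeQualified(path);
        }
        return null; // not a file-backed split
    }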

Examples of org.apache.hadoop.mapreduce.InputSplit

            return false;
        }
        if(reader != null){
            reader.close();
        }
        InputSplit curSplit = inpSplits.get(curSplitIndex);
        TaskAttemptContext tAContext = new TaskAttemptContext(conf,
                new TaskAttemptID());
        reader = inputFormat.createRecordReader(curSplit, tAContext);
        reader.initialize(curSplit, tAContext);
        // create a dummy pigsplit - other than the actual split, the other
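
Note that new TaskAttemptContext(conf, new TaskAttemptID()) only compiles against Hadoop 1.x, where TaskAttemptContext is a concrete class. On Hadoop 2 and later it is an interface, and the equivalent setup looks like this (a sketch with the surrounding variables assumed unchanged):

    // Hadoop 2+: instantiate the concrete implementation instead.
    TaskAttemptContext tAContext =
        new org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl(
            conf, new TaskAttemptID());
    reader = inputFormat.createRecordReader(curSplit, tAContext);
    reader.initialize(curSplit, tAContext);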

Examples of org.apache.hadoop.mapreduce.InputSplit

    HBaseTestingUtility.SeenRowTracker rowTracker = new HBaseTestingUtility.SeenRowTracker(startRow, stopRow);

    for (int i = 0; i < splits.size(); i++) {
      // validate input split
      InputSplit split = splits.get(i);
      Assert.assertTrue(split instanceof TableSnapshotRegionSplit);

      // validate record reader
      TaskAttemptContext taskAttemptContext = mock(TaskAttemptContext.class);
      when(taskAttemptContext.getConfiguration()).thenReturn(job.getConfiguration());
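
After the mock is wired up, the test would typically drive the record reader over the split and feed every key it sees to the row tracker. A hedged sketch of that continuation (the original's exact assertions are not shown in the excerpt; inputFormat is assumed to be the snapshot input format under test):

    RecordReader<ImmutableBytesWritable, Result> reader =
        inputFormat.createRecordReader(split, taskAttemptContext);
    reader.initialize(split, taskAttemptContext);
    while (reader.nextKeyValue()) {
        rowTracker.addRow(reader.getCurrentKey().get());
    }
    reader.close();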

Examples of org.apache.hadoop.mapreduce.InputSplit

            byte[] splitStart =
                startRow.length == 0 || Bytes.compareTo(keys.getFirst()[i],
                    startRow) >= 0 ? keys
                    .getFirst()[i] : startRow;
            byte[] splitStop =
                (stopRow.length == 0 || Bytes.compareTo(keys.getSecond()[i],
                    stopRow) <= 0) && keys.getSecond()[i].length > 0 ? keys
                    .getSecond()[i] : stopRow;
            InputSplit split =
                new TableSplit(table.getName(),
                    scan, splitStart, splitStop, regionLocation);
            splits.add(split);
            if (LOG.isDebugEnabled())
              LOG.debug("getSplits: split -> " + (count++) + " -> " + split);
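
Unlike the earlier getSplits excerpt, this variant passes the Scan into the TableSplit constructor, so each split carries its own scan definition instead of the mapper rebuilding it from the job configuration; that constructor overload appears only in later HBase releases.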