Package org.apache.hadoop.zebra.tfile

Examples of org.apache.hadoop.zebra.tfile.RawComparable
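A RawComparable describes a key as a raw byte range through three accessors: buffer(), offset(), and size(). The excerpts below come from Zebra's table and column-group code and mostly either wrap byte data as a RawComparable (via ByteArray) or copy a RawComparable back out into a BytesWritable. As a starting point, here is a minimal sketch that uses only the three-argument ByteArray constructor also seen in the excerpts; the class and variable names are illustrative, not part of the Zebra sources:

    import java.nio.charset.StandardCharsets;

    import org.apache.hadoop.zebra.tfile.ByteArray;
    import org.apache.hadoop.zebra.tfile.RawComparable;

    public class RawComparableIntro {
      public static void main(String[] args) {
        byte[] row = "row-000042".getBytes(StandardCharsets.UTF_8);

        // Wrap the whole array as one key.
        RawComparable whole = new ByteArray(row, 0, row.length);

        // Wrap only a slice of the same buffer; buffer(), offset() and size()
        // describe exactly that slice and nothing is copied.
        RawComparable slice = new ByteArray(row, 4, 6);

        System.out.println(whole.size());   // 10
        System.out.println(slice.offset()); // 4
        System.out.println(slice.size());   // 6
      }
    }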


    // Excerpt: copy each RawComparable key from the key distribution into a
    // BytesWritable of the key's exact size.
    RawComparable[] rawKeys = keyDistri.getKeys();
    BytesWritable[] keys = new BytesWritable[rawKeys.length];
    for (int i = 0; i < keys.length; ++i) {
      RawComparable rawKey = rawKeys[i];
      keys[i] = new BytesWritable();
      keys[i].setSize(rawKey.size());
      System.arraycopy(rawKey.buffer(), rawKey.offset(), keys[i].get(), 0,
          rawKey.size());
    }

    // TODO: Should we switch to RawComparable here to avoid creating the
    // BytesWritable copies?
    for (int i = 0; i < keys.length; ++i) {
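The TODO above asks whether the BytesWritable copies can be avoided. Keys kept as RawComparable can be compared directly on their backing buffers when the table uses byte-wise key ordering, for example with WritableComparator.compareBytes. A minimal sketch, not taken from the Zebra sources (the class and helper names are made up):

    import org.apache.hadoop.io.WritableComparator;
    import org.apache.hadoop.zebra.tfile.RawComparable;

    public class RawKeyCompare {
      // Byte-wise lexicographic comparison of two keys, with no intermediate
      // BytesWritable objects.
      static int compareRaw(RawComparable a, RawComparable b) {
        return WritableComparator.compareBytes(
            a.buffer(), a.offset(), a.size(),
            b.buffer(), b.offset(), b.size());
      }
    }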


  // Fold every entry of rv into lv, summing the BlockDistribution values
  // per key.
  static void reduceKeyDistri(SortedMap<RawComparable, BlockDistribution> lv,
      SortedMap<RawComparable, BlockDistribution> rv) {
    for (Iterator<Map.Entry<RawComparable, BlockDistribution>> it =
        rv.entrySet().iterator(); it.hasNext();) {
      Map.Entry<RawComparable, BlockDistribution> e = it.next();
      RawComparable key = e.getKey();
      BlockDistribution sum = lv.get(key);
      BlockDistribution delta = e.getValue();
      lv.put(key, BlockDistribution.sum(sum, delta));
    }
  }

    // Excerpt: advance through the key distribution until each of the n
    // evenly spaced size markers is reached.
    for (int i = 0; i < n; ++i) {
      long targetMarker = (i + 1) * uniqueBytes / n;
      if (adjusted.uniqueBytes >= targetMarker) {
        continue;
      }
      RawComparable key = null;
      do {
        Map.Entry<RawComparable, BlockDistribution> e = it.next();
        if (key == null) {
          key = e.getKey();
        }

      // Excerpt: wrap the optional begin/end BytesWritable keys as
      // RawComparable bounds and reject an empty key range.
      if (!isSorted()) {
        throw new IOException(
            "Cannot get key-bounded scanner for unsorted table");
      }
      RawComparable begin =
          (beginKey != null) ? new ByteArray(beginKey.get(), 0, beginKey.getSize())
              : null;
      RawComparable end =
          (endKey != null) ? new ByteArray(endKey.get(), 0, endKey.getSize())
              : null;
      if (begin != null && end != null) {
        if (comparator.compare(begin, end) >= 0) {
          throw new IOException("Zero-key-range split");

              // Excerpt: the split covers almost the whole block, so extend it
              // to the end of the block before locating a nearby key.
              offsetEnd = lastBlockOffsetEnd;
              splitBytes = offsetEnd - offsetBegin;
            }

            RawComparable key = reader.getKeyNear(offsetEnd);
            if (key == null) {
              offsetEnd = fileLen;
              splitBytes = offsetEnd - offsetBegin;
              key = reader.getLastKey();
              done = true; // TFile index too large? Is it necessary now?

        // Excerpt: open a TFileScanner for each file in [beginIndex, endIndex),
        // passing the RawComparable begin/end keys only to the first and last
        // scanners.
        logicalSchema = ColumnGroup.Reader.this.getProjection();
        List<TFileScanner> tmpScanners =
            new ArrayList<TFileScanner>(endIndex - beginIndex);
        try {
          for (int i = beginIndex; i < endIndex; ++i) {
            RawComparable begin = (i == beginIndex) ? beginKey : null;
            RawComparable end = (i == endIndex - 1) ? endKey : null;
            TFileScanner scanner;
            if (rowRange != null)
              scanner =
                new TFileScanner(fs, new Path(path, rowRange.name), rowRange,
                                 begin, end,

            // Excerpt: end of the key comparator used to sort the index;
            // the loop below then rejects overlapping key ranges.
            return cmprv;
          }
        });

        for (int i = 0; i < index.size() - 1; ++i) {
          RawComparable prevLastKey = index.get(i).lastKey;
          RawComparable nextFirstKey = index.get(i + 1).firstKey;
          if (nextFirstKey == null) {
            continue;
          }
          if (comparator.compare(prevLastKey, nextFirstKey) > 0) {
            throw new IOException("Overlapping key ranges");
          }
        }
      }
      else {
        // sort by name
        Collections.sort(index, new Comparator<CGIndexEntry>() {

          @Override
          public int compare(CGIndexEntry o1, CGIndexEntry o2) {
            return o1.name.compareTo(o2.name);
          }
        });
      }

      // update status
      if ((!dirty) && (index.size() > 0)) {
        RawComparable keyFirst = index.get(0).getFirstKey();
        status.beginKey = new BytesWritable();
        status.beginKey.set(keyFirst.buffer(), keyFirst.offset(),
            keyFirst.size());
        RawComparable keyLast = index.get(index.size() - 1).getLastKey();
        status.endKey = new BytesWritable();
        status.endKey.set(keyLast.buffer(), keyLast.offset(), keyLast.size());
      }
      sorted = true;
    }

   
    // Excerpt: build one SortedTableSplit per key interval; the first split
    // has a null begin key and the last split a null end key.
    keyDistri.resize(lastBd);
   
    RawComparable[] keys = keyDistri.getKeys();
    for (int i = 0; i <= keys.length; ++i) {
      RawComparable begin = (i == 0) ? null : keys[i - 1];
      RawComparable end = (i == keys.length) ? null : keys[i];
      BlockDistribution bd;
      if (i < keys.length)
        bd = keyDistri.getBlockDistribution(keys[i]);
      else
        bd = lastBd;
      BytesWritable beginB = null, endB = null;
      // Wrapping buffer() directly assumes the key fills its backing array
      // (offset() == 0 and size() == buffer().length).
      if (begin != null)
        beginB = new BytesWritable(begin.buffer());
      if (end != null)
        endB = new BytesWritable(end.buffer());
      SortedTableSplit split = new SortedTableSplit(beginB, endB, bd, conf);
      splits.add(split);
    }

    return splits.toArray(new InputSplit[splits.size()]);
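Note that new BytesWritable(begin.buffer()) above is only equivalent to the key when the key spans its whole backing array. When that is not guaranteed, the arraycopy pattern from the first excerpt can be wrapped in a small helper. A minimal sketch; the class and method names are made up:

    import org.apache.hadoop.io.BytesWritable;
    import org.apache.hadoop.zebra.tfile.RawComparable;

    public final class RawComparableUtil {
      private RawComparableUtil() {
      }

      // Copy exactly size() bytes starting at offset(), so the result is
      // correct even when the key is a slice of a larger buffer.
      public static BytesWritable toBytesWritable(RawComparable key) {
        if (key == null) {
          return null;
        }
        BytesWritable bw = new BytesWritable();
        bw.setSize(key.size());
        System.arraycopy(key.buffer(), key.offset(), bw.get(), 0, key.size());
        return bw;
      }
    }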

          // Excerpt: build a TFileScannerInfo per file in [beginIndex, endIndex),
          // attaching the RawComparable begin/end keys only to the first and
          // last files.
          boolean first, last, realFirst = true;
          Path myPath;
          for (int i = beginIndex; i < endIndex; ++i) {
            first = (i == beginIndex);
            last = (i == endIndex - 1);
            RawComparable begin = first ? beginKey : null;
            RawComparable end = last ? endKey : null;
            TFileScannerInfo scanner;
            if (rowRange == null)
              myPath = cgindex.getPath(i, path);
            else
              myPath = new Path(path, rowRange.names[i]);

