Examples of RawComparable


Examples of org.apache.hadoop.io.file.tfile.RawComparable
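A key distribution's sampled keys come back as a RawComparable[]; the loop below materializes each one as a BytesWritable by copying the backing byte range exposed through buffer(), offset(), and size().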

    }
   
    RawComparable[] rawKeys = keyDistri.getKeys();
    BytesWritable[] keys = new BytesWritable[rawKeys.length];
    for (int i=0; i<keys.length; ++i) {
      RawComparable rawKey = rawKeys[i];
      keys[i] = new BytesWritable();
      keys[i].setSize(rawKey.size());
      System.arraycopy(rawKey.buffer(), rawKey.offset(), keys[i].get(), 0,
          rawKey.size());
    }
   
    // TODO: Should we change to RawComparable to avoid the creation of
    // BytesWritables?
    for (int i = 0; i < keys.length; ++i) {
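The same copy as a self-contained helper: a minimal sketch assuming only the three accessors the snippet itself uses (buffer(), offset(), size()); the class and method names are illustrative, not from the source.

    import org.apache.hadoop.io.BytesWritable;
    import org.apache.hadoop.io.file.tfile.RawComparable;

    public final class RawKeyCopy {
      // Copy the byte range backing a RawComparable into a fresh BytesWritable.
      static BytesWritable toBytesWritable(RawComparable rawKey) {
        BytesWritable bw = new BytesWritable();
        bw.setSize(rawKey.size());            // grows the backing buffer if needed
        System.arraycopy(rawKey.buffer(), rawKey.offset(),
            bw.get(), 0, rawKey.size());      // get() as above; newer Hadoop prefers getBytes()
        return bw;
      }
    }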

Examples of org.apache.hadoop.io.file.tfile.RawComparable
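reduceKeyDistri folds the right-hand map of per-key BlockDistributions into the left-hand one, summing the two distributions for every key of rv.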

  static void reduceKeyDistri(SortedMap<RawComparable, BlockDistribution> lv,
      SortedMap<RawComparable, BlockDistribution> rv) {
    for (Iterator<Map.Entry<RawComparable, BlockDistribution>> it =
        rv.entrySet().iterator(); it.hasNext();) {
      Map.Entry<RawComparable, BlockDistribution> e = it.next();
      RawComparable key = e.getKey();
      BlockDistribution sum = lv.get(key);
      BlockDistribution delta = e.getValue();
      lv.put(key, BlockDistribution.sum(sum, delta));
    }
  }
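The same merge pattern as a standalone sketch, with a plain Long standing in for BlockDistribution; the explicit null check spells out the assumption that BlockDistribution.sum tolerates a missing left-hand entry.

    import java.util.Map;
    import java.util.SortedMap;

    final class DistriMerge {
      // Fold every entry of rv into lv, summing the values per key.
      static void reduce(SortedMap<String, Long> lv, SortedMap<String, Long> rv) {
        for (Map.Entry<String, Long> e : rv.entrySet()) {
          Long sum = lv.get(e.getKey());  // null when the key is absent from lv
          lv.put(e.getKey(), (sum == null ? 0L : sum) + e.getValue());
        }
      }
    }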

Examples of org.apache.hadoop.io.file.tfile.RawComparable
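This loop selects up to n sample keys at roughly even byte spacing: each iteration computes the next target marker, (i + 1) * uniqueBytes / n, skips it if the accumulated total has already passed it, and otherwise advances the iterator (the do/while body is truncated here), remembering the first key it encounters.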

    for (int i = 0; i < n; ++i) {
      long targetMarker = (i + 1) * uniqueBytes / n;
      if (adjusted.uniqueBytes >= targetMarker) {
        continue;
      }
      RawComparable key = null;
      do {
        Map.Entry<RawComparable, BlockDistribution> e = it.next();
        if (key == null) {
          key = e.getKey();
        }
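Because the do/while body is cut off above, here is one plausible reading as a complete sketch: advance until the running byte total reaches the marker, keeping the first key passed. All names and the String/Long stand-in map are illustrative.

    import java.util.Arrays;
    import java.util.Iterator;
    import java.util.Map;
    import java.util.SortedMap;

    final class KeySampling {
      // Pick up to n keys at roughly even byte intervals from a sorted
      // key -> byte-count map.
      static String[] pickSampleKeys(SortedMap<String, Long> sizes, int n) {
        long uniqueBytes = 0;
        for (long s : sizes.values()) uniqueBytes += s;
        String[] samples = new String[n];
        int picked = 0;
        long running = 0;
        Iterator<Map.Entry<String, Long>> it = sizes.entrySet().iterator();
        for (int i = 0; i < n && it.hasNext(); ++i) {
          long targetMarker = (i + 1) * uniqueBytes / n;
          if (running >= targetMarker) {
            continue;                          // marker already crossed
          }
          String key = null;
          do {
            Map.Entry<String, Long> e = it.next();
            if (key == null) {
              key = e.getKey();                // keep the first key past the marker
            }
            running += e.getValue();
          } while (running < targetMarker && it.hasNext());
          samples[picked++] = key;
        }
        return Arrays.copyOf(samples, picked); // trim if the map ran out early
      }
    }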

Examples of org.apache.hadoop.io.file.tfile.RawComparable
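Here a column-group index is sorted: the tail of a key comparator is visible at the top of the snippet, after which adjacent entries are checked for overlapping key ranges; unsorted column groups are instead sorted by name. Finally, if the index is clean and non-empty, the status begin/end keys are refreshed from the first and last entries.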

            return cmprv;
          }
        });

        for (int i = 0; i < index.size() - 1; ++i) {
          RawComparable prevLastKey = index.get(i).lastKey;
          RawComparable nextFirstKey = index.get(i + 1).firstKey;
          if (nextFirstKey == null) {
            continue;
          }
          if (comparator.compare(prevLastKey, nextFirstKey) > 0) {
            throw new IOException("Overlapping key ranges");
          }
        }
      }
      else {
        // sort by name
        Collections.sort(index, new Comparator<CGIndexEntry>() {

          @Override
          public int compare(CGIndexEntry o1, CGIndexEntry o2) {
            return o1.name.compareTo(o2.name);
          }
        });
      }

      // update status
      if ((!dirty) && (index.size() > 0)) {
        RawComparable keyFirst = index.get(0).getFirstKey();
        status.beginKey = new BytesWritable();
        status.beginKey.set(keyFirst.buffer(), keyFirst.offset(), keyFirst
            .size());
        RawComparable keyLast = index.get(index.size() - 1).getLastKey();
        status.endKey = new BytesWritable();
        status.endKey.set(keyLast.buffer(), keyLast.offset(), keyLast.size());
      }
      sorted = true;
    }
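The overlap test in isolation: a sketch with a minimal Range type standing in for CGIndexEntry.

    import java.io.IOException;
    import java.util.Comparator;
    import java.util.List;

    final class RangeCheck {
      static final class Range<K> {
        K firstKey, lastKey;
      }

      // After sorting by first key, each entry's last key must not exceed
      // the next entry's first key; entries without keys are skipped.
      static <K> void assertNonOverlapping(List<Range<K>> index,
          Comparator<? super K> cmp) throws IOException {
        for (int i = 0; i < index.size() - 1; ++i) {
          K nextFirst = index.get(i + 1).firstKey;
          if (nextFirst == null) {
            continue;
          }
          if (cmp.compare(index.get(i).lastKey, nextFirst) > 0) {
            throw new IOException("Overlapping key ranges");
          }
        }
      }
    }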

Examples of org.apache.hadoop.io.file.tfile.RawComparable
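A key-bounded scanner is only meaningful on a sorted table; the optional begin/end BytesWritable bounds are wrapped as RawComparables via ByteArray, and a begin key at or past the end key is rejected as a zero-key-range split.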

      }
      if (!isSorted()) {
        throw new IOException(
            "Cannot get key-bounded scanner for unsorted table");
      }
      RawComparable begin =
          (beginKey != null) ? new ByteArray(beginKey.get(), 0, beginKey
              .getSize()) : null;
      RawComparable end =
          (endKey != null) ? new ByteArray(endKey.get(), 0, endKey.getSize())
              : null;
      if (begin != null && end != null) {
        if (comparator.compare(begin, end) >= 0) {
          throw new IOException("Zero-key-range split");
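The null-tolerant wrapping factored into a helper; ByteArray is TFile's byte[]-backed RawComparable adaptor, constructed exactly as in the snippet.

    import org.apache.hadoop.io.BytesWritable;
    import org.apache.hadoop.io.file.tfile.ByteArray;
    import org.apache.hadoop.io.file.tfile.RawComparable;

    final class Bounds {
      // Wrap an optional BytesWritable bound as a RawComparable; null passes through.
      static RawComparable wrap(BytesWritable key) {
        return (key == null) ? null : new ByteArray(key.get(), 0, key.getSize());
      }
    }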

Examples of org.apache.hadoop.io.file.tfile.RawComparable
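During split generation, reader.getKeyNear(offsetEnd) looks up a key close to the desired block boundary; when none is found, the split is stretched to the end of the file and closed on the file's last key.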

              // the split includes almost the whole block, fill it.
              offsetEnd = lastBlockOffsetEnd;
              splitBytes = offsetEnd - offsetBegin;
            }

            RawComparable key = reader.getKeyNear(offsetEnd);
            if (key == null) {
              offsetEnd = fileLen;
              splitBytes = offsetEnd - offsetBegin;
              key = reader.getLastKey();
              done = true; // TFile index too large? Is it necessary now?

Examples of org.apache.hadoop.io.file.tfile.RawComparable
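One TFileScanner is opened per file in the projected index range; only the first scanner receives the begin key and only the last receives the end key, and scanners that start at end-of-stream are skipped.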

        logicalSchema = ColumnGroup.Reader.this.getProjection();
        List<TFileScanner> tmpScanners =
            new ArrayList<TFileScanner>(endIndex - beginIndex);
        try {
          for (int i = beginIndex; i < endIndex; ++i) {
            RawComparable begin = (i == beginIndex) ? beginKey : null;
            RawComparable end = (i == endIndex - 1) ? endKey : null;
            TFileScanner scanner =
                new TFileScanner(fs, cgindex.getPath(i, path), begin, end,
                    cgschema, logicalSchema, conf);
            // skip empty scanners.
            if (!scanner.atEnd()) {

Examples of org.apache.hadoop.zebra.tfile.RawComparable
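The remaining examples show the same code under the org.apache.hadoop.zebra.tfile package, starting with the RawComparable-to-BytesWritable copy from the first snippet.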

    }
   
    RawComparable[] rawKeys = keyDistri.getKeys();
    BytesWritable[] keys = new BytesWritable[rawKeys.length];
    for (int i=0; i<keys.length; ++i) {
      RawComparable rawKey = rawKeys[i];
      keys[i] = new BytesWritable();
      keys[i].setSize(rawKey.size());
      System.arraycopy(rawKey.buffer(), rawKey.offset(), keys[i].get(), 0,
          rawKey.size());
    }
   
    // TODO: Should we change to RawComparable to avoid the creation of
    // BytesWritables?
    for (int i = 0; i < keys.length; ++i) {

Examples of org.apache.hadoop.zebra.tfile.RawComparable
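The reduceKeyDistri merge again, unchanged apart from the package.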

  static void reduceKeyDistri(SortedMap<RawComparable, BlockDistribution> lv,
      SortedMap<RawComparable, BlockDistribution> rv) {
    for (Iterator<Map.Entry<RawComparable, BlockDistribution>> it =
        rv.entrySet().iterator(); it.hasNext();) {
      Map.Entry<RawComparable, BlockDistribution> e = it.next();
      RawComparable key = e.getKey();
      BlockDistribution sum = lv.get(key);
      BlockDistribution delta = e.getValue();
      lv.put(key, BlockDistribution.sum(sum, delta));
    }
  }

Examples of org.apache.hadoop.zebra.tfile.RawComparable
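And the sample-key selection loop, identical to the org.apache.hadoop.io.file.tfile version above.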

    for (int i = 0; i < n; ++i) {
      long targetMarker = (i + 1) * uniqueBytes / n;
      if (adjusted.uniqueBytes >= targetMarker) {
        continue;
      }
      RawComparable key = null;
      do {
        Map.Entry<RawComparable, BlockDistribution> e = it.next();
        if (key == null) {
          key = e.getKey();
        }