Package: org.apache.hadoop.zebra.io

Usage examples of org.apache.hadoop.zebra.io.BlockDistribution


        throw new IOException("Attempting sorted split on unsorted table");
      }
    }

    if (numSplits == 1) {
      BlockDistribution bd = null;
      for (Iterator<BasicTable.Reader> it = readers.iterator(); it.hasNext();) {
        BasicTable.Reader reader = it.next();
        bd = BlockDistribution.sum(bd, reader.getBlockDistribution((RangeSplit)null));
      }
     
      SortedTableSplit split = new SortedTableSplit(null, null, bd, conf);
      return new InputSplit[] { split };
    }
   
    // TODO: Does it make sense to interleave keys for all leaf tables if
    // numSplits <= 0 ?
    int nLeaves = readers.size();
    KeyDistribution keyDistri = null;
    for (int i = 0; i < nLeaves; ++i) {
      KeyDistribution btKeyDistri =
          readers.get(i).getKeyDistribution(
              (numSplits <= 0) ? -1 :
              Math.max(numSplits * 5 / nLeaves, numSplits));
      keyDistri = KeyDistribution.sum(keyDistri, btKeyDistri);
    }
   
    if (keyDistri == null) {
      // should never happen.
      SortedTableSplit split = new SortedTableSplit(null, null, null, conf);
      return new InputSplit[] { split };
    }
   
    if (numSplits > 0) {
      keyDistri.resize(numSplits);
    }
   
    RawComparable[] rawKeys = keyDistri.getKeys();
    BytesWritable[] keys = new BytesWritable[rawKeys.length];
    for (int i=0; i<keys.length; ++i) {
      RawComparable rawKey = rawKeys[i];
      keys[i] = new BytesWritable();
      keys[i].setSize(rawKey.size());
      System.arraycopy(rawKey.buffer(), rawKey.offset(), keys[i].get(), 0,
          rawKey.size());
    }
   
    // TODO: Should we change to RawComparable to avoid the creation of
    // BytesWritables?
    for (int i = 0; i < keys.length; ++i) {
      BytesWritable begin = (i == 0) ? null : keys[i - 1];
      BytesWritable end = (i == keys.length - 1) ? null : keys[i];
      BlockDistribution bd = keyDistri.getBlockDistribution(keys[i]);
      SortedTableSplit split = new SortedTableSplit(begin, end, bd, conf);
      splits.add(split);
    }
    return splits.toArray(new InputSplit[splits.size()]);
  }
View Full Code Here


  public UnsortedTableSplit(Reader reader, RangeSplit split, JobConf conf)
      throws IOException {
    this.path = reader.getPath();
    this.split = split;
    BlockDistribution dataDist = reader.getBlockDistribution(split);
    if (dataDist != null) {
      length = dataDist.getLength();
      hosts =
          dataDist.getHosts(conf.getInt("mapred.lib.table.input.nlocation", 5));
    }
  }
View Full Code Here

  public RowTableSplit(Reader reader, RowSplit split, JobConf conf)
      throws IOException {
    this.path = reader.getPath();
    this.split = split;
    BlockDistribution dataDist = reader.getBlockDistribution(split);
    if (dataDist != null) {
      length = dataDist.getLength();
      hosts =
          dataDist.getHosts(conf.getInt("mapred.lib.table.input.nlocation", 5));
    }
  }
View Full Code Here

    // BytesWritable key22Read = new BytesWritable();
    // scanner2.getKey(key22Read);
    // Assert.assertEquals(new BytesWritable("key22".getBytes()), key22Read);

    // Map<String, Long> map;
    BlockDistribution dist = reader.getBlockDistribution(listRanges.get(0));
    long n = dist.getLength();

    reader.close();
    close();
  }
View Full Code Here

        throw new IOException("Attempting sorted split on unsorted table");
      }
    }

    if (numSplits == 1) {
      BlockDistribution bd = null;
      for (Iterator<BasicTable.Reader> it = readers.iterator(); it.hasNext();) {
        BasicTable.Reader reader = it.next();
        bd = BlockDistribution.sum(bd, reader.getBlockDistribution((RangeSplit) null));
      }
     
      SortedTableSplit split = new SortedTableSplit(null, null, bd, conf);
      return new InputSplit[] { split };
    }
   
    // TODO: Does it make sense to interleave keys for all leaf tables if
    // numSplits <= 0 ?
    int nLeaves = readers.size();
    BlockDistribution lastBd = new BlockDistribution();
    ArrayList<KeyDistribution> btKeyDistributions = new ArrayList<KeyDistribution>();
    for (int i = 0; i < nLeaves; ++i) {
      KeyDistribution btKeyDistri =
          readers.get(i).getKeyDistribution(
              (numSplits <= 0) ? -1 :
              Math.max(numSplits * 5 / nLeaves, numSplits), nLeaves, lastBd);
      btKeyDistributions.add(btKeyDistri);
    }
    int btSize = btKeyDistributions.size();
    KeyDistribution[] btKds = new KeyDistribution[btSize];
    Object[] btArray = btKeyDistributions.toArray();
    for (int i = 0; i < btSize; i++)
      btKds[i] = (KeyDistribution) btArray[i];
   
    KeyDistribution keyDistri = KeyDistribution.merge(btKds);

    if (keyDistri == null) {
      // should never happen.
      SortedTableSplit split = new SortedTableSplit(null, null, null, conf);
      return new InputSplit[] { split };
    }
   
    keyDistri.resize(lastBd);
   
    RawComparable[] keys = keyDistri.getKeys();
    for (int i = 0; i <= keys.length; ++i) {
      RawComparable begin = (i == 0) ? null : keys[i - 1];
      RawComparable end = (i == keys.length) ? null : keys[i];
      BlockDistribution bd;
      if (i < keys.length)
        bd = keyDistri.getBlockDistribution(keys[i]);
      else
        bd = lastBd;
      BytesWritable beginB = null, endB = null;
View Full Code Here

  public RowTableSplit(Reader reader, RowSplit split, int tableIndex, JobConf conf)
      throws IOException {
    this.path = reader.getPath();
    this.split = split;
    this.tableIndex = tableIndex;
    BlockDistribution dataDist = reader.getBlockDistribution(split);
    if (dataDist != null) {
      length = dataDist.getLength();
      hosts =
          dataDist.getHosts(conf.getInt("mapred.lib.table.input.nlocation", 5));
    }
  }
View Full Code Here

    // BytesWritable key22Read = new BytesWritable();
    // scanner2.getKey(key22Read);
    // Assert.assertEquals(new BytesWritable("key22".getBytes()), key22Read);

    // Map<String, Long> map;
    BlockDistribution dist = reader.getBlockDistribution(listRanges.get(0));
    long n = dist.getLength();

    reader.close();
    close();
  }
View Full Code Here

  public RowTableSplit(Reader reader, RowSplit split, JobConf conf)
      throws IOException {
    this.path = reader.getPath();
    this.split = split;
    BlockDistribution dataDist = reader.getBlockDistribution(split);
    if (dataDist != null) {
      length = dataDist.getLength();
      hosts =
          dataDist.getHosts(conf.getInt("mapred.lib.table.input.nlocation", 5));
    }
  }
View Full Code Here

  public RowTableSplit(Reader reader, RowSplit split, int tableIndex, Configuration conf)
      throws IOException {
    this.path = reader.getPath();
    this.split = split;
    this.tableIndex = tableIndex;
    BlockDistribution dataDist = reader.getBlockDistribution(split);
    if (dataDist != null) {
      length = dataDist.getLength();
      hosts =
          dataDist.getHosts(conf.getInt("mapred.lib.table.input.nlocation", 5));
    }
  }
View Full Code Here

        throw new IOException("Attempting sorted split on unsorted table");
      }
    }

    if (numSplits == 1) {
      BlockDistribution bd = null;
      for (Iterator<BasicTable.Reader> it = readers.iterator(); it.hasNext();) {
        BasicTable.Reader reader = it.next();
        bd = BlockDistribution.sum(bd, reader.getBlockDistribution((RangeSplit) null));
      }
     
      SortedTableSplit split = new SortedTableSplit(null, null, bd, conf);
      return new InputSplit[] { split };
    }
   
    // TODO: Does it make sense to interleave keys for all leaf tables if
    // numSplits <= 0 ?
    int nLeaves = readers.size();
    BlockDistribution lastBd = new BlockDistribution();
    ArrayList<KeyDistribution> btKeyDistributions = new ArrayList<KeyDistribution>();
    for (int i = 0; i < nLeaves; ++i) {
      KeyDistribution btKeyDistri =
          readers.get(i).getKeyDistribution(
              (numSplits <= 0) ? -1 :
              Math.max(numSplits * 5 / nLeaves, numSplits), nLeaves, lastBd);
      btKeyDistributions.add(btKeyDistri);
    }
    int btSize = btKeyDistributions.size();
    KeyDistribution[] btKds = new KeyDistribution[btSize];
    Object[] btArray = btKeyDistributions.toArray();
    for (int i = 0; i < btSize; i++)
      btKds[i] = (KeyDistribution) btArray[i];
   
    KeyDistribution keyDistri = KeyDistribution.merge(btKds);

    if (keyDistri == null) {
      // should never happen.
      SortedTableSplit split = new SortedTableSplit(null, null, null, conf);
      return new InputSplit[] { split };
    }
   
    keyDistri.resize(lastBd);
   
    RawComparable[] keys = keyDistri.getKeys();
    for (int i = 0; i <= keys.length; ++i) {
      RawComparable begin = (i == 0) ? null : keys[i - 1];
      RawComparable end = (i == keys.length) ? null : keys[i];
      BlockDistribution bd;
      if (i < keys.length)
        bd = keyDistri.getBlockDistribution(keys[i]);
      else
        bd = lastBd;
      BytesWritable beginB = null, endB = null;
View Full Code Here

TOP

Related Classes of org.apache.hadoop.zebra.io.BlockDistribution

Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc. and owned by Oracle Inc. Contact coftware#gmail.com.