Examples of TableSplit


Examples of org.apache.hadoop.hbase.mapreduce.TableSplit
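In this getSplits() loop, each region's start and end keys are clamped to the scan's startRow/stopRow, and one TableSplit carrying the table name, the clamped range, and the region location is added to the list of splits.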

                // Clamp the region's start key to the scan's startRow (statement head
                // reconstructed to mirror the splitStop expression below).
                byte[] splitStart = startRow.length == 0 ||
                        Bytes.compareTo(keys.getFirst()[i], startRow) >= 0 ?
                        keys.getFirst()[i] : startRow;
                byte[] splitStop = (stopRow.length == 0 ||
                        Bytes.compareTo(keys.getSecond()[i], stopRow) <= 0) &&
                        keys.getSecond()[i].length > 0 ?
                        keys.getSecond()[i] : stopRow;
                InputSplit split = new TableSplit(table.getTableName(),
                        splitStart, splitStop, regionLocation);
                splits.add(split);
                if (log.isDebugEnabled()) {
                    log.debug("getSplits: split -> " + (count++) + " -> " + split);
                }

Examples of org.apache.hadoop.hbase.mapreduce.TableSplit
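Here a record reader narrows a Lily RecordScan to the current TableSplit: the split's start and end rows become the scan's raw record ID bounds, and the split's table name is mapped back to a repository table before a RecordScanner is opened.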

        // Build RecordScan
        RecordScan scan = getScan(repository);

        // Change the start/stop record IDs on the scan to the current split
        TableSplit split = (TableSplit)inputSplit;
        scan.setRawStartRecordId(split.getStartRow());
        scan.setRawStopRecordId(split.getEndRow());

        RecordScanner scanner = null;
        try {
            String hbaseTableName = Bytes.toString(split.getTableName());
            String repositoryTableName = RepoAndTableUtil.extractLilyTableName(repositoryName, hbaseTableName);
            scanner = lilyClient.getRepository(repositoryName).getTable(repositoryTableName).getScanner(scan);
        } catch (RepositoryException e) {
            Closer.close(lilyClient);
            throw new IOException("Error setting up RecordScanner", e);

Examples of org.apache.hadoop.hbase.mapreduce.TableSplit
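The same record-reader setup as the previous example, except that getScannerWithIds() is called to obtain an IdRecordScanner.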

        // Build RecordScan
        RecordScan scan = getScan(repository);

        // Change the start/stop record IDs on the scan to the current split
        TableSplit split = (TableSplit)inputSplit;
        scan.setRawStartRecordId(split.getStartRow());
        scan.setRawStopRecordId(split.getEndRow());

        IdRecordScanner scanner = null;
        try {
            String hbaseTableName = Bytes.toString(split.getTableName());
            String repositoryTableName = RepoAndTableUtil.extractLilyTableName(repositoryName, hbaseTableName);
            scanner = lilyClient.getRepository(repositoryName).getTable(repositoryTableName).getScannerWithIds(scan);
        } catch (RepositoryException e) {
            Closer.close(lilyClient);
            throw new IOException("Error setting up RecordScanner", e);

Examples of org.apache.hadoop.hbase.mapreduce.TableSplit
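A FileSplit subclass that wraps a TableSplit; its no-argument constructor creates an empty TableSplit so the wrapper can be populated via Writable deserialization.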

public class HBaseSplit extends FileSplit implements InputSplit {
  private final TableSplit split;

  public HBaseSplit() {
    super((Path) null, 0, 0, (String[]) null);
    split = new TableSplit();
  }

Examples of org.apache.hadoop.hbase.mapreduce.TableSplit
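Evidently the start of a getRecordReader(InputSplit, JobConf, Reporter) implementation: the HBaseSplit is unwrapped to its TableSplit, and the target HTable is opened using the table name and column mapping stored in the job configuration.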

    InputSplit split,
    JobConf jobConf,
    final Reporter reporter) throws IOException {

    HBaseSplit hbaseSplit = (HBaseSplit) split;
    TableSplit tableSplit = hbaseSplit.getSplit();
    String hbaseTableName = jobConf.get(HBaseSerDe.HBASE_TABLE_NAME);
    setHTable(new HTable(HBaseConfiguration.create(jobConf), Bytes.toBytes(hbaseTableName)));
    String hbaseColumnsMapping = jobConf.get(HBaseSerDe.HBASE_COLUMNS_MAPPING);
    boolean doColumnRegexMatching = jobConf.getBoolean(HBaseSerDe.HBASE_COLUMNS_REGEX_MATCHING, true);
    List<Integer> readColIDs = ColumnProjectionUtils.getReadColumnIDs(jobConf);

Examples of org.apache.hadoop.hbase.mapreduce.TableSplit
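An older variant of the same getRecordReader() body; here the HTable is built with the deprecated new HBaseConfiguration(jobConf) constructor rather than HBaseConfiguration.create(jobConf), and column families and qualifiers are collected into separate lists.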

    InputSplit split,
    JobConf jobConf,
    final Reporter reporter) throws IOException {

    HBaseSplit hbaseSplit = (HBaseSplit) split;
    TableSplit tableSplit = hbaseSplit.getSplit();
    String hbaseTableName = jobConf.get(HBaseSerDe.HBASE_TABLE_NAME);
    setHTable(new HTable(new HBaseConfiguration(jobConf), Bytes.toBytes(hbaseTableName)));
    String hbaseColumnsMapping = jobConf.get(HBaseSerDe.HBASE_COLUMNS_MAPPING);
    List<String> hbaseColumnFamilies = new ArrayList<String>();
    List<String> hbaseColumnQualifiers = new ArrayList<String>();

Examples of org.apache.hadoop.hbase.mapreduce.TableSplit
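Narrowing a TableSplit to a single row: the stop row is the start row with one extra trailing zero byte, which sorts immediately after startRow, so the rebuilt split covers exactly that key.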

    // make it compare as the very next value after startRow
    byte [] stopRow = new byte[startRow.length + 1];
    System.arraycopy(startRow, 0, stopRow, 0, startRow.length);

    if (tableSplit != null) {
      tableSplit = new TableSplit(
        tableSplit.getTableName(),
        startRow,
        stopRow,
        tableSplit.getRegionLocation());
    }

Examples of org.apache.hadoop.hbase.mapreduce.TableSplit
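One TableSplit is created per KijiRegion from the region's start and end keys plus its first reported location, and each is wrapped in a KijiTableSplit.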

      for (KijiRegion region : table.getRegions()) {
        final byte[] startKey = region.getStartKey();
        // TODO: a smart way to get which location is most relevant.
        final String location =
            region.getLocations().isEmpty() ? null : region.getLocations().iterator().next();
        final TableSplit tableSplit = new TableSplit(
            htable.getTableName(), startKey, region.getEndKey(), location);
        splits.add(new KijiTableSplit(tableSplit, startKey));
      }
      return splits;

Examples of org.apache.hadoop.hbase.mapreduce.TableSplit
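getSplitComparable() returns an anonymous WritableComparable that simply delegates readFields(), write(), and compareTo() to an embedded TableSplit.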

    @Override
    public WritableComparable<InputSplit> getSplitComparable(InputSplit split)
            throws IOException {
        return new WritableComparable<InputSplit>() {
            TableSplit tsplit = new TableSplit();

            @Override
            public void readFields(DataInput in) throws IOException {
                tsplit.readFields(in);
            }

            @Override
            public void write(DataOutput out) throws IOException {
                tsplit.write(out);
            }

            @Override
            public int compareTo(InputSplit split) {
                return tsplit.compareTo((TableSplit) split);
            }
        };
    }

Examples of org.apache.hadoop.hbase.mapreduce.TableSplit
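The no-argument constructor of a split wrapper that holds both a plain TableSplit and a table-snapshot region split; empty instances are created here, as the javadoc notes, for Writable deserialization.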

  /**
   * For Writable
   */
  public HBaseSplit() {
    super((Path) null, 0, 0, (String[]) null);
    tableSplit = new TableSplit();
    snapshotSplit = HBaseTableSnapshotInputFormatUtil.createTableSnapshotRegionSplit();
  }