Examples of TableSplit


Examples of co.cask.cdap.api.dataset.table.TableSplit

  // Compute up to numSplits key ranges over [start, stop) and wrap each
  // KeyRange in a TableSplit. (The opening of the method was cut off in
  // this snippet; the signature below is inferred from the body.)
  public List<Split> getSplits(int numSplits, byte[] start, byte[] stop) {
    List<KeyRange> keyRanges = SplitsUtil.primitiveGetSplits(numSplits, start, stop);
    return Lists.transform(keyRanges, new Function<KeyRange, Split>() {
      @Nullable
      @Override
      public Split apply(@Nullable KeyRange input) {
        // A null range maps to a split with null start and stop keys.
        return new TableSplit(input == null ? null : input.getStart(),
                              input == null ? null : input.getStop());
      }
    });
  }
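
A TableSplit built this way is only a serializable handle on a half-open row-key range; a reader later turns it into a scan. A minimal standalone sketch (class name hypothetical; the constructor and getters are the ones used by the snippets on this page):

    import co.cask.cdap.api.dataset.table.TableSplit;

    public class TableSplitSketch {
      public static void main(String[] args) {
        byte[] start = { 0x00 };
        byte[] stop  = { 0x7f };
        // Wrap the [start, stop) key range, exactly as apply() does above.
        TableSplit split = new TableSplit(start, stop);
        System.out.println(split.getStart().length + " / " + split.getStop().length);
      }
    }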

Examples of co.cask.cdap.api.dataset.table.TableSplit

    // the current row, that is, a map from column key to value
    private Map<byte[], byte[]> row = null;

    @Override
    public void initialize(Split split) throws InterruptedException {
      // Open a scanner covering exactly this split's key range.
      TableSplit tableSplit = (TableSplit) split;
      try {
        this.scanner = table.scan(tableSplit.getStart(), tableSplit.getStop());
      } catch (Exception e) {
        LOG.debug("scan failed for table: " + getTransactionAwareName(), e);
        throw new DataSetException("scan failed", e);
      }
    }
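
The natural counterpart to initialize() is the method that advances the scanner one row at a time. A hedged sketch of that continuation, assuming CDAP's Scanner.next() returns a Row (null once the range is exhausted) with getRow() and getColumns(), and that a byte[] key field sits alongside the row field declared above:

    @Override
    public boolean nextKeyValue() throws InterruptedException {
      Row next = scanner.next();   // null signals the end of this split's range
      if (next == null) {
        return false;
      }
      key = next.getRow();         // row key of the current record (assumed field)
      row = next.getColumns();     // column key -> value, as declared above
      return true;
    }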

Examples of org.apache.hadoop.hbase.mapred.TableSplit

  // The opening of this method was cut off in the snippet; it is the
  // old-API (org.apache.hadoop.mapred) record-reader factory, inferred as:
  public RecordReader<ImmutableBytesWritable, Result> getRecordReader(
    InputSplit split, JobConf job, Reporter reporter)
    throws IOException {
    String jobString = job.get(HCatConstants.HCAT_KEY_JOB_INFO);
    InputJobInfo inputJobInfo = (InputJobInfo) HCatUtil.deserialize(jobString);

    String tableName = job.get(TableInputFormat.INPUT_TABLE);
    TableSplit tSplit = (TableSplit) split;
    HbaseSnapshotRecordReader recordReader = new HbaseSnapshotRecordReader(inputJobInfo, job);
    inputFormat.setConf(job);
    // Copy the template scan and narrow it to this split's row range.
    Scan inputScan = inputFormat.getScan();
    // TODO: Make the caching configurable by the user
    inputScan.setCaching(200);
    inputScan.setCacheBlocks(false);
    Scan sc = new Scan(inputScan);
    sc.setStartRow(tSplit.getStartRow());
    sc.setStopRow(tSplit.getEndRow());
    recordReader.setScan(sc);
    recordReader.setHTable(new HTable(job, tableName));
    recordReader.init();
    return recordReader;
  }
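
For this factory to work, the JobConf must already carry the two values it reads back: the HBase table name and the serialized HCatalog job info. A hedged setup sketch (the table name is a placeholder, and inputJobInfo stands for an InputJobInfo built elsewhere for that table):

    JobConf job = new JobConf();
    // The same key the method reads with job.get(TableInputFormat.INPUT_TABLE).
    job.set(TableInputFormat.INPUT_TABLE, "my_hbase_table");
    // The serialized InputJobInfo the method deserializes on entry.
    job.set(HCatConstants.HCAT_KEY_JOB_INFO, HCatUtil.serialize(inputJobInfo));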

Examples of org.apache.hadoop.hbase.mapred.TableSplit

  // Convert new-API (org.apache.hadoop.mapreduce) splits into old-API
  // (org.apache.hadoop.mapred) TableSplits; the two types are unrelated.
  private InputSplit[] convertSplits(List<org.apache.hadoop.mapreduce.InputSplit> splits) {
    InputSplit[] converted = new InputSplit[splits.size()];
    for (int i = 0; i < splits.size(); i++) {
      org.apache.hadoop.hbase.mapreduce.TableSplit tableSplit =
        (org.apache.hadoop.hbase.mapreduce.TableSplit) splits.get(i);
      TableSplit newTableSplit = new TableSplit(tableSplit.getTableName(),
        tableSplit.getStartRow(),
        tableSplit.getEndRow(), tableSplit.getRegionLocation());
      converted[i] = newTableSplit;
    }
    return converted;
  }
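
The conversion exists because HBase ships two parallel MapReduce integrations, org.apache.hadoop.hbase.mapred (old API) and org.apache.hadoop.hbase.mapreduce (new API), whose TableSplit classes do not share a type. A standalone sketch of the four-argument constructor the loop relies on (all values are placeholders):

    import org.apache.hadoop.hbase.mapred.TableSplit;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SplitConversionSketch {
      public static void main(String[] args) {
        TableSplit split = new TableSplit(
          Bytes.toBytes("my_table"),        // table name
          Bytes.toBytes("row-000"),         // start row (inclusive)
          Bytes.toBytes("row-999"),         // end row (exclusive)
          "regionserver-1.example.com");    // preferred region location
        System.out.println(Bytes.toString(split.getTableName()));
      }
    }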

Examples of org.apache.hadoop.hbase.mapred.TableSplit

  // The wrapped HBase split, plus the column mapping it belongs to.
  private TableSplit split;
  private String hbaseColumnMapping;

  // Writable no-arg constructor: the dummy FileSplit arguments satisfy
  // the superclass, and the real state arrives later via readFields().
  public HBaseSplit() {
    super((Path) null, 0, 0, (String[]) null);
    hbaseColumnMapping = "";
    split = new TableSplit();
  }
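
Because MapReduce moves splits between nodes as Writables, a wrapper like this also has to serialize its extra state. A plausible sketch of the matching pair, not the verbatim source, assuming the wrapper delegates to FileSplit and to the inner TableSplit (java.io.DataInput/DataOutput):

    @Override
    public void readFields(DataInput in) throws IOException {
      super.readFields(in);                // FileSplit's dummy fields
      hbaseColumnMapping = in.readUTF();   // the wrapper's extra state
      split.readFields(in);                // the wrapped TableSplit
    }

    @Override
    public void write(DataOutput out) throws IOException {
      super.write(out);
      out.writeUTF(hbaseColumnMapping);
      split.write(out);
    }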
