Package org.apache.hadoop.zebra.mapred

Examples of org.apache.hadoop.zebra.mapred.TableInputFormat$DummyFileInputFormat$MultiPathFilter


    private org.apache.hadoop.zebra.schema.Schema projectionSchema;
  /**
   * default constructor
   */
  public TableLoader() {
    inputFormat = new TableInputFormat();
  }
View Full Code Here


  /**
   * @param projectionStr
   *       projection string passed from pig query.
   */
  public TableLoader(String projectionStr) {
    inputFormat = new TableInputFormat();
    projectionString = projectionStr;   
  }
View Full Code Here

  /**
   * @param projectionStr
   *       projection string passed from pig query.
   * @param sorted
   *      need sorted table(s)?
   */
  public TableLoader(String projectionStr, String sorted) throws IOException {
      inputFormat = new TableInputFormat();
      if (projectionStr != null && !projectionStr.isEmpty())
        projectionString = projectionStr;   
      if (sorted.equalsIgnoreCase("sorted"))
        this.sorted = true;
      else
View Full Code Here

      try {
        projection = TableInputFormat.getProjection(conf);
      } catch (ParseException e) {
        throw new IOException("Schema parsing failed :"+e.getMessage());
      }
      numProjCols = Projection.getNumColumns(projection);
      TableInputFormat inputFormat = new TableInputFormat();
      if (sorted)
        TableInputFormat.requireSortedTable(conf, null);
      scanner = inputFormat.getRecordReader(split, conf, Reporter.NULL);
      key = new BytesWritable();
    }
View Full Code Here
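
The fragments above show the read path in pieces: set a projection on the JobConf, ask TableInputFormat for splits, then open a record reader per split. The sketch below strings the same calls together into one self-contained example. The class name ZebraReadSketch, the tablePath parameter, and the "a,b" projection are illustrative assumptions rather than code from the project, and the ParseException import assumes the usual Zebra source layout.

import java.io.IOException;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.zebra.mapred.TableInputFormat;
import org.apache.hadoop.zebra.parser.ParseException;
import org.apache.pig.data.Tuple;

public class ZebraReadSketch {
  /**
   * Reads every row of the projected columns "a" and "b" from an existing
   * Zebra table at tablePath, one split at a time.
   */
  public static void readTable(JobConf jobConf, Path tablePath)
      throws IOException, ParseException {
    TableInputFormat.setInputPaths(jobConf, tablePath);
    TableInputFormat.setProjection(jobConf, "a,b");

    TableInputFormat inputFormat = new TableInputFormat();
    InputSplit[] splits = inputFormat.getSplits(jobConf, 1);

    for (InputSplit split : splits) {
      RecordReader<BytesWritable, Tuple> reader =
          inputFormat.getRecordReader(split, jobConf, Reporter.NULL);
      BytesWritable key = reader.createKey();
      Tuple row = reader.createValue();
      try {
        while (reader.next(key, row)) {
          // row.get(0) and row.get(1) hold the projected columns "a" and "b".
        }
      } finally {
        reader.close();
      }
    }
  }
}

As in the test snippets, getSplits treats its second argument only as a hint, and Reporter.NULL can be passed when no progress reporting is needed.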

    BasicTable.drop(path1, conf);
    BasicTable.drop(path2, conf);
    int total1 = TestBasicTable.createBasicTable(1, 100, "a, b, c, d, e, f", "[a, b]; [c, d]", null, path1, true);   
    int total2 = TestBasicTable.createBasicTable(1, 100, "a, b, c, d, e, f", "[a, b]; [c, d]", null, path2, true);   

    // Read both tables through one TableInputFormat, projecting the virtual
    // "source_table" column that identifies which input table a row came from.
    TableInputFormat inputFormat = new TableInputFormat();
    TableInputFormat.setInputPaths(jobConf, path1, path2);
    TableInputFormat.setProjection(jobConf, "source_table");
    InputSplit[] splits = inputFormat.getSplits(jobConf, -1);
    Assert.assertEquals(splits.length, 2);
    for (int i = 0; i < 2; i++)
    {
      int count = 0;
      RowTableSplit split = (RowTableSplit) splits[i];
      TableRecordReader rr = (TableRecordReader) inputFormat.getRecordReader(split, jobConf, null);
      Tuple t = TypesUtils.createTuple(1);
      BytesWritable key = new BytesWritable();
      while (rr.next(key, t)) {
        int idx = (Integer) t.get(0);
        Assert.assertEquals(idx, i);
View Full Code Here

  public void testTfileSplit1()
          throws IOException, ParseException {
    BasicTable.drop(path, conf);
    TestBasicTable.createBasicTable(1, 100, "a, b, c, d, e, f", "[a, b]; [c, d]", null, path, true);   

    // Plan splits over a single table (min split size 100, hint of 40 splits)
    // and inspect the first split's underlying TFile split description.
    TableInputFormat inputFormat = new TableInputFormat();
    JobConf jobConf = new JobConf(conf);
    inputFormat.setInputPaths(jobConf, path);
    inputFormat.setMinSplitSize(jobConf, 100);
    inputFormat.setProjection(jobConf, "aa");
    InputSplit[] splits = inputFormat.getSplits(jobConf, 40);
   
    RowTableSplit split = (RowTableSplit) splits[0];
    String str = split.getSplit().toString();
    StringTokenizer tokens = new StringTokenizer(str, "\n");
    str = tokens.nextToken();
View Full Code Here

    BasicTable.drop(path, conf);
    TestBasicTable.createBasicTable(1, 100, "a, b, c, d, e, f", "[a, b]; [c, d]", null, path, true);   
    BasicTable.dropColumnGroup(path, conf, "CG0");
    BasicTable.dropColumnGroup(path, conf, "CG2");
   
    TableInputFormat inputFormat = new TableInputFormat();
    JobConf jobConf = new JobConf(conf);
    inputFormat.setInputPaths(jobConf, path);
    inputFormat.setMinSplitSize(jobConf, 100);
    InputSplit[] splits = inputFormat.getSplits(jobConf, 40);
   
    RowTableSplit split = (RowTableSplit) splits[0];
    String str = split.getSplit().toString();
    StringTokenizer tokens = new StringTokenizer(str, "\n");
    str = tokens.nextToken();
View Full Code Here

    TestBasicTable.createBasicTable(1, 100, "a, b, c, d, e, f", "[a, b]; [c, d]", null, path, true);   
    BasicTable.dropColumnGroup(path, conf, "CG0");
    BasicTable.dropColumnGroup(path, conf, "CG1");
    BasicTable.dropColumnGroup(path, conf, "CG2");
   
    TableInputFormat inputFormat = new TableInputFormat();
    JobConf jobConf = new JobConf(conf);
    inputFormat.setInputPaths(jobConf, path);
    inputFormat.setMinSplitSize(jobConf, 100);
    InputSplit[] splits = inputFormat.getSplits(jobConf, 40);
   
    Assert.assertEquals(splits.length, 0);
  }
View Full Code Here
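
Taken together, the split tests above reduce to a small configuration recipe: point the input format at the table, optionally set a projection and a minimum split size, then ask for splits with a count hint. The sketch below shows just that recipe; the class name ZebraSplitSketch, the projected column "a", and the literal sizes are illustrative values, not taken from the original tests.

import java.io.IOException;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.zebra.mapred.TableInputFormat;
import org.apache.hadoop.zebra.parser.ParseException;

public class ZebraSplitSketch {
  /**
   * Plans input splits for a Zebra table, mirroring the configuration knobs
   * exercised by the split tests above.
   */
  public static InputSplit[] planSplits(JobConf jobConf, Path tablePath)
      throws IOException, ParseException {
    TableInputFormat inputFormat = new TableInputFormat();
    TableInputFormat.setInputPaths(jobConf, tablePath);

    // Optional projection: only the named columns are read by the split readers.
    TableInputFormat.setProjection(jobConf, "a");

    // Lower bound on the size of each split (the tests above use 100).
    inputFormat.setMinSplitSize(jobConf, 100);

    // The second argument is only a hint for the desired number of splits.
    return inputFormat.getSplits(jobConf, 40);
  }
}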

 