Package org.apache.hadoop.zebra.schema

Examples of org.apache.hadoop.zebra.schema.Schema

The excerpts below show how Zebra's Schema class is used in practice: a Schema is parsed from a schema string or obtained from a BasicTable writer, and is then used to create tuples and projections.


        this.path = path;
        schemaFile = new SchemaFile(path, deletedCGs, conf);
        metaReader = MetaFile.createReader(new Path(path, BT_META_FILE), conf);
        // create column group readers
        int numCGs = schemaFile.getNumOfPhysicalSchemas();
        Schema schema;
        colGroups = new ColumnGroup.Reader[numCGs];
        cgTuples = new Tuple[numCGs];
        // set default projection that contains everything
        schema = schemaFile.getLogical();
        projection = new Projection(schema);
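The excerpt above comes from the BasicTable.Reader side: once the table's schema file is loaded, the logical schema is used to build a default projection that selects every column. Below is a minimal sketch of that Schema-to-Projection step, assuming only the Schema and Projection constructors visible in the excerpt; the schema string and class name are made up for illustration.

import org.apache.hadoop.zebra.schema.Schema;
import org.apache.hadoop.zebra.types.Projection;

public class DefaultProjectionSketch {
  public static void main(String[] args) throws Exception {
    // Hypothetical column layout; any valid Zebra schema string would do.
    Schema schema = new Schema("a:string,b:int,m:map(string)");

    // A Projection built from the schema alone covers every column,
    // mirroring the "default projection that contains everything" above.
    Projection everything = new Projection(schema);
    System.out.println(everything.toString());
  }
}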


    pathTable1 = new Path(pathWorking, "TestBasicTableUnion" + "1");
    System.out.println("pathTable1 =" + pathTable1);

    BasicTable.Writer writer = new BasicTable.Writer(pathTable1,
        "a:string,b,c:string", "[a,b];[c]", conf);
    Schema schema = writer.getSchema();
    Tuple tuple = TypesUtils.createTuple(schema);

    final int numsBatch = 10;
    final int numsInserters = 2;
    TableInserter[] inserters = new TableInserter[numsInserters];
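This TestBasicTableUnion excerpt creates a table from a schema string, "a:string,b,c:string", and a storage string, "[a,b];[c]", that splits the three columns into two column groups; the parsed Schema then comes back from the writer and sizes the row tuple. A small writer-side sketch of the same pattern, assuming the BasicTable.Writer constructor and TypesUtils calls shown above; the path and values are illustrative and the sketch assumes a writable default filesystem.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.zebra.io.BasicTable;
import org.apache.hadoop.zebra.schema.Schema;
import org.apache.hadoop.zebra.types.TypesUtils;
import org.apache.pig.data.Tuple;

public class WriterSchemaSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path path = new Path("/tmp/WriterSchemaSketch");   // illustrative path

    // Same schema string and column-group layout as the excerpt above.
    BasicTable.Writer writer = new BasicTable.Writer(path,
        "a:string,b,c:string", "[a,b];[c]", conf);

    Schema schema = writer.getSchema();           // parsed from the schema string
    Tuple tuple = TypesUtils.createTuple(schema); // one slot per column
    tuple.set(0, "hello");                        // column a
    tuple.set(2, "world");                        // column c
    // column b is declared without a type in the schema string

    writer.finish();
  }
}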

    public void configure(JobConf job) {
      bytesKey = new BytesWritable();
      conf = job;
      sortKey = job.get("sortKey");
      try {
        Schema outSchema = BasicTableOutputFormat.getSchema(job);
        tupleRow = (ZebraTuple) TypesUtils.createTuple(outSchema);
        javaObj = BasicTableOutputFormat.getSortKeyGenerator(job);
      } catch (IOException e) {
        throw new RuntimeException(e);
      } catch (org.apache.hadoop.zebra.parser.ParseException e) {
        throw new RuntimeException(e);
      }
    }
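The configure() excerpt above runs on the map side of a job that writes through BasicTableOutputFormat: it pulls the output schema back out of the JobConf, allocates a reusable output tuple from it, and obtains the sort-key generator. A stripped-down sketch of the schema lookup, assuming the old-API org.apache.hadoop.zebra.mapred.BasicTableOutputFormat (suggested by the JobConf signature); the helper name is made up and job setup is omitted.

import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.zebra.mapred.BasicTableOutputFormat;
import org.apache.hadoop.zebra.schema.Schema;
import org.apache.hadoop.zebra.types.TypesUtils;
import org.apache.pig.data.Tuple;

public class OutputSchemaSketch {
  // Hypothetical helper, called from a mapper's configure(JobConf); it assumes
  // the driver already stored the output schema in the JobConf.
  static Tuple newOutputTuple(JobConf job) {
    try {
      Schema outSchema = BasicTableOutputFormat.getSchema(job);
      return TypesUtils.createTuple(outSchema);
    } catch (Exception e) {
      // configure() cannot throw checked exceptions, so the excerpt above
      // wraps failures in RuntimeException; this sketch does the same.
      throw new RuntimeException(e);
    }
  }
}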

        "pig", "gz", "root", null, (short) Short.parseShort("755", 8), false, conf);

    writer.finish();

    int total = 0;
    Schema schema = new Schema(strSchema);
    String colNames[] = schema.getColumns();
    Tuple tuple = TypesUtils.createTuple(schema);
    int[] permutation = new int[parts];
    for (int i = 0; i < parts; ++i) {
      permutation[i] = i;
    }
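Here the test rebuilds a Schema directly from the same schema string that was used to create the table, then reads the column names back with getColumns() so rows can be filled and checked by position. A minimal sketch of that inspection step, using only the constructor and getColumns() call shown above; the schema string is illustrative.

import org.apache.hadoop.zebra.schema.Schema;

public class SchemaColumnsSketch {
  public static void main(String[] args) throws Exception {
    // Illustrative schema string; any valid Zebra schema would do.
    Schema schema = new Schema("word:string,count:int");

    // getColumns() returns the column names in declaration order.
    String[] colNames = schema.getColumns();
    for (int i = 0; i < colNames.length; ++i) {
      System.out.println(i + " -> " + colNames[i]);
    }
  }
}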

        "pig", "gz", "root", null, (short) Short.parseShort("777", 8), false, conf);
    writer.finish();

    int total = 0;
    DupKeyGen keyGen = new DupKeyGen(10, rows * 3);
    Schema schema = new Schema(strSchema);
    String colNames[] = schema.getColumns();
    Tuple tuple = TypesUtils.createTuple(schema);
    int[] permutation = new int[parts];
    for (int i = 0; i < parts; ++i) {
      permutation[i] = i;
    }

   *          conforms to the {@link Schema} string.
   *
   */
  public static void setProjection(JobConf conf, ZebraProjection projection) throws ParseException {
    /* validity check on projection */
    Schema schema = null;
    String normalizedProjectionString = Schema.normalize(projection.toString());
    try {
      schema = getSchema(conf);
      new org.apache.hadoop.zebra.types.Projection(schema, normalizedProjectionString);
    } catch (ParseException e) {
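setProjection() validates a projection before storing it in the job configuration: the projection string is normalized and then parsed against the table schema by constructing an org.apache.hadoop.zebra.types.Projection inside a try block, so a ParseException signals an invalid projection. A small sketch of that validation pattern, assuming the two-argument Projection constructor shown above throws ParseException for projection strings it cannot parse against the schema; the helper name and schema string are illustrative, and Schema.normalize is skipped here.

import org.apache.hadoop.zebra.parser.ParseException;
import org.apache.hadoop.zebra.schema.Schema;
import org.apache.hadoop.zebra.types.Projection;

public class ProjectionCheckSketch {
  // Returns false if the projection string cannot be parsed against the schema.
  static boolean isValidProjection(Schema schema, String projStr) {
    try {
      new Projection(schema, projStr);   // parse-time validity check
      return true;
    } catch (ParseException e) {
      return false;
    }
  }

  public static void main(String[] args) throws Exception {
    Schema schema = new Schema("a:string,b:int,c:string");   // illustrative
    System.out.println(isValidProjection(schema, "a,c"));    // projection over declared columns
  }
}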

    BasicTable.Writer writer = new BasicTable.Writer(path, STR_SCHEMA,
        STR_STORAGE, "r, c", null, conf);
    writer.finish();

    Schema schema = writer.getSchema();
    Tuple tuple = TypesUtils.createTuple(schema);

    BasicTable.Writer writer1 = new BasicTable.Writer(path, conf);
    int part = 0;
    TableInserter inserter = writer1.getInserter("part" + part, true);
    TypesUtils.resetTuple(tuple);

    tuple.set(0, true);

    Tuple tupRecord;
    try {
      tupRecord = TypesUtils.createTuple(schema.getColumnSchema("r")
          .getSchema());
    } catch (ParseException e) {
      e.printStackTrace();
      throw new IOException(e);
    }

    // row 1
    tupRecord.set(0, 1);
    tupRecord.set(1, 1001L);
    tuple.set(1, tupRecord);

    Map<String, String> map = new HashMap<String, String>();
    map.put("a", "x");
    map.put("b", "y");
    map.put("c", "z");
    tuple.set(2, map);

    DataBag bagColl = TypesUtils.createBag();
    Schema schColl = schema.getColumn(3).getSchema();
    Tuple tupColl1 = TypesUtils.createTuple(schColl);
    Tuple tupColl2 = TypesUtils.createTuple(schColl);
    byte[] abs1 = new byte[3];
    byte[] abs2 = new byte[4];
    tupColl1.set(0, 3.1415926);
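This excerpt fills a row for a table whose schema mixes a boolean column, a record column r, a map column, and a collection column: the nested tuple for r is created from that column's own schema via getColumnSchema("r").getSchema(), a java.util.Map carries the map column, and a DataBag from TypesUtils.createBag() will hold the collection's member tuples. A condensed sketch of the nested-record step, using the getColumnSchema(...).getSchema() chain shown above; the record schema string is illustrative.

import org.apache.hadoop.zebra.schema.Schema;
import org.apache.hadoop.zebra.types.TypesUtils;
import org.apache.pig.data.Tuple;

public class NestedRecordSketch {
  public static void main(String[] args) throws Exception {
    // Illustrative schema: a single record column "r" with two nested fields.
    Schema schema = new Schema("r:record(f1:int,f2:long)");

    // Top-level row tuple, sized from the table schema.
    Tuple row = TypesUtils.createTuple(schema);

    // Nested tuple for column "r", sized from that column's own schema.
    Tuple record = TypesUtils.createTuple(schema.getColumnSchema("r").getSchema());
    record.set(0, 1);
    record.set(1, 1001L);
    row.set(0, record);
  }
}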

    //
    // Create table from tableData array
    //
    BasicTable.Writer writer = new BasicTable.Writer(path, schemaString, storageString, conf);
   
    Schema schema = writer.getSchema();
    Tuple tuple = TypesUtils.createTuple(schema);
    TableInserter inserter = writer.getInserter("ins", false);
   
    for (int i = 0; i < tableData.length; ++i) {
      TypesUtils.resetTuple(tuple);
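Here a table is populated from an in-memory tableData array: a single tuple is allocated from the writer's schema and reused for every row, with TypesUtils.resetTuple() clearing it between iterations. A short sketch of that reuse pattern, using only the createTuple/resetTuple calls shown above; the data array and schema string are illustrative.

import org.apache.hadoop.zebra.schema.Schema;
import org.apache.hadoop.zebra.types.TypesUtils;
import org.apache.pig.data.Tuple;

public class ReuseTupleSketch {
  public static void main(String[] args) throws Exception {
    Schema schema = new Schema("a:string,b:string");   // illustrative
    Tuple tuple = TypesUtils.createTuple(schema);

    String[][] tableData = { { "r0a", "r0b" }, { "r1a", "r1b" } };  // illustrative
    for (int i = 0; i < tableData.length; ++i) {
      TypesUtils.resetTuple(tuple);        // clear the fields before refilling
      tuple.set(0, tableData[i][0]);
      tuple.set(1, tableData[i][1]);
      // a real writer would hand the tuple to a TableInserter here
    }
  }
}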

    BasicTable.Writer writer = new BasicTable.Writer(path, STR_SCHEMA,
        STR_STORAGE, conf);
    writer.finish();

    Schema schema = writer.getSchema();
    Tuple tuple = TypesUtils.createTuple(schema);

    BasicTable.Writer writer1 = new BasicTable.Writer(path, conf);
    int part = 0;
    TableInserter inserter = writer1.getInserter("part" + part, true);
    TypesUtils.resetTuple(tuple);

    tuple.set(0, true);

    Tuple tupRecord;
    try {
      tupRecord = TypesUtils.createTuple(schema.getColumnSchema("r")
          .getSchema());
    } catch (ParseException e) {
      e.printStackTrace();
      throw new IOException(e);
    }
    tupRecord.set(0, 1);
    tupRecord.set(1, 1001L);
    tuple.set(1, tupRecord);

    Map<String, String> map = new HashMap<String, String>();
    map.put("a", "x");
    map.put("b", "y");
    map.put("c", "z");
    tuple.set(2, map);

    DataBag bagColl = TypesUtils.createBag();
    Schema schColl = schema.getColumn(3).getSchema();
    Tuple tupColl1 = TypesUtils.createTuple(schColl);
    Tuple tupColl2 = TypesUtils.createTuple(schColl);
    byte[] abs1 = new byte[3];
    byte[] abs2 = new byte[4];
    tupColl1.set(0, 3.1415926);

    pathTable = new Path(pathWorking, "TestMapTableStorer");
    System.out.println("table path=" + pathTable);
    BasicTable.Writer writer = new BasicTable.Writer(pathTable,
        "m:map(string)", "[m#{a}]", conf);
    Schema schema = writer.getSchema();
    Tuple tuple = TypesUtils.createTuple(schema);

    final int numsBatch = 10;
    final int numsInserters = 2;
    TableInserter[] inserters = new TableInserter[numsInserters];
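TestMapTableStorer creates a table with a single map column, m:map(string), and a storage string, [m#{a}], that stores the map key a in its own column group. A minimal writer-side sketch of the map-column pattern, using the same constructor and TypesUtils calls as above and the HashMap usage from the earlier excerpts; the path is illustrative and the sketch assumes a writable default filesystem.

import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.zebra.io.BasicTable;
import org.apache.hadoop.zebra.schema.Schema;
import org.apache.hadoop.zebra.types.TypesUtils;
import org.apache.pig.data.Tuple;

public class MapColumnSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path path = new Path("/tmp/MapColumnSketch");   // illustrative path

    // One map column; "[m#{a}]" puts map key "a" in its own column group.
    BasicTable.Writer writer = new BasicTable.Writer(path,
        "m:map(string)", "[m#{a}]", conf);
    Schema schema = writer.getSchema();

    Tuple row = TypesUtils.createTuple(schema);
    Map<String, String> m = new HashMap<String, String>();
    m.put("a", "x");
    m.put("b", "y");
    row.set(0, m);

    writer.finish();
  }
}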
