Package org.apache.hadoop.zebra.types

Examples of org.apache.hadoop.zebra.types.Schema$ParsedName


            + "; found in file: " + version);
      }
      comparator = WritableUtils.readString(in);
      String logicalStr = WritableUtils.readString(in);
      try {
        logical = new Schema(logicalStr);
      }
      catch (Exception e) {
        throw new IOException("Schema build failed :" + e.getMessage());
      }
View Full Code Here
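
The snippet above rebuilds the logical Schema from its serialized string form and wraps parse failures in an IOException. A minimal standalone sketch of that pattern, relying only on the string constructor shown on this page (the schema text reuses the "f3:float, f4" example from a later snippet):

import java.io.IOException;

import org.apache.hadoop.zebra.types.Schema;

public class SchemaFromString {
  // Parse a schema string such as "f3:float, f4" (the format used in the
  // snippets on this page) and wrap any failure in an IOException, the way
  // the deserialization code above does.
  public static Schema build(String schemaStr) throws IOException {
    try {
      return new Schema(schemaStr);
    } catch (Exception e) {
      throw new IOException("Schema build failed :" + e.getMessage());
    }
  }

  public static void main(String[] args) throws IOException {
    System.out.println(build("f3:float, f4"));
  }
}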


    rawLFS.setConf(conf);
    fs = new LocalFileSystem(rawLFS);
    path = new Path(fs.getWorkingDirectory(), outputFile);
    System.out.println("output file: " + path);

    schema = new Schema(STR_SCHEMA);

    ColumnGroup.Writer writer = new ColumnGroup.Writer(path, schema, false,
        "pig", "lzo2", true, conf);
    TableInserter ins = writer.getInserter("part0", true);

    Tuple row = TypesUtils.createTuple(schema);
    // schema for "r:record..."
    Schema schRecord = writer.getSchema().getColumn(1).getSchema();
    Tuple tupRecord = TypesUtils.createTuple(schRecord);
    // schema for the collection column (index 3); its value is a bag of
    // tuples built from that element schema
    Schema schColl = schema.getColumn(3).getSchema();
    DataBag bagColl = TypesUtils.createBag();
    Tuple tupColl1 = TypesUtils.createTuple(schColl);
    Tuple tupColl2 = TypesUtils.createTuple(schColl);
    Map<String, String> map = new HashMap<String, String>();
    byte[] abs1 = new byte[3];
View Full Code Here
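
A sketch of how rows like the one above typically get populated before insertion. The field positions, types and values are invented here (STR_SCHEMA is not shown on this page), the Tuple/DataBag calls are the standard Pig data APIs that TypesUtils returns, and the zebra import paths are assumed to match the package in the page title:

import org.apache.pig.data.DataBag;
import org.apache.pig.data.Tuple;

import org.apache.hadoop.zebra.types.Schema;
import org.apache.hadoop.zebra.types.TypesUtils;

public class RowBuilder {
  // Build one row for a schema whose column 1 is a record and whose
  // column 3 is a collection, mirroring the tuple/bag setup above.
  // Field positions and values are illustrative only.
  public static Tuple buildRow(Schema schema) throws Exception {
    Tuple row = TypesUtils.createTuple(schema);

    // nested record value for column 1
    Tuple tupRecord = TypesUtils.createTuple(schema.getColumn(1).getSchema());
    tupRecord.set(0, 1);
    row.set(1, tupRecord);

    // collection value for column 3: a bag of element tuples
    Schema schColl = schema.getColumn(3).getSchema();
    DataBag bagColl = TypesUtils.createBag();
    Tuple elem = TypesUtils.createTuple(schColl);
    elem.set(0, 3.1415926);
    bagColl.add(elem);
    row.set(3, bagColl);

    return row;
  }
}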

    return new InferredProjection(subProjections, columns, colMapping);
  }

  @Override
  public Schema getSchema(Configuration conf) throws IOException {
    Schema result = new Schema();
    for (Iterator<TableExpr> it = composite.iterator(); it.hasNext();) {
      TableExpr e = it.next();
      try {
        result.unionSchema(e.getSchema(conf));
      } catch (ParseException exc) {
        throw new IOException("Schema parsing failed :"+exc.getMessage());
      }
    }
    return result;
View Full Code Here
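
The getSchema override above merges the schemas of all member tables with unionSchema. A stripped-down sketch of the same merge outside the TableExpr context, using only the no-arg constructor and unionSchema call visible above:

import java.io.IOException;

import org.apache.hadoop.zebra.types.Schema;

public class SchemaUnionExample {
  // Union several schemas into one, wrapping parse failures as IOException,
  // the way the composite getSchema above does.
  public static Schema unionAll(Schema... schemas) throws IOException {
    Schema result = new Schema();
    for (Schema s : schemas) {
      try {
        result.unionSchema(s);
      } catch (Exception e) {
        throw new IOException("Schema parsing failed :" + e.getMessage());
      }
    }
    return result;
  }
}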

        Map<String, RowMappingEntry> colMap) throws ParseException {
      subProjections = new Schema[subProj.length];
      for (int i = 0; i < subProj.length; ++i) {
        List<String> subProjection = subProj[i];
        subProjections[i] =
            new Schema(subProjection.toArray(new String[subProjection.size()]));
      }
     
      projection = new Schema(proj);
     
      this.colMapping = new RowMappingEntry[proj.length];
     
      for (int i = 0; i < proj.length; ++i) {
        colMapping[i] = colMap.get(proj[i]);
View Full Code Here

    String schema = conf.get(OUTPUT_SCHEMA);
    if (schema == null) {
      return null;
    }
    schema = schema.replaceAll(";", ",");
    return new Schema(schema);
  }
View Full Code Here
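
The helper above pulls a schema string out of a Configuration, accepts ';' as an alternative column separator, and parses it. A round-trip sketch of that pattern; the configuration key is hypothetical, standing in for the OUTPUT_SCHEMA constant whose value is not shown on this page:

import org.apache.hadoop.conf.Configuration;

import org.apache.hadoop.zebra.types.Schema;

public class ConfSchemaRoundTrip {
  // Hypothetical key; stands in for the OUTPUT_SCHEMA constant used above.
  static final String OUTPUT_SCHEMA = "example.zebra.output.schema";

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set(OUTPUT_SCHEMA, "a;b;f3:float");

    String schemaStr = conf.get(OUTPUT_SCHEMA);
    if (schemaStr != null) {
      // Normalize ';' to ',' before parsing, as the snippet above does.
      Schema schema = new Schema(schemaStr.replaceAll(";", ","));
      System.out.println(schema);
    }
  }
}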

    BasicTable.Writer writer = new BasicTable.Writer(path, "a,b,c,d,e,f,g",
        "[a,b,c];[d,e,f,g]", false, conf);
    writer.finish();

    Schema schema = writer.getSchema();
    // String[] colNames = schema.getColumns();
    Tuple tuple = TypesUtils.createTuple(schema);

    // BytesWritable key;
    int parts = 2;
View Full Code Here
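
The writer above creates a table whose seven columns are split into two column groups by the storage string "[a,b,c];[d,e,f,g]". A condensed sketch of the create-then-reopen-for-insert pattern the later snippets follow; the path, partition name and io-package import paths are assumptions:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;

import org.apache.hadoop.zebra.io.BasicTable;
import org.apache.hadoop.zebra.io.TableInserter;

public class CreateBasicTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path path = new Path("exampleBasicTable");

    // Create the table: schema, storage hint (two column groups), unsorted.
    BasicTable.Writer writer = new BasicTable.Writer(path, "a,b,c,d,e,f,g",
        "[a,b,c];[d,e,f,g]", false, conf);
    writer.finish();

    // Reopen the same table and grab an inserter for one partition.
    BasicTable.Writer writer1 = new BasicTable.Writer(path, conf);
    TableInserter inserter = writer1.getInserter("part0", true);
    // ... insert rows here, then close the inserter and writer.
  }
}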

        this.path = path;
        schemaFile = new SchemaFile(path, conf);
        metaReader = MetaFile.createReader(new Path(path, BT_META_FILE), conf);
        // create column group readers
        int numCGs = schemaFile.getNumOfPhysicalSchemas();
        Schema schema;
        colGroups = new ColumnGroup.Reader[numCGs];
        cgTuples = new Tuple[numCGs];
        // set default projection that contains everything
        schema = schemaFile.getLogical();
        projection = new Projection(schema);
View Full Code Here

    // Build Table and column groups
    BasicTable.Writer writer = new BasicTable.Writer(path, STR_SCHEMA,
        STR_STORAGE, false, conf);
    writer.finish();
    Schema schema = writer.getSchema();
    Tuple tuple = TypesUtils.createTuple(schema);
    BasicTable.Writer writer1 = new BasicTable.Writer(path, conf);
    int part = 0;
    TableInserter inserter = writer1.getInserter("part" + part, true);
    TypesUtils.resetTuple(tuple);
View Full Code Here

    System.out.println("in testRecord, get path: " + path.toString());
    // Build Table and column groups
    BasicTable.Writer writer = new BasicTable.Writer(path, STR_SCHEMA,
        STR_STORAGE, false, conf);
    writer.finish();
    Schema schema = writer.getSchema();
    Tuple tuple = TypesUtils.createTuple(schema);
    BasicTable.Writer writer1 = new BasicTable.Writer(path, conf);
    int part = 0;
    TableInserter inserter = writer1.getInserter("part" + part, true);
    TypesUtils.resetTuple(tuple);

    Tuple tupRecord1;
    try {
      tupRecord1 = TypesUtils.createTuple(schema.getColumnSchema("r1")
          .getSchema());
    } catch (ParseException e) {
      e.printStackTrace();
      throw new IOException(e);
    }

    Tuple tupRecord2;
    try {
      tupRecord2 = TypesUtils.createTuple(schema.getColumnSchema("r2")
          .getSchema());
    } catch (ParseException e) {
      e.printStackTrace();
      throw new IOException(e);
    }

    Tuple tupRecord3;
    try {
      tupRecord3 = TypesUtils.createTuple(new Schema("f3:float, f4"));
    } catch (ParseException e) {
      e.printStackTrace();
      throw new IOException(e);
    }
    // insert data in row 1
View Full Code Here
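
The three try/catch blocks above repeat the same pattern: build a tuple for a named record column's nested schema and rethrow parse failures as IOException. A small helper that factors that out (ParseException is caught through its Exception supertype here so the sketch does not have to guess its package):

import java.io.IOException;

import org.apache.pig.data.Tuple;

import org.apache.hadoop.zebra.types.Schema;
import org.apache.hadoop.zebra.types.TypesUtils;

public class RecordTuples {
  // Build a tuple for the nested schema of the named record column,
  // wrapping any parse failure as an IOException, as the snippet above does.
  public static Tuple createRecordTuple(Schema schema, String column)
      throws IOException {
    try {
      return TypesUtils.createTuple(schema.getColumnSchema(column).getSchema());
    } catch (Exception e) {
      throw new IOException(e);
    }
  }
}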

    // Build Table and column groups
    path = new Path(getCurrentMethodName());
    BasicTable.Writer writer = new BasicTable.Writer(path, STR_SCHEMA,
        STR_STORAGE, false, conf);
    writer.finish();
    Schema schema = writer.getSchema();
    Tuple tuple = TypesUtils.createTuple(schema);
    BasicTable.Writer writer1 = new BasicTable.Writer(path, conf);
    int part = 0;
    TableInserter inserter = writer1.getInserter("part" + part, true);
    TypesUtils.resetTuple(tuple);
View Full Code Here
