Examples of TableInserter


Examples of org.apache.hadoop.zebra.io.TableInserter
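
TableInserter is the per-partition write handle obtained from Zebra's BasicTable.Writer or ColumnGroup.Writer; rows are Pig Tuples keyed by a BytesWritable. The snippets below are excerpts from test code. For orientation, here is a minimal self-contained sketch of the common write path; the import locations, output path, and schema/storage strings are assumptions (Schema and TypesUtils moved between packages across Zebra versions), so verify them against your tree:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.BytesWritable;
    import org.apache.hadoop.zebra.io.BasicTable;
    import org.apache.hadoop.zebra.io.TableInserter;
    import org.apache.hadoop.zebra.schema.Schema;
    import org.apache.hadoop.zebra.types.TypesUtils;
    import org.apache.pig.data.Tuple;

    public class TableInserterSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Path path = new Path("/tmp/zebra-example");   // hypothetical output path
        String schemaString = "a:string, b:int";      // hypothetical schema
        String storageString = "[a]; [b]";            // hypothetical storage hint

        BasicTable.Writer writer =
            new BasicTable.Writer(path, schemaString, storageString, conf);
        Schema schema = writer.getSchema();
        Tuple tuple = TypesUtils.createTuple(schema);
        TableInserter inserter = writer.getInserter("ins", false);

        for (int i = 0; i < 10; ++i) {
          TypesUtils.resetTuple(tuple);
          tuple.set(0, "row " + i);  // column a:string
          tuple.set(1, i);           // column b:int
          inserter.insert(new BytesWritable(("key" + i).getBytes()), tuple);
        }
        inserter.close();
        writer.close();
      }
    }

The first excerpt below follows exactly this shape.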

    BasicTable.Writer writer = new BasicTable.Writer(path, schemaString, storageString, conf);
   
    Schema schema = writer.getSchema();
    Tuple tuple = TypesUtils.createTuple(schema);
    TableInserter inserter = writer.getInserter("ins", false);
   
    for (int i = 0; i < tableData.length; ++i) {
      TypesUtils.resetTuple(tuple);
      for (int k = 0; k < tableData[i].length; ++k) {
        tuple.set(k, tableData[i][k]);
        System.out.println("DEBUG: setting tuple k=" + k + "value= " + tableData[i][k]);
      }
      inserter.insert(new BytesWritable(("key" + i).getBytes()), tuple);
    }
    inserter.close();
    writer.close();
  }

Examples of org.apache.hadoop.zebra.io.TableInserter

    schema = new Schema("a:string,b:string,c:string,d:string,e:string,f:string,g:string");

    ColumnGroup.Writer writer = new ColumnGroup.Writer(path, schema, false, path.getName(),
        "pig", "gz", null, null, (short) -1, true, conf);
    TableInserter ins = writer.getInserter("part0", true);

    // row 1
    Tuple row = TypesUtils.createTuple(writer.getSchema());
    row.set(0, "a1");
    row.set(1, "b1");
    row.set(2, "c1");
    row.set(3, "d1");
    row.set(4, "e1");
    row.set(5, "f1");
    row.set(6, "g1");
    ins.insert(new BytesWritable("k1".getBytes()), row);

    // row 2
    TypesUtils.resetTuple(row);
    row.set(0, "a2");
    row.set(1, "b2");
    row.set(2, "c2");
    row.set(3, "d2");
    row.set(4, "e2");
    row.set(5, "f2");
    row.set(6, "g2");
    ins.insert(new BytesWritable("k2".getBytes()), row);
    ins.close();

    writer.close();
  }
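
The seven per-column set calls for each row can also be written as a loop. A compact variant of the same two inserts, assuming the same writer and ins as above and using only calls already shown in this example:

    // Column j of row i gets the letter ('a' + j) followed by the 1-based
    // row number: "a1".."g1" for row 1, "a2".."g2" for row 2.
    Tuple row = TypesUtils.createTuple(writer.getSchema());
    for (int i = 1; i <= 2; ++i) {
      TypesUtils.resetTuple(row);
      for (int j = 0; j < 7; ++j) {
        row.set(j, (char) ('a' + j) + Integer.toString(i));
      }
      ins.insert(new BytesWritable(("k" + i).getBytes()), row);
    }
    ins.close();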

Examples of org.apache.hadoop.zebra.io.TableInserter

    writer.finish();
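    // The writer above was created elsewhere with the table schema and is
    // finish()ed here; the table is then reopened as writer1 to obtain a
    // TableInserter, and writer.close() is called last, after writer1.finish().
    // Several later excerpts follow this same two-phase pattern.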
    Schema schema = writer.getSchema();
    Tuple tuple = TypesUtils.createTuple(schema);
    BasicTable.Writer writer1 = new BasicTable.Writer(path, conf);
    int part = 0;
    TableInserter inserter = writer1.getInserter("part" + part, true);
    TypesUtils.resetTuple(tuple);

    // insert data in row 1
    int row = 0;
    tuple.set(0, true); // bool
    tuple.set(1, 1); // int
    tuple.set(2, 1001L); // long
    tuple.set(3, 1.1f); // float
    tuple.set(4, "hello world 1"); // string
    tuple.set(5, new DataByteArray("hello byte 1")); // byte

    inserter.insert(new BytesWritable(String.format("k%d%d", part + 1, row + 1)
        .getBytes()), tuple);

    // insert data in row 2
    row++;
    tuple.set(0, false);
    tuple.set(1, 2); // int
    tuple.set(2, 1002L); // long
    tuple.set(3, 3.1f); // float
    tuple.set(4, "hello world 2"); // string
    tuple.set(5, new DataByteArray("hello byte 2")); // byte
    inserter.insert(new BytesWritable(String.format("k%d%d", part + 1, row + 1)
        .getBytes()), tuple);

    // finish building table, closing out the inserter, writer, writer1
    inserter.close();
    writer1.finish();
    writer.close();
  }

Examples of org.apache.hadoop.zebra.io.TableInserter

    writer.finish();
    Schema schema = writer.getSchema();
    Tuple tuple = TypesUtils.createTuple(schema);
    BasicTable.Writer writer1 = new BasicTable.Writer(pathTable, conf);
    int part = 0;
    TableInserter inserter = writer1.getInserter("part" + part, true);
    TypesUtils.resetTuple(tuple);

    // All three record tuples share the schema "f1:int, f2:string";
    // parse it once and create the tuples from it.
    Schema recordSchema;
    try {
      recordSchema = new Schema("f1:int, f2:string");
    } catch (ParseException e) {
      throw new IOException(e);
    }
    Tuple record1 = TypesUtils.createTuple(recordSchema);
    Tuple record2 = TypesUtils.createTuple(recordSchema);
    Tuple record3 = TypesUtils.createTuple(recordSchema);

    // add data to row 1
    // m1:map(string)
    Map<String, String> m1 = new HashMap<String, String>();
    m1.put("a", "A");
    m1.put("b", "B");
    m1.put("c", "C");
    tuple.set(0, m1);

    // m2:map(map(int))
    HashMap<String, Map> m2 = new HashMap<String, Map>();
    Map<String, Integer> m31 = new HashMap<String, Integer>();
    m31.put("m311", 311);
    m31.put("m321", 321);
    m31.put("m331", 331);
    Map<String, Integer> m32 = new HashMap<String, Integer>();
    m32.put("m411", 411);
    m32.put("m421", 421);
    m32.put("m431", 431);
    m2.put("x", m31);
    m2.put("y", m32);
    tuple.set(1, m2);

    // m4:map(map(record(f1:int,f2:string)))
    record1.set(0, 11);
    record1.set(1, "record row 1.1");
    Map<String, Tuple> m51 = new HashMap<String, Tuple>();
    Map<String, Tuple> m52 = new HashMap<String, Tuple>();
    Map<String, Tuple> m53 = new HashMap<String, Tuple>();
    m51.put("ma4", (Tuple) record1);
    m52.put("ma41", (Tuple) record1);
    m53.put("ma43", (Tuple) record1);

    record2.set(0, 12);
    record2.set(1, "record row 1.2");
    m51.put("mb4", (Tuple) record2);
    m52.put("mb42", (Tuple) record2);
    m53.put("ma43", (Tuple) record2);
    System.out.println("record1-1: " + record1.toString());

    record3.set(0, 13);
    record3.set(1, "record row 1.3");
    System.out.println("record1-3: " + record1.toString());

    m51.put("mc4", (Tuple) record3);
    m52.put("mc42", (Tuple) record3);
    m53.put("ma43", (Tuple) record3);

    Map<String, Map> m4 = new HashMap<String, Map>();
    m4.put("a4", m51);
    m4.put("b4", m52);
    m4.put("c4", m53);
    m4.put("d4", m53);
    m4.put("ma43", m53);

    tuple.set(2, m4);

    int row = 0;
    inserter.insert(new BytesWritable(String.format("k%d%d", part + 1, row + 1)
        .getBytes()), tuple);

    // row 2
    row++;
    TypesUtils.resetTuple(tuple);
    TypesUtils.resetTuple(record1);
    TypesUtils.resetTuple(record2);
    TypesUtils.resetTuple(record3);
    m1.clear();
    m2.clear();
    m31.clear();
    m32.clear();
    m4.clear();
    m51.clear();
    m52.clear();
    m53.clear();
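    // The same map and tuple instances are cleared and reused for row 2;
    // the test relies on insert() having serialized row 1 at call time, so
    // mutating these objects afterwards does not affect the stored row.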
    // m1:map(string)
    m1.put("a", "A2");
    m1.put("b2", "B2");
    m1.put("c2", "C2");
    tuple.set(0, m1);

    // m2:map(map(int))
    m31.put("m321", 321);
    m31.put("m322", 322);
    m31.put("m323", 323);
    m2.put("z", m31);
    tuple.set(1, m2);

    // m4:map(map(record(f1:int,f2:string)))
    record1.set(0, 21);
    record1.set(1, "record row 2.1");
    m51.put("ma4", (Tuple) record1);
    m52.put("ma41", (Tuple) record1);
    m53.put("ma43", (Tuple) record1);

    record2.set(0, 22);
    record2.set(1, "record row 2.2");
    m51.put("mb4", (Tuple) record2);
    m52.put("mb42", (Tuple) record2);
    m53.put("ma43", (Tuple) record2);

    record3.set(0, 33);
    record3.set(1, "record row 3.3");
    m51.put("mc4", (Tuple) record3);
    m52.put("mc42", (Tuple) record3);
    m53.put("ma43", (Tuple) record3);

    m4.put("a4", m51);
    m4.put("b4", m52);

    m4.put("ma43", m53);

    tuple.set(2, m4);

    inserter.insert(new BytesWritable(String.format("k%d%d", part + 1, row + 1)
        .getBytes()), tuple);

    // finish building table, closing out the inserter, writer, writer1
    inserter.close();
    writer1.finish();
    writer.close();
  }

Examples of org.apache.hadoop.zebra.io.TableInserter

    writer.finish();
    Schema schema = writer.getSchema();
    Tuple tuple = TypesUtils.createTuple(schema);
    BasicTable.Writer writer1 = new BasicTable.Writer(path, conf);
    int part = 0;
    TableInserter inserter = writer1.getInserter("part" + part, true);
    TypesUtils.resetTuple(tuple);

    // The three nested-record tuples: r1 and r2 take their schemas from the
    // table schema; r3's schema is parsed directly.
    Tuple tupRecord1;
    Tuple tupRecord2;
    Tuple tupRecord3;
    try {
      tupRecord1 = TypesUtils.createTuple(schema.getColumnSchema("r1").getSchema());
      tupRecord2 = TypesUtils.createTuple(schema.getColumnSchema("r2").getSchema());
      tupRecord3 = TypesUtils.createTuple(new Schema("f3:float, f4:map(int)"));
    } catch (ParseException e) {
      throw new IOException(e);
    }
    // insert data in row 1
    int row = 0;
    // r1:record(f1:int, f2:long)
    tupRecord1.set(0, 1);
    tupRecord1.set(1, 1001L);
    tuple.set(0, tupRecord1);

    // r2:record(r3:record(f3:float, f4:map(int)))
    HashMap<String, Integer> map = new HashMap<String, Integer>();
    tupRecord2.set(0, tupRecord3);
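    // tupRecord3 is populated below, after being set into tupRecord2; this
    // works because Tuple.set stores a reference to the nested tuple, not a copy.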
    tupRecord3.set(0, 1.3f);
    map.put("a", 1);
    map.put("b", 2);
    map.put("c", 3);
    tupRecord3.set(1, map);
    tuple.set(1, tupRecord2);
    inserter.insert(new BytesWritable(String.format("k%d%d", part + 1, row + 1)
        .getBytes()), tuple);

    // row 2
    row++;
    TypesUtils.resetTuple(tuple);
    TypesUtils.resetTuple(tupRecord1);
    TypesUtils.resetTuple(tupRecord2);
    TypesUtils.resetTuple(tupRecord3);
    // r1:record(f1:int, f2:long)
    tupRecord1.set(0, 2);
    tupRecord1.set(1, 1002L);
    tuple.set(0, tupRecord1);

    // r2:record(r3:record(f3:float, f4:map(int)))
    tupRecord2.set(0, tupRecord3);
    map.clear();
    map.put("x", 11);
    map.put("y", 12);
    map.put("c", 13);

    tupRecord3.set(0, 2.3f);
    tupRecord3.set(1, map);
    tuple.set(1, tupRecord2);

    inserter.insert(new BytesWritable(String.format("k%d%d", part + 1, row + 1)
        .getBytes()), tuple);

    // finish building table, closing out the inserter, writer, writer1
    inserter.close();
    writer1.finish();
    writer.close();
  }
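
For reference, the comments in this excerpt imply the table was created with a nested-record schema along these lines; the exact storage hint is illustrative, not taken from the test:

    // Assumed schema reconstructed from the in-code comments above.
    String schemaString =
        "r1:record(f1:int, f2:long), r2:record(r3:record(f3:float, f4:map(int)))";
    String storageString = "[r1]; [r2]";  // hypothetical column-group layout
    BasicTable.Writer writer =
        new BasicTable.Writer(path, schemaString, storageString, conf);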


Examples of org.apache.hadoop.zebra.io.TableInserter

    BasicTable.Writer writer = new BasicTable.Writer(unsortedPath,
        TABLE_SCHEMA, TABLE_STORAGE, conf);

    Schema schema = writer.getSchema();
    Tuple tuple = TypesUtils.createTuple(schema);
    TableInserter inserter = writer.getInserter("ins", false);

    Map<String, String> m1 = new HashMap<String, String>();

    Tuple tupRecord1; // record
    tupRecord1 = TypesUtils.createTuple(schema.getColumnSchema("r1")
        .getSchema()); // r1 schema

    DataBag bag1 = TypesUtils.createBag();
    Schema schColl = schema.getColumnSchema("c1").getSchema(); // c1 schema
    Tuple tupColl1 = TypesUtils.createTuple(schColl);
    Tuple tupColl2 = TypesUtils.createTuple(schColl);

    int randRange = (int) (numbRows / 10); // random range to allow for duplicate values
    for (int i = 0; i < numbRows; ++i) {
      int random = generator.nextInt(randRange);

      TypesUtils.resetTuple(tuple); // reset row tuple
      m1.clear(); // reset map
      TypesUtils.resetTuple(tupRecord1); // reset record
      TypesUtils.resetTuple(tupColl1); // reset collection
      TypesUtils.resetTuple(tupColl2);
      bag1.clear();

      tuple.set(0, i); // count
      tuple.set(1, seed); // seed

      tuple.set(2, i); // int1
      tuple.set(3, random); // int2
      tuple.set(4, "string " + i); // str1
      tuple.set(5, "string random " + random); // str2
      tuple.set(6, new DataByteArray("byte " + i)); // byte1
      tuple.set(7, new DataByteArray("byte random " + random)); // byte2

      tuple.set(8, (float) -i); // float1 negative
      tuple.set(9, (long) (numbRows - i)); // long1 reverse
      tuple.set(10, (double) (i * 100)); // double1

      // insert map1
      m1.put("a", "m1");
      m1.put("b", "m1 " + i);
      tuple.set(11, m1);

      // insert record1
      tupRecord1.set(0, "r1 " + seed);
      tupRecord1.set(1, "r1 " + i);
      tuple.set(12, tupRecord1);

      // insert collection1
      // tupColl1.set(0, "c1 a " + seed);
      // tupColl1.set(1, "c1 a " + i);
      // bag1.add(tupColl1); // first collection item
      bag1.add(tupRecord1); // first collection item
      bag1.add(tupRecord1); // second collection item

      // tupColl2.set(0, "c1 b " + seed);
      // tupColl2.set(1, "c1 b " + i);
      // bag1.add(tupColl2); // second collection item
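      // NB: the two bag items are the same tupRecord1 instance added twice;
      // the commented-out tupColl1/tupColl2 lines suggest two distinct
      // collection items were originally intended.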

      tuple.set(13, bag1);

      inserter.insert(new BytesWritable(("key" + i).getBytes()), tuple);
    }
    inserter.close();
    writer.close();

    if (debug) {
      // Load tables
      String query1 = "table1 = LOAD '" + unsortedPath.toString()

Examples of org.apache.hadoop.zebra.io.TableInserter

    writer.finish();
    Schema schema = writer.getSchema();
    Tuple tuple = TypesUtils.createTuple(schema);
    BasicTable.Writer writer1 = new BasicTable.Writer(path, conf);
    int part = 0;
    TableInserter inserter = writer1.getInserter("part" + part, true);
    TypesUtils.resetTuple(tuple);

    // add data to row 1
    // m1:map(string)
    Map<String, String> m1 = new HashMap<String, String>();
    m1.put("a", "A");
    m1.put("b", "B");
    m1.put("c", "C");
    tuple.set(0, m1);

    // m2:map(map(int))
    HashMap<String, Map> m2 = new HashMap<String, Map>();
    Map<String, Integer> m3 = new HashMap<String, Integer>();
    m3.put("m311", 311);
    m3.put("m321", 321);
    m3.put("m331", 331);
    Map<String, Integer> m4 = new HashMap<String, Integer>();
    m4.put("m411", 411);
    m4.put("m421", 421);
    m4.put("m431", 431);
    m2.put("x", m3);
    m2.put("y", m4);
    tuple.set(1, m2);
    int row = 0;
    inserter.insert(new BytesWritable(String.format("k%d%d", part + 1, row + 1)
        .getBytes()), tuple);

    // row 2
    row++;
    TypesUtils.resetTuple(tuple);
    m1.clear();
    m2.clear();
    m3.clear();
    m4.clear();
    // m1:map(string)
    m1.put("a", "A2");
    m1.put("b2", "B2");
    m1.put("c2", "C2");
    tuple.set(0, m1);

    // m2:map(map(int))
    m3.put("m321", 321);
    m3.put("m322", 322);
    m3.put("m323", 323);
    m2.put("z", m3);
    tuple.set(1, m2);
    inserter.insert(new BytesWritable(String.format("k%d%d", part + 1, row + 1)
        .getBytes()), tuple);

    // finish building table, closing out the inserter, writer, writer1
    inserter.close();
    writer1.finish();
    writer.close();
  }


Examples of org.apache.hadoop.zebra.io.TableInserter

      ParseException {
    System.out.println("testInsert2Inserters");
    boolean sorted = false;
    writer = new ColumnGroup.Writer(path, "col1:string, colTWO:map(string)", sorted, path.getName(), "pig",
        "gz", null, null, (short) -1, true, conf);
    TableInserter ins1 = writer.getInserter("part1", false);
    TableInserter ins2 = writer.getInserter("part2", false);
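    // Two inserters on one unsorted column group: each part name ("part1",
    // "part2") identifies that inserter's output within the column group.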

    // row 1
    Tuple row = TypesUtils.createTuple(writer.getSchema());
    row.set(0, "val1");

    SortedMap<String, String> map = new TreeMap<String, String>();
    map.put("john", "boy");
    row.set(1, map);

    ins1.insert(new BytesWritable("key11".getBytes()), row);
    ins2.insert(new BytesWritable("key21".getBytes()), row);

    // row 2
    TypesUtils.resetTuple(row);
    row.set(0, "val2");
    map.put("joe", "boy");
    map.put("jane", "girl");
    // map should contain 3 k->v pairs
    row.set(1, map);

    ins2.insert(new BytesWritable("key22".getBytes()), row);
    ins2.insert(new BytesWritable("key23".getBytes()), row);
    // ins2.close();
    BytesWritable key12 = new BytesWritable("key12".getBytes());
    ins1.insert(key12, row);

    ins1.close();
    ins2.close();
    finish();

    // test without beginKey/endKey
    ColumnGroup.Reader reader = new ColumnGroup.Reader(path, conf);
    List<CGRangeSplit> listRanges = reader.rangeSplit(2);