Examples of TableInserter
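
TableInserter is the row-insertion handle of the Zebra storage layer: a BasicTable.Writer or ColumnGroup.Writer hands one out per part file via getInserter, each row is appended with insert(BytesWritable key, Tuple value), and the inserter is closed before the writer is finished or closed. The examples below, apparently collected from the Zebra test suite, all follow that lifecycle. First, a minimal sketch of the pattern; the output path, schema string, and storage string here are illustrative assumptions, not values taken from any one example:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.zebra.io.BasicTable;
import org.apache.hadoop.zebra.io.TableInserter;
import org.apache.hadoop.zebra.types.TypesUtils;
import org.apache.pig.data.Tuple;

public class TableInserterLifecycle {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // Illustrative output path; any writable HDFS or local path works.
    Path path = new Path("/tmp/zebra-example");

    // Create the table with illustrative schema and storage strings,
    // then obtain an inserter for a single part file.
    BasicTable.Writer writer =
        new BasicTable.Writer(path, "a:string, b:string", "[a, b]", conf);
    Tuple tuple = TypesUtils.createTuple(writer.getSchema());
    TableInserter inserter = writer.getInserter("part-0", true);

    // Append rows: each insert pairs a BytesWritable key with a Tuple.
    for (int row = 0; row < 2; ++row) {
      TypesUtils.resetTuple(tuple);
      tuple.set(0, "a" + row);
      tuple.set(1, "b" + row);
      inserter.insert(new BytesWritable(("key" + row).getBytes()), tuple);
    }

    // Close the inserter before closing the writer, as every example below does.
    inserter.close();
    writer.close();
  }
}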


Examples of org.apache.hadoop.zebra.io.TableInserter
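This fragment, apparently from a ColumnGroup test harness, shuffles part-file indices and then writes a random number of rows into each part named via String.format("part-%06d", ...); parts listed in emptyTFileSet are left empty, and keys are generated either randomly or in sorted order.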

      permutation[targetIndex] = tmp;
    }

    for (int i = 0; i < parts; ++i) {
      writer = new ColumnGroup.Writer(path, conf);
      TableInserter inserter = writer.getInserter(String.format("part-%06d",
          permutation[i]), true);
      if ((rows > 0) && !emptyTFileSet.contains(permutation[i])) {
        int actualRows = random.nextInt(rows) + rows / 2;
        for (int j = 0; j < actualRows; ++j, ++total) {
          BytesWritable key;
          if (!sorted) {
            key = makeRandomKey(rows * 10);
          } else {
            key = makeKey(total);
          }
          TypesUtils.resetTuple(tuple);
          for (int k = 0; k < tuple.size(); ++k) {
            try {
              tuple.set(k, makeString("col-" + colNames[k], rows * 10));
            } catch (ExecException e) {
              e.printStackTrace();
            }
          }
          inserter.insert(key, tuple);
        }
      }
      inserter.close();
    }

    if (properClose) {
      writer = new ColumnGroup.Writer(path, conf);
      writer.close();

Examples of org.apache.hadoop.zebra.io.TableInserter
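A close variant of the previous fragment: keys are drawn from a keyGen sequence, every part receives between two-thirds and four-thirds of the nominal row count, and after the loop the column group is reopened and closed so its BasicTableStatus can be read.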

      permutation[targetIndex] = tmp;
    }

    for (int i = 0; i < parts; ++i) {
      writer = new ColumnGroup.Writer(path, conf);
      TableInserter inserter = writer.getInserter(String.format("part-%06d",
          permutation[i]), true);
      if (rows > 0) {
        int actualRows = random.nextInt(rows * 2 / 3) + rows * 2 / 3;
        for (int j = 0; j < actualRows; ++j, ++total) {
          BytesWritable key = keyGen.next();
          TypesUtils.resetTuple(tuple);
          for (int k = 0; k < tuple.size(); ++k) {
            try {
              tuple.set(k, makeString("col-" + colNames[k], rows * 10));
            } catch (ExecException e) {
              e.printStackTrace();
            }
          }
          inserter.insert(key, tuple);
        }
      }
      inserter.close();
    }

    writer = new ColumnGroup.Writer(path, conf);
    writer.close();
    BasicTableStatus status = getStatus(path);

Examples of org.apache.hadoop.zebra.io.TableInserter
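Here a BasicTable row mixes complex types: a bool column, a record column r of (int, long), a string-to-string map, and a bag of (double, float, bytes) tuples; two such rows are inserted under keys derived from the part and row numbers.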

    Schema schema = writer.getSchema();
    Tuple tuple = TypesUtils.createTuple(schema);

    BasicTable.Writer writer1 = new BasicTable.Writer(path, conf);
    int part = 0;
    TableInserter inserter = writer1.getInserter("part" + part, true);
    TypesUtils.resetTuple(tuple);

    tuple.set(0, true);

    Tuple tupRecord;
    try {
      tupRecord = TypesUtils.createTuple(schema.getColumnSchema("r")
          .getSchema());
    } catch (ParseException e) {
      e.printStackTrace();
      throw new IOException(e);
    }

    // row 1
    tupRecord.set(0, 1);
    tupRecord.set(1, 1001L);
    tuple.set(1, tupRecord);

    Map<String, String> map = new HashMap<String, String>();
    map.put("a", "x");
    map.put("b", "y");
    map.put("c", "z");
    tuple.set(2, map);

    DataBag bagColl = TypesUtils.createBag();
    Schema schColl = schema.getColumn(3).getSchema();
    Tuple tupColl1 = TypesUtils.createTuple(schColl);
    Tuple tupColl2 = TypesUtils.createTuple(schColl);
    byte[] abs1 = new byte[3];
    byte[] abs2 = new byte[4];
    tupColl1.set(0, 3.1415926);
    tupColl1.set(1, 1.6);
    abs1[0] = 11;
    abs1[1] = 12;
    abs1[2] = 13;
    tupColl1.set(2, new DataByteArray(abs1));
    bagColl.add(tupColl1);
    tupColl2.set(0, 123.456789);
    tupColl2.set(1, 100);
    abs2[0] = 21;
    abs2[1] = 22;
    abs2[2] = 23;
    abs2[3] = 24;
    tupColl2.set(2, new DataByteArray(abs2));
    bagColl.add(tupColl2);
    tuple.set(3, bagColl);

    int row = 0;
    inserter.insert(new BytesWritable(String.format("k%d%d", part + 1, row + 1)
        .getBytes()), tuple);

    // row 2
    row++;
    TypesUtils.resetTuple(tuple);
    TypesUtils.resetTuple(tupRecord);
    map.clear();
    tuple.set(0, false);
    tupRecord.set(0, 2);
    tupRecord.set(1, 1002L);
    tuple.set(1, tupRecord);
    map.put("boy", "girl");
    map.put("adam", "amy");
    map.put("bob", "becky");
    map.put("carl", "cathy");
    tuple.set(2, map);
    bagColl.clear();
    TypesUtils.resetTuple(tupColl1);
    TypesUtils.resetTuple(tupColl2);
    tupColl1.set(0, 7654.321);
    tupColl1.set(1, 0.0001);
    abs1[0] = 31;
    abs1[1] = 32;
    abs1[2] = 33;
    tupColl1.set(2, new DataByteArray(abs1));
    bagColl.add(tupColl1);
    tupColl2.set(0, 0.123456789);
    tupColl2.set(1, 0.3333);
    abs2[0] = 41;
    abs2[1] = 42;
    abs2[2] = 43;
    abs2[3] = 44;
    tupColl2.set(2, new DataByteArray(abs2));
    bagColl.add(tupColl2);
    tuple.set(3, bagColl);
    inserter.insert(new BytesWritable(String.format("k%d%d", part + 1, row + 1)
        .getBytes()), tuple);

    inserter.close();
    writer1.finish();

    writer.close();
  }


Examples of org.apache.hadoop.zebra.io.TableInserter
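The same record/map/collection layout as the earlier two-row example, reduced to a single inserted row.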

    Schema schema = writer.getSchema();
    Tuple tuple = TypesUtils.createTuple(schema);

    BasicTable.Writer writer1 = new BasicTable.Writer(path, conf);
    int part = 0;
    TableInserter inserter = writer1.getInserter("part" + part, true);
    TypesUtils.resetTuple(tuple);

    tuple.set(0, true);

    Tuple tupRecord;
    try {
      tupRecord = TypesUtils.createTuple(schema.getColumnSchema("r")
          .getSchema());
    } catch (ParseException e) {
      e.printStackTrace();
      throw new IOException(e);
    }
    tupRecord.set(0, 1);
    tupRecord.set(1, 1001L);
    tuple.set(1, tupRecord);

    Map<String, String> map = new HashMap<String, String>();
    map.put("a", "x");
    map.put("b", "y");
    map.put("c", "z");
    tuple.set(2, map);

    DataBag bagColl = TypesUtils.createBag();
    Schema schColl = schema.getColumn(3).getSchema();
    Tuple tupColl1 = TypesUtils.createTuple(schColl);
    Tuple tupColl2 = TypesUtils.createTuple(schColl);
    byte[] abs1 = new byte[3];
    byte[] abs2 = new byte[4];
    tupColl1.set(0, 3.1415926);
    tupColl1.set(1, 1.6);
    abs1[0] = 11;
    abs1[1] = 12;
    abs1[2] = 13;
    tupColl1.set(2, new DataByteArray(abs1));
    bagColl.add(tupColl1);
    tupColl2.set(0, 123.456789);
    tupColl2.set(1, 100);
    abs2[0] = 21;
    abs2[1] = 22;
    abs2[2] = 23;
    abs2[3] = 24;
    tupColl2.set(2, new DataByteArray(abs2));
    bagColl.add(tupColl2);
    tuple.set(3, bagColl);

    int row = 0;
    inserter.insert(new BytesWritable(String.format("k%d%d", part + 1, row + 1)
        .getBytes()), tuple);
    inserter.close();
    writer1.finish();

    writer.close();
  }

Examples of org.apache.hadoop.zebra.io.TableInserter
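This fragment exercises map-heavy schemas: four byte-array columns, three string-to-string maps, and a collection whose tuples each carry a map, written out as two rows.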

    writer.finish();
    Schema schema = writer.getSchema();
    Tuple tuple = TypesUtils.createTuple(schema);
    BasicTable.Writer writer1 = new BasicTable.Writer(path, conf);
    int part = 0;
    TableInserter inserter = writer1.getInserter("part" + part, true);
    TypesUtils.resetTuple(tuple);

    DataBag bag2 = TypesUtils.createBag();
    Schema schColl2 = schema.getColumn(7).getSchema();
    Tuple tupColl2_1 = TypesUtils.createTuple(schColl2);
    Tuple tupColl2_2 = TypesUtils.createTuple(schColl2);
    // add data to row 1
    tuple.set(0, new DataByteArray("column1 row 1 ")); // byte
    tuple.set(1, new DataByteArray("column2 row 1"));
    tuple.set(2, new DataByteArray("column3 row 1"));
    tuple.set(3, new DataByteArray("column4 row 1"));
   
    // column5
    Map<String, String> column5 = new HashMap<String, String>();
    column5.put("key51", "key511");
    column5.put("key52", "key521");
    column5.put("key53", "key531");
    column5.put("key54", "key541");
    column5.put("key55", "key551");
    column5.put("key56", "key561");
    column5.put("key57", "key571");
  
    tuple.set(4, column5);

    // column5:map(bytes), column6:map(bytes), column7:map(bytes), column8:collection(f1:map(bytes))
    // column7:map(string), column6:map(string)
    HashMap<String, String> column7 = new HashMap<String, String>();
    HashMap<String, String> column6 = new HashMap<String, String>();
    column6.put("column61", "column61");
    column7.put("key71", "key711");
    column7.put("key72", "key721");
    column7.put("key73", "key731");
    column7.put("key74", "key741");
    column7.put("key75", "key751");
    tuple.set(6, column7);
    tuple.set(5, column6);
   
    //column8:collection(f1:map(bytes))
    HashMap<String, String> mapInCollection = new HashMap<String, String>();
    mapInCollection.put("mc", "mc1");
    tupColl2_1.set(0, mapInCollection);
    bag2.add(tupColl2_1);

    tupColl2_2.set(0, mapInCollection);
    bag2.add(tupColl2_2);
    tuple.set(7, bag2);

    int row = 0;
    inserter.insert(new BytesWritable(String.format("k%d%d", part + 1, row + 1)
        .getBytes()), tuple);

    // row 2
    row++;
    TypesUtils.resetTuple(tuple);
    column5.clear();
    column7.clear();
    column6.clear();
    mapInCollection.clear();
    bag2.clear();
    TypesUtils.resetTuple(tupColl2_1);
    TypesUtils.resetTuple(tupColl2_2);
   
    tuple.set(0, new DataByteArray("column1 row 2 ")); // byte
    tuple.set(1, new DataByteArray("column2 row 2"));
    tuple.set(2, new DataByteArray("column3 row 2"));
    tuple.set(3, new DataByteArray("column4 row 2"));
   
    // column5
    column5.put("key51", "key512");
    column5.put("key52", "key522");
    column5.put("key53", "key532");
    column5.put("key54", "key542");
    column5.put("key55", "key552");
    column5.put("key56", "key562");
    column5.put("key57", "key572");
    tuple.set(4, column5);

    // column6
 
    column6.put("column6", "column62");
    column7.put("key71", "key712");
    column7.put("key72", "key722");
    column7.put("key73", "key732");
    column7.put("key74", "key742");
    column7.put("key75", "key752");
    tuple.set(6, column7);
    tuple.set(5, column6);
   
   
    //column8
    //column8:collection(f1:map(bytes))
    mapInCollection.put("mc", "mc2");
    tupColl2_1.set(0, mapInCollection);
    bag2.add(tupColl2_1);

    tupColl2_2.set(0, mapInCollection);
    bag2.add(tupColl2_2);
    tuple.set(7, bag2);

   
    inserter.insert(new BytesWritable(String.format("k%d%d", part + 1, row + 1)
        .getBytes()), tuple);

    // finish building table, closing out the inserter, writer, writer1
    inserter.close();
    writer1.finish();
    writer.close();
  }

Examples of org.apache.hadoop.zebra.io.TableInserter
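A compact utility pattern: create a BasicTable from schema and storage strings, then turn each row of a tableData array into a tuple and insert it under a synthetic key.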

    // Create table from tableData array
    BasicTable.Writer writer = new BasicTable.Writer(path, schemaString, storageString, conf);
   
    Schema schema = writer.getSchema();
    Tuple tuple = TypesUtils.createTuple(schema);
    TableInserter inserter = writer.getInserter("ins", false);
   
    for (int i = 0; i < tableData.length; ++i) {
      TypesUtils.resetTuple(tuple);
      for (int k = 0; k < tableData[i].length; ++k) {
        tuple.set(k, tableData[i][k]);
        System.out.println("DEBUG: setting tuple k=" + k + "value= " + tableData[i][k]);
      }
      inserter.insert(new BytesWritable(("key" + i).getBytes()), tuple);
    }
    inserter.close();
    writer.close();
  }

Examples of org.apache.hadoop.zebra.io.TableInserter
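This longer fragment builds two basic tables in sequence (pathTable1 and pathTable2), each holding a collection of (string, string) tuples, a map, a record, and a plain string column, with two rows inserted into each.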

    Schema schema = writer.getSchema();
    Tuple tuple = TypesUtils.createTuple(schema);

    BasicTable.Writer writer1 = new BasicTable.Writer(pathTable1, conf);
    int part = 0;
    TableInserter inserter = writer1.getInserter("part" + part, true);

    TypesUtils.resetTuple(tuple);
    DataBag bag1 = TypesUtils.createBag();
    Schema schColl = schema.getColumn(0).getSchema();
    Tuple tupColl1 = TypesUtils.createTuple(schColl);
    Tuple tupColl2 = TypesUtils.createTuple(schColl);

    int row = 0;
    tupColl1.set(0, "1.1");
    tupColl1.set(1, "1.11");
    bag1.add(tupColl1);
    tupColl2.set(0, "1.111");
    tupColl2.set(1, "1.1111");
    bag1.add(tupColl2);
    tuple.set(0, bag1);

    Map<String, String> m1 = new HashMap<String, String>();
    m1.put("k1", "k11");
    m1.put("b", "b1");
    m1.put("c", "c1");
    tuple.set(1, m1);

    Tuple tupRecord1;
    try {
      tupRecord1 = TypesUtils.createTuple(schema.getColumnSchema("c")
          .getSchema());
    } catch (ParseException e) {
      e.printStackTrace();
      throw new IOException(e);
    }

    tupRecord1.set(0, "1");
    tupRecord1.set(1, "hello1");
    tuple.set(2, tupRecord1);
    tuple.set(3, "world1");

    inserter.insert(new BytesWritable(String.format("k%d%d", part + 1, row + 1)
        .getBytes()), tuple);

    // second row
    row++;
    TypesUtils.resetTuple(tuple);
    TypesUtils.resetTuple(tupRecord1);
    TypesUtils.resetTuple(tupColl1);
    TypesUtils.resetTuple(tupColl2);
    m1.clear();
    bag1.clear();

    tupColl1.set(0, "2.2");
    tupColl1.set(1, "2.22");
    bag1.add(tupColl1);
    tupColl2.set(0, "2.222");
    tupColl2.set(1, "2.2222");
    bag1.add(tupColl2);
    tuple.set(0, bag1);

    m1.put("k2", "k22");
    m1.put("k3", "k32");
    m1.put("k1", "k12");
    m1.put("k4", "k42");
    tuple.set(1, m1);

    tupRecord1.set(0, "2");
    tupRecord1.set(1, "hello2");
    tuple.set(2, tupRecord1);
    tuple.set(3, "world2");

    inserter.insert(new BytesWritable(String.format("k%d%d", part + 1, row + 1)
        .getBytes()), tuple);
    inserter.close();
    writer1.finish();
    writer.close();

    /*
     * create 2nd basic table;
     */
    pathTable2 = new Path(pathWorking, "2");
    System.out.println("pathTable2 =" + pathTable2);

    BasicTable.Writer writer2 = new BasicTable.Writer(pathTable2, STR_SCHEMA2,
        STR_STORAGE2, conf);
    Schema schema2 = writer2.getSchema();

    Tuple tuple2 = TypesUtils.createTuple(schema2);

    BasicTable.Writer writer22 = new BasicTable.Writer(pathTable2, conf);
    part = 0;
    TableInserter inserter2 = writer22.getInserter("part" + part, true);

    TypesUtils.resetTuple(tuple2);
    TypesUtils.resetTuple(tuple);
    TypesUtils.resetTuple(tupRecord1);
    TypesUtils.resetTuple(tupColl1);
    TypesUtils.resetTuple(tupColl2);
    m1.clear();
    bag1.clear();

    row = 0;
    tupColl1.set(0, "3.3");
    tupColl1.set(1, "3.33");
    bag1.add(tupColl1);
    tupColl2.set(0, "3.333");
    tupColl2.set(1, "3.3333");
    bag1.add(tupColl2);
    tuple2.set(0, bag1);

    m1.put("k1", "k13");
    m1.put("b", "b3");
    m1.put("c", "c3");
    tuple2.set(1, m1);

    tupRecord1.set(0, "3");
    tupRecord1.set(1, "hello3");
    tuple2.set(2, tupRecord1);
    tuple2.set(3, "world13");

    inserter2.insert(new BytesWritable(String
        .format("k%d%d", part + 1, row + 1).getBytes()), tuple2);

    // second row
    row++;
    TypesUtils.resetTuple(tuple2);
    TypesUtils.resetTuple(tupRecord1);
    TypesUtils.resetTuple(tupColl1);
    TypesUtils.resetTuple(tupColl2);
    bag1.clear();
    m1.clear();

    tupColl1.set(0, "4.4");
    tupColl1.set(1, "4.44");
    bag1.add(tupColl1);
    tupColl2.set(0, "4.444");
    tupColl2.set(1, "4.4444");
    bag1.add(tupColl2);
    tuple2.set(0, bag1);

    m1.put("k2", "k24");
    m1.put("k3", "k34");
    m1.put("k1", "k14");
    m1.put("k4", "k44");
    tuple2.set(1, m1);

    tupRecord1.set(0, "4");
    tupRecord1.set(1, "hello4");
    tuple2.set(2, tupRecord1);
    tuple2.set(3, "world4");

    inserter2.insert(new BytesWritable(String
        .format("k%d%d", part + 1, row + 1).getBytes()), tuple2);
    inserter2.close();
    writer22.finish();
    writer2.close();

  }

Examples of org.apache.hadoop.zebra.io.TableInserter
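The widest example on the page: each row touches every Zebra column type — bool, int, long, float, string, bytes, a flat record, a nested record, a string map, a map of maps, a collection, and a run of string columns s7 through s23.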

    Schema schema = writer.getSchema();
    Tuple tuple = TypesUtils.createTuple(schema);
    BasicTable.Writer writer1 = new BasicTable.Writer(path, conf);
    int part = 0;
    TableInserter inserter = null;
    try {
      inserter = writer1.getInserter("part" + part, true);
    } catch (Exception e) {
      e.printStackTrace();
    }
    TypesUtils.resetTuple(tuple);
    Tuple tupRecord1 = null;
    try {
      tupRecord1 = TypesUtils.createTuple(schema.getColumnSchema("r1")
          .getSchema());
    } catch (ParseException e) {
      e.printStackTrace();
    }

    Tuple tupRecord2 = null;
    try {
      tupRecord2 = TypesUtils.createTuple(schema.getColumnSchema("r2")
          .getSchema());
    } catch (ParseException e) {
      e.printStackTrace();
    }

    Tuple tupRecord3 = null;
    try {
      tupRecord3 = TypesUtils.createTuple(new Schema("f3:float, f4"));
    } catch (ParseException e) {
      e.printStackTrace();
    }

    // row 1
    tuple.set(0, true); // bool
    tuple.set(1, 1); // int
    tuple.set(2, 1001L); // long
    tuple.set(3, 1.1); // float
    tuple.set(4, "hello world 1"); // string
    tuple.set(5, new DataByteArray("hello byte 1")); // byte

    // r1:record(f1:int, f2:long)
    tupRecord1.set(0, 1);
    tupRecord1.set(1, 1001L);
    tuple.set(6, tupRecord1);

    // r2:record(r3:record(f3:float, f4))
    tupRecord2.set(0, tupRecord3);
    tupRecord3.set(0, 1.3);
    tupRecord3.set(1, new DataByteArray("r3 row 1 byte array "));
    tuple.set(7, tupRecord2);

    // m1:map(string)
    Map<String, String> m1 = new HashMap<String, String>();
    m1.put("a", "A");
    m1.put("b", "B");
    m1.put("c", "C");
    tuple.set(8, m1);

    // m2:map(map(int))
    HashMap<String, Map> m2 = new HashMap<String, Map>();
    Map<String, Integer> m3 = new HashMap<String, Integer>();
    m3.put("m311", 311);
    m3.put("m321", 321);
    m3.put("m331", 331);
    Map<String, Integer> m4 = new HashMap<String, Integer>();
    m4.put("m411", 411);
    m4.put("m421", 421);
    m4.put("m431", 431);
    m2.put("x", m3);
    m2.put("y", m4);
    tuple.set(9, m2);

    // c:collection(f13:double, f14:float, f15:bytes)
    DataBag bagColl = TypesUtils.createBag();
    Schema schColl = schema.getColumn(10).getSchema();
    Tuple tupColl1 = TypesUtils.createTuple(schColl);
    Tuple tupColl2 = TypesUtils.createTuple(schColl);
    byte[] abs1 = new byte[3];
    byte[] abs2 = new byte[4];
    tupColl1.set(0, 3.1415926);
    tupColl1.set(1, 1.6);
    abs1[0] = 11;
    abs1[1] = 12;
    abs1[2] = 13;
    tupColl1.set(2, new DataByteArray(abs1));
    bagColl.add(tupColl1);
    tupColl2.set(0, 123.456789);
    tupColl2.set(1, 100);
    abs2[0] = 21;
    abs2[1] = 22;
    abs2[2] = 23;
    abs2[3] = 24;
    tupColl2.set(2, new DataByteArray(abs2));
    bagColl.add(tupColl2);
    tuple.set(10, bagColl);

    // set s7 to s23
    for (int i = 7; i <= 23; i++) {
      tuple.set(i + 4, "s" + "i" + ", line1");
    }

    int row = 0;
    inserter.insert(new BytesWritable(String.format("k%d%d", part + 1, row + 1)
        .getBytes()), tuple);

    // row 2
    row++;
    TypesUtils.resetTuple(tuple);
    TypesUtils.resetTuple(tupRecord1);
    TypesUtils.resetTuple(tupRecord2);
    TypesUtils.resetTuple(tupRecord3);
    m1.clear();
    m2.clear();
    m3.clear();
    m4.clear();
    tuple.set(0, false);
    tuple.set(1, 2); // int
    tuple.set(2, 1002L); // long
    tuple.set(3, 3.1); // float
    tuple.set(4, "hello world 2"); // string
    tuple.set(5, new DataByteArray("hello byte 2")); // byte

    // r1:record(f1:int, f2:long)
    tupRecord1.set(0, 2);
    tupRecord1.set(1, 1002L);
    tuple.set(6, tupRecord1);

    // r2:record(r3:record(f3:float, f4))
    tupRecord2.set(0, tupRecord3);
    tupRecord3.set(0, 2.3);
    tupRecord3.set(1, new DataByteArray("r3 row2  byte array"));
    tuple.set(7, tupRecord2);

    // m1:map(string)
    m1.put("a2", "A2");
    m1.put("b2", "B2");
    m1.put("c2", "C2");
    tuple.set(8, m1);

    // m2:map(map(int))
    m3.put("m321", 321);
    m3.put("m322", 322);
    m3.put("m323", 323);
    m2.put("z", m3);
    tuple.set(9, m2);

    // c:collection(f13:double, f14:float, f15:bytes)
    bagColl.clear();
    TypesUtils.resetTuple(tupColl1);
    TypesUtils.resetTuple(tupColl2);
    tupColl1.set(0, 7654.321);
    tupColl1.set(1, 0.0001);
    abs1[0] = 31;
    abs1[1] = 32;
    abs1[2] = 33;
    tupColl1.set(2, new DataByteArray(abs1));
    bagColl.add(tupColl1);
    tupColl2.set(0, 0.123456789);
    tupColl2.set(1, 0.3333);
    abs2[0] = 41;
    abs2[1] = 42;
    abs2[2] = 43;
    abs2[3] = 44;
    tupColl2.set(2, new DataByteArray(abs2));
    bagColl.add(tupColl2);
    tuple.set(10, bagColl);
    // set s7 to s23
    for (int i = 7; i <= 23; i++) {
      tuple.set(i + 4, "s" + "i" + ", line2");
    }
    inserter.insert(new BytesWritable(String.format("k%d%d", part + 1, row + 1)
        .getBytes()), tuple);

    inserter.close();
    writer1.finish();

    writer.close();
  }

Examples of org.apache.hadoop.zebra.io.TableInserter
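Finally, a self-contained helper: insertData opens a writer on the given path, inserts two two-column rows on behalf of myuser, closes the inserter, and finishes the writer.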

  void insertData(String myuser, Schema schema, Path path) throws IOException {
    System.out.println(myuser + " is inserting table...");
    Tuple tuple = TypesUtils.createTuple(schema);
    BasicTable.Writer writer1 = new BasicTable.Writer(path, conf);
    int part = 0;
    TableInserter inserter = writer1.getInserter("part" + part, true);
    TypesUtils.resetTuple(tuple);

    // row 1
    tuple.set(0, "column1_1");
    tuple.set(1, "column2_1");
    int row = 0;
    inserter.insert(new BytesWritable(String.format("k%d%d", part + 1, row + 1)
        .getBytes()), tuple);

    // row 2
    row++;
    TypesUtils.resetTuple(tuple);
    tuple.set(0, "column1_2");
    tuple.set(1, "column2_2");
    inserter.insert(new BytesWritable(String.format("k%d%d", part + 1, row + 1)
        .getBytes()), tuple);
    inserter.close();
    writer1.finish();
  }