Examples of BytesWritable
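
org.apache.hadoop.io.BytesWritable is a Writable wrapper around a resizable byte array, usable as a key or value type in Hadoop MapReduce. The snippets below, apparently drawn from the tests of Zebra (Pig's table-storage layer), show typical ways of building, reusing, and comparing BytesWritable row keys.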


Examples of org.apache.hadoop.io.BytesWritable

  // Generate a random key below max.
  BytesWritable makeRandomKey(int max) {
    return makeKey(random.nextInt(max));
  }

  // Encode an integer as a fixed-width decimal string so that byte-wise
  // key comparison matches numeric order.
  static BytesWritable makeKey(int i) {
    return new BytesWritable(String.format("key%09d", i).getBytes());
  }
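The zero padding in makeKey matters: BytesWritable compares byte-by-byte, so fixed-width decimal strings sort in numeric order. A minimal, self-contained sketch of that property (the class name is illustrative, not part of the original test):

  import org.apache.hadoop.io.BytesWritable;

  public class KeyOrderDemo {
    static BytesWritable makeKey(int i) {
      return new BytesWritable(String.format("key%09d", i).getBytes());
    }

    public static void main(String[] args) {
      // Byte-wise comparison of zero-padded keys matches numeric order.
      System.out.println(makeKey(2).compareTo(makeKey(10)) < 0); // true
      // An unpadded "key10" would otherwise sort before "key2".
    }
  }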

Examples of org.apache.hadoop.io.BytesWritable

      // One inserter per partition file; names follow "part-%06d".
      TableInserter inserter = writer.getInserter(String.format("part-%06d",
          permutation[i]), true);
      if ((rows > 0) && !emptyTFileSet.contains(permutation[i])) {
        // Insert a randomized number of rows, at least rows/2.
        int actualRows = random.nextInt(rows) + rows / 2;
        for (int j = 0; j < actualRows; ++j, ++total) {
          BytesWritable key;
          if (!sorted) {
            key = makeRandomKey(rows * 10);
          } else {
            // Sorted tables need monotonically non-decreasing keys, so use
            // the running row count.
            key = makeKey(total);
          }

Examples of org.apache.hadoop.io.BytesWritable

      TableInserter inserter = writer.getInserter(String.format("part-%06d",
          permutation[i]), true);
      if (rows > 0) {
        // Insert a randomized number of rows, at least 2*rows/3.
        int actualRows = random.nextInt(rows * 2 / 3) + rows * 2 / 3;
        for (int j = 0; j < actualRows; ++j, ++total) {
          BytesWritable key = keyGen.next();
          TypesUtils.resetTuple(tuple);
          for (int k = 0; k < tuple.size(); ++k) {
            try {
              tuple.set(k, makeString("col-" + colNames[k], rows * 10));
            } catch (ExecException e) {
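The keyGen above abstracts over sorted versus random key generation. A hypothetical generator along those lines (the class and field names are assumptions, not the original test's code):

  import java.util.Random;
  import org.apache.hadoop.io.BytesWritable;

  class KeyGenerator {
    private final boolean sorted;
    private final Random random = new Random();
    private int next = 0;

    KeyGenerator(boolean sorted) {
      this.sorted = sorted;
    }

    BytesWritable next() {
      // Sorted tables need monotonically increasing keys; unsorted ones
      // can take any key.
      int i = sorted ? next++ : random.nextInt(Integer.MAX_VALUE);
      return new BytesWritable(String.format("key%09d", i).getBytes());
    }
  }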

Examples of org.apache.hadoop.io.BytesWritable

   
    // Copy each raw key into a freshly allocated BytesWritable.
    RawComparable[] rawKeys = keyDistri.getKeys();
    BytesWritable[] keys = new BytesWritable[rawKeys.length];
    for (int i = 0; i < keys.length; ++i) {
      RawComparable rawKey = rawKeys[i];
      keys[i] = new BytesWritable();
      keys[i].setSize(rawKey.size());
      System.arraycopy(rawKey.buffer(), rawKey.offset(), keys[i].get(), 0,
          rawKey.size());
    }

    // TODO: Should we change to RawComparable to avoid the creation of
    // BytesWritables?
    for (int i = 0; i < keys.length; ++i) {
      // A null begin/end marks an open-ended range (from the first row, or
      // through the last row, respectively).
      BytesWritable begin = (i == 0) ? null : keys[i - 1];
      BytesWritable end = (i == keys.length - 1) ? null : keys[i];
      BlockDistribution bd = keyDistri.getBlockDistribution(keys[i]);
      SortedTableSplit split = new SortedTableSplit(begin, end, bd, conf);
      splits.add(split);
    }
    return splits.toArray(new InputSplit[splits.size()]);
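The copy loop above (and the TODO) come up because RawComparable exposes only a (buffer, offset, size) view, while the split API wants BytesWritable. A small helper makes the conversion explicit; toBytesWritable is a hypothetical name, not part of the original code:

  static BytesWritable toBytesWritable(RawComparable raw) {
    BytesWritable bw = new BytesWritable();
    bw.setSize(raw.size());  // grows the backing buffer if needed
    System.arraycopy(raw.buffer(), raw.offset(), bw.get(), 0, raw.size());
    return bw;
  }

Note that get() returns the whole backing buffer, which may be larger than the valid data; only the first getLength() bytes are meaningful. (Later Hadoop releases deprecate get()/getSize() in favor of getBytes()/getLength().)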

Examples of org.apache.hadoop.io.BytesWritable

      keyDistri.resize(numSplits);
      Assert.assertEquals(totalBytes, keyDistri.length());
      RawComparable[] rawComparables = keyDistri.getKeys();
      keys = new BytesWritable[rawComparables.length];
      for (int i = 0; i < keys.length; ++i) {
        keys[i] = new BytesWritable();
        keys[i].setSize(rawComparables[i].size());
        System.arraycopy(rawComparables[i].buffer(),
            rawComparables[i].offset(), keys[i].get(), 0,
            rawComparables[i].size());
      }
    } else {
      // Revert to manually cooked-up keys: pick targetSize distinct random
      // row numbers (the TreeSet keeps them unique and sorted).
      int targetSize = Math.min(totalRows / 10, numSplits);
      Set<Integer> keySets = new TreeSet<Integer>();
      while (keySets.size() < targetSize) {
        keySets.add(random.nextInt(totalRows));
      }
      keys = new BytesWritable[targetSize];
      if (!keySets.isEmpty()) {
        int j = 0;
        for (int i : keySets.toArray(new Integer[keySets.size()])) {
          keys[j] = makeKey(i);
          ++j;
        }
      }
    }

    // Scan every key range; begin == null means "from the first row" and
    // end == null means "through the last row", so the counts must sum to
    // totalRows.
    int total = 0;
    for (int i = 0; i < keys.length; ++i) {
      reader = new ColumnGroup.Reader(path, conf);
      reader.setProjection(strProjection);
      BytesWritable begin = (i == 0) ? null : keys[i - 1];
      BytesWritable end = (i == keys.length - 1) ? null : keys[i];
      total += doReadOnly(reader.getScanner(begin, end, true));
    }
    Assert.assertEquals(total, totalRows);
  }

Examples of org.apache.hadoop.io.BytesWritable

    }
  }

  int doReadOnly(TableScanner scanner) throws IOException, ParseException {
    int total = 0;
    // One key buffer and one value tuple are reused for every row.
    BytesWritable key = new BytesWritable();
    Tuple value = TypesUtils.createTuple(scanner.getSchema());
    for (; !scanner.atEnd(); scanner.advance()) {
      ++total;
      // NOTE: nextInt() % 4 ranges over -3..3, so negative values fall
      // through to the switch's default; random.nextInt(4) would guarantee
      // 0..3.
      switch (random.nextInt() % 4) {
      case 0:

Examples of org.apache.hadoop.io.BytesWritable

        // Pick a random live column-group scanner and check whether its key
        // matches the current row key.
        while (true)
        {
          int index = random.nextInt(cgScanners.length);
          if (cgScanners[index] != null)
          {
            BytesWritable key2 = new BytesWritable();
            cgScanners[index].getCGKey(key2);
            if (key.equals(key2)) {
              return;
            }
            break;

Examples of org.apache.hadoop.io.BytesWritable

    tupColl2.set(2, new DataByteArray(abs2));
    bagColl.add(tupColl2);
    tuple.set(3, bagColl);

    // Row 1: keys encode partition and row number, e.g. "k11".
    int row = 0;
    inserter.insert(new BytesWritable(String.format("k%d%d", part + 1, row + 1)
        .getBytes()), tuple);

    // row 2
    row++;
    TypesUtils.resetTuple(tuple);
    TypesUtils.resetTuple(tupRecord);
    map.clear();
    tuple.set(0, false);
    tupRecord.set(0, 2);
    tupRecord.set(1, 1002L);
    tuple.set(1, tupRecord);
    map.put("boy", "girl");
    map.put("adam", "amy");
    map.put("bob", "becky");
    map.put("carl", "cathy");
    tuple.set(2, map);
    bagColl.clear();
    TypesUtils.resetTuple(tupColl1);
    TypesUtils.resetTuple(tupColl2);
    tupColl1.set(0, 7654.321);
    tupColl1.set(1, 0.0001);
    abs1[0] = 31;
    abs1[1] = 32;
    abs1[2] = 33;
    tupColl1.set(2, new DataByteArray(abs1));
    bagColl.add(tupColl1);
    tupColl2.set(0, 0.123456789);
    tupColl2.set(1, 0.3333);
    abs2[0] = 41;
    abs2[1] = 42;
    abs2[2] = 43;
    abs2[3] = 44;
    tupColl2.set(2, new DataByteArray(abs2));
    bagColl.add(tupColl2);
    tuple.set(3, bagColl);
    inserter.insert(new BytesWritable(String.format("k%d%d", part + 1, row + 1)
        .getBytes()), tuple);

    inserter.close();
    writer1.finish();
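A side note on the key format: "k%d%d" concatenates two unpadded decimal numbers, so keys stay unambiguous and byte-ordered only while both values are single digits, which holds in these tests. A zero-padded variant avoids that limit (makeRowKey is a hypothetical helper, not part of the original test):

  static BytesWritable makeRowKey(int part, int row) {
    // Fixed-width fields keep keys unambiguous and byte-wise sortable.
    return new BytesWritable(String.format("k%03d%06d", part, row).getBytes());
  }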

Examples of org.apache.hadoop.io.BytesWritable

    List<RangeSplit> splits = reader.rangeSplit(1);
    reader.close();
    reader = new BasicTable.Reader(path, conf);
    reader.setProjection(projection);
    TableScanner scanner = reader.getScanner(splits.get(0), true);
    BytesWritable key = new BytesWritable();
    Tuple value = TypesUtils.createTuple(scanner.getSchema());

    // getKey() fills the caller-supplied buffer, so one BytesWritable can be
    // reused for every row.
    scanner.getKey(key);
    Assert.assertEquals(key, new BytesWritable("k11".getBytes()));
    scanner.getValue(value);
    scanner.getValue(value);

    Assert.assertEquals(1001L, value.get(0));
    Assert.assertEquals(true, value.get(1));

    scanner.advance();
    scanner.getKey(key);
    Assert.assertEquals(key, new BytesWritable("k12".getBytes()));
    scanner.getValue(value);
    Assert.assertEquals(1002L, value.get(0));
    Assert.assertEquals(false, value.get(1));

    reader.close();
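The assertEquals calls work because BytesWritable equality considers only the valid bytes (up to getLength()), not the capacity of the backing buffer; that is also what makes reusing one key instance across rows safe. A standalone illustration (not from the test):

  BytesWritable a = new BytesWritable("k11".getBytes());
  BytesWritable b = new BytesWritable(new byte[64]);  // oversized backing buffer
  b.set("k11".getBytes(), 0, 3);                      // valid length is now 3
  System.out.println(a.equals(b));                    // true: capacity is ignored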

Examples of org.apache.hadoop.io.BytesWritable

    List<RangeSplit> splits = reader.rangeSplit(1);
    reader.close();
    reader = new BasicTable.Reader(path, conf);
    reader.setProjection(projection);
    TableScanner scanner = reader.getScanner(splits.get(0), true);
    BytesWritable key = new BytesWritable();
    Tuple value = TypesUtils.createTuple(scanner.getSchema());

    scanner.getKey(key);
    Assert.assertEquals(key, new BytesWritable("k11".getBytes()));
    scanner.getValue(value);

    // The second projected field is itself a record, returned as a nested Tuple.
    Tuple recordTuple = (Tuple) value.get(1);
    Assert.assertEquals(1, recordTuple.get(0));
    Assert.assertEquals(1001L, recordTuple.get(1));