Examples of Put
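The snippets below come from HBase test and example code and show org.apache.hadoop.hbase.client.Put being used to write rows, mostly directly against a region. For orientation, here is a minimal client-side sketch of the same API (pre-0.90 style, matching the snippets); the table name, family and qualifier are placeholders and not taken from the examples:

    import java.io.IOException;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.HTable;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutExample {
      public static void main(String[] args) throws IOException {
        HBaseConfiguration conf = new HBaseConfiguration();
        HTable table = new HTable(conf, "myTable");   // assumes the table already exists
        Put put = new Put(Bytes.toBytes("row1"));     // row key
        put.add(Bytes.toBytes("myFamily"),            // column family
          Bytes.toBytes("myQualifier"),               // column qualifier
          Bytes.toBytes(42L));                        // cell value
        table.put(put);                               // send the mutation to the region server
      }
    }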


Examples of org.apache.hadoop.hbase.client.Put

    //Setting up region
    String method = "testIndexedScanWithOneRow";
    initIdxRegion(tableName, method, new HBaseConfiguration(),
      Pair.of(family, new IdxIndexDescriptor[]{indexDescriptor}));

    Put put = new Put(row1);
    put.add(family, qualLong, Bytes.toBytes(42L));
    region.put(put);

    checkScanWithOneRow(family, false);

    region.flushcache();

Examples of org.apache.hadoop.hbase.client.Put

    //Setting up region
    String method = "testIndexedScanWithOneRow";
    initIdxRegion(tableName, method, new HBaseConfiguration(), Pair.of(family,
      new IdxIndexDescriptor[]{indexDescriptor}));
    for (long i = 0; i < numRows; i++) {
      Put put = new Put(Bytes.toBytes(random.nextLong() + "." + i));
      put.add(family, qualLong, Bytes.toBytes(i));
      region.put(put);
    }

    /**
     * Check when indexes are empty and memstore is full
     */
    checkScanWithOneIndexAndOneColumn(family, false, numRows, 1);

    region.flushcache();

    /**
     * Check when indexes are full and memstore is empty
     */
    checkScanWithOneIndexAndOneColumn(family, true, numRows, 1);


    for (long i = numRows; i < numRows + 1000; i++) {
      Put put = new Put(Bytes.toBytes(random.nextLong() + "." + i));
      put.add(family, qualLong, Bytes.toBytes(i));
      region.put(put);
    }

    /**
     * Check when both the index and the memstore contain entries
     */

Examples of org.apache.hadoop.hbase.client.Put

    //Setting up region
    String method = "testIndexedScanWithThreeColumns";
    initIdxRegion(tableName, method, new HBaseConfiguration(), Pair.of(family,
      new IdxIndexDescriptor[]{indexDescriptor1, indexDescriptor2, indexDescriptor3}));
    for (long i = 0; i < numRows; i++) {
      Put put = new Put(Bytes.toBytes(random.nextLong() + "." + i));
      put.add(family, qualLong, Bytes.toBytes(i));
      put.add(family, qualDouble, Bytes.toBytes((double) i));
      put.add(family, qualBytes, Bytes.toBytes("str" + (10 + (i % 50))));
      region.put(put);
    }

    /**
     * Check when indexes are empty and memstore is full
     */
    checkScanWithOneIndexAndOneColumn(family, false, numRows, 3);
    checkScanWithThreeColumns(family, false, numRows, 3);

    region.flushcache();

    /**
     * Check when indexes are full and memstore is empty
     */
    checkScanWithOneIndexAndOneColumn(family, true, numRows, 3);
    checkScanWithThreeColumns(family, true, numRows, 3);


    int numAdditionalRows = 1000;
    for (long i = numRows; i < numRows + numAdditionalRows; i++) {
      Put put = new Put(Bytes.toBytes(random.nextLong() + "." + i));
      put.add(family, qualLong, Bytes.toBytes(i));
      put.add(family, qualDouble, Bytes.toBytes((double) i));
      put.add(family, qualBytes, Bytes.toBytes("str" + (10 + (i % 50))));
      region.put(put);
    }

    /**
     * Check when both the index and the memstore contain entries
     */

Examples of org.apache.hadoop.hbase.client.Put

    int numberOfRows = 1000;

    Random random = new Random(3505L);
    for (int row = 0; row < numberOfRows; row++) {
      Put put = new Put(Bytes.toBytes(random.nextLong()));
      int val = row % 10;
      put.add(family1, qualLong, Bytes.toBytes((long) val));
      put.add(family1, qualDouble, Bytes.toBytes((double) val));
      put.add(family2, qualBytes, Bytes.toBytes(String.format("%04d", val)));
      region.put(put);
    }

    checkScanWithTwoFamilies(family1, family2, false, numberOfRows, 3);

    region.flushcache();

    checkScanWithTwoFamilies(family1, family2, true, numberOfRows, 3);

    /**
     * Add some more to have results both in the index and in memstore
     */
    for (int row = 0; row < numberOfRows; row++) {
      Put put = new Put(Bytes.toBytes(random.nextLong()));
      int val = row % 10;
      put.add(family1, qualLong, Bytes.toBytes((long) val));
      put.add(family1, qualDouble, Bytes.toBytes((double) val));
      put.add(family2, qualBytes, Bytes.toBytes(String.format("%04d", val)));
      region.put(put);
    }

    checkScanWithTwoFamilies(family1, family2, false, numberOfRows * 2, 3);


Examples of org.apache.hadoop.hbase.client.Put

    //Setting up region
    String method = "testIndexedScanWithMultipleVersions";
    initIdxRegion(tableName, method, new HBaseConfiguration(), Pair.of(family,
      new IdxIndexDescriptor[]{indexDescriptor1, indexDescriptor2}));
    for (long i = 0; i < numRows; i++) {
      Put put = new Put(Bytes.toBytes(random.nextLong() + "." + i));
      long value = i % 10;
      put.add(family, qualLong, Bytes.toBytes(value));
      put.add(family, qualDouble, Bytes.toBytes((double) i));
      region.put(put);
    }

    /**
     * Check when indexes are empty and memstore is full
     */
    checkIndexedScanWithMultipleVersions(family, false, numRows, 1);


    random = new Random(27101973L); // pseudo random order of row insertions
    for (long i = 0; i < numRows; i++) {
      Put put = new Put(Bytes.toBytes(random.nextLong() + "." + i));
      long value = 10 + i % 10;
      put.add(family, qualLong, Bytes.toBytes(value));
      put.add(family, qualDouble, Bytes.toBytes((double) i));
      region.put(put);
    }

    /**
     * Check before flushing - the index is still empty and the memstore
     * holds both batches of rows
     */
    checkIndexedScanWithMultipleVersions(family, false, numRows, 2);

    region.flushcache();

    /**
     * Check when indexes are full and memstore is empty
     */
    checkIndexedScanWithMultipleVersions(family, true, numRows, 2);

    random = new Random(27101973L); // pseudo random order of row insertions
    for (long i = 0; i < numRows; i++) {
      Put put = new Put(Bytes.toBytes(random.nextLong() + "." + i));
      long value = 20 + i % 10;
      put.add(family, qualLong, Bytes.toBytes(value));
      put.add(family, qualDouble, Bytes.toBytes((double) i));
      region.put(put);
    }

    checkIndexedScanWithMultipleVersions(family, false, numRows, 3);
    region.flushcache();

Examples of org.apache.hadoop.hbase.client.Put

    //Setting up region
    String method = "testIndexedScanWithDeletedRows";
    initIdxRegion(tableName, method, new HBaseConfiguration(), Pair.of(family,
      new IdxIndexDescriptor[]{indexDescriptor1, indexDescriptor2}));
    for (long i = 0; i < numRows; i++) {
      Put put = new Put(Bytes.toBytes(random.nextLong() + "." + i));
      long value = i % 10;
      put.add(family, qualLong, timestamp, Bytes.toBytes(value));
      put.add(family, qualDouble, timestamp, Bytes.toBytes((double) i));
      region.put(put);
    }

    checkIndexedScanWithDeletedRows(family, false, 7L, numRows / 5);
    checkIndexedScanWithDeletedRows(family, false, 6L, 8L, 3 * numRows / 5);

    timestamp++;

    // delete some rows
    random = new Random(10121986L); // re-create the pseudo-random row keys so the deletes hit existing rows
    for (long i = 0; i < numRows; i++) {
      byte[] rowId = Bytes.toBytes(random.nextLong() + "." + i);
      if (i % 10 == 7) {
        Delete delete = new Delete(rowId, timestamp, null);
        region.delete(delete, null, true);
      }
    }

    checkIndexedScanWithDeletedRows(family, false, 7L, 0);
    checkIndexedScanWithDeletedRows(family, false, 6L, 8L, 2 * numRows / 5);

    /**
     * Flush and verify
     */
    region.flushcache();

    checkIndexedScanWithDeletedRows(family, true, 7L, 0);
    checkIndexedScanWithDeletedRows(family, true, 6L, 8L, 2 * numRows / 5);

    /**
     * New check - now the index should find the 4's and the memstore
     * should only contain deleted rows - overriding the index's findings
     */
    checkIndexedScanWithDeletedRows(family, true, 4L, numRows / 5);
    checkIndexedScanWithDeletedRows(family, true, 3L, 8L, numRows);

    timestamp++;

    random = new Random(10121986L); // re-create the pseudo-random row keys so the deletes hit existing rows
    for (long i = 0; i < numRows; i++) {
      byte[] rowId = Bytes.toBytes(random.nextLong() + "." + i);
      if (i % 10 == 4) {
        Delete delete = new Delete(rowId, timestamp, null);
        region.delete(delete, null, true);
      }
    }

    checkIndexedScanWithDeletedRows(family, false, 4L, 0);
    checkIndexedScanWithDeletedRows(family, false, 3L, 8L, 4 * numRows / 5);

    region.flushcache();
    checkIndexedScanWithDeletedRows(family, true, 4L, 0);
    checkIndexedScanWithDeletedRows(family, true, 3L, 8L, 4 * numRows / 5);

    timestamp++;
    /**
     * New check - put some records back and verify
     */
    for (long i = 0; i < numRows / 10; i++) {
      Put put = new Put(Bytes.toBytes(random.nextLong() + "." + i));
      long value = 7L;
      put.add(family, qualLong, timestamp, Bytes.toBytes(value));
      put.add(family, qualDouble, timestamp, Bytes.toBytes((double) i));
      region.put(put);
    }

    checkIndexedScanWithDeletedRows(family, false, 7L, numRows / 5);
    checkIndexedScanWithDeletedRows(family, false, 4L, 0);
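The example above writes every cell with an explicit timestamp (put.add(family, qualifier, timestamp, value)) and later issues Deletes at a higher timestamp, so the delete markers mask the earlier versions. A minimal sketch of that pattern against a client-side table; "table" is assumed to be an open HTable, and the row, family and qualifier names are placeholders:

    byte[] family = Bytes.toBytes("myFamily");
    byte[] qualifier = Bytes.toBytes("myQualifier");
    byte[] row = Bytes.toBytes("someRow");

    long ts = 1000L;
    Put put = new Put(row);
    put.add(family, qualifier, ts, Bytes.toBytes(42L)); // cell written at timestamp 1000
    table.put(put);

    Delete delete = new Delete(row);
    delete.setTimestamp(ts + 1);                        // masks versions at or below ts + 1
    table.delete(delete);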

Examples of org.apache.hadoop.hbase.client.Put

    //Setting up region
    String method = "testIdxRegionSplit";
    initIdxRegion(tableName, method, new HBaseConfiguration(), Pair.of(family,
      new IdxIndexDescriptor[]{indexDescriptor}));
    for (long i = 0; i < numRows; i++) {
      Put put = new Put(Bytes.toBytes(String.format("%08d", i)));
      put.add(family, qualLong, Bytes.toBytes(i));
      region.put(put);
    }

    IdxScan idxScan = new IdxScan();
    idxScan.addFamily(family);

Examples of org.apache.hadoop.hbase.client.Put

    //Setting up region
    String method = "testIdxRegionCompaction_" + majorcompaction;
    initIdxRegion(tableName, method, new HBaseConfiguration(), Pair.of(family,
      new IdxIndexDescriptor[]{indexDescriptor}));
    for (long i = 0; i < numRows; i++) {
      Put put = new Put(Bytes.toBytes(String.format("%08d", i)));
      put.add(family, qualLong, Bytes.toBytes(i));
      region.put(put);
      if (i != 0 && i % flushInterval == 0) {
        region.flushcache();
      }
    }

Examples of org.apache.hadoop.hbase.client.Put

    int expectedCount = 0;
    List<KeyValue> res = new ArrayList<KeyValue>();

    boolean toggle = true;
    for (long i = 0; i < numRows; i++) {
      Put put = new Put(Bytes.toBytes(i));
      put.add(family, qualLong, Bytes.toBytes(i % 10));
      region.put(put);

      if (i != 0 && i % compactInterval == 0) {
        //System.out.println("iteration = " + i);
        region.compactStores(true);

Examples of org.apache.hadoop.hbase.client.Put

  throws IOException {
    for (int i = 0; i < numRows; i++) {
      String row = key + "_" + i /* UUID.randomUUID().toString() */;
      System.out.println(String.format("Saving row: %s, with value %s", row,
        value));
      Put put = new Put(Bytes.toBytes(row));
      put.add(Bytes.toBytes("trans-blob"), null,
        Bytes.toBytes("value for blob"));
      put.add(Bytes.toBytes("trans-type"), null, Bytes.toBytes("statement"));
      put.add(Bytes.toBytes("trans-date"), null,
        Bytes.toBytes("20090921010101999"));
      put.add(Bytes.toBytes("trans-tags"), Bytes.toBytes("qual2"),
        Bytes.toBytes(value));
      put.add(Bytes.toBytes("trans-group"), null,
        Bytes.toBytes("adhocTransactionGroupId"));
      r.put(put);
    }
  }
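In the snippet above each Put stores its value under a null qualifier, which HBase treats as the empty qualifier within that family. A small sketch of reading such a cell back; "table" is assumed to be an open HTable for the same table and the row key is a placeholder:

    Get get = new Get(Bytes.toBytes("someKey_0"));
    get.addFamily(Bytes.toBytes("trans-type"));      // fetch every cell in the family
    Result result = table.get(get);
    byte[] value = result.getValue(Bytes.toBytes("trans-type"),
      HConstants.EMPTY_BYTE_ARRAY);                   // empty qualifier = the null qualifier above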