Package org.apache.hadoop.hbase.io

Examples of org.apache.hadoop.hbase.io.BatchUpdate
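BatchUpdate was the single-row mutation container in the HBase 0.19-era client API: it collects all edits for one row, and committing it applies them together. As a minimal sketch of the basic lifecycle, assuming that API (the table name "mytable" and column "info:name" are hypothetical, not taken from the examples below):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.HTable;
    import org.apache.hadoop.hbase.io.BatchUpdate;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BatchUpdateExample {
      public static void main(String[] args) throws Exception {
        // Hypothetical table "mytable" with a column family "info".
        HTable table = new HTable(new HBaseConfiguration(), "mytable");
        // A BatchUpdate gathers every edit for one row; table.commit
        // applies them as a unit.
        BatchUpdate bu = new BatchUpdate(Bytes.toBytes("row1"));
        bu.put("info:name", Bytes.toBytes("some value"));
        table.commit(bu);
      }
    }

The examples below are drawn from HBase's own tests and tools. The first, from a random-write performance test, commits one randomly keyed row per call: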


    @Override
    void testRow(@SuppressWarnings("unused") final int i) throws IOException {
      // Pick a random row key and commit one random value under COLUMN_NAME.
      byte [] row = getRandomRow(this.rand, this.totalRows);
      BatchUpdate b = new BatchUpdate(row);
      b.put(COLUMN_NAME, generateValue(this.rand));
      table.commit(b);
    }
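The sequential-write variant of the same test derives the row key from the loop index instead: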


    @Override
    void testRow(final int i) throws IOException {
      // The row key is generated from the loop index via format(i).
      BatchUpdate b = new BatchUpdate(format(i));
      b.put(COLUMN_NAME, generateValue(this.rand));
      table.commit(b);
    }
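Next, a region-level example: it creates an HRegion, writes a single row directly through HRegion.batchUpdate, then closes the region and its log: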

   
    HRegion region = createNewHRegion(desc, startKey, endKey);

    // Write one row at the region's start key (or "row_000" if the region
    // is unbounded below).
    byte [] keyToWrite = startKey == null ? Bytes.toBytes("row_000") : startKey;

    BatchUpdate bu = new BatchUpdate(keyToWrite);
    bu.put(COLUMN_NAME, Bytes.toBytes("test"));

    region.batchUpdate(bu, null);

    region.close();
    region.getLog().closeAndDelete();
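A serialization test follows: a BatchUpdate is round-tripped through its Writable byte form, then deserialized a second time into the same instance to confirm that BatchOperations do not accumulate: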

  /**
   * Test BatchUpdate serialization
   * @throws Exception
   */
  public void testBatchUpdate() throws Exception {
    // Add row named 'testName'.
    BatchUpdate bu = new BatchUpdate(getName());
    // Add a column named same as row.
    bu.put(getName(), getName().getBytes());
    byte [] b = Writables.getBytes(bu);
    BatchUpdate bubu =
      (BatchUpdate)Writables.getWritable(b, new BatchUpdate());
    // Assert rows are same.
    assertTrue(Bytes.equals(bu.getRow(), bubu.getRow()));
    // Assert has same number of BatchOperations.
    int firstCount = 0;
    for (BatchOperation bo: bubu) {
      firstCount++;
    }
    // Now deserialize again into same instance to ensure we're not
    // accumulating BatchOperations on each deserialization.
    BatchUpdate bububu = (BatchUpdate)Writables.getWritable(b, bubu);
    // Assert rows are same again.
    assertTrue(Bytes.equals(bu.getRow(), bububu.getRow()));
    int secondCount = 0;
    for (BatchOperation bo: bububu) {
      secondCount++;
    }
    assertEquals(firstCount, secondCount);
  }
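This loader walks row keys up to endKey, committing one cell per row. Note the two constructors: BatchUpdate(row) lets the region server assign the commit timestamp, while BatchUpdate(row, ts) pins it explicitly. A brief sketch of the timestamped form, reusing the hypothetical table and column from the first example:

    long ts = System.currentTimeMillis();
    BatchUpdate versioned = new BatchUpdate(Bytes.toBytes("row1"), ts);
    versioned.put("info:name", Bytes.toBytes("versioned value"));
    table.commit(versioned);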

          if (endKey != null && endKey.length > 0
              && Bytes.compareTo(endKey, t) <= 0) {
            break EXIT;
          }
          try {
            BatchUpdate batchUpdate = ts == -1 ?
              new BatchUpdate(t) : new BatchUpdate(t, ts);
            try {
              batchUpdate.put(column, t);
              updater.commit(batchUpdate);
            } catch (RuntimeException ex) {
              ex.printStackTrace();
              throw ex;
            } catch (IOException ex) {
              // Mirror the RuntimeException handling above.
              ex.printStackTrace();
              throw ex;
            }
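A bulk-write loop against a region, flushing the cache every 10,000 rows: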

    System.out.println("created region " +
        Bytes.toString(region.getRegionName()));

    HRegionIncommon r = new HRegionIncommon(region);
    for(int i = firstRow; i < firstRow + nrows; i++) {
      BatchUpdate batchUpdate = new BatchUpdate(Bytes.toBytes("row_"
          + String.format("%1$05d", i)));

      batchUpdate.put(COLUMN_NAME, value.get());
      region.batchUpdate(batchUpdate, null);
      if(i % 10000 == 0) {
        System.out.println("Flushing write #" + i);
        r.flushcache();
      }
    }
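BatchUpdate is also the value type a reducer emits in HBase's old org.apache.hadoop.hbase.mapred integration, as the signature below shows; this reducer merges all of a key's HbaseMapWritable values into a single BatchUpdate: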

    public void reduce(ImmutableBytesWritable k,
        Iterator<HbaseMapWritable<byte [], byte []>> v,
        OutputCollector<ImmutableBytesWritable, BatchUpdate> output,
        Reporter r)
    throws IOException {
      // Fold every value for this key into one BatchUpdate; the inner
      // loop drains the iterator, so the outer loop body runs only once.
      while (v.hasNext()) {
        r.setStatus("Reducer committing " + k);
        BatchUpdate bu = new BatchUpdate(k.get());
        while (v.hasNext()) {
          HbaseMapWritable<byte [], byte []> hmw = v.next();
          for (Entry<byte [], byte []> e: hmw.entrySet()) {
            bu.put(e.getKey(), e.getValue());
          }
        }
        output.collect(k, bu);
      }
    }
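A map task building its output: the transformed value is wrapped in a BatchUpdate under OUTPUT_COLUMN and collected: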

      // Append the characters of the original value in reverse order.
      for (int i = originalValue.length() - 1; i >= 0; i--) {
        newValue.append(originalValue.charAt(i));
      }
     
      // Now set the value to be collected

      BatchUpdate outval = new BatchUpdate(key.get());
      outval.put(OUTPUT_COLUMN, Bytes.toBytes(newValue.toString()));
      output.collect(key, outval);
    }
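A longer region test: it writes meta-style rows with explicit timestamps, verifies reads from the memcache, closes and reopens the region to force updates to disk, re-verifies, then updates existing columns and adds a new one: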

      region = createNewHRegion(desc, null, null);
      HRegionIncommon r = new HRegionIncommon(region);
     
      // Write information to the table

      BatchUpdate batchUpdate =
          new BatchUpdate(ROW_KEY, System.currentTimeMillis());
      batchUpdate.put(CONTENTS, CONTENTS);
      batchUpdate.put(HConstants.COL_REGIONINFO,
          Writables.getBytes(HRegionInfo.ROOT_REGIONINFO));
      r.commit(batchUpdate);
     
      batchUpdate = new BatchUpdate(ROW_KEY, System.currentTimeMillis());
      batchUpdate.put(HConstants.COL_SERVER,
        Bytes.toBytes(new HServerAddress(SERVER_ADDRESS).toString()));
      batchUpdate.put(HConstants.COL_STARTCODE, Bytes.toBytes(12345));
      batchUpdate.put(Bytes.toString(HConstants.COLUMN_FAMILY) +
        "region", Bytes.toBytes("region"));
      r.commit(batchUpdate);
     
      // Verify that get works the same from memcache as when reading from disk
      // NOTE dumpRegion won't work here because it only reads from disk.
     
      verifyGet(r, SERVER_ADDRESS);
     
      // Close and re-open region, forcing updates to disk
     
      region.close();
      region = openClosedRegion(region);
      r = new HRegionIncommon(region);
     
      // Read it back
     
      verifyGet(r, SERVER_ADDRESS);
     
      // Update one family member and add a new one
     
      batchUpdate = new BatchUpdate(ROW_KEY, System.currentTimeMillis());
      batchUpdate.put(Bytes.toString(HConstants.COLUMN_FAMILY) + "region",
        "region2".getBytes(HConstants.UTF8_ENCODING));
      String otherServerName = "bar.foo.com:4321";
      batchUpdate.put(HConstants.COL_SERVER,
        Bytes.toBytes(new HServerAddress(otherServerName).toString()));
      batchUpdate.put(Bytes.toString(HConstants.COLUMN_FAMILY) + "junk",
        "junk".getBytes(HConstants.UTF8_ENCODING));
      r.commit(batchUpdate);

      verifyGet(r, otherServerName);
     
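Finally, three timestamped rounds of updates (t0, t1, t2) across three columns of one row, setting up a delete-family check: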

    byte [] colA = Bytes.toBytes(Bytes.toString(COLUMNS[0]) + "a");
    byte [] colB = Bytes.toBytes(Bytes.toString(COLUMNS[0]) + "b");
    byte [] colC = Bytes.toBytes(Bytes.toString(COLUMNS[1]) + "c");

    BatchUpdate batchUpdate = new BatchUpdate(row, t0);
    batchUpdate.put(colA, cellData(0, flush).getBytes());
    batchUpdate.put(colB, cellData(0, flush).getBytes());
    batchUpdate.put(colC, cellData(0, flush).getBytes());     
    region_incommon.commit(batchUpdate);

    batchUpdate = new BatchUpdate(row, t1);
    batchUpdate.put(colA, cellData(1, flush).getBytes());
    batchUpdate.put(colB, cellData(1, flush).getBytes());
    batchUpdate.put(colC, cellData(1, flush).getBytes());     
    region_incommon.commit(batchUpdate);
   
    batchUpdate = new BatchUpdate(row, t2);
    batchUpdate.put(colA, cellData(2, flush).getBytes());
    batchUpdate.put(colB, cellData(2, flush).getBytes());
    batchUpdate.put(colC, cellData(2, flush).getBytes());     
    region_incommon.commit(batchUpdate);

    if (flush) {region_incommon.flushcache();}

    // Next, delete the family at a timestamp and verify which cells remain.
