Examples of FirstKeyOnlyFilter
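
FirstKeyOnlyFilter returns only the first KeyValue of each row, so a scan carries the minimum data needed to enumerate rows; its canonical use, as most of the examples below show, is row counting. As a warm-up, here is a minimal client-side sketch using the same classic HBase API as the snippets on this page (the table name "myTable" is a placeholder, not taken from any example):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;

public class RowCountSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    HTable table = new HTable(conf, "myTable");   // placeholder table name
    try {
      Scan scan = new Scan();
      scan.setFilter(new FirstKeyOnlyFilter());   // only the first KeyValue of each row
      ResultScanner scanner = table.getScanner(scan);
      long rowCount = 0;
      for (Result result : scanner) {
        rowCount++;                               // one Result per row, each with a single KV
      }
      scanner.close();
      System.out.println("rows: " + rowCount);
    } finally {
      table.close();
    }
  }
}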


Examples of org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter

  /**
   * Returns a count of the rows in the region where this coprocessor is loaded.
   */
  @Override
  public void getRowCount(RpcController controller, ExampleProtos.CountRequest request,
                          RpcCallback<ExampleProtos.CountResponse> done) {
    Scan scan = new Scan();
    scan.setFilter(new FirstKeyOnlyFilter());
    ExampleProtos.CountResponse response = null;
    InternalScanner scanner = null;
    try {
      scanner = env.getRegion().getScanner(scan);
      List<KeyValue> results = new ArrayList<KeyValue>();
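
The snippet above is a coprocessor endpoint that counts rows server-side: with FirstKeyOnlyFilter set, the InternalScanner yields a single KeyValue per row, so counting result batches counts rows without shipping any row contents to the client.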

Examples of org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter

      Scan scan = ProtobufUtil.toScan(request.getScan());
      if (scan.getFilter() == null && deleteType == DeleteType.ROW) {
        // What we need is just the rowkeys. So only 1st KV from any row is enough.
        // Only when it is a row delete, we can apply this filter.
        // In other types we rely on the scan to know which all columns to be deleted.
        scan.setFilter(new FirstKeyOnlyFilter());
      }
      // We assume here that the scan is already set up with the appropriate
      // filter and the necessary column(s).
      scanner = region.getScanner(scan);
      while (hasMore) {
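
Here a bulk-delete endpoint adds FirstKeyOnlyFilter only when the delete type is ROW, since row keys alone identify what to delete; for column or family deletes the scan itself must surface the cells being removed.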

Examples of org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter

      byte[] qualifier = null;
      if (qualifiers != null && !qualifiers.isEmpty()) {
        qualifier = qualifiers.pollFirst();
      }
      if (scan.getFilter() == null && qualifier == null)
        scan.setFilter(new FirstKeyOnlyFilter());
      scanner = env.getRegion().getScanner(scan);
      boolean hasMoreRows = false;
      do {
        hasMoreRows = scanner.next(results);
        if (results.size() > 0) {
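
This row-count endpoint falls back to FirstKeyOnlyFilter only when neither the request nor the scan names a qualifier; otherwise the first KeyValue of a row might belong to a column the caller did not ask about.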

Examples of org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter

    }
    // The specified column may or may not be part of the first key value for the row.
    // Hence do not use FirstKeyOnlyFilter if the scan specifies columns; instead use
    // FirstKeyValueMatchingQualifiersFilter.
    if (qualifiers.size() == 0) {
      scan.setFilter(new FirstKeyOnlyFilter());
    } else {
      scan.setFilter(new FirstKeyValueMatchingQualifiersFilter(qualifiers));
    }
    job.setOutputFormatClass(NullOutputFormat.class);
    TableMapReduceUtil.initTableMapperJob(tableName, scan,
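
A MapReduce row-counter setup: the filter choice mirrors the comment above, and NullOutputFormat is used because the count is accumulated in job counters rather than written as output.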

Examples of org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter

      }
      sb.append(args[i]);
    }
    Scan scan = new Scan();
    if (sb.length() > 0) scan.addColumns(sb.toString());
    scan.setFilter(new FirstKeyOnlyFilter());
    // Second argument is the table name.
    TableMapReduceUtil.initTableMapperJob(tableName, scan,
      RowCounterMapper.class, ImmutableBytesWritable.class, Result.class, job);
    job.setOutputFormatClass(NullOutputFormat.class);
    job.setNumReduceTasks(0);
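
An older variant of the same row counter: column names from the command line are collected into the scan, while FirstKeyOnlyFilter is set unconditionally so each mapper sees just one KeyValue per row.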

Examples of org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter

    verifyScanNoEarlyOut(s, this.numRows, this.colsPerRow);   
  }
 
  public void testFirstKeyOnlyFilter() throws Exception {
    Scan s = new Scan();
    s.setFilter(new FirstKeyOnlyFilter());
    // Expected KVs, the first KV from each of the remaining 6 rows
    KeyValue [] kvs = {
        new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]),
        new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]),
        new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]),
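
A unit test of the filter itself: with FirstKeyOnlyFilter on the scan, the expected output is exactly one KeyValue, the first, from each remaining row.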

Examples of org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter

          list.add(model.build());
        }
        filter = new FilterList(FilterList.Operator.valueOf(op), list);
      } break;
      case FirstKeyOnlyFilter: {
        filter = new FirstKeyOnlyFilter();
      } break;
      case InclusiveStopFilter: {
        filter = new InclusiveStopFilter(Base64.decode(value));
      } break;
      case PageFilter: {
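
The switch above rebuilds filters by name when deserializing a scanner definition; FirstKeyOnlyFilter is the case that needs no argument. It also composes with other filters. A hedged sketch (the helper name and page-size parameter are illustrative, not from the source) pairing it with PageFilter in a FilterList, so a scan returns just the first cell of roughly the first pageSize rows:

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.FilterList;
import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;
import org.apache.hadoop.hbase.filter.PageFilter;

public class ScanSketch {
  static Scan firstKeysOfFirstRows(long pageSize) {
    FilterList filters = new FilterList(FilterList.Operator.MUST_PASS_ALL);
    filters.addFilter(new FirstKeyOnlyFilter()); // one KeyValue per row
    filters.addFilter(new PageFilter(pageSize)); // PageFilter is enforced per region server,
                                                 // so the client may see slightly more rows
    Scan scan = new Scan();
    scan.setFilter(filters);
    return scan;
  }
}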

Examples of org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter

        if (clientTimeStamp == HConstants.LATEST_TIMESTAMP) {
            return null;
        }
       
        Scan scan = newTableRowsScan(key, clientTimeStamp, HConstants.LATEST_TIMESTAMP);
        scan.setFilter(new FirstKeyOnlyFilter());
        scan.setRaw(true);
        RegionScanner scanner = region.getScanner(scan);
        List<KeyValue> results = Lists.<KeyValue>newArrayList();
        scanner.next(results);
        // HBase ignores the time range on a raw scan (HBASE-7362)
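
A raw scan (setRaw(true)) returns delete markers too, so combined with FirstKeyOnlyFilter this checks whether any trace of the row exists at all; as the comment notes, HBase ignores the time range on a raw scan (HBASE-7362).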

Examples of org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter

            scan.addFamily(TABLE_FAMILY_BYTES);
            byte[] startRow = ByteUtil.concat(indexInfo[0], QueryConstants.SEPARATOR_BYTE_ARRAY, indexInfo[2], QueryConstants.SEPARATOR_BYTE_ARRAY);
            byte[] stopRow = ByteUtil.nextKey(startRow);
            scan.setStartRow(startRow);
            scan.setStopRow(stopRow);
            scan.setFilter(new FirstKeyOnlyFilter());
            scanner = table.getScanner(scan);
            List<KeyRange> indexRowsToUpdate = Lists.newArrayList();
            while ((result = scanner.next()) != null) {
                byte[] rowKey = result.getRow();
                byte[][] rowKeyMetaData = new byte[4][];
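
This scan needs only row keys within a bounded range (a computed start row and its next key as the stop row), so FirstKeyOnlyFilter keeps the result to one KeyValue per index row while the loop parses each row key.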

Examples of org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter

       
        // Confirm that the data is no longer there because we dropped the table.
        // This must be done with the native HBase API because the Phoenix metadata is gone.
        HTableInterface htable = conn5.unwrap(PhoenixConnection.class).getQueryServices().getTable(SchemaUtil.getTableNameAsBytes(ATABLE_SCHEMA_NAME, ATABLE_NAME));
        Scan scan = new Scan();
        scan.setFilter(new FirstKeyOnlyFilter());
        scan.setTimeRange(0, ts+9);
        assertNull(htable.getScanner(scan).next());
        conn5.close();

        // Should still work because we're at an earlier timestamp than when the table was deleted
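
A test-side existence check: since only the presence of rows matters here, FirstKeyOnlyFilter plus a time range is enough to assert that the dropped table's data is no longer visible.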