Package org.apache.hadoop.hbase.filter

Examples of org.apache.hadoop.hbase.filter.ColumnPrefixFilter
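
ColumnPrefixFilter keeps only those cells whose column qualifier starts with a given byte prefix; it is set on a Get or Scan via setFilter. Before the excerpts below, a minimal self-contained sketch of typical usage, assuming the older HTable client API that the excerpts themselves use (the table name "mytable", family "d", and prefix "user_" are illustrative, not taken from the examples):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.HTable;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.filter.ColumnPrefixFilter;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ColumnPrefixFilterExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        HTable table = new HTable(conf, "mytable");  // illustrative table name
        try {
          Scan scan = new Scan();
          scan.addFamily(Bytes.toBytes("d"));
          // Keep only cells whose qualifier starts with "user_".
          scan.setFilter(new ColumnPrefixFilter(Bytes.toBytes("user_")));
          ResultScanner scanner = table.getScanner(scan);
          for (Result r : scanner) {
            System.out.println(r);
          }
          scanner.close();
        } finally {
          table.close();
        }
      }
    }

The excerpts that follow show the filter in real code.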


    }
    // Verify the result of the previous case ("multiple CFs + CRF").
    verifyResult(result, kvListExp, toLog, "Testing multiple CFs + CRF");

    // Next case: cap results at 7 per column family and keep only columns
    // whose qualifier starts with QUALIFIERS[1].
    get = new Get(ROW);
    get.setMaxResultsPerColumnFamily(7);
    get.setFilter(new ColumnPrefixFilter(QUALIFIERS[1]));
    result = ht.get(get);

    // Expected: the QUALIFIERS[1] cell from each of the three families.
    kvListExp = new ArrayList<Cell>();
    kvListExp.add(new KeyValue(ROW, FAMILIES[0], QUALIFIERS[1], 1, VALUE));
    kvListExp.add(new KeyValue(ROW, FAMILIES[1], QUALIFIERS[1], 1, VALUE));
    kvListExp.add(new KeyValue(ROW, FAMILIES[2], QUALIFIERS[1], 1, VALUE));

        break;
      case ColumnPaginationFilter:
        // Page through columns: at most `limit` columns, starting at `offset`.
        filter = new ColumnPaginationFilter(limit, offset);
        break;
      case ColumnPrefixFilter:
        // The qualifier prefix arrives Base64-encoded; decode to raw bytes.
        filter = new ColumnPrefixFilter(Base64.decode(value));
        break;
      case ColumnRangeFilter:
        // Keep qualifiers between minColumn and maxColumn, with configurable
        // inclusiveness at each end.
        filter = new ColumnRangeFilter(Base64.decode(minColumn),
            minColumnInclusive, Base64.decode(maxColumn),
            maxColumnInclusive);
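
The ColumnPrefixFilter case above comes from code that rebuilds filters from string parameters, where the raw qualifier prefix arrives Base64-encoded (Base64 here is org.apache.hadoop.hbase.util.Base64, as in the excerpt). A hedged sketch of that decoding step in isolation; the parameter value "user_" and the helper name fromBase64 are illustrative:

    import org.apache.hadoop.hbase.filter.ColumnPrefixFilter;
    import org.apache.hadoop.hbase.filter.Filter;
    import org.apache.hadoop.hbase.util.Base64;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PrefixFilterFromParam {
      // Rebuild a ColumnPrefixFilter from a Base64-encoded request parameter.
      static Filter fromBase64(String value) {
        return new ColumnPrefixFilter(Base64.decode(value));
      }

      public static void main(String[] args) {
        String param = Base64.encodeBytes(Bytes.toBytes("user_"));
        System.out.println(fromBase64(param));
      }
    }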

                                Bytes.toString(colInfo.getColumnPrefix()));
                    }

                    // Add a ColumnPrefixFilter (not a row-key PrefixFilter)
                    // for this column's qualifier prefix, if one was given.
                    if (colInfo.getColumnPrefix() != null) {
                        columnFilters.addFilter(new ColumnPrefixFilter(
                            colInfo.getColumnPrefix()));
                    }
                }
                else {
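
When several qualifier prefixes must be selected at once, as in the loop above, the individual ColumnPrefixFilters are typically OR-ed together in a FilterList. A minimal sketch, with illustrative prefixes ("addr_", "phone_"):

    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.filter.ColumnPrefixFilter;
    import org.apache.hadoop.hbase.filter.FilterList;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MultiPrefixScan {
      public static Scan scanWithPrefixes(byte[][] prefixes) {
        // MUST_PASS_ONE ORs the filters: a cell passes if any prefix matches.
        FilterList columnFilters = new FilterList(FilterList.Operator.MUST_PASS_ONE);
        for (byte[] prefix : prefixes) {
          columnFilters.addFilter(new ColumnPrefixFilter(prefix));
        }
        Scan scan = new Scan();
        scan.setFilter(columnFilters);
        return scan;
      }

      public static void main(String[] args) {
        byte[][] prefixes = { Bytes.toBytes("addr_"), Bytes.toBytes("phone_") };
        System.out.println(scanWithPrefixes(prefixes));
      }
    }

HBase also ships org.apache.hadoop.hbase.filter.MultipleColumnPrefixFilter, which covers the same OR-of-prefixes case with a single filter.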

    // Clear the record's data, and delete any referred blobs.
    private void clearData(RecordId recordId, Record originalRecord, Long upToVersion)
            throws IOException, RepositoryException, InterruptedException {
        Get get = new Get(recordId.toBytes());
        get.addFamily(RecordCf.DATA.bytes);
        // Match only qualifiers that start with the single DATA_PREFIX marker byte.
        get.setFilter(new ColumnPrefixFilter(new byte[]{RecordColumn.DATA_PREFIX}));
        // Only read versions that exist(ed) at the time the record was deleted, since this
        // code could run concurrently with the re-creation of the same record. setTimeRange
        // treats the lower bound as inclusive and the upper bound as exclusive.
        if (upToVersion != null) {
            get.setTimeRange(1 /* inclusive */, upToVersion + 1 /* exclusive */);
        } else {

    final Get get = new Get(hbaseRow);
    get.addFamily(hbaseColumnName.getFamily());

    // Both filters must pass: KeyOnlyFilter strips cell values server-side,
    // and ColumnPrefixFilter keeps only qualifiers with the requested prefix.
    final FilterList filter = new FilterList(FilterList.Operator.MUST_PASS_ALL);
    filter.addFilter(new KeyOnlyFilter());
    filter.addFilter(new ColumnPrefixFilter(hbaseColumnName.getQualifier()));
    get.setFilter(filter);

    final Result result = mHTable.get(get);

    // Step 2.
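
Pairing KeyOnlyFilter with ColumnPrefixFilter in a MUST_PASS_ALL list is a common existence-check pattern: the server strips the cell values, so only the matching keys cross the wire. The excerpt is cut off at "Step 2", but a natural continuation is to walk the returned cells and collect their qualifiers; a sketch, assuming nothing about the original code beyond the Result in hand:

    import java.util.ArrayList;
    import java.util.List;
    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.CellUtil;
    import org.apache.hadoop.hbase.client.Result;

    public class QualifierLister {
      // Collect the qualifiers of every cell in a Result. Values were already
      // stripped server-side by KeyOnlyFilter, so only keys are present.
      static List<byte[]> qualifiers(Result result) {
        List<byte[]> out = new ArrayList<byte[]>();
        for (Cell cell : result.rawCells()) {
          out.add(CellUtil.cloneQualifier(cell));
        }
        return out;
      }
    }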
