Package org.apache.cassandra.db.filter

Examples of org.apache.cassandra.db.filter.IDiskAtomFilter
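
IDiskAtomFilter is the interface the 1.2/2.0-era Cassandra read path uses to describe which cells ("disk atoms") of a row a query should return. Its main implementations are NamesQueryFilter (an explicit, sorted set of column names), SliceQueryFilter (a start/finish range with a count limit), and IdentityQueryFilter (select everything). A minimal, illustrative sketch of constructing the two common filters directly, assuming the 1.2-era constructors used in the snippets below (the class name FilterExamples is only for illustration):

        import java.nio.ByteBuffer;
        import java.util.SortedSet;
        import java.util.TreeSet;

        import org.apache.cassandra.db.filter.IDiskAtomFilter;
        import org.apache.cassandra.db.filter.NamesQueryFilter;
        import org.apache.cassandra.db.filter.SliceQueryFilter;
        import org.apache.cassandra.utils.ByteBufferUtil;

        public class FilterExamples
        {
            // A slice filter: forward scan from the start of the row up to "zzz", at most 100 cells.
            static IDiskAtomFilter sliceFilter()
            {
                return new SliceQueryFilter(ByteBufferUtil.EMPTY_BYTE_BUFFER,  // open start
                                            ByteBufferUtil.bytes("zzz"),       // finish bound
                                            false,                             // not reversed
                                            100);                              // cell count limit
            }

            // A names filter: only the explicitly listed cells. Real code passes the table's
            // comparator to the TreeSet (as the snippets below do); plain ByteBuffer ordering
            // is used here only to keep the sketch self-contained.
            static IDiskAtomFilter namesFilter()
            {
                SortedSet<ByteBuffer> names = new TreeSet<ByteBuffer>();
                names.add(ByteBufferUtil.bytes("birthdate"));
                return new NamesQueryFilter(names);
            }
        }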


            // Bounds run from the index clause's start key to the ring's minimum token; the Thrift
            // column predicate becomes the IDiskAtomFilter handed to the RangeSliceCommand below.
            IPartitioner p = StorageService.getPartitioner();
            AbstractBounds<RowPosition> bounds = new Bounds<RowPosition>(RowPosition.forKey(index_clause.start_key, p),
                                                                         p.getMinimumToken().minKeyBound());

            IDiskAtomFilter filter = ThriftValidation.asIFilter(column_predicate, metadata.getComparatorFor(column_parent.super_column));
            RangeSliceCommand command = new RangeSliceCommand(keyspace,
                                                              column_parent.column_family,
                                                              null,
                                                              filter,
                                                              bounds,


        Table table = Table.open(command.keyspace);
        List<Row> rows;
        // now scan until we have enough results
        try
        {
            IDiskAtomFilter commandPredicate = command.predicate;

            int cql3RowCount = 0;
            rows = new ArrayList<Row>();
            // split the query range on ring token boundaries so each sub-range maps to a single replica set
            List<AbstractBounds<RowPosition>> ranges = getRestrictedRanges(command.range);
            int i = 0;

            // Secondary-index search: rows whose "birthdate" column equals val are fetched in full
            // via IdentityQueryFilter (a filter that selects every cell of the row).
            IPartitioner p = StorageService.getPartitioner();
            IndexExpression expr = new IndexExpression(ByteBufferUtil.bytes("birthdate"),
                                                       IndexOperator.EQ,
                                                       ByteBufferUtil.bytes(val));
            List<IndexExpression> clause = Arrays.asList(expr);
            IDiskAtomFilter filter = new IdentityQueryFilter();
            Range<RowPosition> range = Util.range("", "");
            List<Row> rows = cfs.search(clause, range, 100, filter);
            assertEquals(1, rows.size());
            assert rows.get(0).key.key.equals(ByteBufferUtil.bytes(key));
        }
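
The IdentityQueryFilter above returns every cell of each matching row. If only a bounded slice per row is wanted, the same cfs.search call shown above can be handed a SliceQueryFilter instead; a hypothetical variant reusing the clause and range from the snippet:

            // Illustrative only: fetch at most 10 cells per matching row instead of the whole row.
            IDiskAtomFilter sliceFilter = new SliceQueryFilter(ByteBufferUtil.EMPTY_BYTE_BUFFER,  // open start
                                                               ByteBufferUtil.EMPTY_BYTE_BUFFER,  // open finish
                                                               false,                             // forward order
                                                               10);                               // per-row cell limit
            List<Row> slicedRows = cfs.search(clause, range, 100, sliceFilter);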

                total++;
                ColumnFamily data = rawRow.cf;

                if (rowIterator.needsFiltering())
                {
                    // ask the filter for any columns needed to evaluate the clause that the index scan did not already fetch
                    IDiskAtomFilter extraFilter = filter.getExtraFilter(data);
                    if (extraFilter != null)
                    {
                        QueryPath path = new QueryPath(columnFamily);
                        ColumnFamily cf = filter.cfs.getColumnFamily(new QueryFilter(rawRow.key, path, extraFilter));
                        if (cf != null)

                total++;
                ColumnFamily data = rawRow.cf;

                if (rowIterator.needsFiltering())
                {
                    // later variant of the same scan: getExtraFilter also takes the row key, and the QueryFilter carries the query timestamp
                    IDiskAtomFilter extraFilter = filter.getExtraFilter(rawRow.key, data);
                    if (extraFilter != null)
                    {
                        ColumnFamily cf = filter.cfs.getColumnFamily(new QueryFilter(rawRow.key, name, extraFilter, filter.timestamp));
                        if (cf != null)
                            data.addAll(cf, HeapAllocator.instance);
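
Conceptually, the "extra filter" in the two snippets above is just another IDiskAtomFilter that names whichever columns the index expressions still need but the index scan did not fetch. A simplified, hypothetical sketch of that idea (not the real getExtraFilter implementation), assuming the same 1.2-era ColumnFamily and NamesQueryFilter APIs:

        // Hypothetical helper: build a names filter for expression columns missing from the fetched data.
        static IDiskAtomFilter extraFilterFor(ColumnFamily data, List<IndexExpression> expressions)
        {
            // order the names with the table's comparator, as NamesQueryFilter expects
            SortedSet<ByteBuffer> missing = new TreeSet<ByteBuffer>(data.getComparator());
            for (IndexExpression expr : expressions)
            {
                if (data.getColumn(expr.column_name) == null) // column not fetched by the index scan
                    missing.add(expr.column_name);
            }
            return missing.isEmpty() ? null : new NamesQueryFilter(missing);
        }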

        return filter;
    }

    // Convert a Thrift ColumnParent + SlicePredicate into the internal IDiskAtomFilter representation.
    private IDiskAtomFilter toInternalFilter(CFMetaData metadata, ColumnParent parent, SlicePredicate predicate)
    {
        IDiskAtomFilter filter;
        if (predicate.column_names != null)
        {
            if (metadata.isSuper())
            {
                CompositeType type = (CompositeType)metadata.comparator;
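
For the common (non-super) case, the conversion that toInternalFilter performs boils down to choosing between the two concrete filters according to which half of the Thrift SlicePredicate is set. A hedged sketch of that non-super branch only (the helper name and structure are assumptions; the real method also handles the super-column layout started above):

        // Illustrative non-super conversion of a Thrift SlicePredicate into an IDiskAtomFilter.
        static IDiskAtomFilter fromThriftPredicate(CFMetaData metadata, SlicePredicate predicate)
        {
            if (predicate.column_names != null)
            {
                // explicit column names -> NamesQueryFilter, ordered by the table comparator
                SortedSet<ByteBuffer> names = new TreeSet<ByteBuffer>(metadata.comparator);
                names.addAll(predicate.column_names);
                return new NamesQueryFilter(names);
            }
            // otherwise a slice_range -> SliceQueryFilter
            SliceRange sliceRange = predicate.slice_range;
            return new SliceQueryFilter(sliceRange.start, sliceRange.finish, sliceRange.reversed, sliceRange.count);
        }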

        org.apache.cassandra.db.ConsistencyLevel consistencyLevel = ThriftConversion.fromThrift(consistency_level);
        consistencyLevel.validateForRead(keyspace);

        List<ReadCommand> commands = new ArrayList<ReadCommand>(keys.size());
        IDiskAtomFilter filter = toInternalFilter(metadata, column_parent, predicate);

        for (ByteBuffer key: keys)
        {
            ThriftValidation.validateKey(metadata, key);
            // Note that we should not share a slice filter amongst the commands: SliceQueryFilter is not immutable
            // because of the columnCounter used by its lastCounted() method (see also SelectStatement.getSliceCommands)
            commands.add(ReadCommand.create(keyspace, key, column_parent.getColumn_family(), timestamp, filter.cloneShallow()));
        }

        return getSlice(commands, column_parent.isSetSuper_column(), consistencyLevel);
    }

            org.apache.cassandra.db.ConsistencyLevel consistencyLevel = ThriftConversion.fromThrift(consistency_level);
            consistencyLevel.validateForRead(keyspace);

            ThriftValidation.validateKey(metadata, key);

            IDiskAtomFilter filter;
            if (metadata.isSuper())
            {
                // the comparator of a super column family is a CompositeType; component 0 compares
                // super column names and component 1 compares subcolumn names, so pick the right one
                CompositeType type = (CompositeType)metadata.comparator;
                SortedSet<ByteBuffer> names = new TreeSet<ByteBuffer>(column_path.column == null ? type.types.get(0) : type.types.get(1));
                names.add(column_path.column == null ? column_path.super_column : column_path.column);

            }
            long now = System.currentTimeMillis();
            schedule(DatabaseDescriptor.getRangeRpcTimeout());
            try
            {
                // convert the Thrift predicate, scoped to the requested super column if any, into an IDiskAtomFilter
                IDiskAtomFilter filter = ThriftValidation.asIFilter(predicate, metadata, column_parent.super_column);
                rows = StorageProxy.getRangeSlice(new RangeSliceCommand(keyspace,
                                                                        column_parent.column_family,
                                                                        now,
                                                                        filter,
                                                                        bounds,

            List<Row> rows;
            long now = System.currentTimeMillis();
            schedule(DatabaseDescriptor.getRangeRpcTimeout());
            try
            {
                // no super column scope here: the predicate applies directly to the column family's cells
                IDiskAtomFilter filter = ThriftValidation.asIFilter(predicate, metadata, null);
                rows = StorageProxy.getRangeSlice(new RangeSliceCommand(keyspace, column_family, now, filter, bounds, null, range.count, true, true), consistencyLevel);
            }
            finally
            {
                release();
