Package org.apache.cassandra.db.filter

Examples of org.apache.cassandra.db.filter.IDiskAtomFilter
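As context for the excerpts below (which come from Cassandra's internal read path, roughly the 2.0/2.1 era), here is a minimal, hypothetical sketch of how an IDiskAtomFilter is typically applied. It reuses only the QueryFilter(key, cfName, filter, timestamp) constructor and ColumnFamilyStore.getColumnFamily(...) call visible in the first snippet; the class and method names are illustrative, not part of Cassandra.

import org.apache.cassandra.db.ColumnFamily;
import org.apache.cassandra.db.ColumnFamilyStore;
import org.apache.cassandra.db.DecoratedKey;
import org.apache.cassandra.db.filter.IDiskAtomFilter;
import org.apache.cassandra.db.filter.QueryFilter;

// Illustrative sketch only; not taken from the excerpts below.
public final class IDiskAtomFilterExample
{
    // Wraps a column-level filter in a QueryFilter and reads one partition through the store.
    public static ColumnFamily readOneRow(ColumnFamilyStore cfs,
                                          DecoratedKey key,
                                          String cfName,
                                          IDiskAtomFilter columnFilter,
                                          long timestamp)
    {
        QueryFilter queryFilter = new QueryFilter(key, cfName, columnFilter, timestamp);
        return cfs.getColumnFamily(queryFilter); // may be null when nothing matches
    }
}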


                total++;
                ColumnFamily data = rawRow.cf;

                if (rowIterator.needsFiltering())
                {
                    // The initial read may not have fetched everything the query needs;
                    // getExtraFilter() returns a filter covering the missing columns, which
                    // are read separately and merged into the row below.
                    IDiskAtomFilter extraFilter = filter.getExtraFilter(rawRow.key, data);
                    if (extraFilter != null)
                    {
                        ColumnFamily cf = filter.cfs.getColumnFamily(new QueryFilter(rawRow.key, name, extraFilter, filter.timestamp));
                        if (cf != null)
                            data.addAll(cf);
View Full Code Here


            protected Row getReduced()
            {
                // First check if this row is in the rowCache. If it is and it covers our filter, we can skip the rest
                ColumnFamily cached = cfs.getRawCachedRow(key);
                IDiskAtomFilter filter = range.columnFilter(key.key);

                if (cached == null || !cfs.isFilterFullyCoveredBy(filter, cached, now))
                {
                    // not cached: collate
                    QueryFilter.collateOnDiskAtom(returnCF, colIters, filter, gcBefore, now);
View Full Code Here

        String columnFamily = in.readUTF();
        long timestamp = in.readLong();

        CFMetaData metadata = Schema.instance.getCFMetaData(keyspace, columnFamily);

        // The column filter is (de)serialized with a serializer obtained from the table's
        // comparator, since the encoding of cell names depends on that comparator.
        IDiskAtomFilter predicate = metadata.comparator.diskAtomFilterSerializer().deserialize(in, version);

        List<IndexExpression> rowFilter;
        int filterCount = in.readInt();
        rowFilter = new ArrayList<>(filterCount);
        for (int i = 0; i < filterCount; i++)
View Full Code Here

        size += TypeSizes.NATIVE.sizeof(rsc.columnFamily);
        size += TypeSizes.NATIVE.sizeof(rsc.timestamp);

        CFMetaData metadata = Schema.instance.getCFMetaData(rsc.keyspace, rsc.columnFamily);

        IDiskAtomFilter filter = rsc.predicate;

        size += metadata.comparator.diskAtomFilterSerializer().serializedSize(filter, version);

        if (rsc.rowFilter == null)
        {
View Full Code Here

        return filter;
    }

    private IDiskAtomFilter toInternalFilter(CFMetaData metadata, ColumnParent parent, SlicePredicate predicate)
    {
        IDiskAtomFilter filter;

        // An explicit list of column names maps to a names-based filter; super column
        // families need their composite names translated first.
        if (predicate.column_names != null)
        {
            if (metadata.isSuper())
            {
View Full Code Here

        org.apache.cassandra.db.ConsistencyLevel consistencyLevel = ThriftConversion.fromThrift(consistency_level);
        consistencyLevel.validateForRead(keyspace);

        List<ReadCommand> commands = new ArrayList<ReadCommand>(keys.size());
        IDiskAtomFilter filter = toInternalFilter(metadata, column_parent, predicate);

        for (ByteBuffer key: keys)
        {
            ThriftValidation.validateKey(metadata, key);
            // Note that we should not share a slice filter amongst the commands (hence the cloneShallow()
            // below): SliceQueryFilter is not immutable, because of the columnCounter used by its
            // lastCounted() method (see also SelectStatement.getSliceCommands).
            commands.add(ReadCommand.create(keyspace, key, column_parent.getColumn_family(), timestamp, filter.cloneShallow()));
        }

        return getSlice(commands, column_parent.isSetSuper_column(), consistencyLevel);
    }
View Full Code Here
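Because SliceQueryFilter carries mutable counting state (the columnCounter read back through lastCounted()), the snippet above clones the filter per key instead of sharing one instance across commands. The following is a hedged, self-contained sketch of that same pattern, reusing only the ReadCommand.create(...) and cloneShallow() calls shown above; the helper class and method names are hypothetical.

import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;

import org.apache.cassandra.db.ReadCommand;
import org.apache.cassandra.db.filter.IDiskAtomFilter;

// Hypothetical helper mirroring the snippet above.
public final class PerKeyReadCommands
{
    // Builds one ReadCommand per key, giving each command its own shallow copy of the filter.
    public static List<ReadCommand> build(String keyspace,
                                          String cfName,
                                          List<ByteBuffer> keys,
                                          IDiskAtomFilter prototype,
                                          long timestamp)
    {
        List<ReadCommand> commands = new ArrayList<ReadCommand>(keys.size());
        for (ByteBuffer key : keys)
            commands.add(ReadCommand.create(keyspace, key, cfName, timestamp, prototype.cloneShallow()));
        return commands;
    }
}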

            org.apache.cassandra.db.ConsistencyLevel consistencyLevel = ThriftConversion.fromThrift(consistency_level);
            consistencyLevel.validateForRead(keyspace);

            ThriftValidation.validateKey(metadata, key);

            IDiskAtomFilter filter;
            if (metadata.isSuper())
            {
                // The cell name is built from the matching component of the super CF comparator:
                // the super column name when a whole super column is requested, otherwise the sub-column name.
                CellNameType columnType = new SimpleDenseCellNameType(metadata.comparator.subtype(column_path.column == null ? 0 : 1));
                SortedSet<CellName> names = new TreeSet<CellName>(columnType);
                names.add(columnType.cellFromByteBuffer(column_path.column == null ? column_path.super_column : column_path.column));
View Full Code Here
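Once the sorted set of cell names has been assembled (as in the snippet above), a names-based IDiskAtomFilter can be built from it. A minimal sketch, assuming the 2.0/2.1-era NamesQueryFilter(SortedSet<CellName>) constructor from org.apache.cassandra.db.filter; the helper class is illustrative only.

import java.util.SortedSet;
import java.util.TreeSet;

import org.apache.cassandra.db.composites.CellName;
import org.apache.cassandra.db.composites.CellNameType;
import org.apache.cassandra.db.filter.IDiskAtomFilter;
import org.apache.cassandra.db.filter.NamesQueryFilter;

// Illustrative helper; the TreeSet construction mirrors the snippet above.
public final class NamesFilterExample
{
    // Builds a filter that selects exactly the given cell names.
    public static IDiskAtomFilter namesFilter(CellNameType type, Iterable<CellName> cellNames)
    {
        SortedSet<CellName> names = new TreeSet<CellName>(type); // the comparator orders the names
        for (CellName name : cellNames)
            names.add(name);
        return new NamesQueryFilter(names);
    }
}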

            }
            long now = System.currentTimeMillis();
            schedule(DatabaseDescriptor.getRangeRpcTimeout());
            try
            {
                IDiskAtomFilter filter = ThriftValidation.asIFilter(predicate, metadata, column_parent.super_column);
                rows = StorageProxy.getRangeSlice(new RangeSliceCommand(keyspace,
                                                                        column_parent.column_family,
                                                                        now,
                                                                        filter,
                                                                        bounds,
View Full Code Here

            List<Row> rows;
            long now = System.currentTimeMillis();
            schedule(DatabaseDescriptor.getRangeRpcTimeout());
            try
            {
                IDiskAtomFilter filter = ThriftValidation.asIFilter(predicate, metadata, null);
                rows = StorageProxy.getRangeSlice(new RangeSliceCommand(keyspace, column_family, now, filter, bounds, null, range.count, true, true), consistencyLevel);
            }
            finally
            {
                release();
View Full Code Here

            IPartitioner p = StorageService.getPartitioner();
            // The scanned range runs from the requested start key to the end of the ring.
            AbstractBounds<RowPosition> bounds = new Bounds<RowPosition>(RowPosition.forKey(index_clause.start_key, p),
                                                                         p.getMinimumToken().minKeyBound());

            IDiskAtomFilter filter = ThriftValidation.asIFilter(column_predicate, metadata, column_parent.super_column);
            long now = System.currentTimeMillis();
            RangeSliceCommand command = new RangeSliceCommand(keyspace,
                                                              column_parent.column_family,
                                                              now,
                                                              filter,
View Full Code Here
