Package org.apache.cassandra.db.filter

Examples of org.apache.cassandra.db.filter.ColumnSlice


        int[] values = new int[]{ 1, 2, 3, 5, 9 };

        for (int i = 0; i < values.length; ++i)
            map.addColumn(new BufferCell(type.makeCellName(values[reversed ? values.length - 1 - i : i])));

        assertSame(new int[]{ 3, 2, 1 }, map.reverseIterator(new ColumnSlice[]{ new ColumnSlice(type.make(3), Composites.EMPTY) }));
        assertSame(new int[]{ 3, 2, 1 }, map.reverseIterator(new ColumnSlice[]{ new ColumnSlice(type.make(4), Composites.EMPTY) }));

        assertSame(map.iterator(), map.iterator(ColumnSlice.ALL_COLUMNS_ARRAY));
    }
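The test above exercises reversed slice iteration: with columns 1, 2, 3, 5 and 9 in the map, a reversed slice starting at 3, or at 4 (a name falling between stored columns), yields 3, 2, 1, while a Composites.EMPTY bound means "run to the end of the row". A minimal standalone sketch of the same semantics over a java.util.TreeSet (reverseSlice and its null sentinel are illustrative, not Cassandra API):

    import java.util.NavigableSet;
    import java.util.TreeSet;

    public class ReverseSliceSketch
    {
        // Columns <= start, in descending order. A null start stands in for
        // Composites.EMPTY, i.e. "begin at the very end of the row".
        static NavigableSet<Integer> reverseSlice(NavigableSet<Integer> columns, Integer start)
        {
            return start == null
                 ? columns.descendingSet()
                 : columns.headSet(start, true).descendingSet();
        }

        public static void main(String[] args)
        {
            NavigableSet<Integer> columns = new TreeSet<>();
            for (int v : new int[]{ 1, 2, 3, 5, 9 })
                columns.add(v);

            System.out.println(reverseSlice(columns, 3));    // [3, 2, 1]
            System.out.println(reverseSlice(columns, 4));    // [3, 2, 1], 4 is not a stored name
            System.out.println(reverseSlice(columns, null)); // [9, 5, 3, 2, 1]
        }
    }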


        for (ByteBuffer name : toRead)
        {
            ColumnNameBuilder prefix = updatePrefixFor(name, clusteringPrefix);
            ByteBuffer start = prefix.copy().add(name).build();
            ByteBuffer finish = prefix.copy().add(name).buildAsEndOfRange();
            slices[i++] = new ColumnSlice(start, finish);
        }

        List<ReadCommand> commands = new ArrayList<ReadCommand>(partitionKeys.size());
        long now = System.currentTimeMillis();
        for (ByteBuffer key : partitionKeys)
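The loop above builds one ColumnSlice per requested column name: build() yields the smallest composite beginning with that name, and buildAsEndOfRange() the upper bound just past it, so the slice covers every cell stored under the name, collection elements included. A rough standalone analogue over string-encoded names (the "\uffff" sentinel, key encoding, and data are invented for illustration):

    import java.util.SortedMap;
    import java.util.TreeMap;

    public class PerNameSliceSketch
    {
        public static void main(String[] args)
        {
            // Cells keyed by "rowPrefix:columnName[:collectionKey]".
            TreeMap<String, String> cells = new TreeMap<>();
            cells.put("r1:age",    "42");
            cells.put("r1:tags:a", "x");
            cells.put("r1:tags:b", "y");
            cells.put("r1:zip",    "90210");

            for (String name : new String[]{ "age", "tags" })
            {
                String start  = "r1:" + name;            // ~ builder.add(name).build()
                String finish = "r1:" + name + "\uffff"; // ~ buildAsEndOfRange(): just past the name
                SortedMap<String, String> slice = cells.subMap(start, finish);
                System.out.println(name + " -> " + slice);
            }
            // age  -> {r1:age=42}
            // tags -> {r1:tags:a=x, r1:tags:b=y}
        }
    }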

                        logger.trace("Adding index hit to current row for {}", indexComparator.getString(cell.name()));

                        // We always query the whole CQL3 row. In the case where the original filter was a name filter this might be
                        // slightly wasteful, but this probably doesn't matter in practice and it simplifies things.
                        ColumnSlice dataSlice = new ColumnSlice(start, entry.indexedEntryPrefix.end());
                        // If the table has static columns, we must fetch them as well, since they may need to be returned.
                        // Note that this is potentially wasteful for 2 reasons:
                        //  1) we will retrieve the static parts for each indexed row, even if we have more than one row in
                        //     the same partition. If we were to group data queries to rows on the same slice, which would
                        //     speed up things in general, we would also optimize here since we would fetch static columns only
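The comment describes a trade-off: to return static columns with each indexed hit, the searcher widens the query beyond dataSlice, re-reading the static part once per hit. A toy sketch of that over-fetch, assuming a string encoding where static cells sort under their own prefix (all names and the "\uffff" sentinel are invented):

    import java.util.SortedMap;
    import java.util.TreeMap;

    public class StaticFetchSketch
    {
        public static void main(String[] args)
        {
            // "static:" cells sort before any clustering prefix in this toy encoding.
            TreeMap<String, String> partition = new TreeMap<>();
            partition.put("static:created", "2014-01-01");
            partition.put("t1:value", "a");
            partition.put("t2:value", "b");

            // Two indexed hits in the same partition: each fetch re-reads the
            // static part, which is the duplication the comment calls wasteful.
            for (String clustering : new String[]{ "t1", "t2" })
            {
                SortedMap<String, String> statics = partition.subMap("static:", "static:\uffff");
                SortedMap<String, String> row = partition.subMap(clustering + ":", clustering + ":\uffff");
                System.out.println(clustering + " -> " + statics + " + " + row);
            }
        }
    }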

                int compare = metadata.comparator.compare(start, finish);
                if (!request.reversed && compare > 0)
                    throw new InvalidRequestException(String.format("Column slice at index %d had start greater than finish", i));
                else if (request.reversed && compare < 0)
                    throw new InvalidRequestException(String.format("Reversed column slice at index %d had start less than finish", i));
                slices[i] = new ColumnSlice(start, finish);
            }

            ColumnSlice[] deoverlapped = ColumnSlice.deoverlapSlices(slices, request.reversed ? metadata.comparator.reverseComparator() : metadata.comparator);
            SliceQueryFilter filter = new SliceQueryFilter(deoverlapped, request.reversed, request.count);
            ThriftValidation.validateKey(metadata, request.key);
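Before constructing the SliceQueryFilter, deoverlapSlices normalizes the user-supplied slices into sorted, non-overlapping ranges, using the reverse comparator when the request is reversed. A minimal sketch of the merging idea over plain int ranges (the Slice record is a stand-in for ColumnSlice; composites and reversed order are ignored, and at least one slice is assumed):

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.Comparator;
    import java.util.List;

    public class DeoverlapSketch
    {
        // An inclusive [start, finish] range standing in for ColumnSlice.
        record Slice(int start, int finish) {}

        // Sort by start, then merge any ranges that overlap -- the same idea
        // as ColumnSlice.deoverlapSlices.
        static List<Slice> deoverlap(Slice[] slices)
        {
            Slice[] sorted = slices.clone();
            Arrays.sort(sorted, Comparator.comparingInt(Slice::start));

            List<Slice> result = new ArrayList<>();
            Slice current = sorted[0];
            for (int i = 1; i < sorted.length; i++)
            {
                Slice next = sorted[i];
                if (next.start() <= current.finish()) // overlap: extend the current range
                    current = new Slice(current.start(), Math.max(current.finish(), next.finish()));
                else
                {
                    result.add(current);
                    current = next;
                }
            }
            result.add(current);
            return result;
        }

        public static void main(String[] args)
        {
            System.out.println(deoverlap(new Slice[]{ new Slice(1, 10), new Slice(5, 12), new Slice(16, 20) }));
            // [Slice[start=1, finish=12], Slice[start=16, finish=20]]
        }
    }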

        }
    }

    public ColumnSlice slice()
    {
        return new ColumnSlice(start(), end());
    }

                    if (!request.reversed && compare > 0)
                        throw new InvalidRequestException(String.format("Column slice at index %d had start greater than finish", i));
                    else if (request.reversed && compare < 0)
                        throw new InvalidRequestException(String.format("Reversed column slice at index %d had start less than finish", i));
                }
                slices[i] = new ColumnSlice(start, finish);
            }
            ColumnSlice[] deoverlapped = ColumnSlice.deoverlapSlices(slices, request.reversed ? metadata.comparator.reverseComparator() : metadata.comparator);
            SliceQueryFilter filter = new SliceQueryFilter(deoverlapped, request.reversed, request.count);
            ThriftValidation.validateKey(metadata, request.key);
            commands.add(ReadCommand.create(keyspace, request.key, request.column_parent.getColumn_family(), System.currentTimeMillis(), filter));

            if (currentSlice == null)
            {
                if (idx >= slices.length)
                    return endOfData();

                ColumnSlice slice = slices[idx++];
                if (forwards)
                    currentSlice = slice(btree, comparator, slice.start, slice.finish, true);
                else
                    currentSlice = slice(btree, comparator, slice.finish, slice.start, false);
            }
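This fragment advances through the slice array lazily: once the current slice is exhausted, the next one is opened, with start and finish swapped for backwards iteration. A standalone sketch of that control flow over a TreeSet (the real code walks a BTree, and for reversed queries also expects the slice array itself in reverse order; names here are illustrative):

    import java.util.Iterator;
    import java.util.NavigableSet;
    import java.util.NoSuchElementException;
    import java.util.TreeSet;

    public class MultiSliceIteratorSketch implements Iterator<Integer>
    {
        private final NavigableSet<Integer> columns;
        private final int[][] slices;   // each {start, finish}, inclusive, in ascending order
        private final boolean forwards;
        private int idx;
        private Iterator<Integer> currentSlice; // null until the next slice is opened

        MultiSliceIteratorSketch(NavigableSet<Integer> columns, int[][] slices, boolean forwards)
        {
            this.columns = columns;
            this.slices = slices;
            this.forwards = forwards;
        }

        public boolean hasNext()
        {
            while (currentSlice == null || !currentSlice.hasNext())
            {
                if (idx >= slices.length)
                    return false; // ~ endOfData()
                int[] s = slices[idx++];
                NavigableSet<Integer> range = columns.subSet(s[0], true, s[1], true);
                currentSlice = forwards ? range.iterator() : range.descendingIterator();
            }
            return true;
        }

        public Integer next()
        {
            if (!hasNext())
                throw new NoSuchElementException();
            return currentSlice.next();
        }

        public static void main(String[] args)
        {
            NavigableSet<Integer> columns = new TreeSet<>();
            for (int v = 1; v <= 20; v++)
                columns.add(v);
            Iterator<Integer> it = new MultiSliceIteratorSketch(columns, new int[][]{ {1, 3}, {16, 18} }, false);
            while (it.hasNext())
                System.out.print(it.next() + " "); // 3 2 1 18 17 16
        }
    }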

        cf = cfs.getColumnFamily(QueryFilter.getSliceFilter(dk(key), CFNAME, b(17), b(18), false, Integer.MAX_VALUE, System.currentTimeMillis()));
        rt = rangeTombstones(cf);
        assertEquals(1, rt.size());

        ColumnSlice[] slices = new ColumnSlice[]{ new ColumnSlice(b(1), b(10)), new ColumnSlice(b(16), b(20)) };
        IDiskAtomFilter sqf = new SliceQueryFilter(slices, false, Integer.MAX_VALUE);
        cf = cfs.getColumnFamily(new QueryFilter(dk(key), CFNAME, sqf, System.currentTimeMillis()));
        rt = rangeTombstones(cf);
        assertEquals(2, rt.size());
    }
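The assertions above check that a multi-slice filter surfaces every range tombstone intersecting any of its slices: the single slice from 17 to 18 sees one tombstone, while the pair covering 1 to 10 and 16 to 20 sees two. A toy sketch of that intersection count (the tombstone bounds are invented, since the deletions happen outside the quoted fragment):

    public class TombstoneSliceSketch
    {
        // Inclusive int ranges standing in for range tombstones and column slices.
        record Range(int start, int finish)
        {
            boolean intersects(Range other)
            {
                return start <= other.finish() && other.start() <= finish;
            }
        }

        // Counts tombstones that overlap at least one slice.
        static int matching(Range[] tombstones, Range[] slices)
        {
            int hits = 0;
            for (Range t : tombstones)
                for (Range s : slices)
                    if (t.intersects(s)) { hits++; break; }
            return hits;
        }

        public static void main(String[] args)
        {
            Range[] tombstones = { new Range(5, 8), new Range(17, 18) };

            // One narrow slice sees one tombstone; two slices see both.
            System.out.println(matching(tombstones, new Range[]{ new Range(17, 18) }));                   // 1
            System.out.println(matching(tombstones, new Range[]{ new Range(1, 10), new Range(16, 20) })); // 2
        }
    }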

        int i = 0;
        for (ByteBuffer name : toRead)
        {
            ByteBuffer start = builder.copy().add(name).build();
            ByteBuffer finish = builder.copy().add(name).buildAsEndOfRange();
            slices[i++] = new ColumnSlice(start, finish);
        }

        List<ReadCommand> commands = new ArrayList<ReadCommand>(keys.size());
        for (ByteBuffer key : keys)
            commands.add(new SliceFromReadCommand(keyspace(),

        int i = 0;
        for (ByteBuffer name : toRead)
        {
            ByteBuffer start = clusteringPrefix.copy().add(name).build();
            ByteBuffer finish = clusteringPrefix.copy().add(name).buildAsEndOfRange();
            slices[i++] = new ColumnSlice(start, finish);
        }

        List<ReadCommand> commands = new ArrayList<ReadCommand>(partitionKeys.size());
        long now = System.currentTimeMillis();
        for (ByteBuffer key : partitionKeys)
