Package org.apache.cassandra.db.marshal

Examples of org.apache.cassandra.db.marshal.CompositeType
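Before the excerpts, here is a minimal, self-contained sketch of the core CompositeType operations they rely on: obtaining an instance, composing a composite name with a Builder, and decomposing it again with split(). The excerpts themselves only show builder(), build(), buildAsEndOfRange(), split() and getString(); the CompositeType.getInstance(List) factory, the class name and the component values below are assumptions made for illustration.

import java.nio.ByteBuffer;
import java.util.Arrays;

import org.apache.cassandra.db.marshal.AbstractType;
import org.apache.cassandra.db.marshal.CompositeType;
import org.apache.cassandra.db.marshal.Int32Type;
import org.apache.cassandra.db.marshal.UTF8Type;
import org.apache.cassandra.utils.ByteBufferUtil;

public class CompositeTypeSketch
{
    public static void main(String[] args)
    {
        // A hypothetical two-component comparator: (text, int).
        CompositeType type = CompositeType.getInstance(
                Arrays.<AbstractType<?>>asList(UTF8Type.instance, Int32Type.instance));

        // Compose a composite cell name from its components.
        CompositeType.Builder builder = type.builder();
        builder.add(ByteBufferUtil.bytes("ck1"));
        builder.add(ByteBufferUtil.bytes(42));
        ByteBuffer name = builder.build();

        // Decompose it again: split() returns one ByteBuffer per component.
        ByteBuffer[] components = type.split(name);
        System.out.println(type.getString(name));   // human-readable rendering
        System.out.println(components.length);      // 2

        // buildAsEndOfRange() yields an upper bound that sorts after every name
        // sharing the same prefix; the searcher excerpt below uses it for slice ends.
        ByteBuffer end = type.builder().add(ByteBufferUtil.bytes("ck1")).buildAsEndOfRange();
    }
}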


        IDiskAtomFilter filter;
        if (predicate.column_names != null)
        {
            if (metadata.isSuper())
            {
                // For a super column family the comparator is a CompositeType: component 0
                // orders super column names, component 1 orders sub-column names.
                CompositeType type = (CompositeType)metadata.comparator;
                SortedSet<ByteBuffer> s = new TreeSet<ByteBuffer>(parent.isSetSuper_column() ? type.types.get(1) : type.types.get(0));
                s.addAll(predicate.column_names);
                filter = SuperColumns.fromSCNamesFilter(type, parent.bufferForSuper_column(), new NamesQueryFilter(s));
            }
            else


            ThriftValidation.validateKey(metadata, key);

            IDiskAtomFilter filter;
            if (metadata.isSuper())
            {
                CompositeType type = (CompositeType)metadata.comparator;
                // column == null means the requested name is a super column name (component 0);
                // otherwise it is a sub-column name (component 1).
                SortedSet<ByteBuffer> names = new TreeSet<ByteBuffer>(column_path.column == null ? type.types.get(0) : type.types.get(1));
                names.add(column_path.column == null ? column_path.super_column : column_path.column);
                filter = SuperColumns.fromSCNamesFilter(type, column_path.column == null ? null : column_path.bufferForSuper_column(), new NamesQueryFilter(names));
            }
            else

    public ColumnToCollectionType getCollectionType()
    {
        if (!hasCollections)
            return null;

        // When the CF has collections, the collection map is always the last component
        // of its CompositeType comparator.
        CompositeType composite = (CompositeType)cfm.comparator;
        return (ColumnToCollectionType)composite.types.get(composite.types.size() - 1);
    }

        }
    }

    private static Map<ByteBuffer, List<Column>> groupSuperColumns(ColumnFamily scf)
    {
        CompositeType type = (CompositeType)scf.getComparator();
        // The order of insertion matters!
        Map<ByteBuffer, List<Column>> scMap = new LinkedHashMap<ByteBuffer, List<Column>>();

        ByteBuffer scName = null;
        List<Column> subColumns = null;
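The loop body of groupSuperColumns is truncated above. As a hedged sketch of the grouping idea (a reconstruction, not the verbatim continuation): in the internal representation of a super column family, each composite cell name splits into a super column name (component 0) followed by a sub-column name (component 1), and consecutive cells sharing component 0 belong to the same group. The class and method names below are made up for illustration.

import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

import org.apache.cassandra.db.marshal.CompositeType;

final class SuperColumnGroupingSketch
{
    // Hypothetical illustration: group composite cell names by their first component
    // (the super column name), preserving insertion order as the excerpt above does.
    static Map<ByteBuffer, List<ByteBuffer>> groupBySuperColumn(CompositeType type, Iterable<ByteBuffer> cellNames)
    {
        Map<ByteBuffer, List<ByteBuffer>> groups = new LinkedHashMap<ByteBuffer, List<ByteBuffer>>();
        for (ByteBuffer name : cellNames)
        {
            ByteBuffer[] parts = type.split(name);   // [super column name, sub-column name]
            List<ByteBuffer> subNames = groups.get(parts[0]);
            if (subNames == null)
            {
                subNames = new ArrayList<ByteBuffer>();
                groups.put(parts[0], subNames);
            }
            subNames.add(parts[1]);
        }
        return groups;
    }
}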

            {
                // Sparse case: group columns into a cqlRow when their composite prefix is equal
                if (row.cf == null)
                    continue;

                CompositeType composite = (CompositeType)cfDef.cfm.comparator;
                int last = composite.types.size() - 1;

                ByteBuffer[] previous = null;
                Map<ByteBuffer, IColumn> group = new HashMap<ByteBuffer, IColumn>();
                for (IColumn c : row.cf)
                {
                    if (c.isMarkedForDelete())
                        continue;

                    ByteBuffer[] current = composite.split(c.name());
                    // If current differs from previous, we've just finished a group
                    if (previous != null && !isSameRow(previous, current))
                    {
                        cqlRows.add(handleGroup(selection, row.key.key, previous, group, schema));
                        group = new HashMap<ByteBuffer, IColumn>();
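The sparse-case excerpt above calls an isSameRow helper that is not shown. A plausible reconstruction (a sketch, not the verbatim Cassandra method): two split composite names belong to the same CQL3 row when every component except the last one, which carries the column name, is equal.

import java.nio.ByteBuffer;

final class CompositePrefixCheck
{
    // Hypothetical reconstruction of the prefix check: assumes both names were split
    // with the same CompositeType, so the component arrays have equal length.
    static boolean isSameRow(ByteBuffer[] previous, ByteBuffer[] current)
    {
        for (int i = 0; i < previous.length - 1; i++)
            if (!previous[i].equals(current[i]))
                return false;
        return true;
    }
}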

        this.cfm = cfm;
        this.key = new Name(getKeyId(cfm), Name.Kind.KEY_ALIAS, cfm.getKeyValidator());
        if (cfm.comparator instanceof CompositeType)
        {
            this.isComposite = true;
            CompositeType composite = (CompositeType)cfm.comparator;
            if (cfm.getColumn_metadata().isEmpty())
            {
                // "dense" composite
                this.isCompact = true;
                for (int i = 0; i < composite.types.size(); i++)

        cfs.truncateBlocking();

        ByteBuffer rowKey = ByteBufferUtil.bytes("k1");
        ByteBuffer clusterKey = ByteBufferUtil.bytes("ck1");
        ByteBuffer colName = ByteBufferUtil.bytes("col1");
        CompositeType baseComparator = (CompositeType)cfs.getComparator();
        CompositeType.Builder builder = baseComparator.builder();
        builder.add(clusterKey);
        builder.add(colName);
        ByteBuffer compositeName = builder.build();

        ByteBuffer val1 = ByteBufferUtil.bytes("v1");

        cfs.truncateBlocking();

        ByteBuffer rowKey = ByteBufferUtil.bytes("k1");
        ByteBuffer clusterKey = ByteBufferUtil.bytes("ck1");
        ByteBuffer colName = ByteBufferUtil.bytes("col1");
        CompositeType baseComparator = (CompositeType)cfs.getComparator();
        CompositeType.Builder builder = baseComparator.builder();
        builder.add(clusterKey);
        builder.add(colName);
        ByteBuffer compositeName = builder.build();

        ByteBuffer val1 = ByteBufferUtil.bytes("v2");

         * indexed row.
         */
        ByteBuffer startKey = range.left instanceof DecoratedKey ? ((DecoratedKey)range.left).key : ByteBufferUtil.EMPTY_BYTE_BUFFER;
        ByteBuffer endKey = range.right instanceof DecoratedKey ? ((DecoratedKey)range.right).key : ByteBufferUtil.EMPTY_BYTE_BUFFER;

        final CompositeType baseComparator = (CompositeType)baseCfs.getComparator();
        final CompositeType indexComparator = (CompositeType)index.getIndexCfs().getComparator();

        final ByteBuffer startPrefix;
        if (startKey.remaining() > 0)
        {
            CompositeType.Builder builder = indexComparator.builder().add(startKey);
            // If the user filter is a slice, narrow the start of the scan with its start components.
            // For a names filter we have no choice but to query from the beginning of the key, which can be highly inefficient.
            if (filter.originalFilter() instanceof SliceQueryFilter)
            {
                ByteBuffer[] components = baseComparator.split(((SliceQueryFilter)filter.originalFilter()).start());
                for (int i = 0; i < Math.min(prefixSize, components.length); ++i)
                    builder.add(components[i]);
            }
            startPrefix = builder.build();
        }
        else
        {
            startPrefix = ByteBufferUtil.EMPTY_BYTE_BUFFER;
        }

        final ByteBuffer endPrefix;
        if (endKey.remaining() > 0)
        {
            CompositeType.Builder builder = indexComparator.builder().add(endKey);
            // If the user filter is a slice, narrow the end of the scan with its finish components.
            // For a names filter we have no choice but to query until the end of the key, which can be highly inefficient.
            if (filter.originalFilter() instanceof SliceQueryFilter)
            {
                ByteBuffer[] components = baseComparator.split(((SliceQueryFilter)filter.originalFilter()).finish());
                for (int i = 0; i < Math.min(prefixSize, components.length); ++i)
                    builder.add(components[i]);
            }
            endPrefix = builder.buildAsEndOfRange();
        }
        else
        {
            endPrefix = ByteBufferUtil.EMPTY_BYTE_BUFFER;
        }

        // We will need to filter clustering keys based on the user filter. If
        // it is a names filter, we are really interested in the clustering
        // part, not the actual column name (NOTE: this is a hack that assumes CQL3).
        final SliceQueryFilter originalFilter;
        if (filter.originalFilter() instanceof SliceQueryFilter)
        {
            originalFilter = (SliceQueryFilter)filter.originalFilter();
        }
        else
        {
            ByteBuffer first = ((NamesQueryFilter)filter.originalFilter()).columns.iterator().next();
            ByteBuffer[] components = baseComparator.split(first);
            CompositeType.Builder builder = baseComparator.builder();
            // Add all components except the last one, since it's the column name
            for (int i = 0; i < components.length - 1; i++)
                builder.add(components[i]);
            originalFilter = new SliceQueryFilter(builder.copy().build(), builder.copy().buildAsEndOfRange(), false, Integer.MAX_VALUE);
        }

        return new ColumnFamilyStore.AbstractScanIterator()
        {
            private ByteBuffer lastSeenPrefix = startPrefix;
            private ArrayDeque<IColumn> indexColumns;
            private final QueryPath path = new QueryPath(baseCfs.columnFamily);
            private int columnsRead = Integer.MAX_VALUE;
            private int limit = ((SliceQueryFilter)filter.initialFilter()).count;
            private int columnsCount = 0;

            private int meanColumns = Math.max(index.getIndexCfs().getMeanColumns(), 1);
            // We shouldn't fetch only 1 row as this provides buggy paging in case the first row doesn't satisfy all clauses
            private final int rowsPerQuery = Math.max(Math.min(filter.maxRows(), filter.maxColumns() / meanColumns), 2);

            public boolean needsFiltering()
            {
                return false;
            }

            private Row makeReturn(DecoratedKey key, ColumnFamily data)
            {
                if (data == null)
                    return endOfData();

                assert key != null;
                return new Row(key, data);
            }

            protected Row computeNext()
            {
                /*
                 * Our internal index code is wired toward internal rows, so we need to accumulate all results for a given
                 * row before returning from this method. Unfortunately, that means this method has to do what
                 * CFS.filter does for KeysIndex.
                 */
                DecoratedKey currentKey = null;
                ColumnFamily data = null;

                while (true)
                {
                    // Did we get more columns than needed to respect the user limit?
                    // (but we still need to return what has already been fetched)
                    if (columnsCount >= limit)
                        return makeReturn(currentKey, data);

                    if (indexColumns == null || indexColumns.isEmpty())
                    {
                        if (columnsRead < rowsPerQuery)
                        {
                            logger.trace("Read only {} (< {}) last page through, must be done", columnsRead, rowsPerQuery);
                            return makeReturn(currentKey, data);
                        }

                        if (logger.isTraceEnabled())
                            logger.trace("Scanning index {} starting with {}",
                                         ((AbstractSimplePerColumnSecondaryIndex)index).expressionString(primary), indexComparator.getString(startPrefix));

                        QueryFilter indexFilter = QueryFilter.getSliceFilter(indexKey,
                                                                             new QueryPath(index.getIndexCfs().getColumnFamilyName()),
                                                                             lastSeenPrefix,
                                                                             endPrefix,
                                                                             false,
                                                                             rowsPerQuery);
                        ColumnFamily indexRow = index.getIndexCfs().getColumnFamily(indexFilter);
                        if (indexRow == null)
                            return makeReturn(currentKey, data);

                        Collection<IColumn> sortedColumns = indexRow.getSortedColumns();
                        columnsRead = sortedColumns.size();
                        indexColumns = new ArrayDeque<IColumn>(sortedColumns);
                        IColumn firstColumn = sortedColumns.iterator().next();

                        // Paging is racy, so it is possible the first column of a page is not the last seen one.
                        if (lastSeenPrefix != startPrefix && lastSeenPrefix.equals(firstColumn.name()))
                        {
                            // skip the row we already saw w/ the last page of results
                            indexColumns.poll();
                            logger.trace("Skipping {}", indexComparator.getString(firstColumn.name()));
                        }
                        else if (range instanceof Range && !indexColumns.isEmpty() && firstColumn.name().equals(startPrefix))
                        {
                            // skip key excluded by range
                            indexColumns.poll();
                            logger.trace("Skipping first key as range excludes it");
                        }
                    }

                    while (!indexColumns.isEmpty() && columnsCount <= limit)
                    {
                        IColumn column = indexColumns.poll();
                        lastSeenPrefix = column.name();
                        if (column.isMarkedForDelete())
                        {
                            logger.trace("skipping {}", column.name());
                            continue;
                        }

                        ByteBuffer[] components = indexComparator.split(lastSeenPrefix);
                        DecoratedKey dk = baseCfs.partitioner.decorateKey(components[0]);

                        // Are we done for this row?
                        if (currentKey == null)
                        {
                            currentKey = dk;
                        }
                        else if (!currentKey.equals(dk))
                        {
                            DecoratedKey previousKey = currentKey;
                            currentKey = dk;

                            // We're done with the previous row, return it if it had data, continue otherwise
                            indexColumns.addFirst(column);
                            if (data == null)
                                continue;
                            else
                                return makeReturn(previousKey, data);
                        }

                        if (!range.right.isMinimum(baseCfs.partitioner) && range.right.compareTo(dk) < 0)
                        {
                            logger.trace("Reached end of assigned scan range");
                            return endOfData();
                        }

                        if (!range.contains(dk))
                        {
                            logger.debug("Skipping entry {} outside of assigned scan range", dk.token);
                            continue;
                        }

                        logger.trace("Adding index hit to current row for {}", indexComparator.getString(lastSeenPrefix));
                        // For sparse composites, we're fine querying the whole logical row.
                        // Obviously, if this index is used for anything else, that might be inefficient.
                        CompositeType.Builder builder = baseComparator.builder();
                        for (int i = 0; i < prefixSize; i++)
                            builder.add(components[i + 1]);
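To make the entry-decoding step at the end of computeNext easier to follow, here is a condensed restatement of it (a sketch under the same assumptions as the excerpt: component 0 of an index entry name is the base-table partition key, and the following prefixSize components are the clustering prefix; the class and method names are made up).

import java.nio.ByteBuffer;

import org.apache.cassandra.db.marshal.CompositeType;

final class IndexEntryDecodeSketch
{
    // Rebuild the base-table composite prefix from an index entry name, mirroring
    // the split()/builder() pattern in the searcher above. Key decoration via the
    // partitioner is omitted here.
    static ByteBuffer baseNamePrefix(CompositeType indexComparator,
                                     CompositeType baseComparator,
                                     ByteBuffer indexEntryName,
                                     int prefixSize)
    {
        ByteBuffer[] components = indexComparator.split(indexEntryName);
        CompositeType.Builder builder = baseComparator.builder();
        for (int i = 0; i < prefixSize; i++)
            builder.add(components[i + 1]);
        return builder.build();
    }
}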

     */
    public static List<ByteBuffer> maxComponents(List<ByteBuffer> maxSeen, ByteBuffer candidate, AbstractType<?> comparator)
    {
        if (comparator instanceof CompositeType)
        {
            CompositeType ct = (CompositeType)comparator;
            if (maxSeen.isEmpty())
                return Arrays.asList(ct.split(candidate));

            int typeCount = getTypeCount(ct);

            List<ByteBuffer> components = Arrays.asList(ct.split(candidate));
            List<ByteBuffer> biggest = maxSeen.size() > components.size() ? maxSeen : components;
            // If typeCount is smaller than both components and maxSeen, we only keep typeCount components.
            int minSize = Math.min(typeCount, Math.min(components.size(), maxSeen.size()));
            int maxSize = Math.min(typeCount, biggest.size());
            List<ByteBuffer> retList = new ArrayList<ByteBuffer>(maxSize);
