
Examples of org.apache.cassandra.db.IColumn
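
IColumn is the single-column (or super column) abstraction used by older, Thrift-era Cassandra versions; the snippets below reach it mostly through its name(), value() and getSubColumns() accessors. As a minimal sketch of the recurring pattern in the Hadoop record-reader examples, building a name-keyed map of columns (ColumnsByName and indexByName are illustrative names, not part of the Cassandra API, and a Cassandra 1.x jar is assumed on the classpath):

    import java.nio.ByteBuffer;
    import java.util.Comparator;
    import java.util.SortedMap;
    import java.util.TreeMap;

    import org.apache.cassandra.db.IColumn;

    // Illustrative helper, not Cassandra API: index a batch of IColumns by name,
    // the same shape the record-reader snippets below build for each row key.
    public class ColumnsByName
    {
        public static SortedMap<ByteBuffer, IColumn> indexByName(Iterable<IColumn> columns,
                                                                 Comparator<ByteBuffer> comparator)
        {
            SortedMap<ByteBuffer, IColumn> byName = new TreeMap<ByteBuffer, IColumn>(comparator);
            for (IColumn column : columns)
                byName.put(column.name(), column);
            return byName;
        }
    }

The sstable and CQL3 update examples additionally lean on value() for comparing against values to discard, and on getSubColumns() when exporting super columns.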


            file.reset(mark);
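            // index offsets are relative to the mark, so skip from there to the start of this block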
            FileUtils.skipBytesFully(file, indexInfo.offset);
            // TODO only completely deserialize columns we are interested in
            while (file.bytesPastMark(mark) < indexInfo.offset + indexInfo.width)
            {
                IColumn column = cf.getColumnSerializer().deserialize(file);
                // we check vs the original Set, not the filtered List, for efficiency
                if (columnNames.contains(column.name()))
                {
                    cf.addColumn(column);
                }
            }
        }
View Full Code Here


    protected IColumn computeNext()
    {
        while (true)
        {
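            // drain the columns buffered for the current block; when it runs dry, ask the fetcher for the next block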
            IColumn column = blockColumns.poll();
            if (column != null && isColumnNeeded(column))
                return column;
            try
            {
                if (column == null && !fetcher.getNextBlock())
View Full Code Here

            totalRead++;
            KeySlice ks = rows.get(i++);
            SortedMap<ByteBuffer, IColumn> map = new TreeMap<ByteBuffer, IColumn>(comparator);
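            // convert each Thrift ColumnOrSuperColumn in this key slice back into an IColumn, keyed by name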
            for (ColumnOrSuperColumn cosc : ks.columns)
            {
                IColumn column = unthriftify(cosc);
                map.put(column.name(), column);
            }
            return Pair.create(ks.key, map);
        }
View Full Code Here

                while (true)
                {
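                    // return a singleton map for each column of the current row; when its columns are exhausted, fall through to fetch more rows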
                    if (columns.hasNext())
                    {
                        ColumnOrSuperColumn cosc = columns.next();
                        IColumn column = unthriftify(cosc);
                        ImmutableSortedMap<ByteBuffer, IColumn> map = ImmutableSortedMap.of(column.name(), column);
                        return Pair.<ByteBuffer, SortedMap<ByteBuffer, IColumn>>create(currentRow.key, map);
                    }

                    if (!rows.hasNext())
                        return endOfData();
View Full Code Here

        {
            while (row.hasNext())
            {
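                // print each super column as a JSON-style object: its name, its metadata, and the array of its sub-columns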
                SuperColumn scol = (SuperColumn)row.next();
                assert scol instanceof IColumn;
                IColumn column = (IColumn)scol;
                writeKey(out, comparator.getString(column.name()));
                out.print("{");
                writeMeta(out, scol);
                writeKey(out, "subColumns");
                out.print("[");
                serializeIColumns(column.getSubColumns().iterator(), out, columnFamily.getSubComparator(), cfMetaData);
                out.print("]");
                out.print("}");

                if (row.hasNext())
                    out.print(", ");
View Full Code Here

            for (Object elt : l)
                toDiscard.add(validator.valueComparator().decompose(elt));

            for (Pair<ByteBuffer, IColumn> p : list)
            {
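                // tombstone any existing collection element whose serialized value is among the values to discard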
                IColumn c = p.right;
                if (toDiscard.contains(c.value()))
                    cf.addColumn(params.makeTombstone(c.name()));
            }
        }
        catch (MarshalException e)
        {
            throw new InvalidRequestException(e.getMessage());
View Full Code Here

        for (Term value : values)
            toDiscard.add(value.getByteBuffer(validator.valueComparator(), params.variables));

        for (Pair<ByteBuffer, IColumn> p : list)
        {
            IColumn c = p.right;
            if (toDiscard.contains(c.value()))
                cf.addColumn(params.makeTombstone(c.name()));
        }
    }
View Full Code Here

            // the read-before-write this operation requires limits its usefulness on big lists, so in practice
            // toDiscard will be small and keeping a list will be more efficient.
            List<ByteBuffer> toDiscard = ((Lists.Value)value).elements;
            for (Pair<ByteBuffer, IColumn> p : existingList)
            {
                IColumn element = p.right;
                if (toDiscard.contains(element.value()))
                    cf.addColumn(params.makeTombstone(element.name()));
            }
        }
View Full Code Here

            totalRead++;
            KeySlice ks = rows.get(i++);
            SortedMap<ByteBuffer, IColumn> map = new TreeMap<ByteBuffer, IColumn>(comparator);
            for (ColumnOrSuperColumn cosc : ks.columns)
            {
                IColumn column = unthriftify(cosc);
                map.put(column.name(), column);
            }
            return new Pair<ByteBuffer, SortedMap<ByteBuffer, IColumn>>(ks.key, map);
        }
View Full Code Here

