Package org.apache.cassandra.db

Examples of org.apache.cassandra.db.ColumnFamily



    static ColumnFamily resolveSuperset(List<ColumnFamily> versions)
    {
        assert versions.size() > 0;
        ColumnFamily resolved = null;
        // use the first non-null version as the base to merge into
        for (ColumnFamily cf : versions)
        {
            if (cf != null)
            {
                resolved = cf.cloneMe();
                break;
            }
        }
        if (resolved == null)
            return null;
        // merge every version into the base; resolve() tolerates the null
        // entries skipped above, and re-merging the base's own source is a no-op
        for (ColumnFamily cf : versions)
        {
            resolved.resolve(cf);
        }
        return resolved;
    }
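
The merge pattern is: take the first non-null version as a base, clone it, then fold every version into the clone. Below is a self-contained sketch of the same pattern with Map<String, Long> (column name to write timestamp, newest wins) standing in for ColumnFamily; the class and its merge rule are hypothetical illustrations, not Cassandra code:

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class ResolveSupersetSketch
{
    // Stand-in merge semantics: per column name, the newest timestamp wins.
    static Map<String, Long> resolveSuperset(List<Map<String, Long>> versions)
    {
        assert versions.size() > 0;
        Map<String, Long> resolved = null;
        for (Map<String, Long> v : versions)
        {
            if (v != null)
            {
                resolved = new HashMap<>(v); // cloneMe() stand-in
                break;
            }
        }
        if (resolved == null)
            return null; // every version was null

        for (Map<String, Long> v : versions)
        {
            if (v == null)
                continue; // the real resolve() tolerates nulls; a plain Map does not
            for (Map.Entry<String, Long> e : v.entrySet())
                resolved.merge(e.getKey(), e.getValue(), Math::max);
        }
        return resolved;
    }

    public static void main(String[] args)
    {
        Map<String, Long> a = new HashMap<>();
        a.put("c1", 1L);
        a.put("c2", 5L);
        Map<String, Long> b = new HashMap<>();
        b.put("c2", 3L);
        b.put("c3", 2L);
        // prints the merged superset, e.g. {c1=1, c2=5, c3=2} (map order may vary)
        System.out.println(resolveSuperset(Arrays.asList(null, a, b)));
    }
}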


        // anyway, so the odds of a "tombstones consuming memory indefinitely" problem are minimal.
        // See https://issues.apache.org/jira/browse/CASSANDRA-3921 for more discussion.
        if (CacheService.instance.rowCache.isPutCopying())
            return;

        ColumnFamily cachedRow = cfs.getRawCachedRow(key);
        if (cachedRow != null)
            ColumnFamilyStore.removeDeleted(cachedRow, gcBefore);
    }
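
The early return matters because a copying cache (isPutCopying) hands out and stores copies rather than shared references, so calling removeDeleted on the row returned by getRawCachedRow would only scrub a throwaway copy; the in-place scrub below is meaningful only for a cache that holds the row object itself.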

    /**
     * Write the row-level metadata (the top-level deletion info) for the
     * given container, if any is present.
     */
    private static void writeMeta(PrintStream out, AbstractColumnContainer columnContainer)
    {
        if (columnContainer instanceof ColumnFamily)
        {
            ColumnFamily columnFamily = (ColumnFamily) columnContainer;
            if (!columnFamily.deletionInfo().equals(DeletionInfo.LIVE))
            {
                // begin meta
                writeKey(out, "metadata");
                writeDeletionInfo(out, columnFamily.deletionInfo().getTopLevelDeletion());
                out.print(",");
            }
            return;
        }
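
When a row-level tombstone is present, this emits a "metadata" entry ahead of the columns. Based on the calls above it looks roughly like the line below; the field names inside the deletion-info object are an assumption, since writeDeletionInfo itself is not shown:

    "metadata": {"deletionInfo": {"markedForDeleteAt": 1398482495000000, "localDeletionTime": 1398482495}},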


    /**
     * Serialize a row to the given output stream as JSON.
     *
     * @param row the row, given as an iterator over its columns
     * @param key Decorated Key for the required row
     * @param out output stream
     */
    private static void serializeRow(SSTableIdentityIterator row, DecoratedKey key, PrintStream out)
    {
        ColumnFamily columnFamily = row.getColumnFamily();
        boolean isSuperCF = columnFamily.isSuper();
        CFMetaData cfMetaData = columnFamily.metadata();
        AbstractType<?> comparator = columnFamily.getComparator();

        out.print("{");
        writeKey(out, "key");
        writeJSON(out, bytesToHex(key.key));
        out.print(",");

        writeMeta(out, columnFamily);

        writeKey(out, "columns");
        out.print(isSuperCF ? "{" : "[");

        if (isSuperCF)
        {
            while (row.hasNext())
            {
                SuperColumn scol = (SuperColumn)row.next();
                assert scol instanceof IColumn;
                IColumn column = (IColumn)scol;
                writeKey(out, comparator.getString(column.name()));
                out.print("{");
                writeMeta(out, scol);
                writeKey(out, "subColumns");
                out.print("[");
                serializeIColumns(column.getSubColumns().iterator(), out, columnFamily.getSubComparator(), cfMetaData);
                out.print("]");
                out.print("}");

                if (row.hasNext())
                    out.print(", ");

                        // We don't expire anything so the row shouldn't be empty
                        assert !row.isEmpty();
                        writer.append(row);

                        // update cache
                        ColumnFamily cf = row.getFullColumnFamily();
                        cfs.maybeUpdateRowCache(key, cf);
                    }
                    else
                    {
                        writer.appendFromStream(key, cfs.metadata, dataSize, in);

    public RowMutation mutationForKey(CFDefinition cfDef, ByteBuffer key, ColumnNameBuilder builder, boolean isRange, UpdateParameters params, ColumnGroupMap group)
    throws InvalidRequestException
    {
        QueryProcessor.validateKey(key);
        RowMutation rm = new RowMutation(cfDef.cfm.ksName, key);
        ColumnFamily cf = rm.addOrGet(columnFamily());

        if (columns.isEmpty() && builder.componentCount() == 0)
        {
            // No columns, delete the row
            cf.delete(new DeletionInfo(params.timestamp, params.localDeletionTime));
        }
        else
        {
            if (isRange)
            {
                ByteBuffer start = builder.copy().build();
                ByteBuffer end = builder.buildAsEndOfRange();
                QueryProcessor.validateColumnName(start); // If start is good, end is too
                cf.addAtom(params.makeRangeTombstone(start, end));
            }
            else
            {
                // Delete specific columns
                if (cfDef.isCompact)
                {
                    ByteBuffer columnName = builder.build();
                    QueryProcessor.validateColumnName(columnName);
                    cf.addColumn(params.makeTombstone(columnName));
                }
                else
                {
                    Iterator<Pair<CFDefinition.Name, Term>> iter = toRemove.iterator();
                    while (iter.hasNext())
                    {
                        Pair<CFDefinition.Name, Term> p = iter.next();
                        CFDefinition.Name column = p.left;

                        if (column.type.isCollection())
                        {
                            CollectionType validator = (CollectionType) column.type;
                            Term keySelected = p.right;

                            if (keySelected == null)
                            {
                                // Delete the whole collection
                                ByteBuffer start = builder.copy().add(column.name.key).build();
                                QueryProcessor.validateColumnName(start);
                                ColumnNameBuilder b = iter.hasNext() ? builder.copy() : builder;
                                ByteBuffer end = b.add(column.name.key).buildAsEndOfRange();
                                cf.addAtom(params.makeRangeTombstone(start, end));
                            }
                            else
                            {
                                builder.add(column.name.key);
                                List<Term> args = Collections.singletonList(keySelected);

                                Operation op;
                                switch (validator.kind)
                                {
                                    case LIST:
                                        op = ListOperation.DiscardKey(args);
                                        break;
                                    case SET:
                                        op = SetOperation.Discard(args);
                                        break;
                                    case MAP:
                                        op = MapOperation.DiscardKey(keySelected);
                                        break;
                                    default:
                                        throw new InvalidRequestException("Unknown collection type: " + validator.kind);
                                }

                                op.execute(cf, builder, validator, params, group == null ? null : group.getCollection(column.name.key));
                            }
                        }
                        else
                        {
                            ColumnNameBuilder b = iter.hasNext() ? builder.copy() : builder;
                            ByteBuffer columnName = b.add(column.name.key).build();
                            QueryProcessor.validateColumnName(columnName);
                            cf.addColumn(params.makeTombstone(columnName));
                        }
                    }
                }
            }
        }
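
Each branch corresponds to a CQL3 DELETE form. The pairing below is inferred from the code rather than stated anywhere on this page, and the table and column names are hypothetical:

    DELETE FROM t WHERE k = 0;              -- nothing named, empty builder: row-level DeletionInfo
    DELETE FROM t WHERE k = 0 AND c = 1;    -- clustering prefix, non-compact table (isRange): range tombstone
    DELETE v FROM t WHERE k = 0 AND c = 1;  -- named regular column: single tombstone cell
    DELETE m FROM t WHERE k = 0;            -- whole collection: range tombstone spanning its cells
    DELETE m['a'] FROM t WHERE k = 0;       -- single collection element: DiscardKey/Discard operation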

        long startTime = System.currentTimeMillis();

        // validate digests against each other; throw immediately on mismatch.
        // also extract the data reply, if any.
        ColumnFamily data = null;
        ByteBuffer digest = null;

        for (MessageIn<ReadResponse> message : replies)
        {
            ReadResponse response = message.payload;
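
The loop is cut off just as it gets interesting; the usual continuation hashes any full data reply and compares digests pairwise, throwing on the first mismatch. A self-contained sketch of that pattern follows, where Reply is a hypothetical stand-in for MessageIn<ReadResponse> and MD5 stands in for whatever digest the real code uses:

import java.nio.ByteBuffer;
import java.security.MessageDigest;
import java.util.Arrays;
import java.util.List;

public class DigestResolveSketch
{
    // Hypothetical stand-in for a read response: either a digest or full data.
    static final class Reply
    {
        final boolean isDigest;
        final byte[] payload;
        Reply(boolean isDigest, byte[] payload) { this.isDigest = isDigest; this.payload = payload; }
    }

    static byte[] resolve(List<Reply> replies) throws Exception
    {
        byte[] data = null;        // the data reply, if any
        ByteBuffer digest = null;  // first digest seen; all later ones must match

        for (Reply reply : replies)
        {
            ByteBuffer newDigest;
            if (reply.isDigest)
            {
                newDigest = ByteBuffer.wrap(reply.payload);
            }
            else
            {
                data = reply.payload; // extract the data reply
                newDigest = ByteBuffer.wrap(MessageDigest.getInstance("MD5").digest(data));
            }
            if (digest == null)
                digest = newDigest;
            else if (!digest.equals(newDigest))
                throw new IllegalStateException("replica digests disagree"); // DigestMismatchException in Cassandra
        }
        return data;
    }

    public static void main(String[] args) throws Exception
    {
        byte[] row = "row-bytes".getBytes("UTF-8");
        byte[] d = MessageDigest.getInstance("MD5").digest(row);
        // one data reply plus one matching digest reply resolves to the data
        System.out.println(resolve(Arrays.asList(new Reply(false, row), new Reply(true, d))).length);
    }
}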

    public RowMutation mutationForKey(CFDefinition cfDef, ByteBuffer key, ColumnNameBuilder builder, boolean isRange, UpdateParameters params)
    throws InvalidRequestException
    {
        QueryProcessor.validateKey(key);
        RowMutation rm = new RowMutation(cfDef.cfm.ksName, key);
        ColumnFamily cf = rm.addOrGet(columnFamily());

        if (toRemove.isEmpty() && builder.componentCount() == 0)
        {
            // No columns specified, delete the row
            cf.delete(new DeletionInfo(params.timestamp, params.localDeletionTime));
        }
        else
        {
            if (isRange)
            {
                assert toRemove.isEmpty();
                ByteBuffer start = builder.build();
                ByteBuffer end = builder.buildAsEndOfRange();
                cf.addAtom(params.makeRangeTombstone(start, end));
            }
            else
            {
                // Delete specific columns
                if (cfDef.isCompact)
                {
                    ByteBuffer columnName = builder.build();
                    cf.addColumn(params.makeTombstone(columnName));
                }
                else
                {
                    for (Operation op : toRemove)
                        op.execute(key, cf, builder.copy(), params);
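
Compared with the ColumnGroupMap variant earlier, this version hides all per-column and per-collection-element handling behind the Operation interface: instead of switching on the collection kind inline, the statement simply calls op.execute(key, cf, builder.copy(), params) for each deletion.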

                {
                    in.reset(0);
                    key = SSTableReader.decodeKey(StorageService.getPartitioner(), localFile.desc, ByteBufferUtil.readWithShortLength(in));
                    long dataSize = SSTableReader.readRowSize(in, localFile.desc);

                    ColumnFamily cached = cfs.getRawCachedRow(key);
                    if (cached != null && remoteFile.type == OperationType.AES && dataSize <= DatabaseDescriptor.getInMemoryCompactionLimit())
                    {
                        // need to update row cache
                        if (controller == null)
                            controller = new CompactionController(cfs, Collections.<SSTableReader>emptyList(), Integer.MIN_VALUE, true);
                        // Note: Because we won't just echo the columns, there is no need to use the PRESERVE_SIZE flag, contrary to what appendFromStream does below
                        SSTableIdentityIterator iter = new SSTableIdentityIterator(cfs.metadata, in, key, 0, dataSize, IColumnSerializer.Flag.FROM_REMOTE);
                        PrecompactedRow row = new PrecompactedRow(controller, Collections.singletonList(iter));
                        // We don't expire anything so the row shouldn't be empty
                        assert !row.isEmpty();
                        writer.append(row);
                        // row append does not update the max timestamp on its own
                        writer.updateMaxTimestamp(row.maxTimestamp());

                        // update cache
                        ColumnFamily cf = row.getFullColumnFamily();
                        cfs.updateRowCache(key, cf);
                    }
                    else
                    {
                        writer.appendFromStream(key, cfs.metadata, dataSize, in);
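
The condition guarding this branch is doing real work: only a row that is already cached, that arrives via anti-entropy repair (OperationType.AES), and that is small enough for in-memory compaction gets deserialized, compacted, and appended in memory so the row cache can be refreshed; anything else is streamed straight through appendFromStream without ever materializing the row.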

        if (cf.hasIrrelevantData(controller.gcBefore))
            shouldPurge = controller.shouldPurge(key);
        // We should only GC tombstones if shouldPurge == true. Otherwise it is
        // still ok to collect columns that are shadowed by their (deleted)
        // container, which removeDeleted(cf, Integer.MIN_VALUE) will do
        ColumnFamily compacted = ColumnFamilyStore.removeDeleted(cf, shouldPurge != null && shouldPurge ? controller.gcBefore : Integer.MIN_VALUE);

        if (compacted != null && compacted.metadata().getDefaultValidator().isCommutative())
        {
            if (shouldPurge == null)
                shouldPurge = controller.shouldPurge(key);
            if (shouldPurge)
                CounterColumn.mergeAndRemoveOldShards(key, compacted, controller.gcBefore, controller.mergeShardBefore);
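
shouldPurge is computed lazily, and only when the row actually holds gc-able data, because checking whether a key can be purged is comparatively expensive. It is consulted twice: once to decide whether tombstones older than gcBefore may really be dropped, and again, for counter column families, to decide whether old counter shards can be merged and removed safely.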
