Package org.apache.cassandra.utils

Examples of org.apache.cassandra.utils.StreamingHistogram
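StreamingHistogram is Cassandra's implementation of the Ben-Haim/Tom-Tov streaming histogram: it keeps at most a fixed number of (point, count) bins and, when a new point would exceed that bound, merges the two closest bins into their weighted centroid. Each sstable keeps one instance recording tombstone local deletion times, so that sum(gcBefore) can later estimate how many tombstones are already droppable. A minimal usage sketch, assuming the 2.x public surface (update(long), merge(StreamingHistogram), sum(double)):

import org.apache.cassandra.utils.StreamingHistogram;

public class StreamingHistogramExample
{
    public static void main(String[] args)
    {
        // Bound the sketch at 100 bins, the value SSTable.TOMBSTONE_HISTOGRAM_BIN_SIZE uses.
        StreamingHistogram hist = new StreamingHistogram(100);

        // One update per tombstone, keyed by its local deletion time (seconds since epoch).
        hist.update(1400000000L);
        hist.update(1400000100L);
        hist.update(1400000100L); // repeated points only increment the bin's count

        // Per-sstable histograms merge without exceeding the bin bound.
        StreamingHistogram other = new StreamingHistogram(100);
        other.update(1400000500L);
        hist.merge(other);

        // sum(b) estimates how many recorded points are <= b; with drop times as the
        // points, this reads as "tombstones droppable before b".
        System.out.println(hist.sum(1400000200)); // ~3.0
    }
}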


    // From SSTableMetadata: default-histogram factories used when a legacy sstable
    // was written before these statistics existed (the return above closes a sibling factory).
        return new EstimatedHistogram(150);
    }

    static StreamingHistogram defaultTombstoneDropTimeHistogram()
    {
        return new StreamingHistogram(SSTable.TOMBSTONE_HISTOGRAM_BIN_SIZE);
    }


            // SSTableMetadata deserialization, pre-minTimestamp layout: sstables that
            // never tracked tombstones fall back to the default drop-time histogram.
            String partitioner = desc.version.hasPartitioner ? dis.readUTF() : null;
            int nbAncestors = desc.version.hasAncestors ? dis.readInt() : 0;
            Set<Integer> ancestors = new HashSet<Integer>(nbAncestors);
            for (int i = 0; i < nbAncestors; i++)
                ancestors.add(dis.readInt());
            StreamingHistogram tombstoneHistogram = desc.version.tracksTombstones
                                                   ? StreamingHistogram.serializer.deserialize(dis)
                                                   : defaultTombstoneDropTimeHistogram();
            return new SSTableMetadata(rowSizes, columnCounts, replayPosition, maxTimestamp, compressionRatio, partitioner, ancestors, tombstoneHistogram);
        }
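The serializer used above is symmetric, so a histogram survives a round trip through any DataOutput/DataInput pair. A hedged sketch, assuming the 2.x ISerializer signatures (serialize(histogram, DataOutput) and deserialize(DataInput), both throwing IOException):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.cassandra.utils.StreamingHistogram;

public class HistogramRoundTrip
{
    static StreamingHistogram roundTrip(StreamingHistogram hist) throws IOException
    {
        // Write the histogram (its bin bound plus point/count pairs) to a buffer...
        ByteArrayOutputStream buffer = new ByteArrayOutputStream();
        StreamingHistogram.serializer.serialize(hist, new DataOutputStream(buffer));

        // ...then read an equivalent histogram back, as the metadata code above does.
        DataInputStream in = new DataInputStream(new ByteArrayInputStream(buffer.toByteArray()));
        return StreamingHistogram.serializer.deserialize(in);
    }
}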

            // Compaction path (LazilyCompactedRow in this era): once iteration finishes,
            // the reducer's accumulated tombstone histogram is captured into ColumnStats.
            throw new RuntimeException(e);
        }
        // reach into the reducer used during iteration to get column count, size, max column timestamp
        // (however, if there are zero columns, iterator() will not be called by ColumnIndexer and reducer will be null)
        columnStats = new ColumnStats(reducer == null ? 0 : reducer.columns, reducer == null ? Long.MIN_VALUE : reducer.maxTimestampSeen,
                                      reducer == null ? new StreamingHistogram(SSTable.TOMBSTONE_HISTOGRAM_BIN_SIZE) : reducer.tombstones
        );
        columnSerializedSize = reducer == null ? 0 : reducer.serializedSize;
        reducer = null;
    }

            // The same deserialization in a later layout that also carries minTimestamp.
            String partitioner = desc.version.hasPartitioner ? dis.readUTF() : null;
            int nbAncestors = desc.version.hasAncestors ? dis.readInt() : 0;
            Set<Integer> ancestors = new HashSet<Integer>(nbAncestors);
            for (int i = 0; i < nbAncestors; i++)
                ancestors.add(dis.readInt());
            StreamingHistogram tombstoneHistogram = desc.version.tracksTombstones
                                                   ? StreamingHistogram.serializer.deserialize(dis)
                                                   : defaultTombstoneDropTimeHistogram();
            return new SSTableMetadata(rowSizes, columnCounts, replayPosition, minTimestamp, maxTimestamp, compressionRatio, partitioner, ancestors, tombstoneHistogram);
        }

        // Later compaction-path variant: ColumnStats now records the minimum timestamp as
        // well, and the row's deletion timestamp is folded into the maximum.
        // reach into the reducer used during iteration to get column count, size, max column timestamp
        // (however, if there are zero columns, iterator() will not be called by ColumnIndexer and reducer will be null)
        columnStats = new ColumnStats(reducer == null ? 0 : reducer.columns,
                                      reducer == null ? Long.MAX_VALUE : reducer.minTimestampSeen,
                                      reducer == null ? maxDelTimestamp : Math.max(maxDelTimestamp, reducer.maxTimestampSeen),
                                      reducer == null ? new StreamingHistogram(SSTable.TOMBSTONE_HISTOGRAM_BIN_SIZE) : reducer.tombstones
        );
        columnSerializedSize = reducer == null ? 0 : reducer.serializedSize;
        reducer = null;
    }

        // SSTableWriter.appendFromStream: while re-serializing streamed row data, every
        // tombstoned atom's local deletion time is fed into the drop-time histogram.
        long minTimestamp = Long.MAX_VALUE;
        long maxTimestamp = Long.MIN_VALUE;
        int maxLocalDeletionTime = Integer.MIN_VALUE;
        List<ByteBuffer> minColumnNames = Collections.emptyList();
        List<ByteBuffer> maxColumnNames = Collections.emptyList();
        StreamingHistogram tombstones = new StreamingHistogram(TOMBSTONE_HISTOGRAM_BIN_SIZE);
        ColumnFamily cf = ArrayBackedSortedColumns.factory.create(metadata);

        cf.delete(DeletionTime.serializer.deserialize(in));

        ColumnIndex.Builder columnIndexer = new ColumnIndex.Builder(cf, key.key, dataFile.stream);
        int columnCount = Integer.MAX_VALUE;
        if (version.hasRowSizeAndColumnCount)
        {
            // skip row size
            FileUtils.skipBytesFully(in, 8);
            columnCount = in.readInt();
        }
        Iterator<OnDiskAtom> iter = metadata.getOnDiskIterator(in, columnCount, ColumnSerializer.Flag.PRESERVE_SIZE, Integer.MIN_VALUE, version);
        try
        {
            while (iter.hasNext())
            {
                // deserialize column with PRESERVE_SIZE because we've written the dataSize based on the
                // data size received, so we must reserialize the exact same data
                OnDiskAtom atom = iter.next();
                if (atom == null)
                    break;
                if (atom instanceof CounterColumn)
                    atom = ((CounterColumn) atom).markDeltaToBeCleared();

                int deletionTime = atom.getLocalDeletionTime();
                if (deletionTime < Integer.MAX_VALUE)
                {
                    tombstones.update(deletionTime);
                }
                minTimestamp = Math.min(minTimestamp, atom.minTimestamp());
                maxTimestamp = Math.max(maxTimestamp, atom.maxTimestamp());
                minColumnNames = ColumnNameHelper.minComponents(minColumnNames, atom.name(), metadata.comparator);
                maxColumnNames = ColumnNameHelper.maxComponents(maxColumnNames, atom.name(), metadata.comparator);
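Everything the loop above records is consumed later as a simple prefix sum. A hedged sketch of the accessor, modeled on the 2.x SSTableMetadata source (treat the exact name and signature as that era's):

    // gcBefore is a local-deletion-time cutoff in seconds since the epoch; points at or
    // below it are tombstones whose grace period has already expired.
    public double getDroppableTombstonesBefore(int gcBefore)
    {
        return tombstoneHistogram.sum(gcBefore);
    }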

        // Newer ColumnStats again: maxLocalDeletionTime and min/max column names join the stats.
        // reach into the reducer used during iteration to get column count, size, max column timestamp
        // (however, if there are zero columns, iterator() will not be called by ColumnIndexer and reducer will be null)
        columnStats = new ColumnStats(reducer == null ? 0 : reducer.columns,
                                      reducer == null ? Long.MAX_VALUE : reducer.minTimestampSeen,
                                      reducer == null ? maxTombstoneTimestamp : Math.max(maxTombstoneTimestamp, reducer.maxTimestampSeen),
                                      reducer == null ? Integer.MIN_VALUE : reducer.maxLocalDeletionTimeSeen,
                                      reducer == null ? new StreamingHistogram(SSTable.TOMBSTONE_HISTOGRAM_BIN_SIZE) : reducer.tombstones,
                                      reducer == null ? Collections.<ByteBuffer>emptyList() : reducer.minColumnNameSeen,
                                      reducer == null ? Collections.<ByteBuffer>emptyList() : reducer.maxColumnNameSeen
        );
        reducer = null;

            // 2.1-era StatsMetadata deserialization: the tombstone histogram is read back
            // alongside the other per-sstable statistics.
            ReplayPosition replayPosition = ReplayPosition.serializer.deserialize(in);
            long minTimestamp = in.readLong();
            long maxTimestamp = in.readLong();
            int maxLocalDeletionTime = in.readInt();
            double compressionRatio = in.readDouble();
            StreamingHistogram tombstoneHistogram = StreamingHistogram.serializer.deserialize(in);
            int sstableLevel = in.readInt();
            long repairedAt = 0;
            if (version.hasRepairedAt)
                repairedAt = in.readLong();

        // 2.1-era appendFromStream: the row-level deletion and range tombstones now feed
        // the histogram as well, and counter cells are checked for legacy (pre-2.1) shards.
        long minTimestamp = Long.MAX_VALUE;
        long maxTimestamp = Long.MIN_VALUE;
        int maxLocalDeletionTime = Integer.MIN_VALUE;
        List<ByteBuffer> minColumnNames = Collections.emptyList();
        List<ByteBuffer> maxColumnNames = Collections.emptyList();
        StreamingHistogram tombstones = new StreamingHistogram(TOMBSTONE_HISTOGRAM_BIN_SIZE);
        boolean hasLegacyCounterShards = false;

        ColumnFamily cf = ArrayBackedSortedColumns.factory.create(metadata);
        cf.delete(DeletionTime.serializer.deserialize(in));

        ColumnIndex.Builder columnIndexer = new ColumnIndex.Builder(cf, key.getKey(), dataFile.stream);

        if (cf.deletionInfo().getTopLevelDeletion().localDeletionTime < Integer.MAX_VALUE)
            tombstones.update(cf.deletionInfo().getTopLevelDeletion().localDeletionTime);

        Iterator<RangeTombstone> rangeTombstoneIterator = cf.deletionInfo().rangeIterator();
        while (rangeTombstoneIterator.hasNext())
        {
            RangeTombstone rangeTombstone = rangeTombstoneIterator.next();
            tombstones.update(rangeTombstone.getLocalDeletionTime());
            minTimestamp = Math.min(minTimestamp, rangeTombstone.timestamp());
            maxTimestamp = Math.max(maxTimestamp, rangeTombstone.timestamp());

            minColumnNames = ColumnNameHelper.minComponents(minColumnNames, rangeTombstone.min, metadata.comparator);
            maxColumnNames = ColumnNameHelper.maxComponents(maxColumnNames, rangeTombstone.max, metadata.comparator);
        }

        Iterator<OnDiskAtom> iter = metadata.getOnDiskIterator(in, ColumnSerializer.Flag.PRESERVE_SIZE, Integer.MIN_VALUE, version);
        try
        {
            while (iter.hasNext())
            {
                // deserialize column with PRESERVE_SIZE because we've written the cellDataSize based on the
                // data size received, so we must reserialize the exact same data
                OnDiskAtom atom = iter.next();
                if (atom == null)
                    break;

                if (atom instanceof CounterCell)
                {
                    atom = ((CounterCell) atom).markLocalToBeCleared();
                    hasLegacyCounterShards = hasLegacyCounterShards || ((CounterCell) atom).hasLegacyShards();
                }

                int deletionTime = atom.getLocalDeletionTime();
                if (deletionTime < Integer.MAX_VALUE)
                    tombstones.update(deletionTime);
                minTimestamp = Math.min(minTimestamp, atom.timestamp());
                maxTimestamp = Math.max(maxTimestamp, atom.timestamp());
                minColumnNames = ColumnNameHelper.minComponents(minColumnNames, atom.name(), metadata.comparator);
                maxColumnNames = ColumnNameHelper.maxComponents(maxColumnNames, atom.name(), metadata.comparator);
                maxLocalDeletionTime = Math.max(maxLocalDeletionTime, atom.getLocalDeletionTime());
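Compaction strategies turn that droppable count into a per-sstable ratio when deciding whether a single-sstable tombstone compaction is worthwhile. A hedged sketch of the computation, patterned on the 2.x metadata code (estimatedColumnCount here is the sstable's cells-per-row EstimatedHistogram; treat the names as assumptions):

    public double getEstimatedDroppableTombstoneRatio(int gcBefore)
    {
        // Approximate the total cell count as mean cells-per-row times row count.
        long estimatedCellCount = estimatedColumnCount.mean() * estimatedColumnCount.count();
        return estimatedCellCount > 0
             ? getDroppableTombstonesBefore(gcBefore) / estimatedCellCount
             : 0;
    }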
