Package org.apache.cassandra.io.util

Examples of org.apache.cassandra.io.util.DataOutputBuffer
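Every example below follows the same basic pattern: serialize one or more values into a DataOutputBuffer (an in-memory DataOutput backed by a growable byte array), then read the result back through getData() and getLength(). A minimal sketch of that round trip (the class and method names here are illustrative, not taken from the examples):

import java.io.IOException;

import org.apache.cassandra.io.util.DataOutputBuffer;

public class DataOutputBufferSketch
{
    public static byte[] serializeLong(long value)
    {
        DataOutputBuffer out = new DataOutputBuffer();
        try
        {
            out.writeLong(value); // any DataOutput method works here
        }
        catch (IOException e)
        {
            // the buffer writes to memory, so this cannot actually happen
            throw new AssertionError(e);
        }
        // getData() exposes the backing array, which may be longer than the
        // written content; always bound it with getLength()
        byte[] exact = new byte[out.getLength()];
        System.arraycopy(out.getData(), 0, exact, 0, out.getLength());
        return exact;
    }
}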


    public void updateDigest(MessageDigest digest)
    {
        assert name != null;
        digest.update(name.duplicate());
        DataOutputBuffer buffer = new DataOutputBuffer();
        try
        {
            buffer.writeLong(getMarkedForDeleteAt());
        }
        catch (IOException e)
        {
            throw new RuntimeException(e);
        }
        digest.update(buffer.getData(), 0, buffer.getLength());
        for (IColumn column : getSubColumns())
        {
            column.updateDigest(digest);
        }
    }
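This appears to be SuperColumn.updateDigest: the deletion timestamp is routed through a scratch DataOutputBuffer so the digest consumes exactly the bytes that writeLong would put on disk, and each subcolumn then mixes its own state into the same digest recursively.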


        try
        {
            switch (compression)
            {
                case GZIP:
                    DataOutputBuffer decompressed = new DataOutputBuffer();
                    byte[] outBuffer = new byte[1024], inBuffer = new byte[1024];

                    Inflater decompressor = new Inflater();

                    int lenRead = 0;
                    while (true)
                    {
                        if (decompressor.needsInput())
                        {
                            // refill only when the inflater has drained its current input
                            lenRead = query.remaining() < 1024 ? query.remaining() : 1024;
                            query.get(inBuffer, 0, lenRead);
                            decompressor.setInput(inBuffer, 0, lenRead);
                        }

                        int lenWrite = 0;
                        while ((lenWrite = decompressor.inflate(outBuffer)) != 0)
                            decompressed.write(outBuffer, 0, lenWrite);

                        if (decompressor.finished())
                            break;
                    }

                    decompressor.end();

                    queryString = new String(decompressed.getData(), 0, decompressed.size(), "UTF-8");
                    break;
                case NONE:
                    try
                    {
                        queryString = ByteBufferUtil.string(query);
                        // ... remainder of the method elided
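The GZIP branch streams inflated output into the DataOutputBuffer in 1 KB chunks until the Inflater reports that it has consumed the whole compressed stream. A self-contained round trip with the same java.util.zip classes (the query string is illustrative):

import java.io.ByteArrayOutputStream;
import java.nio.charset.StandardCharsets;
import java.util.zip.DataFormatException;
import java.util.zip.Deflater;
import java.util.zip.Inflater;

public class InflateSketch
{
    public static void main(String[] args) throws DataFormatException
    {
        byte[] original = "SELECT * FROM users".getBytes(StandardCharsets.UTF_8);

        // compress with the counterpart class so we have valid input
        Deflater deflater = new Deflater();
        deflater.setInput(original);
        deflater.finish();
        byte[] compressed = new byte[1024];
        int compressedLen = deflater.deflate(compressed);
        deflater.end();

        // decompress in chunks, mirroring the loop in the example above
        Inflater inflater = new Inflater();
        inflater.setInput(compressed, 0, compressedLen);
        ByteArrayOutputStream decompressed = new ByteArrayOutputStream();
        byte[] outBuffer = new byte[1024];
        while (!inflater.finished())
        {
            int lenWrite = inflater.inflate(outBuffer);
            if (lenWrite == 0 && inflater.needsInput())
                break; // input was truncated; bail out instead of spinning
            decompressed.write(outBuffer, 0, lenWrite);
        }
        inflater.end();

        System.out.println(new String(decompressed.toByteArray(), StandardCharsets.UTF_8));
    }
}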

    public long write(DataOutput out) throws IOException
    {
        assert !closed;

        DataOutputBuffer clockOut = new DataOutputBuffer();
        DeletionInfo.serializer().serializeForSSTable(emptyColumnFamily.deletionInfo(), clockOut);

        long dataSize = clockOut.getLength() + columnSerializedSize;
        if (logger.isDebugEnabled())
            logger.debug(String.format("clock / column sizes are %s / %s", clockOut.getLength(), columnSerializedSize));
        assert dataSize > 0;
        out.writeLong(dataSize);
        out.write(clockOut.getData(), 0, clockOut.getLength());
        out.writeInt(indexBuilder.writtenAtomCount());

        // We rebuild the column index uselessly, but we need to do that because range tombstone markers depend
        // on indexing. If we're able to remove the two-phase compaction, we'll avoid that.
        indexAndWrite(out);
        // ... remainder of the method elided
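This write method (apparently from LazilyCompactedRow) serializes the row-level deletion info into a scratch buffer first so its exact on-disk size is known up front: dataSize goes out as a length prefix, which lets readers skip the whole row without parsing it, before the buffered bytes and the atom count follow.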

    public void update(MessageDigest digest) // signature inferred from the body below
    {
        assert !closed;

        // no special-case for rows.size == 1, we're actually skipping some bytes here so just
        // blindly updating everything wouldn't be correct
        DataOutputBuffer out = new DataOutputBuffer();

        try
        {
            DeletionInfo.serializer().serializeForSSTable(emptyColumnFamily.deletionInfo(), out);
            out.writeInt(columnStats.columnCount);
            digest.update(out.getData(), 0, out.getLength());
        }
        catch (IOException e)
        {
            throw new AssertionError(e);
        }
        // ... remainder of the method elided
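Note the AssertionError where the other examples throw RuntimeException: a DataOutputBuffer writes to memory, so the IOException declared by the DataOutput interface can never actually occur, and the catch block exists only to satisfy the checked-exception signature.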

    public void updateDigest(MessageDigest digest)
    {
        digest.update(name.duplicate());
        digest.update(value.duplicate());

        DataOutputBuffer buffer = new DataOutputBuffer();
        try
        {
            buffer.writeLong(timestamp);
            buffer.writeByte(serializationFlags());
        }
        catch (IOException e)
        {
            throw new RuntimeException(e);
        }
        digest.update(buffer.getData(), 0, buffer.getLength());
    }
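Folding serializationFlags() into the digest is presumably what keeps columns of different kinds apart: two columns with the same name, value, and timestamp but different flags (say, expiring versus live) will produce different digests.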

            return composite.types.size() - components.size();
        }

        public ByteBuffer build()
        {
            DataOutputBuffer out = new DataOutputBuffer(serializedSize);
            for (int i = 0; i < components.size(); i++)
            {
                try
                {
                    ByteBufferUtil.writeWithShortLength(components.get(i), out);
                }
                catch (IOException e)
                {
                    throw new RuntimeException(e);
                }
                out.write(endOfComponents[i]);
            }
            return ByteBuffer.wrap(out.getData(), 0, out.getLength());
        }
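Each component is framed as a two-byte length prefix (writeWithShortLength), the component bytes, and a one-byte end-of-component marker; presizing the buffer with serializedSize avoids reallocation during the loop. A sketch of the same framing with plain JDK streams (the component values are illustrative):

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

public class CompositeSketch
{
    public static ByteBuffer build(byte[][] components, byte[] endOfComponents) throws IOException
    {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(bytes);
        for (int i = 0; i < components.length; i++)
        {
            out.writeShort(components[i].length); // 2-byte length prefix
            out.write(components[i]);             // the component bytes
            out.writeByte(endOfComponents[i]);    // 1-byte end-of-component marker
        }
        return ByteBuffer.wrap(bytes.toByteArray());
    }

    public static void main(String[] args) throws IOException
    {
        byte[][] parts = { "a".getBytes(StandardCharsets.UTF_8), "b".getBytes(StandardCharsets.UTF_8) };
        ByteBuffer composite = build(parts, new byte[]{ 0, 0 });
        System.out.println(composite.remaining() + " bytes"); // 2 * (2 + 1 + 1) = 8
    }
}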

        forceBlockingFlush(LOCAL_CF);
    }

    private static String positionAsMapEntry(ColumnFamilyStore cfs, ReplayPosition position)
    {
        DataOutputBuffer out = new DataOutputBuffer();
        try
        {
            ReplayPosition.serializer.serialize(position, out);
        }
        catch (IOException e)
        {
            throw new RuntimeException(e);
        }
        return String.format("{'%s': '%s'}",
                             cfs.metadata.cfId,
                             ByteBufferUtil.bytesToHex(ByteBuffer.wrap(out.getData(), 0, out.getLength())));
    }
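Here the serialized ReplayPosition is hex-encoded and embedded in a map literal, presumably for a CQL statement against the system table flushed just above; the ByteBuffer.wrap(out.getData(), 0, out.getLength()) call again trims the backing array down to the bytes actually written.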

    public void updateDigest(MessageDigest digest)
    {
        digest.update(min.duplicate());
        digest.update(max.duplicate());
        DataOutputBuffer buffer = new DataOutputBuffer();
        try
        {
            buffer.writeLong(data.markedForDeleteAt);
            buffer.writeInt(data.localDeletionTime);
        }
        catch (IOException e)
        {
            throw new RuntimeException(e);
        }
        digest.update(buffer.getData(), 0, buffer.getLength());
    }

         * | No. of Pending files | Pending Files ... |
         */
        byte[] bytes;
        try
        {
            DataOutputBuffer buffer = new DataOutputBuffer();
            StreamHeader.serializer.serialize(streamHeader, buffer, version);
            bytes = buffer.getData(); // note: the full backing array, not just the getLength() bytes
        }
        catch (IOException e)
        {
            throw new RuntimeException(e);
        }
        assert bytes.length > 0;

        ByteBuffer buffer = ByteBuffer.allocate(4 + 4 + 4 + bytes.length);
        buffer.putInt(PROTOCOL_MAGIC);
        buffer.putInt(header);
        buffer.putInt(bytes.length);
        buffer.put(bytes);
        buffer.flip();
        return buffer;
    }
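One caveat in this example: getData() alone returns the whole backing array, so if the buffer has grown past the serialized header, bytes.length over-reports the payload and the frame carries trailing slack. If exact framing matters, a hypothetical trimming helper could copy out only the written bytes:

import java.util.Arrays;

import org.apache.cassandra.io.util.DataOutputBuffer;

// Hypothetical helper, not part of Cassandra: bound the copy by getLength()
// so the frame length reflects the serialized size exactly.
public final class Buffers
{
    public static byte[] trimmed(DataOutputBuffer buffer)
    {
        return Arrays.copyOf(buffer.getData(), buffer.getLength());
    }
}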
