Package org.apache.cassandra.io

Examples of org.apache.cassandra.io.DataOutputBuffer$FastByteArrayOutputStream

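Every example below follows the same idiom: serialize into a reusable in-memory DataOutputBuffer (backed by FastByteArrayOutputStream, essentially an unsynchronized ByteArrayOutputStream whose backing array is exposed without copying), then hand off the raw bytes via getData()/getLength(). A minimal round-trip sketch of that idiom; SomeType and obj are hypothetical stand-ins for any of the serializers shown below:

        DataOutputBuffer bufOut = new DataOutputBuffer();
        bufOut.reset();                               // buffers are cheap to reuse across iterations
        SomeType.serializer().serialize(obj, bufOut); // hypothetical serializer, for illustration only
        byte[] data = bufOut.getData();               // the backing array, not trimmed to size
        int length = bufOut.getLength();              // number of valid bytes actually written

        DataInputBuffer bufIn = new DataInputBuffer();
        bufIn.reset(data, length);                    // read back exactly what was written
        SomeType roundTripped = SomeType.serializer().deserialize(bufIn);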

        String mergedFileName = getTempFileName(files);
        SSTable ssTable = null;
        String lastkey = null;
        List<FileStruct> lfs = new ArrayList<FileStruct>();
        DataOutputBuffer bufOut = new DataOutputBuffer();
        int expectedBloomFilterSize = SSTable.getApproximateKeyCount(files);
        expectedBloomFilterSize = (expectedBloomFilterSize > 0) ? expectedBloomFilterSize : SSTable.indexInterval();
        logger_.debug("Expected bloom filter size : " + expectedBloomFilterSize);
        /* Create the bloom filter for the compacted file. */
        BloomFilter compactedBloomFilter = new BloomFilter(expectedBloomFilterSize, 15);
        List<ColumnFamily> columnFamilies = new ArrayList<ColumnFamily>();

        while (pq.size() > 0 || lfs.size() > 0)
        {
            FileStruct fs = null;
            if (pq.size() > 0)
            {
                fs = pq.poll();
            }
            if (fs != null
                && (lastkey == null || lastkey.equals(fs.getKey())))
            {
                // The keys are the same, so add this file to the lfs list
                lastkey = fs.getKey();
                lfs.add(fs);
            }
            else
            {
                Collections.sort(lfs, new FileStructComparator());
                ColumnFamily columnFamily;
                bufOut.reset();
                if (lfs.size() > 1)
                {
                    for (FileStruct filestruct : lfs)
                    {
                        try
                        {
                            /* read the length although we don't need it */
                            filestruct.getBufIn().readInt();
                            // Skip the Index
                            IndexHelper.skipBloomFilterAndIndex(filestruct.getBufIn());
                            // Keep at most two column families in memory: merge eagerly to bound the footprint
                            if (columnFamilies.size() > 1)
                            {
                                merge(columnFamilies);
                            }
                            // deserialize into column families
                            columnFamilies.add(ColumnFamily.serializer().deserialize(filestruct.getBufIn()));
                        }
                        catch (Exception ex)
                        {
                            logger_.warn("error in file compaction", ex);
                        }
                    }
                    // After merging, append the resolved column family to the SSTable
                    columnFamily = resolveAndRemoveDeleted(columnFamilies);
                    columnFamilies.clear();
                    if (columnFamily != null)
                    {
                        /* serialize the cf with column indexes */
                        ColumnFamily.serializerWithIndexes().serialize(columnFamily, bufOut);
                    }
                }
                else
                {
                    FileStruct filestruct = lfs.get(0);
                    /* read the length although we don't need it */
                    int size = filestruct.getBufIn().readInt();
                    bufOut.write(filestruct.getBufIn(), size);
                }

                if (ssTable == null)
                {
                    ssTable = new SSTable(compactionFileLocation, mergedFileName, StorageService.getPartitioner());
                    // ... (snippet truncated)


    public static void serialize(ColumnFamily columnFamily, DataOutputStream dos) throws IOException
    {
        Collection<IColumn> columns = columnFamily.getAllColumns();
        BloomFilter bf = createColumnBloomFilter(columns);                   
        /* Write out the bloom filter. */
        DataOutputBuffer bufOut = new DataOutputBuffer();
        BloomFilter.serializer().serialize(bf, bufOut);
        /* write the length of the serialized bloom filter. */
        dos.writeInt(bufOut.getLength());
        /* write out the serialized bytes. */
        dos.write(bufOut.getData(), 0, bufOut.getLength());

        /* Do the indexing */
        TypeInfo typeInfo = DatabaseDescriptor.getTypeInfo(columnFamily.name());       
        doIndexing(typeInfo, columns, dos);       
    }
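The int length prefix written above is what lets a reader skip the bloom filter wholesale instead of deserializing it; IndexHelper.skipBloomFilterAndIndex in the compaction example relies on exactly this. A minimal reader-side sketch, assuming a hypothetical DataInputStream dis positioned at the prefix:

        int bfLength = dis.readInt(); // the length written by serialize() above
        dis.skipBytes(bfLength);      // jump past the serialized bloom filter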

    private void doRecovery(Stack<File> filesNeeded, byte[] header) throws IOException
    {
        Table table = Table.open(table_);

        DataInputBuffer bufIn = new DataInputBuffer();
        DataOutputBuffer bufOut = new DataOutputBuffer();       

        while ( !filesNeeded.isEmpty() )
        {
            File file = filesNeeded.pop();
            // IFileReader reader = SequenceFile.bufferedReader(file.getAbsolutePath(), DatabaseDescriptor.getLogFileSizeThreshold());
            IFileReader reader = SequenceFile.reader(file.getAbsolutePath());
            try
            {
                reader.readDirect(header);
                /* deserialize the commit log header */
                bufIn.reset(header, 0, header.length);
                CommitLogHeader clHeader = CommitLogHeader.serializer().deserialize(bufIn);
                /* seek to the lowest position */
                int lowPos = CommitLogHeader.getLowestPosition(clHeader);
                /*
                 * If lowPos == 0 then we need to skip the processing of this
                 * file.
                */
                if (lowPos == 0)
                    break;
                else
                    reader.seek(lowPos);

                /* read the logs populate RowMutation and apply */
                while ( !reader.isEOF() )
                {
                    bufOut.reset();
                    long bytesRead = reader.next(bufOut);
                    if ( bytesRead == -1 )
                        break;

                    bufIn.reset(bufOut.getData(), bufOut.getLength());
                    /* Skip over the commit log key portion */
                    bufIn.readUTF();
                    /* Skip over data size */
                    bufIn.readInt();
                   
                    // ... (snippet truncated)
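The readUTF()/readInt() skips above mirror the record layout the writer produces: a UTF key, an int payload size, then the serialized row. A hypothetical writer-side sketch of that layout, inferred from the reads (dos, table, and rowBuffer are stand-in names; the actual append format lives inside SequenceFile):

        dos.writeUTF(table);                                       // commit log key portion
        dos.writeInt(rowBuffer.getLength());                       // data size
        dos.write(rowBuffer.getData(), 0, rowBuffer.getLength());  // serialized row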

    synchronized CommitLogContext add(Row row) throws IOException
    {
        long currentPosition = -1L;
        CommitLogContext cLogCtx = null;
        DataOutputBuffer cfBuffer = new DataOutputBuffer();
        long fileSize = 0L;
       
        try
        {
            /* serialize the row */
            cfBuffer.reset();
            Row.serializer().serialize(row, cfBuffer);
            currentPosition = logWriter_.getCurrentPosition();
            cLogCtx = new CommitLogContext(logFile_, currentPosition);
            /* Update the header */
            updateHeader(row);
            logWriter_.append(table_, cfBuffer);
            fileSize = logWriter_.getFileSize();                      
            checkThresholdAndRollLog(fileSize);           
        }
        catch (IOException e)
        {
            if ( currentPosition != -1 )
                logWriter_.seek(currentPosition);
            throw e;
        }
        finally
        {                   
            cfBuffer.close();           
        }
        return cLogCtx;
    }
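The catch block above is a write rollback: on IOException the writer seeks back to the position recorded before the append, so the next append overwrites the partially written record. The same guard applies to any positional writer; a generic sketch using the writer calls shown in these examples (writer, key, and buffer are hypothetical names):

        long mark = writer.getCurrentPosition(); // remember where this record starts
        try
        {
            writer.append(key, buffer);          // may fail partway through
        }
        catch (IOException e)
        {
            writer.seek(mark);                   // roll back; the next append overwrites the partial record
            throw e;
        }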

public class FastSerializer implements ISerializer
{
    public byte[] serialize(Message message) throws IOException
    {
        DataOutputBuffer buffer = new DataOutputBuffer();
        Message.serializer().serialize(message, buffer);
        return buffer.getData();
    }
    // ... (snippet truncated)
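One caveat in the implementation above: getData() returns the buffer's backing array, which is usually longer than the serialized payload, so the returned byte[] may carry trailing unused bytes. A length-exact variant as a sketch, using the same copy idiom as the index-building example further down:

    public byte[] serializeExact(Message message) throws IOException
    {
        DataOutputBuffer buffer = new DataOutputBuffer();
        Message.serializer().serialize(message, buffer);
        /* copy out only the bytes that were actually written */
        byte[] bytes = new byte[buffer.getLength()];
        System.arraycopy(buffer.getData(), 0, bytes, 0, bytes.length);
        return bytes;
    }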

    private ReadCommand serializeAndDeserializeReadMessage(ReadCommand rm)
    {
        ReadCommand rm2 = null;
        ReadCommandSerializer rms = ReadCommand.serializer();
        DataOutputBuffer dos = new DataOutputBuffer();
        DataInputBuffer dis = new DataInputBuffer();

        try
        {
            rms.serialize(rm, dos);
            dis.reset(dos.getData(), dos.getLength());
            rm2 = rms.deserialize(dis);
        }
        catch (IOException e)
        {
            throw new RuntimeException(e);
            // ... (snippet truncated)

        random.nextBytes(bytes);
        ColumnFamily cf;

        cf = new ColumnFamily("Standard1", "Standard");
        cf.addColumn("C", bytes, 1);
        DataOutputBuffer bufOut = new DataOutputBuffer();
        ColumnFamily.serializer().serialize(cf, bufOut);

        DataInputBuffer bufIn = new DataInputBuffer();
        bufIn.reset(bufOut.getData(), bufOut.getLength());
        cf = ColumnFamily.serializer().deserialize(bufIn);
        assert cf != null;
        assert cf.name().equals("Standard1");
        assert cf.getAllColumns().size() == 1;
    }

            map.put(Integer.toString(i), ("Avinash Lakshman is a good man: " + i).getBytes());
        }

        // write
        cf = new ColumnFamily("Standard1", "Standard");
        DataOutputBuffer bufOut = new DataOutputBuffer();
        for (String cName : map.navigableKeySet())
        {
            cf.addColumn(cName, map.get(cName), 314);
        }
        ColumnFamily.serializer().serialize(cf, bufOut);

        // verify
        DataInputBuffer bufIn = new DataInputBuffer();
        bufIn.reset(bufOut.getData(), bufOut.getLength());
        cf = ColumnFamily.serializer().deserialize(bufIn);
        for (String cName : map.navigableKeySet())
        {
            assert Arrays.equals(cf.getColumn(cName).value(), map.get(cName));
            // ... (snippet truncated)

    }

    private static int getBlockCount(String dataFile) throws IOException
    {
        IFileReader dataReader = SequenceFile.bufferedReader(dataFile, bufferSize_);
        DataOutputBuffer bufOut = new DataOutputBuffer();
        DataInputBuffer bufIn = new DataInputBuffer();
        int blockCount = 0;

        try
        {
            while ( !dataReader.isEOF() )
            {
                bufOut.reset();
                dataReader.next(bufOut);
                bufIn.reset(bufOut.getData(), bufOut.getLength());
                /* Key just read */
                String key = bufIn.readUTF();
                if ( key.equals(SSTable.blockIndexKey_) )
                {
                    ++blockCount;
                    // ... (snippet truncated)

        String indexFile = dataFile.replace("-Data.", "-Index.");
        final int bufferSize = 64*1024;

        IFileWriter indexWriter = SequenceFile.bufferedWriter(indexFile, bufferSize);
        IFileReader dataReader = SequenceFile.bufferedReader(dataFile, bufferSize);
        DataOutputBuffer bufOut = new DataOutputBuffer();
        DataInputBuffer bufIn = new DataInputBuffer();
        /* BloomFilter of all data in the data file */
        BloomFilter bf = new BloomFilter((SSTable.indexInterval() + 1)*blockCount, 8);

        try
        {
            while ( !dataReader.isEOF() )
            {
                bufOut.reset();
                /* Record the position of the key. */
                long blockIndexOffset = dataReader.getCurrentPosition();
                dataReader.next(bufOut);
                bufIn.reset(bufOut.getData(), bufOut.getLength());
                /* Key just read */
                String key = bufIn.readUTF();
                if ( key.equals(SSTable.blockIndexKey_) )
                {
                    /* Ignore the size of the data associated with the block index */
                    bufIn.readInt();
                    /* Number of keys in the block. */
                    int blockSize = bufIn.readInt();
                    /* Largest key in the block */
                    String largestKey = null;

                    /*
                     * Read the keys in this block and find the largest key in
                     * this block. This is the key that gets written into the
                     * index file.
                    */
                    for ( int i = 0; i < blockSize; ++i )
                    {
                        String currentKey = bufIn.readUTF();
                        bf.add(currentKey);
                        if ( largestKey == null )
                        {
                            largestKey = currentKey;
                        }
                        else
                        {
                            if ( currentKey.compareTo(largestKey) > 0 )
                            {
                                /* record this key */
                                largestKey = currentKey;
                            }
                        }
                        /* Read the key's position and the size of the key data, and throw them away. */
                        bufIn.readLong();
                        bufIn.readLong();
                    }

                    /*
                     * Write into the index file the largest key in the block
                     * and the offset of the block index in the data file.
                    */
                    indexWriter.append(largestKey, BasicUtilities.longToByteArray(blockIndexOffset));
                }
            }
        }
        finally
        {
            dataReader.close();
            /* Cache the bloom filter */
            SSTable.storeBloomFilter(dataFile, bf);
            /* Write the bloom filter into the index file */
            bufOut.reset();
            BloomFilter.serializer().serialize(bf, bufOut);
            byte[] bytes = new byte[bufOut.getLength()];
            System.arraycopy(bufOut.getData(), 0, bytes, 0, bytes.length);
            indexWriter.close(bytes, bytes.length);
            bufOut.close();
        }
    }
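The bloom filter appended on close can be read back with the same buffer round trip used throughout these examples. A hedged sketch, assuming the index reader returns the trailing bytes as a hypothetical bfBytes array and that BloomFilter's serializer pairs deserialize with the serialize call shown above:

        /* bfBytes: hypothetical, the bytes handed to indexWriter.close() above */
        DataInputBuffer bufIn = new DataInputBuffer();
        bufIn.reset(bfBytes, bfBytes.length);
        BloomFilter bf = BloomFilter.serializer().deserialize(bufIn);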
