Class org.voltcore.utils.DBBPool

Examples of org.voltcore.utils.DBBPool.BBContainer
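
A BBContainer pairs a ByteBuffer (typically a direct buffer handed out by the DBBPool allocator) with a discard() callback that returns the memory to the pool. Every example below follows the same contract: access the wrapped buffer through b(), and call discard() exactly once when the container is no longer needed. A minimal lifecycle sketch, assuming nothing beyond the allocateDirect()/b()/discard() calls that appear in the snippets on this page:

    import java.nio.ByteBuffer;

    import org.voltcore.utils.DBBPool;
    import org.voltcore.utils.DBBPool.BBContainer;

    public class BBContainerLifecycleSketch {
        public static void main(String[] args) {
            // Allocate a pooled direct buffer; the container owns it until discard().
            final BBContainer cont = DBBPool.allocateDirect(1024);
            try {
                final ByteBuffer buf = cont.b();   // the wrapped buffer
                buf.putInt(42);
                buf.flip();
                System.out.println(buf.getInt());  // prints 42
            } finally {
                cont.discard();                    // return the memory exactly once
            }
        }
    }

Forgetting discard() leaks pooled memory; calling it twice trips the double-free check visible in the CSV filter example near the end of this page.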


    @Override
    public Callable<BBContainer> filter(final Callable<BBContainer> input) {
        return new Callable<BBContainer>() {
            @Override
            public BBContainer call() throws Exception {
                final BBContainer cont = input.call();
                final int partitionId = cont.b().getInt(m_partitionIdOffset);
                boolean hasPartition = false;

                for (int acceptedPartitionId : m_partitions) {
                    if (partitionId == acceptedPartitionId) {
                        hasPartition = true;
                    }
                }

                if (hasPartition) {
                    return cont;
                } else {
                    cont.discard();
                    return null;
                }
            }
        };
    }
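
The filter takes ownership of every container it pulls from the upstream Callable: a chunk whose partition id is accepted is passed through, anything else is discarded immediately and null is returned. A hypothetical consumer of such a filtered source (the class and method names here are illustrative, not part of the VoltDB API):

    import java.nio.ByteBuffer;
    import java.util.concurrent.Callable;

    import org.voltcore.utils.DBBPool.BBContainer;

    public class FilterConsumerSketch {
        public static void consumeOne(Callable<BBContainer> filtered) throws Exception {
            final BBContainer cont = filtered.call();
            if (cont == null) {
                return;         // the chunk belonged to another partition and was already discarded
            }
            try {
                final ByteBuffer data = cont.b();
                System.out.println("accepted chunk of " + data.remaining() + " bytes");
            } finally {
                cont.discard(); // the caller owns accepted containers
            }
        }
    }

Returning null instead of blocking keeps the filter stateless, but it also means every caller has to tolerate a null result, as the CSV filter at the bottom of this page does.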


        /*
         * The old method was out of hand. Going to start a new one with a different format
         * that should be easier to understand and validate.
         */
        private void readChunksV2() {
            //For reading the compressed input.
            final BBContainer fileInputBufferC =
                    DBBPool.allocateDirect(CompressionService.maxCompressedLength(DEFAULT_CHUNKSIZE));
            final ByteBuffer fileInputBuffer = fileInputBufferC.b();
            long sinceLastFAdvise = Long.MAX_VALUE;
            long positionAtLastFAdvise = 0;
            while (m_hasMoreChunks) {
                if (sinceLastFAdvise > 1024 * 1024 * 48) {
                    sinceLastFAdvise = 0;
                    VoltLogger log = new VoltLogger("SNAPSHOT");
                    try {
                        final long position = m_saveFile.position();
                        long retval = PosixAdvise.fadvise(
                                m_fd,
                                position,
                                position + 1024 * 1024 * 64,
                                PosixAdvise.POSIX_FADV_WILLNEED);
                        if (retval != 0) {
                            log.info("Failed to fadvise in TableSaveFile, this is harmless: " + retval);
                        }

                        //Get aligned start and end position
                        final long fadviseStart = positionAtLastFAdvise;
                        //-1 because we don't want to drop the last page, since
                        //we will be reading it soon
                        positionAtLastFAdvise = ((position / Bits.pageSize()) - 1) * Bits.pageSize();
                        final long length = positionAtLastFAdvise - fadviseStart;
                        if (length > 0) {
                            retval = PosixAdvise.fadvise(
                                    m_fd,
                                    fadviseStart,
                                    length,
                                    PosixAdvise.POSIX_FADV_DONTNEED);
                        }
                        if (retval != 0) {
                            log.info("Failed to fadvise in TableSaveFile, this is harmless: " + retval);
                        }
                        positionAtLastFAdvise = position;
                    } catch (Throwable t) {
                        log.info("Exception attempting fadvise", t);
                    }
                }

                /*
                 * Limit the number of chunks materialized into memory at one time
                 */
                try {
                    m_chunkReads.acquire();
                } catch (InterruptedException e) {
                    return;
                }
                boolean expectedAnotherChunk = false;
                Container c = null;
                try {

                    /*
                     * Get the length of the next chunk, partition id, crc for partition id and length prefix,
                     * and then the CRC of the compressed payload
                     */
                    ByteBuffer chunkLengthB = ByteBuffer.allocate(16);
                    while (chunkLengthB.hasRemaining()) {
                        final int read = m_saveFile.read(chunkLengthB);
                        if (read == -1) {
                            throw new EOFException();
                        }
                        sinceLastFAdvise += read;
                    }
                    int nextChunkLength = chunkLengthB.getInt(0);
                    expectedAnotherChunk = true;

                    /*
                     * Get the partition id and its CRC (CRC now covers length prefix) and validate it. Validating the
                     * partition ID for the chunk separately makes it possible to
                     * continue processing chunks from other partitions if only one partition
                     * has corrupt chunks in the file.
                     */
                    assert(m_checksumType == ChecksumType.CRC32C);
                    final Checksum partitionIdCRC = new PureJavaCrc32C();
                    final int nextChunkPartitionId = chunkLengthB.getInt(4);
                    final int nextChunkPartitionIdCRC = chunkLengthB.getInt(8);

                    partitionIdCRC.update(chunkLengthB.array(), 0, 8);
                    int generatedValue = (int)partitionIdCRC.getValue();
                    if (generatedValue != nextChunkPartitionIdCRC) {
                        chunkLengthB.position(0);
                        for (int partitionId : m_partitionIds) {
                            m_corruptedPartitions.add(partitionId);
                        }
                        throw new IOException("Chunk partition ID CRC check failed. " +
                                "This corrupts all partitions in this file");
                    }

                    /*
                     * CRC for the data portion of the chunk
                     */
                    final int nextChunkCRC = chunkLengthB.getInt(12);

                    /*
                     * Sanity check the length value to ensure there isn't
                     * a runtime exception or OOM.
                     */
                    if (nextChunkLength < 0) {
                        throw new IOException("Corrupted TableSaveFile chunk has negative chunk length");
                    }

                    if (nextChunkLength > fileInputBuffer.capacity()) {
                        throw new IOException("Corrupted TableSaveFile chunk has unreasonable length " +
                                "> DEFAULT_CHUNKSIZE bytes");
                    }

                    /*
                     * Go fetch the compressed data so that the uncompressed size is known
                     * and use that to set nextChunkLength to be the uncompressed length;
                     * the code ahead that constructs the VoltTable expects
                     * the uncompressed size/data since it produces an uncompressed table
                     */
                    fileInputBuffer.clear();
                    fileInputBuffer.limit(nextChunkLength);
                    while (fileInputBuffer.hasRemaining()) {
                        final int read = m_saveFile.read(fileInputBuffer);
                        if (read == -1) {
                            throw new EOFException();
                        }
                        sinceLastFAdvise += read;
                    }
                    fileInputBuffer.flip();
                    nextChunkLength = CompressionService.uncompressedLength(fileInputBuffer);

                    /*
                     * Validate the rest of the chunk. This can fail if the data is corrupted
                     * or the length value was corrupted.
                     */
                    final int calculatedCRC =
                            DBBPool.getBufferCRC32C(fileInputBuffer, 0, fileInputBuffer.remaining());
                    if (calculatedCRC != nextChunkCRC) {
                        m_corruptedPartitions.add(nextChunkPartitionId);
                        if (m_continueOnCorruptedChunk) {
                            m_chunkReads.release();
                            continue;
                        } else {
                            throw new IOException("CRC mismatch in saved table chunk");
                        }
                    }

                    /*
                     * Now allocate space to store the chunk using the VoltTable serialization representation.
                     * The chunk will contain an integer row count preceding it so it can
                     * be sucked straight in. There is a little funny business to overwrite the
                     * partition id that is not part of the serialization format
                     */
                    c = getOutputBuffer(nextChunkPartitionId);

                    /*
                     * If the length value is wrong or not all data made it to disk this read will
                     * not complete correctly. There could be overflow, underflow etc.
                     * so use a try finally block to indicate that all partitions are now corrupt.
                     * The enclosing exception handlers will do the right thing WRT to
                     * propagating the error and closing the file.
                     */
                    boolean completedRead = false;
                    try {
                        final ByteBuffer buf = c.b();
                        /*
                         * Assemble a VoltTable out of the chunk of tuples.
                         * Put in the header that was cached in the constructor,
                         * then copy the tuple data.
                         */
                        buf.clear();
                        buf.limit(nextChunkLength  + m_tableHeader.capacity());
                        m_tableHeader.position(0);
                        buf.put(m_tableHeader);
                        //Doesn't move buffer position, does change the limit
                        CompressionService.decompressBuffer(fileInputBuffer, buf);
                        completedRead = true;
                    } finally {
                        if (!completedRead) {
                            for (int partitionId : m_partitionIds) {
                                m_corruptedPartitions.add(partitionId);
                            }
                            if (m_continueOnCorruptedChunk) {
                                m_chunkReads.release();
                                continue;
                            } else {
                                throw new IOException("Failed decompression of saved table chunk");
                            }
                        }
                    }

                    /*
                     * Skip irrelevant chunks after CRC is calculated. Always calculate the CRC
                     * in case it is the length value that is corrupted
                     */
                    if (m_relevantPartitionIds != null) {
                        if (!m_relevantPartitionIds.contains(nextChunkPartitionId)) {
                            m_chunkReads.release();
                            continue;
                        }
                    }

                    /*
                     * VoltTable wants the buffer at the home position 0
                     */
                    c.b().position(0);

                    synchronized (TableSaveFile.this) {
                        m_availableChunks.offer(c);
                        c = null;
                        TableSaveFile.this.notifyAll();
                    }
                } catch (EOFException eof) {
                    synchronized (TableSaveFile.this) {
                        m_hasMoreChunks = false;
                        if (expectedAnotherChunk) {
                            m_chunkReaderException = new IOException(
                                    "Expected to find another chunk but reached end of file instead");
                        }
                        TableSaveFile.this.notifyAll();
                    }
                } catch (IOException e) {
                    e.printStackTrace();
                    synchronized (TableSaveFile.this) {
                        m_hasMoreChunks = false;
                        m_chunkReaderException = e;
                        TableSaveFile.this.notifyAll();
                    }
                } catch (BufferUnderflowException e) {
                    synchronized (TableSaveFile.this) {
                        m_hasMoreChunks = false;
                        m_chunkReaderException = new IOException(e);
                        TableSaveFile.this.notifyAll();
                    }
                } catch (BufferOverflowException e) {
                    synchronized (TableSaveFile.this) {
                        m_hasMoreChunks = false;
                        m_chunkReaderException = new IOException(e);
                        TableSaveFile.this.notifyAll();
                    }
                } catch (IndexOutOfBoundsException e) {
                    synchronized (TableSaveFile.this) {
                        m_hasMoreChunks = false;
                        m_chunkReaderException = new IOException(e);
                        TableSaveFile.this.notifyAll();
                    }
                } finally {
                    if (c != null) c.discard();
                }
            }
            fileInputBufferC.discard();
        }
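
readChunksV2 reads a fixed 16-byte header per chunk: the compressed payload length, the partition id, a CRC covering those first 8 bytes, and a CRC of the compressed payload, at offsets 0, 4, 8 and 12 respectively. A minimal sketch of that layout (java.util.zip.CRC32C is used here to keep the example self-contained; the code above uses PureJavaCrc32C):

    import java.nio.ByteBuffer;
    import java.util.zip.CRC32C;

    // Sketch of the 16-byte V2 chunk header; the offsets mirror the getInt(0/4/8/12)
    // calls in readChunksV2 above. This is not the actual reader implementation.
    final class ChunkHeaderV2Sketch {
        final int chunkLength;   // compressed payload length
        final int partitionId;
        final int headerCrc;     // CRC over bytes [0, 8): length prefix + partition id
        final int payloadCrc;    // CRC over the compressed payload

        ChunkHeaderV2Sketch(ByteBuffer header16) {
            chunkLength = header16.getInt(0);
            partitionId = header16.getInt(4);
            headerCrc   = header16.getInt(8);
            payloadCrc  = header16.getInt(12);
        }

        boolean headerCrcMatches(ByteBuffer header16) {
            final byte[] first8 = new byte[8];
            for (int i = 0; i < 8; i++) {
                first8[i] = header16.get(i); // absolute gets leave the buffer position untouched
            }
            final CRC32C crc = new CRC32C();
            crc.update(first8, 0, 8);
            return (int) crc.getValue() == headerCrc;
        }
    }

Because the header CRC also covers the length prefix, a corrupted length is detected before any buffer sizing is attempted, which is why a mismatch marks every partition in the file as corrupt.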


        private void readChunks() {
            //For reading the compressed input.
            BBContainer fileInputBufferC =
                    DBBPool.allocateDirect(CompressionService.maxCompressedLength(DEFAULT_CHUNKSIZE));
            ByteBuffer fileInputBuffer = fileInputBufferC.b();
            while (m_hasMoreChunks) {
                /*
                 * Limit the number of chunks materialized into memory at one time
                 */
                try {
                    m_chunkReads.acquire();
                } catch (InterruptedException e) {
                    return;
                }
                boolean expectedAnotherChunk = false;
                Container c = null;
                try {

                    /*
                     * Get the length of the next chunk, the partition id, and the CRC for the partition id
                     */
                    ByteBuffer chunkLengthB = ByteBuffer.allocate(16);
                    while (chunkLengthB.hasRemaining()) {
                        final int read = m_saveFile.read(chunkLengthB);
                        if (read == -1) {
                            throw new EOFException();
                        }
                    }
                    chunkLengthB.flip();
                    int nextChunkLength = chunkLengthB.getInt();
                    expectedAnotherChunk = true;

                    /*
                     * Get the partition id and its CRC and validate it. Validating the
                     * partition ID for the chunk separately makes it possible to
                     * continue processing chunks from other partitions if only one partition
                     * has corrupt chunks in the file.
                     */
                    final Checksum partitionIdCRC = m_checksumType == ChecksumType.CRC32C ? new PureJavaCrc32C() : new PureJavaCrc32();
                    chunkLengthB.mark();
                    final int nextChunkPartitionId = chunkLengthB.getInt();
                    final int nextChunkPartitionIdCRC = chunkLengthB.getInt();
                    chunkLengthB.reset();
                    byte partitionIdBytes[] = new byte[4];
                    chunkLengthB.get(partitionIdBytes);
                    partitionIdCRC.update(partitionIdBytes, 0, partitionIdBytes.length);
                    int generatedValue = (int)partitionIdCRC.getValue();
                    if (generatedValue != nextChunkPartitionIdCRC) {
                        chunkLengthB.position(0);
                        for (int partitionId : m_partitionIds) {
                            m_corruptedPartitions.add(partitionId);
                        }
                        throw new IOException("Chunk partition ID CRC check failed. " +
                                "This corrupts all partitions in this file");
                    }

                    /*
                     * CRC for the data portion of the chunk
                     */
                    chunkLengthB.position(chunkLengthB.position() + 4);
                    final int nextChunkCRC = chunkLengthB.getInt();

                    /*
                     * Sanity check the length value to ensure there isn't
                     * a runtime exception or OOM.
                     */
                    if (nextChunkLength < 0) {
                        throw new IOException("Corrupted TableSaveFile chunk has negative chunk length");
                    }

                    if (isCompressed()) {
                        if (nextChunkLength > fileInputBuffer.capacity()) {
                            throw new IOException("Corrupted TableSaveFile chunk has unreasonable length " +
                                    "> DEFAULT_CHUNKSIZE bytes");
                        }
                    } else {
                        if (nextChunkLength > DEFAULT_CHUNKSIZE) {
                            throw new IOException("Corrupted TableSaveFile chunk has unreasonable length " +
                                    "> DEFAULT_CHUNKSIZE bytes");
                        }
                    }

                    /*
                     * Go fetch the compressed data so that the uncompressed size is known
                     * and use that to set nextChunkLength to be the uncompressed length;
                     * the code ahead that constructs the VoltTable expects
                     * the uncompressed size/data since it produces an uncompressed table
                     */
                    if (isCompressed()) {
                        fileInputBuffer.clear();
                        fileInputBuffer.limit(nextChunkLength);
                        while (fileInputBuffer.hasRemaining()) {
                            final int read = m_saveFile.read(fileInputBuffer);
                            if (read == -1) {
                                throw new EOFException();
                            }
                        }
                        fileInputBuffer.flip();
                        nextChunkLength = CompressionService.uncompressedLength(fileInputBuffer);
                    }

                    /*
                     * Now allocate space to store the chunk using the VoltTable serialization representation.
                     * The chunk will contain an integer row count preceding it so it can
                     * be sucked straight in. There is a little funny business to overwrite the
                     * partition id that is not part of the serialization format
                     */
                    c = getOutputBuffer(nextChunkPartitionId);

                    /*
                     * If the length value is wrong or not all data made it to disk this read will
                     * not complete correctly. There could be overflow, underflow etc.
                     * so use a try finally block to indicate that all partitions are now corrupt.
                     * The enclosing exception handlers will do the right thing WRT to
                     * propagating the error and closing the file.
                     */
                    boolean completedRead = false;
                    int checksumStartPosition = 0;
                    int rowCount = 0;
                    try {
                        /*
                         * Assemble a VoltTable out of the chunk of tuples.
                         * Put in the header that was cached in the constructor,
                         * then copy the tuple data. The row count is at the end
                         * because it isn't known until serialization is complete.
                         * It will have to be moved back to the beginning of the tuple data
                         * after the header once the CRC has been calculated.
                         */
                        c.b().clear();
                        //The length of the chunk already includes space for the 4-byte row count
                        //even though it is at the end, but we also need to leave space at the end for the CRC calc
                        if (isCompressed()) {
                            c.b().limit(nextChunkLength  + m_tableHeader.capacity() + 4);
                        } else {
                            //Before compression the chunk length included the stuff added in the EE
                            //like the 2 CRCs and partition id. It is only -8 because we still need the 4 bytes
                            //of padding to move the row count in when constructing the volt table format.
                            c.b().limit((nextChunkLength - 8) + m_tableHeader.capacity());
                        }
                        m_tableHeader.position(0);
                        c.b().put(m_tableHeader);
                        c.b().position(c.b().position() + 4);//Leave space for row count to be moved into
                        checksumStartPosition = c.b().position();
                        if (isCompressed()) {
                            CompressionService.decompressBuffer(fileInputBuffer, c.b());
                            c.b().position(c.b().limit());
                        } else {
                            while (c.b().hasRemaining()) {
                                final int read = m_saveFile.read(c.b());
                                if (read == -1) {
                                    throw new EOFException();
                                }
                            }
                        }
                        c.b().position(c.b().position() - 4);
                        rowCount = c.b().getInt();
                        c.b().position(checksumStartPosition);
                        completedRead = true;
                    } finally {
                        if (!completedRead) {
                            for (int partitionId : m_partitionIds) {
                                m_corruptedPartitions.add(partitionId);
                            }
                        }
                    }

                    /*
                     * Validate the rest of the chunk. This can fail if the data is corrupted
                     * or the length value was corrupted.
                     */
                    final int calculatedCRC =
                            m_checksumType == ChecksumType.CRC32C  ?
                                    DBBPool.getCRC32C(c.address(), c.b().position(), c.b().remaining()) :
                                        DBBPool.getCRC32(c.address(), c.b().position(), c.b().remaining());
                    if (calculatedCRC != nextChunkCRC) {
                        m_corruptedPartitions.add(nextChunkPartitionId);
                        if (m_continueOnCorruptedChunk) {
                            m_chunkReads.release();
                            continue;
                        } else {
                            throw new IOException("CRC mismatch in saved table chunk");
                        }
                    }

                    /*
                     * Skip irrelevant chunks after CRC is calculated. Always calculate the CRC
                     * in case it is the length value that is corrupted
                     */
                    if (m_relevantPartitionIds != null) {
                        if (!m_relevantPartitionIds.contains(nextChunkPartitionId)) {
                            m_chunkReads.release();
                            continue;
                        }
                    }

                    /*
                     * The row count which was stored on disk at the end (and for the CRC calc)
                     * is now moved to the appropriate place for the table serialization format.
                     * Update the limit to reflect that.
                     *
                     * Surrounded in a try finally just in case there is overflow/underflow. Shouldn't
                     * happen but I could be wrong.
                     */
                    boolean success = false;
                    try {
                        c.b().limit(c.b().limit() - 4);
                        c.b().position(checksumStartPosition - 4);
                        c.b().putInt(rowCount);
                        c.b().position(0);
                        success = true;
                    } finally {
                        if (!success) {
                            for (int partitionId : m_partitionIds) {
                                m_corruptedPartitions.add(partitionId);
                            }
                        }
                    }

                    synchronized (TableSaveFile.this) {
                        m_availableChunks.offer(c);
                        c = null;
                        TableSaveFile.this.notifyAll();
                    }
                } catch (EOFException eof) {
                    synchronized (TableSaveFile.this) {
                        m_hasMoreChunks = false;
                        if (expectedAnotherChunk) {
                            m_chunkReaderException = new IOException(
                                    "Expected to find another chunk but reached end of file instead");
                        }
                        TableSaveFile.this.notifyAll();
                    }
                } catch (IOException e) {
                    synchronized (TableSaveFile.this) {
                        m_hasMoreChunks = false;
                        m_chunkReaderException = e;
                        TableSaveFile.this.notifyAll();
                    }
                } catch (BufferUnderflowException e) {
                    synchronized (TableSaveFile.this) {
                        m_hasMoreChunks = false;
                        m_chunkReaderException = new IOException(e);
                        TableSaveFile.this.notifyAll();
                    }
                } catch (BufferOverflowException e) {
                    synchronized (TableSaveFile.this) {
                        m_hasMoreChunks = false;
                        m_chunkReaderException = new IOException(e);
                        TableSaveFile.this.notifyAll();
                    }
                } catch (IndexOutOfBoundsException e) {
                    synchronized (TableSaveFile.this) {
                        m_hasMoreChunks = false;
                        m_chunkReaderException = new IOException(e);
                        TableSaveFile.this.notifyAll();
                    }
                } finally {
                    if (c != null) c.discard();
                }
            }
            fileInputBufferC.discard();
        }
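
In the older format handled by readChunks the row count is stored at the end of the chunk so the CRC can run over contiguous tuple data; only after the CRC has been verified is the count moved into the 4-byte slot reserved just before the tuple data (the V2 format above avoids this shuffle by storing the row count in its final position). A toy sketch of the relocation, with made-up sizes and no header or CRC handling:

    import java.nio.ByteBuffer;

    public class RowCountRelocationSketch {
        public static void main(String[] args) {
            final int headerLen = 8, tupleLen = 16, rowCount = 3;
            final ByteBuffer b = ByteBuffer.allocate(headerLen + 4 + tupleLen + 4);
            // On-disk-style layout: [table header][4-byte slot][tuple data][row count]
            b.position(headerLen + 4 + tupleLen);
            b.putInt(rowCount);                      // the row count trails the tuple data
            // Relocation: drop the trailing copy and fill the reserved slot instead.
            b.limit(b.limit() - 4);
            b.position(headerLen);
            b.putInt(rowCount);
            b.position(0);
            System.out.println(b.getInt(headerLen)); // prints 3, now where VoltTable expects it
        }
    }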

        private Container getOutputBuffer(final int nextChunkPartitionId) {
            BBContainer c = m_buffers.poll();
            if (c == null) {
                final BBContainer originContainer = DBBPool.allocateDirect(DEFAULT_CHUNKSIZE);
                final ByteBuffer b = originContainer.b();
                final Container retcont = new Container(b, originContainer, nextChunkPartitionId);
                return retcont;
            }
            /*
             * Need to reconstruct the container with the partition id of the next

        fs.writeBoolean(isReplicated);
        if (!isReplicated) {
            fs.writeArray(partitionIds);
            fs.writeInt(numPartitions);
        }
        final BBContainer container = fs.getBBContainer();
        container.b().position(4);
        container.b().putInt(container.b().remaining() - 4);
        container.b().position(0);

        FastSerializer schemaSerializer = new FastSerializer();
        int schemaTableLen = schemaTable.getSerializedSize();
        ByteBuffer serializedSchemaTable = ByteBuffer.allocate(schemaTableLen);
        schemaTable.flattenToBuffer(serializedSchemaTable);
        serializedSchemaTable.flip();
        schemaSerializer.write(serializedSchemaTable);
        final BBContainer schemaContainer = schemaSerializer.getBBContainer();
        schemaContainer.b().limit(schemaContainer.b().limit() - 4);//Don't want the row count
        schemaContainer.b().position(schemaContainer.b().position() + 4);//Don't want total table length

        final PureJavaCrc32 crc = new PureJavaCrc32();
        ByteBuffer aggregateBuffer = ByteBuffer.allocate(container.b().remaining() + schemaContainer.b().remaining());
        aggregateBuffer.put(container.b());
        container.discard();
        aggregateBuffer.put(schemaContainer.b());
        schemaContainer.discard();
        aggregateBuffer.flip();
        crc.update(aggregateBuffer.array(), 4, aggregateBuffer.capacity() - 4);

        final int crcValue = (int) crc.getValue();
        aggregateBuffer.putInt(crcValue).position(8);

    private ListenableFuture<?> write(final Callable<BBContainer> tupleDataC, final boolean prependLength) {
        /*
         * Unwrap the data to be written. For the traditional
         * snapshot data target this should be a noop.
         */
        BBContainer tupleDataTemp;
        try {
            tupleDataTemp = tupleDataC.call();
        } catch (Throwable t) {
            return Futures.immediateFailedFuture(t);
        }
        final BBContainer tupleData = tupleDataTemp;

        if (m_writeFailed) {
            tupleData.discard();
            return null;
        }

        if (prependLength) {
            tupleData.b().putInt(tupleData.b().remaining() - 4);
            tupleData.b().position(0);
        }

        m_outstandingWriteTasks.incrementAndGet();
        ListenableFuture<?> writeTask = m_es.submit(new Callable<Object>() {
            @Override
            public Object call() throws Exception {
                try {
                    if (m_acceptOneWrite) {
                        m_acceptOneWrite = false;
                    } else {
                        if (m_simulateFullDiskWritingChunk) {
                            throw new IOException("Disk full");
                        }
                    }

                    m_bytesAllowedBeforeSync.acquire(tupleData.b().remaining());

                    int totalWritten = 0;
                    while (tupleData.b().hasRemaining()) {
                        totalWritten += m_channel.write(tupleData.b());
                    }
                    m_bytesWritten += totalWritten;
                    m_bytesWrittenSinceLastSync.addAndGet(totalWritten);
                } catch (IOException e) {
                    m_writeException = e;
                    SNAP_LOG.error("Error while attempting to write snapshot data to file " + m_file, e);
                    m_writeFailed = true;
                    throw e;
                } finally {
                    tupleData.discard();
                    m_outstandingWriteTasksLock.lock();
                    try {
                        if (m_outstandingWriteTasks.decrementAndGet() == 0) {
                            m_noMoreOutstandingWriteTasksCondition.signalAll();
                        }
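
When prependLength is true the container is expected to arrive with four unused bytes at the front; write() fills that slot with the payload length (remaining() - 4) and rewinds to position 0 before handing the buffer to the channel. A small sketch of the framing convention, using a plain heap buffer and a made-up payload rather than a real BBContainer:

    import java.nio.ByteBuffer;
    import java.nio.charset.StandardCharsets;

    public class LengthPrefixSketch {
        public static void main(String[] args) {
            final byte[] payload = "tuple data".getBytes(StandardCharsets.UTF_8);
            final ByteBuffer framed = ByteBuffer.allocate(4 + payload.length);
            framed.position(4);                     // reserve the 4-byte length slot
            framed.put(payload);
            framed.flip();                          // position = 0, limit = 4 + payload length
            framed.putInt(framed.remaining() - 4);  // same arithmetic as in write() above
            framed.position(0);                     // rewind so the slot and payload are both written
            System.out.println(framed.getInt(0));   // prints 10, the payload length
        }
    }

A matching reader can then recover the payload boundary from the first four bytes alone.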

    @Override
    public Callable<BBContainer> filter(final Callable<BBContainer> input) {
        return new Callable<BBContainer>() {
            @Override
            public BBContainer call() throws Exception {
                BBContainer cont = input.call();
                if (cont == null) {
                    return null;
                }
                try {
                    ByteBuffer buf = ByteBuffer.allocate(m_schemaBytes.length + cont.b().remaining() - 4);
                    buf.put(m_schemaBytes);
                    cont.b().position(4);
                    buf.put(cont.b());

                    VoltTable vt = PrivateVoltTableFactory.createVoltTableFromBuffer(buf, true);
                    Pair<Integer, byte[]> p =
                                    VoltTableUtil.toCSV(
                                            vt,
                                            m_columnTypes,
                                            m_delimiter,
                                            m_fullDelimiters,
                                            m_lastNumCharacters);
                    m_lastNumCharacters = p.getFirst();
                    final BBContainer origin = cont;
                    cont = null;
                    return new BBContainer( ByteBuffer.wrap(p.getSecond())) {
                        @Override
                        public void discard() {
                            checkDoubleFree();
                            origin.discard();
                        }
                    };
                } finally {
                    if (cont != null) {
                        cont.discard();
                    }
                }
            }
        };
    }


