Class org.voltcore.utils.DBBPool

Examples of org.voltcore.utils.DBBPool.BBContainer
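The excerpts below are drawn from the VoltDB source tree and show typical BBContainer usage. As a quick orientation, here is a minimal sketch of the basic allocate/use/discard lifecycle; it is an illustrative standalone example, not taken from VoltDB, and assumes only DBBPool.allocateDirect(), BBContainer.b(), and BBContainer.discard() as they appear in the excerpts.

import java.nio.ByteBuffer;

import org.voltcore.utils.DBBPool;
import org.voltcore.utils.DBBPool.BBContainer;

public class BBContainerLifecycleSketch {
    public static void main(String[] args) {
        // Allocate a direct ByteBuffer wrapped in a ref-counted container
        BBContainer c = DBBPool.allocateDirect(4096);
        try {
            ByteBuffer buf = c.b();   // access the underlying direct buffer
            buf.clear();
            buf.putInt(42);
            buf.flip();
            System.out.println("read back: " + buf.getInt());
        } finally {
            c.discard();              // release exactly once when done
        }
    }
}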


        // Excerpt from StreamBlock: bump the ref count and hand out a read-only,
        // ref-counted view of the block's underlying buffer
        m_refCount.incrementAndGet();
        return getRefCountingContainer(m_buffer.b().slice().asReadOnlyBuffer());
    }

    private BBContainer getRefCountingContainer(ByteBuffer buf) {
        return new BBContainer(buf) {
            @Override
            public void discard() {
                checkDoubleFree();
                StreamBlock.this.discard();
            }
View Full Code Here


                                         CoreUtils.getThreadFactory("Compression service thread"))
            );

    private static IOBuffers getBuffersForCompression(int length, boolean inputNotUsed) {
        IOBuffers buffers = m_buffers.get();
        BBContainer input = buffers.input;
        BBContainer output = buffers.output;

        final int maxCompressedLength = Snappy.maxCompressedLength(length);

        final int inputCapacity = input.b().capacity();
        final int outputCapacity = output.b().capacity();

        /*
         * The caller may provide a direct byte buffer, in which case no input buffer is needed
         */
        boolean changedBuffer = false;
        if (!inputNotUsed && inputCapacity < length) {
            input.discard();
            input = DBBPool.allocateDirect(Math.max(inputCapacity * 2, length));
            changedBuffer = true;
        }

        if (outputCapacity < maxCompressedLength) {
            output.discard();
            output = DBBPool.allocateDirect(Math.max(outputCapacity * 2, maxCompressedLength));
            changedBuffer = true;
        }

        if (changedBuffer) {
            buffers = new IOBuffers(input, output);
            m_buffers.set(buffers);
        }
        output.b().clear();
        input.b().clear();

        return buffers;
    }
View Full Code Here
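The pattern in getBuffersForCompression() recurs throughout these excerpts: when a cached container is too small, discard it, allocate at least double the capacity, and stash the replacement back in the thread-local. Below is a standalone sketch of that idea, assuming only DBBPool.allocateDirect() and BBContainer from above; the BufferCache class and its ensureCapacity() helper are hypothetical.

import org.voltcore.utils.DBBPool;
import org.voltcore.utils.DBBPool.BBContainer;

final class BufferCache {
    // Hypothetical per-thread cache, analogous to m_buffers in the excerpts above
    private static final ThreadLocal<BBContainer> CACHE =
            ThreadLocal.withInitial(() -> DBBPool.allocateDirect(32 * 1024));

    // Return a cached direct buffer with at least the requested capacity,
    // discarding the old container and caching a larger one when needed.
    static BBContainer ensureCapacity(int length) {
        BBContainer c = CACHE.get();
        final int capacity = c.b().capacity();
        if (capacity < length) {
            c.discard();
            c = DBBPool.allocateDirect(Math.max(capacity * 2, length));
            CACHE.set(c);
        }
        c.b().clear();
        return c;
    }
}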

    }

    public static byte[] decompressBuffer(final ByteBuffer compressed) throws IOException {
        assert(compressed.isDirect());
        IOBuffers buffers = m_buffers.get();
        BBContainer output = buffers.output;

        final int uncompressedLength = Snappy.uncompressedLength(compressed);
        final int outputCapacity = buffers.output.b().capacity();
        if (outputCapacity < uncompressedLength) {
            buffers.output.discard();
            output = DBBPool.allocateDirect(Math.max(outputCapacity * 2, uncompressedLength));
            buffers = new IOBuffers(buffers.input, output);
            m_buffers.set(buffers);
        }
        output.b().clear();

        final int actualUncompressedLength = Snappy.uncompress(compressed, output.b());
        assert(uncompressedLength == actualUncompressedLength);

        byte result[] = new byte[actualUncompressedLength];
        output.b().get(result);
        return result;
    }
View Full Code Here

        return Snappy.uncompress(compressed, uncompressed);
    }

    public static byte[] decompressBytes(byte bytes[]) throws IOException {
        IOBuffers buffers = m_buffers.get();
        BBContainer input = buffers.input;
        BBContainer output = buffers.output;

        final int inputCapacity = input.b().capacity();
        if (inputCapacity < bytes.length) {
            input.discard();
            input = DBBPool.allocateDirect(Math.max(inputCapacity * 2, bytes.length));
            buffers = new IOBuffers(input, output);
            m_buffers.set(buffers);
        }

        final ByteBuffer inputBuffer = input.b();
        inputBuffer.clear();
        inputBuffer.put(bytes);
        inputBuffer.flip();

        final int uncompressedLength = Snappy.uncompressedLength(inputBuffer);
        final int outputCapacity = output.b().capacity();
        if (outputCapacity < uncompressedLength) {
            output.discard();
            output = DBBPool.allocateDirect(Math.max(outputCapacity * 2, uncompressedLength));
            buffers = new IOBuffers(input, output);
            m_buffers.set(buffers);
        }
        final ByteBuffer outputBuffer = output.b();
        outputBuffer.clear();

        final int actualUncompressedLength = Snappy.uncompress(inputBuffer, outputBuffer);
        assert(uncompressedLength == actualUncompressedLength);

View Full Code Here

                if (m_availableBytes.get() > m_maxAvailableBytes) {
                    Thread.sleep(5);
                    continue;
                }

                BBContainer c = m_saveFile.getNextChunk();
                if (c == null) {
                    return;
                }

                try {
                    final VoltTable vt = PrivateVoltTableFactory
                            .createVoltTableFromBuffer(c.b(), true);
                    Pair<Integer, byte[]> p = VoltTableUtil.toCSV(vt, m_delimiter, null, lastNumCharacters);
                    lastNumCharacters = p.getFirst();
                    byte csvBytes[] = p.getSecond();
                    m_availableBytes.addAndGet(csvBytes.length);
                    m_available.offer(csvBytes);
                } finally {
                    c.discard();
                }
            }
        }
View Full Code Here

        assert(!m_endOfStream);
        if (buffer != null) {
            //The first 8 bytes contain no data and can be ignored; they are header
            //space for storing the USO in the stream block
            if (buffer.capacity() > 8) {
                final BBContainer cont = DBBPool.wrapBB(buffer);
                if (m_lastReleaseOffset > 0 && m_lastReleaseOffset >= (uso + (buffer.capacity() - 8))) {
                    //The entire buffer falls at or below an ack we already received, so drop it
                    if (exportLog.isDebugEnabled()) {
                        exportLog.debug("Dropping already acked USO: " + m_lastReleaseOffset
                                + " Buffer info: " + uso + " Size: " + buffer.capacity());
                    }
                    cont.discard();
                    return;
                }
                try {
                    m_committedBuffers.offer(new StreamBlock(
                            new BBContainer(buffer) {
                                @Override
                                public void discard() {
                                    final ByteBuffer buf = checkDoubleFree();
                                    cont.discard();
                                    deleted.set(true);
                                }
                            }, uso, false));
                } catch (IOException e) {
                    exportLog.error(e);
                    if (!deleted.get()) {
                        cont.discard();
                    }
                }
            } else {
                /*
                 * TupleStreamWrapper::setBytesUsed propagates the USO by sending
View Full Code Here
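The export excerpt above combines DBBPool.wrapBB() with an anonymous BBContainer whose discard() forwards to the wrapped container, the same idiom as in the first StreamBlock excerpt. Here is a minimal standalone sketch of that override-discard pattern, assuming only wrapBB(), checkDoubleFree(), and discard() as they appear above; wrapWithCleanup() and its parameters are hypothetical.

import java.nio.ByteBuffer;

import org.voltcore.utils.DBBPool;
import org.voltcore.utils.DBBPool.BBContainer;

public class DiscardOverrideSketch {
    // Wrap an existing direct buffer and attach extra cleanup to discard()
    static BBContainer wrapWithCleanup(ByteBuffer directBuffer, Runnable onDiscard) {
        final BBContainer inner = DBBPool.wrapBB(directBuffer);
        return new BBContainer(directBuffer) {
            @Override
            public void discard() {
                checkDoubleFree();   // guards against a second discard()
                inner.discard();     // release the wrapped container exactly once
                onDiscard.run();     // e.g. flip a deleted flag, as in the excerpt
            }
        };
    }
}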

        }

        /*
         * Free buffers used to pull snapshot data in process
         */
        BBContainer cont;
        while ((cont = m_buffers.poll()) != null) {
            cont.discard();
        }
    }
View Full Code Here

    public void testMatchesNativeBytes() throws Exception {
        final long seed = ByteBuffer.wrap(SecureRandom.getSeed(8)).getInt();
        Random r = new Random(seed);
        System.out.println("Seed is " + seed);
        EELibraryLoader.loadExecutionEngineLibrary(true);
        BBContainer c = DBBPool.allocateDirect(4096);
        try {
            c.b().order(ByteOrder.LITTLE_ENDIAN);

            for (int ii = 0; ii < iterations; ii++) {
                int bytesToFill = r.nextInt(maxLength + 1);
                byte bytes[] = new byte[bytesToFill];
                r.nextBytes(bytes);
                c.b().clear();
                c.b().put(bytes);
                c.b().flip();

                long nativeHash = DBBPool.getMurmur3128(c.address(), 0, bytesToFill);
                long javaHash = MurmurHash3.hash3_x64_128(c.b(), 0, bytesToFill, 0);
                if (nativeHash != javaHash) {
                    fail("Failed in iteration " + ii + " native hash " + Long.toHexString(nativeHash) +
                            " java hash " + Long.toHexString(javaHash) + " with bytes " + Encoder.base64Encode(bytes));
                }
                }
            }
        } finally {
            c.discard();
        }
    }
View Full Code Here

            throw new IOException(e);
        }
        fs.writeInt(jsonBytes.length);
        fs.write(jsonBytes);

        final BBContainer container = fs.getBBContainer();
        container.b().position(4);
        container.b().putInt(container.b().remaining() - 4);
        container.b().position(0);

        final byte schemaBytes[] = PrivateVoltTableFactory.getSchemaBytes(schemaTable);

        final PureJavaCrc32 crc = new PureJavaCrc32();
        ByteBuffer aggregateBuffer = ByteBuffer.allocate(container.b().remaining() + schemaBytes.length);
        aggregateBuffer.put(container.b());
        container.discard();
        aggregateBuffer.put(schemaBytes);
        aggregateBuffer.flip();
        crc.update(aggregateBuffer.array(), 4, aggregateBuffer.capacity() - 4);

        final int crcValue = (int) crc.getValue();
View Full Code Here

    private ListenableFuture<?> write(final Callable<BBContainer> tupleDataC, final boolean prependLength) {
        /*
         * Unwrap the data to be written. For the traditional
         * snapshot data target this should be a noop.
         */
        BBContainer tupleDataTemp;
        try {
            tupleDataTemp = tupleDataC.call();
            /*
             * Can be null if the dedupe filter nulled out the buffer
             */
            if (tupleDataTemp == null) {
                return Futures.immediateFuture(null);
            }
        } catch (Throwable t) {
            return Futures.immediateFailedFuture(t);
        }
        final BBContainer tupleDataCont = tupleDataTemp;


        if (m_writeFailed) {
            tupleDataCont.discard();
            return null;
        }

        ByteBuffer tupleData = tupleDataCont.b();

        m_outstandingWriteTasks.incrementAndGet();

        Future<BBContainer> compressionTask = null;
        if (prependLength) {
            BBContainer cont =
                    DBBPool.allocateDirectAndPool(SnapshotSiteProcessor.m_snapshotBufferCompressedLen);
            //Skip 4 bytes so the partition ID is not compressed.
            //That way, if we detect corruption, we know which partition is bad.
            tupleData.position(tupleData.position() + 4);
            /*
             * Leave 12 bytes for the header: a 4-byte length prefix, a 4-byte partition id,
             * and a 4-byte CRC32C of just the header bytes. Together with the 4-byte CRC of
             * the compressed payload written by CompressionService, the full header is 16 bytes.
             */
            cont.b().position(12);
            compressionTask = CompressionService.compressAndCRC32cBufferAsync(tupleData, cont);
        }
        final Future<BBContainer> compressionTaskFinal = compressionTask;

        ListenableFuture<?> writeTask = m_es.submit(new Callable<Object>() {
            @Override
            public Object call() throws Exception {
                try {
                    if (m_acceptOneWrite) {
                        m_acceptOneWrite = false;
                    } else {
                        if (m_simulateBlockedWrite != null) {
                            m_simulateBlockedWrite.await();
                        }
                        if (m_simulateFullDiskWritingChunk) {
                            //Make sure to consume the result of the compression
                            compressionTaskFinal.get().discard();
                            throw new IOException("Disk full");
                        }
                    }

                    final ByteBuffer tupleData = tupleDataCont.b();
                    int totalWritten = 0;
                    if (prependLength) {
                        BBContainer payloadContainer = compressionTaskFinal.get();
                        try {
                            final ByteBuffer payloadBuffer = payloadContainer.b();
                            payloadBuffer.position(0);

                            ByteBuffer lengthPrefix = ByteBuffer.allocate(12);
                            m_bytesAllowedBeforeSync.acquire(payloadBuffer.remaining());
                            //Length prefix does not include the four header fields, just the compressed
                            //payload that follows
                            lengthPrefix.putInt(payloadBuffer.remaining() - 16);//length prefix
                            lengthPrefix.putInt(tupleData.getInt(0)); // partitionId

                            /*
                             * Checksum the header and put it in the payload buffer
                             */
                            PureJavaCrc32C crc = new PureJavaCrc32C();
                            crc.update(lengthPrefix.array(), 0, 8);
                            lengthPrefix.putInt((int)crc.getValue());
                            lengthPrefix.flip();
                            payloadBuffer.put(lengthPrefix);
                            payloadBuffer.position(0);

                            enforceSnapshotRateLimit(payloadBuffer.remaining());

                            /*
                             * Write payload to file
                             */
                            while (payloadBuffer.hasRemaining()) {
                                totalWritten += m_channel.write(payloadBuffer);
                            }
                        } finally {
                            payloadContainer.discard();
                        }
                    } else {
                        while (tupleData.hasRemaining()) {
                            totalWritten += m_channel.write(tupleData);
                        }
View Full Code Here
