Package org.apache.activemq.store.kahadb.disk.util

Examples of org.apache.activemq.store.kahadb.disk.util.DataByteArrayOutputStream
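
The snippets on this page are truncated excerpts from ActiveMQ's KahaDB internals (the journal's DataFileAppender writer thread and the page file's Transaction class). DataByteArrayOutputStream is KahaDB's growable in-memory DataOutput implementation: it writes Java primitives into an internal byte array, lets callers reposition the write cursor with reset() and skip(), and exposes the result as a ByteSequence without copying. A minimal usage sketch, assuming the standard ActiveMQ 5.x classes shown in the imports:

    import org.apache.activemq.store.kahadb.disk.util.DataByteArrayOutputStream;
    import org.apache.activemq.util.ByteSequence;

    public class BasicUsage {
        public static void main(String[] args) throws java.io.IOException {
            // The initial size is a hint; the internal buffer grows as needed.
            DataByteArrayOutputStream out = new DataByteArrayOutputStream(64);

            out.writeInt(42);                    // 4 bytes, DataOutput (big-endian) semantics
            out.writeByte(1);                    // 1 byte
            out.write("payload".getBytes());     // 7 raw bytes

            // No copy is made: the sequence wraps the stream's internal buffer.
            ByteSequence seq = out.toByteSequence();
            System.out.println(seq.getLength()); // prints 12
        }
    }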


                    if( file.length() < journal.preferedFileLength ) {
                        file.setLength(journal.preferedFileLength);
                    }
                }

                final DataByteArrayOutputStream buff = wb.buff;
                final boolean forceToDisk = wb.forceToDisk;

                ByteSequence sequence = buff.toByteSequence();
               
                // Now we can fill in the batch control record properly.
                buff.reset();
                buff.skip(5+Journal.BATCH_CONTROL_RECORD_MAGIC.length);
                buff.writeInt(sequence.getLength()-Journal.BATCH_CONTROL_RECORD_SIZE);
                if( journal.isChecksum() ) {
                    Checksum checksum = new Adler32();
                    checksum.update(sequence.getData(), sequence.getOffset()+Journal.BATCH_CONTROL_RECORD_SIZE, sequence.getLength()-Journal.BATCH_CONTROL_RECORD_SIZE);
                    buff.writeLong(checksum.getValue());
                }

                // Now do the 1 big write.
                file.seek(wb.offset);
                if (maxStat > 0) {
                    // ... (excerpt truncated)
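
This fragment shows the appender's two-pass trick: an empty batch control record is written first, the data records are appended after it, and then reset() rewinds the write cursor (without clearing the buffer) so that skip() can reposition it at the size field and patch in the real length and an Adler32 checksum. The checksum half of the pattern is plain JDK; a self-contained sketch (the 10-byte header size is a stand-in for Journal.BATCH_CONTROL_RECORD_SIZE):

    import java.util.zip.Adler32;
    import java.util.zip.Checksum;

    public class ChecksumSketch {
        public static void main(String[] args) {
            byte[] batch = "HEADER____payload-bytes".getBytes();
            int headerSize = 10;

            // The checksum covers only the payload that follows the control
            // record, exactly as in the excerpt above.
            Checksum checksum = new Adler32();
            checksum.update(batch, headerSize, batch.length - headerSize);
            System.out.printf("adler32=%d over %d bytes%n",
                    checksum.getValue(), batch.length - headerSize);
        }
    }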


        DataFile dataFile = null;
        RandomAccessFile file = null;
        WriteBatch wb = null;
        try {

            DataByteArrayOutputStream buff = new DataByteArrayOutputStream(maxWriteBatchSize);
            while (true) {

                // Block till we get a command.
                synchronized (enqueueMutex) {
                    while (true) {
                        if (nextWriteBatch != null) {
                            wb = nextWriteBatch;
                            nextWriteBatch = null;
                            break;
                        }
                        if (shutdown) {
                            return;
                        }
                        enqueueMutex.wait();
                    }
                    enqueueMutex.notifyAll();
                }

                if (dataFile != wb.dataFile) {
                    if (file != null) {
                        file.setLength(dataFile.getLength());
                        dataFile.closeRandomAccessFile(file);
                    }
                    dataFile = wb.dataFile;
                    file = dataFile.openRandomAccessFile();
                    if( file.length() < journal.preferedFileLength ) {
                        file.setLength(journal.preferedFileLength);
                    }
                }

                Journal.WriteCommand write = wb.writes.getHead();

                // Write an empty batch control record.
                buff.reset();
                buff.writeInt(Journal.BATCH_CONTROL_RECORD_SIZE);
                buff.writeByte(Journal.BATCH_CONTROL_RECORD_TYPE);
                buff.write(Journal.BATCH_CONTROL_RECORD_MAGIC);
                buff.writeInt(0);
                buff.writeLong(0);

                boolean forceToDisk = false;
                while (write != null) {
                    forceToDisk |= write.sync | (syncOnComplete && write.onComplete != null);
                    buff.writeInt(write.location.getSize());
                    buff.writeByte(write.location.getType());
                    buff.write(write.data.getData(), write.data.getOffset(), write.data.getLength());
                    write = write.getNext();
                }

                ByteSequence sequence = buff.toByteSequence();

                // Now we can fill in the batch control record properly.
                buff.reset();
                buff.skip(5+Journal.BATCH_CONTROL_RECORD_MAGIC.length);
                buff.writeInt(sequence.getLength()-Journal.BATCH_CONTROL_RECORD_SIZE);
                if( journal.isChecksum() ) {
                    Checksum checksum = new Adler32();
                    checksum.update(sequence.getData(), sequence.getOffset()+Journal.BATCH_CONTROL_RECORD_SIZE, sequence.getLength()-Journal.BATCH_CONTROL_RECORD_SIZE);
                    buff.writeLong(checksum.getValue());
                }

                // Now do the 1 big write.
                file.seek(wb.offset);
                if (maxStat > 0) {
                    // ... (excerpt truncated)
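
This fuller excerpt shows the writer thread end to end: it parks on enqueueMutex until a producer publishes nextWriteBatch, assembles the placeholder control record plus all queued writes into a single DataByteArrayOutputStream, patches the control record, and issues one large write. Stripped of journal details, the handoff is a classic single-slot wait/notify exchange; a simplified sketch of the same pattern (the names are illustrative, not ActiveMQ API):

    public class SingleSlotHandoff {
        private final Object mutex = new Object();
        private String nextBatch;           // the single slot
        private volatile boolean shutdown;

        // Producer side: fill the slot and wake the consumer.
        public void enqueue(String batch) throws InterruptedException {
            synchronized (mutex) {
                while (nextBatch != null) { // slot occupied: wait until it is drained
                    mutex.wait();
                }
                nextBatch = batch;
                mutex.notifyAll();
            }
        }

        // Consumer side: mirrors the loop in the excerpt above.
        public String take() throws InterruptedException {
            synchronized (mutex) {
                while (true) {
                    if (nextBatch != null) {
                        String batch = nextBatch;
                        nextBatch = null;
                        mutex.notifyAll();  // let a blocked producer refill the slot
                        return batch;
                    }
                    if (shutdown) {
                        return null;
                    }
                    mutex.wait();
                }
            }
        }
    }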

        DataFile dataFile = null;
        RecoverableRandomAccessFile file = null;
        WriteBatch wb = null;
        try {

            DataByteArrayOutputStream buff = new DataByteArrayOutputStream(maxWriteBatchSize);
            while (true) {

                // Block till we get a command.
                synchronized (enqueueMutex) {
                    while (true) {
                        if (nextWriteBatch != null) {
                            wb = nextWriteBatch;
                            nextWriteBatch = null;
                            break;
                        }
                        if (shutdown) {
                            return;
                        }
                        enqueueMutex.wait();
                    }
                    enqueueMutex.notifyAll();
                }

                if (dataFile != wb.dataFile) {
                    if (file != null) {
                        file.setLength(dataFile.getLength());
                        dataFile.closeRandomAccessFile(file);
                    }
                    dataFile = wb.dataFile;
                    file = dataFile.openRandomAccessFile();
                    if( file.length() < journal.preferedFileLength ) {
                        file.setLength(journal.preferedFileLength);
                    }
                }

                Journal.WriteCommand write = wb.writes.getHead();

                // Write an empty batch control record.
                buff.reset();
                buff.writeInt(Journal.BATCH_CONTROL_RECORD_SIZE);
                buff.writeByte(Journal.BATCH_CONTROL_RECORD_TYPE);
                buff.write(Journal.BATCH_CONTROL_RECORD_MAGIC);
                buff.writeInt(0);
                buff.writeLong(0);

                boolean forceToDisk = false;
                while (write != null) {
                    forceToDisk |= write.sync | (syncOnComplete && write.onComplete != null);
                    buff.writeInt(write.location.getSize());
                    buff.writeByte(write.location.getType());
                    buff.write(write.data.getData(), write.data.getOffset(), write.data.getLength());
                    write = write.getNext();
                }

                ByteSequence sequence = buff.toByteSequence();

                // Now we can fill in the batch control record properly.
                buff.reset();
                buff.skip(5+Journal.BATCH_CONTROL_RECORD_MAGIC.length);
                buff.writeInt(sequence.getLength()-Journal.BATCH_CONTROL_RECORD_SIZE);
                if( journal.isChecksum() ) {
                    Checksum checksum = new Adler32();
                    checksum.update(sequence.getData(), sequence.getOffset()+Journal.BATCH_CONTROL_RECORD_SIZE, sequence.getLength()-Journal.BATCH_CONTROL_RECORD_SIZE);
                    buff.writeLong(checksum.getValue());
                }

                // Now do the 1 big write.
                file.seek(wb.offset);
                if (maxStat > 0) {
                    // ... (excerpt truncated)
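
The excerpt above is nearly identical to the previous one; the two come from different ActiveMQ versions of DataFileAppender, the later of which replaces java.io.RandomAccessFile with ActiveMQ's RecoverableRandomAccessFile wrapper so the underlying file can be reopened after an I/O error. The batch-assembly logic around DataByteArrayOutputStream is unchanged.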

            page.makeFree(getWriteTransactionId());
            // ensure free page is visible while write is pending
            pageFile.addToCache(page.copy());

            DataByteArrayOutputStream out = new DataByteArrayOutputStream(pageFile.getPageSize());
            page.write(out);
            write(page, out.getData());

            freeList.add(page.getPageId());
            page = next;
        }
    }
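
This fragment is the tail of a loop that frees a chain of pages: makeFree() rewrites the page header as a free record, the copy is pushed into the page cache so readers observe the freed state while the disk write is still pending, and the page id goes back on the free list for reuse. The DataByteArrayOutputStream is sized to one page because page.write(out) marshals only the header; a free page carries no payload.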

     * @throws PageOverflowIOException
     *         If the page data marshals to a size larger than the maximum page size and overflow was false.
     * @throws IllegalStateException
     *         if the PageFile is not loaded
     */
    public <T> void store(Page<T> page, Marshaller<T> marshaller, final boolean overflow) throws IOException {
        DataByteArrayOutputStream out = (DataByteArrayOutputStream)openOutputStream(page, overflow);
        if (marshaller != null) {
            marshaller.writePayload(page.get(), out);
        }
        out.close();
    }
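
store() is the write half of the page API: openOutputStream() returns a stream positioned just past the page header, the marshaller serializes the value into it, and close() triggers the actual page write. A usage sketch, assuming the standard KahaDB PageFile/Transaction API and the bundled StringMarshaller (verify the exact signatures against your ActiveMQ version):

    import java.io.File;
    import org.apache.activemq.store.kahadb.disk.page.Page;
    import org.apache.activemq.store.kahadb.disk.page.PageFile;
    import org.apache.activemq.store.kahadb.disk.page.Transaction;
    import org.apache.activemq.store.kahadb.disk.util.StringMarshaller;

    public class StoreExample {
        public static void main(String[] args) throws Exception {
            PageFile pageFile = new PageFile(new File("target"), "example");
            pageFile.load();
            try {
                Transaction tx = pageFile.tx();
                Page<String> page = tx.allocate();   // grab a fresh page
                page.set("hello, kahadb");
                // overflow=true: a value larger than one page spills into a page chain
                tx.store(page, StringMarshaller.INSTANCE, true);
                tx.commit();
            } finally {
                pageFile.unload();
            }
        }
    }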

        //
        // To support writing VERY large data, we override the output stream so
        // that we do the page writes incrementally while the data is being
        // marshalled.
        DataByteArrayOutputStream out = new DataByteArrayOutputStream(pageFile.getPageSize() * 2) {
            Page current = copy;

            @SuppressWarnings("unchecked")
            @Override
            protected void onWrite() throws IOException {

                // Are we at an overflow condition?
                final int pageSize = pageFile.getPageSize();
                if (pos >= pageSize) {
                    // If overflow is allowed
                    if (overflow) {

                        do {
                            Page next;
                            if (current.getType() == Page.PAGE_PART_TYPE) {
                                next = load(current.getNext(), null);
                            } else {
                                next = allocate();
                            }

                            next.txId = current.txId;

                            // Write the page header
                            int oldPos = pos;
                            pos = 0;

                            current.makePagePart(next.getPageId(), getWriteTransactionId());
                            current.write(this);

                            // Do the page write..
                            byte[] data = new byte[pageSize];
                            System.arraycopy(buf, 0, data, 0, pageSize);
                            Transaction.this.write(current, data);

                            // make the new link visible
                            pageFile.addToCache(current);

                            // Reset for the next page chunk
                            pos = 0;
                            // The page header is marshalled after the data is written.
                            skip(Page.PAGE_HEADER_SIZE);
                            // Move the overflow data after the header.
                            System.arraycopy(buf, pageSize, buf, pos, oldPos - pageSize);
                            pos += oldPos - pageSize;
                            current = next;

                        } while (pos > pageSize);
                    } else {
                        throw new PageOverflowIOException("Page overflow.");
                    }
                }

            }

            @Override
            public void close() throws IOException {
                super.close();

                // We need to free up the rest of the page chain..
                if (current.getType() == Page.PAGE_PART_TYPE) {
                    free(current.getNext());
                }

                current.makePageEnd(pos, getWriteTransactionId());

                // make visible as end page
                pageFile.addToCache(current);

                // Write the header..
                pos = 0;
                current.write(this);

                Transaction.this.write(current, buf);
            }
        };

        // The page header is marshalled after the data is written.
        out.skip(Page.PAGE_HEADER_SIZE);
        return out;
    }
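
The anonymous subclass above works because DataByteArrayOutputStream exposes a protected onWrite() hook, invoked after each write, plus the protected buf and pos fields; that is what lets the transaction flush full pages while the value is still being marshalled instead of buffering it whole. A simplified sketch of the same technique that flushes fixed-size chunks (flushChunk() is a hypothetical sink, not ActiveMQ API):

    import java.io.IOException;
    import org.apache.activemq.store.kahadb.disk.util.DataByteArrayOutputStream;

    public class ChunkingStream extends DataByteArrayOutputStream {
        private final int chunkSize;

        public ChunkingStream(int chunkSize) {
            super(chunkSize * 2);   // room for one chunk plus spill-over
            this.chunkSize = chunkSize;
        }

        @Override
        protected void onWrite() throws IOException {
            // Called after every write; drain whole chunks and slide the
            // remainder down, mirroring the page-spilling loop above.
            while (pos >= chunkSize) {
                flushChunk(buf, chunkSize);
                System.arraycopy(buf, chunkSize, buf, 0, pos - chunkSize);
                pos -= chunkSize;
            }
        }

        private void flushChunk(byte[] data, int length) {
            System.out.println("flushing " + length + " bytes");
        }
    }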

