Package org.apache.jackrabbit.oak.plugins.segment

Examples of org.apache.jackrabbit.oak.plugins.segment.SegmentNodeBuilder
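
SegmentNodeBuilder is the segment-backed NodeBuilder of Oak's SegmentMK: obtained from a SegmentNodeState via builder(), it records changes in memory and writes them out as new segment records when getNodeState() is called. A minimal sketch of the usual read-modify-write cycle (the in-memory MemoryStore and the "greeting" property are illustrative assumptions, not taken from the examples below):

        SegmentStore store = new MemoryStore();          // any SegmentStore, e.g. a FileStore
        SegmentNodeState base = store.getHead();         // current head revision
        SegmentNodeBuilder builder = base.builder();     // mutable view of the head
        builder.setProperty("greeting", "hello");        // stage a change in memory
        SegmentNodeState head = builder.getNodeState();  // persist the change as segment records
        store.setHead(base, head);                       // atomic compare-and-set of the head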


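        // Incremental backup over the (older) Journal API: diff the current
        // repository state against the last backed-up checkpoint and apply
        // the changes to the "root" child of the backup store's head.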
        try {
            Journal journal = backup.getJournal("root");

            SegmentNodeState state = new SegmentNodeState(
                    backup.getWriter().getDummySegment(), journal.getHead());
            SegmentNodeBuilder builder = state.builder();

            String beforeCheckpoint = state.getString("checkpoint");
            if (beforeCheckpoint == null) {
                // 3.1 no stored checkpoint, so do the initial full backup
                builder.setChildNode("root", current);
            } else {
                // 3.2 try to retrieve the previously backed up checkpoint
                NodeState before = store.retrieve(beforeCheckpoint);
                if (before == null) {
                    // the previous checkpoint is no longer available,
                    // so use the backed up state as the basis of the
                    // incremental backup diff
                    before = state.getChildNode("root");
                }
                current.compareAgainstBaseState(
                        before, new ApplyDiff(builder.child("root")));
            }
            builder.setProperty("checkpoint", checkpoint);

            // 4. commit the backup
            journal.setHead(
                    state.getRecordId(), builder.getNodeState().getRecordId());
        } finally {
            backup.close();
        }

        log.debug("Backup done in {} ms.", System.currentTimeMillis() - s);


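        // Restart scenario: reopen the store, write a 10 MB random blob,
        // flush, then move the head back to the pre-blob base state, leaving
        // the blob unreachable for a later garbage collection run.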
        FileStore store = new FileStore(directory, 1, memoryMapping);
        store.close();

        store = new FileStore(directory, 1, memoryMapping);
        SegmentNodeState base = store.getHead();
        SegmentNodeBuilder builder = base.builder();
        byte[] data = new byte[10 * 1024 * 1024];
        new Random().nextBytes(data);
        Blob blob = builder.createBlob(new ByteArrayInputStream(data));
        builder.setProperty("foo", blob);
        store.setHead(base, builder.getNodeState());
        store.flush();
        store.setHead(store.getHead(), base); // move the head back so the blob becomes garbage
        store.close();

        store = new FileStore(directory, 1, memoryMapping);

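    // Recovery scenario: flush() and close() force the pending segments to
    // disk; after reopening the store, the last committed value of "step"
    // must still be readable.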
    public void testRecovery() throws IOException {
        FileStore store = new FileStore(directory, 1, false);
        store.flush(); // first 1kB

        SegmentNodeState base = store.getHead();
        SegmentNodeBuilder builder = base.builder();
        builder.setProperty("step", "a");
        store.setHead(base, builder.getNodeState());
        store.flush(); // second 1kB

        base = store.getHead();
        builder = base.builder();
        builder.setProperty("step", "b");
        store.setHead(base, builder.getNodeState());
        store.close(); // third 1kB

        store = new FileStore(directory, 1, false);
        assertEquals("b", store.getHead().getString("step"));
        store.close();


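        // Compaction and cleanup (OAK-1932): a reference from a concurrently
        // written segment back to pre-compaction segments pins the old data;
        // cleanup only reclaims the space once the compactor writes through
        // its own SegmentWriter.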
        FileStore store = new FileStore(directory, 1, false);
        SegmentWriter writer = store.getTracker().getWriter();

        SegmentNodeState base = store.getHead();
        SegmentNodeBuilder builder = base.builder();
        byte[] data = new byte[largeBinarySize];
        new Random().nextBytes(data);
        SegmentBlob blob = writer.writeStream(new ByteArrayInputStream(data));
        builder.setProperty("foo", blob);
        builder.getNodeState(); // write the blob reference to the segment
        builder.setProperty("foo", "bar");
        SegmentNodeState head = builder.getNodeState();
        assertTrue(store.setHead(base, head));
        assertEquals("bar", store.getHead().getString("foo"));
        store.close();

        // First simulate the case where during compaction a reference to the
        // older segments is added to a segment that the compactor is writing
        store = new FileStore(directory, 1, false);
        head = store.getHead();
        assertTrue(store.size() > largeBinarySize);
        writer = store.getTracker().getWriter(); // reacquire the default writer of the reopened store
        Compactor compactor = new Compactor(writer);
        SegmentNodeState compacted =
                compactor.compact(EmptyNodeState.EMPTY_NODE, head);
        builder = head.builder();
        builder.setChildNode("old", head); // reference to pre-compacted state
        builder.getNodeState(); // the reference is written by the same writer the compactor uses
        assertTrue(store.setHead(head, compacted));
        store.close();

        // In this case the revision cleanup is unable to reclaim the old data
        store = new FileStore(directory, 1, false);
        assertTrue(store.size() > largeBinarySize);
        store.cleanup();
        assertTrue(store.size() > largeBinarySize);
        store.close();

        // Now we do the same thing, but let the compactor use a different
        // SegmentWriter
        store = new FileStore(directory, 1, false);
        head = store.getHead();
        assertTrue(store.size() > largeBinarySize);
        writer = new SegmentWriter(store, store.getTracker());
        compactor = new Compactor(writer);
        compacted = compactor.compact(EmptyNodeState.EMPTY_NODE, head);
        builder = head.builder();
        builder.setChildNode("old", head); // reference to pre-compacted state
        builder.getNodeState(); // written by the store's default writer, not the compactor's
        writer.flush();
        assertTrue(store.setHead(head, compacted));
        store.close();

        // Revision cleanup is now able to reclaim the extra space (OAK-1932)


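            // Backup via compaction: compact the diff between the previously
            // backed-up state and the current state, then commit the result
            // together with the new checkpoint reference.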
            Compactor compactor = new Compactor(backup.getTracker().getWriter());
            SegmentNodeState after = compactor.compact(before, current);

            // 4. commit the backup
            SegmentNodeBuilder builder = state.builder();
            builder.setProperty("checkpoint", checkpoint);
            builder.setChildNode("root", after);
            backup.setHead(state, builder.getNodeState());
        } finally {
            backup.close();
        }

        log.debug("Backup finished in {} ms.", System.currentTimeMillis() - s);

