Package org.apache.activemq.store.kahadb.disk.journal

Examples of org.apache.activemq.store.kahadb.disk.journal.Location
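
A Location identifies a record in the KahaDB journal by a data file id and a byte offset within that file, and compares accordingly: data file id first, then offset. A minimal self-contained sketch of that ordering, using only the constructor and accessors that appear in the excerpts below (the demo class name is ours):

    import org.apache.activemq.store.kahadb.disk.journal.Location;

    public class LocationOrderingDemo {
        public static void main(String[] args) {
            // A Location pinpoints a journal record: (data file id, byte offset).
            Location a = new Location(1, 0);     // first record of data file 1
            Location b = new Location(1, 4096);  // a later record in the same file
            Location c = new Location(2, 0);     // first record of data file 2

            // Ordering is by data file id first, then by offset; the recovery
            // and GC code below relies on this when it builds key ranges.
            System.out.println(a.compareTo(b) < 0); // true: same file, smaller offset
            System.out.println(b.compareTo(c) < 0); // true: smaller file id
            System.out.println("file=" + c.getDataFileId() + " offset=" + c.getOffset());
        }
    }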


    private void recover() throws IllegalStateException, IOException {
        this.indexLock.writeLock().lock();
        try {

            long start = System.currentTimeMillis();
            Location producerAuditPosition = recoverProducerAudit();
            Location ackMessageFileLocation = recoverAckMessageFileMap();
            Location lastIndoubtPosition = getRecoveryPosition();

            Location recoveryPosition = minimum(producerAuditPosition, ackMessageFileLocation);
            recoveryPosition = minimum(recoveryPosition, lastIndoubtPosition);

            if (recoveryPosition != null) {
                int redoCounter = 0;
                LOG.info("Recovering from the journal ...");
View Full Code Here


    private Location minimum(Location producerAuditPosition,
            Location lastIndoubtPosition) {
        Location min = null;
        if (producerAuditPosition != null) {
            min = producerAuditPosition;
            if (lastIndoubtPosition != null && lastIndoubtPosition.compareTo(producerAuditPosition) < 0) {
                min = lastIndoubtPosition;
            }
        } else {
            min = lastIndoubtPosition;
        }
        return min;
    }
View Full Code Here
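
For comparison, the same null-tolerant minimum can be written against java.util.Comparator. A sketch with equivalent semantics, assuming Location implements Comparable&lt;Location&gt; (which its compareTo usage above suggests):

    import java.util.Comparator;

    // Null-tolerant minimum matching the helper above: a non-null value always
    // wins over null, and null comes back only when both arguments are null.
    static Location minimum(Location a, Location b) {
        return Comparator.nullsLast(Comparator.<Location>naturalOrder())
                .compare(a, b) <= 0 ? a : b;
    }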

    public JobLocation(Location location) {
        this.location = location;
    }

    public JobLocation() {
        this(new Location());
    }
View Full Code Here
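
JobLocation pairs a scheduled job's metadata with the journal Location of its payload. A hypothetical construction (journal, payload, and the sync flag are ours; journal.write(...) returning a Location matches the store(...) excerpt further down):

    // Append the job payload to the journal, then remember where it landed
    // so the scheduler index can find it again later.
    Location written = journal.write(payload, true); // sync write
    JobLocation job = new JobLocation(written);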

        // Tail of JobLocation's readExternal(DataInput); the earlier fields are
        // elided in this excerpt.
        this.nextTime = in.readLong();
        this.period = in.readLong();
        this.cronEntry = in.readUTF();
        this.location.readExternal(in);
        if (in.readBoolean()) {
            this.lastUpdate = new Location();
            this.lastUpdate.readExternal(in);
        }
    }
View Full Code Here
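
The fragment above is the tail of JobLocation's readExternal. A sketch of the assumed write-side mirror (not the verbatim ActiveMQ code; 'out' is a java.io.DataOutput, and Location.writeExternal is assumed symmetric to the readExternal call above):

    out.writeLong(this.nextTime);
    out.writeLong(this.period);
    out.writeUTF(this.cronEntry);
    this.location.writeExternal(out);
    // The boolean flags whether an optional lastUpdate Location follows.
    out.writeBoolean(this.lastUpdate != null);
    if (this.lastUpdate != null) {
        this.lastUpdate.writeExternal(out);
    }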

    protected void recoverIndex(Transaction tx) throws IOException {
        long start = System.currentTimeMillis();
        // It is possible that index updates were applied before the journal updates;
        // in that case we need to remove references to messages that are not in the journal.
        final Location lastAppendLocation = journal.getLastAppendLocation();
        long undoCounter=0;

        // Go through all the destinations to see if they have messages past the lastAppendLocation
        for (StoredDestination sd : storedDestinations.values()) {

            final ArrayList<Long> matches = new ArrayList<Long>();
            // Find all the Locations that are >= the last append Location.
            sd.locationIndex.visit(tx, new BTreeVisitor.GTEVisitor<Location, Long>(lastAppendLocation) {
                @Override
                protected void matched(Location key, Long value) {
                    matches.add(value);
                }
            });

            for (Long sequenceId : matches) {
                MessageKeys keys = sd.orderIndex.remove(tx, sequenceId);
                sd.locationIndex.remove(tx, keys.location);
                sd.messageIdIndex.remove(tx, keys.messageId);
                metadata.producerSequenceIdTracker.rollback(keys.messageId);
                undoCounter++;
                // TODO: do we need to modify the ack positions for the pub sub case?
            }
        }

        if( undoCounter > 0 ) {
            // The rolled-back operations are essentially in-flight journal writes; to
            // avoid them, the end user should enable sync writes to the journal.
            if (LOG.isInfoEnabled()) {
                long end = System.currentTimeMillis();
                LOG.info("Rolled back " + undoCounter + " messages from the index in " + ((end - start) / 1000.0f) + " seconds.");
            }
        }

        undoCounter = 0;
        start = System.currentTimeMillis();

        // Let's be extra paranoid here and verify that all the data files referenced
        // by the indexes still exist.

        final SequenceSet ss = new SequenceSet(); // data file ids referenced by the index
        for (StoredDestination sd : storedDestinations.values()) {
            // Use a visitor to cut down the number of pages that we load
            sd.locationIndex.visit(tx, new BTreeVisitor<Location, Long>() {
                int last=-1;

                @Override
                public boolean isInterestedInKeysBetween(Location first, Location second) {
                    if( first==null ) {
                        return !ss.contains(0, second.getDataFileId());
                    } else if( second==null ) {
                        return true;
                    } else {
                        return !ss.contains(first.getDataFileId(), second.getDataFileId());
                    }
                }

                @Override
                public void visit(List<Location> keys, List<Long> values) {
                    for (Location l : keys) {
                        int fileId = l.getDataFileId();
                        if( last != fileId ) {
                            ss.add(fileId);
                            last = fileId;
                        }
                    }
                }

            });
        }
        HashSet<Integer> missingJournalFiles = new HashSet<Integer>();
        while (!ss.isEmpty()) {
            missingJournalFiles.add((int) ss.removeFirst());
        }
        missingJournalFiles.removeAll(journal.getFileMap().keySet());

        if (!missingJournalFiles.isEmpty()) {
            if (LOG.isInfoEnabled()) {
                LOG.info("Some journal files are missing: " + missingJournalFiles);
            }
        }

        ArrayList<BTreeVisitor.Predicate<Location>> missingPredicates = new ArrayList<BTreeVisitor.Predicate<Location>>();
        for (Integer missing : missingJournalFiles) {
            missingPredicates.add(new BTreeVisitor.BetweenVisitor<Location, Long>(new Location(missing, 0), new Location(missing + 1, 0)));
        }

        if (checkForCorruptJournalFiles) {
            Collection<DataFile> dataFiles = journal.getFileMap().values();
            for (DataFile dataFile : dataFiles) {
                int id = dataFile.getDataFileId();
                missingPredicates.add(new BTreeVisitor.BetweenVisitor<Location, Long>(new Location(id, dataFile.getLength()), new Location(id + 1, 0)));
                Sequence seq = dataFile.getCorruptedBlocks().getHead();
                while (seq != null) {
                    missingPredicates.add(new BTreeVisitor.BetweenVisitor<Location, Long>(new Location(id, (int) seq.getFirst()), new Location(id, (int) seq.getLast() + 1)));
                    seq = seq.getNext();
                }
            }
        }
View Full Code Here
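
The missing-file predicates above exploit Location ordering: every record of data file N falls in the half-open range [Location(N, 0), Location(N + 1, 0)). One such predicate in isolation (the file id is hypothetical):

    int missingFileId = 42; // hypothetical id of a missing journal data file

    // Matches every index key located anywhere inside data file 42, from its
    // first byte up to (exclusive) the start of data file 43.
    BTreeVisitor.BetweenVisitor<Location, Long> wholeFile =
            new BTreeVisitor.BetweenVisitor<Location, Long>(
                    new Location(missingFileId, 0),
                    new Location(missingFileId + 1, 0));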

    public Location store(JournalCommand<?> data, boolean sync, IndexAware before, Runnable after, Runnable onJournalStoreComplete) throws IOException {
        try {
            ByteSequence sequence = toByteSequence(data);

            Location location;
            checkpointLock.readLock().lock();
            try {

                long start = System.currentTimeMillis();
                location = onJournalStoreComplete == null ? journal.write(sequence, sync) : journal.write(sequence, onJournalStoreComplete);
View Full Code Here
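
A hypothetical call into the store(...) overload above, appending one command synchronously with no callbacks (the command variable is ours; the parameter order follows the signature shown):

    // sync=true makes the journal write durable before store(...) returns;
    // passing a non-null onJournalStoreComplete would take the async path instead.
    Location loc = store(myCommand, /* sync */ true, /* before */ null,
                         /* after */ null, /* onJournalStoreComplete */ null);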

    void checkpointUpdate(Transaction tx, boolean cleanup) throws IOException {
        LOG.debug("Checkpoint started.");

        // reflect last update exclusive of current checkpoint
        Location lastUpdate = metadata.lastUpdate;

        metadata.state = OPEN_STATE;
        metadata.producerSequenceIdTrackerLocation = checkpointProducerAudit();
        metadata.ackMessageFileMapLocation = checkpointAckMessageFileMap();
        Location[] inProgressTxRange = getInProgressTxLocationRange();
        metadata.firstInProgressTransactionLocation = inProgressTxRange[0];
        tx.store(metadata.page, metadataMarshaller, true);
        pageFile.flush();

        if( cleanup ) {

            final TreeSet<Integer> completeFileSet = new TreeSet<Integer>(journal.getFileMap().keySet());
            final TreeSet<Integer> gcCandidateSet = new TreeSet<Integer>(completeFileSet);

            if (LOG.isTraceEnabled()) {
                LOG.trace("Last update: " + lastUpdate + ", full gc candidates set: " + gcCandidateSet);
            }

            if (lastUpdate != null) {
                gcCandidateSet.remove(lastUpdate.getDataFileId());
            }

            // Don't GC files under replication
            if( journalFilesBeingReplicated!=null ) {
                gcCandidateSet.removeAll(journalFilesBeingReplicated);
View Full Code Here

            // Apparently the tail of checkpointProducerAudit(), called from the
            // checkpointUpdate(...) excerpt above: serialize the producer audit
            // and append it to the journal.
            ObjectOutputStream oout = new ObjectOutputStream(baos);
            oout.writeObject(metadata.producerSequenceIdTracker);
            oout.flush();
            oout.close();
            // using completion callback allows a disk sync to be avoided when enableJournalDiskSyncs = false
            Location location = store(new KahaProducerAuditCommand().setAudit(new Buffer(baos.toByteArray())), nullCompletionCallback);
            try {
                location.getLatch().await();
            } catch (InterruptedException e) {
                throw new InterruptedIOException(e.toString());
            }
            return location;
        }
View Full Code Here
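
The nullCompletionCallback used above is only named in this excerpt; presumably it is a no-op Runnable that routes the write down the journal's asynchronous path, after which the caller awaits the Location's latch for durability. A sketch of that assumption:

    // No-op completion callback: selects journal.write(sequence, callback),
    // the async path, so no per-write disk sync is forced. Durability is
    // then obtained by awaiting location.getLatch(), as shown above.
    static final Runnable nullCompletionCallback = new Runnable() {
        @Override
        public void run() {
            // nothing to do here
        }
    };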

    @Override
    protected void checkpointUpdate(Transaction tx, boolean cleanup) throws IOException {
        LOG.debug("Job Scheduler Store Checkpoint started.");

        // reflect last update exclusive of current checkpoint
        Location lastUpdate = metaData.getLastUpdateLocation();
        metaData.setState(KahaDBMetaData.OPEN_STATE);
        tx.store(metaData.getPage(), metaDataMarshaller, true);
        pageFile.flush();

        if (cleanup) {
            final TreeSet<Integer> completeFileSet = new TreeSet<Integer>(journal.getFileMap().keySet());
            final TreeSet<Integer> gcCandidateSet = new TreeSet<Integer>(completeFileSet);

            LOG.trace("Last update: {}, full gc candidates set: {}", lastUpdate, gcCandidateSet);

            if (lastUpdate != null) {
                gcCandidateSet.remove(lastUpdate.getDataFileId());
            }

            this.metaData.getJournalRC().visit(tx, new BTreeVisitor<Integer, Integer>() {

                @Override
View Full Code Here

    private void recover() throws IllegalStateException, IOException {
        this.indexLock.writeLock().lock();
        try {
            long start = System.currentTimeMillis();
            Location lastIndoubtPosition = getRecoveryPosition();
            Location recoveryPosition = lastIndoubtPosition;

            if (recoveryPosition != null) {
                int redoCounter = 0;
                LOG.info("Recovering from the journal ...");
                while (recoveryPosition != null) {
View Full Code Here
