Package org.apache.activemq.store.kahadb.disk.journal

Examples of org.apache.activemq.store.kahadb.disk.journal.Location
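Location is KahaDB's pointer into the write-ahead journal: a data file id plus a
byte offset within that file (the class also records the record's size and type,
and implements Comparable<Location>). The excerpts below, taken from the ActiveMQ
KahaDB sources, show Locations being created, compared, and used to drive
recovery, index repair, and journal garbage collection.

As a minimal, self-contained sketch of the ordering these excerpts rely on
(assuming only the public Location(int dataFileId, int offset) constructor and
compareTo):

    import org.apache.activemq.store.kahadb.disk.journal.Location;

    public class LocationOrderingDemo {
        public static void main(String[] args) {
            // compareTo orders by data file id first, then by offset in the file.
            Location a = new Location(1, 128);
            Location b = new Location(1, 512);
            Location c = new Location(2, 0);

            System.out.println(a.compareTo(b) < 0); // true: same file, smaller offset
            System.out.println(b.compareTo(c) < 0); // true: earlier data file wins
        }
    }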


        // ...tail of getInProgressTxLocationRange(): the elided code above calls
        // trackMaxAndMin(range, ops) for each in-flight and prepared transaction.
        }
        return range;
    }

    private void trackMaxAndMin(Location[] range, List<Operation> ops) {
        // range[0] tracks the smallest Location seen so far, range[1] the largest;
        // ops is ordered, so only its first and last entries need to be checked.
        Location t = ops.get(0).getLocation();
        if (range[0]==null || t.compareTo(range[0]) <= 0) {
            range[0] = t;
        }
        t = ops.get(ops.size() -1).getLocation();
        if (range[1]==null || t.compareTo(range[1]) >= 0) {
            range[1] = t;
        }
    }


    private void recover() throws IllegalStateException, IOException {
        this.indexLock.writeLock().lock();
        try {

            long start = System.currentTimeMillis();
            Location producerAuditPosition = recoverProducerAudit();
            Location lastIndoubtPosition = getRecoveryPosition();

            Location recoveryPosition = minimum(producerAuditPosition, lastIndoubtPosition);

            if (recoveryPosition != null) {
                int redoCounter = 0;
                LOG.info("Recovering from the journal ...");
                while (recoveryPosition != null) {
                    // ... (elided) load the record at recoveryPosition, re-apply it to
                    // the index, and advance with journal.getNextLocation(recoveryPosition).
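The elided loop body follows the usual journal-replay shape. A hedged sketch of
that shape against the public Journal API (getNextLocation and read are real
Journal methods; processEntry is a hypothetical stand-in for the index update):

    import java.io.IOException;

    import org.apache.activemq.store.kahadb.disk.journal.Journal;
    import org.apache.activemq.store.kahadb.disk.journal.Location;
    import org.apache.activemq.util.ByteSequence;

    public class ReplayDemo {
        // Walks every record from start (or the journal head when start is null).
        static int replay(Journal journal, Location start) throws IOException {
            int redoCounter = 0;
            Location pos = start != null ? start : journal.getNextLocation(null);
            while (pos != null) {
                ByteSequence data = journal.read(pos); // bytes of one journal record
                processEntry(data);                    // hypothetical index update
                redoCounter++;
                pos = journal.getNextLocation(pos);    // null once the tail is hit
            }
            return redoCounter;
        }

        static void processEntry(ByteSequence data) { /* hypothetical no-op */ }
    }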

        // ...tail of a helper that converts a transaction id to its local form.
        return TransactionIdConversion.convertToLocal(tx);
    }

    private Location minimum(Location producerAuditPosition,
            Location lastIndoubtPosition) {
        // Null-tolerant minimum: returns the earlier of the two journal positions,
        // or whichever one is non-null.
        Location min = null;
        if (producerAuditPosition != null) {
            min = producerAuditPosition;
            if (lastIndoubtPosition != null && lastIndoubtPosition.compareTo(producerAuditPosition) < 0) {
                min = lastIndoubtPosition;
            }
        } else {
            min = lastIndoubtPosition;
        }
        return min;
    }

    protected void recoverIndex(Transaction tx) throws IOException {
        long start = System.currentTimeMillis();
        // It is possible that index updates were applied before the corresponding
        // journal updates; in that case we need to remove references to messages
        // that are not in the journal.
        final Location lastAppendLocation = journal.getLastAppendLocation();
        long undoCounter=0;

        // Go through all the destinations to see if they have messages past the lastAppendLocation
        for (StoredDestination sd : storedDestinations.values()) {

            final ArrayList<Long> matches = new ArrayList<Long>();
            // Find all the Locations that are >= the last append Location.
            sd.locationIndex.visit(tx, new BTreeVisitor.GTEVisitor<Location, Long>(lastAppendLocation) {
                @Override
                protected void matched(Location key, Long value) {
                    matches.add(value);
                }
            });

            for (Long sequenceId : matches) {
                MessageKeys keys = sd.orderIndex.remove(tx, sequenceId);
                sd.locationIndex.remove(tx, keys.location);
                sd.messageIdIndex.remove(tx, keys.messageId);
                metadata.producerSequenceIdTracker.rollback(keys.messageId);
                undoCounter++;
                // TODO: do we need to modify the ack positions for the pub sub case?
            }
        }

        if( undoCounter > 0 ) {
            // The rolled-back operations were in-flight journal writes. To avoid
            // losing messages this way, the end user should enable sync journal writes.
            if (LOG.isInfoEnabled()) {
                long end = System.currentTimeMillis();
                LOG.info("Rolled back " + undoCounter + " messages from the index in " + ((end - start) / 1000.0f) + " seconds.");
            }
        }

        undoCounter = 0;
        start = System.currentTimeMillis();

        // Let's be extra paranoid here and verify that all the data files being
        // referenced by the indexes still exist.

        final SequenceSet ss = new SequenceSet();
        for (StoredDestination sd : storedDestinations.values()) {
            // Use a visitor to cut down the number of pages that we load
            sd.locationIndex.visit(tx, new BTreeVisitor<Location, Long>() {
                int last=-1;

                @Override
                public boolean isInterestedInKeysBetween(Location first, Location second) {
                    if( first==null ) {
                        return !ss.contains(0, second.getDataFileId());
                    } else if( second==null ) {
                        return true;
                    } else {
                        return !ss.contains(first.getDataFileId(), second.getDataFileId());
                    }
                }

                @Override
                public void visit(List<Location> keys, List<Long> values) {
                    for (Location l : keys) {
                        int fileId = l.getDataFileId();
                        if( last != fileId ) {
                            ss.add(fileId);
                            last = fileId;
                        }
                    }
                }

            });
        }
        HashSet<Integer> missingJournalFiles = new HashSet<Integer>();
        while (!ss.isEmpty()) {
            missingJournalFiles.add((int) ss.removeFirst());
        }
        missingJournalFiles.removeAll(journal.getFileMap().keySet());

        if (!missingJournalFiles.isEmpty()) {
            if (LOG.isInfoEnabled()) {
                LOG.info("Some journal files are missing: " + missingJournalFiles);
            }
        }

        ArrayList<BTreeVisitor.Predicate<Location>> missingPredicates = new ArrayList<BTreeVisitor.Predicate<Location>>();
        for (Integer missing : missingJournalFiles) {
            missingPredicates.add(new BTreeVisitor.BetweenVisitor<Location, Long>(new Location(missing, 0), new Location(missing + 1, 0)));
        }

        if (checkForCorruptJournalFiles) {
            Collection<DataFile> dataFiles = journal.getFileMap().values();
            for (DataFile dataFile : dataFiles) {
                int id = dataFile.getDataFileId();
                missingPredicates.add(new BTreeVisitor.BetweenVisitor<Location, Long>(new Location(id, dataFile.getLength()), new Location(id + 1, 0)));
                Sequence seq = dataFile.getCorruptedBlocks().getHead();
                while (seq != null) {
                    missingPredicates.add(new BTreeVisitor.BetweenVisitor<Location, Long>(new Location(id, (int) seq.getFirst()), new Location(id, (int) seq.getLast() + 1)));
                    seq = seq.getNext();
                }
            }
        }
            // ... (elided) the predicates collected above are then used to purge
            // index entries that fall inside missing or corrupted journal ranges.
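The BetweenVisitor bounds above lean on the (dataFileId, offset) ordering:
[Location(id, 0), Location(id + 1, 0)) is a half-open interval that covers every
entry in data file id. A minimal sketch of that invariant (inDataFile is a
hypothetical helper, not part of the API):

    import org.apache.activemq.store.kahadb.disk.journal.Location;

    class LocationRanges {
        // True when l falls inside data file id, using only compareTo and the
        // half-open interval trick from recoverIndex above.
        static boolean inDataFile(Location l, int id) {
            Location from = new Location(id, 0);   // first possible entry of file id
            Location to = new Location(id + 1, 0); // first entry of the next file
            return l.compareTo(from) >= 0 && l.compareTo(to) < 0;
        }
    }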

        // ...from store(): run the pre-write hook, append to the journal, then
        // apply the change to the index.
        if (before != null) {
            before.run();
        }
        try {
            ByteSequence sequence = toByteSequence(data);
            long start = System.currentTimeMillis();
            Location location = onJournalStoreComplete == null ? journal.write(sequence, sync) : journal.write(sequence, onJournalStoreComplete);
            long start2 = System.currentTimeMillis();
            process(data, location, after);
            long end = System.currentTimeMillis();
            if( LOG_SLOW_ACCESS_TIME>0 && end-start > LOG_SLOW_ACCESS_TIME) {
                if (LOG.isInfoEnabled()) {
                    // ... (elided) logs a slow-access warning with the journal append
                    // and index update timings.
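For orientation, a hedged, standalone sketch of the write path that produces
these Locations (the directory is an arbitrary assumption; setDirectory, start,
write, read, and close are real Journal methods):

    import java.io.File;
    import java.io.IOException;

    import org.apache.activemq.store.kahadb.disk.journal.Journal;
    import org.apache.activemq.store.kahadb.disk.journal.Location;
    import org.apache.activemq.util.ByteSequence;

    public class JournalWriteDemo {
        public static void main(String[] args) throws IOException {
            Journal journal = new Journal();
            journal.setDirectory(new File("target/journal-demo")); // assumed scratch dir
            journal.start();
            try {
                // A sync write blocks until the record is on disk and returns its Location.
                Location loc = journal.write(new ByteSequence("hello".getBytes()), true);
                ByteSequence back = journal.read(loc); // read the record back by Location
                System.out.println(new String(back.getData(), back.getOffset(), back.getLength()));
            } finally {
                journal.close();
            }
        }
    }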

    /**
     * Flushes index updates to disk and, when cleanup is requested, garbage
     * collects journal data files that are no longer referenced.
     */
    void checkpointUpdate(Transaction tx, boolean cleanup) throws IOException {
        LOG.debug("Checkpoint started.");

        // reflect last update exclusive of current checkpoint
        Location lastUpdate = metadata.lastUpdate;

        metadata.state = OPEN_STATE;
        metadata.producerSequenceIdTrackerLocation = checkpointProducerAudit();
        Location[] inProgressTxRange = getInProgressTxLocationRange();
        metadata.firstInProgressTransactionLocation = inProgressTxRange[0];
        tx.store(metadata.page, metadataMarshaller, true);
        pageFile.flush();

        if( cleanup ) {

            final TreeSet<Integer> completeFileSet = new TreeSet<Integer>(journal.getFileMap().keySet());
            final TreeSet<Integer> gcCandidateSet = new TreeSet<Integer>(completeFileSet);

            if (LOG.isTraceEnabled()) {
                LOG.trace("Last update: " + lastUpdate + ", full gc candidates set: " + gcCandidateSet);
            }

            if (lastUpdate != null) {
                gcCandidateSet.remove(lastUpdate.getDataFileId());
            }

            // Don't GC files under replication
            if( journalFilesBeingReplicated!=null ) {
                gcCandidateSet.removeAll(journalFilesBeingReplicated);
            // ... (elided) files still referenced by in-progress transactions or by
            // any destination index are also dropped from gcCandidateSet; whatever
            // remains is passed to journal.removeDataFiles(...).
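Note that garbage collection works at data-file granularity: a file id stays in
gcCandidateSet only if no reachable Location (last update, replicated files,
in-progress transactions, destination indexes) still points into it, and the
surviving candidates can then be deleted wholesale.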

            // ...from checkpointProducerAudit(): serialize the producer audit and
            // append it to the journal, returning its Location.
            ObjectOutputStream oout = new ObjectOutputStream(baos);
            oout.writeObject(metadata.producerSequenceIdTracker);
            oout.flush();
            oout.close();
            // using completion callback allows a disk sync to be avoided when enableJournalDiskSyncs = false
            Location location = store(new KahaProducerAuditCommand().setAudit(new Buffer(baos.toByteArray())), nullCompletionCallback);
            try {
                location.getLatch().await();
            } catch (InterruptedException e) {
                throw new InterruptedIOException(e.toString());
            }
            return location;
        }
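The pattern here is worth calling out: store(...) with a completion callback
appends asynchronously, and the returned Location carries a CountDownLatch
(getLatch()) that trips once the write has been processed, so the caller blocks
only when it actually needs the durability guarantee.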

        LOG.info("Stopped KahaDB");
        super.doStop(stopper);
    }

    void incrementRedeliveryAndReWrite(final String key, final KahaDestination destination) throws IOException {
        Location location;
        this.indexLock.writeLock().lock();
        try {
            location = findMessageLocation(key, destination);
        } finally {
            this.indexLock.writeLock().unlock();
        }

        if (location != null) {
            KahaAddMessageCommand addMessage = (KahaAddMessageCommand) load(location);
            Message message = (Message) wireFormat.unmarshal(new DataInputStream(addMessage.getMessage().newInput()));

            message.incrementRedeliveryCounter();
            if (LOG.isTraceEnabled()) {
                LOG.trace("rewriting: " + key + " with deliveryCount: " + message.getRedeliveryCounter());
            }
            org.apache.activemq.util.ByteSequence packet = wireFormat.marshal(message);
            addMessage.setMessage(new Buffer(packet.getData(), packet.getOffset(), packet.getLength()));

            final Location rewriteLocation = journal.write(toByteSequence(addMessage), true);

            this.indexLock.writeLock().lock();
            try {
                pageFile.tx().execute(new Transaction.Closure<IOException>() {
                    public void execute(Transaction tx) throws IOException {
                        // ... (elided) re-points the index entries for this key at
                        // rewriteLocation so subsequent reads see the updated message.
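Two details of this rewrite are typical of the journal design: the index lock is
held only around index reads and updates, never across the journal append
itself, and because journal files are immutable the updated message is appended
as a fresh record, after which the index is re-pointed at the new Location.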

            final String key = identity.toString();

            // Hopefully one day the page file supports concurrent read
            // operations... but for now we must
            // externally synchronize...
            Location location;
            indexLock.writeLock().lock();
            try {
                location = findMessageLocation(key, dest);
            } finally {
                indexLock.writeLock().unlock();
            }

            // ...from the scheduler's job store: compute the job's next fire time.
            time += delay;
        } else {
            time += period;
        }

        Location location = this.store.write(payload, false);
        JobLocation jobLocation = new JobLocation(location);
        this.store.incrementJournalCount(tx, location);
        jobLocation.setJobId(jobId);
        jobLocation.setStartTime(startTime);
        jobLocation.setCronEntry(cronEntry);
        // ... (elided) the remaining JobLocation fields (delay, period, repeat) are
        // populated and the job is inserted into the schedule-time index.
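This last excerpt shows the same idiom outside the message store: the payload is
written to the journal once, and only the small, fixed-size Location is embedded
in higher-level records (here a JobLocation), which keeps the indexes compact.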
