Package org.apache.activemq.store.kahadb.disk.journal

Examples of org.apache.activemq.store.kahadb.disk.journal.Location
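
A Location pins down a record in the KahaDB journal by data file id and offset, and its natural ordering follows journal order. The snippets below lean on exactly two of its members: the (dataFileId, offset) constructor and compareTo. As a warm-up, here is a minimal sketch of that ordering (the class name and the values are illustrative only):

import org.apache.activemq.store.kahadb.disk.journal.Location;

public class LocationOrderingSketch {
    public static void main(String[] args) {
        // Location(dataFileId, offset) pinpoints a record in the journal.
        Location a = new Location(1, 0);
        Location b = new Location(1, 512);
        Location c = new Location(2, 0);

        // The natural ordering follows journal order: data file id first,
        // then offset within the file.
        System.out.println(a.compareTo(b) < 0); // true: same file, earlier offset
        System.out.println(b.compareTo(c) < 0); // true: earlier data file
    }
}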



    private Location getRecoveryPosition() throws IOException {
        // This loads the first position, and we completely rebuild the index if we
        // do not override it with a known recovery start location.
        Location result = null;

        if (!isForceRecoverIndex()) {
            if (metaData.getLastUpdateLocation() != null) {
                result = metaData.getLastUpdateLocation();
            }
        // ... (remainder of the method elided in the original listing)


    private void recoverIndex(Transaction tx) throws IOException {
        long start = System.currentTimeMillis();

        // It is possible that index updates were applied before the journal updates...
        // in that case we need to remove references to Jobs that are not in the journal.
        final Location lastAppendLocation = journal.getLastAppendLocation();
        long undoCounter = 0;

        // Go through all the jobs in each scheduler and check whether any were added
        // after the last appended location, and remove those.  For now we ignore the
        // update location, since the scheduled job will update itself after the next
        // fire and a new update will replace any existing update.
        for (Iterator<Map.Entry<String, JobSchedulerImpl>> i = metaData.getJobSchedulers().iterator(tx); i.hasNext();) {
            Map.Entry<String, JobSchedulerImpl> entry = i.next();
            JobSchedulerImpl scheduler = entry.getValue();

            List<JobLocation> jobs = scheduler.getAllScheduledJobs(tx);
            for (JobLocation job : jobs) {
                if (job.getLocation().compareTo(lastAppendLocation) >= 0) {
                    if (scheduler.removeJobAtTime(tx, job.getJobId(), job.getNextTime())) {
                        LOG.trace("Removed Job past last appened in the journal: {}", job.getJobId());
                        undoCounter++;
                    }
                }
            }
        }

        if (undoCounter > 0) {
            // The rolled-back operations are basically in-flight journal writes.  To avoid
            // these rollbacks, the end user should do sync writes to the journal.
            long end = System.currentTimeMillis();
            LOG.info("Rolled back {} messages from the index in {} seconds.", undoCounter, ((end - start) / 1000.0f));
            undoCounter = 0;
        }

        // Now we check for missing and corrupt journal files.

        // 1. Collect the set of all referenced journal files based on the Location of
        //    the scheduled jobs and the marked last update field.
        HashSet<Integer> missingJournalFiles = new HashSet<Integer>();
        for (Iterator<Map.Entry<String, JobSchedulerImpl>> i = metaData.getJobSchedulers().iterator(tx); i.hasNext();) {
            Map.Entry<String, JobSchedulerImpl> entry = i.next();
            JobSchedulerImpl scheduler = entry.getValue();

            List<JobLocation> jobs = scheduler.getAllScheduledJobs(tx);
            for (JobLocation job : jobs) {
                missingJournalFiles.add(job.getLocation().getDataFileId());
                if (job.getLastUpdate() != null) {
                    missingJournalFiles.add(job.getLastUpdate().getDataFileId());
                }
            }
        }

        // 2. Remove from that set all known data file Ids in the journal; what's left
        //    is the missing set, which will soon also contain the corrupted set.
        missingJournalFiles.removeAll(journal.getFileMap().keySet());
        if (!missingJournalFiles.isEmpty()) {
            LOG.info("Some journal files are missing: {}", missingJournalFiles);
        }

        // 3. Now check all references in the journal logs for corruption and add any
        //    corrupt journal files to the missing set.
        HashSet<Location> corruptedLocations = new HashSet<Location>();

        if (isCheckForCorruptJournalFiles()) {
            Collection<DataFile> dataFiles = journal.getFileMap().values();
            for (DataFile dataFile : dataFiles) {
                int id = dataFile.getDataFileId();
                for (long offset : dataFile.getCorruptedBlocks()) {
                    corruptedLocations.add(new Location(id, (int) offset));
                }
            }

            if (!corruptedLocations.isEmpty()) {
                LOG.debug("Found some corrupted data blocks in the journal: {}", corruptedLocations.size());
                // ... (remainder of the method elided in the original listing)
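
The missing-file check above reduces to set subtraction: collect every data file id the index references, then remove every id the journal still has on disk. A standalone sketch of that step, assuming nothing beyond java.util (MissingFilesSketch and findMissing are hypothetical names, not store APIs):

import java.util.HashSet;
import java.util.Set;

public class MissingFilesSketch {

    // Returns the ids the index references but the journal no longer has on disk.
    static Set<Integer> findMissing(Set<Integer> referencedIds, Set<Integer> onDiskIds) {
        Set<Integer> missing = new HashSet<Integer>(referencedIds);
        missing.removeAll(onDiskIds); // whatever survives cannot be resolved
        return missing;
    }
}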

    private Location store(JournalCommand<?> data) throws IOException {
        int size = data.serializedSizeFramed();
        DataByteArrayOutputStream os = new DataByteArrayOutputStream(size + 1);
        os.writeByte(data.type().getNumber());
        data.writeFramed(os);
        Location location = journal.write(os.toByteSequence(), true);
        journal.setLastAppendLocation(location);
        return location;
    }
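store() frames each record as one type byte followed by the framed command body, so recovery can dispatch on the first byte before decoding the rest. A minimal sketch of the same framing with plain JDK streams (FramingSketch, frame, and EXAMPLE_TYPE are illustrative stand-ins, not store APIs):

import java.io.ByteArrayOutputStream;
import java.io.IOException;

public class FramingSketch {

    static final byte EXAMPLE_TYPE = 1; // stand-in for data.type().getNumber()

    // Mirrors store(): one type byte, then the serialized command body.
    static byte[] frame(byte type, byte[] body) throws IOException {
        ByteArrayOutputStream os = new ByteArrayOutputStream(body.length + 1);
        os.write(type);
        os.write(body);
        return os.toByteArray();
    }
}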

        journal.close();
        journal = null;
    }

    private void recoverPendingLocalTransactions() throws IOException {
        Location location = journal.getNextLocation(null);
        while (location != null) {
            process(load(location));
            location = journal.getNextLocation(location);
        }
        recoveredPendingCommit.addAll(inflightTransactions.keySet());
        // ... (remainder of the method elided in the original listing)
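
The getNextLocation(null) idiom above is the standard full-journal replay: passing null yields the first record's Location, and each subsequent call advances until it returns null. A minimal sketch of the same loop, assuming a started Journal and a hypothetical handleRecord callback in place of process(load(location)):

import java.io.IOException;
import org.apache.activemq.store.kahadb.disk.journal.Journal;
import org.apache.activemq.store.kahadb.disk.journal.Location;
import org.apache.activemq.util.ByteSequence;

public class ReplaySketch {

    static void replay(Journal journal) throws IOException {
        Location location = journal.getNextLocation(null); // first record
        while (location != null) {
            ByteSequence record = journal.read(location);  // raw record bytes
            handleRecord(record);
            location = journal.getNextLocation(location);  // advance
        }
    }

    static void handleRecord(ByteSequence record) {
        // application-specific decoding goes here
    }
}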

        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        ObjectOutputStream oout = new ObjectOutputStream(baos);
        oout.writeObject(metadata.ackMessageFileMap);
        oout.flush();
        oout.close();
        // using completion callback allows a disk sync to be avoided when enableJournalDiskSyncs = false
        Location location = store(new KahaAckMessageFileMapCommand().setAckMessageFileMap(new Buffer(baos.toByteArray())), nullCompletionCallback);
        try {
            location.getLatch().await();
        } catch (InterruptedException e) {
            throw new InterruptedIOException(e.toString());
        }
        return location;
    }
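The ack message file map is written with plain Java serialization into the command's Buffer; reading it back is the mirror image. A minimal sketch of that inverse step (AckMapSketch and readAckMap are hypothetical, and the Map&lt;Integer, Set&lt;Integer&gt;&gt; cast assumes the map type used by the metadata):

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.util.Map;
import java.util.Set;

public class AckMapSketch {

    @SuppressWarnings("unchecked")
    static Map<Integer, Set<Integer>> readAckMap(byte[] payload) throws IOException, ClassNotFoundException {
        ObjectInputStream oin = new ObjectInputStream(new ByteArrayInputStream(payload));
        try {
            // The write side used oout.writeObject(metadata.ackMessageFileMap).
            return (Map<Integer, Set<Integer>>) oin.readObject();
        } finally {
            oin.close();
        }
    }
}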


    private Location checkpointSubscriptionCommand(KahaSubscriptionCommand subscription) throws IOException {

        ByteSequence sequence = toByteSequence(subscription);
        Location location = journal.write(sequence, nullCompletionCallback);

        try {
            location.getLatch().await();
        } catch (InterruptedException e) {
            throw new InterruptedIOException(e.toString());
        }
        return location;
    }

    public LegacyJobLocation(Location location) {
        this.location = location;
    }

    public LegacyJobLocation() {
        this(new Location());
    }

            if (before != null) {
                before.run();
            }

            ByteSequence sequence = toByteSequence(command);
            Location location;
            checkpointLock.readLock().lock();
            try {

                long start = System.currentTimeMillis();
                location = onJournalStoreComplete == null ? journal.write(sequence, sync) :
                        journal.write(sequence, onJournalStoreComplete);
                // ... (remainder of the method elided in the original listing)
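
The checkpoint lock above is a classic read/write split: every journal append takes the read lock, so appends can proceed concurrently with each other but never overlap a checkpoint, which is assumed to take the write lock. A minimal sketch of that pattern (CheckpointLockSketch and its method names are illustrative):

import java.util.concurrent.locks.ReentrantReadWriteLock;

public class CheckpointLockSketch {

    private final ReentrantReadWriteLock checkpointLock = new ReentrantReadWriteLock();

    void append(Runnable before, Runnable writeRecord) {
        if (before != null) {
            before.run();                  // optional pre-write hook
        }
        checkpointLock.readLock().lock();  // excludes checkpoints, not other appenders
        try {
            writeRecord.run();             // the actual journal write
        } finally {
            checkpointLock.readLock().unlock();
        }
    }
}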

            final String key = identity.toProducerKey();

            // Hopefully one day the page file supports concurrent read
            // operations... but for now we must externally synchronize.
            Location location;
            indexLock.writeLock().lock();
            try {
                location = findMessageLocation(key, dest);
            } finally {
                indexLock.writeLock().unlock();
            // ... (remainder of the method elided in the original listing)
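
The comment above explains the write lock on a read path: because the page file is assumed not to support concurrent readers, even index lookups take the write lock for exclusive access. A minimal sketch of that external synchronization (IndexLockSketch and lookupIndex are hypothetical stand-ins for the store's internals):

import java.util.concurrent.locks.ReentrantReadWriteLock;

public class IndexLockSketch {

    private final ReentrantReadWriteLock indexLock = new ReentrantReadWriteLock();

    // Even a read-only lookup takes the write lock, because the underlying
    // page file is assumed to reject concurrent readers.
    Object lookup(String key) {
        indexLock.writeLock().lock();
        try {
            return lookupIndex(key);
        } finally {
            indexLock.writeLock().unlock();
        }
    }

    Object lookupIndex(String key) {
        return null; // placeholder for the real index access
    }
}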

        return range;
    }

    @SuppressWarnings("rawtypes")
    private void trackMaxAndMin(Location[] range, List<Operation> ops) {
        // Widen range[0] (minimum) and range[1] (maximum) so the pair
        // brackets the journal positions of the first and last operations.
        Location t = ops.get(0).getLocation();
        if (range[0] == null || t.compareTo(range[0]) <= 0) {
            range[0] = t;
        }
        t = ops.get(ops.size() - 1).getLocation();
        if (range[1] == null || t.compareTo(range[1]) >= 0) {
            range[1] = t;
        }
    }
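trackMaxAndMin keeps a two-slot running range: range[0] converges to the smallest Location seen and range[1] to the largest, so after every operation list has been visited the pair brackets all journal positions still referenced. The same idiom, sketched for one Location at a time (widen is a hypothetical helper):

import org.apache.activemq.store.kahadb.disk.journal.Location;

public class RangeSketch {

    // Widens range[0]/range[1] to include loc; empty slots are taken as-is.
    static void widen(Location[] range, Location loc) {
        if (range[0] == null || loc.compareTo(range[0]) < 0) {
            range[0] = loc; // new minimum
        }
        if (range[1] == null || loc.compareTo(range[1]) > 0) {
            range[1] = loc; // new maximum
        }
    }
}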


