Package org.apache.activemq.store.kahadb.disk.journal

Examples of org.apache.activemq.store.kahadb.disk.journal.Location


    /**
     * Writes the current metadata state to the page file and, when cleanup is
     * requested, computes which journal data files are candidates for garbage
     * collection.
     */
    void checkpointUpdate(Transaction tx, boolean cleanup) throws IOException {
        LOG.debug("Checkpoint started.");

        // reflect last update exclusive of current checkpoint
        Location lastUpdate = metadata.lastUpdate;

        metadata.state = OPEN_STATE;
        metadata.producerSequenceIdTrackerLocation = checkpointProducerAudit();
        metadata.ackMessageFileMapLocation = checkpointAckMessageFileMap();
        Location[] inProgressTxRange = getInProgressTxLocationRange();
        metadata.firstInProgressTransactionLocation = inProgressTxRange[0];
        tx.store(metadata.page, metadataMarshaller, true);
        pageFile.flush();

        if( cleanup ) {

            final TreeSet<Integer> completeFileSet = new TreeSet<Integer>(journal.getFileMap().keySet());
            final TreeSet<Integer> gcCandidateSet = new TreeSet<Integer>(completeFileSet);

            if (LOG.isTraceEnabled()) {
                LOG.trace("Last update: " + lastUpdate + ", full gc candidates set: " + gcCandidateSet);
            }

            if (lastUpdate != null) {
                gcCandidateSet.remove(lastUpdate.getDataFileId());
            }

            // Don't GC files under replication
            if( journalFilesBeingReplicated!=null ) {
                gcCandidateSet.removeAll(journalFilesBeingReplicated);
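A Location pins a journal record to a data file id plus a byte offset and size within that file; the checkpoint above uses only the file id, removing the file that holds the last update from the GC candidate set. A minimal sketch with made-up values, assuming the standard Location setters and getters:

        // Minimal sketch (made-up values): a Location names the data file a record
        // lives in (dataFileId), its byte offset in that file, and its size.
        Location lastUpdate = new Location();
        lastUpdate.setDataFileId(42);   // i.e. journal file db-42.log
        lastUpdate.setOffset(8192);     // byte offset of the record within that file
        lastUpdate.setSize(256);        // record length in bytes

        TreeSet<Integer> gcCandidateSet = new TreeSet<Integer>(journal.getFileMap().keySet());

        // As in checkpointUpdate: the file containing the last update is never GC'd.
        gcCandidateSet.remove(lastUpdate.getDataFileId());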


            ObjectOutputStream oout = new ObjectOutputStream(baos);
            oout.writeObject(metadata.producerSequenceIdTracker);
            oout.flush();
            oout.close();
            // using completion callback allows a disk sync to be avoided when enableJournalDiskSyncs = false
            Location location = store(new KahaProducerAuditCommand().setAudit(new Buffer(baos.toByteArray())), nullCompletionCallback);
            try {
                location.getLatch().await();
            } catch (InterruptedException e) {
                throw new InterruptedIOException(e.toString());
            }
            return location;
        }

        ObjectOutputStream oout = new ObjectOutputStream(baos);
        oout.writeObject(metadata.ackMessageFileMap);
        oout.flush();
        oout.close();
        // using completion callback allows a disk sync to be avoided when enableJournalDiskSyncs = false
        Location location = store(new KahaAckMessageFileMapCommand().setAckMessageFileMap(new Buffer(baos.toByteArray())), nullCompletionCallback);
        try {
            location.getLatch().await();
        } catch (InterruptedException e) {
            throw new InterruptedIOException(e.toString());
        }
        return location;
    }

    }

    private Location checkpointSubscriptionCommand(KahaSubscriptionCommand subscription) throws IOException {

        ByteSequence sequence = toByteSequence(subscription);
        Location location = journal.write(sequence, nullCompletionCallback);

        try {
            location.getLatch().await();
        } catch (InterruptedException e) {
            throw new InterruptedIOException(e.toString());
        }
        return location;
    }
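The three checkpoint helpers above share one pattern: append a record through the journal with a no-op completion callback, then block on the latch carried by the returned Location until the write has completed. Condensed into one hedged sketch (the payload is a placeholder; nullCompletionCallback in the snippets is just an empty Runnable):

        // Sketch of the write-then-await pattern used above. Passing a completion
        // callback lets the journal skip a per-write disk sync when
        // enableJournalDiskSyncs = false; the Location's latch is released once
        // the record has been written.
        ByteSequence payload = new ByteSequence("marshalled command bytes".getBytes());
        Location location = journal.write(payload, new Runnable() {
            @Override
            public void run() {
                // no-op completion callback
            }
        });

        try {
            location.getLatch().await();
        } catch (InterruptedException e) {
            throw new InterruptedIOException(e.toString());
        }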

            final String key = identity.toProducerKey();

            // Hopefully one day the page file supports concurrent read
            // operations... but for now we must
            // externally synchronize...
            Location location;
            indexLock.writeLock().lock();
            try {
                location = findMessageLocation(key, dest);
            } finally {
                indexLock.writeLock().unlock();

        super.doStop(stopper);
    }

    @Override
    void incrementRedeliveryAndReWrite(final String key, final KahaDestination destination) throws IOException {
        Location location;
        this.indexLock.writeLock().lock();
        try {
            location = findMessageLocation(key, destination);
        } finally {
            this.indexLock.writeLock().unlock();
        }

        if (location != null) {
            KahaAddMessageCommand addMessage = (KahaAddMessageCommand) load(location);
            Message message = (Message) wireFormat.unmarshal(new DataInputStream(addMessage.getMessage().newInput()));

            message.incrementRedeliveryCounter();
            if (LOG.isTraceEnabled()) {
                LOG.trace("rewriting: " + key + " with deliveryCount: " + message.getRedeliveryCounter());
            }
            org.apache.activemq.util.ByteSequence packet = wireFormat.marshal(message);
            addMessage.setMessage(new Buffer(packet.getData(), packet.getOffset(), packet.getLength()));

            final Location rewriteLocation = journal.write(toByteSequence(addMessage), true);

            this.indexLock.writeLock().lock();
            try {
                pageFile.tx().execute(new Transaction.Closure<IOException>() {
                    @Override
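Redelivery tracking above shows the append-only update pattern: the record at the old Location is loaded, modified, and appended again, and the resulting new Location replaces the old one in the index (the page file closure that does the index update is truncated above). A compressed sketch of that read-modify-append flow, reusing the store helpers from the snippet:

        // Sketch of the append-only update: records are never rewritten in place;
        // an update is a fresh append that yields a new Location for the index.
        KahaAddMessageCommand addMessage = (KahaAddMessageCommand) load(location);

        // ... mutate the unmarshalled message, e.g. bump its redelivery counter ...

        Location rewriteLocation = journal.write(toByteSequence(addMessage), true); // sync write
        // The index entry for the message key is then repointed at rewriteLocation
        // inside pageFile.tx().execute(...), which is the part truncated above.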

    private Location store(JournalCommand<?> data) throws IOException {
        int size = data.serializedSizeFramed();
        DataByteArrayOutputStream os = new DataByteArrayOutputStream(size + 1);
        os.writeByte(data.type().getNumber());
        data.writeFramed(os);
        Location location = journal.write(os.toByteSequence(), true);
        journal.setLastAppendLocation(location);
        return location;
    }
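store() frames a command as a type byte followed by the framed command body, appends it, and remembers the returned Location as the last append location. Reading a record back reverses those steps; a minimal sketch, assuming Journal.read(Location) and with the final decode step only outlined:

        // Minimal sketch of reading back a record written by store(): the first
        // byte is the command type number written above, the rest is the framed body.
        ByteSequence data = journal.read(location);
        DataByteArrayInputStream is = new DataByteArrayInputStream(data);
        byte commandType = is.readByte();
        // ... resolve the command type and mergeFramed(is) into the matching
        //     JournalCommand, as the store's load(Location) helper does ...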

        journal.close();
        journal = null;
    }

    private void recoverPendingLocalTransactions() throws IOException {
        Location location = journal.getNextLocation(null);
        while (location != null) {
            process(load(location));
            location = journal.getNextLocation(location);
        }
        recoveredPendingCommit.addAll(inflightTransactions.keySet());
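The same getNextLocation(null) loop works for any forward scan of the journal: null starts at the first record, and feeding each returned Location back in advances to the next one. An illustrative sketch that walks the log and tallies record bytes per data file:

        // Illustrative forward scan: visit every record and tally how many bytes
        // each journal data file contributes.
        Map<Integer, Integer> bytesPerFile = new HashMap<Integer, Integer>();
        Location location = journal.getNextLocation(null);   // first record
        while (location != null) {
            Integer soFar = bytesPerFile.get(location.getDataFileId());
            bytesPerFile.put(location.getDataFileId(),
                    (soFar == null ? 0 : soFar) + location.getSize());
            location = journal.getNextLocation(location);     // next record
        }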

            LOG.debug("Process reschedule command for job {} non-existent executime time {}.",
                      command.getJobId(), command.getExecutionTime());
        }

        if (result != null) {
            Location previousUpdate = result.getLastUpdate();

            List<JobLocation> target = null;
            result.setNextTime(command.getNextExecutionTime());
            result.setLastUpdate(location);
            result.setRescheduledCount(command.getRescheduledCount());

        return range;
    }

    @SuppressWarnings("rawtypes")
    private void trackMaxAndMin(Location[] range, List<Operation> ops) {
        Location t = ops.get(0).getLocation();
        if (range[0] == null || t.compareTo(range[0]) <= 0) {
            range[0] = t;
        }
        t = ops.get(ops.size() - 1).getLocation();
        if (range[1] == null || t.compareTo(range[1]) >= 0) {
            range[1] = t;
        }
    }
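trackMaxAndMin works because Location is Comparable: compareTo orders locations by data file id first and then by offset within the file, which matches the order in which records were appended. A small illustration with made-up values:

        // Illustration of Location ordering (made-up values): comparison is by
        // data file id first, then by offset within the file.
        Location a = new Location();
        a.setDataFileId(7);
        a.setOffset(128);

        Location b = new Location();
        b.setDataFileId(7);
        b.setOffset(4096);

        Location c = new Location();
        c.setDataFileId(8);
        c.setOffset(0);

        // a < b: same file, smaller offset; b < c: c lives in a later data file.
        assert a.compareTo(b) < 0;
        assert b.compareTo(c) < 0;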
