Package org.hornetq.core.journal

Examples of org.hornetq.core.journal.Journal
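The fragments below are taken from the HornetQ code base and its tests. Together they cover the common uses of the Journal interface: building a JournalImpl on top of a SequentialFileFactory (NIO or AIO), starting the journal and loading its records, iterating RecordInfo entries by user record type, generating persistent IDs with BatchingIDGenerator, and appending commit or rollback records, both directly and through the replication endpoint.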


      int transactionSize = Integer.parseInt(args[3]);
      int numberOfThreads = Integer.parseInt(args[4]);

      try
      {
         Journal journal = ValidateTransactionHealthTest.appendData(journalType,
                                                                    journalDir,
                                                                    numberOfElements,
                                                                    transactionSize,
                                                                    numberOfThreads);
View Full Code Here
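The fragment above is from a transaction-health test: it reads the transaction size and thread count from the command line (journalType, journalDir and numberOfElements come from the preceding arguments in the full source) and appends data through a test helper. Before the longer server-side examples, here is a minimal sketch of the bare Journal lifecycle. The constructor and the load() call mirror the testSequence example further down this page; the appendAddRecord signature is an assumption, not taken from these snippets, and the directory is only illustrative.

   import java.util.ArrayList;

   import org.hornetq.core.journal.Journal;
   import org.hornetq.core.journal.PreparedTransactionInfo;
   import org.hornetq.core.journal.RecordInfo;
   import org.hornetq.core.journal.SequentialFileFactory;
   import org.hornetq.core.journal.impl.JournalImpl;
   import org.hornetq.core.journal.impl.NIOSequentialFileFactory;

   public class JournalLifecycleSketch
   {
      public static void main(final String[] args) throws Exception
      {
         // 10 KiB files, 2 files minimum, compacting disabled, files named test-data-*.tst
         SequentialFileFactory factory = new NIOSequentialFileFactory("/tmp/journal-example");
         Journal journal = new JournalImpl(10 * 1024, 2, 0, 0, factory, "test-data", "tst", 1);

         journal.start();

         // load() replays the existing files; committed records and prepared transactions
         // are handed back through the two lists.
         journal.load(new ArrayList<RecordInfo>(), new ArrayList<PreparedTransactionInfo>(), null);

         // appendAddRecord(id, userRecordType, data, sync) -- signature assumed here
         journal.appendAddRecord(1L, (byte)0, new byte[] { 1, 2, 3 }, true);

         journal.stop();
      }
   }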


   {
      ArrayList<RecordInfo> acks = new ArrayList<RecordInfo>();

      List<RecordInfo> records = new LinkedList<RecordInfo>();

      Journal messageJournal = storageManager.getMessageJournal();

      log.debug("Reading journal from " + config.getJournalDirectory());

      messageJournal.start();

      ((JournalImpl) messageJournal).load(records, null, null, false);

      for (RecordInfo info : records)
      {
         byte[] data = info.data;

         HornetQBuffer buff = HornetQBuffers.wrappedBuffer(data);

         Object o = JournalStorageManager.newObjectEncoding(info, storageManager);
         if (info.getUserRecordType() == JournalStorageManager.ADD_MESSAGE)
         {
            messages.put(info.id, ((MessageDescribe) o).msg);
         }
         else if (info.getUserRecordType() == JournalStorageManager.ADD_LARGE_MESSAGE)
         {
            messages.put(info.id, ((MessageDescribe) o).msg);
         }
         else if (info.getUserRecordType() == JournalStorageManager.ADD_REF)
         {
            ReferenceDescribe ref = (ReferenceDescribe) o;
            HashMap<Long, ReferenceDescribe> map = messageRefs.get(info.id);
            if (map == null)
            {
               HashMap<Long, ReferenceDescribe> newMap = new HashMap<Long, ReferenceDescribe>();
               newMap.put(ref.refEncoding.queueID, ref);
               messageRefs.put(info.id, newMap);
            }
            else
            {
               map.put(ref.refEncoding.queueID, ref);
            }
         }
         else if (info.getUserRecordType() == JournalStorageManager.ACKNOWLEDGE_REF)
         {
            acks.add(info);
         }
         else if (info.userRecordType == JournalStorageManager.ACKNOWLEDGE_CURSOR)
         {
            CursorAckRecordEncoding encoding = new CursorAckRecordEncoding();
            encoding.decode(buff);

            Set<PagePosition> set = cursorRecords.get(encoding.queueID);

            if (set == null)
            {
               set = new HashSet<PagePosition>();
               cursorRecords.put(encoding.queueID, set);
            }

            set.add(encoding.position);
         }
         else if (info.userRecordType == JournalStorageManager.PAGE_TRANSACTION)
         {
            if (info.isUpdate)
            {
               PageUpdateTXEncoding pageUpdate = new PageUpdateTXEncoding();

               pageUpdate.decode(buff);
               pgTXs.add(pageUpdate.pageTX);
            }
            else
            {
               PageTransactionInfoImpl pageTransactionInfo = new PageTransactionInfoImpl();

               pageTransactionInfo.decode(buff);

               pageTransactionInfo.setRecordID(info.id);
               pgTXs.add(pageTransactionInfo.getTransactionID());
            }
         }
      }

      messageJournal.stop();

      removeAcked(acks);
   }
View Full Code Here
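The fragment above comes from a journal-reading tool; the surrounding class supplies messages, messageRefs, cursorRecords, pgTXs and removeAcked(). Reduced to its reusable core, the pattern is: start the journal, load every committed record, wrap each record's payload in a HornetQBuffer, and branch on the user record type. A bare-bones sketch of just that loop (the four-argument load overload is used via a cast, so it is presumably specific to JournalImpl rather than the Journal interface):

      List<RecordInfo> records = new LinkedList<RecordInfo>();

      messageJournal.start();

      ((JournalImpl) messageJournal).load(records, null, null, false);

      for (RecordInfo info : records)
      {
         // buff is handed to decode() by the branches that need the raw payload
         HornetQBuffer buff = HornetQBuffers.wrappedBuffer(info.data);

         if (info.getUserRecordType() == JournalStorageManager.ADD_MESSAGE)
         {
            // decode the payload, e.g. via JournalStorageManager.newObjectEncoding(info, ...)
         }
         // ...other record types as in the full example above
      }

      messageJournal.stop();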

      journalDir = config.getJournalDirectory();

      SequentialFileFactory bindingsFF = new NIOSequentialFileFactory(bindingsDir);

      Journal localBindings = new JournalImpl(1024 * 1024,
                                              2,
                                              config.getJournalCompactMinFiles(),
                                              config.getJournalCompactPercentage(),
                                              bindingsFF,
                                              "hornetq-bindings",
                                              "bindings",
                                              1);

      if (replicator != null)
      {
         bindingsJournal = new ReplicatedJournal((byte)0, localBindings, replicator);
      }
      else
      {
         bindingsJournal = localBindings;
      }

      if (journalDir == null)
      {
         throw new NullPointerException("journal-dir is null");
      }

      createJournalDir = config.isCreateJournalDir();

      syncNonTransactional = config.isJournalSyncNonTransactional();

      syncTransactional = config.isJournalSyncTransactional();

      SequentialFileFactory journalFF = null;

      if (config.getJournalType() == JournalType.ASYNCIO)
      {
         JournalStorageManager.log.info("Using AIO Journal");

         journalFF = new AIOSequentialFileFactory(journalDir,
                                                  config.getJournalBufferSize_AIO(),
                                                  config.getJournalBufferTimeout_AIO(),
                                                  config.isLogJournalWriteRate());
      }
      else if (config.getJournalType() == JournalType.NIO)
      {
         JournalStorageManager.log.info("Using NIO Journal");
         journalFF = new NIOSequentialFileFactory(journalDir,
                                                  true,
                                                  config.getJournalBufferSize_NIO(),
                                                  config.getJournalBufferTimeout_NIO(),
                                                  config.isLogJournalWriteRate());
      }
      else
      {
         throw new IllegalArgumentException("Unsupported journal type " + config.getJournalType());
      }

      if (config.isBackup() && !config.isSharedStore())
      {
         idGenerator = null;
      }
      else
      {
         idGenerator = new BatchingIDGenerator(0, JournalStorageManager.CHECKPOINT_BATCH_SIZE, bindingsJournal);
      }
      Journal localMessage = new JournalImpl(config.getJournalFileSize(),
                                             config.getJournalMinFiles(),
                                             config.getJournalCompactMinFiles(),
                                             config.getJournalCompactPercentage(),
                                             journalFF,
                                             "hornetq-data",
View Full Code Here
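For reference, the positional arguments of the JournalImpl constructor used above line up as follows; the parameter names are inferred from how the configuration values are passed in, not copied from the declaration:

      new JournalImpl(1024 * 1024,                           // journal file size in bytes (1 MiB here)
                      2,                                      // minimum number of journal files
                      config.getJournalCompactMinFiles(),     // compacting only starts once this many files exist...
                      config.getJournalCompactPercentage(),   // ...and live data has dropped below this percentage
                      bindingsFF,                             // the SequentialFileFactory (NIO for the bindings journal)
                      "hornetq-bindings",                     // file name prefix
                      "bindings",                             // file name extension
                      1);                                     // maxAIO: concurrent asynchronous writes, mainly relevant to the AIO factory

The message journal built right after it has the same shape, but takes its file size, minimum file count and file factory (NIO or AIO, chosen above) from the configuration.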

   // Public --------------------------------------------------------

   public void testSequence() throws Exception
   {
      NIOSequentialFileFactory factory = new NIOSequentialFileFactory(getTestDir());
      Journal journal = new JournalImpl(10 * 1024, 2, 0, 0, factory, "test-data", "tst", 1);

      journal.start();

      journal.load(new ArrayList<RecordInfo>(), new ArrayList<PreparedTransactionInfo>(), null);

      BatchingIDGenerator batch = new BatchingIDGenerator(0, 1000, journal);
      long id1 = batch.generateID();
      long id2 = batch.generateID();

      Assert.assertTrue(id2 > id1);

      journal.stop();
      batch = new BatchingIDGenerator(0, 1000, journal);
      loadIDs(journal, batch);

      long id3 = batch.generateID();

      Assert.assertEquals(1000, id3);

      long id4 = batch.generateID();

      Assert.assertTrue(id4 > id3 && id4 < 2000);

      batch.close();

      journal.stop();
      batch = new BatchingIDGenerator(0, 1000, journal);
      loadIDs(journal, batch);

      long id5 = batch.generateID();
      Assert.assertTrue(id5 > id4 && id5 < 2000);

      long lastId = id5;

      boolean close = true;
      for (int i = 0; i < 100000; i++)
      {
         if (i % 1000 == 0)
         {
            System.out.println("lastId = " + lastId);
            // alternate between clean closes and simulated crashes
            if (close)
            {
               batch.close();
            }

            close = !close;

            journal.stop();
            batch = new BatchingIDGenerator(0, 1000, journal);
            loadIDs(journal, batch);
         }

         long id = batch.generateID();

         Assert.assertTrue(id > lastId);

         lastId = id;
      }

      batch.close();
      journal.stop();
      batch = new BatchingIDGenerator(0, 1000, journal);
      loadIDs(journal, batch);

      lastId = batch.getCurrentID();

      journal.stop();
      batch = new BatchingIDGenerator(0, 1000, journal);
      loadIDs(journal, batch);

      Assert.assertEquals("No Ids were generated, so the currentID was supposed to stay the same",
                          lastId,
                          batch.getCurrentID());
     
      journal.stop();

   }
View Full Code Here
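The test above exercises BatchingIDGenerator, which hands out IDs in blocks of checkpointSize (1000 here) and records its progress in the supplied journal. That is why, after the journal is reloaded, id3 is exactly 1000 and the following IDs stay below 2000: a reload skips ahead to the next block boundary so IDs stay unique even after a simulated crash, while reloading without generating anything leaves getCurrentID() untouched (the final assertion). A minimal usage sketch; note that loadIDs() is a helper defined in the test, not part of the generator itself:

      BatchingIDGenerator generator = new BatchingIDGenerator(0, 1000, journal);

      long first = generator.generateID();
      long second = generator.generateID();   // strictly greater than 'first'

      generator.close();                      // flush the generator state before stopping the journal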

      createDir = config.isCreateBindingsDir();

      SequentialFileFactory bindingsJMS = new NIOSequentialFileFactory(journalDir);

      Journal localJMS = new JournalImpl(1024 * 1024,
                                         2,
                                         config.getJournalCompactMinFiles(),
                                         config.getJournalCompactPercentage(),
                                         bindingsJMS,
                                         "hornetq-jms",
View Full Code Here
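Apart from the "hornetq-jms" file prefix, this is the same fixed layout used for the core bindings journal above: 1 MiB files, a two-file minimum, and the compacting thresholds taken from the configuration. Of the journals shown on this page, only the message (data) journal gets its file size and minimum file count from the configuration.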

    */
   private void getBindings() throws Exception
   {
      List<RecordInfo> records = new LinkedList<RecordInfo>();

      Journal bindingsJournal = storageManager.getBindingsJournal();

      bindingsJournal.start();

      log.debug("Reading bindings journal from " + config.getBindingsDirectory());

      ((JournalImpl) bindingsJournal).load(records, null, null, false);

      for (RecordInfo info : records)
      {
         if (info.getUserRecordType() == JournalStorageManager.QUEUE_BINDING_RECORD)
         {
            PersistentQueueBindingEncoding bindingEncoding = (PersistentQueueBindingEncoding) JournalStorageManager.newObjectEncoding(info, null);
            queueBindings.put(bindingEncoding.getId(), bindingEncoding);
         }
      }

      bindingsJournal.stop();
   }
View Full Code Here
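This is the same start / load / filter / stop sequence as the message-journal example above, restricted to QUEUE_BINDING_RECORD entries, which newObjectEncoding() turns into PersistentQueueBindingEncoding objects keyed by their binding id. Every snippet on this page starts the journal before calling load() and stops it once the records have been consumed.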

      journalDir = config.getJournalDirectory();

      SequentialFileFactory bindingsFF = new NIOSequentialFileFactory(bindingsDir, criticalErrorListener);

      Journal localBindings = new JournalImpl(1024 * 1024,
                                              2,
                                              config.getJournalCompactMinFiles(),
                                              config.getJournalCompactPercentage(),
                                              bindingsFF,
                                              "hornetq-bindings",
                                              "bindings",
                                              1);

      if (replicator != null)
      {
         bindingsJournal = new ReplicatedJournal((byte)0, localBindings, replicator);
      }
      else
      {
         bindingsJournal = localBindings;
      }

      if (journalDir == null)
      {
         throw new NullPointerException("journal-dir is null");
      }

      createJournalDir = config.isCreateJournalDir();

      syncNonTransactional = config.isJournalSyncNonTransactional();

      syncTransactional = config.isJournalSyncTransactional();

      if (config.getJournalType() == JournalType.ASYNCIO)
      {
         JournalStorageManager.log.info("Using AIO Journal");

         journalFF = new AIOSequentialFileFactory(journalDir,
                                                  config.getJournalBufferSize_AIO(),
                                                  config.getJournalBufferTimeout_AIO(),
                                                  config.isLogJournalWriteRate(),
                                                  criticalErrorListener);
      }
      else if (config.getJournalType() == JournalType.NIO)
      {
         JournalStorageManager.log.info("Using NIO Journal");
         journalFF = new NIOSequentialFileFactory(journalDir,
                                                  true,
                                                  config.getJournalBufferSize_NIO(),
                                                  config.getJournalBufferTimeout_NIO(),
                                                  config.isLogJournalWriteRate(),
                                                  criticalErrorListener);
      }
      else
      {
         throw new IllegalArgumentException("Unsupported journal type " + config.getJournalType());
      }

      if (config.isBackup() && !config.isSharedStore())
      {
         idGenerator = null;
      }
      else
      {
         idGenerator = new BatchingIDGenerator(0, JournalStorageManager.CHECKPOINT_BATCH_SIZE, bindingsJournal);
      }
      Journal localMessage = new JournalImpl(config.getJournalFileSize(),
                                             config.getJournalMinFiles(),
                                             config.getJournalCompactMinFiles(),
                                             config.getJournalCompactPercentage(),
                                             journalFF,
                                             "hornetq-data",
View Full Code Here
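This is the same storage-manager construction as the earlier snippet, with one addition: a critical-error listener (the criticalErrorListener argument) is passed to both the bindings and data file factories, so the factories can report fatal I/O errors back through that callback. Note that journalFF is assigned here without a local declaration; the earlier variant shows the corresponding declaration (SequentialFileFactory journalFF = null).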

   private void finishSynchronization(String liveID) throws Exception
   {
      for (JournalContent jc : EnumSet.allOf(JournalContent.class))
      {
         Journal journal = journalsHolder.remove(jc);
         journal.synchronizationLock();
         try
         {
            // Files should already be in place.
            filesReservedForSync.remove(jc);
            registerJournal(jc.typeByte, journal);
            journal.stop();
            journal.start();
            journal.loadSyncOnly(JournalState.SYNCING_UP_TO_DATE);
         }
         finally
         {
            journal.synchronizationUnlock();
         }
      }
      ByteBuffer buffer = ByteBuffer.allocate(4 * 1024);
      for (Entry<Long, ReplicatedLargeMessage> entry : largeMessages.entrySet())
      {
View Full Code Here

               {
                  HornetQServerLogger.LOGGER.autoFailBackDenied();
               }

               final JournalContent journalContent = SyncDataType.getJournalContentType(packet.getDataType());
               final Journal journal = journalsHolder.get(journalContent);

               if (packet.getNodeID() != null)
               {
                  // At the start of replication, the backup does not yet know the nodeID used by the live server;
                  // this is the point where it learns that information.
                  quorumManager.setLiveID(packet.getNodeID());
               }
               Map<Long, JournalSyncFile> mapToFill = filesReservedForSync.get(journalContent);

               for (Entry<Long, JournalFile> entry : journal.createFilesForBackupSync(packet.getFileIds()).entrySet())
               {
                  mapToFill.put(entry.getKey(), new JournalSyncFile(entry.getValue()));
               }
               FileWrapperJournal syncJournal = new FileWrapperJournal(journal);
               registerJournal(journalContent.typeByte, syncJournal);
View Full Code Here

   /**
    * @param packet
    */
   private void handleCommitRollback(final ReplicationCommitMessage packet) throws Exception
   {
      Journal journalToUse = getJournal(packet.getJournalID());

      if (packet.isRollback())
      {
         journalToUse.appendRollbackRecord(packet.getTxId(), packet.getSync());
      }
      else
      {
         journalToUse.appendCommitRecord(packet.getTxId(), packet.getSync());
      }
   }
View Full Code Here
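handleCommitRollback() simply forwards a replicated commit or rollback to the journal selected by the packet's journal ID. On the side that writes the transaction in the first place, the same two calls terminate a transactional append. A sketch of that shape, using a started journal and an ID generator as in the examples above; only appendCommitRecord and appendRollbackRecord appear in these snippets, while the appendAddRecordTransactional signature is assumed and doWork() is a placeholder:

      long txID = idGenerator.generateID();        // any unique transaction id
      long recordID = idGenerator.generateID();

      // records appended under a txID only take effect once the commit record is written
      journal.appendAddRecordTransactional(txID, recordID, (byte)0, new byte[] { 1, 2, 3 });

      boolean success = doWork();                  // placeholder for the work covered by the transaction

      if (success)
      {
         journal.appendCommitRecord(txID, true);   // sync == true: wait for the write to reach disk
      }
      else
      {
         journal.appendRollbackRecord(txID, true);
      }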
