Package org.jboss.ha.framework.interfaces

Examples of org.jboss.ha.framework.interfaces.DistributedReplicantManager
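
The excerpts below all follow the same registration pattern against the DistributedReplicantManager (DRM) obtained from an HAPartition: add a replicant for the local node under a service key, register a listener for replicant changes, check whether this node is the master replica, and undo it all on shutdown. The following minimal sketch pulls that pattern together; it is not taken from any of the excerpts. The class name, the "sketch-service" key, and the injected, already started HAPartition are illustrative assumptions, and the listener type and replicantsChanged signature are assumed to match the one implemented in the first excerpt.

import java.util.List;

import org.jboss.ha.framework.interfaces.DistributedReplicantManager;
import org.jboss.ha.framework.interfaces.HAPartition;

// Minimal sketch (not from the excerpts): register a replicant, track
// master-replica status, and clean up on stop.
public class ReplicatedServiceSketch implements DistributedReplicantManager.ReplicantListener
{
   // hypothetical service key; each clustered service registers under its own key
   private static final String SERVICE_KEY = "sketch-service";

   private final HAPartition partition; // assumed to be created and started elsewhere
   private volatile boolean coordinator;

   public ReplicatedServiceSketch(HAPartition partition)
   {
      this.partition = partition;
   }

   public void start() throws Exception
   {
      DistributedReplicantManager drm = partition.getDistributedReplicantManager();
      // publish this node's replicant; every node offering the service adds one entry under the same key
      drm.add(SERVICE_KEY, partition.getClusterNode());
      // get called back when the replicant list changes (a node joins, crashes, splits off or merges back)
      drm.registerListener(SERVICE_KEY, this);
      // the DRM reports exactly one node in the group as master replica for a given key
      this.coordinator = drm.isMasterReplica(SERVICE_KEY);
   }

   public void stop() throws Exception
   {
      DistributedReplicantManager drm = partition.getDistributedReplicantManager();
      // undo the registrations so the remaining nodes can elect a new master replica
      drm.unregisterListener(SERVICE_KEY, this);
      drm.remove(SERVICE_KEY);
      this.coordinator = false;
   }

   public void replicantsChanged(String key, List<?> newReplicants, int newReplicantsViewId, boolean merge)
   {
      // the cluster topology changed: re-evaluate whether this node is now the master replica
      this.coordinator = partition.getDistributedReplicantManager().isMasterReplica(SERVICE_KEY);
   }
}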


    * DistributedState to see if we can remove/narrow the synchronization.
    */
   public synchronized void replicantsChanged(String key, List<?> newReplicants, int newReplicantsViewId,
         boolean merge)
   {
      DistributedReplicantManager drm = this.partition.getDistributedReplicantManager();
     
      if (key.equals(this.RPC_HANDLER_NAME) && drm.isMasterReplica(this.RPC_HANDLER_NAME))
      {
         this.log.debug("The list of replicants for the JG bridge has changed, computing and updating local info...");

         DistributedState ds = this.partition.getDistributedStateService();
        
         // we remove any entry from the DS whose node is dead
         //
         Collection<?> coll = ds.getAllKeys(this.RPC_HANDLER_NAME);
         if (coll == null)
         {
            this.log.debug("... No bridge info was associated with this node");
            return;
         }

         // to avoid ConcurrentModificationException, we copy the list of keys into a new structure
         //
         ArrayList<?> collCopy = new ArrayList<Object>(coll);
         List<String> newReplicantsNodeNames = drm.lookupReplicantsNodeNames(this.RPC_HANDLER_NAME);

         for (int i = 0; i < collCopy.size(); i++)
         {
            String nodeEntry = (String) collCopy.get(i);
            if (!newReplicantsNodeNames.contains(nodeEntry))
View Full Code Here


      if (this.partition == null)
         throw new IllegalStateException("HAPartition property must be set before starting InvalidationBridge service");

      this.RPC_HANDLER_NAME = "DCacheBridge-" + this.bridgeName;

      DistributedReplicantManager drm = this.partition.getDistributedReplicantManager();
      DistributedState ds = this.partition.getDistributedStateService();
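      // publish an (initially empty) replicant for the bridge and register for DRM, DistributedState and RPC callbacks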
     
      drm.add(this.RPC_HANDLER_NAME, "");
      drm.registerListener(this.RPC_HANDLER_NAME, this);
      ds.registerDSListenerEx(this.RPC_HANDLER_NAME, this);
      this.partition.registerRPCHandler(this.RPC_HANDLER_NAME, this);

      // we now publish the list of caches we have access to
      if (this.invalMgr == null)
View Full Code Here

   }

   @Override
   public void stopService()
   {
      DistributedReplicantManager drm = this.partition.getDistributedReplicantManager();
      DistributedState ds = this.partition.getDistributedStateService();
     
      try
      {
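         // tear down in reverse order of startService(): RPC handler, DS listener, DRM listener, then the replicant itself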
         this.partition.unregisterRPCHandler(this.RPC_HANDLER_NAME, this);
         ds.unregisterDSListenerEx(this.RPC_HANDLER_NAME, this);
         drm.unregisterListener(this.RPC_HANDLER_NAME, this);
         drm.remove(this.RPC_HANDLER_NAME);

         this.invalidationSubscription.unregister();

         ds.remove(this.RPC_HANDLER_NAME, this.partition.getNodeName(), true);
View Full Code Here

    */
   public void start() throws Exception
   {
      partition.registerRPCHandler(getServiceHAName(), rpcTarget);
     
      DistributedReplicantManager drm = partition.getDistributedReplicantManager();
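      // publish this node as a replicant for the service and check whether it is currently the coordinator (master replica)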
      drm.add(getServiceHAName(), partition.getClusterNode());
      coordinator = drm.isMasterReplica(getServiceHAName());
      drm.registerListener(getServiceHAName(), drmListener);
     
      statusCheck();
   }
View Full Code Here

    *
    * @throws Exception
    */
   public void stop() throws Exception
   {
      DistributedReplicantManager drm = partition.getDistributedReplicantManager();
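      // stop listening for replicant changes and withdraw this node's replicant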
      drm.unregisterListener(getServiceHAName(), drmListener);
      drm.remove(getServiceHAName());
     
      coordinator = false;

      partition.unregisterRPCHandler(getServiceHAName(), rpcTarget);
     
View Full Code Here

   {
      if (!deadMembersKnown)
      {
         try
         {
            DistributedReplicantManager drm = partition.getDistributedReplicantManager();
            List<ClusterNode> nodes = drm.lookupReplicantsNodes(getServiceHAName());
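            // the first node in the replicant list acts as coordinator; if that is not us, ask it for its discrepancies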
            ClusterNode coord = (nodes != null && nodes.size() > 0 ? nodes.get(0) : null);
            if (coord != null && coord.equals(partition.getClusterNode()) == false)
            {
               Object rsp = partition.callMethodOnNode(getServiceHAName(), "getDiscrepancies", NULL_ARGS, NULL_TYPES, 60000, coord);
               if (rsp instanceof RemoteDiscrepancies)
View Full Code Here

      if (hasAdds)
      {
         statusCheckRequired = true;
      }
     
      DistributedReplicantManager drm  = partition.getDistributedReplicantManager();
      this.coordinator = drm.isMasterReplica(getServiceHAName());
     
      if (wasCoordinator && !coordinator)
      {
         // There's been a merge and we are no longer coordinator. Asynchronously
         // tell the rest of the cluster about our knowledge of timestamps
View Full Code Here

     
      this.lockSupport.start();
     
      this.partition.registerRPCHandler(getServiceHAName(), rpcTarget);
     
      DistributedReplicantManager drm = this.partition.getDistributedReplicantManager();
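      // register this node's replicant, snapshot the current set of nodes providing the service, and listen for changes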
      drm.add(getServiceHAName(), this.partition.getClusterNode());
      this.serviceView = drm.lookupReplicantsNodes(getServiceHAName());
      drm.registerListener(getServiceHAName(), drmListener);
     
      this.initialized = true;
   }
View Full Code Here

      this.initialized = true;
   }
  
   public void shutdown() throws Exception
   {
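      // undo the registrations performed at startup, in reverse order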
      DistributedReplicantManager drm = this.partition.getDistributedReplicantManager();
      drm.unregisterListener(getServiceHAName(), drmListener);
      drm.remove(getServiceHAName());
      this.partition.unregisterRPCHandler(getServiceHAName(), rpcTarget);
      this.lockSupport.stop();
     
      this.contentManager = null;
     
View Full Code Here

         partition1.setBindIntoJndi(false);
        
         partition1.create();        
         partition1.start();

         DistributedReplicantManager drm1 = partition1.getDistributedReplicantManager();

         // allow the first node's channel and partition to finish starting before creating the second
         Thread.sleep(10000);
        
         // Use a different stack name with the same config to avoid singleton conflicts
         stackName = "tunnel2";
        
         JChannelFactory factory2 = new JChannelFactory();
         factory2.setMultiplexerConfig(muxFile);
         factory2.setNamingServicePort(1099);
         factory2.setNodeName("node2");
         factory2.setExposeChannels(false);
         factory2.setExposeProtocols(false);
         factory2.setAddMissingSingletonName(false);
         factory2.create();
         factory2.start();
        
         partition2 = new InjectedChannelClusterPartition(factory2.createChannel(stackName));
         partition2.setPartitionName(partitionName);
         partition2.setStateTransferTimeout(30000);
         partition2.setMethodCallTimeout(60000);
         partition2.setBindIntoJndi(false);
        
         partition2.create();        
         partition2.start();

         DistributedReplicantManager drm2 = partition2.getDistributedReplicantManager();
        
         // give the two channels time to discover each other and form a shared two-node view
         Thread.sleep(10000);
        
         // confirm that each partition contains two nodes  
         assertEquals("Partition1 should contain two nodes; ", 2, partition1.getCurrentView().size());
         assertEquals("Partition2 should contain two nodes; ", 2, partition2.getCurrentView().size());
        
         drm1.add(SERVICEA, "valueA1");
         drm2.add(SERVICEA, "valueA2");
         drm2.add(SERVICEB, "valueB2");
        
         // test that only one node is the master replica for serviceA
         assertTrue("ServiceA must have a master replica",
                 drm1.isMasterReplica(SERVICEA) || drm2.isMasterReplica(SERVICEA));
         assertTrue("ServiceA must have a single master replica",
                 drm1.isMasterReplica(SERVICEA) != drm2.isMasterReplica(SERVICEA));
         // ServiceB should only be a master replica on partition2
         assertFalse("ServiceB should not be a master replica on partition1",
                 drm1.isMasterReplica(SERVICEB));
         assertTrue("ServiceB must have a master replica on partition2",
                 drm2.isMasterReplica(SERVICEB));
        
         // confirm that each partition contains correct DRM replicants for services A and B 
         assertEquals("Partition1 should contain two DRM replicants for serviceA; ",
                 2, drm1.lookupReplicants(SERVICEA).size());
         assertEquals("Partition2 should contain two DRM replicants for serviceA; ",
                 2, drm2.lookupReplicants(SERVICEA).size());
         assertEquals("Partition1 should contain one DRM replicant for serviceB; ",
                 1, drm1.lookupReplicants(SERVICEB).size());
         assertEquals("Partition2 should contain one DRM replicant for serviceB; ",
                 1, drm2.lookupReplicants(SERVICEB).size());

         // simulate a split of the partition
         log.info("DRMTestCase.testIsMasterReplica() - stopping GossipRouter");
         router.clear();   // temporary workaround for JGRP-1232
         router.stop();
         sleepThread(15000);
         log.info("router stopped, routing table = " + router.dumpRoutingTable() + ", partition1 view=" + partition1.getCurrentView().toString());
         assertTrue("router is stopped", !router.isStarted());
         assertTrue("router is NOT running", !router.isRunning());

         // confirm that each partition contains one node  
         assertEquals("Partition1 should contain one node after split; ",
                 1, partition1.getCurrentView().size());
         assertEquals("Partition2 should contain one node after split; ",
                 1, partition2.getCurrentView().size());
       
         // confirm that each node is a master replica for serviceA after the split
         assertTrue("ServiceA should be a master replica on partition1 after split",
                 drm1.isMasterReplica(SERVICEA));
         assertTrue("ServiceA should be a master replica on partition2 after split",
                 drm2.isMasterReplica(SERVICEA));
        
         // ServiceB should still only be a master replica on partition2 after split
         assertFalse("ServiceB should not be a master replica on partition1 after split",
                 drm1.isMasterReplica(SERVICEB));
         assertTrue("ServiceB must have a master replica on partition2 after split",
                 drm2.isMasterReplica(SERVICEB));
        
         // Remove ServiceA replicant from partition1        
         drm1.remove(SERVICEA);
        
         // test that this node is not the master replica        
         assertFalse("partition1 is not master replica after dropping ServiceA replicant",
                 drm1.isMasterReplica(SERVICEA));
        
         // Restore the local replicant
         drm1.add(SERVICEA, "valueA1a");
        
         // simulate a merge
         log.info("DRMTestCase.testIsMasterReplica() - restarting GossipRouter");
         router.start();
         // it seems to take more than 15 seconds for the merge to take effect
         sleepThread(30000);
        
         assertTrue(router.isStarted());

         // confirm that each partition contains two nodes again
         assertEquals("Partition1 should contain two nodes after merge; ",
               2, partition1.getCurrentView().size());
         assertEquals("Partition2 should contain two nodes after merge; ",
                 2, partition2.getCurrentView().size());
        
         // test that only one node is the master replica for serviceA after merge
         assertTrue("ServiceA must have a master replica after merge",
                 drm1.isMasterReplica(SERVICEA) || drm2.isMasterReplica(SERVICEA));
         assertTrue("ServiceA must have a single master replica after merge",
                 drm1.isMasterReplica(SERVICEA) != drm2.isMasterReplica(SERVICEA));
         // ServiceB should only be a master replica on partition2 after merge
         assertFalse("ServiceB should not be a master replica on partition1 after merge",
                 drm1.isMasterReplica(SERVICEB));
         assertTrue("ServiceB must have a master replica on partition2 after merge",
                 drm2.isMasterReplica(SERVICEB));
        
         // confirm that each partition contains correct DRM replicants for services A and B after merge
         assertEquals("Partition1 should contain two DRM replicants for serviceA after merge; ",
                 2, drm1.lookupReplicants(SERVICEA).size());
         assertEquals("Partition2 should contain two DRM replicants for serviceA after merge; ",
                 2, drm2.lookupReplicants(SERVICEA).size());
         assertEquals("Partition1 should contain one DRM replicant for serviceB after merge; ",
                 1, drm1.lookupReplicants(SERVICEB).size());
         assertEquals("Partition2 should contain one DRM replicant for serviceB after merge; ",
                 1, drm2.lookupReplicants(SERVICEB).size());
        
         partition1.stop();
         partition2.stop();
      }
      finally
View Full Code Here


