
Examples of org.infinispan.topology.CacheTopology$Externalizer
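These examples are taken from Infinispan test code. They exercise CacheTopology through its three-argument constructor (topology id, current consistent hash, pending consistent hash) and its accessors (getTopologyId(), getCurrentCH(), getPendingCH(), getMembers()); none of them show the nested Externalizer itself. As an orientation aid, here is a minimal, hypothetical sketch of an externalizer for exactly those fields, assuming Infinispan's standard two-method Externalizer contract (writeObject/readObject). The interface lives in org.infinispan.marshall or org.infinispan.commons.marshall depending on the Infinispan version, and the real CacheTopology$Externalizer may marshal additional state, so treat this purely as an illustration, not as the actual implementation.

import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;

import org.infinispan.commons.marshall.Externalizer;
import org.infinispan.distribution.ch.ConsistentHash;
import org.infinispan.topology.CacheTopology;

// Hypothetical sketch; not the actual org.infinispan.topology.CacheTopology$Externalizer.
public class CacheTopologySketchExternalizer implements Externalizer<CacheTopology> {

   @Override
   public void writeObject(ObjectOutput output, CacheTopology topology) throws IOException {
      // Marshal the fields the examples below rely on: id, current CH, pending CH.
      output.writeInt(topology.getTopologyId());
      output.writeObject(topology.getCurrentCH());
      output.writeObject(topology.getPendingCH());
   }

   @Override
   public CacheTopology readObject(ObjectInput input) throws IOException, ClassNotFoundException {
      // Read the fields back in the same order and rebuild the topology.
      int topologyId = input.readInt();
      ConsistentHash currentCH = (ConsistentHash) input.readObject();
      ConsistentHash pendingCH = (ConsistentHash) input.readObject();
      return new CacheTopology(topologyId, currentCH, pendingCH);
   }
}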


   private void updateTopologyIdAndWaitForTransactionData(TopologyAffectedCommand command) throws InterruptedException {
      // set the topology id if it was not set before (i.e. this is a local command)
      // TODO Make tx commands extend FlagAffectedCommand so we can use CACHE_MODE_LOCAL in StaleTransactionCleanupService
      if (command.getTopologyId() == -1) {
         CacheTopology cacheTopology = stateTransferManager.getCacheTopology();
         if (cacheTopology != null) {
            command.setTopologyId(cacheTopology.getTopologyId());
         }
      }

      // remote/forwarded command
      int cmdTopologyId = command.getTopologyId();
      // ... (remainder of method elided in this excerpt)
   }

   private void waitForStateTransfer(int expectedTopologyId, Cache... caches) {
      waitForRehashToComplete(caches);
      for (Cache c : caches) {
         CacheTopology cacheTopology = extractComponent(c, StateTransferManager.class).getCacheTopology();
         assertEquals(cacheTopology.getTopologyId(), expectedTopologyId,
               String.format("Wrong topology on cache %s, expected %d and got %s",
                     c, expectedTopologyId, cacheTopology));
      }
   }
   // ... (excerpt truncated)

      when(transactionTable.getRemoteTransactions()).thenReturn(Collections.<RemoteTransaction>emptyList());

      assertFalse(stateConsumer.hasActiveTransfers());

      // node 4 leaves
      stateConsumer.onTopologyUpdate(new CacheTopology(1, ch2, null), false);
      assertFalse(stateConsumer.hasActiveTransfers());

      // start a rebalance
      stateConsumer.onTopologyUpdate(new CacheTopology(2, ch2, ch3), true);
      assertTrue(stateConsumer.hasActiveTransfers());

      // check that all segments have been requested
      Set<Integer> oldSegments = ch2.getSegmentsForOwner(addresses[0]);
      final Set<Integer> newSegments = ch3.getSegmentsForOwner(addresses[0]);
      newSegments.removeAll(oldSegments);
      log.debugf("Rebalancing. Added segments=%s, old segments=%s", newSegments, oldSegments);
      assertEquals(flatRequestedSegments, newSegments);

      // simulate a cluster state recovery and return to ch2
      Future<Object> future = fork(new Callable<Object>() {
         @Override
         public Object call() throws Exception {
            stateConsumer.onTopologyUpdate(new CacheTopology(3, ch2, null), false);
            return null;
         }
      });
      stateConsumer.onTopologyUpdate(new CacheTopology(3, ch2, null), false);
      future.get();
      assertFalse(stateConsumer.hasActiveTransfers());


      // restart the rebalance
      requestedSegments.clear();
      stateConsumer.onTopologyUpdate(new CacheTopology(4, ch2, ch3), true);
      assertTrue(stateConsumer.hasActiveTransfers());
      assertEquals(flatRequestedSegments, newSegments);

      // apply state
      ArrayList<StateChunk> stateChunks = new ArrayList<StateChunk>();
      // ... (excerpt truncated)

      LocalTopologyManager component = TestingUtil.extractGlobalComponent(manager, LocalTopologyManager.class);
      LocalTopologyManager spyLtm = Mockito.spy(component);
      doAnswer(new Answer() {
         @Override
         public Object answer(InvocationOnMock invocation) throws Throwable {
            CacheTopology topology = (CacheTopology) invocation.getArguments()[1];
            // Ignore the first topology update on the joiner, which still carries the topology from before the join
            if (topology.getTopologyId() != currentTopologyId) {
               checkPoint.trigger("pre_topology_" + topology.getTopologyId() + "_on_" + manager.getAddress());
               checkPoint.await("allow_topology_" + topology.getTopologyId() + "_on_" + manager.getAddress(),
                     10, TimeUnit.SECONDS);
            }
            Object result = invocation.callRealMethod();
            checkPoint.trigger("post_topology_" + topology.getTopologyId() + "_on_" + manager.getAddress());
            return result;
         }
      }).when(spyLtm).handleConsistentHashUpdate(eq(CacheContainer.DEFAULT_CACHE_NAME), any(CacheTopology.class),
            anyInt());
      TestingUtil.extractGlobalComponentRegistry(manager).registerComponent(spyLtm, LocalTopologyManager.class);
      // ... (excerpt truncated)

      // ... (start of the condition elided in this excerpt)
                  cache1.getRpcManager().getMembers().size() == 3 &&
                  cache2.getRpcManager().getMembers().size() == 3;
         }
      });

      CacheTopology duringJoinTopology = ltm0.getCacheTopology(CACHE_NAME);
      assertEquals(duringJoinTopologyId, duringJoinTopology.getTopologyId());
      assertNotNull(duringJoinTopology.getPendingCH());
      final MagicKey key = getKeyForCache2(duringJoinTopology.getPendingCH());
      log.tracef("Rebalance started. Found key %s with current owners %s and pending owners %s", key,
            duringJoinTopology.getCurrentCH().locateOwners(key), duringJoinTopology.getPendingCH().locateOwners(key));

      // Every PutKeyValueCommand will be blocked before reaching the distribution interceptor on cache1
      CyclicBarrier beforeCache1Barrier = new CyclicBarrier(2);
      BlockingInterceptor blockingInterceptor1 = new BlockingInterceptor(beforeCache1Barrier,
            PutKeyValueCommand.class, false);
      cache1.addInterceptorBefore(blockingInterceptor1, NonTxConcurrentDistributionInterceptor.class);

      // Every PutKeyValueCommand will be blocked after returning to the distribution interceptor on cache2
      CyclicBarrier afterCache2Barrier = new CyclicBarrier(2);
      BlockingInterceptor blockingInterceptor2 = new BlockingInterceptor(afterCache2Barrier,
            PutKeyValueCommand.class, true);
      cache2.addInterceptorBefore(blockingInterceptor2, StateTransferInterceptor.class);

      // Put from cache0 with cache0 as primary owner, cache2 will become the primary owner for the retry
      Future<Object> future = fork(new Callable<Object>() {
         @Override
         public Object call() throws Exception {
            return conditional ? cache0.putIfAbsent(key, "v") : cache0.put(key, "v");
         }
      });

      // Wait for the command to be executed on cache2 and unblock it
      afterCache2Barrier.await(10, TimeUnit.SECONDS);
      afterCache2Barrier.await(10, TimeUnit.SECONDS);

      // Allow the topology update to proceed on all the caches
      int postJoinTopologyId = duringJoinTopologyId + 1;
      checkPoint.trigger("allow_topology_" + postJoinTopologyId + "_on_" + address(0));
      checkPoint.trigger("allow_topology_" + postJoinTopologyId + "_on_" + address(1));
      checkPoint.trigger("allow_topology_" + postJoinTopologyId + "_on_" + address(2));

      // Wait for the topology to change everywhere
      TestingUtil.waitForRehashToComplete(cache0, cache1, cache2);

      // Allow the put command to throw an OutdatedTopologyException on cache1
      log.tracef("Unblocking the put command on node %s", address(1));
      beforeCache1Barrier.await(10, TimeUnit.SECONDS);
      beforeCache1Barrier.await(10, TimeUnit.SECONDS);

      // Allow the retry to proceed on cache1, if it's still a member.
      // (In my tests, the backup was always cache0.)
      CacheTopology postJoinTopology = ltm0.getCacheTopology(CACHE_NAME);
      if (postJoinTopology.getCurrentCH().locateOwners(key).contains(address(1))) {
         beforeCache1Barrier.await(10, TimeUnit.SECONDS);
         beforeCache1Barrier.await(10, TimeUnit.SECONDS);
      }
      // And allow the retry to finish successfully on cache2
      afterCache2Barrier.await(10, TimeUnit.SECONDS);
      // ... (excerpt truncated)

      LocalTopologyManager component = TestingUtil.extractGlobalComponent(manager, LocalTopologyManager.class);
      LocalTopologyManager spyLtm = Mockito.spy(component);
      doAnswer(new Answer() {
         @Override
         public Object answer(InvocationOnMock invocation) throws Throwable {
            CacheTopology topology = (CacheTopology) invocation.getArguments()[1];
            // Ignore the first topology update on the joiner, which still carries the topology from before the join
            if (topology.getTopologyId() != currentTopologyId) {
               checkPoint.trigger("pre_topology_" + topology.getTopologyId() + "_on_" + manager.getAddress());
               checkPoint.await("allow_topology_" + topology.getTopologyId() + "_on_" + manager.getAddress(),
                     10, TimeUnit.SECONDS);
            }
            return invocation.callRealMethod();
         }
      }).when(spyLtm).handleConsistentHashUpdate(eq(CacheContainer.DEFAULT_CACHE_NAME), any(CacheTopology.class),
            anyInt());
      // ... (excerpt truncated)

      log.info("Adding a new node ..");
      addClusterEnabledCacheManager(cacheConfigBuilder);
      log.info("Added a new node");

      // node B is not a member yet and rebalance has not started yet
      CacheTopology cacheTopology = advancedCache(1).getComponentRegistry().getStateTransferManager().getCacheTopology();
      assertNull(cacheTopology.getPendingCH());
      assertTrue(cacheTopology.getMembers().contains(address(0)));
      assertFalse(cacheTopology.getMembers().contains(address(1)));
      assertFalse(cacheTopology.getCurrentCH().getMembers().contains(address(1)));

      // no keys should be present on node B yet because state transfer is blocked
      assertTrue(cache(1).keySet().isEmpty());

      // initiate a REMOVE
      // ... (excerpt truncated)

      log.info("Adding a new node ..");
      addClusterEnabledCacheManager(cacheConfigBuilder);
      log.info("Added a new node");

      // node B is not a member yet and rebalance has not started yet
      CacheTopology cacheTopology = advancedCache(1).getComponentRegistry().getStateTransferManager().getCacheTopology();
      assertNull(cacheTopology.getPendingCH());
      assertTrue(cacheTopology.getMembers().contains(address(0)));
      assertFalse(cacheTopology.getMembers().contains(address(1)));
      assertFalse(cacheTopology.getCurrentCH().getMembers().contains(address(1)));

      // no keys should be present on node B yet because state transfer is blocked
      assertTrue(cache(1).keySet().isEmpty());

      // initiate a PUT
      // ... (excerpt truncated)

      log.info("Adding a new node ..");
      addClusterEnabledCacheManager(cacheConfigBuilder);
      log.info("Added a new node");

      // node B is not a member yet and rebalance has not started yet
      CacheTopology cacheTopology = advancedCache(1).getComponentRegistry().getStateTransferManager().getCacheTopology();
      assertNull(cacheTopology.getPendingCH());
      assertTrue(cacheTopology.getMembers().contains(address(0)));
      assertFalse(cacheTopology.getMembers().contains(address(1)));
      assertFalse(cacheTopology.getCurrentCH().getMembers().contains(address(1)));

      // no keys should be present on node B yet because state transfer is blocked
      assertTrue(cache(1).keySet().isEmpty());

      // initiate a REPLACE
      // ... (excerpt truncated)

      members1.add(A);
      members2.add(A);
      members2.add(B);
      ReplicatedConsistentHash readCh = new ReplicatedConsistentHash(members1);
      ReplicatedConsistentHash writeCh = new ReplicatedConsistentHash(members2);
      final CacheTopology cacheTopology = new CacheTopology(1, readCh, writeCh);
      when(stateTransferManager.getCacheTopology()).thenAnswer(new Answer<CacheTopology>() {
         @Override
         public CacheTopology answer(InvocationOnMock invocation) {
            return cacheTopology;
         }
      });
      // ... (excerpt truncated)
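For reference, the recurring pattern across these excerpts is building a topology directly and reading it back through its accessors. A tiny usage sketch of that round trip, where readCh and writeCh stand in for whatever ConsistentHash instances a test builds (as in the mocked example above):

      CacheTopology topology = new CacheTopology(1, readCh, writeCh);
      assert topology.getTopologyId() == 1;
      ConsistentHash currentCH = topology.getCurrentCH();   // the consistent hash currently in use (readCh here)
      ConsistentHash pendingCH = topology.getPendingCH();   // writeCh here; would be null if no rebalance were in progress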
