Examples of TrackerClient


Examples of com.linkedin.d2.balancer.clients.TrackerClient

    final Map<TrackerClient,Integer> serverCounts = new HashMap<TrackerClient, Integer>();

    for (int i = 0; i < NUM_URIS; i++)
    {
      URIRequest request = new URIRequest("d2://fooService/this/is/a/test/" + i);
      TrackerClient lastClient = null;
      for (int j = 0; j < NUM_CHECKS; j++)
      {
        TrackerClient client = getTrackerClient(strategy, request, new RequestContext(), 0, clients);
        assertNotNull(client);
        if (lastClient != null)
        {
          assertEquals(client, lastClient);
        }
View Full Code Here

Examples of com.linkedin.d2.balancer.clients.TrackerClient

    Map<TrackerClient, Integer> serverCounts = new HashMap<TrackerClient, Integer>();
    RestRequestBuilder builder = new RestRequestBuilder(URI.create("d2://fooservice"));
    final int NUM_REQUESTS=100;
    for (int ii=0; ii<NUM_REQUESTS; ++ii)
    {
      TrackerClient client = getTrackerClient(strategy, builder.build(), new RequestContext(), 0, clients);
      Integer count = serverCounts.get(client);
      if (count == null)
      {
        count = 0;
      }
      serverCounts.put(client, count + 1);
    }

    //First, check that requests are normally evenly distributed.
    Assert.assertEquals(serverCounts.size(), NUM_SERVERS);

    serverCounts.clear();
    RestRequest request = builder.build();

    RequestContext context = new RequestContext();
    KeyMapper.TargetHostHints.setRequestContextTargetHost(context, clients.get(0).getUri());

    for (int ii=0; ii<NUM_REQUESTS; ++ii)
    {
      TrackerClient client = getTrackerClient(strategy, request, context, 0, clients);
      Integer count = serverCounts.get(client);
      if (count == null)
      {
        count = 0;
      }
View Full Code Here

Examples of com.linkedin.d2.balancer.clients.TrackerClient

    RestRequest request = builder.build();
    RequestContext context = new RequestContext();
    KeyMapper.TargetHostHints.setRequestContextTargetHost(context, URI.create("http://notinclientlist.testing:9876/foobar"));


    TrackerClient client = getTrackerClient(strategy, request, context, 0, clients);

    Assert.assertNull(client);

  }
View Full Code Here

Examples of com.linkedin.d2.balancer.clients.TrackerClient

    DegraderImpl.Config degraderConfig = DegraderConfigFactory.toDegraderConfig(degraderProperties);
    final List<TrackerClient> clients = new ArrayList<TrackerClient>();
    for (int i = 0; i < numberOfPartitions; i++)
    {
      URI uri = URI.create(baseUri + i);
      TrackerClient client =   new TrackerClient(uri,
                                                 getDefaultPartitionData(1, numberOfPartitions),
                                                 new TestLoadBalancerClient(uri), testClock, degraderConfig);
      clients.add(client);
    }
View Full Code Here

Examples of com.linkedin.d2.balancer.clients.TrackerClient

  }

  private List<Runnable> createRaceCondition(final URI uri, Clock clock, final DegraderLoadBalancerStrategyV3 strategy, final CountDownLatch joinLatch)
  {
    final CountDownLatch clientLatch = new CountDownLatch(1);
    TrackerClient evilClient = new EvilClient(uri, getDefaultPartitionData(1, 2), new TrackerClientTest.TestClient(),
                                              clock, null, clientLatch);
    final List<TrackerClient> clients = Collections.singletonList(evilClient);
    final Runnable update = new Runnable()
    {
      @Override
View Full Code Here

Examples of com.linkedin.d2.balancer.clients.TrackerClient

      List<TrackerClient> clients = new ArrayList<TrackerClient>();
      URI uri1 = URI.create("http://test.linkedin.com:3242/fdsaf");
      URIRequest request = new URIRequest(uri1);

      TrackerClient client1 =
              new TrackerClient(uri1, getDefaultPartitionData(1d), new TestLoadBalancerClient(uri1), clock, null);

      clients.add(client1);

      // force client1 to be disabled
      DegraderControl dcClient1Default = client1.getDegraderControl(DEFAULT_PARTITION_ID);
      dcClient1Default.setOverrideMinCallCount(5);
      dcClient1Default.setMinCallCount(5);
      dcClient1Default.setMaxDropRate(1d);
      dcClient1Default.setUpStep(1.0d);

      List<CallCompletion> ccList = new ArrayList<CallCompletion>();
      CallCompletion cc;
      for (int j = 0; j < NUM_CHECKS; j++)

      {
        cc = client1.getCallTracker().startCall();

        ccList.add(cc);
      }

      // add high latency and errors to shut off traffic to this tracker client.
      clock.addMs(3500);

      for (Iterator<CallCompletion> iter = ccList.listIterator(); iter.hasNext();)
      {
        cc = iter.next();
        cc.endCallWithError();
        iter.remove();
      }

      // go to next time interval.
      clock.addMs(TIME_INTERVAL);

      Assert.assertEquals(dcClient1Default.getCurrentComputedDropRate(), 1.0);

      // trigger a state update
      TrackerClient resultTC = getTrackerClient(strategy, request, new RequestContext(), 1, clients);

      // now we mimic the high latency and force the state to drop all calls so to make
      // the overrideClusterDropRate to 1.0
      ccList = new ArrayList<CallCompletion>();
      for (int j = 0; j < NUM_CHECKS; j++)
View Full Code Here

Examples of com.linkedin.d2.balancer.clients.TrackerClient

    List<TrackerClient> clients = new ArrayList<TrackerClient>();
    URI uri1 = URI.create("http://test.linkedin.com:3242/fdsaf");
    URIRequest request = new URIRequest(uri1);

    TrackerClient client1 =
            new TrackerClient(uri1, getDefaultPartitionData(1d), new TestLoadBalancerClient(uri1), clock, null);

    clients.add(client1);

    // force client1 to be disabled
    DegraderControl dcClient1Default = client1.getDegraderControl(DEFAULT_PARTITION_ID);
    dcClient1Default.setOverrideMinCallCount(5);
    dcClient1Default.setMinCallCount(5);
    dcClient1Default.setMaxDropRate(1d);
    dcClient1Default.setUpStep(1.0d);

    List<CallCompletion> ccList = new ArrayList<CallCompletion>();
    CallCompletion cc;
    for (int j = 0; j < NUM_CHECKS; j++)

    {
      cc = client1.getCallTracker().startCall();

      ccList.add(cc);
    }

    // add high latency and errors to shut off traffic to this tracker client.
    // note: the default values for highError and lowError in the degrader are 1.1,
    // which means we don't use errorRates when deciding when to lb/degrade.
    // In addition, because we changed to use the
    clock.addMs(3500);
    //for (int j = 0; j < NUM_CHECKS; j++)
    for (Iterator<CallCompletion> iter = ccList.listIterator(); iter.hasNext();)
    {
      cc = iter.next();
      cc.endCallWithError();
      iter.remove();
    }

    // go to next time interval.
    clock.addMs(TIME_INTERVAL);

    Assert.assertEquals(dcClient1Default.getCurrentComputedDropRate(), 1.0);

    // trigger a state update
    TrackerClient resultTC = getTrackerClient(strategy, request, new RequestContext(), 1, clients);
    if (config.getInitialRecoveryLevel() < 0.01)
    {
      //the returned TrackerClient should be null
      assertNull(resultTC,"expected null trackerclient");

      // In the next time interval, the load balancer should reintroduce the TC
      // back into the ring because there was an entire time interval where no calls went to this
      // tracker client, so it's time to try it out. We need to enter this code at least once.
      do
      {
        // go to next time interval.
        clock.addMs(TIME_INTERVAL);
        // try adjusting the hash ring on this updateState
        if (strategyV3 != null)
        {
          strategy.setStrategyV3(DEFAULT_PARTITION_ID, strategyV3);
        }
        else if (strategyV2 != null)
        {
          strategy.setStrategyV2(strategyV2);
        }
        else
        {
          fail("should set strategy (either LoadBalance or Degrader");
        }
        resultTC = getTrackerClient(strategy, request, new RequestContext(), 1, clients);
        localStepsToFullRecovery--;
      }
      while (localStepsToFullRecovery > 0);
    }
    assertNotNull(resultTC,"expected non-null trackerclient");

    // make calls to the tracker client to verify that it's on the road to healthy status.
    for (int j = 0; j < NUM_CHECKS; j++)
    {
      cc = resultTC.getCallTracker().startCall();
      ccList.add(cc);
    }

    clock.addMs(10);
View Full Code Here

Examples of com.linkedin.d2.balancer.clients.TrackerClient

    String baseUri = "http://test.linkedin.com:10010/abc";
    List<TrackerClient> result = new LinkedList<TrackerClient>();
    for (int i = 0; i < n; i++)
    {
      URI uri = URI.create(baseUri + i);
      TrackerClient client =   new TrackerClient(uri,
                                                 getDefaultPartitionData(1d),
                                                 new TestLoadBalancerClient(uri), clock, config);
      result.add(client);
    }
    return result;
View Full Code Here

Examples of com.linkedin.d2.balancer.clients.TrackerClient

    callClients(latency, qps, clients, clock, timeInterval, isCalledWithError, isCalledWithErrorForLoadBalancing);
    //create any random URIRequest because we just need a URI to be hashed to get the point in hash ring anyway
    if (clients != null && !clients.isEmpty())
    {
      URIRequest request = new URIRequest(clients.get(0).getUri());
      TrackerClient client = getTrackerClient(adapter, request, new RequestContext(), clusterGenerationId, clients);
      Map<URI, Integer> pointsMap = adapter.getPointsMap();
      for (TrackerClient trackerClient : clients)
      {
        Integer pointsInTheRing = pointsMap.get(trackerClient.getUri());
        assertEquals(pointsInTheRing, expectedPointsPerClient);
View Full Code Here

Examples of com.linkedin.d2.balancer.clients.TrackerClient

    double overrideDropRate = 0.0;

    //simulate latency 4000 ms
    //1st round we use LOAD_BALANCING strategy. Since we have a high latency we will decrease the number of points
    //from 100 to 80 (transmissionRate * points per weight).
    TrackerClient resultTC = simulateAndTestOneInterval(timeInterval, clock, qps, clients, adapter, clusterGenerationId,
                                                        80, true, 0.0, 4000, false, false);
    assertNotNull(resultTC);

    //2nd round drop rate should be increased by DegraderLoadBalancerStrategyConfig.DEFAULT_GLOBAL_STEP_UP
    overrideDropRate += DegraderLoadBalancerStrategyConfig.DEFAULT_GLOBAL_STEP_UP;
    resultTC = simulateAndTestOneInterval(timeInterval, clock, qps, clients, adapter, clusterGenerationId,
                                          80, false,
                                          overrideDropRate, 4000, false, false);

    //3rd round. We alternate back to LOAD_BALANCING strategy and we drop the points even more
    resultTC = simulateAndTestOneInterval(timeInterval, clock, qps, clients, adapter, clusterGenerationId,
                                          39, true,
                                          overrideDropRate, 4000, false, false);

    //4th round. The drop rate should be increased again like 2nd round
    overrideDropRate += DegraderLoadBalancerStrategyConfig.DEFAULT_GLOBAL_STEP_UP;
    resultTC = simulateAndTestOneInterval(timeInterval, clock, qps, clients, adapter, clusterGenerationId,
                                          39, false,
                                          overrideDropRate, 4000, false, false);

    //5th round. Alternate to changing hash ring again.
    resultTC = simulateAndTestOneInterval(timeInterval, clock, qps, clients, adapter, clusterGenerationId,
                                              1, true,
                                              overrideDropRate, 4000, false, false);

    //6th round. Same as 5th round, we'll increase the drop rate
    overrideDropRate += DegraderLoadBalancerStrategyConfig.DEFAULT_GLOBAL_STEP_UP;
    resultTC = simulateAndTestOneInterval(timeInterval, clock, qps, clients, adapter, clusterGenerationId,
                                              1, false,
                                              overrideDropRate, 4000, false, false);

    //7th round. The # of point in hashring is at the minimum so we can't decrease it further. At this point the client
    //is in recovery mode. But since we can't change the hashring anymore, we'll always be in CALL_DROPPING mode
    //so the next strategy is expected to be LOAD_BALANCING mode.
    overrideDropRate += DegraderLoadBalancerStrategyConfig.DEFAULT_GLOBAL_STEP_UP;
    resultTC = simulateAndTestOneInterval(timeInterval, clock, qps, clients, adapter, clusterGenerationId,
                                              1, false,
                                              overrideDropRate, 4000, false, false);

    //8th round. We'll increase the drop rate to the max.
    overrideDropRate += DegraderLoadBalancerStrategyConfig.DEFAULT_GLOBAL_STEP_UP;
    resultTC = simulateAndTestOneInterval(timeInterval, clock, qps, clients, adapter, clusterGenerationId,
                                              1, false,
                                              overrideDropRate, 4000, false, false);

    //9th round, now we'll simulate as if there still a call even though we drop 100% of all request to get
    //tracker client. The assumption is there's some thread that still holds tracker client and we want
    //to make sure we can handle the request and we can't degrade the cluster even further.
    resultTC = simulateAndTestOneInterval(timeInterval, clock, qps, clients, adapter, clusterGenerationId,
                                                  1, false,
                                                  overrideDropRate, 4000, false, false);

    //10th round, now we'll simulate as if there's no call because we dropped all request
    //even though we are in LOAD_BALANCING mode and this tracker client is in recovery mode and there's no call
    //so the hashring doesn't change so we go back to reducing the drop rate to 0.8 and that means the next
    //strategy is LOAD_BALANCE
    overrideDropRate -= DegraderLoadBalancerStrategyConfig.DEFAULT_GLOBAL_STEP_DOWN;
    resultTC = simulateAndTestOneInterval(timeInterval, clock, 0.0, clients, adapter, clusterGenerationId,
                                                      1, false,
                                                      overrideDropRate, 4000, false, false);

    //11th round, this time we'll simulate the latency is now 1000 ms (so it's within low and high watermark). Drop rate
    //should stay the same and everything else should stay the same
    resultTC = simulateAndTestOneInterval(timeInterval, clock, qps, clients, adapter, clusterGenerationId,
                                                      1, false,
                                                      overrideDropRate, 1000, false, false);

    //we'll simulate the client dying one by one until all the clients are gone
    int numberOfClients = clients.size();
    HashSet<URI> uris = new HashSet<URI>();
    HashSet<URI> removedUris = new HashSet<URI>();
    for (TrackerClient client : clients)
    {
      uris.add(client.getUri());
    }
    LinkedList<TrackerClient> removedClients = new LinkedList<TrackerClient>();
    //loadBalancing strategy will always be picked because there is no hash ring changes
    boolean isLoadBalancingStrategyTurn = true;
    for(int i = numberOfClients; i > 0; i--)
    {
      TrackerClient removed = clients.remove(0);
      uris.remove(removed.getUri());
      removedClients.addLast(removed);
      removedUris.add(removed.getUri());
      clusterGenerationId++;
      resultTC = simulateAndTestOneInterval(timeInterval, clock, qps, clients, adapter, clusterGenerationId,
                                            1, isLoadBalancingStrategyTurn, overrideDropRate, 1000, false, false);
      if (i == 1)
      {
        assertNull(resultTC);
      }
      else
      {
        //if the tracker client is not dropped by overrideClusterDropRate (which could be true because at this point
        //the override drop rate is 0.8)
        if (resultTC != null)
        {
          assertTrue(uris.contains(resultTC.getUri()));
          assertFalse(removedUris.contains(resultTC.getUri()));
        }
      }
    }
    assertTrue(uris.isEmpty());
    assertTrue(clients.isEmpty());
    assertEquals(removedUris.size(), numberOfClients);
    assertEquals(removedClients.size(), numberOfClients);
    //we'll simulate the client start reviving one by one until all clients are back up again
    for (int i = numberOfClients; i > 0 ; i--)
    {
      TrackerClient added = removedClients.remove(0);
      //we have to create a new client. The old client has a degraded DegraderImpl. And in a production environment
      //when a new client joins a cluster, it should be in good state. This means there should be 100 points
      //in the hash ring for this client
      TrackerClient newClient = new TrackerClient(added.getUri(),
                                                  getDefaultPartitionData(1d),
                                                  new TestLoadBalancerClient(added.getUri()), clock, degraderConfig);
      clients.add(newClient);
      uris.add(added.getUri());
      removedUris.remove(added.getUri());
View Full Code Here
TOP
Copyright © 2018 www.massapi.com. All rights reserved.
All source code are property of their respective owners. Java is a trademark of Sun Microsystems, Inc and owned by ORACLE Inc. Contact coftware#gmail.com.