Package com.linkedin.d2.balancer.clients

Examples of com.linkedin.d2.balancer.clients.TrackerClient
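TrackerClient wraps a transport client for a single host URI and records per-call statistics (through its CallTracker and Degrader) that the degrader load balancer strategies use to weight hosts on the consistent hash ring. A minimal construction sketch, assuming the five-argument constructor used in the test snippets below (URI, per-partition weights, the wrapped client, a clock, and an optional degrader config; TestLoadBalancerClient and TestClock are the test stubs that appear later on this page):

    URI uri = URI.create("http://test.linkedin.com:3242/fdsaf");
    Map<Integer, PartitionData> partitionDataMap = new HashMap<Integer, PartitionData>();
    partitionDataMap.put(0, new PartitionData(1d)); // partition 0, weight 1.0
    TrackerClient client = new TrackerClient(uri, partitionDataMap,
        new TestLoadBalancerClient(uri), new TestClock(), null);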


      for (URI possibleUri : possibleUris)
      {
        // skip this URI if it's banned
        if (!serviceProperties.isBanned(possibleUri))
        {
          TrackerClient possibleTrackerClient = _state.getClient(serviceName, possibleUri);

          if (possibleTrackerClient != null)
          {
            clientsToLoadBalance.add(possibleTrackerClient);
          }
    // ... (full code truncated)


                                            List<LoadBalancerState.SchemeStrategyPair> orderedStrategies,
                                            ServiceProperties serviceProperties)
          throws ServiceUnavailableException
  {
    // now try to find a tracker client for the URI
    TrackerClient trackerClient = null;
    URI targetHost = KeyMapper.TargetHostHints.getRequestContextTargetHost(requestContext);
    int partitionId = -1;
    URI requestUri = request.getURI();

    if (targetHost == null)
    // ... (full code truncated)
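The target-host hint read above is set by callers on the RequestContext before the request is issued; when present, the strategy skips hashing and pins the call to that host. A short sketch of the caller side, assuming KeyMapper.TargetHostHints also exposes the matching setter:

    RequestContext requestContext = new RequestContext();
    // pin this request to a specific host; the strategy will look this URI up
    // among the tracker clients instead of consulting the hash ring
    KeyMapper.TargetHostHints.setRequestContextTargetHost(requestContext,
        URI.create("http://test.linkedin.com:3242/fdsaf"));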

    TestClock clock1 = new TestClock();
    TestClock clock2 = new TestClock();
    TestClock clock3 = new TestClock();


    @SuppressWarnings("serial")
    TrackerClient client1 =  new TrackerClient(uri1,
        new HashMap<Integer, PartitionData>(){{put(0, new PartitionData(1d));}},
        new TestLoadBalancerClient(uri1), clock1, null);
    @SuppressWarnings("serial")
    TrackerClient client2 =  new TrackerClient(uri2,
        new HashMap<Integer, PartitionData>(){{put(0, new PartitionData(0.5d)); put(1, new PartitionData(0.5d));}},
        new TestLoadBalancerClient(uri2), clock2, null);
    @SuppressWarnings("serial")
    TrackerClient client3 =  new TrackerClient(uri3,
        new HashMap<Integer, PartitionData>(){{put(1, new PartitionData(1d));}},
        new TestLoadBalancerClient(uri3), clock3, null);


    final int partitionId0 = 0;
    clientsForPartition0.add(client1);
    clientsForPartition0.add(client2);

    final int partitionId1 = 1;
    clientsForPartition1.add(client2);
    clientsForPartition1.add(client3);

    // force client2 to degrade: with minCallCount 1 and highErrorRate 0, a single
    // failed call pushes its computed drop rate up by upStep (0.4)
    DegraderControl dcClient2Partition0 = client2.getDegraderControl(0);
    DegraderControl dcClient2Partition1 = client2.getDegraderControl(1);
    dcClient2Partition0.setOverrideMinCallCount(1);
    dcClient2Partition0.setMinCallCount(1);
    dcClient2Partition0.setMaxDropRate(1d);
    dcClient2Partition0.setUpStep(0.4d);
    dcClient2Partition0.setHighErrorRate(0);

    dcClient2Partition1.setOverrideMinCallCount(1);
    dcClient2Partition1.setMinCallCount(1);
    dcClient2Partition1.setMaxDropRate(1d);
    dcClient2Partition1.setUpStep(0.4d);
    dcClient2Partition1.setHighErrorRate(0);
    CallCompletion cc = client2.getCallTracker().startCall();
    clock2.addMs(1);
    cc.endCallWithError();

    // force client3 to degrade the same way, with upStep 0.2
    DegraderControl dcClient3Partition1 = client3.getDegraderControl(1);
    dcClient3Partition1.setOverrideMinCallCount(1);
    dcClient3Partition1.setMinCallCount(1);
    dcClient3Partition1.setMaxDropRate(1d);
    dcClient3Partition1.setHighErrorRate(0);
    dcClient3Partition1.setUpStep(0.2d);
    CallCompletion cc3 = client3.getCallTracker().startCall();
    clock3.addMs(1);
    cc3.endCallWithError();

    clock1.addMs(15000);
    clock2.addMs(5000);
    clock3.addMs(5000);

    // trigger a state update
    assertNotNull(strategy.getTrackerClient(null, new RequestContext(), 1, partitionId0, clientsForPartition0));
    assertNotNull(strategy.getTrackerClient(null, new RequestContext(), 1, partitionId1, clientsForPartition1));
    assertNotNull(strategy.getRing(1, partitionId0, clientsForPartition0));
    assertNotNull(strategy.getRing(1, partitionId1, clientsForPartition1));

    ConsistentHashRing<URI> ring0 =
        (ConsistentHashRing<URI>) strategy.getState().getPartitionState(partitionId0).getRing();

    ConsistentHashRing<URI> ring1 =
        (ConsistentHashRing<URI>) strategy.getState().getPartitionState(partitionId1).getRing();

    Map<URI, AtomicInteger> count0 = new HashMap<URI, AtomicInteger>();

    count0.put(uri1, new AtomicInteger(0));
    count0.put(uri2, new AtomicInteger(0));

    for (Point<URI> point : ring0.getPoints())
    {
      count0.get(point.getT()).incrementAndGet();
    }

    // 0.4 degradation on a 0.5-weighted node should reduce client2 to 30 points: 100 * (1 - 0.4) * 0.5 = 30
    assertEquals(count0.get(uri1).get(), 100);
    assertEquals(count0.get(uri2).get(), 30);

    Map<URI, AtomicInteger> count1 = new HashMap<URI, AtomicInteger>();

    count1.put(uri2, new AtomicInteger(0));
    count1.put(uri3, new AtomicInteger(0));
    count1.put(uri4, new AtomicInteger(0));

    for (Point<URI> point : ring1.getPoints())
    {
      count1.get(point.getT()).incrementAndGet();
    }

    // 0.4 degradation on a 0.5-weighted node reduces client2 to 30 points: 100 * (1 - 0.4) * 0.5 = 30
    // 0.2 degradation on a 1.0-weighted node reduces client3 to 80 points: 100 * (1 - 0.2) * 1.0 = 80
    assertEquals(count1.get(uri3).get(), 80);
    assertEquals(count1.get(uri2).get(), 30);
    // uri4 should get no points because it has no weight specified for this partition
    assertEquals(count1.get(uri4).get(), 0);

    // basic check that getTrackerClient weights selections in proportion to ring points
    int calls = 10000;
    int client1Count = 0;
    int client2Count = 0;
    double tolerance = 0.05d;

    for (int i = 0; i < calls; ++i)
    {
      TrackerClient client = strategy.getTrackerClient(null, new RequestContext(), 1, partitionId0, clientsForPartition0);

      assertNotNull(client);

      if (client.getUri().equals(uri1))
      {
        ++client1Count;
      }
      else
      {
        ++client2Count;
      }
    }

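    // with 100 and 30 ring points, expect roughly 100/130 and 30/130 of the calls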
    assertTrue(Math.abs((client1Count / (double)calls) - (100 / 130d)) < tolerance);
    assertTrue(Math.abs((client2Count / (double)calls) - (30 / 130d)) < tolerance);


    client2Count = 0;
    int client3Count = 0;
    int client4Count = 0;

    for (int i = 0; i < calls; ++i)
    {
      TrackerClient client = strategy.getTrackerClient(null, new RequestContext(), 1, partitionId1, clientsForPartition1);

      assertNotNull(client);

      if (client.getUri().equals(uri3))
      {
        ++client3Count;
      }
      else if (client.getUri().equals(uri2))
      {
        ++client2Count;
      }
      else
      {
    // ... (full code truncated)

    List<TrackerClient> clients = new ArrayList<TrackerClient>();
    URI uri1 = URI.create("http://test.linkedin.com:3242/fdsaf");
    URI uri2 = URI.create("http://test.linkedin.com:3243/fdsaf");
    TestClock clock1 = new TestClock();
    TestClock clock2 = new TestClock();
    TrackerClient client1 =
        new TrackerClient(uri1, getDefaultPartitionData(1d), new TestLoadBalancerClient(uri1), clock1, null);
    TrackerClient client2 =
        new TrackerClient(uri2, getDefaultPartitionData(0.8d), new TestLoadBalancerClient(uri2), clock2, null);

    clients.add(client1);
    clients.add(client2);

    DegraderControl dcClient2Default = client2.getDegraderControl(DEFAULT_PARTITION_ID);
    dcClient2Default.setOverrideMinCallCount(1);
    dcClient2Default.setMinCallCount(1);
    dcClient2Default.setMaxDropRate(1d);
    dcClient2Default.setUpStep(0.4d);
    dcClient2Default.setHighErrorRate(0);
    CallCompletion cc = client2.getCallTracker().startCall();
    clock2.addMs(1);
    cc.endCallWithError();

    clock1.addMs(15000);
    clock2.addMs(5000);

    System.err.println(dcClient2Default.getCurrentComputedDropRate());
    System.err.println(client1.getDegraderControl(DEFAULT_PARTITION_ID).getCurrentComputedDropRate());

    // trigger a state update
    assertNotNull(getTrackerClient(strategy, null, new RequestContext(), 1, clients));

    // now verify that the ring has degraded client2: a 0.4 drop rate on a
    // 0.8-weighted node leaves it with 48 of its 80 baseline points
    ConsistentHashRing<URI> ring =
        (ConsistentHashRing<URI>) strategy.getState().getPartitionState(DEFAULT_PARTITION_ID).getRing();

    Map<URI, AtomicInteger> count = new HashMap<URI, AtomicInteger>();

    count.put(uri1, new AtomicInteger(0));
    count.put(uri2, new AtomicInteger(0));

    for (Point<URI> point : ring.getPoints())
    {
      count.get(point.getT()).incrementAndGet();
    }

    System.err.println(count);

    // 0.4 degradation on a 0.8-weighted node should reduce client2 to 48
    // points: 100 * (1 - 0.4) * 0.8 = 48
    assertEquals(count.get(uri1).get(), 100);
    assertEquals(count.get(uri2).get(), 48);

    // basic check that getTrackerClient weights selections in proportion to ring points
    double calls = 10000d;
    int client1Count = 0;
    int client2Count = 0;
    double tolerance = 0.05d;

    for (int i = 0; i < calls; ++i)
    {
      TrackerClient client = getTrackerClient(strategy, null, new RequestContext(), 1, clients);

      assertNotNull(client);

      if (client.getUri().equals(uri1))
      {
        ++client1Count;
      }
      else
      {
    // ... (full code truncated)
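Every expected point count in these assertions follows the same arithmetic: points = pointsPerWeight * partitionWeight * (1 - dropRate), with pointsPerWeight = 100 in these tests. A small sketch of that calculation (the helper name is ours, not from the library):

    // hypothetical helper illustrating the ring-point arithmetic asserted above
    static int expectedPoints(int pointsPerWeight, double partitionWeight, double dropRate)
    {
      return (int) (pointsPerWeight * partitionWeight * (1.0 - dropRate));
    }

    // expectedPoints(100, 0.5, 0.4) == 30   (client2, partition 0)
    // expectedPoints(100, 1.0, 0.2) == 80   (client3, partition 1)
    // expectedPoints(100, 0.8, 0.4) == 48   (client2, default partition)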

    else
    {
      debug(_log, "Degrader honoring target host header in request, skipping hashing.  URI: " + targetHostUri.toString());
    }

    TrackerClient client = null;

    if (targetHostUri != null)
    {
      // These are the clients that were passed in, NOT necessarily the clients that make up the
      // consistent hash ring! Therefore, this linear scan is the best we can do.
      for (TrackerClient trackerClient : trackerClients)
      {
        if (trackerClient.getUri().equals(targetHostUri))
        {
          client = trackerClient;
          break;
        }
      }

      if (client == null)
      {
        warn(_log, "No client found for " + targetHostUri + (hostHeaderUri == null ?
                ", degrader load balancer state is inconsistent with cluster manager" :
                ", target host specified is no longer part of cluster"));
      }
    }
    else
    {
      warn(_log, "unable to find a URI to use");
    }

    boolean dropCall = client == null;

    if (!dropCall)
    {
      dropCall = client.getDegrader(DEFAULT_PARTITION_ID).checkDrop();

      if (dropCall)
      {
        warn(_log, "client's degrader is dropping call for: ", client);
      }
    // ... (full code truncated)

      return false;
    }
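    // return false unless every tracker client holds its full complement of
    // ring points (partition weight * pointsPerWeight)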
    Map<URI, Integer> pointsMap = newState.getPointsMap();
    for (TrackerClientUpdater clientUpdater : trackerClientUpdaters)
    {
      TrackerClient client = clientUpdater.getTrackerClient();
      int perfectHealth = (int) (client.getPartitionWeight(DEFAULT_PARTITION_ID) * config.getPointsPerWeight());
      Integer point = pointsMap.get(client.getUri());
      if (point < perfectHealth)
      {
        return false;
      }
    }
    // ... (full code truncated)

                                                         DegraderLoadBalancerStrategyConfig config)
  {
    List<String> unhealthyClients = new ArrayList<String>();
    for (TrackerClientUpdater clientUpdater : trackerClientUpdaters)
    {
      TrackerClient client = clientUpdater.getTrackerClient();
      int perfectHealth = (int) (client.getPartitionWeight(DEFAULT_PARTITION_ID) * config.getPointsPerWeight());
      Integer point = pointsMap.get(client.getUri());
      if (point < perfectHealth)
      {
        unhealthyClients.add(client.getUri() + ":" + point + "/" + perfectHealth);
      }
    }
    return unhealthyClients;
  }
    // ... (full code truncated)

    int pointsPerWeight = config.getPointsPerWeight();
    DegraderLoadBalancerState newState;

    for (TrackerClientUpdater clientUpdater : trackerClientUpdaters)
    {
      TrackerClient client = clientUpdater.getTrackerClient();
      double averageLatency = client.getDegraderControl(DEFAULT_PARTITION_ID).getLatency();
      long callCount = client.getDegraderControl(DEFAULT_PARTITION_ID).getCallCount();

      oldState.getPreviousMaxDropRate().put(client, clientUpdater.getMaxDropRate());
      sumOfClusterLatencies += averageLatency * callCount;
      totalClusterCallCount += callCount;
      double clientDropRate = client.getDegraderControl(DEFAULT_PARTITION_ID).getCurrentComputedDropRate();
      computedClusterDropSum += client.getPartitionWeight(DEFAULT_PARTITION_ID) * clientDropRate;

      computedClusterWeight += client.getPartitionWeight(DEFAULT_PARTITION_ID);

      boolean recoveryMapContainsClient = newRecoveryMap.containsKey(client);

      // The following block of code calculates and updates the maxDropRate if the client had been
      // fully degraded in the past and has not received any requests since being fully degraded.
      // To increase the chances of the client receiving a request, we change the maxDropRate, which
      // influences the maximum value of computedDropRate, which is used to compute the number of
      // points in the hash ring for the clients.
      if (callCount == 0)
      {
        // if this client is enrolled in the program, decrease the maxDropRate
        // it is important to note that this excludes clients that haven't gotten traffic
        // due solely to low volume.
        if (recoveryMapContainsClient)
        {
          // if it's the hash ring's turn to adjust, then adjust the maxDropRate.
          // Otherwise, we let the call dropping strategy take its turn, even if
          // it may do nothing.
          if (strategy == DegraderLoadBalancerState.Strategy.LOAD_BALANCE)
          {
            double oldMaxDropRate = clientUpdater.getMaxDropRate();
            double transmissionRate = 1.0 - oldMaxDropRate;
            if (transmissionRate <= 0.0)
            {
              // We use the initialRecoveryLevel to indicate how many points to initially set
              // the tracker client to when traffic has stopped flowing to this node.
              transmissionRate = initialRecoveryLevel;
            }
            else
            {
              transmissionRate *= ringRampFactor;
              transmissionRate = Math.min(transmissionRate, 1.0);
            }
            double newMaxDropRate = 1.0 - transmissionRate;

            clientUpdater.setMaxDropRate(newMaxDropRate);
          }
          recoveryMapChanges = true;
        }
      } // else we don't need to change the client's maxDropRate.
      else if (recoveryMapContainsClient)
      {
        // the recovery map contains the client and the call count was > 0

        // tough love here: once the rehab clients start taking traffic, we
        // restore their maxDropRate to its original value and unenroll them
        // from the program.
        // This is safe because the hash ring points are controlled by the
        // computedDropRate variable, and the call dropping rate is controlled by
        // the overrideDropRate. The maxDropRate only serves to cap the computedDropRate and
        // overrideDropRate.
        // We store the maxDropRate and restore it here because the initialRecoveryLevel could
        // potentially be higher than what the default maxDropRate allowed (the maxDropRate doesn't
        // necessarily have to be 1.0). For instance, if the maxDropRate was 0.99 and the
        // initialRecoveryLevel was 0.05, then we need to store the old maxDropRate.
        clientUpdater.setMaxDropRate(newRecoveryMap.get(client));
        newRecoveryMap.remove(client);
        recoveryMapChanges = true;
      }
    }

    double computedClusterDropRate = computedClusterDropSum / computedClusterWeight;
    debug(_log, "total cluster call count: ", totalClusterCallCount);
    debug(_log,
          "computed cluster drop rate for ",
          trackerClientUpdaters.size(),
          " nodes: ",
          computedClusterDropRate);

    if (oldState.getClusterGenerationId() == clusterGenerationId
        && totalClusterCallCount <= 0 && !recoveryMapChanges)
    {
      // if the cluster has not been called recently (total cluster call count is <= 0)
      // and we already have a state with the same set of URIs (same cluster generation),
      // and no clients are in rehab, then don't change anything.
      debug(_log, "New state is the same as the old state so we're not changing anything. Old state = ", oldState
          , ", config=", config);

      return new DegraderLoadBalancerState(oldState, clusterGenerationId, config.getUpdateIntervalMs(),
                                           config.getClock().currentTimeMillis());
    }

    // update our overrides.
    double newCurrentAvgClusterLatency = -1;
    if (totalClusterCallCount > 0)
    {
      newCurrentAvgClusterLatency = sumOfClusterLatencies / totalClusterCallCount;
    }

    debug(_log, "average cluster latency: ", newCurrentAvgClusterLatency);

    // This points map stores how many hash ring points to allocate to each tracker client.

    Map<URI, Integer> points = new HashMap<URI, Integer>();
    Map<URI, Integer> oldPointsMap = oldState.getPointsMap();

    for (TrackerClientUpdater clientUpdater : trackerClientUpdaters)
    {
      TrackerClient client = clientUpdater.getTrackerClient();
      double successfulTransmissionWeight;
      URI clientUri = client.getUri();

      // Don't take into account cluster health when calculating the number of points
      // for each client. This is because the individual clients already take into account
      // latency, and a successfulTransmissionWeight can and should be made
      // independent of other nodes in the cluster. Otherwise, one unhealthy client in a small
      // cluster can take down the entire cluster if the avg latency is too high.
      // The global drop rate will take into account the cluster latency. High cluster-wide error
      // rates are not something d2 can address.
      //
      // this client's maxDropRate and currentComputedDropRate may have been adjusted if it's in the
      // rehab program (to gradually send traffic its way).
      double dropRate = Math.min(client.getDegraderControl(DEFAULT_PARTITION_ID).getCurrentComputedDropRate(),
                                 clientUpdater.getMaxDropRate());

      // calculate the weight as the probability of successful transmission to this
      // node divided by the probability of successful transmission to the entire
      // cluster
      successfulTransmissionWeight = client.getPartitionWeight(DEFAULT_PARTITION_ID) * (1.0 - dropRate);

      // calculate the weight as the probability of a successful transmission to this node
      // multiplied by the client's self-defined weight. thus, the node's final weight
      // takes into account both the self defined weight (to account for different
      // hardware in the same cluster) and the performance of the node (as defined by the
    // ... (full code truncated)
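The recovery ("rehab") logic above ramps a fully degraded client back up geometrically: the allowed transmission rate starts at initialRecoveryLevel and is multiplied by ringRampFactor on each LOAD_BALANCE turn until the client sees traffic again. A standalone sketch of just that ramp, mirroring the block above (the method wrapper is ours):

    static double rampMaxDropRate(double oldMaxDropRate, double initialRecoveryLevel,
                                  double ringRampFactor)
    {
      double transmissionRate = 1.0 - oldMaxDropRate;
      if (transmissionRate <= 0.0)
      {
        // traffic has fully stopped; seed the client with the initial recovery level
        transmissionRate = initialRecoveryLevel;
      }
      else
      {
        transmissionRate = Math.min(transmissionRate * ringRampFactor, 1.0);
      }
      return 1.0 - transmissionRate;
    }

    // e.g. with initialRecoveryLevel = 0.01 and ringRampFactor = 2:
    // maxDropRate 1.0 -> 0.99 -> 0.98 -> 0.96 -> 0.92 -> ... until calls arrive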

  public static void overrideMinCallCount(double newOverrideDropRate, List<TrackerClientUpdater> trackerClientUpdaters,
                                   Map<URI,Integer> pointsMap, int pointsPerWeight)
  {
    for (TrackerClientUpdater clientUpdater : trackerClientUpdaters)
    {
      TrackerClient client = clientUpdater.getTrackerClient();
      int currentOverrideMinCallCount = client.getDegraderControl(DEFAULT_PARTITION_ID).getOverrideMinCallCount();
      double hashFactor = pointsMap.get(client.getUri()) / (double) pointsPerWeight;
      double transmitFactor = 1.0 - newOverrideDropRate;
      int newOverrideMinCallCount = (int) Math.max(Math.round(client.getDegraderControl(DEFAULT_PARTITION_ID).getMinCallCount() *
                                                       hashFactor * transmitFactor), 1);

      if (newOverrideMinCallCount != currentOverrideMinCallCount)
      {
        clientUpdater.setOverrideMinCallCount(newOverrideMinCallCount);
        warn(_log,
             "overriding Min Call Count to ",
             newOverrideMinCallCount,
             " for client: ",
             client.getUri());
      }
    }
  }
    // ... (full code truncated)
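A quick worked example of the formula above, with illustrative values: 48 ring points, pointsPerWeight = 100, newOverrideDropRate = 0.2, and a configured minCallCount of 10:

    // hashFactor     = 48 / 100.0 = 0.48
    // transmitFactor = 1.0 - 0.2  = 0.8
    // newOverrideMinCallCount = max(round(10 * 0.48 * 0.8), 1) = max(round(3.84), 1) = 4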

    RandomLoadBalancerStrategy rrLoadBalancer = lbFactory.newLoadBalancer("unused",
                                                                          Collections.<String, Object>emptyMap(),
                                                                          null);
    Map<Integer, PartitionData> partitionDataMap = new HashMap<Integer, PartitionData>(2);
    partitionDataMap.put(DefaultPartitionAccessor.DEFAULT_PARTITION_ID, new PartitionData(1d));
    TrackerClient trackerClient1 =
        new TrackerClient(URI.create("http://www.google.com:567/foo/bar"), partitionDataMap, null);
    TrackerClient trackerClient2 =
        new TrackerClient(URI.create("http://www.amazon.com:567/foo/bar"), partitionDataMap, null);
    List<TrackerClient> trackerClients = new ArrayList<TrackerClient>();

    trackerClients.add(trackerClient1);
    trackerClients.add(trackerClient2);
    // ... (full code truncated)
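Once built, the clients are handed to the strategy just as in the degrader tests above; a minimal selection call might look like this, assuming RandomLoadBalancerStrategy implements the same getTrackerClient signature used throughout this page:

    TrackerClient chosen = rrLoadBalancer.getTrackerClient(null, new RequestContext(), 0,
        DefaultPartitionAccessor.DEFAULT_PARTITION_ID, trackerClients);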
