Examples of TrackerClient


Examples of com.linkedin.d2.balancer.clients.TrackerClient

    List<TrackerClient> clients = new ArrayList<TrackerClient>();
    URI uri1 = URI.create("http://test.linkedin.com:3242/fdsaf");
    URIRequest request = new URIRequest(uri1);

    TrackerClient client1 =
            new TrackerClient(uri1, getDefaultPartitionData(1d), new TestLoadBalancerClient(uri1), clock, null);

    clients.add(client1);

    DegraderControl dcClient1Default = client1.getDegraderControl(DEFAULT_PARTITION_ID);
    dcClient1Default.setOverrideMinCallCount(5);
    dcClient1Default.setMinCallCount(5);

    List<CallCompletion> ccList = new ArrayList<CallCompletion>();
    CallCompletion cc;

    TrackerClient resultTC = getTrackerClient(strategy, request, new RequestContext(), 1, clients);
    // The override drop rate should be zero at this point.
    assertEquals(dcClient1Default.getOverrideDropRate(), 0.0);

    // make high latency calls to the tracker client; the override drop rate only changes
    // when updateState runs at the next interval
    for (int j = 0; j < NUM_CHECKS; j++)
    {
      cc = client1.getCallTracker().startCall();
      ccList.add(cc);
    }

    clock.addMs((long)highWaterMark);

    for (Iterator<CallCompletion> iter = ccList.listIterator(); iter.hasNext();)
    {
      cc = iter.next();
      cc.endCall();
      iter.remove();
    }

    // go to next time interval.
    clock.addMs(timeInterval);

    // try call dropping on the next updateState
    strategy.setStrategy(DEFAULT_PARTITION_ID,
        DegraderLoadBalancerStrategyV3.PartitionDegraderLoadBalancerState.Strategy.CALL_DROPPING);
    resultTC = getTrackerClient(strategy, request, new RequestContext(), 1, clients);
    // we now expect that the override drop rate stepped up because updateState
    // made that decision.
    assertEquals(dcClient1Default.getOverrideDropRate(), globalStepUp);

    // make mid latency calls to the tracker client, verify the override drop rate doesn't change
    for (int j = 0; j < NUM_CHECKS; j++)
    {
      // need to use client1 because the resultTC may be null
      cc = client1.getCallTracker().startCall();
      ccList.add(cc);
    }

    clock.addMs((long)highWaterMark - 1);

    for (Iterator<CallCompletion> iter = ccList.listIterator(); iter.hasNext();)
    {
      cc = iter.next();
      cc.endCall();
      iter.remove();
    }

    // go to next time interval.
    clock.addMs(timeInterval);

    double previousOverrideDropRate = dcClient1Default.getOverrideDropRate();

    // try call dropping on the next updateState
    strategy.setStrategy(DEFAULT_PARTITION_ID,
        DegraderLoadBalancerStrategyV3.PartitionDegraderLoadBalancerState.Strategy.CALL_DROPPING);
    resultTC = getTrackerClient(strategy, request, new RequestContext(), 1, clients);
    assertEquals(dcClient1Default.getOverrideDropRate(), previousOverrideDropRate);

    // make low latency calls to the tracker client, verify the override drop rate decreases
    for (int j = 0; j < NUM_CHECKS; j++)
    {
      cc = client1.getCallTracker().startCall();
      ccList.add(cc);
    }

    clock.addMs((long)lowWaterMark);

    for (Iterator<CallCompletion> iter = ccList.listIterator(); iter.hasNext();)
    {
      cc = iter.next();
      cc.endCall();
      iter.remove();
    }

    // go to next time interval.
    clock.addMs(timeInterval);

    // try call dropping on this updateState
    strategy.setStrategy(DEFAULT_PARTITION_ID,
        DegraderLoadBalancerStrategyV3.PartitionDegraderLoadBalancerState.Strategy.CALL_DROPPING);
    resultTC = getTrackerClient(strategy, request, new RequestContext(), 1, clients);
    assertEquals(resultTC.getDegraderControl(DEFAULT_PARTITION_ID).getOverrideDropRate(), 0.0);
  }
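The test above drives the degrader by recording calls through the client's CallTracker while advancing a controllable clock. A minimal sketch of that record-then-advance pattern, assuming the same settable test clock used in the snippet:

    // Record one call with ~200 ms of simulated latency.
    CallCompletion call = client1.getCallTracker().startCall();
    clock.addMs(200);  // advance the test clock by 200 ms
    call.endCall();    // the call tracker records the elapsed time as latency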

Examples of com.linkedin.d2.balancer.clients.TrackerClient

    URIRequest request = new URIRequest(uri1);

    List<CallCompletion> ccList = new ArrayList<CallCompletion>();
    CallCompletion cc;

    TrackerClient client1 =
            new TrackerClient(uri1, getDefaultPartitionData(1d), new TestLoadBalancerClient(uri1), clock, null);
    TrackerClient client2 =
            new TrackerClient(uri2, getDefaultPartitionData(1d), new TestLoadBalancerClient(uri2), clock, null);

    clients.add(client1);
    clients.add(client2);

    // force client1 to be disabled if we encounter errors/high latency
    DegraderControl dcClient1Default = client1.getDegraderControl(DEFAULT_PARTITION_ID);
    dcClient1Default.setMinCallCount(5);
    dcClient1Default.setOverrideMinCallCount(5);
    dcClient1Default.setUpStep(1.0);
    // degrade client2 in 0.4 steps if we encounter errors/high latency
    DegraderControl dcClient2Default = client2.getDegraderControl(DEFAULT_PARTITION_ID);
    dcClient2Default.setOverrideMinCallCount(5);
    dcClient2Default.setMinCallCount(5);
    dcClient2Default.setUpStep(0.4);

    // Have one cycle of successful calls to verify valid tracker clients returned.
    // try load balancing on this updateState, need to updateState before forcing the strategy.
    TrackerClient resultTC = getTrackerClient(strategy, request, new RequestContext(), 1, clients);
    strategy.setStrategy(DEFAULT_PARTITION_ID,
        DegraderLoadBalancerStrategyV3.PartitionDegraderLoadBalancerState.Strategy.LOAD_BALANCE);
    resultTC = getTrackerClient(strategy, request, new RequestContext(), 1, clients);
    assertNotNull(resultTC, "expected non-null trackerclient");
    for (int j = 0; j < NUM_CHECKS; j++)

Examples of com.linkedin.d2.balancer.clients.TrackerClient

    URIRequest request = new URIRequest(uri1);

    List<CallCompletion> ccList = new ArrayList<CallCompletion>();
    CallCompletion cc;

    TrackerClient client1 =
            new TrackerClient(uri1, getDefaultPartitionData(1d), new TestLoadBalancerClient(uri1), clock, null);

    clients.add(client1);

    // force client1 to be disabled if we encounter errors/high latency
    DegraderControl dcClient1Default = client1.getDegraderControl(DEFAULT_PARTITION_ID);
    dcClient1Default.setOverrideMinCallCount(5);
    dcClient1Default.setMinCallCount(5);
    dcClient1Default.setUpStep(1.0);
    dcClient1Default.setHighErrorRate(0);

    // Issue high latency calls to reduce client1 to the minimum number of hash points allowed.
    // (1 in this case)
    TrackerClient resultTC = getTrackerClient(strategy, request, new RequestContext(), 1, clients);
    assertNotNull(resultTC, "expected non-null trackerclient");
    for (int j = 0; j < NUM_CHECKS; j++)
    {
      cc = resultTC.getCallTracker().startCall();

      ccList.add(cc);
    }

    clock.addMs(3500);

Examples of com.linkedin.d2.balancer.clients.TrackerClient

  public static TrackerClient getClient(URI uri)
  {
    Map<Integer, PartitionData> partitionDataMap = new HashMap<Integer, PartitionData>(2);
    partitionDataMap.put(DefaultPartitionAccessor.DEFAULT_PARTITION_ID, new PartitionData(1d));
    return new TrackerClient(uri, partitionDataMap, new TestLoadBalancerClient(uri));
  }
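A hedged usage sketch of the helper above (the URI value is illustrative):

    TrackerClient client = getClient(URI.create("http://test.linkedin.com:1234/foo"));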

Examples of com.linkedin.d2.balancer.clients.TrackerClient

  public static TrackerClient getClient(URI uri, Clock clock)
  {
    Map<Integer, PartitionData> partitionDataMap = new HashMap<Integer, PartitionData>(2);
    partitionDataMap.put(DefaultPartitionAccessor.DEFAULT_PARTITION_ID, new PartitionData(1d));
    return new TrackerClient(uri, partitionDataMap, new TestLoadBalancerClient(uri), clock, null);
  }
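This overload also takes a Clock, which lets tests control time explicitly. A usage sketch, assuming a settable test clock with an addMs method like the one used in the earlier snippets (the TestClock name is an assumption):

    TestClock clock = new TestClock();
    TrackerClient client = getClient(URI.create("http://test.linkedin.com:1234/foo"), clock);
    clock.addMs(1000);  // advance the client's clock by one second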

Examples of com.linkedin.d2.balancer.clients.TrackerClient

    else
    {
      debug(_log, "Degrader honoring target host header in request, skipping hashing. URI: " + targetHostUri);
    }

    TrackerClient client = null;

    if (targetHostUri != null)
    {
      // These are the clients that were passed in, NOT necessarily the clients that make up the
      // consistent hash ring! Therefore, this linear scan is the best we can do.
      for (TrackerClient trackerClient : trackerClients)
      {
        if (trackerClient.getUri().equals(targetHostUri))
        {
          client = trackerClient;
          break;
        }
      }

      if (client == null)
      {
        warn(_log, "No client found for " + targetHostUri + (hostHeaderUri == null ?
                ", degrader load balancer state is inconsistent with cluster manager" :
                ", target host specified is no longer part of cluster"));
      }
    }
    else
    {
      warn(_log, "unable to find a URI to use");
    }

    boolean dropCall = client == null;

    if (!dropCall)
    {
      dropCall = client.getDegrader(partitionId).checkDrop();

      if (dropCall)
      {
        warn(_log, "client's degrader is dropping call for: ", client);
      }
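Read in isolation, the target-host lookup above is a simple linear scan. A standalone sketch (the method name is hypothetical):

    // Returns the tracker client whose URI matches targetHostUri, or null if none
    // was passed in. A linear scan is acceptable because the list holds the clients
    // handed to this call, not the full consistent hash ring.
    private static TrackerClient findClientByUri(List<TrackerClient> trackerClients, URI targetHostUri)
    {
      for (TrackerClient trackerClient : trackerClients)
      {
        if (trackerClient.getUri().equals(targetHostUri))
        {
          return trackerClient;
        }
      }
      return null;
    }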

Examples of com.linkedin.d2.balancer.clients.TrackerClient

      return false;
    }
    Map<URI, Integer> pointsMap = newState.getPointsMap();
    for (TrackerClientUpdater clientUpdater : trackerClientUpdaters)
    {
      TrackerClient client = clientUpdater.getTrackerClient();
      int perfectHealth = (int) (client.getPartitionWeight(partitionId) * config.getPointsPerWeight());
      Integer point = pointsMap.get(client.getUri());
      if (point < perfectHealth)
      {
        return false;
      }
    }

Examples of com.linkedin.d2.balancer.clients.TrackerClient

                                                         int partitionId)
  {
    List<String> unhealthyClients = new ArrayList<String>();
    for (TrackerClientUpdater clientUpdater : trackerClientUpdaters)
    {
      TrackerClient client = clientUpdater.getTrackerClient();
      int perfectHealth = (int) (client.getPartitionWeight(partitionId) * config.getPointsPerWeight());
      Integer point = pointsMap.get(client.getUri());
      if (point < perfectHealth)
      {
        unhealthyClients.add(client.getUri() + ":" + point + "/" + perfectHealth);
      }
    }
    return unhealthyClients;
  }
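Each entry pairs a client URI with its current and ideal point counts. A worked example with hypothetical values: a client with partition weight 1.0 and pointsPerWeight 100 has a perfectHealth of 100 points, so if it currently holds 60 points in the map the method adds:

    // "http://test.linkedin.com:3242/fdsaf:60/100"  (uri:point/perfectHealth)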

Examples of com.linkedin.d2.balancer.clients.TrackerClient

    int pointsPerWeight = config.getPointsPerWeight();
    PartitionDegraderLoadBalancerState newState;

    for (TrackerClientUpdater clientUpdater : trackerClientUpdaters)
    {
      TrackerClient client = clientUpdater.getTrackerClient();
      DegraderControl degraderControl = client.getDegraderControl(partitionId);
      double averageLatency = degraderControl.getLatency();
      long callCount = degraderControl.getCallCount();
      oldState.getPreviousMaxDropRate().put(client, clientUpdater.getMaxDropRate());
      double clientWeight =  client.getPartitionWeight(partitionId);

      sumOfClusterLatencies += averageLatency * callCount;
      totalClusterCallCount += callCount;
      clientDropRate = degraderControl.getCurrentComputedDropRate();
      computedClusterDropSum += clientWeight * clientDropRate;

      computedClusterWeight += clientWeight;

      boolean recoveryMapContainsClient = newRecoveryMap.containsKey(client);

      // The following block of code calculates and updates the maxDropRate if the client had been
      // fully degraded in the past and has not received any requests since being fully degraded.
      // To increase the chances of the client receiving a request, we change the maxDropRate, which
      // influences the maximum value of computedDropRate, which is used to compute the number of
      // points in the hash ring for the clients.
      if (callCount == 0)
      {
        // if this client is enrolled in the program, decrease the maxDropRate
        // it is important to note that this excludes clients that haven't gotten traffic
        // due solely to low volume.
        if (recoveryMapContainsClient)
        {
          double oldMaxDropRate = clientUpdater.getMaxDropRate();
          double transmissionRate = 1.0 - oldMaxDropRate;
          if (transmissionRate <= 0.0)
          {
            // We use the initialRecoveryLevel to indicate how many points to initially set
            // the tracker client to when traffic has stopped flowing to this node.
            transmissionRate = initialRecoveryLevel;
          }
          else
          {
            transmissionRate *= ringRampFactor;
            transmissionRate = Math.min(transmissionRate, 1.0);
          }
          newMaxDropRate = 1.0 - transmissionRate;

          if (strategy == PartitionDegraderLoadBalancerState.Strategy.LOAD_BALANCE)
          {
            // if it's the hash ring's turn to adjust, then adjust the maxDropRate.
            // Otherwise, we let the call dropping strategy take its turn, even if
            // it may do nothing.
            clientUpdater.setMaxDropRate(newMaxDropRate);
          }
          recoveryMapChanges = true;
        }
      }
      else if (recoveryMapContainsClient)
      {
        // else if the recovery map contains the client and the call count was > 0

        // tough love here: once the rehab clients start taking traffic, we
        // restore their maxDropRate to its original value, and unenroll them
        // from the program.
        // This is safe because the hash ring points are controlled by the
        // computedDropRate variable, and the call dropping rate is controlled by
        // the overrideDropRate. The maxDropRate only serves to cap the computedDropRate and
        // overrideDropRate.
        // We store the maxDropRate and restore it here because the initialRecoveryLevel could
        // potentially be higher than what the default maxDropRate allowed. (The maxDropRate doesn't
        // necessarily have to be 1.0.) For instance, if the maxDropRate was 0.99 and the
        // initialRecoveryLevel was 0.05, then we need to store the old maxDropRate.
        clientUpdater.setMaxDropRate(newRecoveryMap.get(client));
        newRecoveryMap.remove(client);
        recoveryMapChanges = true;
      }
    }

    computedClusterDropRate = computedClusterDropSum / computedClusterWeight;
    debug(_log, "total cluster call count: ", totalClusterCallCount);
    debug(_log,
          "computed cluster drop rate for ",
          trackerClientUpdaters.size(),
          " nodes: ",
          computedClusterDropRate);

    if (oldState.getClusterGenerationId() == clusterGenerationId
        && totalClusterCallCount <= 0 && !recoveryMapChanges)
    {
      // if the cluster has not been called recently (total cluster call count is <= 0)
      // and we already have a state with the same set of URIs (same cluster generation),
      // and no clients are in rehab, then don't change anything.
      debug(_log, "New state is the same as the old state so we're not changing anything. Old state = ", oldState
          ,", config= ", config);
      return new PartitionDegraderLoadBalancerState(oldState, clusterGenerationId,
                                                    config.getClock().currentTimeMillis());
    }

    // update our overrides.
    double newCurrentAvgClusterLatency = -1;

    if (totalClusterCallCount > 0)
    {
      newCurrentAvgClusterLatency = sumOfClusterLatencies / totalClusterCallCount;
    }

    debug(_log, "average cluster latency: ", newCurrentAvgClusterLatency);

    // compute points for every node in the cluster
    double computedClusterSuccessRate = computedClusterWeight - computedClusterDropRate;

    // This points map stores how many hash ring points to allocate for each tracker client.

    Map<URI, Integer> points = new HashMap<URI, Integer>();
    Map<URI, Integer> oldPointsMap = oldState.getPointsMap();

    for (TrackerClientUpdater clientUpdater : trackerClientUpdaters)
    {
      TrackerClient client = clientUpdater.getTrackerClient();
      double successfulTransmissionWeight;
      URI clientUri = client.getUri();

      // Don't take into account cluster health when calculating the number of points
      // for each client. This is because the individual clients already take into account
      // latency and errors, and a successfulTransmissionWeight can and should be made
      // independent of other nodes in the cluster. Otherwise, one unhealthy client in a small
      // cluster can take down the entire cluster if the avg latency is too high.
      // The global drop rate will take into account the cluster latency. High cluster-wide error
      // rates are not something d2 can address.
      //
      // This client's maxDropRate and currentComputedDropRate may have been adjusted if it's in the
      // rehab program (to gradually send traffic its way).
      DegraderControl degraderControl = client.getDegraderControl(partitionId);
      double dropRate = Math.min(degraderControl.getCurrentComputedDropRate(),
                                 clientUpdater.getMaxDropRate());

      // calculate the weight as the probability of successful transmission to this
      // node divided by the probability of successful transmission to the entire
      // cluster
      double clientWeight = client.getPartitionWeight(partitionId);
      successfulTransmissionWeight = clientWeight * (1.0 - dropRate);

      // calculate the weight as the probability of a successful transmission to this node
      // multiplied by the client's self-defined weight. Thus, the node's final weight
      // takes into account both the self-defined weight (to account for different
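The recovery ramp in the first half of this snippet can be illustrated with concrete numbers. A hedged sketch, assuming initialRecoveryLevel = 0.01 and ringRampFactor = 2.0 (both values are configurable, so these are illustrative only):

    // Each LOAD_BALANCE cycle with zero calls doubles the transmission rate.
    double maxDropRate = 1.0;                        // client is fully degraded
    for (int cycle = 1; cycle <= 5; cycle++)
    {
      double transmissionRate = 1.0 - maxDropRate;
      transmissionRate = (transmissionRate <= 0.0)
          ? 0.01                                     // initialRecoveryLevel
          : Math.min(transmissionRate * 2.0, 1.0);   // ringRampFactor, capped at 1.0
      maxDropRate = 1.0 - transmissionRate;
      // maxDropRate after cycles 1..5: 0.99, 0.98, 0.96, 0.92, 0.84
    }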

Examples of com.linkedin.d2.balancer.clients.TrackerClient

  public static void overrideMinCallCount(int partitionId, double newOverrideDropRate, List<TrackerClientUpdater> trackerClientUpdaters,
                                   Map<URI,Integer> pointsMap, int pointsPerWeight)
  {
    for (TrackerClientUpdater clientUpdater : trackerClientUpdaters)
    {
      TrackerClient client = clientUpdater.getTrackerClient();
      DegraderControl degraderControl = client.getDegraderControl(partitionId);
      int currentOverrideMinCallCount = degraderControl.getOverrideMinCallCount();
      // cast to double to avoid integer division truncating the hash factor
      double hashFactor = pointsMap.get(client.getUri()) / (double) pointsPerWeight;
      double transmitFactor = 1.0 - newOverrideDropRate;
      int newOverrideMinCallCount = (int) Math.max(Math.round(degraderControl.getMinCallCount() *
                                                       hashFactor * transmitFactor), 1);

      if (newOverrideMinCallCount != currentOverrideMinCallCount)
      {
        clientUpdater.setOverrideMinCallCount(newOverrideMinCallCount);
        warn(_log,
             "partitionId=",
             partitionId,
             "overriding Min Call Count to ",
             newOverrideMinCallCount,
             " for client: ",
             client.getUri());
      }
    }
  }
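A worked example of the computation above, using hypothetical values: minCallCount = 10, 50 points in the map against pointsPerWeight = 100 (hashFactor = 0.5), and newOverrideDropRate = 0.2 (transmitFactor = 0.8):

    // newOverrideMinCallCount = max(round(10 * 0.5 * 0.8), 1)
    //                         = max(round(4.0), 1)
    //                         = 4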