Package org.apache.helix.api

Examples of org.apache.helix.api.Cluster
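
The snippets below, drawn from the Helix controller pipeline and its tests, show how a Cluster snapshot is read through a ClusterAccessor, carried on the ClusterEvent "Cluster" attribute, and queried for its resources, participants, and state model definitions. As a minimal sketch of that read path (assuming an already-connected HelixConnection named connection and an existing ClusterId named clusterId, neither of which is shown in these snippets):

    // minimal sketch: `connection` and `clusterId` are assumed to exist already
    ClusterAccessor clusterAccessor = connection.createClusterAccessor(clusterId);
    Cluster cluster = clusterAccessor.readCluster();
    for (ResourceId resourceId : cluster.getResourceMap().keySet()) {
      // each Resource exposes its ideal state, rebalancer config, and current assignment
      Resource resource = cluster.getResource(resourceId);
    }
    Map<ParticipantId, Participant> participants = cluster.getParticipantMap();

The first snippet, from a unit test, builds a ClusterEvent and attaches a Cluster read through a mock ClusterAccessor before running resource computation: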


    // build a cluster event and attach a Cluster snapshot read through a (mock) ClusterAccessor
    ClusterEvent event = new ClusterEvent("testEvent");
    ClusterId clusterId = new ClusterId("sampleClusterId");
    ClusterAccessor clusterAccessor = new MockClusterAccessor(clusterId, accessor);
    Cluster cluster = clusterAccessor.readCluster();
    event.addAttribute("Cluster", cluster);
    Map<String, Map<String, String>> emptyMap = Maps.newHashMap();
    event.addAttribute(AttributeName.IDEAL_STATE_RULES.toString(), emptyMap);

    // run resource computation
View Full Code Here
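
Controller pipeline stages then read the snapshot back out of the event, together with the current state and the computed resource map, and fail fast when a required attribute is missing: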


    ResourceCurrentState currentStateOutput =
        event.getAttribute(AttributeName.CURRENT_STATE.toString());
    Map<ResourceId, ResourceConfig> resourceMap =
        event.getAttribute(AttributeName.RESOURCES.toString());
    Cluster cluster = event.getAttribute("Cluster");
    ClusterDataCache cache = event.getAttribute("ClusterDataCache");

    if (currentStateOutput == null || resourceMap == null || cluster == null) {
      throw new StageException("Missing attributes in event:" + event
          + ". Requires CURRENT_STATE|RESOURCES|Cluster");
View Full Code Here
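
ResourceComputationStage builds a ResourceConfig map from the cluster's resources, also carrying over resources that still appear in the current state after their ideal state has been removed: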

public class ResourceComputationStage extends AbstractBaseStage {
  private static final Logger LOG = Logger.getLogger(ResourceComputationStage.class);

  @Override
  public void process(ClusterEvent event) throws StageException {
    Cluster cluster = event.getAttribute("Cluster");
    if (cluster == null) {
      throw new StageException("Missing attributes in event: " + event + ". Requires Cluster");
    }

    Map<ResourceId, ResourceConfig> resCfgMap = new HashMap<ResourceId, ResourceConfig>();
    Map<ResourceId, ResourceConfig> csResCfgMap = getCurStateResourceCfgMap(cluster);

    // ideal-state may be removed, add all resource config in current-state but not in ideal-state
    for (ResourceId resourceId : csResCfgMap.keySet()) {
      if (!cluster.getResourceMap().containsKey(resourceId)) {
        resCfgMap.put(resourceId, csResCfgMap.get(resourceId));
      }
    }

    for (ResourceId resourceId : cluster.getResourceMap().keySet()) {
      Resource resource = cluster.getResource(resourceId);
      RebalancerConfig rebalancerCfg = resource.getRebalancerConfig();

      ResourceConfig.Builder resCfgBuilder = new ResourceConfig.Builder(resourceId);
      resCfgBuilder.schedulerTaskConfig(resource.getSchedulerTaskConfig());
      resCfgBuilder.rebalancerConfig(rebalancerCfg);
View Full Code Here
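
PersistAssignmentStage compares each best-possible ResourceAssignment against the assignment already stored on the corresponding Resource and collects only the changed assignments for writing back: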

    LOG.info("START PersistAssignmentStage.process()");
    long startTime = System.currentTimeMillis();

    ClusterDataCache cache = event.getAttribute("ClusterDataCache");
    if (cache.assignmentWriteEnabled()) {
      Cluster cluster = event.getAttribute("Cluster");
      HelixManager helixManager = event.getAttribute("helixmanager");
      HelixDataAccessor accessor = helixManager.getHelixDataAccessor();
      PropertyKey.Builder keyBuilder = accessor.keyBuilder();
      BestPossibleStateOutput assignments =
          event.getAttribute(AttributeName.BEST_POSSIBLE_STATE.toString());
      List<ResourceAssignment> changedAssignments = Lists.newLinkedList();
      List<PropertyKey> changedKeys = Lists.newLinkedList();
      for (ResourceId resourceId : assignments.getAssignedResources()) {
        ResourceAssignment assignment = assignments.getResourceAssignment(resourceId);
        Resource resource = cluster.getResource(resourceId);
        boolean toAdd = false;
        if (resource != null) {
          ResourceAssignment existAssignment = resource.getResourceAssignment();
          if (existAssignment == null || !existAssignment.equals(assignment)) {
            toAdd = true;
View Full Code Here
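
Outside the pipeline, a reporting helper obtains a ClusterAccessor from a HelixConnection, reads the Cluster, and prints its resources and participants: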

    if (!connection.isConnected()) {
      return "Unable to connect to cluster";
    }
    StringBuilder builder = new StringBuilder();
    ClusterAccessor clusterAccessor = connection.createClusterAccessor(clusterId);
    Cluster cluster = clusterAccessor.readCluster();
    Map<ParticipantId, Participant> participants = cluster.getParticipantMap();
    builder.append("AppName").append(TAB).append(clusterId).append(NEWLINE);
    Map<ResourceId, Resource> resources = cluster.getResourceMap();
    for (ResourceId resourceId : resources.keySet()) {
      builder.append("SERVICE").append(TAB).append(resourceId).append(NEWLINE);
      Resource resource = resources.get(resourceId);
      Map<ParticipantId, State> serviceStateMap = null;
      if (resource != null) {
View Full Code Here
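
A message-sending stage pulls the throttled message output, the best possible state, and the cluster's live participant map from the event, again validating that every required attribute is present: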

    Map<ResourceId, ResourceConfig> resourceMap =
        event.getAttribute(AttributeName.RESOURCES.toString());
    MessageOutput messageOutput = event.getAttribute(AttributeName.MESSAGES_THROTTLE.toString());
    BestPossibleStateOutput bestPossibleStateOutput =
        event.getAttribute(AttributeName.BEST_POSSIBLE_STATE.toString());
    Cluster cluster = event.getAttribute("Cluster");
    ClusterDataCache cache = event.getAttribute("ClusterDataCache");
    Map<ParticipantId, Participant> liveParticipantMap = cluster.getLiveParticipantMap();

    if (manager == null || resourceMap == null || messageOutput == null || cluster == null
        || cache == null || liveParticipantMap == null) {
      throw new StageException(
          "Missing attributes in event:"
View Full Code Here
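
The container provisioning stage uses the Cluster's participant map to reconcile container states (CONNECTED, DISCONNECTED, HALTED) and to drive the target provider and container provider when acquiring, starting, and releasing containers: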

            break;
          }
        }
        TargetProvider targetProvider = _targetProviderMap.get(resourceId);
        ContainerProvider containerProvider = _containerProviderMap.get(resourceId);
        final Cluster cluster = event.getAttribute("Cluster");
        final ClusterDataCache cache = event.getAttribute("ClusterDataCache");
        final Collection<Participant> participants = cluster.getParticipantMap().values();

        // If a process died, we need to mark it as DISCONNECTED or if the process is ready, mark as
        // CONNECTED
        Map<ParticipantId, Participant> participantMap = cluster.getParticipantMap();
        for (ParticipantId participantId : participantMap.keySet()) {
          Participant participant = participantMap.get(participantId);
          ContainerConfig config = participant.getContainerConfig();
          if (config != null) {
            ContainerState containerState = config.getState();
            if (!participant.isAlive() && ContainerState.CONNECTED.equals(containerState)) {
              // Need to mark as disconnected if process died
              LOG.info("Participant " + participantId + " died, marking as DISCONNECTED");
              updateContainerState(cache, accessor, keyBuilder, cluster, null, participantId,
                  ContainerState.DISCONNECTED);
            } else if (participant.isAlive() && ContainerState.CONNECTING.equals(containerState)) {
              // Need to mark as connected only when the live instance is visible
              LOG.info("Participant " + participantId + " is ready, marking as CONNECTED");
              updateContainerState(cache, accessor, keyBuilder, cluster, null, participantId,
                  ContainerState.CONNECTED);
            } else if (!participant.isAlive() && ContainerState.HALTING.equals(containerState)) {
              // Need to mark as halted once the killed process is no longer alive
              LOG.info("Participant " + participantId + " has been killed, marking as HALTED");
              updateContainerState(cache, accessor, keyBuilder, cluster, null, participantId,
                  ContainerState.HALTED);
            }
          }
        }

        // Participants registered in Helix are handed to the target provider, which
        // responds with the new container specs to acquire, the containers to be
        // released, and the containers to be stopped. The respective provisioner is
        // then called to allocate and start each container; once a container is
        // started, its state can be updated from any of these code paths. For each
        // container state there is a corresponding handler function.

        // TargetProvider should be stateless, given the state of cluster and existing participants
        // it should return the same result
        final TargetProviderResponse response =
            targetProvider.evaluateExistingContainers(cluster, resourceId, participants);

        // allocate new containers
        for (final ContainerSpec spec : response.getContainersToAcquire()) {
          final ParticipantId participantId = spec.getParticipantId();
          if (!cluster.getParticipantMap().containsKey(participantId)) {
            // create a new Participant, attach the container spec
            InstanceConfig instanceConfig = new InstanceConfig(participantId);
            instanceConfig.setInstanceEnabled(false);
            instanceConfig.setContainerSpec(spec);
            // create a helix_participant in ACQUIRING state
            instanceConfig.setContainerState(ContainerState.ACQUIRING);
            // create the helix participant and add it to cluster
            helixAdmin.addInstance(cluster.getId().toString(), instanceConfig);
            cache.requireFullRefresh();
          }
          LOG.info("Allocating container for " + participantId);
          ListenableFuture<ContainerId> future = containerProvider.allocateContainer(spec);
          FutureCallback<ContainerId> callback = new FutureCallback<ContainerId>() {
            @Override
            public void onSuccess(ContainerId containerId) {
              LOG.info("Container " + containerId + " acquired. Marking " + participantId);
              updateContainerState(cache, accessor, keyBuilder, cluster, containerId,
                  participantId, ContainerState.ACQUIRED);
            }

            @Override
            public void onFailure(Throwable t) {
              LOG.error("Could not allocate a container for participant " + participantId, t);
              updateContainerState(cache, accessor, keyBuilder, cluster, null, participantId,
                  ContainerState.FAILED);
            }
          };
          safeAddCallback(future, callback);
        }

        // start new containers
        for (final Participant participant : response.getContainersToStart()) {
          final ContainerId containerId = participant.getInstanceConfig().getContainerId();
          updateContainerState(cache, accessor, keyBuilder, cluster, null, participant.getId(),
              ContainerState.CONNECTING);
          // create the helix participant and add it to cluster
          LOG.info("Starting container " + containerId + " for " + participant.getId());
          ListenableFuture<Boolean> future =
              containerProvider.startContainer(containerId, participant);
          FutureCallback<Boolean> callback = new FutureCallback<Boolean>() {
            @Override
            public void onSuccess(Boolean result) {
              // Do nothing yet, need to wait for live instance
              LOG.info("Container " + containerId + " started for " + participant.getId());
            }

            @Override
            public void onFailure(Throwable t) {
              LOG.error("Could not start container" + containerId + "for participant "
                  + participant.getId(), t);
              updateContainerState(cache, accessor, keyBuilder, cluster, null, participant.getId(),
                  ContainerState.FAILED);
            }
          };
          safeAddCallback(future, callback);
        }

        // release containers
        for (final Participant participant : response.getContainersToRelease()) {
          // mark it as finalizing
          final ContainerId containerId = participant.getInstanceConfig().getContainerId();
          updateContainerState(cache, accessor, keyBuilder, cluster, null, participant.getId(),
              ContainerState.FINALIZING);
          // remove the participant
          LOG.info("Deallocating container " + containerId + " for " + participant.getId());
          ListenableFuture<Boolean> future = containerProvider.deallocateContainer(containerId);
          FutureCallback<Boolean> callback = new FutureCallback<Boolean>() {
            @Override
            public void onSuccess(Boolean result) {
              LOG.info("Container " + containerId + " deallocated. Dropping " + participant.getId());
              InstanceConfig existingInstance =
                  helixAdmin.getInstanceConfig(cluster.getId().toString(), participant.getId()
                      .toString());
              helixAdmin.dropInstance(cluster.getId().toString(), existingInstance);
              cache.requireFullRefresh();
            }

            @Override
            public void onFailure(Throwable t) {
View Full Code Here
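
ClusterAccessor can also apply a ClusterConfig.Delta and return the updated configuration by re-reading the cluster. A hypothetical usage, assuming a Delta named delta has been built elsewhere:

    // hypothetical usage: `delta` is a ClusterConfig.Delta constructed elsewhere
    ClusterConfig updated = clusterAccessor.updateCluster(delta);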

   * @param clusterDelta change to the cluster configuration
   * @return updated ClusterConfig, or null if there was an error
   */
  public ClusterConfig updateCluster(ClusterConfig.Delta clusterDelta) {
    clusterDelta.merge(_accessor);
    Cluster cluster = readCluster();
    return (cluster != null) ? cluster.getConfig() : null;
  }
View Full Code Here
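
readCluster itself assembles the snapshot from the resource, participant, controller, constraint, state model, user config, and controller context data it has read: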

    // read controller context
    Map<ContextId, ControllerContext> contextMap = readControllerContext(true);

    // create the cluster snapshot object
    return new Cluster(_clusterId, resourceMap, participantMap, controllerMap, leaderId,
        clusterConstraintMap, stateModelMap, contextMap, userConfig, isPaused, autoJoinAllowed,
        _cache);
  }
View Full Code Here
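
Finally, ResourceValidationStage drops any resource whose ideal state matches none of the configured ideal-state rules, or whose state model definition is not registered on the cluster: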

public class ResourceValidationStage extends AbstractBaseStage {
  private static final Logger LOG = Logger.getLogger(ResourceValidationStage.class);

  @Override
  public void process(ClusterEvent event) throws Exception {
    Cluster cluster = event.getAttribute("Cluster");
    if (cluster == null) {
      throw new StageException("Missing attributes in event:" + event + ". Requires Cluster");
    }
    Map<ResourceId, ResourceConfig> resourceConfigMap =
        event.getAttribute(AttributeName.RESOURCES.toString());
    if (resourceConfigMap == null) {
      throw new StageException("Resources must be computed prior to validation!");
    }
    Map<ResourceId, Resource> resourceMap = cluster.getResourceMap();
    Map<String, Map<String, String>> idealStateRuleMap =
        event.getAttribute(AttributeName.IDEAL_STATE_RULES.toString());

    for (ResourceId resourceId : resourceMap.keySet()) {
      // check every ideal state against the ideal state rules
      // the pipeline should not process any resources that have an unsupported ideal state
      IdealState idealState = resourceMap.get(resourceId).getIdealState();
      if (idealState == null) {
        continue;
      }
      if (idealStateRuleMap != null && !idealStateRuleMap.isEmpty()) {
        boolean hasMatchingRule = false;
        for (String ruleName : idealStateRuleMap.keySet()) {
          Map<String, String> rule = idealStateRuleMap.get(ruleName);
          boolean matches = idealStateMatchesRule(idealState, rule);
          hasMatchingRule = hasMatchingRule || matches;
          if (matches) {
            break;
          }
        }
        if (!hasMatchingRule) {
          LOG.warn("Resource " + resourceId + " does not have a valid ideal state!");
          resourceConfigMap.remove(resourceId);
        }
      }

      // check that every resource to process has a live state model definition
      StateModelDefId stateModelDefId = idealState.getStateModelDefId();
      StateModelDefinition stateModelDef = cluster.getStateModelMap().get(stateModelDefId);
      if (stateModelDef == null) {
        LOG.warn("Resource " + resourceId + " uses state model " + stateModelDefId
            + ", but it is not on the cluster!");
        resourceConfigMap.remove(resourceId);
      }
View Full Code Here
