Package org.apache.helix

Examples of org.apache.helix.HelixAdmin


        3, // replicas
        "MasterSlave", true); // do rebalance

    // set up a message constraint, e.g.:
    // "MESSAGE_TYPE=STATE_TRANSITION,TRANSITION=OFFLINE-SLAVE,INSTANCE=.*,CONSTRAINT_VALUE=1";
    HelixAdmin admin = new ZKHelixAdmin(_zkclient);
    ConstraintItemBuilder builder = new ConstraintItemBuilder();
    builder.addConstraintAttribute("MESSAGE_TYPE", "STATE_TRANSITION")
        .addConstraintAttribute("INSTANCE", ".*").addConstraintAttribute("CONSTRAINT_VALUE", "1");

    // Map<String, String> constraints = new TreeMap<String, String>();
    // constraints.put("MESSAGE_TYPE", "STATE_TRANSITION");
    // // constraints.put("TRANSITION", "OFFLINE-SLAVE");
    // constraints.put("CONSTRAINT_VALUE", "1");
    // constraints.put("INSTANCE", ".*");
    admin.setConstraint(clusterName, ConstraintType.MESSAGE_CONSTRAINT, "constraint1",
        builder.build());

    final ZKHelixDataAccessor accessor =
        new ZKHelixDataAccessor(clusterName, _baseAccessor);

View Full Code Here
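
For completeness, a brief sketch (not part of the snippet above, but using methods that exist on the HelixAdmin interface) of how the registered constraint could later be read back and removed:

    // read back all message constraints for the cluster
    ClusterConstraints constraints =
        admin.getConstraints(clusterName, ConstraintType.MESSAGE_CONSTRAINT);
    System.out.println("Message constraints: " + constraints);

    // remove the constraint by the id it was registered under
    admin.removeConstraint(clusterName, ConstraintType.MESSAGE_CONSTRAINT, "constraint1");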


      new HashMap<ResourceId, ContainerProvider>();

  @Override
  public void process(ClusterEvent event) throws Exception {
    final HelixManager helixManager = event.getAttribute("helixmanager");
    final HelixAdmin helixAdmin = helixManager.getClusterManagmentTool();
    final Map<ResourceId, ResourceConfig> resourceMap =
        event.getAttribute(AttributeName.RESOURCES.toString());
    final HelixDataAccessor accessor = helixManager.getHelixDataAccessor();
    final PropertyKey.Builder keyBuilder = accessor.keyBuilder();
    for (ResourceId resourceId : resourceMap.keySet()) {
      ResourceConfig resourceConfig = resourceMap.get(resourceId);
      ProvisionerConfig provisionerConfig = resourceConfig.getProvisionerConfig();
      if (provisionerConfig != null) {
        Provisioner provisioner;
        provisioner = _provisionerMap.get(resourceId);

        // instantiate and cache a provisioner if there isn't one already cached
        if (provisioner == null) {
          ProvisionerRef provisionerRef = provisionerConfig.getProvisionerRef();
          if (provisionerRef != null) {
            provisioner = provisionerRef.getProvisioner();
          }
          if (provisioner != null) {
            provisioner.init(helixManager, resourceConfig);
            _containerProviderMap.put(resourceId, provisioner.getContainerProvider());
            _targetProviderMap.put(resourceId, provisioner.getTargetProvider());
            _provisionerMap.put(resourceId, provisioner);
          } else {
            LOG.error("Resource " + resourceId + " does not have a valid provisioner class!");
            break;
          }
        }
        TargetProvider targetProvider = _targetProviderMap.get(resourceId);
        ContainerProvider containerProvider = _containerProviderMap.get(resourceId);
        final Cluster cluster = event.getAttribute("Cluster");
        final ClusterDataCache cache = event.getAttribute("ClusterDataCache");
        final Collection<Participant> participants = cluster.getParticipantMap().values();

        // If a process died, mark the participant DISCONNECTED; if the process
        // is ready, mark it CONNECTED
        Map<ParticipantId, Participant> participantMap = cluster.getParticipantMap();
        for (ParticipantId participantId : participantMap.keySet()) {
          Participant participant = participantMap.get(participantId);
          ContainerConfig config = participant.getContainerConfig();
          if (config != null) {
            ContainerState containerState = config.getState();
            if (!participant.isAlive() && ContainerState.CONNECTED.equals(containerState)) {
              // Need to mark as disconnected if process died
              LOG.info("Participant " + participantId + " died, marking as DISCONNECTED");
              updateContainerState(cache, accessor, keyBuilder, cluster, null, participantId,
                  ContainerState.DISCONNECTED);
            } else if (participant.isAlive() && ContainerState.CONNECTING.equals(containerState)) {
              // Need to mark as connected only when the live instance is visible
              LOG.info("Participant " + participantId + " is ready, marking as CONNECTED");
              updateContainerState(cache, accessor, keyBuilder, cluster, null, participantId,
                  ContainerState.CONNECTED);
            } else if (!participant.isAlive() && ContainerState.HALTING.equals(containerState)) {
              // Need to mark as halted once the halting process is gone
              LOG.info("Participant " + participantId + " has been killed, marking as HALTED");
              updateContainerState(cache, accessor, keyBuilder, cluster, null, participantId,
                  ContainerState.HALTED);
            }
          }
        }

        // Participants registered in Helix are handed to the target provider,
        // which returns a response containing new container specs, containers
        // to be released, and containers to be stopped; the respective
        // provisioner is then called to allocate and start containers. Once a
        // container is started, its state can be changed from any of these
        // paths. The target provider is given the state of each container and
        // asked for its new state, and each state has a corresponding handler
        // function.

        // TargetProvider should be stateless: given the state of the cluster
        // and the existing participants, it should return the same result
        final TargetProviderResponse response =
            targetProvider.evaluateExistingContainers(cluster, resourceId, participants);

        // allocate new containers
        for (final ContainerSpec spec : response.getContainersToAcquire()) {
          final ParticipantId participantId = spec.getParticipantId();
          if (!cluster.getParticipantMap().containsKey(participantId)) {
            // create a new Participant, attach the container spec
            InstanceConfig instanceConfig = new InstanceConfig(participantId);
            instanceConfig.setInstanceEnabled(false);
            instanceConfig.setContainerSpec(spec);
            // create a Helix participant in the ACQUIRING state
            instanceConfig.setContainerState(ContainerState.ACQUIRING);
            // create the helix participant and add it to cluster
            helixAdmin.addInstance(cluster.getId().toString(), instanceConfig);
            cache.requireFullRefresh();
          }
          LOG.info("Allocating container for " + participantId);
          ListenableFuture<ContainerId> future = containerProvider.allocateContainer(spec);
          FutureCallback<ContainerId> callback = new FutureCallback<ContainerId>() {
            @Override
            public void onSuccess(ContainerId containerId) {
              LOG.info("Container " + containerId + " acquired. Marking " + participantId);
              updateContainerState(cache, accessor, keyBuilder, cluster, containerId,
                  participantId, ContainerState.ACQUIRED);
            }

            @Override
            public void onFailure(Throwable t) {
              LOG.error("Could not allocate a container for participant " + participantId, t);
              updateContainerState(cache, accessor, keyBuilder, cluster, null, participantId,
                  ContainerState.FAILED);
            }
          };
          safeAddCallback(future, callback);
        }

        // start new containers
        for (final Participant participant : response.getContainersToStart()) {
          final ContainerId containerId = participant.getInstanceConfig().getContainerId();
          updateContainerState(cache, accessor, keyBuilder, cluster, null, participant.getId(),
              ContainerState.CONNECTING);
          // ask the container provider to start the container
          LOG.info("Starting container " + containerId + " for " + participant.getId());
          ListenableFuture<Boolean> future =
              containerProvider.startContainer(containerId, participant);
          FutureCallback<Boolean> callback = new FutureCallback<Boolean>() {
            @Override
            public void onSuccess(Boolean result) {
              // Do nothing yet, need to wait for live instance
              LOG.info("Container " + containerId + " started for " + participant.getId());
            }

            @Override
            public void onFailure(Throwable t) {
              LOG.error("Could not start container" + containerId + "for participant "
                  + participant.getId(), t);
              updateContainerState(cache, accessor, keyBuilder, cluster, null, participant.getId(),
                  ContainerState.FAILED);
            }
          };
          safeAddCallback(future, callback);
        }

        // release containers
        for (final Participant participant : response.getContainersToRelease()) {
          // mark it as finalizing
          final ContainerId containerId = participant.getInstanceConfig().getContainerId();
          updateContainerState(cache, accessor, keyBuilder, cluster, null, participant.getId(),
              ContainerState.FINALIZING);
          // remove the participant
          LOG.info("Deallocating container " + containerId + " for " + participant.getId());
          ListenableFuture<Boolean> future = containerProvider.deallocateContainer(containerId);
          FutureCallback<Boolean> callback = new FutureCallback<Boolean>() {
            @Override
            public void onSuccess(Boolean result) {
              LOG.info("Container " + containerId + " deallocated. Dropping " + participant.getId());
              InstanceConfig existingInstance =
                  helixAdmin.getInstanceConfig(cluster.getId().toString(), participant.getId()
                      .toString());
              helixAdmin.dropInstance(cluster.getId().toString(), existingInstance);
              cache.requireFullRefresh();
            }

            @Override
            public void onFailure(Throwable t) {
View Full Code Here
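
The helper safeAddCallback used above is not shown in this snippet; a minimal sketch of what it might do (an assumption, built on Guava's Futures.addCallback):

    // hypothetical sketch: attach the callback, routing registration failures
    // to the callback's onFailure handler instead of letting them propagate
    private <T> void safeAddCallback(ListenableFuture<T> future, FutureCallback<T> callback) {
      try {
        Futures.addCallback(future, callback);
      } catch (Throwable t) {
        callback.onFailure(t);
      }
    }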

    StatusPrinter statusPrinter = new StatusPrinter();
    statusPrinter.registerWith(helixController);
  }

  void startAdmin() throws Exception {
    HelixAdmin admin = new ZKHelixAdmin(_zkaddr);

    // create cluster
    System.out.println("Creating cluster: " + clusterName);
    admin.addCluster(clusterName, true);

    // add MasterSlave state model definition
    admin.addStateModelDef(clusterName, "MasterSlave", new StateModelDefinition(
        generateConfigForMasterSlave()));

    // ideal-state ZNRecord
    ZNRecord record = new ZNRecord(resourceName);
    record.setSimpleField("IDEAL_STATE_MODE", "AUTO");
    record.setSimpleField("NUM_PARTITIONS", "1");
    record.setSimpleField("REPLICAS", "2");
    record.setSimpleField("STATE_MODEL_DEF_REF", "MasterSlave");
    record.setListField(resourceName, Arrays.asList("node1", "node2"));

    admin.setResourceIdealState(clusterName, resourceName, new IdealState(record));

    ConstraintItemBuilder builder = new ConstraintItemBuilder();

    // allow at most one transition message at a time across the entire cluster
    builder.addConstraintAttribute("MESSAGE_TYPE", "STATE_TRANSITION")
    // .addConstraintAttribute("INSTANCE", ".*") // un-comment this line if using instance-level
    // constraint
        .addConstraintAttribute("CONSTRAINT_VALUE", "1");
    admin.setConstraint(clusterName, ClusterConstraints.ConstraintType.MESSAGE_CONSTRAINT,
        "constraint1", builder.build());
  }
View Full Code Here
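
Note that the ideal state above references "node1" and "node2" in its preference list; a hedged sketch (the host and port values are assumptions for illustration) of registering one of those instances with the same admin handle:

    // register an instance named in the ideal-state preference list
    InstanceConfig node1 = new InstanceConfig("node1");
    node1.setHostName("localhost"); // assumed host
    node1.setPort("12001");         // assumed port
    node1.setInstanceEnabled(true);
    admin.addInstance(clusterName, node1);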

    MockController controller =
        new MockController(_zkaddr, clusterName, "controller");
    controller.syncStart();

    // add an ideal-state rule (prevents non-CUSTOMIZED MasterSlave ideal states)
    HelixAdmin helixAdmin = controller.getClusterManagmentTool();
    Map<String, String> properties = Maps.newHashMap();
    properties.put("IdealStateRule!sampleRuleName",
        "IDEAL_STATE_MODE=CUSTOMIZED,STATE_MODEL_DEF_REF=MasterSlave");
    helixAdmin.setConfig(
        new HelixConfigScopeBuilder(ConfigScopeProperty.CLUSTER).forCluster(clusterName).build(),
        properties);

    // start participants
    MockParticipant[] participants = new MockParticipant[NUM_PARTICIPANTS];
View Full Code Here
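
A short sketch (an assumption, mirroring the write above) of verifying the rule with the matching cluster-scoped read:

    // read the rule back from the cluster-level config
    HelixConfigScope scope =
        new HelixConfigScopeBuilder(ConfigScopeProperty.CLUSTER).forCluster(clusterName).build();
    Map<String, String> readBack =
        helixAdmin.getConfig(scope, Arrays.asList("IdealStateRule!sampleRuleName"));
    System.out.println("Ideal state rule: " + readBack);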

    HelixManager controllerManager = null;
    Thread[] processArray;
    processArray = new Thread[numInstances];
    try {
      startLocalZookeeper(2199);
      HelixAdmin admin = new ZKHelixAdmin(zkAddress);
      admin.addCluster(clusterName, true);
      admin.addStateModelDef(clusterName, "OnlineOffline", new StateModelDefinition(
          StateModelConfigGenerator.generateConfigForOnlineOffline()));
      admin.addResource(clusterName, lockGroupName, numPartitions, "OnlineOffline",
          RebalanceMode.FULL_AUTO.toString());
      admin.rebalance(clusterName, lockGroupName, 1);
      for (int i = 0; i < numInstances; i++) {
        final String instanceName = "localhost_" + (12000 + i);
        processArray[i] = new Thread(new Runnable() {

          @Override
View Full Code Here
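
The Runnable body is truncated above; a typical participant thread for this OnlineOffline lock-group example might look like the following sketch (LockFactory is a hypothetical state model factory, named here only for illustration):

          @Override
          public void run() {
            try {
              HelixManager manager = HelixManagerFactory.getZKHelixManager(clusterName,
                  instanceName, InstanceType.PARTICIPANT, zkAddress);
              manager.getStateMachineEngine().registerStateModelFactory("OnlineOffline",
                  new LockFactory()); // hypothetical factory for the OnlineOffline model
              manager.connect();
              Thread.currentThread().join(); // keep the participant alive
            } catch (Exception e) {
              e.printStackTrace();
            }
          }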

    Assert.assertEquals(message.getTypedToState().toString(), "SLAVE");
    Assert.assertEquals(message.getTgtName(), "localhost_0");

    // round 2: drop the resource but keep the message; make sure the
    // controller does not send O->DROPPED until O->S is done
    HelixAdmin admin = new ZKHelixAdmin(_gZkClient);
    admin.dropResource(clusterName, resourceName);

    runPipeline(event, dataRefresh);
    runPipeline(event, rebalancePipeline);
    msgSelOutput = event.getAttribute(AttributeName.MESSAGES_SELECTED.toString());
    messages =
View Full Code Here

  void disablePartition() {
    String instanceName = _manager.getInstanceName();
    ResourceId resourceId = _message.getResourceId();
    PartitionId partitionId = _message.getPartitionId();
    String clusterName = _manager.getClusterName();
    HelixAdmin admin = _manager.getClusterManagmentTool();
    admin.enablePartition(false, clusterName, instanceName, resourceId.stringify(),
        Arrays.asList(partitionId.stringify()));
    logger.info("error in transit from ERROR to " + _message.getTypedToState() + " for partition: "
        + partitionId + ". disable it on " + instanceName);

  }
View Full Code Here
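
A complementary sketch (not in the original): once the underlying error is resolved, the same HelixAdmin API can reset the ERROR partition and re-enable it.

    // clear the ERROR state, then re-enable the partition
    admin.resetPartition(clusterName, instanceName, resourceId.stringify(),
        Arrays.asList(partitionId.stringify()));
    admin.enablePartition(true, clusterName, instanceName, resourceId.stringify(),
        Arrays.asList(partitionId.stringify()));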

    TestHelper.setupEmptyCluster(_gZkClient, clusterName);

    admin.connect();
    AssertJUnit.assertTrue(admin.isConnected());

    HelixAdmin adminTool = admin.getClusterManagmentTool();
    // ConfigScope scope = new ConfigScopeBuilder().forCluster(clusterName)
    // .forResource("testResource").forPartition("testPartition").build();
    HelixConfigScope scope =
        new HelixConfigScopeBuilder(ConfigScopeProperty.PARTITION).forCluster(clusterName)
            .forResource("testResource").forPartition("testPartition").build();

    Map<String, String> properties = new HashMap<String, String>();
    properties.put("pKey1", "pValue1");
    properties.put("pKey2", "pValue2");
    adminTool.setConfig(scope, properties);

    properties = adminTool.getConfig(scope, Arrays.asList("pKey1", "pKey2"));
    Assert.assertEquals(properties.size(), 2);
    Assert.assertEquals(properties.get("pKey1"), "pValue1");
    Assert.assertEquals(properties.get("pKey2"), "pValue2");

    admin.disconnect();
View Full Code Here
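
The inverse operation is also available on HelixAdmin; a brief sketch (assuming the same scope and keys as above) of dropping the configuration again:

    // remove the two partition-scoped keys written above
    adminTool.removeConfig(scope, Arrays.asList("pKey1", "pKey2"));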

          HelixControllerMain.startHelixController(zkAddress, config.clusterName, "controller",
              HelixControllerMain.STANDALONE);
      Thread.sleep(5000);

      // HelixAdmin for querying cluster state
      HelixAdmin admin = new ZKHelixAdmin(zkAddress);

      printStatus(admin, config.clusterName, lockGroupName);

      // stop one participant
      System.out.println("Stopping the first participant");
View Full Code Here
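
The printStatus helper is referenced above but not shown; a minimal sketch of what it might do (an assumption, built on HelixAdmin's read-only external-view query):

    // hypothetical sketch: print the current partition-to-state mapping
    static void printStatus(HelixAdmin admin, String cluster, String resource) {
      ExternalView ev = admin.getResourceExternalView(cluster, resource);
      System.out.println("External view for " + resource + ":");
      if (ev != null) {
        for (String partition : ev.getPartitionSet()) {
          System.out.println("  " + partition + " -> " + ev.getStateMap(partition));
        }
      }
    }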

