Package: com.alipay.bluewhale.core.cluster

Examples of com.alipay.bluewhale.core.cluster.StormClusterState


    String[] rtn=new String[list.size()];
    return list.toArray(rtn);
  }
  public static List<String> getStatus(String stormId) throws Exception
  {
    StormClusterState stat=GetShards.getCluster();
   
    Assignment ass=stat.assignment_info(stormId, null);
    Map<String, String> nodeHost=(ass==null)?(new HashMap<String, String>()):ass.getNodeHost();
    Map<Integer, Integer> taskStartTimeSecs=(ass==null)?(new HashMap<Integer, Integer>()):ass.getTaskStartTimeSecs();
    Map<Integer, NodePort> taskToNodeport=(ass==null)?(new HashMap<Integer, NodePort>()):ass.getTaskToNodeport();
   
   
   
    List<String> rtn=new ArrayList<String>();
    List<Integer> taskids=stat.task_ids(stormId);
    for(Integer tid:taskids)
    {
      try{
      TaskInfoContainer con=new TaskInfoContainer();
      con.setStarttime(taskStartTimeSecs.get(tid));
      NodePort np=taskToNodeport.get(tid);
      con.setNp(np);
      if(np!=null)
      {
        con.setHostname(nodeHost.get(np.getNode()));
      }else{
        con.setHostname("nohost");
      }
      con.setTaskId(tid);
      con.setTaskInfo(stat.task_info(stormId, tid));
      con.setHb(stat.task_heartbeat(stormId, tid));
      con.setTaskerrors(stat.task_errors(stormId, tid));
     
      StringBuffer buff=new StringBuffer();
      buff.append("getComponentId:"+ con.getTaskInfo().getComponentId()+"<br>\r\n");
      buff.append("任务:"+ String.format("%03d",con.getTaskId())+"<br>\r\n");
      buff.append("机器域名:"+con.getHostname()+"<br>\r\n");
View Full Code Here


public class SupervisorList {
  public static String[] list() throws Exception
  {
    List<String> rtn=new ArrayList<String>();

    StormClusterState stat=GetShards.getCluster();
    List<String> list=stat.supervisors(null);
    for(String supervisor:list)
    {
      SupervisorContainer container=new SupervisorContainer();
      container.setName(supervisor);
      SupervisorInfo info=stat.supervisor_info(supervisor);
      container.setInfo(info);
      StringBuffer buff=new StringBuffer();
      buff.append("机器域名:"+info.getHostName()+"<br>");
      buff.append("机器id:"+supervisor+"<br>");
      buff.append("启动的端口号:"+info.getWorkPorts().toString()+"<br>");
View Full Code Here

   */
  public static  void mkAssignments(NimbusData data, String topologyid,
      boolean isScratch) throws IOException {
      LOG.debug("Determining assignment for " + topologyid);
      Map<?, ?> conf = data.getConf();
      StormClusterState stormClusterState = data.getStormClusterState();
      //����zk callback�¼�
      RunnableCallback callback =new TransitionZkCallback(data, topologyid);
      //��ȡ���е�supervisor�ڵ���Ϣ��
      Map<String, SupervisorInfo> supInfos = allSupervisorInfo(stormClusterState, callback);
      //��ȡ<supervisorid,hostname>map���ϣ����磺node->host {"4b83cd41-e863-4bd6-b26d-3e27e5ff7799" "dw-perf-3.alipay.net","b8f1664d-5555-4950-8139-5098fb109a81" "dw-perf-2.alipay.net"}
      Map<String, String> nodeHost = getNodeHost(supInfos);
      //��ȡָ��topologyid��assignment��Ϣ��
      Assignment existingAssignment = stormClusterState.assignment_info(topologyid, null);
      //�����ȡtopology�����Ӧ�µ�NodePort
 
     
      Map<Integer, NodePort> taskNodePort = computeNewTaskToNodePort(data,
          topologyid, existingAssignment, stormClusterState, callback,
          supInfos, isScratch);
 
   
      Map<String, String> allNodeHost = new HashMap<String, String>();
     
      if (existingAssignment != null){
          allNodeHost = existingAssignment.getNodeHost();//�����Ƿ�ֹsupervisor���ˣ�task������Ȼ������
      }
     
      if (nodeHost != null){
          allNodeHost.putAll(nodeHost);
      }
      Set<Integer> reassignIds = null;
      if (existingAssignment != null && existingAssignment.getTaskToNodeport() != null){
          reassignIds = changeIds(existingAssignment.getTaskToNodeport(),taskNodePort);
      }else{
          //FIXME changeIds����ִ�У�����startTimes->taskid�п���Ϊnull
          reassignIds = changeIds(new HashMap<Integer, NodePort>(),taskNodePort);
      }
 
 
      //��ʼ����ʼʱ��
      Map<Integer, Integer> startTimes = new HashMap<Integer, Integer>();
      if (existingAssignment != null){
          Map<Integer, Integer> taskStartTimeSecs = existingAssignment.getTaskStartTimeSecs();
          if (taskStartTimeSecs!= null){
              startTimes.putAll(taskStartTimeSecs);
          }
      }
      //������·����ˣ���Ҫ���ó�ʼ��ʱ��
      if (reassignIds != null){
        int nowSecs = TimeUtils.current_time_secs();
          for (Integer reassignid:reassignIds) {
            startTimes.put(reassignid, nowSecs);
          }
      }
     
     
      //select-keys all-node->host (map first (vals task->node+port))
      Map<String, String> storeNodeHosts = new HashMap<String, String>();
 
      if (taskNodePort != null){
        HashSet<String> toSaveHosts=new HashSet<String>();
          for (Entry<Integer, NodePort> entry:taskNodePort.entrySet()) {
            toSaveHosts.add((entry.getValue()).getNode());
          }
         
          for(String node:toSaveHosts)
          {
            String host=allNodeHost.get(node);
            storeNodeHosts.put(node, host);
          }
      }else{
        storeNodeHosts.putAll(allNodeHost);
      }
      Assignment assignment = new Assignment(StormConfig.masterStormdistRoot(
          conf, topologyid), taskNodePort, storeNodeHosts, startTimes);
      if (assignment.equals(existingAssignment)) {
        LOG.debug("Assignment for " + topologyid + " hasn't changed");
      } else {
        LOG.info("Setting new assignment for storm id " + topologyid + ": "
            + assignment);
        stormClusterState.set_assignment(topologyid, assignment);
      }
   

  }
View Full Code Here

   * @param data
   *            NimbusData
   */
  private static void cleanup_corrupt_topologies(NimbusData data) {
    // ��ȡStormClusterState
    StormClusterState stormClusterState = data.getStormClusterState();
    // ��ȡnimbus�����ݴ洢Ŀ¼/nimbus/stormdist·��
    String master_stormdist_root = StormConfig.masterStormdistRoot(data
        .getConf());
    // ��ȡ/nimbus/stormdist·�������ļ����Ƽ���(topology id����)
    List<String> code_ids = PathUtils
        .read_dir_contents(master_stormdist_root);
    // ��ȡ��ǰZK������Ȼ����״̬��topology id����
    List<String> active_ids = data.getStormClusterState().active_storms();
    if (active_ids != null && active_ids.size() > 0) {
      if (code_ids != null) {
        // ��ȡ���ڱ���Ŀ¼�£�������Ȼ����zk�����topology id����
        active_ids.removeAll(code_ids);
      }
      for (String corrupt : active_ids) {
        LOG.info("Corrupt topology "
            + corrupt
            + " has state on zookeeper but doesn't have a local dir on Nimbus. Cleaning up...");
        // ִ������ZK����topology id
        stormClusterState.remove_storm(corrupt);
      }
    }

  }
View Full Code Here

    String stromId=args[0];
    System.out.println(client.getTopologyInfo(stromId));
   
   
    ClusterState cluster_state = new DistributedClusterState(conf);
    StormClusterState zk = new StormZkClusterState(
      cluster_state);
    for(Integer taskid:zk.task_ids(stromId))
    {
      System.out.println("########"+taskid);
        for(TaskError err:zk.task_errors(stromId, taskid))
        {
      System.out.println(err.getError());
        }

    }
    System.out.println("disconnect");

    zk.disconnect();


      }
  }finally{
      transport.close();
View Full Code Here

     * 3.2 register watcher of zk
     * 3.3 register all kinds of callbacks
     * 3.4 create znode
     */
    //�ɲ���zookeeper��ʵ��
    StormClusterState stormClusterState = Cluster
        .mk_storm_cluster_state(conf);

    /*
     * Step 4, create LocalStat LocalStat is one KV database
     * 4.1 create LocalState instance
View Full Code Here

      ArrayList<SolrInfo> newlist=new ArrayList<SolrInfo>();

      try {
        long t1=System.currentTimeMillis();
        LOG.info("sync from zookeeper "+tableName);
        StormClusterState zkCluster = getCluster();
        List<Integer> list = zkCluster.higo_base(tableName,this);
        for (Integer id : list) {
          SolrInfo info = zkCluster.higo_info(tableName, id);
          if (info != null )
          {
            newlist.add(info);
          }
        }
View Full Code Here

  private static void runTable(String[] args) throws Exception {
    String tableName = args[1];
    Map stormconf = Utils.readStormConfig();
    ClusterState zkClusterstate = Cluster
        .mk_distributed_cluster_state(stormconf);
    StormClusterState zkCluster = Cluster
        .mk_storm_cluster_state(zkClusterstate);
    zkCluster.higo_remove(tableName);
    zkCluster.disconnect();

    Integer shards = StormUtils
        .parseInt(stormconf.get("higo.shards.count"));
   
    Integer replication = StormUtils.parseInt(stormconf.containsKey("higo.shards.replication")?stormconf.get("higo.shards.replication"):1);
View Full Code Here

    }

    Map stormconf = Utils.readStormConfig();
    ClusterState zkClusterstate = Cluster
        .mk_distributed_cluster_state(stormconf);
    StormClusterState zkCluster = Cluster
        .mk_storm_cluster_state(zkClusterstate);
    zkCluster.higo_remove(topologyName);
    for (String s : tableName.split(",")) {
      zkCluster.higo_remove(s);
    }
    zkCluster.disconnect();
    System.exit(0);
  }
View Full Code Here

TOP

Related Classes of com.alipay.bluewhale.core.cluster.StormClusterState

Copyright © 2018 www.massapi.com. All rights reserved.
All source code are property of their respective owners. Java is a trademark of Sun Microsystems, Inc and owned by ORACLE Inc. Contact coftware#gmail.com.