Package: org.apache.ambari.server.orm.dao

Examples of org.apache.ambari.server.orm.dao.ClusterDAO


    clusterServiceDAO.create(clusterServiceEntity);
    return clusterServiceEntity;
  }
 
  private ClusterServiceEntity addService(ClusterEntity clusterEntity, String serviceName) {
    ClusterDAO clusterDAO = injector.getInstance(ClusterDAO.class);
   
    ClusterServiceEntity clusterServiceEntity = new ClusterServiceEntity();
    clusterServiceEntity.setClusterEntity(clusterEntity);
    clusterServiceEntity.setServiceName(serviceName);
   
    ServiceDesiredStateEntity serviceDesiredStateEntity = new ServiceDesiredStateEntity();
    serviceDesiredStateEntity.setDesiredStackVersion(DESIRED_STACK_VERSION);
    serviceDesiredStateEntity.setClusterServiceEntity(clusterServiceEntity);
   
    clusterServiceEntity.setServiceDesiredStateEntity(serviceDesiredStateEntity);
    clusterEntity.getClusterServiceEntities().add(clusterServiceEntity);
   
    clusterDAO.merge(clusterEntity);
   
    return clusterServiceEntity;
  }
View Full Code Here


    LOG.debug("Missing configs have been successfully added into Ambari DB.");
  }

  protected void processDecommissionedDatanodes() {
    KeyValueDAO keyValueDAO = injector.getInstance(KeyValueDAO.class);
    ClusterDAO clusterDAO = injector.getInstance(ClusterDAO.class);
    Gson gson = injector.getInstance(Gson.class);
    HostComponentDesiredStateDAO desiredStateDAO = injector.getInstance
      (HostComponentDesiredStateDAO.class);

    KeyValueEntity keyValueEntity = keyValueDAO.findByKey("decommissionDataNodesTag");
    String value = null;
    if (keyValueEntity != null) {
      value = keyValueEntity.getValue();
      if (value != null && !value.isEmpty()) {
        List<ClusterEntity> clusterEntities = clusterDAO.findAll();
        for (ClusterEntity clusterEntity : clusterEntities) {
          Long clusterId = clusterEntity.getClusterId();
          ClusterConfigEntityPK configEntityPK = new ClusterConfigEntityPK();
          configEntityPK.setClusterId(clusterId);
          configEntityPK.setType("hdfs-exclude-file");
          configEntityPK.setTag(value.trim());
          ClusterConfigEntity configEntity = clusterDAO.findConfig(configEntityPK);
          if (configEntity != null) {
            String configData = configEntity.getData();
            if (configData != null) {
              Map<String, String> properties = gson.<Map<String, String>>fromJson(configData, Map.class);
              if (properties != null && !properties.isEmpty()) {
View Full Code Here

  }


  private HostEntity createHost(ClusterEntity clusterEntity) {
    HostDAO hostDAO = injector.getInstance(HostDAO.class);
    ClusterDAO clusterDAO = injector.getInstance(ClusterDAO.class);
    HostEntity hostEntity = new HostEntity();
    hostEntity.setHostName(HOST_NAME);
    hostEntity.setClusterEntities(Collections.singletonList(clusterEntity));
    hostDAO.create(hostEntity);
    clusterEntity.getHostEntities().add(hostEntity);
    clusterDAO.merge(clusterEntity);
    return hostEntity;
  }
View Full Code Here

    KeyValueEntity keyValueEntity = new KeyValueEntity();
    keyValueEntity.setKey("decommissionDataNodesTag");
    keyValueEntity.setValue("1394147791230");
    keyValueDAO.create(keyValueEntity);

    ClusterDAO clusterDAO = injector.getInstance(ClusterDAO.class);
    ClusterConfigEntity configEntity = new ClusterConfigEntity();
    configEntity.setClusterEntity(clusterEntity);
    configEntity.setClusterId(clusterEntity.getClusterId());
    configEntity.setType("hdfs-exclude-file");
    configEntity.setTag("1394147791230");
    configEntity.setData("{\"datanodes\":\"" + HOST_NAME + "\"}");
    configEntity.setTimestamp(System.currentTimeMillis());
    clusterDAO.createConfig(configEntity);

    UpgradeCatalog150 upgradeCatalog150 = injector.getInstance(UpgradeCatalog150.class);

    upgradeCatalog150.processDecommissionedDatanodes();
View Full Code Here

    Assert.assertEquals("1394147791230", keyValueEntity.getValue());
  }

  @Test
  public void testAddMissingLog4jConfigs() throws Exception {
    ClusterDAO clusterDAO = injector.getInstance(ClusterDAO.class);

    ClusterEntity clusterEntity = createCluster();
    ClusterServiceEntity clusterServiceEntityMR = addService(clusterEntity, "HDFS");

    Long clusterId = clusterEntity.getClusterId();

    ClusterConfigEntityPK configEntityPK = new ClusterConfigEntityPK();
    configEntityPK.setClusterId(clusterId);
    configEntityPK.setType("hdfs-log4j");
    configEntityPK.setTag("version1");
    ClusterConfigEntity configEntity = clusterDAO.findConfig(configEntityPK);
    Assert.assertNull(configEntity);

    for (ClusterConfigMappingEntity ccme : clusterEntity.getConfigMappingEntities()) {
      if ("hdfs-log4j".equals(ccme.getType())) {
        Assert.fail();
      }
    }

    UpgradeCatalog150 upgradeCatalog150 = injector.getInstance(UpgradeCatalog150.class);
    upgradeCatalog150.addMissingLog4jConfigs();

    configEntity = clusterDAO.findConfig(configEntityPK);
    Assert.assertNotNull(configEntity);

    //Get updated cluster
    clusterEntity = clusterDAO.findById(1L);

    boolean failFlag = true;
    for (ClusterConfigMappingEntity ccme : clusterEntity.getConfigMappingEntities()) {
      if ("hdfs-log4j".equals(ccme.getType())) {
        failFlag = false;
View Full Code Here

  public void tearDown() {
    injector.getInstance(PersistService.class).stop();
  }

  private ClusterEntity createCluster() {
    ClusterDAO clusterDAO = injector.getInstance(ClusterDAO.class);
    ClusterEntity clusterEntity = new ClusterEntity();
    clusterEntity.setClusterId(1L);
    clusterEntity.setClusterName(CLUSTER_NAME);
    clusterEntity.setDesiredStackVersion(DESIRED_STACK_VERSION);
    clusterDAO.create(clusterEntity);
    return clusterEntity;
  }
View Full Code Here

    clusterServiceDAO.create(clusterServiceEntity);
    return clusterServiceEntity;
  }
 
  private ClusterServiceEntity addService(ClusterEntity clusterEntity, String serviceName) {
    ClusterDAO clusterDAO = injector.getInstance(ClusterDAO.class);
   
    ClusterServiceEntity clusterServiceEntity = new ClusterServiceEntity();
    clusterServiceEntity.setClusterEntity(clusterEntity);
    clusterServiceEntity.setServiceName(serviceName);
   
    ServiceDesiredStateEntity serviceDesiredStateEntity = new ServiceDesiredStateEntity();
    serviceDesiredStateEntity.setDesiredStackVersion(DESIRED_STACK_VERSION);
    serviceDesiredStateEntity.setClusterServiceEntity(clusterServiceEntity);
   
    clusterServiceEntity.setServiceDesiredStateEntity(serviceDesiredStateEntity);
    clusterEntity.getClusterServiceEntities().add(clusterServiceEntity);
   
    clusterDAO.merge(clusterEntity);
   
    return clusterServiceEntity;
  }
View Full Code Here

  }


  private HostEntity createHost(ClusterEntity clusterEntity) {
    HostDAO hostDAO = injector.getInstance(HostDAO.class);
    ClusterDAO clusterDAO = injector.getInstance(ClusterDAO.class);
    HostEntity hostEntity = new HostEntity();
    hostEntity.setHostName(HOST_NAME);
    hostEntity.setClusterEntities(Collections.singletonList(clusterEntity));
    hostDAO.create(hostEntity);
    clusterEntity.getHostEntities().add(hostEntity);
    clusterDAO.merge(clusterEntity);
    return hostEntity;
  }
View Full Code Here

    KeyValueEntity keyValueEntity = new KeyValueEntity();
    keyValueEntity.setKey("decommissionDataNodesTag");
    keyValueEntity.setValue("1394147791230");
    keyValueDAO.create(keyValueEntity);

    ClusterDAO clusterDAO = injector.getInstance(ClusterDAO.class);
    ClusterConfigEntity configEntity = new ClusterConfigEntity();
    configEntity.setClusterEntity(clusterEntity);
    configEntity.setClusterId(clusterEntity.getClusterId());
    configEntity.setType("hdfs-exclude-file");
    configEntity.setTag("1394147791230");
    configEntity.setData("{\"datanodes\":\"" + HOST_NAME + "\"}");
    configEntity.setTimestamp(System.currentTimeMillis());
    clusterDAO.createConfig(configEntity);

    UpgradeCatalog150 upgradeCatalog150 = injector.getInstance(UpgradeCatalog150.class);

    upgradeCatalog150.processDecommissionedDatanodes();
View Full Code Here

    Assert.assertEquals("1394147791230", keyValueEntity.getValue());
  }

  @Test
  public void testAddMissingLog4jConfigs() throws Exception {
    ClusterDAO clusterDAO = injector.getInstance(ClusterDAO.class);

    ClusterEntity clusterEntity = createCluster();
    ClusterServiceEntity clusterServiceEntityMR = addService(clusterEntity, "HDFS");

    Long clusterId = clusterEntity.getClusterId();

    ClusterConfigEntityPK configEntityPK = new ClusterConfigEntityPK();
    configEntityPK.setClusterId(clusterId);
    configEntityPK.setType("hdfs-log4j");
    configEntityPK.setTag("version1");
    ClusterConfigEntity configEntity = clusterDAO.findConfig(configEntityPK);
    Assert.assertNull(configEntity);

    for (ClusterConfigMappingEntity ccme : clusterEntity.getConfigMappingEntities()) {
      if ("hdfs-log4j".equals(ccme.getType())) {
        Assert.fail();
      }
    }

    UpgradeCatalog150 upgradeCatalog150 = injector.getInstance(UpgradeCatalog150.class);
    upgradeCatalog150.addMissingLog4jConfigs();

    configEntity = clusterDAO.findConfig(configEntityPK);
    Assert.assertNotNull(configEntity);

    //Get updated cluster
    clusterEntity = clusterDAO.findById(1L);

    boolean failFlag = true;
    for (ClusterConfigMappingEntity ccme : clusterEntity.getConfigMappingEntities()) {
      if ("hdfs-log4j".equals(ccme.getType())) {
        failFlag = false;
View Full Code Here

TOP

Related Classes of org.apache.ambari.server.orm.dao.ClusterDAO

Copyright © 2018 www.massapi.com. All rights reserved.
All source code are property of their respective owners. Java is a trademark of Sun Microsystems, Inc and owned by ORACLE Inc. Contact coftware#gmail.com.