Package org.apache.ambari.server.orm.entities

Examples of org.apache.ambari.server.orm.entities.ClusterEntity
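
ClusterEntity is the JPA entity that represents a cluster in the Ambari server database. Callers normally obtain it through ClusterDAO (findById, create, merge) and then wire it to related entities such as stages, hosts, services, and configurations. The truncated excerpts below, taken from the Ambari server code base, show the most common usage patterns.

The first excerpt attaches a newly built StageEntity, together with the HostRoleCommandEntity instances derived from its host role commands, to the owning cluster: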


      try {
        cluster = clusters.getCluster(stage.getClusterName());
      } catch (AmbariException e) {
        throw new RuntimeException(e);
      }
      ClusterEntity clusterEntity = clusterDAO.findById(cluster.getClusterId());

      stageEntity.setCluster(clusterEntity);
      clusterEntity.getStages().add(stageEntity);

      for (HostRoleCommand hostRoleCommand : stage.getOrderedHostRoleCommands()) {
        HostRoleCommandEntity hostRoleCommandEntity = hostRoleCommand.constructNewPersistenceEntity();
        stageEntity.getHostRoleCommands().add(hostRoleCommandEntity);
        hostRoleCommandEntity.setStage(stageEntity);
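
Creating a new cluster: after checking for duplicates, a fresh ClusterEntity is populated with the cluster name and a serialized desired StackId, then persisted and merged through ClusterDAO: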


        throw new DuplicateResourceException("Attempted to create a Cluster which already exists"
            + ", clusterName=" + clusterName);
      }
      // retrieve new cluster id
      // add cluster id -> cluster mapping into clustersById
      ClusterEntity clusterEntity = new ClusterEntity();
      clusterEntity.setClusterName(clusterName);
      clusterEntity.setDesiredStackVersion(gson.toJson(new StackId()));

      try {
        clusterDAO.create(clusterEntity);
        clusterEntity = clusterDAO.merge(clusterEntity);
      } catch (RollbackException e) {
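
Mapping an existing host into a cluster: both sides of the host/cluster association are updated and then merged inside a single transaction: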


  @Transactional
  void mapHostClusterEntities(String hostName, Long clusterId) {
    HostEntity hostEntity = hostDAO.findByName(hostName);
    ClusterEntity clusterEntity = clusterDAO.findById(clusterId);

    hostEntity.getClusterEntities().add(clusterEntity);
    clusterEntity.getHostEntities().add(hostEntity);

    clusterDAO.merge(clusterEntity);
    hostDAO.merge(hostEntity);
  }
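
From the upgrade-catalog unit tests: a helper that builds and persists a minimal ClusterEntity with a fixed id, name, and desired stack version (preceded by the teardown call that stops the persistence service):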

    // tail of the test teardown: stop the in-memory persistence service
    injector.getInstance(PersistService.class).stop();
  }

  private ClusterEntity createCluster() {
    ClusterDAO clusterDAO = injector.getInstance(ClusterDAO.class);
    ClusterEntity clusterEntity = new ClusterEntity();
    clusterEntity.setClusterId(1L);
    clusterEntity.setClusterName(CLUSTER_NAME);
    clusterEntity.setDesiredStackVersion(DESIRED_STACK_VERSION);
    clusterDAO.create(clusterEntity);
    return clusterEntity;
  }
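
testAddHistoryServer creates a cluster, adds a MAPREDUCE service and a host, runs the body of the test inside a transaction, and then invokes the upgrade step: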


  @Test
  public void testAddHistoryServer() throws AmbariException {
    final ClusterEntity clusterEntity = createCluster();
    final ClusterServiceEntity clusterServiceEntityMR = addService(clusterEntity, "MAPREDUCE");
    final HostEntity hostEntity = createHost(clusterEntity);
   
    executeInTransaction(new Runnable() {
      @Override
      public void run() {
        // ... (transaction body elided in this excerpt)
      }
    });

    upgradeCatalog150.addHistoryServer();
  }
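
testProcessDecommissionedDatanodes builds a richer fixture around the cluster: a service, a host, desired-state entities for the DATANODE component, a decommissionDataNodesTag key/value entry, and an hdfs-exclude-file config created through ClusterDAO.createConfig():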

  @Test
  public void testProcessDecommissionedDatanodes() throws Exception {
    ClusterEntity clusterEntity = createCluster();
    ClusterServiceEntity clusterServiceEntity = createService(clusterEntity);
    HostEntity hostEntity = createHost(clusterEntity);

    ServiceComponentDesiredStateEntity componentDesiredStateEntity =
      new ServiceComponentDesiredStateEntity();
    componentDesiredStateEntity.setClusterId(clusterEntity.getClusterId());
    componentDesiredStateEntity.setServiceName(clusterServiceEntity.getServiceName());
    componentDesiredStateEntity.setClusterServiceEntity(clusterServiceEntity);
    componentDesiredStateEntity.setComponentName("DATANODE");

    //componentDesiredStateDAO.create(componentDesiredStateEntity);

    HostComponentDesiredStateDAO hostComponentDesiredStateDAO =
      injector.getInstance(HostComponentDesiredStateDAO.class);

    HostComponentDesiredStateEntity hostComponentDesiredStateEntity =
      new HostComponentDesiredStateEntity();

    hostComponentDesiredStateEntity.setClusterId(clusterEntity.getClusterId());
    hostComponentDesiredStateEntity.setComponentName("DATANODE");
    hostComponentDesiredStateEntity.setAdminState(HostComponentAdminState.INSERVICE);
    hostComponentDesiredStateEntity.setServiceName(clusterServiceEntity.getServiceName());
    hostComponentDesiredStateEntity.setServiceComponentDesiredStateEntity(componentDesiredStateEntity);
    hostComponentDesiredStateEntity.setHostEntity(hostEntity);
    hostComponentDesiredStateEntity.setHostName(hostEntity.getHostName());

    hostComponentDesiredStateDAO.create(hostComponentDesiredStateEntity);

    HostComponentDesiredStateEntity entity = hostComponentDesiredStateDAO.findAll().get(0);

    Assert.assertEquals(HostComponentAdminState.INSERVICE.name(), entity.getAdminState().name());

    KeyValueDAO keyValueDAO = injector.getInstance(KeyValueDAO.class);
    KeyValueEntity keyValueEntity = new KeyValueEntity();
    keyValueEntity.setKey("decommissionDataNodesTag");
    keyValueEntity.setValue("1394147791230");
    keyValueDAO.create(keyValueEntity);

    ClusterDAO clusterDAO = injector.getInstance(ClusterDAO.class);
    ClusterConfigEntity configEntity = new ClusterConfigEntity();
    configEntity.setClusterEntity(clusterEntity);
    configEntity.setClusterId(clusterEntity.getClusterId());
    configEntity.setType("hdfs-exclude-file");
    configEntity.setTag("1394147791230");
    configEntity.setData("{\"datanodes\":\"" + HOST_NAME + "\"}");
    configEntity.setTimestamp(System.currentTimeMillis());
    clusterDAO.createConfig(configEntity);
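
testAddMissingLog4jConfigs first asserts that no hdfs-log4j config or config mapping exists for the cluster, runs UpgradeCatalog150.addMissingLog4jConfigs(), then reloads the ClusterEntity and verifies that both were added: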

  @Test
  public void testAddMissingLog4jConfigs() throws Exception {
    ClusterDAO clusterDAO = injector.getInstance(ClusterDAO.class);

    ClusterEntity clusterEntity = createCluster();
    ClusterServiceEntity clusterServiceEntityMR = addService(clusterEntity, "HDFS");

    Long clusterId = clusterEntity.getClusterId();

    ClusterConfigEntityPK configEntityPK = new ClusterConfigEntityPK();
    configEntityPK.setClusterId(clusterId);
    configEntityPK.setType("hdfs-log4j");
    configEntityPK.setTag("version1");
    ClusterConfigEntity configEntity = clusterDAO.findConfig(configEntityPK);
    Assert.assertNull(configEntity);

    for (ClusterConfigMappingEntity ccme : clusterEntity.getConfigMappingEntities()) {
      if ("hdfs-log4j".equals(ccme.getType())) {
        Assert.fail();
      }
    }

    UpgradeCatalog150 upgradeCatalog150 = injector.getInstance(UpgradeCatalog150.class);
    upgradeCatalog150.addMissingLog4jConfigs();

    configEntity = clusterDAO.findConfig(configEntityPK);
    Assert.assertNotNull(configEntity);

    //Get updated cluster
    clusterEntity = clusterDAO.findById(1L);

    boolean failFlag = true;
    for (ClusterConfigMappingEntity ccme : clusterEntity.getConfigMappingEntities()) {
      if ("hdfs-log4j".equals(ccme.getType())) {
        failFlag = false;
      }
    }
    Assert.assertFalse(failFlag);
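
Persisting a config group: the owning ClusterEntity is looked up by id and set on the ConfigGroupEntity before the group and its config mappings are created: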

  /**
   * ...
   * @throws Exception
   */
  @Transactional
  private void persistEntities() {
    ClusterEntity clusterEntity = clusterDAO.findById(cluster.getClusterId());
    configGroupEntity.setClusterEntity(clusterEntity);
    configGroupEntity.setTimestamp(System.currentTimeMillis());
    configGroupDAO.create(configGroupEntity);

    persistConfigMapping(clusterEntity);
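
Updating an already-persisted config group: the ClusterEntity is re-read and the group, its host mappings, and its config mappings are merged in one transaction: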


  @Transactional
  private void saveIfPersisted() {
    ClusterEntity clusterEntity = clusterDAO.findById(cluster.getClusterId());

    if (isPersisted) {
      configGroupDAO.merge(configGroupEntity);
      persistHostMapping();
      persistConfigMapping(clusterEntity);
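
Persisting request actions: the RequestEntity built from a Request is attached to the ClusterEntity resolved from request.getClusterId(), failing fast if the cluster cannot be found: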

  @Transactional
  public void persistActions(Request request) {

    RequestEntity requestEntity = request.constructNewPersistenceEntity();

    ClusterEntity clusterEntity = clusterDAO.findById(request.getClusterId());
    if (clusterEntity == null) {
      throw new RuntimeException(String.format("Cluster with id=%s not found", request.getClusterId()));
    }
    requestEntity.setCluster(clusterEntity);
    requestDAO.create(requestEntity);
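
Taken together, the excerpts follow a recurring pattern: obtain the ClusterEntity through ClusterDAO, update both sides of the bidirectional association to the related entity, and merge. The sketch below is a hypothetical helper that combines the creation and host-mapping patterns shown above; it assumes the same injected clusterDAO, hostDAO, and gson instances used in the excerpts and is not taken verbatim from the Ambari code base.

  @Transactional
  ClusterEntity createAndMapCluster(String clusterName, String hostName) {
    // create and persist the cluster, as in the cluster-creation excerpt above
    ClusterEntity clusterEntity = new ClusterEntity();
    clusterEntity.setClusterName(clusterName);
    clusterEntity.setDesiredStackVersion(gson.toJson(new StackId()));
    clusterDAO.create(clusterEntity);
    clusterEntity = clusterDAO.merge(clusterEntity);

    // wire both sides of the host/cluster association, as in mapHostClusterEntities
    HostEntity hostEntity = hostDAO.findByName(hostName);
    hostEntity.getClusterEntities().add(clusterEntity);
    clusterEntity.getHostEntities().add(hostEntity);

    clusterDAO.merge(clusterEntity);
    hostDAO.merge(hostEntity);
    return clusterEntity;
  }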
