Package com.vmware.bdd.apitypes

Examples of com.vmware.bdd.apitypes.NodeGroupCreate


      rps.add("myRp4");
      rps.add("myRp5");
      spec.setRpNames(rps);

      NodeGroupCreate[] nodegroups = new NodeGroupCreate[1];
      NodeGroupCreate group = new NodeGroupCreate();
      nodegroups[0] = group;
      group.setCpuNum(3);
      group.setInstanceNum(1);
      group.setInstanceType(InstanceType.LARGE);
      group.setHaFlag("off");
      group.setName("main_group");
      List<String> roles = new ArrayList<String>();
      roles.add("hadoop_namenode");
      group.setRoles(roles);
      StorageRead storage = new StorageRead();
      storage.setType(DatastoreType.LOCAL.toString());
      List<String> dsNames = new ArrayList<String>();
      dsNames.add("testSharedStore");
      dsNames.add("testLocalStore");
      storage.setDsNames(dsNames);
      group.setStorage(storage);
      spec.setNodeGroups(nodegroups);
      clusterConfigMgr.createClusterConfig(spec);

      ClusterEntity cluster = clusterEntityMgr.findByName("my-cluster6");
      Assert.assertTrue(cluster != null);
View Full Code Here


      rps.add("myRp4");
      rps.add("myRp5");
      spec.setRpNames(rps);

      NodeGroupCreate[] nodegroups = new NodeGroupCreate[1];
      NodeGroupCreate group = new NodeGroupCreate();
      nodegroups[0] = group;
      group.setCpuNum(3);
      group.setInstanceNum(1);
      group.setInstanceType(InstanceType.LARGE);
      group.setHaFlag("off");
      group.setName("main_group");
      List<String> roles = new ArrayList<String>();
      roles.add("hadoop_namenode");
      roles.add("hadoop_jobtracker");
      group.setRoles(roles);

      spec.setNodeGroups(nodegroups);
      clusterConfigMgr.createClusterConfig(spec);

      ClusterEntity cluster = clusterEntityMgr.findByName("my-cluster7");
View Full Code Here

      rps.add("myRp4");
      rps.add("myRp5");
      spec.setRpNames(rps);

      NodeGroupCreate[] nodegroups = new NodeGroupCreate[1];
      NodeGroupCreate group = new NodeGroupCreate();
      nodegroups[0] = group;
      group.setCpuNum(3);
      group.setInstanceNum(1);
      group.setInstanceType(InstanceType.LARGE);
      group.setHaFlag("off");
      group.setName("main_group");
      List<String> roles = new ArrayList<String>();
      roles.add("hadoop_namenode");
      group.setRoles(roles);
      String configJson =
            "{\"cluster_configuration\":{\"hadoop\":{\"core-site.xml\":{\"hadoop.security.group.mapping\":\"xxx\",\"hadoop.security.authorization\":false}}}}";
      Map config = (new Gson()).fromJson(configJson, Map.class);
      group.setConfiguration((Map<String, Object>) (config
            .get("cluster_configuration")));

      spec.setNodeGroups(nodegroups);
      clusterConfigMgr.createClusterConfig(spec);
View Full Code Here

   }

   @Test
   public void testContainsComputeOnlyNodeGroups() {
      ClusterCreate cluster = new ClusterCreate();
      NodeGroupCreate compute = new NodeGroupCreate();
      NodeGroupCreate data = new NodeGroupCreate();
      compute.setRoles(Arrays.asList(HadoopRole.HADOOP_TASKTRACKER.toString()));
      data.setRoles(Arrays.asList(HadoopRole.HADOOP_DATANODE.toString()));
      cluster.setNodeGroups(new NodeGroupCreate[] { compute, data });
      assertEquals(true, cluster.containsComputeOnlyNodeGroups(defaultSoftwareManager));
      compute.setRoles(Arrays.asList(HadoopRole.HADOOP_TASKTRACKER.toString(),
            HadoopRole.TEMPFS_CLIENT_ROLE.toString()));
      cluster.setNodeGroups(new NodeGroupCreate[] { compute, data });
      assertEquals(true, cluster.containsComputeOnlyNodeGroups(defaultSoftwareManager));
      NodeGroupCreate worker = new NodeGroupCreate();
      worker.setRoles(Arrays.asList(HadoopRole.HADOOP_TASKTRACKER.toString(),
            HadoopRole.HADOOP_DATANODE.toString()));
      cluster.setNodeGroups(new NodeGroupCreate[] { worker });
      assertEquals(false, cluster.containsComputeOnlyNodeGroups(defaultSoftwareManager));
   }
View Full Code Here

   }

   @Test
   public void testSortingNodeGroups() {
      ClusterCreate cluster = new ClusterCreate();
      NodeGroupCreate client = new NodeGroupCreate();
      client.setRoles(Arrays.asList("hadoop_client"));
      NodeGroupCreate worker = new NodeGroupCreate();
      worker.setRoles(Arrays.asList("hadoop_tasktracker",
      "hadoop_datanode"));
      NodeGroupCreate master = new NodeGroupCreate();
      master.setRoles(Arrays.asList("hadoop_namenode",
      "hadoop_jobtracker"));
      cluster.setNodeGroups(new NodeGroupCreate[] { client, worker, master });
      assertEquals(3, cluster.getNodeGroups().length);
      ClusterBlueprint blueprint = cluster.toBlueprint();
      defaultSoftwareManager.updateInfrastructure(blueprint);
      assertEquals(master.getName(), blueprint.getNodeGroups().get(0).getName());
      assertEquals(worker.getName(), blueprint.getNodeGroups().get(1).getName());
      assertEquals(client.getName(), blueprint.getNodeGroups().get(2).getName());
   }
View Full Code Here

      // mock cluster config manager
      ClusterConfigManager configMgr = Mockito.mock(ClusterConfigManager.class);

      // mock getClusterConfig
      NodeGroupCreate nodeGroup = new NodeGroupCreate();
      nodeGroup.setName(NODE_GROUP_NAME);
      nodeGroup.setStorage(new StorageRead());
      NodeGroupCreate[] nodeGroups = new NodeGroupCreate[] { nodeGroup };

      ClusterCreate spec = new ClusterCreate();
      spec.setName(CLUSTER_NAME);
      spec.setNodeGroups(nodeGroups);
View Full Code Here

   private ClusterCreate createClusterSpec() {
      ClusterCreate spec = new ClusterCreate();
      spec.setName("test");
      NodeGroupCreate[] nodeGroups = new NodeGroupCreate[1];
      NodeGroupCreate group = new NodeGroupCreate();
      group.setVmFolderPath("root/test/master");
      group.setName("master");
      nodeGroups[0] = group;
      spec.setNodeGroups(nodeGroups);
      return spec;
   }
View Full Code Here

  
   @MockClass(realClass = ClusterCreate.class)
   public static class MockClusterCreateForScale {
      @Mock
      public NodeGroupCreate getNodeGroup(String ngName) {
         return new NodeGroupCreate();        
      }
View Full Code Here

                  + hdfsArray[1] + "\"}}}}";
      Map clusterConfig = (new Gson()).fromJson(clusterConfigJson, Map.class);
      spec.setConfiguration((Map<String, Object>) (clusterConfig
            .get("configuration")));
      //build a jobtracker group, two compute node groups.
      NodeGroupCreate ng0 = new NodeGroupCreate();
      List<String> computerMasterRoles = new ArrayList<String>();
      computerMasterRoles.add("hadoop_resourcemanager");
      ng0.setRoles(computerMasterRoles);
      ng0.setName("resourcemanager");
      ng0.setInstanceNum(1);
      ng0.setInstanceType(InstanceType.LARGE);
      String ng0ConfigJson =
            "{\"configuration\":{\"hadoop\":{\"core-site.xml\":{\"fs.default.name\":\""
                  + hdfsArray[2] + "\"}}}}";
      Map ng0Config = (new Gson()).fromJson(ng0ConfigJson, Map.class);
      ng0.setConfiguration((Map<String, Object>) (ng0Config
            .get("configuration")));

      NodeGroupCreate ng1 = new NodeGroupCreate();
      List<String> computeWorkerRoles = new ArrayList<String>();
      computeWorkerRoles.add("hadoop_nodemanager");
      ng1.setRoles(computeWorkerRoles);
      ng1.setName("compute1");
      ng1.setInstanceNum(4);
      ng1.setInstanceType(InstanceType.MEDIUM);
      StorageRead storage = new StorageRead();
      storage.setType("LOCAL");
      storage.setSizeGB(10);
      ng1.setStorage(storage);
      String ng1ConfigJson =
            "{\"configuration\":{\"hadoop\":{\"core-site.xml\":{\"fs.default.name\":\""
                  + hdfsArray[3] + "\"}}}}";
      Map ng1Config = (new Gson()).fromJson(ng1ConfigJson, Map.class);
      ng1.setConfiguration((Map<String, Object>) (ng1Config
            .get("configuration")));
      NodeGroupCreate ng2 = new NodeGroupCreate();
      ng2.setRoles(computeWorkerRoles);
      ng2.setName("compute2");
      ng2.setInstanceNum(2);
      ng2.setInstanceType(InstanceType.MEDIUM);
      StorageRead storageCompute = new StorageRead();
      storageCompute.setType("LOCAL");
      storageCompute.setSizeGB(10);
      ng2.setStorage(storageCompute);

      NodeGroupCreate[] ngs = new NodeGroupCreate[] { ng0, ng1, ng2 };
      spec.setNodeGroups(ngs);
      spec = ClusterSpecFactory.getCustomizedSpec(spec, null);
      clusterConfigMgr.createClusterConfig(spec);
View Full Code Here

                  + externalHDFS + "\"}}}}";
      Map clusterConfig = (new Gson()).fromJson(clusterConfigJson, Map.class);
      spec.setConfiguration((Map<String, Object>) (clusterConfig
            .get("configuration")));
      // build 3 worker groups
      NodeGroupCreate worker = new NodeGroupCreate();
      List<String> computeRoles = new ArrayList<String>();
      computeRoles.add("hadoop_nodemanager");
      worker.setRoles(computeRoles);
      worker.setName("compute1");
      worker.setInstanceNum(2);
      worker.setInstanceType(InstanceType.MEDIUM);
      StorageRead storage = new StorageRead();
      storage.setType("LOCAL");
      storage.setSizeGB(10);
      worker.setStorage(storage);
      spec.setNodeGroups(new NodeGroupCreate[] { worker });

      spec = ClusterSpecFactory.getCustomizedSpec(spec, null);
      clusterConfigMgr.createClusterConfig(spec);
      ClusterEntity cluster =
View Full Code Here

TOP

Related Classes of com.vmware.bdd.apitypes.NodeGroupCreate

Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc. and is owned by Oracle Inc. Contact coftware#gmail.com.