Examples of com.vmware.bdd.apitypes.ClusterCreate
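
All of these snippets come from the Serengeti TestClusterConfigManager test class, and they share one skeleton: build a ClusterCreate spec, attach one or more NodeGroupCreate node groups, normalize the spec through ClusterSpecFactory, and hand the result to the cluster config manager. A minimal sketch of that shared skeleton, assuming the fixtures visible in the snippets (createNetConfigs() and clusterConfigMgr are supplied by the test class):

   // Minimal sketch, assuming the fixtures from the snippets on this page.
   ClusterCreate spec = new ClusterCreate();
   spec.setName("example-cluster");
   spec.setNetworkConfig(createNetConfigs()); // fixture helper from the test class
   List<String> rps = new ArrayList<String>();
   rps.add("myRp1");
   spec.setRpNames(rps);

   NodeGroupCreate worker = new NodeGroupCreate();
   worker.setName("worker");
   List<String> roles = new ArrayList<String>();
   roles.add("hadoop_datanode");
   worker.setRoles(roles);
   worker.setInstanceNum(3);
   worker.setInstanceType(InstanceType.MEDIUM);
   spec.setNodeGroups(new NodeGroupCreate[] { worker });

   // Fill in distro defaults and any derived node groups, then persist.
   spec = ClusterSpecFactory.getCustomizedSpec(spec, null);
   clusterConfigMgr.createClusterConfig(spec);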

   @Test(groups = { "TestClusterConfigManager" })
   public void testClusterConfigWithExternalMapReduce() throws Exception {
      String externalMR = "192.168.0.1:8021";
      String externalHDFS = "hdfs://192.168.0.2:8020";
      ClusterCreate spec = new ClusterCreate();
      spec.setName("my-cluster-external-mr");
      List<String> rps = new ArrayList<String>();
      rps.add("myRp1");
      spec.setRpNames(rps);
      spec.setNetworkConfig(createNetConfigs());
      spec.setDistro("bigtop");
      spec.setDistroVendor(Constants.DEFAULT_VENDOR);
      spec.setExternalMapReduce(externalMR);
      spec.setExternalHDFS(externalHDFS);
      String clusterConfigJson =
            "{\"configuration\":{\"hadoop\":{\"mapred-site.xml\":{\"mapred.job.tracker\":\""
                  + externalMR
                  + "\"}, \"core-site.xml\":{\"fs.default.name\":\""
                  + externalHDFS + "\"}}}}";
      Map clusterConfig = (new Gson()).fromJson(clusterConfigJson, Map.class);
      spec.setConfiguration((Map<String, Object>) (clusterConfig
            .get("configuration")));
      // build one compute-only worker group
      NodeGroupCreate worker = new NodeGroupCreate();
      List<String> computeRoles = new ArrayList<String>();
      computeRoles.add("hadoop_nodemanager");
      worker.setRoles(computeRoles);
      worker.setName("compute1");
      worker.setInstanceNum(2);
      worker.setInstanceType(InstanceType.MEDIUM);
      StorageRead storage = new StorageRead();
      storage.setType("LOCAL");
      storage.setSizeGB(10);
      worker.setStorage(storage);
      spec.setNodeGroups(new NodeGroupCreate[] { worker });

      spec = ClusterSpecFactory.getCustomizedSpec(spec, null);
      clusterConfigMgr.createClusterConfig(spec);
      ClusterEntity cluster =
            clusterEntityMgr.findByName("my-cluster-external-mr");
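The Gson round-trip above is just a shortcut for building the nested configuration map, whose shape is configuration → hadoop → file name → property → value. The same map can be built directly; a sketch of the equivalent, reusing the spec, externalMR, and externalHDFS variables from the snippet:

   Map<String, Object> mapredSite = new HashMap<String, Object>();
   mapredSite.put("mapred.job.tracker", externalMR);
   Map<String, Object> coreSite = new HashMap<String, Object>();
   coreSite.put("fs.default.name", externalHDFS);
   Map<String, Object> hadoop = new HashMap<String, Object>();
   hadoop.put("mapred-site.xml", mapredSite);
   hadoop.put("core-site.xml", coreSite);
   Map<String, Object> configuration = new HashMap<String, Object>();
   configuration.put("hadoop", hadoop);
   spec.setConfiguration(configuration);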

Examples of com.vmware.bdd.apitypes.ClusterCreate

   public void testClusterConfigWithExternalHDFSFailure() throws Exception {
      String[] hdfsArray =
            new String[] { "hdfs://168.192.0.70:8020",
                  "hdfs://168.192.0.71:8020", "hdfs://168.192.0.72:8020",
                  "hdfs://168.192.0.73:8020" };
      ClusterCreate spec = new ClusterCreate();
      spec.setName("my-cluster-external-hdfs-failure");
      List<String> rps = new ArrayList<String>();
      rps.add("myRp1");
      spec.setRpNames(rps);
      spec.setNetworkConfig(createNetConfigs());
      spec.setDistro("bigtop");
      spec.setDistroVendor(Constants.DEFAULT_VENDOR);
      String clusterConfigJson =
            "{\"configuration\":{\"hadoop\":{\"core-site.xml\":{\"fs.default.name\":\""
                  + hdfsArray[1] + "\"}}}}";
      Map clusterConfig = (new Gson()).fromJson(clusterConfigJson, Map.class);
      spec.setConfiguration((Map<String, Object>) (clusterConfig
            .get("configuration")));
      // build a master group, a compute node group, and a data node group
      NodeGroupCreate ng0 = new NodeGroupCreate();
      List<String> masterRole = new ArrayList<String>();
      masterRole.add("hadoop_namenode");
      masterRole.add("hadoop_resourcemanager");
      ng0.setRoles(masterRole);
      ng0.setName("master");
      ng0.setInstanceNum(1);
      ng0.setInstanceType(InstanceType.LARGE);
      String ng0ConfigJson =
            "{\"configuration\":{\"hadoop\":{\"core-site.xml\":{\"fs.default.name\":\""
                  + hdfsArray[2] + "\"}}}}";
      Map ng0Config = (new Gson()).fromJson(ng0ConfigJson, Map.class);
      ng0.setConfiguration((Map<String, Object>) (ng0Config
            .get("configuration")));

      NodeGroupCreate ng1 = new NodeGroupCreate();
      List<String> computeRoles = new ArrayList<String>();
      computeRoles.add("hadoop_nodemanager");
      ng1.setRoles(computeRoles);
      ng1.setName("compute1");
      ng1.setInstanceNum(4);
      ng1.setCpuNum(2);
      ng1.setMemCapacityMB(7500);
      ng1.setInstanceType(InstanceType.MEDIUM);
      StorageRead storage = new StorageRead();
      storage.setType("LOCAL");
      storage.setSizeGB(10);
      ng1.setStorage(storage);
      String ng1ConfigJson =
            "{\"configuration\":{\"hadoop\":{\"core-site.xml\":{\"fs.default.name\":\""
                  + hdfsArray[3] + "\"}}}}";
      Map ng1Config = (new Gson()).fromJson(ng1ConfigJson, Map.class);
      ng1.setConfiguration((Map<String, Object>) (ng1Config
            .get("configuration")));
      NodeGroupCreate ng2 = new NodeGroupCreate();
      List<String> dataRoles = new ArrayList<String>();
      dataRoles.add("hadoop_datanode");
      ng2.setRoles(dataRoles);
      ng2.setName("data1");
      ng2.setInstanceNum(2);
      ng2.setInstanceType(InstanceType.MEDIUM);
      StorageRead storageCompute = new StorageRead();
      storageCompute.setType("LOCAL");
      storageCompute.setSizeGB(10);
      ng2.setStorage(storageCompute);

      NodeGroupCreate[] ngs = new NodeGroupCreate[] { ng0, ng1, ng2 };
      spec.setNodeGroups(ngs);
      spec = ClusterSpecFactory.getCustomizedSpec(spec, null);
      clusterConfigMgr.createClusterConfig(spec);

      ClusterEntity cluster = clusterEntityMgr.findClusterById(1L);
      List<ClusterEntity> cs = clusterEntityMgr.findAllClusters();
      for (ClusterEntity c : cs) {
         System.out.println(c.getId());
      }
      cluster = clusterEntityMgr.findByName("my-cluster-external-hdfs-failure");
      Assert.assertTrue(cluster != null);

      ClusterCreate attrs =
            clusterConfigMgr
                  .getClusterConfig("my-cluster-external-hdfs-failure");
      String manifest = gson.toJson(attrs);
      System.out.println(manifest);
      Assert.assertTrue(
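Note that node groups accept the same nested configuration map as the cluster itself: here the cluster-level configuration points fs.default.name at hdfsArray[1], while the master and compute groups override it with hdfsArray[2] and hdfsArray[3]. That three-way conflict in the external HDFS setting is the failure case the method name refers to.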

Examples of com.vmware.bdd.apitypes.ClusterCreate

   }

   @Test(groups = { "TestClusterConfigManager" }, enabled = false)
   public void testClusterConfigWithTempfs() throws Exception {
      ClusterCreate spec = new ClusterCreate();
      spec.setName("my-cluster-dc-tempfs");
      List<String> rps = new ArrayList<String>();
      rps.add("myRp1");
      spec.setRpNames(rps);
      spec.setNetworkConfig(createNetConfigs());
      spec.setDistro("apache");
      spec.setDistroVendor(Constants.DEFAULT_VENDOR);

      // build a master group, a datanode group, and a compute node group with strict association and tempfs
      NodeGroupCreate[] ngs = new NodeGroupCreate[3];
      NodeGroupCreate ng0 = new NodeGroupCreate();
      ngs[0] = ng0;
      List<String> masterRoles = new ArrayList<String>();
      masterRoles.add("hadoop_namenode");
      masterRoles.add("hadoop_jobtracker");
      ngs[0].setRoles(masterRoles);
      ngs[0].setName("master");
      ngs[0].setInstanceNum(1);
      ngs[0].setInstanceType(InstanceType.LARGE);

      NodeGroupCreate ng1 = new NodeGroupCreate();
      ngs[1] = ng1;
      List<String> dataNodeRoles = new ArrayList<String>();
      dataNodeRoles.add("hadoop_datanode");
      ngs[1].setRoles(dataNodeRoles);
      ngs[1].setName("data");
      ngs[1].setInstanceNum(4);
      ngs[1].setInstanceType(InstanceType.MEDIUM);
      StorageRead storage = new StorageRead();
      storage.setType("LOCAL");
      storage.setSizeGB(50);
      ngs[1].setStorage(storage);

      NodeGroupCreate ng2 = new NodeGroupCreate();
      ngs[2] = ng2;
      List<String> computeNodeRoles = new ArrayList<String>();
      computeNodeRoles.add("hadoop_tasktracker");
      ngs[2].setRoles(computeNodeRoles);
      ngs[2].setName("compute");
      ngs[2].setInstanceNum(8);
      ngs[2].setInstanceType(InstanceType.MEDIUM);
      StorageRead storageCompute = new StorageRead();
      storageCompute.setType("TEMPFS");
      storageCompute.setSizeGB(50);
      ngs[2].setStorage(storageCompute);
      PlacementPolicy policy = new PlacementPolicy();
      policy.setInstancePerHost(2);
      List<GroupAssociation> associates = new ArrayList<GroupAssociation>();
      GroupAssociation associate = new GroupAssociation();
      associate.setReference("data");
      associate.setType(GroupAssociationType.STRICT);
      associates.add(associate);
      policy.setGroupAssociations(associates);
      ngs[2].setPlacementPolicies(policy);

      spec.setNodeGroups(ngs);
      spec = ClusterSpecFactory.getCustomizedSpec(spec, null);
      clusterConfigMgr.createClusterConfig(spec);

      ClusterEntity cluster = clusterEntityMgr.findClusterById(1L);
      List<ClusterEntity> cs = clusterEntityMgr.findAllClusters();
      for (ClusterEntity c : cs) {
         System.out.println(c.getId());
      }
      cluster = clusterEntityMgr.findByName("my-cluster-dc-tempfs");
      Assert.assertTrue(cluster != null);

      ClusterCreate attrs =
            clusterConfigMgr.getClusterConfig("my-cluster-dc-tempfs");
      String manifest = gson.toJson(attrs);
      System.out.println(manifest);
      Assert.assertTrue(manifest.indexOf("master") != -1,
            "manifest should contains nodegroups");
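Two details distinguish this spec: the compute group uses TEMPFS storage, and its placement policy requests two instances per host with a STRICT association to the "data" group, meaning compute nodes should only land on hosts that also carry a data node. The truncated assertion (its continuation opens the next snippet) checks that the generated manifest pairs the tempfs_client role with hadoop_tasktracker on that group.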

Examples of com.vmware.bdd.apitypes.ClusterCreate

            manifest.indexOf("[\"tempfs_client\",\"hadoop_tasktracker\"]") != -1,
            "manifest is inconsistent");
   }

   public void testClusterConfigWithGroupSlave() {
      ClusterCreate spec = new ClusterCreate();
      spec.setName("my-cluster1");
      spec.setNetworkConfig(createNetConfigs());
      List<String> rps = new ArrayList<String>();
      rps.add("myRp2");
      rps.add("myRp3");
      rps.add("myRp4");
      rps.add("myRp5");
      spec.setRpNames(rps);

      NodeGroupCreate[] nodegroups = new NodeGroupCreate[1];
      NodeGroupCreate group = new NodeGroupCreate();
      nodegroups[0] = group;
      group.setCpuNum(3);
      group.setInstanceNum(10);
      group.setInstanceType(InstanceType.SMALL);
      group.setHaFlag("off");
      group.setName("slave");
      List<String> roles = new ArrayList<String>();
      roles.add("hadoop_datanode");
      group.setRoles(roles);

      spec.setNodeGroups(nodegroups);
      clusterConfigMgr.createClusterConfig(spec);

      ClusterEntity cluster = clusterEntityMgr.findByName("my-cluster1");
      Assert.assertTrue(cluster != null);

      ClusterCreate attrs = clusterConfigMgr.getClusterConfig("my-cluster1");
      String manifest = gson.toJson(attrs);
      System.out.println(manifest);
      Assert.assertTrue(
            manifest.indexOf("master") != -1 && manifest.indexOf("slave") != -1,
            "manifest should contains nodegroups");

Examples of com.vmware.bdd.apitypes.ClusterCreate

      }
   }

   public static List<BaseNode> getExistedNodes(String fileName)
         throws Exception {
      ClusterCreate cluster = getSimpleClusterSpec(DC_SPLIT_CLUSTER_SPEC);

      String json = readJson(fileName);
      ObjectMapper mapper = new ObjectMapper();
      List<BaseNode> existedNodes;
      try {
         existedNodes = mapper.readValue(json, new TypeReference<List<BaseNode>>(){});
      } catch (Exception e) {
         logger.error(e.getMessage());
         throw e;
      }
      Assert.assertNotNull(existedNodes);
      for (BaseNode node : existedNodes) {
         node.setCluster(cluster);
         String groupName = node.getVmName().split("-")[1];
         node.setNodeGroup(cluster.getNodeGroup(groupName));
      }

      return existedNodes;
   }
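The group lookup above leans on the Serengeti VM naming convention, which appears to be <cluster>-<group>-<index>: splitting on "-" and taking token 1 only recovers the group name when the cluster name itself contains no dash. A more defensive parse, as a sketch (groupNameOf is a hypothetical helper, not part of the test code):

   // Hypothetical helper: recover the group name from a VM name of the form
   // <cluster>-<group>-<index>, tolerating dashes inside the cluster name.
   static String groupNameOf(String vmName, String clusterName) {
      String rest = vmName.substring(clusterName.length() + 1); // strip "<cluster>-"
      return rest.substring(0, rest.lastIndexOf('-'));          // drop "-<index>"
   }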

Examples of com.vmware.bdd.apitypes.ClusterCreate

                  .indexOf("{\"name\":\"my-cluster1\",\"groups\":[{\"name\":\"expanded_master\",\"roles\":[\"hadoop_namenode\",\"hadoop_jobtracker\"],\"instance_num\":1,\"storage\":{\"type\":\"shared\",\"size\":50},\"cpu\":2,\"memory\":7500,\"ha\":\"on\",\"vm_folder_path\":\"SERENGETI-null/my-cluster1/expanded_master\"},{\"name\":\"slave\",\"roles\":[\"hadoop_datanode\",\"hadoop_tasktracker\"],\"instance_num\":10,\"storage\":{\"type\":\"local\",\"size\":50},\"cpu\":3,\"memory\":3748,\"ha\":\"off\",\"vm_folder_path\":\"SERENGETI-null/my-cluster1/slave\"}],\"distro\":\"apache\",\"vc_clusters\":[{\"name\":\"cluster1\",\"vc_rps\":[\"rp2\"]},{\"name\":\"cluster2\",\"vc_rps\":[\"rp1\",\"rp2\"]},{\"name\":\"cluster4\",\"vc_rps\":[\"rp1\"]}],\"template_id\":\"vm-001\",\"networking\":[{\"port_group\":\"CFNetwork1\",\"type\":\"static\",\"gateway\":\"192.168.1.254\",\"netmask\":\"255.255.0.0\",\"dns\":[\"2.2.2.2\"],\"ip\":[\"192.168.1.1-192.168.1.3\",\"192.168.1.102\",\"192.168.1.104-192.168.1.110\"]}]") != -1,
            "manifest is inconsistent.");
   }

   public void testClusterConfigWithGroupSlave2() {
      ClusterCreate spec = new ClusterCreate();
      spec.setName("my-cluster-slave2");
      spec.setNetworkConfig(createNetConfigs());
      List<String> rps = new ArrayList<String>();
      rps.add("myRp2");
      rps.add("myRp3");
      rps.add("myRp4");
      rps.add("myRp5");
      spec.setRpNames(rps);

      NodeGroupCreate[] nodegroups = new NodeGroupCreate[1];
      NodeGroupCreate group = new NodeGroupCreate();
      nodegroups[0] = group;
      group.setCpuNum(3);
      group.setInstanceNum(10);
      group.setInstanceType(InstanceType.SMALL);
      group.setHaFlag("off");
      group.setName("slave");
      List<String> roles = new ArrayList<String>();
      roles.add("hadoop_tasktracker");
      group.setRoles(roles);

      spec.setNodeGroups(nodegroups);
      clusterConfigMgr.createClusterConfig(spec);

      ClusterEntity cluster = clusterEntityMgr.findByName("my-cluster-slave2");
      Assert.assertTrue(cluster != null);

      ClusterCreate attrs =
            clusterConfigMgr.getClusterConfig("my-cluster-slave2");
      String manifest = gson.toJson(attrs);
      System.out.println(manifest);
      Assert.assertTrue(
            manifest.indexOf("master") != -1 && manifest.indexOf("slave") != -1,

Examples of com.vmware.bdd.apitypes.ClusterCreate

                  .indexOf("{\"name\":\"my-cluster-slave2\",\"groups\":[{\"name\":\"expanded_master\",\"roles\":[\"hadoop_namenode\",\"hadoop_jobtracker\"],\"instance_num\":1,\"storage\":{\"type\":\"shared\",\"size\":50},\"cpu\":2,\"memory\":7500,\"ha\":\"on\",\"vm_folder_path\":\"SERENGETI-null/my-cluster-slave2/expanded_master\"},{\"name\":\"slave\",\"roles\":[\"hadoop_tasktracker\",\"hadoop_datanode\"],\"instance_num\":10,\"storage\":{\"type\":\"local\",\"size\":50},\"cpu\":3,\"memory\":3748,\"ha\":\"off\",\"vm_folder_path\":\"SERENGETI-null/my-cluster-slave2/slave\"}],\"distro\":\"apache\",\"vc_clusters\":[{\"name\":\"cluster1\",\"vc_rps\":[\"rp2\"]},{\"name\":\"cluster2\",\"vc_rps\":[\"rp1\",\"rp2\"]},{\"name\":\"cluster4\",\"vc_rps\":[\"rp1\"]}],\"template_id\":\"vm-001\",\"networking\":[{\"port_group\":\"CFNetwork\",\"type\":\"dhcp\"}]") != -1,
            "manifest is inconsistent.");
   }

   public void testClusterCreateWithGroupMaster() {
      ClusterCreate spec = new ClusterCreate();
      spec.setNetworkConfig(createNetConfigs());
      spec.setName("my-cluster2");
      List<String> rps = new ArrayList<String>();
      rps.add("myRp2");
      rps.add("myRp3");
      rps.add("myRp4");
      rps.add("myRp5");
      spec.setRpNames(rps);

      NodeGroupCreate[] nodegroups = new NodeGroupCreate[1];
      NodeGroupCreate group = new NodeGroupCreate();
      nodegroups[0] = group;
      group.setCpuNum(3);
      group.setInstanceNum(1);
      group.setInstanceType(InstanceType.LARGE);
      group.setHaFlag("off");
      group.setName("main_group");
      List<String> roles = new ArrayList<String>();
      roles.add("hadoop_namenode");
      group.setRoles(roles);

      spec.setNodeGroups(nodegroups);
      clusterConfigMgr.createClusterConfig(spec);

      ClusterEntity cluster = clusterEntityMgr.findByName("my-cluster2");
      Assert.assertTrue(cluster != null);

      ClusterCreate attrs = clusterConfigMgr.getClusterConfig("my-cluster2");
      String manifest = gson.toJson(attrs);
      System.out.println(manifest);
      Assert.assertTrue(
            manifest.indexOf("main_group") != -1
                  && manifest.indexOf("expanded_master") != -1

Examples of com.vmware.bdd.apitypes.ClusterCreate

                  .indexOf("{\"name\":\"my-cluster2\",\"groups\":[{\"name\":\"main_group\",\"roles\":[\"hadoop_namenode\"],\"instance_num\":1,\"storage\":{\"type\":\"shared\",\"size\":100},\"cpu\":3,\"memory\":15000,\"ha\":\"off\",\"vm_folder_path\":\"SERENGETI-null/my-cluster2/main_group\"},{\"name\":\"expanded_master\",\"roles\":[\"hadoop_jobtracker\"],\"instance_num\":1,\"storage\":{\"type\":\"shared\",\"size\":50},\"cpu\":2,\"memory\":7500,\"ha\":\"on\",\"vm_folder_path\":\"SERENGETI-null/my-cluster2/expanded_master\"},{\"name\":\"expanded_worker\",\"roles\":[\"hadoop_datanode\",\"hadoop_tasktracker\"],\"instance_num\":3,\"storage\":{\"type\":\"local\",\"size\":50},\"cpu\":1,\"memory\":3748,\"ha\":\"off\",\"vm_folder_path\":\"SERENGETI-null/my-cluster2/expanded_worker\"}],\"distro\":\"apache\",\"vc_clusters\":[{\"name\":\"cluster1\",\"vc_rps\":[\"rp2\"]},{\"name\":\"cluster2\",\"vc_rps\":[\"rp1\",\"rp2\"]},{\"name\":\"cluster4\",\"vc_rps\":[\"rp1\"]}],\"template_id\":\"vm-001\",\"networking\":[{\"port_group\":\"CFNetwork\",\"type\":\"dhcp\"}]") != -1,
            "manifest is inconsistent");
   }
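
The manifest above also shows what the config manager fills in: the spec declared only main_group with hadoop_namenode, yet the stored config carries an auto-generated expanded_master group (hadoop_jobtracker) and an expanded_worker group (hadoop_datanode plus hadoop_tasktracker), each with default sizing.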

   public void testClusterConfigWithGroupMasterNeg() {
      ClusterCreate spec = new ClusterCreate();
      spec.setNetworkConfig(createNetConfigs());
      spec.setName("my-cluster3");
      List<String> rps = new ArrayList<String>();
      rps.add("myRp2");
      rps.add("myRp3");
      rps.add("myRp4");
      rps.add("myRp5");
      spec.setRpNames(rps);

      NodeGroupCreate[] nodegroups = new NodeGroupCreate[1];
      NodeGroupCreate group = new NodeGroupCreate();
      nodegroups[0] = group;
      group.setCpuNum(3);
      group.setInstanceNum(10);
      group.setInstanceType(InstanceType.LARGE);
      group.setName("main_group");
      List<String> roles = new ArrayList<String>();
      roles.add("hadoop_namenode");
      group.setRoles(roles);

      spec.setNodeGroups(nodegroups);
      try {
         clusterConfigMgr.createClusterConfig(spec);

         ClusterEntity cluster = clusterEntityMgr.findByName("my-cluster3");
         Assert.assertTrue(cluster != null);
         ClusterCreate attrs = clusterConfigMgr.getClusterConfig("my-cluster3");
         String manifest = gson.toJson(attrs);
         System.out.println(manifest);
         Assert.fail("should get exception");
      } catch (BddException e) {
         Assert.assertTrue(true, "get expected exception.");

Examples of com.vmware.bdd.apitypes.ClusterCreate

         Assert.assertTrue(true, "get expected exception.");
      }
   }

   public void testClusterConfigWithGroupMasterNeg1() {
      ClusterCreate spec = new ClusterCreate();
      spec.setNetworkConfig(createNetConfigs());
      spec.setName("my-cluster3");
      List<String> rps = new ArrayList<String>();
      rps.add("myRp2");
      rps.add("myRp3");
      rps.add("myRp4");
      rps.add("myRp5");
      spec.setRpNames(rps);

      NodeGroupCreate[] nodegroups = new NodeGroupCreate[2];
      NodeGroupCreate group = new NodeGroupCreate();
      nodegroups[0] = group;
      group.setCpuNum(3);
      group.setInstanceNum(1);
      group.setInstanceType(InstanceType.LARGE);
      group.setName("main_group");
      List<String> roles = new ArrayList<String>();
      roles.add("hadoop_namenode");
      group.setRoles(roles);

      group = new NodeGroupCreate();
      nodegroups[1] = group;
      group.setCpuNum(3);
      group.setInstanceNum(1);
      group.setInstanceType(InstanceType.LARGE);
      group.setName("main_group1");
      roles = new ArrayList<String>();
      roles.add("hadoop_namenode");
      group.setRoles(roles);

      spec.setNodeGroups(nodegroups);
      try {
         clusterConfigMgr.createClusterConfig(spec);

         ClusterEntity cluster = clusterEntityMgr.findByName("my-cluster3");
         Assert.assertTrue(cluster != null);
         ClusterCreate attrs = clusterConfigMgr.getClusterConfig("my-cluster3");
         String manifest = gson.toJson(attrs);
         System.out.println(manifest);
         Assert.fail("should get exception");
      } catch (BddException e) {
         Assert.assertTrue(e.getErrorId()

Examples of com.vmware.bdd.apitypes.ClusterCreate

      }
   }
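
Both negative tests above exercise master-role validation: the first gives the hadoop_namenode group an instance count of 10, and the second declares hadoop_namenode in two separate groups. In either case createClusterConfig is expected to throw BddException instead of persisting the spec.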

   @Test(groups = { "TestClusterConfigManager" })
   public void testClusterConfigWithClusterStorage() throws Exception {
      ClusterCreate spec = new ClusterCreate();
      spec.setName("my-cluster4");
      spec.setNetworkConfig(createNetConfigs());
      List<String> rps = new ArrayList<String>();
      rps.add("myRp1");
      spec.setRpNames(rps);
      List<String> dsNames = new ArrayList<String>();
      dsNames.add("testSharedStore");
      dsNames.add("testLocalStore");
      spec.setDsNames(dsNames);
      spec.setType(ClusterType.HDFS_MAPRED);
      spec.setDistro("bigtop");
      spec.setDistroVendor(Constants.DEFAULT_VENDOR);
      spec = ClusterSpecFactory.getCustomizedSpec(spec, null);
      clusterConfigMgr.createClusterConfig(spec);

      ClusterEntity cluster = clusterEntityMgr.findClusterById(1L);
      List<ClusterEntity> cs = clusterEntityMgr.findAllClusters();
      for (ClusterEntity c : cs) {
         System.out.println(c.getId());
      }
      cluster = clusterEntityMgr.findByName("my-cluster4");
      Assert.assertTrue(cluster != null);

      ClusterCreate attrs = clusterConfigMgr.getClusterConfig("my-cluster4");
      String manifest = gson.toJson(attrs);
      System.out.println(manifest);
      Assert.assertTrue(manifest.indexOf("master") != -1,
            "manifest should contains nodegroups");
      //      Assert.assertTrue("manifest is inconsistent",
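Unlike the earlier examples, this spec defines no node groups at all: it only names datastores via setDsNames and a cluster type of HDFS_MAPRED, relying on ClusterSpecFactory.getCustomizedSpec to generate the default master and worker groups that the assertion then looks for in the manifest.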