Package org.voltdb.catalog

Examples of org.voltdb.catalog.Cluster


        assertFalse(expected.isEmpty());
       
        Database clone_db = CatalogCloner.cloneDatabase(catalog_db);
        assertNotNull(clone_db);

        Cluster catalog_cluster = CatalogUtil.getCluster(catalog_db);
        assertNotNull(catalog_cluster);
        Cluster clone_cluster = CatalogUtil.getCluster(clone_db);
        assertNotNull(clone_cluster);
        for (Host catalog_host : catalog_cluster.getHosts()) {
            Host clone_host = clone_cluster.getHosts().get(catalog_host.getName());
            assertNotNull(clone_host);
            checkFields(Host.class, catalog_host, clone_host);
        } // FOR

        for (Site catalog_site : catalog_cluster.getSites()) {
            Site clone_site = clone_cluster.getSites().get(catalog_site.getName());
            assertNotNull(clone_site);
            checkFields(Site.class, catalog_site, clone_site);
        }

        assertEquals(NUM_PARTITIONS, CatalogUtil.getNumberOfPartitions(clone_db));
View Full Code Here


    public void testCloneCatalog() throws Exception {
        Catalog clone = CatalogCloner.cloneBaseCatalog(catalog);
        assertNotNull(clone);

        for (Cluster catalog_clus : catalog.getClusters()) {
            Cluster clone_clus = clone.getClusters().get(catalog_clus.getName());
            assertNotNull(clone_clus);
            this.checkFields(Cluster.class, catalog_clus, clone_clus);
        } // FOR (Cluster)
    }
View Full Code Here

            // need real error path
            System.exit(28);
        }
        Catalog catalog = new Catalog();
        catalog.execute(serializedCatalog);
        Cluster cluster = catalog.getClusters().get("cluster");
        Database db = cluster.getDatabases().get("database");
        log("catalog loaded");

        //////////////////////
        // LOAD HSQL
        //////////////////////
View Full Code Here

     * @param a_id
     * @return
     */
    protected static long getDataId(long a_id, AbstractRandomGenerator rng, ExecutionType type, Catalog catalog, Map<String, Long> table_sizes) {
        long a_id2 = -1;
        Cluster catalog_clus = CatalogUtil.getCluster(catalog);
        long temp = LocalityConstants.TABLESIZE_TABLEA / catalog_clus.getNum_partitions();
        int num_aids_per_partition = (int) (Math.floor(temp));
        int random_int = rng.nextInt(num_aids_per_partition);
        switch (type) {
            case SAME_PARTITION: {
                int partition_num = TheHashinator.hashToPartition(a_id, catalog_clus.getNum_partitions());
                System.out.println("Total number of partitions: " + catalog_clus.getNum_partitions());
                a_id2 = random_int * catalog_clus.getNum_partitions() + partition_num;
                break;
            }
            case SAME_SITE: {
                int partition_num = TheHashinator.hashToPartition(a_id, catalog_clus.getNum_partitions());
                Site site = CatalogUtil.getPartitionById(catalog, partition_num).getParent();
                int num_sites_per_host = CatalogUtil.getSitesPerHost(site).get(site.getHost()).size();
                int num_partitions_per_site = site.getPartitions().size();
                double a_id_site_num = Math.floor((double) partition_num / (double) num_partitions_per_site);
                double a_id_host_num = Math.floor((double) a_id_site_num / (double) num_sites_per_host);
                // determine the partition range for the cluster (with a random
                // host and random sites)
                // and pick partition randomly from this range
                int lowerbound = (int) a_id_host_num * num_sites_per_host * num_partitions_per_site + (int) a_id_site_num * num_partitions_per_site;
                int upperbound = (int) a_id_host_num * num_sites_per_host * num_partitions_per_site + (int) a_id_site_num * num_partitions_per_site + (num_partitions_per_site - 1);
                int a_id2_partition_num = rng.numberExcluding(lowerbound, upperbound, partition_num);
                // get a random partition
                a_id2 = random_int * catalog_clus.getNum_partitions() + a_id2_partition_num;
                break;
            }
            case SAME_HOST: {
                int partition_num = TheHashinator.hashToPartition(a_id, catalog_clus.getNum_partitions());
                Site site = CatalogUtil.getPartitionById(catalog, partition_num).getParent();
                int num_sites_per_host = CatalogUtil.getSitesPerHost(site).get(site.getHost()).size();
                int num_partitions_per_site = site.getPartitions().size();

                double a_id_site_num = Math.floor((double) partition_num / (double) num_partitions_per_site);
                double a_id_host_num = Math.floor((double) a_id_site_num / (double) num_sites_per_host);
                int lowerboundsite = (int) a_id_host_num * num_sites_per_host;
                int upperboundsite = (int) a_id_host_num * num_sites_per_host + (num_sites_per_host - 1);
                int new_site = rng.numberExcluding(lowerboundsite, upperboundsite, (int) a_id_site_num);
                int lowerbound = new_site * num_partitions_per_site;
                int upperbound = new_site * num_partitions_per_site + (num_partitions_per_site - 1);
                int a_id2_partition_num = rng.number(lowerbound, upperbound);
                // get a random partition
                a_id2 = random_int * catalog_clus.getNum_partitions() + a_id2_partition_num;
                break;
            }
            case REMOTE_HOST: {
                int total_number_of_hosts = catalog_clus.getHosts().size();
                int partition_num = TheHashinator.hashToPartition(a_id, catalog_clus.getNum_partitions());
                Site site = CatalogUtil.getPartitionById(catalog, partition_num).getParent();
                int num_sites_per_host = CatalogUtil.getSitesPerHost(site).get(site.getHost()).size();
                int num_partitions_per_site = site.getPartitions().size();
                // get the site number the partition exists on
                double a_id_site_num = Math.floor((double) partition_num / (double) num_partitions_per_site);
                double a_id_host_num = Math.floor((double) a_id_site_num / (double) num_sites_per_host);
                int new_host = (int) a_id_host_num;
                if (total_number_of_hosts > 1) {
                    new_host = rng.numberExcluding(0, total_number_of_hosts - 1, (int) a_id_host_num);
                }
                int new_site = rng.number(0, num_sites_per_host - 1);
                // determine the partition range for the cluster (with a random
                // host and random sites)
                // and pick partition randomly from this range
                int lowerbound = new_host * num_sites_per_host * num_partitions_per_site + new_site * num_partitions_per_site;
                int upperbound = new_host * num_sites_per_host * num_partitions_per_site + new_site * num_partitions_per_site + (num_partitions_per_site - 1);
                int a_id2_partition_num = rng.number(lowerbound, upperbound);
                a_id2 = random_int * catalog_clus.getNum_partitions() + a_id2_partition_num;
                break;
            }
            case RANDOM: {
                a_id2 = rng.nextInt(table_sizes.get(LocalityConstants.TABLENAME_TABLEA).intValue());
                break;
View Full Code Here

    /**
     * Initialize TheHashinator
     * @param catalog A pointer to the catalog data structure.
     */
    public static void initialize(Catalog catalog) {
        Cluster cluster = catalog.getClusters().get("cluster");
        partitionCount = cluster.getNum_partitions();
    }
View Full Code Here

     * @param a_id
     * @return
     */
    protected static long getDataId(long a_id,  AbstractRandomGenerator rng, ExecutionType type, Catalog catalog, Map<String, Long> table_sizes) {
        long a_id2 = -1;
        Cluster catalog_clus = CatalogUtil.getCluster(catalog);
        long temp = MapReduceConstants.TABLESIZE_TABLEA / catalog_clus.getNum_partitions();
        int num_aids_per_partition = (int)(Math.floor(temp));
        int random_int = rng.nextInt(num_aids_per_partition);
        switch (type) {
            case SAME_PARTITION: {
                int partition_num = TheHashinator.hashToPartition(a_id, catalog_clus.getNum_partitions());
                System.out.println("Total number of partitions: " + catalog_clus.getNum_partitions());
                a_id2 = random_int * catalog_clus.getNum_partitions() + partition_num;
              break;
            }
            case SAME_SITE: {
                int partition_num = TheHashinator.hashToPartition(a_id, catalog_clus.getNum_partitions());
                Site site = CatalogUtil.getPartitionById(catalog, partition_num).getParent();
                int num_sites_per_host = CatalogUtil.getSitesPerHost(site).get(site.getHost()).size();
                int num_partitions_per_site = site.getPartitions().size();
                double a_id_site_num = Math.floor((double)partition_num / (double)num_partitions_per_site);
                double a_id_host_num = Math.floor((double)a_id_site_num / (double)num_sites_per_host);
                //determine the partition range for the cluster (with a random host and random sites)
                //and pick partition randomly from this range
                int lowerbound = (int)a_id_host_num * num_sites_per_host * num_partitions_per_site + (int)a_id_site_num * num_partitions_per_site;
                int upperbound = (int)a_id_host_num * num_sites_per_host * num_partitions_per_site  + (int)a_id_site_num * num_partitions_per_site + (num_partitions_per_site - 1);
                int a_id2_partition_num = rng.numberExcluding(lowerbound, upperbound, partition_num);
                // get a random partition
                a_id2 = random_int * catalog_clus.getNum_partitions() + a_id2_partition_num;
                break;
            }
            case SAME_HOST: {
                int partition_num = TheHashinator.hashToPartition(a_id, catalog_clus.getNum_partitions());
                Site site = CatalogUtil.getPartitionById(catalog, partition_num).getParent();
                int num_sites_per_host = CatalogUtil.getSitesPerHost(site).get(site.getHost()).size();
                int num_partitions_per_site = site.getPartitions().size();

                double a_id_site_num = Math.floor((double)partition_num / (double)num_partitions_per_site);
                double a_id_host_num = Math.floor((double)a_id_site_num / (double)num_sites_per_host);
                int lowerboundsite = (int)a_id_host_num * num_sites_per_host;
                int upperboundsite = (int)a_id_host_num * num_sites_per_host + (num_sites_per_host - 1);
                int new_site = rng.numberExcluding(lowerboundsite, upperboundsite, (int)a_id_site_num);
                int lowerbound = new_site * num_partitions_per_site;
                int upperbound = new_site * num_partitions_per_site + (num_partitions_per_site - 1);
                int a_id2_partition_num = rng.number(lowerbound, upperbound);
                // get a random partition
                a_id2 = random_int * catalog_clus.getNum_partitions() + a_id2_partition_num;
                break;
            }
            case REMOTE_HOST: {
                int total_number_of_hosts = catalog_clus.getHosts().size();
                int partition_num = TheHashinator.hashToPartition(a_id, catalog_clus.getNum_partitions());
                Site site = CatalogUtil.getPartitionById(catalog, partition_num).getParent();
                int num_sites_per_host = CatalogUtil.getSitesPerHost(site).get(site.getHost()).size();
                int num_partitions_per_site = site.getPartitions().size();
                // get the site number the partition exists on
                double a_id_site_num = Math.floor((double)partition_num / (double)num_partitions_per_site);
                double a_id_host_num = Math.floor((double)a_id_site_num / (double)num_sites_per_host);
                int new_host = (int)a_id_host_num;
                if (total_number_of_hosts > 1)
                {
                    new_host = rng.numberExcluding(0, total_number_of_hosts -1, (int)a_id_host_num);                   
                }
                int new_site = rng.number(0, num_sites_per_host - 1);
                //determine the partition range for the cluster (with a random host and random sites)
                //and pick partition randomly from this range
                int lowerbound = new_host * num_sites_per_host * num_partitions_per_site + new_site * num_partitions_per_site;
                int upperbound = new_host * num_sites_per_host * num_partitions_per_site + new_site * num_partitions_per_site + (num_partitions_per_site - 1);
                int a_id2_partition_num = rng.number(lowerbound, upperbound);
                a_id2 = random_int * catalog_clus.getNum_partitions() + a_id2_partition_num;
                break;
            }
            case RANDOM: {
                a_id2 = rng.nextInt(table_sizes.get(MapReduceConstants.TABLENAME_TABLEA).intValue());
                break;
View Full Code Here

        int hostCount = clusterConfig.getHostCount();
        int partitionCount = clusterConfig.getPartitionCount();
        int sitesPerHost = clusterConfig.getSitesPerHost();

        // add all the hosts
        Cluster cluster = catalog.getClusters().get("cluster");
        cluster.setNum_partitions(partitionCount);
        // set the address of the coordinator
        cluster.setLeaderaddress(clusterConfig.getLeaderAddress().trim());
        for (int i = 0; i < hostCount; i++) {
            Host host = cluster.getHosts().add(String.valueOf(i));
            host.setIpaddr("localhost"); // DEFAULT
        }

        // add all the partitions.
        for (int i = 0; i < partitionCount; ++i) {
            //cluster.getPartitions().add(String.valueOf(i));
        }

        // add all the sites
        int initiatorsPerHost = 1;
        int partitionCounter = -1;
        int nextInitiatorId = 1;
        int siteId = -1;
        for (int i = 0, cnt = (sitesPerHost * hostCount); i < cnt; i++) {

            int hostForSite = i / cnt;
            Host host = cluster.getHosts().get(String.valueOf(hostForSite));
            int hostId = Integer.parseInt(host.getTypeName());

//            int withinHostId = i % (sitesPerHost + initiatorsPerHost);

            //int siteId = hostId * VoltDB.SITES_TO_HOST_DIVISOR;// + withinHostId;

            Site site = cluster.getSites().add(String.valueOf(++siteId));
            site.setId(siteId);
            site.setHost(host);
            site.setProc_port(HStoreConstants.DEFAULT_PORT);
            site.setMessenger_port(HStoreConstants.DEFAULT_PORT + HStoreConstants.MESSENGER_PORT_OFFSET);
            site.setIsup(true);
View Full Code Here

    /**
     * testAddHostInfo
     */
    public void testAddHostInfo() throws Exception {
        Catalog new_catalog = FixCatalog.cloneCatalog(catalog, NUM_HOSTS, NUM_SITES_PER_HOST, NUM_PARTITIONS_PER_SITE);
        Cluster catalog_clus = CatalogUtil.getCluster(new_catalog);
        assertEquals(NUM_PARTITIONS_PER_SITE * NUM_SITES_PER_HOST * NUM_HOSTS, CatalogUtil.getNumberOfPartitions(new_catalog));

        Set<Host> seen_hosts = new HashSet<Host>();
        Set<Site> seen_sites = new HashSet<Site>();
        Set<Partition> seen_partitions = new HashSet<Partition>();

        Set<Integer> seen_site_ids = new HashSet<Integer>();
        Set<Integer> seen_partition_ids = new HashSet<Integer>();

        for (Host catalog_host : catalog_clus.getHosts()) {
            assertNotNull(catalog_host);
            Collection<Site> sites = CatalogUtil.getSitesForHost(catalog_host);
            assertEquals(sites.toString(), NUM_SITES_PER_HOST, sites.size());

            for (Site catalog_site : sites) {
                assertEquals(catalog_host, catalog_site.getHost());
                assertEquals(NUM_PARTITIONS_PER_SITE, catalog_site.getPartitions().size());
                assertFalse(seen_site_ids.contains(catalog_site.getId()));

                for (Partition catalog_part : catalog_site.getPartitions()) {
                    assertNotNull(catalog_part);
                    assertFalse(catalog_part.toString(), seen_partitions.contains(catalog_part));
                    assertFalse(seen_partition_ids.contains(catalog_part.getId()));
                    seen_partitions.add(catalog_part);
                    seen_partition_ids.add(catalog_part.getId());
                } // FOR (partitions)
                seen_sites.add(catalog_site);
            } // FOR (sites)
            seen_hosts.add(catalog_host);
        } // FOR (hosts)
        assertEquals(NUM_HOSTS, seen_hosts.size());
        assertEquals(NUM_HOSTS * NUM_SITES_PER_HOST, seen_sites.size());
        assertEquals(NUM_HOSTS * NUM_SITES_PER_HOST * NUM_PARTITIONS_PER_SITE, seen_partitions.size());
        assertEquals(seen_partition_ids.size(), catalog_clus.getNum_partitions());
    }
View Full Code Here

            m_catalog = new Catalog();
            m_catalog.execute("add / clusters " + m_clusterName);
            m_catalog.execute("add " + m_catalog.getClusters().get(m_clusterName).getPath() + " databases " +
                    m_databaseName);
            Cluster cluster = m_catalog.getClusters().get(m_clusterName);
            // Set a sane default for TestMessaging (at least)
            cluster.setHeartbeattimeout(10000);
            assert(cluster != null);

            try {
                m_hostMessenger.start();
            } catch (Exception e) {
View Full Code Here

            "   </users>" +
            "</deployment>";

        final File tmpSecOff = VoltProjectBuilder.writeStringToTempFile(secOff);
        CatalogUtil.compileDeployment(catalog, tmpSecOff.getPath(), true, false);
        Cluster cluster =  catalog.getClusters().get("cluster");
        assertFalse(cluster.getSecurityenabled());

        setUp();
        final File tmpSecOn = VoltProjectBuilder.writeStringToTempFile(secOn);
        CatalogUtil.compileDeployment(catalog, tmpSecOn.getPath(), true, false);
        cluster =  catalog.getClusters().get("cluster");
        assertTrue(cluster.getSecurityenabled());
    }
View Full Code Here

TOP

Related Classes of org.voltdb.catalog.Cluster

Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc and owned by ORACLE Inc. Contact coftware#gmail.com.