Package edu.brown.costmodel

Examples of edu.brown.costmodel.SingleSitedCostModel$TransactionCacheEntry
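The snippets below exercise TransactionCacheEntry through SingleSitedCostModel. As a minimal orientation sketch (hypothetical variable names; a populated catalogContext and workload are assumed, and only calls that also appear in the snippets below are used), the cache entries are typically obtained like this:

    SingleSitedCostModel costModel = new SingleSitedCostModel(catalogContext);
    costModel.setCachingEnabled(true);
    // Populate the cache by costing the workload
    costModel.estimateWorkloadCost(catalogContext, workload);
    // Inspect the per-transaction cache entries
    for (SingleSitedCostModel.TransactionCacheEntry txn_entry : costModel.getTransactionCacheEntries()) {
        if (txn_entry.isSinglePartitioned() == false) {
            System.err.println(txn_entry.getTransactionId() + ": " +
                               txn_entry.getMultiSiteQueryCount() + " multi-site queries out of " +
                               txn_entry.getTotalQueryCount());
        }
    } // FOR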


    @Override
    protected void setUp() throws Exception {
        super.setUp(ProjectType.TM1, true);
       
        // BasePartitionerTestCase will setup most of what we need
        this.info.setCostModel(new SingleSitedCostModel(catalogContext));
        this.info.setPartitionerClass(MockPartitioner.class);
        assertNotNull(info.getStats());
       
        this.designer = new Designer(this.info, this.hints, this.info.getArgs());
        this.partitioner = (MockPartitioner) this.designer.getPartitioner();


    public void testSingleSitedCostModel() throws Exception {
        Database clone_db = CatalogCloner.cloneDatabase(catalog_db);
        CatalogContext clone_catalogContext = new CatalogContext(clone_db.getCatalog());
       
        info = this.generateInfo(clone_catalogContext);
        SingleSitedCostModel costModel = new SingleSitedCostModel(clone_catalogContext);
        costModel.setCachingEnabled(true);
       
        Table catalog_tbl = this.getTable(clone_db, TM1Constants.TABLENAME_SUBSCRIBER);
        Column target_col = this.getColumn(catalog_tbl, "S_ID");
        Collection<VerticalPartitionColumn> candidates = VerticalPartitionerUtil.generateCandidates(target_col, info.stats);
        assertNotNull(candidates);
        assertFalse(candidates.isEmpty());
        VerticalPartitionColumn vpc = CollectionUtil.first(candidates);
        assertNotNull(vpc);
        assertFalse(vpc.isUpdateApplied());

        // Create a filter that only has the procedures that will be optimized by our VerticalPartitionColumn
        ProcedureNameFilter filter = new ProcedureNameFilter(false);
        for (Statement catalog_stmt : vpc.getOptimizedQueries()) {
            filter.include(catalog_stmt.getParent().getName(), 1);
        } // FOR
       
        // Calculate the cost *BEFORE* applying the vertical partition optimization
        double expected_cost = costModel.estimateWorkloadCost(clone_catalogContext, workload, filter, null);
        System.err.println("ORIGINAL COST: " + expected_cost);
        Map<Long, TransactionCacheEntry> expected_entries = new HashMap<Long, TransactionCacheEntry>();
        for (TransactionCacheEntry txn_entry : costModel.getTransactionCacheEntries()) {
            // There should be no unknown queries and all transactions should be multi-sited
            assertEquals(txn_entry.toString(), 0, txn_entry.getUnknownQueryCount());
            assertFalse(txn_entry.isSinglePartitioned());
           
            TransactionCacheEntry clone = (TransactionCacheEntry)txn_entry.clone();
            assertNotSame(txn_entry, clone);
            expected_entries.put(txn_entry.getTransactionId(), clone);
            // System.err.println(StringUtil.columns(txn_entry.debug(), clone.debug()));
            // System.err.println(StringUtil.SINGLE_LINE);
        } // FOR
        assertFalse(expected_entries.isEmpty());
        for (Statement catalog_stmt : vpc.getOptimizedQueries()) {
            Collection<QueryCacheEntry> entries = costModel.getQueryCacheEntries(catalog_stmt);
            assertNotNull(entries);
            assertFalse(entries.isEmpty());
        } // FOR
       
        // Now apply the update and get the new cost. We don't care what the new cost
        // actually is, because SingleSitedCostModel only checks whether a txn is
        // single-partitioned, not how many partitions it actually touches.
        // We have to clear the cache for these queries first though
        vpc.applyUpdate();
        costModel.invalidateCache(vpc.getOptimizedQueries());
        double new_cost = costModel.estimateWorkloadCost(clone_catalogContext, workload, filter, null);
        System.err.println("NEW COST: " + new_cost);
        Collection<TransactionCacheEntry> new_entries = costModel.getTransactionCacheEntries();
        assertNotNull(new_entries);
        assertEquals(expected_entries.size(), new_entries.size());
        for (TransactionCacheEntry txn_entry : costModel.getTransactionCacheEntries()) {
            TransactionCacheEntry expected = expected_entries.get(txn_entry.getTransactionId());
            assertNotNull(expected);
           
            assertEquals(expected.getUnknownQueryCount(), txn_entry.getUnknownQueryCount());
            assertEquals(expected.getExaminedQueryCount(), txn_entry.getExaminedQueryCount());
            assertEquals(expected.getTotalQueryCount(), txn_entry.getTotalQueryCount());
            assertEquals(expected.getExecutionPartition(), txn_entry.getExecutionPartition());
           
            assertThat(expected.getMultiSiteQueryCount(), not(equalTo(txn_entry.getMultiSiteQueryCount())));
            assertThat(expected.getSingleSiteQueryCount(), not(equalTo(txn_entry.getSingleSiteQueryCount())));
           
            // Each query should now touch only a single partition
            for (QueryCacheEntry query_entry : costModel.getQueryCacheEntries(txn_entry.getTransactionId())) {
                assertNotNull(query_entry);
                assertFalse(query_entry.isInvalid());
                assertFalse(query_entry.isUnknown());
                assertEquals(1, query_entry.getAllPartitions().size());
            } // FOR

        //
        // Unfortunately at this point we have to run this through again in order
        // to figure out where the multi-site transactions are going.
        //
        SingleSitedCostModel cost_model = new SingleSitedCostModel(info.catalogContext);
        LOG.info("Generating cost model information for given PartitionPlan");
        cost_model.estimateWorkloadCost(info.catalogContext, this.info.workload);

        int num_partitions = info.catalogContext.numberOfPartitions;
        AbstractHasher hasher = new DefaultHasher(info.catalogContext, num_partitions);

        Collection<Table> roots = pplan.getNonReplicatedRoots();
        Map<Table, List<Integer>> table_partition_values = new HashMap<Table, List<Integer>>();
        for (Table catalog_tbl : info.catalogContext.database.getTables()) {
            table_partition_values.put(catalog_tbl, new ArrayList<Integer>());
            if (roots.contains(catalog_tbl)) {
                this.table_histogram_xref.put(catalog_tbl, new HashSet<FragmentAffinity>());
            }
        } // FOR

        // ----------------------------------------------------------
        // Affinity Histograms
        // ----------------------------------------------------------

        //
        // Construct a mapping from pairs of roots to an affinity histogram.
        // This allows us to keep track of the affinity between the fragments
        // of each pair of partition tree roots.
        //
        for (Table root0 : roots) {
            for (Table root1 : roots) {
                Pair<Table, Table> pair = new Pair<Table, Table>(root0, root1);
                FragmentAffinity affinity0 = new FragmentAffinity(root0);
                this.partition_histograms.put(pair, affinity0);
                this.table_histogram_xref.get(root0).add(affinity0);

                if (!root0.equals(root1)) {
                    pair = new Pair<Table, Table>(root1, root0);
                    FragmentAffinity affinity1 = new FragmentAffinity(root1);
                    this.partition_histograms.put(pair, affinity1);
                    this.table_histogram_xref.get(root1).add(affinity1);
                }
            } // FOR
        } // FOR

        //
        // For each transaction that is not single-sited, figure out what
        // partitions it wants to go to.
        //
        LOG.info("Generating affinity information for roots " + roots);
        Set<TransactionTrace> multisite_xacts = new HashSet<TransactionTrace>();
        int xact_ctr = 0;
        for (AbstractTraceElement<?> element : this.info.workload) {
            if (!(element instanceof TransactionTrace))
                continue;
            TransactionTrace xact = (TransactionTrace) element;
            SingleSitedCostModel.TransactionCacheEntry xact_cost = cost_model.getTransactionCacheEntry(xact);
            assert(xact_cost != null) : "Missing TransactionCacheEntry for " + xact;
            if (xact_cost.isSinglePartitioned())
                continue;
            multisite_xacts.add(xact);

            // System.out.println(xact + "\n" + xact_cost);

            for (List<Integer> values : table_partition_values.values()) {
                values.clear();
            } // FOR

            //
            // There are two types of multi-site transactions:
            //  (1) All of the individual queries are single-sited, but the
            //      transaction as a whole is multi-sited.
            //  (2) One or more of the queries are themselves multi-sited.
            // A transaction may be a combination of both of these.
            //
            // Furthermore, a multi-site transaction may be multi-sited because it either:
            //  (1) Uses data in separate partition trees (at this point we don't know
            //      whether the fragments from the different trees are on the same site), or
            //  (2) Uses data from the same partition tree, but based on different
            //      values of the partitioning attribute.
            //
            // So now we need to look at each query and determine what fragments
            // it wants to go to.
            //

            for (SingleSitedCostModel.QueryCacheEntry query_cost : cost_model.getQueryCacheEntries(xact)) {
                // QueryTrace query =
                // (QueryTrace)info.workload.getTraceObject(query_cost.getQueryId());
                // assert(query != null);
                // System.out.println(query + "\n" + query_cost + "\n");
                //
