Package org.rhq.core.util

Examples of org.rhq.core.util.StopWatch
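The snippets below are drawn from the RHQ codebase and show the typical uses of StopWatch: timing database maintenance commands, data compression and purging, availability-report merging, and inventory synchronization. The usages suggest a small API: the clock starts when the StopWatch is constructed, getElapsed() returns the elapsed time, reset() restarts the clock, and markTimeBegin(String)/markTimeEnd(String) record named intervals that toString() renders. A minimal sketch of the basic pattern (the millisecond unit for getElapsed() is an assumption, inferred from the "watch.getElapsed() / SECOND" logging in the examples below):

import org.rhq.core.util.StopWatch;

public class StopWatchBasics {
    public static void main(String[] args) throws InterruptedException {
        StopWatch watch = new StopWatch(); // assumption: timing starts at construction

        Thread.sleep(250); // stand-in for real work

        // assumption: getElapsed() returns milliseconds
        System.out.println("work took " + watch.getElapsed() + "ms");
    }
}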



    private long doCommand(DatabaseType dbtype, Connection conn, String command, String table) {
        Statement stmt = null;
        StopWatch watch = new StopWatch();

        if (table == null) {
            table = "";
        }

        command = command.replace("{0}", table);

        if (LOG.isDebugEnabled()) {
            LOG.debug("Execute command: " + command);
        }

        try {
            stmt = conn.createStatement();
            stmt.execute(command);
            return watch.getElapsed();
        } catch (SQLException e) {
            LOG.error("Error in command: " + command + ": " + e, e);
            return watch.getElapsed();
        } finally {
            dbtype.closeStatement(stmt);
        }
    }
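doCommand returns the elapsed time from both the success and the failure path, so callers always get a duration for the attempt; note also that the watch is constructed before the command string is built and logged, so that small overhead is included in the measurement. A condensed sketch of the same shape, using try-with-resources in place of the explicit closeStatement() call:

import java.sql.Connection;
import java.sql.SQLException;
import java.sql.Statement;

import org.rhq.core.util.StopWatch;

public class TimedCommand {
    /** Executes the command and returns how long the attempt took, success or not. */
    static long timeCommand(Connection conn, String command) {
        StopWatch watch = new StopWatch();
        try (Statement stmt = conn.createStatement()) {
            stmt.execute(command);
        } catch (SQLException e) {
            System.err.println("Error in command: " + command + ": " + e);
        }
        return watch.getElapsed();
    }
}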


        log.info("Begin compressing data from table [" + fromTable + "] to table [" + toTable + "] between ["
            + TimeUtil.toString(begin) + "] and [" + TimeUtil.toString(end) + "]");

        int rows = 0;
        StopWatch watch = new StopWatch();
        try {
            conn = dataSource.getConnection();

            // One special case. If we are compressing from an
            // already compressed table, we'll take the MIN and
            // MAX from the already calculated min and max columns.
            String minMax;
            if (MeasurementDataManagerUtility.isRawTable(fromTable)) {
                minMax = "AVG(value), MIN(value), MAX(value) ";
            } else {
                minMax = "AVG(value), MIN(minvalue), MAX(maxvalue) ";
            }

            // TODO GH: Why does this do each schedule separately?
            insStmt = conn.prepareStatement("INSERT INTO " + toTable + " (SELECT ?, ft.schedule_id, " + minMax
                + "  FROM " + fromTable + " ft " + "  WHERE ft.time_stamp >= ? AND ft.time_stamp < ? "
                + "  GROUP BY ft.schedule_id)");

            // Compress
            insStmt.setLong(1, begin);
            insStmt.setLong(2, begin);
            insStmt.setLong(3, end);

            watch.reset();
            rows = insStmt.executeUpdate();

        } finally {
            JDBCUtil.safeClose(conn, insStmt, null);
        }

        log.info("Finished compressing data from table [" + fromTable + "] to table [" + toTable + "] between ["
            + TimeUtil.toString(begin) + "] and [" + TimeUtil.toString(end) + "], [" + rows + "] compressed rows in ["
            + (watch.getElapsed() / SECOND) + "] seconds");

        return rows;
    }
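The watch.reset() just before executeUpdate() is the detail to notice: the watch was started before the connection was obtained and the statement prepared, and the reset restarts the clock so the logged duration covers only the INSERT ... SELECT itself. The pattern in isolation:

import org.rhq.core.util.StopWatch;

public class ResetBeforeMeasuring {
    public static void main(String[] args) throws InterruptedException {
        StopWatch watch = new StopWatch();

        Thread.sleep(100); // setup we do not want to measure (e.g. getting a connection)

        watch.reset(); // restart the clock: only the work below is timed
        Thread.sleep(200); // the operation we actually care about

        System.out.println("measured work: " + watch.getElapsed() + "ms");
    }
}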

        PreparedStatement stmt = null;

        log.info("Begin purging data from table [" + tableName + "] between [" + TimeUtil.toString(purgeAfter)
            + "] and [" + TimeUtil.toString(purgeBefore) + "]");

        StopWatch watch = new StopWatch();
        int rows;
        try {
            conn = dataSource.getConnection();

            String sql = "DELETE FROM " + tableName + " WHERE time_stamp >= ? AND time_stamp < ?";

            stmt = conn.prepareStatement(sql);
            stmt.setLong(1, purgeAfter);
            stmt.setLong(2, purgeBefore);

            rows = stmt.executeUpdate();
        } finally {
            JDBCUtil.safeClose(conn, stmt, null);
        }

        MeasurementMonitor.getMBean().incrementPurgeTime(watch.getElapsed());

        log.info("Finished purging data from table [" + tableName + "] between [" + TimeUtil.toString(purgeAfter)
            + "] and [" + TimeUtil.toString(purgeBefore) + "], [" + rows + "] rows removed in ["
            + ((watch.getElapsed()) / SECOND) + "] seconds");

        return rows;
    }
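Both the compression and the purge methods log watch.getElapsed() / SECOND to report whole seconds. Assuming SECOND is the usual 1000 ms constant, the division is integer division, so anything under a second logs as 0:

import org.rhq.core.util.StopWatch;

public class ElapsedToSeconds {
    private static final long SECOND = 1000L; // assumption: matches the SECOND constant used above

    public static void main(String[] args) throws InterruptedException {
        StopWatch watch = new StopWatch();
        Thread.sleep(1500);
        // integer division truncates: 1500 ms is reported as 1 second
        System.out.println("[" + (watch.getElapsed() / SECOND) + "] seconds");
    }
}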

    public void truncateMeasurements(String tableName) throws SQLException {
        // Make sure we only truncate the dead table... other tables may have live data in them
        if (tableName.equals(MeasurementDataManagerUtility.getDeadTable(System.currentTimeMillis()))) {
            Connection conn = null;
            Statement stmt = null;
            StopWatch watch = new StopWatch();
            try {
                conn = dataSource.getConnection();
                stmt = conn.createStatement();
                long startTime = System.currentTimeMillis();
                stmt.executeUpdate("TRUNCATE TABLE " + tableName);
                MeasurementMonitor.getMBean().incrementPurgeTime(System.currentTimeMillis() - startTime);
            } finally {
                JDBCUtil.safeClose(conn, stmt, null);
                log.info("Truncated table [" + tableName + "] in [" + (watch.getElapsed() / SECOND) + "] seconds");
            }
        }
    }
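truncateMeasurements uses two clocks: System.currentTimeMillis() feeds the purge-time metric, while the StopWatch drives the log statement in the finally block, so the duration is logged even when the TRUNCATE throws. A sketch of logging elapsed time from finally (riskyWork is a hypothetical stand-in for the TRUNCATE):

import org.rhq.core.util.StopWatch;

public class LogFromFinally {
    public static void main(String[] args) {
        StopWatch watch = new StopWatch();
        try {
            riskyWork();
        } finally {
            // runs whether riskyWork() returned normally or threw
            System.out.println("attempt took " + watch.getElapsed() + "ms");
        }
    }

    private static void riskyWork() {
        // stand-in for the TRUNCATE TABLE statement
    }
}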

    @Override
    @TransactionAttribute(TransactionAttributeType.NEVER)
    public boolean mergeAvailabilityReport(AvailabilityReport report) {
        int reportSize = report.getResourceAvailability().size();
        String agentName = report.getAgentName();
        StopWatch watch = new StopWatch();

        if (reportSize == 0) {
            log.error("Agent [" + agentName + "] sent an empty availability report.  This is a bug, please report it");
            return true; // even though this report is bogus, do not ask for an immediate full report to avoid unusual infinite recursion due to this error condition

        } else if (log.isDebugEnabled()) {
            log.debug("Agent [" + agentName + "]: processing availability report of size: " + reportSize);
        }

        // translate data into Availability objects for downstream processing
        List<Availability> availabilities = new ArrayList<Availability>(report.getResourceAvailability().size());
        for (AvailabilityReport.Datum datum : report.getResourceAvailability()) {
            availabilities.add(new Availability(new Resource(datum.getResourceId()), datum.getStartTime(), datum
                .getAvailabilityType()));
        }

        Integer agentToUpdate = agentManager.getAgentIdByName(agentName);
        MergeInfo mergeInfo = new MergeInfo(report);

        // For agent reports (not a server-side report)
        if (!report.isServerSideReport() && agentToUpdate != null) {
            // if this is a changes-only report, and the agent appears backfilled, then immediately request and process
            // a full report because, obviously, the agent is no longer down but the server thinks it still is down -
            // we need to know the availabilities for all the resources on that agent
            if (report.isChangesOnlyReport() && agentManager.isAgentBackfilled(agentToUpdate.intValue())) {
                mergeInfo.setAskForFullReport(true);
            }

            // update the lastAvailReport time and unset the backfill flag if it is set.
            availabilityManager.updateLastAvailabilityReportInNewTransaction(agentToUpdate.intValue());

        }

        // process the report in batches to avoid an overly long transaction and to potentially increase the
        // speed with which an avail change becomes visible.

        while (!availabilities.isEmpty()) {
            int size = availabilities.size();
            int end = (MERGE_BATCH_SIZE < size) ? MERGE_BATCH_SIZE : size;

            List<Availability> availBatch = availabilities.subList(0, end);
            availabilityManager.mergeAvailabilitiesInNewTransaction(availBatch, mergeInfo);

            // Advance our progress and possibly help GC. This will remove the processed avails from the backing list
            availBatch.clear();
        }

        MeasurementMonitor.getMBean().incrementAvailabilityReports(report.isChangesOnlyReport());
        MeasurementMonitor.getMBean().incrementAvailabilitiesInserted(mergeInfo.getNumInserted());
        MeasurementMonitor.getMBean().incrementAvailabilityInsertTime(watch.getElapsed());
        watch.reset();

        if (!report.isServerSideReport()) {
            if (agentToUpdate != null) {
                // don't bother asking for a full report if the one we are currently processing is already full
                if (mergeInfo.isAskForFullReport() && report.isChangesOnlyReport()) {
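mergeAvailabilityReport times the whole batch-merge loop, records the total through incrementAvailabilityInsertTime(watch.getElapsed()), and then calls watch.reset() so the same watch can time whatever the method does next. The record-then-reset rhythm, with a plain counter standing in for the MBean:

import org.rhq.core.util.StopWatch;

public class RecordThenReset {
    static long insertTimeMs; // hypothetical stand-in for the MeasurementMonitor counter

    public static void main(String[] args) throws InterruptedException {
        StopWatch watch = new StopWatch();

        Thread.sleep(150); // phase 1: merge the availability batches
        insertTimeMs += watch.getElapsed(); // record phase 1
        watch.reset(); // reuse the same watch for the next phase

        Thread.sleep(100); // phase 2: follow-up processing
        System.out.println("phase 2: " + watch.getElapsed() + "ms, recorded total: " + insertTimeMs + "ms");
    }
}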

        /////
        // Now we need to loop over batches of the resource ID list - asking the server for their resource representations.
        // When we get the resources from the server, we put them in our resourceMap, keyed on ID.

        final boolean isDebugEnabled = log.isDebugEnabled();
        final StopWatch stopWatch = new StopWatch();
        String marker = null;
        Map<Integer, Resource> resourceMap = new HashMap<Integer, Resource>(resourceIdList.size());
        int batchNumber = 0;
        while (!resourceIdList.isEmpty()) {
            // Our current batch starts at the head of the list, but
            // we need to determine how big our current batch should be and where in the list of IDs that batch ends
            int size = resourceIdList.size();
            int end = (SYNC_BATCH_SIZE < size) ? SYNC_BATCH_SIZE : size;
            batchNumber++;

            // Determine the content of our current batch - this is simply a sublist of our IDs list.
            // Note that we use .clear() once we get the batch array in order to advance our progress and help GC.
            // This usage of .clear() will remove the processed resources from the backing list.
            String markerPrefix = null;
            if (isDebugEnabled) {
                markerPrefix = String.format("a. Batch [%03d] (%d): ", batchNumber, syncInfos.size());
                marker = String.format("%sGet resource ID sublist - %d of %d remaining", markerPrefix, end, size);
                stopWatch.markTimeBegin(marker);
            }

            List<Integer> resourceIdBatch = resourceIdList.subList(0, end);
            Integer[] resourceIdArray = resourceIdBatch.toArray(new Integer[resourceIdBatch.size()]);

            resourceIdBatch.clear();

            if (isDebugEnabled) {
                stopWatch.markTimeEnd(marker);

                marker = markerPrefix + "Get sublist of resources from server";
                stopWatch.markTimeBegin(marker);
            }

            // Ask the server for the resource representation of all resource IDs in our batch.
            // This is a potentially expensive operation depending on the size of the batch and the content of the resources.
            List<Resource> resourceBatch = configuration.getServerServices().getDiscoveryServerService()
                .getResourcesAsList(resourceIdArray);

            if (isDebugEnabled) {
                stopWatch.markTimeEnd(marker);

                marker = markerPrefix + "Store sublist of resources to map";
                stopWatch.markTimeBegin(marker);
            }

            // Now that the server told us the resources in our batch, we add them to our master map.
            // Note our usage of clear on the batch - this is to help GC.
            for (Resource r : resourceBatch) {
                // protect against downstream code that assumes childResources is non-null
                if (null == r.getChildResources()) {
                    r.setChildResources(new CopyOnWriteArraySet<Resource>()); // initializes to an empty, thread-safe set
                }
                compactResource(r);
                resourceMap.put(r.getId(), r);
            }
            resourceBatch.clear();

            if (isDebugEnabled) {
                stopWatch.markTimeEnd(marker);
            }
        }

        if (syncInfos.size() != resourceMap.size()) {
            log.warn("Expected [" + syncInfos.size() + "] but found [" + resourceMap.size()
                + "] resources when fetching from server");
        }

        /////
        // We now have all the resources associated with all sync infos in a map.
        // We need to build the full resource tree using the resource parent info as the blueprint for how to
        // link the resources in the tree.
        if (isDebugEnabled) {
            marker = "b. Build the resource hierarchies";
            stopWatch.markTimeBegin(marker);
        }

        // The root resources to be merged (i.e. the resources whose parents are not in the map)
        Set<Resource> result = new HashSet<Resource>();
        for (Resource resource : resourceMap.values()) {
            if (null == resource.getParentResource()) {
                result.add(resource); // the platform, make sure we have this
                continue;
            }
            Resource parent = resourceMap.get(resource.getParentResource().getId());
            if (null != parent) {
                parent.addChildResource(resource);
            } else {
                result.add(resource);
            }
        }

        if (isDebugEnabled) {
            stopWatch.markTimeEnd(marker);

            log.debug("Resource trees built from map - performance: " + stopWatch);
        }

        return result;
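This synchronization code uses StopWatch's named-interval API: markTimeBegin(marker) and markTimeEnd(marker) bracket each phase, and the per-marker timings are rendered when the watch itself is logged. Everything, including building the marker strings, is guarded by isDebugEnabled so the bookkeeping costs nothing in production. A minimal sketch of the marker pattern (assuming toString() prints the per-marker breakdown, as the debug log above implies):

import org.rhq.core.util.StopWatch;

public class NamedIntervals {
    public static void main(String[] args) throws InterruptedException {
        StopWatch stopWatch = new StopWatch();

        stopWatch.markTimeBegin("fetch batch");
        Thread.sleep(120); // stand-in for the server round trip
        stopWatch.markTimeEnd("fetch batch");

        stopWatch.markTimeBegin("store batch");
        Thread.sleep(40); // stand-in for filling the resource map
        stopWatch.markTimeEnd("store batch");

        System.out.println("performance: " + stopWatch);
    }
}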

    @TransactionAttribute(REQUIRES_NEW)
    public void purgeByDriftDefinitionName(Subject subject, int resourceId, String driftDefName) throws Exception {

        int driftsDeleted;
        int changeSetsDeleted;
        StopWatch timer = new StopWatch();

        // purge all drift entities first
        Query q = entityManager.createNamedQuery(JPADrift.QUERY_DELETE_BY_DRIFTDEF_RESOURCE);
        q.setParameter("resourceId", resourceId);
        q.setParameter("driftDefinitionName", driftDefName);
        driftsDeleted = q.executeUpdate();

        // delete the drift set
        //        JPADriftChangeSet changeSet = entityManager.createQuery(
        //            "select c from JPADriftChangeSet c where c.version = 0 and c.driftDefinition")

        // now purge all changesets
        q = entityManager.createNamedQuery(JPADriftChangeSet.QUERY_DELETE_BY_DRIFTDEF_RESOURCE);
        q.setParameter("resourceId", resourceId);
        q.setParameter("driftDefinitionName", driftDefName);
        changeSetsDeleted = q.executeUpdate();

        LOG.info("Purged [" + driftsDeleted + "] drift items and [" + changeSetsDeleted
                + "] changesets associated with drift def [" + driftDefName + "] from resource [" + resourceId
                + "]. Elapsed time=[" + timer.getElapsed() + "]ms");
    }
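Here a single watch spans both bulk deletes, and the elapsed time is logged in raw milliseconds rather than divided down to seconds. The same purge, condensed (the JPA wiring and the domain-class packages are assumptions; the named-query constants are the ones referenced above):

import javax.persistence.EntityManager;

import org.rhq.core.domain.drift.JPADrift;          // assumed package for the named-query constants
import org.rhq.core.domain.drift.JPADriftChangeSet; // assumed package
import org.rhq.core.util.StopWatch;

public class TimedPurge {
    static void purge(EntityManager em, int resourceId, String driftDefName) {
        StopWatch timer = new StopWatch();

        int driftsDeleted = em.createNamedQuery(JPADrift.QUERY_DELETE_BY_DRIFTDEF_RESOURCE)
            .setParameter("resourceId", resourceId)
            .setParameter("driftDefinitionName", driftDefName)
            .executeUpdate();

        int changeSetsDeleted = em.createNamedQuery(JPADriftChangeSet.QUERY_DELETE_BY_DRIFTDEF_RESOURCE)
            .setParameter("resourceId", resourceId)
            .setParameter("driftDefinitionName", driftDefName)
            .executeUpdate();

        System.out.println("Purged [" + driftsDeleted + "] drifts and [" + changeSetsDeleted
            + "] changesets in [" + timer.getElapsed() + "]ms");
    }
}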

    @Override
    public ActionForward execute(ComponentContext context, ActionMapping mapping, ActionForm form,
        HttpServletRequest request, HttpServletResponse response) throws Exception {

        try {
            StopWatch timer = new StopWatch();
            Log timingLog = LogFactory.getLog("DASHBOARD-TIMING");

            ResourceHubForm hubForm = (ResourceHubForm) form;
            for (ResourceCategory category : ResourceCategory.values()) {
                hubForm.addFunction(new LabelValueBean(category.name(), category.name()));
            }

            timingLog.trace("SearchHubPrepare - timing [" + timer.toString() + "]");
        } catch (Exception e) {
            if (log.isDebugEnabled()) {
                log.debug("Dashboard Portlet [SavedQueries] experienced an error: " + e.getMessage(), e);
            } else {
                log.error("Dashboard Portlet [SavedQueries] experienced an error: " + e.getMessage());
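This Struts action logs the timer itself ("timing [" + timer.toString() + "]") to a dedicated DASHBOARD-TIMING logger at trace level, so timing output can be enabled per category without touching the main logs. A sketch of that dedicated-timing-logger idea (the logger name is taken from the snippet; commons-logging as used above):

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

import org.rhq.core.util.StopWatch;

public class TimingLoggerExample {
    private static final Log timingLog = LogFactory.getLog("DASHBOARD-TIMING");

    public static void main(String[] args) throws InterruptedException {
        StopWatch timer = new StopWatch();
        Thread.sleep(75); // stand-in for preparing the form
        // a dedicated trace-level category keeps timing noise out of the
        // regular logs unless explicitly enabled
        timingLog.trace("SearchHubPrepare - timing [" + timer + "]");
    }
}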
