Package bixo.datum

Examples of bixo.datum.StatusDatum
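
StatusDatum wraps the per-URL outcome of a fetch attempt: the URL, a UrlStatus, the status time, optional HTTP headers, an optional BaseFetchException, and any payload values carried along with the URL. Most of the examples below share the same read-back pattern: open a Tap written with StatusDatum.FIELDS, iterate its TupleEntry values, and wrap each entry in a StatusDatum. The following is a minimal sketch of that shared skeleton, using only calls that appear in the examples; the Tap and FlowProcess are assumed to come from the surrounding test harness.

    import bixo.datum.StatusDatum;
    import bixo.datum.UrlStatus;

    import cascading.flow.FlowProcess;
    import cascading.tap.Tap;
    import cascading.tuple.TupleEntry;
    import cascading.tuple.TupleEntryIterator;

    public class StatusDatumScan {

        // Count how many status entries ended up as FETCHED.
        // statusTap is assumed to have been written with StatusDatum.FIELDS.
        public static int countFetched(Tap statusTap, FlowProcess flowProcess) throws Exception {
            int numFetched = 0;
            TupleEntryIterator iter = statusTap.openForRead(flowProcess);
            try {
                while (iter.hasNext()) {
                    TupleEntry entry = iter.next();
                    StatusDatum sd = new StatusDatum(entry); // wrap the raw tuple
                    if (sd.getStatus() == UrlStatus.FETCHED) {
                        numFetched += 1;
                    }
                }
            } finally {
                iter.close();
            }
            return numFetched;
        }
    }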


        // Test for all valid fetches.
        Tap validate = platform.makeTap(platform.makeBinaryScheme(StatusDatum.FIELDS), statusPath);
        TupleEntryIterator tupleEntryIterator = validate.openForRead(platform.makeFlowProcess());
        while (tupleEntryIterator.hasNext()) {
            TupleEntry entry = tupleEntryIterator.next();
            StatusDatum sd = new StatusDatum(entry);
            if (sd.getStatus() != UrlStatus.FETCHED) {
                LOGGER.error(String.format("Fetched failed! Status is %s for %s", sd.getStatus(), sd.getUrl()));
                BaseFetchException e = sd.getException();
                if (e != null) {
                    LOGGER.error("Fetched failed due to exception", e);
                }
               
                Assert.fail("Status not equal to FETCHED");


       
        // We will end up with 1 to n entries of (C)rawlDbDatum, (S)tatusDatum, (A)nalyzedDatum, (L)inkDatum
        // [C | S | A | L] [C | S | A | L] [C | S | A | L] [C | S | A | L]

        CrawlDbDatum crawlDbDatum = null;
        StatusDatum statusDatum = null;
        AnalyzedDatum analyzedDatum = null;
        UrlStatus status = null;
        float pageScore = 0;
        float linkScore = 0;

        String url = null;

        while (iter.hasNext()) {
            TupleEntry entry = iter.next();
           
            boolean isCrawlDatum = entry.getString(CRAWLDBDATUM_URL_FIELD) != null;
            boolean isStatus = entry.getString(STATUSDATUM_URL_FIELD) != null;
            boolean isAnalyzed = entry.getString(ANALYZEDDATUM_URL_FIELD) != null;
            if (isCrawlDatum) {
               Tuple crawlDbTuple = TupleEntry.select(CrawlDbDatum.FIELDS, entry);
               crawlDbDatum = new CrawlDbDatum(crawlDbTuple);
               url = crawlDbDatum.getUrl();
            }
           
            if (isStatus) {
                statusDatum = new StatusDatum(entry);
                url = statusDatum.getUrl();
            }

            if (isAnalyzed) {
                Tuple analyzedTuple = TupleEntry.select(AnalyzedDatum.FIELDS, entry);
                analyzedDatum = new AnalyzedDatum(analyzedTuple);
                url = analyzedDatum.getUrl();
            }

            // we could have either status + link or just link tuple entry
            if (entry.getString(new Fields(LinkDatum.URL_FN)) != null) {
                LinkDatum linkDatum = new LinkDatum(TupleEntry.select(LinkDatum.FIELDS, entry));
               
                pageScore = linkDatum.getPageScore();
                // Add up the link scores
                linkScore += linkDatum.getLinkScore();
                url = linkDatum.getUrl();
            }
        }
       
        long lastFetched = 0;
        if (crawlDbDatum != null) {
            status = crawlDbDatum.getLastStatus();
            pageScore = crawlDbDatum.getPageScore();
            linkScore += crawlDbDatum.getLinksScore();
            lastFetched = crawlDbDatum.getLastFetched();
        } else if (statusDatum != null) {
            status = statusDatum.getStatus();
            if (status != UrlStatus.FETCHED) {
                pageScore = 0; // if we didn't fetch the page, then we can't have a page score
                linkScore += (Float)statusDatum.getPayloadValue(CustomFields.LINKS_SCORE_FN);
            } else {
                if (analyzedDatum != null) {
                    pageScore = analyzedDatum.getPageScore();
                }
            }
            lastFetched = statusDatum.getStatusTime();
        } else {
            status = UrlStatus.UNFETCHED;
        }
           
        if (url == null) {
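The example above relies on Cascading's static TupleEntry.select(...) to carve a datum-specific Tuple out of the wide, merged TupleEntry, using the presence of each datum's URL field to decide which datum types a given entry actually carries. Below is a minimal sketch of that field-selection idiom in isolation; the field names and values are illustrative, not the real bixo field names.

    import cascading.tuple.Fields;
    import cascading.tuple.Tuple;
    import cascading.tuple.TupleEntry;

    public class SelectExample {
        public static Tuple selectStatusFields() {
            // A wide entry that carries several logical records side by side.
            Fields wide = new Fields("url", "status", "score");
            TupleEntry entry = new TupleEntry(wide, new Tuple("http://example.com", "FETCHED", 1.0f));

            // Pull out just the fields that belong to one datum type.
            return TupleEntry.select(new Fields("url", "status"), entry);
        }
    }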

        LOGGER.info(String.format("Created %d URLs", _numCreated));
    }

    @Override
    public void operate(FlowProcess process, FunctionCall<NullContext> funcCall) {
        StatusDatum datum = new StatusDatum(funcCall.getArguments());
        UrlStatus status = datum.getStatus();
        String url = datum.getUrl();
        long statusTime = datum.getStatusTime();
       
        // Using the status time as the fetch time isn't strictly accurate, since in some
        // cases we may not have fetched the URL at all. But this logic is shared between
        // the JDBCCrawlTool and the DemoCrawlTool, which use the fetch time when selecting
        // the "latest" URL, and newly added URLs have a fetchTime of 0, so we set the
        // fetch time here to preserve, say, a SKIPPED status.
        long fetchTime = statusTime;

        _numCreated += 1;

        UrlDatum urlDatum = new UrlDatum(url);
        urlDatum.setPayloadValue(CrawlDbDatum.LAST_FETCHED_FIELD, fetchTime);
        urlDatum.setPayloadValue(CrawlDbDatum.LAST_UPDATED_FIELD, statusTime);
        urlDatum.setPayloadValue(CrawlDbDatum.LAST_STATUS_FIELD, status.name());
        // Don't change the crawl depth here - we do that only in the case of a
        // successful parse
        urlDatum.setPayloadValue(CrawlDbDatum.CRAWL_DEPTH, datum.getPayloadValue(CrawlDbDatum.CRAWL_DEPTH));

        funcCall.getOutputCollector().add(urlDatum.getTuple());
    }

        flow.complete();
       
        Tap validate = platform.makeTap(platform.makeBinaryScheme(StatusDatum.FIELDS), statusPath);
        TupleEntryIterator tupleEntryIterator = validate.openForRead(platform.makeFlowProcess());
        Assert.assertTrue(tupleEntryIterator.hasNext());
        StatusDatum sd = new StatusDatum(tupleEntryIterator.next());
        Assert.assertEquals(UrlStatus.FETCHED, sd.getStatus());
        HttpHeaders headers = sd.getHeaders();
        Assert.assertNotNull(headers);
        Assert.assertTrue(headers.getNames().size() > 0);
    }

        while (tupleEntryIterator.hasNext()) {
            TupleEntry entry = tupleEntryIterator.next();
            totalEntries += 1;

            // Verify we can convert properly
            StatusDatum sd = new StatusDatum(entry);
            Assert.assertEquals(UrlStatus.FETCHED, sd.getStatus());
           
           
            // Verify that we got one of each page
            String url = sd.getUrl();
            Assert.assertNotNull(url);
            int idOffset = url.indexOf(".html") - 1;
            int pageId = Integer.parseInt(url.substring(idOffset, idOffset + 1));
            Assert.assertFalse(fetchedPages[pageId]);
            fetchedPages[pageId] = true;

        while (tupleEntryIterator.hasNext()) {
            TupleEntry entry = tupleEntryIterator.next();
            totalEntries += 1;

            // Verify we can convert properly
            StatusDatum sd = new StatusDatum(entry);
            Assert.assertTrue(sd.getException() instanceof RedirectFetchException);
            RedirectFetchException redirectException =
                (RedirectFetchException)(sd.getException());
            Assert.assertEquals(RedirectResponseHandler.REDIRECT_TARGET_URL,
                                redirectException.getRedirectedUrl());
            Assert.assertEquals(payload.get("payload-field-1"),
                                sd.getPayloadValue("payload-field-1"));
           
            // Verify that we got one of each page
            String url = sd.getUrl();
            Assert.assertNotNull(url);
            int idOffset = url.indexOf(".html") - 1;
            int pageId = Integer.parseInt(url.substring(idOffset, idOffset + 1));
            Assert.assertFalse(fetchedPages[pageId]);
            fetchedPages[pageId] = true;

        while (tupleEntryIterator.hasNext()) {
            TupleEntry entry = tupleEntryIterator.next();
            totalEntries += 1;

            // Verify we can convert properly
            StatusDatum sd = new StatusDatum(entry);
            Assert.assertEquals(UrlStatus.SKIPPED_INTERRUPTED, sd.getStatus());
        }
       

        // TODO CSc - re-enable this test, when termination really works.
        // Assert.assertEquals(numPages, totalEntries);

        while (tupleEntryIterator.hasNext()) {
            TupleEntry entry = tupleEntryIterator.next();
            totalEntries += 1;

            // Verify we can convert properly
            StatusDatum sd = new StatusDatum(entry);
            Assert.assertEquals(UrlStatus.FETCHED, sd.getStatus());
            String payloadValue = (String)sd.getPayloadValue("key");
            Assert.assertNotNull(payloadValue);
            Assert.assertEquals("value", payloadValue);
        }
       
        Assert.assertEquals(1, totalEntries);
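For context, a payload pair like the "key"/"value" checked above would normally be attached to the originating UrlDatum before the fetch, and it then surfaces again on the resulting StatusDatum, which is what this test verifies. Here is a hedged sketch using only the UrlDatum calls shown earlier on this page; the URL is illustrative.

    import bixo.datum.UrlDatum;

    public class PayloadSeedExample {
        public static UrlDatum makeSeedUrl() {
            UrlDatum urlDatum = new UrlDatum("http://example.com/page-1.html");
            // Payload values ride along with the URL through the fetch pipeline
            // and can be read back later via StatusDatum.getPayloadValue("key").
            urlDatum.setPayloadValue("key", "value");
            return urlDatum;
        }
    }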

       
        int numEntries = 0;
        while (tupleEntryIterator.hasNext()) {
            numEntries += 1;
            TupleEntry entry = tupleEntryIterator.next();
            StatusDatum status = new StatusDatum(entry);
            Assert.assertEquals(UrlStatus.SKIPPED_TIME_LIMIT, status.getStatus());
        }
       
        Assert.assertEquals(10, numEntries);
    }

       
        int numSkippedEntries = 0;
        int numFetchedEntries = 0;
        while (tupleEntryIterator.hasNext()) {
            TupleEntry entry = tupleEntryIterator.next();
            StatusDatum status = new StatusDatum(entry);
            if (status.getStatus() == UrlStatus.SKIPPED_PER_SERVER_LIMIT) {
                numSkippedEntries += 1;
            } else if (status.getStatus() == UrlStatus.FETCHED) {
                numFetchedEntries += 1;
            } else {
                Assert.fail("Unexpected status: " + status.getStatus());
            }
        }
       
        Assert.assertEquals(maxUrls, numFetchedEntries);
        Assert.assertEquals(sourceUrls - maxUrls, numSkippedEntries);
