Package org.apache.nutch.crawl

Examples of org.apache.nutch.crawl.CrawlDatum
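CrawlDatum is the Writable that Nutch uses to record the crawl state of a single URL: its status, fetch time and fetch interval, score, content signature, and a free-form metadata map. The excerpts below come from parser tests, the parse output path, deduplication, WebGraph score updating, indexing-filter tests, and a command-line checker. As orientation, here is a minimal, self-contained sketch of the parts of the API those excerpts rely on; the class, constant, and method names are taken from the examples, while the concrete values are purely illustrative:

    import org.apache.hadoop.io.Text;
    import org.apache.nutch.crawl.CrawlDatum;

    public class CrawlDatumSketch {
      public static void main(String[] args) {
        // a newly discovered link; the second argument is the fetch interval
        // (seconds in recent Nutch 1.x releases)
        CrawlDatum datum = new CrawlDatum(CrawlDatum.STATUS_LINKED, 30 * 24 * 60 * 60);

        datum.setScore(1.0f);                           // initial score
        datum.setFetchTime(System.currentTimeMillis()); // when it was (or should be) fetched

        // arbitrary metadata travels with the datum as Writable key/value pairs
        datum.getMetaData().put(new Text("source"), new Text("seed-list"));

        System.out.println(datum);
      }
    }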


    for (int i = 0; i < sampleFiles.length; i++) {
      urlString = "file:" + sampleDir + fileSeparator + sampleFiles[i];

      Configuration conf = NutchConfiguration.create();
      protocol = new ProtocolFactory(conf).getProtocol(urlString);
      content = protocol.getProtocolOutput(new Text(urlString), new CrawlDatum()).getContent();
      parse = new ParseUtil(conf).parseByExtensionId("parse-tika", content).get(content.getUrl());

      int index = parse.getText().indexOf(expectedText);
      Assert.assertTrue(index > 0);
    }
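This loop, from one of the parse-tika plugin unit tests, fetches each sample file with the file: protocol, runs the raw content through ParseUtil with the parse-tika extension id, and asserts that the expected text appears in the extracted plain text. The CrawlDatum handed to getProtocolOutput is just an empty placeholder, since the file protocol needs no crawl state.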


    for (int i = 0; i < sampleFiles.length; i++) {
      urlString = "file:" + sampleDir + fileSeparator + sampleFiles[i];

      protocol = new ProtocolFactory(conf).getProtocol(urlString);
      content = protocol.getProtocolOutput(new Text(urlString),
          new CrawlDatum()).getContent();
      parse = new ParseUtil(conf).parseByExtensionId("parse-tika",
          content).get(content.getUrl());

      // check that there are 2 outlinks:
      // unlike the original parse-rss
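The same pattern in a feed-parsing test: the (truncated) assertions check that parse-tika extracts two outlinks, in contrast to the behaviour of the old parse-rss plugin mentioned in the comment.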

          String sig = parseData.getContentMeta().get(Nutch.SIGNATURE_KEY);
          if (sig != null) {
            byte[] signature = StringUtil.fromHexString(sig);
            if (signature != null) {
              // append a CrawlDatum with a signature
              CrawlDatum d = new CrawlDatum(CrawlDatum.STATUS_SIGNATURE, 0);
              d.setSignature(signature);
              crawlOut.append(key, d);
            }
          }
         
        // see if the parse metadata contain things that we'd like
        // to pass to the metadata of the crawlDB entry
        CrawlDatum parseMDCrawlDatum = null;
        for (String mdname : parseMDtoCrawlDB) {
          String mdvalue = parse.getData().getParseMeta().get(mdname);
          if (mdvalue != null) {
            if (parseMDCrawlDatum == null) parseMDCrawlDatum = new CrawlDatum(
                CrawlDatum.STATUS_PARSE_META, 0);
            parseMDCrawlDatum.getMetaData().put(new Text(mdname),
                new Text(mdvalue));
          }
        }
        if (parseMDCrawlDatum != null) crawlOut.append(key, parseMDCrawlDatum);

        if (ignoreExternalLinks) {
          // need to determine fromHost (once for all outlinks)
          try {
            fromHost = new URL(fromUrl).getHost().toLowerCase();
          } catch (MalformedURLException e) {
            fromHost = null;
          }
        } else {
          fromHost = null;
        }

        ParseStatus pstatus = parseData.getStatus();
        if (pstatus != null && pstatus.isSuccess()
            && pstatus.getMinorCode() == ParseStatus.SUCCESS_REDIRECT) {
          String newUrl = pstatus.getMessage();
          int refreshTime = Integer.valueOf(pstatus.getArgs()[1]);
          newUrl = filterNormalize(fromUrl, newUrl, fromHost,
              ignoreExternalLinks, filters, normalizers,
              URLNormalizers.SCOPE_FETCHER);

          if (newUrl != null) {
            String reprUrl = URLUtil.chooseRepr(fromUrl, newUrl,
                refreshTime < Fetcher.PERM_REFRESH_TIME);
            CrawlDatum newDatum = new CrawlDatum();
            newDatum.setStatus(CrawlDatum.STATUS_LINKED);
            if (reprUrl != null && !reprUrl.equals(newUrl)) {
              newDatum.getMetaData().put(Nutch.WRITABLE_REPR_URL_KEY,
                  new Text(reprUrl));
            }
            crawlOut.append(new Text(newUrl), newDatum);
          }
        }

          // collect outlinks for subsequent db update
          Outlink[] links = parseData.getOutlinks();
          int outlinksToStore = Math.min(maxOutlinks, links.length);

          int validCount = 0;
          CrawlDatum adjust = null;
          List<Entry<Text, CrawlDatum>> targets = new ArrayList<Entry<Text, CrawlDatum>>(outlinksToStore);
          List<Outlink> outlinkList = new ArrayList<Outlink>(outlinksToStore);
          for (int i = 0; i < links.length && validCount < outlinksToStore; i++) {
            String toUrl = links[i].getToUrl();

            // Only normalize and filter if fetcher.parse = false
            if (!isParsing) {
              toUrl = ParseOutputFormat.filterNormalize(fromUrl, toUrl, fromHost, ignoreExternalLinks, filters, normalizers);
              if (toUrl == null) {
                continue;
              }
            }

            CrawlDatum target = new CrawlDatum(CrawlDatum.STATUS_LINKED, interval);
            Text targetUrl = new Text(toUrl);
           
            // see if the outlink has any metadata attached
            // and if so pass that to the crawldatum so that
            // the initial score or distribution can use that
            MapWritable outlinkMD = links[i].getMetadata();
            if (outlinkMD != null) {
              target.getMetaData().putAll(outlinkMD);
            }
           
            try {
              scfilters.initialScore(targetUrl, target);
            } catch (ScoringFilterException e) {
              LOG.warn("Cannot filter init score for url " + key +
                      ", using default: " + e.getMessage());
              target.setScore(0.0f);
            }

            targets.add(new SimpleEntry(targetUrl, target));

            // Overwrite URL in Outlink object with normalized URL (NUTCH-1174)
            links[i].setUrl(toUrl);
            outlinkList.add(links[i]);
            validCount++;
          }

          try {
            // compute score contributions and adjustment to the original score
            adjust = scfilters.distributeScoreToOutlinks(key, parseData,
                      targets, null, links.length);
          } catch (ScoringFilterException e) {
            LOG.warn("Cannot distribute score from " + key + ": " + e.getMessage());
          }
          for (Entry<Text, CrawlDatum> target : targets) {
            crawlOut.append(target.getKey(), target.getValue());
          }
          if (adjust != null) crawlOut.append(key, adjust);

          Outlink[] filteredLinks = outlinkList.toArray(new Outlink[outlinkList.size()]);
          parseData = new ParseData(parseData.getStatus(), parseData.getTitle(),
                                    filteredLinks, parseData.getContentMeta(),
                                    parseData.getParseMeta());
          dataOut.append(key, parseData);
          if (!parse.isCanonical()) {
            CrawlDatum datum = new CrawlDatum();
            datum.setStatus(CrawlDatum.STATUS_FETCH_SUCCESS);
            String timeString = parse.getData().getContentMeta().get(Nutch.FETCH_TIME_KEY);
            try {
              datum.setFetchTime(Long.parseLong(timeString));
            } catch (Exception e) {
              LOG.warn("Can't read fetch time for: " + key);
              datum.setFetchTime(System.currentTimeMillis());
            }
            crawlOut.append(key, datum);
          }
        }
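The long excerpt above is from the parse output side (ParseOutputFormat-style code). For each parsed page it can append several CrawlDatum records to the crawl output (crawlOut): a STATUS_SIGNATURE datum carrying the content signature; a STATUS_PARSE_META datum carrying selected parse-metadata entries; a STATUS_LINKED datum for a redirect target, with the chosen representative URL stored under WRITABLE_REPR_URL_KEY; one STATUS_LINKED datum per kept outlink, given an initial score and a collective score adjustment by the scoring filters; and, for non-canonical parses, a STATUS_FETCH_SUCCESS datum carrying the fetch time. The filtered and normalized outlinks are also written back into a fresh ParseData.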
       

        @Override
        public void reduce(BytesWritable key, Iterator<CrawlDatum> values,
                OutputCollector<Text, CrawlDatum> output, Reporter reporter)
                throws IOException {
            CrawlDatum existingDoc = null;

            while (values.hasNext()) {
                if (existingDoc == null) {
                    existingDoc = new CrawlDatum();
                    existingDoc.set(values.next());
                    continue;
                }
                CrawlDatum newDoc = values.next();
                // compare based on score
                if (existingDoc.getScore() < newDoc.getScore()) {
                    writeOutAsDuplicate(existingDoc, output, reporter);
                    existingDoc = new CrawlDatum();
                    existingDoc.set(newDoc);
                    continue;
                } else if (existingDoc.getScore() > newDoc.getScore()) {
                    // mark new one as duplicate
                    writeOutAsDuplicate(newDoc, output, reporter);
                    continue;
                }
                // same score? delete the one which is oldest
                if (existingDoc.getFetchTime() > newDoc.getFetchTime()) {
                    // mark new one as duplicate
                    writeOutAsDuplicate(newDoc, output, reporter);
                    continue;
                } else if (existingDoc.getFetchTime() < newDoc.getFetchTime()) {
                    // mark existing one as duplicate
                    writeOutAsDuplicate(existingDoc, output, reporter);
                    existingDoc = new CrawlDatum();
                    existingDoc.set(newDoc);
                    continue;
                }
                // same time? keep the one which has the shortest URL
                String urlExisting = existingDoc.getMetaData().get(urlKey).toString();
                String urlnewDoc = newDoc.getMetaData().get(urlKey).toString();
                if (urlExisting.length() < urlnewDoc.length()) {
                  // mark new one as duplicate
                  writeOutAsDuplicate(newDoc, output, reporter);
                  continue;
                } else if (urlExisting.length() > urlnewDoc.length()) {
                  // mark existing one as duplicate
                  writeOutAsDuplicate(existingDoc, output, reporter);
                  existingDoc = new CrawlDatum();
                  existingDoc.set(newDoc);
                  continue;
                }
            }
        }
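This reduce method is the heart of deduplication: all CrawlDatums sharing the same key (a content hash) compete, the best one survives, and every loser is handed to writeOutAsDuplicate. Higher score wins, ties go to the more recently fetched datum, and remaining ties to the shorter URL (the URL is carried in the datum's metadata under urlKey). writeOutAsDuplicate is not part of this excerpt; judging from the STATUS_DB_DUPLICATE check in the next example, a plausible sketch, with a made-up counter name, looks like this:

    // hypothetical sketch -- not part of the excerpted code
    private void writeOutAsDuplicate(CrawlDatum datum,
            OutputCollector<Text, CrawlDatum> output, Reporter reporter)
            throws IOException {
        // flag the loser so a later pass can fold the status into the CrawlDb
        datum.setStatus(CrawlDatum.STATUS_DB_DUPLICATE);
        // the URL was stashed in the metadata; use it as the output key
        Text url = (Text) datum.getMetaData().get(urlKey);
        reporter.incrCounter("Deduplication", "duplicates", 1); // counter name is illustrative
        output.collect(url, datum);
    }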

                OutputCollector<Text, CrawlDatum> output, Reporter reporter)
                throws IOException {
            boolean duplicateSet = false;
           
            while (values.hasNext()) {
                CrawlDatum val = values.next();
                if (val.getStatus() == CrawlDatum.STATUS_DB_DUPLICATE) {
                    duplicate.set(val);
                    duplicateSet = true;
                } else {
                    old.set(val);
                }
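This reducer (its signature is cut off at the top) is the follow-up pass: for each URL it receives both the existing CrawlDb entry and, possibly, the STATUS_DB_DUPLICATE datum produced above, storing them in the reused duplicate and old instances so the duplicate flag can be folded into the CrawlDb entry further down, past the end of the excerpt.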

    Assert.assertNotNull(filter);
    NutchDocument doc = new NutchDocument();
    ParseImpl parse = new ParseImpl("foo bar", new ParseData());
   
    try {
        filter.filter(doc, parse, new Text("http://nutch.apache.org/index.html"), new CrawlDatum(), new Inlinks());
    } catch (Exception e) {
        e.printStackTrace();
        Assert.fail(e.getMessage());
    }
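The excerpt above and the two that follow exercise IndexingFilter implementations. Every filter is called with the NutchDocument being built, the Parse, the URL as a Text, the URL's CrawlDatum, and its Inlinks, and returns the (possibly augmented) document; in these tests the CrawlDatum is simply a fresh, empty instance.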

    MoreIndexingFilter filter = new MoreIndexingFilter();
    filter.setConf(conf);

    NutchDocument doc = filter.filter(new NutchDocument(), new ParseImpl("text", new ParseData(
      new ParseStatus(), "title", new Outlink[0], metadata)), new Text(
        "http://www.example.com/"), new CrawlDatum(), new Inlinks());

    Assert.assertEquals("content-disposition not detected", "filename.ext", doc.getFieldValue("title"));
  }

    metadata.add(Response.CONTENT_TYPE, source);
    MoreIndexingFilter filter = new MoreIndexingFilter();
    filter.setConf(conf);
    NutchDocument doc = filter.filter(new NutchDocument(), new ParseImpl("text", new ParseData(
        new ParseStatus(), "title", new Outlink[0], metadata)), new Text(
        "http://www.example.com/"), new CrawlDatum(), new Inlinks());
    Assert.assertEquals("mime type not detected", expected, doc.getFieldValue("type"));
  }

    OutputCollector<Text, CrawlDatum> output, Reporter reporter)
    throws IOException {

    String url = key.toString();
    Node node = null;
    CrawlDatum datum = null;

    // set the node and the crawl datum, should be one of each unless no node
    // for url in the crawldb
    while (values.hasNext()) {
      ObjectWritable next = values.next();
      Object value = next.get();
      if (value instanceof Node) {
        node = (Node)value;
      }
      else if (value instanceof CrawlDatum) {
        datum = (CrawlDatum)value;
      }
    }

    // datum should never be null, but could happen if the url was somehow
    // normalized or changed after being pulled from the crawldb
    if (datum != null) {

      if (node != null) {
       
        // set the inlink score in the nodedb
        float inlinkScore = node.getInlinkScore();
        datum.setScore(inlinkScore);
        LOG.debug(url + ": setting to score " + inlinkScore);
      }
      else {
       
        // clear out the score in the crawldb
        datum.setScore(clearScore);
        LOG.debug(url + ": setting to clear score of " + clearScore);
      }

      output.collect(key, datum);
    }
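In this WebGraph score updater, each URL's CrawlDb entry is joined with its Node from the node database: when a node exists, its inlink score is copied into the CrawlDatum; otherwise the score is reset to clearScore. The updated datum is then collected for the new CrawlDb.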

    if (LOG.isInfoEnabled()) {
      LOG.info("fetching: " + url);
    }

    CrawlDatum cd = new CrawlDatum();

    Iterator<String> iter = metadata.keySet().iterator();
    while (iter.hasNext()) {
      String key = iter.next();
      String value = metadata.get(key);
      if (value == null)
        value = "";
      cd.getMetaData().put(new Text(key), new Text(value));
    }

    ProtocolFactory factory = new ProtocolFactory(conf);
    Protocol protocol = factory.getProtocol(url);
    Text turl = new Text(url);
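Finally, a command-line checker style of usage: a fresh CrawlDatum is created, any user-supplied key/value metadata is copied into its metadata map as Text pairs, and the datum is then passed together with the URL to the protocol obtained from ProtocolFactory (the excerpt stops just before the getProtocolOutput call).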
