Package org.apache.nutch.crawl

Examples of org.apache.nutch.crawl.CrawlDb
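
CrawlDb maintains Nutch's crawl database: the set of known URLs together with their fetch status, score, and scheduling metadata. Its central operation is update(Path crawlDb, Path[] segments, boolean normalize, boolean filter), which merges the fetch results recorded in one or more segments back into the database. The following minimal sketch shows that call in isolation; it assumes a standard Nutch 1.x crawl layout, and the class name and paths are placeholders rather than code from the examples below.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.nutch.crawl.CrawlDb;
    import org.apache.nutch.util.NutchConfiguration;

    public class CrawlDbUpdateSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = NutchConfiguration.create();
        CrawlDb crawlDbTool = new CrawlDb(conf);

        // Placeholder paths; a real crawl would point at its own layout.
        Path crawlDbPath = new Path("crawl/crawldb");
        Path[] segments = { new Path("crawl/segments/20180101000000") };

        // Merge segment fetch results into the crawldb, applying URL
        // normalizers (first flag) and URL filters (second flag).
        crawlDbTool.update(crawlDbPath, segments, true, true);
      }
    }

The first example, from NutchWAX, performs the same update but first creates the crawldb directory if needed and validates each segment path, falling back to the most recently created segment when none are given explicitly: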


    // Create the crawldb directory if it does not already exist.
    if (!getFS().exists(dbPath))
    {
      getFS().mkdirs(dbPath);
    }
     
    CrawlDb cdb = new NutchwaxCrawlDb(getJobConf());
     
    // Explicit segments were passed: verify each exists, then update
    // the crawldb from all of them at once.
    if (segments != null)
    {
      List<Path> paths = new ArrayList<Path>(segments.length);
       
      for (int i = 0; i < segments.length; i++)
      {
        Path p = new Path(segments[i]);
         
        if (!getFS().exists(p))
        {
          throw new FileNotFoundException(p.toString());
        }

        paths.add(p);
      }
       
      cdb.update(od.getCrawlDb(), paths.toArray(new Path[paths.size()]),
        true, true);
    }
    else
    {
      Path[] allSegments = getSegments(od);
       
      // No segments specified: update from the most recently created segment only.
      cdb.update(od.getCrawlDb(),
        new Path[] {allSegments[allSegments.length - 1]}, true, true);
    }
  }
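
The next example, from a simple embedded crawler, repeats a generate/fetch/update cycle up to a fixed depth; the fetcher is configured to parse pages inline (fetcher.parse = true), so no separate parse step is required: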


            // Set up the tools for the generate/fetch/update cycle.
            Generator g = new Generator(conf);
            // Parse documents during the fetch itself rather than in a
            // separate ParseSegment pass.
            conf.setBoolean("fetcher.parse", true);
            Fetcher fetcher = new Fetcher(conf);
            CrawlDb crawlDbTool = new CrawlDb(conf);

            int depth = 5;
            int threads = 4;
            for (int i = 0; i < depth; i++) { // generate new segment
                Path generatedSegment = g.generate(crawldbPath, segmentsPath, 1, Long.MAX_VALUE, Long.MAX_VALUE, false,
                        false);

                if (generatedSegment == null) {
                    logger.info("Stopping at depth=" + i + " - no more URLs to fetch.");
                    break;
                }
                fetcher.fetch(generatedSegment, threads, true);
                crawlDbTool.update(crawldbPath, new Path[] { generatedSegment }, true, true);
            }
        } catch (IOException e) {
            logger.error("Exception while crawling", e);
        }
    }
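
The last example comes from a crawl benchmark harness that drives the full cycle of inject, generate, fetch, parse, update, and invertlinks, recording the elapsed time of each phase. CrawlDb.update runs once per depth iteration, and ParseSegment is invoked only when the fetcher is not parsing inline: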

    res.elapsed = System.currentTimeMillis();
    Injector injector = new Injector(getConf());
    Generator generator = new Generator(getConf());
    Fetcher fetcher = new Fetcher(getConf());
    ParseSegment parseSegment = new ParseSegment(getConf());
    CrawlDb crawlDbTool = new CrawlDb(getConf());
    LinkDb linkDbTool = new LinkDb(getConf());
     
    // initialize crawlDb
    long start = System.currentTimeMillis();
    injector.inject(crawlDb, rootUrlDir);
    long delta = System.currentTimeMillis() - start;
    res.addTiming("inject", "0", delta);
    int i;
    for (i = 0; i < depth; i++) {             // generate new segment
      start = System.currentTimeMillis();
      Path[] segs = generator.generate(crawlDb, segments, -1, topN, System.currentTimeMillis());
      delta = System.currentTimeMillis() - start;
      res.addTiming("generate", i + "", delta);
      if (segs == null) {
        LOG.info("Stopping at depth=" + i + " - no more URLs to fetch.");
        break;
      }
      start = System.currentTimeMillis();
      fetcher.fetch(segs[0], threads,
          org.apache.nutch.fetcher.Fetcher.isParsing(getConf()));  // fetch it
      delta = System.currentTimeMillis() - start;
      res.addTiming("fetch", i + "", delta);
      if (!Fetcher.isParsing(getConf())) {
        start = System.currentTimeMillis();
        parseSegment.parse(segs[0]);    // parse it, if needed
        delta = System.currentTimeMillis() - start;
        res.addTiming("parse", i + "", delta);
      }
      start = System.currentTimeMillis();
      crawlDbTool.update(crawlDb, segs, true, true); // update crawldb
      delta = System.currentTimeMillis() - start;
      res.addTiming("update", i + "", delta);
      start = System.currentTimeMillis();
      linkDbTool.invert(linkDb, segs, true, true, false); // invert links
      delta = System.currentTimeMillis() - start;
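
For one-off updates outside Java code, the same operation is exposed on the Nutch 1.x command line via the updatedb command, which maps to org.apache.nutch.crawl.CrawlDb; the segment path below is a placeholder:

    bin/nutch updatedb crawl/crawldb crawl/segments/20180101000000 -normalize -filter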



