Package org.apache.nutch.indexer

Examples of org.apache.nutch.indexer.DeleteDuplicates
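
The excerpts below show how DeleteDuplicates fits into the classic Lucene-based Nutch indexing pipeline: the Indexer writes per-segment indexes, DeleteDuplicates removes duplicate documents from them, and IndexMerger combines them into a single index. As a minimal sketch of that pattern (the DedupSketch class name and the crawl/indexes path are placeholders, not taken from the examples):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.nutch.indexer.DeleteDuplicates;
import org.apache.nutch.util.NutchConfiguration;

public class DedupSketch {
  // Minimal sketch: run deduplication over the directory of un-merged
  // per-segment indexes before they are handed to IndexMerger. The
  // "crawl/indexes" path is a placeholder for wherever the Indexer
  // wrote its output.
  public static void main(String[] args) throws IOException {
    Configuration conf = NutchConfiguration.create();
    Path indexes = new Path(args.length > 0 ? args[0] : "crawl/indexes");
    new DeleteDuplicates(conf).dedup(new Path[] { indexes });
  }
}

The first excerpt, from a crawl driver that can index either to Solr or to local Lucene indexes, takes the local branch: it deletes any stale indexes, re-indexes the segments, deduplicates with DeleteDuplicates, and merges the result.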


        indexer.indexSolr(solrUrl, crawlDb, linkDb,
            Arrays.asList(HadoopFSUtil.getPaths(fstats)));
      }
      else {
       
        DeleteDuplicates dedup = new DeleteDuplicates(conf);       
        if(indexes != null) {
          // Delete old indexes
          if (fs.exists(indexes)) {
            LOG.info("Deleting old indexes: " + indexes);
            fs.delete(indexes, true);
          }

          // Delete old index
          if (fs.exists(index)) {
            LOG.info("Deleting old merged index: " + index);
            fs.delete(index, true);
          }
        }
       
        Indexer indexer = new Indexer(conf);
        indexer.index(indexes, crawlDb, linkDb,
            Arrays.asList(HadoopFSUtil.getPaths(fstats)));
       
        IndexMerger merger = new IndexMerger(conf);
        if(indexes != null) {
          dedup.dedup(new Path[] { indexes });
          fstats = fs.listStatus(indexes, HadoopFSUtil.getPassDirectoriesFilter(fs));
          merger.merge(HadoopFSUtil.getPaths(fstats), index, tmpDir);
        }
      }   
     
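This excerpt shows a full crawl cycle: after the generate/fetch/parse/update loop has produced segments, the link database is inverted, the segments are indexed, the resulting indexes are deduplicated, and the deduplicated indexes are merged into a single index.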


    Fetcher fetcher = new Fetcher(conf);
    ParseSegment parseSegment = new ParseSegment(conf);
    CrawlDb crawlDbTool = new CrawlDb(conf);
    LinkDb linkDbTool = new LinkDb(conf);
    Indexer indexer = new Indexer(conf);
    DeleteDuplicates dedup = new DeleteDuplicates(conf);
    IndexMerger merger = new IndexMerger(conf);
     
    // initialize crawlDb
    injector.inject(crawlDb, rootUrlDir);
    int i;
    for (i = 0; i < depth; i++) {             // generate new segment
      Path segment = generator.generate(crawlDb, segments, -1, topN, System
          .currentTimeMillis(), false, false);
      if (segment == null) {
        LOG.info("Stopping at depth=" + i + " - no more URLs to fetch.");
        break;
      }
      fetcher.fetch(segment, threads);  // fetch it
      if (!Fetcher.isParsing(job)) {
        parseSegment.parse(segment);    // parse it, if needed
      }
      crawlDbTool.update(crawlDb, new Path[]{segment}, true, true); // update crawldb
    }
    if (i > 0) {
      linkDbTool.invert(linkDb, segments, true, true, false); // invert links

      // index, dedup & merge
      indexer.index(indexes, crawlDb, linkDb, fs.listPaths(segments));
      dedup.dedup(new Path[] { indexes });
      merger.merge(fs.listPaths(indexes), index, tmpDir);
    } else {
      LOG.warn("No URLs to fetch - check your seed list and URL filters.");
    }
    if (LOG.isInfoEnabled()) { LOG.info("crawl finished: " + dir); }
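A small helper that runs DeleteDuplicates over the un-merged indexes directory of an OutputDirectories holder: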

  protected void doDedup(final OutputDirectories od) throws IOException
  {
    LOG.info("dedup " + od.getIndex());

    new DeleteDuplicates(getJobConf()).dedup(new Path[] {od.getIndexes()});
  }
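A newer variant of the crawl cycle that enumerates segment and index directories through FileStatus and HadoopFSUtil, deleting any old indexes and old merged index before indexing, deduplicating, and merging again: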

    Fetcher fetcher = new Fetcher(conf);
    ParseSegment parseSegment = new ParseSegment(conf);
    CrawlDb crawlDbTool = new CrawlDb(conf);
    LinkDb linkDbTool = new LinkDb(conf);
    Indexer indexer = new Indexer(conf);
    DeleteDuplicates dedup = new DeleteDuplicates(conf);
    IndexMerger merger = new IndexMerger(conf);
     
    // initialize crawlDb
    injector.inject(crawlDb, rootUrlDir);
    int i;
    for (i = 0; i < depth; i++) {             // generate new segment
      Path segment = generator.generate(crawlDb, segments, -1, topN, System
          .currentTimeMillis());
      if (segment == null) {
        LOG.info("Stopping at depth=" + i + " - no more URLs to fetch.");
        break;
      }
      fetcher.fetch(segment, threads, org.apache.nutch.fetcher.Fetcher.isParsing(conf));  // fetch it
      if (!Fetcher.isParsing(job)) {
        parseSegment.parse(segment);    // parse it, if needed
      }
      crawlDbTool.update(crawlDb, new Path[]{segment}, true, true); // update crawldb
    }
    if (i > 0) {
      linkDbTool.invert(linkDb, segments, true, true, false); // invert links

      if(indexes != null) {
        // Delete old indexes
        if (fs.exists(indexes)) {
          LOG.info("Deleting old indexes: " + indexes);
          fs.delete(indexes, true);
        }

        // Delete old index
        if (fs.exists(index)) {
          LOG.info("Deleting old merged index: " + index);
          fs.delete(index, true);
        }
      }

      // index, dedup & merge
      FileStatus[] fstats = fs.listStatus(segments, HadoopFSUtil.getPassDirectoriesFilter(fs));
      indexer.index(indexes, crawlDb, linkDb, Arrays.asList(HadoopFSUtil.getPaths(fstats)));
      if(indexes != null) {
        dedup.dedup(new Path[] { indexes });
        fstats = fs.listStatus(indexes, HadoopFSUtil.getPassDirectoriesFilter(fs));
        merger.merge(HadoopFSUtil.getPaths(fstats), index, tmpDir);
      }
    } else {
      LOG.warn("No URLs to fetch - check your seed list and URL filters.");
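An older, more compact version of the same tail end of the crawl, driven by a single JobConf: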

     
    new LinkDb(job).invert(linkDb, segments); // invert links

    // index, dedup & merge
    new Indexer(job).index(indexes, crawlDb, linkDb, fs.listPaths(segments));
    new DeleteDuplicates(job).dedup(new Path[] { indexes });
    new IndexMerger(fs, fs.listPaths(indexes), index, tmpDir, job).merge();

    if (LOG.isInfoEnabled()) { LOG.info("crawl finished: " + dir); }
  }
