Package org.apache.lucene.search

Examples of org.apache.lucene.search.Hits
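
The examples on this page use the pre-3.0 Hits API: Searcher.search(Query) returns a Hits object that loads matching documents lazily as they are requested. As a minimal, self-contained sketch of that usage (the "index" directory, field name, and search term are illustrative assumptions, not taken from the snippets below):

    import org.apache.lucene.document.Document;
    import org.apache.lucene.index.Term;
    import org.apache.lucene.search.Hits;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.TermQuery;

    public class HitsExample {
        public static void main(String[] args) throws Exception {
            IndexSearcher searcher = new IndexSearcher("index");   // path to an existing index
            Hits hits = searcher.search(new TermQuery(new Term("contents", "lucene")));
            System.out.println("Total matching documents: " + hits.length());
            for (int i = 0; i < hits.length(); i++) {
                Document doc = hits.doc(i);                        // stored fields, fetched lazily
                System.out.println(i + ". " + doc.get("path") + " (score=" + hits.score(i) + ")");
            }
            searcher.close();
        }
    }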


     * @throws  ProcessingException  if an error occurs
     */
    private LuceneCocoonPager buildHits() throws ProcessingException {

        if (queryString != null && queryString.length() != 0) {
            Hits hits = null;

            // TODO (VG): Move parts into compose/initialize/recycle
            try {
                lcs = (LuceneCocoonSearcher) this.manager.lookup(LuceneCocoonSearcher.ROLE);
                Analyzer analyzer = LuceneCocoonHelper.getAnalyzer("org.apache.lucene.analysis.standard.StandardAnalyzer");
                lcs.setAnalyzer(analyzer);
                // get the directory where the index resides
                Directory directory = LuceneCocoonHelper.getDirectory(index, false);
                lcs.setDirectory(directory);
                hits = lcs.search(queryString, LuceneXMLIndexer.BODY_FIELD);
            } catch (IOException ioe) {
                throw new ProcessingException("IOException in search", ioe);
            } catch (ComponentException ce) {
                throw new ProcessingException("ComponentException in search", ce);
            } finally {
                if (lcs != null) {
                    this.manager.release(lcs);
                    lcs = null;
                }
            }

            // wrap the hits in a pager helper object so that only a range of hits is accessed
            LuceneCocoonPager pager = new LuceneCocoonPager(hits);

            int start_index = START_INDEX_DEFAULT;
            if (this.startIndex != null) {
                start_index = this.startIndex.intValue();
                if (start_index <= 0) {
                    start_index = 0;
                }
                pager.setStartIndex(start_index);
            }

            int page_length = PAGE_LENGTH_DEFAULT;
            if (this.pageLength != null) {
                page_length = this.pageLength.intValue();
                if (page_length <= 0) {
                    page_length = hits.length();
                }
                pager.setCountOfHitsPerPage(page_length);
            }

            return pager;
View Full Code Here
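
LuceneCocoonPager exposes only a window of the underlying Hits. A rough sketch of the slicing it performs, written directly against the Hits API (pageOf is a hypothetical helper, and startIndex/hitsPerPage correspond to the setStartIndex and setCountOfHitsPerPage calls above; the Lucene imports already used in the snippet are assumed):

    // Hypothetical helper: collects the documents for one page of results.
    static java.util.List pageOf(Hits hits, int startIndex, int hitsPerPage) throws java.io.IOException {
        java.util.List page = new java.util.ArrayList();
        int start = Math.max(0, startIndex);
        int end = Math.min(hits.length(), start + hitsPerPage);
        for (int i = start; i < end; i++) {
            page.add(hits.doc(i));   // stored fields are loaded lazily, so only this page is fetched
        }
        return page;
    }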


        Analyzer analyzer = new StandardAnalyzer();

        Query query = QueryParser.parse(line, "contents", analyzer);
        System.out.println("Searching for: " + query.toString("contents"));

        Hits hits = searcher.search(query);
        System.out.println("Total matching documents: " + hits.length());

        final int HITS_PER_PAGE = 10;

        for (int start = 0; start < hits.length(); start += HITS_PER_PAGE) {
            int end = Math.min(hits.length(), start + HITS_PER_PAGE);

            for (int i = start; i < end; i++) {
                Document doc = hits.doc(i);
                String path = doc.get("path");

                if (path != null) {
                    System.out.println(i + ". " + path);
                } else {
View Full Code Here
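
Hits was deprecated during the 2.x series and removed in Lucene 3.0. The same paging output written against the TopDocs API that replaced it looks roughly like this (the requested hit count of 50 is an arbitrary choice; searcher, query, and the "path" field follow the demo code above):

    // Equivalent output using TopDocs/ScoreDoc instead of Hits.
    TopDocs topDocs = searcher.search(query, null, 50);      // null filter, top 50 hits
    System.out.println("Total matching documents: " + topDocs.totalHits);
    for (int i = 0; i < topDocs.scoreDocs.length; i++) {
        Document doc = searcher.doc(topDocs.scoreDocs[i].doc);
        String path = doc.get("path");
        System.out.println(i + ". " + (path != null ? path : "(no path stored)"));
    }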

   * @param searcher The index searcher.
   * @return The lucene hits.
   * @throws IOException in case there is an error executing the lucene search.
   */
  private QueryAndHits getQueryAndHits(Searcher searcher) throws IOException {
    Hits hits;
    org.apache.lucene.search.Query query = filterQueryByClasses( luceneQuery );
    buildFilters();
    hits = searcher.search( query, filter, sort );
    setResultSize( hits );
    return new QueryAndHits( query, hits );
View Full Code Here
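
getQueryAndHits(Searcher) above relies on the Searcher.search(Query, Filter, Sort) overload, which still returns Hits. A minimal sketch of that call with a concrete filter and sort (the index location, field names, and terms are assumptions made for illustration):

    import org.apache.lucene.index.Term;
    import org.apache.lucene.search.*;

    public class FilteredSortedSearch {
        public static void main(String[] args) throws Exception {
            IndexSearcher searcher = new IndexSearcher("index");
            Query query = new TermQuery(new Term("contents", "hibernate"));
            Filter filter = new QueryWrapperFilter(new TermQuery(new Term("status", "published")));
            Sort sort = new Sort(new SortField("date", true));    // sort on "date", reversed
            Hits hits = searcher.search(query, filter, sort);     // same overload as above
            System.out.println("hit count: " + hits.length());    // the count resultSize is derived from
            searcher.close();
        }
    }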

      IndexSearcher searcher = buildSearcher( searchFactoryImplementor );
      if ( searcher == null ) {
        resultSize = 0;
      }
      else {
        Hits hits;
        try {
          hits = getQueryAndHits( searcher ).hits;
          resultSize = hits.length();
        }
        catch (IOException e) {
          throw new HibernateException( "Unable to query Lucene index", e );
        }
        finally {
View Full Code Here

      Term searchTerm = new Term("content", "aaa");       
      IndexReader reader = IndexReader.open(startDir);
      assertEquals("first docFreq", 57, reader.docFreq(searchTerm));

      IndexSearcher searcher = new IndexSearcher(reader);
      Hits hits = searcher.search(new TermQuery(searchTerm));
      assertEquals("first number of hits", 57, hits.length());
      searcher.close();
      reader.close();

      // Iterate with larger and larger amounts of free
      // disk space.  With little free disk space,
      // addIndexes will certainly run out of space &
      // fail.  Verify that when this happens, index is
      // not corrupt and index in fact has added no
      // documents.  Then, we increase disk space by 2000
      // bytes each iteration.  At some point there is
      // enough free disk space and addIndexes should
      // succeed and index should show all documents were
      // added.

      // String[] files = startDir.list();
      long diskUsage = startDir.sizeInBytes();

      long startDiskUsage = 0;
      String[] files = startDir.list();
      for(int i=0;i<files.length;i++) {
        startDiskUsage += startDir.fileLength(files[i]);
      }

      for(int iter=0;iter<6;iter++) {

        if (debug)
          System.out.println("TEST: iter=" + iter);

        // Start with 100 bytes more than we are currently using:
        long diskFree = diskUsage+100;

        boolean autoCommit = iter % 2 == 0;
        int method = iter/2;

        boolean success = false;
        boolean done = false;

        String methodName;
        if (0 == method) {
          methodName = "addIndexes(Directory[])";
        } else if (1 == method) {
          methodName = "addIndexes(IndexReader[])";
        } else {
          methodName = "addIndexesNoOptimize(Directory[])";
        }

        while(!done) {

          // Make a new dir that will enforce disk usage:
          MockRAMDirectory dir = new MockRAMDirectory(startDir);
          writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), false);
          IOException err = null;

          MergeScheduler ms = writer.getMergeScheduler();
          for(int x=0;x<2;x++) {
            if (ms instanceof ConcurrentMergeScheduler) {
              // This test intentionally produces exceptions
              // in the threads that CMS launches; we don't
              // want to pollute test output with these.
              if (0 == x) {
                ((ConcurrentMergeScheduler) ms).setSuppressExceptions();
              } else {
                ((ConcurrentMergeScheduler) ms).clearSuppressExceptions();
              }
            }

            // Two loops: first time, limit disk space &
            // throw random IOExceptions; second time, no
            // disk space limit:

            double rate = 0.05;
            double diskRatio = ((double) diskFree)/diskUsage;
            long thisDiskFree;

            String testName = null;

            if (0 == x) {
              thisDiskFree = diskFree;
              if (diskRatio >= 2.0) {
                rate /= 2;
              }
              if (diskRatio >= 4.0) {
                rate /= 2;
              }
              if (diskRatio >= 6.0) {
                rate = 0.0;
              }
              if (debug)
                testName = "disk full test " + methodName + " with disk full at " + diskFree + " bytes autoCommit=" + autoCommit;
            } else {
              thisDiskFree = 0;
              rate = 0.0;
              if (debug)
                testName = "disk full test " + methodName + " with unlimited disk space autoCommit=" + autoCommit;
            }

            if (debug)
              System.out.println("\ncycle: " + testName);

            dir.setMaxSizeInBytes(thisDiskFree);
            dir.setRandomIOExceptionRate(rate, diskFree);

            try {

              if (0 == method) {
                writer.addIndexes(dirs);
              } else if (1 == method) {
                IndexReader readers[] = new IndexReader[dirs.length];
                for(int i=0;i<dirs.length;i++) {
                  readers[i] = IndexReader.open(dirs[i]);
                }
                try {
                  writer.addIndexes(readers);
                } finally {
                  for(int i=0;i<dirs.length;i++) {
                    readers[i].close();
                  }
                }
              } else {
                writer.addIndexesNoOptimize(dirs);
              }

              success = true;
              if (debug) {
                System.out.println("  success!");
              }

              if (0 == x) {
                done = true;
              }

            } catch (IOException e) {
              success = false;
              err = e;
              if (debug) {
                System.out.println("  hit IOException: " + e);
                e.printStackTrace(System.out);
              }

              if (1 == x) {
                e.printStackTrace(System.out);
                fail(methodName + " hit IOException after disk space was freed up");
              }
            }

            // Make sure all threads from
            // ConcurrentMergeScheduler are done
            _TestUtil.syncConcurrentMerges(writer);

            if (autoCommit) {

              // Whether we succeeded or failed, check that
              // all un-referenced files were in fact
              // deleted (ie, we did not create garbage).
              // Only check this when autoCommit is true:
              // when it's false, it's expected that there
              // are unreferenced files (ie they won't be
              // referenced until the "commit on close").
              // Just create a new IndexFileDeleter, have it
              // delete unreferenced files, then verify that
              // in fact no files were deleted:

              String successStr;
              if (success) {
                successStr = "success";
              } else {
                successStr = "IOException";
              }
              String message = methodName + " failed to delete unreferenced files after " + successStr + " (" + diskFree + " bytes)";
              assertNoUnreferencedFiles(dir, message);
            }

            if (debug) {
              System.out.println("  now test readers");
            }

            // Finally, verify index is not corrupt, and, if
            // we succeeded, we see all docs added, and if we
            // failed, we see either all docs or no docs added
            // (transactional semantics):
            try {
              reader = IndexReader.open(dir);
            } catch (IOException e) {
              e.printStackTrace(System.out);
              fail(testName + ": exception when creating IndexReader: " + e);
            }
            int result = reader.docFreq(searchTerm);
            if (success) {
              if (autoCommit && result != END_COUNT) {
                fail(testName + ": method did not throw exception but docFreq('aaa') is " + result + " instead of expected " + END_COUNT);
              } else if (!autoCommit && result != START_COUNT) {
                fail(testName + ": method did not throw exception but docFreq('aaa') is " + result + " instead of expected " + START_COUNT + " [autoCommit = false]");
              }
            } else {
              // On hitting exception we still may have added
              // all docs:
              if (result != START_COUNT && result != END_COUNT) {
                err.printStackTrace(System.out);
                fail(testName + ": method did throw exception but docFreq('aaa') is " + result + " instead of expected " + START_COUNT + " or " + END_COUNT);
              }
            }

            searcher = new IndexSearcher(reader);
            try {
              hits = searcher.search(new TermQuery(searchTerm));
            } catch (IOException e) {
              e.printStackTrace(System.out);
              fail(testName + ": exception when searching: " + e);
            }
            int result2 = hits.length();
            if (success) {
              if (result2 != result) {
                fail(testName + ": method did not throw exception but hits.length for search on term 'aaa' is " + result2 + " instead of expected " + result);
              }
            } else {
View Full Code Here

        }
        writer.close();

        Term searchTerm = new Term("content", "aaa");       
        IndexSearcher searcher = new IndexSearcher(dir);
        Hits hits = searcher.search(new TermQuery(searchTerm));
        assertEquals("first number of hits", 14, hits.length());
        searcher.close();

        IndexReader reader = IndexReader.open(dir);

        writer = new IndexWriter(dir, false, new WhitespaceAnalyzer());
        for(int i=0;i<3;i++) {
          for(int j=0;j<11;j++) {
            addDoc(writer);
          }
          searcher = new IndexSearcher(dir);
          hits = searcher.search(new TermQuery(searchTerm));
          assertEquals("reader incorrectly sees changes from writer with autoCommit disabled", 14, hits.length());
          searcher.close();
          assertTrue("reader should have still been current", reader.isCurrent());
        }

        // Now, close the writer:
        writer.close();
        assertFalse("reader should not be current now", reader.isCurrent());

        searcher = new IndexSearcher(dir);
        hits = searcher.search(new TermQuery(searchTerm));
        assertEquals("reader did not see changes after writer was closed", 47, hits.length());
        searcher.close();
    }
View Full Code Here
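
Once isCurrent() returns false, the stale reader can be refreshed rather than rebuilt from scratch. A short continuation sketch using IndexReader.reopen() (available from Lucene 2.4 onward; whether this codebase is on such a version is an assumption, and the variables are those of the test above):

    // Refresh the stale reader; reopen() reuses the segments that did not change.
    if (!reader.isCurrent()) {
        IndexReader refreshed = reader.reopen();
        if (refreshed != reader) {
            reader.close();            // the old reader is no longer needed
            reader = refreshed;
        }
    }
    searcher = new IndexSearcher(reader);
    hits = searcher.search(new TermQuery(searchTerm));
    assertEquals("refreshed reader should see the writer's changes", 47, hits.length());
    searcher.close();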

      }
      writer.close();

      Term searchTerm = new Term("content", "aaa");       
      IndexSearcher searcher = new IndexSearcher(dir);
      Hits hits = searcher.search(new TermQuery(searchTerm));
      assertEquals("first number of hits", 14, hits.length());
      searcher.close();

      writer = new IndexWriter(dir, false, new WhitespaceAnalyzer(), false);
      writer.setMaxBufferedDocs(10);
      for(int j=0;j<17;j++) {
        addDoc(writer);
      }
      // Delete all docs:
      writer.deleteDocuments(searchTerm);

      searcher = new IndexSearcher(dir);
      hits = searcher.search(new TermQuery(searchTerm));
      assertEquals("reader incorrectly sees changes from writer with autoCommit disabled", 14, hits.length());
      searcher.close();

      // Now, close the writer:
      writer.abort();

      assertNoUnreferencedFiles(dir, "unreferenced files remain after abort()");

      searcher = new IndexSearcher(dir);
      hits = searcher.search(new TermQuery(searchTerm));
      assertEquals("saw changes after writer.abort", 14, hits.length());
      searcher.close();
         
      // Now make sure we can re-open the index, add docs,
      // and all is good:
      writer = new IndexWriter(dir, false, new WhitespaceAnalyzer(), false);
      writer.setMaxBufferedDocs(10);
      for(int i=0;i<12;i++) {
        for(int j=0;j<17;j++) {
          addDoc(writer);
        }
        searcher = new IndexSearcher(dir);
        hits = searcher.search(new TermQuery(searchTerm));
        assertEquals("reader incorrectly sees changes from writer with autoCommit disabled", 14, hits.length());
        searcher.close();
      }

      writer.close();
      searcher = new IndexSearcher(dir);
      hits = searcher.search(new TermQuery(searchTerm));
      assertEquals("didn't see changes after close", 218, hits.length());
      searcher.close();

      dir.close();
    }
View Full Code Here

        }
      }
      writer.close();

      IndexSearcher searcher = new IndexSearcher(dir);
      Hits hits = searcher.search(new TermQuery(new Term("field", "aaa")));
      assertEquals(300, hits.length());
      searcher.close();

      dir.close();
    }
View Full Code Here

      writer.close();

      Term searchTerm = new Term("field", "aaa");

      IndexSearcher searcher = new IndexSearcher(dir);
      Hits hits = searcher.search(new TermQuery(searchTerm));
      assertEquals(10, hits.length());
      searcher.close();

      writer  = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
      writer.setMaxBufferedDocs(10);
      // Enable norms for only 1 doc, post flush
      for(int j=0;j<27;j++) {
        Document doc = new Document();
        Field f = new Field("field", "aaa", Field.Store.YES, Field.Index.TOKENIZED);
        if (j != 26) {
          f.setOmitNorms(true);
        }
        doc.add(f);
        writer.addDocument(doc);
      }
      writer.close();
      searcher = new IndexSearcher(dir);
      hits = searcher.search(new TermQuery(searchTerm));
      assertEquals(27, hits.length());
      searcher.close();

      IndexReader reader = IndexReader.open(dir);
      reader.close();
View Full Code Here

        addDoc(writer);
      }
      writer.close();
      Term searchTerm = new Term("content", "aaa");       
      IndexSearcher searcher = new IndexSearcher(dir);
      Hits hits = searcher.search(new TermQuery(searchTerm));
      assertEquals("did not get right number of hits", 100, hits.length());
      searcher.close();

      writer  = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
      writer.close();
View Full Code Here
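
Passing create=true to the last IndexWriter constructor above replaces the existing index with an empty one. A short continuation sketch verifying the wipe (same variables as the snippet above):

    // After re-creating the index, the same term query should match nothing.
    searcher = new IndexSearcher(dir);
    hits = searcher.search(new TermQuery(searchTerm));
    assertEquals("index should be empty after create=true", 0, hits.length());
    searcher.close();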
