Package org.apache.lucene.search

Examples of org.apache.lucene.search.Hits
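
All of the excerpts on this page use the pre-3.0 Hits API, where IndexSearcher.search(Query) returns a lazily loaded, score-ordered result collection. For orientation, here is a minimal, self-contained sketch of that workflow; it is not taken from any of the projects below, the index path, field name, and query string are placeholders, and it assumes a Lucene 2.x release in which Hits has not yet been removed.

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.search.Hits;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;

public class HitsBasics {
  public static void main(String[] args) throws Exception {
    // "/path/to/index" and the "contents" field are placeholders.
    IndexSearcher searcher = new IndexSearcher("/path/to/index");
    try {
      QueryParser parser = new QueryParser("contents", new StandardAnalyzer());
      Query query = parser.parse("lucene");
      Hits hits = searcher.search(query);   // deprecated pre-3.0 API
      System.out.println(hits.length() + " hits");
      for (int i = 0; i < hits.length(); i++) {
        Document doc = hits.doc(i);         // loads the stored fields lazily
        System.out.println(hits.score(i) + "\t" + doc.get("contents"));
      }
    } finally {
      searcher.close();
    }
  }
}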


    try {
      File directory = searchIndexManager.getIndexDirectory();
      searcher = new IndexSearcher(IndexReader.open(directory));
      Analyzer analyzer = DocumentCreator.createDocumentAnalyzer();
      QueryParser parser = new QueryParser(DocumentCreator.FIELD_TEXT,analyzer);
      Hits hits = searcher.search(parser.parse(query));
      List<BookmarkDoc> bmDocs = new ArrayList<BookmarkDoc>();
      if (offset >= 0 && offset < hits.length()) {
        if (count > 0) {         
          for (int i = offset; i < hits.length() && bmDocs.size() < count; i++) {
            BookmarkDoc doc = createBookmarkDoc(hits.doc(i));
            if (doc != null) {
              bmDocs.add(doc);
            }
          }
        }
      }
      result = new DaoResult<BookmarkDoc>(bmDocs,hits.length());
    } finally {
      if (searcher != null) {
        searcher.close();
      }
    }
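
The offset/count bounds checking above recurs in the next excerpt as well. A sketch of the same pattern factored into a reusable helper; the class name, method name, and plain Document return type are choices made here, not part of the original code.

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.lucene.document.Document;
import org.apache.lucene.search.Hits;

class HitsPager {
  /** Skip `offset` hits, then collect up to `count` stored documents. */
  static List<Document> page(Hits hits, int offset, int count) throws IOException {
    List<Document> docs = new ArrayList<Document>();
    if (offset < 0 || offset >= hits.length() || count <= 0) {
      return docs;  // mirrors the guard conditions in the excerpt above
    }
    for (int i = offset; i < hits.length() && docs.size() < count; i++) {
      docs.add(hits.doc(i));
    }
    return docs;
  }
}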


      Query inputQuery = parser.parse(query);
      BooleanQuery boolQuery = new BooleanQuery();
      boolQuery.add(matchUserQuery, BooleanClause.Occur.MUST);
      boolQuery.add(inputQuery,BooleanClause.Occur.MUST);
     
      Hits hits = searcher.search(boolQuery);
      List<BookmarkDoc> bmDocs = new ArrayList<BookmarkDoc>();
      if (offset >= 0 && offset < hits.length()) {
        if (count > 0) {         
          for (int i = offset; i < hits.length() && bmDocs.size() < count; i++) {
            BookmarkDoc doc = createBookmarkDoc(hits.doc(i));
            if (doc != null) {
              bmDocs.add(doc);
            }
          }
        }
      }
      result = new DaoResult<BookmarkDoc>(bmDocs,hits.length());
    } finally {
      if (searcher != null) {
        searcher.close();
      }
    }
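
The excerpt above combines the parsed text query with a matchUserQuery restriction so that both clauses must match, but it does not show how matchUserQuery is built. A plausible sketch, assuming bookmarks are indexed with a "user" keyword field (that field name is an assumption):

import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;

class UserScopedQuery {
  /** Require both the per-user restriction and the parsed text query to match. */
  static BooleanQuery scopeToUser(Query inputQuery, String username) {
    // The "user" field name is an assumption; the excerpt does not show it.
    Query matchUserQuery = new TermQuery(new Term("user", username));
    BooleanQuery boolQuery = new BooleanQuery();
    boolQuery.add(matchUserQuery, BooleanClause.Occur.MUST);
    boolQuery.add(inputQuery, BooleanClause.Occur.MUST);
    return boolQuery;
  }
}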

      IndexReader indexReader = null;
      IndexSearcher indexSearch = null;
      try{
        indexReader = searchSuggestIndexer.openSuggestIndexReader();
        indexSearch = new IndexSearcher(indexReader);
        Hits hits = indexSearch.search(prefixQuery);
        int maxNumCandidate = maxSuggestionSize;
        if(idxReader != null && field != null){
          maxNumCandidate = maxSuggestionSize * 10;
        }
        PriorityQueue<SuggestWord> suggestQueue = new PriorityQueue<SuggestWord>(maxNumCandidate);
        for(int i = 0; i < hits.length() && i < maxNumCandidate; i++){
          String sugWord = hits.doc(i).get("t");
          // check if the 'sugWord' matches at least one doc in the
          // source index database (idxReader)
          if(idxReader != null && field != null){
            int freq = idxReader.docFreq(new Term(field,sugWord));
            if(freq > 0){
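
The excerpt above is cut off inside the docFreq check. A simplified sketch of the overall pattern, returning plain strings instead of the SuggestWord priority queue used by the original; the class and method names here are illustrative only.

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.Hits;

class SuggestCollector {
  /** Keep a suggested term only if it occurs in the source index (when one is given). */
  static List<String> collect(Hits hits, IndexReader idxReader, String field,
                              int maxSuggestionSize) throws IOException {
    int maxNumCandidate = (idxReader != null && field != null)
        ? maxSuggestionSize * 10
        : maxSuggestionSize;
    List<String> suggestions = new ArrayList<String>();
    for (int i = 0; i < hits.length() && i < maxNumCandidate
                    && suggestions.size() < maxSuggestionSize; i++) {
      String sugWord = hits.doc(i).get("t");   // "t" is the term field of the suggest index
      if (idxReader != null && field != null
          && idxReader.docFreq(new Term(field, sugWord)) <= 0) {
        continue;  // the term matches no document in the source index
      }
      suggestions.add(sugWord);
    }
    return suggestions;
  }
}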

  {
   System.out.println("Started testQuery");
   SearchQuery sq = new SearchQuery();
   String query = "money";
   System.out.println("Query: " + query);
   Hits hits = sq.getHits(query);
   sq.dumpHits(hits, false);
   String[] results = sq.getResults();
   for (int i = 0; i < results.length; i++)
    System.out.println(i + ": " + results[i]);
  
   System.out.println("Ended testQuery");

  * @return Hits A hits object
  */
public Hits getHits(String question)
{
  //*-- translate the question into a search engine query
  Hits hits = null;
  try {
    query = buildQuery(question);
    logger.info("Question: " + question + " is parsed to " + query);
    hits = is.search(query);
  } catch (IOException ie) {
    logger.error("IO Error in fetching hits for query " + question);
    hits = null;
  }

  if (ranks == null) return ("");
 
  try
  {
   //*-- submit the question to the search engine and fetch the hits
   Hits hits = getHits(question);
   if (hits == null) throw new IOException("Could not find any hits for question " + question);
  
   //*-- build the list of answers
   DbTools dbt = Constants.getDbt();
   dbt.openDB(Constants.EXT_FILES_DB, true, false); //*-- read only access
  
   Explanation explanation;
   LOOP: for (int i = 0; i < hits.length(); i++)
   {
     //*-- limit explanations for the top 100 hits
      if (i > 100) break LOOP;
      boolean foundHit = false;
   
     //*-- check if the hit rank matches the passed rank
     for (int j = 0; j < ranks.length; j++) if (ranks[j] == i) foundHit = true;
     if (!foundHit) continue LOOP;
    
     retv.append("Document: " + i + Constants.NEWLINE);
      explanation = is.explain(query, hits.id(i));
     Document doc = hits.doc(i);
     String key = doc.get("key");
     DatabaseEntry data = new DatabaseEntry();
     if (!dbt.fetch(key, data)) continue LOOP;

     //*-- extract the text
     IndexableDoc idoc = new IndexableDoc();
     idoc = (IndexableDoc) idoc.getBdbBinding().entryToObject(data);
     String line= idoc.getContents().toString();
     if (line.length() > 1000) line = line.substring(0, 999);
 
     retv.append(" Score: " + hits.score(i) + " TEXT: " + line + Constants.NEWLINE);
     retv.append(explanation.toString());
     retv.append("------------------------------------------------------------------");
     retv.append(Constants.NEWLINE); retv.append(Constants.NEWLINE);
   }
  } //*-- end of try
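
The key call in the excerpt above is the searcher's explain() method, which takes the original query plus a hit's internal document id (hits.id(i)) and returns a scoring breakdown. A minimal, self-contained sketch of just that part; the class and method names are illustrative.

import java.io.IOException;
import org.apache.lucene.search.Explanation;
import org.apache.lucene.search.Hits;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;

class ExplainHits {
  /** Print a scoring explanation for the first `max` hits. */
  static void explainTopHits(IndexSearcher is, Query query, Hits hits, int max)
      throws IOException {
    for (int i = 0; i < hits.length() && i < max; i++) {
      Explanation explanation = is.explain(query, hits.id(i));
      System.out.println("doc " + hits.id(i) + " score " + hits.score(i));
      System.out.println(explanation.toString());
    }
  }
}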

public Hits getHits(String str)
{ return getHits(str, false); }
public Hits getHits(String str, boolean filter)
{
  Hits hits = null;
  bgramAnalyzer.setExtractEntities(false);
  try
  {
   QueryParser qp = new QueryParser("contents", bgramAnalyzer );
   if (filter) str = str.replaceAll("[^a-zA-Z0-9\"']", " ");
   query = qp.parse(str);
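
The regex filter above strips query-syntax characters from user input before parsing. An alternative (not what the original code does) is QueryParser.escape(), which backslash-escapes Lucene's special characters so they are treated as literal text. A small sketch:

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.queryParser.ParseException;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.search.Query;

class SafeParse {
  /** Parse user input as literal text by escaping Lucene query syntax first. */
  static Query parseLiteral(String field, Analyzer analyzer, String userInput)
      throws ParseException {
    QueryParser qp = new QueryParser(field, analyzer);
    return qp.parse(QueryParser.escape(userInput));
  }
}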

                                analyzer);
    } catch (ParseException pe) {
      // TODO: wrap into own exception for more independence of lucene
      throw new QueryParseException(pe);
    }
    Hits hits = searcher.search(query);
    nrOfHits = hits.length();
    logger.info("Query returned " + nrOfHits + " hits.");
    List<ItemResult> results = new ArrayList<ItemResult>();
    for (int i = 0; i < hits.length() && i < maxResults; i++) {
      Document doc = hits.doc(i);
      long channelId = Long.parseLong(doc.get(ItemFieldConstants.CHANNEL_ID));
      ChannelIF channel = channels.getById(channelId);
      if (channel == null) {
        throw new UnretrievableException("channel " + channelId);
      }
      // TODO: could this be done in another fashion or using a context?
      long itemId = Long.parseLong(doc.get(ItemFieldConstants.ITEM_ID));
      ItemIF item = channel.getItem(itemId);
      if (item == null) {
        throw new UnretrievableException("item " + itemId);
      }
      results.add(new ItemResult(item, hits.score(i)));
    }
    searcher.close();
    return results;
  }

      } else {
        aRetArr = null;
      }
    } else {
      if (DebugFile.trace) DebugFile.writeln("IndexSearcher.search("+oQry.toString()+")");
      Hits oHitSet = oSearch.search(oQry);
      int iHitCount = oHitSet.length();
      if (DebugFile.trace) DebugFile.writeln("hit count is "+String.valueOf(iHitCount));
      if (iHitCount>0) {
        aRetArr = new MailRecord[iHitCount];
        for (int h=0; h<iHitCount; h++) {
          Document oDoc = oHitSet.doc(h);
          if (DebugFile.trace) DebugFile.writeln("found "+oDoc.get("guid")+" "+oDoc.get("title")+" created by "+" "+oDoc.get("author")+" at date "+oDoc.get("created"));
          String[] aAbstract = Gadgets.split(oDoc.get("abstract"), '¨');
          aRetArr[h] = new MailRecord(aAbstract[0], aAbstract[1], aAbstract[2],
                                      aAbstract[3], aAbstract[4], aAbstract[5],
                                      oDoc.get("container"));

      System.out.println(query);
      Searcher searcher = new IndexSearcher(ramDir);
      query = query.rewrite(reader);
      System.out.println(query);
      System.out.println("Searching for: " + query.toString(FIELD_NAME));
      Hits hits = searcher.search(query);
     
      BoldFormatter formatter = new BoldFormatter();
      Highlighter highlighter =new Highlighter(formatter,new QueryScorer(query));
      highlighter.setTextFragmenter(new SimpleFragmenter(50));
      for (int i = 0; i < hits.length(); i++)
      {
        String text = hits.doc(i).get(FIELD_NAME);
        int maxNumFragmentsRequired = 5;
        String fragmentSeparator = "...";
        TermPositionVector tpv = (TermPositionVector)reader.getTermFreqVector(hits.id(i),FIELD_NAME);
        TokenStream tokenStream=TokenSources.getTokenStream(tpv);
        /*
        TokenStream tokenStream2=
          (new StandardAnalyzer())
          //XFactory.getWriterAnalyzer()
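
The highlighter excerpt is cut off before any fragments are actually produced. The loop typically finishes with a call along these lines; this is a sketch rather than the original code, and it assumes the 2.x-era contrib highlighter in which getBestFragments declares only IOException.

import java.io.IOException;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.search.highlight.Highlighter;

class FragmentPrinter {
  /** Ask the highlighter for the best fragments of one stored field value. */
  static String bestFragments(Highlighter highlighter, TokenStream tokenStream,
                              String text, int maxNumFragmentsRequired,
                              String fragmentSeparator) throws IOException {
    // Walks the supplied TokenStream over `text` and joins the
    // highest-scoring fragments with the separator.
    return highlighter.getBestFragments(tokenStream, text,
        maxNumFragmentsRequired, fragmentSeparator);
  }
}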
