Package org.apache.lucene.search

Examples of org.apache.lucene.search.PhraseQuery
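PhraseQuery matches documents containing a particular sequence of terms, using the term positions stored in the index; setSlop() relaxes how far apart the terms may drift. The snippets below are excerpts and all use the classic mutable API (construct the query, then add() terms). As a point of reference, here is a minimal self-contained sketch in that same 4.x-era style; the field name, analyzer, and Version constant are our own choices, and later Lucene versions build the query through PhraseQuery.Builder instead.

import org.apache.lucene.analysis.core.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.Version;

public class PhraseQueryExample {
  public static void main(String[] args) throws Exception {
    Directory dir = new RAMDirectory();
    IndexWriter iw = new IndexWriter(dir,
        new IndexWriterConfig(Version.LUCENE_46, new WhitespaceAnalyzer(Version.LUCENE_46)));
    Document doc = new Document();
    doc.add(new TextField("body", "just a test of gaps", Field.Store.NO));
    iw.addDocument(doc);
    iw.close();

    IndexReader ir = DirectoryReader.open(dir);
    IndexSearcher is = new IndexSearcher(ir);

    // Exact phrase body:"a test": terms must be adjacent and in order.
    PhraseQuery exact = new PhraseQuery();
    exact.add(new Term("body", "a"));
    exact.add(new Term("body", "test"));
    System.out.println(is.search(exact, 10).totalHits);   // 1

    // Sloppy phrase body:"just test"~1: one extra position ("a") is tolerated.
    PhraseQuery sloppy = new PhraseQuery();
    sloppy.add(new Term("body", "just"));
    sloppy.add(new Term("body", "test"));
    sloppy.setSlop(1);
    System.out.println(is.search(sloppy, 10).totalHits);  // 1

    ir.close();
    dir.close();
  }
}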


    doc.add(new TextField("body", "test of gaps", Field.Store.NO));
    iw.addDocument(doc);
    IndexReader ir = iw.getReader();
    iw.close();
    IndexSearcher is = newSearcher(ir);
    PhraseQuery pq = new PhraseQuery();
    pq.add(new Term("body", "just"), 0);
    pq.add(new Term("body", "test"), 2);
    // body:"just ? test"
    assertEquals(1, is.search(pq, 5).totalHits);
    ir.close();
    dir.close();
  }
View Full Code Here


    doc.add(new TextField("body", "test of gaps", Field.Store.NO));
    iw.addDocument(doc);
    IndexReader ir = iw.getReader();
    iw.close();
    IndexSearcher is = newSearcher(ir);
    PhraseQuery pq = new PhraseQuery();
    pq.add(new Term("body", "just"), 0);
    pq.add(new Term("body", "test"), 3);
    // body:"just ? ? test"
    assertEquals(1, is.search(pq, 5).totalHits);
    ir.close();
    dir.close();
  }
View Full Code Here
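The two tests above pin the terms at explicit positions with add(Term, position), so body:"just ? test" and body:"just ? ? test" only match when exactly that many positions separate the words. A looser alternative, sketched below reusing the searcher from the example at the top of the page (our assumption, not part of the original test), keeps the default consecutive positions and lets setSlop() absorb the gap:

    // Sloppy variant (sketch): instead of pinning "test" at an exact offset after
    // "just", allow the phrase to stretch by up to two positions.
    PhraseQuery pq = new PhraseQuery();
    pq.add(new Term("body", "just"));
    pq.add(new Term("body", "test"));
    pq.setSlop(2);                     // body:"just test"~2
    System.out.println(is.search(pq, 5).totalHits);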

    File oldIndexDir = _TestUtil.getTempDir("negatives");
    _TestUtil.unzip(getDataFile(bogus24IndexName), oldIndexDir);
    Directory dir = newFSDirectory(oldIndexDir);
    DirectoryReader ir = DirectoryReader.open(dir);
    IndexSearcher is = new IndexSearcher(ir);
    PhraseQuery pq = new PhraseQuery();
    pq.add(new Term("field3", "more"));
    pq.add(new Term("field3", "text"));
    TopDocs td = is.search(pq, 10);
    assertEquals(1, td.totalHits);
    AtomicReader wrapper = SlowCompositeReaderWrapper.wrap(ir);
    DocsAndPositionsEnum de = wrapper.termPositionsEnum(new Term("field3", "broken"));
    assert de != null;
View Full Code Here
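PhraseQuery matching is driven entirely by the term positions recorded in the postings, which is why this backwards-compatibility test finishes by pulling a DocsAndPositionsEnum for a term from the old index. A rough sketch of walking those positions, assuming the same wrapper and field as the snippet above:

    // Sketch: enumerate the positions that phrase matching consults (4.x positions API).
    DocsAndPositionsEnum positions = wrapper.termPositionsEnum(new Term("field3", "broken"));
    int docId;
    while ((docId = positions.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
      for (int i = 0; i < positions.freq(); i++) {
        System.out.println("doc " + docId + " position " + positions.nextPosition());
      }
    }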

            mpq.add(multiTerms.toArray(new Term[0]));
          }
          return mpq;
        }
      } else {
        PhraseQuery pq = newPhraseQuery();
        pq.setSlop(phraseSlop);
        int position = -1;

        for (int i = 0; i < numTokens; i++) {
          int positionIncrement = 1;

          try {
            boolean hasNext = buffer.incrementToken();
            assert hasNext == true;
            termAtt.fillBytesRef();
            if (posIncrAtt != null) {
              positionIncrement = posIncrAtt.getPositionIncrement();
            }
          } catch (IOException e) {
            // safe to ignore, because we know the number of tokens
          }

          if (enablePositionIncrements) {
            position += positionIncrement;
            pq.add(new Term(field, BytesRef.deepCopyOf(bytes)), position);
          } else {
            pq.add(new Term(field, BytesRef.deepCopyOf(bytes)));
          }
        }
        return pq;
      }
    }
View Full Code Here
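The parser code above consumes an analyzed token stream, copying each term's bytes into the phrase and, when enablePositionIncrements is on, advancing the position by the token's increment so that holes left by removed stopwords survive into the query. A stripped-down, self-contained sketch of the same idea follows; buildPhrase() is our own helper name, not something from the snippet:

import java.io.IOException;
import java.io.StringReader;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.PhraseQuery;

public final class PhraseBuilder {
  /** Turns analyzed text into a PhraseQuery, keeping position gaps (sketch). */
  static PhraseQuery buildPhrase(Analyzer analyzer, String field, String text) throws IOException {
    PhraseQuery pq = new PhraseQuery();
    TokenStream ts = analyzer.tokenStream(field, new StringReader(text));
    CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class);
    PositionIncrementAttribute posAtt = ts.addAttribute(PositionIncrementAttribute.class);
    ts.reset();
    int position = -1;
    while (ts.incrementToken()) {
      position += posAtt.getPositionIncrement();   // a removed stopword leaves a gap
      pq.add(new Term(field, termAtt.toString()), position);
    }
    ts.end();
    ts.close();
    return pq;
  }
}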

   * <p>
   * This is intended for subclasses that wish to customize the generated queries.
   * @return new PhraseQuery instance
   */
  protected PhraseQuery newPhraseQuery() {
    return new PhraseQuery();
  }
View Full Code Here
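The method above is the protected factory hook its javadoc describes: subclasses override it to change what kind of PhraseQuery the parser hands back. The page does not show which parser class declares the hook, so the sketch below uses a made-up MiniParser base class purely to illustrate the override pattern, here making every generated phrase sloppy by default:

import org.apache.lucene.index.Term;
import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.Query;

class MiniParser {
  /** Factory hook: subclasses may return a customized PhraseQuery. */
  protected PhraseQuery newPhraseQuery() {
    return new PhraseQuery();
  }

  Query parsePhrase(String field, String... words) {
    PhraseQuery pq = newPhraseQuery();
    for (String w : words) {
      pq.add(new Term(field, w));
    }
    return pq;
  }
}

/** A subclass that customizes the generated queries: every phrase gets slop 2. */
class SloppyMiniParser extends MiniParser {
  @Override
  protected PhraseQuery newPhraseQuery() {
    PhraseQuery pq = new PhraseQuery();
    pq.setSlop(2);
    return pq;
  }
}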

  }

  protected void smokeTestSearcher(IndexSearcher s) throws Exception {
    runQuery(s, new TermQuery(new Term("body", "united")));
    runQuery(s, new TermQuery(new Term("titleTokenized", "states")));
    PhraseQuery pq = new PhraseQuery();
    pq.add(new Term("body", "united"));
    pq.add(new Term("body", "states"));
    runQuery(s, pq);
  }
View Full Code Here

    doc.add(new Field("field", tokens));
    w.addDocument(doc);
    w.commit();

    IndexSearcher s = new IndexSearcher(dir);
    PhraseQuery pq = new PhraseQuery();
    pq.add(new Term("field", "a"));
    pq.add(new Term("field", "b"));
    pq.add(new Term("field", "c"));
    ScoreDoc[] hits = s.search(pq, null, 1000).scoreDocs;
    assertEquals(1, hits.length);

    Query q = new SpanTermQuery(new Term("field", "a"));
    hits = s.search(q, null, 1000).scoreDocs;
View Full Code Here
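This test runs a PhraseQuery for field:"a b c" and then a span query over the same index. When phrase-like, ordered proximity matching is needed inside the span framework, SpanNearQuery is the usual counterpart; a sketch, assuming the same searcher s and "field" contents as the snippet above:

    // Span-based counterpart of the exact phrase field:"a b c" (sketch).
    SpanNearQuery near = new SpanNearQuery(
        new SpanQuery[] {
            new SpanTermQuery(new Term("field", "a")),
            new SpanTermQuery(new Term("field", "b")),
            new SpanTermQuery(new Term("field", "c"))
        },
        0,      // slop: clauses must be adjacent
        true);  // inOrder: a before b before c
    hits = s.search(near, null, 1000).scoreDocs;
    assertEquals(1, hits.length);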

  protected Query pq( float boost, String field, String... texts ){
    return pq( boost, 0, field, texts );
  }
 
  protected Query pq( float boost, int slop, String field, String... texts ){
    PhraseQuery query = new PhraseQuery();
    for( String text : texts ){
      query.add( new Term( field, text ) );
    }
    query.setBoost( boost );
    query.setSlop( slop );
    return query;
  }
View Full Code Here
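A hypothetical use of this helper, assuming an index whose field "f" holds the tokens "a b c" (the field name and contents are our invention, not from the test):

    Query exact  = pq( 2.0f, "f", "a", "b", "c" );  // boosted exact phrase f:"a b c"^2.0
    Query sloppy = pq( 1.0f, 1, "f", "a", "c" );    // f:"a c"~1 still matches "a b c"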

    return bytesRefs;
  }

  protected PhraseQuery toPhraseQuery(List<BytesRef> bytesRefs, String field) {
    PhraseQuery phraseQuery = new PhraseQuery();
    for (BytesRef bytesRef : bytesRefs) {
      phraseQuery.add(new Term(field, bytesRef));
    }
    return phraseQuery;
  }
View Full Code Here

    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
    writer.addDocument(makeDocumentWithFields());
    IndexReader reader = writer.getReader();
   
    IndexSearcher searcher = newSearcher(reader);
    PhraseQuery query = new PhraseQuery();
    query.add(new Term("indexed_not_tokenized", "test1"));
    query.add(new Term("indexed_not_tokenized", "test2"));
   
    ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
    assertEquals(1, hits.length);
   
    doAssert(searcher.doc(hits[0].doc), true);
View Full Code Here
