Package org.apache.lucene.search

Examples of org.apache.lucene.search.Scorer$ChildScorer

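None of the excerpts below actually touches Scorer.ChildScorer directly; they obtain plain Scorer instances from Weight.scorer(...) and iterate them. For context, here is a minimal sketch of how ChildScorer is typically used, assuming the Lucene 4.x API in which Scorer.getChildren() returns a Collection of ChildScorer objects, each pairing a sub-scorer (child) with a label describing how the parent combines it (relationship). The class and method names (ScorerTreeDumper, dump) are illustrative only, not part of Lucene.

    import java.util.Collection;

    import org.apache.lucene.search.Scorer;
    import org.apache.lucene.search.Scorer.ChildScorer;

    public final class ScorerTreeDumper {

      // Walks a scorer tree via Scorer.getChildren() (Lucene 4.x API assumed).
      // Each ChildScorer holds a sub-scorer ("child") and a label describing
      // how the parent combines it ("relationship").
      public static void dump(Scorer scorer, int depth) {
        StringBuilder indent = new StringBuilder();
        for (int i = 0; i < depth; i++) {
          indent.append("  ");
        }
        System.out.println(indent + scorer.getClass().getSimpleName() + " doc=" + scorer.docID());
        Collection<ChildScorer> children = scorer.getChildren();
        for (ChildScorer child : children) {
          System.out.println(indent + "  (" + child.relationship + ")");
          dump(child.child, depth + 1);
        }
      }
    }

Called on a top-level Scorer obtained from a Weight, as in the excerpts that follow, this would print one line per sub-scorer (for example, one per clause of a BooleanQuery), which is the usual reason to inspect ChildScorer at all.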

            List<Query> filters = rb.getFilters();
            if (filters != null) {
              final ArrayList<DocIdSet> docsets = new ArrayList<DocIdSet>(filters.size());
              for (Query filter : filters) {
                Weight weight = filter.createWeight(rb.req.getSearcher());
                final Scorer scorer = weight.scorer(reader, false, true);
                docsets.add(new DocIdSet() {
                  @Override
                  public DocIdSetIterator iterator() throws IOException {
                    return scorer;
                  }
View Full Code Here


  {
    IndexReader reader = searcher.getIndexReader();
   
    BooleanQuery bquery;
    SectionSearchQuery squery;
    Scorer scorer;
    int count;
   
    // 1. (+f1:aa +f2:aaa)
    bquery = new BooleanQuery();
    bquery.add(new TermQuery(new Term("f1","aa")), BooleanClause.Occur.MUST);
    bquery.add(new TermQuery(new Term("f2","aaa")), BooleanClause.Occur.MUST);

    scorer = bquery.weight(searcher).scorer(reader, true, true);
    count = 0;
    while(scorer.nextDoc() != Scorer.NO_MORE_DOCS) count++;
    assertEquals("non-section count mismatch", 4, count);
   
    squery = new SectionSearchQuery(bquery);
    scorer = squery.weight(searcher).scorer(reader, true, true);
    count = 0;
    while(scorer.nextDoc() != Scorer.NO_MORE_DOCS) count++;
    assertEquals("seciton count mismatch", 2, count);
   
    // 2. (+f1:bb +f2:aaa)
    bquery = new BooleanQuery();
    bquery.add(new TermQuery(new Term("f1","bb")), BooleanClause.Occur.MUST);
    bquery.add(new TermQuery(new Term("f2","aaa")), BooleanClause.Occur.MUST);

    scorer = bquery.weight(searcher).scorer(reader, true, true);
    count = 0;
    while(scorer.nextDoc() != Scorer.NO_MORE_DOCS) count++;
    assertEquals("non-section count mismatch", 4, count);
   
    squery = new SectionSearchQuery(bquery);
    scorer = squery.weight(searcher).scorer(reader, true, true);
    count = 0;
    while(scorer.nextDoc() != Scorer.NO_MORE_DOCS) count++;
    assertEquals("seciton count mismatch", 3, count);
   
    // 3. (+f1:aa +f2:bbb)
    bquery = new BooleanQuery();
    bquery.add(new TermQuery(new Term("f1","aa")), BooleanClause.Occur.MUST);
    bquery.add(new TermQuery(new Term("f2","bbb")), BooleanClause.Occur.MUST);

    scorer = bquery.weight(searcher).scorer(reader, true, true);
    count = 0;
    while(scorer.nextDoc() != Scorer.NO_MORE_DOCS) count++;
    assertEquals("non-section count mismatch", 3, count);
   
    squery = new SectionSearchQuery(bquery);
    scorer = squery.weight(searcher).scorer(reader, true, true);
    count = 0;
    while(scorer.nextDoc() != Scorer.NO_MORE_DOCS) count++;
    assertEquals("seciton count mismatch", 2, count);
   
    // 4. (+f1:aa +(f2:bbb f2:ccc))
    BooleanQuery bquery2 = new BooleanQuery();
    bquery2.add(new TermQuery(new Term("f2","bbb")), BooleanClause.Occur.SHOULD);
    bquery2.add(new TermQuery(new Term("f2","ccc")), BooleanClause.Occur.SHOULD);
    bquery = new BooleanQuery();
    bquery.add(new TermQuery(new Term("f1","aa")), BooleanClause.Occur.MUST);
    bquery.add(bquery2, BooleanClause.Occur.MUST);

    scorer = bquery.weight(searcher).scorer(reader, true, true);
    count = 0;
    while(scorer.nextDoc() != Scorer.NO_MORE_DOCS) count++;
    assertEquals("non-section count mismatch", 4, count);
   
    squery = new SectionSearchQuery(bquery);
    scorer = squery.weight(searcher).scorer(reader, true, true);
    count = 0;
    while(scorer.nextDoc() != Scorer.NO_MORE_DOCS) count++;
    assertEquals("section count mismatch", 3, count);
  }
View Full Code Here

  {
    IndexReader reader = searcher.getIndexReader();
   
    BooleanQuery bquery;
    SectionSearchQuery squery;
    Scorer scorer;
    int count;
   
    // 1.
    bquery = new BooleanQuery();
    bquery.add(new TermQuery(new Term("f1","aa")), BooleanClause.Occur.MUST);
    bquery.add(new IntMetaDataQuery(intMetaTerm, new IntMetaDataQuery.SimpleValueValidator(100)), BooleanClause.Occur.MUST);
    squery = new SectionSearchQuery(bquery);
    scorer = squery.weight(searcher).scorer(reader, true, true);
    count = 0;
    while(scorer.nextDoc() != Scorer.NO_MORE_DOCS) count++;
    assertEquals("section count mismatch", 1, count);
   
    // 2.
    bquery = new BooleanQuery();
    bquery.add(new TermQuery(new Term("f1","aa")), BooleanClause.Occur.MUST);
    bquery.add(new IntMetaDataQuery(intMetaTerm, new IntMetaDataQuery.SimpleValueValidator(200)), BooleanClause.Occur.MUST);
    squery = new SectionSearchQuery(bquery);
    scorer = squery.weight(searcher).scorer(reader, true, true);
    count = 0;
    while(scorer.nextDoc() != Scorer.NO_MORE_DOCS) count++;
    assertEquals("section count mismatch", 1, count);
   
    // 3.
    bquery = new BooleanQuery();
    bquery.add(new TermQuery(new Term("f1","bb")), BooleanClause.Occur.MUST);
    bquery.add(new IntMetaDataQuery(intMetaTerm, new IntMetaDataQuery.SimpleValueValidator(200)), BooleanClause.Occur.MUST);
    squery = new SectionSearchQuery(bquery);
    scorer = squery.weight(searcher).scorer(reader, true, true);
    count = 0;
    while(scorer.nextDoc() != Scorer.NO_MORE_DOCS) count++;
    assertEquals("section count mismatch", 2, count);
   
    // 4.
    bquery = new BooleanQuery();
    bquery.add(new TermQuery(new Term("f1","aa")), BooleanClause.Occur.MUST);
    bquery.add(new IntMetaDataQuery(intMetaTerm, new IntMetaDataQuery.SimpleValueValidator(300)), BooleanClause.Occur.MUST);
    squery = new SectionSearchQuery(bquery);
    scorer = squery.weight(searcher).scorer(reader, true, true);
    count = 0;
    while(scorer.nextDoc() != Scorer.NO_MORE_DOCS) count++;
    assertEquals("section count mismatch", 1, count);
   
    // 5.
    bquery = new BooleanQuery();
    bquery.add(new TermQuery(new Term("f1","bb")), BooleanClause.Occur.MUST);
    bquery.add(new IntMetaDataQuery(intMetaTerm, new IntMetaDataQuery.SimpleValueValidator(300)), BooleanClause.Occur.MUST);
    squery = new SectionSearchQuery(bquery);
    scorer = squery.weight(searcher).scorer(reader, true, true);
    count = 0;
    while(scorer.nextDoc() != Scorer.NO_MORE_DOCS) count++;
    assertEquals("section count mismatch", 3, count);
  }
View Full Code Here

        int docStart = start + _docStarts[i];
        collector.setNextReader(_subReaders[i], docStart);
        validator.setNextReader(_subReaders[i], docStart);

        Scorer scorer = weight.scorer(_subReaders[i], true, true);
        if (scorer != null) {
          collector.setScorer(scorer);
          target = scorer.nextDoc();
          while (target != DocIdSetIterator.NO_MORE_DOCS) {
            if (validator.validate(target)) {
              collector.collect(target);
              target = scorer.nextDoc();
            } else {
              target = validator._nextTarget;
              target = scorer.advance(target);
            }
          }
        }
      }
      return;
    }

    for (int i = 0; i < _subReaders.length; i++) {
      DocIdSet filterDocIdSet = filter.getDocIdSet(_subReaders[i]);
      if (filterDocIdSet == null) return;
      int docStart = start + _docStarts[i];
      collector.setNextReader(_subReaders[i], docStart);
      validator.setNextReader(_subReaders[i], docStart);
      Scorer scorer = weight.scorer(_subReaders[i], true, false);
      if (scorer != null) {
        collector.setScorer(scorer);
        DocIdSetIterator filterDocIdIterator = filterDocIdSet.iterator(); // CHECKME: use ConjunctionScorer here?

        int doc = -1;
        target = filterDocIdIterator.nextDoc();
        while (target < DocIdSetIterator.NO_MORE_DOCS) {
          if (doc < target) {
            doc = scorer.advance(target);
          }

          if (doc == target) { // permitted by filter
            if (validator.validate(doc))
View Full Code Here

   * this causes problems
   */
  public void testSpanNearScorerSkipTo1() throws Exception {
    SpanNearQuery q = makeQuery();
    Weight w = q.weight(searcher);
    Scorer s = w.scorer(searcher.getIndexReader(), true, false);
    assertEquals(1, s.advance(1));
  }
View Full Code Here

      public Similarity getSimilarity(Searcher s) {
        return sim;
      }
    };

    Scorer spanScorer = snq.weight(searcher).scorer(searcher.getIndexReader(), true, false);

    assertTrue("first doc", spanScorer.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
    assertEquals("first doc number", spanScorer.docID(), 11);
    float score = spanScorer.score();
    assertTrue("first doc score should be zero, " + score, score == 0.0f);
    assertTrue("no second doc", spanScorer.nextDoc() == DocIdSetIterator.NO_MORE_DOCS);
  }
View Full Code Here

    IndexSearcher searcher = new IndexSearcher(reader);
    for (Entry<Query, Integer> entry : deletesFlushed.queries.entrySet()) {
      Query query = entry.getKey();
      int limit = entry.getValue().intValue();
      Weight weight = query.weight(searcher);
      Scorer scorer = weight.scorer(reader, true, false);
      if (scorer != null) {
        while(true)  {
          int doc = scorer.nextDoc();
          if (((long) docIDStart) + doc >= limit)
            break;
          reader.deleteDocument(doc);
          any = true;
        }
View Full Code Here

      // Pass true for "scoresDocsInOrder", because we
      // require in-order scoring, even if caller does not,
      // since we call advance on the valSrcScorers.  Pass
      // false for "topScorer" because we will not invoke
      // score(Collector) on these scorers:
      Scorer subQueryScorer = subQueryWeight.scorer(reader, true, false);
      if (subQueryScorer == null) {
        return null;
      }
      Scorer[] valSrcScorers = new Scorer[valSrcWeights.length];
      for(int i = 0; i < valSrcScorers.length; i++) {
View Full Code Here

    int target = 0;
    if (filter == null) {
      int docStart = start;
      collector.setNextReader(_atomicReaderContext);
      validator.setNextReader(_boboSegmentReader, docStart);
      Scorer scorer = weight.scorer(_atomicReaderContext, true, true,
        _boboSegmentReader.getLiveDocs());
      if (scorer != null) {
        collector.setScorer(scorer);
        target = scorer.nextDoc();
        while (target != DocIdSetIterator.NO_MORE_DOCS) {
          if (validator.validate(target)) {
            collector.collect(target);
            target = scorer.nextDoc();
          } else {
            target = validator._nextTarget;
            target = scorer.advance(target);
          }
        }
      }
      if (mapReduceWrapper != null) {
        mapReduceWrapper.mapFullIndexReader(_boboSegmentReader, validator.getCountCollectors());
      }
      return;
    }

    DocIdSet filterDocIdSet = filter.getDocIdSet(_atomicReaderContext,
      _boboSegmentReader.getLiveDocs());
    // shall we use return or continue here ??
    if (filterDocIdSet == null) {
      return;
    }
    int docStart = start;
    collector.setNextReader(_atomicReaderContext);
    validator.setNextReader(_boboSegmentReader, docStart);
    Scorer scorer = weight.scorer(_atomicReaderContext, true, false,
      _boboSegmentReader.getLiveDocs());
    if (scorer != null) {
      collector.setScorer(scorer);
      DocIdSetIterator filterDocIdIterator = filterDocIdSet.iterator(); // CHECKME: use
                                                                        // ConjunctionScorer here?

      if (filterDocIdIterator == null) {
        return;
      }

      int doc = -1;
      target = filterDocIdIterator.nextDoc();
      if (mapReduceWrapper == null) {
        while (target < DocIdSetIterator.NO_MORE_DOCS) {
          if (doc < target) {
            doc = scorer.advance(target);
          }

          if (doc == target) // permitted by filter
          {
            if (validator.validate(doc)) {
              collector.collect(doc);

              target = filterDocIdIterator.nextDoc();
            } else {
              // skip to the next possible docid
              target = filterDocIdIterator.advance(validator._nextTarget);
            }
          } else // doc > target
          {
            if (doc == DocIdSetIterator.NO_MORE_DOCS) break;
            target = filterDocIdIterator.advance(doc);
          }
        }
      } else {
        // MapReduce wrapper is not null
        while (target < DocIdSetIterator.NO_MORE_DOCS) {
          if (doc < target) {
            doc = scorer.advance(target);
          }

          if (doc == target) // permitted by filter
          {
            if (validator.validate(doc)) {
View Full Code Here

    this.addDocuments(
      "{ \"aaa bbb\" : \"aaa ccc\" , \"ccc\" \"bbb ccc\" }",
      "{ \"aaa\" : \"aaa bbb ddd\" }"
    );

    final Scorer scorer1 = this.getScorer(
      ntq("aaa").getLuceneProxyQuery()
    );

    assertTrue(scorer1.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
    assertEquals(0, scorer1.docID());
    assertEquals(2, scorer1.freq(), 0);
    assertTrue(scorer1.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
    assertEquals(1, scorer1.docID());
    assertEquals(2, scorer1.freq(), 0);
    assertTrue(scorer1.nextDoc() == DocIdSetIterator.NO_MORE_DOCS);

    final Scorer scorer2 = this.getScorer(
      ntq("ccc").getLuceneProxyQuery()
    );

    assertTrue(scorer2.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
    assertEquals(0, scorer2.docID());
    assertEquals(3, scorer2.freq(), 0);
    assertTrue(scorer2.nextDoc() == DocIdSetIterator.NO_MORE_DOCS);

    final Scorer scorer3 = this.getScorer(
      ntq("ddd").getLuceneProxyQuery()
    );

    assertTrue(scorer3.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
    assertEquals(1, scorer3.docID());
    assertEquals(1, scorer3.freq(), 0);
    assertTrue(scorer3.nextDoc() == DocIdSetIterator.NO_MORE_DOCS);
  }
View Full Code Here
