Package org.apache.lucene.util

Examples of org.apache.lucene.util.FixedBitSet
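
FixedBitSet is a fixed-length bit set, typically sized to a reader's maxDoc() and addressed by document ID. Before the real-world excerpts, here is a minimal, self-contained sketch (not taken from the excerpts; the class name and values are invented for illustration) of the calls the examples below rely on: the constructor, set, get, and cardinality.

import org.apache.lucene.util.FixedBitSet;

public class FixedBitSetBasics {
  public static void main(String[] args) {
    final int maxDoc = 8;                         // pretend segment size
    FixedBitSet bits = new FixedBitSet(maxDoc);   // all bits start cleared

    // Mark a few document IDs, as the excerpts below do with res.set(docID)
    bits.set(1);
    bits.set(4);
    bits.set(6);

    System.out.println("cardinality=" + bits.cardinality());  // prints 3
    for (int doc = 0; doc < maxDoc; doc++) {
      if (bits.get(doc)) {
        System.out.println("doc " + doc + " is set");
      }
    }
  }
}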


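The first excerpt is a method fragment that computes which documents have at least one term in a given field. It lazily allocates a FixedBitSet sized to maxDoc, sets a bit for every document returned by each term's DocsEnum, and short-circuits to Bits.MatchAllBits or Bits.MatchNoBits when the bit set would cover all documents or none.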
    throws IOException {
      final String field = key.field;
      final int maxDoc = reader.maxDoc();

      // Visit all docs that have terms for this field
      FixedBitSet res = null;
      Terms terms = reader.terms(field);
      if (terms != null) {
        final int termsDocCount = terms.getDocCount();
        assert termsDocCount <= maxDoc;
        if (termsDocCount == maxDoc) {
          // Fast case: all docs have this field:
          return new Bits.MatchAllBits(maxDoc);
        }
        final TermsEnum termsEnum = terms.iterator(null);
        DocsEnum docs = null;
        while(true) {
          final BytesRef term = termsEnum.next();
          if (term == null) {
            break;
          }
          if (res == null) {
            // lazy init
            res = new FixedBitSet(maxDoc);
          }

          docs = termsEnum.docs(null, docs, DocsEnum.FLAG_NONE);
          // TODO: use bulk API
          while (true) {
            final int docID = docs.nextDoc();
            if (docID == DocIdSetIterator.NO_MORE_DOCS) {
              break;
            }
            res.set(docID);
          }
        }
      }
      if (res == null) {
        return new Bits.MatchNoBits(maxDoc);
      }
      final int numSet = res.cardinality();
      if (numSet >= maxDoc) {
        // The cardinality of the BitSet is maxDoc if all documents have a value.
        assert numSet == maxDoc;
        return new Bits.MatchAllBits(maxDoc);
      }
View Full Code Here


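The next fragment appears to come from field-uninversion code and follows the same lazy-allocation pattern: while the visitTerm and visitDoc callbacks process each term's documents, an optional FixedBitSet (docsWithField) records which documents carry a value for the field.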
        }

        final TermsEnum termsEnum = termsEnum(terms);

        DocsEnum docs = null;
        FixedBitSet docsWithField = null;
        while(true) {
          final BytesRef term = termsEnum.next();
          if (term == null) {
            break;
          }
          visitTerm(term);
          docs = termsEnum.docs(null, docs, DocsEnum.FLAG_NONE);
          while (true) {
            final int docID = docs.nextDoc();
            if (docID == DocIdSetIterator.NO_MORE_DOCS) {
              break;
            }
            visitDoc(docID);
            if (setDocsWithField) {
              if (docsWithField == null) {
                // Lazy init
                this.docsWithField = docsWithField = new FixedBitSet(maxDoc);
              }
              docsWithField.set(docID);
            }
          }
        }
      }
    }
View Full Code Here

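This longer excerpt is index-verification code that walks every term of a field, checking term order, doc frequencies, positions, offsets, and skipping via advance(). A FixedBitSet (visitedDocs) accumulates every document ID seen under any term so that its cardinality can be compared against Terms.getDocCount() at the end.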
     
      Comparator<BytesRef> termComp = terms.getComparator();
     
      long sumTotalTermFreq = 0;
      long sumDocFreq = 0;
      FixedBitSet visitedDocs = new FixedBitSet(maxDoc);
      while(true) {
       
        final BytesRef term = termsEnum.next();
        if (term == null) {
          break;
        }
       
        assert term.isValid();
       
        // make sure terms arrive in order according to
        // the comp
        if (lastTerm == null) {
          lastTerm = BytesRef.deepCopyOf(term);
        } else {
          if (termComp.compare(lastTerm, term) >= 0) {
            throw new RuntimeException("terms out of order: lastTerm=" + lastTerm + " term=" + term);
          }
          lastTerm.copyBytes(term);
        }
       
        final int docFreq = termsEnum.docFreq();
        if (docFreq <= 0) {
          throw new RuntimeException("docfreq: " + docFreq + " is out of bounds");
        }
        sumDocFreq += docFreq;
       
        docs = termsEnum.docs(liveDocs, docs);
        postings = termsEnum.docsAndPositions(liveDocs, postings);

        if (hasFreqs == false) {
          if (termsEnum.totalTermFreq() != -1) {
            throw new RuntimeException("field \"" + field + "\" hasFreqs is false, but TermsEnum.totalTermFreq()=" + termsEnum.totalTermFreq() + " (should be -1)");  
          }
        }
       
        if (hasOrd) {
          long ord = -1;
          try {
            ord = termsEnum.ord();
          } catch (UnsupportedOperationException uoe) {
            hasOrd = false;
          }
         
          if (hasOrd) {
            final long ordExpected = status.delTermCount + status.termCount - termCountStart;
            if (ord != ordExpected) {
              throw new RuntimeException("ord mismatch: TermsEnum has ord=" + ord + " vs actual=" + ordExpected);
            }
          }
        }
       
        final DocsEnum docs2;
        if (postings != null) {
          docs2 = postings;
        } else {
          docs2 = docs;
        }
       
        int lastDoc = -1;
        int docCount = 0;
        long totalTermFreq = 0;
        while(true) {
          final int doc = docs2.nextDoc();
          if (doc == DocIdSetIterator.NO_MORE_DOCS) {
            break;
          }
          status.totFreq++;
          visitedDocs.set(doc);
          int freq = -1;
          if (hasFreqs) {
            freq = docs2.freq();
            if (freq <= 0) {
              throw new RuntimeException("term " + term + ": doc " + doc + ": freq " + freq + " is out of bounds");
            }
            status.totPos += freq;
            totalTermFreq += freq;
          } else {
            // When a field didn't index freq, it must
            // consistently "lie" and pretend that freq was
            // 1:
            if (docs2.freq() != 1) {
              throw new RuntimeException("term " + term + ": doc " + doc + ": freq " + freq + " != 1 when Terms.hasFreqs() is false");
            }
          }
          docCount++;
         
          if (doc <= lastDoc) {
            throw new RuntimeException("term " + term + ": doc " + doc + " <= lastDoc " + lastDoc);
          }
          if (doc >= maxDoc) {
            throw new RuntimeException("term " + term + ": doc " + doc + " >= maxDoc " + maxDoc);
          }
         
          lastDoc = doc;
         
          int lastPos = -1;
          int lastOffset = 0;
          if (hasPositions) {
            for(int j=0;j<freq;j++) {
              final int pos = postings.nextPosition();

              if (pos < 0) {
                throw new RuntimeException("term " + term + ": doc " + doc + ": pos " + pos + " is out of bounds");
              }
              if (pos < lastPos) {
                throw new RuntimeException("term " + term + ": doc " + doc + ": pos " + pos + " < lastPos " + lastPos);
              }
              lastPos = pos;
              BytesRef payload = postings.getPayload();
              if (payload != null) {
                assert payload.isValid();
              }
              if (payload != null && payload.length < 1) {
                throw new RuntimeException("term " + term + ": doc " + doc + ": pos " + pos + " payload length is out of bounds " + payload.length);
              }
              if (hasOffsets) {
                int startOffset = postings.startOffset();
                int endOffset = postings.endOffset();
                // NOTE: we cannot enforce any bounds whatsoever on vectors... they were a free-for-all before?
                // but for offsets in the postings lists these checks are fine: they were always enforced by IndexWriter
                if (!isVectors) {
                  if (startOffset < 0) {
                    throw new RuntimeException("term " + term + ": doc " + doc + ": pos " + pos + ": startOffset " + startOffset + " is out of bounds");
                  }
                  if (startOffset < lastOffset) {
                    throw new RuntimeException("term " + term + ": doc " + doc + ": pos " + pos + ": startOffset " + startOffset + " < lastStartOffset " + lastOffset);
                  }
                  if (endOffset < 0) {
                    throw new RuntimeException("term " + term + ": doc " + doc + ": pos " + pos + ": endOffset " + endOffset + " is out of bounds");
                  }
                  if (endOffset < startOffset) {
                    throw new RuntimeException("term " + term + ": doc " + doc + ": pos " + pos + ": endOffset " + endOffset + " < startOffset " + startOffset);
                  }
                }
                lastOffset = startOffset;
              }
            }
          }
        }
       
        if (docCount != 0) {
          status.termCount++;
        } else {
          status.delTermCount++;
        }
       
        final long totalTermFreq2 = termsEnum.totalTermFreq();
        final boolean hasTotalTermFreq = hasFreqs && totalTermFreq2 != -1;
       
        // Re-count if there are deleted docs:
        if (liveDocs != null) {
          if (hasFreqs) {
            final DocsEnum docsNoDel = termsEnum.docs(null, docsAndFreqs);
            docCount = 0;
            totalTermFreq = 0;
            while(docsNoDel.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
              visitedDocs.set(docsNoDel.docID());
              docCount++;
              totalTermFreq += docsNoDel.freq();
            }
          } else {
            final DocsEnum docsNoDel = termsEnum.docs(null, docs, DocsEnum.FLAG_NONE);
            docCount = 0;
            totalTermFreq = -1;
            while(docsNoDel.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
              visitedDocs.set(docsNoDel.docID());
              docCount++;
            }
          }
        }
       
        if (docCount != docFreq) {
          throw new RuntimeException("term " + term + " docFreq=" + docFreq + " != tot docs w/o deletions " + docCount);
        }
        if (hasTotalTermFreq) {
          if (totalTermFreq2 <= 0) {
            throw new RuntimeException("totalTermFreq: " + totalTermFreq2 + " is out of bounds");
          }
          sumTotalTermFreq += totalTermFreq;
          if (totalTermFreq != totalTermFreq2) {
            throw new RuntimeException("term " + term + " totalTermFreq=" + totalTermFreq2 + " != recomputed totalTermFreq=" + totalTermFreq);
          }
        }
       
        // Test skipping
        if (hasPositions) {
          for(int idx=0;idx<7;idx++) {
            final int skipDocID = (int) (((idx+1)*(long) maxDoc)/8);
            postings = termsEnum.docsAndPositions(liveDocs, postings);
            final int docID = postings.advance(skipDocID);
            if (docID == DocIdSetIterator.NO_MORE_DOCS) {
              break;
            } else {
              if (docID < skipDocID) {
                throw new RuntimeException("term " + term + ": advance(docID=" + skipDocID + ") returned docID=" + docID);
              }
              final int freq = postings.freq();
              if (freq <= 0) {
                throw new RuntimeException("termFreq " + freq + " is out of bounds");
              }
              int lastPosition = -1;
              int lastOffset = 0;
              for(int posUpto=0;posUpto<freq;posUpto++) {
                final int pos = postings.nextPosition();

                if (pos < 0) {
                  throw new RuntimeException("position " + pos + " is out of bounds");
                }
                if (pos < lastPosition) {
                  throw new RuntimeException("position " + pos + " is < lastPosition " + lastPosition);
                }
                lastPosition = pos;
                if (hasOffsets) {
                  int startOffset = postings.startOffset();
                  int endOffset = postings.endOffset();
                  // NOTE: we cannot enforce any bounds whatsoever on vectors... they were a free-for-all before?
                  // but for offsets in the postings lists these checks are fine: they were always enforced by IndexWriter
                  if (!isVectors) {
                    if (startOffset < 0) {
                      throw new RuntimeException("term " + term + ": doc " + docID + ": pos " + pos + ": startOffset " + startOffset + " is out of bounds");
                    }
                    if (startOffset < lastOffset) {
                      throw new RuntimeException("term " + term + ": doc " + docID + ": pos " + pos + ": startOffset " + startOffset + " < lastStartOffset " + lastOffset);
                    }
                    if (endOffset < 0) {
                      throw new RuntimeException("term " + term + ": doc " + docID + ": pos " + pos + ": endOffset " + endOffset + " is out of bounds");
                    }
                    if (endOffset < startOffset) {
                      throw new RuntimeException("term " + term + ": doc " + docID + ": pos " + pos + ": endOffset " + endOffset + " < startOffset " + startOffset);
                    }
                  }
                  lastOffset = startOffset;
                }
              }
             
              final int nextDocID = postings.nextDoc();
              if (nextDocID == DocIdSetIterator.NO_MORE_DOCS) {
                break;
              }
              if (nextDocID <= docID) {
                throw new RuntimeException("term " + term + ": advance(docID=" + skipDocID + "), then .next() returned docID=" + nextDocID + " vs prev docID=" + docID);
              }
            }
          }
        } else {
          for(int idx=0;idx<7;idx++) {
            final int skipDocID = (int) (((idx+1)*(long) maxDoc)/8);
            docs = termsEnum.docs(liveDocs, docs, DocsEnum.FLAG_NONE);
            final int docID = docs.advance(skipDocID);
            if (docID == DocIdSetIterator.NO_MORE_DOCS) {
              break;
            } else {
              if (docID < skipDocID) {
                throw new RuntimeException("term " + term + ": advance(docID=" + skipDocID + ") returned docID=" + docID);
              }
              final int nextDocID = docs.nextDoc();
              if (nextDocID == DocIdSetIterator.NO_MORE_DOCS) {
                break;
              }
              if (nextDocID <= docID) {
                throw new RuntimeException("term " + term + ": advance(docID=" + skipDocID + "), then .next() returned docID=" + nextDocID + " vs prev docID=" + docID);
              }
            }
          }
        }
      }
     
      final Terms fieldTerms = fields.terms(field);
      if (fieldTerms == null) {
        // Unusual: the FieldsEnum returned a field but
        // the Terms for that field is null; this should
        // only happen if it's a ghost field (field with
        // no terms, eg there used to be terms but all
        // docs got deleted and then merged away):
       
      } else {
        if (fieldTerms instanceof BlockTreeTermsReader.FieldReader) {
          final BlockTreeTermsReader.Stats stats = ((BlockTreeTermsReader.FieldReader) fieldTerms).computeStats();
          assert stats != null;
          if (status.blockTreeStats == null) {
            status.blockTreeStats = new HashMap<String,BlockTreeTermsReader.Stats>();
          }
          status.blockTreeStats.put(field, stats);
        }
       
        if (sumTotalTermFreq != 0) {
          final long v = fields.terms(field).getSumTotalTermFreq();
          if (v != -1 && sumTotalTermFreq != v) {
            throw new RuntimeException("sumTotalTermFreq for field " + field + "=" + v + " != recomputed sumTotalTermFreq=" + sumTotalTermFreq);
          }
        }
       
        if (sumDocFreq != 0) {
          final long v = fields.terms(field).getSumDocFreq();
          if (v != -1 && sumDocFreq != v) {
            throw new RuntimeException("sumDocFreq for field " + field + "=" + v + " != recomputed sumDocFreq=" + sumDocFreq);
          }
        }
       
        if (fieldTerms != null) {
          final int v = fieldTerms.getDocCount();
          if (v != -1 && visitedDocs.cardinality() != v) {
            throw new RuntimeException("docCount for field " + field + "=" + v + " != recomputed docCount=" + visitedDocs.cardinality());
          }
        }
       
        // Test seek to last term:
        if (lastTerm != null) {
View Full Code Here

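checkSortedDocValues uses a FixedBitSet sized to the field's value count (seenOrds) to record which ordinals documents actually reference; if its cardinality does not equal getValueCount(), the ord space has holes.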
  }
 
  private static void checkSortedDocValues(String fieldName, AtomicReader reader, SortedDocValues dv, Bits docsWithField) {
    checkBinaryDocValues(fieldName, reader, dv, docsWithField);
    final int maxOrd = dv.getValueCount()-1;
    FixedBitSet seenOrds = new FixedBitSet(dv.getValueCount());
    int maxOrd2 = -1;
    for (int i = 0; i < reader.maxDoc(); i++) {
      int ord = dv.getOrd(i);
      if (ord == -1) {
        if (docsWithField.get(i)) {
          throw new RuntimeException("dv for field: " + fieldName + " has -1 ord but is not marked missing for doc: " + i);
        }
      } else if (ord < -1 || ord > maxOrd) {
        throw new RuntimeException("ord out of bounds: " + ord);
      } else {
        if (!docsWithField.get(i)) {
          throw new RuntimeException("dv for field: " + fieldName + " is missing but has ord=" + ord + " for doc: " + i);
        }
        maxOrd2 = Math.max(maxOrd2, ord);
        seenOrds.set(ord);
      }
    }
    if (maxOrd != maxOrd2) {
      throw new RuntimeException("dv for field: " + fieldName + " reports wrong maxOrd=" + maxOrd + " but this is not the case: " + maxOrd2);
    }
    if (seenOrds.cardinality() != dv.getValueCount()) {
      throw new RuntimeException("dv for field: " + fieldName + " has holes in its ords, valueCount=" + dv.getValueCount() + " but only used: " + seenOrds.cardinality());
    }
    BytesRef lastValue = null;
    BytesRef scratch = new BytesRef();
    for (int i = 0; i <= maxOrd; i++) {
      dv.lookupOrd(i, scratch);
View Full Code Here

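A one-bit FixedBitSet also serves as a plain Bits instance: the term-vector check below constructs new FixedBitSet(1) as an all-clear liveDocs stand-in named onlyDocIsDeleted.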
   * @lucene.experimental
   */
  public static Status.TermVectorStatus testTermVectors(AtomicReader reader, PrintStream infoStream, boolean verbose, boolean crossCheckTermVectors) {
    final Status.TermVectorStatus status = new Status.TermVectorStatus();
    final FieldInfos fieldInfos = reader.getFieldInfos();
    final Bits onlyDocIsDeleted = new FixedBitSet(1);
   
    try {
      if (infoStream != null) {
        infoStream.print("    test: term vectors........");
      }
View Full Code Here

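In this version of the API a FixedBitSet can be returned directly as a DocIdSet, so the anonymous Filter below simply fills one with the documents whose stored "id" field is even, honoring acceptDocs as it goes.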
        }
        filter = new Filter() {
            @Override
            public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
              int maxDoc = context.reader().maxDoc();
              final FixedBitSet bits = new FixedBitSet(maxDoc);
              for(int docID=0;docID < maxDoc;docID++) {
                // Keeps only the even ids:
                if ((acceptDocs == null || acceptDocs.get(docID)) && ((Integer.parseInt(context.reader().document(docID).get("id")) & 1) == 0)) {
                  bits.set(docID);
                }
              }
              return bits;
            }
          };
View Full Code Here

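The following test randomly assigns each document to one of two terms and records the choice in a FixedBitSet (isS1). After indexing, the bit set supplies the expected answers while nextDoc() and advance() are exercised on a DocsAndPositionsEnum for the chosen term.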
        System.out.println("  s2 ch=0x" + Integer.toHexString(s2.charAt(idx)));
      }
      */
    }

    final FixedBitSet isS1 = new FixedBitSet(NUM_DOCS);
    for(int idx=0;idx<NUM_DOCS;idx++) {
      if (random().nextBoolean()) {
        isS1.set(idx);
      }
    }

    final IndexReader r;
    final IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
      .setOpenMode(IndexWriterConfig.OpenMode.CREATE)
      .setMergePolicy(newLogMergePolicy());
    iwc.setRAMBufferSizeMB(16.0 + 16.0 * random().nextDouble());
    iwc.setMaxBufferedDocs(-1);
    final RandomIndexWriter riw = new RandomIndexWriter(random(), dir, iwc);

    for(int idx=0;idx<NUM_DOCS;idx++) {
      final Document doc = new Document();
      String s = isS1.get(idx) ? s1 : s2;
      final Field f = newTextField("field", s, Field.Store.NO);
      final int count = _TestUtil.nextInt(random(), 1, 4);
      for(int ct=0;ct<count;ct++) {
        doc.add(f);
      }
      riw.addDocument(doc);
    }

    r = riw.getReader();
    riw.close();

    /*
    if (VERBOSE) {
      System.out.println("TEST: terms");
      TermEnum termEnum = r.terms();
      while(termEnum.next()) {
        System.out.println("  term=" + termEnum.term() + " len=" + termEnum.term().text().length());
        assertTrue(termEnum.docFreq() > 0);
        System.out.println("    s1?=" + (termEnum.term().text().equals(s1)) + " s1len=" + s1.length());
        System.out.println("    s2?=" + (termEnum.term().text().equals(s2)) + " s2len=" + s2.length());
        final String s = termEnum.term().text();
        for(int idx=0;idx<s.length();idx++) {
          System.out.println("      ch=0x" + Integer.toHexString(s.charAt(idx)));
        }
      }
    }
    */

    assertEquals(NUM_DOCS, r.numDocs());
    assertTrue(r.docFreq(new Term("field", s1)) > 0);
    assertTrue(r.docFreq(new Term("field", s2)) > 0);

    int num = atLeast(1000);
    for(int iter=0;iter<num;iter++) {

      final String term;
      final boolean doS1;
      if (random().nextBoolean()) {
        term = s1;
        doS1 = true;
      } else {
        term = s2;
        doS1 = false;
      }

      if (VERBOSE) {
        System.out.println("\nTEST: iter=" + iter + " doS1=" + doS1);
      }
       
      final DocsAndPositionsEnum postings = MultiFields.getTermPositionsEnum(r, null, "field", new BytesRef(term));

      int docID = -1;
      while(docID < DocIdSetIterator.NO_MORE_DOCS) {
        final int what = random().nextInt(3);
        if (what == 0) {
          if (VERBOSE) {
            System.out.println("TEST: docID=" + docID + "; do next()");
          }
          // nextDoc
          int expected = docID+1;
          while(true) {
            if (expected == NUM_DOCS) {
              expected = Integer.MAX_VALUE;
              break;
            } else if (isS1.get(expected) == doS1) {
              break;
            } else {
              expected++;
            }
          }
          docID = postings.nextDoc();
          if (VERBOSE) {
            System.out.println("  got docID=" + docID);
          }
          assertEquals(expected, docID);
          if (docID == DocIdSetIterator.NO_MORE_DOCS) {
            break;
          }

          if (random().nextInt(6) == 3) {
            final int freq = postings.freq();
            assertTrue(freq >=1 && freq <= 4);
            for(int pos=0;pos<freq;pos++) {
              assertEquals(pos, postings.nextPosition());
              if (random().nextBoolean()) {
                postings.getPayload();
                if (random().nextBoolean()) {
                  postings.getPayload(); // get it again
                }
              }
            }
          }
        } else {
          // advance
          final int targetDocID;
          if (docID == -1) {
            targetDocID = random().nextInt(NUM_DOCS+1);
          } else {
            targetDocID = docID + _TestUtil.nextInt(random(), 1, NUM_DOCS - docID);
          }
          if (VERBOSE) {
            System.out.println("TEST: docID=" + docID + "; do advance(" + targetDocID + ")");
          }
          int expected = targetDocID;
          while(true) {
            if (expected == NUM_DOCS) {
              expected = Integer.MAX_VALUE;
              break;
            } else if (isS1.get(expected) == doS1) {
              break;
            } else {
              expected++;
            }
          }
View Full Code Here

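A near-identical variant of the previous test: isS1 again records which documents received s1, but the iteration goes through a plain DocsEnum, with or without frequencies depending on the field's IndexOptions.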
        System.out.println("  s2 ch=0x" + Integer.toHexString(s2.charAt(idx)));
      }
      */
    }

    final FixedBitSet isS1 = new FixedBitSet(NUM_DOCS);
    for(int idx=0;idx<NUM_DOCS;idx++) {
      if (random().nextBoolean()) {
        isS1.set(idx);
      }
    }

    final IndexReader r;
    if (true) {
      final IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
        .setOpenMode(IndexWriterConfig.OpenMode.CREATE)
        .setMergePolicy(newLogMergePolicy());
      iwc.setRAMBufferSizeMB(16.0 + 16.0 * random().nextDouble());
      iwc.setMaxBufferedDocs(-1);
      final RandomIndexWriter riw = new RandomIndexWriter(random(), dir, iwc);

      FieldType ft = new FieldType(TextField.TYPE_NOT_STORED);
      ft.setIndexOptions(options);
      for(int idx=0;idx<NUM_DOCS;idx++) {
        final Document doc = new Document();
        String s = isS1.get(idx) ? s1 : s2;
        final Field f = newField("field", s, ft);
        final int count = _TestUtil.nextInt(random(), 1, 4);
        for(int ct=0;ct<count;ct++) {
          doc.add(f);
        }
        riw.addDocument(doc);
      }

      r = riw.getReader();
      riw.close();
    } else {
      r = DirectoryReader.open(dir);
    }

    /*
    if (VERBOSE) {
      System.out.println("TEST: terms");
      TermEnum termEnum = r.terms();
      while(termEnum.next()) {
        System.out.println("  term=" + termEnum.term() + " len=" + termEnum.term().text().length());
        assertTrue(termEnum.docFreq() > 0);
        System.out.println("    s1?=" + (termEnum.term().text().equals(s1)) + " s1len=" + s1.length());
        System.out.println("    s2?=" + (termEnum.term().text().equals(s2)) + " s2len=" + s2.length());
        final String s = termEnum.term().text();
        for(int idx=0;idx<s.length();idx++) {
          System.out.println("      ch=0x" + Integer.toHexString(s.charAt(idx)));
        }
      }
    }
    */

    assertEquals(NUM_DOCS, r.numDocs());
    assertTrue(r.docFreq(new Term("field", s1)) > 0);
    assertTrue(r.docFreq(new Term("field", s2)) > 0);

    int num = atLeast(1000);
    for(int iter=0;iter<num;iter++) {

      final String term;
      final boolean doS1;
      if (random().nextBoolean()) {
        term = s1;
        doS1 = true;
      } else {
        term = s2;
        doS1 = false;
      }

      if (VERBOSE) {
        System.out.println("\nTEST: iter=" + iter + " doS1=" + doS1 + " term=" + term);
      }
       
      final DocsEnum docs;
      final DocsEnum postings;

      if (options == IndexOptions.DOCS_ONLY) {
        docs = _TestUtil.docs(random(), r, "field", new BytesRef(term), null, null, DocsEnum.FLAG_NONE);
        postings = null;
      } else {
        docs = postings = _TestUtil.docs(random(), r, "field", new BytesRef(term), null, null, DocsEnum.FLAG_FREQS);
        assert postings != null;
      }
      assert docs != null;

      int docID = -1;
      while(docID < DocIdSetIterator.NO_MORE_DOCS) {
        final int what = random().nextInt(3);
        if (what == 0) {
          if (VERBOSE) {
            System.out.println("TEST: docID=" + docID + "; do next()");
          }
          // nextDoc
          int expected = docID+1;
          while(true) {
            if (expected == NUM_DOCS) {
              expected = Integer.MAX_VALUE;
              break;
            } else if (isS1.get(expected) == doS1) {
              break;
            } else {
              expected++;
            }
          }
          docID = docs.nextDoc();
          if (VERBOSE) {
            System.out.println("  got docID=" + docID);
          }
          assertEquals(expected, docID);
          if (docID == DocIdSetIterator.NO_MORE_DOCS) {
            break;
          }

          if (random().nextInt(6) == 3 && postings != null) {
            final int freq = postings.freq();
            assertTrue(freq >=1 && freq <= 4);
          }
        } else {
          // advance
          final int targetDocID;
          if (docID == -1) {
            targetDocID = random().nextInt(NUM_DOCS+1);
          } else {
            targetDocID = docID + _TestUtil.nextInt(random(), 1, NUM_DOCS - docID);
          }
          if (VERBOSE) {
            System.out.println("TEST: docID=" + docID + "; do advance(" + targetDocID + ")");
          }
          int expected = targetDocID;
          while(true) {
            if (expected == NUM_DOCS) {
              expected = Integer.MAX_VALUE;
              break;
            } else if (isS1.get(expected) == doS1) {
              break;
            } else {
              expected++;
            }
          }
View Full Code Here

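testBuildDocMap fills a FixedBitSet with a random set of live documents and verifies that MergeState.DocMap.build produces a compact mapping: deleted documents map to -1 and live documents are renumbered without gaps.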
  public void testBuildDocMap() {
    final int maxDoc = _TestUtil.nextInt(random(), 1, 128);
    final int numDocs = _TestUtil.nextInt(random(), 0, maxDoc);
    final int numDeletedDocs = maxDoc - numDocs;
    final FixedBitSet liveDocs = new FixedBitSet(maxDoc);
    for (int i = 0; i < numDocs; ++i) {
      while (true) {
        final int docID = random().nextInt(maxDoc);
        if (!liveDocs.get(docID)) {
          liveDocs.set(docID);
          break;
        }
      }
    }

    final MergeState.DocMap docMap = MergeState.DocMap.build(maxDoc, liveDocs);

    assertEquals(maxDoc, docMap.maxDoc());
    assertEquals(numDocs, docMap.numDocs());
    assertEquals(numDeletedDocs, docMap.numDeletedDocs());
    // assert the mapping is compact
    for (int i = 0, del = 0; i < maxDoc; ++i) {
      if (!liveDocs.get(i)) {
        assertEquals(-1, docMap.get(i));
        ++del;
      } else {
        assertEquals(i - del, docMap.get(i));
      }
View Full Code Here

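The last excerpt is a consistency check: every document reachable through any term of a field is marked in a FixedBitSet, and the bit set's cardinality must equal Terms.getDocCount() for that field.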
      Terms terms = fields.terms(field);
      if (terms == null) {
        continue;
      }
      int docCount = terms.getDocCount();
      FixedBitSet visited = new FixedBitSet(ir.maxDoc());
      TermsEnum te = terms.iterator(null);
      while (te.next() != null) {
        DocsEnum de = _TestUtil.docs(random(), te, null, null, DocsEnum.FLAG_NONE);
        while (de.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
          visited.set(de.docID());
        }
      }
      assertEquals(visited.cardinality(), docCount);
    }
  }
View Full Code Here
