Package org.apache.lucene.index

Examples of org.apache.lucene.index.AtomicReaderContext
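
An AtomicReaderContext is the IndexReaderContext for a single AtomicReader (in practice, one index segment). It exposes the segment's reader via reader() and the segment's position within the composite reader via the public docBase and ord fields, which is what lets per-segment code translate between top-level and segment-local document IDs. Below is a minimal sketch of the common leaf-walking pattern, not taken from the excerpts that follow; it assumes "dir" is an open Directory, uses Lucene 4.x APIs, and the class and method names are illustrative only:

    import java.io.IOException;
    import org.apache.lucene.index.AtomicReader;
    import org.apache.lucene.index.AtomicReaderContext;
    import org.apache.lucene.index.DirectoryReader;
    import org.apache.lucene.store.Directory;

    public class LeafWalk {
      static void visitLeaves(Directory dir, String field) throws IOException {
        DirectoryReader reader = DirectoryReader.open(dir);
        try {
          for (AtomicReaderContext leaf : reader.leaves()) {
            AtomicReader segmentReader = leaf.reader(); // one segment's reader
            int docBase = leaf.docBase;                 // first top-level docID in this segment
            // segment-local work goes here, e.g. segmentReader.terms(field);
            // a top-level docID d maps to the local docID (d - docBase)
          }
        } finally {
          reader.close();
        }
      }
    }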


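This excerpt resolves values lazily per segment: ReaderUtil.subIndex maps a top-level docID to its leaf, and the values for that AtomicReaderContext are fetched from the ValueSource once and cached by leaf index:
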
    fContext = ValueSource.newContext(searcher);
  }

  private void setState(int docid) {
    int idx = ReaderUtil.subIndex(docid, readerContexts);
    AtomicReaderContext rcontext = readerContexts.get(idx);
    values = docValuesArr[idx];
    if (values == null) {
      try {
        docValuesArr[idx] = values = valueSource.getValues(fContext, rcontext);
      } catch (IOException e) {
        // ...


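From a CachingWrapperFilter test: the wrapped filter should only be consulted the first time getDocIdSet is called for a given AtomicReaderContext; later calls are served from the cache:
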
    Directory dir = newDirectory();
    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
    writer.close();

    IndexReader reader = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(dir));
    AtomicReaderContext context = (AtomicReaderContext) reader.getContext();
    MockFilter filter = new MockFilter();
    CachingWrapperFilter cacher = new CachingWrapperFilter(filter);

    // first time, the nested filter is called; the result is kept in a
    // strong reference so the cache entry stays reachable during the test
    DocIdSet strongRef = cacher.getDocIdSet(context, context.reader().getLiveDocs());
    assertTrue("first time", filter.wasCalled());

    // make sure no exception if cache is holding the wrong docIdSet
    cacher.getDocIdSet(context, context.reader().getLiveDocs());

    // second time, nested filter should not be called
    filter.clear();
    cacher.getDocIdSet(context, context.reader().getLiveDocs());
    assertFalse("second time", filter.wasCalled());

    reader.close();
    dir.close();
  }

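A wrapped filter that returns a null DocIdSet (meaning no documents) should be cached and reported as null:
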
    Directory dir = newDirectory();
    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
    writer.close();

    IndexReader reader = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(dir));
    AtomicReaderContext context = (AtomicReaderContext) reader.getContext();

    final Filter filter = new Filter() {
      @Override
      public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) {
        return null;
      }
    };
    CachingWrapperFilter cacher = new CachingWrapperFilter(filter);

    // a null DocIdSet means "no documents"; the caching filter should return null as well
    assertNull(cacher.getDocIdSet(context, context.reader().getLiveDocs()));
   
    reader.close();
    dir.close();
  }

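Likewise for a DocIdSet whose iterator() is null, another way of expressing the empty set:
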
    Directory dir = newDirectory();
    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
    writer.close();

    IndexReader reader = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(dir));
    AtomicReaderContext context = (AtomicReaderContext) reader.getContext();

    final Filter filter = new Filter() {
      @Override
      public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) {
        return new DocIdSet() {
          @Override
          public DocIdSetIterator iterator() {
            return null;
          }
        };
      }
    };
    CachingWrapperFilter cacher = new CachingWrapperFilter(filter);

    // a DocIdSet whose iterator() is null is empty too; the caching filter should normalize it to null
    assertNull(cacher.getDocIdSet(context, context.reader().getLiveDocs()));
   
    reader.close();
    dir.close();
  }

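A test helper (shown here after the tail of the preceding test method) asserting that caching preserves the semantics of the original DocIdSet, including the null and empty cases:
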
    dir.close();
  }
 
  private static void assertDocIdSetCacheable(IndexReader reader, Filter filter, boolean shouldCacheable) throws IOException {
    assertTrue(reader.getContext() instanceof AtomicReaderContext);
    AtomicReaderContext context = (AtomicReaderContext) reader.getContext();
    final CachingWrapperFilter cacher = new CachingWrapperFilter(filter);
    final DocIdSet originalSet = filter.getDocIdSet(context, context.reader().getLiveDocs());
    final DocIdSet cachedSet = cacher.getDocIdSet(context, context.reader().getLiveDocs());
    if (originalSet == null) {
      assertNull(cachedSet);
    }
    if (cachedSet == null) {
      assertTrue(originalSet == null || originalSet.iterator() == null);
      // ...

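From the faceting code: scored docIDs are walked in order while advancing through the reader's leaves, so that each CategoryListIterator and Aggregator is positioned on the segment that actually contains the current document:
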
    for (Entry<CategoryListIterator, Aggregator> entry : categoryLists.entrySet()) {
      final ScoredDocIDsIterator iterator = docids.iterator();
      final CategoryListIterator categoryListIter = entry.getKey();
      final Aggregator aggregator = entry.getValue();
      Iterator<AtomicReaderContext> contexts = indexReader.leaves().iterator();
      AtomicReaderContext current = null;
      int maxDoc = -1;
      while (iterator.next()) {
        int docID = iterator.getDocID();
        if (docID >= maxDoc) {
          boolean iteratorDone = false;
          do { // find the segment which contains this document
            if (!contexts.hasNext()) {
              throw new RuntimeException("ScoredDocIDs contains documents outside this reader's segments !?");
            }
            current = contexts.next();
            maxDoc = current.docBase + current.reader().maxDoc();
            if (docID < maxDoc) { // segment has docs, check if it has categories
              boolean validSegment = categoryListIter.setNextReader(current);
              validSegment &= aggregator.setNextReader(current);
              if (!validSegment) { // if the categoryList or aggregator says it's an invalid segment, skip all its docs
                while (docID < maxDoc && iterator.next()) {
                  // ...

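IndexSearcher.explain (the tail of its Javadoc included) shows the canonical docBase arithmetic: find the leaf with ReaderUtil.subIndex, rebase the top-level docID, then delegate to the Weight:
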
   * @throws BooleanQuery.TooManyClauses If a query would exceed
   *         {@link BooleanQuery#getMaxClauseCount()} clauses.
   */
  protected Explanation explain(Weight weight, int doc) throws IOException {
    int n = ReaderUtil.subIndex(doc, leafContexts);
    final AtomicReaderContext ctx = leafContexts.get(n);
    int deBasedDoc = doc - ctx.docBase; // rebase the top-level docID into the segment's local doc space
   
    return weight.explain(ctx, deBasedDoc);
  }

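From an executor-based search path: each slice covers exactly one leaf, and the ScoreDocs it produces are rebased against that leaf's docBase before being merged into the shared hit queue:
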
      assert slice.leaves.length == 1;
      final TopFieldDocs docs = searcher.search(Arrays.asList(slice.leaves),
          weight, after, nDocs, sort, true, doDocScores || sort.needsScores(), doMaxScore);
      lock.lock();
      try {
        final AtomicReaderContext ctx = slice.leaves[0];
        final int base = ctx.docBase;
        hq.setNextReader(ctx);
        hq.setScorer(fakeScorer);
        for (ScoreDoc scoreDoc : docs.scoreDocs) {
          fakeScorer.doc = scoreDoc.doc - base;
          // ...

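From a postings-based highlighter: each document is mapped to its leaf so the field's Terms can be pulled from the matching segment reader:
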
        continue; // nothing to do
      }
      bi.setText(content);
      int doc = docids[i];
      int leaf = ReaderUtil.subIndex(doc, leaves);
      AtomicReaderContext subContext = leaves.get(leaf);
      AtomicReader r = subContext.reader();
      Terms t = r.terms(field);
      if (t == null) {
        continue; // nothing to do
      }
      if (leaf != lastLeaf) {
        // ...

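A query-backed filter in the style of QueryWrapperFilter: it derives a private context from the segment reader, builds a normalized Weight against it, and exposes the resulting scorer as the DocIdSetIterator:
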
  }

  @Override
  public DocIdSet getDocIdSet(final AtomicReaderContext context, final Bits acceptDocs) throws IOException {
    // get a private context that is used to rewrite, createWeight and score eventually
    final AtomicReaderContext privateContext = context.reader().getContext();
    final Weight weight = new IndexSearcher(privateContext).createNormalizedWeight(query);
    return new DocIdSet() {
      @Override
      public DocIdSetIterator iterator() throws IOException {
        return weight.scorer(privateContext, true, false, acceptDocs);
      }
    };
  }
