Package org.apache.lucene.index

Examples of org.apache.lucene.index.MultiReader
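
Most of the snippets below are written against the Lucene 4.x API (DirectoryReader, AtomicReaderContext); a few older ones still use the 3.x IndexReader.open / Hits style. As a point of reference, here is a minimal, self-contained sketch of the pattern the examples share: open one reader per index, aggregate them with MultiReader, and search the combined view. The class name, method name, field name and directories are illustrative placeholders, not taken from any snippet.

import java.io.IOException;

import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.MultiReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.Directory;

public class MultiReaderBasics {

  /** Searches content:apache across two already-built indexes. */
  static TopDocs searchBoth(Directory dirA, Directory dirB) throws IOException {
    IndexReader readerA = DirectoryReader.open(dirA);
    IndexReader readerB = DirectoryReader.open(dirB);

    // One logical index over both readers; doc IDs of readerB are offset by readerA.maxDoc().
    MultiReader multi = new MultiReader(readerA, readerB);
    try {
      IndexSearcher searcher = new IndexSearcher(multi);
      return searcher.search(new TermQuery(new Term("content", "apache")), 10);
    } finally {
      multi.close(); // this varargs constructor also closes its sub-readers
    }
  }
}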


    w2.addDocument(doc);
    IndexReader reader2 = w2.getReader();
    w2.close();
   
    TermsFilter tf = new TermsFilter(new Term(fieldName, "content1"));
    // View both single-index readers as one composite index.
    MultiReader multi = new MultiReader(reader1, reader2);
    // Filters are evaluated per leaf (segment), so walk the composite's leaves.
    for (AtomicReaderContext context : multi.leaves()) {
      DocIdSet docIdSet = tf.getDocIdSet(context, context.reader().getLiveDocs());
      if (context.reader().docFreq(new Term(fieldName, "content1")) == 0) {
        // A leaf that does not contain the term yields a null DocIdSet.
        assertNull(docIdSet);
      } else {
        FixedBitSet bits = (FixedBitSet) docIdSet;
        assertTrue("Must be >= 0", bits.cardinality() >= 0);
      }
    }
    multi.close();
    reader1.close();
    reader2.close();
    rd1.close();
    rd2.close();
  }


    DirectoryReader rA = DirectoryReader.open(dirA);
    // Two atomic views over the same underlying DirectoryReader ...
    readerA = SlowCompositeReaderWrapper.wrap(rA);
    readerAclone = SlowCompositeReaderWrapper.wrap(rA);
    // ... an independent reader for dirB, and an atomic view over the composite of both.
    readerB = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(dirB));
    readerX = SlowCompositeReaderWrapper.wrap(new MultiReader(readerA, readerB));
  }

    writer.close(); swriter1.close(); swriter2.close();
   
    reader = DirectoryReader.open(dir);
    searcher = newSearcher(reader);
   
    // closeSubReaders=true: closing these MultiReaders also closes the
    // DirectoryReaders opened inline here.
    multiReader = new MultiReader(new IndexReader[] {
      DirectoryReader.open(sdir1), DirectoryReader.open(sdir2)
    }, true);
    multiSearcher = newSearcher(multiReader);

    // A second composite whose sub-indexes overlap, hence the "Dupls" (duplicates) name.
    multiReaderDupls = new MultiReader(new IndexReader[] {
      DirectoryReader.open(sdir1), DirectoryReader.open(dir)
    }, true);
    multiSearcherDupls = newSearcher(multiReaderDupls);
  }
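
The two-argument constructor used above takes an explicit closeSubReaders flag: with true (as in the snippet), closing the MultiReader also closes the DirectoryReaders opened inline; with false the sub-readers are left open when the composite is closed and the caller keeps ownership of them. A small sketch of the false case (the class and method names are illustrative):

import java.io.IOException;

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.MultiReader;
import org.apache.lucene.search.IndexSearcher;

public class SharedSubReaders {

  /** Builds a temporary composite searcher without taking ownership of r1/r2. */
  static IndexSearcher temporaryView(IndexReader r1, IndexReader r2) throws IOException {
    // closeSubReaders=false: closing this MultiReader later will not close r1/r2,
    // so they stay usable and remain the caller's responsibility.
    MultiReader view = new MultiReader(new IndexReader[] { r1, r2 }, false);
    return new IndexSearcher(view);
  }
}

Whoever consumes the returned searcher should still close the composite view itself (searcher.getIndexReader().close()) when done; only the sub-readers survive that close.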

        unsortedReaders = super.getMergeReaders();
        final AtomicReader atomicView;
        if (unsortedReaders.size() == 1) {
          atomicView = unsortedReaders.get(0);
        } else {
          // Combine all segments being merged into a single atomic (one-segment) view.
          final IndexReader multiReader = new MultiReader(unsortedReaders.toArray(new AtomicReader[unsortedReaders.size()]));
          atomicView = SlowCompositeReaderWrapper.wrap(multiReader);
        }
        // Compute the doc-ID permutation and expose the merge input sorted by it.
        docMap = sorter.sort(atomicView);
        sortedView = SortingAtomicReader.wrap(atomicView, docMap);
      }

    // we can't put deleted docs before the nested reader, because
    // it will throw off the docIds
    IndexReader[] readers = new IndexReader[] {
      edge < 0 ? r : IndexReader.open(makeEmptyIndex(0), true),
      IndexReader.open(makeEmptyIndex(0), true),
      new MultiReader(new IndexReader[] {
        IndexReader.open(makeEmptyIndex(edge < 0 ? 4 : 0), true),
        IndexReader.open(makeEmptyIndex(0), true),
        0 == edge ? r : IndexReader.open(makeEmptyIndex(0), true)
      }),
      IndexReader.open(makeEmptyIndex(0 < edge ? 0 : 7), true),
      IndexReader.open(makeEmptyIndex(0), true),
      new MultiReader(new IndexReader[] {
        IndexReader.open(makeEmptyIndex(0 < edge ? 0 : 5), true),
        IndexReader.open(makeEmptyIndex(0), true),
        0 < edge ? r : IndexReader.open(makeEmptyIndex(0), true)
      })
    };
    IndexSearcher out = new IndexSearcher(new MultiReader(readers));
    out.setSimilarity(s.getSimilarity());
    return out;
  }

    List<File> fileList = Arrays.asList(files);
    for (File f : fileList) {
      readers.add(IndexReader.open(LuceneManager.pickDirectory(f)));
    }
    LOG.info("Opening IndexSearcher and IndexReader for " + readers.size()
        + " Lucene indexes under directory " + indexDir + " ...");
    return new MultiReader(readers.toArray(new IndexReader[readers.size()]));
  }
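
The fragment above relies on the project-specific LuceneManager.pickDirectory helper. A rough, self-contained equivalent using only stock Lucene 4.x classes might look like the following; the class and method names are illustrative, and it assumes every child directory of indexDir is a Lucene index.

import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.MultiReader;
import org.apache.lucene.store.FSDirectory;

public class MultiIndexOpener {

  /** Opens each sub-directory of indexDir as an index and returns one combined reader. */
  static IndexReader openAll(File indexDir) throws IOException {
    List<IndexReader> readers = new ArrayList<IndexReader>();
    for (File f : indexDir.listFiles()) {
      if (f.isDirectory()) {
        readers.add(DirectoryReader.open(FSDirectory.open(f)));
      }
    }
    // Closing the returned MultiReader closes all the sub-readers as well.
    return new MultiReader(readers.toArray(new IndexReader[readers.size()]));
  }
}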

    for (int i = 0; i < shards.length; i++) {
      Directory dir =
          new FileSystemDirectory(fs, new Path(shards[i].getDirectory()),
              false, conf);
      readers[i] = IndexReader.open(dir);
    }

    IndexReader reader = new MultiReader(readers);
    IndexSearcher searcher = new IndexSearcher(reader);
    Hits hits = searcher.search(new TermQuery(new Term("content", "apache")));

    assertEquals(numRuns * numDocsPerRun, hits.length());

    int[] counts = new int[numDocsPerRun];
    for (int i = 0; i < hits.length(); i++) {
      Document doc = hits.doc(i);
      counts[Integer.parseInt(doc.get("id"))]++;
    }

    for (int i = 0; i < numDocsPerRun; i++) {
      assertEquals(numRuns, counts[i]);
    }

    // max field length is 2, so "dot" is also indexed but not "org"
    hits = searcher.search(new TermQuery(new Term("content", "dot")));
    assertEquals(numRuns, hits.length());

    hits = searcher.search(new TermQuery(new Term("content", "org")));
    assertEquals(0, hits.length());

    searcher.close();
    reader.close();

    // open and close an index writer with KeepOnlyLastCommitDeletionPolicy
    // to remove earlier checkpoints
    for (int i = 0; i < shards.length; i++) {
      Directory dir =

    for (int i = 0; i < shards.length; i++) {
      Directory dir =
          new FileSystemDirectory(fs, new Path(shards[i].getDirectory()),
              false, conf);
      readers[i] = IndexReader.open(dir);
    }

    IndexReader reader = new MultiReader(readers);
    IndexSearcher searcher = new IndexSearcher(reader);
    Hits hits = searcher.search(new TermQuery(new Term("content", "apache")));
    assertEquals(0, hits.length());

    hits = searcher.search(new TermQuery(new Term("content", "hadoop")));
    assertEquals(numDocsPerRun / 2, hits.length());

    int[] counts = new int[numDocsPerRun];
    for (int i = 0; i < hits.length(); i++) {
      Document doc = hits.doc(i);
      counts[Integer.parseInt(doc.get("id"))]++;
    }

    for (int i = 0; i < numDocsPerRun; i++) {
      if (i % 2 == 0) {
        assertEquals(0, counts[i]);
      } else {
        assertEquals(1, counts[i]);
      }
    }

    searcher.close();
    reader.close();
  }

    // Wrap each source leaf so its facet ordinals are remapped (via ordinalMap)
    // to the destination taxonomy before the documents are copied.
    AtomicReader[] wrappedLeaves = new AtomicReader[leaves.size()];
    for (int i = 0; i < leaves.size(); i++) {
      wrappedLeaves[i] = new OrdinalMappingAtomicReader(leaves.get(i).reader(), ordinalMap, params);
    }
    try {
      // Add all wrapped leaves in one shot, viewed as a single composite reader.
      destIndexWriter.addIndexes(new MultiReader(wrappedLeaves));
     
      // commit changes to taxonomy and index respectively.
      destTaxWriter.commit();
      destIndexWriter.commit();
    } finally {
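
IndexWriter.addIndexes(IndexReader...) does the heavy lifting above: it copies whatever view of the documents the supplied readers expose into the destination index. Stripped of the facet ordinal mapping, the same pattern can merge several plain indexes into one. A minimal sketch against the Lucene 4.x API; the class name, method name, analyzer and version constant are assumptions for illustration.

import java.io.IOException;

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.MultiReader;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.Version;

public class IndexCopier {

  /** Copies the documents of all source indexes into the destination index. */
  static void copyInto(Directory dest, Directory... sources) throws IOException {
    IndexWriter writer = new IndexWriter(dest,
        new IndexWriterConfig(Version.LUCENE_46, new StandardAnalyzer(Version.LUCENE_46)));
    IndexReader[] readers = new IndexReader[sources.length];
    for (int i = 0; i < sources.length; i++) {
      readers[i] = DirectoryReader.open(sources[i]);
    }
    MultiReader all = new MultiReader(readers); // closing it closes the sub-readers too
    try {
      writer.addIndexes(all); // rewrites the combined view into dest
      writer.commit();
    } finally {
      all.close();
      writer.close();
    }
  }
}

Note that addIndexes simply appends the incoming documents; it does not deduplicate them.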

    }
    wA.close();
    wB.close();
    readerA = IndexReader.open(dirA, true);
    readerB = IndexReader.open(dirB, true);
    readerX = new MultiReader(readerA, readerB);
  }
View Full Code Here
