Package org.apache.lucene.index

Examples of org.apache.lucene.index.DirectoryReader

The snippets below, drawn from Lucene's facet module and its tests, illustrate common DirectoryReader patterns: opening a reader over a Directory, iterating its leaf readers, searching via an IndexSearcher, and inspecting index commit data.


  public static void merge(Directory srcIndexDir, Directory srcTaxDir, OrdinalMap map, IndexWriter destIndexWriter,
      DirectoryTaxonomyWriter destTaxWriter, FacetIndexingParams params) throws IOException {
    // merge the taxonomies
    destTaxWriter.addTaxonomy(srcTaxDir, map);
    int[] ordinalMap = map.getMap();
    // termInfosIndexDivisor=-1 skips loading the terms index; this reader is
    // only consumed by addIndexes() below, which does not need it
    DirectoryReader reader = DirectoryReader.open(srcIndexDir, -1);
    List<AtomicReaderContext> leaves = reader.leaves();
    int numReaders = leaves.size();
    AtomicReader[] wrappedLeaves = new AtomicReader[numReaders];
    for (int i = 0; i < numReaders; i++) {
      wrappedLeaves[i] = new OrdinalMappingAtomicReader(leaves.get(i).reader(), ordinalMap, params);
    }
    try {
      destIndexWriter.addIndexes(new MultiReader(wrappedLeaves));
     
      // commit changes to taxonomy and index respectively.
      destTaxWriter.commit();
      destIndexWriter.commit();
    } finally {
      reader.close();
    }
  }
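For context, a minimal sketch of how this merge utility might be driven, following the import-free excerpt style of this page. The destination writers are created fresh here; srcIndexDir and srcTaxoDir are assumed to already hold a faceted index, and MemoryOrdinalMap plus FacetIndexingParams.ALL_PARENTS are assumptions from the Lucene 4.2-era facet API:

  public static void mergeIntoNewIndex(Directory srcIndexDir, Directory srcTaxoDir) throws IOException {
    Directory destIndexDir = new RAMDirectory(), destTaxoDir = new RAMDirectory();
    IndexWriter destIndexWriter = new IndexWriter(destIndexDir,
        new IndexWriterConfig(Version.LUCENE_42, new WhitespaceAnalyzer(Version.LUCENE_42)));
    DirectoryTaxonomyWriter destTaxoWriter = new DirectoryTaxonomyWriter(destTaxoDir);
    // MemoryOrdinalMap keeps the source->destination ordinal mapping in RAM;
    // DiskOrdinalMap is the alternative for very large taxonomies
    merge(srcIndexDir, srcTaxoDir, new DirectoryTaxonomyWriter.MemoryOrdinalMap(),
        destIndexWriter, destTaxoWriter, FacetIndexingParams.ALL_PARENTS);
    IOUtils.close(destIndexWriter, destTaxoWriter);
  }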


  /**
   * Returns a mapping from field name (including any partition suffix) to the
   * payload term that stores its category list data, traversing the index so
   * that only field/term combinations which actually exist are included.
   */
  public static Map<String,Term> buildFieldTermsMap(Directory dir, FacetIndexingParams fip) throws IOException {
    // only add field-Term mapping that will actually have DocValues in the end.
    // therefore traverse the index terms and add what exists. this pertains to
    // multiple CLPs, as well as partitions
    DirectoryReader reader = DirectoryReader.open(dir);
    final Map<String,Term> fieldTerms = new HashMap<String,Term>();
    for (AtomicReaderContext context : reader.leaves()) {
      for (CategoryListParams clp : fip.getAllCategoryListParams()) {
        Terms terms = context.reader().terms(clp.field);
        if (terms != null) {
          TermsEnum te = terms.iterator(null);
          BytesRef termBytes = null;
          while ((termBytes = te.next()) != null) {
            String term = termBytes.utf8ToString();
            if (term.startsWith(PAYLOAD_TERM_TEXT)) {
              if (term.equals(PAYLOAD_TERM_TEXT)) {
                fieldTerms.put(clp.field, new Term(clp.field, term));
              } else {
                fieldTerms.put(clp.field + term.substring(PAYLOAD_TERM_TEXT.length()), new Term(clp.field, term));
              }
            }
          }
        }       
      }
    }
    reader.close();
    return fieldTerms;
  }
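A small usage sketch for the method above, again with imports elided; passing FacetIndexingParams.ALL_PARENTS (the 4.2-era default instance) is an assumption:

  public static void printFieldTerms(Directory dir) throws IOException {
    Map<String,Term> fieldTerms = buildFieldTermsMap(dir, FacetIndexingParams.ALL_PARENTS);
    for (Map.Entry<String,Term> e : fieldTerms.entrySet()) {
      // each entry maps a (possibly partitioned) field name to its payload term
      System.out.println(e.getKey() + " -> " + e.getValue());
    }
  }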

  @Test
  public void testMergeHierarchies() throws Exception {
    Directory indexDir = new RAMDirectory(), taxoDir = new RAMDirectory();
    initIndex(indexDir, taxoDir);
   
    DirectoryReader indexReader = DirectoryReader.open(indexDir);
    TaxonomyReader taxoReader = new DirectoryTaxonomyReader(taxoDir);
    IndexSearcher searcher = new IndexSearcher(indexReader);
   
    String[] exp = new String[] { "Date (0)\n  2010 (4)\n  2011 (3)\n" };
    searchIndex(taxoReader, searcher, false, exp, new String[][] { new String[] { "Date" } }, null);
    // ...

  }
 
  @Test
  public void testDifferentNumResults() throws Exception {
    // test the collector w/ FacetRequests and different numResults
    DirectoryReader indexReader = DirectoryReader.open(indexDir);
    TaxonomyReader taxoReader = new DirectoryTaxonomyReader(taxoDir);
    IndexSearcher searcher = newSearcher(indexReader);
   
    FacetSearchParams fsp = new FacetSearchParams(new CountFacetRequest(CP_A, NUM_CHILDREN_CP_A),
        new CountFacetRequest(CP_B, NUM_CHILDREN_CP_B));
    // ...

    IOUtils.close(indexReader, taxoReader);
  }
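The elided middle of these facet tests follows one pattern: wrap the request parameters in a FacetsCollector, run a query, and read the per-request results back. A hedged sketch against the 4.2-era API (the MatchAllDocsQuery and the printout are illustrative):

    FacetsCollector fc = FacetsCollector.create(fsp, indexReader, taxoReader);
    searcher.search(new MatchAllDocsQuery(), fc);
    for (FacetResult result : fc.getFacetResults()) {
      System.out.println(result); // one FacetResult per FacetRequest, here CP_A and CP_B
    }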
 
  @Test
  public void testAllCounts() throws Exception {
    DirectoryReader indexReader = DirectoryReader.open(indexDir);
    TaxonomyReader taxoReader = new DirectoryTaxonomyReader(taxoDir);
    IndexSearcher searcher = newSearcher(indexReader);
   
    FacetSearchParams fsp = new FacetSearchParams(new CountFacetRequest(CP_A, NUM_CHILDREN_CP_A),
        new CountFacetRequest(CP_B, NUM_CHILDREN_CP_B));
    // ...

    IOUtils.close(indexReader, taxoReader);
  }
 
  @Test
  public void testBigNumResults() throws Exception {
    DirectoryReader indexReader = DirectoryReader.open(indexDir);
    TaxonomyReader taxoReader = new DirectoryTaxonomyReader(taxoDir);
    IndexSearcher searcher = newSearcher(indexReader);
   
    FacetSearchParams fsp = new FacetSearchParams(new CountFacetRequest(CP_A, Integer.MAX_VALUE),
        new CountFacetRequest(CP_B, Integer.MAX_VALUE));
    // ...

    IOUtils.close(indexReader, taxoReader);
  }
 
  @Test
  public void testNoParents() throws Exception {
    DirectoryReader indexReader = DirectoryReader.open(indexDir);
    TaxonomyReader taxoReader = new DirectoryTaxonomyReader(taxoDir);
    IndexSearcher searcher = newSearcher(indexReader);
    FacetSearchParams fsp = new FacetSearchParams(fip, new CountFacetRequest(CP_C, NUM_CHILDREN_CP_C),
        new CountFacetRequest(CP_D, NUM_CHILDREN_CP_D));
    FacetsCollector fc = FacetsCollector.create(randomAccumulator(fsp, indexReader, taxoReader));
    // ...
  }

    indexTwoDocs(indexWriter, null, true);         // 5th segment, with content, no categories
    indexTwoDocs(indexWriter, facetFields, true);  // 6th segment, with content, with categories
    indexTwoDocs(indexWriter, null, true);         // 7th segment, with content, no categories
    IOUtils.close(indexWriter, taxoWriter);

    DirectoryReader indexReader = DirectoryReader.open(indexDir);
    TaxonomyReader taxoReader = new DirectoryTaxonomyReader(taxoDir);
    IndexSearcher indexSearcher = newSearcher(indexReader);
   
    // search for "f:a", only segments 1 and 3 should match results
    Query q = new TermQuery(new Term("f", "a"));
    // ...
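The elided assertions presumably run the query through the searcher; a minimal continuation sketch (the non-zero hit check is illustrative, since the earlier segments are not shown here):

    TopDocs td = indexSearcher.search(q, 10);
    // only the segments indexed with content can contribute hits
    assertTrue(td.totalHits > 0);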

    fieldType.setStored(true);
    Field field = new Field(FIELD, content, fieldType);
    doc.add(field);
    writer.addDocument(doc);
    writer.close();
    DirectoryReader ir = DirectoryReader.open(ramDir);
    IndexSearcher is = new IndexSearcher(ir);
     
    int hits = is.search(q, 10).totalHits;
    ir.close();
    ramDir.close();
    return hits == 1;
  }

    taxoWriter.addCategory(new CategoryPath("b"));
    Map<String, String> userCommitData = new HashMap<String, String>();
    userCommitData.put("testing", "1 2 3");
    taxoWriter.setCommitData(userCommitData);
    taxoWriter.close();
    DirectoryReader r = DirectoryReader.open(dir);
    assertEquals("2 categories plus root should have been committed to the underlying directory", 3, r.numDocs());
    Map <String, String> readUserCommitData = r.getIndexCommit().getUserData();
    assertTrue("wrong value extracted from commit data",
        "1 2 3".equals(readUserCommitData.get("testing")));
    assertNotNull(DirectoryTaxonomyWriter.INDEX_EPOCH + " not found in commitData", readUserCommitData.get(DirectoryTaxonomyWriter.INDEX_EPOCH));
    r.close();
   
    // open DirTaxoWriter again and commit, INDEX_EPOCH should still exist
    // in the commit data, otherwise DirTaxoReader.refresh() might not detect
    // that the taxonomy index has been recreated.
    taxoWriter = new DirectoryTaxonomyWriter(dir, OpenMode.CREATE_OR_APPEND, NO_OP_CACHE);
    taxoWriter.addCategory(new CategoryPath("c")); // add a category so that commit will happen
    taxoWriter.setCommitData(new HashMap<String, String>(){{
      put("just", "data");
    }});
    taxoWriter.commit();
   
    // verify taxoWriter.getCommitData()
    assertNotNull(DirectoryTaxonomyWriter.INDEX_EPOCH
        + " not found in taxoWriter.commitData", taxoWriter.getCommitData().get(DirectoryTaxonomyWriter.INDEX_EPOCH));
    taxoWriter.close();
   
    r = DirectoryReader.open(dir);
    readUserCommitData = r.getIndexCommit().getUserData();
    assertNotNull(DirectoryTaxonomyWriter.INDEX_EPOCH + " not found in commitData", readUserCommitData.get(DirectoryTaxonomyWriter.INDEX_EPOCH));
    r.close();
   
    dir.close();
  }
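Related: user commit data can also be inspected without opening a full reader, via the static DirectoryReader.listCommits; a short sketch (dir is assumed to contain at least one commit):

    for (IndexCommit commit : DirectoryReader.listCommits(dir)) {
      System.out.println(commit.getSegmentsFileName() + " -> " + commit.getUserData());
    }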
