Package org.apache.lucene.index

Examples of org.apache.lucene.index.MultiReader
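
MultiReader is a composite IndexReader: it wraps a set of sub-readers and presents them as one logical index, so a single IndexSearcher can query several indexes (or index shards) at once. The snippet below is a minimal, self-contained sketch of that idea, assuming a Lucene 5.x/6.x-era API (DirectoryReader, TextField, the single-argument IndexWriterConfig constructor, RAMDirectory); the class name, field name, and document text are made up for illustration.

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.MultiReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;

public class MultiReaderSketch {

  public static void main(String[] args) throws Exception {
    // Two independent in-memory indexes, each holding one document.
    Directory dirA = new RAMDirectory();
    Directory dirB = new RAMDirectory();
    addDoc(dirA, "apache lucene");
    addDoc(dirB, "apache hadoop");

    DirectoryReader readerA = DirectoryReader.open(dirA);
    DirectoryReader readerB = DirectoryReader.open(dirB);

    // Present both indexes as a single logical index.
    MultiReader multi = new MultiReader(readerA, readerB);
    IndexSearcher searcher = new IndexSearcher(multi);

    TopDocs docs = searcher.search(new TermQuery(new Term("content", "apache")), 10);
    System.out.println("hits across both indexes: " + docs.totalHits); // one hit per index

    // Close the composite reader, the sub-readers, and the directories.
    multi.close();
    readerA.close();
    readerB.close();
    dirA.close();
    dirB.close();
  }

  private static void addDoc(Directory dir, String text) throws Exception {
    try (IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()))) {
      Document doc = new Document();
      doc.add(new TextField("content", text, Field.Store.NO));
      writer.addDocument(doc);
    }
  }
}

The excerpts that follow, gathered from test code in several projects, all apply the same pattern. The first one combines the near-real-time readers of two index writers and runs a FuzzyQuery across both: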


    addDoc("f123456", writer2);
   
    IndexReader ir1 = writer.getReader();
    IndexReader ir2 = writer2.getReader();
   
    MultiReader mr = new MultiReader(ir1, ir2);
    IndexSearcher searcher = newSearcher(mr);
    FuzzyQuery fq = new FuzzyQuery(new Term("field", "z123456"), 1, 0, 2, false);
    TopDocs docs = searcher.search(fq, 2);
    assertEquals(5, docs.totalHits); // 5 docs, from the a and b's
    mr.close();
    ir1.close();
    ir2.close();
    writer.close();
    writer2.close();
    directory.close();
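
The same two-index setup, this time searched with SlowFuzzyQuery instead of FuzzyQuery: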


    addDoc("f123456", writer2);
   
    IndexReader ir1 = writer.getReader();
    IndexReader ir2 = writer2.getReader();
   
    MultiReader mr = new MultiReader(ir1, ir2);
    IndexSearcher searcher = newSearcher(mr);
    SlowFuzzyQuery fq = new SlowFuzzyQuery(new Term("field", "z123456"), 1f, 0, 2);
    TopDocs docs = searcher.search(fq, 2);
    assertEquals(5, docs.totalHits); // 5 docs, from the a and b's
    mr.close();
    ir1.close();
    ir2.close();
    writer.close();
    writer2.close();
    directory.close();
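
A sorting test in which a MultiReader constructed with no sub-readers stands in for an empty index (the excerpt opens with the closing lines of the preceding test):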

    d.close();
  }
 
  /** test sorts when there's nothing in the index */
  public void testEmptyIndex() throws Exception {
    IndexSearcher empty = newSearcher(new MultiReader());
    Query query = new TermQuery(new Term("contents", "foo"));
 
    Sort sort = new Sort();
    TopDocs td = empty.search(query, null, 10, sort, true, true);
    assertEquals(0, td.totalHits);
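
A BooleanQuery with a MUST_NOT wildcard clause, run against a MultiReader first through a plain IndexSearcher and then through one backed by an ExecutorService: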

    query.add(new TermQuery(new Term("field", "foo")), BooleanClause.Occur.MUST);
    WildcardQuery wildcardQuery = new WildcardQuery(new Term("field", "ba*"));
    wildcardQuery.setRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE);
    query.add(wildcardQuery, BooleanClause.Occur.MUST_NOT);
   
    MultiReader multireader = new MultiReader(reader1, reader2);
    IndexSearcher searcher = newSearcher(multireader);
    assertEquals(0, searcher.search(query, 10).totalHits);
   
    final ExecutorService es = Executors.newCachedThreadPool(new NamedThreadFactory("NRT search threads"));
    searcher = new IndexSearcher(multireader, es);
    if (VERBOSE)
      System.out.println("rewritten form: " + searcher.rewrite(query));
    assertEquals(0, searcher.search(query, 10).totalHits);
    es.shutdown();
    es.awaitTermination(1, TimeUnit.SECONDS);

    multireader.close();
    reader1.close();
    reader2.close();
    dir1.close();
    dir2.close();
  }
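
A Hadoop-based test that opens one IndexReader per index shard through FileSystemDirectory, combines them into a single MultiReader, and searches with the old Hits API: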

          new FileSystemDirectory(fs, new Path(shards[i].getDirectory()),
              false, conf);
      readers[i] = IndexReader.open(dir);
    }

    IndexReader reader = new MultiReader(readers);
    IndexSearcher searcher = new IndexSearcher(reader);
    Hits hits = searcher.search(new TermQuery(new Term("content", "apache")));

    assertEquals(numRuns * numDocsPerRun, hits.length());

    int[] counts = new int[numDocsPerRun];
    for (int i = 0; i < hits.length(); i++) {
      Document doc = hits.doc(i);
      counts[Integer.parseInt(doc.get("id"))]++;
    }

    for (int i = 0; i < numDocsPerRun; i++) {
      assertEquals(numRuns, counts[i]);
    }

    // max field length is 2, so "dot" is also indexed but not "org"
    hits = searcher.search(new TermQuery(new Term("content", "dot")));
    assertEquals(numRuns, hits.length());

    hits = searcher.search(new TermQuery(new Term("content", "org")));
    assertEquals(0, hits.length());

    searcher.close();
    reader.close();

    // open and close an index writer with KeepOnlyLastCommitDeletionPolicy
    // to remove earlier checkpoints
    for (int i = 0; i < shards.length; i++) {
      Directory dir =
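
Continuing from the loop above, the shards are re-opened through a fresh MultiReader; this time only the odd-numbered documents are expected to remain: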

          new FileSystemDirectory(fs, new Path(shards[i].getDirectory()),
              false, conf);
      readers[i] = IndexReader.open(dir);
    }

    IndexReader reader = new MultiReader(readers);
    IndexSearcher searcher = new IndexSearcher(reader);
    Hits hits = searcher.search(new TermQuery(new Term("content", "apache")));
    assertEquals(0, hits.length());

    hits = searcher.search(new TermQuery(new Term("content", "hadoop")));
    assertEquals(numDocsPerRun / 2, hits.length());

    int[] counts = new int[numDocsPerRun];
    for (int i = 0; i < hits.length(); i++) {
      Document doc = hits.doc(i);
      counts[Integer.parseInt(doc.get("id"))]++;
    }

    for (int i = 0; i < numDocsPerRun; i++) {
      if (i % 2 == 0) {
        assertEquals(0, counts[i]);
      } else {
        assertEquals(1, counts[i]);
      }
    }

    searcher.close();
    reader.close();
  }
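
Typical setup code: once the two writers are closed, readers are opened over both directories (here with the readOnly flag) and wrapped in a MultiReader: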

    }
    wA.close();
    wB.close();
    readerA = IndexReader.open(dirA, true);
    readerB = IndexReader.open(dirB, true);
    readerX = new MultiReader(new IndexReader[] { readerA, readerB });
  }
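
The same pattern using the single-argument IndexReader.open(Directory) overload: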

    }
    wA.close();
    wB.close();
    readerA = IndexReader.open(dirA);
    readerB = IndexReader.open(dirB);
    readerX = new MultiReader(new IndexReader[] { readerA, readerB });
  }


