Package: org.apache.lucene.index

Examples of org.apache.lucene.index.RandomIndexWriter$MockIndexWriter


    }
    // prepare a small index with just a few documents. 
    dir = newDirectory();
    anlzr = new MockAnalyzer(random);
    IndexWriterConfig iwc = newIndexWriterConfig( TEST_VERSION_CURRENT, anlzr).setMergePolicy(newLogMergePolicy());
    RandomIndexWriter iw = new RandomIndexWriter(random, dir, iwc);
    if (doMultiSegment) {
      iw.w.setMaxBufferedDocs(_TestUtil.nextInt(random, 2, 7));
    }

    iw.w.setInfoStream(VERBOSE ? System.out : null);
    // add docs not exactly in natural ID order, to verify we do check the order of docs by scores
    int remaining = N_DOCS;
    boolean done[] = new boolean[N_DOCS];
    int i = 0;
    while (remaining > 0) {
      if (done[i]) {
        throw new Exception("to set this test correctly N_DOCS=" + N_DOCS + " must be primary and greater than 2!");
      }
      addDoc(iw, i);
      done[i] = true;
      i = (i + 4) % N_DOCS;
      remaining --;
    }
    if (!doMultiSegment) {
      if (VERBOSE) {
        System.out.println("TEST: setUp optimize");
      }
      iw.optimize();
    }
    iw.close();
    if (VERBOSE) {
      System.out.println("TEST: setUp done close");
    }
  }
View Full Code Here


  }

  // Verifies that a TermsFilter built from a term that does not occur in the
  // index matches nothing: getDocIdSet must return null, not an empty set.
  public void testMissingTerms() throws Exception {
    String fieldName = "field1";
    Directory rd = newDirectory();
    RandomIndexWriter w = new RandomIndexWriter(random(), rd);
    // Index 100 docs whose terms are exact multiples of 10 ("0", "10", ...),
    // so the term "19" queried below is guaranteed to be absent.
    for (int i = 0; i < 100; i++) {
      Document doc = new Document();
      int term = i * 10; //terms are units of 10;
      doc.add(newStringField(fieldName, "" + term, Field.Store.YES));
      w.addDocument(doc);
    }
    // Wrap as a single atomic reader so exactly one leaf context covers the index.
    IndexReader reader = SlowCompositeReaderWrapper.wrap(w.getReader());
    assertTrue(reader.getContext() instanceof AtomicReaderContext);
    AtomicReaderContext context = (AtomicReaderContext) reader.getContext();
    w.close();

    List<Term> terms = new ArrayList<Term>();
    terms.add(new Term(fieldName, "19"));
    // termsFilter(...) is a sibling helper not visible in this snippet;
    // presumably it builds a TermsFilter variant chosen by the boolean flag.
    FixedBitSet bits = (FixedBitSet) termsFilter(random().nextBoolean(), terms).getDocIdSet(context, context.reader().getLiveDocs());
    assertNull("Must match nothing", bits);
View Full Code Here
    // NOTE(review): the remainder of this method (further assertions and
    // reader/directory cleanup) was elided by the code browser above.

  }
 
  // Exercises TermsFilter across a MultiReader whose two sub-readers index
  // different field names, so the filter's field exists in at most one leaf.
  public void testMissingField() throws Exception {
    String fieldName = "field1";
    Directory rd1 = newDirectory();
    RandomIndexWriter w1 = new RandomIndexWriter(random(), rd1);
    Document doc = new Document();
    doc.add(newStringField(fieldName, "content1", Field.Store.YES));
    w1.addDocument(doc);
    IndexReader reader1 = w1.getReader();
    w1.close();
   
    // Second index stores its content under a different field name.
    fieldName = "field2";
    Directory rd2 = newDirectory();
    RandomIndexWriter w2 = new RandomIndexWriter(random(), rd2);
    doc = new Document();
    doc.add(newStringField(fieldName, "content2", Field.Store.YES));
    w2.addDocument(doc);
    IndexReader reader2 = w2.getReader();
    w2.close();
   
    // fieldName is now "field2", but the value "content1" was only ever
    // indexed under "field1" — so this filter should match in neither leaf.
    TermsFilter tf = new TermsFilter(new Term(fieldName, "content1"));
    MultiReader multi = new MultiReader(reader1, reader2);
    for (AtomicReaderContext context : multi.leaves()) {
      DocIdSet docIdSet = tf.getDocIdSet(context, context.reader().getLiveDocs());
View Full Code Here
    // NOTE(review): loop body assertions, loop close, and the reader/rd1
    // cleanup were elided by the code browser above.

    rd2.close();
  }
 
  public void testFieldNotPresent() throws IOException {
    Directory dir = newDirectory();
    RandomIndexWriter w = new RandomIndexWriter(random(), dir);
    int num = atLeast(3);
    int skip = random().nextInt(num);
    List<Term> terms = new ArrayList<Term>();
    for (int i = 0; i < num; i++) {
      terms.add(new Term("field" + i, "content1"));
      Document doc = new Document();
      if (skip == i) {
        continue;
      }
      doc.add(newStringField("field" + i, "content1", Field.Store.YES));
      w.addDocument(doc)
    }
   
    w.forceMerge(1);
    IndexReader reader = w.getReader();
    w.close();
    assertEquals(1, reader.leaves().size());
   
   
   
    AtomicReaderContext context = reader.leaves().get(0);
View Full Code Here

    dir.close();
  }
 
  // Indexes docs under randomly chosen field names, then pads the term set
  // with extra field/term pairs that were never indexed; the (elided)
  // assertions check the filter still matches exactly the indexed docs.
  public void testSkipField() throws IOException {
    Directory dir = newDirectory();
    RandomIndexWriter w = new RandomIndexWriter(random(), dir);
    int num = atLeast(10);
    Set<Term> terms = new HashSet<Term>();
    for (int i = 0; i < num; i++) {
      String field = "field" + random().nextInt(100);
      terms.add(new Term(field, "content1"));
      Document doc = new Document();
      doc.add(newStringField(field, "content1", Field.Store.YES));
      w.addDocument(doc);
    }
    // Add terms for fields that were NOT indexed: any already-indexed
    // field/term pair is in `terms`, so the loop only admits new fields.
    int randomFields = random().nextInt(10);
    for (int i = 0; i < randomFields; i++) {
      while (true) {
        String field = "field" + random().nextInt(100);
        Term t = new Term(field, "content1");
        if (!terms.contains(t)) {
          terms.add(t);
          break;
        }
      }
    }
    // Collapse to one segment so a single leaf context covers everything.
    w.forceMerge(1);
    IndexReader reader = w.getReader();
    w.close();
    assertEquals(1, reader.leaves().size());
    AtomicReaderContext context = reader.leaves().get(0);
    TermsFilter tf = new TermsFilter(new ArrayList<Term>(terms));

    FixedBitSet bits = (FixedBitSet) tf.getDocIdSet(context, context.reader().getLiveDocs());
View Full Code Here
    // NOTE(review): the cardinality assertions and reader.close() that
    // followed here were elided by the code browser above.

    dir.close();
  }
 
  // Randomized round-trip: index random realistic unicode terms (in one field
  // or many), then (in the elided portion) run random TermsFilter queries and
  // compare against an oracle. Body is truncated by the code browser below.
  public void testRandom() throws IOException {
    Directory dir = newDirectory();
    RandomIndexWriter w = new RandomIndexWriter(random(), dir);
    int num = atLeast(100);
    // Half the runs confine all terms to one field to stress that code path.
    final boolean singleField = random().nextBoolean();
    List<Term> terms = new ArrayList<Term>();
    for (int i = 0; i < num; i++) {
      String field = "field" + (singleField ? "1" : random().nextInt(100));
      String string = _TestUtil.randomRealisticUnicodeString(random());
      terms.add(new Term(field, string));
      Document doc = new Document();
      doc.add(newStringField(field, string, Field.Store.YES));
      w.addDocument(doc);
    }
    IndexReader reader = w.getReader();
    w.close();
   
    IndexSearcher searcher = newSearcher(reader);
   
    int numQueries = atLeast(10);
    for (int i = 0; i < numQueries; i++) {
    // NOTE(review): the query loop body, closing braces, and cleanup were
    // elided by the code browser ("View Full Code Here").
View Full Code Here

  public void setUp() throws Exception {
    super.setUp();

    analyzer = new MockAnalyzer(random());
    directory = newDirectory();
    RandomIndexWriter writer = new RandomIndexWriter(random(), directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy()));

    //Add series of docs with misspelt names
    addDoc(writer, "jonathon smythe", "1");
    addDoc(writer, "jonathan smith", "2");
    addDoc(writer, "johnathon smyth", "3");
    addDoc(writer, "johnny smith", "4");
    addDoc(writer, "jonny smith", "5");
    addDoc(writer, "johnathon smythe", "6");
    reader = writer.getReader();
    writer.close();
    searcher = newSearcher(reader);
  }
View Full Code Here

    collator.setStrength(Collator.IDENTICAL);
    collator.setDecomposition(Collator.NO_DECOMPOSITION);

    numDocs = 1000 * RANDOM_MULTIPLIER;
    dir = newDirectory();
    RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
    for (int i = 0; i < numDocs; i++) {
      Document doc = new Document();
      String value = _TestUtil.randomUnicodeString(random());
      Field field = newStringField("field", value, Field.Store.YES);
      doc.add(field);
      iw.addDocument(doc);
    }
    splitDoc = _TestUtil.randomUnicodeString(random());
    reader = iw.getReader();
    iw.close();

    searcher = newSearcher(reader);
  }
View Full Code Here

    FieldType customType = new FieldType();
    customType.setStored(true);

    Directory dir = newDirectory();
    RandomIndexWriter w = new RandomIndexWriter(
        random(),
        dir,
        newIndexWriterConfig(TEST_VERSION_CURRENT,
            new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy()));
    boolean canUseIDV = !"Lucene3x".equals(w.w.getConfig().getCodec().getName());
    List<Document> documents = new ArrayList<Document>();
    // 0
    Document doc = new Document();
    addGroupField(doc, groupField, "author1", canUseIDV);
    doc.add(new TextField("content", "random text", Field.Store.YES));
    doc.add(new Field("id", "1", customType));
    documents.add(doc);

    // 1
    doc = new Document();
    addGroupField(doc, groupField, "author1", canUseIDV);
    doc.add(new TextField("content", "some more random text", Field.Store.YES));
    doc.add(new Field("id", "2", customType));
    documents.add(doc);

    // 2
    doc = new Document();
    addGroupField(doc, groupField, "author1", canUseIDV);
    doc.add(new TextField("content", "some more random textual data", Field.Store.YES));
    doc.add(new Field("id", "3", customType));
    doc.add(new StringField("groupend", "x", Field.Store.NO));
    documents.add(doc);
    w.addDocuments(documents);
    documents.clear();

    // 3
    doc = new Document();
    addGroupField(doc, groupField, "author2", canUseIDV);
    doc.add(new TextField("content", "some random text", Field.Store.YES));
    doc.add(new Field("id", "4", customType));
    doc.add(new StringField("groupend", "x", Field.Store.NO));
    w.addDocument(doc);

    // 4
    doc = new Document();
    addGroupField(doc, groupField, "author3", canUseIDV);
    doc.add(new TextField("content", "some more random text", Field.Store.YES));
    doc.add(new Field("id", "5", customType));
    documents.add(doc);

    // 5
    doc = new Document();
    addGroupField(doc, groupField, "author3", canUseIDV);
    doc.add(new TextField("content", "random", Field.Store.YES));
    doc.add(new Field("id", "6", customType));
    doc.add(new StringField("groupend", "x", Field.Store.NO));
    documents.add(doc);
    w.addDocuments(documents);
    documents.clear();

    // 6 -- no author field
    doc = new Document();
    doc.add(new TextField("content", "random word stuck in alot of other text", Field.Store.YES));
    doc.add(new Field("id", "6", customType));
    doc.add(new StringField("groupend", "x", Field.Store.NO));

    w.addDocument(doc);

    IndexSearcher indexSearcher = newSearcher(w.getReader());
    w.close();

    Sort groupSort = Sort.RELEVANCE;
    GroupingSearch groupingSearch = createRandomGroupingSearch(groupField, groupSort, 5, canUseIDV);

    TopGroups<?> groups = groupingSearch.search(indexSearcher, null, new TermQuery(new Term("content", "random")), 0, 10);
View Full Code Here

    collator.setStrength(Collator.IDENTICAL);
    collator.setDecomposition(Collator.NO_DECOMPOSITION);

    int numDocs = 20 * RANDOM_MULTIPLIER;
    Directory dir = newDirectory();
    RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
    for (int i = 0; i < numDocs; i++) {
      Document doc = new Document();
      String value = _TestUtil.randomUnicodeString(random());
      Field field = newStringField("field", value, Field.Store.YES);
      doc.add(field);
      iw.addDocument(doc);
    }
    IndexReader reader = iw.getReader();
    iw.close();

    IndexSearcher searcher = newSearcher(reader);

    String startPoint = _TestUtil.randomUnicodeString(random());
    String endPoint = _TestUtil.randomUnicodeString(random());
View Full Code Here

TOP

Related Classes of org.apache.lucene.index.RandomIndexWriter$MockIndexWriter

Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc. and is owned by Oracle Inc. Contact coftware#gmail.com.