Package org.apache.lucene.index

Examples of org.apache.lucene.index.IndexReader.docFreq()
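As a reference point for the snippets below, here is a minimal, self-contained sketch of what docFreq() reports: the number of documents in the index that contain the given term. It assumes the Lucene 3.x API used by most of the examples (IndexReader.open(Directory), RAMDirectory); the class name DocFreqExample is illustrative.

    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.Field;
    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.index.Term;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.RAMDirectory;
    import org.apache.lucene.util.Version;

    public class DocFreqExample {
      public static void main(String[] args) throws Exception {
        Directory dir = new RAMDirectory();
        IndexWriter writer = new IndexWriter(dir,
            new IndexWriterConfig(Version.LUCENE_36, new StandardAnalyzer(Version.LUCENE_36)));

        // Two documents contain "aaa"; only the first contains "bbb".
        for (String content : new String[] {"aaa bbb", "aaa ccc"}) {
          Document doc = new Document();
          doc.add(new Field("content", content, Field.Store.NO, Field.Index.ANALYZED));
          writer.addDocument(doc);
        }
        writer.close();

        IndexReader reader = IndexReader.open(dir);
        System.out.println(reader.docFreq(new Term("content", "aaa"))); // 2
        System.out.println(reader.docFreq(new Term("content", "bbb"))); // 1
        System.out.println(reader.docFreq(new Term("content", "zzz"))); // 0 for an absent term
        reader.close();
        dir.close();
      }
    }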


      SimpleOrderedMap<Object> results = new SimpleOrderedMap<Object>();
      String[] wordz = words.split(" ");
      for (String word : wordz)
      {
        SimpleOrderedMap<Object> nl = new SimpleOrderedMap<Object>();
        nl.add("frequency", indexReader.docFreq(new Term(suggestionField, word)));
        String[] suggestions =
          spellChecker.suggestSimilar(word, numSug,
          indexReader, suggestionField, onlyMorePopular);

        // suggestion array
        NamedList<Object> sa = new NamedList<Object>();
        for (int i=0; i<suggestions.length; i++) {
          // suggestion item
          SimpleOrderedMap<Object> si = new SimpleOrderedMap<Object>();
          si.add("frequency", indexReader.docFreq(new Term(termSourceField, suggestions[i])));
          sa.add(suggestions[i], si);
        }
        nl.add("suggestions", sa);
        results.add(word, nl);
      }
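In this Solr spell-checker handler, docFreq() reports how many documents contain the misspelled word and each suggestion, so the client can compare frequencies. With onlyMorePopular set, SpellChecker.suggestSimilar() restricts suggestions to terms that occur in more documents than the input word.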

      writer.close();

      // Make sure starting index seems to be working properly:
      Term searchTerm = new Term("content", "aaa");       
      IndexReader reader = IndexReader.open(startDir);
      assertEquals("first docFreq", 57, reader.docFreq(searchTerm));

      IndexSearcher searcher = new IndexSearcher(reader);
      Hits hits = searcher.search(new TermQuery(searchTerm));
      assertEquals("first number of hits", 57, hits.length());
      searcher.close();
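This test closes the writer, then checks docFreq() against a search: in an index with no deletions, the document frequency of a term equals the hit count of a TermQuery on that term, so both assertions expect 57. (docFreq() ignores deletions, so the two numbers can diverge once documents are deleted.)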

    writer.close();

    IndexReader reader = IndexReader.open(dir);

    // Make sure all terms < max size were indexed
    assertEquals(2, reader.docFreq(new Term("content", "abc")));
    assertEquals(1, reader.docFreq(new Term("content", "bbb")));
    assertEquals(1, reader.docFreq(new Term("content", "term")));
    assertEquals(1, reader.docFreq(new Term("content", "another")));

    // Make sure position is still incremented when
    // massive term is skipped:
    DocsAndPositionsEnum tps = MultiFields.getTermPositionsEnum(reader,
                                                                MultiFields.getLiveDocs(reader),
                                                                "content",
                                                                new BytesRef("another")); // field/term args are an assumed completion of the truncated call

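The assertions above come from a test that indexes one massive term alongside normal ones: the oversized token is skipped by the analyzer, so only the surrounding terms get document frequencies, and the positions enum is then used to verify that the skipped token still advanced the position counter.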

    sa.setMaxTokenLength(100000);
    writer  = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, sa));
    writer.addDocument(doc);
    writer.close();
    reader = IndexReader.open(dir);
    assertEquals(1, reader.docFreq(new Term("content", bigTerm)));
    reader.close();

    dir.close();
  }
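After raising the analyzer's maximum token length with setMaxTokenLength(100000) and reindexing, the huge term is no longer skipped, so its docFreq() becomes 1.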
 

    int clusterSize = wvws.size();

    for (TermEntry termEntry : termEntryMap.values()) {
       
      int corpusDF = reader.docFreq(new Term(this.contentField,termEntry.getTerm()));
      int outDF = corpusDF - termEntry.getDocFreq();
      int inDF = termEntry.getDocFreq();
      double logLikelihoodRatio = scoreDocumentFrequencies(inDF, outDF, clusterSize, numDocs);
      TermInfoClusterInOut termInfoCluster =
          new TermInfoClusterInOut(termEntry.getTerm(), inDF, outDF, logLikelihoodRatio);
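Here (apparently from Mahout's cluster-labeling code) docFreq() gives the corpus-wide document frequency of a term; subtracting the in-cluster frequency yields the out-of-cluster count, and both feed a log-likelihood ratio that flags terms over-represented in the cluster. For example, corpusDF = 10 with 4 in-cluster occurrences gives inDF = 4 and outDF = 6.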

