Examples of TermDocs
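
TermDocs is the enumerator interface in pre-4.0 Lucene for stepping through the documents that contain a given term; doc() returns the internal document id and freq() the within-document term frequency. The snippets below show it used for field-cache loading, tests, bitset building and index consistency checks. As a minimal, self-contained sketch of the canonical loop (the class, field and term names are illustrative, not taken from the examples below):

    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.index.Term;
    import org.apache.lucene.index.TermDocs;
    import org.apache.lucene.store.Directory;

    public class TermDocsSketch {
      // Print every document containing the term contents:lucene, with its frequency.
      public static void printMatches(Directory dir) throws Exception {
        IndexReader reader = IndexReader.open(dir, true); // read-only reader
        TermDocs td = reader.termDocs(new Term("contents", "lucene"));
        try {
          while (td.next()) {
            System.out.println("doc=" + td.doc() + " freq=" + td.freq());
          }
        } finally {
          td.close();
          reader.close();
        }
      }
    }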


Examples of org.apache.lucene.index.TermDocs

        } catch (NumberFormatException ne) {
          return wrapper.getLongs(reader, field, NUMERIC_UTILS_LONG_PARSER);     
        }
      }
      long[] retArray = null;
      TermDocs termDocs = reader.termDocs();
      TermEnum termEnum = reader.terms(new Term(field));
      try {
        do {
          Term term = termEnum.term();
          // field names are interned, so identity comparison is safe here
          if (term == null || term.field() != field) break;
          long termval = parser.parseLong(term.text());
          if (retArray == null) // late init
            retArray = new long[reader.maxDoc()];
          // position the shared TermDocs on the current term and record its value for every matching doc
          termDocs.seek(termEnum);
          while (termDocs.next()) {
            retArray[termDocs.doc()] = termval;
          }
        } while (termEnum.next());
      } catch (StopFillCacheException stop) {
        // the parser signals it has seen enough values; stop filling silently
      } finally {
        termDocs.close();
        termEnum.close();
      }
      if (retArray == null) // no values
        retArray = new long[reader.maxDoc()];
      return retArray;
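
The snippet above is the classic field-cache "uninversion" pattern: a TermEnum walks the field's terms in order, and for each term the shared TermDocs is repositioned with seek(termEnum) so that every matching document can be assigned the parsed value. The value array is allocated lazily ("late init"), either on the first term encountered or, if the field has no values at all, only at the very end.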

Examples of org.apache.lucene.index.TermDocs

        } catch (NumberFormatException ne) {
          return wrapper.getDoubles(reader, field, NUMERIC_UTILS_DOUBLE_PARSER);     
        }
      }
      double[] retArray = null;
      TermDocs termDocs = reader.termDocs();
      TermEnum termEnum = reader.terms(new Term(field));
      try {
        do {
          Term term = termEnum.term();
          if (term==null || term.field() != field) break;
          double termval = parser.parseDouble(term.text());
          if (retArray == null) // late init
            retArray = new double[reader.maxDoc()];
          termDocs.seek(termEnum);
          while (termDocs.next()) {
            retArray[termDocs.doc()] = termval;
          }
        } while (termEnum.next());
      } catch (StopFillCacheException stop) {
      } finally {
        termDocs.close();
        termEnum.close();
      }
      if (retArray == null) // no values
        retArray = new double[reader.maxDoc()];
      return retArray;
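
Apart from the element type and the parser (parseDouble instead of parseLong, NUMERIC_UTILS_DOUBLE_PARSER as the fallback), this is the same uninversion loop as the long example above.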

Examples of org.apache.lucene.index.TermDocs

    doc.add(new Field("partnum", "Q37", Field.Store.YES, Field.Index.ANALYZED));
    writer.addDocument(doc);
    writer.close();

    IndexReader reader = IndexReader.open(dir, true);
    TermDocs td = reader.termDocs(new Term("partnum", "Q36"));
    assertTrue(td.next());
    td = reader.termDocs(new Term("partnum", "Q37"));
    assertTrue(td.next());
  }
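
The assertions check that both exact terms partnum:Q36 and partnum:Q37 can be found through termDocs(); the document carrying "Q36" is presumably added in the part of the test truncated above, while the "Q37" document is added in the lines shown.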

Examples of org.apache.lucene.index.TermDocs

    assertTrue("Filtered searching should have found some matches",hits.length>0);
    for(int i=0;i<hits.length;i++)
    {
      Document d=searcher.doc(hits[i].doc);
      String url=d.get(KEY_FIELD);
      TermDocs td = reader.termDocs(new Term(KEY_FIELD,url));
      int lastDoc=0;
      while(td.next())
      {
        lastDoc=td.doc();
      }
      assertEquals("Duplicate urls should return last doc",lastDoc, hits[i].doc);
    }
  } 

Examples of org.apache.lucene.index.TermDocs

    assertTrue("Filtered searching should have found some matches",hits.length>0);
    for(int i=0;i<hits.length;i++)
    {
      Document d=searcher.doc(hits[i].doc);
      String url=d.get(KEY_FIELD);
      TermDocs td = reader.termDocs(new Term(KEY_FIELD,url));
      int lastDoc=0;
      td.next();
      lastDoc=td.doc();
      assertEquals("Duplicate urls should return first doc",lastDoc, hits[i].doc);
    }
  } 

Examples of org.apache.lucene.index.TermDocs

    t = aprioriTermEnum.term();

    aprioriTermEnum.close();
    testTermEnum.close();

    TermDocs aprioriTermDocs = aprioriReader.termDocs(t);
    TermDocs testTermDocs = testReader.termDocs(t);

    assertEquals(aprioriTermDocs.next(), testTermDocs.next());
    assertEquals(aprioriTermDocs.freq(), testTermDocs.freq());
    assertEquals(aprioriTermDocs.doc(), testTermDocs.doc());

    if (aprioriTermDocs.skipTo(4)) {
      assertTrue(testTermDocs.skipTo(4));
      assertEquals(aprioriTermDocs.freq(), testTermDocs.freq());
      assertEquals(aprioriTermDocs.doc(), testTermDocs.doc());
    } else {
      assertFalse(testTermDocs.skipTo(4));
    }

    if (aprioriTermDocs.next()) {
      assertTrue(testTermDocs.next());
      assertEquals(aprioriTermDocs.freq(), testTermDocs.freq());
      assertEquals(aprioriTermDocs.doc(), testTermDocs.doc());
    } else {
      assertFalse(testTermDocs.next());
    }


    // beyond this point all next() and skipTo() calls will return false

    if (aprioriTermDocs.skipTo(100)) {
      assertTrue(testTermDocs.skipTo(100));
      assertEquals(aprioriTermDocs.freq(), testTermDocs.freq());
      assertEquals(aprioriTermDocs.doc(), testTermDocs.doc());
    } else {
      assertFalse(testTermDocs.skipTo(100));
    }


    if (aprioriTermDocs.next()) {
      assertTrue(testTermDocs.next());
      assertEquals(aprioriTermDocs.freq(), testTermDocs.freq());
      assertEquals(aprioriTermDocs.doc(), testTermDocs.doc());
    } else {
      assertFalse(testTermDocs.next());
    }

    if (aprioriTermDocs.skipTo(110)) {
      assertTrue(testTermDocs.skipTo(110));
      assertEquals(aprioriTermDocs.freq(), testTermDocs.freq());
      assertEquals(aprioriTermDocs.doc(), testTermDocs.doc());
    } else {
      assertFalse(testTermDocs.skipTo(110));
    }

    if (aprioriTermDocs.skipTo(10)) {
      assertTrue(testTermDocs.skipTo(10));
      assertEquals(aprioriTermDocs.freq(), testTermDocs.freq());
      assertEquals(aprioriTermDocs.doc(), testTermDocs.doc());
    } else {
      assertFalse(testTermDocs.skipTo(10));
    }


    if (aprioriTermDocs.skipTo(210)) {
      assertTrue(testTermDocs.skipTo(210));
      assertEquals(aprioriTermDocs.freq(), testTermDocs.freq());
      assertEquals(aprioriTermDocs.doc(), testTermDocs.doc());
    } else {
      assertFalse(testTermDocs.skipTo(210));
    }

    aprioriTermDocs.close();
    testTermDocs.close();



    // test seek null (AllTermDocs)
    aprioriTermDocs = aprioriReader.termDocs(null);
    testTermDocs = testReader.termDocs(null);

    while (aprioriTermDocs.next()) {
      assertTrue(testTermDocs.next());
      assertEquals(aprioriTermDocs.freq(), testTermDocs.freq());
      assertEquals(aprioriTermDocs.doc(), testTermDocs.doc());
    }
    assertFalse(testTermDocs.next());


    aprioriTermDocs.close();
    testTermDocs.close();


    // test seek default
    aprioriTermDocs = aprioriReader.termDocs();
    testTermDocs = testReader.termDocs();

    // This is invalid use of the API, but if the responses differ it is an indication
    // that something might have changed; in 2.9 and 3.0 the two TermDocs implementations
    // returned different values at this point.
//    assertEquals("Discrepancy during invalid use of the TermDocs API, see comments in test code for details.",
//        aprioriTermDocs.next(), testTermDocs.next());

    // start using the API the way one is supposed to use it

    t = new Term("", "");
    aprioriTermDocs.seek(t);
    testTermDocs.seek(t);

    while (aprioriTermDocs.next()) {
      assertTrue(testTermDocs.next());
      assertEquals(aprioriTermDocs.freq(), testTermDocs.freq());
      assertEquals(aprioriTermDocs.doc(), testTermDocs.doc());
    }
    assertFalse(testTermDocs.next());

    aprioriTermDocs.close();
    testTermDocs.close();


    // clean up
    aprioriReader.close();
    testReader.close();
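
The test above drives the apriori and test readers' TermDocs in lockstep, asserting that next(), skipTo(), doc() and freq() agree, then repeats the walk for termDocs(null) (the AllTermDocs enumerator over every non-deleted document) and for a TermDocs seeked to an empty term. The repeated skipTo blocks all have the same shape; a hypothetical helper (not part of the original test) that captures it could look like this:

    // Sketch only: advance both enumerators to "target" and assert they stay in sync.
    static void assertSameSkipTo(TermDocs apriori, TermDocs test, int target) throws IOException {
      if (apriori.skipTo(target)) {
        assertTrue(test.skipTo(target));
        assertEquals(apriori.freq(), test.freq());
        assertEquals(apriori.doc(), test.doc());
      } else {
        assertFalse(test.skipTo(target));
      }
    }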

Examples of org.apache.lucene.index.TermDocs

        break;
      }
      OpenBitSet termBitset = new OpenBitSet(reader.maxDoc());
     
      // Generate bitset for the term
      TermDocs termDocs = reader.termDocs(term);
     
      while (termDocs.next()) {
        termBitset.set(termDocs.doc());
      }
     
      // AND the term's bitset with cluster doc bitset to get the term's in-cluster frequency.
      // This modifies the termBitset, but that's fine as we are not using it anywhere else.
      termBitset.and(clusterDocBitset);
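
After the and(), the term's in-cluster document frequency can presumably be read off as termBitset.cardinality(); the continuation of the loop is not shown in this snippet.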

Examples of org.apache.lucene.index.TermDocs

        try {
            CachingMultiIndexReader reader = index.getIndexReader();
            try {
                Term aggregateIds =
                    new Term(FieldNames.AGGREGATED_NODE_UUID, "");
                TermDocs tDocs = reader.termDocs();
                try {
                    ItemStateManager ism = getContext().getItemStateManager();
                    for (NodeId id : removedIds) {
                        aggregateIds =
                            aggregateIds.createTerm(id.toString());
                        tDocs.seek(aggregateIds);
                        while (tDocs.next()) {
                            Document doc = reader.document(
                                    tDocs.doc(), FieldSelectors.UUID);
                            NodeId nId = new NodeId(doc.get(FieldNames.UUID));
                            NodeState nodeState = (NodeState) ism.getItemState(nId);
                            aggregates.put(nId, nodeState);
                            found++;

                            // JCR-2989 Support for embedded index aggregates
                            int sizeBefore = aggregates.size();
                            retrieveAggregateRoot(nodeState, aggregates);
                            found += aggregates.size() - sizeBefore;
                        }
                    }
                } finally {
                    tDocs.close();
                }
            } finally {
                reader.release();
            }
        } catch (Exception e) {
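
Note that a single TermDocs instance is reused for all removed node ids: the term is rebuilt with aggregateIds.createTerm(id.toString()) (same field, new text) and the enumerator is repositioned with seek(), rather than opening a new TermDocs per id.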

Examples of org.apache.lucene.index.TermDocs

               {
                  CachingMultiIndexReader reader = index.getIndexReader();
                  try
                  {
                     Term aggregateUUIDs = new Term(FieldNames.AGGREGATED_NODE_UUID, "");
                     TermDocs tDocs = reader.termDocs();
                     try
                     {
                        ItemDataConsumer ism = getContext().getItemStateManager();
                        for (Iterator<String> it = removedNodeIds.iterator(); it.hasNext();)
                        {
                           String id = it.next();
                           aggregateUUIDs = aggregateUUIDs.createTerm(id);
                           tDocs.seek(aggregateUUIDs);
                           while (tDocs.next())
                           {
                              Document doc = reader.document(tDocs.doc(), FieldSelectors.UUID);
                              String uuid = doc.get(FieldNames.UUID);
                              ItemData itd = ism.getItemData(uuid);
                              if (itd == null)
                              {
                                 continue;
                              }
                              if (!itd.isNode())
                              {
                                 throw new RepositoryException("Item with id:" + uuid + " is not a node");
                              }
                              map.put(uuid, (NodeData)itd);
                              found++;
                           }
                        }
                     }
                     finally
                     {
                        tDocs.close();
                     }
                  }
                  finally
                  {
                     reader.release();

Examples of org.apache.lucene.index.TermDocs

         {
            // process node uuids one-by-one
            try
            {
               String uuid = node.getIdentifier();
               TermDocs docs = indexReader.termDocs(new Term(FieldNames.UUID, uuid));

               if (docs.next())
               {
                  indexedNodes.add(uuid);
                  docs.doc();
                  if (docs.next())
                  {
                     //multiple entries
                     report.logComment("Multiple entires.");
                     report.logBrokenObjectAndSetInconsistency("ID=" + uuid);
                  }
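
Here TermDocs acts as a consistency check: each node UUID is expected to map to exactly one document, so a second successful next() indicates duplicate index entries and the node is reported as broken.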