Package org.apache.lucene.facet.taxonomy

Examples of org.apache.lucene.facet.taxonomy.CategoryPath
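
A CategoryPath represents a single hierarchical category as an ordered list of string components, for example ("Author", "John Doe"). Before the snippets below, here is a minimal sketch of the common ways a CategoryPath is built and inspected, assuming the Lucene 4.x API (public components/length fields, the delimiter-based constructor, subpath and toString(char)):

import org.apache.lucene.facet.taxonomy.CategoryPath;

public class CategoryPathSketch {
  public static void main(String[] args) {
    // Build a path directly from its components.
    CategoryPath author = new CategoryPath("Author", "John Doe");

    // Or parse it from a delimited string.
    CategoryPath parsed = new CategoryPath("Author/John Doe", '/');

    // components and length are public final fields in the 4.x API.
    System.out.println(author.length);          // 2
    System.out.println(author.components[0]);   // Author

    // Render the path back to a string with a chosen delimiter.
    System.out.println(parsed.toString('/'));   // Author/John Doe

    // subpath(n) keeps the first n components, i.e. an ancestor path.
    System.out.println(author.subpath(1).toString('/'));   // Author
  }
}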


   * {@link ValueSource}. The label will be used as the root's
   * {@link FacetResultNode} label.
   */
  @SuppressWarnings("unchecked")
  public RangeFacetRequest(String label, ValueSource valueSource, T... ranges) {
    super(new CategoryPath(label), 1);
    this.ranges = ranges;
    this.valueSource = valueSource;
    this.label = label;
  }
View Full Code Here
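
For context, the constructor above wraps the plain label string in a single-component CategoryPath via super(new CategoryPath(label), 1). A hedged sketch of how such a request might be built, assuming the Lucene 4.x range faceting classes (LongRange, LongFieldSource) and an illustrative "price" field:

import org.apache.lucene.facet.range.LongRange;
import org.apache.lucene.facet.range.RangeFacetRequest;
import org.apache.lucene.queries.function.valuesource.LongFieldSource;

public class PriceRangeRequestSketch {
  // The field name and range labels are made up for illustration.
  static RangeFacetRequest<LongRange> priceRanges() {
    return new RangeFacetRequest<LongRange>(
        "price",                              // becomes new CategoryPath("price")
        new LongFieldSource("price"),         // per-document numeric values to bucket
        new LongRange("0-99",    0L, true, 100L, false),
        new LongRange("100-199", 100L, true, 200L, false),
        new LongRange("200+",    200L, true, Long.MAX_VALUE, true));
  }
}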


        de = _TestUtil.docs(random(), te, liveDocs, de, DocsEnum.FLAG_NONE);
        int cnt = 0;
        while (de.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
          cnt++;
        }
        res.put(new CategoryPath(te.term().utf8ToString().split(delim)), cnt);
      }
    }
    return res;
  }
View Full Code Here
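
The snippet above reconstructs a CategoryPath from an indexed term by splitting the term text on the facet delimiter. A minimal alternative sketch, assuming the 4.x CategoryPath(String, char) constructor and the org.apache.lucene.facet.params package layout, does the same without the intermediate String[]:

import org.apache.lucene.facet.params.FacetIndexingParams;
import org.apache.lucene.facet.taxonomy.CategoryPath;

public class TermToPathSketch {
  // termText is the drill-down term's text, with components joined by the facet delimiter.
  static CategoryPath fromTerm(String termText) {
    return new CategoryPath(termText, FacetIndexingParams.DEFAULT_FACET_DELIM_CHAR);
  }
}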

     * in essence sampling with recounting spends some extra cycles for
     * labeling results for which labels are not required. */
    if (fresNode.label == null) {
      fresNode.label = taxonomyReader.getPath(fresNode.ordinal);
    }
    CategoryPath catPath = fresNode.label;

    Term drillDownTerm = DrillDownQuery.term(searchParams.indexingParams, catPath);
    // TODO (Facet): avoid Multi*?
    Bits liveDocs = MultiFields.getLiveDocs(indexReader);
    int updatedCount = countIntersection(MultiFields.getTermDocsEnum(indexReader, liveDocs,
View Full Code Here

    int upto = ordinals.length; // since we may add ordinals to IntsRef, iterate up to the original length

    Iterator<CategoryPath> iter = categories.iterator();
    for (int i = 0; i < upto; i++) {
      int ordinal = ordinals.ints[i];
      CategoryPath cp = iter.next();
      OrdinalPolicy op = clp.getOrdinalPolicy(cp.components[0]);
      if (op != OrdinalPolicy.NO_PARENTS) {
        // need to add parents too
        int parent = taxoWriter.getParent(ordinal);
        if (parent > 0) {
View Full Code Here
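
Each parent ordinal added here corresponds to a prefix of the category's components; on the CategoryPath side the same walk can be expressed with subpath. A minimal sketch, assuming the 4.x API:

import org.apache.lucene.facet.taxonomy.CategoryPath;

public class ParentPathSketch {
  public static void main(String[] args) {
    CategoryPath cp = new CategoryPath("root", "a", "b");
    // Walk up the hierarchy: root/a/b -> root/a -> root.
    for (int len = cp.length; len > 0; len--) {
      System.out.println(cp.subpath(len).toString('/'));
    }
  }
}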

    Document doc = new Document();
    // Mixup order we add these paths, to verify tie-break
    // order is by label (unicode sort) and has nothing to
    // do w/ order we added them:
    List<CategoryPath> paths = new ArrayList<CategoryPath>();
    paths.add(new CategoryPath("a", "foo"));
    paths.add(new CategoryPath("a", "bar"));
    paths.add(new CategoryPath("a", "zoo"));
    Collections.shuffle(paths, random());

    paths.add(new CategoryPath("b", "baz"));
    paths.add(new CategoryPath("b" + FacetIndexingParams.DEFAULT_FACET_DELIM_CHAR, "bazfoo"));

    dvFields.addFields(doc, paths);

    writer.addDocument(doc);
    if (random().nextBoolean()) {
      writer.commit();
    }

    doc = new Document();
    dvFields.addFields(doc, Collections.singletonList(new CategoryPath("a", "foo")));
    writer.addDocument(doc);

    // NRT open
    IndexSearcher searcher = newSearcher(writer.getReader());
    writer.close();

    List<FacetRequest> requests = new ArrayList<FacetRequest>();
    requests.add(new CountFacetRequest(new CategoryPath("a"), 10));
    requests.add(new CountFacetRequest(new CategoryPath("b"), 10));
    requests.add(new CountFacetRequest(new CategoryPath("b" + FacetIndexingParams.DEFAULT_FACET_DELIM_CHAR), 10));

    final boolean doDimCount = random().nextBoolean();

    CategoryListParams clp = new CategoryListParams() {
        @Override
        public OrdinalPolicy getOrdinalPolicy(String dimension) {
          return doDimCount ? OrdinalPolicy.NO_PARENTS : OrdinalPolicy.ALL_BUT_DIMENSION;
        }
      };

    FacetIndexingParams fip = new FacetIndexingParams(clp);
    FacetSearchParams fsp = new FacetSearchParams(fip, requests);

    // Per-top-reader state:
    SortedSetDocValuesReaderState state = new SortedSetDocValuesReaderState(fip, searcher.getIndexReader());
   
    //SortedSetDocValuesCollector c = new SortedSetDocValuesCollector(state);
    //SortedSetDocValuesCollectorMergeBySeg c = new SortedSetDocValuesCollectorMergeBySeg(state);

    FacetsCollector c = FacetsCollector.create(new SortedSetDocValuesAccumulator(state, fsp));

    searcher.search(new MatchAllDocsQuery(), c);

    //List<FacetResult> results = c.getFacetResults(requests);
    List<FacetResult> results = c.getFacetResults();

    assertEquals(3, results.size());

    int dimCount = doDimCount ? 4 : 0;
    assertEquals("a (" + dimCount + ")\n  foo (2)\n  bar (1)\n  zoo (1)\n", FacetTestUtils.toSimpleString(results.get(0)));

    dimCount = doDimCount ? 1 : 0;
    assertEquals("b (" + dimCount + ")\n  baz (1)\n", FacetTestUtils.toSimpleString(results.get(1)));

    dimCount = doDimCount ? 1 : 0;
    assertEquals("b" + FacetIndexingParams.DEFAULT_FACET_DELIM_CHAR + " (" + dimCount + ")\n  bazfoo (1)\n", FacetTestUtils.toSimpleString(results.get(2)));

    // DrillDown:

    DrillDownQuery q = new DrillDownQuery(fip);
    q.add(new CategoryPath("a", "foo"));
    q.add(new CategoryPath("b", "baz"));
    TopDocs hits = searcher.search(q, 1);
    assertEquals(1, hits.totalHits);

    q = new DrillDownQuery(fip);
    q.add(new CategoryPath("a"));
    hits = searcher.search(q, 1);
    assertEquals(2, hits.totalHits);

    searcher.getIndexReader().close();
    dir.close();
View Full Code Here

    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);

    SortedSetDocValuesFacetFields dvFields = new SortedSetDocValuesFacetFields();

    Document doc = new Document();
    dvFields.addFields(doc, Collections.singletonList(new CategoryPath("a", "foo")));
    writer.addDocument(doc);

    IndexReader r = writer.getReader();
    SortedSetDocValuesReaderState state = new SortedSetDocValuesReaderState(r);

    doc = new Document();
    dvFields.addFields(doc, Collections.singletonList(new CategoryPath("a", "bar")));
    writer.addDocument(doc);

    doc = new Document();
    dvFields.addFields(doc, Collections.singletonList(new CategoryPath("a", "baz")));
    writer.addDocument(doc);

    IndexSearcher searcher = newSearcher(writer.getReader());

    List<FacetRequest> requests = new ArrayList<FacetRequest>();
    requests.add(new CountFacetRequest(new CategoryPath("a"), 10));

    FacetSearchParams fsp = new FacetSearchParams(requests);
   
    FacetsCollector c = FacetsCollector.create(new SortedSetDocValuesAccumulator(state, fsp));
View Full Code Here

    index100Docs(indexDir, taxoDir, fip);
   
    DirectoryReader r = DirectoryReader.open(indexDir);
    TaxonomyReader tr = new DirectoryTaxonomyReader(taxoDir);
   
    CountFacetRequest facetRequest = new CountFacetRequest(new CategoryPath("root"), 10);
    // Setting the depth to 2 should potentially return all categories
    facetRequest.setDepth(2);
    facetRequest.setResultMode(ResultMode.PER_NODE_IN_TREE);

    FacetSearchParams fsp = new FacetSearchParams(fip, facetRequest);
View Full Code Here

    TaxonomyWriter tw = new DirectoryTaxonomyWriter(taxoDir);
   
    FacetFields facetFields = new FacetFields(tw, fip);
    for (int i = 0; i < 100; i++) {
      Document doc = new Document();
      CategoryPath cp = new CategoryPath("root",Integer.toString(i / 10), Integer.toString(i));
      facetFields.addFields(doc, Collections.singletonList(cp));
      w.addDocument(doc);
    }
    IOUtils.close(tw, w);
  }
View Full Code Here

 
  @Override
  protected List<CategoryPath> getCategories(final int doc) {
    return new ArrayList<CategoryPath>() {
      {
        add(new CategoryPath("root", "a", Integer.toString(doc % 10)));
      }
    };
  }
View Full Code Here

    sp.setOversampleFactor(5d);
   
    assertNull("Fixer should be null as the test is for no-fixing",
        sp.getSampleFixer());
    FacetSearchParams fsp = new FacetSearchParams(fip, new CountFacetRequest(
        new CategoryPath("root", "a"), 1));
    SamplingAccumulator accumulator = new SamplingAccumulator(
        new RandomSampler(sp, random()), fsp, indexReader, taxoReader);
   
    // Make sure no complements are in action
    accumulator
View Full Code Here
