Examples of CharTermAttribute
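CharTermAttribute exposes the term text of the current token while a TokenStream is consumed. Every snippet below follows the same contract: obtain the attribute, reset(), loop on incrementToken(), then end() and close(). For orientation, here is a minimal self-contained version of that loop. This is a sketch rather than one of the indexed examples; it assumes a Lucene version where WhitespaceAnalyzer has a no-argument constructor (older 4.x builds take a Version argument), and the field name "body" is an arbitrary illustrative choice.

    import java.io.IOException;
    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.core.WhitespaceAnalyzer;
    import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

    public class CharTermAttributeDemo {
      public static void main(String[] args) throws IOException {
        Analyzer analyzer = new WhitespaceAnalyzer();
        TokenStream ts = analyzer.tokenStream("body", "the quick brown fox");
        // addAttribute() registers the attribute (or returns the existing instance);
        // that one instance is updated in place for every token.
        CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class);
        try {
          ts.reset();                 // mandatory before the first incrementToken()
          while (ts.incrementToken()) {
            // toString() copies the term; buffer()/length() give zero-copy access
            System.out.println(termAtt.toString());
          }
          ts.end();                   // records end-of-stream state such as final offsets
        } finally {
          ts.close();                 // always release the stream, even on failure
        }
      }
    }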


Examples of org.apache.lucene.analysis.tokenattributes.CharTermAttribute: collecting fuzzy variants of each analyzed query term

    // … (preceding code omitted)
    if (terms == null) {
      return;
    }
    TokenStream ts = analyzer.tokenStream(f.fieldName, f.queryString);
    try {
      CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class);

      int corpusNumDocs = reader.numDocs();
      HashSet<String> processedTerms = new HashSet<String>();
      ts.reset();
      while (ts.incrementToken()) {
        String term = termAtt.toString();
        if (!processedTerms.contains(term)) {
          processedTerms.add(term);
          ScoreTermQueue variantsQ = new ScoreTermQueue(MAX_VARIANTS_PER_TERM); // maximum number of variants considered for any one term
          float minScore = 0;
          Term startTerm = new Term(f.fieldName, term);
          // … (remainder omitted)

Examples of org.apache.lucene.analysis.tokenattributes.CharTermAttribute: draining a TokenFilter into a List of term strings

    assertEquals("enfin", tas.get(7));
  }

  private List<String> filter(TokenFilter filter) throws IOException {
    List<String> tas = new ArrayList<String>();
    CharTermAttribute termAtt = filter.getAttribute(CharTermAttribute.class);
    filter.reset();
    while (filter.incrementToken()) {
      tas.add(termAtt.toString());
    }
    filter.end();
    filter.close();
    return tas;
  }
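Note the two accessors that appear across these examples: addAttribute() creates the attribute if the stream does not already carry it, while getAttribute(), used above, only returns an existing instance and throws IllegalArgumentException when the attribute is absent. getAttribute() is the natural choice in tests where the filter chain is known to provide the attribute.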

Examples of org.apache.lucene.analysis.tokenattributes.CharTermAttribute: dumping each term together with its morphosyntactic tags

    // … (preceding code omitted)
    TokenStream ts = getTestAnalyzer().tokenStream("dummy", input);
    try {
      ts.reset();

      MorphosyntacticTagsAttribute attribute = ts.getAttribute(MorphosyntacticTagsAttribute.class);
      CharTermAttribute charTerm = ts.getAttribute(CharTermAttribute.class);
      while (ts.incrementToken()) {
        System.out.println(charTerm.toString() + " => " + attribute.getTags());
      }
      ts.end();
    } finally {
      IOUtils.closeWhileHandlingException(ts);
    }
    // … (remainder omitted)

Examples of org.apache.lucene.analysis.tokenattributes.CharTermAttribute: reusing an Analyzer across token streams (Morfologik stemming)

  /** Test reuse of MorfologikFilter with leftover stems. */
  public final void testLeftoverStems() throws IOException {
    Analyzer a = getTestAnalyzer();
    TokenStream ts_1 = a.tokenStream("dummy", "liście");
    try {
      CharTermAttribute termAtt_1 = ts_1.getAttribute(CharTermAttribute.class);
      ts_1.reset();
      ts_1.incrementToken();
      assertEquals("first stream", "liście", termAtt_1.toString());
      ts_1.end();
    } finally {
      IOUtils.closeWhileHandlingException(ts_1);
    }

    TokenStream ts_2 = a.tokenStream("dummy", "danych");
    try {
      CharTermAttribute termAtt_2 = ts_2.getAttribute(CharTermAttribute.class);
      ts_2.reset();
      ts_2.incrementToken();
      assertEquals("second stream", "dany", termAtt_2.toString());
      ts_2.end();
    } finally {
      IOUtils.closeWhileHandlingException(ts_2);
    }
  }
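The point of this test is Analyzer reuse: both tokenStream() calls go through the same Analyzer instance, which recycles its TokenStreamComponents between calls, so stems buffered while processing the first stream must not leak into the second.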

Examples of org.apache.lucene.analysis.tokenattributes.CharTermAttribute: DelimitedPayloadTokenFilter with an IdentityEncoder

  public void testPayloads() throws Exception {
    String test = "The quick|JJ red|JJ fox|NN jumped|VB over the lazy|JJ brown|JJ dogs|NN";
    DelimitedPayloadTokenFilter filter = new DelimitedPayloadTokenFilter
      (new MockTokenizer(new StringReader(test), MockTokenizer.WHITESPACE, false),
       DelimitedPayloadTokenFilter.DEFAULT_DELIMITER, new IdentityEncoder());
    CharTermAttribute termAtt = filter.getAttribute(CharTermAttribute.class);
    PayloadAttribute payAtt = filter.getAttribute(PayloadAttribute.class);
    filter.reset();
    assertTermEquals("The", filter, termAtt, payAtt, null);
    assertTermEquals("quick", filter, termAtt, payAtt, "JJ".getBytes("UTF-8"));
    assertTermEquals("red", filter, termAtt, payAtt, "JJ".getBytes("UTF-8"));
    // … (remainder omitted)
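In these payload tests each token carries its payload inline after the delimiter ("quick|JJ"): DelimitedPayloadTokenFilter strips the delimiter and suffix from the term and passes the suffix to the configured PayloadEncoder. MockTokenizer belongs to Lucene's test framework; production code would feed the filter from a regular tokenizer. A minimal sketch, assuming a Lucene version where WhitespaceTokenizer has a no-argument constructor:

    Tokenizer tok = new WhitespaceTokenizer();
    tok.setReader(new StringReader("quick|JJ fox|NN"));
    TokenStream ts = new DelimitedPayloadTokenFilter(
        tok, DelimitedPayloadTokenFilter.DEFAULT_DELIMITER, new IdentityEncoder());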

Examples of org.apache.lucene.analysis.tokenattributes.CharTermAttribute: DelimitedPayloadTokenFilter with a FloatEncoder


  public void testFloatEncoding() throws Exception {
    String test = "The quick|1.0 red|2.0 fox|3.5 jumped|0.5 over the lazy|5 brown|99.3 dogs|83.7";
    DelimitedPayloadTokenFilter filter = new DelimitedPayloadTokenFilter(new MockTokenizer(new StringReader(test), MockTokenizer.WHITESPACE, false), '|', new FloatEncoder());
    CharTermAttribute termAtt = filter.getAttribute(CharTermAttribute.class);
    PayloadAttribute payAtt = filter.getAttribute(PayloadAttribute.class);
    filter.reset();
    assertTermEquals("The", filter, termAtt, payAtt, null);
    assertTermEquals("quick", filter, termAtt, payAtt, PayloadHelper.encodeFloat(1.0f));
    assertTermEquals("red", filter, termAtt, payAtt, PayloadHelper.encodeFloat(2.0f));
    // … (remainder omitted)

Examples of org.apache.lucene.analysis.tokenattributes.CharTermAttribute: DelimitedPayloadTokenFilter with an IntegerEncoder

  } // end of the preceding test

  public void testIntEncoding() throws Exception {
    String test = "The quick|1 red|2 fox|3 jumped over the lazy|5 brown|99 dogs|83";
    DelimitedPayloadTokenFilter filter = new DelimitedPayloadTokenFilter(new MockTokenizer(new StringReader(test), MockTokenizer.WHITESPACE, false), '|', new IntegerEncoder());
    CharTermAttribute termAtt = filter.getAttribute(CharTermAttribute.class);
    PayloadAttribute payAtt = filter.getAttribute(PayloadAttribute.class);
    filter.reset();
    assertTermEquals("The", filter, termAtt, payAtt, null);
    assertTermEquals("quick", filter, termAtt, payAtt, PayloadHelper.encodeInt(1));
    assertTermEquals("red", filter, termAtt, payAtt, PayloadHelper.encodeInt(2));
    // … (remainder omitted)

Examples of org.apache.lucene.analysis.tokenattributes.CharTermAttribute: a helper that asserts term text and payload bytes

    // … (end of the preceding test)
    filter.end();
    filter.close();
  }

  void assertTermEquals(String expected, TokenStream stream, byte[] expectPay) throws Exception {
    CharTermAttribute termAtt = stream.getAttribute(CharTermAttribute.class);
    PayloadAttribute payloadAtt = stream.getAttribute(PayloadAttribute.class);
    assertTrue(stream.incrementToken());
    assertEquals(expected, termAtt.toString());
    BytesRef payload = payloadAtt.getPayload();
    if (payload != null) {
      assertTrue(payload.length + " does not equal: " + expectPay.length, payload.length == expectPay.length);
      for (int i = 0; i < expectPay.length; i++) {
        assertTrue(expectPay[i] + " does not equal: " + payload.bytes[i + payload.offset], expectPay[i] == payload.bytes[i + payload.offset]);
        // … (remainder omitted)

Examples of org.apache.lucene.analysis.tokenattributes.CharTermAttribute: verifying TypeAsPayloadTokenFilter output

  public void test() throws IOException {
    String test = "The quick red fox jumped over the lazy brown dogs";

    TypeAsPayloadTokenFilter nptf = new TypeAsPayloadTokenFilter(new WordTokenFilter(new MockTokenizer(new StringReader(test), MockTokenizer.WHITESPACE, false)));
    int count = 0;
    CharTermAttribute termAtt = nptf.getAttribute(CharTermAttribute.class);
    TypeAttribute typeAtt = nptf.getAttribute(TypeAttribute.class);
    PayloadAttribute payloadAtt = nptf.getAttribute(PayloadAttribute.class);
    nptf.reset();
    while (nptf.incrementToken()) {
      assertTrue(typeAtt.type() + " does not match the expected type", typeAtt.type().equals(String.valueOf(Character.toUpperCase(termAtt.buffer()[0]))));
      assertTrue("payload is null and it shouldn't be", payloadAtt.getPayload() != null);
      String type = payloadAtt.getPayload().utf8ToString(); // respects the BytesRef offset/length
      assertTrue(type + " is not equal to " + typeAtt.type(), type.equals(typeAtt.type()));
      count++;
    }
    // … (remainder omitted)

Examples of org.apache.lucene.analysis.tokenattributes.CharTermAttribute: checking NGramTokenFilter output against code-point boundaries

    // … (preceding code omitted; s is the input string under test)
    final int codePointCount = s.codePointCount(0, s.length());
    final int minGram = _TestUtil.nextInt(random(), 1, 3);
    final int maxGram = _TestUtil.nextInt(random(), minGram, 10);
    TokenStream tk = new KeywordTokenizer(new StringReader(s));
    tk = new NGramTokenFilter(TEST_VERSION_CURRENT, tk, minGram, maxGram);
    final CharTermAttribute termAtt = tk.addAttribute(CharTermAttribute.class);
    final OffsetAttribute offsetAtt = tk.addAttribute(OffsetAttribute.class);
    tk.reset();
    for (int start = 0; start < codePointCount; ++start) {
      for (int end = start + minGram; end <= Math.min(codePointCount, start + maxGram); ++end) {
        assertTrue(tk.incrementToken());
        assertEquals(0, offsetAtt.startOffset());
        assertEquals(s.length(), offsetAtt.endOffset());
        final int startIndex = Character.offsetByCodePoints(s, 0, start);
        final int endIndex = Character.offsetByCodePoints(s, 0, end);
        assertEquals(s.substring(startIndex, endIndex), termAtt.toString());
      }
    }
    assertFalse(tk.incrementToken());
  }
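The code-point bookkeeping here is deliberate: since Lucene 4.4, NGramTokenFilter measures gram lengths in Unicode code points rather than Java chars, so the test translates code-point positions back into char indices with Character.offsetByCodePoints before slicing the expected substring. The filter also leaves offsets untouched, which is why every gram reports the start and end offsets of the whole input.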