Package org.apache.lucene.analysis

Examples of org.apache.lucene.analysis.WhitespaceAnalyzer.tokenStream()
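All of the snippets below obtain a TokenStream from WhitespaceAnalyzer.tokenStream() and then consume it. As a quick orientation, here is a minimal, self-contained consumption loop; this is a sketch assuming the Lucene 3.1-era API used in the first snippet (the class name, field name, and input text are illustrative):

import java.io.IOException;
import java.io.StringReader;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.util.Version;

public class TokenStreamDemo {
  public static void main(String[] args) throws IOException {
    WhitespaceAnalyzer analyzer = new WhitespaceAnalyzer(Version.LUCENE_31);
    TokenStream ts = analyzer.tokenStream("f", new StringReader("one two three"));
    // attributes are registered once, then reused for every token
    CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class);
    OffsetAttribute offsetAtt = ts.addAttribute(OffsetAttribute.class);
    ts.reset();
    while (ts.incrementToken()) {
      System.out.println(termAtt + " [" + offsetAtt.startOffset() + "," + offsetAtt.endOffset() + ")");
    }
    ts.end();
    ts.close();
  }
}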


class SimpleQueryConverter extends SpellingQueryConverter {
  @Override
  public Collection<Token> convert(String origQuery) {
    Collection<Token> result = new HashSet<Token>();
    WhitespaceAnalyzer analyzer = new WhitespaceAnalyzer(Version.LUCENE_31);
    TokenStream ts = analyzer.tokenStream("", new StringReader(origQuery));
    // TODO: support custom attributes
    CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class);
    OffsetAttribute offsetAtt = ts.addAttribute(OffsetAttribute.class);
    TypeAttribute typeAtt = ts.addAttribute(TypeAttribute.class);
    FlagsAttribute flagsAtt = ts.addAttribute(FlagsAttribute.class);
    try {
      ts.reset();
      while (ts.incrementToken()) {
        // copy the current attribute state into a standalone Token
        Token tok = new Token(termAtt.toString(), offsetAtt.startOffset(), offsetAtt.endOffset(), typeAtt.type());
        tok.setFlags(flagsAtt.getFlags());
        result.add(tok);
      }
      ts.end();
      ts.close();
    } catch (IOException e) { throw new RuntimeException(e); }
    return result;
  }
}
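For completeness, a hypothetical call site for the converter above (the query string is illustrative, not from the original source):

    SimpleQueryConverter converter = new SimpleQueryConverter();
    for (Token tok : converter.convert("hello world")) {
      // each Token carries term text, offsets, type, and flags
      System.out.println(tok);
    }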


  /**
   * Basic analyzer behavior should be to keep sequential terms in one
   * increment from one another.
   */
  public void testIncrementingPositions() throws Exception {
    Analyzer analyzer = new WhitespaceAnalyzer();
    TokenStream ts = analyzer.tokenStream("field",
                                new StringReader("one two three four five"));

    while (true) {
      Token token = ts.next();
      if (token == null) break;
      // each whitespace-separated term should sit exactly one position
      // after the previous one
      assertEquals(1, token.getPositionIncrement());
    }
  }

 
  public void testTermOffsetsTokenStream() throws Exception {
    String[] multivalued = { "a b c d", "e f g", "h", "i j k l m n" };
    Analyzer a1 = new WhitespaceAnalyzer();
    TermOffsetsTokenStream tots = new TermOffsetsTokenStream(
        a1.tokenStream( "", new StringReader( "a b c d e f g h i j k l m n" ) ) );
    for( String v : multivalued ){
      TokenStream ts1 = tots.getMultiValuedTokenStream( v.length() );
      Analyzer a2 = new WhitespaceAnalyzer();
      TokenStream ts2 = a2.tokenStream( "", new StringReader( v ) );
      Token t1 = new Token();
      Token t2 = new Token();
      // each sub-stream carved out of the concatenated stream should yield
      // the same tokens as analyzing the single value on its own
      for( t1 = ts1.next( t1 ); t1 != null; t1 = ts1.next( t1 ) ){
        t2 = ts2.next( t2 );
        assertEquals( t2, t1 );
      }
    }
  }

  public void testEndOffsetPositionWithCachingTokenFilter() throws Exception {
    MockRAMDirectory dir = new MockRAMDirectory();
    Analyzer analyzer = new WhitespaceAnalyzer();
    IndexWriter w = new IndexWriter(dir, analyzer, IndexWriter.MaxFieldLength.LIMITED);
    Document doc = new Document();
    // CachingTokenFilter lets one analyzed stream back both instances of the
    // field: the cached tokens are replayed when the field is consumed again
    TokenStream stream = new CachingTokenFilter(analyzer.tokenStream("field", new StringReader("abcd   ")));
    Field f = new Field("field", stream, Field.TermVector.WITH_POSITIONS_OFFSETS);
    doc.add(f);
    doc.add(f);
    w.addDocument(doc);
    w.close();
    // the rest of the test opens an IndexReader and checks the term vector
    // positions and offsets recorded for the two field instances
  }

  public void testEndOffsetPositionWithTeeSinkTokenFilter() throws Exception {
    MockRAMDirectory dir = new MockRAMDirectory();
    Analyzer analyzer = new WhitespaceAnalyzer();
    IndexWriter w = new IndexWriter(dir, analyzer, IndexWriter.MaxFieldLength.LIMITED);
    Document doc = new Document();
    // the tee feeds the first field directly; the sink replays the same
    // tokens for the second field without re-analyzing the text
    TeeSinkTokenFilter tee = new TeeSinkTokenFilter(analyzer.tokenStream("field", new StringReader("abcd   ")));
    TokenStream sink = tee.newSinkTokenStream();
    Field f1 = new Field("field", tee, Field.TermVector.WITH_POSITIONS_OFFSETS);
    Field f2 = new Field("field", sink, Field.TermVector.WITH_POSITIONS_OFFSETS);
    doc.add(f1);
    doc.add(f2);
    w.addDocument(doc);
    w.close();
    // the rest of the test checks the recorded term vector offsets
  }

    Directory dir = newDirectory();
    // delegate analysis to WhitespaceAnalyzer, but force a large position
    // increment gap between successive values of the same field
    Analyzer analyzer = new Analyzer(){
      Analyzer a = new WhitespaceAnalyzer( TEST_VERSION_CURRENT );
      @Override
      public TokenStream tokenStream(String fieldName, Reader reader){
        return a.tokenStream(fieldName, reader);
      }
      @Override
      public int getPositionIncrementGap(String fieldName) {
        return 100;
      }
    };
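The 100-position gap above is what keeps phrase and proximity queries from matching across distinct values of a multivalued field. A minimal sketch of the effect, assuming the same 3.x-era API (the field name and values are illustrative):

    Document doc = new Document();
    // the token positions of the two values are separated by
    // getPositionIncrementGap(), so a search for the phrase "two three",
    // which spans the value boundary, will not match this document
    doc.add(new Field("body", "one two", Field.Store.NO, Field.Index.ANALYZED));
    doc.add(new Field("body", "three four", Field.Store.NO, Field.Index.ANALYZED));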

