Examples of WhitespaceTokenizer
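WhitespaceTokenizer splits text at whitespace and emits each run of non-whitespace characters as a token; every example below builds on that behavior. As a starting point, here is a minimal, self-contained sketch of driving the tokenizer directly through the attribute-based TokenStream API (the Version argument and CharTermAttribute assume Lucene 3.1+; the class and variable names are illustrative, not taken from the examples):

import java.io.StringReader;

import org.apache.lucene.analysis.WhitespaceTokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.util.Version;

public class WhitespaceTokenizerDemo {
  public static void main(String[] args) throws Exception {
    // Splits on whitespace only; punctuation stays attached to tokens.
    WhitespaceTokenizer tokenizer = new WhitespaceTokenizer(
        Version.LUCENE_31, new StringReader("hello brave world"));
    CharTermAttribute term = tokenizer.addAttribute(CharTermAttribute.class);
    tokenizer.reset();
    while (tokenizer.incrementToken()) {
      System.out.println(term.toString()); // hello, brave, world
    }
    tokenizer.end();
    tokenizer.close();
  }
}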


Examples of org.apache.lucene.analysis.WhitespaceTokenizer


  public void testDocumentsWriterExceptionThreads() throws Exception {
    Analyzer analyzer = new Analyzer() {
      public TokenStream tokenStream(String fieldName, Reader reader) {
        return new CrashingFilter(fieldName, new WhitespaceTokenizer(reader));
      }
    };

    final int NUM_THREAD = 3;
    final int NUM_ITER = 100;
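NUM_THREAD and NUM_ITER drive the rest of the test (not shown): the anonymous Analyzer wraps the WhitespaceTokenizer in a CrashingFilter, a helper defined elsewhere in the same test that throws partway through a token stream, so the test can verify that concurrent indexing threads recover from analysis exceptions.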

Examples of org.apache.lucene.analysis.WhitespaceTokenizer

    w.addDocument(doc);

    Analyzer analyzer = new Analyzer() {
      public TokenStream tokenStream(String fieldName, Reader reader) {
        return new CrashingFilter(fieldName, new WhitespaceTokenizer(reader));
      }
    };

    Document crashDoc = new Document();
    crashDoc.add(new Field("crash", "do it on token 4", Field.Store.YES,
        Field.Index.ANALYZED));

Examples of org.apache.lucene.analysis.WhitespaceTokenizer
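This excerpt (from a binary-field test) assumes an IndexWriter w and a byte[] b declared earlier. A hypothetical setup consistent with the pre-3.1 calls below might look like this fragment (the Directory choice, the analyzer, and the MaxFieldLength argument are assumptions):

    Directory dir = new RAMDirectory();
    IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(),
        IndexWriter.MaxFieldLength.UNLIMITED);
    byte[] b = new byte[50];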

    for(int i=0;i<50;i++)
      b[i] = (byte) (i+77);

    Document doc = new Document();
    Field f = new Field("binary", b, 10, 17, Field.Store.YES);
    f.setTokenStream(new WhitespaceTokenizer(new StringReader("doc1field1")));
    Field f2 = new Field("string", "value", Field.Store.YES,Field.Index.ANALYZED);
    f2.setTokenStream(new WhitespaceTokenizer(new StringReader("doc1field2")));
    doc.add(f);
    doc.add(f2);
    w.addDocument(doc);
   
    // add 2 docs to test in-memory merging
    f.setTokenStream(new WhitespaceTokenizer(new StringReader("doc2field1")));
    f2.setTokenStream(new WhitespaceTokenizer(new StringReader("doc2field2")));
    w.addDocument(doc);
 
    // force segment flush so we can force a segment merge with doc3 later.
    w.commit();

    f.setTokenStream(new WhitespaceTokenizer(new StringReader("doc3field1")));
    f2.setTokenStream(new WhitespaceTokenizer(new StringReader("doc3field2")));

    w.addDocument(doc);
    w.commit();
    w.optimize();   // force segment merge.
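As the inline comments note, commit() flushes the buffered documents into an on-disk segment and the final optimize() forces a segment merge, so the pre-analyzed token streams supplied via setTokenStream are exercised through both the in-memory merge (doc1/doc2) and the segment merge with doc3.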

Examples of org.apache.lucene.analysis.WhitespaceTokenizer

            fieldToData.put(field, new PayloadData(numFieldInstancesToSkip, data, offset, length));
        }
       
        public TokenStream tokenStream(String fieldName, Reader reader) {
            PayloadData payload = (PayloadData) fieldToData.get(fieldName);
            TokenStream ts = new WhitespaceTokenizer(reader);
            if (payload != null) {
                if (payload.numFieldInstancesToSkip == 0) {
                    ts = new PayloadFilter(ts, payload.data, payload.offset, payload.length);
                } else {
                    payload.numFieldInstancesToSkip--;
                }
            }
            return ts;
        }
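This analyzer attaches payloads selectively: only fields registered in fieldToData get their WhitespaceTokenizer output wrapped in a PayloadFilter, and a positive numFieldInstancesToSkip makes the analyzer pass through that many instances of the field before payloads start being attached.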

Examples of org.apache.lucene.analysis.WhitespaceTokenizer

  public void testAlternate() throws IOException {
    Analyzer a = new Analyzer() {
      @Override
      public TokenStream tokenStream(String fieldName, Reader reader) {
        return new ICUNormalizer2Filter(
            new WhitespaceTokenizer(TEST_VERSION_CURRENT, reader),
            /* specify nfc with decompose to get nfd */
            Normalizer2.getInstance(null, "nfc", Normalizer2.Mode.DECOMPOSE));
      }
    };
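As the inline comment says, requesting the nfc data from Normalizer2 in DECOMPOSE mode yields NFD behavior: the canonical decomposition step is applied without recomposition, so the filter emits canonically decomposed tokens.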
   

Examples of org.apache.lucene.analysis.WhitespaceTokenizer

  public void testRandomStrings() throws Exception {
    final Transliterator transform = Transliterator.getInstance("Any-Latin");
    Analyzer a = new ReusableAnalyzerBase() {
      @Override
      protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
        Tokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, reader);
        return new TokenStreamComponents(tokenizer, new ICUTransformFilter(tokenizer, transform));
      }
    };
    checkRandomData(random, a, 1000*RANDOM_MULTIPLIER);
  }
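checkRandomData, inherited from BaseTokenStreamTestCase, feeds randomly generated strings through the analyzer for 1000*RANDOM_MULTIPLIER iterations, a blanket check that the Any-Latin ICUTransformFilter chain handles arbitrary input without corrupting offsets or tokenizer state.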

Examples of org.apache.lucene.analysis.WhitespaceTokenizer
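This is the same binary-field test as above, ported to the versioned API: each WhitespaceTokenizer now takes TEST_VERSION_CURRENT as its first argument, the binary Field constructor drops the Store flag, and the string field is created through the newField test helper.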

    for(int i=0;i<50;i++)
      b[i] = (byte) (i+77);

    Document doc = new Document();
    Field f = new Field("binary", b, 10, 17);
    f.setTokenStream(new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("doc1field1")));
    Field f2 = newField("string", "value", Field.Store.YES,Field.Index.ANALYZED);
    f2.setTokenStream(new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("doc1field2")));
    doc.add(f);
    doc.add(f2);
    w.addDocument(doc);
   
    // add 2 docs to test in-memory merging
    f.setTokenStream(new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("doc2field1")));
    f2.setTokenStream(new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("doc2field2")));
    w.addDocument(doc);
 
    // force segment flush so we can force a segment merge with doc3 later.
    w.commit();

    f.setTokenStream(new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("doc3field1")));
    f2.setTokenStream(new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("doc3field2")));

    w.addDocument(doc);
    w.commit();
    w.optimize();   // force segment merge.
    w.close();

Examples of org.apache.lucene.analysis.WhitespaceTokenizer


  public void testDocumentsWriterExceptions() throws IOException {
    Analyzer analyzer = new Analyzer() {
      public TokenStream tokenStream(String fieldName, Reader reader) {
        return new CrashingFilter(fieldName, new WhitespaceTokenizer(reader));
      }
    };

    for(int i=0;i<2;i++) {
      MockRAMDirectory dir = new MockRAMDirectory();
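This is the single-threaded counterpart of testDocumentsWriterExceptionThreads above; the loop runs the scenario twice against a fresh MockRAMDirectory, with the same CrashingFilter-wrapped WhitespaceTokenizer provoking exceptions during analysis.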

Examples of org.apache.lucene.analysis.core.WhitespaceTokenizer
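Note the import path in the remaining examples: when the analysis components were reorganized into modules in Lucene 4.0, WhitespaceTokenizer moved from org.apache.lucene.analysis to org.apache.lucene.analysis.core.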

import org.apache.lucene.util._TestUtil;

public class DoubleMetaphoneFilterTest extends BaseTokenStreamTestCase {

  public void testSize4FalseInject() throws Exception {
    TokenStream stream = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("international"));
    TokenStream filter = new DoubleMetaphoneFilter(stream, 4, false);
    assertTokenStreamContents(filter, new String[] { "ANTR" });
  }

Examples of org.apache.lucene.analysis.core.WhitespaceTokenizer

    TokenStream filter = new DoubleMetaphoneFilter(stream, 4, false);
    assertTokenStreamContents(filter, new String[] { "ANTR" });
  }

  public void testSize4TrueInject() throws Exception {
    TokenStream stream = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("international"));
    TokenStream filter = new DoubleMetaphoneFilter(stream, 4, true);
    assertTokenStreamContents(filter, new String[] { "international", "ANTR" });
  }
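Read together, the two assertions show what the inject flag controls: with inject=false only the length-4 Double Metaphone code ANTR is emitted, while inject=true keeps the original token international and injects the phonetic code alongside it.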