Package org.apache.lucene.analysis

Examples of org.apache.lucene.analysis.LowerCaseFilter
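
LowerCaseFilter is a TokenFilter that normalizes each token's text to lower case. The analyzer snippets below all place it near the front of their filter chains, before stop-word filtering and stemming. For orientation, here is a minimal, self-contained sketch against the Lucene 2.9-era API these snippets appear to target; the WhitespaceTokenizer, the sample text, and the TermAttribute consumption loop are illustrative choices of this sketch, not taken from the snippets themselves.

import java.io.Reader;
import java.io.StringReader;

import org.apache.lucene.analysis.LowerCaseFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.WhitespaceTokenizer;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;

public class LowerCaseFilterSketch {

  /** Builds a two-stage chain: whitespace tokenization followed by lower-casing. */
  static TokenStream chain(Reader reader) {
    TokenStream result = new WhitespaceTokenizer(reader);
    result = new LowerCaseFilter(result);   // lower-cases each token's term text
    return result;
  }

  public static void main(String[] args) throws Exception {
    TokenStream ts = chain(new StringReader("Hello WORLD"));
    TermAttribute term = ts.addAttribute(TermAttribute.class);
    while (ts.incrementToken()) {
      System.out.println(term.term());      // prints "hello" then "world"
    }
    ts.close();
  }
}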


   *         {@link PersianNormalizationFilter} and Persian Stop words
   */
  @Override
  public TokenStream tokenStream(String fieldName, Reader reader) {
    TokenStream result = new ArabicLetterTokenizer(reader);
    result = new LowerCaseFilter(result);
    result = new ArabicNormalizationFilter(result);
    /* additional persian-specific normalization */
    result = new PersianNormalizationFilter(result);
    /*
     * the order here is important: the stopword list is normalized with the
     * above!
     */
    result = new StopFilter(StopFilter.getEnablePositionIncrementsVersionDefault(matchVersion),
                            result, stoptable);
    return result;
  }


  public TokenStream reusableTokenStream(String fieldName, Reader reader)
      throws IOException {
    SavedStreams streams = (SavedStreams) getPreviousTokenStream();
    if (streams == null) {
      streams = new SavedStreams();
      streams.source = new ArabicLetterTokenizer(reader);
      streams.result = new LowerCaseFilter(streams.source);
      streams.result = new ArabicNormalizationFilter(streams.result);
      /* additional persian-specific normalization */
      streams.result = new PersianNormalizationFilter(streams.result);
      /*
       * the order here is important: the stopword list is normalized with the
       * above!
       */
      streams.result = new StopFilter(StopFilter.getEnablePositionIncrementsVersionDefault(matchVersion),
                                      streams.result, stoptable);
      setPreviousTokenStream(streams);
    } else {
      streams.source.reset(reader);
    }
    return streams.result;
  }
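
The reusableTokenStream variants in this listing all follow the same Lucene 2.9-era reuse pattern: a per-thread SavedStreams holder is cached through getPreviousTokenStream()/setPreviousTokenStream(), and later calls only re-point the saved Tokenizer at the new Reader. Below is a stripped-down sketch of that pattern; the class name ReusableLowerCaseAnalyzer is hypothetical, and a plain whitespace/lower-case chain stands in for the language-specific filters.

import java.io.IOException;
import java.io.Reader;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.LowerCaseFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.WhitespaceTokenizer;

public class ReusableLowerCaseAnalyzer extends Analyzer {  // hypothetical name

  /** Per-thread holder stored via setPreviousTokenStream(). */
  private static class SavedStreams {
    Tokenizer source;    // reset with the new Reader on reuse
    TokenStream result;  // end of the filter chain handed back to callers
  }

  @Override
  public TokenStream tokenStream(String fieldName, Reader reader) {
    // Non-reusing variant: build a fresh chain on every call.
    return new LowerCaseFilter(new WhitespaceTokenizer(reader));
  }

  @Override
  public TokenStream reusableTokenStream(String fieldName, Reader reader)
      throws IOException {
    SavedStreams streams = (SavedStreams) getPreviousTokenStream();
    if (streams == null) {
      // First call on this thread: build the chain once and remember it.
      streams = new SavedStreams();
      streams.source = new WhitespaceTokenizer(reader);
      streams.result = new LowerCaseFilter(streams.source);
      setPreviousTokenStream(streams);
    } else {
      // Later calls: re-point the saved tokenizer at the new input.
      streams.source.reset(reader);
    }
    return streams.result;
  }
}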

   *          {@link BrazilianStemFilter}.
   */
  @Override
  public final TokenStream tokenStream(String fieldName, Reader reader) {
    TokenStream result = new StandardTokenizer( matchVersion, reader );
    result = new LowerCaseFilter( result );
    result = new StandardFilter( result );
    result = new StopFilter( StopFilter.getEnablePositionIncrementsVersionDefault(matchVersion),
                                         result, stoptable );
    result = new BrazilianStemFilter( result, excltable );
    return result;
  }

  public TokenStream reusableTokenStream(String fieldName, Reader reader)
      throws IOException {
      SavedStreams streams = (SavedStreams) getPreviousTokenStream();
      if (streams == null) {
        streams = new SavedStreams();
        streams.source = new StandardTokenizer(matchVersion, reader);
        streams.result = new LowerCaseFilter(streams.source);
        streams.result = new StandardFilter(streams.result);
        streams.result = new StopFilter(StopFilter.getEnablePositionIncrementsVersionDefault(matchVersion),
                                        streams.result, stoptable);
        streams.result = new BrazilianStemFilter(streams.result, excltable);
        setPreviousTokenStream(streams);
      } else {
        streams.source.reset(reader);
      }
      return streams.result;
  }

   *            and {@link ArabicStemFilter}.
   */
  @Override
  public final TokenStream tokenStream(String fieldName, Reader reader) {
    TokenStream result = new ArabicLetterTokenizer( reader );
    result = new LowerCaseFilter(result);
    // the order here is important: the stopword list is not normalized!
    result = new StopFilter( StopFilter.getEnablePositionIncrementsVersionDefault(matchVersion),
                             result, stoptable );
    result = new ArabicNormalizationFilter( result );
    result = new ArabicStemFilter( result );
    return result;
  }

  public TokenStream reusableTokenStream(String fieldName, Reader reader)
      throws IOException {
    SavedStreams streams = (SavedStreams) getPreviousTokenStream();
    if (streams == null) {
      streams = new SavedStreams();
      streams.source = new ArabicLetterTokenizer(reader);
      streams.result = new LowerCaseFilter(streams.source);
      // the order here is important: the stopword list is not normalized!
      streams.result = new StopFilter(StopFilter.getEnablePositionIncrementsVersionDefault(matchVersion),
                                      streams.result, stoptable);
      streams.result = new ArabicNormalizationFilter(streams.result);
      streams.result = new ArabicStemFilter(streams.result);
      setPreviousTokenStream(streams);
    } else {
      streams.source.reset(reader);
    }
    return streams.result;
  }

    @Override
    public TokenStream tokenStream(String fieldName, Reader reader) {
      TokenStream result = new StandardTokenizer(Version.LUCENE_CURRENT, reader);
      result = new TestFilter(result);
      result = new LowerCaseFilter(result);
      return result;
    }

    @Override
    public TokenStream tokenStream(String fieldName, Reader reader) {
      TokenStream result = new StandardTokenizer(Version.LUCENE_CURRENT, reader);
      result = new TestPosIncrementFilter(result);
      result = new LowerCaseFilter(result);
      return result;
    }

   */
  @Override
  public final TokenStream tokenStream( String fieldName, Reader reader ) {
    TokenStream result = new StandardTokenizer( matchVersion, reader );
    result = new StandardFilter( result );
    result = new LowerCaseFilter( result );
    result = new StopFilter( StopFilter.getEnablePositionIncrementsVersionDefault(matchVersion),
                                         result, stoptable );
    return result;
  }
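
The chain above (StandardTokenizer, StandardFilter, LowerCaseFilter, StopFilter) relies on a stoptable built elsewhere in the analyzer. As a hedged sketch of how such a chain might be assembled on its own, with an illustrative stop-word set rather than the analyzer's real list (the class name ChainDemo and the chosen stop words are assumptions of this sketch):

import java.io.Reader;
import java.util.Set;

import org.apache.lucene.analysis.LowerCaseFilter;
import org.apache.lucene.analysis.StopFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.standard.StandardFilter;
import org.apache.lucene.analysis.standard.StandardTokenizer;
import org.apache.lucene.util.Version;

public class ChainDemo {

  // Illustrative stop words only; the real analyzers load language-specific lists.
  private static final Set STOPWORDS =
      StopFilter.makeStopSet(new String[] { "a", "of", "the" });

  /** StandardTokenizer -> StandardFilter -> LowerCaseFilter -> StopFilter. */
  public static TokenStream chain(Version matchVersion, Reader reader) {
    TokenStream result = new StandardTokenizer(matchVersion, reader);
    result = new StandardFilter(result);
    result = new LowerCaseFilter(result);
    result = new StopFilter(StopFilter.getEnablePositionIncrementsVersionDefault(matchVersion),
                            result, STOPWORDS);
    return result;
  }
}

A caller would pass the analyzer's matchVersion, e.g. Version.LUCENE_CURRENT as in the test snippets above.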

  public TokenStream reusableTokenStream(String fieldName, Reader reader)
      throws IOException {
      SavedStreams streams = (SavedStreams) getPreviousTokenStream();
      if (streams == null) {
        streams = new SavedStreams();
        streams.source = new StandardTokenizer(matchVersion, reader);
        streams.result = new StandardFilter(streams.source);
        streams.result = new LowerCaseFilter(streams.result);
        streams.result = new StopFilter(StopFilter.getEnablePositionIncrementsVersionDefault(matchVersion),
                                        streams.result, stoptable);
        setPreviousTokenStream(streams);
      } else {
        streams.source.reset(reader);
      }
      return streams.result;
  }
