Package org.apache.lucene.analysis.core

Examples of org.apache.lucene.analysis.core.SimpleAnalyzer


        JndiRegistry registry = new JndiRegistry(createJndiContext());
        registry.bind("std", new File("target/stdindexDir"));
        registry.bind("load_dir", new File("src/test/resources/sources"));
        registry.bind("stdAnalyzer", new StandardAnalyzer(Version.LUCENE_46));
        registry.bind("simple", new File("target/simpleindexDir"));
        registry.bind("simpleAnalyzer", new SimpleAnalyzer(Version.LUCENE_46));
        registry.bind("whitespace", new File("target/whitespaceindexDir"));
        registry.bind("whitespaceAnalyzer", new WhitespaceAnalyzer(Version.LUCENE_46));
        return registry;
    }
View Full Code Here


public class TestPerFieldAnalyzerWrapper extends BaseTokenStreamTestCase {
  public void testPerField() throws Exception {
    String text = "Qwerty";

    Map<String, Analyzer> analyzerPerField = new HashMap<String, Analyzer>();
    analyzerPerField.put("special", new SimpleAnalyzer(TEST_VERSION_CURRENT));

    PerFieldAnalyzerWrapper analyzer =
              new PerFieldAnalyzerWrapper(new WhitespaceAnalyzer(TEST_VERSION_CURRENT), analyzerPerField);

    TokenStream tokenStream = analyzer.tokenStream("field", text);
View Full Code Here

        .
    */

    @Override
    public Analyzer open(Assembler a, Resource root, Mode mode) {
      return new SimpleAnalyzer(TextIndexLucene.VER);
    }
View Full Code Here

public class TestPerFieldAnalyzerWrapper extends BaseTokenStreamTestCase {
  public void testPerField() throws Exception {
    String text = "Qwerty";

    Map<String, Analyzer> analyzerPerField = new HashMap<String, Analyzer>();
    analyzerPerField.put("special", new SimpleAnalyzer(TEST_VERSION_CURRENT));

    PerFieldAnalyzerWrapper analyzer =
              new PerFieldAnalyzerWrapper(new WhitespaceAnalyzer(TEST_VERSION_CURRENT), analyzerPerField);

    TokenStream tokenStream = analyzer.tokenStream("field",
View Full Code Here

  private Directory _directory;
 
  public VocabularySearcher(File dir) throws IOException {
                _directory = new SimpleFSDirectory(new File(dir, "luceneIndex"));
                Analyzer a = new SimpleAnalyzer(Version.LUCENE_43);
                IndexWriterConfig conf = new IndexWriterConfig(Version.LUCENE_43,a);               

                writer = new IndexWriter(_directory,conf);
                writer.commit();
                r = DirectoryReader.open(_directory);
View Full Code Here

    BooleanQuery q = new BooleanQuery();
    q.add(q1, Occur.MUST);
    q.add(q2, Occur.MUST);

    if (s != null && s.trim().length() > 0) {
      SimpleAnalyzer analyzer = new SimpleAnalyzer(Version.LUCENE_36);
      if (s.indexOf(":") == -1) {
        // the query we need:
        // "projectId":projectId AND "type":type AND ("prefix":s* OR
        // "localPart":s* OR "label":s* OR "description":s*)
        BooleanQuery q3 = new BooleanQuery();
        q3.add(new WildcardQuery(new Term("prefix", s + "*")),
            Occur.SHOULD);

        TokenStream stream = analyzer.tokenStream("localPart",
            new StringReader(s));
        // get the TermAttribute from the TokenStream
        CharTermAttribute termAtt = (CharTermAttribute) stream
            .addAttribute(CharTermAttribute.class);

        stream.reset();
               
        while (stream.incrementToken()) {
          String tmp = termAtt.toString() + "*";
          q3.add(new WildcardQuery(new Term("localPart", tmp)),
              Occur.SHOULD);
        }
        stream.close();
        stream.end();

        stream = analyzer.tokenStream("description",
            new StringReader(s));
        // get the TermAttribute from the TokenStream
        termAtt = (CharTermAttribute) stream
            .addAttribute(CharTermAttribute.class);

        stream.reset();
        while (stream.incrementToken()) {
          String tmp = termAtt.toString() + "*";
          q3.add(new WildcardQuery(new Term("description", tmp)),
              Occur.SHOULD);
        }
        stream.close();
        stream.end();

        stream = analyzer.tokenStream("label", new StringReader(s));
        // get the TermAttribute from the TokenStream
        termAtt = (CharTermAttribute) stream
            .addAttribute(CharTermAttribute.class);

        stream.reset();
        while (stream.incrementToken()) {
          String tmp = termAtt.toString() + "*";
          q3.add(new WildcardQuery(new Term("label", tmp)),
              Occur.SHOULD);
        }
        stream.close();
        stream.end();

        q.add(q3, Occur.MUST);
        return q;
      } else {
        // the query we need:
        // "projectId":projectId AND "type":type AND ("prefix":p1 AND
        // "localPart":s*)
        String p1 = s.substring(0, s.indexOf(":"));
        String p2 = s.substring(s.indexOf(":") + 1);

        BooleanQuery q3 = new BooleanQuery();
        q3.add(new TermQuery(new Term("prefix", p1)), Occur.SHOULD);

        BooleanQuery q4 = new BooleanQuery();

        TokenStream stream = analyzer.tokenStream("localPart",
            new StringReader(p2));
        // get the TermAttribute from the TokenStream
        CharTermAttribute termAtt = (CharTermAttribute) stream
            .addAttribute(CharTermAttribute.class);

View Full Code Here

    System.out.println( "Going to create fake index..." );

    FSDirectory directory = FSDirectory.open(
        new File( getIndexBaseDir(), Detective.class.getCanonicalName() )
    );
    SimpleAnalyzer analyzer = new SimpleAnalyzer( Version.LUCENE_CURRENT );
    IndexWriterConfig cfg = new IndexWriterConfig(Version.LUCENE_CURRENT, analyzer);
    IndexWriter iw = new IndexWriter( directory, cfg );
    IndexFillRunnable filler = new IndexFillRunnable( iw );
    ThreadPoolExecutor executor = (ThreadPoolExecutor) Executors.newFixedThreadPool( WORKER_THREADS );
    for ( int batch = 0; batch <= INDEX_ELEMENTS; batch++ ) {
View Full Code Here

  private void assertFindsN(int expectedToFind, String queryString) throws ParseException {
    openSession().beginTransaction();
    try {
      FullTextSession fullTextSession = Search.getFullTextSession( getSession() );
      QueryParser queryParser = new QueryParser( TestConstants.getTargetLuceneVersion(), "", new SimpleAnalyzer( TestConstants.getTargetLuceneVersion() ) );
      Query query = queryParser.parse( queryString );
      FullTextQuery fullTextQuery = fullTextSession.createFullTextQuery(
          query,
          LargeDocument.class
      );
View Full Code Here

    // The equipment field is the manufacturer field  in the
    // Departments entity after being massaged by passing it
    // through the EquipmentType class. This field is in
    // the Lucene document but not in the Department entity itself.
    QueryParser parser = new QueryParser( TestConstants.getTargetLuceneVersion(), "equipment", new SimpleAnalyzer( TestConstants.getTargetLuceneVersion() ) );

    // Check the second ClassBridge annotation
    Query query = parser.parse( "equiptype:Cisco" );
    org.hibernate.search.FullTextQuery hibQuery = session.createFullTextQuery( query, Departments.class );
    List<Departments> result = hibQuery.list();
    assertNotNull( result );
    assertEquals( "incorrect number of results returned", 2, result.size() );
    for ( Departments d : result ) {
      assertEquals( "incorrect manufacturer", "C", d.getManufacturer() );
    }

    // No data cross-ups.
    query = parser.parse( "branchnetwork:Kent Lewin" );
    hibQuery = session.createFullTextQuery( query, Departments.class );
    result = hibQuery.list();
    assertNotNull( result );
    assertTrue( "problem with field cross-ups", result.size() == 0 );

    // Non-ClassBridge field.
    parser = new QueryParser( TestConstants.getTargetLuceneVersion(), "branchHead", new SimpleAnalyzer( TestConstants.getTargetLuceneVersion() ) );
    query = parser.parse( "branchHead:Kent Lewin" );
    hibQuery = session.createFullTextQuery( query, Departments.class );
    result = hibQuery.list();
    assertNotNull( result );
    assertTrue( "incorrect entity returned, wrong branch head", result.size() == 1 );
    assertEquals( "incorrect entity returned", "Kent Lewin", ( result.get( 0 ) ).getBranchHead() );

    // Check other ClassBridge annotation.
    parser = new QueryParser( TestConstants.getTargetLuceneVersion(), "branchnetwork", new SimpleAnalyzer( TestConstants.getTargetLuceneVersion() ) );
    query = parser.parse( "branchnetwork:st. george 1D" );
    hibQuery = session.createFullTextQuery( query, Departments.class );
    result = hibQuery.list();
    assertNotNull( result );
    assertEquals( "incorrect entity returned, wrong network", "1D", ( result.get( 0 ) ).getNetwork() );
View Full Code Here

public class TestPerFieldAnalyzerWrapper extends BaseTokenStreamTestCase {
  public void testPerField() throws Exception {
    String text = "Qwerty";

    Map<String, Analyzer> analyzerPerField = new HashMap<>();
    analyzerPerField.put("special", new SimpleAnalyzer(TEST_VERSION_CURRENT));

    PerFieldAnalyzerWrapper analyzer =
              new PerFieldAnalyzerWrapper(new WhitespaceAnalyzer(TEST_VERSION_CURRENT), analyzerPerField);

    TokenStream tokenStream = analyzer.tokenStream("field", text);
View Full Code Here

TOP

Related Classes of org.apache.lucene.analysis.core.SimpleAnalyzer

Copyright © 2018 www.massapicom. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc. and is owned by Oracle Inc. Contact: coftware@gmail.com.