Package: org.hibernate.search.engine

Usage examples of org.hibernate.search.engine.DocumentExtractor


      int max = max( first, hits );
      Session sess = (Session) this.session;

      int size = max - first + 1 < 0 ? 0 : max - first + 1;
      List<EntityInfo> infos = new ArrayList<EntityInfo>( size );
      DocumentExtractor extractor = new DocumentExtractor( searchFactoryImplementor, indexProjection );
      for (int index = first; index <= max; index++) {
        infos.add( extractor.extract( hits, index ) );
      }
      Loader loader = getLoader( sess, searchFactoryImplementor );
      List list = loader.load( infos.toArray( new EntityInfo[infos.size()] ) );
      if ( resultTransformer == null || loader instanceof ProjectionLoader) {
        //stay consistent with transformTuple which can only be executed during a projection
View Full Code Here


         int first = first();
         int max = max(first, hits);
         int size = max - first + 1 < 0 ? 0 : max - first + 1;
         ids = new ArrayList<CacheEntityId>(size);

         DocumentExtractor extractor = new DocumentExtractor(luceneQuery, searcher, searchFactory, indexProjection);
         for (int index = first; index <= max; index++)
         {
            String documentId = (String) extractor.extract(hits, index).id;
            CacheEntityId id = new CacheEntityId(documentId);
            ids.add(id);
         }

      }
View Full Code Here

      {
         Hits hits = getHits(searcher);
         int first = first();
         int max = max(first, hits);

         DocumentExtractor extractor = new DocumentExtractor(luceneQuery, searcher, searchFactory, indexProjection);

         return new LazyQueryResultIterator(extractor, entityLoader, hits, searcher, searchFactory, first, max, fetchSize);
      }
      catch (IOException e)
      {
View Full Code Here

         int max = max(first, hits);

         int size = max - first + 1 < 0 ? 0 : max - first + 1;

         List<CacheEntityId> ids = new ArrayList<CacheEntityId>(size);
         DocumentExtractor extractor = new DocumentExtractor(luceneQuery, searcher, searchFactory, indexProjection);

         for (int index = first; index <= max; index++)
         {
            String documentId = (String) extractor.extract(hits, index).id;
            CacheEntityId id = new CacheEntityId(documentId);
            ids.add(id);
         }

         List<Object> list = entityLoader.load(ids);
View Full Code Here

      int first = first();
      int max = max( first, queryHits.totalHits );

      int size = max - first + 1 < 0 ? 0 : max - first + 1;
      List<EntityInfo> infos = new ArrayList<EntityInfo>( size );
      DocumentExtractor extractor = new DocumentExtractor(
          queryHits, searchFactoryImplementor, indexProjection, idFieldNames, allowFieldSelectionInProjection
      );
      try {
        for ( int index = first; index <= max; index++ ) {
          infos.add( extractor.extract( index ) );
          //TODO should we measure on each extractor?
          if ( index % 10 == 0 ) timeoutManager.isTimedOut();
        }
      }
      catch ( QueryTimeoutException e ) {
View Full Code Here

    //FIXME: handle null searcher
    try {
      QueryHits queryHits = getQueryHits( searcher, calculateTopDocsRetrievalSize() );
      int first = first();
      int max = max( first, queryHits.totalHits );
      DocumentExtractor extractor = new DocumentExtractor(
          queryHits, searchFactoryImplementor, indexProjection, idFieldNames, allowFieldSelectionInProjection
      );
      Loader loader = getLoader();
      //stop timeout manager, the iterator pace is in the user's hands
      timeoutManager.stop();
View Full Code Here

      int first = first();
      int max = max( first, queryHits.totalHits );

      int size = max - first + 1 < 0 ? 0 : max - first + 1;
      List<EntityInfo> infos = new ArrayList<EntityInfo>( size );
      DocumentExtractor extractor = new DocumentExtractor(
          queryHits, searchFactoryImplementor, indexProjection, idFieldNames, allowFieldSelectionInProjection
      );
      try {
        for ( int index = first; index <= max; index++ ) {
          infos.add( extractor.extract( index ) );
          //TODO should we measure on each extractor?
          if ( index % 10 == 0 ) timeoutManager.isTimedOut();
        }
      }
      catch ( QueryTimeoutException e ) {
View Full Code Here

      int max = max( first, queryHits.totalHits );
      Session sess = ( Session ) this.session;

      int size = max - first + 1 < 0 ? 0 : max - first + 1;
      List<EntityInfo> infos = new ArrayList<EntityInfo>( size );
      DocumentExtractor extractor = new DocumentExtractor(
          queryHits, searchFactoryImplementor, indexProjection, idFieldNames, allowFieldSelectionInProjection
      );
      for ( int index = first; index <= max; index++ ) {
        infos.add( extractor.extract( index ) );
      }
      Loader loader = getLoader();
      return new IteratorImpl( infos, loader );
    }
    catch ( IOException e ) {
View Full Code Here

    //FIXME: handle null searcher
    try {
      QueryHits queryHits = getQueryHits( searcher, calculateTopDocsRetrievalSize() );
      int first = first();
      int max = max( first, queryHits.totalHits );
      DocumentExtractor extractor = new DocumentExtractor(
          queryHits, searchFactoryImplementor, indexProjection, idFieldNames, allowFieldSelectionInProjection
      );
      Loader loader = getLoader();
      return new ScrollableResultsImpl(
          searcher, first, max, fetchSize, extractor, loader, searchFactoryImplementor, this.session
View Full Code Here

      int max = max( first, queryHits.totalHits );
      Session sess = ( Session ) this.session;

      int size = max - first + 1 < 0 ? 0 : max - first + 1;
      List<EntityInfo> infos = new ArrayList<EntityInfo>( size );
      DocumentExtractor extractor = new DocumentExtractor(
          queryHits, searchFactoryImplementor, indexProjection, idFieldNames, allowFieldSelectionInProjection
      );
      for ( int index = first; index <= max; index++ ) {
        infos.add( extractor.extract( index ) );
      }
      Loader loader = getLoader();
      List list = loader.load( infos.toArray( new EntityInfo[infos.size()] ) );
      if ( resultTransformer == null || loader instanceof ProjectionLoader ) {
        //stay consistent with transformTuple which can only be executed during a projection
View Full Code Here

TOP

Related Classes of org.hibernate.search.engine.DocumentExtractor

Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc., now owned by Oracle Inc. Contact coftware#gmail.com.