Package org.apache.solr.search

Examples of org.apache.solr.search.SolrIndexSearcher
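Every excerpt below follows the same core pattern: obtain the SolrIndexSearcher from the current SolrQueryRequest, walk a DocList with a DocIterator, and load stored fields for each internal document id. A minimal sketch of that pattern, assuming the Solr 1.4/3.x-era API used by these excerpts (the class and helper names in the sketch are invented for illustration):

  import java.io.IOException;
  import java.util.Set;

  import org.apache.lucene.document.Document;
  import org.apache.solr.request.SolrQueryRequest;
  import org.apache.solr.search.DocIterator;
  import org.apache.solr.search.DocList;
  import org.apache.solr.search.SolrIndexSearcher;

  public class SearcherUsageSketch {

    // Hypothetical helper: walks a DocList and loads the requested stored
    // fields for each hit, as the response-writer excerpts below do.
    static void readStoredFields(SolrQueryRequest req, DocList ids, Set<String> fields)
        throws IOException {
      // the searcher is owned and released by the request; do not close it here
      SolrIndexSearcher searcher = req.getSearcher();
      DocIterator iterator = ids.iterator();
      for (int i = 0; i < ids.size(); i++) {
        int id = iterator.nextDoc();              // internal Lucene document id
        Document doc = searcher.doc(id, fields);  // served from the document cache when possible
        float score = ids.hasScores() ? iterator.score() : 0.0f;
        // ... write doc and score to the response ...
      }
    }
  }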


      }
      BrowseResult res = null;
      if (shardsVal == null && !solrParams.getBool(ShardParams.IS_SHARD, false))
    {

      SolrIndexSearcher searcher=rb.req.getSearcher();
     
      SolrIndexReader solrReader = searcher.getReader();
      IndexReader wrappedReader = solrReader.getWrappedReader();

      // only cast after the instanceof check; casting first would throw
      // ClassCastException and defeat the test below
      if (wrappedReader instanceof BoboIndexReader){
          BoboIndexReader reader = (BoboIndexReader) wrappedReader;
          try {
            List<Query> filters = rb.getFilters();
View Full Code Here


      public long getStart() {
        return ids.offset();
      }

      public void writeDocs(boolean includeScore, Set<String> fields) throws IOException {
        SolrIndexSearcher searcher = request.getSearcher();
        DocIterator iterator = ids.iterator();
        int sz = ids.size();
        includeScore = includeScore && ids.hasScores();
        for (int i=0; i<sz; i++) {
          int id = iterator.nextDoc();
          Document doc = searcher.doc(id, fields);
          writeDoc(null, doc, fields, (includeScore ? iterator.score() : 0.0f), includeScore);
        }
      }
    }, fields );
  }
View Full Code Here

    writeArrayOpener(sz);

    incLevel();
    boolean first=true;

    SolrIndexSearcher searcher = req.getSearcher();
    DocIterator iterator = ids.iterator();
    for (int i=0; i<sz; i++) {
      int id = iterator.nextDoc();
      Document doc = searcher.doc(id, fields);

      if (first) {
        first=false;
      } else {
        writeArraySeparator();
View Full Code Here

    writeArrayOpener(sz);

    incLevel();
    boolean first=true;

    SolrIndexSearcher searcher = req.getSearcher();
    for (SolrDocument doc : docs) {

      if (first) {
        first=false;
      } else {
View Full Code Here

      iter = docIds.iterator();
    } else {
      DocList list = listAndSet.docList;
      iter = list.iterator();
    }
    SolrIndexSearcher searcher = rb.req.getSearcher();

    IndexReader reader = searcher.getReader();
    //the TVMapper is a TermVectorMapper which can be used to optimize loading of Term Vectors
    SchemaField keyField = schema.getUniqueKeyField();
    String uniqFieldName = null;
    if (keyField != null) {
      uniqFieldName = keyField.getName();
View Full Code Here

       
        cachedir.mkdirs();
        Directory fieldcacheDir = FSDirectory.open(cachedir);
        fieldcacheDir.setCore(this,p);

        SolrIndexSearcher newSearcher = new SolrIndexSearcher(this,  schema, "partion_" + partionKey, reader, true);
        newSearcher.setPartionCacheKey(p);
        newSearcher.setFieldcacheDir(fieldcacheDir);
        newSearcher.setPartionKey(partionKey);
        rtn = newHolderPartion(newSearcher);
        if(ishb)
        {
          searchCacheForHb.put(partionKey, rtn);
        }else{
View Full Code Here

  @Override
  public void process(ResponseBuilder rb) throws IOException
  {
    SolrParams p = rb.req.getParams();
    if( p.getBool( MoreLikeThisParams.MLT, false ) ) {
      SolrIndexSearcher searcher = rb.req.getSearcher();
     
      NamedList<DocList> sim = getMoreLikeThese( rb, searcher,
          rb.getResults().docList, rb.getFieldFlags() );

      // TODO ???? add this directly to the response?
View Full Code Here

 
  @Override
  public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception
  {   
    IndexSchema schema = req.getSchema();
    SolrIndexSearcher searcher = req.getSearcher();
    IndexReader reader = searcher.getReader();
    SolrParams params = req.getParams();
    int numTerms = params.getInt( NUMTERMS, DEFAULT_COUNT );
       
    // Always show the core lucene info
    rsp.add("index", getIndexInfo(reader, numTerms>0 ) );

    Integer docId = params.getInt( DOC_ID );
    if( docId == null && params.get( ID ) != null ) {
      // Look for something with a given solr ID
      SchemaField uniqueKey = schema.getUniqueKeyField();
      String v = uniqueKey.getType().toInternal( params.get(ID) );
      Term t = new Term( uniqueKey.getName(), v );
      docId = searcher.getFirstMatch( t );
      if( docId < 0 ) {
        throw new SolrException( SolrException.ErrorCode.NOT_FOUND, "Can't find document: "+params.get( ID ) );
      }
    }
       
View Full Code Here
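The handleRequestBody excerpt above resolves an external unique-key value to an internal Lucene document id before inspecting it. A condensed sketch of that lookup, assuming the same era of the SolrIndexSearcher API (the helper name findInternalDocId is invented for illustration):

  import java.io.IOException;

  import org.apache.lucene.index.Term;
  import org.apache.solr.common.SolrException;
  import org.apache.solr.request.SolrQueryRequest;
  import org.apache.solr.schema.IndexSchema;
  import org.apache.solr.schema.SchemaField;
  import org.apache.solr.search.SolrIndexSearcher;

  public class UniqueKeyLookupSketch {

    // Hypothetical helper: maps a Solr unique-key value to the internal doc id,
    // following the same steps as the handleRequestBody excerpt above.
    static int findInternalDocId(SolrQueryRequest req, String externalId) throws IOException {
      IndexSchema schema = req.getSchema();
      SolrIndexSearcher searcher = req.getSearcher();
      SchemaField uniqueKey = schema.getUniqueKeyField();
      // convert the readable external value to its indexed (internal) form first
      String internal = uniqueKey.getType().toInternal(externalId);
      int docId = searcher.getFirstMatch(new Term(uniqueKey.getName(), internal));
      if (docId < 0) {
        throw new SolrException(SolrException.ErrorCode.NOT_FOUND,
            "Can't find document: " + externalId);
      }
      return docId;
    }
  }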

  public NamedList<Object> doHighlighting(DocList docs, Query query, SolrQueryRequest req, String[] defaultFields) throws IOException {
    SolrParams params = req.getParams();
    if (!isHighlightingEnabled(params))
        return null;
    
    SolrIndexSearcher searcher = req.getSearcher();
    IndexSchema schema = searcher.getSchema();
    NamedList fragments = new SimpleOrderedMap();
    String[] fieldNames = getHighlightFields(query, req, defaultFields);
    Set<String> fset = new HashSet<String>();
    
    {
      // pre-fetch documents using the Searcher's doc cache
      for(String f : fieldNames) { fset.add(f); }
      // fetch unique key if one exists.
      SchemaField keyField = schema.getUniqueKeyField();
      if(null != keyField)
        fset.add(keyField.getName());
    }

    // get FastVectorHighlighter instance out of the processing loop
    FastVectorHighlighter fvh = new FastVectorHighlighter(
        // FVH cannot process the hl.usePhraseHighlighter parameter on a per-field basis
        params.getBool( HighlightParams.USE_PHRASE_HIGHLIGHTER, true ),
        // FVH cannot process the hl.requireFieldMatch parameter on a per-field basis
        params.getBool( HighlightParams.FIELD_MATCH, false ) );
    fvh.setPhraseLimit(params.getInt(HighlightParams.PHRASE_LIMIT, Integer.MAX_VALUE));
    FieldQuery fieldQuery = fvh.getFieldQuery( query, searcher.getIndexReader() );

    // Highlight each document
    DocIterator iterator = docs.iterator();
    for (int i = 0; i < docs.size(); i++) {
      int docId = iterator.nextDoc();
      Document doc = searcher.doc(docId, fset);
      NamedList docSummaries = new SimpleOrderedMap();
      for (String fieldName : fieldNames) {
        fieldName = fieldName.trim();
        if( useFastVectorHighlighter( params, schema, fieldName ) )
          doHighlightingByFastVectorHighlighter( fvh, fieldQuery, req, docSummaries, docId, doc, fieldName );
View Full Code Here
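For context, doHighlighting is normally driven from a search component that passes in the main result DocList and attaches the returned summaries to the response. A sketch of such a caller, assuming the enclosing highlighter class is Solr's DefaultSolrHighlighter and that the response key is "highlighting" (both assumptions; the real HighlightComponent wiring may differ):

  import java.io.IOException;

  import org.apache.solr.common.util.NamedList;
  import org.apache.solr.handler.component.ResponseBuilder;
  import org.apache.solr.highlight.DefaultSolrHighlighter;

  // Hypothetical caller, not the real HighlightComponent.
  public class HighlightCallerSketch {

    private final DefaultSolrHighlighter highlighter; // assumed owner of doHighlighting above

    public HighlightCallerSketch(DefaultSolrHighlighter highlighter) {
      this.highlighter = highlighter;
    }

    public void process(ResponseBuilder rb, String[] defaultFields) throws IOException {
      NamedList<Object> summaries = highlighter.doHighlighting(
          rb.getResults().docList,  // the documents to highlight
          rb.getQuery(),            // the query that produced them
          rb.req,                   // gives access to params, schema, and the searcher
          defaultFields);           // fallback fields when hl.fl is not set
      if (summaries != null) {
        rb.rsp.add("highlighting", summaries); // response key is an assumption
      }
    }
  }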

    SolrParams params = req.getParams();
    String[] docTexts = doc.getValues(fieldName);
    // According to the Document javadoc, doc.getValues() never returns null, so check for empty instead of null.
    if (docTexts.length == 0) return;
   
    SolrIndexSearcher searcher = req.getSearcher();
    IndexSchema schema = searcher.getSchema();
    TokenStream tstream = null;
    int numFragments = getMaxSnippets(fieldName, params);
    boolean mergeContiguousFragments = isMergeContiguousFragments(fieldName, params);

    String[] summaries = null;
    List<TextFragment> frags = new ArrayList<TextFragment>();

    TermOffsetsTokenStream tots = null; // to be non-null iff we're using TermOffsets optimization
    try {
        TokenStream tvStream = TokenSources.getTokenStream(searcher.getReader(), docId, fieldName);
        if (tvStream != null) {
          tots = new TermOffsetsTokenStream(tvStream);
        }
    }
    catch (IllegalArgumentException e) {
View Full Code Here
