Package org.apache.solr.search

Examples of org.apache.solr.search.SolrIndexSearcher


 
  @Override
  public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception
  {   
    IndexSchema schema = req.getSchema();
    SolrIndexSearcher searcher = req.getSearcher();
    IndexReader reader = searcher.getReader();
    SolrParams params = req.getParams();
    int numTerms = params.getInt( NUMTERMS, DEFAULT_COUNT );
       
    // Always show the core lucene info
    rsp.add("index", getIndexInfo(reader, numTerms>0 ) );

    Integer docId = params.getInt( DOC_ID );
    if( docId == null && params.get( ID ) != null ) {
      // Look for something with a given solr ID
      SchemaField uniqueKey = schema.getUniqueKeyField();
      String v = uniqueKey.getType().toInternal( params.get(ID) );
      Term t = new Term( uniqueKey.getName(), v );
      docId = searcher.getFirstMatch( t );
      if( docId < 0 ) {
        throw new SolrException( SolrException.ErrorCode.NOT_FOUND, "Can't find document: "+params.get( ID ) );
      }
    }
       
View Full Code Here
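
The lookup above is the standard way to turn a user-visible unique-key value into an internal Lucene document id. A minimal sketch of just that step, assuming a live SolrQueryRequest named req and a hypothetical key value "doc-42":

  SolrIndexSearcher searcher = req.getSearcher();
  SchemaField uniqueKey = req.getSchema().getUniqueKeyField();
  // convert the external value to the field's internal (indexed) form first
  String internal = uniqueKey.getType().toInternal("doc-42");
  // getFirstMatch returns the first matching docId, or -1 if nothing matches
  int docId = searcher.getFirstMatch(new Term(uniqueKey.getName(), internal));
  if (docId < 0) {
    // not found; report it as in the handler above
  }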


  public NamedList<Object> doHighlighting(DocList docs, Query query, SolrQueryRequest req, String[] defaultFields) throws IOException {
    SolrParams params = req.getParams();
    if (!isHighlightingEnabled(params))
        return null;
    
     SolrIndexSearcher searcher = req.getSearcher();
     IndexSchema schema = searcher.getSchema();
     NamedList fragments = new SimpleOrderedMap();
     String[] fieldNames = getHighlightFields(query, req, defaultFields);
     Set<String> fset = new HashSet<String>();
    
     {
       // pre-fetch documents using the Searcher's doc cache
       for(String f : fieldNames) { fset.add(f); }
       // fetch unique key if one exists.
       SchemaField keyField = schema.getUniqueKeyField();
       if(null != keyField)
         fset.add(keyField.getName());
     }


    // Highlight each document
    DocIterator iterator = docs.iterator();
    for (int i = 0; i < docs.size(); i++) {
       int docId = iterator.nextDoc();
       Document doc = searcher.doc(docId, fset);
       NamedList docSummaries = new SimpleOrderedMap();
       for (String fieldName : fieldNames) {
          fieldName = fieldName.trim();
          String[] docTexts = doc.getValues(fieldName);
          if (docTexts == null) continue;
         
          TokenStream tstream = null;
          int numFragments = getMaxSnippets(fieldName, params);
          boolean mergeContiguousFragments = isMergeContiguousFragments(fieldName, params);

          String[] summaries = null;
          List<TextFragment> frags = new ArrayList<TextFragment>();
          TermOffsetsTokenStream tots = null;
          for (int j = 0; j < docTexts.length; j++) {
            // create TokenStream
            try {
              // attempt term vectors
              if( tots == null ) {
                TokenStream tvStream = TokenSources.getTokenStream(searcher.getReader(), docId, fieldName);
                if (tvStream != null) {
                  tots = new TermOffsetsTokenStream(tvStream);
                  tstream = tots.getMultiValuedTokenStream( docTexts[j].length() );
                } else {
                  // fall back to analyzer
View Full Code Here
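
Note the searcher.doc(docId, fset) call in the loop above: it goes through SolrIndexSearcher's document cache and, given a field set, loads only those stored fields. A hedged sketch of the prefetch step in isolation (the field names here are made up):

  Set<String> fields = new HashSet<String>();
  fields.add("id");     // hypothetical unique-key field
  fields.add("title");  // hypothetical field to highlight
  // loads only the listed stored fields, via the searcher's doc cache
  Document doc = searcher.doc(docId, fields);
  String[] titles = doc.getValues("title");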

  }

  public String getIndexDir() {
    if (_searcher == null)
      return dataDir + "index/";
    SolrIndexSearcher searcher = _searcher.get();
    return searcher.getIndexDir() == null ? dataDir + "index/" : searcher.getIndexDir();
  }
View Full Code Here

  public SolrIndexSearcher newSearcher(String name) throws IOException {
    return newSearcher(name, false);
  }
 
  // gets a non-caching searcher
  public SolrIndexSearcher newSearcher(String name, boolean readOnly) throws IOException {
    return new SolrIndexSearcher(this, schema, name, directoryFactory.open(getIndexDir()), readOnly, false);
  }
View Full Code Here
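
A hedged usage sketch for the factory above: opening a short-lived searcher for an administrative task. The core variable and the searcher name are assumptions, and a searcher created this way is never registered with the core, so the caller must close it:

  SolrIndexSearcher s = core.newSearcher("admin", true); // read-only
  try {
    int numDocs = s.getReader().numDocs(); // e.g. inspect the index
  } finally {
    s.close(); // not managed by the core, so close it ourselves
  }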

      }
    }

    // open the index synchronously
    // if this fails, we need to decrement onDeckSearchers again.
    SolrIndexSearcher tmp;
    RefCounted<SolrIndexSearcher> newestSearcher = null;

    try {
      newestSearcher = getNewestSearcher(false);
      String newIndexDir = getNewIndexDir();
      File indexDirFile = new File(getIndexDir()).getCanonicalFile();
      File newIndexDirFile = new File(newIndexDir).getCanonicalFile();
     
      if (newestSearcher != null && solrConfig.reopenReaders
          && indexDirFile.equals(newIndexDirFile)) {
        IndexReader currentReader = newestSearcher.get().getReader();
        IndexReader newReader = currentReader.reopen();

        if (newReader == currentReader) {
          currentReader.incRef();
        }

        tmp = new SolrIndexSearcher(this, schema, "main", newReader, true, true);
      } else {
        IndexReader reader = getIndexReaderFactory().newReader(getDirectoryFactory().open(newIndexDir), true);
        tmp = new SolrIndexSearcher(this, schema, "main", reader, true, true);
      }
    } catch (Throwable th) {
      synchronized(searcherLock) {
        onDeckSearchers--;
        // notify another waiter to continue... it may succeed
        // and wake any others.
        searcherLock.notify();
      }
      // need to close the searcher here??? we shouldn't have to.
      throw new RuntimeException(th);
    } finally {
      if (newestSearcher != null) {
        newestSearcher.decref();
      }
    }
   
    final SolrIndexSearcher newSearcher = tmp;

    RefCounted<SolrIndexSearcher> currSearcherHolder = null;
    final RefCounted<SolrIndexSearcher> newSearchHolder = newHolder(newSearcher);

    if (returnSearcher) newSearchHolder.incref();

    // a signal to decrement onDeckSearchers if something goes wrong.
    final boolean[] decrementOnDeckCount = new boolean[1];
    decrementOnDeckCount[0] = true;

    try {

      boolean alreadyRegistered = false;
      synchronized (searcherLock) {
        _searchers.add(newSearchHolder);

        if (_searcher == null) {
          // if there isn't a current searcher then we may
          // want to register this one before warming is complete instead of waiting.
          if (solrConfig.useColdSearcher) {
            registerSearcher(newSearchHolder);
            decrementOnDeckCount[0] = false;
            alreadyRegistered = true;
          }
        } else {
          // get a reference to the current searcher for purposes of autowarming.
          currSearcherHolder=_searcher;
          currSearcherHolder.incref();
        }
      }


      final SolrIndexSearcher currSearcher = currSearcherHolder==null ? null : currSearcherHolder.get();

      //
      // Note! if we registered the new searcher (but didn't increment its
      // reference count because returnSearcher==false), it's possible for
      // someone else to register another searcher, and thus cause newSearcher
View Full Code Here

          _searcher.decref();   // dec refcount for this._searcher
          _searcher=null;
        }

        _searcher = newSearcherHolder;
        SolrIndexSearcher newSearcher = newSearcherHolder.get();

        newSearcher.register(); // register subitems (caches)
        log.info(logid+"Registered new searcher " + newSearcher);

      } catch (Throwable e) {
        log(e);
      } finally {
View Full Code Here
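
The lifecycle code above is why callers never hold a SolrIndexSearcher directly: they borrow a reference-counted holder and release it when done. The usual consumer-side pattern, assuming a SolrCore named core:

  RefCounted<SolrIndexSearcher> holder = core.getSearcher();
  try {
    SolrIndexSearcher searcher = holder.get();
    // ... run queries against the registered searcher ...
  } finally {
    holder.decref(); // balance the incref done by getSearcher()
  }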

      U.setDefaults(req,defaults,appends,invariants);
      SolrParams params = req.getParams();
     
      int flags = 0;
     
      SolrIndexSearcher s = req.getSearcher();
      IndexSchema schema = req.getSchema();
           
      Map<String,Float> queryFields = U.parseFieldBoosts(params.get(DMP.QF));
      Map<String,Float> phraseFields = U.parseFieldBoosts(params.get(DMP.PF));

      float tiebreaker = params.getFloat(DMP.TIE, 0.0f);
           
      int pslop = params.getInt(DMP.PS, 0);

      /* a generic parser for parsing regular lucene queries */
      QueryParser p = new SolrQueryParser(schema, null);

      /* a parser for dealing with user input, which will convert
       * things to DisjunctionMaxQueries
       */
      U.DisjunctionMaxQueryParser up =
        new U.DisjunctionMaxQueryParser(schema, IMPOSSIBLE_FIELD_NAME);
      up.addAlias(IMPOSSIBLE_FIELD_NAME,
                  tiebreaker, queryFields);

      /* for parsing sloppy phrases using DisjunctionMaxQueries */
      U.DisjunctionMaxQueryParser pp =
        new U.DisjunctionMaxQueryParser(schema, IMPOSSIBLE_FIELD_NAME);
      pp.addAlias(IMPOSSIBLE_FIELD_NAME,
                  tiebreaker, phraseFields);
      pp.setPhraseSlop(pslop);
           
           
      /* * * Main User Query * * */

      String userQuery = U.partialEscape
        (U.stripUnbalancedQuotes(params.get(Q))).toString();
           
      /* the main query we will execute.  we disable the coord because
       * this query is an artificial construct
       */
      BooleanQuery query = new BooleanQuery(true);

      String minShouldMatch = params.get(DMP.MM, "100%");
           
      Query dis = up.parse(userQuery);

      if (dis instanceof BooleanQuery) {
        BooleanQuery t = new BooleanQuery();
        U.flattenBooleanQuery(t, (BooleanQuery)dis);

        U.setMinShouldMatch(t, minShouldMatch);
               
        query.add(t, Occur.MUST);
      } else {
        query.add(dis, Occur.MUST);
      }

      /* * * Add on Phrases for the Query * * */
           
      /* build up phrase boosting queries */

      /* if the userQuery already has some quotes, strip them out.
       * we've already done the phrases they asked for in the main
       * part of the query, this is to boost docs that may not have
       * matched those phrases but do match looser phrases.
       */
      String userPhraseQuery = userQuery.replace("\"","");
      Query phrase = pp.parse("\"" + userPhraseQuery + "\"");
      if (null != phrase) {
        query.add(phrase, Occur.SHOULD);
      }
           
      /* * * Boosting Query * * */

      String boostQuery = params.get(DMP.BQ);
      if (null != boostQuery && !boostQuery.equals("")) {
        Query tmp = p.parse(boostQuery);
        /* if the default boost was used, and we've got a BooleanQuery,
         * extract the subqueries out and use them directly
         */
        if (1.0f == tmp.getBoost() && tmp instanceof BooleanQuery) {
          for (BooleanClause c : ((BooleanQuery)tmp).getClauses()) {
            query.add(c);
          }
        } else {
          query.add(tmp, BooleanClause.Occur.SHOULD);
        }
      }

      /* * * Boosting Functions * * */

      String boostFunc = params.get(DMP.BF);
      if (null != boostFunc && !boostFunc.equals("")) {
        List<Query> funcs = U.parseFuncs(schema, boostFunc);
        for (Query f : funcs) {
          query.add(f, Occur.SHOULD);
        }
      }
           
      /* * * Restrict Results * * */

      List<Query> restrictions = U.parseFilterQueries(req);
           
      /* * * Generate Main Results * * */

      flags |= U.setReturnFields(req,rsp);
     
      DocListAndSet results = new DocListAndSet();
      NamedList facetInfo = null;
      if (params.getBool(FACET,false)) {
        results = s.getDocListAndSet(query, restrictions,
                                     SolrPluginUtils.getSort(req),
                                     req.getStart(), req.getLimit(),
                                     flags);
        facetInfo = getFacetInfo(req, rsp, results.docSet);
      } else {
        results.docList = s.getDocList(query, restrictions,
                                       SolrPluginUtils.getSort(req),
                                       req.getStart(), req.getLimit(),
                                       flags);
      }
      rsp.add("response",results.docList);
View Full Code Here
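
The getDocList and getDocListAndSet calls at the end are SolrIndexSearcher's main result-fetching entry points. A minimal hedged sketch of getDocList on its own (query is assumed; null filters and a null sort mean no filtering and relevance order):

  int flags = SolrIndexSearcher.GET_SCORES;
  DocList top = searcher.getDocList(query, (List<Query>) null, null, 0, 10, flags);
  DocIterator it = top.iterator();
  while (it.hasNext()) {
    int docId = it.nextDoc(); // internal Lucene document id
    float score = it.score(); // valid because GET_SCORES was requested
    Document d = searcher.doc(docId);
  }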

   public static NamedList doHighlighting(DocList docs, Query query, SolrQueryRequest req, String[] defaultFields) throws IOException
   {
      if (!isHighlightingEnabled(req))
         return null;
     
      SolrIndexSearcher searcher = req.getSearcher();
      NamedList fragments = new NamedList();
      String[] fieldNames = getHighlightFields(query, req, defaultFields);
      Document[] readDocs = new Document[docs.size()];
      {
        // pre-fetch documents using the Searcher's doc cache
        Set<String> fset = new HashSet<String>();
        for(String f : fieldNames) { fset.add(f); }
        // fetch unique key if one exists.
        SchemaField keyField = req.getSearcher().getSchema().getUniqueKeyField();
        if(null != keyField)
          fset.add(keyField.getName());
        searcher.readDocs(readDocs, docs, fset);
      }

      // Highlight each document
      DocIterator iterator = docs.iterator();
      for (int i = 0; i < docs.size(); i++)
      {
         int docId = iterator.nextDoc();
         Document doc = readDocs[i];
         NamedList docSummaries = new NamedList();
         for (String fieldName : fieldNames)
         {
            fieldName = fieldName.trim();
            String[] docTexts = doc.getValues(fieldName);
            if (docTexts == null) continue;

            // get highlighter, and number of fragments for this field
            Highlighter highlighter = getHighlighter(query, fieldName, req);
            int numFragments = getMaxSnippets(fieldName, req);

            String[] summaries;
            TextFragment[] frag;
            if (docTexts.length == 1)
            {
               // single-valued field
               TokenStream tstream;
               try
               {
                  // attempt term vectors
                  tstream = TokenSources.getTokenStream(searcher.getReader(), docId, fieldName);
               }
               catch (IllegalArgumentException e)
               {
                  // fall back to analyzer
                  tstream = new TokenOrderingFilter(searcher.getSchema().getAnalyzer().tokenStream(fieldName, new StringReader(docTexts[0])), 10);
               }
               frag = highlighter.getBestTextFragments(tstream, docTexts[0], false, numFragments);
            }
            else
            {
               // multi-valued field
               MultiValueTokenStream tstream;
               tstream = new MultiValueTokenStream(fieldName, docTexts, searcher.getSchema().getAnalyzer(), true);
               frag = highlighter.getBestTextFragments(tstream, tstream.asSingleValue(), false, numFragments);
            }
            // convert fragments back into text
            // TODO: we can include score and position information in output as snippet attributes
            if (frag.length > 0)
            {
               ArrayList<String> fragTexts = new ArrayList<String>();
               for (int j = 0; j < frag.length; j++)
               {
                  if ((frag[j] != null) && (frag[j].getScore() > 0))
                  {
                     fragTexts.add(frag[j].toString());
                  }
               }
               summaries = fragTexts.toArray(new String[0]);
               if (summaries.length > 0) docSummaries.add(fieldName, summaries);
            }
         }
         String printId = searcher.getSchema().printableUniqueKey(doc);
         fragments.add(printId == null ? null : printId, docSummaries);
      }
      return fragments;
   }
View Full Code Here

    TaggingSession(String language, IndexConfiguration config) throws CorpusException {
        this.language = language;
        this.config = config;
        //init the SolrIndexSearcher
        searcherRef = config.getIndex().getSearcher();
        SolrIndexSearcher searcher = searcherRef.get();
        DirectoryReader indexReader = searcher.getIndexReader();
        indexVersion = Long.valueOf(indexReader.getVersion());
       
        //get the corpusInfo
        CorpusInfo langCorpusInfo = config.getCorpus(language);
        log.debug("> language Corpus: {}", langCorpusInfo);
        CorpusInfo defaultCorpusInfo = config.getDefaultCorpus();
        log.debug("> default Corpus: {}", defaultCorpusInfo);
       
        //obtain the Solr Document Id field
        SchemaField idSchemaField = config.getIndex().getLatestSchema().getUniqueKeyField();
        idField = idSchemaField.getName();
        solrDocfields.add(idField);

        //obtain the language specific fields for the session
        if(langCorpusInfo == null && defaultCorpusInfo == null){
            //this should not happen, because the canEnhance method of the
            //engine should already reject such calls
            throw new IllegalStateException("No FST Corpus configured for language '"
                + language + "' and also no default FST Corpus is present!");
        }
        if(langCorpusInfo != null){
            this.langCorpus = new Corpus(langCorpusInfo,
                obtainFstCorpus(indexVersion,langCorpusInfo));
            this.labelField = langCorpusInfo.storedField;
            solrDocfields.add(labelField);
            this.labelLang = langCorpusInfo.language == null ||
                    StringUtils.isBlank(langCorpusInfo.language) ? null :
                        new Language(langCorpusInfo.language);
        } else {
            this.labelField = null;
            this.labelLang = null;
        }
        if(defaultCorpusInfo != null && !defaultCorpusInfo.equals(langCorpusInfo)){
            this.defaultCorpus = new Corpus(defaultCorpusInfo,
                obtainFstCorpus(indexVersion,defaultCorpusInfo));
            this.defaultLabelField = defaultCorpusInfo.storedField;
            solrDocfields.add(defaultLabelField);
            this.defaultLabelLang = defaultCorpusInfo.language == null ||
                    StringUtils.isBlank(defaultCorpusInfo.language) ? null :
                        new Language(defaultCorpusInfo.language);
        } else {
            this.defaultCorpus = null;
            this.defaultLabelField = null;
            this.defaultLabelLang = null;
        }
        if(this.defaultCorpus == null && this.langCorpus == null){
            throw new CorpusException("Unable to initialise a FST corpus for language '"
                + language + "'. Neither the language specific Corpus (field: "
                + (langCorpusInfo != null ? langCorpusInfo.indexedField : "<undefined>")
                + ") nor the default Corpus (field: "
                + (defaultCorpusInfo != null ? defaultCorpusInfo.indexedField : "<undefined>")
                + ") is currently available!", null);
        }
        if(config.getEncodedTypeField() != null){
            this.typeField = config.getEncodedTypeField();
            solrDocfields.add(typeField);
        } else {
            this.typeField = null;
        }
        if(config.getEncodedRedirectField() != null){
            this.redirectField = config.getEncodedRedirectField();
            solrDocfields.add(redirectField);
        } else {
            this.redirectField = null;
        }
        if(config.getEncodedRankingField() != null){
            this.rankingField = config.getEncodedRankingField();
            solrDocfields.add(rankingField);
        } else {
            this.rankingField = null;
        }
        if(config.getEntityCacheManager() != null){
            documentCacheRef = config.getEntityCacheManager().getCache(indexVersion);
        }
//        uniqueKeyCache = null; //no longer used.
//        uniqueKeyCache = new ValueSourceAccessor(searcher, idSchemaField.getType()
//            .getValueSource(idSchemaField, null));
        fieldLoader = new FieldLoaderImpl(searcher.getIndexReader());

    }
View Full Code Here

            return;
        }
        final TaggerFstCorpus corpus;
        RefCounted<SolrIndexSearcher> searcherRef = core.getSearcher();
        try { //STANBOL-1177: create FST models in AccessController.doPrivileged(..)
            final SolrIndexSearcher searcher = searcherRef.get();
            //we get the AtomicReader, because TaggerFstCorpus will need it
            //anyway. This avoids creating another SlowCompositeReaderWrapper.
            final IndexReader reader = searcher.getAtomicReader();
            log.info(" ... build FST corpus for {}",fstInfo);
            corpus = AccessController.doPrivileged(new PrivilegedExceptionAction<TaggerFstCorpus>() {
                public TaggerFstCorpus run() throws IOException {
                    return new TaggerFstCorpus(reader, searcher.getIndexReader().getVersion(),
                        null, fstInfo.indexedField, fstInfo.storedField, fstInfo.analyzer,
                        fstInfo.partialMatches,1,100);
                }
            });
        } catch (PrivilegedActionException pae) {
View Full Code Here
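
The getAtomicReader() call above returns one atomic view over the whole index; SolrIndexSearcher caches that wrapper, so reusing it avoids building another SlowCompositeReaderWrapper. A hedged sketch of borrowing it for a term-level task (the field name is hypothetical):

  RefCounted<SolrIndexSearcher> ref = core.getSearcher();
  try {
    AtomicReader atomic = ref.get().getAtomicReader();
    Terms terms = atomic.terms("dbpedia_label"); // hypothetical indexed field
    // ... walk the terms enum, build an FST, etc. ...
  } finally {
    ref.decref();
  }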
