Package org.apache.solr.search

Examples of org.apache.solr.search.SolrIndexSearcher
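SolrIndexSearcher wraps a Lucene IndexSearcher with Solr's caches and its
DocList/DocSet result machinery. Inside a request handler it is borrowed from
the request rather than opened directly. A minimal sketch of that pattern
(imports omitted as in the excerpts below; the query and field are
hypothetical):

  public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception {
    // The searcher is reference-counted and released with the request; do not close it here.
    SolrIndexSearcher searcher = req.getSearcher();
    Query query = new TermQuery(new Term("id", "42"));
    // getDocList consults Solr's query and filter caches before touching the index.
    DocList hits = searcher.getDocList(query, (Query) null, (Sort) null, 0, 10, 0);
    rsp.add("response", hits);
  }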


  /**
   * Compute the Last-Modified timestamp to advertise for the given request.
   *
   * @param solrReq the current request, supplying the core and the searcher
   * @return the timestamp to use as a last modified time.
   */
  public static long calcLastModified(final SolrQueryRequest solrReq) {
    final SolrCore core = solrReq.getCore();
    final SolrIndexSearcher searcher = solrReq.getSearcher();
   
    final LastModFrom lastModFrom
      = core.getSolrConfig().getHttpCachingConfig().getLastModFrom();

    long lastMod;
    try {
      // assume default, change if needed (getOpenTime() should be fast)
      lastMod =
        LastModFrom.DIRLASTMOD == lastModFrom
        ? IndexReader.lastModified(searcher.getReader().directory())
        : searcher.getOpenTime();
    } catch (IOException e) {
      // we're pretty freaking screwed if this happens
      throw new SolrException(ErrorCode.SERVER_ERROR, e);
    }
    // Drop the milliseconds: the HTTP Last-Modified header has only
    // second granularity.
    return lastMod - (lastMod % 1000L);
  }
View Full Code Here
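The method above is from Solr's HttpCacheHeaderUtil. A sketch of how such a
timestamp is typically consumed on the servlet side (solrReq, httpReq and
httpResp are assumed to be in scope):

  long lastMod = HttpCacheHeaderUtil.calcLastModified(solrReq);
  long ifModifiedSince = httpReq.getDateHeader("If-Modified-Since");
  if (ifModifiedSince != -1 && lastMod <= ifModifiedSince) {
    // Index unchanged since the client's cached copy: answer 304 Not Modified.
    httpResp.setStatus(HttpServletResponse.SC_NOT_MODIFIED);
  } else {
    httpResp.setDateHeader("Last-Modified", lastMod);
  }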


  @Override
  public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception
  {
    SolrParams params = req.getParams();
    SolrIndexSearcher searcher = req.getSearcher();
   
   
    MoreLikeThisHelper mlt = new MoreLikeThisHelper( params, searcher );
    List<Query> filters = SolrPluginUtils.parseFilterQueries(req);
   
    // Hold on to the interesting terms if relevant
    TermStyle termStyle = TermStyle.get( params.get( MoreLikeThisParams.INTERESTING_TERMS ) );
    List<InterestingTerm> interesting = (termStyle == TermStyle.NONE )
      ? null : new ArrayList<InterestingTerm>( mlt.mlt.getMaxQueryTerms() );
   
    DocListAndSet mltDocs = null;
    String q = params.get( CommonParams.Q );
   
    // Parse Required Params
    // This will have either a single Reader or a valid query string
    Reader reader = null;
    try {
      if (q == null || q.trim().length() < 1) {
        Iterable<ContentStream> streams = req.getContentStreams();
        if (streams != null) {
          Iterator<ContentStream> iter = streams.iterator();
          if (iter.hasNext()) {
            reader = iter.next().getReader();
          }
          if (iter.hasNext()) {
            throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
                "MoreLikeThis does not support multiple ContentStreams");
          }
        }
      }

      // What fields do we need to return
      String fl = params.get(CommonParams.FL);
      int flags = 0;
      if (fl != null) {
        flags |= SolrPluginUtils.setReturnFields(fl, rsp);
      }

      int start = params.getInt(CommonParams.START, 0);
      int rows = params.getInt(CommonParams.ROWS, 10);

      // Find documents MoreLikeThis - either with a reader or a query
      // --------------------------------------------------------------------------------
      if (reader != null) {
        mltDocs = mlt.getMoreLikeThis(reader, start, rows, filters,
            interesting, flags);
      } else if (q != null) {
        // Matching options
        boolean includeMatch = params.getBool(MoreLikeThisParams.MATCH_INCLUDE,
            true);
        int matchOffset = params.getInt(MoreLikeThisParams.MATCH_OFFSET, 0);
        // Find the base match
        Query query = QueryParsing.parseQuery(q, params.get(CommonParams.DF),
            params, req.getSchema());
        DocList match = searcher.getDocList(query, null, null, matchOffset, 1,
            flags); // only get the first one...
        if (includeMatch) {
          rsp.add("match", match);
        }
View Full Code Here
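This handler (Solr's MoreLikeThisHandler) either parses q and uses the first
match as the example document, or reads example text from a posted
ContentStream. A hedged SolrJ sketch of calling it, assuming it is registered
at /mlt and that the field names exist in the schema:

  SolrServer server = new CommonsHttpSolrServer("http://localhost:8983/solr");
  SolrQuery query = new SolrQuery("id:12345");   // the document to find neighbours of
  query.setQueryType("/mlt");                    // route the request to the handler
  query.set(MoreLikeThisParams.SIMILARITY_FIELDS, "title,body"); // mlt.fl
  query.set(MoreLikeThisParams.INTERESTING_TERMS, "details");    // report the terms used
  QueryResponse rsp = server.query(query);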

  }

  @Override
  public void writeDocList(String name, DocList ids, Set<String> fields, Map otherFields) throws IOException {
    int sz=ids.size();
    SolrIndexSearcher searcher = req.getSearcher();
    DocIterator iterator = ids.iterator();
    for (int i=0; i<sz; i++) {
      int id = iterator.nextDoc();
      // Load only the requested stored fields for this hit
      Document doc = searcher.doc(id, fields);
      writeDoc(null, doc, fields, (returnScore ? iterator.score() : 0.0f), returnScore);
    }
  }
View Full Code Here
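Note that searcher.doc(int, Set<String>) loads only the listed stored fields,
so writers never materialize large fields nobody asked for. A tiny sketch
(field names hypothetical):

  Set<String> fields = new HashSet<String>();
  fields.add("id");
  fields.add("title");
  Document doc = searcher.doc(docId, fields); // only id and title are loaded
  String title = doc.get("title");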

    super(core);
  }

  @Override
  public void newSearcher(SolrIndexSearcher newSearcher, SolrIndexSearcher currentSearcher) {
    final SolrIndexSearcher searcher = newSearcher;
    log.info("QuerySenderListener sending requests to " + newSearcher);
    List<NamedList> allLists = (List<NamedList>)args.get("queries");
    if (allLists == null) return;
    for (NamedList nlst : allLists) {
      try {
View Full Code Here
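The "queries" argument iterated above comes from the listener's registration
in solrconfig.xml; the stock newSearcher warming entry looks like this:

  <listener event="newSearcher" class="solr.QuerySenderListener">
    <arr name="queries">
      <lst>
        <str name="q">solr</str>
        <str name="start">0</str>
        <str name="rows">10</str>
      </lst>
    </arr>
  </listener>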

      writeFloat(null,ids.maxScore());
    }
    writeKey("docs",false);
    writeArrayOpener(sz);

    SolrIndexSearcher searcher = req.getSearcher();
    DocIterator iterator = ids.iterator();
    for (int i=0; i<sz; i++) {
      int id = iterator.nextDoc();
      Document doc = searcher.doc(id, fields);
      writeKey(i, false);
      writeDoc(null, doc, fields, (includeScore ? iterator.score() : 0.0f), includeScore);
    }
    writeMapCloser();
View Full Code Here

    if (origQuery != null) {
      if (!(origQuery instanceof SpanNearQuery)) {
        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Illegal query type. The incoming query must be a Lucene SpanNearQuery but it was a " + origQuery.getClass().getName());
      }
      SpanNearQuery sQuery = (SpanNearQuery) origQuery;
      SolrIndexSearcher searcher = rb.req.getSearcher();
      IndexReader reader = searcher.getIndexReader();
      Spans spans = sQuery.getSpans(reader);
      //Assumes the query is a SpanQuery
      //Build up the query term weight map and the bi-gram
      Map<String, Float> termWeights = new HashMap<String, Float>();
      Map<String, Float> bigramWeights = new HashMap<String, Float>();
      createWeights(params.get(CommonParams.Q), sQuery, termWeights, bigramWeights, reader);
      float adjWeight = params.getFloat(ADJACENT_WEIGHT, DEFAULT_ADJACENT_WEIGHT);
      float secondAdjWeight = params.getFloat(SECOND_ADJ_WEIGHT, DEFAULT_SECOND_ADJACENT_WEIGHT);
      float bigramWeight = params.getFloat(BIGRAM_WEIGHT, DEFAULT_BIGRAM_WEIGHT);
      //get the passages
      int primaryWindowSize = params.getInt(QAParams.PRIMARY_WINDOW_SIZE, DEFAULT_PRIMARY_WINDOW_SIZE);
      int adjacentWindowSize = params.getInt(QAParams.ADJACENT_WINDOW_SIZE, DEFAULT_ADJACENT_WINDOW_SIZE);
      int secondaryWindowSize = params.getInt(QAParams.SECONDARY_WINDOW_SIZE, DEFAULT_SECONDARY_WINDOW_SIZE);
      WindowBuildingTVM tvm = new WindowBuildingTVM(primaryWindowSize, adjacentWindowSize, secondaryWindowSize);
      PassagePriorityQueue rankedPassages = new PassagePriorityQueue();
      //intersect w/ doclist
      DocList docList = rb.getResults().docList;
      while (spans.next()) {
        //build up the window
        if (docList.exists(spans.doc())) {
          tvm.spanStart = spans.start();
          tvm.spanEnd = spans.end();
          reader.getTermFreqVector(spans.doc(), sQuery.getField(), tvm);
          //The entries map contains the window, do some ranking of it
          if (!tvm.passage.terms.isEmpty()) {
            log.debug("Candidate: Doc: {} Start: {} End: {} ",
                    new Object[]{spans.doc(), spans.start(), spans.end()});
          }
          tvm.passage.lDocId = spans.doc();
          tvm.passage.field = sQuery.getField();
          //score this window
          try {
            addPassage(tvm.passage, rankedPassages, termWeights, bigramWeights, adjWeight, secondAdjWeight, bigramWeight);
          } catch (CloneNotSupportedException e) {
            throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Internal error cloning Passage", e);
          }
          //clear out the entries for the next round
          tvm.passage.clear();
        }
      }
      NamedList qaResp = new NamedList();
      rb.rsp.add("qaResponse", qaResp);
      int rows = params.getInt(QA_ROWS, 5);

      SchemaField uniqField = rb.req.getSchema().getUniqueKeyField();
      if (rankedPassages.size() > 0) {
        int size = Math.min(rows, rankedPassages.size());
        Set<String> fields = new HashSet<String>();
        for (int i = size - 1; i >= 0; i--) {
          Passage passage = rankedPassages.pop();
          if (passage != null) {
            NamedList passNL = new NamedList();
            qaResp.add(("answer"), passNL);
            String idName;
            String idValue;
            if (uniqField != null) {
              idName = uniqField.getName();
              fields.add(idName);
              fields.add(passage.field);//prefetch this now, so that it is cached
              idValue = searcher.doc(passage.lDocId, fields).get(idName);
            } else {
              idName = "luceneDocId";
              idValue = String.valueOf(passage.lDocId);
            }
            passNL.add(idName, idValue);
            passNL.add("field", passage.field);
            //get the window
            String fldValue = searcher.doc(passage.lDocId, fields).get(passage.field);
            if (fldValue != null) {
              //get the window of words to display, we don't use the passage window, as that is based on the term vector
              int start = passage.terms.first().start;//use the offsets
              int end = passage.terms.last().end;
              if (start >= 0 && start < fldValue.length() &&
View Full Code Here
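For reference, the kind of query this component insists on can be built like
this (field and terms hypothetical):

  SpanQuery[] clauses = new SpanQuery[] {
      new SpanTermQuery(new Term("body", "solr")),
      new SpanTermQuery(new Term("body", "searcher"))
  };
  SpanNearQuery query = new SpanNearQuery(clauses, 5, true); // slop 5, in order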

  }

  @Override
  public void write(Writer w, SolrQueryRequest req,
                    SolrQueryResponse rsp) throws IOException {
    SolrIndexSearcher searcher = req.getSearcher();
    NamedList nl = rsp.getValues();
    int sz = nl.size();
    for (int li = 0; li < sz; li++) {
      Object val = nl.getVal(li);
      if (val instanceof DocList) {
        DocList dl = (DocList) val;
        DocIterator iterator = dl.iterator();
        w.append("<ul>\n");
        while (iterator.hasNext()) {
          int id = iterator.nextDoc();
          Document doc = searcher.doc(id, fields);
          String name = doc.get("word");
          w.append("<li>" + name + "</li>\n");
        }
        w.append("</ul>\n");
      }
View Full Code Here
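A custom QueryResponseWriter like this one is registered in solrconfig.xml and
selected per request with the wt parameter (the name and class here are
hypothetical):

  <queryResponseWriter name="typeahead" class="com.example.TypeAheadResponseWriter"/>

A request such as /select?q=word:sol*&wt=typeahead would then emit the HTML list.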

      iter = docIds.iterator();
    } else {
      DocList list = listAndSet.docList;
      iter = list.iterator();
    }
    SolrIndexSearcher searcher = rb.req.getSearcher();

    IndexReader reader = searcher.getReader();
    //the TVMapper is a TermVectorMapper which can be used to optimize loading of Term Vectors
    SchemaField keyField = schema.getUniqueKeyField();
    String uniqFieldName = null;
    if (keyField != null) {
      uniqFieldName = keyField.getName();
View Full Code Here
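The excerpt above is from Solr's TermVectorComponent, which is driven by the
tv.* request parameters. A hedged SolrJ sketch of enabling it, assuming the
component is wired into the handler and server is an existing SolrServer:

  SolrQuery q = new SolrQuery("*:*");
  q.set("tv", true);           // turn the TermVectorComponent on
  q.set("tv.tf", true);        // include term frequencies
  q.set("tv.positions", true); // include positions
  q.set("tv.offsets", true);   // include offsets
  QueryResponse rsp = server.query(q);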
