Package org.apache.lucene.index

Examples of org.apache.lucene.index.SegmentReader$FieldsReaderLocal


   
    IndexReader[] subReaders = inner.getSequentialSubReaders();
    ArrayList<IndexReader> subReaderList = new ArrayList<IndexReader>(subReaders.length);
    for (IndexReader subReader : subReaders){
      if (subReader instanceof SegmentReader){
        SegmentReader sr = (SegmentReader)subReader;
        String segmentName = sr.getSegmentName();
        ZoieSegmentReader<R> zoieSegmentReader = _readerMap.get(segmentName);
        if (zoieSegmentReader!=null){
          int numDocs = sr.numDocs();
          int maxDocs = sr.maxDoc();
          boolean hasDeletes = false;
          if (zoieSegmentReader.numDocs() != numDocs || zoieSegmentReader.maxDoc() != maxDocs){
            hasDeletes = true;
          }
          zoieSegmentReader = new ZoieSegmentReader<R>(zoieSegmentReader,sr,hasDeletes);
View Full Code Here


      return BitsFilteredDocIdSet.wrap(docIdSet, acceptDocs);
    }
    // This will only allow a single instance be created per reader per filter
    Object lock = getLock(key);
    synchronized (lock) {
      SegmentReader segmentReader = getSegmentReader(reader);
      if (segmentReader == null) {
        LOG.warn("Could not find SegmentReader from [{0}]", reader);
        return _filter.getDocIdSet(context, acceptDocs);
      }
      Directory directory = getDirectory(segmentReader);
      if (directory == null) {
        LOG.warn("Could not find Directory from [{0}]", segmentReader);
        return _filter.getDocIdSet(context, acceptDocs);
      }
      _misses.incrementAndGet();
      String segmentName = segmentReader.getSegmentName();
      docIdSet = docIdSetToCache(_filter.getDocIdSet(context, null), reader, segmentName, directory);
      _cache.put(key, docIdSet);
      return BitsFilteredDocIdSet.wrap(docIdSet, acceptDocs);
    }
  }
View Full Code Here

      List<? extends IndexReader> sequentialSubReaders = BaseCompositeReaderUtil.getSequentialSubReaders(indexReader);
      int readerIndex = BaseCompositeReaderUtil.readerIndex(indexReader, notAdjustedDocId);
      int readerBase = BaseCompositeReaderUtil.readerBase(indexReader, readerIndex);
      int docId = notAdjustedDocId - readerBase;
      IndexReader orgReader = sequentialSubReaders.get(readerIndex);
      SegmentReader sReader = BlurUtil.getSegmentReader(orgReader);
      if (sReader != null) {
        SegmentReader segmentReader = (SegmentReader) sReader;
        DocIdSet docIdSet = filter.getDocIdSet(segmentReader.getContext(), segmentReader.getLiveDocs());
        DocIdSetIterator iterator = docIdSet.iterator();
        if (iterator == null) {
          return true;
        }
        if (iterator.advance(docId) == docId) {
View Full Code Here

      int notAdjustedPrimeDocId = Integer.parseInt(locationId.substring(indexOf + 1));
      int readerIndex = BaseCompositeReaderUtil.readerIndex(indexReader, notAdjustedPrimeDocId);
      int readerBase = BaseCompositeReaderUtil.readerBase(indexReader, readerIndex);
      int primeDocId = notAdjustedPrimeDocId - readerBase;
      IndexReader orgReader = sequentialSubReaders.get(readerIndex);
      SegmentReader sReader = getSegmentReader(orgReader);
      if (sReader != null) {
        SegmentReader segmentReader = (SegmentReader) sReader;
        Bits liveDocs = segmentReader.getLiveDocs();

        OpenBitSet bitSet = PrimeDocCache.getPrimeDocBitSet(primeDocTerm, segmentReader);
        int nextPrimeDoc = bitSet.nextSetBit(primeDocId + 1);
        int numberOfDocsInRow;
        if (nextPrimeDoc == -1) {
          numberOfDocsInRow = segmentReader.maxDoc() - primeDocId;
        } else {
          numberOfDocsInRow = nextPrimeDoc - primeDocId;
        }
        OpenBitSet docsInRowSpanToFetch = getDocsToFetch(segmentReader, selector, primeDocId, numberOfDocsInRow,
            liveDocs, filter, totalRecords);
        int start = selector.getStartRecord();
        int maxDocsToFetch = selector.getMaxRecordsToFetch();
        int startingPosition = getStartingPosition(docsInRowSpanToFetch, start);
        List<Document> docs = new ArrayList<Document>();
        if (startingPosition < 0) {
          // nothing to fetch
          return docs;
        }
        int totalHeap = 0;
        Tracer trace2 = Trace.trace("fetching docs from index");
        int cursor = 0;
        try {
          for (cursor = startingPosition; cursor < numberOfDocsInRow; cursor++) {
            if (maxDocsToFetch <= 0) {
              return docs;
            }
            if (totalHeap >= maxHeap) {
              LOG.warn("Max heap size exceeded for this request [{0}] max [{1}] for [{2}] and selector [{3}]",
                  totalHeap, maxHeap, context, selector);
              return docs;
            }
            if (docsInRowSpanToFetch.fastGet(cursor)) {
              maxDocsToFetch--;
              int docID = primeDocId + cursor;
              segmentReader.document(docID, fieldSelector);
              Document document = fieldSelector.getDocument();
              if (highlighter.shouldHighlight()) {
                docs.add(highlighter.highlight(docID, document, segmentReader));
              } else {
                docs.add(document);
View Full Code Here

        return null;
      }
      AtomicReaderContext atomicReaderContext = (AtomicReaderContext) ctext;
      AtomicReader atomicReader = atomicReaderContext.reader();
      if (atomicReader instanceof SegmentReader) {
        SegmentReader segmentReader = (SegmentReader) atomicReader;
        if (segmentReader.getSegmentName().equals(segmentName)) {
          return segmentReader.directory();
        }
      }
    }
    return null;
  }
View Full Code Here

        return null;
      }
      AtomicReaderContext atomicReaderContext = (AtomicReaderContext) ctext;
      AtomicReader atomicReader = atomicReaderContext.reader();
      if (atomicReader instanceof SegmentReader) {
        SegmentReader segmentReader = (SegmentReader) atomicReader;
        return segmentReader.directory();
      }
    }
    return null;
  }
View Full Code Here

    return null;
  }

  private Directory getDirectory(AtomicReader atomicReader, String segmentName, String context) {
    if (atomicReader instanceof SegmentReader) {
      SegmentReader segmentReader = (SegmentReader) atomicReader;
      if (segmentReader.getSegmentName().equals(segmentName)) {
        return segmentReader.directory();
      }
    }
    return null;
  }
View Full Code Here

  }

  public Map<String, List<IndexTracerResult>> sampleIndex(AtomicReader atomicReader, String context) throws IOException {
    Map<String, List<IndexTracerResult>> results = new HashMap<String, List<IndexTracerResult>>();
    if (atomicReader instanceof SegmentReader) {
      SegmentReader segmentReader = (SegmentReader) atomicReader;
      Directory directory = segmentReader.directory();
      if (!(directory instanceof TraceableDirectory)) {
        LOG.info("Context [{1}] cannot warmup directory [{0}] needs to be a TraceableDirectory.", directory, context);
        return results;
      }
      IndexTracer tracer = new IndexTracer((TraceableDirectory) directory, _maxSampleSize);
      String fileName = getSampleFileName(segmentReader.getSegmentName());
      List<IndexTracerResult> segmentTraces = new ArrayList<IndexTracerResult>();
      if (directory.fileExists(fileName)) {
        IndexInput input = directory.openInput(fileName, IOContext.READONCE);
        segmentTraces = read(input);
        input.close();
      } else {
        Fields fields = atomicReader.fields();
        for (String field : fields) {
          LOG.debug("Context [{1}] sampling field [{0}].", field, context);
          Terms terms = fields.terms(field);
          boolean hasOffsets = terms.hasOffsets();
          boolean hasPayloads = terms.hasPayloads();
          boolean hasPositions = terms.hasPositions();

          tracer.initTrace(segmentReader, field, hasPositions, hasPayloads, hasOffsets);
          IndexTracerResult result = tracer.runTrace(terms);
          segmentTraces.add(result);
        }
        if (_isClosed.get() || _stop.get()) {
          LOG.info("Context [{0}] index closed", context);
          return null;
        }
        IndexOutput output = directory.createOutput(fileName, IOContext.DEFAULT);
        write(segmentTraces, output);
        output.close();
      }
      results.put(segmentReader.getSegmentName(), segmentTraces);
    }
    return results;
  }
View Full Code Here

  public int merge(MergeState mergeState) throws IOException {
    int docCount = 0;
    int idx = 0;

    for (AtomicReader reader : mergeState.readers) {
      final SegmentReader matchingSegmentReader = mergeState.matchingSegmentReaders[idx++];
      CompressingStoredFieldsReader matchingFieldsReader = null;
      if (matchingSegmentReader != null) {
        final StoredFieldsReader fieldsReader = matchingSegmentReader.getFieldsReader();
        // we can only bulk-copy if the matching reader is also a CompressingStoredFieldsReader
        if (fieldsReader != null && fieldsReader instanceof CompressingStoredFieldsReader) {
          matchingFieldsReader = (CompressingStoredFieldsReader) fieldsReader;
        }
      }
View Full Code Here

  public int merge(MergeState mergeState) throws IOException {
    int docCount = 0;
    int idx = 0;

    for (AtomicReader reader : mergeState.readers) {
      final SegmentReader matchingSegmentReader = mergeState.matchingSegmentReaders[idx++];
      CompressingTermVectorsReader matchingVectorsReader = null;
      if (matchingSegmentReader != null) {
        final TermVectorsReader vectorsReader = matchingSegmentReader.getTermVectorsReader();
        // we can only bulk-copy if the matching reader is also a CompressingTermVectorsReader
        if (vectorsReader != null && vectorsReader instanceof CompressingTermVectorsReader) {
          matchingVectorsReader = (CompressingTermVectorsReader) vectorsReader;
        }
      }

      final int maxDoc = reader.maxDoc();
      final Bits liveDocs = reader.getLiveDocs();

      if (matchingVectorsReader == null
          || matchingVectorsReader.getCompressionMode() != compressionMode
          || matchingVectorsReader.getChunkSize() != chunkSize
          || matchingVectorsReader.getPackedIntsVersion() != PackedInts.VERSION_CURRENT) {
        // naive merge...
        for (int i = nextLiveDoc(0, liveDocs, maxDoc); i < maxDoc; i = nextLiveDoc(i + 1, liveDocs, maxDoc)) {
          final Fields vectors = reader.getTermVectors(i);
          addAllDocVectors(vectors, mergeState);
          ++docCount;
          mergeState.checkAbort.work(300);
        }
      } else {
        final CompressingStoredFieldsIndexReader index = matchingVectorsReader.getIndex();
        final IndexInput vectorsStream = matchingVectorsReader.getVectorsStream();
        for (int i = nextLiveDoc(0, liveDocs, maxDoc); i < maxDoc; ) {
          if (pendingDocs.isEmpty()
              && (i == 0 || index.getStartPointer(i - 1) < index.getStartPointer(i))) { // start of a chunk
            final long startPointer = index.getStartPointer(i);
            vectorsStream.seek(startPointer);
            final int docBase = vectorsStream.readVInt();
            final int chunkDocs = vectorsStream.readVInt();
            assert docBase + chunkDocs <= matchingSegmentReader.maxDoc();
            if (docBase + chunkDocs < matchingSegmentReader.maxDoc()
                && nextDeletedDoc(docBase, liveDocs, docBase + chunkDocs) == docBase + chunkDocs) {
              final long chunkEnd = index.getStartPointer(docBase + chunkDocs);
              final long chunkLength = chunkEnd - vectorsStream.getFilePointer();
              indexWriter.writeIndex(chunkDocs, this.vectorsStream.getFilePointer());
              this.vectorsStream.writeVInt(docCount);
View Full Code Here

TOP

Related Classes of org.apache.lucene.index.SegmentReader$FieldsReaderLocal

Copyright © 2018 www.massapi.com. All rights reserved.
All source code are property of their respective owners. Java is a trademark of Sun Microsystems, Inc and owned by ORACLE Inc. Contact coftware#gmail.com.