Examples of SegmentReader


Examples of org.apache.lucene.index.SegmentReader

    // Pull the Hadoop job configuration and the lucene2seq settings from the task context.
    Configuration configuration = context.getConfiguration();
    LuceneStorageConfiguration lucene2SeqConfiguration = new LuceneStorageConfiguration(configuration);

    // Open a reader over the single Lucene segment backing this input split.
    SegmentInfoPerCommit segmentInfo = inputSplit.getSegment(configuration);
    segmentReader = new SegmentReader(segmentInfo, USE_TERM_INFO, IOContext.READ);

    // Score the configured query against just this segment.
    IndexSearcher searcher = new IndexSearcher(segmentReader);
    Weight weight = lucene2SeqConfiguration.getQuery().createWeight(searcher);
    scorer = weight.scorer(segmentReader.getContext(), false, false, null);
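
Constructing a SegmentReader by hand, as above, ties the code to Lucene 4.x internals such as SegmentInfoPerCommit. When the goal is simply to visit each segment of an open index, iterating the leaves of a DirectoryReader yields the same per-segment readers with far less ceremony. A minimal sketch against the Lucene 4.x API; the index path and class name are illustrative:

    import java.io.File;

    import org.apache.lucene.index.AtomicReaderContext;
    import org.apache.lucene.index.DirectoryReader;
    import org.apache.lucene.index.SegmentReader;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.FSDirectory;

    public class ListSegmentReaders {
      public static void main(String[] args) throws Exception {
        Directory dir = FSDirectory.open(new File("/path/to/index")); // placeholder path
        DirectoryReader reader = DirectoryReader.open(dir);
        try {
          // Each leaf of a DirectoryReader is an atomic reader; for a regular
          // on-disk index every leaf is backed by a SegmentReader.
          for (AtomicReaderContext leaf : reader.leaves()) {
            if (leaf.reader() instanceof SegmentReader) {
              SegmentReader sr = (SegmentReader) leaf.reader();
              System.out.println(sr.getSegmentName() + ": " + sr.numDocs() + " live docs");
            }
          }
        } finally {
          reader.close();
          dir.close();
        }
      }
    }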

Examples of org.apache.lucene.index.SegmentReader

  }

  private void assertSegmentContainsOneDoc(String segmentName) throws IOException {
    LuceneSegmentInputSplit inputSplit = new LuceneSegmentInputSplit(indexPath1, segmentName, 1000);
    SegmentInfoPerCommit segment = inputSplit.getSegment(configuration);
    // Term-infos index divisor 1 loads the full terms index for the segment.
    SegmentReader segmentReader = new SegmentReader(segment, 1, IOContext.READ);
    assertEquals(segmentName, segment.info.name);
    assertEquals(1, segmentReader.numDocs());
  }
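
The test resolves a segment by name through Mahout's LuceneSegmentInputSplit. Outside Hadoop, the same metadata can be read straight from the commit point with SegmentInfos, without opening any readers. A hedged sketch against the Lucene 4.x API (dir is an open Directory as in the earlier sketch):

    import org.apache.lucene.index.SegmentInfoPerCommit;
    import org.apache.lucene.index.SegmentInfos;
    import org.apache.lucene.store.Directory;

    public class SegmentListing {
      // List every segment in the latest commit with its doc and delete counts.
      public static void listSegments(Directory dir) throws Exception {
        SegmentInfos infos = new SegmentInfos();
        infos.read(dir); // reads the most recent segments_N file
        for (SegmentInfoPerCommit si : infos.asList()) {
          System.out.println(si.info.name + ": " + si.info.getDocCount()
              + " docs, " + si.getDelCount() + " deleted");
        }
      }
    }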

Examples of org.apache.lucene.index.SegmentReader

  @Override
  protected void setup(Context context) throws IOException, InterruptedException {
    Configuration configuration = context.getConfiguration();
    l2sConf = new LuceneStorageConfiguration(configuration);
    // Each map task is fed exactly one Lucene segment by its input split.
    LuceneSegmentInputSplit inputSplit = (LuceneSegmentInputSplit) context.getInputSplit();
    SegmentInfoPerCommit segmentInfo = inputSplit.getSegment(configuration);
    segmentReader = new SegmentReader(segmentInfo, LuceneSeqFileHelper.USE_TERM_INFOS, IOContext.READ);
  }
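
The reader opened in setup() keeps segment files open for the lifetime of the map task, so it should be released when the task finishes. The class is truncated here; a plausible counterpart, not shown in the snippet, might look like:

    @Override
    protected void cleanup(Context context) throws IOException, InterruptedException {
      // Release the per-segment reader opened in setup().
      if (segmentReader != null) {
        segmentReader.close();
      }
    }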

Examples of org.apache.lucene.index.SegmentReader

   
    populate(directory, config);

    // Open the index and grab the reader of its only segment.
    DirectoryReader r0 = IndexReader.open(directory);
    SegmentReader r = LuceneTestCase.getOnlySegmentReader(r0);
    String segment = r.getSegmentName();
    r.close();

    // Read the segment's field infos with the pre-4.0 (preflex) codec, then
    // measure the on-disk size of its terms-index (.tii) file.
    FieldInfosReader infosReader = new PreFlexRWCodec().fieldInfosFormat().getFieldInfosReader();
    FieldInfos fieldInfos = infosReader.read(directory, segment, IOContext.READONCE);
    String segmentFileName = IndexFileNames.segmentFileName(segment, "", Lucene3xPostingsFormat.TERMS_INDEX_EXTENSION);
    long tiiFileLength = directory.fileLength(segmentFileName);

Examples of org.apache.lucene.index.SegmentReader

  }

  /** Returns true if the given reader is sorted by the given sorter. */
  public static boolean isSorted(AtomicReader reader, Sorter sorter) {
    if (reader instanceof SegmentReader) {
      final SegmentReader segReader = (SegmentReader) reader;
      final Map<String, String> diagnostics = segReader.getSegmentInfo().info.getDiagnostics();
      if (diagnostics != null && sorter.getID().equals(diagnostics.get(SORTER_ID_PROP))) {
        return true;
      }
    }
    return false;
  }
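
isSorted() inspects a single atomic reader, keying off a sorter ID that a sort-aware merge records in the segment's diagnostics map. To vet a whole index you apply it leaf by leaf. A hedged sketch, assuming the static method above is reachable as SortingMergePolicy.isSorted and that reader and sorter are already in scope (both assumptions):

    // Verify that every segment of the index carries the sorter's ID.
    boolean allSorted = true;
    for (AtomicReaderContext leaf : reader.leaves()) {
      allSorted &= SortingMergePolicy.isSorted(leaf.reader(), sorter); // assumed entry point
    }
    System.out.println("fully sorted: " + allSorted);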

Examples of org.apache.lucene.index.SegmentReader

  public int merge(MergeState mergeState) throws IOException {
    int docCount = 0;
    int idx = 0;

    for (AtomicReader reader : mergeState.readers) {
      final SegmentReader matchingSegmentReader = mergeState.matchingSegmentReaders[idx++];
      CompressingStoredFieldsReader matchingFieldsReader = null;
      if (matchingSegmentReader != null) {
        final StoredFieldsReader fieldsReader = matchingSegmentReader.getFieldsReader();
        // we can only bulk-copy if the matching reader is also a CompressingStoredFieldsReader
        if (fieldsReader != null && fieldsReader instanceof CompressingStoredFieldsReader) {
          matchingFieldsReader = (CompressingStoredFieldsReader) fieldsReader;
        }
      }

Examples of org.apache.lucene.index.SegmentReader

    List<IndexReader> boboReaderList = new LinkedList<IndexReader>();
    ReaderUtil.gatherSubReaders((List<IndexReader>)boboReaderList, in);
    Map<String,BoboIndexReader> readerMap = new HashMap<String,BoboIndexReader>();
    for (IndexReader reader : boboReaderList){
      BoboIndexReader boboReader = (BoboIndexReader)reader;
      SegmentReader sreader = (SegmentReader)(boboReader.in);
      readerMap.put(sreader.getSegmentName(),boboReader);
    }
   
    ArrayList<BoboIndexReader> currentReaders = new ArrayList<BoboIndexReader>(size);
    boolean isNewReader = false;
    for (int i=0;i<size;++i){
      SegmentInfo sinfo = (SegmentInfo)sinfos.info(i);
      BoboIndexReader breader = readerMap.remove(sinfo.name);
      if (breader!=null){
        // should use SegmentReader.reopen
        // TODO: see LUCENE-2559
        BoboIndexReader newReader = (BoboIndexReader)breader.reopen(true);
        if (newReader!=breader){
          isNewReader = true;
        }
        if (newReader!=null){
          currentReaders.add(newReader);
        }
      }
      else{
        isNewReader = true;
        SegmentReader newSreader = SegmentReader.get(true, sinfo, 1);
        breader = BoboIndexReader.getInstanceAsSubReader(newSreader,this._facetHandlers,this._runtimeFacetHandlerFactories);
        breader._dir = _dir;
        currentReaders.add(breader);
      }
    }
    isNewReader = isNewReader || (readerMap.size() != 0);
    if (!isNewReader){
      return this;
    }
    else{
      MultiReader newMreader = new MultiReader(currentReaders.toArray(new BoboIndexReader[currentReaders.size()]),false);
      BoboIndexReader newReader = BoboIndexReader.getInstanceAsSubReader(newMreader,this._facetHandlers,this._runtimeFacetHandlerFactories);
      newReader._dir = _dir;
      return newReader;
    }
  }
  else if (in instanceof SegmentReader){
     // should use SegmentReader.reopen
    // TODO: see LUCENE-2559
   
    SegmentReader sreader = (SegmentReader)in;
    int numDels = sreader.numDeletedDocs();
   
    SegmentInfo sinfo = null;
    boolean sameSeg = false;
    //get SegmentInfo instance
    for (int i=0;i<size;++i){
      SegmentInfo sinfoTmp = (SegmentInfo)sinfos.info(i);
      if (sinfoTmp.name.equals(sreader.getSegmentName())){
        int numDels2 = sinfoTmp.getDelCount();
        sameSeg = numDels==numDels2;
        sinfo = sinfoTmp;
        break;
      }
    }
  
    if (sinfo == null){
      // segment no longer exists
      return null;
    }
    if (sameSeg){
      // same segment, same delete count: the current reader is still current
      return this;
    }
    else{
      SegmentReader newSreader = SegmentReader.get(true, sinfo, 1);
      return BoboIndexReader.getInstanceAsSubReader(newSreader,this._facetHandlers,this._runtimeFacetHandlerFactories);
    }
  }
  else{
    // should not reach here, a catch-all default case
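
The heart of the reopen logic above is a reuse test: a cached per-segment reader stays valid only if a segment with the same name and the same delete count survives into the new commit. The bobo code targets the Lucene 3.x SegmentInfo API; restated as a hedged sketch against the 4.x API used elsewhere on this page:

    import org.apache.lucene.index.SegmentInfoPerCommit;
    import org.apache.lucene.index.SegmentInfos;
    import org.apache.lucene.index.SegmentReader;

    public class ReuseCheck {
      // A cached reader can be reused only if its segment survives in the new
      // commit with an unchanged delete count; otherwise it must be reopened.
      public static boolean canReuse(SegmentReader cached, SegmentInfos newInfos) {
        for (SegmentInfoPerCommit si : newInfos.asList()) {
          if (si.info.name.equals(cached.getSegmentName())) {
            return si.getDelCount() == cached.numDeletedDocs();
          }
        }
        return false; // segment was merged away or deleted
      }
    }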

Examples of org.apache.lucene.index.SegmentReader

  public int merge(MergeState mergeState) throws IOException {
    int docCount = 0;
    int idx = 0;

    for (AtomicReader reader : mergeState.readers) {
      final SegmentReader matchingSegmentReader = mergeState.matchingSegmentReaders[idx++];
      CompressingTermVectorsReader matchingVectorsReader = null;
      if (matchingSegmentReader != null) {
        final TermVectorsReader vectorsReader = matchingSegmentReader.getTermVectorsReader();
        // we can only bulk-copy if the matching reader is also a CompressingTermVectorsReader
        if (vectorsReader != null && vectorsReader instanceof CompressingTermVectorsReader) {
          matchingVectorsReader = (CompressingTermVectorsReader) vectorsReader;
        }
      }

      final int maxDoc = reader.maxDoc();
      final Bits liveDocs = reader.getLiveDocs();

      if (matchingVectorsReader == null
          || matchingVectorsReader.getCompressionMode() != compressionMode
          || matchingVectorsReader.getChunkSize() != chunkSize
          || matchingVectorsReader.getPackedIntsVersion() != PackedInts.VERSION_CURRENT) {
        // naive merge...
        for (int i = nextLiveDoc(0, liveDocs, maxDoc); i < maxDoc; i = nextLiveDoc(i + 1, liveDocs, maxDoc)) {
          final Fields vectors = reader.getTermVectors(i);
          addAllDocVectors(vectors, mergeState);
          ++docCount;
          mergeState.checkAbort.work(300);
        }
      } else {
        final CompressingStoredFieldsIndexReader index = matchingVectorsReader.getIndex();
        final IndexInput vectorsStream = matchingVectorsReader.getVectorsStream();
        for (int i = nextLiveDoc(0, liveDocs, maxDoc); i < maxDoc; ) {
          if (pendingDocs.isEmpty()
              && (i == 0 || index.getStartPointer(i - 1) < index.getStartPointer(i))) { // start of a chunk
            final long startPointer = index.getStartPointer(i);
            vectorsStream.seek(startPointer);
            final int docBase = vectorsStream.readVInt();
            final int chunkDocs = vectorsStream.readVInt();
            assert docBase + chunkDocs <= matchingSegmentReader.maxDoc();
            if (docBase + chunkDocs < matchingSegmentReader.maxDoc()
                && nextDeletedDoc(docBase, liveDocs, docBase + chunkDocs) == docBase + chunkDocs) {
              final long chunkEnd = index.getStartPointer(docBase + chunkDocs);
              final long chunkLength = chunkEnd - vectorsStream.getFilePointer();
              indexWriter.writeIndex(chunkDocs, this.vectorsStream.getFilePointer());
              this.vectorsStream.writeVInt(docCount);
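
The merge loop above leans on two helpers that the snippet does not show, nextLiveDoc and nextDeletedDoc: each scans forward from doc to the next live (respectively deleted) document, returning maxDoc when none remains. A hedged reconstruction (the real ones are private to the writer class):

    import org.apache.lucene.util.Bits;

    final class LiveDocScan {
      // Next live doc at or after 'doc'; with no deletions every doc is live.
      static int nextLiveDoc(int doc, Bits liveDocs, int maxDoc) {
        if (liveDocs == null) {
          return doc;
        }
        while (doc < maxDoc && !liveDocs.get(doc)) {
          ++doc;
        }
        return doc;
      }

      // Next deleted doc at or after 'doc'; with no deletions there is none.
      static int nextDeletedDoc(int doc, Bits liveDocs, int maxDoc) {
        if (liveDocs == null) {
          return maxDoc;
        }
        while (doc < maxDoc && liveDocs.get(doc)) {
          ++doc;
        }
        return doc;
      }
    }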