Package uk.ac.ucl.panda.utility.structure

Examples of uk.ac.ucl.panda.utility.structure.SegmentInfos
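The snippets below are drawn from index-writing and merging code (adapted from Apache Lucene, as the LUCENE-948 reference in the first excerpt suggests). A SegmentInfos holds the contents of a segments_N file: an ordered list of SegmentInfo entries, one per segment of the index, plus the commit generation. As orientation, here is a minimal sketch, not taken from the sources below, that reads the current commit and lists its segments; the Directory/IOException imports and how the directory is obtained are assumptions.

    // Minimal sketch: list the segments recorded in the current
    // segments_N file.  Directory/IOException imports are assumed;
    // SegmentInfos and SegmentInfo come from this package.
    public static void listSegments(Directory dir) throws IOException {
      SegmentInfos sis = new SegmentInfos();
      sis.read(dir);  // loads the most recent segments_N
      System.out.println("generation=" + sis.getGeneration());
      for (int i = 0; i < sis.size(); i++) {
        SegmentInfo info = sis.info(i);
        System.out.println(info.name + ": " + info.docCount + " docs"
            + (info.hasDeletions() ? " (has deletions)" : ""));
      }
    }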


          // incref all files it refers to:
          if (SegmentInfos.generationFromSegmentsFileName(fileName) <= currentGen) {
            if (infoStream != null) {
              message("init: load commit \"" + fileName + "\"");
            }
            SegmentInfos sis = new SegmentInfos();
            try {
              sis.read(directory, fileName);
            } catch (FileNotFoundException e) {
              // LUCENE-948: on NFS (and maybe others), if
              // you have writers switching back and forth
              // between machines, it's very likely that the
              // dir listing will be stale and will claim a
              // file segments_X exists when in fact it
              // doesn't.  So, we catch this and handle it
              // as if the file does not exist
              if (infoStream != null) {
                message("init: hit FileNotFoundException when loading commit \"" + fileName + "\"; skipping this commit point");
              }
              sis = null;
            }
            if (sis != null) {
              CommitPoint commitPoint = new CommitPoint(sis);
              if (sis.getGeneration() == segmentInfos.getGeneration()) {
                currentCommitPoint = commitPoint;
              }
              commits.add(commitPoint);
              incRef(sis, true);
            }
          }
        }
      }
    }

    if (currentCommitPoint == null) {
      // We did not in fact see the segments_N file
      // corresponding to the segmentInfos that was passed
      // in.  Yet, it must exist, because our caller holds
      // the write lock.  This can happen when the directory
      // listing was stale (e.g. when the index is accessed via an
      // NFS client with a stale directory listing cache).  So we
      // now try to explicitly open this commit point:
      SegmentInfos sis = new SegmentInfos();
      try {
        sis.read(directory, segmentInfos.getCurrentSegmentFileName());
      } catch (IOException e) {
        throw new CorruptIndexException("failed to locate current segments_N file");
      }
      if (infoStream != null)
        message("forced open of current segments file " + segmentInfos.getCurrentSegmentFileName());
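
This first excerpt, from the file deleter's initialization, loads every commit point whose generation is at or below the current one, and treats a FileNotFoundException as "this commit no longer exists" to cope with stale NFS directory listings; if the expected current commit was missed entirely, it is re-opened explicitly. The defensive load can be factored into a small helper; this is a hypothetical sketch, not code from the class itself:

    // Hypothetical helper mirroring the pattern above: load a commit
    // point, or return null if the directory listing was stale and
    // the segments_N file is already gone.
    private static SegmentInfos tryReadCommit(Directory directory, String fileName)
        throws IOException {
      SegmentInfos sis = new SegmentInfos();
      try {
        sis.read(directory, fileName);
      } catch (FileNotFoundException e) {
        // Stale listing (e.g. over NFS): segments_X was listed but
        // no longer exists; treat it as a missing commit point.
        return null;
      }
      return sis;
    }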


    startTransaction();

    try {
      for (int i = 0; i < dirs.length; i++) {
        SegmentInfos sis = new SegmentInfos();    // read infos from dir
        sis.read(dirs[i]);
        for (int j = 0; j < sis.size(); j++) {
          segmentInfos.addElement(sis.info(j));    // add each info
        }
      }

      optimize();

        if (directory == dirs[i]) {
          // cannot add this index: segments may be deleted in merge before added
          throw new IllegalArgumentException("Cannot add this index to itself");
        }

        SegmentInfos sis = new SegmentInfos(); // read infos from dir
        sis.read(dirs[i]);
        for (int j = 0; j < sis.size(); j++) {
          SegmentInfo info = sis.info(j);
          segmentInfos.addElement(info); // add each info
        }
      }

      maybeMerge();
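
Both addIndexes excerpts above follow the same pattern: read each source directory's SegmentInfos, append every SegmentInfo to the writer's own segmentInfos, then trigger optimize() or maybeMerge() to fold the new segments in. The IllegalArgumentException guard exists because merging may delete source segments before they are added, so an index can never be added to itself.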

      // If we are flushing docs, segment must not be null:
      assert segment != null || !flushDocs;

      if (flushDocs || flushDeletes) {

        SegmentInfos rollback = null;

        if (flushDeletes)
          rollback = (SegmentInfos) segmentInfos.clone();

        boolean success = false;

        try {
          if (flushDocs) {

            if (0 == docStoreOffset && flushDocStores) {
              // This means we are flushing private doc stores
              // with this segment, so it will not be shared
              // with other segments
              assert docStoreSegment != null;
              assert docStoreSegment.equals(segment);
              docStoreOffset = -1;
              docStoreIsCompoundFile = false;
              docStoreSegment = null;
            }

            int flushedDocCount = docWriter.flush(flushDocStores);
         
            newSegment = new SegmentInfo(segment,
                                         flushedDocCount,
                                         directory, false, true,
                                         docStoreOffset, docStoreSegment,
                                         docStoreIsCompoundFile);
            segmentInfos.addElement(newSegment);
          }

          if (flushDeletes) {
            // we should be able to change this so we can
            // buffer deletes longer and then flush them to
            // multiple flushed segments, when
            // autoCommit=false
            applyDeletes(flushDocs);
            doAfterFlush();
          }

          checkpoint();
          success = true;
        } finally {
          if (!success) {

            if (infoStream != null)
              message("hit exception flushing segment " + segment);
               
            if (flushDeletes) {

              // Carefully check if any partial .del files
              // should be removed:
              final int size = rollback.size();
              for(int i=0;i<size;i++) {
                final String newDelFileName = segmentInfos.info(i).getDelFileName();
                final String delFileName = rollback.info(i).getDelFileName();
                if (newDelFileName != null && !newDelFileName.equals(delFileName))
                  deleter.deleteFile(newDelFileName);
              }

              // Fully replace the segmentInfos since flushed
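
The flush excerpt relies on clone-and-rollback: segmentInfos is cloned before buffered deletes are applied, and on failure the clone is used both to spot (and delete) partial .del files and to restore the previous state. A stripped-down, hypothetical sketch of the same pattern, with mutateSegmentInfos() standing in for the real flush work:

    // Hypothetical sketch of the clone-and-rollback pattern above;
    // 'deleter' stands in for the writer's IndexFileDeleter.
    SegmentInfos rollback = (SegmentInfos) segmentInfos.clone();
    boolean success = false;
    try {
      mutateSegmentInfos();   // placeholder for the real flush work
      success = true;
    } finally {
      if (!success) {
        // Delete any partial .del files written since the clone:
        for (int i = 0; i < rollback.size(); i++) {
          final String newDelFileName = segmentInfos.info(i).getDelFileName();
          final String delFileName = rollback.info(i).getDelFileName();
          if (newDelFileName != null && !newDelFileName.equals(delFileName))
            deleter.deleteFile(newDelFileName);
        }
      }
    }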

    boolean success = false;

    int start;

    try {
      SegmentInfos sourceSegmentsClone = merge.segmentsClone;
      SegmentInfos sourceSegments = merge.segments;

      start = ensureContiguousMerge(merge);
      if (infoStream != null)
        message("commitMerge " + merge.segString(directory));

      // Carefully merge deletes that occurred after we
      // started merging:

      BitVector deletes = null;
      int docUpto = 0;

      final int numSegmentsToMerge = sourceSegments.size();
      for(int i=0;i<numSegmentsToMerge;i++) {
        final SegmentInfo previousInfo = sourceSegmentsClone.info(i);
        final SegmentInfo currentInfo = sourceSegments.info(i);

        assert currentInfo.docCount == previousInfo.docCount;

        final int docCount = currentInfo.docCount;

        if (previousInfo.hasDeletions()) {

          // There were deletes on this segment when the merge
          // started.  The merge has collapsed away those
          // deletes, but, if new deletes were flushed since
          // the merge started, we must now carefully keep any
          // newly flushed deletes, mapping them to the new
          // docIDs.

          assert currentInfo.hasDeletions();

          // Load deletes present @ start of merge, for this segment:
          BitVector previousDeletes = new BitVector(previousInfo.dir, previousInfo.getDelFileName());

          if (!currentInfo.getDelFileName().equals(previousInfo.getDelFileName())) {
            // This means this segment has had new deletes
            // committed since we started the merge, so we
            // must merge them:
            if (deletes == null)
              deletes = new BitVector(merge.info.docCount);

            BitVector currentDeletes = new BitVector(currentInfo.dir, currentInfo.getDelFileName());
            for(int j=0;j<docCount;j++) {
              if (previousDeletes.get(j))
                assert currentDeletes.get(j);
              else {
                if (currentDeletes.get(j))
                  deletes.set(docUpto);
                docUpto++;
              }
            }
          } else
            docUpto += docCount - previousDeletes.count();
       
        } else if (currentInfo.hasDeletions()) {
          // This segment had no deletes before but now it
          // does:
          if (deletes == null)
            deletes = new BitVector(merge.info.docCount);
          BitVector currentDeletes = new BitVector(directory, currentInfo.getDelFileName());

          for(int j=0;j<docCount;j++) {
            if (currentDeletes.get(j))
              deletes.set(docUpto);
            docUpto++;
          }

        } else
          // No deletes before or after
          docUpto += currentInfo.docCount;

        merge.checkAborted(directory);
      }

      if (deletes != null) {
        merge.info.advanceDelGen();
        deletes.write(directory, merge.info.getDelFileName());
      }
      success = true;
    } finally {
      if (!success) {
        if (infoStream != null)
          message("hit exception creating merged deletes file");
        deleter.refresh(merge.info.name);
      }
    }

    // Simple optimization: if the doc store we are using
    // has been closed and is now in compound format (but
    // wasn't when we started), then we will switch to the
    // compound format as well:
    final String mergeDocStoreSegment = merge.info.getDocStoreSegment();
    if (mergeDocStoreSegment != null && !merge.info.getDocStoreIsCompoundFile()) {
      final int size = segmentInfos.size();
      for(int i=0;i<size;i++) {
        final SegmentInfo info = segmentInfos.info(i);
        final String docStoreSegment = info.getDocStoreSegment();
        if (docStoreSegment != null &&
            docStoreSegment.equals(mergeDocStoreSegment) &&
            info.getDocStoreIsCompoundFile()) {
          merge.info.setDocStoreIsCompoundFile(true);
          break;
        }
      }
    }

    success = false;
    SegmentInfos rollback = null;
    try {
      rollback = (SegmentInfos) segmentInfos.clone();
      segmentInfos.subList(start, start + merge.segments.size()).clear();
      segmentInfos.add(start, merge.info);
      checkpoint();
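
The subtle part of the merge-commit excerpt is the delete remapping: docUpto advances once for every document that survives the merge, so a document deleted after the merge started sets the bit at its new, compacted docID in the merged segment's delete vector. A self-contained illustration (assumed names) using boolean[] in place of the internal BitVector class:

    // Illustration of the docID remapping above, with boolean[]
    // standing in for BitVector.
    // prev[j]: doc j was already deleted when the merge started
    //          (the merge drops it entirely).
    // curr[j]: doc j is deleted now (may include newer deletes).
    static boolean[] remapDeletes(boolean[] prev, boolean[] curr,
                                  int mergedDocCount) {
      boolean[] merged = new boolean[mergedDocCount];
      int docUpto = 0;
      for (int j = 0; j < prev.length; j++) {
        if (prev[j]) {
          assert curr[j];            // deletes never "un-happen"
        } else {
          if (curr[j])               // deleted after merge start:
            merged[docUpto] = true;  // mark at the compacted docID
          docUpto++;                 // survivors get consecutive IDs
        }
      }
      return merged;
    }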

    return true;
  }

  private void decrefMergeSegments(MergePolicy.OneMerge merge) throws IOException {
    final SegmentInfos sourceSegmentsClone = merge.segmentsClone;
    final int numSegmentsToMerge = sourceSegmentsClone.size();
    assert merge.increfDone;
    merge.increfDone = false;
    for(int i=0;i<numSegmentsToMerge;i++) {
      final SegmentInfo previousInfo = sourceSegmentsClone.info(i);
      // Decref all files for this SegmentInfo (this
      // matches the incref in mergeInit):
      if (previousInfo.dir == directory)
        deleter.decRef(previousInfo.files());
    }
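
decrefMergeSegments releases the file references taken when the merge was initialized; only segments that live in the writer's own directory are ref-counted through the deleter, which is why the previousInfo.dir == directory check is needed.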

    assert merge.registerDone;

    if (merge.isAborted())
      return;

    final SegmentInfos sourceSegments = merge.segments;
    final int end = sourceSegments.size();

    ensureContiguousMerge(merge);

    // Check whether this merge will allow us to skip
    // merging the doc stores (stored field & vectors).
    // This is a very substantial optimization (saves tons
    // of IO) that can only be applied with
    // autoCommit=false.

    Directory lastDir = directory;
    String lastDocStoreSegment = null;
    int next = -1;

    boolean mergeDocStores = false;
    boolean doFlushDocStore = false;
    final String currentDocStoreSegment = docWriter.getDocStoreSegment();

    // Test each segment to be merged: check if we need to
    // flush/merge doc stores
    for (int i = 0; i < end; i++) {
      SegmentInfo si = sourceSegments.info(i);

      // If it has deletions we must merge the doc stores
      if (si.hasDeletions())
        mergeDocStores = true;

      // If it has its own (private) doc stores we must
      // merge the doc stores
      if (-1 == si.getDocStoreOffset())
        mergeDocStores = true;

      // If it has a different doc store segment than
      // previous segments, we must merge the doc stores
      String docStoreSegment = si.getDocStoreSegment();
      if (docStoreSegment == null)
        mergeDocStores = true;
      else if (lastDocStoreSegment == null)
        lastDocStoreSegment = docStoreSegment;
      else if (!lastDocStoreSegment.equals(docStoreSegment))
        mergeDocStores = true;

      // Segments' docStoreOffsets must be in-order and
      // contiguous.  For the default merge policy this
      // will always be the case, but for an arbitrary
      // merge policy it may not be
      if (-1 == next)
        next = si.getDocStoreOffset() + si.docCount;
      else if (next != si.getDocStoreOffset())
        mergeDocStores = true;
      else
        next = si.getDocStoreOffset() + si.docCount;
     
      // If the segment comes from a different directory
      // we must merge
      if (lastDir != si.dir)
        mergeDocStores = true;

      // If the segment is referencing the current "live"
      // doc store outputs then we must merge
      if (si.getDocStoreOffset() != -1 && currentDocStoreSegment != null && si.getDocStoreSegment().equals(currentDocStoreSegment))
        doFlushDocStore = true;
    }

    final int docStoreOffset;
    final String docStoreSegment;
    final boolean docStoreIsCompoundFile;

    if (mergeDocStores) {
      docStoreOffset = -1;
      docStoreSegment = null;
      docStoreIsCompoundFile = false;
    } else {
      SegmentInfo si = sourceSegments.info(0);       
      docStoreOffset = si.getDocStoreOffset();
      docStoreSegment = si.getDocStoreSegment();
      docStoreIsCompoundFile = si.getDocStoreIsCompoundFile();
    }
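
The mergeInit excerpt decides whether the merged segment can keep sharing its sources' doc stores (stored fields and term vectors) instead of rewriting them: that is only possible when no source segment has deletions or a private doc store, all of them reference the same shared doc store contiguously and in order, and they all live in the writer's directory. Condensed into a hypothetical predicate (omitting the separate doFlushDocStore bookkeeping):

    // Hypothetical condensation of the loop above: true means the
    // doc stores must be merged (rewritten) rather than shared.
    static boolean mustMergeDocStores(SegmentInfos segs, Directory indexDir) {
      String lastDocStoreSegment = null;
      int next = -1;
      for (int i = 0; i < segs.size(); i++) {
        SegmentInfo si = segs.info(i);
        String ds = si.getDocStoreSegment();
        if (si.hasDeletions()                  // deletes force a rewrite
            || si.getDocStoreOffset() == -1    // private doc store
            || ds == null
            || (lastDocStoreSegment != null && !ds.equals(lastDocStoreSegment))
            || (next != -1 && next != si.getDocStoreOffset()) // not contiguous
            || si.dir != indexDir)             // foreign directory
          return true;
        lastDocStoreSegment = ds;
        next = si.getDocStoreOffset() + si.docCount;
      }
      return false;
    }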

    if (merge.increfDone)
      decrefMergeSegments(merge);

    assert merge.registerDone;

    final SegmentInfos sourceSegments = merge.segments;
    final int end = sourceSegments.size();
    for(int i=0;i<end;i++)
      mergingSegments.remove(sourceSegments.info(i));
    mergingSegments.remove(merge.info);
    merge.registerDone = false;
  }

   
    SegmentMerger merger = null;

    int mergedDocCount = 0;

    SegmentInfos sourceSegments = merge.segments;
    SegmentInfos sourceSegmentsClone = merge.segmentsClone;
    final int numSegments = sourceSegments.size();

    if (infoStream != null)
      message("merging " + merge.segString(directory));

    merger = new SegmentMerger(this, mergedName, merge);
   
    // This is try/finally to make sure merger's readers are
    // closed:

    boolean success = false;

    try {
      int totDocCount = 0;

      for (int i = 0; i < numSegments; i++) {
        SegmentInfo si = sourceSegmentsClone.info(i);
        IndexReader reader = SegmentReader.get(si, MERGE_READ_BUFFER_SIZE, merge.mergeDocStores); // no need to set deleter (yet)
        merger.add(reader);
        totDocCount += reader.numDocs();
      }
      if (infoStream != null) {
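
Note that this last excerpt opens its SegmentReaders from merge.segmentsClone rather than merge.segments: the clone is the point-in-time snapshot taken when the merge started, so deletes flushed while the merge runs are reconciled afterwards by the commitMerge remapping shown earlier.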
