Examples of ChunkIterator


Examples of org.apache.lucene.codecs.compressing.CompressingStoredFieldsReader.ChunkIterator

        }
      } else {
        int docID = nextLiveDoc(0, liveDocs, maxDoc);
        if (docID < maxDoc) {
          // not all docs were deleted
          final ChunkIterator it = matchingFieldsReader.chunkIterator(docID);
          int[] startOffsets = new int[0];
          do {
            // go to the next chunk that contains docID
            it.next(docID);
            // transform lengths into offsets
            if (startOffsets.length < it.chunkDocs) {
              startOffsets = new int[ArrayUtil.oversize(it.chunkDocs, 4)];
            }
            for (int i = 1; i < it.chunkDocs; ++i) {
              startOffsets[i] = startOffsets[i - 1] + it.lengths[i - 1];
            }

            if (numBufferedDocs == 0 // starting a new chunk
                && startOffsets[it.chunkDocs - 1] < chunkSize // chunk is small enough
                && startOffsets[it.chunkDocs - 1] + it.lengths[it.chunkDocs - 1] >= chunkSize // chunk is large enough
                && nextDeletedDoc(it.docBase, liveDocs, it.docBase + it.chunkDocs) == it.docBase + it.chunkDocs) { // no deletion in the chunk
              assert docID == it.docBase;

              // no need to decompress, just copy data
              indexWriter.writeIndex(it.chunkDocs, fieldsStream.getFilePointer());
              writeHeader(this.docBase, it.chunkDocs, it.numStoredFields, it.lengths);
              it.copyCompressedData(fieldsStream);
              this.docBase += it.chunkDocs;
              docID = nextLiveDoc(it.docBase + it.chunkDocs, liveDocs, maxDoc);
              docCount += it.chunkDocs;
              mergeState.checkAbort.work(300 * it.chunkDocs);
            } else {
              // decompress
              it.decompress();
              if (startOffsets[it.chunkDocs - 1] + it.lengths[it.chunkDocs - 1] != it.bytes.length) {
                throw new CorruptIndexException("Corrupted: expected chunk size=" + (startOffsets[it.chunkDocs - 1] + it.lengths[it.chunkDocs - 1]) + ", got " + it.bytes.length);
              }
              // copy non-deleted docs
              for (; docID < it.docBase + it.chunkDocs; docID = nextLiveDoc(docID + 1, liveDocs, maxDoc)) {

Examples of org.apache.lucene.codecs.compressing.CompressingStoredFieldsReader.ChunkIterator

        }
      } else {
        int docID = nextLiveDoc(0, liveDocs, maxDoc);
        if (docID < maxDoc) {
          // not all docs were deleted
          final ChunkIterator it = matchingFieldsReader.chunkIterator(docID);
          int[] startOffsets = new int[0];
          do {
            // go to the next chunk that contains docID
            it.next(docID);
            // transform lengths into offsets
            if (startOffsets.length < it.chunkDocs) {
              startOffsets = new int[ArrayUtil.oversize(it.chunkDocs, 4)];
            }
            for (int i = 1; i < it.chunkDocs; ++i) {
              startOffsets[i] = startOffsets[i - 1] + it.lengths[i - 1];
            }

            if (compressionMode == matchingFieldsReader.getCompressionMode() // same compression mode
                && numBufferedDocs == 0 // starting a new chunk
                && startOffsets[it.chunkDocs - 1] < chunkSize // chunk is small enough
                && startOffsets[it.chunkDocs - 1] + it.lengths[it.chunkDocs - 1] >= chunkSize // chunk is large enough
                && nextDeletedDoc(it.docBase, liveDocs, it.docBase + it.chunkDocs) == it.docBase + it.chunkDocs) { // no deletion in the chunk
              assert docID == it.docBase;

              // no need to decompress, just copy data
              indexWriter.writeIndex(it.chunkDocs, fieldsStream.getFilePointer());
              writeHeader(this.docBase, it.chunkDocs, it.numStoredFields, it.lengths);
              it.copyCompressedData(fieldsStream);
              this.docBase += it.chunkDocs;
              docID = nextLiveDoc(it.docBase + it.chunkDocs, liveDocs, maxDoc);
              docCount += it.chunkDocs;
              mergeState.checkAbort.work(300 * it.chunkDocs);
            } else {
              // decompress
              it.decompress();
              if (startOffsets[it.chunkDocs - 1] + it.lengths[it.chunkDocs - 1] != it.bytes.length) {
                throw new CorruptIndexException("Corrupted: expected chunk size=" + (startOffsets[it.chunkDocs - 1] + it.lengths[it.chunkDocs - 1]) + ", got " + it.bytes.length);
              }
              // copy non-deleted docs
              for (; docID < it.docBase + it.chunkDocs; docID = nextLiveDoc(docID + 1, liveDocs, maxDoc)) {

Examples of org.apache.lucene.codecs.compressing.CompressingStoredFieldsReader.ChunkIterator

        }
      } else {
        int docID = nextLiveDoc(0, liveDocs, maxDoc);
        if (docID < maxDoc) {
          // not all docs were deleted
          final ChunkIterator it = matchingFieldsReader.chunkIterator(docID);
          int[] startOffsets = new int[0];
          do {
            // go to the next chunk that contains docID
            it.next(docID);
            // transform lengths into offsets
            if (startOffsets.length < it.chunkDocs) {
              startOffsets = new int[ArrayUtil.oversize(it.chunkDocs, 4)];
            }
            for (int i = 1; i < it.chunkDocs; ++i) {
              startOffsets[i] = startOffsets[i - 1] + it.lengths[i - 1];
            }

            // decompress
            it.decompress();
            if (startOffsets[it.chunkDocs - 1] + it.lengths[it.chunkDocs - 1] != it.bytes.length) {
              throw new CorruptIndexException("Corrupted: expected chunk size=" + (startOffsets[it.chunkDocs - 1] + it.lengths[it.chunkDocs - 1]) + ", got " + it.bytes.length);
            }
            // copy non-deleted docs
            for (; docID < it.docBase + it.chunkDocs; docID = nextLiveDoc(docID + 1, liveDocs, maxDoc)) {
              final int diff = docID - it.docBase;
              startDocument();
              bufferedDocs.writeBytes(it.bytes.bytes, it.bytes.offset + startOffsets[diff], it.lengths[diff]);
              numStoredFieldsInDoc = it.numStoredFields[diff];
              finishDocument();
              ++docCount;
              mergeState.checkAbort.work(300);
            }
          } while (docID < maxDoc);

          it.checkIntegrity();
        }
      }
    }
    finish(mergeState.fieldInfos, docCount);
    return docCount;
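The snippet above decompresses each chunk and copies the surviving documents one at a time. Below is a minimal, standalone sketch of the offset bookkeeping it relies on: the per-document lengths of a chunk are turned into start offsets with a prefix sum, and a single document's bytes are then sliced out of the decompressed buffer. The names lengths, chunkDocs and bytes mirror the ChunkIterator fields used above; the class itself is a hypothetical illustration, not part of the Lucene API.

import java.util.Arrays;

public class ChunkOffsetsSketch {

  // Prefix sum: startOffsets[i] is where document i begins inside the decompressed chunk.
  static int[] toStartOffsets(int[] lengths) {
    final int[] startOffsets = new int[lengths.length];
    for (int i = 1; i < lengths.length; ++i) {
      startOffsets[i] = startOffsets[i - 1] + lengths[i - 1];
    }
    return startOffsets;
  }

  // Slice the bytes of document `index` out of the decompressed chunk buffer.
  static byte[] sliceDocument(byte[] chunkBytes, int[] startOffsets, int[] lengths, int index) {
    final int from = startOffsets[index];
    return Arrays.copyOfRange(chunkBytes, from, from + lengths[index]);
  }

  public static void main(String[] args) {
    final int[] lengths = { 3, 5, 2 };               // three documents stored in one chunk
    final byte[] chunk = "aaabbbbbcc".getBytes();    // their concatenated stored bytes
    final int[] offsets = toStartOffsets(lengths);   // -> {0, 3, 8}
    // The consistency check in the merge code: last offset + last length == chunk size.
    assert offsets[2] + lengths[2] == chunk.length;
    System.out.println(new String(sliceDocument(chunk, offsets, lengths, 1))); // prints "bbbbb"
  }
}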

Examples of org.apache.lucene.codecs.compressing.CompressingStoredFieldsReader.ChunkIterator

        }
      } else {
        int docID = nextLiveDoc(0, liveDocs, maxDoc);
        if (docID < maxDoc) {
          // not all docs were deleted
          final ChunkIterator it = matchingFieldsReader.chunkIterator(docID);
          int[] startOffsets = new int[0];
          do {
            // go to the next chunk that contains docID
            it.next(docID);
            // transform lengths into offsets
            if (startOffsets.length < it.chunkDocs) {
              startOffsets = new int[ArrayUtil.oversize(it.chunkDocs, 4)];
            }
            for (int i = 1; i < it.chunkDocs; ++i) {
              startOffsets[i] = startOffsets[i - 1] + it.lengths[i - 1];
            }

            if (numBufferedDocs == 0 // starting a new chunk
                && startOffsets[it.chunkDocs - 1] < chunkSize // chunk is small enough
                && startOffsets[it.chunkDocs - 1] + it.lengths[it.chunkDocs - 1] >= chunkSize // chunk is large enough
                && nextDeletedDoc(it.docBase, liveDocs, it.docBase + it.chunkDocs) == it.docBase + it.chunkDocs) { // no deletion in the chunk
              assert docID == it.docBase;

              // no need to decompress, just copy data
              indexWriter.writeIndex(it.chunkDocs, fieldsStream.getFilePointer());
              writeHeader(this.docBase, it.chunkDocs, it.numStoredFields, it.lengths);
              it.copyCompressedData(fieldsStream);
              this.docBase += it.chunkDocs;
              docID = nextLiveDoc(it.docBase + it.chunkDocs, liveDocs, maxDoc);
              docCount += it.chunkDocs;
              mergeState.checkAbort.work(300 * it.chunkDocs);
            } else {
              // decompress
              it.decompress();
              if (startOffsets[it.chunkDocs - 1] + it.lengths[it.chunkDocs - 1] != it.bytes.length) {
                throw new CorruptIndexException("Corrupted: expected chunk size=" + (startOffsets[it.chunkDocs - 1] + it.lengths[it.chunkDocs - 1]) + ", got " + it.bytes.length);
              }
              // copy non-deleted docs
              for (; docID < it.docBase + it.chunkDocs; docID = nextLiveDoc(docID + 1, liveDocs, maxDoc)) {
                final int diff = docID - it.docBase;
                startDocument(it.numStoredFields[diff]);
                bufferedDocs.writeBytes(it.bytes.bytes, it.bytes.offset + startOffsets[diff], it.lengths[diff]);
                finishDocument();
                ++docCount;
                mergeState.checkAbort.work(300);
              }
            }
          } while (docID < maxDoc);

          it.checkIntegrity();
        }
      }
    }
    finish(mergeState.fieldInfos, docCount);
    return docCount;
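The version above includes the raw bulk-copy fast path: when the writer is between chunks, the chunk's decompressed size straddles chunkSize, and no document in the chunk is deleted, the compressed bytes are copied verbatim without decompressing. Below is a hedged, self-contained restatement of that eligibility test as a helper method; the boolean[] stand-in is a simplification of Lucene's Bits liveDocs, and the class is illustrative only, not the actual Lucene code.

public class BulkCopyCheckSketch {

  // True when no document in [docBase, docBase + chunkDocs) is deleted.
  static boolean noDeletionInChunk(boolean[] liveDocs, int docBase, int chunkDocs) {
    if (liveDocs == null) {
      return true; // the segment has no deletions at all
    }
    for (int doc = docBase; doc < docBase + chunkDocs; ++doc) {
      if (!liveDocs[doc]) {
        return false;
      }
    }
    return true;
  }

  // Restates the fast-path condition from the merge code above.
  static boolean canCopyCompressed(int numBufferedDocs, int chunkSize,
                                   int[] startOffsets, int[] lengths, int chunkDocs,
                                   boolean[] liveDocs, int docBase) {
    final int last = chunkDocs - 1;
    final int decompressedSize = startOffsets[last] + lengths[last];
    return numBufferedDocs == 0              // starting a new chunk
        && startOffsets[last] < chunkSize    // chunk is small enough
        && decompressedSize >= chunkSize     // chunk is large enough
        && noDeletionInChunk(liveDocs, docBase, chunkDocs); // no deletion in the chunk
  }
}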

Examples of org.pentaho.reporting.engine.classic.core.layout.process.alignment.ChunkIterator

    ClassicEngineBoot.getInstance().start();
  }

  public void testIterateEmpty()
  {
    final ChunkIterator it = new ChunkIterator(new DefaultSequenceList(), 0);
    assertFalse(it.hasNext());
    try
    {
      it.next();
      fail();
    }
    catch (NoSuchElementException e)
    {
      // expected

Examples of org.pentaho.reporting.engine.classic.core.layout.process.alignment.ChunkIterator

    list.add(new TestInlineSequenceElement(InlineSequenceElement.Classification.START), new SpacerRenderNode());
    list.add(new TestInlineSequenceElement(InlineSequenceElement.Classification.CONTENT), new DummyRenderNode());
    list.add(new TestInlineSequenceElement(InlineSequenceElement.Classification.CONTENT), new DummyRenderNode());
    list.add(new TestInlineSequenceElement(InlineSequenceElement.Classification.END), new SpacerRenderNode());

    final ChunkIterator it = new ChunkIterator(list, 0);
    assertNext(it, 0, 2, 20);
    assertNext(it, 2, 2, 20);
    assertFalse(it.hasNext());
  }

Examples of org.pentaho.reporting.engine.classic.core.layout.process.alignment.ChunkIterator

    list.add(new TestInlineSequenceElement(InlineSequenceElement.Classification.CONTENT), new DummyRenderNode());
    list.add(new TestInlineSequenceElement(InlineSequenceElement.Classification.CONTENT), new DummyRenderNode());
    list.add(new TestInlineSequenceElement(InlineSequenceElement.Classification.END), new SpacerRenderNode());
    list.add(new TestInlineSequenceElement(InlineSequenceElement.Classification.END), new SpacerRenderNode());

    final ChunkIterator it = new ChunkIterator(list, 0);
    assertNext(it, 0, 3, 30);
    assertNext(it, 3, 3, 30);
    assertFalse(it.hasNext());
  }

Examples of org.pentaho.reporting.engine.classic.core.layout.process.alignment.ChunkIterator

    list.add(new TestInlineSequenceElement(InlineSequenceElement.Classification.CONTENT), new DummyRenderNode());
    list.add(new TestInlineSequenceElement(InlineSequenceElement.Classification.CONTENT), new DummyRenderNode());
    list.add(new TestInlineSequenceElement(InlineSequenceElement.Classification.CONTENT), new DummyRenderNode());
    list.add(new TestInlineSequenceElement(InlineSequenceElement.Classification.END), new SpacerRenderNode());

    final ChunkIterator it = new ChunkIterator(list, 0);
    assertNext(it, 0, 2, 20);
    assertNext(it, 2, 1, 10);
    assertNext(it, 3, 2, 20);
    assertFalse(it.hasNext());
  }
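The testIterateEmpty() snippet above is cut off before its closing braces; the sketch below is a hedged, self-contained completion of it as a JUnit 3 test: an empty sequence list yields an iterator with no chunks, and calling next() on it fails with NoSuchElementException. Only the ChunkIterator package is given in the headings above; the import paths for ClassicEngineBoot and DefaultSequenceList are assumptions based on the class names used in the tests.

import java.util.NoSuchElementException;

import junit.framework.TestCase;

import org.pentaho.reporting.engine.classic.core.ClassicEngineBoot; // assumed package
import org.pentaho.reporting.engine.classic.core.layout.process.alignment.ChunkIterator;
import org.pentaho.reporting.engine.classic.core.layout.process.layoutrules.DefaultSequenceList; // assumed package

public class ChunkIteratorEmptyTest extends TestCase
{
  protected void setUp() throws Exception
  {
    ClassicEngineBoot.getInstance().start(); // boot the engine, as in the snippets above
  }

  public void testIterateEmpty()
  {
    final ChunkIterator it = new ChunkIterator(new DefaultSequenceList(), 0);
    assertFalse(it.hasNext());
    try
    {
      it.next();
      fail("next() on an empty iterator must throw");
    }
    catch (NoSuchElementException e)
    {
      // expected
    }
  }
}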