Package org.apache.lucene.store

Examples of org.apache.lucene.store.IndexOutput
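IndexOutput is the abstract output stream Lucene uses to write files into a Directory; concrete implementations target the file system, RAM, and so on. The snippets below show it in use across field writers, compound-file writers, norms merging, and test helpers. As a minimal sketch of the usual create/write/close pattern (Lucene 3.x-style API; the path and file name here are hypothetical):

    import java.io.File;
    import java.io.IOException;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.FSDirectory;
    import org.apache.lucene.store.IndexOutput;

    public class IndexOutputSketch {
      public static void main(String[] args) throws IOException {
        Directory dir = FSDirectory.open(new File("/tmp/example-index")); // hypothetical path
        IndexOutput out = dir.createOutput("example.bin");                // hypothetical file name
        try {
          out.writeVInt(42);          // variable-length int
          out.writeString("payload"); // length-prefixed string
        } finally {
          out.close();                // flush and release the file handle
        }
        dir.close();
      }
    }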


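The first excerpt comes from a modified FieldsWriter constructor: it opens the fields data (.fdt) and fields index (.fdx) outputs for a segment and, when field-data compression is enabled, wraps the .fdt stream in a compressing IndexOutput (FdtCompressIndexOutput is a custom class in that codebase):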
    fieldInfos = fn;

    boolean success = false;
    try {
      // Create the fields data (.fdt) and fields index (.fdx) outputs.
      IndexOutput fdt = directory.createOutput(IndexFileNames.segmentFileName(segment, IndexFileNames.FIELDS_EXTENSION));
      indexStream = directory.createOutput(IndexFileNames.segmentFileName(segment, IndexFileNames.FIELDS_INDEX_EXTENSION));

      if (FieldsWriterCompress.isFdtCompress() && !(directory instanceof RAMDirectory)) {
        // Compressed path: wrap the .fdt output in a buffered compressing stream.
        indexStream.writeInt(FieldsWriterCompress.FORMAT_CURRENT);
        fieldsStream = new FdtCompressIndexOutput(fdt, 1024 * 512);
        fieldsStream.writeInt(FieldsWriterCompress.FORMAT_CURRENT);
      } else {
        // Uncompressed path: the fields stream is the raw .fdt output.
        fdt.writeInt(FORMAT_CURRENT);
        indexStream.writeInt(FORMAT_CURRENT);
        fieldsStream = fdt;
        fieldsStream.writeInt(FieldsWriterCompress.FORMAT_CURRENT);
      }



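Next, a CompoundFileWriter variant that merges a set of file entries into a compound stream. It writes a format marker and the entry directory first, pre-sizes the file, copies each entry's data, and then records the directory and data offsets in a separate positions file instead of seeking back into the compound stream: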
            throw new IllegalStateException("No entries to merge have been defined");

        merged = true;

        // open the compound stream
        IndexOutput os = directory.createOutput(fileName);
        IndexOutput ospos = directory.createOutput(fileNamepos);
        IOException priorException = null;
        try {
            // Write the Version info - must be a VInt because CFR reads a VInt
            // in older versions!
            os.writeVInt(FORMAT_CURRENT);
           
            // Write the number of entries
            os.writeVInt(entries.size());

            // Write the directory with all offsets at 0.
            // Remember the positions of directory entries so that we can
            // adjust the offsets later
            long totalSize = 0;
            for (FileEntry fe : entries) {
                fe.directoryOffset = os.getFilePointer();
                os.writeLong(-1);    // for now
                os.writeString(IndexFileNames.stripSegmentName(fe.file));
                totalSize += fe.dir.fileLength(fe.file);
            }

            // Pre-allocate size of file as optimization --
            // this can potentially help IO performance as
            // we write the file and also later during
            // searching.  It also uncovers a disk-full
            // situation earlier and hopefully without
            // actually filling disk to 100%:
            final long finalLength = totalSize+os.getFilePointer();
            os.setLength(finalLength);

            // Open the files and copy their data into the stream.
            // Remember the locations of each file's data section.
            for (FileEntry fe : entries) {
                fe.dataOffset = os.getFilePointer();
                copyFile(fe, os);
            }

            // Write the directory and data offsets into the separate
            // positions file (instead of seeking back into the compound
            // stream, as the commented-out lines used to do):
            ospos.writeInt(entries.size());
            for (FileEntry fe : entries) {
//                os.seek(fe.directoryOffset);
//                os.writeLong(fe.dataOffset);
                ospos.writeLong(fe.directoryOffset);
                ospos.writeLong(fe.dataOffset);
            }

            assert finalLength == os.length();

            // Close the output stream. Set the os to null before trying to
            // close so that if an exception occurs during the close, the
            // finally clause below will not attempt to close the stream
            // the second time.
            IndexOutput tmp = os;
            os = null;
            tmp.close();
           
            IndexOutput tmpospos = ospos;
            ospos = null;
            tmpospos.close();
           
        } catch (IOException e) {
          priorException = e;
        } finally {
          IOUtils.closeWhileHandlingException(priorException, os, ospos);

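This fragment merges norms from multiple readers into a single norms output: it writes the norms header once, copies whole buffers for segments without deletions, and filters byte-by-byte when documents have been deleted: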
    for (IndexReader reader : readers) {
      bufferSize = Math.max(bufferSize, reader.maxDoc());
    }
   
    byte[] normBuffer = null;
    IndexOutput output = null;
    boolean success = false;
    try {
      int numFieldInfos = fieldInfos.size();
      for (int i = 0; i < numFieldInfos; i++) {
        FieldInfo fi = fieldInfos.fieldInfo(i);
        if (fi.isIndexed && !fi.omitNorms) {
          if (output == null) {
            output = directory.createOutput(IndexFileNames.segmentFileName(segment, IndexFileNames.NORMS_EXTENSION));
            output.writeBytes(SegmentNorms.NORMS_HEADER, SegmentNorms.NORMS_HEADER.length);
          }
          if (normBuffer == null) {
            normBuffer = new byte[bufferSize];
          }
          for (IndexReader reader : readers) {
            final int maxDoc = reader.maxDoc();
            reader.norms(fi.name, normBuffer, 0);
            if (!reader.hasDeletions()) {
              //optimized case for segments without deleted docs
              output.writeBytes(normBuffer, maxDoc);
            } else {
              // this segment has deleted docs, so we have to
              // check for every doc if it is deleted or not
              for (int k = 0; k < maxDoc; k++) {
                if (!reader.isDeleted(k)) {
                  output.writeByte(normBuffer[k]);
                }
              }
            }
            checkAbort.work(maxDoc);
          }

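After the tail of a preceding method, the next excerpt shows the simplest recurring pattern: create the output, delegate the actual writing, and close it in a finally block: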
    }
    return hasVectors;
  }

  public void write(Directory d, String name) throws IOException {
    IndexOutput output = d.createOutput(name);
    try {
      write(output);
    } finally {
      output.close();
    }
  }

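Writing the segments.gen file: a lockless-format marker followed by the generation recorded twice, with interrupted-thread errors logged before being rethrown: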
    }

    lastGeneration = generation;

    try {
      IndexOutput genOutput = dir.createOutput(IndexFileNames.SEGMENTS_GEN);
      try {
        genOutput.writeInt(FORMAT_LOCKLESS);
        genOutput.writeLong(generation);
        genOutput.writeLong(generation);
      } finally {
        genOutput.close();
      }
    } catch (ThreadInterruptedException t) {
      logger.info("sigment error ",t);
      throw t;
    } catch (Throwable t) {

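Here a RAMDirectory-backed IndexOutput serves as a scratch target while inflating a gzip-compressed block, so the decompressed bytes can be re-read through a regular IndexInput: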
    this.input.readBytes(compressdata, 0, bytelen);
    this.nextpos = this.input.getFilePointer();
//    LOG.info("resetBlock "+bytelen+","+pos+","+this.nextpos+","+this.input.getClass().getName());

    // Decompress the gzip block into an in-memory file named "r".
    ramDirectory = new RAMDirectory();
    IndexOutput output = ramDirectory.createOutput("r");

    ByteArrayInputStream bis = new ByteArrayInputStream(compressdata);
    GZIPInputStream gzip = new GZIPInputStream(bis);
    byte[] buff = new byte[1024];
    int rc;
    while ((rc = gzip.read(buff, 0, buff.length)) > 0) {
      output.writeBytes(buff, 0, rc);
    }
    gzip.close();
    bis.close();
    output.close();
    this.raminput = ramDirectory.openInput("r");
  }

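A small persistence helper that writes the current transaction id to a "txid" file, deleting any previous copy first: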
  {
    if (d.fileExists("txid")) {
      d.deleteFile("txid");
    }
    IndexOutput out = d.createOutput("txid");
    out.writeString(String.valueOf(this.txid));
    out.close();
  }


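From a test, a helper that fills a new file with random bytes: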
    /** Creates a file of the specified size with random data. */
    private void createRandomFile(Directory dir, String name, int size)
    throws IOException
    {
        IndexOutput os = dir.createOutput(name);
        for (int i=0; i<size; i++) {
            byte b = (byte) (Math.random() * 256);
            os.writeByte(b);
        }
        os.close();
    }

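And a companion test helper (its signature truncated in this excerpt) that writes a run of sequential byte values, beginning at the given start byte: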
                                    String name,
                                    byte start,
                                    int size)
    throws IOException
    {
        IndexOutput os = dir.createOutput(name);
        for (int i=0; i < size; i++) {
            os.writeByte(start);
            start++;
        }
        os.close();
    }
