Package org.apache.lucene.store

Examples of org.apache.lucene.store.IndexOutput
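IndexOutput is Lucene's write-side abstraction for a file inside a Directory: you create one with Directory.createOutput, write primitives or raw bytes, and close it; an IndexInput opened on the same name reads the data back in write order. As orientation for the snippets below, here is a minimal round-trip (a sketch assuming a Lucene 4.x-style API with IOContext, which most of the examples on this page use; the directory and file name are illustrative):

   Directory dir = new RAMDirectory();
   IndexOutput out = dir.createOutput("example.bin", IOContext.DEFAULT);
   out.writeInt(42);                         // primitives: writeInt, writeLong, writeByte, ...
   out.writeBytes(new byte[] {1, 2, 3}, 3);  // raw bytes
   out.close();                              // close flushes and releases the file

   IndexInput in = dir.openInput("example.bin", IOContext.DEFAULT);
   int value = in.readInt();                 // read back in the same order
   in.close();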



   private void testOn(Directory dir, int writeSize, int readSize, Cache cache) throws IOException {
      if (cache != null) cache.clear(); // make sure no chunks are left over from a previous run (needed for the Infinispan implementation)
      final String filename = "chunkTest";
      IndexOutput indexOutput = dir.createOutput(filename, IOContext.DEFAULT);
      byte[] toWrite = fillBytes(writeSize);
      indexOutput.writeBytes(toWrite, writeSize);
      indexOutput.close();
      if (cache != null) {
         AssertJUnit.assertEquals(writeSize, DirectoryIntegrityCheck.deepCountFileSize(new FileCacheKey(INDEXNAME,filename), cache));
      }
      AssertJUnit.assertEquals(writeSize, indexOutput.length());
      byte[] results = new byte[readSize];
      IndexInput openInput = dir.openInput(filename, IOContext.DEFAULT);
      try {
         openInput.readBytes(results, 0, readSize);
         for (int i = 0; i < writeSize && i < readSize; i++) {
            AssertJUnit.assertEquals(toWrite[i], results[i]);
         }
      } finally {
         openInput.close();
      }
   }


      final int bufferSize = 300;
      Cache cache = cacheManager.getCache();
      cache.clear();
      Directory dir = DirectoryBuilder.newDirectoryInstance(cache, cache, cache, INDEXNAME).chunkSize(13).create();
      byte[] manyBytes = fillBytes(bufferSize);
      IndexOutput indexOutput = dir.createOutput(filename, IOContext.DEFAULT);
      for (int i = 0; i < 10; i++) {
         indexOutput.writeBytes(manyBytes, bufferSize);
         indexOutput.flush();
      }
      indexOutput.close();
      IndexInput input = dir.openInput(filename, IOContext.DEFAULT);
      final int finalSize = (10 * bufferSize);
      AssertJUnit.assertEquals(finalSize, input.length());
      final byte[] resultingBuffer = new byte[finalSize];
      input.readBytes(resultingBuffer, 0, finalSize);
      for (int i = 0; i < finalSize; i++) {
         AssertJUnit.assertEquals(manyBytes[i % bufferSize], resultingBuffer[i]);
      }
      input.close();
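Because the Directory above was built with chunkSize(13), the 3000 bytes written should land in ceil(3000 / 13) = 231 fixed-size chunks in the cache (assuming, as the Infinispan directory does, that files are stored as fixed-size chunks); the arithmetic as a sketch:

      int chunkSize = 13;
      int expectedChunks = (finalSize + chunkSize - 1) / chunkSize; // (3000 + 12) / 13 = 231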

      for (int i = 0; i < files.length; i++)
      {
         IndexInput in = dir.openInput(files[i]);
         try
         {
            IndexOutput out = dest.createOutput(files[i]);
            try
            {
               long remaining = in.length();
               while (remaining > 0)
               {
                  int num = (int)Math.min(remaining, buffer.length);
                  in.readBytes(buffer, 0, num);
                  out.writeBytes(buffer, num);
                  remaining -= num;
               }
            }
            finally
            {
               out.close();
            }
         }
         finally
         {
            in.close();
         }
      }
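The manual loop above is the classic way to copy files between two Directory instances on older Lucene versions. Recent Lucene releases can delegate the byte copy to the Directory itself; a sketch assuming Lucene 5+, where Directory.copyFrom(Directory, String, String, IOContext) is available:

      for (String file : dir.listAll()) {
         // copies the bytes of dir/file into dest under the same name
         dest.copyFrom(dir, file, file, IOContext.DEFAULT);
      }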

        String fileNameIn = SegmentInfos.getCurrentSegmentFileName(dir);
        String fileNameOut = IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS,
                                                                   "",
                                                                   1+gen);
        IndexInput in = dir.openInput(fileNameIn);
        IndexOutput out = dir.createOutput(fileNameOut);
        long length = in.length();
        // copy all but the last byte, leaving a truncated segments_(1+gen)
        // file, as if a writer crashed while writing it
        for (int i = 0; i < length - 1; i++) {
          out.writeByte(in.readByte());
        }
        in.close();
        out.close();

        IndexReader reader = null;
        try {
          reader = IndexReader.open(dir);
        } catch (Exception e) {
          fail("reader failed to open on a crashed index");
        }
        reader.close();

        String fileNameIn = SegmentInfos.getCurrentSegmentFileName(dir);
        String fileNameOut = IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS,
                                                                   "",
                                                                   1+gen);
        IndexInput in = dir.openInput(fileNameIn);
        IndexOutput out = dir.createOutput(fileNameOut);
        long length = in.length();
        // again copy all but the last byte, leaving a truncated segments_(1+gen) file
        for (int i = 0; i < length - 1; i++) {
          out.writeByte(in.readByte());
        }
        in.close();
        out.close();
        // this time also delete the intact segments file, so there is no
        // earlier generation to fall back to
        dir.deleteFile(fileNameIn);

        IndexReader reader = null;
        try {
          reader = IndexReader.open(dir);
          fail("reader did not hit IOException on opening a corrupt index");
        } catch (Exception e) {
          // expected: only the truncated segments file remains
        }

  /** Write norms in the "true" segment format.  This is
   *  called only during commit, to create the .nrm file. */
  void writeNorms(String segmentName, int totalNumDoc) throws IOException {

    IndexOutput normsOut = directory.createOutput(segmentName + "." + IndexFileNames.NORMS_EXTENSION);

    try {
      normsOut.writeBytes(SegmentMerger.NORMS_HEADER, 0, SegmentMerger.NORMS_HEADER.length);

      final int numField = fieldInfos.size();

      for (int fieldIdx=0;fieldIdx<numField;fieldIdx++) {
        FieldInfo fi = fieldInfos.fieldInfo(fieldIdx);
        if (fi.isIndexed && !fi.omitNorms) {
          BufferedNorms n = norms[fieldIdx];
          final long v;
          if (n == null)
            v = 0;
          else {
            v = n.out.getFilePointer();
            n.out.writeTo(normsOut);
            n.reset();
          }
          // pad with the default norm byte so the file covers all totalNumDoc documents
          if (v < totalNumDoc)
            fillBytes(normsOut, defaultNorm, (int) (totalNumDoc-v));
        }
      }
    } finally {
      normsOut.close();
    }
  }
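The fillBytes(IndexOutput, byte, int) helper called above is not shown on this page; a minimal sketch consistent with its call site, which pads the norms stream with the default norm byte, might look like this (an assumed reconstruction, not the verbatim DocumentsWriter code):

  // Hypothetical reconstruction of the padding helper: writes b, numBytes times.
  void fillBytes(IndexOutput out, byte b, int numBytes) throws IOException {
    for (int i = 0; i < numBytes; i++) {
      out.writeByte(b);
    }
  }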

    segmentName = segment;

    TermInfosWriter termsOut = new TermInfosWriter(directory, segmentName, fieldInfos,
                                                   writer.getTermIndexInterval());

    IndexOutput freqOut = directory.createOutput(segmentName + ".frq");
    IndexOutput proxOut = directory.createOutput(segmentName + ".prx");

    // Gather all FieldData's that have postings, across all
    // ThreadStates
    ArrayList allFields = new ArrayList();
    assert allThreadsIdle();
    for(int i=0;i<threadStates.length;i++) {
      ThreadState state = threadStates[i];
      state.trimFields();
      final int numFields = state.numAllFieldData;
      for(int j=0;j<numFields;j++) {
        ThreadState.FieldData fp = state.allFieldDataArray[j];
        if (fp.numPostings > 0)
          allFields.add(fp);
      }
    }

    // Sort by field name
    Collections.sort(allFields);
    final int numAllFields = allFields.size();

    skipListWriter = new DefaultSkipListWriter(termsOut.skipInterval,
                                               termsOut.maxSkipLevels,
                                               numDocsInRAM, freqOut, proxOut);

    // walk the sorted list, handling each run of fields sharing a name as one group
    int start = 0;
    while (start < numAllFields) {

      final String fieldName = ((ThreadState.FieldData) allFields.get(start)).fieldInfo.name;

      int end = start+1;
      while(end < numAllFields && ((ThreadState.FieldData) allFields.get(end)).fieldInfo.name.equals(fieldName))
        end++;
     
      ThreadState.FieldData[] fields = new ThreadState.FieldData[end-start];
      for(int i=start;i<end;i++)
        fields[i-start] = (ThreadState.FieldData) allFields.get(i);

      // If this field has postings then add them to the
      // segment
      appendPostings(fields, termsOut, freqOut, proxOut);

      for(int i=0;i<fields.length;i++)
        fields[i].resetPostingArrays();

      start = end;
    }

    freqOut.close();
    proxOut.close();
    termsOut.close();
   
    // Record all files we have flushed
    List flushedFiles = new ArrayList();
    flushedFiles.add(segmentFileName(IndexFileNames.FIELD_INFOS_EXTENSION));


  @Test
  public void testWritingAndReadingAFile() throws IOException {

    IndexOutput output = directory.createOutput("testing.test", IOContext.DEFAULT);
    output.writeInt(12345);
    output.flush();
    output.close();

    IndexInput input = directory.openInput("testing.test", IOContext.DEFAULT);
    assertEquals(12345, input.readInt());
    input.close();
  }
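The same round-trip pattern extends to the other primitives DataOutput offers; a sketch reusing the directory field from the test above (Lucene 4.x-style API assumed, file name illustrative):

    IndexOutput out = directory.createOutput("types.test", IOContext.DEFAULT);
    out.writeByte((byte) 7);
    out.writeLong(1234567890123L);
    out.writeVInt(300);        // variable-length encoding, 1 to 5 bytes
    out.writeString("hello");  // length-prefixed, UTF-8 encoded
    out.close();

    IndexInput in = directory.openInput("types.test", IOContext.DEFAULT);
    assertEquals(7, in.readByte());
    assertEquals(1234567890123L, in.readLong());
    assertEquals(300, in.readVInt());
    assertEquals("hello", in.readString());
    in.close();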


  private void createFile(String name, Directory fsDir, HdfsDirectory hdfs) throws IOException {
    int writes = random.nextInt(MAX_NUMBER_OF_WRITES);
    int fileLength = random.nextInt(MAX_FILE_SIZE - MIN_FILE_SIZE) + MIN_FILE_SIZE;
    IndexOutput fsOutput = fsDir.createOutput(name, IOContext.DEFAULT);
    fsOutput.setLength(fileLength);
    IndexOutput hdfsOutput = hdfs.createOutput(name, IOContext.DEFAULT);
    hdfsOutput.setLength(fileLength);
    for (int i = 0; i < writes; i++) {
      byte[] buf = new byte[random.nextInt(Math.min(MAX_BUFFER_SIZE - MIN_BUFFER_SIZE, fileLength)) + MIN_BUFFER_SIZE];
      random.nextBytes(buf);
      int offset = random.nextInt(buf.length);
      int length = random.nextInt(buf.length - offset);
      fsOutput.writeBytes(buf, offset, length);
      hdfsOutput.writeBytes(buf, offset, length);
    }
    fsOutput.close();
    hdfsOutput.close();
  }


  private void createFile(String name, Directory fsDir, Directory hdfs) throws IOException {
    int writes = random.nextInt(MAX_NUMBER_OF_WRITES);
    int fileLength = random.nextInt(MAX_FILE_SIZE - MIN_FILE_SIZE) + MIN_FILE_SIZE;
    IndexOutput fsOutput = fsDir.createOutput(name, IOContext.DEFAULT);
    IndexOutput hdfsOutput = hdfs.createOutput(name, IOContext.DEFAULT);
    for (int i = 0; i < writes; i++) {
      byte[] buf = new byte[random.nextInt(Math.min(MAX_BUFFER_SIZE - MIN_BUFFER_SIZE, fileLength)) + MIN_BUFFER_SIZE];
      random.nextBytes(buf);
      int offset = random.nextInt(buf.length);
      int length = random.nextInt(buf.length - offset);
      fsOutput.writeBytes(buf, offset, length);
      hdfsOutput.writeBytes(buf, offset, length);
    }
    fsOutput.close();
    hdfsOutput.close();
  }
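createFile only writes the two copies; the natural counterpart is a check that reads each file back from both directories and asserts the bytes match. A sketch of such a helper (hypothetical, not part of the original test; assertEquals and assertArrayEquals are JUnit's):

  private void assertFileEquals(String name, Directory a, Directory b) throws IOException {
    IndexInput inA = a.openInput(name, IOContext.DEFAULT);
    IndexInput inB = b.openInput(name, IOContext.DEFAULT);
    try {
      assertEquals(inA.length(), inB.length());
      byte[] bufA = new byte[(int) inA.length()];
      byte[] bufB = new byte[(int) inB.length()];
      inA.readBytes(bufA, 0, bufA.length);
      inB.readBytes(bufB, 0, bufB.length);
      assertArrayEquals(bufA, bufB);
    } finally {
      inA.close();
      inB.close();
    }
  }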
