Package org.apache.lucene.store

Examples of org.apache.lucene.store.MockRAMDirectory
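
MockRAMDirectory is the in-memory Directory that Lucene's own tests use for fault injection: it can cap its size to simulate a full disk, throw IOExceptions on a random fraction of writes, and run user-supplied Failure hooks so a test can fail at a very specific point. The excerpts below exercise all three features. For orientation, here is a minimal sketch; it is not taken from the excerpts, the method names are simply the ones the excerpts call, and the constants are purely illustrative:

    MockRAMDirectory dir = new MockRAMDirectory();
    dir.setMaxSizeInBytes(0);               // 0 = unlimited; the first excerpt sets a real cap to simulate disk full
    dir.setRandomIOExceptionRate(0.0, 42);  // 0.0 = never fail; raise the rate to inject random IOExceptions on writes
    dir.failOn(new MockRAMDirectory.Failure() {
      public void eval(MockRAMDirectory mock) throws IOException {
        // a hook the directory consults while writing files; throw here to simulate a fault at that point
      }
    });

    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true,
                                         IndexWriter.MaxFieldLength.UNLIMITED);
    Document doc = new Document();
    doc.add(new Field("content", "aaa", Field.Store.NO, Field.Index.ANALYZED));
    writer.addDocument(doc);
    writer.close();
    dir.close();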


    Term searchTerm = new Term("content", "aaa");
    int START_COUNT = 157;   // all docs match "aaa" before any changes
    int END_COUNT = 144;     // 13 docs (ids 12, 24, ..., 156) are deleted/updated below

    // First build up a starting index:
    MockRAMDirectory startDir = new MockRAMDirectory();
    IndexWriter writer = new IndexWriter(startDir,
                                         new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
    for (int i = 0; i < 157; i++) {
      Document d = new Document();
      d.add(new Field("id", Integer.toString(i), Field.Store.YES,
                      Field.Index.NOT_ANALYZED));
      d.add(new Field("content", "aaa " + i, Field.Store.NO,
                      Field.Index.ANALYZED));
      writer.addDocument(d);
    }
    writer.close();

    long diskUsage = startDir.sizeInBytes();
    long diskFree = diskUsage + 10;

    IOException err = null;

    boolean done = false;

    // Iterate w/ ever increasing free disk space:
    while (!done) {
      MockRAMDirectory dir = new MockRAMDirectory(startDir);
      dir.setPreventDoubleWrite(false);
      IndexWriter modifier = new IndexWriter(dir,
                                             new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED);

      modifier.setMaxBufferedDocs(1000); // use flush or close
      modifier.setMaxBufferedDeleteTerms(1000); // use flush or close

      // For each disk size, first try to commit against
      // dir that will hit random IOExceptions & disk
      // full; after, give it infinite disk space & turn
      // off random IOExceptions & retry w/ same writer:
      boolean success = false;

      for (int x = 0; x < 2; x++) {

        double rate = 0.1;
        double diskRatio = ((double)diskFree) / diskUsage;
        long thisDiskFree;
        String testName;

        if (0 == x) {
          thisDiskFree = diskFree;
          if (diskRatio >= 2.0) {
            rate /= 2;
          }
          if (diskRatio >= 4.0) {
            rate /= 2;
          }
          if (diskRatio >= 6.0) {
            rate = 0.0;
          }
          if (debug) {
            System.out.println("\ncycle: " + diskFree + " bytes");
          }
          testName = "disk full during reader.close() @ " + thisDiskFree
            + " bytes";
        } else {
          thisDiskFree = 0;
          rate = 0.0;
          if (debug) {
            System.out.println("\ncycle: same writer: unlimited disk space");
          }
          testName = "reader re-use after disk full";
        }

        dir.setMaxSizeInBytes(thisDiskFree);
        dir.setRandomIOExceptionRate(rate, diskFree);

        try {
          if (0 == x) {
            int docId = 12;
            for (int i = 0; i < 13; i++) {
              if (updates) {
                Document d = new Document();
                d.add(new Field("id", Integer.toString(i), Field.Store.YES,
                                Field.Index.NOT_ANALYZED));
                d.add(new Field("content", "bbb " + i, Field.Store.NO,
                                Field.Index.ANALYZED));
                modifier.updateDocument(new Term("id", Integer.toString(docId)), d);
              } else { // deletes
                modifier.deleteDocuments(new Term("id", Integer.toString(docId)));
                // modifier.setNorm(docId, "contents", (float)2.0);
              }
              docId += 12;
            }
          }
          modifier.close();
          success = true;
          if (0 == x) {
            done = true;
          }
        }
        catch (IOException e) {
          if (debug) {
            System.out.println("  hit IOException: " + e);
            e.printStackTrace(System.out);
          }
          err = e;
          if (1 == x) {
            e.printStackTrace();
            fail(testName + " hit IOException after disk space was freed up");
          }
        }

        // If the close() succeeded, make sure there are
        // no unreferenced files.
        if (success)
          TestIndexWriter.assertNoUnreferencedFiles(dir, "after writer.close");

        // Finally, verify index is not corrupt, and, if
        // we succeeded, we see all docs changed, and if
        // we failed, we see either all docs or no docs
        // changed (transactional semantics):
        IndexReader newReader = null;
        try {
          newReader = IndexReader.open(dir, true);
        }
        catch (IOException e) {
          e.printStackTrace();
          fail(testName
               + ":exception when creating IndexReader after disk full during close: "
               + e);
        }

        IndexSearcher searcher = new IndexSearcher(newReader);
        ScoreDoc[] hits = null;
        try {
          hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
        }
        catch (IOException e) {
          e.printStackTrace();
          fail(testName + ": exception when searching: " + e);
        }
        int result2 = hits.length;
        if (success) {
          if (x == 0 && result2 != END_COUNT) {
            fail(testName
                 + ": method did not throw exception but hits.length for search on term 'aaa' is "
                 + result2 + " instead of expected " + END_COUNT);
          } else if (x == 1 && result2 != START_COUNT && result2 != END_COUNT) {
            // It's possible that the first exception was
            // "recoverable" wrt pending deletes, in which
            // case the pending deletes are retained and
            // then re-flushing (with plenty of disk
            // space) will succeed in flushing the
            // deletes:
            fail(testName
                 + ": method did not throw exception but hits.length for search on term 'aaa' is "
                 + result2 + " instead of expected " + START_COUNT + " or " + END_COUNT);
          }
        } else {
          // On hitting exception we still may have added
          // all docs:
          if (result2 != START_COUNT && result2 != END_COUNT) {
            err.printStackTrace();
            fail(testName
                 + ": method did throw exception but hits.length for search on term 'aaa' is "
                 + result2 + " instead of expected " + START_COUNT + " or " + END_COUNT);
          }
        }

        searcher.close();
        newReader.close();

        if (result2 == END_COUNT) {
          break;
        }
      }

      dir.close();

      // Try again with 10 more bytes of free space:
      diskFree += 10;
    }
  }
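
The assertNoUnreferencedFiles helper called above lives in Lucene's TestIndexWriter. Its check is essentially what a later excerpt on this page does inline: list the files, instantiate IndexFileDeleter with a KeepOnlyLastCommitDeletionPolicy, and verify that no files vanish. A sketch along those lines (the exact signature is assumed):

    public static void assertNoUnreferencedFiles(Directory dir, String message) throws IOException {
      String[] startFiles = dir.listAll();
      SegmentInfos infos = new SegmentInfos();
      infos.read(dir);
      // Creating the deleter removes any file that no commit point references
      new IndexFileDeleter(dir, new KeepOnlyLastCommitDeletionPolicy(), infos, null, null);
      String[] endFiles = dir.listAll();
      Arrays.sort(startFiles);
      Arrays.sort(endFiles);
      if (!Arrays.equals(startFiles, endFiles)) {
        fail(message + ": writer left unreferenced files:\n  before delete:\n    "
             + Arrays.toString(startFiles) + "\n  after delete:\n    "
             + Arrays.toString(endFiles));
      }
    }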


    String[] unindexed = { "Netherlands", "Italy" };
    String[] unstored = { "Amsterdam has lots of bridges",
        "Venice has lots of canals" };
    String[] text = { "Amsterdam", "Venice" };

    MockRAMDirectory dir = new MockRAMDirectory();
    IndexWriter modifier = new IndexWriter(dir,
                                           new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
    modifier.setUseCompoundFile(true);
    modifier.setMaxBufferedDeleteTerms(2);

    dir.failOn(failure.reset());

    for (int i = 0; i < keywords.length; i++) {
      Document doc = new Document();
      doc.add(new Field("id", keywords[i], Field.Store.YES,
                        Field.Index.NOT_ANALYZED));
      doc.add(new Field("country", unindexed[i], Field.Store.YES,
                        Field.Index.NO));
      doc.add(new Field("contents", unstored[i], Field.Store.NO,
                        Field.Index.ANALYZED));
      doc.add(new Field("city", text[i], Field.Store.YES,
                        Field.Index.ANALYZED));
      modifier.addDocument(doc);
    }
    // flush (and also commit, if autoCommit ("ac") is on)

    modifier.optimize();
    modifier.commit();

    // the term matches one of the two indexed docs

    Term term = new Term("city", "Amsterdam");
    int hitCount = getHitCount(dir, term);
    assertEquals(1, hitCount);

    // delete the doc; maxBufferedDeleteTerms is two, so this
    // delete is only buffered, not yet flushed

    modifier.deleteDocuments(term);

    // add a doc (needed for the !ac case; see below)
    // doc remains buffered

    Document doc = new Document();
    modifier.addDocument(doc);

    // commit the changes, the buffered deletes, and the new doc

    // The failure object will fail on the first write after the del
    // file gets created when processing the buffered delete

    // in the ac case, this will be when writing the new segments
    // files so we really don't need the new doc, but it's harmless

    // in the !ac case, a new segments file won't be created but in
    // this case, creation of the cfs file happens next so we need
    // the doc (to test that it's okay that we don't lose deletes if
    // failing while creating the cfs file)

    boolean failed = false;
    try {
      modifier.commit();
    } catch (IOException ioe) {
      failed = true;
    }

    assertTrue(failed);

    // The commit above failed, so we need to retry it (which will
    // succeed, because the failure is a one-shot)

    modifier.commit();

    hitCount = getHitCount(dir, term);

    // Make sure the delete was successfully flushed:
    assertEquals(0, hitCount);

    modifier.close();
    dir.close();
  }
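
The getHitCount helper used above is not shown in this excerpt. A minimal version, using only the IndexSearcher and TermQuery calls that already appear on this page, might look like:

    private int getHitCount(Directory dir, Term term) throws IOException {
      IndexSearcher searcher = new IndexSearcher(dir, true);   // read-only searcher over the directory
      int hitCount = searcher.search(new TermQuery(term), null, 1000).totalHits;
      searcher.close();
      return hitCount;
    }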

    String[] unindexed = { "Netherlands", "Italy" };
    String[] unstored = { "Amsterdam has lots of bridges",
        "Venice has lots of canals" };
    String[] text = { "Amsterdam", "Venice" };

    MockRAMDirectory dir = new MockRAMDirectory();
    IndexWriter modifier = new IndexWriter(dir,
                                           new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);

    dir.failOn(failure.reset());

    for (int i = 0; i < keywords.length; i++) {
      Document doc = new Document();
      doc.add(new Field("id", keywords[i], Field.Store.YES,
                        Field.Index.NOT_ANALYZED));
      doc.add(new Field("country", unindexed[i], Field.Store.YES,
                        Field.Index.NO));
      doc.add(new Field("contents", unstored[i], Field.Store.NO,
                        Field.Index.ANALYZED));
      doc.add(new Field("city", text[i], Field.Store.YES,
                        Field.Index.ANALYZED));
      try {
        modifier.addDocument(doc);
      } catch (IOException io) {
        break;
      }
    }

    String[] startFiles = dir.listAll();
    SegmentInfos infos = new SegmentInfos();
    infos.read(dir);
    new IndexFileDeleter(dir, new KeepOnlyLastCommitDeletionPolicy(), infos, null, null);
    String[] endFiles = dir.listAll();

    if (!Arrays.equals(startFiles, endFiles)) {
      fail("docswriter abort() failed to delete unreferenced files:\n  before delete:\n    "
           + arrayToString(startFiles) + "\n  after delete:\n    "
           + arrayToString(endFiles));

      fsDir.close();
    } finally {
      _TestUtil.rmDir(dir);
    }

    MockRAMDirectory dir2 = new MockRAMDirectory();
    runTest(dir2);
    dir2.close();
  }

  public void testReuseAcrossWriters() throws Exception {
    Directory dir = new MockRAMDirectory();

    SnapshotDeletionPolicy dp = new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
    IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), dp, IndexWriter.MaxFieldLength.UNLIMITED);
    // Force frequent flushes
    writer.setMaxBufferedDocs(2);
    Document doc = new Document();
    doc.add(new Field("content", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
    for(int i=0;i<7;i++) {
      writer.addDocument(doc);
      if (i % 2 == 0) {
        writer.commit();
      }
    }
    IndexCommit cp = (IndexCommit) dp.snapshot();
    copyFiles(dir, cp);
    writer.close();
    copyFiles(dir, cp);
   
    writer = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), dp, IndexWriter.MaxFieldLength.UNLIMITED);
    copyFiles(dir, cp);
    for(int i=0;i<7;i++) {
      writer.addDocument(doc);
      if (i % 2 == 0) {
        writer.commit();
      }
    }
    copyFiles(dir, cp);
    writer.close();
    copyFiles(dir, cp);
    dp.release();
    writer = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), dp, IndexWriter.MaxFieldLength.UNLIMITED);
    writer.close();
    try {
      copyFiles(dir, cp);
      fail("did not hit expected IOException");
    } catch (IOException ioe) {
      // expected
    }
    dir.close();
  }
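
The copyFiles helper is also outside this excerpt. The point of the test is that, while the snapshot is held, every file named by the IndexCommit stays readable no matter how long a backup takes, and that copying fails with an IOException once dp.release() lets the writer delete those files. A sketch of such a helper, assuming only the IndexCommit.getFileNames() and Directory.openInput() APIs:

    private void copyFiles(Directory dir, IndexCommit cp) throws Exception {
      // Every file named by the snapshot must stay readable until the
      // snapshot is released, even across writer close/reopen.
      Collection<String> files = cp.getFileNames();
      byte[] buffer = new byte[4096];
      for (String fileName : files) {
        IndexInput input = dir.openInput(fileName);
        try {
          long remaining = input.length();
          while (remaining > 0) {
            int chunk = (int) Math.min(buffer.length, remaining);
            input.readBytes(buffer, 0, chunk);   // a real backup would write these bytes somewhere
            remaining -= chunk;
          }
        } finally {
          input.close();
        }
      }
    }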

  // Make sure running BG merges still work fine even when
  // we are hitting exceptions during flushing.
  public void testFlushExceptions() throws IOException {

    MockRAMDirectory directory = new MockRAMDirectory();
    FailOnlyOnFlush failure = new FailOnlyOnFlush();
    directory.failOn(failure);

    IndexWriter writer = new IndexWriter(directory, ANALYZER, true, IndexWriter.MaxFieldLength.UNLIMITED);
    ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
    writer.setMergeScheduler(cms);
    writer.setMaxBufferedDocs(2);
    Document doc = new Document();
    Field idField = new Field("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
    doc.add(idField);
    int extraCount = 0;

    for(int i=0;i<10;i++) {
      for(int j=0;j<20;j++) {
        idField.setValue(Integer.toString(i*20+j));
        writer.addDocument(doc);
      }

      // must cycle here because sometimes the merge flushes
      // the doc we just added and so there's nothing to
      // flush, and we don't hit the exception
      while(true) {
        writer.addDocument(doc);
        failure.setDoFail();
        try {
          writer.flush(true, false, true);
          if (failure.hitExc) {
            fail("failed to hit IOException");
          }
          extraCount++;
        } catch (IOException ioe) {
          failure.clearDoFail();
          break;
        }
      }
    }

    writer.close();
    IndexReader reader = IndexReader.open(directory, true);
    assertEquals(200+extraCount, reader.numDocs());
    reader.close();
    directory.close();
  }
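
FailOnlyOnFlush is a MockRAMDirectory.Failure subclass defined elsewhere in this test class; MockRAMDirectory consults the installed Failure objects during file writes via eval(), and Failure also exposes reset(), which the earlier excerpts call before installing a failure with failOn(). The doFail/hitExc fields below match what the test reads and sets; the stack-trace check for a method named "doFlush" is an assumption about how the real class restricts the failure to flush time:

    private static class FailOnlyOnFlush extends MockRAMDirectory.Failure {
      boolean doFail;
      boolean hitExc;

      public void setDoFail() {
        doFail = true;     // arm the failure
        hitExc = false;
      }

      public void clearDoFail() {
        doFail = false;    // disarm once the test has seen the exception
      }

      @Override
      public void eval(MockRAMDirectory dir) throws IOException {
        if (doFail) {
          // Only fail while a flush is in progress (assumed method name)
          StackTraceElement[] trace = new Exception().getStackTrace();
          for (int i = 0; i < trace.length; i++) {
            if ("doFlush".equals(trace[i].getMethodName())) {
              hitExc = true;
              throw new IOException("now failing during flush");
            }
          }
        }
      }
    }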

  // Test that deletes committed after a merge started and
  // before it finishes, are correctly merged back:
  public void testDeleteMerging() throws IOException {

    RAMDirectory directory = new MockRAMDirectory();

    IndexWriter writer = new IndexWriter(directory, ANALYZER, true, IndexWriter.MaxFieldLength.UNLIMITED);
    ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
    writer.setMergeScheduler(cms);

    LogDocMergePolicy mp = new LogDocMergePolicy(writer);
    writer.setMergePolicy(mp);

    // Force degenerate merging so we can get a mix of
    // merging of segments with and without deletes at the
    // start:
    mp.setMinMergeDocs(1000);

    Document doc = new Document();
    Field idField = new Field("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
    doc.add(idField);
    for(int i=0;i<10;i++) {
      for(int j=0;j<100;j++) {
        idField.setValue(Integer.toString(i*100+j));
        writer.addDocument(doc);
      }

      int delID = i;
      while(delID < 100*(1+i)) {
        writer.deleteDocuments(new Term("id", ""+delID));
        delID += 10;
      }

      writer.commit();
    }

    writer.close();
    IndexReader reader = IndexReader.open(directory, true);
    // Verify that we did not lose any deletes: 1000 docs were
    // added and 550 were deleted (10 + 20 + ... + 100):
    assertEquals(450, reader.numDocs());
    reader.close();
    directory.close();
  }

  public void testNoExtraFiles() throws IOException {

    RAMDirectory directory = new MockRAMDirectory();

    IndexWriter writer = new IndexWriter(directory, ANALYZER, true, IndexWriter.MaxFieldLength.UNLIMITED);

    for(int iter=0;iter<7;iter++) {
      ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
      writer.setMergeScheduler(cms);
      writer.setMaxBufferedDocs(2);

      for(int j=0;j<21;j++) {
        Document doc = new Document();
        doc.add(new Field("content", "a b c", Field.Store.NO, Field.Index.ANALYZED));
        writer.addDocument(doc);
      }
       
      writer.close();
      TestIndexWriter.assertNoUnreferencedFiles(directory, "testNoExtraFiles");

      // Reopen
      writer = new IndexWriter(directory, ANALYZER, false, IndexWriter.MaxFieldLength.UNLIMITED);
    }

    writer.close();

    directory.close();
  }

  public void testNoWaitClose() throws IOException {
    RAMDirectory directory = new MockRAMDirectory();

    Document doc = new Document();
    Field idField = new Field("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
    doc.add(idField);

    IndexWriter writer = new IndexWriter(directory, ANALYZER, true, IndexWriter.MaxFieldLength.UNLIMITED);

    for(int iter=0;iter<10;iter++) {
      ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
      writer.setMergeScheduler(cms);
      writer.setMaxBufferedDocs(2);
      writer.setMergeFactor(100);

      for(int j=0;j<201;j++) {
        idField.setValue(Integer.toString(iter*201+j));
        writer.addDocument(doc);
      }

      int delID = iter*201;
      for(int j=0;j<20;j++) {
        writer.deleteDocuments(new Term("id", Integer.toString(delID)));
        delID += 5;
      }

      // Force a bunch of merge threads to kick off so we
      // stress out aborting them on close:
      writer.setMergeFactor(3);
      writer.addDocument(doc);
      writer.commit();

      writer.close(false);

      IndexReader reader = IndexReader.open(directory, true);
      assertEquals((1+iter)*182, reader.numDocs());   // 202 docs added and 20 deleted per iteration
      reader.close();

      // Reopen
      writer = new IndexWriter(directory, ANALYZER, false, IndexWriter.MaxFieldLength.UNLIMITED);
    }
    writer.close();

    directory.close();
  }

  // LUCENE-1270
  public void testHangOnClose() throws IOException {

    Directory dir = new MockRAMDirectory();
    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
    writer.setMergePolicy(new LogByteSizeMergePolicy(writer));
    writer.setMaxBufferedDocs(5);
    writer.setUseCompoundFile(false);
    writer.setMergeFactor(100);

    Document doc = new Document();
    doc.add(new Field("content", "aaa bbb ccc ddd eee fff ggg hhh iii", Field.Store.YES,
                      Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
    for(int i=0;i<60;i++)
      writer.addDocument(doc);
    writer.setMaxBufferedDocs(200);
    Document doc2 = new Document();
    doc2.add(new Field("content", "aaa bbb ccc ddd eee fff ggg hhh iii", Field.Store.YES,
                      Field.Index.NO));
    doc2.add(new Field("content", "aaa bbb ccc ddd eee fff ggg hhh iii", Field.Store.YES,
                      Field.Index.NO));
    doc2.add(new Field("content", "aaa bbb ccc ddd eee fff ggg hhh iii", Field.Store.YES,
                      Field.Index.NO));
    doc2.add(new Field("content", "aaa bbb ccc ddd eee fff ggg hhh iii", Field.Store.YES,
                      Field.Index.NO));
    for(int i=0;i<10;i++)
      writer.addDocument(doc2);
    writer.close();

    Directory dir2 = new MockRAMDirectory();
    writer = new IndexWriter(dir2, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
    LogByteSizeMergePolicy lmp = new LogByteSizeMergePolicy(writer);
    lmp.setMinMergeMB(0.0001);
    writer.setMergePolicy(lmp);
    writer.setMergeFactor(4);
    writer.setUseCompoundFile(false);
    writer.setMergeScheduler(new SerialMergeScheduler());
    writer.addIndexesNoOptimize(new Directory[] {dir});
    writer.close();
    dir.close();
    dir2.close();
  }
