Package org.apache.lucene.index

Examples of org.apache.lucene.index.IndexWriterConfig


  public FacetHandlerTest(String testname) {
    super(testname);
    _ramDir = new RAMDirectory();
    try {
      Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_43);
      IndexWriterConfig config = new IndexWriterConfig(Version.LUCENE_43, analyzer);
      IndexWriter idxWriter = new IndexWriter(_ramDir, config);
      // Add one empty document, so that Directory reader will have one sub reader
      Document doc = new Document();
      idxWriter.addDocument(doc);
      idxWriter.close();
View Full Code Here


    doTest(br, 3, answer, new String[] { "1", "2", "7" });

    BoboMultiReader reader = null;
    try {
      Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_43);
      IndexWriterConfig config = new IndexWriterConfig(Version.LUCENE_43, analyzer);
      IndexWriter idxWriter = new IndexWriter(_indexDir, config);
      idxWriter.deleteDocuments(new Term("id", "1"));
      idxWriter.deleteDocuments(new Term("id", "2"));
      idxWriter.commit();
      reader = newIndexReader();
View Full Code Here

    /* Underlying time facet for DynamicTimeRangeFacetHandler */
    facetHandlers.add(new RangeFacetHandler("timeinmillis", new PredefinedTermListFactory(
        Long.class, DynamicTimeRangeFacetHandler.NUMBER_FORMAT), null));
    Directory idxDir = new RAMDirectory();
    Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_43);
    IndexWriterConfig config = new IndexWriterConfig(Version.LUCENE_43, analyzer);
    IndexWriter idxWriter = new IndexWriter(idxDir, config);

    long now = System.currentTimeMillis();
    DecimalFormat df = new DecimalFormat(DynamicTimeRangeFacetHandler.NUMBER_FORMAT);
    for (long l = 0; l < 53; l++) {
View Full Code Here

   */
  public DirectoryTaxonomyWriter(Directory directory, OpenMode openMode,
      TaxonomyWriterCache cache) throws IOException {

    dir = directory;
    IndexWriterConfig config = createIndexWriterConfig(openMode);
    indexWriter = openIndexWriter(dir, config);

    // verify (to some extent) that merge policy in effect would preserve category docids
    assert !(indexWriter.getConfig().getMergePolicy() instanceof TieredMergePolicy) :
      "for preserving category docids, merging none-adjacent segments is not allowed";
   
    // after we opened the writer, and the index is locked, it's safe to check
    // the commit data and read the index epoch
    openMode = config.getOpenMode();
    if (!DirectoryReader.indexExists(directory)) {
      indexEpoch = 1;
    } else {
      String epochStr = null;
      Map<String, String> commitData = readCommitData(directory);
View Full Code Here

    // TODO: should we use a more optimized Codec, e.g. Pulsing (or write custom)?
    // The taxonomy has a unique structure, where each term is associated with one document
    // Make sure we use a MergePolicy which always merges adjacent segments and thus
    // keeps the doc IDs ordered as well (this is crucial for the taxonomy index).
    return new IndexWriterConfig(Version.LUCENE_43,
        null).setOpenMode(openMode).setMergePolicy(
        new LogByteSizeMergePolicy());
  }
View Full Code Here

        // indexing by lower-casing & tokenizing on whitespace
        Analyzer indexAnalyzer = new WhitespaceLowerCaseAnalyzer();

        // create the object that will actually build the Lucene index
        indexWriter = new IndexWriter(index, new IndexWriterConfig(Version.LUCENE_4_9, indexAnalyzer));

        // let's see how long this takes...
        Date start = new Date();

        // if we were given an alternate names file, process it
View Full Code Here

  @Before
  public void setUp() throws Exception {
    dir = new RAMDirectory();
    analyzer = new UIMAPayloadsAnalyzer("/HmmTaggerAggregate.xml");
    writer = new IndexWriter(dir, new IndexWriterConfig(Version.LUCENE_33, analyzer));

    Document doc = new Document();
    doc.add(new Field("title", "this is a dummy title containing an entity for London", Field.Store.YES,
            Field.Index.ANALYZED));
    doc.add(new Field("contents", "there is some content written here about the british city",
View Full Code Here

  @Before
  public void setUp() throws Exception {
    dir = new RAMDirectory();
    analyzer = new UIMABaseAnalyzer("/WhitespaceTokenizer.xml",
            "org.apache.uima.TokenAnnotation");
    writer = new IndexWriter(dir, new IndexWriterConfig(Version.LUCENE_32, analyzer));
  }
View Full Code Here

  @Before
  public void setUp() throws Exception {
    dir = new RAMDirectory();
    analyzer = new UIMAPayloadsAnalyzer("/HmmTaggerAggregate.xml");
    writer = new IndexWriter(dir, new IndexWriterConfig(Version.LUCENE_33, analyzer));
  }
View Full Code Here

      IOException {

    analyzer = new StandardAnalyzer(Version.LUCENE_40);

    index = new RAMDirectory();
    IndexWriterConfig config = new IndexWriterConfig(Version.LUCENE_40,
        analyzer);
    IndexWriter w = new IndexWriter(index, config);

    for (DocumentInCollection xmlDoc : docs) {
      addDoc(w, xmlDoc);
View Full Code Here

TOP

Related Classes of org.apache.lucene.index.IndexWriterConfig

Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc and owned by ORACLE Inc. Contact coftware#gmail.com.