Package com.gentics.cr.monitoring

Examples of com.gentics.cr.monitoring.UseCase


   * Write Documents with Deskriptor and Synonym Field, where Deskriptor is the base word and Synonym its synonym
   *
   * @throws IOException if there is a Problem when accessing the Index
   */
  private synchronized void reIndex() throws IOException {
    UseCase ucReIndex = MonitorFactory.startUseCase("reIndex()");
   
    // build a dictionary (from the spell package)
    log.debug("Starting to reindex SYN index.");
    IndexAccessor synonymAccessor = synonym.getSynonymLocation().getAccessor();

    IndexWriter synonymWriter = synonymAccessor.getWriter();
    Collection<CRResolvableBean> objectsToIndex = null;
    try {
      if (rp == null) {
        throw new CRException("FATAL ERROR", "RequestProcessor not available");
      }

      // and get the current rule
      String rule = (String) config.get(RULE_KEY);
      if (rule == null) {
        rule = "";
      }
      if (rule.length() == 0 || rule == null) {
        rule = "1 == 1";
      }


      try {
        CRRequest req = new CRRequest();
        req.setRequestFilter(rule);
        status.setCurrentStatusString("SYN Get objects to update " + "in the index ...");
        objectsToIndex = getObjectsToUpdate(req, rp, true, null);
      } catch (Exception e) {
        log.error("ERROR while cleaning SYN index", e);
      }


      if (objectsToIndex == null) {
        log.debug("SYN Rule returned no objects to index. Skipping...");
        return;
      }

      status.setObjectCount(objectsToIndex.size());
      log.debug("SYN index job with " + objectsToIndex.size() + " objects to index.");

      String descriptorName = (String) config.get(DESCRIPTOR_NAME_KEY);
      String synonymName = (String) config.get(SYNONYM_NAME_KEY);


      status.setCurrentStatusString("Starting to index slices.");
      int objCount = 0;
      try {
        for (Iterator<CRResolvableBean> iterator = objectsToIndex.iterator(); iterator.hasNext();) {
          CRResolvableBean bean = iterator.next();
          iterator.remove();
          objCount++;
          String descriptorValue = bean.getString(descriptorName);
          String synonymValue = bean.getString(synonymName);
          if (descriptorValue != null && synonymValue != null) {
            descriptorValue = descriptorValue.toLowerCase();
            if (synonymValue != null) {
              synonymValue = synonymValue.toLowerCase();
            }
            Document doc = new Document();
            doc.add(new Field("Deskriptor", descriptorValue, Field.Store.YES, Field.Index.NOT_ANALYZED));
            doc.add(new Field("Synonym", synonymValue, Field.Store.YES, Field.Index.NOT_ANALYZED));
            synonymWriter.addDocument(doc);
            log.debug("WRITE SYN " + objCount + " " + descriptorValue + " " + synonymValue);
            synonymWriter.commit();
            log.debug("Number of actual Synonym: " + synonymWriter.numDocs());
          }
        }
      } finally {
        // if documents where added to the index create a reopen file and
        // optimize the writer
        log.debug("Number of indexed Synonyms finished: " + synonymWriter.numDocs());
        synonymAccessor.release(synonymWriter);
      }
 
      log.debug("Finished reindexing synonym index.");
      ucReIndex.stop();
    } catch (Exception e) {
      e.printStackTrace();
    }
  }
View Full Code Here


   */
  protected Collection<CRResolvableBean> getObjectsToUpdate(final CRRequest request, final RequestProcessor rp,
      final boolean forceFullUpdate, final IndexUpdateChecker indexUpdateChecker) {
    Collection<CRResolvableBean> updateObjects = new Vector<CRResolvableBean>();

    UseCase objectsToUpdateCase = MonitorFactory.startUseCase("AbstractUpdateCheck.getObjectsToUpdate("
        + request.get("CRID") + ")");
    try {
      if (forceFullUpdate || "".equals(timestampAttribute)) {
        try {
          updateObjects = (Collection<CRResolvableBean>) rp.getObjects(request);
        } catch (CRException e) {
          String message = "Error getting objects to full index from " + "RequestProcessor. "
              + e.getMessage();
          log.error(message, e);
          status.setError(message);
        }
      } else {
        //Sorted (by the idAttribute) list of Resolvables to check for
        //Updates.
        Collection<CRResolvableBean> objectsToIndex;
        try {
          defaultizeRequest(request);
          objectsToIndex = (Collection<CRResolvableBean>) rp.getObjects(request);
        } catch (CRException e) {
          String message = "Error getting objects to index from " + "RequestProcessor. " + e.getMessage();
          log.error(message, e);
          status.setError(message);
          return null;
        }
        Iterator<CRResolvableBean> resolvableIterator = objectsToIndex.iterator();
        try {
          while (resolvableIterator.hasNext()) {
            CRResolvableBean crElement = resolvableIterator.next();
            Object crElementIDObject = crElement.get(idAttribute);
            if (crElementIDObject == null) {
              log.error("IDAttribute is null!");
            }
            String crElementID = crElementIDObject.toString();
            Object crElementTimestamp = crElement.get(timestampAttribute);
            //TODO: if any transformers change an attribute that is used for the update check we have to run the transformers
            //before
            if (!indexUpdateChecker.isUpToDate(
              crElementID,
              crElementTimestamp,
              timestampAttribute,
              crElement)) {
              updateObjects.add(crElement);
            }
          }
        } catch (WrongOrderException e) {
          log.error("Got the objects from the datasource in the wrong" + "order.", e);
          status.setError("Got the objects from the datasource in the" + "wrong order.");
          return null;
        }
      }
      //Finally delete all Objects from Index that are not checked for an
      //Update
      //fixing possible npe
      if (indexUpdateChecker != null) {
        indexUpdateChecker.deleteStaleObjects();
      }
    } finally {
      objectsToUpdateCase.stop();
    }
    return updateObjects;
  }
View Full Code Here

    }

  }

  private synchronized void reIndex() throws IOException {
    UseCase ucReIndex = MonitorFactory.startUseCase("reIndex()");
    // build a dictionary (from the spell package)
    log.debug("Starting to reindex autocomplete index.");

    LuceneIndexLocation source = this.autocompleter.getSource();
    LuceneIndexLocation autocompleteLocation = this.autocompleter.getAutocompleteLocation();
    String autocompletefield = this.autocompleter.getAutocompletefield();

    IndexAccessor sia = source.getAccessor();
    IndexReader sourceReader = sia.getReader(false);
    LuceneDictionary dict = new LuceneDictionary(sourceReader, autocompletefield);
    IndexAccessor aia = autocompleteLocation.getAccessor();
    // IndexReader reader = aia.getReader(false);
    IndexWriter writer = aia.getWriter();

    try {
      writer.setMergeFactor(300);
      writer.setMaxBufferedDocs(150);
      // go through every word, storing the original word (incl. n-grams)
      // and the number of times it occurs
      // CREATE WORD LIST FROM SOURCE INDEX
      Map<String, Integer> wordsMap = new HashMap<String, Integer>();
      Iterator<String> iter = (Iterator<String>) dict.getWordsIterator();
      while (iter.hasNext()) {
        String word = iter.next();
        int len = word.length();
        if (len < 3) {
          continue; // too short we bail but "too long" is fine...
        }
        if (wordsMap.containsKey(word)) {
          throw new IllegalStateException("Lucene returned a bad word list");
        } else {
          // use the number of documents this word appears in
          wordsMap.put(word, sourceReader.docFreq(new Term(autocompletefield, word)));
        }
      }
      // DELETE OLD OBJECTS FROM INDEX
      writer.deleteAll();

      // UPDATE DOCUMENTS IN AUTOCOMPLETE INDEX
      for (String word : wordsMap.keySet()) {
        // ok index the word
        Document doc = new Document();
        doc.add(new Field(SOURCE_WORD_FIELD, word, Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS)); // orig term
        doc.add(new Field(GRAMMED_WORDS_FIELD, word, Field.Store.YES, Field.Index.ANALYZED)); // grammed
        doc.add(new Field(COUNT_FIELD, Integer.toString(wordsMap.get(word)), Field.Store.YES,
            Field.Index.NOT_ANALYZED_NO_NORMS)); // count
        writer.addDocument(doc);
      }
      writer.optimize();
      autocompleteLocation.createReopenFile();
    } finally {

      sia.release(sourceReader, false);
      // close writer

      aia.release(writer);
      // aia.release(reader,false);
    }
    log.debug("Finished reindexing autocomplete index.");
    ucReIndex.stop();
  }
View Full Code Here

   * Index related tasks and the {@link Autocompleter} will only handle search
   * requests
   */
  @Deprecated
  private synchronized void reIndex() throws IOException {
    UseCase ucReIndex = MonitorFactory.startUseCase("reIndex()");
    // build a dictionary (from the spell package)
    log.debug("Starting to reindex autocomplete index.");
    IndexAccessor sia = this.source.getAccessor();
    IndexReader sourceReader = sia.getReader(false);
    LuceneDictionary dict = new LuceneDictionary(sourceReader, this.autocompletefield);
    IndexAccessor aia = this.autocompleteLocation.getAccessor();
    // IndexReader reader = aia.getReader(false);
    IndexWriter writer = aia.getWriter();

    try {
      writer.setMergeFactor(300);
      writer.setMaxBufferedDocs(150);
      // go through every word, storing the original word (incl. n-grams)
      // and the number of times it occurs
      // CREATE WORD LIST FROM SOURCE INDEX
      Map<String, Integer> wordsMap = new HashMap<String, Integer>();
      Iterator<String> iter = (Iterator<String>) dict.getWordsIterator();
      while (iter.hasNext()) {
        String word = iter.next();
        int len = word.length();
        if (len < 3) {
          continue; // too short we bail but "too long" is fine...
        }
        if (wordsMap.containsKey(word)) {
          throw new IllegalStateException("Lucene returned a bad word list");
        } else {
          // use the number of documents this word appears in
          wordsMap.put(word, sourceReader.docFreq(new Term(autocompletefield, word)));
        }
      }
      // DELETE OLD OBJECTS FROM INDEX
      writer.deleteAll();

      // UPDATE DOCUMENTS IN AUTOCOMPLETE INDEX
      for (String word : wordsMap.keySet()) {
        // ok index the word
        Document doc = new Document();
        doc.add(new Field(SOURCE_WORD_FIELD, word, Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS)); // orig term
        doc.add(new Field(GRAMMED_WORDS_FIELD, word, Field.Store.YES, Field.Index.ANALYZED)); // grammed
        doc.add(new Field(COUNT_FIELD, Integer.toString(wordsMap.get(word)), Field.Store.YES,
            Field.Index.NOT_ANALYZED_NO_NORMS)); // count
        writer.addDocument(doc);
      }
      writer.optimize();
    } finally {

      sia.release(sourceReader, false);
      // close writer

      aia.release(writer);
      // aia.release(reader,false);
    }
    autocompleteLocation.createReopenFile();
    log.debug("Finished reindexing autocomplete index.");
    ucReIndex.stop();
  }
View Full Code Here

TOP

Related Classes of com.gentics.cr.monitoring.UseCase

Copyright © 2018 www.massapicom. All rights reserved.
All source code are property of their respective owners. Java is a trademark of Sun Microsystems, Inc and owned by ORACLE Inc. Contact coftware#gmail.com.