Package org.apache.uima.analysis_engine

Examples of org.apache.uima.analysis_engine.CasIterator
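
A minimal, self-contained sketch of the basic usage pattern, assuming a CAS-multiplier (segmenter) descriptor at the hypothetical path segmenter.xml: pass a CAS to AnalysisEngine.processAndOutputNewCASes(), step through the output CASes it produces, and release each one when done.

import org.apache.uima.UIMAFramework;
import org.apache.uima.analysis_engine.AnalysisEngine;
import org.apache.uima.analysis_engine.AnalysisEngineDescription;
import org.apache.uima.analysis_engine.CasIterator;
import org.apache.uima.cas.CAS;
import org.apache.uima.util.XMLInputSource;

public class CasIteratorExample {
  public static void main(String[] args) throws Exception {
    // Parse the (hypothetical) CAS multiplier descriptor and instantiate the AE.
    AnalysisEngineDescription desc = UIMAFramework.getXMLParser()
            .parseAnalysisEngineDescription(new XMLInputSource("segmenter.xml"));
    AnalysisEngine ae = UIMAFramework.produceAnalysisEngine(desc);

    CAS cas = ae.newCAS();
    cas.setDocumentText("Line one\nLine two\nLine three");

    // processAndOutputNewCASes returns a CasIterator over the CASes the AE outputs.
    CasIterator iter = ae.processAndOutputNewCASes(cas);
    try {
      while (iter.hasNext()) {
        CAS outCas = iter.next();
        System.out.println(outCas.getDocumentText());
        outCas.release(); // return the output CAS to the AE's CAS pool
      }
    } finally {
      ae.destroy();
    }
  }
}

Releasing each output CAS matters because output CASes are typically drawn from a CAS pool of limited size; the excerpts below follow the same pattern after consuming each segment.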


       
        return;
      }
      long time = System.nanoTime();
      long totalProcessTime = 0;  // stores total time spent producing ALL CASes
      CasIterator casIterator = ae.processAndOutputNewCASes(aCAS);
      //  Store how long it took to call processAndOutputNewCASes()
      totalProcessTime = ( System.nanoTime() - time);
      long sequence = 1;
      String newCasReferenceId = null;
      long hasNextTime = 0;         // stores time in hasNext()
      long getNextTime = 0;         // stores time in next();  
      long timeToProcessCAS = 0;    // stores time in hasNext() and next() for each CAS
      boolean moreCASesToProcess = true;
/*     
     
      String parentCasReferenceId = null;
      CacheEntry inputCasCacheEntry = null;
      try
      {
        //  Fetch cache entry for the input CAS
        inputCasCacheEntry = getInProcessCache().getCacheEntryForCAS(aCasReferenceId);
        parentCasReferenceId = inputCasCacheEntry.getInputCasReferenceId();
      }
      catch( Exception e )
      {
        //  An exception may be thrown here if the service is being stopped.
        //  The top level controller may have already cleaned up the cache
        //  and the getCacheEntryForCAS() will throw an exception. Ignore it
        //  here, we are shutting down.
      }
*/
      while (moreCASesToProcess)
      {
        hasNextTime = System.nanoTime();
        if ( !casIterator.hasNext() )
        {
          moreCASesToProcess = false;
          //  Measure how long it took to call hasNext()
          timeToProcessCAS = (System.nanoTime()-hasNextTime);
          totalProcessTime += timeToProcessCAS;
          break;
        }
        //  Measure how long it took to call hasNext()
        timeToProcessCAS = (System.nanoTime()-hasNextTime);
        getNextTime = System.nanoTime();
        CAS casProduced = casIterator.next();
        //  Add how long it took to call next()
        timeToProcessCAS += (System.nanoTime()- getNextTime);
        //  Add time to call hasNext() and next() to the running total
        totalProcessTime += timeToProcessCAS;
        //  If the service is stopped or aborted, stop generating new CASes and just return the input CAS
View Full Code Here


      CAS initialCas = ae.newCAS();
      initialCas.setDocumentText(document);

      // pass the CAS to the AnalysisEngine and get back
      // a CasIterator for stepping over the output CASes that are produced.
      CasIterator casIterator = ae.processAndOutputNewCASes(initialCas);
      while (casIterator.hasNext()) {
        CAS outCas = casIterator.next();

        // dump the document text and annotations for this segment
        System.out.println("********* NEW SEGMENT *********");
        System.out.println(outCas.getDocumentText());
        PrintAnnotations.printAnnotations(outCas, System.out);
View Full Code Here

    process(aCAS);
    buildProcessTraceFromMBeanStats(aTrace);
  }

  public ProcessTrace process(CAS aCAS) throws AnalysisEngineProcessException {
    CasIterator iter = processAndOutputNewCASes(aCAS);
    // step through all output CASes which lets the AE finish all processing
    while (iter.hasNext()) {
      CAS cas = iter.next();
      cas.release();
    }
    return buildProcessTraceFromMBeanStats();
  }
View Full Code Here

            if (nextStep instanceof SimpleStep) {
              String nextAeKey = ((SimpleStep) nextStep).getAnalysisEngineKey();
              AnalysisEngine nextAe = (AnalysisEngine) mComponentAnalysisEngineMap.get(nextAeKey);
              if (nextAe != null) {
                // invoke next AE in flow
                CasIterator casIter;
                casIter = nextAe.processAndOutputNewCASes(cas);
                if (casIter.hasNext()) // new CASes are output
                {
                  // get the first output CAS
                  CAS outputCas = casIter.next();
                  // push the CasIterator, original CAS, and Flow onto a stack so we
                  // can get the other output CASes and the original CAS later
                  casIteratorStack.push(new StackFrame(casIter, cas, flow, nextAeKey));
                  // compute Flow for the output CAS
                  flow = flow.newCasProduced(outputCas, nextAeKey);
View Full Code Here

    // logging and instrumentation
    String resourceName = getMetaData().getName();
    getLogger().logrb(Level.FINE, CLASS_NAME.getName(), "process", LOG_RESOURCE_BUNDLE,
            "UIMA_analysis_engine_process_begin__FINE", resourceName);
    try {
      CasIterator iterator = _getASB().process(aCAS);

      // log end of event
      getLogger().logrb(Level.FINE, CLASS_NAME.getName(), "process", LOG_RESOURCE_BUNDLE,
              "UIMA_analysis_engine_process_end__FINE", resourceName);
      return iterator;
View Full Code Here

                      new XMLInputSource(JUnitExtension
                              .getFile("TextAnalysisEngineImplTest/NewlineSegmenter.xml")));
      AnalysisEngine tae = UIMAFramework.produceAnalysisEngine(segmenterDesc);
      CAS cas = tae.newCAS();
      cas.setDocumentText("Line one\nLine two\nLine three");
      CasIterator iter = tae.processAndOutputNewCASes(cas);
      assertTrue(iter.hasNext());
      CAS outCas = iter.next();
      assertEquals("Line one", outCas.getDocumentText());
      outCas.release();
      assertTrue(iter.hasNext());
      outCas = iter.next();
      assertEquals("Line two", outCas.getDocumentText());
      outCas.release();
      assertTrue(iter.hasNext());
      outCas = iter.next();
      assertEquals("Line three", outCas.getDocumentText());
      outCas.release();
      assertFalse(iter.hasNext());

      // aggregate
      AnalysisEngineDescription aggSegDesc = UIMAFramework.getXMLParser()
              .parseAnalysisEngineDescription(
                      new XMLInputSource(JUnitExtension
                              .getFile("TextAnalysisEngineImplTest/AggregateWithSegmenter.xml")));
      tae = UIMAFramework.produceAnalysisEngine(aggSegDesc);
      cas = tae.newCAS();
      cas.setDocumentText("Line one\nLine two\nLine three");
      iter = tae.processAndOutputNewCASes(cas);
      assertTrue(iter.hasNext());
      outCas = iter.next();
      assertEquals("Line one", outCas.getDocumentText());
      assertEquals("Line one", TestAnnotator.lastDocument);
      outCas.release();
      assertTrue(iter.hasNext());
      outCas = iter.next();
      assertEquals("Line two", outCas.getDocumentText());
      assertEquals("Line two", TestAnnotator.lastDocument);
      outCas.release();
      assertTrue(iter.hasNext());
      outCas = iter.next();
      assertEquals("Line three", outCas.getDocumentText());
      assertEquals("Line three", TestAnnotator.lastDocument);
      outCas.release();
      assertFalse(iter.hasNext());
      // Annotator should NOT get the original CAS according to the default flow
      assertEquals("Line three", TestAnnotator.lastDocument);

      // nested aggregate
      AnalysisEngineDescription nestedAggSegDesc = UIMAFramework
              .getXMLParser()
              .parseAnalysisEngineDescription(
                      new XMLInputSource(
                              JUnitExtension
                                      .getFile("TextAnalysisEngineImplTest/AggregateContainingAggregateSegmenter.xml")));
      tae = UIMAFramework.produceAnalysisEngine(nestedAggSegDesc);
      cas = tae.newCAS();
      cas.setDocumentText("Line one\nLine two\nLine three");
      iter = tae.processAndOutputNewCASes(cas);
      assertTrue(iter.hasNext());
      outCas = iter.next();
      assertEquals("Line one", outCas.getDocumentText());
      assertEquals("Line one", TestAnnotator.lastDocument);
      outCas.release();
      assertTrue(iter.hasNext());
      outCas = iter.next();
      assertEquals("Line two", outCas.getDocumentText());
      assertEquals("Line two", TestAnnotator.lastDocument);
      outCas.release();
      assertTrue(iter.hasNext());
      outCas = iter.next();
      assertEquals("Line three", outCas.getDocumentText());
      assertEquals("Line three", TestAnnotator.lastDocument);
      outCas.release();
      assertFalse(iter.hasNext());
      // Annotator should NOT get the original CAS according to the default flow
      assertEquals("Line three", TestAnnotator.lastDocument);

      // two segmenters
      AnalysisEngineDescription twoSegDesc = UIMAFramework.getXMLParser()
              .parseAnalysisEngineDescription(
                      new XMLInputSource(JUnitExtension
                              .getFile("TextAnalysisEngineImplTest/AggregateWith2Segmenters.xml")));
      tae = UIMAFramework.produceAnalysisEngine(twoSegDesc);
      cas = tae.newCAS();
      cas.setDocumentText("One\tTwo\nThree\tFour");
      iter = tae.processAndOutputNewCASes(cas);
      assertTrue(iter.hasNext());
      outCas = iter.next();
      assertEquals("One", outCas.getDocumentText());
      assertEquals("One", TestAnnotator.lastDocument);
      outCas.release();
      assertTrue(iter.hasNext());
      outCas = iter.next();
      assertEquals("Two", outCas.getDocumentText());
      assertEquals("Two", TestAnnotator.lastDocument);
      outCas.release();
      assertTrue(iter.hasNext());
      outCas = iter.next();
      assertEquals("Three", outCas.getDocumentText());
      assertEquals("Three", TestAnnotator.lastDocument);
      outCas.release();
      assertTrue(iter.hasNext());
      outCas = iter.next();
      assertEquals("Four", outCas.getDocumentText());
      assertEquals("Four", TestAnnotator.lastDocument);
      outCas.release();
      assertFalse(iter.hasNext());
      // Annotator should NOT get the original CAS according to the default flow
      assertEquals("Four", TestAnnotator.lastDocument);

      // dropping segments
      aggSegDesc = UIMAFramework.getXMLParser().parseAnalysisEngineDescription(
              new XMLInputSource(JUnitExtension
                      .getFile("TextAnalysisEngineImplTest/AggregateSegmenterForDropTest.xml")));
      tae = UIMAFramework.produceAnalysisEngine(aggSegDesc);
      cas = tae.newCAS();
      cas.setDocumentText("Line one\nDROP\nLine two\nDROP\nLine three");
      // results should be the same as the first aggregate segmenter test.
      // segments whose text is DROP should not be output.
      iter = tae.processAndOutputNewCASes(cas);
      assertTrue(iter.hasNext());
      outCas = iter.next();
      assertEquals("Line one", outCas.getDocumentText());
      assertEquals("Line one", TestAnnotator.lastDocument);
      outCas.release();
      assertTrue(iter.hasNext());
      outCas = iter.next();
      assertEquals("Line two", outCas.getDocumentText());
      assertEquals("Line two", TestAnnotator.lastDocument);
      outCas.release();
      assertTrue(iter.hasNext());
      outCas = iter.next();
      assertEquals("Line three", outCas.getDocumentText());
      assertEquals("Line three", TestAnnotator.lastDocument);
      outCas.release();
      assertFalse(iter.hasNext());
      // Annotator should NOT get the original CAS according to the default flow
      assertEquals("Line three", TestAnnotator.lastDocument);
    } catch (Exception e) {
      JUnitExtension.handleException(e);
    }
View Full Code Here

      AnalysisEngine tae = UIMAFramework.produceAnalysisEngine(aggSegDesc);
      CAS cas = tae.newCAS();
      for (int i = 0; i < 2; i++) // verify we can do this more than once
      {
        cas.setDocumentText("Line one\nLine two\nERROR");
        CasIterator iter = tae.processAndOutputNewCASes(cas);
        assertTrue(iter.hasNext());
        CAS outCas = iter.next();
        assertEquals("Line one", outCas.getDocumentText());
        outCas.release();
        assertTrue(iter.hasNext());
        outCas = iter.next();
        assertEquals("Line two", outCas.getDocumentText());
        outCas.release();
        try {
          assertTrue(iter.hasNext());
          outCas = iter.next();
          fail(); // the above should throw an exception
        } catch (AnalysisEngineProcessException e) {
          // expected
        }
        cas.reset();
      }

      // nested aggregate
      AnalysisEngineDescription nestedAggSegDesc = UIMAFramework
              .getXMLParser()
              .parseAnalysisEngineDescription(
                      new XMLInputSource(
                              JUnitExtension
                                      .getFile("TextAnalysisEngineImplTest/NestedAggregateSegmenterForErrorTest.xml")));
      tae = UIMAFramework.produceAnalysisEngine(nestedAggSegDesc);
      cas = tae.newCAS();
      for (int i = 0; i < 2; i++) // verify we can do this more than once
      {
        cas.setDocumentText("Line one\nLine two\nERROR");
        CasIterator iter = tae.processAndOutputNewCASes(cas);
        assertTrue(iter.hasNext());
        CAS outCas = iter.next();
        assertEquals("Line one", outCas.getDocumentText());
        outCas.release();
        assertTrue(iter.hasNext());
        outCas = iter.next();
        assertEquals("Line two", outCas.getDocumentText());
        outCas.release();
        try {
          assertTrue(iter.hasNext());
          outCas = iter.next();
          fail(); // the above should throw an exception
        } catch (AnalysisEngineProcessException e) {
          // expected
        }
        cas.reset();
      }

      // 2 segmenters
      AnalysisEngineDescription twoSegDesc = UIMAFramework
              .getXMLParser()
              .parseAnalysisEngineDescription(
                      new XMLInputSource(
                              JUnitExtension
                                      .getFile("TextAnalysisEngineImplTest/AggregateWith2SegmentersForErrorTest.xml")));
      tae = UIMAFramework.produceAnalysisEngine(twoSegDesc);
      cas = tae.newCAS();
      for (int i = 0; i < 2; i++) // verify we can do this more than once
      {
        cas.setDocumentText("One\tTwo\nThree\tERROR");
        CasIterator iter = tae.processAndOutputNewCASes(cas);
        assertTrue(iter.hasNext());
        CAS outCas = iter.next();
        assertEquals("One", outCas.getDocumentText());
        outCas.release();
        assertTrue(iter.hasNext());
        outCas = iter.next();
        assertEquals("Two", outCas.getDocumentText());
        outCas.release();
        assertTrue(iter.hasNext());
        outCas = iter.next();
        assertEquals("Three", outCas.getDocumentText());
        outCas.release();
        try {
          assertTrue(iter.hasNext());
          outCas = iter.next();
          fail(); // the above should throw an exception
        } catch (AnalysisEngineProcessException e) {
          // expected
        }
        cas.reset();
      }

      // segmenter that requests too many CASes
      AnalysisEngineDescription segmenterDesc = UIMAFramework.getXMLParser()
              .parseAnalysisEngineDescription(
                      new XMLInputSource(JUnitExtension
                              .getFile("TextAnalysisEngineImplTest/BadSegmenter.xml")));
      tae = UIMAFramework.produceAnalysisEngine(segmenterDesc);
      cas = tae.newCAS();
      cas.setDocumentText("Line one\nLine two\nLine three");
      CasIterator iter = tae.processAndOutputNewCASes(cas);
      assertTrue(iter.hasNext());
      CAS outCas = iter.next(); // first call OK
      outCas.release();
      assertTrue(iter.hasNext());
      // next call should fail with AnalysisEngineProcessException
      try {
        iter.next();
        fail(); // should not get here
      } catch (AnalysisEngineProcessException e) {
        // should get here
      }
    } catch (Exception e) {
View Full Code Here

      FeatureStructure sdiFS3 = inputCas3.createFS(sdiType);
      sdiFS3.setStringValue(uriFeat, "cas3");
      inputCas3.getIndexRepository().addFS(sdiFS3);

      // input first CAS. Should be no segments yet.
      CasIterator iter = tae.processAndOutputNewCASes(inputCas1);
      assertFalse(iter.hasNext());
      // input second CAS. We should get back one segment.
      iter = tae.processAndOutputNewCASes(inputCas2);
      assertTrue(iter.hasNext());
      CAS outCas = iter.next();
      assertEquals("This is one.", outCas.getDocumentText());
      // -- check SourceDocumentInformation FSs
      Iterator sdiIter = outCas.getAnnotationIndex(sdiType).iterator();
      assertTrue(sdiIter.hasNext());
      AnnotationFS outSdiFs = (AnnotationFS) sdiIter.next();
      assertEquals("This is", outSdiFs.getCoveredText());
      assertEquals("cas1", outSdiFs.getStringValue(uriFeat));
      assertTrue(sdiIter.hasNext());
      outSdiFs = (AnnotationFS) sdiIter.next();
      assertEquals(" one.", outSdiFs.getCoveredText());
      assertEquals("cas2", outSdiFs.getStringValue(uriFeat));
      assertFalse(sdiIter.hasNext());
      // --
      assertFalse(iter.hasNext());

      // input third CAS. We should get back one more segment.
      iter = tae.processAndOutputNewCASes(inputCas3);
      assertTrue(iter.hasNext());
      outCas = iter.next();
      assertEquals("This is two.", outCas.getDocumentText());
      // -- check SourceDocumentInformation FSs
      sdiIter = outCas.getAnnotationIndex(sdiType).iterator();
      assertTrue(sdiIter.hasNext());
      outSdiFs = (AnnotationFS) sdiIter.next();
      assertEquals("This is", outSdiFs.getCoveredText());
      assertEquals("cas2", outSdiFs.getStringValue(uriFeat));
      assertTrue(sdiIter.hasNext());
      outSdiFs = (AnnotationFS) sdiIter.next();
      assertEquals(" two.", outSdiFs.getCoveredText());
      assertEquals("cas3", outSdiFs.getStringValue(uriFeat));
      assertFalse(sdiIter.hasNext());
      // --
      assertFalse(iter.hasNext());
    } catch (Exception e) {
      JUnitExtension.handleException(e);
    }
  }
View Full Code Here

      } else {
        String path = produceUniqueName(rootAem);
        beforeAnalysisManagementObjects.add(deepCopyMetrics(rootAem, path));
      }
     
      CasIterator casIterator = ae.processAndOutputNewCASes(aCAS);
      if ( stackDumpTimer != null ) {
        stackDumpTimer.cancel();
        stackDumpTimer = null;   // nullify timer instance so that we don't have to worry about
                                 // it in case an exception happens below
      }
     
      // Store how long it took to call processAndOutputNewCASes()
      totalProcessTime = (super.getCpuTime() - time);
      long sequence = 1;
      long hasNextTime = 0; // stores time in hasNext()
      long getNextTime = 0; // stores time in next();
      boolean moreCASesToProcess = true;
      boolean casAbortedDueToExternalRequest = false;
      while (moreCASesToProcess) {
        long timeToProcessCAS = 0; // stores time in hasNext() and next() for each CAS
        hasNextTime = super.getCpuTime();
        //  Start the heap dump timer. This timer is only started if explicitly enabled
        //  via the System property -DheapDumpThreshold=<x>, where x is the number of seconds the
        //  method is allowed to take. If the method does not complete within the allowed window,
        //  the heap and stack trace dump of all threads will be produced.
        stackDumpTimer = ifEnabledStartHeapDumpTimer();
        if (!casIterator.hasNext()) {
          moreCASesToProcess = false;
          // Measure how long it took to call hasNext()
          timeToProcessCAS = (super.getCpuTime() - hasNextTime);
          totalProcessTime += timeToProcessCAS;
          if ( stackDumpTimer != null ) {
            stackDumpTimer.cancel();
            stackDumpTimer = null;   // nullify timer instance so that we don't have to worry about
                                     // it in case an exception happens below
          }
          break; // from while
        }
        if ( stackDumpTimer != null ) {
          stackDumpTimer.cancel();
          stackDumpTimer = null;   // nullify timer instance so that we don't have to worry about
                                    // it in case an exception happens below
        }
        // Measure how long it took to call hasNext()
        timeToProcessCAS = (super.getCpuTime() - hasNextTime);
        getNextTime = super.getCpuTime();
        //  Start the heap dump timer. This timer is only started if explicitly enabled
        //  via the System property -DheapDumpThreshold=<x>, where x is the number of seconds the
        //  method is allowed to take. If the method does not complete within the allowed window,
        //  the heap and stack trace dump of all threads will be produced.
        stackDumpTimer = ifEnabledStartHeapDumpTimer();
        CAS casProduced = casIterator.next();
        if ( stackDumpTimer != null ) {
          stackDumpTimer.cancel();
          stackDumpTimer = null;   // nullify timer instance so that we don't have to worry about
                                   // it in case an exception happens below
        }
View Full Code Here

                  if (rs != null) {
                    nextAe.setResultSpecification(rs);
                  }
                }
                // invoke next AE in flow
                CasIterator casIter = null;
                CAS outputCas = null; //used if the AE we call outputs a new CAS
                try {
                  casIter = nextAe.processAndOutputNewCASes(cas);
                  if (casIter.hasNext()) {
                    outputCas = casIter.next();
                  }
                }
                catch(Exception e) {
                  //ask the FlowController if we should continue
                  //TODO: should this be configurable?
                  if (!flow.continueOnFailure(nextAeKey, e)) {
                    throw e;
                  }
                  else {
                    UIMAFramework.getLogger(CLASS_NAME).logrb(Level.FINE, CLASS_NAME.getName(), "processUntilNextOutputCas",
                            LOG_RESOURCE_BUNDLE, "UIMA_continuing_after_exception__FINE", e);
                  }
                }
                if (outputCas != null) // new CASes are output
                {
                  // push the CasIterator, original CAS, and Flow onto a stack so we
                  // can get the other output CASes and the original CAS later
                  casIteratorStack.push(new StackFrame(casIter, cas, flow, nextAeKey));
                  // compute Flow for the output CAS
                  flow = flow.newCasProduced(outputCas, nextAeKey);
                  // now route the output CAS through the flow
                  cas = outputCas;
                  activeCASes.add(cas);
                } else {
                  // no new CASes are output; this cas is done being processed
                  // by that AnalysisEngine so clear the componentInfo
                  cas.setCurrentComponentInfo(null);
                }
              } else {
                throw new AnalysisEngineProcessException(
                        AnalysisEngineProcessException.UNKNOWN_ID_IN_SEQUENCE,
                        new Object[] { nextAeKey });
              }
            }
            //ParallelStep (TODO: refactor out common parts with SimpleStep?)
            else if (nextStep instanceof ParallelStep) {
              //create modifiable list of destinations
              List<String> destinations = new LinkedList<String>(((ParallelStep)nextStep).getAnalysisEngineKeys());
              //iterate over all destinations, removing them from the list as we go
              while (!destinations.isEmpty()) {
                String nextAeKey = destinations.get(0);
                destinations.remove(0);
                //execute this step as we would a single step
                AnalysisEngine nextAe = (AnalysisEngine) mComponentAnalysisEngineMap.get(nextAeKey);
                if (nextAe != null) {
                  // invoke next AE in flow
                  CasIterator casIter = null;
                  CAS outputCas = null; //used if the AE we call outputs a new CAS
                  try {
                    casIter = nextAe.processAndOutputNewCASes(cas);
                    if (casIter.hasNext()) {
                      outputCas = casIter.next();
                    }
                  }
                  catch(Exception e) {
                    //ask the FlowController if we should continue
                    //TODO: should this be configurable?
View Full Code Here
