Examples of MarkovOutcome


Examples of statechum.analysis.learning.MarkovModel.MarkovOutcome

        for(Label label:graph.getCache().getAlphabet())
        {
          if (!failureLabels.contains(label))
          {// if the labels is not already recorded as being inconsistently predicted
            MarkovOutcome predictedFromEalierTrace = outgoing_labels_probabilities.get(label);
              Trace Predicted_trace = new Trace();
              if (predictionGraphInverted)
              {
                for(int i=pathToNewState.size()-1;i>=0;--i) Predicted_trace.add(pathToNewState.get(i));if (pathBeyondCurrentState != null) Predicted_trace.getList().addAll(pathBeyondCurrentState);
              }
              else
              {
                Predicted_trace.getList().addAll(pathToNewState);
              }
              Predicted_trace.add(label);
             
              MarkovOutcome predicted_from_Markov=model.predictionsMatrix.get(Predicted_trace);
            MarkovOutcome outcome = MarkovOutcome.reconcileOpinions_PosNeg_Overrides_Null(predictedFromEalierTrace, predicted_from_Markov);
            if (outcome != predictedFromEalierTrace)
            {// we learnt something new, be it a new value (or a non-null value) or a failure, record it
              if (outcome == MarkovOutcome.failure)
              {
                failureLabels.add(label);outgoing_labels_probabilities.remove(label);
View Full Code Here

Examples of statechum.analysis.learning.MarkovModel.MarkovOutcome

          {
            predictedTrace.getList().addAll(pathToUseWithMarkovToPredictOutgoing);
          }
          predictedTrace.add(lbl);
         
          MarkovOutcome newValue = null;
          UpdatablePairInteger p=model.occurrenceMatrix.get(predictedTrace);if (p == null) { p=new UpdatablePairInteger(0, 0);model.occurrenceMatrix.put(predictedTrace,p); }

          boolean foundAccept = false, foundReject = false;
          for(Object vObj:graphToCheckForConsistency.getTargets(targets))
          {
View Full Code Here

Examples of statechum.analysis.learning.MarkovModel.MarkovOutcome

        }
        //System.out.println(vert.toString()+" : "+encounteredPartOfTrace+" outgoing: "+outgoingLabels);
        if (model.occurrenceMatrix.containsKey(new Trace(encounteredPartOfTrace, true))) // we skip everything where a path was not seen in PTA.
            for(Label label:outgoingLabels)
            {
            MarkovOutcome labels_occurrence= outgoing_labels_value.get(label);
            if (labels_occurrence != MarkovOutcome.failure)
            {
                Trace traceToCheck = new Trace();traceToCheck.getList().addAll(encounteredPartOfTrace);
                traceToCheck.add(label);

                MarkovOutcome predicted_from_Markov=model.predictionsMatrix.get(traceToCheck);
                if (predicted_from_Markov != MarkovOutcome.failure)
                {// if training data does not lead to a consistent outcome for this label because chunk length is too small, not much we can do, but otherwise we are here and can make use of the data
                  if (!checker.consistent(labels_occurrence, predicted_from_Markov))
                  {
                    inconsistencies.addAndGet(1);// record inconsistency
View Full Code Here

Examples of statechum.analysis.learning.MarkovModel.MarkovOutcome

        }
       
       
        for(Entry<Label,CmpVertex> existing:entry.getValue().entrySet())
        {
          MarkovOutcome predictedTarget = predictions.get(existing.getKey());
         
          if (existing.getValue().isAccept() && predictedTarget == MarkovOutcome.positive)
            ++numberOfExistingPredicted;
          if (!existing.getValue().isAccept() && predictedTarget == MarkovOutcome.negative)
            ++numberOfExistingPredicted;
View Full Code Here

Examples of statechum.analysis.learning.MarkovModel.MarkovOutcome

        for(Label label:graph.getCache().getAlphabet())
        {
          if (!failureLabels.contains(label))
          {// if the labels is not already recorded as being inconsistently predicted
            MarkovOutcome predictedFromEalierTrace = outgoing_labels_probabilities.get(label);
             
              PredictionForSequence prediction = MarkovMatrixEngine.getPredictionIfExists(lastElementToPrediction, label);
              MarkovOutcome predicted_from_Markov= prediction!=null?prediction.prediction:null;
            MarkovOutcome outcome = MarkovOutcome.reconcileOpinions_PosNeg_Overrides_Null(predictedFromEalierTrace, predicted_from_Markov);
            if (outcome != predictedFromEalierTrace)
            {// we learnt something new, be it a new value (or a non-null value) or a failure, record it
              if (outcome == MarkovOutcome.failure)
              {
                failureLabels.add(label);outgoing_labels_probabilities.remove(label);
View Full Code Here

Examples of statechum.analysis.learning.MarkovModel.MarkovOutcome

            pathToUpdateInMarkov.addAll(pathToUseWithMarkovToPredictOutgoing);
          }
         
          pathToUpdateInMarkov.add(lbl);
         
          MarkovOutcome newValue = null;
          PredictionForSequence prediction = model.markovMatrix.getPredictionAndCreateNewOneIfNecessary(pathToUpdateInMarkov);
         
          boolean foundAccept = false, foundReject = false;
          for(Object vObj:graphToCheckForConsistency.getTargets(targets))
          {
View Full Code Here

Examples of statechum.analysis.learning.MarkovModel.MarkovOutcome

       
        //System.out.println(vert.toString()+" : "+encounteredPartOfTrace+" outgoing: "+outgoingLabels);
        if (checker.considerPathsWithPrefixMissingInMarkov() || mapFromLastLabelToNodes != null) // we skip everything where a path was not seen in PTA unless we are asked to consider all such paths.
            for(Label label:outgoingLabels)
            {
            MarkovOutcome labels_occurrence= outgoing_labels_value.get(label);
            if (labels_occurrence != MarkovOutcome.failure)
            {
                PredictionForSequence prediction = MarkovMatrixEngine.getPredictionIfExists(mapFromLastLabelToNodes, label);
                MarkovOutcome predicted_from_Markov=prediction == null?null:prediction.prediction;
                if (predicted_from_Markov != MarkovOutcome.failure)
                {// if training data does not lead to a consistent outcome for this label because chunk length is too small, not much we can do, but otherwise we are here and can make use of the data
                  if (!checker.consistent(labels_occurrence, predicted_from_Markov))
                  {
                    inconsistencies.addAndGet(1);// record inconsistency
View Full Code Here

Examples of statechum.analysis.learning.MarkovModel.MarkovOutcome

        }
       
       
        for(Entry<Label,CmpVertex> existing:entry.getValue().entrySet())
        {
          MarkovOutcome predictedTarget = predictions.get(existing.getKey());
         
          if (existing.getValue().isAccept() && predictedTarget == MarkovOutcome.positive)
            ++numberOfExistingPredicted;
          if (!existing.getValue().isAccept() && predictedTarget == MarkovOutcome.negative)
            ++numberOfExistingPredicted;
View Full Code Here

Examples of statechum.analysis.learning.MarkovModel.MarkovOutcome

    }
    for(Entry<Label,CmpVertex> state:states.entrySet())
    {
      for(Label label:ptaClassifier.graph.getCache().getAlphabet())
      {
        MarkovOutcome transition = ptaClassifier.model.predictionsMatrix.get(new Trace(Arrays.asList(new Label[]{state.getKey(),label}),true));
        if (transition != null)
          if (transition == MarkovOutcome.positive) outcome.transitionMatrix.get(state.getValue()).put(label,states.get(label));
      }
    }   
View Full Code Here

Examples of statechum.analysis.learning.MarkovModel.MarkovOutcome

    long scoreCurrentFanout = 0, score = 0;
    Map<Label,MarkovOutcome> outgoing_red_probabilities=cl.predictTransitionsFromState(red,pathLenBeyondCurrentState,cl.model.getChunkLen(),null);
    Map<Label,MarkovOutcome> outgoing_blue_probabilities=cl.predictTransitionsFromState(blue,pathLenBeyondCurrentState,cl.model.getChunkLen(),null);
    for(Entry<Label,MarkovOutcome> entry:outgoing_red_probabilities.entrySet())
    {
      MarkovOutcome outcomeBlue = outgoing_blue_probabilities.get(entry.getKey());
      if (outcomeBlue == null && entry.getValue() == MarkovOutcome.negative)
        ++scoreCurrentFanout; // red negative, blue absent, hence the two are consistent
      if (outcomeBlue == entry.getValue()) // or if the two are consistent
      {
        if (stepNumber > 1)
        {
          LinkedList<Label> pathBeyond = new LinkedList<Label>(pathLenBeyondCurrentState);pathBeyond.add(entry.getKey());
          score+=comparePredictedFanouts(cl,red,blue,pathBeyond,stepNumber-1);
        }
        ++scoreCurrentFanout;
      }
    }
     
    for(Entry<Label,MarkovOutcome> entry:outgoing_blue_probabilities.entrySet())
    {
      MarkovOutcome outcomeRed = outgoing_red_probabilities.get(entry.getKey());
      if (outcomeRed == null && entry.getValue() == MarkovOutcome.negative)
        ++scoreCurrentFanout; // blue negative, red absent, hence the two are consistent
      if (outcomeRed == entry.getValue()) // or if the two are consistent
      {
        if (stepNumber > 1)
View Full Code Here
TOP
Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc. and is owned by Oracle Inc. Contact coftware#gmail.com.