Package ivory.smrf.model

Examples of ivory.smrf.model.MarkovRandomField

The snippets below show the typical lifecycle of a MarkovRandomField in Ivory's SMRF retrieval framework: building one from a query with an MRFBuilder, ranking documents with MRFDocumentRanker, extracting clique-level features, and expanding the model via pseudo-relevance feedback.


        long startTime = System.currentTimeMillis();
        long endTime; // set after ranking completes (not shown in this excerpt)

        // Build the MRF for this query.
        MarkovRandomField mrf = builder.buildMRF(query);

        // Retrieve documents using this MRF.
        MRFDocumentRanker ranker = new MRFDocumentRanker(mrf, numHits);

        // Run initial query, if necessary.
        Accumulator[] results = null;
        if (expander != null) {
          results = ranker.rank();
        }

        // Perform pseudo-relevance feedback, if requested.
        if (expander != null) {
          // Get expanded MRF.
          MarkovRandomField expandedMRF = expander.getExpandedMRF(mrf, results);

          // Re-rank documents according to expanded MRF.
          ranker = new MRFDocumentRanker(expandedMRF, numHits);
        }
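
For orientation, here is a minimal usage sketch (not part of the original source) of how the ranked results might be consumed after this point. It relies only on the public docno and score fields of Accumulator, which the expansion snippets further down this page also use:

        // Rank with the (possibly expanded) MRF and print the top hits.
        Accumulator[] finalResults = ranker.rank();
        for (int i = 0; i < finalResults.length; i++) {
          System.out.println((i + 1) + ". docno=" + finalResults[i].docno
              + " score=" + finalResults[i].score);
        }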


      // compute features for each model
      for(String modelName : modelNames) {
        // build mrf from model node
        Node modelNode = runner.getModel(modelName);
        MRFBuilder builder = MRFBuilder.get(env, modelNode);     
        MarkovRandomField mrf = builder.buildMRF(env.tokenize(queryText));

        // get mrf cliques
        List<Clique> cliques = mrf.getCliques();

        // add parameter name to feature name set
        for(Clique c : cliques) {
          // parameter id
          String paramId = c.getParameter().getName();

          // handle linear importance model weights
          if (!importanceModels.isEmpty()) {
            for(LinearImportanceModel model : linearImportanceModels) {
              List<MetaFeature> metaFeatures = model.getMetaFeatures();

              for(MetaFeature metaFeat : metaFeatures) {
                // feature id = modelName-metaFeatId-paramId
                String featId = modelName + "-" + metaFeat.getName() + "-" + paramId;
                featureNames.add(featId);
              }
            }
          }

          // feature id = modelName-paramId
          String featId = modelName + "-" + paramId;

          featureNames.add(featId);
        }
      }     
    }

    // add judgment feature name
    featureNames.add(JUDGMENT_FEATURE_NAME);

    // print feature name header
    System.out.print(QUERY_FEATURE_NAME + "\t" + DOC_FEATURE_NAME);
    for(String featureName : featureNames) {
      System.out.print("\t" + featureName);
    }
    System.out.println();

    // extract features query-by-query
    for(Entry<String, String> queryEntry : queries.entrySet()) {
      // feature map (docname -> feature name -> feature value)
      SortedMap<String,SortedMap<String,Double>> featureValues = new TreeMap<String,SortedMap<String,Double>>();

      // query id and text
      String qid = queryEntry.getKey();
      String queryText = queryEntry.getValue();

      // compute features for each model
      for(String modelName : modelNames) {
        // build mrf from model node
        Node modelNode = runner.getModel(modelName);
        MRFBuilder builder = MRFBuilder.get(env, modelNode);     
        MarkovRandomField mrf = builder.buildMRF(env.tokenize(queryText));

        // initialize mrf
        mrf.initialize();

        // get mrf cliques
        List<Clique> cliques = mrf.getCliques();

        // get docnodes associated with mrf
        ArrayList<DocumentNode> docNodes = new ArrayList<DocumentNode>();
        List<GraphNode> nodes = mrf.getNodes();
        for (GraphNode node : nodes) {
          if (node instanceof DocumentNode) {
            docNodes.add((DocumentNode) node);
          }
        }
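
Once featureValues has been populated (the excerpt above stops partway through that loop), the natural next step is to emit one tab-separated row per document, matching the header printed earlier. A minimal sketch, not from the original file; defaulting absent features to 0.0 is an assumption:

      // Emit one TSV row per document: qid, docname, then one value per feature name.
      for (Entry<String, SortedMap<String, Double>> docEntry : featureValues.entrySet()) {
        StringBuilder row = new StringBuilder(qid + "\t" + docEntry.getKey());
        for (String featureName : featureNames) {
          Double value = docEntry.getValue().get(featureName);
          row.append('\t').append(value == null ? 0.0 : value);
        }
        System.out.println(row.toString());
      }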

     // compute features for each model
     for(String modelName : modelNames) {
       // build mrf from model node
       Node modelNode = runner.getModel(modelName);
       MRFBuilder builder = MRFBuilder.get(env, modelNode);
       MarkovRandomField mrf = builder.buildMRF(queryText.split("\\s+"));

       // get mrf cliques
       List<Clique> cliques = mrf.getCliques();

       // add parameter name to feature name set
       for(Clique c : cliques) {
         // parameter id
         String paramId = c.getParameter().getName();

         // handle linear importance model weights
         if (!importanceModels.isEmpty()) {
           for(LinearImportanceModel model : linearImportanceModels) {
             List<MetaFeature> metaFeatures = model.getMetaFeatures();

             for(MetaFeature metaFeat : metaFeatures) {
               // feature id = modelName-metaFeatId-paramId
               String featId = modelName + "-" + metaFeat.getName() + "-" + paramId;
               featureNames.add(featId);
             }
           }
         }

         // feature id = modelName-paramId
         String featId = modelName + "-" + paramId;

         featureNames.add(featId);
       }
     }
   }

   // add judgment feature name
   featureNames.add(JUDGMENT_FEATURE_NAME);

   // print feature name header
   System.out.print(QUERY_FEATURE_NAME + "\t" + DOC_FEATURE_NAME);
   for(String featureName : featureNames) {
     System.out.print("\t" + featureName);
   }
   System.out.println();

   // extract features query-by-query
   for(Entry<String, String> queryEntry : queries.entrySet()) {
     // feature map (docname -> feature name -> feature value)
     SortedMap<String,SortedMap<String,Double>> featureValues = new TreeMap<String,SortedMap<String,Double>>();

     // query id and text
     String qid = queryEntry.getKey();
     String queryText = queryEntry.getValue();

     // compute features for each model
     for(String modelName : modelNames) {
       // build mrf from model node
       Node modelNode = runner.getModel(modelName);
       MRFBuilder builder = MRFBuilder.get(env, modelNode);
       MarkovRandomField mrf = builder.buildMRF(queryText.split("\\s+"));

       // initialize mrf
       mrf.initialize();

       // get mrf cliques
       List<Clique> cliques = mrf.getCliques();

       // get docnodes associated with mrf
       ArrayList<DocumentNode> docNodes = new ArrayList<DocumentNode>();
       List<GraphNode> nodes = mrf.getNodes();
       for (GraphNode node : nodes) {
         if (node instanceof DocumentNode) {
           docNodes.add((DocumentNode) node);
         }
       }
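
With the cliques and document nodes collected, scoring a single candidate document looks roughly like the inner loop of MRFDocumentRanker. A hedged sketch: DocumentNode.setDocno(int) and Clique.getPotential() are assumptions about the Ivory API, as is applying the clique weight explicitly (getPotential() may already fold it in):

       // Score one hypothetical candidate document by summing clique potentials.
       int docno = 42; // hypothetical docno
       for (DocumentNode docNode : docNodes) {
         docNode.setDocno(docno);
       }
       float docScore = 0.0f;
       for (Clique c : cliques) {
         docScore += c.getWeight() * c.getPotential();
       }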


  }

  @Override
  public MarkovRandomField buildMRF(String[] queryTerms) throws ConfigurationException {
    // This is the MRF we're building.
    MarkovRandomField mrf = new MarkovRandomField(queryTerms, env);

    // Construct MRF feature by feature.
    NodeList children = super.getModel().getChildNodes();

    // Sum of query-dependent importance weights.
    float totalImportance = 0.0f;

    // Cliques that have query-dependent importance weights.
    Set<CascadeClique> cliquesWithImportance = new HashSet<CascadeClique>();

    int cascade_stage = 0;
    int cascade_stage_proper = -1;

    for (int i = 0; i < children.getLength(); i++) {
      Node child = children.item(i);

      if ("feature".equals(child.getNodeName())) {
        // Get the feature id.
        String featureID = XMLTools.getAttributeValue(child, "id", "");
        if (featureID.equals("")) {
          throw new RetrievalException("Each feature must specify an id attribute!");
        }

        // Get feature weight (default = 1.0).
        float weight = XMLTools.getAttributeValue(child, "weight", 1.0f);

        // Concept importance model (optional).
        ConceptImportanceModel importanceModel = null;

        // Get concept importance source (if applicable).
        String importanceSource = XMLTools.getAttributeValue(child, "importance", "");
        if (!importanceSource.equals("")) {
          importanceModel = env.getImportanceModel(importanceSource);
          if (importanceModel == null) {
            throw new RetrievalException("ImportanceModel " + importanceSource + " not found!");
          }
        }

        // Get CliqueSet type.
        String cliqueSetType = XMLTools.getAttributeValue(child, "cliqueSet", "");

        // Get Cascade stage (if any)
        int cascadeStage = XMLTools.getAttributeValue(child, "cascadeStage", -1);

        String pruner_and_params = XMLTools.getAttributeValue(child, "prune", "null");
        String thePruner = (pruner_and_params.trim().split("\\s+"))[0];
        String conceptBinType = XMLTools.getAttributeValue(child, "conceptBinType", "");
        String conceptBinParams = XMLTools.getAttributeValue(child, "conceptBinParams", "");
        String scoreFunction = XMLTools.getAttributeValue(child, "scoreFunction", null);

        int width = XMLTools.getAttributeValue(child, "width", -1);

        RetrievalEnvironment.setIsNew(cascadeStage != -1);

        if (cascadeStage != -1) {
          if (!conceptBinType.equals("") || !conceptBinParams.equals("")) {
            if (conceptBinType.equals("") || conceptBinParams.equals("")) {
              throw new RetrievalException("Most specify conceptBinType || conceptBinParams");
            }
            importanceModel = env.getImportanceModel("wsd");

            if (importanceModel == null) {
              throw new RetrievalException("ImportanceModel " + importanceSource + " not found!");
            }
          }
        }

        cascade_stage_proper = cascadeStage;

        if (cascadeStage != -1 && conceptBinType.equals("") && conceptBinParams.equals("")) {
          cascade_stage_proper = cascade_stage;
        }

        // Construct the clique set.
        CascadeCliqueSet cliqueSet = (CascadeCliqueSet) CascadeCliqueSet.create(cliqueSetType,
            env, queryTerms, child, cascade_stage_proper, pruner_and_params);

        // Get cliques from clique set.
        List<Clique> cliques = cliqueSet.getCliques();

        if (cascadeStage != -1 && conceptBinType.equals("") && conceptBinParams.equals("")) {
          if (cliques.size() > 0) {
            cascade_stage++;
          }
        } else if (cascadeStage != -1 && !conceptBinType.equals("") && !conceptBinParams.equals("")) {
          if (cliques.size() > 0) {
            int[] order = new int[cliques.size()];
            double[] conceptWeights = new double[cliques.size()];
            int cntr = 0;
            String all_concepts = "";
            for (Clique c : cliques) {
              float importance = importanceModel.getCliqueWeight(c);
              order[cntr] = cntr;
              conceptWeights[cntr] = importance;
              cntr++;
              all_concepts += c.getConcept() + " ";
            }
            ivory.smrf.model.constrained.ConstraintModel.Quicksort(conceptWeights, order, 0,
                order.length - 1);

            int[] keptCliques = getCascadeCliques(conceptBinType, conceptBinParams, conceptWeights,
                order, all_concepts, featureID, thePruner, width + "", scoreFunction);

            List<Clique> cliques2 = Lists.newArrayList();
            for (int k = 0; k < keptCliques.length; k++) {
              cliques2.add(cliques.get(keptCliques[k]));
            }
            cliques = cliques2;

            if (keptCliques.length != 0) {
              for (Clique c : cliques) {
                ((CascadeClique) c).setCascadeStage(cascade_stage);
              }
              cascade_stage++;
            }
          }
        }

        for (Clique c : cliques) {

          double w = weight;

          c.setParameterName(featureID); // Parameter id.
          c.setParameterWeight(weight); // Weight.
          c.setType(cliqueSet.getType()); // Clique type.

          // Get clique weight.
          if (!importanceSource.equals("")) {

            float importance = importanceModel.getCliqueWeight(c);

            if (weight == -1.0f) { // default value.
              c.setParameterWeight(1.0f);
            }

            c.setImportance(importance);

            totalImportance += importance;
            cliquesWithImportance.add((CascadeClique) c);

            w = importance;
          }

          // Prune low-weight non-term cliques; term cliques are always kept.
          if (w >= pruningThresholdBigram || c.getType() == Clique.Type.Term) {
            mrf.addClique(c);
          }
        }
      }
    }
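
The excerpt ends before the method returns. Given the totalImportance and cliquesWithImportance bookkeeping above, the natural closing step is to normalize the query-dependent importance weights so they sum to one; a hedged sketch of what that tail plausibly looks like, not the verbatim source:

    // Normalize query-dependent importance weights (sketch, not from the excerpt).
    if (totalImportance > 0.0f) {
      for (CascadeClique c : cliquesWithImportance) {
        c.setImportance(c.getImportance() / totalImportance);
      }
    }
    return mrf;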

  }

  @Override
  public MarkovRandomField buildMRF(String[] queryTerms) throws ConfigurationException {
    // This is the MRF we're building.
    MarkovRandomField mrf = new MarkovRandomField(queryTerms, env);

    // Construct MRF feature by feature.
    NodeList children = model.getChildNodes();

    // Sum of query-dependent importance weights.
    float totalImportance = 0.0f;

    // Cliques that have query-dependent importance weights.
    Set<Clique> cliquesWithImportance = Sets.newHashSet();

    for (int i = 0; i < children.getLength(); i++) {
      Node child = children.item(i);

      if ("feature".equals(child.getNodeName())) {
        // Get the feature id.
        String featureID = XMLTools.getAttributeValueOrThrowException(child, "id",
            "Each feature must specify an id attribute!");

        // Get feature weight (default = 1.0).
        float weight = XMLTools.getAttributeValue(child, "weight", 1.0f);

        // Concept importance model (optional).
        ConceptImportanceModel importanceModel = null;

        // Get concept importance source (if applicable).
        String importanceSource = XMLTools.getAttributeValue(child, "importance", "");
        if (!importanceSource.equals("")) {
          importanceModel = env.getImportanceModel(importanceSource);
          if (importanceModel == null) {
            throw new RetrievalException("ImportanceModel " + importanceSource + " not found!");
          }
        }

        // Get CliqueSet type.
        String cliqueSetType = XMLTools.getAttributeValue(child, "cliqueSet", "");

        // Construct the clique set.
        CliqueSet cliqueSet = CliqueSet.create(cliqueSetType, env, queryTerms, child);

        // Get cliques from clique set.
        List<Clique> cliques = cliqueSet.getCliques();

        for (Clique c : cliques) {
          double w = weight;

          c.setParameterName(featureID);  // Parameter id.
          c.setParameterWeight(weight);   // Weight.
          c.setType(cliqueSet.getType()); // Clique type.

          // Get clique weight.
          if (importanceModel != null) {
            float importance = importanceModel.getCliqueWeight(c);
            c.setImportance(importance);

            totalImportance += importance;
            cliquesWithImportance.add(c);

            w = importance;
          }

          // Prune low-weight non-term cliques; term cliques are always kept.
          if (w >= pruningThresholdBigram || c.getType() == Clique.Type.Term) {
            mrf.addClique(c);
          }
        }
      }
    }
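
For reference, here is a hypothetical model specification wired to the attributes this builder reads (id, weight, importance, cliqueSet). The element layout, clique-set names, and weights are assumptions for illustration, not Ivory's documented schema:

    <model>
      <feature id="term" weight="0.85" cliqueSet="term" />
      <feature id="ordered" weight="0.10" cliqueSet="ordered" importance="wsd" />
      <feature id="unordered" weight="0.05" cliqueSet="unordered" importance="wsd" />
    </model>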

          unigramAddThreshold, bigramAddThreshold, unigramRedundThreshold,
          bigramRedundThreshold, beta);
    }

    // construct constrained mrf
    MarkovRandomField constrainedMRF = new MarkovRandomField(queryTerms, env);
    for (Clique c : selectedCliques) {
      constrainedMRF.addClique(c);
    }

    return constrainedMRF;
  }

  }

  @Override
  public MarkovRandomField buildMRF(String[] queryTerms) throws ConfigurationException {
    // build unconstrained MRF model
    MarkovRandomField unconstrainedMRF = builder.buildMRF(queryTerms);

    // get constrained version of the model
    return buildConstrainedMRF(queryTerms, unconstrainedMRF);
  }
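
A short usage sketch (the constrainedBuilder name and hit count are hypothetical): because the constrained builder overrides the same buildMRF entry point, it drops into the standard ranking flow unchanged:

    MarkovRandomField mrf = constrainedBuilder.buildMRF("markov random field".split("\\s+"));
    MRFDocumentRanker ranker = new MRFDocumentRanker(mrf, 1000);
    Accumulator[] hits = ranker.rank();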

  @Override
  public MarkovRandomField getExpandedMRF(MarkovRandomField mrf, Accumulator[] results)
      throws ConfigurationException {
    // Begin constructing the expanded MRF.
    MarkovRandomField expandedMRF = new MarkovRandomField(mrf.getQueryTerms(), env);

    // Add cliques corresponding to original MRF.
    List<Clique> cliques = mrf.getCliques();
    for (Clique clique : cliques) {
      expandedMRF.addClique(clique);
    }

    // Find the best concepts for each of the expansion models.
    for (int modelNum = 0; modelNum < builders.size(); modelNum++) {
      // Get information about this expansion model.
      int curGramSize = gramSizes.get(modelNum);
      MRFBuilder curBuilder = builders.get(modelNum);
      int curFbDocs = fbDocList.get(modelNum);
      int curFbTerms = fbTermList.get(modelNum);

      // Gather Accumulators we're actually going to use for feedback purposes.
      Accumulator[] fbResults = new Accumulator[Math.min(results.length, curFbDocs)];
      for (int i = 0; i < Math.min(results.length, curFbDocs); i++) {
        fbResults[i] = results[i];
      }

      // Sort the Accumulators by docno.
      Arrays.sort(fbResults, new Accumulator.DocnoComparator());

      // Get docnos that correspond to the accumulators.
      int[] docSet = Accumulator.accumulatorsToDocnos(fbResults);

      // Get document vectors for results.
      IntDocVector[] docVecs = env.documentVectors(docSet);

      // Extract vocabulary from results.
      VocabFrequencyPair[] vocab = null;
      try {
        vocab = getVocabulary(docVecs, curGramSize);
      } catch (IOException e) {
        throw new RuntimeException("Error: Unable to fetch the vocabulary!", e);
      }

      // Priority queue for the concepts associated with this builder.
      PriorityQueue<Accumulator> sortedConcepts = new PriorityQueue<Accumulator>();

      // Score each concept.
      for (int conceptID = 0; conceptID < vocab.length; conceptID++) {
        if (maxCandidates > 0 && conceptID >= maxCandidates) {
          break;
        }

        // The current concept.
        String concept = vocab[conceptID].getKey();

        String[] concepts = concept.split(" ");
        MarkovRandomField conceptMRF = curBuilder.buildMRF(concepts);

        MRFDocumentRanker ranker = new MRFDocumentRanker(conceptMRF, docSet, docSet.length);
        Accumulator[] conceptResults = ranker.rank();
        Arrays.sort(conceptResults, new Accumulator.DocnoComparator());

        float score = 0.0f;
        for (int i = 0; i < conceptResults.length; i++) {
          if (fbResults[i].docno != conceptResults[i].docno) {
            throw new RetrievalException("Error: Mismatch occured in getExpandedMRF!");
          }
          score += Math.exp(fbResults[i].score + conceptResults[i].score);
        }

        int size = sortedConcepts.size();
        if (size < curFbTerms || sortedConcepts.peek().score < score) {
          if (size == curFbTerms) {
            sortedConcepts.poll(); // Remove worst concept.
          }
          sortedConcepts.add(new Accumulator(conceptID, score));
        }
      }

      // Compute the weights of the expanded terms.
      int numTerms = Math.min(curFbTerms, sortedConcepts.size());
      float totalWt = 0.0f;
      Accumulator[] bestConcepts = new Accumulator[numTerms];
      for (int i = 0; i < numTerms; i++) {
        Accumulator a = sortedConcepts.poll();
        bestConcepts[i] = a;
        totalWt += a.score;
      }

      // Add cliques corresponding to best expansion concepts.
      for (int i = 0; i < numTerms; i++) {
        Accumulator a = bestConcepts[i];

        // Construct the MRF corresponding to this concept (a.docno holds the concept ID here).
        String[] concepts = vocab[a.docno].getKey().split(" ");
        MarkovRandomField conceptMRF = curBuilder.buildMRF(concepts);

        // Normalized score.
        float normalizedScore = a.score / totalWt;

        // Add cliques.
        cliques = conceptMRF.getCliques();
        for (Clique c : cliques) {
          if (c.isDocDependent() && c.getWeight() != 0.0) {
            c.setImportance(normalizedScore * c.getImportance());
            expandedMRF.addClique(c);
          }

      throws ConfigurationException {
    Preconditions.checkNotNull(mrf);
    Preconditions.checkNotNull(results);

    // Begin constructing the expanded MRF.
    MarkovRandomField expandedMRF = new MarkovRandomField(mrf.getQueryTerms(), env);

    // Add cliques corresponding to original MRF.
    List<Clique> cliques = mrf.getCliques();
    for (Clique clique : cliques) {
      expandedMRF.addClique(clique);
    }

    // Get MRF global evidence.
    GlobalEvidence globalEvidence = mrf.getGlobalEvidence();

    // Gather Accumulators we're actually going to use for feedback purposes.
    Accumulator[] fbResults = new Accumulator[Math.min(results.length, numFeedbackDocs)];
    for (int i = 0; i < Math.min(results.length, numFeedbackDocs); i++) {
      fbResults[i] = results[i];
    }

    // Sort the Accumulators by docno.
    Arrays.sort(fbResults, new Accumulator.DocnoComparator());

    // Get docnos that correspond to the accumulators.
    int[] docSet = Accumulator.accumulatorsToDocnos(fbResults);

    // Get document vectors for results.
    IntDocVector[] docVecs = env.documentVectors(docSet);

    // Extract tf and doclen information from document vectors.
    TfDoclengthStatistics stats = null;
    try {
      stats = getTfDoclengthStatistics(docVecs);
    } catch (IOException e) {
      throw new RetrievalException(
          "Error: Unable to extract tf and doclen information from document vectors!");
    }

    VocabFrequencyPair[] vocab = stats.getVocab();
    Map<String, Short>[] tfs = stats.getTfs();
    int[] doclens = stats.getDoclens();

    // Priority queue for the concepts associated with this builder.
    PriorityQueue<Accumulator> sortedConcepts = new PriorityQueue<Accumulator>();

    // Create scoring functions.
    ScoringFunction[] scoringFunctions = new ScoringFunction[scoringFunctionNodes.size()];
    for (int i = 0; i < scoringFunctionNodes.size(); i++) {
      Node functionNode = scoringFunctionNodes.get(i);
      String functionType = XMLTools.getAttributeValueOrThrowException(functionNode,
          "scoreFunction", "conceptscore node must specify a scorefunction attribute!");
      scoringFunctions[i] = ScoringFunction.create(functionType, functionNode);
    }

    // Score each concept.
    for (int conceptID = 0; conceptID < vocab.length; conceptID++) {
      if (maxCandidates > 0 && conceptID >= maxCandidates) {
        break;
      }

      // The current concept.
      String concept = vocab[conceptID].getKey();

      // Get df and cf information for the concept.
      PostingsReader reader = env.getPostingsReader(new Expression(concept));
      if (reader == null) {
        continue;
      }
      PostingsList list = reader.getPostingsList();
      int df = list.getDf();
      long cf = list.getCf();
      env.clearPostingsReaderCache();

      // Construct concept evidence.
      termEvidence.set(df, cf);

      // Score the concept.
      float score = 0.0f;
      for (int i = 0; i < fbResults.length; i++) {
        float docScore = 0.0f;
        for (int j = 0; j < scoringFunctions.length; j++) {
          float weight = parameters.get(j).getWeight();
          ConceptImportanceModel importanceModel = importanceModels.get(j);
          if (importanceModel != null) {
            weight *= importanceModel.getConceptWeight(concept);
          }
          ScoringFunction fn = scoringFunctions[j];
          fn.initialize(termEvidence, globalEvidence);

          Short tf = tfs[i].get(concept);
          if (tf == null) {
            tf = 0;
          }
          float s = fn.getScore(tf, doclens[i]);

          docScore += weight * s;
        }
        score += Math.exp(fbResults[i].score + docScore);
      }

      int size = sortedConcepts.size();
      if (size < numFeedbackTerms || sortedConcepts.peek().score < score) {
        if (size == numFeedbackTerms) {
          sortedConcepts.poll(); // Remove worst concept.
        }
        sortedConcepts.add(new Accumulator(conceptID, score));
      }
    }

    // Compute the weights of the expanded terms.
    int numTerms = Math.min(numFeedbackTerms, sortedConcepts.size());
    float totalWt = 0.0f;
    Accumulator[] bestConcepts = new Accumulator[numTerms];
    for (int i = 0; i < numTerms; i++) {
      Accumulator a = sortedConcepts.poll();
      bestConcepts[i] = a;
      totalWt += a.score;
    }

    // Document node (shared across all expansion cliques).
    DocumentNode docNode = new DocumentNode();

    // Expression generator (shared across all expansion cliques).
    ExpressionGenerator generator = new TermExpressionGenerator();

    // Add cliques corresponding to best expansion concepts.
    for (int i = 0; i < numTerms; i++) {
      Accumulator a = bestConcepts[i];

      // Look up the concept text (a.docno holds the concept ID here).
      String concept = vocab[a.docno].getKey();

      for (int j = 0; j < scoringFunctionNodes.size(); j++) {
        Node functionNode = scoringFunctionNodes.get(j);
        String functionType = XMLTools.getAttributeValue(functionNode, "scoreFunction", null);
        ScoringFunction fn = ScoringFunction.create(functionType, functionNode);

        Parameter parameter = parameters.get(j);
        ConceptImportanceModel importanceModel = importanceModels.get(j);

        List<GraphNode> cliqueNodes = Lists.newArrayList();
        cliqueNodes.add(docNode);

        TermNode termNode = new TermNode(concept);
        cliqueNodes.add(termNode);

        PotentialFunction potential = new QueryPotential(env, generator, fn);

        Clique c = new Clique(cliqueNodes, potential, parameter);
        c.setType(Clique.Type.Term);

        // Scale importance values by LCE likelihood.
        float normalizedScore = expanderWeight * (a.score / totalWt);
        if (importanceModel != null) {
          c.setImportance(normalizedScore * importanceModel.getCliqueWeight(c));
        } else {
          c.setImportance(normalizedScore);
        }

        expandedMRF.addClique(c);
      }
    }

    return expandedMRF;
  }
