Package gov.sandia.cognition.math.matrix

Examples of gov.sandia.cognition.math.matrix.Vector
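
All of the snippets below come down to a handful of factory and linear-algebra calls on Vector and Matrix. As a warm-up, here is a minimal, self-contained sketch of those calls using the default dense factories (the values are arbitrary, and the exact signatures are an assumption drawn from the Cognitive Foundry API rather than from the snippets themselves):

      Vector x = VectorFactory.getDefault().copyValues(1d, 2d, 3d);
      Vector y = VectorFactory.getDefault().createVector(3, 0.5d);    // [0.5, 0.5, 0.5]
      double dot = x.dotProduct(y);                                   // 1*0.5 + 2*0.5 + 3*0.5 = 3.0
      Vector sum = x.plus(y.scale(2d));                               // element-wise: [2, 3, 4]
      Matrix outer = x.outerProduct(y);                               // 3x3 rank-one matrix
      Matrix eye = MatrixFactory.getDefault().createIdentity(3, 3);
      Vector same = eye.times(x);                                     // identity map, equals x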


      final int xDim = posteriorState.getInputDimensionality();
      final Matrix Ij = MatrixFactory.getDefault().createIdentity(xDim, xDim);
      final Matrix H = MatrixFactory.getDefault().createMatrix(xDim, xDim * 2);
      H.setSubMatrix(0, 0, Ij);
      H.setSubMatrix(0, xDim, MatrixFactory.getDefault().createDiagonal(predState.getStateSample()));
      final Vector postStateSample = posteriorState.sample(this.rng);
      final MultivariateGaussian priorPhi = predState.getPsiSS();
      final Vector phiPriorSmpl = priorPhi.sample(this.rng);
      final Vector xHdiff = postStateSample.minus(H.times(phiPriorSmpl));

      final double newN = scaleSS.getShape() + 1d;
      final double d = scaleSS.getScale() + xHdiff.dotProduct(xHdiff);
     
      scaleSS.setScale(d);
      scaleSS.setShape(newN);
     
      // FIXME TODO: crappy sampler
      final double newScaleSmpl = scaleSS.sample(this.rng);
     
      /*
       * Update state and measurement covariances, which
       * have a strict dependency in this model (equality).
       */
      kf.setMeasurementCovariance(MatrixFactory.getDefault().createDiagonal(
          VectorFactory.getDefault().createVector(kf.getModel().getOutputDimensionality(),
              newScaleSmpl)));

      kf.setModelCovariance(MatrixFactory.getDefault().createDiagonal(
          VectorFactory.getDefault().createVector(kf.getModel().getStateDimensionality(),
              newScaleSmpl)));

      /*
       * Update offset and AR(1) prior(s).
       * Note that we divide out the previous scale param, since
       * we want to update A alone.
       */
      final Matrix priorAInv = priorPhi.getCovariance().scale(1d/predState.getSigma2Sample()).inverse();
      /*
       * TODO FIXME: we don't have a generalized outer product, so we're only
       * supporting the 1d case for now.
       */
      final Vector Hv = H.convertToVector();
      final Matrix postAInv = priorAInv.plus(Hv.outerProduct(Hv)).inverse();
      // TODO FIXME: ewww.  inverse.
      final Vector postPhiMean = postAInv.times(priorAInv.times(phiPriorSmpl).plus(
          H.transpose().times(postStateSample)));
      final MultivariateGaussian postPhi = systemOffsetsSS;
      postPhi.setMean(postPhiMean);
      postPhi.setCovariance(postAInv.scale(newScaleSmpl));
     
      final Vector postPhiSmpl = postPhi.sample(this.rng);
      final Matrix smplArTerms = MatrixFactory.getDefault().createDiagonal(
          postPhiSmpl.subVector(
              postPhiSmpl.getDimensionality()/2,
              postPhiSmpl.getDimensionality() - 1));
      kf.getModel().setA(smplArTerms);

      final Vector smplOffsetTerm = postPhiSmpl.subVector(0,
              postPhiSmpl.getDimensionality()/2 - 1);
      kf.getModel().setState(smplOffsetTerm);
      kf.setCurrentInput(smplOffsetTerm);
 
      final GaussianArHpWfParticle postState =
View Full Code Here
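
The first part of the snippet above packs the offset and AR(1) terms into a single regression by assembling the observation matrix block-wise as H = [ I | diag(x) ], so that H times the stacked parameter vector phi = (offset, arCoefficients) gives offset + diag(x) * arCoefficients. A stripped-down sketch of just that block assembly, on a hypothetical two-dimensional state sample:

      final Vector stateSample = VectorFactory.getDefault().copyValues(1.5d, -0.5d);
      final int xDim = stateSample.getDimensionality();
      final Matrix H = MatrixFactory.getDefault().createMatrix(xDim, 2 * xDim);
      // left block: identity, multiplies the offset half of phi
      H.setSubMatrix(0, 0, MatrixFactory.getDefault().createIdentity(xDim, xDim));
      // right block: diag(stateSample), multiplies the AR-coefficient half of phi
      H.setSubMatrix(0, xDim, MatrixFactory.getDefault().createDiagonal(stateSample));
      // H is now [ 1 0 1.5 0 ; 0 1 0 -0.5 ], so H.times(phi) = offset + diag(x) * arCoeffs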


    Matrix modelCovariance = MatrixFactory.getDefault().copyArray(
        new double[][] {{trueSigma2}});
    Matrix measurementCovariance = MatrixFactory.getDefault().copyArray(
        new double[][] {{trueSigma2}});

    Vector truePsi = VectorFactory.getDefault().copyValues(3d, 0.2d);

    LinearDynamicalSystem dlm = new LinearDynamicalSystem(
        MatrixFactory.getDefault().copyArray(new double[][] {{truePsi.getElement(1)}}),
        MatrixFactory.getDefault().copyArray(new double[][] {{1d}}),
        MatrixFactory.getDefault().copyArray(new double[][] {{1d}})
      );
    KalmanFilter trueKf = new KalmanFilter(dlm, modelCovariance, measurementCovariance);
    trueKf.setCurrentInput(VectorFactory.getDefault().copyValues(truePsi.getElement(0)));
   
    final double sigmaPriorMean = Math.pow(0.4, 2);
    final double sigmaPriorShape = 2d;
    final double sigmaPriorScale = sigmaPriorMean*(sigmaPriorShape - 1d);
    final InverseGammaDistribution sigmaPrior = new InverseGammaDistribution(sigmaPriorShape,
        sigmaPriorScale);
   
    final Vector phiMean = VectorFactory.getDefault().copyArray(new double[] {
        0d, 0.8d
    });
    final Matrix phiCov = MatrixFactory.getDefault().copyArray(new double[][] {
        {2d + 4d * sigmaPriorMean, 0d},
        { 0d, 4d * sigmaPriorMean}
 
View Full Code Here
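
In the snippet above the inverse-gamma scale is set to sigmaPriorMean * (sigmaPriorShape - 1), which pins the prior mean at sigmaPriorMean, since an inverse gamma with shape a > 1 and scale b has mean b / (a - 1). A quick sanity check of that choice (assuming a getMean() accessor on InverseGammaDistribution, which the snippet itself does not show):

      final double sigmaPriorMean = Math.pow(0.4, 2);   // 0.16
      final double sigmaPriorShape = 2d;
      final double sigmaPriorScale = sigmaPriorMean * (sigmaPriorShape - 1d);
      final InverseGammaDistribution sigmaPrior =
          new InverseGammaDistribution(sigmaPriorShape, sigmaPriorScale);
      // b / (a - 1) = 0.16 / 1.0 = 0.16, i.e. the intended prior mean
      assert Math.abs(sigmaPrior.getMean() - sigmaPriorMean) < 1e-12;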

    final UnivariateGaussian prior = new UnivariateGaussian(0d, sigma_y2);
    final UnivariateGaussian s1Likelihood = prior;
    final UnivariateGaussian s2Likelihood = s1Likelihood;
   
    Vector initialClassProbs = VectorFactory.getDefault()
            .copyArray(new double[] { 0.7d, 0.3d });
    Matrix classTransProbs = MatrixFactory.getDefault().copyArray(
                new double[][] { { 0.7d, 0.7d },
                    { 0.3d, 0.3d } });
   
View Full Code Here
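
Note that in the transition matrix above it is the columns, not the rows, that sum to one, which suggests a column-stochastic convention: classTransProbs.times(classProbs) propagates the class distribution forward. A quick check of that invariant, reusing initialClassProbs and classTransProbs from the snippet above (the convention itself is an assumption read off the values):

      final Vector ones = VectorFactory.getDefault().createVector(2, 1d);
      final Vector columnSums = classTransProbs.transpose().times(ones);      // (1.0, 1.0)
      final Vector nextClassProbs = classTransProbs.times(initialClassProbs); // (0.7, 0.3)
      // nextClassProbs still sums to one after the transition step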

    KalmanFilter trueKf1 = new KalmanFilter(model1, modelCovariance1, measurementCovariance);
    trueKf1.setCurrentInput(VectorFactory.getDefault().copyValues(truePsis.get(0).getElement(0)));
    KalmanFilter trueKf2 = new KalmanFilter(model2, modelCovariance2, measurementCovariance);
    trueKf2.setCurrentInput(VectorFactory.getDefault().copyValues(truePsis.get(1).getElement(0)));
   
    Vector initialClassProbs = VectorFactory.getDefault()
            .copyArray(new double[] { 0.5d, 0.5d });
    Matrix classTransProbs = MatrixFactory.getDefault().copyArray(
                new double[][] { { 0.5d, 0.5d },
                    { 0.5d, 0.5d } });
   
    DlmHiddenMarkovModel trueHmm1 = new DlmHiddenMarkovModel(
        Lists.newArrayList(trueKf1, trueKf2),
        initialClassProbs, classTransProbs);

    final double sigmaPriorMean = Math.pow(0.4, 2);
    final double sigmaPriorShape = 2d;
    final double sigmaPriorScale = sigmaPriorMean*(sigmaPriorShape + 1d);
    final InverseGammaDistribution sigmaPrior = new InverseGammaDistribution(sigmaPriorShape,
        sigmaPriorScale);
   
    final Vector phiMean1 = VectorFactory.getDefault().copyArray(new double[] {
        0d, 0.8d
    });
    final Matrix phiCov1 = MatrixFactory.getDefault().copyArray(new double[][] {
        {2d + 4d * sigmaPriorMean, 0d},
        { 0d, 4d * sigmaPriorMean}
    });
    final MultivariateGaussian phiPrior1 = new MultivariateGaussian(phiMean1, phiCov1);

    final Vector phiMean2 = VectorFactory.getDefault().copyArray(new double[] {
        0d, 0.1d
    });
    final Matrix phiCov2 = MatrixFactory.getDefault().copyArray(new double[][] {
        { 1d + 4d * sigmaPriorMean, 0d},
        { 0d, 4d * sigmaPriorMean}
    });
    final MultivariateGaussian phiPrior2 = new MultivariateGaussian(phiMean2, phiCov2);
   
    List<MultivariateGaussian> priorPhis = Lists.newArrayList(phiPrior1, phiPrior2);

    final HmmPlFilter<DlmHiddenMarkovModel, GaussianArHpTransitionState, Vector> wfFilter =
        new GaussianArHpHmmPLFilter(trueHmm1, sigmaPrior, priorPhis, random, true);

    final int K = 3;
    final int T = 200;
    final int N = 1000;

    /*
     * Note: replications are over the same set of simulated observations.
     */
    List<SimHmmObservedValue<Vector, Vector>> simulation = trueHmm1.sample(random, T);

    wfFilter.setNumParticles(N);
    wfFilter.setResampleOnly(false);

    log.info("rep\tt\tfilter.type\tmeasurement.type\tresample.type\tmeasurement");

    GaussianArHmmClassEvaluator wfClassEvaluator = new GaussianArHmmClassEvaluator("wf-pl",
        null);
    GaussianArHmmRmseEvaluator wfRmseEvaluator = new GaussianArHmmRmseEvaluator("wf-pl",
        null);
    GaussianArHmmPsiLearningEvaluator wfPsiEvaluator = new GaussianArHmmPsiLearningEvaluator("wf-pl",
        truePsis, null);

    RingAccumulator<MutableDouble> wfLatency =
        new RingAccumulator<MutableDouble>();
    Stopwatch wfWatch = new Stopwatch();


    for (int k = 0; k < K; k++) {
      log.info("Processing replication " + k);
      CountedDataDistribution<GaussianArHpTransitionState> wfDistribution =
          (CountedDataDistribution<GaussianArHpTransitionState>) wfFilter.getUpdater().createInitialParticles(N);


      final long numPreRuns = -1l;//wfDistribution.getMaxValueKey().getTime();
     
      /*
       * Recurse through the particle filter
       */
      for (int i = 0; i < T; i++) {
 
        final double x = simulation.get(i).getClassId();
        final Vector y = simulation.get(i).getObservedValue();

        if (i > numPreRuns) {

          if (i > 0) {
            wfWatch.reset();
View Full Code Here
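
The wfWatch / wfLatency pair above implements a simple per-update timing loop: reset and start the Guava Stopwatch around each filter update, then push the elapsed milliseconds into the RingAccumulator. A minimal sketch of that pattern in isolation (doOneFilterStep() is a hypothetical stand-in for the particle-filter update, and the accumulate/getMean calls are assumed from the Foundry RingAccumulator API):

      final RingAccumulator<MutableDouble> latency = new RingAccumulator<MutableDouble>();
      final Stopwatch watch = new Stopwatch();
      for (int i = 0; i < 10; i++) {
        watch.reset();
        watch.start();
        doOneFilterStep();   // hypothetical stand-in for the filter update
        watch.stop();
        latency.accumulate(new MutableDouble(watch.elapsed(TimeUnit.MILLISECONDS)));
      }
      log.info("mean update latency (ms): " + latency.getMean());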

    KalmanFilter trueKf1 = new KalmanFilter(model1, modelCovariance1, measurementCovariance);
    trueKf1.setCurrentInput(VectorFactory.getDefault().copyValues(truePsis.get(0).getElement(0)));
    KalmanFilter trueKf2 = new KalmanFilter(model2, modelCovariance2, measurementCovariance);
    trueKf2.setCurrentInput(VectorFactory.getDefault().copyValues(truePsis.get(1).getElement(0)));
   
    Vector initialClassProbs = VectorFactory.getDefault()
            .copyArray(new double[] { 0.4d, 0.6d });
    Matrix classTransProbs = MatrixFactory.getDefault().copyArray(
                new double[][] { { 0.9d, 0.1d },
                    { 0.1d, 0.9d } });
   
    DlmHiddenMarkovModel trueHmm1 = new DlmHiddenMarkovModel(
        Lists.newArrayList(trueKf1, trueKf2),
        initialClassProbs, classTransProbs);

    final double sigmaPriorMean = Math.pow(0.4, 2);
    final double sigmaPriorShape = 2d;
    final double sigmaPriorScale = sigmaPriorMean*(sigmaPriorShape + 1d);
    final InverseGammaDistribution sigmaPrior = new InverseGammaDistribution(sigmaPriorShape,
        sigmaPriorScale);
   
    final Vector phiMean1 = VectorFactory.getDefault().copyArray(new double[] {
        0d, 0.8d
    });
    final Matrix phiCov1 = MatrixFactory.getDefault().copyArray(new double[][] {
        {2d + 4d * sigmaPriorMean, 0d},
        { 0d, 4d * sigmaPriorMean}
    });
    final MultivariateGaussian phiPrior1 = new MultivariateGaussian(phiMean1, phiCov1);

    final Vector phiMean2 = VectorFactory.getDefault().copyArray(new double[] {
        0d, 0.1d
    });
    final Matrix phiCov2 = MatrixFactory.getDefault().copyArray(new double[][] {
        { 1d + 4d * sigmaPriorMean, 0d},
        { 0d, 4d * sigmaPriorMean}
    });
    final MultivariateGaussian phiPrior2 = new MultivariateGaussian(phiMean2, phiCov2);
   
    List<MultivariateGaussian> priorPhis = Lists.newArrayList(phiPrior1, phiPrior2);

    final HmmPlFilter<DlmHiddenMarkovModel, GaussianArHpTransitionState, Vector> wfFilter =
        new GaussianArHpHmmPLFilter(trueHmm1, sigmaPrior, priorPhis, random, true);

    final int K = 3;
    final int T = 200;
    final int N = 1000;

    /*
     * Note: replications are over the same set of simulated observations.
     */
    List<SimHmmObservedValue<Vector, Vector>> simulation = trueHmm1.sample(random, T);

    wfFilter.setNumParticles(N);
    wfFilter.setResampleOnly(false);

    log.info("rep\tt\tfilter.type\tmeasurement.type\tresample.type\tmeasurement");

    GaussianArHmmClassEvaluator wfClassEvaluator = new GaussianArHmmClassEvaluator("wf-pl",
        null);
    GaussianArHmmRmseEvaluator wfRmseEvaluator = new GaussianArHmmRmseEvaluator("wf-pl",
        null);
    GaussianArHmmPsiLearningEvaluator wfPsiEvaluator = new GaussianArHmmPsiLearningEvaluator("wf-pl",
        truePsis, null);

    RingAccumulator<MutableDouble> wfLatency =
        new RingAccumulator<MutableDouble>();
    Stopwatch wfWatch = new Stopwatch();


    for (int k = 0; k < K; k++) {
      log.info("Processing replication " + k);
      CountedDataDistribution<GaussianArHpTransitionState> wfDistribution =
          (CountedDataDistribution<GaussianArHpTransitionState>) wfFilter.getUpdater().createInitialParticles(N);


      final long numPreRuns = -1l;//wfDistribution.getMaxValueKey().getTime();
     
      /*
       * Recurse through the particle filter
       */
      for (int i = 0; i < T; i++) {
 
        final double x = simulation.get(i).getClassId();
        final Vector y = simulation.get(i).getObservedValue();

        if (i > numPreRuns) {

          if (i > 0) {
            wfWatch.reset();
View Full Code Here

      final long latency = watch.elapsed(TimeUnit.MILLISECONDS);
      latencyStats.update(latency);

      List<WeightedValue<Vector>> wMeanValues = Lists.newArrayList();
      List<WeightedValue<Matrix>> wCovValues = Lists.newArrayList();
      final Vector trueState = dlmSamples.get(i).getTrueState();
      double sum = 0d;
      double sqSum = 0d;
      final double distTotalLogProb = currentMixtureDistribution.getTotal();
      for (Entry<LogitMixParticle, ? extends Number> particleEntry :
        currentMixtureDistribution.asMap().entrySet()) {
        final Vector particleState = particleEntry.getKey().getLinearState().getMean();
        final double rse = trueState.minus(particleState).dotDivide(trueState).norm2();
        final double weight = Math.exp(particleEntry.getValue().doubleValue() - distTotalLogProb);
        wMeanValues.add(DefaultWeightedValue.create(particleState, weight));
        wCovValues.add(DefaultWeightedValue.create(
            particleEntry.getKey().getLinearState().getCovariance(), weight));
View Full Code Here
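
The loop above gathers one normalized-weight mean vector per particle in wMeanValues; collapsing them into a single posterior mean estimate is then a plain weighted sum over the list. A minimal sketch of that reduction (not the truncated code itself, just the generic follow-up step):

      Vector weightedMean =
          VectorFactory.getDefault().createVector(trueState.getDimensionality());
      for (WeightedValue<Vector> wv : wMeanValues) {
        weightedMean.plusEquals(wv.getValue().scale(wv.getWeight()));
      }
      final double meanError = trueState.minus(weightedMean).norm2();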

      log.info("obs:" + observation);
      plFilter.update(currentMixtureDistribution, observation);

      List<WeightedValue<Vector>> wMeanValues = Lists.newArrayList();
      List<WeightedValue<Matrix>> wCovValues = Lists.newArrayList();
      final Vector trueState = dlmSamples.get(i).getTrueState();
      double sum = 0d;
      double sqSum = 0d;
      final double distTotalLogProb = currentMixtureDistribution.getTotal();
      for (Entry<LogitMixParticle, ? extends Number> particleEntry :
        currentMixtureDistribution.asMap().entrySet()) {
        final Vector particleState = particleEntry.getKey().getLinearState().getMean();
        final double rse = trueState.minus(particleState).dotDivide(trueState).norm2();
        final double weight = Math.exp(particleEntry.getValue().doubleValue() - distTotalLogProb);
        wMeanValues.add(DefaultWeightedValue.create(particleState, weight));
        wCovValues.add(DefaultWeightedValue.create(
            particleEntry.getKey().getLinearState().getCovariance(), weight));
View Full Code Here

      log.info("obs:" + observation);
      plFilter.update(currentMixtureDistribution, observation);

      List<WeightedValue<Vector>> wMeanValues = Lists.newArrayList();
      List<WeightedValue<Matrix>> wCovValues = Lists.newArrayList();
      final Vector trueState = dlmSamples.get(i).getTrueState();
      double sum = 0d;
      double sqSum = 0d;
      final double distTotalLogProb = currentMixtureDistribution.getTotal();
      for (Entry<LogitMixParticle, ? extends Number> particleEntry :
        currentMixtureDistribution.asMap().entrySet()) {
        final Vector particleState = particleEntry.getKey().getLinearState().getMean();
        final double rse = trueState.minus(particleState).norm2();
        final double weight = Math.exp(particleEntry.getValue().doubleValue() - distTotalLogProb);
        wMeanValues.add(DefaultWeightedValue.create(particleState, weight));
        wCovValues.add(DefaultWeightedValue.create(
            particleEntry.getKey().getLinearState().getCovariance(), weight));
View Full Code Here

      log.info("obs:" + observation);
      plFilter.update(currentMixtureDistribution, observation);

      List<WeightedValue<Vector>> wMeanValues = Lists.newArrayList();
      List<WeightedValue<Matrix>> wCovValues = Lists.newArrayList();
      final Vector trueState = dlmSamples.get(i).getTrueState();
      double sum = 0d;
      double sqSum = 0d;
      final double distTotalLogProb = currentMixtureDistribution.getTotal();
      for (Entry<LogitMixParticle, ? extends Number> particleEntry :
        currentMixtureDistribution.asMap().entrySet()) {
        final Vector particleState = particleEntry.getKey().getLinearState().getMean();
        final double rse = trueState.minus(particleState).norm2();
        final double weight = Math.exp(particleEntry.getValue().doubleValue() - distTotalLogProb);
        wMeanValues.add(DefaultWeightedValue.create(particleState, weight));
        wCovValues.add(DefaultWeightedValue.create(
            particleEntry.getKey().getLinearState().getCovariance(), weight));
View Full Code Here

      final long latency = watch.elapsed(TimeUnit.MILLISECONDS);
      latencyStats.update(latency);

      List<WeightedValue<Vector>> wMeanValues = Lists.newArrayList();
      List<WeightedValue<Matrix>> wCovValues = Lists.newArrayList();
      final Vector trueState = dlmSamples.get(i).getTrueState();
      double sum = 0d;
      double sqSum = 0d;
      final double distTotalLogProb = currentMixtureDistribution.getTotal();
      for (Entry<LogitPGParticle, ? extends Number> particleEntry :
        currentMixtureDistribution.asMap().entrySet()) {
        final Vector particleState = particleEntry.getKey().getLinearState().getMean();
        final double rse = trueState.minus(particleState).dotDivide(trueState).norm2();
        final double weight = Math.exp(particleEntry.getValue().doubleValue() - distTotalLogProb);
        wMeanValues.add(DefaultWeightedValue.create(particleState, weight));
        wCovValues.add(DefaultWeightedValue.create(
            particleEntry.getKey().getLinearState().getCovariance(), weight));
View Full Code Here
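
Each of these loops also declares sum and sqSum accumulators whose use falls outside the visible excerpt. A typical pattern, and nothing more than an assumption about the hidden continuation, is to accumulate the weighted per-particle error and its square so that a mean and spread of the error can be reported after the loop:

      // inside the particle loop, once rse and weight are known:
      sum += weight * rse;
      sqSum += weight * rse * rse;
      // after the loop, with weights normalized as in the exp(logWeight - logTotal) step:
      final double errorMean = sum;
      final double errorVariance = sqSum - sum * sum;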
