Parameter repetition in model definition

I am trying to create a model for visuomotor adaptation, based on the state-space model discussed in this paper: [link] (read it if you want to get more familiar with the model).

In the definition of my model parameters, sigma_epsilon appears twice: it is used both for the per-trial noise term epsilon and in the definition of y_hat (the likelihood). I was wondering whether this is an issue for the PyMC code and, if so, how I can improve it. I have run the code and it does not raise any errors; however, there are some divergences.

I would also appreciate any suggestions about the visualization of my results as I have been struggling to make a good graph representing the results.

from os.path import dirname, realpath, join as pjoin

import arviz as az
import numpy as np
import pymc as pm
import pytensor
import pytensor.tensor as pt
import scipy.io as spio

def main():
    """Fit a hierarchical state-space model of visuomotor adaptation.

    Loads per-trial aiming-error, rotation and cursor-visibility matrices
    from a MATLAB file located next to this script, builds the PyMC model,
    and samples the posterior with NUTS.

    Returns
    -------
    arviz.InferenceData
        The posterior samples produced by ``pm.sample``.
    """
    # --- Load data ------------------------------------------------------
    dir_path = dirname(realpath(__file__))
    mat_fname = pjoin(dir_path, 'dataForBayesianAnalysisEEG-VM.mat')
    n_steps = 900
    n_subjects = 60
    sixtydata = spio.loadmat(mat_fname)
    # NOTE(review): assumes each matrix is (trials, subjects) -- confirm
    # against the .mat file before uncommenting the slices.
    data_y = sixtydata['aimingError']  # [:n_steps, :n_subjects]
    data_p = pt.as_tensor_variable(sixtydata['rotation'])  # [:n_steps, :n_subjects]
    data_v = pt.as_tensor_variable(sixtydata['showCursor'])  # [:n_steps, :n_subjects]

    # --- PyMC model -----------------------------------------------------
    # BUG FIX: the original coords dict was never closed and was missing
    # the 'subjects' coordinate that every dims='subjects' variable needs.
    coords = {'time': np.arange(n_steps),
              'subjects': np.arange(n_subjects)}

    with pm.Model(coords=coords) as model:
        # Population-level hyper-priors.
        A1mu = pm.Normal('A1mu', mu=3, sigma=1)
        A1std = pm.Gamma('A1std', mu=1, sigma=1)
        B1mu = pm.Normal('B1mu', mu=-2, sigma=1)
        B1std = pm.Gamma('B1std', mu=1, sigma=1)
        etamu = pm.Gamma('etamu', mu=1, sigma=1)
        etastd = pm.Gamma('etastd', mu=1, sigma=1)
        epsilonmu = pm.Gamma('epsilonmu', mu=4, sigma=1)
        epsilonstd = pm.Gamma('epsilonstd', mu=1, sigma=1)

        # Per-subject parameters; A and B are squashed to (0, 1) via the
        # inverse logit.
        A1 = pm.Normal('A1', mu=A1mu, sigma=A1std, dims='subjects')
        B1 = pm.Normal('B1', mu=B1mu, sigma=B1std, dims='subjects')
        A = pm.Deterministic('A', pm.math.invlogit(A1), dims='subjects')
        B = pm.Deterministic('B', pm.math.invlogit(B1), dims='subjects')
        sigma_eta = pm.Gamma('sigma_eta', mu=etamu, sigma=etastd,
                             dims='subjects')
        sigma_epsilon = pm.Gamma('sigma_epsilon', mu=epsilonmu,
                                 sigma=epsilonstd, dims='subjects')

        # NOTE(review): sigma_epsilon is used twice -- for the per-trial
        # 'epsilon' below AND as the likelihood sigma of y_hat -- so the
        # observation noise is effectively counted twice. This is a likely
        # source of the divergences; consider marginalizing the
        # observation noise (e.g. via a CustomDist over trajectories).
        eta = pm.Normal('eta', mu=0, sigma=sigma_eta,
                        dims=['time', 'subjects'])
        epsilon = pm.Normal('epsilon', mu=0, sigma=sigma_epsilon,
                            dims=['time', 'subjects'])
        # BUG FIX: give the initial state an explicit subjects dim so its
        # shape matches the scan output it is concatenated with below.
        x_init = pm.Normal('x_init', mu=0, sigma=sigma_eta,
                           dims='subjects')

        def grw_step(eta_t, epsilon_t, p_t, v_t, x_t, A, B):
            # Retain a fraction A of the state; when the cursor is shown
            # (v_t), correct by B times the perceived error.
            return A * x_t - v_t * B * (p_t + x_t + epsilon_t) + eta_t

        x, updates = pytensor.scan(fn=grw_step,
                                   sequences=[eta, epsilon, data_p, data_v],
                                   outputs_info=[{'initial': x_init}],
                                   allow_gc=False)
        # Shift so x[t] is the state *before* trial t's update; x_init[None]
        # adds the leading time axis explicitly.
        x = pt.concatenate([x_init[None], x[:-1]], axis=0)
        y_hat = pm.Normal('y_hat', mu=x, sigma=sigma_epsilon,
                          observed=data_y)
        # target_accept raised from the 0.8 default to reduce divergences.
        idata = pm.sample(draws=2000, tune=2000, init='adapt_diag',
                          target_accept=0.95)
    return idata

if __name__ == '__main__':
    # BUG FIX: the guard had an empty body (a SyntaxError); run the
    # analysis only when executed as a script.
    main()
Hi Rob,

You should probably be using a CustomDist to represent the joint distribution of all observations in each sampled trajectory of the state space. Here’s a gist that shows how to do this, as well as how to handle common post-estimation tasks.