Thank you for your response!
I’m using my own custom likelihood function. My code is as follows:
class LogLike(at.Op):
    """Aesara Op wrapping a black-box (external) log-likelihood function.

    Takes a vector of parameter values and returns the scalar
    log-likelihood, so an arbitrary Python likelihood can be plugged
    into a PyMC model via ``pm.Potential``.
    """

    itypes = [at.dvector]  # expects a vector of parameter values when called
    otypes = [at.dscalar]  # outputs a single scalar value (the log likelihood)

    def __init__(self, loglik, rtol=None, atol=None):
        """
        Initialise the Op with the log-likelihood function it wraps.

        Parameters
        ----------
        loglik : callable
            Maps a 1-D parameter vector to a scalar log-likelihood.
        rtol, atol : optional
            Tolerances; currently unused by this Op. Defaults added so
            ``LogLike(loglik)`` (as used below) works without them.
        """
        # store the callable directly — the lambda wrapper added nothing
        self.likelihood = loglik

    def perform(self, node, inputs, outputs):
        # the method Aesara calls to evaluate the Op
        import numpy as np  # local import: keeps the snippet self-contained

        (theta,) = inputs  # the 1-D vector of parameter values
        logl = self.likelihood(theta)
        # BUG FIX: the result must be written into the output storage;
        # the original `perform` computed `logl` and discarded it, so the
        # Op produced no value at all.
        outputs[0][0] = np.array(logl)
# wrap the custom likelihood once, outside the model
logl = LogLike(loglik)

with pm.Model():
    # one independent standard-normal prior per parameter
    # (fixed: the original looped over `variable_name`, but the list is
    # called `param_names` everywhere else in the post)
    variables = [pm.Normal(name, mu=0.0, sigma=1.0) for name in param_names]

    # stack the free variables into a single tensor the Op can consume
    theta = at.as_tensor_variable(variables)

    # use a Potential to "call" the Op and include it in the logp computation
    pm.Potential("likelihood", logl(theta))

    # sampling must happen inside the model context (indentation restored)
    mh = pm.sample(draws=int(nsamples), step=pm.Metropolis())
To clarify, are you recommending I change
variables = []
for param_name in param_names:
variables.append(pm.Normal(param_name, mu=0., sigma=1.))
to
variables = pm.MvNormal("variables", mu=np.zeros(len(param_names)), cov=np.diag(np.ones(len(param_names))))?