Hi,
I am new to pymc3 and have been trying to use the NUTS sampler in my model. My initial model used the Metropolis sampler and it worked fine but I was wanting to use NUTS to draw comparisons between different samplers. The code for my model is as follows -
This part is where I initialize the parameters.
param1 = pm.Normal('param1_{0}'.format(index), mu=3, sd=1)
param2 = pm.Normal('param2_{0}'.format(index),mu=3, sd=1)
param3 = pm.Normal('param3_{0}'.format(index), mu=1, sd=0.5)
param4 = pm.Normal('param4_{0}'.format(index), mu=1, sd=0.5)
offset = pm.Normal('offset_{0}'.format(index), mu=1, sd=0.5)
Consequently, the next iterations use this part to update the params.
self.param1_samples.append(pd.Series(self.parameters['param1_{0}'.format(index - 1)][self.burnin:]))
self.param2_samples.append(pd.Series(self.parameters['param2_{0}'.format(index - 1)][self.burnin:]))
self.param3_samples.append(pd.Series(self.parameters['param3_{0}'.format(index - 1)][self.burnin:]))
self.param4_samples.append(pd.Series(self.parameters['param4_{0}'.format(index - 1)][self.burnin:]))
self.offset_samples.append(pd.Series(self.parameters['offset_{0}'.format(index - 1)][self.burnin:]))
self.param1 = from_posterior('param1_{0}'.format(index), self.param1_samples[index][self.param1_samples[index] > 0].values)
self.param2 = from_posterior('param2_{0}'.format(index), self.param2_samples[index][self.param2_samples[index] > 0].values)
self.param3 = from_posterior('param3_{0}'.format(index), self.param3_samples[index][self.param3_samples[index] > 0].values)
self.param4 = from_posterior('param4_{0}'.format(index), self.param4_samples[index][self.param4_samples[index] > 0].values)
self.offset = from_posterior('offset_{0}'.format(index), self.offset_samples[index][self.offset_samples[index] > 0].values)
The from_posterior function looks like this -
def from_posterior(param, samples):
class From_posterior(pm.Continuous):
def __init__(self, *args, **kwargs):
self.from_posterior_logp = _from_posterior_logp()
super(From_posterior, self).__init__(*args, **kwargs)
def logp(self, value):
return self.from_posterior_logp(value)
class From_posterior_logp:
def __init__(self, samples):
smin, smax = np.min(samples), np.max(samples)
self.x = np.linspace(smin, smax, 100)
self.y = stats.gaussian_kde(samples)(self.x)
def from_posterior_logp(self, value):
x, y = self.x, self.y
y0 = 0
return np.array(np.log(np.interp(value, x, y, left=y0, right=y0)))
from_posterior_logp = From_posterior_logp(samples)
def _from_posterior_logp():
@as_op(itypes=[tt.dscalar], otypes=[tt.dscalar])
def logp(value):
return from_posterior_logp.from_posterior_logp(value)
return logp
return From_posterior(param, testval=np.median(samples))
This is how I calculate the likelihood.
self.alpha = self.param1*self.covariate1 + self.param2*self.covariate2 + self.offset self.beta = self.param3*self.covariate1 + self.param4*self.covariate2 self.mu = pm.math.log(self.alpha) self.s = tt.power(self.beta, -1) final = pm.Logistic('final_{0}'.format(index), mu=self.mu, s=self.s, observed=pm.math.log(data)) step = pm.NUTS([self.param1, self.param2, self.param3, self.param4, self.offset]) trace = pm.sample(self.trace_length, step=step)
Now the error that I get from using NUTS is this ‘FromFunctionOp’ has no attribute ‘grad’. I suppose that I need to define the gradients. I wanted to ask how do I define those gradients and for which variables? Do the variables include alpha, beta, mu, and s? Or are they just param1, param2, param3, param4, offset?
The weird thing is that the first iteration works fine. It is the second iteration where things break.