Hi,
I ran into a strange error at the Metropolis sampling step (I have to use Metropolis sampling in this case):
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-692-2bf6cba863a0> in <module>
11
12 path = APWPath( 'apw', poles, 2 )
---> 13 trace = path.create_model()
14
15 # def plot_result():
<ipython-input-691-d73d1cea4efe> in create_model(self, site_lon_lat, watson_concentration, rate_scale, tpw_rate_scale)
194 # print(model.__dict__)
195
--> 196 trace = pm.sample(1000, step = pm.Metropolis())
197
198 return trace
~/anaconda3/envs/Bayesian/lib/python3.6/site-packages/pymc3/step_methods/arraystep.py in __new__(cls, *args, **kwargs)
63 # If we don't return the instance we have to manually
64 # call __init__
---> 65 step.__init__([var], *args, **kwargs)
66 # Hack for creating the class correctly when unpickling.
67 step.__newargs = ([var], ) + args, kwargs
~/anaconda3/envs/Bayesian/lib/python3.6/site-packages/pymc3/step_methods/metropolis.py in __init__(self, vars, S, proposal_dist, scaling, tune, tune_interval, model, mode, **kwargs)
134
135 shared = pm.make_shared_replacements(vars, model)
--> 136 self.delta_logp = delta_logp(model.logpt, vars, shared)
137 super(Metropolis, self).__init__(vars, shared)
138
~/anaconda3/envs/Bayesian/lib/python3.6/site-packages/pymc3/step_methods/metropolis.py in delta_logp(logp, vars, shared)
627 inarray1 = tensor_type('inarray1')
628
--> 629 logp1 = pm.CallableTensor(logp0)(inarray1)
630
631 f = theano.function([inarray1, inarray0], logp1 - logp0)
~/anaconda3/envs/Bayesian/lib/python3.6/site-packages/pymc3/theanof.py in __call__(self, input)
270 input : TensorVariable
271 """
--> 272 oldinput, = inputvars(self.tensor)
273 return theano.clone(self.tensor, {oldinput: input}, strict=False)
274
ValueError: too many values to unpack (expected 1)
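The last frame is the tuple unpacking in pymc3/theanof.py: oldinput, = inputvars(self.tensor) assumes the logp graph has exactly one free input and raises this ValueError as soon as there is more than one. A trivial sketch of the pattern that fails (plain Python; the list contents are made-up stand-ins, not my model):

# Single-element tuple unpacking: fine for a one-element sequence,
# raises "too many values to unpack (expected 1)" for anything longer.
inputs = ['inarray0', 'some_extra_input']   # hypothetical result of inputvars(...)
oldinput, = inputs                          # ValueError: too many values to unpack (expected 1)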
The code for the model:
def create_model(self, site_lon_lat=[0., 0.], watson_concentration=0., rate_scale=2.5, tpw_rate_scale=None):
    # Method of APWPath; pm is pymc3 and np is numpy. VMF, Watson_Girdle and
    # kappa_from_two_sigma are custom distributions/helpers defined elsewhere in my code.
    assert rate_scale > 0.0, "rate_scale must be a positive number."
    assert tpw_rate_scale is None or tpw_rate_scale > 0.0
    assert watson_concentration <= 0.0, "Nonnegative Watson concentration parameters are not supported."

    if tpw_rate_scale is None:
        self.include_tpw = False
    else:
        self.include_tpw = True

    args = []
    with pm.Model() as model:
        start = VMF('start',
                    lon_lat=(self._start_pole.longitude, self._start_pole.latitude),
                    k=kappa_from_two_sigma(self._start_pole._A95),
                    testval=np.array([1., 0.]), shape=2)

        if self.include_tpw:
            tpw_pole_angle = pm.Uniform('tpw_pole_angle', lower=0., upper=360.)
            tpw_rate = pm.Exponential('tpw_rate', tpw_rate_scale)

        # One Watson girdle prior per Euler rotation pole.
        for i in range(self.n_euler_rotations):
            euler = Watson_Girdle('euler_' + str(i),
                                  lon_lat=site_lon_lat,
                                  k=watson_concentration,
                                  testval=np.array([1., 0.]), shape=2)
            args.append(euler)

        # Exponential priors on the rotation rates.
        for i in range(self.n_euler_rotations):
            rate = pm.Exponential('rate_' + str(i), rate_scale)
            args.append(rate)

        # Changepoints between successive Euler rotations.
        age_list = [p._age for p in self._poles]
        for i in range(self.n_euler_rotations - 1):
            changepoint = pm.Uniform(
                'changepoint_' + str(i), lower=min(age_list), upper=max(age_list))
            args.append(changepoint)

        for i, p in enumerate(self._poles):
            if p._age_type == 'gaussian':
                pole_age = pm.Normal(
                    'a_' + str(i), mu=p._age, tau=np.power(p._sigma_age, -2.))
            elif p._age_type == 'uniform':
                pole_age = pm.Uniform(
                    'a_' + str(i), lower=p._sigma_age[0], upper=p._sigma_age[1])

            # Include TPW rate if it is part of model_vars
            if self.include_tpw:
                lon_lat = self._pole_position_fn(start, pole_age, tpw_pole_angle, tpw_rate, *args)
            # Otherwise use zero for TPW rate.
            else:
                lon_lat = self._pole_position_fn(start, pole_age, 0., 0., *args)

            observed_pole = VMF('p_' + str(i),
                                lon_lat,
                                k=kappa_from_two_sigma(p._A95),
                                observed=[p.longitude, p.latitude])

        trace = pm.sample(1000, step=pm.Metropolis())

    return trace
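In case it helps narrow things down: the Metropolis signature in the traceback takes a vars argument, so the variant below is what I would switch to in order to hand the free random variables to the step method explicitly (just a sketch of replacing the pm.sample call above; model.free_RVs is the standard PyMC3 attribute, nothing custom of mine):

with model:
    # Give Metropolis the free RVs explicitly instead of letting it
    # default to all of the model's variables.
    step = pm.Metropolis(vars=model.free_RVs)
    trace = pm.sample(1000, step=step)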