Changing shape of shared variable

I want to run a model on many different datasets. To save computation time, I would like to avoid setting up a new model for each dataset. I was aiming to implement an approach similar to the one posted by twiecki (example_pymc3_model_cache.py · GitHub). However, that only works when the shapes of all datasets coincide. I took an example from the docs (https://docs.pymc.io/advanced_theano.html?highlight=advanced%20usage) and changed it slightly so that it includes datasets of different shapes. The code is the following:

import numpy as np
import pymc3 as pm
from theano import shared

# Generate 10 datasets of length 20 each
true_mu = [np.random.randn() for _ in range(10)]
observed_data = [mu + np.random.randn(20) for mu in true_mu]

# Change the shape of the second dataset
observed_data[1] = observed_data[1][0:5]

data = shared(observed_data[0])
with pm.Model() as model:
    mu = pm.Normal('mu', 0, 10)
    pm.Normal('y', mu=mu, sd=1, observed=data)

# Generate one trace for each dataset
traces = []
for data_vals in observed_data:
    # Switch out the observed dataset
    data.set_value(data_vals)
    with model:
        traces.append(pm.sample())

The error I am getting is:

ValueError: Elemwise{sub,no_inplace}.grad returned object of shape (20,) as gradient term on input 0 of shape (5,)

It seems that a shared variable can only be swapped out as long as the shape of the datasets does not change. A related bug report has already been opened here: Changing amount of data gives Theano error · Issue #3007 · pymc-devs/pymc · GitHub

Is there any work-around for this issue?
Thanks!


Can you post the full traceback?

Here is the full traceback:

---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
<ipython-input-13-b9a3ae314831> in <module>()
     17     data.set_value(data_vals)
     18     with model:
---> 19         traces.append(pm.sample())

/anaconda3/envs/pymc3_env/lib/python3.7/site-packages/pymc3/sampling.py in sample(draws, step, init, n_init, start, trace, chain_idx, chains, cores, tune, nuts_kwargs, step_kwargs, progressbar, model, random_seed, live_plot, discard_tuned_samples, live_plot_kwargs, compute_convergence_checks, use_mmap, **kwargs)
    403                 start_, step = init_nuts(init=init, chains=chains, n_init=n_init,
    404                                          model=model, random_seed=random_seed,
--> 405                                          progressbar=progressbar, **args)
    406                 if start is None:
    407                     start = start_

/anaconda3/envs/pymc3_env/lib/python3.7/site-packages/pymc3/sampling.py in init_nuts(init, chains, n_init, model, random_seed, progressbar, **kwargs)
   1504             'Unknown initializer: {}.'.format(init))
   1505 
-> 1506     step = pm.NUTS(potential=potential, model=model, **kwargs)
   1507 
   1508     return start, step

/anaconda3/envs/pymc3_env/lib/python3.7/site-packages/pymc3/step_methods/hmc/nuts.py in __init__(self, vars, max_treedepth, early_max_treedepth, **kwargs)
    150         `pm.sample` to the desired number of tuning steps.
    151         """
--> 152         super(NUTS, self).__init__(vars, **kwargs)
    153 
    154         self.max_treedepth = max_treedepth

/anaconda3/envs/pymc3_env/lib/python3.7/site-packages/pymc3/step_methods/hmc/base_hmc.py in __init__(self, vars, scaling, step_scale, is_cov, model, blocked, potential, integrator, dtype, Emax, target_accept, gamma, k, t0, adapt_step_size, step_rand, **theano_kwargs)
     61 
     62         super(BaseHMC, self).__init__(vars, blocked=blocked, model=model,
---> 63                                       dtype=dtype, **theano_kwargs)
     64 
     65         self.adapt_step_size = adapt_step_size

/anaconda3/envs/pymc3_env/lib/python3.7/site-packages/pymc3/step_methods/arraystep.py in __init__(self, vars, model, blocked, dtype, **theano_kwargs)
    226 
    227         func = model.logp_dlogp_function(
--> 228             vars, dtype=dtype, **theano_kwargs)
    229 
    230         # handle edge case discovered in #2948

/anaconda3/envs/pymc3_env/lib/python3.7/site-packages/pymc3/model.py in logp_dlogp_function(self, grad_vars, **kwargs)
    707         varnames = [var.name for var in grad_vars]
    708         extra_vars = [var for var in self.free_RVs if var.name not in varnames]
--> 709         return ValueGradFunction(self.logpt, grad_vars, extra_vars, **kwargs)
    710 
    711     @property

/anaconda3/envs/pymc3_env/lib/python3.7/site-packages/pymc3/model.py in __init__(self, cost, grad_vars, extra_vars, dtype, casting, **kwargs)
    440             self._cost, grad_vars, self._ordering.vmap)
    441 
--> 442         grad = tt.grad(self._cost_joined, self._vars_joined)
    443         grad.name = '__grad'
    444 

/anaconda3/envs/pymc3_env/lib/python3.7/site-packages/theano/gradient.py in grad(cost, wrt, consider_constant, disconnected_inputs, add_names, known_grads, return_disconnected, null_gradients)
    603 
    604     rval = _populate_grad_dict(var_to_app_to_idx,
--> 605                                grad_dict, wrt, cost_name)
    606 
    607     for i in xrange(len(rval)):

/anaconda3/envs/pymc3_env/lib/python3.7/site-packages/theano/gradient.py in _populate_grad_dict(var_to_app_to_idx, grad_dict, wrt, cost_name)
   1369         return grad_dict[var]
   1370 
-> 1371     rval = [access_grad_cache(elem) for elem in wrt]
   1372 
   1373     return rval

/anaconda3/envs/pymc3_env/lib/python3.7/site-packages/theano/gradient.py in <listcomp>(.0)
   1369         return grad_dict[var]
   1370 
-> 1371     rval = [access_grad_cache(elem) for elem in wrt]
   1372 
   1373     return rval

/anaconda3/envs/pymc3_env/lib/python3.7/site-packages/theano/gradient.py in access_grad_cache(var)
   1324                     for idx in node_to_idx[node]:
   1325 
-> 1326                         term = access_term_cache(node)[idx]
   1327 
   1328                         if not isinstance(term, gof.Variable):

/anaconda3/envs/pymc3_env/lib/python3.7/site-packages/theano/gradient.py in access_term_cache(node)
   1019             inputs = node.inputs
   1020 
-> 1021             output_grads = [access_grad_cache(var) for var in node.outputs]
   1022 
   1023             # list of bools indicating if each output is connected to the cost

/anaconda3/envs/pymc3_env/lib/python3.7/site-packages/theano/gradient.py in <listcomp>(.0)
   1019             inputs = node.inputs
   1020 
-> 1021             output_grads = [access_grad_cache(var) for var in node.outputs]
   1022 
   1023             # list of bools indicating if each output is connected to the cost

/anaconda3/envs/pymc3_env/lib/python3.7/site-packages/theano/gradient.py in access_grad_cache(var)
   1324                     for idx in node_to_idx[node]:
   1325 
-> 1326                         term = access_term_cache(node)[idx]
   1327 
   1328                         if not isinstance(term, gof.Variable):

/anaconda3/envs/pymc3_env/lib/python3.7/site-packages/theano/gradient.py in access_term_cache(node)
   1019             inputs = node.inputs
   1020 
-> 1021             output_grads = [access_grad_cache(var) for var in node.outputs]
   1022 
   1023             # list of bools indicating if each output is connected to the cost

/anaconda3/envs/pymc3_env/lib/python3.7/site-packages/theano/gradient.py in <listcomp>(.0)
   1019             inputs = node.inputs
   1020 
-> 1021             output_grads = [access_grad_cache(var) for var in node.outputs]
   1022 
   1023             # list of bools indicating if each output is connected to the cost

/anaconda3/envs/pymc3_env/lib/python3.7/site-packages/theano/gradient.py in access_grad_cache(var)
   1324                     for idx in node_to_idx[node]:
   1325 
-> 1326                         term = access_term_cache(node)[idx]
   1327 
   1328                         if not isinstance(term, gof.Variable):

/anaconda3/envs/pymc3_env/lib/python3.7/site-packages/theano/gradient.py in access_term_cache(node)
   1019             inputs = node.inputs
   1020 
-> 1021             output_grads = [access_grad_cache(var) for var in node.outputs]
   1022 
   1023             # list of bools indicating if each output is connected to the cost

/anaconda3/envs/pymc3_env/lib/python3.7/site-packages/theano/gradient.py in <listcomp>(.0)
   1019             inputs = node.inputs
   1020 
-> 1021             output_grads = [access_grad_cache(var) for var in node.outputs]
   1022 
   1023             # list of bools indicating if each output is connected to the cost

/anaconda3/envs/pymc3_env/lib/python3.7/site-packages/theano/gradient.py in access_grad_cache(var)
   1324                     for idx in node_to_idx[node]:
   1325 
-> 1326                         term = access_term_cache(node)[idx]
   1327 
   1328                         if not isinstance(term, gof.Variable):

/anaconda3/envs/pymc3_env/lib/python3.7/site-packages/theano/gradient.py in access_term_cache(node)
   1235                                 "%s.grad returned object of "
   1236                                 "shape %s as gradient term on input %d "
-> 1237                                 "of shape %s" % (node.op, t_shape, i, i_shape))
   1238 
   1239                 if not isinstance(term.type,

ValueError: Elemwise{sub,no_inplace}.grad returned object of shape (20,) as gradient term on input 0 of shape (5,)

Thanks, unfortunately that means we do not support changing the shape of the observed data after model initialization.
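
One practical workaround, as a minimal sketch: since the limitation only bites when the shape changes, you can group the datasets by shape and compile one model per distinct shape, swapping the shared variable only between same-shaped datasets. This keeps the spirit of twiecki's caching approach (the grouping code here is my own, not from that gist):

import numpy as np
import pymc3 as pm
from theano import shared
from collections import defaultdict

# Group dataset indices by shape so the shared variable is only
# ever swapped between arrays of identical shape.
by_shape = defaultdict(list)
for i, data_vals in enumerate(observed_data):
    by_shape[data_vals.shape].append(i)

traces = {}
for shape, indices in by_shape.items():
    # One model (and one compiled logp/gradient) per distinct shape
    data = shared(observed_data[indices[0]])
    with pm.Model():
        mu = pm.Normal('mu', 0, 10)
        pm.Normal('y', mu=mu, sd=1, observed=data)
        for i in indices:
            data.set_value(observed_data[i])
            traces[i] = pm.sample()

You only pay the model setup and compilation cost once per shape rather than once per dataset.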

I am surprised that it doesn’t work either, but it seems to be the case (I also tried rewriting it using a pm.Potential).
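
For reference, a sketch of the kind of pm.Potential rewrite I mean (my own formulation), which hits the same gradient shape error as soon as data.set_value changes the shape:

data = shared(observed_data[0])
with pm.Model() as model:
    mu = pm.Normal('mu', 0, 10)
    # Add the observation log-likelihood manually instead of observed=
    pm.Potential('y_logp', pm.Normal.dist(mu=mu, sd=1).logp(data).sum())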