Sampling issue: The following error happened while compiling the node

Greetings!

I’m having issues with sampling. I have a lot of predictors (~150+). Here’s my code with fake data:

# Simulate 1000 rows of 150 Poisson-distributed columns as stand-in data.
fake_data = pd.DataFrame(np.random.poisson(size=(1000,150)))
fake_data.columns = [ 'col_' + str(i) for i in range(150) ]

with pm.Model() as model:
    # Build the linear predictor one term at a time: each of the first
    # 149 columns gets its own scalar Normal(0, 2) coefficient.
    # NOTE(review): this produces ~149 separate scalar RVs and one giant
    # elementwise sum-of-products node in the graph — presumably what
    # drives the generated C code past the compiler's bracket-nesting
    # limit reported in the traceback below.
    summation = 0
    for predictor in fake_data.columns[:-1]:
        summation += pm.Normal(name=predictor, mu=0, sd=2) * fake_data[predictor].astype(float)
    
    # Poisson likelihood with a log link; col_149 is the observed count.
    pm.Poisson(
        name='course_topic_interest_culture_count',
        mu=np.exp(summation),
        observed=fake_data['col_149']
    )
    
    # Draw posterior samples (NUTS is auto-assigned, per the log below).
    traces['col_149'] = pm.sample()

Here’s the error that I got:

INFO (theano.gof.compilelock): Refreshing lock /Users/eddericugaddan/.theano/compiledir_Darwin-18.2.0-x86_64-i386-64bit-i386-3.6.0-64/lock_dir/lock
Auto-assigning NUTS sampler...
Initializing NUTS using jitter+adapt_diag...
INFO (theano.gof.compilelock): Refreshing lock /Users/eddericugaddan/.theano/compiledir_Darwin-18.2.0-x86_64-i386-64bit-i386-3.6.0-64/lock_dir/lock


You can find the C code in this temporary file: /var/folders/_t/k13fpkjx41z2qd6ws3ry4tn00000gn/T/theano_compilation_error_hcy9krlp

---------------------------------------------------------------------------
Exception                                 Traceback (most recent call last)
<ipython-input-106-6190dd043682> in <module>()
     10     )
     11 
---> 12     traces['col_149'] = pm.sample()

~/anaconda3/lib/python3.6/site-packages/pymc3/sampling.py in sample(draws, step, init, n_init, start, trace, chain_idx, chains, cores, tune, nuts_kwargs, step_kwargs, progressbar, model, random_seed, live_plot, discard_tuned_samples, live_plot_kwargs, compute_convergence_checks, use_mmap, **kwargs)
    403                 start_, step = init_nuts(init=init, chains=chains, n_init=n_init,
    404                                          model=model, random_seed=random_seed,
--> 405                                          progressbar=progressbar, **args)
    406                 if start is None:
    407                     start = start_

~/anaconda3/lib/python3.6/site-packages/pymc3/sampling.py in init_nuts(init, chains, n_init, model, random_seed, progressbar, **kwargs)
   1504             'Unknown initializer: {}.'.format(init))
   1505 
-> 1506     step = pm.NUTS(potential=potential, model=model, **kwargs)
   1507 
   1508     return start, step

~/anaconda3/lib/python3.6/site-packages/pymc3/step_methods/hmc/nuts.py in __init__(self, vars, max_treedepth, early_max_treedepth, **kwargs)
    150         `pm.sample` to the desired number of tuning steps.
    151         """
--> 152         super(NUTS, self).__init__(vars, **kwargs)
    153 
    154         self.max_treedepth = max_treedepth

~/anaconda3/lib/python3.6/site-packages/pymc3/step_methods/hmc/base_hmc.py in __init__(self, vars, scaling, step_scale, is_cov, model, blocked, potential, integrator, dtype, Emax, target_accept, gamma, k, t0, adapt_step_size, step_rand, **theano_kwargs)
     61 
     62         super(BaseHMC, self).__init__(vars, blocked=blocked, model=model,
---> 63                                       dtype=dtype, **theano_kwargs)
     64 
     65         self.adapt_step_size = adapt_step_size

~/anaconda3/lib/python3.6/site-packages/pymc3/step_methods/arraystep.py in __init__(self, vars, model, blocked, dtype, **theano_kwargs)
    226 
    227         func = model.logp_dlogp_function(
--> 228             vars, dtype=dtype, **theano_kwargs)
    229 
    230         # handle edge case discovered in #2948

~/anaconda3/lib/python3.6/site-packages/pymc3/model.py in logp_dlogp_function(self, grad_vars, **kwargs)
    707         varnames = [var.name for var in grad_vars]
    708         extra_vars = [var for var in self.free_RVs if var.name not in varnames]
--> 709         return ValueGradFunction(self.logpt, grad_vars, extra_vars, **kwargs)
    710 
    711     @property

~/anaconda3/lib/python3.6/site-packages/pymc3/model.py in __init__(self, cost, grad_vars, extra_vars, dtype, casting, **kwargs)
    446 
    447         self._theano_function = theano.function(
--> 448             inputs, [self._cost_joined, grad], givens=givens, **kwargs)
    449 
    450     def set_extra_values(self, extra_vars):

~/anaconda3/lib/python3.6/site-packages/theano/compile/function.py in function(inputs, outputs, mode, updates, givens, no_default_updates, accept_inplace, name, rebuild_strict, allow_input_downcast, profile, on_unused_input)
    315                    on_unused_input=on_unused_input,
    316                    profile=profile,
--> 317                    output_keys=output_keys)
    318     return fn

~/anaconda3/lib/python3.6/site-packages/theano/compile/pfunc.py in pfunc(params, outputs, mode, updates, givens, no_default_updates, accept_inplace, name, rebuild_strict, allow_input_downcast, profile, on_unused_input, output_keys)
    484                          accept_inplace=accept_inplace, name=name,
    485                          profile=profile, on_unused_input=on_unused_input,
--> 486                          output_keys=output_keys)
    487 
    488 

~/anaconda3/lib/python3.6/site-packages/theano/compile/function_module.py in orig_function(inputs, outputs, mode, accept_inplace, name, profile, on_unused_input, output_keys)
   1839                   name=name)
   1840         with theano.change_flags(compute_test_value="off"):
-> 1841             fn = m.create(defaults)
   1842     finally:
   1843         t2 = time.time()

~/anaconda3/lib/python3.6/site-packages/theano/compile/function_module.py in create(self, input_storage, trustme, storage_map)
   1713             theano.config.traceback.limit = theano.config.traceback.compile_limit
   1714             _fn, _i, _o = self.linker.make_thunk(
-> 1715                 input_storage=input_storage_lists, storage_map=storage_map)
   1716         finally:
   1717             theano.config.traceback.limit = limit_orig

~/anaconda3/lib/python3.6/site-packages/theano/gof/link.py in make_thunk(self, input_storage, output_storage, storage_map)
    697         return self.make_all(input_storage=input_storage,
    698                              output_storage=output_storage,
--> 699                              storage_map=storage_map)[:3]
    700 
    701     def make_all(self, input_storage, output_storage):

~/anaconda3/lib/python3.6/site-packages/theano/gof/vm.py in make_all(self, profiler, input_storage, output_storage, storage_map)
   1089                                                  compute_map,
   1090                                                  [],
-> 1091                                                  impl=impl))
   1092                 linker_make_thunk_time[node] = time.time() - thunk_start
   1093                 if not hasattr(thunks[-1], 'lazy'):

~/anaconda3/lib/python3.6/site-packages/theano/gof/op.py in make_thunk(self, node, storage_map, compute_map, no_recycling, impl)
    953             try:
    954                 return self.make_c_thunk(node, storage_map, compute_map,
--> 955                                          no_recycling)
    956             except (NotImplementedError, utils.MethodNotDefined):
    957                 # We requested the c code, so don't catch the error.

~/anaconda3/lib/python3.6/site-packages/theano/gof/op.py in make_c_thunk(self, node, storage_map, compute_map, no_recycling)
    856         _logger.debug('Trying CLinker.make_thunk')
    857         outputs = cl.make_thunk(input_storage=node_input_storage,
--> 858                                 output_storage=node_output_storage)
    859         thunk, node_input_filters, node_output_filters = outputs
    860 

~/anaconda3/lib/python3.6/site-packages/theano/gof/cc.py in make_thunk(self, input_storage, output_storage, storage_map, keep_lock)
   1215         cthunk, module, in_storage, out_storage, error_storage = self.__compile__(
   1216             input_storage, output_storage, storage_map,
-> 1217             keep_lock=keep_lock)
   1218 
   1219         res = _CThunk(cthunk, init_tasks, tasks, error_storage, module)

~/anaconda3/lib/python3.6/site-packages/theano/gof/cc.py in __compile__(self, input_storage, output_storage, storage_map, keep_lock)
   1155                                             output_storage,
   1156                                             storage_map,
-> 1157                                             keep_lock=keep_lock)
   1158         return (thunk,
   1159                 module,

~/anaconda3/lib/python3.6/site-packages/theano/gof/cc.py in cthunk_factory(self, error_storage, in_storage, out_storage, storage_map, keep_lock)
   1618                 node.op.prepare_node(node, storage_map, None, 'c')
   1619             module = get_module_cache().module_from_key(
-> 1620                 key=key, lnk=self, keep_lock=keep_lock)
   1621 
   1622         vars = self.inputs + self.outputs + self.orphans

~/anaconda3/lib/python3.6/site-packages/theano/gof/cmodule.py in module_from_key(self, key, lnk, keep_lock)
   1179             try:
   1180                 location = dlimport_workdir(self.dirname)
-> 1181                 module = lnk.compile_cmodule(location)
   1182                 name = module.__file__
   1183                 assert name.startswith(location)

~/anaconda3/lib/python3.6/site-packages/theano/gof/cc.py in compile_cmodule(self, location)
   1521                 lib_dirs=self.lib_dirs(),
   1522                 libs=libs,
-> 1523                 preargs=preargs)
   1524         except Exception as e:
   1525             e.args += (str(self.fgraph),)

~/anaconda3/lib/python3.6/site-packages/theano/gof/cmodule.py in compile_str(module_name, src_code, location, include_dirs, lib_dirs, libs, preargs, py_module, hide_symbols)
   2386             # difficult to read.
   2387             raise Exception('Compilation failed (return status=%s): %s' %
-> 2388                             (status, compile_stderr.replace('\n', '. ')))
   2389         elif config.cmodule.compilation_warning and compile_stderr:
   2390             # Print errors just below the command line.

Exception: ('The following error happened while compiling the node', Elemwise{Composite{exp(((i0 * i1) + (i2 * i3) + (i4 * i5) + (i6 * i7) + (i8 * i9) + (i10 * i11) + (i12 * i13) + (i14 * i15) + (i16 * i17) + (i18 * i19) + (i20 * i21) + (i22 * i23) + (i24 * i25) + (i26 * i27) + (i28 * i29) + (i30 * i31) + (i32 * i33) + (i34 * i35) + (i36 * i37) + (i38 * i39) + (i40 * i41) + (i42 * i43) + (i44 * i45) + (i46 * i47) + (i48 * i49) + (i50 * i51) + (i52 * i53) + (i54 * i55) + (i56 * i57) + (i58 * i59) + (i60 * i61) + (i62 * i63) + (i64 * i65) + (i66 * i67) + (i68 * i69) + (i70 * i71) + (i72 * i73) + (i74 * i75) + (i76 * i77) + (i78 * i79) + (i80 * i81) + (i82 * i83) + (i84 * i85) + (i86 * i87) + (i88 * i89) + (i90 * i91) + (i92 * i93) + (i94 * i95) + (i96 * i97) + (i98 * i99) + (i100 * i101) + (i102 * i103) + (i104 * i105) + (i106 * i107) + (i108 * i109) + (i110 * i111) + (i112 * i113) + (i114 * i115) + (i116 * i117) + (i118 * i119) + (i120 * i121) + (i122 * i123) + (i124 * i125) + (i126 * i127) + (i128 * i129) + (i130 * i131) + (i132 * i133) + (i134 * i135) + (i136 * i137) + (i138 * i139) + (i140 * i141) + (i142 * i143) + (i144 * i145) + (i146 * i147) + (i148 * i149) + (i150 * i151) + (i152 * i153) + (i154 * i155) + (i156 * i157) + (i158 * i159) + (i160 * i161) + (i162 * i163) + (i164 * i165) + (i166 * i167) + (i168 * i169) + (i170 * i171) + (i172 * i173) + (i174 * i175) + (i176 * i177) + (i178 * i179) + (i180 * i181) + (i182 * i183) + (i184 * i185) + (i186 * i187) + (i188 * i189) + (i190 * i191) + (i192 * i193) + (i194 * i195) + (i196 * i197) + (i198 * i199) + (i200 * i201) + (i202 * i203) + (i204 * i205) + (i206 * i207) + (i208 * i209) + (i210 * i211) + (i212 * i213) + (i214 * i215) + (i216 * i217) + (i218 * i219) + (i220 * i221) + (i222 * i223) + (i224 * i225) + (i226 * i227) + (i228 * i229) + (i230 * i231) + (i232 * i233) + (i234 * i235) + (i236 * i237) + (i238 * i239) + (i240 * i241) + (i242 * i243) + (i244 * i245) + (i246 * i247) + (i248 * i249) + (i250 * i251) + 
(i252 * i253) + (i254 * i255) + (i256 * i257) + (i258 * i259) + (i260 * i261) + (i262 * i263) + (i264 * i265) + (i266 * i267) + (i268 * i269) + (i270 * i271) + (i272 * i273) + (i274 * i275) + (i276 * i277) + (i278 * i279) + (i280 * i281) + (i282 * i283) + (i284 * i285) + (i286 * i287) + (i288 * i289) + (i290 * i291) + (i292 * i293) + (i294 * i295) + (i296 * i297)))}}(InplaceDimShuffle{x}.0, TensorConstant{[ 1.  1.  ...  2.  1.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 0.  0.  ...  1.  1.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 2.  0.  ...  3.  1.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 1.  1.  ...  0.  0.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 1.  2.  ...  0.  1.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 1.  2.  ...  3.  1.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 2.  2.  ...  1.  1.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 3.  1.  ...  1.  1.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 1.  0.  ...  5.  1.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 0.  1.  ...  2.  1.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 1.  1.  ...  3.  1.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 1.  2.  ...  0.  2.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 0.  2.  ...  1.  0.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 2.  1.  ...  1.  0.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 1.  1.  ...  1.  1.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 0.  0.  ...  1.  0.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 2.  0.  ...  1.  0.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 0.  1.  ...  0.  1.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 0.  1.  ...  1.  0.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 1.  0.  ...  2.  1.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 1.  2.  ...  2.  2.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 0.  3.  ...  1.  0.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 0.  2.  ...  1.  2.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 2.  1.  ...  2.  1.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 0.  1.  ...  1.  
2.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 0.  3.  ...  1.  1.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 3.  1.  ...  1.  1.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 0.  0.  ...  2.  2.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 0.  1.  ...  2.  0.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 1.  3.  ...  0.  1.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 1.  2.  ...  1.  1.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 2.  1.  ...  3.  1.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 2.  0.  ...  0.  1.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 1.  2.  ...  1.  1.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 2.  1.  ...  1.  1.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 1.  0.  ...  0.  0.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 1.  0.  ...  1.  2.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 1.  2.  ...  1.  0.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 2.  2.  ...  0.  1.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 1.  0.  ...  1.  1.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 0.  1.  ...  2.  1.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 3.  0.  ...  0.  3.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 1.  4.  ...  2.  1.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 1.  0.  ...  0.  0.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 1.  2.  ...  1.  0.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 0.  0.  ...  2.  0.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 0.  1.  ...  1.  0.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 1.  0.  ...  3.  0.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 0.  2.  ...  0.  1.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 0.  2.  ...  1.  2.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 0.  0.  ...  0.  0.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 0.  0.  ...  0.  1.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 2.  1.  ...  3.  2.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 1.  1.  ...  0.  1.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 1.  0.  ...  0.  1.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 2.  3.  ...  1.  
2.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 0.  2.  ...  1.  0.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 1.  0.  ...  2.  2.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 0.  2.  ...  1.  1.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 0.  0.  ...  2.  1.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 1.  2.  ...  0.  0.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 0.  1.  ...  1.  0.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 3.  0.  ...  1.  2.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 1.  2.  ...  1.  0.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 0.  1.  ...  1.  1.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 0.  1.  ...  0.  0.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 2.  1.  ...  0.  0.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 0.  2.  ...  0.  0.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 3.  0.  ...  2.  1.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 0.  1.  ...  0.  0.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 0.  1.  ...  0.  1.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 0.  1.  ...  0.  1.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 1.  1.  ...  0.  2.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 2.  1.  ...  1.  0.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 1.  1.  ...  0.  0.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 1.  0.  ...  1.  0.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 0.  2.  ...  0.  0.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 1.  1.  ...  0.  0.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 3.  1.  ...  0.  0.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 1.  1.  ...  1.  2.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 1.  1.  ...  2.  1.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 1.  3.  ...  0.  4.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 2.  1.  ...  0.  2.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 1.  1.  ...  2.  0.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 0.  1.  ...  1.  0.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 0.  3.  ...  2.  3.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 0.  3.  ...  1.  
1.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 1.  2.  ...  0.  0.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 3.  3.  ...  2.  0.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 0.  0.  ...  1.  3.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 0.  1.  ...  4.  0.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 0.  2.  ...  2.  0.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 2.  2.  ...  4.  3.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 1.  3.  ...  1.  0.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 4.  2.  ...  0.  0.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 2.  2.  ...  1.  0.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 0.  1.  ...  2.  0.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 1.  1.  ...  2.  0.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 2.  1.  ...  3.  0.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 3.  0.  ...  0.  4.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 0.  1.  ...  0.  1.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 1.  0.  ...  1.  0.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 1.  1.  ...  2.  1.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 1.  2.  ...  4.  0.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 1.  0.  ...  0.  0.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 1.  1.  ...  1.  0.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 2.  1.  ...  4.  1.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 1.  2.  ...  0.  4.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 0.  2.  ...  1.  1.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 0.  2.  ...  1.  1.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 1.  4.  ...  2.  2.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 2.  1.  ...  0.  0.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 2.  2.  ...  0.  1.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 1.  0.  ...  1.  1.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 0.  1.  ...  1.  2.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 1.  1.  ...  0.  0.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 2.  1.  ...  1.  0.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 4.  0.  ...  2.  
0.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 1.  0.  ...  1.  1.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 1.  3.  ...  0.  0.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 1.  1.  ...  0.  1.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 0.  5.  ...  1.  1.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 0.  0.  ...  0.  3.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 1.  0.  ...  0.  0.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 0.  1.  ...  2.  1.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 1.  3.  ...  2.  0.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 1.  1.  ...  0.  2.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 1.  1.  ...  1.  1.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 1.  2.  ...  2.  2.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 1.  1.  ...  1.  1.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 0.  1.  ...  1.  0.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 3.  1.  ...  0.  1.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 0.  0.  ...  4.  0.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 1.  1.  ...  0.  3.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 1.  1.  ...  0.  1.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 1.  1.  ...  2.  0.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 0.  3.  ...  0.  1.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 1.  2.  ...  0.  1.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 1.  1.  ...  0.  1.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 2.  0.  ...  2.  1.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 0.  1.  ...  1.  2.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 1.  0.  ...  2.  1.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 2.  0.  ...  2.  0.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 0.  2.  ...  1.  0.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 1.  2.  ...  1.  0.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 1.  3.  ...  2.  1.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 1.  2.  ...  0.  0.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 2.  1.  ...  0.  0.]}, InplaceDimShuffle{x}.0, TensorConstant{[ 0.  0.  ...  0.  
1.]}), '\n', 'Compilation failed (return status=1): /Users/eddericugaddan/.theano/compiledir_Darwin-18.2.0-x86_64-i386-64bit-i386-3.6.0-64/tmpuxmwtlie/mod.cpp:25346:32: fatal error: bracket nesting level exceeded maximum of 256.         if (!PyErr_Occurred()) {.                                ^. /Users/eddericugaddan/.theano/compiledir_Darwin-18.2.0-x86_64-i386-64bit-i386-3.6.0-64/tmpuxmwtlie/mod.cpp:25346:32: note: use -fbracket-depth=N to increase maximum nesting level. 1 error generated.. ', '[Elemwise{Composite{exp(((i0 * i1) + (i2 * i3) + (i4 * i5) + (i6 * i7) + (i8 * i9) + (i10 * i11) + (i12 * i13) + (i14 * i15) + (i16 * i17) + (i18 * i19) + (i20 * i21) + (i22 * i23) + (i24 * i25) + (i26 * i27) + (i28 * i29) + (i30 * i31) + (i32 * i33) + (i34 * i35) + (i36 * i37) + (i38 * i39) + (i40 * i41) + (i42 * i43) + (i44 * i45) + (i46 * i47) + (i48 * i49) + (i50 * i51) + (i52 * i53) + (i54 * i55) + (i56 * i57) + (i58 * i59) + (i60 * i61) + (i62 * i63) + (i64 * i65) + (i66 * i67) + (i68 * i69) + (i70 * i71) + (i72 * i73) + (i74 * i75) + (i76 * i77) + (i78 * i79) + (i80 * i81) + (i82 * i83) + (i84 * i85) + (i86 * i87) + (i88 * i89) + (i90 * i91) + (i92 * i93) + (i94 * i95) + (i96 * i97) + (i98 * i99) + (i100 * i101) + (i102 * i103) + (i104 * i105) + (i106 * i107) + (i108 * i109) + (i110 * i111) + (i112 * i113) + (i114 * i115) + (i116 * i117) + (i118 * i119) + (i120 * i121) + (i122 * i123) + (i124 * i125) + (i126 * i127) + (i128 * i129) + (i130 * i131) + (i132 * i133) + (i134 * i135) + (i136 * i137) + (i138 * i139) + (i140 * i141) + (i142 * i143) + (i144 * i145) + (i146 * i147) + (i148 * i149) + (i150 * i151) + (i152 * i153) + (i154 * i155) + (i156 * i157) + (i158 * i159) + (i160 * i161) + (i162 * i163) + (i164 * i165) + (i166 * i167) + (i168 * i169) + (i170 * i171) + (i172 * i173) + (i174 * i175) + (i176 * i177) + (i178 * i179) + (i180 * i181) + (i182 * i183) + (i184 * i185) + (i186 * i187) + (i188 * i189) + (i190 * i191) + (i192 * i193) + (i194 * i195) + (i196 * i197) + 
(i198 * i199) + (i200 * i201) + (i202 * i203) + (i204 * i205) + (i206 * i207) + (i208 * i209) + (i210 * i211) + (i212 * i213) + (i214 * i215) + (i216 * i217) + (i218 * i219) + (i220 * i221) + (i222 * i223) + (i224 * i225) + (i226 * i227) + (i228 * i229) + (i230 * i231) + (i232 * i233) + (i234 * i235) + (i236 * i237) + (i238 * i239) + (i240 * i241) + (i242 * i243) + (i244 * i245) + (i246 * i247) + (i248 * i249) + (i250 * i251) + (i252 * i253) + (i254 * i255) + (i256 * i257) + (i258 * i259) + (i260 * i261) + (i262 * i263) + (i264 * i265) + (i266 * i267) + (i268 * i269) + (i270 * i271) + (i272 * i273) + (i274 * i275) + (i276 * i277) + (i278 * i279) + (i280 * i281) + (i282 * i283) + (i284 * i285) + (i286 * i287) + (i288 * i289) + (i290 * i291) + (i292 * i293) + (i294 * i295) + (i296 * i297)))}}(<TensorType(float64, (True,))>, TensorConstant{[ 1.  1.  ...  2.  1.]}, <TensorType(float64, (True,))>, TensorConstant{[ 0.  0.  ...  1.  1.]}, <TensorType(float64, (True,))>, TensorConstant{[ 2.  0.  ...  3.  1.]}, <TensorType(float64, (True,))>, TensorConstant{[ 1.  1.  ...  0.  0.]}, <TensorType(float64, (True,))>, TensorConstant{[ 1.  2.  ...  0.  1.]}, <TensorType(float64, (True,))>, TensorConstant{[ 1.  2.  ...  3.  1.]}, <TensorType(float64, (True,))>, TensorConstant{[ 2.  2.  ...  1.  1.]}, <TensorType(float64, (True,))>, TensorConstant{[ 3.  1.  ...  1.  1.]}, 

...

I am able to make it work if I drop the number of predictors, but I wouldn’t want to for the sake of causal inference (i.e. satisfying the backdoor criterion, etc.). Do you think there’s a way around this? Thanks a lot!

Regards,
Edderic

Building the model with a Python for loop like that is quite inefficient, since it creates one scalar random variable per predictor; try something like this instead:

# Vectorized alternative: a single (149, 1) coefficient vector instead of
# 149 separate scalar RVs, so the graph is one dot product rather than a
# huge chained sum.
X = fake_data.iloc[:, :-1].values
with pm.Model() as model:
    # beta ~ Normal(0, 2), one coefficient per predictor column.
    beta = pm.Normal('beta', mu=0., sd=2., shape=(X.shape[1], 1))
    summation = pm.math.dot(X, beta)
    # Poisson likelihood with a log link on the linear predictor.
    # NOTE(review): exp of the unscaled linear predictor can overflow,
    # which is the numerical issue diagnosed further down the thread —
    # scaling X is the suggested remedy.
    pm.Poisson(
        name='course_topic_interest_culture_count',
        mu=np.exp(summation),
        observed=fake_data['col_149']
    )
    
    traces = pm.sample()

Thanks for the response! I tried running the above, and it seemed to make more progress than last time, but now I’m getting a different failure:

INFO (theano.gof.compilelock): Refreshing lock /Users/eddericugaddan/.theano/compiledir_Darwin-18.2.0-x86_64-i386-64bit-i386-3.6.0-64/lock_dir/lock
Auto-assigning NUTS sampler...
Initializing NUTS using jitter+adapt_diag...
INFO (theano.gof.compilelock): Refreshing lock /Users/eddericugaddan/.theano/compiledir_Darwin-18.2.0-x86_64-i386-64bit-i386-3.6.0-64/lock_dir/lock
Multiprocess sampling (4 chains in 4 jobs)
NUTS: [beta]
Sampling 4 chains:  18%|█▊        | 723/4000 [01:03<04:45, 11.48draws/s]  

---------------------------------------------------------------------------
RemoteTraceback                           Traceback (most recent call last)
RemoteTraceback: 
"""
Traceback (most recent call last):
  File "/Users/eddericugaddan/anaconda3/lib/python3.6/site-packages/pymc3/parallel_sampling.py", line 73, in run
    self._start_loop()
  File "/Users/eddericugaddan/anaconda3/lib/python3.6/site-packages/pymc3/parallel_sampling.py", line 113, in _start_loop
    point, stats = self._compute_point()
  File "/Users/eddericugaddan/anaconda3/lib/python3.6/site-packages/pymc3/parallel_sampling.py", line 139, in _compute_point
    point, stats = self._step_method.step(self._point)
  File "/Users/eddericugaddan/anaconda3/lib/python3.6/site-packages/pymc3/step_methods/arraystep.py", line 247, in step
    apoint, stats = self.astep(array)
  File "/Users/eddericugaddan/anaconda3/lib/python3.6/site-packages/pymc3/step_methods/hmc/base_hmc.py", line 115, in astep
    self.potential.raise_ok(self._logp_dlogp_func._ordering.vmap)
  File "/Users/eddericugaddan/anaconda3/lib/python3.6/site-packages/pymc3/step_methods/hmc/quadpotential.py", line 201, in raise_ok
    raise ValueError('\n'.join(errmsg))
ValueError: Mass matrix contains zeros on the diagonal. 
The derivative of RV `beta`.ravel()[4] is zero.
The derivative of RV `beta`.ravel()[32] is zero.
The derivative of RV `beta`.ravel()[38] is zero.
The derivative of RV `beta`.ravel()[47] is zero.
The derivative of RV `beta`.ravel()[62] is zero.
The derivative of RV `beta`.ravel()[67] is zero.
The derivative of RV `beta`.ravel()[80] is zero.
The derivative of RV `beta`.ravel()[96] is zero.
The derivative of RV `beta`.ravel()[105] is zero.
The derivative of RV `beta`.ravel()[107] is zero.
The derivative of RV `beta`.ravel()[112] is zero.
The derivative of RV `beta`.ravel()[132] is zero.
The derivative of RV `beta`.ravel()[139] is zero.
"""

The above exception was the direct cause of the following exception:

ValueError                                Traceback (most recent call last)
ValueError: Mass matrix contains zeros on the diagonal. 
The derivative of RV `beta`.ravel()[4] is zero.
The derivative of RV `beta`.ravel()[32] is zero.
The derivative of RV `beta`.ravel()[38] is zero.
The derivative of RV `beta`.ravel()[47] is zero.
The derivative of RV `beta`.ravel()[62] is zero.
The derivative of RV `beta`.ravel()[67] is zero.
The derivative of RV `beta`.ravel()[80] is zero.
The derivative of RV `beta`.ravel()[96] is zero.
The derivative of RV `beta`.ravel()[105] is zero.
The derivative of RV `beta`.ravel()[107] is zero.
The derivative of RV `beta`.ravel()[112] is zero.
The derivative of RV `beta`.ravel()[132] is zero.
The derivative of RV `beta`.ravel()[139] is zero.

The above exception was the direct cause of the following exception:

RuntimeError                              Traceback (most recent call last)
<ipython-input-297-992e789ace0a> in <module>()
      9     )
     10 
---> 11     traces = pm.sample()

~/anaconda3/lib/python3.6/site-packages/pymc3/sampling.py in sample(draws, step, init, n_init, start, trace, chain_idx, chains, cores, tune, nuts_kwargs, step_kwargs, progressbar, model, random_seed, live_plot, discard_tuned_samples, live_plot_kwargs, compute_convergence_checks, use_mmap, **kwargs)
    447             _print_step_hierarchy(step)
    448             try:
--> 449                 trace = _mp_sample(**sample_args)
    450             except pickle.PickleError:
    451                 _log.warning("Could not pickle model, sampling singlethreaded.")

~/anaconda3/lib/python3.6/site-packages/pymc3/sampling.py in _mp_sample(draws, tune, step, chains, cores, chain, random_seed, start, progressbar, trace, model, use_mmap, **kwargs)
    997         try:
    998             with sampler:
--> 999                 for draw in sampler:
   1000                     trace = traces[draw.chain - chain]
   1001                     if trace.supports_sampler_stats and draw.stats is not None:

~/anaconda3/lib/python3.6/site-packages/pymc3/parallel_sampling.py in __iter__(self)
    303 
    304         while self._active:
--> 305             draw = ProcessAdapter.recv_draw(self._active)
    306             proc, is_last, draw, tuning, stats, warns = draw
    307             if self._progress is not None:

~/anaconda3/lib/python3.6/site-packages/pymc3/parallel_sampling.py in recv_draw(processes, timeout)
    221         if msg[0] == 'error':
    222             old = msg[1]
--> 223             six.raise_from(RuntimeError('Chain %s failed.' % proc.chain), old)
    224         elif msg[0] == 'writing_done':
    225             proc._readable = True

~/anaconda3/lib/python3.6/site-packages/six.py in raise_from(value, from_value)

RuntimeError: Chain 0 failed.

This is likely a numerical issue coming from the exp: even a moderately large linear predictor overflows quickly (for example, np.exp(30) is already about 1.07e13), which makes the gradient vanish for some coefficients. Try standardizing/scaling the input matrix X before fitting.

Thanks, @junpenglao! That worked. Really appreciate the help!

Cheers,
Edderic