I am trying to use PyMC to infer the parameters of a model of an artificial neuron. For this I use the NEURON simulator: I generate the observation data with it, and I also use it to compute the mu of the likelihood that is then sampled with pm.sample(). To do that I needed to wrap the simulation in a custom function via as_op.
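For reference, the as_op pattern I am relying on simply wraps a plain Python function as a PyTensor Op (without a gradient). A minimal standalone sketch, with a dummy function standing in for the NEURON simulation (the names here are illustrative, not from my real code), looks like this; my full code follows below:

import numpy as np
import pytensor.tensor as at
from pytensor.compile.ops import as_op

@as_op(itypes=[at.dscalar, at.dscalar], otypes=[at.dvector])
def dummy_simulator(a, b):
    # Stand-in for the NEURON run: maps two scalar parameters to a "trace" vector
    return np.asarray(a) * np.ones(5) + np.asarray(b)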
import pymc as pm
import pytensor.tensor as at
import arviz as az
from pytensor.compile.ops import as_op
from neuron import h, gui
import numpy as np
from matplotlib import pyplot as plt
def one_compartment(cm, gpas, dt=0.1, stype='broad', custom_stim=None):
    soma = h.Section(name='soma')
    soma.L = soma.diam = 50  # it is a sphere
    soma.v = -65
    soma.cm = cm  # parameter to infer

    # Insert passive conductance
    soma.insert('pas')
    soma.g_pas = gpas  # parameter to infer
    soma.e_pas = -65

    h.dt = dt  # time step (iteration)
    h.steps_per_ms = 1 / dt

    # Creating the stimulus
    # Here we define five kinds of experimental protocols:
    # 1.) broad electrode current
    # 2.) narrow electrode current
    # 3.) both
    # 4.) current steps
    # 5.) a custom stimulus played from a vector
    if stype == 'broad':
        h.tstop = 300
        stim = h.IClamp(soma(0.5))
        stim.delay = 20
        stim.amp = 0.1
        stim.dur = 175
    elif stype == 'narrow':
        h.tstop = 100
        stim = h.IClamp(soma(0.5))
        stim.delay = 10
        stim.amp = 0.5
        stim.dur = 5
    elif stype == 'both':
        h.tstop = 400
        stim1 = h.IClamp(soma(0.5))
        stim1.delay = 10
        stim1.amp = 0.5
        stim1.dur = 5
        stim2 = h.IClamp(soma(0.5))
        stim2.delay = 120
        stim2.amp = 0.1
        stim2.dur = 175
    elif stype == 'steps':
        h.tstop = 500
        stim1 = h.IClamp(soma(0.5))
        stim1.delay = 10
        stim1.amp = 0.5
        stim1.dur = 5
        stim2 = h.IClamp(soma(0.5))
        stim2.delay = 120
        stim2.amp = 0.1
        stim2.dur = 175
        stim3 = h.IClamp(soma(0.5))
        stim3.delay = 400
        stim3.amp = 0.35
        stim3.dur = 5
    elif stype == 'custom':
        h.tstop = len(custom_stim) * dt
        h.load_file("vplay.hoc")
        vec = h.Vector(custom_stim)
        istim = h.IClamp(soma(0.5))
        vec.play(istim._ref_amp, h.dt)
        istim.delay = 0   # just for NEURON
        istim.dur = 1e9   # just for NEURON

    # Print information
    # h.psection()

    # Set up recording vectors
    v_vec = h.Vector()  # membrane potential vector
    t_vec = h.Vector()  # time stamp vector
    v_vec.record(soma(0.5)._ref_v)
    t_vec.record(h._ref_t)

    # Run the simulation
    h.v_init = -65
    h.finitialize(h.v_init)  # starting membrane potential
    h.init()
    h.run()

    t = t_vec.to_python()
    v = v_vec.to_python()
    return np.array(t), np.array(v)
def white(sigma, v_vec, mu=0):
    """Return the given array with added white noise, as a numpy array."""
    noise_signal = np.random.normal(mu, sigma, len(v_vec))
    exp_v = np.add(v_vec, noise_signal)
    return np.array(exp_v)
for sec in h.allsec():
    sec.Ra = 150

h('forall {nseg = int((L/(0.1*lambda_f(100))+.9)/2)*2 + 1}')  # if Ra_max = 105, dend.nseg = 21 and soma.nseg = 1
t, v = one_compartment(cm=2., gpas=0.0001)
obs = white(3,v)
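As a quick sanity check of the synthetic data (not part of the inference itself), I can plot the noisy trace on top of the clean simulation:

plt.plot(t, v, label='clean simulation')
plt.plot(t, obs, alpha=0.5, label='observation (white noise, sigma=3)')
plt.xlabel('time (ms)')
plt.ylabel('membrane potential (mV)')
plt.legend()
plt.show()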
@as_op(itypes=[at.dscalar, at.dscalar], otypes=[at.dvector])
def one_compartment_(cm, gpas, dt=0.1, stype='broad', custom_stim=None):
    soma = h.Section(name='soma')
    soma.L = soma.diam = 50  # it is a sphere
    soma.v = -65
    soma.cm = cm  # parameter to infer

    # Insert passive conductance
    soma.insert('pas')
    soma.g_pas = gpas  # parameter to infer
    soma.e_pas = -65

    h.dt = dt  # time step (iteration)
    h.steps_per_ms = 1 / dt

    # Creating the stimulus
    # Here we define five kinds of experimental protocols:
    # 1.) broad electrode current
    # 2.) narrow electrode current
    # 3.) both
    # 4.) current steps
    # 5.) a custom stimulus played from a vector
    if stype == 'broad':
        h.tstop = 300
        stim = h.IClamp(soma(0.5))
        stim.delay = 20
        stim.amp = 0.1
        stim.dur = 175
    elif stype == 'narrow':
        h.tstop = 100
        stim = h.IClamp(soma(0.5))
        stim.delay = 10
        stim.amp = 0.5
        stim.dur = 5
    elif stype == 'both':
        h.tstop = 400
        stim1 = h.IClamp(soma(0.5))
        stim1.delay = 10
        stim1.amp = 0.5
        stim1.dur = 5
        stim2 = h.IClamp(soma(0.5))
        stim2.delay = 120
        stim2.amp = 0.1
        stim2.dur = 175
    elif stype == 'steps':
        h.tstop = 500
        stim1 = h.IClamp(soma(0.5))
        stim1.delay = 10
        stim1.amp = 0.5
        stim1.dur = 5
        stim2 = h.IClamp(soma(0.5))
        stim2.delay = 120
        stim2.amp = 0.1
        stim2.dur = 175
        stim3 = h.IClamp(soma(0.5))
        stim3.delay = 400
        stim3.amp = 0.35
        stim3.dur = 5
    elif stype == 'custom':
        h.tstop = len(custom_stim) * dt
        h.load_file("vplay.hoc")
        vec = h.Vector(custom_stim)
        istim = h.IClamp(soma(0.5))
        vec.play(istim._ref_amp, h.dt)
        istim.delay = 0   # just for NEURON
        istim.dur = 1e9   # just for NEURON

    # Print information
    # h.psection()

    # Set up recording vectors
    v_vec = h.Vector()  # membrane potential vector
    t_vec = h.Vector()  # time stamp vector
    v_vec.record(soma(0.5)._ref_v)
    t_vec.record(h._ref_t)

    # Run the simulation
    h.v_init = -65
    h.finitialize(h.v_init)  # starting membrane potential
    h.init()
    h.run()

    t = t_vec.to_python()
    v = v_vec.to_python()
    return np.array(v)  # only the voltage vector is returned, matching otypes=[at.dvector]
with pm.Model() as model:
    # Weakly informative priors on the parameters to infer (relatively wide sigma)
    cm = pm.Normal('cm', mu=3, sigma=0.5)
    gpas = pm.Normal('gpas', mu=0.00011, sigma=0.000009)

    mu = one_compartment_(cm, gpas)
    sigma = 3  # Should this be a HalfNormal distribution, as in the examples? It also matters: the smaller it is, the more probable the prediction becomes.

    likelihood = pm.Normal('likelihood', mu=mu, sigma=sigma, observed=obs)

    # step = pm.Slice([cm, gpas])  # PyMC picks this automatically
    start = {"cm": 3, "gpas": 0.00011}
    trace = pm.sample(draws=1000, tune=1000, chains=1, initvals=start)
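As a side note, the variant I am asking about in the sigma comment above, with a HalfNormal prior on the noise scale instead of a fixed value, would look roughly like this (only a sketch, I have not run it, and the HalfNormal scale of 5 is just a guess):

with pm.Model() as model_halfnormal:
    cm = pm.Normal('cm', mu=3, sigma=0.5)
    gpas = pm.Normal('gpas', mu=0.00011, sigma=0.000009)
    sigma = pm.HalfNormal('sigma', sigma=5)  # noise scale as a free parameter
    mu = one_compartment_(cm, gpas)
    likelihood = pm.Normal('likelihood', mu=mu, sigma=sigma, observed=obs)
    trace = pm.sample(draws=1000, tune=1000, chains=1)

Either way, once sampling runs I would inspect the result with az.plot_trace(trace) and az.summary(trace).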
If I run the original code above, I get this error message:
ValueError Traceback (most recent call last)
Cell In[27], line 8
5 mu = one_compartment_(cm, gpas)
6 sigma = cov_mat1 # Is this should be a half normal distribution as it is in the examples? This should also change as if it less, then the predition is more probable.
----> 8 likelihood = pm.Normal('likelihood', mu=mu, sigma=sigma, observed=obs_col)
10 # step = pm.Slice([cm, gpas]) # It uses this automatically
12 start = {"cm": 3, "gpas": 0.00011}
File ~\anaconda3\lib\site-packages\pymc\distributions\distribution.py:310, in Distribution.__new__(cls, name, rng, dims, initval, observed, total_size, transform, *args, **kwargs)
307 elif observed is not None:
308 kwargs["shape"] = tuple(observed.shape)
--> 310 rv_out = cls.dist(*args, **kwargs)
312 rv_out = model.register_rv(
313 rv_out,
314 name,
(...)
319 initval=initval,
320 )
322 # add in pretty-printing support
File ~\anaconda3\lib\site-packages\pymc\distributions\continuous.py:513, in Normal.dist(cls, mu, sigma, tau, **kwargs)
507 sigma = at.as_tensor_variable(sigma)
509 # tau = at.as_tensor_variable(tau)
510 # mean = median = mode = mu = at.as_tensor_variable(floatX(mu))
511 # variance = 1.0 / self.tau
--> 513 return super().dist([mu, sigma], **kwargs)
File ~\anaconda3\lib\site-packages\pymc\distributions\distribution.py:389, in Distribution.dist(cls, dist_params, shape, **kwargs)
387 ndim_supp = cls.rv_op(*dist_params, **kwargs).owner.op.ndim_supp
388 create_size = find_size(shape=shape, size=size, ndim_supp=ndim_supp)
--> 389 rv_out = cls.rv_op(*dist_params, size=create_size, **kwargs)
391 rv_out.logp = _make_nice_attr_error("rv.logp(x)", "pm.logp(rv, x)")
392 rv_out.logcdf = _make_nice_attr_error("rv.logcdf(x)", "pm.logcdf(rv, x)")
File ~\anaconda3\lib\site-packages\pytensor\tensor\random\basic.py:268, in NormalRV.__call__(self, loc, scale, size, **kwargs)
247 def __call__(self, loc=0.0, scale=1.0, size=None, **kwargs):
248 r"""Draw samples from a normal distribution.
249
250 Signature
(...)
266
267 """
--> 268 return super().__call__(loc, scale, size=size, **kwargs)
File ~\anaconda3\lib\site-packages\pytensor\tensor\random\op.py:289, in RandomVariable.__call__(self, size, name, rng, dtype, *args, **kwargs)
288 def __call__(self, *args, size=None, name=None, rng=None, dtype=None, **kwargs):
--> 289 res = super().__call__(rng, size, dtype, *args, **kwargs)
291 if name is not None:
292 res.name = name
File ~\anaconda3\lib\site-packages\pytensor\graph\op.py:295, in Op.__call__(self, *inputs, **kwargs)
253 r"""Construct an `Apply` node using :meth:`Op.make_node` and return its outputs.
254
255 This method is just a wrapper around :meth:`Op.make_node`.
(...)
292
293 """
294 return_list = kwargs.pop("return_list", False)
--> 295 node = self.make_node(*inputs, **kwargs)
297 if config.compute_test_value != "off":
298 compute_test_value(node)
File ~\anaconda3\lib\site-packages\pytensor\tensor\random\op.py:334, in RandomVariable.make_node(self, rng, size, dtype, *dist_params)
329 elif not isinstance(rng.type, RandomType):
330 raise TypeError(
331 "The type of rng should be an instance of either RandomGeneratorType or RandomStateType"
332 )
--> 334 shape = self._infer_shape(size, dist_params)
335 _, static_shape = infer_static_shape(shape)
336 dtype = self.dtype or dtype
File ~\anaconda3\lib\site-packages\pytensor\tensor\random\op.py:201, in RandomVariable._infer_shape(self, size, dist_params, param_shapes)
199 param_batched_dims = getattr(param, "ndim", 0) - param_ndim_supp
200 if param_batched_dims > size_len:
--> 201 raise ValueError(
202 f"Size length is incompatible with batched dimensions of parameter {i} {param}:\n"
203 f"len(size) = {size_len}, len(batched dims {param}) = {param_batched_dims}. "
204 f"Size length must be 0 or >= {param_batched_dims}"
205 )
207 if self.ndim_supp == 0:
208 return size
ValueError: Size length is incompatible with batched dimensions of parameter 1 TensorConstant{[[0.2 ..2 ]]}:
len(size) = 1, len(batched dims TensorConstant{[[0.2 ..2 ]]}) = 2. Size length must be 0 or >= 2
Thanks in advance for your help, @ricardoV94!