I see!
I followed your advice and built the log-likelihood from the backend (`pm.math`) operations:
```python
import math

import numpy as np
import pymc3 as pm

# Sample parameters (true values used to generate the data)
N = 20
mu_true = 10.0
sigma_true = 1.0

# Sample generation: each observation is the running maximum of i.i.d. normal draws
obss = []
obs = -math.inf
for _ in range(N):
    aux = mu_true + sigma_true * np.random.randn()
    obs = max(obs, aux)
    obss.append(obs)
obss = np.array(obss)

# PyMC3 model with a custom log-likelihood built from pm.math (Theano) operations
def logp(os, mu, sigma, epsilon=1e-5):
    x_dist = pm.Normal.dist(mu=mu, sigma=sigma)
    log_likelihood = pm.math.constant(0.)
    # Placeholder "previous observation"; the data are far from 0, so the first
    # observation falls in the new-maximum branch, as it should.
    o_n_minus_1 = pm.math.constant(0.)
    eps = pm.math.constant(epsilon)
    for o_n in os:
        # Running maximum unchanged -> the latest draw fell below it (CDF term);
        # running maximum increased -> the latest draw equals the observation (density term).
        log_likelihood += pm.math.switch(
            pm.math.lt(pm.math.abs_(o_n - o_n_minus_1), eps),
            x_dist.logcdf(o_n),
            x_dist.logp(o_n),
        )
        o_n_minus_1 = o_n
    return log_likelihood

with pm.Model() as model:
    mu = pm.Uniform('mu', 0., 20.)
    sigma = pm.Uniform('sigma', 0.5, 1.5)
    Ys = pm.DensityDist('Ys', logp,
                        observed={'os': obss, 'mu': mu, 'sigma': sigma})
    trace = pm.sample(5000, return_inferencedata=True,
                      idata_kwargs={"density_dist_obs": False})

pm.plot_trace(trace, ['mu', 'sigma'])
```
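Optionally, a numeric posterior summary can be printed next to the trace plot (`pm.summary` wraps `arviz.summary` in PyMC3):

```python
# Posterior summary table for the recovered parameters
print(pm.summary(trace, var_names=['mu', 'sigma']))
```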
I think it works!
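As a quick sanity check on the likelihood itself, outside of PyMC3, the same running-maximum log-likelihood can be evaluated with plain NumPy/SciPy at a few parameter values. This is just a sketch, assuming SciPy is available; `running_max_loglik` is an illustrative name, not something from the model above:

```python
import numpy as np
from scipy import stats

def running_max_loglik(os, mu, sigma, epsilon=1e-5):
    # First observation is a plain draw from the underlying normal
    ll = stats.norm.logpdf(os[0], mu, sigma)
    for prev, cur in zip(os[:-1], os[1:]):
        if abs(cur - prev) < epsilon:
            # Maximum unchanged: the new draw fell below it -> CDF term
            ll += stats.norm.logcdf(cur, mu, sigma)
        else:
            # New maximum: the new draw equals the observation -> density term
            ll += stats.norm.logpdf(cur, mu, sigma)
    return ll

# The generated data should typically score higher near the true values, e.g.
# running_max_loglik(obss, 10.0, 1.0) > running_max_loglik(obss, 15.0, 1.0)
```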
