So I tried the following:
data = np.array([1.73, 1.59, 1.64, 1.58])
with pm.Model(coords={"cluster": range(k)}) as model_mix:
μ = pm.Normal(
"μ",
mu=0,
sigma=.01,
transform=pm.distributions.transforms.ordered,
initval=[1.72, 1.78, 1.88],
dims="cluster",
)
σ = pm.HalfNormal("σ", sigma=np.array([.025, .025, 0.01]), dims="cluster")
weights = pm.Dirichlet("w", np.ones(k), dims="cluster")
pred = pm.NormalMixture("x", w=weights, mu=μ, sigma=σ, observed=data)
trace_mix = pm.sample()
with model_mix:
trace_mix.extend(pm.sample_prior_predictive())
trace_mix.extend(pm.sample_posterior_predictive(trace_mix))
plt.hist(trace_mix['prior_predictive']['x'].to_numpy().reshape(-1))
However, sampling from the prior predictive as above just gives a normal-like distribution with a mean of 0. So something isn’t right.
Curious what the difference is between the formulation above using NormalMixture versus the example from here.
I would probably have to specify 2 priors for each of the distributions under this formulation (so 6 priors in total):
# 2-Mixture Poisson using iterable of distributions.
with pm.Model() as model:
lam1 = pm.Exponential('lam1', lam=1)
lam2 = pm.Exponential('lam2', lam=1)
pois1 = pm.Poisson.dist(mu=lam1)
pois2 = pm.Poisson.dist(mu=lam2)
w = pm.Dirichlet('w', a=np.array([1, 1]))
like = pm.Mixture('like', w=w, comp_dists = [pois1, pois2], observed=data)