Hi Ricardo,
Thanks - your code essentially works for me.
Below is my code checking your solution, for future reference.
Still, I didn’t manage to do it while retaining the “PowerTransformed” layer of abstraction…
import numpy as np
import pymc
def sqrt_invgamma_dist(alpha, beta, size=None):
    return pymc.InverseGamma.dist(alpha=alpha, beta=beta, size=size) ** 0.5
# Check we get the covariance matrix with the desired variances
with pymc.Model():
    n = 4
    ig_alpha = 3.5
    ig_beta = 50
    # distribution of standard deviations as square roots of inverse-gamma variances
    sd_dist = pymc.CustomDist.dist(ig_alpha, ig_beta, dist=sqrt_invgamma_dist, shape=n)
    chol, corr, sigmas = pymc.LKJCholeskyCov(
        "chol_cov",
        n=n,
        eta=1,
        sd_dist=sd_dist,
        compute_corr=True,
        store_in_trace=True,
    )
    Sigma = pymc.Deterministic("Sigma", chol @ chol.T)  # distribution of covariance matrix
res = pymc.draw(Sigma, draws=100_000, random_seed=42) # sample of covariance matrices
vars = res[:, np.arange(n), np.arange(n)].flatten() # sample of variances, should ~ InverseGamma
print(f"InverseGamma(alpha={ig_alpha}, beta={ig_beta}):")
print(f"expected variances: mean = {ig_beta / (ig_alpha - 1):.3f}, std = {np.sqrt(ig_beta**2 / ((ig_alpha - 1)**2 * (ig_alpha - 2))):.3f}")
print(f"empirical variances : len = {len(vars)} , mean = {vars.mean():.3f}, std = {vars.std():.3f}")
# InverseGamma(alpha=3.5, beta=50):
# expected variances: mean = 20.000, std = 16.330
# empirical variances : len = 400000 , mean = 19.982, std = 16.227
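For completeness, here is a minimal sketch of how this prior could then be plugged into an actual model, e.g. as the covariance of an MvNormal likelihood. The data y, its size N, and the mu prior are made-up placeholders for illustration, not part of the check above:

import numpy as np
import pymc

def sqrt_invgamma_dist(alpha, beta, size=None):
    # standard deviations whose squares follow InverseGamma(alpha, beta)
    return pymc.InverseGamma.dist(alpha=alpha, beta=beta, size=size) ** 0.5

n = 4
N = 200  # hypothetical number of observations
y = np.random.default_rng(0).normal(size=(N, n))  # hypothetical data, illustration only

with pymc.Model():
    sd_dist = pymc.CustomDist.dist(3.5, 50, dist=sqrt_invgamma_dist, shape=n)
    chol, corr, sigmas = pymc.LKJCholeskyCov(
        "chol_cov", n=n, eta=1, sd_dist=sd_dist, compute_corr=True
    )
    mu = pymc.Normal("mu", mu=0, sigma=10, shape=n)
    pymc.MvNormal("y_obs", mu=mu, chol=chol, observed=y)
    # idata = pymc.sample()  # posterior sampling would proceed as usual

Passing chol directly to MvNormal avoids explicitly building Sigma inside the model; the Deterministic above was only needed for the sanity check.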