I believe this is the correct logp:
```python
def logp(value, mu, cov, alpha):
    return (
        pm.math.log(2)
        + pm.logp(pm.MvNormal.dist(mu, cov), value)
        + pm.logcdf(pm.Normal.dist(0, 1), pm.math.dot(value, alpha))
    )
```
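For reference, my reading of Azzalini's multivariate skew-normal density is

$$f(y) = 2\,\phi_k(y - \mu;\, \Omega)\,\Phi\!\left(\alpha^\top \omega^{-1} (y - \mu)\right),$$

where $\phi_k$ is the $k$-dimensional normal density, $\Phi$ is the standard normal CDF, and $\omega = \operatorname{diag}(\Omega)^{1/2}$, so the CDF argument should presumably be centered at $\mu$ rather than taken at the raw value (my edited attempt below does that).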
What would the `random` function be? Would I just convert the `rvs` function above to use `pm.MvNormal.dist`?
Edit: these are my attempts at `logp` and `random`. I am running into a shape mismatch error somewhere:
```python
import pymc as pm
import pytensor.tensor as pt


def logp(value, mu, cov, alpha):
    mv_normal = pm.MvNormal.dist(mu=mu, cov=cov)
    # Log-density of value under the multivariate normal.
    # MvNormal.dist already carries the mean mu, so evaluate at value,
    # not value - mu (that would subtract mu twice).
    log_prob_mv_normal = pm.logp(mv_normal, value)
    # Inverse covariance (note: in Azzalini's parametrization omega is
    # diag(cov)**0.5, not the full matrix inverse).
    omega_inv = pt.nlinalg.matrix_inverse(cov)
    # Argument of the standard normal CDF, ordered as
    # (value - mu) @ omega_inv @ alpha so it also broadcasts when
    # value carries a leading batch dimension.
    arg_cdf = pm.math.dot(value - mu, pm.math.dot(omega_inv, alpha))
    # Log CDF of the argument under the standard normal.
    log_cdf_std_normal = pm.logcdf(pm.Normal.dist(mu=0, sigma=1), arg_cdf)
    # Log of the skew-normal density: log 2 + log phi + log Phi.
    return pm.math.log(2) + log_prob_mv_normal + log_cdf_std_normal
```
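Since the mismatch could be in either function, I have been sanity-checking `logp` on concrete values first; the dimension and numbers here are arbitrary placeholders:

```python
import numpy as np

k = 3
mu_test = np.zeros(k)
cov_test = np.eye(k)
alpha_test = np.array([1.0, -2.0, 0.5])
value_test = np.array([0.2, -0.1, 0.3])

# Should evaluate to a scalar log-density; a shape error here
# means the problem is in logp rather than random.
print(logp(value_test, mu_test, cov_test, alpha_test).eval())
```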
```python
import numpy as np


def random(mu, cov, alpha, rng=None, size=None):
    # CustomDist calls random with concrete NumPy values, so this can be
    # plain NumPy rather than PyTensor (no .eval() needed).
    rng = np.random.default_rng() if rng is None else rng
    aCa = alpha @ cov @ alpha
    delta = (1 / np.sqrt(1 + aCa)) * (cov @ alpha)
    # Augmented (k+1) x (k+1) covariance [[1, delta^T], [delta, cov]].
    cov_star = np.block([
        [np.ones((1, 1)), delta[None, :]],
        [delta[:, None], cov],
    ])
    x = rng.multivariate_normal(np.zeros(len(alpha) + 1), cov_star, size=size)
    x0, x1 = x[..., 0], x[..., 1:]
    # Flip the sign of draws whose latent first coordinate is non-positive.
    x1 = np.where((x0 <= 0)[..., None], -x1, x1)
    return x1 + mu
```
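For context, this is roughly how I plan to wire both functions into `pm.CustomDist`. The priors and data are placeholders, and the `signature` kwarg (or `ndim_supp=1` on older PyMC versions) is my assumption about how to declare vector-valued support, so treat this as a sketch rather than tested code:

```python
import numpy as np
import pymc as pm

k = 3
data = np.random.default_rng(1).normal(size=(100, k))  # placeholder data

with pm.Model():
    mu = pm.Normal("mu", 0, 1, shape=k)
    chol, _, _ = pm.LKJCholeskyCov("chol", n=k, eta=2, sd_dist=pm.Exponential.dist(1.0))
    cov = pm.Deterministic("cov", chol @ chol.T)
    alpha = pm.Normal("alpha", 0, 1, shape=k)

    pm.CustomDist(
        "y",
        mu, cov, alpha,
        logp=logp,
        random=random,
        signature="(k),(k,k),(k)->(k)",  # vector support; older PyMC uses ndim_supp=1
        observed=data,
    )
```

With that in place, `pm.sample_prior_predictive()` would exercise `random` and `pm.sample()` would exercise `logp`, which should narrow down where the shape mismatch originates.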