I am working on the implementation of a data fusion method using PyMC3. I asked a question about this before (Creating Data Fusion with PYMC3), and after investigating further I noticed that I had used the wrong distribution (the one I am supposed to use is the hyperbolic secant distribution), so I modified the code to the following:
import math
import warnings

import numpy as np
import pymc3 as pm
import theano.tensor as tt
from scipy import stats

from pymc3 import Interpolated
from pymc3.distributions import Continuous
from pymc3.distributions.continuous import assert_negative_support, get_tau_sigma
from pymc3.distributions.dist_math import bound
from pymc3.distributions.distribution import draw_values, generate_samples


class Hypsecant(Continuous):
    """
    Hyperbolic secant log-likelihood.

    Parameters
    ----------
    mu: float
        Location parameter.
    sigma: float
        Scale parameter (sigma > 0). Only required if lam is not specified.
    lam: float
        Precision parameter (lam > 0). Only required if sigma is not specified.
    """
    def __init__(self, mu=0.0, sigma=None, lam=None, sd=None, *args, **kwargs):
        if sd is not None:
            sigma = sd
            warnings.warn("sd is deprecated, use sigma instead", DeprecationWarning)
        lam, sigma = get_tau_sigma(tau=lam, sigma=sigma)
        self.lam = lam = tt.as_tensor_variable(lam)
        self.sigma = self.sd = sigma = tt.as_tensor_variable(sigma)
        self.mean = self.median = self.mode = self.mu = mu = tt.as_tensor_variable(mu)
        # The standard hyperbolic secant has variance pi**2 / 4, scaled by sigma**2:
        self.variance = (np.pi ** 2 / 4.0) * sigma ** 2
        # mu is a location parameter and may be negative, so only sigma is checked:
        assert_negative_support(sigma, 'sigma (lam)', 'Hypsecant')
        super().__init__(*args, **kwargs)
    def random(self, point=None, size=None):
        """
        Draw random values from Hypsecant distribution.

        Parameters
        ----------
        point: dict, optional
            Dict of variable values on which random values are to be
            conditioned (uses default point if not specified).
        size: int, optional
            Desired size of random sample (returns one sample if not
            specified).

        Returns
        -------
        array
        """
        mu, sigma = draw_values([self.mu, self.sigma], point=point, size=size)
        return generate_samples(stats.hypsecant.rvs, loc=mu, scale=sigma,
                                dist_shape=self.shape, size=size)
    def logp(self, value):
        """
        Calculate log-probability of Hypsecant distribution at specified value.

        Parameters
        ----------
        value: numeric
            Value(s) for which log-probability is calculated. If the log
            probabilities for multiple values are desired the values must be
            provided in a numpy array or theano tensor.

        Returns
        -------
        TensorVariable
        """
        mu = self.mu
        sigma = self.sigma
        # Standardize the value; the pdf is f(x) = sech(z) / (pi * sigma), so
        # logp = -log(pi * sigma) - log(cosh(z)):
        z = (value - mu) / sigma
        logp = -pm.math.log(math.pi * sigma) - pm.math.log(pm.math.cosh(z))
        return bound(logp, sigma > 0)
    def logcdf(self, value):
        """
        Compute the log of the cumulative distribution function for Hypsecant
        distribution at the specified value.

        Parameters
        ----------
        value: numeric
            Value(s) for which log CDF is calculated. If the log CDF for multiple
            values are desired the values must be provided in a numpy array or
            theano tensor.

        Returns
        -------
        TensorVariable
        """
        mu = self.mu
        sigma = self.sigma
        # Standardize the value; the cdf is F(x) = (2 / pi) * arctan(exp(z)):
        z = (value - mu) / sigma
        return bound(pm.math.log(2.0 / math.pi * tt.arctan(tt.exp(z))), sigma > 0)
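To sanity-check the density, here is a standalone snippet I use for verification (not part of the fusion pipeline); the custom logp above should agree with scipy's implementation of the same distribution under the loc/scale parameterization:

# Standalone check: the custom logp should match scipy.stats.hypsecant.logpdf
x_check = np.linspace(-5.0, 5.0, 11)
custom = Hypsecant.dist(mu=0.5, sigma=2.0).logp(x_check).eval()
reference = stats.hypsecant.logpdf(x_check, loc=0.5, scale=2.0)
print(np.allclose(custom, reference))  # should print True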
def from_posterior(param, samples):
    # Assigning linspace variable:
    samples_len = len(samples)
    smin, smax = np.min(samples), np.max(samples)
    width = smax - smin
    x = np.linspace(smin, smax, samples_len)
    # Creating Gaussian KDE:
    y = stats.gaussian_kde(samples)(x)
    # What was never sampled should have a small probability but not 0, so we
    # extend the domain and use a linear approximation of the density on it:
    x = np.concatenate([[x[0] - 3 * width], x, [x[-1] + 3 * width]])
    y = np.concatenate([[0], y, [0]])
    return Interpolated(param, x, y)
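For example (a minimal sketch; `trace` is assumed to come from a model that has a 'sigma_0' variable), calling from_posterior inside a model context returns a symbolic PyMC3 random variable, not an array of numbers:

# from_posterior returns a symbolic RV; printing it shows 'sigma_0 ~ Interpolated'
with pm.Model():
    sigma_prior = from_posterior('sigma_0', trace['sigma_0'])
    print(sigma_prior)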
def S3_0_Bayesian_Interface(data):
    with pm.Model() as model_0:
        # Prior distributions for the unknown model parameters:
        sigma_0 = pm.HalfNormal('sigma_0', sd=5)
        mu_0 = pm.Normal('mu_0', mu=0, sd=5)
        # Likelihood (sampling distribution) of the observations:
        observed_data_0 = Hypsecant('observed_data_0', mu=mu_0, sigma=sigma_0, observed=data)
        # Draw 1000 posterior samples per chain:
        trace_0 = pm.sample(draws=1000, tune=1000, chains=3, cores=1, progressbar=True)
        # Posterior predictive sampling:
        post_pred_0 = pm.sample_posterior_predictive(trace_0, samples=1000)
        print(post_pred_0['observed_data_0'].shape)
        print('\nSummary: ')
        print(pm.stats.summary(data=trace_0))
        print(pm.stats.summary(data=post_pred_0))
    return trace_0, post_pred_0
def S3_P_Bayesian_Interface(mu, sigma, data, trace):
    with pm.Model() as model_P:
        # Obtain the posterior values from the initial model:
        sigma_P = from_posterior('sigma_0', trace['sigma_0'])
        mu_P = from_posterior('mu_0', trace['mu_0'])
        # Prior distributions for the unknown model parameters, built from the
        # posteriors above (note: sigma_P and mu_P are symbolic Interpolated
        # variables at this point, so np.mean() of them is not a numeric mean):
        sigma_P = pm.HalfNormal('sigma_P', sigma=np.mean(sigma_P))
        mu_P = pm.Normal('mu_P', mu=np.mean(mu_P), sigma=np.mean(sigma_P))
        # Likelihood (sampling distribution) of the observations:
        observed_data_P = Hypsecant('observed_data_P', mu=mu_P, sigma=sigma_P, observed=data)
        # Draw 1000 posterior samples per chain:
        trace_P = pm.sample(draws=1000, tune=1000, chains=3, cores=1, progressbar=True)
        # Posterior predictive sampling:
        post_pred_P = pm.sample_posterior_predictive(trace_P, samples=1000)
        print(post_pred_P['observed_data_P'].shape)
        print('\nSummary: ')
        print(pm.stats.summary(data=trace_P))
        print(pm.stats.summary(data=post_pred_P))
    return trace_P, post_pred_P
(S2_0_Bayesian_Interface is identical to S3_0_Bayesian_Interface above; only the name differs.)
def S2_P_Bayesian_Interface(mu, sigma, data, trace):
    with pm.Model() as model_P:
        # Obtain the posterior values from the initial model:
        sigma_P = from_posterior('sigma_0', trace['sigma_0'])
        mu_P = from_posterior('mu_0', trace['mu_0'])
        print(sigma_P)  # this is what prints: sigma_0 ~ Interpolated (see question below)
        # Prior distributions for the unknown model parameters, built from the
        # posteriors above:
        sigma_P = pm.HalfNormal('sigma_P', sigma=np.mean(sigma_P))
        mu_P = pm.Normal('mu_P', mu=np.mean(mu_P), sigma=np.mean(sigma_P))
        # Likelihood (sampling distribution) of the observations:
        observed_data_P = Hypsecant('observed_data_P', mu=mu_P, sigma=sigma_P, observed=data)
        # Draw 1000 posterior samples per chain:
        trace_P = pm.sample(draws=1000, tune=1000, chains=3, cores=1, progressbar=True)
        # Posterior predictive sampling:
        post_pred_P = pm.sample_posterior_predictive(trace_P, samples=1000)
        print(post_pred_P['observed_data_P'].shape)
        print('\nSummary: ')
        print(pm.stats.summary(data=trace_P))
        print(pm.stats.summary(data=post_pred_P))
    return trace_P, post_pred_P
def S2_C_Bayesian_Interface(mu, sigma, data, trace):
    with pm.Model() as model_C:
        # Prior distributions for the unknown model parameters, taken from the
        # posterior of the previous (P) stage:
        sigma_C = from_posterior('sigma_P', trace['sigma_P'])
        mu_C = from_posterior('mu_P', trace['mu_P'])
        # Likelihood (sampling distribution) of the observations:
        observed_data_C = Hypsecant('observed_data_C', mu=mu_C, sigma=sigma_C, observed=data)
        # Draw 1000 posterior samples per chain:
        trace_C = pm.sample(draws=1000, tune=1000, chains=3, cores=1, progressbar=True)
        # Posterior predictive sampling:
        post_pred_C = pm.sample_posterior_predictive(trace_C, samples=1000)
        print(post_pred_C['observed_data_C'].shape)
        print('\nSummary: ')
        print(pm.stats.summary(data=trace_C))
        print(pm.stats.summary(data=post_pred_C))
    return trace_C, post_pred_C
(S1_C_Bayesian_Interface is identical to S2_C_Bayesian_Interface above; only the name differs.)
(S4_0_Bayesian_Interface is identical to S3_0_Bayesian_Interface above; only the name differs.)
def S4_P_Bayesian_Interface(mu, sigma, data, trace):
    with pm.Model() as model_P:
        # Prior distributions for the unknown model parameters, taken directly
        # from the posterior of the initial model (unlike S3_P/S2_P, the
        # Interpolated variables are used as-is here):
        sigma_P = from_posterior('sigma_0', trace['sigma_0'])
        mu_P = from_posterior('mu_0', trace['mu_0'])
        # Likelihood (sampling distribution) of the observations:
        observed_data_P = Hypsecant('observed_data_P', mu=mu_P, sigma=sigma_P, observed=data)
        # Draw 1000 posterior samples per chain:
        trace_P = pm.sample(draws=1000, tune=1000, chains=3, cores=1, progressbar=True)
        # Posterior predictive sampling:
        post_pred_P = pm.sample_posterior_predictive(trace_P, samples=1000)
        print(post_pred_P['observed_data_P'].shape)
        print('\nSummary: ')
        print(pm.stats.summary(data=trace_P))
        print(pm.stats.summary(data=post_pred_P))
    return trace_P, post_pred_P
(S5_0_Bayesian_Interface and S5_P_Bayesian_Interface are identical to S3_0_Bayesian_Interface and S4_P_Bayesian_Interface above; only the names differ.)
(S5_C_Bayesian_Interface and S6_C_Bayesian_Interface are identical to S2_C_Bayesian_Interface above; only the names differ.)
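For reference, this is roughly how I chain the stages (a simplified sketch; data_s2 is a placeholder for one of my sensor data arrays):

# Simplified sketch of the pipeline (placeholder data variable):
trace_s2_0, _ = S2_0_Bayesian_Interface(data=data_s2)
trace_s2_P, _ = S2_P_Bayesian_Interface(mu=0, sigma=5, data=data_s2, trace=trace_s2_0)
# The correction stage reads 'sigma_P' / 'mu_P' from the previous stage's trace:
trace_s2_C, _ = S2_C_Bayesian_Interface(mu=0, sigma=5, data=data_s2, trace=trace_s2_P)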
However, when I get to the interpolation part, the output I get is sigma_0 ~ Interpolated (for example from the print(sigma_P) call in S2_P_Bayesian_Interface), i.e. a symbolic random variable rather than numbers. So, how can I extract values from the sigma and mu posteriors so that they can be used as priors in the correction (C) part?