Hi,

I am new to PyMC; I have been learning from the earlier example codes for the past three days. I am trying to fit a power law for x vs y. Values where y_err is -1 denote an upper limit on y, and y_err = -999 denotes a lower limit on y. I tried following exactly one of the examples where the censored-modelling issue is solved; my code is below. My doubt is that I am getting a very narrow posterior distribution for the fitted parameters A and B (using y = A * np.power(x, B)). I do not have true values of A and B to check against, so I want to make sure my code is correct (it seems to me that it is not!).

```
import numpy as np
import matplotlib.pyplot as plt
import pymc as pm
from pymc.distributions.dist_math import normal_lccdf, normal_lcdf
import arviz as az
from copy import copy
import xarray as xr
import aesara.tensor as at
# Observed data: x values, y values, and per-point uncertainties y_err.
# Negative y_err entries are sentinels marking censored points (see below).
x = np.array([6.7, 14.9, 7.8, 11.3, 12.1, 20.3, 6.7, 23.4, 16.9, 28.8, 30.0, 29.8, 28.6, 8.0, 5.5, 12.6, 15.1, 29.0, 26.6, 15.2, 13.3, 22.6, 3.2, 9.7, 30.9, 33.9, 30.9, 32.5])
y = np.array([14.392, 13.393, 14.493, 14.898, 13.187, 13.512, 14.396, 13.489, 12.652, 13.838, 13.506, 12.796, 12.794, 14.586, 14.204, 13.107, 13.702, 12.483, 13.125, 13.392, 14.314, 14.458, 14.944, 14.777, 11.76328143, 12.30545265, 12.02984059, 11.66659327])
y_err = np.array([0.175, 0.059, 0.011, 0.417, 0.01, 0.024, 0.021, 0.013, 0.044, 0.029, 0.083, 0.049, 0.115, 0.011, 0.023, 0.078, 0.028, 0.098, 0.026, 0.034, 0.038, -1, -1, -1, -999, -999, -999, -999])
### y_err = -1   : UPPER limit on y (left-censored: true y lies below the quoted value)
### y_err = -999 : LOWER limit on y (right-censored: true y lies above the quoted value)
### NOTE(review): the original comments here had the two labels swapped; the
### masks in censored_regression3 (and the prose above) use this convention.
######### ============= Model 3 ============================
def censored_regression3(x, y, y_err):
    """Fit the power law y = A * x**B to data with censored points.

    Parameters
    ----------
    x, y : np.ndarray
        Observed abscissae and ordinates.
    y_err : np.ndarray
        Per-point 1-sigma errors. Sentinels mark censoring:
        y_err > 0        -> ordinary measured point,
        -10 < y_err < 0  -> upper limit on y (left-censored),
        y_err < -10      -> lower limit on y (right-censored).

    Returns
    -------
    (model, trace) : the pm.Model and the sampled InferenceData.
    """
    # --- data pre-processing: split points by censoring type -------------
    is_right_censored = y_err < -10            # lower limits (true y above y_rc)
    is_left_censored = (y_err > -10) & (y_err < 0)   # upper limits (true y below y_lc)
    is_uncensored = y_err > 0
    x_lc, y_lc = x[is_left_censored], y[is_left_censored]
    x_rc, y_rc = x[is_right_censored], y[is_right_censored]
    x_un, y_un = x[is_uncensored], y[is_uncensored]
    y_err_un = y_err[is_uncensored]            # already positive by construction

    with pm.Model() as model3:
        A = pm.Normal('A', mu=0, sigma=10)
        B = pm.Normal('B', mu=0, sigma=10)
        # Intrinsic scatter of the relation, on top of measurement errors.
        σ = pm.HalfNormal("σ", sigma=1)

        # Total sd for measured points combines intrinsic scatter and the
        # per-point measurement error in quadrature. The original code used
        # y_err alone here (while the censored terms used σ), an inconsistent
        # model that understates the uncertainty and can yield artificially
        # narrow posteriors on A and B.
        total_sd = at.sqrt(σ ** 2 + y_err_un ** 2)
        pm.Normal("y_likelihood", mu=A * at.power(x_un, B), sigma=total_sd,
                  observed=y_un)

        # Upper limits: probability that Y falls below the limit -> log CDF.
        pm.Potential("left_censored",
                     normal_lcdf(A * at.power(x_lc, B), σ, y_lc))
        # Lower limits: probability that Y falls above the limit -> log CCDF.
        pm.Potential("right_censored",
                     normal_lccdf(A * at.power(x_rc, B), σ, y_rc))

        trace3 = pm.sample()
    return model3, trace3
#############=========================================
# Run the censored power-law fit on the data defined above.
censored_model3, censored_fit3 = censored_regression3(x=x, y=y, y_err=y_err)
# az.plot_posterior(censored_fit3)  # optionally: var_names=['A'], ref_val=A
def pp_plot(x, y, fit, ax, clr):
    """Plot the data and posterior-predictive power-law curves y = A * x**B.

    Parameters
    ----------
    x, y : np.ndarray   observed data
    fit  : InferenceData with posterior variables "A" and "B"
    ax   : matplotlib Axes to draw on
    clr  : color for the posterior curves
    """
    # plot data
    ax.plot(x, y, "k.", label="data")
    # Dense grid over the observed x range: a power law is curved, so the
    # original two-endpoint grid would render each draw as a straight line.
    xi = xr.DataArray(np.linspace(np.min(x), np.max(x), 100), dims=["obs_id"])
    post = az.extract(fit)
    # Plain xarray/numpy broadcasting. The original at.power(...) built a
    # symbolic aesara tensor from numeric inputs, which matplotlib cannot plot.
    y_ppc = (post["A"] * xi ** post["B"]).transpose()
    ax.plot(xi, y_ppc, c=clr, alpha=0.8, rasterized=True)
    ax.legend()
    ax.set(xlabel="x", ylabel="y")
# Single panel: data points overlaid with posterior-predictive draws.
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(10, 5), sharex=True, sharey=True)
pp_plot(x, y, censored_fit3, ax, "steelblue")
plt.show()
```