Hello
I am solving a problem with definition my own joint prior distribution. This distribution is similar to Uniform but with some “bricks” inside this distributions.
class LogPrior(tt.Op):
itypes = [tt.dvector] # expects a vector of parameter values when called
otypes = [tt.dscalar] # outputs a single scalar value (the log likelihood)
def __init__(self, lim):
# add inputs as class attributes
self.lim = lim
self.init_x = 38.45;
self.init_y = 20;
self.size_x = 2.42;
self.size_y = 12.2;
self.size_z = 2.54;
self.delta_y = 7.9 + self.size_y;
self.delta_x = 12.9 + self.size_x;
self.N_x = 12
self.N_y = 10
def perform(self, node, inputs, outputs):
# the method that is used when calling the Op
coords, = inputs # this will contain my variables
coord = np.array(coords)
xs=coord[0];
ys=coord[1];
zs=coord[2];
# call the log-likelihood function
logp = None
# Loop over data points; create box from errors at each point
if tt.gt(xs,self.lim['x_max']) or tt.lt(xs,self.lim['x_min']) or \
tt.gt(ys,self.lim['y_max']) or tt.lt(ys,self.lim['y_min']) or \
tt.gt(zs,self.lim['z_max']) or tt.lt(zs,self.lim['z_min']):
is_coord_inside_domain = False;
logp = -np.inf;
container_vol = 0;
if logp !=None:
x = self.init_x - self.delta_x
for i in np.arange(self.N_x):
x = x + self.delta_x;
y = self.init_y - self.delta_y
for j in np.arange(self.N_y):
y = y + self.delta_y
if xs>x and xs<x+self.size_x and ys>y and ys<y+self.size_y and zs<self.size_z:
is_coord_inside_domain = False;
logp = -np.inf;
if logp !=None:
container_vol = self.N_x*self.size_x*self.N_y*self.size_y*self.size_z;
domain_vol = (self.lim['x_max']- self.lim['x_min'])* \
(self.lim['y_max']- self.lim['y_min'])* \
(self.lim['z_max']- self.lim['z_min']);
allowed_vol = domain_vol - container_vol
logp = -tt.log(allowed_vol)
outputs[0][0] = np.array(logp)
But this code do not sample. It return the same value all the time. Could you help me with that?