Hello,
I am trying to sample from a custom distribution whose log-probability and its gradient I compute myself.
So far I have learned that I need to define my own Theano Op, but I am having trouble matching the types.
import pymc3 as pm
import theano
import numpy as np

class myOp(theano.gof.Op):
    itypes = [theano.tensor.dvector]
    otypes = [theano.tensor.dscalar]

    # def make_node(self, v):
    #     v = theano.tensor.as_tensor_variable(v)
    #     return theano.Apply(self, [v], [v.type()])

    def __init__(self):
        self.logp = lambda x: x
        self.dlogp = lambda x: x

    def perform(self, node, inputs, outputs):
        outputs[0] = self.logp(inputs[0])

    def grad(self, inputs, g):
        return [g[0] * self.dlogp(inputs[0])]

class myDist(pm.distributions.Continuous):
    def __init__(self, *args, **kwargs):
        self.op = myOp()
        super(myDist, self).__init__(*args, **kwargs)

    def logp(self, x):
        return self.op(x)

with pm.Model() as model:
    mydist = myDist('m', testval=np.random.rand(3))
This raises
TypeError: We expected inputs of types '[TensorType(float64, vector)]' but got types '[TensorType(float64, scalar)]'
Right now logp and dlogp are just the identity, but I want to replace them with my own code. Do I really need to define a second Op for the derivative, as suggested in this post?
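For reference, here is a rough sketch of what I understand that "second Op for the gradient" pattern to look like; the names myGradOp/myLogpOp and the placeholder functions are my own, not taken from that post:

import numpy as np
import theano
import theano.tensor as tt

class myGradOp(theano.gof.Op):
    # Wraps the hand-written gradient dlogp(x) as its own Op.
    itypes = [tt.dvector]
    otypes = [tt.dvector]

    def __init__(self, dlogp):
        self.dlogp = dlogp

    def perform(self, node, inputs, outputs):
        (x,) = inputs
        outputs[0][0] = np.asarray(self.dlogp(x), dtype=np.float64)

class myLogpOp(theano.gof.Op):
    # Wraps the hand-written logp(x); grad() delegates to myGradOp.
    itypes = [tt.dvector]
    otypes = [tt.dscalar]

    def __init__(self, logp, dlogp):
        self.logp = logp
        self.grad_op = myGradOp(dlogp)

    def perform(self, node, inputs, outputs):
        (x,) = inputs
        outputs[0][0] = np.asarray(self.logp(x), dtype=np.float64)

    def grad(self, inputs, g):
        (x,) = inputs
        # chain rule: upstream scalar gradient times dlogp/dx
        return [g[0] * self.grad_op(x)]

# placeholder example: an (unnormalized) standard normal
op = myLogpOp(logp=lambda x: -0.5 * np.sum(x ** 2),
              dlogp=lambda x: -x)

Is something like this really necessary, or can the gradient be returned from a single Op?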