Custom distribution class for the spherical von Mises–Fisher distribution


Newbie here. I’m trying to make a custom spherical distribution which takes in pairs of longitudes and latitudes in my model. But I’m stuck at this testval argument here while initializing a model.

My functions look like this:

def fisher_distribution_PdA(k, theta):
    """Fisher (spherical von Mises-Fisher) probability density per unit area.

    Parameters
    ----------
    k : float
        Concentration parameter (kappa).
    theta : float or array
        Angular distance from the mean direction, in degrees.

    Returns
    -------
    float or array
        Density k * exp(k*cos(theta)) / (2*pi*(e^k - e^-k)).
    """
    theta_rad = theta / 180 * np.pi
    normalization = 2 * np.pi * (np.exp(k) - np.exp(-k))
    return k * np.exp(k * np.cos(theta_rad)) / normalization

def vmf_random(lon_lat, k):
    """Draw one (lon, lat) direction from a Fisher distribution centered on
    ``lon_lat`` with concentration ``k``, via ``ipmag.fishrot``.
    """
    di_block = ipmag.fishrot(k = k, n = 1, dec = lon_lat[0], inc = lon_lat[1], di_block = True)
    first_draw = di_block[0]
    # fishrot returns a block of draws; keep the (dec, inc) pair of the first.
    return np.array([first_draw[0], first_draw[1]])

def vmf_logp(lon_lat, k, x):
    """Log-probability of direction ``x`` under a spherical von Mises-Fisher
    distribution with mean (lon, lat) ``lon_lat`` and concentration ``k``.

    Fixes vs. the original: the body mixed 5- and 4-space indentation (a
    syntax error), and had an unreachable ``return -np.inf`` after
    ``raise ZeroProbability`` -- ZeroProbability is the PyMC2 mechanism; in
    PyMC3 the logp should simply return -inf for impossible values.
    """
    # Latitude outside [-90, 90] is impossible: zero probability.
    if x[1] < -90. or x[1] > 90.:
        return -np.inf
    # Near-zero concentration: density is uniform over the sphere's area.
    if k < eps:
        return np.log(1. / 4. / np.pi)
    # Angular distance between x and the mean direction (degrees).
    angle = pmag.angle(x, lon_lat)
    logp = np.log(fisher_distribution_PdA(k, angle))
    return logp

class VMF(Continuous):
    """PyMC3 ``Continuous`` wrapper around the ``vmf_random`` / ``vmf_logp``
    helpers.

    The mean direction and concentration are stored on the instance; ``mv``
    is accepted but unused (kept for call compatibility), and ``mode`` is
    set to 0 as a default starting point.
    """

    def __init__(self, lon_lat, k, mv = True, *args, **kwargs):
        super(VMF, self).__init__(*args, **kwargs)
        self._lon_lat = lon_lat
        self._k = k
        self.mode = 0

    def random(self):
        """Forward draw from the distribution."""
        return vmf_random(self._lon_lat, self._k)

    def logp(self, value):
        """Log-density of ``value``."""
        return vmf_logp(self._lon_lat, self._k, value)

# Build a model containing a single 2-vector VMF variable; `testval` supplies
# the starting point and `shape = 2` makes the variable a (lon, lat) pair.
with pm.Model() as model:
    vmf = VMF('vmf', lon_lat = np.array([0, 90]), k = 10, shape = 2, testval = np.array([1,1]), observed = False)

the print out of value is always TensorConstant{0.0} instead of the test array.

Similar code worked in pymc2:

VMF('start', lon_lat=(lon, lat), k=k, value=(0.,0.), observed=False)

I am new to this and would love to have some guidance, thanks!

I found that my issue is similar to this question: How to customize von-mises-fisher distribution in PyMC3?

However, in my case I want my value to be something like a location coordinates, [longitude, latitude] instead of a value. How should I do that?


I think I realized that testval can take in an array but it is interpreting whatever input value for testval as a class in Theano which includes a scratchpad :

({'tag': scratchpad{'test_value': array([[0., 0.]])}, 'type': TensorType(float64, row), 'owner': None, 'index': None, 'name': 'vmf', 'auto_name': 'auto_227584', 'dshape': (1, 2), 'dsize': 2, 'distribution': <__main__.VMF object at 0x13d201748>}).

So I have been able to call that test_value when initializing a model. However, this doesn’t work when I want to have a observed data in.

Now my logp function looks like this:

def logp(self, value):
    """Log-probability (fragment: the pasted snippet ends without a return).

    NOTE(review): reading `value.tag.test_value` fetches the static Theano
    test value attached at model construction, not the value proposed by the
    sampler -- presumably why the density never changes during sampling;
    verify against the PyMC3 documentation.
    """
    lon_lat = self._lon_lat
    k = self._k
    # Static test value on the symbolic variable, not the live sampled value.
    x = value.tag.test_value
    # Observed variables carry a 'data' attribute; branch intended to detect
    # them, but both branches end up assigning the same thing.
    if 'data' in value.__dict__.keys():
        x = []  # immediately overwritten on the next line -- redundant
        x = value.tag.test_value

and this works:

# Prior-predictive check: build the variable, then draw 100 independent
# samples by calling .random() repeatedly.
vmf = VMF('vmf', lon_lat=[lon_hidden,lat_hidden], k=kappa_hidden, testval = [0,0], shape = (1,2))
data = np.array([vmf.random() for i in range(100)])

but when putting in the observed data:

# Exponential hyperprior on the concentration and a latent mean direction.
kappa = pm.Exponential('kappa', 1.)
# NOTE(review): `lon_lat=lonlat` below passes a random variable (a Theano
# tensor), while the logp above expects a plain array -- presumably the
# source of the failure described below; confirm.
lonlat = VMF('lonlat', lon_lat=[0.,0.], k=1., testval = [0.,0.], shape = (1,2))
# One observed VMF node per data point.
for sample in data:
    VMF('direction', lon_lat=lonlat, k=kappa, observed = sample)

it always interprets lonlat as a distribution object rather than a single array.

any idea on how to get around?

after reading source code now I have made my custom distribution class running:

@as_op(itypes=[T.dvector, T.dscalar, T.dvector], otypes=[T.dscalar])
def vmf_logp(lon_lat, k, x):
    """NumPy log-density of the spherical von Mises-Fisher distribution,
    wrapped as a Theano op.

    Inputs: mean (lon, lat) vector `lon_lat`, concentration `k`, and the
    evaluated direction `x`. Returns a float64 scalar log-density.

    Fix vs. the original: `raise ZeroProbability` (a PyMC2 idiom) was
    followed by an unreachable `return -np.inf`; in PyMC3 the logp itself
    should return -inf for impossible values.
    """
    # Latitude outside [-90, 90] is impossible: zero probability.
    if x[1] < -90. or x[1] > 90.:
        return np.float64(-np.inf)  # np.float64 to satisfy the dscalar otype
    # Near-zero concentration: density is uniform over the sphere's area.
    if k < eps:
        return np.log(1. / 4. / np.pi)
    # Angular distance (degrees) between x and the mean direction.
    theta = pmag.angle(x, lon_lat)[0]
    # Fisher probability density per unit area; d2r converts deg -> rad.
    PdA = k*np.exp(k*np.cos(theta*d2r))/(2*np.pi*(np.exp(k)-np.exp(-k)))
    return np.log(PdA)

class VMF(Continuous):
    """Spherical von Mises-Fisher distribution over (longitude, latitude)
    pairs, for PyMC3.

    ``logp`` delegates to the @as_op-wrapped ``vmf_logp``; ``random`` follows
    the standard PyMC3 draw_values / generate_samples pattern.

    Fixes vs. the original paste: the ``__init__`` body had lost its
    indentation (a syntax error), the rotation line in ``_random`` was
    garbled, the ``generate_samples`` call was truncated, and the mutable
    list default for ``lon_lat`` is replaced by a None sentinel.
    """

    def __init__(self, lon_lat=None, k=None, *args, **kwargs):
        # None sentinel instead of a mutable [0, 0] default; same behavior.
        if lon_lat is None:
            lon_lat = [0, 0]
        super(VMF, self).__init__(*args, **kwargs)
        self._k = T.as_tensor_variable(floatX(k))
        self._lon_lat = T.as_tensor(lon_lat)

    def logp(self, value):
        """Symbolic log-density of ``value`` (a (lon, lat) tensor vector)."""
        lon_lat = self._lon_lat
        k = self._k
        value = T.as_tensor_variable(value)
        return vmf_logp(lon_lat, k, value)

    def _random(self, lon_lat, k, size=None):
        """Draw one (lon, lat) sample by inverse-CDF sampling of the
        colatitude plus rotation to the mean direction.

        NOTE(review): ``size`` is accepted but ignored -- r1, r2 are scalars,
        so each call produces a single draw.
        """
        # Euler angles rotating the north pole onto the requested mean pole.
        alpha = 0.
        beta = np.pi / 2. - lon_lat[1] * d2r
        gamma = lon_lat[0] * d2r
        rotation_matrix = construct_euler_rotation_matrix(alpha, beta, gamma)

        # Inverse-CDF sampling of the colatitude for concentration k;
        # longitude is uniform on [0, 2*pi).
        lamda = np.exp(-2 * k)
        r1 = np.random.random()
        r2 = np.random.random()
        colat = 2 * np.arcsin(np.sqrt(-np.log(r1 * (1 - lamda) + lamda) / 2 / k))
        this_lon = 2 * np.pi * r2
        lat = 90 - colat * r2d
        lon = this_lon * r2d

        # Rotate the pole-centered draw to the mean direction.
        unrotated = pmag.dir2cart([lon, lat])[0]
        # NOTE(review): the original line was garbled in the paste
        # ("np.transpose(, unrotated))"); reconstructed as the standard
        # matrix-vector rotation -- confirm against the working source.
        rotated = np.transpose(np.dot(rotation_matrix, unrotated))
        rotated_dir = pmag.cart2dir(rotated)
        return np.array([rotated_dir[0], rotated_dir[1]])

    def random(self, point=None, size=None):
        """Concrete draws for PyMC3.

        NOTE(review): the original paste was truncated mid-call; completed
        with the conventional generate_samples keyword arguments -- confirm.
        """
        lon_lat, k = draw_values([self._lon_lat, self._k], point=point, size=size)
        return generate_samples(self._random, lon_lat, k,
                                dist_shape=self.shape, size=size)

Now, I have a test model to see if I can successfully recover a longitude and latitude which is the center from which a suite of random locations are generated: it looks like this in pymc2 logic:

vmf = VonMisesFisher('vmf', lon_lat=[lon_hidden,lat_hidden], kappa=kappa_hidden)
data = np.array([vmf.random() for i in range(100)])
model_parameters = []
kappa = pymc.Exponential('kappa', 1.)
lon_lat = VonMisesFisher('lon_lat', lon_lat=(0.,0.), kappa=0.00)

for sample in data:
    model_parameters.append(VonMisesFisher('direction', lon_lat=lon_lat, kappa=kappa,   value=sample, observed=True))

How can I do the same loop as this model_parameters.append(VonMisesFisher(‘direction’, lon_lat=lon_lat, kappa=kappa, value=sample, observed=True)) to add all the observed data?

I have learned that it is not possible to set observed = data directly, since the x in the logp function has to be a Theano tensor vector.

I guess the question is: how can you add observed data as an array of arrays? or use a for loop to add a number of model parameters?


I finally got the model to run!

Model setup:

# Force double precision so Theano tensors match the float64 NumPy draws.
theano.config.floatX = 'float64'
# Hidden "true" parameters that the inference should recover.
lat_hidden = -10.
lon_hidden = 45.
kappa_hidden = 50.

# Generate 200 synthetic observations from the hidden distribution.
with pm.Model() as model1:
    vmf = VMF('vmf', lon_lat = [lon_hidden, lat_hidden], k = kappa_hidden, testval = np.array([1., 0.]), shape = 2)
    data = np.array([vmf.random() for i in range(200)])

with pm.Model() as model2:    
    # Split the draws into longitudes / latitudes for plotting.
    phi = np.array(data)[:,0]
    theta = np.array(data)[:,1]

    # Visual check: plot the synthetic poles on an orthographic map.
    ax = ipmag.make_orthographic_map(lon_hidden, lat_hidden)
    ipmag.plot_vgp(ax, phi, theta)
    ipmag.plot_pole(ax, lon_hidden, lat_hidden, 140/np.sqrt(kappa_hidden))

    # Priors: exponential on the concentration, diffuse mean direction.
    kappa = pm.Exponential('kappa', 1.)
    lon = 1.
    lat = 1.
    k = 1.
    lonlat = VMF('lonlat', lon_lat = [lon, lat], k = k, testval = np.array([1., 0.]), shape = 2)
    # One observed node per sample (works around passing an array of arrays
    # as a single observed value).
    for i in range(len(data)):
        direction = VMF('direction_'+str(i), lon_lat=lonlat, k=kappa, observed = np.array(data[i]))

    # Metropolis sampling on a single core; reason for cores=1 not stated --
    # presumably the @as_op-wrapped logp is not picklable; confirm.
    trace = pm.sample(10000,  tune = 1000, cores = 1, step = pm.Metropolis())

Model output:

you can see that it successfully recovers the longitude and latitude that the “fake” random observations were centered on. However, it seems to always underestimate the precision parameter kappa. In this case it should recover a value of 50, but the result falls around 40. I don’t think there is any issue with the logp function. Wondering what is going on…

1 Like