One thing I have tried is to run:
import numpy as np
import pymc as pm
import pytensor as pt
import pytensor.tensor as at
from pytensor.graph.basic import Apply
from pytensor.graph.op import Op
def _calc_ecc_anom(manom, ecc):
    """
    Computes the eccentric anomaly from the mean anomaly.
    Code from Rob De Rosa's orbit solver (e < 0.95 use Newton, e >= 0.95 use Mikkola)

    Args:
        manom (float/np.array): mean anomaly, either a scalar or np.array of any shape
        ecc (float/np.array): eccentricity, either a scalar or np.array of the same shape as manom

    Return:
        eanom (float/np.array): eccentric anomalies, same shape as manom

    Written: Jason Wang, 2018
    """
    # This runs inside Op.perform on plain NumPy values, so use np.* rather than pm.math.*
    # Mikkola's cubic approximation for the starting guess
    alpha = (1.0 - ecc) / ((4.0 * ecc) + 0.5)
    beta = (0.5 * manom) / ((4.0 * ecc) + 0.5)
    aux = np.sqrt(beta**2.0 + alpha**3.0)
    z = np.abs(beta + aux)**(1.0 / 3.0)
    s0 = z - (alpha / z)
    s1 = s0 - (0.078 * (s0**5.0)) / (1.0 + ecc)
    e0 = manom + (ecc * (3.0 * s1 - 4.0 * (s1**3.0)))

    # One higher-order correction step on Kepler's equation f(E) = E - e*sin(E) - M
    se0 = np.sin(e0)
    ce0 = np.cos(e0)
    f = e0 - ecc * se0 - manom
    f1 = 1.0 - ecc * ce0
    f2 = ecc * se0
    f3 = ecc * ce0
    f4 = -f2
    u1 = -f / f1
    u2 = -f / (f1 + 0.5 * f2 * u1)
    u3 = -f / (f1 + 0.5 * f2 * u2 + (1.0 / 6.0) * f3 * u2 * u2)
    u4 = -f / (f1 + 0.5 * f2 * u3 + (1.0 / 6.0) * f3 * u3 * u3 + (1.0 / 24.0) * f4 * (u3**3.0))
    return e0 + u4
class KeplerSolverOp(Op):
    __props__ = ()

    def make_node(self, M, e):
        # Convert the inputs into symbolic tensor variables
        M = at.as_tensor_variable(M)
        e = at.as_tensor_variable(e)
        inputs = [M, e]
        # One output with the same type as M: the eccentric anomaly
        outputs = [M.type()]
        return Apply(self, inputs, outputs)

    def perform(self, node, inputs, outputs):
        # inputs are plain NumPy values here
        M, e = inputs
        EA = _calc_ecc_anom(M, e)  # compute the eccentric anomaly
        outputs[0][0] = np.asarray(EA)

    def grad(self, inputs, output_grads):
        M, e = inputs
        grad_wrt_m, grad_wrt_e = kepler_loglikegrad(M, e)
        # Chain rule: scale each partial derivative by the incoming output gradient
        return [output_grads[0] * grad_wrt_m, output_grads[0] * grad_wrt_e]
class LogLikeGrad(Op):
    __props__ = ()

    def make_node(self, M, e):
        M = at.as_tensor_variable(M)
        e = at.as_tensor_variable(e)
        inputs = [M, e]
        # Two outputs with the same type as M,
        # for the partial derivatives wrt M and e
        outputs = [M.type(), M.type()]
        return Apply(self, inputs, outputs)

    def perform(self, node, inputs, outputs):
        M, e = inputs
        EA = _calc_ecc_anom(M, e)
        sea = np.sin(EA)
        cea = np.cos(EA)
        temp = 1.0 - e * cea
        # Implicit differentiation of Kepler's equation M = E - e*sin(E) gives
        # dE/dM = 1 / (1 - e*cos(E)) and dE/de = sin(E) / (1 - e*cos(E))
        dEA_dM = 1.0 / temp
        dEA_de = sea / temp
        outputs[0][0] = np.asarray(dEA_dM)
        outputs[1][0] = np.asarray(dEA_de)
keplergrad = KeplerSolverOp()
kepler_loglikegrad = LogLikeGrad()

M = at.scalar("m")
e = at.scalar("e")
out = keplergrad(M, e)
eval_out = out.eval({M: 2.96466117, e: 0.4781471212028443})
print(eval_out)
This ran with no errors and returned “3.021801889806251”, which is correct.
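For what it's worth, a quick sanity check (not part of my original attempt) is to plug the result back into Kepler's equation M = E - e*sin(E) and look at the residual, using the plain-NumPy _calc_ecc_anom defined above:

M_val, e_val = 2.96466117, 0.4781471212028443
E_val = _calc_ecc_anom(M_val, e_val)
# Residual of Kepler's equation; should be near machine precision (~1e-15)
print(E_val - e_val * np.sin(E_val) - M_val)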
I have also tried

out = keplergrad(M, e)
grad_wrt_m, grad_wrt_e = pt.grad(out.sum(), wrt=[M, e])
pt.dprint([grad_wrt_m], print_type=True)
and received
LogLikeGrad.0 [id A] <Scalar(float64, shape=())>
├─ m [id B] <Scalar(float64, shape=())>
└─ e [id C] <Scalar(float64, shape=())>
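What I would probably try next is to compare the custom gradient against central finite differences; here is a sketch (assuming M, e, out, grad_wrt_m, and grad_wrt_e from above):

from pytensor import function

# Compile the solver and its symbolic gradients
solve = function([M, e], out)
grad_fn = function([M, e], [grad_wrt_m, grad_wrt_e])

M0, e0, h = 2.96466117, 0.4781471212028443, 1e-6
g_m, g_e = grad_fn(M0, e0)

# Central finite differences for comparison
fd_m = (solve(M0 + h, e0) - solve(M0 - h, e0)) / (2.0 * h)
fd_e = (solve(M0, e0 + h) - solve(M0, e0 - h)) / (2.0 * h)
print(g_m, fd_m)  # the pairs should agree to ~1e-8
print(g_e, fd_e)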