Maybe a few years late, but I ran into the same problem, followed @junpenglao's advice, and tried adapting numpy.trapz. I simply replaced np with at (i.e. numpy with pytensor) and commented out the exception handler (just for the sake of this example; removing it may break things in more complex uses). The results are equivalent, but would this work in every situation?:
import numpy as np
import pytensor.tensor as at
x = np.arange(100)
np.trapz(x)
Out[4]: 4900.5
def at_trapz(y, x=None, dx=1.0, axis=-1):
    """Integrate *y* along *axis* using the composite trapezoidal rule.

    PyTensor port of ``numpy.trapz``: builds a symbolic graph instead of
    computing eagerly, so the result is a PyTensor variable (call
    ``.eval()`` to get a concrete value).

    Parameters
    ----------
    y : tensor-like
        Values to integrate.
    x : tensor-like, optional
        Sample points corresponding to ``y``. If ``None`` (default), the
        samples are assumed uniformly spaced ``dx`` apart.
    dx : float, optional
        Spacing between samples when ``x`` is None. Default 1.0.
    axis : int, optional
        Axis along which to integrate. Default -1 (last axis).

    Returns
    -------
    TensorVariable
        Symbolic trapezoidal-rule integral of ``y`` along ``axis``.
    """
    # Accept lists/arrays as well as tensor variables (symbolic
    # counterpart of numpy.trapz's asanyarray conversion).
    y = at.as_tensor_variable(y)
    if x is None:
        d = dx
    else:
        x = at.as_tensor_variable(x)
        if x.ndim == 1:
            d = at.diff(x)
            # Reshape the 1-D spacing so it broadcasts against y
            # along the integration axis only.
            shape = [1] * y.ndim
            shape[axis] = d.shape[0]
            d = d.reshape(shape)
        else:
            d = at.diff(x, axis=axis)
    nd = y.ndim
    # Pair adjacent samples along `axis`: slice1 drops the first sample,
    # slice2 drops the last, giving the two ends of each trapezoid.
    slice1 = [slice(None)] * nd
    slice2 = [slice(None)] * nd
    slice1[axis] = slice(1, None)
    slice2[axis] = slice(None, -1)
    # Sum of trapezoid areas: spacing * average of the two endpoints.
    return at.sum(d * (y[tuple(slice1)] + y[tuple(slice2)]) / 2.0, axis)
at_trapz(x)
Out[6]: Sum{axis=[0], acc_dtype=float64}.0
at_trapz(x).eval()
Out[7]: array(4900.5)