Hello, thank you for suggesting the use of scan. I had previously tried it without success, so I gave it another shot. I used the code below to recursively construct the array I need, but the issue I keep running into is that PyTensor rejects the (Random)Variables I pass in place of numeric arrays, with the error “Expected an array-like object, but found a Variable: maybe you are trying to call a function on a (possibly shared) variable instead of a numeric array”. Do you have any advice on how I can get PyTensor to operate on the RandomVariables rather than on concrete arrays?
import numpy as np
import pymc as pm
import pytensor
import pytensor.tensor as pt

def monogaussianHelper2(r, fluorescenceModel, t, forsterAcceleration):
    deltaR = r[1] - r[0]
    # multiply final output by deltaR
    rF = fluorescenceModel['rateFluorescence']
    #rF = pm.TruncatedNormal('rateF', mu=70, sigma=80, lower=0.0)
    aD = fluorescenceModel['alphaDD']
    mD = fluorescenceModel['r_0DD']
    sD = fluorescenceModel['sigmaDD']

    R = pt.matrix("r")                    # m x 1 dimensions
    F = pt.matrix("forsterAcceleration")  # same dimensions as R (m x 1)
    T = pt.matrix("t")                    # n x 1 dimensions

    def accumulatorFunction(r, forsterAcceleration, prevArray, t, rateF, alphaD, muD, sigmaD):
        # distDistribution is my own helper that evaluates the Gaussian distance distribution P(r)
        P_r = distDistribution(r, alphaD, muD, sigmaD, sigmaD.eval().size)
        donorAcceptorTerm = pt.exp(-t * rateF * forsterAcceleration)  # check if this causes issues
        return pt.inc_subtensor(prevArray[:, :], P_r * donorAcceptorTerm * deltaR)

    initialCondition = pt.zeros((t.size, 1))
    results, updates = pytensor.scan(
        fn=accumulatorFunction,
        sequences=[R, F],
        outputs_info=initialCondition,  # this may be the issue, as there are random variables involved whereas a matrix is likely determined already
        non_sequences=[T, rF, aD, mD, sD],
        n_steps=R.shape[0])
    answer = results[-1]
    compiledFunction = pytensor.function(inputs=[R, F, T, rF, aD, mD, sD], outputs=answer, updates=updates)
    return compiledFunction(r[:, None], forsterAcceleration[:, None], t[:, None], rF, aD, mD, sD)[:, None]
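In case it makes the problem clearer, here is a toy example (names here are made up, not from my actual model) of the kind of mismatch I think I am hitting, i.e. handing a symbolic (Random)Variable to an already-compiled PyTensor function:

import pymc as pm
import pytensor
import pytensor.tensor as pt

x = pt.dscalar("x")
doubler = pytensor.function([x], 2 * x)

print(doubler(3.0))   # numeric input: works fine
with pm.Model():
    rv = pm.Normal("rv", 0, 1)
# doubler(rv)         # symbolic RandomVariable: raises the "Expected an array-like
#                     # object, but found a Variable" TypeError quoted above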
def fluorescenceModelPyTensor2(r, t, data, numGauss, numComponents):
    R_0 = 67
    f_Donor = 0.07  # generally below .10
    forsterAcceleration = np.power(R_0 / r, 6)

    with pm.Model() as fluorescentModel:
        rateFluorescence = pm.TruncatedNormal('rateFluorescence', mu=70, sigma=80, lower=0.0, shape=numComponents)  # can put shape=numComponents when normal works
        alphaDistance = pm.Dirichlet('alphaDD', a=np.ones(numGauss), shape=numGauss)
        muDistance = pm.TruncatedNormal('r_0DD', mu=50, sigma=60, lower=0.0, upper=120, shape=numGauss)
        sigmaDistance = pm.TruncatedNormal('sigmaDD', mu=10, sigma=13, lower=0.0, upper=30, shape=numGauss)

        I_D = np.exp(-t[:, np.newaxis] * rateFluorescence)
        I_DA = monogaussianHelper2(r, fluorescentModel, t, forsterAcceleration)
        I_F = f_Donor * I_D + (1 - f_Donor) * I_DA

        likelihood = pm.Poisson("likelihood", mu=I_F, observed=data[:, None])

    with fluorescentModel:
        trace = pm.sample(1000)
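For context, I call the model builder with plain 1-D numpy arrays roughly like this (the sizes and values below are only placeholders to show the shapes involved; this call is where the error above appears):

# Placeholder inputs, only meant to illustrate the shapes
r = np.linspace(20.0, 120.0, 200)                          # distance grid, length m
t = np.linspace(0.0, 10.0, 500)                            # time grid, length n
data = np.random.default_rng(0).poisson(5.0, size=t.size)  # fake decay counts

fluorescenceModelPyTensor2(r, t, data, numGauss=2, numComponents=1)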
I will also try a different NUTS sampler to make sure PyMC can keep up the sampling rate (once I have fixed the main issue, of course).
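For reference, the sampler switch I have in mind is just something like the following (assuming a recent PyMC version where pm.sample accepts a nuts_sampler argument, and that the external sampler, e.g. nutpie, is installed):

with fluorescentModel:
    # hand NUTS off to an external implementation; "numpyro" or "blackjax" are the other options
    trace = pm.sample(1000, nuts_sampler="nutpie")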