Thanks @jessegrabowski. I’m having problems inverting prosail as just a single point bayesian inversion problem, so I’ll work on that first before moving onto the time series approach.
As long as my example code looks like a vaguely sensible approach to “model something in time so it has more sensible results that just modelling each point in time individually”. If it helps, this is the R code that I am trying to port to python: RSE-00511/Code/RTM_MCMC_V2.Rmd at main · DongchenZ/RSE-00511 · GitHub
Regarding my first bullet point, I am getting a confusing TypeError when I include lai. If you are able to run my code snippet yourself and pass comment on what the cause could be that would be greatly appreciated… This is my MWE:
import numpy as np
import pymc as pm
import pytensor.tensor as pt
from pytensor.graph import Op, Apply
import arviz as az
import prosail
# Define the new model function with additional parameters
def my_model(n, cab, car, cbrown, cw, cm, lai, tto, tts, x):
reflectance = prosail.run_prosail(n=n, cab=cab, car=car, cbrown=cbrown, cw=cw, cm=cm, lai=1.5, tto=10., tts=30., lidfa=-1.0, hspot=0.01, psi=0., prospect_version="D", factor='SDR', rsoil=.5, psoil=.5)
return reflectance #Prosail2S2("s2_response.csv", reflectance)
# Example model function - you can replace this with your specific model
return (n * x + cab * x**2 + car * np.sin(x) + cbrown * np.cos(x) +
cw * x**0.5 + cm * np.log1p(x) + lai * np.exp(-x) +
tto * np.tan(x) + tts * np.tanh(x))
def my_loglike(n, cab, car, cbrown, cw, cm, lai, tto, tts, sigma, x, data):
for param in (n, cab, car, cbrown, cw, cm, lai, tto, tts, sigma, x, data):
if not isinstance(param, (float, np.ndarray)):
raise TypeError(f"Invalid input type to loglike: {type(param)}")
model = my_model(n, cab, car, cbrown, cw, cm, lai, tto, tts, x)
return -0.5 * ((data - model) / sigma) ** 2 - np.log(np.sqrt(2 * np.pi)) - np.log(sigma)
class LogLike(Op):
def make_node(self, n, cab, car, cbrown, cw, cm, lai, tto, tts, sigma, x, data) -> Apply:
n = pt.as_tensor(n)
cab = pt.as_tensor(cab)
car = pt.as_tensor(car)
cbrown = pt.as_tensor(cbrown)
cw = pt.as_tensor(cw)
cm = pt.as_tensor(cm)
lai = pt.as_tensor(lai, dtype=float)
tto = pt.as_tensor(tto)
tts = pt.as_tensor(tts)
sigma = pt.as_tensor(sigma)
x = pt.as_tensor(x)
data = pt.as_tensor(data)
inputs = [n, cab, car, cbrown, cw, cm, lai, tto, tts, sigma, x, data]
outputs = [data.type()]
return Apply(self, inputs, outputs)
def perform(self, node: Apply, inputs: list[np.ndarray], outputs: list[list[None]]) -> None:
n, cab, car, cbrown, cw, cm, lai, tto, tts, sigma, x, data = inputs
loglike_eval = my_loglike(n, cab, car, cbrown, cw, cm, lai, tto, tts, sigma, x, data)
outputs[0][0] = np.asarray(loglike_eval)
def custom_dist_loglike(data, n, cab, car, cbrown, cw, cm, lai, tto, tts, sigma, x):
return loglike_op(n, cab, car, cbrown, cw, cm, lai, tto, tts, sigma, x, data)
N = 2101
sigma = 1.0
x = np.linspace(0.0, 9.0, N)
# True parameter values for testing
ntrue = 0.4
cabtrue = 0.1
cartrue = 0.2
cbrowntrue = 0.3
cwtrue = 0.05
cmtrue = 0.07
laitrue = 3.0
ttotrue = 0.15
ttstrue = 0.25
truemodel = my_model(ntrue, cabtrue, cartrue, cbrowntrue, cwtrue, cmtrue, laitrue, ttotrue, ttstrue, x)
rng = np.random.default_rng(716743)
data = sigma * rng.normal(size=N) + truemodel
loglike_op = LogLike()
test_out = loglike_op(ntrue, cabtrue, cartrue, cbrowntrue, cwtrue, cmtrue, laitrue, ttotrue, ttstrue, sigma, x, data)
with pm.Model() as no_grad_model:
# n = pm.Uniform("n", lower=-10, upper=10.0, initval=ntrue)
# cab = pm.Uniform("cab", lower=-10, upper=60.0, initval=cabtrue)
# car = pm.Uniform("car", lower=-10.0, upper=20.0, initval=cartrue)
# cbrown = pm.Uniform("cbrown", lower=-10.0, upper=10.0, initval=cbrowntrue)
# cw = pm.Uniform("cw", lower=-10.0, upper=10.0, initval=cwtrue)
# cm = pm.Uniform("cm", lower=-10.0, upper=10.0, initval=cmtrue)
# lai = pm.Uniform("lai", lower=-10.0, upper=10.0, initval=laitrue)
# tto = pm.Uniform("tto", lower=-10.0, upper=20.0, initval=ttotrue)
# tts = pm.Uniform("tts", lower=-10.0, upper=60.0, initval=ttstrue)
n = pm.Uniform("n", lower=-1.0, upper=5.0, initval=ntrue)
cab = pm.Uniform("cab", lower=-5, upper=60, initval=cabtrue)
car = pm.Uniform("car", lower=-5, upper=20, initval=cartrue)
cbrown = pm.Uniform("cbrown", lower=0, upper=2, initval=cbrowntrue)
cw = pm.Uniform("cw", lower=-10.0, upper=10, initval=cwtrue)
cm = pm.Uniform("cm", lower=0, upper=0.2, initval=cmtrue)
lai = pm.Uniform("lai", lower=0.5, upper=5.0, initval=laitrue)
tto = pm.Uniform("tto", lower=0.0, upper=20.0, initval=ttotrue)
tts = pm.Uniform("tts", lower=0, upper=60.0, initval=ttstrue)
# sigma = pm.Uniform("sigma", lower=0, upper=3, initval=sigma)
likelihood = pm.CustomDist(
"likelihood", n, cab, car, cbrown, cw, cm, lai, tto, tts, sigma, x, observed=data, logp=custom_dist_loglike
)
ip = no_grad_model.initial_point()
ip
no_grad_model.compile_logp(vars=[likelihood], sum=False)(ip)
try:
no_grad_model.compile_dlogp()
except Exception as exc:
print(type(exc))
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
with no_grad_model:
idata_no_grad = pm.sample(10000, step=pm.DEMetropolisZ())
az.plot_trace(idata_no_grad, lines=[
("n", {}, ntrue), ("cab", {}, cabtrue),
("car", {}, cartrue), ("cbrown", {}, cbrowntrue),
("cw", {}, cwtrue), ("cm", {}, cmtrue),
("lai", {}, laitrue), ("tto", {}, ttotrue),
("tts", {}, ttstrue)
])
az.summary(idata_no_grad, round_to=2)