Inversion of parameter given forward simulation function

Thank you. I still run into an error. Here is my code:


radarspy_path = r"I:\projects\RadSPy-1DRadarSimulator"
sys.path.append(radarspy_path)

from RadSPy_op import simulator_1D

def my_model(epsilon_r, loss_tangent, thicknesses, dB_noise):
    """Run the 1-D radar forward simulation and return the real trace.

    Parameters
    ----------
    epsilon_r : np.ndarray
        Relative permittivity of each layer.
    loss_tangent : np.ndarray
        Loss tangent of each layer.
    thicknesses : np.ndarray
        Layer thicknesses (one fewer entry than the layer count).
    dB_noise : float
        Noise level in dB added by the simulator.

    Returns
    -------
    np.ndarray
        Real part of the simulated trace between ``start_index`` and
        ``end_index``.
    """
    # NOTE(review): the simulation body was elided ("...") in the original
    # post.  It is expected to call simulator_1D with the parameters above
    # and bind `result`, `start_index` and `end_index` before the return.
    ...
    return np.real(result[start_index:end_index, 1])

def my_loglike(epsilon_r, loss_tangent, thicknesses, dB_noise, data):
    """Element-wise Gaussian log-likelihood of `data` given the forward model.

    The noise scale is ``abs(dB_noise)``, so the returned array is
    ``log N(data | my_model(...), abs(dB_noise))`` evaluated per sample.

    Raises
    ------
    TypeError
        If any input is not a numeric scalar or ndarray.
    """
    for param in (epsilon_r, loss_tangent, thicknesses, dB_noise, data):
        # Accept plain Python ints too: the script passes dB_noise as an
        # int literal (e.g. -40), which the original float-only check
        # would have rejected on a direct call.
        if not isinstance(param, (int, float, np.ndarray)):
            raise TypeError(f"Invalid input type to loglike: {type(param)}")
    model = my_model(epsilon_r, loss_tangent, thicknesses, dB_noise)
    return (
        -0.5 * ((data - model) / np.abs(dB_noise)) ** 2
        - np.log(np.sqrt(2 * np.pi))
        - np.log(np.abs(dB_noise))
    )

class LogLike(Op):
    # Black-box PyTensor Op: wraps the NumPy log-likelihood so PyMC can use
    # it inside a graph.  No gradient is defined, so samplers that need one
    # (e.g. NUTS) cannot be used with this Op.

    def make_node(self, epsilon_r, loss_tangent, thicknesses, dB_noise, data) -> Apply:
        """Build the Apply node: coerce all inputs to tensors and declare
        one output with the same type as `data` (the per-sample loglike)."""
        # Convert inputs to tensor variables
        epsilon_r = pt.as_tensor(epsilon_r)
        loss_tangent = pt.as_tensor(loss_tangent)
        thicknesses = pt.as_tensor(thicknesses)
        dB_noise = pt.as_tensor(dB_noise)
        data = pt.as_tensor(data)

        inputs = [epsilon_r, loss_tangent, thicknesses, dB_noise, data]
        # Output mirrors data's type because my_loglike returns one value
        # per data sample.
        outputs = [data.type()]

        return Apply(self, inputs, outputs)

    def perform(self, node: Apply, inputs: list[np.ndarray], outputs: list[list[None]]) -> None:
        """Numerical evaluation: PyTensor hands us plain ndarrays here."""
        epsilon_r, loss_tangent, thicknesses, dB_noise, data = inputs  # this will contain my variables
        loglike_eval = my_loglike(epsilon_r, loss_tangent, thicknesses, dB_noise, data)
        # Results are written into the pre-allocated output storage.
        outputs[0][0] = np.asarray(loglike_eval)
# set up our data: ground-truth parameters for a 3-layer model
# (two thicknesses because the bottom layer is a half-space).
true_epsilon_r = np.array([8.8, 5.0, 7.0])
true_loss_tangent = np.array([0.01, 0.02, 0.03])
true_thickness = np.array([30, 40])

# Noise level (dB) used both to synthesize the data and as sigma in the
# likelihood.
dBnoise = -40
synthetic_data = my_model(true_epsilon_r, true_loss_tangent, true_thickness, dBnoise)
# create our Op and smoke-test it once outside the model (note the
# deliberately different noise level of -100 for this check).
loglike_op = LogLike()
test_out = loglike_op(true_epsilon_r, true_loss_tangent, true_thickness, -100, synthetic_data)

def custom_dist_loglike(data, true_epsilon_r, true_loss_tangent, true_thickness, dB_noise):
    """Adapter for pm.CustomDist(logp=...): reorder arguments for LogLike.

    CustomDist calls logp(value, *dist_params), whereas the Op takes the
    data last — this shim just swaps the order.
    """
    # data, or observed is always passed as the first input of CustomDist
    return loglike_op(true_epsilon_r, true_loss_tangent, true_thickness, dB_noise, data)

# use PyMC to sample from the black-box log-likelihood (no gradient is
# defined on the Op, so pm.sample falls back to a gradient-free step method)
with pm.Model() as no_grad_model:
    # uniform priors on epsilon_r (one per layer)
    eps = pm.Uniform("eps", lower=3.0, upper=10.0, shape=3)
    # use a CustomDist with a custom logp function.
    #
    # FIX: without an explicit signature, CustomDist tries to *broadcast*
    # the parameter shapes ((3,), (3,), (2,), ()) against each other to
    # infer the distribution's shape, which raises the reported
    # "Could not broadcast dimensions" ValueError.  A gufunc-style
    # signature tells it the parameters are independent vectors/scalars
    # mapping to a 1-D output trace.
    likelihood = pm.CustomDist(
        "likelihood",
        eps,
        true_loss_tangent,
        true_thickness,
        dBnoise,
        observed=synthetic_data,
        logp=custom_dist_loglike,
        signature="(p),(p),(q),()->(t)",
    )
    trace = pm.sample(5000, tune=1000, progressbar=True, return_inferencedata=True)

The error is raised at the line `likelihood = pm.CustomDist("likelihood", eps, true_loss_tangent, true_thickness, dBnoise, observed=synthetic_data, logp=custom_dist_loglike)`:
ValueError: Could not broadcast dimensions. Incompatible shapes were [(ScalarConstant(ScalarType(int64), data=3),), (ScalarConstant(ScalarType(int64), data=3),), (ScalarConstant(ScalarType(int64), data=2),), (ScalarConstant(ScalarType(int64), data=1),)]

It seems that CustomDist cannot handle the input parameters correctly, even though I followed the PyMC example gallery entry "Using a 'black box' likelihood function (PyTensor Op without gradients)".