It will not override anything; it's simply added to the total model logp.
Here is a simple example, with some code so you can check whether things are doing what you want them to:
import numpy as np
import pymc as pm
from pymc.model.fgraph import clone_model
with pm.Model() as m1:
    x = pm.Exponential("x", 1)
    y = pm.Poisson("y", x, observed=[0, 1, 2])

with clone_model(m1) as m2:
    # Just add one to whatever the model joint-probability is
    pm.Potential("potential", pm.math.ones((1,)))

print(m1.point_logps())  # {'x': -1.0, 'y': -3.69}
print(m2.point_logps())  # {'x': -1.0, 'y': -3.69, 'potential': 1.0}

np.testing.assert_allclose(
    m1.compile_logp()({"x_log__": 0}) + 1,
    m2.compile_logp()({"x_log__": 0}),
)
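The same check works when the Potential depends on a model variable. Here is a minimal sketch along the same lines, reusing m1 from above (the m3 model and the "penalty" name are just illustrative):

with clone_model(m1) as m3:
    # Potential that depends on x: subtracts the current value of x from the joint logp
    pm.Potential("penalty", -m3["x"])

x_val = 1.5
np.testing.assert_allclose(
    m3.compile_logp()({"x_log__": np.log(x_val)}),
    m1.compile_logp()({"x_log__": np.log(x_val)}) - x_val,
)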