The Op for true_div, real, sqrt and others does not have a C implementation

Here is the minimal model that I found raises the Op warnings. Surprisingly, model.logp does not raise any warning: only sampling does.

import time

import numpy as np
from numpy import pi, real, sqrt, sin, cos

import pymc as pm
import pytensor.tensor as TT

def kx(ki, th, ph):
    """x-component of the wavevector with magnitude *ki*, polar angle
    *th* and azimuth *ph*: ki*sin(th)*cos(ph)."""
    sin_th = sin(th)
    cos_ph = cos(ph)
    return ki * sin_th * cos_ph

def ky(ki, th, ph):
    """y-component of the wavevector with magnitude *ki*, polar angle
    *th* and azimuth *ph*: ki*sin(th)*sin(ph)."""
    sin_th = sin(th)
    sin_ph = sin(ph)
    return ki * sin_th * sin_ph

def kz(ki, th, ep):
    """z-component of the wavevector inside a medium of permittivity
    *ep*: ki*sqrt(ep - sin(th)**2).

    NOTE(review): numpy's sqrt returns nan (with a RuntimeWarning) when
    ep < sin(th)**2 — presumably callers only pass ep >= 1 here; confirm
    if evanescent/complex cases are ever needed.
    """
    sin2_th = sin(th) ** 2
    return ki * sqrt(ep - sin2_th)

def kr(ki, th, ph):
    """Transverse (radial) wavenumber: Euclidean magnitude of the
    (kx, ky) pair for the same (ki, th, ph)."""
    x_comp = kx(ki, th, ph)
    y_comp = ky(ki, th, ph)
    return np.sqrt(x_comp**2 + y_comp**2)

def c_is(ki, th, ph, ths, phs):
    """Cosine of the angle between the incident (th, ph) and scattered
    (ths, phs) transverse wavevectors: their dot product normalized by
    the product of their magnitudes."""
    dot = kx(ki, th, ph) * kx(ki, ths, phs) + ky(ki, th, ph) * ky(ki, ths, phs)
    norm = kr(ki, th, ph) * kr(ki, ths, phs)
    return dot / norm

def beta0_v(ki, th, ep1):
    """Fresnel reflection coefficient (vertical polarization) at the
    interface between vacuum (ep = 1) and a medium of permittivity
    *ep1*: (ep1*kz_vac - kz_med) / (ep1*kz_vac + kz_med).

    Fixes: removed the unused local ``ph = 0`` and hoisted the two
    kz(...) terms, which were each evaluated twice.
    """
    kz_vac = ep1 * kz(ki, th, 1)
    kz_med = kz(ki, th, ep1)
    return (kz_vac - kz_med) / (kz_vac + kz_med)

def beta1_v(ki, th, ph, ths, phs, ep0, ep1):
    """First-order (incoherent) vertical-polarization scattering
    amplitude between incident direction (th, ph) and scattered
    direction (ths, phs), for permittivities ep0 (upper) and ep1
    (lower medium).

    NOTE(review): the factor kz(ki, th, ep1)*kz(ki, th, ep1) uses `th`
    twice — if a symmetric th/ths product was intended, confirm with
    the source formula.
    """
    prefactor = -2 * 1j * kz(ki, th, ep0) * (ki**2 - ep1 * ki**2)
    denom = ((ep1 * kz(ki, ths, ep0) + kz(ki, ths, ep1))
             * (ep1 * kz(ki, th, ep0) + kz(ki, th, ep1)))
    angular = (ep1 * kr(ki, th, ph) * kr(ki, ths, phs) / ki**2
               - kz(ki, th, ep1) * kz(ki, th, ep1) * c_is(ki, th, ph, ths, phs) / ki**2)
    return prefactor / denom * angular

def w(s, l, k1, k2):
    """Gaussian surface roughness spectrum for rms height *s* and
    correlation length *l*, evaluated at wavenumbers (k1, k2)."""
    amplitude = s**2 * l**2 / (4 * np.pi)
    return amplitude * np.exp(-0.25 * l**2 * (k1**2 + k2**2))

def T0(ki, th, ep1, za, s):
    """Zeroth-order (coherent) intensity term: interference of the
    direct and interface-reflected fields at height *za*.

    NOTE(review): parameter *s* is unused in this term; it is kept to
    preserve the call signature used elsewhere in the script.

    Fixes: removed the unused local ``ph = 0`` and hoisted beta0_v,
    which was evaluated twice.
    """
    beta0 = beta0_v(ki, th, ep1)
    return 1 + 2 * np.cos(2 * kz(ki, th, 1) * za) * beta0 + np.abs(beta0)**2

def T1(rx, ry, ki, th, ep1, s, l):
    """First-order (incoherent) intensity at spectral points (rx, ry):
    |beta1|^2 weighted by the roughness spectrum shifted to the
    incident transverse wavenumber. In-plane geometry (ph = phs = 0)
    with vacuum as the upper medium (ep0 = 1)."""
    ph = 0
    phs = 0
    ep0 = 1
    amp = np.abs(beta1_v(ki, th, ph, th, phs, ep0, ep1)) ** 2
    return amp * w(s, l, rx - kx(ki, th, ph), ry - ky(ki, th, ph))

def generadorS0_1capa_nuevo_simplificado2(thi, ep1, s, l, ZA, Wt, J, RX_, RY_):
    """Backscatter coefficient sigma0 [dB] at incidence angle *thi*:
    coherent term T0 plus the Gauss-quadrature integral of the
    incoherent term T1 (weights Wt, Jacobian J, nodes RX_/RY_)."""
    landa = 0.19          # radar wavelength used throughout the script
    k0 = 2 * np.pi / landa
    coherent = T0(k0, thi, ep1, ZA, s)
    incoherent = J * (Wt * T1(RX_, RY_, k0, thi, ep1, s, l)).sum()
    return 10 * np.log10(coherent + incoherent)

def patron_simplificado2(ep, s, l):
    """Build the symbolic sigma0 pattern over the module-level incidence
    angles ``thi`` (also reads globals ZA, Wt, J, RX_, RY_).

    Returns a pytensor vector of length ``len(thi)``.

    Fixes: removed the unused locals ``landa``, ``k0``, ``phi`` and
    ``ph``; replaced the loop of ``len(thi)`` chained ``set_subtensor``
    ops with a single ``TT.stack``, which builds the same vector with a
    much smaller computation graph.
    """
    s0 = [
        generadorS0_1capa_nuevo_simplificado2(th_, ep, s, l, ZA, Wt, J, RX_, RY_)
        for th_ in thi
    ]
    return TT.stack(s0)

def int_gauss(n, k_lim):
    """2-D Gauss-Legendre quadrature rule on [-k_lim, k_lim]^2 built
    with the Golub-Welsch algorithm (eigendecomposition of the Jacobi
    matrix of the Legendre recurrence).

    Returns a dict with:
      'Wt'          -- tensor-product 1-D weights, shape (m**2,)
      'X_IG'/'Y_IG' -- meshgrid of the scaled nodes, shape (m, m)
      'm_gauss'     -- number of 1-D nodes, m = n + 1

    Fixes: the O(m^2) double loops that filled the sub/super-diagonals
    are replaced by ``np.diag``; the matrix ``D`` was computed but never
    used (dead code) and is dropped. Output is unchanged.
    """
    m = n + 1
    # Legendre recurrence off-diagonal coefficients:
    # beta_k = 0.5 / sqrt(1 - (2k)^-2), k = 1..n.
    k = np.arange(1, n + 1, dtype=float)
    beta = 0.5 / np.sqrt(1.0 - (2.0 * k) ** -2)
    # Symmetric tridiagonal Jacobi matrix.
    T = np.diag(beta, -1) + np.diag(beta, 1)
    d_, V = np.linalg.eig(T)
    # Nodes are the eigenvalues; weights are 2 * (first row of the
    # eigenvector matrix) squared.
    W = 2 * V[0, :] ** 2
    Wt = np.kron(W, W)      # tensor-product weights for the 2-D rule
    r = k_lim * d_          # nodes rescaled from [-1, 1] to [-k_lim, k_lim]
    X_IG, Y_IG = np.meshgrid(r, r)
    return {'Wt': Wt, 'X_IG': X_IG, 'Y_IG': Y_IG, 'm_gauss': m}

# simulated inputs set

epsilon = 7
ssup = 0.006 #0.006
lsup = 0.1
ZA = 3.0
discretizacion = 80
thi = np.linspace(62*pi/180, 82*pi/180, discretizacion)
entrada = [epsilon, ssup, lsup]
landa = 0.19
k0 = 2*np.pi/landa
n_gauss = 20
k_lim = 1.5*k0
# Call int_gauss once instead of four times: each call redoes the
# (n+1)x(n+1) eigendecomposition and returns the same dict.
gauss_rule = int_gauss(n_gauss, k_lim)
m_gauss = gauss_rule['m_gauss']
Wt = gauss_rule['Wt']
J = k_lim**2
RX = gauss_rule['X_IG']
RY = gauss_rule['Y_IG']
RX_ = np.reshape(RX, (1, m_gauss**2))
RY_ = np.reshape(RY, (1, m_gauss**2))
s0_1 = patron_simplificado2(epsilon, ssup, lsup)
noise = 0.5 * np.random.randn(len(thi))

# Synthetic "measured" data: forward model evaluated once plus noise.
pert = s0_1.eval() + noise
t0 = time.time()  # needs `import time` (missing from the original post)
medidos = pert
errores = 1
samples = 100
RANDOM_SEED = 58  # NOTE(review): defined but never passed to the sampler

# Prior bounds for each sampled parameter: (lower, upper).
ranges = {
    'ep': (4, 40),
    's': (0.001, 0.01),
    'l': (0.06, 0.15),
}

with pm.Model() as model:
    # Uniform priors over the admissible range of each parameter.
    ep_lo, ep_hi = ranges['ep']
    ep = pm.Uniform('ep', ep_lo, ep_hi)
    s_lo, s_hi = ranges['s']
    s = pm.Uniform('s', s_lo, s_hi)
    l_lo, l_hi = ranges['l']
    l = pm.Uniform('l', l_lo, l_hi)

def f(ep = ep,
        s = s,
        l = l):
    # Wraps the forward model; the default arguments bind the pymc
    # random variables created above at definition time, so f() applied
    # with no arguments evaluates the model symbolically on the priors.
    # NOTE(review): defined at module level, outside the `with model`
    # block — this works only because ep/s/l are module globals here.
    return patron_simplificado2(ep,s,l)

with model:
    # Track the forward model output as a Deterministic, then condition
    # a Normal likelihood on the noisy synthetic measurements.
    # `real(...)` is numpy's elementwise real part (imported at the
    # top); it is among the ops the warning reports as lacking a C
    # implementation.
    function_pm = pm.Deterministic('s0f', f())
    observations = pm.Normal( "obs",  mu=real(function_pm), sigma=errores, observed=medidos)

with model:
    # Compile and evaluate the joint log-probability; per the report
    # above, this step does NOT emit the missing-C-implementation
    # warnings (the result is discarded — it is only a trigger check).
    model.logp()

with model:
    # Sequential Monte Carlo sampling — per the report, this is the
    # step that actually raises the "Op ... does not have a C
    # implementation" warnings.
    trace = pm.sample_smc(samples, cores=4)

I am sorry for the long model; it is a physically-based scattering model and is the simplest one that produces the warnings.