How to calculate the posterior predictive distribution with the black box likelihood method?

Hello,
How can I calculate the posterior predictive distribution when using the black box likelihood method? Since no y_pred variable is defined in the model (the likelihood is a custom DensityDist with no random method), I get the following error:

ppc = pm.sample_posterior_predictive(trace, samples=800, model=model1)  

ValueError: Distribution was not passed any random method Define a custom random method and pass it as kwarg random

Can you post some more code? How is your model defined?

import time

import numpy as np
import pymc3 as pm
import theano.tensor as tt


def standardize(x, data):
    # scale x by the mean and standard deviation of data
    return (x - data.mean()) / data.std()


def my_model(theta, x):
    # simple linear model: var1 is the slope, var2 the intercept
    var1, var2 = theta
    return x * var1 + var2


def my_loglike(theta, x, data, sigma):
    # Gaussian log-likelihood kernel (up to an additive constant),
    # evaluated on the standardized model output and data
    model = standardize(my_model(theta, x), data)
    data = standardize(data, data)
    return -(0.5 / sigma**2) * np.sum((data - model) ** 2)
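
As a quick sanity check, my_loglike can be evaluated on its own before anything touches PyMC3. Here is a minimal sketch with made-up toy data (x_toy, data_toy and the parameter values are illustrative, not from the original post):

# hypothetical toy data, just to exercise my_loglike directly
x_toy = np.linspace(0.0, 10.0, 50)
data_toy = 3.0 * x_toy + 1.0 + np.random.normal(0.0, 0.5, x_toy.size)

print(my_loglike((3.0, 1.0), x_toy, data_toy, sigma=0.5))  # near the maximum
print(my_loglike((0.0, 0.0), x_toy, data_toy, sigma=0.5))  # much lower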



class LogLike(tt.Op):
    """
    Specify what type of object will be passed to and returned by the Op when
    it is called. In our case we will be passing it a vector of values (the
    parameters that define our model) and returning a single "scalar" value
    (the log-likelihood).
    """

    itypes = [tt.dvector]  # expects a vector of parameter values when called
    otypes = [tt.dscalar]  # outputs a single scalar value (the log-likelihood)

    def __init__(self, loglike, data, x, sigma):
        """
        Initialise the Op with various things that our log-likelihood function
        requires. Below are the things that are needed in this particular
        example.

        Parameters
        ----------
        loglike:
            The log-likelihood (or whatever) function we've defined
        data:
            The "observed" data that our log-likelihood function takes in
        x:
            The independent variable (aka 'x') that our model requires
        sigma:
            The noise standard deviation that our function requires.
        """

        # add inputs as class attributes
        self.likelihood = loglike
        self.data = data
        self.x = x
        self.sigma = sigma

    def perform(self, node, inputs, outputs):
        # the method that is used when calling the Op
        theta, = inputs  # this will contain my variables

        # call the log-likelihood function
        logl = self.likelihood(theta, self.x, self.data, self.sigma)

        outputs[0][0] = np.array(logl) # output the log-likelihood




# create our Op (data, x and sigma must already be defined at this point)
logl = LogLike(my_loglike, data, x, sigma)


def my_mu(v):
    # thin wrapper so the DensityDist below can "call" the Op
    return logl(v)
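
Before wiring the Op into a model, it is worth checking that it evaluates cleanly on its own (this assumes data, x and sigma are defined, as above); Theano's .eval() works for a spot check like this:

# spot-check the Op outside of any PyMC3 model
theta_test = tt.as_tensor_variable([3.0, 1.0])
print(my_mu(theta_test).eval())  # should print a finite scalar log-likelihood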


# use PyMC3 to sample from the log-likelihood
if __name__ == "__main__":
    with pm.Model() as model1:
        # prio_var1, sd_var1, etc. are defined elsewhere
        var1 = pm.Normal('var1', mu=prio_var1, sd=sd_var1)
        var2 = pm.Normal('var2', mu=prio_var2, sd=sd_var2)

        # convert var1 and var2 to a tensor vector
        theta = tt.as_tensor_variable([var1, var2])

        # use a DensityDist (my_mu wraps the Op so it can be "called")
        pm.DensityDist('likelihood', my_mu, observed={'v': theta})

        step = pm.Slice()

        tim_init = time.process_time()
        trace = pm.sample(ndraws, tune=nburn, discard_tuned_samples=True,
                          chains=chains, step=step, cores=cores)

    # this is the call that raises the ValueError quoted above
    ppc = pm.sample_posterior_predictive(trace, samples=800, model=model1)
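
The error message itself points at the fix: in PyMC3, DensityDist accepts a random keyword argument, and sample_posterior_predictive calls it to draw predictive samples. Here is a minimal sketch, assuming the likelihood is Gaussian noise around my_model; the my_random function is my own illustration, not code from the original post, and this can be finicky with a dict-valued observed in some PyMC3 versions:

def my_random(point=None, size=None):
    # hypothetical random method: draw y ~ Normal(my_model(theta, x), sigma)
    # from the parameter values of the current posterior draw
    mu = my_model([point['var1'], point['var2']], x)
    return np.random.normal(mu, sigma)

# in the model block above, pass the random method when creating the likelihood:
#   pm.DensityDist('likelihood', my_mu, observed={'v': theta}, random=my_random)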

For this old question, I ended up coding the posterior predictive calculation manually, like this:

from scipy.stats import multivariate_normal

# draw 800 posterior samples without replacement and push each one
# through the model, adding Gaussian noise with variance sigma**2
tracedf = pm.trace_to_dataframe(trace)
idx = np.random.choice(len(tracedf), size=800, replace=False)
draws = tracedf.iloc[idx]

samples = []
for _, row in draws.iterrows():
    model_mean = my_model([row.var1, row.var2], x)
    samples.append(multivariate_normal(model_mean, sigma**2).rvs())
ppc = {'y_obs': np.array(samples)}
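
With ppc built that way, it can be summarized like any other posterior predictive dictionary; for example (a sketch, using the ppc from above):

# posterior predictive mean and a central 94% interval at each x
y_pred = ppc['y_obs']
pred_mean = y_pred.mean(axis=0)
pred_low, pred_high = np.percentile(y_pred, [3, 97], axis=0)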