ValueError: Input dimension mis-match

Hi. I am trying to implement cross-validation for my model. Here is my code:

import numpy as np
import theano
import pymc3 as pm
from sklearn.model_selection import train_test_split

train, test = train_test_split(df, test_size=0.2)

# sort both splits by the first column (redshift)
train = train[np.argsort(train.T[0])]
test = test[np.argsort(test.T[0])]

# unpack the data columns of each split
ztr, logLUVtr, logLXtr, logFUVtr, logFXtr, errQtr = train.T
zte, logLUVte, logLXte, logFUVte, logFXte, errQte = test.T


DMtr = logFXtr
DMte = logFXte

print(len(zte))
print(len(ztr))

320
1278

# wrap the data in theano shared variables so the values can be
# swapped out later for posterior predictive sampling
JZtr = theano.shared(ztr)
# cov = theano.shared(covQ)
errtr = theano.shared(errQtr)
logFUV1tr = theano.shared(logFUVtr)
logFX1tr = theano.shared(logFXtr)
DM1tr = theano.shared(DMtr)

JZte = theano.shared(zte)
# cov = theano.shared(covQ)
errte = theano.shared(errQte)
logFUV1te = theano.shared(logFUVte)
logFX1te = theano.shared(logFXte)
DM1te = theano.shared(DMte)


model = pm.Model()

with model:
    H0 = 70.
    s = pm.Uniform("s", lower=1.5, upper=2.)
    OmegaM = pm.Uniform("OmegaM", lower=0., upper=1.)
    beta_prime = pm.Uniform("beta_prime", lower=6., upper=8.)
    w = pm.Normal("w", mu=-1, sd=1)
    gamma = pm.Normal("gamma", mu=0, sd=1)

    # distance modulus from the cosmological parameters
    dm = distmod_w_flat(OmegaM, H0, w, JZtr)

    mb = pm.math.dot(2. * (gamma - 1.) / 5, dm) - beta_prime + pm.math.dot(gamma, logFUVtr)
    covrtr = pm.Deterministic("covr", pm.math.sqrt((pm.math.dot(5 / 2 / (1 - gamma), errtr)) ** 2 + s ** 2))

    obsm = pm.Normal("obsm", mu=mb, sd=covrtr, observed=DM1tr)

with model:
    step = pm.NUTS()
    trace = pm.sample(3000, step=step, return_inferencedata=False)
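
(For reference, distmod_w_flat computes the distance modulus for a flat wCDM cosmology. A rough sketch of what it does, not my exact implementation, assuming a simple fixed-grid integration of 1/E(z):)

import numpy as np
import theano.tensor as tt

def distmod_w_flat(OmegaM, H0, w, z):
    # sketch only: mu = 5*log10(d_L / Mpc) + 25, with
    # d_L = (1+z) * (c/H0) * int_0^z dz'/E(z')
    c = 299792.458                                # speed of light, km/s
    t = np.linspace(0., 1., 200)                  # fixed integration nodes
    zz = tt.outer(tt.as_tensor_variable(t), z)    # grid from 0 up to each z
    Ez = tt.sqrt(OmegaM * (1. + zz) ** 3
                 + (1. - OmegaM) * (1. + zz) ** (3. * (1. + w)))
    integral = tt.mean(1. / Ez, axis=0) * z       # crude Riemann estimate
    dL = (1. + z) * (c / H0) * integral           # luminosity distance, Mpc
    return 5. * tt.log10(dL) + 25.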

It works well, but when I run pm.sample_posterior_predictive:

# point the shared variables at the test data
JZtr.set_value(zte)
errtr.set_value(errQte)
logFUV1tr.set_value(logFUVte)
logFX1tr.set_value(logFXte)
DM1tr.set_value(DMte)

ppc = pm.sample_posterior_predictive(trace, model=model, samples=100)

I get the following error:

ValueError: Input dimension mis-match. (input[5].shape[0] = 320, input[8].shape[0] = 1278)
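
If it helps, I added a quick check after the set_value calls (just a diagnostic, not part of the model), and every shared variable reports the test-set shape, so I do not see where the 1278 comes from:

# diagnostic: print the current shape of each shared variable
for name, var in [("JZtr", JZtr), ("errtr", errtr),
                  ("logFUV1tr", logFUV1tr), ("logFX1tr", logFX1tr),
                  ("DM1tr", DM1tr)]:
    print(name, var.get_value().shape)   # each one prints (320,)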

Thanks in advance for any help.