Sure, it looks like this (I am not a Stan programmer, so I can't say that I follow it completely):
Here is the R code:
# brms
library(brms)

# read the ALDA wages data (comma-separated despite the .txt extension)
wages <- read.table("https://stats.idre.ucla.edu/stat/r/examples/alda/data/wages_pp.txt",
                    header = TRUE, sep = ",")

# grand-mean center the predictors
wages$exper_c <- wages$exper - mean(wages$exper)
wages$hgc.9_c <- wages$hgc.9 - mean(wages$hgc.9)

# random intercept plus random slopes for exper_c and hgc.9_c by id
modbrms <- brm(lnw ~ hgc.9_c + exper_c + (exper_c + hgc.9_c | id),
               data = wages, family = gaussian(), cores = 8)
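If you want to pull out the Stan program (and the data list it expects) yourself, brms exposes both; the sketch below just reuses the formula from above:

# sketch: extract the generated Stan code and data without refitting
scode <- make_stancode(lnw ~ hgc.9_c + exper_c + (exper_c + hgc.9_c | id),
                       data = wages, family = gaussian())
sdata <- make_standata(lnw ~ hgc.9_c + exper_c + (exper_c + hgc.9_c | id),
                       data = wages, family = gaussian())
cat(scode)  # prints the Stan program shown further down
# stancode(modbrms) and standata(modbrms) return the same from the fitted object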
The output of summary(modbrms):
 Family: gaussian
  Links: mu = identity; sigma = identity
Formula: lnw ~ hgc.9_c + exper_c + (exper_c + hgc.9_c | id)
   Data: wages (Number of observations: 6402)
Samples: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
         total post-warmup samples = 4000
Group-Level Effects:
~id (Number of levels: 888)
                       Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat
sd(Intercept)              0.23      0.01     0.22     0.25        493 1.01
sd(exper_c)                0.04      0.00     0.04     0.05        518 1.01
sd(hgc.9_c)                0.02      0.01     0.00     0.05        103 1.05
cor(Intercept,exper_c)     0.39      0.06     0.27     0.50       1222 1.00
cor(Intercept,hgc.9_c)     0.35      0.32    -0.37     0.90       1163 1.00
cor(exper_c,hgc.9_c)       0.02      0.39    -0.74     0.81        721 1.01

Population-Level Effects:
          Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat
Intercept     1.90      0.01     1.88     1.91        966 1.00
hgc.9_c       0.04      0.01     0.02     0.05       1102 1.00
exper_c       0.05      0.00     0.04     0.05       1717 1.00

Family Specific Parameters:
      Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat
sigma     0.31      0.00     0.30     0.31       1656 1.00
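One thing to note in that output: sd(hgc.9_c) mixes much worse than everything else (Eff.Sample 103, Rhat 1.05). A minimal sketch of the usual first response, with illustrative (not prescriptive) settings:

# sketch: rerun with more iterations and a stricter sampler to improve mixing
modbrms2 <- brm(lnw ~ hgc.9_c + exper_c + (exper_c + hgc.9_c | id),
                data = wages, family = gaussian(), cores = 8,
                iter = 4000, warmup = 2000,
                control = list(adapt_delta = 0.95))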
The Stan code:
// generated with brms 2.7.0
functions {
}
data {
  int<lower=1> N;  // total number of observations
  vector[N] Y;  // response variable
  int<lower=1> K;  // number of population-level effects
  matrix[N, K] X;  // population-level design matrix
  // data for group-level effects of ID 1
  int<lower=1> J_1[N];  // grouping indicator per observation (which id)
  int<lower=1> N_1;  // number of grouping levels (here: 888 ids)
  int<lower=1> M_1;  // number of coefficients per level (here: 3)
  vector[N] Z_1_1;  // group-level predictor values (intercept)
  vector[N] Z_1_2;  // group-level predictor values (exper_c)
  vector[N] Z_1_3;  // group-level predictor values (hgc.9_c)
  int<lower=1> NC_1;  // number of group-level correlations (here: 3)
  int prior_only;  // should the likelihood be ignored?
}
transformed data {
  int Kc = K - 1;
  matrix[N, K - 1] Xc;  // centered version of X
  vector[K - 1] means_X;  // column means of X before centering
  for (i in 2:K) {  // skip the intercept column
    means_X[i - 1] = mean(X[, i]);
    Xc[, i - 1] = X[, i] - means_X[i - 1];
  }
}
parameters {
  vector[Kc] b;  // population-level effects
  real temp_Intercept;  // temporary intercept
  real<lower=0> sigma;  // residual SD
  vector<lower=0>[M_1] sd_1;  // group-level standard deviations
  matrix[M_1, N_1] z_1;  // unscaled group-level effects
  // cholesky factor of correlation matrix
  cholesky_factor_corr[M_1] L_1;
}
transformed parameters {
  // group-level effects (scaled and correlated via the Cholesky factor)
  matrix[N_1, M_1] r_1 = (diag_pre_multiply(sd_1, L_1) * z_1)';
  vector[N_1] r_1_1 = r_1[, 1];
  vector[N_1] r_1_2 = r_1[, 2];
  vector[N_1] r_1_3 = r_1[, 3];
}
model {
  vector[N] mu = temp_Intercept + Xc * b;
  for (n in 1:N) {
    mu[n] += r_1_1[J_1[n]] * Z_1_1[n] + r_1_2[J_1[n]] * Z_1_2[n] + r_1_3[J_1[n]] * Z_1_3[n];
  }
  // priors including all constants
  target += student_t_lpdf(temp_Intercept | 3, 2, 10);
  target += student_t_lpdf(sigma | 3, 0, 10)
            - 1 * student_t_lccdf(0 | 3, 0, 10);
  target += student_t_lpdf(sd_1 | 3, 0, 10)
            - 3 * student_t_lccdf(0 | 3, 0, 10);
  target += normal_lpdf(to_vector(z_1) | 0, 1);
  target += lkj_corr_cholesky_lpdf(L_1 | 1);
  // likelihood including all constants
  if (!prior_only) {
    target += normal_lpdf(Y | mu, sigma);
  }
}
generated quantities {
  // actual population-level intercept
  real b_Intercept = temp_Intercept - dot_product(means_X, b);
  // full correlation matrix recovered from its Cholesky factor
  corr_matrix[M_1] Cor_1 = multiply_lower_tri_self_transpose(L_1);
  vector<lower=-1,upper=1>[NC_1] cor_1;
  // take only relevant parts of correlation matrix
  cor_1[1] = Cor_1[1,2];
  cor_1[2] = Cor_1[1,3];
  cor_1[3] = Cor_1[2,3];
}
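For completeness, a minimal sketch of fitting this generated program directly with rstan, assuming the scode and sdata objects from the make_stancode()/make_standata() calls above:

# sketch: run the brms-generated Stan program outside of brms
library(rstan)
fit <- stan(model_code = scode,  # the Stan program listed above
            data = sdata,        # list built by make_standata()
            chains = 4, iter = 2000, cores = 4)
print(fit, pars = c("b_Intercept", "b", "sigma", "sd_1", "cor_1"))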