Unexpected results from logistic regression model

The above question, was an unexpected snag in the exploration of the following question:

The results show that the below data is different than the above, but I don’t understand why. The only difference is that I’m using the pymc-style indexing in the below, while I’m explicitly mapping the distributions to the groups in the above model.

def create_no_pooling_model(data, model_features, samples=1000, burn_in=1000):

    #create index
    if 'idx' not in data.columns:
        data.loc[:, 'idx'] = np.arange(data.shape[0])

    group_idxs, groups = pd.factorize(data.group)
    coords = {
        "group": groups,
        "group_id": np.arange(len(group_idxs)),
        "feature_id" : np.arange(len(model_features)),
    }
    with pm.Model(coords=coords) as model:
        #define data

        group_idx = pm.ConstantData('group_idx', group_idxs, dims="group_id")

        regressors = pm.ConstantData("regressors", data[model_features], dims=("group_id", "feature_id"))

        target = pm.ConstantData("target", data.target)

        index = pm.ConstantData("index", data.idx)

        #define priors
        intercept = pm.Normal('intercept', mu=0, sigma=5, dims="group")

        betas = pm.Normal('betas', mu=0, sigma=5, dims=("group", "feature_id"))

        # calc logit
        logit = intercept[group_idx] + (regressors[group_idx] * betas[group_idx]).sum(axis=-1)

        #get probabilities
        p_i = pm.Deterministic('p_i', pm.math.invlogit(logit))

        #define the bernoulli likelihood
        y_observed = pm.Bernoulli('y_obs', p=p_i, observed=target)
        trace = pm.sample(samples, tune=burn_in, cores=2, nuts=dict(max_treedepth=15, target_accept=.97)
        )
        return dict(
            model =  model, 
            trace = trace,
            data = data
        )

Here is the parameter summary:

Here are the proba summaries: