Yes, I had a look through quantecon.
But after going through the file you linked, I re-wrote the code with the help of that reference. Could you please have a glance at what I have interpreted?
# Transition matrix of a 3-state Markov chain (states s0, s1 and s2).
# Row i holds P(next state = j | current state = i); each row sums to 1.
trans_mat = np.array(
    [
        [0.1, 0.2, 0.7],
        [0.09, 0.01, 0.9],
        [0.2, 0.8, 0.0],
    ]
)
# Visualise the matrix as an annotated heatmap with state labels on top.
heat_ax = sb.heatmap(trans_mat, cmap='YlOrRd', annot=True, linewidth=.3)
heat_ax.set(xlabel="", ylabel="")
heat_ax.xaxis.tick_top()
# Use current state as s0 prob list
# Estimate the transition matrix from data, using DiscreteMarkovChain as a likelihood
# (Ref https://github.com/pymc-devs/pymc-experimental/blob/main/notebooks/discrete_markov_chain.ipynb)
# NOTE(review): current_state is never read anywhere below — presumably a
# leftover from an earlier draft; confirm before removing.
current_state=0
def generate_chains(P, steps, n_chains=1, rng=None):
    """Simulate discrete Markov chains from a transition matrix.

    Parameters
    ----------
    P : array-like, shape (n_states, n_states)
        Row-stochastic transition matrix: P[i, j] = P(next = j | current = i).
    steps : int
        Number of time steps per chain (including the initial state).
    n_chains : int, optional
        Number of independent chains to simulate (default 1).
    rng : numpy.random.Generator, optional
        Source of randomness; a fresh default generator is used when None.
        New keyword-only-by-position parameter — existing calls are unaffected.

    Returns
    -------
    numpy.ndarray of int64
        Shape (n_chains, steps), squeezed to (steps,) when n_chains == 1.
    """
    P = np.asarray(P)
    if rng is None:
        rng = np.random.default_rng()
    n_states = P.shape[0]
    output = np.empty((n_chains, steps), dtype="int64")
    # Bug fix: the original drew the initial state from the *global*
    # trans_mat[0], silently ignoring the P argument. Use the parameter,
    # keeping the original convention of row 0 as the initial distribution.
    output[:, 0] = rng.choice(n_states, size=n_chains, p=P[0])
    for t in range(1, steps):
        for i in range(n_chains):
            # Step each chain according to the row of its previous state.
            output[i, t] = rng.choice(n_states, p=P[output[i, t - 1]])
    return output.squeeze()
chains = generate_chains(trans_mat, 10, n_chains=2)
# Posterior inference: put a Dirichlet prior on each row of the transition
# matrix and use DiscreteMarkovChain as the likelihood of the observed chains.
with pm.Model() as model:
    # Uniform initial-state distribution over the 3 states.
    # (The original size=(100,) did not match the 2x10 observed chains.)
    x0 = pm.Categorical.dist(p=np.ones(3) / 3)
    # One flat Dirichlet(1,1,1) prior per row -> a full 3x3 stochastic matrix.
    P = pm.Dirichlet("P", a=np.ones((3, 3)))
    # Bug fix: the likelihood must be parameterised by the *latent* P that is
    # being estimated — passing the known trans_mat meant nothing was learned.
    discrete_mc = DiscreteMarkovChain("MarkovChain", P=P, init_dist=x0, observed=chains)
    idata = pm.sample()
az.summary(idata)
RANDOM_SEED = 58
rng = np.random.default_rng(RANDOM_SEED)

# Prior predictive check: draw chains implied by the prior alone. No
# conditioning on data happens here — `observed` only fixes the shape of
# the generated "MarkovChain" samples.
with pm.Model() as model:
    # Uniform initial-state distribution (size=(100,) removed — it did not
    # match the 2x10 observed chains).
    x0 = pm.Categorical.dist(p=np.ones(3) / 3)
    # Row-wise flat Dirichlet prior over the full 3x3 transition matrix.
    P = pm.Dirichlet("P", a=np.ones((3, 3)))
    # Bug fix: use the latent P, not the fixed trans_mat, so the prior
    # predictive actually reflects uncertainty in the transition matrix.
    discrete_mc = DiscreteMarkovChain("MarkovChain", P=P, init_dist=x0, observed=chains)
    idata = pm.sample_prior_predictive(samples=50, random_seed=rng)
az.summary(idata)
Actually, I was trying to understand the difference between `sample` and `sample_prior_predictive`.
We have already written the chain-generation logic manually in `generate_chains`.
What does the `DiscreteMarkovChain` section of the code actually do?