Hi!
I was hoping someone may be able to clarify something for me. I am trying to do a timeseries forecasting with the GaussianRandomWalk function. I have been suggested that my code is wrong as I’ve modeled it so that the standard deviation of the latent walk is the same as the observation noise, which seems like it might be a mistake. Is it a mistake? How would i change it?
import pymc3 as pm
#import seaborn as sns
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# generate a random walk
sd = .1
N = 200
deltas = np.random.normal(scale=sd, size=N)
y = np.cumsum(deltas)
x = np.arange(N)
df = pd.DataFrame({‘y’: y})
df = df.reindex(np.arange(250))
with pm.Model() as model:
sd = pm.HalfNormal(‘sd’)
mu = pm.Uniform(“mu”, 0, 100)
prior = pm.GaussianRandomWalk(‘prior’, mu=mu, sd=sd, shape=len(df))
obs = pm.Normal("obs", mu=prior, sd=sd, observed=df["y"])
# graph = pm.model_to_graphviz(model)
# print(graph)
trace = pm.sample(2000, chains=1)
pm.traceplot(trace)
plt.show()
with model:
ppc = pm.sample_posterior_predictive(trace)
pm.traceplot(ppc)
plt.show()
print(ppc)