ARMA-GARCH with CustomDist ParameterValueError

Hi Community,

I’m currently learning the PyMC basics and would like to dive into scenario generation.

I came across this ARMA-GARCH(1,1).ipynb notebook and the Custom Dist AR & MA example.

My current attempt to combine both gets rewarded with “ParameterValueError: base >= 0”.

Would you have any idea on how to fix this and what is happening here?

Thanks!


import numpy as np
import pymc as pm
import pytensor
import pytensor.tensor as pt

from pymc.pytensorf import collect_default_updates
from pytensor.compile.mode import get_mode


def armagarch_dist(X, sigma_sq_init, epsilon_init, omega, alpha, beta, mu, rho, theta, size):
    def step(y_true, y_tm1, sigma_sq_tm1, epsilon_tm1, ω, α, β, ρ, θ, μ):
        # Conditional mean (ARMA part): E[y_t] = μ + ρ * y_{t-1} + θ * ε_{t-1}
        μ = μ + θ * epsilon_tm1 + ρ * y_tm1

        # Conditional variance (GARCH(1,1) part)
        σ2 = ω + α * epsilon_tm1 ** 2 + β * sigma_sq_tm1

        # y = E[y] + ε implies y ~ N(E[y], σ)
        y = pm.Normal.dist(mu=μ, sigma=pt.sqrt(σ2))

        # Back out the value of ε from the observed y and μ
        epsilon = y_true - μ

        return (σ2, epsilon, y), collect_default_updates([y])

    [_, _, y_hat], _ = pytensor.scan(
        fn=step,
        sequences=[{'input': X, 'taps': [0, -1]}],
        outputs_info=[sigma_sq_init, epsilon_init, None],
        non_sequences=[omega, alpha, beta, rho, theta, mu],
        strict=True,
        mode=get_mode(None),
    )

    return y_hat
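As a sanity check on the generative side, drawing from the dist directly with placeholder parameter values (roughly the true values from the data-generating code further down) seems to work; a minimal sketch:

# Quick forward-draw check of the dist on its own (not part of the model below).
# The parameter values here are just placeholders.
X_test = pt.as_tensor_variable(np.zeros(11))  # 11 points -> 10 scan steps with taps [0, -1]
test_draws = pm.draw(
    pm.CustomDist.dist(
        X_test, 1.0, 0.0,   # X, sigma_sq_init, epsilon_init
        0.5, 0.8, 0.15,     # omega, alpha, beta
        1.5, 0.95, 1.8,     # mu, rho, theta
        dist=armagarch_dist,
    ),
    draws=5,
)
# test_draws should come out with shape (5, 10): one simulated path per draw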

with pm.Model() as m:
    # Non-zero mean
    mu = pm.Normal('mu', 0, 1)

    # GARCH parameters
    omega = pm.Uniform('omega', 0, 1)
    alpha = pm.Uniform('alpha', 0, 1)
    beta = pm.Uniform('beta', 0, (1 - alpha))

    theta = pm.Normal('theta')
    rho = pm.HalfNormal('rho')

    # Initial values
    sigma_sq_init = pm.Exponential('sigma_sq_init', 1)
    epsilon_init = pm.Normal('epsilon_init')
    y_init = pm.Normal('y_init')

    # Data -- we need two containers, one for scan and one for observed. We append the initial
    # value to the one that goes to scan so that we don't have to drop the first observation.
    X_data = pm.MutableData('X_data', y)
    y_data = pm.MutableData('y_data', y)
    X = pt.concatenate([y_init[None], X_data])

    # ar_innov_obs = pm.MutableData("ar_innov_obs", np.zeros(trials - lags), dims=("steps",))
    y_data = pm.CustomDist(
        "armagarch_dist",
        X, sigma_sq_init, epsilon_init, omega, alpha, beta, mu, rho, theta,
        dist=armagarch_dist,
        observed=X_data,
    )

    armag = pm.Deterministic("armag", y_data)

with m:
    idata = pm.sample(init='jitter+adapt_diag_grad', target_accept=0.95)

## For reference, y above comes from the notebook's data-generating process:

rng = np.random.default_rng()

N = 250
true_omega = 0.5
true_alpha = 0.8
true_beta = 0.15
true_mu = 1.5

true_rho = 0.95
true_theta = 1.8

true_sigma_sq_init = 1
true_epsilon_init = 0 
true_y_init = 0

true_sigmas_sq = np.empty(N)
true_sigmas_sq[0] = true_sigma_sq_init

true_epsilons = np.empty(N)
true_epsilons[0] = true_epsilon_init

y = np.zeros(N)
y[0] = true_y_init

for t in range(1,N):
    true_sigmas_sq[t] = true_omega + true_alpha * true_epsilons[t-1] ** 2 + true_beta * true_sigmas_sq[t-1]
    true_epsilons[t] = rng.normal(loc=0, scale=np.sqrt(true_sigmas_sq[t]))
    y[t] = true_mu + true_rho * y[t-1] + true_theta * true_epsilons[t-1] +  true_epsilons[t]

---------------------------------------------------------------------------
ParameterValueError                       Traceback (most recent call last)
File ~/miniconda3/envs/pymc_env/lib/python3.11/site-packages/pytensor/scan/scan_perform.pyx:418, in pytensor.scan.scan_perform.perform()

ParameterValueError: base >= 0

During handling of the above exception, another exception occurred:

InnerFunctionError                        Traceback (most recent call last)
File ~/miniconda3/envs/pymc_env/lib/python3.11/site-packages/pytensor/scan/op.py:1576, in Scan.make_thunk.<locals>.p(node, inputs, outputs)
   1575 try:
-> 1576     t_fn, n_steps = scan_perform_ext.perform(
   1577         self.info.n_shared_outs,
   1578         self.info.n_mit_mot_outs,
   1579         self.info.n_seqs,
   1580         self.info.n_mit_mot,
   1581         self.info.n_mit_sot,
   1582         self.info.n_sit_sot,
   1583         self.info.n_nit_sot,
   1584         self.info.as_while,
   1585         cython_mintaps,
   1586         cython_pos,
   1587         cython_store_steps,
   1588         self.info.mit_mot_in_slices
   1589         + self.info.mit_sot_in_slices
   1590         + self.info.sit_sot_in_slices,
   1591         tap_array_len,
   1592         cython_vector_seqs,
   1593         cython_vector_outs,
   1594         self.info.mit_mot_out_slices,
   1595         cython_mitmots_preallocated,
   1596         mit_mot_out_to_tap_idx,
   1597         cython_outs_is_tensor,
   1598         inner_input_storage,
   1599         inner_output_storage,
   1600         cython_destroy_map,
   1601         inputs,
   1602         outputs,
   1603         outer_output_dtypes,
   1604         outer_output_ndims,
   1605         self.fn.vm,
   1606     )
   1607 except InnerFunctionError as exc:

File ~/miniconda3/envs/pymc_env/lib/python3.11/site-packages/pytensor/scan/scan_perform.pyx:420, in pytensor.scan.scan_perform.perform()

InnerFunctionError: (ParameterValueError('base >= 0'), <traceback object at 0x7fbb0e860a00>)

During handling of the above exception, another exception occurred:

ParameterValueError                       Traceback (most recent call last)
File ~/miniconda3/envs/pymc_env/lib/python3.11/site-packages/pytensor/compile/function/types.py:970, in Function.__call__(self, *args, **kwargs)
    968 try:
    969     outputs = (
--> 970         self.vm()
    971         if output_subset is None
    972         else self.vm(output_subset=output_subset)
    973     )
    974 except Exception:

File ~/miniconda3/envs/pymc_env/lib/python3.11/site-packages/pytensor/scan/op.py:1648, in Scan.make_thunk.<locals>.rval(p, i, o, n, allow_gc)
   1645 def rval(
   1646     p=p, i=node_input_storage, o=node_output_storage, n=node, allow_gc=allow_gc
   1647 ):
-> 1648     r = p(n, [x[0] for x in i], o)
   1649     for o in node.outputs:

File ~/miniconda3/envs/pymc_env/lib/python3.11/site-packages/pytensor/scan/op.py:1615, in Scan.make_thunk.<locals>.p(node, inputs, outputs)
   1612 if hasattr(self.fn.vm, "position_of_error") and hasattr(
   1613     self.fn.vm, "thunks"
   1614 ):
-> 1615     link_utils.raise_with_op(
   1616         self.fn.maker.fgraph,
   1617         self.fn.vm.nodes[self.fn.vm.position_of_error],
   1618         self.fn.vm.thunks[self.fn.vm.position_of_error],
   1619         exc_info=(exc_type, exc_value, exc_trace),
   1620     )
   1621 else:

File ~/miniconda3/envs/pymc_env/lib/python3.11/site-packages/pytensor/link/utils.py:531, in raise_with_op(fgraph, node, thunk, exc_info, storage_map)
    529     # Some exception need extra parameter in inputs. So forget the
    530     # extra long error message in that case.
--> 531 raise exc_value.with_traceback(exc_trace)

File ~/miniconda3/envs/pymc_env/lib/python3.11/site-packages/pytensor/scan/scan_perform.pyx:418, in pytensor.scan.scan_perform.perform()

ParameterValueError: base >= 0
Apply node that caused the error: Check{base >= 0}(*4-<Scalar(float64, shape=())>, Ge.0)
Toposort index: 1
Inputs types: [TensorType(float64, shape=()), TensorType(bool, shape=())]
Inputs shapes: [(), ()]
Inputs strides: [(), ()]
Inputs values: [array(-0.56258017), array(False)]
Outputs clients: [[Composite{...}(*7-<Scalar(float64, shape=())>, *3-<Scalar(float64, shape=())>, *5-<Scalar(float64, shape=())>, *6-<Scalar(float64, shape=())>, 2.0, Check{base >= 0}.0, 0, *2-<Scalar(float64, shape=())>, *9-<Scalar(float64, shape=())>, *0-<Scalar(float64, shape=())>, *8-<Scalar(float64, shape=())>, *4-<Scalar(float64, shape=())>, 0.9189385332046727, -0.5, *1-<Scalar(float64, shape=())>)]]


HINT: Use the PyTensor flag `exception_verbosity=high` for a debug print-out and storage map footprint of this Apply node.

During handling of the above exception, another exception occurred:

ParameterValueError                       Traceback (most recent call last)
Cell In[59], line 2
      1 with m:
----> 2     idata = pm.sample(init='jitter+adapt_diag_grad', target_accept=0.95)

File ~/miniconda3/envs/pymc_env/lib/python3.11/site-packages/pymc/sampling/mcmc.py:711, in sample(draws, tune, chains, cores, random_seed, progressbar, step, nuts_sampler, initvals, init, jitter_max_retries, n_init, trace, discard_tuned_samples, compute_convergence_checks, keep_warning_stat, return_inferencedata, idata_kwargs, nuts_sampler_kwargs, callback, mp_ctx, model, **kwargs)
    709         [kwargs.setdefault(k, v) for k, v in nuts_kwargs.items()]
    710     _log.info("Auto-assigning NUTS sampler...")
--> 711     initial_points, step = init_nuts(
    712         init=init,
    713         chains=chains,
    714         n_init=n_init,
    715         model=model,
    716         random_seed=random_seed_list,
    717         progressbar=progressbar,
    718         jitter_max_retries=jitter_max_retries,
    719         tune=tune,
    720         initvals=initvals,
    721         **kwargs,
    722     )
    724 if initial_points is None:
    725     # Time to draw/evaluate numeric start points for each chain.
    726     ipfns = make_initial_point_fns_per_chain(
    727         model=model,
    728         overrides=initvals,
    729         jitter_rvs=set(),
    730         chains=chains,
    731     )

File ~/miniconda3/envs/pymc_env/lib/python3.11/site-packages/pymc/sampling/mcmc.py:1356, in init_nuts(init, chains, n_init, model, random_seed, progressbar, jitter_max_retries, tune, initvals, **kwargs)
   1349 _log.info(f"Initializing NUTS using {init}...")
   1351 cb = [
   1352     pm.callbacks.CheckParametersConvergence(tolerance=1e-2, diff="absolute"),
   1353     pm.callbacks.CheckParametersConvergence(tolerance=1e-2, diff="relative"),
   1354 ]
-> 1356 initial_points = _init_jitter(
   1357     model,
   1358     initvals,
   1359     seeds=random_seed_list,
   1360     jitter="jitter" in init,
   1361     jitter_max_retries=jitter_max_retries,
   1362 )
   1364 apoints = [DictToArrayBijection.map(point) for point in initial_points]
   1365 apoints_data = [apoint.data for apoint in apoints]

File ~/miniconda3/envs/pymc_env/lib/python3.11/site-packages/pymc/sampling/mcmc.py:1250, in _init_jitter(model, initvals, seeds, jitter, jitter_max_retries)
   1248 if i < jitter_max_retries:
   1249     try:
-> 1250         model.check_start_vals(point)
   1251     except SamplingError:
   1252         # Retry with a new seed
   1253         seed = rng.randint(2**30, dtype=np.int64)

File ~/miniconda3/envs/pymc_env/lib/python3.11/site-packages/pymc/model/core.py:1650, in Model.check_start_vals(self, start)
   1644     valid_keys = ", ".join(value_names_set)
   1645     raise KeyError(
   1646         "Some start parameters do not appear in the model!\n"
   1647         f"Valid keys are: {valid_keys}, but {extra_keys} was supplied"
   1648     )
-> 1650 initial_eval = self.point_logps(point=elem)
   1652 if not all(np.isfinite(v) for v in initial_eval.values()):
   1653     raise SamplingError(
   1654         "Initial evaluation of model at starting point failed!\n"
   1655         f"Starting values:\n{elem}\n\n"
   1656         f"Logp initial evaluation results:\n{initial_eval}\n"
   1657         "You can call `model.debug()` for more details."
   1658     )

File ~/miniconda3/envs/pymc_env/lib/python3.11/site-packages/pymc/model/core.py:1685, in Model.point_logps(self, point, round_vals)
   1679 factors = self.basic_RVs + self.potentials
   1680 factor_logps_fn = [pt.sum(factor) for factor in self.logp(factors, sum=False)]
   1681 return {
   1682     factor.name: np.round(np.asarray(factor_logp), round_vals)
   1683     for factor, factor_logp in zip(
   1684         factors,
-> 1685         self.compile_fn(factor_logps_fn)(point),
   1686     )
   1687 }

File ~/miniconda3/envs/pymc_env/lib/python3.11/site-packages/pymc/pytensorf.py:593, in PointFunc.__call__(self, state)
    592 def __call__(self, state):
--> 593     return self.f(**state)

File ~/miniconda3/envs/pymc_env/lib/python3.11/site-packages/pytensor/compile/function/types.py:983, in Function.__call__(self, *args, **kwargs)
    981     if hasattr(self.vm, "thunks"):
    982         thunk = self.vm.thunks[self.vm.position_of_error]
--> 983     raise_with_op(
    984         self.maker.fgraph,
    985         node=self.vm.nodes[self.vm.position_of_error],
    986         thunk=thunk,
    987         storage_map=getattr(self.vm, "storage_map", None),
    988     )
    989 else:
    990     # old-style linkers raise their own exceptions
    991     raise

File ~/miniconda3/envs/pymc_env/lib/python3.11/site-packages/pytensor/link/utils.py:531, in raise_with_op(fgraph, node, thunk, exc_info, storage_map)
    526     warnings.warn(
    527         f"{exc_type} error does not allow us to add an extra error message"
    528     )
    529     # Some exception need extra parameter in inputs. So forget the
    530     # extra long error message in that case.
--> 531 raise exc_value.with_traceback(exc_trace)

File ~/miniconda3/envs/pymc_env/lib/python3.11/site-packages/pytensor/compile/function/types.py:970, in Function.__call__(self, *args, **kwargs)
    967 t0_fn = time.perf_counter()
    968 try:
    969     outputs = (
--> 970         self.vm()
    971         if output_subset is None
    972         else self.vm(output_subset=output_subset)
    973     )
    974 except Exception:
    975     restore_defaults()

File ~/miniconda3/envs/pymc_env/lib/python3.11/site-packages/pytensor/scan/op.py:1648, in Scan.make_thunk.<locals>.rval(p, i, o, n, allow_gc)
   1645 def rval(
   1646     p=p, i=node_input_storage, o=node_output_storage, n=node, allow_gc=allow_gc
   1647 ):
-> 1648     r = p(n, [x[0] for x in i], o)
   1649     for o in node.outputs:
   1650         compute_map[o][0] = True

File ~/miniconda3/envs/pymc_env/lib/python3.11/site-packages/pytensor/scan/op.py:1615, in Scan.make_thunk.<locals>.p(node, inputs, outputs)
   1610 exc_trace = exc.args[1]
   1612 if hasattr(self.fn.vm, "position_of_error") and hasattr(
   1613     self.fn.vm, "thunks"
   1614 ):
-> 1615     link_utils.raise_with_op(
   1616         self.fn.maker.fgraph,
   1617         self.fn.vm.nodes[self.fn.vm.position_of_error],
   1618         self.fn.vm.thunks[self.fn.vm.position_of_error],
   1619         exc_info=(exc_type, exc_value, exc_trace),
   1620     )
   1621 else:
   1622     raise exc_value.with_traceback(exc_trace)

File ~/miniconda3/envs/pymc_env/lib/python3.11/site-packages/pytensor/link/utils.py:531, in raise_with_op(fgraph, node, thunk, exc_info, storage_map)
    526     warnings.warn(
    527         f"{exc_type} error does not allow us to add an extra error message"
    528     )
    529     # Some exception need extra parameter in inputs. So forget the
    530     # extra long error message in that case.
--> 531 raise exc_value.with_traceback(exc_trace)

File ~/miniconda3/envs/pymc_env/lib/python3.11/site-packages/pytensor/scan/scan_perform.pyx:418, in pytensor.scan.scan_perform.perform()

ParameterValueError: base >= 0
Apply node that caused the error: Check{base >= 0}(*4-<Scalar(float64, shape=())>, Ge.0)
Toposort index: 1
Inputs types: [TensorType(float64, shape=()), TensorType(bool, shape=())]
Inputs shapes: [(), ()]
Inputs strides: [(), ()]
Inputs values: [array(-0.56258017), array(False)]
Outputs clients: [[Composite{...}(*7-<Scalar(float64, shape=())>, *3-<Scalar(float64, shape=())>, *5-<Scalar(float64, shape=())>, *6-<Scalar(float64, shape=())>, 2.0, Check{base >= 0}.0, 0, *2-<Scalar(float64, shape=())>, *9-<Scalar(float64, shape=())>, *0-<Scalar(float64, shape=())>, *8-<Scalar(float64, shape=())>, *4-<Scalar(float64, shape=())>, 0.9189385332046727, -0.5, *1-<Scalar(float64, shape=())>)]]

[...]

HINT: Use the PyTensor flag `exception_verbosity=high` for a debug print-out and storage map footprint of this Apply node.
Apply node that caused the error: Scan{scan_fn, while_loop=False, inplace=all}(DropDims{axis=0}.0, Mul.0, Subtensor{start:stop:step}.0, Subtensor{start:stop:step}.0, SetSubtensor{:stop}.0, SetSubtensor{:stop}.0, Composite{...}.3, Composite{...}.1, Composite{...}.1, Composite{...}.2, theta, mu)
Toposort index: 47
Inputs types: [TensorType(int64, shape=()), TensorType(float64, shape=(None,)), TensorType(float64, shape=(None,)), TensorType(float64, shape=(None,)), TensorType(float64, shape=(1,)), TensorType(float64, shape=(1,)), TensorType(int64, shape=()), TensorType(float64, shape=()), TensorType(float64, shape=()), TensorType(float64, shape=()), TensorType(float64, shape=()), TensorType(float64, shape=())]
Inputs shapes: [(), (250,), (250,), (250,), (1,), (1,), (), (), (), (), (), ()]
Inputs strides: [(), (8,), (8,), (8,), (8,), (8,), (), (), (), (), (), ()]
Inputs values: [array(250), 'not shown', 'not shown', 'not shown', array([1.93402925]), array([-0.56258017]), array(250), array(0.33794936), array(0.64579449), array(0.23985711), array(-0.87767689), array(-0.89979126)]
Outputs clients: [[], [], [Sum{axes=None}(X_data_logprob)]]

HINT: Re-running with most PyTensor optimizations disabled could provide a back-trace showing when this node was created. This can be done by setting the PyTensor flag 'optimizer=fast_compile'. If that does not work, PyTensor optimizations can be disabled with 'optimizer=None'.
HINT: Use the PyTensor flag `exception_verbosity=high` for a debug print-out and storage map footprint of this Apply node.

I think @jessegrabowski found a similar issue. It counts as a bug. Could you open an issue in the pymc GitHub repository?

Hi, thank you for the quick response. A pity. I will open an issue!

IIRC you can hack around it by taking the absolute value of epsilon_tm1 before squaring, like this:

σ2 = ω + α * pt.abs(epsilon_tm1) ** 2 + β * sigma_sq_tm1

The bug is related to an unnecessary check on the base of the power when doing logp inference. Please tag me if you open an issue.
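For clarity, here is what the full step function from the original post would look like with that workaround applied (only the σ2 line changes):

def step(y_true, y_tm1, sigma_sq_tm1, epsilon_tm1, ω, α, β, ρ, θ, μ):
    μ = μ + θ * epsilon_tm1 + ρ * y_tm1

    # pt.abs keeps the base of the power non-negative, which sidesteps the
    # `base >= 0` check that logp inference adds around epsilon_tm1 ** 2
    σ2 = ω + α * pt.abs(epsilon_tm1) ** 2 + β * sigma_sq_tm1

    y = pm.Normal.dist(mu=μ, sigma=pt.sqrt(σ2))
    epsilon = y_true - μ
    return (σ2, epsilon, y), collect_default_updates([y])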


This works! Thank you!