From 924412f1e8de3a0b5ac4cc0a296c6107faa9d9cf Mon Sep 17 00:00:00 2001 From: AlexAndorra Date: Wed, 13 Oct 2021 18:11:33 +0200 Subject: [PATCH 01/22] Make prior pred return inference data --- pymc/sampling.py | 31 ++++++++++++++++++++++++------- 1 file changed, 24 insertions(+), 7 deletions(-) diff --git a/pymc/sampling.py b/pymc/sampling.py index e3d5c1ac7f..1867155391 100644 --- a/pymc/sampling.py +++ b/pymc/sampling.py @@ -271,7 +271,7 @@ def sample( idata_kwargs: dict = None, mp_ctx=None, **kwargs, -): +) -> Union[InferenceData, MultiTrace]: r"""Draw samples from the posterior using the given step methods. Multiple step methods are supported via compound step methods. @@ -338,7 +338,7 @@ def sample( init methods. return_inferencedata : bool, default=True Whether to return the trace as an :class:`arviz:arviz.InferenceData` (True) object or a `MultiTrace` (False) - Defaults to `False`, but we'll switch to `True` in an upcoming release. + Defaults to `True`. idata_kwargs : dict, optional Keyword arguments for :func:`pymc.to_inference_data` mp_ctx : multiprocessing.context.BaseContent @@ -1893,7 +1893,9 @@ def sample_prior_predictive( var_names: Optional[Iterable[str]] = None, random_seed=None, mode: Optional[Union[str, Mode]] = None, -) -> Dict[str, np.ndarray]: + return_inferencedata=None, + idata_kwargs: dict = None, +) -> Union[InferenceData, Dict[str, np.ndarray]]: """Generate samples from the prior predictive distribution. Parameters @@ -1909,14 +1911,21 @@ def sample_prior_predictive( Seed for the random number generator. mode: The mode used by ``aesara.function`` to compile the graph. + return_inferencedata : bool, default=True + Whether to return an :class:`arviz:arviz.InferenceData` (True) object or a dictionary (False). + Defaults to `True`. + idata_kwargs : dict, optional + Keyword arguments for :func:`pymc.to_inference_data` Returns ------- - dict - Dictionary with variable names as keys. The values are numpy arrays of prior - samples. + arviz.InferenceData or Dict + An ArviZ ``InferenceData`` object containing the prior and prior predictive samples (default), + or a dictionary with variable names as keys and samples as numpy arrays. """ model = modelcontext(model) + if return_inferencedata is None: + return_inferencedata = True if model.potentials: warnings.warn( @@ -1980,7 +1989,15 @@ def sample_prior_predictive( for var_name in vars_: if var_name in data: prior[var_name] = data[var_name] - return prior + + if not return_inferencedata: + return prior + + ikwargs = dict(model=model) + if idata_kwargs: + ikwargs.update(idata_kwargs) + + return pm.to_inference_data(prior=prior, **ikwargs) def _init_jitter(model, point, chains, jitter_max_retries): From ca2613418c64bf8610afb324ccb3bcafd05788bd Mon Sep 17 00:00:00 2001 From: Alexandre ANDORRA Date: Thu, 14 Oct 2021 11:34:22 +0200 Subject: [PATCH 02/22] Add Osvaldo's suggestion Co-authored-by: Osvaldo Martin --- pymc/sampling.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pymc/sampling.py b/pymc/sampling.py index 1867155391..588173a922 100644 --- a/pymc/sampling.py +++ b/pymc/sampling.py @@ -1911,9 +1911,9 @@ def sample_prior_predictive( Seed for the random number generator. mode: The mode used by ``aesara.function`` to compile the graph. - return_inferencedata : bool, default=True + return_inferencedata : bool Whether to return an :class:`arviz:arviz.InferenceData` (True) object or a dictionary (False). - Defaults to `True`. + Defaults to True. 
idata_kwargs : dict, optional Keyword arguments for :func:`pymc.to_inference_data` From cf71383cd455579e4d8b54211c8f0718ee3a57bd Mon Sep 17 00:00:00 2001 From: AlexAndorra Date: Thu, 14 Oct 2021 13:02:05 +0200 Subject: [PATCH 03/22] Set return_inferencedata to True by default instead of None --- pymc/sampling.py | 13 +++---------- 1 file changed, 3 insertions(+), 10 deletions(-) diff --git a/pymc/sampling.py b/pymc/sampling.py index 588173a922..4f1b8aebb8 100644 --- a/pymc/sampling.py +++ b/pymc/sampling.py @@ -267,7 +267,7 @@ def sample( callback=None, jitter_max_retries=10, *, - return_inferencedata=None, + return_inferencedata=True, idata_kwargs: dict = None, mp_ctx=None, **kwargs, @@ -336,7 +336,7 @@ def sample( Maximum number of repeated attempts (per chain) at creating an initial matrix with uniform jitter that yields a finite probability. This applies to ``jitter+adapt_diag`` and ``jitter+adapt_full`` init methods. - return_inferencedata : bool, default=True + return_inferencedata : bool Whether to return the trace as an :class:`arviz:arviz.InferenceData` (True) object or a `MultiTrace` (False) Defaults to `True`. idata_kwargs : dict, optional @@ -450,9 +450,6 @@ def sample( if not isinstance(random_seed, abc.Iterable): raise TypeError("Invalid value for `random_seed`. Must be tuple, list or int") - if return_inferencedata is None: - return_inferencedata = True - if not discard_tuned_samples and not return_inferencedata: warnings.warn( "Tuning samples will be included in the returned `MultiTrace` object, which can lead to" @@ -1893,7 +1890,7 @@ def sample_prior_predictive( var_names: Optional[Iterable[str]] = None, random_seed=None, mode: Optional[Union[str, Mode]] = None, - return_inferencedata=None, + return_inferencedata=True, idata_kwargs: dict = None, ) -> Union[InferenceData, Dict[str, np.ndarray]]: """Generate samples from the prior predictive distribution. @@ -1924,8 +1921,6 @@ def sample_prior_predictive( or a dictionary with variable names as keys and samples as numpy arrays. """ model = modelcontext(model) - if return_inferencedata is None: - return_inferencedata = True if model.potentials: warnings.warn( @@ -1992,11 +1987,9 @@ def sample_prior_predictive( if not return_inferencedata: return prior - ikwargs = dict(model=model) if idata_kwargs: ikwargs.update(idata_kwargs) - return pm.to_inference_data(prior=prior, **ikwargs) From eb7c40c8024a000040e65c3a890b8c999070fc76 Mon Sep 17 00:00:00 2001 From: AlexAndorra Date: Thu, 14 Oct 2021 13:14:17 +0200 Subject: [PATCH 04/22] Make posterior pred return inference data --- pymc/sampling.py | 22 +++++++++++++++++----- 1 file changed, 17 insertions(+), 5 deletions(-) diff --git a/pymc/sampling.py b/pymc/sampling.py index 4f1b8aebb8..ad8ffb3f55 100644 --- a/pymc/sampling.py +++ b/pymc/sampling.py @@ -1532,7 +1532,9 @@ def sample_posterior_predictive( random_seed=None, progressbar: bool = True, mode: Optional[Union[str, Mode]] = None, -) -> Dict[str, np.ndarray]: + return_inferencedata=True, + idata_kwargs: dict = None, +) -> Union[InferenceData, Dict[str, np.ndarray]]: """Generate posterior predictive samples from a model given a trace. Parameters @@ -1567,12 +1569,17 @@ def sample_posterior_predictive( time until completion ("expected time of arrival"; ETA). mode: The mode used by ``aesara.function`` to compile the graph. + return_inferencedata : bool + Whether to return an :class:`arviz:arviz.InferenceData` (True) object or a dictionary (False). + Defaults to True. 
+ idata_kwargs : dict, optional + Keyword arguments for :func:`pymc.to_inference_data` Returns ------- - samples : dict - Dictionary with the variable names as keys, and values numpy arrays containing - posterior predictive samples. + arviz.InferenceData or Dict + An ArviZ ``InferenceData`` object containing the posterior predictive samples (default), or + a dictionary with variable names as keys, and samples numpy arrays. """ _trace: Union[MultiTrace, PointList] @@ -1721,7 +1728,12 @@ def sample_posterior_predictive( for k, ary in ppc_trace.items(): ppc_trace[k] = ary.reshape((nchain, len_trace, *ary.shape[1:])) - return ppc_trace + if not return_inferencedata: + return ppc_trace + ikwargs = dict(model=model) + if idata_kwargs: + ikwargs.update(idata_kwargs) + return pm.to_inference_data(posterior_predictive=ppc_trace, **ikwargs) def sample_posterior_predictive_w( From 991fe04fed2675b497f645b35129f5177f84fa9b Mon Sep 17 00:00:00 2001 From: AlexAndorra Date: Thu, 14 Oct 2021 13:34:41 +0200 Subject: [PATCH 05/22] Make posterior_predictive_w return inference data --- pymc/sampling.py | 24 +++++++++++++++++++----- 1 file changed, 19 insertions(+), 5 deletions(-) diff --git a/pymc/sampling.py b/pymc/sampling.py index ad8ffb3f55..5c22026aac 100644 --- a/pymc/sampling.py +++ b/pymc/sampling.py @@ -1579,7 +1579,7 @@ def sample_posterior_predictive( ------- arviz.InferenceData or Dict An ArviZ ``InferenceData`` object containing the posterior predictive samples (default), or - a dictionary with variable names as keys, and samples numpy arrays. + a dictionary with variable names as keys, and samples as numpy arrays. """ _trace: Union[MultiTrace, PointList] @@ -1743,6 +1743,8 @@ def sample_posterior_predictive_w( weights: Optional[ArrayLike] = None, random_seed: Optional[int] = None, progressbar: bool = True, + return_inferencedata=True, + idata_kwargs: dict = None, ): """Generate weighted posterior predictive samples from a list of models and a list of traces according to a set of weights. @@ -1769,12 +1771,18 @@ def sample_posterior_predictive_w( Whether or not to display a progress bar in the command line. The bar shows the percentage of completion, the sampling speed in samples per second (SPS), and the estimated remaining time until completion ("expected time of arrival"; ETA). + return_inferencedata : bool + Whether to return an :class:`arviz:arviz.InferenceData` (True) object or a dictionary (False). + Defaults to True. + idata_kwargs : dict, optional + Keyword arguments for :func:`pymc.to_inference_data` Returns ------- - samples : dict - Dictionary with the variables as keys. The values corresponding to the - posterior predictive samples from the weighted models. + arviz.InferenceData or Dict + An ArviZ ``InferenceData`` object containing the posterior predictive samples from the + weighted models (default), or a dictionary with variable names as keys, and samples as + numpy arrays. 
""" if isinstance(traces[0], InferenceData): n_samples = [ @@ -1893,7 +1901,13 @@ def sample_posterior_predictive_w( except KeyboardInterrupt: pass else: - return {k: np.asarray(v) for k, v in ppc.items()} + ppc = {k: np.asarray(v) for k, v in ppc.items()} + if not return_inferencedata: + return ppc + ikwargs = dict(model=models) + if idata_kwargs: + ikwargs.update(idata_kwargs) + return pm.to_inference_data(posterior_predictive=ppc, **ikwargs) def sample_prior_predictive( From 772f4544c601e681e9fb1eac0358077d809663da Mon Sep 17 00:00:00 2001 From: AlexAndorra Date: Thu, 14 Oct 2021 13:38:49 +0200 Subject: [PATCH 06/22] Add release note --- RELEASE-NOTES.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/RELEASE-NOTES.md b/RELEASE-NOTES.md index 742f5d9462..d31b500260 100644 --- a/RELEASE-NOTES.md +++ b/RELEASE-NOTES.md @@ -8,6 +8,8 @@ - The GLM submodule has been removed, please use [Bambi](https://bambinos.github.io/bambi/) instead. - The `Distribution` keyword argument `testval` has been deprecated in favor of `initval`. Furthermore `initval` no longer assigns a `tag.test_value` on tensors since the initial values are now kept track of by the model object ([see #4913](https://github.com/pymc-devs/pymc/pull/4913)). - `pm.sample` now returns results as `InferenceData` instead of `MultiTrace` by default (see [#4744](https://github.com/pymc-devs/pymc/pull/4744)). +- `pm.sample_prior_predictive`, `pm.sample_posterior_predictive` and `pm.sample_posterior_predictive_w` now return an `InferenceData` object + by default, instead of a dictionary (see [#5073](https://github.com/pymc-devs/pymc/pull/5073)). - `pm.sample_prior_predictive` no longer returns transformed variable values by default. Pass them by name in `var_names` if you want to obtain these draws (see [4769](https://github.com/pymc-devs/pymc/pull/4769)). - ⚠ `pm.Bound` interface no longer accepts a callable class as argument, instead it requires an instantiated distribution (created via the `.dist()` API) to be passed as an argument. In addition, Bound no longer returns a class instance but works as a normal PyMC distribution. Finally, it is no longer possible to do predictive random sampling from Bounded variables. Please, consult the new documentation for details on how to use Bounded variables (see [4815](https://github.com/pymc-devs/pymc/pull/4815)). - `pm.DensityDist` no longer accepts the `logp` as its first position argument. It is now an optional keyword argument. If you pass a callable as the first positional argument, a `TypeError` will be raised (see [5026](https://github.com/pymc-devs/pymc3/pull/5026)). 
From 84ab2e810cfc697ff2be8c6b2c438cb61f6b5414 Mon Sep 17 00:00:00 2001 From: AlexAndorra Date: Thu, 14 Oct 2021 15:27:42 +0200 Subject: [PATCH 07/22] black --- pymc/tests/test_data_container.py | 40 ++++++++++++++++++------------- 1 file changed, 23 insertions(+), 17 deletions(-) diff --git a/pymc/tests/test_data_container.py b/pymc/tests/test_data_container.py index e6506a1c9d..cfbdaa32a9 100644 --- a/pymc/tests/test_data_container.py +++ b/pymc/tests/test_data_container.py @@ -55,17 +55,19 @@ def test_sample(self): prior_trace1 = pm.sample_prior_predictive(1000) pp_trace1 = pm.sample_posterior_predictive(idata, samples=1000) - assert prior_trace0["b"].shape == (1000,) - assert prior_trace0["obs"].shape == (1000, 100) - assert prior_trace1["obs"].shape == (1000, 200) + assert prior_trace0.prior["b"].shape == (1, 1000) + assert prior_trace0.prior_predictive["obs"].shape == (1, 1000, 100) + assert prior_trace1.prior_predictive["obs"].shape == (1, 1000, 200) - assert pp_trace0["obs"].shape == (1000, 100) - - np.testing.assert_allclose(x, pp_trace0["obs"].mean(axis=0), atol=1e-1) - - assert pp_trace1["obs"].shape == (1000, 200) + assert pp_trace0.posterior_predictive["obs"].shape == (1, 1000, 100) + np.testing.assert_allclose( + x, pp_trace0.posterior_predictive["obs"].mean(("chain", "draw")), atol=1e-1 + ) - np.testing.assert_allclose(x_pred, pp_trace1["obs"].mean(axis=0), atol=1e-1) + assert pp_trace1.posterior_predictive["obs"].shape == (1, 1000, 200) + np.testing.assert_allclose( + x_pred, pp_trace1.posterior_predictive["obs"].mean(("chain", "draw")), atol=1e-1 + ) def test_sample_posterior_predictive_after_set_data(self): with pm.Model() as model: @@ -86,8 +88,10 @@ def test_sample_posterior_predictive_after_set_data(self): pm.set_data(new_data={"x": x_test}) y_test = pm.sample_posterior_predictive(trace) - assert y_test["obs"].shape == (1000, 3) - np.testing.assert_allclose(x_test, y_test["obs"].mean(axis=0), atol=1e-1) + assert y_test.posterior_predictive["obs"].shape == (1, 1000, 3) + np.testing.assert_allclose( + x_test, y_test.posterior_predictive["obs"].mean(("chain", "draw")), atol=1e-1 + ) def test_sample_after_set_data(self): with pm.Model() as model: @@ -116,8 +120,10 @@ def test_sample_after_set_data(self): ) pp_trace = pm.sample_posterior_predictive(new_idata, 1000) - assert pp_trace["obs"].shape == (1000, 3) - np.testing.assert_allclose(new_y, pp_trace["obs"].mean(axis=0), atol=1e-1) + assert pp_trace.posterior_predictive["obs"].shape == (1, 1000, 3) + np.testing.assert_allclose( + new_y, pp_trace.posterior_predictive["obs"].mean(("chain", "draw")), atol=1e-1 + ) def test_shared_data_as_index(self): """ @@ -130,7 +136,7 @@ def test_shared_data_as_index(self): alpha = pm.Normal("alpha", 0, 1.5, size=3) pm.Normal("obs", alpha[index], np.sqrt(1e-2), observed=y) - prior_trace = pm.sample_prior_predictive(1000, var_names=["alpha"]) + prior_trace = pm.sample_prior_predictive(1000) idata = pm.sample( 1000, init=None, @@ -146,10 +152,10 @@ def test_shared_data_as_index(self): pm.set_data(new_data={"index": new_index, "y": new_y}) pp_trace = pm.sample_posterior_predictive(idata, 1000, var_names=["alpha", "obs"]) - assert prior_trace["alpha"].shape == (1000, 3) + assert prior_trace.prior["alpha"].shape == (1, 1000, 3) assert idata.posterior["alpha"].shape == (1, 1000, 3) - assert pp_trace["alpha"].shape == (1000, 3) - assert pp_trace["obs"].shape == (1000, 3) + assert pp_trace.posterior_predictive["alpha"].shape == (1, 1000, 3) + assert pp_trace.posterior_predictive["obs"].shape == 
(1, 1000, 3) def test_shared_data_as_rv_input(self): """ From 89422061775849291d2ddefa5aff2c66221515c6 Mon Sep 17 00:00:00 2001 From: AlexAndorra Date: Thu, 14 Oct 2021 15:35:36 +0200 Subject: [PATCH 08/22] Fix tests missing --- pymc/tests/test_missing.py | 20 ++++++++------------ 1 file changed, 8 insertions(+), 12 deletions(-) diff --git a/pymc/tests/test_missing.py b/pymc/tests/test_missing.py index f252200990..a532bdf3b1 100644 --- a/pymc/tests/test_missing.py +++ b/pymc/tests/test_missing.py @@ -27,15 +27,14 @@ @pytest.mark.parametrize( - "data", - [ma.masked_values([1, 2, -1, 4, -1], value=-1), pd.DataFrame([1, 2, np.nan, 4, np.nan])], + "data", [ma.masked_values([1, 2, -1, 4, -1], value=-1), pd.DataFrame([1, 2, np.nan, 4, np.nan])] ) def test_missing(data): with Model() as model: x = Normal("x", 1, 1) with pytest.warns(ImputationWarning): - y = Normal("y", x, 1, observed=data) + _ = Normal("y", x, 1, observed=data) assert "y_missing" in model.named_vars @@ -43,7 +42,7 @@ def test_missing(data): assert not np.isnan(model.logp(test_point)) with model: - prior_trace = sample_prior_predictive() + prior_trace = sample_prior_predictive(return_inferencedata=False) assert {"x", "y"} <= set(prior_trace.keys()) @@ -61,7 +60,7 @@ def test_missing_with_predictors(): assert not np.isnan(model.logp(test_point)) with model: - prior_trace = sample_prior_predictive() + prior_trace = sample_prior_predictive(return_inferencedata=False) assert {"x", "y"} <= set(prior_trace.keys()) @@ -77,7 +76,7 @@ def test_missing_dual_observations(): with pytest.warns(ImputationWarning): ovar2 = Normal("o2", mu=beta2 * latent, observed=obs2) - prior_trace = sample_prior_predictive() + prior_trace = sample_prior_predictive(return_inferencedata=False) assert {"beta1", "beta2", "theta", "o1", "o2"} <= set(prior_trace.keys()) # TODO: Assert something trace = sample(chains=1, draws=50) @@ -101,7 +100,7 @@ def test_interval_missing_observations(): model.rvs_to_values[model.named_vars["theta1_observed"]].tag.transform, Interval ) - prior_trace = sample_prior_predictive() + prior_trace = sample_prior_predictive(return_inferencedata=False) # Make sure the observed + missing combined deterministics have the # same shape as the original observations vectors @@ -122,10 +121,7 @@ def test_interval_missing_observations(): assert {"theta1", "theta2"} <= set(prior_trace.keys()) trace = sample( - chains=1, - draws=50, - compute_convergence_checks=False, - return_inferencedata=False, + chains=1, draws=50, compute_convergence_checks=False, return_inferencedata=False ) assert np.all(0 < trace["theta1_missing"].mean(0)) @@ -135,7 +131,7 @@ def test_interval_missing_observations(): # Make sure that the observed values are newly generated samples and that # the observed and deterministic matche - pp_trace = sample_posterior_predictive(trace) + pp_trace = sample_posterior_predictive(trace, return_inferencedata=False) assert np.all(np.var(pp_trace["theta1"], 0) > 0.0) assert np.all(np.var(pp_trace["theta2"], 0) > 0.0) assert np.mean(pp_trace["theta1"][:, ~obs1.mask] - pp_trace["theta1_observed"]) == 0.0 From 7b396ac2d40d1f6e14581f28a513aec72ad1566f Mon Sep 17 00:00:00 2001 From: AlexAndorra Date: Thu, 14 Oct 2021 15:38:52 +0200 Subject: [PATCH 09/22] Fix tests shared --- pymc/tests/test_shared.py | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/pymc/tests/test_shared.py b/pymc/tests/test_shared.py index ad09d19ad1..233ba01508 100644 --- a/pymc/tests/test_shared.py +++ b/pymc/tests/test_shared.py @@ -48,10 
+48,14 @@ def test_sample(self): prior_trace1 = pm.sample_prior_predictive(1000) pp_trace1 = pm.sample_posterior_predictive(idata, 1000) - assert prior_trace0["b"].shape == (1000,) - assert prior_trace0["obs"].shape == (1000, 100) - np.testing.assert_allclose(x, pp_trace0["obs"].mean(axis=0), atol=1e-1) - - assert prior_trace1["b"].shape == (1000,) - assert prior_trace1["obs"].shape == (1000, 200) - np.testing.assert_allclose(x_pred, pp_trace1["obs"].mean(axis=0), atol=1e-1) + assert prior_trace0.prior["b"].shape == (1, 1000) + assert prior_trace0.prior_predictive["obs"].shape == (1, 1000, 100) + np.testing.assert_allclose( + x, pp_trace0.posterior_predictive["obs"].mean(("chain", "draw")), atol=1e-1 + ) + + assert prior_trace1.prior["b"].shape == (1, 1000) + assert prior_trace1.prior_predictive["obs"].shape == (1, 1000, 200) + np.testing.assert_allclose( + x_pred, pp_trace1.posterior_predictive["obs"].mean(("chain", "draw")), atol=1e-1 + ) From e33a813ba4d72d17b6d839164cf771545ed461bf Mon Sep 17 00:00:00 2001 From: AlexAndorra Date: Thu, 14 Oct 2021 16:41:42 +0200 Subject: [PATCH 10/22] Fix tests distributions random --- pymc/tests/test_distributions_random.py | 397 ++++-------------------- 1 file changed, 63 insertions(+), 334 deletions(-) diff --git a/pymc/tests/test_distributions_random.py b/pymc/tests/test_distributions_random.py index b6990c6a38..bd61d582e0 100644 --- a/pymc/tests/test_distributions_random.py +++ b/pymc/tests/test_distributions_random.py @@ -102,13 +102,7 @@ def pymc_random( def pymc_random_discrete( - dist, - paramdomains, - valuedomain=Domain([0]), - ref_rand=None, - size=100000, - alpha=0.05, - fails=20, + dist, paramdomains, valuedomain=Domain([0]), ref_rand=None, size=100000, alpha=0.05, fails=20 ): model, param_vars = build_model(dist, valuedomain, paramdomains) model_dist = change_rv_size(model.named_vars["value"], size, expand=True) @@ -416,11 +410,7 @@ class TestFlat(BaseTestDistribution): pymc_dist = pm.Flat pymc_dist_params = {} expected_rv_op_params = {} - tests_to_run = [ - "check_pymc_params_match_rv_op", - "check_rv_size", - "check_not_implemented", - ] + tests_to_run = ["check_pymc_params_match_rv_op", "check_rv_size", "check_not_implemented"] def check_not_implemented(self): with pytest.raises(NotImplementedError): @@ -431,11 +421,7 @@ class TestHalfFlat(BaseTestDistribution): pymc_dist = pm.HalfFlat pymc_dist_params = {} expected_rv_op_params = {} - tests_to_run = [ - "check_pymc_params_match_rv_op", - "check_rv_size", - "check_not_implemented", - ] + tests_to_run = ["check_pymc_params_match_rv_op", "check_rv_size", "check_not_implemented"] def check_not_implemented(self): with pytest.raises(NotImplementedError): @@ -554,10 +540,7 @@ class TestGumbel(BaseTestDistribution): expected_rv_op_params = {"mu": 1.5, "beta": 3.0} reference_dist_params = {"loc": 1.5, "scale": 3.0} reference_dist = seeded_scipy_distribution_builder("gumbel_r") - tests_to_run = [ - "check_pymc_params_match_rv_op", - "check_pymc_draws_match_reference", - ] + tests_to_run = ["check_pymc_params_match_rv_op", "check_pymc_draws_match_reference"] class TestStudentT(BaseTestDistribution): @@ -633,9 +616,7 @@ class TestTruncatedNormalTau(BaseTestDistribution): tau, sigma = get_tau_sigma(tau=tau, sigma=None) pymc_dist_params = {"mu": mu, "tau": tau, "lower": lower, "upper": upper} expected_rv_op_params = {"mu": mu, "sigma": sigma, "lower": lower, "upper": upper} - tests_to_run = [ - "check_pymc_params_match_rv_op", - ] + tests_to_run = ["check_pymc_params_match_rv_op"] class 
TestTruncatedNormalLowerTau(BaseTestDistribution): @@ -644,9 +625,7 @@ class TestTruncatedNormalLowerTau(BaseTestDistribution): tau, sigma = get_tau_sigma(tau=tau, sigma=None) pymc_dist_params = {"mu": mu, "tau": tau, "lower": lower} expected_rv_op_params = {"mu": mu, "sigma": sigma, "lower": lower, "upper": upper} - tests_to_run = [ - "check_pymc_params_match_rv_op", - ] + tests_to_run = ["check_pymc_params_match_rv_op"] class TestTruncatedNormalUpperTau(BaseTestDistribution): @@ -655,9 +634,7 @@ class TestTruncatedNormalUpperTau(BaseTestDistribution): tau, sigma = get_tau_sigma(tau=tau, sigma=None) pymc_dist_params = {"mu": mu, "tau": tau, "upper": upper} expected_rv_op_params = {"mu": mu, "sigma": sigma, "lower": lower, "upper": upper} - tests_to_run = [ - "check_pymc_params_match_rv_op", - ] + tests_to_run = ["check_pymc_params_match_rv_op"] class TestTruncatedNormalUpperArray(BaseTestDistribution): @@ -666,20 +643,13 @@ class TestTruncatedNormalUpperArray(BaseTestDistribution): np.array([-np.inf, -np.inf]), np.array([3, 2]), np.array([0, 0]), - np.array( - [ - 1, - 1, - ] - ), + np.array([1, 1]), ) size = (15, 2) tau, sigma = get_tau_sigma(tau=tau, sigma=None) pymc_dist_params = {"mu": mu, "tau": tau, "upper": upper} expected_rv_op_params = {"mu": mu, "sigma": sigma, "lower": lower, "upper": upper} - tests_to_run = [ - "check_pymc_params_match_rv_op", - ] + tests_to_run = ["check_pymc_params_match_rv_op"] class TestWald(BaseTestDistribution): @@ -718,9 +688,7 @@ class TestWaldMuPhi(BaseTestDistribution): mu_rv, lam_rv, phi_rv = pm.Wald.get_mu_lam_phi(mu=mu, lam=None, phi=phi) pymc_dist_params = {"mu": mu, "phi": phi, "alpha": alpha} expected_rv_op_params = {"mu": mu_rv, "lam": lam_rv, "alpha": alpha} - tests_to_run = [ - "check_pymc_params_match_rv_op", - ] + tests_to_run = ["check_pymc_params_match_rv_op"] class TestSkewNormal(BaseTestDistribution): @@ -844,10 +812,7 @@ class TestHalfNormal(BaseTestDistribution): expected_rv_op_params = {"mean": 0, "sigma": 10.0} reference_dist_params = {"loc": 0, "scale": 10.0} reference_dist = seeded_scipy_distribution_builder("halfnorm") - tests_to_run = [ - "check_pymc_params_match_rv_op", - "check_pymc_draws_match_reference", - ] + tests_to_run = ["check_pymc_params_match_rv_op", "check_pymc_draws_match_reference"] class TestHalfNormalTau(BaseTestDistribution): @@ -897,10 +862,7 @@ class TestExponential(BaseTestDistribution): expected_rv_op_params = {"mu": 1.0 / pymc_dist_params["lam"]} reference_dist_params = {"scale": 1.0 / pymc_dist_params["lam"]} reference_dist = seeded_numpy_distribution_builder("exponential") - tests_to_run = [ - "check_pymc_params_match_rv_op", - "check_pymc_draws_match_reference", - ] + tests_to_run = ["check_pymc_params_match_rv_op", "check_pymc_draws_match_reference"] class TestCauchy(BaseTestDistribution): @@ -909,10 +871,7 @@ class TestCauchy(BaseTestDistribution): expected_rv_op_params = {"alpha": 2.0, "beta": 5.0} reference_dist_params = {"loc": 2.0, "scale": 5.0} reference_dist = seeded_scipy_distribution_builder("cauchy") - tests_to_run = [ - "check_pymc_params_match_rv_op", - "check_pymc_draws_match_reference", - ] + tests_to_run = ["check_pymc_params_match_rv_op", "check_pymc_draws_match_reference"] class TestHalfCauchy(BaseTestDistribution): @@ -921,10 +880,7 @@ class TestHalfCauchy(BaseTestDistribution): expected_rv_op_params = {"alpha": 0.0, "beta": 5.0} reference_dist_params = {"loc": 0.0, "scale": 5.0} reference_dist = seeded_scipy_distribution_builder("halfcauchy") - tests_to_run = [ - 
"check_pymc_params_match_rv_op", - "check_pymc_draws_match_reference", - ] + tests_to_run = ["check_pymc_params_match_rv_op", "check_pymc_draws_match_reference"] class TestGamma(BaseTestDistribution): @@ -933,10 +889,7 @@ class TestGamma(BaseTestDistribution): expected_rv_op_params = {"alpha": 2.0, "beta": 1 / 5.0} reference_dist_params = {"shape": 2.0, "scale": 1 / 5.0} reference_dist = seeded_numpy_distribution_builder("gamma") - tests_to_run = [ - "check_pymc_params_match_rv_op", - "check_pymc_draws_match_reference", - ] + tests_to_run = ["check_pymc_params_match_rv_op", "check_pymc_draws_match_reference"] class TestGammaMuSigma(BaseTestDistribution): @@ -955,20 +908,14 @@ class TestInverseGamma(BaseTestDistribution): expected_rv_op_params = {"alpha": 2.0, "beta": 5.0} reference_dist_params = {"a": 2.0, "scale": 5.0} reference_dist = seeded_scipy_distribution_builder("invgamma") - tests_to_run = [ - "check_pymc_params_match_rv_op", - "check_pymc_draws_match_reference", - ] + tests_to_run = ["check_pymc_params_match_rv_op", "check_pymc_draws_match_reference"] class TestInverseGammaMuSigma(BaseTestDistribution): pymc_dist = pm.InverseGamma pymc_dist_params = {"mu": 0.5, "sigma": 0.25} expected_alpha, expected_beta = pm.InverseGamma._get_alpha_beta( - alpha=None, - beta=None, - mu=pymc_dist_params["mu"], - sigma=pymc_dist_params["sigma"], + alpha=None, beta=None, mu=pymc_dist_params["mu"], sigma=pymc_dist_params["sigma"] ) expected_rv_op_params = {"alpha": expected_alpha, "beta": expected_beta} tests_to_run = ["check_pymc_params_match_rv_op"] @@ -1005,10 +952,7 @@ class TestNegativeBinomialMuSigma(BaseTestDistribution): pymc_dist = pm.NegativeBinomial pymc_dist_params = {"mu": 5.0, "alpha": 8.0} expected_n, expected_p = pm.NegativeBinomial.get_n_p( - mu=pymc_dist_params["mu"], - alpha=pymc_dist_params["alpha"], - n=None, - p=None, + mu=pymc_dist_params["mu"], alpha=pymc_dist_params["alpha"], n=None, p=None ) expected_rv_op_params = {"n": expected_n, "p": expected_p} tests_to_run = ["check_pymc_params_match_rv_op"] @@ -1020,10 +964,7 @@ class TestBernoulli(BaseTestDistribution): expected_rv_op_params = {"p": 0.33} reference_dist_params = {"p": 0.33} reference_dist = seeded_scipy_distribution_builder("bernoulli") - tests_to_run = [ - "check_pymc_params_match_rv_op", - "check_pymc_draws_match_reference", - ] + tests_to_run = ["check_pymc_params_match_rv_op", "check_pymc_draws_match_reference"] class TestBernoulliLogitP(BaseTestDistribution): @@ -1042,14 +983,8 @@ class TestPoisson(BaseTestDistribution): class TestMvNormalCov(BaseTestDistribution): pymc_dist = pm.MvNormal - pymc_dist_params = { - "mu": np.array([1.0, 2.0]), - "cov": np.array([[2.0, 0.0], [0.0, 3.5]]), - } - expected_rv_op_params = { - "mu": np.array([1.0, 2.0]), - "cov": np.array([[2.0, 0.0], [0.0, 3.5]]), - } + pymc_dist_params = {"mu": np.array([1.0, 2.0]), "cov": np.array([[2.0, 0.0], [0.0, 3.5]])} + expected_rv_op_params = {"mu": np.array([1.0, 2.0]), "cov": np.array([[2.0, 0.0], [0.0, 3.5]])} sizes_to_check = [None, (1), (2, 3)] sizes_expected = [(2,), (1, 2), (2, 3, 2)] reference_dist_params = { @@ -1066,10 +1001,7 @@ class TestMvNormalCov(BaseTestDistribution): class TestMvNormalChol(BaseTestDistribution): pymc_dist = pm.MvNormal - pymc_dist_params = { - "mu": np.array([1.0, 2.0]), - "chol": np.array([[2.0, 0.0], [0.0, 3.5]]), - } + pymc_dist_params = {"mu": np.array([1.0, 2.0]), "chol": np.array([[2.0, 0.0], [0.0, 3.5]])} expected_rv_op_params = { "mu": np.array([1.0, 2.0]), "cov": 
quaddist_matrix(chol=pymc_dist_params["chol"]).eval(), @@ -1079,10 +1011,7 @@ class TestMvNormalChol(BaseTestDistribution): class TestMvNormalTau(BaseTestDistribution): pymc_dist = pm.MvNormal - pymc_dist_params = { - "mu": np.array([1.0, 2.0]), - "tau": np.array([[2.0, 0.0], [0.0, 3.5]]), - } + pymc_dist_params = {"mu": np.array([1.0, 2.0]), "tau": np.array([[2.0, 0.0], [0.0, 3.5]])} expected_rv_op_params = { "mu": np.array([1.0, 2.0]), "cov": quaddist_matrix(tau=pymc_dist_params["tau"]).eval(), @@ -1193,11 +1122,7 @@ class TestDirichletMultinomial(BaseTestDistribution): sizes_to_check = [None, 1, (4,), (3, 4)] sizes_expected = [(4,), (1, 4), (4, 4), (3, 4, 4)] - tests_to_run = [ - "check_pymc_params_match_rv_op", - "test_random_draws", - "check_rv_size", - ] + tests_to_run = ["check_pymc_params_match_rv_op", "test_random_draws", "check_rv_size"] def test_random_draws(self): draws = pm.DirichletMultinomial.dist( @@ -1227,10 +1152,7 @@ class TestCategorical(BaseTestDistribution): pymc_dist = pm.Categorical pymc_dist_params = {"p": np.array([0.28, 0.62, 0.10])} expected_rv_op_params = {"p": np.array([0.28, 0.62, 0.10])} - tests_to_run = [ - "check_pymc_params_match_rv_op", - "check_rv_size", - ] + tests_to_run = ["check_pymc_params_match_rv_op", "check_rv_size"] class TestGeometric(BaseTestDistribution): @@ -1250,10 +1172,7 @@ class TestHyperGeometric(BaseTestDistribution): } reference_dist_params = expected_rv_op_params reference_dist = seeded_numpy_distribution_builder("hypergeometric") - tests_to_run = [ - "check_pymc_params_match_rv_op", - "check_pymc_draws_match_reference", - ] + tests_to_run = ["check_pymc_params_match_rv_op", "check_pymc_draws_match_reference"] class TestLogistic(BaseTestDistribution): @@ -1291,10 +1210,7 @@ class TestTriangular(BaseTestDistribution): expected_rv_op_params = {"lower": 0, "c": 0.5, "upper": 1} reference_dist_params = {"left": 0, "mode": 0.5, "right": 1} reference_dist = seeded_numpy_distribution_builder("triangular") - tests_to_run = [ - "check_pymc_params_match_rv_op", - "check_pymc_draws_match_reference", - ] + tests_to_run = ["check_pymc_params_match_rv_op", "check_pymc_draws_match_reference"] class TestVonMises(BaseTestDistribution): @@ -1340,8 +1256,7 @@ class TestBetaBinomial(BaseTestDistribution): @pytest.mark.skipif( - condition=_polyagamma_not_installed, - reason="`polyagamma package is not available/installed.", + condition=_polyagamma_not_installed, reason="`polyagamma package is not available/installed." 
) class TestPolyaGamma(BaseTestDistribution): def polyagamma_rng_fn(self, size, h, z, rng): @@ -1498,20 +1413,14 @@ class TestOrderedLogistic(BaseTestDistribution): pymc_dist = _OrderedLogistic pymc_dist_params = {"eta": 0, "cutpoints": np.array([-2, 0, 2])} expected_rv_op_params = {"p": np.array([0.11920292, 0.38079708, 0.38079708, 0.11920292])} - tests_to_run = [ - "check_pymc_params_match_rv_op", - "check_rv_size", - ] + tests_to_run = ["check_pymc_params_match_rv_op", "check_rv_size"] class TestOrderedProbit(BaseTestDistribution): pymc_dist = _OrderedProbit pymc_dist_params = {"eta": 0, "cutpoints": np.array([-2, 0, 2])} expected_rv_op_params = {"p": np.array([0.02275013, 0.47724987, 0.47724987, 0.02275013])} - tests_to_run = [ - "check_pymc_params_match_rv_op", - "check_rv_size", - ] + tests_to_run = ["check_pymc_params_match_rv_op", "check_rv_size"] class TestOrderedMultinomial(BaseTestDistribution): @@ -1523,10 +1432,7 @@ class TestOrderedMultinomial(BaseTestDistribution): "n": 1000, "p": np.array([0.11920292, 0.38079708, 0.38079708, 0.11920292]), } - tests_to_run = [ - "check_pymc_params_match_rv_op", - "check_rv_size", - ] + tests_to_run = ["check_pymc_params_match_rv_op", "check_rv_size"] class TestWishart(BaseTestDistribution): @@ -1540,11 +1446,7 @@ def wishart_rng_fn(self, size, nu, V, rng): reference_dist_params = {"nu": 4, "V": V} expected_rv_op_params = {"nu": 4, "V": V} sizes_to_check = [None, 1, (4, 5)] - sizes_expected = [ - (3, 3), - (1, 3, 3), - (4, 5, 3, 3), - ] + sizes_expected = [(3, 3), (1, 3, 3), (4, 5, 3, 3)] reference_dist = lambda self: functools.partial( self.wishart_rng_fn, rng=self.get_random_state() ) @@ -1578,12 +1480,9 @@ def ref_rand(mu, rowcov, colcov): with pm.Model(rng_seeder=1): matrixnormal = pm.MatrixNormal( - "matnormal", - mu=np.random.random((3, 3)), - rowcov=np.eye(3), - colcov=np.eye(3), + "matnormal", mu=np.random.random((3, 3)), rowcov=np.eye(3), colcov=np.eye(3) ) - check = pm.sample_prior_predictive(n_fails) + check = pm.sample_prior_predictive(n_fails, return_inferencedata=False) ref_smp = ref_rand(mu=np.random.random((3, 3)), rowcov=np.eye(3), colcov=np.eye(3)) @@ -1594,8 +1493,7 @@ def ref_rand(mu, rowcov, colcov): p = np.min( [ st.ks_2samp( - np.atleast_1d(matrixnormal_smp).flatten(), - np.atleast_1d(ref_smp).flatten(), + np.atleast_1d(matrixnormal_smp).flatten(), np.atleast_1d(ref_smp).flatten() ) ] ) @@ -1618,10 +1516,7 @@ def test_errors(self): msg = "Value must be two dimensional." 
with pm.Model(): matrixnormal = pm.MatrixNormal( - "matnormal", - mu=np.random.random((3, 3)), - rowcov=np.eye(3), - colcov=np.eye(3), + "matnormal", mu=np.random.random((3, 3)), rowcov=np.eye(3), colcov=np.eye(3) ) with pytest.raises(ValueError, match=msg): rvs_to_values = {matrixnormal: aesara.tensor.ones((3, 3, 3))} @@ -1710,10 +1605,7 @@ def kronecker_rng_fn(self, size, mu, covs=None, sigma=None, rng=None): reference_dist = lambda self: functools.partial( self.kronecker_rng_fn, rng=self.get_random_state() ) - tests_to_run = [ - "check_pymc_draws_match_reference", - "check_rv_size", - ] + tests_to_run = ["check_pymc_draws_match_reference", "check_rv_size"] class TestScalarParameterSamples(SeededTest): @@ -1899,63 +1791,28 @@ def build_model(self, distribution, shape, nested_rvs_info): except ValueError: value, nested_shape, loc = info if value is None: - nested_rvs[rv_name] = pm.Uniform( - rv_name, - 0 + loc, - 1 + loc, - shape=nested_shape, - ) + nested_rvs[rv_name] = pm.Uniform(rv_name, 0 + loc, 1 + loc, shape=nested_shape) else: nested_rvs[rv_name] = value * np.ones(nested_shape) - rv = distribution( - "target", - shape=shape, - **nested_rvs, - ) + rv = distribution("target", shape=shape, **nested_rvs) return model, rv, nested_rvs def sample_prior(self, distribution, shape, nested_rvs_info, prior_samples): - model, rv, nested_rvs = self.build_model( - distribution, - shape, - nested_rvs_info, - ) + model, rv, nested_rvs = self.build_model(distribution, shape, nested_rvs_info) with model: - return pm.sample_prior_predictive(prior_samples) + return pm.sample_prior_predictive(prior_samples, return_inferencedata=False) @pytest.mark.parametrize( ["prior_samples", "shape", "mu", "alpha"], [ [10, (3,), (None, tuple()), (None, (3,))], [10, (3,), (None, (3,)), (None, tuple())], - [ - 10, - ( - 4, - 3, - ), - (None, (3,)), - (None, (3,)), - ], - [ - 10, - ( - 4, - 3, - ), - (None, (3,)), - (None, (4, 3)), - ], + [10, (4, 3), (None, (3,)), (None, (3,))], + [10, (4, 3), (None, (3,)), (None, (4, 3))], ], ids=str, ) - def test_NegativeBinomial( - self, - prior_samples, - shape, - mu, - alpha, - ): + def test_NegativeBinomial(self, prior_samples, shape, mu, alpha): prior = self.sample_prior( distribution=pm.NegativeBinomial, shape=shape, @@ -1971,37 +1828,12 @@ def test_NegativeBinomial( [10, (3,), (0.5, (3,)), (None, tuple()), (None, (3,))], [10, (3,), (0.5, tuple()), (None, (3,)), (None, tuple())], [10, (3,), (0.5, (3,)), (None, (3,)), (None, tuple())], - [ - 10, - ( - 4, - 3, - ), - (0.5, (3,)), - (None, (3,)), - (None, (3,)), - ], - [ - 10, - ( - 4, - 3, - ), - (0.5, (3,)), - (None, (3,)), - (None, (4, 3)), - ], + [10, (4, 3), (0.5, (3,)), (None, (3,)), (None, (3,))], + [10, (4, 3), (0.5, (3,)), (None, (3,)), (None, (4, 3))], ], ids=str, ) - def test_ZeroInflatedNegativeBinomial( - self, - prior_samples, - shape, - psi, - mu, - alpha, - ): + def test_ZeroInflatedNegativeBinomial(self, prior_samples, shape, psi, mu, alpha): prior = self.sample_prior( distribution=pm.ZeroInflatedNegativeBinomial, shape=shape, @@ -2017,34 +1849,12 @@ def test_ZeroInflatedNegativeBinomial( [10, (3,), (None, tuple()), (None, (3,))], [10, (3,), (None, (3,)), (None, tuple())], [10, (3,), (None, (3,)), (None, tuple())], - [ - 10, - ( - 4, - 3, - ), - (None, (3,)), - (None, (3,)), - ], - [ - 10, - ( - 4, - 3, - ), - (None, (3,)), - (None, (4, 3)), - ], + [10, (4, 3), (None, (3,)), (None, (3,))], + [10, (4, 3), (None, (3,)), (None, (4, 3))], ], ids=str, ) - def test_Rice( - self, - prior_samples, - shape, - nu, - sigma, 
- ): + def test_Rice(self, prior_samples, shape, nu, sigma): prior = self.sample_prior( distribution=pm.Rice, shape=shape, @@ -2060,66 +1870,18 @@ def test_Rice( [10, (3,), (None, tuple()), (1.0, tuple()), (None, tuple(), -1), (None, (3,))], [10, (3,), (None, tuple()), (1.0, tuple()), (None, (3,), -1), (None, tuple())], [10, (3,), (None, tuple()), (1.0, tuple()), (None, (3,), -1), (None, tuple())], - [ - 10, - ( - 4, - 3, - ), - (None, (3,)), - (1.0, tuple()), - (None, (3,), -1), - (None, (3,)), - ], - [ - 10, - ( - 4, - 3, - ), - (None, (3,)), - (1.0, tuple()), - (None, (3,), -1), - (None, (4, 3)), - ], + [10, (4, 3), (None, (3,)), (1.0, tuple()), (None, (3,), -1), (None, (3,))], + [10, (4, 3), (None, (3,)), (1.0, tuple()), (None, (3,), -1), (None, (4, 3))], [10, (3,), (0.0, tuple()), (None, tuple()), (None, tuple(), -1), (None, (3,))], [10, (3,), (0.0, tuple()), (None, tuple()), (None, tuple(), -1), (None, (3,))], [10, (3,), (0.0, tuple()), (None, tuple()), (None, (3,), -1), (None, tuple())], [10, (3,), (0.0, tuple()), (None, tuple()), (None, (3,), -1), (None, tuple())], - [ - 10, - ( - 4, - 3, - ), - (0.0, tuple()), - (None, (3,)), - (None, (3,), -1), - (None, (3,)), - ], - [ - 10, - ( - 4, - 3, - ), - (0.0, tuple()), - (None, (3,)), - (None, (3,), -1), - (None, (4, 3)), - ], + [10, (4, 3), (0.0, tuple()), (None, (3,)), (None, (3,), -1), (None, (3,))], + [10, (4, 3), (0.0, tuple()), (None, (3,)), (None, (3,), -1), (None, (4, 3))], ], ids=str, ) - def test_TruncatedNormal( - self, - prior_samples, - shape, - mu, - sigma, - lower, - upper, - ): + def test_TruncatedNormal(self, prior_samples, shape, mu, sigma, lower, upper): prior = self.sample_prior( distribution=pm.TruncatedNormal, shape=shape, @@ -2134,37 +1896,12 @@ def test_TruncatedNormal( [10, (3,), (None, tuple()), (-1.0, (3,)), (2, tuple())], [10, (3,), (None, tuple()), (-1.0, tuple()), (None, tuple(), 1)], [10, (3,), (None, (3,)), (-1.0, tuple()), (None, tuple(), 1)], - [ - 10, - ( - 4, - 3, - ), - (None, (3,)), - (-1.0, tuple()), - (None, (3,), 1), - ], - [ - 10, - ( - 4, - 3, - ), - (None, (3,)), - (None, tuple(), -1), - (None, (3,), 1), - ], + [10, (4, 3), (None, (3,)), (-1.0, tuple()), (None, (3,), 1)], + [10, (4, 3), (None, (3,)), (None, tuple(), -1), (None, (3,), 1)], ], ids=str, ) - def test_Triangular( - self, - prior_samples, - shape, - c, - lower, - upper, - ): + def test_Triangular(self, prior_samples, shape, c, lower, upper): prior = self.sample_prior( distribution=pm.Triangular, shape=shape, @@ -2209,9 +1946,7 @@ def test_with_np_arrays(self, sample_shape, dist_shape, mu_shape, param): assert dist.random(size=sample_shape).shape == output_shape @pytest.mark.parametrize( - ["sample_shape", "dist_shape", "mu_shape"], - generate_shapes(include_params=False), - ids=str, + ["sample_shape", "dist_shape", "mu_shape"], generate_shapes(include_params=False), ids=str ) def test_with_chol_rv(self, sample_shape, dist_shape, mu_shape): with pm.Model() as model: @@ -2226,9 +1961,7 @@ def test_with_chol_rv(self, sample_shape, dist_shape, mu_shape): assert prior["mv"].shape == to_tuple(sample_shape) + dist_shape @pytest.mark.parametrize( - ["sample_shape", "dist_shape", "mu_shape"], - generate_shapes(include_params=False), - ids=str, + ["sample_shape", "dist_shape", "mu_shape"], generate_shapes(include_params=False), ids=str ) def test_with_cov_rv(self, sample_shape, dist_shape, mu_shape): with pm.Model() as model: @@ -2320,9 +2053,7 @@ def test_with_np_arrays(self, sample_shape, dist_shape, mu_shape, param): @pytest.mark.xfail 
@pytest.mark.parametrize( - ["sample_shape", "dist_shape", "mu_shape"], - generate_shapes(include_params=False), - ids=str, + ["sample_shape", "dist_shape", "mu_shape"], generate_shapes(include_params=False), ids=str ) def test_with_chol_rv(self, sample_shape, dist_shape, mu_shape): with pm.Model() as model: @@ -2338,9 +2069,7 @@ def test_with_chol_rv(self, sample_shape, dist_shape, mu_shape): @pytest.mark.xfail @pytest.mark.parametrize( - ["sample_shape", "dist_shape", "mu_shape"], - generate_shapes(include_params=False), - ids=str, + ["sample_shape", "dist_shape", "mu_shape"], generate_shapes(include_params=False), ids=str ) def test_with_cov_rv(self, sample_shape, dist_shape, mu_shape): with pm.Model() as model: @@ -2379,7 +2108,7 @@ def test_car_rng_fn(sparse): with pm.Model(rng_seeder=1): car = pm.CAR("car", mu, W, alpha, tau, size=size) mn = pm.MvNormal("mn", mu, cov, size=size) - check = pm.sample_prior_predictive(n_fails) + check = pm.sample_prior_predictive(n_fails, return_inferencedata=False) p, f = delta, n_fails while p <= delta and f > 0: From cd4ce13115ef3063944cef974a7d92b1591806e2 Mon Sep 17 00:00:00 2001 From: AlexAndorra Date: Thu, 14 Oct 2021 17:18:06 +0200 Subject: [PATCH 11/22] Fix tests distributions --- pymc/tests/test_distributions.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pymc/tests/test_distributions.py b/pymc/tests/test_distributions.py index 75dffe586d..40f6cdf22b 100644 --- a/pymc/tests/test_distributions.py +++ b/pymc/tests/test_distributions.py @@ -3249,7 +3249,7 @@ def test_distinct_rvs(): X_rv = pm.Normal("x") Y_rv = pm.Normal("y") - pp_samples = pm.sample_prior_predictive(samples=2) + pp_samples = pm.sample_prior_predictive(samples=2, return_inferencedata=False) assert X_rv.owner.inputs[0] != Y_rv.owner.inputs[0] @@ -3259,7 +3259,7 @@ def test_distinct_rvs(): X_rv = pm.Normal("x") Y_rv = pm.Normal("y") - pp_samples_2 = pm.sample_prior_predictive(samples=2) + pp_samples_2 = pm.sample_prior_predictive(samples=2, return_inferencedata=False) assert np.array_equal(pp_samples["y"], pp_samples_2["y"]) From fbda63e890f26ac85ef31907e3fe164ae28224e4 Mon Sep 17 00:00:00 2001 From: AlexAndorra Date: Thu, 14 Oct 2021 18:11:06 +0200 Subject: [PATCH 12/22] black --- pymc/tests/test_sampling.py | 171 ++++++++++++++++++++---------------- 1 file changed, 96 insertions(+), 75 deletions(-) diff --git a/pymc/tests/test_sampling.py b/pymc/tests/test_sampling.py index 132781815d..ad8cb4b21e 100644 --- a/pymc/tests/test_sampling.py +++ b/pymc/tests/test_sampling.py @@ -204,7 +204,7 @@ def test_return_inferencedata(self): assert len(result._groups_warmup) > 0 # inferencedata without tuning, with idata_kwargs - prior = pm.sample_prior_predictive() + prior = pm.sample_prior_predictive(return_inferencedata=False) result = pm.sample( **kwargs, return_inferencedata=True, @@ -472,7 +472,6 @@ def test_normal_scalar(self): trace = pm.sample( draws=ndraws, chains=nchains, - return_inferencedata=False, ) with model: @@ -486,43 +485,23 @@ def test_normal_scalar(self): # test keep_size parameter ppc = pm.sample_posterior_predictive(trace, keep_size=True) - assert ppc["a"].shape == (nchains, ndraws) + assert ppc.posterior_predictive["a"].shape == (1, nchains, ndraws) # test default case ppc = pm.sample_posterior_predictive(trace, var_names=["a"]) - assert "a" in ppc - assert ppc["a"].shape == (nchains * ndraws,) + assert "a" in ppc.posterior_predictive.data_vars + assert ppc.posterior_predictive["a"].shape == (1, nchains * ndraws) # mu's standard deviation may 
have changed thanks to a's observed - _, pval = stats.kstest(ppc["a"] - trace["mu"], stats.norm(loc=0, scale=1).cdf) + _, pval = stats.kstest( + ppc.posterior_predictive["a"] - trace.posterior["mu"], + stats.norm(loc=0, scale=1).cdf, + ) assert pval > 0.001 # size argument not introduced to fast version [2019/08/20:rpg] with model: ppc = pm.sample_posterior_predictive(trace, size=5, var_names=["a"]) - assert ppc["a"].shape == (nchains * ndraws, 5) - - def test_normal_scalar_idata(self): - nchains = 2 - ndraws = 500 - with pm.Model() as model: - mu = pm.Normal("mu", 0.0, 1.0) - a = pm.Normal("a", mu=mu, sigma=1, observed=0.0) - trace = pm.sample( - draws=ndraws, - chains=nchains, - return_inferencedata=False, - discard_tuned_samples=False, - ) - - assert not isinstance(trace, InferenceData) - - with model: - # test keep_size parameter and idata input - idata = pm.to_inference_data(trace) - assert isinstance(idata, InferenceData) - - ppc = pm.sample_posterior_predictive(idata, keep_size=True) - assert ppc["a"].shape == (nchains, ndraws) + assert ppc.posterior_predictive["a"].shape == (1, nchains * ndraws, 5) def test_normal_vector(self, caplog): with pm.Model() as model: @@ -532,25 +511,35 @@ def test_normal_vector(self, caplog): with model: # test list input - ppc0 = pm.sample_posterior_predictive([model.initial_point], samples=10) - ppc = pm.sample_posterior_predictive(trace, samples=12, var_names=[]) + ppc0 = pm.sample_posterior_predictive( + [model.initial_point], return_inferencedata=False, samples=10 + ) + ppc = pm.sample_posterior_predictive( + trace, return_inferencedata=False, samples=12, var_names=[] + ) assert len(ppc) == 0 # test keep_size parameter - ppc = pm.sample_posterior_predictive(trace, keep_size=True) + ppc = pm.sample_posterior_predictive(trace, return_inferencedata=False, keep_size=True) assert ppc["a"].shape == (trace.nchains, len(trace), 2) with pytest.warns(UserWarning): - ppc = pm.sample_posterior_predictive(trace, samples=12, var_names=["a"]) + ppc = pm.sample_posterior_predictive( + trace, return_inferencedata=False, samples=12, var_names=["a"] + ) assert "a" in ppc assert ppc["a"].shape == (12, 2) with pytest.warns(UserWarning): - ppc = pm.sample_posterior_predictive(trace, samples=12, var_names=["a"]) + ppc = pm.sample_posterior_predictive( + trace, return_inferencedata=False, samples=12, var_names=["a"] + ) assert "a" in ppc assert ppc["a"].shape == (12, 2) # size unsupported by fast_ version argument. 
[2019/08/19:rpg] - ppc = pm.sample_posterior_predictive(trace, samples=10, var_names=["a"], size=4) + ppc = pm.sample_posterior_predictive( + trace, return_inferencedata=False, samples=10, var_names=["a"], size=4 + ) assert "a" in ppc assert ppc["a"].shape == (10, 4, 2) @@ -567,7 +556,7 @@ def test_normal_vector_idata(self, caplog): idata = pm.to_inference_data(trace) assert isinstance(idata, InferenceData) - ppc = pm.sample_posterior_predictive(idata, keep_size=True) + ppc = pm.sample_posterior_predictive(idata, return_inferencedata=False, keep_size=True) assert ppc["a"].shape == (trace.nchains, len(trace), 2) def test_exceptions(self, caplog): @@ -600,11 +589,15 @@ def test_vector_observed(self): # TODO: Assert something about the output # ppc = pm.sample_posterior_predictive(idata, samples=12, var_names=[]) # assert len(ppc) == 0 - ppc = pm.sample_posterior_predictive(idata, samples=12, var_names=["a"]) + ppc = pm.sample_posterior_predictive( + idata, return_inferencedata=False, samples=12, var_names=["a"] + ) assert "a" in ppc assert ppc["a"].shape == (12, 2) - ppc = pm.sample_posterior_predictive(idata, samples=10, var_names=["a"], size=4) + ppc = pm.sample_posterior_predictive( + idata, return_inferencedata=False, samples=10, var_names=["a"], size=4 + ) assert "a" in ppc assert ppc["a"].shape == (10, 4, 2) @@ -616,9 +609,13 @@ def test_sum_normal(self): with model: # test list input - ppc0 = pm.sample_posterior_predictive([model.initial_point], samples=10) + ppc0 = pm.sample_posterior_predictive( + [model.initial_point], return_inferencedata=False, samples=10 + ) assert ppc0 == {} - ppc = pm.sample_posterior_predictive(idata, samples=1000, var_names=["b"]) + ppc = pm.sample_posterior_predictive( + idata, return_inferencedata=False, samples=1000, var_names=["b"] + ) assert len(ppc) == 1 assert ppc["b"].shape == (1000,) scale = np.sqrt(1 + 0.2 ** 2) @@ -637,7 +634,7 @@ def test_model_not_drawable_prior(self): with pytest.raises(NotImplementedError) as excinfo: pm.sample_prior_predictive(50) assert "Cannot sample" in str(excinfo.value) - samples = pm.sample_posterior_predictive(idata, 40) + samples = pm.sample_posterior_predictive(idata, 40, return_inferencedata=False) assert samples["foo"].shape == (40, 200) def test_model_shared_variable(self): @@ -660,7 +657,7 @@ def test_model_shared_variable(self): samples = 100 with model: post_pred = pm.sample_posterior_predictive( - trace, samples=samples, var_names=["p", "obs"] + trace, return_inferencedata=False, samples=samples, var_names=["p", "obs"] ) expected_p = np.array([logistic.eval({coeff: val}) for val in trace["x"][:samples]]) @@ -694,6 +691,7 @@ def test_deterministic_of_observed(self): rtol = 1e-5 if aesara.config.floatX == "float64" else 1e-4 ppc = pm.sample_posterior_predictive( + return_inferencedata=False, model=model, trace=trace, samples=len(trace) * nchains, @@ -728,6 +726,7 @@ def test_deterministic_of_observed_modified_interface(self): trace, varnames=[n for n in trace.varnames if n != "out"] ).to_dict("records") ppc = pm.sample_posterior_predictive( + return_inferencedata=False, model=model, trace=ppc_trace, samples=len(ppc_trace), @@ -745,7 +744,7 @@ def test_variable_type(self): trace = pm.sample(compute_convergence_checks=False, return_inferencedata=False) with model: - ppc = pm.sample_posterior_predictive(trace, samples=1) + ppc = pm.sample_posterior_predictive(trace, return_inferencedata=False, samples=1) assert ppc["a"].dtype.kind == "f" assert ppc["b"].dtype.kind == "i" @@ -918,7 +917,7 @@ def 
test_ignores_observed(self): positive_mu = pm.Deterministic("positive_mu", np.abs(mu)) z = -1 - positive_mu pm.Normal("x_obs", mu=z, sigma=1, observed=observed_data) - prior = pm.sample_prior_predictive() + prior = pm.sample_prior_predictive(return_inferencedata=False) assert "observed_data" not in prior assert (prior["mu"] < -90).all() @@ -932,8 +931,12 @@ def test_respects_shape(self): with pm.Model(): mu = pm.Gamma("mu", 3, 1, size=1) goals = pm.Poisson("goals", mu, size=shape) - trace1 = pm.sample_prior_predictive(10, var_names=["mu", "mu", "goals"]) - trace2 = pm.sample_prior_predictive(10, var_names=["mu", "goals"]) + trace1 = pm.sample_prior_predictive( + 10, return_inferencedata=False, var_names=["mu", "mu", "goals"] + ) + trace2 = pm.sample_prior_predictive( + 10, return_inferencedata=False, var_names=["mu", "goals"] + ) if shape == 2: # want to test shape as an int shape = (2,) assert trace1["goals"].shape == (10,) + shape @@ -944,7 +947,7 @@ def test_multivariate(self): m = pm.Multinomial("m", n=5, p=np.array([0.25, 0.25, 0.25, 0.25])) trace = pm.sample_prior_predictive(10) - assert trace["m"].shape == (10, 4) + assert trace.prior["m"].shape == (1, 10, 4) def test_multivariate2(self): # Added test for issue #3271 @@ -955,8 +958,12 @@ def test_multivariate2(self): burned_trace = pm.sample( 20, tune=10, cores=1, return_inferencedata=False, compute_convergence_checks=False ) - sim_priors = pm.sample_prior_predictive(samples=20, model=dm_model) - sim_ppc = pm.sample_posterior_predictive(burned_trace, samples=20, model=dm_model) + sim_priors = pm.sample_prior_predictive( + return_inferencedata=False, samples=20, model=dm_model + ) + sim_ppc = pm.sample_posterior_predictive( + burned_trace, return_inferencedata=False, samples=20, model=dm_model + ) assert sim_priors["probs"].shape == (20, 6) assert sim_priors["obs"].shape == (20,) + mn_data.shape assert sim_ppc["obs"].shape == (20,) + mn_data.shape @@ -987,9 +994,9 @@ def test_transformed(self): y = pm.Binomial("y", n=at_bats, p=thetas, observed=hits) gen = pm.sample_prior_predictive(draws) - assert gen["phi"].shape == (draws,) - assert gen["y"].shape == (draws, n) - assert "thetas" in gen + assert gen.prior["phi"].shape == (1, draws) + assert gen.prior_predictive["y"].shape == (1, draws, n) + assert "thetas" in gen.prior.data_vars def test_shared(self): n1 = 10 @@ -1002,16 +1009,16 @@ def test_shared(self): o = pm.Deterministic("o", obs) gen1 = pm.sample_prior_predictive(draws) - assert gen1["y"].shape == (draws, n1) - assert gen1["o"].shape == (draws, n1) + assert gen1.prior["y"].shape == (1, draws, n1) + assert gen1.prior["o"].shape == (1, draws, n1) n2 = 20 obs.set_value(np.random.rand(n2) < 0.5) with m: gen2 = pm.sample_prior_predictive(draws) - assert gen2["y"].shape == (draws, n2) - assert gen2["o"].shape == (draws, n2) + assert gen2.prior["y"].shape == (1, draws, n2) + assert gen2.prior["o"].shape == (1, draws, n2) def test_density_dist(self): obs = np.random.normal(-1, 0.1, size=10) @@ -1025,7 +1032,7 @@ def test_density_dist(self): random=lambda mu, sd, rng=None, size=None: rng.normal(loc=mu, scale=sd, size=size), observed=obs, ) - prior = pm.sample_prior_predictive() + prior = pm.sample_prior_predictive(return_inferencedata=False) npt.assert_almost_equal(prior["a"].mean(), 0, decimal=1) @@ -1035,7 +1042,7 @@ def test_shape_edgecase(self): sd = pm.Uniform("sd", lower=2, upper=3) x = pm.Normal("x", mu=mu, sigma=sd, size=5) prior = pm.sample_prior_predictive(10) - assert prior["mu"].shape == (10, 5) + assert 
prior.prior["mu"].shape == (1, 10, 5) def test_zeroinflatedpoisson(self): with pm.Model(): @@ -1043,9 +1050,9 @@ def test_zeroinflatedpoisson(self): psi = pm.HalfNormal("psi", sd=1) pm.ZeroInflatedPoisson("suppliers", psi=psi, theta=theta, size=20) gen_data = pm.sample_prior_predictive(samples=5000) - assert gen_data["theta"].shape == (5000,) - assert gen_data["psi"].shape == (5000,) - assert gen_data["suppliers"].shape == (5000, 20) + assert gen_data.prior["theta"].shape == (1, 5000) + assert gen_data.prior["psi"].shape == (1, 5000) + assert gen_data.prior["suppliers"].shape == (1, 5000, 20) def test_potentials_warning(self): warning_msg = "The effect of Potentials on other parameters is ignored during" @@ -1075,10 +1082,10 @@ def ub_interval_forward(x, ub): ) # Check values are correct - assert np.allclose(prior["ub_log__"], np.log(prior["ub"])) + assert np.allclose(prior.prior["ub_log__"].data, np.log(prior.prior["ub"].data)) assert np.allclose( - prior["x_interval__"], - ub_interval_forward(prior["x"], prior["ub"]), + prior.prior["x_interval__"].data, + ub_interval_forward(prior.prior["x"].data, prior.prior["ub"].data), ) # Check that it works when the original RVs are not mentioned in var_names @@ -1090,9 +1097,16 @@ def ub_interval_forward(x, ub): var_names=["ub_log__", "x_interval__"], samples=10, ) - assert "ub" not in prior_transformed_only and "x" not in prior_transformed_only - assert np.allclose(prior["ub_log__"], prior_transformed_only["ub_log__"]) - assert np.allclose(prior["x_interval__"], prior_transformed_only["x_interval__"]) + assert ( + "ub" not in prior_transformed_only.prior.data_vars + and "x" not in prior_transformed_only.prior.data_vars + ) + assert np.allclose( + prior.prior["ub_log__"].data, prior_transformed_only.prior["ub_log__"].data + ) + assert np.allclose( + prior.prior["x_interval__"], prior_transformed_only.prior["x_interval__"].data + ) def test_issue_4490(self): # Test that samples do not depend on var_name order or, more fundamentally, @@ -1112,27 +1126,34 @@ def test_issue_4490(self): d = pm.Normal("d") prior2 = pm.sample_prior_predictive(samples=1, var_names=["b", "a", "d", "c"]) - assert prior1["a"] == prior2["a"] - assert prior1["b"] == prior2["b"] - assert prior1["c"] == prior2["c"] - assert prior1["d"] == prior2["d"] + assert prior1.prior["a"] == prior2.prior["a"] + assert prior1.prior["b"] == prior2.prior["b"] + assert prior1.prior["c"] == prior2.prior["c"] + assert prior1.prior["d"] == prior2.prior["d"] class TestSamplePosteriorPredictive: def test_point_list_arg_bug_spp(self, point_list_arg_bug_fixture): pmodel, trace = point_list_arg_bug_fixture with pmodel: - pp = pm.sample_posterior_predictive([trace[15]], var_names=["d"]) + pp = pm.sample_posterior_predictive( + [trace[15]], return_inferencedata=False, var_names=["d"] + ) def test_sample_from_xarray_prior(self, point_list_arg_bug_fixture): pmodel, trace = point_list_arg_bug_fixture with pmodel: - prior = pm.sample_prior_predictive(samples=20) + prior = pm.sample_prior_predictive( + samples=20, + return_inferencedata=False, + ) idat = pm.to_inference_data(trace, prior=prior) with pmodel: - pp = pm.sample_posterior_predictive(idat.prior, var_names=["d"]) + pp = pm.sample_posterior_predictive( + idat.prior, return_inferencedata=False, var_names=["d"] + ) def test_sample_from_xarray_posterior(self, point_list_arg_bug_fixture): pmodel, trace = point_list_arg_bug_fixture From 826c60266525545ab9ead77b5c7a24f3ff9f86ad Mon Sep 17 00:00:00 2001 From: AlexAndorra Date: Thu, 14 Oct 2021 18:15:59 +0200 
Subject: [PATCH 13/22] Fix tests idata conversion --- pymc/tests/test_idata_conversion.py | 61 +++++++++++++---------------- 1 file changed, 28 insertions(+), 33 deletions(-) diff --git a/pymc/tests/test_idata_conversion.py b/pymc/tests/test_idata_conversion.py index 465169ae13..83106bb091 100644 --- a/pymc/tests/test_idata_conversion.py +++ b/pymc/tests/test_idata_conversion.py @@ -66,8 +66,10 @@ def data(self, eight_schools_params, draws, chains): def get_inference_data(self, data, eight_schools_params): with data.model: - prior = pm.sample_prior_predictive() - posterior_predictive = pm.sample_posterior_predictive(data.obj) + prior = pm.sample_prior_predictive(return_inferencedata=False) + posterior_predictive = pm.sample_posterior_predictive( + data.obj, return_inferencedata=False + ) return ( to_inference_data( @@ -85,8 +87,10 @@ def get_predictions_inference_data( self, data, eight_schools_params, inplace ) -> Tuple[InferenceData, Dict[str, np.ndarray]]: with data.model: - prior = pm.sample_prior_predictive() - posterior_predictive = pm.sample_posterior_predictive(data.obj) + prior = pm.sample_prior_predictive(return_inferencedata=False) + posterior_predictive = pm.sample_posterior_predictive( + data.obj, return_inferencedata=False + ) idata = to_inference_data( trace=data.obj, @@ -106,7 +110,9 @@ def make_predictions_inference_data( self, data, eight_schools_params ) -> Tuple[InferenceData, Dict[str, np.ndarray]]: with data.model: - posterior_predictive = pm.sample_posterior_predictive(data.obj) + posterior_predictive = pm.sample_posterior_predictive( + data.obj, return_inferencedata=False + ) idata = predictions_to_inference_data( posterior_predictive, posterior_trace=data.obj, @@ -199,7 +205,9 @@ def test_predictions_to_idata_new(self, data, eight_schools_params): def test_posterior_predictive_keep_size(self, data, chains, draws, eight_schools_params): with data.model: - posterior_predictive = pm.sample_posterior_predictive(data.obj, keep_size=True) + posterior_predictive = pm.sample_posterior_predictive( + data.obj, keep_size=True, return_inferencedata=False + ) inference_data = to_inference_data( trace=data.obj, posterior_predictive=posterior_predictive, @@ -214,7 +222,9 @@ def test_posterior_predictive_keep_size(self, data, chains, draws, eight_schools def test_posterior_predictive_warning(self, data, eight_schools_params, caplog): with data.model: - posterior_predictive = pm.sample_posterior_predictive(data.obj, 370) + posterior_predictive = pm.sample_posterior_predictive( + data.obj, 370, return_inferencedata=False + ) inference_data = to_inference_data( trace=data.obj, posterior_predictive=posterior_predictive, @@ -375,10 +385,7 @@ def test_multiple_observed_rv_without_observations(self): with pm.Model(): mu = pm.Normal("mu") x = pm.DensityDist( # pylint: disable=unused-variable - "x", - mu, - logp=lambda value, mu: pm.Normal.logp(value, mu, 1), - observed=0.1, + "x", mu, logp=lambda value, mu: pm.Normal.logp(value, mu, 1), observed=0.1 ) inference_data = pm.sample(100, chains=2, return_inferencedata=True) test_dict = { @@ -483,7 +490,9 @@ def test_predictions_constant_data(self): y = pm.Data("y", [1.0, 2.0]) beta = pm.Normal("beta", 0, 1) obs = pm.Normal("obs", x * beta, 1, observed=y) # pylint: disable=unused-variable - predictive_trace = pm.sample_posterior_predictive(inference_data) + predictive_trace = pm.sample_posterior_predictive( + inference_data, return_inferencedata=False + ) assert set(predictive_trace.keys()) == {"obs"} # this should be four chains of 100 samples 
# assert predictive_trace["obs"].shape == (400, 2) @@ -506,8 +515,8 @@ def test_no_trace(self): beta = pm.Normal("beta", 0, 1) obs = pm.Normal("obs", x * beta, 1, observed=y) # pylint: disable=unused-variable idata = pm.sample(100, tune=100) - prior = pm.sample_prior_predictive() - posterior_predictive = pm.sample_posterior_predictive(idata) + prior = pm.sample_prior_predictive(return_inferencedata=False) + posterior_predictive = pm.sample_posterior_predictive(idata, return_inferencedata=False) # Only prior inference_data = to_inference_data(prior=prior, model=model) @@ -539,7 +548,7 @@ def test_priors_separation(self, use_context): y = pm.Data("y", [1.0, 2.0, 3.0]) beta = pm.Normal("beta", 0, 1) obs = pm.Normal("obs", x * beta, 1, observed=y) # pylint: disable=unused-variable - prior = pm.sample_prior_predictive() + prior = pm.sample_prior_predictive(return_inferencedata=False) test_dict = { "prior": ["beta", "~obs"], @@ -574,10 +583,7 @@ def test_multivariate_observations(self): def test_constant_data_coords_issue_5046(self): """This is a regression test against a bug where a local coords variable was overwritten.""" - dims = { - "alpha": ["backwards"], - "bravo": ["letters", "yesno"], - } + dims = {"alpha": ["backwards"], "bravo": ["letters", "yesno"]} coords = { "backwards": np.arange(17)[::-1], "letters": list("ABCDEFGHIJK"), @@ -592,20 +598,13 @@ def test_constant_data_coords_issue_5046(self): assert len(data[k].shape) == len(dims[k]) ds = pm.backends.arviz.dict_to_dataset( - data=data, - library=pm, - coords=coords, - dims=dims, - default_dims=[], - index_origin=0, + data=data, library=pm, coords=coords, dims=dims, default_dims=[], index_origin=0 ) for dname, cvals in coords.items(): np.testing.assert_array_equal(ds[dname].values, cvals) def test_issue_5043_autoconvert_coord_values(self): - coords = { - "city": pd.Series(["Bonn", "Berlin"]), - } + coords = {"city": pd.Series(["Bonn", "Berlin"])} with pm.Model(coords=coords) as pmodel: # The model tracks coord values as (immutable) tuples assert isinstance(pmodel.coords["city"], tuple) @@ -631,11 +630,7 @@ def test_issue_5043_autoconvert_coord_values(self): trace=mtrace, coords={ "city": pd.MultiIndex.from_tuples( - [ - ("Bonn", 53111), - ("Berlin", 10178), - ], - names=["name", "zipcode"], + [("Bonn", 53111), ("Berlin", 10178)], names=["name", "zipcode"] ) }, ) From 59d347546a6c000b73472cd277dce1de1e28a91d Mon Sep 17 00:00:00 2001 From: AlexAndorra Date: Fri, 15 Oct 2021 10:47:50 +0200 Subject: [PATCH 14/22] black on smc --- pymc/smc/smc.py | 1 + 1 file changed, 1 insertion(+) diff --git a/pymc/smc/smc.py b/pymc/smc/smc.py index 83b71b478d..bc43fa643a 100644 --- a/pymc/smc/smc.py +++ b/pymc/smc/smc.py @@ -179,6 +179,7 @@ def initialize_population(self) -> Dict[str, NDArray]: self.draws, var_names=[v.name for v in self.model.unobserved_value_vars], model=self.model, + return_inferencedata=False, ) def _initialize_kernel(self): From 9f8af4d94859c747c0a22fa19e13e6e19daae293 Mon Sep 17 00:00:00 2001 From: AlexAndorra Date: Fri, 15 Oct 2021 10:48:24 +0200 Subject: [PATCH 15/22] Fix tests SMC --- pymc/tests/test_smc.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pymc/tests/test_smc.py b/pymc/tests/test_smc.py index 8227ad569d..c86b03292e 100644 --- a/pymc/tests/test_smc.py +++ b/pymc/tests/test_smc.py @@ -293,8 +293,8 @@ def test_one_gaussian(self): with self.SMABC_test: trace = pm.sample_smc(draws=1000, return_inferencedata=False) - pr_p = pm.sample_prior_predictive(1000) - po_p = 
pm.sample_posterior_predictive(trace, 1000) + pr_p = pm.sample_prior_predictive(1000, return_inferencedata=False) + po_p = pm.sample_posterior_predictive(trace, 1000, return_inferencedata=False) assert abs(self.data.mean() - trace["a"].mean()) < 0.05 assert abs(self.data.std() - trace["b"].mean()) < 0.05 From ef7a2e6f3f1e28ccff8968f21d2acb22e6e87295 Mon Sep 17 00:00:00 2001 From: AlexAndorra Date: Fri, 15 Oct 2021 13:25:31 +0200 Subject: [PATCH 16/22] Black on test_sampling --- pymc/tests/test_sampling.py | 30 ++++++++++++++++-------------- 1 file changed, 16 insertions(+), 14 deletions(-) diff --git a/pymc/tests/test_sampling.py b/pymc/tests/test_sampling.py index ad8cb4b21e..85d9413e09 100644 --- a/pymc/tests/test_sampling.py +++ b/pymc/tests/test_sampling.py @@ -472,36 +472,38 @@ def test_normal_scalar(self): trace = pm.sample( draws=ndraws, chains=nchains, + return_inferencedata=False, ) with model: # test list input - ppc0 = pm.sample_posterior_predictive([model.initial_point], samples=10) + ppc0 = pm.sample_posterior_predictive( + [model.initial_point], samples=10, return_inferencedata=False + ) # # deprecated argument is not introduced to fast version [2019/08/20:rpg] - ppc = pm.sample_posterior_predictive(trace, var_names=["a"]) + ppc = pm.sample_posterior_predictive(trace, var_names=["a"], return_inferencedata=False) # test empty ppc - ppc = pm.sample_posterior_predictive(trace, var_names=[]) + ppc = pm.sample_posterior_predictive(trace, var_names=[], return_inferencedata=False) assert len(ppc) == 0 # test keep_size parameter - ppc = pm.sample_posterior_predictive(trace, keep_size=True) - assert ppc.posterior_predictive["a"].shape == (1, nchains, ndraws) + ppc = pm.sample_posterior_predictive(trace, keep_size=True, return_inferencedata=False) + assert ppc["a"].shape == (nchains, ndraws) # test default case - ppc = pm.sample_posterior_predictive(trace, var_names=["a"]) - assert "a" in ppc.posterior_predictive.data_vars - assert ppc.posterior_predictive["a"].shape == (1, nchains * ndraws) + ppc = pm.sample_posterior_predictive(trace, var_names=["a"], return_inferencedata=False) + assert "a" in ppc + assert ppc["a"].shape == (nchains * ndraws,) # mu's standard deviation may have changed thanks to a's observed - _, pval = stats.kstest( - ppc.posterior_predictive["a"] - trace.posterior["mu"], - stats.norm(loc=0, scale=1).cdf, - ) + _, pval = stats.kstest(ppc["a"] - trace["mu"], stats.norm(loc=0, scale=1).cdf) assert pval > 0.001 # size argument not introduced to fast version [2019/08/20:rpg] with model: - ppc = pm.sample_posterior_predictive(trace, size=5, var_names=["a"]) - assert ppc.posterior_predictive["a"].shape == (1, nchains * ndraws, 5) + ppc = pm.sample_posterior_predictive( + trace, size=5, var_names=["a"], return_inferencedata=False + ) + assert ppc["a"].shape == (nchains * ndraws, 5) def test_normal_vector(self, caplog): with pm.Model() as model: From 44d373036ff9794283d97d52fcc5f82df9bfad0a Mon Sep 17 00:00:00 2001 From: AlexAndorra Date: Fri, 15 Oct 2021 13:28:27 +0200 Subject: [PATCH 17/22] Reinstate test_normal_scalar_idata --- pymc/tests/test_sampling.py | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/pymc/tests/test_sampling.py b/pymc/tests/test_sampling.py index 85d9413e09..5f45d4d69c 100644 --- a/pymc/tests/test_sampling.py +++ b/pymc/tests/test_sampling.py @@ -505,6 +505,29 @@ def test_normal_scalar(self): ) assert ppc["a"].shape == (nchains * ndraws, 5) + def test_normal_scalar_idata(self): + nchains = 2 + ndraws = 500 + with pm.Model() 
as model: + mu = pm.Normal("mu", 0.0, 1.0) + a = pm.Normal("a", mu=mu, sigma=1, observed=0.0) + trace = pm.sample( + draws=ndraws, + chains=nchains, + return_inferencedata=False, + discard_tuned_samples=False, + ) + + assert not isinstance(trace, InferenceData) + + with model: + # test keep_size parameter and idata input + idata = pm.to_inference_data(trace) + assert isinstance(idata, InferenceData) + + ppc = pm.sample_posterior_predictive(idata, keep_size=True, return_inferencedata=False) + assert ppc["a"].shape == (nchains, ndraws) + def test_normal_vector(self, caplog): with pm.Model() as model: mu = pm.Normal("mu", 0.0, 1.0) From 4eea15ce516bfa529d123c583566d3dfa1d19d75 Mon Sep 17 00:00:00 2001 From: AlexAndorra Date: Fri, 15 Oct 2021 13:29:22 +0200 Subject: [PATCH 18/22] Remove Black line length 100 in pyproject.toml --- pyproject.toml | 3 --- 1 file changed, 3 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index aa4e5e2bfa..31bdfe0dcc 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -2,9 +2,6 @@ minversion = "6.0" xfail_strict=true -[tool.black] -line-length = 100 - [tool.coverage.report] exclude_lines = [ "pragma: nocover", From 7ad4586b1d58376efbdf37c356a0366c153c47bd Mon Sep 17 00:00:00 2001 From: AlexAndorra Date: Fri, 15 Oct 2021 13:38:37 +0200 Subject: [PATCH 19/22] Black on test_distributions_random --- pymc/tests/test_distributions_random.py | 330 +++++++++++++++++++----- 1 file changed, 269 insertions(+), 61 deletions(-) diff --git a/pymc/tests/test_distributions_random.py b/pymc/tests/test_distributions_random.py index bd61d582e0..a7dc49a14a 100644 --- a/pymc/tests/test_distributions_random.py +++ b/pymc/tests/test_distributions_random.py @@ -102,7 +102,13 @@ def pymc_random( def pymc_random_discrete( - dist, paramdomains, valuedomain=Domain([0]), ref_rand=None, size=100000, alpha=0.05, fails=20 + dist, + paramdomains, + valuedomain=Domain([0]), + ref_rand=None, + size=100000, + alpha=0.05, + fails=20, ): model, param_vars = build_model(dist, valuedomain, paramdomains) model_dist = change_rv_size(model.named_vars["value"], size, expand=True) @@ -176,7 +182,9 @@ def get_random_variable(self, shape, with_vector_params=False, name=None): size = shape else: size = shape[:-ndim_supp] - return self.distribution(name, size=size, transform=None, **params) + return self.distribution( + name, size=size, transform=None, **params + ) except TypeError: if np.sum(np.atleast_1d(shape)) == 0: pytest.skip("Timeseries must have positive shape") @@ -194,7 +202,9 @@ def sample_random_variable(random_variable, size): def test_scalar_distribution_shape(self, shape, size): """Draws samples of different [size] from a scalar [shape] RV.""" rv = self.get_random_variable(shape) - exp_shape = self.default_shape if shape is None else tuple(np.atleast_1d(shape)) + exp_shape = ( + self.default_shape if shape is None else tuple(np.atleast_1d(shape)) + ) exp_size = self.default_size if size is None else tuple(np.atleast_1d(size)) expected = exp_size + exp_shape actual = np.shape(self.sample_random_variable(rv, size)) @@ -214,7 +224,9 @@ def test_scalar_distribution_shape(self, shape, size): def test_scalar_sample_shape(self, shape, size): """Draws samples of scalar [size] from a [shape] RV.""" rv = self.get_random_variable(shape) - exp_shape = self.default_shape if shape is None else tuple(np.atleast_1d(shape)) + exp_shape = ( + self.default_shape if shape is None else tuple(np.atleast_1d(shape)) + ) exp_size = self.default_size if size is None else tuple(np.atleast_1d(size)) expected = 
exp_size + exp_shape actual = np.shape(self.sample_random_variable(rv, size)) @@ -227,7 +239,9 @@ def test_scalar_sample_shape(self, shape, size): def test_vector_params(self, shape, size): shape = self.shape rv = self.get_random_variable(shape, with_vector_params=True) - exp_shape = self.default_shape if shape is None else tuple(np.atleast_1d(shape)) + exp_shape = ( + self.default_shape if shape is None else tuple(np.atleast_1d(shape)) + ) exp_size = self.default_size if size is None else tuple(np.atleast_1d(size)) expected = exp_size + exp_shape actual = np.shape(self.sample_random_variable(rv, size)) @@ -340,7 +354,9 @@ def test_distribution(self): def _instantiate_pymc_rv(self, dist_params=None): params = dist_params if dist_params else self.pymc_dist_params self.pymc_rv = self.pymc_dist.dist( - **params, size=self.size, rng=aesara.shared(self.get_random_state(reset=True)) + **params, + size=self.size, + rng=aesara.shared(self.get_random_state(reset=True)), ) def check_pymc_draws_match_reference(self): @@ -356,16 +372,36 @@ def check_pymc_params_match_rv_op(self): for (expected_name, expected_value), actual_variable in zip( self.expected_rv_op_params.items(), aesera_dist_inputs ): - assert_almost_equal(expected_value, actual_variable.eval(), decimal=self.decimal) + assert_almost_equal( + expected_value, actual_variable.eval(), decimal=self.decimal + ) def check_rv_size(self): # test sizes - sizes_to_check = self.sizes_to_check or [None, (), 1, (1,), 5, (4, 5), (2, 4, 2)] - sizes_expected = self.sizes_expected or [(), (), (1,), (1,), (5,), (4, 5), (2, 4, 2)] + sizes_to_check = self.sizes_to_check or [ + None, + (), + 1, + (1,), + 5, + (4, 5), + (2, 4, 2), + ] + sizes_expected = self.sizes_expected or [ + (), + (), + (1,), + (1,), + (5,), + (4, 5), + (2, 4, 2), + ] for size, expected in zip(sizes_to_check, sizes_expected): pymc_rv = self.pymc_dist.dist(**self.pymc_dist_params, size=size) actual = tuple(pymc_rv.shape.eval()) - assert actual == expected, f"size={size}, expected={expected}, actual={actual}" + assert ( + actual == expected + ), f"size={size}, expected={expected}, actual={actual}" # test multi-parameters sampling for univariate distributions (with univariate inputs) if ( @@ -374,10 +410,15 @@ def check_rv_size(self): and sum(self.pymc_dist.rv_op.ndims_params) == 0 ): params = { - k: p * np.ones(self.repeated_params_shape) for k, p in self.pymc_dist_params.items() + k: p * np.ones(self.repeated_params_shape) + for k, p in self.pymc_dist_params.items() } self._instantiate_pymc_rv(params) - sizes_to_check = [None, self.repeated_params_shape, (5, self.repeated_params_shape)] + sizes_to_check = [ + None, + self.repeated_params_shape, + (5, self.repeated_params_shape), + ] sizes_expected = [ (self.repeated_params_shape,), (self.repeated_params_shape,), @@ -410,7 +451,11 @@ class TestFlat(BaseTestDistribution): pymc_dist = pm.Flat pymc_dist_params = {} expected_rv_op_params = {} - tests_to_run = ["check_pymc_params_match_rv_op", "check_rv_size", "check_not_implemented"] + tests_to_run = [ + "check_pymc_params_match_rv_op", + "check_rv_size", + "check_not_implemented", + ] def check_not_implemented(self): with pytest.raises(NotImplementedError): @@ -421,7 +466,11 @@ class TestHalfFlat(BaseTestDistribution): pymc_dist = pm.HalfFlat pymc_dist_params = {} expected_rv_op_params = {} - tests_to_run = ["check_pymc_params_match_rv_op", "check_rv_size", "check_not_implemented"] + tests_to_run = [ + "check_pymc_params_match_rv_op", + "check_rv_size", + "check_not_implemented", + ] def 
check_not_implemented(self): with pytest.raises(NotImplementedError): @@ -430,13 +479,20 @@ def check_not_implemented(self): class TestDiscreteWeibull(BaseTestDistribution): def discrete_weibul_rng_fn(self, size, q, beta, uniform_rng_fct): - return np.ceil(np.power(np.log(1 - uniform_rng_fct(size=size)) / np.log(q), 1.0 / beta)) - 1 + return ( + np.ceil( + np.power(np.log(1 - uniform_rng_fct(size=size)) / np.log(q), 1.0 / beta) + ) + - 1 + ) def seeded_discrete_weibul_rng_fn(self): uniform_rng_fct = functools.partial( getattr(np.random.RandomState, "uniform"), self.get_random_state() ) - return functools.partial(self.discrete_weibul_rng_fn, uniform_rng_fct=uniform_rng_fct) + return functools.partial( + self.discrete_weibul_rng_fn, uniform_rng_fct=uniform_rng_fct + ) pymc_dist = pm.DiscreteWeibull pymc_dist_params = {"q": 0.25, "beta": 2.0} @@ -489,7 +545,9 @@ def seeded_asymmetriclaplace_rng_fn(self): uniform_rng_fct = functools.partial( getattr(np.random.RandomState, "uniform"), self.get_random_state() ) - return functools.partial(self.asymmetriclaplace_rng_fn, uniform_rng_fct=uniform_rng_fct) + return functools.partial( + self.asymmetriclaplace_rng_fn, uniform_rng_fct=uniform_rng_fct + ) pymc_dist = pm.AsymmetricLaplace @@ -505,8 +563,12 @@ def seeded_asymmetriclaplace_rng_fn(self): class TestExGaussian(BaseTestDistribution): - def exgaussian_rng_fn(self, mu, sigma, nu, size, normal_rng_fct, exponential_rng_fct): - return normal_rng_fct(mu, sigma, size=size) + exponential_rng_fct(scale=nu, size=size) + def exgaussian_rng_fn( + self, mu, sigma, nu, size, normal_rng_fct, exponential_rng_fct + ): + return normal_rng_fct(mu, sigma, size=size) + exponential_rng_fct( + scale=nu, size=size + ) def seeded_exgaussian_rng_fn(self): normal_rng_fct = functools.partial( @@ -577,7 +639,9 @@ def seeded_kumaraswamy_rng_fn(self): uniform_rng_fct = functools.partial( getattr(np.random.RandomState, "uniform"), self.get_random_state() ) - return functools.partial(self.kumaraswamy_rng_fn, uniform_rng_fct=uniform_rng_fct) + return functools.partial( + self.kumaraswamy_rng_fn, uniform_rng_fct=uniform_rng_fct + ) pymc_dist = pm.Kumaraswamy pymc_dist_params = {"a": 1.0, "b": 1.0} @@ -678,7 +742,9 @@ def test_distribution(self): def check_pymc_draws_match_reference(self): assert_array_almost_equal( - self.pymc_rv.eval(), self.reference_dist_draws + self.alpha, decimal=self.decimal + self.pymc_rv.eval(), + self.reference_dist_draws + self.alpha, + decimal=self.decimal, ) @@ -915,7 +981,10 @@ class TestInverseGammaMuSigma(BaseTestDistribution): pymc_dist = pm.InverseGamma pymc_dist_params = {"mu": 0.5, "sigma": 0.25} expected_alpha, expected_beta = pm.InverseGamma._get_alpha_beta( - alpha=None, beta=None, mu=pymc_dist_params["mu"], sigma=pymc_dist_params["sigma"] + alpha=None, + beta=None, + mu=pymc_dist_params["mu"], + sigma=pymc_dist_params["sigma"], ) expected_rv_op_params = {"alpha": expected_alpha, "beta": expected_beta} tests_to_run = ["check_pymc_params_match_rv_op"] @@ -983,8 +1052,14 @@ class TestPoisson(BaseTestDistribution): class TestMvNormalCov(BaseTestDistribution): pymc_dist = pm.MvNormal - pymc_dist_params = {"mu": np.array([1.0, 2.0]), "cov": np.array([[2.0, 0.0], [0.0, 3.5]])} - expected_rv_op_params = {"mu": np.array([1.0, 2.0]), "cov": np.array([[2.0, 0.0], [0.0, 3.5]])} + pymc_dist_params = { + "mu": np.array([1.0, 2.0]), + "cov": np.array([[2.0, 0.0], [0.0, 3.5]]), + } + expected_rv_op_params = { + "mu": np.array([1.0, 2.0]), + "cov": np.array([[2.0, 0.0], [0.0, 3.5]]), + } sizes_to_check = 
[None, (1), (2, 3)] sizes_expected = [(2,), (1, 2), (2, 3, 2)] reference_dist_params = { @@ -1001,7 +1076,10 @@ class TestMvNormalCov(BaseTestDistribution): class TestMvNormalChol(BaseTestDistribution): pymc_dist = pm.MvNormal - pymc_dist_params = {"mu": np.array([1.0, 2.0]), "chol": np.array([[2.0, 0.0], [0.0, 3.5]])} + pymc_dist_params = { + "mu": np.array([1.0, 2.0]), + "chol": np.array([[2.0, 0.0], [0.0, 3.5]]), + } expected_rv_op_params = { "mu": np.array([1.0, 2.0]), "cov": quaddist_matrix(chol=pymc_dist_params["chol"]).eval(), @@ -1011,7 +1089,10 @@ class TestMvNormalChol(BaseTestDistribution): class TestMvNormalTau(BaseTestDistribution): pymc_dist = pm.MvNormal - pymc_dist_params = {"mu": np.array([1.0, 2.0]), "tau": np.array([[2.0, 0.0], [0.0, 3.5]])} + pymc_dist_params = { + "mu": np.array([1.0, 2.0]), + "tau": np.array([[2.0, 0.0], [0.0, 3.5]]), + } expected_rv_op_params = { "mu": np.array([1.0, 2.0]), "cov": quaddist_matrix(tau=pymc_dist_params["tau"]).eval(), @@ -1122,7 +1203,11 @@ class TestDirichletMultinomial(BaseTestDistribution): sizes_to_check = [None, 1, (4,), (3, 4)] sizes_expected = [(4,), (1, 4), (4, 4), (3, 4, 4)] - tests_to_run = ["check_pymc_params_match_rv_op", "test_random_draws", "check_rv_size"] + tests_to_run = [ + "check_pymc_params_match_rv_op", + "test_random_draws", + "check_rv_size", + ] def test_random_draws(self): draws = pm.DirichletMultinomial.dist( @@ -1228,7 +1313,9 @@ def seeded_weibul_rng_fn(self): std_weibull_rng_fct = functools.partial( getattr(np.random.RandomState, "weibull"), self.get_random_state() ) - return functools.partial(self.weibull_rng_fn, std_weibull_rng_fct=std_weibull_rng_fct) + return functools.partial( + self.weibull_rng_fn, std_weibull_rng_fct=std_weibull_rng_fct + ) pymc_dist = pm.Weibull pymc_dist_params = {"alpha": 1.0, "beta": 2.0} @@ -1256,7 +1343,8 @@ class TestBetaBinomial(BaseTestDistribution): @pytest.mark.skipif( - condition=_polyagamma_not_installed, reason="`polyagamma package is not available/installed." 
+ condition=_polyagamma_not_installed, + reason="`polyagamma package is not available/installed.", ) class TestPolyaGamma(BaseTestDistribution): def polyagamma_rng_fn(self, size, h, z, rng): @@ -1313,7 +1401,9 @@ def constant_rng_fn(self, size, c): class TestZeroInflatedPoisson(BaseTestDistribution): - def zero_inflated_poisson_rng_fn(self, size, psi, theta, poisson_rng_fct, random_rng_fct): + def zero_inflated_poisson_rng_fn( + self, size, psi, theta, poisson_rng_fct, random_rng_fct + ): return poisson_rng_fct(theta, size=size) * (random_rng_fct(size=size) < psi) def seeded_zero_inflated_poisson_rng_fn(self): @@ -1344,7 +1434,9 @@ def seeded_zero_inflated_poisson_rng_fn(self): class TestZeroInflatedBinomial(BaseTestDistribution): - def zero_inflated_binomial_rng_fn(self, size, psi, n, p, binomial_rng_fct, random_rng_fct): + def zero_inflated_binomial_rng_fn( + self, size, psi, n, p, binomial_rng_fct, random_rng_fct + ): return binomial_rng_fct(n, p, size=size) * (random_rng_fct(size=size) < psi) def seeded_zero_inflated_binomial_rng_fn(self): @@ -1412,14 +1504,18 @@ def seeded_zero_inflated_negbinomial_rng_fn(self): class TestOrderedLogistic(BaseTestDistribution): pymc_dist = _OrderedLogistic pymc_dist_params = {"eta": 0, "cutpoints": np.array([-2, 0, 2])} - expected_rv_op_params = {"p": np.array([0.11920292, 0.38079708, 0.38079708, 0.11920292])} + expected_rv_op_params = { + "p": np.array([0.11920292, 0.38079708, 0.38079708, 0.11920292]) + } tests_to_run = ["check_pymc_params_match_rv_op", "check_rv_size"] class TestOrderedProbit(BaseTestDistribution): pymc_dist = _OrderedProbit pymc_dist_params = {"eta": 0, "cutpoints": np.array([-2, 0, 2])} - expected_rv_op_params = {"p": np.array([0.02275013, 0.47724987, 0.47724987, 0.02275013])} + expected_rv_op_params = { + "p": np.array([0.02275013, 0.47724987, 0.47724987, 0.02275013]) + } tests_to_run = ["check_pymc_params_match_rv_op", "check_rv_size"] @@ -1469,7 +1565,11 @@ class TestMatrixNormal(BaseTestDistribution): pymc_dist_params = {"mu": mu, "rowcov": row_cov, "colcov": col_cov} expected_rv_op_params = {"mu": mu, "rowcov": row_cov, "colcov": col_cov} - tests_to_run = ["check_pymc_params_match_rv_op", "test_matrix_normal", "test_errors"] + tests_to_run = [ + "check_pymc_params_match_rv_op", + "test_matrix_normal", + "test_errors", + ] def test_matrix_normal(self): delta = 0.05 # limit for KS p-value @@ -1480,11 +1580,16 @@ def ref_rand(mu, rowcov, colcov): with pm.Model(rng_seeder=1): matrixnormal = pm.MatrixNormal( - "matnormal", mu=np.random.random((3, 3)), rowcov=np.eye(3), colcov=np.eye(3) + "matnormal", + mu=np.random.random((3, 3)), + rowcov=np.eye(3), + colcov=np.eye(3), ) check = pm.sample_prior_predictive(n_fails, return_inferencedata=False) - ref_smp = ref_rand(mu=np.random.random((3, 3)), rowcov=np.eye(3), colcov=np.eye(3)) + ref_smp = ref_rand( + mu=np.random.random((3, 3)), rowcov=np.eye(3), colcov=np.eye(3) + ) p, f = delta, n_fails while p <= delta and f > 0: @@ -1493,7 +1598,8 @@ def ref_rand(mu, rowcov, colcov): p = np.min( [ st.ks_2samp( - np.atleast_1d(matrixnormal_smp).flatten(), np.atleast_1d(ref_smp).flatten() + np.atleast_1d(matrixnormal_smp).flatten(), + np.atleast_1d(ref_smp).flatten(), ) ] ) @@ -1516,7 +1622,10 @@ def test_errors(self): msg = "Value must be two dimensional." 
with pm.Model(): matrixnormal = pm.MatrixNormal( - "matnormal", mu=np.random.random((3, 3)), rowcov=np.eye(3), colcov=np.eye(3) + "matnormal", + mu=np.random.random((3, 3)), + rowcov=np.eye(3), + colcov=np.eye(3), ) with pytest.raises(ValueError, match=msg): rvs_to_values = {matrixnormal: aesara.tensor.ones((3, 3, 3))} @@ -1572,7 +1681,9 @@ class TestedInterpolated(pm.Interpolated): def dist(cls, **kwargs): x_points = np.linspace(mu - 5 * sigma, mu + 5 * sigma, 100) pdf_points = st.norm.pdf(x_points, loc=mu, scale=sigma) - return super().dist(x_points=x_points, pdf_points=pdf_points, **kwargs) + return super().dist( + x_points=x_points, pdf_points=pdf_points, **kwargs + ) pymc_random( TestedInterpolated, @@ -1738,7 +1849,9 @@ def test_density_dist_with_random(self, size): obs = pm.DensityDist( "density_dist", mu, - random=lambda mu, rng=None, size=None: rng.normal(loc=mu, scale=1, size=size), + random=lambda mu, rng=None, size=None: rng.normal( + loc=mu, scale=1, size=size + ), observed=np.random.randn(100, *size), size=size, ) @@ -1758,7 +1871,9 @@ def test_density_dist_without_random(self): samples = 500 with pytest.raises(NotImplementedError): - pm.sample_posterior_predictive(idata, samples=samples, model=model, size=100) + pm.sample_posterior_predictive( + idata, samples=samples, model=model, size=100 + ) @pytest.mark.parametrize("size", [(), (3,), (3, 2)], ids=str) def test_density_dist_with_random_multivariate(self, size): @@ -1791,7 +1906,9 @@ def build_model(self, distribution, shape, nested_rvs_info): except ValueError: value, nested_shape, loc = info if value is None: - nested_rvs[rv_name] = pm.Uniform(rv_name, 0 + loc, 1 + loc, shape=nested_shape) + nested_rvs[rv_name] = pm.Uniform( + rv_name, 0 + loc, 1 + loc, shape=nested_shape + ) else: nested_rvs[rv_name] = value * np.ones(nested_shape) rv = distribution("target", shape=shape, **nested_rvs) @@ -1866,18 +1983,88 @@ def test_Rice(self, prior_samples, shape, nu, sigma): @pytest.mark.parametrize( ["prior_samples", "shape", "mu", "sigma", "lower", "upper"], [ - [10, (3,), (None, tuple()), (1.0, tuple()), (None, tuple(), -1), (None, (3,))], - [10, (3,), (None, tuple()), (1.0, tuple()), (None, tuple(), -1), (None, (3,))], - [10, (3,), (None, tuple()), (1.0, tuple()), (None, (3,), -1), (None, tuple())], - [10, (3,), (None, tuple()), (1.0, tuple()), (None, (3,), -1), (None, tuple())], + [ + 10, + (3,), + (None, tuple()), + (1.0, tuple()), + (None, tuple(), -1), + (None, (3,)), + ], + [ + 10, + (3,), + (None, tuple()), + (1.0, tuple()), + (None, tuple(), -1), + (None, (3,)), + ], + [ + 10, + (3,), + (None, tuple()), + (1.0, tuple()), + (None, (3,), -1), + (None, tuple()), + ], + [ + 10, + (3,), + (None, tuple()), + (1.0, tuple()), + (None, (3,), -1), + (None, tuple()), + ], [10, (4, 3), (None, (3,)), (1.0, tuple()), (None, (3,), -1), (None, (3,))], - [10, (4, 3), (None, (3,)), (1.0, tuple()), (None, (3,), -1), (None, (4, 3))], - [10, (3,), (0.0, tuple()), (None, tuple()), (None, tuple(), -1), (None, (3,))], - [10, (3,), (0.0, tuple()), (None, tuple()), (None, tuple(), -1), (None, (3,))], - [10, (3,), (0.0, tuple()), (None, tuple()), (None, (3,), -1), (None, tuple())], - [10, (3,), (0.0, tuple()), (None, tuple()), (None, (3,), -1), (None, tuple())], + [ + 10, + (4, 3), + (None, (3,)), + (1.0, tuple()), + (None, (3,), -1), + (None, (4, 3)), + ], + [ + 10, + (3,), + (0.0, tuple()), + (None, tuple()), + (None, tuple(), -1), + (None, (3,)), + ], + [ + 10, + (3,), + (0.0, tuple()), + (None, tuple()), + (None, tuple(), -1), + (None, (3,)), + 
], + [ + 10, + (3,), + (0.0, tuple()), + (None, tuple()), + (None, (3,), -1), + (None, tuple()), + ], + [ + 10, + (3,), + (0.0, tuple()), + (None, tuple()), + (None, (3,), -1), + (None, tuple()), + ], [10, (4, 3), (0.0, tuple()), (None, (3,)), (None, (3,), -1), (None, (3,))], - [10, (4, 3), (0.0, tuple()), (None, (3,)), (None, (3,), -1), (None, (4, 3))], + [ + 10, + (4, 3), + (0.0, tuple()), + (None, (3,)), + (None, (3,), -1), + (None, (4, 3)), + ], ], ids=str, ) @@ -1929,7 +2116,9 @@ def generate_shapes(include_params=False): if not include_params: del mudim_as_event[-1] del mudim_as_dist[-1] - data = itertools.chain(itertools.product(*mudim_as_event), itertools.product(*mudim_as_dist)) + data = itertools.chain( + itertools.product(*mudim_as_event), itertools.product(*mudim_as_dist) + ) return data @@ -1941,12 +2130,16 @@ class TestMvNormal(SeededTest): ids=str, ) def test_with_np_arrays(self, sample_shape, dist_shape, mu_shape, param): - dist = pm.MvNormal.dist(mu=np.ones(mu_shape), **{param: np.eye(3)}, shape=dist_shape) + dist = pm.MvNormal.dist( + mu=np.ones(mu_shape), **{param: np.eye(3)}, shape=dist_shape + ) output_shape = to_tuple(sample_shape) + dist_shape assert dist.random(size=sample_shape).shape == output_shape @pytest.mark.parametrize( - ["sample_shape", "dist_shape", "mu_shape"], generate_shapes(include_params=False), ids=str + ["sample_shape", "dist_shape", "mu_shape"], + generate_shapes(include_params=False), + ids=str, ) def test_with_chol_rv(self, sample_shape, dist_shape, mu_shape): with pm.Model() as model: @@ -1961,7 +2154,9 @@ def test_with_chol_rv(self, sample_shape, dist_shape, mu_shape): assert prior["mv"].shape == to_tuple(sample_shape) + dist_shape @pytest.mark.parametrize( - ["sample_shape", "dist_shape", "mu_shape"], generate_shapes(include_params=False), ids=str + ["sample_shape", "dist_shape", "mu_shape"], + generate_shapes(include_params=False), + ids=str, ) def test_with_cov_rv(self, sample_shape, dist_shape, mu_shape): with pm.Model() as model: @@ -1981,7 +2176,9 @@ def test_issue_3758(self): with pm.Model() as model: a = pm.Normal("a", sigma=100, shape=ndim) b = pm.Normal("b", mu=a, sigma=1, shape=ndim) - c = pm.MvNormal("c", mu=a, chol=np.linalg.cholesky(np.eye(ndim)), shape=ndim) + c = pm.MvNormal( + "c", mu=a, chol=np.linalg.cholesky(np.eye(ndim)), shape=ndim + ) d = pm.MvNormal("d", mu=a, cov=np.eye(ndim), shape=ndim) samples = pm.sample_prior_predictive(1000) @@ -2053,7 +2250,9 @@ def test_with_np_arrays(self, sample_shape, dist_shape, mu_shape, param): @pytest.mark.xfail @pytest.mark.parametrize( - ["sample_shape", "dist_shape", "mu_shape"], generate_shapes(include_params=False), ids=str + ["sample_shape", "dist_shape", "mu_shape"], + generate_shapes(include_params=False), + ids=str, ) def test_with_chol_rv(self, sample_shape, dist_shape, mu_shape): with pm.Model() as model: @@ -2069,7 +2268,9 @@ def test_with_chol_rv(self, sample_shape, dist_shape, mu_shape): @pytest.mark.xfail @pytest.mark.parametrize( - ["sample_shape", "dist_shape", "mu_shape"], generate_shapes(include_params=False), ids=str + ["sample_shape", "dist_shape", "mu_shape"], + generate_shapes(include_params=False), + ids=str, ) def test_with_cov_rv(self, sample_shape, dist_shape, mu_shape): with pm.Model() as model: @@ -2078,7 +2279,9 @@ def test_with_cov_rv(self, sample_shape, dist_shape, mu_shape): chol, corr, stds = pm.LKJCholeskyCov( "chol_cov", n=3, eta=2, sd_dist=sd_dist, compute_corr=True ) - mv = pm.MvGaussianRandomWalk("mv", mu, cov=pm.math.dot(chol, chol.T), 
shape=dist_shape) + mv = pm.MvGaussianRandomWalk( + "mv", mu, cov=pm.math.dot(chol, chol.T), shape=dist_shape + ) prior = pm.sample_prior_predictive(samples=sample_shape) assert prior["mv"].shape == to_tuple(sample_shape) + dist_shape @@ -2091,7 +2294,12 @@ def test_car_rng_fn(sparse): size = (100,) W = np.array( - [[0.0, 1.0, 1.0, 0.0], [1.0, 0.0, 0.0, 1.0], [1.0, 0.0, 0.0, 1.0], [0.0, 1.0, 1.0, 0.0]] + [ + [0.0, 1.0, 1.0, 0.0], + [1.0, 0.0, 0.0, 1.0], + [1.0, 0.0, 0.0, 1.0], + [0.0, 1.0, 1.0, 0.0], + ] ) tau = 2 From 3d17a8a94ab05948cc2d57263059e730394fad52 Mon Sep 17 00:00:00 2001 From: AlexAndorra Date: Fri, 15 Oct 2021 13:52:51 +0200 Subject: [PATCH 20/22] Reinstate Black line length 100 in pyproject --- pyproject.toml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pyproject.toml b/pyproject.toml index 31bdfe0dcc..aa4e5e2bfa 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -2,6 +2,9 @@ minversion = "6.0" xfail_strict=true +[tool.black] +line-length = 100 + [tool.coverage.report] exclude_lines = [ "pragma: nocover", From 56cea87272ab4c67dfd85324b0ede6652e8fd8e9 Mon Sep 17 00:00:00 2001 From: AlexAndorra Date: Fri, 15 Oct 2021 14:01:50 +0200 Subject: [PATCH 21/22] Revert test_distributions_random to version main --- pymc/tests/test_distributions_random.py | 499 +++++++++++++----------- 1 file changed, 281 insertions(+), 218 deletions(-) diff --git a/pymc/tests/test_distributions_random.py b/pymc/tests/test_distributions_random.py index a7dc49a14a..b6990c6a38 100644 --- a/pymc/tests/test_distributions_random.py +++ b/pymc/tests/test_distributions_random.py @@ -182,9 +182,7 @@ def get_random_variable(self, shape, with_vector_params=False, name=None): size = shape else: size = shape[:-ndim_supp] - return self.distribution( - name, size=size, transform=None, **params - ) + return self.distribution(name, size=size, transform=None, **params) except TypeError: if np.sum(np.atleast_1d(shape)) == 0: pytest.skip("Timeseries must have positive shape") @@ -202,9 +200,7 @@ def sample_random_variable(random_variable, size): def test_scalar_distribution_shape(self, shape, size): """Draws samples of different [size] from a scalar [shape] RV.""" rv = self.get_random_variable(shape) - exp_shape = ( - self.default_shape if shape is None else tuple(np.atleast_1d(shape)) - ) + exp_shape = self.default_shape if shape is None else tuple(np.atleast_1d(shape)) exp_size = self.default_size if size is None else tuple(np.atleast_1d(size)) expected = exp_size + exp_shape actual = np.shape(self.sample_random_variable(rv, size)) @@ -224,9 +220,7 @@ def test_scalar_distribution_shape(self, shape, size): def test_scalar_sample_shape(self, shape, size): """Draws samples of scalar [size] from a [shape] RV.""" rv = self.get_random_variable(shape) - exp_shape = ( - self.default_shape if shape is None else tuple(np.atleast_1d(shape)) - ) + exp_shape = self.default_shape if shape is None else tuple(np.atleast_1d(shape)) exp_size = self.default_size if size is None else tuple(np.atleast_1d(size)) expected = exp_size + exp_shape actual = np.shape(self.sample_random_variable(rv, size)) @@ -239,9 +233,7 @@ def test_scalar_sample_shape(self, shape, size): def test_vector_params(self, shape, size): shape = self.shape rv = self.get_random_variable(shape, with_vector_params=True) - exp_shape = ( - self.default_shape if shape is None else tuple(np.atleast_1d(shape)) - ) + exp_shape = self.default_shape if shape is None else tuple(np.atleast_1d(shape)) exp_size = self.default_size if size is None else 
tuple(np.atleast_1d(size)) expected = exp_size + exp_shape actual = np.shape(self.sample_random_variable(rv, size)) @@ -354,9 +346,7 @@ def test_distribution(self): def _instantiate_pymc_rv(self, dist_params=None): params = dist_params if dist_params else self.pymc_dist_params self.pymc_rv = self.pymc_dist.dist( - **params, - size=self.size, - rng=aesara.shared(self.get_random_state(reset=True)), + **params, size=self.size, rng=aesara.shared(self.get_random_state(reset=True)) ) def check_pymc_draws_match_reference(self): @@ -372,36 +362,16 @@ def check_pymc_params_match_rv_op(self): for (expected_name, expected_value), actual_variable in zip( self.expected_rv_op_params.items(), aesera_dist_inputs ): - assert_almost_equal( - expected_value, actual_variable.eval(), decimal=self.decimal - ) + assert_almost_equal(expected_value, actual_variable.eval(), decimal=self.decimal) def check_rv_size(self): # test sizes - sizes_to_check = self.sizes_to_check or [ - None, - (), - 1, - (1,), - 5, - (4, 5), - (2, 4, 2), - ] - sizes_expected = self.sizes_expected or [ - (), - (), - (1,), - (1,), - (5,), - (4, 5), - (2, 4, 2), - ] + sizes_to_check = self.sizes_to_check or [None, (), 1, (1,), 5, (4, 5), (2, 4, 2)] + sizes_expected = self.sizes_expected or [(), (), (1,), (1,), (5,), (4, 5), (2, 4, 2)] for size, expected in zip(sizes_to_check, sizes_expected): pymc_rv = self.pymc_dist.dist(**self.pymc_dist_params, size=size) actual = tuple(pymc_rv.shape.eval()) - assert ( - actual == expected - ), f"size={size}, expected={expected}, actual={actual}" + assert actual == expected, f"size={size}, expected={expected}, actual={actual}" # test multi-parameters sampling for univariate distributions (with univariate inputs) if ( @@ -410,15 +380,10 @@ def check_rv_size(self): and sum(self.pymc_dist.rv_op.ndims_params) == 0 ): params = { - k: p * np.ones(self.repeated_params_shape) - for k, p in self.pymc_dist_params.items() + k: p * np.ones(self.repeated_params_shape) for k, p in self.pymc_dist_params.items() } self._instantiate_pymc_rv(params) - sizes_to_check = [ - None, - self.repeated_params_shape, - (5, self.repeated_params_shape), - ] + sizes_to_check = [None, self.repeated_params_shape, (5, self.repeated_params_shape)] sizes_expected = [ (self.repeated_params_shape,), (self.repeated_params_shape,), @@ -479,20 +444,13 @@ def check_not_implemented(self): class TestDiscreteWeibull(BaseTestDistribution): def discrete_weibul_rng_fn(self, size, q, beta, uniform_rng_fct): - return ( - np.ceil( - np.power(np.log(1 - uniform_rng_fct(size=size)) / np.log(q), 1.0 / beta) - ) - - 1 - ) + return np.ceil(np.power(np.log(1 - uniform_rng_fct(size=size)) / np.log(q), 1.0 / beta)) - 1 def seeded_discrete_weibul_rng_fn(self): uniform_rng_fct = functools.partial( getattr(np.random.RandomState, "uniform"), self.get_random_state() ) - return functools.partial( - self.discrete_weibul_rng_fn, uniform_rng_fct=uniform_rng_fct - ) + return functools.partial(self.discrete_weibul_rng_fn, uniform_rng_fct=uniform_rng_fct) pymc_dist = pm.DiscreteWeibull pymc_dist_params = {"q": 0.25, "beta": 2.0} @@ -545,9 +503,7 @@ def seeded_asymmetriclaplace_rng_fn(self): uniform_rng_fct = functools.partial( getattr(np.random.RandomState, "uniform"), self.get_random_state() ) - return functools.partial( - self.asymmetriclaplace_rng_fn, uniform_rng_fct=uniform_rng_fct - ) + return functools.partial(self.asymmetriclaplace_rng_fn, uniform_rng_fct=uniform_rng_fct) pymc_dist = pm.AsymmetricLaplace @@ -563,12 +519,8 @@ def seeded_asymmetriclaplace_rng_fn(self): 
class TestExGaussian(BaseTestDistribution): - def exgaussian_rng_fn( - self, mu, sigma, nu, size, normal_rng_fct, exponential_rng_fct - ): - return normal_rng_fct(mu, sigma, size=size) + exponential_rng_fct( - scale=nu, size=size - ) + def exgaussian_rng_fn(self, mu, sigma, nu, size, normal_rng_fct, exponential_rng_fct): + return normal_rng_fct(mu, sigma, size=size) + exponential_rng_fct(scale=nu, size=size) def seeded_exgaussian_rng_fn(self): normal_rng_fct = functools.partial( @@ -602,7 +554,10 @@ class TestGumbel(BaseTestDistribution): expected_rv_op_params = {"mu": 1.5, "beta": 3.0} reference_dist_params = {"loc": 1.5, "scale": 3.0} reference_dist = seeded_scipy_distribution_builder("gumbel_r") - tests_to_run = ["check_pymc_params_match_rv_op", "check_pymc_draws_match_reference"] + tests_to_run = [ + "check_pymc_params_match_rv_op", + "check_pymc_draws_match_reference", + ] class TestStudentT(BaseTestDistribution): @@ -639,9 +594,7 @@ def seeded_kumaraswamy_rng_fn(self): uniform_rng_fct = functools.partial( getattr(np.random.RandomState, "uniform"), self.get_random_state() ) - return functools.partial( - self.kumaraswamy_rng_fn, uniform_rng_fct=uniform_rng_fct - ) + return functools.partial(self.kumaraswamy_rng_fn, uniform_rng_fct=uniform_rng_fct) pymc_dist = pm.Kumaraswamy pymc_dist_params = {"a": 1.0, "b": 1.0} @@ -680,7 +633,9 @@ class TestTruncatedNormalTau(BaseTestDistribution): tau, sigma = get_tau_sigma(tau=tau, sigma=None) pymc_dist_params = {"mu": mu, "tau": tau, "lower": lower, "upper": upper} expected_rv_op_params = {"mu": mu, "sigma": sigma, "lower": lower, "upper": upper} - tests_to_run = ["check_pymc_params_match_rv_op"] + tests_to_run = [ + "check_pymc_params_match_rv_op", + ] class TestTruncatedNormalLowerTau(BaseTestDistribution): @@ -689,7 +644,9 @@ class TestTruncatedNormalLowerTau(BaseTestDistribution): tau, sigma = get_tau_sigma(tau=tau, sigma=None) pymc_dist_params = {"mu": mu, "tau": tau, "lower": lower} expected_rv_op_params = {"mu": mu, "sigma": sigma, "lower": lower, "upper": upper} - tests_to_run = ["check_pymc_params_match_rv_op"] + tests_to_run = [ + "check_pymc_params_match_rv_op", + ] class TestTruncatedNormalUpperTau(BaseTestDistribution): @@ -698,7 +655,9 @@ class TestTruncatedNormalUpperTau(BaseTestDistribution): tau, sigma = get_tau_sigma(tau=tau, sigma=None) pymc_dist_params = {"mu": mu, "tau": tau, "upper": upper} expected_rv_op_params = {"mu": mu, "sigma": sigma, "lower": lower, "upper": upper} - tests_to_run = ["check_pymc_params_match_rv_op"] + tests_to_run = [ + "check_pymc_params_match_rv_op", + ] class TestTruncatedNormalUpperArray(BaseTestDistribution): @@ -707,13 +666,20 @@ class TestTruncatedNormalUpperArray(BaseTestDistribution): np.array([-np.inf, -np.inf]), np.array([3, 2]), np.array([0, 0]), - np.array([1, 1]), + np.array( + [ + 1, + 1, + ] + ), ) size = (15, 2) tau, sigma = get_tau_sigma(tau=tau, sigma=None) pymc_dist_params = {"mu": mu, "tau": tau, "upper": upper} expected_rv_op_params = {"mu": mu, "sigma": sigma, "lower": lower, "upper": upper} - tests_to_run = ["check_pymc_params_match_rv_op"] + tests_to_run = [ + "check_pymc_params_match_rv_op", + ] class TestWald(BaseTestDistribution): @@ -742,9 +708,7 @@ def test_distribution(self): def check_pymc_draws_match_reference(self): assert_array_almost_equal( - self.pymc_rv.eval(), - self.reference_dist_draws + self.alpha, - decimal=self.decimal, + self.pymc_rv.eval(), self.reference_dist_draws + self.alpha, decimal=self.decimal ) @@ -754,7 +718,9 @@ class 
TestWaldMuPhi(BaseTestDistribution): mu_rv, lam_rv, phi_rv = pm.Wald.get_mu_lam_phi(mu=mu, lam=None, phi=phi) pymc_dist_params = {"mu": mu, "phi": phi, "alpha": alpha} expected_rv_op_params = {"mu": mu_rv, "lam": lam_rv, "alpha": alpha} - tests_to_run = ["check_pymc_params_match_rv_op"] + tests_to_run = [ + "check_pymc_params_match_rv_op", + ] class TestSkewNormal(BaseTestDistribution): @@ -878,7 +844,10 @@ class TestHalfNormal(BaseTestDistribution): expected_rv_op_params = {"mean": 0, "sigma": 10.0} reference_dist_params = {"loc": 0, "scale": 10.0} reference_dist = seeded_scipy_distribution_builder("halfnorm") - tests_to_run = ["check_pymc_params_match_rv_op", "check_pymc_draws_match_reference"] + tests_to_run = [ + "check_pymc_params_match_rv_op", + "check_pymc_draws_match_reference", + ] class TestHalfNormalTau(BaseTestDistribution): @@ -928,7 +897,10 @@ class TestExponential(BaseTestDistribution): expected_rv_op_params = {"mu": 1.0 / pymc_dist_params["lam"]} reference_dist_params = {"scale": 1.0 / pymc_dist_params["lam"]} reference_dist = seeded_numpy_distribution_builder("exponential") - tests_to_run = ["check_pymc_params_match_rv_op", "check_pymc_draws_match_reference"] + tests_to_run = [ + "check_pymc_params_match_rv_op", + "check_pymc_draws_match_reference", + ] class TestCauchy(BaseTestDistribution): @@ -937,7 +909,10 @@ class TestCauchy(BaseTestDistribution): expected_rv_op_params = {"alpha": 2.0, "beta": 5.0} reference_dist_params = {"loc": 2.0, "scale": 5.0} reference_dist = seeded_scipy_distribution_builder("cauchy") - tests_to_run = ["check_pymc_params_match_rv_op", "check_pymc_draws_match_reference"] + tests_to_run = [ + "check_pymc_params_match_rv_op", + "check_pymc_draws_match_reference", + ] class TestHalfCauchy(BaseTestDistribution): @@ -946,7 +921,10 @@ class TestHalfCauchy(BaseTestDistribution): expected_rv_op_params = {"alpha": 0.0, "beta": 5.0} reference_dist_params = {"loc": 0.0, "scale": 5.0} reference_dist = seeded_scipy_distribution_builder("halfcauchy") - tests_to_run = ["check_pymc_params_match_rv_op", "check_pymc_draws_match_reference"] + tests_to_run = [ + "check_pymc_params_match_rv_op", + "check_pymc_draws_match_reference", + ] class TestGamma(BaseTestDistribution): @@ -955,7 +933,10 @@ class TestGamma(BaseTestDistribution): expected_rv_op_params = {"alpha": 2.0, "beta": 1 / 5.0} reference_dist_params = {"shape": 2.0, "scale": 1 / 5.0} reference_dist = seeded_numpy_distribution_builder("gamma") - tests_to_run = ["check_pymc_params_match_rv_op", "check_pymc_draws_match_reference"] + tests_to_run = [ + "check_pymc_params_match_rv_op", + "check_pymc_draws_match_reference", + ] class TestGammaMuSigma(BaseTestDistribution): @@ -974,7 +955,10 @@ class TestInverseGamma(BaseTestDistribution): expected_rv_op_params = {"alpha": 2.0, "beta": 5.0} reference_dist_params = {"a": 2.0, "scale": 5.0} reference_dist = seeded_scipy_distribution_builder("invgamma") - tests_to_run = ["check_pymc_params_match_rv_op", "check_pymc_draws_match_reference"] + tests_to_run = [ + "check_pymc_params_match_rv_op", + "check_pymc_draws_match_reference", + ] class TestInverseGammaMuSigma(BaseTestDistribution): @@ -1021,7 +1005,10 @@ class TestNegativeBinomialMuSigma(BaseTestDistribution): pymc_dist = pm.NegativeBinomial pymc_dist_params = {"mu": 5.0, "alpha": 8.0} expected_n, expected_p = pm.NegativeBinomial.get_n_p( - mu=pymc_dist_params["mu"], alpha=pymc_dist_params["alpha"], n=None, p=None + mu=pymc_dist_params["mu"], + alpha=pymc_dist_params["alpha"], + n=None, + p=None, ) 
expected_rv_op_params = {"n": expected_n, "p": expected_p} tests_to_run = ["check_pymc_params_match_rv_op"] @@ -1033,7 +1020,10 @@ class TestBernoulli(BaseTestDistribution): expected_rv_op_params = {"p": 0.33} reference_dist_params = {"p": 0.33} reference_dist = seeded_scipy_distribution_builder("bernoulli") - tests_to_run = ["check_pymc_params_match_rv_op", "check_pymc_draws_match_reference"] + tests_to_run = [ + "check_pymc_params_match_rv_op", + "check_pymc_draws_match_reference", + ] class TestBernoulliLogitP(BaseTestDistribution): @@ -1237,7 +1227,10 @@ class TestCategorical(BaseTestDistribution): pymc_dist = pm.Categorical pymc_dist_params = {"p": np.array([0.28, 0.62, 0.10])} expected_rv_op_params = {"p": np.array([0.28, 0.62, 0.10])} - tests_to_run = ["check_pymc_params_match_rv_op", "check_rv_size"] + tests_to_run = [ + "check_pymc_params_match_rv_op", + "check_rv_size", + ] class TestGeometric(BaseTestDistribution): @@ -1257,7 +1250,10 @@ class TestHyperGeometric(BaseTestDistribution): } reference_dist_params = expected_rv_op_params reference_dist = seeded_numpy_distribution_builder("hypergeometric") - tests_to_run = ["check_pymc_params_match_rv_op", "check_pymc_draws_match_reference"] + tests_to_run = [ + "check_pymc_params_match_rv_op", + "check_pymc_draws_match_reference", + ] class TestLogistic(BaseTestDistribution): @@ -1295,7 +1291,10 @@ class TestTriangular(BaseTestDistribution): expected_rv_op_params = {"lower": 0, "c": 0.5, "upper": 1} reference_dist_params = {"left": 0, "mode": 0.5, "right": 1} reference_dist = seeded_numpy_distribution_builder("triangular") - tests_to_run = ["check_pymc_params_match_rv_op", "check_pymc_draws_match_reference"] + tests_to_run = [ + "check_pymc_params_match_rv_op", + "check_pymc_draws_match_reference", + ] class TestVonMises(BaseTestDistribution): @@ -1313,9 +1312,7 @@ def seeded_weibul_rng_fn(self): std_weibull_rng_fct = functools.partial( getattr(np.random.RandomState, "weibull"), self.get_random_state() ) - return functools.partial( - self.weibull_rng_fn, std_weibull_rng_fct=std_weibull_rng_fct - ) + return functools.partial(self.weibull_rng_fn, std_weibull_rng_fct=std_weibull_rng_fct) pymc_dist = pm.Weibull pymc_dist_params = {"alpha": 1.0, "beta": 2.0} @@ -1401,9 +1398,7 @@ def constant_rng_fn(self, size, c): class TestZeroInflatedPoisson(BaseTestDistribution): - def zero_inflated_poisson_rng_fn( - self, size, psi, theta, poisson_rng_fct, random_rng_fct - ): + def zero_inflated_poisson_rng_fn(self, size, psi, theta, poisson_rng_fct, random_rng_fct): return poisson_rng_fct(theta, size=size) * (random_rng_fct(size=size) < psi) def seeded_zero_inflated_poisson_rng_fn(self): @@ -1434,9 +1429,7 @@ def seeded_zero_inflated_poisson_rng_fn(self): class TestZeroInflatedBinomial(BaseTestDistribution): - def zero_inflated_binomial_rng_fn( - self, size, psi, n, p, binomial_rng_fct, random_rng_fct - ): + def zero_inflated_binomial_rng_fn(self, size, psi, n, p, binomial_rng_fct, random_rng_fct): return binomial_rng_fct(n, p, size=size) * (random_rng_fct(size=size) < psi) def seeded_zero_inflated_binomial_rng_fn(self): @@ -1504,19 +1497,21 @@ def seeded_zero_inflated_negbinomial_rng_fn(self): class TestOrderedLogistic(BaseTestDistribution): pymc_dist = _OrderedLogistic pymc_dist_params = {"eta": 0, "cutpoints": np.array([-2, 0, 2])} - expected_rv_op_params = { - "p": np.array([0.11920292, 0.38079708, 0.38079708, 0.11920292]) - } - tests_to_run = ["check_pymc_params_match_rv_op", "check_rv_size"] + expected_rv_op_params = {"p": np.array([0.11920292, 
0.38079708, 0.38079708, 0.11920292])} + tests_to_run = [ + "check_pymc_params_match_rv_op", + "check_rv_size", + ] class TestOrderedProbit(BaseTestDistribution): pymc_dist = _OrderedProbit pymc_dist_params = {"eta": 0, "cutpoints": np.array([-2, 0, 2])} - expected_rv_op_params = { - "p": np.array([0.02275013, 0.47724987, 0.47724987, 0.02275013]) - } - tests_to_run = ["check_pymc_params_match_rv_op", "check_rv_size"] + expected_rv_op_params = {"p": np.array([0.02275013, 0.47724987, 0.47724987, 0.02275013])} + tests_to_run = [ + "check_pymc_params_match_rv_op", + "check_rv_size", + ] class TestOrderedMultinomial(BaseTestDistribution): @@ -1528,7 +1523,10 @@ class TestOrderedMultinomial(BaseTestDistribution): "n": 1000, "p": np.array([0.11920292, 0.38079708, 0.38079708, 0.11920292]), } - tests_to_run = ["check_pymc_params_match_rv_op", "check_rv_size"] + tests_to_run = [ + "check_pymc_params_match_rv_op", + "check_rv_size", + ] class TestWishart(BaseTestDistribution): @@ -1542,7 +1540,11 @@ def wishart_rng_fn(self, size, nu, V, rng): reference_dist_params = {"nu": 4, "V": V} expected_rv_op_params = {"nu": 4, "V": V} sizes_to_check = [None, 1, (4, 5)] - sizes_expected = [(3, 3), (1, 3, 3), (4, 5, 3, 3)] + sizes_expected = [ + (3, 3), + (1, 3, 3), + (4, 5, 3, 3), + ] reference_dist = lambda self: functools.partial( self.wishart_rng_fn, rng=self.get_random_state() ) @@ -1565,11 +1567,7 @@ class TestMatrixNormal(BaseTestDistribution): pymc_dist_params = {"mu": mu, "rowcov": row_cov, "colcov": col_cov} expected_rv_op_params = {"mu": mu, "rowcov": row_cov, "colcov": col_cov} - tests_to_run = [ - "check_pymc_params_match_rv_op", - "test_matrix_normal", - "test_errors", - ] + tests_to_run = ["check_pymc_params_match_rv_op", "test_matrix_normal", "test_errors"] def test_matrix_normal(self): delta = 0.05 # limit for KS p-value @@ -1585,11 +1583,9 @@ def ref_rand(mu, rowcov, colcov): rowcov=np.eye(3), colcov=np.eye(3), ) - check = pm.sample_prior_predictive(n_fails, return_inferencedata=False) + check = pm.sample_prior_predictive(n_fails) - ref_smp = ref_rand( - mu=np.random.random((3, 3)), rowcov=np.eye(3), colcov=np.eye(3) - ) + ref_smp = ref_rand(mu=np.random.random((3, 3)), rowcov=np.eye(3), colcov=np.eye(3)) p, f = delta, n_fails while p <= delta and f > 0: @@ -1681,9 +1677,7 @@ class TestedInterpolated(pm.Interpolated): def dist(cls, **kwargs): x_points = np.linspace(mu - 5 * sigma, mu + 5 * sigma, 100) pdf_points = st.norm.pdf(x_points, loc=mu, scale=sigma) - return super().dist( - x_points=x_points, pdf_points=pdf_points, **kwargs - ) + return super().dist(x_points=x_points, pdf_points=pdf_points, **kwargs) pymc_random( TestedInterpolated, @@ -1716,7 +1710,10 @@ def kronecker_rng_fn(self, size, mu, covs=None, sigma=None, rng=None): reference_dist = lambda self: functools.partial( self.kronecker_rng_fn, rng=self.get_random_state() ) - tests_to_run = ["check_pymc_draws_match_reference", "check_rv_size"] + tests_to_run = [ + "check_pymc_draws_match_reference", + "check_rv_size", + ] class TestScalarParameterSamples(SeededTest): @@ -1849,9 +1846,7 @@ def test_density_dist_with_random(self, size): obs = pm.DensityDist( "density_dist", mu, - random=lambda mu, rng=None, size=None: rng.normal( - loc=mu, scale=1, size=size - ), + random=lambda mu, rng=None, size=None: rng.normal(loc=mu, scale=1, size=size), observed=np.random.randn(100, *size), size=size, ) @@ -1871,9 +1866,7 @@ def test_density_dist_without_random(self): samples = 500 with pytest.raises(NotImplementedError): - 
pm.sample_posterior_predictive( - idata, samples=samples, model=model, size=100 - ) + pm.sample_posterior_predictive(idata, samples=samples, model=model, size=100) @pytest.mark.parametrize("size", [(), (3,), (3, 2)], ids=str) def test_density_dist_with_random_multivariate(self, size): @@ -1907,29 +1900,62 @@ def build_model(self, distribution, shape, nested_rvs_info): value, nested_shape, loc = info if value is None: nested_rvs[rv_name] = pm.Uniform( - rv_name, 0 + loc, 1 + loc, shape=nested_shape + rv_name, + 0 + loc, + 1 + loc, + shape=nested_shape, ) else: nested_rvs[rv_name] = value * np.ones(nested_shape) - rv = distribution("target", shape=shape, **nested_rvs) + rv = distribution( + "target", + shape=shape, + **nested_rvs, + ) return model, rv, nested_rvs def sample_prior(self, distribution, shape, nested_rvs_info, prior_samples): - model, rv, nested_rvs = self.build_model(distribution, shape, nested_rvs_info) + model, rv, nested_rvs = self.build_model( + distribution, + shape, + nested_rvs_info, + ) with model: - return pm.sample_prior_predictive(prior_samples, return_inferencedata=False) + return pm.sample_prior_predictive(prior_samples) @pytest.mark.parametrize( ["prior_samples", "shape", "mu", "alpha"], [ [10, (3,), (None, tuple()), (None, (3,))], [10, (3,), (None, (3,)), (None, tuple())], - [10, (4, 3), (None, (3,)), (None, (3,))], - [10, (4, 3), (None, (3,)), (None, (4, 3))], + [ + 10, + ( + 4, + 3, + ), + (None, (3,)), + (None, (3,)), + ], + [ + 10, + ( + 4, + 3, + ), + (None, (3,)), + (None, (4, 3)), + ], ], ids=str, ) - def test_NegativeBinomial(self, prior_samples, shape, mu, alpha): + def test_NegativeBinomial( + self, + prior_samples, + shape, + mu, + alpha, + ): prior = self.sample_prior( distribution=pm.NegativeBinomial, shape=shape, @@ -1945,12 +1971,37 @@ def test_NegativeBinomial(self, prior_samples, shape, mu, alpha): [10, (3,), (0.5, (3,)), (None, tuple()), (None, (3,))], [10, (3,), (0.5, tuple()), (None, (3,)), (None, tuple())], [10, (3,), (0.5, (3,)), (None, (3,)), (None, tuple())], - [10, (4, 3), (0.5, (3,)), (None, (3,)), (None, (3,))], - [10, (4, 3), (0.5, (3,)), (None, (3,)), (None, (4, 3))], + [ + 10, + ( + 4, + 3, + ), + (0.5, (3,)), + (None, (3,)), + (None, (3,)), + ], + [ + 10, + ( + 4, + 3, + ), + (0.5, (3,)), + (None, (3,)), + (None, (4, 3)), + ], ], ids=str, ) - def test_ZeroInflatedNegativeBinomial(self, prior_samples, shape, psi, mu, alpha): + def test_ZeroInflatedNegativeBinomial( + self, + prior_samples, + shape, + psi, + mu, + alpha, + ): prior = self.sample_prior( distribution=pm.ZeroInflatedNegativeBinomial, shape=shape, @@ -1966,12 +2017,34 @@ def test_ZeroInflatedNegativeBinomial(self, prior_samples, shape, psi, mu, alpha [10, (3,), (None, tuple()), (None, (3,))], [10, (3,), (None, (3,)), (None, tuple())], [10, (3,), (None, (3,)), (None, tuple())], - [10, (4, 3), (None, (3,)), (None, (3,))], - [10, (4, 3), (None, (3,)), (None, (4, 3))], + [ + 10, + ( + 4, + 3, + ), + (None, (3,)), + (None, (3,)), + ], + [ + 10, + ( + 4, + 3, + ), + (None, (3,)), + (None, (4, 3)), + ], ], ids=str, ) - def test_Rice(self, prior_samples, shape, nu, sigma): + def test_Rice( + self, + prior_samples, + shape, + nu, + sigma, + ): prior = self.sample_prior( distribution=pm.Rice, shape=shape, @@ -1983,83 +2056,53 @@ def test_Rice(self, prior_samples, shape, nu, sigma): @pytest.mark.parametrize( ["prior_samples", "shape", "mu", "sigma", "lower", "upper"], [ + [10, (3,), (None, tuple()), (1.0, tuple()), (None, tuple(), -1), (None, (3,))], + [10, (3,), (None, tuple()), 
(1.0, tuple()), (None, tuple(), -1), (None, (3,))], + [10, (3,), (None, tuple()), (1.0, tuple()), (None, (3,), -1), (None, tuple())], + [10, (3,), (None, tuple()), (1.0, tuple()), (None, (3,), -1), (None, tuple())], [ 10, - (3,), - (None, tuple()), - (1.0, tuple()), - (None, tuple(), -1), - (None, (3,)), - ], - [ - 10, - (3,), - (None, tuple()), - (1.0, tuple()), - (None, tuple(), -1), + ( + 4, + 3, + ), (None, (3,)), - ], - [ - 10, - (3,), - (None, tuple()), (1.0, tuple()), (None, (3,), -1), - (None, tuple()), - ], - [ - 10, - (3,), - (None, tuple()), - (1.0, tuple()), - (None, (3,), -1), - (None, tuple()), + (None, (3,)), ], - [10, (4, 3), (None, (3,)), (1.0, tuple()), (None, (3,), -1), (None, (3,))], [ 10, - (4, 3), + ( + 4, + 3, + ), (None, (3,)), (1.0, tuple()), (None, (3,), -1), (None, (4, 3)), ], + [10, (3,), (0.0, tuple()), (None, tuple()), (None, tuple(), -1), (None, (3,))], + [10, (3,), (0.0, tuple()), (None, tuple()), (None, tuple(), -1), (None, (3,))], + [10, (3,), (0.0, tuple()), (None, tuple()), (None, (3,), -1), (None, tuple())], + [10, (3,), (0.0, tuple()), (None, tuple()), (None, (3,), -1), (None, tuple())], [ 10, - (3,), - (0.0, tuple()), - (None, tuple()), - (None, tuple(), -1), - (None, (3,)), - ], - [ - 10, - (3,), + ( + 4, + 3, + ), (0.0, tuple()), - (None, tuple()), - (None, tuple(), -1), (None, (3,)), - ], - [ - 10, - (3,), - (0.0, tuple()), - (None, tuple()), (None, (3,), -1), - (None, tuple()), - ], - [ - 10, - (3,), - (0.0, tuple()), - (None, tuple()), - (None, (3,), -1), - (None, tuple()), + (None, (3,)), ], - [10, (4, 3), (0.0, tuple()), (None, (3,)), (None, (3,), -1), (None, (3,))], [ 10, - (4, 3), + ( + 4, + 3, + ), (0.0, tuple()), (None, (3,)), (None, (3,), -1), @@ -2068,7 +2111,15 @@ def test_Rice(self, prior_samples, shape, nu, sigma): ], ids=str, ) - def test_TruncatedNormal(self, prior_samples, shape, mu, sigma, lower, upper): + def test_TruncatedNormal( + self, + prior_samples, + shape, + mu, + sigma, + lower, + upper, + ): prior = self.sample_prior( distribution=pm.TruncatedNormal, shape=shape, @@ -2083,12 +2134,37 @@ def test_TruncatedNormal(self, prior_samples, shape, mu, sigma, lower, upper): [10, (3,), (None, tuple()), (-1.0, (3,)), (2, tuple())], [10, (3,), (None, tuple()), (-1.0, tuple()), (None, tuple(), 1)], [10, (3,), (None, (3,)), (-1.0, tuple()), (None, tuple(), 1)], - [10, (4, 3), (None, (3,)), (-1.0, tuple()), (None, (3,), 1)], - [10, (4, 3), (None, (3,)), (None, tuple(), -1), (None, (3,), 1)], + [ + 10, + ( + 4, + 3, + ), + (None, (3,)), + (-1.0, tuple()), + (None, (3,), 1), + ], + [ + 10, + ( + 4, + 3, + ), + (None, (3,)), + (None, tuple(), -1), + (None, (3,), 1), + ], ], ids=str, ) - def test_Triangular(self, prior_samples, shape, c, lower, upper): + def test_Triangular( + self, + prior_samples, + shape, + c, + lower, + upper, + ): prior = self.sample_prior( distribution=pm.Triangular, shape=shape, @@ -2116,9 +2192,7 @@ def generate_shapes(include_params=False): if not include_params: del mudim_as_event[-1] del mudim_as_dist[-1] - data = itertools.chain( - itertools.product(*mudim_as_event), itertools.product(*mudim_as_dist) - ) + data = itertools.chain(itertools.product(*mudim_as_event), itertools.product(*mudim_as_dist)) return data @@ -2130,9 +2204,7 @@ class TestMvNormal(SeededTest): ids=str, ) def test_with_np_arrays(self, sample_shape, dist_shape, mu_shape, param): - dist = pm.MvNormal.dist( - mu=np.ones(mu_shape), **{param: np.eye(3)}, shape=dist_shape - ) + dist = pm.MvNormal.dist(mu=np.ones(mu_shape), **{param: np.eye(3)}, 
shape=dist_shape) output_shape = to_tuple(sample_shape) + dist_shape assert dist.random(size=sample_shape).shape == output_shape @@ -2176,9 +2248,7 @@ def test_issue_3758(self): with pm.Model() as model: a = pm.Normal("a", sigma=100, shape=ndim) b = pm.Normal("b", mu=a, sigma=1, shape=ndim) - c = pm.MvNormal( - "c", mu=a, chol=np.linalg.cholesky(np.eye(ndim)), shape=ndim - ) + c = pm.MvNormal("c", mu=a, chol=np.linalg.cholesky(np.eye(ndim)), shape=ndim) d = pm.MvNormal("d", mu=a, cov=np.eye(ndim), shape=ndim) samples = pm.sample_prior_predictive(1000) @@ -2279,9 +2349,7 @@ def test_with_cov_rv(self, sample_shape, dist_shape, mu_shape): chol, corr, stds = pm.LKJCholeskyCov( "chol_cov", n=3, eta=2, sd_dist=sd_dist, compute_corr=True ) - mv = pm.MvGaussianRandomWalk( - "mv", mu, cov=pm.math.dot(chol, chol.T), shape=dist_shape - ) + mv = pm.MvGaussianRandomWalk("mv", mu, cov=pm.math.dot(chol, chol.T), shape=dist_shape) prior = pm.sample_prior_predictive(samples=sample_shape) assert prior["mv"].shape == to_tuple(sample_shape) + dist_shape @@ -2294,12 +2362,7 @@ def test_car_rng_fn(sparse): size = (100,) W = np.array( - [ - [0.0, 1.0, 1.0, 0.0], - [1.0, 0.0, 0.0, 1.0], - [1.0, 0.0, 0.0, 1.0], - [0.0, 1.0, 1.0, 0.0], - ] + [[0.0, 1.0, 1.0, 0.0], [1.0, 0.0, 0.0, 1.0], [1.0, 0.0, 0.0, 1.0], [0.0, 1.0, 1.0, 0.0]] ) tau = 2 @@ -2316,7 +2379,7 @@ def test_car_rng_fn(sparse): with pm.Model(rng_seeder=1): car = pm.CAR("car", mu, W, alpha, tau, size=size) mn = pm.MvNormal("mn", mu, cov, size=size) - check = pm.sample_prior_predictive(n_fails, return_inferencedata=False) + check = pm.sample_prior_predictive(n_fails) p, f = delta, n_fails while p <= delta and f > 0: From aeb7c0d9a15b2bc74eb2b1e6908ac88ec894122b Mon Sep 17 00:00:00 2001 From: AlexAndorra Date: Fri, 15 Oct 2021 14:09:05 +0200 Subject: [PATCH 22/22] Fix test_distributions_random for idata forward sampling --- pymc/tests/test_distributions_random.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pymc/tests/test_distributions_random.py b/pymc/tests/test_distributions_random.py index b6990c6a38..94e26e0ca8 100644 --- a/pymc/tests/test_distributions_random.py +++ b/pymc/tests/test_distributions_random.py @@ -1583,7 +1583,7 @@ def ref_rand(mu, rowcov, colcov): rowcov=np.eye(3), colcov=np.eye(3), ) - check = pm.sample_prior_predictive(n_fails) + check = pm.sample_prior_predictive(n_fails, return_inferencedata=False) ref_smp = ref_rand(mu=np.random.random((3, 3)), rowcov=np.eye(3), colcov=np.eye(3)) @@ -1921,7 +1921,7 @@ def sample_prior(self, distribution, shape, nested_rvs_info, prior_samples): nested_rvs_info, ) with model: - return pm.sample_prior_predictive(prior_samples) + return pm.sample_prior_predictive(prior_samples, return_inferencedata=False) @pytest.mark.parametrize( ["prior_samples", "shape", "mu", "alpha"], @@ -2379,7 +2379,7 @@ def test_car_rng_fn(sparse): with pm.Model(rng_seeder=1): car = pm.CAR("car", mu, W, alpha, tau, size=size) mn = pm.MvNormal("mn", mu, cov, size=size) - check = pm.sample_prior_predictive(n_fails) + check = pm.sample_prior_predictive(n_fails, return_inferencedata=False) p, f = delta, n_fails while p <= delta and f > 0:
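
The test fixes in PATCH 22/22 follow directly from the new default: `sample_prior_predictive` now returns an ArviZ `InferenceData` object, so any test that indexes the result like a dictionary (e.g. `prior["mv"]` or `check["car"]` above) must opt back into the old behaviour with `return_inferencedata=False`. Below is a minimal sketch of the two access patterns, assuming the PyMC v4 API this series targets; the model and variable names are illustrative only, not taken from the patch:

    import numpy as np
    import pymc as pm

    with pm.Model():
        mu = pm.Normal("mu", 0.0, 1.0)
        pm.Normal("y", mu, 1.0, observed=np.zeros(5))

        # New default: an InferenceData object. Free variables land in the
        # `prior` group as xarray DataArrays with (chain, draw) dimensions.
        idata = pm.sample_prior_predictive(samples=100)
        mu_draws = idata.prior["mu"]

        # Old behaviour, as restored in the tests patched above: a plain
        # dict of numpy arrays keyed by variable name.
        prior = pm.sample_prior_predictive(samples=100, return_inferencedata=False)
        mu_arr = prior["mu"]

Passing `return_inferencedata=False` at each call site keeps these shape-checking tests unchanged otherwise, rather than rewriting every assertion to index into the `InferenceData` groups.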