From 635eb5d95f89ae2a4852d01515c3f176d96f8cc6 Mon Sep 17 00:00:00 2001
From: Ricardo
Date: Fri, 2 Jul 2021 12:13:02 +0200
Subject: [PATCH 1/3] Add friendly logp and logcdf methods

---
 ...veloper_guide_implementing_distribution.md |  22 ++--
 pymc3/distributions/__init__.py               |   4 +-
 pymc3/distributions/discrete.py               |   2 +-
 pymc3/distributions/{logp.py => logprob.py}   |  19 ++-
 pymc3/tests/test_distributions.py             | 116 ++++++++----------
 pymc3/tests/{test_logp.py => test_logprob.py} |  24 +++-
 6 files changed, 103 insertions(+), 84 deletions(-)
 rename pymc3/distributions/{logp.py => logprob.py} (95%)
 rename pymc3/tests/{test_logp.py => test_logprob.py} (90%)

diff --git a/docs/source/developer_guide_implementing_distribution.md b/docs/source/developer_guide_implementing_distribution.md
index 4d5e2abd45..82c01d7996 100644
--- a/docs/source/developer_guide_implementing_distribution.md
+++ b/docs/source/developer_guide_implementing_distribution.md
@@ -188,33 +188,25 @@ Some notes:
 1. As mentioned above, `v4` works in a very {functional}`Functional_Programming` way, and all the information that is needed in the `logp` and `logcdf` methods is expected to be "carried" via the `RandomVariable` inputs. You may pass numerical arguments that are not strictly needed for the `rng_fn` method but are used in the `logp` and `logcdf` methods. Just keep in mind that this may affect the shape inference behavior of the `RandomVariable`. If specialized non-numeric information is needed you might need to define your custom `_logp` and `_logcdf` {dispatch}`Dispatching` functions, but this should be done as a last resort.
 1. The `logcdf` method is not a requirement, but it's a nice plus!
 
-For a quick check that things are working you can try to create the new distribution in a model context:
+For a quick check that things are working you can try the following:
 
 ```python
 import pymc3 as pm
-from pymc3.distributions.logp import logpt
 
-with pm.Model() as model:
-    # pm.blah = pm.Uniform in this example
-    blah = pm.Blah('blah', [0, 0], [1, 2])
+# pm.blah = pm.Uniform in this example
+blah = pm.Blah.dist([0, 0], [1, 2])
 
 # Test that the returned blah_op is still working fine
 blah.eval()
 # array([0.62778803, 1.95165513])
 
-# logpt will replace the blah RandomVariable with the corresponding logp
-# expression, which takes as input the `blah_value` `TensorVariable` and
-# `blah.owner.inputs`. We pass `transformed=False` to return the original
-# `logp` expression just for simplicity of the example.
-blah_value = model.rvs_to_values[blah]
-blah_logp = logpt(blah, {blah: blah_value}, transformed=False)
-blah_logp.eval({blah_value: [1.5, 1.5]})
+# Test the logp
+pm.logp(blah, [1.5, 1.5]).eval()
 # array([ -inf, -0.69314718])
 
-# It will instead introduce the `logcdf` expression if we pass `cdf=True`
-blah_logcdf = logpt(blah, {blah: blah_value}, cdf=True)
-blah_logcdf.eval({blah_value: [1.5, 1.5]})
+# Test the logcdf
+pm.logcdf(blah, [1.5, 1.5]).eval()
 # array([ 0.        , -0.28768207])
 ```

diff --git a/pymc3/distributions/__init__.py b/pymc3/distributions/__init__.py
index 6dba668ec6..758ed74a3d 100644
--- a/pymc3/distributions/__init__.py
+++ b/pymc3/distributions/__init__.py
@@ -12,10 +12,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from pymc3.distributions.logp import ( # isort:skip
+from pymc3.distributions.logprob import ( # isort:skip
     _logcdf,
     _logp,
     logcdf,
+    logp,
     logp_transform,
     logpt,
     logpt_sum,
@@ -189,6 +190,7 @@
     "BART",
     "CAR",
     "logpt",
+    "logp",
     "_logp",
     "logp_transform",
     "logcdf",
diff --git a/pymc3/distributions/discrete.py b/pymc3/distributions/discrete.py
index 790c47645f..8efeb60bc9 100644
--- a/pymc3/distributions/discrete.py
+++ b/pymc3/distributions/discrete.py
@@ -42,7 +42,7 @@
     normal_lcdf,
 )
 from pymc3.distributions.distribution import Discrete
-from pymc3.distributions.logp import _logcdf, _logp
+from pymc3.distributions.logprob import _logcdf, _logp
 from pymc3.math import log1mexp, logaddexp, logsumexp, sigmoid
 
 __all__ = [
diff --git a/pymc3/distributions/logp.py b/pymc3/distributions/logprob.py
similarity index 95%
rename from pymc3/distributions/logp.py
rename to pymc3/distributions/logprob.py
index e265cbb937..4055e50af0 100644
--- a/pymc3/distributions/logp.py
+++ b/pymc3/distributions/logprob.py
@@ -342,9 +342,24 @@ def subtensor_logp(op, var, rvs_to_values, indexed_rv_var, *indices, **kwargs):
 
     return logp_var
 
 
-def logcdf(*args, **kwargs):
+def logp(var, rv_values, **kwargs):
+    """Create a log-probability graph."""
+
+    # Attach the value_var to the tag of var when it does not have one
+    if not hasattr(var.tag, "value_var"):
+        if isinstance(rv_values, Mapping):
+            value_var = rv_values[var]
+        else:
+            value_var = rv_values
+        var.tag.value_var = at.as_tensor_variable(value_var, dtype=var.dtype)
+
+    return logpt(var, rv_values, **kwargs)
+
+
+def logcdf(var, rv_values, **kwargs):
     """Create a log-CDF graph."""
-    return logpt(*args, cdf=True, **kwargs)
+
+    return logp(var, rv_values, cdf=True, **kwargs)
 
 
 @singledispatch
diff --git a/pymc3/tests/test_distributions.py b/pymc3/tests/test_distributions.py
index d32d6b0266..5648ae4872 100644
--- a/pymc3/tests/test_distributions.py
+++ b/pymc3/tests/test_distributions.py
@@ -36,7 +36,7 @@
 
 import pymc3 as pm
 
-from pymc3.aesaraf import change_rv_size, floatX
+from pymc3.aesaraf import change_rv_size, floatX, intX
 from pymc3.distributions import (
     AR1,
     CAR,
@@ -101,7 +101,7 @@
     ZeroInflatedPoisson,
     continuous,
     logcdf,
-    logpt,
+    logp,
     logpt_sum,
 )
 from pymc3.math import kronecker, logsumexp
@@ -450,8 +450,8 @@ def mvt_logpdf(value, nu, Sigma, mu=0):
     lgamma = scipy.special.gammaln
     norm = lgamma((nu + d) / 2.0) - 0.5 * d * np.log(nu * np.pi) - lgamma(nu / 2.0)
-    logp = norm - logdet - (nu + d) / 2.0 * np.log1p((trafo * trafo).sum(-1) / nu)
-    return logp.sum()
+    logp_mvt = norm - logdet - (nu + d) / 2.0 * np.log1p((trafo * trafo).sum(-1) / nu)
+    return logp_mvt.sum()
 
 
 def AR1_logpdf(value, k, tau_e):
@@ -645,7 +645,7 @@ def logp_reference(args):
         return scipy_logp(**args)
 
     model, param_vars = build_model(pymc3_dist, domain, paramdomains, extra_args)
-    logp = model.fastlogp_nojac
+    logp_pymc3 = model.fastlogp_nojac
 
     domains = paramdomains.copy()
     domains["value"] = domain
@@ -657,7 +657,7 @@
             pt_logp = Point(pt_d, model=model)
             pt_ref = Point(pt, filter_model_vars=False, model=model)
             assert_almost_equal(
-                logp(pt_logp),
+                logp_pymc3(pt_logp),
                 logp_reference(pt_ref),
                 decimal=decimal,
                 err_msg=str(pt),
@@ -828,7 +828,7 @@ def check_logcdf(
         )
 
         # Test that method works with multiple values or raises informative TypeError
-        valid_dist = change_rv_size(valid_dist, 2)
+        valid_dist = pymc3_dist.dist(**valid_params, size=2)
         with aesara.config.change_flags(mode=Mode("py")):
             try:
                 logcdf(valid_dist, np.array([valid_value, valid_value])).eval()
@@ -866,7 +866,7 @@ def check_selfconsistency_discrete_logcdf(
         with aesara.config.change_flags(mode=Mode("py")):
             assert_almost_equal(
                 logcdf(dist, value).eval(),
-                logsumexp(logpt(values_dist, values), keepdims=False).eval(),
+                logsumexp(logp(values_dist, values), keepdims=False).eval(),
                 decimal=decimal,
                 err_msg=str(pt),
             )
@@ -906,9 +906,8 @@ def test_uniform(self):
         )
         # Custom logp / logcdf check for invalid parameters
         invalid_dist = Uniform.dist(lower=1, upper=0)
-
         with aesara.config.change_flags(mode=Mode("py")):
-            assert logpt(invalid_dist, np.array(0.5)).eval() == -np.inf
+            assert logp(invalid_dist, np.array(0.5)).eval() == -np.inf
             assert logcdf(invalid_dist, np.array(2.0)).eval() == -np.inf
 
     def test_triangular(self):
@@ -929,18 +928,18 @@ def test_triangular(self):
         # Custom logp/logcdf check for values outside of domain
         valid_dist = Triangular.dist(lower=0, upper=1, c=0.9, size=2)
         with aesara.config.change_flags(mode=Mode("py")):
-            assert np.all(logpt(valid_dist, np.array([-1, 2])).eval() == -np.inf)
+            assert np.all(logp(valid_dist, np.array([-1, 2])).eval() == -np.inf)
             assert np.all(logcdf(valid_dist, np.array([-1, 2])).eval() == [-np.inf, 0])
 
         # Custom logp / logcdf check for invalid parameters
         invalid_dist = Triangular.dist(lower=1, upper=0, c=0.1)
         with aesara.config.change_flags(mode=Mode("py")):
-            assert logpt(invalid_dist, 0.5).eval() == -np.inf
+            assert logp(invalid_dist, 0.5).eval() == -np.inf
             assert logcdf(invalid_dist, 2).eval() == -np.inf
 
         invalid_dist = Triangular.dist(lower=0, upper=1, c=2.0)
         with aesara.config.change_flags(mode=Mode("py")):
-            assert logpt(invalid_dist, 0.5).eval() == -np.inf
+            assert logp(invalid_dist, 0.5).eval() == -np.inf
             assert logcdf(invalid_dist, 2).eval() == -np.inf
 
     @pytest.mark.xfail(reason="Bound not refactored yet")
@@ -955,7 +954,7 @@ def test_bound_normal(self):
         )
         with Model():
             x = PositiveNormal("x", mu=0, sigma=1, transform=None)
-            assert np.isinf(logpt(x, -1).eval())
+            assert np.isinf(logp(x, -1).eval())
 
     def test_discrete_unif(self):
         self.check_logp(
@@ -979,7 +978,7 @@ def test_discrete_unif(self):
         # Custom logp / logcdf check for invalid parameters
         invalid_dist = DiscreteUniform.dist(lower=1, upper=0)
         with aesara.config.change_flags(mode=Mode("py")):
-            assert logpt(invalid_dist, 0.5).eval() == -np.inf
+            assert logp(invalid_dist, 0.5).eval() == -np.inf
             assert logcdf(invalid_dist, 2).eval() == -np.inf
 
     def test_flat(self):
@@ -1687,7 +1686,7 @@ def test_bound_poisson(self):
 
         with Model():
             x = NonZeroPoisson("x", mu=4)
-            assert np.isinf(logpt(x, 0).eval())
+            assert np.isinf(logp(x, 0).eval())
 
     def test_constantdist(self):
         self.check_logp(Constant, I, {"c": I}, lambda value, c: np.log(c == value))
@@ -1883,17 +1882,17 @@ def test_mvnormal_indef(self):
         mu = floatX(np.zeros(2))
         x = at.vector("x")
         x.tag.test_value = np.zeros(2)
-        logp = logpt(MvNormal.dist(mu=mu, cov=cov), x)
-        f_logp = aesara.function([cov, x], logp)
+        mvn_logp = logp(MvNormal.dist(mu=mu, cov=cov), x)
+        f_logp = aesara.function([cov, x], mvn_logp)
         assert f_logp(cov_val, np.ones(2)) == -np.inf
-        dlogp = at.grad(logp, cov)
+        dlogp = at.grad(mvn_logp, cov)
         f_dlogp = aesara.function([cov, x], dlogp)
         assert not np.all(np.isfinite(f_dlogp(cov_val, np.ones(2))))
 
-        logp = logpt(MvNormal.dist(mu=mu, tau=cov), x)
-        f_logp = aesara.function([cov, x], logp)
+        mvn_logp = logp(MvNormal.dist(mu=mu, tau=cov), x)
+        f_logp = aesara.function([cov, x], mvn_logp)
         assert f_logp(cov_val, np.ones(2)) == -np.inf
-        dlogp = at.grad(logp, cov)
+        dlogp = at.grad(mvn_logp, cov)
         f_dlogp = aesara.function([cov, x], dlogp)
         assert not np.all(np.isfinite(f_dlogp(cov_val, np.ones(2))))
@@ -2094,7 +2093,7 @@ def test_dirichlet_with_batch_shapes(self, dist_shape):
         else:
             d_point_trans = d_point
 
-        pymc3_res = logpt(d, d_point_trans, jacobian=False).eval()
+        pymc3_res = logp(d, d_point_trans, jacobian=False).eval()
         scipy_res = np.empty_like(pymc3_res)
         for idx in np.ndindex(a.shape[:-1]):
             scipy_res[idx] = scipy.stats.dirichlet(a[idx]).logpdf(d_point[idx])
@@ -2196,7 +2195,7 @@ def test_multinomial_vec(self):
 
         assert_almost_equal(
             scipy.stats.multinomial.logpmf(vals, n, p),
-            logpt(model_many.m, vals).eval().squeeze(),
+            logp(model_many.m, vals).eval().squeeze(),
             decimal=4,
         )
 
@@ -2250,20 +2249,16 @@ def test_multinomial_vec_2d_p(self):
     def test_batch_multinomial(self):
         n = 10
-        vals = np.zeros((4, 5, 3), dtype="int32")
-        p = np.zeros_like(vals, dtype=aesara.config.floatX)
+        vals = intX(np.zeros((4, 5, 3)))
+        p = floatX(np.zeros_like(vals))
         inds = np.random.randint(vals.shape[-1], size=vals.shape[:-1])[..., None]
         np.put_along_axis(vals, inds, n, axis=-1)
         np.put_along_axis(p, inds, 1, axis=-1)
 
         dist = Multinomial.dist(n=n, p=p)
-
-        value = at.tensor3(dtype="int32")
-        value.tag.test_value = np.zeros_like(vals, dtype="int32")
-        logp = at.exp(logpt(dist, value))
-        f = aesara.function(inputs=[value], outputs=logp)
+        logp_mn = at.exp(pm.logp(dist, vals)).eval()
         assert_almost_equal(
-            f(vals),
+            logp_mn,
             np.ones(vals.shape[:-1]),
             decimal=select_by_precision(float64=6, float32=3),
         )
@@ -2297,14 +2292,10 @@ def test_dirichlet_multinomial_matches_beta_binomial(self):
         ns_dm = np.vstack((ns, n - ns)).T  # convert ns=1 to ns_dm=[1, 4], for all ns...
 
         bb = pm.BetaBinomial.dist(n=n, alpha=a, beta=b, size=2)
-        bb_value = bb.type()
-        bb.tag.value_var = bb_value
-        bb_logp = logpt(var=bb, rv_values={bb: bb_value}).eval({bb_value: ns})
+        bb_logp = logp(bb, ns).eval()
 
         dm = pm.DirichletMultinomial.dist(n=n, a=[a, b], size=2)
-        dm_value = dm.type()
-        dm.tag.value_var = dm_value
-        dm_logp = logpt(var=dm, rv_values={dm: dm_value}).eval({dm_value: ns_dm}).ravel()
+        dm_logp = logp(dm, ns_dm).eval().ravel()
 
         assert_almost_equal(
             dm_logp,
@@ -2331,7 +2322,7 @@ def test_dirichlet_multinomial_vec(self):
 
         assert_almost_equal(
             np.asarray([dirichlet_multinomial_logpmf(val, n, a) for val in vals]),
-            logpt(model_many.m, vals).eval().squeeze(),
+            logp(model_many.m, vals).eval().squeeze(),
             decimal=4,
         )
 
@@ -2398,13 +2389,10 @@ def test_batch_dirichlet_multinomial(self):
         dist = DirichletMultinomial.dist(n=n, a=a)
 
         # Logp should be approx -9.98004998e-06
-        value = at.tensor3(dtype="int32")
-        value.tag.test_value = np.zeros_like(vals, dtype="int32")
-        logp = logpt(dist, value)
-        f = aesara.function(inputs=[value], outputs=logp)
-        expected_logp = np.full(shape=f(vals).shape, fill_value=-9.98004998e-06)
+        dist_logp = logp(dist, vals).eval()
+        expected_logp = np.full_like(dist_logp, fill_value=-9.98004998e-06)
         assert_almost_equal(
-            f(vals),
+            dist_logp,
             expected_logp,
             decimal=select_by_precision(float64=6, float32=3),
         )
@@ -2418,32 +2406,32 @@ def test_batch_dirichlet_multinomial(self):
     def test_categorical_bounds(self):
         with Model():
             x = Categorical("x", p=np.array([0.2, 0.3, 0.5]))
-            assert np.isinf(logpt(x, -1).tag.test_value)
-            assert np.isinf(logpt(x, 3).tag.test_value)
+            assert np.isinf(logp(x, -1).eval())
+            assert np.isinf(logp(x, 3).eval())
 
     @aesara.config.change_flags(compute_test_value="raise")
     def test_categorical_valid_p(self):
         with Model():
             x = Categorical("x", p=np.array([-0.2, 0.3, 0.5]))
-            assert np.isinf(logpt(x, 0).tag.test_value)
-            assert np.isinf(logpt(x, 1).tag.test_value)
-            assert np.isinf(logpt(x, 2).tag.test_value)
+            assert np.isinf(logp(x, 0).eval())
+            assert np.isinf(logp(x, 1).eval())
+            assert np.isinf(logp(x, 2).eval())
 
         with Model():
             # A model where p sums to 1 but contains negative values
             x = Categorical("x", p=np.array([-0.2, 0.7, 0.5]))
-            assert np.isinf(logpt(x, 0).tag.test_value)
-            assert np.isinf(logpt(x, 1).tag.test_value)
-            assert np.isinf(logpt(x, 2).tag.test_value)
+            assert np.isinf(logp(x, 0).eval())
+            assert np.isinf(logp(x, 1).eval())
+            assert np.isinf(logp(x, 2).eval())
 
         with Model():
             # Hard edge case from #2082
             # Early automatic normalization of p's sum would hide the negative
             # entries if there is a single or pair number of negative values
             # and the rest are zero
             x = Categorical("x", p=np.array([-1, -1, 0, 0]))
-            assert np.isinf(logpt(x, 0).tag.test_value)
-            assert np.isinf(logpt(x, 1).tag.test_value)
-            assert np.isinf(logpt(x, 2).tag.test_value)
-            assert np.isinf(logpt(x, 3).tag.test_value)
+            assert np.isinf(logp(x, 0).eval())
+            assert np.isinf(logp(x, 1).eval())
+            assert np.isinf(logp(x, 2).eval())
+            assert np.isinf(logp(x, 3).eval())
 
     @pytest.mark.parametrize("n", [2, 3, 4])
     def test_categorical(self, n):
@@ -2682,19 +2670,19 @@ def test_bound():
     LowerNormal = Bound(Normal, lower=1)
     dist = LowerNormal.dist(mu=0, sigma=1)
-    assert logpt(dist, 0).eval() == -np.inf
+    assert logp(dist, 0).eval() == -np.inf
     # assert dist.transform is not None
     assert np.all(dist.random() > 1)
 
     UpperNormal = Bound(Normal, upper=-1)
     dist = UpperNormal.dist(mu=0, sigma=1)
-    assert logpt(dist, -0.5).eval() == -np.inf
+    assert logp(dist, -0.5).eval() == -np.inf
     # assert dist.transform is not None
    assert np.all(dist.random() < -1)
 
     ArrayNormal = Bound(Normal, lower=[1, 2], upper=[2, 3])
     dist = ArrayNormal.dist(mu=0, sigma=1, size=2)
-    assert_equal(logpt(dist, [0.5, 3.5]).eval(), -np.array([np.inf, np.inf]))
+    assert_equal(logp(dist, [0.5, 3.5]).eval(), -np.array([np.inf, np.inf]))
     # assert dist.transform is not None
     with pytest.raises(ValueError) as err:
         dist.random()
@@ -2709,8 +2697,8 @@ def test_bound():
     upper = 3
     ArrayNormal = Bound(Normal, lower=lower, upper=upper)
     dist = ArrayNormal.dist(mu=0, sigma=1, size=2)
-    logp = logpt(dist, [0.5, 3.5]).eval({lower: lower.tag.test_value})
-    assert_equal(logp, -np.array([np.inf, np.inf]))
+    logp_dist = logp(dist, [0.5, 3.5]).eval({lower: lower.tag.test_value})
+    assert_equal(logp_dist, -np.array([np.inf, np.inf]))
     assert dist.transform is not None
 
     with Model():
@@ -3071,7 +3059,7 @@ def test_car_logp(size):
     cov = np.linalg.inv(prec)
     scipy_logp = scipy.stats.multivariate_normal.logpdf(xs, mu, cov)
 
-    car_logp = logpt(CAR.dist(mu, W, alpha, tau, size=size), xs).eval()
+    car_logp = logp(CAR.dist(mu, W, alpha, tau, size=size), xs).eval()
 
     # Check to make sure that the CAR and MVN log PDFs are equivalent
     # up to an additive constant which is independent of the CAR parameters
@@ -3089,7 +3077,7 @@ def test_issue_3051(self, dims, dist_cls, kwargs):
         d = dist_cls.dist(mu=mu, cov=np.eye(dims), **kwargs, size=(20))
         X = np.random.normal(size=(20, dims))
 
-        actual_t = logpt(d, X)
+        actual_t = logp(d, X)
         assert isinstance(actual_t, TensorVariable)
         actual_a = actual_t.eval()
         assert isinstance(actual_a, np.ndarray)
diff --git a/pymc3/tests/test_logp.py b/pymc3/tests/test_logprob.py
similarity index 90%
rename from pymc3/tests/test_logp.py
rename to pymc3/tests/test_logprob.py
index 6047820292..072dbcb2c9 100644
--- a/pymc3/tests/test_logp.py
+++ b/pymc3/tests/test_logprob.py
@@ -33,7 +33,7 @@
 from pymc3.aesaraf import floatX, walk_model
 from pymc3.distributions.continuous import Normal, Uniform
 from pymc3.distributions.discrete import Bernoulli
-from pymc3.distributions.logp import logpt
+from pymc3.distributions.logprob import logcdf, logp, logpt
 from pymc3.model import Model
 from pymc3.tests.helpers import select_by_precision
 
@@ -193,3 +193,25 @@ def test_logpt_subtensor():
 
     logp_vals = logp_vals_fn(A_idx_value, I_value)
     np.testing.assert_almost_equal(logp_vals, exp_obs_logps, decimal=decimals)
+
+
+def test_logp_helper():
+    value = at.vector("value")
+    x = Normal.dist(0, 1, size=2)
+
+    x_logp = logp(x, value)
+    np.testing.assert_almost_equal(x_logp.eval({value: [0, 1]}), sp.norm(0, 1).logpdf([0, 1]))
+
+    x_logp = logp(x, [0, 1])
+    np.testing.assert_almost_equal(x_logp.eval(), sp.norm(0, 1).logpdf([0, 1]))
+
+
+def test_logcdf_helper():
+    value = at.vector("value")
+    x = Normal.dist(0, 1, size=2)
+
+    x_logcdf = logcdf(x, value)
+    np.testing.assert_almost_equal(x_logcdf.eval({value: [0, 1]}), sp.norm(0, 1).logcdf([0, 1]))
+
+    x_logcdf = logcdf(x, [0, 1])
+    np.testing.assert_almost_equal(x_logcdf.eval(), sp.norm(0, 1).logcdf([0, 1]))

From 81cc30605e35a249d36aef69bc994eb4e41660a1 Mon Sep 17 00:00:00 2001
From: Ricardo
Date: Fri, 2 Jul 2021 15:00:30 +0200
Subject: [PATCH 2/3] Fix float32 rtol in `test_mle_jacobian`

---
 pymc3/tests/test_tuning.py | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/pymc3/tests/test_tuning.py b/pymc3/tests/test_tuning.py
index 7bad67269d..038f65f564 100644
--- a/pymc3/tests/test_tuning.py
+++ b/pymc3/tests/test_tuning.py
@@ -18,6 +18,7 @@
 from pymc3.step_methods.metropolis import tune
 from pymc3.tests import models
+from pymc3.tests.helpers import select_by_precision
 from pymc3.tuning import find_MAP, scaling
 
 
@@ -36,18 +37,16 @@ def test_guess_scaling():
 def test_mle_jacobian():
     """Test MAP / MLE estimation for distributions with flat priors."""
     truth = 10.0  # Simple normal model should give mu=10.0
+    rtol = select_by_precision(float64=1e-6, float32=1e-4)
 
     start, model, _ = models.simple_normal(bounded_prior=False)
     with model:
         map_estimate = find_MAP(method="BFGS", model=model)
-
-    rtol = 1e-5  # this rtol should work on both floatX precisions
     np.testing.assert_allclose(map_estimate["mu_i"], truth, rtol=rtol)
 
     start, model, _ = models.simple_normal(bounded_prior=True)
     with model:
         map_estimate = find_MAP(method="BFGS", model=model)
-
     np.testing.assert_allclose(map_estimate["mu_i"], truth, rtol=rtol)

From 528edc7524d396d3c19d550be688131c4acb6dac Mon Sep 17 00:00:00 2001
From: Ricardo
Date: Sun, 4 Jul 2021 16:03:17 +0200
Subject: [PATCH 3/3] Remove obsolete tests

---
 pymc3/tests/test_distributions_random.py | 18 ------------------
 1 file changed, 18 deletions(-)

diff --git a/pymc3/tests/test_distributions_random.py b/pymc3/tests/test_distributions_random.py
index 0ec356b13a..ea796f863e 100644
--- a/pymc3/tests/test_distributions_random.py
+++ b/pymc3/tests/test_distributions_random.py
@@ -1660,24 +1660,6 @@ def ref_rand(size, tau):
 
         pymc3_random(BoundedNormal, {"tau": Rplus}, ref_rand=ref_rand)
 
-    def test_skew_normal(self):
-        def ref_rand(size, alpha, mu, sigma):
-            return st.skewnorm.rvs(size=size, a=alpha, loc=mu, scale=sigma)
-
-        pymc3_random(pm.SkewNormal, {"mu": R, "sigma": Rplus, "alpha": R}, ref_rand=ref_rand)
-
-    def test_logitnormal(self):
-        def ref_rand(size, mu, sigma):
-            return expit(st.norm.rvs(loc=mu, scale=sigma, size=size))
-
-        pymc3_random(pm.LogitNormal, {"mu": R, "sigma": Rplus}, ref_rand=ref_rand)
-
-    def test_moyal(self):
-        def ref_rand(size, mu, sigma):
-            return st.moyal.rvs(loc=mu, scale=sigma, size=size)
-
-        pymc3_random(pm.Moyal, {"mu": R, "sigma": Rplus}, ref_rand=ref_rand)
-
     @pytest.mark.xfail(reason="This distribution has not been refactored for v4")
     def test_lkj(self):
         for n in [2, 10, 50]:
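
A minimal usage sketch of the two helpers added in PATCH 1/3. This is a hypothetical snippet, not part of the patches; it assumes a `v4` development install of `pymc3` in which `pm.logp` and `pm.logcdf` are exported as above:

```python
import numpy as np
import pymc3 as pm

# In v4 a standalone RandomVariable can be created without a model context
x = pm.Normal.dist(mu=0.0, sigma=1.0)

# Both helpers return Aesara graphs; .eval() compiles and evaluates them
np.testing.assert_allclose(pm.logp(x, 0.0).eval(), -0.5 * np.log(2 * np.pi))  # log N(0 | 0, 1)
np.testing.assert_allclose(pm.logcdf(x, 0.0).eval(), np.log(0.5))  # log CDF at the median
```

As in `logpt`, the `rv_values` argument may also be a `{rv: value}` mapping; accepting a bare value, as above, is the convenience these helpers add.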