diff --git a/pymc/distributions/continuous.py b/pymc/distributions/continuous.py
index 75a75b07c1..fa4c056f93 100644
--- a/pymc/distributions/continuous.py
+++ b/pymc/distributions/continuous.py
@@ -57,6 +57,7 @@ from pytensor.tensor.var import TensorConstant
 
 from pymc.logprob.abstract import _logcdf_helper, _logprob_helper
+from pymc.logprob.basic import icdf
 
 try:
     from polyagamma import polyagamma_cdf, polyagamma_pdf, random_polyagamma
 
@@ -856,6 +857,11 @@ def logcdf(value, loc, sigma):
             msg="sigma > 0",
         )
 
+    def icdf(value, loc, sigma):
+        res = icdf(Normal.dist(loc, sigma), (value + 1.0) / 2.0)
+        res = check_icdf_value(res, value)
+        return res
+
 
 class WaldRV(RandomVariable):
     name = "wald"
@@ -1714,12 +1720,17 @@ def logcdf(value, mu, sigma):
             -np.inf,
             normal_lcdf(mu, sigma, pt.log(value)),
         )
+
         return check_parameters(
             res,
             sigma > 0,
             msg="sigma > 0",
         )
 
+    def icdf(value, mu, sigma):
+        res = pt.exp(icdf(Normal.dist(mu, sigma), value))
+        return res
+
 
 Lognormal = LogNormal
 
@@ -2121,6 +2132,15 @@ def logcdf(value, loc, beta):
             msg="beta > 0",
         )
 
+    def icdf(value, loc, beta):
+        res = loc + beta * pt.tan(np.pi * (value) / 2.0)
+        res = check_icdf_value(res, value)
+        return check_parameters(
+            res,
+            beta > 0,
+            msg="beta > 0",
+        )
+
 
 class Gamma(PositiveContinuous):
     r"""
diff --git a/tests/distributions/test_continuous.py b/tests/distributions/test_continuous.py
index a4512b8a61..dccf6afeff 100644
--- a/tests/distributions/test_continuous.py
+++ b/tests/distributions/test_continuous.py
@@ -299,6 +299,11 @@ def test_half_normal(self):
             {"sigma": Rplus},
             lambda value, sigma: st.halfnorm.logcdf(value, scale=sigma),
         )
+        check_icdf(
+            pm.HalfNormal,
+            {"sigma": Rplus},
+            lambda q, sigma: st.halfnorm.ppf(q, scale=sigma),
+        )
 
     def test_chisquared_logp(self):
         check_logp(
@@ -502,6 +507,21 @@ def test_lognormal(self):
             {"mu": R, "sigma": Rplusbig},
             lambda value, mu, sigma: st.lognorm.logcdf(value, sigma, 0, np.exp(mu)),
         )
+        check_icdf(
+            pm.LogNormal,
+            {"mu": R, "tau": Rplusbig},
+            lambda q, mu, tau: floatX(st.lognorm.ppf(q, tau**-0.5, 0, np.exp(mu))),
+        )
+        # Because we exponentiate the normal quantile function, setting sigma >= 9.5
+        # returns extreme values that result in relative errors above 4 digits,
+        # so we circumvent it by keeping sigma below or equal to 9.
+        custom_rplusbig = Domain([0, 0.5, 0.9, 0.99, 1, 1.5, 2, 9, np.inf])
+        check_icdf(
+            pm.LogNormal,
+            {"mu": R, "sigma": custom_rplusbig},
+            lambda q, mu, sigma: floatX(st.lognorm.ppf(q, sigma, 0, np.exp(mu))),
+            decimal=select_by_precision(float64=4, float32=3),
+        )
 
     def test_studentt_logp(self):
         check_logp(
@@ -567,6 +587,9 @@ def test_half_cauchy(self):
             {"beta": Rplusbig},
             lambda value, beta: st.halfcauchy.logcdf(value, scale=beta),
         )
+        check_icdf(
+            pm.HalfCauchy, {"beta": Rplusbig}, lambda q, beta: st.halfcauchy.ppf(q, scale=beta)
+        )
 
     def test_gamma_logp(self):
         check_logp(