From 73976215812a2a1c4e54e1868cd156d4e08c3ba6 Mon Sep 17 00:00:00 2001
From: SangamSwadiK
Date: Thu, 30 Mar 2023 15:16:00 +0000
Subject: [PATCH 1/2] test

---
 pymc/distributions/continuous.py | 17 +++++++++++++++--
 1 file changed, 15 insertions(+), 2 deletions(-)

diff --git a/pymc/distributions/continuous.py b/pymc/distributions/continuous.py
index 8688cb04c2..ef341c82e2 100644
--- a/pymc/distributions/continuous.py
+++ b/pymc/distributions/continuous.py
@@ -814,7 +814,13 @@ class HalfNormal(PositiveContinuous):
     rv_op = halfnormal

     @classmethod
-    def dist(cls, sigma=None, tau=None, *args, **kwargs):
+    def dist(
+        cls,
+        sigma: Optional[DIST_PARAMETER_TYPES] = None,
+        tau: Optional[DIST_PARAMETER_TYPES] = None,
+        *args,
+        **kwargs,
+    ):
         tau, sigma = get_tau_sigma(tau=tau, sigma=sigma)
         return super().dist([0.0, sigma], **kwargs)

@@ -946,7 +952,14 @@ class Wald(PositiveContinuous):
     rv_op = wald

     @classmethod
-    def dist(cls, mu=None, lam=None, phi=None, alpha=0.0, **kwargs):
+    def dist(
+        cls,
+        mu: Optional[DIST_PARAMETER_TYPES] = None,
+        lam: Optional[DIST_PARAMETER_TYPES] = None,
+        phi: Optional[DIST_PARAMETER_TYPES] = None,
+        alpha=0.0,
+        **kwargs,
+    ):
         mu, lam, phi = cls.get_mu_lam_phi(mu, lam, phi)
         alpha = pt.as_tensor_variable(floatX(alpha))
         mu = pt.as_tensor_variable(floatX(mu))

From 6b4b71ff348f1e3aa11d83a7af8c23fd52aca80f Mon Sep 17 00:00:00 2001
From: Prince Takyi
Date: Sat, 1 Apr 2023 17:38:36 +0000
Subject: [PATCH 2/2] Add more type hints to distribution parameters

---
 pymc/distributions/continuous.py | 17 +++++++++++++----
 1 file changed, 13 insertions(+), 4 deletions(-)

diff --git a/pymc/distributions/continuous.py b/pymc/distributions/continuous.py
index 55f04cdead..4f07481afc 100644
--- a/pymc/distributions/continuous.py
+++ b/pymc/distributions/continuous.py
@@ -959,7 +959,7 @@ def dist(
         mu: Optional[DIST_PARAMETER_TYPES] = None,
         lam: Optional[DIST_PARAMETER_TYPES] = None,
         phi: Optional[DIST_PARAMETER_TYPES] = None,
-        alpha=0.0,
+        alpha: Optional[DIST_PARAMETER_TYPES] = 0.0,
         **kwargs,
     ):
         mu, lam, phi = cls.get_mu_lam_phi(mu, lam, phi)
@@ -1128,7 +1128,16 @@ class Beta(UnitContinuous):
     rv_op = pytensor.tensor.random.beta

     @classmethod
-    def dist(cls, alpha=None, beta=None, mu=None, sigma=None, nu=None, *args, **kwargs):
+    def dist(
+        cls,
+        alpha: Optional[DIST_PARAMETER_TYPES] = None,
+        beta: Optional[DIST_PARAMETER_TYPES] = None,
+        mu: Optional[DIST_PARAMETER_TYPES] = None,
+        sigma: Optional[DIST_PARAMETER_TYPES] = None,
+        nu: Optional[DIST_PARAMETER_TYPES] = None,
+        *args,
+        **kwargs,
+    ):
         alpha, beta = cls.get_alpha_beta(alpha, beta, mu, sigma, nu)
         alpha = pt.as_tensor_variable(floatX(alpha))
         beta = pt.as_tensor_variable(floatX(beta))
@@ -1256,7 +1265,7 @@ class Kumaraswamy(UnitContinuous):
     rv_op = kumaraswamy

     @classmethod
-    def dist(cls, a, b, *args, **kwargs):
+    def dist(cls, a: DIST_PARAMETER_TYPES, b: DIST_PARAMETER_TYPES, *args, **kwargs):
         a = pt.as_tensor_variable(floatX(a))
         b = pt.as_tensor_variable(floatX(b))

@@ -1342,7 +1351,7 @@ class Exponential(PositiveContinuous):
     rv_op = exponential

     @classmethod
-    def dist(cls, lam, *args, **kwargs):
+    def dist(cls, lam: DIST_PARAMETER_TYPES, *args, **kwargs):
         lam = pt.as_tensor_variable(floatX(lam))

         # PyTensor exponential op is parametrized in terms of mu (1/lam)
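For reviewers, below is a quick usage sketch (not part of the patch) exercising the annotated signatures. It assumes DIST_PARAMETER_TYPES is the union alias PyMC uses for distribution parameters, roughly covering Python scalars, NumPy arrays, and PyTensor variables; the alias itself is defined elsewhere in the codebase, and these hints only document what .dist() already accepted at runtime.

# Usage sketch (not part of the patch). Assumes DIST_PARAMETER_TYPES is a
# Union over plain scalars, NumPy arrays, and PyTensor variables, so every
# call below should satisfy the new annotations.
import numpy as np
import pymc as pm

half_normal = pm.HalfNormal.dist(sigma=1.0)        # float scalar
wald = pm.Wald.dist(mu=1.0, lam=2.0)               # mu/lam parametrization
beta = pm.Beta.dist(alpha=np.array([1.0, 2.0]),    # NumPy arrays give a
                    beta=np.array([2.0, 2.0]))     # batch of two RVs
kumaraswamy = pm.Kumaraswamy.dist(a=2.0, b=5.0)    # required (non-Optional) params
exponential = pm.Exponential.dist(lam=0.5)

# .dist() returns unregistered PyTensor RandomVariables; sample them directly:
print(pm.draw(half_normal), pm.draw(exponential))

Since the patches only touch annotations, runtime behavior is unchanged; static checkers simply get a narrower contract for these parameters.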