From 8db2ffcf848622f1cbdce2921db2167a8044cc4a Mon Sep 17 00:00:00 2001 From: Fernando Irarrazaval Date: Tue, 7 Jun 2022 10:52:12 -0400 Subject: [PATCH 01/28] Future warning for logpt --- pymc/model.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/pymc/model.py b/pymc/model.py index 4f089e5732..c8ec96716d 100644 --- a/pymc/model.py +++ b/pymc/model.py @@ -690,7 +690,14 @@ def compile_d2logp( """ return self.model.compile_fn(self.d2logpt(vars=vars, jacobian=jacobian)) - def logpt( + def logpt(self, *args, **kwargs): + warnings.warn( + "Model.logpt has been deprecated. Use Model.logp instead.", + FutureWarning, + ) + return self.logp(*args, **kwargs) + + def logp( self, vars: Optional[Union[Variable, Sequence[Variable]]] = None, jacobian: bool = True, From 47cf58e9259f8367fb2cd2bf93adfe75cd8d6927 Mon Sep 17 00:00:00 2001 From: Fernando Irarrazaval Date: Tue, 7 Jun 2022 11:10:00 -0400 Subject: [PATCH 02/28] Future warning for dlogpt and d2logpt --- pymc/model.py | 22 ++++++++++++++++++---- 1 file changed, 18 insertions(+), 4 deletions(-) diff --git a/pymc/model.py b/pymc/model.py index c8ec96716d..aa159f28a7 100644 --- a/pymc/model.py +++ b/pymc/model.py @@ -671,7 +671,7 @@ def compile_dlogp( jacobian: Whether to include jacobian terms in logprob graph. Defaults to True. """ - return self.model.compile_fn(self.dlogpt(vars=vars, jacobian=jacobian)) + return self.model.compile_fn(self.dlogp(vars=vars, jacobian=jacobian)) def compile_d2logp( self, @@ -688,7 +688,7 @@ def compile_d2logp( jacobian: Whether to include jacobian terms in logprob graph. Defaults to True. """ - return self.model.compile_fn(self.d2logpt(vars=vars, jacobian=jacobian)) + return self.model.compile_fn(self.d2logp(vars=vars, jacobian=jacobian)) def logpt(self, *args, **kwargs): warnings.warn( @@ -771,7 +771,14 @@ def logp( logp_scalar.name = logp_scalar_name return logp_scalar - def dlogpt( + def dlogpt(self, *args, **kwargs): + warnings.warn( + "Model.dlogpt has been deprecated. Use Model.dlogp instead.", + FutureWarning, + ) + return self.logp(*args, **kwargs) + + def dlogp( self, vars: Optional[Union[Variable, Sequence[Variable]]] = None, jacobian: bool = True, @@ -809,7 +816,14 @@ def dlogpt( cost = self.logpt(jacobian=jacobian) return gradient(cost, value_vars) - def d2logpt( + def d2logpt(self, *args, **kwargs): + warnings.warn( + "Model.d2logpt has been deprecated. 
Use Model.d2logp instead.", + FutureWarning, + ) + return self.logp(*args, **kwargs) + + def d2logp( self, vars: Optional[Union[Variable, Sequence[Variable]]] = None, jacobian: bool = True, From 89352479a83aeaecdbabb4233c484c4905d75bef Mon Sep 17 00:00:00 2001 From: Fernando Irarrazaval Date: Tue, 7 Jun 2022 22:04:08 -0400 Subject: [PATCH 03/28] Updated references to logpt, and updated varlogpt, datalogpt, joint_logpt --- pymc/backends/arviz.py | 4 +- pymc/distributions/__init__.py | 2 + pymc/distributions/continuous.py | 2 +- pymc/distributions/logprob.py | 16 +++-- pymc/model.py | 64 +++++++++++++----- pymc/sampling.py | 4 +- pymc/sampling_jax.py | 10 +-- pymc/smc/smc.py | 4 +- pymc/step_methods/metropolis.py | 6 +- pymc/step_methods/mlda.py | 4 +- pymc/tests/test_distributions.py | 86 ++++++++++++------------ pymc/tests/test_distributions_moments.py | 4 +- pymc/tests/test_logprob.py | 24 +++---- pymc/tests/test_minibatches.py | 38 +++++------ pymc/tests/test_missing.py | 8 +-- pymc/tests/test_mixture.py | 2 +- pymc/tests/test_model.py | 4 +- pymc/tests/test_profile.py | 6 +- pymc/tests/test_smc.py | 18 ++--- pymc/tests/test_transforms.py | 16 ++--- pymc/tuning/scaling.py | 2 +- pymc/variational/opvi.py | 4 +- 22 files changed, 185 insertions(+), 143 deletions(-) diff --git a/pymc/backends/arviz.py b/pymc/backends/arviz.py index 552a2afd44..80da0a89d8 100644 --- a/pymc/backends/arviz.py +++ b/pymc/backends/arviz.py @@ -251,7 +251,7 @@ def _extract_log_likelihood(self, trace): ( var, self.model.compile_fn( - self.model.logpt(var, sum=False)[0], + self.model.logp(var, sum=False)[0], inputs=self.model.value_vars, on_unused_input="ignore", ), @@ -263,7 +263,7 @@ def _extract_log_likelihood(self, trace): ( var, self.model.compile_fn( - self.model.logpt(var, sum=False)[0], + self.model.logp(var, sum=False)[0], inputs=self.model.value_vars, on_unused_input="ignore", ), diff --git a/pymc/distributions/__init__.py b/pymc/distributions/__init__.py index 8680528682..40ea9894a6 100644 --- a/pymc/distributions/__init__.py +++ b/pymc/distributions/__init__.py @@ -15,6 +15,7 @@ from pymc.distributions.logprob import ( # isort:skip logcdf, logp, + joint_logp, joint_logpt, ) @@ -191,6 +192,7 @@ "CAR", "PolyaGamma", "joint_logpt", + "joint_logp", "logp", "logcdf", ] diff --git a/pymc/distributions/continuous.py b/pymc/distributions/continuous.py index e1ade92dd2..ef20c39ca6 100644 --- a/pymc/distributions/continuous.py +++ b/pymc/distributions/continuous.py @@ -2558,7 +2558,7 @@ def logcdf(value, nu): return logcdf(Gamma.dist(alpha=nu / 2, beta=0.5), value) -# TODO: Remove this once logpt for multiplication is working! +# TODO: Remove this once logp for multiplication is working! class WeibullBetaRV(WeibullRV): ndims_params = [0, 0] diff --git a/pymc/distributions/logprob.py b/pymc/distributions/logprob.py index fb2b041ff8..b1f76d2b30 100644 --- a/pymc/distributions/logprob.py +++ b/pymc/distributions/logprob.py @@ -119,7 +119,15 @@ def _get_scaling( ) -def joint_logpt( +def joint_logpt(self, *args, **kwargs): + warnings.warn( + "logprob.joint_logpt has been deprecated. 
Use logprob.joint_logp instead.", + FutureWarning, + ) + return self.joint_logp(*args, **kwargs) + + +def joint_logp( var: Union[TensorVariable, List[TensorVariable]], rv_values: Optional[Union[TensorVariable, Dict[TensorVariable, TensorVariable]]] = None, *, @@ -159,14 +167,14 @@ def joint_logpt( """ # TODO: In future when we drop support for tag.value_var most of the following - # logic can be removed and logpt can just be a wrapper function that calls aeppl's + # logic can be removed and logp can just be a wrapper function that calls aeppl's # joint_logprob directly. # If var is not a list make it one. if not isinstance(var, (list, tuple)): var = [var] - # If logpt isn't provided values it is assumed that the tagged value var or + # If logp isn't provided values it is assumed that the tagged value var or # observation is the value variable for that particular RV. if rv_values is None: rv_values = {} @@ -251,7 +259,7 @@ def joint_logpt( "reference nonlocal variables." ) - # aeppl returns the logpt for every single value term we provided to it. This includes + # aeppl returns the logp for every single value term we provided to it. This includes # the extra values we plugged in above, so we filter those we actually wanted in the # same order they were given in. logp_var_dict = {} diff --git a/pymc/model.py b/pymc/model.py index aa159f28a7..694873c27c 100644 --- a/pymc/model.py +++ b/pymc/model.py @@ -57,7 +57,7 @@ ) from pymc.blocking import DictToArrayBijection, RaveledVars from pymc.data import GenTensorVariable, Minibatch -from pymc.distributions import joint_logpt +from pymc.distributions import joint_logp from pymc.distributions.logprob import _get_scaling from pymc.distributions.transforms import _default_transform from pymc.exceptions import ImputationWarning, SamplingError, ShapeError, ShapeWarning @@ -623,9 +623,9 @@ def logp_dlogp_function(self, grad_vars=None, tempered=False, **kwargs): raise ValueError(f"Can only compute the gradient of continuous types: {var}") if tempered: - costs = [self.varlogpt, self.datalogpt] + costs = [self.varlogp, self.datalogp] else: - costs = [self.logpt()] + costs = [self.logp()] input_vars = {i for i in graph_inputs(costs) if not isinstance(i, Constant)} extra_vars = [self.rvs_to_values.get(var, var) for var in self.free_RVs] @@ -654,7 +654,7 @@ def compile_logp( Whether to sum all logp terms or return elemwise logp for each variable. Defaults to True. 
""" - return self.model.compile_fn(self.logpt(vars=vars, jacobian=jacobian, sum=sum)) + return self.model.compile_fn(self.logp(vars=vars, jacobian=jacobian, sum=sum)) def compile_dlogp( self, @@ -749,7 +749,7 @@ def logp( rv_logps: List[TensorVariable] = [] if rv_values: - rv_logps = joint_logpt(list(rv_values.keys()), rv_values, sum=False, jacobian=jacobian) + rv_logps = joint_logp(list(rv_values.keys()), rv_values, sum=False, jacobian=jacobian) assert isinstance(rv_logps, list) # Replace random variables by their value variables in potential terms @@ -813,7 +813,7 @@ def dlogp( f"Requested variable {var} not found among the model variables" ) - cost = self.logpt(jacobian=jacobian) + cost = self.logp(jacobian=jacobian) return gradient(cost, value_vars) def d2logpt(self, *args, **kwargs): @@ -858,34 +858,66 @@ def d2logp( f"Requested variable {var} not found among the model variables" ) - cost = self.logpt(jacobian=jacobian) + cost = self.logp(jacobian=jacobian) return hessian(cost, value_vars) @property - def datalogpt(self) -> Variable: + def datalogpt(self, *args, **kwargs): + warnings.warn( + "Model.datalogpt has been deprecated. Use Model.datalogp instead.", + FutureWarning, + ) + return self.datalogp(*args, **kwargs) + + @property + def datalogp(self) -> Variable: """Aesara scalar of log-probability of the observed variables and potential terms""" - return self.observedlogpt + self.potentiallogpt + return self.observedlogp + self.potentiallogp + + @property + def varlogpt(self, *args, **kwargs): + warnings.warn( + "Model.varlogpt has been deprecated. Use Model.varlogp instead.", + FutureWarning, + ) + return self.varlogp(*args, **kwargs) @property - def varlogpt(self) -> Variable: + def varlogp(self) -> Variable: """Aesara scalar of log-probability of the unobserved random variables (excluding deterministic).""" - return self.logpt(vars=self.free_RVs) + return self.logp(vars=self.free_RVs) @property def varlogp_nojact(self) -> Variable: """Aesara scalar of log-probability of the unobserved random variables (excluding deterministic) without jacobian term.""" - return self.logpt(vars=self.free_RVs, jacobian=False) + return self.logp(vars=self.free_RVs, jacobian=False) @property - def observedlogpt(self) -> Variable: + def observedlogpt(self, *args, **kwargs): + warnings.warn( + "Model.observedlogpt has been deprecated. Use Model.observedlogp instead.", + FutureWarning, + ) + return self.observedlogp(*args, **kwargs) + + @property + def observedlogp(self) -> Variable: """Aesara scalar of log-probability of the observed variables""" - return self.logpt(vars=self.observed_RVs) + return self.logp(vars=self.observed_RVs) + + @property + def potentiallogpt(self, *args, **kwargs): + warnings.warn( + "Model.potentiallogpt has been deprecated. 
Use Model.potentiallogp instead.", + FutureWarning, + ) + return self.potentiallogp(*args, **kwargs) @property - def potentiallogpt(self) -> Variable: + def potentiallogp(self) -> Variable: """Aesara scalar of log-probability of the Potential terms""" # Convert random variables in Potential expression into their log-likelihood # inputs and apply their transforms, if any @@ -1776,7 +1808,7 @@ def point_logps(self, point=None, round_vals=2): point = self.initial_point() factors = self.basic_RVs + self.potentials - factor_logps_fn = [at.sum(factor) for factor in self.logpt(factors, sum=False)] + factor_logps_fn = [at.sum(factor) for factor in self.logp(factors, sum=False)] return { factor.name: np.round(np.asarray(factor_logp), round_vals) for factor, factor_logp in zip( diff --git a/pymc/sampling.py b/pymc/sampling.py index df3dcdd213..6827b49436 100644 --- a/pymc/sampling.py +++ b/pymc/sampling.py @@ -204,7 +204,7 @@ def assign_step_methods(model, step=None, methods=None, step_kwargs=None): # Use competence classmethods to select step methods for remaining # variables selected_steps = defaultdict(list) - model_logpt = model.logpt() + model_logp = model.logp() for var in model.value_vars: if var not in assigned_vars: @@ -212,7 +212,7 @@ def assign_step_methods(model, step=None, methods=None, step_kwargs=None): has_gradient = var.dtype not in discrete_types if has_gradient: try: - tg.grad(model_logpt, var) + tg.grad(model_logp, var) except (NotImplementedError, tg.NullTypeGradError): has_gradient = False diff --git a/pymc/sampling_jax.py b/pymc/sampling_jax.py index a087e005ca..4127dac702 100644 --- a/pymc/sampling_jax.py +++ b/pymc/sampling_jax.py @@ -100,10 +100,10 @@ def get_jaxified_graph( def get_jaxified_logp(model: Model, negative_logp=True) -> Callable: - model_logpt = model.logpt() + model_logp = model.logp() if not negative_logp: - model_logpt = -model_logpt - logp_fn = get_jaxified_graph(inputs=model.value_vars, outputs=[model_logpt]) + model_logp = -model_logp + logp_fn = get_jaxified_graph(inputs=model.value_vars, outputs=[model_logp]) def logp_fn_wrap(x): return logp_fn(*x)[0] @@ -136,8 +136,8 @@ def _get_log_likelihood(model: Model, samples, backend=None) -> Dict: """Compute log-likelihood for all observations""" data = {} for v in model.observed_RVs: - v_elemwise_logpt = model.logpt(v, sum=False) - jax_fn = get_jaxified_graph(inputs=model.value_vars, outputs=v_elemwise_logpt) + v_elemwise_logp = model.logp(v, sum=False) + jax_fn = get_jaxified_graph(inputs=model.value_vars, outputs=v_elemwise_logp) result = jax.jit(jax.vmap(jax.vmap(jax_fn)), backend=backend)(*samples)[0] data[v.name] = result return data diff --git a/pymc/smc/smc.py b/pymc/smc/smc.py index 6cb37a8feb..f03c977afa 100644 --- a/pymc/smc/smc.py +++ b/pymc/smc/smc.py @@ -219,10 +219,10 @@ def _initialize_kernel(self): shared = make_shared_replacements(initial_point, self.variables, self.model) self.prior_logp_func = _logp_forw( - initial_point, [self.model.varlogpt], self.variables, shared + initial_point, [self.model.varlogp], self.variables, shared ) self.likelihood_logp_func = _logp_forw( - initial_point, [self.model.datalogpt], self.variables, shared + initial_point, [self.model.datalogp], self.variables, shared ) priors = [self.prior_logp_func(sample) for sample in self.tempered_posterior] diff --git a/pymc/step_methods/metropolis.py b/pymc/step_methods/metropolis.py index a96f68068d..428896a751 100644 --- a/pymc/step_methods/metropolis.py +++ b/pymc/step_methods/metropolis.py @@ -226,7 +226,7 @@ def 
__init__( self.mode = mode shared = pm.make_shared_replacements(initial_values, vars, model) - self.delta_logp = delta_logp(initial_values, model.logpt(), vars, shared) + self.delta_logp = delta_logp(initial_values, model.logp(), vars, shared) super().__init__(vars, shared) def reset_tuning(self): @@ -794,7 +794,7 @@ def __init__( self.mode = mode shared = pm.make_shared_replacements(initial_values, vars, model) - self.delta_logp = delta_logp(initial_values, model.logpt(), vars, shared) + self.delta_logp = delta_logp(initial_values, model.logp(), vars, shared) super().__init__(vars, shared) def astep(self, q0: RaveledVars) -> Tuple[RaveledVars, List[Dict[str, Any]]]: @@ -957,7 +957,7 @@ def __init__( self.mode = mode shared = pm.make_shared_replacements(initial_values, vars, model) - self.delta_logp = delta_logp(initial_values, model.logpt(), vars, shared) + self.delta_logp = delta_logp(initial_values, model.logp(), vars, shared) super().__init__(vars, shared) def reset_tuning(self): diff --git a/pymc/step_methods/mlda.py b/pymc/step_methods/mlda.py index 441890ebbd..99970111d7 100644 --- a/pymc/step_methods/mlda.py +++ b/pymc/step_methods/mlda.py @@ -538,7 +538,7 @@ def __init__( # Construct Aesara function for current-level model likelihood # (for use in acceptance) shared = pm.make_shared_replacements(initial_values, vars, model) - self.delta_logp = delta_logp(initial_values, model.logpt(), vars, shared) + self.delta_logp = delta_logp(initial_values, model.logp(), vars, shared) # Construct Aesara function for below-level model likelihood # (for use in acceptance) @@ -547,7 +547,7 @@ def __init__( vars_below = pm.inputvars(vars_below) shared_below = pm.make_shared_replacements(initial_values, vars_below, model_below) self.delta_logp_below = delta_logp( - initial_values, model_below.logpt(), vars_below, shared_below + initial_values, model_below.logp(), vars_below, shared_below ) super().__init__(vars, shared) diff --git a/pymc/tests/test_distributions.py b/pymc/tests/test_distributions.py index 06ba3428d3..c7bd0868af 100644 --- a/pymc/tests/test_distributions.py +++ b/pymc/tests/test_distributions.py @@ -121,7 +121,7 @@ def polyagamma_cdf(*args, **kwargs): ZeroInflatedBinomial, ZeroInflatedNegativeBinomial, ZeroInflatedPoisson, - joint_logpt, + joint_logp, logcdf, logp, ) @@ -924,29 +924,29 @@ def RandomPdMatrix(n): return np.dot(A, A.T) + n * np.identity(n) -def test_hierarchical_logpt(): +def test_hierarchical_logp(): """Make sure there are no random variables in a model's log-likelihood graph.""" with pm.Model() as m: x = pm.Uniform("x", lower=0, upper=1) y = pm.Uniform("y", lower=0, upper=x) - logpt_ancestors = list(ancestors([m.logpt()])) - ops = {a.owner.op for a in logpt_ancestors if a.owner} + logp_ancestors = list(ancestors([m.logp()])) + ops = {a.owner.op for a in logp_ancestors if a.owner} assert len(ops) > 0 assert not any(isinstance(o, RandomVariable) for o in ops) - assert x.tag.value_var in logpt_ancestors - assert y.tag.value_var in logpt_ancestors + assert x.tag.value_var in logp_ancestors + assert y.tag.value_var in logp_ancestors -def test_hierarchical_obs_logpt(): +def test_hierarchical_obs_logp(): obs = np.array([0.5, 0.4, 5, 2]) with pm.Model() as model: x = pm.Uniform("x", 0, 1, observed=obs) pm.Uniform("y", x, 2, observed=obs) - logpt_ancestors = list(ancestors([model.logpt()])) - ops = {a.owner.op for a in logpt_ancestors if a.owner} + logp_ancestors = list(ancestors([model.logp()])) + ops = {a.owner.op for a in logp_ancestors if a.owner} assert len(ops) > 0 
assert not any(isinstance(o, RandomVariable) for o in ops) @@ -2638,29 +2638,29 @@ def test_continuous(self): UpperNormalTransform = Bound("uppertrans", dist, upper=10) BoundedNormalTransform = Bound("boundedtrans", dist, lower=1, upper=10) - assert joint_logpt(LowerNormal, -1).eval() == -np.inf - assert joint_logpt(UpperNormal, 1).eval() == -np.inf - assert joint_logpt(BoundedNormal, 0).eval() == -np.inf - assert joint_logpt(BoundedNormal, 11).eval() == -np.inf + assert joint_logp(LowerNormal, -1).eval() == -np.inf + assert joint_logp(UpperNormal, 1).eval() == -np.inf + assert joint_logp(BoundedNormal, 0).eval() == -np.inf + assert joint_logp(BoundedNormal, 11).eval() == -np.inf - assert joint_logpt(UnboundedNormal, 0).eval() != -np.inf - assert joint_logpt(UnboundedNormal, 11).eval() != -np.inf - assert joint_logpt(InfBoundedNormal, 0).eval() != -np.inf - assert joint_logpt(InfBoundedNormal, 11).eval() != -np.inf + assert joint_logp(UnboundedNormal, 0).eval() != -np.inf + assert joint_logp(UnboundedNormal, 11).eval() != -np.inf + assert joint_logp(InfBoundedNormal, 0).eval() != -np.inf + assert joint_logp(InfBoundedNormal, 11).eval() != -np.inf value = model.rvs_to_values[LowerNormalTransform] - assert joint_logpt(LowerNormalTransform, value).eval({value: -1}) != -np.inf + assert joint_logp(LowerNormalTransform, value).eval({value: -1}) != -np.inf value = model.rvs_to_values[UpperNormalTransform] - assert joint_logpt(UpperNormalTransform, value).eval({value: 1}) != -np.inf + assert joint_logp(UpperNormalTransform, value).eval({value: 1}) != -np.inf value = model.rvs_to_values[BoundedNormalTransform] - assert joint_logpt(BoundedNormalTransform, value).eval({value: 0}) != -np.inf - assert joint_logpt(BoundedNormalTransform, value).eval({value: 11}) != -np.inf + assert joint_logp(BoundedNormalTransform, value).eval({value: 0}) != -np.inf + assert joint_logp(BoundedNormalTransform, value).eval({value: 11}) != -np.inf ref_dist = Normal.dist(mu=0, sigma=1) - assert np.allclose(joint_logpt(UnboundedNormal, 5).eval(), joint_logpt(ref_dist, 5).eval()) - assert np.allclose(joint_logpt(LowerNormal, 5).eval(), joint_logpt(ref_dist, 5).eval()) - assert np.allclose(joint_logpt(UpperNormal, -5).eval(), joint_logpt(ref_dist, 5).eval()) - assert np.allclose(joint_logpt(BoundedNormal, 5).eval(), joint_logpt(ref_dist, 5).eval()) + assert np.allclose(joint_logp(UnboundedNormal, 5).eval(), joint_logp(ref_dist, 5).eval()) + assert np.allclose(joint_logp(LowerNormal, 5).eval(), joint_logp(ref_dist, 5).eval()) + assert np.allclose(joint_logp(UpperNormal, -5).eval(), joint_logp(ref_dist, 5).eval()) + assert np.allclose(joint_logp(BoundedNormal, 5).eval(), joint_logp(ref_dist, 5).eval()) def test_discrete(self): with Model() as model: @@ -2670,19 +2670,19 @@ def test_discrete(self): UpperPoisson = Bound("upper", dist, upper=10) BoundedPoisson = Bound("bounded", dist, lower=1, upper=10) - assert joint_logpt(LowerPoisson, 0).eval() == -np.inf - assert joint_logpt(UpperPoisson, 11).eval() == -np.inf - assert joint_logpt(BoundedPoisson, 0).eval() == -np.inf - assert joint_logpt(BoundedPoisson, 11).eval() == -np.inf + assert joint_logp(LowerPoisson, 0).eval() == -np.inf + assert joint_logp(UpperPoisson, 11).eval() == -np.inf + assert joint_logp(BoundedPoisson, 0).eval() == -np.inf + assert joint_logp(BoundedPoisson, 11).eval() == -np.inf - assert joint_logpt(UnboundedPoisson, 0).eval() != -np.inf - assert joint_logpt(UnboundedPoisson, 11).eval() != -np.inf + assert joint_logp(UnboundedPoisson, 0).eval() != -np.inf + 
assert joint_logp(UnboundedPoisson, 11).eval() != -np.inf ref_dist = Poisson.dist(mu=4) - assert np.allclose(joint_logpt(UnboundedPoisson, 5).eval(), joint_logpt(ref_dist, 5).eval()) - assert np.allclose(joint_logpt(LowerPoisson, 5).eval(), joint_logpt(ref_dist, 5).eval()) - assert np.allclose(joint_logpt(UpperPoisson, 5).eval(), joint_logpt(ref_dist, 5).eval()) - assert np.allclose(joint_logpt(BoundedPoisson, 5).eval(), joint_logpt(ref_dist, 5).eval()) + assert np.allclose(joint_logp(UnboundedPoisson, 5).eval(), joint_logp(ref_dist, 5).eval()) + assert np.allclose(joint_logp(LowerPoisson, 5).eval(), joint_logp(ref_dist, 5).eval()) + assert np.allclose(joint_logp(UpperPoisson, 5).eval(), joint_logp(ref_dist, 5).eval()) + assert np.allclose(joint_logp(BoundedPoisson, 5).eval(), joint_logp(ref_dist, 5).eval()) def create_invalid_distribution(self): class MyNormal(RandomVariable): @@ -2786,19 +2786,19 @@ def test_array_bound(self): UpperPoisson = Bound("upper", dist, upper=[np.inf, 10], transform=None) BoundedPoisson = Bound("bounded", dist, lower=[1, 2], upper=[9, 10], transform=None) - first, second = joint_logpt(LowerPoisson, [0, 0], sum=False)[0].eval() + first, second = joint_logp(LowerPoisson, [0, 0], sum=False)[0].eval() assert first == -np.inf assert second != -np.inf - first, second = joint_logpt(UpperPoisson, [11, 11], sum=False)[0].eval() + first, second = joint_logp(UpperPoisson, [11, 11], sum=False)[0].eval() assert first != -np.inf assert second == -np.inf - first, second = joint_logpt(BoundedPoisson, [1, 1], sum=False)[0].eval() + first, second = joint_logp(BoundedPoisson, [1, 1], sum=False)[0].eval() assert first != -np.inf assert second == -np.inf - first, second = joint_logpt(BoundedPoisson, [10, 10], sum=False)[0].eval() + first, second = joint_logp(BoundedPoisson, [10, 10], sum=False)[0].eval() assert first == -np.inf assert second != -np.inf @@ -2914,8 +2914,8 @@ def test_orderedlogistic_dimensions(shape): p=p, observed=obs, ) - ologp = joint_logpt(ol, np.ones_like(obs), sum=True).eval() * loge - clogp = joint_logpt(c, np.ones_like(obs), sum=True).eval() * loge + ologp = joint_logp(ol, np.ones_like(obs), sum=True).eval() * loge + clogp = joint_logp(c, np.ones_like(obs), sum=True).eval() * loge expected = -np.prod((size,) + shape) assert c.owner.inputs[3].ndim == (len(shape) + 1) @@ -3157,7 +3157,7 @@ def logp(value, mu): a_val = np.random.normal(loc=mu_val, scale=1, size=to_tuple(size) + (supp_shape,)).astype( aesara.config.floatX ) - log_densityt = joint_logpt(a, a.tag.value_var, sum=False)[0] + log_densityt = joint_logp(a, a.tag.value_var, sum=False)[0] assert log_densityt.eval( {a.tag.value_var: a_val, mu.tag.value_var: mu_val}, ).shape == to_tuple(size) @@ -3288,7 +3288,7 @@ def test_no_warning_logp(self): sd_dist = pm.Exponential.dist(1, size=3) x = pm.LKJCholeskyCov("x", n=3, eta=1, sd_dist=sd_dist) with pytest.warns(None) as record: - m.logpt() + m.logp() assert not record @pytest.mark.parametrize( diff --git a/pymc/tests/test_distributions_moments.py b/pymc/tests/test_distributions_moments.py index 230989000c..dbe4ae60ba 100644 --- a/pymc/tests/test_distributions_moments.py +++ b/pymc/tests/test_distributions_moments.py @@ -71,7 +71,7 @@ ZeroInflatedPoisson, ) from pymc.distributions.distribution import _moment, moment -from pymc.distributions.logprob import joint_logpt +from pymc.distributions.logprob import joint_logp from pymc.distributions.shape_utils import rv_size_is_none, to_tuple from pymc.initial_point import make_initial_point_fn from pymc.model import 
Model @@ -163,7 +163,7 @@ def assert_moment_is_expected(model, expected, check_finite_logp=True): assert np.allclose(moment, expected) if check_finite_logp: - logp_moment = joint_logpt(model["x"], at.constant(moment), transformed=False).eval() + logp_moment = joint_logp(model["x"], at.constant(moment), transformed=False).eval() assert np.isfinite(logp_moment) diff --git a/pymc/tests/test_logprob.py b/pymc/tests/test_logprob.py index e3a7d846d8..66abd59ded 100644 --- a/pymc/tests/test_logprob.py +++ b/pymc/tests/test_logprob.py @@ -42,7 +42,7 @@ from pymc.distributions.logprob import ( _get_scaling, ignore_logprob, - joint_logpt, + joint_logp, logcdf, logp, ) @@ -102,7 +102,7 @@ def test_get_scaling(): assert _get_scaling(total_size, shape=rv_var.shape, ndim=rv_var.ndim).eval() == 1.0 -def test_joint_logpt_basic(): +def test_joint_logp_basic(): """Make sure we can compute a log-likelihood for a hierarchical model with transforms.""" with Model() as m: @@ -119,7 +119,7 @@ def test_joint_logpt_basic(): c_value_var = m.rvs_to_values[c] - b_logp = joint_logpt(b, b_value_var, sum=False) + b_logp = joint_logp(b, b_value_var, sum=False) res_ancestors = list(walk_model(b_logp, walk_past_rvs=True)) res_rv_ancestors = [ @@ -142,7 +142,7 @@ def test_joint_logpt_basic(): ((np.array([0, 1, 4]), np.array([0, 1, 4])), (5, 5)), ], ) -def test_joint_logpt_incsubtensor(indices, size): +def test_joint_logp_incsubtensor(indices, size): """Make sure we can compute a log-likelihood for ``Y[idx] = data`` where ``Y`` is univariate.""" mu = floatX(np.power(10, np.arange(np.prod(size)))).reshape(size) @@ -163,7 +163,7 @@ def test_joint_logpt_incsubtensor(indices, size): a_idx_value_var = a_idx.type() a_idx_value_var.name = "a_idx_value" - a_idx_logp = joint_logpt(a_idx, {a_idx: a_value_var}, sum=False) + a_idx_logp = joint_logp(a_idx, {a_idx: a_value_var}, sum=False) logp_vals = a_idx_logp[0].eval({a_value_var: a_val}) @@ -177,7 +177,7 @@ def test_joint_logpt_incsubtensor(indices, size): np.testing.assert_almost_equal(logp_vals, exp_obs_logps) -def test_joint_logpt_subtensor(): +def test_joint_logp_subtensor(): """Make sure we can compute a log-likelihood for ``Y[I]`` where ``Y`` and ``I`` are random variables.""" size = 5 @@ -205,7 +205,7 @@ def test_joint_logpt_subtensor(): I_value_var = I_rv.type() I_value_var.name = "I_value" - A_idx_logps = joint_logpt(A_idx, {A_idx: A_idx_value_var, I_rv: I_value_var}, sum=False) + A_idx_logps = joint_logp(A_idx, {A_idx: A_idx_value_var, I_rv: I_value_var}, sum=False) A_idx_logp = at.add(*A_idx_logps) logp_vals_fn = aesara.function([A_idx_value_var, I_value_var], A_idx_logp) @@ -289,8 +289,8 @@ def test_model_unchanged_logprob_access(): c = Uniform("c", lower=a - 1, upper=1) original_inputs = set(aesara.graph.graph_inputs([c])) - # Extract model.logpt - model.logpt() + # Extract model.logp + model.logp() new_inputs = set(aesara.graph.graph_inputs([c])) assert original_inputs == new_inputs @@ -301,7 +301,7 @@ def test_unexpected_rvs(): y = DensityDist("y", logp=lambda *args: x) with pytest.raises(ValueError, match="^Random variables detected in the logp graph"): - model.logpt() + model.logp() def test_ignore_logprob_basic(): @@ -331,9 +331,9 @@ def logp(value, x): y = DensityDist("y", x, logp=logp) # Aeppl raises a KeyError when it finds an unexpected RV with pytest.raises(KeyError): - joint_logpt([y], {y: y.type()}) + joint_logp([y], {y: y.type()}) with Model() as m: x = ignore_logprob(Normal.dist()) y = DensityDist("y", x, logp=logp) - assert joint_logpt([y], {y: y.type()}) + 
assert joint_logp([y], {y: y.type()}) diff --git a/pymc/tests/test_minibatches.py b/pymc/tests/test_minibatches.py index 62ff81874a..58bf66c138 100644 --- a/pymc/tests/test_minibatches.py +++ b/pymc/tests/test_minibatches.py @@ -170,11 +170,11 @@ class TestScaling: def test_density_scaling(self): with pm.Model() as model1: Normal("n", observed=[[1]], total_size=1) - p1 = aesara.function([], model1.logpt()) + p1 = aesara.function([], model1.logp()) with pm.Model() as model2: Normal("n", observed=[[1]], total_size=2) - p2 = aesara.function([], model2.logpt()) + p2 = aesara.function([], model2.logp()) assert p1() * 2 == p2() def test_density_scaling_with_generator(self): @@ -189,12 +189,12 @@ def true_dens(): # We have same size models with pm.Model() as model1: Normal("n", observed=gen1(), total_size=100) - p1 = aesara.function([], model1.logpt()) + p1 = aesara.function([], model1.logp()) with pm.Model() as model2: gen_var = generator(gen2()) Normal("n", observed=gen_var, total_size=100) - p2 = aesara.function([], model2.logpt()) + p2 = aesara.function([], model2.logp()) for i in range(10): _1, _2, _t = p1(), p2(), next(t) @@ -208,12 +208,12 @@ def test_gradient_with_scaling(self): genvar = generator(gen1()) m = Normal("m") Normal("n", observed=genvar, total_size=1000) - grad1 = aesara.function([m.tag.value_var], at.grad(model1.logpt(), m.tag.value_var)) + grad1 = aesara.function([m.tag.value_var], at.grad(model1.logp(), m.tag.value_var)) with pm.Model() as model2: m = Normal("m") shavar = aesara.shared(np.ones((1000, 100))) Normal("n", observed=shavar) - grad2 = aesara.function([m.tag.value_var], at.grad(model2.logpt(), m.tag.value_var)) + grad2 = aesara.function([m.tag.value_var], at.grad(model2.logp(), m.tag.value_var)) for i in range(10): shavar.set_value(np.ones((100, 100)) * i) @@ -224,27 +224,27 @@ def test_gradient_with_scaling(self): def test_multidim_scaling(self): with pm.Model() as model0: Normal("n", observed=[[1, 1], [1, 1]], total_size=[]) - p0 = aesara.function([], model0.logpt()) + p0 = aesara.function([], model0.logp()) with pm.Model() as model1: Normal("n", observed=[[1, 1], [1, 1]], total_size=[2, 2]) - p1 = aesara.function([], model1.logpt()) + p1 = aesara.function([], model1.logp()) with pm.Model() as model2: Normal("n", observed=[[1], [1]], total_size=[2, 2]) - p2 = aesara.function([], model2.logpt()) + p2 = aesara.function([], model2.logp()) with pm.Model() as model3: Normal("n", observed=[[1, 1]], total_size=[2, 2]) - p3 = aesara.function([], model3.logpt()) + p3 = aesara.function([], model3.logp()) with pm.Model() as model4: Normal("n", observed=[[1]], total_size=[2, 2]) - p4 = aesara.function([], model4.logpt()) + p4 = aesara.function([], model4.logp()) with pm.Model() as model5: Normal("n", observed=[[1]], total_size=[2, Ellipsis, 2]) - p5 = aesara.function([], model5.logpt()) + p5 = aesara.function([], model5.logp()) _p0 = p0() assert ( np.allclose(_p0, p1()) @@ -258,27 +258,27 @@ def test_common_errors(self): with pytest.raises(ValueError) as e: with pm.Model() as m: Normal("n", observed=[[1]], total_size=[2, Ellipsis, 2, 2]) - m.logpt() + m.logp() assert "Length of" in str(e.value) with pytest.raises(ValueError) as e: with pm.Model() as m: Normal("n", observed=[[1]], total_size=[2, 2, 2]) - m.logpt() + m.logp() assert "Length of" in str(e.value) with pytest.raises(TypeError) as e: with pm.Model() as m: Normal("n", observed=[[1]], total_size="foo") - m.logpt() + m.logp() assert "Unrecognized" in str(e.value) with pytest.raises(TypeError) as e: with pm.Model() as 
m: Normal("n", observed=[[1]], total_size=["foo"]) - m.logpt() + m.logp() assert "Unrecognized" in str(e.value) with pytest.raises(ValueError) as e: with pm.Model() as m: Normal("n", observed=[[1]], total_size=[Ellipsis, Ellipsis]) - m.logpt() + m.logp() assert "Double Ellipsis" in str(e.value) def test_mixed1(self): @@ -296,11 +296,11 @@ def test_mixed2(self): def test_free_rv(self): with pm.Model() as model4: Normal("n", observed=[[1, 1], [1, 1]], total_size=[2, 2]) - p4 = aesara.function([], model4.logpt()) + p4 = aesara.function([], model4.logp()) with pm.Model() as model5: n = Normal("n", total_size=[2, Ellipsis, 2], size=(2, 2)) - p5 = aesara.function([n.tag.value_var], model5.logpt()) + p5 = aesara.function([n.tag.value_var], model5.logp()) assert p4() == p5(pm.floatX([[1]])) assert p4() == p5(pm.floatX([[1, 1], [1, 1]])) diff --git a/pymc/tests/test_missing.py b/pymc/tests/test_missing.py index cbe3165874..2a7f92be78 100644 --- a/pymc/tests/test_missing.py +++ b/pymc/tests/test_missing.py @@ -20,7 +20,7 @@ from aesara.graph import graph_inputs from numpy import array, ma -from pymc import joint_logpt +from pymc import joint_logp from pymc.distributions import Dirichlet, Gamma, Normal, Uniform from pymc.exceptions import ImputationWarning from pymc.model import Model @@ -220,12 +220,12 @@ def test_missing_vector_parameter(): def test_missing_symmetric(): - """Check that logpt works when partially observed variable have equal observed and + """Check that logp works when partially observed variable have equal observed and unobserved dimensions. This would fail in a previous implementation because the two variables would be equivalent and one of them would be discarded during MergeOptimization while - buling the logpt graph + buling the logp graph """ with Model() as m: x = Gamma("x", alpha=3, beta=10, observed=np.array([1, np.nan])) @@ -236,7 +236,7 @@ def test_missing_symmetric(): x_unobs_rv = m["x_missing"] x_unobs_vv = m.rvs_to_values[x_unobs_rv] - logp = joint_logpt([x_obs_rv, x_unobs_rv], {x_obs_rv: x_obs_vv, x_unobs_rv: x_unobs_vv}) + logp = joint_logp([x_obs_rv, x_unobs_rv], {x_obs_rv: x_obs_vv, x_unobs_rv: x_unobs_vv}) logp_inputs = list(graph_inputs([logp])) assert x_obs_vv in logp_inputs assert x_unobs_vv in logp_inputs diff --git a/pymc/tests/test_mixture.py b/pymc/tests/test_mixture.py index 4f19f9c3e2..46249f93f9 100644 --- a/pymc/tests/test_mixture.py +++ b/pymc/tests/test_mixture.py @@ -927,7 +927,7 @@ def logp_matches(self, mixture, latent_mix, z, npop, model): def loose_logp(model, vars): """Return logp function that accepts dictionary with unused variables as input""" return model.compile_fn( - model.logpt(vars=vars, sum=False), + model.logp(vars=vars, sum=False), inputs=model.value_vars, on_unused_input="ignore", ) diff --git a/pymc/tests/test_model.py b/pymc/tests/test_model.py index 2f8683e9e3..4c4d86479a 100644 --- a/pymc/tests/test_model.py +++ b/pymc/tests/test_model.py @@ -677,7 +677,7 @@ def test_set_initval(): assert y in model.initial_values -def test_datalogpt_multiple_shapes(): +def test_datalogp_multiple_shapes(): with pm.Model() as m: x = pm.Normal("x", 0, 1) z1 = pm.Potential("z1", x) @@ -688,7 +688,7 @@ def test_datalogpt_multiple_shapes(): # This would raise a TypeError, see #4803 and #4804 x_val = m.rvs_to_values[x] - m.datalogpt.eval({x_val: 0}) + m.datalogp.eval({x_val: 0}) def test_nested_model_coords(): diff --git a/pymc/tests/test_profile.py b/pymc/tests/test_profile.py index 60c9260122..55d54cbaee 100644 --- a/pymc/tests/test_profile.py +++ 
b/pymc/tests/test_profile.py @@ -20,12 +20,12 @@ def setup_method(self): _, self.model, _ = simple_model() def test_profile_model(self): - assert self.model.profile(self.model.logpt()).fct_call_time > 0 + assert self.model.profile(self.model.logp()).fct_call_time > 0 def test_profile_variable(self): rv = self.model.basic_RVs[0] - assert self.model.profile(self.model.logpt(vars=[rv], sum=False)).fct_call_time + assert self.model.profile(self.model.logp(vars=[rv], sum=False)).fct_call_time def test_profile_count(self): count = 1005 - assert self.model.profile(self.model.logpt(), n=count).fct_callcount == count + assert self.model.profile(self.model.logp(), n=count).fct_callcount == count diff --git a/pymc/tests/test_smc.py b/pymc/tests/test_smc.py index d9e729f85b..f99932f8a2 100644 --- a/pymc/tests/test_smc.py +++ b/pymc/tests/test_smc.py @@ -294,7 +294,7 @@ def setup_class(self): s = pm.Simulator("s", self.normal_sim, a, b, observed=self.data) def test_one_gaussian(self): - assert self.count_rvs(self.SMABC_test.logpt()) == 1 + assert self.count_rvs(self.SMABC_test.logp()) == 1 with self.SMABC_test: trace = pm.sample_smc(draws=1000, chains=1, return_inferencedata=False) @@ -330,7 +330,7 @@ def test_custom_dist_sum_stat(self, floatX): observed=self.data, ) - assert self.count_rvs(m.logpt()) == 1 + assert self.count_rvs(m.logp()) == 1 with m: pm.sample_smc(draws=100) @@ -353,7 +353,7 @@ def test_custom_dist_sum_stat_scalar(self, floatX): sum_stat=self.quantiles, observed=scalar_data, ) - assert self.count_rvs(m.logpt()) == 1 + assert self.count_rvs(m.logp()) == 1 with pm.Model() as m: s = pm.Simulator( @@ -365,10 +365,10 @@ def test_custom_dist_sum_stat_scalar(self, floatX): sum_stat="mean", observed=scalar_data, ) - assert self.count_rvs(m.logpt()) == 1 + assert self.count_rvs(m.logp()) == 1 def test_model_with_potential(self): - assert self.count_rvs(self.SMABC_potential.logpt()) == 1 + assert self.count_rvs(self.SMABC_potential.logp()) == 1 with self.SMABC_potential: trace = pm.sample_smc(draws=100, chains=1, return_inferencedata=False) @@ -412,17 +412,17 @@ def test_multiple_simulators(self): observed=data2, ) - assert self.count_rvs(m.logpt()) == 2 + assert self.count_rvs(m.logp()) == 2 # Check that the logps use the correct methods a_val = m.rvs_to_values[a] sim1_val = m.rvs_to_values[sim1] - logp_sim1 = pm.joint_logpt(sim1, sim1_val) + logp_sim1 = pm.joint_logp(sim1, sim1_val) logp_sim1_fn = aesara.function([a_val], logp_sim1) b_val = m.rvs_to_values[b] sim2_val = m.rvs_to_values[sim2] - logp_sim2 = pm.joint_logpt(sim2, sim2_val) + logp_sim2 = pm.joint_logp(sim2, sim2_val) logp_sim2_fn = aesara.function([b_val], logp_sim2) assert any( @@ -462,7 +462,7 @@ def test_nested_simulators(self): observed=data, ) - assert self.count_rvs(m.logpt()) == 2 + assert self.count_rvs(m.logp()) == 2 with m: trace = pm.sample_smc(return_inferencedata=False) diff --git a/pymc/tests/test_transforms.py b/pymc/tests/test_transforms.py index 493f418d33..d939d0acfb 100644 --- a/pymc/tests/test_transforms.py +++ b/pymc/tests/test_transforms.py @@ -24,7 +24,7 @@ import pymc.distributions.transforms as tr from pymc.aesaraf import floatX, jacobian -from pymc.distributions import joint_logpt +from pymc.distributions import joint_logp from pymc.tests.checks import close_to, close_to_logical from pymc.tests.helpers import SeededTest from pymc.tests.test_distributions import ( @@ -287,10 +287,10 @@ def check_transform_elementwise_logp(self, model): x_val_untransf = at.constant(test_array_untransf).type() jacob_det = 
transform.log_jac_det(test_array_transf, *x.owner.inputs) - assert joint_logpt(x, sum=False)[0].ndim == x.ndim == jacob_det.ndim + assert joint_logp(x, sum=False)[0].ndim == x.ndim == jacob_det.ndim - v1 = joint_logpt(x, x_val_transf, jacobian=False).eval({x_val_transf: test_array_transf}) - v2 = joint_logpt(x, x_val_untransf, transformed=False).eval( + v1 = joint_logp(x, x_val_transf, jacobian=False).eval({x_val_transf: test_array_transf}) + v2 = joint_logp(x, x_val_untransf, transformed=False).eval( {x_val_untransf: test_array_untransf} ) close_to(v1, v2, tol) @@ -310,13 +310,13 @@ def check_vectortransform_elementwise_logp(self, model): jacob_det = transform.log_jac_det(test_array_transf, *x.owner.inputs) # Original distribution is univariate if x.owner.op.ndim_supp == 0: - assert joint_logpt(x, sum=False)[0].ndim == x.ndim == (jacob_det.ndim + 1) + assert joint_logp(x, sum=False)[0].ndim == x.ndim == (jacob_det.ndim + 1) # Original distribution is multivariate else: - assert joint_logpt(x, sum=False)[0].ndim == (x.ndim - 1) == jacob_det.ndim + assert joint_logp(x, sum=False)[0].ndim == (x.ndim - 1) == jacob_det.ndim - a = joint_logpt(x, x_val_transf, jacobian=False).eval({x_val_transf: test_array_transf}) - b = joint_logpt(x, x_val_untransf, transformed=False).eval( + a = joint_logp(x, x_val_transf, jacobian=False).eval({x_val_transf: test_array_transf}) + b = joint_logp(x, x_val_untransf, transformed=False).eval( {x_val_untransf: test_array_untransf} ) # Hack to get relative tolerance diff --git a/pymc/tuning/scaling.py b/pymc/tuning/scaling.py index 28471a0dda..2209d29cca 100644 --- a/pymc/tuning/scaling.py +++ b/pymc/tuning/scaling.py @@ -75,7 +75,7 @@ def find_hessian_diag(point, vars=None, model=None): Variables for which Hessian is to be calculated. """ model = modelcontext(model) - H = model.compile_fn(hessian_diag(model.logpt(), vars)) + H = model.compile_fn(hessian_diag(model.logp(), vars)) return H(Point(point, model=model)) diff --git a/pymc/variational/opvi.py b/pymc/variational/opvi.py index 5f2efce143..f4080bdbba 100644 --- a/pymc/variational/opvi.py +++ b/pymc/variational/opvi.py @@ -1232,7 +1232,7 @@ def logq_norm(self): def _sized_symbolic_varlogp_and_datalogp(self): """*Dev* - computes sampled prior term from model via `aesara.scan`""" varlogp_s, datalogp_s = self.symbolic_sample_over_posterior( - [self.model.varlogpt, self.model.datalogpt] + [self.model.varlogp, self.model.datalogp] ) return varlogp_s, datalogp_s # both shape (s,) @@ -1269,7 +1269,7 @@ def datalogp(self): @node_property def _single_symbolic_varlogp_and_datalogp(self): """*Dev* - computes sampled prior term from model via `aesara.scan`""" - varlogp, datalogp = self.symbolic_single_sample([self.model.varlogpt, self.model.datalogpt]) + varlogp, datalogp = self.symbolic_single_sample([self.model.varlogp, self.model.datalogp]) return varlogp, datalogp @node_property From bf37bba93a4f8b1e9dc60bdfe098859e2a7e4c63 Mon Sep 17 00:00:00 2001 From: Fernando Irarrazaval Date: Tue, 7 Jun 2022 22:09:32 -0400 Subject: [PATCH 04/28] Fix issue with d2logpt --- pymc/model.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pymc/model.py b/pymc/model.py index 694873c27c..b0991e1758 100644 --- a/pymc/model.py +++ b/pymc/model.py @@ -821,7 +821,7 @@ def d2logpt(self, *args, **kwargs): "Model.d2logpt has been deprecated. 
Use Model.d2logp instead.", FutureWarning, ) - return self.logp(*args, **kwargs) + return self.d2logp(*args, **kwargs) def d2logp( self, From 8a1348d749fdb109751cbc3af5c2a8cb9a3cea04 Mon Sep 17 00:00:00 2001 From: Fernando Irarrazaval Date: Wed, 8 Jun 2022 08:48:56 -0400 Subject: [PATCH 05/28] Added tests --- pymc/distributions/logprob.py | 5 +++-- pymc/model.py | 16 ++++++++-------- pymc/tests/test_logprob.py | 4 ++++ pymc/tests/test_model.py | 20 ++++++++++++++++++++ 4 files changed, 35 insertions(+), 10 deletions(-) diff --git a/pymc/distributions/logprob.py b/pymc/distributions/logprob.py index b1f76d2b30..620c6ec511 100644 --- a/pymc/distributions/logprob.py +++ b/pymc/distributions/logprob.py @@ -11,6 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +import warnings from collections.abc import Mapping from typing import Dict, List, Optional, Sequence, Union @@ -119,12 +120,12 @@ def _get_scaling( ) -def joint_logpt(self, *args, **kwargs): +def joint_logpt(*args, **kwargs): warnings.warn( "logprob.joint_logpt has been deprecated. Use logprob.joint_logp instead.", FutureWarning, ) - return self.joint_logp(*args, **kwargs) + return joint_logp(*args, **kwargs) def joint_logp( diff --git a/pymc/model.py b/pymc/model.py index b0991e1758..3c38477e65 100644 --- a/pymc/model.py +++ b/pymc/model.py @@ -862,12 +862,12 @@ def d2logp( return hessian(cost, value_vars) @property - def datalogpt(self, *args, **kwargs): + def datalogpt(self): warnings.warn( "Model.datalogpt has been deprecated. Use Model.datalogp instead.", FutureWarning, ) - return self.datalogp(*args, **kwargs) + return self.datalogp @property def datalogp(self) -> Variable: @@ -876,12 +876,12 @@ def datalogp(self) -> Variable: return self.observedlogp + self.potentiallogp @property - def varlogpt(self, *args, **kwargs): + def varlogpt(self): warnings.warn( "Model.varlogpt has been deprecated. Use Model.varlogp instead.", FutureWarning, ) - return self.varlogp(*args, **kwargs) + return self.varlogp @property def varlogp(self) -> Variable: @@ -896,12 +896,12 @@ def varlogp_nojact(self) -> Variable: return self.logp(vars=self.free_RVs, jacobian=False) @property - def observedlogpt(self, *args, **kwargs): + def observedlogpt(self): warnings.warn( "Model.observedlogpt has been deprecated. Use Model.observedlogp instead.", FutureWarning, ) - return self.observedlogp(*args, **kwargs) + return self.observedlogp @property def observedlogp(self) -> Variable: @@ -909,12 +909,12 @@ def observedlogp(self) -> Variable: return self.logp(vars=self.observed_RVs) @property - def potentiallogpt(self, *args, **kwargs): + def potentiallogpt(self): warnings.warn( "Model.potentiallogpt has been deprecated. 
Use Model.potentiallogp instead.", FutureWarning, ) - return self.potentiallogp(*args, **kwargs) + return self.potentiallogp @property def potentiallogp(self) -> Variable: diff --git a/pymc/tests/test_logprob.py b/pymc/tests/test_logprob.py index 66abd59ded..d0be51e833 100644 --- a/pymc/tests/test_logprob.py +++ b/pymc/tests/test_logprob.py @@ -43,6 +43,7 @@ _get_scaling, ignore_logprob, joint_logp, + joint_logpt, logcdf, logp, ) @@ -121,6 +122,9 @@ def test_joint_logp_basic(): b_logp = joint_logp(b, b_value_var, sum=False) + with pytest.warns(FutureWarning): + b_logpt = joint_logpt(b, b_value_var, sum=False) + res_ancestors = list(walk_model(b_logp, walk_past_rvs=True)) res_rv_ancestors = [ v for v in res_ancestors if v.owner and isinstance(v.owner.op, RandomVariable) diff --git a/pymc/tests/test_model.py b/pymc/tests/test_model.py index 4c4d86479a..11a7380bc9 100644 --- a/pymc/tests/test_model.py +++ b/pymc/tests/test_model.py @@ -880,6 +880,26 @@ def test_set_data_indirect_resize_with_coords(): pmodel.set_data("mdata", [1, 2], coords=dict(mdim=[1, 2, 3])) +def test_model_logpt_deprecation_warning(): + with pm.Model() as m: + x = pm.Normal("x", 0, 1, size=2) + y = pm.LogNormal("y", 0, 1, size=2) + + with pytest.warns(FutureWarning): + m.logpt() + with pytest.warns(FutureWarning): + m.dlogpt() + with pytest.warns(FutureWarning): + m.d2logpt() + + with pytest.warns(FutureWarning): + m.datalogpt + with pytest.warns(FutureWarning): + m.varlogpt + with pytest.warns(FutureWarning): + m.observedlogpt + + @pytest.mark.parametrize("jacobian", [True, False]) def test_model_logp(jacobian): with pm.Model() as m: From 876eb11b31addd40ec8b919f8ba2397b118fc15c Mon Sep 17 00:00:00 2001 From: Fernando Irarrazaval Date: Wed, 8 Jun 2022 08:56:38 -0400 Subject: [PATCH 06/28] Fix typo --- pymc/model.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pymc/model.py b/pymc/model.py index 3c38477e65..242f6f7ef6 100644 --- a/pymc/model.py +++ b/pymc/model.py @@ -776,7 +776,7 @@ def dlogpt(self, *args, **kwargs): "Model.dlogpt has been deprecated. Use Model.dlogp instead.", FutureWarning, ) - return self.logp(*args, **kwargs) + return self.dlogp(*args, **kwargs) def dlogp( self, From 5ebdfe71266c87d027e2a02f75b11daeed70469f Mon Sep 17 00:00:00 2001 From: Fernando Irarrazaval Date: Wed, 8 Jun 2022 08:58:07 -0400 Subject: [PATCH 07/28] Updated release notes for 4.0 --- RELEASE-NOTES.md | 4 ++-- docs/source/learn/core_notebooks/pymc_aesara.ipynb | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/RELEASE-NOTES.md b/RELEASE-NOTES.md index 4aeb873d29..cf54a1b491 100644 --- a/RELEASE-NOTES.md +++ b/RELEASE-NOTES.md @@ -113,8 +113,8 @@ _Read on if you're a developer. Or curious. Or both._ - `pm.GaussianRandomWalk` initial distribution defaults to zero-centered normal with sigma=100 instead of flat (see[#5779](https://github.com/pymc-devs/pymc/pull/5779)) - `pm.AR` initial distribution defaults to unit normal instead of flat (see[#5779](https://github.com/pymc-devs/pymc/pull/5779)) - - `logpt`, `logpt_sum`, `logp_elemwiset` and `nojac` variations were removed. Use `Model.logpt(jacobian=True/False, sum=True/False)` instead. - - `dlogp_nojact` and `d2logp_nojact` were removed. Use `Model.dlogpt` and `d2logpt` with `jacobian=False` instead. + - `logpt`, `logpt_sum`, `logp_elemwiset` and `nojac` variations were removed. Use `Model.logp(jacobian=True/False, sum=True/False)` instead. + - `dlogp_nojact` and `d2logp_nojact` were removed. 
Use `Model.dlogp` and `d2logp` with `jacobian=False` instead. - `model.makefn` is now called `Model.compile_fn`, and `model.fn` was removed. - Methods starting with `fast_*`, such as `Model.fast_logp`, were removed. Same applies to `PointFunc` classes - `Model(model=...)` kwarg was removed diff --git a/docs/source/learn/core_notebooks/pymc_aesara.ipynb b/docs/source/learn/core_notebooks/pymc_aesara.ipynb index 329a3aa6d4..48e24053fe 100644 --- a/docs/source/learn/core_notebooks/pymc_aesara.ipynb +++ b/docs/source/learn/core_notebooks/pymc_aesara.ipynb @@ -1844,7 +1844,7 @@ } }, "source": [ - "`pymc` models provide some helpful routines to facilitating the conversion of `RandomVariable`s to probability functions. {meth}`~pymc.Model.logpt`, for instance can be used to extract the joint probability of all variables in the model:" + "`pymc` models provide some helpful routines to facilitating the conversion of `RandomVariable`s to probability functions. {meth}`~pymc.Model.logp`, for instance can be used to extract the joint probability of all variables in the model:" ] }, { @@ -1902,7 +1902,7 @@ } ], "source": [ - "aesara.dprint(model.logpt(sum=False))" + "aesara.dprint(model.logp(sum=False))" ] }, { @@ -2213,7 +2213,7 @@ "sigma_log_value = model_2.rvs_to_values[sigma]\n", "x_value = model_2.rvs_to_values[x]\n", "# element-wise log-probability of the model (we do not take te sum)\n", - "logp_graph = at.stack(model_2.logpt(sum=False))\n", + "logp_graph = at.stack(model_2.logp(sum=False))\n", "# evaluate by passing concrete values\n", "logp_graph.eval({mu_value: 0, sigma_log_value: -10, x_value:0})" ] @@ -2314,7 +2314,7 @@ } }, "source": [ - "The {class}`~pymc.Model` class also has methods to extract the gradient ({meth}`~pymc.Model.dlogpt`) and the hessian ({meth}`~pymc.Model.d2logpt`) of the logp." + "The {class}`~pymc.Model` class also has methods to extract the gradient ({meth}`~pymc.Model.dlogp`) and the hessian ({meth}`~pymc.Model.d2logp`) of the logp." ] }, { From 067d4f9946f59db2ff853110498148e80eeb3e82 Mon Sep 17 00:00:00 2001 From: Fernando Irarrazaval Date: Wed, 8 Jun 2022 09:49:27 -0400 Subject: [PATCH 08/28] Added potentiallogpt test --- pymc/tests/test_model.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/pymc/tests/test_model.py b/pymc/tests/test_model.py index 11a7380bc9..82fe021b53 100644 --- a/pymc/tests/test_model.py +++ b/pymc/tests/test_model.py @@ -887,18 +887,25 @@ def test_model_logpt_deprecation_warning(): with pytest.warns(FutureWarning): m.logpt() + with pytest.warns(FutureWarning): m.dlogpt() + with pytest.warns(FutureWarning): m.d2logpt() with pytest.warns(FutureWarning): m.datalogpt + with pytest.warns(FutureWarning): m.varlogpt + with pytest.warns(FutureWarning): m.observedlogpt + with pytest.warns(FutureWarning): + m.potentiallogpt + @pytest.mark.parametrize("jacobian", [True, False]) def test_model_logp(jacobian): From 454fc18ce285df2a2ad38dfecb000f4898807635 Mon Sep 17 00:00:00 2001 From: Fernando Irarrazaval Date: Wed, 8 Jun 2022 13:34:08 -0400 Subject: [PATCH 09/28] Updated developer guide --- docs/source/contributing/developer_guide.rst | 42 ++++++++++---------- 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/docs/source/contributing/developer_guide.rst b/docs/source/contributing/developer_guide.rst index c1b7c90228..ec589e6ac6 100644 --- a/docs/source/contributing/developer_guide.rst +++ b/docs/source/contributing/developer_guide.rst @@ -204,8 +204,8 @@ distribution. 
It has the following signature: In the ``logp`` method, parameters and values are either Aesara tensors, or could be converted to tensors. It is rather convenient as the -evaluation of logp is represented as a tensor (``RV.logpt``), and when -we linked different ``logp`` together (e.g., summing all ``RVs.logpt`` +evaluation of logp is represented as a tensor (``RV.logp``), and when +we linked different ``logp`` together (e.g., summing all ``RVs.logp`` to get the model totall logp) the dependence is taken care of by Aesara when the graph is built and compiled. Again, since the compiled function depends on the nodes that already in the graph, whenever you want to generate @@ -293,8 +293,8 @@ a model: print(type(x)) # ==> print(m.free_RVs) # ==> [x] - print(logpt(x, 5.0)) # ==> Elemwise{switch,no_inplace}.0 - print(logpt(x, 5.).eval({})) # ==> -13.418938533204672 + print(logp(x, 5.0)) # ==> Elemwise{switch,no_inplace}.0 + print(logp(x, 5.).eval({})) # ==> -13.418938533204672 print(m.logp({'x': 5.})) # ==> -13.418938533204672 @@ -431,7 +431,7 @@ initialised within the same model) as input, for example: ['d2logp', 'd2logp_nojac', - 'datalogpt', + 'datalogp', 'dlogp', 'dlogp_array', 'dlogp_nojac', @@ -447,8 +447,8 @@ initialised within the same model) as input, for example: 'logp_elemwise', 'logp_nojac', 'logp_nojact', - 'logpt', - 'varlogpt'] + 'logp', + 'varlogp'] @@ -462,10 +462,10 @@ sum them together to get the model logp: .. code:: python @property - def logpt(self): + def logp(self): """Aesara scalar of log-probability of the model""" with self: - factors = [var.logpt for var in self.basic_RVs] + self.potentials + factors = [var.logp for var in self.basic_RVs] + self.potentials logp = at.sum([at.sum(factor) for factor in factors]) ... return logp @@ -491,12 +491,12 @@ using aesara.clone_replace to replace the inputs to a tensor. .. code:: python - type(m.logpt) # ==> aesara.tensor.var.TensorVariable + type(m.logp) # ==> aesara.tensor.var.TensorVariable .. code:: python - m.logpt.eval({x: np.random.randn(*x.tag.test_value.shape) for x in m.free_RVs}) + m.logp.eval({x: np.random.randn(*x.tag.test_value.shape) for x in m.free_RVs}) output: @@ -507,7 +507,7 @@ output: PyMC then compiles a logp function with gradient that takes -``model.free_RVs`` as input and ``model.logpt`` as output. It could be a +``model.free_RVs`` as input and ``model.logp`` as output. It could be a subset of tensors in ``model.free_RVs`` if we want a conditional logp/dlogp function: @@ -521,11 +521,11 @@ logp/dlogp function: varnames = [var.name for var in grad_vars] # In a simple case with only continous RVs, # this is all the free_RVs extra_vars = [var for var in self.free_RVs if var.name not in varnames] - return ValueGradFunction(self.logpt, grad_vars, extra_vars, **kwargs) + return ValueGradFunction(self.logp, grad_vars, extra_vars, **kwargs) ``ValueGradFunction`` is a callable class which isolates part of the Aesara graph to compile additional Aesara functions. PyMC relies on -``aesara.clone_replace`` to copy the ``model.logpt`` and replace its input. It +``aesara.clone_replace`` to copy the ``model.logp`` and replace its input. It does not edit or rewrite the graph directly. The important parts of the above function is highlighted and commented. @@ -595,7 +595,7 @@ logp function in Aesara directly: .. code:: python import aesara - func = aesara.function(m.free_RVs, m.logpt) + func = aesara.function(m.free_RVs, m.logp) func(*inputlist) @@ -607,8 +607,8 @@ logp function in Aesara directly: .. 
code:: python - logpt_grad = aesara.grad(m.logpt, m.free_RVs) - func_d = aesara.function(m.free_RVs, logpt_grad) + logp_grad = aesara.grad(m.logp, m.free_RVs) + func_d = aesara.function(m.free_RVs, logp_grad) func_d(*inputlist) @@ -626,11 +626,11 @@ Similarly, build a conditional logp: .. code:: python shared = aesara.shared(inputlist[1]) - func2 = aesara.function([m.free_RVs[0]], m.logpt, givens=[(m.free_RVs[1], shared)]) + func2 = aesara.function([m.free_RVs[0]], m.logp, givens=[(m.free_RVs[1], shared)]) print(func2(inputlist[0])) - logpt_grad2 = aesara.grad(m.logpt, m.free_RVs[0]) - func_d2 = aesara.function([m.free_RVs[0]], logpt_grad2, givens=[(m.free_RVs[1], shared)]) + logp_grad2 = aesara.grad(m.logp, m.free_RVs[0]) + func_d2 = aesara.function([m.free_RVs[0]], logp_grad2, givens=[(m.free_RVs[1], shared)]) print(func_d2(inputlist[0])) @@ -647,7 +647,7 @@ everything into a single function: .. code:: python - func_logp_and_grad = aesara.function(m.free_RVs, [m.logpt, logpt_grad]) # ==> ERROR + func_logp_and_grad = aesara.function(m.free_RVs, [m.logp, logp_grad]) # ==> ERROR We want to have a function that return the evaluation and its gradient From 28f5642a553696a3bfd9c64f532c3704de771bc5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fernando=20Irarr=C3=A1zaval?= Date: Mon, 13 Jun 2022 16:44:52 -0400 Subject: [PATCH 10/28] Update pymc/distributions/logprob.py Co-authored-by: Ricardo Vieira <28983449+ricardoV94@users.noreply.github.com> --- pymc/distributions/logprob.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pymc/distributions/logprob.py b/pymc/distributions/logprob.py index 620c6ec511..d67dc35ed1 100644 --- a/pymc/distributions/logprob.py +++ b/pymc/distributions/logprob.py @@ -122,7 +122,7 @@ def _get_scaling( def joint_logpt(*args, **kwargs): warnings.warn( - "logprob.joint_logpt has been deprecated. Use logprob.joint_logp instead.", + "joint_logpt has been deprecated. Use joint_logp instead.", FutureWarning, ) return joint_logp(*args, **kwargs) From 04718d9bffeb3a1ceae4dfa0c2ddf0bc9717da12 Mon Sep 17 00:00:00 2001 From: Fernando Irarrazaval Date: Mon, 13 Jun 2022 16:50:48 -0400 Subject: [PATCH 11/28] Removed t from varlogp_nojact --- pymc/model.py | 10 +++++++++- pymc/tests/test_model.py | 3 +++ 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/pymc/model.py b/pymc/model.py index 242f6f7ef6..019015bccd 100644 --- a/pymc/model.py +++ b/pymc/model.py @@ -890,7 +890,15 @@ def varlogp(self) -> Variable: return self.logp(vars=self.free_RVs) @property - def varlogp_nojact(self) -> Variable: + def varlogp_nojact(self): + warnings.warn( + "Model.varlogp_nojact has been deprecated. 
Use Model.varlogp_nojac instead.", + FutureWarning, + ) + return self.varlogp_nojac + + @property + def varlogp_nojac(self) -> Variable: """Aesara scalar of log-probability of the unobserved random variables (excluding deterministic) without jacobian term.""" return self.logp(vars=self.free_RVs, jacobian=False) diff --git a/pymc/tests/test_model.py b/pymc/tests/test_model.py index 82fe021b53..34dedaa1ca 100644 --- a/pymc/tests/test_model.py +++ b/pymc/tests/test_model.py @@ -906,6 +906,9 @@ def test_model_logpt_deprecation_warning(): with pytest.warns(FutureWarning): m.potentiallogpt + with pytest.warns(FutureWarning): + m.varlogp_nojact + @pytest.mark.parametrize("jacobian", [True, False]) def test_model_logp(jacobian): From 99c0afb117445a45587ab2663ada5b4266af27e1 Mon Sep 17 00:00:00 2001 From: Fernando Irarrazaval Date: Mon, 13 Jun 2022 17:02:23 -0400 Subject: [PATCH 12/28] Revert Release Notes --- RELEASE-NOTES.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/RELEASE-NOTES.md b/RELEASE-NOTES.md index cf54a1b491..4aeb873d29 100644 --- a/RELEASE-NOTES.md +++ b/RELEASE-NOTES.md @@ -113,8 +113,8 @@ _Read on if you're a developer. Or curious. Or both._ - `pm.GaussianRandomWalk` initial distribution defaults to zero-centered normal with sigma=100 instead of flat (see[#5779](https://github.com/pymc-devs/pymc/pull/5779)) - `pm.AR` initial distribution defaults to unit normal instead of flat (see[#5779](https://github.com/pymc-devs/pymc/pull/5779)) - - `logpt`, `logpt_sum`, `logp_elemwiset` and `nojac` variations were removed. Use `Model.logp(jacobian=True/False, sum=True/False)` instead. - - `dlogp_nojact` and `d2logp_nojact` were removed. Use `Model.dlogp` and `d2logp` with `jacobian=False` instead. + - `logpt`, `logpt_sum`, `logp_elemwiset` and `nojac` variations were removed. Use `Model.logpt(jacobian=True/False, sum=True/False)` instead. + - `dlogp_nojact` and `d2logp_nojact` were removed. Use `Model.dlogpt` and `d2logpt` with `jacobian=False` instead. - `model.makefn` is now called `Model.compile_fn`, and `model.fn` was removed. - Methods starting with `fast_*`, such as `Model.fast_logp`, were removed. Same applies to `PointFunc` classes - `Model(model=...)` kwarg was removed From 299a0091d273caf50c6e3fce80f18196dade9455 Mon Sep 17 00:00:00 2001 From: Fernando Irarrazaval Date: Mon, 13 Jun 2022 17:03:19 -0400 Subject: [PATCH 13/28] Revert changes to developer guide --- docs/source/contributing/developer_guide.rst | 42 ++++++++++---------- 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/docs/source/contributing/developer_guide.rst b/docs/source/contributing/developer_guide.rst index ec589e6ac6..c1b7c90228 100644 --- a/docs/source/contributing/developer_guide.rst +++ b/docs/source/contributing/developer_guide.rst @@ -204,8 +204,8 @@ distribution. It has the following signature: In the ``logp`` method, parameters and values are either Aesara tensors, or could be converted to tensors. It is rather convenient as the -evaluation of logp is represented as a tensor (``RV.logp``), and when -we linked different ``logp`` together (e.g., summing all ``RVs.logp`` +evaluation of logp is represented as a tensor (``RV.logpt``), and when +we linked different ``logp`` together (e.g., summing all ``RVs.logpt`` to get the model totall logp) the dependence is taken care of by Aesara when the graph is built and compiled. 
Again, since the compiled function depends on the nodes that already in the graph, whenever you want to generate @@ -293,8 +293,8 @@ a model: print(type(x)) # ==> print(m.free_RVs) # ==> [x] - print(logp(x, 5.0)) # ==> Elemwise{switch,no_inplace}.0 - print(logp(x, 5.).eval({})) # ==> -13.418938533204672 + print(logpt(x, 5.0)) # ==> Elemwise{switch,no_inplace}.0 + print(logpt(x, 5.).eval({})) # ==> -13.418938533204672 print(m.logp({'x': 5.})) # ==> -13.418938533204672 @@ -431,7 +431,7 @@ initialised within the same model) as input, for example: ['d2logp', 'd2logp_nojac', - 'datalogp', + 'datalogpt', 'dlogp', 'dlogp_array', 'dlogp_nojac', @@ -447,8 +447,8 @@ initialised within the same model) as input, for example: 'logp_elemwise', 'logp_nojac', 'logp_nojact', - 'logp', - 'varlogp'] + 'logpt', + 'varlogpt'] @@ -462,10 +462,10 @@ sum them together to get the model logp: .. code:: python @property - def logp(self): + def logpt(self): """Aesara scalar of log-probability of the model""" with self: - factors = [var.logp for var in self.basic_RVs] + self.potentials + factors = [var.logpt for var in self.basic_RVs] + self.potentials logp = at.sum([at.sum(factor) for factor in factors]) ... return logp @@ -491,12 +491,12 @@ using aesara.clone_replace to replace the inputs to a tensor. .. code:: python - type(m.logp) # ==> aesara.tensor.var.TensorVariable + type(m.logpt) # ==> aesara.tensor.var.TensorVariable .. code:: python - m.logp.eval({x: np.random.randn(*x.tag.test_value.shape) for x in m.free_RVs}) + m.logpt.eval({x: np.random.randn(*x.tag.test_value.shape) for x in m.free_RVs}) output: @@ -507,7 +507,7 @@ output: PyMC then compiles a logp function with gradient that takes -``model.free_RVs`` as input and ``model.logp`` as output. It could be a +``model.free_RVs`` as input and ``model.logpt`` as output. It could be a subset of tensors in ``model.free_RVs`` if we want a conditional logp/dlogp function: @@ -521,11 +521,11 @@ logp/dlogp function: varnames = [var.name for var in grad_vars] # In a simple case with only continous RVs, # this is all the free_RVs extra_vars = [var for var in self.free_RVs if var.name not in varnames] - return ValueGradFunction(self.logp, grad_vars, extra_vars, **kwargs) + return ValueGradFunction(self.logpt, grad_vars, extra_vars, **kwargs) ``ValueGradFunction`` is a callable class which isolates part of the Aesara graph to compile additional Aesara functions. PyMC relies on -``aesara.clone_replace`` to copy the ``model.logp`` and replace its input. It +``aesara.clone_replace`` to copy the ``model.logpt`` and replace its input. It does not edit or rewrite the graph directly. The important parts of the above function is highlighted and commented. @@ -595,7 +595,7 @@ logp function in Aesara directly: .. code:: python import aesara - func = aesara.function(m.free_RVs, m.logp) + func = aesara.function(m.free_RVs, m.logpt) func(*inputlist) @@ -607,8 +607,8 @@ logp function in Aesara directly: .. code:: python - logp_grad = aesara.grad(m.logp, m.free_RVs) - func_d = aesara.function(m.free_RVs, logp_grad) + logpt_grad = aesara.grad(m.logpt, m.free_RVs) + func_d = aesara.function(m.free_RVs, logpt_grad) func_d(*inputlist) @@ -626,11 +626,11 @@ Similarly, build a conditional logp: .. 
code:: python shared = aesara.shared(inputlist[1]) - func2 = aesara.function([m.free_RVs[0]], m.logp, givens=[(m.free_RVs[1], shared)]) + func2 = aesara.function([m.free_RVs[0]], m.logpt, givens=[(m.free_RVs[1], shared)]) print(func2(inputlist[0])) - logp_grad2 = aesara.grad(m.logp, m.free_RVs[0]) - func_d2 = aesara.function([m.free_RVs[0]], logp_grad2, givens=[(m.free_RVs[1], shared)]) + logpt_grad2 = aesara.grad(m.logpt, m.free_RVs[0]) + func_d2 = aesara.function([m.free_RVs[0]], logpt_grad2, givens=[(m.free_RVs[1], shared)]) print(func_d2(inputlist[0])) @@ -647,7 +647,7 @@ everything into a single function: .. code:: python - func_logp_and_grad = aesara.function(m.free_RVs, [m.logp, logp_grad]) # ==> ERROR + func_logp_and_grad = aesara.function(m.free_RVs, [m.logpt, logpt_grad]) # ==> ERROR We want to have a function that return the evaluation and its gradient From df0c1889fc016eb1475c65e16038605b704f47fc Mon Sep 17 00:00:00 2001 From: Fernando Irarrazaval Date: Tue, 7 Jun 2022 10:52:12 -0400 Subject: [PATCH 14/28] Future warning for logpt --- pymc/model.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/pymc/model.py b/pymc/model.py index 4f089e5732..c8ec96716d 100644 --- a/pymc/model.py +++ b/pymc/model.py @@ -690,7 +690,14 @@ def compile_d2logp( """ return self.model.compile_fn(self.d2logpt(vars=vars, jacobian=jacobian)) - def logpt( + def logpt(self, *args, **kwargs): + warnings.warn( + "Model.logpt has been deprecated. Use Model.logp instead.", + FutureWarning, + ) + return self.logp(*args, **kwargs) + + def logp( self, vars: Optional[Union[Variable, Sequence[Variable]]] = None, jacobian: bool = True, From 7633380ab011ea6fab82b52b105a6eb54651c778 Mon Sep 17 00:00:00 2001 From: Fernando Irarrazaval Date: Tue, 7 Jun 2022 11:10:00 -0400 Subject: [PATCH 15/28] Future warning for dlogpt and d2logpt --- pymc/model.py | 22 ++++++++++++++++++---- 1 file changed, 18 insertions(+), 4 deletions(-) diff --git a/pymc/model.py b/pymc/model.py index c8ec96716d..aa159f28a7 100644 --- a/pymc/model.py +++ b/pymc/model.py @@ -671,7 +671,7 @@ def compile_dlogp( jacobian: Whether to include jacobian terms in logprob graph. Defaults to True. """ - return self.model.compile_fn(self.dlogpt(vars=vars, jacobian=jacobian)) + return self.model.compile_fn(self.dlogp(vars=vars, jacobian=jacobian)) def compile_d2logp( self, @@ -688,7 +688,7 @@ def compile_d2logp( jacobian: Whether to include jacobian terms in logprob graph. Defaults to True. """ - return self.model.compile_fn(self.d2logpt(vars=vars, jacobian=jacobian)) + return self.model.compile_fn(self.d2logp(vars=vars, jacobian=jacobian)) def logpt(self, *args, **kwargs): warnings.warn( @@ -771,7 +771,14 @@ def logp( logp_scalar.name = logp_scalar_name return logp_scalar - def dlogpt( + def dlogpt(self, *args, **kwargs): + warnings.warn( + "Model.dlogpt has been deprecated. Use Model.dlogp instead.", + FutureWarning, + ) + return self.logp(*args, **kwargs) + + def dlogp( self, vars: Optional[Union[Variable, Sequence[Variable]]] = None, jacobian: bool = True, @@ -809,7 +816,14 @@ def dlogpt( cost = self.logpt(jacobian=jacobian) return gradient(cost, value_vars) - def d2logpt( + def d2logpt(self, *args, **kwargs): + warnings.warn( + "Model.d2logpt has been deprecated. 
Use Model.d2logp instead.", + FutureWarning, + ) + return self.logp(*args, **kwargs) + + def d2logp( self, vars: Optional[Union[Variable, Sequence[Variable]]] = None, jacobian: bool = True, From b12e823572482d14c83b11335eaf5602f0cee697 Mon Sep 17 00:00:00 2001 From: Fernando Irarrazaval Date: Tue, 7 Jun 2022 22:04:08 -0400 Subject: [PATCH 16/28] Updated references to logpt, and updated varlogpt, datalogpt, joint_logpt --- pymc/backends/arviz.py | 4 +- pymc/distributions/__init__.py | 2 + pymc/distributions/continuous.py | 2 +- pymc/distributions/logprob.py | 16 +++-- pymc/model.py | 64 +++++++++++++----- pymc/sampling.py | 4 +- pymc/sampling_jax.py | 10 +-- pymc/smc/smc.py | 4 +- pymc/step_methods/metropolis.py | 6 +- pymc/step_methods/mlda.py | 4 +- pymc/tests/test_distributions.py | 86 ++++++++++++------------ pymc/tests/test_distributions_moments.py | 4 +- pymc/tests/test_logprob.py | 24 +++---- pymc/tests/test_minibatches.py | 38 +++++------ pymc/tests/test_missing.py | 8 +-- pymc/tests/test_mixture.py | 2 +- pymc/tests/test_model.py | 4 +- pymc/tests/test_profile.py | 6 +- pymc/tests/test_smc.py | 18 ++--- pymc/tests/test_transforms.py | 16 ++--- pymc/tuning/scaling.py | 2 +- pymc/variational/opvi.py | 4 +- 22 files changed, 185 insertions(+), 143 deletions(-) diff --git a/pymc/backends/arviz.py b/pymc/backends/arviz.py index 552a2afd44..80da0a89d8 100644 --- a/pymc/backends/arviz.py +++ b/pymc/backends/arviz.py @@ -251,7 +251,7 @@ def _extract_log_likelihood(self, trace): ( var, self.model.compile_fn( - self.model.logpt(var, sum=False)[0], + self.model.logp(var, sum=False)[0], inputs=self.model.value_vars, on_unused_input="ignore", ), @@ -263,7 +263,7 @@ def _extract_log_likelihood(self, trace): ( var, self.model.compile_fn( - self.model.logpt(var, sum=False)[0], + self.model.logp(var, sum=False)[0], inputs=self.model.value_vars, on_unused_input="ignore", ), diff --git a/pymc/distributions/__init__.py b/pymc/distributions/__init__.py index 8680528682..40ea9894a6 100644 --- a/pymc/distributions/__init__.py +++ b/pymc/distributions/__init__.py @@ -15,6 +15,7 @@ from pymc.distributions.logprob import ( # isort:skip logcdf, logp, + joint_logp, joint_logpt, ) @@ -191,6 +192,7 @@ "CAR", "PolyaGamma", "joint_logpt", + "joint_logp", "logp", "logcdf", ] diff --git a/pymc/distributions/continuous.py b/pymc/distributions/continuous.py index e1ade92dd2..ef20c39ca6 100644 --- a/pymc/distributions/continuous.py +++ b/pymc/distributions/continuous.py @@ -2558,7 +2558,7 @@ def logcdf(value, nu): return logcdf(Gamma.dist(alpha=nu / 2, beta=0.5), value) -# TODO: Remove this once logpt for multiplication is working! +# TODO: Remove this once logp for multiplication is working! class WeibullBetaRV(WeibullRV): ndims_params = [0, 0] diff --git a/pymc/distributions/logprob.py b/pymc/distributions/logprob.py index fb2b041ff8..b1f76d2b30 100644 --- a/pymc/distributions/logprob.py +++ b/pymc/distributions/logprob.py @@ -119,7 +119,15 @@ def _get_scaling( ) -def joint_logpt( +def joint_logpt(self, *args, **kwargs): + warnings.warn( + "logprob.joint_logpt has been deprecated. 
Use logprob.joint_logp instead.", + FutureWarning, + ) + return self.joint_logp(*args, **kwargs) + + +def joint_logp( var: Union[TensorVariable, List[TensorVariable]], rv_values: Optional[Union[TensorVariable, Dict[TensorVariable, TensorVariable]]] = None, *, @@ -159,14 +167,14 @@ def joint_logpt( """ # TODO: In future when we drop support for tag.value_var most of the following - # logic can be removed and logpt can just be a wrapper function that calls aeppl's + # logic can be removed and logp can just be a wrapper function that calls aeppl's # joint_logprob directly. # If var is not a list make it one. if not isinstance(var, (list, tuple)): var = [var] - # If logpt isn't provided values it is assumed that the tagged value var or + # If logp isn't provided values it is assumed that the tagged value var or # observation is the value variable for that particular RV. if rv_values is None: rv_values = {} @@ -251,7 +259,7 @@ def joint_logpt( "reference nonlocal variables." ) - # aeppl returns the logpt for every single value term we provided to it. This includes + # aeppl returns the logp for every single value term we provided to it. This includes # the extra values we plugged in above, so we filter those we actually wanted in the # same order they were given in. logp_var_dict = {} diff --git a/pymc/model.py b/pymc/model.py index aa159f28a7..694873c27c 100644 --- a/pymc/model.py +++ b/pymc/model.py @@ -57,7 +57,7 @@ ) from pymc.blocking import DictToArrayBijection, RaveledVars from pymc.data import GenTensorVariable, Minibatch -from pymc.distributions import joint_logpt +from pymc.distributions import joint_logp from pymc.distributions.logprob import _get_scaling from pymc.distributions.transforms import _default_transform from pymc.exceptions import ImputationWarning, SamplingError, ShapeError, ShapeWarning @@ -623,9 +623,9 @@ def logp_dlogp_function(self, grad_vars=None, tempered=False, **kwargs): raise ValueError(f"Can only compute the gradient of continuous types: {var}") if tempered: - costs = [self.varlogpt, self.datalogpt] + costs = [self.varlogp, self.datalogp] else: - costs = [self.logpt()] + costs = [self.logp()] input_vars = {i for i in graph_inputs(costs) if not isinstance(i, Constant)} extra_vars = [self.rvs_to_values.get(var, var) for var in self.free_RVs] @@ -654,7 +654,7 @@ def compile_logp( Whether to sum all logp terms or return elemwise logp for each variable. Defaults to True. 
""" - return self.model.compile_fn(self.logpt(vars=vars, jacobian=jacobian, sum=sum)) + return self.model.compile_fn(self.logp(vars=vars, jacobian=jacobian, sum=sum)) def compile_dlogp( self, @@ -749,7 +749,7 @@ def logp( rv_logps: List[TensorVariable] = [] if rv_values: - rv_logps = joint_logpt(list(rv_values.keys()), rv_values, sum=False, jacobian=jacobian) + rv_logps = joint_logp(list(rv_values.keys()), rv_values, sum=False, jacobian=jacobian) assert isinstance(rv_logps, list) # Replace random variables by their value variables in potential terms @@ -813,7 +813,7 @@ def dlogp( f"Requested variable {var} not found among the model variables" ) - cost = self.logpt(jacobian=jacobian) + cost = self.logp(jacobian=jacobian) return gradient(cost, value_vars) def d2logpt(self, *args, **kwargs): @@ -858,34 +858,66 @@ def d2logp( f"Requested variable {var} not found among the model variables" ) - cost = self.logpt(jacobian=jacobian) + cost = self.logp(jacobian=jacobian) return hessian(cost, value_vars) @property - def datalogpt(self) -> Variable: + def datalogpt(self, *args, **kwargs): + warnings.warn( + "Model.datalogpt has been deprecated. Use Model.datalogp instead.", + FutureWarning, + ) + return self.datalogp(*args, **kwargs) + + @property + def datalogp(self) -> Variable: """Aesara scalar of log-probability of the observed variables and potential terms""" - return self.observedlogpt + self.potentiallogpt + return self.observedlogp + self.potentiallogp + + @property + def varlogpt(self, *args, **kwargs): + warnings.warn( + "Model.varlogpt has been deprecated. Use Model.varlogp instead.", + FutureWarning, + ) + return self.varlogp(*args, **kwargs) @property - def varlogpt(self) -> Variable: + def varlogp(self) -> Variable: """Aesara scalar of log-probability of the unobserved random variables (excluding deterministic).""" - return self.logpt(vars=self.free_RVs) + return self.logp(vars=self.free_RVs) @property def varlogp_nojact(self) -> Variable: """Aesara scalar of log-probability of the unobserved random variables (excluding deterministic) without jacobian term.""" - return self.logpt(vars=self.free_RVs, jacobian=False) + return self.logp(vars=self.free_RVs, jacobian=False) @property - def observedlogpt(self) -> Variable: + def observedlogpt(self, *args, **kwargs): + warnings.warn( + "Model.observedlogpt has been deprecated. Use Model.observedlogp instead.", + FutureWarning, + ) + return self.observedlogp(*args, **kwargs) + + @property + def observedlogp(self) -> Variable: """Aesara scalar of log-probability of the observed variables""" - return self.logpt(vars=self.observed_RVs) + return self.logp(vars=self.observed_RVs) + + @property + def potentiallogpt(self, *args, **kwargs): + warnings.warn( + "Model.potentiallogpt has been deprecated. 
Use Model.potentiallogp instead.", + FutureWarning, + ) + return self.potentiallogp(*args, **kwargs) @property - def potentiallogpt(self) -> Variable: + def potentiallogp(self) -> Variable: """Aesara scalar of log-probability of the Potential terms""" # Convert random variables in Potential expression into their log-likelihood # inputs and apply their transforms, if any @@ -1776,7 +1808,7 @@ def point_logps(self, point=None, round_vals=2): point = self.initial_point() factors = self.basic_RVs + self.potentials - factor_logps_fn = [at.sum(factor) for factor in self.logpt(factors, sum=False)] + factor_logps_fn = [at.sum(factor) for factor in self.logp(factors, sum=False)] return { factor.name: np.round(np.asarray(factor_logp), round_vals) for factor, factor_logp in zip( diff --git a/pymc/sampling.py b/pymc/sampling.py index df3dcdd213..6827b49436 100644 --- a/pymc/sampling.py +++ b/pymc/sampling.py @@ -204,7 +204,7 @@ def assign_step_methods(model, step=None, methods=None, step_kwargs=None): # Use competence classmethods to select step methods for remaining # variables selected_steps = defaultdict(list) - model_logpt = model.logpt() + model_logp = model.logp() for var in model.value_vars: if var not in assigned_vars: @@ -212,7 +212,7 @@ def assign_step_methods(model, step=None, methods=None, step_kwargs=None): has_gradient = var.dtype not in discrete_types if has_gradient: try: - tg.grad(model_logpt, var) + tg.grad(model_logp, var) except (NotImplementedError, tg.NullTypeGradError): has_gradient = False diff --git a/pymc/sampling_jax.py b/pymc/sampling_jax.py index a087e005ca..4127dac702 100644 --- a/pymc/sampling_jax.py +++ b/pymc/sampling_jax.py @@ -100,10 +100,10 @@ def get_jaxified_graph( def get_jaxified_logp(model: Model, negative_logp=True) -> Callable: - model_logpt = model.logpt() + model_logp = model.logp() if not negative_logp: - model_logpt = -model_logpt - logp_fn = get_jaxified_graph(inputs=model.value_vars, outputs=[model_logpt]) + model_logp = -model_logp + logp_fn = get_jaxified_graph(inputs=model.value_vars, outputs=[model_logp]) def logp_fn_wrap(x): return logp_fn(*x)[0] @@ -136,8 +136,8 @@ def _get_log_likelihood(model: Model, samples, backend=None) -> Dict: """Compute log-likelihood for all observations""" data = {} for v in model.observed_RVs: - v_elemwise_logpt = model.logpt(v, sum=False) - jax_fn = get_jaxified_graph(inputs=model.value_vars, outputs=v_elemwise_logpt) + v_elemwise_logp = model.logp(v, sum=False) + jax_fn = get_jaxified_graph(inputs=model.value_vars, outputs=v_elemwise_logp) result = jax.jit(jax.vmap(jax.vmap(jax_fn)), backend=backend)(*samples)[0] data[v.name] = result return data diff --git a/pymc/smc/smc.py b/pymc/smc/smc.py index 139c736314..185ee3fb07 100644 --- a/pymc/smc/smc.py +++ b/pymc/smc/smc.py @@ -219,10 +219,10 @@ def _initialize_kernel(self): shared = make_shared_replacements(initial_point, self.variables, self.model) self.prior_logp_func = _logp_forw( - initial_point, [self.model.varlogpt], self.variables, shared + initial_point, [self.model.varlogp], self.variables, shared ) self.likelihood_logp_func = _logp_forw( - initial_point, [self.model.datalogpt], self.variables, shared + initial_point, [self.model.datalogp], self.variables, shared ) priors = [self.prior_logp_func(sample) for sample in self.tempered_posterior] diff --git a/pymc/step_methods/metropolis.py b/pymc/step_methods/metropolis.py index 532ac93890..418c65eb2d 100644 --- a/pymc/step_methods/metropolis.py +++ b/pymc/step_methods/metropolis.py @@ -226,7 +226,7 @@ def 
__init__( self.mode = mode shared = pm.make_shared_replacements(initial_values, vars, model) - self.delta_logp = delta_logp(initial_values, model.logpt(), vars, shared) + self.delta_logp = delta_logp(initial_values, model.logp(), vars, shared) super().__init__(vars, shared) def reset_tuning(self): @@ -794,7 +794,7 @@ def __init__( self.mode = mode shared = pm.make_shared_replacements(initial_values, vars, model) - self.delta_logp = delta_logp(initial_values, model.logpt(), vars, shared) + self.delta_logp = delta_logp(initial_values, model.logp(), vars, shared) super().__init__(vars, shared) def astep(self, q0: RaveledVars) -> Tuple[RaveledVars, List[Dict[str, Any]]]: @@ -957,7 +957,7 @@ def __init__( self.mode = mode shared = pm.make_shared_replacements(initial_values, vars, model) - self.delta_logp = delta_logp(initial_values, model.logpt(), vars, shared) + self.delta_logp = delta_logp(initial_values, model.logp(), vars, shared) super().__init__(vars, shared) def reset_tuning(self): diff --git a/pymc/step_methods/mlda.py b/pymc/step_methods/mlda.py index 441890ebbd..99970111d7 100644 --- a/pymc/step_methods/mlda.py +++ b/pymc/step_methods/mlda.py @@ -538,7 +538,7 @@ def __init__( # Construct Aesara function for current-level model likelihood # (for use in acceptance) shared = pm.make_shared_replacements(initial_values, vars, model) - self.delta_logp = delta_logp(initial_values, model.logpt(), vars, shared) + self.delta_logp = delta_logp(initial_values, model.logp(), vars, shared) # Construct Aesara function for below-level model likelihood # (for use in acceptance) @@ -547,7 +547,7 @@ def __init__( vars_below = pm.inputvars(vars_below) shared_below = pm.make_shared_replacements(initial_values, vars_below, model_below) self.delta_logp_below = delta_logp( - initial_values, model_below.logpt(), vars_below, shared_below + initial_values, model_below.logp(), vars_below, shared_below ) super().__init__(vars, shared) diff --git a/pymc/tests/test_distributions.py b/pymc/tests/test_distributions.py index 06ba3428d3..c7bd0868af 100644 --- a/pymc/tests/test_distributions.py +++ b/pymc/tests/test_distributions.py @@ -121,7 +121,7 @@ def polyagamma_cdf(*args, **kwargs): ZeroInflatedBinomial, ZeroInflatedNegativeBinomial, ZeroInflatedPoisson, - joint_logpt, + joint_logp, logcdf, logp, ) @@ -924,29 +924,29 @@ def RandomPdMatrix(n): return np.dot(A, A.T) + n * np.identity(n) -def test_hierarchical_logpt(): +def test_hierarchical_logp(): """Make sure there are no random variables in a model's log-likelihood graph.""" with pm.Model() as m: x = pm.Uniform("x", lower=0, upper=1) y = pm.Uniform("y", lower=0, upper=x) - logpt_ancestors = list(ancestors([m.logpt()])) - ops = {a.owner.op for a in logpt_ancestors if a.owner} + logp_ancestors = list(ancestors([m.logp()])) + ops = {a.owner.op for a in logp_ancestors if a.owner} assert len(ops) > 0 assert not any(isinstance(o, RandomVariable) for o in ops) - assert x.tag.value_var in logpt_ancestors - assert y.tag.value_var in logpt_ancestors + assert x.tag.value_var in logp_ancestors + assert y.tag.value_var in logp_ancestors -def test_hierarchical_obs_logpt(): +def test_hierarchical_obs_logp(): obs = np.array([0.5, 0.4, 5, 2]) with pm.Model() as model: x = pm.Uniform("x", 0, 1, observed=obs) pm.Uniform("y", x, 2, observed=obs) - logpt_ancestors = list(ancestors([model.logpt()])) - ops = {a.owner.op for a in logpt_ancestors if a.owner} + logp_ancestors = list(ancestors([model.logp()])) + ops = {a.owner.op for a in logp_ancestors if a.owner} assert len(ops) > 0 
assert not any(isinstance(o, RandomVariable) for o in ops) @@ -2638,29 +2638,29 @@ def test_continuous(self): UpperNormalTransform = Bound("uppertrans", dist, upper=10) BoundedNormalTransform = Bound("boundedtrans", dist, lower=1, upper=10) - assert joint_logpt(LowerNormal, -1).eval() == -np.inf - assert joint_logpt(UpperNormal, 1).eval() == -np.inf - assert joint_logpt(BoundedNormal, 0).eval() == -np.inf - assert joint_logpt(BoundedNormal, 11).eval() == -np.inf + assert joint_logp(LowerNormal, -1).eval() == -np.inf + assert joint_logp(UpperNormal, 1).eval() == -np.inf + assert joint_logp(BoundedNormal, 0).eval() == -np.inf + assert joint_logp(BoundedNormal, 11).eval() == -np.inf - assert joint_logpt(UnboundedNormal, 0).eval() != -np.inf - assert joint_logpt(UnboundedNormal, 11).eval() != -np.inf - assert joint_logpt(InfBoundedNormal, 0).eval() != -np.inf - assert joint_logpt(InfBoundedNormal, 11).eval() != -np.inf + assert joint_logp(UnboundedNormal, 0).eval() != -np.inf + assert joint_logp(UnboundedNormal, 11).eval() != -np.inf + assert joint_logp(InfBoundedNormal, 0).eval() != -np.inf + assert joint_logp(InfBoundedNormal, 11).eval() != -np.inf value = model.rvs_to_values[LowerNormalTransform] - assert joint_logpt(LowerNormalTransform, value).eval({value: -1}) != -np.inf + assert joint_logp(LowerNormalTransform, value).eval({value: -1}) != -np.inf value = model.rvs_to_values[UpperNormalTransform] - assert joint_logpt(UpperNormalTransform, value).eval({value: 1}) != -np.inf + assert joint_logp(UpperNormalTransform, value).eval({value: 1}) != -np.inf value = model.rvs_to_values[BoundedNormalTransform] - assert joint_logpt(BoundedNormalTransform, value).eval({value: 0}) != -np.inf - assert joint_logpt(BoundedNormalTransform, value).eval({value: 11}) != -np.inf + assert joint_logp(BoundedNormalTransform, value).eval({value: 0}) != -np.inf + assert joint_logp(BoundedNormalTransform, value).eval({value: 11}) != -np.inf ref_dist = Normal.dist(mu=0, sigma=1) - assert np.allclose(joint_logpt(UnboundedNormal, 5).eval(), joint_logpt(ref_dist, 5).eval()) - assert np.allclose(joint_logpt(LowerNormal, 5).eval(), joint_logpt(ref_dist, 5).eval()) - assert np.allclose(joint_logpt(UpperNormal, -5).eval(), joint_logpt(ref_dist, 5).eval()) - assert np.allclose(joint_logpt(BoundedNormal, 5).eval(), joint_logpt(ref_dist, 5).eval()) + assert np.allclose(joint_logp(UnboundedNormal, 5).eval(), joint_logp(ref_dist, 5).eval()) + assert np.allclose(joint_logp(LowerNormal, 5).eval(), joint_logp(ref_dist, 5).eval()) + assert np.allclose(joint_logp(UpperNormal, -5).eval(), joint_logp(ref_dist, 5).eval()) + assert np.allclose(joint_logp(BoundedNormal, 5).eval(), joint_logp(ref_dist, 5).eval()) def test_discrete(self): with Model() as model: @@ -2670,19 +2670,19 @@ def test_discrete(self): UpperPoisson = Bound("upper", dist, upper=10) BoundedPoisson = Bound("bounded", dist, lower=1, upper=10) - assert joint_logpt(LowerPoisson, 0).eval() == -np.inf - assert joint_logpt(UpperPoisson, 11).eval() == -np.inf - assert joint_logpt(BoundedPoisson, 0).eval() == -np.inf - assert joint_logpt(BoundedPoisson, 11).eval() == -np.inf + assert joint_logp(LowerPoisson, 0).eval() == -np.inf + assert joint_logp(UpperPoisson, 11).eval() == -np.inf + assert joint_logp(BoundedPoisson, 0).eval() == -np.inf + assert joint_logp(BoundedPoisson, 11).eval() == -np.inf - assert joint_logpt(UnboundedPoisson, 0).eval() != -np.inf - assert joint_logpt(UnboundedPoisson, 11).eval() != -np.inf + assert joint_logp(UnboundedPoisson, 0).eval() != -np.inf + 
assert joint_logp(UnboundedPoisson, 11).eval() != -np.inf ref_dist = Poisson.dist(mu=4) - assert np.allclose(joint_logpt(UnboundedPoisson, 5).eval(), joint_logpt(ref_dist, 5).eval()) - assert np.allclose(joint_logpt(LowerPoisson, 5).eval(), joint_logpt(ref_dist, 5).eval()) - assert np.allclose(joint_logpt(UpperPoisson, 5).eval(), joint_logpt(ref_dist, 5).eval()) - assert np.allclose(joint_logpt(BoundedPoisson, 5).eval(), joint_logpt(ref_dist, 5).eval()) + assert np.allclose(joint_logp(UnboundedPoisson, 5).eval(), joint_logp(ref_dist, 5).eval()) + assert np.allclose(joint_logp(LowerPoisson, 5).eval(), joint_logp(ref_dist, 5).eval()) + assert np.allclose(joint_logp(UpperPoisson, 5).eval(), joint_logp(ref_dist, 5).eval()) + assert np.allclose(joint_logp(BoundedPoisson, 5).eval(), joint_logp(ref_dist, 5).eval()) def create_invalid_distribution(self): class MyNormal(RandomVariable): @@ -2786,19 +2786,19 @@ def test_array_bound(self): UpperPoisson = Bound("upper", dist, upper=[np.inf, 10], transform=None) BoundedPoisson = Bound("bounded", dist, lower=[1, 2], upper=[9, 10], transform=None) - first, second = joint_logpt(LowerPoisson, [0, 0], sum=False)[0].eval() + first, second = joint_logp(LowerPoisson, [0, 0], sum=False)[0].eval() assert first == -np.inf assert second != -np.inf - first, second = joint_logpt(UpperPoisson, [11, 11], sum=False)[0].eval() + first, second = joint_logp(UpperPoisson, [11, 11], sum=False)[0].eval() assert first != -np.inf assert second == -np.inf - first, second = joint_logpt(BoundedPoisson, [1, 1], sum=False)[0].eval() + first, second = joint_logp(BoundedPoisson, [1, 1], sum=False)[0].eval() assert first != -np.inf assert second == -np.inf - first, second = joint_logpt(BoundedPoisson, [10, 10], sum=False)[0].eval() + first, second = joint_logp(BoundedPoisson, [10, 10], sum=False)[0].eval() assert first == -np.inf assert second != -np.inf @@ -2914,8 +2914,8 @@ def test_orderedlogistic_dimensions(shape): p=p, observed=obs, ) - ologp = joint_logpt(ol, np.ones_like(obs), sum=True).eval() * loge - clogp = joint_logpt(c, np.ones_like(obs), sum=True).eval() * loge + ologp = joint_logp(ol, np.ones_like(obs), sum=True).eval() * loge + clogp = joint_logp(c, np.ones_like(obs), sum=True).eval() * loge expected = -np.prod((size,) + shape) assert c.owner.inputs[3].ndim == (len(shape) + 1) @@ -3157,7 +3157,7 @@ def logp(value, mu): a_val = np.random.normal(loc=mu_val, scale=1, size=to_tuple(size) + (supp_shape,)).astype( aesara.config.floatX ) - log_densityt = joint_logpt(a, a.tag.value_var, sum=False)[0] + log_densityt = joint_logp(a, a.tag.value_var, sum=False)[0] assert log_densityt.eval( {a.tag.value_var: a_val, mu.tag.value_var: mu_val}, ).shape == to_tuple(size) @@ -3288,7 +3288,7 @@ def test_no_warning_logp(self): sd_dist = pm.Exponential.dist(1, size=3) x = pm.LKJCholeskyCov("x", n=3, eta=1, sd_dist=sd_dist) with pytest.warns(None) as record: - m.logpt() + m.logp() assert not record @pytest.mark.parametrize( diff --git a/pymc/tests/test_distributions_moments.py b/pymc/tests/test_distributions_moments.py index 230989000c..dbe4ae60ba 100644 --- a/pymc/tests/test_distributions_moments.py +++ b/pymc/tests/test_distributions_moments.py @@ -71,7 +71,7 @@ ZeroInflatedPoisson, ) from pymc.distributions.distribution import _moment, moment -from pymc.distributions.logprob import joint_logpt +from pymc.distributions.logprob import joint_logp from pymc.distributions.shape_utils import rv_size_is_none, to_tuple from pymc.initial_point import make_initial_point_fn from pymc.model import 
Model @@ -163,7 +163,7 @@ def assert_moment_is_expected(model, expected, check_finite_logp=True): assert np.allclose(moment, expected) if check_finite_logp: - logp_moment = joint_logpt(model["x"], at.constant(moment), transformed=False).eval() + logp_moment = joint_logp(model["x"], at.constant(moment), transformed=False).eval() assert np.isfinite(logp_moment) diff --git a/pymc/tests/test_logprob.py b/pymc/tests/test_logprob.py index e3a7d846d8..66abd59ded 100644 --- a/pymc/tests/test_logprob.py +++ b/pymc/tests/test_logprob.py @@ -42,7 +42,7 @@ from pymc.distributions.logprob import ( _get_scaling, ignore_logprob, - joint_logpt, + joint_logp, logcdf, logp, ) @@ -102,7 +102,7 @@ def test_get_scaling(): assert _get_scaling(total_size, shape=rv_var.shape, ndim=rv_var.ndim).eval() == 1.0 -def test_joint_logpt_basic(): +def test_joint_logp_basic(): """Make sure we can compute a log-likelihood for a hierarchical model with transforms.""" with Model() as m: @@ -119,7 +119,7 @@ def test_joint_logpt_basic(): c_value_var = m.rvs_to_values[c] - b_logp = joint_logpt(b, b_value_var, sum=False) + b_logp = joint_logp(b, b_value_var, sum=False) res_ancestors = list(walk_model(b_logp, walk_past_rvs=True)) res_rv_ancestors = [ @@ -142,7 +142,7 @@ def test_joint_logpt_basic(): ((np.array([0, 1, 4]), np.array([0, 1, 4])), (5, 5)), ], ) -def test_joint_logpt_incsubtensor(indices, size): +def test_joint_logp_incsubtensor(indices, size): """Make sure we can compute a log-likelihood for ``Y[idx] = data`` where ``Y`` is univariate.""" mu = floatX(np.power(10, np.arange(np.prod(size)))).reshape(size) @@ -163,7 +163,7 @@ def test_joint_logpt_incsubtensor(indices, size): a_idx_value_var = a_idx.type() a_idx_value_var.name = "a_idx_value" - a_idx_logp = joint_logpt(a_idx, {a_idx: a_value_var}, sum=False) + a_idx_logp = joint_logp(a_idx, {a_idx: a_value_var}, sum=False) logp_vals = a_idx_logp[0].eval({a_value_var: a_val}) @@ -177,7 +177,7 @@ def test_joint_logpt_incsubtensor(indices, size): np.testing.assert_almost_equal(logp_vals, exp_obs_logps) -def test_joint_logpt_subtensor(): +def test_joint_logp_subtensor(): """Make sure we can compute a log-likelihood for ``Y[I]`` where ``Y`` and ``I`` are random variables.""" size = 5 @@ -205,7 +205,7 @@ def test_joint_logpt_subtensor(): I_value_var = I_rv.type() I_value_var.name = "I_value" - A_idx_logps = joint_logpt(A_idx, {A_idx: A_idx_value_var, I_rv: I_value_var}, sum=False) + A_idx_logps = joint_logp(A_idx, {A_idx: A_idx_value_var, I_rv: I_value_var}, sum=False) A_idx_logp = at.add(*A_idx_logps) logp_vals_fn = aesara.function([A_idx_value_var, I_value_var], A_idx_logp) @@ -289,8 +289,8 @@ def test_model_unchanged_logprob_access(): c = Uniform("c", lower=a - 1, upper=1) original_inputs = set(aesara.graph.graph_inputs([c])) - # Extract model.logpt - model.logpt() + # Extract model.logp + model.logp() new_inputs = set(aesara.graph.graph_inputs([c])) assert original_inputs == new_inputs @@ -301,7 +301,7 @@ def test_unexpected_rvs(): y = DensityDist("y", logp=lambda *args: x) with pytest.raises(ValueError, match="^Random variables detected in the logp graph"): - model.logpt() + model.logp() def test_ignore_logprob_basic(): @@ -331,9 +331,9 @@ def logp(value, x): y = DensityDist("y", x, logp=logp) # Aeppl raises a KeyError when it finds an unexpected RV with pytest.raises(KeyError): - joint_logpt([y], {y: y.type()}) + joint_logp([y], {y: y.type()}) with Model() as m: x = ignore_logprob(Normal.dist()) y = DensityDist("y", x, logp=logp) - assert joint_logpt([y], {y: y.type()}) + 
assert joint_logp([y], {y: y.type()}) diff --git a/pymc/tests/test_minibatches.py b/pymc/tests/test_minibatches.py index 62ff81874a..58bf66c138 100644 --- a/pymc/tests/test_minibatches.py +++ b/pymc/tests/test_minibatches.py @@ -170,11 +170,11 @@ class TestScaling: def test_density_scaling(self): with pm.Model() as model1: Normal("n", observed=[[1]], total_size=1) - p1 = aesara.function([], model1.logpt()) + p1 = aesara.function([], model1.logp()) with pm.Model() as model2: Normal("n", observed=[[1]], total_size=2) - p2 = aesara.function([], model2.logpt()) + p2 = aesara.function([], model2.logp()) assert p1() * 2 == p2() def test_density_scaling_with_generator(self): @@ -189,12 +189,12 @@ def true_dens(): # We have same size models with pm.Model() as model1: Normal("n", observed=gen1(), total_size=100) - p1 = aesara.function([], model1.logpt()) + p1 = aesara.function([], model1.logp()) with pm.Model() as model2: gen_var = generator(gen2()) Normal("n", observed=gen_var, total_size=100) - p2 = aesara.function([], model2.logpt()) + p2 = aesara.function([], model2.logp()) for i in range(10): _1, _2, _t = p1(), p2(), next(t) @@ -208,12 +208,12 @@ def test_gradient_with_scaling(self): genvar = generator(gen1()) m = Normal("m") Normal("n", observed=genvar, total_size=1000) - grad1 = aesara.function([m.tag.value_var], at.grad(model1.logpt(), m.tag.value_var)) + grad1 = aesara.function([m.tag.value_var], at.grad(model1.logp(), m.tag.value_var)) with pm.Model() as model2: m = Normal("m") shavar = aesara.shared(np.ones((1000, 100))) Normal("n", observed=shavar) - grad2 = aesara.function([m.tag.value_var], at.grad(model2.logpt(), m.tag.value_var)) + grad2 = aesara.function([m.tag.value_var], at.grad(model2.logp(), m.tag.value_var)) for i in range(10): shavar.set_value(np.ones((100, 100)) * i) @@ -224,27 +224,27 @@ def test_gradient_with_scaling(self): def test_multidim_scaling(self): with pm.Model() as model0: Normal("n", observed=[[1, 1], [1, 1]], total_size=[]) - p0 = aesara.function([], model0.logpt()) + p0 = aesara.function([], model0.logp()) with pm.Model() as model1: Normal("n", observed=[[1, 1], [1, 1]], total_size=[2, 2]) - p1 = aesara.function([], model1.logpt()) + p1 = aesara.function([], model1.logp()) with pm.Model() as model2: Normal("n", observed=[[1], [1]], total_size=[2, 2]) - p2 = aesara.function([], model2.logpt()) + p2 = aesara.function([], model2.logp()) with pm.Model() as model3: Normal("n", observed=[[1, 1]], total_size=[2, 2]) - p3 = aesara.function([], model3.logpt()) + p3 = aesara.function([], model3.logp()) with pm.Model() as model4: Normal("n", observed=[[1]], total_size=[2, 2]) - p4 = aesara.function([], model4.logpt()) + p4 = aesara.function([], model4.logp()) with pm.Model() as model5: Normal("n", observed=[[1]], total_size=[2, Ellipsis, 2]) - p5 = aesara.function([], model5.logpt()) + p5 = aesara.function([], model5.logp()) _p0 = p0() assert ( np.allclose(_p0, p1()) @@ -258,27 +258,27 @@ def test_common_errors(self): with pytest.raises(ValueError) as e: with pm.Model() as m: Normal("n", observed=[[1]], total_size=[2, Ellipsis, 2, 2]) - m.logpt() + m.logp() assert "Length of" in str(e.value) with pytest.raises(ValueError) as e: with pm.Model() as m: Normal("n", observed=[[1]], total_size=[2, 2, 2]) - m.logpt() + m.logp() assert "Length of" in str(e.value) with pytest.raises(TypeError) as e: with pm.Model() as m: Normal("n", observed=[[1]], total_size="foo") - m.logpt() + m.logp() assert "Unrecognized" in str(e.value) with pytest.raises(TypeError) as e: with pm.Model() as 
m: Normal("n", observed=[[1]], total_size=["foo"]) - m.logpt() + m.logp() assert "Unrecognized" in str(e.value) with pytest.raises(ValueError) as e: with pm.Model() as m: Normal("n", observed=[[1]], total_size=[Ellipsis, Ellipsis]) - m.logpt() + m.logp() assert "Double Ellipsis" in str(e.value) def test_mixed1(self): @@ -296,11 +296,11 @@ def test_mixed2(self): def test_free_rv(self): with pm.Model() as model4: Normal("n", observed=[[1, 1], [1, 1]], total_size=[2, 2]) - p4 = aesara.function([], model4.logpt()) + p4 = aesara.function([], model4.logp()) with pm.Model() as model5: n = Normal("n", total_size=[2, Ellipsis, 2], size=(2, 2)) - p5 = aesara.function([n.tag.value_var], model5.logpt()) + p5 = aesara.function([n.tag.value_var], model5.logp()) assert p4() == p5(pm.floatX([[1]])) assert p4() == p5(pm.floatX([[1, 1], [1, 1]])) diff --git a/pymc/tests/test_missing.py b/pymc/tests/test_missing.py index cbe3165874..2a7f92be78 100644 --- a/pymc/tests/test_missing.py +++ b/pymc/tests/test_missing.py @@ -20,7 +20,7 @@ from aesara.graph import graph_inputs from numpy import array, ma -from pymc import joint_logpt +from pymc import joint_logp from pymc.distributions import Dirichlet, Gamma, Normal, Uniform from pymc.exceptions import ImputationWarning from pymc.model import Model @@ -220,12 +220,12 @@ def test_missing_vector_parameter(): def test_missing_symmetric(): - """Check that logpt works when partially observed variable have equal observed and + """Check that logp works when partially observed variable have equal observed and unobserved dimensions. This would fail in a previous implementation because the two variables would be equivalent and one of them would be discarded during MergeOptimization while - buling the logpt graph + buling the logp graph """ with Model() as m: x = Gamma("x", alpha=3, beta=10, observed=np.array([1, np.nan])) @@ -236,7 +236,7 @@ def test_missing_symmetric(): x_unobs_rv = m["x_missing"] x_unobs_vv = m.rvs_to_values[x_unobs_rv] - logp = joint_logpt([x_obs_rv, x_unobs_rv], {x_obs_rv: x_obs_vv, x_unobs_rv: x_unobs_vv}) + logp = joint_logp([x_obs_rv, x_unobs_rv], {x_obs_rv: x_obs_vv, x_unobs_rv: x_unobs_vv}) logp_inputs = list(graph_inputs([logp])) assert x_obs_vv in logp_inputs assert x_unobs_vv in logp_inputs diff --git a/pymc/tests/test_mixture.py b/pymc/tests/test_mixture.py index 4f19f9c3e2..46249f93f9 100644 --- a/pymc/tests/test_mixture.py +++ b/pymc/tests/test_mixture.py @@ -927,7 +927,7 @@ def logp_matches(self, mixture, latent_mix, z, npop, model): def loose_logp(model, vars): """Return logp function that accepts dictionary with unused variables as input""" return model.compile_fn( - model.logpt(vars=vars, sum=False), + model.logp(vars=vars, sum=False), inputs=model.value_vars, on_unused_input="ignore", ) diff --git a/pymc/tests/test_model.py b/pymc/tests/test_model.py index 2f8683e9e3..4c4d86479a 100644 --- a/pymc/tests/test_model.py +++ b/pymc/tests/test_model.py @@ -677,7 +677,7 @@ def test_set_initval(): assert y in model.initial_values -def test_datalogpt_multiple_shapes(): +def test_datalogp_multiple_shapes(): with pm.Model() as m: x = pm.Normal("x", 0, 1) z1 = pm.Potential("z1", x) @@ -688,7 +688,7 @@ def test_datalogpt_multiple_shapes(): # This would raise a TypeError, see #4803 and #4804 x_val = m.rvs_to_values[x] - m.datalogpt.eval({x_val: 0}) + m.datalogp.eval({x_val: 0}) def test_nested_model_coords(): diff --git a/pymc/tests/test_profile.py b/pymc/tests/test_profile.py index 60c9260122..55d54cbaee 100644 --- a/pymc/tests/test_profile.py +++ 
b/pymc/tests/test_profile.py @@ -20,12 +20,12 @@ def setup_method(self): _, self.model, _ = simple_model() def test_profile_model(self): - assert self.model.profile(self.model.logpt()).fct_call_time > 0 + assert self.model.profile(self.model.logp()).fct_call_time > 0 def test_profile_variable(self): rv = self.model.basic_RVs[0] - assert self.model.profile(self.model.logpt(vars=[rv], sum=False)).fct_call_time + assert self.model.profile(self.model.logp(vars=[rv], sum=False)).fct_call_time def test_profile_count(self): count = 1005 - assert self.model.profile(self.model.logpt(), n=count).fct_callcount == count + assert self.model.profile(self.model.logp(), n=count).fct_callcount == count diff --git a/pymc/tests/test_smc.py b/pymc/tests/test_smc.py index 14cef4624d..a040221cf8 100644 --- a/pymc/tests/test_smc.py +++ b/pymc/tests/test_smc.py @@ -303,7 +303,7 @@ def setup_class(self): s = pm.Simulator("s", self.normal_sim, a, b, observed=self.data) def test_one_gaussian(self): - assert self.count_rvs(self.SMABC_test.logpt()) == 1 + assert self.count_rvs(self.SMABC_test.logp()) == 1 with self.SMABC_test: trace = pm.sample_smc(draws=1000, chains=1, return_inferencedata=False) @@ -339,7 +339,7 @@ def test_custom_dist_sum_stat(self, floatX): observed=self.data, ) - assert self.count_rvs(m.logpt()) == 1 + assert self.count_rvs(m.logp()) == 1 with m: pm.sample_smc(draws=100) @@ -362,7 +362,7 @@ def test_custom_dist_sum_stat_scalar(self, floatX): sum_stat=self.quantiles, observed=scalar_data, ) - assert self.count_rvs(m.logpt()) == 1 + assert self.count_rvs(m.logp()) == 1 with pm.Model() as m: s = pm.Simulator( @@ -374,10 +374,10 @@ def test_custom_dist_sum_stat_scalar(self, floatX): sum_stat="mean", observed=scalar_data, ) - assert self.count_rvs(m.logpt()) == 1 + assert self.count_rvs(m.logp()) == 1 def test_model_with_potential(self): - assert self.count_rvs(self.SMABC_potential.logpt()) == 1 + assert self.count_rvs(self.SMABC_potential.logp()) == 1 with self.SMABC_potential: trace = pm.sample_smc(draws=100, chains=1, return_inferencedata=False) @@ -421,17 +421,17 @@ def test_multiple_simulators(self): observed=data2, ) - assert self.count_rvs(m.logpt()) == 2 + assert self.count_rvs(m.logp()) == 2 # Check that the logps use the correct methods a_val = m.rvs_to_values[a] sim1_val = m.rvs_to_values[sim1] - logp_sim1 = pm.joint_logpt(sim1, sim1_val) + logp_sim1 = pm.joint_logp(sim1, sim1_val) logp_sim1_fn = aesara.function([a_val], logp_sim1) b_val = m.rvs_to_values[b] sim2_val = m.rvs_to_values[sim2] - logp_sim2 = pm.joint_logpt(sim2, sim2_val) + logp_sim2 = pm.joint_logp(sim2, sim2_val) logp_sim2_fn = aesara.function([b_val], logp_sim2) assert any( @@ -471,7 +471,7 @@ def test_nested_simulators(self): observed=data, ) - assert self.count_rvs(m.logpt()) == 2 + assert self.count_rvs(m.logp()) == 2 with m: trace = pm.sample_smc(return_inferencedata=False) diff --git a/pymc/tests/test_transforms.py b/pymc/tests/test_transforms.py index 493f418d33..d939d0acfb 100644 --- a/pymc/tests/test_transforms.py +++ b/pymc/tests/test_transforms.py @@ -24,7 +24,7 @@ import pymc.distributions.transforms as tr from pymc.aesaraf import floatX, jacobian -from pymc.distributions import joint_logpt +from pymc.distributions import joint_logp from pymc.tests.checks import close_to, close_to_logical from pymc.tests.helpers import SeededTest from pymc.tests.test_distributions import ( @@ -287,10 +287,10 @@ def check_transform_elementwise_logp(self, model): x_val_untransf = at.constant(test_array_untransf).type() jacob_det = 
transform.log_jac_det(test_array_transf, *x.owner.inputs) - assert joint_logpt(x, sum=False)[0].ndim == x.ndim == jacob_det.ndim + assert joint_logp(x, sum=False)[0].ndim == x.ndim == jacob_det.ndim - v1 = joint_logpt(x, x_val_transf, jacobian=False).eval({x_val_transf: test_array_transf}) - v2 = joint_logpt(x, x_val_untransf, transformed=False).eval( + v1 = joint_logp(x, x_val_transf, jacobian=False).eval({x_val_transf: test_array_transf}) + v2 = joint_logp(x, x_val_untransf, transformed=False).eval( {x_val_untransf: test_array_untransf} ) close_to(v1, v2, tol) @@ -310,13 +310,13 @@ def check_vectortransform_elementwise_logp(self, model): jacob_det = transform.log_jac_det(test_array_transf, *x.owner.inputs) # Original distribution is univariate if x.owner.op.ndim_supp == 0: - assert joint_logpt(x, sum=False)[0].ndim == x.ndim == (jacob_det.ndim + 1) + assert joint_logp(x, sum=False)[0].ndim == x.ndim == (jacob_det.ndim + 1) # Original distribution is multivariate else: - assert joint_logpt(x, sum=False)[0].ndim == (x.ndim - 1) == jacob_det.ndim + assert joint_logp(x, sum=False)[0].ndim == (x.ndim - 1) == jacob_det.ndim - a = joint_logpt(x, x_val_transf, jacobian=False).eval({x_val_transf: test_array_transf}) - b = joint_logpt(x, x_val_untransf, transformed=False).eval( + a = joint_logp(x, x_val_transf, jacobian=False).eval({x_val_transf: test_array_transf}) + b = joint_logp(x, x_val_untransf, transformed=False).eval( {x_val_untransf: test_array_untransf} ) # Hack to get relative tolerance diff --git a/pymc/tuning/scaling.py b/pymc/tuning/scaling.py index 28471a0dda..2209d29cca 100644 --- a/pymc/tuning/scaling.py +++ b/pymc/tuning/scaling.py @@ -75,7 +75,7 @@ def find_hessian_diag(point, vars=None, model=None): Variables for which Hessian is to be calculated. """ model = modelcontext(model) - H = model.compile_fn(hessian_diag(model.logpt(), vars)) + H = model.compile_fn(hessian_diag(model.logp(), vars)) return H(Point(point, model=model)) diff --git a/pymc/variational/opvi.py b/pymc/variational/opvi.py index 5f2efce143..f4080bdbba 100644 --- a/pymc/variational/opvi.py +++ b/pymc/variational/opvi.py @@ -1232,7 +1232,7 @@ def logq_norm(self): def _sized_symbolic_varlogp_and_datalogp(self): """*Dev* - computes sampled prior term from model via `aesara.scan`""" varlogp_s, datalogp_s = self.symbolic_sample_over_posterior( - [self.model.varlogpt, self.model.datalogpt] + [self.model.varlogp, self.model.datalogp] ) return varlogp_s, datalogp_s # both shape (s,) @@ -1269,7 +1269,7 @@ def datalogp(self): @node_property def _single_symbolic_varlogp_and_datalogp(self): """*Dev* - computes sampled prior term from model via `aesara.scan`""" - varlogp, datalogp = self.symbolic_single_sample([self.model.varlogpt, self.model.datalogpt]) + varlogp, datalogp = self.symbolic_single_sample([self.model.varlogp, self.model.datalogp]) return varlogp, datalogp @node_property From ce0e1d67a17c0d29ad796051520b9f0e09575132 Mon Sep 17 00:00:00 2001 From: Fernando Irarrazaval Date: Tue, 7 Jun 2022 22:09:32 -0400 Subject: [PATCH 17/28] Fix issue with d2logpt --- pymc/model.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pymc/model.py b/pymc/model.py index 694873c27c..b0991e1758 100644 --- a/pymc/model.py +++ b/pymc/model.py @@ -821,7 +821,7 @@ def d2logpt(self, *args, **kwargs): "Model.d2logpt has been deprecated. 
Use Model.d2logp instead.", FutureWarning, ) - return self.logp(*args, **kwargs) + return self.d2logp(*args, **kwargs) def d2logp( self, From 242d72f4f5f56c4d30e1e42a10cf31eefef5033e Mon Sep 17 00:00:00 2001 From: Fernando Irarrazaval Date: Wed, 8 Jun 2022 08:48:56 -0400 Subject: [PATCH 18/28] Added tests --- pymc/distributions/logprob.py | 5 +++-- pymc/model.py | 16 ++++++++-------- pymc/tests/test_logprob.py | 4 ++++ pymc/tests/test_model.py | 20 ++++++++++++++++++++ 4 files changed, 35 insertions(+), 10 deletions(-) diff --git a/pymc/distributions/logprob.py b/pymc/distributions/logprob.py index b1f76d2b30..620c6ec511 100644 --- a/pymc/distributions/logprob.py +++ b/pymc/distributions/logprob.py @@ -11,6 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +import warnings from collections.abc import Mapping from typing import Dict, List, Optional, Sequence, Union @@ -119,12 +120,12 @@ def _get_scaling( ) -def joint_logpt(self, *args, **kwargs): +def joint_logpt(*args, **kwargs): warnings.warn( "logprob.joint_logpt has been deprecated. Use logprob.joint_logp instead.", FutureWarning, ) - return self.joint_logp(*args, **kwargs) + return joint_logp(*args, **kwargs) def joint_logp( diff --git a/pymc/model.py b/pymc/model.py index b0991e1758..3c38477e65 100644 --- a/pymc/model.py +++ b/pymc/model.py @@ -862,12 +862,12 @@ def d2logp( return hessian(cost, value_vars) @property - def datalogpt(self, *args, **kwargs): + def datalogpt(self): warnings.warn( "Model.datalogpt has been deprecated. Use Model.datalogp instead.", FutureWarning, ) - return self.datalogp(*args, **kwargs) + return self.datalogp @property def datalogp(self) -> Variable: @@ -876,12 +876,12 @@ def datalogp(self) -> Variable: return self.observedlogp + self.potentiallogp @property - def varlogpt(self, *args, **kwargs): + def varlogpt(self): warnings.warn( "Model.varlogpt has been deprecated. Use Model.varlogp instead.", FutureWarning, ) - return self.varlogp(*args, **kwargs) + return self.varlogp @property def varlogp(self) -> Variable: @@ -896,12 +896,12 @@ def varlogp_nojact(self) -> Variable: return self.logp(vars=self.free_RVs, jacobian=False) @property - def observedlogpt(self, *args, **kwargs): + def observedlogpt(self): warnings.warn( "Model.observedlogpt has been deprecated. Use Model.observedlogp instead.", FutureWarning, ) - return self.observedlogp(*args, **kwargs) + return self.observedlogp @property def observedlogp(self) -> Variable: @@ -909,12 +909,12 @@ def observedlogp(self) -> Variable: return self.logp(vars=self.observed_RVs) @property - def potentiallogpt(self, *args, **kwargs): + def potentiallogpt(self): warnings.warn( "Model.potentiallogpt has been deprecated. 
Use Model.potentiallogp instead.", FutureWarning, ) - return self.potentiallogp(*args, **kwargs) + return self.potentiallogp @property def potentiallogp(self) -> Variable: diff --git a/pymc/tests/test_logprob.py b/pymc/tests/test_logprob.py index 66abd59ded..d0be51e833 100644 --- a/pymc/tests/test_logprob.py +++ b/pymc/tests/test_logprob.py @@ -43,6 +43,7 @@ _get_scaling, ignore_logprob, joint_logp, + joint_logpt, logcdf, logp, ) @@ -121,6 +122,9 @@ def test_joint_logp_basic(): b_logp = joint_logp(b, b_value_var, sum=False) + with pytest.warns(FutureWarning): + b_logpt = joint_logpt(b, b_value_var, sum=False) + res_ancestors = list(walk_model(b_logp, walk_past_rvs=True)) res_rv_ancestors = [ v for v in res_ancestors if v.owner and isinstance(v.owner.op, RandomVariable) diff --git a/pymc/tests/test_model.py b/pymc/tests/test_model.py index 4c4d86479a..11a7380bc9 100644 --- a/pymc/tests/test_model.py +++ b/pymc/tests/test_model.py @@ -880,6 +880,26 @@ def test_set_data_indirect_resize_with_coords(): pmodel.set_data("mdata", [1, 2], coords=dict(mdim=[1, 2, 3])) +def test_model_logpt_deprecation_warning(): + with pm.Model() as m: + x = pm.Normal("x", 0, 1, size=2) + y = pm.LogNormal("y", 0, 1, size=2) + + with pytest.warns(FutureWarning): + m.logpt() + with pytest.warns(FutureWarning): + m.dlogpt() + with pytest.warns(FutureWarning): + m.d2logpt() + + with pytest.warns(FutureWarning): + m.datalogpt + with pytest.warns(FutureWarning): + m.varlogpt + with pytest.warns(FutureWarning): + m.observedlogpt + + @pytest.mark.parametrize("jacobian", [True, False]) def test_model_logp(jacobian): with pm.Model() as m: From a8ef69a1112c1bc52a69bc7e522d53e3a2fae674 Mon Sep 17 00:00:00 2001 From: Fernando Irarrazaval Date: Wed, 8 Jun 2022 08:56:38 -0400 Subject: [PATCH 19/28] Fix typo --- pymc/model.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pymc/model.py b/pymc/model.py index 3c38477e65..242f6f7ef6 100644 --- a/pymc/model.py +++ b/pymc/model.py @@ -776,7 +776,7 @@ def dlogpt(self, *args, **kwargs): "Model.dlogpt has been deprecated. Use Model.dlogp instead.", FutureWarning, ) - return self.logp(*args, **kwargs) + return self.dlogp(*args, **kwargs) def dlogp( self, From d3dcc413241c2f6dc41b689e1cb0dd4ea9c05396 Mon Sep 17 00:00:00 2001 From: Fernando Irarrazaval Date: Wed, 8 Jun 2022 08:58:07 -0400 Subject: [PATCH 20/28] Updated release notes for 4.0 --- RELEASE-NOTES.md | 4 ++-- docs/source/learn/core_notebooks/pymc_aesara.ipynb | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/RELEASE-NOTES.md b/RELEASE-NOTES.md index 9187ba91ef..f3a61873aa 100644 --- a/RELEASE-NOTES.md +++ b/RELEASE-NOTES.md @@ -118,8 +118,8 @@ _Read on if you're a developer. Or curious. Or both._ - `pm.GaussianRandomWalk` initial distribution defaults to zero-centered normal with sigma=100 instead of flat (see[#5779](https://github.com/pymc-devs/pymc/pull/5779)) - `pm.AR` initial distribution defaults to unit normal instead of flat (see[#5779](https://github.com/pymc-devs/pymc/pull/5779)) - - `logpt`, `logpt_sum`, `logp_elemwiset` and `nojac` variations were removed. Use `Model.logpt(jacobian=True/False, sum=True/False)` instead. - - `dlogp_nojact` and `d2logp_nojact` were removed. Use `Model.dlogpt` and `d2logpt` with `jacobian=False` instead. + - `logpt`, `logpt_sum`, `logp_elemwiset` and `nojac` variations were removed. Use `Model.logp(jacobian=True/False, sum=True/False)` instead. + - `dlogp_nojact` and `d2logp_nojact` were removed. 
Use `Model.dlogp` and `d2logp` with `jacobian=False` instead. - `model.makefn` is now called `Model.compile_fn`, and `model.fn` was removed. - Methods starting with `fast_*`, such as `Model.fast_logp`, were removed. Same applies to `PointFunc` classes - `Model(model=...)` kwarg was removed diff --git a/docs/source/learn/core_notebooks/pymc_aesara.ipynb b/docs/source/learn/core_notebooks/pymc_aesara.ipynb index 329a3aa6d4..48e24053fe 100644 --- a/docs/source/learn/core_notebooks/pymc_aesara.ipynb +++ b/docs/source/learn/core_notebooks/pymc_aesara.ipynb @@ -1844,7 +1844,7 @@ } }, "source": [ - "`pymc` models provide some helpful routines to facilitating the conversion of `RandomVariable`s to probability functions. {meth}`~pymc.Model.logpt`, for instance can be used to extract the joint probability of all variables in the model:" + "`pymc` models provide some helpful routines to facilitating the conversion of `RandomVariable`s to probability functions. {meth}`~pymc.Model.logp`, for instance can be used to extract the joint probability of all variables in the model:" ] }, { @@ -1902,7 +1902,7 @@ } ], "source": [ - "aesara.dprint(model.logpt(sum=False))" + "aesara.dprint(model.logp(sum=False))" ] }, { @@ -2213,7 +2213,7 @@ "sigma_log_value = model_2.rvs_to_values[sigma]\n", "x_value = model_2.rvs_to_values[x]\n", "# element-wise log-probability of the model (we do not take te sum)\n", - "logp_graph = at.stack(model_2.logpt(sum=False))\n", + "logp_graph = at.stack(model_2.logp(sum=False))\n", "# evaluate by passing concrete values\n", "logp_graph.eval({mu_value: 0, sigma_log_value: -10, x_value:0})" ] @@ -2314,7 +2314,7 @@ } }, "source": [ - "The {class}`~pymc.Model` class also has methods to extract the gradient ({meth}`~pymc.Model.dlogpt`) and the hessian ({meth}`~pymc.Model.d2logpt`) of the logp." + "The {class}`~pymc.Model` class also has methods to extract the gradient ({meth}`~pymc.Model.dlogp`) and the hessian ({meth}`~pymc.Model.d2logp`) of the logp." 
   ]
  },
 {

From 0788d797c4b8b291affb026b375ac79e813b0e53 Mon Sep 17 00:00:00 2001
From: Fernando Irarrazaval
Date: Wed, 8 Jun 2022 09:49:27 -0400
Subject: [PATCH 21/28] Added potentiallogpt test

---
 pymc/tests/test_model.py | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/pymc/tests/test_model.py b/pymc/tests/test_model.py
index 11a7380bc9..82fe021b53 100644
--- a/pymc/tests/test_model.py
+++ b/pymc/tests/test_model.py
@@ -887,18 +887,25 @@ def test_model_logpt_deprecation_warning():
 
     with pytest.warns(FutureWarning):
         m.logpt()
+
     with pytest.warns(FutureWarning):
         m.dlogpt()
+
     with pytest.warns(FutureWarning):
         m.d2logpt()
 
     with pytest.warns(FutureWarning):
         m.datalogpt
+
     with pytest.warns(FutureWarning):
         m.varlogpt
+
     with pytest.warns(FutureWarning):
         m.observedlogpt
 
+    with pytest.warns(FutureWarning):
+        m.potentiallogpt
+
 
 @pytest.mark.parametrize("jacobian", [True, False])
 def test_model_logp(jacobian):

From bb0dbe36177f8c8b9817bc830c1c53f081fad6c4 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Fernando=20Irarr=C3=A1zaval?=
Date: Mon, 13 Jun 2022 16:44:52 -0400
Subject: [PATCH 22/28] Update pymc/distributions/logprob.py

Co-authored-by: Ricardo Vieira <28983449+ricardoV94@users.noreply.github.com>
---
 pymc/distributions/logprob.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pymc/distributions/logprob.py b/pymc/distributions/logprob.py
index 620c6ec511..d67dc35ed1 100644
--- a/pymc/distributions/logprob.py
+++ b/pymc/distributions/logprob.py
@@ -122,7 +122,7 @@ def _get_scaling(
 
 def joint_logpt(*args, **kwargs):
     warnings.warn(
-        "logprob.joint_logpt has been deprecated. Use logprob.joint_logp instead.",
+        "joint_logpt has been deprecated. Use joint_logp instead.",
         FutureWarning,
     )
     return joint_logp(*args, **kwargs)

From 07ec242b5e0b55b151212f4a905e0e73cec2511e Mon Sep 17 00:00:00 2001
From: Fernando Irarrazaval
Date: Mon, 13 Jun 2022 16:50:48 -0400
Subject: [PATCH 23/28] Removed t from varlogp_nojact

---
 pymc/model.py            | 10 +++++++++-
 pymc/tests/test_model.py |  3 +++
 2 files changed, 12 insertions(+), 1 deletion(-)

diff --git a/pymc/model.py b/pymc/model.py
index 242f6f7ef6..019015bccd 100644
--- a/pymc/model.py
+++ b/pymc/model.py
@@ -890,7 +890,15 @@ def varlogp(self) -> Variable:
         return self.logp(vars=self.free_RVs)
 
     @property
-    def varlogp_nojact(self) -> Variable:
+    def varlogp_nojact(self):
+        warnings.warn(
+            "Model.varlogp_nojact has been deprecated. Use Model.varlogp_nojac instead.",
+            FutureWarning,
+        )
+        return self.varlogp_nojac
+
+    @property
+    def varlogp_nojac(self) -> Variable:
         """Aesara scalar of log-probability of the unobserved random variables (excluding deterministic) without jacobian term."""
         return self.logp(vars=self.free_RVs, jacobian=False)
 
diff --git a/pymc/tests/test_model.py b/pymc/tests/test_model.py
index 82fe021b53..34dedaa1ca 100644
--- a/pymc/tests/test_model.py
+++ b/pymc/tests/test_model.py
@@ -906,6 +906,9 @@ def test_model_logpt_deprecation_warning():
     with pytest.warns(FutureWarning):
         m.potentiallogpt
 
+    with pytest.warns(FutureWarning):
+        m.varlogp_nojact
+
 
 @pytest.mark.parametrize("jacobian", [True, False])
 def test_model_logp(jacobian):

From 7322a3258b2401badddb5cf14dd0ccd75e2949e2 Mon Sep 17 00:00:00 2001
From: Fernando Irarrazaval
Date: Mon, 13 Jun 2022 17:02:23 -0400
Subject: [PATCH 24/28] Revert Release Notes

---
 RELEASE-NOTES.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/RELEASE-NOTES.md b/RELEASE-NOTES.md
index f3a61873aa..9187ba91ef 100644
--- a/RELEASE-NOTES.md
+++ b/RELEASE-NOTES.md
@@ -118,8 +118,8 @@ _Read on if you're a developer. Or curious. Or both._
 - `pm.GaussianRandomWalk` initial distribution defaults to zero-centered normal with sigma=100 instead of flat (see[#5779](https://github.com/pymc-devs/pymc/pull/5779))
 - `pm.AR` initial distribution defaults to unit normal instead of flat (see[#5779](https://github.com/pymc-devs/pymc/pull/5779))
-  - `logpt`, `logpt_sum`, `logp_elemwiset` and `nojac` variations were removed. Use `Model.logp(jacobian=True/False, sum=True/False)` instead.
-  - `dlogp_nojact` and `d2logp_nojact` were removed. Use `Model.dlogp` and `d2logp` with `jacobian=False` instead.
+  - `logpt`, `logpt_sum`, `logp_elemwiset` and `nojac` variations were removed. Use `Model.logpt(jacobian=True/False, sum=True/False)` instead.
+  - `dlogp_nojact` and `d2logp_nojact` were removed. Use `Model.dlogpt` and `d2logpt` with `jacobian=False` instead.
 - `model.makefn` is now called `Model.compile_fn`, and `model.fn` was removed.
 - Methods starting with `fast_*`, such as `Model.fast_logp`, were removed. Same applies to `PointFunc` classes
 - `Model(model=...)` kwarg was removed

From 6027ed8d768c4ead6df851496a5db9aa50f65815 Mon Sep 17 00:00:00 2001
From: Fernando Irarrazaval
Date: Wed, 8 Jun 2022 08:58:07 -0400
Subject: [PATCH 25/28] Updated release notes for 4.0

---
 RELEASE-NOTES.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/RELEASE-NOTES.md b/RELEASE-NOTES.md
index 9187ba91ef..f3a61873aa 100644
--- a/RELEASE-NOTES.md
+++ b/RELEASE-NOTES.md
@@ -118,8 +118,8 @@ _Read on if you're a developer. Or curious. Or both._
 - `pm.GaussianRandomWalk` initial distribution defaults to zero-centered normal with sigma=100 instead of flat (see[#5779](https://github.com/pymc-devs/pymc/pull/5779))
 - `pm.AR` initial distribution defaults to unit normal instead of flat (see[#5779](https://github.com/pymc-devs/pymc/pull/5779))
-  - `logpt`, `logpt_sum`, `logp_elemwiset` and `nojac` variations were removed. Use `Model.logpt(jacobian=True/False, sum=True/False)` instead.
-  - `dlogp_nojact` and `d2logp_nojact` were removed. Use `Model.dlogpt` and `d2logpt` with `jacobian=False` instead.
+  - `logpt`, `logpt_sum`, `logp_elemwiset` and `nojac` variations were removed. Use `Model.logp(jacobian=True/False, sum=True/False)` instead.
+  - `dlogp_nojact` and `d2logp_nojact` were removed. Use `Model.dlogp` and `d2logp` with `jacobian=False` instead.
 - `model.makefn` is now called `Model.compile_fn`, and `model.fn` was removed.
 - Methods starting with `fast_*`, such as `Model.fast_logp`, were removed. Same applies to `PointFunc` classes
 - `Model(model=...)` kwarg was removed

From 115802227d502e92493ccad786e78d7969028c0d Mon Sep 17 00:00:00 2001
From: Fernando Irarrazaval
Date: Mon, 13 Jun 2022 17:02:23 -0400
Subject: [PATCH 26/28] Revert Release Notes

---
 RELEASE-NOTES.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/RELEASE-NOTES.md b/RELEASE-NOTES.md
index f3a61873aa..9187ba91ef 100644
--- a/RELEASE-NOTES.md
+++ b/RELEASE-NOTES.md
@@ -118,8 +118,8 @@ _Read on if you're a developer. Or curious. Or both._
 - `pm.GaussianRandomWalk` initial distribution defaults to zero-centered normal with sigma=100 instead of flat (see[#5779](https://github.com/pymc-devs/pymc/pull/5779))
 - `pm.AR` initial distribution defaults to unit normal instead of flat (see[#5779](https://github.com/pymc-devs/pymc/pull/5779))
-  - `logpt`, `logpt_sum`, `logp_elemwiset` and `nojac` variations were removed. Use `Model.logp(jacobian=True/False, sum=True/False)` instead.
-  - `dlogp_nojact` and `d2logp_nojact` were removed. Use `Model.dlogp` and `d2logp` with `jacobian=False` instead.
+  - `logpt`, `logpt_sum`, `logp_elemwiset` and `nojac` variations were removed. Use `Model.logpt(jacobian=True/False, sum=True/False)` instead.
+  - `dlogp_nojact` and `d2logp_nojact` were removed. Use `Model.dlogpt` and `d2logpt` with `jacobian=False` instead.
 - `model.makefn` is now called `Model.compile_fn`, and `model.fn` was removed.
 - Methods starting with `fast_*`, such as `Model.fast_logp`, were removed. Same applies to `PointFunc` classes
 - `Model(model=...)` kwarg was removed

From b4b451528884fc02ed5b1864846def8870b8d381 Mon Sep 17 00:00:00 2001
From: Fernando Irarrazaval
Date: Mon, 13 Jun 2022 20:04:51 -0400
Subject: [PATCH 27/28] Added deprecation of functions/properties ending with t to release notes

---
 RELEASE-NOTES.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/RELEASE-NOTES.md b/RELEASE-NOTES.md
index 9187ba91ef..63b4cc1567 100644
--- a/RELEASE-NOTES.md
+++ b/RELEASE-NOTES.md
@@ -4,6 +4,7 @@
 + Fixed an incorrect entry in `pm.Metropolis.stats_dtypes` (see #5582).
 + Added a check in `Empirical` approximation which does not yet support `InferenceData` inputs (see #5874, #5884).
 + Fixed bug when sampling discrete variables with SMC (see #5887).
++ Removed trailing t in functions and properties from the model class and from jointlogpt (see #5859). Deprecated `Model.logpt` (in favor of `Model.logp`), `Model.dlogpt` (in favor of `Model.dlogp`), `Model.d2logpt` (in favor of `Model.d2logp`), `Model.datalogpt` (in favor of `Model.datalogp`), `Model.varlogpt` (in favor of `Model.varlogp`), `Model.observedlogpt` (in favor of `Model.observedlogp`), `Model.potentiallogpt` (in favor of `Model.potentiallogp`), and `Model.varlogp_nojact` (in favor of `Model.varlogp_nojac`), `logprob.joint_logpt` (in favor of `logprob.joint_logp`).
 
 ## PyMC 4.0.0 (2022-06-03)

From a33cd0babc38cd9576c1baf7ac5a4cedf0e4cfce Mon Sep 17 00:00:00 2001
From: Thomas Wiecki
Date: Tue, 14 Jun 2022 10:12:55 +0200
Subject: [PATCH 28/28] Update RELEASE-NOTES.md

---
 RELEASE-NOTES.md | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)

diff --git a/RELEASE-NOTES.md b/RELEASE-NOTES.md
index 63b4cc1567..1d7dc1784b 100644
--- a/RELEASE-NOTES.md
+++ b/RELEASE-NOTES.md
@@ -4,7 +4,16 @@
 + Fixed an incorrect entry in `pm.Metropolis.stats_dtypes` (see #5582).
 + Added a check in `Empirical` approximation which does not yet support `InferenceData` inputs (see #5874, #5884).
 + Fixed bug when sampling discrete variables with SMC (see #5887).
-+ Removed trailing t in functions and properties from the model class and from jointlogpt (see #5859). Deprecated `Model.logpt` (in favor of `Model.logp`), `Model.dlogpt` (in favor of `Model.dlogp`), `Model.d2logpt` (in favor of `Model.d2logp`), `Model.datalogpt` (in favor of `Model.datalogp`), `Model.varlogpt` (in favor of `Model.varlogp`), `Model.observedlogpt` (in favor of `Model.observedlogp`), `Model.potentiallogpt` (in favor of `Model.potentiallogp`), and `Model.varlogp_nojact` (in favor of `Model.varlogp_nojac`), `logprob.joint_logpt` (in favor of `logprob.joint_logp`).
++ Removed trailing `t` (for tensor) in functions and properties from the model class and from `jointlogpt` (see #5859).
+  + `Model.logpt` → `Model.logp`
+  + `Model.dlogpt` → `Model.dlogp`
+  + `Model.d2logpt` → `Model.d2logp`
+  + `Model.datalogpt` → `Model.datalogp`
+  + `Model.varlogpt` → `Model.varlogp`
+  + `Model.observedlogpt` → `Model.observedlogp`
+  + `Model.potentiallogpt` → `Model.potentiallogp`
+  + `Model.varlogp_nojact` → `Model.varlogp_nojac`
+  + `logprob.joint_logpt` → `logprob.joint_logp`
 
 ## PyMC 4.0.0 (2022-06-03)
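
The net effect of the renames tracked in this patch series can be summarized with a short usage sketch. The sketch below is illustrative only and is not part of any commit above; it assumes a PyMC build that already includes these patches, and it only uses names the diffs themselves introduce or exercise (`Model.logp`, `Model.dlogp`, `Model.d2logp`, and the `FutureWarning` shims kept under the old `*t` spellings). The toy model and variable names are made up for the example.

# Illustrative sketch (not part of the patch series above); assumes a PyMC
# build that contains these patches.
import warnings

import pymc as pm

with pm.Model() as m:
    x = pm.Normal("x", 0, 1, size=2)
    y = pm.LogNormal("y", 0, 1, size=2)

# New spellings: build the log-probability graph and its derivatives directly.
logp_terms = m.logp(sum=False)   # list of elementwise log-probability terms
dlogp_graph = m.dlogp()          # gradient graph, replaces m.dlogpt()
d2logp_graph = m.d2logp()        # Hessian graph, replaces m.d2logpt()

# Old *t spellings still work for now, but emit a FutureWarning via the shims
# added in the patches above.
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    m.logpt()     # deprecated alias of m.logp()
    m.varlogpt    # deprecated alias of the m.varlogp property
assert any(issubclass(w.category, FutureWarning) for w in caught)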