diff --git a/RELEASE-NOTES.md b/RELEASE-NOTES.md index 9187ba91ef..1d7dc1784b 100644 --- a/RELEASE-NOTES.md +++ b/RELEASE-NOTES.md @@ -4,6 +4,16 @@ + Fixed an incorrect entry in `pm.Metropolis.stats_dtypes` (see #5582). + Added a check in `Empirical` approximation which does not yet support `InferenceData` inputs (see #5874, #5884). + Fixed bug when sampling discrete variables with SMC (see #5887). ++ Removed trailing `t` (for tensor) in functions and properties from the model class and from `joint_logpt` (see #5859). + + `Model.logpt` → `Model.logp` + + `Model.dlogpt` → `Model.dlogp` + + `Model.d2logpt` → `Model.d2logp` + + `Model.datalogpt` → `Model.datalogp` + + `Model.varlogpt` → `Model.varlogp` + + `Model.observedlogpt` → `Model.observedlogp` + + `Model.potentiallogpt` → `Model.potentiallogp` + + `Model.varlogp_nojact` → `Model.varlogp_nojac` + + `logprob.joint_logpt` → `logprob.joint_logp` ## PyMC 4.0.0 (2022-06-03) diff --git a/docs/source/learn/core_notebooks/pymc_aesara.ipynb b/docs/source/learn/core_notebooks/pymc_aesara.ipynb index 329a3aa6d4..48e24053fe 100644 --- a/docs/source/learn/core_notebooks/pymc_aesara.ipynb +++ b/docs/source/learn/core_notebooks/pymc_aesara.ipynb @@ -1844,7 +1844,7 @@ } }, "source": [ - "`pymc` models provide some helpful routines to facilitating the conversion of `RandomVariable`s to probability functions. {meth}`~pymc.Model.logpt`, for instance can be used to extract the joint probability of all variables in the model:" + "`pymc` models provide some helpful routines to facilitate the conversion of `RandomVariable`s to probability functions. {meth}`~pymc.Model.logp`, for instance, can be used to extract the joint probability of all variables in the model:" ] }, { @@ -1902,7 +1902,7 @@ } ], "source": [ - "aesara.dprint(model.logpt(sum=False))" + "aesara.dprint(model.logp(sum=False))" ] }, { @@ -2213,7 +2213,7 @@ "sigma_log_value = model_2.rvs_to_values[sigma]\n", "x_value = model_2.rvs_to_values[x]\n", "# element-wise log-probability of the model (we do not take te sum)\n", - "logp_graph = at.stack(model_2.logpt(sum=False))\n", + "logp_graph = at.stack(model_2.logp(sum=False))\n", "# evaluate by passing concrete values\n", "logp_graph.eval({mu_value: 0, sigma_log_value: -10, x_value:0})" ] @@ -2314,7 +2314,7 @@ } }, "source": [ - "The {class}`~pymc.Model` class also has methods to extract the gradient ({meth}`~pymc.Model.dlogpt`) and the hessian ({meth}`~pymc.Model.d2logpt`) of the logp." + "The {class}`~pymc.Model` class also has methods to extract the gradient ({meth}`~pymc.Model.dlogp`) and the hessian ({meth}`~pymc.Model.d2logp`) of the logp." 
] }, { diff --git a/pymc/backends/arviz.py b/pymc/backends/arviz.py index 552a2afd44..80da0a89d8 100644 --- a/pymc/backends/arviz.py +++ b/pymc/backends/arviz.py @@ -251,7 +251,7 @@ def _extract_log_likelihood(self, trace): ( var, self.model.compile_fn( - self.model.logpt(var, sum=False)[0], + self.model.logp(var, sum=False)[0], inputs=self.model.value_vars, on_unused_input="ignore", ), @@ -263,7 +263,7 @@ def _extract_log_likelihood(self, trace): ( var, self.model.compile_fn( - self.model.logpt(var, sum=False)[0], + self.model.logp(var, sum=False)[0], inputs=self.model.value_vars, on_unused_input="ignore", ), diff --git a/pymc/distributions/__init__.py b/pymc/distributions/__init__.py index 8680528682..40ea9894a6 100644 --- a/pymc/distributions/__init__.py +++ b/pymc/distributions/__init__.py @@ -15,6 +15,7 @@ from pymc.distributions.logprob import ( # isort:skip logcdf, logp, + joint_logp, joint_logpt, ) @@ -191,6 +192,7 @@ "CAR", "PolyaGamma", "joint_logpt", + "joint_logp", "logp", "logcdf", ] diff --git a/pymc/distributions/continuous.py b/pymc/distributions/continuous.py index e1ade92dd2..ef20c39ca6 100644 --- a/pymc/distributions/continuous.py +++ b/pymc/distributions/continuous.py @@ -2558,7 +2558,7 @@ def logcdf(value, nu): return logcdf(Gamma.dist(alpha=nu / 2, beta=0.5), value) -# TODO: Remove this once logpt for multiplication is working! +# TODO: Remove this once logp for multiplication is working! class WeibullBetaRV(WeibullRV): ndims_params = [0, 0] diff --git a/pymc/distributions/logprob.py b/pymc/distributions/logprob.py index fb2b041ff8..d67dc35ed1 100644 --- a/pymc/distributions/logprob.py +++ b/pymc/distributions/logprob.py @@ -11,6 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +import warnings from collections.abc import Mapping from typing import Dict, List, Optional, Sequence, Union @@ -119,7 +120,15 @@ def _get_scaling( ) -def joint_logpt( +def joint_logpt(*args, **kwargs): + warnings.warn( + "joint_logpt has been deprecated. Use joint_logp instead.", + FutureWarning, + ) + return joint_logp(*args, **kwargs) + + +def joint_logp( var: Union[TensorVariable, List[TensorVariable]], rv_values: Optional[Union[TensorVariable, Dict[TensorVariable, TensorVariable]]] = None, *, @@ -159,14 +168,14 @@ def joint_logpt( """ # TODO: In future when we drop support for tag.value_var most of the following - # logic can be removed and logpt can just be a wrapper function that calls aeppl's + # logic can be removed and logp can just be a wrapper function that calls aeppl's # joint_logprob directly. # If var is not a list make it one. if not isinstance(var, (list, tuple)): var = [var] - # If logpt isn't provided values it is assumed that the tagged value var or + # If logp isn't provided values it is assumed that the tagged value var or # observation is the value variable for that particular RV. if rv_values is None: rv_values = {} @@ -251,7 +260,7 @@ def joint_logpt( "reference nonlocal variables." ) - # aeppl returns the logpt for every single value term we provided to it. This includes + # aeppl returns the logp for every single value term we provided to it. This includes # the extra values we plugged in above, so we filter those we actually wanted in the # same order they were given in. 
logp_var_dict = {} diff --git a/pymc/model.py b/pymc/model.py index 4f089e5732..019015bccd 100644 --- a/pymc/model.py +++ b/pymc/model.py @@ -57,7 +57,7 @@ ) from pymc.blocking import DictToArrayBijection, RaveledVars from pymc.data import GenTensorVariable, Minibatch -from pymc.distributions import joint_logpt +from pymc.distributions import joint_logp from pymc.distributions.logprob import _get_scaling from pymc.distributions.transforms import _default_transform from pymc.exceptions import ImputationWarning, SamplingError, ShapeError, ShapeWarning @@ -623,9 +623,9 @@ def logp_dlogp_function(self, grad_vars=None, tempered=False, **kwargs): raise ValueError(f"Can only compute the gradient of continuous types: {var}") if tempered: - costs = [self.varlogpt, self.datalogpt] + costs = [self.varlogp, self.datalogp] else: - costs = [self.logpt()] + costs = [self.logp()] input_vars = {i for i in graph_inputs(costs) if not isinstance(i, Constant)} extra_vars = [self.rvs_to_values.get(var, var) for var in self.free_RVs] @@ -654,7 +654,7 @@ def compile_logp( Whether to sum all logp terms or return elemwise logp for each variable. Defaults to True. """ - return self.model.compile_fn(self.logpt(vars=vars, jacobian=jacobian, sum=sum)) + return self.model.compile_fn(self.logp(vars=vars, jacobian=jacobian, sum=sum)) def compile_dlogp( self, @@ -671,7 +671,7 @@ def compile_dlogp( jacobian: Whether to include jacobian terms in logprob graph. Defaults to True. """ - return self.model.compile_fn(self.dlogpt(vars=vars, jacobian=jacobian)) + return self.model.compile_fn(self.dlogp(vars=vars, jacobian=jacobian)) def compile_d2logp( self, @@ -688,9 +688,16 @@ def compile_d2logp( jacobian: Whether to include jacobian terms in logprob graph. Defaults to True. """ - return self.model.compile_fn(self.d2logpt(vars=vars, jacobian=jacobian)) + return self.model.compile_fn(self.d2logp(vars=vars, jacobian=jacobian)) - def logpt( + def logpt(self, *args, **kwargs): + warnings.warn( + "Model.logpt has been deprecated. Use Model.logp instead.", + FutureWarning, + ) + return self.logp(*args, **kwargs) + + def logp( self, vars: Optional[Union[Variable, Sequence[Variable]]] = None, jacobian: bool = True, @@ -742,7 +749,7 @@ def logpt( rv_logps: List[TensorVariable] = [] if rv_values: - rv_logps = joint_logpt(list(rv_values.keys()), rv_values, sum=False, jacobian=jacobian) + rv_logps = joint_logp(list(rv_values.keys()), rv_values, sum=False, jacobian=jacobian) assert isinstance(rv_logps, list) # Replace random variables by their value variables in potential terms @@ -764,7 +771,14 @@ def logpt( logp_scalar.name = logp_scalar_name return logp_scalar - def dlogpt( + def dlogpt(self, *args, **kwargs): + warnings.warn( + "Model.dlogpt has been deprecated. Use Model.dlogp instead.", + FutureWarning, + ) + return self.dlogp(*args, **kwargs) + + def dlogp( self, vars: Optional[Union[Variable, Sequence[Variable]]] = None, jacobian: bool = True, @@ -799,10 +813,17 @@ def dlogpt( f"Requested variable {var} not found among the model variables" ) - cost = self.logpt(jacobian=jacobian) + cost = self.logp(jacobian=jacobian) return gradient(cost, value_vars) - def d2logpt( + def d2logpt(self, *args, **kwargs): + warnings.warn( + "Model.d2logpt has been deprecated. 
Use Model.d2logp instead.", + FutureWarning, + ) + return self.d2logp(*args, **kwargs) + + def d2logp( self, vars: Optional[Union[Variable, Sequence[Variable]]] = None, jacobian: bool = True, @@ -837,34 +858,74 @@ def d2logpt( f"Requested variable {var} not found among the model variables" ) - cost = self.logpt(jacobian=jacobian) + cost = self.logp(jacobian=jacobian) return hessian(cost, value_vars) @property - def datalogpt(self) -> Variable: + def datalogpt(self): + warnings.warn( + "Model.datalogpt has been deprecated. Use Model.datalogp instead.", + FutureWarning, + ) + return self.datalogp + + @property + def datalogp(self) -> Variable: """Aesara scalar of log-probability of the observed variables and potential terms""" - return self.observedlogpt + self.potentiallogpt + return self.observedlogp + self.potentiallogp @property - def varlogpt(self) -> Variable: + def varlogpt(self): + warnings.warn( + "Model.varlogpt has been deprecated. Use Model.varlogp instead.", + FutureWarning, + ) + return self.varlogp + + @property + def varlogp(self) -> Variable: """Aesara scalar of log-probability of the unobserved random variables (excluding deterministic).""" - return self.logpt(vars=self.free_RVs) + return self.logp(vars=self.free_RVs) @property - def varlogp_nojact(self) -> Variable: + def varlogp_nojact(self): + warnings.warn( + "Model.varlogp_nojact has been deprecated. Use Model.varlogp_nojac instead.", + FutureWarning, + ) + return self.varlogp_nojac + + @property + def varlogp_nojac(self) -> Variable: """Aesara scalar of log-probability of the unobserved random variables (excluding deterministic) without jacobian term.""" - return self.logpt(vars=self.free_RVs, jacobian=False) + return self.logp(vars=self.free_RVs, jacobian=False) + + @property + def observedlogpt(self): + warnings.warn( + "Model.observedlogpt has been deprecated. Use Model.observedlogp instead.", + FutureWarning, + ) + return self.observedlogp @property - def observedlogpt(self) -> Variable: + def observedlogp(self) -> Variable: """Aesara scalar of log-probability of the observed variables""" - return self.logpt(vars=self.observed_RVs) + return self.logp(vars=self.observed_RVs) + + @property + def potentiallogpt(self): + warnings.warn( + "Model.potentiallogpt has been deprecated. 
Use Model.potentiallogp instead.", + FutureWarning, + ) + return self.potentiallogp @property - def potentiallogpt(self) -> Variable: + def potentiallogp(self) -> Variable: """Aesara scalar of log-probability of the Potential terms""" # Convert random variables in Potential expression into their log-likelihood # inputs and apply their transforms, if any @@ -1755,7 +1816,7 @@ def point_logps(self, point=None, round_vals=2): point = self.initial_point() factors = self.basic_RVs + self.potentials - factor_logps_fn = [at.sum(factor) for factor in self.logpt(factors, sum=False)] + factor_logps_fn = [at.sum(factor) for factor in self.logp(factors, sum=False)] return { factor.name: np.round(np.asarray(factor_logp), round_vals) for factor, factor_logp in zip( diff --git a/pymc/sampling.py b/pymc/sampling.py index df3dcdd213..6827b49436 100644 --- a/pymc/sampling.py +++ b/pymc/sampling.py @@ -204,7 +204,7 @@ def assign_step_methods(model, step=None, methods=None, step_kwargs=None): # Use competence classmethods to select step methods for remaining # variables selected_steps = defaultdict(list) - model_logpt = model.logpt() + model_logp = model.logp() for var in model.value_vars: if var not in assigned_vars: @@ -212,7 +212,7 @@ def assign_step_methods(model, step=None, methods=None, step_kwargs=None): has_gradient = var.dtype not in discrete_types if has_gradient: try: - tg.grad(model_logpt, var) + tg.grad(model_logp, var) except (NotImplementedError, tg.NullTypeGradError): has_gradient = False diff --git a/pymc/sampling_jax.py b/pymc/sampling_jax.py index a087e005ca..4127dac702 100644 --- a/pymc/sampling_jax.py +++ b/pymc/sampling_jax.py @@ -100,10 +100,10 @@ def get_jaxified_graph( def get_jaxified_logp(model: Model, negative_logp=True) -> Callable: - model_logpt = model.logpt() + model_logp = model.logp() if not negative_logp: - model_logpt = -model_logpt - logp_fn = get_jaxified_graph(inputs=model.value_vars, outputs=[model_logpt]) + model_logp = -model_logp + logp_fn = get_jaxified_graph(inputs=model.value_vars, outputs=[model_logp]) def logp_fn_wrap(x): return logp_fn(*x)[0] @@ -136,8 +136,8 @@ def _get_log_likelihood(model: Model, samples, backend=None) -> Dict: """Compute log-likelihood for all observations""" data = {} for v in model.observed_RVs: - v_elemwise_logpt = model.logpt(v, sum=False) - jax_fn = get_jaxified_graph(inputs=model.value_vars, outputs=v_elemwise_logpt) + v_elemwise_logp = model.logp(v, sum=False) + jax_fn = get_jaxified_graph(inputs=model.value_vars, outputs=v_elemwise_logp) result = jax.jit(jax.vmap(jax.vmap(jax_fn)), backend=backend)(*samples)[0] data[v.name] = result return data diff --git a/pymc/smc/smc.py b/pymc/smc/smc.py index 139c736314..185ee3fb07 100644 --- a/pymc/smc/smc.py +++ b/pymc/smc/smc.py @@ -219,10 +219,10 @@ def _initialize_kernel(self): shared = make_shared_replacements(initial_point, self.variables, self.model) self.prior_logp_func = _logp_forw( - initial_point, [self.model.varlogpt], self.variables, shared + initial_point, [self.model.varlogp], self.variables, shared ) self.likelihood_logp_func = _logp_forw( - initial_point, [self.model.datalogpt], self.variables, shared + initial_point, [self.model.datalogp], self.variables, shared ) priors = [self.prior_logp_func(sample) for sample in self.tempered_posterior] diff --git a/pymc/step_methods/metropolis.py b/pymc/step_methods/metropolis.py index 532ac93890..418c65eb2d 100644 --- a/pymc/step_methods/metropolis.py +++ b/pymc/step_methods/metropolis.py @@ -226,7 +226,7 @@ def __init__( self.mode = 
mode shared = pm.make_shared_replacements(initial_values, vars, model) - self.delta_logp = delta_logp(initial_values, model.logpt(), vars, shared) + self.delta_logp = delta_logp(initial_values, model.logp(), vars, shared) super().__init__(vars, shared) def reset_tuning(self): @@ -794,7 +794,7 @@ def __init__( self.mode = mode shared = pm.make_shared_replacements(initial_values, vars, model) - self.delta_logp = delta_logp(initial_values, model.logpt(), vars, shared) + self.delta_logp = delta_logp(initial_values, model.logp(), vars, shared) super().__init__(vars, shared) def astep(self, q0: RaveledVars) -> Tuple[RaveledVars, List[Dict[str, Any]]]: @@ -957,7 +957,7 @@ def __init__( self.mode = mode shared = pm.make_shared_replacements(initial_values, vars, model) - self.delta_logp = delta_logp(initial_values, model.logpt(), vars, shared) + self.delta_logp = delta_logp(initial_values, model.logp(), vars, shared) super().__init__(vars, shared) def reset_tuning(self): diff --git a/pymc/step_methods/mlda.py b/pymc/step_methods/mlda.py index 441890ebbd..99970111d7 100644 --- a/pymc/step_methods/mlda.py +++ b/pymc/step_methods/mlda.py @@ -538,7 +538,7 @@ def __init__( # Construct Aesara function for current-level model likelihood # (for use in acceptance) shared = pm.make_shared_replacements(initial_values, vars, model) - self.delta_logp = delta_logp(initial_values, model.logpt(), vars, shared) + self.delta_logp = delta_logp(initial_values, model.logp(), vars, shared) # Construct Aesara function for below-level model likelihood # (for use in acceptance) @@ -547,7 +547,7 @@ def __init__( vars_below = pm.inputvars(vars_below) shared_below = pm.make_shared_replacements(initial_values, vars_below, model_below) self.delta_logp_below = delta_logp( - initial_values, model_below.logpt(), vars_below, shared_below + initial_values, model_below.logp(), vars_below, shared_below ) super().__init__(vars, shared) diff --git a/pymc/tests/test_distributions.py b/pymc/tests/test_distributions.py index 06ba3428d3..c7bd0868af 100644 --- a/pymc/tests/test_distributions.py +++ b/pymc/tests/test_distributions.py @@ -121,7 +121,7 @@ def polyagamma_cdf(*args, **kwargs): ZeroInflatedBinomial, ZeroInflatedNegativeBinomial, ZeroInflatedPoisson, - joint_logpt, + joint_logp, logcdf, logp, ) @@ -924,29 +924,29 @@ def RandomPdMatrix(n): return np.dot(A, A.T) + n * np.identity(n) -def test_hierarchical_logpt(): +def test_hierarchical_logp(): """Make sure there are no random variables in a model's log-likelihood graph.""" with pm.Model() as m: x = pm.Uniform("x", lower=0, upper=1) y = pm.Uniform("y", lower=0, upper=x) - logpt_ancestors = list(ancestors([m.logpt()])) - ops = {a.owner.op for a in logpt_ancestors if a.owner} + logp_ancestors = list(ancestors([m.logp()])) + ops = {a.owner.op for a in logp_ancestors if a.owner} assert len(ops) > 0 assert not any(isinstance(o, RandomVariable) for o in ops) - assert x.tag.value_var in logpt_ancestors - assert y.tag.value_var in logpt_ancestors + assert x.tag.value_var in logp_ancestors + assert y.tag.value_var in logp_ancestors -def test_hierarchical_obs_logpt(): +def test_hierarchical_obs_logp(): obs = np.array([0.5, 0.4, 5, 2]) with pm.Model() as model: x = pm.Uniform("x", 0, 1, observed=obs) pm.Uniform("y", x, 2, observed=obs) - logpt_ancestors = list(ancestors([model.logpt()])) - ops = {a.owner.op for a in logpt_ancestors if a.owner} + logp_ancestors = list(ancestors([model.logp()])) + ops = {a.owner.op for a in logp_ancestors if a.owner} assert len(ops) > 0 assert not 
any(isinstance(o, RandomVariable) for o in ops) @@ -2638,29 +2638,29 @@ def test_continuous(self): UpperNormalTransform = Bound("uppertrans", dist, upper=10) BoundedNormalTransform = Bound("boundedtrans", dist, lower=1, upper=10) - assert joint_logpt(LowerNormal, -1).eval() == -np.inf - assert joint_logpt(UpperNormal, 1).eval() == -np.inf - assert joint_logpt(BoundedNormal, 0).eval() == -np.inf - assert joint_logpt(BoundedNormal, 11).eval() == -np.inf + assert joint_logp(LowerNormal, -1).eval() == -np.inf + assert joint_logp(UpperNormal, 1).eval() == -np.inf + assert joint_logp(BoundedNormal, 0).eval() == -np.inf + assert joint_logp(BoundedNormal, 11).eval() == -np.inf - assert joint_logpt(UnboundedNormal, 0).eval() != -np.inf - assert joint_logpt(UnboundedNormal, 11).eval() != -np.inf - assert joint_logpt(InfBoundedNormal, 0).eval() != -np.inf - assert joint_logpt(InfBoundedNormal, 11).eval() != -np.inf + assert joint_logp(UnboundedNormal, 0).eval() != -np.inf + assert joint_logp(UnboundedNormal, 11).eval() != -np.inf + assert joint_logp(InfBoundedNormal, 0).eval() != -np.inf + assert joint_logp(InfBoundedNormal, 11).eval() != -np.inf value = model.rvs_to_values[LowerNormalTransform] - assert joint_logpt(LowerNormalTransform, value).eval({value: -1}) != -np.inf + assert joint_logp(LowerNormalTransform, value).eval({value: -1}) != -np.inf value = model.rvs_to_values[UpperNormalTransform] - assert joint_logpt(UpperNormalTransform, value).eval({value: 1}) != -np.inf + assert joint_logp(UpperNormalTransform, value).eval({value: 1}) != -np.inf value = model.rvs_to_values[BoundedNormalTransform] - assert joint_logpt(BoundedNormalTransform, value).eval({value: 0}) != -np.inf - assert joint_logpt(BoundedNormalTransform, value).eval({value: 11}) != -np.inf + assert joint_logp(BoundedNormalTransform, value).eval({value: 0}) != -np.inf + assert joint_logp(BoundedNormalTransform, value).eval({value: 11}) != -np.inf ref_dist = Normal.dist(mu=0, sigma=1) - assert np.allclose(joint_logpt(UnboundedNormal, 5).eval(), joint_logpt(ref_dist, 5).eval()) - assert np.allclose(joint_logpt(LowerNormal, 5).eval(), joint_logpt(ref_dist, 5).eval()) - assert np.allclose(joint_logpt(UpperNormal, -5).eval(), joint_logpt(ref_dist, 5).eval()) - assert np.allclose(joint_logpt(BoundedNormal, 5).eval(), joint_logpt(ref_dist, 5).eval()) + assert np.allclose(joint_logp(UnboundedNormal, 5).eval(), joint_logp(ref_dist, 5).eval()) + assert np.allclose(joint_logp(LowerNormal, 5).eval(), joint_logp(ref_dist, 5).eval()) + assert np.allclose(joint_logp(UpperNormal, -5).eval(), joint_logp(ref_dist, 5).eval()) + assert np.allclose(joint_logp(BoundedNormal, 5).eval(), joint_logp(ref_dist, 5).eval()) def test_discrete(self): with Model() as model: @@ -2670,19 +2670,19 @@ def test_discrete(self): UpperPoisson = Bound("upper", dist, upper=10) BoundedPoisson = Bound("bounded", dist, lower=1, upper=10) - assert joint_logpt(LowerPoisson, 0).eval() == -np.inf - assert joint_logpt(UpperPoisson, 11).eval() == -np.inf - assert joint_logpt(BoundedPoisson, 0).eval() == -np.inf - assert joint_logpt(BoundedPoisson, 11).eval() == -np.inf + assert joint_logp(LowerPoisson, 0).eval() == -np.inf + assert joint_logp(UpperPoisson, 11).eval() == -np.inf + assert joint_logp(BoundedPoisson, 0).eval() == -np.inf + assert joint_logp(BoundedPoisson, 11).eval() == -np.inf - assert joint_logpt(UnboundedPoisson, 0).eval() != -np.inf - assert joint_logpt(UnboundedPoisson, 11).eval() != -np.inf + assert joint_logp(UnboundedPoisson, 0).eval() != -np.inf + assert 
joint_logp(UnboundedPoisson, 11).eval() != -np.inf ref_dist = Poisson.dist(mu=4) - assert np.allclose(joint_logpt(UnboundedPoisson, 5).eval(), joint_logpt(ref_dist, 5).eval()) - assert np.allclose(joint_logpt(LowerPoisson, 5).eval(), joint_logpt(ref_dist, 5).eval()) - assert np.allclose(joint_logpt(UpperPoisson, 5).eval(), joint_logpt(ref_dist, 5).eval()) - assert np.allclose(joint_logpt(BoundedPoisson, 5).eval(), joint_logpt(ref_dist, 5).eval()) + assert np.allclose(joint_logp(UnboundedPoisson, 5).eval(), joint_logp(ref_dist, 5).eval()) + assert np.allclose(joint_logp(LowerPoisson, 5).eval(), joint_logp(ref_dist, 5).eval()) + assert np.allclose(joint_logp(UpperPoisson, 5).eval(), joint_logp(ref_dist, 5).eval()) + assert np.allclose(joint_logp(BoundedPoisson, 5).eval(), joint_logp(ref_dist, 5).eval()) def create_invalid_distribution(self): class MyNormal(RandomVariable): @@ -2786,19 +2786,19 @@ def test_array_bound(self): UpperPoisson = Bound("upper", dist, upper=[np.inf, 10], transform=None) BoundedPoisson = Bound("bounded", dist, lower=[1, 2], upper=[9, 10], transform=None) - first, second = joint_logpt(LowerPoisson, [0, 0], sum=False)[0].eval() + first, second = joint_logp(LowerPoisson, [0, 0], sum=False)[0].eval() assert first == -np.inf assert second != -np.inf - first, second = joint_logpt(UpperPoisson, [11, 11], sum=False)[0].eval() + first, second = joint_logp(UpperPoisson, [11, 11], sum=False)[0].eval() assert first != -np.inf assert second == -np.inf - first, second = joint_logpt(BoundedPoisson, [1, 1], sum=False)[0].eval() + first, second = joint_logp(BoundedPoisson, [1, 1], sum=False)[0].eval() assert first != -np.inf assert second == -np.inf - first, second = joint_logpt(BoundedPoisson, [10, 10], sum=False)[0].eval() + first, second = joint_logp(BoundedPoisson, [10, 10], sum=False)[0].eval() assert first == -np.inf assert second != -np.inf @@ -2914,8 +2914,8 @@ def test_orderedlogistic_dimensions(shape): p=p, observed=obs, ) - ologp = joint_logpt(ol, np.ones_like(obs), sum=True).eval() * loge - clogp = joint_logpt(c, np.ones_like(obs), sum=True).eval() * loge + ologp = joint_logp(ol, np.ones_like(obs), sum=True).eval() * loge + clogp = joint_logp(c, np.ones_like(obs), sum=True).eval() * loge expected = -np.prod((size,) + shape) assert c.owner.inputs[3].ndim == (len(shape) + 1) @@ -3157,7 +3157,7 @@ def logp(value, mu): a_val = np.random.normal(loc=mu_val, scale=1, size=to_tuple(size) + (supp_shape,)).astype( aesara.config.floatX ) - log_densityt = joint_logpt(a, a.tag.value_var, sum=False)[0] + log_densityt = joint_logp(a, a.tag.value_var, sum=False)[0] assert log_densityt.eval( {a.tag.value_var: a_val, mu.tag.value_var: mu_val}, ).shape == to_tuple(size) @@ -3288,7 +3288,7 @@ def test_no_warning_logp(self): sd_dist = pm.Exponential.dist(1, size=3) x = pm.LKJCholeskyCov("x", n=3, eta=1, sd_dist=sd_dist) with pytest.warns(None) as record: - m.logpt() + m.logp() assert not record @pytest.mark.parametrize( diff --git a/pymc/tests/test_distributions_moments.py b/pymc/tests/test_distributions_moments.py index 230989000c..dbe4ae60ba 100644 --- a/pymc/tests/test_distributions_moments.py +++ b/pymc/tests/test_distributions_moments.py @@ -71,7 +71,7 @@ ZeroInflatedPoisson, ) from pymc.distributions.distribution import _moment, moment -from pymc.distributions.logprob import joint_logpt +from pymc.distributions.logprob import joint_logp from pymc.distributions.shape_utils import rv_size_is_none, to_tuple from pymc.initial_point import make_initial_point_fn from pymc.model import Model @@ 
-163,7 +163,7 @@ def assert_moment_is_expected(model, expected, check_finite_logp=True): assert np.allclose(moment, expected) if check_finite_logp: - logp_moment = joint_logpt(model["x"], at.constant(moment), transformed=False).eval() + logp_moment = joint_logp(model["x"], at.constant(moment), transformed=False).eval() assert np.isfinite(logp_moment) diff --git a/pymc/tests/test_logprob.py b/pymc/tests/test_logprob.py index e3a7d846d8..d0be51e833 100644 --- a/pymc/tests/test_logprob.py +++ b/pymc/tests/test_logprob.py @@ -42,6 +42,7 @@ from pymc.distributions.logprob import ( _get_scaling, ignore_logprob, + joint_logp, joint_logpt, logcdf, logp, @@ -102,7 +103,7 @@ def test_get_scaling(): assert _get_scaling(total_size, shape=rv_var.shape, ndim=rv_var.ndim).eval() == 1.0 -def test_joint_logpt_basic(): +def test_joint_logp_basic(): """Make sure we can compute a log-likelihood for a hierarchical model with transforms.""" with Model() as m: @@ -119,7 +120,10 @@ def test_joint_logpt_basic(): c_value_var = m.rvs_to_values[c] - b_logp = joint_logpt(b, b_value_var, sum=False) + b_logp = joint_logp(b, b_value_var, sum=False) + + with pytest.warns(FutureWarning): + b_logpt = joint_logpt(b, b_value_var, sum=False) res_ancestors = list(walk_model(b_logp, walk_past_rvs=True)) res_rv_ancestors = [ @@ -142,7 +146,7 @@ def test_joint_logpt_basic(): ((np.array([0, 1, 4]), np.array([0, 1, 4])), (5, 5)), ], ) -def test_joint_logpt_incsubtensor(indices, size): +def test_joint_logp_incsubtensor(indices, size): """Make sure we can compute a log-likelihood for ``Y[idx] = data`` where ``Y`` is univariate.""" mu = floatX(np.power(10, np.arange(np.prod(size)))).reshape(size) @@ -163,7 +167,7 @@ def test_joint_logpt_incsubtensor(indices, size): a_idx_value_var = a_idx.type() a_idx_value_var.name = "a_idx_value" - a_idx_logp = joint_logpt(a_idx, {a_idx: a_value_var}, sum=False) + a_idx_logp = joint_logp(a_idx, {a_idx: a_value_var}, sum=False) logp_vals = a_idx_logp[0].eval({a_value_var: a_val}) @@ -177,7 +181,7 @@ def test_joint_logpt_incsubtensor(indices, size): np.testing.assert_almost_equal(logp_vals, exp_obs_logps) -def test_joint_logpt_subtensor(): +def test_joint_logp_subtensor(): """Make sure we can compute a log-likelihood for ``Y[I]`` where ``Y`` and ``I`` are random variables.""" size = 5 @@ -205,7 +209,7 @@ def test_joint_logpt_subtensor(): I_value_var = I_rv.type() I_value_var.name = "I_value" - A_idx_logps = joint_logpt(A_idx, {A_idx: A_idx_value_var, I_rv: I_value_var}, sum=False) + A_idx_logps = joint_logp(A_idx, {A_idx: A_idx_value_var, I_rv: I_value_var}, sum=False) A_idx_logp = at.add(*A_idx_logps) logp_vals_fn = aesara.function([A_idx_value_var, I_value_var], A_idx_logp) @@ -289,8 +293,8 @@ def test_model_unchanged_logprob_access(): c = Uniform("c", lower=a - 1, upper=1) original_inputs = set(aesara.graph.graph_inputs([c])) - # Extract model.logpt - model.logpt() + # Extract model.logp + model.logp() new_inputs = set(aesara.graph.graph_inputs([c])) assert original_inputs == new_inputs @@ -301,7 +305,7 @@ def test_unexpected_rvs(): y = DensityDist("y", logp=lambda *args: x) with pytest.raises(ValueError, match="^Random variables detected in the logp graph"): - model.logpt() + model.logp() def test_ignore_logprob_basic(): @@ -331,9 +335,9 @@ def logp(value, x): y = DensityDist("y", x, logp=logp) # Aeppl raises a KeyError when it finds an unexpected RV with pytest.raises(KeyError): - joint_logpt([y], {y: y.type()}) + joint_logp([y], {y: y.type()}) with Model() as m: x = ignore_logprob(Normal.dist()) 
y = DensityDist("y", x, logp=logp) - assert joint_logpt([y], {y: y.type()}) + assert joint_logp([y], {y: y.type()}) diff --git a/pymc/tests/test_minibatches.py b/pymc/tests/test_minibatches.py index 62ff81874a..58bf66c138 100644 --- a/pymc/tests/test_minibatches.py +++ b/pymc/tests/test_minibatches.py @@ -170,11 +170,11 @@ class TestScaling: def test_density_scaling(self): with pm.Model() as model1: Normal("n", observed=[[1]], total_size=1) - p1 = aesara.function([], model1.logpt()) + p1 = aesara.function([], model1.logp()) with pm.Model() as model2: Normal("n", observed=[[1]], total_size=2) - p2 = aesara.function([], model2.logpt()) + p2 = aesara.function([], model2.logp()) assert p1() * 2 == p2() def test_density_scaling_with_generator(self): @@ -189,12 +189,12 @@ def true_dens(): # We have same size models with pm.Model() as model1: Normal("n", observed=gen1(), total_size=100) - p1 = aesara.function([], model1.logpt()) + p1 = aesara.function([], model1.logp()) with pm.Model() as model2: gen_var = generator(gen2()) Normal("n", observed=gen_var, total_size=100) - p2 = aesara.function([], model2.logpt()) + p2 = aesara.function([], model2.logp()) for i in range(10): _1, _2, _t = p1(), p2(), next(t) @@ -208,12 +208,12 @@ def test_gradient_with_scaling(self): genvar = generator(gen1()) m = Normal("m") Normal("n", observed=genvar, total_size=1000) - grad1 = aesara.function([m.tag.value_var], at.grad(model1.logpt(), m.tag.value_var)) + grad1 = aesara.function([m.tag.value_var], at.grad(model1.logp(), m.tag.value_var)) with pm.Model() as model2: m = Normal("m") shavar = aesara.shared(np.ones((1000, 100))) Normal("n", observed=shavar) - grad2 = aesara.function([m.tag.value_var], at.grad(model2.logpt(), m.tag.value_var)) + grad2 = aesara.function([m.tag.value_var], at.grad(model2.logp(), m.tag.value_var)) for i in range(10): shavar.set_value(np.ones((100, 100)) * i) @@ -224,27 +224,27 @@ def test_gradient_with_scaling(self): def test_multidim_scaling(self): with pm.Model() as model0: Normal("n", observed=[[1, 1], [1, 1]], total_size=[]) - p0 = aesara.function([], model0.logpt()) + p0 = aesara.function([], model0.logp()) with pm.Model() as model1: Normal("n", observed=[[1, 1], [1, 1]], total_size=[2, 2]) - p1 = aesara.function([], model1.logpt()) + p1 = aesara.function([], model1.logp()) with pm.Model() as model2: Normal("n", observed=[[1], [1]], total_size=[2, 2]) - p2 = aesara.function([], model2.logpt()) + p2 = aesara.function([], model2.logp()) with pm.Model() as model3: Normal("n", observed=[[1, 1]], total_size=[2, 2]) - p3 = aesara.function([], model3.logpt()) + p3 = aesara.function([], model3.logp()) with pm.Model() as model4: Normal("n", observed=[[1]], total_size=[2, 2]) - p4 = aesara.function([], model4.logpt()) + p4 = aesara.function([], model4.logp()) with pm.Model() as model5: Normal("n", observed=[[1]], total_size=[2, Ellipsis, 2]) - p5 = aesara.function([], model5.logpt()) + p5 = aesara.function([], model5.logp()) _p0 = p0() assert ( np.allclose(_p0, p1()) @@ -258,27 +258,27 @@ def test_common_errors(self): with pytest.raises(ValueError) as e: with pm.Model() as m: Normal("n", observed=[[1]], total_size=[2, Ellipsis, 2, 2]) - m.logpt() + m.logp() assert "Length of" in str(e.value) with pytest.raises(ValueError) as e: with pm.Model() as m: Normal("n", observed=[[1]], total_size=[2, 2, 2]) - m.logpt() + m.logp() assert "Length of" in str(e.value) with pytest.raises(TypeError) as e: with pm.Model() as m: Normal("n", observed=[[1]], total_size="foo") - m.logpt() + m.logp() assert 
"Unrecognized" in str(e.value) with pytest.raises(TypeError) as e: with pm.Model() as m: Normal("n", observed=[[1]], total_size=["foo"]) - m.logpt() + m.logp() assert "Unrecognized" in str(e.value) with pytest.raises(ValueError) as e: with pm.Model() as m: Normal("n", observed=[[1]], total_size=[Ellipsis, Ellipsis]) - m.logpt() + m.logp() assert "Double Ellipsis" in str(e.value) def test_mixed1(self): @@ -296,11 +296,11 @@ def test_mixed2(self): def test_free_rv(self): with pm.Model() as model4: Normal("n", observed=[[1, 1], [1, 1]], total_size=[2, 2]) - p4 = aesara.function([], model4.logpt()) + p4 = aesara.function([], model4.logp()) with pm.Model() as model5: n = Normal("n", total_size=[2, Ellipsis, 2], size=(2, 2)) - p5 = aesara.function([n.tag.value_var], model5.logpt()) + p5 = aesara.function([n.tag.value_var], model5.logp()) assert p4() == p5(pm.floatX([[1]])) assert p4() == p5(pm.floatX([[1, 1], [1, 1]])) diff --git a/pymc/tests/test_missing.py b/pymc/tests/test_missing.py index cbe3165874..2a7f92be78 100644 --- a/pymc/tests/test_missing.py +++ b/pymc/tests/test_missing.py @@ -20,7 +20,7 @@ from aesara.graph import graph_inputs from numpy import array, ma -from pymc import joint_logpt +from pymc import joint_logp from pymc.distributions import Dirichlet, Gamma, Normal, Uniform from pymc.exceptions import ImputationWarning from pymc.model import Model @@ -220,12 +220,12 @@ def test_missing_vector_parameter(): def test_missing_symmetric(): - """Check that logpt works when partially observed variable have equal observed and + """Check that logp works when partially observed variable have equal observed and unobserved dimensions. This would fail in a previous implementation because the two variables would be equivalent and one of them would be discarded during MergeOptimization while - buling the logpt graph + buling the logp graph """ with Model() as m: x = Gamma("x", alpha=3, beta=10, observed=np.array([1, np.nan])) @@ -236,7 +236,7 @@ def test_missing_symmetric(): x_unobs_rv = m["x_missing"] x_unobs_vv = m.rvs_to_values[x_unobs_rv] - logp = joint_logpt([x_obs_rv, x_unobs_rv], {x_obs_rv: x_obs_vv, x_unobs_rv: x_unobs_vv}) + logp = joint_logp([x_obs_rv, x_unobs_rv], {x_obs_rv: x_obs_vv, x_unobs_rv: x_unobs_vv}) logp_inputs = list(graph_inputs([logp])) assert x_obs_vv in logp_inputs assert x_unobs_vv in logp_inputs diff --git a/pymc/tests/test_mixture.py b/pymc/tests/test_mixture.py index 4f19f9c3e2..46249f93f9 100644 --- a/pymc/tests/test_mixture.py +++ b/pymc/tests/test_mixture.py @@ -927,7 +927,7 @@ def logp_matches(self, mixture, latent_mix, z, npop, model): def loose_logp(model, vars): """Return logp function that accepts dictionary with unused variables as input""" return model.compile_fn( - model.logpt(vars=vars, sum=False), + model.logp(vars=vars, sum=False), inputs=model.value_vars, on_unused_input="ignore", ) diff --git a/pymc/tests/test_model.py b/pymc/tests/test_model.py index 2f8683e9e3..34dedaa1ca 100644 --- a/pymc/tests/test_model.py +++ b/pymc/tests/test_model.py @@ -677,7 +677,7 @@ def test_set_initval(): assert y in model.initial_values -def test_datalogpt_multiple_shapes(): +def test_datalogp_multiple_shapes(): with pm.Model() as m: x = pm.Normal("x", 0, 1) z1 = pm.Potential("z1", x) @@ -688,7 +688,7 @@ def test_datalogpt_multiple_shapes(): # This would raise a TypeError, see #4803 and #4804 x_val = m.rvs_to_values[x] - m.datalogpt.eval({x_val: 0}) + m.datalogp.eval({x_val: 0}) def test_nested_model_coords(): @@ -880,6 +880,36 @@ def 
test_set_data_indirect_resize_with_coords(): pmodel.set_data("mdata", [1, 2], coords=dict(mdim=[1, 2, 3])) +def test_model_logpt_deprecation_warning(): + with pm.Model() as m: + x = pm.Normal("x", 0, 1, size=2) + y = pm.LogNormal("y", 0, 1, size=2) + + with pytest.warns(FutureWarning): + m.logpt() + + with pytest.warns(FutureWarning): + m.dlogpt() + + with pytest.warns(FutureWarning): + m.d2logpt() + + with pytest.warns(FutureWarning): + m.datalogpt + + with pytest.warns(FutureWarning): + m.varlogpt + + with pytest.warns(FutureWarning): + m.observedlogpt + + with pytest.warns(FutureWarning): + m.potentiallogpt + + with pytest.warns(FutureWarning): + m.varlogp_nojact + + @pytest.mark.parametrize("jacobian", [True, False]) def test_model_logp(jacobian): with pm.Model() as m: diff --git a/pymc/tests/test_profile.py b/pymc/tests/test_profile.py index 60c9260122..55d54cbaee 100644 --- a/pymc/tests/test_profile.py +++ b/pymc/tests/test_profile.py @@ -20,12 +20,12 @@ def setup_method(self): _, self.model, _ = simple_model() def test_profile_model(self): - assert self.model.profile(self.model.logpt()).fct_call_time > 0 + assert self.model.profile(self.model.logp()).fct_call_time > 0 def test_profile_variable(self): rv = self.model.basic_RVs[0] - assert self.model.profile(self.model.logpt(vars=[rv], sum=False)).fct_call_time + assert self.model.profile(self.model.logp(vars=[rv], sum=False)).fct_call_time def test_profile_count(self): count = 1005 - assert self.model.profile(self.model.logpt(), n=count).fct_callcount == count + assert self.model.profile(self.model.logp(), n=count).fct_callcount == count diff --git a/pymc/tests/test_smc.py b/pymc/tests/test_smc.py index 14cef4624d..a040221cf8 100644 --- a/pymc/tests/test_smc.py +++ b/pymc/tests/test_smc.py @@ -303,7 +303,7 @@ def setup_class(self): s = pm.Simulator("s", self.normal_sim, a, b, observed=self.data) def test_one_gaussian(self): - assert self.count_rvs(self.SMABC_test.logpt()) == 1 + assert self.count_rvs(self.SMABC_test.logp()) == 1 with self.SMABC_test: trace = pm.sample_smc(draws=1000, chains=1, return_inferencedata=False) @@ -339,7 +339,7 @@ def test_custom_dist_sum_stat(self, floatX): observed=self.data, ) - assert self.count_rvs(m.logpt()) == 1 + assert self.count_rvs(m.logp()) == 1 with m: pm.sample_smc(draws=100) @@ -362,7 +362,7 @@ def test_custom_dist_sum_stat_scalar(self, floatX): sum_stat=self.quantiles, observed=scalar_data, ) - assert self.count_rvs(m.logpt()) == 1 + assert self.count_rvs(m.logp()) == 1 with pm.Model() as m: s = pm.Simulator( @@ -374,10 +374,10 @@ def test_custom_dist_sum_stat_scalar(self, floatX): sum_stat="mean", observed=scalar_data, ) - assert self.count_rvs(m.logpt()) == 1 + assert self.count_rvs(m.logp()) == 1 def test_model_with_potential(self): - assert self.count_rvs(self.SMABC_potential.logpt()) == 1 + assert self.count_rvs(self.SMABC_potential.logp()) == 1 with self.SMABC_potential: trace = pm.sample_smc(draws=100, chains=1, return_inferencedata=False) @@ -421,17 +421,17 @@ def test_multiple_simulators(self): observed=data2, ) - assert self.count_rvs(m.logpt()) == 2 + assert self.count_rvs(m.logp()) == 2 # Check that the logps use the correct methods a_val = m.rvs_to_values[a] sim1_val = m.rvs_to_values[sim1] - logp_sim1 = pm.joint_logpt(sim1, sim1_val) + logp_sim1 = pm.joint_logp(sim1, sim1_val) logp_sim1_fn = aesara.function([a_val], logp_sim1) b_val = m.rvs_to_values[b] sim2_val = m.rvs_to_values[sim2] - logp_sim2 = pm.joint_logpt(sim2, sim2_val) + logp_sim2 = pm.joint_logp(sim2, sim2_val) 
logp_sim2_fn = aesara.function([b_val], logp_sim2) assert any( @@ -471,7 +471,7 @@ def test_nested_simulators(self): observed=data, ) - assert self.count_rvs(m.logpt()) == 2 + assert self.count_rvs(m.logp()) == 2 with m: trace = pm.sample_smc(return_inferencedata=False) diff --git a/pymc/tests/test_transforms.py b/pymc/tests/test_transforms.py index 493f418d33..d939d0acfb 100644 --- a/pymc/tests/test_transforms.py +++ b/pymc/tests/test_transforms.py @@ -24,7 +24,7 @@ import pymc.distributions.transforms as tr from pymc.aesaraf import floatX, jacobian -from pymc.distributions import joint_logpt +from pymc.distributions import joint_logp from pymc.tests.checks import close_to, close_to_logical from pymc.tests.helpers import SeededTest from pymc.tests.test_distributions import ( @@ -287,10 +287,10 @@ def check_transform_elementwise_logp(self, model): x_val_untransf = at.constant(test_array_untransf).type() jacob_det = transform.log_jac_det(test_array_transf, *x.owner.inputs) - assert joint_logpt(x, sum=False)[0].ndim == x.ndim == jacob_det.ndim + assert joint_logp(x, sum=False)[0].ndim == x.ndim == jacob_det.ndim - v1 = joint_logpt(x, x_val_transf, jacobian=False).eval({x_val_transf: test_array_transf}) - v2 = joint_logpt(x, x_val_untransf, transformed=False).eval( + v1 = joint_logp(x, x_val_transf, jacobian=False).eval({x_val_transf: test_array_transf}) + v2 = joint_logp(x, x_val_untransf, transformed=False).eval( {x_val_untransf: test_array_untransf} ) close_to(v1, v2, tol) @@ -310,13 +310,13 @@ def check_vectortransform_elementwise_logp(self, model): jacob_det = transform.log_jac_det(test_array_transf, *x.owner.inputs) # Original distribution is univariate if x.owner.op.ndim_supp == 0: - assert joint_logpt(x, sum=False)[0].ndim == x.ndim == (jacob_det.ndim + 1) + assert joint_logp(x, sum=False)[0].ndim == x.ndim == (jacob_det.ndim + 1) # Original distribution is multivariate else: - assert joint_logpt(x, sum=False)[0].ndim == (x.ndim - 1) == jacob_det.ndim + assert joint_logp(x, sum=False)[0].ndim == (x.ndim - 1) == jacob_det.ndim - a = joint_logpt(x, x_val_transf, jacobian=False).eval({x_val_transf: test_array_transf}) - b = joint_logpt(x, x_val_untransf, transformed=False).eval( + a = joint_logp(x, x_val_transf, jacobian=False).eval({x_val_transf: test_array_transf}) + b = joint_logp(x, x_val_untransf, transformed=False).eval( {x_val_untransf: test_array_untransf} ) # Hack to get relative tolerance diff --git a/pymc/tuning/scaling.py b/pymc/tuning/scaling.py index 28471a0dda..2209d29cca 100644 --- a/pymc/tuning/scaling.py +++ b/pymc/tuning/scaling.py @@ -75,7 +75,7 @@ def find_hessian_diag(point, vars=None, model=None): Variables for which Hessian is to be calculated. 
""" model = modelcontext(model) - H = model.compile_fn(hessian_diag(model.logpt(), vars)) + H = model.compile_fn(hessian_diag(model.logp(), vars)) return H(Point(point, model=model)) diff --git a/pymc/variational/opvi.py b/pymc/variational/opvi.py index 5f2efce143..f4080bdbba 100644 --- a/pymc/variational/opvi.py +++ b/pymc/variational/opvi.py @@ -1232,7 +1232,7 @@ def logq_norm(self): def _sized_symbolic_varlogp_and_datalogp(self): """*Dev* - computes sampled prior term from model via `aesara.scan`""" varlogp_s, datalogp_s = self.symbolic_sample_over_posterior( - [self.model.varlogpt, self.model.datalogpt] + [self.model.varlogp, self.model.datalogp] ) return varlogp_s, datalogp_s # both shape (s,) @@ -1269,7 +1269,7 @@ def datalogp(self): @node_property def _single_symbolic_varlogp_and_datalogp(self): """*Dev* - computes sampled prior term from model via `aesara.scan`""" - varlogp, datalogp = self.symbolic_single_sample([self.model.varlogpt, self.model.datalogpt]) + varlogp, datalogp = self.symbolic_single_sample([self.model.varlogp, self.model.datalogp]) return varlogp, datalogp @node_property