@@ -1,7 +1,6 @@
 from functools import partial

 import numpy as np
-import numpy.linalg
 import pytest
 import scipy.linalg
 from numpy.testing import assert_allclose
@@ -17,7 +16,16 @@
 from pytensor.tensor.math import _allclose, dot, matmul
 from pytensor.tensor.nlinalg import Det, MatrixInverse, matrix_inverse
 from pytensor.tensor.rewriting.linalg import inv_as_solve
-from pytensor.tensor.slinalg import Cholesky, Solve, SolveTriangular, cholesky, solve
+from pytensor.tensor.slinalg import (
+    Cholesky,
+    Solve,
+    SolveBase,
+    SolveTriangular,
+    cho_solve,
+    cholesky,
+    solve,
+    solve_triangular,
+)
 from pytensor.tensor.type import dmatrix, matrix, tensor, vector
 from tests import unittest_tools as utt
 from tests.test_rop import break_op
@@ -231,3 +239,70 @@ def test_local_det_chol():
     f = function([X], [L, det_X, X])
     nodes = f.maker.fgraph.toposort()
     assert not any(isinstance(node, Det) for node in nodes)
+
+
+class TestBatchedVectorBSolveToMatrixBSolve:
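+    # The rewrite should turn batched solves with a vector b (b_ndim == 1) into a
+    # single solve with a matrix b, but only when a itself has no batch dimensions.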
+    rewrite_name = "batched_vector_b_solve_to_matrix_b_solve"
+
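+    # Helper: does the compiled graph still contain a Blockwise solve whose core
+    # op takes a vector right-hand side (b_ndim == 1)?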
+    @staticmethod
+    def any_vector_b_solve(fn):
+        return any(
+            (
+                isinstance(node.op, Blockwise)
+                and isinstance(node.op.core_op, SolveBase)
+                and node.op.core_op.b_ndim == 1
+            )
+            for node in fn.maker.fgraph.apply_nodes
+        )
+
+    @pytest.mark.parametrize("solve_op", (solve, solve_triangular, cho_solve))
+    def test_valid_cases(self, solve_op):
+        rng = np.random.default_rng(sum(map(ord, solve_op.__name__)))
+
+        a = tensor(shape=(None, None))
+        b = tensor(shape=(None, None, None))
+
+        if solve_op is cho_solve:
+            # cho_solve expects a tuple (a, lower) as its first input
+            out = solve_op((a, True), b, b_ndim=1)
+        else:
+            out = solve_op(a, b, b_ndim=1)
+
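+        # Reference function: compile with the rewrite disabled, so the batched
+        # vector-b solve is kept in the graph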
+        mode = get_default_mode().excluding(self.rewrite_name)
+        ref_fn = pytensor.function([a, b], out, mode=mode)
+        assert self.any_vector_b_solve(ref_fn)
+
+        mode = get_default_mode().including(self.rewrite_name)
+        opt_fn = pytensor.function([a, b], out, mode=mode)
+        assert not self.any_vector_b_solve(opt_fn)
+
+        test_a = rng.normal(size=(3, 3)).astype(config.floatX)
+        test_b = rng.normal(size=(7, 5, 3)).astype(config.floatX)
+        np.testing.assert_allclose(
+            opt_fn(test_a, test_b),
+            ref_fn(test_a, test_b),
+            rtol=1e-7 if config.floatX == "float64" else 1e-5,
+        )
+
+    def test_invalid_batched_a(self):
+        rng = np.random.default_rng(sum(map(ord, self.rewrite_name)))
+
+        # Rewrite is not applicable if a has batched dims
+        a = tensor(shape=(None, None, None))
+        b = tensor(shape=(None, None, None))
+
+        out = solve(a, b, b_ndim=1)
+
+        mode = get_default_mode().including(self.rewrite_name)
+        opt_fn = pytensor.function([a, b], out, mode=mode)
+        assert self.any_vector_b_solve(opt_fn)
+
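+        # NumPy's solve, vectorized over the batch dimensions, provides the
+        # numerical reference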
+        ref_fn = np.vectorize(np.linalg.solve, signature="(m,m),(m)->(m)")
+
+        test_a = rng.normal(size=(5, 3, 3)).astype(config.floatX)
+        test_b = rng.normal(size=(7, 5, 3)).astype(config.floatX)
+        np.testing.assert_allclose(
+            opt_fn(test_a, test_b),
+            ref_fn(test_a, test_b),
+            rtol=1e-7 if config.floatX == "float64" else 1e-5,
+        )