|
17 | 17 | from pytensor.tensor.math import _allclose, dot, matmul
|
18 | 18 | from pytensor.tensor.nlinalg import Det, MatrixInverse, matrix_inverse
|
19 | 19 | from pytensor.tensor.rewriting.linalg import inv_as_solve
|
20 |
| -from pytensor.tensor.slinalg import Cholesky, Solve, SolveTriangular, cholesky, solve |
| 20 | +from pytensor.tensor.slinalg import ( |
| 21 | + Cholesky, |
| 22 | + Solve, |
| 23 | + SolveBase, |
| 24 | + SolveTriangular, |
| 25 | + cho_solve, |
| 26 | + cholesky, |
| 27 | + solve, |
| 28 | + solve_triangular, |
| 29 | +) |
21 | 30 | from pytensor.tensor.type import dmatrix, matrix, tensor, vector
|
22 | 31 | from tests import unittest_tools as utt
|
23 | 32 | from tests.test_rop import break_op
|
@@ -231,3 +240,68 @@ def test_local_det_chol():
|
231 | 240 | f = function([X], [L, det_X, X])
|
232 | 241 | nodes = f.maker.fgraph.toposort()
|
233 | 242 | assert not any(isinstance(node, Det) for node in nodes)
|
| 243 | + |
| 244 | + |
class TestBatchedVectorBSolveToMatrixBSolve:
    """Tests for the rewrite that merges many vector-b solves into one matrix-b solve.

    The rewrite under test replaces a Blockwise solve with ``b_ndim == 1``
    (batched vector right-hand sides) by a single solve with a matrix
    right-hand side, which is only valid when ``a`` has no batch dimensions.
    """

    rewrite_name = "batched_vector_b_solve_to_matrix_b_solve"

    @staticmethod
    def any_vector_b_solve(fn):
        # True iff the compiled graph still contains a Blockwise-wrapped
        # SolveBase whose right-hand side is a vector (b_ndim == 1).
        def _is_vector_b_solve(node):
            op = node.op
            return (
                isinstance(op, Blockwise)
                and isinstance(op.core_op, SolveBase)
                and op.core_op.b_ndim == 1
            )

        return any(_is_vector_b_solve(node) for node in fn.maker.fgraph.apply_nodes)

    @pytest.mark.parametrize("solve_op", (solve, solve_triangular, cho_solve))
    def test_valid_cases(self, solve_op):
        # Seed deterministically per parametrized op.
        rng = np.random.default_rng(sum(map(ord, solve_op.__name__)))

        a = tensor(shape=(None, None))
        b = tensor(shape=(None, None, None))

        # cho_solve expects a tuple (a, lower) as its first input
        out = (
            solve_op((a, True), b, b_ndim=1)
            if solve_op is cho_solve
            else solve_op(a, b, b_ndim=1)
        )

        # Reference graph: rewrite excluded, so the vector-b solve must remain.
        ref_fn = pytensor.function(
            [a, b], out, mode=get_default_mode().excluding(self.rewrite_name)
        )
        assert self.any_vector_b_solve(ref_fn)

        # Optimized graph: rewrite included, vector-b solve must be gone.
        opt_fn = pytensor.function(
            [a, b], out, mode=get_default_mode().including(self.rewrite_name)
        )
        assert not self.any_vector_b_solve(opt_fn)

        # Both graphs must agree numerically.
        test_a = rng.normal(size=(3, 3))
        test_b = rng.normal(size=(7, 5, 3))
        np.testing.assert_allclose(
            opt_fn(test_a, test_b),
            ref_fn(test_a, test_b),
        )

    def test_invalid_batched_a(self):
        rng = np.random.default_rng(sum(map(ord, self.rewrite_name)))

        # Rewrite is not applicable if a has batched dims
        a = tensor(shape=(None, None, None))
        b = tensor(shape=(None, None, None))
        out = solve(a, b, b_ndim=1)

        # Even with the rewrite enabled, the vector-b solve must survive.
        opt_fn = pytensor.function(
            [a, b], out, mode=get_default_mode().including(self.rewrite_name)
        )
        assert self.any_vector_b_solve(opt_fn)

        # Compare against NumPy's solve vectorized over the batch dims.
        ref_fn = np.vectorize(np.linalg.solve, signature="(m,m),(m)->(m)")

        test_a = rng.normal(size=(5, 3, 3))
        test_b = rng.normal(size=(7, 5, 3))
        np.testing.assert_allclose(
            opt_fn(test_a, test_b),
            ref_fn(test_a, test_b),
        )
0 commit comments