diff --git a/doc/extending/creating_an_op.rst b/doc/extending/creating_an_op.rst index 746342ad4a..50e1e425b5 100644 --- a/doc/extending/creating_an_op.rst +++ b/doc/extending/creating_an_op.rst @@ -569,9 +569,9 @@ exception. You can use the ``assert`` keyword to automatically raise an inp = np.asarray(rng.random((5, 4)), dtype=pytensor.config.floatX) out = f(inp) # Compare the result computed to the expected value. - utt.assert_allclose(inp * 2, out) + np.testing.assert_allclose(inp * 2, out) -We call ``utt.assert_allclose(expected_value, value)`` to compare +We call ``np.testing.assert_allclose(expected_value, value)`` to compare NumPy ndarray.This raise an error message with more information. Also, the default tolerance can be changed with the PyTensor flags ``config.tensor__cmp_sloppy`` that take values in 0, 1 and 2. The diff --git a/tests/link/c/test_params_type.py b/tests/link/c/test_params_type.py index a7aa76a3a7..2c24238e9e 100644 --- a/tests/link/c/test_params_type.py +++ b/tests/link/c/test_params_type.py @@ -9,7 +9,6 @@ from pytensor.link.c.type import EnumList, Generic from pytensor.scalar import ScalarType from pytensor.tensor.type import TensorType, matrix -from tests import unittest_tools as utt tensor_type_0d = TensorType("float64", shape=tuple()) @@ -355,5 +354,5 @@ def test_op_params(self): vy1 = f1(vx) vy2 = f2(vx) ref = a * (vx**2) + b * vx + c - utt.assert_allclose(vy1, vy2) - utt.assert_allclose(ref, vy1) + np.testing.assert_allclose(vy1, vy2) + np.testing.assert_allclose(ref, vy1) diff --git a/tests/link/test_vm.py b/tests/link/test_vm.py index 8091a92ac7..f8248b2189 100644 --- a/tests/link/test_vm.py +++ b/tests/link/test_vm.py @@ -19,7 +19,6 @@ from pytensor.tensor.math import cosh, tanh from pytensor.tensor.type import lscalar, scalar, scalars, vector, vectors from pytensor.tensor.variable import TensorConstant -from tests import unittest_tools as utt class SomeOp(Op): @@ -221,7 +220,7 @@ def test_partial_function(linker): assert f(3, output_subset=[0, 1, 2]) == f(3) assert f(4, output_subset=[0, 2]) == [f(4)[0], f(4)[2]] - utt.assert_allclose(f(5), np.array([32.0, 16.0, 1.7857142857142858])) + np.testing.assert_allclose(f(5), np.array([32.0, 16.0, 1.7857142857142858])) @pytest.mark.parametrize( diff --git a/tests/scalar/test_basic.py b/tests/scalar/test_basic.py index 24f3148acb..6a5a977ca1 100644 --- a/tests/scalar/test_basic.py +++ b/tests/scalar/test_basic.py @@ -3,7 +3,6 @@ import pytensor import pytensor.tensor as pt -import tests.unittest_tools as utt from pytensor.compile.mode import Mode from pytensor.graph.fg import FunctionGraph from pytensor.link.c.basic import DualLinker @@ -477,11 +476,11 @@ def test_grad_inrange(): # x is equal to the lower or higher bound but in that case # PyTensor defines the gradient to be zero for stability. 
f = pytensor.function([x, low, high], [gx, glow, ghigh]) - utt.assert_allclose(f(0, 1, 5), [0, 0, 0]) - utt.assert_allclose(f(1, 1, 5), [0, 0, 0]) - utt.assert_allclose(f(2, 1, 5), [0, 0, 0]) - utt.assert_allclose(f(5, 1, 5), [0, 0, 0]) - utt.assert_allclose(f(7, 1, 5), [0, 0, 0]) + np.testing.assert_allclose(f(0, 1, 5), [0, 0, 0]) + np.testing.assert_allclose(f(1, 1, 5), [0, 0, 0]) + np.testing.assert_allclose(f(2, 1, 5), [0, 0, 0]) + np.testing.assert_allclose(f(5, 1, 5), [0, 0, 0]) + np.testing.assert_allclose(f(7, 1, 5), [0, 0, 0]) def test_grad_abs(): diff --git a/tests/scan/test_basic.py b/tests/scan/test_basic.py index 968cb13dbc..b2c004cb8e 100644 --- a/tests/scan/test_basic.py +++ b/tests/scan/test_basic.py @@ -354,7 +354,7 @@ def f_pow2(x_tm1): numpy_values = np.array([state * (2 ** (k + 1)) for k in range(steps)]) pytensor_values = my_f(state, steps) - utt.assert_allclose(numpy_values, pytensor_values) + np.testing.assert_allclose(numpy_values, pytensor_values) def test_inner_storage_leak(self): """ @@ -501,8 +501,8 @@ def test_only_nonseq_inputs(self): expected_out2 = np.ones(inputs.shape, dtype="int8") * n_steps out1, out2 = fun(inputs) - utt.assert_allclose(out1, expected_out1) - utt.assert_allclose(out2, expected_out2) + np.testing.assert_allclose(out1, expected_out1) + np.testing.assert_allclose(out2, expected_out2) def test_one_sequence_one_output_weights(self): """ @@ -544,7 +544,7 @@ def f_rnn(u_t, x_tm1, W_in, W): for step in range(1, 4): v_out[step] = v_u[step] * W_in + v_out[step - 1] * W pytensor_values = f2(v_u, v_x0, W_in, W) - utt.assert_allclose(pytensor_values, v_out) + np.testing.assert_allclose(pytensor_values, v_out) def test_one_sequence_one_output_weights_shared(self): """ @@ -710,7 +710,7 @@ def f_rnn(u_t): v_u = rng.uniform(-5.0, 5.0, size=(5,)) numpy_result = v_u + 3 pytensor_result = f2(v_u) - utt.assert_allclose(pytensor_result, numpy_result) + np.testing.assert_allclose(pytensor_result, numpy_result) def test_backwards(self): def f_rnn(u_t, x_tm1, W_in, W): @@ -748,7 +748,7 @@ def f_rnn(u_t, x_tm1, W_in, W): v_out[step] = v_u[3 - step] * W_in + v_out[step - 1] * W pytensor_values = f2(v_u, v_x0, W_in, W) - utt.assert_allclose(pytensor_values, v_out) + np.testing.assert_allclose(pytensor_values, v_out) def test_output_padding(self): """ @@ -836,7 +836,7 @@ def lp(x, x2): output = f([1, 2, 3, 4, 5]) expected_output = np.array([1, 2, 3], dtype="float32") - utt.assert_allclose(output, expected_output) + np.testing.assert_allclose(output, expected_output) def test_shared_arguments_with_updates(self): rng = np.random.default_rng(utt.fetch_seed()) @@ -899,11 +899,11 @@ def f(u1_t, u2_t, y0_tm3, y0_tm2, y0_tm1, y1_tm1): numpy_W1 = numpy_W1 + 0.1 numpy_W2 = numpy_W2 + 0.05 - utt.assert_allclose(pytensor_y0, numpy_y0[3:]) - utt.assert_allclose(pytensor_y1, numpy_y1[1:]) - utt.assert_allclose(pytensor_y2, numpy_y2) - utt.assert_allclose(W1.get_value(), numpy_W1) - utt.assert_allclose(W2.get_value(), numpy_W2) + np.testing.assert_allclose(pytensor_y0, numpy_y0[3:]) + np.testing.assert_allclose(pytensor_y1, numpy_y1[1:]) + np.testing.assert_allclose(pytensor_y2, numpy_y2) + np.testing.assert_allclose(W1.get_value(), numpy_W1) + np.testing.assert_allclose(W2.get_value(), numpy_W2) def test_simple_shared_random(self): pytensor_rng = RandomStream(utt.fetch_seed()) @@ -928,9 +928,9 @@ def test_simple_shared_random(self): numpy_v[i] = rng.uniform(-1, 1, size=(2,)) pytensor_v = my_f() - utt.assert_allclose(pytensor_v, numpy_v[:5, :]) + 
np.testing.assert_allclose(pytensor_v, numpy_v[:5, :]) pytensor_v = my_f() - utt.assert_allclose(pytensor_v, numpy_v[5:, :]) + np.testing.assert_allclose(pytensor_v, numpy_v[5:, :]) def test_only_shared_no_input_no_output(self): rng = np.random.default_rng(utt.fetch_seed()) @@ -948,7 +948,7 @@ def f_2(): n_steps = 3 this_f(n_steps) numpy_state = v_state * (2 ** (n_steps)) - utt.assert_allclose(state.get_value(), numpy_state) + np.testing.assert_allclose(state.get_value(), numpy_state) def test_random_as_input_to_scan(self): trng = RandomStream(123) @@ -964,8 +964,8 @@ def test_random_as_input_to_scan(self): ny1, nz1 = f(nx) ny2, nz2 = f(nx) - utt.assert_allclose([ny1, ny1], nz1) - utt.assert_allclose([ny2, ny2], nz2) + np.testing.assert_allclose([ny1, ny1], nz1) + np.testing.assert_allclose([ny2, ny2], nz2) assert not np.allclose(ny1, ny2) def test_shared_updates(self): @@ -1116,7 +1116,7 @@ def test_inner_grad(self): vR = np.array([[3.6, 1.8], [1.8, 0.9]], dtype=config.floatX) out = f(vx, vA) - utt.assert_allclose(out, vR) + np.testing.assert_allclose(out, vR) @pytest.mark.parametrize( "mode", [Mode(linker="cvm", optimizer=None), Mode(linker="cvm")] @@ -1707,7 +1707,7 @@ def reset_rng_grad_fn(*args): multiple_outputs_numeric_grad(reset_rng_cost_fn, [v_u, v_x0, vW_in]) analytic_grad = reset_rng_grad_fn(v_u, v_x0, vW_in) - utt.assert_allclose(analytic_grad[0][:2], np.zeros((2, 2))) + np.testing.assert_allclose(analytic_grad[0][:2], np.zeros((2, 2))) def test_grad_wrt_shared(self): x1 = shared(3.0) @@ -1717,7 +1717,7 @@ def test_grad_wrt_shared(self): m = grad(y.sum(), x1) f = function([x2], m, allow_input_downcast=True) - utt.assert_allclose(f([2, 3]), 5) + np.testing.assert_allclose(f([2, 3]), 5) def test_inner_grad_wrt_shared(self): x1 = scalar("x1") @@ -1793,12 +1793,12 @@ def inner_fct(inp1, inp2, inp3): expected_g_out_init = expected_g_seq[:3] expected_g_non_seq = np.array([22, 22, 22]) - utt.assert_allclose(outputs[0], expected_g_seq) - utt.assert_allclose(outputs[1], expected_g_out_init) - utt.assert_allclose(outputs[2], expected_g_non_seq) - utt.assert_allclose(outputs[3], expected_g_seq) - utt.assert_allclose(outputs[4], expected_g_out_init) - utt.assert_allclose(outputs[5], expected_g_non_seq) + np.testing.assert_allclose(outputs[0], expected_g_seq) + np.testing.assert_allclose(outputs[1], expected_g_out_init) + np.testing.assert_allclose(outputs[2], expected_g_non_seq) + np.testing.assert_allclose(outputs[3], expected_g_seq) + np.testing.assert_allclose(outputs[4], expected_g_out_init) + np.testing.assert_allclose(outputs[5], expected_g_non_seq) def test_grad_duplicate_outputs_connection_pattern(self): """ @@ -1998,9 +1998,9 @@ def rnn_fn(_u, _y, _W): vnu, vnh0, vnW = fn_rop(v_u, v_h0, v_W, v_eu, v_eh0, v_eW) tnu, tnh0, tnW = fn_test(v_u, v_h0, v_W, v_eu, v_eh0, v_eW) - utt.assert_allclose(vnu, tnu, atol=1e-6) - utt.assert_allclose(vnh0, tnh0, atol=1e-6) - utt.assert_allclose(vnW, tnW, atol=1e-6) + np.testing.assert_allclose(vnu, tnu, atol=1e-6) + np.testing.assert_allclose(vnh0, tnh0, atol=1e-6) + np.testing.assert_allclose(vnW, tnW, atol=1e-6) @pytest.mark.slow def test_R_op_2(self): @@ -2080,9 +2080,9 @@ def rnn_fn(_u, _y, _W): ) tnu, tnh0, tnW, tno = fn_test(v_u, v_h0, v_W, v_eu, v_eh0, v_eW) - utt.assert_allclose(vnu, tnu, atol=1e-6) - utt.assert_allclose(vnh0, tnh0, atol=1e-6) - utt.assert_allclose(vnW, tnW, atol=2e-6) + np.testing.assert_allclose(vnu, tnu, atol=1e-6) + np.testing.assert_allclose(vnh0, tnh0, atol=1e-6) + np.testing.assert_allclose(vnW, tnW, atol=2e-6) def 
test_R_op_mitmot(self): # this test is a copy paste from the script given by Justin Bayer to @@ -2383,8 +2383,8 @@ def test_grad_until(self): f = function([self.x, self.threshold], [r, g]) pytensor_output, pytensor_gradient = f(self.seq, 5) - utt.assert_allclose(pytensor_output, self.numpy_output) - utt.assert_allclose(pytensor_gradient, self.numpy_gradient) + np.testing.assert_allclose(pytensor_output, self.numpy_output) + np.testing.assert_allclose(pytensor_gradient, self.numpy_gradient) def test_grad_until_ndim_greater_one(self): def tile_array(inp): @@ -2402,8 +2402,8 @@ def tile_array(inp): f = function([X, self.threshold], [r, g]) pytensor_output, pytensor_gradient = f(arr, 5) - utt.assert_allclose(pytensor_output, tile_array(self.numpy_output)) - utt.assert_allclose(pytensor_gradient, tile_array(self.numpy_gradient)) + np.testing.assert_allclose(pytensor_output, tile_array(self.numpy_output)) + np.testing.assert_allclose(pytensor_gradient, tile_array(self.numpy_gradient)) def test_grad_until_and_truncate(self): n = 3 @@ -2418,8 +2418,8 @@ def test_grad_until_and_truncate(self): pytensor_output, pytensor_gradient = f(self.seq, 5) self.numpy_gradient[: 7 - n] = 0 - utt.assert_allclose(pytensor_output, self.numpy_output) - utt.assert_allclose(pytensor_gradient, self.numpy_gradient) + np.testing.assert_allclose(pytensor_output, self.numpy_output) + np.testing.assert_allclose(pytensor_gradient, self.numpy_gradient) def test_grad_until_and_truncate_sequence_taps(self): n = 3 @@ -2436,7 +2436,7 @@ def test_grad_until_and_truncate_sequence_taps(self): # Gradient computed by hand: numpy_grad = np.array([0, 0, 0, 5, 6, 10, 4, 5, 0, 0, 0, 0, 0, 0, 0]) numpy_grad = numpy_grad.astype(config.floatX) - utt.assert_allclose(pytensor_gradient, numpy_grad) + np.testing.assert_allclose(pytensor_gradient, numpy_grad) def test_mintap_onestep(): @@ -2645,7 +2645,7 @@ def numpy_implementation(vsample): t_result = my_f(v_vsample) n_result = numpy_implementation(v_vsample) - utt.assert_allclose(t_result, n_result) + np.testing.assert_allclose(t_result, n_result) def test_reordering(self, benchmark): """Test re-ordering of inputs. 
@@ -2711,8 +2711,8 @@ def f_rnn_cmpl(u1_t, u2_t, x_tm1, y_tm1, y_tm3, W_in1): f4, v_u1, v_u2, v_x0, v_y0, vW_in1 ) - utt.assert_allclose(pytensor_x, v_x) - utt.assert_allclose(pytensor_y, v_y) + np.testing.assert_allclose(pytensor_x, v_x) + np.testing.assert_allclose(pytensor_y, v_y) def test_scan_as_tensor_on_gradients(self, benchmark): to_scan = dvector("to_scan") @@ -2765,7 +2765,7 @@ def one_step(x_t, h_tm1, W): rval = np.asarray([[5187989] * 5] * 5, dtype=config.floatX) arg1 = np.ones((5, 5), dtype=config.floatX) arg2 = np.ones((10, 5), dtype=config.floatX) - utt.assert_allclose(f(arg1, arg2), rval) + np.testing.assert_allclose(f(arg1, arg2), rval) def test_use_scan_direct_output(self): """ @@ -2808,8 +2808,8 @@ def test_use_scan_direct_output(self): expected_output2.append(expected_output1[-1] + expected_output2[-1]) expected_output1.append(expected_output1[-1] + i) - utt.assert_allclose(output1, expected_output1) - utt.assert_allclose(output2, expected_output2) + np.testing.assert_allclose(output1, expected_output1) + np.testing.assert_allclose(output2, expected_output2) def test_use_scan_direct_output2(self): """ @@ -2846,8 +2846,8 @@ def test_use_scan_direct_output2(self): for i in range(5): expected_out1[i] = expected_out2[i] + x_val - utt.assert_allclose(out1, expected_out1) - utt.assert_allclose(out2, expected_out2) + np.testing.assert_allclose(out1, expected_out1) + np.testing.assert_allclose(out2, expected_out2) def test_same(self): x = fmatrix("x") @@ -2875,7 +2875,7 @@ def f(inp, mem): f_vals = f(x_val) memory.set_value(mem_val) f2_vals = f2(x_val) - utt.assert_allclose(f_vals, f2_vals) + np.testing.assert_allclose(f_vals, f2_vals) def test_eliminate_seqs(self): U = vector("U") @@ -2908,10 +2908,10 @@ def rec_fn(*args): rng = np.random.default_rng(utt.fetch_seed()) v_u = asarrayX(rng.uniform(size=(5,))) outs = f(v_u, [0, 0, 0], 0) - utt.assert_allclose(outs[0], v_u + 1) - utt.assert_allclose(outs[1], v_u + 2) - utt.assert_allclose(outs[2], v_u + 3) - utt.assert_allclose(sh.get_value(), v_u[-1] + 4) + np.testing.assert_allclose(outs[0], v_u + 1) + np.testing.assert_allclose(outs[1], v_u + 2) + np.testing.assert_allclose(outs[2], v_u + 3) + np.testing.assert_allclose(sh.get_value(), v_u[-1] + 4) def test_eliminate_nonseqs(self): W = scalar("W") @@ -2945,10 +2945,10 @@ def rec_fn(*args): rng = np.random.default_rng(utt.fetch_seed()) v_w = asarrayX(rng.uniform()) outs = f(v_w, [0, 0, 0], 0) - utt.assert_allclose(outs[0], v_w + 1) - utt.assert_allclose(outs[1], v_w + 2) - utt.assert_allclose(outs[2], v_w + 3) - utt.assert_allclose(sh.get_value(), v_w + 4) + np.testing.assert_allclose(outs[0], v_w + 1) + np.testing.assert_allclose(outs[1], v_w + 2) + np.testing.assert_allclose(outs[2], v_w + 3) + np.testing.assert_allclose(sh.get_value(), v_w + 4) def test_seq_tap_bug_jeremiah(self): inp = np.arange(10).reshape(-1, 1).astype(config.floatX) @@ -3047,7 +3047,7 @@ def inner_fn(tap_m3, tap_m2, tap_m1): states[3:6], ] - utt.assert_allclose(outputs, expected_outputs) + np.testing.assert_allclose(outputs, expected_outputs) @pytest.mark.slow def test_hessian_bug_grad_grad_two_scans(self, benchmark): @@ -3149,7 +3149,7 @@ def step(seq): # Ensure the output of the function is valid output = f(np.random.default_rng(utt.fetch_seed()).random(5)) - utt.assert_allclose(output, np.ones(5)) + np.testing.assert_allclose(output, np.ones(5)) def test_grad_bug_disconnected_input(self): W = shared(np.zeros((3, 3)), name="W") @@ -3158,7 +3158,7 @@ def test_grad_bug_disconnected_input(self): # This used 
to raise an exception f = function([v], grad(y.sum(), W)) - utt.assert_allclose(f([1, 2]), [[0, 0, 0], [1, 1, 1], [1, 1, 1]]) + np.testing.assert_allclose(f([1, 2]), [[0, 0, 0], [1, 1, 1], [1, 1, 1]]) def test_grad_find_input(self): w = shared(np.array(0, dtype="float32"), name="w") @@ -3215,7 +3215,7 @@ def f_rnn_shared(u_tm2, x_tm1, x_tm2): numpy_out = np.zeros((2,)) numpy_out[0] = vu[0] * vW_in + vx0[1] * vW + vx0[0] numpy_out[1] = vu[1] * vW_in + numpy_out[0] * vW + vx0[1] - utt.assert_allclose(numpy_out, pytensor_out) + np.testing.assert_allclose(numpy_out, pytensor_out) def test_past_future_taps_shared(self): """ @@ -3255,7 +3255,7 @@ def f_rnn_shared(u_tm2, u_tp2, x_tm1, x_tm2): # and vx0[0] as vx0[-2], vx0[1] as vx0[-1] numpy_out[0] = (vu[0] + vu[4]) * vW_in + vx0[1] * vW + vx0[0] numpy_out[1] = (vu[1] + vu[5]) * vW_in + numpy_out[0] * vW + vx0[1] - utt.assert_allclose(numpy_out, pytensor_out) + np.testing.assert_allclose(numpy_out, pytensor_out) def test_generator_one_output_scalar(self): """ @@ -3289,7 +3289,7 @@ def f_pow2(x_tm1): numpy_values = np.array([state * (2 ** (k + 1)) for k in range(steps)]) pytensor_values = my_f(state, steps) - utt.assert_allclose(numpy_values, pytensor_values[0]) + np.testing.assert_allclose(numpy_values, pytensor_values[0]) def test_default_value_broadcasted(self): def floatx(X): @@ -3576,8 +3576,8 @@ def f_rnn_cmpl(u1_t, u2_t, x_tm1, y_tm1, y_tm3, W_in1): (pytensor_dump, pytensor_x, pytensor_y) = f4(v_u1, v_u2, v_x0, v_y0, vW_in1) - utt.assert_allclose(pytensor_x, v_x[-2:]) - utt.assert_allclose(pytensor_y, v_y[-4:]) + np.testing.assert_allclose(pytensor_x, v_x[-2:]) + np.testing.assert_allclose(pytensor_y, v_y[-4:]) def test_until_random_infer_shape(self): """ @@ -3696,8 +3696,8 @@ def f_rnn_cmpl(u1_t, u2_t, x_tm1, y_tm1, W_in1): v_y[i] = np.dot(v_x[i - 1], vWout) (pytensor_x, pytensor_y) = f4(v_u1, v_u2, v_x0, v_y0, vW_in1) - utt.assert_allclose(pytensor_x, v_x) - utt.assert_allclose(pytensor_y, v_y) + np.testing.assert_allclose(pytensor_x, v_x) + np.testing.assert_allclose(pytensor_y, v_y) def test_multiple_outs_taps(self, benchmark): l = 5 diff --git a/tests/scan/test_rewriting.py b/tests/scan/test_rewriting.py index 864712a7c5..fc5b83bba2 100644 --- a/tests/scan/test_rewriting.py +++ b/tests/scan/test_rewriting.py @@ -214,7 +214,7 @@ def lambda_fn(h, W1, W2): # pytensor. 
Note that what we ask pytensor to do is to repeat the 2 # elements vector v_out 5 times sol[:, :] = v_out - utt.assert_allclose(sol, f(v_h, v_W1, v_W2)) + np.testing.assert_allclose(sol, f(v_h, v_W1, v_W2)) def test_pushout_while(self): """ @@ -257,7 +257,7 @@ def lambda_fn(step_idx, W1, W2): out = f(*input_values) out_ref = f_ref(*input_values) - utt.assert_allclose(out, out_ref) + np.testing.assert_allclose(out, out_ref) def test_pushout(self): W1 = matrix("W1") @@ -297,8 +297,8 @@ def fn(i, i_tm1): f = function([inp], [i_t, i_tm1]) val = np.arange(10).reshape(5, 2).astype(config.floatX) ret = f(val) - utt.assert_allclose(ret[0], val + 10) - utt.assert_allclose( + np.testing.assert_allclose(ret[0], val + 10) + np.testing.assert_allclose( ret[1], [[0.0, 0.0], [10.0, 11.0], [12.0, 13.0], [14.0, 15.0], [16.0, 17.0]] ) @@ -390,8 +390,8 @@ def predict_mean_i(i, x_star, s_star, X, beta, h): ) jacobian_outputs = dfdm_j(X, Y, test_m, test_s) - utt.assert_allclose(expected_output, scan_output) - utt.assert_allclose(expected_output, jacobian_outputs) + np.testing.assert_allclose(expected_output, scan_output) + np.testing.assert_allclose(expected_output, jacobian_outputs) @config.change_flags(on_opt_error="raise") def test_pushout_seqs2(self): @@ -422,7 +422,7 @@ def test_pushout_nonseq(self): outs = f() expected_outs = [[4, 4], [2, 2]] - utt.assert_allclose(outs, expected_outs) + np.testing.assert_allclose(outs, expected_outs) def test_dot_not_output(self): """ @@ -459,7 +459,7 @@ def test_dot_not_output(self): output_opt = f_opt(v_value, m_value) output_no_opt = f_no_opt(v_value, m_value) - utt.assert_allclose(output_opt, output_no_opt) + np.testing.assert_allclose(output_opt, output_no_opt) def test_dot_nitsot_output(self): """ @@ -505,8 +505,8 @@ def inner_fct(vect, mat): output_opt = f_opt(a_value, b_value) output_no_opt = f_no_opt(a_value, b_value) - utt.assert_allclose(output_opt[0], output_no_opt[0]) - utt.assert_allclose(output_opt[1], output_no_opt[1]) + np.testing.assert_allclose(output_opt[0], output_no_opt[0]) + np.testing.assert_allclose(output_opt[1], output_no_opt[1]) def test_dot_sitsot_output(self): """ @@ -551,8 +551,8 @@ def inner_fct(seq1, previous_output1, nonseq1): output_opt = f_opt(a_value, b_value) output_no_opt = f_no_opt(a_value, b_value) - utt.assert_allclose(output_opt[0], output_no_opt[0]) - utt.assert_allclose(output_opt[1], output_no_opt[1]) + np.testing.assert_allclose(output_opt[0], output_no_opt[0]) + np.testing.assert_allclose(output_opt[1], output_no_opt[1]) def test_OpFromGraph_shared(self): """Make sure that a simple `OpFromGraph` with a shared variable can be pushed out.""" @@ -618,7 +618,7 @@ def test_sum_dot(self): rng = np.random.default_rng(utt.fetch_seed()) vA = rng.uniform(size=(5, 5)).astype(config.floatX) vB = rng.uniform(size=(5, 5)).astype(config.floatX) - utt.assert_allclose(f(vA, vB), np.dot(vA.T, vB)) + np.testing.assert_allclose(f(vA, vB), np.dot(vA.T, vB)) def test_pregreedy_optimizer(self, benchmark): W = pt.zeros((5, 4)) @@ -740,7 +740,7 @@ def rnn_step1( # Compare the outputs of the two functions on the same input data. 
f_opt_output = f_opt(x_value, ri_value, zi_value) f_no_opt_output = f_no_opt(x_value, ri_value, zi_value) - utt.assert_allclose(f_opt_output, f_no_opt_output) + np.testing.assert_allclose(f_opt_output, f_no_opt_output) def test_non_zero_init(self): """Test the case where the initial value for the nitsot output is non-zero.""" @@ -793,7 +793,7 @@ def inner_fct(seq1, seq2, seq3, previous_output): output_opt = f_opt(input1_value, input2_value, input3_value) output_no_opt = f_no_opt(input1_value, input2_value, input3_value) - utt.assert_allclose(output_opt, output_no_opt) + np.testing.assert_allclose(output_opt, output_no_opt) class TestScanMerge: @@ -1113,8 +1113,8 @@ def f_rnn_shared(u0_t, u1_t, u2_t, x0_tm1, x1_tm1): # equivalent is done (pytensor_x0, pytensor_x1) = f9(vu0, vu1, vu2, vx0, vx1) # assert that pytensor does what it should - utt.assert_allclose(pytensor_x0, numpy_x0) - utt.assert_allclose(pytensor_x1, numpy_x1) + np.testing.assert_allclose(pytensor_x0, numpy_x0) + np.testing.assert_allclose(pytensor_x1, numpy_x1) @utt.assertFailure_fast def test_simple_rnn_2(self): @@ -1181,8 +1181,8 @@ def f_rnn_shared(u0_t, u1_t, u1_tp1, u2_tm1, u2_t, u2_tp1, x0_tm1, x1_tm1): # equivalent is done (pytensor_x0, pytensor_x1) = f9(vu0, vu1, vu2, vx0, vx1) # assert that pytensor does what it should - utt.assert_allclose(pytensor_x0, numpy_x0) - utt.assert_allclose(pytensor_x1, numpy_x1) + np.testing.assert_allclose(pytensor_x0, numpy_x0) + np.testing.assert_allclose(pytensor_x1, numpy_x1) @utt.assertFailure_fast def test_inplace3(self): @@ -1269,8 +1269,8 @@ def f_rnn_cmpl(u1_t, u2_t, x_tm1, y_tm1, y_tm3, W_in1): (pytensor_dump, pytensor_x, pytensor_y) = f4(v_u1, v_u2, v_x0, v_y0, vW_in1) - utt.assert_allclose(pytensor_x, v_x[-1:]) - utt.assert_allclose(pytensor_y, v_y[-1:]) + np.testing.assert_allclose(pytensor_x, v_x[-1:]) + np.testing.assert_allclose(pytensor_y, v_y[-1:]) def test_save_mem_reduced_number_of_steps(self): def f_rnn(u_t): @@ -1305,13 +1305,13 @@ def f_rnn(u_t): # compute the output in numpy tx1, tx2, tx3, tx4, tx5, tx6, tx7 = f2(v_u, 3, 15) - utt.assert_allclose(tx1, v_u[:2] + 1.0) - utt.assert_allclose(tx2, v_u[4] + 2.0) - utt.assert_allclose(tx3, v_u[3] + 3.0) - utt.assert_allclose(tx4, v_u[:3] + 4.0) - utt.assert_allclose(tx5, v_u[-10] + 5.0) - utt.assert_allclose(tx6, v_u[-15] + 6.0) - utt.assert_allclose(tx7, v_u[:-15] + 7.0) + np.testing.assert_allclose(tx1, v_u[:2] + 1.0) + np.testing.assert_allclose(tx2, v_u[4] + 2.0) + np.testing.assert_allclose(tx3, v_u[3] + 3.0) + np.testing.assert_allclose(tx4, v_u[:3] + 4.0) + np.testing.assert_allclose(tx5, v_u[-10] + 5.0) + np.testing.assert_allclose(tx6, v_u[-15] + 6.0) + np.testing.assert_allclose(tx7, v_u[:-15] + 7.0) def test_save_mem_store_steps(self): def f_rnn(u_t, x1_tm1, x1_tm3, x2_tm1, x3tm2, x3_tm1, x4_tm1): @@ -1362,11 +1362,11 @@ def f_rnn(u_t, x1_tm1, x1_tm3, x2_tm1, x3tm2, x3_tm1, x4_tm1): # compute the output in numpy tx1, tx2, tx3, tx4, tx5 = f2(v_u, [0, 0], 0, [0, 0], 0) - utt.assert_allclose(tx1, v_u[-7] + 1.0) - utt.assert_allclose(tx2, v_u[-3:-1] + 2.0) - utt.assert_allclose(tx3, v_u[-6:] + 3.0) - utt.assert_allclose(tx4, v_u[-1] + 4.0) - utt.assert_allclose(tx5, v_u[-1] + 5.0) + np.testing.assert_allclose(tx1, v_u[-7] + 1.0) + np.testing.assert_allclose(tx2, v_u[-3:-1] + 2.0) + np.testing.assert_allclose(tx3, v_u[-6:] + 3.0) + np.testing.assert_allclose(tx4, v_u[-1] + 4.0) + np.testing.assert_allclose(tx5, v_u[-1] + 5.0) def test_savemem_does_not_duplicate_number_of_scan_nodes(self): var = pt.ones(()) @@ 
-1445,7 +1445,7 @@ def get_outputs(x, w): expected_output = np.tile(x_value[:, 0].sum(0), (3, 1)).transpose() output = f(x_value, w_value) - utt.assert_allclose(output, expected_output) + np.testing.assert_allclose(output, expected_output) @pytest.mark.skip( reason="The 'assertion' of this test relied on something that no longer exists " @@ -1736,4 +1736,4 @@ def test_opt_order(): vx = np.array([[1.0, 1.0], [2.0, 2.0]], dtype=config.floatX) vA = np.array([[1.0, 1.0], [1.0, 0.0]], dtype=config.floatX) vR = np.array([[[2, 1], [4, 2]], [[2, 1], [4, 2]]], dtype=config.floatX) - utt.assert_allclose(f(vx, vA), vR) + np.testing.assert_allclose(f(vx, vA), vR) diff --git a/tests/scan/test_views.py b/tests/scan/test_views.py index 38c9b9cfcd..e3fa3e59d8 100644 --- a/tests/scan/test_views.py +++ b/tests/scan/test_views.py @@ -34,7 +34,7 @@ def test_map(): vals = rng.uniform(-5.0, 5.0, size=(10,)) abs_vals = abs(vals) pytensor_vals = f(vals) - utt.assert_allclose(abs_vals, pytensor_vals) + np.testing.assert_allclose(abs_vals, pytensor_vals) def test_reduce_memory_consumption(): @@ -66,7 +66,7 @@ def test_reduce_memory_consumption(): gx = grad(o, x) f2 = function([], gx) - utt.assert_allclose(f2(), np.ones((10,))) + np.testing.assert_allclose(f2(), np.ones((10,))) def test_foldl_memory_consumption(): @@ -99,7 +99,7 @@ def test_foldl_memory_consumption(): gx = grad(o, x) f2 = function([], gx) - utt.assert_allclose(f2(), np.ones((10,))) + np.testing.assert_allclose(f2(), np.ones((10,))) def test_foldr_memory_consumption(): @@ -132,4 +132,4 @@ def test_foldr_memory_consumption(): gx = grad(o, x) f2 = function([], gx) - utt.assert_allclose(f2(), np.ones((10,))) + np.testing.assert_allclose(f2(), np.ones((10,))) diff --git a/tests/sparse/test_basic.py b/tests/sparse/test_basic.py index 16fd5fef04..b3af3c1ac9 100644 --- a/tests/sparse/test_basic.py +++ b/tests/sparse/test_basic.py @@ -1313,7 +1313,7 @@ def test_upcast(self): scipy_result = spmat * mat assert pytensor_result.shape == scipy_result.shape assert pytensor_result.dtype == scipy_result.dtype - utt.assert_allclose(scipy_result, pytensor_result) + np.testing.assert_allclose(scipy_result, pytensor_result) def test_opt_unpack(self): # @@ -1431,7 +1431,7 @@ def test_csc_correct_output_faster_than_scipy(self): # fail if PyTensor is slower than scipy by more than a certain amount overhead_tol = 0.003 # seconds overall overhead_rtol = 1.2 # times as long - utt.assert_allclose(scipy_result, pytensor_result) + np.testing.assert_allclose(scipy_result, pytensor_result) if pytensor.config.mode == "FAST_RUN" and pytensor.config.cxx: assert pytensor_time <= overhead_rtol * scipy_time + overhead_tol @@ -1466,7 +1466,7 @@ def test_csr_correct_output_faster_than_scipy(self): # print 'scipy took', scipy_time overhead_tol = 0.002 # seconds overhead_rtol = 1.1 # times as long - utt.assert_allclose(scipy_result, pytensor_result) + np.testing.assert_allclose(scipy_result, pytensor_result) if pytensor.config.mode == "FAST_RUN" and pytensor.config.cxx: assert pytensor_time <= overhead_rtol * scipy_time + overhead_tol, ( pytensor_time, @@ -1520,7 +1520,7 @@ def test_csr_dense(self): def f_b(x, y): return x * y - utt.assert_allclose(f_a(x_v, y_v), f_b(x_v, y_v)) + np.testing.assert_allclose(f_a(x_v, y_v), f_b(x_v, y_v)) # Test infer_shape self._compile_and_check( @@ -1542,7 +1542,7 @@ def test_csc_dense(self): def f_b(x, y): return x * y - utt.assert_allclose(f_a(x_v, y_v), f_b(x_v, y_v)) + np.testing.assert_allclose(f_a(x_v, y_v), f_b(x_v, y_v)) # Test infer_shape 
self._compile_and_check( @@ -1574,7 +1574,7 @@ def f_a(x, y): vx = getattr(self, "x_" + x_f).astype(d1) vy = getattr(self, "y_" + y_f).astype(d2) - utt.assert_allclose(f_a(vx, vy).toarray(), f_b(vx, vy)) + np.testing.assert_allclose(f_a(vx, vy).toarray(), f_b(vx, vy)) # Test infer_shape f_a = pytensor.function([x, y], sparse.dot(x, y).shape) @@ -1749,7 +1749,7 @@ def f_b(z, a, x, y): else: atol = None rtol = None - utt.assert_allclose(f_a_out, f_b_out, rtol=rtol, atol=atol) + np.testing.assert_allclose(f_a_out, f_b_out, rtol=rtol, atol=atol) topo = f_a.maker.fgraph.toposort() up = pytensor.scalar.upcast(dtype1, dtype2, dtype3, dtype4) @@ -2008,7 +2008,7 @@ def test_op(self): expected = x * s assert tested.format == format - utt.assert_allclose(expected, tested.toarray()) + np.testing.assert_allclose(expected, tested.toarray()) def test_infer_shape(self): for format, cls in [("csc", sparse.ColScaleCSC), ("csr", sparse.RowScaleCSC)]: @@ -2045,7 +2045,7 @@ def test_op(self): expected = x * s assert tested.format == format - utt.assert_allclose(expected, tested.toarray()) + np.testing.assert_allclose(expected, tested.toarray()) def test_infer_shape(self): for format, cls in [("csc", sparse.RowScaleCSC), ("csr", sparse.ColScaleCSC)]: @@ -2091,7 +2091,7 @@ def test_op(self, op_type): f = pytensor.function(variable, self.op(variable[0], axis=axis)) tested = f(*data) expected = data[0].todense().sum(axis).ravel() - utt.assert_allclose(expected, tested) + np.testing.assert_allclose(expected, tested) def test_infer_shape(self): for format in sparse.sparse_formats: @@ -2130,7 +2130,7 @@ def test_op(self): tested = f(*data) expected = data[0].toarray().diagonal() - utt.assert_allclose(expected, tested) + np.testing.assert_allclose(expected, tested) def test_infer_shape(self): for format in sparse.sparse_formats: @@ -2161,7 +2161,7 @@ def test_op(self): tested = f(*data).toarray() expected = np.diag(*data) - utt.assert_allclose(expected, tested) + np.testing.assert_allclose(expected, tested) assert tested.dtype == expected.dtype assert tested.shape == expected.shape @@ -2198,7 +2198,7 @@ def test_op(self): tested = f(*data).toarray() expected = data[0].sorted_indices().toarray() - utt.assert_allclose(expected, tested) + np.testing.assert_allclose(expected, tested) def test_infer_shape(self): for format in sparse.sparse_formats: @@ -2237,7 +2237,7 @@ def test_op(self): tested = tested.toarray() expected = expected.toarray() - utt.assert_allclose(expected, tested) + np.testing.assert_allclose(expected, tested) def test_grad(self): for format in sparse.sparse_formats: @@ -2350,8 +2350,8 @@ def test_GetItemList(self): s_geta = sp.sparse.csr_matrix(A[0])[[0, 1, 2, 3, 1]].todense() s_getb = sp.sparse.csc_matrix(B[0])[[0, 1, 2, 3, 1]].todense() - utt.assert_allclose(t_geta, s_geta) - utt.assert_allclose(t_getb, s_getb) + np.testing.assert_allclose(t_geta, s_geta) + np.testing.assert_allclose(t_getb, s_getb) def test_GetItemList_wrong_index(self): a, A = sparse_random_inputs("csr", (4, 5)) @@ -2389,8 +2389,8 @@ def test_GetItem2Lists(self): s_geta = np.asarray(sp.sparse.csr_matrix(A[0])[[0, 0, 1, 3], [0, 1, 2, 4]]) s_getb = np.asarray(sp.sparse.csc_matrix(B[0])[[0, 0, 1, 3], [0, 1, 2, 4]]) - utt.assert_allclose(t_geta, s_geta) - utt.assert_allclose(t_getb, s_getb) + np.testing.assert_allclose(t_geta, s_geta) + np.testing.assert_allclose(t_getb, s_getb) def test_GetItem2Lists_wrong_index(self): a, A = sparse_random_inputs("csr", (4, 5)) @@ -2651,9 +2651,9 @@ def test_cast(self): t_cls = t_cls.toarray() t_prop = 
t_prop.toarray() - utt.assert_allclose(expected, t_func) - utt.assert_allclose(expected, t_cls) - utt.assert_allclose(expected, t_prop) + np.testing.assert_allclose(expected, t_func) + np.testing.assert_allclose(expected, t_cls) + np.testing.assert_allclose(expected, t_prop) @pytest.mark.slow def test_infer_shape(self): @@ -2723,7 +2723,7 @@ def test_op(self): tested = f(*blocks) expected = self.expected_f(blocks, format=out_f, dtype=dtype) - utt.assert_allclose(expected.toarray(), tested.toarray()) + np.testing.assert_allclose(expected.toarray(), tested.toarray()) assert tested.format == expected.format assert tested.dtype == expected.dtype @@ -2800,7 +2800,7 @@ def test_op(self): tested = f(*self.a[format]) expected = 2 * self.a[format][0] - utt.assert_allclose(expected.toarray(), tested.toarray()) + np.testing.assert_allclose(expected.toarray(), tested.toarray()) assert tested.format == expected.format assert tested.dtype == expected.dtype @@ -2894,7 +2894,7 @@ def test_op(self): tested = tested.toarray() try: - utt.assert_allclose(expected, tested) + np.testing.assert_allclose(expected, tested) except AssertionError: raise AssertionError(self.__name__) @@ -2959,7 +2959,7 @@ def test_op(self): tested = tested.toarray() try: - utt.assert_allclose(tested, expected, rtol=1e-2) + np.testing.assert_allclose(tested, expected, rtol=1e-2) except AssertionError: raise AssertionError(self.__name__) @@ -3201,7 +3201,7 @@ def test_mul_s_v(self): out = f(spmat, mat) - utt.assert_allclose(spmat.toarray() * mat, out.toarray()) + np.testing.assert_allclose(spmat.toarray() * mat, out.toarray()) class TestStructuredAddSV: @@ -3231,7 +3231,7 @@ def test_structured_add_s_v(self): out = f(spmat, mat) - utt.assert_allclose( + np.testing.assert_allclose( as_ndarray(spones.multiply(spmat + mat)), out.toarray() ) @@ -3259,7 +3259,7 @@ def test_op_ss(self): assert tested.format == format assert tested.dtype == expected.dtype tested = tested.toarray() - utt.assert_allclose(tested, expected) + np.testing.assert_allclose(tested, expected) def test_op_sd(self): for format in sparse.sparse_formats: @@ -3278,7 +3278,7 @@ def test_op_sd(self): assert tested.format == format assert tested.dtype == expected.dtype tested = tested.toarray() - utt.assert_allclose(tested, expected) + np.testing.assert_allclose(tested, expected) def test_infer_shape(self): for format in sparse.sparse_formats: @@ -3339,7 +3339,7 @@ def test_op(self): x, y, p = self.a expected = p.multiply(np.dot(x, y.T)) - utt.assert_allclose(as_ndarray(expected), tested.toarray()) + np.testing.assert_allclose(as_ndarray(expected), tested.toarray()) assert tested.format == "csr" assert tested.dtype == expected.dtype @@ -3351,7 +3351,7 @@ def test_negative_stride(self): x, y, p = a2 expected = p.multiply(np.dot(x, y.T)) - utt.assert_allclose(as_ndarray(expected), tested.toarray()) + np.testing.assert_allclose(as_ndarray(expected), tested.toarray()) assert tested.format == "csr" assert tested.dtype == expected.dtype diff --git a/tests/sparse/test_rewriting.py b/tests/sparse/test_rewriting.py index 2ceb904213..e06357e698 100644 --- a/tests/sparse/test_rewriting.py +++ b/tests/sparse/test_rewriting.py @@ -10,7 +10,6 @@ from pytensor.tensor.basic import as_tensor_variable from pytensor.tensor.math import sum as pt_sum from pytensor.tensor.type import ivector, matrix, vector -from tests import unittest_tools as utt from tests.sparse.test_basic import random_lil @@ -173,4 +172,4 @@ def test_sd_csc(): res = sd_csc(a_val, a_ind, a_ptr, nrows, b).eval() - 
utt.assert_allclose(res, target) + np.testing.assert_allclose(res, target) diff --git a/tests/tensor/conv/test_abstract_conv.py b/tests/tensor/conv/test_abstract_conv.py index 5f6e2afc92..788281dc0b 100644 --- a/tests/tensor/conv/test_abstract_conv.py +++ b/tests/tensor/conv/test_abstract_conv.py @@ -559,7 +559,7 @@ def run_fwd( res_ref = np.array(f_ref()) res = np.array(f()) - utt.assert_allclose(res_ref, res) + np.testing.assert_allclose(res_ref, res) if ( verify_grad and inputs_val.size > 0 @@ -644,7 +644,7 @@ def run_gradweight( res_ref = np.array(f_ref()) res = np.array(f()) - utt.assert_allclose(res_ref, res) + np.testing.assert_allclose(res_ref, res) def abstract_conv_gradweight(inputs_val, output_val): conv_op = gradWeights_fn( @@ -731,7 +731,7 @@ def run_gradinput( if ref is not None: res_ref = np.array(f_ref()) - utt.assert_allclose(res_ref, res) + np.testing.assert_allclose(res_ref, res) def abstract_conv_gradinputs(filters_val, output_val): conv_op = gradInputs_fn( @@ -1444,13 +1444,13 @@ def test_bilinear_kernel_2D(self): kernel = bilinear_kernel_2D(ratio=ratio, normalize=False) f = pytensor.function([], kernel) kernel_2D = self.numerical_kernel_2D(ratio) - utt.assert_allclose(kernel_2D, f()) + np.testing.assert_allclose(kernel_2D, f()) # getting the normalized kernel kernel = bilinear_kernel_2D(ratio=ratio, normalize=True) f = pytensor.function([], kernel) kernel_2D = kernel_2D / float(ratio**2) - utt.assert_allclose(kernel_2D, f()) + np.testing.assert_allclose(kernel_2D, f()) def test_bilinear_kernel_1D(self): # Test 1D kernels used in bilinear upsampling @@ -1471,15 +1471,15 @@ def test_bilinear_kernel_1D(self): kernel = bilinear_kernel_1D(ratio=ratio, normalize=False) f = pytensor.function([], kernel) kernel_1D = self.numerical_kernel_1D(ratio) - utt.assert_allclose(kernel_1D, f()) - utt.assert_allclose(kernel_1D, f_ten(ratio)) + np.testing.assert_allclose(kernel_1D, f()) + np.testing.assert_allclose(kernel_1D, f_ten(ratio)) # getting the normalized kernel kernel = bilinear_kernel_1D(ratio=ratio, normalize=True) f = pytensor.function([], kernel) kernel_1D = kernel_1D / float(ratio) - utt.assert_allclose(kernel_1D, f()) - utt.assert_allclose(kernel_1D, f_ten_norm(ratio)) + np.testing.assert_allclose(kernel_1D, f()) + np.testing.assert_allclose(kernel_1D, f_ten_norm(ratio)) def numerical_upsampling_multiplier(self, ratio): """ @@ -1564,7 +1564,7 @@ def test_bilinear_upsampling_1D(self): ) f = pytensor.function([], bilin_mat, mode=self.compile_mode) up_mat_2d = self.get_upsampled_twobytwo_mat(input_x, ratio) - utt.assert_allclose(f(), up_mat_2d, rtol=1e-06) + np.testing.assert_allclose(f(), up_mat_2d, rtol=1e-06) def test_bilinear_upsampling_reshaping(self): # Test bilinear upsampling without giving shape information @@ -1586,7 +1586,7 @@ def test_bilinear_upsampling_reshaping(self): ) f = pytensor.function([], bilin_mat, mode=self.compile_mode) up_mat_2d = self.get_upsampled_twobytwo_mat(input_x, ratio) - utt.assert_allclose(f(), up_mat_2d, rtol=1e-06) + np.testing.assert_allclose(f(), up_mat_2d, rtol=1e-06) def test_compare_1D_and_2D_upsampling_values(self): # Compare 1D and 2D upsampling @@ -1614,7 +1614,7 @@ def test_compare_1D_and_2D_upsampling_values(self): ) f_1D = pytensor.function([], mat_1D, mode=self.compile_mode) f_2D = pytensor.function([], mat_2D, mode=self.compile_mode) - utt.assert_allclose(f_1D(), f_2D(), rtol=1e-06) + np.testing.assert_allclose(f_1D(), f_2D(), rtol=1e-06) # checking upsampling with ratio 8 input_x = rng.random((12, 11, 10, 
7)).astype(config.floatX) @@ -1634,7 +1634,7 @@ def test_compare_1D_and_2D_upsampling_values(self): ) f_1D = pytensor.function([], mat_1D, mode=self.compile_mode) f_2D = pytensor.function([], mat_2D, mode=self.compile_mode) - utt.assert_allclose(f_1D(), f_2D(), rtol=1e-06) + np.testing.assert_allclose(f_1D(), f_2D(), rtol=1e-06) def test_fractional_bilinear_upsampling(self): """Test bilinear upsampling with nonsimilar fractional @@ -1671,7 +1671,7 @@ def test_fractional_bilinear_upsampling(self): ] ).astype(config.floatX) f_up_x = pytensor.function([], up_x, mode=self.compile_mode) - utt.assert_allclose(f_up_x(), num_up_x, rtol=1e-6) + np.testing.assert_allclose(f_up_x(), num_up_x, rtol=1e-6) def test_fractional_bilinear_upsampling_shape(self): x = np.random.random((1, 1, 200, 200)).astype(config.floatX) @@ -1680,7 +1680,7 @@ def test_fractional_bilinear_upsampling_shape(self): pt.as_tensor_variable(x), frac_ratio=resize, use_1D_kernel=False ) out = pytensor.function([], z.shape, mode="FAST_RUN")() - utt.assert_allclose(out, (1, 1, 240, 240)) + np.testing.assert_allclose(out, (1, 1, 240, 240)) class TestConv2dTranspose: @@ -1811,7 +1811,7 @@ def test_conv2d_grad_wrt_inputs(self): ) # check that they're equal - utt.assert_allclose( + np.testing.assert_allclose( f_new(filter_val, out_grad_val), f_old(input_val, filter_val, out_grad_val), ) @@ -1871,7 +1871,7 @@ def test_conv2d_grad_wrt_weights(self): f_new = pytensor.function( [self.x, self.output_grad_wrt], conv_wrt_w_out ) - utt.assert_allclose( + np.testing.assert_allclose( f_new(input_val, out_grad_val), f_old(input_val, filter_val, out_grad_val), ) @@ -1954,7 +1954,7 @@ def test_fwd(self): ] ref_concat_output = np.concatenate(ref_concat_output, axis=1) - utt.assert_allclose(grouped_output, ref_concat_output) + np.testing.assert_allclose(grouped_output, ref_concat_output) utt.verify_grad(grouped_conv_op, [img, kern], mode=self.mode, eps=1) @@ -2008,7 +2008,7 @@ def test_gradweights(self): ] ref_concat_output = np.concatenate(ref_concat_output, axis=0) - utt.assert_allclose(grouped_output, ref_concat_output) + np.testing.assert_allclose(grouped_output, ref_concat_output) def conv_gradweight(inputs_val, output_val): return grouped_convgrad_op( @@ -2069,7 +2069,7 @@ def test_gradinputs(self): ] ref_concat_output = np.concatenate(ref_concat_output, axis=1) - utt.assert_allclose(grouped_output, ref_concat_output) + np.testing.assert_allclose(grouped_output, ref_concat_output) def conv_gradinputs(filters_val, output_val): return grouped_convgrad_op( @@ -2203,11 +2203,11 @@ def test_interface2d(self): # test for square matrix top = fun(self.x, self.depthwise_filter, self.pointwise_filter) - utt.assert_allclose(top, self.precomp_output_valid) + np.testing.assert_allclose(top, self.precomp_output_valid) # test for non-square matrix top = fun(self.x[:, :, :3, :], self.depthwise_filter, self.pointwise_filter) - utt.assert_allclose(top, self.precomp_output_valid[:, :, :1, :]) + np.testing.assert_allclose(top, self.precomp_output_valid[:, :, :1, :]) # test if it infers shape sep_op = separable_conv2d( @@ -2223,7 +2223,7 @@ def test_interface2d(self): [x_sym, dfilter_sym, pfilter_sym], sep_op, mode="FAST_RUN" ) top = fun(self.x, self.depthwise_filter, self.pointwise_filter) - utt.assert_allclose(top, self.precomp_output_valid) + np.testing.assert_allclose(top, self.precomp_output_valid) # test non-default subsample sep_op = separable_conv2d( @@ -2233,7 +2233,7 @@ def test_interface2d(self): [x_sym, dfilter_sym, pfilter_sym], sep_op, mode="FAST_RUN" ) 
top = fun(self.x, self.depthwise_filter, self.pointwise_filter) - utt.assert_allclose( + np.testing.assert_allclose( top, np.delete(np.delete(self.precomp_output_valid, 1, axis=3), 1, axis=2) ) @@ -2245,7 +2245,7 @@ def test_interface2d(self): [x_sym, dfilter_sym, pfilter_sym], sep_op, mode="FAST_RUN" ) top = fun(self.x[:, :, :3, :3], self.depthwise_filter, self.pointwise_filter) - utt.assert_allclose(top, self.precomp_output_full) + np.testing.assert_allclose(top, self.precomp_output_full) @pytest.mark.skipif(config.cxx == "", reason="test needs cxx") def test_interface3d(self): @@ -2271,10 +2271,10 @@ def test_interface3d(self): # test for square matrix top = fun(x, depthwise_filter, pointwise_filter) - utt.assert_allclose(top, precomp_output) + np.testing.assert_allclose(top, precomp_output) # test for non-square matrix top = fun(x[:, :, :3, :, :3], depthwise_filter, pointwise_filter) - utt.assert_allclose(top, precomp_output[:, :, :1, :, :1]) + np.testing.assert_allclose(top, precomp_output[:, :, :1, :, :1]) # test if it infers shape sep_op = separable_conv3d( x_sym, @@ -2289,7 +2289,7 @@ def test_interface3d(self): [x_sym, dfilter_sym, pfilter_sym], sep_op, mode="FAST_RUN" ) top = fun(x, depthwise_filter, pointwise_filter) - utt.assert_allclose(top, precomp_output) + np.testing.assert_allclose(top, precomp_output) # test non-default subsample sep_op = separable_conv3d( @@ -2299,7 +2299,7 @@ def test_interface3d(self): [x_sym, dfilter_sym, pfilter_sym], sep_op, mode="FAST_RUN" ) top = fun(x, depthwise_filter, pointwise_filter) - utt.assert_allclose( + np.testing.assert_allclose( top, np.delete( np.delete(np.delete(precomp_output, 1, axis=4), 1, axis=3), 1, axis=2 @@ -2317,7 +2317,7 @@ def test_interface3d(self): [x_sym, dfilter_sym, pfilter_sym], sep_op, mode="FAST_RUN" ) top = fun(x[:, :, :3, :3, :3], depthwise_filter, pointwise_filter) - utt.assert_allclose(top, precomp_output) + np.testing.assert_allclose(top, precomp_output) @pytest.mark.skipif( @@ -2406,7 +2406,7 @@ def test_fwd(self): for j in range(0, kshp[2]): single_kern = kern[:, i, j, ...].reshape(single_kshp) ref_val = ref_func(img, single_kern) - utt.assert_allclose( + np.testing.assert_allclose( ref_val[:, :, i, j], unshared_output[:, :, i, j] ) @@ -2469,7 +2469,9 @@ def test_gradweight(self): top_single = np.zeros_like(top) top_single[:, :, i, j] = top[:, :, i, j] ref_output = ref_func(img, top_single) - utt.assert_allclose(unshared_output[:, i, j, ...], ref_output) + np.testing.assert_allclose( + unshared_output[:, i, j, ...], ref_output + ) def conv_gradweight(inputs_val, output_val): return unshared_conv_op( @@ -2541,7 +2543,7 @@ def test_gradinput(self): top_single[:, :, i, j] = top[:, :, i, j] ref_output += ref_func(single_kern, top_single) - utt.assert_allclose(ref_output, unshared_output) + np.testing.assert_allclose(ref_output, unshared_output) def conv_gradinputs(filters_val, output_val): return unshared_conv_op( @@ -2613,7 +2615,7 @@ def test_fwd(self): ] = img ref_output = ref_func(exp_img, kern) - utt.assert_allclose(asymmetric_output, ref_output) + np.testing.assert_allclose(asymmetric_output, ref_output) utt.verify_grad(asymmetric_conv_op, [img, kern], mode=self.mode, eps=1) @@ -2665,7 +2667,7 @@ def test_gradweight(self): ] = img ref_output = ref_func(exp_img, top) - utt.assert_allclose(asymmetric_output, ref_output) + np.testing.assert_allclose(asymmetric_output, ref_output) def conv_gradweight(inputs_val, output_val): return asymmetric_conv_op( @@ -2719,7 +2721,7 @@ def test_gradinput(self): :, :, 
pad[0][0] : imshp[2] + pad[0][0], pad[1][0] : imshp[3] + pad[1][0] ] - utt.assert_allclose(asymmetric_output, ref_output) + np.testing.assert_allclose(asymmetric_output, ref_output) def conv_gradinputs(filters_val, output_val): return asymmetric_conv_op( @@ -2766,7 +2768,7 @@ def test_interface(self): output = causal_func(self.img, self.kern) - utt.assert_allclose(output, self.precomp_top) + np.testing.assert_allclose(output, self.precomp_top) def causal_conv_fn(inputs_val, filters_val): return causal_conv1d( diff --git a/tests/tensor/rewriting/test_basic.py b/tests/tensor/rewriting/test_basic.py index c62398e36f..b6baf529b6 100644 --- a/tests/tensor/rewriting/test_basic.py +++ b/tests/tensor/rewriting/test_basic.py @@ -390,7 +390,7 @@ def test_advanced_inc_subtensor(self): r1 = f1(x_value, i_value, y_value) r2 = f2(x_value, i_value, y_value) - utt.assert_allclose(r1, r2) + np.testing.assert_allclose(r1, r2) # Check stacktrace was copied over correctly after rewrite was applied assert check_stack_trace(f1, ops_to_check=AdvancedIncSubtensor1) @@ -422,7 +422,7 @@ def test_advanced_inc_subtensor1(self): r1 = f1(x_value, i_value, y_value) r2 = f2(x_value, i_value, y_value) - utt.assert_allclose(r1, r2) + np.testing.assert_allclose(r1, r2) assert check_stack_trace(f1, ops_to_check=AdvancedIncSubtensor1) assert check_stack_trace(f2, ops_to_check="all") @@ -453,7 +453,7 @@ def test_incsubtensor(self): r1 = f1(x_value, i_value, y_value) r2 = f2(x_value, i_value, y_value) - utt.assert_allclose(r1, r2) + np.testing.assert_allclose(r1, r2) assert check_stack_trace(f1, ops_to_check="last") assert check_stack_trace(f2, ops_to_check="last") @@ -1066,7 +1066,7 @@ def test_broadcasting_3(self): f = function([x, y], z, mode=self.mode) vx = np.array([[0, 1], [1, 0]], dtype="int32") vy = np.array([7, 8], dtype="int64") - utt.assert_allclose(f(vx, vy), np.where(vx, vy, vy)) + np.testing.assert_allclose(f(vx, vy), np.where(vx, vy, vy)) assert isinstance(f.maker.fgraph.outputs[0].owner.op, Alloc) assert not any(node.op == pt.switch for node in f.maker.fgraph.toposort()) diff --git a/tests/tensor/rewriting/test_elemwise.py b/tests/tensor/rewriting/test_elemwise.py index 7b25192a89..48821de2b2 100644 --- a/tests/tensor/rewriting/test_elemwise.py +++ b/tests/tensor/rewriting/test_elemwise.py @@ -67,7 +67,6 @@ vector, vectors, ) -from tests import unittest_tools as utt dimshuffle_lift = out2in(local_dimshuffle_lift) @@ -1443,22 +1442,22 @@ def test_local_useless_composite_outputs(): assert len(topo[0].inputs) == 2 assert len(topo[0].outputs) == 2 res1, res2 = f([[1.0]], [[1.0]], [[np.nan]]) - utt.assert_allclose(res1, [[2.0]]) - utt.assert_allclose(res2, [[0.0]]) + np.testing.assert_allclose(res1, [[2.0]]) + np.testing.assert_allclose(res2, [[0.0]]) f = function([X, Y, Z], o1, mode=mode) topo = f.maker.fgraph.toposort() assert len(topo) == 1 assert len(topo[0].inputs) == 1 assert len(topo[0].outputs) == 1 - utt.assert_allclose(f([[1.0]], [[np.nan]], [[np.nan]]), [[2.0]]) + np.testing.assert_allclose(f([[1.0]], [[np.nan]], [[np.nan]]), [[2.0]]) f = function([X, Y, Z], o2, mode=mode) topo = f.maker.fgraph.toposort() assert len(topo) == 1 assert len(topo[0].inputs) == 1 assert len(topo[0].outputs) == 1 - utt.assert_allclose(f([[np.nan]], [[1.0]], [[np.nan]]), [[0.0]]) + np.testing.assert_allclose(f([[np.nan]], [[1.0]], [[np.nan]]), [[0.0]]) @pytest.mark.parametrize("const_shape", [(), (1,), (5,), (1, 5), (2, 5)]) diff --git a/tests/tensor/rewriting/test_linalg.py b/tests/tensor/rewriting/test_linalg.py index 
9cdb69ce6b..0bb30552d8 100644 --- a/tests/tensor/rewriting/test_linalg.py +++ b/tests/tensor/rewriting/test_linalg.py @@ -3,7 +3,6 @@ import numpy as np import pytest import scipy.linalg -from numpy.testing import assert_allclose import pytensor from pytensor import function @@ -117,7 +116,7 @@ def test_generic_solve_to_solve_triangular(): assert not any(isinstance(op, Solve) for op in op_list) assert any(isinstance(op, SolveTriangular) for op in op_list) - assert_allclose( + np.testing.assert_allclose( f(X, eye) @ X_chol, eye, atol=1e-8 if config.floatX.endswith("64") else 1e-4 ) @@ -128,7 +127,7 @@ def test_generic_solve_to_solve_triangular(): op_list = [node.op for node in toposort] assert not any(isinstance(op, Solve) for op in op_list) assert any(isinstance(op, SolveTriangular) for op in op_list) - assert_allclose( + np.testing.assert_allclose( f(X, eye).T @ X_chol, eye, atol=1e-8 if config.floatX.endswith("64") else 1e-4, diff --git a/tests/tensor/rewriting/test_math.py b/tests/tensor/rewriting/test_math.py index dc7927db05..c3593ee0ce 100644 --- a/tests/tensor/rewriting/test_math.py +++ b/tests/tensor/rewriting/test_math.py @@ -598,7 +598,7 @@ def test_mul_div_cases(self): f = function(list(sym_inputs), g, mode=mode) out = f(*val_inputs) assert out_dtype == out.dtype - utt.assert_allclose(out, val_inputs[1]) + np.testing.assert_allclose(out, val_inputs[1]) topo = f.maker.fgraph.toposort() assert not any(node.op == pt.true_div for node in topo) @@ -618,7 +618,7 @@ def test_mul_div_cases(self): ): f = function(list(sym_inputs), g, mode=mode) out = f(*val_inputs) - utt.assert_allclose(out, (1 / val_inputs[1])) + np.testing.assert_allclose(out, (1 / val_inputs[1])) topo = f.maker.fgraph.toposort() elem = [t for t in topo if isinstance(t.op, Elemwise)] assert len(elem) == nb_elemwise @@ -699,7 +699,7 @@ def test_mul_div_cases(self): ): f = function(list(sym_inputs), g, mode=mode) out = f(*val_inputs) - utt.assert_allclose(out, (val_inputs[0] / val_inputs[3])) + np.testing.assert_allclose(out, (val_inputs[0] / val_inputs[3])) topo = f.maker.fgraph.toposort() assert len(topo) == 1 assert isinstance(topo[0].op, (Elemwise,)) @@ -749,7 +749,7 @@ def test_mul_div_cases(self): out_dtype = out_dtype[config.cast_policy] f = function(list(sym_inputs), g, mode=mode) out = f(*val_inputs) - utt.assert_allclose(out, (0.5 * val_inputs[0] / val_inputs[1])) + np.testing.assert_allclose(out, (0.5 * val_inputs[0] / val_inputs[1])) topo = f.maker.fgraph.toposort() assert len(topo) == 2 assert isinstance(topo[0].op, (Elemwise,)) @@ -791,7 +791,7 @@ def test_mul_div_cases(self): out_dtype = out_dtype[config.cast_policy] f = function(list(sym_inputs), g, mode=mode) out = f(*val_inputs) - utt.assert_allclose(out, val_inputs[0]) + np.testing.assert_allclose(out, val_inputs[0]) topo = f.maker.fgraph.toposort() assert len(topo) == 1 topo[0].op == deep_copy_op @@ -811,7 +811,7 @@ def test_mul_div_cases(self): f = function(list(sym_inputs), g, mode=mode) out = f(*val_inputs) assert np.all(np.isfinite(out)) - utt.assert_allclose(out, np.sign(val_inputs[0])) + np.testing.assert_allclose(out, np.sign(val_inputs[0])) assert out_dtype == out.dtype assert len(f.maker.fgraph.toposort()) == 1 @@ -859,7 +859,7 @@ def test_mul_div_cases(self): topo = f.maker.fgraph.toposort() out = f(*val_inputs) assert np.all(np.isfinite(out)) - utt.assert_allclose(out, np.sign(val_inputs[0]) * 2 / 3) + np.testing.assert_allclose(out, np.sign(val_inputs[0]) * 2 / 3) assert out_dtype == out.dtype def test_abs_mul_div(self): @@ -922,7 +922,9 
@@ def test_multiple_case_that_fail(self): ]: f = function(list(sym_inputs), g, mode=mode) out = f(*val_inputs) - utt.assert_allclose(out, val_inputs[0] / val_inputs[1] / val_inputs[2]) + np.testing.assert_allclose( + out, val_inputs[0] / val_inputs[1] / val_inputs[2] + ) topo = f.maker.fgraph.toposort() assert len(topo) == 2 assert isinstance(topo[0].op, (Elemwise,)) @@ -937,7 +939,9 @@ def test_multiple_case_that_fail(self): ]: f = function(list(sym_inputs), g, mode=mode) out = f(*val_inputs) - utt.assert_allclose(out, val_inputs[0] / (val_inputs[1] / val_inputs[2])) + np.testing.assert_allclose( + out, val_inputs[0] / (val_inputs[1] / val_inputs[2]) + ) topo = f.maker.fgraph.toposort() assert len(topo) == 2 assert isinstance(topo[0].op, (Elemwise,)) @@ -1071,7 +1075,7 @@ def sigm(x): betaval = np.random.random(5) aval = np.random.random(5) - utt.assert_allclose( + np.testing.assert_allclose( f2(ival, wval, visbval, hidbval, betaval, aval), f1(ival, wval, visbval, hidbval, betaval, aval), ) @@ -1168,10 +1172,10 @@ def test_local_log_add_exp(): # test that it gives the correct result when it doesn't overflow f([10], [10]) # doesn't causes overflow - utt.assert_allclose(f([10], [10]), 10 + np.log1p(1)) + np.testing.assert_allclose(f([10], [10]), 10 + np.log1p(1)) assert np.isfinite(f([10000], [10000])) # causes overflow if handled incorrectly - utt.assert_allclose(f([10000], [10000]), 10000 + np.log1p(1)) + np.testing.assert_allclose(f([10000], [10000]), 10000 + np.log1p(1)) # test that when max = +-inf, rewritten output still works correctly assert f([-np.inf], [-np.inf]) == -np.inf @@ -1184,7 +1188,7 @@ def test_local_log_add_exp(): f = function([x, y], log(exp(x) + exp(y) + exp(x - y) + exp(x + y)), mode=m) assert np.isfinite(f([10000], [10000])) # causes overflow if handled incorrectly - utt.assert_allclose(f([10000], [10000]), 20000) + np.testing.assert_allclose(f([10000], [10000]), 20000) # TODO: test that the rewrite works in the presence of broadcasting. @@ -1264,7 +1268,7 @@ def test_local_elemwise_sub_zeros(): assert isinstance( f.maker.fgraph.toposort()[0].inputs[1], TensorConstant ) or isinstance(f.maker.fgraph.toposort()[0].inputs[1], TensorConstant) - utt.assert_allclose(f(scalar_val), 0.0) + np.testing.assert_allclose(f(scalar_val), 0.0) assert check_stack_trace(f, ops_to_check="all") # Test vector minus vector @@ -1274,7 +1278,7 @@ def test_local_elemwise_sub_zeros(): assert isinstance( f.maker.fgraph.toposort()[0].inputs[1], TensorConstant ) or isinstance(f.maker.fgraph.toposort()[0].inputs[1], TensorConstant) - utt.assert_allclose(f(vect_val), np.zeros(vect_val.shape)) + np.testing.assert_allclose(f(vect_val), np.zeros(vect_val.shape)) assert check_stack_trace(f, ops_to_check="all") # Test vector minus vector @@ -1284,7 +1288,7 @@ def test_local_elemwise_sub_zeros(): assert isinstance( f.maker.fgraph.toposort()[0].inputs[1], TensorConstant ) or isinstance(f.maker.fgraph.toposort()[0].inputs[1], TensorConstant) - utt.assert_allclose(f(mat_val), np.zeros(mat_val.shape)) + np.testing.assert_allclose(f(mat_val), np.zeros(mat_val.shape)) assert check_stack_trace(f, ops_to_check="all") @@ -1449,7 +1453,7 @@ def test_shape_inequality_with_self(self): f = function([x], minimum([0, 0], x.shape[0]), mode=mode) # This case isn't rewritten. 
# self.assert_eqs_const(f, 0) - utt.assert_allclose(f(x_val), [0, 0]) + np.testing.assert_allclose(f(x_val), [0, 0]) def test_shape_add_inequality(self): x = vector("x", dtype=config.floatX) @@ -1658,41 +1662,41 @@ def test_local_pow_specialize(): f = function([v], v**0, mode=mode) nodes = [node.op for node in f.maker.fgraph.toposort()] assert nodes == [Shape_i(0), pt.alloc] - utt.assert_allclose(f(val), val**0) + np.testing.assert_allclose(f(val), val**0) f = function([v], v**1, mode=mode) nodes = [node.op for node in f.maker.fgraph.toposort()] nodes == [deep_copy_op] - utt.assert_allclose(f(val), val**1) + np.testing.assert_allclose(f(val), val**1) f = function([v], v ** (-1), mode=mode) nodes = [node.op for node in f.maker.fgraph.toposort()] assert nodes == [reciprocal] - utt.assert_allclose(f(val_no0), val_no0 ** (-1)) + np.testing.assert_allclose(f(val_no0), val_no0 ** (-1)) f = function([v], v**2, mode=mode) nodes = [node.op for node in f.maker.fgraph.toposort()] assert nodes == [sqr] - utt.assert_allclose(f(val), val**2) + np.testing.assert_allclose(f(val), val**2) f = function([v], v ** (-2), mode=mode) nodes = [node.op for node in f.maker.fgraph.toposort()] assert len(nodes) == 2 assert nodes[0] == sqr assert isinstance(nodes[1].scalar_op, ps.basic.Reciprocal) - utt.assert_allclose(f(val_no0), val_no0 ** (-2)) + np.testing.assert_allclose(f(val_no0), val_no0 ** (-2)) f = function([v], v ** (0.5), mode=mode) nodes = [node.op for node in f.maker.fgraph.toposort()] assert nodes == [sqrt] - utt.assert_allclose(f(val), val ** (0.5)) + np.testing.assert_allclose(f(val), val ** (0.5)) f = function([v], v ** (-0.5), mode=mode) nodes = [node.op for node in f.maker.fgraph.toposort()] assert len(nodes) == 2 assert nodes[0] == sqrt assert isinstance(nodes[1].scalar_op, ps.basic.Reciprocal) - utt.assert_allclose(f(val_no0), val_no0 ** (-0.5)) + np.testing.assert_allclose(f(val_no0), val_no0 ** (-0.5)) twos = np.full(shape=(10,), fill_value=2.0).astype(config.floatX) f = function([v], v**twos, mode=mode) @@ -1704,7 +1708,7 @@ def test_local_pow_specialize(): else: assert isinstance(topo[0].op, SpecifyShape) assert topo[1].op == sqr - utt.assert_allclose(f(val), val**twos) + np.testing.assert_allclose(f(val), val**twos) def test_local_pow_to_nested_squaring(): @@ -1722,7 +1726,7 @@ def test_local_pow_to_nested_squaring(): assert len(nodes) == 1 assert len(f.maker.fgraph.toposort()[0].op.scalar_op.fgraph.apply_nodes) == 6 assert isinstance(nodes[0].scalar_op, ps.Composite) - utt.assert_allclose(f(val), val**15) + np.testing.assert_allclose(f(val), val**15) f = function([v], v ** (-15), mode=mode) nodes = [node.op for node in f.maker.fgraph.toposort()] @@ -1730,14 +1734,14 @@ def test_local_pow_to_nested_squaring(): assert len(f.maker.fgraph.toposort()[0].op.scalar_op.fgraph.apply_nodes) == 6 assert isinstance(nodes[0].scalar_op, ps.Composite) assert isinstance(nodes[-1].scalar_op, ps.basic.Reciprocal) - utt.assert_allclose(f(val_no0), val_no0 ** (-15)) + np.testing.assert_allclose(f(val_no0), val_no0 ** (-15)) f = function([v], v ** (16), mode=mode) nodes = [node.op for node in f.maker.fgraph.toposort()] assert len(nodes) == 1 assert len(f.maker.fgraph.toposort()[0].op.scalar_op.fgraph.apply_nodes) == 4 assert isinstance(nodes[0].scalar_op, ps.Composite) - utt.assert_allclose(f(val), val**16) + np.testing.assert_allclose(f(val), val**16) f = function([v], v ** (-16), mode=mode) nodes = [node.op for node in f.maker.fgraph.toposort()] @@ -1745,7 +1749,7 @@ def test_local_pow_to_nested_squaring(): 
assert len(f.maker.fgraph.toposort()[0].op.scalar_op.fgraph.apply_nodes) == 4 assert isinstance(nodes[0].scalar_op, ps.Composite) assert isinstance(nodes[-1].scalar_op, ps.basic.Reciprocal) - utt.assert_allclose(f(val_no0), val_no0 ** (-16)) + np.testing.assert_allclose(f(val_no0), val_no0 ** (-16)) def test_local_pow_to_nested_squaring_fails_gracefully(): @@ -2534,7 +2538,7 @@ def test_reduction_rewrite( mul_out = mul(*inputs) f = function(inputs, reduction_op(axis=axis)(mul_out), mode=self.mode) out = f(*inputs_val) - utt.assert_allclose(out, expected_output) + np.testing.assert_allclose(out, expected_output) # Ensure that the rewrite has been applied properly by # ensuring that the rewritten graph contains the expected number @@ -2818,23 +2822,23 @@ def test_local_sum_prod_all_to_none(self): # test sum f = function([a], a.sum(), mode=self.mode) assert len(f.maker.fgraph.apply_nodes) == 1 - utt.assert_allclose(f(input), input.sum()) + np.testing.assert_allclose(f(input), input.sum()) # test prod f = function([a], a.prod(), mode=self.mode) assert len(f.maker.fgraph.apply_nodes) == 1 - utt.assert_allclose(f(input), input.prod()) + np.testing.assert_allclose(f(input), input.prod()) # test sum f = function([a], a.sum([0, 1, 2]), mode=self.mode) assert len(f.maker.fgraph.apply_nodes) == 1 - utt.assert_allclose(f(input), input.sum()) + np.testing.assert_allclose(f(input), input.sum()) # test prod f = function([a], a.prod([0, 1, 2]), mode=self.mode) assert len(f.maker.fgraph.apply_nodes) == 1 - utt.assert_allclose(f(input), input.prod()) + np.testing.assert_allclose(f(input), input.prod()) f = function([a], a.sum(0).sum(0).sum(0), mode=self.mode) assert len(f.maker.fgraph.apply_nodes) == 1 - utt.assert_allclose(f(input), input.sum()) + np.testing.assert_allclose(f(input), input.sum()) def test_local_sum_sum_prod_prod(self): a = tensor3() @@ -2889,54 +2893,54 @@ def my_sum_prod(data, d, dd): for d, dd in dims: expected = my_sum(input, d, dd) f = function([a], a.sum(d).sum(dd), mode=self.mode) - utt.assert_allclose(f(input), expected) + np.testing.assert_allclose(f(input), expected) assert len(f.maker.fgraph.apply_nodes) == 1 for d, dd in dims[:6]: f = function([a], a.sum(d).sum(dd).sum(0), mode=self.mode) - utt.assert_allclose(f(input), input.sum(d).sum(dd).sum(0)) + np.testing.assert_allclose(f(input), input.sum(d).sum(dd).sum(0)) assert len(f.maker.fgraph.apply_nodes) == 1 for d in [0, 1, 2]: f = function([a], a.sum(d).sum(None), mode=self.mode) - utt.assert_allclose(f(input), input.sum(d).sum()) + np.testing.assert_allclose(f(input), input.sum(d).sum()) assert len(f.maker.fgraph.apply_nodes) == 1 f = function([a], a.sum(None).sum(), mode=self.mode) - utt.assert_allclose(f(input), input.sum()) + np.testing.assert_allclose(f(input), input.sum()) assert len(f.maker.fgraph.apply_nodes) == 1 # test prod for d, dd in dims: expected = my_prod(input, d, dd) f = function([a], a.prod(d).prod(dd), mode=self.mode) - utt.assert_allclose(f(input), expected) + np.testing.assert_allclose(f(input), expected) assert len(f.maker.fgraph.apply_nodes) == 1 for d, dd in dims[:6]: f = function([a], a.prod(d).prod(dd).prod(0), mode=self.mode) - utt.assert_allclose(f(input), input.prod(d).prod(dd).prod(0)) + np.testing.assert_allclose(f(input), input.prod(d).prod(dd).prod(0)) assert len(f.maker.fgraph.apply_nodes) == 1 for d in [0, 1, 2]: f = function([a], a.prod(d).prod(None), mode=self.mode) - utt.assert_allclose(f(input), input.prod(d).prod()) + np.testing.assert_allclose(f(input), input.prod(d).prod()) assert 
len(f.maker.fgraph.apply_nodes) == 1 f = function([a], a.prod(None).prod(), mode=self.mode) - utt.assert_allclose(f(input), input.prod()) + np.testing.assert_allclose(f(input), input.prod()) assert len(f.maker.fgraph.apply_nodes) == 1 # Test that sum prod didn't get rewritten. for d, dd in dims: expected = my_sum_prod(input, d, dd) f = function([a], a.sum(d).prod(dd), mode=self.mode) - utt.assert_allclose(f(input), expected) + np.testing.assert_allclose(f(input), expected) assert len(f.maker.fgraph.apply_nodes) == 2 for d, dd in dims[:6]: f = function([a], a.sum(d).prod(dd).prod(0), mode=self.mode) - utt.assert_allclose(f(input), input.sum(d).prod(dd).prod(0)) + np.testing.assert_allclose(f(input), input.sum(d).prod(dd).prod(0)) assert len(f.maker.fgraph.apply_nodes) == 2 for d in [0, 1, 2]: f = function([a], a.sum(d).prod(None), mode=self.mode) - utt.assert_allclose(f(input), input.sum(d).prod()) + np.testing.assert_allclose(f(input), input.sum(d).prod()) assert len(f.maker.fgraph.apply_nodes) == 2 f = function([a], a.sum(None).prod(), mode=self.mode) - utt.assert_allclose(f(input), input.sum()) + np.testing.assert_allclose(f(input), input.sum()) assert len(f.maker.fgraph.apply_nodes) == 1 def test_local_sum_prod_alloc(self): @@ -2950,23 +2954,23 @@ def test_local_sum_prod_alloc(self): ]: # test sum f = function([a], t_like(a).sum(None), mode=mode) - utt.assert_allclose(f(input), n_like(input).sum()) + np.testing.assert_allclose(f(input), n_like(input).sum()) assert len(f.maker.fgraph.apply_nodes) == nb_nodes[0] f = function([a], t_like(a).sum([0, 1, 2]), mode=mode) - utt.assert_allclose(f(input), n_like(input).sum()) + np.testing.assert_allclose(f(input), n_like(input).sum()) assert len(f.maker.fgraph.apply_nodes) == nb_nodes[0] for d in range(3): f = function([a], t_like(a).sum(d), mode=mode) - utt.assert_allclose(f(input), n_like(input).sum(d)) + np.testing.assert_allclose(f(input), n_like(input).sum(d)) assert len(f.maker.fgraph.apply_nodes) == nb_nodes[1] topo = f.maker.fgraph.toposort() assert topo[-1].op == pt.alloc assert not any(isinstance(node.op, Sum) for node in topo) for i in range(3): f = function([a], t_like(a).sum(i), mode=mode) - utt.assert_allclose(f(input), n_like(input).sum(i)) + np.testing.assert_allclose(f(input), n_like(input).sum(i)) assert len(f.maker.fgraph.apply_nodes) == nb_nodes[2] topo = f.maker.fgraph.toposort() assert topo[-1].op == pt.alloc @@ -2974,23 +2978,23 @@ def test_local_sum_prod_alloc(self): # test prod f = function([a], t_like(a).prod(None), mode=mode) - utt.assert_allclose(f(input), n_like(input).prod()) + np.testing.assert_allclose(f(input), n_like(input).prod()) # assert len(f.maker.fgraph.apply_nodes) == nb_nodes[0] f = function([a], t_like(a).prod([0, 1, 2]), mode=mode) - utt.assert_allclose(f(input), n_like(input).prod()) + np.testing.assert_allclose(f(input), n_like(input).prod()) # assert len(f.maker.fgraph.apply_nodes) == nb_nodes[0] for d in range(3): f = function([a], t_like(a).prod(d), mode=mode) - utt.assert_allclose(f(input), n_like(input).prod(d)) + np.testing.assert_allclose(f(input), n_like(input).prod(d)) # assert len(f.maker.fgraph.apply_nodes) == nb_nodes[1] topo = f.maker.fgraph.toposort() assert topo[-1].op == pt.alloc assert not any(isinstance(node.op, Prod) for node in topo) for i in range(3): f = function([a], t_like(a).prod(i), mode=mode) - utt.assert_allclose(f(input), n_like(input).prod(i)) + np.testing.assert_allclose(f(input), n_like(input).prod(i)) # assert len(f.maker.fgraph.apply_nodes) == nb_nodes[2] topo = 
f.maker.fgraph.toposort() assert topo[-1].op == pt.alloc @@ -2998,7 +3002,7 @@ def test_local_sum_prod_alloc(self): for d, dd in [(0, 0), (1, 0), (2, 0), (0, 1), (1, 1), (2, 1)]: f = function([a], t_like(a).sum(d).sum(dd), mode=mode) - utt.assert_allclose(f(input), n_like(input).sum(d).sum(dd)) + np.testing.assert_allclose(f(input), n_like(input).sum(d).sum(dd)) assert len(f.maker.fgraph.apply_nodes) == nb_nodes[3] topo = f.maker.fgraph.toposort() assert topo[-1].op == pt.alloc @@ -3177,7 +3181,7 @@ def test_local_prod_of_div(self): [a, b, c, d], s, on_unused_input="ignore", mode=mode_with_rewrite ) - utt.assert_allclose( + np.testing.assert_allclose( f(a_val, b_val, c_val, d_val), g(a_val, b_val, c_val, d_val) ) @@ -3317,27 +3321,27 @@ def test_local_reduce_join(self): A = shared(np.array([1, 2, 3, 4, 5], dtype="int64")) f = function([], pt_sum(pt.stack([A, A]), axis=0), mode=self.mode) - utt.assert_allclose(f(), [2, 4, 6, 8, 10]) + np.testing.assert_allclose(f(), [2, 4, 6, 8, 10]) topo = f.maker.fgraph.toposort() assert isinstance(topo[-1].op, Elemwise) # Test a case that was bugged in a old PyTensor bug f = function([], pt_sum(pt.stack([A, A]), axis=1), mode=self.mode) - utt.assert_allclose(f(), [15, 15]) + np.testing.assert_allclose(f(), [15, 15]) topo = f.maker.fgraph.toposort() assert not isinstance(topo[-1].op, Elemwise) # This case could be rewritten A = shared(np.array([1, 2, 3, 4, 5]).reshape(5, 1)) f = function([], pt_sum(pt.concatenate((A, A), axis=1), axis=1), mode=self.mode) - utt.assert_allclose(f(), [2, 4, 6, 8, 10]) + np.testing.assert_allclose(f(), [2, 4, 6, 8, 10]) topo = f.maker.fgraph.toposort() assert not isinstance(topo[-1].op, Elemwise) A = shared(np.array([1, 2, 3, 4, 5]).reshape(5, 1)) f = function([], pt_sum(pt.concatenate((A, A), axis=1), axis=0), mode=self.mode) - utt.assert_allclose(f(), [15, 15]) + np.testing.assert_allclose(f(), [15, 15]) topo = f.maker.fgraph.toposort() assert not isinstance(topo[-1].op, Elemwise) @@ -3403,7 +3407,7 @@ def test_local_div_to_reciprocal(): f = function([num_len_s, denom_s], out) out_val = f(3, 2.0) assert out_val.shape == (1, 3) - utt.assert_allclose(out_val, 0.5) + np.testing.assert_allclose(out_val, 0.5) class TestIntDivByOne: @@ -3499,7 +3503,7 @@ def test_local_sumsqr2dot(): f_val = f(w_val, g_val) f_test = np.dot(np.square(g_val), np.square(w_val).sum(axis=0)) - utt.assert_allclose(f_val, f_test) + np.testing.assert_allclose(f_val, f_test) assert any( isinstance( n.op, @@ -3532,7 +3536,7 @@ def test_local_mul_exp_to_exp_add(): # e^x * e^y * e^z * e^w = e^(x+y+z+w) op = expx * expy * expz * expw f = function([x, y, z, w], op, mode) - utt.assert_allclose(f(3, 4, 5, 6), np.exp(3 + 4 + 5 + 6)) + np.testing.assert_allclose(f(3, 4, 5, 6), np.exp(3 + 4 + 5 + 6)) graph = f.maker.fgraph.toposort() assert all(isinstance(n.op, Elemwise) for n in graph) assert any(isinstance(n.op.scalar_op, ps.Add) for n in graph) @@ -3541,7 +3545,7 @@ def test_local_mul_exp_to_exp_add(): # e^x * e^y * e^z / e^w = e^(x+y+z-w) op = expx * expy * expz / expw f = function([x, y, z, w], op, mode) - utt.assert_allclose(f(3, 4, 5, 6), np.exp(3 + 4 + 5 - 6)) + np.testing.assert_allclose(f(3, 4, 5, 6), np.exp(3 + 4 + 5 - 6)) graph = f.maker.fgraph.toposort() assert all(isinstance(n.op, Elemwise) for n in graph) assert any(isinstance(n.op.scalar_op, ps.Add) for n in graph) @@ -3552,7 +3556,7 @@ def test_local_mul_exp_to_exp_add(): # e^x * e^y / e^z * e^w = e^(x+y-z+w) op = expx * expy / expz * expw f = function([x, y, z, w], op, mode) - 
utt.assert_allclose(f(3, 4, 5, 6), np.exp(3 + 4 - 5 + 6)) + np.testing.assert_allclose(f(3, 4, 5, 6), np.exp(3 + 4 - 5 + 6)) graph = f.maker.fgraph.toposort() assert all(isinstance(n.op, Elemwise) for n in graph) assert any(isinstance(n.op.scalar_op, ps.Add) for n in graph) @@ -3563,7 +3567,7 @@ def test_local_mul_exp_to_exp_add(): # e^x / e^y / e^z = (e^x / e^y) / e^z = e^(x-y-z) op = expx / expy / expz f = function([x, y, z], op, mode) - utt.assert_allclose(f(3, 4, 5), np.exp(3 - 4 - 5)) + np.testing.assert_allclose(f(3, 4, 5), np.exp(3 - 4 - 5)) graph = f.maker.fgraph.toposort() assert all(isinstance(n.op, Elemwise) for n in graph) assert any(isinstance(n.op.scalar_op, ps.Sub) for n in graph) @@ -3572,7 +3576,7 @@ def test_local_mul_exp_to_exp_add(): # e^x * y * e^z * w = e^(x+z) * y * w op = expx * y * expz * w f = function([x, y, z, w], op, mode) - utt.assert_allclose(f(3, 4, 5, 6), np.exp(3 + 5) * 4 * 6) + np.testing.assert_allclose(f(3, 4, 5, 6), np.exp(3 + 5) * 4 * 6) graph = f.maker.fgraph.toposort() assert all(isinstance(n.op, Elemwise) for n in graph) assert any(isinstance(n.op.scalar_op, ps.Add) for n in graph) @@ -3584,7 +3588,7 @@ def test_local_mul_exp_to_exp_add(): f = function([mx, my], exp(mx) * exp(my), mode, allow_input_downcast=True) M1 = np.array([[1.0, 2.0], [3.0, 4.0]]) M2 = np.array([[5.0, 6.0], [7.0, 8.0]]) - utt.assert_allclose(f(M1, M2), np.exp(M1 + M2)) + np.testing.assert_allclose(f(M1, M2), np.exp(M1 + M2)) graph = f.maker.fgraph.toposort() assert all(isinstance(n.op, Elemwise) for n in graph) assert any(isinstance(n.op.scalar_op, ps.Add) for n in graph) @@ -3593,13 +3597,13 @@ def test_local_mul_exp_to_exp_add(): # checking whether further rewrites can proceed after this one as one would expect # e^x * e^(-x) = e^(x-x) = e^0 = 1 f = function([x], expx * exp(neg(x)), mode) - utt.assert_allclose(f(42), 1) + np.testing.assert_allclose(f(42), 1) graph = f.maker.fgraph.toposort() assert isinstance(graph[0].inputs[0], TensorConstant) # e^x / e^x = e^(x-x) = e^0 = 1 f = function([x], expx / expx, mode) - utt.assert_allclose(f(42), 1) + np.testing.assert_allclose(f(42), 1) graph = f.maker.fgraph.toposort() assert isinstance(graph[0].inputs[0], TensorConstant) @@ -3630,7 +3634,7 @@ def test_local_mul_pow_to_pow_add(): # 2^x * 2^y * 2^z * 2^w = 2^(x+y+z+w) op = 2**x * 2**y * 2**z * 2**w f = function([x, y, z, w], op, mode) - utt.assert_allclose(f(3, 4, 5, 6), 2 ** (3 + 4 + 5 + 6)) + np.testing.assert_allclose(f(3, 4, 5, 6), 2 ** (3 + 4 + 5 + 6)) graph = f.maker.fgraph.toposort() assert all(isinstance(n.op, Elemwise) for n in graph) assert any(isinstance(n.op.scalar_op, ps.Add) for n in graph) @@ -3639,7 +3643,7 @@ def test_local_mul_pow_to_pow_add(): # 2^x * a^y * 2^z * b^w * c^v * a^u * s * b^t = 2^(x+z) * a^(y+u) * b^(w+t) * c^v * s op = 2**x * a**y * 2**z * b**w * c**v * a**u * s * b**t f = function([x, y, z, w, v, u, t, s, a, b, c], op, mode) - utt.assert_allclose( + np.testing.assert_allclose( f(4, 5, 6, 7, 8, 9, 10, 11, 2.5, 3, 3.5), 2 ** (4 + 6) * 2.5 ** (5 + 9) * 3 ** (7 + 10) * 3.5**8 * 11, ) @@ -3652,7 +3656,7 @@ def test_local_mul_pow_to_pow_add(): # (2^x / 2^y) * (a^z / a^w) = 2^(x-y) * a^(z-w) op = 2**x / 2**y * (a**z / a**w) f = function([x, y, z, w, a], op, mode) - utt.assert_allclose(f(3, 5, 6, 4, 7), 2 ** (3 - 5) * 7 ** (6 - 4)) + np.testing.assert_allclose(f(3, 5, 6, 4, 7), 2 ** (3 - 5) * 7 ** (6 - 4)) graph = f.maker.fgraph.toposort() assert all(isinstance(n.op, Elemwise) for n in graph) assert len([True for n in graph if isinstance(n.op.scalar_op, 
ps.Sub)]) == 2 @@ -3661,7 +3665,7 @@ def test_local_mul_pow_to_pow_add(): # a^x * a^y * exp(z) * exp(w) = a^(x+y) * exp(z+w) op = a**x * a**y * exp(z) * exp(w) f = function([x, y, z, w, a], op, mode) - utt.assert_allclose(f(3, 4, 5, 6, 2), 2 ** (3 + 4) * np.exp(5 + 6)) + np.testing.assert_allclose(f(3, 4, 5, 6, 2), 2 ** (3 + 4) * np.exp(5 + 6)) graph = f.maker.fgraph.toposort() assert all(isinstance(n.op, Elemwise) for n in graph) assert len([True for n in graph if isinstance(n.op.scalar_op, ps.Add)]) == 2 @@ -3685,7 +3689,7 @@ def test_local_expm1(): f_val = f(x_val) f_test = function([x], expm1(x), mode=MODE) - utt.assert_allclose(f_val, f_test(x_val)) + np.testing.assert_allclose(f_val, f_test(x_val)) assert any( isinstance(n.op, Elemwise) and isinstance(n.op.scalar_op, ps.basic.Expm1) diff --git a/tests/tensor/rewriting/test_shape.py b/tests/tensor/rewriting/test_shape.py index c0fd7513b3..62c58ce777 100644 --- a/tests/tensor/rewriting/test_shape.py +++ b/tests/tensor/rewriting/test_shape.py @@ -379,14 +379,14 @@ def test_perform(self): advec_val = rng.random(3).astype(config.floatX) f = function([advec], Shape_i(0)(advec)) out = f(advec_val) - utt.assert_allclose(out, advec_val.shape[0]) + np.testing.assert_allclose(out, advec_val.shape[0]) admat = matrix() admat_val = rng.random((4, 3)).astype(config.floatX) for i in range(2): f = function([admat], Shape_i(i)(admat)) out = f(admat_val) - utt.assert_allclose(out, admat_val.shape[i]) + np.testing.assert_allclose(out, admat_val.shape[i]) def test_infer_shape(self): admat = matrix() diff --git a/tests/tensor/rewriting/test_subtensor.py b/tests/tensor/rewriting/test_subtensor.py index f855a06194..afdb0f21a4 100644 --- a/tests/tensor/rewriting/test_subtensor.py +++ b/tests/tensor/rewriting/test_subtensor.py @@ -1512,7 +1512,7 @@ def test_basic(self): f = function([x, y, idx], o, self.mode_no_assert) res = f(dx, dy, didx) - utt.assert_allclose(dy, res) + np.testing.assert_allclose(dy, res) topo = f.maker.fgraph.toposort() assert len(topo) == 1 assert isinstance(topo[0].op, (DeepCopyOp, Elemwise)) @@ -1525,7 +1525,7 @@ def test_basic(self): res = f(dx, dy, didx) _dx = dx.copy() np.add.at(_dx, didx, dy) - utt.assert_allclose(_dx[didx], res) + np.testing.assert_allclose(_dx[didx], res) topo = f.maker.fgraph.toposort() len(topo) == 2 @@ -1535,7 +1535,7 @@ def test_basic(self): f = function([x, y, idx], o, self.mode_no_assert) res = f(dx, dy, didx) - utt.assert_allclose(np.vstack([dy[0], 2 * dy[1], 2 * dy[2]]), res) + np.testing.assert_allclose(np.vstack([dy[0], 2 * dy[1], 2 * dy[2]]), res) def test_assert(self): x = matrix("x") @@ -1667,7 +1667,7 @@ def test_incsubtensor_x_zeros(self): node_is_set_instead_of_inc = inc_nodes[0].op.set_instead_of_inc assert node_is_set_instead_of_inc test_X = np.random.random((4, 4)).astype(config.floatX) - utt.assert_allclose(f(test_X), test_X) + np.testing.assert_allclose(f(test_X), test_X) # also check the flag doesn't get set if first input is not zeros: not_all_zeros = np.zeros((4, 4)) @@ -1682,7 +1682,7 @@ def test_incsubtensor_x_zeros(self): assert len(inc_nodes) == 1 assert inc_nodes[0].op.set_instead_of_inc is False test_X = np.random.random((4, 4)).astype(config.floatX) - utt.assert_allclose(f(test_X), test_X + not_all_zeros) + np.testing.assert_allclose(f(test_X), test_X + not_all_zeros) def test_advancedincsubtensor1_allocs0(self): x = matrix() @@ -1861,7 +1861,7 @@ def test_local_set_to_inc_subtensor(): r1 = f1(val) r2 = f2(val) - utt.assert_allclose(r1, r2) + np.testing.assert_allclose(r1, r2) # 
Finally, test that the stack trace is copied over properly, # before and after optimization. diff --git a/tests/tensor/test_basic.py b/tests/tensor/test_basic.py index ba5e1cf648..95a232411e 100644 --- a/tests/tensor/test_basic.py +++ b/tests/tensor/test_basic.py @@ -416,7 +416,7 @@ def test_make_vector(self, dtype, inputs): if dtype in int_dtypes: # The gradient should be 0 - utt.assert_allclose(g_val, 0) + np.testing.assert_allclose(g_val, 0) else: for var, grval in zip((b, i, d), g_val): float_inputs = [] @@ -2937,7 +2937,7 @@ def test_mgrid_numpy_equiv(self): ) for n, t in zip(nmgrid, tmgrid): for ng, tg in zip(n, t): - utt.assert_allclose(ng, tg.eval()) + np.testing.assert_allclose(ng, tg.eval()) def test_ogrid_numpy_equiv(self): nogrid = ( @@ -2952,7 +2952,7 @@ def test_ogrid_numpy_equiv(self): ) for n, t in zip(nogrid, togrid): for ng, tg in zip(n, t): - utt.assert_allclose(ng, tg.eval()) + np.testing.assert_allclose(ng, tg.eval()) def test_mgrid_pytensor_variable_numpy_equiv(self): nfmgrid = np.mgrid[0:1:0.1, 1:10:1.0, 10:100:10.0] @@ -2965,7 +2965,7 @@ def test_mgrid_pytensor_variable_numpy_equiv(self): fi = pytensor.function([l, m, n], timgrid) for n, t in zip((nfmgrid, nimgrid), (ff(0, 10, 10.0), fi(0, 10, 10))): for ng, tg in zip(n, t): - utt.assert_allclose(ng, tg) + np.testing.assert_allclose(ng, tg) def test_ogrid_pytensor_variable_numpy_equiv(self): nfogrid = np.ogrid[0:1:0.1, 1:10:1.0, 10:100:10.0] @@ -2978,7 +2978,7 @@ def test_ogrid_pytensor_variable_numpy_equiv(self): fi = pytensor.function([l, m, n], tiogrid) for n, t in zip((nfogrid, niogrid), (ff(0, 10, 10.0), fi(0, 10, 10))): for ng, tg in zip(n, t): - utt.assert_allclose(ng, tg) + np.testing.assert_allclose(ng, tg) class TestInversePermutation: diff --git a/tests/tensor/test_blas.py b/tests/tensor/test_blas.py index 743dc53cc6..c44a1a4a9e 100644 --- a/tests/tensor/test_blas.py +++ b/tests/tensor/test_blas.py @@ -139,7 +139,7 @@ def cmp_linker(z, a, x, y, b, l): z_after = self._gemm(z_orig, a, x, y, b) # print z_orig, z_after, z, type(z_orig), type(z_after), type(z) - unittest_tools.assert_allclose(z_after, z) + np.testing.assert_allclose(z_after, z) if a == 0.0 and b == 1.0: return elif z_orig.size == 0: @@ -324,11 +324,11 @@ def t(z, x, y, a=1.0, b=0.0, l="c|py", dt="float64"): mode=Mode(optimizer=None, linker=l), ) f() - unittest_tools.assert_allclose(z_after, tz.get_value(borrow=True)) + np.testing.assert_allclose(z_after, tz.get_value(borrow=True)) f() - unittest_tools.assert_allclose(z_after, tz.get_value(borrow=True)) + np.testing.assert_allclose(z_after, tz.get_value(borrow=True)) f() - unittest_tools.assert_allclose(z_after, tz.get_value(borrow=True)) + np.testing.assert_allclose(z_after, tz.get_value(borrow=True)) # tz.value *= 0 # clear z's value y_T = ty.get_value(borrow=True).T @@ -338,7 +338,7 @@ def t(z, x, y, a=1.0, b=0.0, l="c|py", dt="float64"): f() # test that the transposed version of multiplication gives # same answer - unittest_tools.assert_allclose(z_after, tz.get_value(borrow=True).T) + np.testing.assert_allclose(z_after, tz.get_value(borrow=True).T) t(C, A, B) t(C.T, A, B) @@ -389,7 +389,7 @@ def t(z, x, y, a=1.0, b=0.0, l="c|py", dt="float64"): z = tz.get_value(borrow=True, return_internal_type=True) z[:, :, i] = z_i - unittest_tools.assert_allclose( + np.testing.assert_allclose( z_after[:, :, i], tz.get_value(borrow=True)[:, :, i] ) @@ -402,7 +402,7 @@ def t(z, x, y, a=1.0, b=0.0, l="c|py", dt="float64"): ) for j in range(3): g_i() - unittest_tools.assert_allclose( + np.testing.assert_allclose( 
z_after[:, :, i], tz.get_value(borrow=True)[:, :, i] ) @@ -553,7 +553,7 @@ def run_gemm( + (transpose_A, transpose_B, transpose_C, slice_A, slice_B, slice_C) ) ) - unittest_tools.assert_allclose(ref_val, z_val) + np.testing.assert_allclose(ref_val, z_val) def test_gemm(self): rng = np.random.default_rng(seed=utt.fetch_seed()) @@ -2462,7 +2462,7 @@ def test_gemm_non_contiguous(self): f(0) ref_output = np.ones((3, 5)) * 2 - unittest_tools.assert_allclose(c.get_value(), ref_output) + np.testing.assert_allclose(c.get_value(), ref_output) class TestInferShape(unittest_tools.InferShapeTester): @@ -2695,7 +2695,7 @@ def check_first_dim(inverted): assert not (x.flags["C_CONTIGUOUS"] or x.flags["F_CONTIGUOUS"]) result = f(x, w) ref_result = np.asarray([np.dot(u, v) for u, v in zip(x, w)]) - utt.assert_allclose(ref_result, result) + np.testing.assert_allclose(ref_result, result) for inverted in (0, 1): check_first_dim(inverted) diff --git a/tests/tensor/test_blas_c.py b/tests/tensor/test_blas_c.py index 58b425d53a..d44f1feaac 100644 --- a/tests/tensor/test_blas_c.py +++ b/tests/tensor/test_blas_c.py @@ -389,7 +389,7 @@ def run_cgemv(self, dtype, ALPHA, BETA, transpose_A, slice_tensors): assert z_val.ndim == 1 assert z_val.shape[0] == self.M ref_val = self.compute_ref(*(values + (transpose_A, slice_tensors))) - unittest_tools.assert_allclose(ref_val, z_val) + np.testing.assert_allclose(ref_val, z_val) def test_cgemv(self): for dtype in ("float32", "float64"): diff --git a/tests/tensor/test_elemwise.py b/tests/tensor/test_elemwise.py index 9c6c140901..3f89d24009 100644 --- a/tests/tensor/test_elemwise.py +++ b/tests/tensor/test_elemwise.py @@ -8,7 +8,6 @@ import pytensor import pytensor.scalar as ps -import tests.unittest_tools as utt from pytensor.compile.mode import Mode from pytensor.configdefaults import config from pytensor.graph.basic import Apply, Variable @@ -251,7 +250,7 @@ def with_linker(self, linker, op, type, rand_val): yv = rand_val(ysh) zv = xv + yv - unittest_tools.assert_allclose(f(xv, yv), zv) + np.testing.assert_allclose(f(xv, yv), zv) # test Elemwise.infer_shape # the Shape op don't implement c_code! 
@@ -529,7 +528,7 @@ def with_mode( else: f_xv = f(xv) assert f_xv.shape == zv.shape, (f_xv, zv) - utt.assert_allclose(zv, f_xv) + np.testing.assert_allclose(zv, f_xv) x = self.type( dtype, shape=tuple(entry if entry == 1 else None for entry in xsh) diff --git a/tests/tensor/test_extra_ops.py b/tests/tensor/test_extra_ops.py index cda745d023..e89d82043e 100644 --- a/tests/tensor/test_extra_ops.py +++ b/tests/tensor/test_extra_ops.py @@ -890,7 +890,7 @@ def test_basic_vector(self, x, inp, axis): f = pytensor.function(inputs=[x], outputs=out) outs = f(inp) for out, out_exp in zip(outs, outs_expected): - utt.assert_allclose(out, out_exp) + np.testing.assert_allclose(out, out_exp) @pytest.mark.parametrize( ("x", "inp", "axis"), diff --git a/tests/tensor/test_fft.py b/tests/tensor/test_fft.py index 3599c97de3..38e72b7e3a 100644 --- a/tests/tensor/test_fft.py +++ b/tests/tensor/test_fft.py @@ -40,7 +40,7 @@ def test_1Drfft(self): rfft_ref = np.fft.rfft(inputs_val, axis=1) - utt.assert_allclose(rfft_ref, res_rfft_comp) + np.testing.assert_allclose(rfft_ref, res_rfft_comp) m = rfft.type() print(m.ndim) @@ -48,7 +48,7 @@ def test_1Drfft(self): f_irfft = pytensor.function([m], irfft) res_irfft = f_irfft(res_rfft) - utt.assert_allclose(inputs_val, np.asarray(res_irfft)) + np.testing.assert_allclose(inputs_val, np.asarray(res_irfft)) # The numerical gradient of the FFT is sensitive, must set large # enough epsilon to get good accuracy. @@ -79,7 +79,7 @@ def test_rfft(self): rfft_ref = np.fft.rfftn(inputs_val, axes=(1, 2)) - utt.assert_allclose(rfft_ref, res_rfft_comp, atol=1e-4, rtol=1e-4) + np.testing.assert_allclose(rfft_ref, res_rfft_comp, atol=1e-4, rtol=1e-4) def test_irfft(self): inputs_val = np.random.random((1, N, N)).astype(pytensor.config.floatX) @@ -94,7 +94,7 @@ def test_irfft(self): f_irfft = pytensor.function([m], irfft) res_irfft = f_irfft(res_fft) - utt.assert_allclose(inputs_val, np.asarray(res_irfft)) + np.testing.assert_allclose(inputs_val, np.asarray(res_irfft)) inputs_val = np.random.random((1, N, N, 2)).astype(pytensor.config.floatX) inputs = pytensor.shared(inputs_val) @@ -106,7 +106,7 @@ def test_irfft(self): irfft_ref = np.fft.irfftn(inputs_ref, axes=(1, 2)) - utt.assert_allclose(irfft_ref, res_irfft, atol=1e-4, rtol=1e-4) + np.testing.assert_allclose(irfft_ref, res_irfft, atol=1e-4, rtol=1e-4) def test_norm_rfft(self): inputs_val = np.random.random((1, N, N)).astype(pytensor.config.floatX) @@ -122,7 +122,7 @@ def test_norm_rfft(self): rfft_ref = np.fft.rfftn(inputs_val, axes=(1, 2)) - utt.assert_allclose(rfft_ref / N, res_rfft_comp, atol=1e-4, rtol=1e-4) + np.testing.assert_allclose(rfft_ref / N, res_rfft_comp, atol=1e-4, rtol=1e-4) # No normalization rfft = fft.rfft(inputs, norm="no_norm") @@ -132,7 +132,7 @@ def test_norm_rfft(self): res_rfft[:, :, :, 1] ) - utt.assert_allclose(rfft_ref, res_rfft_comp, atol=1e-4, rtol=1e-4) + np.testing.assert_allclose(rfft_ref, res_rfft_comp, atol=1e-4, rtol=1e-4) # Inverse FFT inputs inputs_val = np.random.random((1, N, N // 2 + 1, 2)).astype( @@ -148,14 +148,14 @@ def test_norm_rfft(self): irfft_ref = np.fft.irfftn(inputs_ref, axes=(1, 2)) - utt.assert_allclose(irfft_ref * N, res_irfft, atol=1e-4, rtol=1e-4) + np.testing.assert_allclose(irfft_ref * N, res_irfft, atol=1e-4, rtol=1e-4) # No normalization inverse FFT irfft = fft.irfft(inputs, norm="no_norm") f_irfft = pytensor.function([], irfft) res_irfft = f_irfft() - utt.assert_allclose(irfft_ref * N**2, res_irfft, atol=1e-4, rtol=1e-4) + np.testing.assert_allclose(irfft_ref * N**2, 
res_irfft, atol=1e-4, rtol=1e-4) def test_params(self): inputs_val = np.random.random((1, N)).astype(pytensor.config.floatX) diff --git a/tests/tensor/test_math.py b/tests/tensor/test_math.py index d543019f8d..d682a1dc89 100644 --- a/tests/tensor/test_math.py +++ b/tests/tensor/test_math.py @@ -1502,7 +1502,7 @@ def test_outer(self): v1 = np.asarray(self.rng.random(s1)).astype(config.floatX) v2 = np.asarray(self.rng.random(s2)).astype(config.floatX) o = outer(x, y).eval({x: v1, y: v2}) - utt.assert_allclose(o, np.outer(v1, v2)) + np.testing.assert_allclose(o, np.outer(v1, v2)) def test_grad(self): # Test the combined graph of the graph of outer @@ -1882,7 +1882,7 @@ def test_mean_f16(self): x = vector(dtype="float16") y = x.mean() f = function([x], y) - utt.assert_allclose(f(np.ones((100000,), dtype="float16")), 1.0) + np.testing.assert_allclose(f(np.ones((100000,), dtype="float16")), 1.0) def test_basic(self): x = vector() @@ -2005,7 +2005,7 @@ def test_basic(self): bval = random(5, rng=rng) out0 = np.tensordot(aval, bval, axes) out1 = f1(aval, bval) - utt.assert_allclose(out0, out1) + np.testing.assert_allclose(out0, out1) utt.verify_grad(self.TensorDot(axes), [aval, bval]) # Test matrix-vector @@ -2015,7 +2015,7 @@ def test_basic(self): f2 = inplace_func([avec, bmat], c) aval = random(5, rng=rng) bval = random(8, 5, rng=rng) - utt.assert_allclose(np.tensordot(aval, bval, axes), f2(aval, bval)) + np.testing.assert_allclose(np.tensordot(aval, bval, axes), f2(aval, bval)) utt.verify_grad(self.TensorDot(axes), [aval, bval]) # Test matrix-matrix @@ -2034,7 +2034,7 @@ def test_basic(self): f3 = inplace_func([amat, bmat], c) aval = random(*shps[0], rng=rng) bval = random(*shps[1], rng=rng) - utt.assert_allclose(np.tensordot(aval, bval, axes), f3(aval, bval)) + np.testing.assert_allclose(np.tensordot(aval, bval, axes), f3(aval, bval)) utt.verify_grad(self.TensorDot(axes), [aval, bval]) # Test ndarray-matrix, sum over one dim of matrix @@ -2052,7 +2052,7 @@ def test_basic(self): f4 = inplace_func([atens, bmat], c) aval = random(*shps[0], rng=rng) bval = random(*shps[1], rng=rng) - utt.assert_allclose(np.tensordot(aval, bval, axes), f4(aval, bval)) + np.testing.assert_allclose(np.tensordot(aval, bval, axes), f4(aval, bval)) utt.verify_grad(self.TensorDot(axes), [aval, bval]) # Test ndarray-ndarray @@ -2063,13 +2063,13 @@ def test_basic(self): f5 = inplace_func([atens, btens], c) aval = random(4, 3, 5, 2, rng=rng) bval = random(3, 4, 2, rng=rng) - utt.assert_allclose(np.tensordot(aval, bval, axes), f5(aval, bval)) + np.testing.assert_allclose(np.tensordot(aval, bval, axes), f5(aval, bval)) utt.verify_grad(self.TensorDot(axes), [aval, bval]) axes = (axes[1], axes[0]) c = tensordot(btens, atens, axes) f6 = inplace_func([btens, atens], c) - utt.assert_allclose(np.tensordot(bval, aval, axes), f6(bval, aval)) + np.testing.assert_allclose(np.tensordot(bval, aval, axes), f6(bval, aval)) utt.verify_grad(self.TensorDot(axes), [bval, aval]) def test_raise_error(self): @@ -2107,7 +2107,7 @@ def test_weird_valid_axes(self): f3 = inplace_func([amat, bmat], c) aval = random(4, 7, rng=rng) bval = random(7, 9, rng=rng) - utt.assert_allclose(np.tensordot(aval, bval, axes), f3(aval, bval)) + np.testing.assert_allclose(np.tensordot(aval, bval, axes), f3(aval, bval)) utt.verify_grad(self.TensorDot(axes), [aval, bval]) def test_scalar_axes(self): @@ -2555,10 +2555,10 @@ def test_dot(self): x, y = self.vals # Use allclose comparison as a user reported on the mailing # list failure otherwise with array that print 
exactly the same. - utt.assert_allclose(x.dot(y), X.dot(Y).eval({X: x, Y: y})) + np.testing.assert_allclose(x.dot(y), X.dot(Y).eval({X: x, Y: y})) Z = X.dot(Y) z = x.dot(y) - utt.assert_allclose(x.dot(z), X.dot(Z).eval({X: x, Z: z})) + np.testing.assert_allclose(x.dot(z), X.dot(Z).eval({X: x, Z: z})) def test_real_imag(self): X, Y = self.vars @@ -2591,7 +2591,7 @@ def test_std(self): # std() is implemented as PyTensor tree and does not pass its # args directly to numpy. This sometimes results in small # difference, so we use allclose test. - utt.assert_allclose(X.std().eval({X: x}), x.std()) + np.testing.assert_allclose(X.std().eval({X: x}), x.std()) def test_cumsum(self): X, _ = self.vars diff --git a/tests/tensor/test_sort.py b/tests/tensor/test_sort.py index 9334776652..855c2898b7 100644 --- a/tests/tensor/test_sort.py +++ b/tests/tensor/test_sort.py @@ -50,7 +50,7 @@ def test1(self): a = dmatrix() w = sort(a) f = pytensor.function([a], w) - utt.assert_allclose(f(self.m_val), np.sort(self.m_val)) + np.testing.assert_allclose(f(self.m_val), np.sort(self.m_val)) def test2(self): a = dmatrix() @@ -60,7 +60,7 @@ def test2(self): for axis_val in 0, 1: gv = f(self.m_val, axis_val) gt = np.sort(self.m_val, axis_val) - utt.assert_allclose(gv, gt) + np.testing.assert_allclose(gv, gt) def test3(self): a = dvector() @@ -68,7 +68,7 @@ def test3(self): f = pytensor.function([a], w2) gv = f(self.v_val) gt = np.sort(self.v_val) - utt.assert_allclose(gv, gt) + np.testing.assert_allclose(gv, gt) def test4(self): a = dmatrix() @@ -78,7 +78,7 @@ def test4(self): for axis_val in 0, 1: gv = f(self.m_val, axis_val) gt = np.sort(self.m_val, axis_val) - utt.assert_allclose(gv, gt) + np.testing.assert_allclose(gv, gt) def test5(self): a1 = SortOp("mergesort", []) @@ -95,7 +95,7 @@ def test_None(self): f = pytensor.function([a], l) gv = f(self.m_val) gt = np.sort(self.m_val, None) - utt.assert_allclose(gv, gt) + np.testing.assert_allclose(gv, gt) def test_grad_vector(self): data = self.rng.random(10).astype(pytensor.config.floatX) @@ -193,7 +193,7 @@ def test_argsort(): f = pytensor.function([a], w) gv = f(m_val) gt = np.argsort(m_val) - utt.assert_allclose(gv, gt) + np.testing.assert_allclose(gv, gt) # Example 2 a = dmatrix() @@ -203,7 +203,7 @@ def test_argsort(): for axis_val in 0, 1: gv = f(m_val, axis_val) gt = np.argsort(m_val, axis_val) - utt.assert_allclose(gv, gt) + np.testing.assert_allclose(gv, gt) # Example 3 a = dvector() @@ -211,7 +211,7 @@ def test_argsort(): f = pytensor.function([a], w2) gv = f(v_val) gt = np.argsort(v_val) - utt.assert_allclose(gv, gt) + np.testing.assert_allclose(gv, gt) # Example 4 a = dmatrix() @@ -221,7 +221,7 @@ def test_argsort(): for axis_val in 0, 1: gv = f(m_val, axis_val) gt = np.argsort(m_val, axis_val) - utt.assert_allclose(gv, gt) + np.testing.assert_allclose(gv, gt) # Example 5 a = dmatrix() @@ -239,7 +239,7 @@ def test_argsort(): f = pytensor.function([a], w2) gv = f(m_val) gt = np.argsort(m_val, None) - utt.assert_allclose(gv, gt) + np.testing.assert_allclose(gv, gt) def test_argsort_grad(): @@ -305,7 +305,7 @@ def test_combined_sanity(self, dtype, idx_dtype, axis, sorted): xval = np.asarray([1]).astype(dtype) yvval, yival = fn(xval) assert yival == np.asarray([0], dtype=idx_dtype) - utt.assert_allclose(xval, yvval) + np.testing.assert_allclose(xval, yvval) assert yvval.dtype == xval.dtype assert yival.dtype == np.dtype(idx_dtype) @@ -339,7 +339,7 @@ def test_topk_1d(self, size, k, dtype, sorted): goal = np.sort(xval)[idx] assert yval.dtype == goal.dtype - 
utt.assert_allclose(goal, np.sort(yval)) + np.testing.assert_allclose(goal, np.sort(yval)) @pytest.mark.parametrize( "size, k, dtype, sorted, idx_dtype", @@ -405,7 +405,7 @@ def test_combined_1d(self, size, k, dtype, sorted, idx_dtype): # due to uniqueness, we expect indices same assert np.all(xval[np.sort(yival)] == xval[np.sort(goali)]) - utt.assert_allclose(np.sort(yvval), goalv) + np.testing.assert_allclose(np.sort(yvval), goalv) @pytest.mark.parametrize( "size, k, dtype, sorted", @@ -434,7 +434,7 @@ def test_argtopk_1d_collision(self, size, k, dtype, sorted): yval = fn(xval) idx = slice(-k, None) if k > 0 else slice(-k) goal = np.argsort(xval)[idx].astype("int32") - utt.assert_allclose(np.sort(xval[yval]), np.sort(xval[goal])) + np.testing.assert_allclose(np.sort(xval[yval]), np.sort(xval[goal])) @pytest.mark.parametrize( "shp, k_, dtype, sorted, idx_dtype", diff --git a/tests/tensor/test_subtensor.py b/tests/tensor/test_subtensor.py index 63acbabb29..20056cac7a 100644 --- a/tests/tensor/test_subtensor.py +++ b/tests/tensor/test_subtensor.py @@ -848,7 +848,7 @@ def test_noncontiguous_idx(self): n = self.shared(data) t = n[self.shared(np.asarray(idx).astype("int64"))[::2]] val = self.eval_output_and_check(t, op_type=AdvancedSubtensor1, length=2) - utt.assert_allclose(data[idx[::2]], val) + np.testing.assert_allclose(data[idx[::2]], val) def test_err_invalid_list(self): n = self.shared(np.asarray(5, dtype=self.dtype)) @@ -1466,7 +1466,7 @@ def test_simple_2d(self): else: expected_result[:, :val_sl2_end] += val_inc - utt.assert_allclose(result, expected_result) + np.testing.assert_allclose(result, expected_result) def test_wrong_dims(self): a = matrix() @@ -1530,7 +1530,7 @@ def test_simple_3d(self): else: expected_result[:, sl3, :val_sl2_end] += val_inc - utt.assert_allclose(result, expected_result) + np.testing.assert_allclose(result, expected_result) # Test when we broadcast the result result = method(a[sl1, sl2], increment) @@ -1545,7 +1545,7 @@ def test_simple_3d(self): else: expected_result[:, :val_sl2_end] += val_inc - utt.assert_allclose(result, expected_result) + np.testing.assert_allclose(result, expected_result) def test_grad_inc_set(self): def inc_slice(*s): @@ -1675,10 +1675,10 @@ def test_matrix_idx(self): idxval = np.array([[1, 2], [3, 2]]) a2val = f(mval, idxval) - utt.assert_allclose(a2val[0], mval[0]) - utt.assert_allclose(a2val[1], mval[1] * 2) - utt.assert_allclose(a2val[2], mval[2] * 3) - utt.assert_allclose(a2val[3], mval[3] * 2) + np.testing.assert_allclose(a2val[0], mval[0]) + np.testing.assert_allclose(a2val[1], mval[1] * 2) + np.testing.assert_allclose(a2val[2], mval[2] * 3) + np.testing.assert_allclose(a2val[3], mval[3] * 2) def test_inc_bcastableidx(self): idx = ptb.constant([0]) @@ -1692,7 +1692,7 @@ def test_inc_bcastableidx(self): incval = self.rng.random((10, 1)).astype(config.floatX) out1val, out2val = f(mval, incval, incval) - utt.assert_allclose(out1val, out2val) + np.testing.assert_allclose(out1val, out2val) class TestAdvancedSubtensor: @@ -1843,7 +1843,7 @@ def test_adv_subtensor_w_int_and_matrix(self): ix2v = np.asarray([[0, 1], [1, 0]]) aval = f(ft4v, ix2v) rval = ft4v[0, :, ix2v, :] - utt.assert_allclose(rval, aval) + np.testing.assert_allclose(rval, aval) def test_adv_subtensor_w_none_and_matrix(self): subt = self.ft4[:, None, :, self.ix2, :] @@ -1852,7 +1852,7 @@ def test_adv_subtensor_w_none_and_matrix(self): ix2v = np.asarray([[0, 1], [1, 0]]) aval = f(ft4v, ix2v) rval = ft4v[:, None, :, ix2v, :] - utt.assert_allclose(rval, aval) + 
np.testing.assert_allclose(rval, aval) def test_adv_subtensor_w_slice_and_matrix(self): subt = self.ft4[:, 0:1, self.ix2, :] @@ -1861,7 +1861,7 @@ def test_adv_subtensor_w_slice_and_matrix(self): ix2v = np.asarray([[0, 1], [1, 0]]) aval = f(ft4v, ix2v) rval = ft4v[:, 0:1, ix2v, :] - utt.assert_allclose(rval, aval) + np.testing.assert_allclose(rval, aval) def test_adv_subtensor_w_matrix_and_int(self): subt = self.ft4[:, :, self.ix2, 0] @@ -1870,7 +1870,7 @@ def test_adv_subtensor_w_matrix_and_int(self): ix2v = np.asarray([[0, 1], [1, 0]]) aval = f(ft4v, ix2v) rval = ft4v[:, :, ix2v, 0] - utt.assert_allclose(rval, aval) + np.testing.assert_allclose(rval, aval) def test_adv_subtensor_w_matrix_and_none(self): subt = self.ft4[:, :, self.ix2, None, :] @@ -1879,7 +1879,7 @@ def test_adv_subtensor_w_matrix_and_none(self): ix2v = np.asarray([[0, 1], [1, 0]]) aval = f(ft4v, ix2v) rval = ft4v[:, :, ix2v, None, :] - utt.assert_allclose(rval, aval) + np.testing.assert_allclose(rval, aval) @pytest.mark.parametrize( "ignore_duplicates", @@ -2059,7 +2059,7 @@ def test_adv_sub_3d(self): f = pytensor.function([X], X[b_idx, r_idx, c_idx], mode=self.mode) out = f(xx) - utt.assert_allclose(out, xx[b_idx, r_idx, c_idx]) + np.testing.assert_allclose(out, xx[b_idx, r_idx, c_idx]) def test_adv_sub_slice(self): # Reported in https://github.com/Theano/Theano/issues/5898 @@ -2083,7 +2083,7 @@ def test_adv_grouped(self): assert out_v.shape == (3, 5, 4) out_np = var_v[:, idx1_v, np.arange(4)] - utt.assert_allclose(out_v, out_np) + np.testing.assert_allclose(out_v, out_np) def test_grad(self): ones = np.ones((1, 3), dtype=self.dtype) diff --git a/tests/tensor/test_variable.py b/tests/tensor/test_variable.py index 70ad04999a..96b4380ec3 100644 --- a/tests/tensor/test_variable.py +++ b/tests/tensor/test_variable.py @@ -5,7 +5,6 @@ from numpy.testing import assert_array_equal, assert_equal, assert_string_equal import pytensor -import tests.unittest_tools as utt from pytensor.compile import DeepCopyOp from pytensor.compile.mode import get_default_mode from pytensor.graph.basic import Constant, equal_computations @@ -77,7 +76,7 @@ def test_numpy_method(fct, value): x = dscalar("x") y = fct(x) f = pytensor.function([x], y) - utt.assert_allclose(np.nan_to_num(f(value)), np.nan_to_num(fct(value))) + np.testing.assert_allclose(np.nan_to_num(f(value)), np.nan_to_num(fct(value))) def test_dot_method(): diff --git a/tests/unittest_tools.py b/tests/unittest_tools.py index 823f5653dc..43692ea3db 100644 --- a/tests/unittest_tools.py +++ b/tests/unittest_tools.py @@ -11,7 +11,6 @@ from pytensor.configdefaults import config from pytensor.gradient import verify_grad as orig_verify_grad from pytensor.tensor.basic import as_tensor_variable -from pytensor.tensor.math import _allclose from pytensor.tensor.math import add as pt_add @@ -277,11 +276,6 @@ def __str__(self): return s + str_diagnostic(self.val1, self.val2, self.rtol, self.atol) -def assert_allclose(expected, value, rtol=None, atol=None): - if not _allclose(expected, value, rtol, atol): - raise WrongValue(expected, value, rtol, atol) - - class AttemptManyTimes: """Decorator for unit tests that forces a unit test to be attempted multiple times. The test needs to pass a certain number of times for it to