diff --git a/doc/extending/creating_an_op.rst b/doc/extending/creating_an_op.rst index 746342ad4a..e8623cc258 100644 --- a/doc/extending/creating_an_op.rst +++ b/doc/extending/creating_an_op.rst @@ -551,7 +551,6 @@ exception. You can use the ``assert`` keyword to automatically raise an import numpy as np import pytensor - from tests import unittest_tools as utt class TestDouble(utt.InferShapeTester): @@ -569,9 +568,9 @@ exception. You can use the ``assert`` keyword to automatically raise an inp = np.asarray(rng.random((5, 4)), dtype=pytensor.config.floatX) out = f(inp) # Compare the result computed to the expected value. - utt.assert_allclose(inp * 2, out) + np.testing.assert_allclose(inp * 2, out) -We call ``utt.assert_allclose(expected_value, value)`` to compare +We call ``np.testing.assert_allclose(expected_value, value)`` to compare NumPy ndarray.This raise an error message with more information. Also, the default tolerance can be changed with the PyTensor flags ``config.tensor__cmp_sloppy`` that take values in 0, 1 and 2. The diff --git a/pytensor/tensor/math.py b/pytensor/tensor/math.py index d1e4dc6195..dcbd60b80b 100644 --- a/pytensor/tensor/math.py +++ b/pytensor/tensor/math.py @@ -125,18 +125,6 @@ def _get_atol_rtol(a, b): return atol, rtol -def _allclose(a, b, rtol=None, atol=None): - a = np.asarray(a) - b = np.asarray(b) - atol_, rtol_ = _get_atol_rtol(a, b) - if rtol is not None: - rtol_ = rtol - if atol is not None: - atol_ = atol - - return np.allclose(a, b, atol=atol_, rtol=rtol_) - - class Argmax(COp): """ Calculate the argmax over a given axis or over all axes. diff --git a/pytensor/tensor/type.py b/pytensor/tensor/type.py index 5fdaba8fd8..e4842b6616 100644 --- a/pytensor/tensor/type.py +++ b/pytensor/tensor/type.py @@ -663,7 +663,12 @@ def values_eq_approx( if str(a.dtype) not in continuous_dtypes: return np.all(a == b) else: - cmp = pytensor.tensor.math._allclose(a, b, rtol=rtol, atol=atol) + atol_, rtol_ = pytensor.tensor.math._get_atol_rtol(a, b) + if rtol is not None: + rtol_ = rtol + if atol is not None: + atol_ = atol + cmp = np.allclose(np.asarray(a), np.asarray(b), rtol=rtol_, atol=atol_) if cmp: # Numpy claims they are close, this is good enough for us. 
return True diff --git a/tests/graph/test_compute_test_value.py b/tests/graph/test_compute_test_value.py index ea59ff68f8..1fdc0db514 100644 --- a/tests/graph/test_compute_test_value.py +++ b/tests/graph/test_compute_test_value.py @@ -10,7 +10,7 @@ from pytensor.graph.op import Op from pytensor.graph.type import Type from pytensor.link.c.op import COp -from pytensor.tensor.math import _allclose, dot +from pytensor.tensor.math import _get_atol_rtol, dot from pytensor.tensor.type import fmatrix, iscalar, matrix, vector @@ -85,7 +85,15 @@ def test_variable_only(self): z = dot(x, y) assert hasattr(z.tag, "test_value") f = pytensor.function([x, y], z) - assert _allclose(f(x.tag.test_value, y.tag.test_value), z.tag.test_value) + atol_, rtol_ = _get_atol_rtol( + f(x.tag.test_value, y.tag.test_value), z.tag.test_value + ) + assert np.allclose( + f(x.tag.test_value, y.tag.test_value), + z.tag.test_value, + atol=atol_, + rtol=rtol_, + ) # this test should fail y.tag.test_value = np.random.random((6, 5)).astype(config.floatX) @@ -122,7 +130,16 @@ def test_string_var(self): out = dot(dot(x, y), z) assert hasattr(out.tag, "test_value") tf = pytensor.function([x, y], out) - assert _allclose(tf(x.tag.test_value, y.tag.test_value), out.tag.test_value) + + atol_, rtol_ = _get_atol_rtol( + tf(x.tag.test_value, y.tag.test_value), out.tag.test_value + ) + assert np.allclose( + tf(x.tag.test_value, y.tag.test_value), + out.tag.test_value, + atol=atol_, + rtol=rtol_, + ) def f(x, y, z): return dot(dot(x, y), z) @@ -141,7 +158,10 @@ def test_shared(self): z = dot(x, y) assert hasattr(z.tag, "test_value") f = pytensor.function([x], z) - assert _allclose(f(x.tag.test_value), z.tag.test_value) + atol_, rtol_ = _get_atol_rtol(f(x.tag.test_value), z.tag.test_value) + assert np.allclose( + f(x.tag.test_value), z.tag.test_value, atol=atol_, rtol=rtol_ + ) # this test should fail y.set_value(np.random.random((5, 6)).astype(config.floatX)) @@ -156,7 +176,8 @@ def test_ndarray(self): z = dot(x, y) assert hasattr(z.tag, "test_value") f = pytensor.function([], z) - assert _allclose(f(), z.tag.test_value) + atol_, rtol_ = _get_atol_rtol(f(), z.tag.test_value) + assert np.allclose(f(), z.tag.test_value, atol=atol_, rtol=rtol_) # this test should fail x = np.random.random((2, 4)).astype(config.floatX) @@ -170,7 +191,8 @@ def test_empty_elemwise(self): z = (x + 2) * 3 assert hasattr(z.tag, "test_value") f = pytensor.function([], z) - assert _allclose(f(), z.tag.test_value) + atol_, rtol_ = _get_atol_rtol(f(), z.tag.test_value) + assert np.allclose(f(), z.tag.test_value, atol=atol_, rtol=rtol_) def test_constant(self): x = pt.constant(np.random.random((2, 3)), dtype=config.floatX) @@ -180,7 +202,8 @@ def test_constant(self): z = dot(x, y) assert hasattr(z.tag, "test_value") f = pytensor.function([], z) - assert _allclose(f(), z.tag.test_value) + atol_, rtol_ = _get_atol_rtol(f(), z.tag.test_value) + assert np.allclose(f(), z.tag.test_value, atol=atol_, rtol=rtol_) # this test should fail x = pt.constant(np.random.random((2, 4)), dtype=config.floatX) diff --git a/tests/graph/test_replace.py b/tests/graph/test_replace.py index 2c822587a3..9ee83bc6e2 100644 --- a/tests/graph/test_replace.py +++ b/tests/graph/test_replace.py @@ -12,7 +12,6 @@ vectorize_node, ) from pytensor.tensor import dvector, fvector, vector -from tests import unittest_tools as utt from tests.graph.utils import MyOp, MyVariable, op_multiple_outputs @@ -133,10 +132,10 @@ def test(x, y, mention_y): return function([], out)() x = shared(np.asarray(0.0, 
dtype=config.floatX)) - utt.assert_allclose( + np.testing.assert_allclose( test(x, pt.sum((x + 1) ** 2), mention_y=False), 1.21000003815 ) - utt.assert_allclose( + np.testing.assert_allclose( test(x, pt.sum((x + 1) ** 2), mention_y=True), 1.21000003815 ) diff --git a/tests/link/c/test_params_type.py b/tests/link/c/test_params_type.py index 8fb47077af..1c9f2481d0 100644 --- a/tests/link/c/test_params_type.py +++ b/tests/link/c/test_params_type.py @@ -9,7 +9,6 @@ from pytensor.link.c.type import EnumList, Generic from pytensor.scalar import ScalarType from pytensor.tensor.type import TensorType, matrix -from tests import unittest_tools as utt tensor_type_0d = TensorType("float64", shape=tuple()) @@ -348,5 +347,5 @@ def test_op_params(self): vy1 = f1(vx) vy2 = f2(vx) ref = a * (vx**2) + b * vx + c - utt.assert_allclose(vy1, vy2) - utt.assert_allclose(ref, vy1) + np.testing.assert_allclose(vy1, vy2) + np.testing.assert_allclose(ref, vy1) diff --git a/tests/link/test_vm.py b/tests/link/test_vm.py index 69a922e731..8238df156c 100644 --- a/tests/link/test_vm.py +++ b/tests/link/test_vm.py @@ -20,7 +20,6 @@ from pytensor.tensor.math import cosh, tanh from pytensor.tensor.type import lscalar, scalar, scalars, vector, vectors from pytensor.tensor.variable import TensorConstant -from tests import unittest_tools as utt class SomeOp(Op): @@ -221,7 +220,7 @@ def test_partial_function(linker): assert f(3, output_subset=[0, 1, 2]) == f(3) assert f(4, output_subset=[0, 2]) == [f(4)[0], f(4)[2]] - utt.assert_allclose(f(5), np.array([32.0, 16.0, 1.7857142857142858])) + np.testing.assert_allclose(f(5), np.array([32.0, 16.0, 1.7857142857142858])) @pytest.mark.parametrize( diff --git a/tests/scalar/test_basic.py b/tests/scalar/test_basic.py index c8f0fc335b..fbbc857267 100644 --- a/tests/scalar/test_basic.py +++ b/tests/scalar/test_basic.py @@ -3,7 +3,6 @@ import pytensor import pytensor.tensor as pt -import tests.unittest_tools as utt from pytensor.compile.mode import Mode from pytensor.graph.fg import FunctionGraph from pytensor.link.c.basic import DualLinker @@ -477,11 +476,11 @@ def test_grad_inrange(): # x is equal to the lower or higher bound but in that case # PyTensor defines the gradient to be zero for stability. 
f = pytensor.function([x, low, high], [gx, glow, ghigh]) - utt.assert_allclose(f(0, 1, 5), [0, 0, 0]) - utt.assert_allclose(f(1, 1, 5), [0, 0, 0]) - utt.assert_allclose(f(2, 1, 5), [0, 0, 0]) - utt.assert_allclose(f(5, 1, 5), [0, 0, 0]) - utt.assert_allclose(f(7, 1, 5), [0, 0, 0]) + np.testing.assert_allclose(f(0, 1, 5), [0, 0, 0]) + np.testing.assert_allclose(f(1, 1, 5), [0, 0, 0]) + np.testing.assert_allclose(f(2, 1, 5), [0, 0, 0]) + np.testing.assert_allclose(f(5, 1, 5), [0, 0, 0]) + np.testing.assert_allclose(f(7, 1, 5), [0, 0, 0]) def test_grad_abs(): diff --git a/tests/scan/test_basic.py b/tests/scan/test_basic.py index 7bdf490b68..af7c2d37dd 100644 --- a/tests/scan/test_basic.py +++ b/tests/scan/test_basic.py @@ -348,7 +348,7 @@ def f_pow2(x_tm1): numpy_values = np.array([state * (2 ** (k + 1)) for k in range(steps)]) pytensor_values = my_f(state, steps) - utt.assert_allclose(numpy_values, pytensor_values) + np.testing.assert_allclose(numpy_values, pytensor_values) def test_inner_storage_leak(self): """ @@ -495,8 +495,8 @@ def test_only_nonseq_inputs(self): expected_out2 = np.ones(inputs.shape, dtype="int8") * n_steps out1, out2 = fun(inputs) - utt.assert_allclose(out1, expected_out1) - utt.assert_allclose(out2, expected_out2) + np.testing.assert_allclose(out1, expected_out1) + np.testing.assert_allclose(out2, expected_out2) def test_one_sequence_one_output_weights(self): """ @@ -538,7 +538,7 @@ def f_rnn(u_t, x_tm1, W_in, W): for step in range(1, 4): v_out[step] = v_u[step] * W_in + v_out[step - 1] * W pytensor_values = f2(v_u, v_x0, W_in, W) - utt.assert_allclose(pytensor_values, v_out) + np.testing.assert_allclose(pytensor_values, v_out) def test_one_sequence_one_output_weights_shared(self): """ @@ -704,7 +704,7 @@ def f_rnn(u_t): v_u = rng.uniform(-5.0, 5.0, size=(5,)) numpy_result = v_u + 3 pytensor_result = f2(v_u) - utt.assert_allclose(pytensor_result, numpy_result) + np.testing.assert_allclose(pytensor_result, numpy_result) def test_backwards(self): def f_rnn(u_t, x_tm1, W_in, W): @@ -742,7 +742,7 @@ def f_rnn(u_t, x_tm1, W_in, W): v_out[step] = v_u[3 - step] * W_in + v_out[step - 1] * W pytensor_values = f2(v_u, v_x0, W_in, W) - utt.assert_allclose(pytensor_values, v_out) + np.testing.assert_allclose(pytensor_values, v_out) def test_output_padding(self): """ @@ -828,7 +828,7 @@ def lp(x, x2): output = f([1, 2, 3, 4, 5]) expected_output = np.array([1, 2, 3], dtype="float32") - utt.assert_allclose(output, expected_output) + np.testing.assert_allclose(output, expected_output) def test_shared_arguments_with_updates(self): rng = np.random.default_rng(utt.fetch_seed()) @@ -891,11 +891,11 @@ def f(u1_t, u2_t, y0_tm3, y0_tm2, y0_tm1, y1_tm1): numpy_W1 = numpy_W1 + 0.1 numpy_W2 = numpy_W2 + 0.05 - utt.assert_allclose(pytensor_y0, numpy_y0[3:]) - utt.assert_allclose(pytensor_y1, numpy_y1[1:]) - utt.assert_allclose(pytensor_y2, numpy_y2) - utt.assert_allclose(W1.get_value(), numpy_W1) - utt.assert_allclose(W2.get_value(), numpy_W2) + np.testing.assert_allclose(pytensor_y0, numpy_y0[3:]) + np.testing.assert_allclose(pytensor_y1, numpy_y1[1:]) + np.testing.assert_allclose(pytensor_y2, numpy_y2) + np.testing.assert_allclose(W1.get_value(), numpy_W1) + np.testing.assert_allclose(W2.get_value(), numpy_W2) def test_simple_shared_random(self): pytensor_rng = RandomStream(utt.fetch_seed()) @@ -920,9 +920,9 @@ def test_simple_shared_random(self): numpy_v[i] = rng.uniform(-1, 1, size=(2,)) pytensor_v = my_f() - utt.assert_allclose(pytensor_v, numpy_v[:5, :]) + 
np.testing.assert_allclose(pytensor_v, numpy_v[:5, :]) pytensor_v = my_f() - utt.assert_allclose(pytensor_v, numpy_v[5:, :]) + np.testing.assert_allclose(pytensor_v, numpy_v[5:, :]) def test_only_shared_no_input_no_output(self): rng = np.random.default_rng(utt.fetch_seed()) @@ -940,7 +940,7 @@ def f_2(): n_steps = 3 this_f(n_steps) numpy_state = v_state * (2 ** (n_steps)) - utt.assert_allclose(state.get_value(), numpy_state) + np.testing.assert_allclose(state.get_value(), numpy_state) def test_random_as_input_to_scan(self): trng = RandomStream(123) @@ -956,8 +956,8 @@ def test_random_as_input_to_scan(self): ny1, nz1 = f(nx) ny2, nz2 = f(nx) - utt.assert_allclose([ny1, ny1], nz1) - utt.assert_allclose([ny2, ny2], nz2) + np.testing.assert_allclose([ny1, ny1], nz1) + np.testing.assert_allclose([ny2, ny2], nz2) assert not np.allclose(ny1, ny2) def test_shared_updates(self): @@ -1108,7 +1108,7 @@ def test_inner_grad(self): vR = np.array([[3.6, 1.8], [1.8, 0.9]], dtype=config.floatX) out = f(vx, vA) - utt.assert_allclose(out, vR) + np.testing.assert_allclose(out, vR) @pytest.mark.parametrize( "mode", [Mode(linker="cvm", optimizer=None), Mode(linker="cvm")] @@ -1699,7 +1699,7 @@ def reset_rng_grad_fn(*args): multiple_outputs_numeric_grad(reset_rng_cost_fn, [v_u, v_x0, vW_in]) analytic_grad = reset_rng_grad_fn(v_u, v_x0, vW_in) - utt.assert_allclose(analytic_grad[0][:2], np.zeros((2, 2))) + np.testing.assert_allclose(analytic_grad[0][:2], np.zeros((2, 2))) def test_grad_wrt_shared(self): x1 = shared(3.0) @@ -1709,7 +1709,7 @@ def test_grad_wrt_shared(self): m = grad(y.sum(), x1) f = function([x2], m, allow_input_downcast=True) - utt.assert_allclose(f([2, 3]), 5) + np.testing.assert_allclose(f([2, 3]), 5) def test_inner_grad_wrt_shared(self): x1 = scalar("x1") @@ -1785,12 +1785,12 @@ def inner_fct(inp1, inp2, inp3): expected_g_out_init = expected_g_seq[:3] expected_g_non_seq = np.array([22, 22, 22]) - utt.assert_allclose(outputs[0], expected_g_seq) - utt.assert_allclose(outputs[1], expected_g_out_init) - utt.assert_allclose(outputs[2], expected_g_non_seq) - utt.assert_allclose(outputs[3], expected_g_seq) - utt.assert_allclose(outputs[4], expected_g_out_init) - utt.assert_allclose(outputs[5], expected_g_non_seq) + np.testing.assert_allclose(outputs[0], expected_g_seq) + np.testing.assert_allclose(outputs[1], expected_g_out_init) + np.testing.assert_allclose(outputs[2], expected_g_non_seq) + np.testing.assert_allclose(outputs[3], expected_g_seq) + np.testing.assert_allclose(outputs[4], expected_g_out_init) + np.testing.assert_allclose(outputs[5], expected_g_non_seq) def test_grad_duplicate_outputs_connection_pattern(self): """ @@ -1990,9 +1990,9 @@ def rnn_fn(_u, _y, _W): vnu, vnh0, vnW = fn_rop(v_u, v_h0, v_W, v_eu, v_eh0, v_eW) tnu, tnh0, tnW = fn_test(v_u, v_h0, v_W, v_eu, v_eh0, v_eW) - utt.assert_allclose(vnu, tnu, atol=1e-6) - utt.assert_allclose(vnh0, tnh0, atol=1e-6) - utt.assert_allclose(vnW, tnW, atol=1e-6) + np.testing.assert_allclose(vnu, tnu, atol=1e-6) + np.testing.assert_allclose(vnh0, tnh0, atol=1e-6) + np.testing.assert_allclose(vnW, tnW, atol=1e-6) @pytest.mark.slow def test_R_op_2(self): @@ -2072,9 +2072,9 @@ def rnn_fn(_u, _y, _W): ) tnu, tnh0, tnW, tno = fn_test(v_u, v_h0, v_W, v_eu, v_eh0, v_eW) - utt.assert_allclose(vnu, tnu, atol=1e-6) - utt.assert_allclose(vnh0, tnh0, atol=1e-6) - utt.assert_allclose(vnW, tnW, atol=2e-6) + np.testing.assert_allclose(vnu, tnu, atol=1e-6) + np.testing.assert_allclose(vnh0, tnh0, atol=1e-6) + np.testing.assert_allclose(vnW, tnW, atol=2e-6) def 
test_R_op_mitmot(self): # this test is a copy paste from the script given by Justin Bayer to @@ -2375,8 +2375,8 @@ def test_grad_until(self): f = function([self.x, self.threshold], [r, g]) pytensor_output, pytensor_gradient = f(self.seq, 5) - utt.assert_allclose(pytensor_output, self.numpy_output) - utt.assert_allclose(pytensor_gradient, self.numpy_gradient) + np.testing.assert_allclose(pytensor_output, self.numpy_output) + np.testing.assert_allclose(pytensor_gradient, self.numpy_gradient) def test_grad_until_ndim_greater_one(self): def tile_array(inp): @@ -2394,8 +2394,8 @@ def tile_array(inp): f = function([X, self.threshold], [r, g]) pytensor_output, pytensor_gradient = f(arr, 5) - utt.assert_allclose(pytensor_output, tile_array(self.numpy_output)) - utt.assert_allclose(pytensor_gradient, tile_array(self.numpy_gradient)) + np.testing.assert_allclose(pytensor_output, tile_array(self.numpy_output)) + np.testing.assert_allclose(pytensor_gradient, tile_array(self.numpy_gradient)) def test_grad_until_and_truncate(self): n = 3 @@ -2410,8 +2410,8 @@ def test_grad_until_and_truncate(self): pytensor_output, pytensor_gradient = f(self.seq, 5) self.numpy_gradient[: 7 - n] = 0 - utt.assert_allclose(pytensor_output, self.numpy_output) - utt.assert_allclose(pytensor_gradient, self.numpy_gradient) + np.testing.assert_allclose(pytensor_output, self.numpy_output) + np.testing.assert_allclose(pytensor_gradient, self.numpy_gradient) def test_grad_until_and_truncate_sequence_taps(self): n = 3 @@ -2428,7 +2428,7 @@ def test_grad_until_and_truncate_sequence_taps(self): # Gradient computed by hand: numpy_grad = np.array([0, 0, 0, 5, 6, 10, 4, 5, 0, 0, 0, 0, 0, 0, 0]) numpy_grad = numpy_grad.astype(config.floatX) - utt.assert_allclose(pytensor_gradient, numpy_grad) + np.testing.assert_allclose(pytensor_gradient, numpy_grad) def test_mintap_onestep(): @@ -2637,7 +2637,7 @@ def numpy_implementation(vsample): t_result = my_f(v_vsample) n_result = numpy_implementation(v_vsample) - utt.assert_allclose(t_result, n_result) + np.testing.assert_allclose(t_result, n_result) def test_reordering(self, benchmark): """Test re-ordering of inputs. 
@@ -2703,8 +2703,8 @@ def f_rnn_cmpl(u1_t, u2_t, x_tm1, y_tm1, y_tm3, W_in1): f4, v_u1, v_u2, v_x0, v_y0, vW_in1 ) - utt.assert_allclose(pytensor_x, v_x) - utt.assert_allclose(pytensor_y, v_y) + np.testing.assert_allclose(pytensor_x, v_x) + np.testing.assert_allclose(pytensor_y, v_y) def test_scan_as_tensor_on_gradients(self, benchmark): to_scan = dvector("to_scan") @@ -2757,7 +2757,7 @@ def one_step(x_t, h_tm1, W): rval = np.asarray([[5187989] * 5] * 5, dtype=config.floatX) arg1 = np.ones((5, 5), dtype=config.floatX) arg2 = np.ones((10, 5), dtype=config.floatX) - utt.assert_allclose(f(arg1, arg2), rval) + np.testing.assert_allclose(f(arg1, arg2), rval) def test_use_scan_direct_output(self): """ @@ -2800,8 +2800,8 @@ def test_use_scan_direct_output(self): expected_output2.append(expected_output1[-1] + expected_output2[-1]) expected_output1.append(expected_output1[-1] + i) - utt.assert_allclose(output1, expected_output1) - utt.assert_allclose(output2, expected_output2) + np.testing.assert_allclose(output1, expected_output1) + np.testing.assert_allclose(output2, expected_output2) def test_use_scan_direct_output2(self): """ @@ -2838,8 +2838,8 @@ def test_use_scan_direct_output2(self): for i in range(5): expected_out1[i] = expected_out2[i] + x_val - utt.assert_allclose(out1, expected_out1) - utt.assert_allclose(out2, expected_out2) + np.testing.assert_allclose(out1, expected_out1) + np.testing.assert_allclose(out2, expected_out2) def test_same(self): x = fmatrix("x") @@ -2867,7 +2867,7 @@ def f(inp, mem): f_vals = f(x_val) memory.set_value(mem_val) f2_vals = f2(x_val) - utt.assert_allclose(f_vals, f2_vals) + np.testing.assert_allclose(f_vals, f2_vals) def test_eliminate_seqs(self): U = vector("U") @@ -2900,10 +2900,10 @@ def rec_fn(*args): rng = np.random.default_rng(utt.fetch_seed()) v_u = asarrayX(rng.uniform(size=(5,))) outs = f(v_u, [0, 0, 0], 0) - utt.assert_allclose(outs[0], v_u + 1) - utt.assert_allclose(outs[1], v_u + 2) - utt.assert_allclose(outs[2], v_u + 3) - utt.assert_allclose(sh.get_value(), v_u[-1] + 4) + np.testing.assert_allclose(outs[0], v_u + 1) + np.testing.assert_allclose(outs[1], v_u + 2) + np.testing.assert_allclose(outs[2], v_u + 3) + np.testing.assert_allclose(sh.get_value(), v_u[-1] + 4) def test_eliminate_nonseqs(self): W = scalar("W") @@ -2937,10 +2937,10 @@ def rec_fn(*args): rng = np.random.default_rng(utt.fetch_seed()) v_w = asarrayX(rng.uniform()) outs = f(v_w, [0, 0, 0], 0) - utt.assert_allclose(outs[0], v_w + 1) - utt.assert_allclose(outs[1], v_w + 2) - utt.assert_allclose(outs[2], v_w + 3) - utt.assert_allclose(sh.get_value(), v_w + 4) + np.testing.assert_allclose(outs[0], v_w + 1) + np.testing.assert_allclose(outs[1], v_w + 2) + np.testing.assert_allclose(outs[2], v_w + 3) + np.testing.assert_allclose(sh.get_value(), v_w + 4) def test_seq_tap_bug_jeremiah(self): inp = np.arange(10).reshape(-1, 1).astype(config.floatX) @@ -3039,7 +3039,7 @@ def inner_fn(tap_m3, tap_m2, tap_m1): states[3:6], ] - utt.assert_allclose(outputs, expected_outputs) + np.testing.assert_allclose(outputs, expected_outputs) @pytest.mark.slow def test_hessian_bug_grad_grad_two_scans(self, benchmark): @@ -3141,7 +3141,7 @@ def step(seq): # Ensure the output of the function is valid output = f(np.random.default_rng(utt.fetch_seed()).random(5)) - utt.assert_allclose(output, np.ones(5)) + np.testing.assert_allclose(output[0], np.ones(5)) def test_grad_bug_disconnected_input(self): W = shared(np.zeros((3, 3)), name="W") @@ -3150,7 +3150,7 @@ def test_grad_bug_disconnected_input(self): # This 
used to raise an exception f = function([v], grad(y.sum(), W)) - utt.assert_allclose(f([1, 2]), [[0, 0, 0], [1, 1, 1], [1, 1, 1]]) + np.testing.assert_allclose(f([1, 2]), [[0, 0, 0], [1, 1, 1], [1, 1, 1]]) def test_grad_find_input(self): w = shared(np.array(0, dtype="float32"), name="w") @@ -3207,7 +3207,7 @@ def f_rnn_shared(u_tm2, x_tm1, x_tm2): numpy_out = np.zeros((2,)) numpy_out[0] = vu[0] * vW_in + vx0[1] * vW + vx0[0] numpy_out[1] = vu[1] * vW_in + numpy_out[0] * vW + vx0[1] - utt.assert_allclose(numpy_out, pytensor_out) + np.testing.assert_allclose(numpy_out, pytensor_out) def test_past_future_taps_shared(self): """ @@ -3247,7 +3247,7 @@ def f_rnn_shared(u_tm2, u_tp2, x_tm1, x_tm2): # and vx0[0] as vx0[-2], vx0[1] as vx0[-1] numpy_out[0] = (vu[0] + vu[4]) * vW_in + vx0[1] * vW + vx0[0] numpy_out[1] = (vu[1] + vu[5]) * vW_in + numpy_out[0] * vW + vx0[1] - utt.assert_allclose(numpy_out, pytensor_out) + np.testing.assert_allclose(numpy_out, pytensor_out) def test_generator_one_output_scalar(self): """ @@ -3281,7 +3281,7 @@ def f_pow2(x_tm1): numpy_values = np.array([state * (2 ** (k + 1)) for k in range(steps)]) pytensor_values = my_f(state, steps) - utt.assert_allclose(numpy_values, pytensor_values[0]) + np.testing.assert_allclose(numpy_values, pytensor_values[0]) def test_default_value_broadcasted(self): def floatx(X): @@ -3568,8 +3568,8 @@ def f_rnn_cmpl(u1_t, u2_t, x_tm1, y_tm1, y_tm3, W_in1): (pytensor_dump, pytensor_x, pytensor_y) = f4(v_u1, v_u2, v_x0, v_y0, vW_in1) - utt.assert_allclose(pytensor_x, v_x[-2:]) - utt.assert_allclose(pytensor_y, v_y[-4:]) + np.testing.assert_allclose(pytensor_x, v_x[-2:]) + np.testing.assert_allclose(pytensor_y, v_y[-4:]) def test_until_random_infer_shape(self): """ @@ -3688,8 +3688,8 @@ def f_rnn_cmpl(u1_t, u2_t, x_tm1, y_tm1, W_in1): v_y[i] = np.dot(v_x[i - 1], vWout) (pytensor_x, pytensor_y) = f4(v_u1, v_u2, v_x0, v_y0, vW_in1) - utt.assert_allclose(pytensor_x, v_x) - utt.assert_allclose(pytensor_y, v_y) + np.testing.assert_allclose(pytensor_x, v_x) + np.testing.assert_allclose(pytensor_y, v_y) def test_multiple_outs_taps(self, benchmark): l = 5 diff --git a/tests/scan/test_rewriting.py b/tests/scan/test_rewriting.py index 6f77625f2f..983a8c5812 100644 --- a/tests/scan/test_rewriting.py +++ b/tests/scan/test_rewriting.py @@ -213,7 +213,7 @@ def lambda_fn(h, W1, W2): # pytensor. 
Note that what we ask pytensor to do is to repeat the 2 # elements vector v_out 5 times sol[:, :] = v_out - utt.assert_allclose(sol, f(v_h, v_W1, v_W2)) + np.testing.assert_allclose(sol, f(v_h, v_W1, v_W2)) def test_pushout_while(self): """ @@ -256,7 +256,7 @@ def lambda_fn(step_idx, W1, W2): out = f(*input_values) out_ref = f_ref(*input_values) - utt.assert_allclose(out, out_ref) + np.testing.assert_allclose(out, out_ref) def test_pushout(self): W1 = matrix("W1") @@ -296,8 +296,8 @@ def fn(i, i_tm1): f = function([inp], [i_t, i_tm1]) val = np.arange(10).reshape(5, 2).astype(config.floatX) ret = f(val) - utt.assert_allclose(ret[0], val + 10) - utt.assert_allclose( + np.testing.assert_allclose(ret[0], val + 10) + np.testing.assert_allclose( ret[1], [[0.0, 0.0], [10.0, 11.0], [12.0, 13.0], [14.0, 15.0], [16.0, 17.0]] ) @@ -389,8 +389,8 @@ def predict_mean_i(i, x_star, s_star, X, beta, h): ) jacobian_outputs = dfdm_j(X, Y, test_m, test_s) - utt.assert_allclose(expected_output, scan_output) - utt.assert_allclose(expected_output, jacobian_outputs) + np.testing.assert_allclose(expected_output, scan_output) + np.testing.assert_allclose(expected_output, jacobian_outputs) @config.change_flags(on_opt_error="raise") def test_pushout_seqs2(self): @@ -421,7 +421,7 @@ def test_pushout_nonseq(self): outs = f() expected_outs = [[4, 4], [2, 2]] - utt.assert_allclose(outs, expected_outs) + np.testing.assert_allclose(outs, expected_outs) def test_dot_not_output(self): """ @@ -458,7 +458,7 @@ def test_dot_not_output(self): output_opt = f_opt(v_value, m_value) output_no_opt = f_no_opt(v_value, m_value) - utt.assert_allclose(output_opt, output_no_opt) + np.testing.assert_allclose(output_opt, output_no_opt) def test_dot_nitsot_output(self): """ @@ -504,8 +504,8 @@ def inner_fct(vect, mat): output_opt = f_opt(a_value, b_value) output_no_opt = f_no_opt(a_value, b_value) - utt.assert_allclose(output_opt[0], output_no_opt[0]) - utt.assert_allclose(output_opt[1], output_no_opt[1]) + np.testing.assert_allclose(output_opt[0], output_no_opt[0]) + np.testing.assert_allclose(output_opt[1], output_no_opt[1]) def test_dot_sitsot_output(self): """ @@ -550,8 +550,8 @@ def inner_fct(seq1, previous_output1, nonseq1): output_opt = f_opt(a_value, b_value) output_no_opt = f_no_opt(a_value, b_value) - utt.assert_allclose(output_opt[0], output_no_opt[0]) - utt.assert_allclose(output_opt[1], output_no_opt[1]) + np.testing.assert_allclose(output_opt[0], output_no_opt[0]) + np.testing.assert_allclose(output_opt[1], output_no_opt[1]) def test_OpFromGraph_shared(self): """Make sure that a simple `OpFromGraph` with a shared variable can be pushed out.""" @@ -617,7 +617,7 @@ def test_sum_dot(self): rng = np.random.default_rng(utt.fetch_seed()) vA = rng.uniform(size=(5, 5)).astype(config.floatX) vB = rng.uniform(size=(5, 5)).astype(config.floatX) - utt.assert_allclose(f(vA, vB), np.dot(vA.T, vB)) + np.testing.assert_allclose(f(vA, vB), np.dot(vA.T, vB)) def test_pregreedy_optimizer(self, benchmark): W = pt.zeros((5, 4)) @@ -739,7 +739,9 @@ def rnn_step1( # Compare the outputs of the two functions on the same input data. 
f_opt_output = f_opt(x_value, ri_value, zi_value) f_no_opt_output = f_no_opt(x_value, ri_value, zi_value) - utt.assert_allclose(f_opt_output, f_no_opt_output) + np.testing.assert_allclose( + f_opt_output, f_no_opt_output, atol=1e-08, rtol=1e-05 + ) def test_non_zero_init(self): """Test the case where the initial value for the nitsot output is non-zero.""" @@ -792,7 +794,7 @@ def inner_fct(seq1, seq2, seq3, previous_output): output_opt = f_opt(input1_value, input2_value, input3_value) output_no_opt = f_no_opt(input1_value, input2_value, input3_value) - utt.assert_allclose(output_opt, output_no_opt) + np.testing.assert_allclose(output_opt, output_no_opt) class TestScanMerge: @@ -1112,8 +1114,8 @@ def f_rnn_shared(u0_t, u1_t, u2_t, x0_tm1, x1_tm1): # equivalent is done (pytensor_x0, pytensor_x1) = f9(vu0, vu1, vu2, vx0, vx1) # assert that pytensor does what it should - utt.assert_allclose(pytensor_x0, numpy_x0) - utt.assert_allclose(pytensor_x1, numpy_x1) + np.testing.assert_allclose(pytensor_x0, numpy_x0, atol=1e-08, rtol=1e-05) + np.testing.assert_allclose(pytensor_x1, numpy_x1, atol=1e-08, rtol=1e-05) @utt.assertFailure_fast def test_simple_rnn_2(self): @@ -1180,8 +1182,8 @@ def f_rnn_shared(u0_t, u1_t, u1_tp1, u2_tm1, u2_t, u2_tp1, x0_tm1, x1_tm1): # equivalent is done (pytensor_x0, pytensor_x1) = f9(vu0, vu1, vu2, vx0, vx1) # assert that pytensor does what it should - utt.assert_allclose(pytensor_x0, numpy_x0) - utt.assert_allclose(pytensor_x1, numpy_x1) + np.testing.assert_allclose(pytensor_x0, numpy_x0, atol=1e-08, rtol=1e-05) + np.testing.assert_allclose(pytensor_x1, numpy_x1, atol=1e-08, rtol=1e-05) @utt.assertFailure_fast def test_inplace3(self): @@ -1268,8 +1270,8 @@ def f_rnn_cmpl(u1_t, u2_t, x_tm1, y_tm1, y_tm3, W_in1): (pytensor_dump, pytensor_x, pytensor_y) = f4(v_u1, v_u2, v_x0, v_y0, vW_in1) - utt.assert_allclose(pytensor_x, v_x[-1:]) - utt.assert_allclose(pytensor_y, v_y[-1:]) + np.testing.assert_allclose(pytensor_x, v_x[-1:].squeeze(0)) + np.testing.assert_allclose(pytensor_y, v_y[-1:]) def test_save_mem_reduced_number_of_steps(self): def f_rnn(u_t): @@ -1304,13 +1306,13 @@ def f_rnn(u_t): # compute the output in numpy tx1, tx2, tx3, tx4, tx5, tx6, tx7 = f2(v_u, 3, 15) - utt.assert_allclose(tx1, v_u[:2] + 1.0) - utt.assert_allclose(tx2, v_u[4] + 2.0) - utt.assert_allclose(tx3, v_u[3] + 3.0) - utt.assert_allclose(tx4, v_u[:3] + 4.0) - utt.assert_allclose(tx5, v_u[-10] + 5.0) - utt.assert_allclose(tx6, v_u[-15] + 6.0) - utt.assert_allclose(tx7, v_u[:-15] + 7.0) + np.testing.assert_allclose(tx1, v_u[:2] + 1.0) + np.testing.assert_allclose(tx2, v_u[4] + 2.0) + np.testing.assert_allclose(tx3, v_u[3] + 3.0) + np.testing.assert_allclose(tx4, v_u[:3] + 4.0) + np.testing.assert_allclose(tx5, v_u[-10] + 5.0) + np.testing.assert_allclose(tx6, v_u[-15] + 6.0) + np.testing.assert_allclose(tx7, v_u[:-15] + 7.0) def test_save_mem_store_steps(self): def f_rnn(u_t, x1_tm1, x1_tm3, x2_tm1, x3tm2, x3_tm1, x4_tm1): @@ -1361,11 +1363,11 @@ def f_rnn(u_t, x1_tm1, x1_tm3, x2_tm1, x3tm2, x3_tm1, x4_tm1): # compute the output in numpy tx1, tx2, tx3, tx4, tx5 = f2(v_u, [0, 0], 0, [0, 0], 0) - utt.assert_allclose(tx1, v_u[-7] + 1.0) - utt.assert_allclose(tx2, v_u[-3:-1] + 2.0) - utt.assert_allclose(tx3, v_u[-6:] + 3.0) - utt.assert_allclose(tx4, v_u[-1] + 4.0) - utt.assert_allclose(tx5, v_u[-1] + 5.0) + np.testing.assert_allclose(tx1, v_u[-7] + 1.0, atol=1e-08, rtol=1e-05) + np.testing.assert_allclose(tx2, v_u[-3:-1] + 2.0, atol=1e-08, rtol=1e-05) + np.testing.assert_allclose(tx3, v_u[-6:] + 3.0, 
atol=1e-08, rtol=1e-05) + np.testing.assert_allclose(tx4, v_u[-1] + 4.0, atol=1e-08, rtol=1e-05) + np.testing.assert_allclose(tx5, v_u[-1] + 5.0, atol=1e-08, rtol=1e-05) def test_savemem_does_not_duplicate_number_of_scan_nodes(self): var = pt.ones(()) @@ -1444,7 +1446,7 @@ def get_outputs(x, w): expected_output = np.tile(x_value[:, 0].sum(0), (3, 1)).transpose() output = f(x_value, w_value) - utt.assert_allclose(output, expected_output) + np.testing.assert_allclose(output, expected_output) @pytest.mark.skip( reason="The 'assertion' of this test relied on something that no longer exists " @@ -1735,4 +1737,4 @@ def test_opt_order(): vx = np.array([[1.0, 1.0], [2.0, 2.0]], dtype=config.floatX) vA = np.array([[1.0, 1.0], [1.0, 0.0]], dtype=config.floatX) vR = np.array([[[2, 1], [4, 2]], [[2, 1], [4, 2]]], dtype=config.floatX) - utt.assert_allclose(f(vx, vA), vR) + np.testing.assert_allclose(f(vx, vA), vR) diff --git a/tests/scan/test_views.py b/tests/scan/test_views.py index 38c9b9cfcd..e3fa3e59d8 100644 --- a/tests/scan/test_views.py +++ b/tests/scan/test_views.py @@ -34,7 +34,7 @@ def test_map(): vals = rng.uniform(-5.0, 5.0, size=(10,)) abs_vals = abs(vals) pytensor_vals = f(vals) - utt.assert_allclose(abs_vals, pytensor_vals) + np.testing.assert_allclose(abs_vals, pytensor_vals) def test_reduce_memory_consumption(): @@ -66,7 +66,7 @@ def test_reduce_memory_consumption(): gx = grad(o, x) f2 = function([], gx) - utt.assert_allclose(f2(), np.ones((10,))) + np.testing.assert_allclose(f2(), np.ones((10,))) def test_foldl_memory_consumption(): @@ -99,7 +99,7 @@ def test_foldl_memory_consumption(): gx = grad(o, x) f2 = function([], gx) - utt.assert_allclose(f2(), np.ones((10,))) + np.testing.assert_allclose(f2(), np.ones((10,))) def test_foldr_memory_consumption(): @@ -132,4 +132,4 @@ def test_foldr_memory_consumption(): gx = grad(o, x) f2 = function([], gx) - utt.assert_allclose(f2(), np.ones((10,))) + np.testing.assert_allclose(f2(), np.ones((10,))) diff --git a/tests/sparse/test_basic.py b/tests/sparse/test_basic.py index afae9b2187..8c7838f43a 100644 --- a/tests/sparse/test_basic.py +++ b/tests/sparse/test_basic.py @@ -92,6 +92,7 @@ ) from pytensor.tensor.basic import MakeVector from pytensor.tensor.elemwise import DimShuffle, Elemwise +from pytensor.tensor.math import _get_atol_rtol from pytensor.tensor.math import sum as pt_sum from pytensor.tensor.shape import Shape_i from pytensor.tensor.subtensor import ( @@ -1313,7 +1314,7 @@ def test_upcast(self): scipy_result = spmat * mat assert pytensor_result.shape == scipy_result.shape assert pytensor_result.dtype == scipy_result.dtype - utt.assert_allclose(scipy_result, pytensor_result) + np.testing.assert_allclose(scipy_result, pytensor_result) def test_opt_unpack(self): # @@ -1431,7 +1432,7 @@ def test_csc_correct_output_faster_than_scipy(self): # fail if PyTensor is slower than scipy by more than a certain amount overhead_tol = 0.003 # seconds overall overhead_rtol = 1.2 # times as long - utt.assert_allclose(scipy_result, pytensor_result) + np.testing.assert_allclose(scipy_result, pytensor_result) if pytensor.config.mode == "FAST_RUN" and pytensor.config.cxx: assert pytensor_time <= overhead_rtol * scipy_time + overhead_tol @@ -1466,7 +1467,7 @@ def test_csr_correct_output_faster_than_scipy(self): # print 'scipy took', scipy_time overhead_tol = 0.002 # seconds overhead_rtol = 1.1 # times as long - utt.assert_allclose(scipy_result, pytensor_result) + np.testing.assert_allclose(scipy_result, pytensor_result) if pytensor.config.mode == "FAST_RUN" 
and pytensor.config.cxx: assert pytensor_time <= overhead_rtol * scipy_time + overhead_tol, ( pytensor_time, @@ -1520,7 +1521,7 @@ def test_csr_dense(self): def f_b(x, y): return x * y - utt.assert_allclose(f_a(x_v, y_v), f_b(x_v, y_v)) + np.testing.assert_allclose(f_a(x_v, y_v), f_b(x_v, y_v)) # Test infer_shape self._compile_and_check( @@ -1542,7 +1543,7 @@ def test_csc_dense(self): def f_b(x, y): return x * y - utt.assert_allclose(f_a(x_v, y_v), f_b(x_v, y_v)) + np.testing.assert_allclose(f_a(x_v, y_v), f_b(x_v, y_v)) # Test infer_shape self._compile_and_check( @@ -1574,7 +1575,7 @@ def f_a(x, y): vx = getattr(self, "x_" + x_f).astype(d1) vy = getattr(self, "y_" + y_f).astype(d2) - utt.assert_allclose(f_a(vx, vy).toarray(), f_b(vx, vy)) + np.testing.assert_allclose(f_a(vx, vy).toarray(), f_b(vx, vy)) # Test infer_shape f_a = pytensor.function([x, y], sparse.dot(x, y).shape) @@ -1743,13 +1744,11 @@ def f_b(z, a, x, y): # As we do a dot product of 2 vector of 100 element, # This mean we can have 2*100*eps abs error. + atol, rtol = _get_atol_rtol(f_a_out, f_b_out) if f_a_out.dtype in ["float64", "complex128"]: atol = 3e-8 rtol = 1e-5 - else: - atol = None - rtol = None - utt.assert_allclose(f_a_out, f_b_out, rtol=rtol, atol=atol) + np.testing.assert_allclose(f_a_out, f_b_out, rtol=rtol, atol=atol) topo = f_a.maker.fgraph.toposort() up = pytensor.scalar.upcast(dtype1, dtype2, dtype3, dtype4) @@ -2009,7 +2008,7 @@ def test_op(self): expected = x * s assert tested.format == format - utt.assert_allclose(expected, tested.toarray()) + np.testing.assert_allclose(expected, tested.toarray()) def test_infer_shape(self): for format, cls in [("csc", sparse.ColScaleCSC), ("csr", sparse.RowScaleCSC)]: @@ -2046,7 +2045,7 @@ def test_op(self): expected = x * s assert tested.format == format - utt.assert_allclose(expected, tested.toarray()) + np.testing.assert_allclose(expected, tested.toarray()) def test_infer_shape(self): for format, cls in [("csc", sparse.RowScaleCSC), ("csr", sparse.ColScaleCSC)]: @@ -2092,7 +2091,7 @@ def test_op(self, op_type): f = pytensor.function(variable, self.op(variable[0], axis=axis)) tested = f(*data) expected = data[0].todense().sum(axis).ravel() - utt.assert_allclose(expected, tested) + np.testing.assert_allclose(expected, [tested], atol=1e-08, rtol=1e-05) def test_infer_shape(self): for format in sparse.sparse_formats: @@ -2131,7 +2130,7 @@ def test_op(self): tested = f(*data) expected = data[0].toarray().diagonal() - utt.assert_allclose(expected, tested) + np.testing.assert_allclose(expected, tested) def test_infer_shape(self): for format in sparse.sparse_formats: @@ -2162,7 +2161,7 @@ def test_op(self): tested = f(*data).toarray() expected = np.diag(*data) - utt.assert_allclose(expected, tested) + np.testing.assert_allclose(expected, tested) assert tested.dtype == expected.dtype assert tested.shape == expected.shape @@ -2199,7 +2198,7 @@ def test_op(self): tested = f(*data).toarray() expected = data[0].sorted_indices().toarray() - utt.assert_allclose(expected, tested) + np.testing.assert_allclose(expected, tested) def test_infer_shape(self): for format in sparse.sparse_formats: @@ -2238,7 +2237,7 @@ def test_op(self): tested = tested.toarray() expected = expected.toarray() - utt.assert_allclose(expected, tested) + np.testing.assert_allclose(expected, tested) def test_grad(self): for format in sparse.sparse_formats: @@ -2351,8 +2350,8 @@ def test_GetItemList(self): s_geta = sp.sparse.csr_matrix(A[0])[[0, 1, 2, 3, 1]].todense() s_getb = sp.sparse.csc_matrix(B[0])[[0, 1, 2, 
3, 1]].todense() - utt.assert_allclose(t_geta, s_geta) - utt.assert_allclose(t_getb, s_getb) + np.testing.assert_allclose(t_geta, s_geta) + np.testing.assert_allclose(t_getb, s_getb) def test_GetItemList_wrong_index(self): a, A = sparse_random_inputs("csr", (4, 5)) @@ -2390,8 +2389,8 @@ def test_GetItem2Lists(self): s_geta = np.asarray(sp.sparse.csr_matrix(A[0])[[0, 0, 1, 3], [0, 1, 2, 4]]) s_getb = np.asarray(sp.sparse.csc_matrix(B[0])[[0, 0, 1, 3], [0, 1, 2, 4]]) - utt.assert_allclose(t_geta, s_geta) - utt.assert_allclose(t_getb, s_getb) + np.testing.assert_allclose(t_geta, s_geta[0]) + np.testing.assert_allclose(t_getb, s_getb[0]) def test_GetItem2Lists_wrong_index(self): a, A = sparse_random_inputs("csr", (4, 5)) @@ -2652,9 +2651,9 @@ def test_cast(self): t_cls = t_cls.toarray() t_prop = t_prop.toarray() - utt.assert_allclose(expected, t_func) - utt.assert_allclose(expected, t_cls) - utt.assert_allclose(expected, t_prop) + np.testing.assert_allclose(expected, t_func) + np.testing.assert_allclose(expected, t_cls) + np.testing.assert_allclose(expected, t_prop) @pytest.mark.slow def test_infer_shape(self): @@ -2724,7 +2723,7 @@ def test_op(self): tested = f(*blocks) expected = self.expected_f(blocks, format=out_f, dtype=dtype) - utt.assert_allclose(expected.toarray(), tested.toarray()) + np.testing.assert_allclose(expected.toarray(), tested.toarray()) assert tested.format == expected.format assert tested.dtype == expected.dtype @@ -2801,7 +2800,7 @@ def test_op(self): tested = f(*self.a[format]) expected = 2 * self.a[format][0] - utt.assert_allclose(expected.toarray(), tested.toarray()) + np.testing.assert_allclose(expected.toarray(), tested.toarray()) assert tested.format == expected.format assert tested.dtype == expected.dtype @@ -2895,7 +2894,7 @@ def test_op(self): tested = tested.toarray() try: - utt.assert_allclose(expected, tested) + np.testing.assert_allclose(expected, tested) except AssertionError: raise AssertionError(self.__name__) @@ -2960,7 +2959,7 @@ def test_op(self): tested = tested.toarray() try: - utt.assert_allclose(tested, expected, rtol=1e-2) + np.testing.assert_allclose(tested, expected, rtol=1e-2) except AssertionError: raise AssertionError(self.__name__) @@ -3202,7 +3201,7 @@ def test_mul_s_v(self): out = f(spmat, mat) - utt.assert_allclose(spmat.toarray() * mat, out.toarray()) + np.testing.assert_allclose(spmat.toarray() * mat, out.toarray()) class TestStructuredAddSV: @@ -3232,7 +3231,7 @@ def test_structured_add_s_v(self): out = f(spmat, mat) - utt.assert_allclose( + np.testing.assert_allclose( as_ndarray(spones.multiply(spmat + mat)), out.toarray() ) @@ -3260,7 +3259,7 @@ def test_op_ss(self): assert tested.format == format assert tested.dtype == expected.dtype tested = tested.toarray() - utt.assert_allclose(tested, expected) + np.testing.assert_allclose(tested, expected) def test_op_sd(self): for format in sparse.sparse_formats: @@ -3279,7 +3278,7 @@ def test_op_sd(self): assert tested.format == format assert tested.dtype == expected.dtype tested = tested.toarray() - utt.assert_allclose(tested, expected) + np.testing.assert_allclose(tested, expected, atol=1e-05, rtol=1e-05) def test_infer_shape(self): for format in sparse.sparse_formats: @@ -3340,7 +3339,7 @@ def test_op(self): x, y, p = self.a expected = p.multiply(np.dot(x, y.T)) - utt.assert_allclose(as_ndarray(expected), tested.toarray()) + np.testing.assert_allclose(as_ndarray(expected), tested.toarray()) assert tested.format == "csr" assert tested.dtype == expected.dtype @@ -3352,7 +3351,7 @@ def 
test_negative_stride(self): x, y, p = a2 expected = p.multiply(np.dot(x, y.T)) - utt.assert_allclose(as_ndarray(expected), tested.toarray()) + np.testing.assert_allclose(as_ndarray(expected), tested.toarray()) assert tested.format == "csr" assert tested.dtype == expected.dtype diff --git a/tests/sparse/test_rewriting.py b/tests/sparse/test_rewriting.py index 280d9dbf70..be95ba59a0 100644 --- a/tests/sparse/test_rewriting.py +++ b/tests/sparse/test_rewriting.py @@ -10,7 +10,6 @@ from pytensor.tensor.basic import as_tensor_variable from pytensor.tensor.math import sum as pt_sum from pytensor.tensor.type import ivector, matrix, vector -from tests import unittest_tools as utt from tests.sparse.test_basic import random_lil @@ -173,4 +172,4 @@ def test_sd_csc(): res = sd_csc(a_val, a_ind, a_ptr, nrows, b).eval() - utt.assert_allclose(res, target) + np.testing.assert_allclose(res, target, atol=1e-05, rtol=1e-05) diff --git a/tests/tensor/conv/test_abstract_conv.py b/tests/tensor/conv/test_abstract_conv.py index 223e3774c2..43b58121d6 100644 --- a/tests/tensor/conv/test_abstract_conv.py +++ b/tests/tensor/conv/test_abstract_conv.py @@ -560,7 +560,7 @@ def run_fwd( res_ref = np.array(f_ref()) res = np.array(f()) - utt.assert_allclose(res_ref, res) + np.testing.assert_allclose(res_ref, res, rtol=1e-05, atol=1e-05) if ( verify_grad and inputs_val.size > 0 @@ -645,7 +645,7 @@ def run_gradweight( res_ref = np.array(f_ref()) res = np.array(f()) - utt.assert_allclose(res_ref, res) + np.testing.assert_allclose(res_ref, res, rtol=1e-05, atol=1e-05) def abstract_conv_gradweight(inputs_val, output_val): conv_op = gradWeights_fn( @@ -732,7 +732,7 @@ def run_gradinput( if ref is not None: res_ref = np.array(f_ref()) - utt.assert_allclose(res_ref, res) + np.testing.assert_allclose(res_ref, res, rtol=1e-06) def abstract_conv_gradinputs(filters_val, output_val): conv_op = gradInputs_fn( @@ -1445,13 +1445,13 @@ def test_bilinear_kernel_2D(self): kernel = bilinear_kernel_2D(ratio=ratio, normalize=False) f = pytensor.function([], kernel) kernel_2D = self.numerical_kernel_2D(ratio) - utt.assert_allclose(kernel_2D, f()) + np.testing.assert_allclose(kernel_2D, f()) # getting the normalized kernel kernel = bilinear_kernel_2D(ratio=ratio, normalize=True) f = pytensor.function([], kernel) kernel_2D = kernel_2D / float(ratio**2) - utt.assert_allclose(kernel_2D, f()) + np.testing.assert_allclose(kernel_2D, f()) def test_bilinear_kernel_1D(self): # Test 1D kernels used in bilinear upsampling @@ -1472,15 +1472,15 @@ def test_bilinear_kernel_1D(self): kernel = bilinear_kernel_1D(ratio=ratio, normalize=False) f = pytensor.function([], kernel) kernel_1D = self.numerical_kernel_1D(ratio) - utt.assert_allclose(kernel_1D, f()) - utt.assert_allclose(kernel_1D, f_ten(ratio)) + np.testing.assert_allclose(kernel_1D, f()) + np.testing.assert_allclose(kernel_1D, f_ten(ratio)) # getting the normalized kernel kernel = bilinear_kernel_1D(ratio=ratio, normalize=True) f = pytensor.function([], kernel) kernel_1D = kernel_1D / float(ratio) - utt.assert_allclose(kernel_1D, f()) - utt.assert_allclose(kernel_1D, f_ten_norm(ratio)) + np.testing.assert_allclose(kernel_1D, f()) + np.testing.assert_allclose(kernel_1D, f_ten_norm(ratio)) def numerical_upsampling_multiplier(self, ratio): """ @@ -1565,7 +1565,7 @@ def test_bilinear_upsampling_1D(self): ) f = pytensor.function([], bilin_mat, mode=self.compile_mode) up_mat_2d = self.get_upsampled_twobytwo_mat(input_x, ratio) - utt.assert_allclose(f(), up_mat_2d, rtol=1e-06) + 
np.testing.assert_allclose(f(), up_mat_2d, rtol=1e-06) def test_bilinear_upsampling_reshaping(self): # Test bilinear upsampling without giving shape information @@ -1587,7 +1587,7 @@ def test_bilinear_upsampling_reshaping(self): ) f = pytensor.function([], bilin_mat, mode=self.compile_mode) up_mat_2d = self.get_upsampled_twobytwo_mat(input_x, ratio) - utt.assert_allclose(f(), up_mat_2d, rtol=1e-06) + np.testing.assert_allclose(f(), up_mat_2d, rtol=1e-06) def test_compare_1D_and_2D_upsampling_values(self): # Compare 1D and 2D upsampling @@ -1615,7 +1615,7 @@ def test_compare_1D_and_2D_upsampling_values(self): ) f_1D = pytensor.function([], mat_1D, mode=self.compile_mode) f_2D = pytensor.function([], mat_2D, mode=self.compile_mode) - utt.assert_allclose(f_1D(), f_2D(), rtol=1e-06) + np.testing.assert_allclose(f_1D(), f_2D(), rtol=1e-06) # checking upsampling with ratio 8 input_x = rng.random((12, 11, 10, 7)).astype(config.floatX) @@ -1635,7 +1635,7 @@ def test_compare_1D_and_2D_upsampling_values(self): ) f_1D = pytensor.function([], mat_1D, mode=self.compile_mode) f_2D = pytensor.function([], mat_2D, mode=self.compile_mode) - utt.assert_allclose(f_1D(), f_2D(), rtol=1e-06) + np.testing.assert_allclose(f_1D(), f_2D(), rtol=1e-06) def test_fractional_bilinear_upsampling(self): """Test bilinear upsampling with nonsimilar fractional @@ -1672,7 +1672,7 @@ def test_fractional_bilinear_upsampling(self): ] ).astype(config.floatX) f_up_x = pytensor.function([], up_x, mode=self.compile_mode) - utt.assert_allclose(f_up_x(), num_up_x, rtol=1e-6) + np.testing.assert_allclose(f_up_x(), num_up_x, rtol=1e-6) def test_fractional_bilinear_upsampling_shape(self): x = np.random.random((1, 1, 200, 200)).astype(config.floatX) @@ -1681,7 +1681,7 @@ def test_fractional_bilinear_upsampling_shape(self): pt.as_tensor_variable(x), frac_ratio=resize, use_1D_kernel=False ) out = pytensor.function([], z.shape, mode="FAST_RUN")() - utt.assert_allclose(out, (1, 1, 240, 240)) + np.testing.assert_allclose(out, (1, 1, 240, 240)) class TestConv2dTranspose: @@ -1812,7 +1812,7 @@ def test_conv2d_grad_wrt_inputs(self): ) # check that they're equal - utt.assert_allclose( + np.testing.assert_allclose( f_new(filter_val, out_grad_val), f_old(input_val, filter_val, out_grad_val), ) @@ -1872,7 +1872,7 @@ def test_conv2d_grad_wrt_weights(self): f_new = pytensor.function( [self.x, self.output_grad_wrt], conv_wrt_w_out ) - utt.assert_allclose( + np.testing.assert_allclose( f_new(input_val, out_grad_val), f_old(input_val, filter_val, out_grad_val), ) @@ -1955,7 +1955,9 @@ def test_fwd(self): ] ref_concat_output = np.concatenate(ref_concat_output, axis=1) - utt.assert_allclose(grouped_output, ref_concat_output) + np.testing.assert_allclose( + grouped_output, ref_concat_output, atol=1e-08, rtol=1e-05 + ) utt.verify_grad(grouped_conv_op, [img, kern], mode=self.mode, eps=1) @@ -2009,7 +2011,9 @@ def test_gradweights(self): ] ref_concat_output = np.concatenate(ref_concat_output, axis=0) - utt.assert_allclose(grouped_output, ref_concat_output) + np.testing.assert_allclose( + grouped_output, ref_concat_output, atol=1e-08, rtol=1e-05 + ) def conv_gradweight(inputs_val, output_val): return grouped_convgrad_op( @@ -2070,7 +2074,7 @@ def test_gradinputs(self): ] ref_concat_output = np.concatenate(ref_concat_output, axis=1) - utt.assert_allclose(grouped_output, ref_concat_output) + np.testing.assert_allclose(grouped_output, ref_concat_output, atol=1e-08, rtol=1e-05) def conv_gradinputs(filters_val, output_val): return grouped_convgrad_op( @@ -2204,11 +2208,11 @@ def 
test_interface2d(self): # test for square matrix top = fun(self.x, self.depthwise_filter, self.pointwise_filter) - utt.assert_allclose(top, self.precomp_output_valid) + np.testing.assert_allclose(top, self.precomp_output_valid) # test for non-square matrix top = fun(self.x[:, :, :3, :], self.depthwise_filter, self.pointwise_filter) - utt.assert_allclose(top, self.precomp_output_valid[:, :, :1, :]) + np.testing.assert_allclose(top, self.precomp_output_valid[:, :, :1, :]) # test if it infers shape sep_op = separable_conv2d( @@ -2224,7 +2228,7 @@ def test_interface2d(self): [x_sym, dfilter_sym, pfilter_sym], sep_op, mode="FAST_RUN" ) top = fun(self.x, self.depthwise_filter, self.pointwise_filter) - utt.assert_allclose(top, self.precomp_output_valid) + np.testing.assert_allclose(top, self.precomp_output_valid) # test non-default subsample sep_op = separable_conv2d( @@ -2234,7 +2238,7 @@ def test_interface2d(self): [x_sym, dfilter_sym, pfilter_sym], sep_op, mode="FAST_RUN" ) top = fun(self.x, self.depthwise_filter, self.pointwise_filter) - utt.assert_allclose( + np.testing.assert_allclose( top, np.delete(np.delete(self.precomp_output_valid, 1, axis=3), 1, axis=2) ) @@ -2246,7 +2250,7 @@ def test_interface2d(self): [x_sym, dfilter_sym, pfilter_sym], sep_op, mode="FAST_RUN" ) top = fun(self.x[:, :, :3, :3], self.depthwise_filter, self.pointwise_filter) - utt.assert_allclose(top, self.precomp_output_full) + np.testing.assert_allclose(top, self.precomp_output_full) @pytest.mark.skipif(config.cxx == "", reason="test needs cxx") def test_interface3d(self): @@ -2272,10 +2276,10 @@ def test_interface3d(self): # test for square matrix top = fun(x, depthwise_filter, pointwise_filter) - utt.assert_allclose(top, precomp_output) + np.testing.assert_allclose(top, precomp_output) # test for non-square matrix top = fun(x[:, :, :3, :, :3], depthwise_filter, pointwise_filter) - utt.assert_allclose(top, precomp_output[:, :, :1, :, :1]) + np.testing.assert_allclose(top, precomp_output[:, :, :1, :, :1]) # test if it infers shape sep_op = separable_conv3d( x_sym, @@ -2290,7 +2294,7 @@ def test_interface3d(self): [x_sym, dfilter_sym, pfilter_sym], sep_op, mode="FAST_RUN" ) top = fun(x, depthwise_filter, pointwise_filter) - utt.assert_allclose(top, precomp_output) + np.testing.assert_allclose(top, precomp_output) # test non-default subsample sep_op = separable_conv3d( @@ -2300,7 +2304,7 @@ def test_interface3d(self): [x_sym, dfilter_sym, pfilter_sym], sep_op, mode="FAST_RUN" ) top = fun(x, depthwise_filter, pointwise_filter) - utt.assert_allclose( + np.testing.assert_allclose( top, np.delete( np.delete(np.delete(precomp_output, 1, axis=4), 1, axis=3), 1, axis=2 @@ -2318,7 +2322,7 @@ def test_interface3d(self): [x_sym, dfilter_sym, pfilter_sym], sep_op, mode="FAST_RUN" ) top = fun(x[:, :, :3, :3, :3], depthwise_filter, pointwise_filter) - utt.assert_allclose(top, precomp_output) + np.testing.assert_allclose(top, precomp_output) @pytest.mark.skipif( @@ -2407,8 +2411,11 @@ def test_fwd(self): for j in range(0, kshp[2]): single_kern = kern[:, i, j, ...].reshape(single_kshp) ref_val = ref_func(img, single_kern) - utt.assert_allclose( - ref_val[:, :, i, j], unshared_output[:, :, i, j] + np.testing.assert_allclose( + ref_val[:, :, i, j], + unshared_output[:, :, i, j], + atol=1e-08, + rtol=1e-05, ) if verify: @@ -2470,7 +2477,9 @@ def test_gradweight(self): top_single = np.zeros_like(top) top_single[:, :, i, j] = top[:, :, i, j] ref_output = ref_func(img, top_single) - utt.assert_allclose(unshared_output[:, i, j, ...], 
ref_output) + np.testing.assert_allclose( + unshared_output[:, i, j, ...], ref_output + ) def conv_gradweight(inputs_val, output_val): return unshared_conv_op( @@ -2542,7 +2551,9 @@ def test_gradinput(self): top_single[:, :, i, j] = top[:, :, i, j] ref_output += ref_func(single_kern, top_single) - utt.assert_allclose(ref_output, unshared_output) + np.testing.assert_allclose( + ref_output, unshared_output, atol=1e-08, rtol=1e-05 + ) def conv_gradinputs(filters_val, output_val): return unshared_conv_op( @@ -2614,7 +2625,7 @@ def test_fwd(self): ] = img ref_output = ref_func(exp_img, kern) - utt.assert_allclose(asymmetric_output, ref_output) + np.testing.assert_allclose(asymmetric_output, ref_output) utt.verify_grad(asymmetric_conv_op, [img, kern], mode=self.mode, eps=1) @@ -2666,7 +2677,7 @@ def test_gradweight(self): ] = img ref_output = ref_func(exp_img, top) - utt.assert_allclose(asymmetric_output, ref_output) + np.testing.assert_allclose(asymmetric_output, ref_output) def conv_gradweight(inputs_val, output_val): return asymmetric_conv_op( @@ -2720,7 +2731,7 @@ def test_gradinput(self): :, :, pad[0][0] : imshp[2] + pad[0][0], pad[1][0] : imshp[3] + pad[1][0] ] - utt.assert_allclose(asymmetric_output, ref_output) + np.testing.assert_allclose(asymmetric_output, ref_output) def conv_gradinputs(filters_val, output_val): return asymmetric_conv_op( @@ -2767,7 +2778,7 @@ def test_interface(self): output = causal_func(self.img, self.kern) - utt.assert_allclose(output, self.precomp_top) + np.testing.assert_allclose(output, self.precomp_top) def causal_conv_fn(inputs_val, filters_val): return causal_conv1d( diff --git a/tests/tensor/rewriting/test_basic.py b/tests/tensor/rewriting/test_basic.py index 4ff773dbb8..08ad2779cf 100644 --- a/tests/tensor/rewriting/test_basic.py +++ b/tests/tensor/rewriting/test_basic.py @@ -395,7 +395,7 @@ def test_advanced_inc_subtensor(self): r1 = f1(x_value, i_value, y_value) r2 = f2(x_value, i_value, y_value) - utt.assert_allclose(r1, r2) + np.testing.assert_allclose(r1, r2) # Check stacktrace was copied over correctly after rewrite was applied assert check_stack_trace(f1, ops_to_check=AdvancedIncSubtensor1) @@ -427,7 +427,7 @@ def test_advanced_inc_subtensor1(self): r1 = f1(x_value, i_value, y_value) r2 = f2(x_value, i_value, y_value) - utt.assert_allclose(r1, r2) + np.testing.assert_allclose(r1, r2) assert check_stack_trace(f1, ops_to_check=AdvancedIncSubtensor1) assert check_stack_trace(f2, ops_to_check="all") @@ -458,7 +458,7 @@ def test_incsubtensor(self): r1 = f1(x_value, i_value, y_value) r2 = f2(x_value, i_value, y_value) - utt.assert_allclose(r1, r2) + np.testing.assert_allclose(r1, r2) assert check_stack_trace(f1, ops_to_check="last") assert check_stack_trace(f2, ops_to_check="last") @@ -1086,7 +1086,7 @@ def test_broadcasting_3(self): f = function([x, y], z, mode=self.mode) vx = np.array([[0, 1], [1, 0]], dtype="int32") vy = np.array([7, 8], dtype="int64") - utt.assert_allclose(f(vx, vy), np.where(vx, vy, vy)) + np.testing.assert_allclose(f(vx, vy), np.where(vx, vy, vy)) assert isinstance(f.maker.fgraph.outputs[0].owner.op, Alloc) assert not any(node.op == pt.switch for node in f.maker.fgraph.toposort()) diff --git a/tests/tensor/rewriting/test_elemwise.py b/tests/tensor/rewriting/test_elemwise.py index 9488a9f688..fbc1eb179c 100644 --- a/tests/tensor/rewriting/test_elemwise.py +++ b/tests/tensor/rewriting/test_elemwise.py @@ -71,7 +71,6 @@ vector, vectors, ) -from tests import unittest_tools as utt dimshuffle_lift = out2in(local_dimshuffle_lift) @@ 
-1443,22 +1442,22 @@ def test_local_useless_composite_outputs(): assert len(topo[0].inputs) == 2 assert len(topo[0].outputs) == 2 res1, res2 = f([[1.0]], [[1.0]], [[np.nan]]) - utt.assert_allclose(res1, [[2.0]]) - utt.assert_allclose(res2, [[0.0]]) + np.testing.assert_allclose(res1, [[2.0]]) + np.testing.assert_allclose(res2, [[0.0]]) f = function([X, Y, Z], o1, mode=mode) topo = f.maker.fgraph.toposort() assert len(topo) == 1 assert len(topo[0].inputs) == 1 assert len(topo[0].outputs) == 1 - utt.assert_allclose(f([[1.0]], [[np.nan]], [[np.nan]]), [[2.0]]) + np.testing.assert_allclose(f([[1.0]], [[np.nan]], [[np.nan]]), [[2.0]]) f = function([X, Y, Z], o2, mode=mode) topo = f.maker.fgraph.toposort() assert len(topo) == 1 assert len(topo[0].inputs) == 1 assert len(topo[0].outputs) == 1 - utt.assert_allclose(f([[np.nan]], [[1.0]], [[np.nan]]), [[0.0]]) + np.testing.assert_allclose(f([[np.nan]], [[1.0]], [[np.nan]]), [[0.0]]) @pytest.mark.parametrize("const_shape", [(), (1,), (5,), (1, 5), (2, 5)]) diff --git a/tests/tensor/rewriting/test_linalg.py b/tests/tensor/rewriting/test_linalg.py index 9dd2a247a8..4d6b029918 100644 --- a/tests/tensor/rewriting/test_linalg.py +++ b/tests/tensor/rewriting/test_linalg.py @@ -14,7 +14,7 @@ from pytensor.tensor import swapaxes from pytensor.tensor.blockwise import Blockwise from pytensor.tensor.elemwise import DimShuffle -from pytensor.tensor.math import _allclose, dot, matmul +from pytensor.tensor.math import _get_atol_rtol, dot, matmul from pytensor.tensor.nlinalg import ( SVD, Det, @@ -67,7 +67,8 @@ def test_rop_lop(): v1 = rop_f(vx, vv) v2 = scan_f(vx, vv) - assert _allclose(v1, v2), f"ROP mismatch: {v1} {v2}" + atol_, rtol_ = _get_atol_rtol(v1, v2) + assert np.allclose(v1, v2, atol=atol_, rtol=rtol_), f"ROP mismatch: {v1} {v2}" raised = False try: @@ -91,7 +92,8 @@ def test_rop_lop(): v1 = lop_f(vx, vv) v2 = scan_f(vx, vv) - assert _allclose(v1, v2), f"LOP mismatch: {v1} {v2}" + atol_, rtol_ = _get_atol_rtol(v1, v2) + assert np.allclose(v1, v2, atol=atol_, rtol=rtol_), f"LOP mismatch: {v1} {v2}" def test_transinv_to_invtrans(): @@ -396,7 +398,7 @@ def test_local_lift_through_linalg(constructor, f_op, f, g_op, g): test_vals = [rng.normal(size=(3,) * A.ndim).astype(config.floatX) for _ in range(2)] test_vals = [x @ np.swapaxes(x, -1, -2) for x in test_vals] - np.testing.assert_allclose(f1(*test_vals), f2(*test_vals), atol=1e-8) + np.testing.assert_allclose(f1(*test_vals), f2(*test_vals), atol=1e-8, rtol=1e-05) @pytest.mark.parametrize( diff --git a/tests/tensor/rewriting/test_math.py b/tests/tensor/rewriting/test_math.py index 019833a9d5..1cac6a724a 100644 --- a/tests/tensor/rewriting/test_math.py +++ b/tests/tensor/rewriting/test_math.py @@ -607,7 +607,7 @@ def test_mul_div_cases(self): f = function(list(sym_inputs), g, mode=mode) out = f(*val_inputs) assert out_dtype == out.dtype - utt.assert_allclose(out, val_inputs[1]) + np.testing.assert_allclose(out, val_inputs[1]) topo = f.maker.fgraph.toposort() assert not any(node.op == pt.true_div for node in topo) @@ -627,7 +627,7 @@ def test_mul_div_cases(self): ): f = function(list(sym_inputs), g, mode=mode) out = f(*val_inputs) - utt.assert_allclose(out, (1 / val_inputs[1])) + np.testing.assert_allclose(out, (1 / val_inputs[1])) topo = f.maker.fgraph.toposort() elem = [t for t in topo if isinstance(t.op, Elemwise)] assert len(elem) == nb_elemwise @@ -708,7 +708,7 @@ def test_mul_div_cases(self): ): f = function(list(sym_inputs), g, mode=mode) out = f(*val_inputs) - utt.assert_allclose(out, (val_inputs[0] / val_inputs[3])) + 
np.testing.assert_allclose(out, (val_inputs[0] / val_inputs[3])) topo = f.maker.fgraph.toposort() assert len(topo) == 1 assert isinstance(topo[0].op, Elemwise) @@ -758,7 +758,7 @@ def test_mul_div_cases(self): out_dtype = out_dtype[config.cast_policy] f = function(list(sym_inputs), g, mode=mode) out = f(*val_inputs) - utt.assert_allclose(out, (0.5 * val_inputs[0] / val_inputs[1])) + np.testing.assert_allclose(out, (0.5 * val_inputs[0] / val_inputs[1])) topo = f.maker.fgraph.toposort() assert len(topo) == 2 assert isinstance(topo[0].op, Elemwise) @@ -800,7 +800,7 @@ def test_mul_div_cases(self): out_dtype = out_dtype[config.cast_policy] f = function(list(sym_inputs), g, mode=mode) out = f(*val_inputs) - utt.assert_allclose(out, val_inputs[0]) + np.testing.assert_allclose(out, val_inputs[0]) topo = f.maker.fgraph.toposort() assert len(topo) == 1 topo[0].op == deep_copy_op @@ -820,7 +820,7 @@ def test_mul_div_cases(self): f = function(list(sym_inputs), g, mode=mode) out = f(*val_inputs) assert np.all(np.isfinite(out)) - utt.assert_allclose(out, np.sign(val_inputs[0])) + np.testing.assert_allclose(out, np.sign(val_inputs[0])) assert out_dtype == out.dtype assert len(f.maker.fgraph.toposort()) == 1 @@ -868,7 +868,7 @@ def test_mul_div_cases(self): topo = f.maker.fgraph.toposort() out = f(*val_inputs) assert np.all(np.isfinite(out)) - utt.assert_allclose(out, np.sign(val_inputs[0]) * 2 / 3) + np.testing.assert_allclose(out, np.sign(val_inputs[0]) * 2 / 3) assert out_dtype == out.dtype def test_abs_mul_div(self): @@ -931,7 +931,9 @@ def test_multiple_case_that_fail(self): ]: f = function(list(sym_inputs), g, mode=mode) out = f(*val_inputs) - utt.assert_allclose(out, val_inputs[0] / val_inputs[1] / val_inputs[2]) + np.testing.assert_allclose( + out, val_inputs[0] / val_inputs[1] / val_inputs[2] + ) topo = f.maker.fgraph.toposort() assert len(topo) == 2 assert isinstance(topo[0].op, Elemwise) @@ -946,7 +948,9 @@ def test_multiple_case_that_fail(self): ]: f = function(list(sym_inputs), g, mode=mode) out = f(*val_inputs) - utt.assert_allclose(out, val_inputs[0] / (val_inputs[1] / val_inputs[2])) + np.testing.assert_allclose( + out, val_inputs[0] / (val_inputs[1] / val_inputs[2]) + ) topo = f.maker.fgraph.toposort() assert len(topo) == 2 assert isinstance(topo[0].op, Elemwise) @@ -1080,7 +1084,7 @@ def sigm(x): betaval = np.random.random(5) aval = np.random.random(5) - utt.assert_allclose( + np.testing.assert_allclose( f2(ival, wval, visbval, hidbval, betaval, aval), f1(ival, wval, visbval, hidbval, betaval, aval), ) @@ -1177,10 +1181,10 @@ def test_local_log_add_exp(): # test that it gives the correct result when it doesn't overflow f([10], [10]) # doesn't causes overflow - utt.assert_allclose(f([10], [10]), 10 + np.log1p(1)) + np.testing.assert_allclose(f([10], [10]), 10 + np.log1p(1)) assert np.isfinite(f([10000], [10000])) # causes overflow if handled incorrectly - utt.assert_allclose(f([10000], [10000]), 10000 + np.log1p(1)) + np.testing.assert_allclose(f([10000], [10000]), 10000 + np.log1p(1)) # test that when max = +-inf, rewritten output still works correctly assert f([-np.inf], [-np.inf]) == -np.inf @@ -1193,7 +1197,7 @@ def test_local_log_add_exp(): f = function([x, y], log(exp(x) + exp(y) + exp(x - y) + exp(x + y)), mode=m) assert np.isfinite(f([10000], [10000])) # causes overflow if handled incorrectly - utt.assert_allclose(f([10000], [10000]), 20000) + np.testing.assert_allclose(f([10000], [10000]), 20000) # TODO: test that the rewrite works in the presence of broadcasting. 
@@ -1273,7 +1277,7 @@ def test_local_elemwise_sub_zeros(): assert isinstance( f.maker.fgraph.toposort()[0].inputs[1], TensorConstant ) or isinstance(f.maker.fgraph.toposort()[0].inputs[1], TensorConstant) - utt.assert_allclose(f(scalar_val), 0.0) + np.testing.assert_allclose(f(scalar_val), 0.0) assert check_stack_trace(f, ops_to_check="all") # Test vector minus vector @@ -1283,7 +1287,7 @@ def test_local_elemwise_sub_zeros(): assert isinstance( f.maker.fgraph.toposort()[0].inputs[1], TensorConstant ) or isinstance(f.maker.fgraph.toposort()[0].inputs[1], TensorConstant) - utt.assert_allclose(f(vect_val), np.zeros(vect_val.shape)) + np.testing.assert_allclose(f(vect_val), np.zeros(vect_val.shape)) assert check_stack_trace(f, ops_to_check="all") # Test vector minus vector @@ -1293,7 +1297,7 @@ def test_local_elemwise_sub_zeros(): assert isinstance( f.maker.fgraph.toposort()[0].inputs[1], TensorConstant ) or isinstance(f.maker.fgraph.toposort()[0].inputs[1], TensorConstant) - utt.assert_allclose(f(mat_val), np.zeros(mat_val.shape)) + np.testing.assert_allclose(f(mat_val), np.zeros(mat_val.shape)) assert check_stack_trace(f, ops_to_check="all") @@ -1458,7 +1462,7 @@ def test_shape_inequality_with_self(self): f = function([x], minimum([0, 0], x.shape[0]), mode=mode) # This case isn't rewritten. # self.assert_eqs_const(f, 0) - utt.assert_allclose(f(x_val), [0, 0]) + np.testing.assert_allclose(f(x_val), [0, 0]) def test_shape_add_inequality(self): x = vector("x", dtype=config.floatX) @@ -1667,41 +1671,41 @@ def test_local_pow_specialize(): f = function([v], v**0, mode=mode) nodes = [node.op for node in f.maker.fgraph.toposort()] assert nodes == [Shape_i(0), pt.alloc] - utt.assert_allclose(f(val), val**0) + np.testing.assert_allclose(f(val), val**0) f = function([v], v**1, mode=mode) nodes = [node.op for node in f.maker.fgraph.toposort()] nodes == [deep_copy_op] - utt.assert_allclose(f(val), val**1) + np.testing.assert_allclose(f(val), val**1) f = function([v], v ** (-1), mode=mode) nodes = [node.op for node in f.maker.fgraph.toposort()] assert nodes == [reciprocal] - utt.assert_allclose(f(val_no0), val_no0 ** (-1)) + np.testing.assert_allclose(f(val_no0), val_no0 ** (-1)) f = function([v], v**2, mode=mode) nodes = [node.op for node in f.maker.fgraph.toposort()] assert nodes == [sqr] - utt.assert_allclose(f(val), val**2) + np.testing.assert_allclose(f(val), val**2) f = function([v], v ** (-2), mode=mode) nodes = [node.op for node in f.maker.fgraph.toposort()] assert len(nodes) == 2 assert nodes[0] == sqr assert isinstance(nodes[1].scalar_op, ps.basic.Reciprocal) - utt.assert_allclose(f(val_no0), val_no0 ** (-2)) + np.testing.assert_allclose(f(val_no0), val_no0 ** (-2)) f = function([v], v ** (0.5), mode=mode) nodes = [node.op for node in f.maker.fgraph.toposort()] assert nodes == [sqrt] - utt.assert_allclose(f(val), val ** (0.5)) + np.testing.assert_allclose(f(val), val ** (0.5)) f = function([v], v ** (-0.5), mode=mode) nodes = [node.op for node in f.maker.fgraph.toposort()] assert len(nodes) == 2 assert nodes[0] == sqrt assert isinstance(nodes[1].scalar_op, ps.basic.Reciprocal) - utt.assert_allclose(f(val_no0), val_no0 ** (-0.5)) + np.testing.assert_allclose(f(val_no0), val_no0 ** (-0.5)) twos = np.full(shape=(10,), fill_value=2.0).astype(config.floatX) f = function([v], v**twos, mode=mode) @@ -1713,7 +1717,7 @@ def test_local_pow_specialize(): else: assert isinstance(topo[0].op, SpecifyShape) assert topo[1].op == sqr - utt.assert_allclose(f(val), val**twos) + np.testing.assert_allclose(f(val), 
val**twos) def test_local_pow_to_nested_squaring(): @@ -1731,7 +1735,7 @@ def test_local_pow_to_nested_squaring(): assert len(nodes) == 1 assert len(f.maker.fgraph.toposort()[0].op.scalar_op.fgraph.apply_nodes) == 6 assert isinstance(nodes[0].scalar_op, ps.Composite) - utt.assert_allclose(f(val), val**15) + np.testing.assert_allclose(f(val), val**15) f = function([v], v ** (-15), mode=mode) nodes = [node.op for node in f.maker.fgraph.toposort()] @@ -1739,14 +1743,14 @@ def test_local_pow_to_nested_squaring(): assert len(f.maker.fgraph.toposort()[0].op.scalar_op.fgraph.apply_nodes) == 6 assert isinstance(nodes[0].scalar_op, ps.Composite) assert isinstance(nodes[-1].scalar_op, ps.basic.Reciprocal) - utt.assert_allclose(f(val_no0), val_no0 ** (-15)) + np.testing.assert_allclose(f(val_no0), val_no0 ** (-15)) f = function([v], v ** (16), mode=mode) nodes = [node.op for node in f.maker.fgraph.toposort()] assert len(nodes) == 1 assert len(f.maker.fgraph.toposort()[0].op.scalar_op.fgraph.apply_nodes) == 4 assert isinstance(nodes[0].scalar_op, ps.Composite) - utt.assert_allclose(f(val), val**16) + np.testing.assert_allclose(f(val), val**16) f = function([v], v ** (-16), mode=mode) nodes = [node.op for node in f.maker.fgraph.toposort()] @@ -1754,7 +1758,7 @@ def test_local_pow_to_nested_squaring(): assert len(f.maker.fgraph.toposort()[0].op.scalar_op.fgraph.apply_nodes) == 4 assert isinstance(nodes[0].scalar_op, ps.Composite) assert isinstance(nodes[-1].scalar_op, ps.basic.Reciprocal) - utt.assert_allclose(f(val_no0), val_no0 ** (-16)) + np.testing.assert_allclose(f(val_no0), val_no0 ** (-16)) def test_local_pow_to_nested_squaring_works_with_static_type(): @@ -2507,23 +2511,23 @@ def test_local_sum_prod_all_to_none(self): # test sum f = function([a], a.sum(), mode=self.mode) assert len(f.maker.fgraph.apply_nodes) == 1 - utt.assert_allclose(f(input), input.sum()) + np.testing.assert_allclose(f(input), input.sum()) # test prod f = function([a], a.prod(), mode=self.mode) assert len(f.maker.fgraph.apply_nodes) == 1 - utt.assert_allclose(f(input), input.prod()) + np.testing.assert_allclose(f(input), input.prod()) # test sum f = function([a], a.sum([0, 1, 2]), mode=self.mode) assert len(f.maker.fgraph.apply_nodes) == 1 - utt.assert_allclose(f(input), input.sum()) + np.testing.assert_allclose(f(input), input.sum()) # test prod f = function([a], a.prod([0, 1, 2]), mode=self.mode) assert len(f.maker.fgraph.apply_nodes) == 1 - utt.assert_allclose(f(input), input.prod()) + np.testing.assert_allclose(f(input), input.prod()) f = function([a], a.sum(0).sum(0).sum(0), mode=self.mode) assert len(f.maker.fgraph.apply_nodes) == 1 - utt.assert_allclose(f(input), input.sum()) + np.testing.assert_allclose(f(input), input.sum()) def test_local_sum_sum_prod_prod(self): a = tensor3() @@ -2578,54 +2582,54 @@ def my_sum_prod(data, d, dd): for d, dd in dims: expected = my_sum(input, d, dd) f = function([a], a.sum(d).sum(dd), mode=self.mode) - utt.assert_allclose(f(input), expected) + np.testing.assert_allclose(f(input), expected) assert len(f.maker.fgraph.apply_nodes) == 1 for d, dd in dims[:6]: f = function([a], a.sum(d).sum(dd).sum(0), mode=self.mode) - utt.assert_allclose(f(input), input.sum(d).sum(dd).sum(0)) + np.testing.assert_allclose(f(input), input.sum(d).sum(dd).sum(0)) assert len(f.maker.fgraph.apply_nodes) == 1 for d in [0, 1, 2]: f = function([a], a.sum(d).sum(None), mode=self.mode) - utt.assert_allclose(f(input), input.sum(d).sum()) + np.testing.assert_allclose(f(input), input.sum(d).sum()) assert 
len(f.maker.fgraph.apply_nodes) == 1 f = function([a], a.sum(None).sum(), mode=self.mode) - utt.assert_allclose(f(input), input.sum()) + np.testing.assert_allclose(f(input), input.sum()) assert len(f.maker.fgraph.apply_nodes) == 1 # test prod for d, dd in dims: expected = my_prod(input, d, dd) f = function([a], a.prod(d).prod(dd), mode=self.mode) - utt.assert_allclose(f(input), expected) + np.testing.assert_allclose(f(input), expected) assert len(f.maker.fgraph.apply_nodes) == 1 for d, dd in dims[:6]: f = function([a], a.prod(d).prod(dd).prod(0), mode=self.mode) - utt.assert_allclose(f(input), input.prod(d).prod(dd).prod(0)) + np.testing.assert_allclose(f(input), input.prod(d).prod(dd).prod(0)) assert len(f.maker.fgraph.apply_nodes) == 1 for d in [0, 1, 2]: f = function([a], a.prod(d).prod(None), mode=self.mode) - utt.assert_allclose(f(input), input.prod(d).prod()) + np.testing.assert_allclose(f(input), input.prod(d).prod()) assert len(f.maker.fgraph.apply_nodes) == 1 f = function([a], a.prod(None).prod(), mode=self.mode) - utt.assert_allclose(f(input), input.prod()) + np.testing.assert_allclose(f(input), input.prod()) assert len(f.maker.fgraph.apply_nodes) == 1 # Test that sum prod didn't get rewritten. for d, dd in dims: expected = my_sum_prod(input, d, dd) f = function([a], a.sum(d).prod(dd), mode=self.mode) - utt.assert_allclose(f(input), expected) + np.testing.assert_allclose(f(input), expected) assert len(f.maker.fgraph.apply_nodes) == 2 for d, dd in dims[:6]: f = function([a], a.sum(d).prod(dd).prod(0), mode=self.mode) - utt.assert_allclose(f(input), input.sum(d).prod(dd).prod(0)) + np.testing.assert_allclose(f(input), input.sum(d).prod(dd).prod(0)) assert len(f.maker.fgraph.apply_nodes) == 2 for d in [0, 1, 2]: f = function([a], a.sum(d).prod(None), mode=self.mode) - utt.assert_allclose(f(input), input.sum(d).prod()) + np.testing.assert_allclose(f(input), input.sum(d).prod()) assert len(f.maker.fgraph.apply_nodes) == 2 f = function([a], a.sum(None).prod(), mode=self.mode) - utt.assert_allclose(f(input), input.sum()) + np.testing.assert_allclose(f(input), input.sum(), atol=1e-08, rtol=1e-05) assert len(f.maker.fgraph.apply_nodes) == 1 def test_local_sum_sum_int8(self): @@ -2697,7 +2701,7 @@ def test_reduction_rewrite( mul_out = mul(*inputs) f = function(inputs, reduction_op(axis=axis)(mul_out), mode=self.mode) out = f(*inputs_val) - utt.assert_allclose(out, expected_output) + np.testing.assert_allclose(out, expected_output) # Ensure that the rewrite has been applied properly by # ensuring that the rewritten graph contains the expected number @@ -2986,23 +2990,23 @@ def test_local_sum_prod_alloc(self): ]: # test sum f = function([a], t_like(a).sum(None), mode=mode) - utt.assert_allclose(f(input), n_like(input).sum()) + np.testing.assert_allclose(f(input), n_like(input).sum()) assert len(f.maker.fgraph.apply_nodes) == nb_nodes[0] f = function([a], t_like(a).sum([0, 1, 2]), mode=mode) - utt.assert_allclose(f(input), n_like(input).sum()) + np.testing.assert_allclose(f(input), n_like(input).sum()) assert len(f.maker.fgraph.apply_nodes) == nb_nodes[0] for d in range(3): f = function([a], t_like(a).sum(d), mode=mode) - utt.assert_allclose(f(input), n_like(input).sum(d)) + np.testing.assert_allclose(f(input), n_like(input).sum(d)) assert len(f.maker.fgraph.apply_nodes) == nb_nodes[1] topo = f.maker.fgraph.toposort() assert topo[-1].op == pt.alloc assert not any(isinstance(node.op, Sum) for node in topo) for i in range(3): f = function([a], t_like(a).sum(i), mode=mode) - 
utt.assert_allclose(f(input), n_like(input).sum(i)) + np.testing.assert_allclose(f(input), n_like(input).sum(i)) assert len(f.maker.fgraph.apply_nodes) == nb_nodes[2] topo = f.maker.fgraph.toposort() assert topo[-1].op == pt.alloc @@ -3010,23 +3014,23 @@ def test_local_sum_prod_alloc(self): # test prod f = function([a], t_like(a).prod(None), mode=mode) - utt.assert_allclose(f(input), n_like(input).prod()) + np.testing.assert_allclose(f(input), n_like(input).prod()) # assert len(f.maker.fgraph.apply_nodes) == nb_nodes[0] f = function([a], t_like(a).prod([0, 1, 2]), mode=mode) - utt.assert_allclose(f(input), n_like(input).prod()) + np.testing.assert_allclose(f(input), n_like(input).prod()) # assert len(f.maker.fgraph.apply_nodes) == nb_nodes[0] for d in range(3): f = function([a], t_like(a).prod(d), mode=mode) - utt.assert_allclose(f(input), n_like(input).prod(d)) + np.testing.assert_allclose(f(input), n_like(input).prod(d)) # assert len(f.maker.fgraph.apply_nodes) == nb_nodes[1] topo = f.maker.fgraph.toposort() assert topo[-1].op == pt.alloc assert not any(isinstance(node.op, Prod) for node in topo) for i in range(3): f = function([a], t_like(a).prod(i), mode=mode) - utt.assert_allclose(f(input), n_like(input).prod(i)) + np.testing.assert_allclose(f(input), n_like(input).prod(i)) # assert len(f.maker.fgraph.apply_nodes) == nb_nodes[2] topo = f.maker.fgraph.toposort() assert topo[-1].op == pt.alloc @@ -3034,7 +3038,7 @@ def test_local_sum_prod_alloc(self): for d, dd in [(0, 0), (1, 0), (2, 0), (0, 1), (1, 1), (2, 1)]: f = function([a], t_like(a).sum(d).sum(dd), mode=mode) - utt.assert_allclose(f(input), n_like(input).sum(d).sum(dd)) + np.testing.assert_allclose(f(input), n_like(input).sum(d).sum(dd)) assert len(f.maker.fgraph.apply_nodes) == nb_nodes[3] topo = f.maker.fgraph.toposort() assert topo[-1].op == pt.alloc @@ -3190,8 +3194,11 @@ def test_local_prod_of_div(self): [a, b, c, d], s, on_unused_input="ignore", mode=mode_with_rewrite ) - utt.assert_allclose( - f(a_val, b_val, c_val, d_val), g(a_val, b_val, c_val, d_val) + np.testing.assert_allclose( + f(a_val, b_val, c_val, d_val), + g(a_val, b_val, c_val, d_val), + atol=1e-08, + rtol=1e-05, ) # Logical tests: tests whether the rewrite has been appplied or not @@ -3448,7 +3455,7 @@ def test_local_div_to_reciprocal(): f = function([num_len_s, denom_s], out) out_val = f(3, 2.0) assert out_val.shape == (1, 3) - utt.assert_allclose(out_val, 0.5) + np.testing.assert_allclose(out_val, 0.5) class TestIntDivByOne: @@ -3543,8 +3550,7 @@ def test_local_sumsqr2dot(): f_val = f(w_val, g_val) f_test = np.dot(np.square(g_val), np.square(w_val).sum(axis=0)) - - utt.assert_allclose(f_val, f_test) + np.testing.assert_allclose(f_val, f_test, atol=1e-08, rtol=1e-05) assert any( isinstance( n.op, @@ -3572,7 +3578,7 @@ def test_local_mul_exp_to_exp_add(): # e^x * e^y * e^z * e^w = e^(x+y+z+w) op = expx * expy * expz * expw f = function([x, y, z, w], op, mode) - utt.assert_allclose(f(3, 4, 5, 6), np.exp(3 + 4 + 5 + 6)) + np.testing.assert_allclose(f(3, 4, 5, 6), np.exp(3 + 4 + 5 + 6)) graph = f.maker.fgraph.toposort() assert all(isinstance(n.op, Elemwise) for n in graph) assert any(isinstance(n.op.scalar_op, ps.Add) for n in graph) @@ -3581,7 +3587,7 @@ def test_local_mul_exp_to_exp_add(): # e^x * e^y * e^z / e^w = e^(x+y+z-w) op = expx * expy * expz / expw f = function([x, y, z, w], op, mode) - utt.assert_allclose(f(3, 4, 5, 6), np.exp(3 + 4 + 5 - 6)) + np.testing.assert_allclose(f(3, 4, 5, 6), np.exp(3 + 4 + 5 - 6)) graph = f.maker.fgraph.toposort() assert 
all(isinstance(n.op, Elemwise) for n in graph) assert any(isinstance(n.op.scalar_op, ps.Add) for n in graph) @@ -3592,7 +3598,7 @@ def test_local_mul_exp_to_exp_add(): # e^x * e^y / e^z * e^w = e^(x+y-z+w) op = expx * expy / expz * expw f = function([x, y, z, w], op, mode) - utt.assert_allclose(f(3, 4, 5, 6), np.exp(3 + 4 - 5 + 6)) + np.testing.assert_allclose(f(3, 4, 5, 6), np.exp(3 + 4 - 5 + 6)) graph = f.maker.fgraph.toposort() assert all(isinstance(n.op, Elemwise) for n in graph) assert any(isinstance(n.op.scalar_op, ps.Add) for n in graph) @@ -3603,7 +3609,7 @@ def test_local_mul_exp_to_exp_add(): # e^x / e^y / e^z = (e^x / e^y) / e^z = e^(x-y-z) op = expx / expy / expz f = function([x, y, z], op, mode) - utt.assert_allclose(f(3, 4, 5), np.exp(3 - 4 - 5)) + np.testing.assert_allclose(f(3, 4, 5), np.exp(3 - 4 - 5)) graph = f.maker.fgraph.toposort() assert all(isinstance(n.op, Elemwise) for n in graph) assert any(isinstance(n.op.scalar_op, ps.Sub) for n in graph) @@ -3612,7 +3618,7 @@ def test_local_mul_exp_to_exp_add(): # e^x * y * e^z * w = e^(x+z) * y * w op = expx * y * expz * w f = function([x, y, z, w], op, mode) - utt.assert_allclose(f(3, 4, 5, 6), np.exp(3 + 5) * 4 * 6) + np.testing.assert_allclose(f(3, 4, 5, 6), np.exp(3 + 5) * 4 * 6) graph = f.maker.fgraph.toposort() assert all(isinstance(n.op, Elemwise) for n in graph) assert any(isinstance(n.op.scalar_op, ps.Add) for n in graph) @@ -3624,7 +3630,7 @@ def test_local_mul_exp_to_exp_add(): f = function([mx, my], exp(mx) * exp(my), mode, allow_input_downcast=True) M1 = np.array([[1.0, 2.0], [3.0, 4.0]]) M2 = np.array([[5.0, 6.0], [7.0, 8.0]]) - utt.assert_allclose(f(M1, M2), np.exp(M1 + M2)) + np.testing.assert_allclose(f(M1, M2), np.exp(M1 + M2)) graph = f.maker.fgraph.toposort() assert all(isinstance(n.op, Elemwise) for n in graph) assert any(isinstance(n.op.scalar_op, ps.Add) for n in graph) @@ -3633,13 +3639,13 @@ def test_local_mul_exp_to_exp_add(): # checking whether further rewrites can proceed after this one as one would expect # e^x * e^(-x) = e^(x-x) = e^0 = 1 f = function([x], expx * exp(neg(x)), mode) - utt.assert_allclose(f(42), 1) + np.testing.assert_allclose(f(42), 1) graph = f.maker.fgraph.toposort() assert isinstance(graph[0].inputs[0], TensorConstant) # e^x / e^x = e^(x-x) = e^0 = 1 f = function([x], expx / expx, mode) - utt.assert_allclose(f(42), 1) + np.testing.assert_allclose(f(42), 1) graph = f.maker.fgraph.toposort() assert isinstance(graph[0].inputs[0], TensorConstant) @@ -3670,7 +3676,7 @@ def test_local_mul_pow_to_pow_add(): # 2^x * 2^y * 2^z * 2^w = 2^(x+y+z+w) op = 2**x * 2**y * 2**z * 2**w f = function([x, y, z, w], op, mode) - utt.assert_allclose(f(3, 4, 5, 6), 2 ** (3 + 4 + 5 + 6)) + np.testing.assert_allclose(f(3, 4, 5, 6), 2 ** (3 + 4 + 5 + 6)) graph = f.maker.fgraph.toposort() assert all(isinstance(n.op, Elemwise) for n in graph) assert any(isinstance(n.op.scalar_op, ps.Add) for n in graph) @@ -3679,7 +3685,7 @@ def test_local_mul_pow_to_pow_add(): # 2^x * a^y * 2^z * b^w * c^v * a^u * s * b^t = 2^(x+z) * a^(y+u) * b^(w+t) * c^v * s op = 2**x * a**y * 2**z * b**w * c**v * a**u * s * b**t f = function([x, y, z, w, v, u, t, s, a, b, c], op, mode) - utt.assert_allclose( + np.testing.assert_allclose( f(4, 5, 6, 7, 8, 9, 10, 11, 2.5, 3, 3.5), 2 ** (4 + 6) * 2.5 ** (5 + 9) * 3 ** (7 + 10) * 3.5**8 * 11, ) @@ -3692,7 +3698,7 @@ def test_local_mul_pow_to_pow_add(): # (2^x / 2^y) * (a^z / a^w) = 2^(x-y) * a^(z-w) op = 2**x / 2**y * (a**z / a**w) f = function([x, y, z, w, a], op, mode) - 
utt.assert_allclose(f(3, 5, 6, 4, 7), 2 ** (3 - 5) * 7 ** (6 - 4)) + np.testing.assert_allclose(f(3, 5, 6, 4, 7), 2 ** (3 - 5) * 7 ** (6 - 4)) graph = f.maker.fgraph.toposort() assert all(isinstance(n.op, Elemwise) for n in graph) assert len([True for n in graph if isinstance(n.op.scalar_op, ps.Sub)]) == 2 @@ -3701,7 +3707,7 @@ def test_local_mul_pow_to_pow_add(): # a^x * a^y * exp(z) * exp(w) = a^(x+y) * exp(z+w) op = a**x * a**y * exp(z) * exp(w) f = function([x, y, z, w, a], op, mode) - utt.assert_allclose(f(3, 4, 5, 6, 2), 2 ** (3 + 4) * np.exp(5 + 6)) + np.testing.assert_allclose(f(3, 4, 5, 6, 2), 2 ** (3 + 4) * np.exp(5 + 6)) graph = f.maker.fgraph.toposort() assert all(isinstance(n.op, Elemwise) for n in graph) assert len([True for n in graph if isinstance(n.op.scalar_op, ps.Add)]) == 2 @@ -3725,7 +3731,7 @@ def test_local_expm1(): f_val = f(x_val) f_test = function([x], expm1(x), mode=MODE) - utt.assert_allclose(f_val, f_test(x_val)) + np.testing.assert_allclose(f_val, f_test(x_val)) assert any( isinstance(n.op, Elemwise) and isinstance(n.op.scalar_op, ps.basic.Expm1) diff --git a/tests/tensor/rewriting/test_shape.py b/tests/tensor/rewriting/test_shape.py index bbfd829070..d2a684935c 100644 --- a/tests/tensor/rewriting/test_shape.py +++ b/tests/tensor/rewriting/test_shape.py @@ -445,14 +445,14 @@ def test_perform(self): advec_val = rng.random(3).astype(config.floatX) f = function([advec], Shape_i(0)(advec)) out = f(advec_val) - utt.assert_allclose(out, advec_val.shape[0]) + np.testing.assert_allclose(out, advec_val.shape[0]) admat = matrix() admat_val = rng.random((4, 3)).astype(config.floatX) for i in range(2): f = function([admat], Shape_i(i)(admat)) out = f(admat_val) - utt.assert_allclose(out, admat_val.shape[i]) + np.testing.assert_allclose(out, admat_val.shape[i]) def test_infer_shape(self): admat = matrix() diff --git a/tests/tensor/rewriting/test_subtensor.py b/tests/tensor/rewriting/test_subtensor.py index 91575bc7da..f32d5421bd 100644 --- a/tests/tensor/rewriting/test_subtensor.py +++ b/tests/tensor/rewriting/test_subtensor.py @@ -1510,7 +1510,7 @@ def test_basic(self): f = function([x, y, idx], o, self.mode_no_assert) res = f(dx, dy, didx) - utt.assert_allclose(dy, res) + np.testing.assert_allclose(dy, res) topo = f.maker.fgraph.toposort() assert len(topo) == 1 assert isinstance(topo[0].op, DeepCopyOp | Elemwise) @@ -1523,7 +1523,7 @@ def test_basic(self): res = f(dx, dy, didx) _dx = dx.copy() np.add.at(_dx, didx, dy) - utt.assert_allclose(_dx[didx], res) + np.testing.assert_allclose(_dx[didx], res, atol=1e-05, rtol=1e-05) topo = f.maker.fgraph.toposort() len(topo) == 2 @@ -1533,7 +1533,7 @@ def test_basic(self): f = function([x, y, idx], o, self.mode_no_assert) res = f(dx, dy, didx) - utt.assert_allclose(np.vstack([dy[0], 2 * dy[1], 2 * dy[2]]), res) + np.testing.assert_allclose(np.vstack([dy[0], 2 * dy[1], 2 * dy[2]]), res) def test_assert(self): x = matrix("x") @@ -1665,7 +1665,7 @@ def test_incsubtensor_x_zeros(self): node_is_set_instead_of_inc = inc_nodes[0].op.set_instead_of_inc assert node_is_set_instead_of_inc test_X = np.random.random((4, 4)).astype(config.floatX) - utt.assert_allclose(f(test_X), test_X) + np.testing.assert_allclose(f(test_X), test_X) # also check the flag doesn't get set if first input is not zeros: not_all_zeros = np.zeros((4, 4)) @@ -1680,7 +1680,7 @@ def test_incsubtensor_x_zeros(self): assert len(inc_nodes) == 1 assert inc_nodes[0].op.set_instead_of_inc is False test_X = np.random.random((4, 4)).astype(config.floatX) - 
utt.assert_allclose(f(test_X), test_X + not_all_zeros) + np.testing.assert_allclose(f(test_X), test_X + not_all_zeros) def test_advancedincsubtensor1_allocs0(self): x = matrix() @@ -1855,7 +1855,7 @@ def test_local_set_to_inc_subtensor(): r1 = f1(val) r2 = f2(val) - utt.assert_allclose(r1, r2) + np.testing.assert_allclose(r1, r2) # Finally, test that the stack trace is copied over properly, # before and after optimization. diff --git a/tests/tensor/test_basic.py b/tests/tensor/test_basic.py index 323d401f42..c7f14db364 100644 --- a/tests/tensor/test_basic.py +++ b/tests/tensor/test_basic.py @@ -8,7 +8,6 @@ import pytensor import pytensor.scalar as ps import pytensor.tensor.basic as ptb -import pytensor.tensor.math as ptm from pytensor import compile, config, function, shared from pytensor.compile import SharedVariable from pytensor.compile.io import In, Out @@ -418,7 +417,7 @@ def test_make_vector(self, dtype, inputs): if dtype in int_dtypes: # The gradient should be 0 - utt.assert_allclose(g_val, 0) + np.testing.assert_allclose(g_val, 0) else: for var, grval in zip((b, i, d), g_val): float_inputs = [] @@ -1258,11 +1257,6 @@ def test_cast_from_complex_to_real_raises_error(self, real_dtype, complex_dtype) # gradient numerically -def test_basic_allclose(): - # This was raised by a user in https://github.com/Theano/Theano/issues/2975 - assert ptm._allclose(-0.311023883434, -0.311022856884) - - def test_get_vector_length(): # Test `Constant`s empty_tuple = as_tensor_variable(()) @@ -2953,7 +2947,7 @@ def test_mgrid_numpy_equiv(self): ) for n, t in zip(nmgrid, tmgrid): for ng, tg in zip(n, t): - utt.assert_allclose(ng, tg.eval()) + np.testing.assert_allclose(ng, tg.eval()) def test_ogrid_numpy_equiv(self): nogrid = ( @@ -2968,7 +2962,7 @@ def test_ogrid_numpy_equiv(self): ) for n, t in zip(nogrid, togrid): for ng, tg in zip(n, t): - utt.assert_allclose(ng, tg.eval()) + np.testing.assert_allclose(ng, tg.eval()) def test_mgrid_pytensor_variable_numpy_equiv(self): nfmgrid = np.mgrid[0:1:0.1, 1:10:1.0, 10:100:10.0] @@ -2981,7 +2975,7 @@ def test_mgrid_pytensor_variable_numpy_equiv(self): fi = pytensor.function([l, m, n], timgrid) for n, t in zip((nfmgrid, nimgrid), (ff(0, 10, 10.0), fi(0, 10, 10))): for ng, tg in zip(n, t): - utt.assert_allclose(ng, tg) + np.testing.assert_allclose(ng, tg) def test_ogrid_pytensor_variable_numpy_equiv(self): nfogrid = np.ogrid[0:1:0.1, 1:10:1.0, 10:100:10.0] @@ -2994,7 +2988,7 @@ def test_ogrid_pytensor_variable_numpy_equiv(self): fi = pytensor.function([l, m, n], tiogrid) for n, t in zip((nfogrid, niogrid), (ff(0, 10, 10.0), fi(0, 10, 10))): for ng, tg in zip(n, t): - utt.assert_allclose(ng, tg) + np.testing.assert_allclose(ng, tg) class TestInversePermutation: diff --git a/tests/tensor/test_blas.py b/tests/tensor/test_blas.py index 3b6115a107..5feb9dcc40 100644 --- a/tests/tensor/test_blas.py +++ b/tests/tensor/test_blas.py @@ -138,7 +138,7 @@ def cmp_linker(z, a, x, y, b, l): z_after = self._gemm(z_orig, a, x, y, b) # print z_orig, z_after, z, type(z_orig), type(z_after), type(z) - unittest_tools.assert_allclose(z_after, z) + np.testing.assert_allclose(z_after, z, rtol=1e-05, atol=1e-05) if a == 0.0 and b == 1.0: return elif z_orig.size == 0: @@ -323,11 +323,11 @@ def t(z, x, y, a=1.0, b=0.0, l="c|py", dt="float64"): mode=Mode(optimizer=None, linker=l), ) f() - unittest_tools.assert_allclose(z_after, tz.get_value(borrow=True)) + np.testing.assert_allclose(z_after, tz.get_value(borrow=True)) f() - unittest_tools.assert_allclose(z_after, 
tz.get_value(borrow=True)) + np.testing.assert_allclose(z_after, tz.get_value(borrow=True)) f() - unittest_tools.assert_allclose(z_after, tz.get_value(borrow=True)) + np.testing.assert_allclose(z_after, tz.get_value(borrow=True)) # tz.value *= 0 # clear z's value y_T = ty.get_value(borrow=True).T @@ -337,7 +337,7 @@ def t(z, x, y, a=1.0, b=0.0, l="c|py", dt="float64"): f() # test that the transposed version of multiplication gives # same answer - unittest_tools.assert_allclose(z_after, tz.get_value(borrow=True).T) + np.testing.assert_allclose(z_after, tz.get_value(borrow=True).T) t(C, A, B) t(C.T, A, B) @@ -388,8 +388,11 @@ def t(z, x, y, a=1.0, b=0.0, l="c|py", dt="float64"): z = tz.get_value(borrow=True, return_internal_type=True) z[:, :, i] = z_i - unittest_tools.assert_allclose( - z_after[:, :, i], tz.get_value(borrow=True)[:, :, i] + np.testing.assert_allclose( + z_after[:, :, i], + tz.get_value(borrow=True)[:, :, i], + rtol=1e-05, + atol=1e-08, ) tz_i = gemm_no_inplace(tz[:, :, i], ta, tx[:, :, i], ty[:, :, i], tb) @@ -401,8 +404,11 @@ def t(z, x, y, a=1.0, b=0.0, l="c|py", dt="float64"): ) for j in range(3): g_i() - unittest_tools.assert_allclose( - z_after[:, :, i], tz.get_value(borrow=True)[:, :, i] + np.testing.assert_allclose( + z_after[:, :, i], + tz.get_value(borrow=True)[:, :, i], + rtol=1e-05, + atol=1e-08, ) t(C, A, B) @@ -559,7 +565,7 @@ def run_gemm( ) ) ) - unittest_tools.assert_allclose(ref_val, z_val) + np.testing.assert_allclose(ref_val, z_val) def test_gemm(self): rng = np.random.default_rng(seed=utt.fetch_seed()) @@ -2465,7 +2471,7 @@ def test_gemm_non_contiguous(self): f(0) ref_output = np.ones((3, 5)) * 2 - unittest_tools.assert_allclose(c.get_value(), ref_output) + np.testing.assert_allclose(c.get_value(), ref_output) class TestInferShape(unittest_tools.InferShapeTester): @@ -2698,7 +2704,7 @@ def check_first_dim(inverted): assert not (x.flags["C_CONTIGUOUS"] or x.flags["F_CONTIGUOUS"]) result = f(x, w) ref_result = np.asarray([np.dot(u, v) for u, v in zip(x, w)]) - utt.assert_allclose(ref_result, result) + np.testing.assert_allclose(ref_result, result) for inverted in (0, 1): check_first_dim(inverted) diff --git a/tests/tensor/test_blas_c.py b/tests/tensor/test_blas_c.py index 8298cae5ba..ffe4531a47 100644 --- a/tests/tensor/test_blas_c.py +++ b/tests/tensor/test_blas_c.py @@ -388,7 +388,7 @@ def run_cgemv(self, dtype, ALPHA, BETA, transpose_A, slice_tensors): assert z_val.ndim == 1 assert z_val.shape[0] == self.M ref_val = self.compute_ref(*((*values, transpose_A, slice_tensors))) - unittest_tools.assert_allclose(ref_val, z_val) + np.testing.assert_allclose(ref_val, z_val, atol=1e-05, rtol=1e-05) def test_cgemv(self): for dtype in ("float32", "float64"): diff --git a/tests/tensor/test_blockwise.py b/tests/tensor/test_blockwise.py index f6783cf945..57b0aa023c 100644 --- a/tests/tensor/test_blockwise.py +++ b/tests/tensor/test_blockwise.py @@ -503,7 +503,10 @@ def core_scipy_fn(A, b): A_val_copy, b_val_copy ) np.testing.assert_allclose( - out, expected_out, atol=1e-6 if config.floatX == "float32" else 0 + out, + expected_out, + atol=1e-08 if config.floatX == "float32" else 0, + rtol=1e-05, ) # Confirm input was destroyed diff --git a/tests/tensor/test_elemwise.py b/tests/tensor/test_elemwise.py index 76906232af..18ac021b07 100644 --- a/tests/tensor/test_elemwise.py +++ b/tests/tensor/test_elemwise.py @@ -9,7 +9,6 @@ import pytensor import pytensor.scalar as ps import pytensor.tensor as pt -import tests.unittest_tools as utt from pytensor.compile.function import 
function from pytensor.compile.mode import Mode from pytensor.configdefaults import config @@ -245,7 +244,7 @@ def with_linker(self, linker, op, type, rand_val): yv = rand_val(ysh) zv = xv + yv - unittest_tools.assert_allclose(f(xv, yv), zv) + np.testing.assert_allclose(f(xv, yv), zv) # test Elemwise.infer_shape # the Shape op don't implement c_code! @@ -523,7 +522,7 @@ def with_mode( else: f_xv = f(xv) assert f_xv.shape == zv.shape, (f_xv, zv) - utt.assert_allclose(zv, f_xv) + np.testing.assert_allclose(zv, f_xv, rtol=1e-06) x = self.type( dtype, shape=tuple(entry if entry == 1 else None for entry in xsh) diff --git a/tests/tensor/test_extra_ops.py b/tests/tensor/test_extra_ops.py index 0da714c3bf..6a3d28d043 100644 --- a/tests/tensor/test_extra_ops.py +++ b/tests/tensor/test_extra_ops.py @@ -894,7 +894,7 @@ def test_basic_vector(self, x, inp, axis): f = pytensor.function(inputs=[x], outputs=out) outs = f(inp) for out, out_exp in zip(outs, outs_expected): - utt.assert_allclose(out, out_exp) + np.testing.assert_allclose(out, out_exp) @pytest.mark.parametrize( ("x", "inp", "axis"), diff --git a/tests/tensor/test_fft.py b/tests/tensor/test_fft.py index 94c49662bc..c57df9d7aa 100644 --- a/tests/tensor/test_fft.py +++ b/tests/tensor/test_fft.py @@ -40,7 +40,7 @@ def test_1Drfft(self): rfft_ref = np.fft.rfft(inputs_val, axis=1) - utt.assert_allclose(rfft_ref, res_rfft_comp) + np.testing.assert_allclose(rfft_ref, res_rfft_comp, atol=1e-08, rtol=1e-05) m = rfft.type() print(m.ndim) @@ -48,7 +48,9 @@ def test_1Drfft(self): f_irfft = pytensor.function([m], irfft) res_irfft = f_irfft(res_rfft) - utt.assert_allclose(inputs_val, np.asarray(res_irfft)) + np.testing.assert_allclose( + inputs_val, np.asarray(res_irfft), atol=1e-08, rtol=1e-05 + ) # The numerical gradient of the FFT is sensitive, must set large # enough epsilon to get good accuracy. 
@@ -79,7 +81,7 @@ def test_rfft(self): rfft_ref = np.fft.rfftn(inputs_val, axes=(1, 2)) - utt.assert_allclose(rfft_ref, res_rfft_comp, atol=1e-4, rtol=1e-4) + np.testing.assert_allclose(rfft_ref, res_rfft_comp, atol=1e-4, rtol=1e-4) def test_irfft(self): inputs_val = np.random.random((1, N, N)).astype(pytensor.config.floatX) @@ -94,7 +96,9 @@ def test_irfft(self): f_irfft = pytensor.function([m], irfft) res_irfft = f_irfft(res_fft) - utt.assert_allclose(inputs_val, np.asarray(res_irfft)) + np.testing.assert_allclose( + inputs_val, np.asarray(res_irfft), atol=1e-08, rtol=1e-05 + ) inputs_val = np.random.random((1, N, N, 2)).astype(pytensor.config.floatX) inputs = pytensor.shared(inputs_val) @@ -106,7 +110,7 @@ def test_irfft(self): irfft_ref = np.fft.irfftn(inputs_ref, axes=(1, 2)) - utt.assert_allclose(irfft_ref, res_irfft, atol=1e-4, rtol=1e-4) + np.testing.assert_allclose(irfft_ref, res_irfft, atol=1e-4, rtol=1e-4) def test_norm_rfft(self): inputs_val = np.random.random((1, N, N)).astype(pytensor.config.floatX) @@ -122,7 +126,7 @@ def test_norm_rfft(self): rfft_ref = np.fft.rfftn(inputs_val, axes=(1, 2)) - utt.assert_allclose(rfft_ref / N, res_rfft_comp, atol=1e-4, rtol=1e-4) + np.testing.assert_allclose(rfft_ref / N, res_rfft_comp, atol=1e-4, rtol=1e-4) # No normalization rfft = fft.rfft(inputs, norm="no_norm") @@ -132,7 +136,7 @@ def test_norm_rfft(self): res_rfft[:, :, :, 1] ) - utt.assert_allclose(rfft_ref, res_rfft_comp, atol=1e-4, rtol=1e-4) + np.testing.assert_allclose(rfft_ref, res_rfft_comp, atol=1e-4, rtol=1e-4) # Inverse FFT inputs inputs_val = np.random.random((1, N, N // 2 + 1, 2)).astype( @@ -148,14 +152,14 @@ def test_norm_rfft(self): irfft_ref = np.fft.irfftn(inputs_ref, axes=(1, 2)) - utt.assert_allclose(irfft_ref * N, res_irfft, atol=1e-4, rtol=1e-4) + np.testing.assert_allclose(irfft_ref * N, res_irfft, atol=1e-4, rtol=1e-4) # No normalization inverse FFT irfft = fft.irfft(inputs, norm="no_norm") f_irfft = pytensor.function([], irfft) res_irfft = f_irfft() - utt.assert_allclose(irfft_ref * N**2, res_irfft, atol=1e-4, rtol=1e-4) + np.testing.assert_allclose(irfft_ref * N**2, res_irfft, atol=1e-4, rtol=1e-4) def test_params(self): inputs_val = np.random.random((1, N)).astype(pytensor.config.floatX) diff --git a/tests/tensor/test_math.py b/tests/tensor/test_math.py index 14bc2614e3..bd73a262b5 100644 --- a/tests/tensor/test_math.py +++ b/tests/tensor/test_math.py @@ -44,8 +44,8 @@ Prod, ProdWithoutZeros, Sum, - _allclose, _dot, + _get_atol_rtol, abs, add, allclose, @@ -1563,7 +1563,7 @@ def test_outer(self): v1 = np.asarray(self.rng.random(s1)).astype(config.floatX) v2 = np.asarray(self.rng.random(s2)).astype(config.floatX) o = outer(x, y).eval({x: v1, y: v2}) - utt.assert_allclose(o, np.outer(v1, v2)) + np.testing.assert_allclose(o, np.outer(v1, v2)) def test_grad(self): # Test the combined graph of the graph of outer @@ -1943,7 +1943,7 @@ def test_mean_f16(self): x = vector(dtype="float16") y = x.mean() f = function([x], y) - utt.assert_allclose(f(np.ones((100000,), dtype="float16")), 1.0) + np.testing.assert_allclose(f(np.ones((100000,), dtype="float16")), 1.0) def test_basic(self): x = vector() @@ -2066,7 +2066,7 @@ def test_basic(self): bval = random(5, rng=rng) out0 = np.tensordot(aval, bval, axes) out1 = f1(aval, bval) - utt.assert_allclose(out0, out1) + np.testing.assert_allclose(out0, out1) utt.verify_grad(self.TensorDot(axes), [aval, bval]) # Test matrix-vector @@ -2076,7 +2076,9 @@ def test_basic(self): f2 = inplace_func([avec, bmat], c) aval = random(5, 
rng=rng) bval = random(8, 5, rng=rng) - utt.assert_allclose(np.tensordot(aval, bval, axes), f2(aval, bval)) + np.testing.assert_allclose( + np.tensordot(aval, bval, axes), f2(aval, bval), atol=1e-08, rtol=1e-05 + ) utt.verify_grad(self.TensorDot(axes), [aval, bval]) # Test matrix-matrix @@ -2095,7 +2097,9 @@ def test_basic(self): f3 = inplace_func([amat, bmat], c) aval = random(*shps[0], rng=rng) bval = random(*shps[1], rng=rng) - utt.assert_allclose(np.tensordot(aval, bval, axes), f3(aval, bval)) + np.testing.assert_allclose( + np.tensordot(aval, bval, axes), f3(aval, bval), atol=1e-08, rtol=1e-05 + ) utt.verify_grad(self.TensorDot(axes), [aval, bval]) # Test ndarray-matrix, sum over one dim of matrix @@ -2113,7 +2117,9 @@ def test_basic(self): f4 = inplace_func([atens, bmat], c) aval = random(*shps[0], rng=rng) bval = random(*shps[1], rng=rng) - utt.assert_allclose(np.tensordot(aval, bval, axes), f4(aval, bval)) + np.testing.assert_allclose( + np.tensordot(aval, bval, axes), f4(aval, bval), atol=1e-08, rtol=1e-05 + ) utt.verify_grad(self.TensorDot(axes), [aval, bval]) # Test ndarray-ndarray @@ -2124,13 +2130,17 @@ def test_basic(self): f5 = inplace_func([atens, btens], c) aval = random(4, 3, 5, 2, rng=rng) bval = random(3, 4, 2, rng=rng) - utt.assert_allclose(np.tensordot(aval, bval, axes), f5(aval, bval)) + np.testing.assert_allclose( + np.tensordot(aval, bval, axes), f5(aval, bval), atol=1e-08, rtol=1e-05 + ) utt.verify_grad(self.TensorDot(axes), [aval, bval]) axes = (axes[1], axes[0]) c = tensordot(btens, atens, axes) f6 = inplace_func([btens, atens], c) - utt.assert_allclose(np.tensordot(bval, aval, axes), f6(bval, aval)) + np.testing.assert_allclose( + np.tensordot(bval, aval, axes), f6(bval, aval), atol=1e-08, rtol=1e-05 + ) utt.verify_grad(self.TensorDot(axes), [bval, aval]) def test_raise_error(self): @@ -2168,7 +2178,7 @@ def test_weird_valid_axes(self): f3 = inplace_func([amat, bmat], c) aval = random(4, 7, rng=rng) bval = random(7, 9, rng=rng) - utt.assert_allclose(np.tensordot(aval, bval, axes), f3(aval, bval)) + np.testing.assert_allclose(np.tensordot(aval, bval, axes), f3(aval, bval)) utt.verify_grad(self.TensorDot(axes), [aval, bval]) def test_scalar_axes(self): @@ -2729,10 +2739,10 @@ def test_dot(self): x, y = self.vals # Use allclose comparison as a user reported on the mailing # list failure otherwise with array that print exactly the same. - utt.assert_allclose(x.dot(y), X.dot(Y).eval({X: x, Y: y})) + np.testing.assert_allclose(x.dot(y), X.dot(Y).eval({X: x, Y: y})) Z = X.dot(Y) z = x.dot(y) - utt.assert_allclose(x.dot(z), X.dot(Z).eval({X: x, Z: z})) + np.testing.assert_allclose(x.dot(z), X.dot(Z).eval({X: x, Z: z})) def test_real_imag(self): X, Y = self.vars @@ -2765,7 +2775,7 @@ def test_std(self): # std() is implemented as PyTensor tree and does not pass its # args directly to numpy. This sometimes results in small # difference, so we use allclose test. 
- utt.assert_allclose(X.std().eval({X: x}), x.std()) + np.testing.assert_allclose(X.std().eval({X: x}), x.std()) def test_cumsum(self): X, _ = self.vars @@ -3602,7 +3612,8 @@ def setup_method(self): def _validate_output(self, a, b): pytensor_sol = self.op(a, b).eval() numpy_sol = np.matmul(a, b) - assert _allclose(numpy_sol, pytensor_sol) + atol_, rtol_ = _get_atol_rtol(numpy_sol, pytensor_sol) + assert np.allclose(numpy_sol, pytensor_sol, atol=atol_, rtol=rtol_) @pytest.mark.parametrize( "x1, x2", diff --git a/tests/tensor/test_nlinalg.py b/tests/tensor/test_nlinalg.py index 1a13992011..eeeffcc108 100644 --- a/tests/tensor/test_nlinalg.py +++ b/tests/tensor/test_nlinalg.py @@ -9,7 +9,7 @@ from pytensor import function from pytensor.configdefaults import config from pytensor.tensor.basic import as_tensor_variable -from pytensor.tensor.math import _allclose +from pytensor.tensor.math import _get_atol_rtol from pytensor.tensor.nlinalg import ( SVD, Eig, @@ -60,7 +60,8 @@ def test_pseudoinverse_correctness(): assert ri.dtype == r.dtype # Note that pseudoinverse can be quite imprecise so I prefer to compare # the result with what np.linalg returns - assert _allclose(ri, np.linalg.pinv(r)) + atol_, rtol_ = _get_atol_rtol(ri, np.linalg.pinv(r)) + assert np.allclose(ri, np.linalg.pinv(r), atol=atol_, rtol=rtol_) def test_pseudoinverse_grad(): @@ -92,8 +93,11 @@ def test_inverse_correctness(self): rir = np.dot(ri, r) rri = np.dot(r, ri) - assert _allclose(np.identity(4), rir), rir - assert _allclose(np.identity(4), rri), rri + atol_, rtol_ = _get_atol_rtol(np.identity(4), rir) + assert np.allclose(np.identity(4), rir, atol=atol_, rtol=rtol_), rir + + atol_, rtol_ = _get_atol_rtol(np.identity(4), rri) + assert np.allclose(np.identity(4), rri, atol=atol_, rtol=rtol_), rri def test_infer_shape(self): r = self.rng.standard_normal((4, 4)).astype(config.floatX) @@ -119,7 +123,8 @@ def test_matrix_dot(): for r in rs[1:]: numpy_sol = np.dot(numpy_sol, r) - assert _allclose(numpy_sol, pytensor_sol) + atol_, rtol_ = _get_atol_rtol(numpy_sol, pytensor_sol) + assert np.allclose(numpy_sol, pytensor_sol, atol=atol_, rtol=rtol_) def test_qr_modes(): @@ -131,23 +136,34 @@ def test_qr_modes(): f = function([A], qr(A)) t_qr = f(a) n_qr = np.linalg.qr(a) - assert _allclose(n_qr, t_qr) + atol_, rtol_ = _get_atol_rtol(np.asarray(n_qr), np.asarray(t_qr)) + assert np.allclose(np.asarray(n_qr), np.asarray(t_qr), atol=atol_, rtol=rtol_) for mode in ["reduced", "r", "raw"]: f = function([A], qr(A, mode)) t_qr = f(a) n_qr = np.linalg.qr(a, mode) if isinstance(n_qr, list | tuple): - assert _allclose(n_qr[0], t_qr[0]) - assert _allclose(n_qr[1], t_qr[1]) + atol_, rtol_ = _get_atol_rtol(np.asarray(n_qr[0]), np.asarray(t_qr[0])) + assert np.allclose( + np.asarray(n_qr[0]), np.asarray(t_qr[0]), atol=atol_, rtol=rtol_ + ) + atol_, rtol_ = _get_atol_rtol(np.asarray(n_qr[1]), np.asarray(t_qr[1])) + assert np.allclose( + np.asarray(n_qr[1]), np.asarray(t_qr[1]), atol=atol_, rtol=rtol_ + ) else: - assert _allclose(n_qr, t_qr) + atol_, rtol_ = _get_atol_rtol(np.asarray(n_qr), np.asarray(t_qr)) + assert np.allclose( + np.asarray(n_qr), np.asarray(t_qr), atol=atol_, rtol=rtol_ + ) try: n_qr = np.linalg.qr(a, "complete") f = function([A], qr(A, "complete")) t_qr = f(a) - assert _allclose(n_qr, t_qr) + atol_, rtol_ = _get_atol_rtol(np.asarray(n_qr), np.asarray(t_qr)) + assert np.allclose(np.asarray(n_qr), np.asarray(t_qr), atol=atol_, rtol=rtol_) except TypeError as e: assert "name 'complete' is not defined" in str(e) @@ -199,7 +215,8 @@ 
def test_svd(self, core_shape, full_matrix, compute_uv, batched, test_imag): np_outputs = np_outputs if isinstance(np_outputs, tuple) else [np_outputs] for np_val, pt_val in zip(np_outputs, pt_outputs): - assert _allclose(np_val, pt_val) + atol_, rtol_ = _get_atol_rtol(np_val, pt_val) + assert np.allclose(np_val, pt_val, atol=atol_, rtol=rtol_) def test_svd_infer_shape(self): self.validate_shape((4, 4), full_matrices=True, compute_uv=True) @@ -306,7 +323,8 @@ def test_tensorsolve(): n_x = np.linalg.tensorsolve(a, b) t_x = fn(a, b) - assert _allclose(n_x, t_x) + atol_, rtol_ = _get_atol_rtol(n_x, np.asarray(t_x)) + assert np.allclose(n_x, t_x, atol=atol_, rtol=rtol_) # check the type upcast now C = tensor4("C", dtype="float32") @@ -319,7 +337,8 @@ def test_tensorsolve(): d = rng.random((2 * 3, 4)).astype("float64") n_y = np.linalg.tensorsolve(c, d) t_y = fn(c, d) - assert _allclose(n_y, t_y) + atol_, rtol_ = _get_atol_rtol(n_y, np.asarray(t_y)) + assert np.allclose(n_y, t_y, atol=atol_, rtol=rtol_) assert n_y.dtype == Y.dtype # check the type upcast now @@ -333,7 +352,8 @@ def test_tensorsolve(): f = rng.random((2 * 3, 4)).astype("float64") n_z = np.linalg.tensorsolve(e, f) t_z = fn(e, f) - assert _allclose(n_z, t_z) + atol_, rtol_ = _get_atol_rtol(n_z, np.asarray(t_z)) + assert np.allclose(n_z, t_z, atol=atol_, rtol=rtol_) assert n_z.dtype == Z.dtype @@ -623,7 +643,7 @@ def test_numpy_compare( pt_norm = norm(X, ord=ord, axis=axis, keepdims=keepdims) f = function([], pt_norm, mode="FAST_COMPILE") - utt.assert_allclose(np_norm, f()) + np.testing.assert_allclose(np_norm, f(), atol=1e-08, rtol=1e-05) class TestTensorInv(utt.InferShapeTester): @@ -653,7 +673,8 @@ def test_eval(self): n_ainv = np.linalg.tensorinv(self.a) tf_a = function([A], [Ai]) t_ainv = tf_a(self.a) - assert _allclose(n_ainv, t_ainv) + atol_, rtol_ = _get_atol_rtol(n_ainv, np.asarray(t_ainv)) + assert np.allclose(n_ainv, t_ainv, atol=atol_, rtol=rtol_) B = self.B Bi = tensorinv(B) @@ -664,8 +685,10 @@ def test_eval(self): tf_b1 = function([B], [Bi1]) t_binv = tf_b(self.b) t_binv1 = tf_b1(self.b1) - assert _allclose(t_binv, n_binv) - assert _allclose(t_binv1, n_binv1) + atol_, rtol_ = _get_atol_rtol(np.asarray(t_binv), n_binv) + assert np.allclose(t_binv, n_binv, atol=atol_, rtol=rtol_) + atol_, rtol_ = _get_atol_rtol(np.asarray(t_binv1), n_binv1) + assert np.allclose(t_binv1, n_binv1, atol=atol_, rtol=rtol_) class TestKron(utt.InferShapeTester): diff --git a/tests/tensor/test_sort.py b/tests/tensor/test_sort.py index b2e2b5a44f..bd00b36862 100644 --- a/tests/tensor/test_sort.py +++ b/tests/tensor/test_sort.py @@ -35,7 +35,7 @@ def test1(self): a = dmatrix() w = sort(a) f = pytensor.function([a], w) - utt.assert_allclose(f(self.m_val), np.sort(self.m_val)) + np.testing.assert_allclose(f(self.m_val), np.sort(self.m_val)) def test2(self): a = dmatrix() @@ -45,7 +45,7 @@ def test2(self): for axis_val in 0, 1: gv = f(self.m_val, axis_val) gt = np.sort(self.m_val, axis_val) - utt.assert_allclose(gv, gt) + np.testing.assert_allclose(gv, gt) def test3(self): a = dvector() @@ -53,7 +53,7 @@ def test3(self): f = pytensor.function([a], w2) gv = f(self.v_val) gt = np.sort(self.v_val) - utt.assert_allclose(gv, gt) + np.testing.assert_allclose(gv, gt) def test4(self): a = dmatrix() @@ -63,7 +63,7 @@ def test4(self): for axis_val in 0, 1: gv = f(self.m_val, axis_val) gt = np.sort(self.m_val, axis_val) - utt.assert_allclose(gv, gt) + np.testing.assert_allclose(gv, gt) def test5(self): a1 = SortOp("mergesort") @@ -79,7 +79,7 @@ def 
test_None(self): f = pytensor.function([a], l) gv = f(self.m_val) gt = np.sort(self.m_val, None) - utt.assert_allclose(gv, gt) + np.testing.assert_allclose(gv, gt) def test_grad_vector(self): data = self.rng.random(10).astype(pytensor.config.floatX) @@ -177,7 +177,7 @@ def test_argsort(): f = pytensor.function([a], w) gv = f(m_val) gt = np.argsort(m_val) - utt.assert_allclose(gv, gt) + np.testing.assert_allclose(gv, gt) # Example 2 a = dmatrix() @@ -187,7 +187,7 @@ def test_argsort(): for axis_val in 0, 1: gv = f(m_val, axis_val) gt = np.argsort(m_val, axis_val) - utt.assert_allclose(gv, gt) + np.testing.assert_allclose(gv, gt) # Example 3 a = dvector() @@ -195,7 +195,7 @@ def test_argsort(): f = pytensor.function([a], w2) gv = f(v_val) gt = np.argsort(v_val) - utt.assert_allclose(gv, gt) + np.testing.assert_allclose(gv, gt) # Example 4 a = dmatrix() @@ -205,7 +205,7 @@ def test_argsort(): for axis_val in 0, 1: gv = f(m_val, axis_val) gt = np.argsort(m_val, axis_val) - utt.assert_allclose(gv, gt) + np.testing.assert_allclose(gv, gt) # Example 5 a1 = ArgSortOp("mergesort") @@ -220,7 +220,7 @@ def test_argsort(): f = pytensor.function([a], w2) gv = f(m_val) gt = np.argsort(m_val, None) - utt.assert_allclose(gv, gt) + np.testing.assert_allclose(gv, gt) def test_argsort_grad(): diff --git a/tests/tensor/test_subtensor.py b/tests/tensor/test_subtensor.py index d02880f543..e65f218eed 100644 --- a/tests/tensor/test_subtensor.py +++ b/tests/tensor/test_subtensor.py @@ -900,7 +900,7 @@ def test_noncontiguous_idx(self): n = self.shared(data) t = n[self.shared(np.asarray(idx).astype("int64"))[::2]] val = self.eval_output_and_check(t, op_type=AdvancedSubtensor1, length=2) - utt.assert_allclose(data[idx[::2]], val) + np.testing.assert_allclose(data[idx[::2]], val) def test_err_invalid_list(self): n = self.shared(np.asarray(5, dtype=self.dtype)) @@ -1520,7 +1520,7 @@ def test_simple_2d(self): else: expected_result[:, :val_sl2_end] += val_inc - utt.assert_allclose(result, expected_result) + np.testing.assert_allclose(result, expected_result) def test_wrong_dims(self): a = matrix() @@ -1584,7 +1584,7 @@ def test_simple_3d(self): else: expected_result[:, sl3, :val_sl2_end] += val_inc - utt.assert_allclose(result, expected_result) + np.testing.assert_allclose(result, expected_result) # Test when we broadcast the result result = method(a[sl1, sl2], increment) @@ -1599,7 +1599,7 @@ def test_simple_3d(self): else: expected_result[:, :val_sl2_end] += val_inc - utt.assert_allclose(result, expected_result) + np.testing.assert_allclose(result, expected_result) def test_grad_inc_set(self): def inc_slice(*s): @@ -1729,10 +1729,10 @@ def test_matrix_idx(self): idxval = np.array([[1, 2], [3, 2]]) a2val = f(mval, idxval) - utt.assert_allclose(a2val[0], mval[0]) - utt.assert_allclose(a2val[1], mval[1] * 2) - utt.assert_allclose(a2val[2], mval[2] * 3) - utt.assert_allclose(a2val[3], mval[3] * 2) + np.testing.assert_allclose(a2val[0], mval[0]) + np.testing.assert_allclose(a2val[1], mval[1] * 2) + np.testing.assert_allclose(a2val[2], mval[2] * 3) + np.testing.assert_allclose(a2val[3], mval[3] * 2) def test_inc_bcastableidx(self): idx = ptb.constant([0]) @@ -1746,7 +1746,7 @@ def test_inc_bcastableidx(self): incval = self.rng.random((10, 1)).astype(config.floatX) out1val, out2val = f(mval, incval, incval) - utt.assert_allclose(out1val, out2val) + np.testing.assert_allclose(out1val, out2val) class TestAdvancedSubtensor: @@ -1897,7 +1897,7 @@ def test_adv_subtensor_w_int_and_matrix(self): ix2v = np.asarray([[0, 1], [1, 0]]) 
aval = f(ft4v, ix2v) rval = ft4v[0, :, ix2v, :] - utt.assert_allclose(rval, aval) + np.testing.assert_allclose(rval, aval) def test_adv_subtensor_w_none_and_matrix(self): subt = self.ft4[:, None, :, self.ix2, :] @@ -1906,7 +1906,7 @@ def test_adv_subtensor_w_none_and_matrix(self): ix2v = np.asarray([[0, 1], [1, 0]]) aval = f(ft4v, ix2v) rval = ft4v[:, None, :, ix2v, :] - utt.assert_allclose(rval, aval) + np.testing.assert_allclose(rval, aval) def test_adv_subtensor_w_slice_and_matrix(self): subt = self.ft4[:, 0:1, self.ix2, :] @@ -1915,7 +1915,7 @@ def test_adv_subtensor_w_slice_and_matrix(self): ix2v = np.asarray([[0, 1], [1, 0]]) aval = f(ft4v, ix2v) rval = ft4v[:, 0:1, ix2v, :] - utt.assert_allclose(rval, aval) + np.testing.assert_allclose(rval, aval) def test_adv_subtensor_w_matrix_and_int(self): subt = self.ft4[:, :, self.ix2, 0] @@ -1924,7 +1924,7 @@ def test_adv_subtensor_w_matrix_and_int(self): ix2v = np.asarray([[0, 1], [1, 0]]) aval = f(ft4v, ix2v) rval = ft4v[:, :, ix2v, 0] - utt.assert_allclose(rval, aval) + np.testing.assert_allclose(rval, aval) def test_adv_subtensor_w_matrix_and_none(self): subt = self.ft4[:, :, self.ix2, None, :] @@ -1933,7 +1933,7 @@ def test_adv_subtensor_w_matrix_and_none(self): ix2v = np.asarray([[0, 1], [1, 0]]) aval = f(ft4v, ix2v) rval = ft4v[:, :, ix2v, None, :] - utt.assert_allclose(rval, aval) + np.testing.assert_allclose(rval, aval) @pytest.mark.parametrize( "ignore_duplicates", @@ -2113,7 +2113,7 @@ def test_adv_sub_3d(self): f = pytensor.function([X], X[b_idx, r_idx, c_idx], mode=self.mode) out = f(xx) - utt.assert_allclose(out, xx[b_idx, r_idx, c_idx]) + np.testing.assert_allclose(out, xx[b_idx, r_idx, c_idx]) def test_adv_sub_slice(self): # Reported in https://github.com/Theano/Theano/issues/5898 @@ -2137,7 +2137,7 @@ def test_adv_grouped(self): assert out_v.shape == (3, 5, 4) out_np = var_v[:, idx1_v, np.arange(4)] - utt.assert_allclose(out_v, out_np) + np.testing.assert_allclose(out_v, out_np) def test_grad(self): ones = np.ones((1, 3), dtype=self.dtype) diff --git a/tests/tensor/test_variable.py b/tests/tensor/test_variable.py index 50c36a05fc..eb599884aa 100644 --- a/tests/tensor/test_variable.py +++ b/tests/tensor/test_variable.py @@ -5,7 +5,6 @@ from numpy.testing import assert_array_equal, assert_equal, assert_string_equal import pytensor -import tests.unittest_tools as utt from pytensor.compile import DeepCopyOp from pytensor.compile.mode import get_default_mode from pytensor.graph.basic import Constant, equal_computations @@ -77,7 +76,7 @@ def test_numpy_method(fct, value): x = dscalar("x") y = fct(x) f = pytensor.function([x], y) - utt.assert_allclose(np.nan_to_num(f(value)), np.nan_to_num(fct(value))) + np.testing.assert_allclose(np.nan_to_num(f(value)), np.nan_to_num(fct(value))) def test_dot_method(): diff --git a/tests/unittest_tools.py b/tests/unittest_tools.py index 9134b29b65..40f5a24a53 100644 --- a/tests/unittest_tools.py +++ b/tests/unittest_tools.py @@ -11,7 +11,6 @@ from pytensor.configdefaults import config from pytensor.gradient import verify_grad as orig_verify_grad from pytensor.tensor.basic import as_tensor_variable -from pytensor.tensor.math import _allclose from pytensor.tensor.math import add as pt_add @@ -277,11 +276,6 @@ def __str__(self): return s + str_diagnostic(self.val1, self.val2, self.rtol, self.atol) -def assert_allclose(expected, value, rtol=None, atol=None): - if not _allclose(expected, value, rtol, atol): - raise WrongValue(expected, value, rtol, atol) - - class AttemptManyTimes: """Decorator for 
unit tests that forces a unit test to be attempted multiple times. The test needs to pass a certain number of times for it to