diff --git a/.zenodo.json b/.zenodo.json
index d79c0cf93..b96c10234 100644
--- a/.zenodo.json
+++ b/.zenodo.json
@@ -381,6 +381,11 @@
     {
       "name": "Suter, Peter"
-    }
+    },
+    {
+      "affiliation": "Human Neuroscience Platform, Fondation Campus Biotech Geneva, Geneva, Switzerland",
+      "name": "Scheltienne, Mathieu",
+      "orcid": "0000-0001-8316-7436"
+    }
   ],
   "keywords": [
     "neuroimaging"
diff --git a/doc/source/index.rst b/doc/source/index.rst
index 48db1d31a..65e1aded4 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -125,6 +125,7 @@ contributed code and discussion (in rough order of appearance):
 * Jacob Roberts
 * Horea Christian
 * Fabian Perez
+* Mathieu Scheltienne
 
 License reprise
 ===============
diff --git a/nibabel/casting.py b/nibabel/casting.py
index 6232c615b..743ce4706 100644
--- a/nibabel/casting.py
+++ b/nibabel/casting.py
@@ -6,7 +6,6 @@
 from __future__ import annotations
 
 import warnings
-from numbers import Integral
 from platform import machine, processor
 
 import numpy as np
@@ -23,6 +22,43 @@ class CastingError(Exception):
 
 _test_val = 2**63 + 2**11  # Should be exactly representable in float64
 TRUNC_UINT64 = np.float64(_test_val).astype(np.uint64) != _test_val
 
+# np.sctypes was removed in NumPy 2.0; np.core.sctypes is private and is not a substitute.
+sctypes = {
+    'int': [
+        getattr(np, dtype) for dtype in ('int8', 'int16', 'int32', 'int64') if hasattr(np, dtype)
+    ],
+    'uint': [
+        getattr(np, dtype)
+        for dtype in ('uint8', 'uint16', 'uint32', 'uint64')
+        if hasattr(np, dtype)
+    ],
+    'float': [
+        getattr(np, dtype)
+        for dtype in ('float16', 'float32', 'float64', 'float96', 'float128')
+        if hasattr(np, dtype)
+    ],
+    'complex': [
+        getattr(np, dtype)
+        for dtype in ('complex64', 'complex128', 'complex192', 'complex256')
+        if hasattr(np, dtype)
+    ],
+    'others': [bool, object, bytes, str, np.void],
+}
+sctypes_aliases = {
+    getattr(np, dtype)
+    for dtype in (
+        'int8', 'byte', 'int16', 'short', 'int32', 'intc', 'int_', 'int64', 'longlong',
+        'uint8', 'ubyte', 'uint16', 'ushort', 'uint32', 'uintc', 'uint', 'uint64', 'ulonglong',  # noqa: E501
+        'float16', 'half', 'float32', 'single', 'float64', 'double', 'float96', 'float128', 'longdouble',  # noqa: E501
+        'complex64', 'csingle', 'complex128', 'cdouble', 'complex192', 'complex256', 'clongdouble',  # noqa: E501
+        # other names of the built-in scalar types
+        'int_', 'float_', 'complex_', 'bytes_', 'str_', 'bool_', 'datetime64', 'timedelta64',  # noqa: E501
+        # other
+        'object_', 'void',
+    )
+    if hasattr(np, dtype)
+}  # fmt:skip
+
 
 def float_to_int(arr, int_type, nan2zero=True, infmax=False):
     """Convert floating point array `arr` to type `int_type`
@@ -252,7 +288,7 @@ def type_info(np_type):
         return ret
     info_64 = np.finfo(np.float64)
     if dt.kind == 'c':
-        assert np_type is np.longcomplex
+        assert np_type is np.clongdouble
         vals = (nmant, nexp, width / 2)
     else:
         assert np_type is np.longdouble
@@ -280,7 +316,7 @@ def type_info(np_type):
     # Oh dear, we don't recognize the type information.  Try some known types
     # and then give up. At this stage we're expecting exotic longdouble or
     # their complex equivalent.
-    if np_type not in (np.longdouble, np.longcomplex) or width not in (16, 32):
+    if np_type not in (np.longdouble, np.clongdouble) or width not in (16, 32):
         raise FloatingError(f'We had not expected type {np_type}')
     if vals == (1, 1, 16) and on_powerpc() and _check_maxexp(np.longdouble, 1024):
         # double pair on PPC. The _check_nmant routine does not work for this
@@ -290,13 +326,13 @@ def type_info(np_type):
         # Got float64 despite everything
         pass
     elif _check_nmant(np.longdouble, 112) and _check_maxexp(np.longdouble, 16384):
-        # binary 128, but with some busted type information. np.longcomplex
+        # binary 128, but with some busted type information. np.clongdouble
         # seems to break here too, so we need to use np.longdouble and
         # complexify
         two = np.longdouble(2)
         # See: https://matthew-brett.github.io/pydagogue/floating_point.html
         max_val = (two**113 - 1) / (two**112) * two**16383
-        if np_type is np.longcomplex:
+        if np_type is np.clongdouble:
             max_val += 0j
         ret = dict(
             min=-max_val,
@@ -453,9 +489,7 @@ def int_to_float(val, flt_type):
         return flt_type(val)
-    # The following works around a nasty numpy 1.4.1 bug such that:
-    # >>> int(np.uint32(2**32-1)
-    # -1
-    if not isinstance(val, Integral):
-        val = int(str(val))
+    # int() is exact for all Python and NumPy integers; the old guard against
+    # the numpy 1.4.1 bug where int(np.uint32(2**32 - 1)) == -1 is obsolete.
+    val = int(val)
     faval = np.longdouble(0)
     while val != 0:
         f64 = np.float64(val)
@@ -714,7 +748,7 @@ def ok_floats():
     Remove longdouble if it has no higher precision than float64
     """
-    # copy float list so we don't change the numpy global
-    floats = np.sctypes['float'][:]
+    # copy float list so we don't change the module global
+    floats = sctypes['float'][:]
     if best_float() != np.longdouble and np.longdouble in floats:
         floats.remove(np.longdouble)
     return sorted(floats, key=lambda f: type_info(f)['nmant'])
@@ -750,10 +784,10 @@ def able_int_type(values):
     mn = min(values)
     mx = max(values)
     if mn >= 0:
-        for ityp in np.sctypes['uint']:
+        for ityp in sctypes['uint']:
             if mx <= np.iinfo(ityp).max:
                 return ityp
-    for ityp in np.sctypes['int']:
+    for ityp in sctypes['int']:
         info = np.iinfo(ityp)
         if mn >= info.min and mx <= info.max:
             return ityp
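
Note: the sctypes dict added to nibabel/casting.py above is this patch's stand-in for np.sctypes, which is gone in NumPy 2.0, and sctypes['float'][-1] likewise replaces the removed np.maximum_sctype (see the quaternions.py and test_scaling.py hunks below). A minimal usage sketch, illustrative rather than part of the patch:

    import numpy as np
    from nibabel.casting import sctypes

    # Each kind lists only the concrete types this NumPy build provides,
    # in increasing size; float96/float128 are absent where unsupported.
    widest_float = sctypes['float'][-1]  # previously np.maximum_sctype(float)
    assert all(np.dtype(t).kind == 'f' for t in sctypes['float'])
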
diff --git a/nibabel/conftest.py b/nibabel/conftest.py
index 1f9ecd09c..cf0139232 100644
--- a/nibabel/conftest.py
+++ b/nibabel/conftest.py
@@ -1,5 +1,14 @@
+import numpy as np
 import pytest
 
 # Ignore warning requesting help with nicom
 with pytest.warns(UserWarning):
     import nibabel.nicom
+
+
+@pytest.fixture(scope='session', autouse=True)
+def legacy_printoptions():
+    from packaging.version import Version
+
+    if Version(np.__version__) >= Version('1.22'):
+        np.set_printoptions(legacy='1.21')
diff --git a/nibabel/ecat.py b/nibabel/ecat.py
index 7f477e4a9..1db902d10 100644
--- a/nibabel/ecat.py
+++ b/nibabel/ecat.py
@@ -923,7 +923,7 @@ def _write_data(self, data, stream, pos, dtype=None, endianness=None):
             endianness = native_code
 
         stream.seek(pos)
-        make_array_writer(data.newbyteorder(endianness), dtype).to_fileobj(stream)
+        make_array_writer(data.view(data.dtype.newbyteorder(endianness)), dtype).to_fileobj(stream)
 
     def to_file_map(self, file_map=None):
         """Write ECAT7 image to `file_map` or contained ``self.file_map``
diff --git a/nibabel/freesurfer/io.py b/nibabel/freesurfer/io.py
index ec6b474b0..95d4eed0f 100644
--- a/nibabel/freesurfer/io.py
+++ b/nibabel/freesurfer/io.py
@@ -31,7 +31,7 @@ def _fread3(fobj):
     n : int
         A 3 byte int
     """
-    b1, b2, b3 = np.fromfile(fobj, '>u1', 3)
+    b1, b2, b3 = np.fromfile(fobj, '>u1', 3).astype(np.int64)
     return (b1 << 16) + (b2 << 8) + b3
 
 
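
Note on the _fread3 change above: under NumPy 2.0's NEP 50 promotion, shifting a uint8 scalar by a Python int keeps the uint8 dtype, so b1 << 16 would wrap rather than widen. Casting the three bytes to int64 first keeps the arithmetic exact. A standalone sketch with illustrative byte values:

    import numpy as np

    raw = np.array([1, 2, 3], dtype=np.uint8)  # the 3-byte big-endian int 0x010203
    b1, b2, b3 = raw.astype(np.int64)  # widen before shifting, as _fread3 now does
    assert (b1 << 16) + (b2 << 8) + b3 == 0x010203
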
diff --git a/nibabel/freesurfer/tests/test_io.py b/nibabel/freesurfer/tests/test_io.py
index 183a67ed2..d6c9649ca 100644
--- a/nibabel/freesurfer/tests/test_io.py
+++ b/nibabel/freesurfer/tests/test_io.py
@@ -4,14 +4,13 @@
 import struct
 import time
 import unittest
-import warnings
 from os.path import isdir
 from os.path import join as pjoin
 from pathlib import Path
 
 import numpy as np
 import pytest
-from numpy.testing import assert_allclose, assert_array_equal
+from numpy.testing import assert_allclose
 
 from ...fileslice import strided_scalar
 from ...testing import clear_and_catch_warnings
@@ -105,8 +104,10 @@ def test_geometry():
     assert np.array_equal(faces, faces2)
 
     # Validate byte ordering
-    coords_swapped = coords.byteswap().newbyteorder()
-    faces_swapped = faces.byteswap().newbyteorder()
+    coords_swapped = coords.byteswap()
+    coords_swapped = coords_swapped.view(coords_swapped.dtype.newbyteorder())
+    faces_swapped = faces.byteswap()
+    faces_swapped = faces_swapped.view(faces_swapped.dtype.newbyteorder())
     assert np.array_equal(coords_swapped, coords)
     assert np.array_equal(faces_swapped, faces)
 
diff --git a/nibabel/freesurfer/tests/test_mghformat.py b/nibabel/freesurfer/tests/test_mghformat.py
index 5a400119b..189f1a9dd 100644
--- a/nibabel/freesurfer/tests/test_mghformat.py
+++ b/nibabel/freesurfer/tests/test_mghformat.py
@@ -345,7 +345,7 @@ def test_mghheader_default_structarr():
     for endianness in (None,) + BIG_CODES:
         hdr2 = MGHHeader.default_structarr(endianness=endianness)
         assert hdr2 == hdr
-        assert hdr2.newbyteorder('>') == hdr
+        assert hdr2.view(hdr2.dtype.newbyteorder('>')) == hdr
 
     for endianness in LITTLE_CODES:
         with pytest.raises(ValueError):
diff --git a/nibabel/nifti1.py b/nibabel/nifti1.py
index c1b0124eb..890bc2e22 100644
--- a/nibabel/nifti1.py
+++ b/nibabel/nifti1.py
@@ -89,7 +89,7 @@
 if have_binary128():
     # Only enable 128 bit floats if we really have IEEE binary 128 longdoubles
     _float128t: type[np.generic] = np.longdouble
-    _complex256t: type[np.generic] = np.longcomplex
+    _complex256t: type[np.generic] = np.clongdouble
 else:
     _float128t = np.void
     _complex256t = np.void
@@ -2443,10 +2443,14 @@ def _get_analyze_compat_dtype(arr):
         return np.dtype('int16' if arr.max() <= np.iinfo(np.int16).max else 'int32')
 
     mn, mx = arr.min(), arr.max()
-    if np.can_cast(mn, np.int32) and np.can_cast(mx, np.int32):
-        return np.dtype('int32')
-    if np.can_cast(mn, np.float32) and np.can_cast(mx, np.float32):
-        return np.dtype('float32')
+    if arr.dtype.kind in 'iu':
+        info = np.iinfo('int32')
+        if mn >= info.min and mx <= info.max:
+            return np.dtype('int32')
+    elif arr.dtype.kind == 'f':
+        info = np.finfo('float32')
+        if mn >= info.min and mx <= info.max:
+            return np.dtype('float32')
 
     raise ValueError(
         f'Cannot find analyze-compatible dtype for array with dtype={dtype} (min={mn}, max={mx})'
diff --git a/nibabel/quaternions.py b/nibabel/quaternions.py
index ec4066060..d2fc3ac4c 100644
--- a/nibabel/quaternions.py
+++ b/nibabel/quaternions.py
@@ -29,7 +29,9 @@
 
 import numpy as np
 
-MAX_FLOAT = np.maximum_sctype(float)
+from .casting import sctypes
+
+MAX_FLOAT = sctypes['float'][-1]
 FLOAT_EPS = np.finfo(float).eps
 
 
diff --git a/nibabel/spatialimages.py b/nibabel/spatialimages.py
index ef34fe946..bcc4336f7 100644
--- a/nibabel/spatialimages.py
+++ b/nibabel/spatialimages.py
@@ -139,6 +139,7 @@
 import numpy as np
 
 from .arrayproxy import ArrayLike
+from .casting import sctypes_aliases
 from .dataobj_images import DataobjImage
 from .filebasedimages import FileBasedHeader, FileBasedImage
 from .fileholders import FileMap
@@ -333,7 +334,7 @@ def _supported_np_types(klass: type[HasDtype]) -> set[type[np.generic]]:
         else:
             raise e
     supported = set()
-    for np_type in set(np.sctypeDict.values()):
+    for np_type in sctypes_aliases:
         try:
             obj.set_data_dtype(np_type)
         except HeaderDataError:
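
Note on the _get_analyze_compat_dtype change above: NumPy 2.0 drops value-based casting, so np.can_cast no longer answers "does this value fit?" (and rejects Python scalars outright). The patch instead branches on arr.dtype.kind and compares the actual min/max against explicit iinfo/finfo bounds. A minimal sketch of the same check, with a hypothetical helper name:

    import numpy as np

    def fits_in(value, dtype) -> bool:
        # Explicit bounds check standing in for value-based np.can_cast
        kind = np.dtype(dtype).kind
        info = np.iinfo(dtype) if kind in 'iu' else np.finfo(dtype)
        return info.min <= value <= info.max

    assert fits_in(2**31 - 1, np.int32) and not fits_in(2**31, np.int32)
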
diff --git a/nibabel/streamlines/trk.py b/nibabel/streamlines/trk.py
index 04ac56a51..966b133d1 100644
--- a/nibabel/streamlines/trk.py
+++ b/nibabel/streamlines/trk.py
@@ -577,7 +577,7 @@ def _read_header(fileobj):
             endianness = swapped_code
 
             # Swap byte order
-            header_rec = header_rec.newbyteorder()
+            header_rec = header_rec.view(header_rec.dtype.newbyteorder())
             if header_rec['hdr_size'] != TrkFile.HEADER_SIZE:
                 msg = (
                     f"Invalid hdr_size: {header_rec['hdr_size']} "
diff --git a/nibabel/tests/test_analyze.py b/nibabel/tests/test_analyze.py
index b4a3cd297..4e024d6e3 100644
--- a/nibabel/tests/test_analyze.py
+++ b/nibabel/tests/test_analyze.py
@@ -26,8 +26,7 @@
 from .. import imageglobals
 from ..analyze import AnalyzeHeader, AnalyzeImage
 from ..arraywriters import WriterError
-from ..casting import as_int
-from ..loadsave import read_img_data
+from ..casting import as_int, sctypes_aliases
 from ..nifti1 import Nifti1Header
 from ..optpkg import optional_package
 from ..spatialimages import HeaderDataError, HeaderTypeError, supported_np_types
@@ -52,9 +51,7 @@
 def add_duplicate_types(supported_np_types):
     # Update supported numpy types with named scalar types that map to the same set of dtypes
     dtypes = {np.dtype(t) for t in supported_np_types}
-    supported_np_types.update(
-        scalar for scalar in set(np.sctypeDict.values()) if np.dtype(scalar) in dtypes
-    )
+    supported_np_types.update(scalar for scalar in sctypes_aliases if np.dtype(scalar) in dtypes)
 
 
 class TestAnalyzeHeader(tws._TestLabeledWrapStruct):
diff --git a/nibabel/tests/test_arrayproxy.py b/nibabel/tests/test_arrayproxy.py
index 7558c55ea..e50caa54c 100644
--- a/nibabel/tests/test_arrayproxy.py
+++ b/nibabel/tests/test_arrayproxy.py
@@ -12,7 +12,6 @@
 import contextlib
 import gzip
 import pickle
-import warnings
 from io import BytesIO
 from unittest import mock
 
diff --git a/nibabel/tests/test_arraywriters.py b/nibabel/tests/test_arraywriters.py
index e77c2fd11..89e7ac675 100644
--- a/nibabel/tests/test_arraywriters.py
+++ b/nibabel/tests/test_arraywriters.py
@@ -20,14 +20,14 @@
     get_slope_inter,
     make_array_writer,
 )
-from ..casting import int_abs, on_powerpc, shared_range, type_info
+from ..casting import int_abs, sctypes, shared_range, type_info
 from ..testing import assert_allclose_safely, suppress_warnings
 from ..volumeutils import _dt_min_max, apply_read_scaling, array_from_file
 
-FLOAT_TYPES = np.sctypes['float']
-COMPLEX_TYPES = np.sctypes['complex']
-INT_TYPES = np.sctypes['int']
-UINT_TYPES = np.sctypes['uint']
+FLOAT_TYPES = sctypes['float']
+COMPLEX_TYPES = sctypes['complex']
+INT_TYPES = sctypes['int']
+UINT_TYPES = sctypes['uint']
 CFLOAT_TYPES = FLOAT_TYPES + COMPLEX_TYPES
 IUINT_TYPES = INT_TYPES + UINT_TYPES
 NUMERIC_TYPES = CFLOAT_TYPES + IUINT_TYPES
@@ -61,7 +61,8 @@ def test_arraywriters():
         assert aw.out_dtype == arr.dtype
         assert_array_equal(arr, round_trip(aw))
         # Byteswapped should be OK
-        bs_arr = arr.byteswap().newbyteorder('S')
+        bs_arr = arr.byteswap()
+        bs_arr = bs_arr.view(bs_arr.dtype.newbyteorder('S'))
         bs_aw = klass(bs_arr)
         bs_aw_rt = round_trip(bs_aw)
         # assert against original array because POWER7 was running into
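
Note: the recurring rewrite in this patch, arr.newbyteorder(...) becoming arr.view(arr.dtype.newbyteorder(...)), is the NumPy 2.0-compatible spelling of the removed ndarray.newbyteorder method; the dtype method of the same name survives. A quick equivalence sketch with illustrative values:

    import numpy as np

    arr = np.arange(4, dtype='<i4')  # little-endian int32
    # Swap the stored bytes, then reinterpret the buffer with the opposite
    # byte order, so the logical values are unchanged (as in test_io.py above).
    swapped = arr.byteswap().view(arr.dtype.newbyteorder())
    assert swapped.dtype.byteorder == '>' and np.array_equal(swapped, arr)
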
diff --git a/nibabel/tests/test_casting.py b/nibabel/tests/test_casting.py
index a082394b7..d04b996bb 100644
--- a/nibabel/tests/test_casting.py
+++ b/nibabel/tests/test_casting.py
@@ -5,7 +5,7 @@
 
 import numpy as np
 import pytest
-from numpy.testing import assert_array_almost_equal, assert_array_equal
+from numpy.testing import assert_array_equal
 
 from ..casting import (
     CastingError,
@@ -17,6 +17,7 @@
     int_abs,
     int_to_float,
     longdouble_precision_improved,
+    sctypes,
     shared_range,
     ulp,
 )
@@ -24,8 +25,8 @@
 
 
 def test_shared_range():
-    for ft in np.sctypes['float']:
-        for it in np.sctypes['int'] + np.sctypes['uint']:
+    for ft in sctypes['float']:
+        for it in sctypes['int'] + sctypes['uint']:
             # Test that going a bit above or below the calculated min and max
             # either generates the same number when cast, or the max int value
             # (if this system generates that) or something smaller (because of
@@ -54,7 +55,7 @@ def test_shared_range():
             assert np.all((bit_bigger == casted_mx) | (bit_bigger == imax))
         else:
             assert np.all(bit_bigger <= casted_mx)
-        if it in np.sctypes['uint']:
+        if it in sctypes['uint']:
             assert mn == 0
             continue
         # And something larger for the minimum
@@ -90,8 +91,8 @@ def test_shared_range_inputs():
 
 
 def test_casting():
-    for ft in np.sctypes['float']:
-        for it in np.sctypes['int'] + np.sctypes['uint']:
+    for ft in sctypes['float']:
+        for it in sctypes['int'] + sctypes['uint']:
             ii = np.iinfo(it)
             arr = [ii.min - 1, ii.max + 1, -np.inf, np.inf, np.nan, 0.2, 10.6]
             farr_orig = np.array(arr, dtype=ft)
@@ -140,7 +141,7 @@ def test_casting():
 
 
 def test_int_abs():
-    for itype in np.sctypes['int']:
+    for itype in sctypes['int']:
         info = np.iinfo(itype)
         in_arr = np.array([info.min, info.max], dtype=itype)
         idtype = np.dtype(itype)
@@ -188,7 +189,7 @@ def test_able_int_type():
 
 def test_able_casting():
     # Check the able_int_type function guesses numpy out type
-    types = np.sctypes['int'] + np.sctypes['uint']
+    types = sctypes['int'] + sctypes['uint']
     for in_type in types:
         in_info = np.iinfo(in_type)
         in_mn, in_mx = in_info.min, in_info.max
diff --git a/nibabel/tests/test_floating.py b/nibabel/tests/test_floating.py
index a06c180b8..82fdc4402 100644
--- a/nibabel/tests/test_floating.py
+++ b/nibabel/tests/test_floating.py
@@ -1,9 +1,11 @@
 """Test floating point deconstructions and floor methods
 """
 import sys
+from contextlib import nullcontext
 
 import numpy as np
 import pytest
+from packaging.version import Version
 
 from ..casting import (
     FloatingError,
@@ -18,6 +20,7 @@
     longdouble_precision_improved,
     ok_floats,
     on_powerpc,
+    sctypes,
     type_info,
 )
 from ..testing import suppress_warnings
@@ -26,6 +29,8 @@
 
 LD_INFO = type_info(np.longdouble)
 
+FP_OVERFLOW_WARN = Version(np.__version__) < Version('2.0.0.dev0')
+
 
 def dtt2dict(dtt):
     """Create info dictionary from numpy type"""
@@ -43,7 +48,7 @@ def dtt2dict(dtt):
 
 def test_type_info():
     # Test routine to get min, max, nmant, nexp
-    for dtt in np.sctypes['int'] + np.sctypes['uint']:
+    for dtt in sctypes['int'] + sctypes['uint']:
         info = np.iinfo(dtt)
         infod = type_info(dtt)
         assert infod == dict(
@@ -148,10 +153,21 @@ def test_as_int():
     nexp64 = floor_log2(type_info(np.float64)['max'])
     with np.errstate(over='ignore'):
         val = np.longdouble(2**nexp64) * 2  # outside float64 range
-    with pytest.raises(OverflowError):
-        as_int(val)
-    with pytest.raises(OverflowError):
-        as_int(-val)
+    assert val > np.finfo('float64').max
+    # TODO: Should this actually still overflow? Does it matter?
+    if FP_OVERFLOW_WARN:
+        ctx = pytest.raises(OverflowError)
+    else:
+        ctx = nullcontext()
+    out_val = None
+    with ctx:
+        out_val = as_int(val)
+    if out_val is not None:
+        assert out_val == val
+    with ctx:
+        out_val = as_int(-val)
+    if out_val is not None:
+        assert out_val == -val
 
 
 def test_int_to_float():
@@ -212,7 +228,7 @@ def test_int_to_float():
 def test_as_int_np_fix():
     # Test as_int works for integers.  We need as_int for integers because of a
     # numpy 1.4.1 bug such that int(np.uint32(2**32-1) == -1
-    for t in np.sctypes['int'] + np.sctypes['uint']:
+    for t in sctypes['int'] + sctypes['uint']:
         info = np.iinfo(t)
         mn, mx = np.array([info.min, info.max], dtype=t)
         assert (mn, mx) == (as_int(mn), as_int(mx))
diff --git a/nibabel/tests/test_image_api.py b/nibabel/tests/test_image_api.py
index 890619bad..f1fc72071 100644
--- a/nibabel/tests/test_image_api.py
+++ b/nibabel/tests/test_image_api.py
@@ -40,7 +40,7 @@
 import unittest
 
 import pytest
-from numpy.testing import assert_allclose, assert_almost_equal, assert_array_equal, assert_warns
+from numpy.testing import assert_allclose, assert_almost_equal, assert_array_equal
 
 from nibabel.arraywriters import WriterError
 from nibabel.testing import (
@@ -70,7 +70,7 @@
     minc2,
     parrec,
 )
-from ..deprecator import ExpiredDeprecationError
+from ..casting import sctypes
 from ..spatialimages import SpatialImage
 from ..tmpdirs import InTemporaryDirectory
 from .test_api_validators import ValidateAPI
@@ -173,7 +173,7 @@ def validate_filenames(self, imaker, params):
         for path in (fname, pathlib.Path(fname)):
             with InTemporaryDirectory():
                 # Validate that saving or loading a file doesn't use deprecated methods internally
-                with clear_and_catch_warnings() as w:
+                with clear_and_catch_warnings():
                     warnings.filterwarnings(
                         'error', category=DeprecationWarning, module=r'nibabel.*'
                     )
@@ -404,7 +404,7 @@ def _check_array_caching(self, imaker, meth_name, caching):
             return
         # Return original array from get_fdata only if the input array is the
         # requested dtype.
-        float_types = np.sctypes['float']
+        float_types = sctypes['float']
         if arr_dtype not in float_types:
             return
         for float_type in float_types:
diff --git a/nibabel/tests/test_image_types.py b/nibabel/tests/test_image_types.py
index 9fd48ee69..da2f93e21 100644
--- a/nibabel/tests/test_image_types.py
+++ b/nibabel/tests/test_image_types.py
@@ -15,19 +15,14 @@
 import numpy as np
 
 from .. import (
-    AnalyzeHeader,
-    AnalyzeImage,
     MGHImage,
     Minc1Image,
     Minc2Image,
-    Nifti1Header,
     Nifti1Image,
     Nifti1Pair,
-    Nifti2Header,
     Nifti2Image,
     Nifti2Pair,
     Spm2AnalyzeImage,
-    Spm99AnalyzeImage,
     all_image_classes,
 )
 
diff --git a/nibabel/tests/test_init.py b/nibabel/tests/test_init.py
index 877c045f6..2317a6397 100644
--- a/nibabel/tests/test_init.py
+++ b/nibabel/tests/test_init.py
@@ -4,9 +4,9 @@
 
 import pytest
 
 try:
-    from importlib.resources import as_file, files
+    from importlib.resources import files
 except ImportError:
-    from importlib_resources import as_file, files
+    from importlib_resources import files
 
 import nibabel as nib
diff --git a/nibabel/tests/test_nifti1.py b/nibabel/tests/test_nifti1.py
index fbefe99e5..c7c4d1d84 100644
--- a/nibabel/tests/test_nifti1.py
+++ b/nibabel/tests/test_nifti1.py
@@ -79,7 +79,7 @@ class TestNifti1PairHeader(tana.TestAnalyzeHeader, tspm.HeaderScalingMixin):
         (np.int8, np.uint16, np.uint32, np.int64, np.uint64, np.complex128)
     )
     if have_binary128():
-        supported_np_types = supported_np_types.union((np.longdouble, np.longcomplex))
+        supported_np_types = supported_np_types.union((np.longdouble, np.clongdouble))
     tana.add_duplicate_types(supported_np_types)
 
     def test_empty(self):
diff --git a/nibabel/tests/test_pkg_info.py b/nibabel/tests/test_pkg_info.py
index 0d8146fdb..dfe18c975 100644
--- a/nibabel/tests/test_pkg_info.py
+++ b/nibabel/tests/test_pkg_info.py
@@ -2,7 +2,6 @@
 """
 import pytest
-from packaging.version import Version
 
 import nibabel as nib
 from nibabel.pkg_info import cmp_pkg_version
 
diff --git a/nibabel/tests/test_proxy_api.py b/nibabel/tests/test_proxy_api.py
index 1c9e02186..421bc5bf4 100644
--- a/nibabel/tests/test_proxy_api.py
+++ b/nibabel/tests/test_proxy_api.py
@@ -36,13 +36,12 @@
 
 import numpy as np
 import pytest
-from numpy.testing import assert_allclose, assert_almost_equal, assert_array_equal
+from numpy.testing import assert_allclose, assert_array_equal
 
 from .. import ecat, minc1, minc2, parrec
 from ..analyze import AnalyzeHeader
 from ..arrayproxy import ArrayProxy, is_proxy
-from ..casting import have_binary128
-from ..deprecator import ExpiredDeprecationError
+from ..casting import have_binary128, sctypes
 from ..externals.netcdf import netcdf_file
 from ..freesurfer.mghformat import MGHHeader
 from ..nifti1 import Nifti1Header
@@ -58,6 +57,11 @@
 
 h5py, have_h5py, _ = optional_package('h5py')
 
+try:
+    from numpy.exceptions import ComplexWarning
+except ModuleNotFoundError:  # NumPy < 1.25
+    from numpy import ComplexWarning
+
 
 def _some_slicers(shape):
     ndim = len(shape)
@@ -144,9 +148,9 @@ def validate_array_interface_with_dtype(self, pmaker, params):
         if np.issubdtype(orig.dtype, np.complexfloating):
             context = clear_and_catch_warnings()
             context.__enter__()
-            warnings.simplefilter('ignore', np.ComplexWarning)
+            warnings.simplefilter('ignore', ComplexWarning)
 
-        for dtype in np.sctypes['float'] + np.sctypes['int'] + np.sctypes['uint']:
+        for dtype in sctypes['float'] + sctypes['int'] + sctypes['uint']:
             # Directly coerce with a dtype
             direct = dtype(prox)
             # Half-precision is imprecise. Obviously. It's a bad idea, but don't break
diff --git a/nibabel/tests/test_round_trip.py b/nibabel/tests/test_round_trip.py
index cb754d0b5..07783fe55 100644
--- a/nibabel/tests/test_round_trip.py
+++ b/nibabel/tests/test_round_trip.py
@@ -10,7 +10,7 @@
 
 from .. import Nifti1Header, Nifti1Image
 from ..arraywriters import ScalingError
-from ..casting import best_float, type_info, ulp
+from ..casting import best_float, sctypes, type_info, ulp
 from ..spatialimages import HeaderDataError, supported_np_types
 
 DEBUG = False
@@ -102,7 +102,7 @@ def test_round_trip():
     rng = np.random.RandomState(20111121)
     N = 10000
     sd_10s = range(-20, 51, 5)
-    iuint_types = np.sctypes['int'] + np.sctypes['uint']
+    iuint_types = sctypes['int'] + sctypes['uint']
     # Remove types which cannot be set into nifti header datatype
     nifti_supported = supported_np_types(Nifti1Header())
    iuint_types = [t for t in iuint_types if t in nifti_supported]
diff --git a/nibabel/tests/test_scaling.py b/nibabel/tests/test_scaling.py
index 2fbe88a1a..f667b4164 100644
--- a/nibabel/tests/test_scaling.py
+++ b/nibabel/tests/test_scaling.py
@@ -13,9 +13,9 @@
 
 import numpy as np
 import pytest
-from numpy.testing import assert_array_almost_equal, assert_array_equal
+from numpy.testing import assert_array_equal
 
-from ..casting import type_info
+from ..casting import sctypes, type_info
 from ..testing import suppress_warnings
 from ..volumeutils import apply_read_scaling, array_from_file, array_to_file, finite_range
 from .test_volumeutils import _calculate_scale
@@ -177,8 +177,8 @@ def test_array_file_scales(in_type, out_type):
     ],
 )
 def test_scaling_in_abstract(category0, category1, overflow):
-    for in_type in np.sctypes[category0]:
-        for out_type in np.sctypes[category1]:
+    for in_type in sctypes[category0]:
+        for out_type in sctypes[category1]:
             if overflow:
                 with suppress_warnings():
                     check_int_a2f(in_type, out_type)
@@ -188,10 +188,10 @@ def test_scaling_in_abstract(category0, category1, overflow):
 
 def check_int_a2f(in_type, out_type):
     # Check that array to / from file returns roughly the same as input
-    big_floater = np.maximum_sctype(np.float64)
+    big_floater = sctypes['float'][-1]
     info = type_info(in_type)
     this_min, this_max = info['min'], info['max']
-    if not in_type in np.sctypes['complex']:
+    if in_type not in sctypes['complex']:
         data = np.array([this_min, this_max], in_type)
         # Bug in numpy 1.6.2 on PPC leading to infs - abort
         if not np.all(np.isfinite(data)):
diff --git a/nibabel/tests/test_spatialimages.py b/nibabel/tests/test_spatialimages.py
index 95d3a2a15..5cad23a22 100644
--- a/nibabel/tests/test_spatialimages.py
+++ b/nibabel/tests/test_spatialimages.py
@@ -9,7 +9,6 @@
 """Testing spatialimages
 """
 
-import warnings
 from io import BytesIO
 
 import numpy as np
@@ -19,13 +18,7 @@
 from .. import load as top_load
 from ..imageclasses import spatial_axes_first
 from ..spatialimages import HeaderDataError, SpatialHeader, SpatialImage
-from ..testing import (
-    bytesio_round_trip,
-    clear_and_catch_warnings,
-    expires,
-    memmap_after_ufunc,
-    suppress_warnings,
-)
+from ..testing import bytesio_round_trip, expires, memmap_after_ufunc
 from ..tmpdirs import InTemporaryDirectory
 
 
diff --git a/nibabel/tests/test_spm99analyze.py b/nibabel/tests/test_spm99analyze.py
index a8756e301..ada92d3b0 100644
--- a/nibabel/tests/test_spm99analyze.py
+++ b/nibabel/tests/test_spm99analyze.py
@@ -23,8 +23,8 @@
 # files
 needs_scipy = unittest.skipUnless(have_scipy, 'scipy not available')
 
-from ..casting import shared_range, type_info
-from ..spatialimages import HeaderDataError, supported_np_types
+from ..casting import sctypes_aliases, shared_range, type_info
+from ..spatialimages import HeaderDataError
 from ..spm99analyze import HeaderTypeError, Spm99AnalyzeHeader, Spm99AnalyzeImage
 from ..testing import (
     assert_allclose_safely,
@@ -35,11 +35,11 @@
 from ..volumeutils import _dt_min_max, apply_read_scaling
 from . import test_analyze
 
-# np.sctypes values are lists of types with unique sizes
+# nibabel.casting.sctypes values are lists of types with unique sizes
 # For testing, we want all concrete classes of a type
 # Key on kind, rather than abstract base classes, since timedelta64 is a signedinteger
 sctypes = {}
-for sctype in set(np.sctypeDict.values()):
+for sctype in sctypes_aliases:
     sctypes.setdefault(np.dtype(sctype).kind, []).append(sctype)
 
 # Sort types to ensure that xdist doesn't complain about test order when we parametrize
@@ -328,7 +328,8 @@ def test_no_scaling(self, in_dtype, supported_dtype):
 
         inter = 10 if hdr.has_data_intercept else 0
         mn_in, mx_in = _dt_min_max(in_dtype)
-        arr = np.array([mn_in, -1, 0, 1, 10, mx_in], dtype=in_dtype)
+        mn = -1 if np.dtype(in_dtype).kind != 'u' else 0
+        arr = np.array([mn_in, mn, 0, 1, 10, mx_in], dtype=in_dtype)
         img = img_class(arr, np.eye(4), hdr)
         img.set_data_dtype(supported_dtype)
         # Setting the scaling means we don't calculate it later
diff --git a/nibabel/tests/test_testing.py b/nibabel/tests/test_testing.py
index 8cd70e37a..dee3ea355 100644
--- a/nibabel/tests/test_testing.py
+++ b/nibabel/tests/test_testing.py
@@ -8,6 +8,7 @@
 import numpy as np
 import pytest
 
+from ..casting import sctypes
 from ..testing import (
     assert_allclose_safely,
     assert_re_in,
@@ -48,7 +49,7 @@ def test_assert_allclose_safely():
     with pytest.raises(AssertionError):
         assert_allclose_safely(a, b)
     # Test allcloseness of inf, especially np.float128 infs
-    for dtt in np.sctypes['float']:
+    for dtt in sctypes['float']:
         a = np.array([-np.inf, 1, np.inf], dtype=dtt)
         b = np.array([-np.inf, 1, np.inf], dtype=dtt)
         assert_allclose_safely(a, b)
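
Note on the test_no_scaling change above: NumPy 2.0 raises OverflowError when a negative Python int is placed into an array with an unsigned dtype (NumPy 1.24 through 1.26 only warned, and older versions wrapped silently), so -1 is included in the test values only for signed and float inputs. A minimal reproduction of the guard:

    import numpy as np

    in_dtype = np.uint8  # illustrative; the test parametrizes over many dtypes
    mn = -1 if np.dtype(in_dtype).kind != 'u' else 0  # same guard as the patch
    arr = np.array([0, mn, 1], dtype=in_dtype)  # safe: mn is 0 for unsigned
    # np.array([-1], dtype=np.uint8) raises OverflowError on NumPy >= 2.0
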
diff --git a/nibabel/tests/test_volumeutils.py b/nibabel/tests/test_volumeutils.py
index ab5bd38ee..07ca9a6ba 100644
--- a/nibabel/tests/test_volumeutils.py
+++ b/nibabel/tests/test_volumeutils.py
@@ -32,7 +32,7 @@
     suppress_warnings,
 )
 
-from ..casting import OK_FLOATS, floor_log2, shared_range, type_info
+from ..casting import OK_FLOATS, floor_log2, sctypes, shared_range, type_info
 from ..openers import BZ2File, ImageOpener, Opener
 from ..optpkg import optional_package
 from ..tmpdirs import InTemporaryDirectory
@@ -59,15 +59,21 @@
 
 pyzstd, HAVE_ZSTD, _ = optional_package('pyzstd')
 
-#: convenience variables for numpy types
-FLOAT_TYPES = np.sctypes['float']
-COMPLEX_TYPES = np.sctypes['complex']
+# convenience variables for numpy types
+FLOAT_TYPES = sctypes['float']
+COMPLEX_TYPES = sctypes['complex']
 CFLOAT_TYPES = FLOAT_TYPES + COMPLEX_TYPES
-INT_TYPES = np.sctypes['int']
-IUINT_TYPES = INT_TYPES + np.sctypes['uint']
+INT_TYPES = sctypes['int']
+IUINT_TYPES = INT_TYPES + sctypes['uint']
 NUMERIC_TYPES = CFLOAT_TYPES + IUINT_TYPES
 
 FP_RUNTIME_WARN = Version(np.__version__) >= Version('1.24.0.dev0+239')
+NP_2 = Version(np.__version__) >= Version('2.0.0.dev0')
+
+try:
+    from numpy.exceptions import ComplexWarning
+except ModuleNotFoundError:  # NumPy < 1.25
+    from numpy import ComplexWarning
 
 
 def test__is_compressed_fobj():
@@ -538,8 +544,12 @@ def test_a2f_scaled_unscaled():
         NUMERIC_TYPES, NUMERIC_TYPES, (0, 0.5, -1, 1), (1, 0.5, 2)
     ):
         mn_in, mx_in = _dt_min_max(in_dtype)
-        nan_val = np.nan if in_dtype in CFLOAT_TYPES else 10
-        arr = np.array([mn_in, -1, 0, 1, mx_in, nan_val], dtype=in_dtype)
+        vals = [mn_in, 0, 1, mx_in]
+        if np.dtype(in_dtype).kind != 'u':
+            vals.append(-1)
+        if in_dtype in CFLOAT_TYPES:
+            vals.append(np.nan)
+        arr = np.array(vals, dtype=in_dtype)
         mn_out, mx_out = _dt_min_max(out_dtype)
         # 0 when scaled to output will also be the output value for NaN
         nan_fill = -intercept / divslope
@@ -597,7 +607,7 @@ def test_a2f_nanpos():
 
 
 def test_a2f_bad_scaling():
     # Test that pathological scalers raise an error
-    NUMERICAL_TYPES = sum([np.sctypes[key] for key in ['int', 'uint', 'float', 'complex']], [])
+    NUMERICAL_TYPES = sum([sctypes[key] for key in ['int', 'uint', 'float', 'complex']], [])
     for in_type, out_type, slope, inter in itertools.product(
         NUMERICAL_TYPES,
         NUMERICAL_TYPES,
@@ -610,7 +620,7 @@ def test_a2f_bad_scaling():
         if np.issubdtype(in_type, np.complexfloating) and not np.issubdtype(
             out_type, np.complexfloating
         ):
-            cm = pytest.warns(np.ComplexWarning)
+            cm = pytest.warns(ComplexWarning)
         if (slope, inter) == (1, 0):
             with cm:
                 assert_array_equal(
@@ -650,7 +660,7 @@ def test_a2f_nan2zero_range():
         arr = np.array([-1, 0, 1, np.nan], dtype=dt)
         # Error occurs for arrays without nans too
         arr_no_nan = np.array([-1, 0, 1, 2], dtype=dt)
-        complex_warn = (np.ComplexWarning,) if np.issubdtype(dt, np.complexfloating) else ()
+        complex_warn = (ComplexWarning,) if np.issubdtype(dt, np.complexfloating) else ()
         # Casting nan to int will produce a RuntimeWarning in numpy 1.24
         nan_warn = (RuntimeWarning,) if FP_RUNTIME_WARN else ()
         c_and_n_warn = complex_warn + nan_warn
@@ -733,9 +743,14 @@ def test_apply_scaling():
     f32_arr = np.zeros((1,), dtype=f32)
     i16_arr = np.zeros((1,), dtype=np.int16)
     # Check float upcast (not the normal numpy scalar rule)
+    # This is the normal rule - no upcast from Python scalar
+    assert (f32_arr * 1.0).dtype == np.float32
+    assert (f32_arr + 1.0).dtype == np.float32
     # This is the normal rule - no upcast from scalar
+    # before NumPy 2.0; under NEP 50 (NumPy >= 2.0) the float64 scalar upcasts
-    assert (f32_arr * f64(1)).dtype == np.float32
-    assert (f32_arr + f64(1)).dtype == np.float32
+    want_dtype = np.float64 if NP_2 else np.float32
+    assert (f32_arr * f64(1)).dtype == want_dtype
+    assert (f32_arr + f64(1)).dtype == want_dtype
     # The function does upcast though
     ret = apply_read_scaling(np.float32(0), np.float64(2))
     assert ret.dtype == np.float64
@@ -830,10 +845,10 @@ def check_against(f1, f2):
         return f1 if FLOAT_TYPES.index(f1) >= FLOAT_TYPES.index(f2) else f2
 
     for first in FLOAT_TYPES:
-        for other in IUINT_TYPES + np.sctypes['complex']:
+        for other in IUINT_TYPES + sctypes['complex']:
             assert better_float_of(first, other) == first
             assert better_float_of(other, first) == first
-            for other2 in IUINT_TYPES + np.sctypes['complex']:
+            for other2 in IUINT_TYPES + sctypes['complex']:
                 assert better_float_of(other, other2) == np.float32
                 assert better_float_of(other, other2, np.float64) == np.float64
     for second in FLOAT_TYPES:
diff --git a/nibabel/tests/test_wrapstruct.py b/nibabel/tests/test_wrapstruct.py
index 70f22894a..10b4b3f22 100644
--- a/nibabel/tests/test_wrapstruct.py
+++ b/nibabel/tests/test_wrapstruct.py
@@ -32,11 +32,12 @@
 
 from .. import imageglobals
 from ..batteryrunners import Report
+from ..casting import sctypes
 from ..spatialimages import HeaderDataError
 from ..volumeutils import Recoder, native_code, swapped_code
 from ..wrapstruct import LabeledWrapStruct, WrapStruct, WrapStructError
 
-INTEGER_TYPES = np.sctypes['int'] + np.sctypes['uint']
+INTEGER_TYPES = sctypes['int'] + sctypes['uint']
 
 
 def log_chk(hdr, level):
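
Note on the test_apply_scaling change above: it pins down the NEP 50 behavior shift that motivates several hunks in this patch. NumPy scalars now participate in result-dtype promotion, where value-based casting previously let a zero-dimensional float64 scalar be ignored. A self-contained sketch:

    import numpy as np
    from packaging.version import Version

    f32_arr = np.zeros(1, dtype=np.float32)
    assert (f32_arr * 1.0).dtype == np.float32  # Python float stays 'weak'
    # A float64 scalar upcasts the result under NEP 50 (NumPy >= 2.0) but was
    # ignored by value-based casting on earlier NumPy.
    np2 = Version(np.__version__) >= Version('2.0.0.dev0')
    expect = np.float64 if np2 else np.float32
    assert (f32_arr * np.float64(1)).dtype == expect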