about summary refs log tree commit diff
path: root/.venv/lib/python3.12/site-packages/numpy/testing
diff options
context:
space:
mode:
Diffstat (limited to '.venv/lib/python3.12/site-packages/numpy/testing')
-rw-r--r--.venv/lib/python3.12/site-packages/numpy/testing/__init__.py22
-rw-r--r--.venv/lib/python3.12/site-packages/numpy/testing/__init__.pyi50
-rw-r--r--.venv/lib/python3.12/site-packages/numpy/testing/_private/__init__.py0
-rw-r--r--.venv/lib/python3.12/site-packages/numpy/testing/_private/extbuild.py248
-rw-r--r--.venv/lib/python3.12/site-packages/numpy/testing/_private/utils.py2509
-rw-r--r--.venv/lib/python3.12/site-packages/numpy/testing/_private/utils.pyi402
-rw-r--r--.venv/lib/python3.12/site-packages/numpy/testing/overrides.py83
-rwxr-xr-x.venv/lib/python3.12/site-packages/numpy/testing/print_coercion_tables.py200
-rwxr-xr-x.venv/lib/python3.12/site-packages/numpy/testing/setup.py21
-rw-r--r--.venv/lib/python3.12/site-packages/numpy/testing/tests/__init__.py0
-rw-r--r--.venv/lib/python3.12/site-packages/numpy/testing/tests/test_utils.py1626
11 files changed, 5161 insertions, 0 deletions
diff --git a/.venv/lib/python3.12/site-packages/numpy/testing/__init__.py b/.venv/lib/python3.12/site-packages/numpy/testing/__init__.py
new file mode 100644
index 00000000..8a34221e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/testing/__init__.py
@@ -0,0 +1,22 @@
"""Common test support for all numpy test scripts.

This single module should provide all the common functionality for numpy tests
in a single location, so that test scripts can just import it and work right
away.

"""
from unittest import TestCase

from . import _private
from ._private.utils import *
# Explicitly re-export two private helpers that numpy's own test suite uses
# but which are deliberately excluded from `_private.utils.__all__`.
from ._private.utils import (_assert_valid_refcount, _gen_alignment_data)
from ._private import extbuild
from . import overrides

# Public API: everything `_private.utils` exports, plus the two names above.
__all__ = (
    _private.utils.__all__ + ['TestCase', 'overrides']
)

from numpy._pytesttester import PytestTester
test = PytestTester(__name__)
del PytestTester
diff --git a/.venv/lib/python3.12/site-packages/numpy/testing/__init__.pyi b/.venv/lib/python3.12/site-packages/numpy/testing/__init__.pyi
new file mode 100644
index 00000000..d65860cc
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/testing/__init__.pyi
@@ -0,0 +1,50 @@
+from numpy._pytesttester import PytestTester
+
+from unittest import (
+    TestCase as TestCase,
+)
+
+from numpy.testing._private.utils import (
+    assert_equal as assert_equal,
+    assert_almost_equal as assert_almost_equal,
+    assert_approx_equal as assert_approx_equal,
+    assert_array_equal as assert_array_equal,
+    assert_array_less as assert_array_less,
+    assert_string_equal as assert_string_equal,
+    assert_array_almost_equal as assert_array_almost_equal,
+    assert_raises as assert_raises,
+    build_err_msg as build_err_msg,
+    decorate_methods as decorate_methods,
+    jiffies as jiffies,
+    memusage as memusage,
+    print_assert_equal as print_assert_equal,
+    rundocs as rundocs,
+    runstring as runstring,
+    verbose as verbose,
+    measure as measure,
+    assert_ as assert_,
+    assert_array_almost_equal_nulp as assert_array_almost_equal_nulp,
+    assert_raises_regex as assert_raises_regex,
+    assert_array_max_ulp as assert_array_max_ulp,
+    assert_warns as assert_warns,
+    assert_no_warnings as assert_no_warnings,
+    assert_allclose as assert_allclose,
+    IgnoreException as IgnoreException,
+    clear_and_catch_warnings as clear_and_catch_warnings,
+    SkipTest as SkipTest,
+    KnownFailureException as KnownFailureException,
+    temppath as temppath,
+    tempdir as tempdir,
+    IS_PYPY as IS_PYPY,
+    IS_PYSTON as IS_PYSTON,
+    HAS_REFCOUNT as HAS_REFCOUNT,
+    suppress_warnings as suppress_warnings,
+    assert_array_compare as assert_array_compare,
+    assert_no_gc_cycles as assert_no_gc_cycles,
+    break_cycles as break_cycles,
+    HAS_LAPACK64 as HAS_LAPACK64,
+)
+
+__all__: list[str]
+__path__: list[str]
+test: PytestTester
diff --git a/.venv/lib/python3.12/site-packages/numpy/testing/_private/__init__.py b/.venv/lib/python3.12/site-packages/numpy/testing/_private/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/testing/_private/__init__.py
diff --git a/.venv/lib/python3.12/site-packages/numpy/testing/_private/extbuild.py b/.venv/lib/python3.12/site-packages/numpy/testing/_private/extbuild.py
new file mode 100644
index 00000000..541f5511
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/testing/_private/extbuild.py
@@ -0,0 +1,248 @@
+"""
+Build a c-extension module on-the-fly in tests.
+See build_and_import_extensions for usage hints
+
+"""
+
+import os
+import pathlib
+import subprocess
+import sys
+import sysconfig
+import textwrap
+
+__all__ = ['build_and_import_extension', 'compile_extension_module']
+
+
def build_and_import_extension(
        modname, functions, *, prologue="", build_dir=None,
        include_dirs=None, more_init=""):
    """
    Build and imports a c-extension module `modname` from a list of function
    fragments `functions`.


    Parameters
    ----------
    functions : list of fragments
        Each fragment is a sequence of func_name, calling convention, snippet.
    prologue : string
        Code to precede the rest, usually extra ``#include`` or ``#define``
        macros.
    build_dir : pathlib.Path
        Where to build the module, usually a temporary directory
    include_dirs : list, optional
        Extra directories to find include files when compiling
    more_init : string
        Code to appear in the module PyMODINIT_FUNC

    Returns
    -------
    out: module
        The module will have been loaded and is ready for use

    Examples
    --------
    >>> functions = [("test_bytes", "METH_O", \"\"\"
        if ( !PyBytes_Check(args)) {
            Py_RETURN_FALSE;
        }
        Py_RETURN_TRUE;
    \"\"\")]
    >>> mod = build_and_import_extension("testme", functions)
    >>> assert not mod.test_bytes(u'abc')
    >>> assert mod.test_bytes(b'abc')
    """
    # `None` default instead of a mutable `[]` default, which would be shared
    # across calls.
    if include_dirs is None:
        include_dirs = []
    body = prologue + _make_methods(functions, modname)
    init = """PyObject *mod = PyModule_Create(&moduledef);
           """
    if not build_dir:
        build_dir = pathlib.Path('.')
    if more_init:
        init += """#define INITERROR return NULL
                """
        init += more_init
    init += "\nreturn mod;"
    source_string = _make_source(modname, init, body)
    try:
        mod_so = compile_extension_module(
            modname, build_dir, include_dirs, source_string)
    except Exception as e:
        # shorten the exception chain
        raise RuntimeError(f"could not compile in {build_dir}:") from e
    import importlib.util
    spec = importlib.util.spec_from_file_location(modname, mod_so)
    foo = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(foo)
    return foo
+
+
def compile_extension_module(
        name, builddir, include_dirs,
        source_string, libraries=None, library_dirs=None):
    """
    Build an extension module and return the filename of the resulting
    native code file.

    Parameters
    ----------
    name : string
        name of the module, possibly including dots if it is a module inside a
        package.
    builddir : pathlib.Path
        Where to build the module, usually a temporary directory
    include_dirs : list
        Extra directories to find include files when compiling
    libraries : list, optional
        Libraries to link into the extension module
    library_dirs: list, optional
        Where to find the libraries, ``-L`` passed to the linker
    """
    modname = name.split('.')[-1]
    dirname = builddir / name
    dirname.mkdir(exist_ok=True)
    cfile = _convert_str_to_file(source_string, dirname)
    include_dirs = include_dirs + [sysconfig.get_config_var('INCLUDEPY')]

    # Forward the caller's libraries/library_dirs (previously these
    # parameters were accepted but silently replaced with empty lists).
    # Copy so _c_compile cannot mutate caller-owned lists.
    return _c_compile(
        cfile, outputfilename=dirname / modname,
        include_dirs=include_dirs,
        libraries=list(libraries or []),
        library_dirs=list(library_dirs or []),
        )
+
+
def _convert_str_to_file(source, dirname):
    """Write `source` to a file named ``source.c`` inside `dirname`.

    Returns the path of the created file.
    """
    target = dirname / 'source.c'
    target.write_text(str(source))
    return target
+
+
def _make_methods(functions, modname):
    """Expand ``(name, flags, snippet)`` fragments into full C functions.

    Builds one C function per fragment plus the ``PyMethodDef`` table and
    ``PyModuleDef`` struct, and returns the whole fragment ready for
    compilation.
    """
    table_entries = []
    func_defs = []
    for funcname, flags, code in functions:
        cfuncname = f"{modname}_{funcname}"
        # METH_KEYWORDS functions take a third kwargs argument.
        if 'METH_KEYWORDS' in flags:
            signature = '(PyObject *self, PyObject *args, PyObject *kwargs)'
        else:
            signature = '(PyObject *self, PyObject *args)'
        table_entries.append(
            f'{{"{funcname}", (PyCFunction){cfuncname}, {flags}}},')
        func_defs.append(f"""
        static PyObject* {cfuncname}{signature}
        {{
        {code}
        }}
        """)

    table = '\n'.join(table_entries)
    return "\n".join(func_defs) + f"""
    static PyMethodDef methods[] = {{
    {table}
    {{ NULL }}
    }};
    static struct PyModuleDef moduledef = {{
        PyModuleDef_HEAD_INIT,
        "{modname}",  /* m_name */
        NULL,           /* m_doc */
        -1,             /* m_size */
        methods,        /* m_methods */
    }};
    """
+
+
def _make_source(name, init, body):
    """Wrap `body` and `init` into a complete C source file for module `name`."""
    return f"""
    #include <Python.h>

    {body}

    PyMODINIT_FUNC
    PyInit_{name}(void) {{
    {init}
    }}
    """
+
+
def _c_compile(cfile, outputfilename, include_dirs=None, libraries=None,
               library_dirs=None):
    """Compile `cfile` into a shared object next to it and return its path.

    Chooses per-platform compiler/linker flags, then delegates the actual
    build to :func:`build` (meson).
    """
    # Use None defaults and copy: the darwin branch below appends to
    # include_dirs/library_dirs, which with `[]` defaults would mutate the
    # shared default lists across calls (and mutate caller-owned lists).
    include_dirs = list(include_dirs or [])
    libraries = list(libraries or [])
    library_dirs = list(library_dirs or [])

    if sys.platform == 'win32':
        # /we4013: treat implicit function declarations as errors.
        compile_extra = ["/we4013"]
        link_extra = ["/LIBPATH:" + os.path.join(sys.base_prefix, 'libs'),
                      '/DEBUG']  # /DEBUG generates a .pdb file
    elif sys.platform.startswith('linux'):
        compile_extra = [
            "-O0", "-g", "-Werror=implicit-function-declaration", "-fPIC"]
        link_extra = []
    else:
        compile_extra = link_extra = []
    if sys.platform == 'darwin':
        # support Fink & Darwinports
        for s in ('/sw/', '/opt/local/'):
            if (s + 'include' not in include_dirs
                    and os.path.exists(s + 'include')):
                include_dirs.append(s + 'include')
            if s + 'lib' not in library_dirs and os.path.exists(s + 'lib'):
                library_dirs.append(s + 'lib')

    outputfilename = outputfilename.with_suffix(get_so_suffix())
    build(
        cfile, outputfilename,
        compile_extra, link_extra,
        include_dirs, libraries, library_dirs)
    return outputfilename
+
+
def build(cfile, outputfilename, compile_extra, link_extra,
          include_dirs, libraries, library_dirs):
    "use meson to build"

    build_dir = cfile.parent / "build"
    os.makedirs(build_dir, exist_ok=True)
    so_name = outputfilename.parts[-1]

    # Generate a minimal meson project describing the single shared module.
    include_flags = ['-I' + d for d in include_dirs]
    lib_dir_flags = ['-L' + d for d in library_dirs]
    meson_text = textwrap.dedent(f"""\
        project('foo', 'c')
        shared_module('{so_name}', '{cfile.parts[-1]}',
            c_args: {include_flags} + {compile_extra},
            link_args: {lib_dir_flags} + {link_extra},
            link_with: {libraries},
            name_prefix: '',
            name_suffix: 'dummy',
        )
    """)
    (cfile.parent / "meson.build").write_text(meson_text)

    setup_cmd = ["meson", "setup"]
    if sys.platform == "win32":
        setup_cmd.append("--buildtype=release")
    subprocess.check_call(setup_cmd + ["--vsenv", ".."], cwd=build_dir)
    subprocess.check_call(["meson", "compile"], cwd=build_dir)
    # meson built `<name>.dummy`; move it up one level with the real name.
    os.rename(str(build_dir / so_name) + ".dummy", cfile.parent / so_name)
+        
def get_so_suffix():
    """Return the filename suffix for extension modules on this platform.

    Returns
    -------
    str
        The value of sysconfig's ``EXT_SUFFIX`` (e.g. ``.cpython-312-....so``).

    Raises
    ------
    RuntimeError
        If the interpreter reports no ``EXT_SUFFIX``.
    """
    ret = sysconfig.get_config_var('EXT_SUFFIX')
    if not ret:
        # A bare `assert` would be stripped under `python -O`; fail loudly.
        raise RuntimeError(
            "sysconfig reports no EXT_SUFFIX; cannot determine the "
            "extension-module filename suffix")
    return ret
diff --git a/.venv/lib/python3.12/site-packages/numpy/testing/_private/utils.py b/.venv/lib/python3.12/site-packages/numpy/testing/_private/utils.py
new file mode 100644
index 00000000..28dd656c
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/testing/_private/utils.py
@@ -0,0 +1,2509 @@
+"""
+Utility function to facilitate testing.
+
+"""
+import os
+import sys
+import platform
+import re
+import gc
+import operator
+import warnings
+from functools import partial, wraps
+import shutil
+import contextlib
+from tempfile import mkdtemp, mkstemp
+from unittest.case import SkipTest
+from warnings import WarningMessage
+import pprint
+import sysconfig
+
+import numpy as np
+from numpy.core import (
+     intp, float32, empty, arange, array_repr, ndarray, isnat, array)
+from numpy import isfinite, isnan, isinf
+import numpy.linalg._umath_linalg
+
+from io import StringIO
+
+__all__ = [
+        'assert_equal', 'assert_almost_equal', 'assert_approx_equal',
+        'assert_array_equal', 'assert_array_less', 'assert_string_equal',
+        'assert_array_almost_equal', 'assert_raises', 'build_err_msg',
+        'decorate_methods', 'jiffies', 'memusage', 'print_assert_equal',
+        'rundocs', 'runstring', 'verbose', 'measure',
+        'assert_', 'assert_array_almost_equal_nulp', 'assert_raises_regex',
+        'assert_array_max_ulp', 'assert_warns', 'assert_no_warnings',
+        'assert_allclose', 'IgnoreException', 'clear_and_catch_warnings',
+        'SkipTest', 'KnownFailureException', 'temppath', 'tempdir', 'IS_PYPY',
+        'HAS_REFCOUNT', "IS_WASM", 'suppress_warnings', 'assert_array_compare',
+        'assert_no_gc_cycles', 'break_cycles', 'HAS_LAPACK64', 'IS_PYSTON',
+        '_OLD_PROMOTION', 'IS_MUSL', '_SUPPORTS_SVE'
+        ]
+
+
class KnownFailureException(Exception):
    """Raise this exception to mark a test as a known failing test."""


# Old name, kept for backwards compatibility.
KnownFailureTest = KnownFailureException
# Global verbosity level consulted by helpers in this module.
verbose = 0

# Platform/interpreter feature flags used to skip or adapt tests.
IS_WASM = platform.machine() in ["wasm32", "wasm64"]
IS_PYPY = sys.implementation.name == 'pypy'
IS_PYSTON = hasattr(sys, "pyston_version_info")
# sys.getrefcount is absent (or meaningless) on some alternative
# interpreters, so refcount-based tests must be skipped there.
HAS_REFCOUNT = getattr(sys, 'getrefcount', None) is not None and not IS_PYSTON
HAS_LAPACK64 = numpy.linalg._umath_linalg._ilp64

# True while numpy's promotion state is set to the legacy rules.
_OLD_PROMOTION = lambda: np._get_promotion_state() == 'legacy'

IS_MUSL = False
# alternate way is
# from packaging.tags import sys_tags
#     _tags = list(sys_tags())
#     if 'musllinux' in _tags[0].platform:
_v = sysconfig.get_config_var('HOST_GNU_TYPE') or ''
if 'musl' in _v:
    IS_MUSL = True
+
+
def assert_(val, msg=''):
    """
    Assert that works in release mode.
    Accepts callable msg to allow deferring evaluation until failure.

    The Python built-in ``assert`` does not work when executing code in
    optimized mode (the ``-O`` flag) - no byte-code is generated for it.

    For documentation on usage, refer to the Python documentation.

    """
    __tracebackhide__ = True  # Hide traceback for py.test
    if val:
        return
    # A callable msg is only evaluated on failure; a plain string falls
    # through via the TypeError from trying to call it.
    try:
        resolved = msg()
    except TypeError:
        resolved = msg
    raise AssertionError(resolved)
+
+
# Platform-specific `memusage`: Windows uses the performance-counter API,
# Linux reads /proc, everything else is unimplemented.
if os.name == 'nt':
    # Code "stolen" from enthought/debug/memusage.py
    def GetPerformanceAttributes(object, counter, instance=None,
                                 inum=-1, format=None, machine=None):
        # NOTE: Many counters require 2 samples to give accurate results,
        # including "% Processor Time" (as by definition, at any instant, a
        # thread's CPU usage is either 0 or 100).  To read counters like this,
        # you should copy this function, but keep the counter open, and call
        # CollectQueryData() each time you need to know.
        # See http://msdn.microsoft.com/library/en-us/dnperfmo/html/perfmonpt2.asp (dead link)
        # My older explanation for this was that the "AddCounter" process
        # forced the CPU to 100%, but the above makes more sense :)
        import win32pdh
        if format is None:
            format = win32pdh.PDH_FMT_LONG
        path = win32pdh.MakeCounterPath( (machine, object, instance, None,
                                          inum, counter))
        hq = win32pdh.OpenQuery()
        try:
            hc = win32pdh.AddCounter(hq, path)
            try:
                win32pdh.CollectQueryData(hq)
                type, val = win32pdh.GetFormattedCounterValue(hc, format)
                return val
            finally:
                # Nested finally blocks guarantee counter and query teardown
                # even when collection fails.
                win32pdh.RemoveCounter(hc)
        finally:
            win32pdh.CloseQuery(hq)

    def memusage(processName="python", instance=0):
        # from win32pdhutil, part of the win32all package
        import win32pdh
        return GetPerformanceAttributes("Process", "Virtual Bytes",
                                        processName, instance,
                                        win32pdh.PDH_FMT_LONG, None)
elif sys.platform[:5] == 'linux':

    def memusage(_proc_pid_stat=f'/proc/{os.getpid()}/stat'):
        """
        Return virtual memory size in bytes of the running python.

        """
        try:
            with open(_proc_pid_stat) as f:
                l = f.readline().split(' ')
            # Field 23 (index 22) of /proc/<pid>/stat is vsize, in bytes.
            return int(l[22])
        except Exception:
            # Best-effort: returns None when /proc is unreadable.
            return
else:
    def memusage():
        """
        Return memory usage of running python. [Not implemented]

        """
        raise NotImplementedError
+
+
# Platform-specific `jiffies`: Linux reads the scheduler stats from /proc;
# elsewhere wall-clock time is used as a (less accurate) stand-in.
if sys.platform[:5] == 'linux':
    def jiffies(_proc_pid_stat=f'/proc/{os.getpid()}/stat', _load_time=[]):
        """
        Return number of jiffies elapsed.

        Return number of jiffies (1/100ths of a second) that this
        process has been scheduled in user mode. See man 5 proc.

        """
        import time
        # _load_time is a mutable default used intentionally as a
        # per-process cache of the first-call timestamp.
        if not _load_time:
            _load_time.append(time.time())
        try:
            with open(_proc_pid_stat) as f:
                l = f.readline().split(' ')
            # Field 14 (index 13) of /proc/<pid>/stat is utime.
            return int(l[13])
        except Exception:
            # Fall back to elapsed wall-clock time if /proc is unreadable.
            return int(100*(time.time()-_load_time[0]))
else:
    # os.getpid is not in all platforms available.
    # Using time is safe but inaccurate, especially when process
    # was suspended or sleeping.
    def jiffies(_load_time=[]):
        """
        Return number of jiffies elapsed.

        Return number of jiffies (1/100ths of a second) that this
        process has been scheduled in user mode. See man 5 proc.

        """
        import time
        if not _load_time:
            _load_time.append(time.time())
        return int(100*(time.time()-_load_time[0]))
+
+
def build_err_msg(arrays, err_msg, header='Items are not equal:',
                  verbose=True, names=('ACTUAL', 'DESIRED'), precision=8):
    """Build the standard multi-line failure message for comparison asserts.

    A short single-line `err_msg` is appended to the header line; longer or
    multi-line messages go on their own line.  With `verbose`, a truncated
    repr of each array is appended, labelled by `names`.
    """
    lines = ['\n' + header]
    if err_msg:
        fits_inline = '\n' not in err_msg and len(err_msg) < 79 - len(header)
        if fits_inline:
            lines = [f'{lines[0]} {err_msg}']
        else:
            lines.append(err_msg)
    if verbose:
        for i, a in enumerate(arrays):
            # precision only applies to ndarrays; everything else uses repr.
            r_func = (partial(array_repr, precision=precision)
                      if isinstance(a, ndarray) else repr)
            try:
                r = r_func(a)
            except Exception as exc:
                r = f'[repr failed for <{type(a).__name__}>: {exc}]'
            if r.count('\n') > 3:
                # Keep the message readable: only the first three repr lines.
                r = '\n'.join(r.splitlines()[:3]) + '...'
            lines.append(f' {names[i]}: {r}')
    return '\n'.join(lines)
+
+
def assert_equal(actual, desired, err_msg='', verbose=True):
    """
    Raises an AssertionError if two objects are not equal.

    Given two objects (scalars, lists, tuples, dictionaries or numpy arrays),
    check that all elements of these objects are equal. An exception is raised
    at the first conflicting values.

    When one of `actual` and `desired` is a scalar and the other is array_like,
    the function checks that each element of the array_like object is equal to
    the scalar.

    This function handles NaN comparisons as if NaN was a "normal" number.
    That is, AssertionError is not raised if both objects have NaNs in the same
    positions.  This is in contrast to the IEEE standard on NaNs, which says
    that NaN compared to anything must return False.

    Parameters
    ----------
    actual : array_like
        The object to check.
    desired : array_like
        The expected object.
    err_msg : str, optional
        The error message to be printed in case of failure.
    verbose : bool, optional
        If True, the conflicting values are appended to the error message.

    Raises
    ------
    AssertionError
        If actual and desired are not equal.

    Examples
    --------
    >>> np.testing.assert_equal([4,5], [4,6])
    Traceback (most recent call last):
        ...
    AssertionError:
    Items are not equal:
    item=1
     ACTUAL: 5
     DESIRED: 6

    The following comparison does not raise an exception.  There are NaNs
    in the inputs, but they are in the same positions.

    >>> np.testing.assert_equal(np.array([1.0, 2.0, np.nan]), [1, 2, np.nan])

    """
    __tracebackhide__ = True  # Hide traceback for py.test
    if isinstance(desired, dict):
        if not isinstance(actual, dict):
            raise AssertionError(repr(type(actual)))
        assert_equal(len(actual), len(desired), err_msg, verbose)
        # Iterate items and use the value directly (previously the value was
        # bound to an unused variable and `desired[k]` looked up again).
        for k, v in desired.items():
            if k not in actual:
                raise AssertionError(repr(k))
            assert_equal(actual[k], v, f'key={k!r}\n{err_msg}',
                         verbose)
        return
    if isinstance(desired, (list, tuple)) and isinstance(actual, (list, tuple)):
        assert_equal(len(actual), len(desired), err_msg, verbose)
        for k in range(len(desired)):
            assert_equal(actual[k], desired[k], f'item={k!r}\n{err_msg}',
                         verbose)
        return
    from numpy.core import ndarray, isscalar, signbit
    from numpy.lib import iscomplexobj, real, imag
    if isinstance(actual, ndarray) or isinstance(desired, ndarray):
        # Array comparisons are delegated entirely.
        return assert_array_equal(actual, desired, err_msg, verbose)
    msg = build_err_msg([actual, desired], err_msg, verbose=verbose)

    # Handle complex numbers: separate into real/imag to handle
    # nan/inf/negative zero correctly
    # XXX: catch ValueError for subclasses of ndarray where iscomplex fail
    try:
        usecomplex = iscomplexobj(actual) or iscomplexobj(desired)
    except (ValueError, TypeError):
        usecomplex = False

    if usecomplex:
        if iscomplexobj(actual):
            actualr = real(actual)
            actuali = imag(actual)
        else:
            actualr = actual
            actuali = 0
        if iscomplexobj(desired):
            desiredr = real(desired)
            desiredi = imag(desired)
        else:
            desiredr = desired
            desiredi = 0
        try:
            assert_equal(actualr, desiredr)
            assert_equal(actuali, desiredi)
        except AssertionError:
            raise AssertionError(msg)

    # isscalar test to check cases such as [np.nan] != np.nan
    if isscalar(desired) != isscalar(actual):
        raise AssertionError(msg)

    try:
        isdesnat = isnat(desired)
        isactnat = isnat(actual)
        dtypes_match = (np.asarray(desired).dtype.type ==
                        np.asarray(actual).dtype.type)
        if isdesnat and isactnat:
            # If both are NaT (and have the same dtype -- datetime or
            # timedelta) they are considered equal.
            if dtypes_match:
                return
            else:
                raise AssertionError(msg)

    except (TypeError, ValueError, NotImplementedError):
        pass

    # Inf/nan/negative zero handling
    try:
        isdesnan = isnan(desired)
        isactnan = isnan(actual)
        if isdesnan and isactnan:
            return  # both nan, so equal

        # handle signed zero specially for floats
        array_actual = np.asarray(actual)
        array_desired = np.asarray(desired)
        if (array_actual.dtype.char in 'Mm' or
                array_desired.dtype.char in 'Mm'):
            # version 1.18
            # until this version, isnan failed for datetime64 and timedelta64.
            # Now it succeeds but comparison to scalar with a different type
            # emits a DeprecationWarning.
            # Avoid that by skipping the next check
            raise NotImplementedError('cannot compare to a scalar '
                                      'with a different type')

        if desired == 0 and actual == 0:
            # Distinguish +0.0 from -0.0 even though they compare equal.
            if not signbit(desired) == signbit(actual):
                raise AssertionError(msg)

    except (TypeError, ValueError, NotImplementedError):
        pass

    try:
        # Explicitly use __eq__ for comparison, gh-2552
        if not (desired == actual):
            raise AssertionError(msg)

    except (DeprecationWarning, FutureWarning) as e:
        # this handles the case when the two types are not even comparable
        if 'elementwise == comparison' in e.args[0]:
            raise AssertionError(msg)
        else:
            raise
+
+
def print_assert_equal(test_string, actual, desired):
    """
    Test if two objects are equal, and print an error message if test fails.

    The test is performed with ``actual == desired``.

    Parameters
    ----------
    test_string : str
        The message supplied to AssertionError.
    actual : object
        The object to test for equality against `desired`.
    desired : object
        The expected result.

    Examples
    --------
    >>> np.testing.print_assert_equal('Test XYZ of func xyz', [0, 1], [0, 1])
    >>> np.testing.print_assert_equal('Test XYZ of func xyz', [0, 1], [0, 2])
    Traceback (most recent call last):
    ...
    AssertionError: Test XYZ of func xyz failed
    ACTUAL:
    [0, 1]
    DESIRED:
    [0, 2]

    """
    __tracebackhide__ = True  # Hide traceback for py.test
    # NOTE: relies on the module-level `import pprint` (the redundant
    # function-local import was removed).
    if not (actual == desired):
        msg = StringIO()
        msg.write(test_string)
        msg.write(' failed\nACTUAL: \n')
        pprint.pprint(actual, msg)
        msg.write('DESIRED: \n')
        pprint.pprint(desired, msg)
        raise AssertionError(msg.getvalue())
+
+
@np._no_nep50_warning()
def assert_almost_equal(actual, desired, decimal=7, err_msg='', verbose=True):
    """
    Raises an AssertionError if two items are not equal up to desired
    precision.

    .. note:: It is recommended to use one of `assert_allclose`,
              `assert_array_almost_equal_nulp` or `assert_array_max_ulp`
              instead of this function for more consistent floating point
              comparisons.

    The test verifies that the elements of `actual` and `desired` satisfy.

        ``abs(desired-actual) < float64(1.5 * 10**(-decimal))``

    That is a looser test than originally documented, but agrees with what the
    actual implementation in `assert_array_almost_equal` did up to rounding
    vagaries. An exception is raised at conflicting values. For ndarrays this
    delegates to assert_array_almost_equal

    Parameters
    ----------
    actual : array_like
        The object to check.
    desired : array_like
        The expected object.
    decimal : int, optional
        Desired precision, default is 7.
    err_msg : str, optional
        The error message to be printed in case of failure.
    verbose : bool, optional
        If True, the conflicting values are appended to the error message.

    Raises
    ------
    AssertionError
      If actual and desired are not equal up to specified precision.

    See Also
    --------
    assert_allclose: Compare two array_like objects for equality with desired
                     relative and/or absolute precision.
    assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal

    Examples
    --------
    >>> from numpy.testing import assert_almost_equal
    >>> assert_almost_equal(2.3333333333333, 2.33333334)
    >>> assert_almost_equal(2.3333333333333, 2.33333334, decimal=10)
    Traceback (most recent call last):
        ...
    AssertionError:
    Arrays are not almost equal to 10 decimals
     ACTUAL: 2.3333333333333
     DESIRED: 2.33333334

    >>> assert_almost_equal(np.array([1.0,2.3333333333333]),
    ...                     np.array([1.0,2.33333334]), decimal=9)
    Traceback (most recent call last):
        ...
    AssertionError:
    Arrays are not almost equal to 9 decimals
    <BLANKLINE>
    Mismatched elements: 1 / 2 (50%)
    Max absolute difference: 6.66669964e-09
    Max relative difference: 2.85715698e-09
     x: array([1.         , 2.333333333])
     y: array([1.        , 2.33333334])

    """
    __tracebackhide__ = True  # Hide traceback for py.test
    from numpy.core import ndarray
    from numpy.lib import iscomplexobj, real, imag

    # Handle complex numbers: separate into real/imag to handle
    # nan/inf/negative zero correctly
    # XXX: catch ValueError for subclasses of ndarray where iscomplex fail
    try:
        usecomplex = iscomplexobj(actual) or iscomplexobj(desired)
    except ValueError:
        usecomplex = False

    def _build_err_msg():
        header = ('Arrays are not almost equal to %d decimals' % decimal)
        return build_err_msg([actual, desired], err_msg, verbose=verbose,
                             header=header)

    if usecomplex:
        if iscomplexobj(actual):
            actualr = real(actual)
            actuali = imag(actual)
        else:
            actualr = actual
            actuali = 0
        if iscomplexobj(desired):
            desiredr = real(desired)
            desiredi = imag(desired)
        else:
            desiredr = desired
            desiredi = 0
        try:
            # Compare real and imaginary parts independently.
            assert_almost_equal(actualr, desiredr, decimal=decimal)
            assert_almost_equal(actuali, desiredi, decimal=decimal)
        except AssertionError:
            raise AssertionError(_build_err_msg())

    if isinstance(actual, (ndarray, tuple, list)) \
            or isinstance(desired, (ndarray, tuple, list)):
        # Sequence/array inputs are delegated entirely.
        return assert_array_almost_equal(actual, desired, decimal, err_msg)
    try:
        # If one of desired/actual is not finite, handle it specially here:
        # check that both are nan if any is a nan, and test for equality
        # otherwise
        if not (isfinite(desired) and isfinite(actual)):
            if isnan(desired) or isnan(actual):
                if not (isnan(desired) and isnan(actual)):
                    raise AssertionError(_build_err_msg())
            else:
                if not desired == actual:
                    raise AssertionError(_build_err_msg())
            return
    except (NotImplementedError, TypeError):
        pass
    if abs(desired - actual) >= np.float64(1.5 * 10.0**(-decimal)):
        raise AssertionError(_build_err_msg())
+
+
@np._no_nep50_warning()
def assert_approx_equal(actual, desired, significant=7, err_msg='',
                        verbose=True):
    """
    Raise an AssertionError unless two items agree to `significant` digits.

    .. note:: It is recommended to use one of `assert_allclose`,
              `assert_array_almost_equal_nulp` or `assert_array_max_ulp`
              instead of this function for more consistent floating point
              comparisons.

    Two numbers count as approximately equal when the stated number of
    significant digits agrees between them.

    Parameters
    ----------
    actual : scalar
        The object to check.
    desired : scalar
        The expected object.
    significant : int, optional
        Desired precision, default is 7.
    err_msg : str, optional
        The error message to be printed in case of failure.
    verbose : bool, optional
        If True, the conflicting values are appended to the error message.

    Raises
    ------
    AssertionError
      If actual and desired are not equal up to specified precision.

    See Also
    --------
    assert_allclose: Compare two array_like objects for equality with desired
                     relative and/or absolute precision.
    assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal

    Examples
    --------
    >>> np.testing.assert_approx_equal(0.12345677777777e-20, 0.1234567e-20)
    >>> np.testing.assert_approx_equal(0.12345670e-20, 0.12345671e-20,
    ...                                significant=8)
    >>> np.testing.assert_approx_equal(0.12345670e-20, 0.12345672e-20,
    ...                                significant=8)
    Traceback (most recent call last):
        ...
    AssertionError:
    Items are not equal to 8 significant digits:
     ACTUAL: 1.234567e-21
     DESIRED: 1.2345672e-21

    the evaluated condition that raises the exception is

    >>> abs(0.12345670e-20/1e-21 - 0.12345672e-20/1e-21) >= 10**-(8-1)
    True

    """
    __tracebackhide__ = True  # Hide traceback for py.test
    import numpy as np

    actual, desired = float(actual), float(desired)
    if desired == actual:
        return
    # Divide out the shared order of magnitude so that both values end up
    # in the range (-10.0, 10.0) before the significant-digit comparison.
    with np.errstate(invalid='ignore'):
        magnitude = 0.5 * (np.abs(desired) + np.abs(actual))
        magnitude = np.power(10, np.floor(np.log10(magnitude)))
    try:
        scaled_desired = desired / magnitude
    except ZeroDivisionError:
        scaled_desired = 0.0
    try:
        scaled_actual = actual / magnitude
    except ZeroDivisionError:
        scaled_actual = 0.0
    msg = build_err_msg(
        [actual, desired], err_msg,
        header='Items are not equal to %d significant digits:' % significant,
        verbose=verbose)
    try:
        # Non-finite values need special treatment: NaNs must pair with
        # NaNs, and infinities must compare exactly equal.
        if not (isfinite(desired) and isfinite(actual)):
            if isnan(desired) or isnan(actual):
                if not (isnan(desired) and isnan(actual)):
                    raise AssertionError(msg)
            elif not desired == actual:
                raise AssertionError(msg)
            return
    except (TypeError, NotImplementedError):
        pass
    if np.abs(scaled_desired - scaled_actual) >= np.power(10., -(significant - 1)):
        raise AssertionError(msg)
+
+
@np._no_nep50_warning()
def assert_array_compare(comparison, x, y, err_msg='', verbose=True, header='',
                         precision=6, equal_nan=True, equal_inf=True,
                         *, strict=False):
    """
    Shared implementation behind the array assertion helpers
    (``assert_array_equal``, ``assert_array_almost_equal``,
    ``assert_array_less``, ``assert_allclose``).

    Parameters
    ----------
    comparison : callable
        Element-wise predicate applied to ``x`` and ``y`` (e.g.
        ``operator.__eq__``); the assertion passes when it is True
        everywhere, after NaN/inf/NaT positions have been matched up.
    x, y : array_like
        The two objects to compare; converted with ``np.asanyarray``.
    err_msg : str
        Extra text prepended to the failure message.
    verbose : bool
        If True, conflicting values are appended to the error message.
    header : str
        First line of the failure message.
    precision : int
        Print precision used when formatting mismatching values.
    equal_nan : bool
        If True, NaNs (NaTs for datetime/timedelta data) must occur at
        identical positions in both arrays.
    equal_inf : bool
        If True, +inf and -inf must occur at identical positions in both
        arrays.
    strict : bool
        If True, require identical shapes and dtypes; otherwise a scalar
        on either side is broadcast against the other operand.

    Raises
    ------
    AssertionError
        If the arrays do not satisfy ``comparison`` everywhere.
    """
    __tracebackhide__ = True  # Hide traceback for py.test
    from numpy.core import (array2string, isnan, inf, bool_, errstate,
                            all, max, object_)

    x = np.asanyarray(x)
    y = np.asanyarray(y)

    # original array for output formatting
    ox, oy = x, y

    def isnumber(x):
        # dtype kind codes for bool, signed/unsigned ints, floats, complex
        return x.dtype.char in '?bhilqpBHILQPefdgFDG'

    def istime(x):
        # datetime64 ('M') or timedelta64 ('m')
        return x.dtype.char in "Mm"

    def func_assert_same_pos(x, y, func=isnan, hasval='nan'):
        """Handling nan/inf.

        Combine results of running func on x and y, checking that they are True
        at the same locations.

        """
        __tracebackhide__ = True  # Hide traceback for py.test

        x_id = func(x)
        y_id = func(y)
        # We include work-arounds here to handle three types of slightly
        # pathological ndarray subclasses:
        # (1) all() on `masked` array scalars can return masked arrays, so we
        #     use != True
        # (2) __eq__ on some ndarray subclasses returns Python booleans
        #     instead of element-wise comparisons, so we cast to bool_() and
        #     use isinstance(..., bool) checks
        # (3) subclasses with bare-bones __array_function__ implementations may
        #     not implement np.all(), so favor using the .all() method
        # We are not committed to supporting such subclasses, but it's nice to
        # support them if possible.
        if bool_(x_id == y_id).all() != True:
            msg = build_err_msg([x, y],
                                err_msg + '\nx and y %s location mismatch:'
                                % (hasval), verbose=verbose, header=header,
                                names=('x', 'y'), precision=precision)
            raise AssertionError(msg)
        # If there is a scalar, then here we know the array has the same
        # flag as it everywhere, so we should return the scalar flag.
        if isinstance(x_id, bool) or x_id.ndim == 0:
            return bool_(x_id)
        elif isinstance(y_id, bool) or y_id.ndim == 0:
            return bool_(y_id)
        else:
            return y_id

    try:
        # Shape/dtype compatibility check; `strict` disables the scalar
        # broadcasting exception.
        if strict:
            cond = x.shape == y.shape and x.dtype == y.dtype
        else:
            cond = (x.shape == () or y.shape == ()) or x.shape == y.shape
        if not cond:
            if x.shape != y.shape:
                reason = f'\n(shapes {x.shape}, {y.shape} mismatch)'
            else:
                reason = f'\n(dtypes {x.dtype}, {y.dtype} mismatch)'
            msg = build_err_msg([x, y],
                                err_msg
                                + reason,
                                verbose=verbose, header=header,
                                names=('x', 'y'), precision=precision)
            raise AssertionError(msg)

        # `flagged` marks positions holding NaN/inf/NaT in *both* arrays;
        # those positions are verified to match and then excluded from the
        # element-wise comparison below.
        flagged = bool_(False)
        if isnumber(x) and isnumber(y):
            if equal_nan:
                flagged = func_assert_same_pos(x, y, func=isnan, hasval='nan')

            if equal_inf:
                flagged |= func_assert_same_pos(x, y,
                                                func=lambda xy: xy == +inf,
                                                hasval='+inf')
                flagged |= func_assert_same_pos(x, y,
                                                func=lambda xy: xy == -inf,
                                                hasval='-inf')

        elif istime(x) and istime(y):
            # If one is datetime64 and the other timedelta64 there is no point
            if equal_nan and x.dtype.type == y.dtype.type:
                flagged = func_assert_same_pos(x, y, func=isnat, hasval="NaT")

        if flagged.ndim > 0:
            x, y = x[~flagged], y[~flagged]
            # Only do the comparison if actual values are left
            if x.size == 0:
                return
        elif flagged:
            # no sense doing comparison if everything is flagged.
            return

        # Element-wise predicate on the remaining (finite) values.
        val = comparison(x, y)

        if isinstance(val, bool):
            cond = val
            reduced = array([val])
        else:
            reduced = val.ravel()
            cond = reduced.all()

        # The below comparison is a hack to ensure that fully masked
        # results, for which val.ravel().all() returns np.ma.masked,
        # do not trigger a failure (np.ma.masked != True evaluates as
        # np.ma.masked, which is falsy).
        if cond != True:
            n_mismatch = reduced.size - reduced.sum(dtype=intp)
            n_elements = flagged.size if flagged.ndim != 0 else reduced.size
            percent_mismatch = 100 * n_mismatch / n_elements
            remarks = [
                'Mismatched elements: {} / {} ({:.3g}%)'.format(
                    n_mismatch, n_elements, percent_mismatch)]

            with errstate(all='ignore'):
                # ignore errors for non-numeric types
                with contextlib.suppress(TypeError):
                    error = abs(x - y)
                    if np.issubdtype(x.dtype, np.unsignedinteger):
                        # unsigned subtraction wraps around; take the smaller
                        # of the two one-sided differences
                        error2 = abs(y - x)
                        np.minimum(error, error2, out=error)
                    max_abs_error = max(error)
                    if getattr(error, 'dtype', object_) == object_:
                        remarks.append('Max absolute difference: '
                                       + str(max_abs_error))
                    else:
                        remarks.append('Max absolute difference: '
                                       + array2string(max_abs_error))

                    # note: this definition of relative error matches that one
                    # used by assert_allclose (found in np.isclose)
                    # Filter values where the divisor would be zero
                    nonzero = bool_(y != 0)
                    if all(~nonzero):
                        max_rel_error = array(inf)
                    else:
                        max_rel_error = max(error[nonzero] / abs(y[nonzero]))
                    if getattr(error, 'dtype', object_) == object_:
                        remarks.append('Max relative difference: '
                                       + str(max_rel_error))
                    else:
                        remarks.append('Max relative difference: '
                                       + array2string(max_rel_error))

            err_msg += '\n' + '\n'.join(remarks)
            msg = build_err_msg([ox, oy], err_msg,
                                verbose=verbose, header=header,
                                names=('x', 'y'), precision=precision)
            raise AssertionError(msg)
    except ValueError:
        # e.g. a broadcasting failure inside `comparison`; surface the
        # original traceback as part of the error message.
        import traceback
        efmt = traceback.format_exc()
        header = f'error during assertion:\n\n{efmt}\n\n{header}'

        msg = build_err_msg([x, y], err_msg, verbose=verbose, header=header,
                            names=('x', 'y'), precision=precision)
        raise ValueError(msg)
+
+
def assert_array_equal(x, y, err_msg='', verbose=True, *, strict=False):
    """
    Raise an AssertionError unless two array_like objects are equal.

    The two objects must have equal shapes and all their elements must be
    equal (see the Notes for the special treatment of a scalar operand).
    A shape mismatch or any conflicting value raises an exception. Unlike
    the usual numpy semantics, NaNs here compare like ordinary numbers:
    no error is raised when both operands carry NaNs at the same
    positions.

    The usual caution for verifying equality with floating point numbers
    applies.

    Parameters
    ----------
    x : array_like
        The actual object to check.
    y : array_like
        The desired, expected object.
    err_msg : str, optional
        The error message to be printed in case of failure.
    verbose : bool, optional
        If True, the conflicting values are appended to the error message.
    strict : bool, optional
        If True, an AssertionError is raised whenever the shapes or the
        dtypes of the operands differ, and the scalar special case from
        the Notes section is turned off.

        .. versionadded:: 1.24.0

    Raises
    ------
    AssertionError
        If actual and desired objects are not equal.

    See Also
    --------
    assert_allclose: Compare two array_like objects for equality with desired
                     relative and/or absolute precision.
    assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal

    Notes
    -----
    When exactly one of `x` and `y` is a scalar, every element of the
    array_like operand is compared against that scalar. Pass
    ``strict=True`` to disable this behaviour.

    Examples
    --------
    The first assert does not raise an exception:

    >>> np.testing.assert_array_equal([1.0,2.33333,np.nan],
    ...                               [np.exp(0),2.33333, np.nan])

    Assert fails with numerical imprecision with floats:

    >>> np.testing.assert_array_equal([1.0,np.pi,np.nan],
    ...                               [1, np.sqrt(np.pi)**2, np.nan])
    Traceback (most recent call last):
        ...
    AssertionError:
    Arrays are not equal
    <BLANKLINE>
    Mismatched elements: 1 / 3 (33.3%)
    Max absolute difference: 4.4408921e-16
    Max relative difference: 1.41357986e-16
     x: array([1.      , 3.141593,      nan])
     y: array([1.      , 3.141593,      nan])

    Use `assert_allclose` or one of the nulp (number of floating point values)
    functions for these cases instead:

    >>> np.testing.assert_allclose([1.0,np.pi,np.nan],
    ...                            [1, np.sqrt(np.pi)**2, np.nan],
    ...                            rtol=1e-10, atol=0)

    As mentioned in the Notes section, `assert_array_equal` has special
    handling for scalars. Here the test checks that each value in `x` is 3:

    >>> x = np.full((2, 5), fill_value=3)
    >>> np.testing.assert_array_equal(x, 3)

    Use `strict` to raise an AssertionError when comparing a scalar with an
    array:

    >>> np.testing.assert_array_equal(x, 3, strict=True)
    Traceback (most recent call last):
        ...
    AssertionError:
    Arrays are not equal
    <BLANKLINE>
    (shapes (2, 5), () mismatch)
     x: array([[3, 3, 3, 3, 3],
           [3, 3, 3, 3, 3]])
     y: array(3)

    The `strict` parameter also ensures that the array data types match:

    >>> x = np.array([2, 2, 2])
    >>> y = np.array([2., 2., 2.], dtype=np.float32)
    >>> np.testing.assert_array_equal(x, y, strict=True)
    Traceback (most recent call last):
        ...
    AssertionError:
    Arrays are not equal
    <BLANKLINE>
    (dtypes int64, float32 mismatch)
     x: array([2, 2, 2])
     y: array([2., 2., 2.], dtype=float32)
    """
    __tracebackhide__ = True  # Hide traceback for py.test
    # Delegate to the shared comparison engine with plain equality.
    assert_array_compare(
        operator.__eq__, x, y,
        err_msg=err_msg,
        verbose=verbose,
        header='Arrays are not equal',
        strict=strict,
    )
+
+
@np._no_nep50_warning()
def assert_array_almost_equal(x, y, decimal=6, err_msg='', verbose=True):
    """
    Raise an AssertionError unless two objects are equal up to the desired
    precision.

    .. note:: It is recommended to use one of `assert_allclose`,
              `assert_array_almost_equal_nulp` or `assert_array_max_ulp`
              instead of this function for more consistent floating point
              comparisons.

    Shapes must be identical, and each pair of elements of ``actual`` and
    ``desired`` must satisfy

        ``abs(desired-actual) < 1.5 * 10**(-decimal)``

    This is a looser criterion than the one originally documented, but it
    matches what the implementation always did up to rounding vagaries. A
    shape mismatch or any conflicting value raises an exception. Unlike the
    usual numpy semantics, NaNs here compare like ordinary numbers: no
    error is raised when both operands carry NaNs at the same positions.

    Parameters
    ----------
    x : array_like
        The actual object to check.
    y : array_like
        The desired, expected object.
    decimal : int, optional
        Desired precision, default is 6.
    err_msg : str, optional
      The error message to be printed in case of failure.
    verbose : bool, optional
        If True, the conflicting values are appended to the error message.

    Raises
    ------
    AssertionError
        If actual and desired are not equal up to specified precision.

    See Also
    --------
    assert_allclose: Compare two array_like objects for equality with desired
                     relative and/or absolute precision.
    assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal

    Examples
    --------
    the first assert does not raise an exception

    >>> np.testing.assert_array_almost_equal([1.0,2.333,np.nan],
    ...                                      [1.0,2.333,np.nan])

    >>> np.testing.assert_array_almost_equal([1.0,2.33333,np.nan],
    ...                                      [1.0,2.33339,np.nan], decimal=5)
    Traceback (most recent call last):
        ...
    AssertionError:
    Arrays are not almost equal to 5 decimals
    <BLANKLINE>
    Mismatched elements: 1 / 3 (33.3%)
    Max absolute difference: 6.e-05
    Max relative difference: 2.57136612e-05
     x: array([1.     , 2.33333,     nan])
     y: array([1.     , 2.33339,     nan])

    >>> np.testing.assert_array_almost_equal([1.0,2.33333,np.nan],
    ...                                      [1.0,2.33333, 5], decimal=5)
    Traceback (most recent call last):
        ...
    AssertionError:
    Arrays are not almost equal to 5 decimals
    <BLANKLINE>
    x and y nan location mismatch:
     x: array([1.     , 2.33333,     nan])
     y: array([1.     , 2.33333, 5.     ])

    """
    __tracebackhide__ = True  # Hide traceback for py.test
    from numpy.core import number, float_, result_type
    from numpy.core.numerictypes import issubdtype
    from numpy.core.fromnumeric import any as npany

    def compare(x, y):
        try:
            if npany(isinf(x)) or npany(isinf(y)):
                x_inf = isinf(x)
                y_inf = isinf(y)
                # Infinities must occupy identical positions.
                if not (x_inf == y_inf).all():
                    return False
                # A single pair of infinities: compare them directly.
                if x.size == y.size == 1:
                    return x == y
                # Drop the matching infinities; compare the finite remainder.
                x = x[~x_inf]
                y = y[~y_inf]
        except (TypeError, NotImplementedError):
            pass

        # make sure y is an inexact type to avoid abs(MIN_INT); will cause
        # casting of x later.
        y = np.asanyarray(y, result_type(y, 1.))
        diff = abs(x - y)

        if not issubdtype(diff.dtype, number):
            diff = diff.astype(float_)  # handle object arrays

        return diff < 1.5 * 10.0**(-decimal)

    assert_array_compare(compare, x, y, err_msg=err_msg, verbose=verbose,
             header=('Arrays are not almost equal to %d decimals' % decimal),
             precision=decimal)
+
+
def assert_array_less(x, y, err_msg='', verbose=True):
    """
    Raise an AssertionError unless two array_like objects are ordered by
    strict less-than.

    The two objects must have equal shapes, and every element of the first
    must be strictly smaller than the corresponding element of the second.
    A shape mismatch or an incorrectly ordered value raises an exception,
    except that a zero-dimensional operand never triggers the shape check.
    Unlike the usual numpy semantics, NaNs are compared here: no error is
    raised when both operands carry NaNs at the same positions.

    Parameters
    ----------
    x : array_like
      The smaller object to check.
    y : array_like
      The larger object to compare.
    err_msg : string
      The error message to be printed in case of failure.
    verbose : bool
        If True, the conflicting values are appended to the error message.

    Raises
    ------
    AssertionError
      If x is not strictly smaller than y, element-wise.

    See Also
    --------
    assert_array_equal: tests objects for equality
    assert_array_almost_equal: test objects for equality up to precision

    Examples
    --------
    >>> np.testing.assert_array_less([1.0, 1.0, np.nan], [1.1, 2.0, np.nan])
    >>> np.testing.assert_array_less([1.0, 1.0, np.nan], [1, 2.0, np.nan])
    Traceback (most recent call last):
        ...
    AssertionError:
    Arrays are not less-ordered
    <BLANKLINE>
    Mismatched elements: 1 / 3 (33.3%)
    Max absolute difference: 1.
    Max relative difference: 0.5
     x: array([ 1.,  1., nan])
     y: array([ 1.,  2., nan])

    >>> np.testing.assert_array_less([1.0, 4.0], 3)
    Traceback (most recent call last):
        ...
    AssertionError:
    Arrays are not less-ordered
    <BLANKLINE>
    Mismatched elements: 1 / 2 (50%)
    Max absolute difference: 2.
    Max relative difference: 0.66666667
     x: array([1., 4.])
     y: array(3)

    >>> np.testing.assert_array_less([1.0, 2.0, 3.0], [4])
    Traceback (most recent call last):
        ...
    AssertionError:
    Arrays are not less-ordered
    <BLANKLINE>
    (shapes (3,), (1,) mismatch)
     x: array([1., 2., 3.])
     y: array([4])

    """
    __tracebackhide__ = True  # Hide traceback for py.test
    # Delegate to the shared comparison engine with strict less-than;
    # +/-inf may legitimately appear on one side only, so equal_inf is off.
    assert_array_compare(
        operator.__lt__, x, y,
        err_msg=err_msg,
        verbose=verbose,
        header='Arrays are not less-ordered',
        equal_inf=False,
    )
+
+
def runstring(astr, dict):
    """Execute the code in string `astr` within the namespace `dict`."""
    exec(astr, dict)
+
+
def assert_string_equal(actual, desired):
    """
    Test if two strings are equal.

    Does nothing when the strings match; otherwise raises an
    AssertionError whose message contains the line-by-line diff between
    the two strings.

    Parameters
    ----------
    actual : str
        The string to test for equality against the expected string.
    desired : str
        The expected string.

    Examples
    --------
    >>> np.testing.assert_string_equal('abc', 'abc')
    >>> np.testing.assert_string_equal('abc', 'abcd')
    Traceback (most recent call last):
      File "<stdin>", line 1, in <module>
    ...
    AssertionError: Differences in strings:
    - abc+ abcd?    +

    """
    # delay import of difflib to reduce startup time
    __tracebackhide__ = True  # Hide traceback for py.test
    import difflib

    if not isinstance(actual, str):
        raise AssertionError(repr(type(actual)))
    if not isinstance(desired, str):
        raise AssertionError(repr(type(desired)))
    if desired == actual:
        return

    pending = list(difflib.Differ().compare(actual.splitlines(True),
                                            desired.splitlines(True)))
    changed = []
    # Walk the differ output, grouping each removal with its matching
    # addition (and any '? ' hint lines); keep only genuine changes.
    while pending:
        first = pending.pop(0)
        if first.startswith('  '):
            continue
        if not first.startswith('- '):
            raise AssertionError(repr(first))
        group = [first]
        second = pending.pop(0)
        if second.startswith('? '):
            group.append(second)
            second = pending.pop(0)
        if not second.startswith('+ '):
            raise AssertionError(repr(second))
        group.append(second)
        if pending:
            third = pending.pop(0)
            if third.startswith('? '):
                group.append(third)
            else:
                pending.insert(0, third)
        # Skip pairs whose text is actually identical.
        if second[2:] != first[2:]:
            changed.extend(group)

    if not changed:
        return
    msg = f"Differences in strings:\n{''.join(changed).rstrip()}"
    if actual != desired:
        raise AssertionError(msg)
+
+
def rundocs(filename=None, raise_on_error=True):
    """
    Run doctests found in the given file.

    By default `rundocs` raises an AssertionError on failure.

    Parameters
    ----------
    filename : str
        The path to the file for which the doctests are run. Defaults to
        the file of the caller.
    raise_on_error : bool
        Whether to raise an AssertionError when a doctest fails. Default is
        True.

    Notes
    -----
    The doctests can be run by the user/developer by adding the ``doctests``
    argument to the ``test()`` call. For example, to run all tests (including
    doctests) for `numpy.lib`:

    >>> np.lib.test(doctests=True)  # doctest: +SKIP
    """
    from numpy.distutils.misc_util import exec_mod_from_location
    import doctest
    if filename is None:
        # Default to the caller's file.
        caller = sys._getframe(1)
        filename = caller.f_globals['__file__']
    modname = os.path.splitext(os.path.basename(filename))[0]
    module = exec_mod_from_location(modname, filename)

    found = doctest.DocTestFinder().find(module)
    runner = doctest.DocTestRunner(verbose=False)

    # Collect the runner's report output only when we intend to raise.
    collected = []
    out = collected.append if raise_on_error else None

    for item in found:
        runner.run(item, out=out)

    if raise_on_error and runner.failures > 0:
        raise AssertionError("Some doctests failed:\n%s"
                             % "\n".join(collected))
+
+
+def check_support_sve():
+    """
+    gh-22982
+    """
+    
+    import subprocess
+    cmd = 'lscpu'
+    try:
+        output = subprocess.run(cmd, capture_output=True, text=True)
+        return 'sve' in output.stdout
+    except OSError:
+        return False
+
+
+_SUPPORTS_SVE = check_support_sve()
+
+#
+# assert_raises and assert_raises_regex are taken from unittest.
+#
import unittest


class _Dummy(unittest.TestCase):
    # Minimal TestCase subclass whose only purpose is to expose the
    # unittest assertion machinery (assertRaises / assertRaisesRegex)
    # to the module-level assert_raises* helpers below.
    def nop(self):
        # Placeholder: TestCase can only be instantiated with the name
        # of an existing method.
        pass


_d = _Dummy('nop')  # shared instance used by assert_raises / assert_raises_regex
+
+
def assert_raises(*args, **kwargs):
    """
    assert_raises(exception_class, callable, *args, **kwargs)
    assert_raises(exception_class)

    Fail unless calling `callable` with the given positional and keyword
    arguments raises an exception of class `exception_class`. An exception
    of any other type is not caught, so the test case is treated as having
    suffered an error, exactly as for any unexpected exception.

    Alternatively, `assert_raises` can be used as a context manager:

    >>> from numpy.testing import assert_raises
    >>> with assert_raises(ZeroDivisionError):
    ...     1 / 0

    is equivalent to

    >>> def div(x, y):
    ...     return x / y
    >>> assert_raises(ZeroDivisionError, div, 1, 0)

    """
    __tracebackhide__ = True  # Hide traceback for py.test
    # Delegate to unittest's implementation via the shared dummy TestCase.
    return _d.assertRaises(*args, **kwargs)
+
+
def assert_raises_regex(exception_class, expected_regexp, *args, **kwargs):
    """
    assert_raises_regex(exception_class, expected_regexp, callable, *args,
                        **kwargs)
    assert_raises_regex(exception_class, expected_regexp)

    Fail unless calling `callable` with the given positional and keyword
    arguments raises an exception of class `exception_class` whose message
    matches `expected_regexp`.

    Alternatively, can be used as a context manager like `assert_raises`.

    Notes
    -----
    .. versionadded:: 1.9.0

    """
    __tracebackhide__ = True  # Hide traceback for py.test
    # Delegate to unittest's implementation via the shared dummy TestCase.
    return _d.assertRaisesRegex(exception_class, expected_regexp,
                                *args, **kwargs)
+
+
+def decorate_methods(cls, decorator, testmatch=None):
+    """
+    Apply a decorator to all methods in a class matching a regular expression.
+
+    The given decorator is applied to all public methods of `cls` that are
+    matched by the regular expression `testmatch`
+    (``testmatch.search(methodname)``). Methods that are private, i.e. start
+    with an underscore, are ignored.
+
+    Parameters
+    ----------
+    cls : class
+        Class whose methods to decorate.
+    decorator : function
+        Decorator to apply to methods
+    testmatch : compiled regexp or str, optional
+        The regular expression. Default value is None, in which case the
+        nose default (``re.compile(r'(?:^|[\\b_\\.%s-])[Tt]est' % os.sep)``)
+        is used.
+        If `testmatch` is a string, it is compiled to a regular expression
+        first.
+
+    """
+    if testmatch is None:
+        testmatch = re.compile(r'(?:^|[\\b_\\.%s-])[Tt]est' % os.sep)
+    else:
+        testmatch = re.compile(testmatch)
+    cls_attr = cls.__dict__
+
+    # delayed import to reduce startup time
+    from inspect import isfunction
+
+    methods = [_m for _m in cls_attr.values() if isfunction(_m)]
+    for function in methods:
+        try:
+            if hasattr(function, 'compat_func_name'):
+                funcname = function.compat_func_name
+            else:
+                funcname = function.__name__
+        except AttributeError:
+            # not a function
+            continue
+        if testmatch.search(funcname) and not funcname.startswith('_'):
+            setattr(cls, funcname, decorator(function))
+    return
+
+
def measure(code_str, times=1, label=None):
    """
    Return elapsed time for executing code in the namespace of the caller.

    The supplied code string is compiled with the Python builtin ``compile``.
    The precision of the timing is 10 milli-seconds. If the code will execute
    fast on this timescale, it can be executed many times to get reasonable
    timing accuracy.

    Parameters
    ----------
    code_str : str
        The code to be timed.
    times : int, optional
        The number of times the code is executed. Default is 1. The code is
        only compiled once.
    label : str, optional
        A label to identify `code_str` with. This is passed into ``compile``
        as the second argument (for run-time error messages).

    Returns
    -------
    elapsed : float
        Total elapsed time in seconds for executing `code_str` `times` times.

    Examples
    --------
    >>> times = 10
    >>> etime = np.testing.measure('for i in range(1000): np.sqrt(i**2)', times=times)
    >>> print("Time for a single execution : ", etime / times, "s")  # doctest: +SKIP
    Time for a single execution :  0.005 s

    """
    # Execute in the caller's namespaces so the code sees its variables.
    caller = sys._getframe(1)
    locs, globs = caller.f_locals, caller.f_globals

    compiled = compile(code_str, f'Test name: {label} ', 'exec')
    count = 0
    start = jiffies()
    while count < times:
        count += 1
        exec(compiled, globs, locs)
    elapsed = jiffies() - start
    # jiffies are hundredths of a second.
    return 0.01 * elapsed
+
+
def _assert_valid_refcount(op):
    """
    Check that applying the ufunc `op` does not mishandle the refcount of
    the object `1`. Used in a few regression tests.
    """
    if not HAS_REFCOUNT:
        # Refcounts are not observable on this interpreter build.
        return True

    import gc
    import numpy as np

    left = np.arange(100*100).reshape(100, 100)
    right = left
    probe = 1

    gc.disable()
    try:
        before = sys.getrefcount(probe)
        for _ in range(15):
            result = op(left, right)
        assert_(sys.getrefcount(probe) >= before)
    finally:
        gc.enable()
    del result  # for pyflakes
+
+
def assert_allclose(actual, desired, rtol=1e-7, atol=0, equal_nan=True,
                    err_msg='', verbose=True):
    """
    Raise an AssertionError if two objects are not equal up to the desired
    tolerance.

    Shapes and all elements must match (see Notes for scalar handling).
    The test compares ``abs(actual - desired)`` against
    ``atol + rtol * abs(desired)`` -- equivalent to
    ``allclose(actual, desired, rtol, atol)``, though ``allclose`` has
    different defaults. Unlike ordinary numpy comparison, NaNs in matching
    positions compare equal when `equal_nan` is True.

    .. versionadded:: 1.5.0

    Parameters
    ----------
    actual : array_like
        Array obtained.
    desired : array_like
        Array desired.
    rtol : float, optional
        Relative tolerance.
    atol : float, optional
        Absolute tolerance.
    equal_nan : bool, optional
        If True, NaNs will compare equal.
    err_msg : str, optional
        The error message to be printed in case of failure.
    verbose : bool, optional
        If True, the conflicting values are appended to the error message.

    Raises
    ------
    AssertionError
        If actual and desired are not equal up to specified precision.

    See Also
    --------
    assert_array_almost_equal_nulp, assert_array_max_ulp

    Notes
    -----
    When one of `actual` and `desired` is a scalar and the other is
    array_like, every element of the array_like object is checked against
    the scalar.

    Examples
    --------
    >>> x = [1e-5, 1e-3, 1e-1]
    >>> y = np.arccos(np.cos(x))
    >>> np.testing.assert_allclose(x, y, rtol=1e-5, atol=0)

    """
    __tracebackhide__ = True  # Hide traceback for py.test
    import numpy as np

    def _compare(x, y):
        # Element-wise closeness predicate with the tolerances bound in.
        return np.core.numeric.isclose(x, y, rtol=rtol, atol=atol,
                                       equal_nan=equal_nan)

    actual = np.asanyarray(actual)
    desired = np.asanyarray(desired)
    header = f'Not equal to tolerance rtol={rtol:g}, atol={atol:g}'
    assert_array_compare(_compare, actual, desired, err_msg=str(err_msg),
                         verbose=verbose, header=header, equal_nan=equal_nan)
+
+
def assert_array_almost_equal_nulp(x, y, nulp=1):
    """
    Compare two arrays relatively to their spacing.

    This is a relatively robust method to compare two arrays whose
    amplitude is variable.

    Parameters
    ----------
    x, y : array_like
        Input arrays.
    nulp : int, optional
        The maximum number of unit in the last place for tolerance (see
        Notes). Default is 1.

    Returns
    -------
    None

    Raises
    ------
    AssertionError
        If the spacing between `x` and `y` for one or more elements is
        larger than `nulp`.

    See Also
    --------
    assert_array_max_ulp : Check that all items of arrays differ in at most
        N Units in the Last Place.
    spacing : Return the distance between x and the nearest adjacent number.

    Notes
    -----
    An assertion is raised if the following condition is not met::

        abs(x - y) <= nulp * spacing(maximum(abs(x), abs(y)))

    Examples
    --------
    >>> x = np.array([1., 1e-10, 1e-20])
    >>> eps = np.finfo(x.dtype).eps
    >>> np.testing.assert_array_almost_equal_nulp(x, x*eps/2 + x)

    """
    __tracebackhide__ = True  # Hide traceback for py.test
    import numpy as np
    ax, ay = np.abs(x), np.abs(y)
    # Tolerance is `nulp` spacings at the larger magnitude of each pair.
    # np.where (not np.maximum) preserves the original NaN selection rule.
    tolerance = nulp * np.spacing(np.where(ax > ay, ax, ay))
    if np.all(np.abs(x - y) <= tolerance):
        return
    if np.iscomplexobj(x) or np.iscomplexobj(y):
        msg = "X and Y are not equal to %d ULP" % nulp
    else:
        max_nulp = np.max(nulp_diff(x, y))
        msg = "X and Y are not equal to %d ULP (max is %g)" % (nulp, max_nulp)
    raise AssertionError(msg)
+
+
def assert_array_max_ulp(a, b, maxulp=1, dtype=None):
    """
    Check that all items of arrays differ in at most N Units in the Last
    Place.

    Parameters
    ----------
    a, b : array_like
        Input arrays to be compared.
    maxulp : int, optional
        The maximum number of units in the last place that elements of `a`
        and `b` can differ. Default is 1.
    dtype : dtype, optional
        Data-type to convert `a` and `b` to if given. Default is None.

    Returns
    -------
    ret : ndarray
        Array containing number of representable floating point numbers
        between items in `a` and `b`.

    Raises
    ------
    AssertionError
        If one or more elements differ by more than `maxulp`.

    Notes
    -----
    For computing the ULP difference, this API does not differentiate
    between various representations of NAN (ULP difference between
    0x7fc00000 and 0xffc00000 is zero).

    See Also
    --------
    assert_array_almost_equal_nulp : Compare two arrays relatively to their
        spacing.

    Examples
    --------
    >>> a = np.linspace(0., 1., 100)
    >>> res = np.testing.assert_array_max_ulp(a, np.arcsin(np.sin(a)))

    """
    __tracebackhide__ = True  # Hide traceback for py.test
    import numpy as np
    diff = nulp_diff(a, b, dtype)
    # Keep the `not all(<=)` form so NaNs in `diff` still trigger failure.
    within = np.all(diff <= maxulp)
    if not within:
        raise AssertionError("Arrays are not almost equal up to %g "
                             "ULP (max difference is %g ULP)" %
                             (maxulp, np.max(diff)))
    return diff
+
+
def nulp_diff(x, y, dtype=None):
    """For each item in x and y, return the number of representable floating
    points between them.

    Parameters
    ----------
    x : array_like
        first input array
    y : array_like
        second input array
    dtype : dtype, optional
        Data-type to convert `x` and `y` to if given. Default is None.

    Returns
    -------
    nulp : array_like
        number of representable floating point numbers between each item in
        x and y.

    Notes
    -----
    For computing the ULP difference, this API does not differentiate
    between various representations of NAN (ULP difference between
    0x7fc00000 and 0xffc00000 is zero).

    Examples
    --------
    # By definition, epsilon is the smallest number such as 1 + eps != 1, so
    # there should be exactly one ULP between 1 and 1 + eps
    >>> nulp_diff(1, 1 + np.finfo(x.dtype).eps)
    1.0
    """
    import numpy as np
    if dtype:
        x, y = np.asarray(x, dtype=dtype), np.asarray(y, dtype=dtype)
    else:
        x, y = np.asarray(x), np.asarray(y)

    t = np.common_type(x, y)
    if np.iscomplexobj(x) or np.iscomplexobj(y):
        raise NotImplementedError("_nulp not implemented for complex array")

    # Promote both operands to the common type (wrapped so scalars work).
    x = np.array([x], dtype=t)
    y = np.array([y], dtype=t)

    # Collapse every NaN payload to the canonical NaN so all NaN
    # representations compare as zero ULPs apart.
    x[np.isnan(x)] = np.nan
    y[np.isnan(y)] = np.nan

    if x.shape != y.shape:
        raise ValueError("x and y do not have the same shape: %s - %s" %
                         (x.shape, y.shape))

    # Distance in representable floats == difference of the
    # signed-magnitude integer interpretations of the bit patterns.
    rx = integer_repr(x)
    ry = integer_repr(y)
    return np.abs(np.asarray(rx - ry, dtype=t))
+
+
def _integer_repr(x, vdt, comp):
    # Reinterpret the float's binary representation as a signed integer and
    # fold the two's-complement negative range into a signed-magnitude
    # ordering (negatives are mapped to `comp - value`).
    # See also
    # https://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/
    bits = x.view(vdt)
    if bits.size == 1:
        if bits < 0:
            bits = comp - bits
    else:
        # NOTE: this writes through the view, mutating x's buffer in place
        # (same as the original behaviour).
        bits[bits < 0] = comp - bits[bits < 0]

    return bits
+
+
def integer_repr(x):
    """Return the signed-magnitude interpretation of the binary representation
    of x."""
    import numpy as np
    # Map each supported native float dtype to its same-width integer view
    # type and the two's-complement folding constant. Keys are np.dtype
    # objects so non-native byte orders fall through to the error, exactly
    # as the original dtype equality checks did.
    handlers = {
        np.dtype(np.float16): (np.int16, np.int16(-2**15)),
        np.dtype(np.float32): (np.int32, np.int32(-2**31)),
        np.dtype(np.float64): (np.int64, np.int64(-2**63)),
    }
    entry = handlers.get(x.dtype)
    if entry is None:
        raise ValueError(f'Unsupported dtype {x.dtype}')
    return _integer_repr(x, *entry)
+
+
@contextlib.contextmanager
def _assert_warns_context(warning_class, name=None):
    """Context manager behind `assert_warns`: record warnings of the given
    class and fail if none were emitted inside the block."""
    __tracebackhide__ = True  # Hide traceback for py.test
    with suppress_warnings() as sup:
        caught = sup.record(warning_class)
        yield
        if len(caught) == 0:
            suffix = f' when calling {name}' if name is not None else ''
            raise AssertionError("No warning raised" + suffix)
+
+
def assert_warns(warning_class, *args, **kwargs):
    """
    Fail unless the given callable throws the specified warning.

    A warning of class `warning_class` should be thrown by the callable
    when invoked with arguments `args` and keyword arguments `kwargs`.
    If a different type of warning is thrown, it will not be caught.

    If called with all arguments other than the warning class omitted, may
    be used as a context manager::

        with assert_warns(SomeWarning):
            do_something()

    The ability to be used as a context manager is new in NumPy v1.11.0.

    .. versionadded:: 1.4.0

    Parameters
    ----------
    warning_class : class
        The class defining the warning that `func` is expected to throw.
    func : callable, optional
        Callable to test
    *args : Arguments
        Arguments for `func`.
    **kwargs : Kwargs
        Keyword arguments for `func`.

    Returns
    -------
    The value returned by `func`.

    Examples
    --------
    >>> import warnings
    >>> def deprecated_func(num):
    ...     warnings.warn("Please upgrade", DeprecationWarning)
    ...     return num*num
    >>> with np.testing.assert_warns(DeprecationWarning):
    ...     assert deprecated_func(4) == 16
    >>> # or passing a func
    >>> ret = np.testing.assert_warns(DeprecationWarning, deprecated_func, 4)
    >>> assert ret == 16
    """
    # No callable given: hand back the bare context manager.
    if not args:
        return _assert_warns_context(warning_class)

    func, *call_args = args
    with _assert_warns_context(warning_class, name=func.__name__):
        return func(*call_args, **kwargs)
+
+
@contextlib.contextmanager
def _assert_no_warnings_context(name=None):
    """Context manager behind `assert_no_warnings`: fail if any warning is
    emitted inside the block."""
    __tracebackhide__ = True  # Hide traceback for py.test
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter('always')
        yield
        if caught:
            name_str = f' when calling {name}' if name is not None else ''
            raise AssertionError(f'Got warnings{name_str}: {caught}')
+
+
def assert_no_warnings(*args, **kwargs):
    """
    Fail if the given callable produces any warnings.

    If called with all arguments omitted, may be used as a context manager::

        with assert_no_warnings():
            do_something()

    The ability to be used as a context manager is new in NumPy v1.11.0.

    .. versionadded:: 1.7.0

    Parameters
    ----------
    func : callable
        The callable to test.
    \\*args : Arguments
        Arguments passed to `func`.
    \\*\\*kwargs : Kwargs
        Keyword arguments passed to `func`.

    Returns
    -------
    The value returned by `func`.

    """
    # No callable given: hand back the bare context manager.
    if not args:
        return _assert_no_warnings_context()

    func, *call_args = args
    with _assert_no_warnings_context(name=func.__name__):
        return func(*call_args, **kwargs)
+
+
def _gen_alignment_data(dtype=float32, type='binary', max_size=24):
    """
    Generator producing data with different alignment and offsets
    to test simd vectorization.

    Parameters
    ----------
    dtype : dtype
        data type to produce
    type : string
        'unary': create data for unary operations, creates one input
                 and output array
        'binary': create data for unary operations, creates two input
                 and output array
    max_size : integer
        maximum size of data to produce

    Returns
    -------
    if type is 'unary' yields one output, one input array and a message
    containing information on the data
    if type is 'binary' yields one output array, two input array and a
    message containing information on the data

    """
    ufmt = 'unary offset=(%d, %d), size=%d, dtype=%r, %s'
    bfmt = 'binary offset=(%d, %d, %d), size=%d, dtype=%r, %s'
    for offset in range(3):
        for size in range(offset + 2, max(offset + 3, max_size)):
            def fresh():
                # A new (possibly unaligned) input view for each use.
                return arange(size, dtype=dtype)[offset:]

            if type == 'unary':
                out = empty((size,), dtype=dtype)[offset:]
                yield out, fresh(), ufmt % (offset, offset, size, dtype,
                                            'out of place')
                shared = fresh()
                yield shared, shared, ufmt % (offset, offset, size, dtype,
                                              'in place')
                yield out[1:], fresh()[:-1], ufmt % (
                    offset + 1, offset, size - 1, dtype, 'out of place')
                yield out[:-1], fresh()[1:], ufmt % (
                    offset, offset + 1, size - 1, dtype, 'out of place')
                yield fresh()[:-1], fresh()[1:], ufmt % (
                    offset, offset + 1, size - 1, dtype, 'aliased')
                yield fresh()[1:], fresh()[:-1], ufmt % (
                    offset + 1, offset, size - 1, dtype, 'aliased')
            if type == 'binary':
                out = empty((size,), dtype=dtype)[offset:]
                yield out, fresh(), fresh(), bfmt % (
                    offset, offset, offset, size, dtype, 'out of place')
                shared = fresh()
                yield shared, shared, fresh(), bfmt % (
                    offset, offset, offset, size, dtype, 'in place1')
                shared = fresh()
                yield shared, fresh(), shared, bfmt % (
                    offset, offset, offset, size, dtype, 'in place2')
                yield out[1:], fresh()[:-1], fresh()[:-1], bfmt % (
                    offset + 1, offset, offset, size - 1, dtype,
                    'out of place')
                yield out[:-1], fresh()[1:], fresh()[:-1], bfmt % (
                    offset, offset + 1, offset, size - 1, dtype,
                    'out of place')
                yield out[:-1], fresh()[:-1], fresh()[1:], bfmt % (
                    offset, offset, offset + 1, size - 1, dtype,
                    'out of place')
                yield fresh()[1:], fresh()[:-1], fresh()[:-1], bfmt % (
                    offset + 1, offset, offset, size - 1, dtype, 'aliased')
                yield fresh()[:-1], fresh()[1:], fresh()[:-1], bfmt % (
                    offset, offset + 1, offset, size - 1, dtype, 'aliased')
                yield fresh()[:-1], fresh()[:-1], fresh()[1:], bfmt % (
                    offset, offset, offset + 1, size - 1, dtype, 'aliased')
+
+
class IgnoreException(Exception):
    """Exception raised to signal that a disabled feature's failure should
    be ignored."""
+
+
@contextlib.contextmanager
def tempdir(*args, **kwargs):
    """Context manager to provide a temporary test folder.

    All arguments are passed as this to the underlying tempfile.mkdtemp
    function.

    The directory and its contents are removed when the context exits.
    """
    path = mkdtemp(*args, **kwargs)
    try:
        yield path
    finally:
        # Always clean up, even if the body raised.
        shutil.rmtree(path)
+
+
@contextlib.contextmanager
def temppath(*args, **kwargs):
    """Context manager for temporary files.

    Context manager that returns the path to a closed temporary file. Its
    parameters are the same as for tempfile.mkstemp and are passed directly
    to that function. The underlying file is removed when the context is
    exited, so it should be closed at that time.

    Windows does not allow a temporary file to be opened if it is already
    open, so the underlying file must be closed after opening before it
    can be opened again.

    """
    fd, path = mkstemp(*args, **kwargs)
    # Close the OS-level handle right away; callers reopen by path.
    os.close(fd)
    try:
        yield path
    finally:
        os.remove(path)
+
+
class clear_and_catch_warnings(warnings.catch_warnings):
    """ Context manager that resets warning registry for catching warnings

    Whenever a warning is triggered, Python adds a ``__warningregistry__``
    member to the *calling* module, which can prevent the warning from being
    re-triggered no matter what filters are installed. This context manager
    accepts a sequence of `modules` as a keyword argument to its constructor
    and:

    * stores and removes any ``__warningregistry__`` entries in given
      `modules` on entry;
    * resets ``__warningregistry__`` to its previous state on exit.

    This makes it possible to trigger any warning afresh inside the context
    manager without disturbing the state of warnings outside.

    For compatibility with Python 3.0, please consider all arguments to be
    keyword-only.

    Parameters
    ----------
    record : bool, optional
        Specifies whether warnings should be captured by a custom
        implementation of ``warnings.showwarning()`` and be appended to a
        list returned by the context manager. Otherwise None is returned by
        the context manager. The objects appended to the list are arguments
        whose attributes mirror the arguments to ``showwarning()``.
    modules : sequence, optional
        Sequence of modules for which to reset warnings registry on entry
        and restore on exit. To work correctly, all 'ignore' filters should
        filter by one of these modules.

    Examples
    --------
    >>> import warnings
    >>> with np.testing.clear_and_catch_warnings(
    ...         modules=[np.core.fromnumeric]):
    ...     warnings.simplefilter('always')
    ...     warnings.filterwarnings('ignore', module='np.core.fromnumeric')
    ...     # do something that raises a warning but ignore those in
    ...     # np.core.fromnumeric
    """
    # Subclasses may pre-populate this with modules that are always reset.
    class_modules = ()

    def __init__(self, record=False, modules=()):
        self.modules = set(modules).union(self.class_modules)
        self._warnreg_copies = {}
        super().__init__(record=record)

    def __enter__(self):
        # Snapshot and empty each module's warning registry before
        # delegating to catch_warnings.
        for mod in self.modules:
            if hasattr(mod, '__warningregistry__'):
                registry = mod.__warningregistry__
                self._warnreg_copies[mod] = registry.copy()
                registry.clear()
        return super().__enter__()

    def __exit__(self, *exc_info):
        super().__exit__(*exc_info)
        # Drop anything accumulated inside the context, then restore the
        # snapshot taken on entry.
        for mod in self.modules:
            if hasattr(mod, '__warningregistry__'):
                mod.__warningregistry__.clear()
            saved = self._warnreg_copies.get(mod)
            if saved is not None:
                mod.__warningregistry__.update(saved)
+
+
class suppress_warnings:
    """
    Context manager and decorator doing much the same as
    ``warnings.catch_warnings``.

    However, it also provides a filter mechanism to work around
    https://bugs.python.org/issue4180.

    This bug causes Python before 3.4 to not reliably show warnings again
    after they have been ignored once (even within catch_warnings). It
    means that no "ignore" filter can be used easily, since following
    tests might need to see the warning. Additionally it allows easier
    specificity for testing warnings and can be nested.

    Parameters
    ----------
    forwarding_rule : str, optional
        One of "always", "once", "module", or "location". Analogous to
        the usual warnings module filter mode, it is useful to reduce
        noise mostly on the outmost level. Unsuppressed and unrecorded
        warnings will be forwarded based on this rule. Defaults to "always".
        "location" is equivalent to the warnings "default", match by exact
        location the warning warning originated from.

    Notes
    -----
    Filters added inside the context manager will be discarded again
    when leaving it. Upon entering all filters defined outside a
    context will be applied automatically.

    When a recording filter is added, matching warnings are stored in the
    ``log`` attribute as well as in the list returned by ``record``.

    If filters are added and the ``module`` keyword is given, the
    warning registry of this module will additionally be cleared when
    applying it, entering the context, or exiting it. This could cause
    warnings to appear a second time after leaving the context if they
    were configured to be printed once (default) and were already
    printed before the context was entered.

    Nesting this context manager will work as expected when the
    forwarding rule is "always" (default). Unfiltered and unrecorded
    warnings will be passed out and be matched by the outer level.
    On the outmost level they will be printed (or caught by another
    warnings context). The forwarding rule argument can modify this
    behaviour.

    Like ``catch_warnings`` this context manager is not threadsafe.

    Examples
    --------

    With a context manager::

        with np.testing.suppress_warnings() as sup:
            sup.filter(DeprecationWarning, "Some text")
            sup.filter(module=np.ma.core)
            log = sup.record(FutureWarning, "Does this occur?")
            command_giving_warnings()
            # The FutureWarning was given once, the filtered warnings were
            # ignored. All other warnings abide outside settings (may be
            # printed/error)
            assert_(len(log) == 1)
            assert_(len(sup.log) == 1)  # also stored in log attribute

    Or as a decorator::

        sup = np.testing.suppress_warnings()
        sup.filter(module=np.ma.core)  # module must match exactly
        @sup
        def some_function():
            # do something which causes a warning in np.ma.core
            pass
    """
    def __init__(self, forwarding_rule="always"):
        # True while inside the `with` block; guards against re-entry.
        self._entered = False

        # Suppressions are either instance or defined inside one with block:
        self._suppressions = []

        if forwarding_rule not in {"always", "module", "once", "location"}:
            raise ValueError("unsupported forwarding rule.")
        self._forwarding_rule = forwarding_rule

    def _clear_registries(self):
        # Invalidate per-module __warningregistry__ caches so suppressed
        # warnings can be re-triggered.
        if hasattr(warnings, "_filters_mutated"):
            # clearing the registry should not be necessary on new pythons,
            # instead the filters should be mutated.
            warnings._filters_mutated()
            return
        # Simply clear the registry, this should normally be harmless,
        # note that on new pythons it would be invalidated anyway.
        for module in self._tmp_modules:
            if hasattr(module, "__warningregistry__"):
                module.__warningregistry__.clear()

    def _filter(self, category=Warning, message="", module=None, record=False):
        # Shared implementation of filter()/record(). `record` becomes the
        # list that matched warnings are appended to, or None to suppress.
        if record:
            record = []  # The log where to store warnings
        else:
            record = None
        if self._entered:
            # Already active: install the warnings filter immediately and
            # remember it only for the duration of this context.
            if module is None:
                warnings.filterwarnings(
                    "always", category=category, message=message)
            else:
                # Anchor the module name so submodules do not match.
                module_regex = module.__name__.replace('.', r'\.') + '$'
                warnings.filterwarnings(
                    "always", category=category, message=message,
                    module=module_regex)
                self._tmp_modules.add(module)
                self._clear_registries()

            self._tmp_suppressions.append(
                (category, message, re.compile(message, re.I), module, record))
        else:
            # Not entered yet: store the suppression; it is applied on enter.
            self._suppressions.append(
                (category, message, re.compile(message, re.I), module, record))

        return record

    def filter(self, category=Warning, message="", module=None):
        """
        Add a new suppressing filter or apply it if the state is entered.

        Parameters
        ----------
        category : class, optional
            Warning class to filter
        message : string, optional
            Regular expression matching the warning message.
        module : module, optional
            Module to filter for. Note that the module (and its file)
            must match exactly and cannot be a submodule. This may make
            it unreliable for external modules.

        Notes
        -----
        When added within a context, filters are only added inside
        the context and will be forgotten when the context is exited.
        """
        self._filter(category=category, message=message, module=module,
                     record=False)

    def record(self, category=Warning, message="", module=None):
        """
        Append a new recording filter or apply it if the state is entered.

        All warnings matching will be appended to the ``log`` attribute.

        Parameters
        ----------
        category : class, optional
            Warning class to filter
        message : string, optional
            Regular expression matching the warning message.
        module : module, optional
            Module to filter for. Note that the module (and its file)
            must match exactly and cannot be a submodule. This may make
            it unreliable for external modules.

        Returns
        -------
        log : list
            A list which will be filled with all matched warnings.

        Notes
        -----
        When added within a context, filters are only added inside
        the context and will be forgotten when the context is exited.
        """
        return self._filter(category=category, message=message, module=module,
                            record=True)

    def __enter__(self):
        if self._entered:
            raise RuntimeError("cannot enter suppress_warnings twice.")

        # Save global warning state; restored verbatim in __exit__.
        self._orig_show = warnings.showwarning
        self._filters = warnings.filters
        warnings.filters = self._filters[:]

        self._entered = True
        self._tmp_suppressions = []
        self._tmp_modules = set()
        self._forwarded = set()

        self.log = []  # reset global log (no need to keep same list)

        # Apply all suppressions registered before entering.
        for cat, mess, _, mod, log in self._suppressions:
            if log is not None:
                del log[:]  # clear the log
            if mod is None:
                warnings.filterwarnings(
                    "always", category=cat, message=mess)
            else:
                module_regex = mod.__name__.replace('.', r'\.') + '$'
                warnings.filterwarnings(
                    "always", category=cat, message=mess,
                    module=module_regex)
                self._tmp_modules.add(mod)
        warnings.showwarning = self._showwarning
        self._clear_registries()

        return self

    def __exit__(self, *exc_info):
        # Restore global warning state and discard context-local filters.
        warnings.showwarning = self._orig_show
        warnings.filters = self._filters
        self._clear_registries()
        self._entered = False
        del self._orig_show
        del self._filters

    def _showwarning(self, message, category, filename, lineno,
                     *args, use_warnmsg=None, **kwargs):
        # Replacement for warnings.showwarning while entered. Walk the
        # suppressions newest-first; the first match records or swallows
        # the warning.
        for cat, _, pattern, mod, rec in (
                self._suppressions + self._tmp_suppressions)[::-1]:
            if (issubclass(category, cat) and
                    pattern.match(message.args[0]) is not None):
                if mod is None:
                    # Message and category match, either recorded or ignored
                    if rec is not None:
                        msg = WarningMessage(message, category, filename,
                                             lineno, **kwargs)
                        self.log.append(msg)
                        rec.append(msg)
                    return
                # Use startswith, because warnings strips the c or o from
                # .pyc/.pyo files.
                elif mod.__file__.startswith(filename):
                    # The message and module (filename) match
                    if rec is not None:
                        msg = WarningMessage(message, category, filename,
                                             lineno, **kwargs)
                        self.log.append(msg)
                        rec.append(msg)
                    return

        # There is no filter in place, so pass to the outside handler
        # unless we should only pass it once
        if self._forwarding_rule == "always":
            if use_warnmsg is None:
                self._orig_show(message, category, filename, lineno,
                                *args, **kwargs)
            else:
                # NOTE(review): _orig_showmsg is never assigned in this
                # class; this branch appears unreachable unless a caller
                # passes use_warnmsg explicitly — confirm before relying
                # on it.
                self._orig_showmsg(use_warnmsg)
            return

        # Deduplicate forwarded warnings according to the forwarding rule.
        if self._forwarding_rule == "once":
            signature = (message.args, category)
        elif self._forwarding_rule == "module":
            signature = (message.args, category, filename)
        elif self._forwarding_rule == "location":
            signature = (message.args, category, filename, lineno)

        if signature in self._forwarded:
            return
        self._forwarded.add(signature)
        if use_warnmsg is None:
            self._orig_show(message, category, filename, lineno, *args,
                            **kwargs)
        else:
            self._orig_showmsg(use_warnmsg)

    def __call__(self, func):
        """
        Function decorator to apply certain suppressions to a whole
        function.
        """
        @wraps(func)
        def new_func(*args, **kwargs):
            with self:
                return func(*args, **kwargs)

        return new_func
+
+
@contextlib.contextmanager
def _assert_no_gc_cycles_context(name=None):
    """Context manager behind `assert_no_gc_cycles`: fail if the body
    creates any reference cycles."""
    __tracebackhide__ = True  # Hide traceback for py.test

    # not meaningful to test if there is no refcounting
    if not HAS_REFCOUNT:
        yield
        return

    assert_(gc.isenabled())
    gc.disable()
    gc_debug = gc.get_debug()
    try:
        # Drain pre-existing garbage so only cycles made inside the body
        # are attributed to it; bail out if collection never converges.
        for i in range(100):
            if gc.collect() == 0:
                break
        else:
            raise RuntimeError(
                "Unable to fully collect garbage - perhaps a __del__ method "
                "is creating more reference cycles?")

        # DEBUG_SAVEALL keeps collected objects in gc.garbage so they can
        # be reported below instead of being freed.
        gc.set_debug(gc.DEBUG_SAVEALL)
        yield
        # gc.collect returns the number of unreachable objects in cycles that
        # were found -- we are checking that no cycles were created in the context
        n_objects_in_cycles = gc.collect()
        objects_in_cycles = gc.garbage[:]
    finally:
        # Restore the interpreter's gc state regardless of outcome.
        del gc.garbage[:]
        gc.set_debug(gc_debug)
        gc.enable()

    if n_objects_in_cycles:
        name_str = f' when calling {name}' if name is not None else ''
        raise AssertionError(
            "Reference cycles were found{}: {} objects were collected, "
            "of which {} are shown below:{}"
            .format(
                name_str,
                n_objects_in_cycles,
                len(objects_in_cycles),
                ''.join(
                    "\n  {} object with id={}:\n    {}".format(
                        type(o).__name__,
                        id(o),
                        pprint.pformat(o).replace('\n', '\n    ')
                    ) for o in objects_in_cycles
                )
            )
        )
+
+
def assert_no_gc_cycles(*args, **kwargs):
    """
    Fail if the given callable produces any reference cycles.

    If called with all arguments omitted, may be used as a context manager::

        with assert_no_gc_cycles():
            do_something()

    .. versionadded:: 1.15.0

    Parameters
    ----------
    func : callable
        The callable to test.
    \\*args : Arguments
        Arguments passed to `func`.
    \\*\\*kwargs : Kwargs
        Keyword arguments passed to `func`.

    Returns
    -------
    Nothing. The result is deliberately discarded to ensure that all cycles
    are found.

    """
    # No callable given: hand back the bare context manager.
    if not args:
        return _assert_no_gc_cycles_context()

    func, *call_args = args
    with _assert_no_gc_cycles_context(name=func.__name__):
        # Intentionally not returned: keeping the result could hide cycles.
        func(*call_args, **kwargs)
+
+
def break_cycles():
    """
    Break reference cycles by calling gc.collect
    Objects can call other objects' methods (for instance, another object's
    __del__) inside their own __del__. On PyPy, the interpreter only runs
    between calls to gc.collect, so multiple calls are needed to completely
    release all cycles.
    """
    gc.collect()
    if IS_PYPY:
        # PyPy finalizers may themselves create new garbage, so run a few
        # extra passes to make sure every finalizer has actually executed.
        for _ in range(4):
            gc.collect()
+
+
def requires_memory(free_bytes):
    """Decorator to skip a test if not enough memory is available"""
    import pytest

    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            # check_free_memory returns None when enough memory is free,
            # otherwise a human-readable skip reason.
            skip_reason = check_free_memory(free_bytes)
            if skip_reason is not None:
                pytest.skip(skip_reason)
            try:
                return func(*args, **kwargs)
            except MemoryError:
                # The test most likely ran out of memory anyway; treat this
                # as an expected failure rather than an error.
                pytest.xfail("MemoryError raised")
        return wrapper

    return decorator
+
+
def check_free_memory(free_bytes):
    """
    Check whether `free_bytes` amount of memory is currently free.
    Returns: None if enough memory available, otherwise error message
    """
    env_var = 'NPY_AVAILABLE_MEM'
    env_value = os.environ.get(env_var)
    if env_value is not None:
        try:
            mem_free = _parse_size(env_value)
        except ValueError as exc:
            # Chain the original parse error (``raise ... from``) so the
            # root cause stays visible in the traceback instead of showing
            # the misleading "During handling of the above exception".
            raise ValueError(
                f'Invalid environment variable {env_var}: {exc}') from exc

        msg = (f'{free_bytes/1e9} GB memory required, but environment variable '
               f'NPY_AVAILABLE_MEM={env_value} set')
    else:
        mem_free = _get_mem_available()

        if mem_free is None:
            msg = ("Could not determine available memory; set NPY_AVAILABLE_MEM "
                   "environment variable (e.g. NPY_AVAILABLE_MEM=16GB) to run "
                   "the test.")
            # Sentinel: unknown availability always fails the comparison below.
            mem_free = -1
        else:
            msg = f'{free_bytes/1e9} GB memory required, but {mem_free/1e9} GB available'

    return msg if mem_free < free_bytes else None
+
+
+def _parse_size(size_str):
+    """Convert memory size strings ('12 GB' etc.) to float"""
+    suffixes = {'': 1, 'b': 1,
+                'k': 1000, 'm': 1000**2, 'g': 1000**3, 't': 1000**4,
+                'kb': 1000, 'mb': 1000**2, 'gb': 1000**3, 'tb': 1000**4,
+                'kib': 1024, 'mib': 1024**2, 'gib': 1024**3, 'tib': 1024**4}
+
+    size_re = re.compile(r'^\s*(\d+|\d+\.\d+)\s*({0})\s*$'.format(
+        '|'.join(suffixes.keys())), re.I)
+
+    m = size_re.match(size_str.lower())
+    if not m or m.group(2) not in suffixes:
+        raise ValueError(f'value {size_str!r} not a valid size')
+    return int(float(m.group(1)) * suffixes[m.group(2)])
+
+
def _get_mem_available():
    """Return available memory in bytes, or None if unknown."""
    # Prefer psutil when it is installed and new enough to expose
    # virtual_memory().available.
    try:
        import psutil
        return psutil.virtual_memory().available
    except (ImportError, AttributeError):
        pass

    if not sys.platform.startswith('linux'):
        return None

    # Fall back to /proc/meminfo; values there are reported in kB.
    info = {}
    with open('/proc/meminfo') as meminfo:
        for line in meminfo:
            fields = line.split()
            info[fields[0].strip(':').lower()] = int(fields[1]) * 1024

    if 'memavailable' in info:
        # Linux >= 3.14
        return info['memavailable']
    return info['memfree'] + info['cached']
+
+
+def _no_tracing(func):
+    """
+    Decorator to temporarily turn off tracing for the duration of a test.
+    Needed in tests that check refcounting, otherwise the tracing itself
+    influences the refcounts
+    """
+    if not hasattr(sys, 'gettrace'):
+        return func
+    else:
+        @wraps(func)
+        def wrapper(*args, **kwargs):
+            original_trace = sys.gettrace()
+            try:
+                sys.settrace(None)
+                return func(*args, **kwargs)
+            finally:
+                sys.settrace(original_trace)
+        return wrapper
+
+
+def _get_glibc_version():
+    try:
+        ver = os.confstr('CS_GNU_LIBC_VERSION').rsplit(' ')[1]
+    except Exception:
+        ver = '0.0'
+
+    return ver
+
+
# Cache the glibc version once at import time; '0.0' means "unknown".
_glibcver = _get_glibc_version()


def _glibc_older_than(x):
    """Return True if the runtime glibc version is known and older than `x`.

    Uses lexicographic string comparison, matching the original behavior.
    (Replaces a lambda assigned to a name, per PEP 8 E731.)
    """
    return _glibcver != '0.0' and _glibcver < x
+
diff --git a/.venv/lib/python3.12/site-packages/numpy/testing/_private/utils.pyi b/.venv/lib/python3.12/site-packages/numpy/testing/_private/utils.pyi
new file mode 100644
index 00000000..6baefd83
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/testing/_private/utils.pyi
@@ -0,0 +1,402 @@
+import os
+import sys
+import ast
+import types
+import warnings
+import unittest
+import contextlib
+from re import Pattern
+from collections.abc import Callable, Iterable, Sequence
+from typing import (
+    Literal as L,
+    Any,
+    AnyStr,
+    ClassVar,
+    NoReturn,
+    overload,
+    type_check_only,
+    TypeVar,
+    Union,
+    Final,
+    SupportsIndex,
+)
+if sys.version_info >= (3, 10):
+    from typing import ParamSpec
+else:
+    from typing_extensions import ParamSpec
+
+from numpy import generic, dtype, number, object_, bool_, _FloatValue
+from numpy._typing import (
+    NDArray,
+    ArrayLike,
+    DTypeLike,
+    _ArrayLikeNumber_co,
+    _ArrayLikeObject_co,
+    _ArrayLikeTD64_co,
+    _ArrayLikeDT64_co,
+)
+
+from unittest.case import (
+    SkipTest as SkipTest,
+)
+
+_P = ParamSpec("_P")
+_T = TypeVar("_T")
+_ET = TypeVar("_ET", bound=BaseException)
+_FT = TypeVar("_FT", bound=Callable[..., Any])
+
+# Must return a bool or an ndarray/generic type
+# that is supported by `np.logical_and.reduce`
+_ComparisonFunc = Callable[
+    [NDArray[Any], NDArray[Any]],
+    Union[
+        bool,
+        bool_,
+        number[Any],
+        NDArray[Union[bool_, number[Any], object_]],
+    ],
+]
+
+__all__: list[str]
+
+class KnownFailureException(Exception): ...
+class IgnoreException(Exception): ...
+
class clear_and_catch_warnings(warnings.catch_warnings):
    # Modules whose __warningregistry__ is always cleared by subclasses.
    class_modules: ClassVar[tuple[types.ModuleType, ...]]
    # Modules whose __warningregistry__ is cleared for this instance.
    modules: set[types.ModuleType]
    # The overloads dispatch on the literal value of ``record`` so that
    # ``__enter__`` is typed as returning either None or the recorded log.
    @overload
    def __new__(
        cls,
        record: L[False] = ...,
        modules: Iterable[types.ModuleType] = ...,
    ) -> _clear_and_catch_warnings_without_records: ...
    @overload
    def __new__(
        cls,
        record: L[True],
        modules: Iterable[types.ModuleType] = ...,
    ) -> _clear_and_catch_warnings_with_records: ...
    @overload
    def __new__(
        cls,
        record: bool,
        modules: Iterable[types.ModuleType] = ...,
    ) -> clear_and_catch_warnings: ...
    def __enter__(self) -> None | list[warnings.WarningMessage]: ...
    def __exit__(
        self,
        __exc_type: None | type[BaseException] = ...,
        __exc_val: None | BaseException = ...,
        __exc_tb: None | types.TracebackType = ...,
    ) -> None: ...
+
+# Type-check only `clear_and_catch_warnings` subclasses for both values of the
+# `record` parameter. Copied from the stdlib `warnings` stubs.
+
+@type_check_only
+class _clear_and_catch_warnings_with_records(clear_and_catch_warnings):
+    def __enter__(self) -> list[warnings.WarningMessage]: ...
+
+@type_check_only
+class _clear_and_catch_warnings_without_records(clear_and_catch_warnings):
+    def __enter__(self) -> None: ...
+
class suppress_warnings:
    # Warnings recorded while the manager is active (filled by ``record``).
    log: list[warnings.WarningMessage]
    def __init__(
        self,
        forwarding_rule: L["always", "module", "once", "location"] = ...,
    ) -> None: ...
    # Suppress matching warnings without recording them.
    def filter(
        self,
        category: type[Warning] = ...,
        message: str = ...,
        module: None | types.ModuleType = ...,
    ) -> None: ...
    # Suppress matching warnings and return the list they are recorded into.
    def record(
        self,
        category: type[Warning] = ...,
        message: str = ...,
        module: None | types.ModuleType = ...,
    ) -> list[warnings.WarningMessage]: ...
    def __enter__(self: _T) -> _T: ...
    def __exit__(
        self,
        __exc_type: None | type[BaseException] = ...,
        __exc_val: None | BaseException = ...,
        __exc_tb: None | types.TracebackType = ...,
    ) -> None: ...
    # The instance may also be used as a function decorator.
    def __call__(self, func: _FT) -> _FT: ...
+
+verbose: int
+IS_PYPY: Final[bool]
+IS_PYSTON: Final[bool]
+HAS_REFCOUNT: Final[bool]
+HAS_LAPACK64: Final[bool]
+
+def assert_(val: object, msg: str | Callable[[], str] = ...) -> None: ...
+
+# Contrary to runtime we can't do `os.name` checks while type checking,
+# only `sys.platform` checks
+if sys.platform == "win32" or sys.platform == "cygwin":
+    def memusage(processName: str = ..., instance: int = ...) -> int: ...
+elif sys.platform == "linux":
+    def memusage(_proc_pid_stat: str | bytes | os.PathLike[Any] = ...) -> None | int: ...
+else:
+    def memusage() -> NoReturn: ...
+
+if sys.platform == "linux":
+    def jiffies(
+        _proc_pid_stat: str | bytes | os.PathLike[Any] = ...,
+        _load_time: list[float] = ...,
+    ) -> int: ...
+else:
+    def jiffies(_load_time: list[float] = ...) -> int: ...
+
+def build_err_msg(
+    arrays: Iterable[object],
+    err_msg: str,
+    header: str = ...,
+    verbose: bool = ...,
+    names: Sequence[str] = ...,
+    precision: None | SupportsIndex = ...,
+) -> str: ...
+
+def assert_equal(
+    actual: object,
+    desired: object,
+    err_msg: str = ...,
+    verbose: bool = ...,
+) -> None: ...
+
+def print_assert_equal(
+    test_string: str,
+    actual: object,
+    desired: object,
+) -> None: ...
+
+def assert_almost_equal(
+    actual: _ArrayLikeNumber_co | _ArrayLikeObject_co,
+    desired: _ArrayLikeNumber_co | _ArrayLikeObject_co,
+    decimal: int = ...,
+    err_msg: str = ...,
+    verbose: bool = ...,
+) -> None: ...
+
+# Anything that can be coerced into `builtins.float`
+def assert_approx_equal(
+    actual: _FloatValue,
+    desired: _FloatValue,
+    significant: int = ...,
+    err_msg: str = ...,
+    verbose: bool = ...,
+) -> None: ...
+
+def assert_array_compare(
+    comparison: _ComparisonFunc,
+    x: ArrayLike,
+    y: ArrayLike,
+    err_msg: str = ...,
+    verbose: bool = ...,
+    header: str = ...,
+    precision: SupportsIndex = ...,
+    equal_nan: bool = ...,
+    equal_inf: bool = ...,
+    *,
+    strict: bool = ...
+) -> None: ...
+
+def assert_array_equal(
+    x: ArrayLike,
+    y: ArrayLike,
+    err_msg: str = ...,
+    verbose: bool = ...,
+    *,
+    strict: bool = ...
+) -> None: ...
+
+def assert_array_almost_equal(
+    x: _ArrayLikeNumber_co | _ArrayLikeObject_co,
+    y: _ArrayLikeNumber_co | _ArrayLikeObject_co,
+    decimal: float = ...,
+    err_msg: str = ...,
+    verbose: bool = ...,
+) -> None: ...
+
+@overload
+def assert_array_less(
+    x: _ArrayLikeNumber_co | _ArrayLikeObject_co,
+    y: _ArrayLikeNumber_co | _ArrayLikeObject_co,
+    err_msg: str = ...,
+    verbose: bool = ...,
+) -> None: ...
+@overload
+def assert_array_less(
+    x: _ArrayLikeTD64_co,
+    y: _ArrayLikeTD64_co,
+    err_msg: str = ...,
+    verbose: bool = ...,
+) -> None: ...
+@overload
+def assert_array_less(
+    x: _ArrayLikeDT64_co,
+    y: _ArrayLikeDT64_co,
+    err_msg: str = ...,
+    verbose: bool = ...,
+) -> None: ...
+
+def runstring(
+    astr: str | bytes | types.CodeType,
+    dict: None | dict[str, Any],
+) -> Any: ...
+
+def assert_string_equal(actual: str, desired: str) -> None: ...
+
+def rundocs(
+    filename: None | str | os.PathLike[str] = ...,
+    raise_on_error: bool = ...,
+) -> None: ...
+
+def raises(*args: type[BaseException]) -> Callable[[_FT], _FT]: ...
+
+@overload
+def assert_raises(  # type: ignore
+    expected_exception: type[BaseException] | tuple[type[BaseException], ...],
+    callable: Callable[_P, Any],
+    /,
+    *args: _P.args,
+    **kwargs: _P.kwargs,
+) -> None: ...
+@overload
+def assert_raises(
+    expected_exception: type[_ET] | tuple[type[_ET], ...],
+    *,
+    msg: None | str = ...,
+) -> unittest.case._AssertRaisesContext[_ET]: ...
+
+@overload
+def assert_raises_regex(
+    expected_exception: type[BaseException] | tuple[type[BaseException], ...],
+    expected_regex: str | bytes | Pattern[Any],
+    callable: Callable[_P, Any],
+    /,
+    *args: _P.args,
+    **kwargs: _P.kwargs,
+) -> None: ...
+@overload
+def assert_raises_regex(
+    expected_exception: type[_ET] | tuple[type[_ET], ...],
+    expected_regex: str | bytes | Pattern[Any],
+    *,
+    msg: None | str = ...,
+) -> unittest.case._AssertRaisesContext[_ET]: ...
+
+def decorate_methods(
+    cls: type[Any],
+    decorator: Callable[[Callable[..., Any]], Any],
+    testmatch: None | str | bytes | Pattern[Any] = ...,
+) -> None: ...
+
+def measure(
+    code_str: str | bytes | ast.mod | ast.AST,
+    times: int = ...,
+    label: None | str = ...,
+) -> float: ...
+
+@overload
+def assert_allclose(
+    actual: _ArrayLikeNumber_co | _ArrayLikeObject_co,
+    desired: _ArrayLikeNumber_co | _ArrayLikeObject_co,
+    rtol: float = ...,
+    atol: float = ...,
+    equal_nan: bool = ...,
+    err_msg: str = ...,
+    verbose: bool = ...,
+) -> None: ...
+@overload
+def assert_allclose(
+    actual: _ArrayLikeTD64_co,
+    desired: _ArrayLikeTD64_co,
+    rtol: float = ...,
+    atol: float = ...,
+    equal_nan: bool = ...,
+    err_msg: str = ...,
+    verbose: bool = ...,
+) -> None: ...
+
+def assert_array_almost_equal_nulp(
+    x: _ArrayLikeNumber_co,
+    y: _ArrayLikeNumber_co,
+    nulp: float = ...,
+) -> None: ...
+
+def assert_array_max_ulp(
+    a: _ArrayLikeNumber_co,
+    b: _ArrayLikeNumber_co,
+    maxulp: float = ...,
+    dtype: DTypeLike = ...,
+) -> NDArray[Any]: ...
+
+@overload
+def assert_warns(
+    warning_class: type[Warning],
+) -> contextlib._GeneratorContextManager[None]: ...
+@overload
+def assert_warns(
+    warning_class: type[Warning],
+    func: Callable[_P, _T],
+    /,
+    *args: _P.args,
+    **kwargs: _P.kwargs,
+) -> _T: ...
+
+@overload
+def assert_no_warnings() -> contextlib._GeneratorContextManager[None]: ...
+@overload
+def assert_no_warnings(
+    func: Callable[_P, _T],
+    /,
+    *args: _P.args,
+    **kwargs: _P.kwargs,
+) -> _T: ...
+
+@overload
+def tempdir(
+    suffix: None = ...,
+    prefix: None = ...,
+    dir: None = ...,
+) -> contextlib._GeneratorContextManager[str]: ...
+@overload
+def tempdir(
+    suffix: None | AnyStr = ...,
+    prefix: None | AnyStr = ...,
+    dir: None | AnyStr | os.PathLike[AnyStr] = ...,
+) -> contextlib._GeneratorContextManager[AnyStr]: ...
+
+@overload
+def temppath(
+    suffix: None = ...,
+    prefix: None = ...,
+    dir: None = ...,
+    text: bool = ...,
+) -> contextlib._GeneratorContextManager[str]: ...
+@overload
+def temppath(
+    suffix: None | AnyStr = ...,
+    prefix: None | AnyStr = ...,
+    dir: None | AnyStr | os.PathLike[AnyStr] = ...,
+    text: bool = ...,
+) -> contextlib._GeneratorContextManager[AnyStr]: ...
+
+@overload
+def assert_no_gc_cycles() -> contextlib._GeneratorContextManager[None]: ...
+@overload
+def assert_no_gc_cycles(
+    func: Callable[_P, Any],
+    /,
+    *args: _P.args,
+    **kwargs: _P.kwargs,
+) -> None: ...
+
+def break_cycles() -> None: ...
diff --git a/.venv/lib/python3.12/site-packages/numpy/testing/overrides.py b/.venv/lib/python3.12/site-packages/numpy/testing/overrides.py
new file mode 100644
index 00000000..edc7132c
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/testing/overrides.py
@@ -0,0 +1,83 @@
+"""Tools for testing implementations of __array_function__ and ufunc overrides
+
+
+"""
+
+from numpy.core.overrides import ARRAY_FUNCTIONS as _array_functions
+from numpy import ufunc as _ufunc
+import numpy.core.umath as _umath
+
def get_overridable_numpy_ufuncs():
    """List all numpy ufuncs overridable via `__array_ufunc__`

    Parameters
    ----------
    None

    Returns
    -------
    set
        A set containing all overridable ufuncs in the public numpy API.
    """
    # Every ufunc instance living in the umath namespace is overridable.
    return {member for member in vars(_umath).values()
            if isinstance(member, _ufunc)}
+
def allows_array_ufunc_override(func):
    """Determine if a function can be overridden via `__array_ufunc__`

    Parameters
    ----------
    func : callable
        Function that may be overridable via `__array_ufunc__`

    Returns
    -------
    bool
        `True` if `func` is overridable via `__array_ufunc__` and
        `False` otherwise.

    Notes
    -----
    This function is equivalent to ``isinstance(func, np.ufunc)`` and
    will work correctly for ufuncs defined outside of Numpy.

    """
    # Bug fix: this module imports ``ufunc`` as ``_ufunc`` but never imports
    # numpy as ``np``, so the original ``isinstance(func, np.ufunc)`` raised
    # NameError on every call.
    return isinstance(func, _ufunc)
+
+
+def get_overridable_numpy_array_functions():
+    """List all numpy functions overridable via `__array_function__`
+
+    Parameters
+    ----------
+    None
+
+    Returns
+    -------
+    set
+        A set containing all functions in the public numpy API that are
+        overridable via `__array_function__`.
+
+    """
+    # 'import numpy' doesn't import recfunctions, so make sure it's imported
+    # so ufuncs defined there show up in the ufunc listing
+    from numpy.lib import recfunctions
+    return _array_functions.copy()
+
def allows_array_function_override(func):
    """Determine if a Numpy function can be overridden via `__array_function__`

    Parameters
    ----------
    func : callable
        Function that may be overridable via `__array_function__`

    Returns
    -------
    bool
        `True` if `func` is a function in the Numpy API that is
        overridable via `__array_function__` and `False` otherwise.
    """
    # Membership in the module-level ARRAY_FUNCTIONS registry (imported above
    # as ``_array_functions``) is exactly what makes a function overridable.
    return func in _array_functions
diff --git a/.venv/lib/python3.12/site-packages/numpy/testing/print_coercion_tables.py b/.venv/lib/python3.12/site-packages/numpy/testing/print_coercion_tables.py
new file mode 100755
index 00000000..c1d4cdff
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/testing/print_coercion_tables.py
@@ -0,0 +1,200 @@
+#!/usr/bin/env python3
+"""Prints type-coercion tables for the built-in NumPy types
+
+"""
+import numpy as np
+from collections import namedtuple
+
+# Generic object that can be added, but doesn't do anything else
class GenericObject:
    """Generic object supporting addition (which returns the object itself)
    but no other arithmetic; used to probe object-dtype coercion."""

    # Advertise an object dtype so numpy treats instances as 'O'.
    dtype = np.dtype('O')

    def __init__(self, v):
        self.v = v

    def __add__(self, other):
        return self

    def __radd__(self, other):
        return self
+
+def print_cancast_table(ntypes):
+    print('X', end=' ')
+    for char in ntypes:
+        print(char, end=' ')
+    print()
+    for row in ntypes:
+        print(row, end=' ')
+        for col in ntypes:
+            if np.can_cast(row, col, "equiv"):
+                cast = "#"
+            elif np.can_cast(row, col, "safe"):
+                cast = "="
+            elif np.can_cast(row, col, "same_kind"):
+                cast = "~"
+            elif np.can_cast(row, col, "unsafe"):
+                cast = "."
+            else:
+                cast = " "
+            print(cast, end=' ')
+        print()
+
def print_coercion_table(ntypes, inputfirstvalue, inputsecondvalue, firstarray, use_promote_types=False):
    """Print the dtype character produced for every pair of type characters.

    For each (row, col) pair the two values are combined either with
    ``np.add`` (the default) or ``np.promote_types`` and the resulting dtype
    character is printed.  Exceptions are encoded as single characters:
    ValueError '!', OverflowError '@', TypeError '#'.
    """
    # Header row listing the column type characters.
    print('+', end=' ')
    for char in ntypes:
        print(char, end=' ')
    print()
    for row in ntypes:
        # 'O' is special-cased with GenericObject so object coercion is
        # exercised without real arithmetic.
        if row == 'O':
            rowtype = GenericObject
        else:
            rowtype = np.obj2sctype(row)

        print(row, end=' ')
        for col in ntypes:
            if col == 'O':
                coltype = GenericObject
            else:
                coltype = np.obj2sctype(col)
            try:
                # Optionally wrap the first operand in a rank-1 array to test
                # array-with-scalar (rather than scalar-with-scalar) coercion.
                if firstarray:
                    rowvalue = np.array([rowtype(inputfirstvalue)], dtype=rowtype)
                else:
                    rowvalue = rowtype(inputfirstvalue)
                colvalue = coltype(inputsecondvalue)
                if use_promote_types:
                    char = np.promote_types(rowvalue.dtype, colvalue.dtype).char
                else:
                    value = np.add(rowvalue, colvalue)
                    if isinstance(value, np.ndarray):
                        char = value.dtype.char
                    else:
                        # Scalar result: recover the dtype from its type.
                        char = np.dtype(type(value)).char
            except ValueError:
                char = '!'
            except OverflowError:
                char = '@'
            except TypeError:
                char = '#'
            print(char, end=' ')
        print()
+
+
def print_new_cast_table(*, can_cast=True, legacy=False, flags=False):
    """Prints new casts, the values given are default "can-cast" values, not
    actual ones.

    Parameters
    ----------
    can_cast : bool
        Print the can-cast (casting safety) table.
    legacy : bool
        Print the legacy-cast table.
    flags : bool
        Print the per-cast flags table.
    """
    from numpy.core._multiarray_tests import get_all_cast_information

    cast_table = {
        -1: " ",
        0: "#",  # No cast (classify as equivalent here)
        1: "#",  # equivalent casting
        2: "=",  # safe casting
        3: "~",  # same-kind casting
        4: ".",  # unsafe casting
    }
    flags_table = {
        0 : "▗", 7: "█",
        1: "▚", 2: "▐", 4: "▄",
                3: "▜", 5: "▙",
                        6: "▟",
    }

    cast_info = namedtuple("cast_info", ["can_cast", "legacy", "flags"])
    no_cast_info = cast_info(" ", " ", " ")

    casts = get_all_cast_information()
    table = {}
    dtypes = set()
    for cast in casts:
        dtypes.add(cast["from"])
        dtypes.add(cast["to"])

        if cast["from"] not in table:
            table[cast["from"]] = {}
        to_dict = table[cast["from"]]

        # Bug fix: the original assigned to ``can_cast``/``legacy``/``flags``
        # here, clobbering the keyword parameters with truthy glyph strings,
        # so the ``if can_cast:`` etc. selection checks below always fired and
        # the arguments were ignored.  Use distinct local names instead.
        cast_symbol = cast_table[cast["casting"]]
        legacy_symbol = "L" if cast["legacy"] else "."
        flag_bits = 0
        if cast["requires_pyapi"]:
            flag_bits |= 1
        if cast["supports_unaligned"]:
            flag_bits |= 2
        if cast["no_floatingpoint_errors"]:
            flag_bits |= 4

        flag_symbol = flags_table[flag_bits]
        to_dict[cast["to"]] = cast_info(can_cast=cast_symbol,
                                        legacy=legacy_symbol,
                                        flags=flag_symbol)

    # The np.dtype(x.type) is a bit strange, because dtype classes do
    # not expose much yet.
    types = np.typecodes["All"]
    def sorter(x):
        # This is a bit weird hack, to get a table as close as possible to
        # the one printing all typecodes (but expecting user-dtypes).
        dtype = np.dtype(x.type)
        try:
            indx = types.index(dtype.char)
        except ValueError:
            indx = np.inf
        return (indx, dtype.char)

    dtypes = sorted(dtypes, key=sorter)

    def print_table(field="can_cast"):
        # Header row of destination dtype characters.
        print('X', end=' ')
        for dt in dtypes:
            print(np.dtype(dt.type).char, end=' ')
        print()
        for from_dt in dtypes:
            print(np.dtype(from_dt.type).char, end=' ')
            row = table.get(from_dt, {})
            for to_dt in dtypes:
                print(getattr(row.get(to_dt, no_cast_info), field), end=' ')
            print()

    if can_cast:
        # Print the actual table:
        print()
        print("Casting: # is equivalent, = is safe, ~ is same-kind, and . is unsafe")
        print()
        print_table("can_cast")

    if legacy:
        print()
        print("L denotes a legacy cast . a non-legacy one.")
        print()
        print_table("legacy")

    if flags:
        print()
        print(f"{flags_table[0]}: no flags, {flags_table[1]}: PyAPI, "
              f"{flags_table[2]}: supports unaligned, {flags_table[4]}: no-float-errors")
        print()
        print_table("flags")
+
+
if __name__ == '__main__':
    # Script entry point: dump every coercion/casting table to stdout.
    print("can cast")
    print_cancast_table(np.typecodes['All'])
    print()
    print("In these tables, ValueError is '!', OverflowError is '@', TypeError is '#'")
    print()
    # np.add coercion, scalar operands only.
    print("scalar + scalar")
    print_coercion_table(np.typecodes['All'], 0, 0, False)
    print()
    print("scalar + neg scalar")
    print_coercion_table(np.typecodes['All'], 0, -1, False)
    print()
    # Same, but with the first operand wrapped in a rank-1 array.
    print("array + scalar")
    print_coercion_table(np.typecodes['All'], 0, 0, True)
    print()
    print("array + neg scalar")
    print_coercion_table(np.typecodes['All'], 0, -1, True)
    print()
    # np.promote_types instead of actual addition.
    print("promote_types")
    print_coercion_table(np.typecodes['All'], 0, 0, False, True)
    print("New casting type promotion:")
    print_new_cast_table(can_cast=True, legacy=True, flags=True)
diff --git a/.venv/lib/python3.12/site-packages/numpy/testing/setup.py b/.venv/lib/python3.12/site-packages/numpy/testing/setup.py
new file mode 100755
index 00000000..6f203e87
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/testing/setup.py
@@ -0,0 +1,21 @@
+#!/usr/bin/env python3
+
def configuration(parent_package='',top_path=None):
    """Return the numpy.distutils configuration for ``numpy.testing``."""
    from numpy.distutils.misc_util import Configuration
    config = Configuration('testing', parent_package, top_path)

    # Ship the subpackages and the .pyi type stubs with the build.
    config.add_subpackage('_private')
    config.add_subpackage('tests')
    config.add_data_files('*.pyi')
    config.add_data_files('_private/*.pyi')
    return config
+
if __name__ == '__main__':
    # Allow building this subpackage standalone via numpy.distutils.
    from numpy.distutils.core import setup
    setup(maintainer="NumPy Developers",
          maintainer_email="numpy-dev@numpy.org",
          description="NumPy test module",
          url="https://www.numpy.org",
          license="NumPy License (BSD Style)",
          configuration=configuration,
          )
diff --git a/.venv/lib/python3.12/site-packages/numpy/testing/tests/__init__.py b/.venv/lib/python3.12/site-packages/numpy/testing/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/testing/tests/__init__.py
diff --git a/.venv/lib/python3.12/site-packages/numpy/testing/tests/test_utils.py b/.venv/lib/python3.12/site-packages/numpy/testing/tests/test_utils.py
new file mode 100644
index 00000000..0aaa508e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/testing/tests/test_utils.py
@@ -0,0 +1,1626 @@
+import warnings
+import sys
+import os
+import itertools
+import pytest
+import weakref
+
+import numpy as np
+from numpy.testing import (
+    assert_equal, assert_array_equal, assert_almost_equal,
+    assert_array_almost_equal, assert_array_less, build_err_msg,
+    assert_raises, assert_warns, assert_no_warnings, assert_allclose,
+    assert_approx_equal, assert_array_almost_equal_nulp, assert_array_max_ulp,
+    clear_and_catch_warnings, suppress_warnings, assert_string_equal, assert_,
+    tempdir, temppath, assert_no_gc_cycles, HAS_REFCOUNT
+    )
+
+
+class _GenericTest:
+
+    def _test_equal(self, a, b):
+        self._assert_func(a, b)
+
+    def _test_not_equal(self, a, b):
+        with assert_raises(AssertionError):
+            self._assert_func(a, b)
+
+    def test_array_rank1_eq(self):
+        """Test two equal array of rank 1 are found equal."""
+        a = np.array([1, 2])
+        b = np.array([1, 2])
+
+        self._test_equal(a, b)
+
+    def test_array_rank1_noteq(self):
+        """Test two different array of rank 1 are found not equal."""
+        a = np.array([1, 2])
+        b = np.array([2, 2])
+
+        self._test_not_equal(a, b)
+
+    def test_array_rank2_eq(self):
+        """Test two equal array of rank 2 are found equal."""
+        a = np.array([[1, 2], [3, 4]])
+        b = np.array([[1, 2], [3, 4]])
+
+        self._test_equal(a, b)
+
+    def test_array_diffshape(self):
+        """Test two arrays with different shapes are found not equal."""
+        a = np.array([1, 2])
+        b = np.array([[1, 2], [1, 2]])
+
+        self._test_not_equal(a, b)
+
+    def test_objarray(self):
+        """Test object arrays."""
+        a = np.array([1, 1], dtype=object)
+        self._test_equal(a, 1)
+
+    def test_array_likes(self):
+        self._test_equal([1, 2, 3], (1, 2, 3))
+
+
class TestArrayEqual(_GenericTest):
    """Tests for assert_array_equal (shared cases come from _GenericTest)."""

    def setup_method(self):
        self._assert_func = assert_array_equal

    def test_generic_rank1(self):
        """Test rank 1 array for all dtypes."""
        def foo(t):
            a = np.empty(2, t)
            a.fill(1)
            b = a.copy()
            c = a.copy()
            c.fill(0)
            self._test_equal(a, b)
            self._test_not_equal(c, b)

        # Test numeric types and object
        for t in '?bhilqpBHILQPfdgFDG':
            foo(t)

        # Test strings
        for t in ['S1', 'U1']:
            foo(t)

    def test_0_ndim_array(self):
        # Large Python ints exercise the object/long-integer code paths.
        x = np.array(473963742225900817127911193656584771)
        y = np.array(18535119325151578301457182298393896)
        assert_raises(AssertionError, self._assert_func, x, y)

        y = x
        self._assert_func(x, y)

        x = np.array(43)
        y = np.array(10)
        assert_raises(AssertionError, self._assert_func, x, y)

        y = x
        self._assert_func(x, y)

    def test_generic_rank3(self):
        """Test rank 3 array for all dtypes."""
        def foo(t):
            a = np.empty((4, 2, 3), t)
            a.fill(1)
            b = a.copy()
            c = a.copy()
            c.fill(0)
            self._test_equal(a, b)
            self._test_not_equal(c, b)

        # Test numeric types and object
        for t in '?bhilqpBHILQPfdgFDG':
            foo(t)

        # Test strings
        for t in ['S1', 'U1']:
            foo(t)

    def test_nan_array(self):
        """Test arrays with nan values in them."""
        a = np.array([1, 2, np.nan])
        b = np.array([1, 2, np.nan])

        self._test_equal(a, b)

        c = np.array([1, 2, 3])
        self._test_not_equal(c, b)

    def test_string_arrays(self):
        """Test equal and unequal string arrays."""
        a = np.array(['floupi', 'floupa'])
        b = np.array(['floupi', 'floupa'])

        self._test_equal(a, b)

        c = np.array(['floupipi', 'floupa'])

        self._test_not_equal(c, b)

    def test_recarrays(self):
        """Test record arrays."""
        a = np.empty(2, [('floupi', float), ('floupa', float)])
        a['floupi'] = [1, 2]
        a['floupa'] = [1, 2]
        b = a.copy()

        self._test_equal(a, b)

        c = np.empty(2, [('floupipi', float),
                         ('floupi', float), ('floupa', float)])
        c['floupipi'] = a['floupi'].copy()
        c['floupa'] = a['floupa'].copy()

        # Mismatched structured dtypes raise TypeError, not AssertionError.
        with pytest.raises(TypeError):
            self._test_not_equal(c, b)

    def test_masked_nan_inf(self):
        # Regression test for gh-11121: masked entries must shadow nan/inf.
        a = np.ma.MaskedArray([3., 4., 6.5], mask=[False, True, False])
        b = np.array([3., np.nan, 6.5])
        self._test_equal(a, b)
        self._test_equal(b, a)
        a = np.ma.MaskedArray([3., 4., 6.5], mask=[True, False, False])
        b = np.array([np.inf, 4., 6.5])
        self._test_equal(a, b)
        self._test_equal(b, a)

    def test_subclass_that_overrides_eq(self):
        # While we cannot guarantee testing functions will always work for
        # subclasses, the tests should ideally rely only on subclasses having
        # comparison operators, not on them being able to store booleans
        # (which, e.g., astropy Quantity cannot usefully do). See gh-8452.
        class MyArray(np.ndarray):
            def __eq__(self, other):
                return bool(np.equal(self, other).all())

            def __ne__(self, other):
                return not self == other

        a = np.array([1., 2.]).view(MyArray)
        b = np.array([2., 3.]).view(MyArray)
        # BUG FIX: this was `assert_(type(a == a), bool)`, which passed
        # `bool` as the *message* argument and therefore asserted nothing.
        assert_(type(a == a) is bool)
        assert_(a == a)
        assert_(a != b)
        self._test_equal(a, a)
        self._test_not_equal(a, b)
        self._test_not_equal(b, a)

    def test_subclass_that_does_not_implement_npall(self):
        # A subclass opting out of __array_function__ still must be comparable.
        class MyArray(np.ndarray):
            def __array_function__(self, *args, **kwargs):
                return NotImplemented

        a = np.array([1., 2.]).view(MyArray)
        b = np.array([2., 3.]).view(MyArray)
        with assert_raises(TypeError):
            np.all(a)
        self._test_equal(a, a)
        self._test_not_equal(a, b)
        self._test_not_equal(b, a)

    def test_suppress_overflow_warnings(self):
        # Based on issue #18992: comparison must not trip FP errstate.
        with pytest.raises(AssertionError):
            with np.errstate(all="raise"):
                np.testing.assert_array_equal(
                    np.array([1, 2, 3], np.float32),
                    np.array([1, 1e-40, 3], np.float32))

    def test_array_vs_scalar_is_equal(self):
        """Test comparing an array with a scalar when all values are equal."""
        a = np.array([1., 1., 1.])
        b = 1.

        self._test_equal(a, b)

    def test_array_vs_scalar_not_equal(self):
        """Test comparing an array with a scalar when not all values equal."""
        a = np.array([1., 2., 3.])
        b = 1.

        self._test_not_equal(a, b)

    def test_array_vs_scalar_strict(self):
        """Test comparing an array with a scalar with strict option."""
        a = np.array([1., 1., 1.])
        b = 1.

        with pytest.raises(AssertionError):
            assert_array_equal(a, b, strict=True)

    def test_array_vs_array_strict(self):
        """Test comparing two arrays with strict option."""
        a = np.array([1., 1., 1.])
        b = np.array([1., 1., 1.])

        assert_array_equal(a, b, strict=True)

    def test_array_vs_float_array_strict(self):
        """Test comparing an int array with a float array with strict option."""
        a = np.array([1, 1, 1])
        b = np.array([1., 1., 1.])

        with pytest.raises(AssertionError):
            assert_array_equal(a, b, strict=True)
+
+
class TestBuildErrorMessage:
    """Tests for the build_err_msg helper used by the assert functions."""

    def test_build_err_msg_defaults(self):
        """Default names (ACTUAL/DESIRED) and verbose array reprs."""
        actual = np.array([1.00001, 2.00002, 3.00003])
        desired = np.array([1.00002, 2.00003, 3.00004])

        msg = build_err_msg([actual, desired], 'There is a mismatch')
        expected = ('\nItems are not equal: There is a mismatch\n ACTUAL: array(['
                    '1.00001, 2.00002, 3.00003])\n DESIRED: array([1.00002, '
                    '2.00003, 3.00004])')
        assert_equal(msg, expected)

    def test_build_err_msg_no_verbose(self):
        """verbose=False drops the array reprs entirely."""
        actual = np.array([1.00001, 2.00002, 3.00003])
        desired = np.array([1.00002, 2.00003, 3.00004])

        msg = build_err_msg([actual, desired], 'There is a mismatch',
                            verbose=False)
        assert_equal(msg, '\nItems are not equal: There is a mismatch')

    def test_build_err_msg_custom_names(self):
        """Caller-supplied names replace ACTUAL/DESIRED."""
        actual = np.array([1.00001, 2.00002, 3.00003])
        desired = np.array([1.00002, 2.00003, 3.00004])

        msg = build_err_msg([actual, desired], 'There is a mismatch',
                            names=('FOO', 'BAR'))
        expected = ('\nItems are not equal: There is a mismatch\n FOO: array(['
                    '1.00001, 2.00002, 3.00003])\n BAR: array([1.00002, 2.00003, '
                    '3.00004])')
        assert_equal(msg, expected)

    def test_build_err_msg_custom_precision(self):
        """precision= controls how many digits the array reprs show."""
        actual = np.array([1.000000001, 2.00002, 3.00003])
        desired = np.array([1.000000002, 2.00003, 3.00004])

        msg = build_err_msg([actual, desired], 'There is a mismatch',
                            precision=10)
        expected = ('\nItems are not equal: There is a mismatch\n ACTUAL: array(['
                    '1.000000001, 2.00002    , 3.00003    ])\n DESIRED: array(['
                    '1.000000002, 2.00003    , 3.00004    ])')
        assert_equal(msg, expected)
+
+
class TestEqual(TestArrayEqual):
    """Tests for assert_equal.

    Inherits all the array cases from TestArrayEqual and adds scalar/item
    cases: nan, inf, datetime/timedelta NaT, complex, signed zero, objects.
    """

    def setup_method(self):
        self._assert_func = assert_equal

    def test_nan_items(self):
        # nan equals nan for items and for array-likes, but an item never
        # equals a sequence wrapping it.
        self._assert_func(np.nan, np.nan)
        self._assert_func([np.nan], [np.nan])
        self._test_not_equal(np.nan, [np.nan])
        self._test_not_equal(np.nan, 1)

    def test_inf_items(self):
        self._assert_func(np.inf, np.inf)
        self._assert_func([np.inf], [np.inf])
        self._test_not_equal(np.inf, [np.inf])

    def test_datetime(self):
        self._test_equal(
            np.datetime64("2017-01-01", "s"),
            np.datetime64("2017-01-01", "s")
        )
        # The same instant in different units still compares equal.
        self._test_equal(
            np.datetime64("2017-01-01", "s"),
            np.datetime64("2017-01-01", "m")
        )

        # gh-10081
        self._test_not_equal(
            np.datetime64("2017-01-01", "s"),
            np.datetime64("2017-01-02", "s")
        )
        self._test_not_equal(
            np.datetime64("2017-01-01", "s"),
            np.datetime64("2017-01-02", "m")
        )

    def test_nat_items(self):
        # not a datetime
        nadt_no_unit = np.datetime64("NaT")
        nadt_s = np.datetime64("NaT", "s")
        nadt_d = np.datetime64("NaT", "ns")  # NOTE(review): "_d" but unit is ns
        # not a timedelta
        natd_no_unit = np.timedelta64("NaT")
        natd_s = np.timedelta64("NaT", "s")
        natd_d = np.timedelta64("NaT", "ns")

        dts = [nadt_no_unit, nadt_s, nadt_d]
        tds = [natd_no_unit, natd_s, natd_d]
        # NaT values of the same kind compare equal regardless of unit.
        for a, b in itertools.product(dts, dts):
            self._assert_func(a, b)
            self._assert_func([a], [b])
            self._test_not_equal([a], b)

        for a, b in itertools.product(tds, tds):
            self._assert_func(a, b)
            self._assert_func([a], [b])
            self._test_not_equal([a], b)

        # Mixing timedelta and datetime NaT must never compare equal, nor
        # should NaT equal any concrete datetime/timedelta value.
        for a, b in itertools.product(tds, dts):
            self._test_not_equal(a, b)
            self._test_not_equal(a, [b])
            self._test_not_equal([a], [b])
            self._test_not_equal([a], np.datetime64("2017-01-01", "s"))
            self._test_not_equal([b], np.datetime64("2017-01-01", "s"))
            self._test_not_equal([a], np.timedelta64(123, "s"))
            self._test_not_equal([b], np.timedelta64(123, "s"))

    def test_non_numeric(self):
        self._assert_func('ab', 'ab')
        self._test_not_equal('ab', 'abb')

    def test_complex_item(self):
        self._assert_func(complex(1, 2), complex(1, 2))
        self._assert_func(complex(1, np.nan), complex(1, np.nan))
        self._test_not_equal(complex(1, np.nan), complex(1, 2))
        self._test_not_equal(complex(np.nan, 1), complex(1, np.nan))
        self._test_not_equal(complex(np.nan, np.inf), complex(np.nan, 2))

    def test_negative_zero(self):
        # np.PZERO (+0.0) and np.NZERO (-0.0) are numerically equal, but
        # assert_equal distinguishes them by sign.
        self._test_not_equal(np.PZERO, np.NZERO)

    def test_complex(self):
        x = np.array([complex(1, 2), complex(1, np.nan)])
        y = np.array([complex(1, 2), complex(1, 2)])
        self._assert_func(x, x)
        self._test_not_equal(x, y)

    def test_object(self):
        #gh-12942: object arrays of datetimes must compare elementwise.
        import datetime
        a = np.array([datetime.datetime(2000, 1, 1),
                      datetime.datetime(2000, 1, 2)])
        self._test_not_equal(a, a[::-1])
+
+
class TestArrayAlmostEqual(_GenericTest):
    """Tests for assert_array_almost_equal (decimal-place comparison)."""

    def setup_method(self):
        self._assert_func = assert_array_almost_equal

    def test_closeness(self):
        # Note that in the course of time we ended up with
        #     `abs(x - y) < 1.5 * 10**(-decimal)`
        # instead of the previously documented
        #     `abs(x - y) < 0.5 * 10**(-decimal)`
        # so this check serves to preserve the wrongness.

        # Scalars: just below the 1.5 threshold passes, exactly at it fails.
        self._assert_func(1.499999, 0.0, decimal=0)
        assert_raises(AssertionError,
                      self._assert_func, 1.5, 0.0, decimal=0)

        # Arrays behave the same way.
        self._assert_func([1.499999], [0.0], decimal=0)
        assert_raises(AssertionError,
                      self._assert_func, [1.5], [0.0], decimal=0)

    def test_simple(self):
        lhs = np.array([1234.2222])
        rhs = np.array([1234.2223])

        # Equal to 3 and 4 decimals, but not to 5.
        self._assert_func(lhs, rhs, decimal=3)
        self._assert_func(lhs, rhs, decimal=4)
        assert_raises(AssertionError,
                      self._assert_func, lhs, rhs, decimal=5)

    def test_nan(self):
        nan_arr = np.array([np.nan])
        one_arr = np.array([1])
        inf_arr = np.array([np.inf])
        # nan matches only nan; inf is not close to nan in either order.
        self._assert_func(nan_arr, nan_arr)
        assert_raises(AssertionError,
                      self._assert_func, nan_arr, one_arr)
        assert_raises(AssertionError,
                      self._assert_func, nan_arr, inf_arr)
        assert_raises(AssertionError,
                      self._assert_func, inf_arr, nan_arr)

    def test_inf(self):
        lhs = np.array([[1., 2.], [3., 4.]])
        rhs = lhs.copy()
        lhs[0, 0] = np.inf
        assert_raises(AssertionError,
                      self._assert_func, lhs, rhs)
        rhs[0, 0] = -np.inf
        assert_raises(AssertionError,
                      self._assert_func, lhs, rhs)

    def test_subclass(self):
        plain = np.array([[1., 2.], [3., 4.]])
        masked = np.ma.masked_array([[1., 2.], [0., 4.]],
                                    [[False, False], [True, False]])
        # Masked entries are ignored, whichever argument carries the mask.
        self._assert_func(plain, masked)
        self._assert_func(masked, plain)
        self._assert_func(masked, masked)

        # Test fully masked as well (see gh-11123).
        scalar_masked = np.ma.MaskedArray(3.5, mask=True)
        vec = np.array([3., 4., 6.5])
        self._test_equal(scalar_masked, vec)
        self._test_equal(vec, scalar_masked)

        self._test_equal(np.ma.masked, vec)
        self._test_equal(vec, np.ma.masked)

        all_masked = np.ma.MaskedArray([3., 4., 6.5], mask=[True, True, True])
        other = np.array([1., 2., 3.])
        self._test_equal(all_masked, other)
        self._test_equal(other, all_masked)

        all_masked = np.ma.MaskedArray([3., 4., 6.5], mask=[True, True, True])
        scalar = np.array(1.)
        self._test_equal(all_masked, scalar)
        self._test_equal(scalar, all_masked)

    def test_subclass_that_cannot_be_bool(self):
        # While we cannot guarantee testing functions will always work for
        # subclasses, the tests should ideally rely only on subclasses having
        # comparison operators, not on them being able to store booleans
        # (which, e.g., astropy Quantity cannot usefully do). See gh-8452.
        class MyArray(np.ndarray):
            def __eq__(self, other):
                return super().__eq__(other).view(np.ndarray)

            def __lt__(self, other):
                return super().__lt__(other).view(np.ndarray)

            def all(self, *args, **kwargs):
                raise NotImplementedError

        arr = np.array([1., 2.]).view(MyArray)
        self._assert_func(arr, arr)
+
+
class TestAlmostEqual(_GenericTest):
    """Tests for assert_almost_equal, including exact failure-message layout.

    NOTE(review): the test_error_message* cases assert on exact lines of the
    failure message, so they track the installed numpy's array formatting.
    """

    def setup_method(self):
        self._assert_func = assert_almost_equal

    def test_closeness(self):
        # Note that in the course of time we ended up with
        #     `abs(x - y) < 1.5 * 10**(-decimal)`
        # instead of the previously documented
        #     `abs(x - y) < 0.5 * 10**(-decimal)`
        # so this check serves to preserve the wrongness.

        # test scalars
        self._assert_func(1.499999, 0.0, decimal=0)
        assert_raises(AssertionError,
                      lambda: self._assert_func(1.5, 0.0, decimal=0))

        # test arrays
        self._assert_func([1.499999], [0.0], decimal=0)
        assert_raises(AssertionError,
                      lambda: self._assert_func([1.5], [0.0], decimal=0))

    def test_nan_item(self):
        # nan only matches nan.
        self._assert_func(np.nan, np.nan)
        assert_raises(AssertionError,
                      lambda: self._assert_func(np.nan, 1))
        assert_raises(AssertionError,
                      lambda: self._assert_func(np.nan, np.inf))
        assert_raises(AssertionError,
                      lambda: self._assert_func(np.inf, np.nan))

    def test_inf_item(self):
        # Infinities match only with the same sign.
        self._assert_func(np.inf, np.inf)
        self._assert_func(-np.inf, -np.inf)
        assert_raises(AssertionError,
                      lambda: self._assert_func(np.inf, 1))
        assert_raises(AssertionError,
                      lambda: self._assert_func(-np.inf, np.inf))

    def test_simple_item(self):
        self._test_not_equal(1, 2)

    def test_complex_item(self):
        # Real and imaginary parts are compared independently.
        self._assert_func(complex(1, 2), complex(1, 2))
        self._assert_func(complex(1, np.nan), complex(1, np.nan))
        self._assert_func(complex(np.inf, np.nan), complex(np.inf, np.nan))
        self._test_not_equal(complex(1, np.nan), complex(1, 2))
        self._test_not_equal(complex(np.nan, 1), complex(1, np.nan))
        self._test_not_equal(complex(np.nan, np.inf), complex(np.nan, 2))

    def test_complex(self):
        x = np.array([complex(1, 2), complex(1, np.nan)])
        z = np.array([complex(1, 2), complex(np.nan, 1)])
        y = np.array([complex(1, 2), complex(1, 2)])
        self._assert_func(x, x)
        self._test_not_equal(x, y)
        self._test_not_equal(x, z)

    def test_error_message(self):
        """Check the message is formatted correctly for the decimal value.
           Also check the message when input includes inf or nan (gh12200)"""
        x = np.array([1.00000000001, 2.00000000002, 3.00003])
        y = np.array([1.00000000002, 2.00000000003, 3.00004])

        # Test with a different amount of decimal digits
        with pytest.raises(AssertionError) as exc_info:
            self._assert_func(x, y, decimal=12)
        msgs = str(exc_info.value).split('\n')
        assert_equal(msgs[3], 'Mismatched elements: 3 / 3 (100%)')
        assert_equal(msgs[4], 'Max absolute difference: 1.e-05')
        assert_equal(msgs[5], 'Max relative difference: 3.33328889e-06')
        assert_equal(
            msgs[6],
            ' x: array([1.00000000001, 2.00000000002, 3.00003      ])')
        assert_equal(
            msgs[7],
            ' y: array([1.00000000002, 2.00000000003, 3.00004      ])')

        # With the default value of decimal digits, only the 3rd element
        # differs. Note that we only check for the formatting of the arrays
        # themselves.
        with pytest.raises(AssertionError) as exc_info:
            self._assert_func(x, y)
        msgs = str(exc_info.value).split('\n')
        assert_equal(msgs[3], 'Mismatched elements: 1 / 3 (33.3%)')
        assert_equal(msgs[4], 'Max absolute difference: 1.e-05')
        assert_equal(msgs[5], 'Max relative difference: 3.33328889e-06')
        assert_equal(msgs[6], ' x: array([1.     , 2.     , 3.00003])')
        assert_equal(msgs[7], ' y: array([1.     , 2.     , 3.00004])')

        # Check the error message when input includes inf
        x = np.array([np.inf, 0])
        y = np.array([np.inf, 1])
        with pytest.raises(AssertionError) as exc_info:
            self._assert_func(x, y)
        msgs = str(exc_info.value).split('\n')
        assert_equal(msgs[3], 'Mismatched elements: 1 / 2 (50%)')
        assert_equal(msgs[4], 'Max absolute difference: 1.')
        assert_equal(msgs[5], 'Max relative difference: 1.')
        assert_equal(msgs[6], ' x: array([inf,  0.])')
        assert_equal(msgs[7], ' y: array([inf,  1.])')

        # Check the error message when dividing by zero
        x = np.array([1, 2])
        y = np.array([0, 0])
        with pytest.raises(AssertionError) as exc_info:
            self._assert_func(x, y)
        msgs = str(exc_info.value).split('\n')
        assert_equal(msgs[3], 'Mismatched elements: 2 / 2 (100%)')
        assert_equal(msgs[4], 'Max absolute difference: 2')
        assert_equal(msgs[5], 'Max relative difference: inf')

    def test_error_message_2(self):
        """Check the message is formatted correctly when either x or y is a scalar."""
        x = 2
        y = np.ones(20)
        with pytest.raises(AssertionError) as exc_info:
            self._assert_func(x, y)
        msgs = str(exc_info.value).split('\n')
        assert_equal(msgs[3], 'Mismatched elements: 20 / 20 (100%)')
        assert_equal(msgs[4], 'Max absolute difference: 1.')
        assert_equal(msgs[5], 'Max relative difference: 1.')

        # Relative difference is computed against the desired value, so
        # swapping the operands changes it from 1. to 0.5.
        y = 2
        x = np.ones(20)
        with pytest.raises(AssertionError) as exc_info:
            self._assert_func(x, y)
        msgs = str(exc_info.value).split('\n')
        assert_equal(msgs[3], 'Mismatched elements: 20 / 20 (100%)')
        assert_equal(msgs[4], 'Max absolute difference: 1.')
        assert_equal(msgs[5], 'Max relative difference: 0.5')

    def test_subclass_that_cannot_be_bool(self):
        # While we cannot guarantee testing functions will always work for
        # subclasses, the tests should ideally rely only on subclasses having
        # comparison operators, not on them being able to store booleans
        # (which, e.g., astropy Quantity cannot usefully do). See gh-8452.
        class MyArray(np.ndarray):
            def __eq__(self, other):
                return super().__eq__(other).view(np.ndarray)

            def __lt__(self, other):
                return super().__lt__(other).view(np.ndarray)

            def all(self, *args, **kwargs):
                raise NotImplementedError

        a = np.array([1., 2.]).view(MyArray)
        self._assert_func(a, a)
+
+
class TestApproxEqual:
    """Tests for assert_approx_equal (significant-digit comparison)."""

    def setup_method(self):
        self._assert_func = assert_approx_equal

    def test_simple_0d_arrays(self):
        """0-d arrays agree to 5 and 6 significant digits but not 7."""
        lhs = np.array(1234.22)
        rhs = np.array(1234.23)

        self._assert_func(lhs, rhs, significant=5)
        self._assert_func(lhs, rhs, significant=6)
        assert_raises(AssertionError,
                      self._assert_func, lhs, rhs, significant=7)

    def test_simple_items(self):
        """Plain floats agree up to 6 significant digits but not 7."""
        lhs = 1234.22
        rhs = 1234.23

        self._assert_func(lhs, rhs, significant=4)
        self._assert_func(lhs, rhs, significant=5)
        self._assert_func(lhs, rhs, significant=6)
        assert_raises(AssertionError,
                      self._assert_func, lhs, rhs, significant=7)

    def test_nan_array(self):
        """nan matches only nan (0-d array inputs)."""
        nan_arr = np.array(np.nan)
        one_arr = np.array(1)
        inf_arr = np.array(np.inf)
        self._assert_func(nan_arr, nan_arr)
        assert_raises(AssertionError, self._assert_func, nan_arr, one_arr)
        assert_raises(AssertionError, self._assert_func, nan_arr, inf_arr)
        assert_raises(AssertionError, self._assert_func, inf_arr, nan_arr)

    def test_nan_items(self):
        # NOTE(review): duplicates test_nan_array in the original; kept so the
        # collected test names stay the same.
        nan_arr = np.array(np.nan)
        one_arr = np.array(1)
        inf_arr = np.array(np.inf)
        self._assert_func(nan_arr, nan_arr)
        assert_raises(AssertionError, self._assert_func, nan_arr, one_arr)
        assert_raises(AssertionError, self._assert_func, nan_arr, inf_arr)
        assert_raises(AssertionError, self._assert_func, inf_arr, nan_arr)
+
+
class TestArrayAssertLess:
    """Tests for assert_array_less (strict elementwise ordering)."""

    def setup_method(self):
        self._assert_func = assert_array_less

    def test_simple_arrays(self):
        small = np.array([1.1, 2.2])
        big = np.array([1.2, 2.3])

        self._assert_func(small, big)
        assert_raises(AssertionError, self._assert_func, big, small)

        # One element ordered the wrong way fails in both directions.
        mixed = np.array([1.0, 2.3])
        assert_raises(AssertionError, self._assert_func, small, mixed)
        assert_raises(AssertionError, self._assert_func, mixed, small)

    def test_rank2(self):
        small = np.array([[1.1, 2.2], [3.3, 4.4]])
        big = np.array([[1.2, 2.3], [3.4, 4.5]])

        self._assert_func(small, big)
        assert_raises(AssertionError, self._assert_func, big, small)

        mixed = np.array([[1.0, 2.3], [3.4, 4.5]])
        assert_raises(AssertionError, self._assert_func, small, mixed)
        assert_raises(AssertionError, self._assert_func, mixed, small)

    def test_rank3(self):
        ones = np.ones(shape=(2, 2, 2))
        twos = np.ones(shape=(2, 2, 2)) + 1

        self._assert_func(ones, twos)
        assert_raises(AssertionError, self._assert_func, twos, ones)

        # Breaking the ordering at a single position fails both directions.
        twos[0, 0, 0] = 0
        assert_raises(AssertionError, self._assert_func, ones, twos)
        assert_raises(AssertionError, self._assert_func, twos, ones)

    def test_simple_items(self):
        lo = 1.1
        hi = 2.2

        self._assert_func(lo, hi)
        assert_raises(AssertionError, self._assert_func, hi, lo)

        # Scalar-versus-array comparisons broadcast the scalar.
        arr = np.array([2.2, 3.3])
        self._assert_func(lo, arr)
        assert_raises(AssertionError, self._assert_func, arr, lo)

        arr = np.array([1.0, 3.3])
        assert_raises(AssertionError, self._assert_func, lo, arr)

    def test_nan_noncompare(self):
        nan_arr = np.array(np.nan)
        one_arr = np.array(1)
        inf_arr = np.array(np.inf)
        # nan against nan passes; nan against anything comparable fails.
        self._assert_func(nan_arr, nan_arr)
        assert_raises(AssertionError, self._assert_func, one_arr, nan_arr)
        assert_raises(AssertionError, self._assert_func, nan_arr, one_arr)
        assert_raises(AssertionError, self._assert_func, nan_arr, inf_arr)
        assert_raises(AssertionError, self._assert_func, inf_arr, nan_arr)

    def test_nan_noncompare_array(self):
        vals = np.array([1.1, 2.2, 3.3])
        nan_arr = np.array(np.nan)

        assert_raises(AssertionError, self._assert_func, vals, nan_arr)
        assert_raises(AssertionError, self._assert_func, nan_arr, vals)

        vals = np.array([1.1, 2.2, np.nan])

        assert_raises(AssertionError, self._assert_func, vals, nan_arr)
        assert_raises(AssertionError, self._assert_func, nan_arr, vals)

        # Matching nan positions are accepted; ordering applies elsewhere.
        smaller = np.array([1.0, 2.0, np.nan])

        self._assert_func(smaller, vals)
        assert_raises(AssertionError, self._assert_func, vals, smaller)

    def test_inf_compare(self):
        one_arr = np.array(1)
        inf_arr = np.array(np.inf)

        self._assert_func(one_arr, inf_arr)
        self._assert_func(-inf_arr, one_arr)
        self._assert_func(-inf_arr, inf_arr)
        # Nothing is strictly less than -inf, and inf is not less than inf.
        assert_raises(AssertionError, self._assert_func, inf_arr, one_arr)
        assert_raises(AssertionError, self._assert_func, one_arr, -inf_arr)
        assert_raises(AssertionError, self._assert_func, inf_arr, inf_arr)
        assert_raises(AssertionError, self._assert_func, inf_arr, -inf_arr)
        assert_raises(AssertionError, self._assert_func, -inf_arr, -inf_arr)

    def test_inf_compare_array(self):
        vals = np.array([1.1, 2.2, np.inf])
        inf_arr = np.array(np.inf)

        assert_raises(AssertionError, self._assert_func, vals, inf_arr)
        assert_raises(AssertionError, self._assert_func, inf_arr, vals)
        assert_raises(AssertionError, self._assert_func, vals, -inf_arr)
        assert_raises(AssertionError, self._assert_func, -vals, -inf_arr)
        assert_raises(AssertionError, self._assert_func, -inf_arr, -vals)
        self._assert_func(-inf_arr, vals)
+
+
class TestWarns:
    """Tests for assert_warns and assert_no_warnings.

    Fixes a typo in the two failure messages: "preserver" -> "preserve"
    (these strings are only shown when the assertion fails).
    """

    def test_warn(self):
        """Callable form returns the function's result and restores filters."""
        def f():
            warnings.warn("yo")
            return 3

        # Snapshot the global warning filters so we can verify assert_warns
        # restores them afterwards.
        before_filters = sys.modules['warnings'].filters[:]
        assert_equal(assert_warns(UserWarning, f), 3)
        after_filters = sys.modules['warnings'].filters

        assert_raises(AssertionError, assert_no_warnings, f)
        assert_equal(assert_no_warnings(lambda x: x, 1), 1)

        # Check that the warnings state is unchanged
        assert_equal(before_filters, after_filters,
                     "assert_warns does not preserve warnings state")

    def test_context_manager(self):
        """Context-manager form behaves like the callable form."""
        before_filters = sys.modules['warnings'].filters[:]
        with assert_warns(UserWarning):
            warnings.warn("yo")
        after_filters = sys.modules['warnings'].filters

        def no_warnings():
            with assert_no_warnings():
                warnings.warn("yo")

        assert_raises(AssertionError, no_warnings)
        assert_equal(before_filters, after_filters,
                     "assert_warns does not preserve warnings state")

    def test_warn_wrong_warning(self):
        """A warning of the wrong category must propagate out of assert_warns."""
        def f():
            warnings.warn("yo", DeprecationWarning)

        failed = False
        with warnings.catch_warnings():
            # Turn DeprecationWarning into an error so a mismatched catch
            # inside assert_warns would surface here instead of passing.
            warnings.simplefilter("error", DeprecationWarning)
            try:
                # Should raise a DeprecationWarning
                assert_warns(UserWarning, f)
                failed = True
            except DeprecationWarning:
                pass

        if failed:
            raise AssertionError("wrong warning caught by assert_warn")
+
+
class TestAssertAllclose:
    """Tests for assert_allclose (rtol/atol tolerance comparison).

    NOTE(review): the message-line assertions below track the installed
    numpy's failure-message formatting.
    """

    def test_simple(self):
        x = 1e-3
        y = 1e-9

        # Within atol=1, but far outside the default relative tolerance.
        assert_allclose(x, y, atol=1)
        assert_raises(AssertionError, assert_allclose, x, y)

        a = np.array([x, y, x, y])
        b = np.array([x, y, x, x])

        assert_allclose(a, b, atol=1)
        assert_raises(AssertionError, assert_allclose, a, b)

        # A 1e-8 relative deviation passes the default rtol but not 1e-9.
        b[-1] = y * (1 + 1e-8)
        assert_allclose(a, b)
        assert_raises(AssertionError, assert_allclose, a, b, rtol=1e-9)

        # rtol is measured against the *desired* (second) argument.
        assert_allclose(6, 10, rtol=0.5)
        assert_raises(AssertionError, assert_allclose, 10, 6, rtol=0.5)

    def test_min_int(self):
        # The most negative int must not overflow during the comparison.
        a = np.array([np.iinfo(np.int_).min], dtype=np.int_)
        # Should not raise:
        assert_allclose(a, a)

    def test_report_fail_percentage(self):
        a = np.array([1, 1, 1, 1])
        b = np.array([1, 1, 1, 2])

        with pytest.raises(AssertionError) as exc_info:
            assert_allclose(a, b)
        msg = str(exc_info.value)
        assert_('Mismatched elements: 1 / 4 (25%)\n'
                'Max absolute difference: 1\n'
                'Max relative difference: 0.5' in msg)

    def test_equal_nan(self):
        a = np.array([np.nan])
        b = np.array([np.nan])
        # Should not raise:
        assert_allclose(a, b, equal_nan=True)

    def test_not_equal_nan(self):
        a = np.array([np.nan])
        b = np.array([np.nan])
        assert_raises(AssertionError, assert_allclose, a, b, equal_nan=False)

    def test_equal_nan_default(self):
        # Make sure equal_nan default behavior remains unchanged. (All
        # of these functions use assert_array_compare under the hood.)
        # None of these should raise.
        a = np.array([np.nan])
        b = np.array([np.nan])
        assert_array_equal(a, b)
        assert_array_almost_equal(a, b)
        assert_array_less(a, b)
        assert_allclose(a, b)

    def test_report_max_relative_error(self):
        a = np.array([0, 1])
        b = np.array([0, 2])

        with pytest.raises(AssertionError) as exc_info:
            assert_allclose(a, b)
        msg = str(exc_info.value)
        assert_('Max relative difference: 0.5' in msg)

    def test_timedelta(self):
        # see gh-18286: timedelta arrays (including NaT) must be accepted.
        a = np.array([[1, 2, 3, "NaT"]], dtype="m8[ns]")
        assert_allclose(a, a)

    def test_error_message_unsigned(self):
        """Check that the message is formatted correctly when overflow can occur
           (gh21768)"""
        # Ensure to test for potential overflow in the case of:
        #        x - y
        # and
        #        y - x
        x = np.asarray([0, 1, 8], dtype='uint8')
        y = np.asarray([4, 4, 4], dtype='uint8')
        with pytest.raises(AssertionError) as exc_info:
            assert_allclose(x, y, atol=3)
        msgs = str(exc_info.value).split('\n')
        assert_equal(msgs[4], 'Max absolute difference: 4')
+
+
class TestArrayAlmostEqualNulp:
    """Tests for assert_array_almost_equal_nulp at several precisions.

    All real/complex pass/fail cases share the same structure, so the
    bodies are factored into private helpers; the public test method
    names are kept so pytest still collects one test per dtype.
    """

    @staticmethod
    def _spread(dtype, lo, hi, num):
        # Positive and negative values spanning many orders of magnitude.
        vals = 10 ** np.linspace(lo, hi, num, dtype=dtype)
        return np.r_[-vals, vals]

    def _pass_real(self, x, nulp):
        # A deviation of nulp/2 units of least precision, in either
        # direction, must be accepted.
        fi = np.finfo(x.dtype)
        assert_array_almost_equal_nulp(x, x + x*fi.eps*nulp/2., nulp)
        assert_array_almost_equal_nulp(x, x - x*fi.epsneg*nulp/2., nulp)

    def _fail_real(self, x, nulp):
        # A deviation of 2*nulp ULPs, in either direction, must be rejected.
        fi = np.finfo(x.dtype)
        assert_raises(AssertionError, assert_array_almost_equal_nulp,
                      x, x + x*fi.eps*nulp*2., nulp)
        assert_raises(AssertionError, assert_array_almost_equal_nulp,
                      x, x - x*fi.epsneg*nulp*2., nulp)

    def _pass_complex(self, x, nulp):
        fi = np.finfo(x.dtype)
        xi = x + x*1j
        # Check the addition (eps) and subtraction (epsneg) directions.
        for step in (x*fi.eps*nulp, -x*fi.epsneg*nulp):
            y = x + step/2.
            assert_array_almost_equal_nulp(xi, x + y*1j, nulp)
            assert_array_almost_equal_nulp(xi, y + x*1j, nulp)
            # When the real and imaginary parts both change, the per-part
            # step needs to be at least a factor of sqrt(2) smaller.
            y = x + step/4.
            assert_array_almost_equal_nulp(xi, y + y*1j, nulp)

    def _fail_complex(self, x, nulp):
        fi = np.finfo(x.dtype)
        xi = x + x*1j
        for step in (x*fi.eps*nulp, -x*fi.epsneg*nulp):
            y = x + step*2.
            assert_raises(AssertionError, assert_array_almost_equal_nulp,
                          xi, x + y*1j, nulp)
            assert_raises(AssertionError, assert_array_almost_equal_nulp,
                          xi, y + x*1j, nulp)
            # Both parts moving: a single-nulp-scaled step already fails.
            y = x + step
            assert_raises(AssertionError, assert_array_almost_equal_nulp,
                          xi, y + y*1j, nulp)

    def _nan_payload_equal(self, ftype, utype, payload_mask):
        # Ignore ULP differences between NaNs with different payloads.
        # MIPS may reverse quiet and signaling NaNs, so start from the
        # builtin NaN rather than a hard-coded bit pattern.
        bits = np.array(np.nan, dtype=ftype).view(utype)
        flipped_bits = bits ^ utype(payload_mask)
        assert_array_max_ulp(bits.view(ftype), flipped_bits.view(ftype), 0)

    def test_float64_pass(self):
        # Use a few places above the lowest level (i.e. nulp=1).
        self._pass_real(self._spread(np.float64, -20, 20, 50), nulp=5)

    def test_float64_fail(self):
        self._fail_real(self._spread(np.float64, -20, 20, 50), nulp=5)

    def test_float64_ignore_nan(self):
        self._nan_payload_equal(np.float64, np.uint64, 0xffffffff)

    def test_float32_pass(self):
        self._pass_real(self._spread(np.float32, -20, 20, 50), nulp=5)

    def test_float32_fail(self):
        self._fail_real(self._spread(np.float32, -20, 20, 50), nulp=5)

    def test_float32_ignore_nan(self):
        self._nan_payload_equal(np.float32, np.uint32, 0xffff)

    def test_float16_pass(self):
        # float16's narrow exponent range forces a smaller spread.
        self._pass_real(self._spread(np.float16, -4, 4, 10), nulp=5)

    def test_float16_fail(self):
        self._fail_real(self._spread(np.float16, -4, 4, 10), nulp=5)

    def test_float16_ignore_nan(self):
        self._nan_payload_equal(np.float16, np.uint16, 0xff)

    def test_complex128_pass(self):
        self._pass_complex(self._spread(np.float64, -20, 20, 50), nulp=5)

    def test_complex128_fail(self):
        self._fail_complex(self._spread(np.float64, -20, 20, 50), nulp=5)

    def test_complex64_pass(self):
        self._pass_complex(self._spread(np.float32, -20, 20, 50), nulp=5)

    def test_complex64_fail(self):
        self._fail_complex(self._spread(np.float32, -20, 20, 50), nulp=5)
+
+
class TestULP:
    """Tests for assert_array_max_ulp (unit-in-the-last-place distance)."""

    def test_equal(self):
        # An array is 0 ULPs away from itself.
        x = np.random.randn(10)
        assert_array_max_ulp(x, x, maxulp=0)

    def test_single(self):
        # Generate 1 + small deviation, check that adding eps gives a few ULP
        x = np.ones(10).astype(np.float32)
        x += 0.01 * np.random.randn(10).astype(np.float32)
        eps = np.finfo(np.float32).eps
        assert_array_max_ulp(x, x+eps, maxulp=20)

    def test_double(self):
        # Generate 1 + small deviation, check that adding eps gives a few ULP
        x = np.ones(10).astype(np.float64)
        x += 0.01 * np.random.randn(10).astype(np.float64)
        eps = np.finfo(np.float64).eps
        assert_array_max_ulp(x, x+eps, maxulp=200)

    def test_inf(self):
        # Infinity is close (in ULP terms) to the largest finite value.
        for dt in [np.float32, np.float64]:
            inf = np.array([np.inf]).astype(dt)
            big = np.array([np.finfo(dt).max])
            assert_array_max_ulp(inf, big, maxulp=200)

    def test_nan(self):
        # Test that nan is 'far' from small, tiny, inf, max and min
        for dt, maxulp in [(np.float32, 1e6), (np.float64, 1e12)]:
            nan = np.array([np.nan]).astype(dt)
            others = [
                np.array([np.inf]).astype(dt),
                np.array([np.finfo(dt).max]),
                np.array([np.finfo(dt).tiny]),
                # Literal signed zeros instead of np.PZERO/np.NZERO:
                # those aliases were removed from the main numpy
                # namespace in NumPy 2.0 (NEP 52) and are identical
                # in value to 0.0 and -0.0.
                np.array([0.0]).astype(dt),
                np.array([-0.0]).astype(dt),
            ]
            for other in others:
                assert_raises(AssertionError, assert_array_max_ulp,
                              nan, other, maxulp=maxulp)
+
+
+class TestStringEqual:
+    def test_simple(self):
+        assert_string_equal("hello", "hello")
+        assert_string_equal("hello\nmultiline", "hello\nmultiline")
+
+        with pytest.raises(AssertionError) as exc_info:
+            assert_string_equal("foo\nbar", "hello\nbar")
+        msg = str(exc_info.value)
+        assert_equal(msg, "Differences in strings:\n- foo\n+ hello")
+
+        assert_raises(AssertionError,
+                      lambda: assert_string_equal("foo", "hello"))
+
+    def test_regex(self):
+        assert_string_equal("a+*b", "a+*b")
+
+        assert_raises(AssertionError,
+                      lambda: assert_string_equal("aaa", "a+b"))
+
+
def assert_warn_len_equal(mod, n_in_context):
    """Assert that *mod*'s ``__warningregistry__`` has the expected size.

    A module that has never triggered a warning has no
    ``__warningregistry__`` attribute at all (e.g. in a parallel test
    scenario; in a serial run an initial warning always creates it);
    that counts as zero entries.  The 'version' bookkeeping entry that
    Python 3 stores in the registry is excluded from the count.
    """
    registry = getattr(mod, '__warningregistry__', {})
    # bool('version' in registry) subtracts the bookkeeping entry if present.
    num_warns = len(registry) - ('version' in registry)
    assert_equal(num_warns, n_in_context)
+
+
def test_warn_len_equal_call_scenarios():
    """Probe both code paths of assert_warn_len_equal.

    The helper is called under varying circumstances depending on
    serial vs. parallel test scenarios; make sure neither path lets an
    assertion escape.
    """

    # Parallel scenario: no warning has been issued yet, so the module
    # has no __warningregistry__ attribute at all.
    class BareMod:
        pass

    assert_warn_len_equal(mod=BareMod(), n_in_context=0)

    # Serial scenario: the __warningregistry__ attribute is present and
    # already holds entries.
    class PopulatedMod:
        def __init__(self):
            self.__warningregistry__ = {'warning1': 1,
                                        'warning2': 2}

    assert_warn_len_equal(mod=PopulatedMod(), n_in_context=2)
+
+
+def _get_fresh_mod():
+    # Get this module, with warning registry empty
+    my_mod = sys.modules[__name__]
+    try:
+        my_mod.__warningregistry__.clear()
+    except AttributeError:
+        # will not have a __warningregistry__ unless warning has been
+        # raised in the module at some point
+        pass
+    return my_mod
+
+
def test_clear_and_catch_warnings():
    """Check clear_and_catch_warnings' handling of __warningregistry__."""
    # Initial state of module, no warnings
    my_mod = _get_fresh_mod()
    assert_equal(getattr(my_mod, '__warningregistry__', {}), {})
    # With the module specified, its registry is empty again after exit.
    with clear_and_catch_warnings(modules=[my_mod]):
        warnings.simplefilter('ignore')
        warnings.warn('Some warning')
    assert_equal(my_mod.__warningregistry__, {})
    # Without specified modules, don't clear warnings during context.
    # catch_warnings doesn't make an entry for 'ignore'.
    with clear_and_catch_warnings():
        warnings.simplefilter('ignore')
        warnings.warn('Some warning')
    assert_warn_len_equal(my_mod, 0)

    # Manually adding two warnings to the registry:
    my_mod.__warningregistry__ = {'warning1': 1,
                                  'warning2': 2}

    # Confirm that specifying module keeps old warning, does not add new
    with clear_and_catch_warnings(modules=[my_mod]):
        warnings.simplefilter('ignore')
        warnings.warn('Another warning')
    assert_warn_len_equal(my_mod, 2)

    # Another warning with no module spec: the registry count is back to
    # zero afterwards.
    with clear_and_catch_warnings():
        warnings.simplefilter('ignore')
        warnings.warn('Another warning')
    assert_warn_len_equal(my_mod, 0)
+
+
def test_suppress_warnings_module():
    """Check suppress_warnings filtering by the emitting module."""
    # Initial state of module, no warnings
    my_mod = _get_fresh_mod()
    assert_equal(getattr(my_mod, '__warningregistry__', {}), {})

    def warn_other_module():
        # Apply along axis is implemented in python; stacklevel=2 means
        # we end up inside its module, not ours.
        def warn(arr):
            warnings.warn("Some warning 2", stacklevel=2)
            return arr
        np.apply_along_axis(warn, 0, [0])

    # Test module based warning suppression:
    assert_warn_len_equal(my_mod, 0)
    with suppress_warnings() as sup:
        sup.record(UserWarning)
        # suppress warning from other module (may have .pyc ending),
        # if apply_along_axis is moved, had to be changed.
        sup.filter(module=np.lib.shape_base)
        warnings.warn("Some warning")
        warn_other_module()
    # Check that the suppression did test the file correctly (this module
    # got filtered): only our own warning was recorded.
    assert_equal(len(sup.log), 1)
    assert_equal(sup.log[0].message.args[0], "Some warning")
    assert_warn_len_equal(my_mod, 0)
    # Filtering on this module suppresses our own warning instead.
    sup = suppress_warnings()
    # Will have to be changed if apply_along_axis is moved:
    sup.filter(module=my_mod)
    with sup:
        warnings.warn('Some warning')
    assert_warn_len_equal(my_mod, 0)
    # And test repeat works (same instance entered a second time):
    sup.filter(module=my_mod)
    with sup:
        warnings.warn('Some warning')
    assert_warn_len_equal(my_mod, 0)

    # Without specified modules
    with suppress_warnings():
        warnings.simplefilter('ignore')
        warnings.warn('Some warning')
    assert_warn_len_equal(my_mod, 0)
+
+
def test_suppress_warnings_type():
    """Check suppress_warnings filtering by warning category."""
    # Start from a module whose warning registry is empty.
    my_mod = _get_fresh_mod()
    assert_equal(getattr(my_mod, '__warningregistry__', {}), {})

    # Category-based suppression used directly as a context manager:
    with suppress_warnings() as suppressor:
        suppressor.filter(UserWarning)
        warnings.warn('Some warning')
    assert_warn_len_equal(my_mod, 0)

    # The same instance can be pre-configured and entered later...
    suppressor = suppress_warnings()
    suppressor.filter(UserWarning)
    with suppressor:
        warnings.warn('Some warning')
    assert_warn_len_equal(my_mod, 0)

    # ...and re-entered after adding a module-based filter on top:
    suppressor.filter(module=my_mod)
    with suppressor:
        warnings.warn('Some warning')
    assert_warn_len_equal(my_mod, 0)

    # With no filters configured, an 'ignore' simplefilter inside the
    # context must also leave the registry untouched.
    with suppress_warnings():
        warnings.simplefilter('ignore')
        warnings.warn('Some warning')
    assert_warn_len_equal(my_mod, 0)
+
+
def test_suppress_warnings_decorate_no_record():
    """Using suppress_warnings as a decorator filters without recording."""
    suppressor = suppress_warnings()
    suppressor.filter(UserWarning)

    @suppressor
    def emit(category):
        warnings.warn('Some warning', category)

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        emit(UserWarning)     # suppressed by the decorator
        emit(RuntimeWarning)  # passes through
        assert_equal(len(caught), 1)
+
+
def test_suppress_warnings_record():
    """Check recording, recorder precedence, and context re-entry."""
    sup = suppress_warnings()
    log1 = sup.record()

    with sup:
        log2 = sup.record(message='Some other warning 2')
        sup.filter(message='Some warning')
        warnings.warn('Some warning')
        warnings.warn('Some other warning')
        warnings.warn('Some other warning 2')

        # Of the three warnings: 'Some warning' is filtered out,
        # 'Some other warning 2' lands in the specific recorder log2,
        # leaving 'Some other warning' for the catch-all log1 —
        # two recorded entries in total.
        assert_equal(len(sup.log), 2)
        assert_equal(len(log1), 1)
        assert_equal(len(log2),1)
        assert_equal(log2[0].message.args[0], 'Some other warning 2')

    # Do it again, with the same context to see if some warnings survived:
    # the counts must be identical to the first pass.
    with sup:
        log2 = sup.record(message='Some other warning 2')
        sup.filter(message='Some warning')
        warnings.warn('Some warning')
        warnings.warn('Some other warning')
        warnings.warn('Some other warning 2')

        assert_equal(len(sup.log), 2)
        assert_equal(len(log1), 1)
        assert_equal(len(log2), 1)
        assert_equal(log2[0].message.args[0], 'Some other warning 2')

    # Test nested: the inner context claims the matching warning, the
    # outer one records the remaining one.
    with suppress_warnings() as sup:
        sup.record()
        with suppress_warnings() as sup2:
            sup2.record(message='Some warning')
            warnings.warn('Some warning')
            warnings.warn('Some other warning')
            assert_equal(len(sup2.log), 1)
        assert_equal(len(sup.log), 1)
+
+
def test_suppress_warnings_forwarding():
    """Check the forwarding modes of nested suppress_warnings contexts."""
    def warn_other_module():
        # Apply along axis is implemented in python; stacklevel=2 means
        # we end up inside its module, not ours.
        def warn(arr):
            warnings.warn("Some warning", stacklevel=2)
            return arr
        np.apply_along_axis(warn, 0, [0])

    # "always": every emission is forwarded (2 warns -> 2 log entries).
    with suppress_warnings() as sup:
        sup.record()
        with suppress_warnings("always"):
            for i in range(2):
                warnings.warn("Some warning")

        assert_equal(len(sup.log), 2)

    # "location": 4 emissions from one call site collapse to one entry
    # per distinct location — the assertion below sees 2 entries, which
    # presumably reflects per-location bookkeeping; see suppress_warnings
    # docs for the exact rule.
    with suppress_warnings() as sup:
        sup.record()
        with suppress_warnings("location"):
            for i in range(2):
                warnings.warn("Some warning")
                warnings.warn("Some warning")

        assert_equal(len(sup.log), 2)

    # "module": repeated warnings from this module collapse to one entry;
    # warn_other_module() contributes the second (different module).
    with suppress_warnings() as sup:
        sup.record()
        with suppress_warnings("module"):
            for i in range(2):
                warnings.warn("Some warning")
                warnings.warn("Some warning")
                warn_other_module()

        assert_equal(len(sup.log), 2)

    # "once": each distinct message is forwarded a single time overall
    # (two distinct messages -> 2 entries despite repeated emissions).
    with suppress_warnings() as sup:
        sup.record()
        with suppress_warnings("once"):
            for i in range(2):
                warnings.warn("Some warning")
                warnings.warn("Some other warning")
                warn_other_module()

        assert_equal(len(sup.log), 2)
+
+
def test_tempdir():
    """tempdir() must remove its directory on clean and exceptional exit."""
    # Clean exit: the directory is usable inside and gone afterwards.
    with tempdir() as created:
        open(os.path.join(created, 'tmp'), 'w').close()
    assert_(not os.path.isdir(created))

    # Exceptional exit: the exception propagates, directory still removed.
    try:
        with tempdir() as created:
            raise ValueError()
    except ValueError:
        pass
    else:
        raise AssertionError("ValueError did not propagate")
    assert_(not os.path.isdir(created))
+
+
def test_temppath():
    """temppath() must delete its file on clean and exceptional exit."""
    # Clean exit: the path is writable inside and gone afterwards.
    with temppath() as created:
        open(created, 'w').close()
    assert_(not os.path.isfile(created))

    # Exceptional exit: the exception propagates, file still removed.
    try:
        with temppath() as created:
            raise ValueError()
    except ValueError:
        pass
    else:
        raise AssertionError("ValueError did not propagate")
    assert_(not os.path.isfile(created))
+
+
class my_cacw(clear_and_catch_warnings):
    """clear_and_catch_warnings subclass presetting default modules.

    Used by test_clear_and_catch_warnings_inherit to check that a
    subclass can supply the managed modules via ``class_modules``.
    """

    class_modules = (sys.modules[__name__],)
+
+
def test_clear_and_catch_warnings_inherit():
    """A clear_and_catch_warnings subclass can preset default modules."""
    fresh_mod = _get_fresh_mod()
    with my_cacw():
        warnings.simplefilter('ignore')
        warnings.warn('Some warning')
    # The registry of the subclass's default module is empty on exit.
    assert_equal(fresh_mod.__warningregistry__, {})
+
+
@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
class TestAssertNoGcCycles:
    """ Test assert_no_gc_cycles """
    def test_passes(self):
        # A structure with references but no cycle must be accepted both
        # as a context manager and as a callable wrapper.
        def no_cycle():
            b = []
            b.append([])
            return b

        with assert_no_gc_cycles():
            no_cycle()

        assert_no_gc_cycles(no_cycle)

    def test_asserts(self):
        # A self-referencing list forms a cycle and must be rejected in
        # both usage forms.
        def make_cycle():
            a = []
            a.append(a)
            a.append(a)
            return a

        with assert_raises(AssertionError):
            with assert_no_gc_cycles():
                make_cycle()

        with assert_raises(AssertionError):
            assert_no_gc_cycles(make_cycle)

    @pytest.mark.slow
    def test_fails(self):
        """
        Test that in cases where the garbage cannot be collected, we raise an
        error, instead of hanging forever trying to clear it.
        """

        class ReferenceCycleInDel:
            """
            An object that not only contains a reference cycle, but creates new
            cycles whenever it's garbage-collected and its __del__ runs
            """
            # Class-level switch so the finally block below can stop the
            # cycle-spawning behavior for all pending instances.
            make_cycle = True

            def __init__(self):
                self.cycle = self

            def __del__(self):
                # break the current cycle so that `self` can be freed
                self.cycle = None

                if ReferenceCycleInDel.make_cycle:
                    # but create a new one so that the garbage collector has more
                    # work to do.
                    ReferenceCycleInDel()

        try:
            # Keep only a weak reference so the GC is free to collect it.
            w = weakref.ref(ReferenceCycleInDel())
            try:
                with assert_raises(RuntimeError):
                    # this will be unable to get a baseline empty garbage
                    assert_no_gc_cycles(lambda: None)
            except AssertionError:
                # the above test is only necessary if the GC actually tried to free
                # our object anyway, which python 2.7 does not.
                if w() is not None:
                    pytest.skip("GC does not call __del__ on cyclic objects")
                    # NOTE(review): pytest.skip raises immediately, so the
                    # following raise is unreachable.
                    raise

        finally:
            # make sure that we stop creating reference cycles
            ReferenceCycleInDel.make_cycle = False