Diffstat (limited to '.venv/lib/python3.12/site-packages/numpy/core')
181 files changed, 125550 insertions, 0 deletions
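The first file added below, numpy/core/__init__.py, documents that numpy.core is a private module whose contents are re-exported through the main numpy namespace (it even aliases absolute as abs). As a minimal sketch of the intended public usage, not part of the diff itself:

    import numpy as np

    # Use the public namespace; numpy/core/__init__.py (shown below) notes that
    # numpy.core is private and re-exports everything through `numpy` itself.
    arr = np.array([1, 2, 3])   # ultimately provided by numpy.core.multiarray
    print(np.abs(-2.5))         # numpy.core aliases `absolute` as `abs`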
diff --git a/.venv/lib/python3.12/site-packages/numpy/core/__init__.py b/.venv/lib/python3.12/site-packages/numpy/core/__init__.py new file mode 100644 index 00000000..2d59b89e --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/__init__.py @@ -0,0 +1,180 @@ +""" +Contains the core of NumPy: ndarray, ufuncs, dtypes, etc. + +Please note that this module is private. All functions and objects +are available in the main ``numpy`` namespace - use that instead. + +""" + +import os +import warnings + +from numpy.version import version as __version__ + + +# disables OpenBLAS affinity setting of the main thread that limits +# python threads or processes to one core +env_added = [] +for envkey in ['OPENBLAS_MAIN_FREE', 'GOTOBLAS_MAIN_FREE']: + if envkey not in os.environ: + os.environ[envkey] = '1' + env_added.append(envkey) + +try: + from . import multiarray +except ImportError as exc: + import sys + msg = """ + +IMPORTANT: PLEASE READ THIS FOR ADVICE ON HOW TO SOLVE THIS ISSUE! + +Importing the numpy C-extensions failed. This error can happen for +many reasons, often due to issues with your setup or how NumPy was +installed. + +We have compiled some common reasons and troubleshooting tips at: + + https://numpy.org/devdocs/user/troubleshooting-importerror.html + +Please note and check the following: + + * The Python version is: Python%d.%d from "%s" + * The NumPy version is: "%s" + +and make sure that they are the versions you expect. +Please carefully study the documentation linked above for further help. + +Original error was: %s +""" % (sys.version_info[0], sys.version_info[1], sys.executable, + __version__, exc) + raise ImportError(msg) +finally: + for envkey in env_added: + del os.environ[envkey] +del envkey +del env_added +del os + +from . import umath + +# Check that multiarray,umath are pure python modules wrapping +# _multiarray_umath and not either of the old c-extension modules +if not (hasattr(multiarray, '_multiarray_umath') and + hasattr(umath, '_multiarray_umath')): + import sys + path = sys.modules['numpy'].__path__ + msg = ("Something is wrong with the numpy installation. " + "While importing we detected an older version of " + "numpy in {}. One method of fixing this is to repeatedly uninstall " + "numpy until none is found, then reinstall this version.") + raise ImportError(msg.format(path)) + +from . import numerictypes as nt +multiarray.set_typeDict(nt.sctypeDict) +from . import numeric +from .numeric import * +from . import fromnumeric +from .fromnumeric import * +from . import defchararray as char +from . import records +from . import records as rec +from .records import record, recarray, format_parser +# Note: module name memmap is overwritten by a class with same name +from .memmap import * +from .defchararray import chararray +from . import function_base +from .function_base import * +from . import _machar +from . import getlimits +from .getlimits import * +from . import shape_base +from .shape_base import * +from . import einsumfunc +from .einsumfunc import * +del nt + +from .numeric import absolute as abs + +# do this after everything else, to minimize the chance of this misleadingly +# appearing in an import-time traceback +from . import _add_newdocs +from . import _add_newdocs_scalars +# add these for module-freeze analysis (like PyInstaller) +from . import _dtype_ctypes +from . import _internal +from . import _dtype +from . 
import _methods + +__all__ = ['char', 'rec', 'memmap'] +__all__ += numeric.__all__ +__all__ += ['record', 'recarray', 'format_parser'] +__all__ += ['chararray'] +__all__ += function_base.__all__ +__all__ += getlimits.__all__ +__all__ += shape_base.__all__ +__all__ += einsumfunc.__all__ + +# We used to use `np.core._ufunc_reconstruct` to unpickle. This is unnecessary, +# but old pickles saved before 1.20 will be using it, and there is no reason +# to break loading them. +def _ufunc_reconstruct(module, name): + # The `fromlist` kwarg is required to ensure that `mod` points to the + # inner-most module rather than the parent package when module name is + # nested. This makes it possible to pickle non-toplevel ufuncs such as + # scipy.special.expit for instance. + mod = __import__(module, fromlist=[name]) + return getattr(mod, name) + + +def _ufunc_reduce(func): + # Report the `__name__`. pickle will try to find the module. Note that + # pickle supports for this `__name__` to be a `__qualname__`. It may + # make sense to add a `__qualname__` to ufuncs, to allow this more + # explicitly (Numba has ufuncs as attributes). + # See also: https://github.com/dask/distributed/issues/3450 + return func.__name__ + + +def _DType_reconstruct(scalar_type): + # This is a work-around to pickle type(np.dtype(np.float64)), etc. + # and it should eventually be replaced with a better solution, e.g. when + # DTypes become HeapTypes. + return type(dtype(scalar_type)) + + +def _DType_reduce(DType): + # As types/classes, most DTypes can simply be pickled by their name: + if not DType._legacy or DType.__module__ == "numpy.dtypes": + return DType.__name__ + + # However, user defined legacy dtypes (like rational) do not end up in + # `numpy.dtypes` as module and do not have a public class at all. + # For these, we pickle them by reconstructing them from the scalar type: + scalar_type = DType.type + return _DType_reconstruct, (scalar_type,) + + +def __getattr__(name): + # Deprecated 2022-11-22, NumPy 1.25. + if name == "MachAr": + warnings.warn( + "The `np.core.MachAr` is considered private API (NumPy 1.24)", + DeprecationWarning, stacklevel=2, + ) + return _machar.MachAr + raise AttributeError(f"Module {__name__!r} has no attribute {name!r}") + + +import copyreg + +copyreg.pickle(ufunc, _ufunc_reduce) +copyreg.pickle(type(dtype), _DType_reduce, _DType_reconstruct) + +# Unclutter namespace (must keep _*_reconstruct for unpickling) +del copyreg +del _ufunc_reduce +del _DType_reduce + +from numpy._pytesttester import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/.venv/lib/python3.12/site-packages/numpy/core/__init__.pyi b/.venv/lib/python3.12/site-packages/numpy/core/__init__.pyi new file mode 100644 index 00000000..4c7a42bf --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/__init__.pyi @@ -0,0 +1,2 @@ +# NOTE: The `np.core` namespace is deliberately kept empty due to it +# being private (despite the lack of leading underscore) diff --git a/.venv/lib/python3.12/site-packages/numpy/core/_add_newdocs.py b/.venv/lib/python3.12/site-packages/numpy/core/_add_newdocs.py new file mode 100644 index 00000000..6e29fcf5 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/_add_newdocs.py @@ -0,0 +1,7080 @@ +""" +This is only meant to add docs to objects defined in C-extension modules. +The purpose is to allow easier editing of the docstrings without +requiring a re-compile. + +NOTE: Many of the methods of ndarray have corresponding functions. 
+ If you update these docstrings, please keep also the ones in + core/fromnumeric.py, core/defmatrix.py up-to-date. + +""" + +from numpy.core.function_base import add_newdoc +from numpy.core.overrides import array_function_like_doc + + +############################################################################### +# +# flatiter +# +# flatiter needs a toplevel description +# +############################################################################### + +add_newdoc('numpy.core', 'flatiter', + """ + Flat iterator object to iterate over arrays. + + A `flatiter` iterator is returned by ``x.flat`` for any array `x`. + It allows iterating over the array as if it were a 1-D array, + either in a for-loop or by calling its `next` method. + + Iteration is done in row-major, C-style order (the last + index varying the fastest). The iterator can also be indexed using + basic slicing or advanced indexing. + + See Also + -------- + ndarray.flat : Return a flat iterator over an array. + ndarray.flatten : Returns a flattened copy of an array. + + Notes + ----- + A `flatiter` iterator can not be constructed directly from Python code + by calling the `flatiter` constructor. + + Examples + -------- + >>> x = np.arange(6).reshape(2, 3) + >>> fl = x.flat + >>> type(fl) + <class 'numpy.flatiter'> + >>> for item in fl: + ... print(item) + ... + 0 + 1 + 2 + 3 + 4 + 5 + + >>> fl[2:4] + array([2, 3]) + + """) + +# flatiter attributes + +add_newdoc('numpy.core', 'flatiter', ('base', + """ + A reference to the array that is iterated over. + + Examples + -------- + >>> x = np.arange(5) + >>> fl = x.flat + >>> fl.base is x + True + + """)) + + + +add_newdoc('numpy.core', 'flatiter', ('coords', + """ + An N-dimensional tuple of current coordinates. + + Examples + -------- + >>> x = np.arange(6).reshape(2, 3) + >>> fl = x.flat + >>> fl.coords + (0, 0) + >>> next(fl) + 0 + >>> fl.coords + (0, 1) + + """)) + + + +add_newdoc('numpy.core', 'flatiter', ('index', + """ + Current flat index into the array. + + Examples + -------- + >>> x = np.arange(6).reshape(2, 3) + >>> fl = x.flat + >>> fl.index + 0 + >>> next(fl) + 0 + >>> fl.index + 1 + + """)) + +# flatiter functions + +add_newdoc('numpy.core', 'flatiter', ('__array__', + """__array__(type=None) Get array from iterator + + """)) + + +add_newdoc('numpy.core', 'flatiter', ('copy', + """ + copy() + + Get a copy of the iterator as a 1-D array. + + Examples + -------- + >>> x = np.arange(6).reshape(2, 3) + >>> x + array([[0, 1, 2], + [3, 4, 5]]) + >>> fl = x.flat + >>> fl.copy() + array([0, 1, 2, 3, 4, 5]) + + """)) + + +############################################################################### +# +# nditer +# +############################################################################### + +add_newdoc('numpy.core', 'nditer', + """ + nditer(op, flags=None, op_flags=None, op_dtypes=None, order='K', casting='safe', op_axes=None, itershape=None, buffersize=0) + + Efficient multi-dimensional iterator object to iterate over arrays. + To get started using this object, see the + :ref:`introductory guide to array iteration <arrays.nditer>`. + + Parameters + ---------- + op : ndarray or sequence of array_like + The array(s) to iterate over. + + flags : sequence of str, optional + Flags to control the behavior of the iterator. + + * ``buffered`` enables buffering when required. + * ``c_index`` causes a C-order index to be tracked. + * ``f_index`` causes a Fortran-order index to be tracked. 
+ * ``multi_index`` causes a multi-index, or a tuple of indices + with one per iteration dimension, to be tracked. + * ``common_dtype`` causes all the operands to be converted to + a common data type, with copying or buffering as necessary. + * ``copy_if_overlap`` causes the iterator to determine if read + operands have overlap with write operands, and make temporary + copies as necessary to avoid overlap. False positives (needless + copying) are possible in some cases. + * ``delay_bufalloc`` delays allocation of the buffers until + a reset() call is made. Allows ``allocate`` operands to + be initialized before their values are copied into the buffers. + * ``external_loop`` causes the ``values`` given to be + one-dimensional arrays with multiple values instead of + zero-dimensional arrays. + * ``grow_inner`` allows the ``value`` array sizes to be made + larger than the buffer size when both ``buffered`` and + ``external_loop`` is used. + * ``ranged`` allows the iterator to be restricted to a sub-range + of the iterindex values. + * ``refs_ok`` enables iteration of reference types, such as + object arrays. + * ``reduce_ok`` enables iteration of ``readwrite`` operands + which are broadcasted, also known as reduction operands. + * ``zerosize_ok`` allows `itersize` to be zero. + op_flags : list of list of str, optional + This is a list of flags for each operand. At minimum, one of + ``readonly``, ``readwrite``, or ``writeonly`` must be specified. + + * ``readonly`` indicates the operand will only be read from. + * ``readwrite`` indicates the operand will be read from and written to. + * ``writeonly`` indicates the operand will only be written to. + * ``no_broadcast`` prevents the operand from being broadcasted. + * ``contig`` forces the operand data to be contiguous. + * ``aligned`` forces the operand data to be aligned. + * ``nbo`` forces the operand data to be in native byte order. + * ``copy`` allows a temporary read-only copy if required. + * ``updateifcopy`` allows a temporary read-write copy if required. + * ``allocate`` causes the array to be allocated if it is None + in the ``op`` parameter. + * ``no_subtype`` prevents an ``allocate`` operand from using a subtype. + * ``arraymask`` indicates that this operand is the mask to use + for selecting elements when writing to operands with the + 'writemasked' flag set. The iterator does not enforce this, + but when writing from a buffer back to the array, it only + copies those elements indicated by this mask. + * ``writemasked`` indicates that only elements where the chosen + ``arraymask`` operand is True will be written to. + * ``overlap_assume_elementwise`` can be used to mark operands that are + accessed only in the iterator order, to allow less conservative + copying when ``copy_if_overlap`` is present. + op_dtypes : dtype or tuple of dtype(s), optional + The required data type(s) of the operands. If copying or buffering + is enabled, the data will be converted to/from their original types. + order : {'C', 'F', 'A', 'K'}, optional + Controls the iteration order. 'C' means C order, 'F' means + Fortran order, 'A' means 'F' order if all the arrays are Fortran + contiguous, 'C' order otherwise, and 'K' means as close to the + order the array elements appear in memory as possible. This also + affects the element memory order of ``allocate`` operands, as they + are allocated to be compatible with iteration order. + Default is 'K'. 
+ casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional + Controls what kind of data casting may occur when making a copy + or buffering. Setting this to 'unsafe' is not recommended, + as it can adversely affect accumulations. + + * 'no' means the data types should not be cast at all. + * 'equiv' means only byte-order changes are allowed. + * 'safe' means only casts which can preserve values are allowed. + * 'same_kind' means only safe casts or casts within a kind, + like float64 to float32, are allowed. + * 'unsafe' means any data conversions may be done. + op_axes : list of list of ints, optional + If provided, is a list of ints or None for each operands. + The list of axes for an operand is a mapping from the dimensions + of the iterator to the dimensions of the operand. A value of + -1 can be placed for entries, causing that dimension to be + treated as `newaxis`. + itershape : tuple of ints, optional + The desired shape of the iterator. This allows ``allocate`` operands + with a dimension mapped by op_axes not corresponding to a dimension + of a different operand to get a value not equal to 1 for that + dimension. + buffersize : int, optional + When buffering is enabled, controls the size of the temporary + buffers. Set to 0 for the default value. + + Attributes + ---------- + dtypes : tuple of dtype(s) + The data types of the values provided in `value`. This may be + different from the operand data types if buffering is enabled. + Valid only before the iterator is closed. + finished : bool + Whether the iteration over the operands is finished or not. + has_delayed_bufalloc : bool + If True, the iterator was created with the ``delay_bufalloc`` flag, + and no reset() function was called on it yet. + has_index : bool + If True, the iterator was created with either the ``c_index`` or + the ``f_index`` flag, and the property `index` can be used to + retrieve it. + has_multi_index : bool + If True, the iterator was created with the ``multi_index`` flag, + and the property `multi_index` can be used to retrieve it. + index + When the ``c_index`` or ``f_index`` flag was used, this property + provides access to the index. Raises a ValueError if accessed + and ``has_index`` is False. + iterationneedsapi : bool + Whether iteration requires access to the Python API, for example + if one of the operands is an object array. + iterindex : int + An index which matches the order of iteration. + itersize : int + Size of the iterator. + itviews + Structured view(s) of `operands` in memory, matching the reordered + and optimized iterator access pattern. Valid only before the iterator + is closed. + multi_index + When the ``multi_index`` flag was used, this property + provides access to the index. Raises a ValueError if accessed + accessed and ``has_multi_index`` is False. + ndim : int + The dimensions of the iterator. + nop : int + The number of iterator operands. + operands : tuple of operand(s) + The array(s) to be iterated over. Valid only before the iterator is + closed. + shape : tuple of ints + Shape tuple, the shape of the iterator. + value + Value of ``operands`` at current iteration. Normally, this is a + tuple of array scalars, but if the flag ``external_loop`` is used, + it is a tuple of one dimensional arrays. + + Notes + ----- + `nditer` supersedes `flatiter`. The iterator implementation behind + `nditer` is also exposed by the NumPy C API. 
+ + The Python exposure supplies two iteration interfaces, one which follows + the Python iterator protocol, and another which mirrors the C-style + do-while pattern. The native Python approach is better in most cases, but + if you need the coordinates or index of an iterator, use the C-style pattern. + + Examples + -------- + Here is how we might write an ``iter_add`` function, using the + Python iterator protocol: + + >>> def iter_add_py(x, y, out=None): + ... addop = np.add + ... it = np.nditer([x, y, out], [], + ... [['readonly'], ['readonly'], ['writeonly','allocate']]) + ... with it: + ... for (a, b, c) in it: + ... addop(a, b, out=c) + ... return it.operands[2] + + Here is the same function, but following the C-style pattern: + + >>> def iter_add(x, y, out=None): + ... addop = np.add + ... it = np.nditer([x, y, out], [], + ... [['readonly'], ['readonly'], ['writeonly','allocate']]) + ... with it: + ... while not it.finished: + ... addop(it[0], it[1], out=it[2]) + ... it.iternext() + ... return it.operands[2] + + Here is an example outer product function: + + >>> def outer_it(x, y, out=None): + ... mulop = np.multiply + ... it = np.nditer([x, y, out], ['external_loop'], + ... [['readonly'], ['readonly'], ['writeonly', 'allocate']], + ... op_axes=[list(range(x.ndim)) + [-1] * y.ndim, + ... [-1] * x.ndim + list(range(y.ndim)), + ... None]) + ... with it: + ... for (a, b, c) in it: + ... mulop(a, b, out=c) + ... return it.operands[2] + + >>> a = np.arange(2)+1 + >>> b = np.arange(3)+1 + >>> outer_it(a,b) + array([[1, 2, 3], + [2, 4, 6]]) + + Here is an example function which operates like a "lambda" ufunc: + + >>> def luf(lamdaexpr, *args, **kwargs): + ... '''luf(lambdaexpr, op1, ..., opn, out=None, order='K', casting='safe', buffersize=0)''' + ... nargs = len(args) + ... op = (kwargs.get('out',None),) + args + ... it = np.nditer(op, ['buffered','external_loop'], + ... [['writeonly','allocate','no_broadcast']] + + ... [['readonly','nbo','aligned']]*nargs, + ... order=kwargs.get('order','K'), + ... casting=kwargs.get('casting','safe'), + ... buffersize=kwargs.get('buffersize',0)) + ... while not it.finished: + ... it[0] = lamdaexpr(*it[1:]) + ... it.iternext() + ... return it.operands[0] + + >>> a = np.arange(5) + >>> b = np.ones(5) + >>> luf(lambda i,j:i*i + j/2, a, b) + array([ 0.5, 1.5, 4.5, 9.5, 16.5]) + + If operand flags ``"writeonly"`` or ``"readwrite"`` are used the + operands may be views into the original data with the + `WRITEBACKIFCOPY` flag. In this case `nditer` must be used as a + context manager or the `nditer.close` method must be called before + using the result. The temporary data will be written back to the + original data when the `__exit__` function is called but not before: + + >>> a = np.arange(6, dtype='i4')[::-2] + >>> with np.nditer(a, [], + ... [['writeonly', 'updateifcopy']], + ... casting='unsafe', + ... op_dtypes=[np.dtype('f4')]) as i: + ... x = i.operands[0] + ... x[:] = [-1, -2, -3] + ... # a still unchanged here + >>> a, x + (array([-1, -2, -3], dtype=int32), array([-1., -2., -3.], dtype=float32)) + + It is important to note that once the iterator is exited, dangling + references (like `x` in the example) may or may not share data with + the original data `a`. If writeback semantics were active, i.e. if + `x.base.flags.writebackifcopy` is `True`, then exiting the iterator + will sever the connection between `x` and `a`, writing to `x` will + no longer write to `a`. 
If writeback semantics are not active, then + `x.data` will still point at some part of `a.data`, and writing to + one will affect the other. + + Context management and the `close` method appeared in version 1.15.0. + + """) + +# nditer methods + +add_newdoc('numpy.core', 'nditer', ('copy', + """ + copy() + + Get a copy of the iterator in its current state. + + Examples + -------- + >>> x = np.arange(10) + >>> y = x + 1 + >>> it = np.nditer([x, y]) + >>> next(it) + (array(0), array(1)) + >>> it2 = it.copy() + >>> next(it2) + (array(1), array(2)) + + """)) + +add_newdoc('numpy.core', 'nditer', ('operands', + """ + operands[`Slice`] + + The array(s) to be iterated over. Valid only before the iterator is closed. + """)) + +add_newdoc('numpy.core', 'nditer', ('debug_print', + """ + debug_print() + + Print the current state of the `nditer` instance and debug info to stdout. + + """)) + +add_newdoc('numpy.core', 'nditer', ('enable_external_loop', + """ + enable_external_loop() + + When the "external_loop" was not used during construction, but + is desired, this modifies the iterator to behave as if the flag + was specified. + + """)) + +add_newdoc('numpy.core', 'nditer', ('iternext', + """ + iternext() + + Check whether iterations are left, and perform a single internal iteration + without returning the result. Used in the C-style pattern do-while + pattern. For an example, see `nditer`. + + Returns + ------- + iternext : bool + Whether or not there are iterations left. + + """)) + +add_newdoc('numpy.core', 'nditer', ('remove_axis', + """ + remove_axis(i, /) + + Removes axis `i` from the iterator. Requires that the flag "multi_index" + be enabled. + + """)) + +add_newdoc('numpy.core', 'nditer', ('remove_multi_index', + """ + remove_multi_index() + + When the "multi_index" flag was specified, this removes it, allowing + the internal iteration structure to be optimized further. + + """)) + +add_newdoc('numpy.core', 'nditer', ('reset', + """ + reset() + + Reset the iterator to its initial state. + + """)) + +add_newdoc('numpy.core', 'nested_iters', + """ + nested_iters(op, axes, flags=None, op_flags=None, op_dtypes=None, \ + order="K", casting="safe", buffersize=0) + + Create nditers for use in nested loops + + Create a tuple of `nditer` objects which iterate in nested loops over + different axes of the op argument. The first iterator is used in the + outermost loop, the last in the innermost loop. Advancing one will change + the subsequent iterators to point at its new element. + + Parameters + ---------- + op : ndarray or sequence of array_like + The array(s) to iterate over. + + axes : list of list of int + Each item is used as an "op_axes" argument to an nditer + + flags, op_flags, op_dtypes, order, casting, buffersize (optional) + See `nditer` parameters of the same name + + Returns + ------- + iters : tuple of nditer + An nditer for each item in `axes`, outermost first + + See Also + -------- + nditer + + Examples + -------- + + Basic usage. Note how y is the "flattened" version of + [a[:, 0, :], a[:, 1, 0], a[:, 2, :]] since we specified + the first iter's axes as [1] + + >>> a = np.arange(12).reshape(2, 3, 2) + >>> i, j = np.nested_iters(a, [[1], [0, 2]], flags=["multi_index"]) + >>> for x in i: + ... print(i.multi_index) + ... for y in j: + ... 
print('', j.multi_index, y) + (0,) + (0, 0) 0 + (0, 1) 1 + (1, 0) 6 + (1, 1) 7 + (1,) + (0, 0) 2 + (0, 1) 3 + (1, 0) 8 + (1, 1) 9 + (2,) + (0, 0) 4 + (0, 1) 5 + (1, 0) 10 + (1, 1) 11 + + """) + +add_newdoc('numpy.core', 'nditer', ('close', + """ + close() + + Resolve all writeback semantics in writeable operands. + + .. versionadded:: 1.15.0 + + See Also + -------- + + :ref:`nditer-context-manager` + + """)) + + +############################################################################### +# +# broadcast +# +############################################################################### + +add_newdoc('numpy.core', 'broadcast', + """ + Produce an object that mimics broadcasting. + + Parameters + ---------- + in1, in2, ... : array_like + Input parameters. + + Returns + ------- + b : broadcast object + Broadcast the input parameters against one another, and + return an object that encapsulates the result. + Amongst others, it has ``shape`` and ``nd`` properties, and + may be used as an iterator. + + See Also + -------- + broadcast_arrays + broadcast_to + broadcast_shapes + + Examples + -------- + + Manually adding two vectors, using broadcasting: + + >>> x = np.array([[1], [2], [3]]) + >>> y = np.array([4, 5, 6]) + >>> b = np.broadcast(x, y) + + >>> out = np.empty(b.shape) + >>> out.flat = [u+v for (u,v) in b] + >>> out + array([[5., 6., 7.], + [6., 7., 8.], + [7., 8., 9.]]) + + Compare against built-in broadcasting: + + >>> x + y + array([[5, 6, 7], + [6, 7, 8], + [7, 8, 9]]) + + """) + +# attributes + +add_newdoc('numpy.core', 'broadcast', ('index', + """ + current index in broadcasted result + + Examples + -------- + >>> x = np.array([[1], [2], [3]]) + >>> y = np.array([4, 5, 6]) + >>> b = np.broadcast(x, y) + >>> b.index + 0 + >>> next(b), next(b), next(b) + ((1, 4), (1, 5), (1, 6)) + >>> b.index + 3 + + """)) + +add_newdoc('numpy.core', 'broadcast', ('iters', + """ + tuple of iterators along ``self``'s "components." + + Returns a tuple of `numpy.flatiter` objects, one for each "component" + of ``self``. + + See Also + -------- + numpy.flatiter + + Examples + -------- + >>> x = np.array([1, 2, 3]) + >>> y = np.array([[4], [5], [6]]) + >>> b = np.broadcast(x, y) + >>> row, col = b.iters + >>> next(row), next(col) + (1, 4) + + """)) + +add_newdoc('numpy.core', 'broadcast', ('ndim', + """ + Number of dimensions of broadcasted result. Alias for `nd`. + + .. versionadded:: 1.12.0 + + Examples + -------- + >>> x = np.array([1, 2, 3]) + >>> y = np.array([[4], [5], [6]]) + >>> b = np.broadcast(x, y) + >>> b.ndim + 2 + + """)) + +add_newdoc('numpy.core', 'broadcast', ('nd', + """ + Number of dimensions of broadcasted result. For code intended for NumPy + 1.12.0 and later the more consistent `ndim` is preferred. + + Examples + -------- + >>> x = np.array([1, 2, 3]) + >>> y = np.array([[4], [5], [6]]) + >>> b = np.broadcast(x, y) + >>> b.nd + 2 + + """)) + +add_newdoc('numpy.core', 'broadcast', ('numiter', + """ + Number of iterators possessed by the broadcasted result. + + Examples + -------- + >>> x = np.array([1, 2, 3]) + >>> y = np.array([[4], [5], [6]]) + >>> b = np.broadcast(x, y) + >>> b.numiter + 2 + + """)) + +add_newdoc('numpy.core', 'broadcast', ('shape', + """ + Shape of broadcasted result. + + Examples + -------- + >>> x = np.array([1, 2, 3]) + >>> y = np.array([[4], [5], [6]]) + >>> b = np.broadcast(x, y) + >>> b.shape + (3, 3) + + """)) + +add_newdoc('numpy.core', 'broadcast', ('size', + """ + Total size of broadcasted result. 
+ + Examples + -------- + >>> x = np.array([1, 2, 3]) + >>> y = np.array([[4], [5], [6]]) + >>> b = np.broadcast(x, y) + >>> b.size + 9 + + """)) + +add_newdoc('numpy.core', 'broadcast', ('reset', + """ + reset() + + Reset the broadcasted result's iterator(s). + + Parameters + ---------- + None + + Returns + ------- + None + + Examples + -------- + >>> x = np.array([1, 2, 3]) + >>> y = np.array([[4], [5], [6]]) + >>> b = np.broadcast(x, y) + >>> b.index + 0 + >>> next(b), next(b), next(b) + ((1, 4), (2, 4), (3, 4)) + >>> b.index + 3 + >>> b.reset() + >>> b.index + 0 + + """)) + +############################################################################### +# +# numpy functions +# +############################################################################### + +add_newdoc('numpy.core.multiarray', 'array', + """ + array(object, dtype=None, *, copy=True, order='K', subok=False, ndmin=0, + like=None) + + Create an array. + + Parameters + ---------- + object : array_like + An array, any object exposing the array interface, an object whose + ``__array__`` method returns an array, or any (nested) sequence. + If object is a scalar, a 0-dimensional array containing object is + returned. + dtype : data-type, optional + The desired data-type for the array. If not given, NumPy will try to use + a default ``dtype`` that can represent the values (by applying promotion + rules when necessary.) + copy : bool, optional + If true (default), then the object is copied. Otherwise, a copy will + only be made if ``__array__`` returns a copy, if obj is a nested + sequence, or if a copy is needed to satisfy any of the other + requirements (``dtype``, ``order``, etc.). + order : {'K', 'A', 'C', 'F'}, optional + Specify the memory layout of the array. If object is not an array, the + newly created array will be in C order (row major) unless 'F' is + specified, in which case it will be in Fortran order (column major). + If object is an array the following holds. + + ===== ========= =================================================== + order no copy copy=True + ===== ========= =================================================== + 'K' unchanged F & C order preserved, otherwise most similar order + 'A' unchanged F order if input is F and not C, otherwise C order + 'C' C order C order + 'F' F order F order + ===== ========= =================================================== + + When ``copy=False`` and a copy is made for other reasons, the result is + the same as if ``copy=True``, with some exceptions for 'A', see the + Notes section. The default order is 'K'. + subok : bool, optional + If True, then sub-classes will be passed-through, otherwise + the returned array will be forced to be a base-class array (default). + ndmin : int, optional + Specifies the minimum number of dimensions that the resulting + array should have. Ones will be prepended to the shape as + needed to meet this requirement. + ${ARRAY_FUNCTION_LIKE} + + .. versionadded:: 1.20.0 + + Returns + ------- + out : ndarray + An array object satisfying the specified requirements. + + See Also + -------- + empty_like : Return an empty array with shape and type of input. + ones_like : Return an array of ones with shape and type of input. + zeros_like : Return an array of zeros with shape and type of input. + full_like : Return a new array with shape of input filled with value. + empty : Return a new uninitialized array. + ones : Return a new array setting values to one. + zeros : Return a new array setting values to zero. 
+ full : Return a new array of given shape filled with value. + + + Notes + ----- + When order is 'A' and ``object`` is an array in neither 'C' nor 'F' order, + and a copy is forced by a change in dtype, then the order of the result is + not necessarily 'C' as expected. This is likely a bug. + + Examples + -------- + >>> np.array([1, 2, 3]) + array([1, 2, 3]) + + Upcasting: + + >>> np.array([1, 2, 3.0]) + array([ 1., 2., 3.]) + + More than one dimension: + + >>> np.array([[1, 2], [3, 4]]) + array([[1, 2], + [3, 4]]) + + Minimum dimensions 2: + + >>> np.array([1, 2, 3], ndmin=2) + array([[1, 2, 3]]) + + Type provided: + + >>> np.array([1, 2, 3], dtype=complex) + array([ 1.+0.j, 2.+0.j, 3.+0.j]) + + Data-type consisting of more than one element: + + >>> x = np.array([(1,2),(3,4)],dtype=[('a','<i4'),('b','<i4')]) + >>> x['a'] + array([1, 3]) + + Creating an array from sub-classes: + + >>> np.array(np.mat('1 2; 3 4')) + array([[1, 2], + [3, 4]]) + + >>> np.array(np.mat('1 2; 3 4'), subok=True) + matrix([[1, 2], + [3, 4]]) + + """.replace( + "${ARRAY_FUNCTION_LIKE}", + array_function_like_doc, + )) + +add_newdoc('numpy.core.multiarray', 'asarray', + """ + asarray(a, dtype=None, order=None, *, like=None) + + Convert the input to an array. + + Parameters + ---------- + a : array_like + Input data, in any form that can be converted to an array. This + includes lists, lists of tuples, tuples, tuples of tuples, tuples + of lists and ndarrays. + dtype : data-type, optional + By default, the data-type is inferred from the input data. + order : {'C', 'F', 'A', 'K'}, optional + Memory layout. 'A' and 'K' depend on the order of input array a. + 'C' row-major (C-style), + 'F' column-major (Fortran-style) memory representation. + 'A' (any) means 'F' if `a` is Fortran contiguous, 'C' otherwise + 'K' (keep) preserve input order + Defaults to 'K'. + ${ARRAY_FUNCTION_LIKE} + + .. versionadded:: 1.20.0 + + Returns + ------- + out : ndarray + Array interpretation of `a`. No copy is performed if the input + is already an ndarray with matching dtype and order. If `a` is a + subclass of ndarray, a base class ndarray is returned. + + See Also + -------- + asanyarray : Similar function which passes through subclasses. + ascontiguousarray : Convert input to a contiguous array. + asfarray : Convert input to a floating point ndarray. + asfortranarray : Convert input to an ndarray with column-major + memory order. + asarray_chkfinite : Similar function which checks input for NaNs and Infs. + fromiter : Create an array from an iterator. + fromfunction : Construct an array by executing a function on grid + positions. 
+ + Examples + -------- + Convert a list into an array: + + >>> a = [1, 2] + >>> np.asarray(a) + array([1, 2]) + + Existing arrays are not copied: + + >>> a = np.array([1, 2]) + >>> np.asarray(a) is a + True + + If `dtype` is set, array is copied only if dtype does not match: + + >>> a = np.array([1, 2], dtype=np.float32) + >>> np.asarray(a, dtype=np.float32) is a + True + >>> np.asarray(a, dtype=np.float64) is a + False + + Contrary to `asanyarray`, ndarray subclasses are not passed through: + + >>> issubclass(np.recarray, np.ndarray) + True + >>> a = np.array([(1.0, 2), (3.0, 4)], dtype='f4,i4').view(np.recarray) + >>> np.asarray(a) is a + False + >>> np.asanyarray(a) is a + True + + """.replace( + "${ARRAY_FUNCTION_LIKE}", + array_function_like_doc, + )) + +add_newdoc('numpy.core.multiarray', 'asanyarray', + """ + asanyarray(a, dtype=None, order=None, *, like=None) + + Convert the input to an ndarray, but pass ndarray subclasses through. + + Parameters + ---------- + a : array_like + Input data, in any form that can be converted to an array. This + includes scalars, lists, lists of tuples, tuples, tuples of tuples, + tuples of lists, and ndarrays. + dtype : data-type, optional + By default, the data-type is inferred from the input data. + order : {'C', 'F', 'A', 'K'}, optional + Memory layout. 'A' and 'K' depend on the order of input array a. + 'C' row-major (C-style), + 'F' column-major (Fortran-style) memory representation. + 'A' (any) means 'F' if `a` is Fortran contiguous, 'C' otherwise + 'K' (keep) preserve input order + Defaults to 'C'. + ${ARRAY_FUNCTION_LIKE} + + .. versionadded:: 1.20.0 + + Returns + ------- + out : ndarray or an ndarray subclass + Array interpretation of `a`. If `a` is an ndarray or a subclass + of ndarray, it is returned as-is and no copy is performed. + + See Also + -------- + asarray : Similar function which always returns ndarrays. + ascontiguousarray : Convert input to a contiguous array. + asfarray : Convert input to a floating point ndarray. + asfortranarray : Convert input to an ndarray with column-major + memory order. + asarray_chkfinite : Similar function which checks input for NaNs and + Infs. + fromiter : Create an array from an iterator. + fromfunction : Construct an array by executing a function on grid + positions. + + Examples + -------- + Convert a list into an array: + + >>> a = [1, 2] + >>> np.asanyarray(a) + array([1, 2]) + + Instances of `ndarray` subclasses are passed through as-is: + + >>> a = np.array([(1.0, 2), (3.0, 4)], dtype='f4,i4').view(np.recarray) + >>> np.asanyarray(a) is a + True + + """.replace( + "${ARRAY_FUNCTION_LIKE}", + array_function_like_doc, + )) + +add_newdoc('numpy.core.multiarray', 'ascontiguousarray', + """ + ascontiguousarray(a, dtype=None, *, like=None) + + Return a contiguous array (ndim >= 1) in memory (C order). + + Parameters + ---------- + a : array_like + Input array. + dtype : str or dtype object, optional + Data-type of returned array. + ${ARRAY_FUNCTION_LIKE} + + .. versionadded:: 1.20.0 + + Returns + ------- + out : ndarray + Contiguous array of same shape and content as `a`, with type `dtype` + if specified. + + See Also + -------- + asfortranarray : Convert input to an ndarray with column-major + memory order. + require : Return an ndarray that satisfies requirements. + ndarray.flags : Information about the memory layout of the array. 
+ + Examples + -------- + Starting with a Fortran-contiguous array: + + >>> x = np.ones((2, 3), order='F') + >>> x.flags['F_CONTIGUOUS'] + True + + Calling ``ascontiguousarray`` makes a C-contiguous copy: + + >>> y = np.ascontiguousarray(x) + >>> y.flags['C_CONTIGUOUS'] + True + >>> np.may_share_memory(x, y) + False + + Now, starting with a C-contiguous array: + + >>> x = np.ones((2, 3), order='C') + >>> x.flags['C_CONTIGUOUS'] + True + + Then, calling ``ascontiguousarray`` returns the same object: + + >>> y = np.ascontiguousarray(x) + >>> x is y + True + + Note: This function returns an array with at least one-dimension (1-d) + so it will not preserve 0-d arrays. + + """.replace( + "${ARRAY_FUNCTION_LIKE}", + array_function_like_doc, + )) + +add_newdoc('numpy.core.multiarray', 'asfortranarray', + """ + asfortranarray(a, dtype=None, *, like=None) + + Return an array (ndim >= 1) laid out in Fortran order in memory. + + Parameters + ---------- + a : array_like + Input array. + dtype : str or dtype object, optional + By default, the data-type is inferred from the input data. + ${ARRAY_FUNCTION_LIKE} + + .. versionadded:: 1.20.0 + + Returns + ------- + out : ndarray + The input `a` in Fortran, or column-major, order. + + See Also + -------- + ascontiguousarray : Convert input to a contiguous (C order) array. + asanyarray : Convert input to an ndarray with either row or + column-major memory order. + require : Return an ndarray that satisfies requirements. + ndarray.flags : Information about the memory layout of the array. + + Examples + -------- + Starting with a C-contiguous array: + + >>> x = np.ones((2, 3), order='C') + >>> x.flags['C_CONTIGUOUS'] + True + + Calling ``asfortranarray`` makes a Fortran-contiguous copy: + + >>> y = np.asfortranarray(x) + >>> y.flags['F_CONTIGUOUS'] + True + >>> np.may_share_memory(x, y) + False + + Now, starting with a Fortran-contiguous array: + + >>> x = np.ones((2, 3), order='F') + >>> x.flags['F_CONTIGUOUS'] + True + + Then, calling ``asfortranarray`` returns the same object: + + >>> y = np.asfortranarray(x) + >>> x is y + True + + Note: This function returns an array with at least one-dimension (1-d) + so it will not preserve 0-d arrays. + + """.replace( + "${ARRAY_FUNCTION_LIKE}", + array_function_like_doc, + )) + +add_newdoc('numpy.core.multiarray', 'empty', + """ + empty(shape, dtype=float, order='C', *, like=None) + + Return a new array of given shape and type, without initializing entries. + + Parameters + ---------- + shape : int or tuple of int + Shape of the empty array, e.g., ``(2, 3)`` or ``2``. + dtype : data-type, optional + Desired output data-type for the array, e.g, `numpy.int8`. Default is + `numpy.float64`. + order : {'C', 'F'}, optional, default: 'C' + Whether to store multi-dimensional data in row-major + (C-style) or column-major (Fortran-style) order in + memory. + ${ARRAY_FUNCTION_LIKE} + + .. versionadded:: 1.20.0 + + Returns + ------- + out : ndarray + Array of uninitialized (arbitrary) data of the given shape, dtype, and + order. Object arrays will be initialized to None. + + See Also + -------- + empty_like : Return an empty array with shape and type of input. + ones : Return a new array setting values to one. + zeros : Return a new array setting values to zero. + full : Return a new array of given shape filled with value. + + + Notes + ----- + `empty`, unlike `zeros`, does not set the array values to zero, + and may therefore be marginally faster. 
On the other hand, it requires + the user to manually set all the values in the array, and should be + used with caution. + + Examples + -------- + >>> np.empty([2, 2]) + array([[ -9.74499359e+001, 6.69583040e-309], + [ 2.13182611e-314, 3.06959433e-309]]) #uninitialized + + >>> np.empty([2, 2], dtype=int) + array([[-1073741821, -1067949133], + [ 496041986, 19249760]]) #uninitialized + + """.replace( + "${ARRAY_FUNCTION_LIKE}", + array_function_like_doc, + )) + +add_newdoc('numpy.core.multiarray', 'scalar', + """ + scalar(dtype, obj) + + Return a new scalar array of the given type initialized with obj. + + This function is meant mainly for pickle support. `dtype` must be a + valid data-type descriptor. If `dtype` corresponds to an object + descriptor, then `obj` can be any object, otherwise `obj` must be a + string. If `obj` is not given, it will be interpreted as None for object + type and as zeros for all other types. + + """) + +add_newdoc('numpy.core.multiarray', 'zeros', + """ + zeros(shape, dtype=float, order='C', *, like=None) + + Return a new array of given shape and type, filled with zeros. + + Parameters + ---------- + shape : int or tuple of ints + Shape of the new array, e.g., ``(2, 3)`` or ``2``. + dtype : data-type, optional + The desired data-type for the array, e.g., `numpy.int8`. Default is + `numpy.float64`. + order : {'C', 'F'}, optional, default: 'C' + Whether to store multi-dimensional data in row-major + (C-style) or column-major (Fortran-style) order in + memory. + ${ARRAY_FUNCTION_LIKE} + + .. versionadded:: 1.20.0 + + Returns + ------- + out : ndarray + Array of zeros with the given shape, dtype, and order. + + See Also + -------- + zeros_like : Return an array of zeros with shape and type of input. + empty : Return a new uninitialized array. + ones : Return a new array setting values to one. + full : Return a new array of given shape filled with value. + + Examples + -------- + >>> np.zeros(5) + array([ 0., 0., 0., 0., 0.]) + + >>> np.zeros((5,), dtype=int) + array([0, 0, 0, 0, 0]) + + >>> np.zeros((2, 1)) + array([[ 0.], + [ 0.]]) + + >>> s = (2,2) + >>> np.zeros(s) + array([[ 0., 0.], + [ 0., 0.]]) + + >>> np.zeros((2,), dtype=[('x', 'i4'), ('y', 'i4')]) # custom dtype + array([(0, 0), (0, 0)], + dtype=[('x', '<i4'), ('y', '<i4')]) + + """.replace( + "${ARRAY_FUNCTION_LIKE}", + array_function_like_doc, + )) + +add_newdoc('numpy.core.multiarray', 'set_typeDict', + """set_typeDict(dict) + + Set the internal dictionary that can look up an array type using a + registered code. + + """) + +add_newdoc('numpy.core.multiarray', 'fromstring', + """ + fromstring(string, dtype=float, count=-1, *, sep, like=None) + + A new 1-D array initialized from text data in a string. + + Parameters + ---------- + string : str + A string containing the data. + dtype : data-type, optional + The data type of the array; default: float. For binary input data, + the data must be in exactly this format. Most builtin numeric types are + supported and extension types may be supported. + + .. versionadded:: 1.18.0 + Complex dtypes. + + count : int, optional + Read this number of `dtype` elements from the data. If this is + negative (the default), the count will be determined from the + length of the data. + sep : str, optional + The string separating numbers in the data; extra whitespace between + elements is also ignored. + + .. deprecated:: 1.14 + Passing ``sep=''``, the default, is deprecated since it will + trigger the deprecated binary mode of this function. 
This mode + interprets `string` as binary bytes, rather than ASCII text with + decimal numbers, an operation which is better spelt + ``frombuffer(string, dtype, count)``. If `string` contains unicode + text, the binary mode of `fromstring` will first encode it into + bytes using utf-8, which will not produce sane results. + + ${ARRAY_FUNCTION_LIKE} + + .. versionadded:: 1.20.0 + + Returns + ------- + arr : ndarray + The constructed array. + + Raises + ------ + ValueError + If the string is not the correct size to satisfy the requested + `dtype` and `count`. + + See Also + -------- + frombuffer, fromfile, fromiter + + Examples + -------- + >>> np.fromstring('1 2', dtype=int, sep=' ') + array([1, 2]) + >>> np.fromstring('1, 2', dtype=int, sep=',') + array([1, 2]) + + """.replace( + "${ARRAY_FUNCTION_LIKE}", + array_function_like_doc, + )) + +add_newdoc('numpy.core.multiarray', 'compare_chararrays', + """ + compare_chararrays(a1, a2, cmp, rstrip) + + Performs element-wise comparison of two string arrays using the + comparison operator specified by `cmp_op`. + + Parameters + ---------- + a1, a2 : array_like + Arrays to be compared. + cmp : {"<", "<=", "==", ">=", ">", "!="} + Type of comparison. + rstrip : Boolean + If True, the spaces at the end of Strings are removed before the comparison. + + Returns + ------- + out : ndarray + The output array of type Boolean with the same shape as a and b. + + Raises + ------ + ValueError + If `cmp_op` is not valid. + TypeError + If at least one of `a` or `b` is a non-string array + + Examples + -------- + >>> a = np.array(["a", "b", "cde"]) + >>> b = np.array(["a", "a", "dec"]) + >>> np.compare_chararrays(a, b, ">", True) + array([False, True, False]) + + """) + +add_newdoc('numpy.core.multiarray', 'fromiter', + """ + fromiter(iter, dtype, count=-1, *, like=None) + + Create a new 1-dimensional array from an iterable object. + + Parameters + ---------- + iter : iterable object + An iterable object providing data for the array. + dtype : data-type + The data-type of the returned array. + + .. versionchanged:: 1.23 + Object and subarray dtypes are now supported (note that the final + result is not 1-D for a subarray dtype). + + count : int, optional + The number of items to read from *iterable*. The default is -1, + which means all data is read. + ${ARRAY_FUNCTION_LIKE} + + .. versionadded:: 1.20.0 + + Returns + ------- + out : ndarray + The output array. + + Notes + ----- + Specify `count` to improve performance. It allows ``fromiter`` to + pre-allocate the output array, instead of resizing it on demand. + + Examples + -------- + >>> iterable = (x*x for x in range(5)) + >>> np.fromiter(iterable, float) + array([ 0., 1., 4., 9., 16.]) + + A carefully constructed subarray dtype will lead to higher dimensional + results: + + >>> iterable = ((x+1, x+2) for x in range(5)) + >>> np.fromiter(iterable, dtype=np.dtype((int, 2))) + array([[1, 2], + [2, 3], + [3, 4], + [4, 5], + [5, 6]]) + + + """.replace( + "${ARRAY_FUNCTION_LIKE}", + array_function_like_doc, + )) + +add_newdoc('numpy.core.multiarray', 'fromfile', + """ + fromfile(file, dtype=float, count=-1, sep='', offset=0, *, like=None) + + Construct an array from data in a text or binary file. + + A highly efficient way of reading binary data with a known data-type, + as well as parsing simply formatted text files. Data written using the + `tofile` method can be read using this function. + + Parameters + ---------- + file : file or str or Path + Open file object or filename. + + .. 
versionchanged:: 1.17.0 + `pathlib.Path` objects are now accepted. + + dtype : data-type + Data type of the returned array. + For binary files, it is used to determine the size and byte-order + of the items in the file. + Most builtin numeric types are supported and extension types may be supported. + + .. versionadded:: 1.18.0 + Complex dtypes. + + count : int + Number of items to read. ``-1`` means all items (i.e., the complete + file). + sep : str + Separator between items if file is a text file. + Empty ("") separator means the file should be treated as binary. + Spaces (" ") in the separator match zero or more whitespace characters. + A separator consisting only of spaces must match at least one + whitespace. + offset : int + The offset (in bytes) from the file's current position. Defaults to 0. + Only permitted for binary files. + + .. versionadded:: 1.17.0 + ${ARRAY_FUNCTION_LIKE} + + .. versionadded:: 1.20.0 + + See also + -------- + load, save + ndarray.tofile + loadtxt : More flexible way of loading data from a text file. + + Notes + ----- + Do not rely on the combination of `tofile` and `fromfile` for + data storage, as the binary files generated are not platform + independent. In particular, no byte-order or data-type information is + saved. Data can be stored in the platform independent ``.npy`` format + using `save` and `load` instead. + + Examples + -------- + Construct an ndarray: + + >>> dt = np.dtype([('time', [('min', np.int64), ('sec', np.int64)]), + ... ('temp', float)]) + >>> x = np.zeros((1,), dtype=dt) + >>> x['time']['min'] = 10; x['temp'] = 98.25 + >>> x + array([((10, 0), 98.25)], + dtype=[('time', [('min', '<i8'), ('sec', '<i8')]), ('temp', '<f8')]) + + Save the raw data to disk: + + >>> import tempfile + >>> fname = tempfile.mkstemp()[1] + >>> x.tofile(fname) + + Read the raw data from disk: + + >>> np.fromfile(fname, dtype=dt) + array([((10, 0), 98.25)], + dtype=[('time', [('min', '<i8'), ('sec', '<i8')]), ('temp', '<f8')]) + + The recommended way to store and load data: + + >>> np.save(fname, x) + >>> np.load(fname + '.npy') + array([((10, 0), 98.25)], + dtype=[('time', [('min', '<i8'), ('sec', '<i8')]), ('temp', '<f8')]) + + """.replace( + "${ARRAY_FUNCTION_LIKE}", + array_function_like_doc, + )) + +add_newdoc('numpy.core.multiarray', 'frombuffer', + """ + frombuffer(buffer, dtype=float, count=-1, offset=0, *, like=None) + + Interpret a buffer as a 1-dimensional array. + + Parameters + ---------- + buffer : buffer_like + An object that exposes the buffer interface. + dtype : data-type, optional + Data-type of the returned array; default: float. + count : int, optional + Number of items to read. ``-1`` means all data in the buffer. + offset : int, optional + Start reading the buffer from this offset (in bytes); default: 0. + ${ARRAY_FUNCTION_LIKE} + + .. versionadded:: 1.20.0 + + Returns + ------- + out : ndarray + + See also + -------- + ndarray.tobytes + Inverse of this operation, construct Python bytes from the raw data + bytes in the array. + + Notes + ----- + If the buffer has data that is not in machine byte-order, this should + be specified as part of the data-type, e.g.:: + + >>> dt = np.dtype(int) + >>> dt = dt.newbyteorder('>') + >>> np.frombuffer(buf, dtype=dt) # doctest: +SKIP + + The data of the resulting array will not be byteswapped, but will be + interpreted correctly. + + This function creates a view into the original object. 
This should be safe + in general, but it may make sense to copy the result when the original + object is mutable or untrusted. + + Examples + -------- + >>> s = b'hello world' + >>> np.frombuffer(s, dtype='S1', count=5, offset=6) + array([b'w', b'o', b'r', b'l', b'd'], dtype='|S1') + + >>> np.frombuffer(b'\\x01\\x02', dtype=np.uint8) + array([1, 2], dtype=uint8) + >>> np.frombuffer(b'\\x01\\x02\\x03\\x04\\x05', dtype=np.uint8, count=3) + array([1, 2, 3], dtype=uint8) + + """.replace( + "${ARRAY_FUNCTION_LIKE}", + array_function_like_doc, + )) + +add_newdoc('numpy.core.multiarray', 'from_dlpack', + """ + from_dlpack(x, /) + + Create a NumPy array from an object implementing the ``__dlpack__`` + protocol. Generally, the returned NumPy array is a read-only view + of the input object. See [1]_ and [2]_ for more details. + + Parameters + ---------- + x : object + A Python object that implements the ``__dlpack__`` and + ``__dlpack_device__`` methods. + + Returns + ------- + out : ndarray + + References + ---------- + .. [1] Array API documentation, + https://data-apis.org/array-api/latest/design_topics/data_interchange.html#syntax-for-data-interchange-with-dlpack + + .. [2] Python specification for DLPack, + https://dmlc.github.io/dlpack/latest/python_spec.html + + Examples + -------- + >>> import torch + >>> x = torch.arange(10) + >>> # create a view of the torch tensor "x" in NumPy + >>> y = np.from_dlpack(x) + """) + +add_newdoc('numpy.core', 'fastCopyAndTranspose', + """ + fastCopyAndTranspose(a) + + .. deprecated:: 1.24 + + fastCopyAndTranspose is deprecated and will be removed. Use the copy and + transpose methods instead, e.g. ``arr.T.copy()`` + """) + +add_newdoc('numpy.core.multiarray', 'correlate', + """cross_correlate(a,v, mode=0)""") + +add_newdoc('numpy.core.multiarray', 'arange', + """ + arange([start,] stop[, step,], dtype=None, *, like=None) + + Return evenly spaced values within a given interval. + + ``arange`` can be called with a varying number of positional arguments: + + * ``arange(stop)``: Values are generated within the half-open interval + ``[0, stop)`` (in other words, the interval including `start` but + excluding `stop`). + * ``arange(start, stop)``: Values are generated within the half-open + interval ``[start, stop)``. + * ``arange(start, stop, step)`` Values are generated within the half-open + interval ``[start, stop)``, with spacing between values given by + ``step``. + + For integer arguments the function is roughly equivalent to the Python + built-in :py:class:`range`, but returns an ndarray rather than a ``range`` + instance. + + When using a non-integer step, such as 0.1, it is often better to use + `numpy.linspace`. + + See the Warning sections below for more information. + + Parameters + ---------- + start : integer or real, optional + Start of interval. The interval includes this value. The default + start value is 0. + stop : integer or real + End of interval. The interval does not include this value, except + in some cases where `step` is not an integer and floating point + round-off affects the length of `out`. + step : integer or real, optional + Spacing between values. For any output `out`, this is the distance + between two adjacent values, ``out[i+1] - out[i]``. The default + step size is 1. If `step` is specified as a position argument, + `start` must also be given. + dtype : dtype, optional + The type of the output array. If `dtype` is not given, infer the data + type from the other input arguments. + ${ARRAY_FUNCTION_LIKE} + + .. 
versionadded:: 1.20.0 + + Returns + ------- + arange : ndarray + Array of evenly spaced values. + + For floating point arguments, the length of the result is + ``ceil((stop - start)/step)``. Because of floating point overflow, + this rule may result in the last element of `out` being greater + than `stop`. + + Warnings + -------- + The length of the output might not be numerically stable. + + Another stability issue is due to the internal implementation of + `numpy.arange`. + The actual step value used to populate the array is + ``dtype(start + step) - dtype(start)`` and not `step`. Precision loss + can occur here, due to casting or due to using floating points when + `start` is much larger than `step`. This can lead to unexpected + behaviour. For example:: + + >>> np.arange(0, 5, 0.5, dtype=int) + array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) + >>> np.arange(-3, 3, 0.5, dtype=int) + array([-3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8]) + + In such cases, the use of `numpy.linspace` should be preferred. + + The built-in :py:class:`range` generates :std:doc:`Python built-in integers + that have arbitrary size <python:c-api/long>`, while `numpy.arange` + produces `numpy.int32` or `numpy.int64` numbers. This may result in + incorrect results for large integer values:: + + >>> power = 40 + >>> modulo = 10000 + >>> x1 = [(n ** power) % modulo for n in range(8)] + >>> x2 = [(n ** power) % modulo for n in np.arange(8)] + >>> print(x1) + [0, 1, 7776, 8801, 6176, 625, 6576, 4001] # correct + >>> print(x2) + [0, 1, 7776, 7185, 0, 5969, 4816, 3361] # incorrect + + See Also + -------- + numpy.linspace : Evenly spaced numbers with careful handling of endpoints. + numpy.ogrid: Arrays of evenly spaced numbers in N-dimensions. + numpy.mgrid: Grid-shaped arrays of evenly spaced numbers in N-dimensions. + :ref:`how-to-partition` + + Examples + -------- + >>> np.arange(3) + array([0, 1, 2]) + >>> np.arange(3.0) + array([ 0., 1., 2.]) + >>> np.arange(3,7) + array([3, 4, 5, 6]) + >>> np.arange(3,7,2) + array([3, 5]) + + """.replace( + "${ARRAY_FUNCTION_LIKE}", + array_function_like_doc, + )) + +add_newdoc('numpy.core.multiarray', '_get_ndarray_c_version', + """_get_ndarray_c_version() + + Return the compile time NPY_VERSION (formerly called NDARRAY_VERSION) number. + + """) + +add_newdoc('numpy.core.multiarray', '_reconstruct', + """_reconstruct(subtype, shape, dtype) + + Construct an empty array. Used by Pickles. + + """) + + +add_newdoc('numpy.core.multiarray', 'set_string_function', + """ + set_string_function(f, repr=1) + + Internal method to set a function to be used when pretty printing arrays. + + """) + +add_newdoc('numpy.core.multiarray', 'set_numeric_ops', + """ + set_numeric_ops(op1=func1, op2=func2, ...) + + Set numerical operators for array objects. + + .. deprecated:: 1.16 + + For the general case, use :c:func:`PyUFunc_ReplaceLoopBySignature`. + For ndarray subclasses, define the ``__array_ufunc__`` method and + override the relevant ufunc. + + Parameters + ---------- + op1, op2, ... : callable + Each ``op = func`` pair describes an operator to be replaced. + For example, ``add = lambda x, y: np.add(x, y) % 5`` would replace + addition by modulus 5 addition. + + Returns + ------- + saved_ops : list of callables + A list of all operators, stored before making replacements. + + Notes + ----- + .. warning:: + Use with care! Incorrect usage may lead to memory errors. + + A function replacing an operator cannot make use of that operator. + For example, when replacing add, you may not use ``+``. 
Instead, + directly call ufuncs. + + Examples + -------- + >>> def add_mod5(x, y): + ... return np.add(x, y) % 5 + ... + >>> old_funcs = np.set_numeric_ops(add=add_mod5) + + >>> x = np.arange(12).reshape((3, 4)) + >>> x + x + array([[0, 2, 4, 1], + [3, 0, 2, 4], + [1, 3, 0, 2]]) + + >>> ignore = np.set_numeric_ops(**old_funcs) # restore operators + + """) + +add_newdoc('numpy.core.multiarray', 'promote_types', + """ + promote_types(type1, type2) + + Returns the data type with the smallest size and smallest scalar + kind to which both ``type1`` and ``type2`` may be safely cast. + The returned data type is always considered "canonical", this mainly + means that the promoted dtype will always be in native byte order. + + This function is symmetric, but rarely associative. + + Parameters + ---------- + type1 : dtype or dtype specifier + First data type. + type2 : dtype or dtype specifier + Second data type. + + Returns + ------- + out : dtype + The promoted data type. + + Notes + ----- + Please see `numpy.result_type` for additional information about promotion. + + .. versionadded:: 1.6.0 + + Starting in NumPy 1.9, promote_types function now returns a valid string + length when given an integer or float dtype as one argument and a string + dtype as another argument. Previously it always returned the input string + dtype, even if it wasn't long enough to store the max integer/float value + converted to a string. + + .. versionchanged:: 1.23.0 + + NumPy now supports promotion for more structured dtypes. It will now + remove unnecessary padding from a structure dtype and promote included + fields individually. + + See Also + -------- + result_type, dtype, can_cast + + Examples + -------- + >>> np.promote_types('f4', 'f8') + dtype('float64') + + >>> np.promote_types('i8', 'f4') + dtype('float64') + + >>> np.promote_types('>i8', '<c8') + dtype('complex128') + + >>> np.promote_types('i4', 'S8') + dtype('S11') + + An example of a non-associative case: + + >>> p = np.promote_types + >>> p('S', p('i1', 'u1')) + dtype('S6') + >>> p(p('S', 'i1'), 'u1') + dtype('S4') + + """) + +add_newdoc('numpy.core.multiarray', 'c_einsum', + """ + c_einsum(subscripts, *operands, out=None, dtype=None, order='K', + casting='safe') + + *This documentation shadows that of the native python implementation of the `einsum` function, + except all references and examples related to the `optimize` argument (v 0.12.0) have been removed.* + + Evaluates the Einstein summation convention on the operands. + + Using the Einstein summation convention, many common multi-dimensional, + linear algebraic array operations can be represented in a simple fashion. + In *implicit* mode `einsum` computes these values. + + In *explicit* mode, `einsum` provides further flexibility to compute + other array operations that might not be considered classical Einstein + summation operations, by disabling, or forcing summation over specified + subscript labels. + + See the notes and examples for clarification. + + Parameters + ---------- + subscripts : str + Specifies the subscripts for summation as comma separated list of + subscript labels. An implicit (classical Einstein summation) + calculation is performed unless the explicit indicator '->' is + included as well as subscript labels of the precise output form. + operands : list of array_like + These are the arrays for the operation. + out : ndarray, optional + If provided, the calculation is done into this array. 
+ dtype : {data-type, None}, optional + If provided, forces the calculation to use the data type specified. + Note that you may have to also give a more liberal `casting` + parameter to allow the conversions. Default is None. + order : {'C', 'F', 'A', 'K'}, optional + Controls the memory layout of the output. 'C' means it should + be C contiguous. 'F' means it should be Fortran contiguous, + 'A' means it should be 'F' if the inputs are all 'F', 'C' otherwise. + 'K' means it should be as close to the layout of the inputs as + is possible, including arbitrarily permuted axes. + Default is 'K'. + casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional + Controls what kind of data casting may occur. Setting this to + 'unsafe' is not recommended, as it can adversely affect accumulations. + + * 'no' means the data types should not be cast at all. + * 'equiv' means only byte-order changes are allowed. + * 'safe' means only casts which can preserve values are allowed. + * 'same_kind' means only safe casts or casts within a kind, + like float64 to float32, are allowed. + * 'unsafe' means any data conversions may be done. + + Default is 'safe'. + optimize : {False, True, 'greedy', 'optimal'}, optional + Controls if intermediate optimization should occur. No optimization + will occur if False and True will default to the 'greedy' algorithm. + Also accepts an explicit contraction list from the ``np.einsum_path`` + function. See ``np.einsum_path`` for more details. Defaults to False. + + Returns + ------- + output : ndarray + The calculation based on the Einstein summation convention. + + See Also + -------- + einsum_path, dot, inner, outer, tensordot, linalg.multi_dot + + Notes + ----- + .. versionadded:: 1.6.0 + + The Einstein summation convention can be used to compute + many multi-dimensional, linear algebraic array operations. `einsum` + provides a succinct way of representing these. + + A non-exhaustive list of these operations, + which can be computed by `einsum`, is shown below along with examples: + + * Trace of an array, :py:func:`numpy.trace`. + * Return a diagonal, :py:func:`numpy.diag`. + * Array axis summations, :py:func:`numpy.sum`. + * Transpositions and permutations, :py:func:`numpy.transpose`. + * Matrix multiplication and dot product, :py:func:`numpy.matmul` :py:func:`numpy.dot`. + * Vector inner and outer products, :py:func:`numpy.inner` :py:func:`numpy.outer`. + * Broadcasting, element-wise and scalar multiplication, :py:func:`numpy.multiply`. + * Tensor contractions, :py:func:`numpy.tensordot`. + * Chained array operations, in efficient calculation order, :py:func:`numpy.einsum_path`. + + The subscripts string is a comma-separated list of subscript labels, + where each label refers to a dimension of the corresponding operand. + Whenever a label is repeated it is summed, so ``np.einsum('i,i', a, b)`` + is equivalent to :py:func:`np.inner(a,b) <numpy.inner>`. If a label + appears only once, it is not summed, so ``np.einsum('i', a)`` produces a + view of ``a`` with no changes. A further example ``np.einsum('ij,jk', a, b)`` + describes traditional matrix multiplication and is equivalent to + :py:func:`np.matmul(a,b) <numpy.matmul>`. Repeated subscript labels in one + operand take the diagonal. For example, ``np.einsum('ii', a)`` is equivalent + to :py:func:`np.trace(a) <numpy.trace>`. + + In *implicit mode*, the chosen subscripts are important + since the axes of the output are reordered alphabetically. 
This + means that ``np.einsum('ij', a)`` doesn't affect a 2D array, while + ``np.einsum('ji', a)`` takes its transpose. Additionally, + ``np.einsum('ij,jk', a, b)`` returns a matrix multiplication, while, + ``np.einsum('ij,jh', a, b)`` returns the transpose of the + multiplication since subscript 'h' precedes subscript 'i'. + + In *explicit mode* the output can be directly controlled by + specifying output subscript labels. This requires the + identifier '->' as well as the list of output subscript labels. + This feature increases the flexibility of the function since + summing can be disabled or forced when required. The call + ``np.einsum('i->', a)`` is like :py:func:`np.sum(a, axis=-1) <numpy.sum>`, + and ``np.einsum('ii->i', a)`` is like :py:func:`np.diag(a) <numpy.diag>`. + The difference is that `einsum` does not allow broadcasting by default. + Additionally ``np.einsum('ij,jh->ih', a, b)`` directly specifies the + order of the output subscript labels and therefore returns matrix + multiplication, unlike the example above in implicit mode. + + To enable and control broadcasting, use an ellipsis. Default + NumPy-style broadcasting is done by adding an ellipsis + to the left of each term, like ``np.einsum('...ii->...i', a)``. + To take the trace along the first and last axes, + you can do ``np.einsum('i...i', a)``, or to do a matrix-matrix + product with the left-most indices instead of rightmost, one can do + ``np.einsum('ij...,jk...->ik...', a, b)``. + + When there is only one operand, no axes are summed, and no output + parameter is provided, a view into the operand is returned instead + of a new array. Thus, taking the diagonal as ``np.einsum('ii->i', a)`` + produces a view (changed in version 1.10.0). + + `einsum` also provides an alternative way to provide the subscripts + and operands as ``einsum(op0, sublist0, op1, sublist1, ..., [sublistout])``. + If the output shape is not provided in this format `einsum` will be + calculated in implicit mode, otherwise it will be performed explicitly. + The examples below have corresponding `einsum` calls with the two + parameter methods. + + .. versionadded:: 1.10.0 + + Views returned from einsum are now writeable whenever the input array + is writeable. For example, ``np.einsum('ijk...->kji...', a)`` will now + have the same effect as :py:func:`np.swapaxes(a, 0, 2) <numpy.swapaxes>` + and ``np.einsum('ii->i', a)`` will return a writeable view of the diagonal + of a 2D array. 
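A minimal sketch of the implicit-mode reordering described above, using small illustrative arrays ``a`` and ``b``: because the unsummed labels are ordered alphabetically, ``'ij,jh'`` yields the transpose of the matrix product.

    >>> a = np.arange(6).reshape(2, 3)
    >>> b = np.arange(12).reshape(3, 4)
    >>> np.array_equal(np.einsum('ij,jh', a, b), (a @ b).T)
    True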
+ + Examples + -------- + >>> a = np.arange(25).reshape(5,5) + >>> b = np.arange(5) + >>> c = np.arange(6).reshape(2,3) + + Trace of a matrix: + + >>> np.einsum('ii', a) + 60 + >>> np.einsum(a, [0,0]) + 60 + >>> np.trace(a) + 60 + + Extract the diagonal (requires explicit form): + + >>> np.einsum('ii->i', a) + array([ 0, 6, 12, 18, 24]) + >>> np.einsum(a, [0,0], [0]) + array([ 0, 6, 12, 18, 24]) + >>> np.diag(a) + array([ 0, 6, 12, 18, 24]) + + Sum over an axis (requires explicit form): + + >>> np.einsum('ij->i', a) + array([ 10, 35, 60, 85, 110]) + >>> np.einsum(a, [0,1], [0]) + array([ 10, 35, 60, 85, 110]) + >>> np.sum(a, axis=1) + array([ 10, 35, 60, 85, 110]) + + For higher dimensional arrays summing a single axis can be done with ellipsis: + + >>> np.einsum('...j->...', a) + array([ 10, 35, 60, 85, 110]) + >>> np.einsum(a, [Ellipsis,1], [Ellipsis]) + array([ 10, 35, 60, 85, 110]) + + Compute a matrix transpose, or reorder any number of axes: + + >>> np.einsum('ji', c) + array([[0, 3], + [1, 4], + [2, 5]]) + >>> np.einsum('ij->ji', c) + array([[0, 3], + [1, 4], + [2, 5]]) + >>> np.einsum(c, [1,0]) + array([[0, 3], + [1, 4], + [2, 5]]) + >>> np.transpose(c) + array([[0, 3], + [1, 4], + [2, 5]]) + + Vector inner products: + + >>> np.einsum('i,i', b, b) + 30 + >>> np.einsum(b, [0], b, [0]) + 30 + >>> np.inner(b,b) + 30 + + Matrix vector multiplication: + + >>> np.einsum('ij,j', a, b) + array([ 30, 80, 130, 180, 230]) + >>> np.einsum(a, [0,1], b, [1]) + array([ 30, 80, 130, 180, 230]) + >>> np.dot(a, b) + array([ 30, 80, 130, 180, 230]) + >>> np.einsum('...j,j', a, b) + array([ 30, 80, 130, 180, 230]) + + Broadcasting and scalar multiplication: + + >>> np.einsum('..., ...', 3, c) + array([[ 0, 3, 6], + [ 9, 12, 15]]) + >>> np.einsum(',ij', 3, c) + array([[ 0, 3, 6], + [ 9, 12, 15]]) + >>> np.einsum(3, [Ellipsis], c, [Ellipsis]) + array([[ 0, 3, 6], + [ 9, 12, 15]]) + >>> np.multiply(3, c) + array([[ 0, 3, 6], + [ 9, 12, 15]]) + + Vector outer product: + + >>> np.einsum('i,j', np.arange(2)+1, b) + array([[0, 1, 2, 3, 4], + [0, 2, 4, 6, 8]]) + >>> np.einsum(np.arange(2)+1, [0], b, [1]) + array([[0, 1, 2, 3, 4], + [0, 2, 4, 6, 8]]) + >>> np.outer(np.arange(2)+1, b) + array([[0, 1, 2, 3, 4], + [0, 2, 4, 6, 8]]) + + Tensor contraction: + + >>> a = np.arange(60.).reshape(3,4,5) + >>> b = np.arange(24.).reshape(4,3,2) + >>> np.einsum('ijk,jil->kl', a, b) + array([[ 4400., 4730.], + [ 4532., 4874.], + [ 4664., 5018.], + [ 4796., 5162.], + [ 4928., 5306.]]) + >>> np.einsum(a, [0,1,2], b, [1,0,3], [2,3]) + array([[ 4400., 4730.], + [ 4532., 4874.], + [ 4664., 5018.], + [ 4796., 5162.], + [ 4928., 5306.]]) + >>> np.tensordot(a,b, axes=([1,0],[0,1])) + array([[ 4400., 4730.], + [ 4532., 4874.], + [ 4664., 5018.], + [ 4796., 5162.], + [ 4928., 5306.]]) + + Writeable returned arrays (since version 1.10.0): + + >>> a = np.zeros((3, 3)) + >>> np.einsum('ii->i', a)[:] = 1 + >>> a + array([[ 1., 0., 0.], + [ 0., 1., 0.], + [ 0., 0., 1.]]) + + Example of ellipsis use: + + >>> a = np.arange(6).reshape((3,2)) + >>> b = np.arange(12).reshape((4,3)) + >>> np.einsum('ki,jk->ij', a, b) + array([[10, 28, 46, 64], + [13, 40, 67, 94]]) + >>> np.einsum('ki,...k->i...', a, b) + array([[10, 28, 46, 64], + [13, 40, 67, 94]]) + >>> np.einsum('k...,jk', a, b) + array([[10, 28, 46, 64], + [13, 40, 67, 94]]) + + """) + + +############################################################################## +# +# Documentation for ndarray attributes and methods +# 
+############################################################################## + + +############################################################################## +# +# ndarray object +# +############################################################################## + + +add_newdoc('numpy.core.multiarray', 'ndarray', + """ + ndarray(shape, dtype=float, buffer=None, offset=0, + strides=None, order=None) + + An array object represents a multidimensional, homogeneous array + of fixed-size items. An associated data-type object describes the + format of each element in the array (its byte-order, how many bytes it + occupies in memory, whether it is an integer, a floating point number, + or something else, etc.) + + Arrays should be constructed using `array`, `zeros` or `empty` (refer + to the See Also section below). The parameters given here refer to + a low-level method (`ndarray(...)`) for instantiating an array. + + For more information, refer to the `numpy` module and examine the + methods and attributes of an array. + + Parameters + ---------- + (for the __new__ method; see Notes below) + + shape : tuple of ints + Shape of created array. + dtype : data-type, optional + Any object that can be interpreted as a numpy data type. + buffer : object exposing buffer interface, optional + Used to fill the array with data. + offset : int, optional + Offset of array data in buffer. + strides : tuple of ints, optional + Strides of data in memory. + order : {'C', 'F'}, optional + Row-major (C-style) or column-major (Fortran-style) order. + + Attributes + ---------- + T : ndarray + Transpose of the array. + data : buffer + The array's elements, in memory. + dtype : dtype object + Describes the format of the elements in the array. + flags : dict + Dictionary containing information related to memory use, e.g., + 'C_CONTIGUOUS', 'OWNDATA', 'WRITEABLE', etc. + flat : numpy.flatiter object + Flattened version of the array as an iterator. The iterator + allows assignments, e.g., ``x.flat = 3`` (See `ndarray.flat` for + assignment examples; TODO). + imag : ndarray + Imaginary part of the array. + real : ndarray + Real part of the array. + size : int + Number of elements in the array. + itemsize : int + The memory use of each array element in bytes. + nbytes : int + The total number of bytes required to store the array data, + i.e., ``itemsize * size``. + ndim : int + The array's number of dimensions. + shape : tuple of ints + Shape of the array. + strides : tuple of ints + The step-size required to move from one element to the next in + memory. For example, a contiguous ``(3, 4)`` array of type + ``int16`` in C-order has strides ``(8, 2)``. This implies that + to move from element to element in memory requires jumps of 2 bytes. + To move from row-to-row, one needs to jump 8 bytes at a time + (``2 * 4``). + ctypes : ctypes object + Class containing properties of the array needed for interaction + with ctypes. + base : ndarray + If the array is a view into another array, that array is its `base` + (unless that array is also a view). The `base` array is where the + array data is actually stored. + + See Also + -------- + array : Construct an array. + zeros : Create an array, each element of which is zero. + empty : Create an array, but leave its allocated memory unchanged (i.e., + it contains "garbage"). + dtype : Create a data-type. + numpy.typing.NDArray : An ndarray alias :term:`generic <generic type>` + w.r.t. its `dtype.type <numpy.dtype.type>`. 
+ + Notes + ----- + There are two modes of creating an array using ``__new__``: + + 1. If `buffer` is None, then only `shape`, `dtype`, and `order` + are used. + 2. If `buffer` is an object exposing the buffer interface, then + all keywords are interpreted. + + No ``__init__`` method is needed because the array is fully initialized + after the ``__new__`` method. + + Examples + -------- + These examples illustrate the low-level `ndarray` constructor. Refer + to the `See Also` section above for easier ways of constructing an + ndarray. + + First mode, `buffer` is None: + + >>> np.ndarray(shape=(2,2), dtype=float, order='F') + array([[0.0e+000, 0.0e+000], # random + [ nan, 2.5e-323]]) + + Second mode: + + >>> np.ndarray((2,), buffer=np.array([1,2,3]), + ... offset=np.int_().itemsize, + ... dtype=int) # offset = 1*itemsize, i.e. skip first element + array([2, 3]) + + """) + + +############################################################################## +# +# ndarray attributes +# +############################################################################## + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_interface__', + """Array protocol: Python side.""")) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_priority__', + """Array priority.""")) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_struct__', + """Array protocol: C-struct side.""")) + +add_newdoc('numpy.core.multiarray', 'ndarray', ('__dlpack__', + """a.__dlpack__(*, stream=None) + + DLPack Protocol: Part of the Array API.""")) + +add_newdoc('numpy.core.multiarray', 'ndarray', ('__dlpack_device__', + """a.__dlpack_device__() + + DLPack Protocol: Part of the Array API.""")) + +add_newdoc('numpy.core.multiarray', 'ndarray', ('base', + """ + Base object if memory is from some other object. + + Examples + -------- + The base of an array that owns its memory is None: + + >>> x = np.array([1,2,3,4]) + >>> x.base is None + True + + Slicing creates a view, whose memory is shared with x: + + >>> y = x[2:] + >>> y.base is x + True + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('ctypes', + """ + An object to simplify the interaction of the array with the ctypes + module. + + This attribute creates an object that makes it easier to use arrays + when calling shared libraries with the ctypes module. The returned + object has, among others, data, shape, and strides attributes (see + Notes below) which themselves return ctypes objects that can be used + as arguments to a shared library. + + Parameters + ---------- + None + + Returns + ------- + c : Python object + Possessing attributes data, shape, strides, etc. + + See Also + -------- + numpy.ctypeslib + + Notes + ----- + Below are the public attributes of this object which were documented + in "Guide to NumPy" (we have omitted undocumented public attributes, + as well as documented private attributes): + + .. autoattribute:: numpy.core._internal._ctypes.data + :noindex: + + .. autoattribute:: numpy.core._internal._ctypes.shape + :noindex: + + .. autoattribute:: numpy.core._internal._ctypes.strides + :noindex: + + .. automethod:: numpy.core._internal._ctypes.data_as + :noindex: + + .. automethod:: numpy.core._internal._ctypes.shape_as + :noindex: + + .. automethod:: numpy.core._internal._ctypes.strides_as + :noindex: + + If the ctypes module is not available, then the ctypes attribute + of array objects still returns something useful, but ctypes objects + are not returned and errors may be raised instead. 
In particular, + the object will still have the ``as_parameter`` attribute which will + return an integer equal to the data attribute. + + Examples + -------- + >>> import ctypes + >>> x = np.array([[0, 1], [2, 3]], dtype=np.int32) + >>> x + array([[0, 1], + [2, 3]], dtype=int32) + >>> x.ctypes.data + 31962608 # may vary + >>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_uint32)) + <__main__.LP_c_uint object at 0x7ff2fc1fc200> # may vary + >>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_uint32)).contents + c_uint(0) + >>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_uint64)).contents + c_ulong(4294967296) + >>> x.ctypes.shape + <numpy.core._internal.c_long_Array_2 object at 0x7ff2fc1fce60> # may vary + >>> x.ctypes.strides + <numpy.core._internal.c_long_Array_2 object at 0x7ff2fc1ff320> # may vary + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('data', + """Python buffer object pointing to the start of the array's data.""")) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('dtype', + """ + Data-type of the array's elements. + + .. warning:: + + Setting ``arr.dtype`` is discouraged and may be deprecated in the + future. Setting will replace the ``dtype`` without modifying the + memory (see also `ndarray.view` and `ndarray.astype`). + + Parameters + ---------- + None + + Returns + ------- + d : numpy dtype object + + See Also + -------- + ndarray.astype : Cast the values contained in the array to a new data-type. + ndarray.view : Create a view of the same data but a different data-type. + numpy.dtype + + Examples + -------- + >>> x + array([[0, 1], + [2, 3]]) + >>> x.dtype + dtype('int32') + >>> type(x.dtype) + <type 'numpy.dtype'> + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('imag', + """ + The imaginary part of the array. + + Examples + -------- + >>> x = np.sqrt([1+0j, 0+1j]) + >>> x.imag + array([ 0. , 0.70710678]) + >>> x.imag.dtype + dtype('float64') + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('itemsize', + """ + Length of one array element in bytes. + + Examples + -------- + >>> x = np.array([1,2,3], dtype=np.float64) + >>> x.itemsize + 8 + >>> x = np.array([1,2,3], dtype=np.complex128) + >>> x.itemsize + 16 + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('flags', + """ + Information about the memory layout of the array. + + Attributes + ---------- + C_CONTIGUOUS (C) + The data is in a single, C-style contiguous segment. + F_CONTIGUOUS (F) + The data is in a single, Fortran-style contiguous segment. + OWNDATA (O) + The array owns the memory it uses or borrows it from another object. + WRITEABLE (W) + The data area can be written to. Setting this to False locks + the data, making it read-only. A view (slice, etc.) inherits WRITEABLE + from its base array at creation time, but a view of a writeable + array may be subsequently locked while the base array remains writeable. + (The opposite is not true, in that a view of a locked array may not + be made writeable. However, currently, locking a base object does not + lock any views that already reference it, so under that circumstance it + is possible to alter the contents of a locked array via a previously + created writeable view onto it.) Attempting to change a non-writeable + array raises a RuntimeError exception. + ALIGNED (A) + The data and all elements are aligned appropriately for the hardware. + WRITEBACKIFCOPY (X) + This array is a copy of some other array. 
The C-API function + PyArray_ResolveWritebackIfCopy must be called before deallocating + to the base array will be updated with the contents of this array. + FNC + F_CONTIGUOUS and not C_CONTIGUOUS. + FORC + F_CONTIGUOUS or C_CONTIGUOUS (one-segment test). + BEHAVED (B) + ALIGNED and WRITEABLE. + CARRAY (CA) + BEHAVED and C_CONTIGUOUS. + FARRAY (FA) + BEHAVED and F_CONTIGUOUS and not C_CONTIGUOUS. + + Notes + ----- + The `flags` object can be accessed dictionary-like (as in ``a.flags['WRITEABLE']``), + or by using lowercased attribute names (as in ``a.flags.writeable``). Short flag + names are only supported in dictionary access. + + Only the WRITEBACKIFCOPY, WRITEABLE, and ALIGNED flags can be + changed by the user, via direct assignment to the attribute or dictionary + entry, or by calling `ndarray.setflags`. + + The array flags cannot be set arbitrarily: + + - WRITEBACKIFCOPY can only be set ``False``. + - ALIGNED can only be set ``True`` if the data is truly aligned. + - WRITEABLE can only be set ``True`` if the array owns its own memory + or the ultimate owner of the memory exposes a writeable buffer + interface or is a string. + + Arrays can be both C-style and Fortran-style contiguous simultaneously. + This is clear for 1-dimensional arrays, but can also be true for higher + dimensional arrays. + + Even for contiguous arrays a stride for a given dimension + ``arr.strides[dim]`` may be *arbitrary* if ``arr.shape[dim] == 1`` + or the array has no elements. + It does *not* generally hold that ``self.strides[-1] == self.itemsize`` + for C-style contiguous arrays or ``self.strides[0] == self.itemsize`` for + Fortran-style contiguous arrays is true. + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('flat', + """ + A 1-D iterator over the array. + + This is a `numpy.flatiter` instance, which acts similarly to, but is not + a subclass of, Python's built-in iterator object. + + See Also + -------- + flatten : Return a copy of the array collapsed into one dimension. + + flatiter + + Examples + -------- + >>> x = np.arange(1, 7).reshape(2, 3) + >>> x + array([[1, 2, 3], + [4, 5, 6]]) + >>> x.flat[3] + 4 + >>> x.T + array([[1, 4], + [2, 5], + [3, 6]]) + >>> x.T.flat[3] + 5 + >>> type(x.flat) + <class 'numpy.flatiter'> + + An assignment example: + + >>> x.flat = 3; x + array([[3, 3, 3], + [3, 3, 3]]) + >>> x.flat[[1,4]] = 1; x + array([[3, 1, 3], + [3, 1, 3]]) + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('nbytes', + """ + Total bytes consumed by the elements of the array. + + Notes + ----- + Does not include memory consumed by non-element attributes of the + array object. + + See Also + -------- + sys.getsizeof + Memory consumed by the object itself without parents in case view. + This does include memory consumed by non-element attributes. + + Examples + -------- + >>> x = np.zeros((3,5,2), dtype=np.complex128) + >>> x.nbytes + 480 + >>> np.prod(x.shape) * x.itemsize + 480 + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('ndim', + """ + Number of array dimensions. + + Examples + -------- + >>> x = np.array([1, 2, 3]) + >>> x.ndim + 1 + >>> y = np.zeros((2, 3, 4)) + >>> y.ndim + 3 + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('real', + """ + The real part of the array. + + Examples + -------- + >>> x = np.sqrt([1+0j, 0+1j]) + >>> x.real + array([ 1. 
, 0.70710678]) + >>> x.real.dtype + dtype('float64') + + See Also + -------- + numpy.real : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('shape', + """ + Tuple of array dimensions. + + The shape property is usually used to get the current shape of an array, + but may also be used to reshape the array in-place by assigning a tuple of + array dimensions to it. As with `numpy.reshape`, one of the new shape + dimensions can be -1, in which case its value is inferred from the size of + the array and the remaining dimensions. Reshaping an array in-place will + fail if a copy is required. + + .. warning:: + + Setting ``arr.shape`` is discouraged and may be deprecated in the + future. Using `ndarray.reshape` is the preferred approach. + + Examples + -------- + >>> x = np.array([1, 2, 3, 4]) + >>> x.shape + (4,) + >>> y = np.zeros((2, 3, 4)) + >>> y.shape + (2, 3, 4) + >>> y.shape = (3, 8) + >>> y + array([[ 0., 0., 0., 0., 0., 0., 0., 0.], + [ 0., 0., 0., 0., 0., 0., 0., 0.], + [ 0., 0., 0., 0., 0., 0., 0., 0.]]) + >>> y.shape = (3, 6) + Traceback (most recent call last): + File "<stdin>", line 1, in <module> + ValueError: total size of new array must be unchanged + >>> np.zeros((4,2))[::2].shape = (-1,) + Traceback (most recent call last): + File "<stdin>", line 1, in <module> + AttributeError: Incompatible shape for in-place modification. Use + `.reshape()` to make a copy with the desired shape. + + See Also + -------- + numpy.shape : Equivalent getter function. + numpy.reshape : Function similar to setting ``shape``. + ndarray.reshape : Method similar to setting ``shape``. + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('size', + """ + Number of elements in the array. + + Equal to ``np.prod(a.shape)``, i.e., the product of the array's + dimensions. + + Notes + ----- + `a.size` returns a standard arbitrary precision Python integer. This + may not be the case with other methods of obtaining the same value + (like the suggested ``np.prod(a.shape)``, which returns an instance + of ``np.int_``), and may be relevant if the value is used further in + calculations that may overflow a fixed size integer type. + + Examples + -------- + >>> x = np.zeros((3, 5, 2), dtype=np.complex128) + >>> x.size + 30 + >>> np.prod(x.shape) + 30 + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('strides', + """ + Tuple of bytes to step in each dimension when traversing an array. + + The byte offset of element ``(i[0], i[1], ..., i[n])`` in an array `a` + is:: + + offset = sum(np.array(i) * a.strides) + + A more detailed explanation of strides can be found in the + "ndarray.rst" file in the NumPy reference guide. + + .. warning:: + + Setting ``arr.strides`` is discouraged and may be deprecated in the + future. `numpy.lib.stride_tricks.as_strided` should be preferred + to create a new view of the same data in a safer way. + + Notes + ----- + Imagine an array of 32-bit integers (each 4 bytes):: + + x = np.array([[0, 1, 2, 3, 4], + [5, 6, 7, 8, 9]], dtype=np.int32) + + This array is stored in memory as 40 bytes, one after the other + (known as a contiguous block of memory). The strides of an array tell + us how many bytes we have to skip in memory to move to the next position + along a certain axis. For example, we have to skip 4 bytes (1 value) to + move to the next column, but 20 bytes (5 values) to get to the same + position in the next row. As such, the strides for the array `x` will be + ``(20, 4)``. 
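For illustration, the 32-bit integer example above can be checked directly with a short doctest (same ``x`` as in the note):

    >>> x = np.array([[0, 1, 2, 3, 4],
    ...               [5, 6, 7, 8, 9]], dtype=np.int32)
    >>> x.strides
    (20, 4)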
+ + See Also + -------- + numpy.lib.stride_tricks.as_strided + + Examples + -------- + >>> y = np.reshape(np.arange(2*3*4), (2,3,4)) + >>> y + array([[[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]], + [[12, 13, 14, 15], + [16, 17, 18, 19], + [20, 21, 22, 23]]]) + >>> y.strides + (48, 16, 4) + >>> y[1,1,1] + 17 + >>> offset=sum(y.strides * np.array((1,1,1))) + >>> offset/y.itemsize + 17 + + >>> x = np.reshape(np.arange(5*6*7*8), (5,6,7,8)).transpose(2,3,1,0) + >>> x.strides + (32, 4, 224, 1344) + >>> i = np.array([3,5,2,2]) + >>> offset = sum(i * x.strides) + >>> x[3,5,2,2] + 813 + >>> offset / x.itemsize + 813 + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('T', + """ + View of the transposed array. + + Same as ``self.transpose()``. + + Examples + -------- + >>> a = np.array([[1, 2], [3, 4]]) + >>> a + array([[1, 2], + [3, 4]]) + >>> a.T + array([[1, 3], + [2, 4]]) + + >>> a = np.array([1, 2, 3, 4]) + >>> a + array([1, 2, 3, 4]) + >>> a.T + array([1, 2, 3, 4]) + + See Also + -------- + transpose + + """)) + + +############################################################################## +# +# ndarray methods +# +############################################################################## + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('__array__', + """ a.__array__([dtype], /) + + Returns either a new reference to self if dtype is not given or a new array + of provided data type if dtype is different from the current dtype of the + array. + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_finalize__', + """a.__array_finalize__(obj, /) + + Present so subclasses can call super. Does nothing. + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_prepare__', + """a.__array_prepare__(array[, context], /) + + Returns a view of `array` with the same type as self. + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_wrap__', + """a.__array_wrap__(array[, context], /) + + Returns a view of `array` with the same type as self. + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('__copy__', + """a.__copy__() + + Used if :func:`copy.copy` is called on an array. Returns a copy of the array. + + Equivalent to ``a.copy(order='K')``. + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('__class_getitem__', + """a.__class_getitem__(item, /) + + Return a parametrized wrapper around the `~numpy.ndarray` type. + + .. versionadded:: 1.22 + + Returns + ------- + alias : types.GenericAlias + A parametrized `~numpy.ndarray` type. + + Examples + -------- + >>> from typing import Any + >>> import numpy as np + + >>> np.ndarray[Any, np.dtype[Any]] + numpy.ndarray[typing.Any, numpy.dtype[typing.Any]] + + See Also + -------- + :pep:`585` : Type hinting generics in standard collections. + numpy.typing.NDArray : An ndarray alias :term:`generic <generic type>` + w.r.t. its `dtype.type <numpy.dtype.type>`. + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('__deepcopy__', + """a.__deepcopy__(memo, /) + + Used if :func:`copy.deepcopy` is called on an array. + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('__reduce__', + """a.__reduce__() + + For pickling. + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('__setstate__', + """a.__setstate__(state, /) + + For unpickling. + + The `state` argument must be a sequence that contains the following + elements: + + Parameters + ---------- + version : int + optional pickle version. If omitted defaults to 0. 
+ shape : tuple + dtype : data-type + isFortran : bool + rawdata : string or list + a binary string with the data (or a list if 'a' is an object array) + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('all', + """ + a.all(axis=None, out=None, keepdims=False, *, where=True) + + Returns True if all elements evaluate to True. + + Refer to `numpy.all` for full documentation. + + See Also + -------- + numpy.all : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('any', + """ + a.any(axis=None, out=None, keepdims=False, *, where=True) + + Returns True if any of the elements of `a` evaluate to True. + + Refer to `numpy.any` for full documentation. + + See Also + -------- + numpy.any : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('argmax', + """ + a.argmax(axis=None, out=None, *, keepdims=False) + + Return indices of the maximum values along the given axis. + + Refer to `numpy.argmax` for full documentation. + + See Also + -------- + numpy.argmax : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('argmin', + """ + a.argmin(axis=None, out=None, *, keepdims=False) + + Return indices of the minimum values along the given axis. + + Refer to `numpy.argmin` for detailed documentation. + + See Also + -------- + numpy.argmin : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('argsort', + """ + a.argsort(axis=-1, kind=None, order=None) + + Returns the indices that would sort this array. + + Refer to `numpy.argsort` for full documentation. + + See Also + -------- + numpy.argsort : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('argpartition', + """ + a.argpartition(kth, axis=-1, kind='introselect', order=None) + + Returns the indices that would partition this array. + + Refer to `numpy.argpartition` for full documentation. + + .. versionadded:: 1.8.0 + + See Also + -------- + numpy.argpartition : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('astype', + """ + a.astype(dtype, order='K', casting='unsafe', subok=True, copy=True) + + Copy of the array, cast to a specified type. + + Parameters + ---------- + dtype : str or dtype + Typecode or data-type to which the array is cast. + order : {'C', 'F', 'A', 'K'}, optional + Controls the memory layout order of the result. + 'C' means C order, 'F' means Fortran order, 'A' + means 'F' order if all the arrays are Fortran contiguous, + 'C' order otherwise, and 'K' means as close to the + order the array elements appear in memory as possible. + Default is 'K'. + casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional + Controls what kind of data casting may occur. Defaults to 'unsafe' + for backwards compatibility. + + * 'no' means the data types should not be cast at all. + * 'equiv' means only byte-order changes are allowed. + * 'safe' means only casts which can preserve values are allowed. + * 'same_kind' means only safe casts or casts within a kind, + like float64 to float32, are allowed. + * 'unsafe' means any data conversions may be done. + subok : bool, optional + If True, then sub-classes will be passed-through (default), otherwise + the returned array will be forced to be a base-class array. + copy : bool, optional + By default, astype always returns a newly allocated array. If this + is set to false, and the `dtype`, `order`, and `subok` + requirements are satisfied, the input array is returned instead + of a copy. 
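As a minimal sketch of the ``copy=False`` shortcut described above (illustrative array; the original object is returned when no cast, reordering, or subclass change is needed):

    >>> x = np.arange(3)
    >>> x.astype(x.dtype, copy=False) is x
    True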
+ + Returns + ------- + arr_t : ndarray + Unless `copy` is False and the other conditions for returning the input + array are satisfied (see description for `copy` input parameter), `arr_t` + is a new array of the same shape as the input array, with dtype, order + given by `dtype`, `order`. + + Notes + ----- + .. versionchanged:: 1.17.0 + Casting between a simple data type and a structured one is possible only + for "unsafe" casting. Casting to multiple fields is allowed, but + casting from multiple fields is not. + + .. versionchanged:: 1.9.0 + Casting from numeric to string types in 'safe' casting mode requires + that the string dtype length is long enough to store the max + integer/float value converted. + + Raises + ------ + ComplexWarning + When casting from complex to float or int. To avoid this, + one should use ``a.real.astype(t)``. + + Examples + -------- + >>> x = np.array([1, 2, 2.5]) + >>> x + array([1. , 2. , 2.5]) + + >>> x.astype(int) + array([1, 2, 2]) + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('byteswap', + """ + a.byteswap(inplace=False) + + Swap the bytes of the array elements + + Toggle between low-endian and big-endian data representation by + returning a byteswapped array, optionally swapped in-place. + Arrays of byte-strings are not swapped. The real and imaginary + parts of a complex number are swapped individually. + + Parameters + ---------- + inplace : bool, optional + If ``True``, swap bytes in-place, default is ``False``. + + Returns + ------- + out : ndarray + The byteswapped array. If `inplace` is ``True``, this is + a view to self. + + Examples + -------- + >>> A = np.array([1, 256, 8755], dtype=np.int16) + >>> list(map(hex, A)) + ['0x1', '0x100', '0x2233'] + >>> A.byteswap(inplace=True) + array([ 256, 1, 13090], dtype=int16) + >>> list(map(hex, A)) + ['0x100', '0x1', '0x3322'] + + Arrays of byte-strings are not swapped + + >>> A = np.array([b'ceg', b'fac']) + >>> A.byteswap() + array([b'ceg', b'fac'], dtype='|S3') + + ``A.newbyteorder().byteswap()`` produces an array with the same values + but different representation in memory + + >>> A = np.array([1, 2, 3]) + >>> A.view(np.uint8) + array([1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, + 0, 0], dtype=uint8) + >>> A.newbyteorder().byteswap(inplace=True) + array([1, 2, 3]) + >>> A.view(np.uint8) + array([0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, + 0, 3], dtype=uint8) + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('choose', + """ + a.choose(choices, out=None, mode='raise') + + Use an index array to construct a new array from a set of choices. + + Refer to `numpy.choose` for full documentation. + + See Also + -------- + numpy.choose : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('clip', + """ + a.clip(min=None, max=None, out=None, **kwargs) + + Return an array whose values are limited to ``[min, max]``. + One of max or min must be given. + + Refer to `numpy.clip` for full documentation. + + See Also + -------- + numpy.clip : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('compress', + """ + a.compress(condition, axis=None, out=None) + + Return selected slices of this array along given axis. + + Refer to `numpy.compress` for full documentation. + + See Also + -------- + numpy.compress : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('conj', + """ + a.conj() + + Complex-conjugate all elements. 
+ + Refer to `numpy.conjugate` for full documentation. + + See Also + -------- + numpy.conjugate : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('conjugate', + """ + a.conjugate() + + Return the complex conjugate, element-wise. + + Refer to `numpy.conjugate` for full documentation. + + See Also + -------- + numpy.conjugate : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('copy', + """ + a.copy(order='C') + + Return a copy of the array. + + Parameters + ---------- + order : {'C', 'F', 'A', 'K'}, optional + Controls the memory layout of the copy. 'C' means C-order, + 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous, + 'C' otherwise. 'K' means match the layout of `a` as closely + as possible. (Note that this function and :func:`numpy.copy` are very + similar but have different default values for their order= + arguments, and this function always passes sub-classes through.) + + See also + -------- + numpy.copy : Similar function with different default behavior + numpy.copyto + + Notes + ----- + This function is the preferred method for creating an array copy. The + function :func:`numpy.copy` is similar, but it defaults to using order 'K', + and will not pass sub-classes through by default. + + Examples + -------- + >>> x = np.array([[1,2,3],[4,5,6]], order='F') + + >>> y = x.copy() + + >>> x.fill(0) + + >>> x + array([[0, 0, 0], + [0, 0, 0]]) + + >>> y + array([[1, 2, 3], + [4, 5, 6]]) + + >>> y.flags['C_CONTIGUOUS'] + True + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('cumprod', + """ + a.cumprod(axis=None, dtype=None, out=None) + + Return the cumulative product of the elements along the given axis. + + Refer to `numpy.cumprod` for full documentation. + + See Also + -------- + numpy.cumprod : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('cumsum', + """ + a.cumsum(axis=None, dtype=None, out=None) + + Return the cumulative sum of the elements along the given axis. + + Refer to `numpy.cumsum` for full documentation. + + See Also + -------- + numpy.cumsum : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('diagonal', + """ + a.diagonal(offset=0, axis1=0, axis2=1) + + Return specified diagonals. In NumPy 1.9 the returned array is a + read-only view instead of a copy as in previous NumPy versions. In + a future version the read-only restriction will be removed. + + Refer to :func:`numpy.diagonal` for full documentation. + + See Also + -------- + numpy.diagonal : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('dot')) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('dump', + """a.dump(file) + + Dump a pickle of the array to the specified file. + The array can be read back with pickle.load or numpy.load. + + Parameters + ---------- + file : str or Path + A string naming the dump file. + + .. versionchanged:: 1.17.0 + `pathlib.Path` objects are now accepted. + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('dumps', + """ + a.dumps() + + Returns the pickle of the array as a string. + pickle.loads will convert the string back to an array. + + Parameters + ---------- + None + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('fill', + """ + a.fill(value) + + Fill the array with a scalar value. + + Parameters + ---------- + value : scalar + All elements of `a` will be assigned this value. 
+ + Examples + -------- + >>> a = np.array([1, 2]) + >>> a.fill(0) + >>> a + array([0, 0]) + >>> a = np.empty(2) + >>> a.fill(1) + >>> a + array([1., 1.]) + + Fill expects a scalar value and always behaves the same as assigning + to a single array element. The following is a rare example where this + distinction is important: + + >>> a = np.array([None, None], dtype=object) + >>> a[0] = np.array(3) + >>> a + array([array(3), None], dtype=object) + >>> a.fill(np.array(3)) + >>> a + array([array(3), array(3)], dtype=object) + + Where other forms of assignments will unpack the array being assigned: + + >>> a[...] = np.array(3) + >>> a + array([3, 3], dtype=object) + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('flatten', + """ + a.flatten(order='C') + + Return a copy of the array collapsed into one dimension. + + Parameters + ---------- + order : {'C', 'F', 'A', 'K'}, optional + 'C' means to flatten in row-major (C-style) order. + 'F' means to flatten in column-major (Fortran- + style) order. 'A' means to flatten in column-major + order if `a` is Fortran *contiguous* in memory, + row-major order otherwise. 'K' means to flatten + `a` in the order the elements occur in memory. + The default is 'C'. + + Returns + ------- + y : ndarray + A copy of the input array, flattened to one dimension. + + See Also + -------- + ravel : Return a flattened array. + flat : A 1-D flat iterator over the array. + + Examples + -------- + >>> a = np.array([[1,2], [3,4]]) + >>> a.flatten() + array([1, 2, 3, 4]) + >>> a.flatten('F') + array([1, 3, 2, 4]) + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('getfield', + """ + a.getfield(dtype, offset=0) + + Returns a field of the given array as a certain type. + + A field is a view of the array data with a given data-type. The values in + the view are determined by the given type and the offset into the current + array in bytes. The offset needs to be such that the view dtype fits in the + array dtype; for example an array of dtype complex128 has 16-byte elements. + If taking a view with a 32-bit integer (4 bytes), the offset needs to be + between 0 and 12 bytes. + + Parameters + ---------- + dtype : str or dtype + The data type of the view. The dtype size of the view can not be larger + than that of the array itself. + offset : int + Number of bytes to skip before beginning the element view. + + Examples + -------- + >>> x = np.diag([1.+1.j]*2) + >>> x[1, 1] = 2 + 4.j + >>> x + array([[1.+1.j, 0.+0.j], + [0.+0.j, 2.+4.j]]) + >>> x.getfield(np.float64) + array([[1., 0.], + [0., 2.]]) + + By choosing an offset of 8 bytes we can select the complex part of the + array for our view: + + >>> x.getfield(np.float64, offset=8) + array([[1., 0.], + [0., 4.]]) + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('item', + """ + a.item(*args) + + Copy an element of an array to a standard Python scalar and return it. + + Parameters + ---------- + \\*args : Arguments (variable number and type) + + * none: in this case, the method only works for arrays + with one element (`a.size == 1`), which element is + copied into a standard Python scalar object and returned. + + * int_type: this argument is interpreted as a flat index into + the array, specifying which element to copy and return. + + * tuple of int_types: functions as does a single int_type argument, + except that the argument is interpreted as an nd-index into the + array. 
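A small illustrative sketch of the zero-argument form on a one-element array, which returns the matching built-in Python scalar:

    >>> a = np.array([3.5])
    >>> a.item()
    3.5
    >>> type(a.item())
    <class 'float'>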
+ + Returns + ------- + z : Standard Python scalar object + A copy of the specified element of the array as a suitable + Python scalar + + Notes + ----- + When the data type of `a` is longdouble or clongdouble, item() returns + a scalar array object because there is no available Python scalar that + would not lose information. Void arrays return a buffer object for item(), + unless fields are defined, in which case a tuple is returned. + + `item` is very similar to a[args], except, instead of an array scalar, + a standard Python scalar is returned. This can be useful for speeding up + access to elements of the array and doing arithmetic on elements of the + array using Python's optimized math. + + Examples + -------- + >>> np.random.seed(123) + >>> x = np.random.randint(9, size=(3, 3)) + >>> x + array([[2, 2, 6], + [1, 3, 6], + [1, 0, 1]]) + >>> x.item(3) + 1 + >>> x.item(7) + 0 + >>> x.item((0, 1)) + 2 + >>> x.item((2, 2)) + 1 + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('itemset', + """ + a.itemset(*args) + + Insert scalar into an array (scalar is cast to array's dtype, if possible) + + There must be at least 1 argument, and define the last argument + as *item*. Then, ``a.itemset(*args)`` is equivalent to but faster + than ``a[args] = item``. The item should be a scalar value and `args` + must select a single item in the array `a`. + + Parameters + ---------- + \\*args : Arguments + If one argument: a scalar, only used in case `a` is of size 1. + If two arguments: the last argument is the value to be set + and must be a scalar, the first argument specifies a single array + element location. It is either an int or a tuple. + + Notes + ----- + Compared to indexing syntax, `itemset` provides some speed increase + for placing a scalar into a particular location in an `ndarray`, + if you must do this. However, generally this is discouraged: + among other problems, it complicates the appearance of the code. + Also, when using `itemset` (and `item`) inside a loop, be sure + to assign the methods to a local variable to avoid the attribute + look-up at each loop iteration. + + Examples + -------- + >>> np.random.seed(123) + >>> x = np.random.randint(9, size=(3, 3)) + >>> x + array([[2, 2, 6], + [1, 3, 6], + [1, 0, 1]]) + >>> x.itemset(4, 0) + >>> x.itemset((2, 2), 9) + >>> x + array([[2, 2, 6], + [1, 0, 6], + [1, 0, 9]]) + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('max', + """ + a.max(axis=None, out=None, keepdims=False, initial=<no value>, where=True) + + Return the maximum along a given axis. + + Refer to `numpy.amax` for full documentation. + + See Also + -------- + numpy.amax : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('mean', + """ + a.mean(axis=None, dtype=None, out=None, keepdims=False, *, where=True) + + Returns the average of the array elements along given axis. + + Refer to `numpy.mean` for full documentation. + + See Also + -------- + numpy.mean : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('min', + """ + a.min(axis=None, out=None, keepdims=False, initial=<no value>, where=True) + + Return the minimum along a given axis. + + Refer to `numpy.amin` for full documentation. + + See Also + -------- + numpy.amin : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('newbyteorder', + """ + arr.newbyteorder(new_order='S', /) + + Return the array with the same data viewed with a different byte order. 
+ + Equivalent to:: + + arr.view(arr.dtype.newbytorder(new_order)) + + Changes are also made in all fields and sub-arrays of the array data + type. + + + + Parameters + ---------- + new_order : string, optional + Byte order to force; a value from the byte order specifications + below. `new_order` codes can be any of: + + * 'S' - swap dtype from current to opposite endian + * {'<', 'little'} - little endian + * {'>', 'big'} - big endian + * {'=', 'native'} - native order, equivalent to `sys.byteorder` + * {'|', 'I'} - ignore (no change to byte order) + + The default value ('S') results in swapping the current + byte order. + + + Returns + ------- + new_arr : array + New array object with the dtype reflecting given change to the + byte order. + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('nonzero', + """ + a.nonzero() + + Return the indices of the elements that are non-zero. + + Refer to `numpy.nonzero` for full documentation. + + See Also + -------- + numpy.nonzero : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('prod', + """ + a.prod(axis=None, dtype=None, out=None, keepdims=False, initial=1, where=True) + + Return the product of the array elements over the given axis + + Refer to `numpy.prod` for full documentation. + + See Also + -------- + numpy.prod : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('ptp', + """ + a.ptp(axis=None, out=None, keepdims=False) + + Peak to peak (maximum - minimum) value along a given axis. + + Refer to `numpy.ptp` for full documentation. + + See Also + -------- + numpy.ptp : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('put', + """ + a.put(indices, values, mode='raise') + + Set ``a.flat[n] = values[n]`` for all `n` in indices. + + Refer to `numpy.put` for full documentation. + + See Also + -------- + numpy.put : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('ravel', + """ + a.ravel([order]) + + Return a flattened array. + + Refer to `numpy.ravel` for full documentation. + + See Also + -------- + numpy.ravel : equivalent function + + ndarray.flat : a flat iterator on the array. + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('repeat', + """ + a.repeat(repeats, axis=None) + + Repeat elements of an array. + + Refer to `numpy.repeat` for full documentation. + + See Also + -------- + numpy.repeat : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('reshape', + """ + a.reshape(shape, order='C') + + Returns an array containing the same data with a new shape. + + Refer to `numpy.reshape` for full documentation. + + See Also + -------- + numpy.reshape : equivalent function + + Notes + ----- + Unlike the free function `numpy.reshape`, this method on `ndarray` allows + the elements of the shape parameter to be passed in as separate arguments. + For example, ``a.reshape(10, 11)`` is equivalent to + ``a.reshape((10, 11))``. + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('resize', + """ + a.resize(new_shape, refcheck=True) + + Change shape and size of array in-place. + + Parameters + ---------- + new_shape : tuple of ints, or `n` ints + Shape of resized array. + refcheck : bool, optional + If False, reference count will not be checked. Default is True. + + Returns + ------- + None + + Raises + ------ + ValueError + If `a` does not own its own data or references or views to it exist, + and the data memory must be changed. 
+ PyPy only: will always raise if the data memory must be changed, since + there is no reliable way to determine if references or views to it + exist. + + SystemError + If the `order` keyword argument is specified. This behaviour is a + bug in NumPy. + + See Also + -------- + resize : Return a new array with the specified shape. + + Notes + ----- + This reallocates space for the data area if necessary. + + Only contiguous arrays (data elements consecutive in memory) can be + resized. + + The purpose of the reference count check is to make sure you + do not use this array as a buffer for another Python object and then + reallocate the memory. However, reference counts can increase in + other ways so if you are sure that you have not shared the memory + for this array with another Python object, then you may safely set + `refcheck` to False. + + Examples + -------- + Shrinking an array: array is flattened (in the order that the data are + stored in memory), resized, and reshaped: + + >>> a = np.array([[0, 1], [2, 3]], order='C') + >>> a.resize((2, 1)) + >>> a + array([[0], + [1]]) + + >>> a = np.array([[0, 1], [2, 3]], order='F') + >>> a.resize((2, 1)) + >>> a + array([[0], + [2]]) + + Enlarging an array: as above, but missing entries are filled with zeros: + + >>> b = np.array([[0, 1], [2, 3]]) + >>> b.resize(2, 3) # new_shape parameter doesn't have to be a tuple + >>> b + array([[0, 1, 2], + [3, 0, 0]]) + + Referencing an array prevents resizing... + + >>> c = a + >>> a.resize((1, 1)) + Traceback (most recent call last): + ... + ValueError: cannot resize an array that references or is referenced ... + + Unless `refcheck` is False: + + >>> a.resize((1, 1), refcheck=False) + >>> a + array([[0]]) + >>> c + array([[0]]) + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('round', + """ + a.round(decimals=0, out=None) + + Return `a` with each element rounded to the given number of decimals. + + Refer to `numpy.around` for full documentation. + + See Also + -------- + numpy.around : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('searchsorted', + """ + a.searchsorted(v, side='left', sorter=None) + + Find indices where elements of v should be inserted in a to maintain order. + + For full documentation, see `numpy.searchsorted` + + See Also + -------- + numpy.searchsorted : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('setfield', + """ + a.setfield(val, dtype, offset=0) + + Put a value into a specified place in a field defined by a data-type. + + Place `val` into `a`'s field defined by `dtype` and beginning `offset` + bytes into the field. + + Parameters + ---------- + val : object + Value to be placed in field. + dtype : dtype object + Data-type of the field in which to place `val`. + offset : int, optional + The number of bytes into the field at which to place `val`. 
+ + Returns + ------- + None + + See Also + -------- + getfield + + Examples + -------- + >>> x = np.eye(3) + >>> x.getfield(np.float64) + array([[1., 0., 0.], + [0., 1., 0.], + [0., 0., 1.]]) + >>> x.setfield(3, np.int32) + >>> x.getfield(np.int32) + array([[3, 3, 3], + [3, 3, 3], + [3, 3, 3]], dtype=int32) + >>> x + array([[1.0e+000, 1.5e-323, 1.5e-323], + [1.5e-323, 1.0e+000, 1.5e-323], + [1.5e-323, 1.5e-323, 1.0e+000]]) + >>> x.setfield(np.eye(3), np.int32) + >>> x + array([[1., 0., 0.], + [0., 1., 0.], + [0., 0., 1.]]) + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('setflags', + """ + a.setflags(write=None, align=None, uic=None) + + Set array flags WRITEABLE, ALIGNED, WRITEBACKIFCOPY, + respectively. + + These Boolean-valued flags affect how numpy interprets the memory + area used by `a` (see Notes below). The ALIGNED flag can only + be set to True if the data is actually aligned according to the type. + The WRITEBACKIFCOPY and flag can never be set + to True. The flag WRITEABLE can only be set to True if the array owns its + own memory, or the ultimate owner of the memory exposes a writeable buffer + interface, or is a string. (The exception for string is made so that + unpickling can be done without copying memory.) + + Parameters + ---------- + write : bool, optional + Describes whether or not `a` can be written to. + align : bool, optional + Describes whether or not `a` is aligned properly for its type. + uic : bool, optional + Describes whether or not `a` is a copy of another "base" array. + + Notes + ----- + Array flags provide information about how the memory area used + for the array is to be interpreted. There are 7 Boolean flags + in use, only four of which can be changed by the user: + WRITEBACKIFCOPY, WRITEABLE, and ALIGNED. + + WRITEABLE (W) the data area can be written to; + + ALIGNED (A) the data and strides are aligned appropriately for the hardware + (as determined by the compiler); + + WRITEBACKIFCOPY (X) this array is a copy of some other array (referenced + by .base). When the C-API function PyArray_ResolveWritebackIfCopy is + called, the base array will be updated with the contents of this array. + + All flags can be accessed using the single (upper case) letter as well + as the full name. + + Examples + -------- + >>> y = np.array([[3, 1, 7], + ... [2, 0, 0], + ... [8, 5, 9]]) + >>> y + array([[3, 1, 7], + [2, 0, 0], + [8, 5, 9]]) + >>> y.flags + C_CONTIGUOUS : True + F_CONTIGUOUS : False + OWNDATA : True + WRITEABLE : True + ALIGNED : True + WRITEBACKIFCOPY : False + >>> y.setflags(write=0, align=0) + >>> y.flags + C_CONTIGUOUS : True + F_CONTIGUOUS : False + OWNDATA : True + WRITEABLE : False + ALIGNED : False + WRITEBACKIFCOPY : False + >>> y.setflags(uic=1) + Traceback (most recent call last): + File "<stdin>", line 1, in <module> + ValueError: cannot set WRITEBACKIFCOPY flag to True + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('sort', + """ + a.sort(axis=-1, kind=None, order=None) + + Sort an array in-place. Refer to `numpy.sort` for full documentation. + + Parameters + ---------- + axis : int, optional + Axis along which to sort. Default is -1, which means sort along the + last axis. + kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional + Sorting algorithm. The default is 'quicksort'. Note that both 'stable' + and 'mergesort' use timsort under the covers and, in general, the + actual implementation will vary with datatype. The 'mergesort' option + is retained for backwards compatibility. + + .. 
versionchanged:: 1.15.0 + The 'stable' option was added. + + order : str or list of str, optional + When `a` is an array with fields defined, this argument specifies + which fields to compare first, second, etc. A single field can + be specified as a string, and not all fields need be specified, + but unspecified fields will still be used, in the order in which + they come up in the dtype, to break ties. + + See Also + -------- + numpy.sort : Return a sorted copy of an array. + numpy.argsort : Indirect sort. + numpy.lexsort : Indirect stable sort on multiple keys. + numpy.searchsorted : Find elements in sorted array. + numpy.partition: Partial sort. + + Notes + ----- + See `numpy.sort` for notes on the different sorting algorithms. + + Examples + -------- + >>> a = np.array([[1,4], [3,1]]) + >>> a.sort(axis=1) + >>> a + array([[1, 4], + [1, 3]]) + >>> a.sort(axis=0) + >>> a + array([[1, 3], + [1, 4]]) + + Use the `order` keyword to specify a field to use when sorting a + structured array: + + >>> a = np.array([('a', 2), ('c', 1)], dtype=[('x', 'S1'), ('y', int)]) + >>> a.sort(order='y') + >>> a + array([(b'c', 1), (b'a', 2)], + dtype=[('x', 'S1'), ('y', '<i8')]) + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('partition', + """ + a.partition(kth, axis=-1, kind='introselect', order=None) + + Rearranges the elements in the array in such a way that the value of the + element in kth position is in the position it would be in a sorted array. + All elements smaller than the kth element are moved before this element and + all equal or greater are moved behind it. The ordering of the elements in + the two partitions is undefined. + + .. versionadded:: 1.8.0 + + Parameters + ---------- + kth : int or sequence of ints + Element index to partition by. The kth element value will be in its + final sorted position and all smaller elements will be moved before it + and all equal or greater elements behind it. + The order of all elements in the partitions is undefined. + If provided with a sequence of kth it will partition all elements + indexed by kth of them into their sorted position at once. + + .. deprecated:: 1.22.0 + Passing booleans as index is deprecated. + axis : int, optional + Axis along which to sort. Default is -1, which means sort along the + last axis. + kind : {'introselect'}, optional + Selection algorithm. Default is 'introselect'. + order : str or list of str, optional + When `a` is an array with fields defined, this argument specifies + which fields to compare first, second, etc. A single field can + be specified as a string, and not all fields need to be specified, + but unspecified fields will still be used, in the order in which + they come up in the dtype, to break ties. + + See Also + -------- + numpy.partition : Return a partitioned copy of an array. + argpartition : Indirect partition. + sort : Full sort. + + Notes + ----- + See ``np.partition`` for notes on the different algorithms. + + Examples + -------- + >>> a = np.array([3, 4, 2, 1]) + >>> a.partition(3) + >>> a + array([2, 1, 3, 4]) + + >>> a.partition((1, 3)) + >>> a + array([1, 2, 3, 4]) + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('squeeze', + """ + a.squeeze(axis=None) + + Remove axes of length one from `a`. + + Refer to `numpy.squeeze` for full documentation. 
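+
+    As a brief illustration (array values chosen arbitrarily), only
+    axes of length one are removed:
+
+    >>> x = np.array([[[0], [1], [2]]])
+    >>> x.shape
+    (1, 3, 1)
+    >>> x.squeeze().shape
+    (3,)
+    >>> x.squeeze(axis=0).shape
+    (3, 1)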
+ + See Also + -------- + numpy.squeeze : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('std', + """ + a.std(axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=True) + + Returns the standard deviation of the array elements along given axis. + + Refer to `numpy.std` for full documentation. + + See Also + -------- + numpy.std : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('sum', + """ + a.sum(axis=None, dtype=None, out=None, keepdims=False, initial=0, where=True) + + Return the sum of the array elements over the given axis. + + Refer to `numpy.sum` for full documentation. + + See Also + -------- + numpy.sum : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('swapaxes', + """ + a.swapaxes(axis1, axis2) + + Return a view of the array with `axis1` and `axis2` interchanged. + + Refer to `numpy.swapaxes` for full documentation. + + See Also + -------- + numpy.swapaxes : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('take', + """ + a.take(indices, axis=None, out=None, mode='raise') + + Return an array formed from the elements of `a` at the given indices. + + Refer to `numpy.take` for full documentation. + + See Also + -------- + numpy.take : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('tofile', + """ + a.tofile(fid, sep="", format="%s") + + Write array to a file as text or binary (default). + + Data is always written in 'C' order, independent of the order of `a`. + The data produced by this method can be recovered using the function + fromfile(). + + Parameters + ---------- + fid : file or str or Path + An open file object, or a string containing a filename. + + .. versionchanged:: 1.17.0 + `pathlib.Path` objects are now accepted. + + sep : str + Separator between array items for text output. + If "" (empty), a binary file is written, equivalent to + ``file.write(a.tobytes())``. + format : str + Format string for text file output. + Each entry in the array is formatted to text by first converting + it to the closest Python type, and then using "format" % item. + + Notes + ----- + This is a convenience function for quick storage of array data. + Information on endianness and precision is lost, so this method is not a + good choice for files intended to archive data or transport data between + machines with different endianness. Some of these problems can be overcome + by outputting the data as text files, at the expense of speed and file + size. + + When fid is a file object, array contents are directly written to the + file, bypassing the file object's ``write`` method. As a result, tofile + cannot be used with files objects supporting compression (e.g., GzipFile) + or file-like objects that do not support ``fileno()`` (e.g., BytesIO). + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('tolist', + """ + a.tolist() + + Return the array as an ``a.ndim``-levels deep nested list of Python scalars. + + Return a copy of the array data as a (nested) Python list. + Data items are converted to the nearest compatible builtin Python type, via + the `~numpy.ndarray.item` function. + + If ``a.ndim`` is 0, then since the depth of the nested list is 0, it will + not be a list at all, but a simple Python scalar. + + Parameters + ---------- + none + + Returns + ------- + y : object, or list of object, or list of list of object, or ... + The possibly nested list of array elements. 
+ + Notes + ----- + The array may be recreated via ``a = np.array(a.tolist())``, although this + may sometimes lose precision. + + Examples + -------- + For a 1D array, ``a.tolist()`` is almost the same as ``list(a)``, + except that ``tolist`` changes numpy scalars to Python scalars: + + >>> a = np.uint32([1, 2]) + >>> a_list = list(a) + >>> a_list + [1, 2] + >>> type(a_list[0]) + <class 'numpy.uint32'> + >>> a_tolist = a.tolist() + >>> a_tolist + [1, 2] + >>> type(a_tolist[0]) + <class 'int'> + + Additionally, for a 2D array, ``tolist`` applies recursively: + + >>> a = np.array([[1, 2], [3, 4]]) + >>> list(a) + [array([1, 2]), array([3, 4])] + >>> a.tolist() + [[1, 2], [3, 4]] + + The base case for this recursion is a 0D array: + + >>> a = np.array(1) + >>> list(a) + Traceback (most recent call last): + ... + TypeError: iteration over a 0-d array + >>> a.tolist() + 1 + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('tobytes', """ + a.tobytes(order='C') + + Construct Python bytes containing the raw data bytes in the array. + + Constructs Python bytes showing a copy of the raw contents of + data memory. The bytes object is produced in C-order by default. + This behavior is controlled by the ``order`` parameter. + + .. versionadded:: 1.9.0 + + Parameters + ---------- + order : {'C', 'F', 'A'}, optional + Controls the memory layout of the bytes object. 'C' means C-order, + 'F' means F-order, 'A' (short for *Any*) means 'F' if `a` is + Fortran contiguous, 'C' otherwise. Default is 'C'. + + Returns + ------- + s : bytes + Python bytes exhibiting a copy of `a`'s raw data. + + See also + -------- + frombuffer + Inverse of this operation, construct a 1-dimensional array from Python + bytes. + + Examples + -------- + >>> x = np.array([[0, 1], [2, 3]], dtype='<u2') + >>> x.tobytes() + b'\\x00\\x00\\x01\\x00\\x02\\x00\\x03\\x00' + >>> x.tobytes('C') == x.tobytes() + True + >>> x.tobytes('F') + b'\\x00\\x00\\x02\\x00\\x01\\x00\\x03\\x00' + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('tostring', r""" + a.tostring(order='C') + + A compatibility alias for `tobytes`, with exactly the same behavior. + + Despite its name, it returns `bytes` not `str`\ s. + + .. deprecated:: 1.19.0 + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('trace', + """ + a.trace(offset=0, axis1=0, axis2=1, dtype=None, out=None) + + Return the sum along diagonals of the array. + + Refer to `numpy.trace` for full documentation. + + See Also + -------- + numpy.trace : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('transpose', + """ + a.transpose(*axes) + + Returns a view of the array with axes transposed. + + Refer to `numpy.transpose` for full documentation. + + Parameters + ---------- + axes : None, tuple of ints, or `n` ints + + * None or no argument: reverses the order of the axes. + + * tuple of ints: `i` in the `j`-th place in the tuple means that the + array's `i`-th axis becomes the transposed array's `j`-th axis. + + * `n` ints: same as an n-tuple of the same ints (this form is + intended simply as a "convenience" alternative to the tuple form). + + Returns + ------- + p : ndarray + View of the array with its axes suitably permuted. + + See Also + -------- + transpose : Equivalent function. + ndarray.T : Array property returning the array transposed. + ndarray.reshape : Give a new shape to an array without changing its data. 
+ + Examples + -------- + >>> a = np.array([[1, 2], [3, 4]]) + >>> a + array([[1, 2], + [3, 4]]) + >>> a.transpose() + array([[1, 3], + [2, 4]]) + >>> a.transpose((1, 0)) + array([[1, 3], + [2, 4]]) + >>> a.transpose(1, 0) + array([[1, 3], + [2, 4]]) + + >>> a = np.array([1, 2, 3, 4]) + >>> a + array([1, 2, 3, 4]) + >>> a.transpose() + array([1, 2, 3, 4]) + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('var', + """ + a.var(axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=True) + + Returns the variance of the array elements, along given axis. + + Refer to `numpy.var` for full documentation. + + See Also + -------- + numpy.var : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('view', + """ + a.view([dtype][, type]) + + New view of array with the same data. + + .. note:: + Passing None for ``dtype`` is different from omitting the parameter, + since the former invokes ``dtype(None)`` which is an alias for + ``dtype('float_')``. + + Parameters + ---------- + dtype : data-type or ndarray sub-class, optional + Data-type descriptor of the returned view, e.g., float32 or int16. + Omitting it results in the view having the same data-type as `a`. + This argument can also be specified as an ndarray sub-class, which + then specifies the type of the returned object (this is equivalent to + setting the ``type`` parameter). + type : Python type, optional + Type of the returned view, e.g., ndarray or matrix. Again, omission + of the parameter results in type preservation. + + Notes + ----- + ``a.view()`` is used two different ways: + + ``a.view(some_dtype)`` or ``a.view(dtype=some_dtype)`` constructs a view + of the array's memory with a different data-type. This can cause a + reinterpretation of the bytes of memory. + + ``a.view(ndarray_subclass)`` or ``a.view(type=ndarray_subclass)`` just + returns an instance of `ndarray_subclass` that looks at the same array + (same shape, dtype, etc.) This does not cause a reinterpretation of the + memory. + + For ``a.view(some_dtype)``, if ``some_dtype`` has a different number of + bytes per entry than the previous dtype (for example, converting a regular + array to a structured array), then the last axis of ``a`` must be + contiguous. This axis will be resized in the result. + + .. versionchanged:: 1.23.0 + Only the last axis needs to be contiguous. Previously, the entire array + had to be C-contiguous. 
+ + Examples + -------- + >>> x = np.array([(1, 2)], dtype=[('a', np.int8), ('b', np.int8)]) + + Viewing array data using a different type and dtype: + + >>> y = x.view(dtype=np.int16, type=np.matrix) + >>> y + matrix([[513]], dtype=int16) + >>> print(type(y)) + <class 'numpy.matrix'> + + Creating a view on a structured array so it can be used in calculations + + >>> x = np.array([(1, 2),(3,4)], dtype=[('a', np.int8), ('b', np.int8)]) + >>> xv = x.view(dtype=np.int8).reshape(-1,2) + >>> xv + array([[1, 2], + [3, 4]], dtype=int8) + >>> xv.mean(0) + array([2., 3.]) + + Making changes to the view changes the underlying array + + >>> xv[0,1] = 20 + >>> x + array([(1, 20), (3, 4)], dtype=[('a', 'i1'), ('b', 'i1')]) + + Using a view to convert an array to a recarray: + + >>> z = x.view(np.recarray) + >>> z.a + array([1, 3], dtype=int8) + + Views share data: + + >>> x[0] = (9, 10) + >>> z[0] + (9, 10) + + Views that change the dtype size (bytes per entry) should normally be + avoided on arrays defined by slices, transposes, fortran-ordering, etc.: + + >>> x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int16) + >>> y = x[:, ::2] + >>> y + array([[1, 3], + [4, 6]], dtype=int16) + >>> y.view(dtype=[('width', np.int16), ('length', np.int16)]) + Traceback (most recent call last): + ... + ValueError: To change to a dtype of a different size, the last axis must be contiguous + >>> z = y.copy() + >>> z.view(dtype=[('width', np.int16), ('length', np.int16)]) + array([[(1, 3)], + [(4, 6)]], dtype=[('width', '<i2'), ('length', '<i2')]) + + However, views that change dtype are totally fine for arrays with a + contiguous last axis, even if the rest of the axes are not C-contiguous: + + >>> x = np.arange(2 * 3 * 4, dtype=np.int8).reshape(2, 3, 4) + >>> x.transpose(1, 0, 2).view(np.int16) + array([[[ 256, 770], + [3340, 3854]], + <BLANKLINE> + [[1284, 1798], + [4368, 4882]], + <BLANKLINE> + [[2312, 2826], + [5396, 5910]]], dtype=int16) + + """)) + + +############################################################################## +# +# umath functions +# +############################################################################## + +add_newdoc('numpy.core.umath', 'frompyfunc', + """ + frompyfunc(func, /, nin, nout, *[, identity]) + + Takes an arbitrary Python function and returns a NumPy ufunc. + + Can be used, for example, to add broadcasting to a built-in Python + function (see Examples section). + + Parameters + ---------- + func : Python function object + An arbitrary Python function. + nin : int + The number of input arguments. + nout : int + The number of objects returned by `func`. + identity : object, optional + The value to use for the `~numpy.ufunc.identity` attribute of the resulting + object. If specified, this is equivalent to setting the underlying + C ``identity`` field to ``PyUFunc_IdentityValue``. + If omitted, the identity is set to ``PyUFunc_None``. Note that this is + _not_ equivalent to setting the identity to ``None``, which implies the + operation is reorderable. + + Returns + ------- + out : ufunc + Returns a NumPy universal function (``ufunc``) object. + + See Also + -------- + vectorize : Evaluates pyfunc over input arrays using broadcasting rules of numpy. + + Notes + ----- + The returned ufunc always returns PyObject arrays. 
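+
+    A small sketch of the ``identity`` argument (any two-argument Python
+    callable will do; the values here are arbitrary):
+
+    >>> py_add = np.frompyfunc(lambda x, y: x + y, 2, 1, identity=0)
+    >>> py_add.identity
+    0
+    >>> py_add([1, 2], [3, 4])
+    array([4, 6], dtype=object)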
+ + Examples + -------- + Use frompyfunc to add broadcasting to the Python function ``oct``: + + >>> oct_array = np.frompyfunc(oct, 1, 1) + >>> oct_array(np.array((10, 30, 100))) + array(['0o12', '0o36', '0o144'], dtype=object) + >>> np.array((oct(10), oct(30), oct(100))) # for comparison + array(['0o12', '0o36', '0o144'], dtype='<U5') + + """) + +add_newdoc('numpy.core.umath', 'geterrobj', + """ + geterrobj() + + Return the current object that defines floating-point error handling. + + The error object contains all information that defines the error handling + behavior in NumPy. `geterrobj` is used internally by the other + functions that get and set error handling behavior (`geterr`, `seterr`, + `geterrcall`, `seterrcall`). + + Returns + ------- + errobj : list + The error object, a list containing three elements: + [internal numpy buffer size, error mask, error callback function]. + + The error mask is a single integer that holds the treatment information + on all four floating point errors. The information for each error type + is contained in three bits of the integer. If we print it in base 8, we + can see what treatment is set for "invalid", "under", "over", and + "divide" (in that order). The printed string can be interpreted with + + * 0 : 'ignore' + * 1 : 'warn' + * 2 : 'raise' + * 3 : 'call' + * 4 : 'print' + * 5 : 'log' + + See Also + -------- + seterrobj, seterr, geterr, seterrcall, geterrcall + getbufsize, setbufsize + + Notes + ----- + For complete documentation of the types of floating-point exceptions and + treatment options, see `seterr`. + + Examples + -------- + >>> np.geterrobj() # first get the defaults + [8192, 521, None] + + >>> def err_handler(type, flag): + ... print("Floating point error (%s), with flag %s" % (type, flag)) + ... + >>> old_bufsize = np.setbufsize(20000) + >>> old_err = np.seterr(divide='raise') + >>> old_handler = np.seterrcall(err_handler) + >>> np.geterrobj() + [8192, 521, <function err_handler at 0x91dcaac>] + + >>> old_err = np.seterr(all='ignore') + >>> np.base_repr(np.geterrobj()[1], 8) + '0' + >>> old_err = np.seterr(divide='warn', over='log', under='call', + ... invalid='print') + >>> np.base_repr(np.geterrobj()[1], 8) + '4351' + + """) + +add_newdoc('numpy.core.umath', 'seterrobj', + """ + seterrobj(errobj, /) + + Set the object that defines floating-point error handling. + + The error object contains all information that defines the error handling + behavior in NumPy. `seterrobj` is used internally by the other + functions that set error handling behavior (`seterr`, `seterrcall`). + + Parameters + ---------- + errobj : list + The error object, a list containing three elements: + [internal numpy buffer size, error mask, error callback function]. + + The error mask is a single integer that holds the treatment information + on all four floating point errors. The information for each error type + is contained in three bits of the integer. If we print it in base 8, we + can see what treatment is set for "invalid", "under", "over", and + "divide" (in that order). The printed string can be interpreted with + + * 0 : 'ignore' + * 1 : 'warn' + * 2 : 'raise' + * 3 : 'call' + * 4 : 'print' + * 5 : 'log' + + See Also + -------- + geterrobj, seterr, geterr, seterrcall, geterrcall + getbufsize, setbufsize + + Notes + ----- + For complete documentation of the types of floating-point exceptions and + treatment options, see `seterr`. 
+ + Examples + -------- + >>> old_errobj = np.geterrobj() # first get the defaults + >>> old_errobj + [8192, 521, None] + + >>> def err_handler(type, flag): + ... print("Floating point error (%s), with flag %s" % (type, flag)) + ... + >>> new_errobj = [20000, 12, err_handler] + >>> np.seterrobj(new_errobj) + >>> np.base_repr(12, 8) # int for divide=4 ('print') and over=1 ('warn') + '14' + >>> np.geterr() + {'over': 'warn', 'divide': 'print', 'invalid': 'ignore', 'under': 'ignore'} + >>> np.geterrcall() is err_handler + True + + """) + + +############################################################################## +# +# compiled_base functions +# +############################################################################## + +add_newdoc('numpy.core.multiarray', 'add_docstring', + """ + add_docstring(obj, docstring) + + Add a docstring to a built-in obj if possible. + If the obj already has a docstring raise a RuntimeError + If this routine does not know how to add a docstring to the object + raise a TypeError + """) + +add_newdoc('numpy.core.umath', '_add_newdoc_ufunc', + """ + add_ufunc_docstring(ufunc, new_docstring) + + Replace the docstring for a ufunc with new_docstring. + This method will only work if the current docstring for + the ufunc is NULL. (At the C level, i.e. when ufunc->doc is NULL.) + + Parameters + ---------- + ufunc : numpy.ufunc + A ufunc whose current doc is NULL. + new_docstring : string + The new docstring for the ufunc. + + Notes + ----- + This method allocates memory for new_docstring on + the heap. Technically this creates a mempory leak, since this + memory will not be reclaimed until the end of the program + even if the ufunc itself is removed. However this will only + be a problem if the user is repeatedly creating ufuncs with + no documentation, adding documentation via add_newdoc_ufunc, + and then throwing away the ufunc. + """) + +add_newdoc('numpy.core.multiarray', 'get_handler_name', + """ + get_handler_name(a: ndarray) -> str,None + + Return the name of the memory handler used by `a`. If not provided, return + the name of the memory handler that will be used to allocate data for the + next `ndarray` in this context. May return None if `a` does not own its + memory, in which case you can traverse ``a.base`` for a memory handler. + """) + +add_newdoc('numpy.core.multiarray', 'get_handler_version', + """ + get_handler_version(a: ndarray) -> int,None + + Return the version of the memory handler used by `a`. If not provided, + return the version of the memory handler that will be used to allocate data + for the next `ndarray` in this context. May return None if `a` does not own + its memory, in which case you can traverse ``a.base`` for a memory handler. + """) + +add_newdoc('numpy.core.multiarray', '_get_madvise_hugepage', + """ + _get_madvise_hugepage() -> bool + + Get use of ``madvise (2)`` MADV_HUGEPAGE support when + allocating the array data. Returns the currently set value. + See `global_state` for more information. + """) + +add_newdoc('numpy.core.multiarray', '_set_madvise_hugepage', + """ + _set_madvise_hugepage(enabled: bool) -> bool + + Set or unset use of ``madvise (2)`` MADV_HUGEPAGE support when + allocating the array data. Returns the previously set value. + See `global_state` for more information. 
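+
+    A usage sketch (the value returned by the first call depends on the
+    platform and kernel defaults, so it is captured rather than shown):
+
+    >>> prev = np.core.multiarray._set_madvise_hugepage(True)
+    >>> np.core.multiarray._get_madvise_hugepage()
+    True
+    >>> np.core.multiarray._set_madvise_hugepage(prev)
+    True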
+ """) + +add_newdoc('numpy.core._multiarray_tests', 'format_float_OSprintf_g', + """ + format_float_OSprintf_g(val, precision) + + Print a floating point scalar using the system's printf function, + equivalent to: + + printf("%.*g", precision, val); + + for half/float/double, or replacing 'g' by 'Lg' for longdouble. This + method is designed to help cross-validate the format_float_* methods. + + Parameters + ---------- + val : python float or numpy floating scalar + Value to format. + + precision : non-negative integer, optional + Precision given to printf. + + Returns + ------- + rep : string + The string representation of the floating point value + + See Also + -------- + format_float_scientific + format_float_positional + """) + + +############################################################################## +# +# Documentation for ufunc attributes and methods +# +############################################################################## + + +############################################################################## +# +# ufunc object +# +############################################################################## + +add_newdoc('numpy.core', 'ufunc', + """ + Functions that operate element by element on whole arrays. + + To see the documentation for a specific ufunc, use `info`. For + example, ``np.info(np.sin)``. Because ufuncs are written in C + (for speed) and linked into Python with NumPy's ufunc facility, + Python's help() function finds this page whenever help() is called + on a ufunc. + + A detailed explanation of ufuncs can be found in the docs for :ref:`ufuncs`. + + **Calling ufuncs:** ``op(*x[, out], where=True, **kwargs)`` + + Apply `op` to the arguments `*x` elementwise, broadcasting the arguments. + + The broadcasting rules are: + + * Dimensions of length 1 may be prepended to either array. + * Arrays may be repeated along dimensions of length 1. + + Parameters + ---------- + *x : array_like + Input arrays. + out : ndarray, None, or tuple of ndarray and None, optional + Alternate array object(s) in which to put the result; if provided, it + must have a shape that the inputs broadcast to. A tuple of arrays + (possible only as a keyword argument) must have length equal to the + number of outputs; use None for uninitialized outputs to be + allocated by the ufunc. + where : array_like, optional + This condition is broadcast over the input. At locations where the + condition is True, the `out` array will be set to the ufunc result. + Elsewhere, the `out` array will retain its original value. + Note that if an uninitialized `out` array is created via the default + ``out=None``, locations within it where the condition is False will + remain uninitialized. + **kwargs + For other keyword-only arguments, see the :ref:`ufunc docs <ufuncs.kwargs>`. + + Returns + ------- + r : ndarray or tuple of ndarray + `r` will have the shape that the arrays in `x` broadcast to; if `out` is + provided, it will be returned. If not, `r` will be allocated and + may contain uninitialized values. If the function has more than one + output, then the result will be a tuple of arrays. + + """) + + +############################################################################## +# +# ufunc attributes +# +############################################################################## + +add_newdoc('numpy.core', 'ufunc', ('identity', + """ + The identity value. + + Data attribute containing the identity element for the ufunc, if it has one. + If it does not, the attribute value is None. 
+ + Examples + -------- + >>> np.add.identity + 0 + >>> np.multiply.identity + 1 + >>> np.power.identity + 1 + >>> print(np.exp.identity) + None + """)) + +add_newdoc('numpy.core', 'ufunc', ('nargs', + """ + The number of arguments. + + Data attribute containing the number of arguments the ufunc takes, including + optional ones. + + Notes + ----- + Typically this value will be one more than what you might expect because all + ufuncs take the optional "out" argument. + + Examples + -------- + >>> np.add.nargs + 3 + >>> np.multiply.nargs + 3 + >>> np.power.nargs + 3 + >>> np.exp.nargs + 2 + """)) + +add_newdoc('numpy.core', 'ufunc', ('nin', + """ + The number of inputs. + + Data attribute containing the number of arguments the ufunc treats as input. + + Examples + -------- + >>> np.add.nin + 2 + >>> np.multiply.nin + 2 + >>> np.power.nin + 2 + >>> np.exp.nin + 1 + """)) + +add_newdoc('numpy.core', 'ufunc', ('nout', + """ + The number of outputs. + + Data attribute containing the number of arguments the ufunc treats as output. + + Notes + ----- + Since all ufuncs can take output arguments, this will always be (at least) 1. + + Examples + -------- + >>> np.add.nout + 1 + >>> np.multiply.nout + 1 + >>> np.power.nout + 1 + >>> np.exp.nout + 1 + + """)) + +add_newdoc('numpy.core', 'ufunc', ('ntypes', + """ + The number of types. + + The number of numerical NumPy types - of which there are 18 total - on which + the ufunc can operate. + + See Also + -------- + numpy.ufunc.types + + Examples + -------- + >>> np.add.ntypes + 18 + >>> np.multiply.ntypes + 18 + >>> np.power.ntypes + 17 + >>> np.exp.ntypes + 7 + >>> np.remainder.ntypes + 14 + + """)) + +add_newdoc('numpy.core', 'ufunc', ('types', + """ + Returns a list with types grouped input->output. + + Data attribute listing the data-type "Domain-Range" groupings the ufunc can + deliver. The data-types are given using the character codes. + + See Also + -------- + numpy.ufunc.ntypes + + Examples + -------- + >>> np.add.types + ['??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', + 'LL->L', 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D', + 'GG->G', 'OO->O'] + + >>> np.multiply.types + ['??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', + 'LL->L', 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D', + 'GG->G', 'OO->O'] + + >>> np.power.types + ['bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', 'LL->L', + 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D', 'GG->G', + 'OO->O'] + + >>> np.exp.types + ['f->f', 'd->d', 'g->g', 'F->F', 'D->D', 'G->G', 'O->O'] + + >>> np.remainder.types + ['bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', 'LL->L', + 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'OO->O'] + + """)) + +add_newdoc('numpy.core', 'ufunc', ('signature', + """ + Definition of the core elements a generalized ufunc operates on. + + The signature determines how the dimensions of each input/output array + are split into core and loop dimensions: + + 1. Each dimension in the signature is matched to a dimension of the + corresponding passed-in array, starting from the end of the shape tuple. + 2. Core dimensions assigned to the same label in the signature must have + exactly matching sizes, no broadcasting is performed. + 3. The core dimensions are removed from all inputs and the remaining + dimensions are broadcast together, defining the loop dimensions. 
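+
+    For instance, with a matrix-multiplication signature
+    ``(m,n),(n,p)->(m,p)``, calling the gufunc on shapes ``(2, 4, 3)`` and
+    ``(3, 5)`` matches the core dimensions from the right, checks that the
+    shared ``n`` (here 3) agrees, and broadcasts the remaining loop
+    dimensions; ``np.matmul`` behaves this way (shapes chosen arbitrarily):
+
+    >>> np.matmul(np.ones((2, 4, 3)), np.ones((3, 5))).shape
+    (2, 4, 5)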
+ + Notes + ----- + Generalized ufuncs are used internally in many linalg functions, and in + the testing suite; the examples below are taken from these. + For ufuncs that operate on scalars, the signature is None, which is + equivalent to '()' for every argument. + + Examples + -------- + >>> np.core.umath_tests.matrix_multiply.signature + '(m,n),(n,p)->(m,p)' + >>> np.linalg._umath_linalg.det.signature + '(m,m)->()' + >>> np.add.signature is None + True # equivalent to '(),()->()' + """)) + +############################################################################## +# +# ufunc methods +# +############################################################################## + +add_newdoc('numpy.core', 'ufunc', ('reduce', + """ + reduce(array, axis=0, dtype=None, out=None, keepdims=False, initial=<no value>, where=True) + + Reduces `array`'s dimension by one, by applying ufunc along one axis. + + Let :math:`array.shape = (N_0, ..., N_i, ..., N_{M-1})`. Then + :math:`ufunc.reduce(array, axis=i)[k_0, ..,k_{i-1}, k_{i+1}, .., k_{M-1}]` = + the result of iterating `j` over :math:`range(N_i)`, cumulatively applying + ufunc to each :math:`array[k_0, ..,k_{i-1}, j, k_{i+1}, .., k_{M-1}]`. + For a one-dimensional array, reduce produces results equivalent to: + :: + + r = op.identity # op = ufunc + for i in range(len(A)): + r = op(r, A[i]) + return r + + For example, add.reduce() is equivalent to sum(). + + Parameters + ---------- + array : array_like + The array to act on. + axis : None or int or tuple of ints, optional + Axis or axes along which a reduction is performed. + The default (`axis` = 0) is perform a reduction over the first + dimension of the input array. `axis` may be negative, in + which case it counts from the last to the first axis. + + .. versionadded:: 1.7.0 + + If this is None, a reduction is performed over all the axes. + If this is a tuple of ints, a reduction is performed on multiple + axes, instead of a single axis or all the axes as before. + + For operations which are either not commutative or not associative, + doing a reduction over multiple axes is not well-defined. The + ufuncs do not currently raise an exception in this case, but will + likely do so in the future. + dtype : data-type code, optional + The type used to represent the intermediate results. Defaults + to the data-type of the output array if this is provided, or + the data-type of the input array if no output array is provided. + out : ndarray, None, or tuple of ndarray and None, optional + A location into which the result is stored. If not provided or None, + a freshly-allocated array is returned. For consistency with + ``ufunc.__call__``, if given as a keyword, this may be wrapped in a + 1-element tuple. + + .. versionchanged:: 1.13.0 + Tuples are allowed for keyword argument. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the original `array`. + + .. versionadded:: 1.7.0 + initial : scalar, optional + The value with which to start the reduction. + If the ufunc has no identity or the dtype is object, this defaults + to None - otherwise it defaults to ufunc.identity. + If ``None`` is given, the first element of the reduction is used, + and an error is thrown if the reduction is empty. + + .. 
versionadded:: 1.15.0 + + where : array_like of bool, optional + A boolean array which is broadcasted to match the dimensions + of `array`, and selects elements to include in the reduction. Note + that for ufuncs like ``minimum`` that do not have an identity + defined, one has to pass in also ``initial``. + + .. versionadded:: 1.17.0 + + Returns + ------- + r : ndarray + The reduced array. If `out` was supplied, `r` is a reference to it. + + Examples + -------- + >>> np.multiply.reduce([2,3,5]) + 30 + + A multi-dimensional array example: + + >>> X = np.arange(8).reshape((2,2,2)) + >>> X + array([[[0, 1], + [2, 3]], + [[4, 5], + [6, 7]]]) + >>> np.add.reduce(X, 0) + array([[ 4, 6], + [ 8, 10]]) + >>> np.add.reduce(X) # confirm: default axis value is 0 + array([[ 4, 6], + [ 8, 10]]) + >>> np.add.reduce(X, 1) + array([[ 2, 4], + [10, 12]]) + >>> np.add.reduce(X, 2) + array([[ 1, 5], + [ 9, 13]]) + + You can use the ``initial`` keyword argument to initialize the reduction + with a different value, and ``where`` to select specific elements to include: + + >>> np.add.reduce([10], initial=5) + 15 + >>> np.add.reduce(np.ones((2, 2, 2)), axis=(0, 2), initial=10) + array([14., 14.]) + >>> a = np.array([10., np.nan, 10]) + >>> np.add.reduce(a, where=~np.isnan(a)) + 20.0 + + Allows reductions of empty arrays where they would normally fail, i.e. + for ufuncs without an identity. + + >>> np.minimum.reduce([], initial=np.inf) + inf + >>> np.minimum.reduce([[1., 2.], [3., 4.]], initial=10., where=[True, False]) + array([ 1., 10.]) + >>> np.minimum.reduce([]) + Traceback (most recent call last): + ... + ValueError: zero-size array to reduction operation minimum which has no identity + """)) + +add_newdoc('numpy.core', 'ufunc', ('accumulate', + """ + accumulate(array, axis=0, dtype=None, out=None) + + Accumulate the result of applying the operator to all elements. + + For a one-dimensional array, accumulate produces results equivalent to:: + + r = np.empty(len(A)) + t = op.identity # op = the ufunc being applied to A's elements + for i in range(len(A)): + t = op(t, A[i]) + r[i] = t + return r + + For example, add.accumulate() is equivalent to np.cumsum(). + + For a multi-dimensional array, accumulate is applied along only one + axis (axis zero by default; see Examples below) so repeated use is + necessary if one wants to accumulate over multiple axes. + + Parameters + ---------- + array : array_like + The array to act on. + axis : int, optional + The axis along which to apply the accumulation; default is zero. + dtype : data-type code, optional + The data-type used to represent the intermediate results. Defaults + to the data-type of the output array if such is provided, or the + data-type of the input array if no output array is provided. + out : ndarray, None, or tuple of ndarray and None, optional + A location into which the result is stored. If not provided or None, + a freshly-allocated array is returned. For consistency with + ``ufunc.__call__``, if given as a keyword, this may be wrapped in a + 1-element tuple. + + .. versionchanged:: 1.13.0 + Tuples are allowed for keyword argument. + + Returns + ------- + r : ndarray + The accumulated values. If `out` was supplied, `r` is a reference to + `out`. 
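+
+    The ``dtype`` argument forces the type used for the intermediate
+    results (a short sketch with arbitrary values):
+
+    >>> np.add.accumulate([1, 2, 3], dtype=float)
+    array([1., 3., 6.])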
+ + Examples + -------- + 1-D array examples: + + >>> np.add.accumulate([2, 3, 5]) + array([ 2, 5, 10]) + >>> np.multiply.accumulate([2, 3, 5]) + array([ 2, 6, 30]) + + 2-D array examples: + + >>> I = np.eye(2) + >>> I + array([[1., 0.], + [0., 1.]]) + + Accumulate along axis 0 (rows), down columns: + + >>> np.add.accumulate(I, 0) + array([[1., 0.], + [1., 1.]]) + >>> np.add.accumulate(I) # no axis specified = axis zero + array([[1., 0.], + [1., 1.]]) + + Accumulate along axis 1 (columns), through rows: + + >>> np.add.accumulate(I, 1) + array([[1., 1.], + [0., 1.]]) + + """)) + +add_newdoc('numpy.core', 'ufunc', ('reduceat', + """ + reduceat(array, indices, axis=0, dtype=None, out=None) + + Performs a (local) reduce with specified slices over a single axis. + + For i in ``range(len(indices))``, `reduceat` computes + ``ufunc.reduce(array[indices[i]:indices[i+1]])``, which becomes the i-th + generalized "row" parallel to `axis` in the final result (i.e., in a + 2-D array, for example, if `axis = 0`, it becomes the i-th row, but if + `axis = 1`, it becomes the i-th column). There are three exceptions to this: + + * when ``i = len(indices) - 1`` (so for the last index), + ``indices[i+1] = array.shape[axis]``. + * if ``indices[i] >= indices[i + 1]``, the i-th generalized "row" is + simply ``array[indices[i]]``. + * if ``indices[i] >= len(array)`` or ``indices[i] < 0``, an error is raised. + + The shape of the output depends on the size of `indices`, and may be + larger than `array` (this happens if ``len(indices) > array.shape[axis]``). + + Parameters + ---------- + array : array_like + The array to act on. + indices : array_like + Paired indices, comma separated (not colon), specifying slices to + reduce. + axis : int, optional + The axis along which to apply the reduceat. + dtype : data-type code, optional + The type used to represent the intermediate results. Defaults + to the data type of the output array if this is provided, or + the data type of the input array if no output array is provided. + out : ndarray, None, or tuple of ndarray and None, optional + A location into which the result is stored. If not provided or None, + a freshly-allocated array is returned. For consistency with + ``ufunc.__call__``, if given as a keyword, this may be wrapped in a + 1-element tuple. + + .. versionchanged:: 1.13.0 + Tuples are allowed for keyword argument. + + Returns + ------- + r : ndarray + The reduced values. If `out` was supplied, `r` is a reference to + `out`. + + Notes + ----- + A descriptive example: + + If `array` is 1-D, the function `ufunc.accumulate(array)` is the same as + ``ufunc.reduceat(array, indices)[::2]`` where `indices` is + ``range(len(array) - 1)`` with a zero placed + in every other element: + ``indices = zeros(2 * len(array) - 1)``, + ``indices[1::2] = range(1, len(array))``. + + Don't be fooled by this attribute's name: `reduceat(array)` is not + necessarily smaller than `array`. 
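+
+    A quick check of this equivalence (values chosen arbitrarily):
+
+    >>> a = np.arange(5)
+    >>> np.add.accumulate(a)
+    array([ 0,  1,  3,  6, 10])
+    >>> indices = np.zeros(2 * len(a) - 1, dtype=int)
+    >>> indices[1::2] = np.arange(1, len(a))
+    >>> np.add.reduceat(a, indices)[::2]
+    array([ 0,  1,  3,  6, 10])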
+ + Examples + -------- + To take the running sum of four successive values: + + >>> np.add.reduceat(np.arange(8),[0,4, 1,5, 2,6, 3,7])[::2] + array([ 6, 10, 14, 18]) + + A 2-D example: + + >>> x = np.linspace(0, 15, 16).reshape(4,4) + >>> x + array([[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.], + [ 8., 9., 10., 11.], + [12., 13., 14., 15.]]) + + :: + + # reduce such that the result has the following five rows: + # [row1 + row2 + row3] + # [row4] + # [row2] + # [row3] + # [row1 + row2 + row3 + row4] + + >>> np.add.reduceat(x, [0, 3, 1, 2, 0]) + array([[12., 15., 18., 21.], + [12., 13., 14., 15.], + [ 4., 5., 6., 7.], + [ 8., 9., 10., 11.], + [24., 28., 32., 36.]]) + + :: + + # reduce such that result has the following two columns: + # [col1 * col2 * col3, col4] + + >>> np.multiply.reduceat(x, [0, 3], 1) + array([[ 0., 3.], + [ 120., 7.], + [ 720., 11.], + [2184., 15.]]) + + """)) + +add_newdoc('numpy.core', 'ufunc', ('outer', + r""" + outer(A, B, /, **kwargs) + + Apply the ufunc `op` to all pairs (a, b) with a in `A` and b in `B`. + + Let ``M = A.ndim``, ``N = B.ndim``. Then the result, `C`, of + ``op.outer(A, B)`` is an array of dimension M + N such that: + + .. math:: C[i_0, ..., i_{M-1}, j_0, ..., j_{N-1}] = + op(A[i_0, ..., i_{M-1}], B[j_0, ..., j_{N-1}]) + + For `A` and `B` one-dimensional, this is equivalent to:: + + r = empty(len(A),len(B)) + for i in range(len(A)): + for j in range(len(B)): + r[i,j] = op(A[i], B[j]) # op = ufunc in question + + Parameters + ---------- + A : array_like + First array + B : array_like + Second array + kwargs : any + Arguments to pass on to the ufunc. Typically `dtype` or `out`. + See `ufunc` for a comprehensive overview of all available arguments. + + Returns + ------- + r : ndarray + Output array + + See Also + -------- + numpy.outer : A less powerful version of ``np.multiply.outer`` + that `ravel`\ s all inputs to 1D. This exists + primarily for compatibility with old code. + + tensordot : ``np.tensordot(a, b, axes=((), ()))`` and + ``np.multiply.outer(a, b)`` behave same for all + dimensions of a and b. + + Examples + -------- + >>> np.multiply.outer([1, 2, 3], [4, 5, 6]) + array([[ 4, 5, 6], + [ 8, 10, 12], + [12, 15, 18]]) + + A multi-dimensional example: + + >>> A = np.array([[1, 2, 3], [4, 5, 6]]) + >>> A.shape + (2, 3) + >>> B = np.array([[1, 2, 3, 4]]) + >>> B.shape + (1, 4) + >>> C = np.multiply.outer(A, B) + >>> C.shape; C + (2, 3, 1, 4) + array([[[[ 1, 2, 3, 4]], + [[ 2, 4, 6, 8]], + [[ 3, 6, 9, 12]]], + [[[ 4, 8, 12, 16]], + [[ 5, 10, 15, 20]], + [[ 6, 12, 18, 24]]]]) + + """)) + +add_newdoc('numpy.core', 'ufunc', ('at', + """ + at(a, indices, b=None, /) + + Performs unbuffered in place operation on operand 'a' for elements + specified by 'indices'. For addition ufunc, this method is equivalent to + ``a[indices] += b``, except that results are accumulated for elements that + are indexed more than once. For example, ``a[[0,0]] += 1`` will only + increment the first element once because of buffering, whereas + ``add.at(a, [0,0], 1)`` will increment the first element twice. + + .. versionadded:: 1.8.0 + + Parameters + ---------- + a : array_like + The array to perform in place operation on. + indices : array_like or tuple + Array like index object or slice object for indexing into first + operand. If first operand has multiple dimensions, indices can be a + tuple of array like index objects or slice objects. + b : array_like + Second operand for ufuncs requiring two operands. 
Operand must be + broadcastable over first operand after indexing or slicing. + + Examples + -------- + Set items 0 and 1 to their negative values: + + >>> a = np.array([1, 2, 3, 4]) + >>> np.negative.at(a, [0, 1]) + >>> a + array([-1, -2, 3, 4]) + + Increment items 0 and 1, and increment item 2 twice: + + >>> a = np.array([1, 2, 3, 4]) + >>> np.add.at(a, [0, 1, 2, 2], 1) + >>> a + array([2, 3, 5, 4]) + + Add items 0 and 1 in first array to second array, + and store results in first array: + + >>> a = np.array([1, 2, 3, 4]) + >>> b = np.array([1, 2]) + >>> np.add.at(a, [0, 1], b) + >>> a + array([2, 4, 3, 4]) + + """)) + +add_newdoc('numpy.core', 'ufunc', ('resolve_dtypes', + """ + resolve_dtypes(dtypes, *, signature=None, casting=None, reduction=False) + + Find the dtypes NumPy will use for the operation. Both input and + output dtypes are returned and may differ from those provided. + + .. note:: + + This function always applies NEP 50 rules since it is not provided + any actual values. The Python types ``int``, ``float``, and + ``complex`` thus behave weak and should be passed for "untyped" + Python input. + + Parameters + ---------- + dtypes : tuple of dtypes, None, or literal int, float, complex + The input dtypes for each operand. Output operands can be + None, indicating that the dtype must be found. + signature : tuple of DTypes or None, optional + If given, enforces exact DType (classes) of the specific operand. + The ufunc ``dtype`` argument is equivalent to passing a tuple with + only output dtypes set. + casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional + The casting mode when casting is necessary. This is identical to + the ufunc call casting modes. + reduction : boolean + If given, the resolution assumes a reduce operation is happening + which slightly changes the promotion and type resolution rules. + `dtypes` is usually something like ``(None, np.dtype("i2"), None)`` + for reductions (first input is also the output). + + .. note:: + + The default casting mode is "same_kind", however, as of + NumPy 1.24, NumPy uses "unsafe" for reductions. + + Returns + ------- + dtypes : tuple of dtypes + The dtypes which NumPy would use for the calculation. Note that + dtypes may not match the passed in ones (casting is necessary). + + See Also + -------- + numpy.ufunc._resolve_dtypes_and_context : + Similar function to this, but returns additional information which + give access to the core C functionality of NumPy. + + Examples + -------- + This API requires passing dtypes, define them for convenience: + + >>> int32 = np.dtype("int32") + >>> float32 = np.dtype("float32") + + The typical ufunc call does not pass an output dtype. `np.add` has two + inputs and one output, so leave the output as ``None`` (not provided): + + >>> np.add.resolve_dtypes((int32, float32, None)) + (dtype('float64'), dtype('float64'), dtype('float64')) + + The loop found uses "float64" for all operands (including the output), the + first input would be cast. + + ``resolve_dtypes`` supports "weak" handling for Python scalars by passing + ``int``, ``float``, or ``complex``: + + >>> np.add.resolve_dtypes((float32, float, None)) + (dtype('float32'), dtype('float32'), dtype('float32')) + + Where the Python ``float`` behaves samilar to a Python value ``0.0`` + in a ufunc call. (See :ref:`NEP 50 <NEP50>` for details.) 
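+
+    A Python ``int`` is treated the same "weak" way (a sketch assuming the
+    default casting rules):
+
+    >>> np.add.resolve_dtypes((int32, int, None))
+    (dtype('int32'), dtype('int32'), dtype('int32'))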
+ + """)) + +add_newdoc('numpy.core', 'ufunc', ('_resolve_dtypes_and_context', + """ + _resolve_dtypes_and_context(dtypes, *, signature=None, casting=None, reduction=False) + + See `numpy.ufunc.resolve_dtypes` for parameter information. This + function is considered *unstable*. You may use it, but the returned + information is NumPy version specific and expected to change. + Large API/ABI changes are not expected, but a new NumPy version is + expected to require updating code using this functionality. + + This function is designed to be used in conjunction with + `numpy.ufunc._get_strided_loop`. The calls are split to mirror the C API + and allow future improvements. + + Returns + ------- + dtypes : tuple of dtypes + call_info : + PyCapsule with all necessary information to get access to low level + C calls. See `numpy.ufunc._get_strided_loop` for more information. + + """)) + +add_newdoc('numpy.core', 'ufunc', ('_get_strided_loop', + """ + _get_strided_loop(call_info, /, *, fixed_strides=None) + + This function fills in the ``call_info`` capsule to include all + information necessary to call the low-level strided loop from NumPy. + + See notes for more information. + + Parameters + ---------- + call_info : PyCapsule + The PyCapsule returned by `numpy.ufunc._resolve_dtypes_and_context`. + fixed_strides : tuple of int or None, optional + A tuple with fixed byte strides of all input arrays. NumPy may use + this information to find specialized loops, so any call must follow + the given stride. Use ``None`` to indicate that the stride is not + known (or not fixed) for all calls. + + Notes + ----- + Together with `numpy.ufunc._resolve_dtypes_and_context` this function + gives low-level access to the NumPy ufunc loops. + The first function does general preparation and returns the required + information. It returns this as a C capsule with the version specific + name ``numpy_1.24_ufunc_call_info``. + The NumPy 1.24 ufunc call info capsule has the following layout:: + + typedef struct { + PyArrayMethod_StridedLoop *strided_loop; + PyArrayMethod_Context *context; + NpyAuxData *auxdata; + + /* Flag information (expected to change) */ + npy_bool requires_pyapi; /* GIL is required by loop */ + + /* Loop doesn't set FPE flags; if not set check FPE flags */ + npy_bool no_floatingpoint_errors; + } ufunc_call_info; + + Note that the first call only fills in the ``context``. The call to + ``_get_strided_loop`` fills in all other data. + Please see the ``numpy/experimental_dtype_api.h`` header for exact + call information; the main thing to note is that the new-style loops + return 0 on success, -1 on failure. They are passed context as new + first input and ``auxdata`` as (replaced) last. + + Only the ``strided_loop``signature is considered guaranteed stable + for NumPy bug-fix releases. All other API is tied to the experimental + API versioning. + + The reason for the split call is that cast information is required to + decide what the fixed-strides will be. + + NumPy ties the lifetime of the ``auxdata`` information to the capsule. 
+ + """)) + + + +############################################################################## +# +# Documentation for dtype attributes and methods +# +############################################################################## + +############################################################################## +# +# dtype object +# +############################################################################## + +add_newdoc('numpy.core.multiarray', 'dtype', + """ + dtype(dtype, align=False, copy=False, [metadata]) + + Create a data type object. + + A numpy array is homogeneous, and contains elements described by a + dtype object. A dtype object can be constructed from different + combinations of fundamental numeric types. + + Parameters + ---------- + dtype + Object to be converted to a data type object. + align : bool, optional + Add padding to the fields to match what a C compiler would output + for a similar C-struct. Can be ``True`` only if `obj` is a dictionary + or a comma-separated string. If a struct dtype is being created, + this also sets a sticky alignment flag ``isalignedstruct``. + copy : bool, optional + Make a new copy of the data-type object. If ``False``, the result + may just be a reference to a built-in data-type object. + metadata : dict, optional + An optional dictionary with dtype metadata. + + See also + -------- + result_type + + Examples + -------- + Using array-scalar type: + + >>> np.dtype(np.int16) + dtype('int16') + + Structured type, one field name 'f1', containing int16: + + >>> np.dtype([('f1', np.int16)]) + dtype([('f1', '<i2')]) + + Structured type, one field named 'f1', in itself containing a structured + type with one field: + + >>> np.dtype([('f1', [('f1', np.int16)])]) + dtype([('f1', [('f1', '<i2')])]) + + Structured type, two fields: the first field contains an unsigned int, the + second an int32: + + >>> np.dtype([('f1', np.uint64), ('f2', np.int32)]) + dtype([('f1', '<u8'), ('f2', '<i4')]) + + Using array-protocol type strings: + + >>> np.dtype([('a','f8'),('b','S10')]) + dtype([('a', '<f8'), ('b', 'S10')]) + + Using comma-separated field formats. The shape is (2,3): + + >>> np.dtype("i4, (2,3)f8") + dtype([('f0', '<i4'), ('f1', '<f8', (2, 3))]) + + Using tuples. ``int`` is a fixed type, 3 the field's shape. ``void`` + is a flexible type, here of size 10: + + >>> np.dtype([('hello',(np.int64,3)),('world',np.void,10)]) + dtype([('hello', '<i8', (3,)), ('world', 'V10')]) + + Subdivide ``int16`` into 2 ``int8``'s, called x and y. 0 and 1 are + the offsets in bytes: + + >>> np.dtype((np.int16, {'x':(np.int8,0), 'y':(np.int8,1)})) + dtype((numpy.int16, [('x', 'i1'), ('y', 'i1')])) + + Using dictionaries. Two fields named 'gender' and 'age': + + >>> np.dtype({'names':['gender','age'], 'formats':['S1',np.uint8]}) + dtype([('gender', 'S1'), ('age', 'u1')]) + + Offsets in bytes, here 0 and 25: + + >>> np.dtype({'surname':('S25',0),'age':(np.uint8,25)}) + dtype([('surname', 'S25'), ('age', 'u1')]) + + """) + +############################################################################## +# +# dtype attributes +# +############################################################################## + +add_newdoc('numpy.core.multiarray', 'dtype', ('alignment', + """ + The required alignment (bytes) of this data-type according to the compiler. + + More information is available in the C-API section of the manual. 
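+
+    For struct dtypes created with ``align=True``, the alignment follows
+    the most strictly aligned field (a sketch; exact values can be
+    platform dependent):
+
+    >>> np.dtype([('a', np.int8), ('b', np.int32)], align=True).alignment
+    4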
+ + Examples + -------- + + >>> x = np.dtype('i4') + >>> x.alignment + 4 + + >>> x = np.dtype(float) + >>> x.alignment + 8 + + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('byteorder', + """ + A character indicating the byte-order of this data-type object. + + One of: + + === ============== + '=' native + '<' little-endian + '>' big-endian + '|' not applicable + === ============== + + All built-in data-type objects have byteorder either '=' or '|'. + + Examples + -------- + + >>> dt = np.dtype('i2') + >>> dt.byteorder + '=' + >>> # endian is not relevant for 8 bit numbers + >>> np.dtype('i1').byteorder + '|' + >>> # or ASCII strings + >>> np.dtype('S2').byteorder + '|' + >>> # Even if specific code is given, and it is native + >>> # '=' is the byteorder + >>> import sys + >>> sys_is_le = sys.byteorder == 'little' + >>> native_code = '<' if sys_is_le else '>' + >>> swapped_code = '>' if sys_is_le else '<' + >>> dt = np.dtype(native_code + 'i2') + >>> dt.byteorder + '=' + >>> # Swapped code shows up as itself + >>> dt = np.dtype(swapped_code + 'i2') + >>> dt.byteorder == swapped_code + True + + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('char', + """A unique character code for each of the 21 different built-in types. + + Examples + -------- + + >>> x = np.dtype(float) + >>> x.char + 'd' + + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('descr', + """ + `__array_interface__` description of the data-type. + + The format is that required by the 'descr' key in the + `__array_interface__` attribute. + + Warning: This attribute exists specifically for `__array_interface__`, + and passing it directly to `np.dtype` will not accurately reconstruct + some dtypes (e.g., scalar and subarray dtypes). + + Examples + -------- + + >>> x = np.dtype(float) + >>> x.descr + [('', '<f8')] + + >>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))]) + >>> dt.descr + [('name', '<U16'), ('grades', '<f8', (2,))] + + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('fields', + """ + Dictionary of named fields defined for this data type, or ``None``. + + The dictionary is indexed by keys that are the names of the fields. + Each entry in the dictionary is a tuple fully describing the field:: + + (dtype, offset[, title]) + + Offset is limited to C int, which is signed and usually 32 bits. + If present, the optional title can be any object (if it is a string + or unicode then it will also be a key in the fields dictionary, + otherwise it's meta-data). Notice also that the first two elements + of the tuple can be passed directly as arguments to the ``ndarray.getfield`` + and ``ndarray.setfield`` methods. + + See Also + -------- + ndarray.getfield, ndarray.setfield + + Examples + -------- + >>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))]) + >>> print(dt.fields) + {'grades': (dtype(('float64',(2,))), 16), 'name': (dtype('|S16'), 0)} + + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('flags', + """ + Bit-flags describing how this data type is to be interpreted. + + Bit-masks are in `numpy.core.multiarray` as the constants + `ITEM_HASOBJECT`, `LIST_PICKLE`, `ITEM_IS_POINTER`, `NEEDS_INIT`, + `NEEDS_PYAPI`, `USE_GETITEM`, `USE_SETITEM`. A full explanation + of these flags is in C-API documentation; they are largely useful + for user-defined data-types. + + The following example demonstrates that operations on this particular + dtype requires Python C-API. 
+ + Examples + -------- + + >>> x = np.dtype([('a', np.int32, 8), ('b', np.float64, 6)]) + >>> x.flags + 16 + >>> np.core.multiarray.NEEDS_PYAPI + 16 + + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('hasobject', + """ + Boolean indicating whether this dtype contains any reference-counted + objects in any fields or sub-dtypes. + + Recall that what is actually in the ndarray memory representing + the Python object is the memory address of that object (a pointer). + Special handling may be required, and this attribute is useful for + distinguishing data types that may contain arbitrary Python objects + and data-types that won't. + + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('isbuiltin', + """ + Integer indicating how this dtype relates to the built-in dtypes. + + Read-only. + + = ======================================================================== + 0 if this is a structured array type, with fields + 1 if this is a dtype compiled into numpy (such as ints, floats etc) + 2 if the dtype is for a user-defined numpy type + A user-defined type uses the numpy C-API machinery to extend + numpy to handle a new array type. See + :ref:`user.user-defined-data-types` in the NumPy manual. + = ======================================================================== + + Examples + -------- + >>> dt = np.dtype('i2') + >>> dt.isbuiltin + 1 + >>> dt = np.dtype('f8') + >>> dt.isbuiltin + 1 + >>> dt = np.dtype([('field1', 'f8')]) + >>> dt.isbuiltin + 0 + + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('isnative', + """ + Boolean indicating whether the byte order of this dtype is native + to the platform. + + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('isalignedstruct', + """ + Boolean indicating whether the dtype is a struct which maintains + field alignment. This flag is sticky, so when combining multiple + structs together, it is preserved and produces new dtypes which + are also aligned. + + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('itemsize', + """ + The element size of this data-type object. + + For 18 of the 21 types this number is fixed by the data-type. + For the flexible data-types, this number can be anything. + + Examples + -------- + + >>> arr = np.array([[1, 2], [3, 4]]) + >>> arr.dtype + dtype('int64') + >>> arr.itemsize + 8 + + >>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))]) + >>> dt.itemsize + 80 + + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('kind', + """ + A character code (one of 'biufcmMOSUV') identifying the general kind of data. + + = ====================== + b boolean + i signed integer + u unsigned integer + f floating-point + c complex floating-point + m timedelta + M datetime + O object + S (byte-)string + U Unicode + V void + = ====================== + + Examples + -------- + + >>> dt = np.dtype('i4') + >>> dt.kind + 'i' + >>> dt = np.dtype('f8') + >>> dt.kind + 'f' + >>> dt = np.dtype([('field1', 'f8')]) + >>> dt.kind + 'V' + + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('metadata', + """ + Either ``None`` or a readonly dictionary of metadata (mappingproxy). + + The metadata field can be set using any dictionary at data-type + creation. NumPy currently has no uniform approach to propagating + metadata; although some array operations preserve it, there is no + guarantee that others will. + + .. warning:: + + Although used in certain projects, this feature was long undocumented + and is not well supported. 
Some aspects of metadata propagation + are expected to change in the future. + + Examples + -------- + + >>> dt = np.dtype(float, metadata={"key": "value"}) + >>> dt.metadata["key"] + 'value' + >>> arr = np.array([1, 2, 3], dtype=dt) + >>> arr.dtype.metadata + mappingproxy({'key': 'value'}) + + Adding arrays with identical datatypes currently preserves the metadata: + + >>> (arr + arr).dtype.metadata + mappingproxy({'key': 'value'}) + + But if the arrays have different dtype metadata, the metadata may be + dropped: + + >>> dt2 = np.dtype(float, metadata={"key2": "value2"}) + >>> arr2 = np.array([3, 2, 1], dtype=dt2) + >>> (arr + arr2).dtype.metadata is None + True # The metadata field is cleared so None is returned + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('name', + """ + A bit-width name for this data-type. + + Un-sized flexible data-type objects do not have this attribute. + + Examples + -------- + + >>> x = np.dtype(float) + >>> x.name + 'float64' + >>> x = np.dtype([('a', np.int32, 8), ('b', np.float64, 6)]) + >>> x.name + 'void640' + + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('names', + """ + Ordered list of field names, or ``None`` if there are no fields. + + The names are ordered according to increasing byte offset. This can be + used, for example, to walk through all of the named fields in offset order. + + Examples + -------- + >>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))]) + >>> dt.names + ('name', 'grades') + + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('num', + """ + A unique number for each of the 21 different built-in types. + + These are roughly ordered from least-to-most precision. + + Examples + -------- + + >>> dt = np.dtype(str) + >>> dt.num + 19 + + >>> dt = np.dtype(float) + >>> dt.num + 12 + + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('shape', + """ + Shape tuple of the sub-array if this data type describes a sub-array, + and ``()`` otherwise. + + Examples + -------- + + >>> dt = np.dtype(('i4', 4)) + >>> dt.shape + (4,) + + >>> dt = np.dtype(('i4', (2, 3))) + >>> dt.shape + (2, 3) + + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('ndim', + """ + Number of dimensions of the sub-array if this data type describes a + sub-array, and ``0`` otherwise. + + .. versionadded:: 1.13.0 + + Examples + -------- + >>> x = np.dtype(float) + >>> x.ndim + 0 + + >>> x = np.dtype((float, 8)) + >>> x.ndim + 1 + + >>> x = np.dtype(('i4', (3, 4))) + >>> x.ndim + 2 + + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('str', + """The array-protocol typestring of this data-type object.""")) + +add_newdoc('numpy.core.multiarray', 'dtype', ('subdtype', + """ + Tuple ``(item_dtype, shape)`` if this `dtype` describes a sub-array, and + None otherwise. + + The *shape* is the fixed shape of the sub-array described by this + data type, and *item_dtype* the data type of the array. + + If a field whose dtype object has this attribute is retrieved, + then the extra dimensions implied by *shape* are tacked on to + the end of the retrieved array. + + See Also + -------- + dtype.base + + Examples + -------- + >>> x = numpy.dtype('8f') + >>> x.subdtype + (dtype('float32'), (8,)) + + >>> x = numpy.dtype('i2') + >>> x.subdtype + >>> + + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('base', + """ + Returns dtype for the base element of the subarrays, + regardless of their dimension or shape. 
+
+    See Also
+    --------
+    dtype.subdtype
+
+    Examples
+    --------
+    >>> x = numpy.dtype('8f')
+    >>> x.base
+    dtype('float32')
+
+    >>> x = numpy.dtype('i2')
+    >>> x.base
+    dtype('int16')
+
+    """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('type',
+    """The type object used to instantiate a scalar of this data-type."""))
+
+##############################################################################
+#
+# dtype methods
+#
+##############################################################################
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('newbyteorder',
+    """
+    newbyteorder(new_order='S', /)
+
+    Return a new dtype with a different byte order.
+
+    Changes are also made in all fields and sub-arrays of the data type.
+
+    Parameters
+    ----------
+    new_order : string, optional
+        Byte order to force; a value from the byte order specifications
+        below. The default value ('S') results in swapping the current
+        byte order. `new_order` codes can be any of:
+
+        * 'S' - swap dtype from current to opposite endian
+        * {'<', 'little'} - little endian
+        * {'>', 'big'} - big endian
+        * {'=', 'native'} - native order
+        * {'|', 'I'} - ignore (no change to byte order)
+
+    Returns
+    -------
+    new_dtype : dtype
+        New dtype object with the given change to the byte order.
+
+    Notes
+    -----
+    Changes are also made in all fields and sub-arrays of the data type.
+
+    Examples
+    --------
+    >>> import sys
+    >>> sys_is_le = sys.byteorder == 'little'
+    >>> native_code = '<' if sys_is_le else '>'
+    >>> swapped_code = '>' if sys_is_le else '<'
+    >>> native_dt = np.dtype(native_code+'i2')
+    >>> swapped_dt = np.dtype(swapped_code+'i2')
+    >>> native_dt.newbyteorder('S') == swapped_dt
+    True
+    >>> native_dt.newbyteorder() == swapped_dt
+    True
+    >>> native_dt == swapped_dt.newbyteorder('S')
+    True
+    >>> native_dt == swapped_dt.newbyteorder('=')
+    True
+    >>> native_dt == swapped_dt.newbyteorder('N')
+    True
+    >>> native_dt == native_dt.newbyteorder('|')
+    True
+    >>> np.dtype('<i2') == native_dt.newbyteorder('<')
+    True
+    >>> np.dtype('<i2') == native_dt.newbyteorder('L')
+    True
+    >>> np.dtype('>i2') == native_dt.newbyteorder('>')
+    True
+    >>> np.dtype('>i2') == native_dt.newbyteorder('B')
+    True
+
+    """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('__class_getitem__',
+    """
+    __class_getitem__(item, /)
+
+    Return a parametrized wrapper around the `~numpy.dtype` type.
+
+    .. versionadded:: 1.22
+
+    Returns
+    -------
+    alias : types.GenericAlias
+        A parametrized `~numpy.dtype` type.
+
+    Examples
+    --------
+    >>> import numpy as np
+
+    >>> np.dtype[np.int64]
+    numpy.dtype[numpy.int64]
+
+    See Also
+    --------
+    :pep:`585` : Type hinting generics in standard collections.
+
+    """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('__ge__',
+    """
+    __ge__(value, /)
+
+    Return ``self >= value``.
+
+    Equivalent to ``np.can_cast(value, self, casting="safe")``.
+
+    See Also
+    --------
+    can_cast : Returns True if cast between data types can occur according to
+               the casting rule.
+
+    """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('__le__',
+    """
+    __le__(value, /)
+
+    Return ``self <= value``.
+
+    Equivalent to ``np.can_cast(self, value, casting="safe")``.
+
+    See Also
+    --------
+    can_cast : Returns True if cast between data types can occur according to
+               the casting rule.
+
+    """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('__gt__',
+    """
+    __gt__(value, /)
+
+    Return ``self > value``.
+
+    Equivalent to
+    ``self != value and np.can_cast(value, self, casting="safe")``.
+ + See Also + -------- + can_cast : Returns True if cast between data types can occur according to + the casting rule. + + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('__lt__', + """ + __lt__(value, /) + + Return ``self < value``. + + Equivalent to + ``self != value and np.can_cast(self, value, casting="safe")``. + + See Also + -------- + can_cast : Returns True if cast between data types can occur according to + the casting rule. + + """)) + +############################################################################## +# +# Datetime-related Methods +# +############################################################################## + +add_newdoc('numpy.core.multiarray', 'busdaycalendar', + """ + busdaycalendar(weekmask='1111100', holidays=None) + + A business day calendar object that efficiently stores information + defining valid days for the busday family of functions. + + The default valid days are Monday through Friday ("business days"). + A busdaycalendar object can be specified with any set of weekly + valid days, plus an optional "holiday" dates that always will be invalid. + + Once a busdaycalendar object is created, the weekmask and holidays + cannot be modified. + + .. versionadded:: 1.7.0 + + Parameters + ---------- + weekmask : str or array_like of bool, optional + A seven-element array indicating which of Monday through Sunday are + valid days. May be specified as a length-seven list or array, like + [1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string + like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for + weekdays, optionally separated by white space. Valid abbreviations + are: Mon Tue Wed Thu Fri Sat Sun + holidays : array_like of datetime64[D], optional + An array of dates to consider as invalid dates, no matter which + weekday they fall upon. Holiday dates may be specified in any + order, and NaT (not-a-time) dates are ignored. This list is + saved in a normalized form that is suited for fast calculations + of valid days. + + Returns + ------- + out : busdaycalendar + A business day calendar object containing the specified + weekmask and holidays values. + + See Also + -------- + is_busday : Returns a boolean array indicating valid days. + busday_offset : Applies an offset counted in valid days. + busday_count : Counts how many valid days are in a half-open date range. + + Attributes + ---------- + Note: once a busdaycalendar object is created, you cannot modify the + weekmask or holidays. The attributes return copies of internal data. + weekmask : (copy) seven-element array of bool + holidays : (copy) sorted array of datetime64[D] + + Examples + -------- + >>> # Some important days in July + ... bdd = np.busdaycalendar( + ... holidays=['2011-07-01', '2011-07-04', '2011-07-17']) + >>> # Default is Monday to Friday weekdays + ... bdd.weekmask + array([ True, True, True, True, True, False, False]) + >>> # Any holidays already on the weekend are removed + ... 
bdd.holidays + array(['2011-07-01', '2011-07-04'], dtype='datetime64[D]') + """) + +add_newdoc('numpy.core.multiarray', 'busdaycalendar', ('weekmask', + """A copy of the seven-element boolean mask indicating valid days.""")) + +add_newdoc('numpy.core.multiarray', 'busdaycalendar', ('holidays', + """A copy of the holiday array indicating additional invalid days.""")) + +add_newdoc('numpy.core.multiarray', 'normalize_axis_index', + """ + normalize_axis_index(axis, ndim, msg_prefix=None) + + Normalizes an axis index, `axis`, such that is a valid positive index into + the shape of array with `ndim` dimensions. Raises an AxisError with an + appropriate message if this is not possible. + + Used internally by all axis-checking logic. + + .. versionadded:: 1.13.0 + + Parameters + ---------- + axis : int + The un-normalized index of the axis. Can be negative + ndim : int + The number of dimensions of the array that `axis` should be normalized + against + msg_prefix : str + A prefix to put before the message, typically the name of the argument + + Returns + ------- + normalized_axis : int + The normalized axis index, such that `0 <= normalized_axis < ndim` + + Raises + ------ + AxisError + If the axis index is invalid, when `-ndim <= axis < ndim` is false. + + Examples + -------- + >>> normalize_axis_index(0, ndim=3) + 0 + >>> normalize_axis_index(1, ndim=3) + 1 + >>> normalize_axis_index(-1, ndim=3) + 2 + + >>> normalize_axis_index(3, ndim=3) + Traceback (most recent call last): + ... + AxisError: axis 3 is out of bounds for array of dimension 3 + >>> normalize_axis_index(-4, ndim=3, msg_prefix='axes_arg') + Traceback (most recent call last): + ... + AxisError: axes_arg: axis -4 is out of bounds for array of dimension 3 + """) + +add_newdoc('numpy.core.multiarray', 'datetime_data', + """ + datetime_data(dtype, /) + + Get information about the step size of a date or time type. + + The returned tuple can be passed as the second argument of `numpy.datetime64` and + `numpy.timedelta64`. + + Parameters + ---------- + dtype : dtype + The dtype object, which must be a `datetime64` or `timedelta64` type. + + Returns + ------- + unit : str + The :ref:`datetime unit <arrays.dtypes.dateunits>` on which this dtype + is based. + count : int + The number of base units in a step. + + Examples + -------- + >>> dt_25s = np.dtype('timedelta64[25s]') + >>> np.datetime_data(dt_25s) + ('s', 25) + >>> np.array(10, dt_25s).astype('timedelta64[s]') + array(250, dtype='timedelta64[s]') + + The result can be used to construct a datetime that uses the same units + as a timedelta + + >>> np.datetime64('2010', np.datetime_data(dt_25s)) + numpy.datetime64('2010-01-01T00:00:00','25s') + """) + + +############################################################################## +# +# Documentation for `generic` attributes and methods +# +############################################################################## + +add_newdoc('numpy.core.numerictypes', 'generic', + """ + Base class for numpy scalar types. + + Class from which most (all?) numpy scalar types are derived. For + consistency, exposes the same API as `ndarray`, despite many + consequent attributes being either "get-only," or completely irrelevant. + This is the class from which it is strongly suggested users should derive + custom scalar types. + + """) + +# Attributes + +def refer_to_array_attribute(attr, method=True): + docstring = """ + Scalar {} identical to the corresponding array attribute. + + Please see `ndarray.{}`. 
+ """ + + return attr, docstring.format("method" if method else "attribute", attr) + + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('T', method=False)) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('base', method=False)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('data', + """Pointer to start of data.""")) + +add_newdoc('numpy.core.numerictypes', 'generic', ('dtype', + """Get array data-descriptor.""")) + +add_newdoc('numpy.core.numerictypes', 'generic', ('flags', + """The integer value of flags.""")) + +add_newdoc('numpy.core.numerictypes', 'generic', ('flat', + """A 1-D view of the scalar.""")) + +add_newdoc('numpy.core.numerictypes', 'generic', ('imag', + """The imaginary part of the scalar.""")) + +add_newdoc('numpy.core.numerictypes', 'generic', ('itemsize', + """The length of one element in bytes.""")) + +add_newdoc('numpy.core.numerictypes', 'generic', ('nbytes', + """The length of the scalar in bytes.""")) + +add_newdoc('numpy.core.numerictypes', 'generic', ('ndim', + """The number of array dimensions.""")) + +add_newdoc('numpy.core.numerictypes', 'generic', ('real', + """The real part of the scalar.""")) + +add_newdoc('numpy.core.numerictypes', 'generic', ('shape', + """Tuple of array dimensions.""")) + +add_newdoc('numpy.core.numerictypes', 'generic', ('size', + """The number of elements in the gentype.""")) + +add_newdoc('numpy.core.numerictypes', 'generic', ('strides', + """Tuple of bytes steps in each dimension.""")) + +# Methods + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('all')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('any')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('argmax')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('argmin')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('argsort')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('astype')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('byteswap')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('choose')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('clip')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('compress')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('conjugate')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('copy')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('cumprod')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('cumsum')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('diagonal')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('dump')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('dumps')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('fill')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('flatten')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('getfield')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('item')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('itemset')) + 
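+# For example, refer_to_array_attribute('max') evaluates to roughly
+# ('max', "Scalar method identical to the corresponding array attribute.
+#  Please see `ndarray.max`."), and add_newdoc attaches that text to the
+# matching method of np.generic.
+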
+add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('max')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('mean')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('min')) + +add_newdoc('numpy.core.numerictypes', 'generic', ('newbyteorder', + """ + newbyteorder(new_order='S', /) + + Return a new `dtype` with a different byte order. + + Changes are also made in all fields and sub-arrays of the data type. + + The `new_order` code can be any from the following: + + * 'S' - swap dtype from current to opposite endian + * {'<', 'little'} - little endian + * {'>', 'big'} - big endian + * {'=', 'native'} - native order + * {'|', 'I'} - ignore (no change to byte order) + + Parameters + ---------- + new_order : str, optional + Byte order to force; a value from the byte order specifications + above. The default value ('S') results in swapping the current + byte order. + + + Returns + ------- + new_dtype : dtype + New `dtype` object with the given change to the byte order. + + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('nonzero')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('prod')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('ptp')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('put')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('ravel')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('repeat')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('reshape')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('resize')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('round')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('searchsorted')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('setfield')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('setflags')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('sort')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('squeeze')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('std')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('sum')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('swapaxes')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('take')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('tofile')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('tolist')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('tostring')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('trace')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('transpose')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('var')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('view')) + +add_newdoc('numpy.core.numerictypes', 'number', ('__class_getitem__', + """ + __class_getitem__(item, /) + + Return a parametrized wrapper around the `~numpy.number` type. + + .. 
versionadded:: 1.22 + + Returns + ------- + alias : types.GenericAlias + A parametrized `~numpy.number` type. + + Examples + -------- + >>> from typing import Any + >>> import numpy as np + + >>> np.signedinteger[Any] + numpy.signedinteger[typing.Any] + + See Also + -------- + :pep:`585` : Type hinting generics in standard collections. + + """)) + +############################################################################## +# +# Documentation for scalar type abstract base classes in type hierarchy +# +############################################################################## + + +add_newdoc('numpy.core.numerictypes', 'number', + """ + Abstract base class of all numeric scalar types. + + """) + +add_newdoc('numpy.core.numerictypes', 'integer', + """ + Abstract base class of all integer scalar types. + + """) + +add_newdoc('numpy.core.numerictypes', 'signedinteger', + """ + Abstract base class of all signed integer scalar types. + + """) + +add_newdoc('numpy.core.numerictypes', 'unsignedinteger', + """ + Abstract base class of all unsigned integer scalar types. + + """) + +add_newdoc('numpy.core.numerictypes', 'inexact', + """ + Abstract base class of all numeric scalar types with a (potentially) + inexact representation of the values in its range, such as + floating-point numbers. + + """) + +add_newdoc('numpy.core.numerictypes', 'floating', + """ + Abstract base class of all floating-point scalar types. + + """) + +add_newdoc('numpy.core.numerictypes', 'complexfloating', + """ + Abstract base class of all complex number scalar types that are made up of + floating-point numbers. + + """) + +add_newdoc('numpy.core.numerictypes', 'flexible', + """ + Abstract base class of all scalar types without predefined length. + The actual size of these types depends on the specific `np.dtype` + instantiation. + + """) + +add_newdoc('numpy.core.numerictypes', 'character', + """ + Abstract base class of all character string scalar types. + + """) diff --git a/.venv/lib/python3.12/site-packages/numpy/core/_add_newdocs_scalars.py b/.venv/lib/python3.12/site-packages/numpy/core/_add_newdocs_scalars.py new file mode 100644 index 00000000..f9a6ad96 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/_add_newdocs_scalars.py @@ -0,0 +1,372 @@ +""" +This file is separate from ``_add_newdocs.py`` so that it can be mocked out by +our sphinx ``conf.py`` during doc builds, where we want to avoid showing +platform-dependent information. 
+""" +import sys +import os +from numpy.core import dtype +from numpy.core import numerictypes as _numerictypes +from numpy.core.function_base import add_newdoc + +############################################################################## +# +# Documentation for concrete scalar classes +# +############################################################################## + +def numeric_type_aliases(aliases): + def type_aliases_gen(): + for alias, doc in aliases: + try: + alias_type = getattr(_numerictypes, alias) + except AttributeError: + # The set of aliases that actually exist varies between platforms + pass + else: + yield (alias_type, alias, doc) + return list(type_aliases_gen()) + + +possible_aliases = numeric_type_aliases([ + ('int8', '8-bit signed integer (``-128`` to ``127``)'), + ('int16', '16-bit signed integer (``-32_768`` to ``32_767``)'), + ('int32', '32-bit signed integer (``-2_147_483_648`` to ``2_147_483_647``)'), + ('int64', '64-bit signed integer (``-9_223_372_036_854_775_808`` to ``9_223_372_036_854_775_807``)'), + ('intp', 'Signed integer large enough to fit pointer, compatible with C ``intptr_t``'), + ('uint8', '8-bit unsigned integer (``0`` to ``255``)'), + ('uint16', '16-bit unsigned integer (``0`` to ``65_535``)'), + ('uint32', '32-bit unsigned integer (``0`` to ``4_294_967_295``)'), + ('uint64', '64-bit unsigned integer (``0`` to ``18_446_744_073_709_551_615``)'), + ('uintp', 'Unsigned integer large enough to fit pointer, compatible with C ``uintptr_t``'), + ('float16', '16-bit-precision floating-point number type: sign bit, 5 bits exponent, 10 bits mantissa'), + ('float32', '32-bit-precision floating-point number type: sign bit, 8 bits exponent, 23 bits mantissa'), + ('float64', '64-bit precision floating-point number type: sign bit, 11 bits exponent, 52 bits mantissa'), + ('float96', '96-bit extended-precision floating-point number type'), + ('float128', '128-bit extended-precision floating-point number type'), + ('complex64', 'Complex number type composed of 2 32-bit-precision floating-point numbers'), + ('complex128', 'Complex number type composed of 2 64-bit-precision floating-point numbers'), + ('complex192', 'Complex number type composed of 2 96-bit extended-precision floating-point numbers'), + ('complex256', 'Complex number type composed of 2 128-bit extended-precision floating-point numbers'), + ]) + + +def _get_platform_and_machine(): + try: + system, _, _, _, machine = os.uname() + except AttributeError: + system = sys.platform + if system == 'win32': + machine = os.environ.get('PROCESSOR_ARCHITEW6432', '') \ + or os.environ.get('PROCESSOR_ARCHITECTURE', '') + else: + machine = 'unknown' + return system, machine + + +_system, _machine = _get_platform_and_machine() +_doc_alias_string = f":Alias on this platform ({_system} {_machine}):" + + +def add_newdoc_for_scalar_type(obj, fixed_aliases, doc): + # note: `:field: value` is rST syntax which renders as field lists. 
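+    # Roughly: resolve the scalar type object, then append a short rST field
+    # list (character code, canonical name, and any platform-specific aliases
+    # from possible_aliases) to the supplied summary before registering it
+    # with add_newdoc below.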
+
+    o = getattr(_numerictypes, obj)
+
+    character_code = dtype(o).char
+    canonical_name_doc = "" if obj == o.__name__ else \
+                        f":Canonical name: `numpy.{obj}`\n    "
+    if fixed_aliases:
+        alias_doc = ''.join(f":Alias: `numpy.{alias}`\n    "
+                            for alias in fixed_aliases)
+    else:
+        alias_doc = ''
+    alias_doc += ''.join(f"{_doc_alias_string} `numpy.{alias}`: {doc}.\n    "
+                         for (alias_type, alias, doc) in possible_aliases
+                         if alias_type is o)
+
+    docstring = f"""
+    {doc.strip()}
+
+    :Character code: ``'{character_code}'``
+    {canonical_name_doc}{alias_doc}
+    """
+
+    add_newdoc('numpy.core.numerictypes', obj, docstring)
+
+
+add_newdoc_for_scalar_type('bool_', [],
+    """
+    Boolean type (True or False), stored as a byte.
+
+    .. warning::
+
+       The :class:`bool_` type is not a subclass of the :class:`int_` type
+       (the :class:`bool_` is not even a number type). This is different
+       from Python's default implementation of :class:`bool` as a
+       sub-class of :class:`int`.
+    """)
+
+add_newdoc_for_scalar_type('byte', [],
+    """
+    Signed integer type, compatible with C ``char``.
+    """)
+
+add_newdoc_for_scalar_type('short', [],
+    """
+    Signed integer type, compatible with C ``short``.
+    """)
+
+add_newdoc_for_scalar_type('intc', [],
+    """
+    Signed integer type, compatible with C ``int``.
+    """)
+
+add_newdoc_for_scalar_type('int_', [],
+    """
+    Signed integer type, compatible with Python `int` and C ``long``.
+    """)
+
+add_newdoc_for_scalar_type('longlong', [],
+    """
+    Signed integer type, compatible with C ``long long``.
+    """)
+
+add_newdoc_for_scalar_type('ubyte', [],
+    """
+    Unsigned integer type, compatible with C ``unsigned char``.
+    """)
+
+add_newdoc_for_scalar_type('ushort', [],
+    """
+    Unsigned integer type, compatible with C ``unsigned short``.
+    """)
+
+add_newdoc_for_scalar_type('uintc', [],
+    """
+    Unsigned integer type, compatible with C ``unsigned int``.
+    """)
+
+add_newdoc_for_scalar_type('uint', [],
+    """
+    Unsigned integer type, compatible with C ``unsigned long``.
+    """)
+
+add_newdoc_for_scalar_type('ulonglong', [],
+    """
+    Unsigned integer type, compatible with C ``unsigned long long``.
+    """)
+
+add_newdoc_for_scalar_type('half', [],
+    """
+    Half-precision floating-point number type.
+    """)
+
+add_newdoc_for_scalar_type('single', [],
+    """
+    Single-precision floating-point number type, compatible with C ``float``.
+    """)
+
+add_newdoc_for_scalar_type('double', ['float_'],
+    """
+    Double-precision floating-point number type, compatible with Python `float`
+    and C ``double``.
+    """)
+
+add_newdoc_for_scalar_type('longdouble', ['longfloat'],
+    """
+    Extended-precision floating-point number type, compatible with C
+    ``long double`` but not necessarily with IEEE 754 quadruple-precision.
+    """)
+
+add_newdoc_for_scalar_type('csingle', ['singlecomplex'],
+    """
+    Complex number type composed of two single-precision floating-point
+    numbers.
+    """)
+
+add_newdoc_for_scalar_type('cdouble', ['cfloat', 'complex_'],
+    """
+    Complex number type composed of two double-precision floating-point
+    numbers, compatible with Python `complex`.
+    """)
+
+add_newdoc_for_scalar_type('clongdouble', ['clongfloat', 'longcomplex'],
+    """
+    Complex number type composed of two extended-precision floating-point
+    numbers.
+    """)
+
+add_newdoc_for_scalar_type('object_', [],
+    """
+    Any Python object.
+    """)
+
+add_newdoc_for_scalar_type('str_', ['unicode_'],
+    r"""
+    A unicode string.
+
+    This type strips trailing null codepoints.
+ + >>> s = np.str_("abc\x00") + >>> s + 'abc' + + Unlike the builtin `str`, this supports the :ref:`python:bufferobjects`, exposing its + contents as UCS4: + + >>> m = memoryview(np.str_("abc")) + >>> m.format + '3w' + >>> m.tobytes() + b'a\x00\x00\x00b\x00\x00\x00c\x00\x00\x00' + """) + +add_newdoc_for_scalar_type('bytes_', ['string_'], + r""" + A byte string. + + When used in arrays, this type strips trailing null bytes. + """) + +add_newdoc_for_scalar_type('void', [], + r""" + np.void(length_or_data, /, dtype=None) + + Create a new structured or unstructured void scalar. + + Parameters + ---------- + length_or_data : int, array-like, bytes-like, object + One of multiple meanings (see notes). The length or + bytes data of an unstructured void. Or alternatively, + the data to be stored in the new scalar when `dtype` + is provided. + This can be an array-like, in which case an array may + be returned. + dtype : dtype, optional + If provided the dtype of the new scalar. This dtype must + be "void" dtype (i.e. a structured or unstructured void, + see also :ref:`defining-structured-types`). + + ..versionadded:: 1.24 + + Notes + ----- + For historical reasons and because void scalars can represent both + arbitrary byte data and structured dtypes, the void constructor + has three calling conventions: + + 1. ``np.void(5)`` creates a ``dtype="V5"`` scalar filled with five + ``\0`` bytes. The 5 can be a Python or NumPy integer. + 2. ``np.void(b"bytes-like")`` creates a void scalar from the byte string. + The dtype itemsize will match the byte string length, here ``"V10"``. + 3. When a ``dtype=`` is passed the call is roughly the same as an + array creation. However, a void scalar rather than array is returned. + + Please see the examples which show all three different conventions. + + Examples + -------- + >>> np.void(5) + void(b'\x00\x00\x00\x00\x00') + >>> np.void(b'abcd') + void(b'\x61\x62\x63\x64') + >>> np.void((5, 3.2, "eggs"), dtype="i,d,S5") + (5, 3.2, b'eggs') # looks like a tuple, but is `np.void` + >>> np.void(3, dtype=[('x', np.int8), ('y', np.int8)]) + (3, 3) # looks like a tuple, but is `np.void` + + """) + +add_newdoc_for_scalar_type('datetime64', [], + """ + If created from a 64-bit integer, it represents an offset from + ``1970-01-01T00:00:00``. + If created from string, the string can be in ISO 8601 date + or datetime format. + + >>> np.datetime64(10, 'Y') + numpy.datetime64('1980') + >>> np.datetime64('1980', 'Y') + numpy.datetime64('1980') + >>> np.datetime64(10, 'D') + numpy.datetime64('1970-01-11') + + See :ref:`arrays.datetime` for more information. + """) + +add_newdoc_for_scalar_type('timedelta64', [], + """ + A timedelta stored as a 64-bit integer. + + See :ref:`arrays.datetime` for more information. + """) + +add_newdoc('numpy.core.numerictypes', "integer", ('is_integer', + """ + integer.is_integer() -> bool + + Return ``True`` if the number is finite with integral value. + + .. versionadded:: 1.22 + + Examples + -------- + >>> np.int64(-2).is_integer() + True + >>> np.uint32(5).is_integer() + True + """)) + +# TODO: work out how to put this on the base class, np.floating +for float_name in ('half', 'single', 'double', 'longdouble'): + add_newdoc('numpy.core.numerictypes', float_name, ('as_integer_ratio', + """ + {ftype}.as_integer_ratio() -> (int, int) + + Return a pair of integers, whose ratio is exactly equal to the original + floating point number, and with a positive denominator. + Raise `OverflowError` on infinities and a `ValueError` on NaNs. 
+ + >>> np.{ftype}(10.0).as_integer_ratio() + (10, 1) + >>> np.{ftype}(0.0).as_integer_ratio() + (0, 1) + >>> np.{ftype}(-.25).as_integer_ratio() + (-1, 4) + """.format(ftype=float_name))) + + add_newdoc('numpy.core.numerictypes', float_name, ('is_integer', + f""" + {float_name}.is_integer() -> bool + + Return ``True`` if the floating point number is finite with integral + value, and ``False`` otherwise. + + .. versionadded:: 1.22 + + Examples + -------- + >>> np.{float_name}(-2.0).is_integer() + True + >>> np.{float_name}(3.2).is_integer() + False + """)) + +for int_name in ('int8', 'uint8', 'int16', 'uint16', 'int32', 'uint32', + 'int64', 'uint64', 'int64', 'uint64', 'int64', 'uint64'): + # Add negative examples for signed cases by checking typecode + add_newdoc('numpy.core.numerictypes', int_name, ('bit_count', + f""" + {int_name}.bit_count() -> int + + Computes the number of 1-bits in the absolute value of the input. + Analogous to the builtin `int.bit_count` or ``popcount`` in C++. + + Examples + -------- + >>> np.{int_name}(127).bit_count() + 7""" + + (f""" + >>> np.{int_name}(-127).bit_count() + 7 + """ if dtype(int_name).char.islower() else ""))) diff --git a/.venv/lib/python3.12/site-packages/numpy/core/_asarray.py b/.venv/lib/python3.12/site-packages/numpy/core/_asarray.py new file mode 100644 index 00000000..a9abc5a8 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/_asarray.py @@ -0,0 +1,134 @@ +""" +Functions in the ``as*array`` family that promote array-likes into arrays. + +`require` fits this category despite its name not matching this pattern. +""" +from .overrides import ( + array_function_dispatch, + set_array_function_like_doc, + set_module, +) +from .multiarray import array, asanyarray + + +__all__ = ["require"] + + +POSSIBLE_FLAGS = { + 'C': 'C', 'C_CONTIGUOUS': 'C', 'CONTIGUOUS': 'C', + 'F': 'F', 'F_CONTIGUOUS': 'F', 'FORTRAN': 'F', + 'A': 'A', 'ALIGNED': 'A', + 'W': 'W', 'WRITEABLE': 'W', + 'O': 'O', 'OWNDATA': 'O', + 'E': 'E', 'ENSUREARRAY': 'E' +} + + +@set_array_function_like_doc +@set_module('numpy') +def require(a, dtype=None, requirements=None, *, like=None): + """ + Return an ndarray of the provided type that satisfies requirements. + + This function is useful to be sure that an array with the correct flags + is returned for passing to compiled code (perhaps through ctypes). + + Parameters + ---------- + a : array_like + The object to be converted to a type-and-requirement-satisfying array. + dtype : data-type + The required data-type. If None preserve the current dtype. If your + application requires the data to be in native byteorder, include + a byteorder specification as a part of the dtype specification. + requirements : str or sequence of str + The requirements list can be any of the following + + * 'F_CONTIGUOUS' ('F') - ensure a Fortran-contiguous array + * 'C_CONTIGUOUS' ('C') - ensure a C-contiguous array + * 'ALIGNED' ('A') - ensure a data-type aligned array + * 'WRITEABLE' ('W') - ensure a writable array + * 'OWNDATA' ('O') - ensure an array that owns its own data + * 'ENSUREARRAY', ('E') - ensure a base array, instead of a subclass + ${ARRAY_FUNCTION_LIKE} + + .. versionadded:: 1.20.0 + + Returns + ------- + out : ndarray + Array with specified requirements and type if given. + + See Also + -------- + asarray : Convert input to an ndarray. + asanyarray : Convert to an ndarray, but pass through ndarray subclasses. + ascontiguousarray : Convert input to a contiguous array. 
+ asfortranarray : Convert input to an ndarray with column-major + memory order. + ndarray.flags : Information about the memory layout of the array. + + Notes + ----- + The returned array will be guaranteed to have the listed requirements + by making a copy if needed. + + Examples + -------- + >>> x = np.arange(6).reshape(2,3) + >>> x.flags + C_CONTIGUOUS : True + F_CONTIGUOUS : False + OWNDATA : False + WRITEABLE : True + ALIGNED : True + WRITEBACKIFCOPY : False + + >>> y = np.require(x, dtype=np.float32, requirements=['A', 'O', 'W', 'F']) + >>> y.flags + C_CONTIGUOUS : False + F_CONTIGUOUS : True + OWNDATA : True + WRITEABLE : True + ALIGNED : True + WRITEBACKIFCOPY : False + + """ + if like is not None: + return _require_with_like( + like, + a, + dtype=dtype, + requirements=requirements, + ) + + if not requirements: + return asanyarray(a, dtype=dtype) + + requirements = {POSSIBLE_FLAGS[x.upper()] for x in requirements} + + if 'E' in requirements: + requirements.remove('E') + subok = False + else: + subok = True + + order = 'A' + if requirements >= {'C', 'F'}: + raise ValueError('Cannot specify both "C" and "F" order') + elif 'F' in requirements: + order = 'F' + requirements.remove('F') + elif 'C' in requirements: + order = 'C' + requirements.remove('C') + + arr = array(a, dtype=dtype, order=order, copy=False, subok=subok) + + for prop in requirements: + if not arr.flags[prop]: + return arr.copy(order) + return arr + + +_require_with_like = array_function_dispatch()(require) diff --git a/.venv/lib/python3.12/site-packages/numpy/core/_asarray.pyi b/.venv/lib/python3.12/site-packages/numpy/core/_asarray.pyi new file mode 100644 index 00000000..69d1528d --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/_asarray.pyi @@ -0,0 +1,42 @@ +from collections.abc import Iterable +from typing import Any, TypeVar, Union, overload, Literal + +from numpy import ndarray +from numpy._typing import DTypeLike, _SupportsArrayFunc + +_ArrayType = TypeVar("_ArrayType", bound=ndarray[Any, Any]) + +_Requirements = Literal[ + "C", "C_CONTIGUOUS", "CONTIGUOUS", + "F", "F_CONTIGUOUS", "FORTRAN", + "A", "ALIGNED", + "W", "WRITEABLE", + "O", "OWNDATA" +] +_E = Literal["E", "ENSUREARRAY"] +_RequirementsWithE = Union[_Requirements, _E] + +@overload +def require( + a: _ArrayType, + dtype: None = ..., + requirements: None | _Requirements | Iterable[_Requirements] = ..., + *, + like: _SupportsArrayFunc = ... +) -> _ArrayType: ... +@overload +def require( + a: object, + dtype: DTypeLike = ..., + requirements: _E | Iterable[_RequirementsWithE] = ..., + *, + like: _SupportsArrayFunc = ... +) -> ndarray[Any, Any]: ... +@overload +def require( + a: object, + dtype: DTypeLike = ..., + requirements: None | _Requirements | Iterable[_Requirements] = ..., + *, + like: _SupportsArrayFunc = ... +) -> ndarray[Any, Any]: ... diff --git a/.venv/lib/python3.12/site-packages/numpy/core/_dtype.py b/.venv/lib/python3.12/site-packages/numpy/core/_dtype.py new file mode 100644 index 00000000..ff50f519 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/_dtype.py @@ -0,0 +1,369 @@ +""" +A place for code to be called from the implementation of np.dtype + +String handling is much easier to do correctly in python. 
+""" +import numpy as np + + +_kind_to_stem = { + 'u': 'uint', + 'i': 'int', + 'c': 'complex', + 'f': 'float', + 'b': 'bool', + 'V': 'void', + 'O': 'object', + 'M': 'datetime', + 'm': 'timedelta', + 'S': 'bytes', + 'U': 'str', +} + + +def _kind_name(dtype): + try: + return _kind_to_stem[dtype.kind] + except KeyError as e: + raise RuntimeError( + "internal dtype error, unknown kind {!r}" + .format(dtype.kind) + ) from None + + +def __str__(dtype): + if dtype.fields is not None: + return _struct_str(dtype, include_align=True) + elif dtype.subdtype: + return _subarray_str(dtype) + elif issubclass(dtype.type, np.flexible) or not dtype.isnative: + return dtype.str + else: + return dtype.name + + +def __repr__(dtype): + arg_str = _construction_repr(dtype, include_align=False) + if dtype.isalignedstruct: + arg_str = arg_str + ", align=True" + return "dtype({})".format(arg_str) + + +def _unpack_field(dtype, offset, title=None): + """ + Helper function to normalize the items in dtype.fields. + + Call as: + + dtype, offset, title = _unpack_field(*dtype.fields[name]) + """ + return dtype, offset, title + + +def _isunsized(dtype): + # PyDataType_ISUNSIZED + return dtype.itemsize == 0 + + +def _construction_repr(dtype, include_align=False, short=False): + """ + Creates a string repr of the dtype, excluding the 'dtype()' part + surrounding the object. This object may be a string, a list, or + a dict depending on the nature of the dtype. This + is the object passed as the first parameter to the dtype + constructor, and if no additional constructor parameters are + given, will reproduce the exact memory layout. + + Parameters + ---------- + short : bool + If true, this creates a shorter repr using 'kind' and 'itemsize', instead + of the longer type name. + + include_align : bool + If true, this includes the 'align=True' parameter + inside the struct dtype construction dict when needed. Use this flag + if you want a proper repr string without the 'dtype()' part around it. + + If false, this does not preserve the + 'align=True' parameter or sticky NPY_ALIGNED_STRUCT flag for + struct arrays like the regular repr does, because the 'align' + flag is not part of first dtype constructor parameter. This + mode is intended for a full 'repr', where the 'align=True' is + provided as the second parameter. + """ + if dtype.fields is not None: + return _struct_str(dtype, include_align=include_align) + elif dtype.subdtype: + return _subarray_str(dtype) + else: + return _scalar_str(dtype, short=short) + + +def _scalar_str(dtype, short): + byteorder = _byte_order_str(dtype) + + if dtype.type == np.bool_: + if short: + return "'?'" + else: + return "'bool'" + + elif dtype.type == np.object_: + # The object reference may be different sizes on different + # platforms, so it should never include the itemsize here. 
+ return "'O'" + + elif dtype.type == np.bytes_: + if _isunsized(dtype): + return "'S'" + else: + return "'S%d'" % dtype.itemsize + + elif dtype.type == np.str_: + if _isunsized(dtype): + return "'%sU'" % byteorder + else: + return "'%sU%d'" % (byteorder, dtype.itemsize / 4) + + # unlike the other types, subclasses of void are preserved - but + # historically the repr does not actually reveal the subclass + elif issubclass(dtype.type, np.void): + if _isunsized(dtype): + return "'V'" + else: + return "'V%d'" % dtype.itemsize + + elif dtype.type == np.datetime64: + return "'%sM8%s'" % (byteorder, _datetime_metadata_str(dtype)) + + elif dtype.type == np.timedelta64: + return "'%sm8%s'" % (byteorder, _datetime_metadata_str(dtype)) + + elif np.issubdtype(dtype, np.number): + # Short repr with endianness, like '<f8' + if short or dtype.byteorder not in ('=', '|'): + return "'%s%c%d'" % (byteorder, dtype.kind, dtype.itemsize) + + # Longer repr, like 'float64' + else: + return "'%s%d'" % (_kind_name(dtype), 8*dtype.itemsize) + + elif dtype.isbuiltin == 2: + return dtype.type.__name__ + + else: + raise RuntimeError( + "Internal error: NumPy dtype unrecognized type number") + + +def _byte_order_str(dtype): + """ Normalize byteorder to '<' or '>' """ + # hack to obtain the native and swapped byte order characters + swapped = np.dtype(int).newbyteorder('S') + native = swapped.newbyteorder('S') + + byteorder = dtype.byteorder + if byteorder == '=': + return native.byteorder + if byteorder == 'S': + # TODO: this path can never be reached + return swapped.byteorder + elif byteorder == '|': + return '' + else: + return byteorder + + +def _datetime_metadata_str(dtype): + # TODO: this duplicates the C metastr_to_unicode functionality + unit, count = np.datetime_data(dtype) + if unit == 'generic': + return '' + elif count == 1: + return '[{}]'.format(unit) + else: + return '[{}{}]'.format(count, unit) + + +def _struct_dict_str(dtype, includealignedflag): + # unpack the fields dictionary into ls + names = dtype.names + fld_dtypes = [] + offsets = [] + titles = [] + for name in names: + fld_dtype, offset, title = _unpack_field(*dtype.fields[name]) + fld_dtypes.append(fld_dtype) + offsets.append(offset) + titles.append(title) + + # Build up a string to make the dictionary + + if np.core.arrayprint._get_legacy_print_mode() <= 121: + colon = ":" + fieldsep = "," + else: + colon = ": " + fieldsep = ", " + + # First, the names + ret = "{'names'%s[" % colon + ret += fieldsep.join(repr(name) for name in names) + + # Second, the formats + ret += "], 'formats'%s[" % colon + ret += fieldsep.join( + _construction_repr(fld_dtype, short=True) for fld_dtype in fld_dtypes) + + # Third, the offsets + ret += "], 'offsets'%s[" % colon + ret += fieldsep.join("%d" % offset for offset in offsets) + + # Fourth, the titles + if any(title is not None for title in titles): + ret += "], 'titles'%s[" % colon + ret += fieldsep.join(repr(title) for title in titles) + + # Fifth, the itemsize + ret += "], 'itemsize'%s%d" % (colon, dtype.itemsize) + + if (includealignedflag and dtype.isalignedstruct): + # Finally, the aligned flag + ret += ", 'aligned'%sTrue}" % colon + else: + ret += "}" + + return ret + + +def _aligned_offset(offset, alignment): + # round up offset: + return - (-offset // alignment) * alignment + + +def _is_packed(dtype): + """ + Checks whether the structured data type in 'dtype' + has a simple layout, where all the fields are in order, + and follow each other with no alignment padding. 
+ + When this returns true, the dtype can be reconstructed + from a list of the field names and dtypes with no additional + dtype parameters. + + Duplicates the C `is_dtype_struct_simple_unaligned_layout` function. + """ + align = dtype.isalignedstruct + max_alignment = 1 + total_offset = 0 + for name in dtype.names: + fld_dtype, fld_offset, title = _unpack_field(*dtype.fields[name]) + + if align: + total_offset = _aligned_offset(total_offset, fld_dtype.alignment) + max_alignment = max(max_alignment, fld_dtype.alignment) + + if fld_offset != total_offset: + return False + total_offset += fld_dtype.itemsize + + if align: + total_offset = _aligned_offset(total_offset, max_alignment) + + if total_offset != dtype.itemsize: + return False + return True + + +def _struct_list_str(dtype): + items = [] + for name in dtype.names: + fld_dtype, fld_offset, title = _unpack_field(*dtype.fields[name]) + + item = "(" + if title is not None: + item += "({!r}, {!r}), ".format(title, name) + else: + item += "{!r}, ".format(name) + # Special case subarray handling here + if fld_dtype.subdtype is not None: + base, shape = fld_dtype.subdtype + item += "{}, {}".format( + _construction_repr(base, short=True), + shape + ) + else: + item += _construction_repr(fld_dtype, short=True) + + item += ")" + items.append(item) + + return "[" + ", ".join(items) + "]" + + +def _struct_str(dtype, include_align): + # The list str representation can't include the 'align=' flag, + # so if it is requested and the struct has the aligned flag set, + # we must use the dict str instead. + if not (include_align and dtype.isalignedstruct) and _is_packed(dtype): + sub = _struct_list_str(dtype) + + else: + sub = _struct_dict_str(dtype, include_align) + + # If the data type isn't the default, void, show it + if dtype.type != np.void: + return "({t.__module__}.{t.__name__}, {f})".format(t=dtype.type, f=sub) + else: + return sub + + +def _subarray_str(dtype): + base, shape = dtype.subdtype + return "({}, {})".format( + _construction_repr(base, short=True), + shape + ) + + +def _name_includes_bit_suffix(dtype): + if dtype.type == np.object_: + # pointer size varies by system, best to omit it + return False + elif dtype.type == np.bool_: + # implied + return False + elif dtype.type is None: + return True + elif np.issubdtype(dtype, np.flexible) and _isunsized(dtype): + # unspecified + return False + else: + return True + + +def _name_get(dtype): + # provides dtype.name.__get__, documented as returning a "bit name" + + if dtype.isbuiltin == 2: + # user dtypes don't promise to do anything special + return dtype.type.__name__ + + if dtype.kind == '\x00': + name = type(dtype).__name__ + elif issubclass(dtype.type, np.void): + # historically, void subclasses preserve their name, eg `record64` + name = dtype.type.__name__ + else: + name = _kind_name(dtype) + + # append bit counts + if _name_includes_bit_suffix(dtype): + name += "{}".format(dtype.itemsize * 8) + + # append metadata to datetimes + if dtype.type in (np.datetime64, np.timedelta64): + name += _datetime_metadata_str(dtype) + + return name diff --git a/.venv/lib/python3.12/site-packages/numpy/core/_dtype_ctypes.py b/.venv/lib/python3.12/site-packages/numpy/core/_dtype_ctypes.py new file mode 100644 index 00000000..6d7cbb24 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/_dtype_ctypes.py @@ -0,0 +1,117 @@ +""" +Conversion from ctypes to dtype. 
+ +In an ideal world, we could achieve this through the PEP3118 buffer protocol, +something like:: + + def dtype_from_ctypes_type(t): + # needed to ensure that the shape of `t` is within memoryview.format + class DummyStruct(ctypes.Structure): + _fields_ = [('a', t)] + + # empty to avoid memory allocation + ctype_0 = (DummyStruct * 0)() + mv = memoryview(ctype_0) + + # convert the struct, and slice back out the field + return _dtype_from_pep3118(mv.format)['a'] + +Unfortunately, this fails because: + +* ctypes cannot handle length-0 arrays with PEP3118 (bpo-32782) +* PEP3118 cannot represent unions, but both numpy and ctypes can +* ctypes cannot handle big-endian structs with PEP3118 (bpo-32780) +""" + +# We delay-import ctypes for distributions that do not include it. +# While this module is not used unless the user passes in ctypes +# members, it is eagerly imported from numpy/core/__init__.py. +import numpy as np + + +def _from_ctypes_array(t): + return np.dtype((dtype_from_ctypes_type(t._type_), (t._length_,))) + + +def _from_ctypes_structure(t): + for item in t._fields_: + if len(item) > 2: + raise TypeError( + "ctypes bitfields have no dtype equivalent") + + if hasattr(t, "_pack_"): + import ctypes + formats = [] + offsets = [] + names = [] + current_offset = 0 + for fname, ftyp in t._fields_: + names.append(fname) + formats.append(dtype_from_ctypes_type(ftyp)) + # Each type has a default offset, this is platform dependent for some types. + effective_pack = min(t._pack_, ctypes.alignment(ftyp)) + current_offset = ((current_offset + effective_pack - 1) // effective_pack) * effective_pack + offsets.append(current_offset) + current_offset += ctypes.sizeof(ftyp) + + return np.dtype(dict( + formats=formats, + offsets=offsets, + names=names, + itemsize=ctypes.sizeof(t))) + else: + fields = [] + for fname, ftyp in t._fields_: + fields.append((fname, dtype_from_ctypes_type(ftyp))) + + # by default, ctypes structs are aligned + return np.dtype(fields, align=True) + + +def _from_ctypes_scalar(t): + """ + Return the dtype type with endianness included if it's the case + """ + if getattr(t, '__ctype_be__', None) is t: + return np.dtype('>' + t._type_) + elif getattr(t, '__ctype_le__', None) is t: + return np.dtype('<' + t._type_) + else: + return np.dtype(t._type_) + + +def _from_ctypes_union(t): + import ctypes + formats = [] + offsets = [] + names = [] + for fname, ftyp in t._fields_: + names.append(fname) + formats.append(dtype_from_ctypes_type(ftyp)) + offsets.append(0) # Union fields are offset to 0 + + return np.dtype(dict( + formats=formats, + offsets=offsets, + names=names, + itemsize=ctypes.sizeof(t))) + + +def dtype_from_ctypes_type(t): + """ + Construct a dtype object from a ctypes type + """ + import _ctypes + if issubclass(t, _ctypes.Array): + return _from_ctypes_array(t) + elif issubclass(t, _ctypes._Pointer): + raise TypeError("ctypes pointers have no dtype equivalent") + elif issubclass(t, _ctypes.Structure): + return _from_ctypes_structure(t) + elif issubclass(t, _ctypes.Union): + return _from_ctypes_union(t) + elif isinstance(getattr(t, '_type_', None), str): + return _from_ctypes_scalar(t) + else: + raise NotImplementedError( + "Unknown ctypes type {}".format(t.__name__)) diff --git a/.venv/lib/python3.12/site-packages/numpy/core/_exceptions.py b/.venv/lib/python3.12/site-packages/numpy/core/_exceptions.py new file mode 100644 index 00000000..87d4213a --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/_exceptions.py @@ -0,0 +1,172 @@ +""" +Various richly-typed 
exceptions, that also help us deal with string formatting +in python where it's easier. + +By putting the formatting in `__str__`, we also avoid paying the cost for +users who silence the exceptions. +""" +from .._utils import set_module + +def _unpack_tuple(tup): + if len(tup) == 1: + return tup[0] + else: + return tup + + +def _display_as_base(cls): + """ + A decorator that makes an exception class look like its base. + + We use this to hide subclasses that are implementation details - the user + should catch the base type, which is what the traceback will show them. + + Classes decorated with this decorator are subject to removal without a + deprecation warning. + """ + assert issubclass(cls, Exception) + cls.__name__ = cls.__base__.__name__ + return cls + + +class UFuncTypeError(TypeError): + """ Base class for all ufunc exceptions """ + def __init__(self, ufunc): + self.ufunc = ufunc + + +@_display_as_base +class _UFuncNoLoopError(UFuncTypeError): + """ Thrown when a ufunc loop cannot be found """ + def __init__(self, ufunc, dtypes): + super().__init__(ufunc) + self.dtypes = tuple(dtypes) + + def __str__(self): + return ( + "ufunc {!r} did not contain a loop with signature matching types " + "{!r} -> {!r}" + ).format( + self.ufunc.__name__, + _unpack_tuple(self.dtypes[:self.ufunc.nin]), + _unpack_tuple(self.dtypes[self.ufunc.nin:]) + ) + + +@_display_as_base +class _UFuncBinaryResolutionError(_UFuncNoLoopError): + """ Thrown when a binary resolution fails """ + def __init__(self, ufunc, dtypes): + super().__init__(ufunc, dtypes) + assert len(self.dtypes) == 2 + + def __str__(self): + return ( + "ufunc {!r} cannot use operands with types {!r} and {!r}" + ).format( + self.ufunc.__name__, *self.dtypes + ) + + +@_display_as_base +class _UFuncCastingError(UFuncTypeError): + def __init__(self, ufunc, casting, from_, to): + super().__init__(ufunc) + self.casting = casting + self.from_ = from_ + self.to = to + + +@_display_as_base +class _UFuncInputCastingError(_UFuncCastingError): + """ Thrown when a ufunc input cannot be casted """ + def __init__(self, ufunc, casting, from_, to, i): + super().__init__(ufunc, casting, from_, to) + self.in_i = i + + def __str__(self): + # only show the number if more than one input exists + i_str = "{} ".format(self.in_i) if self.ufunc.nin != 1 else "" + return ( + "Cannot cast ufunc {!r} input {}from {!r} to {!r} with casting " + "rule {!r}" + ).format( + self.ufunc.__name__, i_str, self.from_, self.to, self.casting + ) + + +@_display_as_base +class _UFuncOutputCastingError(_UFuncCastingError): + """ Thrown when a ufunc output cannot be casted """ + def __init__(self, ufunc, casting, from_, to, i): + super().__init__(ufunc, casting, from_, to) + self.out_i = i + + def __str__(self): + # only show the number if more than one output exists + i_str = "{} ".format(self.out_i) if self.ufunc.nout != 1 else "" + return ( + "Cannot cast ufunc {!r} output {}from {!r} to {!r} with casting " + "rule {!r}" + ).format( + self.ufunc.__name__, i_str, self.from_, self.to, self.casting + ) + + +@_display_as_base +class _ArrayMemoryError(MemoryError): + """ Thrown when an array cannot be allocated""" + def __init__(self, shape, dtype): + self.shape = shape + self.dtype = dtype + + @property + def _total_size(self): + num_bytes = self.dtype.itemsize + for dim in self.shape: + num_bytes *= dim + return num_bytes + + @staticmethod + def _size_to_string(num_bytes): + """ Convert a number of bytes into a binary size string """ + + # https://en.wikipedia.org/wiki/Binary_prefix + 
LOG2_STEP = 10 + STEP = 1024 + units = ['bytes', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB'] + + unit_i = max(num_bytes.bit_length() - 1, 1) // LOG2_STEP + unit_val = 1 << (unit_i * LOG2_STEP) + n_units = num_bytes / unit_val + del unit_val + + # ensure we pick a unit that is correct after rounding + if round(n_units) == STEP: + unit_i += 1 + n_units /= STEP + + # deal with sizes so large that we don't have units for them + if unit_i >= len(units): + new_unit_i = len(units) - 1 + n_units *= 1 << ((unit_i - new_unit_i) * LOG2_STEP) + unit_i = new_unit_i + + unit_name = units[unit_i] + # format with a sensible number of digits + if unit_i == 0: + # no decimal point on bytes + return '{:.0f} {}'.format(n_units, unit_name) + elif round(n_units) < 1000: + # 3 significant figures, if none are dropped to the left of the . + return '{:#.3g} {}'.format(n_units, unit_name) + else: + # just give all the digits otherwise + return '{:#.0f} {}'.format(n_units, unit_name) + + def __str__(self): + size_str = self._size_to_string(self._total_size) + return ( + "Unable to allocate {} for an array with shape {} and data type {}" + .format(size_str, self.shape, self.dtype) + ) diff --git a/.venv/lib/python3.12/site-packages/numpy/core/_internal.py b/.venv/lib/python3.12/site-packages/numpy/core/_internal.py new file mode 100644 index 00000000..c7838588 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/_internal.py @@ -0,0 +1,935 @@ +""" +A place for internal code + +Some things are more easily handled Python. + +""" +import ast +import re +import sys +import warnings + +from ..exceptions import DTypePromotionError +from .multiarray import dtype, array, ndarray, promote_types +try: + import ctypes +except ImportError: + ctypes = None + +IS_PYPY = sys.implementation.name == 'pypy' + +if sys.byteorder == 'little': + _nbo = '<' +else: + _nbo = '>' + +def _makenames_list(adict, align): + allfields = [] + + for fname, obj in adict.items(): + n = len(obj) + if not isinstance(obj, tuple) or n not in (2, 3): + raise ValueError("entry not a 2- or 3- tuple") + if n > 2 and obj[2] == fname: + continue + num = int(obj[1]) + if num < 0: + raise ValueError("invalid offset.") + format = dtype(obj[0], align=align) + if n > 2: + title = obj[2] + else: + title = None + allfields.append((fname, format, num, title)) + # sort by offsets + allfields.sort(key=lambda x: x[2]) + names = [x[0] for x in allfields] + formats = [x[1] for x in allfields] + offsets = [x[2] for x in allfields] + titles = [x[3] for x in allfields] + + return names, formats, offsets, titles + +# Called in PyArray_DescrConverter function when +# a dictionary without "names" and "formats" +# fields is used as a data-type descriptor. 
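
Editor's aside, not part of the NumPy diff: the "dictionary without names and formats" mentioned in the comment above is the discouraged fields-dict spelling, where each key maps a field name to ``(format, byte offset)``. A small illustration of the input that ``_usefields`` below normalizes::

    import numpy as np

    dt = np.dtype({'x': ('<i4', 0), 'y': ('<f8', 4)})
    dt.names          # ('x', 'y') -- ordered by offset, as in _makenames_list
    dt.fields['y']    # (dtype('float64'), 4)
    dt.itemsize       # 12
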
+def _usefields(adict, align): + try: + names = adict[-1] + except KeyError: + names = None + if names is None: + names, formats, offsets, titles = _makenames_list(adict, align) + else: + formats = [] + offsets = [] + titles = [] + for name in names: + res = adict[name] + formats.append(res[0]) + offsets.append(res[1]) + if len(res) > 2: + titles.append(res[2]) + else: + titles.append(None) + + return dtype({"names": names, + "formats": formats, + "offsets": offsets, + "titles": titles}, align) + + +# construct an array_protocol descriptor list +# from the fields attribute of a descriptor +# This calls itself recursively but should eventually hit +# a descriptor that has no fields and then return +# a simple typestring + +def _array_descr(descriptor): + fields = descriptor.fields + if fields is None: + subdtype = descriptor.subdtype + if subdtype is None: + if descriptor.metadata is None: + return descriptor.str + else: + new = descriptor.metadata.copy() + if new: + return (descriptor.str, new) + else: + return descriptor.str + else: + return (_array_descr(subdtype[0]), subdtype[1]) + + names = descriptor.names + ordered_fields = [fields[x] + (x,) for x in names] + result = [] + offset = 0 + for field in ordered_fields: + if field[1] > offset: + num = field[1] - offset + result.append(('', f'|V{num}')) + offset += num + elif field[1] < offset: + raise ValueError( + "dtype.descr is not defined for types with overlapping or " + "out-of-order fields") + if len(field) > 3: + name = (field[2], field[3]) + else: + name = field[2] + if field[0].subdtype: + tup = (name, _array_descr(field[0].subdtype[0]), + field[0].subdtype[1]) + else: + tup = (name, _array_descr(field[0])) + offset += field[0].itemsize + result.append(tup) + + if descriptor.itemsize > offset: + num = descriptor.itemsize - offset + result.append(('', f'|V{num}')) + + return result + +# Build a new array from the information in a pickle. +# Note that the name numpy.core._internal._reconstruct is embedded in +# pickles of ndarrays made with NumPy before release 1.0 +# so don't remove the name here, or you'll +# break backward compatibility. +def _reconstruct(subtype, shape, dtype): + return ndarray.__new__(subtype, shape, dtype) + + +# format_re was originally from numarray by J. Todd Miller + +format_re = re.compile(r'(?P<order1>[<>|=]?)' + r'(?P<repeats> *[(]?[ ,0-9]*[)]? 
*)' + r'(?P<order2>[<>|=]?)' + r'(?P<dtype>[A-Za-z0-9.?]*(?:\[[a-zA-Z0-9,.]+\])?)') +sep_re = re.compile(r'\s*,\s*') +space_re = re.compile(r'\s+$') + +# astr is a string (perhaps comma separated) + +_convorder = {'=': _nbo} + +def _commastring(astr): + startindex = 0 + result = [] + while startindex < len(astr): + mo = format_re.match(astr, pos=startindex) + try: + (order1, repeats, order2, dtype) = mo.groups() + except (TypeError, AttributeError): + raise ValueError( + f'format number {len(result)+1} of "{astr}" is not recognized' + ) from None + startindex = mo.end() + # Separator or ending padding + if startindex < len(astr): + if space_re.match(astr, pos=startindex): + startindex = len(astr) + else: + mo = sep_re.match(astr, pos=startindex) + if not mo: + raise ValueError( + 'format number %d of "%s" is not recognized' % + (len(result)+1, astr)) + startindex = mo.end() + + if order2 == '': + order = order1 + elif order1 == '': + order = order2 + else: + order1 = _convorder.get(order1, order1) + order2 = _convorder.get(order2, order2) + if (order1 != order2): + raise ValueError( + 'inconsistent byte-order specification %s and %s' % + (order1, order2)) + order = order1 + + if order in ('|', '=', _nbo): + order = '' + dtype = order + dtype + if (repeats == ''): + newitem = dtype + else: + newitem = (dtype, ast.literal_eval(repeats)) + result.append(newitem) + + return result + +class dummy_ctype: + def __init__(self, cls): + self._cls = cls + def __mul__(self, other): + return self + def __call__(self, *other): + return self._cls(other) + def __eq__(self, other): + return self._cls == other._cls + def __ne__(self, other): + return self._cls != other._cls + +def _getintp_ctype(): + val = _getintp_ctype.cache + if val is not None: + return val + if ctypes is None: + import numpy as np + val = dummy_ctype(np.intp) + else: + char = dtype('p').char + if char == 'i': + val = ctypes.c_int + elif char == 'l': + val = ctypes.c_long + elif char == 'q': + val = ctypes.c_longlong + else: + val = ctypes.c_long + _getintp_ctype.cache = val + return val +_getintp_ctype.cache = None + +# Used for .ctypes attribute of ndarray + +class _missing_ctypes: + def cast(self, num, obj): + return num.value + + class c_void_p: + def __init__(self, ptr): + self.value = ptr + + +class _ctypes: + def __init__(self, array, ptr=None): + self._arr = array + + if ctypes: + self._ctypes = ctypes + self._data = self._ctypes.c_void_p(ptr) + else: + # fake a pointer-like object that holds onto the reference + self._ctypes = _missing_ctypes() + self._data = self._ctypes.c_void_p(ptr) + self._data._objects = array + + if self._arr.ndim == 0: + self._zerod = True + else: + self._zerod = False + + def data_as(self, obj): + """ + Return the data pointer cast to a particular c-types object. + For example, calling ``self._as_parameter_`` is equivalent to + ``self.data_as(ctypes.c_void_p)``. Perhaps you want to use the data as a + pointer to a ctypes array of floating-point data: + ``self.data_as(ctypes.POINTER(ctypes.c_double))``. + + The returned pointer will keep a reference to the array. + """ + # _ctypes.cast function causes a circular reference of self._data in + # self._data._objects. Attributes of self._data cannot be released + # until gc.collect is called. Make a copy of the pointer first then let + # it hold the array reference. 
This is a workaround to circumvent the + # CPython bug https://bugs.python.org/issue12836 + ptr = self._ctypes.cast(self._data, obj) + ptr._arr = self._arr + return ptr + + def shape_as(self, obj): + """ + Return the shape tuple as an array of some other c-types + type. For example: ``self.shape_as(ctypes.c_short)``. + """ + if self._zerod: + return None + return (obj*self._arr.ndim)(*self._arr.shape) + + def strides_as(self, obj): + """ + Return the strides tuple as an array of some other + c-types type. For example: ``self.strides_as(ctypes.c_longlong)``. + """ + if self._zerod: + return None + return (obj*self._arr.ndim)(*self._arr.strides) + + @property + def data(self): + """ + A pointer to the memory area of the array as a Python integer. + This memory area may contain data that is not aligned, or not in correct + byte-order. The memory area may not even be writeable. The array + flags and data-type of this array should be respected when passing this + attribute to arbitrary C-code to avoid trouble that can include Python + crashing. User Beware! The value of this attribute is exactly the same + as ``self._array_interface_['data'][0]``. + + Note that unlike ``data_as``, a reference will not be kept to the array: + code like ``ctypes.c_void_p((a + b).ctypes.data)`` will result in a + pointer to a deallocated array, and should be spelt + ``(a + b).ctypes.data_as(ctypes.c_void_p)`` + """ + return self._data.value + + @property + def shape(self): + """ + (c_intp*self.ndim): A ctypes array of length self.ndim where + the basetype is the C-integer corresponding to ``dtype('p')`` on this + platform (see `~numpy.ctypeslib.c_intp`). This base-type could be + `ctypes.c_int`, `ctypes.c_long`, or `ctypes.c_longlong` depending on + the platform. The ctypes array contains the shape of + the underlying array. + """ + return self.shape_as(_getintp_ctype()) + + @property + def strides(self): + """ + (c_intp*self.ndim): A ctypes array of length self.ndim where + the basetype is the same as for the shape attribute. This ctypes array + contains the strides information from the underlying array. This strides + information is important for showing how many bytes must be jumped to + get to the next element in the array. + """ + return self.strides_as(_getintp_ctype()) + + @property + def _as_parameter_(self): + """ + Overrides the ctypes semi-magic method + + Enables `c_func(some_array.ctypes)` + """ + return self.data_as(ctypes.c_void_p) + + # Numpy 1.21.0, 2021-05-18 + + def get_data(self): + """Deprecated getter for the `_ctypes.data` property. + + .. deprecated:: 1.21 + """ + warnings.warn('"get_data" is deprecated. Use "data" instead', + DeprecationWarning, stacklevel=2) + return self.data + + def get_shape(self): + """Deprecated getter for the `_ctypes.shape` property. + + .. deprecated:: 1.21 + """ + warnings.warn('"get_shape" is deprecated. Use "shape" instead', + DeprecationWarning, stacklevel=2) + return self.shape + + def get_strides(self): + """Deprecated getter for the `_ctypes.strides` property. + + .. deprecated:: 1.21 + """ + warnings.warn('"get_strides" is deprecated. Use "strides" instead', + DeprecationWarning, stacklevel=2) + return self.strides + + def get_as_parameter(self): + """Deprecated getter for the `_ctypes._as_parameter_` property. + + .. deprecated:: 1.21 + """ + warnings.warn( + '"get_as_parameter" is deprecated. 
Use "_as_parameter_" instead', + DeprecationWarning, stacklevel=2, + ) + return self._as_parameter_ + + +def _newnames(datatype, order): + """ + Given a datatype and an order object, return a new names tuple, with the + order indicated + """ + oldnames = datatype.names + nameslist = list(oldnames) + if isinstance(order, str): + order = [order] + seen = set() + if isinstance(order, (list, tuple)): + for name in order: + try: + nameslist.remove(name) + except ValueError: + if name in seen: + raise ValueError(f"duplicate field name: {name}") from None + else: + raise ValueError(f"unknown field name: {name}") from None + seen.add(name) + return tuple(list(order) + nameslist) + raise ValueError(f"unsupported order value: {order}") + +def _copy_fields(ary): + """Return copy of structured array with padding between fields removed. + + Parameters + ---------- + ary : ndarray + Structured array from which to remove padding bytes + + Returns + ------- + ary_copy : ndarray + Copy of ary with padding bytes removed + """ + dt = ary.dtype + copy_dtype = {'names': dt.names, + 'formats': [dt.fields[name][0] for name in dt.names]} + return array(ary, dtype=copy_dtype, copy=True) + +def _promote_fields(dt1, dt2): + """ Perform type promotion for two structured dtypes. + + Parameters + ---------- + dt1 : structured dtype + First dtype. + dt2 : structured dtype + Second dtype. + + Returns + ------- + out : dtype + The promoted dtype + + Notes + ----- + If one of the inputs is aligned, the result will be. The titles of + both descriptors must match (point to the same field). + """ + # Both must be structured and have the same names in the same order + if (dt1.names is None or dt2.names is None) or dt1.names != dt2.names: + raise DTypePromotionError( + f"field names `{dt1.names}` and `{dt2.names}` mismatch.") + + # if both are identical, we can (maybe!) just return the same dtype. + identical = dt1 is dt2 + new_fields = [] + for name in dt1.names: + field1 = dt1.fields[name] + field2 = dt2.fields[name] + new_descr = promote_types(field1[0], field2[0]) + identical = identical and new_descr is field1[0] + + # Check that the titles match (if given): + if field1[2:] != field2[2:]: + raise DTypePromotionError( + f"field titles of field '{name}' mismatch") + if len(field1) == 2: + new_fields.append((name, new_descr)) + else: + new_fields.append(((field1[2], name), new_descr)) + + res = dtype(new_fields, align=dt1.isalignedstruct or dt2.isalignedstruct) + + # Might as well preserve identity (and metadata) if the dtype is identical + # and the itemsize, offsets are also unmodified. This could probably be + # sped up, but also probably just be removed entirely. + if identical and res.itemsize == dt1.itemsize: + for name in dt1.names: + if dt1.fields[name][1] != res.fields[name][1]: + return res # the dtype changed. + return dt1 + + return res + + +def _getfield_is_safe(oldtype, newtype, offset): + """ Checks safety of getfield for object arrays. + + As in _view_is_safe, we need to check that memory containing objects is not + reinterpreted as a non-object datatype and vice versa. + + Parameters + ---------- + oldtype : data-type + Data type of the original ndarray. 
+ newtype : data-type + Data type of the field being accessed by ndarray.getfield + offset : int + Offset of the field being accessed by ndarray.getfield + + Raises + ------ + TypeError + If the field access is invalid + + """ + if newtype.hasobject or oldtype.hasobject: + if offset == 0 and newtype == oldtype: + return + if oldtype.names is not None: + for name in oldtype.names: + if (oldtype.fields[name][1] == offset and + oldtype.fields[name][0] == newtype): + return + raise TypeError("Cannot get/set field of an object array") + return + +def _view_is_safe(oldtype, newtype): + """ Checks safety of a view involving object arrays, for example when + doing:: + + np.zeros(10, dtype=oldtype).view(newtype) + + Parameters + ---------- + oldtype : data-type + Data type of original ndarray + newtype : data-type + Data type of the view + + Raises + ------ + TypeError + If the new type is incompatible with the old type. + + """ + + # if the types are equivalent, there is no problem. + # for example: dtype((np.record, 'i4,i4')) == dtype((np.void, 'i4,i4')) + if oldtype == newtype: + return + + if newtype.hasobject or oldtype.hasobject: + raise TypeError("Cannot change data-type for object array.") + return + +# Given a string containing a PEP 3118 format specifier, +# construct a NumPy dtype + +_pep3118_native_map = { + '?': '?', + 'c': 'S1', + 'b': 'b', + 'B': 'B', + 'h': 'h', + 'H': 'H', + 'i': 'i', + 'I': 'I', + 'l': 'l', + 'L': 'L', + 'q': 'q', + 'Q': 'Q', + 'e': 'e', + 'f': 'f', + 'd': 'd', + 'g': 'g', + 'Zf': 'F', + 'Zd': 'D', + 'Zg': 'G', + 's': 'S', + 'w': 'U', + 'O': 'O', + 'x': 'V', # padding +} +_pep3118_native_typechars = ''.join(_pep3118_native_map.keys()) + +_pep3118_standard_map = { + '?': '?', + 'c': 'S1', + 'b': 'b', + 'B': 'B', + 'h': 'i2', + 'H': 'u2', + 'i': 'i4', + 'I': 'u4', + 'l': 'i4', + 'L': 'u4', + 'q': 'i8', + 'Q': 'u8', + 'e': 'f2', + 'f': 'f', + 'd': 'd', + 'Zf': 'F', + 'Zd': 'D', + 's': 'S', + 'w': 'U', + 'O': 'O', + 'x': 'V', # padding +} +_pep3118_standard_typechars = ''.join(_pep3118_standard_map.keys()) + +_pep3118_unsupported_map = { + 'u': 'UCS-2 strings', + '&': 'pointers', + 't': 'bitfields', + 'X': 'function pointers', +} + +class _Stream: + def __init__(self, s): + self.s = s + self.byteorder = '@' + + def advance(self, n): + res = self.s[:n] + self.s = self.s[n:] + return res + + def consume(self, c): + if self.s[:len(c)] == c: + self.advance(len(c)) + return True + return False + + def consume_until(self, c): + if callable(c): + i = 0 + while i < len(self.s) and not c(self.s[i]): + i = i + 1 + return self.advance(i) + else: + i = self.s.index(c) + res = self.advance(i) + self.advance(len(c)) + return res + + @property + def next(self): + return self.s[0] + + def __bool__(self): + return bool(self.s) + + +def _dtype_from_pep3118(spec): + stream = _Stream(spec) + dtype, align = __dtype_from_pep3118(stream, is_subdtype=False) + return dtype + +def __dtype_from_pep3118(stream, is_subdtype): + field_spec = dict( + names=[], + formats=[], + offsets=[], + itemsize=0 + ) + offset = 0 + common_alignment = 1 + is_padding = False + + # Parse spec + while stream: + value = None + + # End of structure, bail out to upper level + if stream.consume('}'): + break + + # Sub-arrays (1) + shape = None + if stream.consume('('): + shape = stream.consume_until(')') + shape = tuple(map(int, shape.split(','))) + + # Byte order + if stream.next in ('@', '=', '<', '>', '^', '!'): + byteorder = stream.advance(1) + if byteorder == '!': + byteorder = '>' + stream.byteorder = byteorder + 
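
Editor's aside, not part of the NumPy diff: a quick round trip through the private parser defined in this module, showing the kind of PEP 3118 struct spec the loop below consumes (explicit little-endian fields, so no native-alignment padding gets added)::

    import numpy as np
    from numpy.core._internal import _dtype_from_pep3118

    dt = _dtype_from_pep3118('T{<i:x:<d:y:}')
    dt == np.dtype([('x', '<i4'), ('y', '<f8')])   # True: packed struct, itemsize 12
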
+ # Byte order characters also control native vs. standard type sizes + if stream.byteorder in ('@', '^'): + type_map = _pep3118_native_map + type_map_chars = _pep3118_native_typechars + else: + type_map = _pep3118_standard_map + type_map_chars = _pep3118_standard_typechars + + # Item sizes + itemsize_str = stream.consume_until(lambda c: not c.isdigit()) + if itemsize_str: + itemsize = int(itemsize_str) + else: + itemsize = 1 + + # Data types + is_padding = False + + if stream.consume('T{'): + value, align = __dtype_from_pep3118( + stream, is_subdtype=True) + elif stream.next in type_map_chars: + if stream.next == 'Z': + typechar = stream.advance(2) + else: + typechar = stream.advance(1) + + is_padding = (typechar == 'x') + dtypechar = type_map[typechar] + if dtypechar in 'USV': + dtypechar += '%d' % itemsize + itemsize = 1 + numpy_byteorder = {'@': '=', '^': '='}.get( + stream.byteorder, stream.byteorder) + value = dtype(numpy_byteorder + dtypechar) + align = value.alignment + elif stream.next in _pep3118_unsupported_map: + desc = _pep3118_unsupported_map[stream.next] + raise NotImplementedError( + "Unrepresentable PEP 3118 data type {!r} ({})" + .format(stream.next, desc)) + else: + raise ValueError("Unknown PEP 3118 data type specifier %r" % stream.s) + + # + # Native alignment may require padding + # + # Here we assume that the presence of a '@' character implicitly implies + # that the start of the array is *already* aligned. + # + extra_offset = 0 + if stream.byteorder == '@': + start_padding = (-offset) % align + intra_padding = (-value.itemsize) % align + + offset += start_padding + + if intra_padding != 0: + if itemsize > 1 or (shape is not None and _prod(shape) > 1): + # Inject internal padding to the end of the sub-item + value = _add_trailing_padding(value, intra_padding) + else: + # We can postpone the injection of internal padding, + # as the item appears at most once + extra_offset += intra_padding + + # Update common alignment + common_alignment = _lcm(align, common_alignment) + + # Convert itemsize to sub-array + if itemsize != 1: + value = dtype((value, (itemsize,))) + + # Sub-arrays (2) + if shape is not None: + value = dtype((value, shape)) + + # Field name + if stream.consume(':'): + name = stream.consume_until(':') + else: + name = None + + if not (is_padding and name is None): + if name is not None and name in field_spec['names']: + raise RuntimeError(f"Duplicate field name '{name}' in PEP3118 format") + field_spec['names'].append(name) + field_spec['formats'].append(value) + field_spec['offsets'].append(offset) + + offset += value.itemsize + offset += extra_offset + + field_spec['itemsize'] = offset + + # extra final padding for aligned types + if stream.byteorder == '@': + field_spec['itemsize'] += (-offset) % common_alignment + + # Check if this was a simple 1-item type, and unwrap it + if (field_spec['names'] == [None] + and field_spec['offsets'][0] == 0 + and field_spec['itemsize'] == field_spec['formats'][0].itemsize + and not is_subdtype): + ret = field_spec['formats'][0] + else: + _fix_names(field_spec) + ret = dtype(field_spec) + + # Finished + return ret, common_alignment + +def _fix_names(field_spec): + """ Replace names which are None with the next unused f%d name """ + names = field_spec['names'] + for i, name in enumerate(names): + if name is not None: + continue + + j = 0 + while True: + name = f'f{j}' + if name not in names: + break + j = j + 1 + names[i] = name + +def _add_trailing_padding(value, padding): + """Inject the specified number of padding 
bytes at the end of a dtype""" + if value.fields is None: + field_spec = dict( + names=['f0'], + formats=[value], + offsets=[0], + itemsize=value.itemsize + ) + else: + fields = value.fields + names = value.names + field_spec = dict( + names=names, + formats=[fields[name][0] for name in names], + offsets=[fields[name][1] for name in names], + itemsize=value.itemsize + ) + + field_spec['itemsize'] += padding + return dtype(field_spec) + +def _prod(a): + p = 1 + for x in a: + p *= x + return p + +def _gcd(a, b): + """Calculate the greatest common divisor of a and b""" + while b: + a, b = b, a % b + return a + +def _lcm(a, b): + return a // _gcd(a, b) * b + +def array_ufunc_errmsg_formatter(dummy, ufunc, method, *inputs, **kwargs): + """ Format the error message for when __array_ufunc__ gives up. """ + args_string = ', '.join(['{!r}'.format(arg) for arg in inputs] + + ['{}={!r}'.format(k, v) + for k, v in kwargs.items()]) + args = inputs + kwargs.get('out', ()) + types_string = ', '.join(repr(type(arg).__name__) for arg in args) + return ('operand type(s) all returned NotImplemented from ' + '__array_ufunc__({!r}, {!r}, {}): {}' + .format(ufunc, method, args_string, types_string)) + + +def array_function_errmsg_formatter(public_api, types): + """ Format the error message for when __array_ufunc__ gives up. """ + func_name = '{}.{}'.format(public_api.__module__, public_api.__name__) + return ("no implementation found for '{}' on types that implement " + '__array_function__: {}'.format(func_name, list(types))) + + +def _ufunc_doc_signature_formatter(ufunc): + """ + Builds a signature string which resembles PEP 457 + + This is used to construct the first line of the docstring + """ + + # input arguments are simple + if ufunc.nin == 1: + in_args = 'x' + else: + in_args = ', '.join(f'x{i+1}' for i in range(ufunc.nin)) + + # output arguments are both keyword or positional + if ufunc.nout == 0: + out_args = ', /, out=()' + elif ufunc.nout == 1: + out_args = ', /, out=None' + else: + out_args = '[, {positional}], / [, out={default}]'.format( + positional=', '.join( + 'out{}'.format(i+1) for i in range(ufunc.nout)), + default=repr((None,)*ufunc.nout) + ) + + # keyword only args depend on whether this is a gufunc + kwargs = ( + ", casting='same_kind'" + ", order='K'" + ", dtype=None" + ", subok=True" + ) + + # NOTE: gufuncs may or may not support the `axis` parameter + if ufunc.signature is None: + kwargs = f", where=True{kwargs}[, signature, extobj]" + else: + kwargs += "[, signature, extobj, axes, axis]" + + # join all the parts together + return '{name}({in_args}{out_args}, *{kwargs})'.format( + name=ufunc.__name__, + in_args=in_args, + out_args=out_args, + kwargs=kwargs + ) + + +def npy_ctypes_check(cls): + # determine if a class comes from ctypes, in order to work around + # a bug in the buffer protocol for those objects, bpo-10746 + try: + # ctypes class are new-style, so have an __mro__. This probably fails + # for ctypes classes with multiple inheritance. 
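
Editor's aside, not part of the NumPy diff: the MRO heuristic used by ``npy_ctypes_check`` below can be seen directly on CPython, where the base chain of any ctypes type ends with ``_ctypes._CData`` and ``object``::

    import ctypes

    class Point(ctypes.Structure):
        _fields_ = [('x', ctypes.c_int), ('y', ctypes.c_int)]

    Point.__mro__[-2].__module__                  # '_ctypes' on CPython
    '_ctypes' in Point.__mro__[-2].__module__     # True
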
+ if IS_PYPY: + # (..., _ctypes.basics._CData, Bufferable, object) + ctype_base = cls.__mro__[-3] + else: + # # (..., _ctypes._CData, object) + ctype_base = cls.__mro__[-2] + # right now, they're part of the _ctypes module + return '_ctypes' in ctype_base.__module__ + except Exception: + return False diff --git a/.venv/lib/python3.12/site-packages/numpy/core/_internal.pyi b/.venv/lib/python3.12/site-packages/numpy/core/_internal.pyi new file mode 100644 index 00000000..8a25ef2c --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/_internal.pyi @@ -0,0 +1,30 @@ +from typing import Any, TypeVar, overload, Generic +import ctypes as ct + +from numpy import ndarray +from numpy.ctypeslib import c_intp + +_CastT = TypeVar("_CastT", bound=ct._CanCastTo) # Copied from `ctypes.cast` +_CT = TypeVar("_CT", bound=ct._CData) +_PT = TypeVar("_PT", bound=None | int) + +# TODO: Let the likes of `shape_as` and `strides_as` return `None` +# for 0D arrays once we've got shape-support + +class _ctypes(Generic[_PT]): + @overload + def __new__(cls, array: ndarray[Any, Any], ptr: None = ...) -> _ctypes[None]: ... + @overload + def __new__(cls, array: ndarray[Any, Any], ptr: _PT) -> _ctypes[_PT]: ... + @property + def data(self) -> _PT: ... + @property + def shape(self) -> ct.Array[c_intp]: ... + @property + def strides(self) -> ct.Array[c_intp]: ... + @property + def _as_parameter_(self) -> ct.c_void_p: ... + + def data_as(self, obj: type[_CastT]) -> _CastT: ... + def shape_as(self, obj: type[_CT]) -> ct.Array[_CT]: ... + def strides_as(self, obj: type[_CT]) -> ct.Array[_CT]: ... diff --git a/.venv/lib/python3.12/site-packages/numpy/core/_machar.py b/.venv/lib/python3.12/site-packages/numpy/core/_machar.py new file mode 100644 index 00000000..59d71014 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/_machar.py @@ -0,0 +1,356 @@ +""" +Machine arithmetic - determine the parameters of the +floating-point arithmetic system + +Author: Pearu Peterson, September 2003 + +""" +__all__ = ['MachAr'] + +from .fromnumeric import any +from ._ufunc_config import errstate +from .._utils import set_module + +# Need to speed this up...especially for longfloat + +# Deprecated 2021-10-20, NumPy 1.22 +class MachAr: + """ + Diagnosing machine parameters. + + Attributes + ---------- + ibeta : int + Radix in which numbers are represented. + it : int + Number of base-`ibeta` digits in the floating point mantissa M. + machep : int + Exponent of the smallest (most negative) power of `ibeta` that, + added to 1.0, gives something different from 1.0 + eps : float + Floating-point number ``beta**machep`` (floating point precision) + negep : int + Exponent of the smallest power of `ibeta` that, subtracted + from 1.0, gives something different from 1.0. + epsneg : float + Floating-point number ``beta**negep``. + iexp : int + Number of bits in the exponent (including its sign and bias). + minexp : int + Smallest (most negative) power of `ibeta` consistent with there + being no leading zeros in the mantissa. + xmin : float + Floating-point number ``beta**minexp`` (the smallest [in + magnitude] positive floating point number with full precision). + maxexp : int + Smallest (positive) power of `ibeta` that causes overflow. + xmax : float + ``(1-epsneg) * beta**maxexp`` (the largest [in magnitude] + usable floating value). + irnd : int + In ``range(6)``, information on what kind of rounding is done + in addition, and on how underflow is handled. 
+ ngrd : int + Number of 'guard digits' used when truncating the product + of two mantissas to fit the representation. + epsilon : float + Same as `eps`. + tiny : float + An alias for `smallest_normal`, kept for backwards compatibility. + huge : float + Same as `xmax`. + precision : float + ``- int(-log10(eps))`` + resolution : float + ``- 10**(-precision)`` + smallest_normal : float + The smallest positive floating point number with 1 as leading bit in + the mantissa following IEEE-754. Same as `xmin`. + smallest_subnormal : float + The smallest positive floating point number with 0 as leading bit in + the mantissa following IEEE-754. + + Parameters + ---------- + float_conv : function, optional + Function that converts an integer or integer array to a float + or float array. Default is `float`. + int_conv : function, optional + Function that converts a float or float array to an integer or + integer array. Default is `int`. + float_to_float : function, optional + Function that converts a float array to float. Default is `float`. + Note that this does not seem to do anything useful in the current + implementation. + float_to_str : function, optional + Function that converts a single float to a string. Default is + ``lambda v:'%24.16e' %v``. + title : str, optional + Title that is printed in the string representation of `MachAr`. + + See Also + -------- + finfo : Machine limits for floating point types. + iinfo : Machine limits for integer types. + + References + ---------- + .. [1] Press, Teukolsky, Vetterling and Flannery, + "Numerical Recipes in C++," 2nd ed, + Cambridge University Press, 2002, p. 31. + + """ + + def __init__(self, float_conv=float,int_conv=int, + float_to_float=float, + float_to_str=lambda v:'%24.16e' % v, + title='Python floating point number'): + """ + + float_conv - convert integer to float (array) + int_conv - convert float (array) to integer + float_to_float - convert float array to float + float_to_str - convert array float to str + title - description of used floating point numbers + + """ + # We ignore all errors here because we are purposely triggering + # underflow to detect the properties of the runninng arch. + with errstate(under='ignore'): + self._do_init(float_conv, int_conv, float_to_float, float_to_str, title) + + def _do_init(self, float_conv, int_conv, float_to_float, float_to_str, title): + max_iterN = 10000 + msg = "Did not converge after %d tries with %s" + one = float_conv(1) + two = one + one + zero = one - one + + # Do we really need to do this? Aren't they 2 and 2.0? 
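
Editor's aside, not part of the NumPy diff: the "Determine ibeta" step below is the classic MACHAR-style radix probe. A plain-float sketch of the same idea, using Python floats instead of the array-aware helpers::

    one = 1.0
    a = one
    while ((a + one) - a) - one == 0.0:
        a = a + a                  # grow a until adding 1.0 is lost to rounding
    b = one
    while int((a + b) - a) == 0:
        b = b + b
    ibeta = int((a + b) - a)       # the radix; 2 for IEEE-754 doubles
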
+ # Determine ibeta and beta + a = one + for _ in range(max_iterN): + a = a + a + temp = a + one + temp1 = temp - a + if any(temp1 - one != zero): + break + else: + raise RuntimeError(msg % (_, one.dtype)) + b = one + for _ in range(max_iterN): + b = b + b + temp = a + b + itemp = int_conv(temp-a) + if any(itemp != 0): + break + else: + raise RuntimeError(msg % (_, one.dtype)) + ibeta = itemp + beta = float_conv(ibeta) + + # Determine it and irnd + it = -1 + b = one + for _ in range(max_iterN): + it = it + 1 + b = b * beta + temp = b + one + temp1 = temp - b + if any(temp1 - one != zero): + break + else: + raise RuntimeError(msg % (_, one.dtype)) + + betah = beta / two + a = one + for _ in range(max_iterN): + a = a + a + temp = a + one + temp1 = temp - a + if any(temp1 - one != zero): + break + else: + raise RuntimeError(msg % (_, one.dtype)) + temp = a + betah + irnd = 0 + if any(temp-a != zero): + irnd = 1 + tempa = a + beta + temp = tempa + betah + if irnd == 0 and any(temp-tempa != zero): + irnd = 2 + + # Determine negep and epsneg + negep = it + 3 + betain = one / beta + a = one + for i in range(negep): + a = a * betain + b = a + for _ in range(max_iterN): + temp = one - a + if any(temp-one != zero): + break + a = a * beta + negep = negep - 1 + # Prevent infinite loop on PPC with gcc 4.0: + if negep < 0: + raise RuntimeError("could not determine machine tolerance " + "for 'negep', locals() -> %s" % (locals())) + else: + raise RuntimeError(msg % (_, one.dtype)) + negep = -negep + epsneg = a + + # Determine machep and eps + machep = - it - 3 + a = b + + for _ in range(max_iterN): + temp = one + a + if any(temp-one != zero): + break + a = a * beta + machep = machep + 1 + else: + raise RuntimeError(msg % (_, one.dtype)) + eps = a + + # Determine ngrd + ngrd = 0 + temp = one + eps + if irnd == 0 and any(temp*one - one != zero): + ngrd = 1 + + # Determine iexp + i = 0 + k = 1 + z = betain + t = one + eps + nxres = 0 + for _ in range(max_iterN): + y = z + z = y*y + a = z*one # Check here for underflow + temp = z*t + if any(a+a == zero) or any(abs(z) >= y): + break + temp1 = temp * betain + if any(temp1*beta == z): + break + i = i + 1 + k = k + k + else: + raise RuntimeError(msg % (_, one.dtype)) + if ibeta != 10: + iexp = i + 1 + mx = k + k + else: + iexp = 2 + iz = ibeta + while k >= iz: + iz = iz * ibeta + iexp = iexp + 1 + mx = iz + iz - 1 + + # Determine minexp and xmin + for _ in range(max_iterN): + xmin = y + y = y * betain + a = y * one + temp = y * t + if any((a + a) != zero) and any(abs(y) < xmin): + k = k + 1 + temp1 = temp * betain + if any(temp1*beta == y) and any(temp != y): + nxres = 3 + xmin = y + break + else: + break + else: + raise RuntimeError(msg % (_, one.dtype)) + minexp = -k + + # Determine maxexp, xmax + if mx <= k + k - 3 and ibeta != 10: + mx = mx + mx + iexp = iexp + 1 + maxexp = mx + minexp + irnd = irnd + nxres + if irnd >= 2: + maxexp = maxexp - 2 + i = maxexp + minexp + if ibeta == 2 and not i: + maxexp = maxexp - 1 + if i > 20: + maxexp = maxexp - 1 + if any(a != y): + maxexp = maxexp - 2 + xmax = one - epsneg + if any(xmax*one != xmax): + xmax = one - beta*epsneg + xmax = xmax / (xmin*beta*beta*beta) + i = maxexp + minexp + 3 + for j in range(i): + if ibeta == 2: + xmax = xmax + xmax + else: + xmax = xmax * beta + + smallest_subnormal = abs(xmin / beta ** (it)) + + self.ibeta = ibeta + self.it = it + self.negep = negep + self.epsneg = float_to_float(epsneg) + self._str_epsneg = float_to_str(epsneg) + self.machep = machep + self.eps = float_to_float(eps) + 
self._str_eps = float_to_str(eps) + self.ngrd = ngrd + self.iexp = iexp + self.minexp = minexp + self.xmin = float_to_float(xmin) + self._str_xmin = float_to_str(xmin) + self.maxexp = maxexp + self.xmax = float_to_float(xmax) + self._str_xmax = float_to_str(xmax) + self.irnd = irnd + + self.title = title + # Commonly used parameters + self.epsilon = self.eps + self.tiny = self.xmin + self.huge = self.xmax + self.smallest_normal = self.xmin + self._str_smallest_normal = float_to_str(self.xmin) + self.smallest_subnormal = float_to_float(smallest_subnormal) + self._str_smallest_subnormal = float_to_str(smallest_subnormal) + + import math + self.precision = int(-math.log10(float_to_float(self.eps))) + ten = two + two + two + two + two + resolution = ten ** (-self.precision) + self.resolution = float_to_float(resolution) + self._str_resolution = float_to_str(resolution) + + def __str__(self): + fmt = ( + 'Machine parameters for %(title)s\n' + '---------------------------------------------------------------------\n' + 'ibeta=%(ibeta)s it=%(it)s iexp=%(iexp)s ngrd=%(ngrd)s irnd=%(irnd)s\n' + 'machep=%(machep)s eps=%(_str_eps)s (beta**machep == epsilon)\n' + 'negep =%(negep)s epsneg=%(_str_epsneg)s (beta**epsneg)\n' + 'minexp=%(minexp)s xmin=%(_str_xmin)s (beta**minexp == tiny)\n' + 'maxexp=%(maxexp)s xmax=%(_str_xmax)s ((1-epsneg)*beta**maxexp == huge)\n' + 'smallest_normal=%(smallest_normal)s ' + 'smallest_subnormal=%(smallest_subnormal)s\n' + '---------------------------------------------------------------------\n' + ) + return fmt % self.__dict__ + + +if __name__ == '__main__': + print(MachAr()) diff --git a/.venv/lib/python3.12/site-packages/numpy/core/_methods.py b/.venv/lib/python3.12/site-packages/numpy/core/_methods.py new file mode 100644 index 00000000..0fc070b3 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/_methods.py @@ -0,0 +1,234 @@ +""" +Array methods which are called by both the C-code for the method +and the Python code for the NumPy-namespace function + +""" +import warnings +from contextlib import nullcontext + +from numpy.core import multiarray as mu +from numpy.core import umath as um +from numpy.core.multiarray import asanyarray +from numpy.core import numerictypes as nt +from numpy.core import _exceptions +from numpy.core._ufunc_config import _no_nep50_warning +from numpy._globals import _NoValue +from numpy.compat import pickle, os_fspath + +# save those O(100) nanoseconds! 
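
Editor's aside, not part of the NumPy diff: the ``umr_*`` aliases defined just below bind each ufunc's ``reduce`` method once at import time, so the hot paths skip repeated attribute lookup and keyword parsing. A rough usage sketch::

    import numpy as np

    umr_sum = np.add.reduce
    a = np.arange(10.0)
    umr_sum(a)                     # 45.0, same result as a.sum()
    umr_sum(a.reshape(2, 5), 1)    # positional axis, as _sum() passes it
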
+umr_maximum = um.maximum.reduce +umr_minimum = um.minimum.reduce +umr_sum = um.add.reduce +umr_prod = um.multiply.reduce +umr_any = um.logical_or.reduce +umr_all = um.logical_and.reduce + +# Complex types to -> (2,)float view for fast-path computation in _var() +_complex_to_float = { + nt.dtype(nt.csingle) : nt.dtype(nt.single), + nt.dtype(nt.cdouble) : nt.dtype(nt.double), +} +# Special case for windows: ensure double takes precedence +if nt.dtype(nt.longdouble) != nt.dtype(nt.double): + _complex_to_float.update({ + nt.dtype(nt.clongdouble) : nt.dtype(nt.longdouble), + }) + +# avoid keyword arguments to speed up parsing, saves about 15%-20% for very +# small reductions +def _amax(a, axis=None, out=None, keepdims=False, + initial=_NoValue, where=True): + return umr_maximum(a, axis, None, out, keepdims, initial, where) + +def _amin(a, axis=None, out=None, keepdims=False, + initial=_NoValue, where=True): + return umr_minimum(a, axis, None, out, keepdims, initial, where) + +def _sum(a, axis=None, dtype=None, out=None, keepdims=False, + initial=_NoValue, where=True): + return umr_sum(a, axis, dtype, out, keepdims, initial, where) + +def _prod(a, axis=None, dtype=None, out=None, keepdims=False, + initial=_NoValue, where=True): + return umr_prod(a, axis, dtype, out, keepdims, initial, where) + +def _any(a, axis=None, dtype=None, out=None, keepdims=False, *, where=True): + # Parsing keyword arguments is currently fairly slow, so avoid it for now + if where is True: + return umr_any(a, axis, dtype, out, keepdims) + return umr_any(a, axis, dtype, out, keepdims, where=where) + +def _all(a, axis=None, dtype=None, out=None, keepdims=False, *, where=True): + # Parsing keyword arguments is currently fairly slow, so avoid it for now + if where is True: + return umr_all(a, axis, dtype, out, keepdims) + return umr_all(a, axis, dtype, out, keepdims, where=where) + +def _count_reduce_items(arr, axis, keepdims=False, where=True): + # fast-path for the default case + if where is True: + # no boolean mask given, calculate items according to axis + if axis is None: + axis = tuple(range(arr.ndim)) + elif not isinstance(axis, tuple): + axis = (axis,) + items = 1 + for ax in axis: + items *= arr.shape[mu.normalize_axis_index(ax, arr.ndim)] + items = nt.intp(items) + else: + # TODO: Optimize case when `where` is broadcast along a non-reduction + # axis and full sum is more excessive than needed. 
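
Editor's aside, not part of the NumPy diff: when a ``where`` mask is given, ``_count_reduce_items`` below counts the selected elements by broadcasting the mask to the array's shape and summing it. A standalone illustration of that counting step::

    import numpy as np
    from numpy.lib.stride_tricks import broadcast_to

    arr = np.ones((3, 4))
    where = np.array([True, False, True, True])   # broadcasts across rows
    np.add.reduce(broadcast_to(where, arr.shape), axis=1, dtype=np.intp)
    # -> array([3, 3, 3]): per-row count of elements taking part in the reduction
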
+ + # guarded to protect circular imports + from numpy.lib.stride_tricks import broadcast_to + # count True values in (potentially broadcasted) boolean mask + items = umr_sum(broadcast_to(where, arr.shape), axis, nt.intp, None, + keepdims) + return items + +def _clip(a, min=None, max=None, out=None, **kwargs): + if min is None and max is None: + raise ValueError("One of max or min must be given") + + if min is None: + return um.minimum(a, max, out=out, **kwargs) + elif max is None: + return um.maximum(a, min, out=out, **kwargs) + else: + return um.clip(a, min, max, out=out, **kwargs) + +def _mean(a, axis=None, dtype=None, out=None, keepdims=False, *, where=True): + arr = asanyarray(a) + + is_float16_result = False + + rcount = _count_reduce_items(arr, axis, keepdims=keepdims, where=where) + if rcount == 0 if where is True else umr_any(rcount == 0, axis=None): + warnings.warn("Mean of empty slice.", RuntimeWarning, stacklevel=2) + + # Cast bool, unsigned int, and int to float64 by default + if dtype is None: + if issubclass(arr.dtype.type, (nt.integer, nt.bool_)): + dtype = mu.dtype('f8') + elif issubclass(arr.dtype.type, nt.float16): + dtype = mu.dtype('f4') + is_float16_result = True + + ret = umr_sum(arr, axis, dtype, out, keepdims, where=where) + if isinstance(ret, mu.ndarray): + with _no_nep50_warning(): + ret = um.true_divide( + ret, rcount, out=ret, casting='unsafe', subok=False) + if is_float16_result and out is None: + ret = arr.dtype.type(ret) + elif hasattr(ret, 'dtype'): + if is_float16_result: + ret = arr.dtype.type(ret / rcount) + else: + ret = ret.dtype.type(ret / rcount) + else: + ret = ret / rcount + + return ret + +def _var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, + where=True): + arr = asanyarray(a) + + rcount = _count_reduce_items(arr, axis, keepdims=keepdims, where=where) + # Make this warning show up on top. + if ddof >= rcount if where is True else umr_any(ddof >= rcount, axis=None): + warnings.warn("Degrees of freedom <= 0 for slice", RuntimeWarning, + stacklevel=2) + + # Cast bool, unsigned int, and int to float64 by default + if dtype is None and issubclass(arr.dtype.type, (nt.integer, nt.bool_)): + dtype = mu.dtype('f8') + + # Compute the mean. + # Note that if dtype is not of inexact type then arraymean will + # not be either. + arrmean = umr_sum(arr, axis, dtype, keepdims=True, where=where) + # The shape of rcount has to match arrmean to not change the shape of out + # in broadcasting. Otherwise, it cannot be stored back to arrmean. + if rcount.ndim == 0: + # fast-path for default case when where is True + div = rcount + else: + # matching rcount to arrmean when where is specified as array + div = rcount.reshape(arrmean.shape) + if isinstance(arrmean, mu.ndarray): + with _no_nep50_warning(): + arrmean = um.true_divide(arrmean, div, out=arrmean, + casting='unsafe', subok=False) + elif hasattr(arrmean, "dtype"): + arrmean = arrmean.dtype.type(arrmean / rcount) + else: + arrmean = arrmean / rcount + + # Compute sum of squared deviations from mean + # Note that x may not be inexact and that we need it to be an array, + # not a scalar. 
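
Editor's aside, not part of the NumPy diff: the remainder of ``_var`` below computes the sum of squared deviations from the mean and divides by ``N - ddof``. A plain two-pass sketch of the same computation::

    import numpy as np

    a = np.array([1.0, 2.0, 4.0])
    mean = np.add.reduce(a) / a.size
    x = a - mean
    var = np.add.reduce(x * x) / max(a.size - 1, 0)   # ddof=1
    np.allclose(var, np.var(a, ddof=1))               # True
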
+ x = asanyarray(arr - arrmean) + + if issubclass(arr.dtype.type, (nt.floating, nt.integer)): + x = um.multiply(x, x, out=x) + # Fast-paths for built-in complex types + elif x.dtype in _complex_to_float: + xv = x.view(dtype=(_complex_to_float[x.dtype], (2,))) + um.multiply(xv, xv, out=xv) + x = um.add(xv[..., 0], xv[..., 1], out=x.real).real + # Most general case; includes handling object arrays containing imaginary + # numbers and complex types with non-native byteorder + else: + x = um.multiply(x, um.conjugate(x), out=x).real + + ret = umr_sum(x, axis, dtype, out, keepdims=keepdims, where=where) + + # Compute degrees of freedom and make sure it is not negative. + rcount = um.maximum(rcount - ddof, 0) + + # divide by degrees of freedom + if isinstance(ret, mu.ndarray): + with _no_nep50_warning(): + ret = um.true_divide( + ret, rcount, out=ret, casting='unsafe', subok=False) + elif hasattr(ret, 'dtype'): + ret = ret.dtype.type(ret / rcount) + else: + ret = ret / rcount + + return ret + +def _std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, + where=True): + ret = _var(a, axis=axis, dtype=dtype, out=out, ddof=ddof, + keepdims=keepdims, where=where) + + if isinstance(ret, mu.ndarray): + ret = um.sqrt(ret, out=ret) + elif hasattr(ret, 'dtype'): + ret = ret.dtype.type(um.sqrt(ret)) + else: + ret = um.sqrt(ret) + + return ret + +def _ptp(a, axis=None, out=None, keepdims=False): + return um.subtract( + umr_maximum(a, axis, None, out, keepdims), + umr_minimum(a, axis, None, None, keepdims), + out + ) + +def _dump(self, file, protocol=2): + if hasattr(file, 'write'): + ctx = nullcontext(file) + else: + ctx = open(os_fspath(file), "wb") + with ctx as f: + pickle.dump(self, f, protocol=protocol) + +def _dumps(self, protocol=2): + return pickle.dumps(self, protocol=protocol) diff --git a/.venv/lib/python3.12/site-packages/numpy/core/_multiarray_tests.cpython-312-x86_64-linux-gnu.so b/.venv/lib/python3.12/site-packages/numpy/core/_multiarray_tests.cpython-312-x86_64-linux-gnu.so new file mode 100755 index 00000000..15daa38b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/_multiarray_tests.cpython-312-x86_64-linux-gnu.so Binary files differdiff --git a/.venv/lib/python3.12/site-packages/numpy/core/_multiarray_umath.cpython-312-x86_64-linux-gnu.so b/.venv/lib/python3.12/site-packages/numpy/core/_multiarray_umath.cpython-312-x86_64-linux-gnu.so new file mode 100755 index 00000000..96fceb4f --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/_multiarray_umath.cpython-312-x86_64-linux-gnu.so Binary files differdiff --git a/.venv/lib/python3.12/site-packages/numpy/core/_operand_flag_tests.cpython-312-x86_64-linux-gnu.so b/.venv/lib/python3.12/site-packages/numpy/core/_operand_flag_tests.cpython-312-x86_64-linux-gnu.so new file mode 100755 index 00000000..03e5810b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/_operand_flag_tests.cpython-312-x86_64-linux-gnu.so Binary files differdiff --git a/.venv/lib/python3.12/site-packages/numpy/core/_rational_tests.cpython-312-x86_64-linux-gnu.so b/.venv/lib/python3.12/site-packages/numpy/core/_rational_tests.cpython-312-x86_64-linux-gnu.so new file mode 100755 index 00000000..35074db8 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/_rational_tests.cpython-312-x86_64-linux-gnu.so Binary files differdiff --git a/.venv/lib/python3.12/site-packages/numpy/core/_simd.cpython-312-x86_64-linux-gnu.so b/.venv/lib/python3.12/site-packages/numpy/core/_simd.cpython-312-x86_64-linux-gnu.so new 
file mode 100755 index 00000000..6b630579 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/_simd.cpython-312-x86_64-linux-gnu.so Binary files differdiff --git a/.venv/lib/python3.12/site-packages/numpy/core/_string_helpers.py b/.venv/lib/python3.12/site-packages/numpy/core/_string_helpers.py new file mode 100644 index 00000000..1f757cc0 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/_string_helpers.py @@ -0,0 +1,100 @@ +""" +String-handling utilities to avoid locale-dependence. + +Used primarily to generate type name aliases. +""" +# "import string" is costly to import! +# Construct the translation tables directly +# "A" = chr(65), "a" = chr(97) +_all_chars = tuple(map(chr, range(256))) +_ascii_upper = _all_chars[65:65+26] +_ascii_lower = _all_chars[97:97+26] +LOWER_TABLE = "".join(_all_chars[:65] + _ascii_lower + _all_chars[65+26:]) +UPPER_TABLE = "".join(_all_chars[:97] + _ascii_upper + _all_chars[97+26:]) + + +def english_lower(s): + """ Apply English case rules to convert ASCII strings to all lower case. + + This is an internal utility function to replace calls to str.lower() such + that we can avoid changing behavior with changing locales. In particular, + Turkish has distinct dotted and dotless variants of the Latin letter "I" in + both lowercase and uppercase. Thus, "I".lower() != "i" in a "tr" locale. + + Parameters + ---------- + s : str + + Returns + ------- + lowered : str + + Examples + -------- + >>> from numpy.core.numerictypes import english_lower + >>> english_lower('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_') + 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz0123456789_' + >>> english_lower('') + '' + """ + lowered = s.translate(LOWER_TABLE) + return lowered + + +def english_upper(s): + """ Apply English case rules to convert ASCII strings to all upper case. + + This is an internal utility function to replace calls to str.upper() such + that we can avoid changing behavior with changing locales. In particular, + Turkish has distinct dotted and dotless variants of the Latin letter "I" in + both lowercase and uppercase. Thus, "i".upper() != "I" in a "tr" locale. + + Parameters + ---------- + s : str + + Returns + ------- + uppered : str + + Examples + -------- + >>> from numpy.core.numerictypes import english_upper + >>> english_upper('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_') + 'ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_' + >>> english_upper('') + '' + """ + uppered = s.translate(UPPER_TABLE) + return uppered + + +def english_capitalize(s): + """ Apply English case rules to convert the first character of an ASCII + string to upper case. + + This is an internal utility function to replace calls to str.capitalize() + such that we can avoid changing behavior with changing locales. 
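
Editor's aside, not part of the NumPy diff: the translation-table idea behind ``english_lower``/``english_upper`` above can be sketched with ``str.maketrans`` over the ASCII letters only (NumPy builds full 256-entry tables); the point is that the mapping never consults the locale::

    ascii_upper = ''.join(chr(c) for c in range(ord('A'), ord('Z') + 1))
    ascii_lower = ''.join(chr(c) for c in range(ord('a'), ord('z') + 1))
    LOWER = str.maketrans(ascii_upper, ascii_lower)
    'Float64_I'.translate(LOWER)   # 'float64_i', regardless of locale
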
+ + Parameters + ---------- + s : str + + Returns + ------- + capitalized : str + + Examples + -------- + >>> from numpy.core.numerictypes import english_capitalize + >>> english_capitalize('int8') + 'Int8' + >>> english_capitalize('Int8') + 'Int8' + >>> english_capitalize('') + '' + """ + if s: + return english_upper(s[0]) + s[1:] + else: + return s diff --git a/.venv/lib/python3.12/site-packages/numpy/core/_struct_ufunc_tests.cpython-312-x86_64-linux-gnu.so b/.venv/lib/python3.12/site-packages/numpy/core/_struct_ufunc_tests.cpython-312-x86_64-linux-gnu.so new file mode 100755 index 00000000..a24e4e7a --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/_struct_ufunc_tests.cpython-312-x86_64-linux-gnu.so Binary files differdiff --git a/.venv/lib/python3.12/site-packages/numpy/core/_type_aliases.py b/.venv/lib/python3.12/site-packages/numpy/core/_type_aliases.py new file mode 100644 index 00000000..38f1a099 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/_type_aliases.py @@ -0,0 +1,245 @@ +""" +Due to compatibility, numpy has a very large number of different naming +conventions for the scalar types (those subclassing from `numpy.generic`). +This file produces a convoluted set of dictionaries mapping names to types, +and sometimes other mappings too. + +.. data:: allTypes + A dictionary of names to types that will be exposed as attributes through + ``np.core.numerictypes.*`` + +.. data:: sctypeDict + Similar to `allTypes`, but maps a broader set of aliases to their types. + +.. data:: sctypes + A dictionary keyed by a "type group" string, providing a list of types + under that group. + +""" + +from numpy.compat import unicode +from numpy.core._string_helpers import english_lower +from numpy.core.multiarray import typeinfo, dtype +from numpy.core._dtype import _kind_name + + +sctypeDict = {} # Contains all leaf-node scalar types with aliases +allTypes = {} # Collect the types we will add to the module + + +# separate the actual type info from the abstract base classes +_abstract_types = {} +_concrete_typeinfo = {} +for k, v in typeinfo.items(): + # make all the keys lowercase too + k = english_lower(k) + if isinstance(v, type): + _abstract_types[k] = v + else: + _concrete_typeinfo[k] = v + +_concrete_types = {v.type for k, v in _concrete_typeinfo.items()} + + +def _bits_of(obj): + try: + info = next(v for v in _concrete_typeinfo.values() if v.type is obj) + except StopIteration: + if obj in _abstract_types.values(): + msg = "Cannot count the bits of an abstract type" + raise ValueError(msg) from None + + # some third-party type - make a best-guess + return dtype(obj).itemsize * 8 + else: + return info.bits + + +def bitname(obj): + """Return a bit-width name for a given type object""" + bits = _bits_of(obj) + dt = dtype(obj) + char = dt.kind + base = _kind_name(dt) + + if base == 'object': + bits = 0 + + if bits != 0: + char = "%s%d" % (char, bits // 8) + + return base, bits, char + + +def _add_types(): + for name, info in _concrete_typeinfo.items(): + # define C-name and insert typenum and typechar references also + allTypes[name] = info.type + sctypeDict[name] = info.type + sctypeDict[info.char] = info.type + sctypeDict[info.num] = info.type + + for name, cls in _abstract_types.items(): + allTypes[name] = cls +_add_types() + +# This is the priority order used to assign the bit-sized NPY_INTxx names, which +# must match the order in npy_common.h in order for NPY_INTxx and np.intxx to be +# consistent. 
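
Editor's aside, not part of the NumPy diff (the priority-order comment continues just below): the visible outcome of the sized-alias machinery in this file can be checked from the public namespace. Which C type backs each alias is platform-dependent, so treat the last line as typical rather than guaranteed::

    import numpy as np

    np.dtype('i8') == np.dtype(np.int64)   # True
    np.sctypeDict['int64'] is np.int64     # True, filled in by _add_integer_aliases
    np.sctypeDict['i8'] is np.int64        # True on common platforms
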
+# If two C types have the same size, then the earliest one in this list is used +# as the sized name. +_int_ctypes = ['long', 'longlong', 'int', 'short', 'byte'] +_uint_ctypes = list('u' + t for t in _int_ctypes) + +def _add_aliases(): + for name, info in _concrete_typeinfo.items(): + # these are handled by _add_integer_aliases + if name in _int_ctypes or name in _uint_ctypes: + continue + + # insert bit-width version for this class (if relevant) + base, bit, char = bitname(info.type) + + myname = "%s%d" % (base, bit) + + # ensure that (c)longdouble does not overwrite the aliases assigned to + # (c)double + if name in ('longdouble', 'clongdouble') and myname in allTypes: + continue + + # Add to the main namespace if desired: + if bit != 0 and base != "bool": + allTypes[myname] = info.type + + # add forward, reverse, and string mapping to numarray + sctypeDict[char] = info.type + + # add mapping for both the bit name + sctypeDict[myname] = info.type + + +_add_aliases() + +def _add_integer_aliases(): + seen_bits = set() + for i_ctype, u_ctype in zip(_int_ctypes, _uint_ctypes): + i_info = _concrete_typeinfo[i_ctype] + u_info = _concrete_typeinfo[u_ctype] + bits = i_info.bits # same for both + + for info, charname, intname in [ + (i_info,'i%d' % (bits//8,), 'int%d' % bits), + (u_info,'u%d' % (bits//8,), 'uint%d' % bits)]: + if bits not in seen_bits: + # sometimes two different types have the same number of bits + # if so, the one iterated over first takes precedence + allTypes[intname] = info.type + sctypeDict[intname] = info.type + sctypeDict[charname] = info.type + + seen_bits.add(bits) + +_add_integer_aliases() + +# We use these later +void = allTypes['void'] + +# +# Rework the Python names (so that float and complex and int are consistent +# with Python usage) +# +def _set_up_aliases(): + type_pairs = [('complex_', 'cdouble'), + ('single', 'float'), + ('csingle', 'cfloat'), + ('singlecomplex', 'cfloat'), + ('float_', 'double'), + ('intc', 'int'), + ('uintc', 'uint'), + ('int_', 'long'), + ('uint', 'ulong'), + ('cfloat', 'cdouble'), + ('longfloat', 'longdouble'), + ('clongfloat', 'clongdouble'), + ('longcomplex', 'clongdouble'), + ('bool_', 'bool'), + ('bytes_', 'string'), + ('string_', 'string'), + ('str_', 'unicode'), + ('unicode_', 'unicode'), + ('object_', 'object')] + for alias, t in type_pairs: + allTypes[alias] = allTypes[t] + sctypeDict[alias] = sctypeDict[t] + # Remove aliases overriding python types and modules + to_remove = ['object', 'int', 'float', + 'complex', 'bool', 'string', 'datetime', 'timedelta', + 'bytes', 'str'] + + for t in to_remove: + try: + del allTypes[t] + del sctypeDict[t] + except KeyError: + pass + + # Additional aliases in sctypeDict that should not be exposed as attributes + attrs_to_remove = ['ulong'] + + for t in attrs_to_remove: + try: + del allTypes[t] + except KeyError: + pass +_set_up_aliases() + + +sctypes = {'int': [], + 'uint':[], + 'float':[], + 'complex':[], + 'others':[bool, object, bytes, unicode, void]} + +def _add_array_type(typename, bits): + try: + t = allTypes['%s%d' % (typename, bits)] + except KeyError: + pass + else: + sctypes[typename].append(t) + +def _set_array_types(): + ibytes = [1, 2, 4, 8, 16, 32, 64] + fbytes = [2, 4, 8, 10, 12, 16, 32, 64] + for bytes in ibytes: + bits = 8*bytes + _add_array_type('int', bits) + _add_array_type('uint', bits) + for bytes in fbytes: + bits = 8*bytes + _add_array_type('float', bits) + _add_array_type('complex', 2*bits) + _gi = dtype('p') + if _gi.type not in sctypes['int']: + indx = 0 + sz = 
_gi.itemsize + _lst = sctypes['int'] + while (indx < len(_lst) and sz >= _lst[indx](0).itemsize): + indx += 1 + sctypes['int'].insert(indx, _gi.type) + sctypes['uint'].insert(indx, dtype('P').type) +_set_array_types() + + +# Add additional strings to the sctypeDict +_toadd = ['int', 'float', 'complex', 'bool', 'object', + 'str', 'bytes', ('a', 'bytes_'), + ('int0', 'intp'), ('uint0', 'uintp')] + +for name in _toadd: + if isinstance(name, tuple): + sctypeDict[name[0]] = allTypes[name[1]] + else: + sctypeDict[name] = allTypes['%s_' % name] + +del _toadd, name diff --git a/.venv/lib/python3.12/site-packages/numpy/core/_type_aliases.pyi b/.venv/lib/python3.12/site-packages/numpy/core/_type_aliases.pyi new file mode 100644 index 00000000..c0b6f1a8 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/_type_aliases.pyi @@ -0,0 +1,13 @@ +from typing import Any, TypedDict + +from numpy import generic, signedinteger, unsignedinteger, floating, complexfloating + +class _SCTypes(TypedDict): + int: list[type[signedinteger[Any]]] + uint: list[type[unsignedinteger[Any]]] + float: list[type[floating[Any]]] + complex: list[type[complexfloating[Any, Any]]] + others: list[type] + +sctypeDict: dict[int | str, type[generic]] +sctypes: _SCTypes diff --git a/.venv/lib/python3.12/site-packages/numpy/core/_ufunc_config.py b/.venv/lib/python3.12/site-packages/numpy/core/_ufunc_config.py new file mode 100644 index 00000000..df821309 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/_ufunc_config.py @@ -0,0 +1,466 @@ +""" +Functions for changing global ufunc configuration + +This provides helpers which wrap `umath.geterrobj` and `umath.seterrobj` +""" +import collections.abc +import contextlib +import contextvars + +from .._utils import set_module +from .umath import ( + UFUNC_BUFSIZE_DEFAULT, + ERR_IGNORE, ERR_WARN, ERR_RAISE, ERR_CALL, ERR_PRINT, ERR_LOG, ERR_DEFAULT, + SHIFT_DIVIDEBYZERO, SHIFT_OVERFLOW, SHIFT_UNDERFLOW, SHIFT_INVALID, +) +from . import umath + +__all__ = [ + "seterr", "geterr", "setbufsize", "getbufsize", "seterrcall", "geterrcall", + "errstate", '_no_nep50_warning' +] + +_errdict = {"ignore": ERR_IGNORE, + "warn": ERR_WARN, + "raise": ERR_RAISE, + "call": ERR_CALL, + "print": ERR_PRINT, + "log": ERR_LOG} + +_errdict_rev = {value: key for key, value in _errdict.items()} + + +@set_module('numpy') +def seterr(all=None, divide=None, over=None, under=None, invalid=None): + """ + Set how floating-point errors are handled. + + Note that operations on integer scalar types (such as `int16`) are + handled like floating point, and are affected by these settings. + + Parameters + ---------- + all : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional + Set treatment for all types of floating-point errors at once: + + - ignore: Take no action when the exception occurs. + - warn: Print a `RuntimeWarning` (via the Python `warnings` module). + - raise: Raise a `FloatingPointError`. + - call: Call a function specified using the `seterrcall` function. + - print: Print a warning directly to ``stdout``. + - log: Record error in a Log object specified by `seterrcall`. + + The default is not to change the current behavior. + divide : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional + Treatment for division by zero. + over : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional + Treatment for floating-point overflow. + under : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional + Treatment for floating-point underflow. 
+ invalid : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional + Treatment for invalid floating-point operation. + + Returns + ------- + old_settings : dict + Dictionary containing the old settings. + + See also + -------- + seterrcall : Set a callback function for the 'call' mode. + geterr, geterrcall, errstate + + Notes + ----- + The floating-point exceptions are defined in the IEEE 754 standard [1]_: + + - Division by zero: infinite result obtained from finite numbers. + - Overflow: result too large to be expressed. + - Underflow: result so close to zero that some precision + was lost. + - Invalid operation: result is not an expressible number, typically + indicates that a NaN was produced. + + .. [1] https://en.wikipedia.org/wiki/IEEE_754 + + Examples + -------- + >>> old_settings = np.seterr(all='ignore') #seterr to known value + >>> np.seterr(over='raise') + {'divide': 'ignore', 'over': 'ignore', 'under': 'ignore', 'invalid': 'ignore'} + >>> np.seterr(**old_settings) # reset to default + {'divide': 'ignore', 'over': 'raise', 'under': 'ignore', 'invalid': 'ignore'} + + >>> np.int16(32000) * np.int16(3) + 30464 + >>> old_settings = np.seterr(all='warn', over='raise') + >>> np.int16(32000) * np.int16(3) + Traceback (most recent call last): + File "<stdin>", line 1, in <module> + FloatingPointError: overflow encountered in scalar multiply + + >>> old_settings = np.seterr(all='print') + >>> np.geterr() + {'divide': 'print', 'over': 'print', 'under': 'print', 'invalid': 'print'} + >>> np.int16(32000) * np.int16(3) + 30464 + + """ + + pyvals = umath.geterrobj() + old = geterr() + + if divide is None: + divide = all or old['divide'] + if over is None: + over = all or old['over'] + if under is None: + under = all or old['under'] + if invalid is None: + invalid = all or old['invalid'] + + maskvalue = ((_errdict[divide] << SHIFT_DIVIDEBYZERO) + + (_errdict[over] << SHIFT_OVERFLOW) + + (_errdict[under] << SHIFT_UNDERFLOW) + + (_errdict[invalid] << SHIFT_INVALID)) + + pyvals[1] = maskvalue + umath.seterrobj(pyvals) + return old + + +@set_module('numpy') +def geterr(): + """ + Get the current way of handling floating-point errors. + + Returns + ------- + res : dict + A dictionary with keys "divide", "over", "under", and "invalid", + whose values are from the strings "ignore", "print", "log", "warn", + "raise", and "call". The keys represent possible floating-point + exceptions, and the values define how these exceptions are handled. + + See Also + -------- + geterrcall, seterr, seterrcall + + Notes + ----- + For complete documentation of the types of floating-point exceptions and + treatment options, see `seterr`. + + Examples + -------- + >>> np.geterr() + {'divide': 'warn', 'over': 'warn', 'under': 'ignore', 'invalid': 'warn'} + >>> np.arange(3.) / np.arange(3.) + array([nan, 1., 1.]) + + >>> oldsettings = np.seterr(all='warn', over='raise') + >>> np.geterr() + {'divide': 'warn', 'over': 'raise', 'under': 'warn', 'invalid': 'warn'} + >>> np.arange(3.) / np.arange(3.) + array([nan, 1., 1.]) + + """ + maskvalue = umath.geterrobj()[1] + mask = 7 + res = {} + val = (maskvalue >> SHIFT_DIVIDEBYZERO) & mask + res['divide'] = _errdict_rev[val] + val = (maskvalue >> SHIFT_OVERFLOW) & mask + res['over'] = _errdict_rev[val] + val = (maskvalue >> SHIFT_UNDERFLOW) & mask + res['under'] = _errdict_rev[val] + val = (maskvalue >> SHIFT_INVALID) & mask + res['invalid'] = _errdict_rev[val] + return res + + +@set_module('numpy') +def setbufsize(size): + """ + Set the size of the buffer used in ufuncs. 
+ + Parameters + ---------- + size : int + Size of buffer. + + """ + if size > 10e6: + raise ValueError("Buffer size, %s, is too big." % size) + if size < 5: + raise ValueError("Buffer size, %s, is too small." % size) + if size % 16 != 0: + raise ValueError("Buffer size, %s, is not a multiple of 16." % size) + + pyvals = umath.geterrobj() + old = getbufsize() + pyvals[0] = size + umath.seterrobj(pyvals) + return old + + +@set_module('numpy') +def getbufsize(): + """ + Return the size of the buffer used in ufuncs. + + Returns + ------- + getbufsize : int + Size of ufunc buffer in bytes. + + """ + return umath.geterrobj()[0] + + +@set_module('numpy') +def seterrcall(func): + """ + Set the floating-point error callback function or log object. + + There are two ways to capture floating-point error messages. The first + is to set the error-handler to 'call', using `seterr`. Then, set + the function to call using this function. + + The second is to set the error-handler to 'log', using `seterr`. + Floating-point errors then trigger a call to the 'write' method of + the provided object. + + Parameters + ---------- + func : callable f(err, flag) or object with write method + Function to call upon floating-point errors ('call'-mode) or + object whose 'write' method is used to log such message ('log'-mode). + + The call function takes two arguments. The first is a string describing + the type of error (such as "divide by zero", "overflow", "underflow", + or "invalid value"), and the second is the status flag. The flag is a + byte, whose four least-significant bits indicate the type of error, one + of "divide", "over", "under", "invalid":: + + [0 0 0 0 divide over under invalid] + + In other words, ``flags = divide + 2*over + 4*under + 8*invalid``. + + If an object is provided, its write method should take one argument, + a string. + + Returns + ------- + h : callable, log instance or None + The old error handler. + + See Also + -------- + seterr, geterr, geterrcall + + Examples + -------- + Callback upon error: + + >>> def err_handler(type, flag): + ... print("Floating point error (%s), with flag %s" % (type, flag)) + ... + + >>> saved_handler = np.seterrcall(err_handler) + >>> save_err = np.seterr(all='call') + + >>> np.array([1, 2, 3]) / 0.0 + Floating point error (divide by zero), with flag 1 + array([inf, inf, inf]) + + >>> np.seterrcall(saved_handler) + <function err_handler at 0x...> + >>> np.seterr(**save_err) + {'divide': 'call', 'over': 'call', 'under': 'call', 'invalid': 'call'} + + Log error message: + + >>> class Log: + ... def write(self, msg): + ... print("LOG: %s" % msg) + ... + + >>> log = Log() + >>> saved_handler = np.seterrcall(log) + >>> save_err = np.seterr(all='log') + + >>> np.array([1, 2, 3]) / 0.0 + LOG: Warning: divide by zero encountered in divide + array([inf, inf, inf]) + + >>> np.seterrcall(saved_handler) + <numpy.core.numeric.Log object at 0x...> + >>> np.seterr(**save_err) + {'divide': 'log', 'over': 'log', 'under': 'log', 'invalid': 'log'} + + """ + if func is not None and not isinstance(func, collections.abc.Callable): + if (not hasattr(func, 'write') or + not isinstance(func.write, collections.abc.Callable)): + raise ValueError("Only callable can be used as callback") + pyvals = umath.geterrobj() + old = geterrcall() + pyvals[2] = func + umath.seterrobj(pyvals) + return old + + +@set_module('numpy') +def geterrcall(): + """ + Return the current callback function used on floating-point errors. 
+ + When the error handling for a floating-point error (one of "divide", + "over", "under", or "invalid") is set to 'call' or 'log', the function + that is called or the log instance that is written to is returned by + `geterrcall`. This function or log instance has been set with + `seterrcall`. + + Returns + ------- + errobj : callable, log instance or None + The current error handler. If no handler was set through `seterrcall`, + ``None`` is returned. + + See Also + -------- + seterrcall, seterr, geterr + + Notes + ----- + For complete documentation of the types of floating-point exceptions and + treatment options, see `seterr`. + + Examples + -------- + >>> np.geterrcall() # we did not yet set a handler, returns None + + >>> oldsettings = np.seterr(all='call') + >>> def err_handler(type, flag): + ... print("Floating point error (%s), with flag %s" % (type, flag)) + >>> oldhandler = np.seterrcall(err_handler) + >>> np.array([1, 2, 3]) / 0.0 + Floating point error (divide by zero), with flag 1 + array([inf, inf, inf]) + + >>> cur_handler = np.geterrcall() + >>> cur_handler is err_handler + True + + """ + return umath.geterrobj()[2] + + +class _unspecified: + pass + + +_Unspecified = _unspecified() + + +@set_module('numpy') +class errstate(contextlib.ContextDecorator): + """ + errstate(**kwargs) + + Context manager for floating-point error handling. + + Using an instance of `errstate` as a context manager allows statements in + that context to execute with a known error handling behavior. Upon entering + the context the error handling is set with `seterr` and `seterrcall`, and + upon exiting it is reset to what it was before. + + .. versionchanged:: 1.17.0 + `errstate` is also usable as a function decorator, saving + a level of indentation if an entire function is wrapped. + See :py:class:`contextlib.ContextDecorator` for more information. + + Parameters + ---------- + kwargs : {divide, over, under, invalid} + Keyword arguments. The valid keywords are the possible floating-point + exceptions. Each keyword should have a string value that defines the + treatment for the particular error. Possible values are + {'ignore', 'warn', 'raise', 'call', 'print', 'log'}. + + See Also + -------- + seterr, geterr, seterrcall, geterrcall + + Notes + ----- + For complete documentation of the types of floating-point exceptions and + treatment options, see `seterr`. + + Examples + -------- + >>> olderr = np.seterr(all='ignore') # Set error handling to known state. + + >>> np.arange(3) / 0. + array([nan, inf, inf]) + >>> with np.errstate(divide='warn'): + ... np.arange(3) / 0. + array([nan, inf, inf]) + + >>> np.sqrt(-1) + nan + >>> with np.errstate(invalid='raise'): + ... 
np.sqrt(-1) + Traceback (most recent call last): + File "<stdin>", line 2, in <module> + FloatingPointError: invalid value encountered in sqrt + + Outside the context the error handling behavior has not changed: + + >>> np.geterr() + {'divide': 'ignore', 'over': 'ignore', 'under': 'ignore', 'invalid': 'ignore'} + + """ + + def __init__(self, *, call=_Unspecified, **kwargs): + self.call = call + self.kwargs = kwargs + + def __enter__(self): + self.oldstate = seterr(**self.kwargs) + if self.call is not _Unspecified: + self.oldcall = seterrcall(self.call) + + def __exit__(self, *exc_info): + seterr(**self.oldstate) + if self.call is not _Unspecified: + seterrcall(self.oldcall) + + +def _setdef(): + defval = [UFUNC_BUFSIZE_DEFAULT, ERR_DEFAULT, None] + umath.seterrobj(defval) + + +# set the default values +_setdef() + + +NO_NEP50_WARNING = contextvars.ContextVar("_no_nep50_warning", default=False) + +@set_module('numpy') +@contextlib.contextmanager +def _no_nep50_warning(): + """ + Context manager to disable NEP 50 warnings. This context manager is + only relevant if the NEP 50 warnings are enabled globally (which is not + thread/context safe). + + This warning context manager itself is fully safe, however. + """ + token = NO_NEP50_WARNING.set(True) + try: + yield + finally: + NO_NEP50_WARNING.reset(token) diff --git a/.venv/lib/python3.12/site-packages/numpy/core/_ufunc_config.pyi b/.venv/lib/python3.12/site-packages/numpy/core/_ufunc_config.pyi new file mode 100644 index 00000000..f5650450 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/_ufunc_config.pyi @@ -0,0 +1,37 @@ +from collections.abc import Callable +from typing import Any, Literal, TypedDict + +from numpy import _SupportsWrite + +_ErrKind = Literal["ignore", "warn", "raise", "call", "print", "log"] +_ErrFunc = Callable[[str, int], Any] + +class _ErrDict(TypedDict): + divide: _ErrKind + over: _ErrKind + under: _ErrKind + invalid: _ErrKind + +class _ErrDictOptional(TypedDict, total=False): + all: None | _ErrKind + divide: None | _ErrKind + over: None | _ErrKind + under: None | _ErrKind + invalid: None | _ErrKind + +def seterr( + all: None | _ErrKind = ..., + divide: None | _ErrKind = ..., + over: None | _ErrKind = ..., + under: None | _ErrKind = ..., + invalid: None | _ErrKind = ..., +) -> _ErrDict: ... +def geterr() -> _ErrDict: ... +def setbufsize(size: int) -> int: ... +def getbufsize() -> int: ... +def seterrcall( + func: None | _ErrFunc | _SupportsWrite[str] +) -> None | _ErrFunc | _SupportsWrite[str]: ... +def geterrcall() -> None | _ErrFunc | _SupportsWrite[str]: ... 
+ +# See `numpy/__init__.pyi` for the `errstate` class and `no_nep5_warnings` diff --git a/.venv/lib/python3.12/site-packages/numpy/core/_umath_tests.cpython-312-x86_64-linux-gnu.so b/.venv/lib/python3.12/site-packages/numpy/core/_umath_tests.cpython-312-x86_64-linux-gnu.so new file mode 100755 index 00000000..3eaa83ed --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/_umath_tests.cpython-312-x86_64-linux-gnu.so Binary files differdiff --git a/.venv/lib/python3.12/site-packages/numpy/core/arrayprint.py b/.venv/lib/python3.12/site-packages/numpy/core/arrayprint.py new file mode 100644 index 00000000..62cd5270 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/arrayprint.py @@ -0,0 +1,1725 @@ +"""Array printing function + +$Id: arrayprint.py,v 1.9 2005/09/13 13:58:44 teoliphant Exp $ + +""" +__all__ = ["array2string", "array_str", "array_repr", "set_string_function", + "set_printoptions", "get_printoptions", "printoptions", + "format_float_positional", "format_float_scientific"] +__docformat__ = 'restructuredtext' + +# +# Written by Konrad Hinsen <hinsenk@ere.umontreal.ca> +# last revision: 1996-3-13 +# modified by Jim Hugunin 1997-3-3 for repr's and str's (and other details) +# and by Perry Greenfield 2000-4-1 for numarray +# and by Travis Oliphant 2005-8-22 for numpy + + +# Note: Both scalartypes.c.src and arrayprint.py implement strs for numpy +# scalars but for different purposes. scalartypes.c.src has str/reprs for when +# the scalar is printed on its own, while arrayprint.py has strs for when +# scalars are printed inside an ndarray. Only the latter strs are currently +# user-customizable. + +import functools +import numbers +import sys +try: + from _thread import get_ident +except ImportError: + from _dummy_thread import get_ident + +import numpy as np +from . import numerictypes as _nt +from .umath import absolute, isinf, isfinite, isnat +from . import multiarray +from .multiarray import (array, dragon4_positional, dragon4_scientific, + datetime_as_string, datetime_data, ndarray, + set_legacy_print_mode) +from .fromnumeric import any +from .numeric import concatenate, asarray, errstate +from .numerictypes import (longlong, intc, int_, float_, complex_, bool_, + flexible) +from .overrides import array_function_dispatch, set_module +import operator +import warnings +import contextlib + +_format_options = { + 'edgeitems': 3, # repr N leading and trailing items of each dimension + 'threshold': 1000, # total items > triggers array summarization + 'floatmode': 'maxprec', + 'precision': 8, # precision of floating point representations + 'suppress': False, # suppress printing small floating values in exp format + 'linewidth': 75, + 'nanstr': 'nan', + 'infstr': 'inf', + 'sign': '-', + 'formatter': None, + # Internally stored as an int to simplify comparisons; converted from/to + # str/False on the way in/out. + 'legacy': sys.maxsize} + +def _make_options_dict(precision=None, threshold=None, edgeitems=None, + linewidth=None, suppress=None, nanstr=None, infstr=None, + sign=None, formatter=None, floatmode=None, legacy=None): + """ + Make a dictionary out of the non-None arguments, plus conversion of + *legacy* and sanity checks. 
+ """ + + options = {k: v for k, v in locals().items() if v is not None} + + if suppress is not None: + options['suppress'] = bool(suppress) + + modes = ['fixed', 'unique', 'maxprec', 'maxprec_equal'] + if floatmode not in modes + [None]: + raise ValueError("floatmode option must be one of " + + ", ".join('"{}"'.format(m) for m in modes)) + + if sign not in [None, '-', '+', ' ']: + raise ValueError("sign option must be one of ' ', '+', or '-'") + + if legacy == False: + options['legacy'] = sys.maxsize + elif legacy == '1.13': + options['legacy'] = 113 + elif legacy == '1.21': + options['legacy'] = 121 + elif legacy is None: + pass # OK, do nothing. + else: + warnings.warn( + "legacy printing option can currently only be '1.13', '1.21', or " + "`False`", stacklevel=3) + + if threshold is not None: + # forbid the bad threshold arg suggested by stack overflow, gh-12351 + if not isinstance(threshold, numbers.Number): + raise TypeError("threshold must be numeric") + if np.isnan(threshold): + raise ValueError("threshold must be non-NAN, try " + "sys.maxsize for untruncated representation") + + if precision is not None: + # forbid the bad precision arg as suggested by issue #18254 + try: + options['precision'] = operator.index(precision) + except TypeError as e: + raise TypeError('precision must be an integer') from e + + return options + + +@set_module('numpy') +def set_printoptions(precision=None, threshold=None, edgeitems=None, + linewidth=None, suppress=None, nanstr=None, infstr=None, + formatter=None, sign=None, floatmode=None, *, legacy=None): + """ + Set printing options. + + These options determine the way floating point numbers, arrays and + other NumPy objects are displayed. + + Parameters + ---------- + precision : int or None, optional + Number of digits of precision for floating point output (default 8). + May be None if `floatmode` is not `fixed`, to print as many digits as + necessary to uniquely specify the value. + threshold : int, optional + Total number of array elements which trigger summarization + rather than full repr (default 1000). + To always use the full repr without summarization, pass `sys.maxsize`. + edgeitems : int, optional + Number of array items in summary at beginning and end of + each dimension (default 3). + linewidth : int, optional + The number of characters per line for the purpose of inserting + line breaks (default 75). + suppress : bool, optional + If True, always print floating point numbers using fixed point + notation, in which case numbers equal to zero in the current precision + will print as zero. If False, then scientific notation is used when + absolute value of the smallest number is < 1e-4 or the ratio of the + maximum absolute value to the minimum is > 1e3. The default is False. + nanstr : str, optional + String representation of floating point not-a-number (default nan). + infstr : str, optional + String representation of floating point infinity (default inf). + sign : string, either '-', '+', or ' ', optional + Controls printing of the sign of floating-point types. If '+', always + print the sign of positive values. If ' ', always prints a space + (whitespace character) in the sign position of positive values. If + '-', omit the sign character of positive values. (default '-') + formatter : dict of callables, optional + If not None, the keys should indicate the type(s) that the respective + formatting function applies to. Callables should return a string. 
+ Types that are not specified (by their corresponding keys) are handled + by the default formatters. Individual types for which a formatter + can be set are: + + - 'bool' + - 'int' + - 'timedelta' : a `numpy.timedelta64` + - 'datetime' : a `numpy.datetime64` + - 'float' + - 'longfloat' : 128-bit floats + - 'complexfloat' + - 'longcomplexfloat' : composed of two 128-bit floats + - 'numpystr' : types `numpy.bytes_` and `numpy.str_` + - 'object' : `np.object_` arrays + + Other keys that can be used to set a group of types at once are: + + - 'all' : sets all types + - 'int_kind' : sets 'int' + - 'float_kind' : sets 'float' and 'longfloat' + - 'complex_kind' : sets 'complexfloat' and 'longcomplexfloat' + - 'str_kind' : sets 'numpystr' + floatmode : str, optional + Controls the interpretation of the `precision` option for + floating-point types. Can take the following values + (default maxprec_equal): + + * 'fixed': Always print exactly `precision` fractional digits, + even if this would print more or fewer digits than + necessary to specify the value uniquely. + * 'unique': Print the minimum number of fractional digits necessary + to represent each value uniquely. Different elements may + have a different number of digits. The value of the + `precision` option is ignored. + * 'maxprec': Print at most `precision` fractional digits, but if + an element can be uniquely represented with fewer digits + only print it with that many. + * 'maxprec_equal': Print at most `precision` fractional digits, + but if every element in the array can be uniquely + represented with an equal number of fewer digits, use that + many digits for all elements. + legacy : string or `False`, optional + If set to the string `'1.13'` enables 1.13 legacy printing mode. This + approximates numpy 1.13 print output by including a space in the sign + position of floats and different behavior for 0d arrays. This also + enables 1.21 legacy printing mode (described below). + + If set to the string `'1.21'` enables 1.21 legacy printing mode. This + approximates numpy 1.21 print output of complex structured dtypes + by not inserting spaces after commas that separate fields and after + colons. + + If set to `False`, disables legacy mode. + + Unrecognized strings will be ignored with a warning for forward + compatibility. + + .. versionadded:: 1.14.0 + .. versionchanged:: 1.22.0 + + See Also + -------- + get_printoptions, printoptions, set_string_function, array2string + + Notes + ----- + `formatter` is always reset with a call to `set_printoptions`. + + Use `printoptions` as a context manager to set the values temporarily. + + Examples + -------- + Floating point precision can be set: + + >>> np.set_printoptions(precision=4) + >>> np.array([1.123456789]) + [1.1235] + + Long arrays can be summarised: + + >>> np.set_printoptions(threshold=5) + >>> np.arange(10) + array([0, 1, 2, ..., 7, 8, 9]) + + Small results can be suppressed: + + >>> eps = np.finfo(float).eps + >>> x = np.arange(4.) 
+ >>> x**2 - (x + eps)**2 + array([-4.9304e-32, -4.4409e-16, 0.0000e+00, 0.0000e+00]) + >>> np.set_printoptions(suppress=True) + >>> x**2 - (x + eps)**2 + array([-0., -0., 0., 0.]) + + A custom formatter can be used to display array elements as desired: + + >>> np.set_printoptions(formatter={'all':lambda x: 'int: '+str(-x)}) + >>> x = np.arange(3) + >>> x + array([int: 0, int: -1, int: -2]) + >>> np.set_printoptions() # formatter gets reset + >>> x + array([0, 1, 2]) + + To put back the default options, you can use: + + >>> np.set_printoptions(edgeitems=3, infstr='inf', + ... linewidth=75, nanstr='nan', precision=8, + ... suppress=False, threshold=1000, formatter=None) + + Also to temporarily override options, use `printoptions` as a context manager: + + >>> with np.printoptions(precision=2, suppress=True, threshold=5): + ... np.linspace(0, 10, 10) + array([ 0. , 1.11, 2.22, ..., 7.78, 8.89, 10. ]) + + """ + opt = _make_options_dict(precision, threshold, edgeitems, linewidth, + suppress, nanstr, infstr, sign, formatter, + floatmode, legacy) + # formatter is always reset + opt['formatter'] = formatter + _format_options.update(opt) + + # set the C variable for legacy mode + if _format_options['legacy'] == 113: + set_legacy_print_mode(113) + # reset the sign option in legacy mode to avoid confusion + _format_options['sign'] = '-' + elif _format_options['legacy'] == 121: + set_legacy_print_mode(121) + elif _format_options['legacy'] == sys.maxsize: + set_legacy_print_mode(0) + + +@set_module('numpy') +def get_printoptions(): + """ + Return the current print options. + + Returns + ------- + print_opts : dict + Dictionary of current print options with keys + + - precision : int + - threshold : int + - edgeitems : int + - linewidth : int + - suppress : bool + - nanstr : str + - infstr : str + - formatter : dict of callables + - sign : str + + For a full description of these options, see `set_printoptions`. + + See Also + -------- + set_printoptions, printoptions, set_string_function + + """ + opts = _format_options.copy() + opts['legacy'] = { + 113: '1.13', 121: '1.21', sys.maxsize: False, + }[opts['legacy']] + return opts + + +def _get_legacy_print_mode(): + """Return the legacy print mode as an int.""" + return _format_options['legacy'] + + +@set_module('numpy') +@contextlib.contextmanager +def printoptions(*args, **kwargs): + """Context manager for setting print options. + + Set print options for the scope of the `with` block, and restore the old + options at the end. See `set_printoptions` for the full description of + available options. + + Examples + -------- + + >>> from numpy.testing import assert_equal + >>> with np.printoptions(precision=2): + ... np.array([2.0]) / 3 + array([0.67]) + + The `as`-clause of the `with`-statement gives the current print options: + + >>> with np.printoptions(precision=2) as opts: + ... assert_equal(opts, np.get_printoptions()) + + See Also + -------- + set_printoptions, get_printoptions + + """ + opts = np.get_printoptions() + try: + np.set_printoptions(*args, **kwargs) + yield np.get_printoptions() + finally: + np.set_printoptions(**opts) + + +def _leading_trailing(a, edgeitems, index=()): + """ + Keep only the N-D corners (leading and trailing edges) of an array. + + Should be passed a base-class ndarray, since it makes no guarantees about + preserving subclasses. 
+ """ + axis = len(index) + if axis == a.ndim: + return a[index] + + if a.shape[axis] > 2*edgeitems: + return concatenate(( + _leading_trailing(a, edgeitems, index + np.index_exp[ :edgeitems]), + _leading_trailing(a, edgeitems, index + np.index_exp[-edgeitems:]) + ), axis=axis) + else: + return _leading_trailing(a, edgeitems, index + np.index_exp[:]) + + +def _object_format(o): + """ Object arrays containing lists should be printed unambiguously """ + if type(o) is list: + fmt = 'list({!r})' + else: + fmt = '{!r}' + return fmt.format(o) + +def repr_format(x): + return repr(x) + +def str_format(x): + return str(x) + +def _get_formatdict(data, *, precision, floatmode, suppress, sign, legacy, + formatter, **kwargs): + # note: extra arguments in kwargs are ignored + + # wrapped in lambdas to avoid taking a code path with the wrong type of data + formatdict = { + 'bool': lambda: BoolFormat(data), + 'int': lambda: IntegerFormat(data), + 'float': lambda: FloatingFormat( + data, precision, floatmode, suppress, sign, legacy=legacy), + 'longfloat': lambda: FloatingFormat( + data, precision, floatmode, suppress, sign, legacy=legacy), + 'complexfloat': lambda: ComplexFloatingFormat( + data, precision, floatmode, suppress, sign, legacy=legacy), + 'longcomplexfloat': lambda: ComplexFloatingFormat( + data, precision, floatmode, suppress, sign, legacy=legacy), + 'datetime': lambda: DatetimeFormat(data, legacy=legacy), + 'timedelta': lambda: TimedeltaFormat(data), + 'object': lambda: _object_format, + 'void': lambda: str_format, + 'numpystr': lambda: repr_format} + + # we need to wrap values in `formatter` in a lambda, so that the interface + # is the same as the above values. + def indirect(x): + return lambda: x + + if formatter is not None: + fkeys = [k for k in formatter.keys() if formatter[k] is not None] + if 'all' in fkeys: + for key in formatdict.keys(): + formatdict[key] = indirect(formatter['all']) + if 'int_kind' in fkeys: + for key in ['int']: + formatdict[key] = indirect(formatter['int_kind']) + if 'float_kind' in fkeys: + for key in ['float', 'longfloat']: + formatdict[key] = indirect(formatter['float_kind']) + if 'complex_kind' in fkeys: + for key in ['complexfloat', 'longcomplexfloat']: + formatdict[key] = indirect(formatter['complex_kind']) + if 'str_kind' in fkeys: + formatdict['numpystr'] = indirect(formatter['str_kind']) + for key in formatdict.keys(): + if key in fkeys: + formatdict[key] = indirect(formatter[key]) + + return formatdict + +def _get_format_function(data, **options): + """ + find the right formatting function for the dtype_ + """ + dtype_ = data.dtype + dtypeobj = dtype_.type + formatdict = _get_formatdict(data, **options) + if dtypeobj is None: + return formatdict["numpystr"]() + elif issubclass(dtypeobj, _nt.bool_): + return formatdict['bool']() + elif issubclass(dtypeobj, _nt.integer): + if issubclass(dtypeobj, _nt.timedelta64): + return formatdict['timedelta']() + else: + return formatdict['int']() + elif issubclass(dtypeobj, _nt.floating): + if issubclass(dtypeobj, _nt.longfloat): + return formatdict['longfloat']() + else: + return formatdict['float']() + elif issubclass(dtypeobj, _nt.complexfloating): + if issubclass(dtypeobj, _nt.clongfloat): + return formatdict['longcomplexfloat']() + else: + return formatdict['complexfloat']() + elif issubclass(dtypeobj, (_nt.str_, _nt.bytes_)): + return formatdict['numpystr']() + elif issubclass(dtypeobj, _nt.datetime64): + return formatdict['datetime']() + elif issubclass(dtypeobj, _nt.object_): + return formatdict['object']() 
+ elif issubclass(dtypeobj, _nt.void): + if dtype_.names is not None: + return StructuredVoidFormat.from_data(data, **options) + else: + return formatdict['void']() + else: + return formatdict['numpystr']() + + +def _recursive_guard(fillvalue='...'): + """ + Like the python 3.2 reprlib.recursive_repr, but forwards *args and **kwargs + + Decorates a function such that if it calls itself with the same first + argument, it returns `fillvalue` instead of recursing. + + Largely copied from reprlib.recursive_repr + """ + + def decorating_function(f): + repr_running = set() + + @functools.wraps(f) + def wrapper(self, *args, **kwargs): + key = id(self), get_ident() + if key in repr_running: + return fillvalue + repr_running.add(key) + try: + return f(self, *args, **kwargs) + finally: + repr_running.discard(key) + + return wrapper + + return decorating_function + + +# gracefully handle recursive calls, when object arrays contain themselves +@_recursive_guard() +def _array2string(a, options, separator=' ', prefix=""): + # The formatter __init__s in _get_format_function cannot deal with + # subclasses yet, and we also need to avoid recursion issues in + # _formatArray with subclasses which return 0d arrays in place of scalars + data = asarray(a) + if a.shape == (): + a = data + + if a.size > options['threshold']: + summary_insert = "..." + data = _leading_trailing(data, options['edgeitems']) + else: + summary_insert = "" + + # find the right formatting function for the array + format_function = _get_format_function(data, **options) + + # skip over "[" + next_line_prefix = " " + # skip over array( + next_line_prefix += " "*len(prefix) + + lst = _formatArray(a, format_function, options['linewidth'], + next_line_prefix, separator, options['edgeitems'], + summary_insert, options['legacy']) + return lst + + +def _array2string_dispatcher( + a, max_line_width=None, precision=None, + suppress_small=None, separator=None, prefix=None, + style=None, formatter=None, threshold=None, + edgeitems=None, sign=None, floatmode=None, suffix=None, + *, legacy=None): + return (a,) + + +@array_function_dispatch(_array2string_dispatcher, module='numpy') +def array2string(a, max_line_width=None, precision=None, + suppress_small=None, separator=' ', prefix="", + style=np._NoValue, formatter=None, threshold=None, + edgeitems=None, sign=None, floatmode=None, suffix="", + *, legacy=None): + """ + Return a string representation of an array. + + Parameters + ---------- + a : ndarray + Input array. + max_line_width : int, optional + Inserts newlines if text is longer than `max_line_width`. + Defaults to ``numpy.get_printoptions()['linewidth']``. + precision : int or None, optional + Floating point precision. + Defaults to ``numpy.get_printoptions()['precision']``. + suppress_small : bool, optional + Represent numbers "very close" to zero as zero; default is False. + Very close is defined by precision: if the precision is 8, e.g., + numbers smaller (in absolute value) than 5e-9 are represented as + zero. + Defaults to ``numpy.get_printoptions()['suppress']``. + separator : str, optional + Inserted between elements. + prefix : str, optional + suffix : str, optional + The length of the prefix and suffix strings are used to respectively + align and wrap the output. An array is typically printed as:: + + prefix + array2string(a) + suffix + + The output is left-padded by the length of the prefix string, and + wrapping is forced at the column ``max_line_width - len(suffix)``. 
+ It should be noted that the content of prefix and suffix strings are + not included in the output. + style : _NoValue, optional + Has no effect, do not use. + + .. deprecated:: 1.14.0 + formatter : dict of callables, optional + If not None, the keys should indicate the type(s) that the respective + formatting function applies to. Callables should return a string. + Types that are not specified (by their corresponding keys) are handled + by the default formatters. Individual types for which a formatter + can be set are: + + - 'bool' + - 'int' + - 'timedelta' : a `numpy.timedelta64` + - 'datetime' : a `numpy.datetime64` + - 'float' + - 'longfloat' : 128-bit floats + - 'complexfloat' + - 'longcomplexfloat' : composed of two 128-bit floats + - 'void' : type `numpy.void` + - 'numpystr' : types `numpy.bytes_` and `numpy.str_` + + Other keys that can be used to set a group of types at once are: + + - 'all' : sets all types + - 'int_kind' : sets 'int' + - 'float_kind' : sets 'float' and 'longfloat' + - 'complex_kind' : sets 'complexfloat' and 'longcomplexfloat' + - 'str_kind' : sets 'numpystr' + threshold : int, optional + Total number of array elements which trigger summarization + rather than full repr. + Defaults to ``numpy.get_printoptions()['threshold']``. + edgeitems : int, optional + Number of array items in summary at beginning and end of + each dimension. + Defaults to ``numpy.get_printoptions()['edgeitems']``. + sign : string, either '-', '+', or ' ', optional + Controls printing of the sign of floating-point types. If '+', always + print the sign of positive values. If ' ', always prints a space + (whitespace character) in the sign position of positive values. If + '-', omit the sign character of positive values. + Defaults to ``numpy.get_printoptions()['sign']``. + floatmode : str, optional + Controls the interpretation of the `precision` option for + floating-point types. + Defaults to ``numpy.get_printoptions()['floatmode']``. + Can take the following values: + + - 'fixed': Always print exactly `precision` fractional digits, + even if this would print more or fewer digits than + necessary to specify the value uniquely. + - 'unique': Print the minimum number of fractional digits necessary + to represent each value uniquely. Different elements may + have a different number of digits. The value of the + `precision` option is ignored. + - 'maxprec': Print at most `precision` fractional digits, but if + an element can be uniquely represented with fewer digits + only print it with that many. + - 'maxprec_equal': Print at most `precision` fractional digits, + but if every element in the array can be uniquely + represented with an equal number of fewer digits, use that + many digits for all elements. + legacy : string or `False`, optional + If set to the string `'1.13'` enables 1.13 legacy printing mode. This + approximates numpy 1.13 print output by including a space in the sign + position of floats and different behavior for 0d arrays. If set to + `False`, disables legacy mode. Unrecognized strings will be ignored + with a warning for forward compatibility. + + .. versionadded:: 1.14.0 + + Returns + ------- + array_str : str + String representation of the array. + + Raises + ------ + TypeError + if a callable in `formatter` does not return a string. + + See Also + -------- + array_str, array_repr, set_printoptions, get_printoptions + + Notes + ----- + If a formatter is specified for a certain type, the `precision` keyword is + ignored for that type. 
+ + This is a very flexible function; `array_repr` and `array_str` are using + `array2string` internally so keywords with the same name should work + identically in all three functions. + + Examples + -------- + >>> x = np.array([1e-16,1,2,3]) + >>> np.array2string(x, precision=2, separator=',', + ... suppress_small=True) + '[0.,1.,2.,3.]' + + >>> x = np.arange(3.) + >>> np.array2string(x, formatter={'float_kind':lambda x: "%.2f" % x}) + '[0.00 1.00 2.00]' + + >>> x = np.arange(3) + >>> np.array2string(x, formatter={'int':lambda x: hex(x)}) + '[0x0 0x1 0x2]' + + """ + + overrides = _make_options_dict(precision, threshold, edgeitems, + max_line_width, suppress_small, None, None, + sign, formatter, floatmode, legacy) + options = _format_options.copy() + options.update(overrides) + + if options['legacy'] <= 113: + if style is np._NoValue: + style = repr + + if a.shape == () and a.dtype.names is None: + return style(a.item()) + elif style is not np._NoValue: + # Deprecation 11-9-2017 v1.14 + warnings.warn("'style' argument is deprecated and no longer functional" + " except in 1.13 'legacy' mode", + DeprecationWarning, stacklevel=2) + + if options['legacy'] > 113: + options['linewidth'] -= len(suffix) + + # treat as a null array if any of shape elements == 0 + if a.size == 0: + return "[]" + + return _array2string(a, options, separator, prefix) + + +def _extendLine(s, line, word, line_width, next_line_prefix, legacy): + needs_wrap = len(line) + len(word) > line_width + if legacy > 113: + # don't wrap lines if it won't help + if len(line) <= len(next_line_prefix): + needs_wrap = False + + if needs_wrap: + s += line.rstrip() + "\n" + line = next_line_prefix + line += word + return s, line + + +def _extendLine_pretty(s, line, word, line_width, next_line_prefix, legacy): + """ + Extends line with nicely formatted (possibly multi-line) string ``word``. + """ + words = word.splitlines() + if len(words) == 1 or legacy <= 113: + return _extendLine(s, line, word, line_width, next_line_prefix, legacy) + + max_word_length = max(len(word) for word in words) + if (len(line) + max_word_length > line_width and + len(line) > len(next_line_prefix)): + s += line.rstrip() + '\n' + line = next_line_prefix + words[0] + indent = next_line_prefix + else: + indent = len(line)*' ' + line += words[0] + + for word in words[1::]: + s += line.rstrip() + '\n' + line = indent + word + + suffix_length = max_word_length - len(words[-1]) + line += suffix_length*' ' + + return s, line + +def _formatArray(a, format_function, line_width, next_line_prefix, + separator, edge_items, summary_insert, legacy): + """formatArray is designed for two modes of operation: + + 1. Full output + + 2. Summarized output + + """ + def recurser(index, hanging_indent, curr_width): + """ + By using this local function, we don't need to recurse with all the + arguments. 
Since this function is not created recursively, the cost is + not significant + """ + axis = len(index) + axes_left = a.ndim - axis + + if axes_left == 0: + return format_function(a[index]) + + # when recursing, add a space to align with the [ added, and reduce the + # length of the line by 1 + next_hanging_indent = hanging_indent + ' ' + if legacy <= 113: + next_width = curr_width + else: + next_width = curr_width - len(']') + + a_len = a.shape[axis] + show_summary = summary_insert and 2*edge_items < a_len + if show_summary: + leading_items = edge_items + trailing_items = edge_items + else: + leading_items = 0 + trailing_items = a_len + + # stringify the array with the hanging indent on the first line too + s = '' + + # last axis (rows) - wrap elements if they would not fit on one line + if axes_left == 1: + # the length up until the beginning of the separator / bracket + if legacy <= 113: + elem_width = curr_width - len(separator.rstrip()) + else: + elem_width = curr_width - max(len(separator.rstrip()), len(']')) + + line = hanging_indent + for i in range(leading_items): + word = recurser(index + (i,), next_hanging_indent, next_width) + s, line = _extendLine_pretty( + s, line, word, elem_width, hanging_indent, legacy) + line += separator + + if show_summary: + s, line = _extendLine( + s, line, summary_insert, elem_width, hanging_indent, legacy) + if legacy <= 113: + line += ", " + else: + line += separator + + for i in range(trailing_items, 1, -1): + word = recurser(index + (-i,), next_hanging_indent, next_width) + s, line = _extendLine_pretty( + s, line, word, elem_width, hanging_indent, legacy) + line += separator + + if legacy <= 113: + # width of the separator is not considered on 1.13 + elem_width = curr_width + word = recurser(index + (-1,), next_hanging_indent, next_width) + s, line = _extendLine_pretty( + s, line, word, elem_width, hanging_indent, legacy) + + s += line + + # other axes - insert newlines between rows + else: + s = '' + line_sep = separator.rstrip() + '\n'*(axes_left - 1) + + for i in range(leading_items): + nested = recurser(index + (i,), next_hanging_indent, next_width) + s += hanging_indent + nested + line_sep + + if show_summary: + if legacy <= 113: + # trailing space, fixed nbr of newlines, and fixed separator + s += hanging_indent + summary_insert + ", \n" + else: + s += hanging_indent + summary_insert + line_sep + + for i in range(trailing_items, 1, -1): + nested = recurser(index + (-i,), next_hanging_indent, + next_width) + s += hanging_indent + nested + line_sep + + nested = recurser(index + (-1,), next_hanging_indent, next_width) + s += hanging_indent + nested + + # remove the hanging indent, and wrap in [] + s = '[' + s[len(hanging_indent):] + ']' + return s + + try: + # invoke the recursive part with an initial index and prefix + return recurser(index=(), + hanging_indent=next_line_prefix, + curr_width=line_width) + finally: + # recursive closures have a cyclic reference to themselves, which + # requires gc to collect (gh-10620). 
To avoid this problem, for + # performance and PyPy friendliness, we break the cycle: + recurser = None + +def _none_or_positive_arg(x, name): + if x is None: + return -1 + if x < 0: + raise ValueError("{} must be >= 0".format(name)) + return x + +class FloatingFormat: + """ Formatter for subtypes of np.floating """ + def __init__(self, data, precision, floatmode, suppress_small, sign=False, + *, legacy=None): + # for backcompatibility, accept bools + if isinstance(sign, bool): + sign = '+' if sign else '-' + + self._legacy = legacy + if self._legacy <= 113: + # when not 0d, legacy does not support '-' + if data.shape != () and sign == '-': + sign = ' ' + + self.floatmode = floatmode + if floatmode == 'unique': + self.precision = None + else: + self.precision = precision + + self.precision = _none_or_positive_arg(self.precision, 'precision') + + self.suppress_small = suppress_small + self.sign = sign + self.exp_format = False + self.large_exponent = False + + self.fillFormat(data) + + def fillFormat(self, data): + # only the finite values are used to compute the number of digits + finite_vals = data[isfinite(data)] + + # choose exponential mode based on the non-zero finite values: + abs_non_zero = absolute(finite_vals[finite_vals != 0]) + if len(abs_non_zero) != 0: + max_val = np.max(abs_non_zero) + min_val = np.min(abs_non_zero) + with errstate(over='ignore'): # division can overflow + if max_val >= 1.e8 or (not self.suppress_small and + (min_val < 0.0001 or max_val/min_val > 1000.)): + self.exp_format = True + + # do a first pass of printing all the numbers, to determine sizes + if len(finite_vals) == 0: + self.pad_left = 0 + self.pad_right = 0 + self.trim = '.' + self.exp_size = -1 + self.unique = True + self.min_digits = None + elif self.exp_format: + trim, unique = '.', True + if self.floatmode == 'fixed' or self._legacy <= 113: + trim, unique = 'k', False + strs = (dragon4_scientific(x, precision=self.precision, + unique=unique, trim=trim, sign=self.sign == '+') + for x in finite_vals) + frac_strs, _, exp_strs = zip(*(s.partition('e') for s in strs)) + int_part, frac_part = zip(*(s.split('.') for s in frac_strs)) + self.exp_size = max(len(s) for s in exp_strs) - 1 + + self.trim = 'k' + self.precision = max(len(s) for s in frac_part) + self.min_digits = self.precision + self.unique = unique + + # for back-compat with np 1.13, use 2 spaces & sign and full prec + if self._legacy <= 113: + self.pad_left = 3 + else: + # this should be only 1 or 2. Can be calculated from sign. + self.pad_left = max(len(s) for s in int_part) + # pad_right is only needed for nan length calculation + self.pad_right = self.exp_size + 2 + self.precision + else: + trim, unique = '.', True + if self.floatmode == 'fixed': + trim, unique = 'k', False + strs = (dragon4_positional(x, precision=self.precision, + fractional=True, + unique=unique, trim=trim, + sign=self.sign == '+') + for x in finite_vals) + int_part, frac_part = zip(*(s.split('.') for s in strs)) + if self._legacy <= 113: + self.pad_left = 1 + max(len(s.lstrip('-+')) for s in int_part) + else: + self.pad_left = max(len(s) for s in int_part) + self.pad_right = max(len(s) for s in frac_part) + self.exp_size = -1 + self.unique = unique + + if self.floatmode in ['fixed', 'maxprec_equal']: + self.precision = self.min_digits = self.pad_right + self.trim = 'k' + else: + self.trim = '.' 
+ self.min_digits = 0 + + if self._legacy > 113: + # account for sign = ' ' by adding one to pad_left + if self.sign == ' ' and not any(np.signbit(finite_vals)): + self.pad_left += 1 + + # if there are non-finite values, may need to increase pad_left + if data.size != finite_vals.size: + neginf = self.sign != '-' or any(data[isinf(data)] < 0) + nanlen = len(_format_options['nanstr']) + inflen = len(_format_options['infstr']) + neginf + offset = self.pad_right + 1 # +1 for decimal pt + self.pad_left = max(self.pad_left, nanlen - offset, inflen - offset) + + def __call__(self, x): + if not np.isfinite(x): + with errstate(invalid='ignore'): + if np.isnan(x): + sign = '+' if self.sign == '+' else '' + ret = sign + _format_options['nanstr'] + else: # isinf + sign = '-' if x < 0 else '+' if self.sign == '+' else '' + ret = sign + _format_options['infstr'] + return ' '*(self.pad_left + self.pad_right + 1 - len(ret)) + ret + + if self.exp_format: + return dragon4_scientific(x, + precision=self.precision, + min_digits=self.min_digits, + unique=self.unique, + trim=self.trim, + sign=self.sign == '+', + pad_left=self.pad_left, + exp_digits=self.exp_size) + else: + return dragon4_positional(x, + precision=self.precision, + min_digits=self.min_digits, + unique=self.unique, + fractional=True, + trim=self.trim, + sign=self.sign == '+', + pad_left=self.pad_left, + pad_right=self.pad_right) + + +@set_module('numpy') +def format_float_scientific(x, precision=None, unique=True, trim='k', + sign=False, pad_left=None, exp_digits=None, + min_digits=None): + """ + Format a floating-point scalar as a decimal string in scientific notation. + + Provides control over rounding, trimming and padding. Uses and assumes + IEEE unbiased rounding. Uses the "Dragon4" algorithm. + + Parameters + ---------- + x : python float or numpy floating scalar + Value to format. + precision : non-negative integer or None, optional + Maximum number of digits to print. May be None if `unique` is + `True`, but must be an integer if unique is `False`. + unique : boolean, optional + If `True`, use a digit-generation strategy which gives the shortest + representation which uniquely identifies the floating-point number from + other values of the same type, by judicious rounding. If `precision` + is given fewer digits than necessary can be printed. If `min_digits` + is given more can be printed, in which cases the last digit is rounded + with unbiased rounding. + If `False`, digits are generated as if printing an infinite-precision + value and stopping after `precision` digits, rounding the remaining + value with unbiased rounding + trim : one of 'k', '.', '0', '-', optional + Controls post-processing trimming of trailing digits, as follows: + + * 'k' : keep trailing zeros, keep decimal point (no trimming) + * '.' : trim all trailing zeros, leave decimal point + * '0' : trim all but the zero before the decimal point. Insert the + zero if it is missing. + * '-' : trim trailing zeros and any trailing decimal point + sign : boolean, optional + Whether to show the sign for positive values. + pad_left : non-negative integer, optional + Pad the left side of the string with whitespace until at least that + many characters are to the left of the decimal point. + exp_digits : non-negative integer, optional + Pad the exponent with zeros until it contains at least this many digits. + If omitted, the exponent will be at least 2 digits. + min_digits : non-negative integer or None, optional + Minimum number of digits to print. 
This only has an effect for + `unique=True`. In that case more digits than necessary to uniquely + identify the value may be printed and rounded unbiased. + + -- versionadded:: 1.21.0 + + Returns + ------- + rep : string + The string representation of the floating point value + + See Also + -------- + format_float_positional + + Examples + -------- + >>> np.format_float_scientific(np.float32(np.pi)) + '3.1415927e+00' + >>> s = np.float32(1.23e24) + >>> np.format_float_scientific(s, unique=False, precision=15) + '1.230000071797338e+24' + >>> np.format_float_scientific(s, exp_digits=4) + '1.23e+0024' + """ + precision = _none_or_positive_arg(precision, 'precision') + pad_left = _none_or_positive_arg(pad_left, 'pad_left') + exp_digits = _none_or_positive_arg(exp_digits, 'exp_digits') + min_digits = _none_or_positive_arg(min_digits, 'min_digits') + if min_digits > 0 and precision > 0 and min_digits > precision: + raise ValueError("min_digits must be less than or equal to precision") + return dragon4_scientific(x, precision=precision, unique=unique, + trim=trim, sign=sign, pad_left=pad_left, + exp_digits=exp_digits, min_digits=min_digits) + + +@set_module('numpy') +def format_float_positional(x, precision=None, unique=True, + fractional=True, trim='k', sign=False, + pad_left=None, pad_right=None, min_digits=None): + """ + Format a floating-point scalar as a decimal string in positional notation. + + Provides control over rounding, trimming and padding. Uses and assumes + IEEE unbiased rounding. Uses the "Dragon4" algorithm. + + Parameters + ---------- + x : python float or numpy floating scalar + Value to format. + precision : non-negative integer or None, optional + Maximum number of digits to print. May be None if `unique` is + `True`, but must be an integer if unique is `False`. + unique : boolean, optional + If `True`, use a digit-generation strategy which gives the shortest + representation which uniquely identifies the floating-point number from + other values of the same type, by judicious rounding. If `precision` + is given fewer digits than necessary can be printed, or if `min_digits` + is given more can be printed, in which cases the last digit is rounded + with unbiased rounding. + If `False`, digits are generated as if printing an infinite-precision + value and stopping after `precision` digits, rounding the remaining + value with unbiased rounding + fractional : boolean, optional + If `True`, the cutoffs of `precision` and `min_digits` refer to the + total number of digits after the decimal point, including leading + zeros. + If `False`, `precision` and `min_digits` refer to the total number of + significant digits, before or after the decimal point, ignoring leading + zeros. + trim : one of 'k', '.', '0', '-', optional + Controls post-processing trimming of trailing digits, as follows: + + * 'k' : keep trailing zeros, keep decimal point (no trimming) + * '.' : trim all trailing zeros, leave decimal point + * '0' : trim all but the zero before the decimal point. Insert the + zero if it is missing. + * '-' : trim trailing zeros and any trailing decimal point + sign : boolean, optional + Whether to show the sign for positive values. + pad_left : non-negative integer, optional + Pad the left side of the string with whitespace until at least that + many characters are to the left of the decimal point. + pad_right : non-negative integer, optional + Pad the right side of the string with whitespace until at least that + many characters are to the right of the decimal point. 
+ min_digits : non-negative integer or None, optional + Minimum number of digits to print. Only has an effect if `unique=True` + in which case additional digits past those necessary to uniquely + identify the value may be printed, rounding the last additional digit. + + -- versionadded:: 1.21.0 + + Returns + ------- + rep : string + The string representation of the floating point value + + See Also + -------- + format_float_scientific + + Examples + -------- + >>> np.format_float_positional(np.float32(np.pi)) + '3.1415927' + >>> np.format_float_positional(np.float16(np.pi)) + '3.14' + >>> np.format_float_positional(np.float16(0.3)) + '0.3' + >>> np.format_float_positional(np.float16(0.3), unique=False, precision=10) + '0.3000488281' + """ + precision = _none_or_positive_arg(precision, 'precision') + pad_left = _none_or_positive_arg(pad_left, 'pad_left') + pad_right = _none_or_positive_arg(pad_right, 'pad_right') + min_digits = _none_or_positive_arg(min_digits, 'min_digits') + if not fractional and precision == 0: + raise ValueError("precision must be greater than 0 if " + "fractional=False") + if min_digits > 0 and precision > 0 and min_digits > precision: + raise ValueError("min_digits must be less than or equal to precision") + return dragon4_positional(x, precision=precision, unique=unique, + fractional=fractional, trim=trim, + sign=sign, pad_left=pad_left, + pad_right=pad_right, min_digits=min_digits) + + +class IntegerFormat: + def __init__(self, data): + if data.size > 0: + max_str_len = max(len(str(np.max(data))), + len(str(np.min(data)))) + else: + max_str_len = 0 + self.format = '%{}d'.format(max_str_len) + + def __call__(self, x): + return self.format % x + + +class BoolFormat: + def __init__(self, data, **kwargs): + # add an extra space so " True" and "False" have the same length and + # array elements align nicely when printed, except in 0d arrays + self.truestr = ' True' if data.shape != () else 'True' + + def __call__(self, x): + return self.truestr if x else "False" + + +class ComplexFloatingFormat: + """ Formatter for subtypes of np.complexfloating """ + def __init__(self, x, precision, floatmode, suppress_small, + sign=False, *, legacy=None): + # for backcompatibility, accept bools + if isinstance(sign, bool): + sign = '+' if sign else '-' + + floatmode_real = floatmode_imag = floatmode + if legacy <= 113: + floatmode_real = 'maxprec_equal' + floatmode_imag = 'maxprec' + + self.real_format = FloatingFormat( + x.real, precision, floatmode_real, suppress_small, + sign=sign, legacy=legacy + ) + self.imag_format = FloatingFormat( + x.imag, precision, floatmode_imag, suppress_small, + sign='+', legacy=legacy + ) + + def __call__(self, x): + r = self.real_format(x.real) + i = self.imag_format(x.imag) + + # add the 'j' before the terminal whitespace in i + sp = len(i.rstrip()) + i = i[:sp] + 'j' + i[sp:] + + return r + i + + +class _TimelikeFormat: + def __init__(self, data): + non_nat = data[~isnat(data)] + if len(non_nat) > 0: + # Max str length of non-NaT elements + max_str_len = max(len(self._format_non_nat(np.max(non_nat))), + len(self._format_non_nat(np.min(non_nat)))) + else: + max_str_len = 0 + if len(non_nat) < data.size: + # data contains a NaT + max_str_len = max(max_str_len, 5) + self._format = '%{}s'.format(max_str_len) + self._nat = "'NaT'".rjust(max_str_len) + + def _format_non_nat(self, x): + # override in subclass + raise NotImplementedError + + def __call__(self, x): + if isnat(x): + return self._nat + else: + return self._format % self._format_non_nat(x) + + 
+class DatetimeFormat(_TimelikeFormat): + def __init__(self, x, unit=None, timezone=None, casting='same_kind', + legacy=False): + # Get the unit from the dtype + if unit is None: + if x.dtype.kind == 'M': + unit = datetime_data(x.dtype)[0] + else: + unit = 's' + + if timezone is None: + timezone = 'naive' + self.timezone = timezone + self.unit = unit + self.casting = casting + self.legacy = legacy + + # must be called after the above are configured + super().__init__(x) + + def __call__(self, x): + if self.legacy <= 113: + return self._format_non_nat(x) + return super().__call__(x) + + def _format_non_nat(self, x): + return "'%s'" % datetime_as_string(x, + unit=self.unit, + timezone=self.timezone, + casting=self.casting) + + +class TimedeltaFormat(_TimelikeFormat): + def _format_non_nat(self, x): + return str(x.astype('i8')) + + +class SubArrayFormat: + def __init__(self, format_function, **options): + self.format_function = format_function + self.threshold = options['threshold'] + self.edge_items = options['edgeitems'] + + def __call__(self, a): + self.summary_insert = "..." if a.size > self.threshold else "" + return self.format_array(a) + + def format_array(self, a): + if np.ndim(a) == 0: + return self.format_function(a) + + if self.summary_insert and a.shape[0] > 2*self.edge_items: + formatted = ( + [self.format_array(a_) for a_ in a[:self.edge_items]] + + [self.summary_insert] + + [self.format_array(a_) for a_ in a[-self.edge_items:]] + ) + else: + formatted = [self.format_array(a_) for a_ in a] + + return "[" + ", ".join(formatted) + "]" + + +class StructuredVoidFormat: + """ + Formatter for structured np.void objects. + + This does not work on structured alias types like np.dtype(('i4', 'i2,i2')), + as alias scalars lose their field information, and the implementation + relies upon np.void.__getitem__. + """ + def __init__(self, format_functions): + self.format_functions = format_functions + + @classmethod + def from_data(cls, data, **options): + """ + This is a second way to initialize StructuredVoidFormat, using the raw data + as input. Added to avoid changing the signature of __init__. + """ + format_functions = [] + for field_name in data.dtype.names: + format_function = _get_format_function(data[field_name], **options) + if data.dtype[field_name].shape != (): + format_function = SubArrayFormat(format_function, **options) + format_functions.append(format_function) + return cls(format_functions) + + def __call__(self, x): + str_fields = [ + format_function(field) + for field, format_function in zip(x, self.format_functions) + ] + if len(str_fields) == 1: + return "({},)".format(str_fields[0]) + else: + return "({})".format(", ".join(str_fields)) + + +def _void_scalar_repr(x): + """ + Implements the repr for structured-void scalars. It is called from the + scalartypes.c.src code, and is placed here because it uses the elementwise + formatters defined above. + """ + return StructuredVoidFormat.from_data(array(x), **_format_options)(x) + + +_typelessdata = [int_, float_, complex_, bool_] + + +def dtype_is_implied(dtype): + """ + Determine if the given dtype is implied by the representation of its values. + + Parameters + ---------- + dtype : dtype + Data type + + Returns + ------- + implied : bool + True if the dtype is implied by the representation of its values. 
+ + Examples + -------- + >>> np.core.arrayprint.dtype_is_implied(int) + True + >>> np.array([1, 2, 3], int) + array([1, 2, 3]) + >>> np.core.arrayprint.dtype_is_implied(np.int8) + False + >>> np.array([1, 2, 3], np.int8) + array([1, 2, 3], dtype=int8) + """ + dtype = np.dtype(dtype) + if _format_options['legacy'] <= 113 and dtype.type == bool_: + return False + + # not just void types can be structured, and names are not part of the repr + if dtype.names is not None: + return False + + # should care about endianness *unless size is 1* (e.g., int8, bool) + if not dtype.isnative: + return False + + return dtype.type in _typelessdata + + +def dtype_short_repr(dtype): + """ + Convert a dtype to a short form which evaluates to the same dtype. + + The intent is roughly that the following holds + + >>> from numpy import * + >>> dt = np.int64([1, 2]).dtype + >>> assert eval(dtype_short_repr(dt)) == dt + """ + if type(dtype).__repr__ != np.dtype.__repr__: + # TODO: Custom repr for user DTypes, logic should likely move. + return repr(dtype) + if dtype.names is not None: + # structured dtypes give a list or tuple repr + return str(dtype) + elif issubclass(dtype.type, flexible): + # handle these separately so they don't give garbage like str256 + return "'%s'" % str(dtype) + + typename = dtype.name + if not dtype.isnative: + # deal with cases like dtype('<u2') that are identical to an + # established dtype (in this case uint16) + # except that they have a different endianness. + return "'%s'" % str(dtype) + # quote typenames which can't be represented as python variable names + if typename and not (typename[0].isalpha() and typename.isalnum()): + typename = repr(typename) + return typename + + +def _array_repr_implementation( + arr, max_line_width=None, precision=None, suppress_small=None, + array2string=array2string): + """Internal version of array_repr() that allows overriding array2string.""" + if max_line_width is None: + max_line_width = _format_options['linewidth'] + + if type(arr) is not ndarray: + class_name = type(arr).__name__ + else: + class_name = "array" + + skipdtype = dtype_is_implied(arr.dtype) and arr.size > 0 + + prefix = class_name + "(" + suffix = ")" if skipdtype else "," + + if (_format_options['legacy'] <= 113 and + arr.shape == () and not arr.dtype.names): + lst = repr(arr.item()) + elif arr.size > 0 or arr.shape == (0,): + lst = array2string(arr, max_line_width, precision, suppress_small, + ', ', prefix, suffix=suffix) + else: # show zero-length shape unless it is (0,) + lst = "[], shape=%s" % (repr(arr.shape),) + + arr_str = prefix + lst + suffix + + if skipdtype: + return arr_str + + dtype_str = "dtype={})".format(dtype_short_repr(arr.dtype)) + + # compute whether we should put dtype on a new line: Do so if adding the + # dtype would extend the last line past max_line_width. + # Note: This line gives the correct result even when rfind returns -1. 
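+    # (rfind returns -1 when arr_str has no newline yet, in which case the
+    # expression below reduces to len(arr_str), i.e. the repr is still one line)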
+ last_line_len = len(arr_str) - (arr_str.rfind('\n') + 1) + spacer = " " + if _format_options['legacy'] <= 113: + if issubclass(arr.dtype.type, flexible): + spacer = '\n' + ' '*len(class_name + "(") + elif last_line_len + len(dtype_str) + 1 > max_line_width: + spacer = '\n' + ' '*len(class_name + "(") + + return arr_str + spacer + dtype_str + + +def _array_repr_dispatcher( + arr, max_line_width=None, precision=None, suppress_small=None): + return (arr,) + + +@array_function_dispatch(_array_repr_dispatcher, module='numpy') +def array_repr(arr, max_line_width=None, precision=None, suppress_small=None): + """ + Return the string representation of an array. + + Parameters + ---------- + arr : ndarray + Input array. + max_line_width : int, optional + Inserts newlines if text is longer than `max_line_width`. + Defaults to ``numpy.get_printoptions()['linewidth']``. + precision : int, optional + Floating point precision. + Defaults to ``numpy.get_printoptions()['precision']``. + suppress_small : bool, optional + Represent numbers "very close" to zero as zero; default is False. + Very close is defined by precision: if the precision is 8, e.g., + numbers smaller (in absolute value) than 5e-9 are represented as + zero. + Defaults to ``numpy.get_printoptions()['suppress']``. + + Returns + ------- + string : str + The string representation of an array. + + See Also + -------- + array_str, array2string, set_printoptions + + Examples + -------- + >>> np.array_repr(np.array([1,2])) + 'array([1, 2])' + >>> np.array_repr(np.ma.array([0.])) + 'MaskedArray([0.])' + >>> np.array_repr(np.array([], np.int32)) + 'array([], dtype=int32)' + + >>> x = np.array([1e-6, 4e-7, 2, 3]) + >>> np.array_repr(x, precision=6, suppress_small=True) + 'array([0.000001, 0. , 2. , 3. ])' + + """ + return _array_repr_implementation( + arr, max_line_width, precision, suppress_small) + + +@_recursive_guard() +def _guarded_repr_or_str(v): + if isinstance(v, bytes): + return repr(v) + return str(v) + + +def _array_str_implementation( + a, max_line_width=None, precision=None, suppress_small=None, + array2string=array2string): + """Internal version of array_str() that allows overriding array2string.""" + if (_format_options['legacy'] <= 113 and + a.shape == () and not a.dtype.names): + return str(a.item()) + + # the str of 0d arrays is a special case: It should appear like a scalar, + # so floats are not truncated by `precision`, and strings are not wrapped + # in quotes. So we return the str of the scalar value. + if a.shape == (): + # obtain a scalar and call str on it, avoiding problems for subclasses + # for which indexing with () returns a 0d instead of a scalar by using + # ndarray's getindex. Also guard against recursive 0d object arrays. + return _guarded_repr_or_str(np.ndarray.__getitem__(a, ())) + + return array2string(a, max_line_width, precision, suppress_small, ' ', "") + + +def _array_str_dispatcher( + a, max_line_width=None, precision=None, suppress_small=None): + return (a,) + + +@array_function_dispatch(_array_str_dispatcher, module='numpy') +def array_str(a, max_line_width=None, precision=None, suppress_small=None): + """ + Return a string representation of the data in an array. + + The data in the array is returned as a single string. This function is + similar to `array_repr`, the difference being that `array_repr` also + returns information on the kind of array and its data type. + + Parameters + ---------- + a : ndarray + Input array. 
+ max_line_width : int, optional + Inserts newlines if text is longer than `max_line_width`. + Defaults to ``numpy.get_printoptions()['linewidth']``. + precision : int, optional + Floating point precision. + Defaults to ``numpy.get_printoptions()['precision']``. + suppress_small : bool, optional + Represent numbers "very close" to zero as zero; default is False. + Very close is defined by precision: if the precision is 8, e.g., + numbers smaller (in absolute value) than 5e-9 are represented as + zero. + Defaults to ``numpy.get_printoptions()['suppress']``. + + See Also + -------- + array2string, array_repr, set_printoptions + + Examples + -------- + >>> np.array_str(np.arange(3)) + '[0 1 2]' + + """ + return _array_str_implementation( + a, max_line_width, precision, suppress_small) + + +# needed if __array_function__ is disabled +_array2string_impl = getattr(array2string, '__wrapped__', array2string) +_default_array_str = functools.partial(_array_str_implementation, + array2string=_array2string_impl) +_default_array_repr = functools.partial(_array_repr_implementation, + array2string=_array2string_impl) + + +def set_string_function(f, repr=True): + """ + Set a Python function to be used when pretty printing arrays. + + Parameters + ---------- + f : function or None + Function to be used to pretty print arrays. The function should expect + a single array argument and return a string of the representation of + the array. If None, the function is reset to the default NumPy function + to print arrays. + repr : bool, optional + If True (default), the function for pretty printing (``__repr__``) + is set, if False the function that returns the default string + representation (``__str__``) is set. + + See Also + -------- + set_printoptions, get_printoptions + + Examples + -------- + >>> def pprint(arr): + ... return 'HA! - What are you going to do now?' + ... + >>> np.set_string_function(pprint) + >>> a = np.arange(10) + >>> a + HA! - What are you going to do now? + >>> _ = a + >>> # [0 1 2 3 4 5 6 7 8 9] + + We can reset the function to the default: + + >>> np.set_string_function(None) + >>> a + array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + + `repr` affects either pretty printing or normal string representation. + Note that ``__repr__`` is still affected by setting ``__str__`` + because the width of each array element in the returned string becomes + equal to the length of the result of ``__str__()``. 
+ + >>> x = np.arange(4) + >>> np.set_string_function(lambda x:'random', repr=False) + >>> x.__str__() + 'random' + >>> x.__repr__() + 'array([0, 1, 2, 3])' + + """ + if f is None: + if repr: + return multiarray.set_string_function(_default_array_repr, 1) + else: + return multiarray.set_string_function(_default_array_str, 0) + else: + return multiarray.set_string_function(f, repr) diff --git a/.venv/lib/python3.12/site-packages/numpy/core/arrayprint.pyi b/.venv/lib/python3.12/site-packages/numpy/core/arrayprint.pyi new file mode 100644 index 00000000..d8255387 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/arrayprint.pyi @@ -0,0 +1,142 @@ +from types import TracebackType +from collections.abc import Callable +from typing import Any, Literal, TypedDict, SupportsIndex + +# Using a private class is by no means ideal, but it is simply a consequence +# of a `contextlib.context` returning an instance of aforementioned class +from contextlib import _GeneratorContextManager + +from numpy import ( + ndarray, + generic, + bool_, + integer, + timedelta64, + datetime64, + floating, + complexfloating, + void, + str_, + bytes_, + longdouble, + clongdouble, +) +from numpy._typing import ArrayLike, _CharLike_co, _FloatLike_co + +_FloatMode = Literal["fixed", "unique", "maxprec", "maxprec_equal"] + +class _FormatDict(TypedDict, total=False): + bool: Callable[[bool_], str] + int: Callable[[integer[Any]], str] + timedelta: Callable[[timedelta64], str] + datetime: Callable[[datetime64], str] + float: Callable[[floating[Any]], str] + longfloat: Callable[[longdouble], str] + complexfloat: Callable[[complexfloating[Any, Any]], str] + longcomplexfloat: Callable[[clongdouble], str] + void: Callable[[void], str] + numpystr: Callable[[_CharLike_co], str] + object: Callable[[object], str] + all: Callable[[object], str] + int_kind: Callable[[integer[Any]], str] + float_kind: Callable[[floating[Any]], str] + complex_kind: Callable[[complexfloating[Any, Any]], str] + str_kind: Callable[[_CharLike_co], str] + +class _FormatOptions(TypedDict): + precision: int + threshold: int + edgeitems: int + linewidth: int + suppress: bool + nanstr: str + infstr: str + formatter: None | _FormatDict + sign: Literal["-", "+", " "] + floatmode: _FloatMode + legacy: Literal[False, "1.13", "1.21"] + +def set_printoptions( + precision: None | SupportsIndex = ..., + threshold: None | int = ..., + edgeitems: None | int = ..., + linewidth: None | int = ..., + suppress: None | bool = ..., + nanstr: None | str = ..., + infstr: None | str = ..., + formatter: None | _FormatDict = ..., + sign: Literal[None, "-", "+", " "] = ..., + floatmode: None | _FloatMode = ..., + *, + legacy: Literal[None, False, "1.13", "1.21"] = ... +) -> None: ... +def get_printoptions() -> _FormatOptions: ... +def array2string( + a: ndarray[Any, Any], + max_line_width: None | int = ..., + precision: None | SupportsIndex = ..., + suppress_small: None | bool = ..., + separator: str = ..., + prefix: str = ..., + # NOTE: With the `style` argument being deprecated, + # all arguments between `formatter` and `suffix` are de facto + # keyworld-only arguments + *, + formatter: None | _FormatDict = ..., + threshold: None | int = ..., + edgeitems: None | int = ..., + sign: Literal[None, "-", "+", " "] = ..., + floatmode: None | _FloatMode = ..., + suffix: str = ..., + legacy: Literal[None, False, "1.13", "1.21"] = ..., +) -> str: ... 
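As the note in the stub above indicates, the arguments between ``formatter`` and ``suffix`` are de facto keyword-only in ``array2string``. A short, illustrative sketch of typical keyword usage (the exact output depends on the print options in effect):

    import numpy as np

    a = np.array([1e-8, 0.5, 3.0])
    # sign, floatmode and the other late parameters are passed by keyword
    s = np.array2string(a, precision=3, separator=', ',
                        suppress_small=True, sign=' ', floatmode='maxprec')
    print(s)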
+def format_float_scientific( + x: _FloatLike_co, + precision: None | int = ..., + unique: bool = ..., + trim: Literal["k", ".", "0", "-"] = ..., + sign: bool = ..., + pad_left: None | int = ..., + exp_digits: None | int = ..., + min_digits: None | int = ..., +) -> str: ... +def format_float_positional( + x: _FloatLike_co, + precision: None | int = ..., + unique: bool = ..., + fractional: bool = ..., + trim: Literal["k", ".", "0", "-"] = ..., + sign: bool = ..., + pad_left: None | int = ..., + pad_right: None | int = ..., + min_digits: None | int = ..., +) -> str: ... +def array_repr( + arr: ndarray[Any, Any], + max_line_width: None | int = ..., + precision: None | SupportsIndex = ..., + suppress_small: None | bool = ..., +) -> str: ... +def array_str( + a: ndarray[Any, Any], + max_line_width: None | int = ..., + precision: None | SupportsIndex = ..., + suppress_small: None | bool = ..., +) -> str: ... +def set_string_function( + f: None | Callable[[ndarray[Any, Any]], str], repr: bool = ... +) -> None: ... +def printoptions( + precision: None | SupportsIndex = ..., + threshold: None | int = ..., + edgeitems: None | int = ..., + linewidth: None | int = ..., + suppress: None | bool = ..., + nanstr: None | str = ..., + infstr: None | str = ..., + formatter: None | _FormatDict = ..., + sign: Literal[None, "-", "+", " "] = ..., + floatmode: None | _FloatMode = ..., + *, + legacy: Literal[None, False, "1.13", "1.21"] = ... +) -> _GeneratorContextManager[_FormatOptions]: ... diff --git a/.venv/lib/python3.12/site-packages/numpy/core/cversions.py b/.venv/lib/python3.12/site-packages/numpy/core/cversions.py new file mode 100644 index 00000000..00159c3a --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/cversions.py @@ -0,0 +1,13 @@ +"""Simple script to compute the api hash of the current API. + +The API has is defined by numpy_api_order and ufunc_api_order. + +""" +from os.path import dirname + +from code_generators.genapi import fullapi_hash +from code_generators.numpy_api import full_api + +if __name__ == '__main__': + curdir = dirname(__file__) + print(fullapi_hash(full_api)) diff --git a/.venv/lib/python3.12/site-packages/numpy/core/defchararray.py b/.venv/lib/python3.12/site-packages/numpy/core/defchararray.py new file mode 100644 index 00000000..11c5a30b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/defchararray.py @@ -0,0 +1,2914 @@ +""" +This module contains a set of functions for vectorized string +operations and methods. + +.. note:: + The `chararray` class exists for backwards compatibility with + Numarray, it is not recommended for new development. Starting from numpy + 1.4, if one needs arrays of strings, it is recommended to use arrays of + `dtype` `object_`, `bytes_` or `str_`, and use the free functions + in the `numpy.char` module for fast vectorized string operations. + +Some methods will only be available if the corresponding string method is +available in your version of Python. + +The preferred alias for `defchararray` is `numpy.char`. 
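A small illustrative example of the vectorized helpers the ``numpy.char`` namespace provides (only documented public functions are used):

    import numpy as np

    names = np.array(['ada ', ' grace', 'linus'])
    cleaned = np.char.strip(names)        # element-wise str.strip
    print(np.char.upper(cleaned))         # ['ADA' 'GRACE' 'LINUS']
    print(np.char.str_len(cleaned))       # [3 5 5]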
+ +""" +import functools + +from .._utils import set_module +from .numerictypes import ( + bytes_, str_, integer, int_, object_, bool_, character) +from .numeric import ndarray, compare_chararrays +from .numeric import array as narray +from numpy.core.multiarray import _vec_string +from numpy.core import overrides +from numpy.compat import asbytes +import numpy + +__all__ = [ + 'equal', 'not_equal', 'greater_equal', 'less_equal', + 'greater', 'less', 'str_len', 'add', 'multiply', 'mod', 'capitalize', + 'center', 'count', 'decode', 'encode', 'endswith', 'expandtabs', + 'find', 'index', 'isalnum', 'isalpha', 'isdigit', 'islower', 'isspace', + 'istitle', 'isupper', 'join', 'ljust', 'lower', 'lstrip', 'partition', + 'replace', 'rfind', 'rindex', 'rjust', 'rpartition', 'rsplit', + 'rstrip', 'split', 'splitlines', 'startswith', 'strip', 'swapcase', + 'title', 'translate', 'upper', 'zfill', 'isnumeric', 'isdecimal', + 'array', 'asarray' + ] + + +_globalvar = 0 + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy.char') + + +def _is_unicode(arr): + """Returns True if arr is a string or a string array with a dtype that + represents a unicode string, otherwise returns False. + + """ + if (isinstance(arr, str) or + issubclass(numpy.asarray(arr).dtype.type, str)): + return True + return False + + +def _to_bytes_or_str_array(result, output_dtype_like=None): + """ + Helper function to cast a result back into an array + with the appropriate dtype if an object array must be used + as an intermediary. + """ + ret = numpy.asarray(result.tolist()) + dtype = getattr(output_dtype_like, 'dtype', None) + if dtype is not None: + return ret.astype(type(dtype)(_get_num_chars(ret)), copy=False) + return ret + + +def _clean_args(*args): + """ + Helper function for delegating arguments to Python string + functions. + + Many of the Python string operations that have optional arguments + do not use 'None' to indicate a default value. In these cases, + we need to remove all None arguments, and those following them. + """ + newargs = [] + for chk in args: + if chk is None: + break + newargs.append(chk) + return newargs + +def _get_num_chars(a): + """ + Helper function that returns the number of characters per field in + a string or unicode array. This is to abstract out the fact that + for a unicode array this is itemsize / 4. + """ + if issubclass(a.dtype.type, str_): + return a.itemsize // 4 + return a.itemsize + + +def _binary_op_dispatcher(x1, x2): + return (x1, x2) + + +@array_function_dispatch(_binary_op_dispatcher) +def equal(x1, x2): + """ + Return (x1 == x2) element-wise. + + Unlike `numpy.equal`, this comparison is performed by first + stripping whitespace characters from the end of the string. This + behavior is provided for backward-compatibility with numarray. + + Parameters + ---------- + x1, x2 : array_like of str or unicode + Input arrays of the same shape. + + Returns + ------- + out : ndarray + Output array of bools. + + See Also + -------- + not_equal, greater_equal, less_equal, greater, less + """ + return compare_chararrays(x1, x2, '==', True) + + +@array_function_dispatch(_binary_op_dispatcher) +def not_equal(x1, x2): + """ + Return (x1 != x2) element-wise. + + Unlike `numpy.not_equal`, this comparison is performed by first + stripping whitespace characters from the end of the string. This + behavior is provided for backward-compatibility with numarray. + + Parameters + ---------- + x1, x2 : array_like of str or unicode + Input arrays of the same shape. 
+ + Returns + ------- + out : ndarray + Output array of bools. + + See Also + -------- + equal, greater_equal, less_equal, greater, less + """ + return compare_chararrays(x1, x2, '!=', True) + + +@array_function_dispatch(_binary_op_dispatcher) +def greater_equal(x1, x2): + """ + Return (x1 >= x2) element-wise. + + Unlike `numpy.greater_equal`, this comparison is performed by + first stripping whitespace characters from the end of the string. + This behavior is provided for backward-compatibility with + numarray. + + Parameters + ---------- + x1, x2 : array_like of str or unicode + Input arrays of the same shape. + + Returns + ------- + out : ndarray + Output array of bools. + + See Also + -------- + equal, not_equal, less_equal, greater, less + """ + return compare_chararrays(x1, x2, '>=', True) + + +@array_function_dispatch(_binary_op_dispatcher) +def less_equal(x1, x2): + """ + Return (x1 <= x2) element-wise. + + Unlike `numpy.less_equal`, this comparison is performed by first + stripping whitespace characters from the end of the string. This + behavior is provided for backward-compatibility with numarray. + + Parameters + ---------- + x1, x2 : array_like of str or unicode + Input arrays of the same shape. + + Returns + ------- + out : ndarray + Output array of bools. + + See Also + -------- + equal, not_equal, greater_equal, greater, less + """ + return compare_chararrays(x1, x2, '<=', True) + + +@array_function_dispatch(_binary_op_dispatcher) +def greater(x1, x2): + """ + Return (x1 > x2) element-wise. + + Unlike `numpy.greater`, this comparison is performed by first + stripping whitespace characters from the end of the string. This + behavior is provided for backward-compatibility with numarray. + + Parameters + ---------- + x1, x2 : array_like of str or unicode + Input arrays of the same shape. + + Returns + ------- + out : ndarray + Output array of bools. + + See Also + -------- + equal, not_equal, greater_equal, less_equal, less + """ + return compare_chararrays(x1, x2, '>', True) + + +@array_function_dispatch(_binary_op_dispatcher) +def less(x1, x2): + """ + Return (x1 < x2) element-wise. + + Unlike `numpy.greater`, this comparison is performed by first + stripping whitespace characters from the end of the string. This + behavior is provided for backward-compatibility with numarray. + + Parameters + ---------- + x1, x2 : array_like of str or unicode + Input arrays of the same shape. + + Returns + ------- + out : ndarray + Output array of bools. + + See Also + -------- + equal, not_equal, greater_equal, less_equal, greater + """ + return compare_chararrays(x1, x2, '<', True) + + +def _unary_op_dispatcher(a): + return (a,) + + +@array_function_dispatch(_unary_op_dispatcher) +def str_len(a): + """ + Return len(a) element-wise. + + Parameters + ---------- + a : array_like of str or unicode + + Returns + ------- + out : ndarray + Output array of integers + + See Also + -------- + len + + Examples + -------- + >>> a = np.array(['Grace Hopper Conference', 'Open Source Day']) + >>> np.char.str_len(a) + array([23, 15]) + >>> a = np.array([u'\u0420', u'\u043e']) + >>> np.char.str_len(a) + array([1, 1]) + >>> a = np.array([['hello', 'world'], [u'\u0420', u'\u043e']]) + >>> np.char.str_len(a) + array([[5, 5], [1, 1]]) + """ + # Note: __len__, etc. currently return ints, which are not C-integers. + # Generally intp would be expected for lengths, although int is sufficient + # due to the dtype itemsize limitation. 
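+    # _vec_string applies the named str method ('__len__' here) to every
+    # element and collects the results into an array of the requested dtype.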
+ return _vec_string(a, int_, '__len__') + + +@array_function_dispatch(_binary_op_dispatcher) +def add(x1, x2): + """ + Return element-wise string concatenation for two arrays of str or unicode. + + Arrays `x1` and `x2` must have the same shape. + + Parameters + ---------- + x1 : array_like of str or unicode + Input array. + x2 : array_like of str or unicode + Input array. + + Returns + ------- + add : ndarray + Output array of `bytes_` or `str_`, depending on input types + of the same shape as `x1` and `x2`. + + """ + arr1 = numpy.asarray(x1) + arr2 = numpy.asarray(x2) + out_size = _get_num_chars(arr1) + _get_num_chars(arr2) + + if type(arr1.dtype) != type(arr2.dtype): + # Enforce this for now. The solution to it will be implement add + # as a ufunc. It never worked right on Python 3: bytes + unicode gave + # nonsense unicode + bytes errored, and unicode + object used the + # object dtype itemsize as num chars (worked on short strings). + # bytes + void worked but promoting void->bytes is dubious also. + raise TypeError( + "np.char.add() requires both arrays of the same dtype kind, but " + f"got dtypes: '{arr1.dtype}' and '{arr2.dtype}' (the few cases " + "where this used to work often lead to incorrect results).") + + return _vec_string(arr1, type(arr1.dtype)(out_size), '__add__', (arr2,)) + +def _multiply_dispatcher(a, i): + return (a,) + + +@array_function_dispatch(_multiply_dispatcher) +def multiply(a, i): + """ + Return (a * i), that is string multiple concatenation, + element-wise. + + Values in `i` of less than 0 are treated as 0 (which yields an + empty string). + + Parameters + ---------- + a : array_like of str or unicode + + i : array_like of ints + + Returns + ------- + out : ndarray + Output array of str or unicode, depending on input types + + Examples + -------- + >>> a = np.array(["a", "b", "c"]) + >>> np.char.multiply(x, 3) + array(['aaa', 'bbb', 'ccc'], dtype='<U3') + >>> i = np.array([1, 2, 3]) + >>> np.char.multiply(a, i) + array(['a', 'bb', 'ccc'], dtype='<U3') + >>> np.char.multiply(np.array(['a']), i) + array(['a', 'aa', 'aaa'], dtype='<U3') + >>> a = np.array(['a', 'b', 'c', 'd', 'e', 'f']).reshape((2, 3)) + >>> np.char.multiply(a, 3) + array([['aaa', 'bbb', 'ccc'], + ['ddd', 'eee', 'fff']], dtype='<U3') + >>> np.char.multiply(a, i) + array([['a', 'bb', 'ccc'], + ['d', 'ee', 'fff']], dtype='<U3') + """ + a_arr = numpy.asarray(a) + i_arr = numpy.asarray(i) + if not issubclass(i_arr.dtype.type, integer): + raise ValueError("Can only multiply by integers") + out_size = _get_num_chars(a_arr) * max(int(i_arr.max()), 0) + return _vec_string( + a_arr, type(a_arr.dtype)(out_size), '__mul__', (i_arr,)) + + +def _mod_dispatcher(a, values): + return (a, values) + + +@array_function_dispatch(_mod_dispatcher) +def mod(a, values): + """ + Return (a % i), that is pre-Python 2.6 string formatting + (interpolation), element-wise for a pair of array_likes of str + or unicode. + + Parameters + ---------- + a : array_like of str or unicode + + values : array_like of values + These values will be element-wise interpolated into the string. + + Returns + ------- + out : ndarray + Output array of str or unicode, depending on input types + + See Also + -------- + str.__mod__ + + """ + return _to_bytes_or_str_array( + _vec_string(a, object_, '__mod__', (values,)), a) + + +@array_function_dispatch(_unary_op_dispatcher) +def capitalize(a): + """ + Return a copy of `a` with only the first character of each element + capitalized. + + Calls `str.capitalize` element-wise. 
+ + For 8-bit strings, this method is locale-dependent. + + Parameters + ---------- + a : array_like of str or unicode + Input array of strings to capitalize. + + Returns + ------- + out : ndarray + Output array of str or unicode, depending on input + types + + See Also + -------- + str.capitalize + + Examples + -------- + >>> c = np.array(['a1b2','1b2a','b2a1','2a1b'],'S4'); c + array(['a1b2', '1b2a', 'b2a1', '2a1b'], + dtype='|S4') + >>> np.char.capitalize(c) + array(['A1b2', '1b2a', 'B2a1', '2a1b'], + dtype='|S4') + + """ + a_arr = numpy.asarray(a) + return _vec_string(a_arr, a_arr.dtype, 'capitalize') + + +def _center_dispatcher(a, width, fillchar=None): + return (a,) + + +@array_function_dispatch(_center_dispatcher) +def center(a, width, fillchar=' '): + """ + Return a copy of `a` with its elements centered in a string of + length `width`. + + Calls `str.center` element-wise. + + Parameters + ---------- + a : array_like of str or unicode + + width : int + The length of the resulting strings + fillchar : str or unicode, optional + The padding character to use (default is space). + + Returns + ------- + out : ndarray + Output array of str or unicode, depending on input + types + + See Also + -------- + str.center + + Notes + ----- + This function is intended to work with arrays of strings. The + fill character is not applied to numeric types. + + Examples + -------- + >>> c = np.array(['a1b2','1b2a','b2a1','2a1b']); c + array(['a1b2', '1b2a', 'b2a1', '2a1b'], dtype='<U4') + >>> np.char.center(c, width=9) + array([' a1b2 ', ' 1b2a ', ' b2a1 ', ' 2a1b '], dtype='<U9') + >>> np.char.center(c, width=9, fillchar='*') + array(['***a1b2**', '***1b2a**', '***b2a1**', '***2a1b**'], dtype='<U9') + >>> np.char.center(c, width=1) + array(['a', '1', 'b', '2'], dtype='<U1') + + """ + a_arr = numpy.asarray(a) + width_arr = numpy.asarray(width) + size = int(numpy.max(width_arr.flat)) + if numpy.issubdtype(a_arr.dtype, numpy.bytes_): + fillchar = asbytes(fillchar) + return _vec_string( + a_arr, type(a_arr.dtype)(size), 'center', (width_arr, fillchar)) + + +def _count_dispatcher(a, sub, start=None, end=None): + return (a,) + + +@array_function_dispatch(_count_dispatcher) +def count(a, sub, start=0, end=None): + """ + Returns an array with the number of non-overlapping occurrences of + substring `sub` in the range [`start`, `end`]. + + Calls `str.count` element-wise. + + Parameters + ---------- + a : array_like of str or unicode + + sub : str or unicode + The substring to search for. + + start, end : int, optional + Optional arguments `start` and `end` are interpreted as slice + notation to specify the range in which to count. + + Returns + ------- + out : ndarray + Output array of ints. + + See Also + -------- + str.count + + Examples + -------- + >>> c = np.array(['aAaAaA', ' aA ', 'abBABba']) + >>> c + array(['aAaAaA', ' aA ', 'abBABba'], dtype='<U7') + >>> np.char.count(c, 'A') + array([3, 1, 1]) + >>> np.char.count(c, 'aA') + array([3, 1, 0]) + >>> np.char.count(c, 'A', start=1, end=4) + array([2, 1, 1]) + >>> np.char.count(c, 'A', start=1, end=3) + array([1, 0, 0]) + + """ + return _vec_string(a, int_, 'count', [sub, start] + _clean_args(end)) + + +def _code_dispatcher(a, encoding=None, errors=None): + return (a,) + + +@array_function_dispatch(_code_dispatcher) +def decode(a, encoding=None, errors=None): + r""" + Calls ``bytes.decode`` element-wise. + + The set of available codecs comes from the Python standard library, + and may be extended at runtime. 
For more information, see the + :mod:`codecs` module. + + Parameters + ---------- + a : array_like of str or unicode + + encoding : str, optional + The name of an encoding + + errors : str, optional + Specifies how to handle encoding errors + + Returns + ------- + out : ndarray + + See Also + -------- + :py:meth:`bytes.decode` + + Notes + ----- + The type of the result will depend on the encoding specified. + + Examples + -------- + >>> c = np.array([b'\x81\xc1\x81\xc1\x81\xc1', b'@@\x81\xc1@@', + ... b'\x81\x82\xc2\xc1\xc2\x82\x81']) + >>> c + array([b'\x81\xc1\x81\xc1\x81\xc1', b'@@\x81\xc1@@', + ... b'\x81\x82\xc2\xc1\xc2\x82\x81'], dtype='|S7') + >>> np.char.decode(c, encoding='cp037') + array(['aAaAaA', ' aA ', 'abBABba'], dtype='<U7') + + """ + return _to_bytes_or_str_array( + _vec_string(a, object_, 'decode', _clean_args(encoding, errors))) + + +@array_function_dispatch(_code_dispatcher) +def encode(a, encoding=None, errors=None): + """ + Calls `str.encode` element-wise. + + The set of available codecs comes from the Python standard library, + and may be extended at runtime. For more information, see the codecs + module. + + Parameters + ---------- + a : array_like of str or unicode + + encoding : str, optional + The name of an encoding + + errors : str, optional + Specifies how to handle encoding errors + + Returns + ------- + out : ndarray + + See Also + -------- + str.encode + + Notes + ----- + The type of the result will depend on the encoding specified. + + """ + return _to_bytes_or_str_array( + _vec_string(a, object_, 'encode', _clean_args(encoding, errors))) + + +def _endswith_dispatcher(a, suffix, start=None, end=None): + return (a,) + + +@array_function_dispatch(_endswith_dispatcher) +def endswith(a, suffix, start=0, end=None): + """ + Returns a boolean array which is `True` where the string element + in `a` ends with `suffix`, otherwise `False`. + + Calls `str.endswith` element-wise. + + Parameters + ---------- + a : array_like of str or unicode + + suffix : str + + start, end : int, optional + With optional `start`, test beginning at that position. With + optional `end`, stop comparing at that position. + + Returns + ------- + out : ndarray + Outputs an array of bools. + + See Also + -------- + str.endswith + + Examples + -------- + >>> s = np.array(['foo', 'bar']) + >>> s[0] = 'foo' + >>> s[1] = 'bar' + >>> s + array(['foo', 'bar'], dtype='<U3') + >>> np.char.endswith(s, 'ar') + array([False, True]) + >>> np.char.endswith(s, 'a', start=1, end=2) + array([False, True]) + + """ + return _vec_string( + a, bool_, 'endswith', [suffix, start] + _clean_args(end)) + + +def _expandtabs_dispatcher(a, tabsize=None): + return (a,) + + +@array_function_dispatch(_expandtabs_dispatcher) +def expandtabs(a, tabsize=8): + """ + Return a copy of each string element where all tab characters are + replaced by one or more spaces. + + Calls `str.expandtabs` element-wise. + + Return a copy of each string element where all tab characters are + replaced by one or more spaces, depending on the current column + and the given `tabsize`. The column number is reset to zero after + each newline occurring in the string. This doesn't understand other + non-printing characters or escape sequences. + + Parameters + ---------- + a : array_like of str or unicode + Input array + tabsize : int, optional + Replace tabs with `tabsize` number of spaces. If not given defaults + to 8 spaces. 
+ + Returns + ------- + out : ndarray + Output array of str or unicode, depending on input type + + See Also + -------- + str.expandtabs + + """ + return _to_bytes_or_str_array( + _vec_string(a, object_, 'expandtabs', (tabsize,)), a) + + +@array_function_dispatch(_count_dispatcher) +def find(a, sub, start=0, end=None): + """ + For each element, return the lowest index in the string where + substring `sub` is found. + + Calls `str.find` element-wise. + + For each element, return the lowest index in the string where + substring `sub` is found, such that `sub` is contained in the + range [`start`, `end`]. + + Parameters + ---------- + a : array_like of str or unicode + + sub : str or unicode + + start, end : int, optional + Optional arguments `start` and `end` are interpreted as in + slice notation. + + Returns + ------- + out : ndarray or int + Output array of ints. Returns -1 if `sub` is not found. + + See Also + -------- + str.find + + Examples + -------- + >>> a = np.array(["NumPy is a Python library"]) + >>> np.char.find(a, "Python", start=0, end=None) + array([11]) + + """ + return _vec_string( + a, int_, 'find', [sub, start] + _clean_args(end)) + + +@array_function_dispatch(_count_dispatcher) +def index(a, sub, start=0, end=None): + """ + Like `find`, but raises `ValueError` when the substring is not found. + + Calls `str.index` element-wise. + + Parameters + ---------- + a : array_like of str or unicode + + sub : str or unicode + + start, end : int, optional + + Returns + ------- + out : ndarray + Output array of ints. Returns -1 if `sub` is not found. + + See Also + -------- + find, str.find + + Examples + -------- + >>> a = np.array(["Computer Science"]) + >>> np.char.index(a, "Science", start=0, end=None) + array([9]) + + """ + return _vec_string( + a, int_, 'index', [sub, start] + _clean_args(end)) + + +@array_function_dispatch(_unary_op_dispatcher) +def isalnum(a): + """ + Returns true for each element if all characters in the string are + alphanumeric and there is at least one character, false otherwise. + + Calls `str.isalnum` element-wise. + + For 8-bit strings, this method is locale-dependent. + + Parameters + ---------- + a : array_like of str or unicode + + Returns + ------- + out : ndarray + Output array of str or unicode, depending on input type + + See Also + -------- + str.isalnum + """ + return _vec_string(a, bool_, 'isalnum') + + +@array_function_dispatch(_unary_op_dispatcher) +def isalpha(a): + """ + Returns true for each element if all characters in the string are + alphabetic and there is at least one character, false otherwise. + + Calls `str.isalpha` element-wise. + + For 8-bit strings, this method is locale-dependent. + + Parameters + ---------- + a : array_like of str or unicode + + Returns + ------- + out : ndarray + Output array of bools + + See Also + -------- + str.isalpha + """ + return _vec_string(a, bool_, 'isalpha') + + +@array_function_dispatch(_unary_op_dispatcher) +def isdigit(a): + """ + Returns true for each element if all characters in the string are + digits and there is at least one character, false otherwise. + + Calls `str.isdigit` element-wise. + + For 8-bit strings, this method is locale-dependent. 
+ + Parameters + ---------- + a : array_like of str or unicode + + Returns + ------- + out : ndarray + Output array of bools + + See Also + -------- + str.isdigit + + Examples + -------- + >>> a = np.array(['a', 'b', '0']) + >>> np.char.isdigit(a) + array([False, False, True]) + >>> a = np.array([['a', 'b', '0'], ['c', '1', '2']]) + >>> np.char.isdigit(a) + array([[False, False, True], [False, True, True]]) + """ + return _vec_string(a, bool_, 'isdigit') + + +@array_function_dispatch(_unary_op_dispatcher) +def islower(a): + """ + Returns true for each element if all cased characters in the + string are lowercase and there is at least one cased character, + false otherwise. + + Calls `str.islower` element-wise. + + For 8-bit strings, this method is locale-dependent. + + Parameters + ---------- + a : array_like of str or unicode + + Returns + ------- + out : ndarray + Output array of bools + + See Also + -------- + str.islower + """ + return _vec_string(a, bool_, 'islower') + + +@array_function_dispatch(_unary_op_dispatcher) +def isspace(a): + """ + Returns true for each element if there are only whitespace + characters in the string and there is at least one character, + false otherwise. + + Calls `str.isspace` element-wise. + + For 8-bit strings, this method is locale-dependent. + + Parameters + ---------- + a : array_like of str or unicode + + Returns + ------- + out : ndarray + Output array of bools + + See Also + -------- + str.isspace + """ + return _vec_string(a, bool_, 'isspace') + + +@array_function_dispatch(_unary_op_dispatcher) +def istitle(a): + """ + Returns true for each element if the element is a titlecased + string and there is at least one character, false otherwise. + + Call `str.istitle` element-wise. + + For 8-bit strings, this method is locale-dependent. + + Parameters + ---------- + a : array_like of str or unicode + + Returns + ------- + out : ndarray + Output array of bools + + See Also + -------- + str.istitle + """ + return _vec_string(a, bool_, 'istitle') + + +@array_function_dispatch(_unary_op_dispatcher) +def isupper(a): + """ + Return true for each element if all cased characters in the + string are uppercase and there is at least one character, false + otherwise. + + Call `str.isupper` element-wise. + + For 8-bit strings, this method is locale-dependent. + + Parameters + ---------- + a : array_like of str or unicode + + Returns + ------- + out : ndarray + Output array of bools + + See Also + -------- + str.isupper + + Examples + -------- + >>> str = "GHC" + >>> np.char.isupper(str) + array(True) + >>> a = np.array(["hello", "HELLO", "Hello"]) + >>> np.char.isupper(a) + array([False, True, False]) + + """ + return _vec_string(a, bool_, 'isupper') + + +def _join_dispatcher(sep, seq): + return (sep, seq) + + +@array_function_dispatch(_join_dispatcher) +def join(sep, seq): + """ + Return a string which is the concatenation of the strings in the + sequence `seq`. + + Calls `str.join` element-wise. 
+ + Parameters + ---------- + sep : array_like of str or unicode + seq : array_like of str or unicode + + Returns + ------- + out : ndarray + Output array of str or unicode, depending on input types + + See Also + -------- + str.join + + Examples + -------- + >>> np.char.join('-', 'osd') + array('o-s-d', dtype='<U5') + + >>> np.char.join(['-', '.'], ['ghc', 'osd']) + array(['g-h-c', 'o.s.d'], dtype='<U5') + + """ + return _to_bytes_or_str_array( + _vec_string(sep, object_, 'join', (seq,)), seq) + + + +def _just_dispatcher(a, width, fillchar=None): + return (a,) + + +@array_function_dispatch(_just_dispatcher) +def ljust(a, width, fillchar=' '): + """ + Return an array with the elements of `a` left-justified in a + string of length `width`. + + Calls `str.ljust` element-wise. + + Parameters + ---------- + a : array_like of str or unicode + + width : int + The length of the resulting strings + fillchar : str or unicode, optional + The character to use for padding + + Returns + ------- + out : ndarray + Output array of str or unicode, depending on input type + + See Also + -------- + str.ljust + + """ + a_arr = numpy.asarray(a) + width_arr = numpy.asarray(width) + size = int(numpy.max(width_arr.flat)) + if numpy.issubdtype(a_arr.dtype, numpy.bytes_): + fillchar = asbytes(fillchar) + return _vec_string( + a_arr, type(a_arr.dtype)(size), 'ljust', (width_arr, fillchar)) + + +@array_function_dispatch(_unary_op_dispatcher) +def lower(a): + """ + Return an array with the elements converted to lowercase. + + Call `str.lower` element-wise. + + For 8-bit strings, this method is locale-dependent. + + Parameters + ---------- + a : array_like, {str, unicode} + Input array. + + Returns + ------- + out : ndarray, {str, unicode} + Output array of str or unicode, depending on input type + + See Also + -------- + str.lower + + Examples + -------- + >>> c = np.array(['A1B C', '1BCA', 'BCA1']); c + array(['A1B C', '1BCA', 'BCA1'], dtype='<U5') + >>> np.char.lower(c) + array(['a1b c', '1bca', 'bca1'], dtype='<U5') + + """ + a_arr = numpy.asarray(a) + return _vec_string(a_arr, a_arr.dtype, 'lower') + + +def _strip_dispatcher(a, chars=None): + return (a,) + + +@array_function_dispatch(_strip_dispatcher) +def lstrip(a, chars=None): + """ + For each element in `a`, return a copy with the leading characters + removed. + + Calls `str.lstrip` element-wise. + + Parameters + ---------- + a : array-like, {str, unicode} + Input array. + + chars : {str, unicode}, optional + The `chars` argument is a string specifying the set of + characters to be removed. If omitted or None, the `chars` + argument defaults to removing whitespace. The `chars` argument + is not a prefix; rather, all combinations of its values are + stripped. + + Returns + ------- + out : ndarray, {str, unicode} + Output array of str or unicode, depending on input type + + See Also + -------- + str.lstrip + + Examples + -------- + >>> c = np.array(['aAaAaA', ' aA ', 'abBABba']) + >>> c + array(['aAaAaA', ' aA ', 'abBABba'], dtype='<U7') + + The 'a' variable is unstripped from c[1] because whitespace leading. + + >>> np.char.lstrip(c, 'a') + array(['AaAaA', ' aA ', 'bBABba'], dtype='<U7') + + + >>> np.char.lstrip(c, 'A') # leaves c unchanged + array(['aAaAaA', ' aA ', 'abBABba'], dtype='<U7') + >>> (np.char.lstrip(c, ' ') == np.char.lstrip(c, '')).all() + ... # XXX: is this a regression? This used to return True + ... # np.char.lstrip(c,'') does not modify c at all. 
+ False + >>> (np.char.lstrip(c, ' ') == np.char.lstrip(c, None)).all() + True + + """ + a_arr = numpy.asarray(a) + return _vec_string(a_arr, a_arr.dtype, 'lstrip', (chars,)) + + +def _partition_dispatcher(a, sep): + return (a,) + + +@array_function_dispatch(_partition_dispatcher) +def partition(a, sep): + """ + Partition each element in `a` around `sep`. + + Calls `str.partition` element-wise. + + For each element in `a`, split the element as the first + occurrence of `sep`, and return 3 strings containing the part + before the separator, the separator itself, and the part after + the separator. If the separator is not found, return 3 strings + containing the string itself, followed by two empty strings. + + Parameters + ---------- + a : array_like, {str, unicode} + Input array + sep : {str, unicode} + Separator to split each string element in `a`. + + Returns + ------- + out : ndarray, {str, unicode} + Output array of str or unicode, depending on input type. + The output array will have an extra dimension with 3 + elements per input element. + + See Also + -------- + str.partition + + """ + return _to_bytes_or_str_array( + _vec_string(a, object_, 'partition', (sep,)), a) + + +def _replace_dispatcher(a, old, new, count=None): + return (a,) + + +@array_function_dispatch(_replace_dispatcher) +def replace(a, old, new, count=None): + """ + For each element in `a`, return a copy of the string with all + occurrences of substring `old` replaced by `new`. + + Calls `str.replace` element-wise. + + Parameters + ---------- + a : array-like of str or unicode + + old, new : str or unicode + + count : int, optional + If the optional argument `count` is given, only the first + `count` occurrences are replaced. + + Returns + ------- + out : ndarray + Output array of str or unicode, depending on input type + + See Also + -------- + str.replace + + Examples + -------- + >>> a = np.array(["That is a mango", "Monkeys eat mangos"]) + >>> np.char.replace(a, 'mango', 'banana') + array(['That is a banana', 'Monkeys eat bananas'], dtype='<U19') + + >>> a = np.array(["The dish is fresh", "This is it"]) + >>> np.char.replace(a, 'is', 'was') + array(['The dwash was fresh', 'Thwas was it'], dtype='<U19') + """ + return _to_bytes_or_str_array( + _vec_string(a, object_, 'replace', [old, new] + _clean_args(count)), a) + + +@array_function_dispatch(_count_dispatcher) +def rfind(a, sub, start=0, end=None): + """ + For each element in `a`, return the highest index in the string + where substring `sub` is found, such that `sub` is contained + within [`start`, `end`]. + + Calls `str.rfind` element-wise. + + Parameters + ---------- + a : array-like of str or unicode + + sub : str or unicode + + start, end : int, optional + Optional arguments `start` and `end` are interpreted as in + slice notation. + + Returns + ------- + out : ndarray + Output array of ints. Return -1 on failure. + + See Also + -------- + str.rfind + + """ + return _vec_string( + a, int_, 'rfind', [sub, start] + _clean_args(end)) + + +@array_function_dispatch(_count_dispatcher) +def rindex(a, sub, start=0, end=None): + """ + Like `rfind`, but raises `ValueError` when the substring `sub` is + not found. + + Calls `str.rindex` element-wise. + + Parameters + ---------- + a : array-like of str or unicode + + sub : str or unicode + + start, end : int, optional + + Returns + ------- + out : ndarray + Output array of ints. 
+ + See Also + -------- + rfind, str.rindex + + """ + return _vec_string( + a, int_, 'rindex', [sub, start] + _clean_args(end)) + + +@array_function_dispatch(_just_dispatcher) +def rjust(a, width, fillchar=' '): + """ + Return an array with the elements of `a` right-justified in a + string of length `width`. + + Calls `str.rjust` element-wise. + + Parameters + ---------- + a : array_like of str or unicode + + width : int + The length of the resulting strings + fillchar : str or unicode, optional + The character to use for padding + + Returns + ------- + out : ndarray + Output array of str or unicode, depending on input type + + See Also + -------- + str.rjust + + """ + a_arr = numpy.asarray(a) + width_arr = numpy.asarray(width) + size = int(numpy.max(width_arr.flat)) + if numpy.issubdtype(a_arr.dtype, numpy.bytes_): + fillchar = asbytes(fillchar) + return _vec_string( + a_arr, type(a_arr.dtype)(size), 'rjust', (width_arr, fillchar)) + + +@array_function_dispatch(_partition_dispatcher) +def rpartition(a, sep): + """ + Partition (split) each element around the right-most separator. + + Calls `str.rpartition` element-wise. + + For each element in `a`, split the element as the last + occurrence of `sep`, and return 3 strings containing the part + before the separator, the separator itself, and the part after + the separator. If the separator is not found, return 3 strings + containing the string itself, followed by two empty strings. + + Parameters + ---------- + a : array_like of str or unicode + Input array + sep : str or unicode + Right-most separator to split each element in array. + + Returns + ------- + out : ndarray + Output array of string or unicode, depending on input + type. The output array will have an extra dimension with + 3 elements per input element. + + See Also + -------- + str.rpartition + + """ + return _to_bytes_or_str_array( + _vec_string(a, object_, 'rpartition', (sep,)), a) + + +def _split_dispatcher(a, sep=None, maxsplit=None): + return (a,) + + +@array_function_dispatch(_split_dispatcher) +def rsplit(a, sep=None, maxsplit=None): + """ + For each element in `a`, return a list of the words in the + string, using `sep` as the delimiter string. + + Calls `str.rsplit` element-wise. + + Except for splitting from the right, `rsplit` + behaves like `split`. + + Parameters + ---------- + a : array_like of str or unicode + + sep : str or unicode, optional + If `sep` is not specified or None, any whitespace string + is a separator. + maxsplit : int, optional + If `maxsplit` is given, at most `maxsplit` splits are done, + the rightmost ones. + + Returns + ------- + out : ndarray + Array of list objects + + See Also + -------- + str.rsplit, split + + """ + # This will return an array of lists of different sizes, so we + # leave it as an object array + return _vec_string( + a, object_, 'rsplit', [sep] + _clean_args(maxsplit)) + + +def _strip_dispatcher(a, chars=None): + return (a,) + + +@array_function_dispatch(_strip_dispatcher) +def rstrip(a, chars=None): + """ + For each element in `a`, return a copy with the trailing + characters removed. + + Calls `str.rstrip` element-wise. + + Parameters + ---------- + a : array-like of str or unicode + + chars : str or unicode, optional + The `chars` argument is a string specifying the set of + characters to be removed. If omitted or None, the `chars` + argument defaults to removing whitespace. The `chars` argument + is not a suffix; rather, all combinations of its values are + stripped. 
+ + Returns + ------- + out : ndarray + Output array of str or unicode, depending on input type + + See Also + -------- + str.rstrip + + Examples + -------- + >>> c = np.array(['aAaAaA', 'abBABba'], dtype='S7'); c + array(['aAaAaA', 'abBABba'], + dtype='|S7') + >>> np.char.rstrip(c, b'a') + array(['aAaAaA', 'abBABb'], + dtype='|S7') + >>> np.char.rstrip(c, b'A') + array(['aAaAa', 'abBABba'], + dtype='|S7') + + """ + a_arr = numpy.asarray(a) + return _vec_string(a_arr, a_arr.dtype, 'rstrip', (chars,)) + + +@array_function_dispatch(_split_dispatcher) +def split(a, sep=None, maxsplit=None): + """ + For each element in `a`, return a list of the words in the + string, using `sep` as the delimiter string. + + Calls `str.split` element-wise. + + Parameters + ---------- + a : array_like of str or unicode + + sep : str or unicode, optional + If `sep` is not specified or None, any whitespace string is a + separator. + + maxsplit : int, optional + If `maxsplit` is given, at most `maxsplit` splits are done. + + Returns + ------- + out : ndarray + Array of list objects + + See Also + -------- + str.split, rsplit + + """ + # This will return an array of lists of different sizes, so we + # leave it as an object array + return _vec_string( + a, object_, 'split', [sep] + _clean_args(maxsplit)) + + +def _splitlines_dispatcher(a, keepends=None): + return (a,) + + +@array_function_dispatch(_splitlines_dispatcher) +def splitlines(a, keepends=None): + """ + For each element in `a`, return a list of the lines in the + element, breaking at line boundaries. + + Calls `str.splitlines` element-wise. + + Parameters + ---------- + a : array_like of str or unicode + + keepends : bool, optional + Line breaks are not included in the resulting list unless + keepends is given and true. + + Returns + ------- + out : ndarray + Array of list objects + + See Also + -------- + str.splitlines + + """ + return _vec_string( + a, object_, 'splitlines', _clean_args(keepends)) + + +def _startswith_dispatcher(a, prefix, start=None, end=None): + return (a,) + + +@array_function_dispatch(_startswith_dispatcher) +def startswith(a, prefix, start=0, end=None): + """ + Returns a boolean array which is `True` where the string element + in `a` starts with `prefix`, otherwise `False`. + + Calls `str.startswith` element-wise. + + Parameters + ---------- + a : array_like of str or unicode + + prefix : str + + start, end : int, optional + With optional `start`, test beginning at that position. With + optional `end`, stop comparing at that position. + + Returns + ------- + out : ndarray + Array of booleans + + See Also + -------- + str.startswith + + """ + return _vec_string( + a, bool_, 'startswith', [prefix, start] + _clean_args(end)) + + +@array_function_dispatch(_strip_dispatcher) +def strip(a, chars=None): + """ + For each element in `a`, return a copy with the leading and + trailing characters removed. + + Calls `str.strip` element-wise. + + Parameters + ---------- + a : array-like of str or unicode + + chars : str or unicode, optional + The `chars` argument is a string specifying the set of + characters to be removed. If omitted or None, the `chars` + argument defaults to removing whitespace. The `chars` argument + is not a prefix or suffix; rather, all combinations of its + values are stripped. 
+ + Returns + ------- + out : ndarray + Output array of str or unicode, depending on input type + + See Also + -------- + str.strip + + Examples + -------- + >>> c = np.array(['aAaAaA', ' aA ', 'abBABba']) + >>> c + array(['aAaAaA', ' aA ', 'abBABba'], dtype='<U7') + >>> np.char.strip(c) + array(['aAaAaA', 'aA', 'abBABba'], dtype='<U7') + >>> np.char.strip(c, 'a') # 'a' unstripped from c[1] because whitespace leads + array(['AaAaA', ' aA ', 'bBABb'], dtype='<U7') + >>> np.char.strip(c, 'A') # 'A' unstripped from c[1] because (unprinted) ws trails + array(['aAaAa', ' aA ', 'abBABba'], dtype='<U7') + + """ + a_arr = numpy.asarray(a) + return _vec_string(a_arr, a_arr.dtype, 'strip', _clean_args(chars)) + + +@array_function_dispatch(_unary_op_dispatcher) +def swapcase(a): + """ + Return element-wise a copy of the string with + uppercase characters converted to lowercase and vice versa. + + Calls `str.swapcase` element-wise. + + For 8-bit strings, this method is locale-dependent. + + Parameters + ---------- + a : array_like, {str, unicode} + Input array. + + Returns + ------- + out : ndarray, {str, unicode} + Output array of str or unicode, depending on input type + + See Also + -------- + str.swapcase + + Examples + -------- + >>> c=np.array(['a1B c','1b Ca','b Ca1','cA1b'],'S5'); c + array(['a1B c', '1b Ca', 'b Ca1', 'cA1b'], + dtype='|S5') + >>> np.char.swapcase(c) + array(['A1b C', '1B cA', 'B cA1', 'Ca1B'], + dtype='|S5') + + """ + a_arr = numpy.asarray(a) + return _vec_string(a_arr, a_arr.dtype, 'swapcase') + + +@array_function_dispatch(_unary_op_dispatcher) +def title(a): + """ + Return element-wise title cased version of string or unicode. + + Title case words start with uppercase characters, all remaining cased + characters are lowercase. + + Calls `str.title` element-wise. + + For 8-bit strings, this method is locale-dependent. + + Parameters + ---------- + a : array_like, {str, unicode} + Input array. + + Returns + ------- + out : ndarray + Output array of str or unicode, depending on input type + + See Also + -------- + str.title + + Examples + -------- + >>> c=np.array(['a1b c','1b ca','b ca1','ca1b'],'S5'); c + array(['a1b c', '1b ca', 'b ca1', 'ca1b'], + dtype='|S5') + >>> np.char.title(c) + array(['A1B C', '1B Ca', 'B Ca1', 'Ca1B'], + dtype='|S5') + + """ + a_arr = numpy.asarray(a) + return _vec_string(a_arr, a_arr.dtype, 'title') + + +def _translate_dispatcher(a, table, deletechars=None): + return (a,) + + +@array_function_dispatch(_translate_dispatcher) +def translate(a, table, deletechars=None): + """ + For each element in `a`, return a copy of the string where all + characters occurring in the optional argument `deletechars` are + removed, and the remaining characters have been mapped through the + given translation table. + + Calls `str.translate` element-wise. + + Parameters + ---------- + a : array-like of str or unicode + + table : str of length 256 + + deletechars : str + + Returns + ------- + out : ndarray + Output array of str or unicode, depending on input type + + See Also + -------- + str.translate + + """ + a_arr = numpy.asarray(a) + if issubclass(a_arr.dtype.type, str_): + return _vec_string( + a_arr, a_arr.dtype, 'translate', (table,)) + else: + return _vec_string( + a_arr, a_arr.dtype, 'translate', [table] + _clean_args(deletechars)) + + +@array_function_dispatch(_unary_op_dispatcher) +def upper(a): + """ + Return an array with the elements converted to uppercase. + + Calls `str.upper` element-wise. + + For 8-bit strings, this method is locale-dependent. 
+ + Parameters + ---------- + a : array_like, {str, unicode} + Input array. + + Returns + ------- + out : ndarray, {str, unicode} + Output array of str or unicode, depending on input type + + See Also + -------- + str.upper + + Examples + -------- + >>> c = np.array(['a1b c', '1bca', 'bca1']); c + array(['a1b c', '1bca', 'bca1'], dtype='<U5') + >>> np.char.upper(c) + array(['A1B C', '1BCA', 'BCA1'], dtype='<U5') + + """ + a_arr = numpy.asarray(a) + return _vec_string(a_arr, a_arr.dtype, 'upper') + + +def _zfill_dispatcher(a, width): + return (a,) + + +@array_function_dispatch(_zfill_dispatcher) +def zfill(a, width): + """ + Return the numeric string left-filled with zeros + + Calls `str.zfill` element-wise. + + Parameters + ---------- + a : array_like, {str, unicode} + Input array. + width : int + Width of string to left-fill elements in `a`. + + Returns + ------- + out : ndarray, {str, unicode} + Output array of str or unicode, depending on input type + + See Also + -------- + str.zfill + + """ + a_arr = numpy.asarray(a) + width_arr = numpy.asarray(width) + size = int(numpy.max(width_arr.flat)) + return _vec_string( + a_arr, type(a_arr.dtype)(size), 'zfill', (width_arr,)) + + +@array_function_dispatch(_unary_op_dispatcher) +def isnumeric(a): + """ + For each element, return True if there are only numeric + characters in the element. + + Calls `str.isnumeric` element-wise. + + Numeric characters include digit characters, and all characters + that have the Unicode numeric value property, e.g. ``U+2155, + VULGAR FRACTION ONE FIFTH``. + + Parameters + ---------- + a : array_like, unicode + Input array. + + Returns + ------- + out : ndarray, bool + Array of booleans of same shape as `a`. + + See Also + -------- + str.isnumeric + + Examples + -------- + >>> np.char.isnumeric(['123', '123abc', '9.0', '1/4', 'VIII']) + array([ True, False, False, False, False]) + + """ + if not _is_unicode(a): + raise TypeError("isnumeric is only available for Unicode strings and arrays") + return _vec_string(a, bool_, 'isnumeric') + + +@array_function_dispatch(_unary_op_dispatcher) +def isdecimal(a): + """ + For each element, return True if there are only decimal + characters in the element. + + Calls `str.isdecimal` element-wise. + + Decimal characters include digit characters, and all characters + that can be used to form decimal-radix numbers, + e.g. ``U+0660, ARABIC-INDIC DIGIT ZERO``. + + Parameters + ---------- + a : array_like, unicode + Input array. + + Returns + ------- + out : ndarray, bool + Array of booleans identical in shape to `a`. + + See Also + -------- + str.isdecimal + + Examples + -------- + >>> np.char.isdecimal(['12345', '4.99', '123ABC', '']) + array([ True, False, False, False]) + + """ + if not _is_unicode(a): + raise TypeError( + "isdecimal is only available for Unicode strings and arrays") + return _vec_string(a, bool_, 'isdecimal') + + +@set_module('numpy') +class chararray(ndarray): + """ + chararray(shape, itemsize=1, unicode=False, buffer=None, offset=0, + strides=None, order=None) + + Provides a convenient view on arrays of string and unicode values. + + .. note:: + The `chararray` class exists for backwards compatibility with + Numarray, it is not recommended for new development. Starting from numpy + 1.4, if one needs arrays of strings, it is recommended to use arrays of + `dtype` `object_`, `bytes_` or `str_`, and use the free functions + in the `numpy.char` module for fast vectorized string operations. 
+ + Versus a regular NumPy array of type `str` or `unicode`, this + class adds the following functionality: + + 1) values automatically have whitespace removed from the end + when indexed + + 2) comparison operators automatically remove whitespace from the + end when comparing values + + 3) vectorized string operations are provided as methods + (e.g. `.endswith`) and infix operators (e.g. ``"+", "*", "%"``) + + chararrays should be created using `numpy.char.array` or + `numpy.char.asarray`, rather than this constructor directly. + + This constructor creates the array, using `buffer` (with `offset` + and `strides`) if it is not ``None``. If `buffer` is ``None``, then + constructs a new array with `strides` in "C order", unless both + ``len(shape) >= 2`` and ``order='F'``, in which case `strides` + is in "Fortran order". + + Methods + ------- + astype + argsort + copy + count + decode + dump + dumps + encode + endswith + expandtabs + fill + find + flatten + getfield + index + isalnum + isalpha + isdecimal + isdigit + islower + isnumeric + isspace + istitle + isupper + item + join + ljust + lower + lstrip + nonzero + put + ravel + repeat + replace + reshape + resize + rfind + rindex + rjust + rsplit + rstrip + searchsorted + setfield + setflags + sort + split + splitlines + squeeze + startswith + strip + swapaxes + swapcase + take + title + tofile + tolist + tostring + translate + transpose + upper + view + zfill + + Parameters + ---------- + shape : tuple + Shape of the array. + itemsize : int, optional + Length of each array element, in number of characters. Default is 1. + unicode : bool, optional + Are the array elements of type unicode (True) or string (False). + Default is False. + buffer : object exposing the buffer interface or str, optional + Memory address of the start of the array data. Default is None, + in which case a new array is created. + offset : int, optional + Fixed stride displacement from the beginning of an axis? + Default is 0. Needs to be >=0. + strides : array_like of ints, optional + Strides for the array (see `ndarray.strides` for full description). + Default is None. + order : {'C', 'F'}, optional + The order in which the array data is stored in memory: 'C' -> + "row major" order (the default), 'F' -> "column major" + (Fortran) order. + + Examples + -------- + >>> charar = np.chararray((3, 3)) + >>> charar[:] = 'a' + >>> charar + chararray([[b'a', b'a', b'a'], + [b'a', b'a', b'a'], + [b'a', b'a', b'a']], dtype='|S1') + + >>> charar = np.chararray(charar.shape, itemsize=5) + >>> charar[:] = 'abc' + >>> charar + chararray([[b'abc', b'abc', b'abc'], + [b'abc', b'abc', b'abc'], + [b'abc', b'abc', b'abc']], dtype='|S5') + + """ + def __new__(subtype, shape, itemsize=1, unicode=False, buffer=None, + offset=0, strides=None, order='C'): + global _globalvar + + if unicode: + dtype = str_ + else: + dtype = bytes_ + + # force itemsize to be a Python int, since using NumPy integer + # types results in itemsize.itemsize being used as the size of + # strings in the new array. + itemsize = int(itemsize) + + if isinstance(buffer, str): + # unicode objects do not have the buffer interface + filler = buffer + buffer = None + else: + filler = None + + _globalvar = 1 + if buffer is None: + self = ndarray.__new__(subtype, shape, (dtype, itemsize), + order=order) + else: + self = ndarray.__new__(subtype, shape, (dtype, itemsize), + buffer=buffer, + offset=offset, strides=strides, + order=order) + if filler is not None: + self[...] 
= filler + _globalvar = 0 + return self + + def __array_finalize__(self, obj): + # The b is a special case because it is used for reconstructing. + if not _globalvar and self.dtype.char not in 'SUbc': + raise ValueError("Can only create a chararray from string data.") + + def __getitem__(self, obj): + val = ndarray.__getitem__(self, obj) + + if isinstance(val, character): + temp = val.rstrip() + if len(temp) == 0: + val = '' + else: + val = temp + + return val + + # IMPLEMENTATION NOTE: Most of the methods of this class are + # direct delegations to the free functions in this module. + # However, those that return an array of strings should instead + # return a chararray, so some extra wrapping is required. + + def __eq__(self, other): + """ + Return (self == other) element-wise. + + See Also + -------- + equal + """ + return equal(self, other) + + def __ne__(self, other): + """ + Return (self != other) element-wise. + + See Also + -------- + not_equal + """ + return not_equal(self, other) + + def __ge__(self, other): + """ + Return (self >= other) element-wise. + + See Also + -------- + greater_equal + """ + return greater_equal(self, other) + + def __le__(self, other): + """ + Return (self <= other) element-wise. + + See Also + -------- + less_equal + """ + return less_equal(self, other) + + def __gt__(self, other): + """ + Return (self > other) element-wise. + + See Also + -------- + greater + """ + return greater(self, other) + + def __lt__(self, other): + """ + Return (self < other) element-wise. + + See Also + -------- + less + """ + return less(self, other) + + def __add__(self, other): + """ + Return (self + other), that is string concatenation, + element-wise for a pair of array_likes of str or unicode. + + See Also + -------- + add + """ + return asarray(add(self, other)) + + def __radd__(self, other): + """ + Return (other + self), that is string concatenation, + element-wise for a pair of array_likes of `bytes_` or `str_`. + + See Also + -------- + add + """ + return asarray(add(numpy.asarray(other), self)) + + def __mul__(self, i): + """ + Return (self * i), that is string multiple concatenation, + element-wise. + + See Also + -------- + multiply + """ + return asarray(multiply(self, i)) + + def __rmul__(self, i): + """ + Return (self * i), that is string multiple concatenation, + element-wise. + + See Also + -------- + multiply + """ + return asarray(multiply(self, i)) + + def __mod__(self, i): + """ + Return (self % i), that is pre-Python 2.6 string formatting + (interpolation), element-wise for a pair of array_likes of `bytes_` + or `str_`. + + See Also + -------- + mod + """ + return asarray(mod(self, i)) + + def __rmod__(self, other): + return NotImplemented + + def argsort(self, axis=-1, kind=None, order=None): + """ + Return the indices that sort the array lexicographically. + + For full documentation see `numpy.argsort`, for which this method is + in fact merely a "thin wrapper." + + Examples + -------- + >>> c = np.array(['a1b c', '1b ca', 'b ca1', 'Ca1b'], 'S5') + >>> c = c.view(np.chararray); c + chararray(['a1b c', '1b ca', 'b ca1', 'Ca1b'], + dtype='|S5') + >>> c[c.argsort()] + chararray(['1b ca', 'Ca1b', 'a1b c', 'b ca1'], + dtype='|S5') + + """ + return self.__array__().argsort(axis, kind, order) + argsort.__doc__ = ndarray.argsort.__doc__ + + def capitalize(self): + """ + Return a copy of `self` with only the first character of each element + capitalized. 
+ + See Also + -------- + char.capitalize + + """ + return asarray(capitalize(self)) + + def center(self, width, fillchar=' '): + """ + Return a copy of `self` with its elements centered in a + string of length `width`. + + See Also + -------- + center + """ + return asarray(center(self, width, fillchar)) + + def count(self, sub, start=0, end=None): + """ + Returns an array with the number of non-overlapping occurrences of + substring `sub` in the range [`start`, `end`]. + + See Also + -------- + char.count + + """ + return count(self, sub, start, end) + + def decode(self, encoding=None, errors=None): + """ + Calls ``bytes.decode`` element-wise. + + See Also + -------- + char.decode + + """ + return decode(self, encoding, errors) + + def encode(self, encoding=None, errors=None): + """ + Calls `str.encode` element-wise. + + See Also + -------- + char.encode + + """ + return encode(self, encoding, errors) + + def endswith(self, suffix, start=0, end=None): + """ + Returns a boolean array which is `True` where the string element + in `self` ends with `suffix`, otherwise `False`. + + See Also + -------- + char.endswith + + """ + return endswith(self, suffix, start, end) + + def expandtabs(self, tabsize=8): + """ + Return a copy of each string element where all tab characters are + replaced by one or more spaces. + + See Also + -------- + char.expandtabs + + """ + return asarray(expandtabs(self, tabsize)) + + def find(self, sub, start=0, end=None): + """ + For each element, return the lowest index in the string where + substring `sub` is found. + + See Also + -------- + char.find + + """ + return find(self, sub, start, end) + + def index(self, sub, start=0, end=None): + """ + Like `find`, but raises `ValueError` when the substring is not found. + + See Also + -------- + char.index + + """ + return index(self, sub, start, end) + + def isalnum(self): + """ + Returns true for each element if all characters in the string + are alphanumeric and there is at least one character, false + otherwise. + + See Also + -------- + char.isalnum + + """ + return isalnum(self) + + def isalpha(self): + """ + Returns true for each element if all characters in the string + are alphabetic and there is at least one character, false + otherwise. + + See Also + -------- + char.isalpha + + """ + return isalpha(self) + + def isdigit(self): + """ + Returns true for each element if all characters in the string are + digits and there is at least one character, false otherwise. + + See Also + -------- + char.isdigit + + """ + return isdigit(self) + + def islower(self): + """ + Returns true for each element if all cased characters in the + string are lowercase and there is at least one cased character, + false otherwise. + + See Also + -------- + char.islower + + """ + return islower(self) + + def isspace(self): + """ + Returns true for each element if there are only whitespace + characters in the string and there is at least one character, + false otherwise. + + See Also + -------- + char.isspace + + """ + return isspace(self) + + def istitle(self): + """ + Returns true for each element if the element is a titlecased + string and there is at least one character, false otherwise. + + See Also + -------- + char.istitle + + """ + return istitle(self) + + def isupper(self): + """ + Returns true for each element if all cased characters in the + string are uppercase and there is at least one character, false + otherwise. 
+ + See Also + -------- + char.isupper + + """ + return isupper(self) + + def join(self, seq): + """ + Return a string which is the concatenation of the strings in the + sequence `seq`. + + See Also + -------- + char.join + + """ + return join(self, seq) + + def ljust(self, width, fillchar=' '): + """ + Return an array with the elements of `self` left-justified in a + string of length `width`. + + See Also + -------- + char.ljust + + """ + return asarray(ljust(self, width, fillchar)) + + def lower(self): + """ + Return an array with the elements of `self` converted to + lowercase. + + See Also + -------- + char.lower + + """ + return asarray(lower(self)) + + def lstrip(self, chars=None): + """ + For each element in `self`, return a copy with the leading characters + removed. + + See Also + -------- + char.lstrip + + """ + return asarray(lstrip(self, chars)) + + def partition(self, sep): + """ + Partition each element in `self` around `sep`. + + See Also + -------- + partition + """ + return asarray(partition(self, sep)) + + def replace(self, old, new, count=None): + """ + For each element in `self`, return a copy of the string with all + occurrences of substring `old` replaced by `new`. + + See Also + -------- + char.replace + + """ + return asarray(replace(self, old, new, count)) + + def rfind(self, sub, start=0, end=None): + """ + For each element in `self`, return the highest index in the string + where substring `sub` is found, such that `sub` is contained + within [`start`, `end`]. + + See Also + -------- + char.rfind + + """ + return rfind(self, sub, start, end) + + def rindex(self, sub, start=0, end=None): + """ + Like `rfind`, but raises `ValueError` when the substring `sub` is + not found. + + See Also + -------- + char.rindex + + """ + return rindex(self, sub, start, end) + + def rjust(self, width, fillchar=' '): + """ + Return an array with the elements of `self` + right-justified in a string of length `width`. + + See Also + -------- + char.rjust + + """ + return asarray(rjust(self, width, fillchar)) + + def rpartition(self, sep): + """ + Partition each element in `self` around `sep`. + + See Also + -------- + rpartition + """ + return asarray(rpartition(self, sep)) + + def rsplit(self, sep=None, maxsplit=None): + """ + For each element in `self`, return a list of the words in + the string, using `sep` as the delimiter string. + + See Also + -------- + char.rsplit + + """ + return rsplit(self, sep, maxsplit) + + def rstrip(self, chars=None): + """ + For each element in `self`, return a copy with the trailing + characters removed. + + See Also + -------- + char.rstrip + + """ + return asarray(rstrip(self, chars)) + + def split(self, sep=None, maxsplit=None): + """ + For each element in `self`, return a list of the words in the + string, using `sep` as the delimiter string. + + See Also + -------- + char.split + + """ + return split(self, sep, maxsplit) + + def splitlines(self, keepends=None): + """ + For each element in `self`, return a list of the lines in the + element, breaking at line boundaries. + + See Also + -------- + char.splitlines + + """ + return splitlines(self, keepends) + + def startswith(self, prefix, start=0, end=None): + """ + Returns a boolean array which is `True` where the string element + in `self` starts with `prefix`, otherwise `False`. 
+ + See Also + -------- + char.startswith + + """ + return startswith(self, prefix, start, end) + + def strip(self, chars=None): + """ + For each element in `self`, return a copy with the leading and + trailing characters removed. + + See Also + -------- + char.strip + + """ + return asarray(strip(self, chars)) + + def swapcase(self): + """ + For each element in `self`, return a copy of the string with + uppercase characters converted to lowercase and vice versa. + + See Also + -------- + char.swapcase + + """ + return asarray(swapcase(self)) + + def title(self): + """ + For each element in `self`, return a titlecased version of the + string: words start with uppercase characters, all remaining cased + characters are lowercase. + + See Also + -------- + char.title + + """ + return asarray(title(self)) + + def translate(self, table, deletechars=None): + """ + For each element in `self`, return a copy of the string where + all characters occurring in the optional argument + `deletechars` are removed, and the remaining characters have + been mapped through the given translation table. + + See Also + -------- + char.translate + + """ + return asarray(translate(self, table, deletechars)) + + def upper(self): + """ + Return an array with the elements of `self` converted to + uppercase. + + See Also + -------- + char.upper + + """ + return asarray(upper(self)) + + def zfill(self, width): + """ + Return the numeric string left-filled with zeros in a string of + length `width`. + + See Also + -------- + char.zfill + + """ + return asarray(zfill(self, width)) + + def isnumeric(self): + """ + For each element in `self`, return True if there are only + numeric characters in the element. + + See Also + -------- + char.isnumeric + + """ + return isnumeric(self) + + def isdecimal(self): + """ + For each element in `self`, return True if there are only + decimal characters in the element. + + See Also + -------- + char.isdecimal + + """ + return isdecimal(self) + + +@set_module("numpy.char") +def array(obj, itemsize=None, copy=True, unicode=None, order=None): + """ + Create a `chararray`. + + .. note:: + This class is provided for numarray backward-compatibility. + New code (not concerned with numarray compatibility) should use + arrays of type `bytes_` or `str_` and use the free functions + in :mod:`numpy.char <numpy.core.defchararray>` for fast + vectorized string operations instead. + + Versus a regular NumPy array of type `str` or `unicode`, this + class adds the following functionality: + + 1) values automatically have whitespace removed from the end + when indexed + + 2) comparison operators automatically remove whitespace from the + end when comparing values + + 3) vectorized string operations are provided as methods + (e.g. `str.endswith`) and infix operators (e.g. ``+, *, %``) + + Parameters + ---------- + obj : array of str or unicode-like + + itemsize : int, optional + `itemsize` is the number of characters per scalar in the + resulting array. If `itemsize` is None, and `obj` is an + object array or a Python list, the `itemsize` will be + automatically determined. If `itemsize` is provided and `obj` + is of type str or unicode, then the `obj` string will be + chunked into `itemsize` pieces. + + copy : bool, optional + If true (default), then the object is copied. Otherwise, a copy + will only be made if __array__ returns a copy, if obj is a + nested sequence, or if a copy is needed to satisfy any of the other + requirements (`itemsize`, unicode, `order`, etc.). 
+ + unicode : bool, optional + When true, the resulting `chararray` can contain Unicode + characters, when false only 8-bit characters. If unicode is + None and `obj` is one of the following: + + - a `chararray`, + - an ndarray of type `str` or `unicode` + - a Python str or unicode object, + + then the unicode setting of the output array will be + automatically determined. + + order : {'C', 'F', 'A'}, optional + Specify the order of the array. If order is 'C' (default), then the + array will be in C-contiguous order (last-index varies the + fastest). If order is 'F', then the returned array + will be in Fortran-contiguous order (first-index varies the + fastest). If order is 'A', then the returned array may + be in any order (either C-, Fortran-contiguous, or even + discontiguous). + """ + if isinstance(obj, (bytes, str)): + if unicode is None: + if isinstance(obj, str): + unicode = True + else: + unicode = False + + if itemsize is None: + itemsize = len(obj) + shape = len(obj) // itemsize + + return chararray(shape, itemsize=itemsize, unicode=unicode, + buffer=obj, order=order) + + if isinstance(obj, (list, tuple)): + obj = numpy.asarray(obj) + + if isinstance(obj, ndarray) and issubclass(obj.dtype.type, character): + # If we just have a vanilla chararray, create a chararray + # view around it. + if not isinstance(obj, chararray): + obj = obj.view(chararray) + + if itemsize is None: + itemsize = obj.itemsize + # itemsize is in 8-bit chars, so for Unicode, we need + # to divide by the size of a single Unicode character, + # which for NumPy is always 4 + if issubclass(obj.dtype.type, str_): + itemsize //= 4 + + if unicode is None: + if issubclass(obj.dtype.type, str_): + unicode = True + else: + unicode = False + + if unicode: + dtype = str_ + else: + dtype = bytes_ + + if order is not None: + obj = numpy.asarray(obj, order=order) + if (copy or + (itemsize != obj.itemsize) or + (not unicode and isinstance(obj, str_)) or + (unicode and isinstance(obj, bytes_))): + obj = obj.astype((dtype, int(itemsize))) + return obj + + if isinstance(obj, ndarray) and issubclass(obj.dtype.type, object): + if itemsize is None: + # Since no itemsize was specified, convert the input array to + # a list so the ndarray constructor will automatically + # determine the itemsize for us. + obj = obj.tolist() + # Fall through to the default case + + if unicode: + dtype = str_ + else: + dtype = bytes_ + + if itemsize is None: + val = narray(obj, dtype=dtype, order=order, subok=True) + else: + val = narray(obj, dtype=(dtype, itemsize), order=order, subok=True) + return val.view(chararray) + + +@set_module("numpy.char") +def asarray(obj, itemsize=None, unicode=None, order=None): + """ + Convert the input to a `chararray`, copying the data only if + necessary. + + Versus a regular NumPy array of type `str` or `unicode`, this + class adds the following functionality: + + 1) values automatically have whitespace removed from the end + when indexed + + 2) comparison operators automatically remove whitespace from the + end when comparing values + + 3) vectorized string operations are provided as methods + (e.g. `str.endswith`) and infix operators (e.g. ``+``, ``*``,``%``) + + Parameters + ---------- + obj : array of str or unicode-like + + itemsize : int, optional + `itemsize` is the number of characters per scalar in the + resulting array. If `itemsize` is None, and `obj` is an + object array or a Python list, the `itemsize` will be + automatically determined. 
If `itemsize` is provided and `obj` + is of type str or unicode, then the `obj` string will be + chunked into `itemsize` pieces. + + unicode : bool, optional + When true, the resulting `chararray` can contain Unicode + characters, when false only 8-bit characters. If unicode is + None and `obj` is one of the following: + + - a `chararray`, + - an ndarray of type `str` or 'unicode` + - a Python str or unicode object, + + then the unicode setting of the output array will be + automatically determined. + + order : {'C', 'F'}, optional + Specify the order of the array. If order is 'C' (default), then the + array will be in C-contiguous order (last-index varies the + fastest). If order is 'F', then the returned array + will be in Fortran-contiguous order (first-index varies the + fastest). + """ + return array(obj, itemsize, copy=False, + unicode=unicode, order=order) diff --git a/.venv/lib/python3.12/site-packages/numpy/core/defchararray.pyi b/.venv/lib/python3.12/site-packages/numpy/core/defchararray.pyi new file mode 100644 index 00000000..73d90bb2 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/defchararray.pyi @@ -0,0 +1,421 @@ +from typing import ( + Literal as L, + overload, + TypeVar, + Any, +) + +from numpy import ( + chararray as chararray, + dtype, + str_, + bytes_, + int_, + bool_, + object_, + _OrderKACF, +) + +from numpy._typing import ( + NDArray, + _ArrayLikeStr_co as U_co, + _ArrayLikeBytes_co as S_co, + _ArrayLikeInt_co as i_co, + _ArrayLikeBool_co as b_co, +) + +from numpy.core.multiarray import compare_chararrays as compare_chararrays + +_SCT = TypeVar("_SCT", str_, bytes_) +_CharArray = chararray[Any, dtype[_SCT]] + +__all__: list[str] + +# Comparison +@overload +def equal(x1: U_co, x2: U_co) -> NDArray[bool_]: ... +@overload +def equal(x1: S_co, x2: S_co) -> NDArray[bool_]: ... + +@overload +def not_equal(x1: U_co, x2: U_co) -> NDArray[bool_]: ... +@overload +def not_equal(x1: S_co, x2: S_co) -> NDArray[bool_]: ... + +@overload +def greater_equal(x1: U_co, x2: U_co) -> NDArray[bool_]: ... +@overload +def greater_equal(x1: S_co, x2: S_co) -> NDArray[bool_]: ... + +@overload +def less_equal(x1: U_co, x2: U_co) -> NDArray[bool_]: ... +@overload +def less_equal(x1: S_co, x2: S_co) -> NDArray[bool_]: ... + +@overload +def greater(x1: U_co, x2: U_co) -> NDArray[bool_]: ... +@overload +def greater(x1: S_co, x2: S_co) -> NDArray[bool_]: ... + +@overload +def less(x1: U_co, x2: U_co) -> NDArray[bool_]: ... +@overload +def less(x1: S_co, x2: S_co) -> NDArray[bool_]: ... + +# String operations +@overload +def add(x1: U_co, x2: U_co) -> NDArray[str_]: ... +@overload +def add(x1: S_co, x2: S_co) -> NDArray[bytes_]: ... + +@overload +def multiply(a: U_co, i: i_co) -> NDArray[str_]: ... +@overload +def multiply(a: S_co, i: i_co) -> NDArray[bytes_]: ... + +@overload +def mod(a: U_co, value: Any) -> NDArray[str_]: ... +@overload +def mod(a: S_co, value: Any) -> NDArray[bytes_]: ... + +@overload +def capitalize(a: U_co) -> NDArray[str_]: ... +@overload +def capitalize(a: S_co) -> NDArray[bytes_]: ... + +@overload +def center(a: U_co, width: i_co, fillchar: U_co = ...) -> NDArray[str_]: ... +@overload +def center(a: S_co, width: i_co, fillchar: S_co = ...) -> NDArray[bytes_]: ... + +def decode( + a: S_co, + encoding: None | str = ..., + errors: None | str = ..., +) -> NDArray[str_]: ... + +def encode( + a: U_co, + encoding: None | str = ..., + errors: None | str = ..., +) -> NDArray[bytes_]: ... + +@overload +def expandtabs(a: U_co, tabsize: i_co = ...) 
-> NDArray[str_]: ... +@overload +def expandtabs(a: S_co, tabsize: i_co = ...) -> NDArray[bytes_]: ... + +@overload +def join(sep: U_co, seq: U_co) -> NDArray[str_]: ... +@overload +def join(sep: S_co, seq: S_co) -> NDArray[bytes_]: ... + +@overload +def ljust(a: U_co, width: i_co, fillchar: U_co = ...) -> NDArray[str_]: ... +@overload +def ljust(a: S_co, width: i_co, fillchar: S_co = ...) -> NDArray[bytes_]: ... + +@overload +def lower(a: U_co) -> NDArray[str_]: ... +@overload +def lower(a: S_co) -> NDArray[bytes_]: ... + +@overload +def lstrip(a: U_co, chars: None | U_co = ...) -> NDArray[str_]: ... +@overload +def lstrip(a: S_co, chars: None | S_co = ...) -> NDArray[bytes_]: ... + +@overload +def partition(a: U_co, sep: U_co) -> NDArray[str_]: ... +@overload +def partition(a: S_co, sep: S_co) -> NDArray[bytes_]: ... + +@overload +def replace( + a: U_co, + old: U_co, + new: U_co, + count: None | i_co = ..., +) -> NDArray[str_]: ... +@overload +def replace( + a: S_co, + old: S_co, + new: S_co, + count: None | i_co = ..., +) -> NDArray[bytes_]: ... + +@overload +def rjust( + a: U_co, + width: i_co, + fillchar: U_co = ..., +) -> NDArray[str_]: ... +@overload +def rjust( + a: S_co, + width: i_co, + fillchar: S_co = ..., +) -> NDArray[bytes_]: ... + +@overload +def rpartition(a: U_co, sep: U_co) -> NDArray[str_]: ... +@overload +def rpartition(a: S_co, sep: S_co) -> NDArray[bytes_]: ... + +@overload +def rsplit( + a: U_co, + sep: None | U_co = ..., + maxsplit: None | i_co = ..., +) -> NDArray[object_]: ... +@overload +def rsplit( + a: S_co, + sep: None | S_co = ..., + maxsplit: None | i_co = ..., +) -> NDArray[object_]: ... + +@overload +def rstrip(a: U_co, chars: None | U_co = ...) -> NDArray[str_]: ... +@overload +def rstrip(a: S_co, chars: None | S_co = ...) -> NDArray[bytes_]: ... + +@overload +def split( + a: U_co, + sep: None | U_co = ..., + maxsplit: None | i_co = ..., +) -> NDArray[object_]: ... +@overload +def split( + a: S_co, + sep: None | S_co = ..., + maxsplit: None | i_co = ..., +) -> NDArray[object_]: ... + +@overload +def splitlines(a: U_co, keepends: None | b_co = ...) -> NDArray[object_]: ... +@overload +def splitlines(a: S_co, keepends: None | b_co = ...) -> NDArray[object_]: ... + +@overload +def strip(a: U_co, chars: None | U_co = ...) -> NDArray[str_]: ... +@overload +def strip(a: S_co, chars: None | S_co = ...) -> NDArray[bytes_]: ... + +@overload +def swapcase(a: U_co) -> NDArray[str_]: ... +@overload +def swapcase(a: S_co) -> NDArray[bytes_]: ... + +@overload +def title(a: U_co) -> NDArray[str_]: ... +@overload +def title(a: S_co) -> NDArray[bytes_]: ... + +@overload +def translate( + a: U_co, + table: U_co, + deletechars: None | U_co = ..., +) -> NDArray[str_]: ... +@overload +def translate( + a: S_co, + table: S_co, + deletechars: None | S_co = ..., +) -> NDArray[bytes_]: ... + +@overload +def upper(a: U_co) -> NDArray[str_]: ... +@overload +def upper(a: S_co) -> NDArray[bytes_]: ... + +@overload +def zfill(a: U_co, width: i_co) -> NDArray[str_]: ... +@overload +def zfill(a: S_co, width: i_co) -> NDArray[bytes_]: ... + +# String information +@overload +def count( + a: U_co, + sub: U_co, + start: i_co = ..., + end: None | i_co = ..., +) -> NDArray[int_]: ... +@overload +def count( + a: S_co, + sub: S_co, + start: i_co = ..., + end: None | i_co = ..., +) -> NDArray[int_]: ... + +@overload +def endswith( + a: U_co, + suffix: U_co, + start: i_co = ..., + end: None | i_co = ..., +) -> NDArray[bool_]: ... 
+@overload +def endswith( + a: S_co, + suffix: S_co, + start: i_co = ..., + end: None | i_co = ..., +) -> NDArray[bool_]: ... + +@overload +def find( + a: U_co, + sub: U_co, + start: i_co = ..., + end: None | i_co = ..., +) -> NDArray[int_]: ... +@overload +def find( + a: S_co, + sub: S_co, + start: i_co = ..., + end: None | i_co = ..., +) -> NDArray[int_]: ... + +@overload +def index( + a: U_co, + sub: U_co, + start: i_co = ..., + end: None | i_co = ..., +) -> NDArray[int_]: ... +@overload +def index( + a: S_co, + sub: S_co, + start: i_co = ..., + end: None | i_co = ..., +) -> NDArray[int_]: ... + +def isalpha(a: U_co | S_co) -> NDArray[bool_]: ... +def isalnum(a: U_co | S_co) -> NDArray[bool_]: ... +def isdecimal(a: U_co | S_co) -> NDArray[bool_]: ... +def isdigit(a: U_co | S_co) -> NDArray[bool_]: ... +def islower(a: U_co | S_co) -> NDArray[bool_]: ... +def isnumeric(a: U_co | S_co) -> NDArray[bool_]: ... +def isspace(a: U_co | S_co) -> NDArray[bool_]: ... +def istitle(a: U_co | S_co) -> NDArray[bool_]: ... +def isupper(a: U_co | S_co) -> NDArray[bool_]: ... + +@overload +def rfind( + a: U_co, + sub: U_co, + start: i_co = ..., + end: None | i_co = ..., +) -> NDArray[int_]: ... +@overload +def rfind( + a: S_co, + sub: S_co, + start: i_co = ..., + end: None | i_co = ..., +) -> NDArray[int_]: ... + +@overload +def rindex( + a: U_co, + sub: U_co, + start: i_co = ..., + end: None | i_co = ..., +) -> NDArray[int_]: ... +@overload +def rindex( + a: S_co, + sub: S_co, + start: i_co = ..., + end: None | i_co = ..., +) -> NDArray[int_]: ... + +@overload +def startswith( + a: U_co, + prefix: U_co, + start: i_co = ..., + end: None | i_co = ..., +) -> NDArray[bool_]: ... +@overload +def startswith( + a: S_co, + prefix: S_co, + start: i_co = ..., + end: None | i_co = ..., +) -> NDArray[bool_]: ... + +def str_len(A: U_co | S_co) -> NDArray[int_]: ... + +# Overload 1 and 2: str- or bytes-based array-likes +# overload 3: arbitrary object with unicode=False (-> bytes_) +# overload 4: arbitrary object with unicode=True (-> str_) +@overload +def array( + obj: U_co, + itemsize: None | int = ..., + copy: bool = ..., + unicode: L[False] = ..., + order: _OrderKACF = ..., +) -> _CharArray[str_]: ... +@overload +def array( + obj: S_co, + itemsize: None | int = ..., + copy: bool = ..., + unicode: L[False] = ..., + order: _OrderKACF = ..., +) -> _CharArray[bytes_]: ... +@overload +def array( + obj: object, + itemsize: None | int = ..., + copy: bool = ..., + unicode: L[False] = ..., + order: _OrderKACF = ..., +) -> _CharArray[bytes_]: ... +@overload +def array( + obj: object, + itemsize: None | int = ..., + copy: bool = ..., + unicode: L[True] = ..., + order: _OrderKACF = ..., +) -> _CharArray[str_]: ... + +@overload +def asarray( + obj: U_co, + itemsize: None | int = ..., + unicode: L[False] = ..., + order: _OrderKACF = ..., +) -> _CharArray[str_]: ... +@overload +def asarray( + obj: S_co, + itemsize: None | int = ..., + unicode: L[False] = ..., + order: _OrderKACF = ..., +) -> _CharArray[bytes_]: ... +@overload +def asarray( + obj: object, + itemsize: None | int = ..., + unicode: L[False] = ..., + order: _OrderKACF = ..., +) -> _CharArray[bytes_]: ... +@overload +def asarray( + obj: object, + itemsize: None | int = ..., + unicode: L[True] = ..., + order: _OrderKACF = ..., +) -> _CharArray[str_]: ... 
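
The two files above define the `numpy.char` free functions and their type stubs. Below is a minimal usage sketch (an editor's illustration, not part of the diff; it assumes only a standard NumPy installation) exercising the three behaviours the `chararray` docstring lists: trailing-whitespace stripping on indexing, whitespace-insensitive comparison, and vectorized string methods that mirror the free functions in `numpy.char`.

    import numpy as np

    # np.char.array infers bytes_ vs str_ from the input unless `unicode` is passed
    c = np.char.array(['hello ', 'numpy  ', 'char'])
    print(c.dtype)              # <U7, a unicode chararray

    # 1) indexing strips trailing whitespace from the returned element
    print(c[0])                 # 'hello', not 'hello '

    # 2) comparisons also ignore trailing whitespace on both sides
    d = np.char.array(['hello', 'numpy', 'char'])
    print(c == d)               # [ True  True  True]

    # 3) vectorized string methods delegate to the numpy.char free functions
    print(c.upper())            # same result as np.char.upper(c)

    # the free functions work on plain ndarrays as well; note that leading or
    # trailing whitespace keeps 'a' from being stripped, as in the strip docstring
    print(np.char.strip(np.array(['aAaAaA', '  aA  ', 'abBABba']), 'a'))
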
diff --git a/.venv/lib/python3.12/site-packages/numpy/core/einsumfunc.py b/.venv/lib/python3.12/site-packages/numpy/core/einsumfunc.py new file mode 100644 index 00000000..01966f0f --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/einsumfunc.py @@ -0,0 +1,1443 @@ +""" +Implementation of optimized einsum. + +""" +import itertools +import operator + +from numpy.core.multiarray import c_einsum +from numpy.core.numeric import asanyarray, tensordot +from numpy.core.overrides import array_function_dispatch + +__all__ = ['einsum', 'einsum_path'] + +einsum_symbols = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ' +einsum_symbols_set = set(einsum_symbols) + + +def _flop_count(idx_contraction, inner, num_terms, size_dictionary): + """ + Computes the number of FLOPS in the contraction. + + Parameters + ---------- + idx_contraction : iterable + The indices involved in the contraction + inner : bool + Does this contraction require an inner product? + num_terms : int + The number of terms in a contraction + size_dictionary : dict + The size of each of the indices in idx_contraction + + Returns + ------- + flop_count : int + The total number of FLOPS required for the contraction. + + Examples + -------- + + >>> _flop_count('abc', False, 1, {'a': 2, 'b':3, 'c':5}) + 30 + + >>> _flop_count('abc', True, 2, {'a': 2, 'b':3, 'c':5}) + 60 + + """ + + overall_size = _compute_size_by_dict(idx_contraction, size_dictionary) + op_factor = max(1, num_terms - 1) + if inner: + op_factor += 1 + + return overall_size * op_factor + +def _compute_size_by_dict(indices, idx_dict): + """ + Computes the product of the elements in indices based on the dictionary + idx_dict. + + Parameters + ---------- + indices : iterable + Indices to base the product on. + idx_dict : dictionary + Dictionary of index sizes + + Returns + ------- + ret : int + The resulting product. + + Examples + -------- + >>> _compute_size_by_dict('abbc', {'a': 2, 'b':3, 'c':5}) + 90 + + """ + ret = 1 + for i in indices: + ret *= idx_dict[i] + return ret + + +def _find_contraction(positions, input_sets, output_set): + """ + Finds the contraction for a given set of input and output sets. + + Parameters + ---------- + positions : iterable + Integer positions of terms used in the contraction. 
+ input_sets : list + List of sets that represent the lhs side of the einsum subscript + output_set : set + Set that represents the rhs side of the overall einsum subscript + + Returns + ------- + new_result : set + The indices of the resulting contraction + remaining : list + List of sets that have not been contracted, the new set is appended to + the end of this list + idx_removed : set + Indices removed from the entire contraction + idx_contraction : set + The indices used in the current contraction + + Examples + -------- + + # A simple dot product test case + >>> pos = (0, 1) + >>> isets = [set('ab'), set('bc')] + >>> oset = set('ac') + >>> _find_contraction(pos, isets, oset) + ({'a', 'c'}, [{'a', 'c'}], {'b'}, {'a', 'b', 'c'}) + + # A more complex case with additional terms in the contraction + >>> pos = (0, 2) + >>> isets = [set('abd'), set('ac'), set('bdc')] + >>> oset = set('ac') + >>> _find_contraction(pos, isets, oset) + ({'a', 'c'}, [{'a', 'c'}, {'a', 'c'}], {'b', 'd'}, {'a', 'b', 'c', 'd'}) + """ + + idx_contract = set() + idx_remain = output_set.copy() + remaining = [] + for ind, value in enumerate(input_sets): + if ind in positions: + idx_contract |= value + else: + remaining.append(value) + idx_remain |= value + + new_result = idx_remain & idx_contract + idx_removed = (idx_contract - new_result) + remaining.append(new_result) + + return (new_result, remaining, idx_removed, idx_contract) + + +def _optimal_path(input_sets, output_set, idx_dict, memory_limit): + """ + Computes all possible pair contractions, sieves the results based + on ``memory_limit`` and returns the lowest cost path. This algorithm + scales factorial with respect to the elements in the list ``input_sets``. + + Parameters + ---------- + input_sets : list + List of sets that represent the lhs side of the einsum subscript + output_set : set + Set that represents the rhs side of the overall einsum subscript + idx_dict : dictionary + Dictionary of index sizes + memory_limit : int + The maximum number of elements in a temporary array + + Returns + ------- + path : list + The optimal contraction order within the memory limit constraint. 
+ + Examples + -------- + >>> isets = [set('abd'), set('ac'), set('bdc')] + >>> oset = set() + >>> idx_sizes = {'a': 1, 'b':2, 'c':3, 'd':4} + >>> _optimal_path(isets, oset, idx_sizes, 5000) + [(0, 2), (0, 1)] + """ + + full_results = [(0, [], input_sets)] + for iteration in range(len(input_sets) - 1): + iter_results = [] + + # Compute all unique pairs + for curr in full_results: + cost, positions, remaining = curr + for con in itertools.combinations(range(len(input_sets) - iteration), 2): + + # Find the contraction + cont = _find_contraction(con, remaining, output_set) + new_result, new_input_sets, idx_removed, idx_contract = cont + + # Sieve the results based on memory_limit + new_size = _compute_size_by_dict(new_result, idx_dict) + if new_size > memory_limit: + continue + + # Build (total_cost, positions, indices_remaining) + total_cost = cost + _flop_count(idx_contract, idx_removed, len(con), idx_dict) + new_pos = positions + [con] + iter_results.append((total_cost, new_pos, new_input_sets)) + + # Update combinatorial list, if we did not find anything return best + # path + remaining contractions + if iter_results: + full_results = iter_results + else: + path = min(full_results, key=lambda x: x[0])[1] + path += [tuple(range(len(input_sets) - iteration))] + return path + + # If we have not found anything return single einsum contraction + if len(full_results) == 0: + return [tuple(range(len(input_sets)))] + + path = min(full_results, key=lambda x: x[0])[1] + return path + +def _parse_possible_contraction(positions, input_sets, output_set, idx_dict, memory_limit, path_cost, naive_cost): + """Compute the cost (removed size + flops) and resultant indices for + performing the contraction specified by ``positions``. + + Parameters + ---------- + positions : tuple of int + The locations of the proposed tensors to contract. + input_sets : list of sets + The indices found on each tensors. + output_set : set + The output indices of the expression. + idx_dict : dict + Mapping of each index to its size. + memory_limit : int + The total allowed size for an intermediary tensor. + path_cost : int + The contraction cost so far. + naive_cost : int + The cost of the unoptimized expression. + + Returns + ------- + cost : (int, int) + A tuple containing the size of any indices removed, and the flop cost. + positions : tuple of int + The locations of the proposed tensors to contract. + new_input_sets : list of sets + The resulting new list of indices if this proposed contraction is performed. 
+ + """ + + # Find the contraction + contract = _find_contraction(positions, input_sets, output_set) + idx_result, new_input_sets, idx_removed, idx_contract = contract + + # Sieve the results based on memory_limit + new_size = _compute_size_by_dict(idx_result, idx_dict) + if new_size > memory_limit: + return None + + # Build sort tuple + old_sizes = (_compute_size_by_dict(input_sets[p], idx_dict) for p in positions) + removed_size = sum(old_sizes) - new_size + + # NB: removed_size used to be just the size of any removed indices i.e.: + # helpers.compute_size_by_dict(idx_removed, idx_dict) + cost = _flop_count(idx_contract, idx_removed, len(positions), idx_dict) + sort = (-removed_size, cost) + + # Sieve based on total cost as well + if (path_cost + cost) > naive_cost: + return None + + # Add contraction to possible choices + return [sort, positions, new_input_sets] + + +def _update_other_results(results, best): + """Update the positions and provisional input_sets of ``results`` based on + performing the contraction result ``best``. Remove any involving the tensors + contracted. + + Parameters + ---------- + results : list + List of contraction results produced by ``_parse_possible_contraction``. + best : list + The best contraction of ``results`` i.e. the one that will be performed. + + Returns + ------- + mod_results : list + The list of modified results, updated with outcome of ``best`` contraction. + """ + + best_con = best[1] + bx, by = best_con + mod_results = [] + + for cost, (x, y), con_sets in results: + + # Ignore results involving tensors just contracted + if x in best_con or y in best_con: + continue + + # Update the input_sets + del con_sets[by - int(by > x) - int(by > y)] + del con_sets[bx - int(bx > x) - int(bx > y)] + con_sets.insert(-1, best[2][-1]) + + # Update the position indices + mod_con = x - int(x > bx) - int(x > by), y - int(y > bx) - int(y > by) + mod_results.append((cost, mod_con, con_sets)) + + return mod_results + +def _greedy_path(input_sets, output_set, idx_dict, memory_limit): + """ + Finds the path by contracting the best pair until the input list is + exhausted. The best pair is found by minimizing the tuple + ``(-prod(indices_removed), cost)``. What this amounts to is prioritizing + matrix multiplication or inner product operations, then Hadamard like + operations, and finally outer operations. Outer products are limited by + ``memory_limit``. This algorithm scales cubically with respect to the + number of elements in the list ``input_sets``. + + Parameters + ---------- + input_sets : list + List of sets that represent the lhs side of the einsum subscript + output_set : set + Set that represents the rhs side of the overall einsum subscript + idx_dict : dictionary + Dictionary of index sizes + memory_limit : int + The maximum number of elements in a temporary array + + Returns + ------- + path : list + The greedy contraction order within the memory limit constraint. 
+ + Examples + -------- + >>> isets = [set('abd'), set('ac'), set('bdc')] + >>> oset = set() + >>> idx_sizes = {'a': 1, 'b':2, 'c':3, 'd':4} + >>> _greedy_path(isets, oset, idx_sizes, 5000) + [(0, 2), (0, 1)] + """ + + # Handle trivial cases that leaked through + if len(input_sets) == 1: + return [(0,)] + elif len(input_sets) == 2: + return [(0, 1)] + + # Build up a naive cost + contract = _find_contraction(range(len(input_sets)), input_sets, output_set) + idx_result, new_input_sets, idx_removed, idx_contract = contract + naive_cost = _flop_count(idx_contract, idx_removed, len(input_sets), idx_dict) + + # Initially iterate over all pairs + comb_iter = itertools.combinations(range(len(input_sets)), 2) + known_contractions = [] + + path_cost = 0 + path = [] + + for iteration in range(len(input_sets) - 1): + + # Iterate over all pairs on first step, only previously found pairs on subsequent steps + for positions in comb_iter: + + # Always initially ignore outer products + if input_sets[positions[0]].isdisjoint(input_sets[positions[1]]): + continue + + result = _parse_possible_contraction(positions, input_sets, output_set, idx_dict, memory_limit, path_cost, + naive_cost) + if result is not None: + known_contractions.append(result) + + # If we do not have a inner contraction, rescan pairs including outer products + if len(known_contractions) == 0: + + # Then check the outer products + for positions in itertools.combinations(range(len(input_sets)), 2): + result = _parse_possible_contraction(positions, input_sets, output_set, idx_dict, memory_limit, + path_cost, naive_cost) + if result is not None: + known_contractions.append(result) + + # If we still did not find any remaining contractions, default back to einsum like behavior + if len(known_contractions) == 0: + path.append(tuple(range(len(input_sets)))) + break + + # Sort based on first index + best = min(known_contractions, key=lambda x: x[0]) + + # Now propagate as many unused contractions as possible to next iteration + known_contractions = _update_other_results(known_contractions, best) + + # Next iteration only compute contractions with the new tensor + # All other contractions have been accounted for + input_sets = best[2] + new_tensor_pos = len(input_sets) - 1 + comb_iter = ((i, new_tensor_pos) for i in range(new_tensor_pos)) + + # Update path and total cost + path.append(best[1]) + path_cost += best[0][1] + + return path + + +def _can_dot(inputs, result, idx_removed): + """ + Checks if we can use BLAS (np.tensordot) call and its beneficial to do so. + + Parameters + ---------- + inputs : list of str + Specifies the subscripts for summation. + result : str + Resulting summation. + idx_removed : set + Indices that are removed in the summation + + + Returns + ------- + type : bool + Returns true if BLAS should and can be used, else False + + Notes + ----- + If the operations is BLAS level 1 or 2 and is not already aligned + we default back to einsum as the memory movement to copy is more + costly than the operation itself. 
+ + + Examples + -------- + + # Standard GEMM operation + >>> _can_dot(['ij', 'jk'], 'ik', set('j')) + True + + # Can use the standard BLAS, but requires odd data movement + >>> _can_dot(['ijj', 'jk'], 'ik', set('j')) + False + + # DDOT where the memory is not aligned + >>> _can_dot(['ijk', 'ikj'], '', set('ijk')) + False + + """ + + # All `dot` calls remove indices + if len(idx_removed) == 0: + return False + + # BLAS can only handle two operands + if len(inputs) != 2: + return False + + input_left, input_right = inputs + + for c in set(input_left + input_right): + # can't deal with repeated indices on same input or more than 2 total + nl, nr = input_left.count(c), input_right.count(c) + if (nl > 1) or (nr > 1) or (nl + nr > 2): + return False + + # can't do implicit summation or dimension collapse e.g. + # "ab,bc->c" (implicitly sum over 'a') + # "ab,ca->ca" (take diagonal of 'a') + if nl + nr - 1 == int(c in result): + return False + + # Build a few temporaries + set_left = set(input_left) + set_right = set(input_right) + keep_left = set_left - idx_removed + keep_right = set_right - idx_removed + rs = len(idx_removed) + + # At this point we are a DOT, GEMV, or GEMM operation + + # Handle inner products + + # DDOT with aligned data + if input_left == input_right: + return True + + # DDOT without aligned data (better to use einsum) + if set_left == set_right: + return False + + # Handle the 4 possible (aligned) GEMV or GEMM cases + + # GEMM or GEMV no transpose + if input_left[-rs:] == input_right[:rs]: + return True + + # GEMM or GEMV transpose both + if input_left[:rs] == input_right[-rs:]: + return True + + # GEMM or GEMV transpose right + if input_left[-rs:] == input_right[-rs:]: + return True + + # GEMM or GEMV transpose left + if input_left[:rs] == input_right[:rs]: + return True + + # Einsum is faster than GEMV if we have to copy data + if not keep_left or not keep_right: + return False + + # We are a matrix-matrix product, but we need to copy data + return True + + +def _parse_einsum_input(operands): + """ + A reproduction of einsum c side einsum parsing in python. + + Returns + ------- + input_strings : str + Parsed input strings + output_string : str + Parsed output string + operands : list of array_like + The operands to use in the numpy contraction + + Examples + -------- + The operand list is simplified to reduce printing: + + >>> np.random.seed(123) + >>> a = np.random.rand(4, 4) + >>> b = np.random.rand(4, 4, 4) + >>> _parse_einsum_input(('...a,...a->...', a, b)) + ('za,xza', 'xz', [a, b]) # may vary + + >>> _parse_einsum_input((a, [Ellipsis, 0], b, [Ellipsis, 0])) + ('za,xza', 'xz', [a, b]) # may vary + """ + + if len(operands) == 0: + raise ValueError("No input operands") + + if isinstance(operands[0], str): + subscripts = operands[0].replace(" ", "") + operands = [asanyarray(v) for v in operands[1:]] + + # Ensure all characters are valid + for s in subscripts: + if s in '.,->': + continue + if s not in einsum_symbols: + raise ValueError("Character %s is not a valid symbol." % s) + + else: + tmp_operands = list(operands) + operand_list = [] + subscript_list = [] + for p in range(len(operands) // 2): + operand_list.append(tmp_operands.pop(0)) + subscript_list.append(tmp_operands.pop(0)) + + output_list = tmp_operands[-1] if len(tmp_operands) else None + operands = [asanyarray(v) for v in operand_list] + subscripts = "" + last = len(subscript_list) - 1 + for num, sub in enumerate(subscript_list): + for s in sub: + if s is Ellipsis: + subscripts += "..." 
+ else: + try: + s = operator.index(s) + except TypeError as e: + raise TypeError("For this input type lists must contain " + "either int or Ellipsis") from e + subscripts += einsum_symbols[s] + if num != last: + subscripts += "," + + if output_list is not None: + subscripts += "->" + for s in output_list: + if s is Ellipsis: + subscripts += "..." + else: + try: + s = operator.index(s) + except TypeError as e: + raise TypeError("For this input type lists must contain " + "either int or Ellipsis") from e + subscripts += einsum_symbols[s] + # Check for proper "->" + if ("-" in subscripts) or (">" in subscripts): + invalid = (subscripts.count("-") > 1) or (subscripts.count(">") > 1) + if invalid or (subscripts.count("->") != 1): + raise ValueError("Subscripts can only contain one '->'.") + + # Parse ellipses + if "." in subscripts: + used = subscripts.replace(".", "").replace(",", "").replace("->", "") + unused = list(einsum_symbols_set - set(used)) + ellipse_inds = "".join(unused) + longest = 0 + + if "->" in subscripts: + input_tmp, output_sub = subscripts.split("->") + split_subscripts = input_tmp.split(",") + out_sub = True + else: + split_subscripts = subscripts.split(',') + out_sub = False + + for num, sub in enumerate(split_subscripts): + if "." in sub: + if (sub.count(".") != 3) or (sub.count("...") != 1): + raise ValueError("Invalid Ellipses.") + + # Take into account numerical values + if operands[num].shape == (): + ellipse_count = 0 + else: + ellipse_count = max(operands[num].ndim, 1) + ellipse_count -= (len(sub) - 3) + + if ellipse_count > longest: + longest = ellipse_count + + if ellipse_count < 0: + raise ValueError("Ellipses lengths do not match.") + elif ellipse_count == 0: + split_subscripts[num] = sub.replace('...', '') + else: + rep_inds = ellipse_inds[-ellipse_count:] + split_subscripts[num] = sub.replace('...', rep_inds) + + subscripts = ",".join(split_subscripts) + if longest == 0: + out_ellipse = "" + else: + out_ellipse = ellipse_inds[-longest:] + + if out_sub: + subscripts += "->" + output_sub.replace("...", out_ellipse) + else: + # Special care for outputless ellipses + output_subscript = "" + tmp_subscripts = subscripts.replace(",", "") + for s in sorted(set(tmp_subscripts)): + if s not in (einsum_symbols): + raise ValueError("Character %s is not a valid symbol." % s) + if tmp_subscripts.count(s) == 1: + output_subscript += s + normal_inds = ''.join(sorted(set(output_subscript) - + set(out_ellipse))) + + subscripts += "->" + out_ellipse + normal_inds + + # Build output string if does not exist + if "->" in subscripts: + input_subscripts, output_subscript = subscripts.split("->") + else: + input_subscripts = subscripts + # Build output subscripts + tmp_subscripts = subscripts.replace(",", "") + output_subscript = "" + for s in sorted(set(tmp_subscripts)): + if s not in einsum_symbols: + raise ValueError("Character %s is not a valid symbol." 
% s) + if tmp_subscripts.count(s) == 1: + output_subscript += s + + # Make sure output subscripts are in the input + for char in output_subscript: + if char not in input_subscripts: + raise ValueError("Output character %s did not appear in the input" + % char) + + # Make sure number operands is equivalent to the number of terms + if len(input_subscripts.split(',')) != len(operands): + raise ValueError("Number of einsum subscripts must be equal to the " + "number of operands.") + + return (input_subscripts, output_subscript, operands) + + +def _einsum_path_dispatcher(*operands, optimize=None, einsum_call=None): + # NOTE: technically, we should only dispatch on array-like arguments, not + # subscripts (given as strings). But separating operands into + # arrays/subscripts is a little tricky/slow (given einsum's two supported + # signatures), so as a practical shortcut we dispatch on everything. + # Strings will be ignored for dispatching since they don't define + # __array_function__. + return operands + + +@array_function_dispatch(_einsum_path_dispatcher, module='numpy') +def einsum_path(*operands, optimize='greedy', einsum_call=False): + """ + einsum_path(subscripts, *operands, optimize='greedy') + + Evaluates the lowest cost contraction order for an einsum expression by + considering the creation of intermediate arrays. + + Parameters + ---------- + subscripts : str + Specifies the subscripts for summation. + *operands : list of array_like + These are the arrays for the operation. + optimize : {bool, list, tuple, 'greedy', 'optimal'} + Choose the type of path. If a tuple is provided, the second argument is + assumed to be the maximum intermediate size created. If only a single + argument is provided the largest input or output array size is used + as a maximum intermediate size. + + * if a list is given that starts with ``einsum_path``, uses this as the + contraction path + * if False no optimization is taken + * if True defaults to the 'greedy' algorithm + * 'optimal' An algorithm that combinatorially explores all possible + ways of contracting the listed tensors and chooses the least costly + path. Scales exponentially with the number of terms in the + contraction. + * 'greedy' An algorithm that chooses the best pair contraction + at each step. Effectively, this algorithm searches the largest inner, + Hadamard, and then outer products at each step. Scales cubically with + the number of terms in the contraction. Equivalent to the 'optimal' + path for most contractions. + + Default is 'greedy'. + + Returns + ------- + path : list of tuples + A list representation of the einsum path. + string_repr : str + A printable representation of the einsum path. + + Notes + ----- + The resulting path indicates which terms of the input contraction should be + contracted first, the result of this contraction is then appended to the + end of the contraction list. This list can then be iterated over until all + intermediate contractions are complete. + + See Also + -------- + einsum, linalg.multi_dot + + Examples + -------- + + We can begin with a chain dot example. In this case, it is optimal to + contract the ``b`` and ``c`` tensors first as represented by the first + element of the path ``(1, 2)``. The resulting tensor is added to the end + of the contraction and the remaining contraction ``(0, 1)`` is then + completed. 
+ + >>> np.random.seed(123) + >>> a = np.random.rand(2, 2) + >>> b = np.random.rand(2, 5) + >>> c = np.random.rand(5, 2) + >>> path_info = np.einsum_path('ij,jk,kl->il', a, b, c, optimize='greedy') + >>> print(path_info[0]) + ['einsum_path', (1, 2), (0, 1)] + >>> print(path_info[1]) + Complete contraction: ij,jk,kl->il # may vary + Naive scaling: 4 + Optimized scaling: 3 + Naive FLOP count: 1.600e+02 + Optimized FLOP count: 5.600e+01 + Theoretical speedup: 2.857 + Largest intermediate: 4.000e+00 elements + ------------------------------------------------------------------------- + scaling current remaining + ------------------------------------------------------------------------- + 3 kl,jk->jl ij,jl->il + 3 jl,ij->il il->il + + + A more complex index transformation example. + + >>> I = np.random.rand(10, 10, 10, 10) + >>> C = np.random.rand(10, 10) + >>> path_info = np.einsum_path('ea,fb,abcd,gc,hd->efgh', C, C, I, C, C, + ... optimize='greedy') + + >>> print(path_info[0]) + ['einsum_path', (0, 2), (0, 3), (0, 2), (0, 1)] + >>> print(path_info[1]) + Complete contraction: ea,fb,abcd,gc,hd->efgh # may vary + Naive scaling: 8 + Optimized scaling: 5 + Naive FLOP count: 8.000e+08 + Optimized FLOP count: 8.000e+05 + Theoretical speedup: 1000.000 + Largest intermediate: 1.000e+04 elements + -------------------------------------------------------------------------- + scaling current remaining + -------------------------------------------------------------------------- + 5 abcd,ea->bcde fb,gc,hd,bcde->efgh + 5 bcde,fb->cdef gc,hd,cdef->efgh + 5 cdef,gc->defg hd,defg->efgh + 5 defg,hd->efgh efgh->efgh + """ + + # Figure out what the path really is + path_type = optimize + if path_type is True: + path_type = 'greedy' + if path_type is None: + path_type = False + + explicit_einsum_path = False + memory_limit = None + + # No optimization or a named path algorithm + if (path_type is False) or isinstance(path_type, str): + pass + + # Given an explicit path + elif len(path_type) and (path_type[0] == 'einsum_path'): + explicit_einsum_path = True + + # Path tuple with memory limit + elif ((len(path_type) == 2) and isinstance(path_type[0], str) and + isinstance(path_type[1], (int, float))): + memory_limit = int(path_type[1]) + path_type = path_type[0] + + else: + raise TypeError("Did not understand the path: %s" % str(path_type)) + + # Hidden option, only einsum should call this + einsum_call_arg = einsum_call + + # Python side parsing + input_subscripts, output_subscript, operands = _parse_einsum_input(operands) + + # Build a few useful list and sets + input_list = input_subscripts.split(',') + input_sets = [set(x) for x in input_list] + output_set = set(output_subscript) + indices = set(input_subscripts.replace(',', '')) + + # Get length of each unique dimension and ensure all dimensions are correct + dimension_dict = {} + broadcast_indices = [[] for x in range(len(input_list))] + for tnum, term in enumerate(input_list): + sh = operands[tnum].shape + if len(sh) != len(term): + raise ValueError("Einstein sum subscript %s does not contain the " + "correct number of indices for operand %d." 
+ % (input_subscripts[tnum], tnum)) + for cnum, char in enumerate(term): + dim = sh[cnum] + + # Build out broadcast indices + if dim == 1: + broadcast_indices[tnum].append(char) + + if char in dimension_dict.keys(): + # For broadcasting cases we always want the largest dim size + if dimension_dict[char] == 1: + dimension_dict[char] = dim + elif dim not in (1, dimension_dict[char]): + raise ValueError("Size of label '%s' for operand %d (%d) " + "does not match previous terms (%d)." + % (char, tnum, dimension_dict[char], dim)) + else: + dimension_dict[char] = dim + + # Convert broadcast inds to sets + broadcast_indices = [set(x) for x in broadcast_indices] + + # Compute size of each input array plus the output array + size_list = [_compute_size_by_dict(term, dimension_dict) + for term in input_list + [output_subscript]] + max_size = max(size_list) + + if memory_limit is None: + memory_arg = max_size + else: + memory_arg = memory_limit + + # Compute naive cost + # This isn't quite right, need to look into exactly how einsum does this + inner_product = (sum(len(x) for x in input_sets) - len(indices)) > 0 + naive_cost = _flop_count(indices, inner_product, len(input_list), dimension_dict) + + # Compute the path + if explicit_einsum_path: + path = path_type[1:] + elif ( + (path_type is False) + or (len(input_list) in [1, 2]) + or (indices == output_set) + ): + # Nothing to be optimized, leave it to einsum + path = [tuple(range(len(input_list)))] + elif path_type == "greedy": + path = _greedy_path(input_sets, output_set, dimension_dict, memory_arg) + elif path_type == "optimal": + path = _optimal_path(input_sets, output_set, dimension_dict, memory_arg) + else: + raise KeyError("Path name %s not found", path_type) + + cost_list, scale_list, size_list, contraction_list = [], [], [], [] + + # Build contraction tuple (positions, gemm, einsum_str, remaining) + for cnum, contract_inds in enumerate(path): + # Make sure we remove inds from right to left + contract_inds = tuple(sorted(list(contract_inds), reverse=True)) + + contract = _find_contraction(contract_inds, input_sets, output_set) + out_inds, input_sets, idx_removed, idx_contract = contract + + cost = _flop_count(idx_contract, idx_removed, len(contract_inds), dimension_dict) + cost_list.append(cost) + scale_list.append(len(idx_contract)) + size_list.append(_compute_size_by_dict(out_inds, dimension_dict)) + + bcast = set() + tmp_inputs = [] + for x in contract_inds: + tmp_inputs.append(input_list.pop(x)) + bcast |= broadcast_indices.pop(x) + + new_bcast_inds = bcast - idx_removed + + # If we're broadcasting, nix blas + if not len(idx_removed & bcast): + do_blas = _can_dot(tmp_inputs, out_inds, idx_removed) + else: + do_blas = False + + # Last contraction + if (cnum - len(path)) == -1: + idx_result = output_subscript + else: + sort_result = [(dimension_dict[ind], ind) for ind in out_inds] + idx_result = "".join([x[1] for x in sorted(sort_result)]) + + input_list.append(idx_result) + broadcast_indices.append(new_bcast_inds) + einsum_str = ",".join(tmp_inputs) + "->" + idx_result + + contraction = (contract_inds, idx_removed, einsum_str, input_list[:], do_blas) + contraction_list.append(contraction) + + opt_cost = sum(cost_list) + 1 + + if len(input_list) != 1: + # Explicit "einsum_path" is usually trusted, but we detect this kind of + # mistake in order to prevent from returning an intermediate value. 
+ raise RuntimeError( + "Invalid einsum_path is specified: {} more operands has to be " + "contracted.".format(len(input_list) - 1)) + + if einsum_call_arg: + return (operands, contraction_list) + + # Return the path along with a nice string representation + overall_contraction = input_subscripts + "->" + output_subscript + header = ("scaling", "current", "remaining") + + speedup = naive_cost / opt_cost + max_i = max(size_list) + + path_print = " Complete contraction: %s\n" % overall_contraction + path_print += " Naive scaling: %d\n" % len(indices) + path_print += " Optimized scaling: %d\n" % max(scale_list) + path_print += " Naive FLOP count: %.3e\n" % naive_cost + path_print += " Optimized FLOP count: %.3e\n" % opt_cost + path_print += " Theoretical speedup: %3.3f\n" % speedup + path_print += " Largest intermediate: %.3e elements\n" % max_i + path_print += "-" * 74 + "\n" + path_print += "%6s %24s %40s\n" % header + path_print += "-" * 74 + + for n, contraction in enumerate(contraction_list): + inds, idx_rm, einsum_str, remaining, blas = contraction + remaining_str = ",".join(remaining) + "->" + output_subscript + path_run = (scale_list[n], einsum_str, remaining_str) + path_print += "\n%4d %24s %40s" % path_run + + path = ['einsum_path'] + path + return (path, path_print) + + +def _einsum_dispatcher(*operands, out=None, optimize=None, **kwargs): + # Arguably we dispatch on more arguments than we really should; see note in + # _einsum_path_dispatcher for why. + yield from operands + yield out + + +# Rewrite einsum to handle different cases +@array_function_dispatch(_einsum_dispatcher, module='numpy') +def einsum(*operands, out=None, optimize=False, **kwargs): + """ + einsum(subscripts, *operands, out=None, dtype=None, order='K', + casting='safe', optimize=False) + + Evaluates the Einstein summation convention on the operands. + + Using the Einstein summation convention, many common multi-dimensional, + linear algebraic array operations can be represented in a simple fashion. + In *implicit* mode `einsum` computes these values. + + In *explicit* mode, `einsum` provides further flexibility to compute + other array operations that might not be considered classical Einstein + summation operations, by disabling, or forcing summation over specified + subscript labels. + + See the notes and examples for clarification. + + Parameters + ---------- + subscripts : str + Specifies the subscripts for summation as comma separated list of + subscript labels. An implicit (classical Einstein summation) + calculation is performed unless the explicit indicator '->' is + included as well as subscript labels of the precise output form. + operands : list of array_like + These are the arrays for the operation. + out : ndarray, optional + If provided, the calculation is done into this array. + dtype : {data-type, None}, optional + If provided, forces the calculation to use the data type specified. + Note that you may have to also give a more liberal `casting` + parameter to allow the conversions. Default is None. + order : {'C', 'F', 'A', 'K'}, optional + Controls the memory layout of the output. 'C' means it should + be C contiguous. 'F' means it should be Fortran contiguous, + 'A' means it should be 'F' if the inputs are all 'F', 'C' otherwise. + 'K' means it should be as close to the layout as the inputs as + is possible, including arbitrarily permuted axes. + Default is 'K'. + casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional + Controls what kind of data casting may occur. 
Setting this to + 'unsafe' is not recommended, as it can adversely affect accumulations. + + * 'no' means the data types should not be cast at all. + * 'equiv' means only byte-order changes are allowed. + * 'safe' means only casts which can preserve values are allowed. + * 'same_kind' means only safe casts or casts within a kind, + like float64 to float32, are allowed. + * 'unsafe' means any data conversions may be done. + + Default is 'safe'. + optimize : {False, True, 'greedy', 'optimal'}, optional + Controls if intermediate optimization should occur. No optimization + will occur if False and True will default to the 'greedy' algorithm. + Also accepts an explicit contraction list from the ``np.einsum_path`` + function. See ``np.einsum_path`` for more details. Defaults to False. + + Returns + ------- + output : ndarray + The calculation based on the Einstein summation convention. + + See Also + -------- + einsum_path, dot, inner, outer, tensordot, linalg.multi_dot + einops : + similar verbose interface is provided by + `einops <https://github.com/arogozhnikov/einops>`_ package to cover + additional operations: transpose, reshape/flatten, repeat/tile, + squeeze/unsqueeze and reductions. + opt_einsum : + `opt_einsum <https://optimized-einsum.readthedocs.io/en/stable/>`_ + optimizes contraction order for einsum-like expressions + in backend-agnostic manner. + + Notes + ----- + .. versionadded:: 1.6.0 + + The Einstein summation convention can be used to compute + many multi-dimensional, linear algebraic array operations. `einsum` + provides a succinct way of representing these. + + A non-exhaustive list of these operations, + which can be computed by `einsum`, is shown below along with examples: + + * Trace of an array, :py:func:`numpy.trace`. + * Return a diagonal, :py:func:`numpy.diag`. + * Array axis summations, :py:func:`numpy.sum`. + * Transpositions and permutations, :py:func:`numpy.transpose`. + * Matrix multiplication and dot product, :py:func:`numpy.matmul` :py:func:`numpy.dot`. + * Vector inner and outer products, :py:func:`numpy.inner` :py:func:`numpy.outer`. + * Broadcasting, element-wise and scalar multiplication, :py:func:`numpy.multiply`. + * Tensor contractions, :py:func:`numpy.tensordot`. + * Chained array operations, in efficient calculation order, :py:func:`numpy.einsum_path`. + + The subscripts string is a comma-separated list of subscript labels, + where each label refers to a dimension of the corresponding operand. + Whenever a label is repeated it is summed, so ``np.einsum('i,i', a, b)`` + is equivalent to :py:func:`np.inner(a,b) <numpy.inner>`. If a label + appears only once, it is not summed, so ``np.einsum('i', a)`` produces a + view of ``a`` with no changes. A further example ``np.einsum('ij,jk', a, b)`` + describes traditional matrix multiplication and is equivalent to + :py:func:`np.matmul(a,b) <numpy.matmul>`. Repeated subscript labels in one + operand take the diagonal. For example, ``np.einsum('ii', a)`` is equivalent + to :py:func:`np.trace(a) <numpy.trace>`. + + In *implicit mode*, the chosen subscripts are important + since the axes of the output are reordered alphabetically. This + means that ``np.einsum('ij', a)`` doesn't affect a 2D array, while + ``np.einsum('ji', a)`` takes its transpose. Additionally, + ``np.einsum('ij,jk', a, b)`` returns a matrix multiplication, while, + ``np.einsum('ij,jh', a, b)`` returns the transpose of the + multiplication since subscript 'h' precedes subscript 'i'. 
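A minimal sketch, assuming only that ``numpy`` is imported as ``np``, of the alphabetical output ordering in implicit mode described above:

    import numpy as np

    a = np.arange(6).reshape(2, 3)
    b = np.arange(12).reshape(3, 4)
    # 'ij,jh' sums over the repeated label j; the surviving labels are
    # emitted in alphabetical order ('h' before 'i'), so the result is
    # the transpose of the plain matrix product.
    assert np.array_equal(np.einsum('ij,jh', a, b), (a @ b).T)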
+ + In *explicit mode* the output can be directly controlled by + specifying output subscript labels. This requires the + identifier '->' as well as the list of output subscript labels. + This feature increases the flexibility of the function since + summing can be disabled or forced when required. The call + ``np.einsum('i->', a)`` is like :py:func:`np.sum(a, axis=-1) <numpy.sum>`, + and ``np.einsum('ii->i', a)`` is like :py:func:`np.diag(a) <numpy.diag>`. + The difference is that `einsum` does not allow broadcasting by default. + Additionally ``np.einsum('ij,jh->ih', a, b)`` directly specifies the + order of the output subscript labels and therefore returns matrix + multiplication, unlike the example above in implicit mode. + + To enable and control broadcasting, use an ellipsis. Default + NumPy-style broadcasting is done by adding an ellipsis + to the left of each term, like ``np.einsum('...ii->...i', a)``. + To take the trace along the first and last axes, + you can do ``np.einsum('i...i', a)``, or to do a matrix-matrix + product with the left-most indices instead of rightmost, one can do + ``np.einsum('ij...,jk...->ik...', a, b)``. + + When there is only one operand, no axes are summed, and no output + parameter is provided, a view into the operand is returned instead + of a new array. Thus, taking the diagonal as ``np.einsum('ii->i', a)`` + produces a view (changed in version 1.10.0). + + `einsum` also provides an alternative way to provide the subscripts + and operands as ``einsum(op0, sublist0, op1, sublist1, ..., [sublistout])``. + If the output shape is not provided in this format `einsum` will be + calculated in implicit mode, otherwise it will be performed explicitly. + The examples below have corresponding `einsum` calls with the two + parameter methods. + + .. versionadded:: 1.10.0 + + Views returned from einsum are now writeable whenever the input array + is writeable. For example, ``np.einsum('ijk...->kji...', a)`` will now + have the same effect as :py:func:`np.swapaxes(a, 0, 2) <numpy.swapaxes>` + and ``np.einsum('ii->i', a)`` will return a writeable view of the diagonal + of a 2D array. + + .. versionadded:: 1.12.0 + + Added the ``optimize`` argument which will optimize the contraction order + of an einsum expression. For a contraction with three or more operands this + can greatly increase the computational efficiency at the cost of a larger + memory footprint during computation. + + Typically a 'greedy' algorithm is applied which empirical tests have shown + returns the optimal path in the majority of cases. In some cases 'optimal' + will return the superlative path through a more expensive, exhaustive search. + For iterative calculations it may be advisable to calculate the optimal path + once and reuse that path by supplying it as an argument. An example is given + below. + + See :py:func:`numpy.einsum_path` for more details. 
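A minimal sketch, assuming ``numpy`` is imported as ``np``, of pre-computing a contraction path once and reusing it for repeated evaluations, as recommended above:

    import numpy as np

    rng = np.random.default_rng(0)
    a = rng.random((8, 8, 8))
    # Compute the path a single time ...
    path = np.einsum_path('ijk,ilm,njm->nlk', a, a, a, optimize='optimal')[0]
    # ... then pass it back via `optimize` on every subsequent call.
    for _ in range(10):
        result = np.einsum('ijk,ilm,njm->nlk', a, a, a, optimize=path)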
+ + Examples + -------- + >>> a = np.arange(25).reshape(5,5) + >>> b = np.arange(5) + >>> c = np.arange(6).reshape(2,3) + + Trace of a matrix: + + >>> np.einsum('ii', a) + 60 + >>> np.einsum(a, [0,0]) + 60 + >>> np.trace(a) + 60 + + Extract the diagonal (requires explicit form): + + >>> np.einsum('ii->i', a) + array([ 0, 6, 12, 18, 24]) + >>> np.einsum(a, [0,0], [0]) + array([ 0, 6, 12, 18, 24]) + >>> np.diag(a) + array([ 0, 6, 12, 18, 24]) + + Sum over an axis (requires explicit form): + + >>> np.einsum('ij->i', a) + array([ 10, 35, 60, 85, 110]) + >>> np.einsum(a, [0,1], [0]) + array([ 10, 35, 60, 85, 110]) + >>> np.sum(a, axis=1) + array([ 10, 35, 60, 85, 110]) + + For higher dimensional arrays summing a single axis can be done with ellipsis: + + >>> np.einsum('...j->...', a) + array([ 10, 35, 60, 85, 110]) + >>> np.einsum(a, [Ellipsis,1], [Ellipsis]) + array([ 10, 35, 60, 85, 110]) + + Compute a matrix transpose, or reorder any number of axes: + + >>> np.einsum('ji', c) + array([[0, 3], + [1, 4], + [2, 5]]) + >>> np.einsum('ij->ji', c) + array([[0, 3], + [1, 4], + [2, 5]]) + >>> np.einsum(c, [1,0]) + array([[0, 3], + [1, 4], + [2, 5]]) + >>> np.transpose(c) + array([[0, 3], + [1, 4], + [2, 5]]) + + Vector inner products: + + >>> np.einsum('i,i', b, b) + 30 + >>> np.einsum(b, [0], b, [0]) + 30 + >>> np.inner(b,b) + 30 + + Matrix vector multiplication: + + >>> np.einsum('ij,j', a, b) + array([ 30, 80, 130, 180, 230]) + >>> np.einsum(a, [0,1], b, [1]) + array([ 30, 80, 130, 180, 230]) + >>> np.dot(a, b) + array([ 30, 80, 130, 180, 230]) + >>> np.einsum('...j,j', a, b) + array([ 30, 80, 130, 180, 230]) + + Broadcasting and scalar multiplication: + + >>> np.einsum('..., ...', 3, c) + array([[ 0, 3, 6], + [ 9, 12, 15]]) + >>> np.einsum(',ij', 3, c) + array([[ 0, 3, 6], + [ 9, 12, 15]]) + >>> np.einsum(3, [Ellipsis], c, [Ellipsis]) + array([[ 0, 3, 6], + [ 9, 12, 15]]) + >>> np.multiply(3, c) + array([[ 0, 3, 6], + [ 9, 12, 15]]) + + Vector outer product: + + >>> np.einsum('i,j', np.arange(2)+1, b) + array([[0, 1, 2, 3, 4], + [0, 2, 4, 6, 8]]) + >>> np.einsum(np.arange(2)+1, [0], b, [1]) + array([[0, 1, 2, 3, 4], + [0, 2, 4, 6, 8]]) + >>> np.outer(np.arange(2)+1, b) + array([[0, 1, 2, 3, 4], + [0, 2, 4, 6, 8]]) + + Tensor contraction: + + >>> a = np.arange(60.).reshape(3,4,5) + >>> b = np.arange(24.).reshape(4,3,2) + >>> np.einsum('ijk,jil->kl', a, b) + array([[4400., 4730.], + [4532., 4874.], + [4664., 5018.], + [4796., 5162.], + [4928., 5306.]]) + >>> np.einsum(a, [0,1,2], b, [1,0,3], [2,3]) + array([[4400., 4730.], + [4532., 4874.], + [4664., 5018.], + [4796., 5162.], + [4928., 5306.]]) + >>> np.tensordot(a,b, axes=([1,0],[0,1])) + array([[4400., 4730.], + [4532., 4874.], + [4664., 5018.], + [4796., 5162.], + [4928., 5306.]]) + + Writeable returned arrays (since version 1.10.0): + + >>> a = np.zeros((3, 3)) + >>> np.einsum('ii->i', a)[:] = 1 + >>> a + array([[1., 0., 0.], + [0., 1., 0.], + [0., 0., 1.]]) + + Example of ellipsis use: + + >>> a = np.arange(6).reshape((3,2)) + >>> b = np.arange(12).reshape((4,3)) + >>> np.einsum('ki,jk->ij', a, b) + array([[10, 28, 46, 64], + [13, 40, 67, 94]]) + >>> np.einsum('ki,...k->i...', a, b) + array([[10, 28, 46, 64], + [13, 40, 67, 94]]) + >>> np.einsum('k...,jk', a, b) + array([[10, 28, 46, 64], + [13, 40, 67, 94]]) + + Chained array operations. 
For more complicated contractions, speed ups + might be achieved by repeatedly computing a 'greedy' path or pre-computing the + 'optimal' path and repeatedly applying it, using an + `einsum_path` insertion (since version 1.12.0). Performance improvements can be + particularly significant with larger arrays: + + >>> a = np.ones(64).reshape(2,4,8) + + Basic `einsum`: ~1520ms (benchmarked on 3.1GHz Intel i5.) + + >>> for iteration in range(500): + ... _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a) + + Sub-optimal `einsum` (due to repeated path calculation time): ~330ms + + >>> for iteration in range(500): + ... _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize='optimal') + + Greedy `einsum` (faster optimal path approximation): ~160ms + + >>> for iteration in range(500): + ... _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize='greedy') + + Optimal `einsum` (best usage pattern in some use cases): ~110ms + + >>> path = np.einsum_path('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize='optimal')[0] + >>> for iteration in range(500): + ... _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize=path) + + """ + # Special handling if out is specified + specified_out = out is not None + + # If no optimization, run pure einsum + if optimize is False: + if specified_out: + kwargs['out'] = out + return c_einsum(*operands, **kwargs) + + # Check the kwargs to avoid a more cryptic error later, without having to + # repeat default values here + valid_einsum_kwargs = ['dtype', 'order', 'casting'] + unknown_kwargs = [k for (k, v) in kwargs.items() if + k not in valid_einsum_kwargs] + if len(unknown_kwargs): + raise TypeError("Did not understand the following kwargs: %s" + % unknown_kwargs) + + # Build the contraction list and operand + operands, contraction_list = einsum_path(*operands, optimize=optimize, + einsum_call=True) + + # Handle order kwarg for output array, c_einsum allows mixed case + output_order = kwargs.pop('order', 'K') + if output_order.upper() == 'A': + if all(arr.flags.f_contiguous for arr in operands): + output_order = 'F' + else: + output_order = 'C' + + # Start contraction loop + for num, contraction in enumerate(contraction_list): + inds, idx_rm, einsum_str, remaining, blas = contraction + tmp_operands = [operands.pop(x) for x in inds] + + # Do we need to deal with the output? + handle_out = specified_out and ((num + 1) == len(contraction_list)) + + # Call tensordot if still possible + if blas: + # Checks have already been handled + input_str, results_index = einsum_str.split('->') + input_left, input_right = input_str.split(',') + + tensor_result = input_left + input_right + for s in idx_rm: + tensor_result = tensor_result.replace(s, "") + + # Find indices to contract over + left_pos, right_pos = [], [] + for s in sorted(idx_rm): + left_pos.append(input_left.find(s)) + right_pos.append(input_right.find(s)) + + # Contract! 
+ new_view = tensordot(*tmp_operands, axes=(tuple(left_pos), tuple(right_pos))) + + # Build a new view if needed + if (tensor_result != results_index) or handle_out: + if handle_out: + kwargs["out"] = out + new_view = c_einsum(tensor_result + '->' + results_index, new_view, **kwargs) + + # Call einsum + else: + # If out was specified + if handle_out: + kwargs["out"] = out + + # Do the contraction + new_view = c_einsum(einsum_str, *tmp_operands, **kwargs) + + # Append new items and dereference what we can + operands.append(new_view) + del tmp_operands, new_view + + if specified_out: + return out + else: + return asanyarray(operands[0], order=output_order) diff --git a/.venv/lib/python3.12/site-packages/numpy/core/einsumfunc.pyi b/.venv/lib/python3.12/site-packages/numpy/core/einsumfunc.pyi new file mode 100644 index 00000000..ad483bb9 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/einsumfunc.pyi @@ -0,0 +1,187 @@ +from collections.abc import Sequence +from typing import TypeVar, Any, overload, Union, Literal + +from numpy import ( + ndarray, + dtype, + bool_, + number, + _OrderKACF, +) +from numpy._typing import ( + _ArrayLikeBool_co, + _ArrayLikeUInt_co, + _ArrayLikeInt_co, + _ArrayLikeFloat_co, + _ArrayLikeComplex_co, + _ArrayLikeObject_co, + _DTypeLikeBool, + _DTypeLikeUInt, + _DTypeLikeInt, + _DTypeLikeFloat, + _DTypeLikeComplex, + _DTypeLikeComplex_co, + _DTypeLikeObject, +) + +_ArrayType = TypeVar( + "_ArrayType", + bound=ndarray[Any, dtype[Union[bool_, number[Any]]]], +) + +_OptimizeKind = None | bool | Literal["greedy", "optimal"] | Sequence[Any] +_CastingSafe = Literal["no", "equiv", "safe", "same_kind"] +_CastingUnsafe = Literal["unsafe"] + +__all__: list[str] + +# TODO: Properly handle the `casting`-based combinatorics +# TODO: We need to evaluate the content `__subscripts` in order +# to identify whether or an array or scalar is returned. At a cursory +# glance this seems like something that can quite easily be done with +# a mypy plugin. +# Something like `is_scalar = bool(__subscripts.partition("->")[-1])` +@overload +def einsum( + subscripts: str | _ArrayLikeInt_co, + /, + *operands: _ArrayLikeBool_co, + out: None = ..., + dtype: None | _DTypeLikeBool = ..., + order: _OrderKACF = ..., + casting: _CastingSafe = ..., + optimize: _OptimizeKind = ..., +) -> Any: ... +@overload +def einsum( + subscripts: str | _ArrayLikeInt_co, + /, + *operands: _ArrayLikeUInt_co, + out: None = ..., + dtype: None | _DTypeLikeUInt = ..., + order: _OrderKACF = ..., + casting: _CastingSafe = ..., + optimize: _OptimizeKind = ..., +) -> Any: ... +@overload +def einsum( + subscripts: str | _ArrayLikeInt_co, + /, + *operands: _ArrayLikeInt_co, + out: None = ..., + dtype: None | _DTypeLikeInt = ..., + order: _OrderKACF = ..., + casting: _CastingSafe = ..., + optimize: _OptimizeKind = ..., +) -> Any: ... +@overload +def einsum( + subscripts: str | _ArrayLikeInt_co, + /, + *operands: _ArrayLikeFloat_co, + out: None = ..., + dtype: None | _DTypeLikeFloat = ..., + order: _OrderKACF = ..., + casting: _CastingSafe = ..., + optimize: _OptimizeKind = ..., +) -> Any: ... +@overload +def einsum( + subscripts: str | _ArrayLikeInt_co, + /, + *operands: _ArrayLikeComplex_co, + out: None = ..., + dtype: None | _DTypeLikeComplex = ..., + order: _OrderKACF = ..., + casting: _CastingSafe = ..., + optimize: _OptimizeKind = ..., +) -> Any: ... 
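A minimal runtime sketch, assuming ``numpy`` is imported as ``np``, of the behaviour the ``out``-based overloads further below encode: when an output array is supplied, the result is written into that array and returned:

    import numpy as np

    a = np.ones((2, 3))
    b = np.ones((3, 2))
    buf = np.empty((2, 2))
    res = np.einsum('ij,jk->ik', a, b, out=buf)
    assert np.shares_memory(res, buf)  # result stored in the supplied array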
+@overload +def einsum( + subscripts: str | _ArrayLikeInt_co, + /, + *operands: Any, + casting: _CastingUnsafe, + dtype: None | _DTypeLikeComplex_co = ..., + out: None = ..., + order: _OrderKACF = ..., + optimize: _OptimizeKind = ..., +) -> Any: ... +@overload +def einsum( + subscripts: str | _ArrayLikeInt_co, + /, + *operands: _ArrayLikeComplex_co, + out: _ArrayType, + dtype: None | _DTypeLikeComplex_co = ..., + order: _OrderKACF = ..., + casting: _CastingSafe = ..., + optimize: _OptimizeKind = ..., +) -> _ArrayType: ... +@overload +def einsum( + subscripts: str | _ArrayLikeInt_co, + /, + *operands: Any, + out: _ArrayType, + casting: _CastingUnsafe, + dtype: None | _DTypeLikeComplex_co = ..., + order: _OrderKACF = ..., + optimize: _OptimizeKind = ..., +) -> _ArrayType: ... + +@overload +def einsum( + subscripts: str | _ArrayLikeInt_co, + /, + *operands: _ArrayLikeObject_co, + out: None = ..., + dtype: None | _DTypeLikeObject = ..., + order: _OrderKACF = ..., + casting: _CastingSafe = ..., + optimize: _OptimizeKind = ..., +) -> Any: ... +@overload +def einsum( + subscripts: str | _ArrayLikeInt_co, + /, + *operands: Any, + casting: _CastingUnsafe, + dtype: None | _DTypeLikeObject = ..., + out: None = ..., + order: _OrderKACF = ..., + optimize: _OptimizeKind = ..., +) -> Any: ... +@overload +def einsum( + subscripts: str | _ArrayLikeInt_co, + /, + *operands: _ArrayLikeObject_co, + out: _ArrayType, + dtype: None | _DTypeLikeObject = ..., + order: _OrderKACF = ..., + casting: _CastingSafe = ..., + optimize: _OptimizeKind = ..., +) -> _ArrayType: ... +@overload +def einsum( + subscripts: str | _ArrayLikeInt_co, + /, + *operands: Any, + out: _ArrayType, + casting: _CastingUnsafe, + dtype: None | _DTypeLikeObject = ..., + order: _OrderKACF = ..., + optimize: _OptimizeKind = ..., +) -> _ArrayType: ... + +# NOTE: `einsum_call` is a hidden kwarg unavailable for public use. +# It is therefore excluded from the signatures below. +# NOTE: In practice the list consists of a `str` (first element) +# and a variable number of integer tuples. +def einsum_path( + subscripts: str | _ArrayLikeInt_co, + /, + *operands: _ArrayLikeComplex_co | _DTypeLikeObject, + optimize: _OptimizeKind = ..., +) -> tuple[list[Any], str]: ... diff --git a/.venv/lib/python3.12/site-packages/numpy/core/fromnumeric.py b/.venv/lib/python3.12/site-packages/numpy/core/fromnumeric.py new file mode 100644 index 00000000..69cabb33 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/fromnumeric.py @@ -0,0 +1,3920 @@ +"""Module containing non-deprecated functions borrowed from Numeric. + +""" +import functools +import types +import warnings + +import numpy as np +from .._utils import set_module +from . import multiarray as mu +from . import overrides +from . import umath as um +from . import numerictypes as nt +from .multiarray import asarray, array, asanyarray, concatenate +from . 
import _methods + +_dt_ = nt.sctype2char + +# functions that are methods +__all__ = [ + 'all', 'alltrue', 'amax', 'amin', 'any', 'argmax', + 'argmin', 'argpartition', 'argsort', 'around', 'choose', 'clip', + 'compress', 'cumprod', 'cumproduct', 'cumsum', 'diagonal', 'mean', + 'max', 'min', + 'ndim', 'nonzero', 'partition', 'prod', 'product', 'ptp', 'put', + 'ravel', 'repeat', 'reshape', 'resize', 'round', 'round_', + 'searchsorted', 'shape', 'size', 'sometrue', 'sort', 'squeeze', + 'std', 'sum', 'swapaxes', 'take', 'trace', 'transpose', 'var', +] + +_gentype = types.GeneratorType +# save away Python sum +_sum_ = sum + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy') + + +# functions that are now methods +def _wrapit(obj, method, *args, **kwds): + try: + wrap = obj.__array_wrap__ + except AttributeError: + wrap = None + result = getattr(asarray(obj), method)(*args, **kwds) + if wrap: + if not isinstance(result, mu.ndarray): + result = asarray(result) + result = wrap(result) + return result + + +def _wrapfunc(obj, method, *args, **kwds): + bound = getattr(obj, method, None) + if bound is None: + return _wrapit(obj, method, *args, **kwds) + + try: + return bound(*args, **kwds) + except TypeError: + # A TypeError occurs if the object does have such a method in its + # class, but its signature is not identical to that of NumPy's. This + # situation has occurred in the case of a downstream library like + # 'pandas'. + # + # Call _wrapit from within the except clause to ensure a potential + # exception has a traceback chain. + return _wrapit(obj, method, *args, **kwds) + + +def _wrapreduction(obj, ufunc, method, axis, dtype, out, **kwargs): + passkwargs = {k: v for k, v in kwargs.items() + if v is not np._NoValue} + + if type(obj) is not mu.ndarray: + try: + reduction = getattr(obj, method) + except AttributeError: + pass + else: + # This branch is needed for reductions like any which don't + # support a dtype. + if dtype is not None: + return reduction(axis=axis, dtype=dtype, out=out, **passkwargs) + else: + return reduction(axis=axis, out=out, **passkwargs) + + return ufunc.reduce(obj, axis, dtype, out, **passkwargs) + + +def _take_dispatcher(a, indices, axis=None, out=None, mode=None): + return (a, out) + + +@array_function_dispatch(_take_dispatcher) +def take(a, indices, axis=None, out=None, mode='raise'): + """ + Take elements from an array along an axis. + + When axis is not None, this function does the same thing as "fancy" + indexing (indexing arrays using arrays); however, it can be easier to use + if you need elements along a given axis. A call such as + ``np.take(arr, indices, axis=3)`` is equivalent to + ``arr[:,:,:,indices,...]``. + + Explained without fancy indexing, this is equivalent to the following use + of `ndindex`, which sets each of ``ii``, ``jj``, and ``kk`` to a tuple of + indices:: + + Ni, Nk = a.shape[:axis], a.shape[axis+1:] + Nj = indices.shape + for ii in ndindex(Ni): + for jj in ndindex(Nj): + for kk in ndindex(Nk): + out[ii + jj + kk] = a[ii + (indices[jj],) + kk] + + Parameters + ---------- + a : array_like (Ni..., M, Nk...) + The source array. + indices : array_like (Nj...) + The indices of the values to extract. + + .. versionadded:: 1.8.0 + + Also allow scalars for indices. + axis : int, optional + The axis over which to select values. By default, the flattened + input array is used. + out : ndarray, optional (Ni..., Nj..., Nk...) + If provided, the result will be placed in this array. 
It should + be of the appropriate shape and dtype. Note that `out` is always + buffered if `mode='raise'`; use other modes for better performance. + mode : {'raise', 'wrap', 'clip'}, optional + Specifies how out-of-bounds indices will behave. + + * 'raise' -- raise an error (default) + * 'wrap' -- wrap around + * 'clip' -- clip to the range + + 'clip' mode means that all indices that are too large are replaced + by the index that addresses the last element along that axis. Note + that this disables indexing with negative numbers. + + Returns + ------- + out : ndarray (Ni..., Nj..., Nk...) + The returned array has the same type as `a`. + + See Also + -------- + compress : Take elements using a boolean mask + ndarray.take : equivalent method + take_along_axis : Take elements by matching the array and the index arrays + + Notes + ----- + + By eliminating the inner loop in the description above, and using `s_` to + build simple slice objects, `take` can be expressed in terms of applying + fancy indexing to each 1-d slice:: + + Ni, Nk = a.shape[:axis], a.shape[axis+1:] + for ii in ndindex(Ni): + for kk in ndindex(Nj): + out[ii + s_[...,] + kk] = a[ii + s_[:,] + kk][indices] + + For this reason, it is equivalent to (but faster than) the following use + of `apply_along_axis`:: + + out = np.apply_along_axis(lambda a_1d: a_1d[indices], axis, a) + + Examples + -------- + >>> a = [4, 3, 5, 7, 6, 8] + >>> indices = [0, 1, 4] + >>> np.take(a, indices) + array([4, 3, 6]) + + In this example if `a` is an ndarray, "fancy" indexing can be used. + + >>> a = np.array(a) + >>> a[indices] + array([4, 3, 6]) + + If `indices` is not one dimensional, the output also has these dimensions. + + >>> np.take(a, [[0, 1], [2, 3]]) + array([[4, 3], + [5, 7]]) + """ + return _wrapfunc(a, 'take', indices, axis=axis, out=out, mode=mode) + + +def _reshape_dispatcher(a, newshape, order=None): + return (a,) + + +# not deprecated --- copy if necessary, view otherwise +@array_function_dispatch(_reshape_dispatcher) +def reshape(a, newshape, order='C'): + """ + Gives a new shape to an array without changing its data. + + Parameters + ---------- + a : array_like + Array to be reshaped. + newshape : int or tuple of ints + The new shape should be compatible with the original shape. If + an integer, then the result will be a 1-D array of that length. + One shape dimension can be -1. In this case, the value is + inferred from the length of the array and remaining dimensions. + order : {'C', 'F', 'A'}, optional + Read the elements of `a` using this index order, and place the + elements into the reshaped array using this index order. 'C' + means to read / write the elements using C-like index order, + with the last axis index changing fastest, back to the first + axis index changing slowest. 'F' means to read / write the + elements using Fortran-like index order, with the first index + changing fastest, and the last index changing slowest. Note that + the 'C' and 'F' options take no account of the memory layout of + the underlying array, and only refer to the order of indexing. + 'A' means to read / write the elements in Fortran-like index + order if `a` is Fortran *contiguous* in memory, C-like order + otherwise. + + Returns + ------- + reshaped_array : ndarray + This will be a new view object if possible; otherwise, it will + be a copy. Note there is no guarantee of the *memory layout* (C- or + Fortran- contiguous) of the returned array. + + See Also + -------- + ndarray.reshape : Equivalent method. 
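A minimal sketch, assuming ``numpy`` is imported as ``np``, of the view-versus-copy behaviour noted in the Returns section above:

    import numpy as np

    a = np.arange(6)
    b = np.reshape(a, (2, 3))   # a contiguous input can be reshaped as a view
    assert np.shares_memory(a, b)
    b[0, 0] = 99                # writes through the view are visible in `a`
    assert a[0] == 99
    # Inputs whose layout cannot be reinterpreted in place are copied instead,
    # in which case `np.shares_memory` would return False.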
+ + Notes + ----- + It is not always possible to change the shape of an array without copying + the data. + + The `order` keyword gives the index ordering both for *fetching* the values + from `a`, and then *placing* the values into the output array. + For example, let's say you have an array: + + >>> a = np.arange(6).reshape((3, 2)) + >>> a + array([[0, 1], + [2, 3], + [4, 5]]) + + You can think of reshaping as first raveling the array (using the given + index order), then inserting the elements from the raveled array into the + new array using the same kind of index ordering as was used for the + raveling. + + >>> np.reshape(a, (2, 3)) # C-like index ordering + array([[0, 1, 2], + [3, 4, 5]]) + >>> np.reshape(np.ravel(a), (2, 3)) # equivalent to C ravel then C reshape + array([[0, 1, 2], + [3, 4, 5]]) + >>> np.reshape(a, (2, 3), order='F') # Fortran-like index ordering + array([[0, 4, 3], + [2, 1, 5]]) + >>> np.reshape(np.ravel(a, order='F'), (2, 3), order='F') + array([[0, 4, 3], + [2, 1, 5]]) + + Examples + -------- + >>> a = np.array([[1,2,3], [4,5,6]]) + >>> np.reshape(a, 6) + array([1, 2, 3, 4, 5, 6]) + >>> np.reshape(a, 6, order='F') + array([1, 4, 2, 5, 3, 6]) + + >>> np.reshape(a, (3,-1)) # the unspecified value is inferred to be 2 + array([[1, 2], + [3, 4], + [5, 6]]) + """ + return _wrapfunc(a, 'reshape', newshape, order=order) + + +def _choose_dispatcher(a, choices, out=None, mode=None): + yield a + yield from choices + yield out + + +@array_function_dispatch(_choose_dispatcher) +def choose(a, choices, out=None, mode='raise'): + """ + Construct an array from an index array and a list of arrays to choose from. + + First of all, if confused or uncertain, definitely look at the Examples - + in its full generality, this function is less simple than it might + seem from the following code description (below ndi = + `numpy.lib.index_tricks`): + + ``np.choose(a,c) == np.array([c[a[I]][I] for I in ndi.ndindex(a.shape)])``. + + But this omits some subtleties. Here is a fully general summary: + + Given an "index" array (`a`) of integers and a sequence of ``n`` arrays + (`choices`), `a` and each choice array are first broadcast, as necessary, + to arrays of a common shape; calling these *Ba* and *Bchoices[i], i = + 0,...,n-1* we have that, necessarily, ``Ba.shape == Bchoices[i].shape`` + for each ``i``. Then, a new array with shape ``Ba.shape`` is created as + follows: + + * if ``mode='raise'`` (the default), then, first of all, each element of + ``a`` (and thus ``Ba``) must be in the range ``[0, n-1]``; now, suppose + that ``i`` (in that range) is the value at the ``(j0, j1, ..., jm)`` + position in ``Ba`` - then the value at the same position in the new array + is the value in ``Bchoices[i]`` at that same position; + + * if ``mode='wrap'``, values in `a` (and thus `Ba`) may be any (signed) + integer; modular arithmetic is used to map integers outside the range + `[0, n-1]` back into that range; and then the new array is constructed + as above; + + * if ``mode='clip'``, values in `a` (and thus ``Ba``) may be any (signed) + integer; negative integers are mapped to 0; values greater than ``n-1`` + are mapped to ``n-1``; and then the new array is constructed as above. + + Parameters + ---------- + a : int array + This array must contain integers in ``[0, n-1]``, where ``n`` is the + number of choices, unless ``mode=wrap`` or ``mode=clip``, in which + cases any integers are permissible. + choices : sequence of arrays + Choice arrays. 
`a` and all of the choices must be broadcastable to the + same shape. If `choices` is itself an array (not recommended), then + its outermost dimension (i.e., the one corresponding to + ``choices.shape[0]``) is taken as defining the "sequence". + out : array, optional + If provided, the result will be inserted into this array. It should + be of the appropriate shape and dtype. Note that `out` is always + buffered if ``mode='raise'``; use other modes for better performance. + mode : {'raise' (default), 'wrap', 'clip'}, optional + Specifies how indices outside ``[0, n-1]`` will be treated: + + * 'raise' : an exception is raised + * 'wrap' : value becomes value mod ``n`` + * 'clip' : values < 0 are mapped to 0, values > n-1 are mapped to n-1 + + Returns + ------- + merged_array : array + The merged result. + + Raises + ------ + ValueError: shape mismatch + If `a` and each choice array are not all broadcastable to the same + shape. + + See Also + -------- + ndarray.choose : equivalent method + numpy.take_along_axis : Preferable if `choices` is an array + + Notes + ----- + To reduce the chance of misinterpretation, even though the following + "abuse" is nominally supported, `choices` should neither be, nor be + thought of as, a single array, i.e., the outermost sequence-like container + should be either a list or a tuple. + + Examples + -------- + + >>> choices = [[0, 1, 2, 3], [10, 11, 12, 13], + ... [20, 21, 22, 23], [30, 31, 32, 33]] + >>> np.choose([2, 3, 1, 0], choices + ... # the first element of the result will be the first element of the + ... # third (2+1) "array" in choices, namely, 20; the second element + ... # will be the second element of the fourth (3+1) choice array, i.e., + ... # 31, etc. + ... ) + array([20, 31, 12, 3]) + >>> np.choose([2, 4, 1, 0], choices, mode='clip') # 4 goes to 3 (4-1) + array([20, 31, 12, 3]) + >>> # because there are 4 choice arrays + >>> np.choose([2, 4, 1, 0], choices, mode='wrap') # 4 goes to (4 mod 4) + array([20, 1, 12, 3]) + >>> # i.e., 0 + + A couple examples illustrating how choose broadcasts: + + >>> a = [[1, 0, 1], [0, 1, 0], [1, 0, 1]] + >>> choices = [-10, 10] + >>> np.choose(a, choices) + array([[ 10, -10, 10], + [-10, 10, -10], + [ 10, -10, 10]]) + + >>> # With thanks to Anne Archibald + >>> a = np.array([0, 1]).reshape((2,1,1)) + >>> c1 = np.array([1, 2, 3]).reshape((1,3,1)) + >>> c2 = np.array([-1, -2, -3, -4, -5]).reshape((1,1,5)) + >>> np.choose(a, (c1, c2)) # result is 2x3x5, res[0,:,:]=c1, res[1,:,:]=c2 + array([[[ 1, 1, 1, 1, 1], + [ 2, 2, 2, 2, 2], + [ 3, 3, 3, 3, 3]], + [[-1, -2, -3, -4, -5], + [-1, -2, -3, -4, -5], + [-1, -2, -3, -4, -5]]]) + + """ + return _wrapfunc(a, 'choose', choices, out=out, mode=mode) + + +def _repeat_dispatcher(a, repeats, axis=None): + return (a,) + + +@array_function_dispatch(_repeat_dispatcher) +def repeat(a, repeats, axis=None): + """ + Repeat each element of an array after themselves + + Parameters + ---------- + a : array_like + Input array. + repeats : int or array of ints + The number of repetitions for each element. `repeats` is broadcasted + to fit the shape of the given axis. + axis : int, optional + The axis along which to repeat values. By default, use the + flattened input array, and return a flat output array. + + Returns + ------- + repeated_array : ndarray + Output array which has the same shape as `a`, except along + the given axis. + + See Also + -------- + tile : Tile an array. + unique : Find the unique elements of an array. 
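A minimal sketch, assuming ``numpy`` is imported as ``np``, contrasting `repeat` with the related `tile` listed above:

    import numpy as np

    x = np.array([1, 2, 3])
    # repeat duplicates each element in place ...
    assert np.array_equal(np.repeat(x, 2), [1, 1, 2, 2, 3, 3])
    # ... while tile duplicates the whole array end to end.
    assert np.array_equal(np.tile(x, 2), [1, 2, 3, 1, 2, 3])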
+ + Examples + -------- + >>> np.repeat(3, 4) + array([3, 3, 3, 3]) + >>> x = np.array([[1,2],[3,4]]) + >>> np.repeat(x, 2) + array([1, 1, 2, 2, 3, 3, 4, 4]) + >>> np.repeat(x, 3, axis=1) + array([[1, 1, 1, 2, 2, 2], + [3, 3, 3, 4, 4, 4]]) + >>> np.repeat(x, [1, 2], axis=0) + array([[1, 2], + [3, 4], + [3, 4]]) + + """ + return _wrapfunc(a, 'repeat', repeats, axis=axis) + + +def _put_dispatcher(a, ind, v, mode=None): + return (a, ind, v) + + +@array_function_dispatch(_put_dispatcher) +def put(a, ind, v, mode='raise'): + """ + Replaces specified elements of an array with given values. + + The indexing works on the flattened target array. `put` is roughly + equivalent to: + + :: + + a.flat[ind] = v + + Parameters + ---------- + a : ndarray + Target array. + ind : array_like + Target indices, interpreted as integers. + v : array_like + Values to place in `a` at target indices. If `v` is shorter than + `ind` it will be repeated as necessary. + mode : {'raise', 'wrap', 'clip'}, optional + Specifies how out-of-bounds indices will behave. + + * 'raise' -- raise an error (default) + * 'wrap' -- wrap around + * 'clip' -- clip to the range + + 'clip' mode means that all indices that are too large are replaced + by the index that addresses the last element along that axis. Note + that this disables indexing with negative numbers. In 'raise' mode, + if an exception occurs the target array may still be modified. + + See Also + -------- + putmask, place + put_along_axis : Put elements by matching the array and the index arrays + + Examples + -------- + >>> a = np.arange(5) + >>> np.put(a, [0, 2], [-44, -55]) + >>> a + array([-44, 1, -55, 3, 4]) + + >>> a = np.arange(5) + >>> np.put(a, 22, -5, mode='clip') + >>> a + array([ 0, 1, 2, 3, -5]) + + """ + try: + put = a.put + except AttributeError as e: + raise TypeError("argument 1 must be numpy.ndarray, " + "not {name}".format(name=type(a).__name__)) from e + + return put(ind, v, mode=mode) + + +def _swapaxes_dispatcher(a, axis1, axis2): + return (a,) + + +@array_function_dispatch(_swapaxes_dispatcher) +def swapaxes(a, axis1, axis2): + """ + Interchange two axes of an array. + + Parameters + ---------- + a : array_like + Input array. + axis1 : int + First axis. + axis2 : int + Second axis. + + Returns + ------- + a_swapped : ndarray + For NumPy >= 1.10.0, if `a` is an ndarray, then a view of `a` is + returned; otherwise a new array is created. For earlier NumPy + versions a view of `a` is returned only if the order of the + axes is changed, otherwise the input array is returned. + + Examples + -------- + >>> x = np.array([[1,2,3]]) + >>> np.swapaxes(x,0,1) + array([[1], + [2], + [3]]) + + >>> x = np.array([[[0,1],[2,3]],[[4,5],[6,7]]]) + >>> x + array([[[0, 1], + [2, 3]], + [[4, 5], + [6, 7]]]) + + >>> np.swapaxes(x,0,2) + array([[[0, 4], + [2, 6]], + [[1, 5], + [3, 7]]]) + + """ + return _wrapfunc(a, 'swapaxes', axis1, axis2) + + +def _transpose_dispatcher(a, axes=None): + return (a,) + + +@array_function_dispatch(_transpose_dispatcher) +def transpose(a, axes=None): + """ + Returns an array with axes transposed. + + For a 1-D array, this returns an unchanged view of the original array, as a + transposed vector is simply the same vector. + To convert a 1-D array into a 2-D column vector, an additional dimension + must be added, e.g., ``np.atleast2d(a).T`` achieves this, as does + ``a[:, np.newaxis]``. + For a 2-D array, this is the standard matrix transpose. 
+ For an n-D array, if axes are given, their order indicates how the + axes are permuted (see Examples). If axes are not provided, then + ``transpose(a).shape == a.shape[::-1]``. + + Parameters + ---------- + a : array_like + Input array. + axes : tuple or list of ints, optional + If specified, it must be a tuple or list which contains a permutation + of [0,1,...,N-1] where N is the number of axes of `a`. The `i`'th axis + of the returned array will correspond to the axis numbered ``axes[i]`` + of the input. If not specified, defaults to ``range(a.ndim)[::-1]``, + which reverses the order of the axes. + + Returns + ------- + p : ndarray + `a` with its axes permuted. A view is returned whenever possible. + + See Also + -------- + ndarray.transpose : Equivalent method. + moveaxis : Move axes of an array to new positions. + argsort : Return the indices that would sort an array. + + Notes + ----- + Use ``transpose(a, argsort(axes))`` to invert the transposition of tensors + when using the `axes` keyword argument. + + Examples + -------- + >>> a = np.array([[1, 2], [3, 4]]) + >>> a + array([[1, 2], + [3, 4]]) + >>> np.transpose(a) + array([[1, 3], + [2, 4]]) + + >>> a = np.array([1, 2, 3, 4]) + >>> a + array([1, 2, 3, 4]) + >>> np.transpose(a) + array([1, 2, 3, 4]) + + >>> a = np.ones((1, 2, 3)) + >>> np.transpose(a, (1, 0, 2)).shape + (2, 1, 3) + + >>> a = np.ones((2, 3, 4, 5)) + >>> np.transpose(a).shape + (5, 4, 3, 2) + + """ + return _wrapfunc(a, 'transpose', axes) + + +def _partition_dispatcher(a, kth, axis=None, kind=None, order=None): + return (a,) + + +@array_function_dispatch(_partition_dispatcher) +def partition(a, kth, axis=-1, kind='introselect', order=None): + """ + Return a partitioned copy of an array. + + Creates a copy of the array with its elements rearranged in such a + way that the value of the element in k-th position is in the position + the value would be in a sorted array. In the partitioned array, all + elements before the k-th element are less than or equal to that + element, and all the elements after the k-th element are greater than + or equal to that element. The ordering of the elements in the two + partitions is undefined. + + .. versionadded:: 1.8.0 + + Parameters + ---------- + a : array_like + Array to be sorted. + kth : int or sequence of ints + Element index to partition by. The k-th value of the element + will be in its final sorted position and all smaller elements + will be moved before it and all equal or greater elements behind + it. The order of all elements in the partitions is undefined. If + provided with a sequence of k-th it will partition all elements + indexed by k-th of them into their sorted position at once. + + .. deprecated:: 1.22.0 + Passing booleans as index is deprecated. + axis : int or None, optional + Axis along which to sort. If None, the array is flattened before + sorting. The default is -1, which sorts along the last axis. + kind : {'introselect'}, optional + Selection algorithm. Default is 'introselect'. + order : str or list of str, optional + When `a` is an array with fields defined, this argument + specifies which fields to compare first, second, etc. A single + field can be specified as a string. Not all fields need be + specified, but unspecified fields will still be used, in the + order in which they come up in the dtype, to break ties. + + Returns + ------- + partitioned_array : ndarray + Array of the same type and shape as `a`. + + See Also + -------- + ndarray.partition : Method to sort an array in-place. 
+ argpartition : Indirect partition. + sort : Full sorting + + Notes + ----- + The various selection algorithms are characterized by their average + speed, worst case performance, work space size, and whether they are + stable. A stable sort keeps items with the same key in the same + relative order. The available algorithms have the following + properties: + + ================= ======= ============= ============ ======= + kind speed worst case work space stable + ================= ======= ============= ============ ======= + 'introselect' 1 O(n) 0 no + ================= ======= ============= ============ ======= + + All the partition algorithms make temporary copies of the data when + partitioning along any but the last axis. Consequently, + partitioning along the last axis is faster and uses less space than + partitioning along any other axis. + + The sort order for complex numbers is lexicographic. If both the + real and imaginary parts are non-nan then the order is determined by + the real parts except when they are equal, in which case the order + is determined by the imaginary parts. + + Examples + -------- + >>> a = np.array([7, 1, 7, 7, 1, 5, 7, 2, 3, 2, 6, 2, 3, 0]) + >>> p = np.partition(a, 4) + >>> p + array([0, 1, 2, 1, 2, 5, 2, 3, 3, 6, 7, 7, 7, 7]) + + ``p[4]`` is 2; all elements in ``p[:4]`` are less than or equal + to ``p[4]``, and all elements in ``p[5:]`` are greater than or + equal to ``p[4]``. The partition is:: + + [0, 1, 2, 1], [2], [5, 2, 3, 3, 6, 7, 7, 7, 7] + + The next example shows the use of multiple values passed to `kth`. + + >>> p2 = np.partition(a, (4, 8)) + >>> p2 + array([0, 1, 2, 1, 2, 3, 3, 2, 5, 6, 7, 7, 7, 7]) + + ``p2[4]`` is 2 and ``p2[8]`` is 5. All elements in ``p2[:4]`` + are less than or equal to ``p2[4]``, all elements in ``p2[5:8]`` + are greater than or equal to ``p2[4]`` and less than or equal to + ``p2[8]``, and all elements in ``p2[9:]`` are greater than or + equal to ``p2[8]``. The partition is:: + + [0, 1, 2, 1], [2], [3, 3, 2], [5], [6, 7, 7, 7, 7] + """ + if axis is None: + # flatten returns (1, N) for np.matrix, so always use the last axis + a = asanyarray(a).flatten() + axis = -1 + else: + a = asanyarray(a).copy(order="K") + a.partition(kth, axis=axis, kind=kind, order=order) + return a + + +def _argpartition_dispatcher(a, kth, axis=None, kind=None, order=None): + return (a,) + + +@array_function_dispatch(_argpartition_dispatcher) +def argpartition(a, kth, axis=-1, kind='introselect', order=None): + """ + Perform an indirect partition along the given axis using the + algorithm specified by the `kind` keyword. It returns an array of + indices of the same shape as `a` that index data along the given + axis in partitioned order. + + .. versionadded:: 1.8.0 + + Parameters + ---------- + a : array_like + Array to sort. + kth : int or sequence of ints + Element index to partition by. The k-th element will be in its + final sorted position and all smaller elements will be moved + before it and all larger elements behind it. The order of all + elements in the partitions is undefined. If provided with a + sequence of k-th it will partition all of them into their sorted + position at once. + + .. deprecated:: 1.22.0 + Passing booleans as index is deprecated. + axis : int or None, optional + Axis along which to sort. The default is -1 (the last axis). If + None, the flattened array is used. + kind : {'introselect'}, optional + Selection algorithm. 
Default is 'introselect' + order : str or list of str, optional + When `a` is an array with fields defined, this argument + specifies which fields to compare first, second, etc. A single + field can be specified as a string, and not all fields need be + specified, but unspecified fields will still be used, in the + order in which they come up in the dtype, to break ties. + + Returns + ------- + index_array : ndarray, int + Array of indices that partition `a` along the specified axis. + If `a` is one-dimensional, ``a[index_array]`` yields a partitioned `a`. + More generally, ``np.take_along_axis(a, index_array, axis=axis)`` + always yields the partitioned `a`, irrespective of dimensionality. + + See Also + -------- + partition : Describes partition algorithms used. + ndarray.partition : Inplace partition. + argsort : Full indirect sort. + take_along_axis : Apply ``index_array`` from argpartition + to an array as if by calling partition. + + Notes + ----- + See `partition` for notes on the different selection algorithms. + + Examples + -------- + One dimensional array: + + >>> x = np.array([3, 4, 2, 1]) + >>> x[np.argpartition(x, 3)] + array([2, 1, 3, 4]) + >>> x[np.argpartition(x, (1, 3))] + array([1, 2, 3, 4]) + + >>> x = [3, 4, 2, 1] + >>> np.array(x)[np.argpartition(x, 3)] + array([2, 1, 3, 4]) + + Multi-dimensional array: + + >>> x = np.array([[3, 4, 2], [1, 3, 1]]) + >>> index_array = np.argpartition(x, kth=1, axis=-1) + >>> np.take_along_axis(x, index_array, axis=-1) # same as np.partition(x, kth=1) + array([[2, 3, 4], + [1, 1, 3]]) + + """ + return _wrapfunc(a, 'argpartition', kth, axis=axis, kind=kind, order=order) + + +def _sort_dispatcher(a, axis=None, kind=None, order=None): + return (a,) + + +@array_function_dispatch(_sort_dispatcher) +def sort(a, axis=-1, kind=None, order=None): + """ + Return a sorted copy of an array. + + Parameters + ---------- + a : array_like + Array to be sorted. + axis : int or None, optional + Axis along which to sort. If None, the array is flattened before + sorting. The default is -1, which sorts along the last axis. + kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional + Sorting algorithm. The default is 'quicksort'. Note that both 'stable' + and 'mergesort' use timsort or radix sort under the covers and, in general, + the actual implementation will vary with data type. The 'mergesort' option + is retained for backwards compatibility. + + .. versionchanged:: 1.15.0. + The 'stable' option was added. + + order : str or list of str, optional + When `a` is an array with fields defined, this argument specifies + which fields to compare first, second, etc. A single field can + be specified as a string, and not all fields need be specified, + but unspecified fields will still be used, in the order in which + they come up in the dtype, to break ties. + + Returns + ------- + sorted_array : ndarray + Array of the same type and shape as `a`. + + See Also + -------- + ndarray.sort : Method to sort an array in-place. + argsort : Indirect sort. + lexsort : Indirect stable sort on multiple keys. + searchsorted : Find elements in a sorted array. + partition : Partial sort. + + Notes + ----- + The various sorting algorithms are characterized by their average speed, + worst case performance, work space size, and whether they are stable. A + stable sort keeps items with the same key in the same relative + order. 
The four algorithms implemented in NumPy have the following + properties: + + =========== ======= ============= ============ ======== + kind speed worst case work space stable + =========== ======= ============= ============ ======== + 'quicksort' 1 O(n^2) 0 no + 'heapsort' 3 O(n*log(n)) 0 no + 'mergesort' 2 O(n*log(n)) ~n/2 yes + 'timsort' 2 O(n*log(n)) ~n/2 yes + =========== ======= ============= ============ ======== + + .. note:: The datatype determines which of 'mergesort' or 'timsort' + is actually used, even if 'mergesort' is specified. User selection + at a finer scale is not currently available. + + All the sort algorithms make temporary copies of the data when + sorting along any but the last axis. Consequently, sorting along + the last axis is faster and uses less space than sorting along + any other axis. + + The sort order for complex numbers is lexicographic. If both the real + and imaginary parts are non-nan then the order is determined by the + real parts except when they are equal, in which case the order is + determined by the imaginary parts. + + Previous to numpy 1.4.0 sorting real and complex arrays containing nan + values led to undefined behaviour. In numpy versions >= 1.4.0 nan + values are sorted to the end. The extended sort order is: + + * Real: [R, nan] + * Complex: [R + Rj, R + nanj, nan + Rj, nan + nanj] + + where R is a non-nan real value. Complex values with the same nan + placements are sorted according to the non-nan part if it exists. + Non-nan values are sorted as before. + + .. versionadded:: 1.12.0 + + quicksort has been changed to `introsort <https://en.wikipedia.org/wiki/Introsort>`_. + When sorting does not make enough progress it switches to + `heapsort <https://en.wikipedia.org/wiki/Heapsort>`_. + This implementation makes quicksort O(n*log(n)) in the worst case. + + 'stable' automatically chooses the best stable sorting algorithm + for the data type being sorted. + It, along with 'mergesort' is currently mapped to + `timsort <https://en.wikipedia.org/wiki/Timsort>`_ + or `radix sort <https://en.wikipedia.org/wiki/Radix_sort>`_ + depending on the data type. + API forward compatibility currently limits the + ability to select the implementation and it is hardwired for the different + data types. + + .. versionadded:: 1.17.0 + + Timsort is added for better performance on already or nearly + sorted data. On random data timsort is almost identical to + mergesort. It is now used for stable sort while quicksort is still the + default sort if none is chosen. For timsort details, refer to + `CPython listsort.txt <https://github.com/python/cpython/blob/3.7/Objects/listsort.txt>`_. + 'mergesort' and 'stable' are mapped to radix sort for integer data types. Radix sort is an + O(n) sort instead of O(n log n). + + .. versionchanged:: 1.18.0 + + NaT now sorts to the end of arrays for consistency with NaN. + + Examples + -------- + >>> a = np.array([[1,4],[3,1]]) + >>> np.sort(a) # sort along the last axis + array([[1, 4], + [1, 3]]) + >>> np.sort(a, axis=None) # sort the flattened array + array([1, 1, 3, 4]) + >>> np.sort(a, axis=0) # sort along the first axis + array([[1, 1], + [3, 4]]) + + Use the `order` keyword to specify a field to use when sorting a + structured array: + + >>> dtype = [('name', 'S10'), ('height', float), ('age', int)] + >>> values = [('Arthur', 1.8, 41), ('Lancelot', 1.9, 38), + ... 
('Galahad', 1.7, 38)] + >>> a = np.array(values, dtype=dtype) # create a structured array + >>> np.sort(a, order='height') # doctest: +SKIP + array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41), + ('Lancelot', 1.8999999999999999, 38)], + dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')]) + + Sort by age, then height if ages are equal: + + >>> np.sort(a, order=['age', 'height']) # doctest: +SKIP + array([('Galahad', 1.7, 38), ('Lancelot', 1.8999999999999999, 38), + ('Arthur', 1.8, 41)], + dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')]) + + """ + if axis is None: + # flatten returns (1, N) for np.matrix, so always use the last axis + a = asanyarray(a).flatten() + axis = -1 + else: + a = asanyarray(a).copy(order="K") + a.sort(axis=axis, kind=kind, order=order) + return a + + +def _argsort_dispatcher(a, axis=None, kind=None, order=None): + return (a,) + + +@array_function_dispatch(_argsort_dispatcher) +def argsort(a, axis=-1, kind=None, order=None): + """ + Returns the indices that would sort an array. + + Perform an indirect sort along the given axis using the algorithm specified + by the `kind` keyword. It returns an array of indices of the same shape as + `a` that index data along the given axis in sorted order. + + Parameters + ---------- + a : array_like + Array to sort. + axis : int or None, optional + Axis along which to sort. The default is -1 (the last axis). If None, + the flattened array is used. + kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional + Sorting algorithm. The default is 'quicksort'. Note that both 'stable' + and 'mergesort' use timsort under the covers and, in general, the + actual implementation will vary with data type. The 'mergesort' option + is retained for backwards compatibility. + + .. versionchanged:: 1.15.0. + The 'stable' option was added. + order : str or list of str, optional + When `a` is an array with fields defined, this argument specifies + which fields to compare first, second, etc. A single field can + be specified as a string, and not all fields need be specified, + but unspecified fields will still be used, in the order in which + they come up in the dtype, to break ties. + + Returns + ------- + index_array : ndarray, int + Array of indices that sort `a` along the specified `axis`. + If `a` is one-dimensional, ``a[index_array]`` yields a sorted `a`. + More generally, ``np.take_along_axis(a, index_array, axis=axis)`` + always yields the sorted `a`, irrespective of dimensionality. + + See Also + -------- + sort : Describes sorting algorithms used. + lexsort : Indirect stable sort with multiple keys. + ndarray.sort : Inplace sort. + argpartition : Indirect partial sort. + take_along_axis : Apply ``index_array`` from argsort + to an array as if by calling sort. + + Notes + ----- + See `sort` for notes on the different sorting algorithms. + + As of NumPy 1.4.0 `argsort` works with real/complex arrays containing + nan values. The enhanced sort order is documented in `sort`. 
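As a brief sketch of the nan handling described above (using only documented ``argsort`` behaviour, and not part of the docstring itself):

    import numpy as np

    a = np.array([3.0, np.nan, 1.0])
    order = np.argsort(a)   # nan sorts to the end: array([2, 0, 1])
    a[order]                # array([ 1.,  3., nan])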
+ + Examples + -------- + One dimensional array: + + >>> x = np.array([3, 1, 2]) + >>> np.argsort(x) + array([1, 2, 0]) + + Two-dimensional array: + + >>> x = np.array([[0, 3], [2, 2]]) + >>> x + array([[0, 3], + [2, 2]]) + + >>> ind = np.argsort(x, axis=0) # sorts along first axis (down) + >>> ind + array([[0, 1], + [1, 0]]) + >>> np.take_along_axis(x, ind, axis=0) # same as np.sort(x, axis=0) + array([[0, 2], + [2, 3]]) + + >>> ind = np.argsort(x, axis=1) # sorts along last axis (across) + >>> ind + array([[0, 1], + [0, 1]]) + >>> np.take_along_axis(x, ind, axis=1) # same as np.sort(x, axis=1) + array([[0, 3], + [2, 2]]) + + Indices of the sorted elements of a N-dimensional array: + + >>> ind = np.unravel_index(np.argsort(x, axis=None), x.shape) + >>> ind + (array([0, 1, 1, 0]), array([0, 0, 1, 1])) + >>> x[ind] # same as np.sort(x, axis=None) + array([0, 2, 2, 3]) + + Sorting with keys: + + >>> x = np.array([(1, 0), (0, 1)], dtype=[('x', '<i4'), ('y', '<i4')]) + >>> x + array([(1, 0), (0, 1)], + dtype=[('x', '<i4'), ('y', '<i4')]) + + >>> np.argsort(x, order=('x','y')) + array([1, 0]) + + >>> np.argsort(x, order=('y','x')) + array([0, 1]) + + """ + return _wrapfunc(a, 'argsort', axis=axis, kind=kind, order=order) + + +def _argmax_dispatcher(a, axis=None, out=None, *, keepdims=np._NoValue): + return (a, out) + + +@array_function_dispatch(_argmax_dispatcher) +def argmax(a, axis=None, out=None, *, keepdims=np._NoValue): + """ + Returns the indices of the maximum values along an axis. + + Parameters + ---------- + a : array_like + Input array. + axis : int, optional + By default, the index is into the flattened array, otherwise + along the specified axis. + out : array, optional + If provided, the result will be inserted into this array. It should + be of the appropriate shape and dtype. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the array. + + .. versionadded:: 1.22.0 + + Returns + ------- + index_array : ndarray of ints + Array of indices into the array. It has the same shape as `a.shape` + with the dimension along `axis` removed. If `keepdims` is set to True, + then the size of `axis` will be 1 with the resulting array having same + shape as `a.shape`. + + See Also + -------- + ndarray.argmax, argmin + amax : The maximum value along a given axis. + unravel_index : Convert a flat index into an index tuple. + take_along_axis : Apply ``np.expand_dims(index_array, axis)`` + from argmax to an array as if by calling max. + + Notes + ----- + In case of multiple occurrences of the maximum values, the indices + corresponding to the first occurrence are returned. + + Examples + -------- + >>> a = np.arange(6).reshape(2,3) + 10 + >>> a + array([[10, 11, 12], + [13, 14, 15]]) + >>> np.argmax(a) + 5 + >>> np.argmax(a, axis=0) + array([1, 1, 1]) + >>> np.argmax(a, axis=1) + array([2, 2]) + + Indexes of the maximal elements of a N-dimensional array: + + >>> ind = np.unravel_index(np.argmax(a, axis=None), a.shape) + >>> ind + (1, 2) + >>> a[ind] + 15 + + >>> b = np.arange(6) + >>> b[1] = 5 + >>> b + array([0, 5, 2, 3, 4, 5]) + >>> np.argmax(b) # Only the first occurrence is returned. 
+ 1 + + >>> x = np.array([[4,2,3], [1,0,3]]) + >>> index_array = np.argmax(x, axis=-1) + >>> # Same as np.amax(x, axis=-1, keepdims=True) + >>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), axis=-1) + array([[4], + [3]]) + >>> # Same as np.amax(x, axis=-1) + >>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), axis=-1).squeeze(axis=-1) + array([4, 3]) + + Setting `keepdims` to `True`, + + >>> x = np.arange(24).reshape((2, 3, 4)) + >>> res = np.argmax(x, axis=1, keepdims=True) + >>> res.shape + (2, 1, 4) + """ + kwds = {'keepdims': keepdims} if keepdims is not np._NoValue else {} + return _wrapfunc(a, 'argmax', axis=axis, out=out, **kwds) + + +def _argmin_dispatcher(a, axis=None, out=None, *, keepdims=np._NoValue): + return (a, out) + + +@array_function_dispatch(_argmin_dispatcher) +def argmin(a, axis=None, out=None, *, keepdims=np._NoValue): + """ + Returns the indices of the minimum values along an axis. + + Parameters + ---------- + a : array_like + Input array. + axis : int, optional + By default, the index is into the flattened array, otherwise + along the specified axis. + out : array, optional + If provided, the result will be inserted into this array. It should + be of the appropriate shape and dtype. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the array. + + .. versionadded:: 1.22.0 + + Returns + ------- + index_array : ndarray of ints + Array of indices into the array. It has the same shape as `a.shape` + with the dimension along `axis` removed. If `keepdims` is set to True, + then the size of `axis` will be 1 with the resulting array having same + shape as `a.shape`. + + See Also + -------- + ndarray.argmin, argmax + amin : The minimum value along a given axis. + unravel_index : Convert a flat index into an index tuple. + take_along_axis : Apply ``np.expand_dims(index_array, axis)`` + from argmin to an array as if by calling min. + + Notes + ----- + In case of multiple occurrences of the minimum values, the indices + corresponding to the first occurrence are returned. + + Examples + -------- + >>> a = np.arange(6).reshape(2,3) + 10 + >>> a + array([[10, 11, 12], + [13, 14, 15]]) + >>> np.argmin(a) + 0 + >>> np.argmin(a, axis=0) + array([0, 0, 0]) + >>> np.argmin(a, axis=1) + array([0, 0]) + + Indices of the minimum elements of a N-dimensional array: + + >>> ind = np.unravel_index(np.argmin(a, axis=None), a.shape) + >>> ind + (0, 0) + >>> a[ind] + 10 + + >>> b = np.arange(6) + 10 + >>> b[4] = 10 + >>> b + array([10, 11, 12, 13, 10, 15]) + >>> np.argmin(b) # Only the first occurrence is returned. 
+ 0 + + >>> x = np.array([[4,2,3], [1,0,3]]) + >>> index_array = np.argmin(x, axis=-1) + >>> # Same as np.amin(x, axis=-1, keepdims=True) + >>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), axis=-1) + array([[2], + [0]]) + >>> # Same as np.amin(x, axis=-1) + >>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), axis=-1).squeeze(axis=-1) + array([2, 0]) + + Setting `keepdims` to `True`, + + >>> x = np.arange(24).reshape((2, 3, 4)) + >>> res = np.argmin(x, axis=1, keepdims=True) + >>> res.shape + (2, 1, 4) + """ + kwds = {'keepdims': keepdims} if keepdims is not np._NoValue else {} + return _wrapfunc(a, 'argmin', axis=axis, out=out, **kwds) + + +def _searchsorted_dispatcher(a, v, side=None, sorter=None): + return (a, v, sorter) + + +@array_function_dispatch(_searchsorted_dispatcher) +def searchsorted(a, v, side='left', sorter=None): + """ + Find indices where elements should be inserted to maintain order. + + Find the indices into a sorted array `a` such that, if the + corresponding elements in `v` were inserted before the indices, the + order of `a` would be preserved. + + Assuming that `a` is sorted: + + ====== ============================ + `side` returned index `i` satisfies + ====== ============================ + left ``a[i-1] < v <= a[i]`` + right ``a[i-1] <= v < a[i]`` + ====== ============================ + + Parameters + ---------- + a : 1-D array_like + Input array. If `sorter` is None, then it must be sorted in + ascending order, otherwise `sorter` must be an array of indices + that sort it. + v : array_like + Values to insert into `a`. + side : {'left', 'right'}, optional + If 'left', the index of the first suitable location found is given. + If 'right', return the last such index. If there is no suitable + index, return either 0 or N (where N is the length of `a`). + sorter : 1-D array_like, optional + Optional array of integer indices that sort array a into ascending + order. They are typically the result of argsort. + + .. versionadded:: 1.7.0 + + Returns + ------- + indices : int or array of ints + Array of insertion points with the same shape as `v`, + or an integer if `v` is a scalar. + + See Also + -------- + sort : Return a sorted copy of an array. + histogram : Produce histogram from 1-D data. + + Notes + ----- + Binary search is used to find the required insertion points. + + As of NumPy 1.4.0 `searchsorted` works with real/complex arrays containing + `nan` values. The enhanced sort order is documented in `sort`. + + This function uses the same algorithm as the built-in Python `bisect.bisect_left` + (``side='left'``) and `bisect.bisect_right` (``side='right'``) functions, + which is also vectorized in the `v` argument. + + Examples + -------- + >>> np.searchsorted([1,2,3,4,5], 3) + 2 + >>> np.searchsorted([1,2,3,4,5], 3, side='right') + 3 + >>> np.searchsorted([1,2,3,4,5], [-10, 10, 2, 3]) + array([0, 5, 1, 2]) + + """ + return _wrapfunc(a, 'searchsorted', v, side=side, sorter=sorter) + + +def _resize_dispatcher(a, new_shape): + return (a,) + + +@array_function_dispatch(_resize_dispatcher) +def resize(a, new_shape): + """ + Return a new array with the specified shape. + + If the new array is larger than the original array, then the new + array is filled with repeated copies of `a`. Note that this behavior + is different from a.resize(new_shape) which fills with zeros instead + of repeated copies of `a`. + + Parameters + ---------- + a : array_like + Array to be resized. + + new_shape : int or tuple of int + Shape of resized array.
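A short illustrative sketch of the contrast noted above between ``np.resize`` (repeats the data) and the in-place ``ndarray.resize`` (zero-fills):

    import numpy as np

    a = np.array([1, 2, 3])
    np.resize(a, (2, 3))              # repeats: [[1, 2, 3], [1, 2, 3]]

    b = np.array([1, 2, 3])
    b.resize((2, 3), refcheck=False)  # in-place, pads with zeros
    # b is now [[1, 2, 3], [0, 0, 0]]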
+ + Returns + ------- + reshaped_array : ndarray + The new array is formed from the data in the old array, repeated + if necessary to fill out the required number of elements. The + data are repeated iterating over the array in C-order. + + See Also + -------- + numpy.reshape : Reshape an array without changing the total size. + numpy.pad : Enlarge and pad an array. + numpy.repeat : Repeat elements of an array. + ndarray.resize : resize an array in-place. + + Notes + ----- + When the total size of the array does not change `~numpy.reshape` should + be used. In most other cases either indexing (to reduce the size) + or padding (to increase the size) may be a more appropriate solution. + + Warning: This functionality does **not** consider axes separately, + i.e. it does not apply interpolation/extrapolation. + It fills the return array with the required number of elements, iterating + over `a` in C-order, disregarding axes (and cycling back from the start if + the new shape is larger). This functionality is therefore not suitable to + resize images, or data where each axis represents a separate and distinct + entity. + + Examples + -------- + >>> a=np.array([[0,1],[2,3]]) + >>> np.resize(a,(2,3)) + array([[0, 1, 2], + [3, 0, 1]]) + >>> np.resize(a,(1,4)) + array([[0, 1, 2, 3]]) + >>> np.resize(a,(2,4)) + array([[0, 1, 2, 3], + [0, 1, 2, 3]]) + + """ + if isinstance(new_shape, (int, nt.integer)): + new_shape = (new_shape,) + + a = ravel(a) + + new_size = 1 + for dim_length in new_shape: + new_size *= dim_length + if dim_length < 0: + raise ValueError('all elements of `new_shape` must be non-negative') + + if a.size == 0 or new_size == 0: + # First case must zero fill. The second would have repeats == 0. + return np.zeros_like(a, shape=new_shape) + + repeats = -(-new_size // a.size) # ceil division + a = concatenate((a,) * repeats)[:new_size] + + return reshape(a, new_shape) + + +def _squeeze_dispatcher(a, axis=None): + return (a,) + + +@array_function_dispatch(_squeeze_dispatcher) +def squeeze(a, axis=None): + """ + Remove axes of length one from `a`. + + Parameters + ---------- + a : array_like + Input data. + axis : None or int or tuple of ints, optional + .. versionadded:: 1.7.0 + + Selects a subset of the entries of length one in the + shape. If an axis is selected with shape entry greater than + one, an error is raised. + + Returns + ------- + squeezed : ndarray + The input array, but with all or a subset of the + dimensions of length 1 removed. This is always `a` itself + or a view into `a`. Note that if all axes are squeezed, + the result is a 0d array and not a scalar. + + Raises + ------ + ValueError + If `axis` is not None, and an axis being squeezed is not of length 1 + + See Also + -------- + expand_dims : The inverse operation, adding entries of length one + reshape : Insert, remove, and combine dimensions, and resize existing ones + + Examples + -------- + >>> x = np.array([[[0], [1], [2]]]) + >>> x.shape + (1, 3, 1) + >>> np.squeeze(x).shape + (3,) + >>> np.squeeze(x, axis=0).shape + (3, 1) + >>> np.squeeze(x, axis=1).shape + Traceback (most recent call last): + ... 
+ ValueError: cannot select an axis to squeeze out which has size not equal to one + >>> np.squeeze(x, axis=2).shape + (1, 3) + >>> x = np.array([[1234]]) + >>> x.shape + (1, 1) + >>> np.squeeze(x) + array(1234) # 0d array + >>> np.squeeze(x).shape + () + >>> np.squeeze(x)[()] + 1234 + + """ + try: + squeeze = a.squeeze + except AttributeError: + return _wrapit(a, 'squeeze', axis=axis) + if axis is None: + return squeeze() + else: + return squeeze(axis=axis) + + +def _diagonal_dispatcher(a, offset=None, axis1=None, axis2=None): + return (a,) + + +@array_function_dispatch(_diagonal_dispatcher) +def diagonal(a, offset=0, axis1=0, axis2=1): + """ + Return specified diagonals. + + If `a` is 2-D, returns the diagonal of `a` with the given offset, + i.e., the collection of elements of the form ``a[i, i+offset]``. If + `a` has more than two dimensions, then the axes specified by `axis1` + and `axis2` are used to determine the 2-D sub-array whose diagonal is + returned. The shape of the resulting array can be determined by + removing `axis1` and `axis2` and appending an index to the right equal + to the size of the resulting diagonals. + + In versions of NumPy prior to 1.7, this function always returned a new, + independent array containing a copy of the values in the diagonal. + + In NumPy 1.7 and 1.8, it continues to return a copy of the diagonal, + but depending on this fact is deprecated. Writing to the resulting + array continues to work as it used to, but a FutureWarning is issued. + + Starting in NumPy 1.9 it returns a read-only view on the original array. + Attempting to write to the resulting array will produce an error. + + In some future release, it will return a read/write view and writing to + the returned array will alter your original array. The returned array + will have the same type as the input array. + + If you don't write to the array returned by this function, then you can + just ignore all of the above. + + If you depend on the current behavior, then we suggest copying the + returned array explicitly, i.e., use ``np.diagonal(a).copy()`` instead + of just ``np.diagonal(a)``. This will work with both past and future + versions of NumPy. + + Parameters + ---------- + a : array_like + Array from which the diagonals are taken. + offset : int, optional + Offset of the diagonal from the main diagonal. Can be positive or + negative. Defaults to main diagonal (0). + axis1 : int, optional + Axis to be used as the first axis of the 2-D sub-arrays from which + the diagonals should be taken. Defaults to first axis (0). + axis2 : int, optional + Axis to be used as the second axis of the 2-D sub-arrays from + which the diagonals should be taken. Defaults to second axis (1). + + Returns + ------- + array_of_diagonals : ndarray + If `a` is 2-D, then a 1-D array containing the diagonal and of the + same type as `a` is returned unless `a` is a `matrix`, in which case + a 1-D array rather than a (2-D) `matrix` is returned in order to + maintain backward compatibility. + + If ``a.ndim > 2``, then the dimensions specified by `axis1` and `axis2` + are removed, and a new axis inserted at the end corresponding to the + diagonal. + + Raises + ------ + ValueError + If the dimension of `a` is less than 2. + + See Also + -------- + diag : MATLAB work-a-like for 1-D and 2-D arrays. + diagflat : Create diagonal arrays. + trace : Sum along diagonals. 
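The copy-first advice above, sketched briefly (assuming current behaviour, where the returned diagonal is a read-only view):

    import numpy as np

    a = np.arange(9).reshape(3, 3)
    d = np.diagonal(a).copy()   # explicit copy is safe to modify, past and future
    d[0] = 99                   # leaves `a` unchanged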
+ + Examples + -------- + >>> a = np.arange(4).reshape(2,2) + >>> a + array([[0, 1], + [2, 3]]) + >>> a.diagonal() + array([0, 3]) + >>> a.diagonal(1) + array([1]) + + A 3-D example: + + >>> a = np.arange(8).reshape(2,2,2); a + array([[[0, 1], + [2, 3]], + [[4, 5], + [6, 7]]]) + >>> a.diagonal(0, # Main diagonals of two arrays created by skipping + ... 0, # across the outer(left)-most axis last and + ... 1) # the "middle" (row) axis first. + array([[0, 6], + [1, 7]]) + + The sub-arrays whose main diagonals we just obtained; note that each + corresponds to fixing the right-most (column) axis, and that the + diagonals are "packed" in rows. + + >>> a[:,:,0] # main diagonal is [0 6] + array([[0, 2], + [4, 6]]) + >>> a[:,:,1] # main diagonal is [1 7] + array([[1, 3], + [5, 7]]) + + The anti-diagonal can be obtained by reversing the order of elements + using either `numpy.flipud` or `numpy.fliplr`. + + >>> a = np.arange(9).reshape(3, 3) + >>> a + array([[0, 1, 2], + [3, 4, 5], + [6, 7, 8]]) + >>> np.fliplr(a).diagonal() # Horizontal flip + array([2, 4, 6]) + >>> np.flipud(a).diagonal() # Vertical flip + array([6, 4, 2]) + + Note that the order in which the diagonal is retrieved varies depending + on the flip function. + """ + if isinstance(a, np.matrix): + # Make diagonal of matrix 1-D to preserve backward compatibility. + return asarray(a).diagonal(offset=offset, axis1=axis1, axis2=axis2) + else: + return asanyarray(a).diagonal(offset=offset, axis1=axis1, axis2=axis2) + + +def _trace_dispatcher( + a, offset=None, axis1=None, axis2=None, dtype=None, out=None): + return (a, out) + + +@array_function_dispatch(_trace_dispatcher) +def trace(a, offset=0, axis1=0, axis2=1, dtype=None, out=None): + """ + Return the sum along diagonals of the array. + + If `a` is 2-D, the sum along its diagonal with the given offset + is returned, i.e., the sum of elements ``a[i,i+offset]`` for all i. + + If `a` has more than two dimensions, then the axes specified by axis1 and + axis2 are used to determine the 2-D sub-arrays whose traces are returned. + The shape of the resulting array is the same as that of `a` with `axis1` + and `axis2` removed. + + Parameters + ---------- + a : array_like + Input array, from which the diagonals are taken. + offset : int, optional + Offset of the diagonal from the main diagonal. Can be both positive + and negative. Defaults to 0. + axis1, axis2 : int, optional + Axes to be used as the first and second axis of the 2-D sub-arrays + from which the diagonals should be taken. Defaults are the first two + axes of `a`. + dtype : dtype, optional + Determines the data-type of the returned array and of the accumulator + where the elements are summed. If dtype has the value None and `a` is + of integer type of precision less than the default integer + precision, then the default integer precision is used. Otherwise, + the precision is the same as that of `a`. + out : ndarray, optional + Array into which the output is placed. Its type is preserved and + it must be of the right shape to hold the output. + + Returns + ------- + sum_along_diagonals : ndarray + If `a` is 2-D, the sum along the diagonal is returned. If `a` has + larger dimensions, then an array of sums along diagonals is returned. 
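A quick sketch of how the ``offset`` argument chooses which diagonal is summed:

    import numpy as np

    a = np.arange(9).reshape(3, 3)
    np.trace(a)             # 0 + 4 + 8 = 12
    np.trace(a, offset=1)   # 1 + 5 = 6
    np.trace(a, offset=-1)  # 3 + 7 = 10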
+ + See Also + -------- + diag, diagonal, diagflat + + Examples + -------- + >>> np.trace(np.eye(3)) + 3.0 + >>> a = np.arange(8).reshape((2,2,2)) + >>> np.trace(a) + array([6, 8]) + + >>> a = np.arange(24).reshape((2,2,2,3)) + >>> np.trace(a).shape + (2, 3) + + """ + if isinstance(a, np.matrix): + # Get trace of matrix via an array to preserve backward compatibility. + return asarray(a).trace(offset=offset, axis1=axis1, axis2=axis2, dtype=dtype, out=out) + else: + return asanyarray(a).trace(offset=offset, axis1=axis1, axis2=axis2, dtype=dtype, out=out) + + +def _ravel_dispatcher(a, order=None): + return (a,) + + +@array_function_dispatch(_ravel_dispatcher) +def ravel(a, order='C'): + """Return a contiguous flattened array. + + A 1-D array, containing the elements of the input, is returned. A copy is + made only if needed. + + As of NumPy 1.10, the returned array will have the same type as the input + array. (for example, a masked array will be returned for a masked array + input) + + Parameters + ---------- + a : array_like + Input array. The elements in `a` are read in the order specified by + `order`, and packed as a 1-D array. + order : {'C','F', 'A', 'K'}, optional + + The elements of `a` are read using this index order. 'C' means + to index the elements in row-major, C-style order, + with the last axis index changing fastest, back to the first + axis index changing slowest. 'F' means to index the elements + in column-major, Fortran-style order, with the + first index changing fastest, and the last index changing + slowest. Note that the 'C' and 'F' options take no account of + the memory layout of the underlying array, and only refer to + the order of axis indexing. 'A' means to read the elements in + Fortran-like index order if `a` is Fortran *contiguous* in + memory, C-like order otherwise. 'K' means to read the + elements in the order they occur in memory, except for + reversing the data when strides are negative. By default, 'C' + index order is used. + + Returns + ------- + y : array_like + y is a contiguous 1-D array of the same subtype as `a`, + with shape ``(a.size,)``. + Note that matrices are special cased for backward compatibility, + if `a` is a matrix, then y is a 1-D ndarray. + + See Also + -------- + ndarray.flat : 1-D iterator over an array. + ndarray.flatten : 1-D array copy of the elements of an array + in row-major order. + ndarray.reshape : Change the shape of an array without changing its data. + + Notes + ----- + In row-major, C-style order, in two dimensions, the row index + varies the slowest, and the column index the quickest. This can + be generalized to multiple dimensions, where row-major order + implies that the index along the first axis varies slowest, and + the index along the last quickest. The opposite holds for + column-major, Fortran-style index ordering. + + When a view is desired in as many cases as possible, ``arr.reshape(-1)`` + may be preferable. However, ``ravel`` supports ``K`` in the optional + ``order`` argument while ``reshape`` does not. + + Examples + -------- + It is equivalent to ``reshape(-1, order=order)``. 
+ + >>> x = np.array([[1, 2, 3], [4, 5, 6]]) + >>> np.ravel(x) + array([1, 2, 3, 4, 5, 6]) + + >>> x.reshape(-1) + array([1, 2, 3, 4, 5, 6]) + + >>> np.ravel(x, order='F') + array([1, 4, 2, 5, 3, 6]) + + When ``order`` is 'A', it will preserve the array's 'C' or 'F' ordering: + + >>> np.ravel(x.T) + array([1, 4, 2, 5, 3, 6]) + >>> np.ravel(x.T, order='A') + array([1, 2, 3, 4, 5, 6]) + + When ``order`` is 'K', it will preserve orderings that are neither 'C' + nor 'F', but won't reverse axes: + + >>> a = np.arange(3)[::-1]; a + array([2, 1, 0]) + >>> a.ravel(order='C') + array([2, 1, 0]) + >>> a.ravel(order='K') + array([2, 1, 0]) + + >>> a = np.arange(12).reshape(2,3,2).swapaxes(1,2); a + array([[[ 0, 2, 4], + [ 1, 3, 5]], + [[ 6, 8, 10], + [ 7, 9, 11]]]) + >>> a.ravel(order='C') + array([ 0, 2, 4, 1, 3, 5, 6, 8, 10, 7, 9, 11]) + >>> a.ravel(order='K') + array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]) + + """ + if isinstance(a, np.matrix): + return asarray(a).ravel(order=order) + else: + return asanyarray(a).ravel(order=order) + + +def _nonzero_dispatcher(a): + return (a,) + + +@array_function_dispatch(_nonzero_dispatcher) +def nonzero(a): + """ + Return the indices of the elements that are non-zero. + + Returns a tuple of arrays, one for each dimension of `a`, + containing the indices of the non-zero elements in that + dimension. The values in `a` are always tested and returned in + row-major, C-style order. + + To group the indices by element, rather than dimension, use `argwhere`, + which returns a row for each non-zero element. + + .. note:: + + When called on a zero-d array or scalar, ``nonzero(a)`` is treated + as ``nonzero(atleast_1d(a))``. + + .. deprecated:: 1.17.0 + + Use `atleast_1d` explicitly if this behavior is deliberate. + + Parameters + ---------- + a : array_like + Input array. + + Returns + ------- + tuple_of_arrays : tuple + Indices of elements that are non-zero. + + See Also + -------- + flatnonzero : + Return indices that are non-zero in the flattened version of the input + array. + ndarray.nonzero : + Equivalent ndarray method. + count_nonzero : + Counts the number of non-zero elements in the input array. + + Notes + ----- + While the nonzero values can be obtained with ``a[nonzero(a)]``, it is + recommended to use ``x[x.astype(bool)]`` or ``x[x != 0]`` instead, which + will correctly handle 0-d arrays. + + Examples + -------- + >>> x = np.array([[3, 0, 0], [0, 4, 0], [5, 6, 0]]) + >>> x + array([[3, 0, 0], + [0, 4, 0], + [5, 6, 0]]) + >>> np.nonzero(x) + (array([0, 1, 2, 2]), array([0, 1, 0, 1])) + + >>> x[np.nonzero(x)] + array([3, 4, 5, 6]) + >>> np.transpose(np.nonzero(x)) + array([[0, 0], + [1, 1], + [2, 0], + [2, 1]]) + + A common use for ``nonzero`` is to find the indices of an array, where + a condition is True. Given an array `a`, the condition `a` > 3 is a + boolean array and since False is interpreted as 0, np.nonzero(a > 3) + yields the indices of the `a` where the condition is true. + + >>> a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) + >>> a > 3 + array([[False, False, False], + [ True, True, True], + [ True, True, True]]) + >>> np.nonzero(a > 3) + (array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2])) + + Using this result to index `a` is equivalent to using the mask directly: + + >>> a[np.nonzero(a > 3)] + array([4, 5, 6, 7, 8, 9]) + >>> a[a > 3] # prefer this spelling + array([4, 5, 6, 7, 8, 9]) + + ``nonzero`` can also be called as a method of the array. 
+ + >>> (a > 3).nonzero() + (array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2])) + + """ + return _wrapfunc(a, 'nonzero') + + +def _shape_dispatcher(a): + return (a,) + + +@array_function_dispatch(_shape_dispatcher) +def shape(a): + """ + Return the shape of an array. + + Parameters + ---------- + a : array_like + Input array. + + Returns + ------- + shape : tuple of ints + The elements of the shape tuple give the lengths of the + corresponding array dimensions. + + See Also + -------- + len : ``len(a)`` is equivalent to ``np.shape(a)[0]`` for N-D arrays with + ``N>=1``. + ndarray.shape : Equivalent array method. + + Examples + -------- + >>> np.shape(np.eye(3)) + (3, 3) + >>> np.shape([[1, 3]]) + (1, 2) + >>> np.shape([0]) + (1,) + >>> np.shape(0) + () + + >>> a = np.array([(1, 2), (3, 4), (5, 6)], + ... dtype=[('x', 'i4'), ('y', 'i4')]) + >>> np.shape(a) + (3,) + >>> a.shape + (3,) + + """ + try: + result = a.shape + except AttributeError: + result = asarray(a).shape + return result + + +def _compress_dispatcher(condition, a, axis=None, out=None): + return (condition, a, out) + + +@array_function_dispatch(_compress_dispatcher) +def compress(condition, a, axis=None, out=None): + """ + Return selected slices of an array along given axis. + + When working along a given axis, a slice along that axis is returned in + `output` for each index where `condition` evaluates to True. When + working on a 1-D array, `compress` is equivalent to `extract`. + + Parameters + ---------- + condition : 1-D array of bools + Array that selects which entries to return. If len(condition) + is less than the size of `a` along the given axis, then output is + truncated to the length of the condition array. + a : array_like + Array from which to extract a part. + axis : int, optional + Axis along which to take slices. If None (default), work on the + flattened array. + out : ndarray, optional + Output array. Its type is preserved and it must be of the right + shape to hold the output. + + Returns + ------- + compressed_array : ndarray + A copy of `a` without the slices along axis for which `condition` + is false. + + See Also + -------- + take, choose, diag, diagonal, select + ndarray.compress : Equivalent method in ndarray + extract : Equivalent method when working on 1-D arrays + :ref:`ufuncs-output-type` + + Examples + -------- + >>> a = np.array([[1, 2], [3, 4], [5, 6]]) + >>> a + array([[1, 2], + [3, 4], + [5, 6]]) + >>> np.compress([0, 1], a, axis=0) + array([[3, 4]]) + >>> np.compress([False, True, True], a, axis=0) + array([[3, 4], + [5, 6]]) + >>> np.compress([False, True], a, axis=1) + array([[2], + [4], + [6]]) + + Working on the flattened array does not return slices along an axis but + selects elements. + + >>> np.compress([False, True], a) + array([2]) + + """ + return _wrapfunc(a, 'compress', condition, axis=axis, out=out) + + +def _clip_dispatcher(a, a_min, a_max, out=None, **kwargs): + return (a, a_min, a_max) + + +@array_function_dispatch(_clip_dispatcher) +def clip(a, a_min, a_max, out=None, **kwargs): + """ + Clip (limit) the values in an array. + + Given an interval, values outside the interval are clipped to + the interval edges. For example, if an interval of ``[0, 1]`` + is specified, values smaller than 0 become 0, and values larger + than 1 become 1. + + Equivalent to but faster than ``np.minimum(a_max, np.maximum(a, a_min))``. + + No check is performed to ensure ``a_min < a_max``. + + Parameters + ---------- + a : array_like + Array containing elements to clip. 
+ a_min, a_max : array_like or None + Minimum and maximum value. If ``None``, clipping is not performed on + the corresponding edge. Only one of `a_min` and `a_max` may be + ``None``. Both are broadcast against `a`. + out : ndarray, optional + The results will be placed in this array. It may be the input + array for in-place clipping. `out` must be of the right shape + to hold the output. Its type is preserved. + **kwargs + For other keyword-only arguments, see the + :ref:`ufunc docs <ufuncs.kwargs>`. + + .. versionadded:: 1.17.0 + + Returns + ------- + clipped_array : ndarray + An array with the elements of `a`, but where values + < `a_min` are replaced with `a_min`, and those > `a_max` + with `a_max`. + + See Also + -------- + :ref:`ufuncs-output-type` + + Notes + ----- + When `a_min` is greater than `a_max`, `clip` returns an + array in which all values are equal to `a_max`, + as shown in the second example. + + Examples + -------- + >>> a = np.arange(10) + >>> a + array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + >>> np.clip(a, 1, 8) + array([1, 1, 2, 3, 4, 5, 6, 7, 8, 8]) + >>> np.clip(a, 8, 1) + array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1]) + >>> np.clip(a, 3, 6, out=a) + array([3, 3, 3, 3, 4, 5, 6, 6, 6, 6]) + >>> a + array([3, 3, 3, 3, 4, 5, 6, 6, 6, 6]) + >>> a = np.arange(10) + >>> a + array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + >>> np.clip(a, [3, 4, 1, 1, 1, 4, 4, 4, 4, 4], 8) + array([3, 4, 2, 3, 4, 5, 6, 7, 8, 8]) + + """ + return _wrapfunc(a, 'clip', a_min, a_max, out=out, **kwargs) + + +def _sum_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None, + initial=None, where=None): + return (a, out) + + +@array_function_dispatch(_sum_dispatcher) +def sum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, + initial=np._NoValue, where=np._NoValue): + """ + Sum of array elements over a given axis. + + Parameters + ---------- + a : array_like + Elements to sum. + axis : None or int or tuple of ints, optional + Axis or axes along which a sum is performed. The default, + axis=None, will sum all of the elements of the input array. If + axis is negative it counts from the last to the first axis. + + .. versionadded:: 1.7.0 + + If axis is a tuple of ints, a sum is performed on all of the axes + specified in the tuple instead of a single axis or all the axes as + before. + dtype : dtype, optional + The type of the returned array and of the accumulator in which the + elements are summed. The dtype of `a` is used by default unless `a` + has an integer dtype of less precision than the default platform + integer. In that case, if `a` is signed then the platform integer + is used while if `a` is unsigned then an unsigned integer of the + same precision as the platform integer is used. + out : ndarray, optional + Alternative output array in which to place the result. It must have + the same shape as the expected output, but the type of the output + values will be cast if necessary. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the input array. + + If the default value is passed, then `keepdims` will not be + passed through to the `sum` method of sub-classes of + `ndarray`, however any non-default value will be. If the + sub-class' method does not implement `keepdims` any + exceptions will be raised. + initial : scalar, optional + Starting value for the sum. See `~numpy.ufunc.reduce` for details. + + .. 
versionadded:: 1.15.0 + + where : array_like of bool, optional + Elements to include in the sum. See `~numpy.ufunc.reduce` for details. + + .. versionadded:: 1.17.0 + + Returns + ------- + sum_along_axis : ndarray + An array with the same shape as `a`, with the specified + axis removed. If `a` is a 0-d array, or if `axis` is None, a scalar + is returned. If an output array is specified, a reference to + `out` is returned. + + See Also + -------- + ndarray.sum : Equivalent method. + + add.reduce : Equivalent functionality of `add`. + + cumsum : Cumulative sum of array elements. + + trapz : Integration of array values using the composite trapezoidal rule. + + mean, average + + Notes + ----- + Arithmetic is modular when using integer types, and no error is + raised on overflow. + + The sum of an empty array is the neutral element 0: + + >>> np.sum([]) + 0.0 + + For floating point numbers the numerical precision of sum (and + ``np.add.reduce``) is in general limited by directly adding each number + individually to the result causing rounding errors in every step. + However, often numpy will use a numerically better approach (partial + pairwise summation) leading to improved precision in many use-cases. + This improved precision is always provided when no ``axis`` is given. + When ``axis`` is given, it will depend on which axis is summed. + Technically, to provide the best speed possible, the improved precision + is only used when the summation is along the fast axis in memory. + Note that the exact precision may vary depending on other parameters. + In contrast to NumPy, Python's ``math.fsum`` function uses a slower but + more precise approach to summation. + Especially when summing a large number of lower precision floating point + numbers, such as ``float32``, numerical errors can become significant. + In such cases it can be advisable to use `dtype="float64"` to use a higher + precision for the output. + + Examples + -------- + >>> np.sum([0.5, 1.5]) + 2.0 + >>> np.sum([0.5, 0.7, 0.2, 1.5], dtype=np.int32) + 1 + >>> np.sum([[0, 1], [0, 5]]) + 6 + >>> np.sum([[0, 1], [0, 5]], axis=0) + array([0, 6]) + >>> np.sum([[0, 1], [0, 5]], axis=1) + array([1, 5]) + >>> np.sum([[0, 1], [np.nan, 5]], where=[False, True], axis=1) + array([1., 5.]) + + If the accumulator is too small, overflow occurs: + + >>> np.ones(128, dtype=np.int8).sum(dtype=np.int8) + -128 + + You can also start the sum with a value other than zero: + + >>> np.sum([10], initial=5) + 15 + """ + if isinstance(a, _gentype): + # 2018-02-25, 1.15.0 + warnings.warn( + "Calling np.sum(generator) is deprecated, and in the future will give a different result. " + "Use np.sum(np.fromiter(generator)) or the python sum builtin instead.", + DeprecationWarning, stacklevel=2) + + res = _sum_(a) + if out is not None: + out[...] = res + return out + return res + + return _wrapreduction(a, np.add, 'sum', axis, dtype, out, keepdims=keepdims, + initial=initial, where=where) + + +def _any_dispatcher(a, axis=None, out=None, keepdims=None, *, + where=np._NoValue): + return (a, where, out) + + +@array_function_dispatch(_any_dispatcher) +def any(a, axis=None, out=None, keepdims=np._NoValue, *, where=np._NoValue): + """ + Test whether any array element along a given axis evaluates to True. + + Returns single boolean if `axis` is ``None`` + + Parameters + ---------- + a : array_like + Input array or object that can be converted to an array. + axis : None or int or tuple of ints, optional + Axis or axes along which a logical OR reduction is performed. 
+ The default (``axis=None``) is to perform a logical OR over all + the dimensions of the input array. `axis` may be negative, in + which case it counts from the last to the first axis. + + .. versionadded:: 1.7.0 + + If this is a tuple of ints, a reduction is performed on multiple + axes, instead of a single axis or all the axes as before. + out : ndarray, optional + Alternate output array in which to place the result. It must have + the same shape as the expected output and its type is preserved + (e.g., if it is of type float, then it will remain so, returning + 1.0 for True and 0.0 for False, regardless of the type of `a`). + See :ref:`ufuncs-output-type` for more details. + + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the input array. + + If the default value is passed, then `keepdims` will not be + passed through to the `any` method of sub-classes of + `ndarray`, however any non-default value will be. If the + sub-class' method does not implement `keepdims` any + exceptions will be raised. + + where : array_like of bool, optional + Elements to include in checking for any `True` values. + See `~numpy.ufunc.reduce` for details. + + .. versionadded:: 1.20.0 + + Returns + ------- + any : bool or ndarray + A new boolean or `ndarray` is returned unless `out` is specified, + in which case a reference to `out` is returned. + + See Also + -------- + ndarray.any : equivalent method + + all : Test whether all elements along a given axis evaluate to True. + + Notes + ----- + Not a Number (NaN), positive infinity and negative infinity evaluate + to `True` because these are not equal to zero. + + Examples + -------- + >>> np.any([[True, False], [True, True]]) + True + + >>> np.any([[True, False], [False, False]], axis=0) + array([ True, False]) + + >>> np.any([-1, 0, 5]) + True + + >>> np.any(np.nan) + True + + >>> np.any([[True, False], [False, False]], where=[[False], [True]]) + False + + >>> o=np.array(False) + >>> z=np.any([-1, 4, 5], out=o) + >>> z, o + (array(True), array(True)) + >>> # Check now that z is a reference to o + >>> z is o + True + >>> id(z), id(o) # identity of z and o # doctest: +SKIP + (191614240, 191614240) + + """ + return _wrapreduction(a, np.logical_or, 'any', axis, None, out, + keepdims=keepdims, where=where) + + +def _all_dispatcher(a, axis=None, out=None, keepdims=None, *, + where=None): + return (a, where, out) + + +@array_function_dispatch(_all_dispatcher) +def all(a, axis=None, out=None, keepdims=np._NoValue, *, where=np._NoValue): + """ + Test whether all array elements along a given axis evaluate to True. + + Parameters + ---------- + a : array_like + Input array or object that can be converted to an array. + axis : None or int or tuple of ints, optional + Axis or axes along which a logical AND reduction is performed. + The default (``axis=None``) is to perform a logical AND over all + the dimensions of the input array. `axis` may be negative, in + which case it counts from the last to the first axis. + + .. versionadded:: 1.7.0 + + If this is a tuple of ints, a reduction is performed on multiple + axes, instead of a single axis or all the axes as before. + out : ndarray, optional + Alternate output array in which to place the result. + It must have the same shape as the expected output and its + type is preserved (e.g., if ``dtype(out)`` is float, the result + will consist of 0.0's and 1.0's). 
See :ref:`ufuncs-output-type` for more + details. + + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the input array. + + If the default value is passed, then `keepdims` will not be + passed through to the `all` method of sub-classes of + `ndarray`, however any non-default value will be. If the + sub-class' method does not implement `keepdims` any + exceptions will be raised. + + where : array_like of bool, optional + Elements to include in checking for all `True` values. + See `~numpy.ufunc.reduce` for details. + + .. versionadded:: 1.20.0 + + Returns + ------- + all : ndarray, bool + A new boolean or array is returned unless `out` is specified, + in which case a reference to `out` is returned. + + See Also + -------- + ndarray.all : equivalent method + + any : Test whether any element along a given axis evaluates to True. + + Notes + ----- + Not a Number (NaN), positive infinity and negative infinity + evaluate to `True` because these are not equal to zero. + + Examples + -------- + >>> np.all([[True,False],[True,True]]) + False + + >>> np.all([[True,False],[True,True]], axis=0) + array([ True, False]) + + >>> np.all([-1, 4, 5]) + True + + >>> np.all([1.0, np.nan]) + True + + >>> np.all([[True, True], [False, True]], where=[[True], [False]]) + True + + >>> o=np.array(False) + >>> z=np.all([-1, 4, 5], out=o) + >>> id(z), id(o), z + (28293632, 28293632, array(True)) # may vary + + """ + return _wrapreduction(a, np.logical_and, 'all', axis, None, out, + keepdims=keepdims, where=where) + + +def _cumsum_dispatcher(a, axis=None, dtype=None, out=None): + return (a, out) + + +@array_function_dispatch(_cumsum_dispatcher) +def cumsum(a, axis=None, dtype=None, out=None): + """ + Return the cumulative sum of the elements along a given axis. + + Parameters + ---------- + a : array_like + Input array. + axis : int, optional + Axis along which the cumulative sum is computed. The default + (None) is to compute the cumsum over the flattened array. + dtype : dtype, optional + Type of the returned array and of the accumulator in which the + elements are summed. If `dtype` is not specified, it defaults + to the dtype of `a`, unless `a` has an integer dtype with a + precision less than that of the default platform integer. In + that case, the default platform integer is used. + out : ndarray, optional + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output + but the type will be cast if necessary. See :ref:`ufuncs-output-type` for + more details. + + Returns + ------- + cumsum_along_axis : ndarray. + A new array holding the result is returned unless `out` is + specified, in which case a reference to `out` is returned. The + result has the same size as `a`, and the same shape as `a` if + `axis` is not None or `a` is a 1-d array. + + See Also + -------- + sum : Sum array elements. + trapz : Integration of array values using the composite trapezoidal rule. + diff : Calculate the n-th discrete difference along given axis. + + Notes + ----- + Arithmetic is modular when using integer types, and no error is + raised on overflow. + + ``cumsum(a)[-1]`` may not be equal to ``sum(a)`` for floating-point + values since ``sum`` may use a pairwise summation routine, reducing + the roundoff-error. See `sum` for more information. 
+ + Examples + -------- + >>> a = np.array([[1,2,3], [4,5,6]]) + >>> a + array([[1, 2, 3], + [4, 5, 6]]) + >>> np.cumsum(a) + array([ 1, 3, 6, 10, 15, 21]) + >>> np.cumsum(a, dtype=float) # specifies type of output value(s) + array([ 1., 3., 6., 10., 15., 21.]) + + >>> np.cumsum(a,axis=0) # sum over rows for each of the 3 columns + array([[1, 2, 3], + [5, 7, 9]]) + >>> np.cumsum(a,axis=1) # sum over columns for each of the 2 rows + array([[ 1, 3, 6], + [ 4, 9, 15]]) + + ``cumsum(b)[-1]`` may not be equal to ``sum(b)`` + + >>> b = np.array([1, 2e-9, 3e-9] * 1000000) + >>> b.cumsum()[-1] + 1000000.0050045159 + >>> b.sum() + 1000000.0050000029 + + """ + return _wrapfunc(a, 'cumsum', axis=axis, dtype=dtype, out=out) + + +def _ptp_dispatcher(a, axis=None, out=None, keepdims=None): + return (a, out) + + +@array_function_dispatch(_ptp_dispatcher) +def ptp(a, axis=None, out=None, keepdims=np._NoValue): + """ + Range of values (maximum - minimum) along an axis. + + The name of the function comes from the acronym for 'peak to peak'. + + .. warning:: + `ptp` preserves the data type of the array. This means the + return value for an input of signed integers with n bits + (e.g. `np.int8`, `np.int16`, etc) is also a signed integer + with n bits. In that case, peak-to-peak values greater than + ``2**(n-1)-1`` will be returned as negative values. An example + with a work-around is shown below. + + Parameters + ---------- + a : array_like + Input values. + axis : None or int or tuple of ints, optional + Axis along which to find the peaks. By default, flatten the + array. `axis` may be negative, in + which case it counts from the last to the first axis. + + .. versionadded:: 1.15.0 + + If this is a tuple of ints, a reduction is performed on multiple + axes, instead of a single axis or all the axes as before. + out : array_like + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output, + but the type of the output values will be cast if necessary. + + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the input array. + + If the default value is passed, then `keepdims` will not be + passed through to the `ptp` method of sub-classes of + `ndarray`, however any non-default value will be. If the + sub-class' method does not implement `keepdims` any + exceptions will be raised. + + Returns + ------- + ptp : ndarray or scalar + The range of a given array - `scalar` if array is one-dimensional + or a new array holding the result along the given axis + + Examples + -------- + >>> x = np.array([[4, 9, 2, 10], + ... [6, 9, 7, 12]]) + + >>> np.ptp(x, axis=1) + array([8, 6]) + + >>> np.ptp(x, axis=0) + array([2, 0, 5, 2]) + + >>> np.ptp(x) + 10 + + This example shows that a negative value can be returned when + the input is an array of signed integers. + + >>> y = np.array([[1, 127], + ... [0, 127], + ... [-1, 127], + ... 
[-2, 127]], dtype=np.int8) + >>> np.ptp(y, axis=1) + array([ 126, 127, -128, -127], dtype=int8) + + A work-around is to use the `view()` method to view the result as + unsigned integers with the same bit width: + + >>> np.ptp(y, axis=1).view(np.uint8) + array([126, 127, 128, 129], dtype=uint8) + + """ + kwargs = {} + if keepdims is not np._NoValue: + kwargs['keepdims'] = keepdims + if type(a) is not mu.ndarray: + try: + ptp = a.ptp + except AttributeError: + pass + else: + return ptp(axis=axis, out=out, **kwargs) + return _methods._ptp(a, axis=axis, out=out, **kwargs) + + +def _max_dispatcher(a, axis=None, out=None, keepdims=None, initial=None, + where=None): + return (a, out) + + +@array_function_dispatch(_max_dispatcher) +@set_module('numpy') +def max(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue, + where=np._NoValue): + """ + Return the maximum of an array or maximum along an axis. + + Parameters + ---------- + a : array_like + Input data. + axis : None or int or tuple of ints, optional + Axis or axes along which to operate. By default, flattened input is + used. + + .. versionadded:: 1.7.0 + + If this is a tuple of ints, the maximum is selected over multiple axes, + instead of a single axis or all the axes as before. + out : ndarray, optional + Alternative output array in which to place the result. Must + be of the same shape and buffer length as the expected output. + See :ref:`ufuncs-output-type` for more details. + + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the input array. + + If the default value is passed, then `keepdims` will not be + passed through to the ``max`` method of sub-classes of + `ndarray`, however any non-default value will be. If the + sub-class' method does not implement `keepdims` any + exceptions will be raised. + + initial : scalar, optional + The minimum value of an output element. Must be present to allow + computation on empty slice. See `~numpy.ufunc.reduce` for details. + + .. versionadded:: 1.15.0 + + where : array_like of bool, optional + Elements to compare for the maximum. See `~numpy.ufunc.reduce` + for details. + + .. versionadded:: 1.17.0 + + Returns + ------- + max : ndarray or scalar + Maximum of `a`. If `axis` is None, the result is a scalar value. + If `axis` is an int, the result is an array of dimension + ``a.ndim - 1``. If `axis` is a tuple, the result is an array of + dimension ``a.ndim - len(axis)``. + + See Also + -------- + amin : + The minimum value of an array along a given axis, propagating any NaNs. + nanmax : + The maximum value of an array along a given axis, ignoring any NaNs. + maximum : + Element-wise maximum of two arrays, propagating any NaNs. + fmax : + Element-wise maximum of two arrays, ignoring any NaNs. + argmax : + Return the indices of the maximum values. + + nanmin, minimum, fmin + + Notes + ----- + NaN values are propagated, that is if at least one item is NaN, the + corresponding max value will be NaN as well. To ignore NaN values + (MATLAB behavior), please use nanmax. + + Don't use `~numpy.max` for element-wise comparison of 2 arrays; when + ``a.shape[0]`` is 2, ``maximum(a[0], a[1])`` is faster than + ``max(a, axis=0)``. 
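Sketching the element-wise comparison advice above:

    import numpy as np

    a = np.array([[1, 5, 2],
                  [4, 3, 6]])
    np.maximum(a[0], a[1])  # element-wise: array([4, 5, 6])
    np.max(a, axis=0)       # same values, but slower when a.shape[0] is 2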
+ + Examples + -------- + >>> a = np.arange(4).reshape((2,2)) + >>> a + array([[0, 1], + [2, 3]]) + >>> np.max(a) # Maximum of the flattened array + 3 + >>> np.max(a, axis=0) # Maxima along the first axis + array([2, 3]) + >>> np.max(a, axis=1) # Maxima along the second axis + array([1, 3]) + >>> np.max(a, where=[False, True], initial=-1, axis=0) + array([-1, 3]) + >>> b = np.arange(5, dtype=float) + >>> b[2] = np.NaN + >>> np.max(b) + nan + >>> np.max(b, where=~np.isnan(b), initial=-1) + 4.0 + >>> np.nanmax(b) + 4.0 + + You can use an initial value to compute the maximum of an empty slice, or + to initialize it to a different value: + + >>> np.max([[-50], [10]], axis=-1, initial=0) + array([ 0, 10]) + + Notice that the initial value is used as one of the elements for which the + maximum is determined, unlike for the default argument Python's max + function, which is only used for empty iterables. + + >>> np.max([5], initial=6) + 6 + >>> max([5], default=6) + 5 + """ + return _wrapreduction(a, np.maximum, 'max', axis, None, out, + keepdims=keepdims, initial=initial, where=where) + + +@array_function_dispatch(_max_dispatcher) +def amax(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue, + where=np._NoValue): + """ + Return the maximum of an array or maximum along an axis. + + `amax` is an alias of `~numpy.max`. + + See Also + -------- + max : alias of this function + ndarray.max : equivalent method + """ + return _wrapreduction(a, np.maximum, 'max', axis, None, out, + keepdims=keepdims, initial=initial, where=where) + + +def _min_dispatcher(a, axis=None, out=None, keepdims=None, initial=None, + where=None): + return (a, out) + + +@array_function_dispatch(_min_dispatcher) +def min(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue, + where=np._NoValue): + """ + Return the minimum of an array or minimum along an axis. + + Parameters + ---------- + a : array_like + Input data. + axis : None or int or tuple of ints, optional + Axis or axes along which to operate. By default, flattened input is + used. + + .. versionadded:: 1.7.0 + + If this is a tuple of ints, the minimum is selected over multiple axes, + instead of a single axis or all the axes as before. + out : ndarray, optional + Alternative output array in which to place the result. Must + be of the same shape and buffer length as the expected output. + See :ref:`ufuncs-output-type` for more details. + + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the input array. + + If the default value is passed, then `keepdims` will not be + passed through to the ``min`` method of sub-classes of + `ndarray`, however any non-default value will be. If the + sub-class' method does not implement `keepdims` any + exceptions will be raised. + + initial : scalar, optional + The maximum value of an output element. Must be present to allow + computation on empty slice. See `~numpy.ufunc.reduce` for details. + + .. versionadded:: 1.15.0 + + where : array_like of bool, optional + Elements to compare for the minimum. See `~numpy.ufunc.reduce` + for details. + + .. versionadded:: 1.17.0 + + Returns + ------- + min : ndarray or scalar + Minimum of `a`. If `axis` is None, the result is a scalar value. + If `axis` is an int, the result is an array of dimension + ``a.ndim - 1``. If `axis` is a tuple, the result is an array of + dimension ``a.ndim - len(axis)``. 
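A short sketch of how the result's dimensionality follows ``axis``, per the Returns entry above:

    import numpy as np

    a = np.arange(24).reshape(2, 3, 4)
    np.min(a, axis=1).shape        # (2, 4): one axis removed
    np.min(a, axis=(0, 2)).shape   # (3,): a.ndim - len(axis) dimensions
    np.min(a)                      # 0, a scalar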
+ + See Also + -------- + amax : + The maximum value of an array along a given axis, propagating any NaNs. + nanmin : + The minimum value of an array along a given axis, ignoring any NaNs. + minimum : + Element-wise minimum of two arrays, propagating any NaNs. + fmin : + Element-wise minimum of two arrays, ignoring any NaNs. + argmin : + Return the indices of the minimum values. + + nanmax, maximum, fmax + + Notes + ----- + NaN values are propagated, that is if at least one item is NaN, the + corresponding min value will be NaN as well. To ignore NaN values + (MATLAB behavior), please use nanmin. + + Don't use `~numpy.min` for element-wise comparison of 2 arrays; when + ``a.shape[0]`` is 2, ``minimum(a[0], a[1])`` is faster than + ``min(a, axis=0)``. + + Examples + -------- + >>> a = np.arange(4).reshape((2,2)) + >>> a + array([[0, 1], + [2, 3]]) + >>> np.min(a) # Minimum of the flattened array + 0 + >>> np.min(a, axis=0) # Minima along the first axis + array([0, 1]) + >>> np.min(a, axis=1) # Minima along the second axis + array([0, 2]) + >>> np.min(a, where=[False, True], initial=10, axis=0) + array([10, 1]) + + >>> b = np.arange(5, dtype=float) + >>> b[2] = np.NaN + >>> np.min(b) + nan + >>> np.min(b, where=~np.isnan(b), initial=10) + 0.0 + >>> np.nanmin(b) + 0.0 + + >>> np.min([[-50], [10]], axis=-1, initial=0) + array([-50, 0]) + + Notice that the initial value is used as one of the elements for which the + minimum is determined, unlike for the default argument of Python's min + function, which is only used for empty iterables. + + Notice that this isn't the same as Python's ``default`` argument. + + >>> np.min([6], initial=5) + 5 + >>> min([6], default=5) + 6 + """ + return _wrapreduction(a, np.minimum, 'min', axis, None, out, + keepdims=keepdims, initial=initial, where=where) + + +@array_function_dispatch(_min_dispatcher) +def amin(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue, + where=np._NoValue): + """ + Return the minimum of an array or minimum along an axis. + + `amin` is an alias of `~numpy.min`. + + See Also + -------- + min : alias of this function + ndarray.min : equivalent method + """ + return _wrapreduction(a, np.minimum, 'min', axis, None, out, + keepdims=keepdims, initial=initial, where=where) + + +def _prod_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None, + initial=None, where=None): + return (a, out) + + +@array_function_dispatch(_prod_dispatcher) +def prod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, + initial=np._NoValue, where=np._NoValue): + """ + Return the product of array elements over a given axis. + + Parameters + ---------- + a : array_like + Input data. + axis : None or int or tuple of ints, optional + Axis or axes along which a product is performed. The default, + axis=None, will calculate the product of all the elements in the + input array. If axis is negative it counts from the last to the + first axis. + + .. versionadded:: 1.7.0 + + If axis is a tuple of ints, a product is performed on all of the + axes specified in the tuple instead of a single axis or all the + axes as before. + dtype : dtype, optional + The type of the returned array, as well as of the accumulator in + which the elements are multiplied. The dtype of `a` is used by + default unless `a` has an integer dtype of less precision than the + default platform integer. In that case, if `a` is signed then the + platform integer is used while if `a` is unsigned then an unsigned + integer of the same precision as the platform integer is used.
+ out : ndarray, optional + Alternative output array in which to place the result. It must have + the same shape as the expected output, but the type of the output + values will be cast if necessary. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left in the + result as dimensions with size one. With this option, the result + will broadcast correctly against the input array. + + If the default value is passed, then `keepdims` will not be + passed through to the `prod` method of sub-classes of + `ndarray`, however any non-default value will be. If the + sub-class' method does not implement `keepdims` any + exceptions will be raised. + initial : scalar, optional + The starting value for this product. See `~numpy.ufunc.reduce` for details. + + .. versionadded:: 1.15.0 + + where : array_like of bool, optional + Elements to include in the product. See `~numpy.ufunc.reduce` for details. + + .. versionadded:: 1.17.0 + + Returns + ------- + product_along_axis : ndarray, see `dtype` parameter above. + An array shaped as `a` but with the specified axis removed. + Returns a reference to `out` if specified. + + See Also + -------- + ndarray.prod : equivalent method + :ref:`ufuncs-output-type` + + Notes + ----- + Arithmetic is modular when using integer types, and no error is + raised on overflow. That means that, on a 32-bit platform: + + >>> x = np.array([536870910, 536870910, 536870910, 536870910]) + >>> np.prod(x) + 16 # may vary + + The product of an empty array is the neutral element 1: + + >>> np.prod([]) + 1.0 + + Examples + -------- + By default, calculate the product of all elements: + + >>> np.prod([1.,2.]) + 2.0 + + Even when the input array is two-dimensional: + + >>> a = np.array([[1., 2.], [3., 4.]]) + >>> np.prod(a) + 24.0 + + But we can also specify the axis over which to multiply: + + >>> np.prod(a, axis=1) + array([ 2., 12.]) + >>> np.prod(a, axis=0) + array([3., 8.]) + + Or select specific elements to include: + + >>> np.prod([1., np.nan, 3.], where=[True, False, True]) + 3.0 + + If the type of `x` is unsigned, then the output type is + the unsigned platform integer: + + >>> x = np.array([1, 2, 3], dtype=np.uint8) + >>> np.prod(x).dtype == np.uint + True + + If `x` is of a signed integer type, then the output type + is the default platform integer: + + >>> x = np.array([1, 2, 3], dtype=np.int8) + >>> np.prod(x).dtype == int + True + + You can also start the product with a value other than one: + + >>> np.prod([1, 2], initial=5) + 10 + """ + return _wrapreduction(a, np.multiply, 'prod', axis, dtype, out, + keepdims=keepdims, initial=initial, where=where) + + +def _cumprod_dispatcher(a, axis=None, dtype=None, out=None): + return (a, out) + + +@array_function_dispatch(_cumprod_dispatcher) +def cumprod(a, axis=None, dtype=None, out=None): + """ + Return the cumulative product of elements along a given axis. + + Parameters + ---------- + a : array_like + Input array. + axis : int, optional + Axis along which the cumulative product is computed. By default + the input is flattened. + dtype : dtype, optional + Type of the returned array, as well as of the accumulator in which + the elements are multiplied. If *dtype* is not specified, it + defaults to the dtype of `a`, unless `a` has an integer dtype with + a precision less than that of the default platform integer. In + that case, the default platform integer is used instead. + out : ndarray, optional + Alternative output array in which to place the result. 
It must + have the same shape and buffer length as the expected output + but the type of the resulting values will be cast if necessary. + + Returns + ------- + cumprod : ndarray + A new array holding the result is returned unless `out` is + specified, in which case a reference to out is returned. + + See Also + -------- + :ref:`ufuncs-output-type` + + Notes + ----- + Arithmetic is modular when using integer types, and no error is + raised on overflow. + + Examples + -------- + >>> a = np.array([1,2,3]) + >>> np.cumprod(a) # intermediate results 1, 1*2 + ... # total product 1*2*3 = 6 + array([1, 2, 6]) + >>> a = np.array([[1, 2, 3], [4, 5, 6]]) + >>> np.cumprod(a, dtype=float) # specify type of output + array([ 1., 2., 6., 24., 120., 720.]) + + The cumulative product for each column (i.e., over the rows) of `a`: + + >>> np.cumprod(a, axis=0) + array([[ 1, 2, 3], + [ 4, 10, 18]]) + + The cumulative product for each row (i.e. over the columns) of `a`: + + >>> np.cumprod(a,axis=1) + array([[ 1, 2, 6], + [ 4, 20, 120]]) + + """ + return _wrapfunc(a, 'cumprod', axis=axis, dtype=dtype, out=out) + + +def _ndim_dispatcher(a): + return (a,) + + +@array_function_dispatch(_ndim_dispatcher) +def ndim(a): + """ + Return the number of dimensions of an array. + + Parameters + ---------- + a : array_like + Input array. If it is not already an ndarray, a conversion is + attempted. + + Returns + ------- + number_of_dimensions : int + The number of dimensions in `a`. Scalars are zero-dimensional. + + See Also + -------- + ndarray.ndim : equivalent method + shape : dimensions of array + ndarray.shape : dimensions of array + + Examples + -------- + >>> np.ndim([[1,2,3],[4,5,6]]) + 2 + >>> np.ndim(np.array([[1,2,3],[4,5,6]])) + 2 + >>> np.ndim(1) + 0 + + """ + try: + return a.ndim + except AttributeError: + return asarray(a).ndim + + +def _size_dispatcher(a, axis=None): + return (a,) + + +@array_function_dispatch(_size_dispatcher) +def size(a, axis=None): + """ + Return the number of elements along a given axis. + + Parameters + ---------- + a : array_like + Input data. + axis : int, optional + Axis along which the elements are counted. By default, give + the total number of elements. + + Returns + ------- + element_count : int + Number of elements along the specified axis. + + See Also + -------- + shape : dimensions of array + ndarray.shape : dimensions of array + ndarray.size : number of elements in array + + Examples + -------- + >>> a = np.array([[1,2,3],[4,5,6]]) + >>> np.size(a) + 6 + >>> np.size(a,1) + 3 + >>> np.size(a,0) + 2 + + """ + if axis is None: + try: + return a.size + except AttributeError: + return asarray(a).size + else: + try: + return a.shape[axis] + except AttributeError: + return asarray(a).shape[axis] + + +def _round_dispatcher(a, decimals=None, out=None): + return (a, out) + + +@array_function_dispatch(_round_dispatcher) +def round(a, decimals=0, out=None): + """ + Evenly round to the given number of decimals. + + Parameters + ---------- + a : array_like + Input data. + decimals : int, optional + Number of decimal places to round to (default: 0). If + decimals is negative, it specifies the number of positions to + the left of the decimal point. + out : ndarray, optional + Alternative output array in which to place the result. It must have + the same shape as the expected output, but the type of the output + values will be cast if necessary. See :ref:`ufuncs-output-type` for more + details. 
+ + Returns + ------- + rounded_array : ndarray + An array of the same type as `a`, containing the rounded values. + Unless `out` was specified, a new array is created. A reference to + the result is returned. + + The real and imaginary parts of complex numbers are rounded + separately. The result of rounding a float is a float. + + See Also + -------- + ndarray.round : equivalent method + around : an alias for this function + ceil, fix, floor, rint, trunc + + + Notes + ----- + For values exactly halfway between rounded decimal values, NumPy + rounds to the nearest even value. Thus 1.5 and 2.5 round to 2.0, + -0.5 and 0.5 round to 0.0, etc. + + ``np.round`` uses a fast but sometimes inexact algorithm to round + floating-point datatypes. For positive `decimals` it is equivalent to + ``np.true_divide(np.rint(a * 10**decimals), 10**decimals)``, which has + error due to the inexact representation of decimal fractions in the IEEE + floating point standard [1]_ and errors introduced when scaling by powers + of ten. For instance, note the extra "1" in the following: + + >>> np.round(56294995342131.5, 3) + 56294995342131.51 + + If your goal is to print such values with a fixed number of decimals, it is + preferable to use numpy's float printing routines to limit the number of + printed decimals: + + >>> np.format_float_positional(56294995342131.5, precision=3) + '56294995342131.5' + + The float printing routines use an accurate but much more computationally + demanding algorithm to compute the number of digits after the decimal + point. + + Alternatively, Python's builtin `round` function uses a more accurate + but slower algorithm for 64-bit floating point values: + + >>> round(56294995342131.5, 3) + 56294995342131.5 + >>> np.round(16.055, 2), round(16.055, 2) # equals 16.0549999999999997 + (16.06, 16.05) + + + References + ---------- + .. [1] "Lecture Notes on the Status of IEEE 754", William Kahan, + https://people.eecs.berkeley.edu/~wkahan/ieee754status/IEEE754.PDF + + Examples + -------- + >>> np.round([0.37, 1.64]) + array([0., 2.]) + >>> np.round([0.37, 1.64], decimals=1) + array([0.4, 1.6]) + >>> np.round([.5, 1.5, 2.5, 3.5, 4.5]) # rounds to nearest even value + array([0., 2., 2., 4., 4.]) + >>> np.round([1,2,3,11], decimals=1) # ndarray of ints is returned + array([ 1, 2, 3, 11]) + >>> np.round([1,2,3,11], decimals=-1) + array([ 0, 0, 0, 10]) + + """ + return _wrapfunc(a, 'round', decimals=decimals, out=out) + + +@array_function_dispatch(_round_dispatcher) +def around(a, decimals=0, out=None): + """ + Round an array to the given number of decimals. + + `around` is an alias of `~numpy.round`. + + See Also + -------- + ndarray.round : equivalent method + round : alias for this function + ceil, fix, floor, rint, trunc + + """ + return _wrapfunc(a, 'round', decimals=decimals, out=out) + + +def _mean_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None, *, + where=None): + return (a, where, out) + + +@array_function_dispatch(_mean_dispatcher) +def mean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, *, + where=np._NoValue): + """ + Compute the arithmetic mean along the specified axis. + + Returns the average of the array elements. The average is taken over + the flattened array by default, otherwise over the specified axis. + `float64` intermediate and return values are used for integer inputs. + + Parameters + ---------- + a : array_like + Array containing numbers whose mean is desired. If `a` is not an + array, a conversion is attempted. 
+ axis : None or int or tuple of ints, optional + Axis or axes along which the means are computed. The default is to + compute the mean of the flattened array. + + .. versionadded:: 1.7.0 + + If this is a tuple of ints, a mean is performed over multiple axes, + instead of a single axis or all the axes as before. + dtype : data-type, optional + Type to use in computing the mean. For integer inputs, the default + is `float64`; for floating point inputs, it is the same as the + input dtype. + out : ndarray, optional + Alternate output array in which to place the result. The default + is ``None``; if provided, it must have the same shape as the + expected output, but the type will be cast if necessary. + See :ref:`ufuncs-output-type` for more details. + + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the input array. + + If the default value is passed, then `keepdims` will not be + passed through to the `mean` method of sub-classes of + `ndarray`, however any non-default value will be. If the + sub-class' method does not implement `keepdims` any + exceptions will be raised. + + where : array_like of bool, optional + Elements to include in the mean. See `~numpy.ufunc.reduce` for details. + + .. versionadded:: 1.20.0 + + Returns + ------- + m : ndarray, see dtype parameter above + If `out=None`, returns a new array containing the mean values, + otherwise a reference to the output array is returned. + + See Also + -------- + average : Weighted average + std, var, nanmean, nanstd, nanvar + + Notes + ----- + The arithmetic mean is the sum of the elements along the axis divided + by the number of elements. + + Note that for floating-point input, the mean is computed using the + same precision the input has. Depending on the input data, this can + cause the results to be inaccurate, especially for `float32` (see + example below). Specifying a higher-precision accumulator using the + `dtype` keyword can alleviate this issue. + + By default, `float16` results are computed using `float32` intermediates + for extra precision. 
+ + Examples + -------- + >>> a = np.array([[1, 2], [3, 4]]) + >>> np.mean(a) + 2.5 + >>> np.mean(a, axis=0) + array([2., 3.]) + >>> np.mean(a, axis=1) + array([1.5, 3.5]) + + In single precision, `mean` can be inaccurate: + + >>> a = np.zeros((2, 512*512), dtype=np.float32) + >>> a[0, :] = 1.0 + >>> a[1, :] = 0.1 + >>> np.mean(a) + 0.54999924 + + Computing the mean in float64 is more accurate: + + >>> np.mean(a, dtype=np.float64) + 0.55000000074505806 # may vary + + Specifying a where argument: + + >>> a = np.array([[5, 9, 13], [14, 10, 12], [11, 15, 19]]) + >>> np.mean(a) + 12.0 + >>> np.mean(a, where=[[True], [False], [False]]) + 9.0 + + """ + kwargs = {} + if keepdims is not np._NoValue: + kwargs['keepdims'] = keepdims + if where is not np._NoValue: + kwargs['where'] = where + if type(a) is not mu.ndarray: + try: + mean = a.mean + except AttributeError: + pass + else: + return mean(axis=axis, dtype=dtype, out=out, **kwargs) + + return _methods._mean(a, axis=axis, dtype=dtype, + out=out, **kwargs) + + +def _std_dispatcher(a, axis=None, dtype=None, out=None, ddof=None, + keepdims=None, *, where=None): + return (a, where, out) + + +@array_function_dispatch(_std_dispatcher) +def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, *, + where=np._NoValue): + """ + Compute the standard deviation along the specified axis. + + Returns the standard deviation, a measure of the spread of a distribution, + of the array elements. The standard deviation is computed for the + flattened array by default, otherwise over the specified axis. + + Parameters + ---------- + a : array_like + Calculate the standard deviation of these values. + axis : None or int or tuple of ints, optional + Axis or axes along which the standard deviation is computed. The + default is to compute the standard deviation of the flattened array. + + .. versionadded:: 1.7.0 + + If this is a tuple of ints, a standard deviation is performed over + multiple axes, instead of a single axis or all the axes as before. + dtype : dtype, optional + Type to use in computing the standard deviation. For arrays of + integer type the default is float64, for arrays of float types it is + the same as the array type. + out : ndarray, optional + Alternative output array in which to place the result. It must have + the same shape as the expected output but the type (of the calculated + values) will be cast if necessary. + ddof : int, optional + Means Delta Degrees of Freedom. The divisor used in calculations + is ``N - ddof``, where ``N`` represents the number of elements. + By default `ddof` is zero. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the input array. + + If the default value is passed, then `keepdims` will not be + passed through to the `std` method of sub-classes of + `ndarray`, however any non-default value will be. If the + sub-class' method does not implement `keepdims` any + exceptions will be raised. + + where : array_like of bool, optional + Elements to include in the standard deviation. + See `~numpy.ufunc.reduce` for details. + + .. versionadded:: 1.20.0 + + Returns + ------- + standard_deviation : ndarray, see dtype parameter above. + If `out` is None, return a new array containing the standard deviation, + otherwise return a reference to the output array. 
+ + See Also + -------- + var, mean, nanmean, nanstd, nanvar + :ref:`ufuncs-output-type` + + Notes + ----- + The standard deviation is the square root of the average of the squared + deviations from the mean, i.e., ``std = sqrt(mean(x))``, where + ``x = abs(a - a.mean())**2``. + + The average squared deviation is typically calculated as ``x.sum() / N``, + where ``N = len(x)``. If, however, `ddof` is specified, the divisor + ``N - ddof`` is used instead. In standard statistical practice, ``ddof=1`` + provides an unbiased estimator of the variance of the infinite population. + ``ddof=0`` provides a maximum likelihood estimate of the variance for + normally distributed variables. The standard deviation computed in this + function is the square root of the estimated variance, so even with + ``ddof=1``, it will not be an unbiased estimate of the standard deviation + per se. + + Note that, for complex numbers, `std` takes the absolute + value before squaring, so that the result is always real and nonnegative. + + For floating-point input, the *std* is computed using the same + precision the input has. Depending on the input data, this can cause + the results to be inaccurate, especially for float32 (see example below). + Specifying a higher-accuracy accumulator using the `dtype` keyword can + alleviate this issue. + + Examples + -------- + >>> a = np.array([[1, 2], [3, 4]]) + >>> np.std(a) + 1.1180339887498949 # may vary + >>> np.std(a, axis=0) + array([1., 1.]) + >>> np.std(a, axis=1) + array([0.5, 0.5]) + + In single precision, std() can be inaccurate: + + >>> a = np.zeros((2, 512*512), dtype=np.float32) + >>> a[0, :] = 1.0 + >>> a[1, :] = 0.1 + >>> np.std(a) + 0.45000005 + + Computing the standard deviation in float64 is more accurate: + + >>> np.std(a, dtype=np.float64) + 0.44999999925494177 # may vary + + Specifying a where argument: + + >>> a = np.array([[14, 8, 11, 10], [7, 9, 10, 11], [10, 15, 5, 10]]) + >>> np.std(a) + 2.614064523559687 # may vary + >>> np.std(a, where=[[True], [True], [False]]) + 2.0 + + """ + kwargs = {} + if keepdims is not np._NoValue: + kwargs['keepdims'] = keepdims + if where is not np._NoValue: + kwargs['where'] = where + if type(a) is not mu.ndarray: + try: + std = a.std + except AttributeError: + pass + else: + return std(axis=axis, dtype=dtype, out=out, ddof=ddof, **kwargs) + + return _methods._std(a, axis=axis, dtype=dtype, out=out, ddof=ddof, + **kwargs) + + +def _var_dispatcher(a, axis=None, dtype=None, out=None, ddof=None, + keepdims=None, *, where=None): + return (a, where, out) + + +@array_function_dispatch(_var_dispatcher) +def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, *, + where=np._NoValue): + """ + Compute the variance along the specified axis. + + Returns the variance of the array elements, a measure of the spread of a + distribution. The variance is computed for the flattened array by + default, otherwise over the specified axis. + + Parameters + ---------- + a : array_like + Array containing numbers whose variance is desired. If `a` is not an + array, a conversion is attempted. + axis : None or int or tuple of ints, optional + Axis or axes along which the variance is computed. The default is to + compute the variance of the flattened array. + + .. versionadded:: 1.7.0 + + If this is a tuple of ints, a variance is performed over multiple axes, + instead of a single axis or all the axes as before. + dtype : data-type, optional + Type to use in computing the variance. 
For arrays of integer type + the default is `float64`; for arrays of float types it is the same as + the array type. + out : ndarray, optional + Alternate output array in which to place the result. It must have + the same shape as the expected output, but the type is cast if + necessary. + ddof : int, optional + "Delta Degrees of Freedom": the divisor used in the calculation is + ``N - ddof``, where ``N`` represents the number of elements. By + default `ddof` is zero. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the input array. + + If the default value is passed, then `keepdims` will not be + passed through to the `var` method of sub-classes of + `ndarray`, however any non-default value will be. If the + sub-class' method does not implement `keepdims` any + exceptions will be raised. + + where : array_like of bool, optional + Elements to include in the variance. See `~numpy.ufunc.reduce` for + details. + + .. versionadded:: 1.20.0 + + Returns + ------- + variance : ndarray, see dtype parameter above + If ``out=None``, returns a new array containing the variance; + otherwise, a reference to the output array is returned. + + See Also + -------- + std, mean, nanmean, nanstd, nanvar + :ref:`ufuncs-output-type` + + Notes + ----- + The variance is the average of the squared deviations from the mean, + i.e., ``var = mean(x)``, where ``x = abs(a - a.mean())**2``. + + The mean is typically calculated as ``x.sum() / N``, where ``N = len(x)``. + If, however, `ddof` is specified, the divisor ``N - ddof`` is used + instead. In standard statistical practice, ``ddof=1`` provides an + unbiased estimator of the variance of a hypothetical infinite population. + ``ddof=0`` provides a maximum likelihood estimate of the variance for + normally distributed variables. + + Note that for complex numbers, the absolute value is taken before + squaring, so that the result is always real and nonnegative. + + For floating-point input, the variance is computed using the same + precision the input has. Depending on the input data, this can cause + the results to be inaccurate, especially for `float32` (see example + below). Specifying a higher-accuracy accumulator using the ``dtype`` + keyword can alleviate this issue. + + Examples + -------- + >>> a = np.array([[1, 2], [3, 4]]) + >>> np.var(a) + 1.25 + >>> np.var(a, axis=0) + array([1., 1.]) + >>> np.var(a, axis=1) + array([0.25, 0.25]) + + In single precision, var() can be inaccurate: + + >>> a = np.zeros((2, 512*512), dtype=np.float32) + >>> a[0, :] = 1.0 + >>> a[1, :] = 0.1 + >>> np.var(a) + 0.20250003 + + Computing the variance in float64 is more accurate: + + >>> np.var(a, dtype=np.float64) + 0.20249999932944759 # may vary + >>> ((1-0.55)**2 + (0.1-0.55)**2)/2 + 0.2025 + + Specifying a where argument: + + >>> a = np.array([[14, 8, 11, 10], [7, 9, 10, 11], [10, 15, 5, 10]]) + >>> np.var(a) + 6.833333333333333 # may vary + >>> np.var(a, where=[[True], [True], [False]]) + 4.0 + + """ + kwargs = {} + if keepdims is not np._NoValue: + kwargs['keepdims'] = keepdims + if where is not np._NoValue: + kwargs['where'] = where + + if type(a) is not mu.ndarray: + try: + var = a.var + + except AttributeError: + pass + else: + return var(axis=axis, dtype=dtype, out=out, ddof=ddof, **kwargs) + + return _methods._var(a, axis=axis, dtype=dtype, out=out, ddof=ddof, + **kwargs) + + +# Aliases of other functions. 
Provided unique docstrings +# are for reference purposes only. Wherever possible, +# avoid using them. + + +def _round__dispatcher(a, decimals=None, out=None): + # 2023-02-28, 1.25.0 + warnings.warn("`round_` is deprecated as of NumPy 1.25.0, and will be " + "removed in NumPy 2.0. Please use `round` instead.", + DeprecationWarning, stacklevel=3) + return (a, out) + + +@array_function_dispatch(_round__dispatcher) +def round_(a, decimals=0, out=None): + """ + Round an array to the given number of decimals. + + `~numpy.round_` is a disrecommended backwards-compatibility + alias of `~numpy.around` and `~numpy.round`. + + .. deprecated:: 1.25.0 + ``round_`` is deprecated as of NumPy 1.25.0, and will be + removed in NumPy 2.0. Please use `round` instead. + + See Also + -------- + around : equivalent function; see for details. + """ + return around(a, decimals=decimals, out=out) + + +def _product_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None, + initial=None, where=None): + # 2023-03-02, 1.25.0 + warnings.warn("`product` is deprecated as of NumPy 1.25.0, and will be " + "removed in NumPy 2.0. Please use `prod` instead.", + DeprecationWarning, stacklevel=3) + return (a, out) + + +@array_function_dispatch(_product_dispatcher, verify=False) +def product(*args, **kwargs): + """ + Return the product of array elements over a given axis. + + .. deprecated:: 1.25.0 + ``product`` is deprecated as of NumPy 1.25.0, and will be + removed in NumPy 2.0. Please use `prod` instead. + + See Also + -------- + prod : equivalent function; see for details. + """ + return prod(*args, **kwargs) + + +def _cumproduct_dispatcher(a, axis=None, dtype=None, out=None): + # 2023-03-02, 1.25.0 + warnings.warn("`cumproduct` is deprecated as of NumPy 1.25.0, and will be " + "removed in NumPy 2.0. Please use `cumprod` instead.", + DeprecationWarning, stacklevel=3) + return (a, out) + + +@array_function_dispatch(_cumproduct_dispatcher, verify=False) +def cumproduct(*args, **kwargs): + """ + Return the cumulative product over the given axis. + + .. deprecated:: 1.25.0 + ``cumproduct`` is deprecated as of NumPy 1.25.0, and will be + removed in NumPy 2.0. Please use `cumprod` instead. + + See Also + -------- + cumprod : equivalent function; see for details. + """ + return cumprod(*args, **kwargs) + + +def _sometrue_dispatcher(a, axis=None, out=None, keepdims=None, *, + where=np._NoValue): + # 2023-03-02, 1.25.0 + warnings.warn("`sometrue` is deprecated as of NumPy 1.25.0, and will be " + "removed in NumPy 2.0. Please use `any` instead.", + DeprecationWarning, stacklevel=3) + return (a, where, out) + + +@array_function_dispatch(_sometrue_dispatcher, verify=False) +def sometrue(*args, **kwargs): + """ + Check whether some values are true. + + Refer to `any` for full documentation. + + .. deprecated:: 1.25.0 + ``sometrue`` is deprecated as of NumPy 1.25.0, and will be + removed in NumPy 2.0. Please use `any` instead. + + See Also + -------- + any : equivalent function; see for details. + """ + return any(*args, **kwargs) + + +def _alltrue_dispatcher(a, axis=None, out=None, keepdims=None, *, where=None): + # 2023-03-02, 1.25.0 + warnings.warn("`alltrue` is deprecated as of NumPy 1.25.0, and will be " + "removed in NumPy 2.0. Please use `all` instead.", + DeprecationWarning, stacklevel=3) + return (a, where, out) + + +@array_function_dispatch(_alltrue_dispatcher, verify=False) +def alltrue(*args, **kwargs): + """ + Check if all elements of input array are true. + + .. 
deprecated:: 1.25.0 + ``alltrue`` is deprecated as of NumPy 1.25.0, and will be + removed in NumPy 2.0. Please use `all` instead. + + See Also + -------- + numpy.all : Equivalent function; see for details. + """ + return all(*args, **kwargs) diff --git a/.venv/lib/python3.12/site-packages/numpy/core/fromnumeric.pyi b/.venv/lib/python3.12/site-packages/numpy/core/fromnumeric.pyi new file mode 100644 index 00000000..5438b270 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/fromnumeric.pyi @@ -0,0 +1,1060 @@ +import datetime as dt +from collections.abc import Sequence +from typing import Union, Any, overload, TypeVar, Literal, SupportsIndex + +from numpy import ( + ndarray, + number, + uint64, + int_, + int64, + intp, + float16, + bool_, + floating, + complexfloating, + object_, + generic, + _OrderKACF, + _OrderACF, + _ModeKind, + _PartitionKind, + _SortKind, + _SortSide, + _CastingKind, +) +from numpy._typing import ( + DTypeLike, + _DTypeLike, + ArrayLike, + _ArrayLike, + NDArray, + _ShapeLike, + _Shape, + _ArrayLikeBool_co, + _ArrayLikeUInt_co, + _ArrayLikeInt_co, + _ArrayLikeFloat_co, + _ArrayLikeComplex_co, + _ArrayLikeObject_co, + _IntLike_co, + _BoolLike_co, + _ComplexLike_co, + _NumberLike_co, + _ScalarLike_co, +) + +_SCT = TypeVar("_SCT", bound=generic) +_SCT_uifcO = TypeVar("_SCT_uifcO", bound=number[Any] | object_) +_ArrayType = TypeVar("_ArrayType", bound=NDArray[Any]) + +__all__: list[str] + +@overload +def take( + a: _ArrayLike[_SCT], + indices: _IntLike_co, + axis: None = ..., + out: None = ..., + mode: _ModeKind = ..., +) -> _SCT: ... +@overload +def take( + a: ArrayLike, + indices: _IntLike_co, + axis: None | SupportsIndex = ..., + out: None = ..., + mode: _ModeKind = ..., +) -> Any: ... +@overload +def take( + a: _ArrayLike[_SCT], + indices: _ArrayLikeInt_co, + axis: None | SupportsIndex = ..., + out: None = ..., + mode: _ModeKind = ..., +) -> NDArray[_SCT]: ... +@overload +def take( + a: ArrayLike, + indices: _ArrayLikeInt_co, + axis: None | SupportsIndex = ..., + out: None = ..., + mode: _ModeKind = ..., +) -> NDArray[Any]: ... +@overload +def take( + a: ArrayLike, + indices: _ArrayLikeInt_co, + axis: None | SupportsIndex = ..., + out: _ArrayType = ..., + mode: _ModeKind = ..., +) -> _ArrayType: ... + +@overload +def reshape( + a: _ArrayLike[_SCT], + newshape: _ShapeLike, + order: _OrderACF = ..., +) -> NDArray[_SCT]: ... +@overload +def reshape( + a: ArrayLike, + newshape: _ShapeLike, + order: _OrderACF = ..., +) -> NDArray[Any]: ... + +@overload +def choose( + a: _IntLike_co, + choices: ArrayLike, + out: None = ..., + mode: _ModeKind = ..., +) -> Any: ... +@overload +def choose( + a: _ArrayLikeInt_co, + choices: _ArrayLike[_SCT], + out: None = ..., + mode: _ModeKind = ..., +) -> NDArray[_SCT]: ... +@overload +def choose( + a: _ArrayLikeInt_co, + choices: ArrayLike, + out: None = ..., + mode: _ModeKind = ..., +) -> NDArray[Any]: ... +@overload +def choose( + a: _ArrayLikeInt_co, + choices: ArrayLike, + out: _ArrayType = ..., + mode: _ModeKind = ..., +) -> _ArrayType: ... + +@overload +def repeat( + a: _ArrayLike[_SCT], + repeats: _ArrayLikeInt_co, + axis: None | SupportsIndex = ..., +) -> NDArray[_SCT]: ... +@overload +def repeat( + a: ArrayLike, + repeats: _ArrayLikeInt_co, + axis: None | SupportsIndex = ..., +) -> NDArray[Any]: ... + +def put( + a: NDArray[Any], + ind: _ArrayLikeInt_co, + v: ArrayLike, + mode: _ModeKind = ..., +) -> None: ... 
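For orientation, a minimal doctest-style sketch of what the ``put`` stub above describes: ``np.put`` writes into the array in place and returns ``None``, so the call itself prints nothing (the values used here are purely illustrative).

>>> import numpy as np
>>> a = np.arange(5)
>>> np.put(a, [0, 2], [-44, -55])   # in-place update, returns None
>>> a
array([-44,   1, -55,   3,   4])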
+ +@overload +def swapaxes( + a: _ArrayLike[_SCT], + axis1: SupportsIndex, + axis2: SupportsIndex, +) -> NDArray[_SCT]: ... +@overload +def swapaxes( + a: ArrayLike, + axis1: SupportsIndex, + axis2: SupportsIndex, +) -> NDArray[Any]: ... + +@overload +def transpose( + a: _ArrayLike[_SCT], + axes: None | _ShapeLike = ... +) -> NDArray[_SCT]: ... +@overload +def transpose( + a: ArrayLike, + axes: None | _ShapeLike = ... +) -> NDArray[Any]: ... + +@overload +def partition( + a: _ArrayLike[_SCT], + kth: _ArrayLikeInt_co, + axis: None | SupportsIndex = ..., + kind: _PartitionKind = ..., + order: None | str | Sequence[str] = ..., +) -> NDArray[_SCT]: ... +@overload +def partition( + a: ArrayLike, + kth: _ArrayLikeInt_co, + axis: None | SupportsIndex = ..., + kind: _PartitionKind = ..., + order: None | str | Sequence[str] = ..., +) -> NDArray[Any]: ... + +def argpartition( + a: ArrayLike, + kth: _ArrayLikeInt_co, + axis: None | SupportsIndex = ..., + kind: _PartitionKind = ..., + order: None | str | Sequence[str] = ..., +) -> NDArray[intp]: ... + +@overload +def sort( + a: _ArrayLike[_SCT], + axis: None | SupportsIndex = ..., + kind: None | _SortKind = ..., + order: None | str | Sequence[str] = ..., +) -> NDArray[_SCT]: ... +@overload +def sort( + a: ArrayLike, + axis: None | SupportsIndex = ..., + kind: None | _SortKind = ..., + order: None | str | Sequence[str] = ..., +) -> NDArray[Any]: ... + +def argsort( + a: ArrayLike, + axis: None | SupportsIndex = ..., + kind: None | _SortKind = ..., + order: None | str | Sequence[str] = ..., +) -> NDArray[intp]: ... + +@overload +def argmax( + a: ArrayLike, + axis: None = ..., + out: None = ..., + *, + keepdims: Literal[False] = ..., +) -> intp: ... +@overload +def argmax( + a: ArrayLike, + axis: None | SupportsIndex = ..., + out: None = ..., + *, + keepdims: bool = ..., +) -> Any: ... +@overload +def argmax( + a: ArrayLike, + axis: None | SupportsIndex = ..., + out: _ArrayType = ..., + *, + keepdims: bool = ..., +) -> _ArrayType: ... + +@overload +def argmin( + a: ArrayLike, + axis: None = ..., + out: None = ..., + *, + keepdims: Literal[False] = ..., +) -> intp: ... +@overload +def argmin( + a: ArrayLike, + axis: None | SupportsIndex = ..., + out: None = ..., + *, + keepdims: bool = ..., +) -> Any: ... +@overload +def argmin( + a: ArrayLike, + axis: None | SupportsIndex = ..., + out: _ArrayType = ..., + *, + keepdims: bool = ..., +) -> _ArrayType: ... + +@overload +def searchsorted( + a: ArrayLike, + v: _ScalarLike_co, + side: _SortSide = ..., + sorter: None | _ArrayLikeInt_co = ..., # 1D int array +) -> intp: ... +@overload +def searchsorted( + a: ArrayLike, + v: ArrayLike, + side: _SortSide = ..., + sorter: None | _ArrayLikeInt_co = ..., # 1D int array +) -> NDArray[intp]: ... + +@overload +def resize( + a: _ArrayLike[_SCT], + new_shape: _ShapeLike, +) -> NDArray[_SCT]: ... +@overload +def resize( + a: ArrayLike, + new_shape: _ShapeLike, +) -> NDArray[Any]: ... + +@overload +def squeeze( + a: _SCT, + axis: None | _ShapeLike = ..., +) -> _SCT: ... +@overload +def squeeze( + a: _ArrayLike[_SCT], + axis: None | _ShapeLike = ..., +) -> NDArray[_SCT]: ... +@overload +def squeeze( + a: ArrayLike, + axis: None | _ShapeLike = ..., +) -> NDArray[Any]: ... + +@overload +def diagonal( + a: _ArrayLike[_SCT], + offset: SupportsIndex = ..., + axis1: SupportsIndex = ..., + axis2: SupportsIndex = ..., # >= 2D array +) -> NDArray[_SCT]: ... 
+@overload +def diagonal( + a: ArrayLike, + offset: SupportsIndex = ..., + axis1: SupportsIndex = ..., + axis2: SupportsIndex = ..., # >= 2D array +) -> NDArray[Any]: ... + +@overload +def trace( + a: ArrayLike, # >= 2D array + offset: SupportsIndex = ..., + axis1: SupportsIndex = ..., + axis2: SupportsIndex = ..., + dtype: DTypeLike = ..., + out: None = ..., +) -> Any: ... +@overload +def trace( + a: ArrayLike, # >= 2D array + offset: SupportsIndex = ..., + axis1: SupportsIndex = ..., + axis2: SupportsIndex = ..., + dtype: DTypeLike = ..., + out: _ArrayType = ..., +) -> _ArrayType: ... + +@overload +def ravel(a: _ArrayLike[_SCT], order: _OrderKACF = ...) -> NDArray[_SCT]: ... +@overload +def ravel(a: ArrayLike, order: _OrderKACF = ...) -> NDArray[Any]: ... + +def nonzero(a: ArrayLike) -> tuple[NDArray[intp], ...]: ... + +def shape(a: ArrayLike) -> _Shape: ... + +@overload +def compress( + condition: _ArrayLikeBool_co, # 1D bool array + a: _ArrayLike[_SCT], + axis: None | SupportsIndex = ..., + out: None = ..., +) -> NDArray[_SCT]: ... +@overload +def compress( + condition: _ArrayLikeBool_co, # 1D bool array + a: ArrayLike, + axis: None | SupportsIndex = ..., + out: None = ..., +) -> NDArray[Any]: ... +@overload +def compress( + condition: _ArrayLikeBool_co, # 1D bool array + a: ArrayLike, + axis: None | SupportsIndex = ..., + out: _ArrayType = ..., +) -> _ArrayType: ... + +@overload +def clip( + a: _SCT, + a_min: None | ArrayLike, + a_max: None | ArrayLike, + out: None = ..., + *, + dtype: None = ..., + where: None | _ArrayLikeBool_co = ..., + order: _OrderKACF = ..., + subok: bool = ..., + signature: str | tuple[None | str, ...] = ..., + extobj: list[Any] = ..., + casting: _CastingKind = ..., +) -> _SCT: ... +@overload +def clip( + a: _ScalarLike_co, + a_min: None | ArrayLike, + a_max: None | ArrayLike, + out: None = ..., + *, + dtype: None = ..., + where: None | _ArrayLikeBool_co = ..., + order: _OrderKACF = ..., + subok: bool = ..., + signature: str | tuple[None | str, ...] = ..., + extobj: list[Any] = ..., + casting: _CastingKind = ..., +) -> Any: ... +@overload +def clip( + a: _ArrayLike[_SCT], + a_min: None | ArrayLike, + a_max: None | ArrayLike, + out: None = ..., + *, + dtype: None = ..., + where: None | _ArrayLikeBool_co = ..., + order: _OrderKACF = ..., + subok: bool = ..., + signature: str | tuple[None | str, ...] = ..., + extobj: list[Any] = ..., + casting: _CastingKind = ..., +) -> NDArray[_SCT]: ... +@overload +def clip( + a: ArrayLike, + a_min: None | ArrayLike, + a_max: None | ArrayLike, + out: None = ..., + *, + dtype: None = ..., + where: None | _ArrayLikeBool_co = ..., + order: _OrderKACF = ..., + subok: bool = ..., + signature: str | tuple[None | str, ...] = ..., + extobj: list[Any] = ..., + casting: _CastingKind = ..., +) -> NDArray[Any]: ... +@overload +def clip( + a: ArrayLike, + a_min: None | ArrayLike, + a_max: None | ArrayLike, + out: _ArrayType = ..., + *, + dtype: DTypeLike, + where: None | _ArrayLikeBool_co = ..., + order: _OrderKACF = ..., + subok: bool = ..., + signature: str | tuple[None | str, ...] = ..., + extobj: list[Any] = ..., + casting: _CastingKind = ..., +) -> Any: ... +@overload +def clip( + a: ArrayLike, + a_min: None | ArrayLike, + a_max: None | ArrayLike, + out: _ArrayType, + *, + dtype: DTypeLike = ..., + where: None | _ArrayLikeBool_co = ..., + order: _OrderKACF = ..., + subok: bool = ..., + signature: str | tuple[None | str, ...] = ..., + extobj: list[Any] = ..., + casting: _CastingKind = ..., +) -> _ArrayType: ... 
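A brief illustrative sketch of how the ``clip`` overloads above resolve in practice: array-like input yields an array, while plain scalar input yields a scalar (outputs shown are from a typical NumPy 1.x session).

>>> import numpy as np
>>> np.clip(np.arange(10), 1, 8)        # array in, array out
array([1, 1, 2, 3, 4, 5, 6, 7, 8, 8])
>>> np.clip(5, 0, 3)                    # scalar in, scalar out
3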
+ +@overload +def sum( + a: _ArrayLike[_SCT], + axis: None = ..., + dtype: None = ..., + out: None = ..., + keepdims: bool = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., +) -> _SCT: ... +@overload +def sum( + a: ArrayLike, + axis: None | _ShapeLike = ..., + dtype: DTypeLike = ..., + out: None = ..., + keepdims: bool = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., +) -> Any: ... +@overload +def sum( + a: ArrayLike, + axis: None | _ShapeLike = ..., + dtype: DTypeLike = ..., + out: _ArrayType = ..., + keepdims: bool = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., +) -> _ArrayType: ... + +@overload +def all( + a: ArrayLike, + axis: None = ..., + out: None = ..., + keepdims: Literal[False] = ..., + *, + where: _ArrayLikeBool_co = ..., +) -> bool_: ... +@overload +def all( + a: ArrayLike, + axis: None | _ShapeLike = ..., + out: None = ..., + keepdims: bool = ..., + *, + where: _ArrayLikeBool_co = ..., +) -> Any: ... +@overload +def all( + a: ArrayLike, + axis: None | _ShapeLike = ..., + out: _ArrayType = ..., + keepdims: bool = ..., + *, + where: _ArrayLikeBool_co = ..., +) -> _ArrayType: ... + +@overload +def any( + a: ArrayLike, + axis: None = ..., + out: None = ..., + keepdims: Literal[False] = ..., + *, + where: _ArrayLikeBool_co = ..., +) -> bool_: ... +@overload +def any( + a: ArrayLike, + axis: None | _ShapeLike = ..., + out: None = ..., + keepdims: bool = ..., + *, + where: _ArrayLikeBool_co = ..., +) -> Any: ... +@overload +def any( + a: ArrayLike, + axis: None | _ShapeLike = ..., + out: _ArrayType = ..., + keepdims: bool = ..., + *, + where: _ArrayLikeBool_co = ..., +) -> _ArrayType: ... + +@overload +def cumsum( + a: _ArrayLike[_SCT], + axis: None | SupportsIndex = ..., + dtype: None = ..., + out: None = ..., +) -> NDArray[_SCT]: ... +@overload +def cumsum( + a: ArrayLike, + axis: None | SupportsIndex = ..., + dtype: None = ..., + out: None = ..., +) -> NDArray[Any]: ... +@overload +def cumsum( + a: ArrayLike, + axis: None | SupportsIndex = ..., + dtype: _DTypeLike[_SCT] = ..., + out: None = ..., +) -> NDArray[_SCT]: ... +@overload +def cumsum( + a: ArrayLike, + axis: None | SupportsIndex = ..., + dtype: DTypeLike = ..., + out: None = ..., +) -> NDArray[Any]: ... +@overload +def cumsum( + a: ArrayLike, + axis: None | SupportsIndex = ..., + dtype: DTypeLike = ..., + out: _ArrayType = ..., +) -> _ArrayType: ... + +@overload +def ptp( + a: _ArrayLike[_SCT], + axis: None = ..., + out: None = ..., + keepdims: Literal[False] = ..., +) -> _SCT: ... +@overload +def ptp( + a: ArrayLike, + axis: None | _ShapeLike = ..., + out: None = ..., + keepdims: bool = ..., +) -> Any: ... +@overload +def ptp( + a: ArrayLike, + axis: None | _ShapeLike = ..., + out: _ArrayType = ..., + keepdims: bool = ..., +) -> _ArrayType: ... + +@overload +def amax( + a: _ArrayLike[_SCT], + axis: None = ..., + out: None = ..., + keepdims: Literal[False] = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., +) -> _SCT: ... +@overload +def amax( + a: ArrayLike, + axis: None | _ShapeLike = ..., + out: None = ..., + keepdims: bool = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., +) -> Any: ... +@overload +def amax( + a: ArrayLike, + axis: None | _ShapeLike = ..., + out: _ArrayType = ..., + keepdims: bool = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., +) -> _ArrayType: ... 
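Roughly, the ``amax`` overloads distinguish a full reduction, which returns a scalar of the array's element type, from reductions that keep dimensions or write to ``out``, which return arrays. An illustrative doctest, with outputs as printed by NumPy 1.x:

>>> import numpy as np
>>> x = np.array([1.5, 0.5], dtype=np.float32)
>>> np.amax(x)                          # full reduction -> float32 scalar
1.5
>>> np.amax(x, axis=0, keepdims=True)   # keepdims -> ndarray result
array([1.5], dtype=float32)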
+ +@overload +def amin( + a: _ArrayLike[_SCT], + axis: None = ..., + out: None = ..., + keepdims: Literal[False] = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., +) -> _SCT: ... +@overload +def amin( + a: ArrayLike, + axis: None | _ShapeLike = ..., + out: None = ..., + keepdims: bool = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., +) -> Any: ... +@overload +def amin( + a: ArrayLike, + axis: None | _ShapeLike = ..., + out: _ArrayType = ..., + keepdims: bool = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., +) -> _ArrayType: ... + +# TODO: `np.prod()``: For object arrays `initial` does not necessarily +# have to be a numerical scalar. +# The only requirement is that it is compatible +# with the `.__mul__()` method(s) of the passed array's elements. + +# Note that the same situation holds for all wrappers around +# `np.ufunc.reduce`, e.g. `np.sum()` (`.__add__()`). +@overload +def prod( + a: _ArrayLikeBool_co, + axis: None = ..., + dtype: None = ..., + out: None = ..., + keepdims: Literal[False] = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., +) -> int_: ... +@overload +def prod( + a: _ArrayLikeUInt_co, + axis: None = ..., + dtype: None = ..., + out: None = ..., + keepdims: Literal[False] = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., +) -> uint64: ... +@overload +def prod( + a: _ArrayLikeInt_co, + axis: None = ..., + dtype: None = ..., + out: None = ..., + keepdims: Literal[False] = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., +) -> int64: ... +@overload +def prod( + a: _ArrayLikeFloat_co, + axis: None = ..., + dtype: None = ..., + out: None = ..., + keepdims: Literal[False] = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., +) -> floating[Any]: ... +@overload +def prod( + a: _ArrayLikeComplex_co, + axis: None = ..., + dtype: None = ..., + out: None = ..., + keepdims: Literal[False] = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., +) -> complexfloating[Any, Any]: ... +@overload +def prod( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: None | _ShapeLike = ..., + dtype: None = ..., + out: None = ..., + keepdims: bool = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., +) -> Any: ... +@overload +def prod( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: None = ..., + dtype: _DTypeLike[_SCT] = ..., + out: None = ..., + keepdims: Literal[False] = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., +) -> _SCT: ... +@overload +def prod( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: None | _ShapeLike = ..., + dtype: None | DTypeLike = ..., + out: None = ..., + keepdims: bool = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., +) -> Any: ... +@overload +def prod( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: None | _ShapeLike = ..., + dtype: None | DTypeLike = ..., + out: _ArrayType = ..., + keepdims: bool = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., +) -> _ArrayType: ... + +@overload +def cumprod( + a: _ArrayLikeBool_co, + axis: None | SupportsIndex = ..., + dtype: None = ..., + out: None = ..., +) -> NDArray[int_]: ... +@overload +def cumprod( + a: _ArrayLikeUInt_co, + axis: None | SupportsIndex = ..., + dtype: None = ..., + out: None = ..., +) -> NDArray[uint64]: ... 
+@overload +def cumprod( + a: _ArrayLikeInt_co, + axis: None | SupportsIndex = ..., + dtype: None = ..., + out: None = ..., +) -> NDArray[int64]: ... +@overload +def cumprod( + a: _ArrayLikeFloat_co, + axis: None | SupportsIndex = ..., + dtype: None = ..., + out: None = ..., +) -> NDArray[floating[Any]]: ... +@overload +def cumprod( + a: _ArrayLikeComplex_co, + axis: None | SupportsIndex = ..., + dtype: None = ..., + out: None = ..., +) -> NDArray[complexfloating[Any, Any]]: ... +@overload +def cumprod( + a: _ArrayLikeObject_co, + axis: None | SupportsIndex = ..., + dtype: None = ..., + out: None = ..., +) -> NDArray[object_]: ... +@overload +def cumprod( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: None | SupportsIndex = ..., + dtype: _DTypeLike[_SCT] = ..., + out: None = ..., +) -> NDArray[_SCT]: ... +@overload +def cumprod( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: None | SupportsIndex = ..., + dtype: DTypeLike = ..., + out: None = ..., +) -> NDArray[Any]: ... +@overload +def cumprod( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: None | SupportsIndex = ..., + dtype: DTypeLike = ..., + out: _ArrayType = ..., +) -> _ArrayType: ... + +def ndim(a: ArrayLike) -> int: ... + +def size(a: ArrayLike, axis: None | int = ...) -> int: ... + +@overload +def around( + a: _BoolLike_co, + decimals: SupportsIndex = ..., + out: None = ..., +) -> float16: ... +@overload +def around( + a: _SCT_uifcO, + decimals: SupportsIndex = ..., + out: None = ..., +) -> _SCT_uifcO: ... +@overload +def around( + a: _ComplexLike_co | object_, + decimals: SupportsIndex = ..., + out: None = ..., +) -> Any: ... +@overload +def around( + a: _ArrayLikeBool_co, + decimals: SupportsIndex = ..., + out: None = ..., +) -> NDArray[float16]: ... +@overload +def around( + a: _ArrayLike[_SCT_uifcO], + decimals: SupportsIndex = ..., + out: None = ..., +) -> NDArray[_SCT_uifcO]: ... +@overload +def around( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + decimals: SupportsIndex = ..., + out: None = ..., +) -> NDArray[Any]: ... +@overload +def around( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + decimals: SupportsIndex = ..., + out: _ArrayType = ..., +) -> _ArrayType: ... + +@overload +def mean( + a: _ArrayLikeFloat_co, + axis: None = ..., + dtype: None = ..., + out: None = ..., + keepdims: Literal[False] = ..., + *, + where: _ArrayLikeBool_co = ..., +) -> floating[Any]: ... +@overload +def mean( + a: _ArrayLikeComplex_co, + axis: None = ..., + dtype: None = ..., + out: None = ..., + keepdims: Literal[False] = ..., + *, + where: _ArrayLikeBool_co = ..., +) -> complexfloating[Any, Any]: ... +@overload +def mean( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: None | _ShapeLike = ..., + dtype: None = ..., + out: None = ..., + keepdims: bool = ..., + *, + where: _ArrayLikeBool_co = ..., +) -> Any: ... +@overload +def mean( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: None = ..., + dtype: _DTypeLike[_SCT] = ..., + out: None = ..., + keepdims: Literal[False] = ..., + *, + where: _ArrayLikeBool_co = ..., +) -> _SCT: ... +@overload +def mean( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: None | _ShapeLike = ..., + dtype: DTypeLike = ..., + out: None = ..., + keepdims: bool = ..., + *, + where: _ArrayLikeBool_co = ..., +) -> Any: ... +@overload +def mean( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: None | _ShapeLike = ..., + dtype: DTypeLike = ..., + out: _ArrayType = ..., + keepdims: bool = ..., + *, + where: _ArrayLikeBool_co = ..., +) -> _ArrayType: ... 
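As a rough runtime counterpart to the ``mean`` overloads above: real-valued input produces a floating result and complex input a complex one (illustrative doctest; exact reprs may vary between NumPy versions).

>>> import numpy as np
>>> np.mean(np.array([1, 2, 3]))        # integer input -> floating result
2.0
>>> np.mean(np.array([1+1j, 3+3j]))     # complex input -> complexfloating result
(2+2j)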
+ +@overload +def std( + a: _ArrayLikeComplex_co, + axis: None = ..., + dtype: None = ..., + out: None = ..., + ddof: float = ..., + keepdims: Literal[False] = ..., + *, + where: _ArrayLikeBool_co = ..., +) -> floating[Any]: ... +@overload +def std( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: None | _ShapeLike = ..., + dtype: None = ..., + out: None = ..., + ddof: float = ..., + keepdims: bool = ..., + *, + where: _ArrayLikeBool_co = ..., +) -> Any: ... +@overload +def std( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: None = ..., + dtype: _DTypeLike[_SCT] = ..., + out: None = ..., + ddof: float = ..., + keepdims: Literal[False] = ..., + *, + where: _ArrayLikeBool_co = ..., +) -> _SCT: ... +@overload +def std( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: None | _ShapeLike = ..., + dtype: DTypeLike = ..., + out: None = ..., + ddof: float = ..., + keepdims: bool = ..., + *, + where: _ArrayLikeBool_co = ..., +) -> Any: ... +@overload +def std( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: None | _ShapeLike = ..., + dtype: DTypeLike = ..., + out: _ArrayType = ..., + ddof: float = ..., + keepdims: bool = ..., + *, + where: _ArrayLikeBool_co = ..., +) -> _ArrayType: ... + +@overload +def var( + a: _ArrayLikeComplex_co, + axis: None = ..., + dtype: None = ..., + out: None = ..., + ddof: float = ..., + keepdims: Literal[False] = ..., + *, + where: _ArrayLikeBool_co = ..., +) -> floating[Any]: ... +@overload +def var( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: None | _ShapeLike = ..., + dtype: None = ..., + out: None = ..., + ddof: float = ..., + keepdims: bool = ..., + *, + where: _ArrayLikeBool_co = ..., +) -> Any: ... +@overload +def var( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: None = ..., + dtype: _DTypeLike[_SCT] = ..., + out: None = ..., + ddof: float = ..., + keepdims: Literal[False] = ..., + *, + where: _ArrayLikeBool_co = ..., +) -> _SCT: ... +@overload +def var( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: None | _ShapeLike = ..., + dtype: DTypeLike = ..., + out: None = ..., + ddof: float = ..., + keepdims: bool = ..., + *, + where: _ArrayLikeBool_co = ..., +) -> Any: ... +@overload +def var( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: None | _ShapeLike = ..., + dtype: DTypeLike = ..., + out: _ArrayType = ..., + ddof: float = ..., + keepdims: bool = ..., + *, + where: _ArrayLikeBool_co = ..., +) -> _ArrayType: ... + +max = amax +min = amin +round = around diff --git a/.venv/lib/python3.12/site-packages/numpy/core/function_base.py b/.venv/lib/python3.12/site-packages/numpy/core/function_base.py new file mode 100644 index 00000000..00e4e6b0 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/function_base.py @@ -0,0 +1,551 @@ +import functools +import warnings +import operator +import types + +import numpy as np +from . import numeric as _nx +from .numeric import result_type, NaN, asanyarray, ndim +from numpy.core.multiarray import add_docstring +from numpy.core import overrides + +__all__ = ['logspace', 'linspace', 'geomspace'] + + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy') + + +def _linspace_dispatcher(start, stop, num=None, endpoint=None, retstep=None, + dtype=None, axis=None): + return (start, stop) + + +@array_function_dispatch(_linspace_dispatcher) +def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None, + axis=0): + """ + Return evenly spaced numbers over a specified interval. 
+ + Returns `num` evenly spaced samples, calculated over the + interval [`start`, `stop`]. + + The endpoint of the interval can optionally be excluded. + + .. versionchanged:: 1.16.0 + Non-scalar `start` and `stop` are now supported. + + .. versionchanged:: 1.20.0 + Values are rounded towards ``-inf`` instead of ``0`` when an + integer ``dtype`` is specified. The old behavior can + still be obtained with ``np.linspace(start, stop, num).astype(int)`` + + Parameters + ---------- + start : array_like + The starting value of the sequence. + stop : array_like + The end value of the sequence, unless `endpoint` is set to False. + In that case, the sequence consists of all but the last of ``num + 1`` + evenly spaced samples, so that `stop` is excluded. Note that the step + size changes when `endpoint` is False. + num : int, optional + Number of samples to generate. Default is 50. Must be non-negative. + endpoint : bool, optional + If True, `stop` is the last sample. Otherwise, it is not included. + Default is True. + retstep : bool, optional + If True, return (`samples`, `step`), where `step` is the spacing + between samples. + dtype : dtype, optional + The type of the output array. If `dtype` is not given, the data type + is inferred from `start` and `stop`. The inferred dtype will never be + an integer; `float` is chosen even if the arguments would produce an + array of integers. + + .. versionadded:: 1.9.0 + + axis : int, optional + The axis in the result to store the samples. Relevant only if start + or stop are array-like. By default (0), the samples will be along a + new axis inserted at the beginning. Use -1 to get an axis at the end. + + .. versionadded:: 1.16.0 + + Returns + ------- + samples : ndarray + There are `num` equally spaced samples in the closed interval + ``[start, stop]`` or the half-open interval ``[start, stop)`` + (depending on whether `endpoint` is True or False). + step : float, optional + Only returned if `retstep` is True + + Size of spacing between samples. + + + See Also + -------- + arange : Similar to `linspace`, but uses a step size (instead of the + number of samples). + geomspace : Similar to `linspace`, but with numbers spaced evenly on a log + scale (a geometric progression). + logspace : Similar to `geomspace`, but with the end points specified as + logarithms. + :ref:`how-to-partition` + + Examples + -------- + >>> np.linspace(2.0, 3.0, num=5) + array([2. , 2.25, 2.5 , 2.75, 3. ]) + >>> np.linspace(2.0, 3.0, num=5, endpoint=False) + array([2. , 2.2, 2.4, 2.6, 2.8]) + >>> np.linspace(2.0, 3.0, num=5, retstep=True) + (array([2. , 2.25, 2.5 , 2.75, 3. ]), 0.25) + + Graphical illustration: + + >>> import matplotlib.pyplot as plt + >>> N = 8 + >>> y = np.zeros(N) + >>> x1 = np.linspace(0, 10, N, endpoint=True) + >>> x2 = np.linspace(0, 10, N, endpoint=False) + >>> plt.plot(x1, y, 'o') + [<matplotlib.lines.Line2D object at 0x...>] + >>> plt.plot(x2, y + 0.5, 'o') + [<matplotlib.lines.Line2D object at 0x...>] + >>> plt.ylim([-0.5, 1]) + (-0.5, 1) + >>> plt.show() + + """ + num = operator.index(num) + if num < 0: + raise ValueError("Number of samples, %s, must be non-negative." 
% num) + div = (num - 1) if endpoint else num + + # Convert float/complex array scalars to float, gh-3504 + # and make sure one can use variables that have an __array_interface__, gh-6634 + start = asanyarray(start) * 1.0 + stop = asanyarray(stop) * 1.0 + + dt = result_type(start, stop, float(num)) + if dtype is None: + dtype = dt + integer_dtype = False + else: + integer_dtype = _nx.issubdtype(dtype, _nx.integer) + + delta = stop - start + y = _nx.arange(0, num, dtype=dt).reshape((-1,) + (1,) * ndim(delta)) + # In-place multiplication y *= delta/div is faster, but prevents the multiplicant + # from overriding what class is produced, and thus prevents, e.g. use of Quantities, + # see gh-7142. Hence, we multiply in place only for standard scalar types. + if div > 0: + _mult_inplace = _nx.isscalar(delta) + step = delta / div + any_step_zero = ( + step == 0 if _mult_inplace else _nx.asanyarray(step == 0).any()) + if any_step_zero: + # Special handling for denormal numbers, gh-5437 + y /= div + if _mult_inplace: + y *= delta + else: + y = y * delta + else: + if _mult_inplace: + y *= step + else: + y = y * step + else: + # sequences with 0 items or 1 item with endpoint=True (i.e. div <= 0) + # have an undefined step + step = NaN + # Multiply with delta to allow possible override of output class. + y = y * delta + + y += start + + if endpoint and num > 1: + y[-1, ...] = stop + + if axis != 0: + y = _nx.moveaxis(y, 0, axis) + + if integer_dtype: + _nx.floor(y, out=y) + + if retstep: + return y.astype(dtype, copy=False), step + else: + return y.astype(dtype, copy=False) + + +def _logspace_dispatcher(start, stop, num=None, endpoint=None, base=None, + dtype=None, axis=None): + return (start, stop, base) + + +@array_function_dispatch(_logspace_dispatcher) +def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None, + axis=0): + """ + Return numbers spaced evenly on a log scale. + + In linear space, the sequence starts at ``base ** start`` + (`base` to the power of `start`) and ends with ``base ** stop`` + (see `endpoint` below). + + .. versionchanged:: 1.16.0 + Non-scalar `start` and `stop` are now supported. + + .. versionchanged:: 1.25.0 + Non-scalar 'base` is now supported + + Parameters + ---------- + start : array_like + ``base ** start`` is the starting value of the sequence. + stop : array_like + ``base ** stop`` is the final value of the sequence, unless `endpoint` + is False. In that case, ``num + 1`` values are spaced over the + interval in log-space, of which all but the last (a sequence of + length `num`) are returned. + num : integer, optional + Number of samples to generate. Default is 50. + endpoint : boolean, optional + If true, `stop` is the last sample. Otherwise, it is not included. + Default is True. + base : array_like, optional + The base of the log space. The step size between the elements in + ``ln(samples) / ln(base)`` (or ``log_base(samples)``) is uniform. + Default is 10.0. + dtype : dtype + The type of the output array. If `dtype` is not given, the data type + is inferred from `start` and `stop`. The inferred type will never be + an integer; `float` is chosen even if the arguments would produce an + array of integers. + axis : int, optional + The axis in the result to store the samples. Relevant only if start, + stop, or base are array-like. By default (0), the samples will be + along a new axis inserted at the beginning. Use -1 to get an axis at + the end. + + .. 
versionadded:: 1.16.0 + + + Returns + ------- + samples : ndarray + `num` samples, equally spaced on a log scale. + + See Also + -------- + arange : Similar to linspace, with the step size specified instead of the + number of samples. Note that, when used with a float endpoint, the + endpoint may or may not be included. + linspace : Similar to logspace, but with the samples uniformly distributed + in linear space, instead of log space. + geomspace : Similar to logspace, but with endpoints specified directly. + :ref:`how-to-partition` + + Notes + ----- + If base is a scalar, logspace is equivalent to the code + + >>> y = np.linspace(start, stop, num=num, endpoint=endpoint) + ... # doctest: +SKIP + >>> power(base, y).astype(dtype) + ... # doctest: +SKIP + + Examples + -------- + >>> np.logspace(2.0, 3.0, num=4) + array([ 100. , 215.443469 , 464.15888336, 1000. ]) + >>> np.logspace(2.0, 3.0, num=4, endpoint=False) + array([100. , 177.827941 , 316.22776602, 562.34132519]) + >>> np.logspace(2.0, 3.0, num=4, base=2.0) + array([4. , 5.0396842 , 6.34960421, 8. ]) + >>> np.logspace(2.0, 3.0, num=4, base=[2.0, 3.0], axis=-1) + array([[ 4. , 5.0396842 , 6.34960421, 8. ], + [ 9. , 12.98024613, 18.72075441, 27. ]]) + + Graphical illustration: + + >>> import matplotlib.pyplot as plt + >>> N = 10 + >>> x1 = np.logspace(0.1, 1, N, endpoint=True) + >>> x2 = np.logspace(0.1, 1, N, endpoint=False) + >>> y = np.zeros(N) + >>> plt.plot(x1, y, 'o') + [<matplotlib.lines.Line2D object at 0x...>] + >>> plt.plot(x2, y + 0.5, 'o') + [<matplotlib.lines.Line2D object at 0x...>] + >>> plt.ylim([-0.5, 1]) + (-0.5, 1) + >>> plt.show() + + """ + ndmax = np.broadcast(start, stop, base).ndim + start, stop, base = ( + np.array(a, copy=False, subok=True, ndmin=ndmax) + for a in (start, stop, base) + ) + y = linspace(start, stop, num=num, endpoint=endpoint, axis=axis) + base = np.expand_dims(base, axis=axis) + if dtype is None: + return _nx.power(base, y) + return _nx.power(base, y).astype(dtype, copy=False) + + +def _geomspace_dispatcher(start, stop, num=None, endpoint=None, dtype=None, + axis=None): + return (start, stop) + + +@array_function_dispatch(_geomspace_dispatcher) +def geomspace(start, stop, num=50, endpoint=True, dtype=None, axis=0): + """ + Return numbers spaced evenly on a log scale (a geometric progression). + + This is similar to `logspace`, but with endpoints specified directly. + Each output sample is a constant multiple of the previous. + + .. versionchanged:: 1.16.0 + Non-scalar `start` and `stop` are now supported. + + Parameters + ---------- + start : array_like + The starting value of the sequence. + stop : array_like + The final value of the sequence, unless `endpoint` is False. + In that case, ``num + 1`` values are spaced over the + interval in log-space, of which all but the last (a sequence of + length `num`) are returned. + num : integer, optional + Number of samples to generate. Default is 50. + endpoint : boolean, optional + If true, `stop` is the last sample. Otherwise, it is not included. + Default is True. + dtype : dtype + The type of the output array. If `dtype` is not given, the data type + is inferred from `start` and `stop`. The inferred dtype will never be + an integer; `float` is chosen even if the arguments would produce an + array of integers. + axis : int, optional + The axis in the result to store the samples. Relevant only if start + or stop are array-like. By default (0), the samples will be along a + new axis inserted at the beginning. Use -1 to get an axis at the end. + + .. 
versionadded:: 1.16.0 + + Returns + ------- + samples : ndarray + `num` samples, equally spaced on a log scale. + + See Also + -------- + logspace : Similar to geomspace, but with endpoints specified using log + and base. + linspace : Similar to geomspace, but with arithmetic instead of geometric + progression. + arange : Similar to linspace, with the step size specified instead of the + number of samples. + :ref:`how-to-partition` + + Notes + ----- + If the inputs or dtype are complex, the output will follow a logarithmic + spiral in the complex plane. (There are an infinite number of spirals + passing through two points; the output will follow the shortest such path.) + + Examples + -------- + >>> np.geomspace(1, 1000, num=4) + array([ 1., 10., 100., 1000.]) + >>> np.geomspace(1, 1000, num=3, endpoint=False) + array([ 1., 10., 100.]) + >>> np.geomspace(1, 1000, num=4, endpoint=False) + array([ 1. , 5.62341325, 31.6227766 , 177.827941 ]) + >>> np.geomspace(1, 256, num=9) + array([ 1., 2., 4., 8., 16., 32., 64., 128., 256.]) + + Note that the above may not produce exact integers: + + >>> np.geomspace(1, 256, num=9, dtype=int) + array([ 1, 2, 4, 7, 16, 32, 63, 127, 256]) + >>> np.around(np.geomspace(1, 256, num=9)).astype(int) + array([ 1, 2, 4, 8, 16, 32, 64, 128, 256]) + + Negative, decreasing, and complex inputs are allowed: + + >>> np.geomspace(1000, 1, num=4) + array([1000., 100., 10., 1.]) + >>> np.geomspace(-1000, -1, num=4) + array([-1000., -100., -10., -1.]) + >>> np.geomspace(1j, 1000j, num=4) # Straight line + array([0. +1.j, 0. +10.j, 0. +100.j, 0.+1000.j]) + >>> np.geomspace(-1+0j, 1+0j, num=5) # Circle + array([-1.00000000e+00+1.22464680e-16j, -7.07106781e-01+7.07106781e-01j, + 6.12323400e-17+1.00000000e+00j, 7.07106781e-01+7.07106781e-01j, + 1.00000000e+00+0.00000000e+00j]) + + Graphical illustration of `endpoint` parameter: + + >>> import matplotlib.pyplot as plt + >>> N = 10 + >>> y = np.zeros(N) + >>> plt.semilogx(np.geomspace(1, 1000, N, endpoint=True), y + 1, 'o') + [<matplotlib.lines.Line2D object at 0x...>] + >>> plt.semilogx(np.geomspace(1, 1000, N, endpoint=False), y + 2, 'o') + [<matplotlib.lines.Line2D object at 0x...>] + >>> plt.axis([0.5, 2000, 0, 3]) + [0.5, 2000, 0, 3] + >>> plt.grid(True, color='0.7', linestyle='-', which='both', axis='both') + >>> plt.show() + + """ + start = asanyarray(start) + stop = asanyarray(stop) + if _nx.any(start == 0) or _nx.any(stop == 0): + raise ValueError('Geometric sequence cannot include zero') + + dt = result_type(start, stop, float(num), _nx.zeros((), dtype)) + if dtype is None: + dtype = dt + else: + # complex to dtype('complex128'), for instance + dtype = _nx.dtype(dtype) + + # Promote both arguments to the same dtype in case, for instance, one is + # complex and another is negative and log would produce NaN otherwise. + # Copy since we may change things in-place further down. + start = start.astype(dt, copy=True) + stop = stop.astype(dt, copy=True) + + out_sign = _nx.ones(_nx.broadcast(start, stop).shape, dt) + # Avoid negligible real or imaginary parts in output by rotating to + # positive real, calculating, then undoing rotation + if _nx.issubdtype(dt, _nx.complexfloating): + all_imag = (start.real == 0.) & (stop.real == 0.) 
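+        # Purely imaginary endpoints (e.g. the ``geomspace(1j, 1000j)`` doctest
+        # above) are reduced to their imaginary parts here; the factor of 1j is
+        # folded into ``out_sign`` and multiplied back in after the log-space
+        # interpolation below.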
+ if _nx.any(all_imag): + start[all_imag] = start[all_imag].imag + stop[all_imag] = stop[all_imag].imag + out_sign[all_imag] = 1j + + both_negative = (_nx.sign(start) == -1) & (_nx.sign(stop) == -1) + if _nx.any(both_negative): + _nx.negative(start, out=start, where=both_negative) + _nx.negative(stop, out=stop, where=both_negative) + _nx.negative(out_sign, out=out_sign, where=both_negative) + + log_start = _nx.log10(start) + log_stop = _nx.log10(stop) + result = logspace(log_start, log_stop, num=num, + endpoint=endpoint, base=10.0, dtype=dtype) + + # Make sure the endpoints match the start and stop arguments. This is + # necessary because np.exp(np.log(x)) is not necessarily equal to x. + if num > 0: + result[0] = start + if num > 1 and endpoint: + result[-1] = stop + + result = out_sign * result + + if axis != 0: + result = _nx.moveaxis(result, 0, axis) + + return result.astype(dtype, copy=False) + + +def _needs_add_docstring(obj): + """ + Returns true if the only way to set the docstring of `obj` from python is + via add_docstring. + + This function errs on the side of being overly conservative. + """ + Py_TPFLAGS_HEAPTYPE = 1 << 9 + + if isinstance(obj, (types.FunctionType, types.MethodType, property)): + return False + + if isinstance(obj, type) and obj.__flags__ & Py_TPFLAGS_HEAPTYPE: + return False + + return True + + +def _add_docstring(obj, doc, warn_on_python): + if warn_on_python and not _needs_add_docstring(obj): + warnings.warn( + "add_newdoc was used on a pure-python object {}. " + "Prefer to attach it directly to the source." + .format(obj), + UserWarning, + stacklevel=3) + try: + add_docstring(obj, doc) + except Exception: + pass + + +def add_newdoc(place, obj, doc, warn_on_python=True): + """ + Add documentation to an existing object, typically one defined in C + + The purpose is to allow easier editing of the docstrings without requiring + a re-compile. This exists primarily for internal use within numpy itself. + + Parameters + ---------- + place : str + The absolute name of the module to import from + obj : str + The name of the object to add documentation to, typically a class or + function name + doc : {str, Tuple[str, str], List[Tuple[str, str]]} + If a string, the documentation to apply to `obj` + + If a tuple, then the first element is interpreted as an attribute of + `obj` and the second as the docstring to apply - ``(method, docstring)`` + + If a list, then each element of the list should be a tuple of length + two - ``[(method1, docstring1), (method2, docstring2), ...]`` + warn_on_python : bool + If True, the default, emit `UserWarning` if this is used to attach + documentation to a pure-python object. + + Notes + ----- + This routine never raises an error if the docstring can't be written, but + will raise an error if the object being documented does not exist. + + This routine cannot modify read-only docstrings, as appear + in new-style classes or built-in functions. Because this + routine never raises an error the caller must check manually + that the docstrings were changed. + + Since this function grabs the ``char *`` from a c-level str object and puts + it into the ``tp_doc`` slot of the type of `obj`, it violates a number of + C-API best-practices, by: + + - modifying a `PyTypeObject` after calling `PyType_Ready` + - calling `Py_INCREF` on the str and losing the reference, so the str + will never be released + + If possible it should be avoided. 
+ """ + new = getattr(__import__(place, globals(), {}, [obj]), obj) + if isinstance(doc, str): + _add_docstring(new, doc.strip(), warn_on_python) + elif isinstance(doc, tuple): + attr, docstring = doc + _add_docstring(getattr(new, attr), docstring.strip(), warn_on_python) + elif isinstance(doc, list): + for attr, docstring in doc: + _add_docstring(getattr(new, attr), docstring.strip(), warn_on_python) diff --git a/.venv/lib/python3.12/site-packages/numpy/core/function_base.pyi b/.venv/lib/python3.12/site-packages/numpy/core/function_base.pyi new file mode 100644 index 00000000..2c2a277b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/function_base.pyi @@ -0,0 +1,187 @@ +from typing import ( + Literal as L, + overload, + Any, + SupportsIndex, + TypeVar, +) + +from numpy import floating, complexfloating, generic +from numpy._typing import ( + NDArray, + DTypeLike, + _DTypeLike, + _ArrayLikeFloat_co, + _ArrayLikeComplex_co, +) + +_SCT = TypeVar("_SCT", bound=generic) + +__all__: list[str] + +@overload +def linspace( + start: _ArrayLikeFloat_co, + stop: _ArrayLikeFloat_co, + num: SupportsIndex = ..., + endpoint: bool = ..., + retstep: L[False] = ..., + dtype: None = ..., + axis: SupportsIndex = ..., +) -> NDArray[floating[Any]]: ... +@overload +def linspace( + start: _ArrayLikeComplex_co, + stop: _ArrayLikeComplex_co, + num: SupportsIndex = ..., + endpoint: bool = ..., + retstep: L[False] = ..., + dtype: None = ..., + axis: SupportsIndex = ..., +) -> NDArray[complexfloating[Any, Any]]: ... +@overload +def linspace( + start: _ArrayLikeComplex_co, + stop: _ArrayLikeComplex_co, + num: SupportsIndex = ..., + endpoint: bool = ..., + retstep: L[False] = ..., + dtype: _DTypeLike[_SCT] = ..., + axis: SupportsIndex = ..., +) -> NDArray[_SCT]: ... +@overload +def linspace( + start: _ArrayLikeComplex_co, + stop: _ArrayLikeComplex_co, + num: SupportsIndex = ..., + endpoint: bool = ..., + retstep: L[False] = ..., + dtype: DTypeLike = ..., + axis: SupportsIndex = ..., +) -> NDArray[Any]: ... +@overload +def linspace( + start: _ArrayLikeFloat_co, + stop: _ArrayLikeFloat_co, + num: SupportsIndex = ..., + endpoint: bool = ..., + retstep: L[True] = ..., + dtype: None = ..., + axis: SupportsIndex = ..., +) -> tuple[NDArray[floating[Any]], floating[Any]]: ... +@overload +def linspace( + start: _ArrayLikeComplex_co, + stop: _ArrayLikeComplex_co, + num: SupportsIndex = ..., + endpoint: bool = ..., + retstep: L[True] = ..., + dtype: None = ..., + axis: SupportsIndex = ..., +) -> tuple[NDArray[complexfloating[Any, Any]], complexfloating[Any, Any]]: ... +@overload +def linspace( + start: _ArrayLikeComplex_co, + stop: _ArrayLikeComplex_co, + num: SupportsIndex = ..., + endpoint: bool = ..., + retstep: L[True] = ..., + dtype: _DTypeLike[_SCT] = ..., + axis: SupportsIndex = ..., +) -> tuple[NDArray[_SCT], _SCT]: ... +@overload +def linspace( + start: _ArrayLikeComplex_co, + stop: _ArrayLikeComplex_co, + num: SupportsIndex = ..., + endpoint: bool = ..., + retstep: L[True] = ..., + dtype: DTypeLike = ..., + axis: SupportsIndex = ..., +) -> tuple[NDArray[Any], Any]: ... + +@overload +def logspace( + start: _ArrayLikeFloat_co, + stop: _ArrayLikeFloat_co, + num: SupportsIndex = ..., + endpoint: bool = ..., + base: _ArrayLikeFloat_co = ..., + dtype: None = ..., + axis: SupportsIndex = ..., +) -> NDArray[floating[Any]]: ... 
+@overload +def logspace( + start: _ArrayLikeComplex_co, + stop: _ArrayLikeComplex_co, + num: SupportsIndex = ..., + endpoint: bool = ..., + base: _ArrayLikeComplex_co = ..., + dtype: None = ..., + axis: SupportsIndex = ..., +) -> NDArray[complexfloating[Any, Any]]: ... +@overload +def logspace( + start: _ArrayLikeComplex_co, + stop: _ArrayLikeComplex_co, + num: SupportsIndex = ..., + endpoint: bool = ..., + base: _ArrayLikeComplex_co = ..., + dtype: _DTypeLike[_SCT] = ..., + axis: SupportsIndex = ..., +) -> NDArray[_SCT]: ... +@overload +def logspace( + start: _ArrayLikeComplex_co, + stop: _ArrayLikeComplex_co, + num: SupportsIndex = ..., + endpoint: bool = ..., + base: _ArrayLikeComplex_co = ..., + dtype: DTypeLike = ..., + axis: SupportsIndex = ..., +) -> NDArray[Any]: ... + +@overload +def geomspace( + start: _ArrayLikeFloat_co, + stop: _ArrayLikeFloat_co, + num: SupportsIndex = ..., + endpoint: bool = ..., + dtype: None = ..., + axis: SupportsIndex = ..., +) -> NDArray[floating[Any]]: ... +@overload +def geomspace( + start: _ArrayLikeComplex_co, + stop: _ArrayLikeComplex_co, + num: SupportsIndex = ..., + endpoint: bool = ..., + dtype: None = ..., + axis: SupportsIndex = ..., +) -> NDArray[complexfloating[Any, Any]]: ... +@overload +def geomspace( + start: _ArrayLikeComplex_co, + stop: _ArrayLikeComplex_co, + num: SupportsIndex = ..., + endpoint: bool = ..., + dtype: _DTypeLike[_SCT] = ..., + axis: SupportsIndex = ..., +) -> NDArray[_SCT]: ... +@overload +def geomspace( + start: _ArrayLikeComplex_co, + stop: _ArrayLikeComplex_co, + num: SupportsIndex = ..., + endpoint: bool = ..., + dtype: DTypeLike = ..., + axis: SupportsIndex = ..., +) -> NDArray[Any]: ... + +# Re-exported to `np.lib.function_base` +def add_newdoc( + place: str, + obj: str, + doc: str | tuple[str, str] | list[tuple[str, str]], + warn_on_python: bool = ..., +) -> None: ... diff --git a/.venv/lib/python3.12/site-packages/numpy/core/getlimits.py b/.venv/lib/python3.12/site-packages/numpy/core/getlimits.py new file mode 100644 index 00000000..13414c2a --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/getlimits.py @@ -0,0 +1,735 @@ +"""Machine limits for Float32 and Float64 and (long double) if available... + +""" +__all__ = ['finfo', 'iinfo'] + +import warnings + +from .._utils import set_module +from ._machar import MachAr +from . import numeric +from . import numerictypes as ntypes +from .numeric import array, inf, NaN +from .umath import log10, exp2, nextafter, isnan + + +def _fr0(a): + """fix rank-0 --> rank-1""" + if a.ndim == 0: + a = a.copy() + a.shape = (1,) + return a + + +def _fr1(a): + """fix rank > 0 --> rank-0""" + if a.size == 1: + a = a.copy() + a.shape = () + return a + + +class MachArLike: + """ Object to simulate MachAr instance """ + def __init__(self, ftype, *, eps, epsneg, huge, tiny, + ibeta, smallest_subnormal=None, **kwargs): + self.params = _MACHAR_PARAMS[ftype] + self.ftype = ftype + self.title = self.params['title'] + # Parameter types same as for discovered MachAr object. 
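+        # If no smallest_subnormal is supplied, fall back to the first
+        # representable value above zero for this float type, i.e.
+        # nextafter(0, 1) evaluated in ``ftype``.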
+ if not smallest_subnormal: + self._smallest_subnormal = nextafter( + self.ftype(0), self.ftype(1), dtype=self.ftype) + else: + self._smallest_subnormal = smallest_subnormal + self.epsilon = self.eps = self._float_to_float(eps) + self.epsneg = self._float_to_float(epsneg) + self.xmax = self.huge = self._float_to_float(huge) + self.xmin = self._float_to_float(tiny) + self.smallest_normal = self.tiny = self._float_to_float(tiny) + self.ibeta = self.params['itype'](ibeta) + self.__dict__.update(kwargs) + self.precision = int(-log10(self.eps)) + self.resolution = self._float_to_float( + self._float_conv(10) ** (-self.precision)) + self._str_eps = self._float_to_str(self.eps) + self._str_epsneg = self._float_to_str(self.epsneg) + self._str_xmin = self._float_to_str(self.xmin) + self._str_xmax = self._float_to_str(self.xmax) + self._str_resolution = self._float_to_str(self.resolution) + self._str_smallest_normal = self._float_to_str(self.xmin) + + @property + def smallest_subnormal(self): + """Return the value for the smallest subnormal. + + Returns + ------- + smallest_subnormal : float + value for the smallest subnormal. + + Warns + ----- + UserWarning + If the calculated value for the smallest subnormal is zero. + """ + # Check that the calculated value is not zero, in case it raises a + # warning. + value = self._smallest_subnormal + if self.ftype(0) == value: + warnings.warn( + 'The value of the smallest subnormal for {} type ' + 'is zero.'.format(self.ftype), UserWarning, stacklevel=2) + + return self._float_to_float(value) + + @property + def _str_smallest_subnormal(self): + """Return the string representation of the smallest subnormal.""" + return self._float_to_str(self.smallest_subnormal) + + def _float_to_float(self, value): + """Converts float to float. + + Parameters + ---------- + value : float + value to be converted. + """ + return _fr1(self._float_conv(value)) + + def _float_conv(self, value): + """Converts float to conv. + + Parameters + ---------- + value : float + value to be converted. + """ + return array([value], self.ftype) + + def _float_to_str(self, value): + """Converts float to str. + + Parameters + ---------- + value : float + value to be converted. + """ + return self.params['fmt'] % array(_fr0(value)[0], self.ftype) + + +_convert_to_float = { + ntypes.csingle: ntypes.single, + ntypes.complex_: ntypes.float_, + ntypes.clongfloat: ntypes.longfloat + } + +# Parameters for creating MachAr / MachAr-like objects +_title_fmt = 'numpy {} precision floating point number' +_MACHAR_PARAMS = { + ntypes.double: dict( + itype = ntypes.int64, + fmt = '%24.16e', + title = _title_fmt.format('double')), + ntypes.single: dict( + itype = ntypes.int32, + fmt = '%15.7e', + title = _title_fmt.format('single')), + ntypes.longdouble: dict( + itype = ntypes.longlong, + fmt = '%s', + title = _title_fmt.format('long double')), + ntypes.half: dict( + itype = ntypes.int16, + fmt = '%12.5e', + title = _title_fmt.format('half'))} + +# Key to identify the floating point type. Key is result of +# ftype('-0.1').newbyteorder('<').tobytes() +# +# 20230201 - use (ftype(-1.0) / ftype(10.0)).newbyteorder('<').tobytes() +# instead because stold may have deficiencies on some platforms. 
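+# (For float64 the resulting key is b'\x9a\x99\x99\x99\x99\x99\xb9\xbf', the
+# little-endian bytes of -0.1, as registered for float64 below.)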
+# See: +# https://perl5.git.perl.org/perl.git/blob/3118d7d684b56cbeb702af874f4326683c45f045:/Configure + +_KNOWN_TYPES = {} +def _register_type(machar, bytepat): + _KNOWN_TYPES[bytepat] = machar +_float_ma = {} + + +def _register_known_types(): + # Known parameters for float16 + # See docstring of MachAr class for description of parameters. + f16 = ntypes.float16 + float16_ma = MachArLike(f16, + machep=-10, + negep=-11, + minexp=-14, + maxexp=16, + it=10, + iexp=5, + ibeta=2, + irnd=5, + ngrd=0, + eps=exp2(f16(-10)), + epsneg=exp2(f16(-11)), + huge=f16(65504), + tiny=f16(2 ** -14)) + _register_type(float16_ma, b'f\xae') + _float_ma[16] = float16_ma + + # Known parameters for float32 + f32 = ntypes.float32 + float32_ma = MachArLike(f32, + machep=-23, + negep=-24, + minexp=-126, + maxexp=128, + it=23, + iexp=8, + ibeta=2, + irnd=5, + ngrd=0, + eps=exp2(f32(-23)), + epsneg=exp2(f32(-24)), + huge=f32((1 - 2 ** -24) * 2**128), + tiny=exp2(f32(-126))) + _register_type(float32_ma, b'\xcd\xcc\xcc\xbd') + _float_ma[32] = float32_ma + + # Known parameters for float64 + f64 = ntypes.float64 + epsneg_f64 = 2.0 ** -53.0 + tiny_f64 = 2.0 ** -1022.0 + float64_ma = MachArLike(f64, + machep=-52, + negep=-53, + minexp=-1022, + maxexp=1024, + it=52, + iexp=11, + ibeta=2, + irnd=5, + ngrd=0, + eps=2.0 ** -52.0, + epsneg=epsneg_f64, + huge=(1.0 - epsneg_f64) / tiny_f64 * f64(4), + tiny=tiny_f64) + _register_type(float64_ma, b'\x9a\x99\x99\x99\x99\x99\xb9\xbf') + _float_ma[64] = float64_ma + + # Known parameters for IEEE 754 128-bit binary float + ld = ntypes.longdouble + epsneg_f128 = exp2(ld(-113)) + tiny_f128 = exp2(ld(-16382)) + # Ignore runtime error when this is not f128 + with numeric.errstate(all='ignore'): + huge_f128 = (ld(1) - epsneg_f128) / tiny_f128 * ld(4) + float128_ma = MachArLike(ld, + machep=-112, + negep=-113, + minexp=-16382, + maxexp=16384, + it=112, + iexp=15, + ibeta=2, + irnd=5, + ngrd=0, + eps=exp2(ld(-112)), + epsneg=epsneg_f128, + huge=huge_f128, + tiny=tiny_f128) + # IEEE 754 128-bit binary float + _register_type(float128_ma, + b'\x9a\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\xfb\xbf') + _float_ma[128] = float128_ma + + # Known parameters for float80 (Intel 80-bit extended precision) + epsneg_f80 = exp2(ld(-64)) + tiny_f80 = exp2(ld(-16382)) + # Ignore runtime error when this is not f80 + with numeric.errstate(all='ignore'): + huge_f80 = (ld(1) - epsneg_f80) / tiny_f80 * ld(4) + float80_ma = MachArLike(ld, + machep=-63, + negep=-64, + minexp=-16382, + maxexp=16384, + it=63, + iexp=15, + ibeta=2, + irnd=5, + ngrd=0, + eps=exp2(ld(-63)), + epsneg=epsneg_f80, + huge=huge_f80, + tiny=tiny_f80) + # float80, first 10 bytes containing actual storage + _register_type(float80_ma, b'\xcd\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xfb\xbf') + _float_ma[80] = float80_ma + + # Guessed / known parameters for double double; see: + # https://en.wikipedia.org/wiki/Quadruple-precision_floating-point_format#Double-double_arithmetic + # These numbers have the same exponent range as float64, but extended number of + # digits in the significand. + huge_dd = nextafter(ld(inf), ld(0), dtype=ld) + # As the smallest_normal in double double is so hard to calculate we set + # it to NaN. 
+    smallest_normal_dd = NaN
+    # Leave the same value for the smallest subnormal as double
+    smallest_subnormal_dd = ld(nextafter(0., 1.))
+    float_dd_ma = MachArLike(ld,
+                             machep=-105,
+                             negep=-106,
+                             minexp=-1022,
+                             maxexp=1024,
+                             it=105,
+                             iexp=11,
+                             ibeta=2,
+                             irnd=5,
+                             ngrd=0,
+                             eps=exp2(ld(-105)),
+                             epsneg=exp2(ld(-106)),
+                             huge=huge_dd,
+                             tiny=smallest_normal_dd,
+                             smallest_subnormal=smallest_subnormal_dd)
+    # double double; low, high order (e.g. PPC 64)
+    _register_type(float_dd_ma,
+                   b'\x9a\x99\x99\x99\x99\x99Y<\x9a\x99\x99\x99\x99\x99\xb9\xbf')
+    # double double; high, low order (e.g. PPC 64 le)
+    _register_type(float_dd_ma,
+                   b'\x9a\x99\x99\x99\x99\x99\xb9\xbf\x9a\x99\x99\x99\x99\x99Y<')
+    _float_ma['dd'] = float_dd_ma
+
+
+def _get_machar(ftype):
+    """ Get MachAr instance or MachAr-like instance
+
+    Get parameters for floating point type, by first trying signatures of
+    various known floating point types, then, if none match, attempting to
+    identify parameters by analysis.
+
+    Parameters
+    ----------
+    ftype : class
+        Numpy floating point type class (e.g. ``np.float64``)
+
+    Returns
+    -------
+    ma_like : instance of :class:`MachAr` or :class:`MachArLike`
+        Object giving floating point parameters for `ftype`.
+
+    Warns
+    -----
+    UserWarning
+        If the binary signature of the float type is not in the dictionary of
+        known float types.
+    """
+    params = _MACHAR_PARAMS.get(ftype)
+    if params is None:
+        raise ValueError(repr(ftype))
+    # Detect known / suspected types
+    # ftype(-1.0) / ftype(10.0) is better than ftype('-0.1') because stold
+    # may be deficient
+    key = (ftype(-1.0) / ftype(10.)).newbyteorder('<').tobytes()
+    ma_like = None
+    if ftype == ntypes.longdouble:
+        # Could be 80 bit == 10 byte extended precision, where last bytes can
+        # be random garbage.
+        # Comparing first 10 bytes to pattern first to avoid branching on the
+        # random garbage.
+        ma_like = _KNOWN_TYPES.get(key[:10])
+    if ma_like is None:
+        # see if the full key is known.
+        ma_like = _KNOWN_TYPES.get(key)
+    if ma_like is None and len(key) == 16:
+        # machine limits could be f80 masquerading as np.float128,
+        # find all keys with length 16 and make new dict, but make the keys
+        # only 10 bytes long, the last bytes can be random garbage
+        _kt = {k[:10]: v for k, v in _KNOWN_TYPES.items() if len(k) == 16}
+        ma_like = _kt.get(key[:10])
+    if ma_like is not None:
+        return ma_like
+    # Fall back to parameter discovery
+    warnings.warn(
+        f'Signature {key} for {ftype} does not match any known type: '
+        'falling back to type probe function.\n'
+        'This warning indicates broken support for the dtype!',
+        UserWarning, stacklevel=2)
+    return _discovered_machar(ftype)
+
+
+def _discovered_machar(ftype):
+    """ Create MachAr instance with found information on float types
+
+    TODO: MachAr should be retired completely ideally. We currently only
+    ever use it on systems with broken longdouble (valgrind, WSL).
+    """
+    params = _MACHAR_PARAMS[ftype]
+    return MachAr(lambda v: array([v], ftype),
+                  lambda v:_fr0(v.astype(params['itype']))[0],
+                  lambda v:array(_fr0(v)[0], ftype),
+                  lambda v: params['fmt'] % array(_fr0(v)[0], ftype),
+                  params['title'])
+
+
+@set_module('numpy')
+class finfo:
+    """
+    finfo(dtype)
+
+    Machine limits for floating point types.
+
+    Attributes
+    ----------
+    bits : int
+        The number of bits occupied by the type.
+    dtype : dtype
+        Returns the dtype for which `finfo` returns information. For complex
+        input, the returned dtype is the associated ``float*`` dtype for its
+        real and complex components.
+ eps : float + The difference between 1.0 and the next smallest representable float + larger than 1.0. For example, for 64-bit binary floats in the IEEE-754 + standard, ``eps = 2**-52``, approximately 2.22e-16. + epsneg : float + The difference between 1.0 and the next smallest representable float + less than 1.0. For example, for 64-bit binary floats in the IEEE-754 + standard, ``epsneg = 2**-53``, approximately 1.11e-16. + iexp : int + The number of bits in the exponent portion of the floating point + representation. + machep : int + The exponent that yields `eps`. + max : floating point number of the appropriate type + The largest representable number. + maxexp : int + The smallest positive power of the base (2) that causes overflow. + min : floating point number of the appropriate type + The smallest representable number, typically ``-max``. + minexp : int + The most negative power of the base (2) consistent with there + being no leading 0's in the mantissa. + negep : int + The exponent that yields `epsneg`. + nexp : int + The number of bits in the exponent including its sign and bias. + nmant : int + The number of bits in the mantissa. + precision : int + The approximate number of decimal digits to which this kind of + float is precise. + resolution : floating point number of the appropriate type + The approximate decimal resolution of this type, i.e., + ``10**-precision``. + tiny : float + An alias for `smallest_normal`, kept for backwards compatibility. + smallest_normal : float + The smallest positive floating point number with 1 as leading bit in + the mantissa following IEEE-754 (see Notes). + smallest_subnormal : float + The smallest positive floating point number with 0 as leading bit in + the mantissa following IEEE-754. + + Parameters + ---------- + dtype : float, dtype, or instance + Kind of floating point or complex floating point + data-type about which to get information. + + See Also + -------- + iinfo : The equivalent for integer data types. + spacing : The distance between a value and the nearest adjacent number + nextafter : The next floating point value after x1 towards x2 + + Notes + ----- + For developers of NumPy: do not instantiate this at the module level. + The initial calculation of these parameters is expensive and negatively + impacts import times. These objects are cached, so calling ``finfo()`` + repeatedly inside your functions is not a problem. + + Note that ``smallest_normal`` is not actually the smallest positive + representable value in a NumPy floating point type. As in the IEEE-754 + standard [1]_, NumPy floating point types make use of subnormal numbers to + fill the gap between 0 and ``smallest_normal``. However, subnormal numbers + may have significantly reduced precision [2]_. + + This function can also be used for complex data types as well. If used, + the output will be the same as the corresponding real float type + (e.g. numpy.finfo(numpy.csingle) is the same as numpy.finfo(numpy.single)). + However, the output is true for the real and imaginary components. + + References + ---------- + .. [1] IEEE Standard for Floating-Point Arithmetic, IEEE Std 754-2008, + pp.1-70, 2008, http://www.doi.org/10.1109/IEEESTD.2008.4610935 + .. 
[2] Wikipedia, "Denormal Numbers", + https://en.wikipedia.org/wiki/Denormal_number + + Examples + -------- + >>> np.finfo(np.float64).dtype + dtype('float64') + >>> np.finfo(np.complex64).dtype + dtype('float32') + + """ + + _finfo_cache = {} + + def __new__(cls, dtype): + try: + obj = cls._finfo_cache.get(dtype) # most common path + if obj is not None: + return obj + except TypeError: + pass + + if dtype is None: + # Deprecated in NumPy 1.25, 2023-01-16 + warnings.warn( + "finfo() dtype cannot be None. This behavior will " + "raise an error in the future. (Deprecated in NumPy 1.25)", + DeprecationWarning, + stacklevel=2 + ) + + try: + dtype = numeric.dtype(dtype) + except TypeError: + # In case a float instance was given + dtype = numeric.dtype(type(dtype)) + + obj = cls._finfo_cache.get(dtype) + if obj is not None: + return obj + dtypes = [dtype] + newdtype = numeric.obj2sctype(dtype) + if newdtype is not dtype: + dtypes.append(newdtype) + dtype = newdtype + if not issubclass(dtype, numeric.inexact): + raise ValueError("data type %r not inexact" % (dtype)) + obj = cls._finfo_cache.get(dtype) + if obj is not None: + return obj + if not issubclass(dtype, numeric.floating): + newdtype = _convert_to_float[dtype] + if newdtype is not dtype: + # dtype changed, for example from complex128 to float64 + dtypes.append(newdtype) + dtype = newdtype + + obj = cls._finfo_cache.get(dtype, None) + if obj is not None: + # the original dtype was not in the cache, but the new + # dtype is in the cache. we add the original dtypes to + # the cache and return the result + for dt in dtypes: + cls._finfo_cache[dt] = obj + return obj + obj = object.__new__(cls)._init(dtype) + for dt in dtypes: + cls._finfo_cache[dt] = obj + return obj + + def _init(self, dtype): + self.dtype = numeric.dtype(dtype) + machar = _get_machar(dtype) + + for word in ['precision', 'iexp', + 'maxexp', 'minexp', 'negep', + 'machep']: + setattr(self, word, getattr(machar, word)) + for word in ['resolution', 'epsneg', 'smallest_subnormal']: + setattr(self, word, getattr(machar, word).flat[0]) + self.bits = self.dtype.itemsize * 8 + self.max = machar.huge.flat[0] + self.min = -self.max + self.eps = machar.eps.flat[0] + self.nexp = machar.iexp + self.nmant = machar.it + self._machar = machar + self._str_tiny = machar._str_xmin.strip() + self._str_max = machar._str_xmax.strip() + self._str_epsneg = machar._str_epsneg.strip() + self._str_eps = machar._str_eps.strip() + self._str_resolution = machar._str_resolution.strip() + self._str_smallest_normal = machar._str_smallest_normal.strip() + self._str_smallest_subnormal = machar._str_smallest_subnormal.strip() + return self + + def __str__(self): + fmt = ( + 'Machine parameters for %(dtype)s\n' + '---------------------------------------------------------------\n' + 'precision = %(precision)3s resolution = %(_str_resolution)s\n' + 'machep = %(machep)6s eps = %(_str_eps)s\n' + 'negep = %(negep)6s epsneg = %(_str_epsneg)s\n' + 'minexp = %(minexp)6s tiny = %(_str_tiny)s\n' + 'maxexp = %(maxexp)6s max = %(_str_max)s\n' + 'nexp = %(nexp)6s min = -max\n' + 'smallest_normal = %(_str_smallest_normal)s ' + 'smallest_subnormal = %(_str_smallest_subnormal)s\n' + '---------------------------------------------------------------\n' + ) + return fmt % self.__dict__ + + def __repr__(self): + c = self.__class__.__name__ + d = self.__dict__.copy() + d['klass'] = c + return (("%(klass)s(resolution=%(resolution)s, min=-%(_str_max)s," + " max=%(_str_max)s, dtype=%(dtype)s)") % d) + + @property + def 
smallest_normal(self): + """Return the value for the smallest normal. + + Returns + ------- + smallest_normal : float + Value for the smallest normal. + + Warns + ----- + UserWarning + If the calculated value for the smallest normal is requested for + double-double. + """ + # This check is necessary because the value for smallest_normal is + # platform dependent for longdouble types. + if isnan(self._machar.smallest_normal.flat[0]): + warnings.warn( + 'The value of smallest normal is undefined for double double', + UserWarning, stacklevel=2) + return self._machar.smallest_normal.flat[0] + + @property + def tiny(self): + """Return the value for tiny, alias of smallest_normal. + + Returns + ------- + tiny : float + Value for the smallest normal, alias of smallest_normal. + + Warns + ----- + UserWarning + If the calculated value for the smallest normal is requested for + double-double. + """ + return self.smallest_normal + + +@set_module('numpy') +class iinfo: + """ + iinfo(type) + + Machine limits for integer types. + + Attributes + ---------- + bits : int + The number of bits occupied by the type. + dtype : dtype + Returns the dtype for which `iinfo` returns information. + min : int + The smallest integer expressible by the type. + max : int + The largest integer expressible by the type. + + Parameters + ---------- + int_type : integer type, dtype, or instance + The kind of integer data type to get information about. + + See Also + -------- + finfo : The equivalent for floating point data types. + + Examples + -------- + With types: + + >>> ii16 = np.iinfo(np.int16) + >>> ii16.min + -32768 + >>> ii16.max + 32767 + >>> ii32 = np.iinfo(np.int32) + >>> ii32.min + -2147483648 + >>> ii32.max + 2147483647 + + With instances: + + >>> ii32 = np.iinfo(np.int32(10)) + >>> ii32.min + -2147483648 + >>> ii32.max + 2147483647 + + """ + + _min_vals = {} + _max_vals = {} + + def __init__(self, int_type): + try: + self.dtype = numeric.dtype(int_type) + except TypeError: + self.dtype = numeric.dtype(type(int_type)) + self.kind = self.dtype.kind + self.bits = self.dtype.itemsize * 8 + self.key = "%s%d" % (self.kind, self.bits) + if self.kind not in 'iu': + raise ValueError("Invalid integer data type %r." 
% (self.kind,)) + + @property + def min(self): + """Minimum value of given dtype.""" + if self.kind == 'u': + return 0 + else: + try: + val = iinfo._min_vals[self.key] + except KeyError: + val = int(-(1 << (self.bits-1))) + iinfo._min_vals[self.key] = val + return val + + @property + def max(self): + """Maximum value of given dtype.""" + try: + val = iinfo._max_vals[self.key] + except KeyError: + if self.kind == 'u': + val = int((1 << self.bits) - 1) + else: + val = int((1 << (self.bits-1)) - 1) + iinfo._max_vals[self.key] = val + return val + + def __str__(self): + """String representation.""" + fmt = ( + 'Machine parameters for %(dtype)s\n' + '---------------------------------------------------------------\n' + 'min = %(min)s\n' + 'max = %(max)s\n' + '---------------------------------------------------------------\n' + ) + return fmt % {'dtype': self.dtype, 'min': self.min, 'max': self.max} + + def __repr__(self): + return "%s(min=%s, max=%s, dtype=%s)" % (self.__class__.__name__, + self.min, self.max, self.dtype) diff --git a/.venv/lib/python3.12/site-packages/numpy/core/getlimits.pyi b/.venv/lib/python3.12/site-packages/numpy/core/getlimits.pyi new file mode 100644 index 00000000..da5e3c23 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/getlimits.pyi @@ -0,0 +1,6 @@ +from numpy import ( + finfo as finfo, + iinfo as iinfo, +) + +__all__: list[str] diff --git a/.venv/lib/python3.12/site-packages/numpy/core/include/numpy/__multiarray_api.c b/.venv/lib/python3.12/site-packages/numpy/core/include/numpy/__multiarray_api.c new file mode 100644 index 00000000..4fa051c1 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/include/numpy/__multiarray_api.c @@ -0,0 +1,314 @@ + +/* These pointers will be stored in the C-object for use in other + extension modules +*/ + +void *PyArray_API[] = { + (void *) PyArray_GetNDArrayCVersion, + (void *) &PyBigArray_Type, + (void *) &PyArray_Type, + (void *) &PyArrayDescr_Type, + (void *) &PyArrayFlags_Type, + (void *) &PyArrayIter_Type, + (void *) &PyArrayMultiIter_Type, + (int *) &NPY_NUMUSERTYPES, + (void *) &PyBoolArrType_Type, + (void *) &_PyArrayScalar_BoolValues, + (void *) &PyGenericArrType_Type, + (void *) &PyNumberArrType_Type, + (void *) &PyIntegerArrType_Type, + (void *) &PySignedIntegerArrType_Type, + (void *) &PyUnsignedIntegerArrType_Type, + (void *) &PyInexactArrType_Type, + (void *) &PyFloatingArrType_Type, + (void *) &PyComplexFloatingArrType_Type, + (void *) &PyFlexibleArrType_Type, + (void *) &PyCharacterArrType_Type, + (void *) &PyByteArrType_Type, + (void *) &PyShortArrType_Type, + (void *) &PyIntArrType_Type, + (void *) &PyLongArrType_Type, + (void *) &PyLongLongArrType_Type, + (void *) &PyUByteArrType_Type, + (void *) &PyUShortArrType_Type, + (void *) &PyUIntArrType_Type, + (void *) &PyULongArrType_Type, + (void *) &PyULongLongArrType_Type, + (void *) &PyFloatArrType_Type, + (void *) &PyDoubleArrType_Type, + (void *) &PyLongDoubleArrType_Type, + (void *) &PyCFloatArrType_Type, + (void *) &PyCDoubleArrType_Type, + (void *) &PyCLongDoubleArrType_Type, + (void *) &PyObjectArrType_Type, + (void *) &PyStringArrType_Type, + (void *) &PyUnicodeArrType_Type, + (void *) &PyVoidArrType_Type, + (void *) PyArray_SetNumericOps, + (void *) PyArray_GetNumericOps, + (void *) PyArray_INCREF, + (void *) PyArray_XDECREF, + (void *) PyArray_SetStringFunction, + (void *) PyArray_DescrFromType, + (void *) PyArray_TypeObjectFromType, + (void *) PyArray_Zero, + (void *) PyArray_One, + (void *) PyArray_CastToType, + (void *) 
PyArray_CastTo, + (void *) PyArray_CastAnyTo, + (void *) PyArray_CanCastSafely, + (void *) PyArray_CanCastTo, + (void *) PyArray_ObjectType, + (void *) PyArray_DescrFromObject, + (void *) PyArray_ConvertToCommonType, + (void *) PyArray_DescrFromScalar, + (void *) PyArray_DescrFromTypeObject, + (void *) PyArray_Size, + (void *) PyArray_Scalar, + (void *) PyArray_FromScalar, + (void *) PyArray_ScalarAsCtype, + (void *) PyArray_CastScalarToCtype, + (void *) PyArray_CastScalarDirect, + (void *) PyArray_ScalarFromObject, + (void *) PyArray_GetCastFunc, + (void *) PyArray_FromDims, + (void *) PyArray_FromDimsAndDataAndDescr, + (void *) PyArray_FromAny, + (void *) PyArray_EnsureArray, + (void *) PyArray_EnsureAnyArray, + (void *) PyArray_FromFile, + (void *) PyArray_FromString, + (void *) PyArray_FromBuffer, + (void *) PyArray_FromIter, + (void *) PyArray_Return, + (void *) PyArray_GetField, + (void *) PyArray_SetField, + (void *) PyArray_Byteswap, + (void *) PyArray_Resize, + (void *) PyArray_MoveInto, + (void *) PyArray_CopyInto, + (void *) PyArray_CopyAnyInto, + (void *) PyArray_CopyObject, + (void *) PyArray_NewCopy, + (void *) PyArray_ToList, + (void *) PyArray_ToString, + (void *) PyArray_ToFile, + (void *) PyArray_Dump, + (void *) PyArray_Dumps, + (void *) PyArray_ValidType, + (void *) PyArray_UpdateFlags, + (void *) PyArray_New, + (void *) PyArray_NewFromDescr, + (void *) PyArray_DescrNew, + (void *) PyArray_DescrNewFromType, + (void *) PyArray_GetPriority, + (void *) PyArray_IterNew, + (void *) PyArray_MultiIterNew, + (void *) PyArray_PyIntAsInt, + (void *) PyArray_PyIntAsIntp, + (void *) PyArray_Broadcast, + (void *) PyArray_FillObjectArray, + (void *) PyArray_FillWithScalar, + (void *) PyArray_CheckStrides, + (void *) PyArray_DescrNewByteorder, + (void *) PyArray_IterAllButAxis, + (void *) PyArray_CheckFromAny, + (void *) PyArray_FromArray, + (void *) PyArray_FromInterface, + (void *) PyArray_FromStructInterface, + (void *) PyArray_FromArrayAttr, + (void *) PyArray_ScalarKind, + (void *) PyArray_CanCoerceScalar, + (void *) PyArray_NewFlagsObject, + (void *) PyArray_CanCastScalar, + (void *) PyArray_CompareUCS4, + (void *) PyArray_RemoveSmallest, + (void *) PyArray_ElementStrides, + (void *) PyArray_Item_INCREF, + (void *) PyArray_Item_XDECREF, + (void *) PyArray_FieldNames, + (void *) PyArray_Transpose, + (void *) PyArray_TakeFrom, + (void *) PyArray_PutTo, + (void *) PyArray_PutMask, + (void *) PyArray_Repeat, + (void *) PyArray_Choose, + (void *) PyArray_Sort, + (void *) PyArray_ArgSort, + (void *) PyArray_SearchSorted, + (void *) PyArray_ArgMax, + (void *) PyArray_ArgMin, + (void *) PyArray_Reshape, + (void *) PyArray_Newshape, + (void *) PyArray_Squeeze, + (void *) PyArray_View, + (void *) PyArray_SwapAxes, + (void *) PyArray_Max, + (void *) PyArray_Min, + (void *) PyArray_Ptp, + (void *) PyArray_Mean, + (void *) PyArray_Trace, + (void *) PyArray_Diagonal, + (void *) PyArray_Clip, + (void *) PyArray_Conjugate, + (void *) PyArray_Nonzero, + (void *) PyArray_Std, + (void *) PyArray_Sum, + (void *) PyArray_CumSum, + (void *) PyArray_Prod, + (void *) PyArray_CumProd, + (void *) PyArray_All, + (void *) PyArray_Any, + (void *) PyArray_Compress, + (void *) PyArray_Flatten, + (void *) PyArray_Ravel, + (void *) PyArray_MultiplyList, + (void *) PyArray_MultiplyIntList, + (void *) PyArray_GetPtr, + (void *) PyArray_CompareLists, + (void *) PyArray_AsCArray, + (void *) PyArray_As1D, + (void *) PyArray_As2D, + (void *) PyArray_Free, + (void *) PyArray_Converter, + (void *) 
PyArray_IntpFromSequence, + (void *) PyArray_Concatenate, + (void *) PyArray_InnerProduct, + (void *) PyArray_MatrixProduct, + (void *) PyArray_CopyAndTranspose, + (void *) PyArray_Correlate, + (void *) PyArray_TypestrConvert, + (void *) PyArray_DescrConverter, + (void *) PyArray_DescrConverter2, + (void *) PyArray_IntpConverter, + (void *) PyArray_BufferConverter, + (void *) PyArray_AxisConverter, + (void *) PyArray_BoolConverter, + (void *) PyArray_ByteorderConverter, + (void *) PyArray_OrderConverter, + (void *) PyArray_EquivTypes, + (void *) PyArray_Zeros, + (void *) PyArray_Empty, + (void *) PyArray_Where, + (void *) PyArray_Arange, + (void *) PyArray_ArangeObj, + (void *) PyArray_SortkindConverter, + (void *) PyArray_LexSort, + (void *) PyArray_Round, + (void *) PyArray_EquivTypenums, + (void *) PyArray_RegisterDataType, + (void *) PyArray_RegisterCastFunc, + (void *) PyArray_RegisterCanCast, + (void *) PyArray_InitArrFuncs, + (void *) PyArray_IntTupleFromIntp, + (void *) PyArray_TypeNumFromName, + (void *) PyArray_ClipmodeConverter, + (void *) PyArray_OutputConverter, + (void *) PyArray_BroadcastToShape, + (void *) _PyArray_SigintHandler, + (void *) _PyArray_GetSigintBuf, + (void *) PyArray_DescrAlignConverter, + (void *) PyArray_DescrAlignConverter2, + (void *) PyArray_SearchsideConverter, + (void *) PyArray_CheckAxis, + (void *) PyArray_OverflowMultiplyList, + (void *) PyArray_CompareString, + (void *) PyArray_MultiIterFromObjects, + (void *) PyArray_GetEndianness, + (void *) PyArray_GetNDArrayCFeatureVersion, + (void *) PyArray_Correlate2, + (void *) PyArray_NeighborhoodIterNew, + (void *) &PyTimeIntegerArrType_Type, + (void *) &PyDatetimeArrType_Type, + (void *) &PyTimedeltaArrType_Type, + (void *) &PyHalfArrType_Type, + (void *) &NpyIter_Type, + (void *) PyArray_SetDatetimeParseFunction, + (void *) PyArray_DatetimeToDatetimeStruct, + (void *) PyArray_TimedeltaToTimedeltaStruct, + (void *) PyArray_DatetimeStructToDatetime, + (void *) PyArray_TimedeltaStructToTimedelta, + (void *) NpyIter_New, + (void *) NpyIter_MultiNew, + (void *) NpyIter_AdvancedNew, + (void *) NpyIter_Copy, + (void *) NpyIter_Deallocate, + (void *) NpyIter_HasDelayedBufAlloc, + (void *) NpyIter_HasExternalLoop, + (void *) NpyIter_EnableExternalLoop, + (void *) NpyIter_GetInnerStrideArray, + (void *) NpyIter_GetInnerLoopSizePtr, + (void *) NpyIter_Reset, + (void *) NpyIter_ResetBasePointers, + (void *) NpyIter_ResetToIterIndexRange, + (void *) NpyIter_GetNDim, + (void *) NpyIter_GetNOp, + (void *) NpyIter_GetIterNext, + (void *) NpyIter_GetIterSize, + (void *) NpyIter_GetIterIndexRange, + (void *) NpyIter_GetIterIndex, + (void *) NpyIter_GotoIterIndex, + (void *) NpyIter_HasMultiIndex, + (void *) NpyIter_GetShape, + (void *) NpyIter_GetGetMultiIndex, + (void *) NpyIter_GotoMultiIndex, + (void *) NpyIter_RemoveMultiIndex, + (void *) NpyIter_HasIndex, + (void *) NpyIter_IsBuffered, + (void *) NpyIter_IsGrowInner, + (void *) NpyIter_GetBufferSize, + (void *) NpyIter_GetIndexPtr, + (void *) NpyIter_GotoIndex, + (void *) NpyIter_GetDataPtrArray, + (void *) NpyIter_GetDescrArray, + (void *) NpyIter_GetOperandArray, + (void *) NpyIter_GetIterView, + (void *) NpyIter_GetReadFlags, + (void *) NpyIter_GetWriteFlags, + (void *) NpyIter_DebugPrint, + (void *) NpyIter_IterationNeedsAPI, + (void *) NpyIter_GetInnerFixedStrideArray, + (void *) NpyIter_RemoveAxis, + (void *) NpyIter_GetAxisStrideArray, + (void *) NpyIter_RequiresBuffering, + (void *) NpyIter_GetInitialDataPtrArray, + (void *) NpyIter_CreateCompatibleStrides, 
+ (void *) PyArray_CastingConverter, + (void *) PyArray_CountNonzero, + (void *) PyArray_PromoteTypes, + (void *) PyArray_MinScalarType, + (void *) PyArray_ResultType, + (void *) PyArray_CanCastArrayTo, + (void *) PyArray_CanCastTypeTo, + (void *) PyArray_EinsteinSum, + (void *) PyArray_NewLikeArray, + (void *) PyArray_GetArrayParamsFromObject, + (void *) PyArray_ConvertClipmodeSequence, + (void *) PyArray_MatrixProduct2, + (void *) NpyIter_IsFirstVisit, + (void *) PyArray_SetBaseObject, + (void *) PyArray_CreateSortedStridePerm, + (void *) PyArray_RemoveAxesInPlace, + (void *) PyArray_DebugPrint, + (void *) PyArray_FailUnlessWriteable, + (void *) PyArray_SetUpdateIfCopyBase, + (void *) PyDataMem_NEW, + (void *) PyDataMem_FREE, + (void *) PyDataMem_RENEW, + (void *) PyDataMem_SetEventHook, + (NPY_CASTING *) &NPY_DEFAULT_ASSIGN_CASTING, + (void *) PyArray_MapIterSwapAxes, + (void *) PyArray_MapIterArray, + (void *) PyArray_MapIterNext, + (void *) PyArray_Partition, + (void *) PyArray_ArgPartition, + (void *) PyArray_SelectkindConverter, + (void *) PyDataMem_NEW_ZEROED, + (void *) PyArray_CheckAnyScalarExact, + (void *) PyArray_MapIterArrayCopyIfOverlap, + (void *) PyArray_ResolveWritebackIfCopy, + (void *) PyArray_SetWritebackIfCopyBase, + (void *) PyDataMem_SetHandler, + (void *) PyDataMem_GetHandler, + (PyObject* *) &PyDataMem_DefaultHandler +}; diff --git a/.venv/lib/python3.12/site-packages/numpy/core/include/numpy/__multiarray_api.h b/.venv/lib/python3.12/site-packages/numpy/core/include/numpy/__multiarray_api.h new file mode 100644 index 00000000..4c626832 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/include/numpy/__multiarray_api.h @@ -0,0 +1,1566 @@ + +#if defined(_MULTIARRAYMODULE) || defined(WITH_CPYCHECKER_STEALS_REFERENCE_TO_ARG_ATTRIBUTE) + +typedef struct { + PyObject_HEAD + npy_bool obval; +} PyBoolScalarObject; + +extern NPY_NO_EXPORT PyTypeObject PyArrayMapIter_Type; +extern NPY_NO_EXPORT PyTypeObject PyArrayNeighborhoodIter_Type; +extern NPY_NO_EXPORT PyBoolScalarObject _PyArrayScalar_BoolValues[2]; + +NPY_NO_EXPORT unsigned int PyArray_GetNDArrayCVersion \ + (void); +extern NPY_NO_EXPORT PyTypeObject PyBigArray_Type; + +extern NPY_NO_EXPORT PyTypeObject PyArray_Type; + +extern NPY_NO_EXPORT PyArray_DTypeMeta PyArrayDescr_TypeFull; +#define PyArrayDescr_Type (*(PyTypeObject *)(&PyArrayDescr_TypeFull)) + +extern NPY_NO_EXPORT PyTypeObject PyArrayFlags_Type; + +extern NPY_NO_EXPORT PyTypeObject PyArrayIter_Type; + +extern NPY_NO_EXPORT PyTypeObject PyArrayMultiIter_Type; + +extern NPY_NO_EXPORT int NPY_NUMUSERTYPES; + +extern NPY_NO_EXPORT PyTypeObject PyBoolArrType_Type; + +extern NPY_NO_EXPORT PyBoolScalarObject _PyArrayScalar_BoolValues[2]; + +extern NPY_NO_EXPORT PyTypeObject PyGenericArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyNumberArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyIntegerArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PySignedIntegerArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyUnsignedIntegerArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyInexactArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyFloatingArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyComplexFloatingArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyFlexibleArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyCharacterArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyByteArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyShortArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyIntArrType_Type; + +extern NPY_NO_EXPORT 
PyTypeObject PyLongArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyLongLongArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyUByteArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyUShortArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyUIntArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyULongArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyULongLongArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyFloatArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyDoubleArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyLongDoubleArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyCFloatArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyCDoubleArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyCLongDoubleArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyObjectArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyStringArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyUnicodeArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyVoidArrType_Type; + +NPY_NO_EXPORT int PyArray_SetNumericOps \ + (PyObject *); +NPY_NO_EXPORT PyObject * PyArray_GetNumericOps \ + (void); +NPY_NO_EXPORT int PyArray_INCREF \ + (PyArrayObject *); +NPY_NO_EXPORT int PyArray_XDECREF \ + (PyArrayObject *); +NPY_NO_EXPORT void PyArray_SetStringFunction \ + (PyObject *, int); +NPY_NO_EXPORT PyArray_Descr * PyArray_DescrFromType \ + (int); +NPY_NO_EXPORT PyObject * PyArray_TypeObjectFromType \ + (int); +NPY_NO_EXPORT char * PyArray_Zero \ + (PyArrayObject *); +NPY_NO_EXPORT char * PyArray_One \ + (PyArrayObject *); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_CastToType \ + (PyArrayObject *, PyArray_Descr *, int); +NPY_NO_EXPORT int PyArray_CastTo \ + (PyArrayObject *, PyArrayObject *); +NPY_NO_EXPORT int PyArray_CastAnyTo \ + (PyArrayObject *, PyArrayObject *); +NPY_NO_EXPORT int PyArray_CanCastSafely \ + (int, int); +NPY_NO_EXPORT npy_bool PyArray_CanCastTo \ + (PyArray_Descr *, PyArray_Descr *); +NPY_NO_EXPORT int PyArray_ObjectType \ + (PyObject *, int); +NPY_NO_EXPORT PyArray_Descr * PyArray_DescrFromObject \ + (PyObject *, PyArray_Descr *); +NPY_NO_EXPORT PyArrayObject ** PyArray_ConvertToCommonType \ + (PyObject *, int *); +NPY_NO_EXPORT PyArray_Descr * PyArray_DescrFromScalar \ + (PyObject *); +NPY_NO_EXPORT PyArray_Descr * PyArray_DescrFromTypeObject \ + (PyObject *); +NPY_NO_EXPORT npy_intp PyArray_Size \ + (PyObject *); +NPY_NO_EXPORT PyObject * PyArray_Scalar \ + (void *, PyArray_Descr *, PyObject *); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_FromScalar \ + (PyObject *, PyArray_Descr *); +NPY_NO_EXPORT void PyArray_ScalarAsCtype \ + (PyObject *, void *); +NPY_NO_EXPORT int PyArray_CastScalarToCtype \ + (PyObject *, void *, PyArray_Descr *); +NPY_NO_EXPORT int PyArray_CastScalarDirect \ + (PyObject *, PyArray_Descr *, void *, int); +NPY_NO_EXPORT PyObject * PyArray_ScalarFromObject \ + (PyObject *); +NPY_NO_EXPORT PyArray_VectorUnaryFunc * PyArray_GetCastFunc \ + (PyArray_Descr *, int); +NPY_NO_EXPORT PyObject * PyArray_FromDims \ + (int NPY_UNUSED(nd), int *NPY_UNUSED(d), int NPY_UNUSED(type)); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(3) PyObject * PyArray_FromDimsAndDataAndDescr \ + (int NPY_UNUSED(nd), int *NPY_UNUSED(d), PyArray_Descr *, char *NPY_UNUSED(data)); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_FromAny \ + (PyObject *, PyArray_Descr *, int, int, int, PyObject *); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(1) PyObject * PyArray_EnsureArray \ + (PyObject *); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(1) PyObject * PyArray_EnsureAnyArray \ + 
(PyObject *); +NPY_NO_EXPORT PyObject * PyArray_FromFile \ + (FILE *, PyArray_Descr *, npy_intp, char *); +NPY_NO_EXPORT PyObject * PyArray_FromString \ + (char *, npy_intp, PyArray_Descr *, npy_intp, char *); +NPY_NO_EXPORT PyObject * PyArray_FromBuffer \ + (PyObject *, PyArray_Descr *, npy_intp, npy_intp); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_FromIter \ + (PyObject *, PyArray_Descr *, npy_intp); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(1) PyObject * PyArray_Return \ + (PyArrayObject *); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_GetField \ + (PyArrayObject *, PyArray_Descr *, int); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) int PyArray_SetField \ + (PyArrayObject *, PyArray_Descr *, int, PyObject *); +NPY_NO_EXPORT PyObject * PyArray_Byteswap \ + (PyArrayObject *, npy_bool); +NPY_NO_EXPORT PyObject * PyArray_Resize \ + (PyArrayObject *, PyArray_Dims *, int, NPY_ORDER NPY_UNUSED(order)); +NPY_NO_EXPORT int PyArray_MoveInto \ + (PyArrayObject *, PyArrayObject *); +NPY_NO_EXPORT int PyArray_CopyInto \ + (PyArrayObject *, PyArrayObject *); +NPY_NO_EXPORT int PyArray_CopyAnyInto \ + (PyArrayObject *, PyArrayObject *); +NPY_NO_EXPORT int PyArray_CopyObject \ + (PyArrayObject *, PyObject *); +NPY_NO_EXPORT PyObject * PyArray_NewCopy \ + (PyArrayObject *, NPY_ORDER); +NPY_NO_EXPORT PyObject * PyArray_ToList \ + (PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_ToString \ + (PyArrayObject *, NPY_ORDER); +NPY_NO_EXPORT int PyArray_ToFile \ + (PyArrayObject *, FILE *, char *, char *); +NPY_NO_EXPORT int PyArray_Dump \ + (PyObject *, PyObject *, int); +NPY_NO_EXPORT PyObject * PyArray_Dumps \ + (PyObject *, int); +NPY_NO_EXPORT int PyArray_ValidType \ + (int); +NPY_NO_EXPORT void PyArray_UpdateFlags \ + (PyArrayObject *, int); +NPY_NO_EXPORT PyObject * PyArray_New \ + (PyTypeObject *, int, npy_intp const *, int, npy_intp const *, void *, int, int, PyObject *); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_NewFromDescr \ + (PyTypeObject *, PyArray_Descr *, int, npy_intp const *, npy_intp const *, void *, int, PyObject *); +NPY_NO_EXPORT PyArray_Descr * PyArray_DescrNew \ + (PyArray_Descr *); +NPY_NO_EXPORT PyArray_Descr * PyArray_DescrNewFromType \ + (int); +NPY_NO_EXPORT double PyArray_GetPriority \ + (PyObject *, double); +NPY_NO_EXPORT PyObject * PyArray_IterNew \ + (PyObject *); +NPY_NO_EXPORT PyObject* PyArray_MultiIterNew \ + (int, ...); +NPY_NO_EXPORT int PyArray_PyIntAsInt \ + (PyObject *); +NPY_NO_EXPORT npy_intp PyArray_PyIntAsIntp \ + (PyObject *); +NPY_NO_EXPORT int PyArray_Broadcast \ + (PyArrayMultiIterObject *); +NPY_NO_EXPORT void PyArray_FillObjectArray \ + (PyArrayObject *, PyObject *); +NPY_NO_EXPORT int PyArray_FillWithScalar \ + (PyArrayObject *, PyObject *); +NPY_NO_EXPORT npy_bool PyArray_CheckStrides \ + (int, int, npy_intp, npy_intp, npy_intp const *, npy_intp const *); +NPY_NO_EXPORT PyArray_Descr * PyArray_DescrNewByteorder \ + (PyArray_Descr *, char); +NPY_NO_EXPORT PyObject * PyArray_IterAllButAxis \ + (PyObject *, int *); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_CheckFromAny \ + (PyObject *, PyArray_Descr *, int, int, int, PyObject *); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_FromArray \ + (PyArrayObject *, PyArray_Descr *, int); +NPY_NO_EXPORT PyObject * PyArray_FromInterface \ + (PyObject *); +NPY_NO_EXPORT PyObject * PyArray_FromStructInterface \ + (PyObject *); +NPY_NO_EXPORT PyObject * PyArray_FromArrayAttr \ + (PyObject *, PyArray_Descr *, PyObject *); +NPY_NO_EXPORT NPY_SCALARKIND 
PyArray_ScalarKind \ + (int, PyArrayObject **); +NPY_NO_EXPORT int PyArray_CanCoerceScalar \ + (int, int, NPY_SCALARKIND); +NPY_NO_EXPORT PyObject * PyArray_NewFlagsObject \ + (PyObject *); +NPY_NO_EXPORT npy_bool PyArray_CanCastScalar \ + (PyTypeObject *, PyTypeObject *); +NPY_NO_EXPORT int PyArray_CompareUCS4 \ + (npy_ucs4 const *, npy_ucs4 const *, size_t); +NPY_NO_EXPORT int PyArray_RemoveSmallest \ + (PyArrayMultiIterObject *); +NPY_NO_EXPORT int PyArray_ElementStrides \ + (PyObject *); +NPY_NO_EXPORT void PyArray_Item_INCREF \ + (char *, PyArray_Descr *); +NPY_NO_EXPORT void PyArray_Item_XDECREF \ + (char *, PyArray_Descr *); +NPY_NO_EXPORT PyObject * PyArray_FieldNames \ + (PyObject *); +NPY_NO_EXPORT PyObject * PyArray_Transpose \ + (PyArrayObject *, PyArray_Dims *); +NPY_NO_EXPORT PyObject * PyArray_TakeFrom \ + (PyArrayObject *, PyObject *, int, PyArrayObject *, NPY_CLIPMODE); +NPY_NO_EXPORT PyObject * PyArray_PutTo \ + (PyArrayObject *, PyObject*, PyObject *, NPY_CLIPMODE); +NPY_NO_EXPORT PyObject * PyArray_PutMask \ + (PyArrayObject *, PyObject*, PyObject*); +NPY_NO_EXPORT PyObject * PyArray_Repeat \ + (PyArrayObject *, PyObject *, int); +NPY_NO_EXPORT PyObject * PyArray_Choose \ + (PyArrayObject *, PyObject *, PyArrayObject *, NPY_CLIPMODE); +NPY_NO_EXPORT int PyArray_Sort \ + (PyArrayObject *, int, NPY_SORTKIND); +NPY_NO_EXPORT PyObject * PyArray_ArgSort \ + (PyArrayObject *, int, NPY_SORTKIND); +NPY_NO_EXPORT PyObject * PyArray_SearchSorted \ + (PyArrayObject *, PyObject *, NPY_SEARCHSIDE, PyObject *); +NPY_NO_EXPORT PyObject * PyArray_ArgMax \ + (PyArrayObject *, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_ArgMin \ + (PyArrayObject *, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_Reshape \ + (PyArrayObject *, PyObject *); +NPY_NO_EXPORT PyObject * PyArray_Newshape \ + (PyArrayObject *, PyArray_Dims *, NPY_ORDER); +NPY_NO_EXPORT PyObject * PyArray_Squeeze \ + (PyArrayObject *); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_View \ + (PyArrayObject *, PyArray_Descr *, PyTypeObject *); +NPY_NO_EXPORT PyObject * PyArray_SwapAxes \ + (PyArrayObject *, int, int); +NPY_NO_EXPORT PyObject * PyArray_Max \ + (PyArrayObject *, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_Min \ + (PyArrayObject *, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_Ptp \ + (PyArrayObject *, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_Mean \ + (PyArrayObject *, int, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_Trace \ + (PyArrayObject *, int, int, int, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_Diagonal \ + (PyArrayObject *, int, int, int); +NPY_NO_EXPORT PyObject * PyArray_Clip \ + (PyArrayObject *, PyObject *, PyObject *, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_Conjugate \ + (PyArrayObject *, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_Nonzero \ + (PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_Std \ + (PyArrayObject *, int, int, PyArrayObject *, int); +NPY_NO_EXPORT PyObject * PyArray_Sum \ + (PyArrayObject *, int, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_CumSum \ + (PyArrayObject *, int, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_Prod \ + (PyArrayObject *, int, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_CumProd \ + (PyArrayObject *, int, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_All \ + (PyArrayObject *, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_Any \ + (PyArrayObject *, int, PyArrayObject *); +NPY_NO_EXPORT 
PyObject * PyArray_Compress \ + (PyArrayObject *, PyObject *, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_Flatten \ + (PyArrayObject *, NPY_ORDER); +NPY_NO_EXPORT PyObject * PyArray_Ravel \ + (PyArrayObject *, NPY_ORDER); +NPY_NO_EXPORT npy_intp PyArray_MultiplyList \ + (npy_intp const *, int); +NPY_NO_EXPORT int PyArray_MultiplyIntList \ + (int const *, int); +NPY_NO_EXPORT void * PyArray_GetPtr \ + (PyArrayObject *, npy_intp const*); +NPY_NO_EXPORT int PyArray_CompareLists \ + (npy_intp const *, npy_intp const *, int); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(5) int PyArray_AsCArray \ + (PyObject **, void *, npy_intp *, int, PyArray_Descr*); +NPY_NO_EXPORT int PyArray_As1D \ + (PyObject **NPY_UNUSED(op), char **NPY_UNUSED(ptr), int *NPY_UNUSED(d1), int NPY_UNUSED(typecode)); +NPY_NO_EXPORT int PyArray_As2D \ + (PyObject **NPY_UNUSED(op), char ***NPY_UNUSED(ptr), int *NPY_UNUSED(d1), int *NPY_UNUSED(d2), int NPY_UNUSED(typecode)); +NPY_NO_EXPORT int PyArray_Free \ + (PyObject *, void *); +NPY_NO_EXPORT int PyArray_Converter \ + (PyObject *, PyObject **); +NPY_NO_EXPORT int PyArray_IntpFromSequence \ + (PyObject *, npy_intp *, int); +NPY_NO_EXPORT PyObject * PyArray_Concatenate \ + (PyObject *, int); +NPY_NO_EXPORT PyObject * PyArray_InnerProduct \ + (PyObject *, PyObject *); +NPY_NO_EXPORT PyObject * PyArray_MatrixProduct \ + (PyObject *, PyObject *); +NPY_NO_EXPORT PyObject * PyArray_CopyAndTranspose \ + (PyObject *); +NPY_NO_EXPORT PyObject * PyArray_Correlate \ + (PyObject *, PyObject *, int); +NPY_NO_EXPORT int PyArray_TypestrConvert \ + (int, int); +NPY_NO_EXPORT int PyArray_DescrConverter \ + (PyObject *, PyArray_Descr **); +NPY_NO_EXPORT int PyArray_DescrConverter2 \ + (PyObject *, PyArray_Descr **); +NPY_NO_EXPORT int PyArray_IntpConverter \ + (PyObject *, PyArray_Dims *); +NPY_NO_EXPORT int PyArray_BufferConverter \ + (PyObject *, PyArray_Chunk *); +NPY_NO_EXPORT int PyArray_AxisConverter \ + (PyObject *, int *); +NPY_NO_EXPORT int PyArray_BoolConverter \ + (PyObject *, npy_bool *); +NPY_NO_EXPORT int PyArray_ByteorderConverter \ + (PyObject *, char *); +NPY_NO_EXPORT int PyArray_OrderConverter \ + (PyObject *, NPY_ORDER *); +NPY_NO_EXPORT unsigned char PyArray_EquivTypes \ + (PyArray_Descr *, PyArray_Descr *); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(3) PyObject * PyArray_Zeros \ + (int, npy_intp const *, PyArray_Descr *, int); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(3) PyObject * PyArray_Empty \ + (int, npy_intp const *, PyArray_Descr *, int); +NPY_NO_EXPORT PyObject * PyArray_Where \ + (PyObject *, PyObject *, PyObject *); +NPY_NO_EXPORT PyObject * PyArray_Arange \ + (double, double, double, int); +NPY_NO_EXPORT PyObject * PyArray_ArangeObj \ + (PyObject *, PyObject *, PyObject *, PyArray_Descr *); +NPY_NO_EXPORT int PyArray_SortkindConverter \ + (PyObject *, NPY_SORTKIND *); +NPY_NO_EXPORT PyObject * PyArray_LexSort \ + (PyObject *, int); +NPY_NO_EXPORT PyObject * PyArray_Round \ + (PyArrayObject *, int, PyArrayObject *); +NPY_NO_EXPORT unsigned char PyArray_EquivTypenums \ + (int, int); +NPY_NO_EXPORT int PyArray_RegisterDataType \ + (PyArray_Descr *); +NPY_NO_EXPORT int PyArray_RegisterCastFunc \ + (PyArray_Descr *, int, PyArray_VectorUnaryFunc *); +NPY_NO_EXPORT int PyArray_RegisterCanCast \ + (PyArray_Descr *, int, NPY_SCALARKIND); +NPY_NO_EXPORT void PyArray_InitArrFuncs \ + (PyArray_ArrFuncs *); +NPY_NO_EXPORT PyObject * PyArray_IntTupleFromIntp \ + (int, npy_intp const *); +NPY_NO_EXPORT int PyArray_TypeNumFromName \ + (char const *); +NPY_NO_EXPORT int 
PyArray_ClipmodeConverter \ + (PyObject *, NPY_CLIPMODE *); +NPY_NO_EXPORT int PyArray_OutputConverter \ + (PyObject *, PyArrayObject **); +NPY_NO_EXPORT PyObject * PyArray_BroadcastToShape \ + (PyObject *, npy_intp *, int); +NPY_NO_EXPORT void _PyArray_SigintHandler \ + (int); +NPY_NO_EXPORT void* _PyArray_GetSigintBuf \ + (void); +NPY_NO_EXPORT int PyArray_DescrAlignConverter \ + (PyObject *, PyArray_Descr **); +NPY_NO_EXPORT int PyArray_DescrAlignConverter2 \ + (PyObject *, PyArray_Descr **); +NPY_NO_EXPORT int PyArray_SearchsideConverter \ + (PyObject *, void *); +NPY_NO_EXPORT PyObject * PyArray_CheckAxis \ + (PyArrayObject *, int *, int); +NPY_NO_EXPORT npy_intp PyArray_OverflowMultiplyList \ + (npy_intp const *, int); +NPY_NO_EXPORT int PyArray_CompareString \ + (const char *, const char *, size_t); +NPY_NO_EXPORT PyObject* PyArray_MultiIterFromObjects \ + (PyObject **, int, int, ...); +NPY_NO_EXPORT int PyArray_GetEndianness \ + (void); +NPY_NO_EXPORT unsigned int PyArray_GetNDArrayCFeatureVersion \ + (void); +NPY_NO_EXPORT PyObject * PyArray_Correlate2 \ + (PyObject *, PyObject *, int); +NPY_NO_EXPORT PyObject* PyArray_NeighborhoodIterNew \ + (PyArrayIterObject *, const npy_intp *, int, PyArrayObject*); +extern NPY_NO_EXPORT PyTypeObject PyTimeIntegerArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyDatetimeArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyTimedeltaArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyHalfArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject NpyIter_Type; + +NPY_NO_EXPORT void PyArray_SetDatetimeParseFunction \ + (PyObject *NPY_UNUSED(op)); +NPY_NO_EXPORT void PyArray_DatetimeToDatetimeStruct \ + (npy_datetime NPY_UNUSED(val), NPY_DATETIMEUNIT NPY_UNUSED(fr), npy_datetimestruct *); +NPY_NO_EXPORT void PyArray_TimedeltaToTimedeltaStruct \ + (npy_timedelta NPY_UNUSED(val), NPY_DATETIMEUNIT NPY_UNUSED(fr), npy_timedeltastruct *); +NPY_NO_EXPORT npy_datetime PyArray_DatetimeStructToDatetime \ + (NPY_DATETIMEUNIT NPY_UNUSED(fr), npy_datetimestruct *NPY_UNUSED(d)); +NPY_NO_EXPORT npy_datetime PyArray_TimedeltaStructToTimedelta \ + (NPY_DATETIMEUNIT NPY_UNUSED(fr), npy_timedeltastruct *NPY_UNUSED(d)); +NPY_NO_EXPORT NpyIter * NpyIter_New \ + (PyArrayObject *, npy_uint32, NPY_ORDER, NPY_CASTING, PyArray_Descr*); +NPY_NO_EXPORT NpyIter * NpyIter_MultiNew \ + (int, PyArrayObject **, npy_uint32, NPY_ORDER, NPY_CASTING, npy_uint32 *, PyArray_Descr **); +NPY_NO_EXPORT NpyIter * NpyIter_AdvancedNew \ + (int, PyArrayObject **, npy_uint32, NPY_ORDER, NPY_CASTING, npy_uint32 *, PyArray_Descr **, int, int **, npy_intp *, npy_intp); +NPY_NO_EXPORT NpyIter * NpyIter_Copy \ + (NpyIter *); +NPY_NO_EXPORT int NpyIter_Deallocate \ + (NpyIter *); +NPY_NO_EXPORT npy_bool NpyIter_HasDelayedBufAlloc \ + (NpyIter *); +NPY_NO_EXPORT npy_bool NpyIter_HasExternalLoop \ + (NpyIter *); +NPY_NO_EXPORT int NpyIter_EnableExternalLoop \ + (NpyIter *); +NPY_NO_EXPORT npy_intp * NpyIter_GetInnerStrideArray \ + (NpyIter *); +NPY_NO_EXPORT npy_intp * NpyIter_GetInnerLoopSizePtr \ + (NpyIter *); +NPY_NO_EXPORT int NpyIter_Reset \ + (NpyIter *, char **); +NPY_NO_EXPORT int NpyIter_ResetBasePointers \ + (NpyIter *, char **, char **); +NPY_NO_EXPORT int NpyIter_ResetToIterIndexRange \ + (NpyIter *, npy_intp, npy_intp, char **); +NPY_NO_EXPORT int NpyIter_GetNDim \ + (NpyIter *); +NPY_NO_EXPORT int NpyIter_GetNOp \ + (NpyIter *); +NPY_NO_EXPORT NpyIter_IterNextFunc * NpyIter_GetIterNext \ + (NpyIter *, char **); +NPY_NO_EXPORT npy_intp NpyIter_GetIterSize \ + (NpyIter *); +NPY_NO_EXPORT void 
NpyIter_GetIterIndexRange \ + (NpyIter *, npy_intp *, npy_intp *); +NPY_NO_EXPORT npy_intp NpyIter_GetIterIndex \ + (NpyIter *); +NPY_NO_EXPORT int NpyIter_GotoIterIndex \ + (NpyIter *, npy_intp); +NPY_NO_EXPORT npy_bool NpyIter_HasMultiIndex \ + (NpyIter *); +NPY_NO_EXPORT int NpyIter_GetShape \ + (NpyIter *, npy_intp *); +NPY_NO_EXPORT NpyIter_GetMultiIndexFunc * NpyIter_GetGetMultiIndex \ + (NpyIter *, char **); +NPY_NO_EXPORT int NpyIter_GotoMultiIndex \ + (NpyIter *, npy_intp const *); +NPY_NO_EXPORT int NpyIter_RemoveMultiIndex \ + (NpyIter *); +NPY_NO_EXPORT npy_bool NpyIter_HasIndex \ + (NpyIter *); +NPY_NO_EXPORT npy_bool NpyIter_IsBuffered \ + (NpyIter *); +NPY_NO_EXPORT npy_bool NpyIter_IsGrowInner \ + (NpyIter *); +NPY_NO_EXPORT npy_intp NpyIter_GetBufferSize \ + (NpyIter *); +NPY_NO_EXPORT npy_intp * NpyIter_GetIndexPtr \ + (NpyIter *); +NPY_NO_EXPORT int NpyIter_GotoIndex \ + (NpyIter *, npy_intp); +NPY_NO_EXPORT char ** NpyIter_GetDataPtrArray \ + (NpyIter *); +NPY_NO_EXPORT PyArray_Descr ** NpyIter_GetDescrArray \ + (NpyIter *); +NPY_NO_EXPORT PyArrayObject ** NpyIter_GetOperandArray \ + (NpyIter *); +NPY_NO_EXPORT PyArrayObject * NpyIter_GetIterView \ + (NpyIter *, npy_intp); +NPY_NO_EXPORT void NpyIter_GetReadFlags \ + (NpyIter *, char *); +NPY_NO_EXPORT void NpyIter_GetWriteFlags \ + (NpyIter *, char *); +NPY_NO_EXPORT void NpyIter_DebugPrint \ + (NpyIter *); +NPY_NO_EXPORT npy_bool NpyIter_IterationNeedsAPI \ + (NpyIter *); +NPY_NO_EXPORT void NpyIter_GetInnerFixedStrideArray \ + (NpyIter *, npy_intp *); +NPY_NO_EXPORT int NpyIter_RemoveAxis \ + (NpyIter *, int); +NPY_NO_EXPORT npy_intp * NpyIter_GetAxisStrideArray \ + (NpyIter *, int); +NPY_NO_EXPORT npy_bool NpyIter_RequiresBuffering \ + (NpyIter *); +NPY_NO_EXPORT char ** NpyIter_GetInitialDataPtrArray \ + (NpyIter *); +NPY_NO_EXPORT int NpyIter_CreateCompatibleStrides \ + (NpyIter *, npy_intp, npy_intp *); +NPY_NO_EXPORT int PyArray_CastingConverter \ + (PyObject *, NPY_CASTING *); +NPY_NO_EXPORT npy_intp PyArray_CountNonzero \ + (PyArrayObject *); +NPY_NO_EXPORT PyArray_Descr * PyArray_PromoteTypes \ + (PyArray_Descr *, PyArray_Descr *); +NPY_NO_EXPORT PyArray_Descr * PyArray_MinScalarType \ + (PyArrayObject *); +NPY_NO_EXPORT PyArray_Descr * PyArray_ResultType \ + (npy_intp, PyArrayObject *arrs[], npy_intp, PyArray_Descr *descrs[]); +NPY_NO_EXPORT npy_bool PyArray_CanCastArrayTo \ + (PyArrayObject *, PyArray_Descr *, NPY_CASTING); +NPY_NO_EXPORT npy_bool PyArray_CanCastTypeTo \ + (PyArray_Descr *, PyArray_Descr *, NPY_CASTING); +NPY_NO_EXPORT PyArrayObject * PyArray_EinsteinSum \ + (char *, npy_intp, PyArrayObject **, PyArray_Descr *, NPY_ORDER, NPY_CASTING, PyArrayObject *); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(3) PyObject * PyArray_NewLikeArray \ + (PyArrayObject *, NPY_ORDER, PyArray_Descr *, int); +NPY_NO_EXPORT int PyArray_GetArrayParamsFromObject \ + (PyObject *NPY_UNUSED(op), PyArray_Descr *NPY_UNUSED(requested_dtype), npy_bool NPY_UNUSED(writeable), PyArray_Descr **NPY_UNUSED(out_dtype), int *NPY_UNUSED(out_ndim), npy_intp *NPY_UNUSED(out_dims), PyArrayObject **NPY_UNUSED(out_arr), PyObject *NPY_UNUSED(context)); +NPY_NO_EXPORT int PyArray_ConvertClipmodeSequence \ + (PyObject *, NPY_CLIPMODE *, int); +NPY_NO_EXPORT PyObject * PyArray_MatrixProduct2 \ + (PyObject *, PyObject *, PyArrayObject*); +NPY_NO_EXPORT npy_bool NpyIter_IsFirstVisit \ + (NpyIter *, int); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) int PyArray_SetBaseObject \ + (PyArrayObject *, PyObject *); +NPY_NO_EXPORT void 
PyArray_CreateSortedStridePerm \ + (int, npy_intp const *, npy_stride_sort_item *); +NPY_NO_EXPORT void PyArray_RemoveAxesInPlace \ + (PyArrayObject *, const npy_bool *); +NPY_NO_EXPORT void PyArray_DebugPrint \ + (PyArrayObject *); +NPY_NO_EXPORT int PyArray_FailUnlessWriteable \ + (PyArrayObject *, const char *); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) int PyArray_SetUpdateIfCopyBase \ + (PyArrayObject *, PyArrayObject *); +NPY_NO_EXPORT void * PyDataMem_NEW \ + (size_t); +NPY_NO_EXPORT void PyDataMem_FREE \ + (void *); +NPY_NO_EXPORT void * PyDataMem_RENEW \ + (void *, size_t); +NPY_NO_EXPORT PyDataMem_EventHookFunc * PyDataMem_SetEventHook \ + (PyDataMem_EventHookFunc *, void *, void **); +extern NPY_NO_EXPORT NPY_CASTING NPY_DEFAULT_ASSIGN_CASTING; + +NPY_NO_EXPORT void PyArray_MapIterSwapAxes \ + (PyArrayMapIterObject *, PyArrayObject **, int); +NPY_NO_EXPORT PyObject * PyArray_MapIterArray \ + (PyArrayObject *, PyObject *); +NPY_NO_EXPORT void PyArray_MapIterNext \ + (PyArrayMapIterObject *); +NPY_NO_EXPORT int PyArray_Partition \ + (PyArrayObject *, PyArrayObject *, int, NPY_SELECTKIND); +NPY_NO_EXPORT PyObject * PyArray_ArgPartition \ + (PyArrayObject *, PyArrayObject *, int, NPY_SELECTKIND); +NPY_NO_EXPORT int PyArray_SelectkindConverter \ + (PyObject *, NPY_SELECTKIND *); +NPY_NO_EXPORT void * PyDataMem_NEW_ZEROED \ + (size_t, size_t); +NPY_NO_EXPORT int PyArray_CheckAnyScalarExact \ + (PyObject *); +NPY_NO_EXPORT PyObject * PyArray_MapIterArrayCopyIfOverlap \ + (PyArrayObject *, PyObject *, int, PyArrayObject *); +NPY_NO_EXPORT int PyArray_ResolveWritebackIfCopy \ + (PyArrayObject *); +NPY_NO_EXPORT int PyArray_SetWritebackIfCopyBase \ + (PyArrayObject *, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyDataMem_SetHandler \ + (PyObject *); +NPY_NO_EXPORT PyObject * PyDataMem_GetHandler \ + (void); +extern NPY_NO_EXPORT PyObject* PyDataMem_DefaultHandler; + + +#else + +#if defined(PY_ARRAY_UNIQUE_SYMBOL) +#define PyArray_API PY_ARRAY_UNIQUE_SYMBOL +#endif + +#if defined(NO_IMPORT) || defined(NO_IMPORT_ARRAY) +extern void **PyArray_API; +#else +#if defined(PY_ARRAY_UNIQUE_SYMBOL) +void **PyArray_API; +#else +static void **PyArray_API=NULL; +#endif +#endif + +#define PyArray_GetNDArrayCVersion \ + (*(unsigned int (*)(void)) \ + PyArray_API[0]) +#define PyBigArray_Type (*(PyTypeObject *)PyArray_API[1]) +#define PyArray_Type (*(PyTypeObject *)PyArray_API[2]) +#define PyArrayDescr_Type (*(PyTypeObject *)PyArray_API[3]) +#define PyArrayFlags_Type (*(PyTypeObject *)PyArray_API[4]) +#define PyArrayIter_Type (*(PyTypeObject *)PyArray_API[5]) +#define PyArrayMultiIter_Type (*(PyTypeObject *)PyArray_API[6]) +#define NPY_NUMUSERTYPES (*(int *)PyArray_API[7]) +#define PyBoolArrType_Type (*(PyTypeObject *)PyArray_API[8]) +#define _PyArrayScalar_BoolValues ((PyBoolScalarObject *)PyArray_API[9]) +#define PyGenericArrType_Type (*(PyTypeObject *)PyArray_API[10]) +#define PyNumberArrType_Type (*(PyTypeObject *)PyArray_API[11]) +#define PyIntegerArrType_Type (*(PyTypeObject *)PyArray_API[12]) +#define PySignedIntegerArrType_Type (*(PyTypeObject *)PyArray_API[13]) +#define PyUnsignedIntegerArrType_Type (*(PyTypeObject *)PyArray_API[14]) +#define PyInexactArrType_Type (*(PyTypeObject *)PyArray_API[15]) +#define PyFloatingArrType_Type (*(PyTypeObject *)PyArray_API[16]) +#define PyComplexFloatingArrType_Type (*(PyTypeObject *)PyArray_API[17]) +#define PyFlexibleArrType_Type (*(PyTypeObject *)PyArray_API[18]) +#define PyCharacterArrType_Type (*(PyTypeObject *)PyArray_API[19]) +#define PyByteArrType_Type 
(*(PyTypeObject *)PyArray_API[20]) +#define PyShortArrType_Type (*(PyTypeObject *)PyArray_API[21]) +#define PyIntArrType_Type (*(PyTypeObject *)PyArray_API[22]) +#define PyLongArrType_Type (*(PyTypeObject *)PyArray_API[23]) +#define PyLongLongArrType_Type (*(PyTypeObject *)PyArray_API[24]) +#define PyUByteArrType_Type (*(PyTypeObject *)PyArray_API[25]) +#define PyUShortArrType_Type (*(PyTypeObject *)PyArray_API[26]) +#define PyUIntArrType_Type (*(PyTypeObject *)PyArray_API[27]) +#define PyULongArrType_Type (*(PyTypeObject *)PyArray_API[28]) +#define PyULongLongArrType_Type (*(PyTypeObject *)PyArray_API[29]) +#define PyFloatArrType_Type (*(PyTypeObject *)PyArray_API[30]) +#define PyDoubleArrType_Type (*(PyTypeObject *)PyArray_API[31]) +#define PyLongDoubleArrType_Type (*(PyTypeObject *)PyArray_API[32]) +#define PyCFloatArrType_Type (*(PyTypeObject *)PyArray_API[33]) +#define PyCDoubleArrType_Type (*(PyTypeObject *)PyArray_API[34]) +#define PyCLongDoubleArrType_Type (*(PyTypeObject *)PyArray_API[35]) +#define PyObjectArrType_Type (*(PyTypeObject *)PyArray_API[36]) +#define PyStringArrType_Type (*(PyTypeObject *)PyArray_API[37]) +#define PyUnicodeArrType_Type (*(PyTypeObject *)PyArray_API[38]) +#define PyVoidArrType_Type (*(PyTypeObject *)PyArray_API[39]) +#define PyArray_SetNumericOps \ + (*(int (*)(PyObject *)) \ + PyArray_API[40]) +#define PyArray_GetNumericOps \ + (*(PyObject * (*)(void)) \ + PyArray_API[41]) +#define PyArray_INCREF \ + (*(int (*)(PyArrayObject *)) \ + PyArray_API[42]) +#define PyArray_XDECREF \ + (*(int (*)(PyArrayObject *)) \ + PyArray_API[43]) +#define PyArray_SetStringFunction \ + (*(void (*)(PyObject *, int)) \ + PyArray_API[44]) +#define PyArray_DescrFromType \ + (*(PyArray_Descr * (*)(int)) \ + PyArray_API[45]) +#define PyArray_TypeObjectFromType \ + (*(PyObject * (*)(int)) \ + PyArray_API[46]) +#define PyArray_Zero \ + (*(char * (*)(PyArrayObject *)) \ + PyArray_API[47]) +#define PyArray_One \ + (*(char * (*)(PyArrayObject *)) \ + PyArray_API[48]) +#define PyArray_CastToType \ + (*(PyObject * (*)(PyArrayObject *, PyArray_Descr *, int)) \ + PyArray_API[49]) +#define PyArray_CastTo \ + (*(int (*)(PyArrayObject *, PyArrayObject *)) \ + PyArray_API[50]) +#define PyArray_CastAnyTo \ + (*(int (*)(PyArrayObject *, PyArrayObject *)) \ + PyArray_API[51]) +#define PyArray_CanCastSafely \ + (*(int (*)(int, int)) \ + PyArray_API[52]) +#define PyArray_CanCastTo \ + (*(npy_bool (*)(PyArray_Descr *, PyArray_Descr *)) \ + PyArray_API[53]) +#define PyArray_ObjectType \ + (*(int (*)(PyObject *, int)) \ + PyArray_API[54]) +#define PyArray_DescrFromObject \ + (*(PyArray_Descr * (*)(PyObject *, PyArray_Descr *)) \ + PyArray_API[55]) +#define PyArray_ConvertToCommonType \ + (*(PyArrayObject ** (*)(PyObject *, int *)) \ + PyArray_API[56]) +#define PyArray_DescrFromScalar \ + (*(PyArray_Descr * (*)(PyObject *)) \ + PyArray_API[57]) +#define PyArray_DescrFromTypeObject \ + (*(PyArray_Descr * (*)(PyObject *)) \ + PyArray_API[58]) +#define PyArray_Size \ + (*(npy_intp (*)(PyObject *)) \ + PyArray_API[59]) +#define PyArray_Scalar \ + (*(PyObject * (*)(void *, PyArray_Descr *, PyObject *)) \ + PyArray_API[60]) +#define PyArray_FromScalar \ + (*(PyObject * (*)(PyObject *, PyArray_Descr *)) \ + PyArray_API[61]) +#define PyArray_ScalarAsCtype \ + (*(void (*)(PyObject *, void *)) \ + PyArray_API[62]) +#define PyArray_CastScalarToCtype \ + (*(int (*)(PyObject *, void *, PyArray_Descr *)) \ + PyArray_API[63]) +#define PyArray_CastScalarDirect \ + (*(int (*)(PyObject *, PyArray_Descr *, void *, 
int)) \ + PyArray_API[64]) +#define PyArray_ScalarFromObject \ + (*(PyObject * (*)(PyObject *)) \ + PyArray_API[65]) +#define PyArray_GetCastFunc \ + (*(PyArray_VectorUnaryFunc * (*)(PyArray_Descr *, int)) \ + PyArray_API[66]) +#define PyArray_FromDims \ + (*(PyObject * (*)(int NPY_UNUSED(nd), int *NPY_UNUSED(d), int NPY_UNUSED(type))) \ + PyArray_API[67]) +#define PyArray_FromDimsAndDataAndDescr \ + (*(PyObject * (*)(int NPY_UNUSED(nd), int *NPY_UNUSED(d), PyArray_Descr *, char *NPY_UNUSED(data))) \ + PyArray_API[68]) +#define PyArray_FromAny \ + (*(PyObject * (*)(PyObject *, PyArray_Descr *, int, int, int, PyObject *)) \ + PyArray_API[69]) +#define PyArray_EnsureArray \ + (*(PyObject * (*)(PyObject *)) \ + PyArray_API[70]) +#define PyArray_EnsureAnyArray \ + (*(PyObject * (*)(PyObject *)) \ + PyArray_API[71]) +#define PyArray_FromFile \ + (*(PyObject * (*)(FILE *, PyArray_Descr *, npy_intp, char *)) \ + PyArray_API[72]) +#define PyArray_FromString \ + (*(PyObject * (*)(char *, npy_intp, PyArray_Descr *, npy_intp, char *)) \ + PyArray_API[73]) +#define PyArray_FromBuffer \ + (*(PyObject * (*)(PyObject *, PyArray_Descr *, npy_intp, npy_intp)) \ + PyArray_API[74]) +#define PyArray_FromIter \ + (*(PyObject * (*)(PyObject *, PyArray_Descr *, npy_intp)) \ + PyArray_API[75]) +#define PyArray_Return \ + (*(PyObject * (*)(PyArrayObject *)) \ + PyArray_API[76]) +#define PyArray_GetField \ + (*(PyObject * (*)(PyArrayObject *, PyArray_Descr *, int)) \ + PyArray_API[77]) +#define PyArray_SetField \ + (*(int (*)(PyArrayObject *, PyArray_Descr *, int, PyObject *)) \ + PyArray_API[78]) +#define PyArray_Byteswap \ + (*(PyObject * (*)(PyArrayObject *, npy_bool)) \ + PyArray_API[79]) +#define PyArray_Resize \ + (*(PyObject * (*)(PyArrayObject *, PyArray_Dims *, int, NPY_ORDER NPY_UNUSED(order))) \ + PyArray_API[80]) +#define PyArray_MoveInto \ + (*(int (*)(PyArrayObject *, PyArrayObject *)) \ + PyArray_API[81]) +#define PyArray_CopyInto \ + (*(int (*)(PyArrayObject *, PyArrayObject *)) \ + PyArray_API[82]) +#define PyArray_CopyAnyInto \ + (*(int (*)(PyArrayObject *, PyArrayObject *)) \ + PyArray_API[83]) +#define PyArray_CopyObject \ + (*(int (*)(PyArrayObject *, PyObject *)) \ + PyArray_API[84]) +#define PyArray_NewCopy \ + (*(PyObject * (*)(PyArrayObject *, NPY_ORDER)) \ + PyArray_API[85]) +#define PyArray_ToList \ + (*(PyObject * (*)(PyArrayObject *)) \ + PyArray_API[86]) +#define PyArray_ToString \ + (*(PyObject * (*)(PyArrayObject *, NPY_ORDER)) \ + PyArray_API[87]) +#define PyArray_ToFile \ + (*(int (*)(PyArrayObject *, FILE *, char *, char *)) \ + PyArray_API[88]) +#define PyArray_Dump \ + (*(int (*)(PyObject *, PyObject *, int)) \ + PyArray_API[89]) +#define PyArray_Dumps \ + (*(PyObject * (*)(PyObject *, int)) \ + PyArray_API[90]) +#define PyArray_ValidType \ + (*(int (*)(int)) \ + PyArray_API[91]) +#define PyArray_UpdateFlags \ + (*(void (*)(PyArrayObject *, int)) \ + PyArray_API[92]) +#define PyArray_New \ + (*(PyObject * (*)(PyTypeObject *, int, npy_intp const *, int, npy_intp const *, void *, int, int, PyObject *)) \ + PyArray_API[93]) +#define PyArray_NewFromDescr \ + (*(PyObject * (*)(PyTypeObject *, PyArray_Descr *, int, npy_intp const *, npy_intp const *, void *, int, PyObject *)) \ + PyArray_API[94]) +#define PyArray_DescrNew \ + (*(PyArray_Descr * (*)(PyArray_Descr *)) \ + PyArray_API[95]) +#define PyArray_DescrNewFromType \ + (*(PyArray_Descr * (*)(int)) \ + PyArray_API[96]) +#define PyArray_GetPriority \ + (*(double (*)(PyObject *, double)) \ + PyArray_API[97]) +#define PyArray_IterNew 
\ + (*(PyObject * (*)(PyObject *)) \ + PyArray_API[98]) +#define PyArray_MultiIterNew \ + (*(PyObject* (*)(int, ...)) \ + PyArray_API[99]) +#define PyArray_PyIntAsInt \ + (*(int (*)(PyObject *)) \ + PyArray_API[100]) +#define PyArray_PyIntAsIntp \ + (*(npy_intp (*)(PyObject *)) \ + PyArray_API[101]) +#define PyArray_Broadcast \ + (*(int (*)(PyArrayMultiIterObject *)) \ + PyArray_API[102]) +#define PyArray_FillObjectArray \ + (*(void (*)(PyArrayObject *, PyObject *)) \ + PyArray_API[103]) +#define PyArray_FillWithScalar \ + (*(int (*)(PyArrayObject *, PyObject *)) \ + PyArray_API[104]) +#define PyArray_CheckStrides \ + (*(npy_bool (*)(int, int, npy_intp, npy_intp, npy_intp const *, npy_intp const *)) \ + PyArray_API[105]) +#define PyArray_DescrNewByteorder \ + (*(PyArray_Descr * (*)(PyArray_Descr *, char)) \ + PyArray_API[106]) +#define PyArray_IterAllButAxis \ + (*(PyObject * (*)(PyObject *, int *)) \ + PyArray_API[107]) +#define PyArray_CheckFromAny \ + (*(PyObject * (*)(PyObject *, PyArray_Descr *, int, int, int, PyObject *)) \ + PyArray_API[108]) +#define PyArray_FromArray \ + (*(PyObject * (*)(PyArrayObject *, PyArray_Descr *, int)) \ + PyArray_API[109]) +#define PyArray_FromInterface \ + (*(PyObject * (*)(PyObject *)) \ + PyArray_API[110]) +#define PyArray_FromStructInterface \ + (*(PyObject * (*)(PyObject *)) \ + PyArray_API[111]) +#define PyArray_FromArrayAttr \ + (*(PyObject * (*)(PyObject *, PyArray_Descr *, PyObject *)) \ + PyArray_API[112]) +#define PyArray_ScalarKind \ + (*(NPY_SCALARKIND (*)(int, PyArrayObject **)) \ + PyArray_API[113]) +#define PyArray_CanCoerceScalar \ + (*(int (*)(int, int, NPY_SCALARKIND)) \ + PyArray_API[114]) +#define PyArray_NewFlagsObject \ + (*(PyObject * (*)(PyObject *)) \ + PyArray_API[115]) +#define PyArray_CanCastScalar \ + (*(npy_bool (*)(PyTypeObject *, PyTypeObject *)) \ + PyArray_API[116]) +#define PyArray_CompareUCS4 \ + (*(int (*)(npy_ucs4 const *, npy_ucs4 const *, size_t)) \ + PyArray_API[117]) +#define PyArray_RemoveSmallest \ + (*(int (*)(PyArrayMultiIterObject *)) \ + PyArray_API[118]) +#define PyArray_ElementStrides \ + (*(int (*)(PyObject *)) \ + PyArray_API[119]) +#define PyArray_Item_INCREF \ + (*(void (*)(char *, PyArray_Descr *)) \ + PyArray_API[120]) +#define PyArray_Item_XDECREF \ + (*(void (*)(char *, PyArray_Descr *)) \ + PyArray_API[121]) +#define PyArray_FieldNames \ + (*(PyObject * (*)(PyObject *)) \ + PyArray_API[122]) +#define PyArray_Transpose \ + (*(PyObject * (*)(PyArrayObject *, PyArray_Dims *)) \ + PyArray_API[123]) +#define PyArray_TakeFrom \ + (*(PyObject * (*)(PyArrayObject *, PyObject *, int, PyArrayObject *, NPY_CLIPMODE)) \ + PyArray_API[124]) +#define PyArray_PutTo \ + (*(PyObject * (*)(PyArrayObject *, PyObject*, PyObject *, NPY_CLIPMODE)) \ + PyArray_API[125]) +#define PyArray_PutMask \ + (*(PyObject * (*)(PyArrayObject *, PyObject*, PyObject*)) \ + PyArray_API[126]) +#define PyArray_Repeat \ + (*(PyObject * (*)(PyArrayObject *, PyObject *, int)) \ + PyArray_API[127]) +#define PyArray_Choose \ + (*(PyObject * (*)(PyArrayObject *, PyObject *, PyArrayObject *, NPY_CLIPMODE)) \ + PyArray_API[128]) +#define PyArray_Sort \ + (*(int (*)(PyArrayObject *, int, NPY_SORTKIND)) \ + PyArray_API[129]) +#define PyArray_ArgSort \ + (*(PyObject * (*)(PyArrayObject *, int, NPY_SORTKIND)) \ + PyArray_API[130]) +#define PyArray_SearchSorted \ + (*(PyObject * (*)(PyArrayObject *, PyObject *, NPY_SEARCHSIDE, PyObject *)) \ + PyArray_API[131]) +#define PyArray_ArgMax \ + (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \ 
+ PyArray_API[132]) +#define PyArray_ArgMin \ + (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \ + PyArray_API[133]) +#define PyArray_Reshape \ + (*(PyObject * (*)(PyArrayObject *, PyObject *)) \ + PyArray_API[134]) +#define PyArray_Newshape \ + (*(PyObject * (*)(PyArrayObject *, PyArray_Dims *, NPY_ORDER)) \ + PyArray_API[135]) +#define PyArray_Squeeze \ + (*(PyObject * (*)(PyArrayObject *)) \ + PyArray_API[136]) +#define PyArray_View \ + (*(PyObject * (*)(PyArrayObject *, PyArray_Descr *, PyTypeObject *)) \ + PyArray_API[137]) +#define PyArray_SwapAxes \ + (*(PyObject * (*)(PyArrayObject *, int, int)) \ + PyArray_API[138]) +#define PyArray_Max \ + (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \ + PyArray_API[139]) +#define PyArray_Min \ + (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \ + PyArray_API[140]) +#define PyArray_Ptp \ + (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \ + PyArray_API[141]) +#define PyArray_Mean \ + (*(PyObject * (*)(PyArrayObject *, int, int, PyArrayObject *)) \ + PyArray_API[142]) +#define PyArray_Trace \ + (*(PyObject * (*)(PyArrayObject *, int, int, int, int, PyArrayObject *)) \ + PyArray_API[143]) +#define PyArray_Diagonal \ + (*(PyObject * (*)(PyArrayObject *, int, int, int)) \ + PyArray_API[144]) +#define PyArray_Clip \ + (*(PyObject * (*)(PyArrayObject *, PyObject *, PyObject *, PyArrayObject *)) \ + PyArray_API[145]) +#define PyArray_Conjugate \ + (*(PyObject * (*)(PyArrayObject *, PyArrayObject *)) \ + PyArray_API[146]) +#define PyArray_Nonzero \ + (*(PyObject * (*)(PyArrayObject *)) \ + PyArray_API[147]) +#define PyArray_Std \ + (*(PyObject * (*)(PyArrayObject *, int, int, PyArrayObject *, int)) \ + PyArray_API[148]) +#define PyArray_Sum \ + (*(PyObject * (*)(PyArrayObject *, int, int, PyArrayObject *)) \ + PyArray_API[149]) +#define PyArray_CumSum \ + (*(PyObject * (*)(PyArrayObject *, int, int, PyArrayObject *)) \ + PyArray_API[150]) +#define PyArray_Prod \ + (*(PyObject * (*)(PyArrayObject *, int, int, PyArrayObject *)) \ + PyArray_API[151]) +#define PyArray_CumProd \ + (*(PyObject * (*)(PyArrayObject *, int, int, PyArrayObject *)) \ + PyArray_API[152]) +#define PyArray_All \ + (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \ + PyArray_API[153]) +#define PyArray_Any \ + (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \ + PyArray_API[154]) +#define PyArray_Compress \ + (*(PyObject * (*)(PyArrayObject *, PyObject *, int, PyArrayObject *)) \ + PyArray_API[155]) +#define PyArray_Flatten \ + (*(PyObject * (*)(PyArrayObject *, NPY_ORDER)) \ + PyArray_API[156]) +#define PyArray_Ravel \ + (*(PyObject * (*)(PyArrayObject *, NPY_ORDER)) \ + PyArray_API[157]) +#define PyArray_MultiplyList \ + (*(npy_intp (*)(npy_intp const *, int)) \ + PyArray_API[158]) +#define PyArray_MultiplyIntList \ + (*(int (*)(int const *, int)) \ + PyArray_API[159]) +#define PyArray_GetPtr \ + (*(void * (*)(PyArrayObject *, npy_intp const*)) \ + PyArray_API[160]) +#define PyArray_CompareLists \ + (*(int (*)(npy_intp const *, npy_intp const *, int)) \ + PyArray_API[161]) +#define PyArray_AsCArray \ + (*(int (*)(PyObject **, void *, npy_intp *, int, PyArray_Descr*)) \ + PyArray_API[162]) +#define PyArray_As1D \ + (*(int (*)(PyObject **NPY_UNUSED(op), char **NPY_UNUSED(ptr), int *NPY_UNUSED(d1), int NPY_UNUSED(typecode))) \ + PyArray_API[163]) +#define PyArray_As2D \ + (*(int (*)(PyObject **NPY_UNUSED(op), char ***NPY_UNUSED(ptr), int *NPY_UNUSED(d1), int *NPY_UNUSED(d2), int NPY_UNUSED(typecode))) \ + PyArray_API[164]) 
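+/*
+ * Illustrative sketch (not part of the upstream header): in this
+ * non-_MULTIARRAYMODULE branch, every name above is a macro that calls
+ * indirectly through the PyArray_API pointer table, which
+ * import_array() / _import_array() (defined later in this header, unless
+ * NO_IMPORT/NO_IMPORT_ARRAY is set) fills in from the _ARRAY_API capsule
+ * at module-initialization time.  Assuming two hypothetical operands
+ * `dst` and `src` of type PyArrayObject *, a call such as
+ *
+ *     int rc = PyArray_CopyInto(dst, src);
+ *
+ * expands to an indirect call through slot 82 of the table:
+ *
+ *     int rc = (*(int (*)(PyArrayObject *, PyArrayObject *))
+ *               PyArray_API[82])(dst, src);
+ *
+ * so client extension modules never link against NumPy's C symbols
+ * directly; they only need the capsule exported by
+ * numpy.core._multiarray_umath.
+ */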
+#define PyArray_Free \ + (*(int (*)(PyObject *, void *)) \ + PyArray_API[165]) +#define PyArray_Converter \ + (*(int (*)(PyObject *, PyObject **)) \ + PyArray_API[166]) +#define PyArray_IntpFromSequence \ + (*(int (*)(PyObject *, npy_intp *, int)) \ + PyArray_API[167]) +#define PyArray_Concatenate \ + (*(PyObject * (*)(PyObject *, int)) \ + PyArray_API[168]) +#define PyArray_InnerProduct \ + (*(PyObject * (*)(PyObject *, PyObject *)) \ + PyArray_API[169]) +#define PyArray_MatrixProduct \ + (*(PyObject * (*)(PyObject *, PyObject *)) \ + PyArray_API[170]) +#define PyArray_CopyAndTranspose \ + (*(PyObject * (*)(PyObject *)) \ + PyArray_API[171]) +#define PyArray_Correlate \ + (*(PyObject * (*)(PyObject *, PyObject *, int)) \ + PyArray_API[172]) +#define PyArray_TypestrConvert \ + (*(int (*)(int, int)) \ + PyArray_API[173]) +#define PyArray_DescrConverter \ + (*(int (*)(PyObject *, PyArray_Descr **)) \ + PyArray_API[174]) +#define PyArray_DescrConverter2 \ + (*(int (*)(PyObject *, PyArray_Descr **)) \ + PyArray_API[175]) +#define PyArray_IntpConverter \ + (*(int (*)(PyObject *, PyArray_Dims *)) \ + PyArray_API[176]) +#define PyArray_BufferConverter \ + (*(int (*)(PyObject *, PyArray_Chunk *)) \ + PyArray_API[177]) +#define PyArray_AxisConverter \ + (*(int (*)(PyObject *, int *)) \ + PyArray_API[178]) +#define PyArray_BoolConverter \ + (*(int (*)(PyObject *, npy_bool *)) \ + PyArray_API[179]) +#define PyArray_ByteorderConverter \ + (*(int (*)(PyObject *, char *)) \ + PyArray_API[180]) +#define PyArray_OrderConverter \ + (*(int (*)(PyObject *, NPY_ORDER *)) \ + PyArray_API[181]) +#define PyArray_EquivTypes \ + (*(unsigned char (*)(PyArray_Descr *, PyArray_Descr *)) \ + PyArray_API[182]) +#define PyArray_Zeros \ + (*(PyObject * (*)(int, npy_intp const *, PyArray_Descr *, int)) \ + PyArray_API[183]) +#define PyArray_Empty \ + (*(PyObject * (*)(int, npy_intp const *, PyArray_Descr *, int)) \ + PyArray_API[184]) +#define PyArray_Where \ + (*(PyObject * (*)(PyObject *, PyObject *, PyObject *)) \ + PyArray_API[185]) +#define PyArray_Arange \ + (*(PyObject * (*)(double, double, double, int)) \ + PyArray_API[186]) +#define PyArray_ArangeObj \ + (*(PyObject * (*)(PyObject *, PyObject *, PyObject *, PyArray_Descr *)) \ + PyArray_API[187]) +#define PyArray_SortkindConverter \ + (*(int (*)(PyObject *, NPY_SORTKIND *)) \ + PyArray_API[188]) +#define PyArray_LexSort \ + (*(PyObject * (*)(PyObject *, int)) \ + PyArray_API[189]) +#define PyArray_Round \ + (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \ + PyArray_API[190]) +#define PyArray_EquivTypenums \ + (*(unsigned char (*)(int, int)) \ + PyArray_API[191]) +#define PyArray_RegisterDataType \ + (*(int (*)(PyArray_Descr *)) \ + PyArray_API[192]) +#define PyArray_RegisterCastFunc \ + (*(int (*)(PyArray_Descr *, int, PyArray_VectorUnaryFunc *)) \ + PyArray_API[193]) +#define PyArray_RegisterCanCast \ + (*(int (*)(PyArray_Descr *, int, NPY_SCALARKIND)) \ + PyArray_API[194]) +#define PyArray_InitArrFuncs \ + (*(void (*)(PyArray_ArrFuncs *)) \ + PyArray_API[195]) +#define PyArray_IntTupleFromIntp \ + (*(PyObject * (*)(int, npy_intp const *)) \ + PyArray_API[196]) +#define PyArray_TypeNumFromName \ + (*(int (*)(char const *)) \ + PyArray_API[197]) +#define PyArray_ClipmodeConverter \ + (*(int (*)(PyObject *, NPY_CLIPMODE *)) \ + PyArray_API[198]) +#define PyArray_OutputConverter \ + (*(int (*)(PyObject *, PyArrayObject **)) \ + PyArray_API[199]) +#define PyArray_BroadcastToShape \ + (*(PyObject * (*)(PyObject *, npy_intp *, int)) \ + PyArray_API[200]) 
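+/*
+ * Illustrative sketch (not part of the upstream header): the *Converter
+ * functions defined above follow CPython's "O&" argument-conversion
+ * protocol (a converter takes a PyObject * and an output address and
+ * returns non-zero on success), so they can be passed to
+ * PyArg_ParseTuple.  The function and variable names below are
+ * hypothetical and the block is excluded from compilation.
+ */
+#if 0   /* example only */
+static PyObject *
+example_parse_dtype_and_order(PyObject *self, PyObject *args)
+{
+    PyArray_Descr *dtype = NULL;    /* set by PyArray_DescrConverter (new reference) */
+    NPY_ORDER order = NPY_CORDER;   /* set by PyArray_OrderConverter */
+
+    if (!PyArg_ParseTuple(args, "O&O&",
+                          PyArray_DescrConverter, &dtype,
+                          PyArray_OrderConverter, &order)) {
+        return NULL;
+    }
+    /* ... use dtype and order here ... */
+    Py_DECREF(dtype);
+    Py_RETURN_NONE;
+}
+#endif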
+#define _PyArray_SigintHandler \ + (*(void (*)(int)) \ + PyArray_API[201]) +#define _PyArray_GetSigintBuf \ + (*(void* (*)(void)) \ + PyArray_API[202]) +#define PyArray_DescrAlignConverter \ + (*(int (*)(PyObject *, PyArray_Descr **)) \ + PyArray_API[203]) +#define PyArray_DescrAlignConverter2 \ + (*(int (*)(PyObject *, PyArray_Descr **)) \ + PyArray_API[204]) +#define PyArray_SearchsideConverter \ + (*(int (*)(PyObject *, void *)) \ + PyArray_API[205]) +#define PyArray_CheckAxis \ + (*(PyObject * (*)(PyArrayObject *, int *, int)) \ + PyArray_API[206]) +#define PyArray_OverflowMultiplyList \ + (*(npy_intp (*)(npy_intp const *, int)) \ + PyArray_API[207]) +#define PyArray_CompareString \ + (*(int (*)(const char *, const char *, size_t)) \ + PyArray_API[208]) +#define PyArray_MultiIterFromObjects \ + (*(PyObject* (*)(PyObject **, int, int, ...)) \ + PyArray_API[209]) +#define PyArray_GetEndianness \ + (*(int (*)(void)) \ + PyArray_API[210]) +#define PyArray_GetNDArrayCFeatureVersion \ + (*(unsigned int (*)(void)) \ + PyArray_API[211]) +#define PyArray_Correlate2 \ + (*(PyObject * (*)(PyObject *, PyObject *, int)) \ + PyArray_API[212]) +#define PyArray_NeighborhoodIterNew \ + (*(PyObject* (*)(PyArrayIterObject *, const npy_intp *, int, PyArrayObject*)) \ + PyArray_API[213]) +#define PyTimeIntegerArrType_Type (*(PyTypeObject *)PyArray_API[214]) +#define PyDatetimeArrType_Type (*(PyTypeObject *)PyArray_API[215]) +#define PyTimedeltaArrType_Type (*(PyTypeObject *)PyArray_API[216]) +#define PyHalfArrType_Type (*(PyTypeObject *)PyArray_API[217]) +#define NpyIter_Type (*(PyTypeObject *)PyArray_API[218]) +#define PyArray_SetDatetimeParseFunction \ + (*(void (*)(PyObject *NPY_UNUSED(op))) \ + PyArray_API[219]) +#define PyArray_DatetimeToDatetimeStruct \ + (*(void (*)(npy_datetime NPY_UNUSED(val), NPY_DATETIMEUNIT NPY_UNUSED(fr), npy_datetimestruct *)) \ + PyArray_API[220]) +#define PyArray_TimedeltaToTimedeltaStruct \ + (*(void (*)(npy_timedelta NPY_UNUSED(val), NPY_DATETIMEUNIT NPY_UNUSED(fr), npy_timedeltastruct *)) \ + PyArray_API[221]) +#define PyArray_DatetimeStructToDatetime \ + (*(npy_datetime (*)(NPY_DATETIMEUNIT NPY_UNUSED(fr), npy_datetimestruct *NPY_UNUSED(d))) \ + PyArray_API[222]) +#define PyArray_TimedeltaStructToTimedelta \ + (*(npy_datetime (*)(NPY_DATETIMEUNIT NPY_UNUSED(fr), npy_timedeltastruct *NPY_UNUSED(d))) \ + PyArray_API[223]) +#define NpyIter_New \ + (*(NpyIter * (*)(PyArrayObject *, npy_uint32, NPY_ORDER, NPY_CASTING, PyArray_Descr*)) \ + PyArray_API[224]) +#define NpyIter_MultiNew \ + (*(NpyIter * (*)(int, PyArrayObject **, npy_uint32, NPY_ORDER, NPY_CASTING, npy_uint32 *, PyArray_Descr **)) \ + PyArray_API[225]) +#define NpyIter_AdvancedNew \ + (*(NpyIter * (*)(int, PyArrayObject **, npy_uint32, NPY_ORDER, NPY_CASTING, npy_uint32 *, PyArray_Descr **, int, int **, npy_intp *, npy_intp)) \ + PyArray_API[226]) +#define NpyIter_Copy \ + (*(NpyIter * (*)(NpyIter *)) \ + PyArray_API[227]) +#define NpyIter_Deallocate \ + (*(int (*)(NpyIter *)) \ + PyArray_API[228]) +#define NpyIter_HasDelayedBufAlloc \ + (*(npy_bool (*)(NpyIter *)) \ + PyArray_API[229]) +#define NpyIter_HasExternalLoop \ + (*(npy_bool (*)(NpyIter *)) \ + PyArray_API[230]) +#define NpyIter_EnableExternalLoop \ + (*(int (*)(NpyIter *)) \ + PyArray_API[231]) +#define NpyIter_GetInnerStrideArray \ + (*(npy_intp * (*)(NpyIter *)) \ + PyArray_API[232]) +#define NpyIter_GetInnerLoopSizePtr \ + (*(npy_intp * (*)(NpyIter *)) \ + PyArray_API[233]) +#define NpyIter_Reset \ + (*(int (*)(NpyIter *, char **)) \ + 
PyArray_API[234]) +#define NpyIter_ResetBasePointers \ + (*(int (*)(NpyIter *, char **, char **)) \ + PyArray_API[235]) +#define NpyIter_ResetToIterIndexRange \ + (*(int (*)(NpyIter *, npy_intp, npy_intp, char **)) \ + PyArray_API[236]) +#define NpyIter_GetNDim \ + (*(int (*)(NpyIter *)) \ + PyArray_API[237]) +#define NpyIter_GetNOp \ + (*(int (*)(NpyIter *)) \ + PyArray_API[238]) +#define NpyIter_GetIterNext \ + (*(NpyIter_IterNextFunc * (*)(NpyIter *, char **)) \ + PyArray_API[239]) +#define NpyIter_GetIterSize \ + (*(npy_intp (*)(NpyIter *)) \ + PyArray_API[240]) +#define NpyIter_GetIterIndexRange \ + (*(void (*)(NpyIter *, npy_intp *, npy_intp *)) \ + PyArray_API[241]) +#define NpyIter_GetIterIndex \ + (*(npy_intp (*)(NpyIter *)) \ + PyArray_API[242]) +#define NpyIter_GotoIterIndex \ + (*(int (*)(NpyIter *, npy_intp)) \ + PyArray_API[243]) +#define NpyIter_HasMultiIndex \ + (*(npy_bool (*)(NpyIter *)) \ + PyArray_API[244]) +#define NpyIter_GetShape \ + (*(int (*)(NpyIter *, npy_intp *)) \ + PyArray_API[245]) +#define NpyIter_GetGetMultiIndex \ + (*(NpyIter_GetMultiIndexFunc * (*)(NpyIter *, char **)) \ + PyArray_API[246]) +#define NpyIter_GotoMultiIndex \ + (*(int (*)(NpyIter *, npy_intp const *)) \ + PyArray_API[247]) +#define NpyIter_RemoveMultiIndex \ + (*(int (*)(NpyIter *)) \ + PyArray_API[248]) +#define NpyIter_HasIndex \ + (*(npy_bool (*)(NpyIter *)) \ + PyArray_API[249]) +#define NpyIter_IsBuffered \ + (*(npy_bool (*)(NpyIter *)) \ + PyArray_API[250]) +#define NpyIter_IsGrowInner \ + (*(npy_bool (*)(NpyIter *)) \ + PyArray_API[251]) +#define NpyIter_GetBufferSize \ + (*(npy_intp (*)(NpyIter *)) \ + PyArray_API[252]) +#define NpyIter_GetIndexPtr \ + (*(npy_intp * (*)(NpyIter *)) \ + PyArray_API[253]) +#define NpyIter_GotoIndex \ + (*(int (*)(NpyIter *, npy_intp)) \ + PyArray_API[254]) +#define NpyIter_GetDataPtrArray \ + (*(char ** (*)(NpyIter *)) \ + PyArray_API[255]) +#define NpyIter_GetDescrArray \ + (*(PyArray_Descr ** (*)(NpyIter *)) \ + PyArray_API[256]) +#define NpyIter_GetOperandArray \ + (*(PyArrayObject ** (*)(NpyIter *)) \ + PyArray_API[257]) +#define NpyIter_GetIterView \ + (*(PyArrayObject * (*)(NpyIter *, npy_intp)) \ + PyArray_API[258]) +#define NpyIter_GetReadFlags \ + (*(void (*)(NpyIter *, char *)) \ + PyArray_API[259]) +#define NpyIter_GetWriteFlags \ + (*(void (*)(NpyIter *, char *)) \ + PyArray_API[260]) +#define NpyIter_DebugPrint \ + (*(void (*)(NpyIter *)) \ + PyArray_API[261]) +#define NpyIter_IterationNeedsAPI \ + (*(npy_bool (*)(NpyIter *)) \ + PyArray_API[262]) +#define NpyIter_GetInnerFixedStrideArray \ + (*(void (*)(NpyIter *, npy_intp *)) \ + PyArray_API[263]) +#define NpyIter_RemoveAxis \ + (*(int (*)(NpyIter *, int)) \ + PyArray_API[264]) +#define NpyIter_GetAxisStrideArray \ + (*(npy_intp * (*)(NpyIter *, int)) \ + PyArray_API[265]) +#define NpyIter_RequiresBuffering \ + (*(npy_bool (*)(NpyIter *)) \ + PyArray_API[266]) +#define NpyIter_GetInitialDataPtrArray \ + (*(char ** (*)(NpyIter *)) \ + PyArray_API[267]) +#define NpyIter_CreateCompatibleStrides \ + (*(int (*)(NpyIter *, npy_intp, npy_intp *)) \ + PyArray_API[268]) +#define PyArray_CastingConverter \ + (*(int (*)(PyObject *, NPY_CASTING *)) \ + PyArray_API[269]) +#define PyArray_CountNonzero \ + (*(npy_intp (*)(PyArrayObject *)) \ + PyArray_API[270]) +#define PyArray_PromoteTypes \ + (*(PyArray_Descr * (*)(PyArray_Descr *, PyArray_Descr *)) \ + PyArray_API[271]) +#define PyArray_MinScalarType \ + (*(PyArray_Descr * (*)(PyArrayObject *)) \ + PyArray_API[272]) +#define PyArray_ResultType \ + 
(*(PyArray_Descr * (*)(npy_intp, PyArrayObject *arrs[], npy_intp, PyArray_Descr *descrs[])) \ + PyArray_API[273]) +#define PyArray_CanCastArrayTo \ + (*(npy_bool (*)(PyArrayObject *, PyArray_Descr *, NPY_CASTING)) \ + PyArray_API[274]) +#define PyArray_CanCastTypeTo \ + (*(npy_bool (*)(PyArray_Descr *, PyArray_Descr *, NPY_CASTING)) \ + PyArray_API[275]) +#define PyArray_EinsteinSum \ + (*(PyArrayObject * (*)(char *, npy_intp, PyArrayObject **, PyArray_Descr *, NPY_ORDER, NPY_CASTING, PyArrayObject *)) \ + PyArray_API[276]) +#define PyArray_NewLikeArray \ + (*(PyObject * (*)(PyArrayObject *, NPY_ORDER, PyArray_Descr *, int)) \ + PyArray_API[277]) +#define PyArray_GetArrayParamsFromObject \ + (*(int (*)(PyObject *NPY_UNUSED(op), PyArray_Descr *NPY_UNUSED(requested_dtype), npy_bool NPY_UNUSED(writeable), PyArray_Descr **NPY_UNUSED(out_dtype), int *NPY_UNUSED(out_ndim), npy_intp *NPY_UNUSED(out_dims), PyArrayObject **NPY_UNUSED(out_arr), PyObject *NPY_UNUSED(context))) \ + PyArray_API[278]) +#define PyArray_ConvertClipmodeSequence \ + (*(int (*)(PyObject *, NPY_CLIPMODE *, int)) \ + PyArray_API[279]) +#define PyArray_MatrixProduct2 \ + (*(PyObject * (*)(PyObject *, PyObject *, PyArrayObject*)) \ + PyArray_API[280]) +#define NpyIter_IsFirstVisit \ + (*(npy_bool (*)(NpyIter *, int)) \ + PyArray_API[281]) +#define PyArray_SetBaseObject \ + (*(int (*)(PyArrayObject *, PyObject *)) \ + PyArray_API[282]) +#define PyArray_CreateSortedStridePerm \ + (*(void (*)(int, npy_intp const *, npy_stride_sort_item *)) \ + PyArray_API[283]) +#define PyArray_RemoveAxesInPlace \ + (*(void (*)(PyArrayObject *, const npy_bool *)) \ + PyArray_API[284]) +#define PyArray_DebugPrint \ + (*(void (*)(PyArrayObject *)) \ + PyArray_API[285]) +#define PyArray_FailUnlessWriteable \ + (*(int (*)(PyArrayObject *, const char *)) \ + PyArray_API[286]) +#define PyArray_SetUpdateIfCopyBase \ + (*(int (*)(PyArrayObject *, PyArrayObject *)) \ + PyArray_API[287]) +#define PyDataMem_NEW \ + (*(void * (*)(size_t)) \ + PyArray_API[288]) +#define PyDataMem_FREE \ + (*(void (*)(void *)) \ + PyArray_API[289]) +#define PyDataMem_RENEW \ + (*(void * (*)(void *, size_t)) \ + PyArray_API[290]) +#define PyDataMem_SetEventHook \ + (*(PyDataMem_EventHookFunc * (*)(PyDataMem_EventHookFunc *, void *, void **)) \ + PyArray_API[291]) +#define NPY_DEFAULT_ASSIGN_CASTING (*(NPY_CASTING *)PyArray_API[292]) +#define PyArray_MapIterSwapAxes \ + (*(void (*)(PyArrayMapIterObject *, PyArrayObject **, int)) \ + PyArray_API[293]) +#define PyArray_MapIterArray \ + (*(PyObject * (*)(PyArrayObject *, PyObject *)) \ + PyArray_API[294]) +#define PyArray_MapIterNext \ + (*(void (*)(PyArrayMapIterObject *)) \ + PyArray_API[295]) +#define PyArray_Partition \ + (*(int (*)(PyArrayObject *, PyArrayObject *, int, NPY_SELECTKIND)) \ + PyArray_API[296]) +#define PyArray_ArgPartition \ + (*(PyObject * (*)(PyArrayObject *, PyArrayObject *, int, NPY_SELECTKIND)) \ + PyArray_API[297]) +#define PyArray_SelectkindConverter \ + (*(int (*)(PyObject *, NPY_SELECTKIND *)) \ + PyArray_API[298]) +#define PyDataMem_NEW_ZEROED \ + (*(void * (*)(size_t, size_t)) \ + PyArray_API[299]) +#define PyArray_CheckAnyScalarExact \ + (*(int (*)(PyObject *)) \ + PyArray_API[300]) +#define PyArray_MapIterArrayCopyIfOverlap \ + (*(PyObject * (*)(PyArrayObject *, PyObject *, int, PyArrayObject *)) \ + PyArray_API[301]) +#define PyArray_ResolveWritebackIfCopy \ + (*(int (*)(PyArrayObject *)) \ + PyArray_API[302]) +#define PyArray_SetWritebackIfCopyBase \ + (*(int (*)(PyArrayObject *, PyArrayObject *)) \ 
+ PyArray_API[303]) + +#if NPY_FEATURE_VERSION >= NPY_1_22_API_VERSION +#define PyDataMem_SetHandler \ + (*(PyObject * (*)(PyObject *)) \ + PyArray_API[304]) +#endif + +#if NPY_FEATURE_VERSION >= NPY_1_22_API_VERSION +#define PyDataMem_GetHandler \ + (*(PyObject * (*)(void)) \ + PyArray_API[305]) +#endif +#define PyDataMem_DefaultHandler (*(PyObject* *)PyArray_API[306]) + +#if !defined(NO_IMPORT_ARRAY) && !defined(NO_IMPORT) +static int +_import_array(void) +{ + int st; + PyObject *numpy = PyImport_ImportModule("numpy.core._multiarray_umath"); + PyObject *c_api = NULL; + + if (numpy == NULL) { + return -1; + } + c_api = PyObject_GetAttrString(numpy, "_ARRAY_API"); + Py_DECREF(numpy); + if (c_api == NULL) { + return -1; + } + + if (!PyCapsule_CheckExact(c_api)) { + PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is not PyCapsule object"); + Py_DECREF(c_api); + return -1; + } + PyArray_API = (void **)PyCapsule_GetPointer(c_api, NULL); + Py_DECREF(c_api); + if (PyArray_API == NULL) { + PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is NULL pointer"); + return -1; + } + + /* Perform runtime check of C API version */ + if (NPY_VERSION != PyArray_GetNDArrayCVersion()) { + PyErr_Format(PyExc_RuntimeError, "module compiled against "\ + "ABI version 0x%x but this version of numpy is 0x%x", \ + (int) NPY_VERSION, (int) PyArray_GetNDArrayCVersion()); + return -1; + } + if (NPY_FEATURE_VERSION > PyArray_GetNDArrayCFeatureVersion()) { + PyErr_Format(PyExc_RuntimeError, "module compiled against "\ + "API version 0x%x but this version of numpy is 0x%x . "\ + "Check the section C-API incompatibility at the "\ + "Troubleshooting ImportError section at "\ + "https://numpy.org/devdocs/user/troubleshooting-importerror.html"\ + "#c-api-incompatibility "\ + "for indications on how to solve this problem .", \ + (int) NPY_FEATURE_VERSION, (int) PyArray_GetNDArrayCFeatureVersion()); + return -1; + } + + /* + * Perform runtime check of endianness and check it matches the one set by + * the headers (npy_endian.h) as a safeguard + */ + st = PyArray_GetEndianness(); + if (st == NPY_CPU_UNKNOWN_ENDIAN) { + PyErr_SetString(PyExc_RuntimeError, + "FATAL: module compiled as unknown endian"); + return -1; + } +#if NPY_BYTE_ORDER == NPY_BIG_ENDIAN + if (st != NPY_CPU_BIG) { + PyErr_SetString(PyExc_RuntimeError, + "FATAL: module compiled as big endian, but " + "detected different endianness at runtime"); + return -1; + } +#elif NPY_BYTE_ORDER == NPY_LITTLE_ENDIAN + if (st != NPY_CPU_LITTLE) { + PyErr_SetString(PyExc_RuntimeError, + "FATAL: module compiled as little endian, but " + "detected different endianness at runtime"); + return -1; + } +#endif + + return 0; +} + +#define import_array() {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import"); return NULL; } } + +#define import_array1(ret) {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import"); return ret; } } + +#define import_array2(msg, ret) {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, msg); return ret; } } + +#endif + +#endif diff --git a/.venv/lib/python3.12/site-packages/numpy/core/include/numpy/__ufunc_api.c b/.venv/lib/python3.12/site-packages/numpy/core/include/numpy/__ufunc_api.c new file mode 100644 index 00000000..d1b4a87b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/include/numpy/__ufunc_api.c @@ -0,0 +1,50 @@ + +/* These pointers will be stored in the C-object for use in other + 
extension modules +*/ + +void *PyUFunc_API[] = { + (void *) &PyUFunc_Type, + (void *) PyUFunc_FromFuncAndData, + (void *) PyUFunc_RegisterLoopForType, + (void *) PyUFunc_GenericFunction, + (void *) PyUFunc_f_f_As_d_d, + (void *) PyUFunc_d_d, + (void *) PyUFunc_f_f, + (void *) PyUFunc_g_g, + (void *) PyUFunc_F_F_As_D_D, + (void *) PyUFunc_F_F, + (void *) PyUFunc_D_D, + (void *) PyUFunc_G_G, + (void *) PyUFunc_O_O, + (void *) PyUFunc_ff_f_As_dd_d, + (void *) PyUFunc_ff_f, + (void *) PyUFunc_dd_d, + (void *) PyUFunc_gg_g, + (void *) PyUFunc_FF_F_As_DD_D, + (void *) PyUFunc_DD_D, + (void *) PyUFunc_FF_F, + (void *) PyUFunc_GG_G, + (void *) PyUFunc_OO_O, + (void *) PyUFunc_O_O_method, + (void *) PyUFunc_OO_O_method, + (void *) PyUFunc_On_Om, + (void *) PyUFunc_GetPyValues, + (void *) PyUFunc_checkfperr, + (void *) PyUFunc_clearfperr, + (void *) PyUFunc_getfperr, + (void *) PyUFunc_handlefperr, + (void *) PyUFunc_ReplaceLoopBySignature, + (void *) PyUFunc_FromFuncAndDataAndSignature, + (void *) PyUFunc_SetUsesArraysAsData, + (void *) PyUFunc_e_e, + (void *) PyUFunc_e_e_As_f_f, + (void *) PyUFunc_e_e_As_d_d, + (void *) PyUFunc_ee_e, + (void *) PyUFunc_ee_e_As_ff_f, + (void *) PyUFunc_ee_e_As_dd_d, + (void *) PyUFunc_DefaultTypeResolver, + (void *) PyUFunc_ValidateCasting, + (void *) PyUFunc_RegisterLoopForDescr, + (void *) PyUFunc_FromFuncAndDataAndSignatureAndIdentity +}; diff --git a/.venv/lib/python3.12/site-packages/numpy/core/include/numpy/__ufunc_api.h b/.venv/lib/python3.12/site-packages/numpy/core/include/numpy/__ufunc_api.h new file mode 100644 index 00000000..e2efe29e --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/include/numpy/__ufunc_api.h @@ -0,0 +1,314 @@ + +#ifdef _UMATHMODULE + +extern NPY_NO_EXPORT PyTypeObject PyUFunc_Type; + +extern NPY_NO_EXPORT PyTypeObject PyUFunc_Type; + +NPY_NO_EXPORT PyObject * PyUFunc_FromFuncAndData \ + (PyUFuncGenericFunction *, void **, char *, int, int, int, int, const char *, const char *, int); +NPY_NO_EXPORT int PyUFunc_RegisterLoopForType \ + (PyUFuncObject *, int, PyUFuncGenericFunction, const int *, void *); +NPY_NO_EXPORT int PyUFunc_GenericFunction \ + (PyUFuncObject *NPY_UNUSED(ufunc), PyObject *NPY_UNUSED(args), PyObject *NPY_UNUSED(kwds), PyArrayObject **NPY_UNUSED(op)); +NPY_NO_EXPORT void PyUFunc_f_f_As_d_d \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_d_d \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_f_f \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_g_g \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_F_F_As_D_D \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_F_F \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_D_D \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_G_G \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_O_O \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_ff_f_As_dd_d \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_ff_f \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_dd_d \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_gg_g \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_FF_F_As_DD_D \ + (char **, npy_intp 
const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_DD_D \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_FF_F \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_GG_G \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_OO_O \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_O_O_method \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_OO_O_method \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_On_Om \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT int PyUFunc_GetPyValues \ + (char *, int *, int *, PyObject **); +NPY_NO_EXPORT int PyUFunc_checkfperr \ + (int, PyObject *, int *); +NPY_NO_EXPORT void PyUFunc_clearfperr \ + (void); +NPY_NO_EXPORT int PyUFunc_getfperr \ + (void); +NPY_NO_EXPORT int PyUFunc_handlefperr \ + (int, PyObject *, int, int *); +NPY_NO_EXPORT int PyUFunc_ReplaceLoopBySignature \ + (PyUFuncObject *, PyUFuncGenericFunction, const int *, PyUFuncGenericFunction *); +NPY_NO_EXPORT PyObject * PyUFunc_FromFuncAndDataAndSignature \ + (PyUFuncGenericFunction *, void **, char *, int, int, int, int, const char *, const char *, int, const char *); +NPY_NO_EXPORT int PyUFunc_SetUsesArraysAsData \ + (void **NPY_UNUSED(data), size_t NPY_UNUSED(i)); +NPY_NO_EXPORT void PyUFunc_e_e \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_e_e_As_f_f \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_e_e_As_d_d \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_ee_e \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_ee_e_As_ff_f \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_ee_e_As_dd_d \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT int PyUFunc_DefaultTypeResolver \ + (PyUFuncObject *, NPY_CASTING, PyArrayObject **, PyObject *, PyArray_Descr **); +NPY_NO_EXPORT int PyUFunc_ValidateCasting \ + (PyUFuncObject *, NPY_CASTING, PyArrayObject **, PyArray_Descr **); +NPY_NO_EXPORT int PyUFunc_RegisterLoopForDescr \ + (PyUFuncObject *, PyArray_Descr *, PyUFuncGenericFunction, PyArray_Descr **, void *); +NPY_NO_EXPORT PyObject * PyUFunc_FromFuncAndDataAndSignatureAndIdentity \ + (PyUFuncGenericFunction *, void **, char *, int, int, int, int, const char *, const char *, const int, const char *, PyObject *); + +#else + +#if defined(PY_UFUNC_UNIQUE_SYMBOL) +#define PyUFunc_API PY_UFUNC_UNIQUE_SYMBOL +#endif + +#if defined(NO_IMPORT) || defined(NO_IMPORT_UFUNC) +extern void **PyUFunc_API; +#else +#if defined(PY_UFUNC_UNIQUE_SYMBOL) +void **PyUFunc_API; +#else +static void **PyUFunc_API=NULL; +#endif +#endif + +#define PyUFunc_Type (*(PyTypeObject *)PyUFunc_API[0]) +#define PyUFunc_FromFuncAndData \ + (*(PyObject * (*)(PyUFuncGenericFunction *, void **, char *, int, int, int, int, const char *, const char *, int)) \ + PyUFunc_API[1]) +#define PyUFunc_RegisterLoopForType \ + (*(int (*)(PyUFuncObject *, int, PyUFuncGenericFunction, const int *, void *)) \ + PyUFunc_API[2]) +#define PyUFunc_GenericFunction \ + (*(int (*)(PyUFuncObject *NPY_UNUSED(ufunc), PyObject *NPY_UNUSED(args), PyObject *NPY_UNUSED(kwds), PyArrayObject **NPY_UNUSED(op))) \ + PyUFunc_API[3]) +#define PyUFunc_f_f_As_d_d \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, 
void *)) \ + PyUFunc_API[4]) +#define PyUFunc_d_d \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[5]) +#define PyUFunc_f_f \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[6]) +#define PyUFunc_g_g \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[7]) +#define PyUFunc_F_F_As_D_D \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[8]) +#define PyUFunc_F_F \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[9]) +#define PyUFunc_D_D \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[10]) +#define PyUFunc_G_G \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[11]) +#define PyUFunc_O_O \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[12]) +#define PyUFunc_ff_f_As_dd_d \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[13]) +#define PyUFunc_ff_f \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[14]) +#define PyUFunc_dd_d \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[15]) +#define PyUFunc_gg_g \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[16]) +#define PyUFunc_FF_F_As_DD_D \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[17]) +#define PyUFunc_DD_D \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[18]) +#define PyUFunc_FF_F \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[19]) +#define PyUFunc_GG_G \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[20]) +#define PyUFunc_OO_O \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[21]) +#define PyUFunc_O_O_method \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[22]) +#define PyUFunc_OO_O_method \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[23]) +#define PyUFunc_On_Om \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[24]) +#define PyUFunc_GetPyValues \ + (*(int (*)(char *, int *, int *, PyObject **)) \ + PyUFunc_API[25]) +#define PyUFunc_checkfperr \ + (*(int (*)(int, PyObject *, int *)) \ + PyUFunc_API[26]) +#define PyUFunc_clearfperr \ + (*(void (*)(void)) \ + PyUFunc_API[27]) +#define PyUFunc_getfperr \ + (*(int (*)(void)) \ + PyUFunc_API[28]) +#define PyUFunc_handlefperr \ + (*(int (*)(int, PyObject *, int, int *)) \ + PyUFunc_API[29]) +#define PyUFunc_ReplaceLoopBySignature \ + (*(int (*)(PyUFuncObject *, PyUFuncGenericFunction, const int *, PyUFuncGenericFunction *)) \ + PyUFunc_API[30]) +#define PyUFunc_FromFuncAndDataAndSignature \ + (*(PyObject * (*)(PyUFuncGenericFunction *, void **, char *, int, int, int, int, const char *, const char *, int, const char *)) \ + PyUFunc_API[31]) +#define PyUFunc_SetUsesArraysAsData \ + (*(int (*)(void **NPY_UNUSED(data), size_t NPY_UNUSED(i))) \ + PyUFunc_API[32]) +#define PyUFunc_e_e \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[33]) +#define PyUFunc_e_e_As_f_f \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[34]) +#define PyUFunc_e_e_As_d_d \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + 
PyUFunc_API[35]) +#define PyUFunc_ee_e \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[36]) +#define PyUFunc_ee_e_As_ff_f \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[37]) +#define PyUFunc_ee_e_As_dd_d \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[38]) +#define PyUFunc_DefaultTypeResolver \ + (*(int (*)(PyUFuncObject *, NPY_CASTING, PyArrayObject **, PyObject *, PyArray_Descr **)) \ + PyUFunc_API[39]) +#define PyUFunc_ValidateCasting \ + (*(int (*)(PyUFuncObject *, NPY_CASTING, PyArrayObject **, PyArray_Descr **)) \ + PyUFunc_API[40]) +#define PyUFunc_RegisterLoopForDescr \ + (*(int (*)(PyUFuncObject *, PyArray_Descr *, PyUFuncGenericFunction, PyArray_Descr **, void *)) \ + PyUFunc_API[41]) + +#if NPY_FEATURE_VERSION >= NPY_1_16_API_VERSION +#define PyUFunc_FromFuncAndDataAndSignatureAndIdentity \ + (*(PyObject * (*)(PyUFuncGenericFunction *, void **, char *, int, int, int, int, const char *, const char *, const int, const char *, PyObject *)) \ + PyUFunc_API[42]) +#endif + +static inline int +_import_umath(void) +{ + PyObject *numpy = PyImport_ImportModule("numpy.core._multiarray_umath"); + PyObject *c_api = NULL; + + if (numpy == NULL) { + PyErr_SetString(PyExc_ImportError, + "numpy.core._multiarray_umath failed to import"); + return -1; + } + c_api = PyObject_GetAttrString(numpy, "_UFUNC_API"); + Py_DECREF(numpy); + if (c_api == NULL) { + PyErr_SetString(PyExc_AttributeError, "_UFUNC_API not found"); + return -1; + } + + if (!PyCapsule_CheckExact(c_api)) { + PyErr_SetString(PyExc_RuntimeError, "_UFUNC_API is not PyCapsule object"); + Py_DECREF(c_api); + return -1; + } + PyUFunc_API = (void **)PyCapsule_GetPointer(c_api, NULL); + Py_DECREF(c_api); + if (PyUFunc_API == NULL) { + PyErr_SetString(PyExc_RuntimeError, "_UFUNC_API is NULL pointer"); + return -1; + } + return 0; +} + +#define import_umath() \ + do {\ + UFUNC_NOFPE\ + if (_import_umath() < 0) {\ + PyErr_Print();\ + PyErr_SetString(PyExc_ImportError,\ + "numpy.core.umath failed to import");\ + return NULL;\ + }\ + } while(0) + +#define import_umath1(ret) \ + do {\ + UFUNC_NOFPE\ + if (_import_umath() < 0) {\ + PyErr_Print();\ + PyErr_SetString(PyExc_ImportError,\ + "numpy.core.umath failed to import");\ + return ret;\ + }\ + } while(0) + +#define import_umath2(ret, msg) \ + do {\ + UFUNC_NOFPE\ + if (_import_umath() < 0) {\ + PyErr_Print();\ + PyErr_SetString(PyExc_ImportError, msg);\ + return ret;\ + }\ + } while(0) + +#define import_ufunc() \ + do {\ + UFUNC_NOFPE\ + if (_import_umath() < 0) {\ + PyErr_Print();\ + PyErr_SetString(PyExc_ImportError,\ + "numpy.core.umath failed to import");\ + }\ + } while(0) + +#endif diff --git a/.venv/lib/python3.12/site-packages/numpy/core/include/numpy/_dtype_api.h b/.venv/lib/python3.12/site-packages/numpy/core/include/numpy/_dtype_api.h new file mode 100644 index 00000000..39fbc500 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/include/numpy/_dtype_api.h @@ -0,0 +1,408 @@ +/* + * DType related API shared by the (experimental) public API And internal API. + */ + +#ifndef NUMPY_CORE_INCLUDE_NUMPY___DTYPE_API_H_ +#define NUMPY_CORE_INCLUDE_NUMPY___DTYPE_API_H_ + +#define __EXPERIMENTAL_DTYPE_API_VERSION 11 + +struct PyArrayMethodObject_tag; + +/* + * Largely opaque struct for DType classes (i.e. metaclass instances). 
+ * The internal definition is currently in `ndarraytypes.h` (export is a bit
+ * more complex because `PyArray_Descr` is a DTypeMeta internally but not
+ * externally).
+ */
+#if !(defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD)
+
+ typedef struct PyArray_DTypeMeta_tag {
+ PyHeapTypeObject super;
+
+ /*
+ * Most DTypes will have a singleton default instance; for the
+ * parametric legacy DTypes (bytes, string, void, datetime) this
+ * may be a pointer to the *prototype* instance?
+ */
+ PyArray_Descr *singleton;
+ /* Copy of the legacy DTypes type number, usually invalid. */
+ int type_num;
+
+ /* The type object of the scalar instances (may be NULL?) */
+ PyTypeObject *scalar_type;
+ /*
+ * DType flags to signal legacy, parametric, or
+ * abstract. But plenty of space for additional information/flags.
+ */
+ npy_uint64 flags;
+
+ /*
+ * Use indirection in order to allow a fixed size for this struct.
+ * A stable ABI size makes creating a static DType less painful
+ * while also ensuring flexibility for all opaque API (with one
+ * indirection due to the pointer lookup).
+ */
+ void *dt_slots;
+ /* Allow growing (at the moment also beyond this) */
+ void *reserved[3];
+ } PyArray_DTypeMeta;
+
+#endif /* not internal build */
+
+/*
+ * ******************************************************
+ * ArrayMethod API (Casting and UFuncs)
+ * ******************************************************
+ */
+/*
+ * NOTE: Expected changes:
+ * * probably split runtime and general flags into two
+ * * should possibly not use an enum for typedef for more stable ABI?
+ */
+typedef enum {
+ /* Flag for whether the GIL is required */
+ NPY_METH_REQUIRES_PYAPI = 1 << 0,
+ /*
+ * Some functions cannot set floating point error flags, this flag
+ * gives us the option (not requirement) to skip floating point error
+ * setup/check. No function should set error flags and ignore them
+ * since it would interfere with chaining operations (e.g. casting).
+ */
+ NPY_METH_NO_FLOATINGPOINT_ERRORS = 1 << 1,
+ /* Whether the method supports unaligned access (not runtime) */
+ NPY_METH_SUPPORTS_UNALIGNED = 1 << 2,
+ /*
+ * Used for reductions to allow reordering the operation. At this point
+ * assume that if set, it also applies to normal operations though!
+ */
+ NPY_METH_IS_REORDERABLE = 1 << 3,
+ /*
+ * Private flag for now for *logic* functions. The logical functions
+ * `logical_or` and `logical_and` can always cast the inputs to booleans
+ * "safely" (because that is how the cast to bool is defined).
+ * @seberg: I am not sure this is the best way to handle this, so it's
+ * private for now (also it is very limited anyway).
+ * There is one "exception". NA aware dtypes cannot cast to bool
+ * (hopefully), so the `??->?` loop should error even with this flag.
+ * But a second NA fallback loop will be necessary.
+ */
+ _NPY_METH_FORCE_CAST_INPUTS = 1 << 17,
+
+ /* All flags which can change at runtime */
+ NPY_METH_RUNTIME_FLAGS = (
+ NPY_METH_REQUIRES_PYAPI |
+ NPY_METH_NO_FLOATINGPOINT_ERRORS),
+} NPY_ARRAYMETHOD_FLAGS;
+
+
+typedef struct PyArrayMethod_Context_tag {
+ /* The caller, which is typically the original ufunc. May be NULL */
+ PyObject *caller;
+ /* The method "self". Publicly, this is currently an opaque object. */
+ struct PyArrayMethodObject_tag *method;
+
+ /* Operand descriptors, filled in by resolve_descriptors */
+ PyArray_Descr **descriptors;
+ /* Structure may grow (this is harmless for DType authors) */
+} PyArrayMethod_Context;
+
+
+/*
+ * The main object for creating a new ArrayMethod.
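+ *
+ * As a purely illustrative sketch (the `my_*` names are hypothetical; the
+ * slot IDs such as NPY_METH_strided_loop are defined a bit further below),
+ * a spec for a unary float64 loop could be filled roughly like this::
+ *
+ *     static PyType_Slot my_slots[] = {
+ *         {NPY_METH_strided_loop, &my_strided_loop},
+ *         {0, NULL}
+ *     };
+ *     static PyArray_DTypeMeta *my_dtypes[2];  // the DTypes the loop is registered for
+ *     static PyArrayMethod_Spec my_spec = {
+ *         .name = "my_unary_loop",
+ *         .nin = 1, .nout = 1,
+ *         .casting = NPY_NO_CASTING,
+ *         .flags = 0,
+ *         .dtypes = my_dtypes,
+ *         .slots = my_slots,
+ *     };
+ *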
We use the typical `slots` + * mechanism used by the Python limited API (see below for the slot defs). + */ +typedef struct { + const char *name; + int nin, nout; + NPY_CASTING casting; + NPY_ARRAYMETHOD_FLAGS flags; + PyArray_DTypeMeta **dtypes; + PyType_Slot *slots; +} PyArrayMethod_Spec; + + +/* + * ArrayMethod slots + * ----------------- + * + * SLOTS IDs For the ArrayMethod creation, once fully public, IDs are fixed + * but can be deprecated and arbitrarily extended. + */ +#define NPY_METH_resolve_descriptors 1 +/* We may want to adapt the `get_loop` signature a bit: */ +#define _NPY_METH_get_loop 2 +#define NPY_METH_get_reduction_initial 3 +/* specific loops for constructions/default get_loop: */ +#define NPY_METH_strided_loop 4 +#define NPY_METH_contiguous_loop 5 +#define NPY_METH_unaligned_strided_loop 6 +#define NPY_METH_unaligned_contiguous_loop 7 +#define NPY_METH_contiguous_indexed_loop 8 + +/* + * The resolve descriptors function, must be able to handle NULL values for + * all output (but not input) `given_descrs` and fill `loop_descrs`. + * Return -1 on error or 0 if the operation is not possible without an error + * set. (This may still be in flux.) + * Otherwise must return the "casting safety", for normal functions, this is + * almost always "safe" (or even "equivalent"?). + * + * `resolve_descriptors` is optional if all output DTypes are non-parametric. + */ +typedef NPY_CASTING (resolve_descriptors_function)( + /* "method" is currently opaque (necessary e.g. to wrap Python) */ + struct PyArrayMethodObject_tag *method, + /* DTypes the method was created for */ + PyArray_DTypeMeta **dtypes, + /* Input descriptors (instances). Outputs may be NULL. */ + PyArray_Descr **given_descrs, + /* Exact loop descriptors to use, must not hold references on error */ + PyArray_Descr **loop_descrs, + npy_intp *view_offset); + + +typedef int (PyArrayMethod_StridedLoop)(PyArrayMethod_Context *context, + char *const *data, const npy_intp *dimensions, const npy_intp *strides, + NpyAuxData *transferdata); + + +typedef int (get_loop_function)( + PyArrayMethod_Context *context, + int aligned, int move_references, + const npy_intp *strides, + PyArrayMethod_StridedLoop **out_loop, + NpyAuxData **out_transferdata, + NPY_ARRAYMETHOD_FLAGS *flags); + +/** + * Query an ArrayMethod for the initial value for use in reduction. + * + * @param context The arraymethod context, mainly to access the descriptors. + * @param reduction_is_empty Whether the reduction is empty. When it is, the + * value returned may differ. In this case it is a "default" value that + * may differ from the "identity" value normally used. For example: + * - `0.0` is the default for `sum([])`. But `-0.0` is the correct + * identity otherwise as it preserves the sign for `sum([-0.0])`. + * - We use no identity for object, but return the default of `0` and `1` + * for the empty `sum([], dtype=object)` and `prod([], dtype=object)`. + * This allows `np.sum(np.array(["a", "b"], dtype=object))` to work. + * - `-inf` or `INT_MIN` for `max` is an identity, but at least `INT_MIN` + * not a good *default* when there are no items. + * @param initial Pointer to initial data to be filled (if possible) + * + * @returns -1, 0, or 1 indicating error, no initial value, and initial being + * successfully filled. Errors must not be given where 0 is correct, NumPy + * may call this even when not strictly necessary. 
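+ *
+ * As an illustrative sketch only (the names are hypothetical; the signature
+ * is the `get_reduction_initial_function` typedef that follows), a sum-like
+ * float64 loop could fill in its identity like this::
+ *
+ *     static int
+ *     my_sum_get_initial(PyArrayMethod_Context *NPY_UNUSED(context),
+ *                        npy_bool NPY_UNUSED(reduction_is_empty),
+ *                        char *initial)
+ *     {
+ *         double zero = 0.0;
+ *         memcpy(initial, &zero, sizeof(zero));  // identity for summation
+ *         return 1;  // an initial value was filled in
+ *     }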
+ */
+typedef int (get_reduction_initial_function)(
+ PyArrayMethod_Context *context, npy_bool reduction_is_empty,
+ char *initial);
+
+/*
+ * The following functions are only used by the wrapping array method defined
+ * in umath/wrapping_array_method.c
+ */
+
+/*
+ * The function converts the given descriptors (passed in to
+ * `resolve_descriptors`) and translates them for the wrapped loop.
+ * The new descriptors MUST be viewable with the old ones, `NULL` must be
+ * supported (for outputs) and should normally be forwarded.
+ *
+ * The function must clean up on error.
+ *
+ * NOTE: We currently assume that this translation gives "viewable" results.
+ * I.e. there is no additional casting related to the wrapping process.
+ * In principle that could be supported, but not sure it is useful.
+ * This currently also means that e.g. alignment must apply identically
+ * to the new dtypes.
+ *
+ * TODO: Due to the fact that `resolve_descriptors` is also used for `can_cast`
+ * there is no way to "pass out" the result of this function. This means
+ * it will be called twice for every ufunc call.
+ * (I am considering including `auxdata` as an "optional" parameter to
+ * `resolve_descriptors`, so that it can be filled there if not NULL.)
+ */
+typedef int translate_given_descrs_func(int nin, int nout,
+ PyArray_DTypeMeta *wrapped_dtypes[],
+ PyArray_Descr *given_descrs[], PyArray_Descr *new_descrs[]);
+
+/**
+ * The function to convert the actual loop descriptors (as returned by the
+ * original `resolve_descriptors` function) to the ones the output array
+ * should use.
+ * This function must return "viewable" types; it must not mutate them in any
+ * form that would break the inner-loop logic. Does not need to support NULL.
+ *
+ * The function must clean up on error.
+ *
+ * @param nargs Number of arguments
+ * @param new_dtypes The DTypes of the output (usually probably not needed)
+ * @param given_descrs Original given_descrs to the resolver, necessary to
+ * fetch any information related to the new dtypes from the original.
+ * @param original_descrs The `loop_descrs` returned by the wrapped loop.
+ * @param loop_descrs The output descriptors, compatible with `original_descrs`.
+ *
+ * @returns 0 on success, -1 on failure.
+ */
+typedef int translate_loop_descrs_func(int nin, int nout,
+ PyArray_DTypeMeta *new_dtypes[], PyArray_Descr *given_descrs[],
+ PyArray_Descr *original_descrs[], PyArray_Descr *loop_descrs[]);
+
+
+/*
+ * A traverse loop working on a single array. This is similar to the general
+ * strided-loop function. This is designed for loops that need to visit every
+ * element of a single array.
+ *
+ * Currently this is used for array clearing, via the NPY_DT_get_clear_loop
+ * API hook, and zero-filling, via the NPY_DT_get_fill_zero_loop API hook.
+ * These are most useful for handling arrays storing embedded references to
+ * python objects or heap-allocated data.
+ *
+ * The `void *traverse_context` is passed in because we may need to pass in
+ * Interpreter state or similar in the future, but we don't want to pass in
+ * a full context (with pointers to dtypes, method, caller, which all make
+ * no sense for a traverse function).
+ *
+ * We assume for now that this context can be just passed through in
+ * the future (for structured dtypes).
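+ *
+ * As an illustrative sketch only (hypothetical names; the signature is the
+ * `traverse_loop_function` typedef below, and such a loop would also have to
+ * report NPY_METH_REQUIRES_PYAPI), a clear-loop for an array holding embedded
+ * `PyObject *` references might look like::
+ *
+ *     static int
+ *     my_object_clear_loop(void *NPY_UNUSED(traverse_context),
+ *                          PyArray_Descr *NPY_UNUSED(descr),
+ *                          char *data, npy_intp size, npy_intp stride,
+ *                          NpyAuxData *NPY_UNUSED(auxdata))
+ *     {
+ *         while (size--) {
+ *             PyObject *obj;
+ *             memcpy(&obj, data, sizeof(obj));  // items may be unaligned
+ *             Py_XDECREF(obj);
+ *             obj = NULL;
+ *             memcpy(data, &obj, sizeof(obj));  // leave a cleared slot behind
+ *             data += stride;
+ *         }
+ *         return 0;
+ *     }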
+ * + */ +typedef int (traverse_loop_function)( + void *traverse_context, PyArray_Descr *descr, char *data, + npy_intp size, npy_intp stride, NpyAuxData *auxdata); + + +/* + * Simplified get_loop function specific to dtype traversal + * + * It should set the flags needed for the traversal loop and set out_loop to the + * loop function, which must be a valid traverse_loop_function + * pointer. Currently this is used for zero-filling and clearing arrays storing + * embedded references. + * + */ +typedef int (get_traverse_loop_function)( + void *traverse_context, PyArray_Descr *descr, + int aligned, npy_intp fixed_stride, + traverse_loop_function **out_loop, NpyAuxData **out_auxdata, + NPY_ARRAYMETHOD_FLAGS *flags); + + +/* + * **************************** + * DTYPE API + * **************************** + */ + +#define NPY_DT_ABSTRACT 1 << 1 +#define NPY_DT_PARAMETRIC 1 << 2 +#define NPY_DT_NUMERIC 1 << 3 + +/* + * These correspond to slots in the NPY_DType_Slots struct and must + * be in the same order as the members of that struct. If new slots + * get added or old slots get removed NPY_NUM_DTYPE_SLOTS must also + * be updated + */ + +#define NPY_DT_discover_descr_from_pyobject 1 +// this slot is considered private because its API hasn't beed decided +#define _NPY_DT_is_known_scalar_type 2 +#define NPY_DT_default_descr 3 +#define NPY_DT_common_dtype 4 +#define NPY_DT_common_instance 5 +#define NPY_DT_ensure_canonical 6 +#define NPY_DT_setitem 7 +#define NPY_DT_getitem 8 +#define NPY_DT_get_clear_loop 9 +#define NPY_DT_get_fill_zero_loop 10 + +// These PyArray_ArrFunc slots will be deprecated and replaced eventually +// getitem and setitem can be defined as a performance optimization; +// by default the user dtypes call `legacy_getitem_using_DType` and +// `legacy_setitem_using_DType`, respectively. This functionality is +// only supported for basic NumPy DTypes. + + +// used to separate dtype slots from arrfuncs slots +// intended only for internal use but defined here for clarity +#define _NPY_DT_ARRFUNCS_OFFSET (1 << 10) + +// Cast is disabled +// #define NPY_DT_PyArray_ArrFuncs_cast 0 + _NPY_DT_ARRFUNCS_OFFSET + +#define NPY_DT_PyArray_ArrFuncs_getitem 1 + _NPY_DT_ARRFUNCS_OFFSET +#define NPY_DT_PyArray_ArrFuncs_setitem 2 + _NPY_DT_ARRFUNCS_OFFSET + +#define NPY_DT_PyArray_ArrFuncs_copyswapn 3 + _NPY_DT_ARRFUNCS_OFFSET +#define NPY_DT_PyArray_ArrFuncs_copyswap 4 + _NPY_DT_ARRFUNCS_OFFSET +#define NPY_DT_PyArray_ArrFuncs_compare 5 + _NPY_DT_ARRFUNCS_OFFSET +#define NPY_DT_PyArray_ArrFuncs_argmax 6 + _NPY_DT_ARRFUNCS_OFFSET +#define NPY_DT_PyArray_ArrFuncs_dotfunc 7 + _NPY_DT_ARRFUNCS_OFFSET +#define NPY_DT_PyArray_ArrFuncs_scanfunc 8 + _NPY_DT_ARRFUNCS_OFFSET +#define NPY_DT_PyArray_ArrFuncs_fromstr 9 + _NPY_DT_ARRFUNCS_OFFSET +#define NPY_DT_PyArray_ArrFuncs_nonzero 10 + _NPY_DT_ARRFUNCS_OFFSET +#define NPY_DT_PyArray_ArrFuncs_fill 11 + _NPY_DT_ARRFUNCS_OFFSET +#define NPY_DT_PyArray_ArrFuncs_fillwithscalar 12 + _NPY_DT_ARRFUNCS_OFFSET +#define NPY_DT_PyArray_ArrFuncs_sort 13 + _NPY_DT_ARRFUNCS_OFFSET +#define NPY_DT_PyArray_ArrFuncs_argsort 14 + _NPY_DT_ARRFUNCS_OFFSET + +// Casting related slots are disabled. 
See +// https://github.com/numpy/numpy/pull/23173#discussion_r1101098163 +// #define NPY_DT_PyArray_ArrFuncs_castdict 15 + _NPY_DT_ARRFUNCS_OFFSET +// #define NPY_DT_PyArray_ArrFuncs_scalarkind 16 + _NPY_DT_ARRFUNCS_OFFSET +// #define NPY_DT_PyArray_ArrFuncs_cancastscalarkindto 17 + _NPY_DT_ARRFUNCS_OFFSET +// #define NPY_DT_PyArray_ArrFuncs_cancastto 18 + _NPY_DT_ARRFUNCS_OFFSET + +// These are deprecated in NumPy 1.19, so are disabled here. +// #define NPY_DT_PyArray_ArrFuncs_fastclip 19 + _NPY_DT_ARRFUNCS_OFFSET +// #define NPY_DT_PyArray_ArrFuncs_fastputmask 20 + _NPY_DT_ARRFUNCS_OFFSET +// #define NPY_DT_PyArray_ArrFuncs_fasttake 21 + _NPY_DT_ARRFUNCS_OFFSET +#define NPY_DT_PyArray_ArrFuncs_argmin 22 + _NPY_DT_ARRFUNCS_OFFSET + +// TODO: These slots probably still need some thought, and/or a way to "grow"? +typedef struct { + PyTypeObject *typeobj; /* type of python scalar or NULL */ + int flags; /* flags, including parametric and abstract */ + /* NULL terminated cast definitions. Use NULL for the newly created DType */ + PyArrayMethod_Spec **casts; + PyType_Slot *slots; + /* Baseclass or NULL (will always subclass `np.dtype`) */ + PyTypeObject *baseclass; +} PyArrayDTypeMeta_Spec; + + +typedef PyArray_Descr *(discover_descr_from_pyobject_function)( + PyArray_DTypeMeta *cls, PyObject *obj); + +/* + * Before making this public, we should decide whether it should pass + * the type, or allow looking at the object. A possible use-case: + * `np.array(np.array([0]), dtype=np.ndarray)` + * Could consider arrays that are not `dtype=ndarray` "scalars". + */ +typedef int (is_known_scalar_type_function)( + PyArray_DTypeMeta *cls, PyTypeObject *obj); + +typedef PyArray_Descr *(default_descr_function)(PyArray_DTypeMeta *cls); +typedef PyArray_DTypeMeta *(common_dtype_function)( + PyArray_DTypeMeta *dtype1, PyArray_DTypeMeta *dtype2); +typedef PyArray_Descr *(common_instance_function)( + PyArray_Descr *dtype1, PyArray_Descr *dtype2); +typedef PyArray_Descr *(ensure_canonical_function)(PyArray_Descr *dtype); + +/* + * TODO: These two functions are currently only used for experimental DType + * API support. Their relation should be "reversed": NumPy should + * always use them internally. + * There are open points about "casting safety" though, e.g. setting + * elements is currently always unsafe. + */ +typedef int(setitemfunction)(PyArray_Descr *, PyObject *, char *); +typedef PyObject *(getitemfunction)(PyArray_Descr *, char *); + + +#endif /* NUMPY_CORE_INCLUDE_NUMPY___DTYPE_API_H_ */ diff --git a/.venv/lib/python3.12/site-packages/numpy/core/include/numpy/_neighborhood_iterator_imp.h b/.venv/lib/python3.12/site-packages/numpy/core/include/numpy/_neighborhood_iterator_imp.h new file mode 100644 index 00000000..b365cb50 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/include/numpy/_neighborhood_iterator_imp.h @@ -0,0 +1,90 @@ +#ifndef NUMPY_CORE_INCLUDE_NUMPY__NEIGHBORHOOD_IMP_H_ +#error You should not include this header directly +#endif +/* + * Private API (here for inline) + */ +static inline int +_PyArrayNeighborhoodIter_IncrCoord(PyArrayNeighborhoodIterObject* iter); + +/* + * Update to next item of the iterator + * + * Note: this simply increment the coordinates vector, last dimension + * incremented first , i.e, for dimension 3 + * ... + * -1, -1, -1 + * -1, -1, 0 + * -1, -1, 1 + * .... + * -1, 0, -1 + * -1, 0, 0 + * .... + * 0, -1, -1 + * 0, -1, 0 + * .... 
+ */ +#define _UPDATE_COORD_ITER(c) \ + wb = iter->coordinates[c] < iter->bounds[c][1]; \ + if (wb) { \ + iter->coordinates[c] += 1; \ + return 0; \ + } \ + else { \ + iter->coordinates[c] = iter->bounds[c][0]; \ + } + +static inline int +_PyArrayNeighborhoodIter_IncrCoord(PyArrayNeighborhoodIterObject* iter) +{ + npy_intp i, wb; + + for (i = iter->nd - 1; i >= 0; --i) { + _UPDATE_COORD_ITER(i) + } + + return 0; +} + +/* + * Version optimized for 2d arrays, manual loop unrolling + */ +static inline int +_PyArrayNeighborhoodIter_IncrCoord2D(PyArrayNeighborhoodIterObject* iter) +{ + npy_intp wb; + + _UPDATE_COORD_ITER(1) + _UPDATE_COORD_ITER(0) + + return 0; +} +#undef _UPDATE_COORD_ITER + +/* + * Advance to the next neighbour + */ +static inline int +PyArrayNeighborhoodIter_Next(PyArrayNeighborhoodIterObject* iter) +{ + _PyArrayNeighborhoodIter_IncrCoord (iter); + iter->dataptr = iter->translate((PyArrayIterObject*)iter, iter->coordinates); + + return 0; +} + +/* + * Reset functions + */ +static inline int +PyArrayNeighborhoodIter_Reset(PyArrayNeighborhoodIterObject* iter) +{ + npy_intp i; + + for (i = 0; i < iter->nd; ++i) { + iter->coordinates[i] = iter->bounds[i][0]; + } + iter->dataptr = iter->translate((PyArrayIterObject*)iter, iter->coordinates); + + return 0; +} diff --git a/.venv/lib/python3.12/site-packages/numpy/core/include/numpy/_numpyconfig.h b/.venv/lib/python3.12/site-packages/numpy/core/include/numpy/_numpyconfig.h new file mode 100644 index 00000000..9e02322d --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/include/numpy/_numpyconfig.h @@ -0,0 +1,32 @@ +#define NPY_HAVE_ENDIAN_H 1 + +#define NPY_SIZEOF_SHORT 2 +#define NPY_SIZEOF_INT 4 +#define NPY_SIZEOF_LONG 8 +#define NPY_SIZEOF_FLOAT 4 +#define NPY_SIZEOF_COMPLEX_FLOAT 8 +#define NPY_SIZEOF_DOUBLE 8 +#define NPY_SIZEOF_COMPLEX_DOUBLE 16 +#define NPY_SIZEOF_LONGDOUBLE 16 +#define NPY_SIZEOF_COMPLEX_LONGDOUBLE 32 +#define NPY_SIZEOF_PY_INTPTR_T 8 +#define NPY_SIZEOF_OFF_T 8 +#define NPY_SIZEOF_PY_LONG_LONG 8 +#define NPY_SIZEOF_LONGLONG 8 + +#define NPY_USE_C99_COMPLEX 1 +#define NPY_HAVE_COMPLEX_DOUBLE 1 +#define NPY_HAVE_COMPLEX_FLOAT 1 +#define NPY_HAVE_COMPLEX_LONG_DOUBLE 1 +#define NPY_USE_C99_FORMATS 1 + +/* #undef NPY_NO_SIGNAL */ +#define NPY_NO_SMP 0 + +#define NPY_VISIBILITY_HIDDEN __attribute__((visibility("hidden"))) +#define NPY_ABI_VERSION 0x01000009 +#define NPY_API_VERSION 0x00000011 + +#ifndef __STDC_FORMAT_MACROS +#define __STDC_FORMAT_MACROS 1 +#endif diff --git a/.venv/lib/python3.12/site-packages/numpy/core/include/numpy/arrayobject.h b/.venv/lib/python3.12/site-packages/numpy/core/include/numpy/arrayobject.h new file mode 100644 index 00000000..da47bb09 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/include/numpy/arrayobject.h @@ -0,0 +1,12 @@ +#ifndef NUMPY_CORE_INCLUDE_NUMPY_ARRAYOBJECT_H_ +#define NUMPY_CORE_INCLUDE_NUMPY_ARRAYOBJECT_H_ +#define Py_ARRAYOBJECT_H + +#include "ndarrayobject.h" +#include "npy_interrupt.h" + +#ifdef NPY_NO_PREFIX +#include "noprefix.h" +#endif + +#endif /* NUMPY_CORE_INCLUDE_NUMPY_ARRAYOBJECT_H_ */ diff --git a/.venv/lib/python3.12/site-packages/numpy/core/include/numpy/arrayscalars.h b/.venv/lib/python3.12/site-packages/numpy/core/include/numpy/arrayscalars.h new file mode 100644 index 00000000..258bf95b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/include/numpy/arrayscalars.h @@ -0,0 +1,186 @@ +#ifndef NUMPY_CORE_INCLUDE_NUMPY_ARRAYSCALARS_H_ +#define NUMPY_CORE_INCLUDE_NUMPY_ARRAYSCALARS_H_ + +#ifndef 
_MULTIARRAYMODULE +typedef struct { + PyObject_HEAD + npy_bool obval; +} PyBoolScalarObject; +#endif + + +typedef struct { + PyObject_HEAD + signed char obval; +} PyByteScalarObject; + + +typedef struct { + PyObject_HEAD + short obval; +} PyShortScalarObject; + + +typedef struct { + PyObject_HEAD + int obval; +} PyIntScalarObject; + + +typedef struct { + PyObject_HEAD + long obval; +} PyLongScalarObject; + + +typedef struct { + PyObject_HEAD + npy_longlong obval; +} PyLongLongScalarObject; + + +typedef struct { + PyObject_HEAD + unsigned char obval; +} PyUByteScalarObject; + + +typedef struct { + PyObject_HEAD + unsigned short obval; +} PyUShortScalarObject; + + +typedef struct { + PyObject_HEAD + unsigned int obval; +} PyUIntScalarObject; + + +typedef struct { + PyObject_HEAD + unsigned long obval; +} PyULongScalarObject; + + +typedef struct { + PyObject_HEAD + npy_ulonglong obval; +} PyULongLongScalarObject; + + +typedef struct { + PyObject_HEAD + npy_half obval; +} PyHalfScalarObject; + + +typedef struct { + PyObject_HEAD + float obval; +} PyFloatScalarObject; + + +typedef struct { + PyObject_HEAD + double obval; +} PyDoubleScalarObject; + + +typedef struct { + PyObject_HEAD + npy_longdouble obval; +} PyLongDoubleScalarObject; + + +typedef struct { + PyObject_HEAD + npy_cfloat obval; +} PyCFloatScalarObject; + + +typedef struct { + PyObject_HEAD + npy_cdouble obval; +} PyCDoubleScalarObject; + + +typedef struct { + PyObject_HEAD + npy_clongdouble obval; +} PyCLongDoubleScalarObject; + + +typedef struct { + PyObject_HEAD + PyObject * obval; +} PyObjectScalarObject; + +typedef struct { + PyObject_HEAD + npy_datetime obval; + PyArray_DatetimeMetaData obmeta; +} PyDatetimeScalarObject; + +typedef struct { + PyObject_HEAD + npy_timedelta obval; + PyArray_DatetimeMetaData obmeta; +} PyTimedeltaScalarObject; + + +typedef struct { + PyObject_HEAD + char obval; +} PyScalarObject; + +#define PyStringScalarObject PyBytesObject +typedef struct { + /* note that the PyObject_HEAD macro lives right here */ + PyUnicodeObject base; + Py_UCS4 *obval; + #if NPY_FEATURE_VERSION >= NPY_1_20_API_VERSION + char *buffer_fmt; + #endif +} PyUnicodeScalarObject; + + +typedef struct { + PyObject_VAR_HEAD + char *obval; + PyArray_Descr *descr; + int flags; + PyObject *base; + #if NPY_FEATURE_VERSION >= NPY_1_20_API_VERSION + void *_buffer_info; /* private buffer info, tagged to allow warning */ + #endif +} PyVoidScalarObject; + +/* Macros + Py<Cls><bitsize>ScalarObject + Py<Cls><bitsize>ArrType_Type + are defined in ndarrayobject.h +*/ + +#define PyArrayScalar_False ((PyObject *)(&(_PyArrayScalar_BoolValues[0]))) +#define PyArrayScalar_True ((PyObject *)(&(_PyArrayScalar_BoolValues[1]))) +#define PyArrayScalar_FromLong(i) \ + ((PyObject *)(&(_PyArrayScalar_BoolValues[((i)!=0)]))) +#define PyArrayScalar_RETURN_BOOL_FROM_LONG(i) \ + return Py_INCREF(PyArrayScalar_FromLong(i)), \ + PyArrayScalar_FromLong(i) +#define PyArrayScalar_RETURN_FALSE \ + return Py_INCREF(PyArrayScalar_False), \ + PyArrayScalar_False +#define PyArrayScalar_RETURN_TRUE \ + return Py_INCREF(PyArrayScalar_True), \ + PyArrayScalar_True + +#define PyArrayScalar_New(cls) \ + Py##cls##ArrType_Type.tp_alloc(&Py##cls##ArrType_Type, 0) +#define PyArrayScalar_VAL(obj, cls) \ + ((Py##cls##ScalarObject *)obj)->obval +#define PyArrayScalar_ASSIGN(obj, cls, val) \ + PyArrayScalar_VAL(obj, cls) = val + +#endif /* NUMPY_CORE_INCLUDE_NUMPY_ARRAYSCALARS_H_ */ diff --git a/.venv/lib/python3.12/site-packages/numpy/core/include/numpy/experimental_dtype_api.h 
b/.venv/lib/python3.12/site-packages/numpy/core/include/numpy/experimental_dtype_api.h new file mode 100644 index 00000000..19088dab --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/include/numpy/experimental_dtype_api.h @@ -0,0 +1,365 @@
+/*
+ * This header exports the new experimental DType API as proposed in
+ * NEPs 41 to 43. For background, please check these NEPs. Otherwise,
+ * this header also serves as documentation for the time being.
+ *
+ * The header includes `_dtype_api.h` which holds most definitions, while this
+ * header mainly wraps functions for public consumption.
+ *
+ * Please do not hesitate to contact @seberg with questions. This is
+ * developed together with https://github.com/seberg/experimental_user_dtypes
+ * and those interested in experimenting are encouraged to contribute there.
+ *
+ * To use the functions defined in the header, call::
+ *
+ * if (import_experimental_dtype_api(version) < 0) {
+ * return NULL;
+ * }
+ *
+ * in your module init. (A version mismatch will be reported, just update
+ * to the correct one, this will alert you of possible changes.)
+ *
+ * The following lists the main symbols currently exported. Please do not
+ * hesitate to ask for help or clarification:
+ *
+ * - PyUFunc_AddLoopFromSpec:
+ *
+ * Register a new loop for a ufunc. This uses the `PyArrayMethod_Spec`
+ * which must be filled in (see in-line comments).
+ *
+ * - PyUFunc_AddWrappingLoop:
+ *
+ * Register a new loop which reuses an existing one, but modifies the
+ * result dtypes. Please search the internal NumPy docs for more info
+ * at this point. (Used for physical units dtype.)
+ *
+ * - PyUFunc_AddPromoter:
+ *
+ * Register a new promoter for a ufunc. A promoter is a function stored
+ * in a PyCapsule (see in-line comments). It is passed the operation and
+ * requested DType signatures and can mutate it to attempt a new search
+ * for a matching loop/promoter.
+ * I.e. for Numba a promoter could even add the desired loop.
+ *
+ * - PyArrayInitDTypeMeta_FromSpec:
+ *
+ * Initialize a new DType. It must currently be a static Python C type
+ * that is declared as `PyArray_DTypeMeta` and not `PyTypeObject`.
+ * Further, it must subclass `np.dtype` and set its type to
+ * `PyArrayDTypeMeta_Type` (before calling `PyType_Ready()`).
+ *
+ * - PyArray_CommonDType:
+ *
+ * Find the common-dtype ("promotion") for two DType classes. Similar
+ * to `np.result_type`, but works on the classes and not instances.
+ *
+ * - PyArray_PromoteDTypeSequence:
+ *
+ * Same as CommonDType, but works with an arbitrary number of DTypes.
+ * This function is smarter and can often return successful and unambiguous
+ * results when `common_dtype(common_dtype(dt1, dt2), dt3)` would
+ * depend on the operation order or fail. Nevertheless, DTypes should
+ * aim to ensure that their common-dtype implementation is associative
+ * and commutative! (Mainly, unsigned and signed integers are not.)
+ *
+ * For guaranteed consistent results DTypes must implement common-DType
+ * "transitively". If A promotes B and B promotes C, then A must generally
+ * also promote C; where "promotes" means implements the promotion.
+ * (There are some exceptions for abstract DTypes.)
+ *
+ * - PyArray_GetDefaultDescr:
+ *
+ * Given a DType class, returns the default instance (descriptor).
+ * This is an inline function that checks for `singleton` first and only
+ * calls the `default_descr` function if necessary.
+ * + * - PyArray_DoubleDType, etc.: + * + * Aliases to the DType classes for the builtin NumPy DTypes. + * + * WARNING + * ======= + * + * By using this header, you understand that this is a fully experimental + * exposure. Details are expected to change, and some options may have no + * effect. (Please contact @seberg if you have questions!) + * If the exposure stops working, please file a bug report with NumPy. + * Further, a DType created using this API/header should still be expected + * to be incompatible with some functionality inside and outside of NumPy. + * In this case crashes must be expected. Please report any such problems + * so that they can be fixed before final exposure. + * Furthermore, expect missing checks for programming errors which the final + * API is expected to have. + * + * Symbols with a leading underscore are likely to not be included in the + * first public version, if these are central to your use-case, please let + * us know, so that we can reconsider. + * + * "Array-like" consumer API not yet under considerations + * ====================================================== + * + * The new DType API is designed in a way to make it potentially useful for + * alternative "array-like" implementations. This will require careful + * exposure of details and functions and is not part of this experimental API. + * + * Brief (incompatibility) changelog + * ================================= + * + * 2. None (only additions). + * 3. New `npy_intp *view_offset` argument for `resolve_descriptors`. + * This replaces the `NPY_CAST_IS_VIEW` flag. It can be set to 0 if the + * operation is a view, and is pre-initialized to `NPY_MIN_INTP` indicating + * that the operation is not a view. + */ + +#ifndef NUMPY_CORE_INCLUDE_NUMPY_EXPERIMENTAL_DTYPE_API_H_ +#define NUMPY_CORE_INCLUDE_NUMPY_EXPERIMENTAL_DTYPE_API_H_ + +#include <Python.h> +#include "ndarraytypes.h" +#include "_dtype_api.h" + +/* + * The contents of PyArrayMethodObject are currently opaque (is there a way + * good way to make them be `PyObject *`?) + */ +typedef struct PyArrayMethodObject_tag PyArrayMethodObject; + +/* + * There must be a better way?! -- Oh well, this is experimental + * (my issue with it, is that I cannot undef those helpers). + */ +#if defined(PY_ARRAY_UNIQUE_SYMBOL) + #define NPY_EXP_DTYPE_API_CONCAT_HELPER2(x, y) x ## y + #define NPY_EXP_DTYPE_API_CONCAT_HELPER(arg) NPY_EXP_DTYPE_API_CONCAT_HELPER2(arg, __experimental_dtype_api_table) + #define __experimental_dtype_api_table NPY_EXP_DTYPE_API_CONCAT_HELPER(PY_ARRAY_UNIQUE_SYMBOL) +#else + #define __experimental_dtype_api_table __experimental_dtype_api_table +#endif + +/* Support for correct multi-file projects: */ +#if defined(NO_IMPORT) || defined(NO_IMPORT_ARRAY) + extern void **__experimental_dtype_api_table; +#else + /* + * Just a hack so I don't forget importing as much myself, I spend way too + * much time noticing it the first time around :). 
+ */ + static void + __not_imported(void) + { + printf("*****\nCritical error, dtype API not imported\n*****\n"); + } + + static void *__uninitialized_table[] = { + &__not_imported, &__not_imported, &__not_imported, &__not_imported, + &__not_imported, &__not_imported, &__not_imported, &__not_imported}; + + #if defined(PY_ARRAY_UNIQUE_SYMBOL) + void **__experimental_dtype_api_table = __uninitialized_table; + #else + static void **__experimental_dtype_api_table = __uninitialized_table; + #endif +#endif + + +typedef int _ufunc_addloop_fromspec_func( + PyObject *ufunc, PyArrayMethod_Spec *spec); +/* + * The main ufunc registration function. This adds a new implementation/loop + * to a ufunc. It replaces `PyUFunc_RegisterLoopForType`. + */ +#define PyUFunc_AddLoopFromSpec \ + (*(_ufunc_addloop_fromspec_func *)(__experimental_dtype_api_table[0])) + + +/* Please see the NumPy definitions in `array_method.h` for details on these */ +typedef int translate_given_descrs_func(int nin, int nout, + PyArray_DTypeMeta *wrapped_dtypes[], + PyArray_Descr *given_descrs[], PyArray_Descr *new_descrs[]); +typedef int translate_loop_descrs_func(int nin, int nout, + PyArray_DTypeMeta *new_dtypes[], PyArray_Descr *given_descrs[], + PyArray_Descr *original_descrs[], PyArray_Descr *loop_descrs[]); + +typedef int _ufunc_wrapping_loop_func(PyObject *ufunc_obj, + PyArray_DTypeMeta *new_dtypes[], PyArray_DTypeMeta *wrapped_dtypes[], + translate_given_descrs_func *translate_given_descrs, + translate_loop_descrs_func *translate_loop_descrs); +#define PyUFunc_AddWrappingLoop \ + (*(_ufunc_wrapping_loop_func *)(__experimental_dtype_api_table[7])) + +/* + * Type of the C promoter function, which must be wrapped into a + * PyCapsule with name "numpy._ufunc_promoter". + * + * Note that currently the output dtypes are always NULL unless they are + * also part of the signature. This is an implementation detail and could + * change in the future. However, in general promoters should not have a + * need for output dtypes. + * (There are potential use-cases, these are currently unsupported.) + */ +typedef int promoter_function(PyObject *ufunc, + PyArray_DTypeMeta *op_dtypes[], PyArray_DTypeMeta *signature[], + PyArray_DTypeMeta *new_op_dtypes[]); + +/* + * Function to register a promoter. + * + * @param ufunc The ufunc object to register the promoter with. + * @param DType_tuple A Python tuple containing DTypes or None matching the + * number of inputs and outputs of the ufunc. + * @param promoter A PyCapsule with name "numpy._ufunc_promoter" containing + * a pointer to a `promoter_function`. + */ +typedef int _ufunc_addpromoter_func( + PyObject *ufunc, PyObject *DType_tuple, PyObject *promoter); +#define PyUFunc_AddPromoter \ + (*(_ufunc_addpromoter_func *)(__experimental_dtype_api_table[1])) + +#define PyArrayDTypeMeta_Type \ + (*(PyTypeObject *)__experimental_dtype_api_table[2]) +typedef int __dtypemeta_fromspec( + PyArray_DTypeMeta *DType, PyArrayDTypeMeta_Spec *dtype_spec); +/* + * Finalize creation of a DTypeMeta. You must ensure that the DTypeMeta is + * a proper subclass. The DTypeMeta object has additional fields compared to + * a normal PyTypeObject! + * The only (easy) creation of a new DType is to create a static Type which + * inherits `PyArray_DescrType`, sets its type to `PyArrayDTypeMeta_Type` and + * uses `PyArray_DTypeMeta` defined above as the C-structure. 
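+ *
+ * A rough, purely illustrative sketch (the `my_*` / `MyDType` names are
+ * hypothetical; base-class setup and error handling are omitted)::
+ *
+ *     static PyArray_DTypeMeta MyDType = {{{
+ *             PyVarObject_HEAD_INIT(NULL, 0)
+ *             .tp_name = "my_module.MyDType",
+ *             .tp_basicsize = sizeof(PyArray_Descr),
+ *         }},
+ *         // remaining fields are filled in by PyArrayInitDTypeMeta_FromSpec
+ *     };
+ *
+ *     // in the module init, after import_experimental_dtype_api(...):
+ *     Py_SET_TYPE(&MyDType, &PyArrayDTypeMeta_Type);
+ *     if (PyArrayInitDTypeMeta_FromSpec(&MyDType, &my_spec) < 0) {
+ *         return NULL;
+ *     }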
+ */ +#define PyArrayInitDTypeMeta_FromSpec \ + ((__dtypemeta_fromspec *)(__experimental_dtype_api_table[3])) + + +/* + * ************************************* + * WORKING WITH DTYPES + * ************************************* + */ + +typedef PyArray_DTypeMeta *__common_dtype( + PyArray_DTypeMeta *DType1, PyArray_DTypeMeta *DType2); +#define PyArray_CommonDType \ + ((__common_dtype *)(__experimental_dtype_api_table[4])) + + +typedef PyArray_DTypeMeta *__promote_dtype_sequence( + npy_intp num, PyArray_DTypeMeta *DTypes[]); +#define PyArray_PromoteDTypeSequence \ + ((__promote_dtype_sequence *)(__experimental_dtype_api_table[5])) + + +typedef PyArray_Descr *__get_default_descr( + PyArray_DTypeMeta *DType); +#define _PyArray_GetDefaultDescr \ + ((__get_default_descr *)(__experimental_dtype_api_table[6])) + +static inline PyArray_Descr * +PyArray_GetDefaultDescr(PyArray_DTypeMeta *DType) +{ + if (DType->singleton != NULL) { + Py_INCREF(DType->singleton); + return DType->singleton; + } + return _PyArray_GetDefaultDescr(DType); +} + + +/* + * NumPy's builtin DTypes: + */ +#define PyArray_BoolDType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[10]) +/* Integers */ +#define PyArray_ByteDType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[11]) +#define PyArray_UByteDType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[12]) +#define PyArray_ShortDType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[13]) +#define PyArray_UShortDType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[14]) +#define PyArray_IntDType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[15]) +#define PyArray_UIntDType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[16]) +#define PyArray_LongDType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[17]) +#define PyArray_ULongDType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[18]) +#define PyArray_LongLongDType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[19]) +#define PyArray_ULongLongDType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[20]) +/* Integer aliases */ +#define PyArray_Int8Type (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[21]) +#define PyArray_UInt8DType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[22]) +#define PyArray_Int16DType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[23]) +#define PyArray_UInt16DType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[24]) +#define PyArray_Int32DType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[25]) +#define PyArray_UInt32DType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[26]) +#define PyArray_Int64DType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[27]) +#define PyArray_UInt64DType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[28]) +#define PyArray_IntpDType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[29]) +#define PyArray_UIntpDType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[30]) +/* Floats */ +#define PyArray_HalfType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[31]) +#define PyArray_FloatDType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[32]) +#define PyArray_DoubleDType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[33]) +#define PyArray_LongDoubleDType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[34]) +/* Complex */ +#define PyArray_CFloatDType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[35]) +#define PyArray_CDoubleDType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[36]) +#define PyArray_CLongDoubleDType 
(*(PyArray_DTypeMeta *)__experimental_dtype_api_table[37]) +/* String/Bytes */ +#define PyArray_StringDType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[38]) +#define PyArray_UnicodeDType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[39]) +/* Datetime/Timedelta */ +#define PyArray_DatetimeDType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[40]) +#define PyArray_TimedeltaDType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[41]) +/* Object/Void */ +#define PyArray_ObjectDType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[42]) +#define PyArray_VoidDType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[43]) + +/* + * ******************************** + * Initialization + * ******************************** + * + * Import the experimental API, the version must match the one defined in + * the header to ensure changes are taken into account. NumPy will further + * runtime-check this. + * You must call this function to use the symbols defined in this file. + */ +#if !defined(NO_IMPORT) && !defined(NO_IMPORT_ARRAY) + +static int +import_experimental_dtype_api(int version) +{ + if (version != __EXPERIMENTAL_DTYPE_API_VERSION) { + PyErr_Format(PyExc_RuntimeError, + "DType API version %d did not match header version %d. Please " + "update the import statement and check for API changes.", + version, __EXPERIMENTAL_DTYPE_API_VERSION); + return -1; + } + if (__experimental_dtype_api_table != __uninitialized_table) { + /* already imported. */ + return 0; + } + + PyObject *multiarray = PyImport_ImportModule("numpy.core._multiarray_umath"); + if (multiarray == NULL) { + return -1; + } + + PyObject *api = PyObject_CallMethod(multiarray, + "_get_experimental_dtype_api", "i", version); + Py_DECREF(multiarray); + if (api == NULL) { + return -1; + } + __experimental_dtype_api_table = (void **)PyCapsule_GetPointer(api, + "experimental_dtype_api_table"); + Py_DECREF(api); + + if (__experimental_dtype_api_table == NULL) { + __experimental_dtype_api_table = __uninitialized_table; + return -1; + } + return 0; +} + +#endif /* !defined(NO_IMPORT) && !defined(NO_IMPORT_ARRAY) */ + +#endif /* NUMPY_CORE_INCLUDE_NUMPY_EXPERIMENTAL_DTYPE_API_H_ */ diff --git a/.venv/lib/python3.12/site-packages/numpy/core/include/numpy/halffloat.h b/.venv/lib/python3.12/site-packages/numpy/core/include/numpy/halffloat.h new file mode 100644 index 00000000..95040166 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/include/numpy/halffloat.h @@ -0,0 +1,70 @@ +#ifndef NUMPY_CORE_INCLUDE_NUMPY_HALFFLOAT_H_ +#define NUMPY_CORE_INCLUDE_NUMPY_HALFFLOAT_H_ + +#include <Python.h> +#include <numpy/npy_math.h> + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * Half-precision routines + */ + +/* Conversions */ +float npy_half_to_float(npy_half h); +double npy_half_to_double(npy_half h); +npy_half npy_float_to_half(float f); +npy_half npy_double_to_half(double d); +/* Comparisons */ +int npy_half_eq(npy_half h1, npy_half h2); +int npy_half_ne(npy_half h1, npy_half h2); +int npy_half_le(npy_half h1, npy_half h2); +int npy_half_lt(npy_half h1, npy_half h2); +int npy_half_ge(npy_half h1, npy_half h2); +int npy_half_gt(npy_half h1, npy_half h2); +/* faster *_nonan variants for when you know h1 and h2 are not NaN */ +int npy_half_eq_nonan(npy_half h1, npy_half h2); +int npy_half_lt_nonan(npy_half h1, npy_half h2); +int npy_half_le_nonan(npy_half h1, npy_half h2); +/* Miscellaneous functions */ +int npy_half_iszero(npy_half h); +int npy_half_isnan(npy_half h); +int npy_half_isinf(npy_half h); 
+int npy_half_isfinite(npy_half h); +int npy_half_signbit(npy_half h); +npy_half npy_half_copysign(npy_half x, npy_half y); +npy_half npy_half_spacing(npy_half h); +npy_half npy_half_nextafter(npy_half x, npy_half y); +npy_half npy_half_divmod(npy_half x, npy_half y, npy_half *modulus); + +/* + * Half-precision constants + */ + +#define NPY_HALF_ZERO (0x0000u) +#define NPY_HALF_PZERO (0x0000u) +#define NPY_HALF_NZERO (0x8000u) +#define NPY_HALF_ONE (0x3c00u) +#define NPY_HALF_NEGONE (0xbc00u) +#define NPY_HALF_PINF (0x7c00u) +#define NPY_HALF_NINF (0xfc00u) +#define NPY_HALF_NAN (0x7e00u) + +#define NPY_MAX_HALF (0x7bffu) + +/* + * Bit-level conversions + */ + +npy_uint16 npy_floatbits_to_halfbits(npy_uint32 f); +npy_uint16 npy_doublebits_to_halfbits(npy_uint64 d); +npy_uint32 npy_halfbits_to_floatbits(npy_uint16 h); +npy_uint64 npy_halfbits_to_doublebits(npy_uint16 h); + +#ifdef __cplusplus +} +#endif + +#endif /* NUMPY_CORE_INCLUDE_NUMPY_HALFFLOAT_H_ */ diff --git a/.venv/lib/python3.12/site-packages/numpy/core/include/numpy/ndarrayobject.h b/.venv/lib/python3.12/site-packages/numpy/core/include/numpy/ndarrayobject.h new file mode 100644 index 00000000..36cfdd6f --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/include/numpy/ndarrayobject.h @@ -0,0 +1,251 @@ +/* + * DON'T INCLUDE THIS DIRECTLY. + */ +#ifndef NUMPY_CORE_INCLUDE_NUMPY_NDARRAYOBJECT_H_ +#define NUMPY_CORE_INCLUDE_NUMPY_NDARRAYOBJECT_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include <Python.h> +#include "ndarraytypes.h" + +/* Includes the "function" C-API -- these are all stored in a + list of pointers --- one for each file + The two lists are concatenated into one in multiarray. + + They are available as import_array() +*/ + +#include "__multiarray_api.h" + + +/* C-API that requires previous API to be defined */ + +#define PyArray_DescrCheck(op) PyObject_TypeCheck(op, &PyArrayDescr_Type) + +#define PyArray_Check(op) PyObject_TypeCheck(op, &PyArray_Type) +#define PyArray_CheckExact(op) (((PyObject*)(op))->ob_type == &PyArray_Type) + +#define PyArray_HasArrayInterfaceType(op, type, context, out) \ + ((((out)=PyArray_FromStructInterface(op)) != Py_NotImplemented) || \ + (((out)=PyArray_FromInterface(op)) != Py_NotImplemented) || \ + (((out)=PyArray_FromArrayAttr(op, type, context)) != \ + Py_NotImplemented)) + +#define PyArray_HasArrayInterface(op, out) \ + PyArray_HasArrayInterfaceType(op, NULL, NULL, out) + +#define PyArray_IsZeroDim(op) (PyArray_Check(op) && \ + (PyArray_NDIM((PyArrayObject *)op) == 0)) + +#define PyArray_IsScalar(obj, cls) \ + (PyObject_TypeCheck(obj, &Py##cls##ArrType_Type)) + +#define PyArray_CheckScalar(m) (PyArray_IsScalar(m, Generic) || \ + PyArray_IsZeroDim(m)) +#define PyArray_IsPythonNumber(obj) \ + (PyFloat_Check(obj) || PyComplex_Check(obj) || \ + PyLong_Check(obj) || PyBool_Check(obj)) +#define PyArray_IsIntegerScalar(obj) (PyLong_Check(obj) \ + || PyArray_IsScalar((obj), Integer)) +#define PyArray_IsPythonScalar(obj) \ + (PyArray_IsPythonNumber(obj) || PyBytes_Check(obj) || \ + PyUnicode_Check(obj)) + +#define PyArray_IsAnyScalar(obj) \ + (PyArray_IsScalar(obj, Generic) || PyArray_IsPythonScalar(obj)) + +#define PyArray_CheckAnyScalar(obj) (PyArray_IsPythonScalar(obj) || \ + PyArray_CheckScalar(obj)) + + +#define PyArray_GETCONTIGUOUS(m) (PyArray_ISCONTIGUOUS(m) ? 
\ + Py_INCREF(m), (m) : \ + (PyArrayObject *)(PyArray_Copy(m))) + +#define PyArray_SAMESHAPE(a1,a2) ((PyArray_NDIM(a1) == PyArray_NDIM(a2)) && \ + PyArray_CompareLists(PyArray_DIMS(a1), \ + PyArray_DIMS(a2), \ + PyArray_NDIM(a1))) + +#define PyArray_SIZE(m) PyArray_MultiplyList(PyArray_DIMS(m), PyArray_NDIM(m)) +#define PyArray_NBYTES(m) (PyArray_ITEMSIZE(m) * PyArray_SIZE(m)) +#define PyArray_FROM_O(m) PyArray_FromAny(m, NULL, 0, 0, 0, NULL) + +#define PyArray_FROM_OF(m,flags) PyArray_CheckFromAny(m, NULL, 0, 0, flags, \ + NULL) + +#define PyArray_FROM_OT(m,type) PyArray_FromAny(m, \ + PyArray_DescrFromType(type), 0, 0, 0, NULL) + +#define PyArray_FROM_OTF(m, type, flags) \ + PyArray_FromAny(m, PyArray_DescrFromType(type), 0, 0, \ + (((flags) & NPY_ARRAY_ENSURECOPY) ? \ + ((flags) | NPY_ARRAY_DEFAULT) : (flags)), NULL) + +#define PyArray_FROMANY(m, type, min, max, flags) \ + PyArray_FromAny(m, PyArray_DescrFromType(type), min, max, \ + (((flags) & NPY_ARRAY_ENSURECOPY) ? \ + (flags) | NPY_ARRAY_DEFAULT : (flags)), NULL) + +#define PyArray_ZEROS(m, dims, type, is_f_order) \ + PyArray_Zeros(m, dims, PyArray_DescrFromType(type), is_f_order) + +#define PyArray_EMPTY(m, dims, type, is_f_order) \ + PyArray_Empty(m, dims, PyArray_DescrFromType(type), is_f_order) + +#define PyArray_FILLWBYTE(obj, val) memset(PyArray_DATA(obj), val, \ + PyArray_NBYTES(obj)) +#ifndef PYPY_VERSION +#define PyArray_REFCOUNT(obj) (((PyObject *)(obj))->ob_refcnt) +#define NPY_REFCOUNT PyArray_REFCOUNT +#endif +#define NPY_MAX_ELSIZE (2 * NPY_SIZEOF_LONGDOUBLE) + +#define PyArray_ContiguousFromAny(op, type, min_depth, max_depth) \ + PyArray_FromAny(op, PyArray_DescrFromType(type), min_depth, \ + max_depth, NPY_ARRAY_DEFAULT, NULL) + +#define PyArray_EquivArrTypes(a1, a2) \ + PyArray_EquivTypes(PyArray_DESCR(a1), PyArray_DESCR(a2)) + +#define PyArray_EquivByteorders(b1, b2) \ + (((b1) == (b2)) || (PyArray_ISNBO(b1) == PyArray_ISNBO(b2))) + +#define PyArray_SimpleNew(nd, dims, typenum) \ + PyArray_New(&PyArray_Type, nd, dims, typenum, NULL, NULL, 0, 0, NULL) + +#define PyArray_SimpleNewFromData(nd, dims, typenum, data) \ + PyArray_New(&PyArray_Type, nd, dims, typenum, NULL, \ + data, 0, NPY_ARRAY_CARRAY, NULL) + +#define PyArray_SimpleNewFromDescr(nd, dims, descr) \ + PyArray_NewFromDescr(&PyArray_Type, descr, nd, dims, \ + NULL, NULL, 0, NULL) + +#define PyArray_ToScalar(data, arr) \ + PyArray_Scalar(data, PyArray_DESCR(arr), (PyObject *)arr) + + +/* These might be faster without the dereferencing of obj + going on inside -- of course an optimizing compiler should + inline the constants inside a for loop making it a moot point +*/ + +#define PyArray_GETPTR1(obj, i) ((void *)(PyArray_BYTES(obj) + \ + (i)*PyArray_STRIDES(obj)[0])) + +#define PyArray_GETPTR2(obj, i, j) ((void *)(PyArray_BYTES(obj) + \ + (i)*PyArray_STRIDES(obj)[0] + \ + (j)*PyArray_STRIDES(obj)[1])) + +#define PyArray_GETPTR3(obj, i, j, k) ((void *)(PyArray_BYTES(obj) + \ + (i)*PyArray_STRIDES(obj)[0] + \ + (j)*PyArray_STRIDES(obj)[1] + \ + (k)*PyArray_STRIDES(obj)[2])) + +#define PyArray_GETPTR4(obj, i, j, k, l) ((void *)(PyArray_BYTES(obj) + \ + (i)*PyArray_STRIDES(obj)[0] + \ + (j)*PyArray_STRIDES(obj)[1] + \ + (k)*PyArray_STRIDES(obj)[2] + \ + (l)*PyArray_STRIDES(obj)[3])) + +static inline void +PyArray_DiscardWritebackIfCopy(PyArrayObject *arr) +{ + PyArrayObject_fields *fa = (PyArrayObject_fields *)arr; + if (fa && fa->base) { + if (fa->flags & NPY_ARRAY_WRITEBACKIFCOPY) { + PyArray_ENABLEFLAGS((PyArrayObject*)fa->base, NPY_ARRAY_WRITEABLE); + 
Py_DECREF(fa->base); + fa->base = NULL; + PyArray_CLEARFLAGS(arr, NPY_ARRAY_WRITEBACKIFCOPY); + } + } +} + +#define PyArray_DESCR_REPLACE(descr) do { \ + PyArray_Descr *_new_; \ + _new_ = PyArray_DescrNew(descr); \ + Py_XDECREF(descr); \ + descr = _new_; \ + } while(0) + +/* Copy should always return contiguous array */ +#define PyArray_Copy(obj) PyArray_NewCopy(obj, NPY_CORDER) + +#define PyArray_FromObject(op, type, min_depth, max_depth) \ + PyArray_FromAny(op, PyArray_DescrFromType(type), min_depth, \ + max_depth, NPY_ARRAY_BEHAVED | \ + NPY_ARRAY_ENSUREARRAY, NULL) + +#define PyArray_ContiguousFromObject(op, type, min_depth, max_depth) \ + PyArray_FromAny(op, PyArray_DescrFromType(type), min_depth, \ + max_depth, NPY_ARRAY_DEFAULT | \ + NPY_ARRAY_ENSUREARRAY, NULL) + +#define PyArray_CopyFromObject(op, type, min_depth, max_depth) \ + PyArray_FromAny(op, PyArray_DescrFromType(type), min_depth, \ + max_depth, NPY_ARRAY_ENSURECOPY | \ + NPY_ARRAY_DEFAULT | \ + NPY_ARRAY_ENSUREARRAY, NULL) + +#define PyArray_Cast(mp, type_num) \ + PyArray_CastToType(mp, PyArray_DescrFromType(type_num), 0) + +#define PyArray_Take(ap, items, axis) \ + PyArray_TakeFrom(ap, items, axis, NULL, NPY_RAISE) + +#define PyArray_Put(ap, items, values) \ + PyArray_PutTo(ap, items, values, NPY_RAISE) + +/* Compatibility with old Numeric stuff -- don't use in new code */ + +#define PyArray_FromDimsAndData(nd, d, type, data) \ + PyArray_FromDimsAndDataAndDescr(nd, d, PyArray_DescrFromType(type), \ + data) + + +/* + Check to see if this key in the dictionary is the "title" + entry of the tuple (i.e. a duplicate dictionary entry in the fields + dict). +*/ + +static inline int +NPY_TITLE_KEY_check(PyObject *key, PyObject *value) +{ + PyObject *title; + if (PyTuple_Size(value) != 3) { + return 0; + } + title = PyTuple_GetItem(value, 2); + if (key == title) { + return 1; + } +#ifdef PYPY_VERSION + /* + * On PyPy, dictionary keys do not always preserve object identity. + * Fall back to comparison by value. + */ + if (PyUnicode_Check(title) && PyUnicode_Check(key)) { + return PyUnicode_Compare(title, key) == 0 ? 1 : 0; + } +#endif + return 0; +} + +/* Macro, for backward compat with "if NPY_TITLE_KEY(key, value) { ..." 
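+
+   As a purely illustrative sketch, the macro is typically used to skip the
+   duplicate "title" entries when walking the fields dict of a structured
+   descriptor (`descr` here is assumed to be a PyArray_Descr with fields)::
+
+       PyObject *key, *value;
+       Py_ssize_t pos = 0;
+       while (PyDict_Next(descr->fields, &pos, &key, &value)) {
+           if (NPY_TITLE_KEY(key, value)) {
+               continue;   // skip the duplicate "title" entry
+           }
+           // handle the actual field entry here
+       }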
*/ +#define NPY_TITLE_KEY(key, value) (NPY_TITLE_KEY_check((key), (value))) + +#define DEPRECATE(msg) PyErr_WarnEx(PyExc_DeprecationWarning,msg,1) +#define DEPRECATE_FUTUREWARNING(msg) PyErr_WarnEx(PyExc_FutureWarning,msg,1) + +#ifdef __cplusplus +} +#endif + + +#endif /* NUMPY_CORE_INCLUDE_NUMPY_NDARRAYOBJECT_H_ */ diff --git a/.venv/lib/python3.12/site-packages/numpy/core/include/numpy/ndarraytypes.h b/.venv/lib/python3.12/site-packages/numpy/core/include/numpy/ndarraytypes.h new file mode 100644 index 00000000..742ba526 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/include/numpy/ndarraytypes.h @@ -0,0 +1,1945 @@ +#ifndef NUMPY_CORE_INCLUDE_NUMPY_NDARRAYTYPES_H_ +#define NUMPY_CORE_INCLUDE_NUMPY_NDARRAYTYPES_H_ + +#include "npy_common.h" +#include "npy_endian.h" +#include "npy_cpu.h" +#include "utils.h" + +#define NPY_NO_EXPORT NPY_VISIBILITY_HIDDEN + +/* Only use thread if configured in config and python supports it */ +#if defined WITH_THREAD && !NPY_NO_SMP + #define NPY_ALLOW_THREADS 1 +#else + #define NPY_ALLOW_THREADS 0 +#endif + +#ifndef __has_extension +#define __has_extension(x) 0 +#endif + +#if !defined(_NPY_NO_DEPRECATIONS) && \ + ((defined(__GNUC__)&& __GNUC__ >= 6) || \ + __has_extension(attribute_deprecated_with_message)) +#define NPY_ATTR_DEPRECATE(text) __attribute__ ((deprecated (text))) +#else +#define NPY_ATTR_DEPRECATE(text) +#endif + +/* + * There are several places in the code where an array of dimensions + * is allocated statically. This is the size of that static + * allocation. + * + * The array creation itself could have arbitrary dimensions but all + * the places where static allocation is used would need to be changed + * to dynamic (including inside of several structures) + */ + +#define NPY_MAXDIMS 32 +#define NPY_MAXARGS 32 + +/* Used for Converter Functions "O&" code in ParseTuple */ +#define NPY_FAIL 0 +#define NPY_SUCCEED 1 + + +enum NPY_TYPES { NPY_BOOL=0, + NPY_BYTE, NPY_UBYTE, + NPY_SHORT, NPY_USHORT, + NPY_INT, NPY_UINT, + NPY_LONG, NPY_ULONG, + NPY_LONGLONG, NPY_ULONGLONG, + NPY_FLOAT, NPY_DOUBLE, NPY_LONGDOUBLE, + NPY_CFLOAT, NPY_CDOUBLE, NPY_CLONGDOUBLE, + NPY_OBJECT=17, + NPY_STRING, NPY_UNICODE, + NPY_VOID, + /* + * New 1.6 types appended, may be integrated + * into the above in 2.0. 
+ */ + NPY_DATETIME, NPY_TIMEDELTA, NPY_HALF, + + NPY_NTYPES, + NPY_NOTYPE, + NPY_CHAR NPY_ATTR_DEPRECATE("Use NPY_STRING"), + NPY_USERDEF=256, /* leave room for characters */ + + /* The number of types not including the new 1.6 types */ + NPY_NTYPES_ABI_COMPATIBLE=21 +}; +#if defined(_MSC_VER) && !defined(__clang__) +#pragma deprecated(NPY_CHAR) +#endif + +/* basetype array priority */ +#define NPY_PRIORITY 0.0 + +/* default subtype priority */ +#define NPY_SUBTYPE_PRIORITY 1.0 + +/* default scalar priority */ +#define NPY_SCALAR_PRIORITY -1000000.0 + +/* How many floating point types are there (excluding half) */ +#define NPY_NUM_FLOATTYPE 3 + +/* + * These characters correspond to the array type and the struct + * module + */ + +enum NPY_TYPECHAR { + NPY_BOOLLTR = '?', + NPY_BYTELTR = 'b', + NPY_UBYTELTR = 'B', + NPY_SHORTLTR = 'h', + NPY_USHORTLTR = 'H', + NPY_INTLTR = 'i', + NPY_UINTLTR = 'I', + NPY_LONGLTR = 'l', + NPY_ULONGLTR = 'L', + NPY_LONGLONGLTR = 'q', + NPY_ULONGLONGLTR = 'Q', + NPY_HALFLTR = 'e', + NPY_FLOATLTR = 'f', + NPY_DOUBLELTR = 'd', + NPY_LONGDOUBLELTR = 'g', + NPY_CFLOATLTR = 'F', + NPY_CDOUBLELTR = 'D', + NPY_CLONGDOUBLELTR = 'G', + NPY_OBJECTLTR = 'O', + NPY_STRINGLTR = 'S', + NPY_STRINGLTR2 = 'a', + NPY_UNICODELTR = 'U', + NPY_VOIDLTR = 'V', + NPY_DATETIMELTR = 'M', + NPY_TIMEDELTALTR = 'm', + NPY_CHARLTR = 'c', + + /* + * No Descriptor, just a define -- this let's + * Python users specify an array of integers + * large enough to hold a pointer on the + * platform + */ + NPY_INTPLTR = 'p', + NPY_UINTPLTR = 'P', + + /* + * These are for dtype 'kinds', not dtype 'typecodes' + * as the above are for. + */ + NPY_GENBOOLLTR ='b', + NPY_SIGNEDLTR = 'i', + NPY_UNSIGNEDLTR = 'u', + NPY_FLOATINGLTR = 'f', + NPY_COMPLEXLTR = 'c' +}; + +/* + * Changing this may break Numpy API compatibility + * due to changing offsets in PyArray_ArrFuncs, so be + * careful. Here we have reused the mergesort slot for + * any kind of stable sort, the actual implementation will + * depend on the data type. 
+ */ +typedef enum { + NPY_QUICKSORT=0, + NPY_HEAPSORT=1, + NPY_MERGESORT=2, + NPY_STABLESORT=2, +} NPY_SORTKIND; +#define NPY_NSORTS (NPY_STABLESORT + 1) + + +typedef enum { + NPY_INTROSELECT=0 +} NPY_SELECTKIND; +#define NPY_NSELECTS (NPY_INTROSELECT + 1) + + +typedef enum { + NPY_SEARCHLEFT=0, + NPY_SEARCHRIGHT=1 +} NPY_SEARCHSIDE; +#define NPY_NSEARCHSIDES (NPY_SEARCHRIGHT + 1) + + +typedef enum { + NPY_NOSCALAR=-1, + NPY_BOOL_SCALAR, + NPY_INTPOS_SCALAR, + NPY_INTNEG_SCALAR, + NPY_FLOAT_SCALAR, + NPY_COMPLEX_SCALAR, + NPY_OBJECT_SCALAR +} NPY_SCALARKIND; +#define NPY_NSCALARKINDS (NPY_OBJECT_SCALAR + 1) + +/* For specifying array memory layout or iteration order */ +typedef enum { + /* Fortran order if inputs are all Fortran, C otherwise */ + NPY_ANYORDER=-1, + /* C order */ + NPY_CORDER=0, + /* Fortran order */ + NPY_FORTRANORDER=1, + /* An order as close to the inputs as possible */ + NPY_KEEPORDER=2 +} NPY_ORDER; + +/* For specifying allowed casting in operations which support it */ +typedef enum { + _NPY_ERROR_OCCURRED_IN_CAST = -1, + /* Only allow identical types */ + NPY_NO_CASTING=0, + /* Allow identical and byte swapped types */ + NPY_EQUIV_CASTING=1, + /* Only allow safe casts */ + NPY_SAFE_CASTING=2, + /* Allow safe casts or casts within the same kind */ + NPY_SAME_KIND_CASTING=3, + /* Allow any casts */ + NPY_UNSAFE_CASTING=4, +} NPY_CASTING; + +typedef enum { + NPY_CLIP=0, + NPY_WRAP=1, + NPY_RAISE=2 +} NPY_CLIPMODE; + +typedef enum { + NPY_VALID=0, + NPY_SAME=1, + NPY_FULL=2 +} NPY_CORRELATEMODE; + +/* The special not-a-time (NaT) value */ +#define NPY_DATETIME_NAT NPY_MIN_INT64 + +/* + * Upper bound on the length of a DATETIME ISO 8601 string + * YEAR: 21 (64-bit year) + * MONTH: 3 + * DAY: 3 + * HOURS: 3 + * MINUTES: 3 + * SECONDS: 3 + * ATTOSECONDS: 1 + 3*6 + * TIMEZONE: 5 + * NULL TERMINATOR: 1 + */ +#define NPY_DATETIME_MAX_ISO8601_STRLEN (21 + 3*5 + 1 + 3*6 + 6 + 1) + +/* The FR in the unit names stands for frequency */ +typedef enum { + /* Force signed enum type, must be -1 for code compatibility */ + NPY_FR_ERROR = -1, /* error or undetermined */ + + /* Start of valid units */ + NPY_FR_Y = 0, /* Years */ + NPY_FR_M = 1, /* Months */ + NPY_FR_W = 2, /* Weeks */ + /* Gap where 1.6 NPY_FR_B (value 3) was */ + NPY_FR_D = 4, /* Days */ + NPY_FR_h = 5, /* hours */ + NPY_FR_m = 6, /* minutes */ + NPY_FR_s = 7, /* seconds */ + NPY_FR_ms = 8, /* milliseconds */ + NPY_FR_us = 9, /* microseconds */ + NPY_FR_ns = 10, /* nanoseconds */ + NPY_FR_ps = 11, /* picoseconds */ + NPY_FR_fs = 12, /* femtoseconds */ + NPY_FR_as = 13, /* attoseconds */ + NPY_FR_GENERIC = 14 /* unbound units, can convert to anything */ +} NPY_DATETIMEUNIT; + +/* + * NOTE: With the NPY_FR_B gap for 1.6 ABI compatibility, NPY_DATETIME_NUMUNITS + * is technically one more than the actual number of units. + */ +#define NPY_DATETIME_NUMUNITS (NPY_FR_GENERIC + 1) +#define NPY_DATETIME_DEFAULTUNIT NPY_FR_GENERIC + +/* + * Business day conventions for mapping invalid business + * days to valid business days. + */ +typedef enum { + /* Go forward in time to the following business day. */ + NPY_BUSDAY_FORWARD, + NPY_BUSDAY_FOLLOWING = NPY_BUSDAY_FORWARD, + /* Go backward in time to the preceding business day. 
*/ + NPY_BUSDAY_BACKWARD, + NPY_BUSDAY_PRECEDING = NPY_BUSDAY_BACKWARD, + /* + * Go forward in time to the following business day, unless it + * crosses a month boundary, in which case go backward + */ + NPY_BUSDAY_MODIFIEDFOLLOWING, + /* + * Go backward in time to the preceding business day, unless it + * crosses a month boundary, in which case go forward. + */ + NPY_BUSDAY_MODIFIEDPRECEDING, + /* Produce a NaT for non-business days. */ + NPY_BUSDAY_NAT, + /* Raise an exception for non-business days. */ + NPY_BUSDAY_RAISE +} NPY_BUSDAY_ROLL; + +/************************************************************ + * NumPy Auxiliary Data for inner loops, sort functions, etc. + ************************************************************/ + +/* + * When creating an auxiliary data struct, this should always appear + * as the first member, like this: + * + * typedef struct { + * NpyAuxData base; + * double constant; + * } constant_multiplier_aux_data; + */ +typedef struct NpyAuxData_tag NpyAuxData; + +/* Function pointers for freeing or cloning auxiliary data */ +typedef void (NpyAuxData_FreeFunc) (NpyAuxData *); +typedef NpyAuxData *(NpyAuxData_CloneFunc) (NpyAuxData *); + +struct NpyAuxData_tag { + NpyAuxData_FreeFunc *free; + NpyAuxData_CloneFunc *clone; + /* To allow for a bit of expansion without breaking the ABI */ + void *reserved[2]; +}; + +/* Macros to use for freeing and cloning auxiliary data */ +#define NPY_AUXDATA_FREE(auxdata) \ + do { \ + if ((auxdata) != NULL) { \ + (auxdata)->free(auxdata); \ + } \ + } while(0) +#define NPY_AUXDATA_CLONE(auxdata) \ + ((auxdata)->clone(auxdata)) + +#define NPY_ERR(str) fprintf(stderr, #str); fflush(stderr); +#define NPY_ERR2(str) fprintf(stderr, str); fflush(stderr); + +/* +* Macros to define how array, and dimension/strides data is +* allocated. These should be made private +*/ + +#define NPY_USE_PYMEM 1 + + +#if NPY_USE_PYMEM == 1 +/* use the Raw versions which are safe to call with the GIL released */ +#define PyArray_malloc PyMem_RawMalloc +#define PyArray_free PyMem_RawFree +#define PyArray_realloc PyMem_RawRealloc +#else +#define PyArray_malloc malloc +#define PyArray_free free +#define PyArray_realloc realloc +#endif + +/* Dimensions and strides */ +#define PyDimMem_NEW(size) \ + ((npy_intp *)PyArray_malloc(size*sizeof(npy_intp))) + +#define PyDimMem_FREE(ptr) PyArray_free(ptr) + +#define PyDimMem_RENEW(ptr,size) \ + ((npy_intp *)PyArray_realloc(ptr,size*sizeof(npy_intp))) + +/* forward declaration */ +struct _PyArray_Descr; + +/* These must deal with unaligned and swapped data if necessary */ +typedef PyObject * (PyArray_GetItemFunc) (void *, void *); +typedef int (PyArray_SetItemFunc)(PyObject *, void *, void *); + +typedef void (PyArray_CopySwapNFunc)(void *, npy_intp, void *, npy_intp, + npy_intp, int, void *); + +typedef void (PyArray_CopySwapFunc)(void *, void *, int, void *); +typedef npy_bool (PyArray_NonzeroFunc)(void *, void *); + + +/* + * These assume aligned and notswapped data -- a buffer will be used + * before or contiguous data will be obtained + */ + +typedef int (PyArray_CompareFunc)(const void *, const void *, void *); +typedef int (PyArray_ArgFunc)(void*, npy_intp, npy_intp*, void *); + +typedef void (PyArray_DotFunc)(void *, npy_intp, void *, npy_intp, void *, + npy_intp, void *); + +typedef void (PyArray_VectorUnaryFunc)(void *, void *, npy_intp, void *, + void *); + +/* + * XXX the ignore argument should be removed next time the API version + * is bumped. It used to be the separator. 
+ */ +typedef int (PyArray_ScanFunc)(FILE *fp, void *dptr, + char *ignore, struct _PyArray_Descr *); +typedef int (PyArray_FromStrFunc)(char *s, void *dptr, char **endptr, + struct _PyArray_Descr *); + +typedef int (PyArray_FillFunc)(void *, npy_intp, void *); + +typedef int (PyArray_SortFunc)(void *, npy_intp, void *); +typedef int (PyArray_ArgSortFunc)(void *, npy_intp *, npy_intp, void *); +typedef int (PyArray_PartitionFunc)(void *, npy_intp, npy_intp, + npy_intp *, npy_intp *, + void *); +typedef int (PyArray_ArgPartitionFunc)(void *, npy_intp *, npy_intp, npy_intp, + npy_intp *, npy_intp *, + void *); + +typedef int (PyArray_FillWithScalarFunc)(void *, npy_intp, void *, void *); + +typedef int (PyArray_ScalarKindFunc)(void *); + +typedef void (PyArray_FastClipFunc)(void *in, npy_intp n_in, void *min, + void *max, void *out); +typedef void (PyArray_FastPutmaskFunc)(void *in, void *mask, npy_intp n_in, + void *values, npy_intp nv); +typedef int (PyArray_FastTakeFunc)(void *dest, void *src, npy_intp *indarray, + npy_intp nindarray, npy_intp n_outer, + npy_intp m_middle, npy_intp nelem, + NPY_CLIPMODE clipmode); + +typedef struct { + npy_intp *ptr; + int len; +} PyArray_Dims; + +typedef struct { + /* + * Functions to cast to most other standard types + * Can have some NULL entries. The types + * DATETIME, TIMEDELTA, and HALF go into the castdict + * even though they are built-in. + */ + PyArray_VectorUnaryFunc *cast[NPY_NTYPES_ABI_COMPATIBLE]; + + /* The next four functions *cannot* be NULL */ + + /* + * Functions to get and set items with standard Python types + * -- not array scalars + */ + PyArray_GetItemFunc *getitem; + PyArray_SetItemFunc *setitem; + + /* + * Copy and/or swap data. Memory areas may not overlap + * Use memmove first if they might + */ + PyArray_CopySwapNFunc *copyswapn; + PyArray_CopySwapFunc *copyswap; + + /* + * Function to compare items + * Can be NULL + */ + PyArray_CompareFunc *compare; + + /* + * Function to select largest + * Can be NULL + */ + PyArray_ArgFunc *argmax; + + /* + * Function to compute dot product + * Can be NULL + */ + PyArray_DotFunc *dotfunc; + + /* + * Function to scan an ASCII file and + * place a single value plus possible separator + * Can be NULL + */ + PyArray_ScanFunc *scanfunc; + + /* + * Function to read a single value from a string + * and adjust the pointer; Can be NULL + */ + PyArray_FromStrFunc *fromstr; + + /* + * Function to determine if data is zero or not + * If NULL a default version is + * used at Registration time. + */ + PyArray_NonzeroFunc *nonzero; + + /* + * Used for arange. Should return 0 on success + * and -1 on failure. + * Can be NULL. + */ + PyArray_FillFunc *fill; + + /* + * Function to fill arrays with scalar values + * Can be NULL + */ + PyArray_FillWithScalarFunc *fillwithscalar; + + /* + * Sorting functions + * Can be NULL + */ + PyArray_SortFunc *sort[NPY_NSORTS]; + PyArray_ArgSortFunc *argsort[NPY_NSORTS]; + + /* + * Dictionary of additional casting functions + * PyArray_VectorUnaryFuncs + * which can be populated to support casting + * to other registered types. Can be NULL + */ + PyObject *castdict; + + /* + * Functions useful for generalizing + * the casting rules. 
+ * Can be NULL; + */ + PyArray_ScalarKindFunc *scalarkind; + int **cancastscalarkindto; + int *cancastto; + + PyArray_FastClipFunc *fastclip; + PyArray_FastPutmaskFunc *fastputmask; + PyArray_FastTakeFunc *fasttake; + + /* + * Function to select smallest + * Can be NULL + */ + PyArray_ArgFunc *argmin; + +} PyArray_ArrFuncs; + +/* The item must be reference counted when it is inserted or extracted. */ +#define NPY_ITEM_REFCOUNT 0x01 +/* Same as needing REFCOUNT */ +#define NPY_ITEM_HASOBJECT 0x01 +/* Convert to list for pickling */ +#define NPY_LIST_PICKLE 0x02 +/* The item is a POINTER */ +#define NPY_ITEM_IS_POINTER 0x04 +/* memory needs to be initialized for this data-type */ +#define NPY_NEEDS_INIT 0x08 +/* operations need Python C-API so don't give-up thread. */ +#define NPY_NEEDS_PYAPI 0x10 +/* Use f.getitem when extracting elements of this data-type */ +#define NPY_USE_GETITEM 0x20 +/* Use f.setitem when setting creating 0-d array from this data-type.*/ +#define NPY_USE_SETITEM 0x40 +/* A sticky flag specifically for structured arrays */ +#define NPY_ALIGNED_STRUCT 0x80 + +/* + *These are inherited for global data-type if any data-types in the + * field have them + */ +#define NPY_FROM_FIELDS (NPY_NEEDS_INIT | NPY_LIST_PICKLE | \ + NPY_ITEM_REFCOUNT | NPY_NEEDS_PYAPI) + +#define NPY_OBJECT_DTYPE_FLAGS (NPY_LIST_PICKLE | NPY_USE_GETITEM | \ + NPY_ITEM_IS_POINTER | NPY_ITEM_REFCOUNT | \ + NPY_NEEDS_INIT | NPY_NEEDS_PYAPI) + +#define PyDataType_FLAGCHK(dtype, flag) \ + (((dtype)->flags & (flag)) == (flag)) + +#define PyDataType_REFCHK(dtype) \ + PyDataType_FLAGCHK(dtype, NPY_ITEM_REFCOUNT) + +typedef struct _PyArray_Descr { + PyObject_HEAD + /* + * the type object representing an + * instance of this type -- should not + * be two type_numbers with the same type + * object. + */ + PyTypeObject *typeobj; + /* kind for this type */ + char kind; + /* unique-character representing this type */ + char type; + /* + * '>' (big), '<' (little), '|' + * (not-applicable), or '=' (native). + */ + char byteorder; + /* flags describing data type */ + char flags; + /* number representing this type */ + int type_num; + /* element size (itemsize) for this type */ + int elsize; + /* alignment needed for this type */ + int alignment; + /* + * Non-NULL if this type is + * is an array (C-contiguous) + * of some other type + */ + struct _arr_descr *subarray; + /* + * The fields dictionary for this type + * For statically defined descr this + * is always Py_None + */ + PyObject *fields; + /* + * An ordered tuple of field names or NULL + * if no fields are defined + */ + PyObject *names; + /* + * a table of functions specific for each + * basic data descriptor + */ + PyArray_ArrFuncs *f; + /* Metadata about this dtype */ + PyObject *metadata; + /* + * Metadata specific to the C implementation + * of the particular dtype. This was added + * for NumPy 1.7.0. + */ + NpyAuxData *c_metadata; + /* Cached hash value (-1 if not yet computed). + * This was added for NumPy 2.0.0. + */ + npy_hash_t hash; +} PyArray_Descr; + +typedef struct _arr_descr { + PyArray_Descr *base; + PyObject *shape; /* a tuple */ +} PyArray_ArrayDescr; + +/* + * Memory handler structure for array data. 
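+ *
+ * For illustration, a minimal sketch of a custom handler; my_malloc,
+ * my_calloc, my_realloc and my_free are hypothetical user-provided
+ * functions with the signatures declared below:
+ *
+ *     static PyDataMem_Handler my_handler = {
+ *         "my_handler",
+ *         1,
+ *         {
+ *             NULL,
+ *             my_malloc,
+ *             my_calloc,
+ *             my_realloc,
+ *             my_free
+ *         }
+ *     };
+ *
+ * A handler like this is wrapped in a PyCapsule and installed with
+ * PyDataMem_SetHandler (available since NumPy 1.22).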
+ */ +/* The declaration of free differs from PyMemAllocatorEx */ +typedef struct { + void *ctx; + void* (*malloc) (void *ctx, size_t size); + void* (*calloc) (void *ctx, size_t nelem, size_t elsize); + void* (*realloc) (void *ctx, void *ptr, size_t new_size); + void (*free) (void *ctx, void *ptr, size_t size); + /* + * This is the end of the version=1 struct. Only add new fields after + * this line + */ +} PyDataMemAllocator; + +typedef struct { + char name[127]; /* multiple of 64 to keep the struct aligned */ + uint8_t version; /* currently 1 */ + PyDataMemAllocator allocator; +} PyDataMem_Handler; + + +/* + * The main array object structure. + * + * It has been recommended to use the inline functions defined below + * (PyArray_DATA and friends) to access fields here for a number of + * releases. Direct access to the members themselves is deprecated. + * To ensure that your code does not use deprecated access, + * #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION + * (or NPY_1_8_API_VERSION or higher as required). + */ +/* This struct will be moved to a private header in a future release */ +typedef struct tagPyArrayObject_fields { + PyObject_HEAD + /* Pointer to the raw data buffer */ + char *data; + /* The number of dimensions, also called 'ndim' */ + int nd; + /* The size in each dimension, also called 'shape' */ + npy_intp *dimensions; + /* + * Number of bytes to jump to get to the + * next element in each dimension + */ + npy_intp *strides; + /* + * This object is decref'd upon + * deletion of array. Except in the + * case of WRITEBACKIFCOPY which has + * special handling. + * + * For views it points to the original + * array, collapsed so no chains of + * views occur. + * + * For creation from buffer object it + * points to an object that should be + * decref'd on deletion + * + * For WRITEBACKIFCOPY flag this is an + * array to-be-updated upon calling + * PyArray_ResolveWritebackIfCopy + */ + PyObject *base; + /* Pointer to type structure */ + PyArray_Descr *descr; + /* Flags describing array -- see below */ + int flags; + /* For weak references */ + PyObject *weakreflist; +#if NPY_FEATURE_VERSION >= NPY_1_20_API_VERSION + void *_buffer_info; /* private buffer info, tagged to allow warning */ +#endif + /* + * For malloc/calloc/realloc/free per object + */ +#if NPY_FEATURE_VERSION >= NPY_1_22_API_VERSION + PyObject *mem_handler; +#endif +} PyArrayObject_fields; + +/* + * To hide the implementation details, we only expose + * the Python struct HEAD. + */ +#if !defined(NPY_NO_DEPRECATED_API) || \ + (NPY_NO_DEPRECATED_API < NPY_1_7_API_VERSION) +/* + * Can't put this in npy_deprecated_api.h like the others. + * PyArrayObject field access is deprecated as of NumPy 1.7. + */ +typedef PyArrayObject_fields PyArrayObject; +#else +typedef struct tagPyArrayObject { + PyObject_HEAD +} PyArrayObject; +#endif + +/* + * Removed 2020-Nov-25, NumPy 1.20 + * #define NPY_SIZEOF_PYARRAYOBJECT (sizeof(PyArrayObject_fields)) + * + * The above macro was removed as it gave a false sense of a stable ABI + * with respect to the structures size. If you require a runtime constant, + * you can use `PyArray_Type.tp_basicsize` instead. Otherwise, please + * see the PyArrayObject documentation or ask the NumPy developers for + * information on how to correctly replace the macro in a way that is + * compatible with multiple NumPy versions. 
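+ *
+ * For illustration, the runtime replacement suggested above is just a
+ * lookup on the loaded NumPy (a sketch):
+ *
+ *     Py_ssize_t array_struct_size = PyArray_Type.tp_basicsize;
+ *
+ * which reflects the NumPy actually imported at run time rather than a
+ * size baked in at compile time.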
+ */ + + +/* Array Flags Object */ +typedef struct PyArrayFlagsObject { + PyObject_HEAD + PyObject *arr; + int flags; +} PyArrayFlagsObject; + +/* Mirrors buffer object to ptr */ + +typedef struct { + PyObject_HEAD + PyObject *base; + void *ptr; + npy_intp len; + int flags; +} PyArray_Chunk; + +typedef struct { + NPY_DATETIMEUNIT base; + int num; +} PyArray_DatetimeMetaData; + +typedef struct { + NpyAuxData base; + PyArray_DatetimeMetaData meta; +} PyArray_DatetimeDTypeMetaData; + +/* + * This structure contains an exploded view of a date-time value. + * NaT is represented by year == NPY_DATETIME_NAT. + */ +typedef struct { + npy_int64 year; + npy_int32 month, day, hour, min, sec, us, ps, as; +} npy_datetimestruct; + +/* This is not used internally. */ +typedef struct { + npy_int64 day; + npy_int32 sec, us, ps, as; +} npy_timedeltastruct; + +typedef int (PyArray_FinalizeFunc)(PyArrayObject *, PyObject *); + +/* + * Means c-style contiguous (last index varies the fastest). The data + * elements right after each other. + * + * This flag may be requested in constructor functions. + * This flag may be tested for in PyArray_FLAGS(arr). + */ +#define NPY_ARRAY_C_CONTIGUOUS 0x0001 + +/* + * Set if array is a contiguous Fortran array: the first index varies + * the fastest in memory (strides array is reverse of C-contiguous + * array) + * + * This flag may be requested in constructor functions. + * This flag may be tested for in PyArray_FLAGS(arr). + */ +#define NPY_ARRAY_F_CONTIGUOUS 0x0002 + +/* + * Note: all 0-d arrays are C_CONTIGUOUS and F_CONTIGUOUS. If a + * 1-d array is C_CONTIGUOUS it is also F_CONTIGUOUS. Arrays with + * more then one dimension can be C_CONTIGUOUS and F_CONTIGUOUS + * at the same time if they have either zero or one element. + * A higher dimensional array always has the same contiguity flags as + * `array.squeeze()`; dimensions with `array.shape[dimension] == 1` are + * effectively ignored when checking for contiguity. + */ + +/* + * If set, the array owns the data: it will be free'd when the array + * is deleted. + * + * This flag may be tested for in PyArray_FLAGS(arr). + */ +#define NPY_ARRAY_OWNDATA 0x0004 + +/* + * An array never has the next four set; they're only used as parameter + * flags to the various FromAny functions + * + * This flag may be requested in constructor functions. + */ + +/* Cause a cast to occur regardless of whether or not it is safe. */ +#define NPY_ARRAY_FORCECAST 0x0010 + +/* + * Always copy the array. Returned arrays are always CONTIGUOUS, + * ALIGNED, and WRITEABLE. See also: NPY_ARRAY_ENSURENOCOPY = 0x4000. + * + * This flag may be requested in constructor functions. + */ +#define NPY_ARRAY_ENSURECOPY 0x0020 + +/* + * Make sure the returned array is a base-class ndarray + * + * This flag may be requested in constructor functions. + */ +#define NPY_ARRAY_ENSUREARRAY 0x0040 + +/* + * Make sure that the strides are in units of the element size Needed + * for some operations with record-arrays. + * + * This flag may be requested in constructor functions. + */ +#define NPY_ARRAY_ELEMENTSTRIDES 0x0080 + +/* + * Array data is aligned on the appropriate memory address for the type + * stored according to how the compiler would align things (e.g., an + * array of integers (4 bytes each) starts on a memory address that's + * a multiple of 4) + * + * This flag may be requested in constructor functions. + * This flag may be tested for in PyArray_FLAGS(arr). 
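+ *
+ * For illustration, a typical flag test (a sketch; `arr` is a
+ * hypothetical PyArrayObject *):
+ *
+ *     if (PyArray_CHKFLAGS(arr, NPY_ARRAY_ALIGNED | NPY_ARRAY_WRITEABLE)) {
+ *         ... aligned, writeable fast path ...
+ *     }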
+ */ +#define NPY_ARRAY_ALIGNED 0x0100 + +/* + * Array data has the native endianness + * + * This flag may be requested in constructor functions. + */ +#define NPY_ARRAY_NOTSWAPPED 0x0200 + +/* + * Array data is writeable + * + * This flag may be requested in constructor functions. + * This flag may be tested for in PyArray_FLAGS(arr). + */ +#define NPY_ARRAY_WRITEABLE 0x0400 + +/* + * If this flag is set, then base contains a pointer to an array of + * the same size that should be updated with the current contents of + * this array when PyArray_ResolveWritebackIfCopy is called. + * + * This flag may be requested in constructor functions. + * This flag may be tested for in PyArray_FLAGS(arr). + */ +#define NPY_ARRAY_WRITEBACKIFCOPY 0x2000 + +/* + * No copy may be made while converting from an object/array (result is a view) + * + * This flag may be requested in constructor functions. + */ +#define NPY_ARRAY_ENSURENOCOPY 0x4000 + +/* + * NOTE: there are also internal flags defined in multiarray/arrayobject.h, + * which start at bit 31 and work down. + */ + +#define NPY_ARRAY_BEHAVED (NPY_ARRAY_ALIGNED | \ + NPY_ARRAY_WRITEABLE) +#define NPY_ARRAY_BEHAVED_NS (NPY_ARRAY_ALIGNED | \ + NPY_ARRAY_WRITEABLE | \ + NPY_ARRAY_NOTSWAPPED) +#define NPY_ARRAY_CARRAY (NPY_ARRAY_C_CONTIGUOUS | \ + NPY_ARRAY_BEHAVED) +#define NPY_ARRAY_CARRAY_RO (NPY_ARRAY_C_CONTIGUOUS | \ + NPY_ARRAY_ALIGNED) +#define NPY_ARRAY_FARRAY (NPY_ARRAY_F_CONTIGUOUS | \ + NPY_ARRAY_BEHAVED) +#define NPY_ARRAY_FARRAY_RO (NPY_ARRAY_F_CONTIGUOUS | \ + NPY_ARRAY_ALIGNED) +#define NPY_ARRAY_DEFAULT (NPY_ARRAY_CARRAY) +#define NPY_ARRAY_IN_ARRAY (NPY_ARRAY_CARRAY_RO) +#define NPY_ARRAY_OUT_ARRAY (NPY_ARRAY_CARRAY) +#define NPY_ARRAY_INOUT_ARRAY (NPY_ARRAY_CARRAY) +#define NPY_ARRAY_INOUT_ARRAY2 (NPY_ARRAY_CARRAY | \ + NPY_ARRAY_WRITEBACKIFCOPY) +#define NPY_ARRAY_IN_FARRAY (NPY_ARRAY_FARRAY_RO) +#define NPY_ARRAY_OUT_FARRAY (NPY_ARRAY_FARRAY) +#define NPY_ARRAY_INOUT_FARRAY (NPY_ARRAY_FARRAY) +#define NPY_ARRAY_INOUT_FARRAY2 (NPY_ARRAY_FARRAY | \ + NPY_ARRAY_WRITEBACKIFCOPY) + +#define NPY_ARRAY_UPDATE_ALL (NPY_ARRAY_C_CONTIGUOUS | \ + NPY_ARRAY_F_CONTIGUOUS | \ + NPY_ARRAY_ALIGNED) + +/* This flag is for the array interface, not PyArrayObject */ +#define NPY_ARR_HAS_DESCR 0x0800 + + + + +/* + * Size of internal buffers used for alignment Make BUFSIZE a multiple + * of sizeof(npy_cdouble) -- usually 16 so that ufunc buffers are aligned + */ +#define NPY_MIN_BUFSIZE ((int)sizeof(npy_cdouble)) +#define NPY_MAX_BUFSIZE (((int)sizeof(npy_cdouble))*1000000) +#define NPY_BUFSIZE 8192 +/* buffer stress test size: */ +/*#define NPY_BUFSIZE 17*/ + +#define PyArray_MAX(a,b) (((a)>(b))?(a):(b)) +#define PyArray_MIN(a,b) (((a)<(b))?(a):(b)) +#define PyArray_CLT(p,q) ((((p).real==(q).real) ? ((p).imag < (q).imag) : \ + ((p).real < (q).real))) +#define PyArray_CGT(p,q) ((((p).real==(q).real) ? ((p).imag > (q).imag) : \ + ((p).real > (q).real))) +#define PyArray_CLE(p,q) ((((p).real==(q).real) ? ((p).imag <= (q).imag) : \ + ((p).real <= (q).real))) +#define PyArray_CGE(p,q) ((((p).real==(q).real) ? ((p).imag >= (q).imag) : \ + ((p).real >= (q).real))) +#define PyArray_CEQ(p,q) (((p).real==(q).real) && ((p).imag == (q).imag)) +#define PyArray_CNE(p,q) (((p).real!=(q).real) || ((p).imag != (q).imag)) + +/* + * C API: consists of Macros and functions. The MACROS are defined + * here. 
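+ *
+ * For illustration, a common pattern with the GIL-release macros defined
+ * below (a sketch; `do_work` is a hypothetical routine that touches no
+ * Python objects, and `arr` a hypothetical 1-d PyArrayObject *):
+ *
+ *     NPY_BEGIN_THREADS_DEF;
+ *     NPY_BEGIN_THREADS;
+ *     do_work(PyArray_DATA(arr), PyArray_DIM(arr, 0));
+ *     NPY_END_THREADS;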
+ */ + + +#define PyArray_ISCONTIGUOUS(m) PyArray_CHKFLAGS((m), NPY_ARRAY_C_CONTIGUOUS) +#define PyArray_ISWRITEABLE(m) PyArray_CHKFLAGS((m), NPY_ARRAY_WRITEABLE) +#define PyArray_ISALIGNED(m) PyArray_CHKFLAGS((m), NPY_ARRAY_ALIGNED) + +#define PyArray_IS_C_CONTIGUOUS(m) PyArray_CHKFLAGS((m), NPY_ARRAY_C_CONTIGUOUS) +#define PyArray_IS_F_CONTIGUOUS(m) PyArray_CHKFLAGS((m), NPY_ARRAY_F_CONTIGUOUS) + +/* the variable is used in some places, so always define it */ +#define NPY_BEGIN_THREADS_DEF PyThreadState *_save=NULL; +#if NPY_ALLOW_THREADS +#define NPY_BEGIN_ALLOW_THREADS Py_BEGIN_ALLOW_THREADS +#define NPY_END_ALLOW_THREADS Py_END_ALLOW_THREADS +#define NPY_BEGIN_THREADS do {_save = PyEval_SaveThread();} while (0); +#define NPY_END_THREADS do { if (_save) \ + { PyEval_RestoreThread(_save); _save = NULL;} } while (0); +#define NPY_BEGIN_THREADS_THRESHOLDED(loop_size) do { if ((loop_size) > 500) \ + { _save = PyEval_SaveThread();} } while (0); + +#define NPY_BEGIN_THREADS_DESCR(dtype) \ + do {if (!(PyDataType_FLAGCHK((dtype), NPY_NEEDS_PYAPI))) \ + NPY_BEGIN_THREADS;} while (0); + +#define NPY_END_THREADS_DESCR(dtype) \ + do {if (!(PyDataType_FLAGCHK((dtype), NPY_NEEDS_PYAPI))) \ + NPY_END_THREADS; } while (0); + +#define NPY_ALLOW_C_API_DEF PyGILState_STATE __save__; +#define NPY_ALLOW_C_API do {__save__ = PyGILState_Ensure();} while (0); +#define NPY_DISABLE_C_API do {PyGILState_Release(__save__);} while (0); +#else +#define NPY_BEGIN_ALLOW_THREADS +#define NPY_END_ALLOW_THREADS +#define NPY_BEGIN_THREADS +#define NPY_END_THREADS +#define NPY_BEGIN_THREADS_THRESHOLDED(loop_size) +#define NPY_BEGIN_THREADS_DESCR(dtype) +#define NPY_END_THREADS_DESCR(dtype) +#define NPY_ALLOW_C_API_DEF +#define NPY_ALLOW_C_API +#define NPY_DISABLE_C_API +#endif + +/********************************** + * The nditer object, added in 1.6 + **********************************/ + +/* The actual structure of the iterator is an internal detail */ +typedef struct NpyIter_InternalOnly NpyIter; + +/* Iterator function pointers that may be specialized */ +typedef int (NpyIter_IterNextFunc)(NpyIter *iter); +typedef void (NpyIter_GetMultiIndexFunc)(NpyIter *iter, + npy_intp *outcoords); + +/*** Global flags that may be passed to the iterator constructors ***/ + +/* Track an index representing C order */ +#define NPY_ITER_C_INDEX 0x00000001 +/* Track an index representing Fortran order */ +#define NPY_ITER_F_INDEX 0x00000002 +/* Track a multi-index */ +#define NPY_ITER_MULTI_INDEX 0x00000004 +/* User code external to the iterator does the 1-dimensional innermost loop */ +#define NPY_ITER_EXTERNAL_LOOP 0x00000008 +/* Convert all the operands to a common data type */ +#define NPY_ITER_COMMON_DTYPE 0x00000010 +/* Operands may hold references, requiring API access during iteration */ +#define NPY_ITER_REFS_OK 0x00000020 +/* Zero-sized operands should be permitted, iteration checks IterSize for 0 */ +#define NPY_ITER_ZEROSIZE_OK 0x00000040 +/* Permits reductions (size-0 stride with dimension size > 1) */ +#define NPY_ITER_REDUCE_OK 0x00000080 +/* Enables sub-range iteration */ +#define NPY_ITER_RANGED 0x00000100 +/* Enables buffering */ +#define NPY_ITER_BUFFERED 0x00000200 +/* When buffering is enabled, grows the inner loop if possible */ +#define NPY_ITER_GROWINNER 0x00000400 +/* Delay allocation of buffers until first Reset* call */ +#define NPY_ITER_DELAY_BUFALLOC 0x00000800 +/* When NPY_KEEPORDER is specified, disable reversing negative-stride axes */ +#define NPY_ITER_DONT_NEGATE_STRIDES 0x00001000 +/* + * If output 
operands overlap with other operands (based on heuristics that + * has false positives but no false negatives), make temporary copies to + * eliminate overlap. + */ +#define NPY_ITER_COPY_IF_OVERLAP 0x00002000 + +/*** Per-operand flags that may be passed to the iterator constructors ***/ + +/* The operand will be read from and written to */ +#define NPY_ITER_READWRITE 0x00010000 +/* The operand will only be read from */ +#define NPY_ITER_READONLY 0x00020000 +/* The operand will only be written to */ +#define NPY_ITER_WRITEONLY 0x00040000 +/* The operand's data must be in native byte order */ +#define NPY_ITER_NBO 0x00080000 +/* The operand's data must be aligned */ +#define NPY_ITER_ALIGNED 0x00100000 +/* The operand's data must be contiguous (within the inner loop) */ +#define NPY_ITER_CONTIG 0x00200000 +/* The operand may be copied to satisfy requirements */ +#define NPY_ITER_COPY 0x00400000 +/* The operand may be copied with WRITEBACKIFCOPY to satisfy requirements */ +#define NPY_ITER_UPDATEIFCOPY 0x00800000 +/* Allocate the operand if it is NULL */ +#define NPY_ITER_ALLOCATE 0x01000000 +/* If an operand is allocated, don't use any subtype */ +#define NPY_ITER_NO_SUBTYPE 0x02000000 +/* This is a virtual array slot, operand is NULL but temporary data is there */ +#define NPY_ITER_VIRTUAL 0x04000000 +/* Require that the dimension match the iterator dimensions exactly */ +#define NPY_ITER_NO_BROADCAST 0x08000000 +/* A mask is being used on this array, affects buffer -> array copy */ +#define NPY_ITER_WRITEMASKED 0x10000000 +/* This array is the mask for all WRITEMASKED operands */ +#define NPY_ITER_ARRAYMASK 0x20000000 +/* Assume iterator order data access for COPY_IF_OVERLAP */ +#define NPY_ITER_OVERLAP_ASSUME_ELEMENTWISE 0x40000000 + +#define NPY_ITER_GLOBAL_FLAGS 0x0000ffff +#define NPY_ITER_PER_OP_FLAGS 0xffff0000 + + +/***************************** + * Basic iterator object + *****************************/ + +/* FWD declaration */ +typedef struct PyArrayIterObject_tag PyArrayIterObject; + +/* + * type of the function which translates a set of coordinates to a + * pointer to the data + */ +typedef char* (*npy_iter_get_dataptr_t)( + PyArrayIterObject* iter, const npy_intp*); + +struct PyArrayIterObject_tag { + PyObject_HEAD + int nd_m1; /* number of dimensions - 1 */ + npy_intp index, size; + npy_intp coordinates[NPY_MAXDIMS];/* N-dimensional loop */ + npy_intp dims_m1[NPY_MAXDIMS]; /* ao->dimensions - 1 */ + npy_intp strides[NPY_MAXDIMS]; /* ao->strides or fake */ + npy_intp backstrides[NPY_MAXDIMS];/* how far to jump back */ + npy_intp factors[NPY_MAXDIMS]; /* shape factors */ + PyArrayObject *ao; + char *dataptr; /* pointer to current item*/ + npy_bool contiguous; + + npy_intp bounds[NPY_MAXDIMS][2]; + npy_intp limits[NPY_MAXDIMS][2]; + npy_intp limits_sizes[NPY_MAXDIMS]; + npy_iter_get_dataptr_t translate; +} ; + + +/* Iterator API */ +#define PyArrayIter_Check(op) PyObject_TypeCheck((op), &PyArrayIter_Type) + +#define _PyAIT(it) ((PyArrayIterObject *)(it)) +#define PyArray_ITER_RESET(it) do { \ + _PyAIT(it)->index = 0; \ + _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao); \ + memset(_PyAIT(it)->coordinates, 0, \ + (_PyAIT(it)->nd_m1+1)*sizeof(npy_intp)); \ +} while (0) + +#define _PyArray_ITER_NEXT1(it) do { \ + (it)->dataptr += _PyAIT(it)->strides[0]; \ + (it)->coordinates[0]++; \ +} while (0) + +#define _PyArray_ITER_NEXT2(it) do { \ + if ((it)->coordinates[1] < (it)->dims_m1[1]) { \ + (it)->coordinates[1]++; \ + (it)->dataptr += (it)->strides[1]; \ + } \ + else { \ + 
(it)->coordinates[1] = 0; \ + (it)->coordinates[0]++; \ + (it)->dataptr += (it)->strides[0] - \ + (it)->backstrides[1]; \ + } \ +} while (0) + +#define PyArray_ITER_NEXT(it) do { \ + _PyAIT(it)->index++; \ + if (_PyAIT(it)->nd_m1 == 0) { \ + _PyArray_ITER_NEXT1(_PyAIT(it)); \ + } \ + else if (_PyAIT(it)->contiguous) \ + _PyAIT(it)->dataptr += PyArray_DESCR(_PyAIT(it)->ao)->elsize; \ + else if (_PyAIT(it)->nd_m1 == 1) { \ + _PyArray_ITER_NEXT2(_PyAIT(it)); \ + } \ + else { \ + int __npy_i; \ + for (__npy_i=_PyAIT(it)->nd_m1; __npy_i >= 0; __npy_i--) { \ + if (_PyAIT(it)->coordinates[__npy_i] < \ + _PyAIT(it)->dims_m1[__npy_i]) { \ + _PyAIT(it)->coordinates[__npy_i]++; \ + _PyAIT(it)->dataptr += \ + _PyAIT(it)->strides[__npy_i]; \ + break; \ + } \ + else { \ + _PyAIT(it)->coordinates[__npy_i] = 0; \ + _PyAIT(it)->dataptr -= \ + _PyAIT(it)->backstrides[__npy_i]; \ + } \ + } \ + } \ +} while (0) + +#define PyArray_ITER_GOTO(it, destination) do { \ + int __npy_i; \ + _PyAIT(it)->index = 0; \ + _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao); \ + for (__npy_i = _PyAIT(it)->nd_m1; __npy_i>=0; __npy_i--) { \ + if (destination[__npy_i] < 0) { \ + destination[__npy_i] += \ + _PyAIT(it)->dims_m1[__npy_i]+1; \ + } \ + _PyAIT(it)->dataptr += destination[__npy_i] * \ + _PyAIT(it)->strides[__npy_i]; \ + _PyAIT(it)->coordinates[__npy_i] = \ + destination[__npy_i]; \ + _PyAIT(it)->index += destination[__npy_i] * \ + ( __npy_i==_PyAIT(it)->nd_m1 ? 1 : \ + _PyAIT(it)->dims_m1[__npy_i+1]+1) ; \ + } \ +} while (0) + +#define PyArray_ITER_GOTO1D(it, ind) do { \ + int __npy_i; \ + npy_intp __npy_ind = (npy_intp)(ind); \ + if (__npy_ind < 0) __npy_ind += _PyAIT(it)->size; \ + _PyAIT(it)->index = __npy_ind; \ + if (_PyAIT(it)->nd_m1 == 0) { \ + _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao) + \ + __npy_ind * _PyAIT(it)->strides[0]; \ + } \ + else if (_PyAIT(it)->contiguous) \ + _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao) + \ + __npy_ind * PyArray_DESCR(_PyAIT(it)->ao)->elsize; \ + else { \ + _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao); \ + for (__npy_i = 0; __npy_i<=_PyAIT(it)->nd_m1; \ + __npy_i++) { \ + _PyAIT(it)->coordinates[__npy_i] = \ + (__npy_ind / _PyAIT(it)->factors[__npy_i]); \ + _PyAIT(it)->dataptr += \ + (__npy_ind / _PyAIT(it)->factors[__npy_i]) \ + * _PyAIT(it)->strides[__npy_i]; \ + __npy_ind %= _PyAIT(it)->factors[__npy_i]; \ + } \ + } \ +} while (0) + +#define PyArray_ITER_DATA(it) ((void *)(_PyAIT(it)->dataptr)) + +#define PyArray_ITER_NOTDONE(it) (_PyAIT(it)->index < _PyAIT(it)->size) + + +/* + * Any object passed to PyArray_Broadcast must be binary compatible + * with this structure. 
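+ *
+ * For illustration, broadcasting two inputs with this multi-iterator (a
+ * sketch; error handling omitted, `a` and `b` are hypothetical
+ * PyArrayObject * arguments):
+ *
+ *     PyArrayMultiIterObject *multi =
+ *         (PyArrayMultiIterObject *)PyArray_MultiIterNew(2, a, b);
+ *     while (PyArray_MultiIter_NOTDONE(multi)) {
+ *         double *pa = (double *)PyArray_MultiIter_DATA(multi, 0);
+ *         double *pb = (double *)PyArray_MultiIter_DATA(multi, 1);
+ *         ... combine *pa and *pb ...
+ *         PyArray_MultiIter_NEXT(multi);
+ *     }
+ *     Py_DECREF(multi);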
+ */ + +typedef struct { + PyObject_HEAD + int numiter; /* number of iters */ + npy_intp size; /* broadcasted size */ + npy_intp index; /* current index */ + int nd; /* number of dims */ + npy_intp dimensions[NPY_MAXDIMS]; /* dimensions */ + PyArrayIterObject *iters[NPY_MAXARGS]; /* iterators */ +} PyArrayMultiIterObject; + +#define _PyMIT(m) ((PyArrayMultiIterObject *)(m)) +#define PyArray_MultiIter_RESET(multi) do { \ + int __npy_mi; \ + _PyMIT(multi)->index = 0; \ + for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ + PyArray_ITER_RESET(_PyMIT(multi)->iters[__npy_mi]); \ + } \ +} while (0) + +#define PyArray_MultiIter_NEXT(multi) do { \ + int __npy_mi; \ + _PyMIT(multi)->index++; \ + for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ + PyArray_ITER_NEXT(_PyMIT(multi)->iters[__npy_mi]); \ + } \ +} while (0) + +#define PyArray_MultiIter_GOTO(multi, dest) do { \ + int __npy_mi; \ + for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ + PyArray_ITER_GOTO(_PyMIT(multi)->iters[__npy_mi], dest); \ + } \ + _PyMIT(multi)->index = _PyMIT(multi)->iters[0]->index; \ +} while (0) + +#define PyArray_MultiIter_GOTO1D(multi, ind) do { \ + int __npy_mi; \ + for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ + PyArray_ITER_GOTO1D(_PyMIT(multi)->iters[__npy_mi], ind); \ + } \ + _PyMIT(multi)->index = _PyMIT(multi)->iters[0]->index; \ +} while (0) + +#define PyArray_MultiIter_DATA(multi, i) \ + ((void *)(_PyMIT(multi)->iters[i]->dataptr)) + +#define PyArray_MultiIter_NEXTi(multi, i) \ + PyArray_ITER_NEXT(_PyMIT(multi)->iters[i]) + +#define PyArray_MultiIter_NOTDONE(multi) \ + (_PyMIT(multi)->index < _PyMIT(multi)->size) + +/* + * Store the information needed for fancy-indexing over an array. The + * fields are slightly unordered to keep consec, dataptr and subspace + * where they were originally. + */ +typedef struct { + PyObject_HEAD + /* + * Multi-iterator portion --- needs to be present in this + * order to work with PyArray_Broadcast + */ + + int numiter; /* number of index-array + iterators */ + npy_intp size; /* size of broadcasted + result */ + npy_intp index; /* current index */ + int nd; /* number of dims */ + npy_intp dimensions[NPY_MAXDIMS]; /* dimensions */ + NpyIter *outer; /* index objects + iterator */ + void *unused[NPY_MAXDIMS - 2]; + PyArrayObject *array; + /* Flat iterator for the indexed array. For compatibility solely. */ + PyArrayIterObject *ait; + + /* + * Subspace array. For binary compatibility (was an iterator, + * but only the check for NULL should be used). + */ + PyArrayObject *subspace; + + /* + * if subspace iteration, then this is the array of axes in + * the underlying array represented by the index objects + */ + int iteraxes[NPY_MAXDIMS]; + npy_intp fancy_strides[NPY_MAXDIMS]; + + /* pointer when all fancy indices are 0 */ + char *baseoffset; + + /* + * after binding consec denotes at which axis the fancy axes + * are inserted. + */ + int consec; + char *dataptr; + + int nd_fancy; + npy_intp fancy_dims[NPY_MAXDIMS]; + + /* + * Whether the iterator (any of the iterators) requires API. This is + * unused by NumPy itself; ArrayMethod flags are more precise. + */ + int needs_api; + + /* + * Extra op information. + */ + PyArrayObject *extra_op; + PyArray_Descr *extra_op_dtype; /* desired dtype */ + npy_uint32 *extra_op_flags; /* Iterator flags */ + + NpyIter *extra_op_iter; + NpyIter_IterNextFunc *extra_op_next; + char **extra_op_ptrs; + + /* + * Information about the iteration state. 
+ */ + NpyIter_IterNextFunc *outer_next; + char **outer_ptrs; + npy_intp *outer_strides; + + /* + * Information about the subspace iterator. + */ + NpyIter *subspace_iter; + NpyIter_IterNextFunc *subspace_next; + char **subspace_ptrs; + npy_intp *subspace_strides; + + /* Count for the external loop (which ever it is) for API iteration */ + npy_intp iter_count; + +} PyArrayMapIterObject; + +enum { + NPY_NEIGHBORHOOD_ITER_ZERO_PADDING, + NPY_NEIGHBORHOOD_ITER_ONE_PADDING, + NPY_NEIGHBORHOOD_ITER_CONSTANT_PADDING, + NPY_NEIGHBORHOOD_ITER_CIRCULAR_PADDING, + NPY_NEIGHBORHOOD_ITER_MIRROR_PADDING +}; + +typedef struct { + PyObject_HEAD + + /* + * PyArrayIterObject part: keep this in this exact order + */ + int nd_m1; /* number of dimensions - 1 */ + npy_intp index, size; + npy_intp coordinates[NPY_MAXDIMS];/* N-dimensional loop */ + npy_intp dims_m1[NPY_MAXDIMS]; /* ao->dimensions - 1 */ + npy_intp strides[NPY_MAXDIMS]; /* ao->strides or fake */ + npy_intp backstrides[NPY_MAXDIMS];/* how far to jump back */ + npy_intp factors[NPY_MAXDIMS]; /* shape factors */ + PyArrayObject *ao; + char *dataptr; /* pointer to current item*/ + npy_bool contiguous; + + npy_intp bounds[NPY_MAXDIMS][2]; + npy_intp limits[NPY_MAXDIMS][2]; + npy_intp limits_sizes[NPY_MAXDIMS]; + npy_iter_get_dataptr_t translate; + + /* + * New members + */ + npy_intp nd; + + /* Dimensions is the dimension of the array */ + npy_intp dimensions[NPY_MAXDIMS]; + + /* + * Neighborhood points coordinates are computed relatively to the + * point pointed by _internal_iter + */ + PyArrayIterObject* _internal_iter; + /* + * To keep a reference to the representation of the constant value + * for constant padding + */ + char* constant; + + int mode; +} PyArrayNeighborhoodIterObject; + +/* + * Neighborhood iterator API + */ + +/* General: those work for any mode */ +static inline int +PyArrayNeighborhoodIter_Reset(PyArrayNeighborhoodIterObject* iter); +static inline int +PyArrayNeighborhoodIter_Next(PyArrayNeighborhoodIterObject* iter); +#if 0 +static inline int +PyArrayNeighborhoodIter_Next2D(PyArrayNeighborhoodIterObject* iter); +#endif + +/* + * Include inline implementations - functions defined there are not + * considered public API + */ +#define NUMPY_CORE_INCLUDE_NUMPY__NEIGHBORHOOD_IMP_H_ +#include "_neighborhood_iterator_imp.h" +#undef NUMPY_CORE_INCLUDE_NUMPY__NEIGHBORHOOD_IMP_H_ + + + +/* The default array type */ +#define NPY_DEFAULT_TYPE NPY_DOUBLE + +/* + * All sorts of useful ways to look into a PyArrayObject. It is recommended + * to use PyArrayObject * objects instead of always casting from PyObject *, + * for improved type checking. + * + * In many cases here the macro versions of the accessors are deprecated, + * but can't be immediately changed to inline functions because the + * preexisting macros accept PyObject * and do automatic casts. Inline + * functions accepting PyArrayObject * provides for some compile-time + * checking of correctness when working with these objects in C. + */ + +#define PyArray_ISONESEGMENT(m) (PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS) || \ + PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS)) + +#define PyArray_ISFORTRAN(m) (PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS) && \ + (!PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS))) + +#define PyArray_FORTRAN_IF(m) ((PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS) ? 
\ + NPY_ARRAY_F_CONTIGUOUS : 0)) + +#if (defined(NPY_NO_DEPRECATED_API) && (NPY_1_7_API_VERSION <= NPY_NO_DEPRECATED_API)) +/* + * Changing access macros into functions, to allow for future hiding + * of the internal memory layout. This later hiding will allow the 2.x series + * to change the internal representation of arrays without affecting + * ABI compatibility. + */ + +static inline int +PyArray_NDIM(const PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->nd; +} + +static inline void * +PyArray_DATA(PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->data; +} + +static inline char * +PyArray_BYTES(PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->data; +} + +static inline npy_intp * +PyArray_DIMS(PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->dimensions; +} + +static inline npy_intp * +PyArray_STRIDES(PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->strides; +} + +static inline npy_intp +PyArray_DIM(const PyArrayObject *arr, int idim) +{ + return ((PyArrayObject_fields *)arr)->dimensions[idim]; +} + +static inline npy_intp +PyArray_STRIDE(const PyArrayObject *arr, int istride) +{ + return ((PyArrayObject_fields *)arr)->strides[istride]; +} + +static inline NPY_RETURNS_BORROWED_REF PyObject * +PyArray_BASE(PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->base; +} + +static inline NPY_RETURNS_BORROWED_REF PyArray_Descr * +PyArray_DESCR(PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->descr; +} + +static inline int +PyArray_FLAGS(const PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->flags; +} + +static inline npy_intp +PyArray_ITEMSIZE(const PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->descr->elsize; +} + +static inline int +PyArray_TYPE(const PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->descr->type_num; +} + +static inline int +PyArray_CHKFLAGS(const PyArrayObject *arr, int flags) +{ + return (PyArray_FLAGS(arr) & flags) == flags; +} + +static inline PyObject * +PyArray_GETITEM(const PyArrayObject *arr, const char *itemptr) +{ + return ((PyArrayObject_fields *)arr)->descr->f->getitem( + (void *)itemptr, (PyArrayObject *)arr); +} + +/* + * SETITEM should only be used if it is known that the value is a scalar + * and of a type understood by the arrays dtype. + * Use `PyArray_Pack` if the value may be of a different dtype. + */ +static inline int +PyArray_SETITEM(PyArrayObject *arr, char *itemptr, PyObject *v) +{ + return ((PyArrayObject_fields *)arr)->descr->f->setitem(v, itemptr, arr); +} + +#else + +/* These macros are deprecated as of NumPy 1.7. 
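+ *
+ * For illustration, element access with the accessors above (a sketch;
+ * `arr` is a hypothetical aligned, C-contiguous 1-d array of doubles):
+ *
+ *     npy_intp n = PyArray_DIM(arr, 0);
+ *     double *data = (double *)PyArray_DATA(arr);
+ *     for (npy_intp i = 0; i < n; i++) {
+ *         data[i] *= 2.0;
+ *     }
+ *
+ * The macro spellings below accept PyObject * and cast internally.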
*/ +#define PyArray_NDIM(obj) (((PyArrayObject_fields *)(obj))->nd) +#define PyArray_BYTES(obj) (((PyArrayObject_fields *)(obj))->data) +#define PyArray_DATA(obj) ((void *)((PyArrayObject_fields *)(obj))->data) +#define PyArray_DIMS(obj) (((PyArrayObject_fields *)(obj))->dimensions) +#define PyArray_STRIDES(obj) (((PyArrayObject_fields *)(obj))->strides) +#define PyArray_DIM(obj,n) (PyArray_DIMS(obj)[n]) +#define PyArray_STRIDE(obj,n) (PyArray_STRIDES(obj)[n]) +#define PyArray_BASE(obj) (((PyArrayObject_fields *)(obj))->base) +#define PyArray_DESCR(obj) (((PyArrayObject_fields *)(obj))->descr) +#define PyArray_FLAGS(obj) (((PyArrayObject_fields *)(obj))->flags) +#define PyArray_CHKFLAGS(m, FLAGS) \ + ((((PyArrayObject_fields *)(m))->flags & (FLAGS)) == (FLAGS)) +#define PyArray_ITEMSIZE(obj) \ + (((PyArrayObject_fields *)(obj))->descr->elsize) +#define PyArray_TYPE(obj) \ + (((PyArrayObject_fields *)(obj))->descr->type_num) +#define PyArray_GETITEM(obj,itemptr) \ + PyArray_DESCR(obj)->f->getitem((char *)(itemptr), \ + (PyArrayObject *)(obj)) + +#define PyArray_SETITEM(obj,itemptr,v) \ + PyArray_DESCR(obj)->f->setitem((PyObject *)(v), \ + (char *)(itemptr), \ + (PyArrayObject *)(obj)) +#endif + +static inline PyArray_Descr * +PyArray_DTYPE(PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->descr; +} + +static inline npy_intp * +PyArray_SHAPE(PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->dimensions; +} + +/* + * Enables the specified array flags. Does no checking, + * assumes you know what you're doing. + */ +static inline void +PyArray_ENABLEFLAGS(PyArrayObject *arr, int flags) +{ + ((PyArrayObject_fields *)arr)->flags |= flags; +} + +/* + * Clears the specified array flags. Does no checking, + * assumes you know what you're doing. 
+ */ +static inline void +PyArray_CLEARFLAGS(PyArrayObject *arr, int flags) +{ + ((PyArrayObject_fields *)arr)->flags &= ~flags; +} + +#if NPY_FEATURE_VERSION >= NPY_1_22_API_VERSION + static inline NPY_RETURNS_BORROWED_REF PyObject * + PyArray_HANDLER(PyArrayObject *arr) + { + return ((PyArrayObject_fields *)arr)->mem_handler; + } +#endif + +#define PyTypeNum_ISBOOL(type) ((type) == NPY_BOOL) + +#define PyTypeNum_ISUNSIGNED(type) (((type) == NPY_UBYTE) || \ + ((type) == NPY_USHORT) || \ + ((type) == NPY_UINT) || \ + ((type) == NPY_ULONG) || \ + ((type) == NPY_ULONGLONG)) + +#define PyTypeNum_ISSIGNED(type) (((type) == NPY_BYTE) || \ + ((type) == NPY_SHORT) || \ + ((type) == NPY_INT) || \ + ((type) == NPY_LONG) || \ + ((type) == NPY_LONGLONG)) + +#define PyTypeNum_ISINTEGER(type) (((type) >= NPY_BYTE) && \ + ((type) <= NPY_ULONGLONG)) + +#define PyTypeNum_ISFLOAT(type) ((((type) >= NPY_FLOAT) && \ + ((type) <= NPY_LONGDOUBLE)) || \ + ((type) == NPY_HALF)) + +#define PyTypeNum_ISNUMBER(type) (((type) <= NPY_CLONGDOUBLE) || \ + ((type) == NPY_HALF)) + +#define PyTypeNum_ISSTRING(type) (((type) == NPY_STRING) || \ + ((type) == NPY_UNICODE)) + +#define PyTypeNum_ISCOMPLEX(type) (((type) >= NPY_CFLOAT) && \ + ((type) <= NPY_CLONGDOUBLE)) + +#define PyTypeNum_ISPYTHON(type) (((type) == NPY_LONG) || \ + ((type) == NPY_DOUBLE) || \ + ((type) == NPY_CDOUBLE) || \ + ((type) == NPY_BOOL) || \ + ((type) == NPY_OBJECT )) + +#define PyTypeNum_ISFLEXIBLE(type) (((type) >=NPY_STRING) && \ + ((type) <=NPY_VOID)) + +#define PyTypeNum_ISDATETIME(type) (((type) >=NPY_DATETIME) && \ + ((type) <=NPY_TIMEDELTA)) + +#define PyTypeNum_ISUSERDEF(type) (((type) >= NPY_USERDEF) && \ + ((type) < NPY_USERDEF+ \ + NPY_NUMUSERTYPES)) + +#define PyTypeNum_ISEXTENDED(type) (PyTypeNum_ISFLEXIBLE(type) || \ + PyTypeNum_ISUSERDEF(type)) + +#define PyTypeNum_ISOBJECT(type) ((type) == NPY_OBJECT) + + +#define PyDataType_ISBOOL(obj) PyTypeNum_ISBOOL(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISUNSIGNED(obj) PyTypeNum_ISUNSIGNED(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISSIGNED(obj) PyTypeNum_ISSIGNED(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISINTEGER(obj) PyTypeNum_ISINTEGER(((PyArray_Descr*)(obj))->type_num ) +#define PyDataType_ISFLOAT(obj) PyTypeNum_ISFLOAT(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISNUMBER(obj) PyTypeNum_ISNUMBER(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISSTRING(obj) PyTypeNum_ISSTRING(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISCOMPLEX(obj) PyTypeNum_ISCOMPLEX(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISPYTHON(obj) PyTypeNum_ISPYTHON(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISFLEXIBLE(obj) PyTypeNum_ISFLEXIBLE(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISDATETIME(obj) PyTypeNum_ISDATETIME(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISUSERDEF(obj) PyTypeNum_ISUSERDEF(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISEXTENDED(obj) PyTypeNum_ISEXTENDED(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISOBJECT(obj) PyTypeNum_ISOBJECT(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_HASFIELDS(obj) (((PyArray_Descr *)(obj))->names != NULL) +#define PyDataType_HASSUBARRAY(dtype) ((dtype)->subarray != NULL) +#define PyDataType_ISUNSIZED(dtype) ((dtype)->elsize == 0 && \ + !PyDataType_HASFIELDS(dtype)) +#define PyDataType_MAKEUNSIZED(dtype) ((dtype)->elsize = 0) + +#define PyArray_ISBOOL(obj) PyTypeNum_ISBOOL(PyArray_TYPE(obj)) +#define PyArray_ISUNSIGNED(obj) 
PyTypeNum_ISUNSIGNED(PyArray_TYPE(obj)) +#define PyArray_ISSIGNED(obj) PyTypeNum_ISSIGNED(PyArray_TYPE(obj)) +#define PyArray_ISINTEGER(obj) PyTypeNum_ISINTEGER(PyArray_TYPE(obj)) +#define PyArray_ISFLOAT(obj) PyTypeNum_ISFLOAT(PyArray_TYPE(obj)) +#define PyArray_ISNUMBER(obj) PyTypeNum_ISNUMBER(PyArray_TYPE(obj)) +#define PyArray_ISSTRING(obj) PyTypeNum_ISSTRING(PyArray_TYPE(obj)) +#define PyArray_ISCOMPLEX(obj) PyTypeNum_ISCOMPLEX(PyArray_TYPE(obj)) +#define PyArray_ISPYTHON(obj) PyTypeNum_ISPYTHON(PyArray_TYPE(obj)) +#define PyArray_ISFLEXIBLE(obj) PyTypeNum_ISFLEXIBLE(PyArray_TYPE(obj)) +#define PyArray_ISDATETIME(obj) PyTypeNum_ISDATETIME(PyArray_TYPE(obj)) +#define PyArray_ISUSERDEF(obj) PyTypeNum_ISUSERDEF(PyArray_TYPE(obj)) +#define PyArray_ISEXTENDED(obj) PyTypeNum_ISEXTENDED(PyArray_TYPE(obj)) +#define PyArray_ISOBJECT(obj) PyTypeNum_ISOBJECT(PyArray_TYPE(obj)) +#define PyArray_HASFIELDS(obj) PyDataType_HASFIELDS(PyArray_DESCR(obj)) + + /* + * FIXME: This should check for a flag on the data-type that + * states whether or not it is variable length. Because the + * ISFLEXIBLE check is hard-coded to the built-in data-types. + */ +#define PyArray_ISVARIABLE(obj) PyTypeNum_ISFLEXIBLE(PyArray_TYPE(obj)) + +#define PyArray_SAFEALIGNEDCOPY(obj) (PyArray_ISALIGNED(obj) && !PyArray_ISVARIABLE(obj)) + + +#define NPY_LITTLE '<' +#define NPY_BIG '>' +#define NPY_NATIVE '=' +#define NPY_SWAP 's' +#define NPY_IGNORE '|' + +#if NPY_BYTE_ORDER == NPY_BIG_ENDIAN +#define NPY_NATBYTE NPY_BIG +#define NPY_OPPBYTE NPY_LITTLE +#else +#define NPY_NATBYTE NPY_LITTLE +#define NPY_OPPBYTE NPY_BIG +#endif + +#define PyArray_ISNBO(arg) ((arg) != NPY_OPPBYTE) +#define PyArray_IsNativeByteOrder PyArray_ISNBO +#define PyArray_ISNOTSWAPPED(m) PyArray_ISNBO(PyArray_DESCR(m)->byteorder) +#define PyArray_ISBYTESWAPPED(m) (!PyArray_ISNOTSWAPPED(m)) + +#define PyArray_FLAGSWAP(m, flags) (PyArray_CHKFLAGS(m, flags) && \ + PyArray_ISNOTSWAPPED(m)) + +#define PyArray_ISCARRAY(m) PyArray_FLAGSWAP(m, NPY_ARRAY_CARRAY) +#define PyArray_ISCARRAY_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_CARRAY_RO) +#define PyArray_ISFARRAY(m) PyArray_FLAGSWAP(m, NPY_ARRAY_FARRAY) +#define PyArray_ISFARRAY_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_FARRAY_RO) +#define PyArray_ISBEHAVED(m) PyArray_FLAGSWAP(m, NPY_ARRAY_BEHAVED) +#define PyArray_ISBEHAVED_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_ALIGNED) + + +#define PyDataType_ISNOTSWAPPED(d) PyArray_ISNBO(((PyArray_Descr *)(d))->byteorder) +#define PyDataType_ISBYTESWAPPED(d) (!PyDataType_ISNOTSWAPPED(d)) + +/************************************************************ + * A struct used by PyArray_CreateSortedStridePerm, new in 1.7. + ************************************************************/ + +typedef struct { + npy_intp perm, stride; +} npy_stride_sort_item; + +/************************************************************ + * This is the form of the struct that's stored in the + * PyCapsule returned by an array's __array_struct__ attribute. See + * https://docs.scipy.org/doc/numpy/reference/arrays.interface.html for the full + * documentation. + ************************************************************/ +typedef struct { + int two; /* + * contains the integer 2 as a sanity + * check + */ + + int nd; /* number of dimensions */ + + char typekind; /* + * kind in array --- character code of + * typestr + */ + + int itemsize; /* size of each element */ + + int flags; /* + * how should be data interpreted. 
Valid + * flags are CONTIGUOUS (1), F_CONTIGUOUS (2), + * ALIGNED (0x100), NOTSWAPPED (0x200), and + * WRITEABLE (0x400). ARR_HAS_DESCR (0x800) + * states that arrdescr field is present in + * structure + */ + + npy_intp *shape; /* + * A length-nd array of shape + * information + */ + + npy_intp *strides; /* A length-nd array of stride information */ + + void *data; /* A pointer to the first element of the array */ + + PyObject *descr; /* + * A list of fields or NULL (ignored if flags + * does not have ARR_HAS_DESCR flag set) + */ +} PyArrayInterface; + +/* + * This is a function for hooking into the PyDataMem_NEW/FREE/RENEW functions. + * See the documentation for PyDataMem_SetEventHook. + */ +typedef void (PyDataMem_EventHookFunc)(void *inp, void *outp, size_t size, + void *user_data); + + +/* + * PyArray_DTypeMeta related definitions. + * + * As of now, this API is preliminary and will be extended as necessary. + */ +#if defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD + /* + * The Structures defined in this block are currently considered + * private API and may change without warning! + * Part of this (at least the size) is expected to be public API without + * further modifications. + */ + /* TODO: Make this definition public in the API, as soon as its settled */ + NPY_NO_EXPORT extern PyTypeObject PyArrayDTypeMeta_Type; + + /* + * While NumPy DTypes would not need to be heap types the plan is to + * make DTypes available in Python at which point they will be heap types. + * Since we also wish to add fields to the DType class, this looks like + * a typical instance definition, but with PyHeapTypeObject instead of + * only the PyObject_HEAD. + * This must only be exposed very extremely careful consideration, since + * it is a fairly complex construct which may be better to allow + * refactoring of. + */ + typedef struct { + PyHeapTypeObject super; + + /* + * Most DTypes will have a singleton default instance, for the + * parametric legacy DTypes (bytes, string, void, datetime) this + * may be a pointer to the *prototype* instance? + */ + PyArray_Descr *singleton; + /* Copy of the legacy DTypes type number, usually invalid. */ + int type_num; + + /* The type object of the scalar instances (may be NULL?) */ + PyTypeObject *scalar_type; + /* + * DType flags to signal legacy, parametric, or + * abstract. But plenty of space for additional information/flags. + */ + npy_uint64 flags; + + /* + * Use indirection in order to allow a fixed size for this struct. + * A stable ABI size makes creating a static DType less painful + * while also ensuring flexibility for all opaque API (with one + * indirection due the pointer lookup). + */ + void *dt_slots; + void *reserved[3]; + } PyArray_DTypeMeta; + +#endif /* NPY_INTERNAL_BUILD */ + + +/* + * Use the keyword NPY_DEPRECATED_INCLUDES to ensure that the header files + * npy_*_*_deprecated_api.h are only included from here and nowhere else. + */ +#ifdef NPY_DEPRECATED_INCLUDES +#error "Do not use the reserved keyword NPY_DEPRECATED_INCLUDES." +#endif +#define NPY_DEPRECATED_INCLUDES +#if !defined(NPY_NO_DEPRECATED_API) || \ + (NPY_NO_DEPRECATED_API < NPY_1_7_API_VERSION) +#include "npy_1_7_deprecated_api.h" +#endif +/* + * There is no file npy_1_8_deprecated_api.h since there are no additional + * deprecated API features in NumPy 1.8. + * + * Note to maintainers: insert code like the following in future NumPy + * versions. 
+ * + * #if !defined(NPY_NO_DEPRECATED_API) || \ + * (NPY_NO_DEPRECATED_API < NPY_1_9_API_VERSION) + * #include "npy_1_9_deprecated_api.h" + * #endif + */ +#undef NPY_DEPRECATED_INCLUDES + +#endif /* NUMPY_CORE_INCLUDE_NUMPY_NDARRAYTYPES_H_ */ diff --git a/.venv/lib/python3.12/site-packages/numpy/core/include/numpy/noprefix.h b/.venv/lib/python3.12/site-packages/numpy/core/include/numpy/noprefix.h new file mode 100644 index 00000000..cea5b0d4 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/include/numpy/noprefix.h @@ -0,0 +1,211 @@ +#ifndef NUMPY_CORE_INCLUDE_NUMPY_NOPREFIX_H_ +#define NUMPY_CORE_INCLUDE_NUMPY_NOPREFIX_H_ + +/* + * You can directly include noprefix.h as a backward + * compatibility measure + */ +#ifndef NPY_NO_PREFIX +#include "ndarrayobject.h" +#include "npy_interrupt.h" +#endif + +#define SIGSETJMP NPY_SIGSETJMP +#define SIGLONGJMP NPY_SIGLONGJMP +#define SIGJMP_BUF NPY_SIGJMP_BUF + +#define MAX_DIMS NPY_MAXDIMS + +#define longlong npy_longlong +#define ulonglong npy_ulonglong +#define Bool npy_bool +#define longdouble npy_longdouble +#define byte npy_byte + +#ifndef _BSD_SOURCE +#define ushort npy_ushort +#define uint npy_uint +#define ulong npy_ulong +#endif + +#define ubyte npy_ubyte +#define ushort npy_ushort +#define uint npy_uint +#define ulong npy_ulong +#define cfloat npy_cfloat +#define cdouble npy_cdouble +#define clongdouble npy_clongdouble +#define Int8 npy_int8 +#define UInt8 npy_uint8 +#define Int16 npy_int16 +#define UInt16 npy_uint16 +#define Int32 npy_int32 +#define UInt32 npy_uint32 +#define Int64 npy_int64 +#define UInt64 npy_uint64 +#define Int128 npy_int128 +#define UInt128 npy_uint128 +#define Int256 npy_int256 +#define UInt256 npy_uint256 +#define Float16 npy_float16 +#define Complex32 npy_complex32 +#define Float32 npy_float32 +#define Complex64 npy_complex64 +#define Float64 npy_float64 +#define Complex128 npy_complex128 +#define Float80 npy_float80 +#define Complex160 npy_complex160 +#define Float96 npy_float96 +#define Complex192 npy_complex192 +#define Float128 npy_float128 +#define Complex256 npy_complex256 +#define intp npy_intp +#define uintp npy_uintp +#define datetime npy_datetime +#define timedelta npy_timedelta + +#define SIZEOF_LONGLONG NPY_SIZEOF_LONGLONG +#define SIZEOF_INTP NPY_SIZEOF_INTP +#define SIZEOF_UINTP NPY_SIZEOF_UINTP +#define SIZEOF_HALF NPY_SIZEOF_HALF +#define SIZEOF_LONGDOUBLE NPY_SIZEOF_LONGDOUBLE +#define SIZEOF_DATETIME NPY_SIZEOF_DATETIME +#define SIZEOF_TIMEDELTA NPY_SIZEOF_TIMEDELTA + +#define LONGLONG_FMT NPY_LONGLONG_FMT +#define ULONGLONG_FMT NPY_ULONGLONG_FMT +#define LONGLONG_SUFFIX NPY_LONGLONG_SUFFIX +#define ULONGLONG_SUFFIX NPY_ULONGLONG_SUFFIX + +#define MAX_INT8 127 +#define MIN_INT8 -128 +#define MAX_UINT8 255 +#define MAX_INT16 32767 +#define MIN_INT16 -32768 +#define MAX_UINT16 65535 +#define MAX_INT32 2147483647 +#define MIN_INT32 (-MAX_INT32 - 1) +#define MAX_UINT32 4294967295U +#define MAX_INT64 LONGLONG_SUFFIX(9223372036854775807) +#define MIN_INT64 (-MAX_INT64 - LONGLONG_SUFFIX(1)) +#define MAX_UINT64 ULONGLONG_SUFFIX(18446744073709551615) +#define MAX_INT128 LONGLONG_SUFFIX(85070591730234615865843651857942052864) +#define MIN_INT128 (-MAX_INT128 - LONGLONG_SUFFIX(1)) +#define MAX_UINT128 ULONGLONG_SUFFIX(170141183460469231731687303715884105728) +#define MAX_INT256 LONGLONG_SUFFIX(57896044618658097711785492504343953926634992332820282019728792003956564819967) +#define MIN_INT256 (-MAX_INT256 - LONGLONG_SUFFIX(1)) +#define MAX_UINT256 
ULONGLONG_SUFFIX(115792089237316195423570985008687907853269984665640564039457584007913129639935) + +#define MAX_BYTE NPY_MAX_BYTE +#define MIN_BYTE NPY_MIN_BYTE +#define MAX_UBYTE NPY_MAX_UBYTE +#define MAX_SHORT NPY_MAX_SHORT +#define MIN_SHORT NPY_MIN_SHORT +#define MAX_USHORT NPY_MAX_USHORT +#define MAX_INT NPY_MAX_INT +#define MIN_INT NPY_MIN_INT +#define MAX_UINT NPY_MAX_UINT +#define MAX_LONG NPY_MAX_LONG +#define MIN_LONG NPY_MIN_LONG +#define MAX_ULONG NPY_MAX_ULONG +#define MAX_LONGLONG NPY_MAX_LONGLONG +#define MIN_LONGLONG NPY_MIN_LONGLONG +#define MAX_ULONGLONG NPY_MAX_ULONGLONG +#define MIN_DATETIME NPY_MIN_DATETIME +#define MAX_DATETIME NPY_MAX_DATETIME +#define MIN_TIMEDELTA NPY_MIN_TIMEDELTA +#define MAX_TIMEDELTA NPY_MAX_TIMEDELTA + +#define BITSOF_BOOL NPY_BITSOF_BOOL +#define BITSOF_CHAR NPY_BITSOF_CHAR +#define BITSOF_SHORT NPY_BITSOF_SHORT +#define BITSOF_INT NPY_BITSOF_INT +#define BITSOF_LONG NPY_BITSOF_LONG +#define BITSOF_LONGLONG NPY_BITSOF_LONGLONG +#define BITSOF_HALF NPY_BITSOF_HALF +#define BITSOF_FLOAT NPY_BITSOF_FLOAT +#define BITSOF_DOUBLE NPY_BITSOF_DOUBLE +#define BITSOF_LONGDOUBLE NPY_BITSOF_LONGDOUBLE +#define BITSOF_DATETIME NPY_BITSOF_DATETIME +#define BITSOF_TIMEDELTA NPY_BITSOF_TIMEDELTA + +#define _pya_malloc PyArray_malloc +#define _pya_free PyArray_free +#define _pya_realloc PyArray_realloc + +#define BEGIN_THREADS_DEF NPY_BEGIN_THREADS_DEF +#define BEGIN_THREADS NPY_BEGIN_THREADS +#define END_THREADS NPY_END_THREADS +#define ALLOW_C_API_DEF NPY_ALLOW_C_API_DEF +#define ALLOW_C_API NPY_ALLOW_C_API +#define DISABLE_C_API NPY_DISABLE_C_API + +#define PY_FAIL NPY_FAIL +#define PY_SUCCEED NPY_SUCCEED + +#ifndef TRUE +#define TRUE NPY_TRUE +#endif + +#ifndef FALSE +#define FALSE NPY_FALSE +#endif + +#define LONGDOUBLE_FMT NPY_LONGDOUBLE_FMT + +#define CONTIGUOUS NPY_CONTIGUOUS +#define C_CONTIGUOUS NPY_C_CONTIGUOUS +#define FORTRAN NPY_FORTRAN +#define F_CONTIGUOUS NPY_F_CONTIGUOUS +#define OWNDATA NPY_OWNDATA +#define FORCECAST NPY_FORCECAST +#define ENSURECOPY NPY_ENSURECOPY +#define ENSUREARRAY NPY_ENSUREARRAY +#define ELEMENTSTRIDES NPY_ELEMENTSTRIDES +#define ALIGNED NPY_ALIGNED +#define NOTSWAPPED NPY_NOTSWAPPED +#define WRITEABLE NPY_WRITEABLE +#define WRITEBACKIFCOPY NPY_ARRAY_WRITEBACKIFCOPY +#define ARR_HAS_DESCR NPY_ARR_HAS_DESCR +#define BEHAVED NPY_BEHAVED +#define BEHAVED_NS NPY_BEHAVED_NS +#define CARRAY NPY_CARRAY +#define CARRAY_RO NPY_CARRAY_RO +#define FARRAY NPY_FARRAY +#define FARRAY_RO NPY_FARRAY_RO +#define DEFAULT NPY_DEFAULT +#define IN_ARRAY NPY_IN_ARRAY +#define OUT_ARRAY NPY_OUT_ARRAY +#define INOUT_ARRAY NPY_INOUT_ARRAY +#define IN_FARRAY NPY_IN_FARRAY +#define OUT_FARRAY NPY_OUT_FARRAY +#define INOUT_FARRAY NPY_INOUT_FARRAY +#define UPDATE_ALL NPY_UPDATE_ALL + +#define OWN_DATA NPY_OWNDATA +#define BEHAVED_FLAGS NPY_BEHAVED +#define BEHAVED_FLAGS_NS NPY_BEHAVED_NS +#define CARRAY_FLAGS_RO NPY_CARRAY_RO +#define CARRAY_FLAGS NPY_CARRAY +#define FARRAY_FLAGS NPY_FARRAY +#define FARRAY_FLAGS_RO NPY_FARRAY_RO +#define DEFAULT_FLAGS NPY_DEFAULT +#define UPDATE_ALL_FLAGS NPY_UPDATE_ALL_FLAGS + +#ifndef MIN +#define MIN PyArray_MIN +#endif +#ifndef MAX +#define MAX PyArray_MAX +#endif +#define MAX_INTP NPY_MAX_INTP +#define MIN_INTP NPY_MIN_INTP +#define MAX_UINTP NPY_MAX_UINTP +#define INTP_FMT NPY_INTP_FMT + +#ifndef PYPY_VERSION +#define REFCOUNT PyArray_REFCOUNT +#define MAX_ELSIZE NPY_MAX_ELSIZE +#endif + +#endif /* NUMPY_CORE_INCLUDE_NUMPY_NOPREFIX_H_ */ diff --git 
a/.venv/lib/python3.12/site-packages/numpy/core/include/numpy/npy_1_7_deprecated_api.h b/.venv/lib/python3.12/site-packages/numpy/core/include/numpy/npy_1_7_deprecated_api.h new file mode 100644 index 00000000..6455d40d --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/include/numpy/npy_1_7_deprecated_api.h @@ -0,0 +1,124 @@ +#ifndef NPY_DEPRECATED_INCLUDES +#error "Should never include npy_*_*_deprecated_api directly." +#endif + +#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_1_7_DEPRECATED_API_H_ +#define NUMPY_CORE_INCLUDE_NUMPY_NPY_1_7_DEPRECATED_API_H_ + +/* Emit a warning if the user did not specifically request the old API */ +#ifndef NPY_NO_DEPRECATED_API +#if defined(_WIN32) +#define _WARN___STR2__(x) #x +#define _WARN___STR1__(x) _WARN___STR2__(x) +#define _WARN___LOC__ __FILE__ "(" _WARN___STR1__(__LINE__) ") : Warning Msg: " +#pragma message(_WARN___LOC__"Using deprecated NumPy API, disable it with " \ + "#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION") +#else +#warning "Using deprecated NumPy API, disable it with " \ + "#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION" +#endif +#endif + +/* + * This header exists to collect all dangerous/deprecated NumPy API + * as of NumPy 1.7. + * + * This is an attempt to remove bad API, the proliferation of macros, + * and namespace pollution currently produced by the NumPy headers. + */ + +/* These array flags are deprecated as of NumPy 1.7 */ +#define NPY_CONTIGUOUS NPY_ARRAY_C_CONTIGUOUS +#define NPY_FORTRAN NPY_ARRAY_F_CONTIGUOUS + +/* + * The consistent NPY_ARRAY_* names which don't pollute the NPY_* + * namespace were added in NumPy 1.7. + * + * These versions of the carray flags are deprecated, but + * probably should only be removed after two releases instead of one. + */ +#define NPY_C_CONTIGUOUS NPY_ARRAY_C_CONTIGUOUS +#define NPY_F_CONTIGUOUS NPY_ARRAY_F_CONTIGUOUS +#define NPY_OWNDATA NPY_ARRAY_OWNDATA +#define NPY_FORCECAST NPY_ARRAY_FORCECAST +#define NPY_ENSURECOPY NPY_ARRAY_ENSURECOPY +#define NPY_ENSUREARRAY NPY_ARRAY_ENSUREARRAY +#define NPY_ELEMENTSTRIDES NPY_ARRAY_ELEMENTSTRIDES +#define NPY_ALIGNED NPY_ARRAY_ALIGNED +#define NPY_NOTSWAPPED NPY_ARRAY_NOTSWAPPED +#define NPY_WRITEABLE NPY_ARRAY_WRITEABLE +#define NPY_BEHAVED NPY_ARRAY_BEHAVED +#define NPY_BEHAVED_NS NPY_ARRAY_BEHAVED_NS +#define NPY_CARRAY NPY_ARRAY_CARRAY +#define NPY_CARRAY_RO NPY_ARRAY_CARRAY_RO +#define NPY_FARRAY NPY_ARRAY_FARRAY +#define NPY_FARRAY_RO NPY_ARRAY_FARRAY_RO +#define NPY_DEFAULT NPY_ARRAY_DEFAULT +#define NPY_IN_ARRAY NPY_ARRAY_IN_ARRAY +#define NPY_OUT_ARRAY NPY_ARRAY_OUT_ARRAY +#define NPY_INOUT_ARRAY NPY_ARRAY_INOUT_ARRAY +#define NPY_IN_FARRAY NPY_ARRAY_IN_FARRAY +#define NPY_OUT_FARRAY NPY_ARRAY_OUT_FARRAY +#define NPY_INOUT_FARRAY NPY_ARRAY_INOUT_FARRAY +#define NPY_UPDATE_ALL NPY_ARRAY_UPDATE_ALL + +/* This way of accessing the default type is deprecated as of NumPy 1.7 */ +#define PyArray_DEFAULT NPY_DEFAULT_TYPE + +/* These DATETIME bits aren't used internally */ +#define PyDataType_GetDatetimeMetaData(descr) \ + ((descr->metadata == NULL) ? NULL : \ + ((PyArray_DatetimeMetaData *)(PyCapsule_GetPointer( \ + PyDict_GetItemString( \ + descr->metadata, NPY_METADATA_DTSTR), NULL)))) + +/* + * Deprecated as of NumPy 1.7, this kind of shortcut doesn't + * belong in the public API. + */ +#define NPY_AO PyArrayObject + +/* + * Deprecated as of NumPy 1.7, an all-lowercase macro doesn't + * belong in the public API. + */ +#define fortran fortran_ + +/* + * Deprecated as of NumPy 1.7, as it is a namespace-polluting + * macro. 
+ */ +#define FORTRAN_IF PyArray_FORTRAN_IF + +/* Deprecated as of NumPy 1.7, datetime64 uses c_metadata instead */ +#define NPY_METADATA_DTSTR "__timeunit__" + +/* + * Deprecated as of NumPy 1.7. + * The reasoning: + * - These are for datetime, but there's no datetime "namespace". + * - They just turn NPY_STR_<x> into "<x>", which is just + * making something simple be indirected. + */ +#define NPY_STR_Y "Y" +#define NPY_STR_M "M" +#define NPY_STR_W "W" +#define NPY_STR_D "D" +#define NPY_STR_h "h" +#define NPY_STR_m "m" +#define NPY_STR_s "s" +#define NPY_STR_ms "ms" +#define NPY_STR_us "us" +#define NPY_STR_ns "ns" +#define NPY_STR_ps "ps" +#define NPY_STR_fs "fs" +#define NPY_STR_as "as" + +/* + * The macros in old_defines.h are Deprecated as of NumPy 1.7 and will be + * removed in the next major release. + */ +#include "old_defines.h" + +#endif /* NUMPY_CORE_INCLUDE_NUMPY_NPY_1_7_DEPRECATED_API_H_ */ diff --git a/.venv/lib/python3.12/site-packages/numpy/core/include/numpy/npy_3kcompat.h b/.venv/lib/python3.12/site-packages/numpy/core/include/numpy/npy_3kcompat.h new file mode 100644 index 00000000..62fde943 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/include/numpy/npy_3kcompat.h @@ -0,0 +1,595 @@ +/* + * This is a convenience header file providing compatibility utilities + * for supporting different minor versions of Python 3. + * It was originally used to support the transition from Python 2, + * hence the "3k" naming. + * + * If you want to use this for your own projects, it's recommended to make a + * copy of it. Although the stuff below is unlikely to change, we don't provide + * strong backwards compatibility guarantees at the moment. + */ + +#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_3KCOMPAT_H_ +#define NUMPY_CORE_INCLUDE_NUMPY_NPY_3KCOMPAT_H_ + +#include <Python.h> +#include <stdio.h> + +#ifndef NPY_PY3K +#define NPY_PY3K 1 +#endif + +#include "numpy/npy_common.h" +#include "numpy/ndarrayobject.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * PyInt -> PyLong + */ + + +/* + * This is a renamed copy of the Python non-limited API function _PyLong_AsInt. It is + * included here because it is missing from the PyPy API. It completes the PyLong_As* + * group of functions and can be useful in replacing PyInt_Check. + */ +static inline int +Npy__PyLong_AsInt(PyObject *obj) +{ + int overflow; + long result = PyLong_AsLongAndOverflow(obj, &overflow); + + /* INT_MAX and INT_MIN are defined in Python.h */ + if (overflow || result > INT_MAX || result < INT_MIN) { + /* XXX: could be cute and give a different + message for overflow == -1 */ + PyErr_SetString(PyExc_OverflowError, + "Python int too large to convert to C int"); + return -1; + } + return (int)result; +} + + +#if defined(NPY_PY3K) +/* Return True only if the long fits in a C long */ +static inline int PyInt_Check(PyObject *op) { + int overflow = 0; + if (!PyLong_Check(op)) { + return 0; + } + PyLong_AsLongAndOverflow(op, &overflow); + return (overflow == 0); +} + + +#define PyInt_FromLong PyLong_FromLong +#define PyInt_AsLong PyLong_AsLong +#define PyInt_AS_LONG PyLong_AsLong +#define PyInt_AsSsize_t PyLong_AsSsize_t +#define PyNumber_Int PyNumber_Long + +/* NOTE: + * + * Since the PyLong type is very different from the fixed-range PyInt, + * we don't define PyInt_Type -> PyLong_Type. 
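 *
 * Illustrative sketch, added for this document (not part of the NumPy
 * header): with the shim above, legacy extension code can keep the old
 * idiom under Python 3, but it only succeeds for values that fit in a
 * C long:
 *
 *     if (PyInt_Check(obj)) {            // true only if obj fits a C long
 *         long v = PyInt_AsLong(obj);    // expands to PyLong_AsLong
 *         ...
 *     }
 *     else {
 *         // arbitrarily large Python ints need an explicit PyLong_* path
 *     }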
+ */ +#endif /* NPY_PY3K */ + +/* Py3 changes PySlice_GetIndicesEx' first argument's type to PyObject* */ +#ifdef NPY_PY3K +# define NpySlice_GetIndicesEx PySlice_GetIndicesEx +#else +# define NpySlice_GetIndicesEx(op, nop, start, end, step, slicelength) \ + PySlice_GetIndicesEx((PySliceObject *)op, nop, start, end, step, slicelength) +#endif + +#if PY_VERSION_HEX < 0x030900a4 + /* Introduced in https://github.com/python/cpython/commit/d2ec81a8c99796b51fb8c49b77a7fe369863226f */ + #define Py_SET_TYPE(obj, type) ((Py_TYPE(obj) = (type)), (void)0) + /* Introduced in https://github.com/python/cpython/commit/b10dc3e7a11fcdb97e285882eba6da92594f90f9 */ + #define Py_SET_SIZE(obj, size) ((Py_SIZE(obj) = (size)), (void)0) + /* Introduced in https://github.com/python/cpython/commit/c86a11221df7e37da389f9c6ce6e47ea22dc44ff */ + #define Py_SET_REFCNT(obj, refcnt) ((Py_REFCNT(obj) = (refcnt)), (void)0) +#endif + + +#define Npy_EnterRecursiveCall(x) Py_EnterRecursiveCall(x) + +/* + * PyString -> PyBytes + */ + +#if defined(NPY_PY3K) + +#define PyString_Type PyBytes_Type +#define PyString_Check PyBytes_Check +#define PyStringObject PyBytesObject +#define PyString_FromString PyBytes_FromString +#define PyString_FromStringAndSize PyBytes_FromStringAndSize +#define PyString_AS_STRING PyBytes_AS_STRING +#define PyString_AsStringAndSize PyBytes_AsStringAndSize +#define PyString_FromFormat PyBytes_FromFormat +#define PyString_Concat PyBytes_Concat +#define PyString_ConcatAndDel PyBytes_ConcatAndDel +#define PyString_AsString PyBytes_AsString +#define PyString_GET_SIZE PyBytes_GET_SIZE +#define PyString_Size PyBytes_Size + +#define PyUString_Type PyUnicode_Type +#define PyUString_Check PyUnicode_Check +#define PyUStringObject PyUnicodeObject +#define PyUString_FromString PyUnicode_FromString +#define PyUString_FromStringAndSize PyUnicode_FromStringAndSize +#define PyUString_FromFormat PyUnicode_FromFormat +#define PyUString_Concat PyUnicode_Concat2 +#define PyUString_ConcatAndDel PyUnicode_ConcatAndDel +#define PyUString_GET_SIZE PyUnicode_GET_SIZE +#define PyUString_Size PyUnicode_Size +#define PyUString_InternFromString PyUnicode_InternFromString +#define PyUString_Format PyUnicode_Format + +#define PyBaseString_Check(obj) (PyUnicode_Check(obj)) + +#else + +#define PyBytes_Type PyString_Type +#define PyBytes_Check PyString_Check +#define PyBytesObject PyStringObject +#define PyBytes_FromString PyString_FromString +#define PyBytes_FromStringAndSize PyString_FromStringAndSize +#define PyBytes_AS_STRING PyString_AS_STRING +#define PyBytes_AsStringAndSize PyString_AsStringAndSize +#define PyBytes_FromFormat PyString_FromFormat +#define PyBytes_Concat PyString_Concat +#define PyBytes_ConcatAndDel PyString_ConcatAndDel +#define PyBytes_AsString PyString_AsString +#define PyBytes_GET_SIZE PyString_GET_SIZE +#define PyBytes_Size PyString_Size + +#define PyUString_Type PyString_Type +#define PyUString_Check PyString_Check +#define PyUStringObject PyStringObject +#define PyUString_FromString PyString_FromString +#define PyUString_FromStringAndSize PyString_FromStringAndSize +#define PyUString_FromFormat PyString_FromFormat +#define PyUString_Concat PyString_Concat +#define PyUString_ConcatAndDel PyString_ConcatAndDel +#define PyUString_GET_SIZE PyString_GET_SIZE +#define PyUString_Size PyString_Size +#define PyUString_InternFromString PyString_InternFromString +#define PyUString_Format PyString_Format + +#define PyBaseString_Check(obj) (PyBytes_Check(obj) || PyUnicode_Check(obj)) + +#endif /* NPY_PY3K */ + +/* + * 
Macros to protect CRT calls against instant termination when passed an + * invalid parameter (https://bugs.python.org/issue23524). + */ +#if defined _MSC_VER && _MSC_VER >= 1900 + +#include <stdlib.h> + +extern _invalid_parameter_handler _Py_silent_invalid_parameter_handler; +#define NPY_BEGIN_SUPPRESS_IPH { _invalid_parameter_handler _Py_old_handler = \ + _set_thread_local_invalid_parameter_handler(_Py_silent_invalid_parameter_handler); +#define NPY_END_SUPPRESS_IPH _set_thread_local_invalid_parameter_handler(_Py_old_handler); } + +#else + +#define NPY_BEGIN_SUPPRESS_IPH +#define NPY_END_SUPPRESS_IPH + +#endif /* _MSC_VER >= 1900 */ + + +static inline void +PyUnicode_ConcatAndDel(PyObject **left, PyObject *right) +{ + Py_SETREF(*left, PyUnicode_Concat(*left, right)); + Py_DECREF(right); +} + +static inline void +PyUnicode_Concat2(PyObject **left, PyObject *right) +{ + Py_SETREF(*left, PyUnicode_Concat(*left, right)); +} + +/* + * PyFile_* compatibility + */ + +/* + * Get a FILE* handle to the file represented by the Python object + */ +static inline FILE* +npy_PyFile_Dup2(PyObject *file, char *mode, npy_off_t *orig_pos) +{ + int fd, fd2, unbuf; + Py_ssize_t fd2_tmp; + PyObject *ret, *os, *io, *io_raw; + npy_off_t pos; + FILE *handle; + + /* For Python 2 PyFileObject, use PyFile_AsFile */ +#if !defined(NPY_PY3K) + if (PyFile_Check(file)) { + return PyFile_AsFile(file); + } +#endif + + /* Flush first to ensure things end up in the file in the correct order */ + ret = PyObject_CallMethod(file, "flush", ""); + if (ret == NULL) { + return NULL; + } + Py_DECREF(ret); + fd = PyObject_AsFileDescriptor(file); + if (fd == -1) { + return NULL; + } + + /* + * The handle needs to be dup'd because we have to call fclose + * at the end + */ + os = PyImport_ImportModule("os"); + if (os == NULL) { + return NULL; + } + ret = PyObject_CallMethod(os, "dup", "i", fd); + Py_DECREF(os); + if (ret == NULL) { + return NULL; + } + fd2_tmp = PyNumber_AsSsize_t(ret, PyExc_IOError); + Py_DECREF(ret); + if (fd2_tmp == -1 && PyErr_Occurred()) { + return NULL; + } + if (fd2_tmp < INT_MIN || fd2_tmp > INT_MAX) { + PyErr_SetString(PyExc_IOError, + "Getting an 'int' from os.dup() failed"); + return NULL; + } + fd2 = (int)fd2_tmp; + + /* Convert to FILE* handle */ +#ifdef _WIN32 + NPY_BEGIN_SUPPRESS_IPH + handle = _fdopen(fd2, mode); + NPY_END_SUPPRESS_IPH +#else + handle = fdopen(fd2, mode); +#endif + if (handle == NULL) { + PyErr_SetString(PyExc_IOError, + "Getting a FILE* from a Python file object via " + "_fdopen failed. 
If you built NumPy, you probably " + "linked with the wrong debug/release runtime"); + return NULL; + } + + /* Record the original raw file handle position */ + *orig_pos = npy_ftell(handle); + if (*orig_pos == -1) { + /* The io module is needed to determine if buffering is used */ + io = PyImport_ImportModule("io"); + if (io == NULL) { + fclose(handle); + return NULL; + } + /* File object instances of RawIOBase are unbuffered */ + io_raw = PyObject_GetAttrString(io, "RawIOBase"); + Py_DECREF(io); + if (io_raw == NULL) { + fclose(handle); + return NULL; + } + unbuf = PyObject_IsInstance(file, io_raw); + Py_DECREF(io_raw); + if (unbuf == 1) { + /* Succeed if the IO is unbuffered */ + return handle; + } + else { + PyErr_SetString(PyExc_IOError, "obtaining file position failed"); + fclose(handle); + return NULL; + } + } + + /* Seek raw handle to the Python-side position */ + ret = PyObject_CallMethod(file, "tell", ""); + if (ret == NULL) { + fclose(handle); + return NULL; + } + pos = PyLong_AsLongLong(ret); + Py_DECREF(ret); + if (PyErr_Occurred()) { + fclose(handle); + return NULL; + } + if (npy_fseek(handle, pos, SEEK_SET) == -1) { + PyErr_SetString(PyExc_IOError, "seeking file failed"); + fclose(handle); + return NULL; + } + return handle; +} + +/* + * Close the dup-ed file handle, and seek the Python one to the current position + */ +static inline int +npy_PyFile_DupClose2(PyObject *file, FILE* handle, npy_off_t orig_pos) +{ + int fd, unbuf; + PyObject *ret, *io, *io_raw; + npy_off_t position; + + /* For Python 2 PyFileObject, do nothing */ +#if !defined(NPY_PY3K) + if (PyFile_Check(file)) { + return 0; + } +#endif + + position = npy_ftell(handle); + + /* Close the FILE* handle */ + fclose(handle); + + /* + * Restore original file handle position, in order to not confuse + * Python-side data structures + */ + fd = PyObject_AsFileDescriptor(file); + if (fd == -1) { + return -1; + } + + if (npy_lseek(fd, orig_pos, SEEK_SET) == -1) { + + /* The io module is needed to determine if buffering is used */ + io = PyImport_ImportModule("io"); + if (io == NULL) { + return -1; + } + /* File object instances of RawIOBase are unbuffered */ + io_raw = PyObject_GetAttrString(io, "RawIOBase"); + Py_DECREF(io); + if (io_raw == NULL) { + return -1; + } + unbuf = PyObject_IsInstance(file, io_raw); + Py_DECREF(io_raw); + if (unbuf == 1) { + /* Succeed if the IO is unbuffered */ + return 0; + } + else { + PyErr_SetString(PyExc_IOError, "seeking file failed"); + return -1; + } + } + + if (position == -1) { + PyErr_SetString(PyExc_IOError, "obtaining file position failed"); + return -1; + } + + /* Seek Python-side handle to the FILE* handle position */ + ret = PyObject_CallMethod(file, "seek", NPY_OFF_T_PYFMT "i", position, 0); + if (ret == NULL) { + return -1; + } + Py_DECREF(ret); + return 0; +} + +static inline int +npy_PyFile_Check(PyObject *file) +{ + int fd; + /* For Python 2, check if it is a PyFileObject */ +#if !defined(NPY_PY3K) + if (PyFile_Check(file)) { + return 1; + } +#endif + fd = PyObject_AsFileDescriptor(file); + if (fd == -1) { + PyErr_Clear(); + return 0; + } + return 1; +} + +static inline PyObject* +npy_PyFile_OpenFile(PyObject *filename, const char *mode) +{ + PyObject *open; + open = PyDict_GetItemString(PyEval_GetBuiltins(), "open"); + if (open == NULL) { + return NULL; + } + return PyObject_CallFunction(open, "Os", filename, mode); +} + +static inline int +npy_PyFile_CloseFile(PyObject *file) +{ + PyObject *ret; + + ret = PyObject_CallMethod(file, "close", NULL); + if (ret == NULL) { + 
return -1; + } + Py_DECREF(ret); + return 0; +} + + +/* This is a copy of _PyErr_ChainExceptions + */ +static inline void +npy_PyErr_ChainExceptions(PyObject *exc, PyObject *val, PyObject *tb) +{ + if (exc == NULL) + return; + + if (PyErr_Occurred()) { + /* only py3 supports this anyway */ + #ifdef NPY_PY3K + PyObject *exc2, *val2, *tb2; + PyErr_Fetch(&exc2, &val2, &tb2); + PyErr_NormalizeException(&exc, &val, &tb); + if (tb != NULL) { + PyException_SetTraceback(val, tb); + Py_DECREF(tb); + } + Py_DECREF(exc); + PyErr_NormalizeException(&exc2, &val2, &tb2); + PyException_SetContext(val2, val); + PyErr_Restore(exc2, val2, tb2); + #endif + } + else { + PyErr_Restore(exc, val, tb); + } +} + + +/* This is a copy of _PyErr_ChainExceptions, with: + * - a minimal implementation for python 2 + * - __cause__ used instead of __context__ + */ +static inline void +npy_PyErr_ChainExceptionsCause(PyObject *exc, PyObject *val, PyObject *tb) +{ + if (exc == NULL) + return; + + if (PyErr_Occurred()) { + /* only py3 supports this anyway */ + #ifdef NPY_PY3K + PyObject *exc2, *val2, *tb2; + PyErr_Fetch(&exc2, &val2, &tb2); + PyErr_NormalizeException(&exc, &val, &tb); + if (tb != NULL) { + PyException_SetTraceback(val, tb); + Py_DECREF(tb); + } + Py_DECREF(exc); + PyErr_NormalizeException(&exc2, &val2, &tb2); + PyException_SetCause(val2, val); + PyErr_Restore(exc2, val2, tb2); + #endif + } + else { + PyErr_Restore(exc, val, tb); + } +} + +/* + * PyObject_Cmp + */ +#if defined(NPY_PY3K) +static inline int +PyObject_Cmp(PyObject *i1, PyObject *i2, int *cmp) +{ + int v; + v = PyObject_RichCompareBool(i1, i2, Py_LT); + if (v == 1) { + *cmp = -1; + return 1; + } + else if (v == -1) { + return -1; + } + + v = PyObject_RichCompareBool(i1, i2, Py_GT); + if (v == 1) { + *cmp = 1; + return 1; + } + else if (v == -1) { + return -1; + } + + v = PyObject_RichCompareBool(i1, i2, Py_EQ); + if (v == 1) { + *cmp = 0; + return 1; + } + else { + *cmp = 0; + return -1; + } +} +#endif + +/* + * PyCObject functions adapted to PyCapsules. + * + * The main job here is to get rid of the improved error handling + * of PyCapsules. It's a shame... 
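 *
 * Illustrative sketch, added for this document (not part of the NumPy
 * header): legacy code that stored a C pointer in a PyCObject can move
 * to the wrappers below without changing its (lack of) error handling;
 * my_state_t and my_destructor are placeholder names, where
 * my_destructor has the PyCapsule destructor signature void (*)(PyObject *):
 *
 *     my_state_t *state = malloc(sizeof(*state));
 *     PyObject *cap = NpyCapsule_FromVoidPtr(state, my_destructor);
 *     ...
 *     my_state_t *back = (my_state_t *)NpyCapsule_AsVoidPtr(cap);
 *
 * Both wrappers clear any pending Python error and simply return NULL
 * on failure, mimicking the old PyCObject behaviour.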
+ */ +static inline PyObject * +NpyCapsule_FromVoidPtr(void *ptr, void (*dtor)(PyObject *)) +{ + PyObject *ret = PyCapsule_New(ptr, NULL, dtor); + if (ret == NULL) { + PyErr_Clear(); + } + return ret; +} + +static inline PyObject * +NpyCapsule_FromVoidPtrAndDesc(void *ptr, void* context, void (*dtor)(PyObject *)) +{ + PyObject *ret = NpyCapsule_FromVoidPtr(ptr, dtor); + if (ret != NULL && PyCapsule_SetContext(ret, context) != 0) { + PyErr_Clear(); + Py_DECREF(ret); + ret = NULL; + } + return ret; +} + +static inline void * +NpyCapsule_AsVoidPtr(PyObject *obj) +{ + void *ret = PyCapsule_GetPointer(obj, NULL); + if (ret == NULL) { + PyErr_Clear(); + } + return ret; +} + +static inline void * +NpyCapsule_GetDesc(PyObject *obj) +{ + return PyCapsule_GetContext(obj); +} + +static inline int +NpyCapsule_Check(PyObject *ptr) +{ + return PyCapsule_CheckExact(ptr); +} + +#ifdef __cplusplus +} +#endif + + +#endif /* NUMPY_CORE_INCLUDE_NUMPY_NPY_3KCOMPAT_H_ */ diff --git a/.venv/lib/python3.12/site-packages/numpy/core/include/numpy/npy_common.h b/.venv/lib/python3.12/site-packages/numpy/core/include/numpy/npy_common.h new file mode 100644 index 00000000..9e98f8ef --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/include/numpy/npy_common.h @@ -0,0 +1,1086 @@ +#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_COMMON_H_ +#define NUMPY_CORE_INCLUDE_NUMPY_NPY_COMMON_H_ + +/* need Python.h for npy_intp, npy_uintp */ +#include <Python.h> + +/* numpconfig.h is auto-generated */ +#include "numpyconfig.h" +#ifdef HAVE_NPY_CONFIG_H +#include <npy_config.h> +#endif + +/* + * using static inline modifiers when defining npy_math functions + * allows the compiler to make optimizations when possible + */ +#ifndef NPY_INLINE_MATH +#if defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD + #define NPY_INLINE_MATH 1 +#else + #define NPY_INLINE_MATH 0 +#endif +#endif + +/* + * gcc does not unroll even with -O3 + * use with care, unrolling on modern cpus rarely speeds things up + */ +#ifdef HAVE_ATTRIBUTE_OPTIMIZE_UNROLL_LOOPS +#define NPY_GCC_UNROLL_LOOPS \ + __attribute__((optimize("unroll-loops"))) +#else +#define NPY_GCC_UNROLL_LOOPS +#endif + +/* highest gcc optimization level, enabled autovectorizer */ +#ifdef HAVE_ATTRIBUTE_OPTIMIZE_OPT_3 +#define NPY_GCC_OPT_3 __attribute__((optimize("O3"))) +#else +#define NPY_GCC_OPT_3 +#endif + +/* + * mark an argument (starting from 1) that must not be NULL and is not checked + * DO NOT USE IF FUNCTION CHECKS FOR NULL!! the compiler will remove the check + */ +#ifdef HAVE_ATTRIBUTE_NONNULL +#define NPY_GCC_NONNULL(n) __attribute__((nonnull(n))) +#else +#define NPY_GCC_NONNULL(n) +#endif + +/* + * give a hint to the compiler which branch is more likely or unlikely + * to occur, e.g. rare error cases: + * + * if (NPY_UNLIKELY(failure == 0)) + * return NULL; + * + * the double !! is to cast the expression (e.g. NULL) to a boolean required by + * the intrinsic + */ +#ifdef HAVE___BUILTIN_EXPECT +#define NPY_LIKELY(x) __builtin_expect(!!(x), 1) +#define NPY_UNLIKELY(x) __builtin_expect(!!(x), 0) +#else +#define NPY_LIKELY(x) (x) +#define NPY_UNLIKELY(x) (x) +#endif + +#ifdef HAVE___BUILTIN_PREFETCH +/* unlike _mm_prefetch also works on non-x86 */ +#define NPY_PREFETCH(x, rw, loc) __builtin_prefetch((x), (rw), (loc)) +#else +#ifdef NPY_HAVE_SSE +/* _MM_HINT_ET[01] (rw = 1) unsupported, only available in gcc >= 4.9 */ +#define NPY_PREFETCH(x, rw, loc) _mm_prefetch((x), loc == 0 ? _MM_HINT_NTA : \ + (loc == 1 ? _MM_HINT_T2 : \ + (loc == 2 ? _MM_HINT_T1 : \ + (loc == 3 ? 
_MM_HINT_T0 : -1)))) +#else +#define NPY_PREFETCH(x, rw,loc) +#endif +#endif + +/* `NPY_INLINE` kept for backwards compatibility; use `inline` instead */ +#if defined(_MSC_VER) && !defined(__clang__) + #define NPY_INLINE __inline +/* clang included here to handle clang-cl on Windows */ +#elif defined(__GNUC__) || defined(__clang__) + #if defined(__STRICT_ANSI__) + #define NPY_INLINE __inline__ + #else + #define NPY_INLINE inline + #endif +#else + #define NPY_INLINE +#endif + +#ifdef _MSC_VER + #define NPY_FINLINE static __forceinline +#elif defined(__GNUC__) + #define NPY_FINLINE static inline __attribute__((always_inline)) +#else + #define NPY_FINLINE static +#endif + +#if defined(_MSC_VER) + #define NPY_NOINLINE static __declspec(noinline) +#elif defined(__GNUC__) || defined(__clang__) + #define NPY_NOINLINE static __attribute__((noinline)) +#else + #define NPY_NOINLINE static +#endif + +#ifdef HAVE___THREAD + #define NPY_TLS __thread +#else + #ifdef HAVE___DECLSPEC_THREAD_ + #define NPY_TLS __declspec(thread) + #else + #define NPY_TLS + #endif +#endif + +#ifdef WITH_CPYCHECKER_RETURNS_BORROWED_REF_ATTRIBUTE + #define NPY_RETURNS_BORROWED_REF \ + __attribute__((cpychecker_returns_borrowed_ref)) +#else + #define NPY_RETURNS_BORROWED_REF +#endif + +#ifdef WITH_CPYCHECKER_STEALS_REFERENCE_TO_ARG_ATTRIBUTE + #define NPY_STEALS_REF_TO_ARG(n) \ + __attribute__((cpychecker_steals_reference_to_arg(n))) +#else + #define NPY_STEALS_REF_TO_ARG(n) +#endif + +/* 64 bit file position support, also on win-amd64. Issue gh-2256 */ +#if defined(_MSC_VER) && defined(_WIN64) && (_MSC_VER > 1400) || \ + defined(__MINGW32__) || defined(__MINGW64__) + #include <io.h> + + #define npy_fseek _fseeki64 + #define npy_ftell _ftelli64 + #define npy_lseek _lseeki64 + #define npy_off_t npy_int64 + + #if NPY_SIZEOF_INT == 8 + #define NPY_OFF_T_PYFMT "i" + #elif NPY_SIZEOF_LONG == 8 + #define NPY_OFF_T_PYFMT "l" + #elif NPY_SIZEOF_LONGLONG == 8 + #define NPY_OFF_T_PYFMT "L" + #else + #error Unsupported size for type off_t + #endif +#else +#ifdef HAVE_FSEEKO + #define npy_fseek fseeko +#else + #define npy_fseek fseek +#endif +#ifdef HAVE_FTELLO + #define npy_ftell ftello +#else + #define npy_ftell ftell +#endif + #include <sys/types.h> + #ifndef _WIN32 + #include <unistd.h> + #endif + #define npy_lseek lseek + #define npy_off_t off_t + + #if NPY_SIZEOF_OFF_T == NPY_SIZEOF_SHORT + #define NPY_OFF_T_PYFMT "h" + #elif NPY_SIZEOF_OFF_T == NPY_SIZEOF_INT + #define NPY_OFF_T_PYFMT "i" + #elif NPY_SIZEOF_OFF_T == NPY_SIZEOF_LONG + #define NPY_OFF_T_PYFMT "l" + #elif NPY_SIZEOF_OFF_T == NPY_SIZEOF_LONGLONG + #define NPY_OFF_T_PYFMT "L" + #else + #error Unsupported size for type off_t + #endif +#endif + +/* enums for detected endianness */ +enum { + NPY_CPU_UNKNOWN_ENDIAN, + NPY_CPU_LITTLE, + NPY_CPU_BIG +}; + +/* + * This is to typedef npy_intp to the appropriate pointer size for this + * platform. Py_intptr_t, Py_uintptr_t are defined in pyport.h. + */ +typedef Py_intptr_t npy_intp; +typedef Py_uintptr_t npy_uintp; + +/* + * Define sizes that were not defined in numpyconfig.h. 
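 *
 * Illustrative note, added for this document (not part of the NumPy
 * header): together with numpyconfig.h, these NPY_SIZEOF_* macros expand
 * to integer constants, so they can drive preprocessor conditionals
 * where sizeof() cannot be used, e.g.
 *
 *     #if NPY_SIZEOF_INTP == 8
 *     ... 64-bit array indices and pointers ...
 *     #endif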
+ */ +#define NPY_SIZEOF_CHAR 1 +#define NPY_SIZEOF_BYTE 1 +#define NPY_SIZEOF_DATETIME 8 +#define NPY_SIZEOF_TIMEDELTA 8 +#define NPY_SIZEOF_INTP NPY_SIZEOF_PY_INTPTR_T +#define NPY_SIZEOF_UINTP NPY_SIZEOF_PY_INTPTR_T +#define NPY_SIZEOF_HALF 2 +#define NPY_SIZEOF_CFLOAT NPY_SIZEOF_COMPLEX_FLOAT +#define NPY_SIZEOF_CDOUBLE NPY_SIZEOF_COMPLEX_DOUBLE +#define NPY_SIZEOF_CLONGDOUBLE NPY_SIZEOF_COMPLEX_LONGDOUBLE + +#ifdef constchar +#undef constchar +#endif + +#define NPY_SSIZE_T_PYFMT "n" +#define constchar char + +/* NPY_INTP_FMT Note: + * Unlike the other NPY_*_FMT macros, which are used with PyOS_snprintf, + * NPY_INTP_FMT is used with PyErr_Format and PyUnicode_FromFormat. Those + * functions use different formatting codes that are portably specified + * according to the Python documentation. See issue gh-2388. + */ +#if NPY_SIZEOF_PY_INTPTR_T == NPY_SIZEOF_INT + #define NPY_INTP NPY_INT + #define NPY_UINTP NPY_UINT + #define PyIntpArrType_Type PyIntArrType_Type + #define PyUIntpArrType_Type PyUIntArrType_Type + #define NPY_MAX_INTP NPY_MAX_INT + #define NPY_MIN_INTP NPY_MIN_INT + #define NPY_MAX_UINTP NPY_MAX_UINT + #define NPY_INTP_FMT "d" +#elif NPY_SIZEOF_PY_INTPTR_T == NPY_SIZEOF_LONG + #define NPY_INTP NPY_LONG + #define NPY_UINTP NPY_ULONG + #define PyIntpArrType_Type PyLongArrType_Type + #define PyUIntpArrType_Type PyULongArrType_Type + #define NPY_MAX_INTP NPY_MAX_LONG + #define NPY_MIN_INTP NPY_MIN_LONG + #define NPY_MAX_UINTP NPY_MAX_ULONG + #define NPY_INTP_FMT "ld" +#elif defined(PY_LONG_LONG) && (NPY_SIZEOF_PY_INTPTR_T == NPY_SIZEOF_LONGLONG) + #define NPY_INTP NPY_LONGLONG + #define NPY_UINTP NPY_ULONGLONG + #define PyIntpArrType_Type PyLongLongArrType_Type + #define PyUIntpArrType_Type PyULongLongArrType_Type + #define NPY_MAX_INTP NPY_MAX_LONGLONG + #define NPY_MIN_INTP NPY_MIN_LONGLONG + #define NPY_MAX_UINTP NPY_MAX_ULONGLONG + #define NPY_INTP_FMT "lld" +#endif + +/* + * We can only use C99 formats for npy_int_p if it is the same as + * intp_t, hence the condition on HAVE_UNITPTR_T + */ +#if (NPY_USE_C99_FORMATS) == 1 \ + && (defined HAVE_UINTPTR_T) \ + && (defined HAVE_INTTYPES_H) + #include <inttypes.h> + #undef NPY_INTP_FMT + #define NPY_INTP_FMT PRIdPTR +#endif + + +/* + * Some platforms don't define bool, long long, or long double. + * Handle that here. + */ +#define NPY_BYTE_FMT "hhd" +#define NPY_UBYTE_FMT "hhu" +#define NPY_SHORT_FMT "hd" +#define NPY_USHORT_FMT "hu" +#define NPY_INT_FMT "d" +#define NPY_UINT_FMT "u" +#define NPY_LONG_FMT "ld" +#define NPY_ULONG_FMT "lu" +#define NPY_HALF_FMT "g" +#define NPY_FLOAT_FMT "g" +#define NPY_DOUBLE_FMT "g" + + +#ifdef PY_LONG_LONG +typedef PY_LONG_LONG npy_longlong; +typedef unsigned PY_LONG_LONG npy_ulonglong; +# ifdef _MSC_VER +# define NPY_LONGLONG_FMT "I64d" +# define NPY_ULONGLONG_FMT "I64u" +# else +# define NPY_LONGLONG_FMT "lld" +# define NPY_ULONGLONG_FMT "llu" +# endif +# ifdef _MSC_VER +# define NPY_LONGLONG_SUFFIX(x) (x##i64) +# define NPY_ULONGLONG_SUFFIX(x) (x##Ui64) +# else +# define NPY_LONGLONG_SUFFIX(x) (x##LL) +# define NPY_ULONGLONG_SUFFIX(x) (x##ULL) +# endif +#else +typedef long npy_longlong; +typedef unsigned long npy_ulonglong; +# define NPY_LONGLONG_SUFFIX(x) (x##L) +# define NPY_ULONGLONG_SUFFIX(x) (x##UL) +#endif + + +typedef unsigned char npy_bool; +#define NPY_FALSE 0 +#define NPY_TRUE 1 +/* + * `NPY_SIZEOF_LONGDOUBLE` isn't usually equal to sizeof(long double). 
+ * In some certain cases, it may forced to be equal to sizeof(double) + * even against the compiler implementation and the same goes for + * `complex long double`. + * + * Therefore, avoid `long double`, use `npy_longdouble` instead, + * and when it comes to standard math functions make sure of using + * the double version when `NPY_SIZEOF_LONGDOUBLE` == `NPY_SIZEOF_DOUBLE`. + * For example: + * npy_longdouble *ptr, x; + * #if NPY_SIZEOF_LONGDOUBLE == NPY_SIZEOF_DOUBLE + * npy_longdouble r = modf(x, ptr); + * #else + * npy_longdouble r = modfl(x, ptr); + * #endif + * + * See https://github.com/numpy/numpy/issues/20348 + */ +#if NPY_SIZEOF_LONGDOUBLE == NPY_SIZEOF_DOUBLE + #define NPY_LONGDOUBLE_FMT "g" + typedef double npy_longdouble; +#else + #define NPY_LONGDOUBLE_FMT "Lg" + typedef long double npy_longdouble; +#endif + +#ifndef Py_USING_UNICODE +#error Must use Python with unicode enabled. +#endif + + +typedef signed char npy_byte; +typedef unsigned char npy_ubyte; +typedef unsigned short npy_ushort; +typedef unsigned int npy_uint; +typedef unsigned long npy_ulong; + +/* These are for completeness */ +typedef char npy_char; +typedef short npy_short; +typedef int npy_int; +typedef long npy_long; +typedef float npy_float; +typedef double npy_double; + +typedef Py_hash_t npy_hash_t; +#define NPY_SIZEOF_HASH_T NPY_SIZEOF_INTP + +/* + * Disabling C99 complex usage: a lot of C code in numpy/scipy rely on being + * able to do .real/.imag. Will have to convert code first. + */ +#if 0 +#if defined(NPY_USE_C99_COMPLEX) && defined(NPY_HAVE_COMPLEX_DOUBLE) +typedef complex npy_cdouble; +#else +typedef struct { double real, imag; } npy_cdouble; +#endif + +#if defined(NPY_USE_C99_COMPLEX) && defined(NPY_HAVE_COMPLEX_FLOAT) +typedef complex float npy_cfloat; +#else +typedef struct { float real, imag; } npy_cfloat; +#endif + +#if defined(NPY_USE_C99_COMPLEX) && defined(NPY_HAVE_COMPLEX_LONG_DOUBLE) +typedef complex long double npy_clongdouble; +#else +typedef struct {npy_longdouble real, imag;} npy_clongdouble; +#endif +#endif +#if NPY_SIZEOF_COMPLEX_DOUBLE != 2 * NPY_SIZEOF_DOUBLE +#error npy_cdouble definition is not compatible with C99 complex definition ! \ + Please contact NumPy maintainers and give detailed information about your \ + compiler and platform +#endif +typedef struct { double real, imag; } npy_cdouble; + +#if NPY_SIZEOF_COMPLEX_FLOAT != 2 * NPY_SIZEOF_FLOAT +#error npy_cfloat definition is not compatible with C99 complex definition ! \ + Please contact NumPy maintainers and give detailed information about your \ + compiler and platform +#endif +typedef struct { float real, imag; } npy_cfloat; + +#if NPY_SIZEOF_COMPLEX_LONGDOUBLE != 2 * NPY_SIZEOF_LONGDOUBLE +#error npy_clongdouble definition is not compatible with C99 complex definition ! 
\ + Please contact NumPy maintainers and give detailed information about your \ + compiler and platform +#endif +typedef struct { npy_longdouble real, imag; } npy_clongdouble; + +/* + * numarray-style bit-width typedefs + */ +#define NPY_MAX_INT8 127 +#define NPY_MIN_INT8 -128 +#define NPY_MAX_UINT8 255 +#define NPY_MAX_INT16 32767 +#define NPY_MIN_INT16 -32768 +#define NPY_MAX_UINT16 65535 +#define NPY_MAX_INT32 2147483647 +#define NPY_MIN_INT32 (-NPY_MAX_INT32 - 1) +#define NPY_MAX_UINT32 4294967295U +#define NPY_MAX_INT64 NPY_LONGLONG_SUFFIX(9223372036854775807) +#define NPY_MIN_INT64 (-NPY_MAX_INT64 - NPY_LONGLONG_SUFFIX(1)) +#define NPY_MAX_UINT64 NPY_ULONGLONG_SUFFIX(18446744073709551615) +#define NPY_MAX_INT128 NPY_LONGLONG_SUFFIX(85070591730234615865843651857942052864) +#define NPY_MIN_INT128 (-NPY_MAX_INT128 - NPY_LONGLONG_SUFFIX(1)) +#define NPY_MAX_UINT128 NPY_ULONGLONG_SUFFIX(170141183460469231731687303715884105728) +#define NPY_MAX_INT256 NPY_LONGLONG_SUFFIX(57896044618658097711785492504343953926634992332820282019728792003956564819967) +#define NPY_MIN_INT256 (-NPY_MAX_INT256 - NPY_LONGLONG_SUFFIX(1)) +#define NPY_MAX_UINT256 NPY_ULONGLONG_SUFFIX(115792089237316195423570985008687907853269984665640564039457584007913129639935) +#define NPY_MIN_DATETIME NPY_MIN_INT64 +#define NPY_MAX_DATETIME NPY_MAX_INT64 +#define NPY_MIN_TIMEDELTA NPY_MIN_INT64 +#define NPY_MAX_TIMEDELTA NPY_MAX_INT64 + + /* Need to find the number of bits for each type and + make definitions accordingly. + + C states that sizeof(char) == 1 by definition + + So, just using the sizeof keyword won't help. + + It also looks like Python itself uses sizeof(char) quite a + bit, which by definition should be 1 all the time. + + Idea: Make Use of CHAR_BIT which should tell us how many + BITS per CHARACTER + */ + + /* Include platform definitions -- These are in the C89/90 standard */ +#include <limits.h> +#define NPY_MAX_BYTE SCHAR_MAX +#define NPY_MIN_BYTE SCHAR_MIN +#define NPY_MAX_UBYTE UCHAR_MAX +#define NPY_MAX_SHORT SHRT_MAX +#define NPY_MIN_SHORT SHRT_MIN +#define NPY_MAX_USHORT USHRT_MAX +#define NPY_MAX_INT INT_MAX +#ifndef INT_MIN +#define INT_MIN (-INT_MAX - 1) +#endif +#define NPY_MIN_INT INT_MIN +#define NPY_MAX_UINT UINT_MAX +#define NPY_MAX_LONG LONG_MAX +#define NPY_MIN_LONG LONG_MIN +#define NPY_MAX_ULONG ULONG_MAX + +#define NPY_BITSOF_BOOL (sizeof(npy_bool) * CHAR_BIT) +#define NPY_BITSOF_CHAR CHAR_BIT +#define NPY_BITSOF_BYTE (NPY_SIZEOF_BYTE * CHAR_BIT) +#define NPY_BITSOF_SHORT (NPY_SIZEOF_SHORT * CHAR_BIT) +#define NPY_BITSOF_INT (NPY_SIZEOF_INT * CHAR_BIT) +#define NPY_BITSOF_LONG (NPY_SIZEOF_LONG * CHAR_BIT) +#define NPY_BITSOF_LONGLONG (NPY_SIZEOF_LONGLONG * CHAR_BIT) +#define NPY_BITSOF_INTP (NPY_SIZEOF_INTP * CHAR_BIT) +#define NPY_BITSOF_HALF (NPY_SIZEOF_HALF * CHAR_BIT) +#define NPY_BITSOF_FLOAT (NPY_SIZEOF_FLOAT * CHAR_BIT) +#define NPY_BITSOF_DOUBLE (NPY_SIZEOF_DOUBLE * CHAR_BIT) +#define NPY_BITSOF_LONGDOUBLE (NPY_SIZEOF_LONGDOUBLE * CHAR_BIT) +#define NPY_BITSOF_CFLOAT (NPY_SIZEOF_CFLOAT * CHAR_BIT) +#define NPY_BITSOF_CDOUBLE (NPY_SIZEOF_CDOUBLE * CHAR_BIT) +#define NPY_BITSOF_CLONGDOUBLE (NPY_SIZEOF_CLONGDOUBLE * CHAR_BIT) +#define NPY_BITSOF_DATETIME (NPY_SIZEOF_DATETIME * CHAR_BIT) +#define NPY_BITSOF_TIMEDELTA (NPY_SIZEOF_TIMEDELTA * CHAR_BIT) + +#if NPY_BITSOF_LONG == 8 +#define NPY_INT8 NPY_LONG +#define NPY_UINT8 NPY_ULONG + typedef long npy_int8; + typedef unsigned long npy_uint8; +#define PyInt8ScalarObject PyLongScalarObject +#define PyInt8ArrType_Type PyLongArrType_Type 
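/*
 * Illustrative note, added for this document (not part of the NumPy
 * header): this block and the analogous NPY_BITSOF_LONGLONG / _INT /
 * _SHORT / _CHAR blocks below all follow one scheme: the bit count of
 * each native C type selects which fixed-width alias (npy_int8 ..
 * npy_int256, plus the matching scalar-object and *_FMT macros) that
 * type provides, and the #ifndef NPY_INT<N> guards let the first type
 * of a given width win; e.g. on an LP64 platform npy_int64 ends up
 * being long rather than long long.
 */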
+#define PyUInt8ScalarObject PyULongScalarObject +#define PyUInt8ArrType_Type PyULongArrType_Type +#define NPY_INT8_FMT NPY_LONG_FMT +#define NPY_UINT8_FMT NPY_ULONG_FMT +#elif NPY_BITSOF_LONG == 16 +#define NPY_INT16 NPY_LONG +#define NPY_UINT16 NPY_ULONG + typedef long npy_int16; + typedef unsigned long npy_uint16; +#define PyInt16ScalarObject PyLongScalarObject +#define PyInt16ArrType_Type PyLongArrType_Type +#define PyUInt16ScalarObject PyULongScalarObject +#define PyUInt16ArrType_Type PyULongArrType_Type +#define NPY_INT16_FMT NPY_LONG_FMT +#define NPY_UINT16_FMT NPY_ULONG_FMT +#elif NPY_BITSOF_LONG == 32 +#define NPY_INT32 NPY_LONG +#define NPY_UINT32 NPY_ULONG + typedef long npy_int32; + typedef unsigned long npy_uint32; + typedef unsigned long npy_ucs4; +#define PyInt32ScalarObject PyLongScalarObject +#define PyInt32ArrType_Type PyLongArrType_Type +#define PyUInt32ScalarObject PyULongScalarObject +#define PyUInt32ArrType_Type PyULongArrType_Type +#define NPY_INT32_FMT NPY_LONG_FMT +#define NPY_UINT32_FMT NPY_ULONG_FMT +#elif NPY_BITSOF_LONG == 64 +#define NPY_INT64 NPY_LONG +#define NPY_UINT64 NPY_ULONG + typedef long npy_int64; + typedef unsigned long npy_uint64; +#define PyInt64ScalarObject PyLongScalarObject +#define PyInt64ArrType_Type PyLongArrType_Type +#define PyUInt64ScalarObject PyULongScalarObject +#define PyUInt64ArrType_Type PyULongArrType_Type +#define NPY_INT64_FMT NPY_LONG_FMT +#define NPY_UINT64_FMT NPY_ULONG_FMT +#define MyPyLong_FromInt64 PyLong_FromLong +#define MyPyLong_AsInt64 PyLong_AsLong +#elif NPY_BITSOF_LONG == 128 +#define NPY_INT128 NPY_LONG +#define NPY_UINT128 NPY_ULONG + typedef long npy_int128; + typedef unsigned long npy_uint128; +#define PyInt128ScalarObject PyLongScalarObject +#define PyInt128ArrType_Type PyLongArrType_Type +#define PyUInt128ScalarObject PyULongScalarObject +#define PyUInt128ArrType_Type PyULongArrType_Type +#define NPY_INT128_FMT NPY_LONG_FMT +#define NPY_UINT128_FMT NPY_ULONG_FMT +#endif + +#if NPY_BITSOF_LONGLONG == 8 +# ifndef NPY_INT8 +# define NPY_INT8 NPY_LONGLONG +# define NPY_UINT8 NPY_ULONGLONG + typedef npy_longlong npy_int8; + typedef npy_ulonglong npy_uint8; +# define PyInt8ScalarObject PyLongLongScalarObject +# define PyInt8ArrType_Type PyLongLongArrType_Type +# define PyUInt8ScalarObject PyULongLongScalarObject +# define PyUInt8ArrType_Type PyULongLongArrType_Type +#define NPY_INT8_FMT NPY_LONGLONG_FMT +#define NPY_UINT8_FMT NPY_ULONGLONG_FMT +# endif +# define NPY_MAX_LONGLONG NPY_MAX_INT8 +# define NPY_MIN_LONGLONG NPY_MIN_INT8 +# define NPY_MAX_ULONGLONG NPY_MAX_UINT8 +#elif NPY_BITSOF_LONGLONG == 16 +# ifndef NPY_INT16 +# define NPY_INT16 NPY_LONGLONG +# define NPY_UINT16 NPY_ULONGLONG + typedef npy_longlong npy_int16; + typedef npy_ulonglong npy_uint16; +# define PyInt16ScalarObject PyLongLongScalarObject +# define PyInt16ArrType_Type PyLongLongArrType_Type +# define PyUInt16ScalarObject PyULongLongScalarObject +# define PyUInt16ArrType_Type PyULongLongArrType_Type +#define NPY_INT16_FMT NPY_LONGLONG_FMT +#define NPY_UINT16_FMT NPY_ULONGLONG_FMT +# endif +# define NPY_MAX_LONGLONG NPY_MAX_INT16 +# define NPY_MIN_LONGLONG NPY_MIN_INT16 +# define NPY_MAX_ULONGLONG NPY_MAX_UINT16 +#elif NPY_BITSOF_LONGLONG == 32 +# ifndef NPY_INT32 +# define NPY_INT32 NPY_LONGLONG +# define NPY_UINT32 NPY_ULONGLONG + typedef npy_longlong npy_int32; + typedef npy_ulonglong npy_uint32; + typedef npy_ulonglong npy_ucs4; +# define PyInt32ScalarObject PyLongLongScalarObject +# define PyInt32ArrType_Type PyLongLongArrType_Type +# define 
PyUInt32ScalarObject PyULongLongScalarObject +# define PyUInt32ArrType_Type PyULongLongArrType_Type +#define NPY_INT32_FMT NPY_LONGLONG_FMT +#define NPY_UINT32_FMT NPY_ULONGLONG_FMT +# endif +# define NPY_MAX_LONGLONG NPY_MAX_INT32 +# define NPY_MIN_LONGLONG NPY_MIN_INT32 +# define NPY_MAX_ULONGLONG NPY_MAX_UINT32 +#elif NPY_BITSOF_LONGLONG == 64 +# ifndef NPY_INT64 +# define NPY_INT64 NPY_LONGLONG +# define NPY_UINT64 NPY_ULONGLONG + typedef npy_longlong npy_int64; + typedef npy_ulonglong npy_uint64; +# define PyInt64ScalarObject PyLongLongScalarObject +# define PyInt64ArrType_Type PyLongLongArrType_Type +# define PyUInt64ScalarObject PyULongLongScalarObject +# define PyUInt64ArrType_Type PyULongLongArrType_Type +#define NPY_INT64_FMT NPY_LONGLONG_FMT +#define NPY_UINT64_FMT NPY_ULONGLONG_FMT +# define MyPyLong_FromInt64 PyLong_FromLongLong +# define MyPyLong_AsInt64 PyLong_AsLongLong +# endif +# define NPY_MAX_LONGLONG NPY_MAX_INT64 +# define NPY_MIN_LONGLONG NPY_MIN_INT64 +# define NPY_MAX_ULONGLONG NPY_MAX_UINT64 +#elif NPY_BITSOF_LONGLONG == 128 +# ifndef NPY_INT128 +# define NPY_INT128 NPY_LONGLONG +# define NPY_UINT128 NPY_ULONGLONG + typedef npy_longlong npy_int128; + typedef npy_ulonglong npy_uint128; +# define PyInt128ScalarObject PyLongLongScalarObject +# define PyInt128ArrType_Type PyLongLongArrType_Type +# define PyUInt128ScalarObject PyULongLongScalarObject +# define PyUInt128ArrType_Type PyULongLongArrType_Type +#define NPY_INT128_FMT NPY_LONGLONG_FMT +#define NPY_UINT128_FMT NPY_ULONGLONG_FMT +# endif +# define NPY_MAX_LONGLONG NPY_MAX_INT128 +# define NPY_MIN_LONGLONG NPY_MIN_INT128 +# define NPY_MAX_ULONGLONG NPY_MAX_UINT128 +#elif NPY_BITSOF_LONGLONG == 256 +# define NPY_INT256 NPY_LONGLONG +# define NPY_UINT256 NPY_ULONGLONG + typedef npy_longlong npy_int256; + typedef npy_ulonglong npy_uint256; +# define PyInt256ScalarObject PyLongLongScalarObject +# define PyInt256ArrType_Type PyLongLongArrType_Type +# define PyUInt256ScalarObject PyULongLongScalarObject +# define PyUInt256ArrType_Type PyULongLongArrType_Type +#define NPY_INT256_FMT NPY_LONGLONG_FMT +#define NPY_UINT256_FMT NPY_ULONGLONG_FMT +# define NPY_MAX_LONGLONG NPY_MAX_INT256 +# define NPY_MIN_LONGLONG NPY_MIN_INT256 +# define NPY_MAX_ULONGLONG NPY_MAX_UINT256 +#endif + +#if NPY_BITSOF_INT == 8 +#ifndef NPY_INT8 +#define NPY_INT8 NPY_INT +#define NPY_UINT8 NPY_UINT + typedef int npy_int8; + typedef unsigned int npy_uint8; +# define PyInt8ScalarObject PyIntScalarObject +# define PyInt8ArrType_Type PyIntArrType_Type +# define PyUInt8ScalarObject PyUIntScalarObject +# define PyUInt8ArrType_Type PyUIntArrType_Type +#define NPY_INT8_FMT NPY_INT_FMT +#define NPY_UINT8_FMT NPY_UINT_FMT +#endif +#elif NPY_BITSOF_INT == 16 +#ifndef NPY_INT16 +#define NPY_INT16 NPY_INT +#define NPY_UINT16 NPY_UINT + typedef int npy_int16; + typedef unsigned int npy_uint16; +# define PyInt16ScalarObject PyIntScalarObject +# define PyInt16ArrType_Type PyIntArrType_Type +# define PyUInt16ScalarObject PyIntUScalarObject +# define PyUInt16ArrType_Type PyIntUArrType_Type +#define NPY_INT16_FMT NPY_INT_FMT +#define NPY_UINT16_FMT NPY_UINT_FMT +#endif +#elif NPY_BITSOF_INT == 32 +#ifndef NPY_INT32 +#define NPY_INT32 NPY_INT +#define NPY_UINT32 NPY_UINT + typedef int npy_int32; + typedef unsigned int npy_uint32; + typedef unsigned int npy_ucs4; +# define PyInt32ScalarObject PyIntScalarObject +# define PyInt32ArrType_Type PyIntArrType_Type +# define PyUInt32ScalarObject PyUIntScalarObject +# define PyUInt32ArrType_Type PyUIntArrType_Type +#define 
NPY_INT32_FMT NPY_INT_FMT +#define NPY_UINT32_FMT NPY_UINT_FMT +#endif +#elif NPY_BITSOF_INT == 64 +#ifndef NPY_INT64 +#define NPY_INT64 NPY_INT +#define NPY_UINT64 NPY_UINT + typedef int npy_int64; + typedef unsigned int npy_uint64; +# define PyInt64ScalarObject PyIntScalarObject +# define PyInt64ArrType_Type PyIntArrType_Type +# define PyUInt64ScalarObject PyUIntScalarObject +# define PyUInt64ArrType_Type PyUIntArrType_Type +#define NPY_INT64_FMT NPY_INT_FMT +#define NPY_UINT64_FMT NPY_UINT_FMT +# define MyPyLong_FromInt64 PyLong_FromLong +# define MyPyLong_AsInt64 PyLong_AsLong +#endif +#elif NPY_BITSOF_INT == 128 +#ifndef NPY_INT128 +#define NPY_INT128 NPY_INT +#define NPY_UINT128 NPY_UINT + typedef int npy_int128; + typedef unsigned int npy_uint128; +# define PyInt128ScalarObject PyIntScalarObject +# define PyInt128ArrType_Type PyIntArrType_Type +# define PyUInt128ScalarObject PyUIntScalarObject +# define PyUInt128ArrType_Type PyUIntArrType_Type +#define NPY_INT128_FMT NPY_INT_FMT +#define NPY_UINT128_FMT NPY_UINT_FMT +#endif +#endif + +#if NPY_BITSOF_SHORT == 8 +#ifndef NPY_INT8 +#define NPY_INT8 NPY_SHORT +#define NPY_UINT8 NPY_USHORT + typedef short npy_int8; + typedef unsigned short npy_uint8; +# define PyInt8ScalarObject PyShortScalarObject +# define PyInt8ArrType_Type PyShortArrType_Type +# define PyUInt8ScalarObject PyUShortScalarObject +# define PyUInt8ArrType_Type PyUShortArrType_Type +#define NPY_INT8_FMT NPY_SHORT_FMT +#define NPY_UINT8_FMT NPY_USHORT_FMT +#endif +#elif NPY_BITSOF_SHORT == 16 +#ifndef NPY_INT16 +#define NPY_INT16 NPY_SHORT +#define NPY_UINT16 NPY_USHORT + typedef short npy_int16; + typedef unsigned short npy_uint16; +# define PyInt16ScalarObject PyShortScalarObject +# define PyInt16ArrType_Type PyShortArrType_Type +# define PyUInt16ScalarObject PyUShortScalarObject +# define PyUInt16ArrType_Type PyUShortArrType_Type +#define NPY_INT16_FMT NPY_SHORT_FMT +#define NPY_UINT16_FMT NPY_USHORT_FMT +#endif +#elif NPY_BITSOF_SHORT == 32 +#ifndef NPY_INT32 +#define NPY_INT32 NPY_SHORT +#define NPY_UINT32 NPY_USHORT + typedef short npy_int32; + typedef unsigned short npy_uint32; + typedef unsigned short npy_ucs4; +# define PyInt32ScalarObject PyShortScalarObject +# define PyInt32ArrType_Type PyShortArrType_Type +# define PyUInt32ScalarObject PyUShortScalarObject +# define PyUInt32ArrType_Type PyUShortArrType_Type +#define NPY_INT32_FMT NPY_SHORT_FMT +#define NPY_UINT32_FMT NPY_USHORT_FMT +#endif +#elif NPY_BITSOF_SHORT == 64 +#ifndef NPY_INT64 +#define NPY_INT64 NPY_SHORT +#define NPY_UINT64 NPY_USHORT + typedef short npy_int64; + typedef unsigned short npy_uint64; +# define PyInt64ScalarObject PyShortScalarObject +# define PyInt64ArrType_Type PyShortArrType_Type +# define PyUInt64ScalarObject PyUShortScalarObject +# define PyUInt64ArrType_Type PyUShortArrType_Type +#define NPY_INT64_FMT NPY_SHORT_FMT +#define NPY_UINT64_FMT NPY_USHORT_FMT +# define MyPyLong_FromInt64 PyLong_FromLong +# define MyPyLong_AsInt64 PyLong_AsLong +#endif +#elif NPY_BITSOF_SHORT == 128 +#ifndef NPY_INT128 +#define NPY_INT128 NPY_SHORT +#define NPY_UINT128 NPY_USHORT + typedef short npy_int128; + typedef unsigned short npy_uint128; +# define PyInt128ScalarObject PyShortScalarObject +# define PyInt128ArrType_Type PyShortArrType_Type +# define PyUInt128ScalarObject PyUShortScalarObject +# define PyUInt128ArrType_Type PyUShortArrType_Type +#define NPY_INT128_FMT NPY_SHORT_FMT +#define NPY_UINT128_FMT NPY_USHORT_FMT +#endif +#endif + + +#if NPY_BITSOF_CHAR == 8 +#ifndef NPY_INT8 +#define NPY_INT8 
NPY_BYTE +#define NPY_UINT8 NPY_UBYTE + typedef signed char npy_int8; + typedef unsigned char npy_uint8; +# define PyInt8ScalarObject PyByteScalarObject +# define PyInt8ArrType_Type PyByteArrType_Type +# define PyUInt8ScalarObject PyUByteScalarObject +# define PyUInt8ArrType_Type PyUByteArrType_Type +#define NPY_INT8_FMT NPY_BYTE_FMT +#define NPY_UINT8_FMT NPY_UBYTE_FMT +#endif +#elif NPY_BITSOF_CHAR == 16 +#ifndef NPY_INT16 +#define NPY_INT16 NPY_BYTE +#define NPY_UINT16 NPY_UBYTE + typedef signed char npy_int16; + typedef unsigned char npy_uint16; +# define PyInt16ScalarObject PyByteScalarObject +# define PyInt16ArrType_Type PyByteArrType_Type +# define PyUInt16ScalarObject PyUByteScalarObject +# define PyUInt16ArrType_Type PyUByteArrType_Type +#define NPY_INT16_FMT NPY_BYTE_FMT +#define NPY_UINT16_FMT NPY_UBYTE_FMT +#endif +#elif NPY_BITSOF_CHAR == 32 +#ifndef NPY_INT32 +#define NPY_INT32 NPY_BYTE +#define NPY_UINT32 NPY_UBYTE + typedef signed char npy_int32; + typedef unsigned char npy_uint32; + typedef unsigned char npy_ucs4; +# define PyInt32ScalarObject PyByteScalarObject +# define PyInt32ArrType_Type PyByteArrType_Type +# define PyUInt32ScalarObject PyUByteScalarObject +# define PyUInt32ArrType_Type PyUByteArrType_Type +#define NPY_INT32_FMT NPY_BYTE_FMT +#define NPY_UINT32_FMT NPY_UBYTE_FMT +#endif +#elif NPY_BITSOF_CHAR == 64 +#ifndef NPY_INT64 +#define NPY_INT64 NPY_BYTE +#define NPY_UINT64 NPY_UBYTE + typedef signed char npy_int64; + typedef unsigned char npy_uint64; +# define PyInt64ScalarObject PyByteScalarObject +# define PyInt64ArrType_Type PyByteArrType_Type +# define PyUInt64ScalarObject PyUByteScalarObject +# define PyUInt64ArrType_Type PyUByteArrType_Type +#define NPY_INT64_FMT NPY_BYTE_FMT +#define NPY_UINT64_FMT NPY_UBYTE_FMT +# define MyPyLong_FromInt64 PyLong_FromLong +# define MyPyLong_AsInt64 PyLong_AsLong +#endif +#elif NPY_BITSOF_CHAR == 128 +#ifndef NPY_INT128 +#define NPY_INT128 NPY_BYTE +#define NPY_UINT128 NPY_UBYTE + typedef signed char npy_int128; + typedef unsigned char npy_uint128; +# define PyInt128ScalarObject PyByteScalarObject +# define PyInt128ArrType_Type PyByteArrType_Type +# define PyUInt128ScalarObject PyUByteScalarObject +# define PyUInt128ArrType_Type PyUByteArrType_Type +#define NPY_INT128_FMT NPY_BYTE_FMT +#define NPY_UINT128_FMT NPY_UBYTE_FMT +#endif +#endif + + + +#if NPY_BITSOF_DOUBLE == 32 +#ifndef NPY_FLOAT32 +#define NPY_FLOAT32 NPY_DOUBLE +#define NPY_COMPLEX64 NPY_CDOUBLE + typedef double npy_float32; + typedef npy_cdouble npy_complex64; +# define PyFloat32ScalarObject PyDoubleScalarObject +# define PyComplex64ScalarObject PyCDoubleScalarObject +# define PyFloat32ArrType_Type PyDoubleArrType_Type +# define PyComplex64ArrType_Type PyCDoubleArrType_Type +#define NPY_FLOAT32_FMT NPY_DOUBLE_FMT +#define NPY_COMPLEX64_FMT NPY_CDOUBLE_FMT +#endif +#elif NPY_BITSOF_DOUBLE == 64 +#ifndef NPY_FLOAT64 +#define NPY_FLOAT64 NPY_DOUBLE +#define NPY_COMPLEX128 NPY_CDOUBLE + typedef double npy_float64; + typedef npy_cdouble npy_complex128; +# define PyFloat64ScalarObject PyDoubleScalarObject +# define PyComplex128ScalarObject PyCDoubleScalarObject +# define PyFloat64ArrType_Type PyDoubleArrType_Type +# define PyComplex128ArrType_Type PyCDoubleArrType_Type +#define NPY_FLOAT64_FMT NPY_DOUBLE_FMT +#define NPY_COMPLEX128_FMT NPY_CDOUBLE_FMT +#endif +#elif NPY_BITSOF_DOUBLE == 80 +#ifndef NPY_FLOAT80 +#define NPY_FLOAT80 NPY_DOUBLE +#define NPY_COMPLEX160 NPY_CDOUBLE + typedef double npy_float80; + typedef npy_cdouble npy_complex160; +# define 
PyFloat80ScalarObject PyDoubleScalarObject +# define PyComplex160ScalarObject PyCDoubleScalarObject +# define PyFloat80ArrType_Type PyDoubleArrType_Type +# define PyComplex160ArrType_Type PyCDoubleArrType_Type +#define NPY_FLOAT80_FMT NPY_DOUBLE_FMT +#define NPY_COMPLEX160_FMT NPY_CDOUBLE_FMT +#endif +#elif NPY_BITSOF_DOUBLE == 96 +#ifndef NPY_FLOAT96 +#define NPY_FLOAT96 NPY_DOUBLE +#define NPY_COMPLEX192 NPY_CDOUBLE + typedef double npy_float96; + typedef npy_cdouble npy_complex192; +# define PyFloat96ScalarObject PyDoubleScalarObject +# define PyComplex192ScalarObject PyCDoubleScalarObject +# define PyFloat96ArrType_Type PyDoubleArrType_Type +# define PyComplex192ArrType_Type PyCDoubleArrType_Type +#define NPY_FLOAT96_FMT NPY_DOUBLE_FMT +#define NPY_COMPLEX192_FMT NPY_CDOUBLE_FMT +#endif +#elif NPY_BITSOF_DOUBLE == 128 +#ifndef NPY_FLOAT128 +#define NPY_FLOAT128 NPY_DOUBLE +#define NPY_COMPLEX256 NPY_CDOUBLE + typedef double npy_float128; + typedef npy_cdouble npy_complex256; +# define PyFloat128ScalarObject PyDoubleScalarObject +# define PyComplex256ScalarObject PyCDoubleScalarObject +# define PyFloat128ArrType_Type PyDoubleArrType_Type +# define PyComplex256ArrType_Type PyCDoubleArrType_Type +#define NPY_FLOAT128_FMT NPY_DOUBLE_FMT +#define NPY_COMPLEX256_FMT NPY_CDOUBLE_FMT +#endif +#endif + + + +#if NPY_BITSOF_FLOAT == 32 +#ifndef NPY_FLOAT32 +#define NPY_FLOAT32 NPY_FLOAT +#define NPY_COMPLEX64 NPY_CFLOAT + typedef float npy_float32; + typedef npy_cfloat npy_complex64; +# define PyFloat32ScalarObject PyFloatScalarObject +# define PyComplex64ScalarObject PyCFloatScalarObject +# define PyFloat32ArrType_Type PyFloatArrType_Type +# define PyComplex64ArrType_Type PyCFloatArrType_Type +#define NPY_FLOAT32_FMT NPY_FLOAT_FMT +#define NPY_COMPLEX64_FMT NPY_CFLOAT_FMT +#endif +#elif NPY_BITSOF_FLOAT == 64 +#ifndef NPY_FLOAT64 +#define NPY_FLOAT64 NPY_FLOAT +#define NPY_COMPLEX128 NPY_CFLOAT + typedef float npy_float64; + typedef npy_cfloat npy_complex128; +# define PyFloat64ScalarObject PyFloatScalarObject +# define PyComplex128ScalarObject PyCFloatScalarObject +# define PyFloat64ArrType_Type PyFloatArrType_Type +# define PyComplex128ArrType_Type PyCFloatArrType_Type +#define NPY_FLOAT64_FMT NPY_FLOAT_FMT +#define NPY_COMPLEX128_FMT NPY_CFLOAT_FMT +#endif +#elif NPY_BITSOF_FLOAT == 80 +#ifndef NPY_FLOAT80 +#define NPY_FLOAT80 NPY_FLOAT +#define NPY_COMPLEX160 NPY_CFLOAT + typedef float npy_float80; + typedef npy_cfloat npy_complex160; +# define PyFloat80ScalarObject PyFloatScalarObject +# define PyComplex160ScalarObject PyCFloatScalarObject +# define PyFloat80ArrType_Type PyFloatArrType_Type +# define PyComplex160ArrType_Type PyCFloatArrType_Type +#define NPY_FLOAT80_FMT NPY_FLOAT_FMT +#define NPY_COMPLEX160_FMT NPY_CFLOAT_FMT +#endif +#elif NPY_BITSOF_FLOAT == 96 +#ifndef NPY_FLOAT96 +#define NPY_FLOAT96 NPY_FLOAT +#define NPY_COMPLEX192 NPY_CFLOAT + typedef float npy_float96; + typedef npy_cfloat npy_complex192; +# define PyFloat96ScalarObject PyFloatScalarObject +# define PyComplex192ScalarObject PyCFloatScalarObject +# define PyFloat96ArrType_Type PyFloatArrType_Type +# define PyComplex192ArrType_Type PyCFloatArrType_Type +#define NPY_FLOAT96_FMT NPY_FLOAT_FMT +#define NPY_COMPLEX192_FMT NPY_CFLOAT_FMT +#endif +#elif NPY_BITSOF_FLOAT == 128 +#ifndef NPY_FLOAT128 +#define NPY_FLOAT128 NPY_FLOAT +#define NPY_COMPLEX256 NPY_CFLOAT + typedef float npy_float128; + typedef npy_cfloat npy_complex256; +# define PyFloat128ScalarObject PyFloatScalarObject +# define PyComplex256ScalarObject 
PyCFloatScalarObject +# define PyFloat128ArrType_Type PyFloatArrType_Type +# define PyComplex256ArrType_Type PyCFloatArrType_Type +#define NPY_FLOAT128_FMT NPY_FLOAT_FMT +#define NPY_COMPLEX256_FMT NPY_CFLOAT_FMT +#endif +#endif + +/* half/float16 isn't a floating-point type in C */ +#define NPY_FLOAT16 NPY_HALF +typedef npy_uint16 npy_half; +typedef npy_half npy_float16; + +#if NPY_BITSOF_LONGDOUBLE == 32 +#ifndef NPY_FLOAT32 +#define NPY_FLOAT32 NPY_LONGDOUBLE +#define NPY_COMPLEX64 NPY_CLONGDOUBLE + typedef npy_longdouble npy_float32; + typedef npy_clongdouble npy_complex64; +# define PyFloat32ScalarObject PyLongDoubleScalarObject +# define PyComplex64ScalarObject PyCLongDoubleScalarObject +# define PyFloat32ArrType_Type PyLongDoubleArrType_Type +# define PyComplex64ArrType_Type PyCLongDoubleArrType_Type +#define NPY_FLOAT32_FMT NPY_LONGDOUBLE_FMT +#define NPY_COMPLEX64_FMT NPY_CLONGDOUBLE_FMT +#endif +#elif NPY_BITSOF_LONGDOUBLE == 64 +#ifndef NPY_FLOAT64 +#define NPY_FLOAT64 NPY_LONGDOUBLE +#define NPY_COMPLEX128 NPY_CLONGDOUBLE + typedef npy_longdouble npy_float64; + typedef npy_clongdouble npy_complex128; +# define PyFloat64ScalarObject PyLongDoubleScalarObject +# define PyComplex128ScalarObject PyCLongDoubleScalarObject +# define PyFloat64ArrType_Type PyLongDoubleArrType_Type +# define PyComplex128ArrType_Type PyCLongDoubleArrType_Type +#define NPY_FLOAT64_FMT NPY_LONGDOUBLE_FMT +#define NPY_COMPLEX128_FMT NPY_CLONGDOUBLE_FMT +#endif +#elif NPY_BITSOF_LONGDOUBLE == 80 +#ifndef NPY_FLOAT80 +#define NPY_FLOAT80 NPY_LONGDOUBLE +#define NPY_COMPLEX160 NPY_CLONGDOUBLE + typedef npy_longdouble npy_float80; + typedef npy_clongdouble npy_complex160; +# define PyFloat80ScalarObject PyLongDoubleScalarObject +# define PyComplex160ScalarObject PyCLongDoubleScalarObject +# define PyFloat80ArrType_Type PyLongDoubleArrType_Type +# define PyComplex160ArrType_Type PyCLongDoubleArrType_Type +#define NPY_FLOAT80_FMT NPY_LONGDOUBLE_FMT +#define NPY_COMPLEX160_FMT NPY_CLONGDOUBLE_FMT +#endif +#elif NPY_BITSOF_LONGDOUBLE == 96 +#ifndef NPY_FLOAT96 +#define NPY_FLOAT96 NPY_LONGDOUBLE +#define NPY_COMPLEX192 NPY_CLONGDOUBLE + typedef npy_longdouble npy_float96; + typedef npy_clongdouble npy_complex192; +# define PyFloat96ScalarObject PyLongDoubleScalarObject +# define PyComplex192ScalarObject PyCLongDoubleScalarObject +# define PyFloat96ArrType_Type PyLongDoubleArrType_Type +# define PyComplex192ArrType_Type PyCLongDoubleArrType_Type +#define NPY_FLOAT96_FMT NPY_LONGDOUBLE_FMT +#define NPY_COMPLEX192_FMT NPY_CLONGDOUBLE_FMT +#endif +#elif NPY_BITSOF_LONGDOUBLE == 128 +#ifndef NPY_FLOAT128 +#define NPY_FLOAT128 NPY_LONGDOUBLE +#define NPY_COMPLEX256 NPY_CLONGDOUBLE + typedef npy_longdouble npy_float128; + typedef npy_clongdouble npy_complex256; +# define PyFloat128ScalarObject PyLongDoubleScalarObject +# define PyComplex256ScalarObject PyCLongDoubleScalarObject +# define PyFloat128ArrType_Type PyLongDoubleArrType_Type +# define PyComplex256ArrType_Type PyCLongDoubleArrType_Type +#define NPY_FLOAT128_FMT NPY_LONGDOUBLE_FMT +#define NPY_COMPLEX256_FMT NPY_CLONGDOUBLE_FMT +#endif +#elif NPY_BITSOF_LONGDOUBLE == 256 +#define NPY_FLOAT256 NPY_LONGDOUBLE +#define NPY_COMPLEX512 NPY_CLONGDOUBLE + typedef npy_longdouble npy_float256; + typedef npy_clongdouble npy_complex512; +# define PyFloat256ScalarObject PyLongDoubleScalarObject +# define PyComplex512ScalarObject PyCLongDoubleScalarObject +# define PyFloat256ArrType_Type PyLongDoubleArrType_Type +# define PyComplex512ArrType_Type PyCLongDoubleArrType_Type 
+#define NPY_FLOAT256_FMT NPY_LONGDOUBLE_FMT +#define NPY_COMPLEX512_FMT NPY_CLONGDOUBLE_FMT +#endif + +/* datetime typedefs */ +typedef npy_int64 npy_timedelta; +typedef npy_int64 npy_datetime; +#define NPY_DATETIME_FMT NPY_INT64_FMT +#define NPY_TIMEDELTA_FMT NPY_INT64_FMT + +/* End of typedefs for numarray style bit-width names */ + +#endif /* NUMPY_CORE_INCLUDE_NUMPY_NPY_COMMON_H_ */ diff --git a/.venv/lib/python3.12/site-packages/numpy/core/include/numpy/npy_cpu.h b/.venv/lib/python3.12/site-packages/numpy/core/include/numpy/npy_cpu.h new file mode 100644 index 00000000..a19f8e6b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/include/numpy/npy_cpu.h @@ -0,0 +1,129 @@ +/* + * This set (target) cpu specific macros: + * - Possible values: + * NPY_CPU_X86 + * NPY_CPU_AMD64 + * NPY_CPU_PPC + * NPY_CPU_PPC64 + * NPY_CPU_PPC64LE + * NPY_CPU_SPARC + * NPY_CPU_S390 + * NPY_CPU_IA64 + * NPY_CPU_HPPA + * NPY_CPU_ALPHA + * NPY_CPU_ARMEL + * NPY_CPU_ARMEB + * NPY_CPU_SH_LE + * NPY_CPU_SH_BE + * NPY_CPU_ARCEL + * NPY_CPU_ARCEB + * NPY_CPU_RISCV64 + * NPY_CPU_LOONGARCH + * NPY_CPU_WASM + */ +#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_CPU_H_ +#define NUMPY_CORE_INCLUDE_NUMPY_NPY_CPU_H_ + +#include "numpyconfig.h" + +#if defined( __i386__ ) || defined(i386) || defined(_M_IX86) + /* + * __i386__ is defined by gcc and Intel compiler on Linux, + * _M_IX86 by VS compiler, + * i386 by Sun compilers on opensolaris at least + */ + #define NPY_CPU_X86 +#elif defined(__x86_64__) || defined(__amd64__) || defined(__x86_64) || defined(_M_AMD64) + /* + * both __x86_64__ and __amd64__ are defined by gcc + * __x86_64 defined by sun compiler on opensolaris at least + * _M_AMD64 defined by MS compiler + */ + #define NPY_CPU_AMD64 +#elif defined(__powerpc64__) && defined(__LITTLE_ENDIAN__) + #define NPY_CPU_PPC64LE +#elif defined(__powerpc64__) && defined(__BIG_ENDIAN__) + #define NPY_CPU_PPC64 +#elif defined(__ppc__) || defined(__powerpc__) || defined(_ARCH_PPC) + /* + * __ppc__ is defined by gcc, I remember having seen __powerpc__ once, + * but can't find it ATM + * _ARCH_PPC is used by at least gcc on AIX + * As __powerpc__ and _ARCH_PPC are also defined by PPC64 check + * for those specifically first before defaulting to ppc + */ + #define NPY_CPU_PPC +#elif defined(__sparc__) || defined(__sparc) + /* __sparc__ is defined by gcc and Forte (e.g. 
Sun) compilers */ + #define NPY_CPU_SPARC +#elif defined(__s390__) + #define NPY_CPU_S390 +#elif defined(__ia64) + #define NPY_CPU_IA64 +#elif defined(__hppa) + #define NPY_CPU_HPPA +#elif defined(__alpha__) + #define NPY_CPU_ALPHA +#elif defined(__arm__) || defined(__aarch64__) || defined(_M_ARM64) + /* _M_ARM64 is defined in MSVC for ARM64 compilation on Windows */ + #if defined(__ARMEB__) || defined(__AARCH64EB__) + #if defined(__ARM_32BIT_STATE) + #define NPY_CPU_ARMEB_AARCH32 + #elif defined(__ARM_64BIT_STATE) + #define NPY_CPU_ARMEB_AARCH64 + #else + #define NPY_CPU_ARMEB + #endif + #elif defined(__ARMEL__) || defined(__AARCH64EL__) || defined(_M_ARM64) + #if defined(__ARM_32BIT_STATE) + #define NPY_CPU_ARMEL_AARCH32 + #elif defined(__ARM_64BIT_STATE) || defined(_M_ARM64) || defined(__AARCH64EL__) + #define NPY_CPU_ARMEL_AARCH64 + #else + #define NPY_CPU_ARMEL + #endif + #else + # error Unknown ARM CPU, please report this to numpy maintainers with \ + information about your platform (OS, CPU and compiler) + #endif +#elif defined(__sh__) && defined(__LITTLE_ENDIAN__) + #define NPY_CPU_SH_LE +#elif defined(__sh__) && defined(__BIG_ENDIAN__) + #define NPY_CPU_SH_BE +#elif defined(__MIPSEL__) + #define NPY_CPU_MIPSEL +#elif defined(__MIPSEB__) + #define NPY_CPU_MIPSEB +#elif defined(__or1k__) + #define NPY_CPU_OR1K +#elif defined(__mc68000__) + #define NPY_CPU_M68K +#elif defined(__arc__) && defined(__LITTLE_ENDIAN__) + #define NPY_CPU_ARCEL +#elif defined(__arc__) && defined(__BIG_ENDIAN__) + #define NPY_CPU_ARCEB +#elif defined(__riscv) && defined(__riscv_xlen) && __riscv_xlen == 64 + #define NPY_CPU_RISCV64 +#elif defined(__loongarch__) + #define NPY_CPU_LOONGARCH +#elif defined(__EMSCRIPTEN__) + /* __EMSCRIPTEN__ is defined by emscripten: an LLVM-to-Web compiler */ + #define NPY_CPU_WASM +#else + #error Unknown CPU, please report this to numpy maintainers with \ + information about your platform (OS, CPU and compiler) +#endif + +/* + * Except for the following architectures, memory access is limited to the natural + * alignment of data types otherwise it may lead to bus error or performance regression. + * For more details about unaligned access, see https://www.kernel.org/doc/Documentation/unaligned-memory-access.txt. 
+*/ +#if defined(NPY_CPU_X86) || defined(NPY_CPU_AMD64) || defined(__aarch64__) || defined(__powerpc64__) + #define NPY_ALIGNMENT_REQUIRED 0 +#endif +#ifndef NPY_ALIGNMENT_REQUIRED + #define NPY_ALIGNMENT_REQUIRED 1 +#endif + +#endif /* NUMPY_CORE_INCLUDE_NUMPY_NPY_CPU_H_ */ diff --git a/.venv/lib/python3.12/site-packages/numpy/core/include/numpy/npy_endian.h b/.venv/lib/python3.12/site-packages/numpy/core/include/numpy/npy_endian.h new file mode 100644 index 00000000..5e58a7f5 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/include/numpy/npy_endian.h @@ -0,0 +1,77 @@ +#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_ENDIAN_H_ +#define NUMPY_CORE_INCLUDE_NUMPY_NPY_ENDIAN_H_ + +/* + * NPY_BYTE_ORDER is set to the same value as BYTE_ORDER set by glibc in + * endian.h + */ + +#if defined(NPY_HAVE_ENDIAN_H) || defined(NPY_HAVE_SYS_ENDIAN_H) + /* Use endian.h if available */ + + #if defined(NPY_HAVE_ENDIAN_H) + #include <endian.h> + #elif defined(NPY_HAVE_SYS_ENDIAN_H) + #include <sys/endian.h> + #endif + + #if defined(BYTE_ORDER) && defined(BIG_ENDIAN) && defined(LITTLE_ENDIAN) + #define NPY_BYTE_ORDER BYTE_ORDER + #define NPY_LITTLE_ENDIAN LITTLE_ENDIAN + #define NPY_BIG_ENDIAN BIG_ENDIAN + #elif defined(_BYTE_ORDER) && defined(_BIG_ENDIAN) && defined(_LITTLE_ENDIAN) + #define NPY_BYTE_ORDER _BYTE_ORDER + #define NPY_LITTLE_ENDIAN _LITTLE_ENDIAN + #define NPY_BIG_ENDIAN _BIG_ENDIAN + #elif defined(__BYTE_ORDER) && defined(__BIG_ENDIAN) && defined(__LITTLE_ENDIAN) + #define NPY_BYTE_ORDER __BYTE_ORDER + #define NPY_LITTLE_ENDIAN __LITTLE_ENDIAN + #define NPY_BIG_ENDIAN __BIG_ENDIAN + #endif +#endif + +#ifndef NPY_BYTE_ORDER + /* Set endianness info using target CPU */ + #include "npy_cpu.h" + + #define NPY_LITTLE_ENDIAN 1234 + #define NPY_BIG_ENDIAN 4321 + + #if defined(NPY_CPU_X86) \ + || defined(NPY_CPU_AMD64) \ + || defined(NPY_CPU_IA64) \ + || defined(NPY_CPU_ALPHA) \ + || defined(NPY_CPU_ARMEL) \ + || defined(NPY_CPU_ARMEL_AARCH32) \ + || defined(NPY_CPU_ARMEL_AARCH64) \ + || defined(NPY_CPU_SH_LE) \ + || defined(NPY_CPU_MIPSEL) \ + || defined(NPY_CPU_PPC64LE) \ + || defined(NPY_CPU_ARCEL) \ + || defined(NPY_CPU_RISCV64) \ + || defined(NPY_CPU_LOONGARCH) \ + || defined(NPY_CPU_WASM) + #define NPY_BYTE_ORDER NPY_LITTLE_ENDIAN + + #elif defined(NPY_CPU_PPC) \ + || defined(NPY_CPU_SPARC) \ + || defined(NPY_CPU_S390) \ + || defined(NPY_CPU_HPPA) \ + || defined(NPY_CPU_PPC64) \ + || defined(NPY_CPU_ARMEB) \ + || defined(NPY_CPU_ARMEB_AARCH32) \ + || defined(NPY_CPU_ARMEB_AARCH64) \ + || defined(NPY_CPU_SH_BE) \ + || defined(NPY_CPU_MIPSEB) \ + || defined(NPY_CPU_OR1K) \ + || defined(NPY_CPU_M68K) \ + || defined(NPY_CPU_ARCEB) + #define NPY_BYTE_ORDER NPY_BIG_ENDIAN + + #else + #error Unknown CPU: can not set endianness + #endif + +#endif + +#endif /* NUMPY_CORE_INCLUDE_NUMPY_NPY_ENDIAN_H_ */ diff --git a/.venv/lib/python3.12/site-packages/numpy/core/include/numpy/npy_interrupt.h b/.venv/lib/python3.12/site-packages/numpy/core/include/numpy/npy_interrupt.h new file mode 100644 index 00000000..69a0374d --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/include/numpy/npy_interrupt.h @@ -0,0 +1,56 @@ +/* + * This API is only provided because it is part of publicly exported + * headers. Its use is considered DEPRECATED, and it will be removed + * eventually. + * (This includes the _PyArray_SigintHandler and _PyArray_GetSigintBuf + * functions which are however, public API, and not headers.) 
+ * + * Instead of using these non-threadsafe macros consider periodically + * querying `PyErr_CheckSignals()` or `PyOS_InterruptOccurred()` will work. + * Both of these require holding the GIL, although cpython could add a + * version of `PyOS_InterruptOccurred()` which does not. Such a version + * actually exists as private API in Python 3.10, and backported to 3.9 and 3.8, + * see also https://bugs.python.org/issue41037 and + * https://github.com/python/cpython/pull/20599). + */ + +#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_INTERRUPT_H_ +#define NUMPY_CORE_INCLUDE_NUMPY_NPY_INTERRUPT_H_ + +#ifndef NPY_NO_SIGNAL + +#include <setjmp.h> +#include <signal.h> + +#ifndef sigsetjmp + +#define NPY_SIGSETJMP(arg1, arg2) setjmp(arg1) +#define NPY_SIGLONGJMP(arg1, arg2) longjmp(arg1, arg2) +#define NPY_SIGJMP_BUF jmp_buf + +#else + +#define NPY_SIGSETJMP(arg1, arg2) sigsetjmp(arg1, arg2) +#define NPY_SIGLONGJMP(arg1, arg2) siglongjmp(arg1, arg2) +#define NPY_SIGJMP_BUF sigjmp_buf + +#endif + +# define NPY_SIGINT_ON { \ + PyOS_sighandler_t _npy_sig_save; \ + _npy_sig_save = PyOS_setsig(SIGINT, _PyArray_SigintHandler); \ + if (NPY_SIGSETJMP(*((NPY_SIGJMP_BUF *)_PyArray_GetSigintBuf()), \ + 1) == 0) { \ + +# define NPY_SIGINT_OFF } \ + PyOS_setsig(SIGINT, _npy_sig_save); \ + } + +#else /* NPY_NO_SIGNAL */ + +#define NPY_SIGINT_ON +#define NPY_SIGINT_OFF + +#endif /* HAVE_SIGSETJMP */ + +#endif /* NUMPY_CORE_INCLUDE_NUMPY_NPY_INTERRUPT_H_ */ diff --git a/.venv/lib/python3.12/site-packages/numpy/core/include/numpy/npy_math.h b/.venv/lib/python3.12/site-packages/numpy/core/include/numpy/npy_math.h new file mode 100644 index 00000000..2fcd41eb --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/include/numpy/npy_math.h @@ -0,0 +1,563 @@ +#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_MATH_H_ +#define NUMPY_CORE_INCLUDE_NUMPY_NPY_MATH_H_ + +#include <numpy/npy_common.h> + +#include <math.h> + +/* By adding static inline specifiers to npy_math function definitions when + appropriate, compiler is given the opportunity to optimize */ +#if NPY_INLINE_MATH +#define NPY_INPLACE static inline +#else +#define NPY_INPLACE +#endif + + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * NAN and INFINITY like macros (same behavior as glibc for NAN, same as C99 + * for INFINITY) + * + * XXX: I should test whether INFINITY and NAN are available on the platform + */ +static inline float __npy_inff(void) +{ + const union { npy_uint32 __i; float __f;} __bint = {0x7f800000UL}; + return __bint.__f; +} + +static inline float __npy_nanf(void) +{ + const union { npy_uint32 __i; float __f;} __bint = {0x7fc00000UL}; + return __bint.__f; +} + +static inline float __npy_pzerof(void) +{ + const union { npy_uint32 __i; float __f;} __bint = {0x00000000UL}; + return __bint.__f; +} + +static inline float __npy_nzerof(void) +{ + const union { npy_uint32 __i; float __f;} __bint = {0x80000000UL}; + return __bint.__f; +} + +#define NPY_INFINITYF __npy_inff() +#define NPY_NANF __npy_nanf() +#define NPY_PZEROF __npy_pzerof() +#define NPY_NZEROF __npy_nzerof() + +#define NPY_INFINITY ((npy_double)NPY_INFINITYF) +#define NPY_NAN ((npy_double)NPY_NANF) +#define NPY_PZERO ((npy_double)NPY_PZEROF) +#define NPY_NZERO ((npy_double)NPY_NZEROF) + +#define NPY_INFINITYL ((npy_longdouble)NPY_INFINITYF) +#define NPY_NANL ((npy_longdouble)NPY_NANF) +#define NPY_PZEROL ((npy_longdouble)NPY_PZEROF) +#define NPY_NZEROL ((npy_longdouble)NPY_NZEROF) + +/* + * Useful constants + */ +#define NPY_E 2.718281828459045235360287471352662498 /* e */ +#define NPY_LOG2E 
1.442695040888963407359924681001892137 /* log_2 e */ +#define NPY_LOG10E 0.434294481903251827651128918916605082 /* log_10 e */ +#define NPY_LOGE2 0.693147180559945309417232121458176568 /* log_e 2 */ +#define NPY_LOGE10 2.302585092994045684017991454684364208 /* log_e 10 */ +#define NPY_PI 3.141592653589793238462643383279502884 /* pi */ +#define NPY_PI_2 1.570796326794896619231321691639751442 /* pi/2 */ +#define NPY_PI_4 0.785398163397448309615660845819875721 /* pi/4 */ +#define NPY_1_PI 0.318309886183790671537767526745028724 /* 1/pi */ +#define NPY_2_PI 0.636619772367581343075535053490057448 /* 2/pi */ +#define NPY_EULER 0.577215664901532860606512090082402431 /* Euler constant */ +#define NPY_SQRT2 1.414213562373095048801688724209698079 /* sqrt(2) */ +#define NPY_SQRT1_2 0.707106781186547524400844362104849039 /* 1/sqrt(2) */ + +#define NPY_Ef 2.718281828459045235360287471352662498F /* e */ +#define NPY_LOG2Ef 1.442695040888963407359924681001892137F /* log_2 e */ +#define NPY_LOG10Ef 0.434294481903251827651128918916605082F /* log_10 e */ +#define NPY_LOGE2f 0.693147180559945309417232121458176568F /* log_e 2 */ +#define NPY_LOGE10f 2.302585092994045684017991454684364208F /* log_e 10 */ +#define NPY_PIf 3.141592653589793238462643383279502884F /* pi */ +#define NPY_PI_2f 1.570796326794896619231321691639751442F /* pi/2 */ +#define NPY_PI_4f 0.785398163397448309615660845819875721F /* pi/4 */ +#define NPY_1_PIf 0.318309886183790671537767526745028724F /* 1/pi */ +#define NPY_2_PIf 0.636619772367581343075535053490057448F /* 2/pi */ +#define NPY_EULERf 0.577215664901532860606512090082402431F /* Euler constant */ +#define NPY_SQRT2f 1.414213562373095048801688724209698079F /* sqrt(2) */ +#define NPY_SQRT1_2f 0.707106781186547524400844362104849039F /* 1/sqrt(2) */ + +#define NPY_El 2.718281828459045235360287471352662498L /* e */ +#define NPY_LOG2El 1.442695040888963407359924681001892137L /* log_2 e */ +#define NPY_LOG10El 0.434294481903251827651128918916605082L /* log_10 e */ +#define NPY_LOGE2l 0.693147180559945309417232121458176568L /* log_e 2 */ +#define NPY_LOGE10l 2.302585092994045684017991454684364208L /* log_e 10 */ +#define NPY_PIl 3.141592653589793238462643383279502884L /* pi */ +#define NPY_PI_2l 1.570796326794896619231321691639751442L /* pi/2 */ +#define NPY_PI_4l 0.785398163397448309615660845819875721L /* pi/4 */ +#define NPY_1_PIl 0.318309886183790671537767526745028724L /* 1/pi */ +#define NPY_2_PIl 0.636619772367581343075535053490057448L /* 2/pi */ +#define NPY_EULERl 0.577215664901532860606512090082402431L /* Euler constant */ +#define NPY_SQRT2l 1.414213562373095048801688724209698079L /* sqrt(2) */ +#define NPY_SQRT1_2l 0.707106781186547524400844362104849039L /* 1/sqrt(2) */ + +/* + * Integer functions. 
+ */ +NPY_INPLACE npy_uint npy_gcdu(npy_uint a, npy_uint b); +NPY_INPLACE npy_uint npy_lcmu(npy_uint a, npy_uint b); +NPY_INPLACE npy_ulong npy_gcdul(npy_ulong a, npy_ulong b); +NPY_INPLACE npy_ulong npy_lcmul(npy_ulong a, npy_ulong b); +NPY_INPLACE npy_ulonglong npy_gcdull(npy_ulonglong a, npy_ulonglong b); +NPY_INPLACE npy_ulonglong npy_lcmull(npy_ulonglong a, npy_ulonglong b); + +NPY_INPLACE npy_int npy_gcd(npy_int a, npy_int b); +NPY_INPLACE npy_int npy_lcm(npy_int a, npy_int b); +NPY_INPLACE npy_long npy_gcdl(npy_long a, npy_long b); +NPY_INPLACE npy_long npy_lcml(npy_long a, npy_long b); +NPY_INPLACE npy_longlong npy_gcdll(npy_longlong a, npy_longlong b); +NPY_INPLACE npy_longlong npy_lcmll(npy_longlong a, npy_longlong b); + +NPY_INPLACE npy_ubyte npy_rshiftuhh(npy_ubyte a, npy_ubyte b); +NPY_INPLACE npy_ubyte npy_lshiftuhh(npy_ubyte a, npy_ubyte b); +NPY_INPLACE npy_ushort npy_rshiftuh(npy_ushort a, npy_ushort b); +NPY_INPLACE npy_ushort npy_lshiftuh(npy_ushort a, npy_ushort b); +NPY_INPLACE npy_uint npy_rshiftu(npy_uint a, npy_uint b); +NPY_INPLACE npy_uint npy_lshiftu(npy_uint a, npy_uint b); +NPY_INPLACE npy_ulong npy_rshiftul(npy_ulong a, npy_ulong b); +NPY_INPLACE npy_ulong npy_lshiftul(npy_ulong a, npy_ulong b); +NPY_INPLACE npy_ulonglong npy_rshiftull(npy_ulonglong a, npy_ulonglong b); +NPY_INPLACE npy_ulonglong npy_lshiftull(npy_ulonglong a, npy_ulonglong b); + +NPY_INPLACE npy_byte npy_rshifthh(npy_byte a, npy_byte b); +NPY_INPLACE npy_byte npy_lshifthh(npy_byte a, npy_byte b); +NPY_INPLACE npy_short npy_rshifth(npy_short a, npy_short b); +NPY_INPLACE npy_short npy_lshifth(npy_short a, npy_short b); +NPY_INPLACE npy_int npy_rshift(npy_int a, npy_int b); +NPY_INPLACE npy_int npy_lshift(npy_int a, npy_int b); +NPY_INPLACE npy_long npy_rshiftl(npy_long a, npy_long b); +NPY_INPLACE npy_long npy_lshiftl(npy_long a, npy_long b); +NPY_INPLACE npy_longlong npy_rshiftll(npy_longlong a, npy_longlong b); +NPY_INPLACE npy_longlong npy_lshiftll(npy_longlong a, npy_longlong b); + +NPY_INPLACE uint8_t npy_popcountuhh(npy_ubyte a); +NPY_INPLACE uint8_t npy_popcountuh(npy_ushort a); +NPY_INPLACE uint8_t npy_popcountu(npy_uint a); +NPY_INPLACE uint8_t npy_popcountul(npy_ulong a); +NPY_INPLACE uint8_t npy_popcountull(npy_ulonglong a); +NPY_INPLACE uint8_t npy_popcounthh(npy_byte a); +NPY_INPLACE uint8_t npy_popcounth(npy_short a); +NPY_INPLACE uint8_t npy_popcount(npy_int a); +NPY_INPLACE uint8_t npy_popcountl(npy_long a); +NPY_INPLACE uint8_t npy_popcountll(npy_longlong a); + +/* + * C99 double math funcs that need fixups or are blocklist-able + */ +NPY_INPLACE double npy_sin(double x); +NPY_INPLACE double npy_cos(double x); +NPY_INPLACE double npy_tan(double x); +NPY_INPLACE double npy_hypot(double x, double y); +NPY_INPLACE double npy_log2(double x); +NPY_INPLACE double npy_atan2(double x, double y); + +/* Mandatory C99 double math funcs, no blocklisting or fixups */ +/* defined for legacy reasons, should be deprecated at some point */ +#define npy_sinh sinh +#define npy_cosh cosh +#define npy_tanh tanh +#define npy_asin asin +#define npy_acos acos +#define npy_atan atan +#define npy_log log +#define npy_log10 log10 +#define npy_cbrt cbrt +#define npy_fabs fabs +#define npy_ceil ceil +#define npy_fmod fmod +#define npy_floor floor +#define npy_expm1 expm1 +#define npy_log1p log1p +#define npy_acosh acosh +#define npy_asinh asinh +#define npy_atanh atanh +#define npy_rint rint +#define npy_trunc trunc +#define npy_exp2 exp2 +#define npy_frexp frexp +#define npy_ldexp ldexp +#define 
npy_copysign copysign +#define npy_exp exp +#define npy_sqrt sqrt +#define npy_pow pow +#define npy_modf modf +#define npy_nextafter nextafter + +double npy_spacing(double x); + +/* + * IEEE 754 fpu handling + */ + +/* use builtins to avoid function calls in tight loops + * only available if npy_config.h is available (= numpys own build) */ +#ifdef HAVE___BUILTIN_ISNAN + #define npy_isnan(x) __builtin_isnan(x) +#else + #define npy_isnan(x) isnan(x) +#endif + + +/* only available if npy_config.h is available (= numpys own build) */ +#ifdef HAVE___BUILTIN_ISFINITE + #define npy_isfinite(x) __builtin_isfinite(x) +#else + #define npy_isfinite(x) isfinite((x)) +#endif + +/* only available if npy_config.h is available (= numpys own build) */ +#ifdef HAVE___BUILTIN_ISINF + #define npy_isinf(x) __builtin_isinf(x) +#else + #define npy_isinf(x) isinf((x)) +#endif + +#define npy_signbit(x) signbit((x)) + +/* + * float C99 math funcs that need fixups or are blocklist-able + */ +NPY_INPLACE float npy_sinf(float x); +NPY_INPLACE float npy_cosf(float x); +NPY_INPLACE float npy_tanf(float x); +NPY_INPLACE float npy_expf(float x); +NPY_INPLACE float npy_sqrtf(float x); +NPY_INPLACE float npy_hypotf(float x, float y); +NPY_INPLACE float npy_log2f(float x); +NPY_INPLACE float npy_atan2f(float x, float y); +NPY_INPLACE float npy_powf(float x, float y); +NPY_INPLACE float npy_modff(float x, float* y); + +/* Mandatory C99 float math funcs, no blocklisting or fixups */ +/* defined for legacy reasons, should be deprecated at some point */ + +#define npy_sinhf sinhf +#define npy_coshf coshf +#define npy_tanhf tanhf +#define npy_asinf asinf +#define npy_acosf acosf +#define npy_atanf atanf +#define npy_logf logf +#define npy_log10f log10f +#define npy_cbrtf cbrtf +#define npy_fabsf fabsf +#define npy_ceilf ceilf +#define npy_fmodf fmodf +#define npy_floorf floorf +#define npy_expm1f expm1f +#define npy_log1pf log1pf +#define npy_asinhf asinhf +#define npy_acoshf acoshf +#define npy_atanhf atanhf +#define npy_rintf rintf +#define npy_truncf truncf +#define npy_exp2f exp2f +#define npy_frexpf frexpf +#define npy_ldexpf ldexpf +#define npy_copysignf copysignf +#define npy_nextafterf nextafterf + +float npy_spacingf(float x); + +/* + * long double C99 double math funcs that need fixups or are blocklist-able + */ +NPY_INPLACE npy_longdouble npy_sinl(npy_longdouble x); +NPY_INPLACE npy_longdouble npy_cosl(npy_longdouble x); +NPY_INPLACE npy_longdouble npy_tanl(npy_longdouble x); +NPY_INPLACE npy_longdouble npy_expl(npy_longdouble x); +NPY_INPLACE npy_longdouble npy_sqrtl(npy_longdouble x); +NPY_INPLACE npy_longdouble npy_hypotl(npy_longdouble x, npy_longdouble y); +NPY_INPLACE npy_longdouble npy_log2l(npy_longdouble x); +NPY_INPLACE npy_longdouble npy_atan2l(npy_longdouble x, npy_longdouble y); +NPY_INPLACE npy_longdouble npy_powl(npy_longdouble x, npy_longdouble y); +NPY_INPLACE npy_longdouble npy_modfl(npy_longdouble x, npy_longdouble* y); + +/* Mandatory C99 double math funcs, no blocklisting or fixups */ +/* defined for legacy reasons, should be deprecated at some point */ +#define npy_sinhl sinhl +#define npy_coshl coshl +#define npy_tanhl tanhl +#define npy_fabsl fabsl +#define npy_floorl floorl +#define npy_ceill ceill +#define npy_rintl rintl +#define npy_truncl truncl +#define npy_cbrtl cbrtl +#define npy_log10l log10l +#define npy_logl logl +#define npy_expm1l expm1l +#define npy_asinl asinl +#define npy_acosl acosl +#define npy_atanl atanl +#define npy_asinhl asinhl +#define npy_acoshl acoshl +#define 
npy_atanhl atanhl +#define npy_log1pl log1pl +#define npy_exp2l exp2l +#define npy_fmodl fmodl +#define npy_frexpl frexpl +#define npy_ldexpl ldexpl +#define npy_copysignl copysignl +#define npy_nextafterl nextafterl + +npy_longdouble npy_spacingl(npy_longdouble x); + +/* + * Non standard functions + */ +NPY_INPLACE double npy_deg2rad(double x); +NPY_INPLACE double npy_rad2deg(double x); +NPY_INPLACE double npy_logaddexp(double x, double y); +NPY_INPLACE double npy_logaddexp2(double x, double y); +NPY_INPLACE double npy_divmod(double x, double y, double *modulus); +NPY_INPLACE double npy_heaviside(double x, double h0); + +NPY_INPLACE float npy_deg2radf(float x); +NPY_INPLACE float npy_rad2degf(float x); +NPY_INPLACE float npy_logaddexpf(float x, float y); +NPY_INPLACE float npy_logaddexp2f(float x, float y); +NPY_INPLACE float npy_divmodf(float x, float y, float *modulus); +NPY_INPLACE float npy_heavisidef(float x, float h0); + +NPY_INPLACE npy_longdouble npy_deg2radl(npy_longdouble x); +NPY_INPLACE npy_longdouble npy_rad2degl(npy_longdouble x); +NPY_INPLACE npy_longdouble npy_logaddexpl(npy_longdouble x, npy_longdouble y); +NPY_INPLACE npy_longdouble npy_logaddexp2l(npy_longdouble x, npy_longdouble y); +NPY_INPLACE npy_longdouble npy_divmodl(npy_longdouble x, npy_longdouble y, + npy_longdouble *modulus); +NPY_INPLACE npy_longdouble npy_heavisidel(npy_longdouble x, npy_longdouble h0); + +#define npy_degrees npy_rad2deg +#define npy_degreesf npy_rad2degf +#define npy_degreesl npy_rad2degl + +#define npy_radians npy_deg2rad +#define npy_radiansf npy_deg2radf +#define npy_radiansl npy_deg2radl + +/* + * Complex declarations + */ + +/* + * C99 specifies that complex numbers have the same representation as + * an array of two elements, where the first element is the real part + * and the second element is the imaginary part. 
 + */ +#define __NPY_CPACK_IMP(x, y, type, ctype) \ + union { \ + ctype z; \ + type a[2]; \ + } z1; \ + \ + z1.a[0] = (x); \ + z1.a[1] = (y); \ + \ + return z1.z; + +static inline npy_cdouble npy_cpack(double x, double y) +{ + __NPY_CPACK_IMP(x, y, double, npy_cdouble); +} + +static inline npy_cfloat npy_cpackf(float x, float y) +{ + __NPY_CPACK_IMP(x, y, float, npy_cfloat); +} + +static inline npy_clongdouble npy_cpackl(npy_longdouble x, npy_longdouble y) +{ + __NPY_CPACK_IMP(x, y, npy_longdouble, npy_clongdouble); +} +#undef __NPY_CPACK_IMP + +/* + * Same remark as above, but in the other direction: extract first/second + * member of a complex number, assuming a C99-compatible representation + * + * Those are defined as static inline, and such that a reasonable compiler would + * most likely compile this to one or two instructions (on CISC at least) + */ +#define __NPY_CEXTRACT_IMP(z, index, type, ctype) \ + union { \ + ctype z; \ + type a[2]; \ + } __z_repr; \ + __z_repr.z = z; \ + \ + return __z_repr.a[index]; + +static inline double npy_creal(npy_cdouble z) +{ + __NPY_CEXTRACT_IMP(z, 0, double, npy_cdouble); +} + +static inline double npy_cimag(npy_cdouble z) +{ + __NPY_CEXTRACT_IMP(z, 1, double, npy_cdouble); +} + +static inline float npy_crealf(npy_cfloat z) +{ + __NPY_CEXTRACT_IMP(z, 0, float, npy_cfloat); +} + +static inline float npy_cimagf(npy_cfloat z) +{ + __NPY_CEXTRACT_IMP(z, 1, float, npy_cfloat); +} + +static inline npy_longdouble npy_creall(npy_clongdouble z) +{ + __NPY_CEXTRACT_IMP(z, 0, npy_longdouble, npy_clongdouble); +} + +static inline npy_longdouble npy_cimagl(npy_clongdouble z) +{ + __NPY_CEXTRACT_IMP(z, 1, npy_longdouble, npy_clongdouble); +} +#undef __NPY_CEXTRACT_IMP + +/* + * Double precision complex functions + */ +double npy_cabs(npy_cdouble z); +double npy_carg(npy_cdouble z); + +npy_cdouble npy_cexp(npy_cdouble z); +npy_cdouble npy_clog(npy_cdouble z); +npy_cdouble npy_cpow(npy_cdouble x, npy_cdouble y); + +npy_cdouble npy_csqrt(npy_cdouble z); + +npy_cdouble npy_ccos(npy_cdouble z); +npy_cdouble npy_csin(npy_cdouble z); +npy_cdouble npy_ctan(npy_cdouble z); + +npy_cdouble npy_ccosh(npy_cdouble z); +npy_cdouble npy_csinh(npy_cdouble z); +npy_cdouble npy_ctanh(npy_cdouble z); + +npy_cdouble npy_cacos(npy_cdouble z); +npy_cdouble npy_casin(npy_cdouble z); +npy_cdouble npy_catan(npy_cdouble z); + +npy_cdouble npy_cacosh(npy_cdouble z); +npy_cdouble npy_casinh(npy_cdouble z); +npy_cdouble npy_catanh(npy_cdouble z); + +/* + * Single precision complex functions + */ +float npy_cabsf(npy_cfloat z); +float npy_cargf(npy_cfloat z); + +npy_cfloat npy_cexpf(npy_cfloat z); +npy_cfloat npy_clogf(npy_cfloat z); +npy_cfloat npy_cpowf(npy_cfloat x, npy_cfloat y); + +npy_cfloat npy_csqrtf(npy_cfloat z); + +npy_cfloat npy_ccosf(npy_cfloat z); +npy_cfloat npy_csinf(npy_cfloat z); +npy_cfloat npy_ctanf(npy_cfloat z); + +npy_cfloat npy_ccoshf(npy_cfloat z); +npy_cfloat npy_csinhf(npy_cfloat z); +npy_cfloat npy_ctanhf(npy_cfloat z); + +npy_cfloat npy_cacosf(npy_cfloat z); +npy_cfloat npy_casinf(npy_cfloat z); +npy_cfloat npy_catanf(npy_cfloat z); + +npy_cfloat npy_cacoshf(npy_cfloat z); +npy_cfloat npy_casinhf(npy_cfloat z); +npy_cfloat npy_catanhf(npy_cfloat z); + + +/* + * Extended precision complex functions + */ +npy_longdouble npy_cabsl(npy_clongdouble z); +npy_longdouble npy_cargl(npy_clongdouble z); + +npy_clongdouble npy_cexpl(npy_clongdouble z); +npy_clongdouble npy_clogl(npy_clongdouble z); +npy_clongdouble npy_cpowl(npy_clongdouble x, npy_clongdouble y); + 
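(Editorial aside, not part of the vendored header: a minimal sketch of how a C extension might combine the pack/extract helpers above with the complex functions declared in this block. The function name demo_cdouble is illustrative, and the non-inline functions such as npy_cabs typically require linking against the npymath library.)

#include <stdio.h>
#include <numpy/npy_math.h>

static void demo_cdouble(void)
{
    npy_cdouble z = npy_cpack(3.0, 4.0);   /* build z = 3 + 4i via the union trick above */
    double r = npy_cabs(z);                /* |z| == 5.0 */
    printf("re=%g im=%g |z|=%g\n", npy_creal(z), npy_cimag(z), r);
}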
+npy_clongdouble npy_csqrtl(npy_clongdouble z); + +npy_clongdouble npy_ccosl(npy_clongdouble z); +npy_clongdouble npy_csinl(npy_clongdouble z); +npy_clongdouble npy_ctanl(npy_clongdouble z); + +npy_clongdouble npy_ccoshl(npy_clongdouble z); +npy_clongdouble npy_csinhl(npy_clongdouble z); +npy_clongdouble npy_ctanhl(npy_clongdouble z); + +npy_clongdouble npy_cacosl(npy_clongdouble z); +npy_clongdouble npy_casinl(npy_clongdouble z); +npy_clongdouble npy_catanl(npy_clongdouble z); + +npy_clongdouble npy_cacoshl(npy_clongdouble z); +npy_clongdouble npy_casinhl(npy_clongdouble z); +npy_clongdouble npy_catanhl(npy_clongdouble z); + + +/* + * Functions that set the floating point error + * status word. + */ + +/* + * platform-dependent code translates floating point + * status to an integer sum of these values + */ +#define NPY_FPE_DIVIDEBYZERO 1 +#define NPY_FPE_OVERFLOW 2 +#define NPY_FPE_UNDERFLOW 4 +#define NPY_FPE_INVALID 8 + +int npy_clear_floatstatus_barrier(char*); +int npy_get_floatstatus_barrier(char*); +/* + * use caution with these - clang and gcc8.1 are known to reorder calls + * to this form of the function which can defeat the check. The _barrier + * form of the call is preferable, where the argument is + * (char*)&local_variable + */ +int npy_clear_floatstatus(void); +int npy_get_floatstatus(void); + +void npy_set_floatstatus_divbyzero(void); +void npy_set_floatstatus_overflow(void); +void npy_set_floatstatus_underflow(void); +void npy_set_floatstatus_invalid(void); + +#ifdef __cplusplus +} +#endif + +#if NPY_INLINE_MATH +#include "npy_math_internal.h" +#endif + +#endif /* NUMPY_CORE_INCLUDE_NUMPY_NPY_MATH_H_ */ diff --git a/.venv/lib/python3.12/site-packages/numpy/core/include/numpy/npy_no_deprecated_api.h b/.venv/lib/python3.12/site-packages/numpy/core/include/numpy/npy_no_deprecated_api.h new file mode 100644 index 00000000..39658c0b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/include/numpy/npy_no_deprecated_api.h @@ -0,0 +1,20 @@ +/* + * This include file is provided for inclusion in Cython *.pyd files where + * one would like to define the NPY_NO_DEPRECATED_API macro. It can be + * included by + * + * cdef extern from "npy_no_deprecated_api.h": pass + * + */ +#ifndef NPY_NO_DEPRECATED_API + +/* put this check here since there may be multiple includes in C extensions. */ +#if defined(NUMPY_CORE_INCLUDE_NUMPY_NDARRAYTYPES_H_) || \ + defined(NUMPY_CORE_INCLUDE_NUMPY_NPY_DEPRECATED_API_H) || \ + defined(NUMPY_CORE_INCLUDE_NUMPY_OLD_DEFINES_H_) +#error "npy_no_deprecated_api.h" must be first among numpy includes. 
+#else +#define NPY_NO_DEPRECATED_API NPY_API_VERSION +#endif + +#endif /* NPY_NO_DEPRECATED_API */ diff --git a/.venv/lib/python3.12/site-packages/numpy/core/include/numpy/npy_os.h b/.venv/lib/python3.12/site-packages/numpy/core/include/numpy/npy_os.h new file mode 100644 index 00000000..0ce5d78b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/include/numpy/npy_os.h @@ -0,0 +1,42 @@ +#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_OS_H_ +#define NUMPY_CORE_INCLUDE_NUMPY_NPY_OS_H_ + +#if defined(linux) || defined(__linux) || defined(__linux__) + #define NPY_OS_LINUX +#elif defined(__FreeBSD__) || defined(__NetBSD__) || \ + defined(__OpenBSD__) || defined(__DragonFly__) + #define NPY_OS_BSD + #ifdef __FreeBSD__ + #define NPY_OS_FREEBSD + #elif defined(__NetBSD__) + #define NPY_OS_NETBSD + #elif defined(__OpenBSD__) + #define NPY_OS_OPENBSD + #elif defined(__DragonFly__) + #define NPY_OS_DRAGONFLY + #endif +#elif defined(sun) || defined(__sun) + #define NPY_OS_SOLARIS +#elif defined(__CYGWIN__) + #define NPY_OS_CYGWIN +/* We are on Windows.*/ +#elif defined(_WIN32) + /* We are using MinGW (64-bit or 32-bit)*/ + #if defined(__MINGW32__) || defined(__MINGW64__) + #define NPY_OS_MINGW + /* Otherwise, if _WIN64 is defined, we are targeting 64-bit Windows*/ + #elif defined(_WIN64) + #define NPY_OS_WIN64 + /* Otherwise assume we are targeting 32-bit Windows*/ + #else + #define NPY_OS_WIN32 + #endif +#elif defined(__APPLE__) + #define NPY_OS_DARWIN +#elif defined(__HAIKU__) + #define NPY_OS_HAIKU +#else + #define NPY_OS_UNKNOWN +#endif + +#endif /* NUMPY_CORE_INCLUDE_NUMPY_NPY_OS_H_ */ diff --git a/.venv/lib/python3.12/site-packages/numpy/core/include/numpy/numpyconfig.h b/.venv/lib/python3.12/site-packages/numpy/core/include/numpy/numpyconfig.h new file mode 100644 index 00000000..1c25aa5f --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/include/numpy/numpyconfig.h @@ -0,0 +1,138 @@ +#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_NUMPYCONFIG_H_ +#define NUMPY_CORE_INCLUDE_NUMPY_NPY_NUMPYCONFIG_H_ + +#include "_numpyconfig.h" + +/* + * On Mac OS X, because there is only one configuration stage for all the archs + * in universal builds, any macro which depends on the arch needs to be + * hardcoded. + * + * Note that distutils/pip will attempt a universal2 build when Python itself + * is built as universal2, hence this hardcoding is needed even if we do not + * support universal2 wheels anymore (see gh-22796). + * This code block can be removed after we have dropped the setup.py based + * build completely. 
+ */ +#ifdef __APPLE__ + #undef NPY_SIZEOF_LONG + #undef NPY_SIZEOF_PY_INTPTR_T + + #ifdef __LP64__ + #define NPY_SIZEOF_LONG 8 + #define NPY_SIZEOF_PY_INTPTR_T 8 + #else + #define NPY_SIZEOF_LONG 4 + #define NPY_SIZEOF_PY_INTPTR_T 4 + #endif + + #undef NPY_SIZEOF_LONGDOUBLE + #undef NPY_SIZEOF_COMPLEX_LONGDOUBLE + #ifdef HAVE_LDOUBLE_IEEE_DOUBLE_LE + #undef HAVE_LDOUBLE_IEEE_DOUBLE_LE + #endif + #ifdef HAVE_LDOUBLE_INTEL_EXTENDED_16_BYTES_LE + #undef HAVE_LDOUBLE_INTEL_EXTENDED_16_BYTES_LE + #endif + + #if defined(__arm64__) + #define NPY_SIZEOF_LONGDOUBLE 8 + #define NPY_SIZEOF_COMPLEX_LONGDOUBLE 16 + #define HAVE_LDOUBLE_IEEE_DOUBLE_LE 1 + #elif defined(__x86_64) + #define NPY_SIZEOF_LONGDOUBLE 16 + #define NPY_SIZEOF_COMPLEX_LONGDOUBLE 32 + #define HAVE_LDOUBLE_INTEL_EXTENDED_16_BYTES_LE 1 + #elif defined (__i386) + #define NPY_SIZEOF_LONGDOUBLE 12 + #define NPY_SIZEOF_COMPLEX_LONGDOUBLE 24 + #elif defined(__ppc__) || defined (__ppc64__) + #define NPY_SIZEOF_LONGDOUBLE 16 + #define NPY_SIZEOF_COMPLEX_LONGDOUBLE 32 + #else + #error "unknown architecture" + #endif +#endif + + +/** + * To help with both NPY_TARGET_VERSION and the NPY_NO_DEPRECATED_API macro, + * we include API version numbers for specific versions of NumPy. + * To exclude all API that was deprecated as of 1.7, add the following before + * #including any NumPy headers: + * #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION + * The same is true for NPY_TARGET_VERSION, although NumPy will default to + * a backwards compatible build anyway. + */ +#define NPY_1_7_API_VERSION 0x00000007 +#define NPY_1_8_API_VERSION 0x00000008 +#define NPY_1_9_API_VERSION 0x00000009 +#define NPY_1_10_API_VERSION 0x0000000a +#define NPY_1_11_API_VERSION 0x0000000a +#define NPY_1_12_API_VERSION 0x0000000a +#define NPY_1_13_API_VERSION 0x0000000b +#define NPY_1_14_API_VERSION 0x0000000c +#define NPY_1_15_API_VERSION 0x0000000c +#define NPY_1_16_API_VERSION 0x0000000d +#define NPY_1_17_API_VERSION 0x0000000d +#define NPY_1_18_API_VERSION 0x0000000d +#define NPY_1_19_API_VERSION 0x0000000d +#define NPY_1_20_API_VERSION 0x0000000e +#define NPY_1_21_API_VERSION 0x0000000e +#define NPY_1_22_API_VERSION 0x0000000f +#define NPY_1_23_API_VERSION 0x00000010 +#define NPY_1_24_API_VERSION 0x00000010 +#define NPY_1_25_API_VERSION 0x00000011 + + +/* + * Binary compatibility version number. This number is increased + * whenever the C-API is changed such that binary compatibility is + * broken, i.e. whenever a recompile of extension modules is needed. + */ +#define NPY_VERSION NPY_ABI_VERSION + +/* + * Minor API version we are compiling to be compatible with. The version + * Number is always increased when the API changes via: `NPY_API_VERSION` + * (and should maybe just track the NumPy version). + * + * If we have an internal build, we always target the current version of + * course. + * + * For downstream users, we default to an older version to provide them with + * maximum compatibility by default. Downstream can choose to extend that + * default, or narrow it down if they wish to use newer API. If you adjust + * this, consider the Python version support (example for 1.25.x): + * + * NumPy 1.25.x supports Python: 3.9 3.10 3.11 (3.12) + * NumPy 1.19.x supports Python: 3.6 3.7 3.8 3.9 + * NumPy 1.17.x supports Python: 3.5 3.6 3.7 3.8 + * NumPy 1.15.x supports Python: ... 3.6 3.7 + * + * Users of the stable ABI may wish to target the last Python that is not + * end of life. This would be 3.8 at NumPy 1.25 release time. 
+ * 1.17 as default was the choice of oldest-support-numpy at the time and + * has in practice no limit (comapared to 1.19). Even earlier becomes legacy. + */ +#if defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD + /* NumPy internal build, always use current version. */ + #define NPY_FEATURE_VERSION NPY_API_VERSION +#elif defined(NPY_TARGET_VERSION) && NPY_TARGET_VERSION + /* user provided a target version, use it */ + #define NPY_FEATURE_VERSION NPY_TARGET_VERSION +#else + /* Use the default (increase when dropping Python 3.9 support) */ + #define NPY_FEATURE_VERSION NPY_1_19_API_VERSION +#endif + +/* Sanity check the (requested) feature version */ +#if NPY_FEATURE_VERSION > NPY_API_VERSION + #error "NPY_TARGET_VERSION higher than NumPy headers!" +#elif NPY_FEATURE_VERSION < NPY_1_15_API_VERSION + /* No support for irrelevant old targets, no need for error, but warn. */ + #warning "Requested NumPy target lower than supported NumPy 1.15." +#endif + + +#endif /* NUMPY_CORE_INCLUDE_NUMPY_NPY_NUMPYCONFIG_H_ */ diff --git a/.venv/lib/python3.12/site-packages/numpy/core/include/numpy/old_defines.h b/.venv/lib/python3.12/site-packages/numpy/core/include/numpy/old_defines.h new file mode 100644 index 00000000..b3fa6775 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/include/numpy/old_defines.h @@ -0,0 +1,187 @@ +/* This header is deprecated as of NumPy 1.7 */ +#ifndef NUMPY_CORE_INCLUDE_NUMPY_OLD_DEFINES_H_ +#define NUMPY_CORE_INCLUDE_NUMPY_OLD_DEFINES_H_ + +#if defined(NPY_NO_DEPRECATED_API) && NPY_NO_DEPRECATED_API >= NPY_1_7_API_VERSION +#error The header "old_defines.h" is deprecated as of NumPy 1.7. +#endif + +#define NDARRAY_VERSION NPY_VERSION + +#define PyArray_MIN_BUFSIZE NPY_MIN_BUFSIZE +#define PyArray_MAX_BUFSIZE NPY_MAX_BUFSIZE +#define PyArray_BUFSIZE NPY_BUFSIZE + +#define PyArray_PRIORITY NPY_PRIORITY +#define PyArray_SUBTYPE_PRIORITY NPY_PRIORITY +#define PyArray_NUM_FLOATTYPE NPY_NUM_FLOATTYPE + +#define NPY_MAX PyArray_MAX +#define NPY_MIN PyArray_MIN + +#define PyArray_TYPES NPY_TYPES +#define PyArray_BOOL NPY_BOOL +#define PyArray_BYTE NPY_BYTE +#define PyArray_UBYTE NPY_UBYTE +#define PyArray_SHORT NPY_SHORT +#define PyArray_USHORT NPY_USHORT +#define PyArray_INT NPY_INT +#define PyArray_UINT NPY_UINT +#define PyArray_LONG NPY_LONG +#define PyArray_ULONG NPY_ULONG +#define PyArray_LONGLONG NPY_LONGLONG +#define PyArray_ULONGLONG NPY_ULONGLONG +#define PyArray_HALF NPY_HALF +#define PyArray_FLOAT NPY_FLOAT +#define PyArray_DOUBLE NPY_DOUBLE +#define PyArray_LONGDOUBLE NPY_LONGDOUBLE +#define PyArray_CFLOAT NPY_CFLOAT +#define PyArray_CDOUBLE NPY_CDOUBLE +#define PyArray_CLONGDOUBLE NPY_CLONGDOUBLE +#define PyArray_OBJECT NPY_OBJECT +#define PyArray_STRING NPY_STRING +#define PyArray_UNICODE NPY_UNICODE +#define PyArray_VOID NPY_VOID +#define PyArray_DATETIME NPY_DATETIME +#define PyArray_TIMEDELTA NPY_TIMEDELTA +#define PyArray_NTYPES NPY_NTYPES +#define PyArray_NOTYPE NPY_NOTYPE +#define PyArray_CHAR NPY_CHAR +#define PyArray_USERDEF NPY_USERDEF +#define PyArray_NUMUSERTYPES NPY_NUMUSERTYPES + +#define PyArray_INTP NPY_INTP +#define PyArray_UINTP NPY_UINTP + +#define PyArray_INT8 NPY_INT8 +#define PyArray_UINT8 NPY_UINT8 +#define PyArray_INT16 NPY_INT16 +#define PyArray_UINT16 NPY_UINT16 +#define PyArray_INT32 NPY_INT32 +#define PyArray_UINT32 NPY_UINT32 + +#ifdef NPY_INT64 +#define PyArray_INT64 NPY_INT64 +#define PyArray_UINT64 NPY_UINT64 +#endif + +#ifdef NPY_INT128 +#define PyArray_INT128 NPY_INT128 +#define PyArray_UINT128 NPY_UINT128 +#endif + 
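(Editorial aside, not part of the header: these aliases exist only to keep pre-NumPy-1.7 extension code compiling; new code should use the NPY_* names directly. A hypothetical before/after sketch, where make_vector is an illustrative helper that assumes the usual C-API setup of Python.h, arrayobject.h and import_array():)

static PyObject *make_vector(void)
{
    npy_intp dims[1] = {3};
    /* legacy spelling, compiles only while old_defines.h is still usable: */
    /* return PyArray_SimpleNew(1, dims, PyArray_DOUBLE); */
    /* current spelling: */
    return PyArray_SimpleNew(1, dims, NPY_DOUBLE);
}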
+#ifdef NPY_FLOAT16 +#define PyArray_FLOAT16 NPY_FLOAT16 +#define PyArray_COMPLEX32 NPY_COMPLEX32 +#endif + +#ifdef NPY_FLOAT80 +#define PyArray_FLOAT80 NPY_FLOAT80 +#define PyArray_COMPLEX160 NPY_COMPLEX160 +#endif + +#ifdef NPY_FLOAT96 +#define PyArray_FLOAT96 NPY_FLOAT96 +#define PyArray_COMPLEX192 NPY_COMPLEX192 +#endif + +#ifdef NPY_FLOAT128 +#define PyArray_FLOAT128 NPY_FLOAT128 +#define PyArray_COMPLEX256 NPY_COMPLEX256 +#endif + +#define PyArray_FLOAT32 NPY_FLOAT32 +#define PyArray_COMPLEX64 NPY_COMPLEX64 +#define PyArray_FLOAT64 NPY_FLOAT64 +#define PyArray_COMPLEX128 NPY_COMPLEX128 + + +#define PyArray_TYPECHAR NPY_TYPECHAR +#define PyArray_BOOLLTR NPY_BOOLLTR +#define PyArray_BYTELTR NPY_BYTELTR +#define PyArray_UBYTELTR NPY_UBYTELTR +#define PyArray_SHORTLTR NPY_SHORTLTR +#define PyArray_USHORTLTR NPY_USHORTLTR +#define PyArray_INTLTR NPY_INTLTR +#define PyArray_UINTLTR NPY_UINTLTR +#define PyArray_LONGLTR NPY_LONGLTR +#define PyArray_ULONGLTR NPY_ULONGLTR +#define PyArray_LONGLONGLTR NPY_LONGLONGLTR +#define PyArray_ULONGLONGLTR NPY_ULONGLONGLTR +#define PyArray_HALFLTR NPY_HALFLTR +#define PyArray_FLOATLTR NPY_FLOATLTR +#define PyArray_DOUBLELTR NPY_DOUBLELTR +#define PyArray_LONGDOUBLELTR NPY_LONGDOUBLELTR +#define PyArray_CFLOATLTR NPY_CFLOATLTR +#define PyArray_CDOUBLELTR NPY_CDOUBLELTR +#define PyArray_CLONGDOUBLELTR NPY_CLONGDOUBLELTR +#define PyArray_OBJECTLTR NPY_OBJECTLTR +#define PyArray_STRINGLTR NPY_STRINGLTR +#define PyArray_STRINGLTR2 NPY_STRINGLTR2 +#define PyArray_UNICODELTR NPY_UNICODELTR +#define PyArray_VOIDLTR NPY_VOIDLTR +#define PyArray_DATETIMELTR NPY_DATETIMELTR +#define PyArray_TIMEDELTALTR NPY_TIMEDELTALTR +#define PyArray_CHARLTR NPY_CHARLTR +#define PyArray_INTPLTR NPY_INTPLTR +#define PyArray_UINTPLTR NPY_UINTPLTR +#define PyArray_GENBOOLLTR NPY_GENBOOLLTR +#define PyArray_SIGNEDLTR NPY_SIGNEDLTR +#define PyArray_UNSIGNEDLTR NPY_UNSIGNEDLTR +#define PyArray_FLOATINGLTR NPY_FLOATINGLTR +#define PyArray_COMPLEXLTR NPY_COMPLEXLTR + +#define PyArray_QUICKSORT NPY_QUICKSORT +#define PyArray_HEAPSORT NPY_HEAPSORT +#define PyArray_MERGESORT NPY_MERGESORT +#define PyArray_SORTKIND NPY_SORTKIND +#define PyArray_NSORTS NPY_NSORTS + +#define PyArray_NOSCALAR NPY_NOSCALAR +#define PyArray_BOOL_SCALAR NPY_BOOL_SCALAR +#define PyArray_INTPOS_SCALAR NPY_INTPOS_SCALAR +#define PyArray_INTNEG_SCALAR NPY_INTNEG_SCALAR +#define PyArray_FLOAT_SCALAR NPY_FLOAT_SCALAR +#define PyArray_COMPLEX_SCALAR NPY_COMPLEX_SCALAR +#define PyArray_OBJECT_SCALAR NPY_OBJECT_SCALAR +#define PyArray_SCALARKIND NPY_SCALARKIND +#define PyArray_NSCALARKINDS NPY_NSCALARKINDS + +#define PyArray_ANYORDER NPY_ANYORDER +#define PyArray_CORDER NPY_CORDER +#define PyArray_FORTRANORDER NPY_FORTRANORDER +#define PyArray_ORDER NPY_ORDER + +#define PyDescr_ISBOOL PyDataType_ISBOOL +#define PyDescr_ISUNSIGNED PyDataType_ISUNSIGNED +#define PyDescr_ISSIGNED PyDataType_ISSIGNED +#define PyDescr_ISINTEGER PyDataType_ISINTEGER +#define PyDescr_ISFLOAT PyDataType_ISFLOAT +#define PyDescr_ISNUMBER PyDataType_ISNUMBER +#define PyDescr_ISSTRING PyDataType_ISSTRING +#define PyDescr_ISCOMPLEX PyDataType_ISCOMPLEX +#define PyDescr_ISPYTHON PyDataType_ISPYTHON +#define PyDescr_ISFLEXIBLE PyDataType_ISFLEXIBLE +#define PyDescr_ISUSERDEF PyDataType_ISUSERDEF +#define PyDescr_ISEXTENDED PyDataType_ISEXTENDED +#define PyDescr_ISOBJECT PyDataType_ISOBJECT +#define PyDescr_HASFIELDS PyDataType_HASFIELDS + +#define PyArray_LITTLE NPY_LITTLE +#define PyArray_BIG NPY_BIG +#define PyArray_NATIVE NPY_NATIVE +#define 
PyArray_SWAP NPY_SWAP +#define PyArray_IGNORE NPY_IGNORE + +#define PyArray_NATBYTE NPY_NATBYTE +#define PyArray_OPPBYTE NPY_OPPBYTE + +#define PyArray_MAX_ELSIZE NPY_MAX_ELSIZE + +#define PyArray_USE_PYMEM NPY_USE_PYMEM + +#define PyArray_RemoveLargest PyArray_RemoveSmallest + +#define PyArray_UCS4 npy_ucs4 + +#endif /* NUMPY_CORE_INCLUDE_NUMPY_OLD_DEFINES_H_ */ diff --git a/.venv/lib/python3.12/site-packages/numpy/core/include/numpy/random/LICENSE.txt b/.venv/lib/python3.12/site-packages/numpy/core/include/numpy/random/LICENSE.txt new file mode 100644 index 00000000..d72a7c38 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/include/numpy/random/LICENSE.txt @@ -0,0 +1,21 @@ + zlib License + ------------ + + Copyright (C) 2010 - 2019 ridiculous_fish, <libdivide@ridiculousfish.com> + Copyright (C) 2016 - 2019 Kim Walisch, <kim.walisch@gmail.com> + + This software is provided 'as-is', without any express or implied + warranty. In no event will the authors be held liable for any damages + arising from the use of this software. + + Permission is granted to anyone to use this software for any purpose, + including commercial applications, and to alter it and redistribute it + freely, subject to the following restrictions: + + 1. The origin of this software must not be misrepresented; you must not + claim that you wrote the original software. If you use this software + in a product, an acknowledgment in the product documentation would be + appreciated but is not required. + 2. Altered source versions must be plainly marked as such, and must not be + misrepresented as being the original software. + 3. This notice may not be removed or altered from any source distribution. diff --git a/.venv/lib/python3.12/site-packages/numpy/core/include/numpy/random/bitgen.h b/.venv/lib/python3.12/site-packages/numpy/core/include/numpy/random/bitgen.h new file mode 100644 index 00000000..162dd5c5 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/include/numpy/random/bitgen.h @@ -0,0 +1,20 @@ +#ifndef NUMPY_CORE_INCLUDE_NUMPY_RANDOM_BITGEN_H_ +#define NUMPY_CORE_INCLUDE_NUMPY_RANDOM_BITGEN_H_ + +#pragma once +#include <stddef.h> +#include <stdbool.h> +#include <stdint.h> + +/* Must match the declaration in numpy/random/<any>.pxd */ + +typedef struct bitgen { + void *state; + uint64_t (*next_uint64)(void *st); + uint32_t (*next_uint32)(void *st); + double (*next_double)(void *st); + uint64_t (*next_raw)(void *st); +} bitgen_t; + + +#endif /* NUMPY_CORE_INCLUDE_NUMPY_RANDOM_BITGEN_H_ */ diff --git a/.venv/lib/python3.12/site-packages/numpy/core/include/numpy/random/distributions.h b/.venv/lib/python3.12/site-packages/numpy/core/include/numpy/random/distributions.h new file mode 100644 index 00000000..e7fa4bd0 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/include/numpy/random/distributions.h @@ -0,0 +1,209 @@ +#ifndef NUMPY_CORE_INCLUDE_NUMPY_RANDOM_DISTRIBUTIONS_H_ +#define NUMPY_CORE_INCLUDE_NUMPY_RANDOM_DISTRIBUTIONS_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include <Python.h> +#include "numpy/npy_common.h" +#include <stddef.h> +#include <stdbool.h> +#include <stdint.h> + +#include "numpy/npy_math.h" +#include "numpy/random/bitgen.h" + +/* + * RAND_INT_TYPE is used to share integer generators with RandomState which + * used long in place of int64_t. If changing a distribution that uses + * RAND_INT_TYPE, then the original unmodified copy must be retained for + * use in RandomState by copying to the legacy distributions source file. 
+ */ +#ifdef NP_RANDOM_LEGACY +#define RAND_INT_TYPE long +#define RAND_INT_MAX LONG_MAX +#else +#define RAND_INT_TYPE int64_t +#define RAND_INT_MAX INT64_MAX +#endif + +#ifdef _MSC_VER +#define DECLDIR __declspec(dllexport) +#else +#define DECLDIR extern +#endif + +#ifndef MIN +#define MIN(x, y) (((x) < (y)) ? x : y) +#define MAX(x, y) (((x) > (y)) ? x : y) +#endif + +#ifndef M_PI +#define M_PI 3.14159265358979323846264338328 +#endif + +typedef struct s_binomial_t { + int has_binomial; /* !=0: following parameters initialized for binomial */ + double psave; + RAND_INT_TYPE nsave; + double r; + double q; + double fm; + RAND_INT_TYPE m; + double p1; + double xm; + double xl; + double xr; + double c; + double laml; + double lamr; + double p2; + double p3; + double p4; +} binomial_t; + +DECLDIR float random_standard_uniform_f(bitgen_t *bitgen_state); +DECLDIR double random_standard_uniform(bitgen_t *bitgen_state); +DECLDIR void random_standard_uniform_fill(bitgen_t *, npy_intp, double *); +DECLDIR void random_standard_uniform_fill_f(bitgen_t *, npy_intp, float *); + +DECLDIR int64_t random_positive_int64(bitgen_t *bitgen_state); +DECLDIR int32_t random_positive_int32(bitgen_t *bitgen_state); +DECLDIR int64_t random_positive_int(bitgen_t *bitgen_state); +DECLDIR uint64_t random_uint(bitgen_t *bitgen_state); + +DECLDIR double random_standard_exponential(bitgen_t *bitgen_state); +DECLDIR float random_standard_exponential_f(bitgen_t *bitgen_state); +DECLDIR void random_standard_exponential_fill(bitgen_t *, npy_intp, double *); +DECLDIR void random_standard_exponential_fill_f(bitgen_t *, npy_intp, float *); +DECLDIR void random_standard_exponential_inv_fill(bitgen_t *, npy_intp, double *); +DECLDIR void random_standard_exponential_inv_fill_f(bitgen_t *, npy_intp, float *); + +DECLDIR double random_standard_normal(bitgen_t *bitgen_state); +DECLDIR float random_standard_normal_f(bitgen_t *bitgen_state); +DECLDIR void random_standard_normal_fill(bitgen_t *, npy_intp, double *); +DECLDIR void random_standard_normal_fill_f(bitgen_t *, npy_intp, float *); +DECLDIR double random_standard_gamma(bitgen_t *bitgen_state, double shape); +DECLDIR float random_standard_gamma_f(bitgen_t *bitgen_state, float shape); + +DECLDIR double random_normal(bitgen_t *bitgen_state, double loc, double scale); + +DECLDIR double random_gamma(bitgen_t *bitgen_state, double shape, double scale); +DECLDIR float random_gamma_f(bitgen_t *bitgen_state, float shape, float scale); + +DECLDIR double random_exponential(bitgen_t *bitgen_state, double scale); +DECLDIR double random_uniform(bitgen_t *bitgen_state, double lower, double range); +DECLDIR double random_beta(bitgen_t *bitgen_state, double a, double b); +DECLDIR double random_chisquare(bitgen_t *bitgen_state, double df); +DECLDIR double random_f(bitgen_t *bitgen_state, double dfnum, double dfden); +DECLDIR double random_standard_cauchy(bitgen_t *bitgen_state); +DECLDIR double random_pareto(bitgen_t *bitgen_state, double a); +DECLDIR double random_weibull(bitgen_t *bitgen_state, double a); +DECLDIR double random_power(bitgen_t *bitgen_state, double a); +DECLDIR double random_laplace(bitgen_t *bitgen_state, double loc, double scale); +DECLDIR double random_gumbel(bitgen_t *bitgen_state, double loc, double scale); +DECLDIR double random_logistic(bitgen_t *bitgen_state, double loc, double scale); +DECLDIR double random_lognormal(bitgen_t *bitgen_state, double mean, double sigma); +DECLDIR double random_rayleigh(bitgen_t *bitgen_state, double mode); +DECLDIR double 
random_standard_t(bitgen_t *bitgen_state, double df); +DECLDIR double random_noncentral_chisquare(bitgen_t *bitgen_state, double df, + double nonc); +DECLDIR double random_noncentral_f(bitgen_t *bitgen_state, double dfnum, + double dfden, double nonc); +DECLDIR double random_wald(bitgen_t *bitgen_state, double mean, double scale); +DECLDIR double random_vonmises(bitgen_t *bitgen_state, double mu, double kappa); +DECLDIR double random_triangular(bitgen_t *bitgen_state, double left, double mode, + double right); + +DECLDIR RAND_INT_TYPE random_poisson(bitgen_t *bitgen_state, double lam); +DECLDIR RAND_INT_TYPE random_negative_binomial(bitgen_t *bitgen_state, double n, + double p); + +DECLDIR int64_t random_binomial(bitgen_t *bitgen_state, double p, + int64_t n, binomial_t *binomial); + +DECLDIR int64_t random_logseries(bitgen_t *bitgen_state, double p); +DECLDIR int64_t random_geometric(bitgen_t *bitgen_state, double p); +DECLDIR RAND_INT_TYPE random_geometric_search(bitgen_t *bitgen_state, double p); +DECLDIR RAND_INT_TYPE random_zipf(bitgen_t *bitgen_state, double a); +DECLDIR int64_t random_hypergeometric(bitgen_t *bitgen_state, + int64_t good, int64_t bad, int64_t sample); +DECLDIR uint64_t random_interval(bitgen_t *bitgen_state, uint64_t max); + +/* Generate random uint64 numbers in closed interval [off, off + rng]. */ +DECLDIR uint64_t random_bounded_uint64(bitgen_t *bitgen_state, uint64_t off, + uint64_t rng, uint64_t mask, + bool use_masked); + +/* Generate random uint32 numbers in closed interval [off, off + rng]. */ +DECLDIR uint32_t random_buffered_bounded_uint32(bitgen_t *bitgen_state, + uint32_t off, uint32_t rng, + uint32_t mask, bool use_masked, + int *bcnt, uint32_t *buf); +DECLDIR uint16_t random_buffered_bounded_uint16(bitgen_t *bitgen_state, + uint16_t off, uint16_t rng, + uint16_t mask, bool use_masked, + int *bcnt, uint32_t *buf); +DECLDIR uint8_t random_buffered_bounded_uint8(bitgen_t *bitgen_state, uint8_t off, + uint8_t rng, uint8_t mask, + bool use_masked, int *bcnt, + uint32_t *buf); +DECLDIR npy_bool random_buffered_bounded_bool(bitgen_t *bitgen_state, npy_bool off, + npy_bool rng, npy_bool mask, + bool use_masked, int *bcnt, + uint32_t *buf); + +DECLDIR void random_bounded_uint64_fill(bitgen_t *bitgen_state, uint64_t off, + uint64_t rng, npy_intp cnt, + bool use_masked, uint64_t *out); +DECLDIR void random_bounded_uint32_fill(bitgen_t *bitgen_state, uint32_t off, + uint32_t rng, npy_intp cnt, + bool use_masked, uint32_t *out); +DECLDIR void random_bounded_uint16_fill(bitgen_t *bitgen_state, uint16_t off, + uint16_t rng, npy_intp cnt, + bool use_masked, uint16_t *out); +DECLDIR void random_bounded_uint8_fill(bitgen_t *bitgen_state, uint8_t off, + uint8_t rng, npy_intp cnt, + bool use_masked, uint8_t *out); +DECLDIR void random_bounded_bool_fill(bitgen_t *bitgen_state, npy_bool off, + npy_bool rng, npy_intp cnt, + bool use_masked, npy_bool *out); + +DECLDIR void random_multinomial(bitgen_t *bitgen_state, RAND_INT_TYPE n, RAND_INT_TYPE *mnix, + double *pix, npy_intp d, binomial_t *binomial); + +/* multivariate hypergeometric, "count" method */ +DECLDIR int random_multivariate_hypergeometric_count(bitgen_t *bitgen_state, + int64_t total, + size_t num_colors, int64_t *colors, + int64_t nsample, + size_t num_variates, int64_t *variates); + +/* multivariate hypergeometric, "marginals" method */ +DECLDIR void random_multivariate_hypergeometric_marginals(bitgen_t *bitgen_state, + int64_t total, + size_t num_colors, int64_t *colors, + int64_t nsample, + size_t num_variates, 
int64_t *variates); + +/* Common to legacy-distributions.c and distributions.c but not exported */ + +RAND_INT_TYPE random_binomial_btpe(bitgen_t *bitgen_state, + RAND_INT_TYPE n, + double p, + binomial_t *binomial); +RAND_INT_TYPE random_binomial_inversion(bitgen_t *bitgen_state, + RAND_INT_TYPE n, + double p, + binomial_t *binomial); +double random_loggam(double x); +static inline double next_double(bitgen_t *bitgen_state) { + return bitgen_state->next_double(bitgen_state->state); +} + +#ifdef __cplusplus +} +#endif + +#endif /* NUMPY_CORE_INCLUDE_NUMPY_RANDOM_DISTRIBUTIONS_H_ */ diff --git a/.venv/lib/python3.12/site-packages/numpy/core/include/numpy/random/libdivide.h b/.venv/lib/python3.12/site-packages/numpy/core/include/numpy/random/libdivide.h new file mode 100644 index 00000000..f4eb8039 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/include/numpy/random/libdivide.h @@ -0,0 +1,2079 @@ +// libdivide.h - Optimized integer division +// https://libdivide.com +// +// Copyright (C) 2010 - 2019 ridiculous_fish, <libdivide@ridiculousfish.com> +// Copyright (C) 2016 - 2019 Kim Walisch, <kim.walisch@gmail.com> +// +// libdivide is dual-licensed under the Boost or zlib licenses. +// You may use libdivide under the terms of either of these. +// See LICENSE.txt for more details. + +#ifndef NUMPY_CORE_INCLUDE_NUMPY_LIBDIVIDE_LIBDIVIDE_H_ +#define NUMPY_CORE_INCLUDE_NUMPY_LIBDIVIDE_LIBDIVIDE_H_ + +#define LIBDIVIDE_VERSION "3.0" +#define LIBDIVIDE_VERSION_MAJOR 3 +#define LIBDIVIDE_VERSION_MINOR 0 + +#include <stdint.h> + +#if defined(__cplusplus) + #include <cstdlib> + #include <cstdio> + #include <type_traits> +#else + #include <stdlib.h> + #include <stdio.h> +#endif + +#if defined(LIBDIVIDE_AVX512) + #include <immintrin.h> +#elif defined(LIBDIVIDE_AVX2) + #include <immintrin.h> +#elif defined(LIBDIVIDE_SSE2) + #include <emmintrin.h> +#endif + +#if defined(_MSC_VER) + #include <intrin.h> + // disable warning C4146: unary minus operator applied + // to unsigned type, result still unsigned + #pragma warning(disable: 4146) + #define LIBDIVIDE_VC +#endif + +#if !defined(__has_builtin) + #define __has_builtin(x) 0 +#endif + +#if defined(__SIZEOF_INT128__) + #define HAS_INT128_T + // clang-cl on Windows does not yet support 128-bit division + #if !(defined(__clang__) && defined(LIBDIVIDE_VC)) + #define HAS_INT128_DIV + #endif +#endif + +#if defined(__x86_64__) || defined(_M_X64) + #define LIBDIVIDE_X86_64 +#endif + +#if defined(__i386__) + #define LIBDIVIDE_i386 +#endif + +#if defined(__GNUC__) || defined(__clang__) + #define LIBDIVIDE_GCC_STYLE_ASM +#endif + +#if defined(__cplusplus) || defined(LIBDIVIDE_VC) + #define LIBDIVIDE_FUNCTION __FUNCTION__ +#else + #define LIBDIVIDE_FUNCTION __func__ +#endif + +#define LIBDIVIDE_ERROR(msg) \ + do { \ + fprintf(stderr, "libdivide.h:%d: %s(): Error: %s\n", \ + __LINE__, LIBDIVIDE_FUNCTION, msg); \ + abort(); \ + } while (0) + +#if defined(LIBDIVIDE_ASSERTIONS_ON) + #define LIBDIVIDE_ASSERT(x) \ + do { \ + if (!(x)) { \ + fprintf(stderr, "libdivide.h:%d: %s(): Assertion failed: %s\n", \ + __LINE__, LIBDIVIDE_FUNCTION, #x); \ + abort(); \ + } \ + } while (0) +#else + #define LIBDIVIDE_ASSERT(x) +#endif + +#ifdef __cplusplus +namespace libdivide { +#endif + +// pack divider structs to prevent compilers from padding. +// This reduces memory usage by up to 43% when using a large +// array of libdivide dividers and improves performance +// by up to 10% because of reduced memory bandwidth. 
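(Editorial aside, not part of libdivide.h: the usual pattern for the gen/do API declared below is to precompute a divider once and reuse it inside a hot loop; the helper name sum_of_quotients is illustrative only.)

#include <stddef.h>
#include <stdint.h>

static uint64_t sum_of_quotients(const uint64_t *vals, size_t n, uint64_t d)
{
    /* d must be non-zero; generating the divider is the expensive, one-time step */
    struct libdivide_u64_t divider = libdivide_u64_gen(d);
    uint64_t total = 0;
    for (size_t i = 0; i < n; i++) {
        total += libdivide_u64_do(vals[i], &divider);  /* equivalent to vals[i] / d */
    }
    return total;
}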
+#pragma pack(push, 1) + +struct libdivide_u32_t { + uint32_t magic; + uint8_t more; +}; + +struct libdivide_s32_t { + int32_t magic; + uint8_t more; +}; + +struct libdivide_u64_t { + uint64_t magic; + uint8_t more; +}; + +struct libdivide_s64_t { + int64_t magic; + uint8_t more; +}; + +struct libdivide_u32_branchfree_t { + uint32_t magic; + uint8_t more; +}; + +struct libdivide_s32_branchfree_t { + int32_t magic; + uint8_t more; +}; + +struct libdivide_u64_branchfree_t { + uint64_t magic; + uint8_t more; +}; + +struct libdivide_s64_branchfree_t { + int64_t magic; + uint8_t more; +}; + +#pragma pack(pop) + +// Explanation of the "more" field: +// +// * Bits 0-5 is the shift value (for shift path or mult path). +// * Bit 6 is the add indicator for mult path. +// * Bit 7 is set if the divisor is negative. We use bit 7 as the negative +// divisor indicator so that we can efficiently use sign extension to +// create a bitmask with all bits set to 1 (if the divisor is negative) +// or 0 (if the divisor is positive). +// +// u32: [0-4] shift value +// [5] ignored +// [6] add indicator +// magic number of 0 indicates shift path +// +// s32: [0-4] shift value +// [5] ignored +// [6] add indicator +// [7] indicates negative divisor +// magic number of 0 indicates shift path +// +// u64: [0-5] shift value +// [6] add indicator +// magic number of 0 indicates shift path +// +// s64: [0-5] shift value +// [6] add indicator +// [7] indicates negative divisor +// magic number of 0 indicates shift path +// +// In s32 and s64 branchfree modes, the magic number is negated according to +// whether the divisor is negated. In branchfree strategy, it is not negated. + +enum { + LIBDIVIDE_32_SHIFT_MASK = 0x1F, + LIBDIVIDE_64_SHIFT_MASK = 0x3F, + LIBDIVIDE_ADD_MARKER = 0x40, + LIBDIVIDE_NEGATIVE_DIVISOR = 0x80 +}; + +static inline struct libdivide_s32_t libdivide_s32_gen(int32_t d); +static inline struct libdivide_u32_t libdivide_u32_gen(uint32_t d); +static inline struct libdivide_s64_t libdivide_s64_gen(int64_t d); +static inline struct libdivide_u64_t libdivide_u64_gen(uint64_t d); + +static inline struct libdivide_s32_branchfree_t libdivide_s32_branchfree_gen(int32_t d); +static inline struct libdivide_u32_branchfree_t libdivide_u32_branchfree_gen(uint32_t d); +static inline struct libdivide_s64_branchfree_t libdivide_s64_branchfree_gen(int64_t d); +static inline struct libdivide_u64_branchfree_t libdivide_u64_branchfree_gen(uint64_t d); + +static inline int32_t libdivide_s32_do(int32_t numer, const struct libdivide_s32_t *denom); +static inline uint32_t libdivide_u32_do(uint32_t numer, const struct libdivide_u32_t *denom); +static inline int64_t libdivide_s64_do(int64_t numer, const struct libdivide_s64_t *denom); +static inline uint64_t libdivide_u64_do(uint64_t numer, const struct libdivide_u64_t *denom); + +static inline int32_t libdivide_s32_branchfree_do(int32_t numer, const struct libdivide_s32_branchfree_t *denom); +static inline uint32_t libdivide_u32_branchfree_do(uint32_t numer, const struct libdivide_u32_branchfree_t *denom); +static inline int64_t libdivide_s64_branchfree_do(int64_t numer, const struct libdivide_s64_branchfree_t *denom); +static inline uint64_t libdivide_u64_branchfree_do(uint64_t numer, const struct libdivide_u64_branchfree_t *denom); + +static inline int32_t libdivide_s32_recover(const struct libdivide_s32_t *denom); +static inline uint32_t libdivide_u32_recover(const struct libdivide_u32_t *denom); +static inline int64_t libdivide_s64_recover(const struct libdivide_s64_t 
*denom); +static inline uint64_t libdivide_u64_recover(const struct libdivide_u64_t *denom); + +static inline int32_t libdivide_s32_branchfree_recover(const struct libdivide_s32_branchfree_t *denom); +static inline uint32_t libdivide_u32_branchfree_recover(const struct libdivide_u32_branchfree_t *denom); +static inline int64_t libdivide_s64_branchfree_recover(const struct libdivide_s64_branchfree_t *denom); +static inline uint64_t libdivide_u64_branchfree_recover(const struct libdivide_u64_branchfree_t *denom); + +//////// Internal Utility Functions + +static inline uint32_t libdivide_mullhi_u32(uint32_t x, uint32_t y) { + uint64_t xl = x, yl = y; + uint64_t rl = xl * yl; + return (uint32_t)(rl >> 32); +} + +static inline int32_t libdivide_mullhi_s32(int32_t x, int32_t y) { + int64_t xl = x, yl = y; + int64_t rl = xl * yl; + // needs to be arithmetic shift + return (int32_t)(rl >> 32); +} + +static inline uint64_t libdivide_mullhi_u64(uint64_t x, uint64_t y) { +#if defined(LIBDIVIDE_VC) && \ + defined(LIBDIVIDE_X86_64) + return __umulh(x, y); +#elif defined(HAS_INT128_T) + __uint128_t xl = x, yl = y; + __uint128_t rl = xl * yl; + return (uint64_t)(rl >> 64); +#else + // full 128 bits are x0 * y0 + (x0 * y1 << 32) + (x1 * y0 << 32) + (x1 * y1 << 64) + uint32_t mask = 0xFFFFFFFF; + uint32_t x0 = (uint32_t)(x & mask); + uint32_t x1 = (uint32_t)(x >> 32); + uint32_t y0 = (uint32_t)(y & mask); + uint32_t y1 = (uint32_t)(y >> 32); + uint32_t x0y0_hi = libdivide_mullhi_u32(x0, y0); + uint64_t x0y1 = x0 * (uint64_t)y1; + uint64_t x1y0 = x1 * (uint64_t)y0; + uint64_t x1y1 = x1 * (uint64_t)y1; + uint64_t temp = x1y0 + x0y0_hi; + uint64_t temp_lo = temp & mask; + uint64_t temp_hi = temp >> 32; + + return x1y1 + temp_hi + ((temp_lo + x0y1) >> 32); +#endif +} + +static inline int64_t libdivide_mullhi_s64(int64_t x, int64_t y) { +#if defined(LIBDIVIDE_VC) && \ + defined(LIBDIVIDE_X86_64) + return __mulh(x, y); +#elif defined(HAS_INT128_T) + __int128_t xl = x, yl = y; + __int128_t rl = xl * yl; + return (int64_t)(rl >> 64); +#else + // full 128 bits are x0 * y0 + (x0 * y1 << 32) + (x1 * y0 << 32) + (x1 * y1 << 64) + uint32_t mask = 0xFFFFFFFF; + uint32_t x0 = (uint32_t)(x & mask); + uint32_t y0 = (uint32_t)(y & mask); + int32_t x1 = (int32_t)(x >> 32); + int32_t y1 = (int32_t)(y >> 32); + uint32_t x0y0_hi = libdivide_mullhi_u32(x0, y0); + int64_t t = x1 * (int64_t)y0 + x0y0_hi; + int64_t w1 = x0 * (int64_t)y1 + (t & mask); + + return x1 * (int64_t)y1 + (t >> 32) + (w1 >> 32); +#endif +} + +static inline int32_t libdivide_count_leading_zeros32(uint32_t val) { +#if defined(__GNUC__) || \ + __has_builtin(__builtin_clz) + // Fast way to count leading zeros + return __builtin_clz(val); +#elif defined(LIBDIVIDE_VC) + unsigned long result; + if (_BitScanReverse(&result, val)) { + return 31 - result; + } + return 0; +#else + if (val == 0) + return 32; + int32_t result = 8; + uint32_t hi = 0xFFU << 24; + while ((val & hi) == 0) { + hi >>= 8; + result += 8; + } + while (val & hi) { + result -= 1; + hi <<= 1; + } + return result; +#endif +} + +static inline int32_t libdivide_count_leading_zeros64(uint64_t val) { +#if defined(__GNUC__) || \ + __has_builtin(__builtin_clzll) + // Fast way to count leading zeros + return __builtin_clzll(val); +#elif defined(LIBDIVIDE_VC) && defined(_WIN64) + unsigned long result; + if (_BitScanReverse64(&result, val)) { + return 63 - result; + } + return 0; +#else + uint32_t hi = val >> 32; + uint32_t lo = val & 0xFFFFFFFF; + if (hi != 0) return libdivide_count_leading_zeros32(hi); + 
return 32 + libdivide_count_leading_zeros32(lo); +#endif +} + +// libdivide_64_div_32_to_32: divides a 64-bit uint {u1, u0} by a 32-bit +// uint {v}. The result must fit in 32 bits. +// Returns the quotient directly and the remainder in *r +static inline uint32_t libdivide_64_div_32_to_32(uint32_t u1, uint32_t u0, uint32_t v, uint32_t *r) { +#if (defined(LIBDIVIDE_i386) || defined(LIBDIVIDE_X86_64)) && \ + defined(LIBDIVIDE_GCC_STYLE_ASM) + uint32_t result; + __asm__("divl %[v]" + : "=a"(result), "=d"(*r) + : [v] "r"(v), "a"(u0), "d"(u1) + ); + return result; +#else + uint64_t n = ((uint64_t)u1 << 32) | u0; + uint32_t result = (uint32_t)(n / v); + *r = (uint32_t)(n - result * (uint64_t)v); + return result; +#endif +} + +// libdivide_128_div_64_to_64: divides a 128-bit uint {u1, u0} by a 64-bit +// uint {v}. The result must fit in 64 bits. +// Returns the quotient directly and the remainder in *r +static uint64_t libdivide_128_div_64_to_64(uint64_t u1, uint64_t u0, uint64_t v, uint64_t *r) { +#if defined(LIBDIVIDE_X86_64) && \ + defined(LIBDIVIDE_GCC_STYLE_ASM) + uint64_t result; + __asm__("divq %[v]" + : "=a"(result), "=d"(*r) + : [v] "r"(v), "a"(u0), "d"(u1) + ); + return result; +#elif defined(HAS_INT128_T) && \ + defined(HAS_INT128_DIV) + __uint128_t n = ((__uint128_t)u1 << 64) | u0; + uint64_t result = (uint64_t)(n / v); + *r = (uint64_t)(n - result * (__uint128_t)v); + return result; +#else + // Code taken from Hacker's Delight: + // http://www.hackersdelight.org/HDcode/divlu.c. + // License permits inclusion here per: + // http://www.hackersdelight.org/permissions.htm + + const uint64_t b = (1ULL << 32); // Number base (32 bits) + uint64_t un1, un0; // Norm. dividend LSD's + uint64_t vn1, vn0; // Norm. divisor digits + uint64_t q1, q0; // Quotient digits + uint64_t un64, un21, un10; // Dividend digit pairs + uint64_t rhat; // A remainder + int32_t s; // Shift amount for norm + + // If overflow, set rem. to an impossible value, + // and return the largest possible quotient + if (u1 >= v) { + *r = (uint64_t) -1; + return (uint64_t) -1; + } + + // count leading zeros + s = libdivide_count_leading_zeros64(v); + if (s > 0) { + // Normalize divisor + v = v << s; + un64 = (u1 << s) | (u0 >> (64 - s)); + un10 = u0 << s; // Shift dividend left + } else { + // Avoid undefined behavior of (u0 >> 64). + // The behavior is undefined if the right operand is + // negative, or greater than or equal to the length + // in bits of the promoted left operand. 
+ un64 = u1; + un10 = u0; + } + + // Break divisor up into two 32-bit digits + vn1 = v >> 32; + vn0 = v & 0xFFFFFFFF; + + // Break right half of dividend into two digits + un1 = un10 >> 32; + un0 = un10 & 0xFFFFFFFF; + + // Compute the first quotient digit, q1 + q1 = un64 / vn1; + rhat = un64 - q1 * vn1; + + while (q1 >= b || q1 * vn0 > b * rhat + un1) { + q1 = q1 - 1; + rhat = rhat + vn1; + if (rhat >= b) + break; + } + + // Multiply and subtract + un21 = un64 * b + un1 - q1 * v; + + // Compute the second quotient digit + q0 = un21 / vn1; + rhat = un21 - q0 * vn1; + + while (q0 >= b || q0 * vn0 > b * rhat + un0) { + q0 = q0 - 1; + rhat = rhat + vn1; + if (rhat >= b) + break; + } + + *r = (un21 * b + un0 - q0 * v) >> s; + return q1 * b + q0; +#endif +} + +// Bitshift a u128 in place, left (signed_shift > 0) or right (signed_shift < 0) +static inline void libdivide_u128_shift(uint64_t *u1, uint64_t *u0, int32_t signed_shift) { + if (signed_shift > 0) { + uint32_t shift = signed_shift; + *u1 <<= shift; + *u1 |= *u0 >> (64 - shift); + *u0 <<= shift; + } + else if (signed_shift < 0) { + uint32_t shift = -signed_shift; + *u0 >>= shift; + *u0 |= *u1 << (64 - shift); + *u1 >>= shift; + } +} + +// Computes a 128 / 128 -> 64 bit division, with a 128 bit remainder. +static uint64_t libdivide_128_div_128_to_64(uint64_t u_hi, uint64_t u_lo, uint64_t v_hi, uint64_t v_lo, uint64_t *r_hi, uint64_t *r_lo) { +#if defined(HAS_INT128_T) && \ + defined(HAS_INT128_DIV) + __uint128_t ufull = u_hi; + __uint128_t vfull = v_hi; + ufull = (ufull << 64) | u_lo; + vfull = (vfull << 64) | v_lo; + uint64_t res = (uint64_t)(ufull / vfull); + __uint128_t remainder = ufull - (vfull * res); + *r_lo = (uint64_t)remainder; + *r_hi = (uint64_t)(remainder >> 64); + return res; +#else + // Adapted from "Unsigned Doubleword Division" in Hacker's Delight + // We want to compute u / v + typedef struct { uint64_t hi; uint64_t lo; } u128_t; + u128_t u = {u_hi, u_lo}; + u128_t v = {v_hi, v_lo}; + + if (v.hi == 0) { + // divisor v is a 64 bit value, so we just need one 128/64 division + // Note that we are simpler than Hacker's Delight here, because we know + // the quotient fits in 64 bits whereas Hacker's Delight demands a full + // 128 bit quotient + *r_hi = 0; + return libdivide_128_div_64_to_64(u.hi, u.lo, v.lo, r_lo); + } + // Here v >= 2**64 + // We know that v.hi != 0, so count leading zeros is OK + // We have 0 <= n <= 63 + uint32_t n = libdivide_count_leading_zeros64(v.hi); + + // Normalize the divisor so its MSB is 1 + u128_t v1t = v; + libdivide_u128_shift(&v1t.hi, &v1t.lo, n); + uint64_t v1 = v1t.hi; // i.e. v1 = v1t >> 64 + + // To ensure no overflow + u128_t u1 = u; + libdivide_u128_shift(&u1.hi, &u1.lo, -1); + + // Get quotient from divide unsigned insn. + uint64_t rem_ignored; + uint64_t q1 = libdivide_128_div_64_to_64(u1.hi, u1.lo, v1, &rem_ignored); + + // Undo normalization and division of u by 2. + u128_t q0 = {0, q1}; + libdivide_u128_shift(&q0.hi, &q0.lo, n); + libdivide_u128_shift(&q0.hi, &q0.lo, -63); + + // Make q0 correct or too small by 1 + // Equivalent to `if (q0 != 0) q0 = q0 - 1;` + if (q0.hi != 0 || q0.lo != 0) { + q0.hi -= (q0.lo == 0); // borrow + q0.lo -= 1; + } + + // Now q0 is correct. + // Compute q0 * v as q0v + // = (q0.hi << 64 + q0.lo) * (v.hi << 64 + v.lo) + // = (q0.hi * v.hi << 128) + (q0.hi * v.lo << 64) + + // (q0.lo * v.hi << 64) + q0.lo * v.lo) + // Each term is 128 bit + // High half of full product (upper 128 bits!) 
are dropped + u128_t q0v = {0, 0}; + q0v.hi = q0.hi*v.lo + q0.lo*v.hi + libdivide_mullhi_u64(q0.lo, v.lo); + q0v.lo = q0.lo*v.lo; + + // Compute u - q0v as u_q0v + // This is the remainder + u128_t u_q0v = u; + u_q0v.hi -= q0v.hi + (u.lo < q0v.lo); // second term is borrow + u_q0v.lo -= q0v.lo; + + // Check if u_q0v >= v + // This checks if our remainder is larger than the divisor + if ((u_q0v.hi > v.hi) || + (u_q0v.hi == v.hi && u_q0v.lo >= v.lo)) { + // Increment q0 + q0.lo += 1; + q0.hi += (q0.lo == 0); // carry + + // Subtract v from remainder + u_q0v.hi -= v.hi + (u_q0v.lo < v.lo); + u_q0v.lo -= v.lo; + } + + *r_hi = u_q0v.hi; + *r_lo = u_q0v.lo; + + LIBDIVIDE_ASSERT(q0.hi == 0); + return q0.lo; +#endif +} + +////////// UINT32 + +static inline struct libdivide_u32_t libdivide_internal_u32_gen(uint32_t d, int branchfree) { + if (d == 0) { + LIBDIVIDE_ERROR("divider must be != 0"); + } + + struct libdivide_u32_t result; + uint32_t floor_log_2_d = 31 - libdivide_count_leading_zeros32(d); + + // Power of 2 + if ((d & (d - 1)) == 0) { + // We need to subtract 1 from the shift value in case of an unsigned + // branchfree divider because there is a hardcoded right shift by 1 + // in its division algorithm. Because of this we also need to add back + // 1 in its recovery algorithm. + result.magic = 0; + result.more = (uint8_t)(floor_log_2_d - (branchfree != 0)); + } else { + uint8_t more; + uint32_t rem, proposed_m; + proposed_m = libdivide_64_div_32_to_32(1U << floor_log_2_d, 0, d, &rem); + + LIBDIVIDE_ASSERT(rem > 0 && rem < d); + const uint32_t e = d - rem; + + // This power works if e < 2**floor_log_2_d. + if (!branchfree && (e < (1U << floor_log_2_d))) { + // This power works + more = floor_log_2_d; + } else { + // We have to use the general 33-bit algorithm. We need to compute + // (2**power) / d. However, we already have (2**(power-1))/d and + // its remainder. By doubling both, and then correcting the + // remainder, we can compute the larger division. + // don't care about overflow here - in fact, we expect it + proposed_m += proposed_m; + const uint32_t twice_rem = rem + rem; + if (twice_rem >= d || twice_rem < rem) proposed_m += 1; + more = floor_log_2_d | LIBDIVIDE_ADD_MARKER; + } + result.magic = 1 + proposed_m; + result.more = more; + // result.more's shift should in general be ceil_log_2_d. But if we + // used the smaller power, we subtract one from the shift because we're + // using the smaller power. If we're using the larger power, we + // subtract one from the shift because it's taken care of by the add + // indicator. So floor_log_2_d happens to be correct in both cases. + } + return result; +} + +struct libdivide_u32_t libdivide_u32_gen(uint32_t d) { + return libdivide_internal_u32_gen(d, 0); +} + +struct libdivide_u32_branchfree_t libdivide_u32_branchfree_gen(uint32_t d) { + if (d == 1) { + LIBDIVIDE_ERROR("branchfree divider must be != 1"); + } + struct libdivide_u32_t tmp = libdivide_internal_u32_gen(d, 1); + struct libdivide_u32_branchfree_t ret = {tmp.magic, (uint8_t)(tmp.more & LIBDIVIDE_32_SHIFT_MASK)}; + return ret; +} + +uint32_t libdivide_u32_do(uint32_t numer, const struct libdivide_u32_t *denom) { + uint8_t more = denom->more; + if (!denom->magic) { + return numer >> more; + } + else { + uint32_t q = libdivide_mullhi_u32(denom->magic, numer); + if (more & LIBDIVIDE_ADD_MARKER) { + uint32_t t = ((numer - q) >> 1) + q; + return t >> (more & LIBDIVIDE_32_SHIFT_MASK); + } + else { + // All upper bits are 0, + // don't need to mask them off. 
+ return q >> more; + } + } +} + +uint32_t libdivide_u32_branchfree_do(uint32_t numer, const struct libdivide_u32_branchfree_t *denom) { + uint32_t q = libdivide_mullhi_u32(denom->magic, numer); + uint32_t t = ((numer - q) >> 1) + q; + return t >> denom->more; +} + +uint32_t libdivide_u32_recover(const struct libdivide_u32_t *denom) { + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + + if (!denom->magic) { + return 1U << shift; + } else if (!(more & LIBDIVIDE_ADD_MARKER)) { + // We compute q = n/d = n*m / 2^(32 + shift) + // Therefore we have d = 2^(32 + shift) / m + // We need to ceil it. + // We know d is not a power of 2, so m is not a power of 2, + // so we can just add 1 to the floor + uint32_t hi_dividend = 1U << shift; + uint32_t rem_ignored; + return 1 + libdivide_64_div_32_to_32(hi_dividend, 0, denom->magic, &rem_ignored); + } else { + // Here we wish to compute d = 2^(32+shift+1)/(m+2^32). + // Notice (m + 2^32) is a 33 bit number. Use 64 bit division for now + // Also note that shift may be as high as 31, so shift + 1 will + // overflow. So we have to compute it as 2^(32+shift)/(m+2^32), and + // then double the quotient and remainder. + uint64_t half_n = 1ULL << (32 + shift); + uint64_t d = (1ULL << 32) | denom->magic; + // Note that the quotient is guaranteed <= 32 bits, but the remainder + // may need 33! + uint32_t half_q = (uint32_t)(half_n / d); + uint64_t rem = half_n % d; + // We computed 2^(32+shift)/(m+2^32) + // Need to double it, and then add 1 to the quotient if doubling th + // remainder would increase the quotient. + // Note that rem<<1 cannot overflow, since rem < d and d is 33 bits + uint32_t full_q = half_q + half_q + ((rem<<1) >= d); + + // We rounded down in gen (hence +1) + return full_q + 1; + } +} + +uint32_t libdivide_u32_branchfree_recover(const struct libdivide_u32_branchfree_t *denom) { + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + + if (!denom->magic) { + return 1U << (shift + 1); + } else { + // Here we wish to compute d = 2^(32+shift+1)/(m+2^32). + // Notice (m + 2^32) is a 33 bit number. Use 64 bit division for now + // Also note that shift may be as high as 31, so shift + 1 will + // overflow. So we have to compute it as 2^(32+shift)/(m+2^32), and + // then double the quotient and remainder. + uint64_t half_n = 1ULL << (32 + shift); + uint64_t d = (1ULL << 32) | denom->magic; + // Note that the quotient is guaranteed <= 32 bits, but the remainder + // may need 33! + uint32_t half_q = (uint32_t)(half_n / d); + uint64_t rem = half_n % d; + // We computed 2^(32+shift)/(m+2^32) + // Need to double it, and then add 1 to the quotient if doubling th + // remainder would increase the quotient. + // Note that rem<<1 cannot overflow, since rem < d and d is 33 bits + uint32_t full_q = half_q + half_q + ((rem<<1) >= d); + + // We rounded down in gen (hence +1) + return full_q + 1; + } +} + +/////////// UINT64 + +static inline struct libdivide_u64_t libdivide_internal_u64_gen(uint64_t d, int branchfree) { + if (d == 0) { + LIBDIVIDE_ERROR("divider must be != 0"); + } + + struct libdivide_u64_t result; + uint32_t floor_log_2_d = 63 - libdivide_count_leading_zeros64(d); + + // Power of 2 + if ((d & (d - 1)) == 0) { + // We need to subtract 1 from the shift value in case of an unsigned + // branchfree divider because there is a hardcoded right shift by 1 + // in its division algorithm. Because of this we also need to add back + // 1 in its recovery algorithm. 
+ result.magic = 0; + result.more = (uint8_t)(floor_log_2_d - (branchfree != 0)); + } else { + uint64_t proposed_m, rem; + uint8_t more; + // (1 << (64 + floor_log_2_d)) / d + proposed_m = libdivide_128_div_64_to_64(1ULL << floor_log_2_d, 0, d, &rem); + + LIBDIVIDE_ASSERT(rem > 0 && rem < d); + const uint64_t e = d - rem; + + // This power works if e < 2**floor_log_2_d. + if (!branchfree && e < (1ULL << floor_log_2_d)) { + // This power works + more = floor_log_2_d; + } else { + // We have to use the general 65-bit algorithm. We need to compute + // (2**power) / d. However, we already have (2**(power-1))/d and + // its remainder. By doubling both, and then correcting the + // remainder, we can compute the larger division. + // don't care about overflow here - in fact, we expect it + proposed_m += proposed_m; + const uint64_t twice_rem = rem + rem; + if (twice_rem >= d || twice_rem < rem) proposed_m += 1; + more = floor_log_2_d | LIBDIVIDE_ADD_MARKER; + } + result.magic = 1 + proposed_m; + result.more = more; + // result.more's shift should in general be ceil_log_2_d. But if we + // used the smaller power, we subtract one from the shift because we're + // using the smaller power. If we're using the larger power, we + // subtract one from the shift because it's taken care of by the add + // indicator. So floor_log_2_d happens to be correct in both cases, + // which is why we do it outside of the if statement. + } + return result; +} + +struct libdivide_u64_t libdivide_u64_gen(uint64_t d) { + return libdivide_internal_u64_gen(d, 0); +} + +struct libdivide_u64_branchfree_t libdivide_u64_branchfree_gen(uint64_t d) { + if (d == 1) { + LIBDIVIDE_ERROR("branchfree divider must be != 1"); + } + struct libdivide_u64_t tmp = libdivide_internal_u64_gen(d, 1); + struct libdivide_u64_branchfree_t ret = {tmp.magic, (uint8_t)(tmp.more & LIBDIVIDE_64_SHIFT_MASK)}; + return ret; +} + +uint64_t libdivide_u64_do(uint64_t numer, const struct libdivide_u64_t *denom) { + uint8_t more = denom->more; + if (!denom->magic) { + return numer >> more; + } + else { + uint64_t q = libdivide_mullhi_u64(denom->magic, numer); + if (more & LIBDIVIDE_ADD_MARKER) { + uint64_t t = ((numer - q) >> 1) + q; + return t >> (more & LIBDIVIDE_64_SHIFT_MASK); + } + else { + // All upper bits are 0, + // don't need to mask them off. + return q >> more; + } + } +} + +uint64_t libdivide_u64_branchfree_do(uint64_t numer, const struct libdivide_u64_branchfree_t *denom) { + uint64_t q = libdivide_mullhi_u64(denom->magic, numer); + uint64_t t = ((numer - q) >> 1) + q; + return t >> denom->more; +} + +uint64_t libdivide_u64_recover(const struct libdivide_u64_t *denom) { + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + + if (!denom->magic) { + return 1ULL << shift; + } else if (!(more & LIBDIVIDE_ADD_MARKER)) { + // We compute q = n/d = n*m / 2^(64 + shift) + // Therefore we have d = 2^(64 + shift) / m + // We need to ceil it. + // We know d is not a power of 2, so m is not a power of 2, + // so we can just add 1 to the floor + uint64_t hi_dividend = 1ULL << shift; + uint64_t rem_ignored; + return 1 + libdivide_128_div_64_to_64(hi_dividend, 0, denom->magic, &rem_ignored); + } else { + // Here we wish to compute d = 2^(64+shift+1)/(m+2^64). + // Notice (m + 2^64) is a 65 bit number. This gets hairy. See + // libdivide_u32_recover for more on what we do here. 
+ // TODO: do something better than 128 bit math + + // Full n is a (potentially) 129 bit value + // half_n is a 128 bit value + // Compute the hi half of half_n. Low half is 0. + uint64_t half_n_hi = 1ULL << shift, half_n_lo = 0; + // d is a 65 bit value. The high bit is always set to 1. + const uint64_t d_hi = 1, d_lo = denom->magic; + // Note that the quotient is guaranteed <= 64 bits, + // but the remainder may need 65! + uint64_t r_hi, r_lo; + uint64_t half_q = libdivide_128_div_128_to_64(half_n_hi, half_n_lo, d_hi, d_lo, &r_hi, &r_lo); + // We computed 2^(64+shift)/(m+2^64) + // Double the remainder ('dr') and check if that is larger than d + // Note that d is a 65 bit value, so r1 is small and so r1 + r1 + // cannot overflow + uint64_t dr_lo = r_lo + r_lo; + uint64_t dr_hi = r_hi + r_hi + (dr_lo < r_lo); // last term is carry + int dr_exceeds_d = (dr_hi > d_hi) || (dr_hi == d_hi && dr_lo >= d_lo); + uint64_t full_q = half_q + half_q + (dr_exceeds_d ? 1 : 0); + return full_q + 1; + } +} + +uint64_t libdivide_u64_branchfree_recover(const struct libdivide_u64_branchfree_t *denom) { + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + + if (!denom->magic) { + return 1ULL << (shift + 1); + } else { + // Here we wish to compute d = 2^(64+shift+1)/(m+2^64). + // Notice (m + 2^64) is a 65 bit number. This gets hairy. See + // libdivide_u32_recover for more on what we do here. + // TODO: do something better than 128 bit math + + // Full n is a (potentially) 129 bit value + // half_n is a 128 bit value + // Compute the hi half of half_n. Low half is 0. + uint64_t half_n_hi = 1ULL << shift, half_n_lo = 0; + // d is a 65 bit value. The high bit is always set to 1. + const uint64_t d_hi = 1, d_lo = denom->magic; + // Note that the quotient is guaranteed <= 64 bits, + // but the remainder may need 65! + uint64_t r_hi, r_lo; + uint64_t half_q = libdivide_128_div_128_to_64(half_n_hi, half_n_lo, d_hi, d_lo, &r_hi, &r_lo); + // We computed 2^(64+shift)/(m+2^64) + // Double the remainder ('dr') and check if that is larger than d + // Note that d is a 65 bit value, so r1 is small and so r1 + r1 + // cannot overflow + uint64_t dr_lo = r_lo + r_lo; + uint64_t dr_hi = r_hi + r_hi + (dr_lo < r_lo); // last term is carry + int dr_exceeds_d = (dr_hi > d_hi) || (dr_hi == d_hi && dr_lo >= d_lo); + uint64_t full_q = half_q + half_q + (dr_exceeds_d ? 1 : 0); + return full_q + 1; + } +} + +/////////// SINT32 + +static inline struct libdivide_s32_t libdivide_internal_s32_gen(int32_t d, int branchfree) { + if (d == 0) { + LIBDIVIDE_ERROR("divider must be != 0"); + } + + struct libdivide_s32_t result; + + // If d is a power of 2, or negative a power of 2, we have to use a shift. + // This is especially important because the magic algorithm fails for -1. + // To check if d is a power of 2 or its inverse, it suffices to check + // whether its absolute value has exactly one bit set. This works even for + // INT_MIN, because abs(INT_MIN) == INT_MIN, and INT_MIN has one bit set + // and is a power of 2. + uint32_t ud = (uint32_t)d; + uint32_t absD = (d < 0) ? -ud : ud; + uint32_t floor_log_2_d = 31 - libdivide_count_leading_zeros32(absD); + // check if exactly one bit is set, + // don't care if absD is 0 since that's divide by zero + if ((absD & (absD - 1)) == 0) { + // Branchfree and normal paths are exactly the same + result.magic = 0; + result.more = floor_log_2_d | (d < 0 ? 
LIBDIVIDE_NEGATIVE_DIVISOR : 0); + } else { + LIBDIVIDE_ASSERT(floor_log_2_d >= 1); + + uint8_t more; + // the dividend here is 2**(floor_log_2_d + 31), so the low 32 bit word + // is 0 and the high word is floor_log_2_d - 1 + uint32_t rem, proposed_m; + proposed_m = libdivide_64_div_32_to_32(1U << (floor_log_2_d - 1), 0, absD, &rem); + const uint32_t e = absD - rem; + + // We are going to start with a power of floor_log_2_d - 1. + // This works if works if e < 2**floor_log_2_d. + if (!branchfree && e < (1U << floor_log_2_d)) { + // This power works + more = floor_log_2_d - 1; + } else { + // We need to go one higher. This should not make proposed_m + // overflow, but it will make it negative when interpreted as an + // int32_t. + proposed_m += proposed_m; + const uint32_t twice_rem = rem + rem; + if (twice_rem >= absD || twice_rem < rem) proposed_m += 1; + more = floor_log_2_d | LIBDIVIDE_ADD_MARKER; + } + + proposed_m += 1; + int32_t magic = (int32_t)proposed_m; + + // Mark if we are negative. Note we only negate the magic number in the + // branchfull case. + if (d < 0) { + more |= LIBDIVIDE_NEGATIVE_DIVISOR; + if (!branchfree) { + magic = -magic; + } + } + + result.more = more; + result.magic = magic; + } + return result; +} + +struct libdivide_s32_t libdivide_s32_gen(int32_t d) { + return libdivide_internal_s32_gen(d, 0); +} + +struct libdivide_s32_branchfree_t libdivide_s32_branchfree_gen(int32_t d) { + struct libdivide_s32_t tmp = libdivide_internal_s32_gen(d, 1); + struct libdivide_s32_branchfree_t result = {tmp.magic, tmp.more}; + return result; +} + +int32_t libdivide_s32_do(int32_t numer, const struct libdivide_s32_t *denom) { + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + + if (!denom->magic) { + uint32_t sign = (int8_t)more >> 7; + uint32_t mask = (1U << shift) - 1; + uint32_t uq = numer + ((numer >> 31) & mask); + int32_t q = (int32_t)uq; + q >>= shift; + q = (q ^ sign) - sign; + return q; + } else { + uint32_t uq = (uint32_t)libdivide_mullhi_s32(denom->magic, numer); + if (more & LIBDIVIDE_ADD_MARKER) { + // must be arithmetic shift and then sign extend + int32_t sign = (int8_t)more >> 7; + // q += (more < 0 ? 
-numer : numer) + // cast required to avoid UB + uq += ((uint32_t)numer ^ sign) - sign; + } + int32_t q = (int32_t)uq; + q >>= shift; + q += (q < 0); + return q; + } +} + +int32_t libdivide_s32_branchfree_do(int32_t numer, const struct libdivide_s32_branchfree_t *denom) { + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + // must be arithmetic shift and then sign extend + int32_t sign = (int8_t)more >> 7; + int32_t magic = denom->magic; + int32_t q = libdivide_mullhi_s32(magic, numer); + q += numer; + + // If q is non-negative, we have nothing to do + // If q is negative, we want to add either (2**shift)-1 if d is a power of + // 2, or (2**shift) if it is not a power of 2 + uint32_t is_power_of_2 = (magic == 0); + uint32_t q_sign = (uint32_t)(q >> 31); + q += q_sign & ((1U << shift) - is_power_of_2); + + // Now arithmetic right shift + q >>= shift; + // Negate if needed + q = (q ^ sign) - sign; + + return q; +} + +int32_t libdivide_s32_recover(const struct libdivide_s32_t *denom) { + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + if (!denom->magic) { + uint32_t absD = 1U << shift; + if (more & LIBDIVIDE_NEGATIVE_DIVISOR) { + absD = -absD; + } + return (int32_t)absD; + } else { + // Unsigned math is much easier + // We negate the magic number only in the branchfull case, and we don't + // know which case we're in. However we have enough information to + // determine the correct sign of the magic number. The divisor was + // negative if LIBDIVIDE_NEGATIVE_DIVISOR is set. If ADD_MARKER is set, + // the magic number's sign is opposite that of the divisor. + // We want to compute the positive magic number. + int negative_divisor = (more & LIBDIVIDE_NEGATIVE_DIVISOR); + int magic_was_negated = (more & LIBDIVIDE_ADD_MARKER) + ? denom->magic > 0 : denom->magic < 0; + + // Handle the power of 2 case (including branchfree) + if (denom->magic == 0) { + int32_t result = 1U << shift; + return negative_divisor ? -result : result; + } + + uint32_t d = (uint32_t)(magic_was_negated ? -denom->magic : denom->magic); + uint64_t n = 1ULL << (32 + shift); // this shift cannot exceed 30 + uint32_t q = (uint32_t)(n / d); + int32_t result = (int32_t)q; + result += 1; + return negative_divisor ? -result : result; + } +} + +int32_t libdivide_s32_branchfree_recover(const struct libdivide_s32_branchfree_t *denom) { + return libdivide_s32_recover((const struct libdivide_s32_t *)denom); +} + +///////////// SINT64 + +static inline struct libdivide_s64_t libdivide_internal_s64_gen(int64_t d, int branchfree) { + if (d == 0) { + LIBDIVIDE_ERROR("divider must be != 0"); + } + + struct libdivide_s64_t result; + + // If d is a power of 2, or negative a power of 2, we have to use a shift. + // This is especially important because the magic algorithm fails for -1. + // To check if d is a power of 2 or its inverse, it suffices to check + // whether its absolute value has exactly one bit set. This works even for + // INT_MIN, because abs(INT_MIN) == INT_MIN, and INT_MIN has one bit set + // and is a power of 2. + uint64_t ud = (uint64_t)d; + uint64_t absD = (d < 0) ? -ud : ud; + uint32_t floor_log_2_d = 63 - libdivide_count_leading_zeros64(absD); + // check if exactly one bit is set, + // don't care if absD is 0 since that's divide by zero + if ((absD & (absD - 1)) == 0) { + // Branchfree and non-branchfree cases are the same + result.magic = 0; + result.more = floor_log_2_d | (d < 0 ? 
LIBDIVIDE_NEGATIVE_DIVISOR : 0); + } else { + // the dividend here is 2**(floor_log_2_d + 63), so the low 64 bit word + // is 0 and the high word is floor_log_2_d - 1 + uint8_t more; + uint64_t rem, proposed_m; + proposed_m = libdivide_128_div_64_to_64(1ULL << (floor_log_2_d - 1), 0, absD, &rem); + const uint64_t e = absD - rem; + + // We are going to start with a power of floor_log_2_d - 1. + // This works if works if e < 2**floor_log_2_d. + if (!branchfree && e < (1ULL << floor_log_2_d)) { + // This power works + more = floor_log_2_d - 1; + } else { + // We need to go one higher. This should not make proposed_m + // overflow, but it will make it negative when interpreted as an + // int32_t. + proposed_m += proposed_m; + const uint64_t twice_rem = rem + rem; + if (twice_rem >= absD || twice_rem < rem) proposed_m += 1; + // note that we only set the LIBDIVIDE_NEGATIVE_DIVISOR bit if we + // also set ADD_MARKER this is an annoying optimization that + // enables algorithm #4 to avoid the mask. However we always set it + // in the branchfree case + more = floor_log_2_d | LIBDIVIDE_ADD_MARKER; + } + proposed_m += 1; + int64_t magic = (int64_t)proposed_m; + + // Mark if we are negative + if (d < 0) { + more |= LIBDIVIDE_NEGATIVE_DIVISOR; + if (!branchfree) { + magic = -magic; + } + } + + result.more = more; + result.magic = magic; + } + return result; +} + +struct libdivide_s64_t libdivide_s64_gen(int64_t d) { + return libdivide_internal_s64_gen(d, 0); +} + +struct libdivide_s64_branchfree_t libdivide_s64_branchfree_gen(int64_t d) { + struct libdivide_s64_t tmp = libdivide_internal_s64_gen(d, 1); + struct libdivide_s64_branchfree_t ret = {tmp.magic, tmp.more}; + return ret; +} + +int64_t libdivide_s64_do(int64_t numer, const struct libdivide_s64_t *denom) { + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + + if (!denom->magic) { // shift path + uint64_t mask = (1ULL << shift) - 1; + uint64_t uq = numer + ((numer >> 63) & mask); + int64_t q = (int64_t)uq; + q >>= shift; + // must be arithmetic shift and then sign-extend + int64_t sign = (int8_t)more >> 7; + q = (q ^ sign) - sign; + return q; + } else { + uint64_t uq = (uint64_t)libdivide_mullhi_s64(denom->magic, numer); + if (more & LIBDIVIDE_ADD_MARKER) { + // must be arithmetic shift and then sign extend + int64_t sign = (int8_t)more >> 7; + // q += (more < 0 ? -numer : numer) + // cast required to avoid UB + uq += ((uint64_t)numer ^ sign) - sign; + } + int64_t q = (int64_t)uq; + q >>= shift; + q += (q < 0); + return q; + } +} + +int64_t libdivide_s64_branchfree_do(int64_t numer, const struct libdivide_s64_branchfree_t *denom) { + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + // must be arithmetic shift and then sign extend + int64_t sign = (int8_t)more >> 7; + int64_t magic = denom->magic; + int64_t q = libdivide_mullhi_s64(magic, numer); + q += numer; + + // If q is non-negative, we have nothing to do. + // If q is negative, we want to add either (2**shift)-1 if d is a power of + // 2, or (2**shift) if it is not a power of 2. 
+ uint64_t is_power_of_2 = (magic == 0); + uint64_t q_sign = (uint64_t)(q >> 63); + q += q_sign & ((1ULL << shift) - is_power_of_2); + + // Arithmetic right shift + q >>= shift; + // Negate if needed + q = (q ^ sign) - sign; + + return q; +} + +int64_t libdivide_s64_recover(const struct libdivide_s64_t *denom) { + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + if (denom->magic == 0) { // shift path + uint64_t absD = 1ULL << shift; + if (more & LIBDIVIDE_NEGATIVE_DIVISOR) { + absD = -absD; + } + return (int64_t)absD; + } else { + // Unsigned math is much easier + int negative_divisor = (more & LIBDIVIDE_NEGATIVE_DIVISOR); + int magic_was_negated = (more & LIBDIVIDE_ADD_MARKER) + ? denom->magic > 0 : denom->magic < 0; + + uint64_t d = (uint64_t)(magic_was_negated ? -denom->magic : denom->magic); + uint64_t n_hi = 1ULL << shift, n_lo = 0; + uint64_t rem_ignored; + uint64_t q = libdivide_128_div_64_to_64(n_hi, n_lo, d, &rem_ignored); + int64_t result = (int64_t)(q + 1); + if (negative_divisor) { + result = -result; + } + return result; + } +} + +int64_t libdivide_s64_branchfree_recover(const struct libdivide_s64_branchfree_t *denom) { + return libdivide_s64_recover((const struct libdivide_s64_t *)denom); +} + +#if defined(LIBDIVIDE_AVX512) + +static inline __m512i libdivide_u32_do_vector(__m512i numers, const struct libdivide_u32_t *denom); +static inline __m512i libdivide_s32_do_vector(__m512i numers, const struct libdivide_s32_t *denom); +static inline __m512i libdivide_u64_do_vector(__m512i numers, const struct libdivide_u64_t *denom); +static inline __m512i libdivide_s64_do_vector(__m512i numers, const struct libdivide_s64_t *denom); + +static inline __m512i libdivide_u32_branchfree_do_vector(__m512i numers, const struct libdivide_u32_branchfree_t *denom); +static inline __m512i libdivide_s32_branchfree_do_vector(__m512i numers, const struct libdivide_s32_branchfree_t *denom); +static inline __m512i libdivide_u64_branchfree_do_vector(__m512i numers, const struct libdivide_u64_branchfree_t *denom); +static inline __m512i libdivide_s64_branchfree_do_vector(__m512i numers, const struct libdivide_s64_branchfree_t *denom); + +//////// Internal Utility Functions + +static inline __m512i libdivide_s64_signbits(__m512i v) {; + return _mm512_srai_epi64(v, 63); +} + +static inline __m512i libdivide_s64_shift_right_vector(__m512i v, int amt) { + return _mm512_srai_epi64(v, amt); +} + +// Here, b is assumed to contain one 32-bit value repeated. +static inline __m512i libdivide_mullhi_u32_vector(__m512i a, __m512i b) { + __m512i hi_product_0Z2Z = _mm512_srli_epi64(_mm512_mul_epu32(a, b), 32); + __m512i a1X3X = _mm512_srli_epi64(a, 32); + __m512i mask = _mm512_set_epi32(-1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0); + __m512i hi_product_Z1Z3 = _mm512_and_si512(_mm512_mul_epu32(a1X3X, b), mask); + return _mm512_or_si512(hi_product_0Z2Z, hi_product_Z1Z3); +} + +// b is one 32-bit value repeated. +static inline __m512i libdivide_mullhi_s32_vector(__m512i a, __m512i b) { + __m512i hi_product_0Z2Z = _mm512_srli_epi64(_mm512_mul_epi32(a, b), 32); + __m512i a1X3X = _mm512_srli_epi64(a, 32); + __m512i mask = _mm512_set_epi32(-1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0); + __m512i hi_product_Z1Z3 = _mm512_and_si512(_mm512_mul_epi32(a1X3X, b), mask); + return _mm512_or_si512(hi_product_0Z2Z, hi_product_Z1Z3); +} + +// Here, y is assumed to contain one 64-bit value repeated. 
+// https://stackoverflow.com/a/28827013 +static inline __m512i libdivide_mullhi_u64_vector(__m512i x, __m512i y) { + __m512i lomask = _mm512_set1_epi64(0xffffffff); + __m512i xh = _mm512_shuffle_epi32(x, (_MM_PERM_ENUM) 0xB1); + __m512i yh = _mm512_shuffle_epi32(y, (_MM_PERM_ENUM) 0xB1); + __m512i w0 = _mm512_mul_epu32(x, y); + __m512i w1 = _mm512_mul_epu32(x, yh); + __m512i w2 = _mm512_mul_epu32(xh, y); + __m512i w3 = _mm512_mul_epu32(xh, yh); + __m512i w0h = _mm512_srli_epi64(w0, 32); + __m512i s1 = _mm512_add_epi64(w1, w0h); + __m512i s1l = _mm512_and_si512(s1, lomask); + __m512i s1h = _mm512_srli_epi64(s1, 32); + __m512i s2 = _mm512_add_epi64(w2, s1l); + __m512i s2h = _mm512_srli_epi64(s2, 32); + __m512i hi = _mm512_add_epi64(w3, s1h); + hi = _mm512_add_epi64(hi, s2h); + + return hi; +} + +// y is one 64-bit value repeated. +static inline __m512i libdivide_mullhi_s64_vector(__m512i x, __m512i y) { + __m512i p = libdivide_mullhi_u64_vector(x, y); + __m512i t1 = _mm512_and_si512(libdivide_s64_signbits(x), y); + __m512i t2 = _mm512_and_si512(libdivide_s64_signbits(y), x); + p = _mm512_sub_epi64(p, t1); + p = _mm512_sub_epi64(p, t2); + return p; +} + +////////// UINT32 + +__m512i libdivide_u32_do_vector(__m512i numers, const struct libdivide_u32_t *denom) { + uint8_t more = denom->more; + if (!denom->magic) { + return _mm512_srli_epi32(numers, more); + } + else { + __m512i q = libdivide_mullhi_u32_vector(numers, _mm512_set1_epi32(denom->magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // uint32_t t = ((numer - q) >> 1) + q; + // return t >> denom->shift; + uint32_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + __m512i t = _mm512_add_epi32(_mm512_srli_epi32(_mm512_sub_epi32(numers, q), 1), q); + return _mm512_srli_epi32(t, shift); + } + else { + return _mm512_srli_epi32(q, more); + } + } +} + +__m512i libdivide_u32_branchfree_do_vector(__m512i numers, const struct libdivide_u32_branchfree_t *denom) { + __m512i q = libdivide_mullhi_u32_vector(numers, _mm512_set1_epi32(denom->magic)); + __m512i t = _mm512_add_epi32(_mm512_srli_epi32(_mm512_sub_epi32(numers, q), 1), q); + return _mm512_srli_epi32(t, denom->more); +} + +////////// UINT64 + +__m512i libdivide_u64_do_vector(__m512i numers, const struct libdivide_u64_t *denom) { + uint8_t more = denom->more; + if (!denom->magic) { + return _mm512_srli_epi64(numers, more); + } + else { + __m512i q = libdivide_mullhi_u64_vector(numers, _mm512_set1_epi64(denom->magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // uint32_t t = ((numer - q) >> 1) + q; + // return t >> denom->shift; + uint32_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + __m512i t = _mm512_add_epi64(_mm512_srli_epi64(_mm512_sub_epi64(numers, q), 1), q); + return _mm512_srli_epi64(t, shift); + } + else { + return _mm512_srli_epi64(q, more); + } + } +} + +__m512i libdivide_u64_branchfree_do_vector(__m512i numers, const struct libdivide_u64_branchfree_t *denom) { + __m512i q = libdivide_mullhi_u64_vector(numers, _mm512_set1_epi64(denom->magic)); + __m512i t = _mm512_add_epi64(_mm512_srli_epi64(_mm512_sub_epi64(numers, q), 1), q); + return _mm512_srli_epi64(t, denom->more); +} + +////////// SINT32 + +__m512i libdivide_s32_do_vector(__m512i numers, const struct libdivide_s32_t *denom) { + uint8_t more = denom->more; + if (!denom->magic) { + uint32_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + uint32_t mask = (1U << shift) - 1; + __m512i roundToZeroTweak = _mm512_set1_epi32(mask); + // q = numer + ((numer >> 31) & roundToZeroTweak); + __m512i q = _mm512_add_epi32(numers, 
_mm512_and_si512(_mm512_srai_epi32(numers, 31), roundToZeroTweak)); + q = _mm512_srai_epi32(q, shift); + __m512i sign = _mm512_set1_epi32((int8_t)more >> 7); + // q = (q ^ sign) - sign; + q = _mm512_sub_epi32(_mm512_xor_si512(q, sign), sign); + return q; + } + else { + __m512i q = libdivide_mullhi_s32_vector(numers, _mm512_set1_epi32(denom->magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // must be arithmetic shift + __m512i sign = _mm512_set1_epi32((int8_t)more >> 7); + // q += ((numer ^ sign) - sign); + q = _mm512_add_epi32(q, _mm512_sub_epi32(_mm512_xor_si512(numers, sign), sign)); + } + // q >>= shift + q = _mm512_srai_epi32(q, more & LIBDIVIDE_32_SHIFT_MASK); + q = _mm512_add_epi32(q, _mm512_srli_epi32(q, 31)); // q += (q < 0) + return q; + } +} + +__m512i libdivide_s32_branchfree_do_vector(__m512i numers, const struct libdivide_s32_branchfree_t *denom) { + int32_t magic = denom->magic; + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + // must be arithmetic shift + __m512i sign = _mm512_set1_epi32((int8_t)more >> 7); + __m512i q = libdivide_mullhi_s32_vector(numers, _mm512_set1_epi32(magic)); + q = _mm512_add_epi32(q, numers); // q += numers + + // If q is non-negative, we have nothing to do + // If q is negative, we want to add either (2**shift)-1 if d is + // a power of 2, or (2**shift) if it is not a power of 2 + uint32_t is_power_of_2 = (magic == 0); + __m512i q_sign = _mm512_srai_epi32(q, 31); // q_sign = q >> 31 + __m512i mask = _mm512_set1_epi32((1U << shift) - is_power_of_2); + q = _mm512_add_epi32(q, _mm512_and_si512(q_sign, mask)); // q = q + (q_sign & mask) + q = _mm512_srai_epi32(q, shift); // q >>= shift + q = _mm512_sub_epi32(_mm512_xor_si512(q, sign), sign); // q = (q ^ sign) - sign + return q; +} + +////////// SINT64 + +__m512i libdivide_s64_do_vector(__m512i numers, const struct libdivide_s64_t *denom) { + uint8_t more = denom->more; + int64_t magic = denom->magic; + if (magic == 0) { // shift path + uint32_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + uint64_t mask = (1ULL << shift) - 1; + __m512i roundToZeroTweak = _mm512_set1_epi64(mask); + // q = numer + ((numer >> 63) & roundToZeroTweak); + __m512i q = _mm512_add_epi64(numers, _mm512_and_si512(libdivide_s64_signbits(numers), roundToZeroTweak)); + q = libdivide_s64_shift_right_vector(q, shift); + __m512i sign = _mm512_set1_epi32((int8_t)more >> 7); + // q = (q ^ sign) - sign; + q = _mm512_sub_epi64(_mm512_xor_si512(q, sign), sign); + return q; + } + else { + __m512i q = libdivide_mullhi_s64_vector(numers, _mm512_set1_epi64(magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // must be arithmetic shift + __m512i sign = _mm512_set1_epi32((int8_t)more >> 7); + // q += ((numer ^ sign) - sign); + q = _mm512_add_epi64(q, _mm512_sub_epi64(_mm512_xor_si512(numers, sign), sign)); + } + // q >>= denom->mult_path.shift + q = libdivide_s64_shift_right_vector(q, more & LIBDIVIDE_64_SHIFT_MASK); + q = _mm512_add_epi64(q, _mm512_srli_epi64(q, 63)); // q += (q < 0) + return q; + } +} + +__m512i libdivide_s64_branchfree_do_vector(__m512i numers, const struct libdivide_s64_branchfree_t *denom) { + int64_t magic = denom->magic; + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + // must be arithmetic shift + __m512i sign = _mm512_set1_epi32((int8_t)more >> 7); + + // libdivide_mullhi_s64(numers, magic); + __m512i q = libdivide_mullhi_s64_vector(numers, _mm512_set1_epi64(magic)); + q = _mm512_add_epi64(q, numers); // q += numers + + // If q is non-negative, we have nothing to 
do. + // If q is negative, we want to add either (2**shift)-1 if d is + // a power of 2, or (2**shift) if it is not a power of 2. + uint32_t is_power_of_2 = (magic == 0); + __m512i q_sign = libdivide_s64_signbits(q); // q_sign = q >> 63 + __m512i mask = _mm512_set1_epi64((1ULL << shift) - is_power_of_2); + q = _mm512_add_epi64(q, _mm512_and_si512(q_sign, mask)); // q = q + (q_sign & mask) + q = libdivide_s64_shift_right_vector(q, shift); // q >>= shift + q = _mm512_sub_epi64(_mm512_xor_si512(q, sign), sign); // q = (q ^ sign) - sign + return q; +} + +#elif defined(LIBDIVIDE_AVX2) + +static inline __m256i libdivide_u32_do_vector(__m256i numers, const struct libdivide_u32_t *denom); +static inline __m256i libdivide_s32_do_vector(__m256i numers, const struct libdivide_s32_t *denom); +static inline __m256i libdivide_u64_do_vector(__m256i numers, const struct libdivide_u64_t *denom); +static inline __m256i libdivide_s64_do_vector(__m256i numers, const struct libdivide_s64_t *denom); + +static inline __m256i libdivide_u32_branchfree_do_vector(__m256i numers, const struct libdivide_u32_branchfree_t *denom); +static inline __m256i libdivide_s32_branchfree_do_vector(__m256i numers, const struct libdivide_s32_branchfree_t *denom); +static inline __m256i libdivide_u64_branchfree_do_vector(__m256i numers, const struct libdivide_u64_branchfree_t *denom); +static inline __m256i libdivide_s64_branchfree_do_vector(__m256i numers, const struct libdivide_s64_branchfree_t *denom); + +//////// Internal Utility Functions + +// Implementation of _mm256_srai_epi64(v, 63) (from AVX512). +static inline __m256i libdivide_s64_signbits(__m256i v) { + __m256i hiBitsDuped = _mm256_shuffle_epi32(v, _MM_SHUFFLE(3, 3, 1, 1)); + __m256i signBits = _mm256_srai_epi32(hiBitsDuped, 31); + return signBits; +} + +// Implementation of _mm256_srai_epi64 (from AVX512). +static inline __m256i libdivide_s64_shift_right_vector(__m256i v, int amt) { + const int b = 64 - amt; + __m256i m = _mm256_set1_epi64x(1ULL << (b - 1)); + __m256i x = _mm256_srli_epi64(v, amt); + __m256i result = _mm256_sub_epi64(_mm256_xor_si256(x, m), m); + return result; +} + +// Here, b is assumed to contain one 32-bit value repeated. +static inline __m256i libdivide_mullhi_u32_vector(__m256i a, __m256i b) { + __m256i hi_product_0Z2Z = _mm256_srli_epi64(_mm256_mul_epu32(a, b), 32); + __m256i a1X3X = _mm256_srli_epi64(a, 32); + __m256i mask = _mm256_set_epi32(-1, 0, -1, 0, -1, 0, -1, 0); + __m256i hi_product_Z1Z3 = _mm256_and_si256(_mm256_mul_epu32(a1X3X, b), mask); + return _mm256_or_si256(hi_product_0Z2Z, hi_product_Z1Z3); +} + +// b is one 32-bit value repeated. +static inline __m256i libdivide_mullhi_s32_vector(__m256i a, __m256i b) { + __m256i hi_product_0Z2Z = _mm256_srli_epi64(_mm256_mul_epi32(a, b), 32); + __m256i a1X3X = _mm256_srli_epi64(a, 32); + __m256i mask = _mm256_set_epi32(-1, 0, -1, 0, -1, 0, -1, 0); + __m256i hi_product_Z1Z3 = _mm256_and_si256(_mm256_mul_epi32(a1X3X, b), mask); + return _mm256_or_si256(hi_product_0Z2Z, hi_product_Z1Z3); +} + +// Here, y is assumed to contain one 64-bit value repeated. 
+// https://stackoverflow.com/a/28827013 +static inline __m256i libdivide_mullhi_u64_vector(__m256i x, __m256i y) { + __m256i lomask = _mm256_set1_epi64x(0xffffffff); + __m256i xh = _mm256_shuffle_epi32(x, 0xB1); // x0l, x0h, x1l, x1h + __m256i yh = _mm256_shuffle_epi32(y, 0xB1); // y0l, y0h, y1l, y1h + __m256i w0 = _mm256_mul_epu32(x, y); // x0l*y0l, x1l*y1l + __m256i w1 = _mm256_mul_epu32(x, yh); // x0l*y0h, x1l*y1h + __m256i w2 = _mm256_mul_epu32(xh, y); // x0h*y0l, x1h*y0l + __m256i w3 = _mm256_mul_epu32(xh, yh); // x0h*y0h, x1h*y1h + __m256i w0h = _mm256_srli_epi64(w0, 32); + __m256i s1 = _mm256_add_epi64(w1, w0h); + __m256i s1l = _mm256_and_si256(s1, lomask); + __m256i s1h = _mm256_srli_epi64(s1, 32); + __m256i s2 = _mm256_add_epi64(w2, s1l); + __m256i s2h = _mm256_srli_epi64(s2, 32); + __m256i hi = _mm256_add_epi64(w3, s1h); + hi = _mm256_add_epi64(hi, s2h); + + return hi; +} + +// y is one 64-bit value repeated. +static inline __m256i libdivide_mullhi_s64_vector(__m256i x, __m256i y) { + __m256i p = libdivide_mullhi_u64_vector(x, y); + __m256i t1 = _mm256_and_si256(libdivide_s64_signbits(x), y); + __m256i t2 = _mm256_and_si256(libdivide_s64_signbits(y), x); + p = _mm256_sub_epi64(p, t1); + p = _mm256_sub_epi64(p, t2); + return p; +} + +////////// UINT32 + +__m256i libdivide_u32_do_vector(__m256i numers, const struct libdivide_u32_t *denom) { + uint8_t more = denom->more; + if (!denom->magic) { + return _mm256_srli_epi32(numers, more); + } + else { + __m256i q = libdivide_mullhi_u32_vector(numers, _mm256_set1_epi32(denom->magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // uint32_t t = ((numer - q) >> 1) + q; + // return t >> denom->shift; + uint32_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + __m256i t = _mm256_add_epi32(_mm256_srli_epi32(_mm256_sub_epi32(numers, q), 1), q); + return _mm256_srli_epi32(t, shift); + } + else { + return _mm256_srli_epi32(q, more); + } + } +} + +__m256i libdivide_u32_branchfree_do_vector(__m256i numers, const struct libdivide_u32_branchfree_t *denom) { + __m256i q = libdivide_mullhi_u32_vector(numers, _mm256_set1_epi32(denom->magic)); + __m256i t = _mm256_add_epi32(_mm256_srli_epi32(_mm256_sub_epi32(numers, q), 1), q); + return _mm256_srli_epi32(t, denom->more); +} + +////////// UINT64 + +__m256i libdivide_u64_do_vector(__m256i numers, const struct libdivide_u64_t *denom) { + uint8_t more = denom->more; + if (!denom->magic) { + return _mm256_srli_epi64(numers, more); + } + else { + __m256i q = libdivide_mullhi_u64_vector(numers, _mm256_set1_epi64x(denom->magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // uint32_t t = ((numer - q) >> 1) + q; + // return t >> denom->shift; + uint32_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + __m256i t = _mm256_add_epi64(_mm256_srli_epi64(_mm256_sub_epi64(numers, q), 1), q); + return _mm256_srli_epi64(t, shift); + } + else { + return _mm256_srli_epi64(q, more); + } + } +} + +__m256i libdivide_u64_branchfree_do_vector(__m256i numers, const struct libdivide_u64_branchfree_t *denom) { + __m256i q = libdivide_mullhi_u64_vector(numers, _mm256_set1_epi64x(denom->magic)); + __m256i t = _mm256_add_epi64(_mm256_srli_epi64(_mm256_sub_epi64(numers, q), 1), q); + return _mm256_srli_epi64(t, denom->more); +} + +////////// SINT32 + +__m256i libdivide_s32_do_vector(__m256i numers, const struct libdivide_s32_t *denom) { + uint8_t more = denom->more; + if (!denom->magic) { + uint32_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + uint32_t mask = (1U << shift) - 1; + __m256i roundToZeroTweak = _mm256_set1_epi32(mask); + // q = numer + ((numer >> 
31) & roundToZeroTweak); + __m256i q = _mm256_add_epi32(numers, _mm256_and_si256(_mm256_srai_epi32(numers, 31), roundToZeroTweak)); + q = _mm256_srai_epi32(q, shift); + __m256i sign = _mm256_set1_epi32((int8_t)more >> 7); + // q = (q ^ sign) - sign; + q = _mm256_sub_epi32(_mm256_xor_si256(q, sign), sign); + return q; + } + else { + __m256i q = libdivide_mullhi_s32_vector(numers, _mm256_set1_epi32(denom->magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // must be arithmetic shift + __m256i sign = _mm256_set1_epi32((int8_t)more >> 7); + // q += ((numer ^ sign) - sign); + q = _mm256_add_epi32(q, _mm256_sub_epi32(_mm256_xor_si256(numers, sign), sign)); + } + // q >>= shift + q = _mm256_srai_epi32(q, more & LIBDIVIDE_32_SHIFT_MASK); + q = _mm256_add_epi32(q, _mm256_srli_epi32(q, 31)); // q += (q < 0) + return q; + } +} + +__m256i libdivide_s32_branchfree_do_vector(__m256i numers, const struct libdivide_s32_branchfree_t *denom) { + int32_t magic = denom->magic; + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + // must be arithmetic shift + __m256i sign = _mm256_set1_epi32((int8_t)more >> 7); + __m256i q = libdivide_mullhi_s32_vector(numers, _mm256_set1_epi32(magic)); + q = _mm256_add_epi32(q, numers); // q += numers + + // If q is non-negative, we have nothing to do + // If q is negative, we want to add either (2**shift)-1 if d is + // a power of 2, or (2**shift) if it is not a power of 2 + uint32_t is_power_of_2 = (magic == 0); + __m256i q_sign = _mm256_srai_epi32(q, 31); // q_sign = q >> 31 + __m256i mask = _mm256_set1_epi32((1U << shift) - is_power_of_2); + q = _mm256_add_epi32(q, _mm256_and_si256(q_sign, mask)); // q = q + (q_sign & mask) + q = _mm256_srai_epi32(q, shift); // q >>= shift + q = _mm256_sub_epi32(_mm256_xor_si256(q, sign), sign); // q = (q ^ sign) - sign + return q; +} + +////////// SINT64 + +__m256i libdivide_s64_do_vector(__m256i numers, const struct libdivide_s64_t *denom) { + uint8_t more = denom->more; + int64_t magic = denom->magic; + if (magic == 0) { // shift path + uint32_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + uint64_t mask = (1ULL << shift) - 1; + __m256i roundToZeroTweak = _mm256_set1_epi64x(mask); + // q = numer + ((numer >> 63) & roundToZeroTweak); + __m256i q = _mm256_add_epi64(numers, _mm256_and_si256(libdivide_s64_signbits(numers), roundToZeroTweak)); + q = libdivide_s64_shift_right_vector(q, shift); + __m256i sign = _mm256_set1_epi32((int8_t)more >> 7); + // q = (q ^ sign) - sign; + q = _mm256_sub_epi64(_mm256_xor_si256(q, sign), sign); + return q; + } + else { + __m256i q = libdivide_mullhi_s64_vector(numers, _mm256_set1_epi64x(magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // must be arithmetic shift + __m256i sign = _mm256_set1_epi32((int8_t)more >> 7); + // q += ((numer ^ sign) - sign); + q = _mm256_add_epi64(q, _mm256_sub_epi64(_mm256_xor_si256(numers, sign), sign)); + } + // q >>= denom->mult_path.shift + q = libdivide_s64_shift_right_vector(q, more & LIBDIVIDE_64_SHIFT_MASK); + q = _mm256_add_epi64(q, _mm256_srli_epi64(q, 63)); // q += (q < 0) + return q; + } +} + +__m256i libdivide_s64_branchfree_do_vector(__m256i numers, const struct libdivide_s64_branchfree_t *denom) { + int64_t magic = denom->magic; + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + // must be arithmetic shift + __m256i sign = _mm256_set1_epi32((int8_t)more >> 7); + + // libdivide_mullhi_s64(numers, magic); + __m256i q = libdivide_mullhi_s64_vector(numers, _mm256_set1_epi64x(magic)); + q = _mm256_add_epi64(q, 
numers); // q += numers + + // If q is non-negative, we have nothing to do. + // If q is negative, we want to add either (2**shift)-1 if d is + // a power of 2, or (2**shift) if it is not a power of 2. + uint32_t is_power_of_2 = (magic == 0); + __m256i q_sign = libdivide_s64_signbits(q); // q_sign = q >> 63 + __m256i mask = _mm256_set1_epi64x((1ULL << shift) - is_power_of_2); + q = _mm256_add_epi64(q, _mm256_and_si256(q_sign, mask)); // q = q + (q_sign & mask) + q = libdivide_s64_shift_right_vector(q, shift); // q >>= shift + q = _mm256_sub_epi64(_mm256_xor_si256(q, sign), sign); // q = (q ^ sign) - sign + return q; +} + +#elif defined(LIBDIVIDE_SSE2) + +static inline __m128i libdivide_u32_do_vector(__m128i numers, const struct libdivide_u32_t *denom); +static inline __m128i libdivide_s32_do_vector(__m128i numers, const struct libdivide_s32_t *denom); +static inline __m128i libdivide_u64_do_vector(__m128i numers, const struct libdivide_u64_t *denom); +static inline __m128i libdivide_s64_do_vector(__m128i numers, const struct libdivide_s64_t *denom); + +static inline __m128i libdivide_u32_branchfree_do_vector(__m128i numers, const struct libdivide_u32_branchfree_t *denom); +static inline __m128i libdivide_s32_branchfree_do_vector(__m128i numers, const struct libdivide_s32_branchfree_t *denom); +static inline __m128i libdivide_u64_branchfree_do_vector(__m128i numers, const struct libdivide_u64_branchfree_t *denom); +static inline __m128i libdivide_s64_branchfree_do_vector(__m128i numers, const struct libdivide_s64_branchfree_t *denom); + +//////// Internal Utility Functions + +// Implementation of _mm_srai_epi64(v, 63) (from AVX512). +static inline __m128i libdivide_s64_signbits(__m128i v) { + __m128i hiBitsDuped = _mm_shuffle_epi32(v, _MM_SHUFFLE(3, 3, 1, 1)); + __m128i signBits = _mm_srai_epi32(hiBitsDuped, 31); + return signBits; +} + +// Implementation of _mm_srai_epi64 (from AVX512). +static inline __m128i libdivide_s64_shift_right_vector(__m128i v, int amt) { + const int b = 64 - amt; + __m128i m = _mm_set1_epi64x(1ULL << (b - 1)); + __m128i x = _mm_srli_epi64(v, amt); + __m128i result = _mm_sub_epi64(_mm_xor_si128(x, m), m); + return result; +} + +// Here, b is assumed to contain one 32-bit value repeated. +static inline __m128i libdivide_mullhi_u32_vector(__m128i a, __m128i b) { + __m128i hi_product_0Z2Z = _mm_srli_epi64(_mm_mul_epu32(a, b), 32); + __m128i a1X3X = _mm_srli_epi64(a, 32); + __m128i mask = _mm_set_epi32(-1, 0, -1, 0); + __m128i hi_product_Z1Z3 = _mm_and_si128(_mm_mul_epu32(a1X3X, b), mask); + return _mm_or_si128(hi_product_0Z2Z, hi_product_Z1Z3); +} + +// SSE2 does not have a signed multiplication instruction, but we can convert +// unsigned to signed pretty efficiently. Again, b is just a 32 bit value +// repeated four times. +static inline __m128i libdivide_mullhi_s32_vector(__m128i a, __m128i b) { + __m128i p = libdivide_mullhi_u32_vector(a, b); + // t1 = (a >> 31) & y, arithmetic shift + __m128i t1 = _mm_and_si128(_mm_srai_epi32(a, 31), b); + __m128i t2 = _mm_and_si128(_mm_srai_epi32(b, 31), a); + p = _mm_sub_epi32(p, t1); + p = _mm_sub_epi32(p, t2); + return p; +} + +// Here, y is assumed to contain one 64-bit value repeated. 
+// https://stackoverflow.com/a/28827013 +static inline __m128i libdivide_mullhi_u64_vector(__m128i x, __m128i y) { + __m128i lomask = _mm_set1_epi64x(0xffffffff); + __m128i xh = _mm_shuffle_epi32(x, 0xB1); // x0l, x0h, x1l, x1h + __m128i yh = _mm_shuffle_epi32(y, 0xB1); // y0l, y0h, y1l, y1h + __m128i w0 = _mm_mul_epu32(x, y); // x0l*y0l, x1l*y1l + __m128i w1 = _mm_mul_epu32(x, yh); // x0l*y0h, x1l*y1h + __m128i w2 = _mm_mul_epu32(xh, y); // x0h*y0l, x1h*y0l + __m128i w3 = _mm_mul_epu32(xh, yh); // x0h*y0h, x1h*y1h + __m128i w0h = _mm_srli_epi64(w0, 32); + __m128i s1 = _mm_add_epi64(w1, w0h); + __m128i s1l = _mm_and_si128(s1, lomask); + __m128i s1h = _mm_srli_epi64(s1, 32); + __m128i s2 = _mm_add_epi64(w2, s1l); + __m128i s2h = _mm_srli_epi64(s2, 32); + __m128i hi = _mm_add_epi64(w3, s1h); + hi = _mm_add_epi64(hi, s2h); + + return hi; +} + +// y is one 64-bit value repeated. +static inline __m128i libdivide_mullhi_s64_vector(__m128i x, __m128i y) { + __m128i p = libdivide_mullhi_u64_vector(x, y); + __m128i t1 = _mm_and_si128(libdivide_s64_signbits(x), y); + __m128i t2 = _mm_and_si128(libdivide_s64_signbits(y), x); + p = _mm_sub_epi64(p, t1); + p = _mm_sub_epi64(p, t2); + return p; +} + +////////// UINT32 + +__m128i libdivide_u32_do_vector(__m128i numers, const struct libdivide_u32_t *denom) { + uint8_t more = denom->more; + if (!denom->magic) { + return _mm_srli_epi32(numers, more); + } + else { + __m128i q = libdivide_mullhi_u32_vector(numers, _mm_set1_epi32(denom->magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // uint32_t t = ((numer - q) >> 1) + q; + // return t >> denom->shift; + uint32_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + __m128i t = _mm_add_epi32(_mm_srli_epi32(_mm_sub_epi32(numers, q), 1), q); + return _mm_srli_epi32(t, shift); + } + else { + return _mm_srli_epi32(q, more); + } + } +} + +__m128i libdivide_u32_branchfree_do_vector(__m128i numers, const struct libdivide_u32_branchfree_t *denom) { + __m128i q = libdivide_mullhi_u32_vector(numers, _mm_set1_epi32(denom->magic)); + __m128i t = _mm_add_epi32(_mm_srli_epi32(_mm_sub_epi32(numers, q), 1), q); + return _mm_srli_epi32(t, denom->more); +} + +////////// UINT64 + +__m128i libdivide_u64_do_vector(__m128i numers, const struct libdivide_u64_t *denom) { + uint8_t more = denom->more; + if (!denom->magic) { + return _mm_srli_epi64(numers, more); + } + else { + __m128i q = libdivide_mullhi_u64_vector(numers, _mm_set1_epi64x(denom->magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // uint32_t t = ((numer - q) >> 1) + q; + // return t >> denom->shift; + uint32_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + __m128i t = _mm_add_epi64(_mm_srli_epi64(_mm_sub_epi64(numers, q), 1), q); + return _mm_srli_epi64(t, shift); + } + else { + return _mm_srli_epi64(q, more); + } + } +} + +__m128i libdivide_u64_branchfree_do_vector(__m128i numers, const struct libdivide_u64_branchfree_t *denom) { + __m128i q = libdivide_mullhi_u64_vector(numers, _mm_set1_epi64x(denom->magic)); + __m128i t = _mm_add_epi64(_mm_srli_epi64(_mm_sub_epi64(numers, q), 1), q); + return _mm_srli_epi64(t, denom->more); +} + +////////// SINT32 + +__m128i libdivide_s32_do_vector(__m128i numers, const struct libdivide_s32_t *denom) { + uint8_t more = denom->more; + if (!denom->magic) { + uint32_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + uint32_t mask = (1U << shift) - 1; + __m128i roundToZeroTweak = _mm_set1_epi32(mask); + // q = numer + ((numer >> 31) & roundToZeroTweak); + __m128i q = _mm_add_epi32(numers, _mm_and_si128(_mm_srai_epi32(numers, 31), roundToZeroTweak)); + q = 
_mm_srai_epi32(q, shift); + __m128i sign = _mm_set1_epi32((int8_t)more >> 7); + // q = (q ^ sign) - sign; + q = _mm_sub_epi32(_mm_xor_si128(q, sign), sign); + return q; + } + else { + __m128i q = libdivide_mullhi_s32_vector(numers, _mm_set1_epi32(denom->magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // must be arithmetic shift + __m128i sign = _mm_set1_epi32((int8_t)more >> 7); + // q += ((numer ^ sign) - sign); + q = _mm_add_epi32(q, _mm_sub_epi32(_mm_xor_si128(numers, sign), sign)); + } + // q >>= shift + q = _mm_srai_epi32(q, more & LIBDIVIDE_32_SHIFT_MASK); + q = _mm_add_epi32(q, _mm_srli_epi32(q, 31)); // q += (q < 0) + return q; + } +} + +__m128i libdivide_s32_branchfree_do_vector(__m128i numers, const struct libdivide_s32_branchfree_t *denom) { + int32_t magic = denom->magic; + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + // must be arithmetic shift + __m128i sign = _mm_set1_epi32((int8_t)more >> 7); + __m128i q = libdivide_mullhi_s32_vector(numers, _mm_set1_epi32(magic)); + q = _mm_add_epi32(q, numers); // q += numers + + // If q is non-negative, we have nothing to do + // If q is negative, we want to add either (2**shift)-1 if d is + // a power of 2, or (2**shift) if it is not a power of 2 + uint32_t is_power_of_2 = (magic == 0); + __m128i q_sign = _mm_srai_epi32(q, 31); // q_sign = q >> 31 + __m128i mask = _mm_set1_epi32((1U << shift) - is_power_of_2); + q = _mm_add_epi32(q, _mm_and_si128(q_sign, mask)); // q = q + (q_sign & mask) + q = _mm_srai_epi32(q, shift); // q >>= shift + q = _mm_sub_epi32(_mm_xor_si128(q, sign), sign); // q = (q ^ sign) - sign + return q; +} + +////////// SINT64 + +__m128i libdivide_s64_do_vector(__m128i numers, const struct libdivide_s64_t *denom) { + uint8_t more = denom->more; + int64_t magic = denom->magic; + if (magic == 0) { // shift path + uint32_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + uint64_t mask = (1ULL << shift) - 1; + __m128i roundToZeroTweak = _mm_set1_epi64x(mask); + // q = numer + ((numer >> 63) & roundToZeroTweak); + __m128i q = _mm_add_epi64(numers, _mm_and_si128(libdivide_s64_signbits(numers), roundToZeroTweak)); + q = libdivide_s64_shift_right_vector(q, shift); + __m128i sign = _mm_set1_epi32((int8_t)more >> 7); + // q = (q ^ sign) - sign; + q = _mm_sub_epi64(_mm_xor_si128(q, sign), sign); + return q; + } + else { + __m128i q = libdivide_mullhi_s64_vector(numers, _mm_set1_epi64x(magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // must be arithmetic shift + __m128i sign = _mm_set1_epi32((int8_t)more >> 7); + // q += ((numer ^ sign) - sign); + q = _mm_add_epi64(q, _mm_sub_epi64(_mm_xor_si128(numers, sign), sign)); + } + // q >>= denom->mult_path.shift + q = libdivide_s64_shift_right_vector(q, more & LIBDIVIDE_64_SHIFT_MASK); + q = _mm_add_epi64(q, _mm_srli_epi64(q, 63)); // q += (q < 0) + return q; + } +} + +__m128i libdivide_s64_branchfree_do_vector(__m128i numers, const struct libdivide_s64_branchfree_t *denom) { + int64_t magic = denom->magic; + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + // must be arithmetic shift + __m128i sign = _mm_set1_epi32((int8_t)more >> 7); + + // libdivide_mullhi_s64(numers, magic); + __m128i q = libdivide_mullhi_s64_vector(numers, _mm_set1_epi64x(magic)); + q = _mm_add_epi64(q, numers); // q += numers + + // If q is non-negative, we have nothing to do. + // If q is negative, we want to add either (2**shift)-1 if d is + // a power of 2, or (2**shift) if it is not a power of 2. 
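+    // (The bias compensates for the arithmetic shift below, which rounds
+    // toward negative infinity rather than toward zero. For instance, with
+    // d = 4 the shift is 2 and -7 >> 2 == -2, whereas C division truncates
+    // -7/4 to -1; adding (2**2)-1 = 3 first gives (-7 + 3) >> 2 == -1.)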
+ uint32_t is_power_of_2 = (magic == 0); + __m128i q_sign = libdivide_s64_signbits(q); // q_sign = q >> 63 + __m128i mask = _mm_set1_epi64x((1ULL << shift) - is_power_of_2); + q = _mm_add_epi64(q, _mm_and_si128(q_sign, mask)); // q = q + (q_sign & mask) + q = libdivide_s64_shift_right_vector(q, shift); // q >>= shift + q = _mm_sub_epi64(_mm_xor_si128(q, sign), sign); // q = (q ^ sign) - sign + return q; +} + +#endif + +/////////// C++ stuff + +#ifdef __cplusplus + +// The C++ divider class is templated on both an integer type +// (like uint64_t) and an algorithm type. +// * BRANCHFULL is the default algorithm type. +// * BRANCHFREE is the branchfree algorithm type. +enum { + BRANCHFULL, + BRANCHFREE +}; + +#if defined(LIBDIVIDE_AVX512) + #define LIBDIVIDE_VECTOR_TYPE __m512i +#elif defined(LIBDIVIDE_AVX2) + #define LIBDIVIDE_VECTOR_TYPE __m256i +#elif defined(LIBDIVIDE_SSE2) + #define LIBDIVIDE_VECTOR_TYPE __m128i +#endif + +#if !defined(LIBDIVIDE_VECTOR_TYPE) + #define LIBDIVIDE_DIVIDE_VECTOR(ALGO) +#else + #define LIBDIVIDE_DIVIDE_VECTOR(ALGO) \ + LIBDIVIDE_VECTOR_TYPE divide(LIBDIVIDE_VECTOR_TYPE n) const { \ + return libdivide_##ALGO##_do_vector(n, &denom); \ + } +#endif + +// The DISPATCHER_GEN() macro generates C++ methods (for the given integer +// and algorithm types) that redirect to libdivide's C API. +#define DISPATCHER_GEN(T, ALGO) \ + libdivide_##ALGO##_t denom; \ + dispatcher() { } \ + dispatcher(T d) \ + : denom(libdivide_##ALGO##_gen(d)) \ + { } \ + T divide(T n) const { \ + return libdivide_##ALGO##_do(n, &denom); \ + } \ + LIBDIVIDE_DIVIDE_VECTOR(ALGO) \ + T recover() const { \ + return libdivide_##ALGO##_recover(&denom); \ + } + +// The dispatcher selects a specific division algorithm for a given +// type and ALGO using partial template specialization. +template<bool IS_INTEGRAL, bool IS_SIGNED, int SIZEOF, int ALGO> struct dispatcher { }; + +template<> struct dispatcher<true, true, sizeof(int32_t), BRANCHFULL> { DISPATCHER_GEN(int32_t, s32) }; +template<> struct dispatcher<true, true, sizeof(int32_t), BRANCHFREE> { DISPATCHER_GEN(int32_t, s32_branchfree) }; +template<> struct dispatcher<true, false, sizeof(uint32_t), BRANCHFULL> { DISPATCHER_GEN(uint32_t, u32) }; +template<> struct dispatcher<true, false, sizeof(uint32_t), BRANCHFREE> { DISPATCHER_GEN(uint32_t, u32_branchfree) }; +template<> struct dispatcher<true, true, sizeof(int64_t), BRANCHFULL> { DISPATCHER_GEN(int64_t, s64) }; +template<> struct dispatcher<true, true, sizeof(int64_t), BRANCHFREE> { DISPATCHER_GEN(int64_t, s64_branchfree) }; +template<> struct dispatcher<true, false, sizeof(uint64_t), BRANCHFULL> { DISPATCHER_GEN(uint64_t, u64) }; +template<> struct dispatcher<true, false, sizeof(uint64_t), BRANCHFREE> { DISPATCHER_GEN(uint64_t, u64_branchfree) }; + +// This is the main divider class for use by the user (C++ API). +// The actual division algorithm is selected using the dispatcher struct +// based on the integer and algorithm template parameters. +template<typename T, int ALGO = BRANCHFULL> +class divider { +public: + // We leave the default constructor empty so that creating + // an array of dividers and then initializing them + // later doesn't slow us down. + divider() { } + + // Constructor that takes the divisor as a parameter + divider(T d) : div(d) { } + + // Divides n by the divisor + T divide(T n) const { + return div.divide(n); + } + + // Recovers the divisor, returns the value that was + // used to initialize this divider object. 
+ T recover() const { + return div.recover(); + } + + bool operator==(const divider<T, ALGO>& other) const { + return div.denom.magic == other.denom.magic && + div.denom.more == other.denom.more; + } + + bool operator!=(const divider<T, ALGO>& other) const { + return !(*this == other); + } + +#if defined(LIBDIVIDE_VECTOR_TYPE) + // Treats the vector as packed integer values with the same type as + // the divider (e.g. s32, u32, s64, u64) and divides each of + // them by the divider, returning the packed quotients. + LIBDIVIDE_VECTOR_TYPE divide(LIBDIVIDE_VECTOR_TYPE n) const { + return div.divide(n); + } +#endif + +private: + // Storage for the actual divisor + dispatcher<std::is_integral<T>::value, + std::is_signed<T>::value, sizeof(T), ALGO> div; +}; + +// Overload of operator / for scalar division +template<typename T, int ALGO> +T operator/(T n, const divider<T, ALGO>& div) { + return div.divide(n); +} + +// Overload of operator /= for scalar division +template<typename T, int ALGO> +T& operator/=(T& n, const divider<T, ALGO>& div) { + n = div.divide(n); + return n; +} + +#if defined(LIBDIVIDE_VECTOR_TYPE) + // Overload of operator / for vector division + template<typename T, int ALGO> + LIBDIVIDE_VECTOR_TYPE operator/(LIBDIVIDE_VECTOR_TYPE n, const divider<T, ALGO>& div) { + return div.divide(n); + } + // Overload of operator /= for vector division + template<typename T, int ALGO> + LIBDIVIDE_VECTOR_TYPE& operator/=(LIBDIVIDE_VECTOR_TYPE& n, const divider<T, ALGO>& div) { + n = div.divide(n); + return n; + } +#endif + +// libdivdie::branchfree_divider<T> +template <typename T> +using branchfree_divider = divider<T, BRANCHFREE>; + +} // namespace libdivide + +#endif // __cplusplus + +#endif // NUMPY_CORE_INCLUDE_NUMPY_LIBDIVIDE_LIBDIVIDE_H_ diff --git a/.venv/lib/python3.12/site-packages/numpy/core/include/numpy/ufuncobject.h b/.venv/lib/python3.12/site-packages/numpy/core/include/numpy/ufuncobject.h new file mode 100644 index 00000000..9e00f2e5 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/include/numpy/ufuncobject.h @@ -0,0 +1,359 @@ +#ifndef NUMPY_CORE_INCLUDE_NUMPY_UFUNCOBJECT_H_ +#define NUMPY_CORE_INCLUDE_NUMPY_UFUNCOBJECT_H_ + +#include <numpy/npy_math.h> +#include <numpy/npy_common.h> + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * The legacy generic inner loop for a standard element-wise or + * generalized ufunc. + */ +typedef void (*PyUFuncGenericFunction) + (char **args, + npy_intp const *dimensions, + npy_intp const *strides, + void *innerloopdata); + +/* + * The most generic one-dimensional inner loop for + * a masked standard element-wise ufunc. "Masked" here means that it skips + * doing calculations on any items for which the maskptr array has a true + * value. + */ +typedef void (PyUFunc_MaskedStridedInnerLoopFunc)( + char **dataptrs, npy_intp *strides, + char *maskptr, npy_intp mask_stride, + npy_intp count, + NpyAuxData *innerloopdata); + +/* Forward declaration for the type resolver and loop selector typedefs */ +struct _tagPyUFuncObject; + +/* + * Given the operands for calling a ufunc, should determine the + * calculation input and output data types and return an inner loop function. + * This function should validate that the casting rule is being followed, + * and fail if it is not. + * + * For backwards compatibility, the regular type resolution function does not + * support auxiliary data with object semantics. 
The type resolution call + * which returns a masked generic function returns a standard NpyAuxData + * object, for which the NPY_AUXDATA_FREE and NPY_AUXDATA_CLONE macros + * work. + * + * ufunc: The ufunc object. + * casting: The 'casting' parameter provided to the ufunc. + * operands: An array of length (ufunc->nin + ufunc->nout), + * with the output parameters possibly NULL. + * type_tup: Either NULL, or the type_tup passed to the ufunc. + * out_dtypes: An array which should be populated with new + * references to (ufunc->nin + ufunc->nout) new + * dtypes, one for each input and output. These + * dtypes should all be in native-endian format. + * + * Should return 0 on success, -1 on failure (with exception set), + * or -2 if Py_NotImplemented should be returned. + */ +typedef int (PyUFunc_TypeResolutionFunc)( + struct _tagPyUFuncObject *ufunc, + NPY_CASTING casting, + PyArrayObject **operands, + PyObject *type_tup, + PyArray_Descr **out_dtypes); + +/* + * Legacy loop selector. (This should NOT normally be used and we can expect + * that only the `PyUFunc_DefaultLegacyInnerLoopSelector` is ever set). + * However, unlike the masked version, it probably still works. + * + * ufunc: The ufunc object. + * dtypes: An array which has been populated with dtypes, + * in most cases by the type resolution function + * for the same ufunc. + * out_innerloop: Should be populated with the correct ufunc inner + * loop for the given type. + * out_innerloopdata: Should be populated with the void* data to + * be passed into the out_innerloop function. + * out_needs_api: If the inner loop needs to use the Python API, + * should set the to 1, otherwise should leave + * this untouched. + */ +typedef int (PyUFunc_LegacyInnerLoopSelectionFunc)( + struct _tagPyUFuncObject *ufunc, + PyArray_Descr **dtypes, + PyUFuncGenericFunction *out_innerloop, + void **out_innerloopdata, + int *out_needs_api); + + +typedef struct _tagPyUFuncObject { + PyObject_HEAD + /* + * nin: Number of inputs + * nout: Number of outputs + * nargs: Always nin + nout (Why is it stored?) + */ + int nin, nout, nargs; + + /* + * Identity for reduction, any of PyUFunc_One, PyUFunc_Zero + * PyUFunc_MinusOne, PyUFunc_None, PyUFunc_ReorderableNone, + * PyUFunc_IdentityValue. 
+ */ + int identity; + + /* Array of one-dimensional core loops */ + PyUFuncGenericFunction *functions; + /* Array of funcdata that gets passed into the functions */ + void **data; + /* The number of elements in 'functions' and 'data' */ + int ntypes; + + /* Used to be unused field 'check_return' */ + int reserved1; + + /* The name of the ufunc */ + const char *name; + + /* Array of type numbers, of size ('nargs' * 'ntypes') */ + char *types; + + /* Documentation string */ + const char *doc; + + void *ptr; + PyObject *obj; + PyObject *userloops; + + /* generalized ufunc parameters */ + + /* 0 for scalar ufunc; 1 for generalized ufunc */ + int core_enabled; + /* number of distinct dimension names in signature */ + int core_num_dim_ix; + + /* + * dimension indices of input/output argument k are stored in + * core_dim_ixs[core_offsets[k]..core_offsets[k]+core_num_dims[k]-1] + */ + + /* numbers of core dimensions of each argument */ + int *core_num_dims; + /* + * dimension indices in a flatted form; indices + * are in the range of [0,core_num_dim_ix) + */ + int *core_dim_ixs; + /* + * positions of 1st core dimensions of each + * argument in core_dim_ixs, equivalent to cumsum(core_num_dims) + */ + int *core_offsets; + /* signature string for printing purpose */ + char *core_signature; + + /* + * A function which resolves the types and fills an array + * with the dtypes for the inputs and outputs. + */ + PyUFunc_TypeResolutionFunc *type_resolver; + /* + * A function which returns an inner loop written for + * NumPy 1.6 and earlier ufuncs. This is for backwards + * compatibility, and may be NULL if inner_loop_selector + * is specified. + */ + PyUFunc_LegacyInnerLoopSelectionFunc *legacy_inner_loop_selector; + /* + * This was blocked off to be the "new" inner loop selector in 1.7, + * but this was never implemented. (This is also why the above + * selector is called the "legacy" selector.) + */ + #ifndef Py_LIMITED_API + vectorcallfunc vectorcall; + #else + void *vectorcall; + #endif + + /* Was previously the `PyUFunc_MaskedInnerLoopSelectionFunc` */ + void *_always_null_previously_masked_innerloop_selector; + + /* + * List of flags for each operand when ufunc is called by nditer object. + * These flags will be used in addition to the default flags for each + * operand set by nditer object. + */ + npy_uint32 *op_flags; + + /* + * List of global flags used when ufunc is called by nditer object. + * These flags will be used in addition to the default global flags + * set by nditer object. + */ + npy_uint32 iter_flags; + + /* New in NPY_API_VERSION 0x0000000D and above */ + #if NPY_FEATURE_VERSION >= NPY_1_16_API_VERSION + /* + * for each core_num_dim_ix distinct dimension names, + * the possible "frozen" size (-1 if not frozen). + */ + npy_intp *core_dim_sizes; + + /* + * for each distinct core dimension, a set of UFUNC_CORE_DIM* flags + */ + npy_uint32 *core_dim_flags; + + /* Identity for reduction, when identity == PyUFunc_IdentityValue */ + PyObject *identity_value; + #endif /* NPY_FEATURE_VERSION >= NPY_1_16_API_VERSION */ + + /* New in NPY_API_VERSION 0x0000000F and above */ + #if NPY_FEATURE_VERSION >= NPY_1_22_API_VERSION + /* New private fields related to dispatching */ + void *_dispatch_cache; + /* A PyListObject of `(tuple of DTypes, ArrayMethod/Promoter)` */ + PyObject *_loops; + #endif +} PyUFuncObject; + +#include "arrayobject.h" +/* Generalized ufunc; 0x0001 reserved for possible use as CORE_ENABLED */ +/* the core dimension's size will be determined by the operands. 
*/ +#define UFUNC_CORE_DIM_SIZE_INFERRED 0x0002 +/* the core dimension may be absent */ +#define UFUNC_CORE_DIM_CAN_IGNORE 0x0004 +/* flags inferred during execution */ +#define UFUNC_CORE_DIM_MISSING 0x00040000 + +#define UFUNC_ERR_IGNORE 0 +#define UFUNC_ERR_WARN 1 +#define UFUNC_ERR_RAISE 2 +#define UFUNC_ERR_CALL 3 +#define UFUNC_ERR_PRINT 4 +#define UFUNC_ERR_LOG 5 + + /* Python side integer mask */ + +#define UFUNC_MASK_DIVIDEBYZERO 0x07 +#define UFUNC_MASK_OVERFLOW 0x3f +#define UFUNC_MASK_UNDERFLOW 0x1ff +#define UFUNC_MASK_INVALID 0xfff + +#define UFUNC_SHIFT_DIVIDEBYZERO 0 +#define UFUNC_SHIFT_OVERFLOW 3 +#define UFUNC_SHIFT_UNDERFLOW 6 +#define UFUNC_SHIFT_INVALID 9 + + +#define UFUNC_OBJ_ISOBJECT 1 +#define UFUNC_OBJ_NEEDS_API 2 + + /* Default user error mode */ +#define UFUNC_ERR_DEFAULT \ + (UFUNC_ERR_WARN << UFUNC_SHIFT_DIVIDEBYZERO) + \ + (UFUNC_ERR_WARN << UFUNC_SHIFT_OVERFLOW) + \ + (UFUNC_ERR_WARN << UFUNC_SHIFT_INVALID) + +#if NPY_ALLOW_THREADS +#define NPY_LOOP_BEGIN_THREADS do {if (!(loop->obj & UFUNC_OBJ_NEEDS_API)) _save = PyEval_SaveThread();} while (0); +#define NPY_LOOP_END_THREADS do {if (!(loop->obj & UFUNC_OBJ_NEEDS_API)) PyEval_RestoreThread(_save);} while (0); +#else +#define NPY_LOOP_BEGIN_THREADS +#define NPY_LOOP_END_THREADS +#endif + +/* + * UFunc has unit of 0, and the order of operations can be reordered + * This case allows reduction with multiple axes at once. + */ +#define PyUFunc_Zero 0 +/* + * UFunc has unit of 1, and the order of operations can be reordered + * This case allows reduction with multiple axes at once. + */ +#define PyUFunc_One 1 +/* + * UFunc has unit of -1, and the order of operations can be reordered + * This case allows reduction with multiple axes at once. Intended for + * bitwise_and reduction. + */ +#define PyUFunc_MinusOne 2 +/* + * UFunc has no unit, and the order of operations cannot be reordered. + * This case does not allow reduction with multiple axes at once. + */ +#define PyUFunc_None -1 +/* + * UFunc has no unit, and the order of operations can be reordered + * This case allows reduction with multiple axes at once. + */ +#define PyUFunc_ReorderableNone -2 +/* + * UFunc unit is an identity_value, and the order of operations can be reordered + * This case allows reduction with multiple axes at once. + */ +#define PyUFunc_IdentityValue -3 + + +#define UFUNC_REDUCE 0 +#define UFUNC_ACCUMULATE 1 +#define UFUNC_REDUCEAT 2 +#define UFUNC_OUTER 3 + + +typedef struct { + int nin; + int nout; + PyObject *callable; +} PyUFunc_PyFuncData; + +/* A linked-list of function information for + user-defined 1-d loops. + */ +typedef struct _loop1d_info { + PyUFuncGenericFunction func; + void *data; + int *arg_types; + struct _loop1d_info *next; + int nargs; + PyArray_Descr **arg_dtypes; +} PyUFunc_Loop1d; + + +#include "__ufunc_api.h" + +#define UFUNC_PYVALS_NAME "UFUNC_PYVALS" + +/* + * THESE MACROS ARE DEPRECATED. + * Use npy_set_floatstatus_* in the npymath library. 
+ */ +#define UFUNC_FPE_DIVIDEBYZERO NPY_FPE_DIVIDEBYZERO +#define UFUNC_FPE_OVERFLOW NPY_FPE_OVERFLOW +#define UFUNC_FPE_UNDERFLOW NPY_FPE_UNDERFLOW +#define UFUNC_FPE_INVALID NPY_FPE_INVALID + +#define generate_divbyzero_error() npy_set_floatstatus_divbyzero() +#define generate_overflow_error() npy_set_floatstatus_overflow() + + /* Make sure it gets defined if it isn't already */ +#ifndef UFUNC_NOFPE +/* Clear the floating point exception default of Borland C++ */ +#if defined(__BORLANDC__) +#define UFUNC_NOFPE _control87(MCW_EM, MCW_EM); +#else +#define UFUNC_NOFPE +#endif +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* NUMPY_CORE_INCLUDE_NUMPY_UFUNCOBJECT_H_ */ diff --git a/.venv/lib/python3.12/site-packages/numpy/core/include/numpy/utils.h b/.venv/lib/python3.12/site-packages/numpy/core/include/numpy/utils.h new file mode 100644 index 00000000..97f06092 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/include/numpy/utils.h @@ -0,0 +1,37 @@ +#ifndef NUMPY_CORE_INCLUDE_NUMPY_UTILS_H_ +#define NUMPY_CORE_INCLUDE_NUMPY_UTILS_H_ + +#ifndef __COMP_NPY_UNUSED + #if defined(__GNUC__) + #define __COMP_NPY_UNUSED __attribute__ ((__unused__)) + #elif defined(__ICC) + #define __COMP_NPY_UNUSED __attribute__ ((__unused__)) + #elif defined(__clang__) + #define __COMP_NPY_UNUSED __attribute__ ((unused)) + #else + #define __COMP_NPY_UNUSED + #endif +#endif + +#if defined(__GNUC__) || defined(__ICC) || defined(__clang__) + #define NPY_DECL_ALIGNED(x) __attribute__ ((aligned (x))) +#elif defined(_MSC_VER) + #define NPY_DECL_ALIGNED(x) __declspec(align(x)) +#else + #define NPY_DECL_ALIGNED(x) +#endif + +/* Use this to tag a variable as not used. It will remove unused variable + * warning on support platforms (see __COM_NPY_UNUSED) and mangle the variable + * to avoid accidental use */ +#define NPY_UNUSED(x) __NPY_UNUSED_TAGGED ## x __COMP_NPY_UNUSED +#define NPY_EXPAND(x) x + +#define NPY_STRINGIFY(x) #x +#define NPY_TOSTRING(x) NPY_STRINGIFY(x) + +#define NPY_CAT__(a, b) a ## b +#define NPY_CAT_(a, b) NPY_CAT__(a, b) +#define NPY_CAT(a, b) NPY_CAT_(a, b) + +#endif /* NUMPY_CORE_INCLUDE_NUMPY_UTILS_H_ */ diff --git a/.venv/lib/python3.12/site-packages/numpy/core/lib/libnpymath.a b/.venv/lib/python3.12/site-packages/numpy/core/lib/libnpymath.a new file mode 100644 index 00000000..96a955e0 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/lib/libnpymath.a Binary files differdiff --git a/.venv/lib/python3.12/site-packages/numpy/core/lib/npy-pkg-config/mlib.ini b/.venv/lib/python3.12/site-packages/numpy/core/lib/npy-pkg-config/mlib.ini new file mode 100644 index 00000000..5840f5e1 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/lib/npy-pkg-config/mlib.ini @@ -0,0 +1,12 @@ +[meta] +Name = mlib +Description = Math library used with this version of numpy +Version = 1.0 + +[default] +Libs=-lm +Cflags= + +[msvc] +Libs=m.lib +Cflags= diff --git a/.venv/lib/python3.12/site-packages/numpy/core/lib/npy-pkg-config/npymath.ini b/.venv/lib/python3.12/site-packages/numpy/core/lib/npy-pkg-config/npymath.ini new file mode 100644 index 00000000..3e465ad2 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/lib/npy-pkg-config/npymath.ini @@ -0,0 +1,20 @@ +[meta] +Name=npymath +Description=Portable, core math library implementing C99 standard +Version=0.1 + +[variables] +pkgname=numpy.core +prefix=${pkgdir} +libdir=${prefix}/lib +includedir=${prefix}/include + +[default] +Libs=-L${libdir} -lnpymath +Cflags=-I${includedir} +Requires=mlib + +[msvc] 
+Libs=/LIBPATH:${libdir} npymath.lib +Cflags=/INCLUDE:${includedir} +Requires=mlib diff --git a/.venv/lib/python3.12/site-packages/numpy/core/memmap.py b/.venv/lib/python3.12/site-packages/numpy/core/memmap.py new file mode 100644 index 00000000..79c69545 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/memmap.py @@ -0,0 +1,338 @@ +from contextlib import nullcontext + +import numpy as np +from .._utils import set_module +from .numeric import uint8, ndarray, dtype +from numpy.compat import os_fspath, is_pathlib_path + +__all__ = ['memmap'] + +dtypedescr = dtype +valid_filemodes = ["r", "c", "r+", "w+"] +writeable_filemodes = ["r+", "w+"] + +mode_equivalents = { + "readonly":"r", + "copyonwrite":"c", + "readwrite":"r+", + "write":"w+" + } + + +@set_module('numpy') +class memmap(ndarray): + """Create a memory-map to an array stored in a *binary* file on disk. + + Memory-mapped files are used for accessing small segments of large files + on disk, without reading the entire file into memory. NumPy's + memmap's are array-like objects. This differs from Python's ``mmap`` + module, which uses file-like objects. + + This subclass of ndarray has some unpleasant interactions with + some operations, because it doesn't quite fit properly as a subclass. + An alternative to using this subclass is to create the ``mmap`` + object yourself, then create an ndarray with ndarray.__new__ directly, + passing the object created in its 'buffer=' parameter. + + This class may at some point be turned into a factory function + which returns a view into an mmap buffer. + + Flush the memmap instance to write the changes to the file. Currently there + is no API to close the underlying ``mmap``. It is tricky to ensure the + resource is actually closed, since it may be shared between different + memmap instances. + + + Parameters + ---------- + filename : str, file-like object, or pathlib.Path instance + The file name or file object to be used as the array data buffer. + dtype : data-type, optional + The data-type used to interpret the file contents. + Default is `uint8`. + mode : {'r+', 'r', 'w+', 'c'}, optional + The file is opened in this mode: + + +------+-------------------------------------------------------------+ + | 'r' | Open existing file for reading only. | + +------+-------------------------------------------------------------+ + | 'r+' | Open existing file for reading and writing. | + +------+-------------------------------------------------------------+ + | 'w+' | Create or overwrite existing file for reading and writing. | + | | If ``mode == 'w+'`` then `shape` must also be specified. | + +------+-------------------------------------------------------------+ + | 'c' | Copy-on-write: assignments affect data in memory, but | + | | changes are not saved to disk. The file on disk is | + | | read-only. | + +------+-------------------------------------------------------------+ + + Default is 'r+'. + offset : int, optional + In the file, array data starts at this offset. Since `offset` is + measured in bytes, it should normally be a multiple of the byte-size + of `dtype`. When ``mode != 'r'``, even positive offsets beyond end of + file are valid; The file will be extended to accommodate the + additional data. By default, ``memmap`` will start at the beginning of + the file, even if ``filename`` is a file pointer ``fp`` and + ``fp.tell() != 0``. + shape : tuple, optional + The desired shape of the array. 
If ``mode == 'r'`` and the number + of remaining bytes after `offset` is not a multiple of the byte-size + of `dtype`, you must specify `shape`. By default, the returned array + will be 1-D with the number of elements determined by file size + and data-type. + order : {'C', 'F'}, optional + Specify the order of the ndarray memory layout: + :term:`row-major`, C-style or :term:`column-major`, + Fortran-style. This only has an effect if the shape is + greater than 1-D. The default order is 'C'. + + Attributes + ---------- + filename : str or pathlib.Path instance + Path to the mapped file. + offset : int + Offset position in the file. + mode : str + File mode. + + Methods + ------- + flush + Flush any changes in memory to file on disk. + When you delete a memmap object, flush is called first to write + changes to disk. + + + See also + -------- + lib.format.open_memmap : Create or load a memory-mapped ``.npy`` file. + + Notes + ----- + The memmap object can be used anywhere an ndarray is accepted. + Given a memmap ``fp``, ``isinstance(fp, numpy.ndarray)`` returns + ``True``. + + Memory-mapped files cannot be larger than 2GB on 32-bit systems. + + When a memmap causes a file to be created or extended beyond its + current size in the filesystem, the contents of the new part are + unspecified. On systems with POSIX filesystem semantics, the extended + part will be filled with zero bytes. + + Examples + -------- + >>> data = np.arange(12, dtype='float32') + >>> data.resize((3,4)) + + This example uses a temporary file so that doctest doesn't write + files to your directory. You would use a 'normal' filename. + + >>> from tempfile import mkdtemp + >>> import os.path as path + >>> filename = path.join(mkdtemp(), 'newfile.dat') + + Create a memmap with dtype and shape that matches our data: + + >>> fp = np.memmap(filename, dtype='float32', mode='w+', shape=(3,4)) + >>> fp + memmap([[0., 0., 0., 0.], + [0., 0., 0., 0.], + [0., 0., 0., 0.]], dtype=float32) + + Write data to memmap array: + + >>> fp[:] = data[:] + >>> fp + memmap([[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.], + [ 8., 9., 10., 11.]], dtype=float32) + + >>> fp.filename == path.abspath(filename) + True + + Flushes memory changes to disk in order to read them back + + >>> fp.flush() + + Load the memmap and verify data was stored: + + >>> newfp = np.memmap(filename, dtype='float32', mode='r', shape=(3,4)) + >>> newfp + memmap([[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.], + [ 8., 9., 10., 11.]], dtype=float32) + + Read-only memmap: + + >>> fpr = np.memmap(filename, dtype='float32', mode='r', shape=(3,4)) + >>> fpr.flags.writeable + False + + Copy-on-write memmap: + + >>> fpc = np.memmap(filename, dtype='float32', mode='c', shape=(3,4)) + >>> fpc.flags.writeable + True + + It's possible to assign to copy-on-write array, but values are only + written into the memory copy of the array, and not written to disk: + + >>> fpc + memmap([[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.], + [ 8., 9., 10., 11.]], dtype=float32) + >>> fpc[0,:] = 0 + >>> fpc + memmap([[ 0., 0., 0., 0.], + [ 4., 5., 6., 7.], + [ 8., 9., 10., 11.]], dtype=float32) + + File on disk is unchanged: + + >>> fpr + memmap([[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.], + [ 8., 9., 10., 11.]], dtype=float32) + + Offset into a memmap: + + >>> fpo = np.memmap(filename, dtype='float32', mode='r', offset=16) + >>> fpo + memmap([ 4., 5., 6., 7., 8., 9., 10., 11.], dtype=float32) + + """ + + __array_priority__ = -100.0 + + def __new__(subtype, filename, dtype=uint8, mode='r+', offset=0, + shape=None, order='C'): + 
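+        # Overview of the construction below: normalize the requested mode,
+        # open the file (or reuse a file-like object passed in), work out the
+        # total byte size from `shape`/`dtype` or from the file length, grow
+        # the file when a writable mode needs more room, create the mmap at an
+        # ALLOCATIONGRANULARITY-aligned offset, and finally wrap the mapping
+        # in an ndarray view.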
# Import here to minimize 'import numpy' overhead + import mmap + import os.path + try: + mode = mode_equivalents[mode] + except KeyError as e: + if mode not in valid_filemodes: + raise ValueError( + "mode must be one of {!r} (got {!r})" + .format(valid_filemodes + list(mode_equivalents.keys()), mode) + ) from None + + if mode == 'w+' and shape is None: + raise ValueError("shape must be given if mode == 'w+'") + + if hasattr(filename, 'read'): + f_ctx = nullcontext(filename) + else: + f_ctx = open(os_fspath(filename), ('r' if mode == 'c' else mode)+'b') + + with f_ctx as fid: + fid.seek(0, 2) + flen = fid.tell() + descr = dtypedescr(dtype) + _dbytes = descr.itemsize + + if shape is None: + bytes = flen - offset + if bytes % _dbytes: + raise ValueError("Size of available data is not a " + "multiple of the data-type size.") + size = bytes // _dbytes + shape = (size,) + else: + if not isinstance(shape, tuple): + shape = (shape,) + size = np.intp(1) # avoid default choice of np.int_, which might overflow + for k in shape: + size *= k + + bytes = int(offset + size*_dbytes) + + if mode in ('w+', 'r+') and flen < bytes: + fid.seek(bytes - 1, 0) + fid.write(b'\0') + fid.flush() + + if mode == 'c': + acc = mmap.ACCESS_COPY + elif mode == 'r': + acc = mmap.ACCESS_READ + else: + acc = mmap.ACCESS_WRITE + + start = offset - offset % mmap.ALLOCATIONGRANULARITY + bytes -= start + array_offset = offset - start + mm = mmap.mmap(fid.fileno(), bytes, access=acc, offset=start) + + self = ndarray.__new__(subtype, shape, dtype=descr, buffer=mm, + offset=array_offset, order=order) + self._mmap = mm + self.offset = offset + self.mode = mode + + if is_pathlib_path(filename): + # special case - if we were constructed with a pathlib.path, + # then filename is a path object, not a string + self.filename = filename.resolve() + elif hasattr(fid, "name") and isinstance(fid.name, str): + # py3 returns int for TemporaryFile().name + self.filename = os.path.abspath(fid.name) + # same as memmap copies (e.g. memmap + 1) + else: + self.filename = None + + return self + + def __array_finalize__(self, obj): + if hasattr(obj, '_mmap') and np.may_share_memory(self, obj): + self._mmap = obj._mmap + self.filename = obj.filename + self.offset = obj.offset + self.mode = obj.mode + else: + self._mmap = None + self.filename = None + self.offset = None + self.mode = None + + def flush(self): + """ + Write any changes in the array to the file on disk. + + For further information, see `memmap`. + + Parameters + ---------- + None + + See Also + -------- + memmap + + """ + if self.base is not None and hasattr(self.base, 'flush'): + self.base.flush() + + def __array_wrap__(self, arr, context=None): + arr = super().__array_wrap__(arr, context) + + # Return a memmap if a memmap was given as the output of the + # ufunc. Leave the arr class unchanged if self is not a memmap + # to keep original memmap subclasses behavior + if self is arr or type(self) is not memmap: + return arr + # Return scalar instead of 0d memmap, e.g. 
for np.sum with + # axis=None + if arr.shape == (): + return arr[()] + # Return ndarray otherwise + return arr.view(np.ndarray) + + def __getitem__(self, index): + res = super().__getitem__(index) + if type(res) is memmap and res._mmap is None: + return res.view(type=ndarray) + return res diff --git a/.venv/lib/python3.12/site-packages/numpy/core/memmap.pyi b/.venv/lib/python3.12/site-packages/numpy/core/memmap.pyi new file mode 100644 index 00000000..03c6b772 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/memmap.pyi @@ -0,0 +1,3 @@ +from numpy import memmap as memmap + +__all__: list[str] diff --git a/.venv/lib/python3.12/site-packages/numpy/core/multiarray.py b/.venv/lib/python3.12/site-packages/numpy/core/multiarray.py new file mode 100644 index 00000000..d1128334 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/multiarray.py @@ -0,0 +1,1715 @@ +""" +Create the numpy.core.multiarray namespace for backward compatibility. In v1.16 +the multiarray and umath c-extension modules were merged into a single +_multiarray_umath extension module. So we replicate the old namespace +by importing from the extension module. + +""" + +import functools +from . import overrides +from . import _multiarray_umath +from ._multiarray_umath import * # noqa: F403 +# These imports are needed for backward compatibility, +# do not change them. issue gh-15518 +# _get_ndarray_c_version is semi-public, on purpose not added to __all__ +from ._multiarray_umath import ( + fastCopyAndTranspose, _flagdict, from_dlpack, _place, _reconstruct, + _vec_string, _ARRAY_API, _monotonicity, _get_ndarray_c_version, + _get_madvise_hugepage, _set_madvise_hugepage, + _get_promotion_state, _set_promotion_state, _using_numpy2_behavior + ) + +__all__ = [ + '_ARRAY_API', 'ALLOW_THREADS', 'BUFSIZE', 'CLIP', 'DATETIMEUNITS', + 'ITEM_HASOBJECT', 'ITEM_IS_POINTER', 'LIST_PICKLE', 'MAXDIMS', + 'MAY_SHARE_BOUNDS', 'MAY_SHARE_EXACT', 'NEEDS_INIT', 'NEEDS_PYAPI', + 'RAISE', 'USE_GETITEM', 'USE_SETITEM', 'WRAP', + '_flagdict', 'from_dlpack', '_place', '_reconstruct', '_vec_string', + '_monotonicity', 'add_docstring', 'arange', 'array', 'asarray', + 'asanyarray', 'ascontiguousarray', 'asfortranarray', 'bincount', + 'broadcast', 'busday_count', 'busday_offset', 'busdaycalendar', 'can_cast', + 'compare_chararrays', 'concatenate', 'copyto', 'correlate', 'correlate2', + 'count_nonzero', 'c_einsum', 'datetime_as_string', 'datetime_data', + 'dot', 'dragon4_positional', 'dragon4_scientific', 'dtype', + 'empty', 'empty_like', 'error', 'flagsobj', 'flatiter', 'format_longfloat', + 'frombuffer', 'fromfile', 'fromiter', 'fromstring', + 'get_handler_name', 'get_handler_version', 'inner', 'interp', + 'interp_complex', 'is_busday', 'lexsort', 'matmul', 'may_share_memory', + 'min_scalar_type', 'ndarray', 'nditer', 'nested_iters', + 'normalize_axis_index', 'packbits', 'promote_types', 'putmask', + 'ravel_multi_index', 'result_type', 'scalar', 'set_datetimeparse_function', + 'set_legacy_print_mode', 'set_numeric_ops', 'set_string_function', + 'set_typeDict', 'shares_memory', 'tracemalloc_domain', 'typeinfo', + 'unpackbits', 'unravel_index', 'vdot', 'where', 'zeros', + '_get_promotion_state', '_set_promotion_state', '_using_numpy2_behavior'] + +# For backward compatibility, make sure pickle imports these functions from here +_reconstruct.__module__ = 'numpy.core.multiarray' +scalar.__module__ = 'numpy.core.multiarray' + + +from_dlpack.__module__ = 'numpy' +arange.__module__ = 'numpy' +array.__module__ = 'numpy' +asarray.__module__ 
= 'numpy' +asanyarray.__module__ = 'numpy' +ascontiguousarray.__module__ = 'numpy' +asfortranarray.__module__ = 'numpy' +datetime_data.__module__ = 'numpy' +empty.__module__ = 'numpy' +frombuffer.__module__ = 'numpy' +fromfile.__module__ = 'numpy' +fromiter.__module__ = 'numpy' +frompyfunc.__module__ = 'numpy' +fromstring.__module__ = 'numpy' +geterrobj.__module__ = 'numpy' +may_share_memory.__module__ = 'numpy' +nested_iters.__module__ = 'numpy' +promote_types.__module__ = 'numpy' +set_numeric_ops.__module__ = 'numpy' +seterrobj.__module__ = 'numpy' +zeros.__module__ = 'numpy' +_get_promotion_state.__module__ = 'numpy' +_set_promotion_state.__module__ = 'numpy' +_using_numpy2_behavior.__module__ = 'numpy' + + +# We can't verify dispatcher signatures because NumPy's C functions don't +# support introspection. +array_function_from_c_func_and_dispatcher = functools.partial( + overrides.array_function_from_dispatcher, + module='numpy', docs_from_dispatcher=True, verify=False) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.empty_like) +def empty_like(prototype, dtype=None, order=None, subok=None, shape=None): + """ + empty_like(prototype, dtype=None, order='K', subok=True, shape=None) + + Return a new array with the same shape and type as a given array. + + Parameters + ---------- + prototype : array_like + The shape and data-type of `prototype` define these same attributes + of the returned array. + dtype : data-type, optional + Overrides the data type of the result. + + .. versionadded:: 1.6.0 + order : {'C', 'F', 'A', or 'K'}, optional + Overrides the memory layout of the result. 'C' means C-order, + 'F' means F-order, 'A' means 'F' if `prototype` is Fortran + contiguous, 'C' otherwise. 'K' means match the layout of `prototype` + as closely as possible. + + .. versionadded:: 1.6.0 + subok : bool, optional. + If True, then the newly created array will use the sub-class + type of `prototype`, otherwise it will be a base-class array. Defaults + to True. + shape : int or sequence of ints, optional. + Overrides the shape of the result. If order='K' and the number of + dimensions is unchanged, will try to keep order, otherwise, + order='C' is implied. + + .. versionadded:: 1.17.0 + + Returns + ------- + out : ndarray + Array of uninitialized (arbitrary) data with the same + shape and type as `prototype`. + + See Also + -------- + ones_like : Return an array of ones with shape and type of input. + zeros_like : Return an array of zeros with shape and type of input. + full_like : Return a new array with shape of input filled with value. + empty : Return a new uninitialized array. + + Notes + ----- + This function does *not* initialize the returned array; to do that use + `zeros_like` or `ones_like` instead. It may be marginally faster than + the functions that do set the array values. + + Examples + -------- + >>> a = ([1,2,3], [4,5,6]) # a is array-like + >>> np.empty_like(a) + array([[-1073741821, -1073741821, 3], # uninitialized + [ 0, 0, -1073741821]]) + >>> a = np.array([[1., 2., 3.],[4.,5.,6.]]) + >>> np.empty_like(a) + array([[ -2.00000715e+000, 1.48219694e-323, -2.00000572e+000], # uninitialized + [ 4.38791518e-305, -2.00000715e+000, 4.17269252e-309]]) + + """ + return (prototype,) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.concatenate) +def concatenate(arrays, axis=None, out=None, *, dtype=None, casting=None): + """ + concatenate((a1, a2, ...), axis=0, out=None, dtype=None, casting="same_kind") + + Join a sequence of arrays along an existing axis. 
+ + Parameters + ---------- + a1, a2, ... : sequence of array_like + The arrays must have the same shape, except in the dimension + corresponding to `axis` (the first, by default). + axis : int, optional + The axis along which the arrays will be joined. If axis is None, + arrays are flattened before use. Default is 0. + out : ndarray, optional + If provided, the destination to place the result. The shape must be + correct, matching that of what concatenate would have returned if no + out argument were specified. + dtype : str or dtype + If provided, the destination array will have this dtype. Cannot be + provided together with `out`. + + .. versionadded:: 1.20.0 + + casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional + Controls what kind of data casting may occur. Defaults to 'same_kind'. + + .. versionadded:: 1.20.0 + + Returns + ------- + res : ndarray + The concatenated array. + + See Also + -------- + ma.concatenate : Concatenate function that preserves input masks. + array_split : Split an array into multiple sub-arrays of equal or + near-equal size. + split : Split array into a list of multiple sub-arrays of equal size. + hsplit : Split array into multiple sub-arrays horizontally (column wise). + vsplit : Split array into multiple sub-arrays vertically (row wise). + dsplit : Split array into multiple sub-arrays along the 3rd axis (depth). + stack : Stack a sequence of arrays along a new axis. + block : Assemble arrays from blocks. + hstack : Stack arrays in sequence horizontally (column wise). + vstack : Stack arrays in sequence vertically (row wise). + dstack : Stack arrays in sequence depth wise (along third dimension). + column_stack : Stack 1-D arrays as columns into a 2-D array. + + Notes + ----- + When one or more of the arrays to be concatenated is a MaskedArray, + this function will return a MaskedArray object instead of an ndarray, + but the input masks are *not* preserved. In cases where a MaskedArray + is expected as input, use the ma.concatenate function from the masked + array module instead. + + Examples + -------- + >>> a = np.array([[1, 2], [3, 4]]) + >>> b = np.array([[5, 6]]) + >>> np.concatenate((a, b), axis=0) + array([[1, 2], + [3, 4], + [5, 6]]) + >>> np.concatenate((a, b.T), axis=1) + array([[1, 2, 5], + [3, 4, 6]]) + >>> np.concatenate((a, b), axis=None) + array([1, 2, 3, 4, 5, 6]) + + This function will not preserve masking of MaskedArray inputs. + + >>> a = np.ma.arange(3) + >>> a[1] = np.ma.masked + >>> b = np.arange(2, 5) + >>> a + masked_array(data=[0, --, 2], + mask=[False, True, False], + fill_value=999999) + >>> b + array([2, 3, 4]) + >>> np.concatenate([a, b]) + masked_array(data=[0, 1, 2, 2, 3, 4], + mask=False, + fill_value=999999) + >>> np.ma.concatenate([a, b]) + masked_array(data=[0, --, 2, 2, 3, 4], + mask=[False, True, False, False, False, False], + fill_value=999999) + + """ + if out is not None: + # optimize for the typical case where only arrays is provided + arrays = list(arrays) + arrays.append(out) + return arrays + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.inner) +def inner(a, b): + """ + inner(a, b, /) + + Inner product of two arrays. + + Ordinary inner product of vectors for 1-D arrays (without complex + conjugation), in higher dimensions a sum product over the last axes. + + Parameters + ---------- + a, b : array_like + If `a` and `b` are nonscalar, their last dimensions must match. 
+ + Returns + ------- + out : ndarray + If `a` and `b` are both + scalars or both 1-D arrays then a scalar is returned; otherwise + an array is returned. + ``out.shape = (*a.shape[:-1], *b.shape[:-1])`` + + Raises + ------ + ValueError + If both `a` and `b` are nonscalar and their last dimensions have + different sizes. + + See Also + -------- + tensordot : Sum products over arbitrary axes. + dot : Generalised matrix product, using second last dimension of `b`. + einsum : Einstein summation convention. + + Notes + ----- + For vectors (1-D arrays) it computes the ordinary inner-product:: + + np.inner(a, b) = sum(a[:]*b[:]) + + More generally, if ``ndim(a) = r > 0`` and ``ndim(b) = s > 0``:: + + np.inner(a, b) = np.tensordot(a, b, axes=(-1,-1)) + + or explicitly:: + + np.inner(a, b)[i0,...,ir-2,j0,...,js-2] + = sum(a[i0,...,ir-2,:]*b[j0,...,js-2,:]) + + In addition `a` or `b` may be scalars, in which case:: + + np.inner(a,b) = a*b + + Examples + -------- + Ordinary inner product for vectors: + + >>> a = np.array([1,2,3]) + >>> b = np.array([0,1,0]) + >>> np.inner(a, b) + 2 + + Some multidimensional examples: + + >>> a = np.arange(24).reshape((2,3,4)) + >>> b = np.arange(4) + >>> c = np.inner(a, b) + >>> c.shape + (2, 3) + >>> c + array([[ 14, 38, 62], + [ 86, 110, 134]]) + + >>> a = np.arange(2).reshape((1,1,2)) + >>> b = np.arange(6).reshape((3,2)) + >>> c = np.inner(a, b) + >>> c.shape + (1, 1, 3) + >>> c + array([[[1, 3, 5]]]) + + An example where `b` is a scalar: + + >>> np.inner(np.eye(2), 7) + array([[7., 0.], + [0., 7.]]) + + """ + return (a, b) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.where) +def where(condition, x=None, y=None): + """ + where(condition, [x, y], /) + + Return elements chosen from `x` or `y` depending on `condition`. + + .. note:: + When only `condition` is provided, this function is a shorthand for + ``np.asarray(condition).nonzero()``. Using `nonzero` directly should be + preferred, as it behaves correctly for subclasses. The rest of this + documentation covers only the case where all three arguments are + provided. + + Parameters + ---------- + condition : array_like, bool + Where True, yield `x`, otherwise yield `y`. + x, y : array_like + Values from which to choose. `x`, `y` and `condition` need to be + broadcastable to some shape. + + Returns + ------- + out : ndarray + An array with elements from `x` where `condition` is True, and elements + from `y` elsewhere. + + See Also + -------- + choose + nonzero : The function that is called when x and y are omitted + + Notes + ----- + If all the arrays are 1-D, `where` is equivalent to:: + + [xv if c else yv + for c, xv, yv in zip(condition, x, y)] + + Examples + -------- + >>> a = np.arange(10) + >>> a + array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + >>> np.where(a < 5, a, 10*a) + array([ 0, 1, 2, 3, 4, 50, 60, 70, 80, 90]) + + This can be used on multidimensional arrays too: + + >>> np.where([[True, False], [True, True]], + ... [[1, 2], [3, 4]], + ... [[9, 8], [7, 6]]) + array([[1, 8], + [3, 4]]) + + The shapes of x, y, and the condition are broadcast together: + + >>> x, y = np.ogrid[:3, :4] + >>> np.where(x < y, x, 10 + y) # both x and 10+y are broadcast + array([[10, 0, 0, 0], + [10, 11, 1, 1], + [10, 11, 12, 2]]) + + >>> a = np.array([[0, 1, 2], + ... [0, 2, 4], + ... 
[0, 3, 6]]) + >>> np.where(a < 4, a, -1) # -1 is broadcast + array([[ 0, 1, 2], + [ 0, 2, -1], + [ 0, 3, -1]]) + """ + return (condition, x, y) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.lexsort) +def lexsort(keys, axis=None): + """ + lexsort(keys, axis=-1) + + Perform an indirect stable sort using a sequence of keys. + + Given multiple sorting keys, which can be interpreted as columns in a + spreadsheet, lexsort returns an array of integer indices that describes + the sort order by multiple columns. The last key in the sequence is used + for the primary sort order, the second-to-last key for the secondary sort + order, and so on. The keys argument must be a sequence of objects that + can be converted to arrays of the same shape. If a 2D array is provided + for the keys argument, its rows are interpreted as the sorting keys and + sorting is according to the last row, second last row etc. + + Parameters + ---------- + keys : (k, N) array or tuple containing k (N,)-shaped sequences + The `k` different "columns" to be sorted. The last column (or row if + `keys` is a 2D array) is the primary sort key. + axis : int, optional + Axis to be indirectly sorted. By default, sort over the last axis. + + Returns + ------- + indices : (N,) ndarray of ints + Array of indices that sort the keys along the specified axis. + + See Also + -------- + argsort : Indirect sort. + ndarray.sort : In-place sort. + sort : Return a sorted copy of an array. + + Examples + -------- + Sort names: first by surname, then by name. + + >>> surnames = ('Hertz', 'Galilei', 'Hertz') + >>> first_names = ('Heinrich', 'Galileo', 'Gustav') + >>> ind = np.lexsort((first_names, surnames)) + >>> ind + array([1, 2, 0]) + + >>> [surnames[i] + ", " + first_names[i] for i in ind] + ['Galilei, Galileo', 'Hertz, Gustav', 'Hertz, Heinrich'] + + Sort two columns of numbers: + + >>> a = [1,5,1,4,3,4,4] # First column + >>> b = [9,4,0,4,0,2,1] # Second column + >>> ind = np.lexsort((b,a)) # Sort by a, then by b + >>> ind + array([2, 0, 4, 6, 5, 3, 1]) + + >>> [(a[i],b[i]) for i in ind] + [(1, 0), (1, 9), (3, 0), (4, 1), (4, 2), (4, 4), (5, 4)] + + Note that sorting is first according to the elements of ``a``. + Secondary sorting is according to the elements of ``b``. + + A normal ``argsort`` would have yielded: + + >>> [(a[i],b[i]) for i in np.argsort(a)] + [(1, 9), (1, 0), (3, 0), (4, 4), (4, 2), (4, 1), (5, 4)] + + Structured arrays are sorted lexically by ``argsort``: + + >>> x = np.array([(1,9), (5,4), (1,0), (4,4), (3,0), (4,2), (4,1)], + ... dtype=np.dtype([('x', int), ('y', int)])) + + >>> np.argsort(x) # or np.argsort(x, order=('x', 'y')) + array([2, 0, 4, 6, 5, 3, 1]) + + """ + if isinstance(keys, tuple): + return keys + else: + return (keys,) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.can_cast) +def can_cast(from_, to, casting=None): + """ + can_cast(from_, to, casting='safe') + + Returns True if cast between data types can occur according to the + casting rule. If from is a scalar or array scalar, also returns + True if the scalar value can be cast without overflow or truncation + to an integer. + + Parameters + ---------- + from_ : dtype, dtype specifier, scalar, or array + Data type, scalar, or array to cast from. + to : dtype or dtype specifier + Data type to cast to. + casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional + Controls what kind of data casting may occur. + + * 'no' means the data types should not be cast at all. 
+ * 'equiv' means only byte-order changes are allowed. + * 'safe' means only casts which can preserve values are allowed. + * 'same_kind' means only safe casts or casts within a kind, + like float64 to float32, are allowed. + * 'unsafe' means any data conversions may be done. + + Returns + ------- + out : bool + True if cast can occur according to the casting rule. + + Notes + ----- + .. versionchanged:: 1.17.0 + Casting between a simple data type and a structured one is possible only + for "unsafe" casting. Casting to multiple fields is allowed, but + casting from multiple fields is not. + + .. versionchanged:: 1.9.0 + Casting from numeric to string types in 'safe' casting mode requires + that the string dtype length is long enough to store the maximum + integer/float value converted. + + See also + -------- + dtype, result_type + + Examples + -------- + Basic examples + + >>> np.can_cast(np.int32, np.int64) + True + >>> np.can_cast(np.float64, complex) + True + >>> np.can_cast(complex, float) + False + + >>> np.can_cast('i8', 'f8') + True + >>> np.can_cast('i8', 'f4') + False + >>> np.can_cast('i4', 'S4') + False + + Casting scalars + + >>> np.can_cast(100, 'i1') + True + >>> np.can_cast(150, 'i1') + False + >>> np.can_cast(150, 'u1') + True + + >>> np.can_cast(3.5e100, np.float32) + False + >>> np.can_cast(1000.0, np.float32) + True + + Array scalar checks the value, array does not + + >>> np.can_cast(np.array(1000.0), np.float32) + True + >>> np.can_cast(np.array([1000.0]), np.float32) + False + + Using the casting rules + + >>> np.can_cast('i8', 'i8', 'no') + True + >>> np.can_cast('<i8', '>i8', 'no') + False + + >>> np.can_cast('<i8', '>i8', 'equiv') + True + >>> np.can_cast('<i4', '>i8', 'equiv') + False + + >>> np.can_cast('<i4', '>i8', 'safe') + True + >>> np.can_cast('<i8', '>i4', 'safe') + False + + >>> np.can_cast('<i8', '>i4', 'same_kind') + True + >>> np.can_cast('<i8', '>u4', 'same_kind') + False + + >>> np.can_cast('<i8', '>u4', 'unsafe') + True + + """ + return (from_,) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.min_scalar_type) +def min_scalar_type(a): + """ + min_scalar_type(a, /) + + For scalar ``a``, returns the data type with the smallest size + and smallest scalar kind which can hold its value. For non-scalar + array ``a``, returns the vector's dtype unmodified. + + Floating point values are not demoted to integers, + and complex values are not demoted to floats. + + Parameters + ---------- + a : scalar or array_like + The value whose minimal data type is to be found. + + Returns + ------- + out : dtype + The minimal data type. + + Notes + ----- + .. versionadded:: 1.6.0 + + See Also + -------- + result_type, promote_types, dtype, can_cast + + Examples + -------- + >>> np.min_scalar_type(10) + dtype('uint8') + + >>> np.min_scalar_type(-260) + dtype('int16') + + >>> np.min_scalar_type(3.1) + dtype('float16') + + >>> np.min_scalar_type(1e50) + dtype('float64') + + >>> np.min_scalar_type(np.arange(4,dtype='f8')) + dtype('float64') + + """ + return (a,) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.result_type) +def result_type(*arrays_and_dtypes): + """ + result_type(*arrays_and_dtypes) + + Returns the type that results from applying the NumPy + type promotion rules to the arguments. + + Type promotion in NumPy works similarly to the rules in languages + like C++, with some slight differences. When both scalars and + arrays are used, the array's type takes precedence and the actual value + of the scalar is taken into account. 
+ + For example, calculating 3*a, where a is an array of 32-bit floats, + intuitively should result in a 32-bit float output. If the 3 is a + 32-bit integer, the NumPy rules indicate it can't convert losslessly + into a 32-bit float, so a 64-bit float should be the result type. + By examining the value of the constant, '3', we see that it fits in + an 8-bit integer, which can be cast losslessly into the 32-bit float. + + Parameters + ---------- + arrays_and_dtypes : list of arrays and dtypes + The operands of some operation whose result type is needed. + + Returns + ------- + out : dtype + The result type. + + See also + -------- + dtype, promote_types, min_scalar_type, can_cast + + Notes + ----- + .. versionadded:: 1.6.0 + + The specific algorithm used is as follows. + + Categories are determined by first checking which of boolean, + integer (int/uint), or floating point (float/complex) the maximum + kind of all the arrays and the scalars are. + + If there are only scalars or the maximum category of the scalars + is higher than the maximum category of the arrays, + the data types are combined with :func:`promote_types` + to produce the return value. + + Otherwise, `min_scalar_type` is called on each scalar, and + the resulting data types are all combined with :func:`promote_types` + to produce the return value. + + The set of int values is not a subset of the uint values for types + with the same number of bits, something not reflected in + :func:`min_scalar_type`, but handled as a special case in `result_type`. + + Examples + -------- + >>> np.result_type(3, np.arange(7, dtype='i1')) + dtype('int8') + + >>> np.result_type('i4', 'c8') + dtype('complex128') + + >>> np.result_type(3.0, -2) + dtype('float64') + + """ + return arrays_and_dtypes + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.dot) +def dot(a, b, out=None): + """ + dot(a, b, out=None) + + Dot product of two arrays. Specifically, + + - If both `a` and `b` are 1-D arrays, it is inner product of vectors + (without complex conjugation). + + - If both `a` and `b` are 2-D arrays, it is matrix multiplication, + but using :func:`matmul` or ``a @ b`` is preferred. + + - If either `a` or `b` is 0-D (scalar), it is equivalent to + :func:`multiply` and using ``numpy.multiply(a, b)`` or ``a * b`` is + preferred. + + - If `a` is an N-D array and `b` is a 1-D array, it is a sum product over + the last axis of `a` and `b`. + + - If `a` is an N-D array and `b` is an M-D array (where ``M>=2``), it is a + sum product over the last axis of `a` and the second-to-last axis of + `b`:: + + dot(a, b)[i,j,k,m] = sum(a[i,j,:] * b[k,:,m]) + + It uses an optimized BLAS library when possible (see `numpy.linalg`). + + Parameters + ---------- + a : array_like + First argument. + b : array_like + Second argument. + out : ndarray, optional + Output argument. This must have the exact kind that would be returned + if it was not used. In particular, it must have the right type, must be + C-contiguous, and its dtype must be the dtype that would be returned + for `dot(a,b)`. This is a performance feature. Therefore, if these + conditions are not met, an exception is raised, instead of attempting + to be flexible. + + Returns + ------- + output : ndarray + Returns the dot product of `a` and `b`. If `a` and `b` are both + scalars or both 1-D arrays then a scalar is returned; otherwise + an array is returned. + If `out` is given, then it is returned. 
+ + Raises + ------ + ValueError + If the last dimension of `a` is not the same size as + the second-to-last dimension of `b`. + + See Also + -------- + vdot : Complex-conjugating dot product. + tensordot : Sum products over arbitrary axes. + einsum : Einstein summation convention. + matmul : '@' operator as method with out parameter. + linalg.multi_dot : Chained dot product. + + Examples + -------- + >>> np.dot(3, 4) + 12 + + Neither argument is complex-conjugated: + + >>> np.dot([2j, 3j], [2j, 3j]) + (-13+0j) + + For 2-D arrays it is the matrix product: + + >>> a = [[1, 0], [0, 1]] + >>> b = [[4, 1], [2, 2]] + >>> np.dot(a, b) + array([[4, 1], + [2, 2]]) + + >>> a = np.arange(3*4*5*6).reshape((3,4,5,6)) + >>> b = np.arange(3*4*5*6)[::-1].reshape((5,4,6,3)) + >>> np.dot(a, b)[2,3,2,1,2,2] + 499128 + >>> sum(a[2,3,2,:] * b[1,2,:,2]) + 499128 + + """ + return (a, b, out) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.vdot) +def vdot(a, b): + """ + vdot(a, b, /) + + Return the dot product of two vectors. + + The vdot(`a`, `b`) function handles complex numbers differently than + dot(`a`, `b`). If the first argument is complex the complex conjugate + of the first argument is used for the calculation of the dot product. + + Note that `vdot` handles multidimensional arrays differently than `dot`: + it does *not* perform a matrix product, but flattens input arguments + to 1-D vectors first. Consequently, it should only be used for vectors. + + Parameters + ---------- + a : array_like + If `a` is complex the complex conjugate is taken before calculation + of the dot product. + b : array_like + Second argument to the dot product. + + Returns + ------- + output : ndarray + Dot product of `a` and `b`. Can be an int, float, or + complex depending on the types of `a` and `b`. + + See Also + -------- + dot : Return the dot product without using the complex conjugate of the + first argument. + + Examples + -------- + >>> a = np.array([1+2j,3+4j]) + >>> b = np.array([5+6j,7+8j]) + >>> np.vdot(a, b) + (70-8j) + >>> np.vdot(b, a) + (70+8j) + + Note that higher-dimensional arrays are flattened! + + >>> a = np.array([[1, 4], [5, 6]]) + >>> b = np.array([[4, 1], [2, 2]]) + >>> np.vdot(a, b) + 30 + >>> np.vdot(b, a) + 30 + >>> 1*4 + 4*1 + 5*2 + 6*2 + 30 + + """ + return (a, b) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.bincount) +def bincount(x, weights=None, minlength=None): + """ + bincount(x, /, weights=None, minlength=0) + + Count number of occurrences of each value in array of non-negative ints. + + The number of bins (of size 1) is one larger than the largest value in + `x`. If `minlength` is specified, there will be at least this number + of bins in the output array (though it will be longer if necessary, + depending on the contents of `x`). + Each bin gives the number of occurrences of its index value in `x`. + If `weights` is specified the input array is weighted by it, i.e. if a + value ``n`` is found at position ``i``, ``out[n] += weight[i]`` instead + of ``out[n] += 1``. + + Parameters + ---------- + x : array_like, 1 dimension, nonnegative ints + Input array. + weights : array_like, optional + Weights, array of the same shape as `x`. + minlength : int, optional + A minimum number of bins for the output array. + + .. versionadded:: 1.6.0 + + Returns + ------- + out : ndarray of ints + The result of binning the input array. + The length of `out` is equal to ``np.amax(x)+1``. 
+ + Raises + ------ + ValueError + If the input is not 1-dimensional, or contains elements with negative + values, or if `minlength` is negative. + TypeError + If the type of the input is float or complex. + + See Also + -------- + histogram, digitize, unique + + Examples + -------- + >>> np.bincount(np.arange(5)) + array([1, 1, 1, 1, 1]) + >>> np.bincount(np.array([0, 1, 1, 3, 2, 1, 7])) + array([1, 3, 1, 1, 0, 0, 0, 1]) + + >>> x = np.array([0, 1, 1, 3, 2, 1, 7, 23]) + >>> np.bincount(x).size == np.amax(x)+1 + True + + The input array needs to be of integer dtype, otherwise a + TypeError is raised: + + >>> np.bincount(np.arange(5, dtype=float)) + Traceback (most recent call last): + ... + TypeError: Cannot cast array data from dtype('float64') to dtype('int64') + according to the rule 'safe' + + A possible use of ``bincount`` is to perform sums over + variable-size chunks of an array, using the ``weights`` keyword. + + >>> w = np.array([0.3, 0.5, 0.2, 0.7, 1., -0.6]) # weights + >>> x = np.array([0, 1, 1, 2, 2, 2]) + >>> np.bincount(x, weights=w) + array([ 0.3, 0.7, 1.1]) + + """ + return (x, weights) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.ravel_multi_index) +def ravel_multi_index(multi_index, dims, mode=None, order=None): + """ + ravel_multi_index(multi_index, dims, mode='raise', order='C') + + Converts a tuple of index arrays into an array of flat + indices, applying boundary modes to the multi-index. + + Parameters + ---------- + multi_index : tuple of array_like + A tuple of integer arrays, one array for each dimension. + dims : tuple of ints + The shape of array into which the indices from ``multi_index`` apply. + mode : {'raise', 'wrap', 'clip'}, optional + Specifies how out-of-bounds indices are handled. Can specify + either one mode or a tuple of modes, one mode per index. + + * 'raise' -- raise an error (default) + * 'wrap' -- wrap around + * 'clip' -- clip to the range + + In 'clip' mode, a negative index which would normally + wrap will clip to 0 instead. + order : {'C', 'F'}, optional + Determines whether the multi-index should be viewed as + indexing in row-major (C-style) or column-major + (Fortran-style) order. + + Returns + ------- + raveled_indices : ndarray + An array of indices into the flattened version of an array + of dimensions ``dims``. + + See Also + -------- + unravel_index + + Notes + ----- + .. versionadded:: 1.6.0 + + Examples + -------- + >>> arr = np.array([[3,6,6],[4,5,1]]) + >>> np.ravel_multi_index(arr, (7,6)) + array([22, 41, 37]) + >>> np.ravel_multi_index(arr, (7,6), order='F') + array([31, 41, 13]) + >>> np.ravel_multi_index(arr, (4,6), mode='clip') + array([22, 23, 19]) + >>> np.ravel_multi_index(arr, (4,4), mode=('clip','wrap')) + array([12, 13, 13]) + + >>> np.ravel_multi_index((3,1,4,1), (6,7,8,9)) + 1621 + """ + return multi_index + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.unravel_index) +def unravel_index(indices, shape=None, order=None): + """ + unravel_index(indices, shape, order='C') + + Converts a flat index or array of flat indices into a tuple + of coordinate arrays. + + Parameters + ---------- + indices : array_like + An integer array whose elements are indices into the flattened + version of an array of dimensions ``shape``. Before version 1.6.0, + this function accepted just one index value. + shape : tuple of ints + The shape of the array to use for unraveling ``indices``. + + .. versionchanged:: 1.16.0 + Renamed from ``dims`` to ``shape``. 
+ + order : {'C', 'F'}, optional + Determines whether the indices should be viewed as indexing in + row-major (C-style) or column-major (Fortran-style) order. + + .. versionadded:: 1.6.0 + + Returns + ------- + unraveled_coords : tuple of ndarray + Each array in the tuple has the same shape as the ``indices`` + array. + + See Also + -------- + ravel_multi_index + + Examples + -------- + >>> np.unravel_index([22, 41, 37], (7,6)) + (array([3, 6, 6]), array([4, 5, 1])) + >>> np.unravel_index([31, 41, 13], (7,6), order='F') + (array([3, 6, 6]), array([4, 5, 1])) + + >>> np.unravel_index(1621, (6,7,8,9)) + (3, 1, 4, 1) + + """ + return (indices,) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.copyto) +def copyto(dst, src, casting=None, where=None): + """ + copyto(dst, src, casting='same_kind', where=True) + + Copies values from one array to another, broadcasting as necessary. + + Raises a TypeError if the `casting` rule is violated, and if + `where` is provided, it selects which elements to copy. + + .. versionadded:: 1.7.0 + + Parameters + ---------- + dst : ndarray + The array into which values are copied. + src : array_like + The array from which values are copied. + casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional + Controls what kind of data casting may occur when copying. + + * 'no' means the data types should not be cast at all. + * 'equiv' means only byte-order changes are allowed. + * 'safe' means only casts which can preserve values are allowed. + * 'same_kind' means only safe casts or casts within a kind, + like float64 to float32, are allowed. + * 'unsafe' means any data conversions may be done. + where : array_like of bool, optional + A boolean array which is broadcasted to match the dimensions + of `dst`, and selects elements to copy from `src` to `dst` + wherever it contains the value True. + + Examples + -------- + >>> A = np.array([4, 5, 6]) + >>> B = [1, 2, 3] + >>> np.copyto(A, B) + >>> A + array([1, 2, 3]) + + >>> A = np.array([[1, 2, 3], [4, 5, 6]]) + >>> B = [[4, 5, 6], [7, 8, 9]] + >>> np.copyto(A, B) + >>> A + array([[4, 5, 6], + [7, 8, 9]]) + + """ + return (dst, src, where) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.putmask) +def putmask(a, /, mask, values): + """ + putmask(a, mask, values) + + Changes elements of an array based on conditional and input values. + + Sets ``a.flat[n] = values[n]`` for each n where ``mask.flat[n]==True``. + + If `values` is not the same size as `a` and `mask` then it will repeat. + This gives behavior different from ``a[mask] = values``. + + Parameters + ---------- + a : ndarray + Target array. + mask : array_like + Boolean mask array. It has to be the same shape as `a`. + values : array_like + Values to put into `a` where `mask` is True. If `values` is smaller + than `a` it will be repeated. + + See Also + -------- + place, put, take, copyto + + Examples + -------- + >>> x = np.arange(6).reshape(2, 3) + >>> np.putmask(x, x>2, x**2) + >>> x + array([[ 0, 1, 2], + [ 9, 16, 25]]) + + If `values` is smaller than `a` it is repeated: + + >>> x = np.arange(5) + >>> np.putmask(x, x>1, [-33, -44]) + >>> x + array([ 0, 1, -33, -44, -33]) + + """ + return (a, mask, values) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.packbits) +def packbits(a, axis=None, bitorder='big'): + """ + packbits(a, /, axis=None, bitorder='big') + + Packs the elements of a binary-valued array into bits in a uint8 array. + + The result is padded to full bytes by inserting zero bits at the end. 
+ + Parameters + ---------- + a : array_like + An array of integers or booleans whose elements should be packed to + bits. + axis : int, optional + The dimension over which bit-packing is done. + ``None`` implies packing the flattened array. + bitorder : {'big', 'little'}, optional + The order of the input bits. 'big' will mimic bin(val), + ``[0, 0, 0, 0, 0, 0, 1, 1] => 3 = 0b00000011``, 'little' will + reverse the order so ``[1, 1, 0, 0, 0, 0, 0, 0] => 3``. + Defaults to 'big'. + + .. versionadded:: 1.17.0 + + Returns + ------- + packed : ndarray + Array of type uint8 whose elements represent bits corresponding to the + logical (0 or nonzero) value of the input elements. The shape of + `packed` has the same number of dimensions as the input (unless `axis` + is None, in which case the output is 1-D). + + See Also + -------- + unpackbits: Unpacks elements of a uint8 array into a binary-valued output + array. + + Examples + -------- + >>> a = np.array([[[1,0,1], + ... [0,1,0]], + ... [[1,1,0], + ... [0,0,1]]]) + >>> b = np.packbits(a, axis=-1) + >>> b + array([[[160], + [ 64]], + [[192], + [ 32]]], dtype=uint8) + + Note that in binary 160 = 1010 0000, 64 = 0100 0000, 192 = 1100 0000, + and 32 = 0010 0000. + + """ + return (a,) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.unpackbits) +def unpackbits(a, axis=None, count=None, bitorder='big'): + """ + unpackbits(a, /, axis=None, count=None, bitorder='big') + + Unpacks elements of a uint8 array into a binary-valued output array. + + Each element of `a` represents a bit-field that should be unpacked + into a binary-valued output array. The shape of the output array is + either 1-D (if `axis` is ``None``) or the same shape as the input + array with unpacking done along the axis specified. + + Parameters + ---------- + a : ndarray, uint8 type + Input array. + axis : int, optional + The dimension over which bit-unpacking is done. + ``None`` implies unpacking the flattened array. + count : int or None, optional + The number of elements to unpack along `axis`, provided as a way + of undoing the effect of packing a size that is not a multiple + of eight. A non-negative number means to only unpack `count` + bits. A negative number means to trim off that many bits from + the end. ``None`` means to unpack the entire array (the + default). Counts larger than the available number of bits will + add zero padding to the output. Negative counts must not + exceed the available number of bits. + + .. versionadded:: 1.17.0 + + bitorder : {'big', 'little'}, optional + The order of the returned bits. 'big' will mimic bin(val), + ``3 = 0b00000011 => [0, 0, 0, 0, 0, 0, 1, 1]``, 'little' will reverse + the order to ``[1, 1, 0, 0, 0, 0, 0, 0]``. + Defaults to 'big'. + + .. versionadded:: 1.17.0 + + Returns + ------- + unpacked : ndarray, uint8 type + The elements are binary-valued (0 or 1). + + See Also + -------- + packbits : Packs the elements of a binary-valued array into bits in + a uint8 array. 
+ + Examples + -------- + >>> a = np.array([[2], [7], [23]], dtype=np.uint8) + >>> a + array([[ 2], + [ 7], + [23]], dtype=uint8) + >>> b = np.unpackbits(a, axis=1) + >>> b + array([[0, 0, 0, 0, 0, 0, 1, 0], + [0, 0, 0, 0, 0, 1, 1, 1], + [0, 0, 0, 1, 0, 1, 1, 1]], dtype=uint8) + >>> c = np.unpackbits(a, axis=1, count=-3) + >>> c + array([[0, 0, 0, 0, 0], + [0, 0, 0, 0, 0], + [0, 0, 0, 1, 0]], dtype=uint8) + + >>> p = np.packbits(b, axis=0) + >>> np.unpackbits(p, axis=0) + array([[0, 0, 0, 0, 0, 0, 1, 0], + [0, 0, 0, 0, 0, 1, 1, 1], + [0, 0, 0, 1, 0, 1, 1, 1], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0]], dtype=uint8) + >>> np.array_equal(b, np.unpackbits(p, axis=0, count=b.shape[0])) + True + + """ + return (a,) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.shares_memory) +def shares_memory(a, b, max_work=None): + """ + shares_memory(a, b, /, max_work=None) + + Determine if two arrays share memory. + + .. warning:: + + This function can be exponentially slow for some inputs, unless + `max_work` is set to a finite number or ``MAY_SHARE_BOUNDS``. + If in doubt, use `numpy.may_share_memory` instead. + + Parameters + ---------- + a, b : ndarray + Input arrays + max_work : int, optional + Effort to spend on solving the overlap problem (maximum number + of candidate solutions to consider). The following special + values are recognized: + + max_work=MAY_SHARE_EXACT (default) + The problem is solved exactly. In this case, the function returns + True only if there is an element shared between the arrays. Finding + the exact solution may take extremely long in some cases. + max_work=MAY_SHARE_BOUNDS + Only the memory bounds of a and b are checked. + + Raises + ------ + numpy.exceptions.TooHardError + Exceeded max_work. + + Returns + ------- + out : bool + + See Also + -------- + may_share_memory + + Examples + -------- + >>> x = np.array([1, 2, 3, 4]) + >>> np.shares_memory(x, np.array([5, 6, 7])) + False + >>> np.shares_memory(x[::2], x) + True + >>> np.shares_memory(x[::2], x[1::2]) + False + + Checking whether two arrays share memory is NP-complete, and + runtime may increase exponentially in the number of + dimensions. Hence, `max_work` should generally be set to a finite + number, as it is possible to construct examples that take + extremely long to run: + + >>> from numpy.lib.stride_tricks import as_strided + >>> x = np.zeros([192163377], dtype=np.int8) + >>> x1 = as_strided(x, strides=(36674, 61119, 85569), shape=(1049, 1049, 1049)) + >>> x2 = as_strided(x[64023025:], strides=(12223, 12224, 1), shape=(1049, 1049, 1)) + >>> np.shares_memory(x1, x2, max_work=1000) + Traceback (most recent call last): + ... + numpy.exceptions.TooHardError: Exceeded max_work + + Running ``np.shares_memory(x1, x2)`` without `max_work` set takes + around 1 minute for this case. It is possible to find problems + that take still significantly longer. + + """ + return (a, b) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.may_share_memory) +def may_share_memory(a, b, max_work=None): + """ + may_share_memory(a, b, /, max_work=None) + + Determine if two arrays might share memory + + A return of True does not necessarily mean that the two arrays + share any element. It just means that they *might*. + + Only the memory bounds of a and b are checked by default. 
+ + Parameters + ---------- + a, b : ndarray + Input arrays + max_work : int, optional + Effort to spend on solving the overlap problem. See + `shares_memory` for details. Default for ``may_share_memory`` + is to do a bounds check. + + Returns + ------- + out : bool + + See Also + -------- + shares_memory + + Examples + -------- + >>> np.may_share_memory(np.array([1,2]), np.array([5,8,9])) + False + >>> x = np.zeros([3, 4]) + >>> np.may_share_memory(x[:,0], x[:,1]) + True + + """ + return (a, b) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.is_busday) +def is_busday(dates, weekmask=None, holidays=None, busdaycal=None, out=None): + """ + is_busday(dates, weekmask='1111100', holidays=None, busdaycal=None, out=None) + + Calculates which of the given dates are valid days, and which are not. + + .. versionadded:: 1.7.0 + + Parameters + ---------- + dates : array_like of datetime64[D] + The array of dates to process. + weekmask : str or array_like of bool, optional + A seven-element array indicating which of Monday through Sunday are + valid days. May be specified as a length-seven list or array, like + [1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string + like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for + weekdays, optionally separated by white space. Valid abbreviations + are: Mon Tue Wed Thu Fri Sat Sun + holidays : array_like of datetime64[D], optional + An array of dates to consider as invalid dates. They may be + specified in any order, and NaT (not-a-time) dates are ignored. + This list is saved in a normalized form that is suited for + fast calculations of valid days. + busdaycal : busdaycalendar, optional + A `busdaycalendar` object which specifies the valid days. If this + parameter is provided, neither weekmask nor holidays may be + provided. + out : array of bool, optional + If provided, this array is filled with the result. + + Returns + ------- + out : array of bool + An array with the same shape as ``dates``, containing True for + each valid day, and False for each invalid day. + + See Also + -------- + busdaycalendar : An object that specifies a custom set of valid days. + busday_offset : Applies an offset counted in valid days. + busday_count : Counts how many valid days are in a half-open date range. + + Examples + -------- + >>> # The weekdays are Friday, Saturday, and Monday + ... np.is_busday(['2011-07-01', '2011-07-02', '2011-07-18'], + ... holidays=['2011-07-01', '2011-07-04', '2011-07-17']) + array([False, False, True]) + """ + return (dates, weekmask, holidays, out) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.busday_offset) +def busday_offset(dates, offsets, roll=None, weekmask=None, holidays=None, + busdaycal=None, out=None): + """ + busday_offset(dates, offsets, roll='raise', weekmask='1111100', holidays=None, busdaycal=None, out=None) + + First adjusts the date to fall on a valid day according to + the ``roll`` rule, then applies offsets to the given dates + counted in valid days. + + .. versionadded:: 1.7.0 + + Parameters + ---------- + dates : array_like of datetime64[D] + The array of dates to process. + offsets : array_like of int + The array of offsets, which is broadcast with ``dates``. + roll : {'raise', 'nat', 'forward', 'following', 'backward', 'preceding', 'modifiedfollowing', 'modifiedpreceding'}, optional + How to treat dates that do not fall on a valid day. The default + is 'raise'. + + * 'raise' means to raise an exception for an invalid day. 
+ * 'nat' means to return a NaT (not-a-time) for an invalid day. + * 'forward' and 'following' mean to take the first valid day + later in time. + * 'backward' and 'preceding' mean to take the first valid day + earlier in time. + * 'modifiedfollowing' means to take the first valid day + later in time unless it is across a Month boundary, in which + case to take the first valid day earlier in time. + * 'modifiedpreceding' means to take the first valid day + earlier in time unless it is across a Month boundary, in which + case to take the first valid day later in time. + weekmask : str or array_like of bool, optional + A seven-element array indicating which of Monday through Sunday are + valid days. May be specified as a length-seven list or array, like + [1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string + like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for + weekdays, optionally separated by white space. Valid abbreviations + are: Mon Tue Wed Thu Fri Sat Sun + holidays : array_like of datetime64[D], optional + An array of dates to consider as invalid dates. They may be + specified in any order, and NaT (not-a-time) dates are ignored. + This list is saved in a normalized form that is suited for + fast calculations of valid days. + busdaycal : busdaycalendar, optional + A `busdaycalendar` object which specifies the valid days. If this + parameter is provided, neither weekmask nor holidays may be + provided. + out : array of datetime64[D], optional + If provided, this array is filled with the result. + + Returns + ------- + out : array of datetime64[D] + An array with a shape from broadcasting ``dates`` and ``offsets`` + together, containing the dates with offsets applied. + + See Also + -------- + busdaycalendar : An object that specifies a custom set of valid days. + is_busday : Returns a boolean array indicating valid days. + busday_count : Counts how many valid days are in a half-open date range. + + Examples + -------- + >>> # First business day in October 2011 (not accounting for holidays) + ... np.busday_offset('2011-10', 0, roll='forward') + numpy.datetime64('2011-10-03') + >>> # Last business day in February 2012 (not accounting for holidays) + ... np.busday_offset('2012-03', -1, roll='forward') + numpy.datetime64('2012-02-29') + >>> # Third Wednesday in January 2011 + ... np.busday_offset('2011-01', 2, roll='forward', weekmask='Wed') + numpy.datetime64('2011-01-19') + >>> # 2012 Mother's Day in Canada and the U.S. + ... np.busday_offset('2012-05', 1, roll='forward', weekmask='Sun') + numpy.datetime64('2012-05-13') + + >>> # First business day on or after a date + ... np.busday_offset('2011-03-20', 0, roll='forward') + numpy.datetime64('2011-03-21') + >>> np.busday_offset('2011-03-22', 0, roll='forward') + numpy.datetime64('2011-03-22') + >>> # First business day after a date + ... np.busday_offset('2011-03-20', 1, roll='backward') + numpy.datetime64('2011-03-21') + >>> np.busday_offset('2011-03-22', 1, roll='backward') + numpy.datetime64('2011-03-23') + """ + return (dates, offsets, weekmask, holidays, out) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.busday_count) +def busday_count(begindates, enddates, weekmask=None, holidays=None, + busdaycal=None, out=None): + """ + busday_count(begindates, enddates, weekmask='1111100', holidays=[], busdaycal=None, out=None) + + Counts the number of valid days between `begindates` and + `enddates`, not including the day of `enddates`. 
+ + If ``enddates`` specifies a date value that is earlier than the + corresponding ``begindates`` date value, the count will be negative. + + .. versionadded:: 1.7.0 + + Parameters + ---------- + begindates : array_like of datetime64[D] + The array of the first dates for counting. + enddates : array_like of datetime64[D] + The array of the end dates for counting, which are excluded + from the count themselves. + weekmask : str or array_like of bool, optional + A seven-element array indicating which of Monday through Sunday are + valid days. May be specified as a length-seven list or array, like + [1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string + like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for + weekdays, optionally separated by white space. Valid abbreviations + are: Mon Tue Wed Thu Fri Sat Sun + holidays : array_like of datetime64[D], optional + An array of dates to consider as invalid dates. They may be + specified in any order, and NaT (not-a-time) dates are ignored. + This list is saved in a normalized form that is suited for + fast calculations of valid days. + busdaycal : busdaycalendar, optional + A `busdaycalendar` object which specifies the valid days. If this + parameter is provided, neither weekmask nor holidays may be + provided. + out : array of int, optional + If provided, this array is filled with the result. + + Returns + ------- + out : array of int + An array with a shape from broadcasting ``begindates`` and ``enddates`` + together, containing the number of valid days between + the begin and end dates. + + See Also + -------- + busdaycalendar : An object that specifies a custom set of valid days. + is_busday : Returns a boolean array indicating valid days. + busday_offset : Applies an offset counted in valid days. + + Examples + -------- + >>> # Number of weekdays in January 2011 + ... np.busday_count('2011-01', '2011-02') + 21 + >>> # Number of weekdays in 2011 + >>> np.busday_count('2011', '2012') + 260 + >>> # Number of Saturdays in 2011 + ... np.busday_count('2011', '2012', weekmask='Sat') + 53 + """ + return (begindates, enddates, weekmask, holidays, out) + + +@array_function_from_c_func_and_dispatcher( + _multiarray_umath.datetime_as_string) +def datetime_as_string(arr, unit=None, timezone=None, casting=None): + """ + datetime_as_string(arr, unit=None, timezone='naive', casting='same_kind') + + Convert an array of datetimes into an array of strings. + + Parameters + ---------- + arr : array_like of datetime64 + The array of UTC timestamps to format. + unit : str + One of None, 'auto', or a :ref:`datetime unit <arrays.dtypes.dateunits>`. + timezone : {'naive', 'UTC', 'local'} or tzinfo + Timezone information to use when displaying the datetime. If 'UTC', end + with a Z to indicate UTC time. If 'local', convert to the local timezone + first, and suffix with a +-#### timezone offset. If a tzinfo object, + then do as with 'local', but use the specified timezone. + casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'} + Casting to allow when changing between datetime units. + + Returns + ------- + str_arr : ndarray + An array of strings the same shape as `arr`. 
+ + Examples + -------- + >>> import pytz + >>> d = np.arange('2002-10-27T04:30', 4*60, 60, dtype='M8[m]') + >>> d + array(['2002-10-27T04:30', '2002-10-27T05:30', '2002-10-27T06:30', + '2002-10-27T07:30'], dtype='datetime64[m]') + + Setting the timezone to UTC shows the same information, but with a Z suffix + + >>> np.datetime_as_string(d, timezone='UTC') + array(['2002-10-27T04:30Z', '2002-10-27T05:30Z', '2002-10-27T06:30Z', + '2002-10-27T07:30Z'], dtype='<U35') + + Note that we picked datetimes that cross a DST boundary. Passing in a + ``pytz`` timezone object will print the appropriate offset + + >>> np.datetime_as_string(d, timezone=pytz.timezone('US/Eastern')) + array(['2002-10-27T00:30-0400', '2002-10-27T01:30-0400', + '2002-10-27T01:30-0500', '2002-10-27T02:30-0500'], dtype='<U39') + + Passing in a unit will change the precision + + >>> np.datetime_as_string(d, unit='h') + array(['2002-10-27T04', '2002-10-27T05', '2002-10-27T06', '2002-10-27T07'], + dtype='<U32') + >>> np.datetime_as_string(d, unit='s') + array(['2002-10-27T04:30:00', '2002-10-27T05:30:00', '2002-10-27T06:30:00', + '2002-10-27T07:30:00'], dtype='<U38') + + 'casting' can be used to specify whether precision can be changed + + >>> np.datetime_as_string(d, unit='h', casting='safe') + Traceback (most recent call last): + ... + TypeError: Cannot create a datetime string as units 'h' from a NumPy + datetime with units 'm' according to the rule 'safe' + """ + return (arr,) diff --git a/.venv/lib/python3.12/site-packages/numpy/core/multiarray.pyi b/.venv/lib/python3.12/site-packages/numpy/core/multiarray.pyi new file mode 100644 index 00000000..dc05f812 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/multiarray.pyi @@ -0,0 +1,1022 @@ +# TODO: Sort out any and all missing functions in this namespace + +import os +import datetime as dt +from collections.abc import Sequence, Callable, Iterable +from typing import ( + Literal as L, + Any, + overload, + TypeVar, + SupportsIndex, + final, + Final, + Protocol, + ClassVar, +) + +from numpy import ( + # Re-exports + busdaycalendar as busdaycalendar, + broadcast as broadcast, + dtype as dtype, + ndarray as ndarray, + nditer as nditer, + + # The rest + ufunc, + str_, + bool_, + uint8, + intp, + int_, + float64, + timedelta64, + datetime64, + generic, + unsignedinteger, + signedinteger, + floating, + complexfloating, + _OrderKACF, + _OrderCF, + _CastingKind, + _ModeKind, + _SupportsBuffer, + _IOProtocol, + _CopyMode, + _NDIterFlagsKind, + _NDIterOpFlagsKind, +) + +from numpy._typing import ( + # Shapes + _ShapeLike, + + # DTypes + DTypeLike, + _DTypeLike, + + # Arrays + NDArray, + ArrayLike, + _ArrayLike, + _SupportsArrayFunc, + _NestedSequence, + _ArrayLikeBool_co, + _ArrayLikeUInt_co, + _ArrayLikeInt_co, + _ArrayLikeFloat_co, + _ArrayLikeComplex_co, + _ArrayLikeTD64_co, + _ArrayLikeDT64_co, + _ArrayLikeObject_co, + _ArrayLikeStr_co, + _ArrayLikeBytes_co, + _ScalarLike_co, + _IntLike_co, + _FloatLike_co, + _TD64Like_co, +) + +_T_co = TypeVar("_T_co", covariant=True) +_T_contra = TypeVar("_T_contra", contravariant=True) +_SCT = TypeVar("_SCT", bound=generic) +_ArrayType = TypeVar("_ArrayType", bound=NDArray[Any]) + +# Valid time units +_UnitKind = L[ + "Y", + "M", + "D", + "h", + "m", + "s", + "ms", + "us", "μs", + "ns", + "ps", + "fs", + "as", +] +_RollKind = L[ # `raise` is deliberately excluded + "nat", + "forward", + "following", + "backward", + "preceding", + "modifiedfollowing", + "modifiedpreceding", +] + +class _SupportsLenAndGetItem(Protocol[_T_contra, 
_T_co]): + def __len__(self) -> int: ... + def __getitem__(self, key: _T_contra, /) -> _T_co: ... + +__all__: list[str] + +ALLOW_THREADS: Final[int] # 0 or 1 (system-specific) +BUFSIZE: L[8192] +CLIP: L[0] +WRAP: L[1] +RAISE: L[2] +MAXDIMS: L[32] +MAY_SHARE_BOUNDS: L[0] +MAY_SHARE_EXACT: L[-1] +tracemalloc_domain: L[389047] + +@overload +def empty_like( + prototype: _ArrayType, + dtype: None = ..., + order: _OrderKACF = ..., + subok: bool = ..., + shape: None | _ShapeLike = ..., +) -> _ArrayType: ... +@overload +def empty_like( + prototype: _ArrayLike[_SCT], + dtype: None = ..., + order: _OrderKACF = ..., + subok: bool = ..., + shape: None | _ShapeLike = ..., +) -> NDArray[_SCT]: ... +@overload +def empty_like( + prototype: object, + dtype: None = ..., + order: _OrderKACF = ..., + subok: bool = ..., + shape: None | _ShapeLike = ..., +) -> NDArray[Any]: ... +@overload +def empty_like( + prototype: Any, + dtype: _DTypeLike[_SCT], + order: _OrderKACF = ..., + subok: bool = ..., + shape: None | _ShapeLike = ..., +) -> NDArray[_SCT]: ... +@overload +def empty_like( + prototype: Any, + dtype: DTypeLike, + order: _OrderKACF = ..., + subok: bool = ..., + shape: None | _ShapeLike = ..., +) -> NDArray[Any]: ... + +@overload +def array( + object: _ArrayType, + dtype: None = ..., + *, + copy: bool | _CopyMode = ..., + order: _OrderKACF = ..., + subok: L[True], + ndmin: int = ..., + like: None | _SupportsArrayFunc = ..., +) -> _ArrayType: ... +@overload +def array( + object: _ArrayLike[_SCT], + dtype: None = ..., + *, + copy: bool | _CopyMode = ..., + order: _OrderKACF = ..., + subok: bool = ..., + ndmin: int = ..., + like: None | _SupportsArrayFunc = ..., +) -> NDArray[_SCT]: ... +@overload +def array( + object: object, + dtype: None = ..., + *, + copy: bool | _CopyMode = ..., + order: _OrderKACF = ..., + subok: bool = ..., + ndmin: int = ..., + like: None | _SupportsArrayFunc = ..., +) -> NDArray[Any]: ... +@overload +def array( + object: Any, + dtype: _DTypeLike[_SCT], + *, + copy: bool | _CopyMode = ..., + order: _OrderKACF = ..., + subok: bool = ..., + ndmin: int = ..., + like: None | _SupportsArrayFunc = ..., +) -> NDArray[_SCT]: ... +@overload +def array( + object: Any, + dtype: DTypeLike, + *, + copy: bool | _CopyMode = ..., + order: _OrderKACF = ..., + subok: bool = ..., + ndmin: int = ..., + like: None | _SupportsArrayFunc = ..., +) -> NDArray[Any]: ... + +@overload +def zeros( + shape: _ShapeLike, + dtype: None = ..., + order: _OrderCF = ..., + *, + like: None | _SupportsArrayFunc = ..., +) -> NDArray[float64]: ... +@overload +def zeros( + shape: _ShapeLike, + dtype: _DTypeLike[_SCT], + order: _OrderCF = ..., + *, + like: None | _SupportsArrayFunc = ..., +) -> NDArray[_SCT]: ... +@overload +def zeros( + shape: _ShapeLike, + dtype: DTypeLike, + order: _OrderCF = ..., + *, + like: None | _SupportsArrayFunc = ..., +) -> NDArray[Any]: ... + +@overload +def empty( + shape: _ShapeLike, + dtype: None = ..., + order: _OrderCF = ..., + *, + like: None | _SupportsArrayFunc = ..., +) -> NDArray[float64]: ... +@overload +def empty( + shape: _ShapeLike, + dtype: _DTypeLike[_SCT], + order: _OrderCF = ..., + *, + like: None | _SupportsArrayFunc = ..., +) -> NDArray[_SCT]: ... +@overload +def empty( + shape: _ShapeLike, + dtype: DTypeLike, + order: _OrderCF = ..., + *, + like: None | _SupportsArrayFunc = ..., +) -> NDArray[Any]: ... + +@overload +def unravel_index( # type: ignore[misc] + indices: _IntLike_co, + shape: _ShapeLike, + order: _OrderCF = ..., +) -> tuple[intp, ...]: ... 
+@overload +def unravel_index( + indices: _ArrayLikeInt_co, + shape: _ShapeLike, + order: _OrderCF = ..., +) -> tuple[NDArray[intp], ...]: ... + +@overload +def ravel_multi_index( # type: ignore[misc] + multi_index: Sequence[_IntLike_co], + dims: Sequence[SupportsIndex], + mode: _ModeKind | tuple[_ModeKind, ...] = ..., + order: _OrderCF = ..., +) -> intp: ... +@overload +def ravel_multi_index( + multi_index: Sequence[_ArrayLikeInt_co], + dims: Sequence[SupportsIndex], + mode: _ModeKind | tuple[_ModeKind, ...] = ..., + order: _OrderCF = ..., +) -> NDArray[intp]: ... + +# NOTE: Allow any sequence of array-like objects +@overload +def concatenate( # type: ignore[misc] + arrays: _ArrayLike[_SCT], + /, + axis: None | SupportsIndex = ..., + out: None = ..., + *, + dtype: None = ..., + casting: None | _CastingKind = ... +) -> NDArray[_SCT]: ... +@overload +def concatenate( # type: ignore[misc] + arrays: _SupportsLenAndGetItem[int, ArrayLike], + /, + axis: None | SupportsIndex = ..., + out: None = ..., + *, + dtype: None = ..., + casting: None | _CastingKind = ... +) -> NDArray[Any]: ... +@overload +def concatenate( # type: ignore[misc] + arrays: _SupportsLenAndGetItem[int, ArrayLike], + /, + axis: None | SupportsIndex = ..., + out: None = ..., + *, + dtype: _DTypeLike[_SCT], + casting: None | _CastingKind = ... +) -> NDArray[_SCT]: ... +@overload +def concatenate( # type: ignore[misc] + arrays: _SupportsLenAndGetItem[int, ArrayLike], + /, + axis: None | SupportsIndex = ..., + out: None = ..., + *, + dtype: DTypeLike, + casting: None | _CastingKind = ... +) -> NDArray[Any]: ... +@overload +def concatenate( + arrays: _SupportsLenAndGetItem[int, ArrayLike], + /, + axis: None | SupportsIndex = ..., + out: _ArrayType = ..., + *, + dtype: DTypeLike = ..., + casting: None | _CastingKind = ... +) -> _ArrayType: ... + +def inner( + a: ArrayLike, + b: ArrayLike, + /, +) -> Any: ... + +@overload +def where( + condition: ArrayLike, + /, +) -> tuple[NDArray[intp], ...]: ... +@overload +def where( + condition: ArrayLike, + x: ArrayLike, + y: ArrayLike, + /, +) -> NDArray[Any]: ... + +def lexsort( + keys: ArrayLike, + axis: None | SupportsIndex = ..., +) -> Any: ... + +def can_cast( + from_: ArrayLike | DTypeLike, + to: DTypeLike, + casting: None | _CastingKind = ..., +) -> bool: ... + +def min_scalar_type( + a: ArrayLike, /, +) -> dtype[Any]: ... + +def result_type( + *arrays_and_dtypes: ArrayLike | DTypeLike, +) -> dtype[Any]: ... + +@overload +def dot(a: ArrayLike, b: ArrayLike, out: None = ...) -> Any: ... +@overload +def dot(a: ArrayLike, b: ArrayLike, out: _ArrayType) -> _ArrayType: ... + +@overload +def vdot(a: _ArrayLikeBool_co, b: _ArrayLikeBool_co, /) -> bool_: ... # type: ignore[misc] +@overload +def vdot(a: _ArrayLikeUInt_co, b: _ArrayLikeUInt_co, /) -> unsignedinteger[Any]: ... # type: ignore[misc] +@overload +def vdot(a: _ArrayLikeInt_co, b: _ArrayLikeInt_co, /) -> signedinteger[Any]: ... # type: ignore[misc] +@overload +def vdot(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, /) -> floating[Any]: ... # type: ignore[misc] +@overload +def vdot(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, /) -> complexfloating[Any, Any]: ... # type: ignore[misc] +@overload +def vdot(a: _ArrayLikeTD64_co, b: _ArrayLikeTD64_co, /) -> timedelta64: ... +@overload +def vdot(a: _ArrayLikeObject_co, b: Any, /) -> Any: ... +@overload +def vdot(a: Any, b: _ArrayLikeObject_co, /) -> Any: ... + +def bincount( + x: ArrayLike, + /, + weights: None | ArrayLike = ..., + minlength: SupportsIndex = ..., +) -> NDArray[intp]: ... 
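The casting and promotion stubs just above (`can_cast`, `min_scalar_type`, `result_type`, `vdot`, `bincount`) pair with the docstrings earlier in this file. A minimal runnable sketch of how the casting helpers fit together, using only values quoted in those docstrings and assuming a standard NumPy install:

import numpy as np

# can_cast checks one conversion against a casting rule ('safe' by default)
assert np.can_cast('i8', 'f8')
assert not np.can_cast('i8', 'f4')

# min_scalar_type finds the smallest dtype that can hold a scalar value
assert np.min_scalar_type(10) == np.dtype('uint8')
assert np.min_scalar_type(-260) == np.dtype('int16')

# result_type applies the promotion rules described above: the scalar 3
# fits in the int8 array's kind, so the array's dtype wins
assert np.result_type(3, np.arange(7, dtype='i1')) == np.dtype('int8')
assert np.result_type('i4', 'c8') == np.dtype('complex128')

The asserts simply restate the doctest outputs shown in the docstrings, so they double as a quick consistency check against the stubbed signatures.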
+ +def copyto( + dst: NDArray[Any], + src: ArrayLike, + casting: None | _CastingKind = ..., + where: None | _ArrayLikeBool_co = ..., +) -> None: ... + +def putmask( + a: NDArray[Any], + /, + mask: _ArrayLikeBool_co, + values: ArrayLike, +) -> None: ... + +def packbits( + a: _ArrayLikeInt_co, + /, + axis: None | SupportsIndex = ..., + bitorder: L["big", "little"] = ..., +) -> NDArray[uint8]: ... + +def unpackbits( + a: _ArrayLike[uint8], + /, + axis: None | SupportsIndex = ..., + count: None | SupportsIndex = ..., + bitorder: L["big", "little"] = ..., +) -> NDArray[uint8]: ... + +def shares_memory( + a: object, + b: object, + /, + max_work: None | int = ..., +) -> bool: ... + +def may_share_memory( + a: object, + b: object, + /, + max_work: None | int = ..., +) -> bool: ... + +@overload +def asarray( + a: _ArrayLike[_SCT], + dtype: None = ..., + order: _OrderKACF = ..., + *, + like: None | _SupportsArrayFunc = ..., +) -> NDArray[_SCT]: ... +@overload +def asarray( + a: object, + dtype: None = ..., + order: _OrderKACF = ..., + *, + like: None | _SupportsArrayFunc = ..., +) -> NDArray[Any]: ... +@overload +def asarray( + a: Any, + dtype: _DTypeLike[_SCT], + order: _OrderKACF = ..., + *, + like: None | _SupportsArrayFunc = ..., +) -> NDArray[_SCT]: ... +@overload +def asarray( + a: Any, + dtype: DTypeLike, + order: _OrderKACF = ..., + *, + like: None | _SupportsArrayFunc = ..., +) -> NDArray[Any]: ... + +@overload +def asanyarray( + a: _ArrayType, # Preserve subclass-information + dtype: None = ..., + order: _OrderKACF = ..., + *, + like: None | _SupportsArrayFunc = ..., +) -> _ArrayType: ... +@overload +def asanyarray( + a: _ArrayLike[_SCT], + dtype: None = ..., + order: _OrderKACF = ..., + *, + like: None | _SupportsArrayFunc = ..., +) -> NDArray[_SCT]: ... +@overload +def asanyarray( + a: object, + dtype: None = ..., + order: _OrderKACF = ..., + *, + like: None | _SupportsArrayFunc = ..., +) -> NDArray[Any]: ... +@overload +def asanyarray( + a: Any, + dtype: _DTypeLike[_SCT], + order: _OrderKACF = ..., + *, + like: None | _SupportsArrayFunc = ..., +) -> NDArray[_SCT]: ... +@overload +def asanyarray( + a: Any, + dtype: DTypeLike, + order: _OrderKACF = ..., + *, + like: None | _SupportsArrayFunc = ..., +) -> NDArray[Any]: ... + +@overload +def ascontiguousarray( + a: _ArrayLike[_SCT], + dtype: None = ..., + *, + like: None | _SupportsArrayFunc = ..., +) -> NDArray[_SCT]: ... +@overload +def ascontiguousarray( + a: object, + dtype: None = ..., + *, + like: None | _SupportsArrayFunc = ..., +) -> NDArray[Any]: ... +@overload +def ascontiguousarray( + a: Any, + dtype: _DTypeLike[_SCT], + *, + like: None | _SupportsArrayFunc = ..., +) -> NDArray[_SCT]: ... +@overload +def ascontiguousarray( + a: Any, + dtype: DTypeLike, + *, + like: None | _SupportsArrayFunc = ..., +) -> NDArray[Any]: ... + +@overload +def asfortranarray( + a: _ArrayLike[_SCT], + dtype: None = ..., + *, + like: None | _SupportsArrayFunc = ..., +) -> NDArray[_SCT]: ... +@overload +def asfortranarray( + a: object, + dtype: None = ..., + *, + like: None | _SupportsArrayFunc = ..., +) -> NDArray[Any]: ... +@overload +def asfortranarray( + a: Any, + dtype: _DTypeLike[_SCT], + *, + like: None | _SupportsArrayFunc = ..., +) -> NDArray[_SCT]: ... +@overload +def asfortranarray( + a: Any, + dtype: DTypeLike, + *, + like: None | _SupportsArrayFunc = ..., +) -> NDArray[Any]: ... + +# In practice `list[Any]` is list with an int, int and a valid +# `np.seterrcall()` object +def geterrobj() -> list[Any]: ... 
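A similar runtime sketch for the bit-packing and memory-overlap stubs above (`packbits`, `unpackbits`, `shares_memory`, `may_share_memory`), again using only behaviour described in the docstrings earlier in this file and assuming a standard NumPy install:

import numpy as np

bits = np.array([0, 0, 0, 0, 0, 1, 1, 1], dtype=np.uint8)
packed = np.packbits(bits)               # 'big' bit order: 0b00000111 == 7
assert packed.tolist() == [7]
assert np.unpackbits(packed).tolist() == bits.tolist()

# count=-3 trims three bits from the end of the unpacked axis
assert np.unpackbits(packed, count=-3).tolist() == [0, 0, 0, 0, 0]

# shares_memory solves the overlap problem exactly; may_share_memory only
# checks memory bounds, so it can report True for views that never overlap
x = np.arange(10)
assert np.shares_memory(x[::2], x)
assert not np.shares_memory(x[::2], x[1::2])
assert np.may_share_memory(x[::2], x[1::2])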
+def seterrobj(errobj: list[Any], /) -> None: ... + +def promote_types(__type1: DTypeLike, __type2: DTypeLike) -> dtype[Any]: ... + +# `sep` is a de facto mandatory argument, as its default value is deprecated +@overload +def fromstring( + string: str | bytes, + dtype: None = ..., + count: SupportsIndex = ..., + *, + sep: str, + like: None | _SupportsArrayFunc = ..., +) -> NDArray[float64]: ... +@overload +def fromstring( + string: str | bytes, + dtype: _DTypeLike[_SCT], + count: SupportsIndex = ..., + *, + sep: str, + like: None | _SupportsArrayFunc = ..., +) -> NDArray[_SCT]: ... +@overload +def fromstring( + string: str | bytes, + dtype: DTypeLike, + count: SupportsIndex = ..., + *, + sep: str, + like: None | _SupportsArrayFunc = ..., +) -> NDArray[Any]: ... + +def frompyfunc( + func: Callable[..., Any], /, + nin: SupportsIndex, + nout: SupportsIndex, + *, + identity: Any = ..., +) -> ufunc: ... + +@overload +def fromfile( + file: str | bytes | os.PathLike[Any] | _IOProtocol, + dtype: None = ..., + count: SupportsIndex = ..., + sep: str = ..., + offset: SupportsIndex = ..., + *, + like: None | _SupportsArrayFunc = ..., +) -> NDArray[float64]: ... +@overload +def fromfile( + file: str | bytes | os.PathLike[Any] | _IOProtocol, + dtype: _DTypeLike[_SCT], + count: SupportsIndex = ..., + sep: str = ..., + offset: SupportsIndex = ..., + *, + like: None | _SupportsArrayFunc = ..., +) -> NDArray[_SCT]: ... +@overload +def fromfile( + file: str | bytes | os.PathLike[Any] | _IOProtocol, + dtype: DTypeLike, + count: SupportsIndex = ..., + sep: str = ..., + offset: SupportsIndex = ..., + *, + like: None | _SupportsArrayFunc = ..., +) -> NDArray[Any]: ... + +@overload +def fromiter( + iter: Iterable[Any], + dtype: _DTypeLike[_SCT], + count: SupportsIndex = ..., + *, + like: None | _SupportsArrayFunc = ..., +) -> NDArray[_SCT]: ... +@overload +def fromiter( + iter: Iterable[Any], + dtype: DTypeLike, + count: SupportsIndex = ..., + *, + like: None | _SupportsArrayFunc = ..., +) -> NDArray[Any]: ... + +@overload +def frombuffer( + buffer: _SupportsBuffer, + dtype: None = ..., + count: SupportsIndex = ..., + offset: SupportsIndex = ..., + *, + like: None | _SupportsArrayFunc = ..., +) -> NDArray[float64]: ... +@overload +def frombuffer( + buffer: _SupportsBuffer, + dtype: _DTypeLike[_SCT], + count: SupportsIndex = ..., + offset: SupportsIndex = ..., + *, + like: None | _SupportsArrayFunc = ..., +) -> NDArray[_SCT]: ... +@overload +def frombuffer( + buffer: _SupportsBuffer, + dtype: DTypeLike, + count: SupportsIndex = ..., + offset: SupportsIndex = ..., + *, + like: None | _SupportsArrayFunc = ..., +) -> NDArray[Any]: ... + +@overload +def arange( # type: ignore[misc] + stop: _IntLike_co, + /, *, + dtype: None = ..., + like: None | _SupportsArrayFunc = ..., +) -> NDArray[signedinteger[Any]]: ... +@overload +def arange( # type: ignore[misc] + start: _IntLike_co, + stop: _IntLike_co, + step: _IntLike_co = ..., + dtype: None = ..., + *, + like: None | _SupportsArrayFunc = ..., +) -> NDArray[signedinteger[Any]]: ... +@overload +def arange( # type: ignore[misc] + stop: _FloatLike_co, + /, *, + dtype: None = ..., + like: None | _SupportsArrayFunc = ..., +) -> NDArray[floating[Any]]: ... +@overload +def arange( # type: ignore[misc] + start: _FloatLike_co, + stop: _FloatLike_co, + step: _FloatLike_co = ..., + dtype: None = ..., + *, + like: None | _SupportsArrayFunc = ..., +) -> NDArray[floating[Any]]: ... 
+@overload +def arange( + stop: _TD64Like_co, + /, *, + dtype: None = ..., + like: None | _SupportsArrayFunc = ..., +) -> NDArray[timedelta64]: ... +@overload +def arange( + start: _TD64Like_co, + stop: _TD64Like_co, + step: _TD64Like_co = ..., + dtype: None = ..., + *, + like: None | _SupportsArrayFunc = ..., +) -> NDArray[timedelta64]: ... +@overload +def arange( # both start and stop must always be specified for datetime64 + start: datetime64, + stop: datetime64, + step: datetime64 = ..., + dtype: None = ..., + *, + like: None | _SupportsArrayFunc = ..., +) -> NDArray[datetime64]: ... +@overload +def arange( + stop: Any, + /, *, + dtype: _DTypeLike[_SCT], + like: None | _SupportsArrayFunc = ..., +) -> NDArray[_SCT]: ... +@overload +def arange( + start: Any, + stop: Any, + step: Any = ..., + dtype: _DTypeLike[_SCT] = ..., + *, + like: None | _SupportsArrayFunc = ..., +) -> NDArray[_SCT]: ... +@overload +def arange( + stop: Any, /, + *, + dtype: DTypeLike, + like: None | _SupportsArrayFunc = ..., +) -> NDArray[Any]: ... +@overload +def arange( + start: Any, + stop: Any, + step: Any = ..., + dtype: DTypeLike = ..., + *, + like: None | _SupportsArrayFunc = ..., +) -> NDArray[Any]: ... + +def datetime_data( + dtype: str | _DTypeLike[datetime64] | _DTypeLike[timedelta64], /, +) -> tuple[str, int]: ... + +# The datetime functions perform unsafe casts to `datetime64[D]`, +# so a lot of different argument types are allowed here + +@overload +def busday_count( # type: ignore[misc] + begindates: _ScalarLike_co | dt.date, + enddates: _ScalarLike_co | dt.date, + weekmask: ArrayLike = ..., + holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ..., + busdaycal: None | busdaycalendar = ..., + out: None = ..., +) -> int_: ... +@overload +def busday_count( # type: ignore[misc] + begindates: ArrayLike | dt.date | _NestedSequence[dt.date], + enddates: ArrayLike | dt.date | _NestedSequence[dt.date], + weekmask: ArrayLike = ..., + holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ..., + busdaycal: None | busdaycalendar = ..., + out: None = ..., +) -> NDArray[int_]: ... +@overload +def busday_count( + begindates: ArrayLike | dt.date | _NestedSequence[dt.date], + enddates: ArrayLike | dt.date | _NestedSequence[dt.date], + weekmask: ArrayLike = ..., + holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ..., + busdaycal: None | busdaycalendar = ..., + out: _ArrayType = ..., +) -> _ArrayType: ... + +# `roll="raise"` is (more or less?) equivalent to `casting="safe"` +@overload +def busday_offset( # type: ignore[misc] + dates: datetime64 | dt.date, + offsets: _TD64Like_co | dt.timedelta, + roll: L["raise"] = ..., + weekmask: ArrayLike = ..., + holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ..., + busdaycal: None | busdaycalendar = ..., + out: None = ..., +) -> datetime64: ... +@overload +def busday_offset( # type: ignore[misc] + dates: _ArrayLike[datetime64] | dt.date | _NestedSequence[dt.date], + offsets: _ArrayLikeTD64_co | dt.timedelta | _NestedSequence[dt.timedelta], + roll: L["raise"] = ..., + weekmask: ArrayLike = ..., + holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ..., + busdaycal: None | busdaycalendar = ..., + out: None = ..., +) -> NDArray[datetime64]: ... 
+@overload +def busday_offset( # type: ignore[misc] + dates: _ArrayLike[datetime64] | dt.date | _NestedSequence[dt.date], + offsets: _ArrayLikeTD64_co | dt.timedelta | _NestedSequence[dt.timedelta], + roll: L["raise"] = ..., + weekmask: ArrayLike = ..., + holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ..., + busdaycal: None | busdaycalendar = ..., + out: _ArrayType = ..., +) -> _ArrayType: ... +@overload +def busday_offset( # type: ignore[misc] + dates: _ScalarLike_co | dt.date, + offsets: _ScalarLike_co | dt.timedelta, + roll: _RollKind, + weekmask: ArrayLike = ..., + holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ..., + busdaycal: None | busdaycalendar = ..., + out: None = ..., +) -> datetime64: ... +@overload +def busday_offset( # type: ignore[misc] + dates: ArrayLike | dt.date | _NestedSequence[dt.date], + offsets: ArrayLike | dt.timedelta | _NestedSequence[dt.timedelta], + roll: _RollKind, + weekmask: ArrayLike = ..., + holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ..., + busdaycal: None | busdaycalendar = ..., + out: None = ..., +) -> NDArray[datetime64]: ... +@overload +def busday_offset( + dates: ArrayLike | dt.date | _NestedSequence[dt.date], + offsets: ArrayLike | dt.timedelta | _NestedSequence[dt.timedelta], + roll: _RollKind, + weekmask: ArrayLike = ..., + holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ..., + busdaycal: None | busdaycalendar = ..., + out: _ArrayType = ..., +) -> _ArrayType: ... + +@overload +def is_busday( # type: ignore[misc] + dates: _ScalarLike_co | dt.date, + weekmask: ArrayLike = ..., + holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ..., + busdaycal: None | busdaycalendar = ..., + out: None = ..., +) -> bool_: ... +@overload +def is_busday( # type: ignore[misc] + dates: ArrayLike | _NestedSequence[dt.date], + weekmask: ArrayLike = ..., + holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ..., + busdaycal: None | busdaycalendar = ..., + out: None = ..., +) -> NDArray[bool_]: ... +@overload +def is_busday( + dates: ArrayLike | _NestedSequence[dt.date], + weekmask: ArrayLike = ..., + holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ..., + busdaycal: None | busdaycalendar = ..., + out: _ArrayType = ..., +) -> _ArrayType: ... + +@overload +def datetime_as_string( # type: ignore[misc] + arr: datetime64 | dt.date, + unit: None | L["auto"] | _UnitKind = ..., + timezone: L["naive", "UTC", "local"] | dt.tzinfo = ..., + casting: _CastingKind = ..., +) -> str_: ... +@overload +def datetime_as_string( + arr: _ArrayLikeDT64_co | _NestedSequence[dt.date], + unit: None | L["auto"] | _UnitKind = ..., + timezone: L["naive", "UTC", "local"] | dt.tzinfo = ..., + casting: _CastingKind = ..., +) -> NDArray[str_]: ... + +@overload +def compare_chararrays( + a1: _ArrayLikeStr_co, + a2: _ArrayLikeStr_co, + cmp: L["<", "<=", "==", ">=", ">", "!="], + rstrip: bool, +) -> NDArray[bool_]: ... +@overload +def compare_chararrays( + a1: _ArrayLikeBytes_co, + a2: _ArrayLikeBytes_co, + cmp: L["<", "<=", "==", ">=", ">", "!="], + rstrip: bool, +) -> NDArray[bool_]: ... + +def add_docstring(obj: Callable[..., Any], docstring: str, /) -> None: ... 
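The business-day and datetime-formatting stubs above cover the calendar API documented earlier in this file. A short sketch, with the expected values taken from those docstrings (a standard NumPy install is assumed):

import numpy as np

# Weekdays in January 2011, and Saturdays in all of 2011
assert np.busday_count('2011-01', '2011-02') == 21
assert np.busday_count('2011', '2012', weekmask='Sat') == 53

# Roll forward to the first business day on or after a date
assert np.busday_offset('2011-03-20', 0, roll='forward') == np.datetime64('2011-03-21')

# is_busday with explicit holidays, as in the docstring example
mask = np.is_busday(['2011-07-01', '2011-07-02', '2011-07-18'],
                    holidays=['2011-07-01', '2011-07-04', '2011-07-17'])
assert mask.tolist() == [False, False, True]

# datetime_as_string controls the unit (and timezone) of the rendered strings
d = np.arange('2002-10-27T04:30', 4 * 60, 60, dtype='M8[m]')
assert np.datetime_as_string(d[:2], unit='h').tolist() == ['2002-10-27T04', '2002-10-27T05']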
+ +_GetItemKeys = L[ + "C", "CONTIGUOUS", "C_CONTIGUOUS", + "F", "FORTRAN", "F_CONTIGUOUS", + "W", "WRITEABLE", + "B", "BEHAVED", + "O", "OWNDATA", + "A", "ALIGNED", + "X", "WRITEBACKIFCOPY", + "CA", "CARRAY", + "FA", "FARRAY", + "FNC", + "FORC", +] +_SetItemKeys = L[ + "A", "ALIGNED", + "W", "WRITEABLE", + "X", "WRITEBACKIFCOPY", +] + +@final +class flagsobj: + __hash__: ClassVar[None] # type: ignore[assignment] + aligned: bool + # NOTE: deprecated + # updateifcopy: bool + writeable: bool + writebackifcopy: bool + @property + def behaved(self) -> bool: ... + @property + def c_contiguous(self) -> bool: ... + @property + def carray(self) -> bool: ... + @property + def contiguous(self) -> bool: ... + @property + def f_contiguous(self) -> bool: ... + @property + def farray(self) -> bool: ... + @property + def fnc(self) -> bool: ... + @property + def forc(self) -> bool: ... + @property + def fortran(self) -> bool: ... + @property + def num(self) -> int: ... + @property + def owndata(self) -> bool: ... + def __getitem__(self, key: _GetItemKeys) -> bool: ... + def __setitem__(self, key: _SetItemKeys, value: bool) -> None: ... + +def nested_iters( + op: ArrayLike | Sequence[ArrayLike], + axes: Sequence[Sequence[SupportsIndex]], + flags: None | Sequence[_NDIterFlagsKind] = ..., + op_flags: None | Sequence[Sequence[_NDIterOpFlagsKind]] = ..., + op_dtypes: DTypeLike | Sequence[DTypeLike] = ..., + order: _OrderKACF = ..., + casting: _CastingKind = ..., + buffersize: SupportsIndex = ..., +) -> tuple[nditer, ...]: ... diff --git a/.venv/lib/python3.12/site-packages/numpy/core/numeric.py b/.venv/lib/python3.12/site-packages/numpy/core/numeric.py new file mode 100644 index 00000000..91ac3f86 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/numeric.py @@ -0,0 +1,2530 @@ +import functools +import itertools +import operator +import sys +import warnings +import numbers +import builtins + +import numpy as np +from . import multiarray +from .multiarray import ( + fastCopyAndTranspose, ALLOW_THREADS, + BUFSIZE, CLIP, MAXDIMS, MAY_SHARE_BOUNDS, MAY_SHARE_EXACT, RAISE, + WRAP, arange, array, asarray, asanyarray, ascontiguousarray, + asfortranarray, broadcast, can_cast, compare_chararrays, + concatenate, copyto, dot, dtype, empty, + empty_like, flatiter, frombuffer, from_dlpack, fromfile, fromiter, + fromstring, inner, lexsort, matmul, may_share_memory, + min_scalar_type, ndarray, nditer, nested_iters, promote_types, + putmask, result_type, set_numeric_ops, shares_memory, vdot, where, + zeros, normalize_axis_index, _get_promotion_state, _set_promotion_state, + _using_numpy2_behavior) + +from . import overrides +from . import umath +from . import shape_base +from .overrides import set_array_function_like_doc, set_module +from .umath import (multiply, invert, sin, PINF, NAN) +from . 
import numerictypes +from .numerictypes import longlong, intc, int_, float_, complex_, bool_ +from ..exceptions import ComplexWarning, TooHardError, AxisError +from ._ufunc_config import errstate, _no_nep50_warning + +bitwise_not = invert +ufunc = type(sin) +newaxis = None + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy') + + +__all__ = [ + 'newaxis', 'ndarray', 'flatiter', 'nditer', 'nested_iters', 'ufunc', + 'arange', 'array', 'asarray', 'asanyarray', 'ascontiguousarray', + 'asfortranarray', 'zeros', 'count_nonzero', 'empty', 'broadcast', 'dtype', + 'fromstring', 'fromfile', 'frombuffer', 'from_dlpack', 'where', + 'argwhere', 'copyto', 'concatenate', 'fastCopyAndTranspose', 'lexsort', + 'set_numeric_ops', 'can_cast', 'promote_types', 'min_scalar_type', + 'result_type', 'isfortran', 'empty_like', 'zeros_like', 'ones_like', + 'correlate', 'convolve', 'inner', 'dot', 'outer', 'vdot', 'roll', + 'rollaxis', 'moveaxis', 'cross', 'tensordot', 'little_endian', + 'fromiter', 'array_equal', 'array_equiv', 'indices', 'fromfunction', + 'isclose', 'isscalar', 'binary_repr', 'base_repr', 'ones', + 'identity', 'allclose', 'compare_chararrays', 'putmask', + 'flatnonzero', 'Inf', 'inf', 'infty', 'Infinity', 'nan', 'NaN', + 'False_', 'True_', 'bitwise_not', 'CLIP', 'RAISE', 'WRAP', 'MAXDIMS', + 'BUFSIZE', 'ALLOW_THREADS', 'full', 'full_like', + 'matmul', 'shares_memory', 'may_share_memory', 'MAY_SHARE_BOUNDS', + 'MAY_SHARE_EXACT', '_get_promotion_state', '_set_promotion_state', + '_using_numpy2_behavior'] + + +def _zeros_like_dispatcher(a, dtype=None, order=None, subok=None, shape=None): + return (a,) + + +@array_function_dispatch(_zeros_like_dispatcher) +def zeros_like(a, dtype=None, order='K', subok=True, shape=None): + """ + Return an array of zeros with the same shape and type as a given array. + + Parameters + ---------- + a : array_like + The shape and data-type of `a` define these same attributes of + the returned array. + dtype : data-type, optional + Overrides the data type of the result. + + .. versionadded:: 1.6.0 + order : {'C', 'F', 'A', or 'K'}, optional + Overrides the memory layout of the result. 'C' means C-order, + 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous, + 'C' otherwise. 'K' means match the layout of `a` as closely + as possible. + + .. versionadded:: 1.6.0 + subok : bool, optional. + If True, then the newly created array will use the sub-class + type of `a`, otherwise it will be a base-class array. Defaults + to True. + shape : int or sequence of ints, optional. + Overrides the shape of the result. If order='K' and the number of + dimensions is unchanged, will try to keep order, otherwise, + order='C' is implied. + + .. versionadded:: 1.17.0 + + Returns + ------- + out : ndarray + Array of zeros with the same shape and type as `a`. + + See Also + -------- + empty_like : Return an empty array with shape and type of input. + ones_like : Return an array of ones with shape and type of input. + full_like : Return a new array with shape of input filled with value. + zeros : Return a new array setting values to zero. 
+ + Examples + -------- + >>> x = np.arange(6) + >>> x = x.reshape((2, 3)) + >>> x + array([[0, 1, 2], + [3, 4, 5]]) + >>> np.zeros_like(x) + array([[0, 0, 0], + [0, 0, 0]]) + + >>> y = np.arange(3, dtype=float) + >>> y + array([0., 1., 2.]) + >>> np.zeros_like(y) + array([0., 0., 0.]) + + """ + res = empty_like(a, dtype=dtype, order=order, subok=subok, shape=shape) + # needed instead of a 0 to get same result as zeros for string dtypes + z = zeros(1, dtype=res.dtype) + multiarray.copyto(res, z, casting='unsafe') + return res + + +@set_array_function_like_doc +@set_module('numpy') +def ones(shape, dtype=None, order='C', *, like=None): + """ + Return a new array of given shape and type, filled with ones. + + Parameters + ---------- + shape : int or sequence of ints + Shape of the new array, e.g., ``(2, 3)`` or ``2``. + dtype : data-type, optional + The desired data-type for the array, e.g., `numpy.int8`. Default is + `numpy.float64`. + order : {'C', 'F'}, optional, default: C + Whether to store multi-dimensional data in row-major + (C-style) or column-major (Fortran-style) order in + memory. + ${ARRAY_FUNCTION_LIKE} + + .. versionadded:: 1.20.0 + + Returns + ------- + out : ndarray + Array of ones with the given shape, dtype, and order. + + See Also + -------- + ones_like : Return an array of ones with shape and type of input. + empty : Return a new uninitialized array. + zeros : Return a new array setting values to zero. + full : Return a new array of given shape filled with value. + + + Examples + -------- + >>> np.ones(5) + array([1., 1., 1., 1., 1.]) + + >>> np.ones((5,), dtype=int) + array([1, 1, 1, 1, 1]) + + >>> np.ones((2, 1)) + array([[1.], + [1.]]) + + >>> s = (2,2) + >>> np.ones(s) + array([[1., 1.], + [1., 1.]]) + + """ + if like is not None: + return _ones_with_like(like, shape, dtype=dtype, order=order) + + a = empty(shape, dtype, order) + multiarray.copyto(a, 1, casting='unsafe') + return a + + +_ones_with_like = array_function_dispatch()(ones) + + +def _ones_like_dispatcher(a, dtype=None, order=None, subok=None, shape=None): + return (a,) + + +@array_function_dispatch(_ones_like_dispatcher) +def ones_like(a, dtype=None, order='K', subok=True, shape=None): + """ + Return an array of ones with the same shape and type as a given array. + + Parameters + ---------- + a : array_like + The shape and data-type of `a` define these same attributes of + the returned array. + dtype : data-type, optional + Overrides the data type of the result. + + .. versionadded:: 1.6.0 + order : {'C', 'F', 'A', or 'K'}, optional + Overrides the memory layout of the result. 'C' means C-order, + 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous, + 'C' otherwise. 'K' means match the layout of `a` as closely + as possible. + + .. versionadded:: 1.6.0 + subok : bool, optional. + If True, then the newly created array will use the sub-class + type of `a`, otherwise it will be a base-class array. Defaults + to True. + shape : int or sequence of ints, optional. + Overrides the shape of the result. If order='K' and the number of + dimensions is unchanged, will try to keep order, otherwise, + order='C' is implied. + + .. versionadded:: 1.17.0 + + Returns + ------- + out : ndarray + Array of ones with the same shape and type as `a`. + + See Also + -------- + empty_like : Return an empty array with shape and type of input. + zeros_like : Return an array of zeros with shape and type of input. + full_like : Return a new array with shape of input filled with value. 
+ ones : Return a new array setting values to one. + + Examples + -------- + >>> x = np.arange(6) + >>> x = x.reshape((2, 3)) + >>> x + array([[0, 1, 2], + [3, 4, 5]]) + >>> np.ones_like(x) + array([[1, 1, 1], + [1, 1, 1]]) + + >>> y = np.arange(3, dtype=float) + >>> y + array([0., 1., 2.]) + >>> np.ones_like(y) + array([1., 1., 1.]) + + """ + res = empty_like(a, dtype=dtype, order=order, subok=subok, shape=shape) + multiarray.copyto(res, 1, casting='unsafe') + return res + + +def _full_dispatcher(shape, fill_value, dtype=None, order=None, *, like=None): + return(like,) + + +@set_array_function_like_doc +@set_module('numpy') +def full(shape, fill_value, dtype=None, order='C', *, like=None): + """ + Return a new array of given shape and type, filled with `fill_value`. + + Parameters + ---------- + shape : int or sequence of ints + Shape of the new array, e.g., ``(2, 3)`` or ``2``. + fill_value : scalar or array_like + Fill value. + dtype : data-type, optional + The desired data-type for the array The default, None, means + ``np.array(fill_value).dtype``. + order : {'C', 'F'}, optional + Whether to store multidimensional data in C- or Fortran-contiguous + (row- or column-wise) order in memory. + ${ARRAY_FUNCTION_LIKE} + + .. versionadded:: 1.20.0 + + Returns + ------- + out : ndarray + Array of `fill_value` with the given shape, dtype, and order. + + See Also + -------- + full_like : Return a new array with shape of input filled with value. + empty : Return a new uninitialized array. + ones : Return a new array setting values to one. + zeros : Return a new array setting values to zero. + + Examples + -------- + >>> np.full((2, 2), np.inf) + array([[inf, inf], + [inf, inf]]) + >>> np.full((2, 2), 10) + array([[10, 10], + [10, 10]]) + + >>> np.full((2, 2), [1, 2]) + array([[1, 2], + [1, 2]]) + + """ + if like is not None: + return _full_with_like( + like, shape, fill_value, dtype=dtype, order=order) + + if dtype is None: + fill_value = asarray(fill_value) + dtype = fill_value.dtype + a = empty(shape, dtype, order) + multiarray.copyto(a, fill_value, casting='unsafe') + return a + + +_full_with_like = array_function_dispatch()(full) + + +def _full_like_dispatcher(a, fill_value, dtype=None, order=None, subok=None, shape=None): + return (a,) + + +@array_function_dispatch(_full_like_dispatcher) +def full_like(a, fill_value, dtype=None, order='K', subok=True, shape=None): + """ + Return a full array with the same shape and type as a given array. + + Parameters + ---------- + a : array_like + The shape and data-type of `a` define these same attributes of + the returned array. + fill_value : array_like + Fill value. + dtype : data-type, optional + Overrides the data type of the result. + order : {'C', 'F', 'A', or 'K'}, optional + Overrides the memory layout of the result. 'C' means C-order, + 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous, + 'C' otherwise. 'K' means match the layout of `a` as closely + as possible. + subok : bool, optional. + If True, then the newly created array will use the sub-class + type of `a`, otherwise it will be a base-class array. Defaults + to True. + shape : int or sequence of ints, optional. + Overrides the shape of the result. If order='K' and the number of + dimensions is unchanged, will try to keep order, otherwise, + order='C' is implied. + + .. versionadded:: 1.17.0 + + Returns + ------- + out : ndarray + Array of `fill_value` with the same shape and type as `a`. 
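A minimal sketch of the `dtype=None` inference rule for `full` described above (the dtype is taken from ``np.array(fill_value)``; the exact integer dtype is platform dependent):

import numpy as np

np.full((2, 2), 7).dtype                      # integer dtype inferred from the int fill_value
np.full((2, 2), 7.0).dtype                    # dtype('float64') inferred from the float fill_value
np.full((2, 2), 7, dtype=np.float32).dtype    # an explicit dtype takes precedence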
+ + See Also + -------- + empty_like : Return an empty array with shape and type of input. + ones_like : Return an array of ones with shape and type of input. + zeros_like : Return an array of zeros with shape and type of input. + full : Return a new array of given shape filled with value. + + Examples + -------- + >>> x = np.arange(6, dtype=int) + >>> np.full_like(x, 1) + array([1, 1, 1, 1, 1, 1]) + >>> np.full_like(x, 0.1) + array([0, 0, 0, 0, 0, 0]) + >>> np.full_like(x, 0.1, dtype=np.double) + array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1]) + >>> np.full_like(x, np.nan, dtype=np.double) + array([nan, nan, nan, nan, nan, nan]) + + >>> y = np.arange(6, dtype=np.double) + >>> np.full_like(y, 0.1) + array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1]) + + >>> y = np.zeros([2, 2, 3], dtype=int) + >>> np.full_like(y, [0, 0, 255]) + array([[[ 0, 0, 255], + [ 0, 0, 255]], + [[ 0, 0, 255], + [ 0, 0, 255]]]) + """ + res = empty_like(a, dtype=dtype, order=order, subok=subok, shape=shape) + multiarray.copyto(res, fill_value, casting='unsafe') + return res + + +def _count_nonzero_dispatcher(a, axis=None, *, keepdims=None): + return (a,) + + +@array_function_dispatch(_count_nonzero_dispatcher) +def count_nonzero(a, axis=None, *, keepdims=False): + """ + Counts the number of non-zero values in the array ``a``. + + The word "non-zero" is in reference to the Python 2.x + built-in method ``__nonzero__()`` (renamed ``__bool__()`` + in Python 3.x) of Python objects that tests an object's + "truthfulness". For example, any number is considered + truthful if it is nonzero, whereas any string is considered + truthful if it is not the empty string. Thus, this function + (recursively) counts how many elements in ``a`` (and in + sub-arrays thereof) have their ``__nonzero__()`` or ``__bool__()`` + method evaluated to ``True``. + + Parameters + ---------- + a : array_like + The array for which to count non-zeros. + axis : int or tuple, optional + Axis or tuple of axes along which to count non-zeros. + Default is None, meaning that non-zeros will be counted + along a flattened version of ``a``. + + .. versionadded:: 1.12.0 + + keepdims : bool, optional + If this is set to True, the axes that are counted are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the input array. + + .. versionadded:: 1.19.0 + + Returns + ------- + count : int or array of int + Number of non-zero values in the array along a given axis. + Otherwise, the total number of non-zero values in the array + is returned. + + See Also + -------- + nonzero : Return the coordinates of all the non-zero values. + + Examples + -------- + >>> np.count_nonzero(np.eye(4)) + 4 + >>> a = np.array([[0, 1, 7, 0], + ... [3, 0, 2, 19]]) + >>> np.count_nonzero(a) + 5 + >>> np.count_nonzero(a, axis=0) + array([1, 1, 2, 1]) + >>> np.count_nonzero(a, axis=1) + array([2, 3]) + >>> np.count_nonzero(a, axis=1, keepdims=True) + array([[2], + [3]]) + """ + if axis is None and not keepdims: + return multiarray.count_nonzero(a) + + a = asanyarray(a) + + # TODO: this works around .astype(bool) not working properly (gh-9847) + if np.issubdtype(a.dtype, np.character): + a_bool = a != a.dtype.type() + else: + a_bool = a.astype(np.bool_, copy=False) + + return a_bool.sum(axis=axis, dtype=np.intp, keepdims=keepdims) + + +@set_module('numpy') +def isfortran(a): + """ + Check if the array is Fortran contiguous but *not* C contiguous. 
+ + This function is obsolete and, because of changes due to relaxed stride + checking, its return value for the same array may differ for versions + of NumPy >= 1.10.0 and previous versions. If you only want to check if an + array is Fortran contiguous use ``a.flags.f_contiguous`` instead. + + Parameters + ---------- + a : ndarray + Input array. + + Returns + ------- + isfortran : bool + Returns True if the array is Fortran contiguous but *not* C contiguous. + + + Examples + -------- + + np.array allows to specify whether the array is written in C-contiguous + order (last index varies the fastest), or FORTRAN-contiguous order in + memory (first index varies the fastest). + + >>> a = np.array([[1, 2, 3], [4, 5, 6]], order='C') + >>> a + array([[1, 2, 3], + [4, 5, 6]]) + >>> np.isfortran(a) + False + + >>> b = np.array([[1, 2, 3], [4, 5, 6]], order='F') + >>> b + array([[1, 2, 3], + [4, 5, 6]]) + >>> np.isfortran(b) + True + + + The transpose of a C-ordered array is a FORTRAN-ordered array. + + >>> a = np.array([[1, 2, 3], [4, 5, 6]], order='C') + >>> a + array([[1, 2, 3], + [4, 5, 6]]) + >>> np.isfortran(a) + False + >>> b = a.T + >>> b + array([[1, 4], + [2, 5], + [3, 6]]) + >>> np.isfortran(b) + True + + C-ordered arrays evaluate as False even if they are also FORTRAN-ordered. + + >>> np.isfortran(np.array([1, 2], order='F')) + False + + """ + return a.flags.fnc + + +def _argwhere_dispatcher(a): + return (a,) + + +@array_function_dispatch(_argwhere_dispatcher) +def argwhere(a): + """ + Find the indices of array elements that are non-zero, grouped by element. + + Parameters + ---------- + a : array_like + Input data. + + Returns + ------- + index_array : (N, a.ndim) ndarray + Indices of elements that are non-zero. Indices are grouped by element. + This array will have shape ``(N, a.ndim)`` where ``N`` is the number of + non-zero items. + + See Also + -------- + where, nonzero + + Notes + ----- + ``np.argwhere(a)`` is almost the same as ``np.transpose(np.nonzero(a))``, + but produces a result of the correct shape for a 0D array. + + The output of ``argwhere`` is not suitable for indexing arrays. + For this purpose use ``nonzero(a)`` instead. + + Examples + -------- + >>> x = np.arange(6).reshape(2,3) + >>> x + array([[0, 1, 2], + [3, 4, 5]]) + >>> np.argwhere(x>1) + array([[0, 2], + [1, 0], + [1, 1], + [1, 2]]) + + """ + # nonzero does not behave well on 0d, so promote to 1d + if np.ndim(a) == 0: + a = shape_base.atleast_1d(a) + # then remove the added dimension + return argwhere(a)[:,:0] + return transpose(nonzero(a)) + + +def _flatnonzero_dispatcher(a): + return (a,) + + +@array_function_dispatch(_flatnonzero_dispatcher) +def flatnonzero(a): + """ + Return indices that are non-zero in the flattened version of a. + + This is equivalent to ``np.nonzero(np.ravel(a))[0]``. + + Parameters + ---------- + a : array_like + Input data. + + Returns + ------- + res : ndarray + Output array, containing the indices of the elements of ``a.ravel()`` + that are non-zero. + + See Also + -------- + nonzero : Return the indices of the non-zero elements of the input array. + ravel : Return a 1-D array containing the elements of the input array. 
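A minimal sketch of the equivalences noted above for `argwhere` and `flatnonzero`:

import numpy as np

x = np.arange(6).reshape(2, 3)
np.argwhere(x > 1)                   # grouped (row, col) indices of the non-zero entries
np.transpose(np.nonzero(x > 1))      # same result for this non-0d input
np.flatnonzero(x > 1)                # equals np.nonzero(np.ravel(x > 1))[0]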
+ + Examples + -------- + >>> x = np.arange(-2, 3) + >>> x + array([-2, -1, 0, 1, 2]) + >>> np.flatnonzero(x) + array([0, 1, 3, 4]) + + Use the indices of the non-zero elements as an index array to extract + these elements: + + >>> x.ravel()[np.flatnonzero(x)] + array([-2, -1, 1, 2]) + + """ + return np.nonzero(np.ravel(a))[0] + + +def _correlate_dispatcher(a, v, mode=None): + return (a, v) + + +@array_function_dispatch(_correlate_dispatcher) +def correlate(a, v, mode='valid'): + r""" + Cross-correlation of two 1-dimensional sequences. + + This function computes the correlation as generally defined in signal + processing texts: + + .. math:: c_k = \sum_n a_{n+k} \cdot \overline{v}_n + + with a and v sequences being zero-padded where necessary and + :math:`\overline x` denoting complex conjugation. + + Parameters + ---------- + a, v : array_like + Input sequences. + mode : {'valid', 'same', 'full'}, optional + Refer to the `convolve` docstring. Note that the default + is 'valid', unlike `convolve`, which uses 'full'. + old_behavior : bool + `old_behavior` was removed in NumPy 1.10. If you need the old + behavior, use `multiarray.correlate`. + + Returns + ------- + out : ndarray + Discrete cross-correlation of `a` and `v`. + + See Also + -------- + convolve : Discrete, linear convolution of two one-dimensional sequences. + multiarray.correlate : Old, no conjugate, version of correlate. + scipy.signal.correlate : uses FFT which has superior performance on large arrays. + + Notes + ----- + The definition of correlation above is not unique and sometimes correlation + may be defined differently. Another common definition is: + + .. math:: c'_k = \sum_n a_{n} \cdot \overline{v_{n+k}} + + which is related to :math:`c_k` by :math:`c'_k = c_{-k}`. + + `numpy.correlate` may perform slowly in large arrays (i.e. n = 1e5) because it does + not use the FFT to compute the convolution; in that case, `scipy.signal.correlate` might + be preferable. + + + Examples + -------- + >>> np.correlate([1, 2, 3], [0, 1, 0.5]) + array([3.5]) + >>> np.correlate([1, 2, 3], [0, 1, 0.5], "same") + array([2. , 3.5, 3. ]) + >>> np.correlate([1, 2, 3], [0, 1, 0.5], "full") + array([0.5, 2. , 3.5, 3. , 0. ]) + + Using complex sequences: + + >>> np.correlate([1+1j, 2, 3-1j], [0, 1, 0.5j], 'full') + array([ 0.5-0.5j, 1.0+0.j , 1.5-1.5j, 3.0-1.j , 0.0+0.j ]) + + Note that you get the time reversed, complex conjugated result + (:math:`\overline{c_{-k}}`) when the two input sequences a and v change + places: + + >>> np.correlate([0, 1, 0.5j], [1+1j, 2, 3-1j], 'full') + array([ 0.0+0.j , 3.0+1.j , 1.5+1.5j, 1.0+0.j , 0.5+0.5j]) + + """ + return multiarray.correlate2(a, v, mode) + + +def _convolve_dispatcher(a, v, mode=None): + return (a, v) + + +@array_function_dispatch(_convolve_dispatcher) +def convolve(a, v, mode='full'): + """ + Returns the discrete, linear convolution of two one-dimensional sequences. + + The convolution operator is often seen in signal processing, where it + models the effect of a linear time-invariant system on a signal [1]_. In + probability theory, the sum of two independent random variables is + distributed according to the convolution of their individual + distributions. + + If `v` is longer than `a`, the arrays are swapped before computation. + + Parameters + ---------- + a : (N,) array_like + First one-dimensional input array. + v : (M,) array_like + Second one-dimensional input array. + mode : {'full', 'valid', 'same'}, optional + 'full': + By default, mode is 'full'. 
This returns the convolution + at each point of overlap, with an output shape of (N+M-1,). At + the end-points of the convolution, the signals do not overlap + completely, and boundary effects may be seen. + + 'same': + Mode 'same' returns output of length ``max(M, N)``. Boundary + effects are still visible. + + 'valid': + Mode 'valid' returns output of length + ``max(M, N) - min(M, N) + 1``. The convolution product is only given + for points where the signals overlap completely. Values outside + the signal boundary have no effect. + + Returns + ------- + out : ndarray + Discrete, linear convolution of `a` and `v`. + + See Also + -------- + scipy.signal.fftconvolve : Convolve two arrays using the Fast Fourier + Transform. + scipy.linalg.toeplitz : Used to construct the convolution operator. + polymul : Polynomial multiplication. Same output as convolve, but also + accepts poly1d objects as input. + + Notes + ----- + The discrete convolution operation is defined as + + .. math:: (a * v)_n = \\sum_{m = -\\infty}^{\\infty} a_m v_{n - m} + + It can be shown that a convolution :math:`x(t) * y(t)` in time/space + is equivalent to the multiplication :math:`X(f) Y(f)` in the Fourier + domain, after appropriate padding (padding is necessary to prevent + circular convolution). Since multiplication is more efficient (faster) + than convolution, the function `scipy.signal.fftconvolve` exploits the + FFT to calculate the convolution of large data-sets. + + References + ---------- + .. [1] Wikipedia, "Convolution", + https://en.wikipedia.org/wiki/Convolution + + Examples + -------- + Note how the convolution operator flips the second array + before "sliding" the two across one another: + + >>> np.convolve([1, 2, 3], [0, 1, 0.5]) + array([0. , 1. , 2.5, 4. , 1.5]) + + Only return the middle values of the convolution. + Contains boundary effects, where zeros are taken + into account: + + >>> np.convolve([1,2,3],[0,1,0.5], 'same') + array([1. , 2.5, 4. ]) + + The two arrays are of the same length, so there + is only one position where they completely overlap: + + >>> np.convolve([1,2,3],[0,1,0.5], 'valid') + array([2.5]) + + """ + a, v = array(a, copy=False, ndmin=1), array(v, copy=False, ndmin=1) + if (len(v) > len(a)): + a, v = v, a + if len(a) == 0: + raise ValueError('a cannot be empty') + if len(v) == 0: + raise ValueError('v cannot be empty') + return multiarray.correlate(a, v[::-1], mode) + + +def _outer_dispatcher(a, b, out=None): + return (a, b, out) + + +@array_function_dispatch(_outer_dispatcher) +def outer(a, b, out=None): + """ + Compute the outer product of two vectors. + + Given two vectors `a` and `b` of length ``M`` and ``N``, repsectively, + the outer product [1]_ is:: + + [[a_0*b_0 a_0*b_1 ... a_0*b_{N-1} ] + [a_1*b_0 . + [ ... . + [a_{M-1}*b_0 a_{M-1}*b_{N-1} ]] + + Parameters + ---------- + a : (M,) array_like + First input vector. Input is flattened if + not already 1-dimensional. + b : (N,) array_like + Second input vector. Input is flattened if + not already 1-dimensional. + out : (M, N) ndarray, optional + A location where the result is stored + + .. versionadded:: 1.9.0 + + Returns + ------- + out : (M, N) ndarray + ``out[i, j] = a[i] * b[j]`` + + See also + -------- + inner + einsum : ``einsum('i,j->ij', a.ravel(), b.ravel())`` is the equivalent. + ufunc.outer : A generalization to dimensions other than 1D and other + operations. ``np.multiply.outer(a.ravel(), b.ravel())`` + is the equivalent. 
+ tensordot : ``np.tensordot(a.ravel(), b.ravel(), axes=((), ()))`` + is the equivalent. + + References + ---------- + .. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*, 3rd + ed., Baltimore, MD, Johns Hopkins University Press, 1996, + pg. 8. + + Examples + -------- + Make a (*very* coarse) grid for computing a Mandelbrot set: + + >>> rl = np.outer(np.ones((5,)), np.linspace(-2, 2, 5)) + >>> rl + array([[-2., -1., 0., 1., 2.], + [-2., -1., 0., 1., 2.], + [-2., -1., 0., 1., 2.], + [-2., -1., 0., 1., 2.], + [-2., -1., 0., 1., 2.]]) + >>> im = np.outer(1j*np.linspace(2, -2, 5), np.ones((5,))) + >>> im + array([[0.+2.j, 0.+2.j, 0.+2.j, 0.+2.j, 0.+2.j], + [0.+1.j, 0.+1.j, 0.+1.j, 0.+1.j, 0.+1.j], + [0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], + [0.-1.j, 0.-1.j, 0.-1.j, 0.-1.j, 0.-1.j], + [0.-2.j, 0.-2.j, 0.-2.j, 0.-2.j, 0.-2.j]]) + >>> grid = rl + im + >>> grid + array([[-2.+2.j, -1.+2.j, 0.+2.j, 1.+2.j, 2.+2.j], + [-2.+1.j, -1.+1.j, 0.+1.j, 1.+1.j, 2.+1.j], + [-2.+0.j, -1.+0.j, 0.+0.j, 1.+0.j, 2.+0.j], + [-2.-1.j, -1.-1.j, 0.-1.j, 1.-1.j, 2.-1.j], + [-2.-2.j, -1.-2.j, 0.-2.j, 1.-2.j, 2.-2.j]]) + + An example using a "vector" of letters: + + >>> x = np.array(['a', 'b', 'c'], dtype=object) + >>> np.outer(x, [1, 2, 3]) + array([['a', 'aa', 'aaa'], + ['b', 'bb', 'bbb'], + ['c', 'cc', 'ccc']], dtype=object) + + """ + a = asarray(a) + b = asarray(b) + return multiply(a.ravel()[:, newaxis], b.ravel()[newaxis, :], out) + + +def _tensordot_dispatcher(a, b, axes=None): + return (a, b) + + +@array_function_dispatch(_tensordot_dispatcher) +def tensordot(a, b, axes=2): + """ + Compute tensor dot product along specified axes. + + Given two tensors, `a` and `b`, and an array_like object containing + two array_like objects, ``(a_axes, b_axes)``, sum the products of + `a`'s and `b`'s elements (components) over the axes specified by + ``a_axes`` and ``b_axes``. The third argument can be a single non-negative + integer_like scalar, ``N``; if it is such, then the last ``N`` dimensions + of `a` and the first ``N`` dimensions of `b` are summed over. + + Parameters + ---------- + a, b : array_like + Tensors to "dot". + + axes : int or (2,) array_like + * integer_like + If an int N, sum over the last N axes of `a` and the first N axes + of `b` in order. The sizes of the corresponding axes must match. + * (2,) array_like + Or, a list of axes to be summed over, first sequence applying to `a`, + second to `b`. Both elements array_like must be of the same length. + + Returns + ------- + output : ndarray + The tensor dot product of the input. + + See Also + -------- + dot, einsum + + Notes + ----- + Three common use cases are: + * ``axes = 0`` : tensor product :math:`a\\otimes b` + * ``axes = 1`` : tensor dot product :math:`a\\cdot b` + * ``axes = 2`` : (default) tensor double contraction :math:`a:b` + + When `axes` is integer_like, the sequence for evaluation will be: first + the -Nth axis in `a` and 0th axis in `b`, and the -1th axis in `a` and + Nth axis in `b` last. + + When there is more than one axis to sum over - and they are not the last + (first) axes of `a` (`b`) - the argument `axes` should consist of + two sequences of the same length, with the first axis to sum over given + first in both sequences, the second axis second, and so forth. + + The shape of the result consists of the non-contracted axes of the + first tensor, followed by the non-contracted axes of the second. 
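A minimal sketch of the integer `axes` convention described above: ``axes=1`` contracts the last axis of `a` with the first axis of `b`, so for 2-D inputs it reduces to a matrix product, while ``axes=0`` is the tensor (outer) product.

import numpy as np

a = np.arange(6.).reshape(2, 3)
b = np.arange(12.).reshape(3, 4)
np.tensordot(a, b, axes=1).shape            # (2, 4), same as (a @ b).shape
np.tensordot(a, b, axes=([1], [0])).shape   # (2, 4), the explicit two-sequence form
np.tensordot(a, b, axes=0).shape            # (2, 3, 3, 4), no axes are contracted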
+ + Examples + -------- + A "traditional" example: + + >>> a = np.arange(60.).reshape(3,4,5) + >>> b = np.arange(24.).reshape(4,3,2) + >>> c = np.tensordot(a,b, axes=([1,0],[0,1])) + >>> c.shape + (5, 2) + >>> c + array([[4400., 4730.], + [4532., 4874.], + [4664., 5018.], + [4796., 5162.], + [4928., 5306.]]) + >>> # A slower but equivalent way of computing the same... + >>> d = np.zeros((5,2)) + >>> for i in range(5): + ... for j in range(2): + ... for k in range(3): + ... for n in range(4): + ... d[i,j] += a[k,n,i] * b[n,k,j] + >>> c == d + array([[ True, True], + [ True, True], + [ True, True], + [ True, True], + [ True, True]]) + + An extended example taking advantage of the overloading of + and \\*: + + >>> a = np.array(range(1, 9)) + >>> a.shape = (2, 2, 2) + >>> A = np.array(('a', 'b', 'c', 'd'), dtype=object) + >>> A.shape = (2, 2) + >>> a; A + array([[[1, 2], + [3, 4]], + [[5, 6], + [7, 8]]]) + array([['a', 'b'], + ['c', 'd']], dtype=object) + + >>> np.tensordot(a, A) # third argument default is 2 for double-contraction + array(['abbcccdddd', 'aaaaabbbbbbcccccccdddddddd'], dtype=object) + + >>> np.tensordot(a, A, 1) + array([[['acc', 'bdd'], + ['aaacccc', 'bbbdddd']], + [['aaaaacccccc', 'bbbbbdddddd'], + ['aaaaaaacccccccc', 'bbbbbbbdddddddd']]], dtype=object) + + >>> np.tensordot(a, A, 0) # tensor product (result too long to incl.) + array([[[[['a', 'b'], + ['c', 'd']], + ... + + >>> np.tensordot(a, A, (0, 1)) + array([[['abbbbb', 'cddddd'], + ['aabbbbbb', 'ccdddddd']], + [['aaabbbbbbb', 'cccddddddd'], + ['aaaabbbbbbbb', 'ccccdddddddd']]], dtype=object) + + >>> np.tensordot(a, A, (2, 1)) + array([[['abb', 'cdd'], + ['aaabbbb', 'cccdddd']], + [['aaaaabbbbbb', 'cccccdddddd'], + ['aaaaaaabbbbbbbb', 'cccccccdddddddd']]], dtype=object) + + >>> np.tensordot(a, A, ((0, 1), (0, 1))) + array(['abbbcccccddddddd', 'aabbbbccccccdddddddd'], dtype=object) + + >>> np.tensordot(a, A, ((2, 1), (1, 0))) + array(['acccbbdddd', 'aaaaacccccccbbbbbbdddddddd'], dtype=object) + + """ + try: + iter(axes) + except Exception: + axes_a = list(range(-axes, 0)) + axes_b = list(range(0, axes)) + else: + axes_a, axes_b = axes + try: + na = len(axes_a) + axes_a = list(axes_a) + except TypeError: + axes_a = [axes_a] + na = 1 + try: + nb = len(axes_b) + axes_b = list(axes_b) + except TypeError: + axes_b = [axes_b] + nb = 1 + + a, b = asarray(a), asarray(b) + as_ = a.shape + nda = a.ndim + bs = b.shape + ndb = b.ndim + equal = True + if na != nb: + equal = False + else: + for k in range(na): + if as_[axes_a[k]] != bs[axes_b[k]]: + equal = False + break + if axes_a[k] < 0: + axes_a[k] += nda + if axes_b[k] < 0: + axes_b[k] += ndb + if not equal: + raise ValueError("shape-mismatch for sum") + + # Move the axes to sum over to the end of "a" + # and to the front of "b" + notin = [k for k in range(nda) if k not in axes_a] + newaxes_a = notin + axes_a + N2 = 1 + for axis in axes_a: + N2 *= as_[axis] + newshape_a = (int(multiply.reduce([as_[ax] for ax in notin])), N2) + olda = [as_[axis] for axis in notin] + + notin = [k for k in range(ndb) if k not in axes_b] + newaxes_b = axes_b + notin + N2 = 1 + for axis in axes_b: + N2 *= bs[axis] + newshape_b = (N2, int(multiply.reduce([bs[ax] for ax in notin]))) + oldb = [bs[axis] for axis in notin] + + at = a.transpose(newaxes_a).reshape(newshape_a) + bt = b.transpose(newaxes_b).reshape(newshape_b) + res = dot(at, bt) + return res.reshape(olda + oldb) + + +def _roll_dispatcher(a, shift, axis=None): + return (a,) + + +@array_function_dispatch(_roll_dispatcher) +def roll(a, shift, 
axis=None): + """ + Roll array elements along a given axis. + + Elements that roll beyond the last position are re-introduced at + the first. + + Parameters + ---------- + a : array_like + Input array. + shift : int or tuple of ints + The number of places by which elements are shifted. If a tuple, + then `axis` must be a tuple of the same size, and each of the + given axes is shifted by the corresponding number. If an int + while `axis` is a tuple of ints, then the same value is used for + all given axes. + axis : int or tuple of ints, optional + Axis or axes along which elements are shifted. By default, the + array is flattened before shifting, after which the original + shape is restored. + + Returns + ------- + res : ndarray + Output array, with the same shape as `a`. + + See Also + -------- + rollaxis : Roll the specified axis backwards, until it lies in a + given position. + + Notes + ----- + .. versionadded:: 1.12.0 + + Supports rolling over multiple dimensions simultaneously. + + Examples + -------- + >>> x = np.arange(10) + >>> np.roll(x, 2) + array([8, 9, 0, 1, 2, 3, 4, 5, 6, 7]) + >>> np.roll(x, -2) + array([2, 3, 4, 5, 6, 7, 8, 9, 0, 1]) + + >>> x2 = np.reshape(x, (2, 5)) + >>> x2 + array([[0, 1, 2, 3, 4], + [5, 6, 7, 8, 9]]) + >>> np.roll(x2, 1) + array([[9, 0, 1, 2, 3], + [4, 5, 6, 7, 8]]) + >>> np.roll(x2, -1) + array([[1, 2, 3, 4, 5], + [6, 7, 8, 9, 0]]) + >>> np.roll(x2, 1, axis=0) + array([[5, 6, 7, 8, 9], + [0, 1, 2, 3, 4]]) + >>> np.roll(x2, -1, axis=0) + array([[5, 6, 7, 8, 9], + [0, 1, 2, 3, 4]]) + >>> np.roll(x2, 1, axis=1) + array([[4, 0, 1, 2, 3], + [9, 5, 6, 7, 8]]) + >>> np.roll(x2, -1, axis=1) + array([[1, 2, 3, 4, 0], + [6, 7, 8, 9, 5]]) + >>> np.roll(x2, (1, 1), axis=(1, 0)) + array([[9, 5, 6, 7, 8], + [4, 0, 1, 2, 3]]) + >>> np.roll(x2, (2, 1), axis=(1, 0)) + array([[8, 9, 5, 6, 7], + [3, 4, 0, 1, 2]]) + + """ + a = asanyarray(a) + if axis is None: + return roll(a.ravel(), shift, 0).reshape(a.shape) + + else: + axis = normalize_axis_tuple(axis, a.ndim, allow_duplicate=True) + broadcasted = broadcast(shift, axis) + if broadcasted.ndim > 1: + raise ValueError( + "'shift' and 'axis' should be scalars or 1D sequences") + shifts = {ax: 0 for ax in range(a.ndim)} + for sh, ax in broadcasted: + shifts[ax] += sh + + rolls = [((slice(None), slice(None)),)] * a.ndim + for ax, offset in shifts.items(): + offset %= a.shape[ax] or 1 # If `a` is empty, nothing matters. + if offset: + # (original, result), (original, result) + rolls[ax] = ((slice(None, -offset), slice(offset, None)), + (slice(-offset, None), slice(None, offset))) + + result = empty_like(a) + for indices in itertools.product(*rolls): + arr_index, res_index = zip(*indices) + result[res_index] = a[arr_index] + + return result + + +def _rollaxis_dispatcher(a, axis, start=None): + return (a,) + + +@array_function_dispatch(_rollaxis_dispatcher) +def rollaxis(a, axis, start=0): + """ + Roll the specified axis backwards, until it lies in a given position. + + This function continues to be supported for backward compatibility, but you + should prefer `moveaxis`. The `moveaxis` function was added in NumPy + 1.11. + + Parameters + ---------- + a : ndarray + Input array. + axis : int + The axis to be rolled. The positions of the other axes do not + change relative to one another. + start : int, optional + When ``start <= axis``, the axis is rolled back until it lies in + this position. When ``start > axis``, the axis is rolled until it + lies before this position. The default, 0, results in a "complete" + roll. 
The following table describes how negative values of ``start`` + are interpreted: + + .. table:: + :align: left + + +-------------------+----------------------+ + | ``start`` | Normalized ``start`` | + +===================+======================+ + | ``-(arr.ndim+1)`` | raise ``AxisError`` | + +-------------------+----------------------+ + | ``-arr.ndim`` | 0 | + +-------------------+----------------------+ + | |vdots| | |vdots| | + +-------------------+----------------------+ + | ``-1`` | ``arr.ndim-1`` | + +-------------------+----------------------+ + | ``0`` | ``0`` | + +-------------------+----------------------+ + | |vdots| | |vdots| | + +-------------------+----------------------+ + | ``arr.ndim`` | ``arr.ndim`` | + +-------------------+----------------------+ + | ``arr.ndim + 1`` | raise ``AxisError`` | + +-------------------+----------------------+ + + .. |vdots| unicode:: U+22EE .. Vertical Ellipsis + + Returns + ------- + res : ndarray + For NumPy >= 1.10.0 a view of `a` is always returned. For earlier + NumPy versions a view of `a` is returned only if the order of the + axes is changed, otherwise the input array is returned. + + See Also + -------- + moveaxis : Move array axes to new positions. + roll : Roll the elements of an array by a number of positions along a + given axis. + + Examples + -------- + >>> a = np.ones((3,4,5,6)) + >>> np.rollaxis(a, 3, 1).shape + (3, 6, 4, 5) + >>> np.rollaxis(a, 2).shape + (5, 3, 4, 6) + >>> np.rollaxis(a, 1, 4).shape + (3, 5, 6, 4) + + """ + n = a.ndim + axis = normalize_axis_index(axis, n) + if start < 0: + start += n + msg = "'%s' arg requires %d <= %s < %d, but %d was passed in" + if not (0 <= start < n + 1): + raise AxisError(msg % ('start', -n, 'start', n + 1, start)) + if axis < start: + # it's been removed + start -= 1 + if axis == start: + return a[...] + axes = list(range(0, n)) + axes.remove(axis) + axes.insert(start, axis) + return a.transpose(axes) + + +def normalize_axis_tuple(axis, ndim, argname=None, allow_duplicate=False): + """ + Normalizes an axis argument into a tuple of non-negative integer axes. + + This handles shorthands such as ``1`` and converts them to ``(1,)``, + as well as performing the handling of negative indices covered by + `normalize_axis_index`. + + By default, this forbids axes from being specified multiple times. + + Used internally by multi-axis-checking logic. + + .. versionadded:: 1.13.0 + + Parameters + ---------- + axis : int, iterable of int + The un-normalized index or indices of the axis. + ndim : int + The number of dimensions of the array that `axis` should be normalized + against. + argname : str, optional + A prefix to put before the error message, typically the name of the + argument. + allow_duplicate : bool, optional + If False, the default, disallow an axis from being specified twice. + + Returns + ------- + normalized_axes : tuple of int + The normalized axis index, such that `0 <= normalized_axis < ndim` + + Raises + ------ + AxisError + If any axis provided is out of range + ValueError + If an axis is repeated + + See also + -------- + normalize_axis_index : normalizing a single scalar axis + """ + # Optimization to speed-up the most common cases. + if type(axis) not in (tuple, list): + try: + axis = [operator.index(axis)] + except TypeError: + pass + # Going via an iterator directly is slower than via list comprehension. 
+ axis = tuple([normalize_axis_index(ax, ndim, argname) for ax in axis]) + if not allow_duplicate and len(set(axis)) != len(axis): + if argname: + raise ValueError('repeated axis in `{}` argument'.format(argname)) + else: + raise ValueError('repeated axis') + return axis + + +def _moveaxis_dispatcher(a, source, destination): + return (a,) + + +@array_function_dispatch(_moveaxis_dispatcher) +def moveaxis(a, source, destination): + """ + Move axes of an array to new positions. + + Other axes remain in their original order. + + .. versionadded:: 1.11.0 + + Parameters + ---------- + a : np.ndarray + The array whose axes should be reordered. + source : int or sequence of int + Original positions of the axes to move. These must be unique. + destination : int or sequence of int + Destination positions for each of the original axes. These must also be + unique. + + Returns + ------- + result : np.ndarray + Array with moved axes. This array is a view of the input array. + + See Also + -------- + transpose : Permute the dimensions of an array. + swapaxes : Interchange two axes of an array. + + Examples + -------- + >>> x = np.zeros((3, 4, 5)) + >>> np.moveaxis(x, 0, -1).shape + (4, 5, 3) + >>> np.moveaxis(x, -1, 0).shape + (5, 3, 4) + + These all achieve the same result: + + >>> np.transpose(x).shape + (5, 4, 3) + >>> np.swapaxes(x, 0, -1).shape + (5, 4, 3) + >>> np.moveaxis(x, [0, 1], [-1, -2]).shape + (5, 4, 3) + >>> np.moveaxis(x, [0, 1, 2], [-1, -2, -3]).shape + (5, 4, 3) + + """ + try: + # allow duck-array types if they define transpose + transpose = a.transpose + except AttributeError: + a = asarray(a) + transpose = a.transpose + + source = normalize_axis_tuple(source, a.ndim, 'source') + destination = normalize_axis_tuple(destination, a.ndim, 'destination') + if len(source) != len(destination): + raise ValueError('`source` and `destination` arguments must have ' + 'the same number of elements') + + order = [n for n in range(a.ndim) if n not in source] + + for dest, src in sorted(zip(destination, source)): + order.insert(dest, src) + + result = transpose(order) + return result + + +def _cross_dispatcher(a, b, axisa=None, axisb=None, axisc=None, axis=None): + return (a, b) + + +@array_function_dispatch(_cross_dispatcher) +def cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None): + """ + Return the cross product of two (arrays of) vectors. + + The cross product of `a` and `b` in :math:`R^3` is a vector perpendicular + to both `a` and `b`. If `a` and `b` are arrays of vectors, the vectors + are defined by the last axis of `a` and `b` by default, and these axes + can have dimensions 2 or 3. Where the dimension of either `a` or `b` is + 2, the third component of the input vector is assumed to be zero and the + cross product calculated accordingly. In cases where both input vectors + have dimension 2, the z-component of the cross product is returned. + + Parameters + ---------- + a : array_like + Components of the first vector(s). + b : array_like + Components of the second vector(s). + axisa : int, optional + Axis of `a` that defines the vector(s). By default, the last axis. + axisb : int, optional + Axis of `b` that defines the vector(s). By default, the last axis. + axisc : int, optional + Axis of `c` containing the cross product vector(s). Ignored if + both input vectors have dimension 2, as the return is scalar. + By default, the last axis. + axis : int, optional + If defined, the axis of `a`, `b` and `c` that defines the vector(s) + and cross product(s). Overrides `axisa`, `axisb` and `axisc`. 
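A minimal sketch of the `axis` shorthand described above, which sets `axisa`, `axisb` and `axisc` at once:

import numpy as np

x = np.array([[1, 2, 3], [4, 5, 6]]).T   # two 3-vectors stored along axis 0
y = np.array([[4, 5, 6], [1, 2, 3]]).T
np.cross(x, y, axis=0).shape             # (3, 2): result vectors also lie along axis 0
# equivalent to np.cross(x, y, axisa=0, axisb=0, axisc=0)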
+ + Returns + ------- + c : ndarray + Vector cross product(s). + + Raises + ------ + ValueError + When the dimension of the vector(s) in `a` and/or `b` does not + equal 2 or 3. + + See Also + -------- + inner : Inner product + outer : Outer product. + ix_ : Construct index arrays. + + Notes + ----- + .. versionadded:: 1.9.0 + + Supports full broadcasting of the inputs. + + Examples + -------- + Vector cross-product. + + >>> x = [1, 2, 3] + >>> y = [4, 5, 6] + >>> np.cross(x, y) + array([-3, 6, -3]) + + One vector with dimension 2. + + >>> x = [1, 2] + >>> y = [4, 5, 6] + >>> np.cross(x, y) + array([12, -6, -3]) + + Equivalently: + + >>> x = [1, 2, 0] + >>> y = [4, 5, 6] + >>> np.cross(x, y) + array([12, -6, -3]) + + Both vectors with dimension 2. + + >>> x = [1,2] + >>> y = [4,5] + >>> np.cross(x, y) + array(-3) + + Multiple vector cross-products. Note that the direction of the cross + product vector is defined by the *right-hand rule*. + + >>> x = np.array([[1,2,3], [4,5,6]]) + >>> y = np.array([[4,5,6], [1,2,3]]) + >>> np.cross(x, y) + array([[-3, 6, -3], + [ 3, -6, 3]]) + + The orientation of `c` can be changed using the `axisc` keyword. + + >>> np.cross(x, y, axisc=0) + array([[-3, 3], + [ 6, -6], + [-3, 3]]) + + Change the vector definition of `x` and `y` using `axisa` and `axisb`. + + >>> x = np.array([[1,2,3], [4,5,6], [7, 8, 9]]) + >>> y = np.array([[7, 8, 9], [4,5,6], [1,2,3]]) + >>> np.cross(x, y) + array([[ -6, 12, -6], + [ 0, 0, 0], + [ 6, -12, 6]]) + >>> np.cross(x, y, axisa=0, axisb=0) + array([[-24, 48, -24], + [-30, 60, -30], + [-36, 72, -36]]) + + """ + if axis is not None: + axisa, axisb, axisc = (axis,) * 3 + a = asarray(a) + b = asarray(b) + # Check axisa and axisb are within bounds + axisa = normalize_axis_index(axisa, a.ndim, msg_prefix='axisa') + axisb = normalize_axis_index(axisb, b.ndim, msg_prefix='axisb') + + # Move working axis to the end of the shape + a = moveaxis(a, axisa, -1) + b = moveaxis(b, axisb, -1) + msg = ("incompatible dimensions for cross product\n" + "(dimension must be 2 or 3)") + if a.shape[-1] not in (2, 3) or b.shape[-1] not in (2, 3): + raise ValueError(msg) + + # Create the output array + shape = broadcast(a[..., 0], b[..., 0]).shape + if a.shape[-1] == 3 or b.shape[-1] == 3: + shape += (3,) + # Check axisc is within bounds + axisc = normalize_axis_index(axisc, len(shape), msg_prefix='axisc') + dtype = promote_types(a.dtype, b.dtype) + cp = empty(shape, dtype) + + # recast arrays as dtype + a = a.astype(dtype) + b = b.astype(dtype) + + # create local aliases for readability + a0 = a[..., 0] + a1 = a[..., 1] + if a.shape[-1] == 3: + a2 = a[..., 2] + b0 = b[..., 0] + b1 = b[..., 1] + if b.shape[-1] == 3: + b2 = b[..., 2] + if cp.ndim != 0 and cp.shape[-1] == 3: + cp0 = cp[..., 0] + cp1 = cp[..., 1] + cp2 = cp[..., 2] + + if a.shape[-1] == 2: + if b.shape[-1] == 2: + # a0 * b1 - a1 * b0 + multiply(a0, b1, out=cp) + cp -= a1 * b0 + return cp + else: + assert b.shape[-1] == 3 + # cp0 = a1 * b2 - 0 (a2 = 0) + # cp1 = 0 - a0 * b2 (a2 = 0) + # cp2 = a0 * b1 - a1 * b0 + multiply(a1, b2, out=cp0) + multiply(a0, b2, out=cp1) + negative(cp1, out=cp1) + multiply(a0, b1, out=cp2) + cp2 -= a1 * b0 + else: + assert a.shape[-1] == 3 + if b.shape[-1] == 3: + # cp0 = a1 * b2 - a2 * b1 + # cp1 = a2 * b0 - a0 * b2 + # cp2 = a0 * b1 - a1 * b0 + multiply(a1, b2, out=cp0) + tmp = array(a2 * b1) + cp0 -= tmp + multiply(a2, b0, out=cp1) + multiply(a0, b2, out=tmp) + cp1 -= tmp + multiply(a0, b1, out=cp2) + multiply(a1, b0, out=tmp) + cp2 -= tmp + else: + assert 
b.shape[-1] == 2 + # cp0 = 0 - a2 * b1 (b2 = 0) + # cp1 = a2 * b0 - 0 (b2 = 0) + # cp2 = a0 * b1 - a1 * b0 + multiply(a2, b1, out=cp0) + negative(cp0, out=cp0) + multiply(a2, b0, out=cp1) + multiply(a0, b1, out=cp2) + cp2 -= a1 * b0 + + return moveaxis(cp, -1, axisc) + + +little_endian = (sys.byteorder == 'little') + + +@set_module('numpy') +def indices(dimensions, dtype=int, sparse=False): + """ + Return an array representing the indices of a grid. + + Compute an array where the subarrays contain index values 0, 1, ... + varying only along the corresponding axis. + + Parameters + ---------- + dimensions : sequence of ints + The shape of the grid. + dtype : dtype, optional + Data type of the result. + sparse : boolean, optional + Return a sparse representation of the grid instead of a dense + representation. Default is False. + + .. versionadded:: 1.17 + + Returns + ------- + grid : one ndarray or tuple of ndarrays + If sparse is False: + Returns one array of grid indices, + ``grid.shape = (len(dimensions),) + tuple(dimensions)``. + If sparse is True: + Returns a tuple of arrays, with + ``grid[i].shape = (1, ..., 1, dimensions[i], 1, ..., 1)`` with + dimensions[i] in the ith place + + See Also + -------- + mgrid, ogrid, meshgrid + + Notes + ----- + The output shape in the dense case is obtained by prepending the number + of dimensions in front of the tuple of dimensions, i.e. if `dimensions` + is a tuple ``(r0, ..., rN-1)`` of length ``N``, the output shape is + ``(N, r0, ..., rN-1)``. + + The subarrays ``grid[k]`` contains the N-D array of indices along the + ``k-th`` axis. Explicitly:: + + grid[k, i0, i1, ..., iN-1] = ik + + Examples + -------- + >>> grid = np.indices((2, 3)) + >>> grid.shape + (2, 2, 3) + >>> grid[0] # row indices + array([[0, 0, 0], + [1, 1, 1]]) + >>> grid[1] # column indices + array([[0, 1, 2], + [0, 1, 2]]) + + The indices can be used as an index into an array. + + >>> x = np.arange(20).reshape(5, 4) + >>> row, col = np.indices((2, 3)) + >>> x[row, col] + array([[0, 1, 2], + [4, 5, 6]]) + + Note that it would be more straightforward in the above example to + extract the required elements directly with ``x[:2, :3]``. + + If sparse is set to true, the grid will be returned in a sparse + representation. + + >>> i, j = np.indices((2, 3), sparse=True) + >>> i.shape + (2, 1) + >>> j.shape + (1, 3) + >>> i # row indices + array([[0], + [1]]) + >>> j # column indices + array([[0, 1, 2]]) + + """ + dimensions = tuple(dimensions) + N = len(dimensions) + shape = (1,)*N + if sparse: + res = tuple() + else: + res = empty((N,)+dimensions, dtype=dtype) + for i, dim in enumerate(dimensions): + idx = arange(dim, dtype=dtype).reshape( + shape[:i] + (dim,) + shape[i+1:] + ) + if sparse: + res = res + (idx,) + else: + res[i] = idx + return res + + +@set_array_function_like_doc +@set_module('numpy') +def fromfunction(function, shape, *, dtype=float, like=None, **kwargs): + """ + Construct an array by executing a function over each coordinate. + + The resulting array therefore has a value ``fn(x, y, z)`` at + coordinate ``(x, y, z)``. + + Parameters + ---------- + function : callable + The function is called with N parameters, where N is the rank of + `shape`. Each parameter represents the coordinates of the array + varying along a specific axis. 
For example, if `shape` + were ``(2, 2)``, then the parameters would be + ``array([[0, 0], [1, 1]])`` and ``array([[0, 1], [0, 1]])`` + shape : (N,) tuple of ints + Shape of the output array, which also determines the shape of + the coordinate arrays passed to `function`. + dtype : data-type, optional + Data-type of the coordinate arrays passed to `function`. + By default, `dtype` is float. + ${ARRAY_FUNCTION_LIKE} + + .. versionadded:: 1.20.0 + + Returns + ------- + fromfunction : any + The result of the call to `function` is passed back directly. + Therefore the shape of `fromfunction` is completely determined by + `function`. If `function` returns a scalar value, the shape of + `fromfunction` would not match the `shape` parameter. + + See Also + -------- + indices, meshgrid + + Notes + ----- + Keywords other than `dtype` and `like` are passed to `function`. + + Examples + -------- + >>> np.fromfunction(lambda i, j: i, (2, 2), dtype=float) + array([[0., 0.], + [1., 1.]]) + + >>> np.fromfunction(lambda i, j: j, (2, 2), dtype=float) + array([[0., 1.], + [0., 1.]]) + + >>> np.fromfunction(lambda i, j: i == j, (3, 3), dtype=int) + array([[ True, False, False], + [False, True, False], + [False, False, True]]) + + >>> np.fromfunction(lambda i, j: i + j, (3, 3), dtype=int) + array([[0, 1, 2], + [1, 2, 3], + [2, 3, 4]]) + + """ + if like is not None: + return _fromfunction_with_like( + like, function, shape, dtype=dtype, **kwargs) + + args = indices(shape, dtype=dtype) + return function(*args, **kwargs) + + +_fromfunction_with_like = array_function_dispatch()(fromfunction) + + +def _frombuffer(buf, dtype, shape, order): + return frombuffer(buf, dtype=dtype).reshape(shape, order=order) + + +@set_module('numpy') +def isscalar(element): + """ + Returns True if the type of `element` is a scalar type. + + Parameters + ---------- + element : any + Input argument, can be of any type and shape. + + Returns + ------- + val : bool + True if `element` is a scalar type, False if it is not. + + See Also + -------- + ndim : Get the number of dimensions of an array + + Notes + ----- + If you need a stricter way to identify a *numerical* scalar, use + ``isinstance(x, numbers.Number)``, as that returns ``False`` for most + non-numerical elements such as strings. + + In most cases ``np.ndim(x) == 0`` should be used instead of this function, + as that will also return true for 0d arrays. This is how numpy overloads + functions in the style of the ``dx`` arguments to `gradient` and the ``bins`` + argument to `histogram`. 
Some key differences: + + +--------------------------------------+---------------+-------------------+ + | x |``isscalar(x)``|``np.ndim(x) == 0``| + +======================================+===============+===================+ + | PEP 3141 numeric objects (including | ``True`` | ``True`` | + | builtins) | | | + +--------------------------------------+---------------+-------------------+ + | builtin string and buffer objects | ``True`` | ``True`` | + +--------------------------------------+---------------+-------------------+ + | other builtin objects, like | ``False`` | ``True`` | + | `pathlib.Path`, `Exception`, | | | + | the result of `re.compile` | | | + +--------------------------------------+---------------+-------------------+ + | third-party objects like | ``False`` | ``True`` | + | `matplotlib.figure.Figure` | | | + +--------------------------------------+---------------+-------------------+ + | zero-dimensional numpy arrays | ``False`` | ``True`` | + +--------------------------------------+---------------+-------------------+ + | other numpy arrays | ``False`` | ``False`` | + +--------------------------------------+---------------+-------------------+ + | `list`, `tuple`, and other sequence | ``False`` | ``False`` | + | objects | | | + +--------------------------------------+---------------+-------------------+ + + Examples + -------- + >>> np.isscalar(3.1) + True + >>> np.isscalar(np.array(3.1)) + False + >>> np.isscalar([3.1]) + False + >>> np.isscalar(False) + True + >>> np.isscalar('numpy') + True + + NumPy supports PEP 3141 numbers: + + >>> from fractions import Fraction + >>> np.isscalar(Fraction(5, 17)) + True + >>> from numbers import Number + >>> np.isscalar(Number()) + True + + """ + return (isinstance(element, generic) + or type(element) in ScalarType + or isinstance(element, numbers.Number)) + + +@set_module('numpy') +def binary_repr(num, width=None): + """ + Return the binary representation of the input number as a string. + + For negative numbers, if width is not given, a minus sign is added to the + front. If width is given, the two's complement of the number is + returned, with respect to that width. + + In a two's-complement system negative numbers are represented by the two's + complement of the absolute value. This is the most common method of + representing signed integers on computers [1]_. A N-bit two's-complement + system can represent every integer in the range + :math:`-2^{N-1}` to :math:`+2^{N-1}-1`. + + Parameters + ---------- + num : int + Only an integer decimal number can be used. + width : int, optional + The length of the returned string if `num` is positive, or the length + of the two's complement if `num` is negative, provided that `width` is + at least a sufficient number of bits for `num` to be represented in the + designated form. + + If the `width` value is insufficient, it will be ignored, and `num` will + be returned in binary (`num` > 0) or two's complement (`num` < 0) form + with its width equal to the minimum number of bits needed to represent + the number in the designated form. This behavior is deprecated and will + later raise an error. + + .. deprecated:: 1.12.0 + + Returns + ------- + bin : str + Binary representation of `num` or two's complement of `num`. + + See Also + -------- + base_repr: Return a string representation of a number in the given base + system. + bin: Python's built-in binary representation generator of an integer. 
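A minimal sketch of the two's-complement rule described above: for a negative `num` and a sufficient `width`, the returned bit string encodes ``2**width + num``.

import numpy as np

np.binary_repr(-3, width=8)           # '11111101'
int(np.binary_repr(-3, width=8), 2)   # 253 == 2**8 - 3
np.base_repr(253, base=2)             # '11111101', the base_repr counterpart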
+ + Notes + ----- + `binary_repr` is equivalent to using `base_repr` with base 2, but about 25x + faster. + + References + ---------- + .. [1] Wikipedia, "Two's complement", + https://en.wikipedia.org/wiki/Two's_complement + + Examples + -------- + >>> np.binary_repr(3) + '11' + >>> np.binary_repr(-3) + '-11' + >>> np.binary_repr(3, width=4) + '0011' + + The two's complement is returned when the input number is negative and + width is specified: + + >>> np.binary_repr(-3, width=3) + '101' + >>> np.binary_repr(-3, width=5) + '11101' + + """ + def warn_if_insufficient(width, binwidth): + if width is not None and width < binwidth: + warnings.warn( + "Insufficient bit width provided. This behavior " + "will raise an error in the future.", DeprecationWarning, + stacklevel=3) + + # Ensure that num is a Python integer to avoid overflow or unwanted + # casts to floating point. + num = operator.index(num) + + if num == 0: + return '0' * (width or 1) + + elif num > 0: + binary = bin(num)[2:] + binwidth = len(binary) + outwidth = (binwidth if width is None + else builtins.max(binwidth, width)) + warn_if_insufficient(width, binwidth) + return binary.zfill(outwidth) + + else: + if width is None: + return '-' + bin(-num)[2:] + + else: + poswidth = len(bin(-num)[2:]) + + # See gh-8679: remove extra digit + # for numbers at boundaries. + if 2**(poswidth - 1) == -num: + poswidth -= 1 + + twocomp = 2**(poswidth + 1) + num + binary = bin(twocomp)[2:] + binwidth = len(binary) + + outwidth = builtins.max(binwidth, width) + warn_if_insufficient(width, binwidth) + return '1' * (outwidth - binwidth) + binary + + +@set_module('numpy') +def base_repr(number, base=2, padding=0): + """ + Return a string representation of a number in the given base system. + + Parameters + ---------- + number : int + The value to convert. Positive and negative values are handled. + base : int, optional + Convert `number` to the `base` number system. The valid range is 2-36, + the default value is 2. + padding : int, optional + Number of zeros padded on the left. Default is 0 (no padding). + + Returns + ------- + out : str + String representation of `number` in `base` system. + + See Also + -------- + binary_repr : Faster version of `base_repr` for base 2. + + Examples + -------- + >>> np.base_repr(5) + '101' + >>> np.base_repr(6, 5) + '11' + >>> np.base_repr(7, base=5, padding=3) + '00012' + + >>> np.base_repr(10, base=16) + 'A' + >>> np.base_repr(32, base=16) + '20' + + """ + digits = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ' + if base > len(digits): + raise ValueError("Bases greater than 36 not handled in base_repr.") + elif base < 2: + raise ValueError("Bases less than 2 not handled in base_repr.") + + num = abs(number) + res = [] + while num: + res.append(digits[num % base]) + num //= base + if padding: + res.append('0' * padding) + if number < 0: + res.append('-') + return ''.join(reversed(res or '0')) + + +# These are all essentially abbreviations +# These might wind up in a special abbreviations module + + +def _maketup(descr, val): + dt = dtype(descr) + # Place val in all scalar tuples: + fields = dt.fields + if fields is None: + return val + else: + res = [_maketup(fields[name][0], val) for name in dt.names] + return tuple(res) + + +@set_array_function_like_doc +@set_module('numpy') +def identity(n, dtype=None, *, like=None): + """ + Return the identity array. + + The identity array is a square array with ones on + the main diagonal. + + Parameters + ---------- + n : int + Number of rows (and columns) in `n` x `n` output. 
+ dtype : data-type, optional + Data-type of the output. Defaults to ``float``. + ${ARRAY_FUNCTION_LIKE} + + .. versionadded:: 1.20.0 + + Returns + ------- + out : ndarray + `n` x `n` array with its main diagonal set to one, + and all other elements 0. + + Examples + -------- + >>> np.identity(3) + array([[1., 0., 0.], + [0., 1., 0.], + [0., 0., 1.]]) + + """ + if like is not None: + return _identity_with_like(like, n, dtype=dtype) + + from numpy import eye + return eye(n, dtype=dtype, like=like) + + +_identity_with_like = array_function_dispatch()(identity) + + +def _allclose_dispatcher(a, b, rtol=None, atol=None, equal_nan=None): + return (a, b) + + +@array_function_dispatch(_allclose_dispatcher) +def allclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False): + """ + Returns True if two arrays are element-wise equal within a tolerance. + + The tolerance values are positive, typically very small numbers. The + relative difference (`rtol` * abs(`b`)) and the absolute difference + `atol` are added together to compare against the absolute difference + between `a` and `b`. + + NaNs are treated as equal if they are in the same place and if + ``equal_nan=True``. Infs are treated as equal if they are in the same + place and of the same sign in both arrays. + + Parameters + ---------- + a, b : array_like + Input arrays to compare. + rtol : float + The relative tolerance parameter (see Notes). + atol : float + The absolute tolerance parameter (see Notes). + equal_nan : bool + Whether to compare NaN's as equal. If True, NaN's in `a` will be + considered equal to NaN's in `b` in the output array. + + .. versionadded:: 1.10.0 + + Returns + ------- + allclose : bool + Returns True if the two arrays are equal within the given + tolerance; False otherwise. + + See Also + -------- + isclose, all, any, equal + + Notes + ----- + If the following equation is element-wise True, then allclose returns + True. + + absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`)) + + The above equation is not symmetric in `a` and `b`, so that + ``allclose(a, b)`` might be different from ``allclose(b, a)`` in + some rare cases. + + The comparison of `a` and `b` uses standard broadcasting, which + means that `a` and `b` need not have the same shape in order for + ``allclose(a, b)`` to evaluate to True. The same is true for + `equal` but not `array_equal`. + + `allclose` is not defined for non-numeric data types. + `bool` is considered a numeric data-type for this purpose. + + Examples + -------- + >>> np.allclose([1e10,1e-7], [1.00001e10,1e-8]) + False + >>> np.allclose([1e10,1e-8], [1.00001e10,1e-9]) + True + >>> np.allclose([1e10,1e-8], [1.0001e10,1e-9]) + False + >>> np.allclose([1.0, np.nan], [1.0, np.nan]) + False + >>> np.allclose([1.0, np.nan], [1.0, np.nan], equal_nan=True) + True + + """ + res = all(isclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan)) + return bool(res) + + +def _isclose_dispatcher(a, b, rtol=None, atol=None, equal_nan=None): + return (a, b) + + +@array_function_dispatch(_isclose_dispatcher) +def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False): + """ + Returns a boolean array where two arrays are element-wise equal within a + tolerance. + + The tolerance values are positive, typically very small numbers. The + relative difference (`rtol` * abs(`b`)) and the absolute difference + `atol` are added together to compare against the absolute difference + between `a` and `b`. + + .. 
warning:: The default `atol` is not appropriate for comparing numbers + that are much smaller than one (see Notes). + + Parameters + ---------- + a, b : array_like + Input arrays to compare. + rtol : float + The relative tolerance parameter (see Notes). + atol : float + The absolute tolerance parameter (see Notes). + equal_nan : bool + Whether to compare NaN's as equal. If True, NaN's in `a` will be + considered equal to NaN's in `b` in the output array. + + Returns + ------- + y : array_like + Returns a boolean array of where `a` and `b` are equal within the + given tolerance. If both `a` and `b` are scalars, returns a single + boolean value. + + See Also + -------- + allclose + math.isclose + + Notes + ----- + .. versionadded:: 1.7.0 + + For finite values, isclose uses the following equation to test whether + two floating point values are equivalent. + + absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`)) + + Unlike the built-in `math.isclose`, the above equation is not symmetric + in `a` and `b` -- it assumes `b` is the reference value -- so that + `isclose(a, b)` might be different from `isclose(b, a)`. Furthermore, + the default value of atol is not zero, and is used to determine what + small values should be considered close to zero. The default value is + appropriate for expected values of order unity: if the expected values + are significantly smaller than one, it can result in false positives. + `atol` should be carefully selected for the use case at hand. A zero value + for `atol` will result in `False` if either `a` or `b` is zero. + + `isclose` is not defined for non-numeric data types. + `bool` is considered a numeric data-type for this purpose. + + Examples + -------- + >>> np.isclose([1e10,1e-7], [1.00001e10,1e-8]) + array([ True, False]) + >>> np.isclose([1e10,1e-8], [1.00001e10,1e-9]) + array([ True, True]) + >>> np.isclose([1e10,1e-8], [1.0001e10,1e-9]) + array([False, True]) + >>> np.isclose([1.0, np.nan], [1.0, np.nan]) + array([ True, False]) + >>> np.isclose([1.0, np.nan], [1.0, np.nan], equal_nan=True) + array([ True, True]) + >>> np.isclose([1e-8, 1e-7], [0.0, 0.0]) + array([ True, False]) + >>> np.isclose([1e-100, 1e-7], [0.0, 0.0], atol=0.0) + array([False, False]) + >>> np.isclose([1e-10, 1e-10], [1e-20, 0.0]) + array([ True, True]) + >>> np.isclose([1e-10, 1e-10], [1e-20, 0.999999e-10], atol=0.0) + array([False, True]) + """ + def within_tol(x, y, atol, rtol): + with errstate(invalid='ignore'), _no_nep50_warning(): + return less_equal(abs(x-y), atol + rtol * abs(y)) + + x = asanyarray(a) + y = asanyarray(b) + + # Make sure y is an inexact type to avoid bad behavior on abs(MIN_INT). + # This will cause casting of x later. Also, make sure to allow subclasses + # (e.g., for numpy.ma). + # NOTE: We explicitly allow timedelta, which used to work. This could + # possibly be deprecated. See also gh-18286. + # timedelta works if `atol` is an integer or also a timedelta. + # Although, the default tolerances are unlikely to be useful + if y.dtype.kind != "m": + dt = multiarray.result_type(y, 1.) + y = asanyarray(y, dtype=dt) + + xfin = isfinite(x) + yfin = isfinite(y) + if all(xfin) and all(yfin): + return within_tol(x, y, atol, rtol) + else: + finite = xfin & yfin + cond = zeros_like(finite, subok=True) + # Because we're using boolean indexing, x & y must be the same shape. + # Ideally, we'd just do x, y = broadcast_arrays(x, y). It's in + # lib.stride_tricks, though, so we can't import it here. 
+ x = x * ones_like(cond) + y = y * ones_like(cond) + # Avoid subtraction with infinite/nan values... + cond[finite] = within_tol(x[finite], y[finite], atol, rtol) + # Check for equality of infinite values... + cond[~finite] = (x[~finite] == y[~finite]) + if equal_nan: + # Make NaN == NaN + both_nan = isnan(x) & isnan(y) + + # Needed to treat masked arrays correctly. = True would not work. + cond[both_nan] = both_nan[both_nan] + + return cond[()] # Flatten 0d arrays to scalars + + +def _array_equal_dispatcher(a1, a2, equal_nan=None): + return (a1, a2) + + +@array_function_dispatch(_array_equal_dispatcher) +def array_equal(a1, a2, equal_nan=False): + """ + True if two arrays have the same shape and elements, False otherwise. + + Parameters + ---------- + a1, a2 : array_like + Input arrays. + equal_nan : bool + Whether to compare NaN's as equal. If the dtype of a1 and a2 is + complex, values will be considered equal if either the real or the + imaginary component of a given value is ``nan``. + + .. versionadded:: 1.19.0 + + Returns + ------- + b : bool + Returns True if the arrays are equal. + + See Also + -------- + allclose: Returns True if two arrays are element-wise equal within a + tolerance. + array_equiv: Returns True if input arrays are shape consistent and all + elements equal. + + Examples + -------- + >>> np.array_equal([1, 2], [1, 2]) + True + >>> np.array_equal(np.array([1, 2]), np.array([1, 2])) + True + >>> np.array_equal([1, 2], [1, 2, 3]) + False + >>> np.array_equal([1, 2], [1, 4]) + False + >>> a = np.array([1, np.nan]) + >>> np.array_equal(a, a) + False + >>> np.array_equal(a, a, equal_nan=True) + True + + When ``equal_nan`` is True, complex values with nan components are + considered equal if either the real *or* the imaginary components are nan. + + >>> a = np.array([1 + 1j]) + >>> b = a.copy() + >>> a.real = np.nan + >>> b.imag = np.nan + >>> np.array_equal(a, b, equal_nan=True) + True + """ + try: + a1, a2 = asarray(a1), asarray(a2) + except Exception: + return False + if a1.shape != a2.shape: + return False + if not equal_nan: + return bool(asarray(a1 == a2).all()) + # Handling NaN values if equal_nan is True + a1nan, a2nan = isnan(a1), isnan(a2) + # NaN's occur at different locations + if not (a1nan == a2nan).all(): + return False + # Shapes of a1, a2 and masks are guaranteed to be consistent by this point + return bool(asarray(a1[~a1nan] == a2[~a1nan]).all()) + + +def _array_equiv_dispatcher(a1, a2): + return (a1, a2) + + +@array_function_dispatch(_array_equiv_dispatcher) +def array_equiv(a1, a2): + """ + Returns True if input arrays are shape consistent and all elements equal. + + Shape consistent means they are either the same shape, or one input array + can be broadcasted to create the same shape as the other one. + + Parameters + ---------- + a1, a2 : array_like + Input arrays. + + Returns + ------- + out : bool + True if equivalent, False otherwise. 
+ + Examples + -------- + >>> np.array_equiv([1, 2], [1, 2]) + True + >>> np.array_equiv([1, 2], [1, 3]) + False + + Showing the shape equivalence: + + >>> np.array_equiv([1, 2], [[1, 2], [1, 2]]) + True + >>> np.array_equiv([1, 2], [[1, 2, 1, 2], [1, 2, 1, 2]]) + False + + >>> np.array_equiv([1, 2], [[1, 2], [1, 3]]) + False + + """ + try: + a1, a2 = asarray(a1), asarray(a2) + except Exception: + return False + try: + multiarray.broadcast(a1, a2) + except Exception: + return False + + return bool(asarray(a1 == a2).all()) + + +Inf = inf = infty = Infinity = PINF +nan = NaN = NAN +False_ = bool_(False) +True_ = bool_(True) + + +def extend_all(module): + existing = set(__all__) + mall = getattr(module, '__all__') + for a in mall: + if a not in existing: + __all__.append(a) + + +from .umath import * +from .numerictypes import * +from . import fromnumeric +from .fromnumeric import * +from . import arrayprint +from .arrayprint import * +from . import _asarray +from ._asarray import * +from . import _ufunc_config +from ._ufunc_config import * +extend_all(fromnumeric) +extend_all(umath) +extend_all(numerictypes) +extend_all(arrayprint) +extend_all(_asarray) +extend_all(_ufunc_config) diff --git a/.venv/lib/python3.12/site-packages/numpy/core/numeric.pyi b/.venv/lib/python3.12/site-packages/numpy/core/numeric.pyi new file mode 100644 index 00000000..fc10bb88 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/numeric.pyi @@ -0,0 +1,660 @@ +from collections.abc import Callable, Sequence +from typing import ( + Any, + overload, + TypeVar, + Literal, + SupportsAbs, + SupportsIndex, + NoReturn, +) +if sys.version_info >= (3, 10): + from typing import TypeGuard +else: + from typing_extensions import TypeGuard + +from numpy import ( + ComplexWarning as ComplexWarning, + generic, + unsignedinteger, + signedinteger, + floating, + complexfloating, + bool_, + int_, + intp, + float64, + timedelta64, + object_, + _OrderKACF, + _OrderCF, +) + +from numpy._typing import ( + ArrayLike, + NDArray, + DTypeLike, + _ShapeLike, + _DTypeLike, + _ArrayLike, + _SupportsArrayFunc, + _ScalarLike_co, + _ArrayLikeBool_co, + _ArrayLikeUInt_co, + _ArrayLikeInt_co, + _ArrayLikeFloat_co, + _ArrayLikeComplex_co, + _ArrayLikeTD64_co, + _ArrayLikeObject_co, + _ArrayLikeUnknown, +) + +_T = TypeVar("_T") +_SCT = TypeVar("_SCT", bound=generic) +_ArrayType = TypeVar("_ArrayType", bound=NDArray[Any]) + +_CorrelateMode = Literal["valid", "same", "full"] + +__all__: list[str] + +@overload +def zeros_like( + a: _ArrayType, + dtype: None = ..., + order: _OrderKACF = ..., + subok: Literal[True] = ..., + shape: None = ..., +) -> _ArrayType: ... +@overload +def zeros_like( + a: _ArrayLike[_SCT], + dtype: None = ..., + order: _OrderKACF = ..., + subok: bool = ..., + shape: None | _ShapeLike = ..., +) -> NDArray[_SCT]: ... +@overload +def zeros_like( + a: object, + dtype: None = ..., + order: _OrderKACF = ..., + subok: bool = ..., + shape: None | _ShapeLike= ..., +) -> NDArray[Any]: ... +@overload +def zeros_like( + a: Any, + dtype: _DTypeLike[_SCT], + order: _OrderKACF = ..., + subok: bool = ..., + shape: None | _ShapeLike= ..., +) -> NDArray[_SCT]: ... +@overload +def zeros_like( + a: Any, + dtype: DTypeLike, + order: _OrderKACF = ..., + subok: bool = ..., + shape: None | _ShapeLike= ..., +) -> NDArray[Any]: ... + +@overload +def ones( + shape: _ShapeLike, + dtype: None = ..., + order: _OrderCF = ..., + *, + like: _SupportsArrayFunc = ..., +) -> NDArray[float64]: ... 
+@overload +def ones( + shape: _ShapeLike, + dtype: _DTypeLike[_SCT], + order: _OrderCF = ..., + *, + like: _SupportsArrayFunc = ..., +) -> NDArray[_SCT]: ... +@overload +def ones( + shape: _ShapeLike, + dtype: DTypeLike, + order: _OrderCF = ..., + *, + like: _SupportsArrayFunc = ..., +) -> NDArray[Any]: ... + +@overload +def ones_like( + a: _ArrayType, + dtype: None = ..., + order: _OrderKACF = ..., + subok: Literal[True] = ..., + shape: None = ..., +) -> _ArrayType: ... +@overload +def ones_like( + a: _ArrayLike[_SCT], + dtype: None = ..., + order: _OrderKACF = ..., + subok: bool = ..., + shape: None | _ShapeLike = ..., +) -> NDArray[_SCT]: ... +@overload +def ones_like( + a: object, + dtype: None = ..., + order: _OrderKACF = ..., + subok: bool = ..., + shape: None | _ShapeLike= ..., +) -> NDArray[Any]: ... +@overload +def ones_like( + a: Any, + dtype: _DTypeLike[_SCT], + order: _OrderKACF = ..., + subok: bool = ..., + shape: None | _ShapeLike= ..., +) -> NDArray[_SCT]: ... +@overload +def ones_like( + a: Any, + dtype: DTypeLike, + order: _OrderKACF = ..., + subok: bool = ..., + shape: None | _ShapeLike= ..., +) -> NDArray[Any]: ... + +@overload +def full( + shape: _ShapeLike, + fill_value: Any, + dtype: None = ..., + order: _OrderCF = ..., + *, + like: _SupportsArrayFunc = ..., +) -> NDArray[Any]: ... +@overload +def full( + shape: _ShapeLike, + fill_value: Any, + dtype: _DTypeLike[_SCT], + order: _OrderCF = ..., + *, + like: _SupportsArrayFunc = ..., +) -> NDArray[_SCT]: ... +@overload +def full( + shape: _ShapeLike, + fill_value: Any, + dtype: DTypeLike, + order: _OrderCF = ..., + *, + like: _SupportsArrayFunc = ..., +) -> NDArray[Any]: ... + +@overload +def full_like( + a: _ArrayType, + fill_value: Any, + dtype: None = ..., + order: _OrderKACF = ..., + subok: Literal[True] = ..., + shape: None = ..., +) -> _ArrayType: ... +@overload +def full_like( + a: _ArrayLike[_SCT], + fill_value: Any, + dtype: None = ..., + order: _OrderKACF = ..., + subok: bool = ..., + shape: None | _ShapeLike = ..., +) -> NDArray[_SCT]: ... +@overload +def full_like( + a: object, + fill_value: Any, + dtype: None = ..., + order: _OrderKACF = ..., + subok: bool = ..., + shape: None | _ShapeLike= ..., +) -> NDArray[Any]: ... +@overload +def full_like( + a: Any, + fill_value: Any, + dtype: _DTypeLike[_SCT], + order: _OrderKACF = ..., + subok: bool = ..., + shape: None | _ShapeLike= ..., +) -> NDArray[_SCT]: ... +@overload +def full_like( + a: Any, + fill_value: Any, + dtype: DTypeLike, + order: _OrderKACF = ..., + subok: bool = ..., + shape: None | _ShapeLike= ..., +) -> NDArray[Any]: ... + +@overload +def count_nonzero( + a: ArrayLike, + axis: None = ..., + *, + keepdims: Literal[False] = ..., +) -> int: ... +@overload +def count_nonzero( + a: ArrayLike, + axis: _ShapeLike = ..., + *, + keepdims: bool = ..., +) -> Any: ... # TODO: np.intp or ndarray[np.intp] + +def isfortran(a: NDArray[Any] | generic) -> bool: ... + +def argwhere(a: ArrayLike) -> NDArray[intp]: ... + +def flatnonzero(a: ArrayLike) -> NDArray[intp]: ... + +@overload +def correlate( + a: _ArrayLikeUnknown, + v: _ArrayLikeUnknown, + mode: _CorrelateMode = ..., +) -> NDArray[Any]: ... +@overload +def correlate( + a: _ArrayLikeBool_co, + v: _ArrayLikeBool_co, + mode: _CorrelateMode = ..., +) -> NDArray[bool_]: ... +@overload +def correlate( + a: _ArrayLikeUInt_co, + v: _ArrayLikeUInt_co, + mode: _CorrelateMode = ..., +) -> NDArray[unsignedinteger[Any]]: ... 
+@overload +def correlate( + a: _ArrayLikeInt_co, + v: _ArrayLikeInt_co, + mode: _CorrelateMode = ..., +) -> NDArray[signedinteger[Any]]: ... +@overload +def correlate( + a: _ArrayLikeFloat_co, + v: _ArrayLikeFloat_co, + mode: _CorrelateMode = ..., +) -> NDArray[floating[Any]]: ... +@overload +def correlate( + a: _ArrayLikeComplex_co, + v: _ArrayLikeComplex_co, + mode: _CorrelateMode = ..., +) -> NDArray[complexfloating[Any, Any]]: ... +@overload +def correlate( + a: _ArrayLikeTD64_co, + v: _ArrayLikeTD64_co, + mode: _CorrelateMode = ..., +) -> NDArray[timedelta64]: ... +@overload +def correlate( + a: _ArrayLikeObject_co, + v: _ArrayLikeObject_co, + mode: _CorrelateMode = ..., +) -> NDArray[object_]: ... + +@overload +def convolve( + a: _ArrayLikeUnknown, + v: _ArrayLikeUnknown, + mode: _CorrelateMode = ..., +) -> NDArray[Any]: ... +@overload +def convolve( + a: _ArrayLikeBool_co, + v: _ArrayLikeBool_co, + mode: _CorrelateMode = ..., +) -> NDArray[bool_]: ... +@overload +def convolve( + a: _ArrayLikeUInt_co, + v: _ArrayLikeUInt_co, + mode: _CorrelateMode = ..., +) -> NDArray[unsignedinteger[Any]]: ... +@overload +def convolve( + a: _ArrayLikeInt_co, + v: _ArrayLikeInt_co, + mode: _CorrelateMode = ..., +) -> NDArray[signedinteger[Any]]: ... +@overload +def convolve( + a: _ArrayLikeFloat_co, + v: _ArrayLikeFloat_co, + mode: _CorrelateMode = ..., +) -> NDArray[floating[Any]]: ... +@overload +def convolve( + a: _ArrayLikeComplex_co, + v: _ArrayLikeComplex_co, + mode: _CorrelateMode = ..., +) -> NDArray[complexfloating[Any, Any]]: ... +@overload +def convolve( + a: _ArrayLikeTD64_co, + v: _ArrayLikeTD64_co, + mode: _CorrelateMode = ..., +) -> NDArray[timedelta64]: ... +@overload +def convolve( + a: _ArrayLikeObject_co, + v: _ArrayLikeObject_co, + mode: _CorrelateMode = ..., +) -> NDArray[object_]: ... + +@overload +def outer( + a: _ArrayLikeUnknown, + b: _ArrayLikeUnknown, + out: None = ..., +) -> NDArray[Any]: ... +@overload +def outer( + a: _ArrayLikeBool_co, + b: _ArrayLikeBool_co, + out: None = ..., +) -> NDArray[bool_]: ... +@overload +def outer( + a: _ArrayLikeUInt_co, + b: _ArrayLikeUInt_co, + out: None = ..., +) -> NDArray[unsignedinteger[Any]]: ... +@overload +def outer( + a: _ArrayLikeInt_co, + b: _ArrayLikeInt_co, + out: None = ..., +) -> NDArray[signedinteger[Any]]: ... +@overload +def outer( + a: _ArrayLikeFloat_co, + b: _ArrayLikeFloat_co, + out: None = ..., +) -> NDArray[floating[Any]]: ... +@overload +def outer( + a: _ArrayLikeComplex_co, + b: _ArrayLikeComplex_co, + out: None = ..., +) -> NDArray[complexfloating[Any, Any]]: ... +@overload +def outer( + a: _ArrayLikeTD64_co, + b: _ArrayLikeTD64_co, + out: None = ..., +) -> NDArray[timedelta64]: ... +@overload +def outer( + a: _ArrayLikeObject_co, + b: _ArrayLikeObject_co, + out: None = ..., +) -> NDArray[object_]: ... +@overload +def outer( + a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, + b: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, + out: _ArrayType, +) -> _ArrayType: ... + +@overload +def tensordot( + a: _ArrayLikeUnknown, + b: _ArrayLikeUnknown, + axes: int | tuple[_ShapeLike, _ShapeLike] = ..., +) -> NDArray[Any]: ... +@overload +def tensordot( + a: _ArrayLikeBool_co, + b: _ArrayLikeBool_co, + axes: int | tuple[_ShapeLike, _ShapeLike] = ..., +) -> NDArray[bool_]: ... +@overload +def tensordot( + a: _ArrayLikeUInt_co, + b: _ArrayLikeUInt_co, + axes: int | tuple[_ShapeLike, _ShapeLike] = ..., +) -> NDArray[unsignedinteger[Any]]: ... 
+@overload +def tensordot( + a: _ArrayLikeInt_co, + b: _ArrayLikeInt_co, + axes: int | tuple[_ShapeLike, _ShapeLike] = ..., +) -> NDArray[signedinteger[Any]]: ... +@overload +def tensordot( + a: _ArrayLikeFloat_co, + b: _ArrayLikeFloat_co, + axes: int | tuple[_ShapeLike, _ShapeLike] = ..., +) -> NDArray[floating[Any]]: ... +@overload +def tensordot( + a: _ArrayLikeComplex_co, + b: _ArrayLikeComplex_co, + axes: int | tuple[_ShapeLike, _ShapeLike] = ..., +) -> NDArray[complexfloating[Any, Any]]: ... +@overload +def tensordot( + a: _ArrayLikeTD64_co, + b: _ArrayLikeTD64_co, + axes: int | tuple[_ShapeLike, _ShapeLike] = ..., +) -> NDArray[timedelta64]: ... +@overload +def tensordot( + a: _ArrayLikeObject_co, + b: _ArrayLikeObject_co, + axes: int | tuple[_ShapeLike, _ShapeLike] = ..., +) -> NDArray[object_]: ... + +@overload +def roll( + a: _ArrayLike[_SCT], + shift: _ShapeLike, + axis: None | _ShapeLike = ..., +) -> NDArray[_SCT]: ... +@overload +def roll( + a: ArrayLike, + shift: _ShapeLike, + axis: None | _ShapeLike = ..., +) -> NDArray[Any]: ... + +def rollaxis( + a: NDArray[_SCT], + axis: int, + start: int = ..., +) -> NDArray[_SCT]: ... + +def moveaxis( + a: NDArray[_SCT], + source: _ShapeLike, + destination: _ShapeLike, +) -> NDArray[_SCT]: ... + +@overload +def cross( + a: _ArrayLikeUnknown, + b: _ArrayLikeUnknown, + axisa: int = ..., + axisb: int = ..., + axisc: int = ..., + axis: None | int = ..., +) -> NDArray[Any]: ... +@overload +def cross( + a: _ArrayLikeBool_co, + b: _ArrayLikeBool_co, + axisa: int = ..., + axisb: int = ..., + axisc: int = ..., + axis: None | int = ..., +) -> NoReturn: ... +@overload +def cross( + a: _ArrayLikeUInt_co, + b: _ArrayLikeUInt_co, + axisa: int = ..., + axisb: int = ..., + axisc: int = ..., + axis: None | int = ..., +) -> NDArray[unsignedinteger[Any]]: ... +@overload +def cross( + a: _ArrayLikeInt_co, + b: _ArrayLikeInt_co, + axisa: int = ..., + axisb: int = ..., + axisc: int = ..., + axis: None | int = ..., +) -> NDArray[signedinteger[Any]]: ... +@overload +def cross( + a: _ArrayLikeFloat_co, + b: _ArrayLikeFloat_co, + axisa: int = ..., + axisb: int = ..., + axisc: int = ..., + axis: None | int = ..., +) -> NDArray[floating[Any]]: ... +@overload +def cross( + a: _ArrayLikeComplex_co, + b: _ArrayLikeComplex_co, + axisa: int = ..., + axisb: int = ..., + axisc: int = ..., + axis: None | int = ..., +) -> NDArray[complexfloating[Any, Any]]: ... +@overload +def cross( + a: _ArrayLikeObject_co, + b: _ArrayLikeObject_co, + axisa: int = ..., + axisb: int = ..., + axisc: int = ..., + axis: None | int = ..., +) -> NDArray[object_]: ... + +@overload +def indices( + dimensions: Sequence[int], + dtype: type[int] = ..., + sparse: Literal[False] = ..., +) -> NDArray[int_]: ... +@overload +def indices( + dimensions: Sequence[int], + dtype: type[int] = ..., + sparse: Literal[True] = ..., +) -> tuple[NDArray[int_], ...]: ... +@overload +def indices( + dimensions: Sequence[int], + dtype: _DTypeLike[_SCT], + sparse: Literal[False] = ..., +) -> NDArray[_SCT]: ... +@overload +def indices( + dimensions: Sequence[int], + dtype: _DTypeLike[_SCT], + sparse: Literal[True], +) -> tuple[NDArray[_SCT], ...]: ... +@overload +def indices( + dimensions: Sequence[int], + dtype: DTypeLike, + sparse: Literal[False] = ..., +) -> NDArray[Any]: ... +@overload +def indices( + dimensions: Sequence[int], + dtype: DTypeLike, + sparse: Literal[True], +) -> tuple[NDArray[Any], ...]: ... 
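# --- Illustrative aside (not part of the vendored numeric.pyi) ---------------
# The ``indices`` overloads above encode, for type checkers, the runtime split
# between the dense and sparse forms: by default a single stacked index array
# is returned, while ``sparse=True`` yields a tuple of open grids. A minimal
# doctest-style sketch of that behavior:
#
#   >>> import numpy as np
#   >>> np.indices((2, 3)).shape             # dense: one (len(dims), *dims) array
#   (2, 2, 3)
#   >>> rows, cols = np.indices((2, 3), sparse=True)
#   >>> rows.shape, cols.shape               # sparse: broadcastable open grids
#   ((2, 1), (1, 3))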
+ +def fromfunction( + function: Callable[..., _T], + shape: Sequence[int], + *, + dtype: DTypeLike = ..., + like: _SupportsArrayFunc = ..., + **kwargs: Any, +) -> _T: ... + +def isscalar(element: object) -> TypeGuard[ + generic | bool | int | float | complex | str | bytes | memoryview +]: ... + +def binary_repr(num: SupportsIndex, width: None | int = ...) -> str: ... + +def base_repr( + number: SupportsAbs[float], + base: float = ..., + padding: SupportsIndex = ..., +) -> str: ... + +@overload +def identity( + n: int, + dtype: None = ..., + *, + like: _SupportsArrayFunc = ..., +) -> NDArray[float64]: ... +@overload +def identity( + n: int, + dtype: _DTypeLike[_SCT], + *, + like: _SupportsArrayFunc = ..., +) -> NDArray[_SCT]: ... +@overload +def identity( + n: int, + dtype: DTypeLike, + *, + like: _SupportsArrayFunc = ..., +) -> NDArray[Any]: ... + +def allclose( + a: ArrayLike, + b: ArrayLike, + rtol: float = ..., + atol: float = ..., + equal_nan: bool = ..., +) -> bool: ... + +@overload +def isclose( + a: _ScalarLike_co, + b: _ScalarLike_co, + rtol: float = ..., + atol: float = ..., + equal_nan: bool = ..., +) -> bool_: ... +@overload +def isclose( + a: ArrayLike, + b: ArrayLike, + rtol: float = ..., + atol: float = ..., + equal_nan: bool = ..., +) -> NDArray[bool_]: ... + +def array_equal(a1: ArrayLike, a2: ArrayLike, equal_nan: bool = ...) -> bool: ... + +def array_equiv(a1: ArrayLike, a2: ArrayLike) -> bool: ... diff --git a/.venv/lib/python3.12/site-packages/numpy/core/numerictypes.py b/.venv/lib/python3.12/site-packages/numpy/core/numerictypes.py new file mode 100644 index 00000000..aea41bc2 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/numerictypes.py @@ -0,0 +1,689 @@ +""" +numerictypes: Define the numeric type objects + +This module is designed so "from numerictypes import \\*" is safe. 
+Exported symbols include: + + Dictionary with all registered number types (including aliases): + sctypeDict + + Type objects (not all will be available, depends on platform): + see variable sctypes for which ones you have + + Bit-width names + + int8 int16 int32 int64 int128 + uint8 uint16 uint32 uint64 uint128 + float16 float32 float64 float96 float128 float256 + complex32 complex64 complex128 complex192 complex256 complex512 + datetime64 timedelta64 + + c-based names + + bool_ + + object_ + + void, str_, unicode_ + + byte, ubyte, + short, ushort + intc, uintc, + intp, uintp, + int_, uint, + longlong, ulonglong, + + single, csingle, + float_, complex_, + longfloat, clongfloat, + + As part of the type-hierarchy: xx -- is bit-width + + generic + +-> bool_ (kind=b) + +-> number + | +-> integer + | | +-> signedinteger (intxx) (kind=i) + | | | byte + | | | short + | | | intc + | | | intp + | | | int_ + | | | longlong + | | \\-> unsignedinteger (uintxx) (kind=u) + | | ubyte + | | ushort + | | uintc + | | uintp + | | uint_ + | | ulonglong + | +-> inexact + | +-> floating (floatxx) (kind=f) + | | half + | | single + | | float_ (double) + | | longfloat + | \\-> complexfloating (complexxx) (kind=c) + | csingle (singlecomplex) + | complex_ (cfloat, cdouble) + | clongfloat (longcomplex) + +-> flexible + | +-> character + | | str_ (string_, bytes_) (kind=S) [Python 2] + | | unicode_ (kind=U) [Python 2] + | | + | | bytes_ (string_) (kind=S) [Python 3] + | | str_ (unicode_) (kind=U) [Python 3] + | | + | \\-> void (kind=V) + \\-> object_ (not used much) (kind=O) + +""" +import numbers +import warnings + +from .multiarray import ( + ndarray, array, dtype, datetime_data, datetime_as_string, + busday_offset, busday_count, is_busday, busdaycalendar + ) +from .._utils import set_module + +# we add more at the bottom +__all__ = ['sctypeDict', 'sctypes', + 'ScalarType', 'obj2sctype', 'cast', 'nbytes', 'sctype2char', + 'maximum_sctype', 'issctype', 'typecodes', 'find_common_type', + 'issubdtype', 'datetime_data', 'datetime_as_string', + 'busday_offset', 'busday_count', 'is_busday', 'busdaycalendar', + ] + +# we don't need all these imports, but we need to keep them for compatibility +# for users using np.core.numerictypes.UPPER_TABLE +from ._string_helpers import ( + english_lower, english_upper, english_capitalize, LOWER_TABLE, UPPER_TABLE +) + +from ._type_aliases import ( + sctypeDict, + allTypes, + bitname, + sctypes, + _concrete_types, + _concrete_typeinfo, + _bits_of, +) +from ._dtype import _kind_name + +# we don't export these for import *, but we do want them accessible +# as numerictypes.bool, etc. +from builtins import bool, int, float, complex, object, str, bytes +from numpy.compat import long, unicode + + +# We use this later +generic = allTypes['generic'] + +genericTypeRank = ['bool', 'int8', 'uint8', 'int16', 'uint16', + 'int32', 'uint32', 'int64', 'uint64', 'int128', + 'uint128', 'float16', + 'float32', 'float64', 'float80', 'float96', 'float128', + 'float256', + 'complex32', 'complex64', 'complex128', 'complex160', + 'complex192', 'complex256', 'complex512', 'object'] + +@set_module('numpy') +def maximum_sctype(t): + """ + Return the scalar type of highest precision of the same kind as the input. + + Parameters + ---------- + t : dtype or dtype specifier + The input data type. This can be a `dtype` object or an object that + is convertible to a `dtype`. + + Returns + ------- + out : dtype + The highest precision data type of the same kind (`dtype.kind`) as `t`. 
+ + See Also + -------- + obj2sctype, mintypecode, sctype2char + dtype + + Examples + -------- + >>> np.maximum_sctype(int) + <class 'numpy.int64'> + >>> np.maximum_sctype(np.uint8) + <class 'numpy.uint64'> + >>> np.maximum_sctype(complex) + <class 'numpy.complex256'> # may vary + + >>> np.maximum_sctype(str) + <class 'numpy.str_'> + + >>> np.maximum_sctype('i2') + <class 'numpy.int64'> + >>> np.maximum_sctype('f4') + <class 'numpy.float128'> # may vary + + """ + g = obj2sctype(t) + if g is None: + return t + t = g + base = _kind_name(dtype(t)) + if base in sctypes: + return sctypes[base][-1] + else: + return t + + +@set_module('numpy') +def issctype(rep): + """ + Determines whether the given object represents a scalar data-type. + + Parameters + ---------- + rep : any + If `rep` is an instance of a scalar dtype, True is returned. If not, + False is returned. + + Returns + ------- + out : bool + Boolean result of check whether `rep` is a scalar dtype. + + See Also + -------- + issubsctype, issubdtype, obj2sctype, sctype2char + + Examples + -------- + >>> np.issctype(np.int32) + True + >>> np.issctype(list) + False + >>> np.issctype(1.1) + False + + Strings are also a scalar type: + + >>> np.issctype(np.dtype('str')) + True + + """ + if not isinstance(rep, (type, dtype)): + return False + try: + res = obj2sctype(rep) + if res and res != object_: + return True + return False + except Exception: + return False + + +@set_module('numpy') +def obj2sctype(rep, default=None): + """ + Return the scalar dtype or NumPy equivalent of Python type of an object. + + Parameters + ---------- + rep : any + The object of which the type is returned. + default : any, optional + If given, this is returned for objects whose types can not be + determined. If not given, None is returned for those objects. + + Returns + ------- + dtype : dtype or Python type + The data type of `rep`. + + See Also + -------- + sctype2char, issctype, issubsctype, issubdtype, maximum_sctype + + Examples + -------- + >>> np.obj2sctype(np.int32) + <class 'numpy.int32'> + >>> np.obj2sctype(np.array([1., 2.])) + <class 'numpy.float64'> + >>> np.obj2sctype(np.array([1.j])) + <class 'numpy.complex128'> + + >>> np.obj2sctype(dict) + <class 'numpy.object_'> + >>> np.obj2sctype('string') + + >>> np.obj2sctype(1, default=list) + <class 'list'> + + """ + # prevent abstract classes being upcast + if isinstance(rep, type) and issubclass(rep, generic): + return rep + # extract dtype from arrays + if isinstance(rep, ndarray): + return rep.dtype.type + # fall back on dtype to convert + try: + res = dtype(rep) + except Exception: + return default + else: + return res.type + + +@set_module('numpy') +def issubclass_(arg1, arg2): + """ + Determine if a class is a subclass of a second class. + + `issubclass_` is equivalent to the Python built-in ``issubclass``, + except that it returns False instead of raising a TypeError if one + of the arguments is not a class. + + Parameters + ---------- + arg1 : class + Input class. True is returned if `arg1` is a subclass of `arg2`. + arg2 : class or tuple of classes. + Input class. If a tuple of classes, True is returned if `arg1` is a + subclass of any of the tuple elements. + + Returns + ------- + out : bool + Whether `arg1` is a subclass of `arg2` or not. 
+ + See Also + -------- + issubsctype, issubdtype, issctype + + Examples + -------- + >>> np.issubclass_(np.int32, int) + False + >>> np.issubclass_(np.int32, float) + False + >>> np.issubclass_(np.float64, float) + True + + """ + try: + return issubclass(arg1, arg2) + except TypeError: + return False + + +@set_module('numpy') +def issubsctype(arg1, arg2): + """ + Determine if the first argument is a subclass of the second argument. + + Parameters + ---------- + arg1, arg2 : dtype or dtype specifier + Data-types. + + Returns + ------- + out : bool + The result. + + See Also + -------- + issctype, issubdtype, obj2sctype + + Examples + -------- + >>> np.issubsctype('S8', str) + False + >>> np.issubsctype(np.array([1]), int) + True + >>> np.issubsctype(np.array([1]), float) + False + + """ + return issubclass(obj2sctype(arg1), obj2sctype(arg2)) + + +@set_module('numpy') +def issubdtype(arg1, arg2): + r""" + Returns True if first argument is a typecode lower/equal in type hierarchy. + + This is like the builtin :func:`issubclass`, but for `dtype`\ s. + + Parameters + ---------- + arg1, arg2 : dtype_like + `dtype` or object coercible to one + + Returns + ------- + out : bool + + See Also + -------- + :ref:`arrays.scalars` : Overview of the numpy type hierarchy. + issubsctype, issubclass_ + + Examples + -------- + `issubdtype` can be used to check the type of arrays: + + >>> ints = np.array([1, 2, 3], dtype=np.int32) + >>> np.issubdtype(ints.dtype, np.integer) + True + >>> np.issubdtype(ints.dtype, np.floating) + False + + >>> floats = np.array([1, 2, 3], dtype=np.float32) + >>> np.issubdtype(floats.dtype, np.integer) + False + >>> np.issubdtype(floats.dtype, np.floating) + True + + Similar types of different sizes are not subdtypes of each other: + + >>> np.issubdtype(np.float64, np.float32) + False + >>> np.issubdtype(np.float32, np.float64) + False + + but both are subtypes of `floating`: + + >>> np.issubdtype(np.float64, np.floating) + True + >>> np.issubdtype(np.float32, np.floating) + True + + For convenience, dtype-like objects are allowed too: + + >>> np.issubdtype('S1', np.string_) + True + >>> np.issubdtype('i4', np.signedinteger) + True + + """ + if not issubclass_(arg1, generic): + arg1 = dtype(arg1).type + if not issubclass_(arg2, generic): + arg2 = dtype(arg2).type + + return issubclass(arg1, arg2) + + +# This dictionary allows look up based on any alias for an array data-type +class _typedict(dict): + """ + Base object for a dictionary for look-up with any alias for an array dtype. + + Instances of `_typedict` can not be used as dictionaries directly, + first they have to be populated. + + """ + + def __getitem__(self, obj): + return dict.__getitem__(self, obj2sctype(obj)) + +nbytes = _typedict() +_alignment = _typedict() +_maxvals = _typedict() +_minvals = _typedict() +def _construct_lookups(): + for name, info in _concrete_typeinfo.items(): + obj = info.type + nbytes[obj] = info.bits // 8 + _alignment[obj] = info.alignment + if len(info) > 5: + _maxvals[obj] = info.max + _minvals[obj] = info.min + else: + _maxvals[obj] = None + _minvals[obj] = None + +_construct_lookups() + + +@set_module('numpy') +def sctype2char(sctype): + """ + Return the string representation of a scalar dtype. + + Parameters + ---------- + sctype : scalar dtype or object + If a scalar dtype, the corresponding string character is + returned. If an object, `sctype2char` tries to infer its scalar type + and then return the corresponding string character. 
+ + Returns + ------- + typechar : str + The string character corresponding to the scalar type. + + Raises + ------ + ValueError + If `sctype` is an object for which the type can not be inferred. + + See Also + -------- + obj2sctype, issctype, issubsctype, mintypecode + + Examples + -------- + >>> for sctype in [np.int32, np.double, np.complex_, np.string_, np.ndarray]: + ... print(np.sctype2char(sctype)) + l # may vary + d + D + S + O + + >>> x = np.array([1., 2-1.j]) + >>> np.sctype2char(x) + 'D' + >>> np.sctype2char(list) + 'O' + + """ + sctype = obj2sctype(sctype) + if sctype is None: + raise ValueError("unrecognized type") + if sctype not in _concrete_types: + # for compatibility + raise KeyError(sctype) + return dtype(sctype).char + +# Create dictionary of casting functions that wrap sequences +# indexed by type or type character +cast = _typedict() +for key in _concrete_types: + cast[key] = lambda x, k=key: array(x, copy=False).astype(k) + + +def _scalar_type_key(typ): + """A ``key`` function for `sorted`.""" + dt = dtype(typ) + return (dt.kind.lower(), dt.itemsize) + + +ScalarType = [int, float, complex, bool, bytes, str, memoryview] +ScalarType += sorted(_concrete_types, key=_scalar_type_key) +ScalarType = tuple(ScalarType) + + +# Now add the types we've determined to this module +for key in allTypes: + globals()[key] = allTypes[key] + __all__.append(key) + +del key + +typecodes = {'Character':'c', + 'Integer':'bhilqp', + 'UnsignedInteger':'BHILQP', + 'Float':'efdg', + 'Complex':'FDG', + 'AllInteger':'bBhHiIlLqQpP', + 'AllFloat':'efdgFDG', + 'Datetime': 'Mm', + 'All':'?bhilqpBHILQPefdgFDGSUVOMm'} + +# backwards compatibility --- deprecated name +# Formal deprecation: Numpy 1.20.0, 2020-10-19 (see numpy/__init__.py) +typeDict = sctypeDict + +# b -> boolean +# u -> unsigned integer +# i -> signed integer +# f -> floating point +# c -> complex +# M -> datetime +# m -> timedelta +# S -> string +# U -> Unicode string +# V -> record +# O -> Python object +_kind_list = ['b', 'u', 'i', 'f', 'c', 'S', 'U', 'V', 'O', 'M', 'm'] + +__test_types = '?'+typecodes['AllInteger'][:-2]+typecodes['AllFloat']+'O' +__len_test_types = len(__test_types) + +# Keep incrementing until a common type both can be coerced to +# is found. Otherwise, return None +def _find_common_coerce(a, b): + if a > b: + return a + try: + thisind = __test_types.index(a.char) + except ValueError: + return None + return _can_coerce_all([a, b], start=thisind) + +# Find a data-type that all data-types in a list can be coerced to +def _can_coerce_all(dtypelist, start=0): + N = len(dtypelist) + if N == 0: + return None + if N == 1: + return dtypelist[0] + thisind = start + while thisind < __len_test_types: + newdtype = dtype(__test_types[thisind]) + numcoerce = len([x for x in dtypelist if newdtype >= x]) + if numcoerce == N: + return newdtype + thisind += 1 + return None + +def _register_types(): + numbers.Integral.register(integer) + numbers.Complex.register(inexact) + numbers.Real.register(floating) + numbers.Number.register(number) + +_register_types() + + +@set_module('numpy') +def find_common_type(array_types, scalar_types): + """ + Determine common type following standard coercion rules. + + .. deprecated:: NumPy 1.25 + + This function is deprecated, use `numpy.promote_types` or + `numpy.result_type` instead. To achieve semantics for the + `scalar_types` argument, use `numpy.result_type` and pass the Python + values `0`, `0.0`, or `0j`. + This will give the same results in almost all cases. 
+ More information and rare exception can be found in the + `NumPy 1.25 release notes <https://numpy.org/devdocs/release/1.25.0-notes.html>`_. + + Parameters + ---------- + array_types : sequence + A list of dtypes or dtype convertible objects representing arrays. + scalar_types : sequence + A list of dtypes or dtype convertible objects representing scalars. + + Returns + ------- + datatype : dtype + The common data type, which is the maximum of `array_types` ignoring + `scalar_types`, unless the maximum of `scalar_types` is of a + different kind (`dtype.kind`). If the kind is not understood, then + None is returned. + + See Also + -------- + dtype, common_type, can_cast, mintypecode + + Examples + -------- + >>> np.find_common_type([], [np.int64, np.float32, complex]) + dtype('complex128') + >>> np.find_common_type([np.int64, np.float32], []) + dtype('float64') + + The standard casting rules ensure that a scalar cannot up-cast an + array unless the scalar is of a fundamentally different kind of data + (i.e. under a different hierarchy in the data type hierarchy) then + the array: + + >>> np.find_common_type([np.float32], [np.int64, np.float64]) + dtype('float32') + + Complex is of a different type, so it up-casts the float in the + `array_types` argument: + + >>> np.find_common_type([np.float32], [complex]) + dtype('complex128') + + Type specifier strings are convertible to dtypes and can therefore + be used instead of dtypes: + + >>> np.find_common_type(['f4', 'f4', 'i4'], ['c8']) + dtype('complex128') + + """ + # Deprecated 2022-11-07, NumPy 1.25 + warnings.warn( + "np.find_common_type is deprecated. Please use `np.result_type` " + "or `np.promote_types`.\n" + "See https://numpy.org/devdocs/release/1.25.0-notes.html and the " + "docs for more information. (Deprecated NumPy 1.25)", + DeprecationWarning, stacklevel=2) + + array_types = [dtype(x) for x in array_types] + scalar_types = [dtype(x) for x in scalar_types] + + maxa = _can_coerce_all(array_types) + maxsc = _can_coerce_all(scalar_types) + + if maxa is None: + return maxsc + + if maxsc is None: + return maxa + + try: + index_a = _kind_list.index(maxa.kind) + index_sc = _kind_list.index(maxsc.kind) + except ValueError: + return None + + if index_sc > index_a: + return _find_common_coerce(maxsc, maxa) + else: + return maxa diff --git a/.venv/lib/python3.12/site-packages/numpy/core/numerictypes.pyi b/.venv/lib/python3.12/site-packages/numpy/core/numerictypes.pyi new file mode 100644 index 00000000..d05861b2 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/numerictypes.pyi @@ -0,0 +1,156 @@ +import sys +import types +from collections.abc import Iterable +from typing import ( + Literal as L, + Union, + overload, + Any, + TypeVar, + Protocol, + TypedDict, +) + +from numpy import ( + ndarray, + dtype, + generic, + bool_, + ubyte, + ushort, + uintc, + uint, + ulonglong, + byte, + short, + intc, + int_, + longlong, + half, + single, + double, + longdouble, + csingle, + cdouble, + clongdouble, + datetime64, + timedelta64, + object_, + str_, + bytes_, + void, +) + +from numpy.core._type_aliases import ( + sctypeDict as sctypeDict, + sctypes as sctypes, +) + +from numpy._typing import DTypeLike, ArrayLike, _DTypeLike + +_T = TypeVar("_T") +_SCT = TypeVar("_SCT", bound=generic) + +class _CastFunc(Protocol): + def __call__( + self, x: ArrayLike, k: DTypeLike = ... + ) -> ndarray[Any, dtype[Any]]: ... 
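# --- Illustrative aside (not part of the vendored numerictypes.pyi) ----------
# ``_CastFunc`` describes the callables stored in the ``cast`` table built in
# ``numerictypes.py`` above (``array(x, copy=False).astype(key)``), and
# ``_typedict`` lets tables such as ``cast`` and ``nbytes`` be indexed by any
# dtype alias. A small sketch, assuming the 1.25-era ``np.cast`` and
# ``np.nbytes`` aliases exported by this module:
#
#   >>> import numpy as np
#   >>> np.cast[np.int32]([1.5, 2.5])
#   array([1, 2], dtype=int32)
#   >>> np.nbytes[np.float64], np.nbytes['i2']
#   (8, 2)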
+ +class _TypeCodes(TypedDict): + Character: L['c'] + Integer: L['bhilqp'] + UnsignedInteger: L['BHILQP'] + Float: L['efdg'] + Complex: L['FDG'] + AllInteger: L['bBhHiIlLqQpP'] + AllFloat: L['efdgFDG'] + Datetime: L['Mm'] + All: L['?bhilqpBHILQPefdgFDGSUVOMm'] + +class _typedict(dict[type[generic], _T]): + def __getitem__(self, key: DTypeLike) -> _T: ... + +if sys.version_info >= (3, 10): + _TypeTuple = Union[ + type[Any], + types.UnionType, + tuple[Union[type[Any], types.UnionType, tuple[Any, ...]], ...], + ] +else: + _TypeTuple = Union[ + type[Any], + tuple[Union[type[Any], tuple[Any, ...]], ...], + ] + +__all__: list[str] + +@overload +def maximum_sctype(t: _DTypeLike[_SCT]) -> type[_SCT]: ... +@overload +def maximum_sctype(t: DTypeLike) -> type[Any]: ... + +@overload +def issctype(rep: dtype[Any] | type[Any]) -> bool: ... +@overload +def issctype(rep: object) -> L[False]: ... + +@overload +def obj2sctype(rep: _DTypeLike[_SCT], default: None = ...) -> None | type[_SCT]: ... +@overload +def obj2sctype(rep: _DTypeLike[_SCT], default: _T) -> _T | type[_SCT]: ... +@overload +def obj2sctype(rep: DTypeLike, default: None = ...) -> None | type[Any]: ... +@overload +def obj2sctype(rep: DTypeLike, default: _T) -> _T | type[Any]: ... +@overload +def obj2sctype(rep: object, default: None = ...) -> None: ... +@overload +def obj2sctype(rep: object, default: _T) -> _T: ... + +@overload +def issubclass_(arg1: type[Any], arg2: _TypeTuple) -> bool: ... +@overload +def issubclass_(arg1: object, arg2: object) -> L[False]: ... + +def issubsctype(arg1: DTypeLike, arg2: DTypeLike) -> bool: ... + +def issubdtype(arg1: DTypeLike, arg2: DTypeLike) -> bool: ... + +def sctype2char(sctype: DTypeLike) -> str: ... + +cast: _typedict[_CastFunc] +nbytes: _typedict[int] +typecodes: _TypeCodes +ScalarType: tuple[ + type[int], + type[float], + type[complex], + type[bool], + type[bytes], + type[str], + type[memoryview], + type[bool_], + type[csingle], + type[cdouble], + type[clongdouble], + type[half], + type[single], + type[double], + type[longdouble], + type[byte], + type[short], + type[intc], + type[int_], + type[longlong], + type[timedelta64], + type[datetime64], + type[object_], + type[bytes_], + type[str_], + type[ubyte], + type[ushort], + type[uintc], + type[uint], + type[ulonglong], + type[void], +] diff --git a/.venv/lib/python3.12/site-packages/numpy/core/overrides.py b/.venv/lib/python3.12/site-packages/numpy/core/overrides.py new file mode 100644 index 00000000..6403e65b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/overrides.py @@ -0,0 +1,181 @@ +"""Implementation of __array_function__ overrides from NEP-18.""" +import collections +import functools +import os + +from .._utils import set_module +from .._utils._inspect import getargspec +from numpy.core._multiarray_umath import ( + add_docstring, _get_implementing_args, _ArrayFunctionDispatcher) + + +ARRAY_FUNCTIONS = set() + +array_function_like_doc = ( + """like : array_like, optional + Reference object to allow the creation of arrays which are not + NumPy arrays. If an array-like passed in as ``like`` supports + the ``__array_function__`` protocol, the result will be defined + by it. 
In this case, it ensures the creation of an array object + compatible with that passed in via this argument.""" +) + +def set_array_function_like_doc(public_api): + if public_api.__doc__ is not None: + public_api.__doc__ = public_api.__doc__.replace( + "${ARRAY_FUNCTION_LIKE}", + array_function_like_doc, + ) + return public_api + + +add_docstring( + _ArrayFunctionDispatcher, + """ + Class to wrap functions with checks for __array_function__ overrides. + + All arguments are required, and can only be passed by position. + + Parameters + ---------- + dispatcher : function or None + The dispatcher function that returns a single sequence-like object + of all arguments relevant. It must have the same signature (except + the default values) as the actual implementation. + If ``None``, this is a ``like=`` dispatcher and the + ``_ArrayFunctionDispatcher`` must be called with ``like`` as the + first (additional and positional) argument. + implementation : function + Function that implements the operation on NumPy arrays without + overrides. Arguments passed calling the ``_ArrayFunctionDispatcher`` + will be forwarded to this (and the ``dispatcher``) as if using + ``*args, **kwargs``. + + Attributes + ---------- + _implementation : function + The original implementation passed in. + """) + + +# exposed for testing purposes; used internally by _ArrayFunctionDispatcher +add_docstring( + _get_implementing_args, + """ + Collect arguments on which to call __array_function__. + + Parameters + ---------- + relevant_args : iterable of array-like + Iterable of possibly array-like arguments to check for + __array_function__ methods. + + Returns + ------- + Sequence of arguments with __array_function__ methods, in the order in + which they should be called. + """) + + +ArgSpec = collections.namedtuple('ArgSpec', 'args varargs keywords defaults') + + +def verify_matching_signatures(implementation, dispatcher): + """Verify that a dispatcher function has the right signature.""" + implementation_spec = ArgSpec(*getargspec(implementation)) + dispatcher_spec = ArgSpec(*getargspec(dispatcher)) + + if (implementation_spec.args != dispatcher_spec.args or + implementation_spec.varargs != dispatcher_spec.varargs or + implementation_spec.keywords != dispatcher_spec.keywords or + (bool(implementation_spec.defaults) != + bool(dispatcher_spec.defaults)) or + (implementation_spec.defaults is not None and + len(implementation_spec.defaults) != + len(dispatcher_spec.defaults))): + raise RuntimeError('implementation and dispatcher for %s have ' + 'different function signatures' % implementation) + + if implementation_spec.defaults is not None: + if dispatcher_spec.defaults != (None,) * len(dispatcher_spec.defaults): + raise RuntimeError('dispatcher functions can only use None for ' + 'default argument values') + + +def array_function_dispatch(dispatcher=None, module=None, verify=True, + docs_from_dispatcher=False): + """Decorator for adding dispatch with the __array_function__ protocol. + + See NEP-18 for example usage. + + Parameters + ---------- + dispatcher : callable or None + Function that when called like ``dispatcher(*args, **kwargs)`` with + arguments from the NumPy function call returns an iterable of + array-like arguments to check for ``__array_function__``. + + If `None`, the first argument is used as the single `like=` argument + and not passed on. A function implementing `like=` must call its + dispatcher with `like` as the first non-keyword argument. 
+ module : str, optional + __module__ attribute to set on new function, e.g., ``module='numpy'``. + By default, module is copied from the decorated function. + verify : bool, optional + If True, verify the that the signature of the dispatcher and decorated + function signatures match exactly: all required and optional arguments + should appear in order with the same names, but the default values for + all optional arguments should be ``None``. Only disable verification + if the dispatcher's signature needs to deviate for some particular + reason, e.g., because the function has a signature like + ``func(*args, **kwargs)``. + docs_from_dispatcher : bool, optional + If True, copy docs from the dispatcher function onto the dispatched + function, rather than from the implementation. This is useful for + functions defined in C, which otherwise don't have docstrings. + + Returns + ------- + Function suitable for decorating the implementation of a NumPy function. + + """ + def decorator(implementation): + if verify: + if dispatcher is not None: + verify_matching_signatures(implementation, dispatcher) + else: + # Using __code__ directly similar to verify_matching_signature + co = implementation.__code__ + last_arg = co.co_argcount + co.co_kwonlyargcount - 1 + last_arg = co.co_varnames[last_arg] + if last_arg != "like" or co.co_kwonlyargcount == 0: + raise RuntimeError( + "__array_function__ expects `like=` to be the last " + "argument and a keyword-only argument. " + f"{implementation} does not seem to comply.") + + if docs_from_dispatcher: + add_docstring(implementation, dispatcher.__doc__) + + public_api = _ArrayFunctionDispatcher(dispatcher, implementation) + public_api = functools.wraps(implementation)(public_api) + + if module is not None: + public_api.__module__ = module + + ARRAY_FUNCTIONS.add(public_api) + + return public_api + + return decorator + + +def array_function_from_dispatcher( + implementation, module=None, verify=True, docs_from_dispatcher=True): + """Like array_function_dispatcher, but with function arguments flipped.""" + + def decorator(dispatcher): + return array_function_dispatch( + dispatcher, module, verify=verify, + docs_from_dispatcher=docs_from_dispatcher)(implementation) + return decorator diff --git a/.venv/lib/python3.12/site-packages/numpy/core/records.py b/.venv/lib/python3.12/site-packages/numpy/core/records.py new file mode 100644 index 00000000..0fb49e8f --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/records.py @@ -0,0 +1,1099 @@ +""" +Record Arrays +============= +Record arrays expose the fields of structured arrays as properties. + +Most commonly, ndarrays contain elements of a single type, e.g. floats, +integers, bools etc. However, it is possible for elements to be combinations +of these using structured types, such as:: + + >>> a = np.array([(1, 2.0), (1, 2.0)], dtype=[('x', np.int64), ('y', np.float64)]) + >>> a + array([(1, 2.), (1, 2.)], dtype=[('x', '<i8'), ('y', '<f8')]) + +Here, each element consists of two fields: x (and int), and y (a float). +This is known as a structured array. The different fields are analogous +to columns in a spread-sheet. 
The different fields can be accessed as +one would a dictionary:: + + >>> a['x'] + array([1, 1]) + + >>> a['y'] + array([2., 2.]) + +Record arrays allow us to access fields as properties:: + + >>> ar = np.rec.array(a) + + >>> ar.x + array([1, 1]) + + >>> ar.y + array([2., 2.]) + +""" +import warnings +from collections import Counter +from contextlib import nullcontext + +from .._utils import set_module +from . import numeric as sb +from . import numerictypes as nt +from numpy.compat import os_fspath +from .arrayprint import _get_legacy_print_mode + +# All of the functions allow formats to be a dtype +__all__ = [ + 'record', 'recarray', 'format_parser', + 'fromarrays', 'fromrecords', 'fromstring', 'fromfile', 'array', +] + + +ndarray = sb.ndarray + +_byteorderconv = {'b':'>', + 'l':'<', + 'n':'=', + 'B':'>', + 'L':'<', + 'N':'=', + 'S':'s', + 's':'s', + '>':'>', + '<':'<', + '=':'=', + '|':'|', + 'I':'|', + 'i':'|'} + +# formats regular expression +# allows multidimensional spec with a tuple syntax in front +# of the letter code '(2,3)f4' and ' ( 2 , 3 ) f4 ' +# are equally allowed + +numfmt = nt.sctypeDict + + +def find_duplicate(list): + """Find duplication in a list, return a list of duplicated elements""" + return [ + item + for item, counts in Counter(list).items() + if counts > 1 + ] + + +@set_module('numpy') +class format_parser: + """ + Class to convert formats, names, titles description to a dtype. + + After constructing the format_parser object, the dtype attribute is + the converted data-type: + ``dtype = format_parser(formats, names, titles).dtype`` + + Attributes + ---------- + dtype : dtype + The converted data-type. + + Parameters + ---------- + formats : str or list of str + The format description, either specified as a string with + comma-separated format descriptions in the form ``'f8, i4, a5'``, or + a list of format description strings in the form + ``['f8', 'i4', 'a5']``. + names : str or list/tuple of str + The field names, either specified as a comma-separated string in the + form ``'col1, col2, col3'``, or as a list or tuple of strings in the + form ``['col1', 'col2', 'col3']``. + An empty list can be used, in that case default field names + ('f0', 'f1', ...) are used. + titles : sequence + Sequence of title strings. An empty list can be used to leave titles + out. + aligned : bool, optional + If True, align the fields by padding as the C-compiler would. + Default is False. + byteorder : str, optional + If specified, all the fields will be changed to the + provided byte-order. Otherwise, the default byte-order is + used. For all available string specifiers, see `dtype.newbyteorder`. + + See Also + -------- + dtype, typename, sctype2char + + Examples + -------- + >>> np.format_parser(['<f8', '<i4', '<a5'], ['col1', 'col2', 'col3'], + ... ['T1', 'T2', 'T3']).dtype + dtype([(('T1', 'col1'), '<f8'), (('T2', 'col2'), '<i4'), (('T3', 'col3'), 'S5')]) + + `names` and/or `titles` can be empty lists. If `titles` is an empty list, + titles will simply not appear. If `names` is empty, default field names + will be used. + + >>> np.format_parser(['f8', 'i4', 'a5'], ['col1', 'col2', 'col3'], + ... 
[]).dtype + dtype([('col1', '<f8'), ('col2', '<i4'), ('col3', '<S5')]) + >>> np.format_parser(['<f8', '<i4', '<a5'], [], []).dtype + dtype([('f0', '<f8'), ('f1', '<i4'), ('f2', 'S5')]) + + """ + + def __init__(self, formats, names, titles, aligned=False, byteorder=None): + self._parseFormats(formats, aligned) + self._setfieldnames(names, titles) + self._createdtype(byteorder) + + def _parseFormats(self, formats, aligned=False): + """ Parse the field formats """ + + if formats is None: + raise ValueError("Need formats argument") + if isinstance(formats, list): + dtype = sb.dtype( + [('f{}'.format(i), format_) for i, format_ in enumerate(formats)], + aligned, + ) + else: + dtype = sb.dtype(formats, aligned) + fields = dtype.fields + if fields is None: + dtype = sb.dtype([('f1', dtype)], aligned) + fields = dtype.fields + keys = dtype.names + self._f_formats = [fields[key][0] for key in keys] + self._offsets = [fields[key][1] for key in keys] + self._nfields = len(keys) + + def _setfieldnames(self, names, titles): + """convert input field names into a list and assign to the _names + attribute """ + + if names: + if type(names) in [list, tuple]: + pass + elif isinstance(names, str): + names = names.split(',') + else: + raise NameError("illegal input names %s" % repr(names)) + + self._names = [n.strip() for n in names[:self._nfields]] + else: + self._names = [] + + # if the names are not specified, they will be assigned as + # "f0, f1, f2,..." + # if not enough names are specified, they will be assigned as "f[n], + # f[n+1],..." etc. where n is the number of specified names..." + self._names += ['f%d' % i for i in range(len(self._names), + self._nfields)] + # check for redundant names + _dup = find_duplicate(self._names) + if _dup: + raise ValueError("Duplicate field names: %s" % _dup) + + if titles: + self._titles = [n.strip() for n in titles[:self._nfields]] + else: + self._titles = [] + titles = [] + + if self._nfields > len(titles): + self._titles += [None] * (self._nfields - len(titles)) + + def _createdtype(self, byteorder): + dtype = sb.dtype({ + 'names': self._names, + 'formats': self._f_formats, + 'offsets': self._offsets, + 'titles': self._titles, + }) + if byteorder is not None: + byteorder = _byteorderconv[byteorder[0]] + dtype = dtype.newbyteorder(byteorder) + + self.dtype = dtype + + +class record(nt.void): + """A data-type scalar that allows field access as attribute lookup. 
+ """ + + # manually set name and module so that this class's type shows up + # as numpy.record when printed + __name__ = 'record' + __module__ = 'numpy' + + def __repr__(self): + if _get_legacy_print_mode() <= 113: + return self.__str__() + return super().__repr__() + + def __str__(self): + if _get_legacy_print_mode() <= 113: + return str(self.item()) + return super().__str__() + + def __getattribute__(self, attr): + if attr in ('setfield', 'getfield', 'dtype'): + return nt.void.__getattribute__(self, attr) + try: + return nt.void.__getattribute__(self, attr) + except AttributeError: + pass + fielddict = nt.void.__getattribute__(self, 'dtype').fields + res = fielddict.get(attr, None) + if res: + obj = self.getfield(*res[:2]) + # if it has fields return a record, + # otherwise return the object + try: + dt = obj.dtype + except AttributeError: + #happens if field is Object type + return obj + if dt.names is not None: + return obj.view((self.__class__, obj.dtype)) + return obj + else: + raise AttributeError("'record' object has no " + "attribute '%s'" % attr) + + def __setattr__(self, attr, val): + if attr in ('setfield', 'getfield', 'dtype'): + raise AttributeError("Cannot set '%s' attribute" % attr) + fielddict = nt.void.__getattribute__(self, 'dtype').fields + res = fielddict.get(attr, None) + if res: + return self.setfield(val, *res[:2]) + else: + if getattr(self, attr, None): + return nt.void.__setattr__(self, attr, val) + else: + raise AttributeError("'record' object has no " + "attribute '%s'" % attr) + + def __getitem__(self, indx): + obj = nt.void.__getitem__(self, indx) + + # copy behavior of record.__getattribute__, + if isinstance(obj, nt.void) and obj.dtype.names is not None: + return obj.view((self.__class__, obj.dtype)) + else: + # return a single element + return obj + + def pprint(self): + """Pretty-print all fields.""" + # pretty-print all fields + names = self.dtype.names + maxlen = max(len(name) for name in names) + fmt = '%% %ds: %%s' % maxlen + rows = [fmt % (name, getattr(self, name)) for name in names] + return "\n".join(rows) + +# The recarray is almost identical to a standard array (which supports +# named fields already) The biggest difference is that it can use +# attribute-lookup to find the fields and it is constructed using +# a record. + +# If byteorder is given it forces a particular byteorder on all +# the fields (and any subfields) + +class recarray(ndarray): + """Construct an ndarray that allows field access using attributes. + + Arrays may have a data-types containing fields, analogous + to columns in a spread sheet. An example is ``[(x, int), (y, float)]``, + where each entry in the array is a pair of ``(int, float)``. Normally, + these attributes are accessed using dictionary lookups such as ``arr['x']`` + and ``arr['y']``. Record arrays allow the fields to be accessed as members + of the array, using ``arr.x`` and ``arr.y``. + + Parameters + ---------- + shape : tuple + Shape of output array. + dtype : data-type, optional + The desired data-type. By default, the data-type is determined + from `formats`, `names`, `titles`, `aligned` and `byteorder`. + formats : list of data-types, optional + A list containing the data-types for the different columns, e.g. + ``['i4', 'f8', 'i4']``. `formats` does *not* support the new + convention of using types directly, i.e. ``(int, float, int)``. + Note that `formats` must be a list, not a tuple. + Given that `formats` is somewhat limited, we recommend specifying + `dtype` instead. 
+ names : tuple of str, optional + The name of each column, e.g. ``('x', 'y', 'z')``. + buf : buffer, optional + By default, a new array is created of the given shape and data-type. + If `buf` is specified and is an object exposing the buffer interface, + the array will use the memory from the existing buffer. In this case, + the `offset` and `strides` keywords are available. + + Other Parameters + ---------------- + titles : tuple of str, optional + Aliases for column names. For example, if `names` were + ``('x', 'y', 'z')`` and `titles` is + ``('x_coordinate', 'y_coordinate', 'z_coordinate')``, then + ``arr['x']`` is equivalent to both ``arr.x`` and ``arr.x_coordinate``. + byteorder : {'<', '>', '='}, optional + Byte-order for all fields. + aligned : bool, optional + Align the fields in memory as the C-compiler would. + strides : tuple of ints, optional + Buffer (`buf`) is interpreted according to these strides (strides + define how many bytes each array element, row, column, etc. + occupy in memory). + offset : int, optional + Start reading buffer (`buf`) from this offset onwards. + order : {'C', 'F'}, optional + Row-major (C-style) or column-major (Fortran-style) order. + + Returns + ------- + rec : recarray + Empty array of the given shape and type. + + See Also + -------- + core.records.fromrecords : Construct a record array from data. + record : fundamental data-type for `recarray`. + format_parser : determine a data-type from formats, names, titles. + + Notes + ----- + This constructor can be compared to ``empty``: it creates a new record + array but does not fill it with data. To create a record array from data, + use one of the following methods: + + 1. Create a standard ndarray and convert it to a record array, + using ``arr.view(np.recarray)`` + 2. Use the `buf` keyword. + 3. Use `np.rec.fromrecords`. + + Examples + -------- + Create an array with two fields, ``x`` and ``y``: + + >>> x = np.array([(1.0, 2), (3.0, 4)], dtype=[('x', '<f8'), ('y', '<i8')]) + >>> x + array([(1., 2), (3., 4)], dtype=[('x', '<f8'), ('y', '<i8')]) + + >>> x['x'] + array([1., 3.]) + + View the array as a record array: + + >>> x = x.view(np.recarray) + + >>> x.x + array([1., 3.]) + + >>> x.y + array([2, 4]) + + Create a new, empty record array: + + >>> np.recarray((2,), + ... dtype=[('x', int), ('y', float), ('z', int)]) #doctest: +SKIP + rec.array([(-1073741821, 1.2249118382103472e-301, 24547520), + (3471280, 1.2134086255804012e-316, 0)], + dtype=[('x', '<i4'), ('y', '<f8'), ('z', '<i4')]) + + """ + + # manually set name and module so that this class's type shows + # up as "numpy.recarray" when printed + __name__ = 'recarray' + __module__ = 'numpy' + + def __new__(subtype, shape, dtype=None, buf=None, offset=0, strides=None, + formats=None, names=None, titles=None, + byteorder=None, aligned=False, order='C'): + + if dtype is not None: + descr = sb.dtype(dtype) + else: + descr = format_parser(formats, names, titles, aligned, byteorder).dtype + + if buf is None: + self = ndarray.__new__(subtype, shape, (record, descr), order=order) + else: + self = ndarray.__new__(subtype, shape, (record, descr), + buffer=buf, offset=offset, + strides=strides, order=order) + return self + + def __array_finalize__(self, obj): + if self.dtype.type is not record and self.dtype.names is not None: + # if self.dtype is not np.record, invoke __setattr__ which will + # convert it to a record if it is a void dtype. 
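The ``buf``/``offset``/``strides`` path of ``recarray.__new__`` above wraps existing memory instead of allocating a new buffer; a minimal sketch of that (assumed usage, not part of the patch; the sample dtype and values are made up):

import numpy as np

raw = np.array([(1, 2.0), (3, 4.0)],
               dtype=[('a', '<i4'), ('b', '<f8')]).tobytes()
r = np.recarray((2,), dtype=[('a', '<i4'), ('b', '<f8')], buf=raw)
print(r.a)   # array([1, 3], dtype=int32) -- a view into `raw`, no copy made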
+ self.dtype = self.dtype + + def __getattribute__(self, attr): + # See if ndarray has this attr, and return it if so. (note that this + # means a field with the same name as an ndarray attr cannot be + # accessed by attribute). + try: + return object.__getattribute__(self, attr) + except AttributeError: # attr must be a fieldname + pass + + # look for a field with this name + fielddict = ndarray.__getattribute__(self, 'dtype').fields + try: + res = fielddict[attr][:2] + except (TypeError, KeyError) as e: + raise AttributeError("recarray has no attribute %s" % attr) from e + obj = self.getfield(*res) + + # At this point obj will always be a recarray, since (see + # PyArray_GetField) the type of obj is inherited. Next, if obj.dtype is + # non-structured, convert it to an ndarray. Then if obj is structured + # with void type convert it to the same dtype.type (eg to preserve + # numpy.record type if present), since nested structured fields do not + # inherit type. Don't do this for non-void structures though. + if obj.dtype.names is not None: + if issubclass(obj.dtype.type, nt.void): + return obj.view(dtype=(self.dtype.type, obj.dtype)) + return obj + else: + return obj.view(ndarray) + + # Save the dictionary. + # If the attr is a field name and not in the saved dictionary + # Undo any "setting" of the attribute and do a setfield + # Thus, you can't create attributes on-the-fly that are field names. + def __setattr__(self, attr, val): + + # Automatically convert (void) structured types to records + # (but not non-void structures, subarrays, or non-structured voids) + if attr == 'dtype' and issubclass(val.type, nt.void) and val.names is not None: + val = sb.dtype((record, val)) + + newattr = attr not in self.__dict__ + try: + ret = object.__setattr__(self, attr, val) + except Exception: + fielddict = ndarray.__getattribute__(self, 'dtype').fields or {} + if attr not in fielddict: + raise + else: + fielddict = ndarray.__getattribute__(self, 'dtype').fields or {} + if attr not in fielddict: + return ret + if newattr: + # We just added this one or this setattr worked on an + # internal attribute. + try: + object.__delattr__(self, attr) + except Exception: + return ret + try: + res = fielddict[attr][:2] + except (TypeError, KeyError) as e: + raise AttributeError( + "record array has no attribute %s" % attr + ) from e + return self.setfield(val, *res) + + def __getitem__(self, indx): + obj = super().__getitem__(indx) + + # copy behavior of getattr, except that here + # we might also be returning a single element + if isinstance(obj, ndarray): + if obj.dtype.names is not None: + obj = obj.view(type(self)) + if issubclass(obj.dtype.type, nt.void): + return obj.view(dtype=(self.dtype.type, obj.dtype)) + return obj + else: + return obj.view(type=ndarray) + else: + # return a single element + return obj + + def __repr__(self): + + repr_dtype = self.dtype + if self.dtype.type is record or not issubclass(self.dtype.type, nt.void): + # If this is a full record array (has numpy.record dtype), + # or if it has a scalar (non-void) dtype with no records, + # represent it using the rec.array function. Since rec.array + # converts dtype to a numpy.record for us, convert back + # to non-record before printing + if repr_dtype.type is record: + repr_dtype = sb.dtype((nt.void, repr_dtype)) + prefix = "rec.array(" + fmt = 'rec.array(%s,%sdtype=%s)' + else: + # otherwise represent it using np.array plus a view + # This should only happen if the user is playing + # strange games with dtypes. 
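To make the attribute-lookup rules above concrete, a hedged example (not part of the patch; the field names are deliberately chosen to collide): ndarray attributes shadow same-named fields, item access always reaches the field, and assigning to a field name writes through to the underlying array.

import numpy as np

r = np.rec.fromrecords([(1, 2.0), (3, 4.0)], names='a,shape')
print(r.a)          # array([1, 3]) -- attribute lookup finds the field
print(r.shape)      # (2,)          -- the ndarray attribute wins over the field
print(r['shape'])   # array([2., 4.]) -- item access still reaches the field
r.a = [10, 20]      # assigning to a field name updates the array in place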
+ prefix = "array(" + fmt = 'array(%s,%sdtype=%s).view(numpy.recarray)' + + # get data/shape string. logic taken from numeric.array_repr + if self.size > 0 or self.shape == (0,): + lst = sb.array2string( + self, separator=', ', prefix=prefix, suffix=',') + else: + # show zero-length shape unless it is (0,) + lst = "[], shape=%s" % (repr(self.shape),) + + lf = '\n'+' '*len(prefix) + if _get_legacy_print_mode() <= 113: + lf = ' ' + lf # trailing space + return fmt % (lst, lf, repr_dtype) + + def field(self, attr, val=None): + if isinstance(attr, int): + names = ndarray.__getattribute__(self, 'dtype').names + attr = names[attr] + + fielddict = ndarray.__getattribute__(self, 'dtype').fields + + res = fielddict[attr][:2] + + if val is None: + obj = self.getfield(*res) + if obj.dtype.names is not None: + return obj + return obj.view(ndarray) + else: + return self.setfield(val, *res) + + +def _deprecate_shape_0_as_None(shape): + if shape == 0: + warnings.warn( + "Passing `shape=0` to have the shape be inferred is deprecated, " + "and in future will be equivalent to `shape=(0,)`. To infer " + "the shape and suppress this warning, pass `shape=None` instead.", + FutureWarning, stacklevel=3) + return None + else: + return shape + + +@set_module("numpy.rec") +def fromarrays(arrayList, dtype=None, shape=None, formats=None, + names=None, titles=None, aligned=False, byteorder=None): + """Create a record array from a (flat) list of arrays + + Parameters + ---------- + arrayList : list or tuple + List of array-like objects (such as lists, tuples, + and ndarrays). + dtype : data-type, optional + valid dtype for all arrays + shape : int or tuple of ints, optional + Shape of the resulting array. If not provided, inferred from + ``arrayList[0]``. + formats, names, titles, aligned, byteorder : + If `dtype` is ``None``, these arguments are passed to + `numpy.format_parser` to construct a dtype. See that function for + detailed documentation. + + Returns + ------- + np.recarray + Record array consisting of given arrayList columns. + + Examples + -------- + >>> x1=np.array([1,2,3,4]) + >>> x2=np.array(['a','dd','xyz','12']) + >>> x3=np.array([1.1,2,3,4]) + >>> r = np.core.records.fromarrays([x1,x2,x3],names='a,b,c') + >>> print(r[1]) + (2, 'dd', 2.0) # may vary + >>> x1[1]=34 + >>> r.a + array([1, 2, 3, 4]) + + >>> x1 = np.array([1, 2, 3, 4]) + >>> x2 = np.array(['a', 'dd', 'xyz', '12']) + >>> x3 = np.array([1.1, 2, 3,4]) + >>> r = np.core.records.fromarrays( + ... [x1, x2, x3], + ... dtype=np.dtype([('a', np.int32), ('b', 'S3'), ('c', np.float32)])) + >>> r + rec.array([(1, b'a', 1.1), (2, b'dd', 2. ), (3, b'xyz', 3. ), + (4, b'12', 4. )], + dtype=[('a', '<i4'), ('b', 'S3'), ('c', '<f4')]) + """ + + arrayList = [sb.asarray(x) for x in arrayList] + + # NumPy 1.19.0, 2020-01-01 + shape = _deprecate_shape_0_as_None(shape) + + if shape is None: + shape = arrayList[0].shape + elif isinstance(shape, int): + shape = (shape,) + + if formats is None and dtype is None: + # go through each object in the list to see if it is an ndarray + # and determine the formats. + formats = [obj.dtype for obj in arrayList] + + if dtype is not None: + descr = sb.dtype(dtype) + else: + descr = format_parser(formats, names, titles, aligned, byteorder).dtype + _names = descr.names + + # Determine shape from data-type. 
+ if len(descr) != len(arrayList): + raise ValueError("mismatch between the number of fields " + "and the number of arrays") + + d0 = descr[0].shape + nn = len(d0) + if nn > 0: + shape = shape[:-nn] + + _array = recarray(shape, descr) + + # populate the record array (makes a copy) + for k, obj in enumerate(arrayList): + nn = descr[k].ndim + testshape = obj.shape[:obj.ndim - nn] + name = _names[k] + if testshape != shape: + raise ValueError(f'array-shape mismatch in array {k} ("{name}")') + + _array[name] = obj + + return _array + + +@set_module("numpy.rec") +def fromrecords(recList, dtype=None, shape=None, formats=None, names=None, + titles=None, aligned=False, byteorder=None): + """Create a recarray from a list of records in text form. + + Parameters + ---------- + recList : sequence + data in the same field may be heterogeneous - they will be promoted + to the highest data type. + dtype : data-type, optional + valid dtype for all arrays + shape : int or tuple of ints, optional + shape of each array. + formats, names, titles, aligned, byteorder : + If `dtype` is ``None``, these arguments are passed to + `numpy.format_parser` to construct a dtype. See that function for + detailed documentation. + + If both `formats` and `dtype` are None, then this will auto-detect + formats. Use list of tuples rather than list of lists for faster + processing. + + Returns + ------- + np.recarray + record array consisting of given recList rows. + + Examples + -------- + >>> r=np.core.records.fromrecords([(456,'dbe',1.2),(2,'de',1.3)], + ... names='col1,col2,col3') + >>> print(r[0]) + (456, 'dbe', 1.2) + >>> r.col1 + array([456, 2]) + >>> r.col2 + array(['dbe', 'de'], dtype='<U3') + >>> import pickle + >>> pickle.loads(pickle.dumps(r)) + rec.array([(456, 'dbe', 1.2), ( 2, 'de', 1.3)], + dtype=[('col1', '<i8'), ('col2', '<U3'), ('col3', '<f8')]) + """ + + if formats is None and dtype is None: # slower + obj = sb.array(recList, dtype=object) + arrlist = [sb.array(obj[..., i].tolist()) for i in range(obj.shape[-1])] + return fromarrays(arrlist, formats=formats, shape=shape, names=names, + titles=titles, aligned=aligned, byteorder=byteorder) + + if dtype is not None: + descr = sb.dtype((record, dtype)) + else: + descr = format_parser(formats, names, titles, aligned, byteorder).dtype + + try: + retval = sb.array(recList, dtype=descr) + except (TypeError, ValueError): + # NumPy 1.19.0, 2020-01-01 + shape = _deprecate_shape_0_as_None(shape) + if shape is None: + shape = len(recList) + if isinstance(shape, int): + shape = (shape,) + if len(shape) > 1: + raise ValueError("Can only deal with 1-d array.") + _array = recarray(shape, descr) + for k in range(_array.size): + _array[k] = tuple(recList[k]) + # list of lists instead of list of tuples ? + # 2018-02-07, 1.14.1 + warnings.warn( + "fromrecords expected a list of tuples, may have received a list " + "of lists instead. In the future that will raise an error", + FutureWarning, stacklevel=2) + return _array + else: + if shape is not None and retval.shape != shape: + retval.shape = shape + + res = retval.view(recarray) + + return res + + +@set_module("numpy.rec") +def fromstring(datastring, dtype=None, shape=None, offset=0, formats=None, + names=None, titles=None, aligned=False, byteorder=None): + r"""Create a record array from binary data + + Note that despite the name of this function it does not accept `str` + instances. 
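Relating to the shape handling at the end of ``fromarrays`` above: when a field carries a subarray shape, those trailing dimensions are stripped from the inferred record shape. A hedged sketch (not part of the patch; the field names ``vec``/``w`` and the values are made up):

import numpy as np

x = np.arange(6).reshape(2, 3)      # feeds the (3,)-subarray field 'vec'
w = np.array([1.0, 2.0])
r = np.rec.fromarrays([x, w], dtype=[('vec', 'i8', (3,)), ('w', 'f8')])
print(r.shape)   # (2,) -- the trailing (3,) belongs to the 'vec' field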
+ + Parameters + ---------- + datastring : bytes-like + Buffer of binary data + dtype : data-type, optional + Valid dtype for all arrays + shape : int or tuple of ints, optional + Shape of each array. + offset : int, optional + Position in the buffer to start reading from. + formats, names, titles, aligned, byteorder : + If `dtype` is ``None``, these arguments are passed to + `numpy.format_parser` to construct a dtype. See that function for + detailed documentation. + + + Returns + ------- + np.recarray + Record array view into the data in datastring. This will be readonly + if `datastring` is readonly. + + See Also + -------- + numpy.frombuffer + + Examples + -------- + >>> a = b'\x01\x02\x03abc' + >>> np.core.records.fromstring(a, dtype='u1,u1,u1,S3') + rec.array([(1, 2, 3, b'abc')], + dtype=[('f0', 'u1'), ('f1', 'u1'), ('f2', 'u1'), ('f3', 'S3')]) + + >>> grades_dtype = [('Name', (np.str_, 10)), ('Marks', np.float64), + ... ('GradeLevel', np.int32)] + >>> grades_array = np.array([('Sam', 33.3, 3), ('Mike', 44.4, 5), + ... ('Aadi', 66.6, 6)], dtype=grades_dtype) + >>> np.core.records.fromstring(grades_array.tobytes(), dtype=grades_dtype) + rec.array([('Sam', 33.3, 3), ('Mike', 44.4, 5), ('Aadi', 66.6, 6)], + dtype=[('Name', '<U10'), ('Marks', '<f8'), ('GradeLevel', '<i4')]) + + >>> s = '\x01\x02\x03abc' + >>> np.core.records.fromstring(s, dtype='u1,u1,u1,S3') + Traceback (most recent call last) + ... + TypeError: a bytes-like object is required, not 'str' + """ + + if dtype is None and formats is None: + raise TypeError("fromstring() needs a 'dtype' or 'formats' argument") + + if dtype is not None: + descr = sb.dtype(dtype) + else: + descr = format_parser(formats, names, titles, aligned, byteorder).dtype + + itemsize = descr.itemsize + + # NumPy 1.19.0, 2020-01-01 + shape = _deprecate_shape_0_as_None(shape) + + if shape in (None, -1): + shape = (len(datastring) - offset) // itemsize + + _array = recarray(shape, descr, buf=datastring, offset=offset) + return _array + +def get_remaining_size(fd): + pos = fd.tell() + try: + fd.seek(0, 2) + return fd.tell() - pos + finally: + fd.seek(pos, 0) + + +@set_module("numpy.rec") +def fromfile(fd, dtype=None, shape=None, offset=0, formats=None, + names=None, titles=None, aligned=False, byteorder=None): + """Create an array from binary file data + + Parameters + ---------- + fd : str or file type + If file is a string or a path-like object then that file is opened, + else it is assumed to be a file object. The file object must + support random access (i.e. it must have tell and seek methods). + dtype : data-type, optional + valid dtype for all arrays + shape : int or tuple of ints, optional + shape of each array. + offset : int, optional + Position in the file to start reading from. + formats, names, titles, aligned, byteorder : + If `dtype` is ``None``, these arguments are passed to + `numpy.format_parser` to construct a dtype. See that function for + detailed documentation + + Returns + ------- + np.recarray + record array consisting of data enclosed in file. + + Examples + -------- + >>> from tempfile import TemporaryFile + >>> a = np.empty(10,dtype='f8,i4,a5') + >>> a[5] = (0.5,10,'abcde') + >>> + >>> fd=TemporaryFile() + >>> a = a.newbyteorder('<') + >>> a.tofile(fd) + >>> + >>> _ = fd.seek(0) + >>> r=np.core.records.fromfile(fd, formats='f8,i4,a5', shape=10, + ... 
byteorder='<') + >>> print(r[5]) + (0.5, 10, 'abcde') + >>> r.shape + (10,) + """ + + if dtype is None and formats is None: + raise TypeError("fromfile() needs a 'dtype' or 'formats' argument") + + # NumPy 1.19.0, 2020-01-01 + shape = _deprecate_shape_0_as_None(shape) + + if shape is None: + shape = (-1,) + elif isinstance(shape, int): + shape = (shape,) + + if hasattr(fd, 'readinto'): + # GH issue 2504. fd supports io.RawIOBase or io.BufferedIOBase interface. + # Example of fd: gzip, BytesIO, BufferedReader + # file already opened + ctx = nullcontext(fd) + else: + # open file + ctx = open(os_fspath(fd), 'rb') + + with ctx as fd: + if offset > 0: + fd.seek(offset, 1) + size = get_remaining_size(fd) + + if dtype is not None: + descr = sb.dtype(dtype) + else: + descr = format_parser(formats, names, titles, aligned, byteorder).dtype + + itemsize = descr.itemsize + + shapeprod = sb.array(shape).prod(dtype=nt.intp) + shapesize = shapeprod * itemsize + if shapesize < 0: + shape = list(shape) + shape[shape.index(-1)] = size // -shapesize + shape = tuple(shape) + shapeprod = sb.array(shape).prod(dtype=nt.intp) + + nbytes = shapeprod * itemsize + + if nbytes > size: + raise ValueError( + "Not enough bytes left in file for specified shape and type") + + # create the array + _array = recarray(shape, descr) + nbytesread = fd.readinto(_array.data) + if nbytesread != nbytes: + raise OSError("Didn't read as many bytes as expected") + + return _array + + +@set_module("numpy.rec") +def array(obj, dtype=None, shape=None, offset=0, strides=None, formats=None, + names=None, titles=None, aligned=False, byteorder=None, copy=True): + """ + Construct a record array from a wide-variety of objects. + + A general-purpose record array constructor that dispatches to the + appropriate `recarray` creation function based on the inputs (see Notes). + + Parameters + ---------- + obj : any + Input object. See Notes for details on how various input types are + treated. + dtype : data-type, optional + Valid dtype for array. + shape : int or tuple of ints, optional + Shape of each array. + offset : int, optional + Position in the file or buffer to start reading from. + strides : tuple of ints, optional + Buffer (`buf`) is interpreted according to these strides (strides + define how many bytes each array element, row, column, etc. + occupy in memory). + formats, names, titles, aligned, byteorder : + If `dtype` is ``None``, these arguments are passed to + `numpy.format_parser` to construct a dtype. See that function for + detailed documentation. + copy : bool, optional + Whether to copy the input object (True), or to use a reference instead. + This option only applies when the input is an ndarray or recarray. + Defaults to True. + + Returns + ------- + np.recarray + Record array created from the specified object. + + Notes + ----- + If `obj` is ``None``, then call the `~numpy.recarray` constructor. If + `obj` is a string, then call the `fromstring` constructor. If `obj` is a + list or a tuple, then if the first object is an `~numpy.ndarray`, call + `fromarrays`, otherwise call `fromrecords`. If `obj` is a + `~numpy.recarray`, then make a copy of the data in the recarray + (if ``copy=True``) and use the new formats, names, and titles. If `obj` + is a file, then call `fromfile`. Finally, if obj is an `ndarray`, then + return ``obj.view(recarray)``, making a copy of the data if ``copy=True``. 
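A compact round trip through ``fromstring`` and ``fromfile`` above (an assumed usage sketch, not part of the patch; the dtype and payload are made up). Any object with ``readinto``, such as ``io.BytesIO``, is accepted by ``fromfile`` without opening a path.

import io
import numpy as np

dt = np.dtype('i4,f8')
payload = np.array([(1, 2.0), (3, 4.0)], dtype=dt).tobytes()
r1 = np.rec.fromstring(payload, dtype=dt)                     # from a bytes buffer
r2 = np.rec.fromfile(io.BytesIO(payload), dtype=dt, shape=2)  # from a file-like object
print(r1.f0, r2.f1)   # [1 3] [2. 4.]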
+ + Examples + -------- + >>> a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) + array([[1, 2, 3], + [4, 5, 6], + [7, 8, 9]]) + + >>> np.core.records.array(a) + rec.array([[1, 2, 3], + [4, 5, 6], + [7, 8, 9]], + dtype=int32) + + >>> b = [(1, 1), (2, 4), (3, 9)] + >>> c = np.core.records.array(b, formats = ['i2', 'f2'], names = ('x', 'y')) + >>> c + rec.array([(1, 1.0), (2, 4.0), (3, 9.0)], + dtype=[('x', '<i2'), ('y', '<f2')]) + + >>> c.x + rec.array([1, 2, 3], dtype=int16) + + >>> c.y + rec.array([ 1.0, 4.0, 9.0], dtype=float16) + + >>> r = np.rec.array(['abc','def'], names=['col1','col2']) + >>> print(r.col1) + abc + + >>> r.col1 + array('abc', dtype='<U3') + + >>> r.col2 + array('def', dtype='<U3') + """ + + if ((isinstance(obj, (type(None), str)) or hasattr(obj, 'readinto')) and + formats is None and dtype is None): + raise ValueError("Must define formats (or dtype) if object is " + "None, string, or an open file") + + kwds = {} + if dtype is not None: + dtype = sb.dtype(dtype) + elif formats is not None: + dtype = format_parser(formats, names, titles, + aligned, byteorder).dtype + else: + kwds = {'formats': formats, + 'names': names, + 'titles': titles, + 'aligned': aligned, + 'byteorder': byteorder + } + + if obj is None: + if shape is None: + raise ValueError("Must define a shape if obj is None") + return recarray(shape, dtype, buf=obj, offset=offset, strides=strides) + + elif isinstance(obj, bytes): + return fromstring(obj, dtype, shape=shape, offset=offset, **kwds) + + elif isinstance(obj, (list, tuple)): + if isinstance(obj[0], (tuple, list)): + return fromrecords(obj, dtype=dtype, shape=shape, **kwds) + else: + return fromarrays(obj, dtype=dtype, shape=shape, **kwds) + + elif isinstance(obj, recarray): + if dtype is not None and (obj.dtype != dtype): + new = obj.view(dtype) + else: + new = obj + if copy: + new = new.copy() + return new + + elif hasattr(obj, 'readinto'): + return fromfile(obj, dtype=dtype, shape=shape, offset=offset) + + elif isinstance(obj, ndarray): + if dtype is not None and (obj.dtype != dtype): + new = obj.view(dtype) + else: + new = obj + if copy: + new = new.copy() + return new.view(recarray) + + else: + interface = getattr(obj, "__array_interface__", None) + if interface is None or not isinstance(interface, dict): + raise ValueError("Unknown input type") + obj = sb.array(obj) + if dtype is not None and (obj.dtype != dtype): + obj = obj.view(dtype) + return obj.view(recarray) diff --git a/.venv/lib/python3.12/site-packages/numpy/core/records.pyi b/.venv/lib/python3.12/site-packages/numpy/core/records.pyi new file mode 100644 index 00000000..d3bbe0e7 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/records.pyi @@ -0,0 +1,234 @@ +import os +from collections.abc import Sequence, Iterable +from typing import ( + Any, + TypeVar, + overload, + Protocol, +) + +from numpy import ( + format_parser as format_parser, + record as record, + recarray as recarray, + dtype, + generic, + void, + _ByteOrder, + _SupportsBuffer, +) + +from numpy._typing import ( + ArrayLike, + DTypeLike, + NDArray, + _ShapeLike, + _ArrayLikeVoid_co, + _NestedSequence, +) + +_SCT = TypeVar("_SCT", bound=generic) + +_RecArray = recarray[Any, dtype[_SCT]] + +class _SupportsReadInto(Protocol): + def seek(self, offset: int, whence: int, /) -> object: ... + def tell(self, /) -> int: ... + def readinto(self, buffer: memoryview, /) -> int: ... 
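A compact sketch of the dispatch described in the Notes above (illustrative, not part of the patch; sample values are made up): a list of tuples is routed to ``fromrecords``, while an existing structured ndarray is, with ``copy=False``, simply viewed as a recarray.

import numpy as np

r1 = np.rec.array([(1, 1.0), (2, 4.0)], formats='i4,f8', names='x,y')  # -> fromrecords
plain = np.array([(1, 1.0)], dtype=[('x', '<i4'), ('y', '<f8')])
r2 = np.rec.array(plain, copy=False)   # -> plain.view(recarray), no data copy
print(type(r1), r2.x)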
+ +__all__: list[str] + +@overload +def fromarrays( + arrayList: Iterable[ArrayLike], + dtype: DTypeLike = ..., + shape: None | _ShapeLike = ..., + formats: None = ..., + names: None = ..., + titles: None = ..., + aligned: bool = ..., + byteorder: None = ..., +) -> _RecArray[Any]: ... +@overload +def fromarrays( + arrayList: Iterable[ArrayLike], + dtype: None = ..., + shape: None | _ShapeLike = ..., + *, + formats: DTypeLike, + names: None | str | Sequence[str] = ..., + titles: None | str | Sequence[str] = ..., + aligned: bool = ..., + byteorder: None | _ByteOrder = ..., +) -> _RecArray[record]: ... + +@overload +def fromrecords( + recList: _ArrayLikeVoid_co | tuple[Any, ...] | _NestedSequence[tuple[Any, ...]], + dtype: DTypeLike = ..., + shape: None | _ShapeLike = ..., + formats: None = ..., + names: None = ..., + titles: None = ..., + aligned: bool = ..., + byteorder: None = ..., +) -> _RecArray[record]: ... +@overload +def fromrecords( + recList: _ArrayLikeVoid_co | tuple[Any, ...] | _NestedSequence[tuple[Any, ...]], + dtype: None = ..., + shape: None | _ShapeLike = ..., + *, + formats: DTypeLike, + names: None | str | Sequence[str] = ..., + titles: None | str | Sequence[str] = ..., + aligned: bool = ..., + byteorder: None | _ByteOrder = ..., +) -> _RecArray[record]: ... + +@overload +def fromstring( + datastring: _SupportsBuffer, + dtype: DTypeLike, + shape: None | _ShapeLike = ..., + offset: int = ..., + formats: None = ..., + names: None = ..., + titles: None = ..., + aligned: bool = ..., + byteorder: None = ..., +) -> _RecArray[record]: ... +@overload +def fromstring( + datastring: _SupportsBuffer, + dtype: None = ..., + shape: None | _ShapeLike = ..., + offset: int = ..., + *, + formats: DTypeLike, + names: None | str | Sequence[str] = ..., + titles: None | str | Sequence[str] = ..., + aligned: bool = ..., + byteorder: None | _ByteOrder = ..., +) -> _RecArray[record]: ... + +@overload +def fromfile( + fd: str | bytes | os.PathLike[str] | os.PathLike[bytes] | _SupportsReadInto, + dtype: DTypeLike, + shape: None | _ShapeLike = ..., + offset: int = ..., + formats: None = ..., + names: None = ..., + titles: None = ..., + aligned: bool = ..., + byteorder: None = ..., +) -> _RecArray[Any]: ... +@overload +def fromfile( + fd: str | bytes | os.PathLike[str] | os.PathLike[bytes] | _SupportsReadInto, + dtype: None = ..., + shape: None | _ShapeLike = ..., + offset: int = ..., + *, + formats: DTypeLike, + names: None | str | Sequence[str] = ..., + titles: None | str | Sequence[str] = ..., + aligned: bool = ..., + byteorder: None | _ByteOrder = ..., +) -> _RecArray[record]: ... + +@overload +def array( + obj: _SCT | NDArray[_SCT], + dtype: None = ..., + shape: None | _ShapeLike = ..., + offset: int = ..., + formats: None = ..., + names: None = ..., + titles: None = ..., + aligned: bool = ..., + byteorder: None = ..., + copy: bool = ..., +) -> _RecArray[_SCT]: ... +@overload +def array( + obj: ArrayLike, + dtype: DTypeLike, + shape: None | _ShapeLike = ..., + offset: int = ..., + formats: None = ..., + names: None = ..., + titles: None = ..., + aligned: bool = ..., + byteorder: None = ..., + copy: bool = ..., +) -> _RecArray[Any]: ... +@overload +def array( + obj: ArrayLike, + dtype: None = ..., + shape: None | _ShapeLike = ..., + offset: int = ..., + *, + formats: DTypeLike, + names: None | str | Sequence[str] = ..., + titles: None | str | Sequence[str] = ..., + aligned: bool = ..., + byteorder: None | _ByteOrder = ..., + copy: bool = ..., +) -> _RecArray[record]: ... 
+@overload +def array( + obj: None, + dtype: DTypeLike, + shape: _ShapeLike, + offset: int = ..., + formats: None = ..., + names: None = ..., + titles: None = ..., + aligned: bool = ..., + byteorder: None = ..., + copy: bool = ..., +) -> _RecArray[Any]: ... +@overload +def array( + obj: None, + dtype: None = ..., + *, + shape: _ShapeLike, + offset: int = ..., + formats: DTypeLike, + names: None | str | Sequence[str] = ..., + titles: None | str | Sequence[str] = ..., + aligned: bool = ..., + byteorder: None | _ByteOrder = ..., + copy: bool = ..., +) -> _RecArray[record]: ... +@overload +def array( + obj: _SupportsReadInto, + dtype: DTypeLike, + shape: None | _ShapeLike = ..., + offset: int = ..., + formats: None = ..., + names: None = ..., + titles: None = ..., + aligned: bool = ..., + byteorder: None = ..., + copy: bool = ..., +) -> _RecArray[Any]: ... +@overload +def array( + obj: _SupportsReadInto, + dtype: None = ..., + shape: None | _ShapeLike = ..., + offset: int = ..., + *, + formats: DTypeLike, + names: None | str | Sequence[str] = ..., + titles: None | str | Sequence[str] = ..., + aligned: bool = ..., + byteorder: None | _ByteOrder = ..., + copy: bool = ..., +) -> _RecArray[record]: ... diff --git a/.venv/lib/python3.12/site-packages/numpy/core/shape_base.py b/.venv/lib/python3.12/site-packages/numpy/core/shape_base.py new file mode 100644 index 00000000..250fffd4 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/shape_base.py @@ -0,0 +1,923 @@ +__all__ = ['atleast_1d', 'atleast_2d', 'atleast_3d', 'block', 'hstack', + 'stack', 'vstack'] + +import functools +import itertools +import operator +import warnings + +from . import numeric as _nx +from . import overrides +from .multiarray import array, asanyarray, normalize_axis_index +from . import fromnumeric as _from_nx + + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy') + + +def _atleast_1d_dispatcher(*arys): + return arys + + +@array_function_dispatch(_atleast_1d_dispatcher) +def atleast_1d(*arys): + """ + Convert inputs to arrays with at least one dimension. + + Scalar inputs are converted to 1-dimensional arrays, whilst + higher-dimensional inputs are preserved. + + Parameters + ---------- + arys1, arys2, ... : array_like + One or more input arrays. + + Returns + ------- + ret : ndarray + An array, or list of arrays, each with ``a.ndim >= 1``. + Copies are made only if necessary. + + See Also + -------- + atleast_2d, atleast_3d + + Examples + -------- + >>> np.atleast_1d(1.0) + array([1.]) + + >>> x = np.arange(9.0).reshape(3,3) + >>> np.atleast_1d(x) + array([[0., 1., 2.], + [3., 4., 5.], + [6., 7., 8.]]) + >>> np.atleast_1d(x) is x + True + + >>> np.atleast_1d(1, [3, 4]) + [array([1]), array([3, 4])] + + """ + res = [] + for ary in arys: + ary = asanyarray(ary) + if ary.ndim == 0: + result = ary.reshape(1) + else: + result = ary + res.append(result) + if len(res) == 1: + return res[0] + else: + return res + + +def _atleast_2d_dispatcher(*arys): + return arys + + +@array_function_dispatch(_atleast_2d_dispatcher) +def atleast_2d(*arys): + """ + View inputs as arrays with at least two dimensions. + + Parameters + ---------- + arys1, arys2, ... : array_like + One or more array-like sequences. Non-array inputs are converted + to arrays. Arrays that already have two or more dimensions are + preserved. + + Returns + ------- + res, res2, ... : ndarray + An array, or list of arrays, each with ``a.ndim >= 2``. 
+ Copies are avoided where possible, and views with two or more + dimensions are returned. + + See Also + -------- + atleast_1d, atleast_3d + + Examples + -------- + >>> np.atleast_2d(3.0) + array([[3.]]) + + >>> x = np.arange(3.0) + >>> np.atleast_2d(x) + array([[0., 1., 2.]]) + >>> np.atleast_2d(x).base is x + True + + >>> np.atleast_2d(1, [1, 2], [[1, 2]]) + [array([[1]]), array([[1, 2]]), array([[1, 2]])] + + """ + res = [] + for ary in arys: + ary = asanyarray(ary) + if ary.ndim == 0: + result = ary.reshape(1, 1) + elif ary.ndim == 1: + result = ary[_nx.newaxis, :] + else: + result = ary + res.append(result) + if len(res) == 1: + return res[0] + else: + return res + + +def _atleast_3d_dispatcher(*arys): + return arys + + +@array_function_dispatch(_atleast_3d_dispatcher) +def atleast_3d(*arys): + """ + View inputs as arrays with at least three dimensions. + + Parameters + ---------- + arys1, arys2, ... : array_like + One or more array-like sequences. Non-array inputs are converted to + arrays. Arrays that already have three or more dimensions are + preserved. + + Returns + ------- + res1, res2, ... : ndarray + An array, or list of arrays, each with ``a.ndim >= 3``. Copies are + avoided where possible, and views with three or more dimensions are + returned. For example, a 1-D array of shape ``(N,)`` becomes a view + of shape ``(1, N, 1)``, and a 2-D array of shape ``(M, N)`` becomes a + view of shape ``(M, N, 1)``. + + See Also + -------- + atleast_1d, atleast_2d + + Examples + -------- + >>> np.atleast_3d(3.0) + array([[[3.]]]) + + >>> x = np.arange(3.0) + >>> np.atleast_3d(x).shape + (1, 3, 1) + + >>> x = np.arange(12.0).reshape(4,3) + >>> np.atleast_3d(x).shape + (4, 3, 1) + >>> np.atleast_3d(x).base is x.base # x is a reshape, so not base itself + True + + >>> for arr in np.atleast_3d([1, 2], [[1, 2]], [[[1, 2]]]): + ... print(arr, arr.shape) # doctest: +SKIP + ... + [[[1] + [2]]] (1, 2, 1) + [[[1] + [2]]] (1, 2, 1) + [[[1 2]]] (1, 1, 2) + + """ + res = [] + for ary in arys: + ary = asanyarray(ary) + if ary.ndim == 0: + result = ary.reshape(1, 1, 1) + elif ary.ndim == 1: + result = ary[_nx.newaxis, :, _nx.newaxis] + elif ary.ndim == 2: + result = ary[:, :, _nx.newaxis] + else: + result = ary + res.append(result) + if len(res) == 1: + return res[0] + else: + return res + + +def _arrays_for_stack_dispatcher(arrays): + if not hasattr(arrays, "__getitem__"): + raise TypeError('arrays to stack must be passed as a "sequence" type ' + 'such as list or tuple.') + + return tuple(arrays) + + +def _vhstack_dispatcher(tup, *, dtype=None, casting=None): + return _arrays_for_stack_dispatcher(tup) + + +@array_function_dispatch(_vhstack_dispatcher) +def vstack(tup, *, dtype=None, casting="same_kind"): + """ + Stack arrays in sequence vertically (row wise). + + This is equivalent to concatenation along the first axis after 1-D arrays + of shape `(N,)` have been reshaped to `(1,N)`. Rebuilds arrays divided by + `vsplit`. + + This function makes most sense for arrays with up to 3 dimensions. For + instance, for pixel-data with a height (first axis), width (second axis), + and r/g/b channels (third axis). The functions `concatenate`, `stack` and + `block` provide more general stacking and concatenation operations. + + ``np.row_stack`` is an alias for `vstack`. They are the same function. + + Parameters + ---------- + tup : sequence of ndarrays + The arrays must have the same shape along all but the first axis. + 1-D arrays must have the same length. 
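As a quick side-by-side for the three helpers above (an illustrative sketch, not part of the patch), the same 1-D input is promoted as follows, and several inputs come back as a list:

import numpy as np

x = np.arange(3.0)                     # shape (3,)
print(np.atleast_1d(x).shape)          # (3,)  -- already 1-D, returned as-is
print(np.atleast_2d(x).shape)          # (1, 3)
print(np.atleast_3d(x).shape)          # (1, 3, 1)
a, b = np.atleast_2d(1, [[3, 4]])      # multiple inputs -> list of arrays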
+ + dtype : str or dtype + If provided, the destination array will have this dtype. Cannot be + provided together with `out`. + + .. versionadded:: 1.24 + + casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional + Controls what kind of data casting may occur. Defaults to 'same_kind'. + + .. versionadded:: 1.24 + + Returns + ------- + stacked : ndarray + The array formed by stacking the given arrays, will be at least 2-D. + + See Also + -------- + concatenate : Join a sequence of arrays along an existing axis. + stack : Join a sequence of arrays along a new axis. + block : Assemble an nd-array from nested lists of blocks. + hstack : Stack arrays in sequence horizontally (column wise). + dstack : Stack arrays in sequence depth wise (along third axis). + column_stack : Stack 1-D arrays as columns into a 2-D array. + vsplit : Split an array into multiple sub-arrays vertically (row-wise). + + Examples + -------- + >>> a = np.array([1, 2, 3]) + >>> b = np.array([4, 5, 6]) + >>> np.vstack((a,b)) + array([[1, 2, 3], + [4, 5, 6]]) + + >>> a = np.array([[1], [2], [3]]) + >>> b = np.array([[4], [5], [6]]) + >>> np.vstack((a,b)) + array([[1], + [2], + [3], + [4], + [5], + [6]]) + + """ + arrs = atleast_2d(*tup) + if not isinstance(arrs, list): + arrs = [arrs] + return _nx.concatenate(arrs, 0, dtype=dtype, casting=casting) + + +@array_function_dispatch(_vhstack_dispatcher) +def hstack(tup, *, dtype=None, casting="same_kind"): + """ + Stack arrays in sequence horizontally (column wise). + + This is equivalent to concatenation along the second axis, except for 1-D + arrays where it concatenates along the first axis. Rebuilds arrays divided + by `hsplit`. + + This function makes most sense for arrays with up to 3 dimensions. For + instance, for pixel-data with a height (first axis), width (second axis), + and r/g/b channels (third axis). The functions `concatenate`, `stack` and + `block` provide more general stacking and concatenation operations. + + Parameters + ---------- + tup : sequence of ndarrays + The arrays must have the same shape along all but the second axis, + except 1-D arrays which can be any length. + + dtype : str or dtype + If provided, the destination array will have this dtype. Cannot be + provided together with `out`. + + .. versionadded:: 1.24 + + casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional + Controls what kind of data casting may occur. Defaults to 'same_kind'. + + .. versionadded:: 1.24 + + Returns + ------- + stacked : ndarray + The array formed by stacking the given arrays. + + See Also + -------- + concatenate : Join a sequence of arrays along an existing axis. + stack : Join a sequence of arrays along a new axis. + block : Assemble an nd-array from nested lists of blocks. + vstack : Stack arrays in sequence vertically (row wise). + dstack : Stack arrays in sequence depth wise (along third axis). + column_stack : Stack 1-D arrays as columns into a 2-D array. + hsplit : Split an array into multiple sub-arrays horizontally (column-wise). 
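The ``dtype``/``casting`` keywords added in 1.24 are documented above but not exercised in the docstring examples; a hedged sketch (sample arrays are made up):

import numpy as np

a = np.array([1, 2, 3])
b = np.array([4.5, 5.5, 6.5])
v32 = np.vstack((a, b), dtype=np.float32)                   # force the result dtype
v64 = np.vstack((a, b), dtype=np.int64, casting='unsafe')   # truncating floats needs 'unsafe'
print(v32.dtype, v64.dtype)   # float32 int64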
+ + Examples + -------- + >>> a = np.array((1,2,3)) + >>> b = np.array((4,5,6)) + >>> np.hstack((a,b)) + array([1, 2, 3, 4, 5, 6]) + >>> a = np.array([[1],[2],[3]]) + >>> b = np.array([[4],[5],[6]]) + >>> np.hstack((a,b)) + array([[1, 4], + [2, 5], + [3, 6]]) + + """ + arrs = atleast_1d(*tup) + if not isinstance(arrs, list): + arrs = [arrs] + # As a special case, dimension 0 of 1-dimensional arrays is "horizontal" + if arrs and arrs[0].ndim == 1: + return _nx.concatenate(arrs, 0, dtype=dtype, casting=casting) + else: + return _nx.concatenate(arrs, 1, dtype=dtype, casting=casting) + + +def _stack_dispatcher(arrays, axis=None, out=None, *, + dtype=None, casting=None): + arrays = _arrays_for_stack_dispatcher(arrays) + if out is not None: + # optimize for the typical case where only arrays is provided + arrays = list(arrays) + arrays.append(out) + return arrays + + +@array_function_dispatch(_stack_dispatcher) +def stack(arrays, axis=0, out=None, *, dtype=None, casting="same_kind"): + """ + Join a sequence of arrays along a new axis. + + The ``axis`` parameter specifies the index of the new axis in the + dimensions of the result. For example, if ``axis=0`` it will be the first + dimension and if ``axis=-1`` it will be the last dimension. + + .. versionadded:: 1.10.0 + + Parameters + ---------- + arrays : sequence of array_like + Each array must have the same shape. + + axis : int, optional + The axis in the result array along which the input arrays are stacked. + + out : ndarray, optional + If provided, the destination to place the result. The shape must be + correct, matching that of what stack would have returned if no + out argument were specified. + + dtype : str or dtype + If provided, the destination array will have this dtype. Cannot be + provided together with `out`. + + .. versionadded:: 1.24 + + casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional + Controls what kind of data casting may occur. Defaults to 'same_kind'. + + .. versionadded:: 1.24 + + + Returns + ------- + stacked : ndarray + The stacked array has one more dimension than the input arrays. + + See Also + -------- + concatenate : Join a sequence of arrays along an existing axis. + block : Assemble an nd-array from nested lists of blocks. + split : Split array into a list of multiple sub-arrays of equal size. + + Examples + -------- + >>> arrays = [np.random.randn(3, 4) for _ in range(10)] + >>> np.stack(arrays, axis=0).shape + (10, 3, 4) + + >>> np.stack(arrays, axis=1).shape + (3, 10, 4) + + >>> np.stack(arrays, axis=2).shape + (3, 4, 10) + + >>> a = np.array([1, 2, 3]) + >>> b = np.array([4, 5, 6]) + >>> np.stack((a, b)) + array([[1, 2, 3], + [4, 5, 6]]) + + >>> np.stack((a, b), axis=-1) + array([[1, 4], + [2, 5], + [3, 6]]) + + """ + arrays = [asanyarray(arr) for arr in arrays] + if not arrays: + raise ValueError('need at least one array to stack') + + shapes = {arr.shape for arr in arrays} + if len(shapes) != 1: + raise ValueError('all input arrays must have the same shape') + + result_ndim = arrays[0].ndim + 1 + axis = normalize_axis_index(axis, result_ndim) + + sl = (slice(None),) * axis + (_nx.newaxis,) + expanded_arrays = [arr[sl] for arr in arrays] + return _nx.concatenate(expanded_arrays, axis=axis, out=out, + dtype=dtype, casting=casting) + + +# Internal functions to eliminate the overhead of repeated dispatch in one of +# the two possible paths inside np.block. +# Use getattr to protect against __array_function__ being disabled. 
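One detail of ``stack`` above that the docstring examples do not show (an assumed usage sketch, not part of the patch): the ``out`` parameter, whose shape must match the stacked result exactly.

import numpy as np

a = np.array([1, 2, 3])
b = np.array([4, 5, 6])
out = np.empty((2, 3), dtype=np.int64)
np.stack((a, b), axis=0, out=out)   # result is written into `out`
print(out)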
+_size = getattr(_from_nx.size, '__wrapped__', _from_nx.size) +_ndim = getattr(_from_nx.ndim, '__wrapped__', _from_nx.ndim) +_concatenate = getattr(_from_nx.concatenate, + '__wrapped__', _from_nx.concatenate) + + +def _block_format_index(index): + """ + Convert a list of indices ``[0, 1, 2]`` into ``"arrays[0][1][2]"``. + """ + idx_str = ''.join('[{}]'.format(i) for i in index if i is not None) + return 'arrays' + idx_str + + +def _block_check_depths_match(arrays, parent_index=[]): + """ + Recursive function checking that the depths of nested lists in `arrays` + all match. Mismatch raises a ValueError as described in the block + docstring below. + + The entire index (rather than just the depth) needs to be calculated + for each innermost list, in case an error needs to be raised, so that + the index of the offending list can be printed as part of the error. + + Parameters + ---------- + arrays : nested list of arrays + The arrays to check + parent_index : list of int + The full index of `arrays` within the nested lists passed to + `_block_check_depths_match` at the top of the recursion. + + Returns + ------- + first_index : list of int + The full index of an element from the bottom of the nesting in + `arrays`. If any element at the bottom is an empty list, this will + refer to it, and the last index along the empty axis will be None. + max_arr_ndim : int + The maximum of the ndims of the arrays nested in `arrays`. + final_size: int + The number of elements in the final array. This is used the motivate + the choice of algorithm used using benchmarking wisdom. + + """ + if type(arrays) is tuple: + # not strictly necessary, but saves us from: + # - more than one way to do things - no point treating tuples like + # lists + # - horribly confusing behaviour that results when tuples are + # treated like ndarray + raise TypeError( + '{} is a tuple. ' + 'Only lists can be used to arrange blocks, and np.block does ' + 'not allow implicit conversion from tuple to ndarray.'.format( + _block_format_index(parent_index) + ) + ) + elif type(arrays) is list and len(arrays) > 0: + idxs_ndims = (_block_check_depths_match(arr, parent_index + [i]) + for i, arr in enumerate(arrays)) + + first_index, max_arr_ndim, final_size = next(idxs_ndims) + for index, ndim, size in idxs_ndims: + final_size += size + if ndim > max_arr_ndim: + max_arr_ndim = ndim + if len(index) != len(first_index): + raise ValueError( + "List depths are mismatched. First element was at depth " + "{}, but there is an element at depth {} ({})".format( + len(first_index), + len(index), + _block_format_index(index) + ) + ) + # propagate our flag that indicates an empty list at the bottom + if index[-1] is None: + first_index = index + + return first_index, max_arr_ndim, final_size + elif type(arrays) is list and len(arrays) == 0: + # We've 'bottomed out' on an empty list + return parent_index + [None], 0, 0 + else: + # We've 'bottomed out' - arrays is either a scalar or an array + size = _size(arrays) + return parent_index, _ndim(arrays), size + + +def _atleast_nd(a, ndim): + # Ensures `a` has at least `ndim` dimensions by prepending + # ones to `a.shape` as necessary + return array(a, ndmin=ndim, copy=False, subok=True) + + +def _accumulate(values): + return list(itertools.accumulate(values)) + + +def _concatenate_shapes(shapes, axis): + """Given array shapes, return the resulting shape and slices prefixes. + + These help in nested concatenation. 
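``_block_check_depths_match`` above is what produces the depth-mismatch error mentioned in the ``block`` docstring; a small demonstration through the public API (illustrative, not part of the patch; sample blocks are made up):

import numpy as np

a = np.ones((2, 2))
b = np.zeros((2, 2))
np.block([[a, b], [b, a]])     # consistent nesting -> a 4x4 block matrix
try:
    np.block([[a, b], a])      # arrays[1] sits one nesting level too shallow
except ValueError as exc:
    print(exc)                 # "List depths are mismatched. ..."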
+ + Returns + ------- + shape: tuple of int + This tuple satisfies:: + + shape, _ = _concatenate_shapes([arr.shape for shape in arrs], axis) + shape == concatenate(arrs, axis).shape + + slice_prefixes: tuple of (slice(start, end), ) + For a list of arrays being concatenated, this returns the slice + in the larger array at axis that needs to be sliced into. + + For example, the following holds:: + + ret = concatenate([a, b, c], axis) + _, (sl_a, sl_b, sl_c) = concatenate_slices([a, b, c], axis) + + ret[(slice(None),) * axis + sl_a] == a + ret[(slice(None),) * axis + sl_b] == b + ret[(slice(None),) * axis + sl_c] == c + + These are called slice prefixes since they are used in the recursive + blocking algorithm to compute the left-most slices during the + recursion. Therefore, they must be prepended to rest of the slice + that was computed deeper in the recursion. + + These are returned as tuples to ensure that they can quickly be added + to existing slice tuple without creating a new tuple every time. + + """ + # Cache a result that will be reused. + shape_at_axis = [shape[axis] for shape in shapes] + + # Take a shape, any shape + first_shape = shapes[0] + first_shape_pre = first_shape[:axis] + first_shape_post = first_shape[axis+1:] + + if any(shape[:axis] != first_shape_pre or + shape[axis+1:] != first_shape_post for shape in shapes): + raise ValueError( + 'Mismatched array shapes in block along axis {}.'.format(axis)) + + shape = (first_shape_pre + (sum(shape_at_axis),) + first_shape[axis+1:]) + + offsets_at_axis = _accumulate(shape_at_axis) + slice_prefixes = [(slice(start, end),) + for start, end in zip([0] + offsets_at_axis, + offsets_at_axis)] + return shape, slice_prefixes + + +def _block_info_recursion(arrays, max_depth, result_ndim, depth=0): + """ + Returns the shape of the final array, along with a list + of slices and a list of arrays that can be used for assignment inside the + new array + + Parameters + ---------- + arrays : nested list of arrays + The arrays to check + max_depth : list of int + The number of nested lists + result_ndim : int + The number of dimensions in thefinal array. + + Returns + ------- + shape : tuple of int + The shape that the final array will take on. + slices: list of tuple of slices + The slices into the full array required for assignment. These are + required to be prepended with ``(Ellipsis, )`` to obtain to correct + final index. + arrays: list of ndarray + The data to assign to each slice of the full array + + """ + if depth < max_depth: + shapes, slices, arrays = zip( + *[_block_info_recursion(arr, max_depth, result_ndim, depth+1) + for arr in arrays]) + + axis = result_ndim - max_depth + depth + shape, slice_prefixes = _concatenate_shapes(shapes, axis) + + # Prepend the slice prefix and flatten the slices + slices = [slice_prefix + the_slice + for slice_prefix, inner_slices in zip(slice_prefixes, slices) + for the_slice in inner_slices] + + # Flatten the array list + arrays = functools.reduce(operator.add, arrays) + + return shape, slices, arrays + else: + # We've 'bottomed out' - arrays is either a scalar or an array + # type(arrays) is not list + # Return the slice and the array inside a list to be consistent with + # the recursive case. + arr = _atleast_nd(arrays, result_ndim) + return arr.shape, [()], [arr] + + +def _block(arrays, max_depth, result_ndim, depth=0): + """ + Internal implementation of block based on repeated concatenation. + `arrays` is the argument passed to + block. 
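The slice-prefix bookkeeping above can be checked by hand with plain ``concatenate`` (a conceptual sketch using only public API, not part of the patch):

import numpy as np

a = np.ones((2, 3))
b = np.zeros((2, 5))
out = np.concatenate([a, b], axis=1)   # shape (2, 8)
assert (out[:, 0:3] == a).all()        # slice prefix covering `a`
assert (out[:, 3:8] == b).all()        # slice prefix covering `b`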
`max_depth` is the depth of nested lists within `arrays` and + `result_ndim` is the greatest of the dimensions of the arrays in + `arrays` and the depth of the lists in `arrays` (see block docstring + for details). + """ + if depth < max_depth: + arrs = [_block(arr, max_depth, result_ndim, depth+1) + for arr in arrays] + return _concatenate(arrs, axis=-(max_depth-depth)) + else: + # We've 'bottomed out' - arrays is either a scalar or an array + # type(arrays) is not list + return _atleast_nd(arrays, result_ndim) + + +def _block_dispatcher(arrays): + # Use type(...) is list to match the behavior of np.block(), which special + # cases list specifically rather than allowing for generic iterables or + # tuple. Also, we know that list.__array_function__ will never exist. + if type(arrays) is list: + for subarrays in arrays: + yield from _block_dispatcher(subarrays) + else: + yield arrays + + +@array_function_dispatch(_block_dispatcher) +def block(arrays): + """ + Assemble an nd-array from nested lists of blocks. + + Blocks in the innermost lists are concatenated (see `concatenate`) along + the last dimension (-1), then these are concatenated along the + second-last dimension (-2), and so on until the outermost list is reached. + + Blocks can be of any dimension, but will not be broadcasted using the normal + rules. Instead, leading axes of size 1 are inserted, to make ``block.ndim`` + the same for all blocks. This is primarily useful for working with scalars, + and means that code like ``np.block([v, 1])`` is valid, where + ``v.ndim == 1``. + + When the nested list is two levels deep, this allows block matrices to be + constructed from their components. + + .. versionadded:: 1.13.0 + + Parameters + ---------- + arrays : nested list of array_like or scalars (but not tuples) + If passed a single ndarray or scalar (a nested list of depth 0), this + is returned unmodified (and not copied). + + Elements shapes must match along the appropriate axes (without + broadcasting), but leading 1s will be prepended to the shape as + necessary to make the dimensions match. + + Returns + ------- + block_array : ndarray + The array assembled from the given blocks. + + The dimensionality of the output is equal to the greatest of: + * the dimensionality of all the inputs + * the depth to which the input list is nested + + Raises + ------ + ValueError + * If list depths are mismatched - for instance, ``[[a, b], c]`` is + illegal, and should be spelt ``[[a, b], [c]]`` + * If lists are empty - for instance, ``[[a, b], []]`` + + See Also + -------- + concatenate : Join a sequence of arrays along an existing axis. + stack : Join a sequence of arrays along a new axis. + vstack : Stack arrays in sequence vertically (row wise). + hstack : Stack arrays in sequence horizontally (column wise). + dstack : Stack arrays in sequence depth wise (along third axis). + column_stack : Stack 1-D arrays as columns into a 2-D array. + vsplit : Split an array into multiple sub-arrays vertically (row-wise). + + Notes + ----- + + When called with only scalars, ``np.block`` is equivalent to an ndarray + call. So ``np.block([[1, 2], [3, 4]])`` is equivalent to + ``np.array([[1, 2], [3, 4]])``. + + This function does not enforce that the blocks lie on a fixed grid. 
+ ``np.block([[a, b], [c, d]])`` is not restricted to arrays of the form:: + + AAAbb + AAAbb + cccDD + + But is also allowed to produce, for some ``a, b, c, d``:: + + AAAbb + AAAbb + cDDDD + + Since concatenation happens along the last axis first, `block` is _not_ + capable of producing the following directly:: + + AAAbb + cccbb + cccDD + + Matlab's "square bracket stacking", ``[A, B, ...; p, q, ...]``, is + equivalent to ``np.block([[A, B, ...], [p, q, ...]])``. + + Examples + -------- + The most common use of this function is to build a block matrix + + >>> A = np.eye(2) * 2 + >>> B = np.eye(3) * 3 + >>> np.block([ + ... [A, np.zeros((2, 3))], + ... [np.ones((3, 2)), B ] + ... ]) + array([[2., 0., 0., 0., 0.], + [0., 2., 0., 0., 0.], + [1., 1., 3., 0., 0.], + [1., 1., 0., 3., 0.], + [1., 1., 0., 0., 3.]]) + + With a list of depth 1, `block` can be used as `hstack` + + >>> np.block([1, 2, 3]) # hstack([1, 2, 3]) + array([1, 2, 3]) + + >>> a = np.array([1, 2, 3]) + >>> b = np.array([4, 5, 6]) + >>> np.block([a, b, 10]) # hstack([a, b, 10]) + array([ 1, 2, 3, 4, 5, 6, 10]) + + >>> A = np.ones((2, 2), int) + >>> B = 2 * A + >>> np.block([A, B]) # hstack([A, B]) + array([[1, 1, 2, 2], + [1, 1, 2, 2]]) + + With a list of depth 2, `block` can be used in place of `vstack`: + + >>> a = np.array([1, 2, 3]) + >>> b = np.array([4, 5, 6]) + >>> np.block([[a], [b]]) # vstack([a, b]) + array([[1, 2, 3], + [4, 5, 6]]) + + >>> A = np.ones((2, 2), int) + >>> B = 2 * A + >>> np.block([[A], [B]]) # vstack([A, B]) + array([[1, 1], + [1, 1], + [2, 2], + [2, 2]]) + + It can also be used in places of `atleast_1d` and `atleast_2d` + + >>> a = np.array(0) + >>> b = np.array([1]) + >>> np.block([a]) # atleast_1d(a) + array([0]) + >>> np.block([b]) # atleast_1d(b) + array([1]) + + >>> np.block([[a]]) # atleast_2d(a) + array([[0]]) + >>> np.block([[b]]) # atleast_2d(b) + array([[1]]) + + + """ + arrays, list_ndim, result_ndim, final_size = _block_setup(arrays) + + # It was found through benchmarking that making an array of final size + # around 256x256 was faster by straight concatenation on a + # i7-7700HQ processor and dual channel ram 2400MHz. + # It didn't seem to matter heavily on the dtype used. + # + # A 2D array using repeated concatenation requires 2 copies of the array. + # + # The fastest algorithm will depend on the ratio of CPU power to memory + # speed. + # One can monitor the results of the benchmark + # https://pv.github.io/numpy-bench/#bench_shape_base.Block2D.time_block2d + # to tune this parameter until a C version of the `_block_info_recursion` + # algorithm is implemented which would likely be faster than the python + # version. + if list_ndim * final_size > (2 * 512 * 512): + return _block_slicing(arrays, list_ndim, result_ndim) + else: + return _block_concatenate(arrays, list_ndim, result_ndim) + + +# These helper functions are mostly used for testing. +# They allow us to write tests that directly call `_block_slicing` +# or `_block_concatenate` without blocking large arrays to force the wisdom +# to trigger the desired path. 
+def _block_setup(arrays): + """ + Returns + (`arrays`, list_ndim, result_ndim, final_size) + """ + bottom_index, arr_ndim, final_size = _block_check_depths_match(arrays) + list_ndim = len(bottom_index) + if bottom_index and bottom_index[-1] is None: + raise ValueError( + 'List at {} cannot be empty'.format( + _block_format_index(bottom_index) + ) + ) + result_ndim = max(arr_ndim, list_ndim) + return arrays, list_ndim, result_ndim, final_size + + +def _block_slicing(arrays, list_ndim, result_ndim): + shape, slices, arrays = _block_info_recursion( + arrays, list_ndim, result_ndim) + dtype = _nx.result_type(*[arr.dtype for arr in arrays]) + + # Test preferring F only in the case that all input arrays are F + F_order = all(arr.flags['F_CONTIGUOUS'] for arr in arrays) + C_order = all(arr.flags['C_CONTIGUOUS'] for arr in arrays) + order = 'F' if F_order and not C_order else 'C' + result = _nx.empty(shape=shape, dtype=dtype, order=order) + # Note: In a c implementation, the function + # PyArray_CreateMultiSortedStridePerm could be used for more advanced + # guessing of the desired order. + + for the_slice, arr in zip(slices, arrays): + result[(Ellipsis,) + the_slice] = arr + return result + + +def _block_concatenate(arrays, list_ndim, result_ndim): + result = _block(arrays, list_ndim, result_ndim) + if list_ndim == 0: + # Catch an edge case where _block returns a view because + # `arrays` is a single numpy array and not a list of numpy arrays. + # This might copy scalars or lists twice, but this isn't a likely + # usecase for those interested in performance + result = result.copy() + return result diff --git a/.venv/lib/python3.12/site-packages/numpy/core/shape_base.pyi b/.venv/lib/python3.12/site-packages/numpy/core/shape_base.pyi new file mode 100644 index 00000000..10116f1e --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/shape_base.pyi @@ -0,0 +1,123 @@ +from collections.abc import Sequence +from typing import TypeVar, overload, Any, SupportsIndex + +from numpy import generic, _CastingKind +from numpy._typing import ( + NDArray, + ArrayLike, + DTypeLike, + _ArrayLike, + _DTypeLike, +) + +_SCT = TypeVar("_SCT", bound=generic) +_ArrayType = TypeVar("_ArrayType", bound=NDArray[Any]) + +__all__: list[str] + +@overload +def atleast_1d(arys: _ArrayLike[_SCT], /) -> NDArray[_SCT]: ... +@overload +def atleast_1d(arys: ArrayLike, /) -> NDArray[Any]: ... +@overload +def atleast_1d(*arys: ArrayLike) -> list[NDArray[Any]]: ... + +@overload +def atleast_2d(arys: _ArrayLike[_SCT], /) -> NDArray[_SCT]: ... +@overload +def atleast_2d(arys: ArrayLike, /) -> NDArray[Any]: ... +@overload +def atleast_2d(*arys: ArrayLike) -> list[NDArray[Any]]: ... + +@overload +def atleast_3d(arys: _ArrayLike[_SCT], /) -> NDArray[_SCT]: ... +@overload +def atleast_3d(arys: ArrayLike, /) -> NDArray[Any]: ... +@overload +def atleast_3d(*arys: ArrayLike) -> list[NDArray[Any]]: ... + +@overload +def vstack( + tup: Sequence[_ArrayLike[_SCT]], + *, + dtype: None = ..., + casting: _CastingKind = ... +) -> NDArray[_SCT]: ... +@overload +def vstack( + tup: Sequence[ArrayLike], + *, + dtype: _DTypeLike[_SCT], + casting: _CastingKind = ... +) -> NDArray[_SCT]: ... +@overload +def vstack( + tup: Sequence[ArrayLike], + *, + dtype: DTypeLike = ..., + casting: _CastingKind = ... +) -> NDArray[Any]: ... + +@overload +def hstack( + tup: Sequence[_ArrayLike[_SCT]], + *, + dtype: None = ..., + casting: _CastingKind = ... +) -> NDArray[_SCT]: ... 
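``_block_setup`` earlier in this hunk is also where the "cannot be empty" error comes from; seen through the public API (an illustrative sketch, not part of the patch):

import numpy as np

try:
    np.block([[np.ones(2)], []])
except ValueError as exc:
    print(exc)   # "List at arrays[1] cannot be empty"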
+@overload +def hstack( + tup: Sequence[ArrayLike], + *, + dtype: _DTypeLike[_SCT], + casting: _CastingKind = ... +) -> NDArray[_SCT]: ... +@overload +def hstack( + tup: Sequence[ArrayLike], + *, + dtype: DTypeLike = ..., + casting: _CastingKind = ... +) -> NDArray[Any]: ... + +@overload +def stack( + arrays: Sequence[_ArrayLike[_SCT]], + axis: SupportsIndex = ..., + out: None = ..., + *, + dtype: None = ..., + casting: _CastingKind = ... +) -> NDArray[_SCT]: ... +@overload +def stack( + arrays: Sequence[ArrayLike], + axis: SupportsIndex = ..., + out: None = ..., + *, + dtype: _DTypeLike[_SCT], + casting: _CastingKind = ... +) -> NDArray[_SCT]: ... +@overload +def stack( + arrays: Sequence[ArrayLike], + axis: SupportsIndex = ..., + out: None = ..., + *, + dtype: DTypeLike = ..., + casting: _CastingKind = ... +) -> NDArray[Any]: ... +@overload +def stack( + arrays: Sequence[ArrayLike], + axis: SupportsIndex = ..., + out: _ArrayType = ..., + *, + dtype: DTypeLike = ..., + casting: _CastingKind = ... +) -> _ArrayType: ... + +@overload +def block(arrays: _ArrayLike[_SCT]) -> NDArray[_SCT]: ... +@overload +def block(arrays: ArrayLike) -> NDArray[Any]: ... diff --git a/.venv/lib/python3.12/site-packages/numpy/core/tests/__init__.py b/.venv/lib/python3.12/site-packages/numpy/core/tests/__init__.py new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/__init__.py diff --git a/.venv/lib/python3.12/site-packages/numpy/core/tests/_locales.py b/.venv/lib/python3.12/site-packages/numpy/core/tests/_locales.py new file mode 100644 index 00000000..b1dc55a9 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/_locales.py @@ -0,0 +1,74 @@ +"""Provide class for testing in French locale + +""" +import sys +import locale + +import pytest + +__ALL__ = ['CommaDecimalPointLocale'] + + +def find_comma_decimal_point_locale(): + """See if platform has a decimal point as comma locale. + + Find a locale that uses a comma instead of a period as the + decimal point. + + Returns + ------- + old_locale: str + Locale when the function was called. + new_locale: {str, None) + First French locale found, None if none found. + + """ + if sys.platform == 'win32': + locales = ['FRENCH'] + else: + locales = ['fr_FR', 'fr_FR.UTF-8', 'fi_FI', 'fi_FI.UTF-8'] + + old_locale = locale.getlocale(locale.LC_NUMERIC) + new_locale = None + try: + for loc in locales: + try: + locale.setlocale(locale.LC_NUMERIC, loc) + new_locale = loc + break + except locale.Error: + pass + finally: + locale.setlocale(locale.LC_NUMERIC, locale=old_locale) + return old_locale, new_locale + + +class CommaDecimalPointLocale: + """Sets LC_NUMERIC to a locale with comma as decimal point. + + Classes derived from this class have setup and teardown methods that run + tests with locale.LC_NUMERIC set to a locale where commas (',') are used as + the decimal point instead of periods ('.'). On exit the locale is restored + to the initial locale. It also serves as context manager with the same + effect. If no such locale is available, the test is skipped. + + .. 
versionadded:: 1.15.0 + + """ + (cur_locale, tst_locale) = find_comma_decimal_point_locale() + + def setup_method(self): + if self.tst_locale is None: + pytest.skip("No French locale available") + locale.setlocale(locale.LC_NUMERIC, locale=self.tst_locale) + + def teardown_method(self): + locale.setlocale(locale.LC_NUMERIC, locale=self.cur_locale) + + def __enter__(self): + if self.tst_locale is None: + pytest.skip("No French locale available") + locale.setlocale(locale.LC_NUMERIC, locale=self.tst_locale) + + def __exit__(self, type, value, traceback): + locale.setlocale(locale.LC_NUMERIC, locale=self.cur_locale) diff --git a/.venv/lib/python3.12/site-packages/numpy/core/tests/data/astype_copy.pkl b/.venv/lib/python3.12/site-packages/numpy/core/tests/data/astype_copy.pkl new file mode 100644 index 00000000..7397c978 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/data/astype_copy.pkl Binary files differdiff --git a/.venv/lib/python3.12/site-packages/numpy/core/tests/data/generate_umath_validation_data.cpp b/.venv/lib/python3.12/site-packages/numpy/core/tests/data/generate_umath_validation_data.cpp new file mode 100644 index 00000000..575eec11 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/data/generate_umath_validation_data.cpp @@ -0,0 +1,170 @@ +#include <algorithm> +#include <fstream> +#include <iostream> +#include <math.h> +#include <random> +#include <cstdio> +#include <ctime> +#include <vector> + +struct ufunc { + std::string name; + double (*f32func)(double); + long double (*f64func)(long double); + float f32ulp; + float f64ulp; +}; + +template <typename T> +T +RandomFloat(T a, T b) +{ + T random = ((T)rand()) / (T)RAND_MAX; + T diff = b - a; + T r = random * diff; + return a + r; +} + +template <typename T> +void +append_random_array(std::vector<T> &arr, T min, T max, size_t N) +{ + for (size_t ii = 0; ii < N; ++ii) + arr.emplace_back(RandomFloat<T>(min, max)); +} + +template <typename T1, typename T2> +std::vector<T1> +computeTrueVal(const std::vector<T1> &in, T2 (*mathfunc)(T2)) +{ + std::vector<T1> out; + for (T1 elem : in) { + T2 elem_d = (T2)elem; + T1 out_elem = (T1)mathfunc(elem_d); + out.emplace_back(out_elem); + } + return out; +} + +/* + * FP range: + * [-inf, -maxflt, -1., -minflt, -minden, 0., minden, minflt, 1., maxflt, inf] + */ + +#define MINDEN std::numeric_limits<T>::denorm_min() +#define MINFLT std::numeric_limits<T>::min() +#define MAXFLT std::numeric_limits<T>::max() +#define INF std::numeric_limits<T>::infinity() +#define qNAN std::numeric_limits<T>::quiet_NaN() +#define sNAN std::numeric_limits<T>::signaling_NaN() + +template <typename T> +std::vector<T> +generate_input_vector(std::string func) +{ + std::vector<T> input = {MINDEN, -MINDEN, MINFLT, -MINFLT, MAXFLT, + -MAXFLT, INF, -INF, qNAN, sNAN, + -1.0, 1.0, 0.0, -0.0}; + + // [-1.0, 1.0] + if ((func == "arcsin") || (func == "arccos") || (func == "arctanh")) { + append_random_array<T>(input, -1.0, 1.0, 700); + } + // (0.0, INF] + else if ((func == "log2") || (func == "log10")) { + append_random_array<T>(input, 0.0, 1.0, 200); + append_random_array<T>(input, MINDEN, MINFLT, 200); + append_random_array<T>(input, MINFLT, 1.0, 200); + append_random_array<T>(input, 1.0, MAXFLT, 200); + } + // (-1.0, INF] + else if (func == "log1p") { + append_random_array<T>(input, -1.0, 1.0, 200); + append_random_array<T>(input, -MINFLT, -MINDEN, 100); + append_random_array<T>(input, -1.0, -MINFLT, 100); + append_random_array<T>(input, MINDEN, MINFLT, 100); + 
append_random_array<T>(input, MINFLT, 1.0, 100); + append_random_array<T>(input, 1.0, MAXFLT, 100); + } + // [1.0, INF] + else if (func == "arccosh") { + append_random_array<T>(input, 1.0, 2.0, 400); + append_random_array<T>(input, 2.0, MAXFLT, 300); + } + // [-INF, INF] + else { + append_random_array<T>(input, -1.0, 1.0, 100); + append_random_array<T>(input, MINDEN, MINFLT, 100); + append_random_array<T>(input, -MINFLT, -MINDEN, 100); + append_random_array<T>(input, MINFLT, 1.0, 100); + append_random_array<T>(input, -1.0, -MINFLT, 100); + append_random_array<T>(input, 1.0, MAXFLT, 100); + append_random_array<T>(input, -MAXFLT, -100.0, 100); + } + + std::random_shuffle(input.begin(), input.end()); + return input; +} + +int +main() +{ + srand(42); + std::vector<struct ufunc> umathfunc = { + {"sin", sin, sin, 1.49, 1.00}, + {"cos", cos, cos, 1.49, 1.00}, + {"tan", tan, tan, 3.91, 3.93}, + {"arcsin", asin, asin, 3.12, 2.55}, + {"arccos", acos, acos, 2.1, 1.67}, + {"arctan", atan, atan, 2.3, 2.52}, + {"sinh", sinh, sinh, 1.55, 1.89}, + {"cosh", cosh, cosh, 2.48, 1.97}, + {"tanh", tanh, tanh, 1.38, 1.19}, + {"arcsinh", asinh, asinh, 1.01, 1.48}, + {"arccosh", acosh, acosh, 1.16, 1.05}, + {"arctanh", atanh, atanh, 1.45, 1.46}, + {"cbrt", cbrt, cbrt, 1.94, 1.82}, + //{"exp",exp,exp,3.76,1.53}, + {"exp2", exp2, exp2, 1.01, 1.04}, + {"expm1", expm1, expm1, 2.62, 2.1}, + //{"log",log,log,1.84,1.67}, + {"log10", log10, log10, 3.5, 1.92}, + {"log1p", log1p, log1p, 1.96, 1.93}, + {"log2", log2, log2, 2.12, 1.84}, + }; + + for (int ii = 0; ii < umathfunc.size(); ++ii) { + // ignore sin/cos + if ((umathfunc[ii].name != "sin") && (umathfunc[ii].name != "cos")) { + std::string fileName = + "umath-validation-set-" + umathfunc[ii].name + ".csv"; + std::ofstream txtOut; + txtOut.open(fileName, std::ofstream::trunc); + txtOut << "dtype,input,output,ulperrortol" << std::endl; + + // Single Precision + auto f32in = generate_input_vector<float>(umathfunc[ii].name); + auto f32out = computeTrueVal<float, double>(f32in, + umathfunc[ii].f32func); + for (int jj = 0; jj < f32in.size(); ++jj) { + txtOut << "np.float32" << std::hex << ",0x" + << *reinterpret_cast<uint32_t *>(&f32in[jj]) << ",0x" + << *reinterpret_cast<uint32_t *>(&f32out[jj]) << "," + << ceil(umathfunc[ii].f32ulp) << std::endl; + } + + // Double Precision + auto f64in = generate_input_vector<double>(umathfunc[ii].name); + auto f64out = computeTrueVal<double, long double>( + f64in, umathfunc[ii].f64func); + for (int jj = 0; jj < f64in.size(); ++jj) { + txtOut << "np.float64" << std::hex << ",0x" + << *reinterpret_cast<uint64_t *>(&f64in[jj]) << ",0x" + << *reinterpret_cast<uint64_t *>(&f64out[jj]) << "," + << ceil(umathfunc[ii].f64ulp) << std::endl; + } + txtOut.close(); + } + } + return 0; +} diff --git a/.venv/lib/python3.12/site-packages/numpy/core/tests/data/numpy_2_0_array.pkl b/.venv/lib/python3.12/site-packages/numpy/core/tests/data/numpy_2_0_array.pkl new file mode 100644 index 00000000..958eee50 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/data/numpy_2_0_array.pkl Binary files differdiff --git a/.venv/lib/python3.12/site-packages/numpy/core/tests/data/recarray_from_file.fits b/.venv/lib/python3.12/site-packages/numpy/core/tests/data/recarray_from_file.fits new file mode 100644 index 00000000..ca48ee85 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/data/recarray_from_file.fits Binary files differdiff --git a/.venv/lib/python3.12/site-packages/numpy/core/tests/data/umath-validation-set-README.txt 
b/.venv/lib/python3.12/site-packages/numpy/core/tests/data/umath-validation-set-README.txt new file mode 100644 index 00000000..cfc9e414 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/data/umath-validation-set-README.txt @@ -0,0 +1,15 @@ +Steps to validate transcendental functions: +1) Add a file 'umath-validation-set-<ufuncname>.txt', where ufuncname is name of + the function in NumPy you want to validate +2) The file should contain 4 columns: dtype,input,expected output,ulperror + a. dtype: one of np.float16, np.float32, np.float64 + b. input: floating point input to ufunc in hex. Example: 0x414570a4 + represents 12.340000152587890625 + c. expected output: floating point output for the corresponding input in hex. + This should be computed using a high(er) precision library and then rounded to + same format as the input. + d. ulperror: expected maximum ulp error of the function. This + should be same across all rows of the same dtype. Otherwise, the function is + tested for the maximum ulp error among all entries of that dtype. +3) Add file umath-validation-set-<ufuncname>.txt to the test file test_umath_accuracy.py + which will then validate your ufunc. diff --git a/.venv/lib/python3.12/site-packages/numpy/core/tests/data/umath-validation-set-arccos.csv b/.venv/lib/python3.12/site-packages/numpy/core/tests/data/umath-validation-set-arccos.csv new file mode 100644 index 00000000..6697ae95 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/data/umath-validation-set-arccos.csv @@ -0,0 +1,1429 @@ +dtype,input,output,ulperrortol +np.float32,0xbddd7f50,0x3fd6eec2,3 +np.float32,0xbe32a20c,0x3fdf8182,3 +np.float32,0xbf607c09,0x4028f84f,3 +np.float32,0x3f25d906,0x3f5db544,3 +np.float32,0x3f01cec8,0x3f84febf,3 +np.float32,0x3f1d5c6e,0x3f68a735,3 +np.float32,0xbf0cab89,0x4009c36d,3 +np.float32,0xbf176b40,0x400d0941,3 +np.float32,0x3f3248b2,0x3f4ce6d4,3 +np.float32,0x3f390b48,0x3f434e0d,3 +np.float32,0xbe261698,0x3fddea43,3 +np.float32,0x3f0e1154,0x3f7b848b,3 +np.float32,0xbf379a3c,0x4017b764,3 +np.float32,0xbeda6f2c,0x4000bd62,3 +np.float32,0xbf6a0c3f,0x402e5d5a,3 +np.float32,0x3ef1d700,0x3f8a17b7,3 +np.float32,0xbf6f4f65,0x4031d30d,3 +np.float32,0x3f2c9eee,0x3f54adfd,3 +np.float32,0x3f3cfb18,0x3f3d8a1e,3 +np.float32,0x3ba80800,0x3fc867d2,3 +np.float32,0x3e723b08,0x3faa7e4d,3 +np.float32,0xbf65820f,0x402bb054,3 +np.float32,0xbee64e7a,0x40026410,3 +np.float32,0x3cb15140,0x3fc64a87,3 +np.float32,0x3f193660,0x3f6ddf2a,3 +np.float32,0xbf0e5b52,0x400a44f7,3 +np.float32,0x3ed55f14,0x3f920a4b,3 +np.float32,0x3dd11a80,0x3fbbf85c,3 +np.float32,0xbf4f5c4b,0x4020f4f9,3 +np.float32,0x3f787532,0x3e792e87,3 +np.float32,0x3f40e6ac,0x3f37a74f,3 +np.float32,0x3f1c1318,0x3f6a47b6,3 +np.float32,0xbe3c48d8,0x3fe0bb70,3 +np.float32,0xbe94d4bc,0x3feed08e,3 +np.float32,0xbe5c3688,0x3fe4ce26,3 +np.float32,0xbf6fe026,0x403239cb,3 +np.float32,0x3ea5983c,0x3f9ee7bf,3 +np.float32,0x3f1471e6,0x3f73c5bb,3 +np.float32,0x3f0e2622,0x3f7b6b87,3 +np.float32,0xbf597180,0x40257ad1,3 +np.float32,0xbeb5321c,0x3ff75d34,3 +np.float32,0x3f5afcd2,0x3f0b6012,3 +np.float32,0xbef2ff88,0x40042e14,3 +np.float32,0xbedc747e,0x400104f5,3 +np.float32,0xbee0c2f4,0x40019dfc,3 +np.float32,0xbf152cd8,0x400c57dc,3 +np.float32,0xbf6cf9e2,0x40303bbe,3 +np.float32,0x3ed9cd74,0x3f90d1a1,3 +np.float32,0xbf754406,0x4036767f,3 +np.float32,0x3f59c5c2,0x3f0db42f,3 +np.float32,0x3f2eefd8,0x3f518684,3 +np.float32,0xbf156bf9,0x400c6b49,3 +np.float32,0xbd550790,0x3fcfb8dc,3 +np.float32,0x3ede58fc,0x3f8f8f77,3 
+np.float32,0xbf00ac19,0x40063c4b,3 +np.float32,0x3f4d25ba,0x3f24280e,3 +np.float32,0xbe9568be,0x3feef73c,3 +np.float32,0x3f67d154,0x3ee05547,3 +np.float32,0x3f617226,0x3efcb4f4,3 +np.float32,0xbf3ab41a,0x4018d6cc,3 +np.float32,0xbf3186fe,0x401592cd,3 +np.float32,0x3de3ba50,0x3fbacca9,3 +np.float32,0x3e789f98,0x3fa9ab97,3 +np.float32,0x3f016e08,0x3f8536d8,3 +np.float32,0x3e8b618c,0x3fa5c571,3 +np.float32,0x3eff97bc,0x3f8628a9,3 +np.float32,0xbf6729f0,0x402ca32f,3 +np.float32,0xbebec146,0x3ff9eddc,3 +np.float32,0x3ddb2e60,0x3fbb563a,3 +np.float32,0x3caa8e40,0x3fc66595,3 +np.float32,0xbf5973f2,0x40257bfa,3 +np.float32,0xbdd82c70,0x3fd69916,3 +np.float32,0xbedf4c82,0x400169ef,3 +np.float32,0x3ef8f22c,0x3f881184,3 +np.float32,0xbf1d74d4,0x400eedc9,3 +np.float32,0x3f2e10a6,0x3f52b790,3 +np.float32,0xbf08ecc0,0x4008a628,3 +np.float32,0x3ecb7db4,0x3f94be9f,3 +np.float32,0xbf052ded,0x40078bfc,3 +np.float32,0x3f2ee78a,0x3f5191e4,3 +np.float32,0xbf56f4e1,0x40245194,3 +np.float32,0x3f600a3e,0x3f014a25,3 +np.float32,0x3f3836f8,0x3f44808b,3 +np.float32,0x3ecabfbc,0x3f94f25c,3 +np.float32,0x3c70f500,0x3fc72dec,3 +np.float32,0x3f17c444,0x3f6fabf0,3 +np.float32,0xbf4c22a5,0x401f9a09,3 +np.float32,0xbe4205dc,0x3fe1765a,3 +np.float32,0x3ea49138,0x3f9f2d36,3 +np.float32,0xbece0082,0x3ffe106b,3 +np.float32,0xbe387578,0x3fe03eef,3 +np.float32,0xbf2b6466,0x40137a30,3 +np.float32,0xbe9dadb2,0x3ff12204,3 +np.float32,0xbf56b3f2,0x402433bb,3 +np.float32,0xbdf9b4d8,0x3fd8b51f,3 +np.float32,0x3f58a596,0x3f0fd4b4,3 +np.float32,0xbedf5748,0x40016b6e,3 +np.float32,0x3f446442,0x3f32476f,3 +np.float32,0x3f5be886,0x3f099658,3 +np.float32,0x3ea1e44c,0x3f9fe1de,3 +np.float32,0xbf11e9b8,0x400b585f,3 +np.float32,0xbf231f8f,0x4010befb,3 +np.float32,0xbf4395ea,0x401c2dd0,3 +np.float32,0x3e9e7784,0x3fa0c8a6,3 +np.float32,0xbe255184,0x3fddd14c,3 +np.float32,0x3f70d25e,0x3eb13148,3 +np.float32,0x3f220cdc,0x3f62a722,3 +np.float32,0xbd027bf0,0x3fcd23e7,3 +np.float32,0x3e4ef8b8,0x3faf02d2,3 +np.float32,0xbf76fc6b,0x40380728,3 +np.float32,0xbf57e761,0x4024c1cd,3 +np.float32,0x3ed4fc20,0x3f922580,3 +np.float32,0xbf09b64a,0x4008e1db,3 +np.float32,0x3f21ca62,0x3f62fcf5,3 +np.float32,0xbe55f610,0x3fe40170,3 +np.float32,0xbc0def80,0x3fca2bbb,3 +np.float32,0xbebc8764,0x3ff9547b,3 +np.float32,0x3ec1b200,0x3f9766d1,3 +np.float32,0xbf4ee44e,0x4020c1ee,3 +np.float32,0xbea85852,0x3ff3f22a,3 +np.float32,0xbf195c0c,0x400da3d3,3 +np.float32,0xbf754b5d,0x40367ce8,3 +np.float32,0xbdcbfe50,0x3fd5d52b,3 +np.float32,0xbf1adb87,0x400e1be3,3 +np.float32,0xbf6f8491,0x4031f898,3 +np.float32,0xbf6f9ae7,0x4032086e,3 +np.float32,0xbf52b3f0,0x40226790,3 +np.float32,0xbf698452,0x402e09f4,3 +np.float32,0xbf43dc9a,0x401c493a,3 +np.float32,0xbf165f7f,0x400cb664,3 +np.float32,0x3e635468,0x3fac682f,3 +np.float32,0xbe8cf2b6,0x3fecc28a,3 +np.float32,0x7f7fffff,0x7fc00000,3 +np.float32,0xbf4c6513,0x401fb597,3 +np.float32,0xbf02b8f8,0x4006d47e,3 +np.float32,0x3ed3759c,0x3f9290c8,3 +np.float32,0xbf2a7a5f,0x40132b98,3 +np.float32,0xbae65000,0x3fc9496f,3 +np.float32,0x3f65f5ea,0x3ee8ef07,3 +np.float32,0xbe7712fc,0x3fe84106,3 +np.float32,0xbb9ff700,0x3fc9afd2,3 +np.float32,0x3d8d87a0,0x3fc03592,3 +np.float32,0xbefc921c,0x40058c23,3 +np.float32,0xbf286566,0x401279d8,3 +np.float32,0x3f53857e,0x3f192eaf,3 +np.float32,0xbee9b0f4,0x4002dd90,3 +np.float32,0x3f4041f8,0x3f38a14a,3 +np.float32,0x3f54ea96,0x3f16b02d,3 +np.float32,0x3ea50ef8,0x3f9f0c01,3 +np.float32,0xbeaad2dc,0x3ff49a4a,3 +np.float32,0xbec428c8,0x3ffb636f,3 +np.float32,0xbda46178,0x3fd358c7,3 
+np.float32,0xbefacfc4,0x40054b7f,3 +np.float32,0xbf7068f9,0x40329c85,3 +np.float32,0x3f70b850,0x3eb1caa7,3 +np.float32,0x7fa00000,0x7fe00000,3 +np.float32,0x80000000,0x3fc90fdb,3 +np.float32,0x3f68d5c8,0x3edb7cf3,3 +np.float32,0x3d9443d0,0x3fbfc98a,3 +np.float32,0xff7fffff,0x7fc00000,3 +np.float32,0xbeee7ba8,0x40038a5e,3 +np.float32,0xbf0aaaba,0x40092a73,3 +np.float32,0x3f36a4e8,0x3f46c0ee,3 +np.float32,0x3ed268e4,0x3f92da82,3 +np.float32,0xbee6002c,0x4002591b,3 +np.float32,0xbe8f2752,0x3fed5576,3 +np.float32,0x3f525912,0x3f1b40e0,3 +np.float32,0xbe8e151e,0x3fed0e16,3 +np.float32,0x1,0x3fc90fdb,3 +np.float32,0x3ee23b84,0x3f8e7ae1,3 +np.float32,0xbf5961ca,0x40257361,3 +np.float32,0x3f6bbca0,0x3ecd14cd,3 +np.float32,0x3e27b230,0x3fb4014d,3 +np.float32,0xbf183bb8,0x400d49fc,3 +np.float32,0x3f57759c,0x3f120b68,3 +np.float32,0xbd6994c0,0x3fd05d84,3 +np.float32,0xbf1dd684,0x400f0cc8,3 +np.float32,0xbececc1c,0x3ffe480a,3 +np.float32,0xbf48855f,0x401e206d,3 +np.float32,0x3f28c922,0x3f59d382,3 +np.float32,0xbf65c094,0x402bd3b0,3 +np.float32,0x3f657d42,0x3eeb11dd,3 +np.float32,0xbed32d4e,0x3fff7b15,3 +np.float32,0xbf31af02,0x4015a0b1,3 +np.float32,0x3d89eb00,0x3fc06f7f,3 +np.float32,0x3dac2830,0x3fbe4a17,3 +np.float32,0x3f7f7cb6,0x3d81a7df,3 +np.float32,0xbedbb570,0x4000ea82,3 +np.float32,0x3db37830,0x3fbdd4a8,3 +np.float32,0xbf376f48,0x4017a7fd,3 +np.float32,0x3f319f12,0x3f4dd2c9,3 +np.float32,0x7fc00000,0x7fc00000,3 +np.float32,0x3f1b4f70,0x3f6b3e31,3 +np.float32,0x3e33c880,0x3fb278d1,3 +np.float32,0x3f2796e0,0x3f5b69bd,3 +np.float32,0x3f4915d6,0x3f2ad4d0,3 +np.float32,0x3e4db120,0x3faf2ca0,3 +np.float32,0x3ef03dd4,0x3f8a8ba9,3 +np.float32,0x3e96ca88,0x3fa2cbf7,3 +np.float32,0xbeb136ce,0x3ff64d2b,3 +np.float32,0xbf2f3938,0x4014c75e,3 +np.float32,0x3f769dde,0x3e8b0d76,3 +np.float32,0x3f67cec8,0x3ee06148,3 +np.float32,0x3f0a1ade,0x3f80204e,3 +np.float32,0x3e4b9718,0x3faf7144,3 +np.float32,0x3cccb480,0x3fc5dcf3,3 +np.float32,0x3caeb740,0x3fc654f0,3 +np.float32,0x3f684e0e,0x3ede0678,3 +np.float32,0x3f0ba93c,0x3f7e6663,3 +np.float32,0xbf12bbc4,0x400b985e,3 +np.float32,0xbf2a8e1a,0x40133235,3 +np.float32,0x3f42029c,0x3f35f5c5,3 +np.float32,0x3eed1728,0x3f8b6f9c,3 +np.float32,0xbe5779ac,0x3fe432fd,3 +np.float32,0x3f6ed8b8,0x3ebc7e4b,3 +np.float32,0x3eea25b0,0x3f8c43c7,3 +np.float32,0x3f1988a4,0x3f6d786b,3 +np.float32,0xbe751674,0x3fe7ff8a,3 +np.float32,0xbe9f7418,0x3ff1997d,3 +np.float32,0x3dca11d0,0x3fbc6979,3 +np.float32,0x3f795226,0x3e6a6cab,3 +np.float32,0xbea780e0,0x3ff3b926,3 +np.float32,0xbed92770,0x4000901e,3 +np.float32,0xbf3e9f8c,0x401a49f8,3 +np.float32,0x3f0f7054,0x3f79ddb2,3 +np.float32,0x3a99d400,0x3fc8e966,3 +np.float32,0xbef082b0,0x4003d3c6,3 +np.float32,0xbf0d0790,0x4009defb,3 +np.float32,0xbf1649da,0x400cafb4,3 +np.float32,0xbea5aca8,0x3ff33d5c,3 +np.float32,0xbf4e1843,0x40206ba1,3 +np.float32,0xbe3d7d5c,0x3fe0e2ad,3 +np.float32,0xbf0e802d,0x400a500e,3 +np.float32,0xbf0de8f0,0x400a2295,3 +np.float32,0xbf3016ba,0x4015137e,3 +np.float32,0x3f36b1ea,0x3f46ae5d,3 +np.float32,0xbd27f170,0x3fce4fc7,3 +np.float32,0x3e96ec54,0x3fa2c31f,3 +np.float32,0x3eb4dfdc,0x3f9ad87d,3 +np.float32,0x3f5cac6c,0x3f0815cc,3 +np.float32,0xbf0489aa,0x40075bf1,3 +np.float32,0x3df010c0,0x3fba05f5,3 +np.float32,0xbf229f4a,0x4010956a,3 +np.float32,0x3f75e474,0x3e905a99,3 +np.float32,0xbcece6a0,0x3fccc397,3 +np.float32,0xbdb41528,0x3fd454e7,3 +np.float32,0x3ec8b2f8,0x3f958118,3 +np.float32,0x3f5eaa70,0x3f041a1d,3 +np.float32,0xbf32e1cc,0x40160b91,3 +np.float32,0xbe8e6026,0x3fed219c,3 
+np.float32,0x3e6b3160,0x3fab65e3,3 +np.float32,0x3e6d7460,0x3fab1b81,3 +np.float32,0xbf13fbde,0x400bfa3b,3 +np.float32,0xbe8235ec,0x3fe9f9e3,3 +np.float32,0x3d71c4a0,0x3fc18096,3 +np.float32,0x3eb769d0,0x3f9a2aa0,3 +np.float32,0xbf68cb3b,0x402d99e4,3 +np.float32,0xbd917610,0x3fd22932,3 +np.float32,0x3d3cba60,0x3fc3297f,3 +np.float32,0xbf383cbe,0x4017f1cc,3 +np.float32,0xbeee96d0,0x40038e34,3 +np.float32,0x3ec89cb4,0x3f958725,3 +np.float32,0x3ebf92d8,0x3f97f95f,3 +np.float32,0x3f30f3da,0x3f4ec021,3 +np.float32,0xbd26b560,0x3fce45e4,3 +np.float32,0xbec0eb12,0x3ffa8330,3 +np.float32,0x3f6d592a,0x3ec4a6c1,3 +np.float32,0x3ea6d39c,0x3f9e9463,3 +np.float32,0x3e884184,0x3fa6951e,3 +np.float32,0x3ea566c4,0x3f9ef4d1,3 +np.float32,0x3f0c8f4c,0x3f7d5380,3 +np.float32,0x3f28e1ba,0x3f59b2cb,3 +np.float32,0x3f798538,0x3e66e1c3,3 +np.float32,0xbe2889b8,0x3fde39b8,3 +np.float32,0x3f3da05e,0x3f3c949c,3 +np.float32,0x3f24d700,0x3f5f073e,3 +np.float32,0xbe5b5768,0x3fe4b198,3 +np.float32,0xbed3b03a,0x3fff9f05,3 +np.float32,0x3e8a1c4c,0x3fa619eb,3 +np.float32,0xbf075d24,0x40083030,3 +np.float32,0x3f765648,0x3e8d1f52,3 +np.float32,0xbf70fc5e,0x403308bb,3 +np.float32,0x3f557ae8,0x3f15ab76,3 +np.float32,0x3f02f7ea,0x3f84521c,3 +np.float32,0x3f7ebbde,0x3dcbc5c5,3 +np.float32,0xbefbdfc6,0x40057285,3 +np.float32,0x3ec687ac,0x3f9617d9,3 +np.float32,0x3e4831c8,0x3fafe01b,3 +np.float32,0x3e25cde0,0x3fb43ea8,3 +np.float32,0x3e4f2ab8,0x3faefc70,3 +np.float32,0x3ea60ae4,0x3f9ec973,3 +np.float32,0xbf1ed55f,0x400f5dde,3 +np.float32,0xbf5ad4aa,0x40262479,3 +np.float32,0x3e8b3594,0x3fa5d0de,3 +np.float32,0x3f3a77aa,0x3f413c80,3 +np.float32,0xbf07512b,0x40082ca9,3 +np.float32,0x3f33d990,0x3f4ab5e5,3 +np.float32,0x3f521556,0x3f1bb78f,3 +np.float32,0xbecf6036,0x3ffe7086,3 +np.float32,0x3db91bd0,0x3fbd7a11,3 +np.float32,0x3ef63a74,0x3f88d839,3 +np.float32,0xbf2f1116,0x4014b99c,3 +np.float32,0xbf17fdc0,0x400d36b9,3 +np.float32,0xbe87df2c,0x3feb7117,3 +np.float32,0x80800000,0x3fc90fdb,3 +np.float32,0x3ee24c1c,0x3f8e7641,3 +np.float32,0x3f688dce,0x3edcd644,3 +np.float32,0xbf0f4e1c,0x400a8e1b,3 +np.float32,0x0,0x3fc90fdb,3 +np.float32,0x3f786eba,0x3e7999d4,3 +np.float32,0xbf404f80,0x401aeca8,3 +np.float32,0xbe9ffb6a,0x3ff1bd18,3 +np.float32,0x3f146bfc,0x3f73ccfd,3 +np.float32,0xbe47d630,0x3fe233ee,3 +np.float32,0xbe95847c,0x3feefe7c,3 +np.float32,0xbf135df0,0x400bc9e5,3 +np.float32,0x3ea19f3c,0x3f9ff411,3 +np.float32,0x3f235e20,0x3f60f247,3 +np.float32,0xbec789ec,0x3ffc4def,3 +np.float32,0x3f04b656,0x3f834db6,3 +np.float32,0x3dfaf440,0x3fb95679,3 +np.float32,0xbe4a7f28,0x3fe28abe,3 +np.float32,0x3ed4850c,0x3f92463b,3 +np.float32,0x3ec4ba5c,0x3f9694dd,3 +np.float32,0xbce24ca0,0x3fcc992b,3 +np.float32,0xbf5b7c6e,0x402675a0,3 +np.float32,0xbea3ce2a,0x3ff2bf04,3 +np.float32,0x3db02c60,0x3fbe0998,3 +np.float32,0x3c47b780,0x3fc78069,3 +np.float32,0x3ed33b20,0x3f92a0d5,3 +np.float32,0xbf4556d7,0x401cdcde,3 +np.float32,0xbe1b6e28,0x3fdc90ec,3 +np.float32,0xbf3289b7,0x4015ecd0,3 +np.float32,0x3df3f240,0x3fb9c76d,3 +np.float32,0x3eefa7d0,0x3f8ab61d,3 +np.float32,0xbe945838,0x3feeb006,3 +np.float32,0xbf0b1386,0x400949a3,3 +np.float32,0x3f77e546,0x3e812cc1,3 +np.float32,0x3e804ba0,0x3fa8a480,3 +np.float32,0x3f43dcea,0x3f331a06,3 +np.float32,0x3eb87450,0x3f99e33c,3 +np.float32,0x3e5f4898,0x3facecea,3 +np.float32,0x3f646640,0x3eeff10e,3 +np.float32,0x3f1aa832,0x3f6c1051,3 +np.float32,0xbebf6bfa,0x3ffa1bdc,3 +np.float32,0xbb77f300,0x3fc98bd4,3 +np.float32,0x3f3587fe,0x3f485645,3 +np.float32,0x3ef85f34,0x3f883b8c,3 
+np.float32,0x3f50e584,0x3f1dc82c,3 +np.float32,0x3f1d30a8,0x3f68deb0,3 +np.float32,0x3ee75a78,0x3f8d0c86,3 +np.float32,0x3f2c023a,0x3f5581e1,3 +np.float32,0xbf074e34,0x40082bca,3 +np.float32,0xbead71f0,0x3ff54c6d,3 +np.float32,0xbf39ed88,0x40188e69,3 +np.float32,0x3f5d2fe6,0x3f07118b,3 +np.float32,0xbf1f79f8,0x400f9267,3 +np.float32,0x3e900c58,0x3fa48e99,3 +np.float32,0xbf759cb2,0x4036c47b,3 +np.float32,0x3f63329c,0x3ef5359c,3 +np.float32,0xbf5d6755,0x40276709,3 +np.float32,0x3f2ce31c,0x3f54519a,3 +np.float32,0x7f800000,0x7fc00000,3 +np.float32,0x3f1bf50e,0x3f6a6d9a,3 +np.float32,0x3f258334,0x3f5e25d8,3 +np.float32,0xbf661a3f,0x402c06ac,3 +np.float32,0x3d1654c0,0x3fc45cef,3 +np.float32,0xbef14a36,0x4003f009,3 +np.float32,0xbf356051,0x4016ec3a,3 +np.float32,0x3f6ccc42,0x3ec79193,3 +np.float32,0xbf2fe3d6,0x401501f9,3 +np.float32,0x3deedc80,0x3fba195b,3 +np.float32,0x3f2e5a28,0x3f52533e,3 +np.float32,0x3e6b68b8,0x3fab5ec8,3 +np.float32,0x3e458240,0x3fb037b7,3 +np.float32,0xbf24bab0,0x401144cb,3 +np.float32,0x3f600f4c,0x3f013fb2,3 +np.float32,0x3f021a04,0x3f84d316,3 +np.float32,0x3f741732,0x3e9cc948,3 +np.float32,0x3f0788aa,0x3f81a5b0,3 +np.float32,0x3f28802c,0x3f5a347c,3 +np.float32,0x3c9eb400,0x3fc69500,3 +np.float32,0x3e5d11e8,0x3fad357a,3 +np.float32,0x3d921250,0x3fbfecb9,3 +np.float32,0x3f354866,0x3f48b066,3 +np.float32,0xbf72cf43,0x40346d84,3 +np.float32,0x3eecdbb8,0x3f8b805f,3 +np.float32,0xbee585d0,0x400247fd,3 +np.float32,0x3e3607a8,0x3fb22fc6,3 +np.float32,0xbf0cb7d6,0x4009c71c,3 +np.float32,0xbf56b230,0x402432ec,3 +np.float32,0xbf4ced02,0x401fee29,3 +np.float32,0xbf3a325c,0x4018a776,3 +np.float32,0x3ecae8bc,0x3f94e732,3 +np.float32,0xbe48c7e8,0x3fe252bd,3 +np.float32,0xbe175d7c,0x3fdc0d5b,3 +np.float32,0x3ea78dac,0x3f9e632d,3 +np.float32,0xbe7434a8,0x3fe7e279,3 +np.float32,0x3f1f9e02,0x3f65c7b9,3 +np.float32,0xbe150f2c,0x3fdbc2c2,3 +np.float32,0x3ee13480,0x3f8ec423,3 +np.float32,0x3ecb7d54,0x3f94beb9,3 +np.float32,0x3f1cef42,0x3f693181,3 +np.float32,0xbf1ec06a,0x400f5730,3 +np.float32,0xbe112acc,0x3fdb44e8,3 +np.float32,0xbe77b024,0x3fe85545,3 +np.float32,0x3ec86fe0,0x3f959353,3 +np.float32,0x3f36b326,0x3f46ac9a,3 +np.float32,0x3e581a70,0x3fadd829,3 +np.float32,0xbf032c0c,0x4006f5f9,3 +np.float32,0xbf43b1fd,0x401c38b1,3 +np.float32,0x3f3701b4,0x3f463c5c,3 +np.float32,0x3f1a995a,0x3f6c22f1,3 +np.float32,0xbf05de0b,0x4007bf97,3 +np.float32,0x3d4bd960,0x3fc2b063,3 +np.float32,0x3f0e1618,0x3f7b7ed0,3 +np.float32,0x3edfd420,0x3f8f2628,3 +np.float32,0xbf6662fe,0x402c3047,3 +np.float32,0x3ec0690c,0x3f97bf9b,3 +np.float32,0xbeaf4146,0x3ff5c7a0,3 +np.float32,0x3f5e7764,0x3f04816d,3 +np.float32,0xbedd192c,0x40011bc5,3 +np.float32,0x3eb76350,0x3f9a2c5e,3 +np.float32,0xbed8108c,0x400069a5,3 +np.float32,0xbe59f31c,0x3fe48401,3 +np.float32,0xbea3e1e6,0x3ff2c439,3 +np.float32,0x3e26d1f8,0x3fb41db5,3 +np.float32,0x3f3a0a7c,0x3f41dba5,3 +np.float32,0x3ebae068,0x3f993ce4,3 +np.float32,0x3f2d8e30,0x3f536942,3 +np.float32,0xbe838bbe,0x3fea5247,3 +np.float32,0x3ebe4420,0x3f98538f,3 +np.float32,0xbcc59b80,0x3fcc265c,3 +np.float32,0x3eebb5c8,0x3f8bd334,3 +np.float32,0xbafc3400,0x3fc94ee8,3 +np.float32,0xbf63ddc1,0x402ac683,3 +np.float32,0xbeabdf80,0x3ff4e18f,3 +np.float32,0x3ea863f0,0x3f9e2a78,3 +np.float32,0x3f45b292,0x3f303bc1,3 +np.float32,0xbe68aa60,0x3fe666bf,3 +np.float32,0x3eb9de18,0x3f998239,3 +np.float32,0xbf719d85,0x4033815e,3 +np.float32,0x3edef9a8,0x3f8f62db,3 +np.float32,0xbd7781c0,0x3fd0cd1e,3 +np.float32,0x3f0b3b90,0x3f7ee92a,3 +np.float32,0xbe3eb3b4,0x3fe10a27,3 
+np.float32,0xbf31a4c4,0x40159d23,3 +np.float32,0x3e929434,0x3fa3e5b0,3 +np.float32,0xbeb1a90e,0x3ff66b9e,3 +np.float32,0xbeba9b5e,0x3ff8d048,3 +np.float32,0xbf272a84,0x4012119e,3 +np.float32,0x3f1ebbd0,0x3f66e889,3 +np.float32,0x3ed3cdc8,0x3f927893,3 +np.float32,0xbf50dfce,0x40219b58,3 +np.float32,0x3f0c02de,0x3f7dfb62,3 +np.float32,0xbf694de3,0x402de8d2,3 +np.float32,0xbeaeb13e,0x3ff5a14f,3 +np.float32,0xbf61aa7a,0x40299702,3 +np.float32,0xbf13d159,0x400bed35,3 +np.float32,0xbeecd034,0x40034e0b,3 +np.float32,0xbe50c2e8,0x3fe35761,3 +np.float32,0x3f714406,0x3eae8e57,3 +np.float32,0xbf1ca486,0x400eabd8,3 +np.float32,0x3f5858cc,0x3f106497,3 +np.float32,0x3f670288,0x3ee41c84,3 +np.float32,0xbf20bd2c,0x400ff9f5,3 +np.float32,0xbe29afd8,0x3fde5eff,3 +np.float32,0xbf635e6a,0x402a80f3,3 +np.float32,0x3e82b7b0,0x3fa80446,3 +np.float32,0x3e982e7c,0x3fa26ece,3 +np.float32,0x3d9f0e00,0x3fbf1c6a,3 +np.float32,0x3e8299b4,0x3fa80c07,3 +np.float32,0xbf0529c1,0x40078ac3,3 +np.float32,0xbf403b8a,0x401ae519,3 +np.float32,0xbe57e09c,0x3fe44027,3 +np.float32,0x3ea1c8f4,0x3f9fe913,3 +np.float32,0xbe216a94,0x3fdd52d0,3 +np.float32,0x3f59c442,0x3f0db709,3 +np.float32,0xbd636260,0x3fd02bdd,3 +np.float32,0xbdbbc788,0x3fd4d08d,3 +np.float32,0x3dd19560,0x3fbbf0a3,3 +np.float32,0x3f060ad4,0x3f828641,3 +np.float32,0x3b102e00,0x3fc8c7c4,3 +np.float32,0x3f42b3b8,0x3f34e5a6,3 +np.float32,0x3f0255ac,0x3f84b071,3 +np.float32,0xbf014898,0x40066996,3 +np.float32,0x3e004dc0,0x3fb8fb51,3 +np.float32,0xbf594ff8,0x40256af2,3 +np.float32,0x3efafddc,0x3f877b80,3 +np.float32,0xbf5f0780,0x40283899,3 +np.float32,0x3ee95e54,0x3f8c7bcc,3 +np.float32,0x3eba2f0c,0x3f996c80,3 +np.float32,0x3f37721c,0x3f459b68,3 +np.float32,0x3e2be780,0x3fb378bf,3 +np.float32,0x3e550270,0x3fae3d69,3 +np.float32,0x3e0f9500,0x3fb70e0a,3 +np.float32,0xbf51974a,0x4021eaf4,3 +np.float32,0x3f393832,0x3f430d05,3 +np.float32,0x3f3df16a,0x3f3c1bd8,3 +np.float32,0xbd662340,0x3fd041ed,3 +np.float32,0x3f7e8418,0x3ddc9fce,3 +np.float32,0xbf392734,0x40184672,3 +np.float32,0x3ee3b278,0x3f8e124e,3 +np.float32,0x3eed4808,0x3f8b61d2,3 +np.float32,0xbf6fccbd,0x40322beb,3 +np.float32,0x3e3ecdd0,0x3fb1123b,3 +np.float32,0x3f4419e0,0x3f32bb45,3 +np.float32,0x3f595e00,0x3f0e7914,3 +np.float32,0xbe8c1486,0x3fec88c6,3 +np.float32,0xbf800000,0x40490fdb,3 +np.float32,0xbdaf5020,0x3fd4084d,3 +np.float32,0xbf407660,0x401afb63,3 +np.float32,0x3f0c3aa8,0x3f7db8b8,3 +np.float32,0xbcdb5980,0x3fcc7d5b,3 +np.float32,0x3f4738d4,0x3f2dd1ed,3 +np.float32,0x3f4d7064,0x3f23ab14,3 +np.float32,0xbeb1d576,0x3ff67774,3 +np.float32,0xbf507166,0x40216bb3,3 +np.float32,0x3e86484c,0x3fa71813,3 +np.float32,0x3f09123e,0x3f80bd35,3 +np.float32,0xbe9abe0e,0x3ff05cb2,3 +np.float32,0x3f3019dc,0x3f4fed21,3 +np.float32,0xbe99e00e,0x3ff0227d,3 +np.float32,0xbf155ec5,0x400c6739,3 +np.float32,0x3f5857ba,0x3f106698,3 +np.float32,0x3edf619c,0x3f8f45fb,3 +np.float32,0xbf5ab76a,0x40261664,3 +np.float32,0x3e54b5a8,0x3fae4738,3 +np.float32,0xbee92772,0x4002ca40,3 +np.float32,0x3f2fd610,0x3f504a7a,3 +np.float32,0xbf38521c,0x4017f97e,3 +np.float32,0xff800000,0x7fc00000,3 +np.float32,0x3e2da348,0x3fb34077,3 +np.float32,0x3f2f85fa,0x3f50b894,3 +np.float32,0x3e88f9c8,0x3fa66551,3 +np.float32,0xbf61e570,0x4029b648,3 +np.float32,0xbeab362c,0x3ff4b4a1,3 +np.float32,0x3ec6c310,0x3f9607bd,3 +np.float32,0x3f0d7bda,0x3f7c3810,3 +np.float32,0xbeba5d36,0x3ff8bf99,3 +np.float32,0x3f4b0554,0x3f27adda,3 +np.float32,0x3f60f5dc,0x3efebfb3,3 +np.float32,0x3f36ce2c,0x3f468603,3 +np.float32,0xbe70afac,0x3fe76e8e,3 
+np.float32,0x3f673350,0x3ee339b5,3 +np.float32,0xbe124cf0,0x3fdb698c,3 +np.float32,0xbf1243dc,0x400b73d0,3 +np.float32,0x3f3c8850,0x3f3e3407,3 +np.float32,0x3ea02f24,0x3fa05500,3 +np.float32,0xbeffed34,0x400607db,3 +np.float32,0x3f5c75c2,0x3f08817c,3 +np.float32,0x3f4b2fbe,0x3f27682d,3 +np.float32,0x3ee47c34,0x3f8dd9f9,3 +np.float32,0x3f50d48c,0x3f1de584,3 +np.float32,0x3f12dc5e,0x3f75b628,3 +np.float32,0xbefe7e4a,0x4005d2f4,3 +np.float32,0xbec2e846,0x3ffb0cbc,3 +np.float32,0xbedc3036,0x4000fb80,3 +np.float32,0xbf48aedc,0x401e311f,3 +np.float32,0x3f6e032e,0x3ec11363,3 +np.float32,0xbf60de15,0x40292b72,3 +np.float32,0x3f06585e,0x3f8258ba,3 +np.float32,0x3ef49b98,0x3f894e66,3 +np.float32,0x3cc5fe00,0x3fc5f7cf,3 +np.float32,0xbf7525c5,0x40365c2c,3 +np.float32,0x3f64f9f8,0x3eed5fb2,3 +np.float32,0x3e8849c0,0x3fa692fb,3 +np.float32,0x3e50c878,0x3faec79e,3 +np.float32,0x3ed61530,0x3f91d831,3 +np.float32,0xbf54872e,0x40233724,3 +np.float32,0xbf52ee7f,0x4022815e,3 +np.float32,0xbe708c24,0x3fe769fc,3 +np.float32,0xbf26fc54,0x40120260,3 +np.float32,0x3f226e8a,0x3f6228db,3 +np.float32,0xbef30406,0x40042eb8,3 +np.float32,0x3f5d996c,0x3f063f5f,3 +np.float32,0xbf425f9c,0x401bb618,3 +np.float32,0x3e4bb260,0x3faf6dc9,3 +np.float32,0xbe52d5a4,0x3fe39b29,3 +np.float32,0xbe169cf0,0x3fdbf505,3 +np.float32,0xbedfc422,0x40017a8e,3 +np.float32,0x3d8ffef0,0x3fc00e05,3 +np.float32,0xbf12bdab,0x400b98f2,3 +np.float32,0x3f295d0a,0x3f590e88,3 +np.float32,0x3f49d8e4,0x3f2998aa,3 +np.float32,0xbef914f4,0x40050c12,3 +np.float32,0xbf4ea2b5,0x4020a61e,3 +np.float32,0xbf3a89e5,0x4018c762,3 +np.float32,0x3e8707b4,0x3fa6e67a,3 +np.float32,0x3ac55400,0x3fc8de86,3 +np.float32,0x800000,0x3fc90fdb,3 +np.float32,0xbeb9762c,0x3ff8819b,3 +np.float32,0xbebbe23c,0x3ff92815,3 +np.float32,0xbf598c88,0x402587a1,3 +np.float32,0x3e95d864,0x3fa30b4a,3 +np.float32,0x3f7f6f40,0x3d882486,3 +np.float32,0xbf53658c,0x4022b604,3 +np.float32,0xbf2a35f2,0x401314ad,3 +np.float32,0x3eb14380,0x3f9bcf28,3 +np.float32,0x3f0e0c64,0x3f7b8a7a,3 +np.float32,0x3d349920,0x3fc36a9a,3 +np.float32,0xbec2092c,0x3ffad071,3 +np.float32,0xbe1d08e8,0x3fdcc4e0,3 +np.float32,0xbf008968,0x40063243,3 +np.float32,0xbefad582,0x40054c51,3 +np.float32,0xbe52d010,0x3fe39a72,3 +np.float32,0x3f4afdac,0x3f27ba6b,3 +np.float32,0x3f6c483c,0x3eca4408,3 +np.float32,0xbef3cb68,0x40044b0c,3 +np.float32,0x3e94687c,0x3fa36b6f,3 +np.float32,0xbf64ae5c,0x402b39bb,3 +np.float32,0xbf0022b4,0x40061497,3 +np.float32,0x80000001,0x3fc90fdb,3 +np.float32,0x3f25bcd0,0x3f5dda4b,3 +np.float32,0x3ed91b40,0x3f9102d7,3 +np.float32,0x3f800000,0x0,3 +np.float32,0xbebc6aca,0x3ff94cca,3 +np.float32,0x3f239e9a,0x3f609e7d,3 +np.float32,0xbf7312be,0x4034a305,3 +np.float32,0x3efd16d0,0x3f86e148,3 +np.float32,0x3f52753a,0x3f1b0f72,3 +np.float32,0xbde58960,0x3fd7702c,3 +np.float32,0x3ef88580,0x3f883099,3 +np.float32,0x3eebaefc,0x3f8bd51e,3 +np.float32,0x3e877d2c,0x3fa6c807,3 +np.float32,0x3f1a0324,0x3f6cdf32,3 +np.float32,0xbedfe20a,0x40017eb6,3 +np.float32,0x3f205a3c,0x3f64d69d,3 +np.float32,0xbeed5b7c,0x400361b0,3 +np.float32,0xbf69ba10,0x402e2ad0,3 +np.float32,0x3c4fe200,0x3fc77014,3 +np.float32,0x3f043310,0x3f839a69,3 +np.float32,0xbeaf359a,0x3ff5c485,3 +np.float32,0x3db3f110,0x3fbdcd12,3 +np.float32,0x3e24af88,0x3fb462ed,3 +np.float32,0xbf34e858,0x4016c1c8,3 +np.float32,0x3f3334f2,0x3f4b9cd0,3 +np.float32,0xbf145882,0x400c16a2,3 +np.float32,0xbf541c38,0x40230748,3 +np.float32,0x3eba7e10,0x3f99574b,3 +np.float32,0xbe34c6e0,0x3fdfc731,3 +np.float32,0xbe957abe,0x3feefbf0,3 +np.float32,0xbf595a59,0x40256fdb,3 
+np.float32,0xbdedc7b8,0x3fd7f4f0,3 +np.float32,0xbf627c02,0x402a06a9,3 +np.float32,0x3f339b78,0x3f4b0d18,3 +np.float32,0xbf2df6d2,0x40145929,3 +np.float32,0x3f617726,0x3efc9fd8,3 +np.float32,0xbee3a8fc,0x40020561,3 +np.float32,0x3efe9f68,0x3f867043,3 +np.float32,0xbf2c3e76,0x4013c3ba,3 +np.float32,0xbf218f28,0x40103d84,3 +np.float32,0xbf1ea847,0x400f4f7f,3 +np.float32,0x3ded9160,0x3fba2e31,3 +np.float32,0x3bce1b00,0x3fc841bf,3 +np.float32,0xbe90566e,0x3feda46a,3 +np.float32,0xbf5ea2ba,0x4028056b,3 +np.float32,0x3f538e62,0x3f191ee6,3 +np.float32,0xbf59e054,0x4025af74,3 +np.float32,0xbe8c98ba,0x3fecab24,3 +np.float32,0x3ee7bdb0,0x3f8cf0b7,3 +np.float32,0xbf2eb828,0x40149b2b,3 +np.float32,0xbe5eb904,0x3fe52068,3 +np.float32,0xbf16b422,0x400cd08d,3 +np.float32,0x3f1ab9b4,0x3f6bfa58,3 +np.float32,0x3dc23040,0x3fbce82a,3 +np.float32,0xbf29d9e7,0x4012f5e5,3 +np.float32,0xbf38f30a,0x40183393,3 +np.float32,0x3e88e798,0x3fa66a09,3 +np.float32,0x3f1d07e6,0x3f69124f,3 +np.float32,0xbe1d3d34,0x3fdccb7e,3 +np.float32,0xbf1715be,0x400ceec2,3 +np.float32,0x3f7a0eac,0x3e5d11f7,3 +np.float32,0xbe764924,0x3fe82707,3 +np.float32,0xbf01a1f8,0x4006837c,3 +np.float32,0x3f2be730,0x3f55a661,3 +np.float32,0xbf7bb070,0x403d4ce5,3 +np.float32,0xbd602110,0x3fd011c9,3 +np.float32,0x3f5d080c,0x3f07609d,3 +np.float32,0xbda20400,0x3fd332d1,3 +np.float32,0x3f1c62da,0x3f69e308,3 +np.float32,0xbf2c6916,0x4013d223,3 +np.float32,0xbf44f8fd,0x401cb816,3 +np.float32,0x3f4da392,0x3f235539,3 +np.float32,0x3e9e8aa0,0x3fa0c3a0,3 +np.float32,0x3e9633c4,0x3fa2f366,3 +np.float32,0xbf0422ab,0x40073ddd,3 +np.float32,0x3f518386,0x3f1cb603,3 +np.float32,0x3f24307a,0x3f5fe096,3 +np.float32,0xbdfb4220,0x3fd8ce24,3 +np.float32,0x3f179d28,0x3f6fdc7d,3 +np.float32,0xbecc2df0,0x3ffd911e,3 +np.float32,0x3f3dff0c,0x3f3c0782,3 +np.float32,0xbf58c4d8,0x4025295b,3 +np.float32,0xbdcf8438,0x3fd60dd3,3 +np.float32,0xbeeaf1b2,0x40030aa7,3 +np.float32,0xbf298a28,0x4012db45,3 +np.float32,0x3f6c4dec,0x3eca2678,3 +np.float32,0x3f4d1ac8,0x3f243a59,3 +np.float32,0x3f62cdfa,0x3ef6e8f8,3 +np.float32,0xbee8acce,0x4002b909,3 +np.float32,0xbd5f2af0,0x3fd00a15,3 +np.float32,0x3f5fde8e,0x3f01a453,3 +np.float32,0x3e95233c,0x3fa33aa4,3 +np.float32,0x3ecd2a60,0x3f9449be,3 +np.float32,0x3f10aa86,0x3f78619d,3 +np.float32,0x3f3888e8,0x3f440a70,3 +np.float32,0x3eeb5bfc,0x3f8bec7d,3 +np.float32,0xbe12d654,0x3fdb7ae6,3 +np.float32,0x3eca3110,0x3f951931,3 +np.float32,0xbe2d1b7c,0x3fdece05,3 +np.float32,0xbf29e9db,0x4012fb3a,3 +np.float32,0xbf0c50b8,0x4009a845,3 +np.float32,0xbed9f0e4,0x4000abef,3 +np.float64,0x3fd078ec5ba0f1d8,0x3ff4f7c00595a4d3,2 +np.float64,0xbfdbc39743b7872e,0x400027f85bce43b2,2 +np.float64,0xbfacd2707c39a4e0,0x3ffa08ae1075d766,2 +np.float64,0xbfc956890f32ad14,0x3ffc52308e7285fd,2 +np.float64,0xbf939c2298273840,0x3ff9706d18e6ea6b,2 +np.float64,0xbfe0d7048961ae09,0x4000fff4406bd395,2 +np.float64,0xbfe9d19b86f3a337,0x4004139bc683a69f,2 +np.float64,0x3fd35c7f90a6b900,0x3ff437220e9123f8,2 +np.float64,0x3fdddca171bbb944,0x3ff15da61e61ec08,2 +np.float64,0x3feb300de9f6601c,0x3fe1c6fadb68cdca,2 +np.float64,0xbfef1815327e302a,0x400739808fc6f964,2 +np.float64,0xbfe332d78e6665af,0x4001b6c4ef922f7c,2 +np.float64,0xbfedbf4dfb7b7e9c,0x40061cefed62a58b,2 +np.float64,0xbfd8dcc7e3b1b990,0x3fff84307713c2c3,2 +np.float64,0xbfedaf161c7b5e2c,0x400612027c1b2b25,2 +np.float64,0xbfed9bde897b37bd,0x4006053f05bd7d26,2 +np.float64,0xbfe081ebc26103d8,0x4000e70755eb66e0,2 +np.float64,0xbfe0366f9c606cdf,0x4000d11212f29afd,2 +np.float64,0xbfc7c115212f822c,0x3ffc1e8c9d58f7db,2 
+np.float64,0x3fd8dd9a78b1bb34,0x3ff2bf8d0f4c9376,2 +np.float64,0xbfe54eff466a9dfe,0x4002655950b611f4,2 +np.float64,0xbfe4aad987e955b3,0x40022efb19882518,2 +np.float64,0x3f70231ca0204600,0x3ff911d834e7abf4,2 +np.float64,0x3fede01d047bc03a,0x3fd773cecbd8561b,2 +np.float64,0xbfd6a00d48ad401a,0x3ffee9fd7051633f,2 +np.float64,0x3fd44f3d50a89e7c,0x3ff3f74dd0fc9c91,2 +np.float64,0x3fe540f0d0ea81e2,0x3feb055a7c7d43d6,2 +np.float64,0xbf3ba2e200374800,0x3ff923b582650c6c,2 +np.float64,0x3fe93b2d3f72765a,0x3fe532fa15331072,2 +np.float64,0x3fee8ce5a17d19cc,0x3fd35666eefbe336,2 +np.float64,0x3fe55d5f8feabac0,0x3feadf3dcfe251d4,2 +np.float64,0xbfd1d2ede8a3a5dc,0x3ffda600041ac884,2 +np.float64,0xbfee41186e7c8231,0x40067a625cc6f64d,2 +np.float64,0x3fe521a8b9ea4352,0x3feb2f1a6c8084e5,2 +np.float64,0x3fc65378ef2ca6f0,0x3ff653dfe81ee9f2,2 +np.float64,0x3fdaba0fbcb57420,0x3ff23d630995c6ba,2 +np.float64,0xbfe6b7441d6d6e88,0x4002e182539a2994,2 +np.float64,0x3fda00b6dcb4016c,0x3ff2703d516f28e7,2 +np.float64,0xbfe8699f01f0d33e,0x400382326920ea9e,2 +np.float64,0xbfef5889367eb112,0x4007832af5983793,2 +np.float64,0x3fefb57c8aff6afa,0x3fc14700ab38dcef,2 +np.float64,0xbfda0dfdaab41bfc,0x3fffd75b6fd497f6,2 +np.float64,0xbfb059c36620b388,0x3ffa27c528b97a42,2 +np.float64,0xbfdd450ab1ba8a16,0x40005dcac6ab50fd,2 +np.float64,0xbfe54d6156ea9ac2,0x400264ce9f3f0fb9,2 +np.float64,0xbfe076e94760edd2,0x4000e3d1374884da,2 +np.float64,0xbfc063286720c650,0x3ffb2fd1d6bff0ef,2 +np.float64,0xbfe24680f2e48d02,0x40016ddfbb5bcc0e,2 +np.float64,0xbfdc9351d2b926a4,0x400044e3756fb765,2 +np.float64,0x3fefb173d8ff62e8,0x3fc1bd5626f80850,2 +np.float64,0x3fe77c117a6ef822,0x3fe7e57089bad2ec,2 +np.float64,0xbfddbcebf7bb79d8,0x40006eadb60406b3,2 +np.float64,0xbfecf6625ff9ecc5,0x40059e6c6961a6db,2 +np.float64,0x3fdc8950b8b912a0,0x3ff1bcfb2e27795b,2 +np.float64,0xbfeb2fa517765f4a,0x4004b00aee3e6888,2 +np.float64,0x3fd0efc88da1df90,0x3ff4d8f7cbd8248a,2 +np.float64,0xbfe6641a2becc834,0x4002c43362c1bd0f,2 +np.float64,0xbfe28aec0fe515d8,0x400182c91d4df039,2 +np.float64,0xbfd5ede8d0abdbd2,0x3ffeba7baef05ae8,2 +np.float64,0xbfbd99702a3b32e0,0x3ffafca21c1053f1,2 +np.float64,0x3f96f043f82de080,0x3ff8c6384d5eb610,2 +np.float64,0xbfe5badbc9eb75b8,0x400289c8cd5873d1,2 +np.float64,0x3fe5c6bf95eb8d80,0x3fea5093e9a3e43e,2 +np.float64,0x3fb1955486232ab0,0x3ff8086d4c3e71d5,2 +np.float64,0xbfea145f397428be,0x4004302237a35871,2 +np.float64,0xbfdabe685db57cd0,0x400003e2e29725fb,2 +np.float64,0xbfefc79758ff8f2f,0x400831814e23bfc8,2 +np.float64,0x3fd7edb66cafdb6c,0x3ff3006c5123bfaf,2 +np.float64,0xbfeaf7644bf5eec8,0x400495a7963ce4ed,2 +np.float64,0x3fdf838d78bf071c,0x3ff0e527eed73800,2 +np.float64,0xbfd1a0165ba3402c,0x3ffd98c5ab76d375,2 +np.float64,0x3fd75b67a9aeb6d0,0x3ff327c8d80b17cf,2 +np.float64,0x3fc2aa9647255530,0x3ff6ca854b157df1,2 +np.float64,0xbfe0957fd4612b00,0x4000ecbf3932becd,2 +np.float64,0x3fda1792c0b42f24,0x3ff269fbb2360487,2 +np.float64,0x3fd480706ca900e0,0x3ff3ea53a6aa3ae8,2 +np.float64,0xbfd0780ed9a0f01e,0x3ffd4bfd544c7d47,2 +np.float64,0x3feeec0cd77dd81a,0x3fd0a8a241fdb441,2 +np.float64,0x3fcfa933e93f5268,0x3ff5223478621a6b,2 +np.float64,0x3fdad2481fb5a490,0x3ff236b86c6b2b49,2 +np.float64,0x3fe03b129de07626,0x3ff09f21fb868451,2 +np.float64,0xbfc01212cd202424,0x3ffb259a07159ae9,2 +np.float64,0x3febdb912df7b722,0x3fe0768e20dac8c9,2 +np.float64,0xbfbf2148763e4290,0x3ffb154c361ce5bf,2 +np.float64,0xbfb1a7eb1e234fd8,0x3ffa3cb37ac4a176,2 +np.float64,0xbfe26ad1ec64d5a4,0x400178f480ecce8d,2 +np.float64,0x3fe6d1cd1b6da39a,0x3fe8dc20ec4dad3b,2 
+np.float64,0xbfede0e53dfbc1ca,0x4006340d3bdd7c97,2 +np.float64,0xbfe8fd1bd9f1fa38,0x4003bc3477f93f40,2 +np.float64,0xbfe329d0f26653a2,0x4001b3f345af5648,2 +np.float64,0xbfe4bb20eee97642,0x40023451404d6d08,2 +np.float64,0x3fb574832e2ae900,0x3ff7ca4bed0c7110,2 +np.float64,0xbfdf3c098fbe7814,0x4000a525bb72d659,2 +np.float64,0x3fa453e6d428a7c0,0x3ff87f512bb9b0c6,2 +np.float64,0x3faaec888435d920,0x3ff84a7d9e4def63,2 +np.float64,0xbfcdc240df3b8480,0x3ffce30ece754e7f,2 +np.float64,0xbf8c3220f0386440,0x3ff95a600ae6e157,2 +np.float64,0x3fe806076c700c0e,0x3fe71784a96c76eb,2 +np.float64,0x3fedf9b0e17bf362,0x3fd6e35fc0a7b6c3,2 +np.float64,0xbfe1b48422636908,0x400141bd8ed251bc,2 +np.float64,0xbfe82e2817705c50,0x40036b5a5556d021,2 +np.float64,0xbfc8ef8ff931df20,0x3ffc450ffae7ce58,2 +np.float64,0xbfe919fa94f233f5,0x4003c7cce4697fe8,2 +np.float64,0xbfc3ace4a72759c8,0x3ffb9a197bb22651,2 +np.float64,0x3fe479f71ee8f3ee,0x3fec0bd2f59097aa,2 +np.float64,0xbfeeb54a967d6a95,0x4006da12c83649c5,2 +np.float64,0x3fe5e74ea8ebce9e,0x3fea2407cef0f08c,2 +np.float64,0x3fb382baf2270570,0x3ff7e98213b921ba,2 +np.float64,0xbfdd86fd3cbb0dfa,0x40006712952ddbcf,2 +np.float64,0xbfd250eb52a4a1d6,0x3ffdc6d56253b1cd,2 +np.float64,0x3fea30c4ed74618a,0x3fe3962deba4f30e,2 +np.float64,0x3fc895963d312b30,0x3ff60a5d52fcbccc,2 +np.float64,0x3fe9cc4f6273989e,0x3fe442740942c80f,2 +np.float64,0xbfe8769f5cf0ed3f,0x4003873b4cb5bfce,2 +np.float64,0xbfe382f3726705e7,0x4001cfeb3204d110,2 +np.float64,0x3fbfe9a9163fd350,0x3ff7220bd2b97c8f,2 +np.float64,0xbfca6162bb34c2c4,0x3ffc743f939358f1,2 +np.float64,0x3fe127a014e24f40,0x3ff0147c4bafbc39,2 +np.float64,0x3fee9cdd2a7d39ba,0x3fd2e9ef45ab122f,2 +np.float64,0x3fa9ffb97c33ff80,0x3ff851e69fa3542c,2 +np.float64,0x3fd378f393a6f1e8,0x3ff42faafa77de56,2 +np.float64,0xbfe4df1e1669be3c,0x400240284df1c321,2 +np.float64,0x3fed0ed79bfa1db0,0x3fdba89060aa96fb,2 +np.float64,0x3fdef2ee52bde5dc,0x3ff10e942244f4f1,2 +np.float64,0xbfdab38f3ab5671e,0x40000264d8d5b49b,2 +np.float64,0x3fbe95a96e3d2b50,0x3ff73774cb59ce2d,2 +np.float64,0xbfe945653af28aca,0x4003d9657bf129c2,2 +np.float64,0xbfb18f3f2a231e80,0x3ffa3b27cba23f50,2 +np.float64,0xbfef50bf22fea17e,0x40077998a850082c,2 +np.float64,0xbfc52b8c212a5718,0x3ffbca8d6560a2da,2 +np.float64,0x7ff8000000000000,0x7ff8000000000000,2 +np.float64,0x3fc1e3a02d23c740,0x3ff6e3a5fcac12a4,2 +np.float64,0xbfeb5e4ea5f6bc9d,0x4004c65abef9426f,2 +np.float64,0xbfe425b132684b62,0x400203c29608b00d,2 +np.float64,0xbfbfa1c19e3f4380,0x3ffb1d6367711158,2 +np.float64,0x3fbba2776e3744f0,0x3ff766f6df586fad,2 +np.float64,0xbfb5d0951e2ba128,0x3ffa7f712480b25e,2 +np.float64,0xbfe949fdab7293fb,0x4003db4530a18507,2 +np.float64,0xbfcf13519b3e26a4,0x3ffd0e6f0a6c38ee,2 +np.float64,0x3f91e6d72823cdc0,0x3ff8da5f08909b6e,2 +np.float64,0x3f78a2e360314600,0x3ff909586727caef,2 +np.float64,0xbfe1ae7e8fe35cfd,0x40013fef082caaa3,2 +np.float64,0x3fe97a6dd1f2f4dc,0x3fe4cb4b99863478,2 +np.float64,0xbfcc1e1e69383c3c,0x3ffcad250a949843,2 +np.float64,0x3faccb797c399700,0x3ff83b8066b49330,2 +np.float64,0x3fe7a2647a6f44c8,0x3fe7acceae6ec425,2 +np.float64,0xbfec3bfcf0f877fa,0x4005366af5a7175b,2 +np.float64,0xbfe2310b94646217,0x400167588fceb228,2 +np.float64,0x3feb167372762ce6,0x3fe1f74c0288fad8,2 +np.float64,0xbfb722b4ee2e4568,0x3ffa94a81b94dfca,2 +np.float64,0x3fc58da9712b1b50,0x3ff66cf8f072aa14,2 +np.float64,0xbfe7fff9d6effff4,0x400359d01b8141de,2 +np.float64,0xbfd56691c5aacd24,0x3ffe9686697797e8,2 +np.float64,0x3fe3ab0557e7560a,0x3fed1593959ef8e8,2 +np.float64,0x3fdd458995ba8b14,0x3ff1883d6f22a322,2 
+np.float64,0x3fe7bbed2cef77da,0x3fe786d618094cda,2 +np.float64,0x3fa31a30c4263460,0x3ff88920b936fd79,2 +np.float64,0x8010000000000000,0x3ff921fb54442d18,2 +np.float64,0xbfdc5effbdb8be00,0x40003d95fe0dff11,2 +np.float64,0x3febfdad7e77fb5a,0x3fe030b5297dbbdd,2 +np.float64,0x3fe4f3f3b2e9e7e8,0x3feb6bc59eeb2be2,2 +np.float64,0xbfe44469fd6888d4,0x40020daa5488f97a,2 +np.float64,0xbfe19fddb0e33fbc,0x40013b8c902b167b,2 +np.float64,0x3fa36ad17c26d5a0,0x3ff8869b3e828134,2 +np.float64,0x3fcf23e6c93e47d0,0x3ff5336491a65d1e,2 +np.float64,0xffefffffffffffff,0x7ff8000000000000,2 +np.float64,0xbfe375f4cee6ebea,0x4001cbd2ba42e8b5,2 +np.float64,0xbfaef1215c3de240,0x3ffa19ab02081189,2 +np.float64,0xbfec39c59c78738b,0x4005353dc38e3d78,2 +np.float64,0x7ff4000000000000,0x7ffc000000000000,2 +np.float64,0xbfec09bb7b781377,0x40051c0a5754cb3a,2 +np.float64,0x3fe8301f2870603e,0x3fe6d783c5ef0944,2 +np.float64,0xbfed418c987a8319,0x4005cbae1b8693d1,2 +np.float64,0xbfdc16e7adb82dd0,0x4000338b634eaf03,2 +np.float64,0x3fd5d361bdaba6c4,0x3ff390899300a54c,2 +np.float64,0xbff0000000000000,0x400921fb54442d18,2 +np.float64,0x3fd5946232ab28c4,0x3ff3a14767813f29,2 +np.float64,0x3fe833e5fef067cc,0x3fe6d1be720edf2d,2 +np.float64,0x3fedf746a67bee8e,0x3fd6f127fdcadb7b,2 +np.float64,0x3fd90353d3b206a8,0x3ff2b54f7d369ba9,2 +np.float64,0x3fec4b4b72f89696,0x3fdf1b38d2e93532,2 +np.float64,0xbfe9c67596f38ceb,0x40040ee5f524ce03,2 +np.float64,0x3fd350d91aa6a1b4,0x3ff43a303c0da27f,2 +np.float64,0x3fd062603ba0c4c0,0x3ff4fd9514b935d8,2 +np.float64,0xbfe24c075f64980e,0x40016f8e9f2663b3,2 +np.float64,0x3fdaa546eeb54a8c,0x3ff2431a88fef1d5,2 +np.float64,0x3fe92b8151f25702,0x3fe54c67e005cbf9,2 +np.float64,0xbfe1be8b8a637d17,0x400144c078f67c6e,2 +np.float64,0xbfe468a1d7e8d144,0x40021964b118cbf4,2 +np.float64,0xbfdc6de4fab8dbca,0x40003fa9e27893d8,2 +np.float64,0xbfe3c2788ae784f1,0x4001e407ba3aa956,2 +np.float64,0xbfe2bf1542e57e2a,0x400192d4a9072016,2 +np.float64,0xbfe6982f4c6d305e,0x4002d681b1991bbb,2 +np.float64,0x3fdbceb1c4b79d64,0x3ff1f0f117b9d354,2 +np.float64,0x3fdb3705e7b66e0c,0x3ff21af01ca27ace,2 +np.float64,0x3fe3e6358ee7cc6c,0x3fecca4585053983,2 +np.float64,0xbfe16d6a9a62dad5,0x40012c7988aee247,2 +np.float64,0xbfce66e4413ccdc8,0x3ffcf83b08043a0c,2 +np.float64,0xbfeb6cd46876d9a9,0x4004cd61733bfb79,2 +np.float64,0xbfdb1cdd64b639ba,0x400010e6cf087cb7,2 +np.float64,0xbfe09e4e30e13c9c,0x4000ef5277c47721,2 +np.float64,0xbfee88dd127d11ba,0x4006b3cd443643ac,2 +np.float64,0xbf911e06c8223c00,0x3ff966744064fb05,2 +np.float64,0xbfe8f22bc471e458,0x4003b7d5513af295,2 +np.float64,0x3fe3d7329567ae66,0x3fecdd6c241f83ee,2 +np.float64,0x3fc8a9404b315280,0x3ff607dc175edf3f,2 +np.float64,0x3fe7eb80ad6fd702,0x3fe73f8fdb3e6a6c,2 +np.float64,0x3fef0931e37e1264,0x3fcf7fde80a3c5ab,2 +np.float64,0x3fe2ed3c3fe5da78,0x3fee038334cd1860,2 +np.float64,0x3fe251fdb8e4a3fc,0x3feec26dc636ac31,2 +np.float64,0x3feb239436764728,0x3fe1de9462455da7,2 +np.float64,0xbfe63fd7eeec7fb0,0x4002b78cfa3d2fa6,2 +np.float64,0x3fdd639cb5bac738,0x3ff17fc7d92b3eee,2 +np.float64,0x3fd0a7a13fa14f44,0x3ff4eba95c559c84,2 +np.float64,0x3fe804362d70086c,0x3fe71a44cd91ffa4,2 +np.float64,0xbfe0fecf6e61fd9f,0x40010bac8edbdc4f,2 +np.float64,0x3fcb74acfd36e958,0x3ff5ac84437f1b7c,2 +np.float64,0x3fe55053e1eaa0a8,0x3feaf0bf76304c30,2 +np.float64,0x3fc06b508d20d6a0,0x3ff7131da17f3902,2 +np.float64,0x3fdd78750fbaf0ec,0x3ff179e97fbf7f65,2 +np.float64,0x3fe44cb946689972,0x3fec46859b5da6be,2 +np.float64,0xbfeb165a7ff62cb5,0x4004a41c9cc9589e,2 +np.float64,0x3fe01ffb2b603ff6,0x3ff0aed52bf1c3c1,2 
+np.float64,0x3f983c60a83078c0,0x3ff8c107805715ab,2 +np.float64,0x3fd8b5ff13b16c00,0x3ff2ca4a837a476a,2 +np.float64,0x3fc80510a1300a20,0x3ff61cc3b4af470b,2 +np.float64,0xbfd3935b06a726b6,0x3ffe1b3a2066f473,2 +np.float64,0xbfdd4a1f31ba943e,0x40005e81979ed445,2 +np.float64,0xbfa76afdd42ed600,0x3ff9dd63ffba72d2,2 +np.float64,0x3fe7e06d496fc0da,0x3fe7503773566707,2 +np.float64,0xbfea5fbfe874bf80,0x40045106af6c538f,2 +np.float64,0x3fee000c487c0018,0x3fd6bef1f8779d88,2 +np.float64,0xbfb39f4ee2273ea0,0x3ffa5c3f2b3888ab,2 +np.float64,0x3feb9247b0772490,0x3fe1092d2905efce,2 +np.float64,0x3fdaa39b4cb54738,0x3ff243901da0da17,2 +np.float64,0x3fcd5b2b493ab658,0x3ff56e262e65b67d,2 +np.float64,0x3fcf82512f3f04a0,0x3ff52738847c55f2,2 +np.float64,0x3fe2af5e0c655ebc,0x3fee4ffab0c82348,2 +np.float64,0xbfec0055d0f800ac,0x4005172d325933e8,2 +np.float64,0x3fe71da9336e3b52,0x3fe86f2e12f6e303,2 +np.float64,0x3fbefab0723df560,0x3ff731188ac716ec,2 +np.float64,0xbfe11dca28623b94,0x400114d3d4ad370d,2 +np.float64,0x3fbcbda8ca397b50,0x3ff755281078abd4,2 +np.float64,0x3fe687c7126d0f8e,0x3fe945099a7855cc,2 +np.float64,0xbfecde510579bca2,0x400590606e244591,2 +np.float64,0xbfd72de681ae5bce,0x3fff0ff797ad1755,2 +np.float64,0xbfe7c0f7386f81ee,0x40034226e0805309,2 +np.float64,0x3fd8d55619b1aaac,0x3ff2c1cb3267b14e,2 +np.float64,0x3fecd7a2ad79af46,0x3fdcabbffeaa279e,2 +np.float64,0x3fee7fb1a8fcff64,0x3fd3ae620286fe19,2 +np.float64,0xbfc5f3a3592be748,0x3ffbe3ed204d9842,2 +np.float64,0x3fec9e5527793caa,0x3fddb00bc8687e4b,2 +np.float64,0x3fc35dc70f26bb90,0x3ff6b3ded7191e33,2 +np.float64,0x3fda91c07ab52380,0x3ff24878848fec8f,2 +np.float64,0xbfe12cde1fe259bc,0x4001194ab99d5134,2 +np.float64,0xbfd35ab736a6b56e,0x3ffe0c5ce8356d16,2 +np.float64,0x3fc9c94123339280,0x3ff5e3239f3ad795,2 +np.float64,0xbfe72f54926e5ea9,0x40030c95d1d02b56,2 +np.float64,0xbfee283186fc5063,0x40066786bd0feb79,2 +np.float64,0xbfe7b383f56f6708,0x40033d23ef0e903d,2 +np.float64,0x3fd6037327ac06e8,0x3ff383bf2f311ddb,2 +np.float64,0x3fe0e344b561c68a,0x3ff03cd90fd4ba65,2 +np.float64,0xbfef0ff54b7e1feb,0x400730fa5fce381e,2 +np.float64,0x3fd269929da4d324,0x3ff476b230136d32,2 +np.float64,0xbfbc5fb9f638bf70,0x3ffae8e63a4e3234,2 +np.float64,0xbfe2e8bc84e5d179,0x40019fb5874f4310,2 +np.float64,0xbfd7017413ae02e8,0x3fff040d843c1531,2 +np.float64,0x3fefd362fa7fa6c6,0x3fbababc3ddbb21d,2 +np.float64,0x3fecb62ed3f96c5e,0x3fdd44ba77ccff94,2 +np.float64,0xbfb16fad5222df58,0x3ffa392d7f02b522,2 +np.float64,0x3fbcf4abc639e950,0x3ff751b23c40e27f,2 +np.float64,0x3fe128adbce2515c,0x3ff013dc91db04b5,2 +np.float64,0x3fa5dd9d842bbb40,0x3ff87300c88d512f,2 +np.float64,0xbfe61efcaf6c3dfa,0x4002ac27117f87c9,2 +np.float64,0x3feffe1233fffc24,0x3f9638d3796a4954,2 +np.float64,0xbfe78548b66f0a92,0x40032c0447b7bfe2,2 +np.float64,0x3fe7bd38416f7a70,0x3fe784e86d6546b6,2 +np.float64,0x3fe0d6bc5961ad78,0x3ff0443899e747ac,2 +np.float64,0xbfd0bb6e47a176dc,0x3ffd5d6dff390d41,2 +np.float64,0xbfec1d16b8f83a2e,0x40052620378d3b78,2 +np.float64,0x3fe9bbec20f377d8,0x3fe45e167c7a3871,2 +np.float64,0xbfeed81d9dfdb03b,0x4006f9dec2db7310,2 +np.float64,0xbfe1e35179e3c6a3,0x40014fd1b1186ac0,2 +np.float64,0xbfc9c7e605338fcc,0x3ffc60a6bd1a7126,2 +np.float64,0x3feec92810fd9250,0x3fd1afde414ab338,2 +np.float64,0xbfeb9f1d90773e3b,0x4004e606b773f5b0,2 +np.float64,0x3fcbabdf6b3757c0,0x3ff5a573866404af,2 +np.float64,0x3fe9f4e1fff3e9c4,0x3fe3fd7b6712dd7b,2 +np.float64,0xbfe6c0175ded802e,0x4002e4a4dc12f3fe,2 +np.float64,0xbfeefc96f37df92e,0x40071d367cd721ff,2 +np.float64,0xbfeaab58dc7556b2,0x400472ce37e31e50,2 
+np.float64,0xbfc62668772c4cd0,0x3ffbea5e6c92010a,2 +np.float64,0x3fafe055fc3fc0a0,0x3ff822ce6502519a,2 +np.float64,0x3fd7b648ffaf6c90,0x3ff30f5a42f11418,2 +np.float64,0xbfe934fe827269fd,0x4003d2b9fed9e6ad,2 +np.float64,0xbfe6d691f2edad24,0x4002eca6a4b1797b,2 +np.float64,0x3fc7e62ced2fcc58,0x3ff620b1f44398b7,2 +np.float64,0xbfc89be9f33137d4,0x3ffc3a67a497f59c,2 +np.float64,0xbfe7793d536ef27a,0x40032794bf14dd64,2 +np.float64,0x3fde55a02dbcab40,0x3ff13b5f82d223e4,2 +np.float64,0xbfc8eabd7b31d57c,0x3ffc4472a81cb6d0,2 +np.float64,0x3fddcb5468bb96a8,0x3ff162899c381f2e,2 +np.float64,0xbfec7554d8f8eaaa,0x40055550e18ec463,2 +np.float64,0x3fd0b6e8b6a16dd0,0x3ff4e7b4781a50e3,2 +np.float64,0x3fedaae01b7b55c0,0x3fd8964916cdf53d,2 +np.float64,0x3fe0870f8a610e20,0x3ff072e7db95c2a2,2 +np.float64,0xbfec3e3ce2787c7a,0x4005379d0f6be873,2 +np.float64,0xbfe65502586caa04,0x4002beecff89147f,2 +np.float64,0xbfe0df39a961be74,0x4001025e36d1c061,2 +np.float64,0xbfb5d8edbe2bb1d8,0x3ffa7ff72b7d6a2b,2 +np.float64,0xbfde89574bbd12ae,0x40008ba4cd74544d,2 +np.float64,0xbfe72938f0ee5272,0x40030a5efd1acb6d,2 +np.float64,0xbfcd500d133aa01c,0x3ffcd462f9104689,2 +np.float64,0x3fe0350766606a0e,0x3ff0a2a3664e2c14,2 +np.float64,0xbfc892fb573125f8,0x3ffc3944641cc69d,2 +np.float64,0xbfba7dc7c634fb90,0x3ffaca9a6a0ffe61,2 +np.float64,0xbfeac94478759289,0x40048068a8b83e45,2 +np.float64,0xbfe8f60c1af1ec18,0x4003b961995b6e51,2 +np.float64,0x3fea1c0817743810,0x3fe3ba28c1643cf7,2 +np.float64,0xbfe42a0fefe85420,0x4002052aadd77f01,2 +np.float64,0x3fd2c61c56a58c38,0x3ff45e84cb9a7fa9,2 +np.float64,0xbfd83fb7cdb07f70,0x3fff59ab4790074c,2 +np.float64,0x3fd95e630fb2bcc8,0x3ff29c8bee1335ad,2 +np.float64,0x3feee88f387dd11e,0x3fd0c3ad3ded4094,2 +np.float64,0x3fe061291160c252,0x3ff0890010199bbc,2 +np.float64,0xbfdc7db3b5b8fb68,0x400041dea3759443,2 +np.float64,0x3fee23b320fc4766,0x3fd5ee73d7aa5c56,2 +np.float64,0xbfdc25c590b84b8c,0x4000359cf98a00b4,2 +np.float64,0xbfd63cbfd2ac7980,0x3ffecf7b9cf99b3c,2 +np.float64,0xbfbeb3c29a3d6788,0x3ffb0e66ecc0fc3b,2 +np.float64,0xbfd2f57fd6a5eb00,0x3ffdf1d7c79e1532,2 +np.float64,0xbfab3eda9c367db0,0x3ff9fc0c875f42e9,2 +np.float64,0xbfe12df1c6e25be4,0x4001199c673e698c,2 +np.float64,0x3fef8ab23a7f1564,0x3fc5aff358c59f1c,2 +np.float64,0x3fe562f50feac5ea,0x3fead7bce205f7d9,2 +np.float64,0x3fdc41adbeb8835c,0x3ff1d0f71341b8f2,2 +np.float64,0x3fe2748967e4e912,0x3fee9837f970ff9e,2 +np.float64,0xbfdaa89d57b5513a,0x400000e3889ba4cf,2 +np.float64,0x3fdf2a137dbe5428,0x3ff0fecfbecbbf86,2 +np.float64,0xbfea1fdcd2f43fba,0x4004351974b32163,2 +np.float64,0xbfe34a93a3e69528,0x4001be323946a3e0,2 +np.float64,0x3fe929bacff25376,0x3fe54f47bd7f4cf2,2 +np.float64,0xbfd667fbd6accff8,0x3ffedb04032b3a1a,2 +np.float64,0xbfeb695796f6d2af,0x4004cbb08ec6f525,2 +np.float64,0x3fd204df2ea409c0,0x3ff490f51e6670f5,2 +np.float64,0xbfd89a2757b1344e,0x3fff722127b988c4,2 +np.float64,0xbfd0787187a0f0e4,0x3ffd4c16dbe94f32,2 +np.float64,0x3fd44239bfa88474,0x3ff3fabbfb24b1fa,2 +np.float64,0xbfeb0b3489f61669,0x40049ee33d811d33,2 +np.float64,0x3fdcf04eaab9e09c,0x3ff1a02a29996c4e,2 +np.float64,0x3fd4c51e4fa98a3c,0x3ff3d8302c68fc9a,2 +np.float64,0x3fd1346645a268cc,0x3ff4c72b4970ecaf,2 +np.float64,0x3fd6a89d09ad513c,0x3ff357af6520afac,2 +np.float64,0xbfba0f469a341e90,0x3ffac3a8f41bed23,2 +np.float64,0xbfe13f8ddce27f1c,0x40011ed557719fd6,2 +np.float64,0x3fd43e5e26a87cbc,0x3ff3fbc040fc30dc,2 +np.float64,0x3fe838125a707024,0x3fe6cb5c987248f3,2 +np.float64,0x3fe128c30c625186,0x3ff013cff238dd1b,2 +np.float64,0xbfcd4718833a8e30,0x3ffcd33c96bde6f9,2 
+np.float64,0x3fe43fcd08e87f9a,0x3fec573997456ec1,2 +np.float64,0xbfe9a29104734522,0x4003ffd502a1b57f,2 +np.float64,0xbfe4709d7968e13b,0x40021bfc5cd55af4,2 +np.float64,0x3fd21c3925a43874,0x3ff48adf48556cbb,2 +np.float64,0x3fe9a521b2734a44,0x3fe4844fc054e839,2 +np.float64,0xbfdfa6a912bf4d52,0x4000b4730ad8521e,2 +np.float64,0x3fe3740702e6e80e,0x3fed5b106283b6ed,2 +np.float64,0x3fd0a3aa36a14754,0x3ff4ecb02a5e3f49,2 +np.float64,0x3fdcb903d0b97208,0x3ff1afa5d692c5b9,2 +np.float64,0xbfe7d67839efacf0,0x40034a3146abf6f2,2 +np.float64,0x3f9981c6d8330380,0x3ff8bbf1853d7b90,2 +np.float64,0xbfe9d4191673a832,0x400414a9ab453c5d,2 +np.float64,0x3fef0a1e5c7e143c,0x3fcf70b02a54c415,2 +np.float64,0xbfd996dee6b32dbe,0x3fffb6cf707ad8e4,2 +np.float64,0x3fe19bef17e337de,0x3fef9e70d4fcedae,2 +np.float64,0x3fe34a59716694b2,0x3fed8f6d5cfba474,2 +np.float64,0x3fdf27e27cbe4fc4,0x3ff0ff70500e0c7c,2 +np.float64,0xbfe19df87fe33bf1,0x40013afb401de24c,2 +np.float64,0xbfbdfd97ba3bfb30,0x3ffb02ef8c225e57,2 +np.float64,0xbfe3d3417267a683,0x4001e95ed240b0f8,2 +np.float64,0x3fe566498b6acc94,0x3fead342957d4910,2 +np.float64,0x3ff0000000000000,0x0,2 +np.float64,0x3feb329bd8766538,0x3fe1c2225aafe3b4,2 +np.float64,0xbfc19ca703233950,0x3ffb575b5df057b9,2 +np.float64,0x3fe755027d6eaa04,0x3fe81eb99c262e00,2 +np.float64,0xbfe6c2b8306d8570,0x4002e594199f9eec,2 +np.float64,0x3fd69438e6ad2870,0x3ff35d2275ae891d,2 +np.float64,0x3fda3e7285b47ce4,0x3ff25f5573dd47ae,2 +np.float64,0x3fe7928a166f2514,0x3fe7c4490ef4b9a9,2 +np.float64,0xbfd4eb71b9a9d6e4,0x3ffe75e8ccb74be1,2 +np.float64,0xbfcc3a07f1387410,0x3ffcb0b8af914a5b,2 +np.float64,0xbfe6e80225edd004,0x4002f2e26eae8999,2 +np.float64,0xbfb347728a268ee8,0x3ffa56bd526a12db,2 +np.float64,0x3fe5140ead6a281e,0x3feb4132c9140a1c,2 +np.float64,0xbfc147f125228fe4,0x3ffb4cab18b9050f,2 +np.float64,0xbfcb9145b537228c,0x3ffc9b1b6227a8c9,2 +np.float64,0xbfda84ef4bb509de,0x3ffff7f8a674e17d,2 +np.float64,0x3fd2eb6bbfa5d6d8,0x3ff454c225529d7e,2 +np.float64,0x3fe18c95f1e3192c,0x3fefb0cf0efba75a,2 +np.float64,0x3fe78606efef0c0e,0x3fe7d6c3a092d64c,2 +np.float64,0x3fbad5119a35aa20,0x3ff773dffe3ce660,2 +np.float64,0x3fd0cf5903a19eb4,0x3ff4e15fd21fdb42,2 +np.float64,0xbfd85ce90bb0b9d2,0x3fff618ee848e974,2 +np.float64,0x3fe90e11b9f21c24,0x3fe57be62f606f4a,2 +np.float64,0x3fd7a2040faf4408,0x3ff314ce85457ec2,2 +np.float64,0xbfd73fba69ae7f74,0x3fff14bff3504811,2 +np.float64,0x3fa04b4bd42096a0,0x3ff89f9b52f521a2,2 +np.float64,0xbfd7219ce5ae433a,0x3fff0cac0b45cc18,2 +np.float64,0xbfe0cf4661e19e8d,0x4000fdadb14e3c22,2 +np.float64,0x3fd07469fea0e8d4,0x3ff4f8eaa9b2394a,2 +np.float64,0x3f9b05c5d8360b80,0x3ff8b5e10672db5c,2 +np.float64,0x3fe4c25b916984b8,0x3febad29bd0e25e2,2 +np.float64,0xbfde8b4891bd1692,0x40008beb88d5c409,2 +np.float64,0xbfe199a7efe33350,0x400139b089aee21c,2 +np.float64,0x3fecdad25cf9b5a4,0x3fdc9d062867e8c3,2 +np.float64,0xbfe979b277f2f365,0x4003eedb061e25a4,2 +np.float64,0x3fc8c7311f318e60,0x3ff6040b9aeaad9d,2 +np.float64,0x3fd2b605b8a56c0c,0x3ff462b9a955c224,2 +np.float64,0x3fc073b6ad20e770,0x3ff7120e9f2fd63c,2 +np.float64,0xbfec60ede678c1dc,0x40054a3863e24dc2,2 +np.float64,0x3fe225171be44a2e,0x3feef910dca420ea,2 +np.float64,0xbfd7529762aea52e,0x3fff19d00661f650,2 +np.float64,0xbfd781783daf02f0,0x3fff2667b90be461,2 +np.float64,0x3fe3f6ec6d67edd8,0x3fecb4e814a2e33a,2 +np.float64,0x3fece6702df9cce0,0x3fdc6719d92a50d2,2 +np.float64,0xbfb5c602ce2b8c08,0x3ffa7ec761ba856a,2 +np.float64,0xbfd61f0153ac3e02,0x3ffec78e3b1a6c4d,2 +np.float64,0xbfec3462b2f868c5,0x400532630bbd7050,2 
+np.float64,0xbfdd248485ba490a,0x400059391c07c1bb,2 +np.float64,0xbfd424921fa84924,0x3ffe416a85d1dcdf,2 +np.float64,0x3fbb23a932364750,0x3ff76eef79209f7f,2 +np.float64,0x3fca248b0f344918,0x3ff5d77c5c1b4e5e,2 +np.float64,0xbfe69af4a4ed35ea,0x4002d77c2e4fbd4e,2 +np.float64,0x3fdafe3cdcb5fc78,0x3ff22a9be6efbbf2,2 +np.float64,0xbfebba3377f77467,0x4004f3836e1fe71a,2 +np.float64,0xbfe650fae06ca1f6,0x4002bd851406377c,2 +np.float64,0x3fda630007b4c600,0x3ff2554f1832bd94,2 +np.float64,0xbfda8107d9b50210,0x3ffff6e6209659f3,2 +np.float64,0x3fea759a02f4eb34,0x3fe31d1a632c9aae,2 +np.float64,0x3fbf88149e3f1030,0x3ff728313aa12ccb,2 +np.float64,0x3f7196d2a0232e00,0x3ff910647e1914c1,2 +np.float64,0x3feeae51d17d5ca4,0x3fd2709698d31f6f,2 +np.float64,0xbfd73cd663ae79ac,0x3fff13f96300b55a,2 +np.float64,0x3fd4fc5f06a9f8c0,0x3ff3c99359854b97,2 +np.float64,0x3fb29f5d6e253ec0,0x3ff7f7c20e396b20,2 +np.float64,0xbfd757c82aaeaf90,0x3fff1b34c6141e98,2 +np.float64,0x3fc56fd4cf2adfa8,0x3ff670c145122909,2 +np.float64,0x3fc609a2f52c1348,0x3ff65d3ef3cade2c,2 +np.float64,0xbfe1de631163bcc6,0x40014e5528fadb73,2 +np.float64,0xbfe7eb4a726fd695,0x40035202f49d95c4,2 +np.float64,0xbfc9223771324470,0x3ffc4b84d5e263b9,2 +np.float64,0x3fee91a8a87d2352,0x3fd3364befde8de6,2 +np.float64,0x3fbc9784fe392f10,0x3ff7578e29f6a1b2,2 +np.float64,0xbfec627c2c78c4f8,0x40054b0ff2cb9c55,2 +np.float64,0xbfb8b406a6316810,0x3ffaadd97062fb8c,2 +np.float64,0xbfecf98384f9f307,0x4005a043d9110d79,2 +np.float64,0xbfe5834bab6b0698,0x400276f114aebee4,2 +np.float64,0xbfd90f391eb21e72,0x3fff91e26a8f48f3,2 +np.float64,0xbfee288ce2fc511a,0x400667cb09aa04b3,2 +np.float64,0x3fd5aa5e32ab54bc,0x3ff39b7080a52214,2 +np.float64,0xbfee7ef907fcfdf2,0x4006ab96a8eba4c5,2 +np.float64,0x3fd6097973ac12f4,0x3ff3822486978bd1,2 +np.float64,0xbfe02d14b8e05a2a,0x4000ce5be53047b1,2 +np.float64,0xbf9c629a6838c540,0x3ff993897728c3f9,2 +np.float64,0xbfee2024667c4049,0x40066188782fb1f0,2 +np.float64,0xbfa42a88fc285510,0x3ff9c35a4bbce104,2 +np.float64,0x3fa407af5c280f60,0x3ff881b360d8eea1,2 +np.float64,0x3fed0ba42cfa1748,0x3fdbb7d55609175f,2 +np.float64,0xbfdd0b5844ba16b0,0x400055b0bb59ebb2,2 +np.float64,0x3fd88d97e6b11b30,0x3ff2d53c1ecb8f8c,2 +np.float64,0xbfeb7a915ef6f523,0x4004d410812eb84c,2 +np.float64,0xbfb5f979ca2bf2f0,0x3ffa8201d73cd4ca,2 +np.float64,0x3fb3b65dd6276cc0,0x3ff7e64576199505,2 +np.float64,0x3fcd47a7793a8f50,0x3ff570a7b672f160,2 +np.float64,0xbfa41dd30c283ba0,0x3ff9c2f488127eb3,2 +np.float64,0x3fe4b1ea1f6963d4,0x3febc2bed7760427,2 +np.float64,0xbfdd0f81d2ba1f04,0x400056463724b768,2 +np.float64,0x3fd15d93f7a2bb28,0x3ff4bc7a24eacfd7,2 +np.float64,0xbfe3213af8e64276,0x4001b14579dfded3,2 +np.float64,0x3fd90dfbeab21bf8,0x3ff2b26a6c2c3bb3,2 +np.float64,0xbfd02d54bca05aaa,0x3ffd38ab3886b203,2 +np.float64,0x3fc218dcad2431b8,0x3ff6dced56d5b417,2 +np.float64,0x3fea5edf71f4bdbe,0x3fe3455ee09f27e6,2 +np.float64,0x3fa74319042e8640,0x3ff867d224545438,2 +np.float64,0x3fd970ad92b2e15c,0x3ff2979084815dc1,2 +np.float64,0x3fce0a4bf73c1498,0x3ff557a4df32df3e,2 +np.float64,0x3fef5c8e10feb91c,0x3fc99ca0eeaaebe4,2 +np.float64,0xbfedae997ffb5d33,0x400611af18f407ab,2 +np.float64,0xbfbcf07d6239e0f8,0x3ffaf201177a2d36,2 +np.float64,0xbfc3c52541278a4c,0x3ffb9d2af0408e4a,2 +np.float64,0x3fe4ef44e4e9de8a,0x3feb71f7331255e5,2 +np.float64,0xbfccd9f5f539b3ec,0x3ffcc53a99339592,2 +np.float64,0xbfda32c745b4658e,0x3fffe16e8727ef89,2 +np.float64,0xbfef54932a7ea926,0x40077e4605e61ca1,2 +np.float64,0x3fe9d4ae3573a95c,0x3fe4344a069a3fd0,2 +np.float64,0x3fda567e73b4acfc,0x3ff258bd77a663c7,2 
+np.float64,0xbfd5bcac5eab7958,0x3ffead6379c19c52,2 +np.float64,0xbfee5e56f97cbcae,0x40069131fc54018d,2 +np.float64,0x3fc2d4413925a880,0x3ff6c54163816298,2 +np.float64,0xbfe9ddf6e873bbee,0x400418d8c722f7c5,2 +np.float64,0x3fdaf2a683b5e54c,0x3ff22dcda599d69c,2 +np.float64,0xbfca69789f34d2f0,0x3ffc7547ff10b1a6,2 +np.float64,0x3fed076f62fa0ede,0x3fdbcbda03c1d72a,2 +np.float64,0xbfcb38326f367064,0x3ffc8fb55dadeae5,2 +np.float64,0x3fe1938705e3270e,0x3fefa88130c5adda,2 +np.float64,0x3feaffae3b75ff5c,0x3fe221e3da537c7e,2 +np.float64,0x3fefc94acb7f9296,0x3fbd9a360ace67b4,2 +np.float64,0xbfe8bddeb0f17bbe,0x4003a316685c767e,2 +np.float64,0x3fbe10fbee3c21f0,0x3ff73fceb10650f5,2 +np.float64,0x3fde9126c1bd224c,0x3ff12a742f734d0a,2 +np.float64,0xbfe9686c91f2d0d9,0x4003e7bc6ee77906,2 +np.float64,0xbfb1ba4892237490,0x3ffa3dda064c2509,2 +np.float64,0xbfe2879100e50f22,0x400181c1a5b16f0f,2 +np.float64,0x3fd1cd40b6a39a80,0x3ff49f70e3064e95,2 +np.float64,0xbfc965869132cb0c,0x3ffc5419f3b43701,2 +np.float64,0x3fea7a6f2874f4de,0x3fe31480fb2dd862,2 +np.float64,0x3fc3bc56892778b0,0x3ff6a7e8fa0e8b0e,2 +np.float64,0x3fec1ed451f83da8,0x3fdfd78e564b8ad7,2 +np.float64,0x3feb77d16df6efa2,0x3fe13d083344e45e,2 +np.float64,0xbfe822e7c67045d0,0x400367104a830cf6,2 +np.float64,0x8000000000000001,0x3ff921fb54442d18,2 +np.float64,0xbfd4900918a92012,0x3ffe5dc0e19737b4,2 +np.float64,0x3fed184187fa3084,0x3fdb7b7a39f234f4,2 +np.float64,0x3fecef846179df08,0x3fdc3cb2228c3682,2 +np.float64,0xbfe2d2aed165a55e,0x400198e21c5b861b,2 +np.float64,0x7ff0000000000000,0x7ff8000000000000,2 +np.float64,0xbfee9409a07d2813,0x4006bd358232d073,2 +np.float64,0xbfecedc2baf9db86,0x4005995df566fc21,2 +np.float64,0x3fe6d857396db0ae,0x3fe8d2cb8794aa99,2 +np.float64,0xbf9a579e7834af40,0x3ff98b5cc8021e1c,2 +np.float64,0x3fc664fefb2cca00,0x3ff651a664ccf8fa,2 +np.float64,0xbfe8a7aa0e714f54,0x40039a5b4df938a0,2 +np.float64,0xbfdf27d380be4fa8,0x4000a241074dbae6,2 +np.float64,0x3fe00ddf55e01bbe,0x3ff0b94eb1ea1851,2 +np.float64,0x3feb47edbff68fdc,0x3fe199822d075959,2 +np.float64,0x3fb4993822293270,0x3ff7d80c838186d0,2 +np.float64,0xbfca2cd1473459a4,0x3ffc6d88c8de3d0d,2 +np.float64,0xbfea7d9c7674fb39,0x40045e4559e9e52d,2 +np.float64,0x3fe0dce425e1b9c8,0x3ff04099cab23289,2 +np.float64,0x3fd6bb7e97ad76fc,0x3ff352a30434499c,2 +np.float64,0x3fd4a4f16da949e4,0x3ff3e0b07432c9aa,2 +np.float64,0x8000000000000000,0x3ff921fb54442d18,2 +np.float64,0x3fe688f5b56d11ec,0x3fe9435f63264375,2 +np.float64,0xbfdf5a427ebeb484,0x4000a97a6c5d4abc,2 +np.float64,0xbfd1f3483fa3e690,0x3ffdae6c8a299383,2 +np.float64,0xbfeac920db759242,0x4004805862be51ec,2 +np.float64,0x3fef5bc711feb78e,0x3fc9ac40fba5b93b,2 +np.float64,0x3fe4bd9e12e97b3c,0x3febb363c787d381,2 +np.float64,0x3fef6a59ab7ed4b4,0x3fc880f1324eafce,2 +np.float64,0x3fc07a362120f470,0x3ff7113cf2c672b3,2 +np.float64,0xbfe4d6dbe2e9adb8,0x40023d6f6bea44b7,2 +np.float64,0xbfec2d6a15785ad4,0x40052eb425cc37a2,2 +np.float64,0x3fc90dae05321b60,0x3ff5fb10015d2934,2 +np.float64,0xbfa9239f74324740,0x3ff9eb2d057068ea,2 +np.float64,0xbfeb4fc8baf69f92,0x4004bf5e17fb08a4,2 +np.float64,0x0,0x3ff921fb54442d18,2 +np.float64,0x3faaf1884c35e320,0x3ff84a5591dbe1f3,2 +np.float64,0xbfed842561fb084b,0x4005f5c0a19116ce,2 +np.float64,0xbfc64850c32c90a0,0x3ffbeeac2ee70f9a,2 +np.float64,0x3fd7d879f5afb0f4,0x3ff306254c453436,2 +np.float64,0xbfdabaa586b5754c,0x4000035e6ac83a2b,2 +np.float64,0xbfebfeefa977fddf,0x4005167446fb9faf,2 +np.float64,0xbfe9383462727069,0x4003d407aa6a1577,2 +np.float64,0x3fe108dfb6e211c0,0x3ff026ac924b281d,2 
+np.float64,0xbf85096df02a12c0,0x3ff94c0e60a22ede,2 +np.float64,0xbfe3121cd566243a,0x4001ac8f90db5882,2 +np.float64,0xbfd227f62aa44fec,0x3ffdbc26bb175dcc,2 +np.float64,0x3fd931af2cb26360,0x3ff2a8b62dfe003c,2 +np.float64,0xbfd9b794e3b36f2a,0x3fffbfbc89ec013d,2 +np.float64,0x3fc89b2e6f313660,0x3ff609a6e67f15f2,2 +np.float64,0x3fc0b14a8f216298,0x3ff70a4b6905aad2,2 +np.float64,0xbfeda11a657b4235,0x400608b3f9fff574,2 +np.float64,0xbfed2ee9ec7a5dd4,0x4005c040b7c02390,2 +np.float64,0xbfef7819d8fef034,0x4007ac6bf75cf09d,2 +np.float64,0xbfcc4720fb388e40,0x3ffcb2666a00b336,2 +np.float64,0xbfe05dec4be0bbd8,0x4000dc8a25ca3760,2 +np.float64,0x3fb093416e212680,0x3ff81897b6d8b374,2 +np.float64,0xbfc6ab89332d5714,0x3ffbfb4559d143e7,2 +np.float64,0x3fc51948512a3290,0x3ff67bb9df662c0a,2 +np.float64,0x3fed4d94177a9b28,0x3fda76c92f0c0132,2 +np.float64,0x3fdd195fbeba32c0,0x3ff194a5586dd18e,2 +np.float64,0x3fe3f82799e7f050,0x3fecb354c2faf55c,2 +np.float64,0x3fecac2169f95842,0x3fdd7222296cb7a7,2 +np.float64,0x3fe3d3f36fe7a7e6,0x3fece18f45e30dd7,2 +np.float64,0x3fe31ff63d663fec,0x3fedc46c77d30c6a,2 +np.float64,0xbfe3120c83e62419,0x4001ac8a7c4aa742,2 +np.float64,0x3fe7c1a7976f8350,0x3fe77e4a9307c9f8,2 +np.float64,0x3fe226fe9de44dfe,0x3feef6c0f3cb00fa,2 +np.float64,0x3fd5c933baab9268,0x3ff3933e8a37de42,2 +np.float64,0x3feaa98496f5530a,0x3fe2c003832ebf21,2 +np.float64,0xbfc6f80a2f2df014,0x3ffc04fd54cb1317,2 +np.float64,0x3fde5e18d0bcbc30,0x3ff138f7b32a2ca3,2 +np.float64,0xbfe30c8dd566191c,0x4001aad4af935a78,2 +np.float64,0x3fbe8d196e3d1a30,0x3ff737fec8149ecc,2 +np.float64,0x3feaee6731f5dcce,0x3fe241fa42cce22d,2 +np.float64,0x3fef9cc46cff3988,0x3fc3f17b708dbdbb,2 +np.float64,0xbfdb181bdeb63038,0x4000103ecf405602,2 +np.float64,0xbfc58de0ed2b1bc0,0x3ffbd704c14e15cd,2 +np.float64,0xbfee05d5507c0bab,0x40064e480faba6d8,2 +np.float64,0x3fe27d0ffa64fa20,0x3fee8dc71ef79f2c,2 +np.float64,0xbfe4f7ad4c69ef5a,0x400248456cd09a07,2 +np.float64,0xbfe4843e91e9087d,0x4002225f3e139c84,2 +np.float64,0x3fe7158b9c6e2b18,0x3fe87ae845c5ba96,2 +np.float64,0xbfea64316074c863,0x400452fd2bc23a44,2 +np.float64,0xbfc9f3ae4133e75c,0x3ffc663d482afa42,2 +np.float64,0xbfd5e18513abc30a,0x3ffeb72fc76d7071,2 +np.float64,0xbfd52f6438aa5ec8,0x3ffe87e5b18041e5,2 +np.float64,0xbfea970650f52e0d,0x400469a4a6758154,2 +np.float64,0xbfe44321b7e88644,0x40020d404a2141b1,2 +np.float64,0x3fdf5a39bbbeb474,0x3ff0f10453059dbd,2 +np.float64,0xbfa1d4069423a810,0x3ff9b0a2eacd2ce2,2 +np.float64,0xbfc36d16a326da2c,0x3ffb92077d41d26a,2 +np.float64,0x1,0x3ff921fb54442d18,2 +np.float64,0x3feb232a79764654,0x3fe1df5beeb249d0,2 +np.float64,0xbfed2003d5fa4008,0x4005b737c2727583,2 +np.float64,0x3fd5b093a3ab6128,0x3ff399ca2db1d96d,2 +np.float64,0x3fca692c3d34d258,0x3ff5ceb86b79223e,2 +np.float64,0x3fd6bbdf89ad77c0,0x3ff3528916df652d,2 +np.float64,0xbfefdadd46ffb5bb,0x40085ee735e19f19,2 +np.float64,0x3feb69fb2676d3f6,0x3fe157ee0c15691e,2 +np.float64,0x3fe44c931f689926,0x3fec46b6f5e3f265,2 +np.float64,0xbfc43ddbcb287bb8,0x3ffbac71d268d74d,2 +np.float64,0x3fe6e16d43edc2da,0x3fe8c5cf0f0daa66,2 +np.float64,0x3fe489efc76913e0,0x3febf704ca1ac2a6,2 +np.float64,0xbfe590aadceb2156,0x40027b764205cf78,2 +np.float64,0xbf782e8aa0305d00,0x3ff93a29e81928ab,2 +np.float64,0x3fedcb80cffb9702,0x3fd7e5d1f98a418b,2 +np.float64,0x3fe075858060eb0c,0x3ff07d23ab46b60f,2 +np.float64,0x3fe62a68296c54d0,0x3fe9c77f7068043b,2 +np.float64,0x3feff16a3c7fe2d4,0x3fae8e8a739cc67a,2 +np.float64,0xbfd6ed93e3addb28,0x3ffefebab206fa99,2 +np.float64,0x3fe40d8ccf681b1a,0x3fec97e9cd29966d,2 
+np.float64,0x3fd6408210ac8104,0x3ff3737a7d374107,2 +np.float64,0x3fec8023b8f90048,0x3fde35ebfb2b3afd,2 +np.float64,0xbfe13babd4627758,0x40011dae5c07c56b,2 +np.float64,0xbfd2183e61a4307c,0x3ffdb80dd747cfbe,2 +np.float64,0x3feae8eb1d75d1d6,0x3fe24c1f6e42ae77,2 +np.float64,0xbfea559b9c74ab37,0x40044c8e5e123b20,2 +np.float64,0xbfd12c9d57a2593a,0x3ffd7ac6222f561c,2 +np.float64,0x3fe32eb697e65d6e,0x3fedb202693875b6,2 +np.float64,0xbfde0808c3bc1012,0x4000794bd8616ea3,2 +np.float64,0x3fe14958a06292b2,0x3ff0007b40ac648a,2 +np.float64,0x3fe3d388a6e7a712,0x3fece21751a6dd7c,2 +np.float64,0x3fe7ad7897ef5af2,0x3fe79c5b3da302a7,2 +np.float64,0x3fec75527e78eaa4,0x3fde655de0cf0508,2 +np.float64,0x3fea920d4c75241a,0x3fe2ea48f031d908,2 +np.float64,0x7fefffffffffffff,0x7ff8000000000000,2 +np.float64,0xbfc17a68cb22f4d0,0x3ffb530925f41aa0,2 +np.float64,0xbfe1c93166e39263,0x400147f3cb435dec,2 +np.float64,0x3feb97c402f72f88,0x3fe0fe5b561bf869,2 +np.float64,0x3fb58ff5162b1ff0,0x3ff7c8933fa969dc,2 +np.float64,0x3fe68e2beded1c58,0x3fe93c075283703b,2 +np.float64,0xbf94564cc828aca0,0x3ff97355e5ee35db,2 +np.float64,0x3fd31061c9a620c4,0x3ff44b150ec96998,2 +np.float64,0xbfc7d0c89f2fa190,0x3ffc208bf4eddc4d,2 +np.float64,0x3fe5736f1d6ae6de,0x3feac18f84992d1e,2 +np.float64,0x3fdb62e480b6c5c8,0x3ff20ecfdc4afe7c,2 +np.float64,0xbfc417228b282e44,0x3ffba78afea35979,2 +np.float64,0x3f8f5ba1303eb780,0x3ff8e343714630ff,2 +np.float64,0x3fe8e99126f1d322,0x3fe5b6511d4c0798,2 +np.float64,0xbfe2ec08a1e5d812,0x4001a0bb28a85875,2 +np.float64,0x3fea3b46cf74768e,0x3fe383dceaa74296,2 +np.float64,0xbfe008b5ed60116c,0x4000c3d62c275d40,2 +np.float64,0xbfcd9f8a4b3b3f14,0x3ffcde98d6484202,2 +np.float64,0xbfdb5fb112b6bf62,0x40001a22137ef1c9,2 +np.float64,0xbfe9079565f20f2b,0x4003c0670c92e401,2 +np.float64,0xbfce250dc53c4a1c,0x3ffcefc2b3dc3332,2 +np.float64,0x3fe9ba85d373750c,0x3fe4607131b28773,2 +np.float64,0x10000000000000,0x3ff921fb54442d18,2 +np.float64,0xbfeb9ef42c773de8,0x4004e5f239203ad8,2 +np.float64,0xbfd6bf457dad7e8a,0x3ffef2563d87b18d,2 +np.float64,0x3fe4de9aa5e9bd36,0x3feb87f97defb04a,2 +np.float64,0x3fedb4f67cfb69ec,0x3fd8603c465bffac,2 +np.float64,0x3fe7b6d9506f6db2,0x3fe78e670c7bdb67,2 +np.float64,0x3fe071717460e2e2,0x3ff07f84472d9cc5,2 +np.float64,0xbfed2e79dbfa5cf4,0x4005bffc6f9ad24f,2 +np.float64,0x3febb8adc377715c,0x3fe0bcebfbd45900,2 +np.float64,0xbfee2cffd87c5a00,0x40066b20a037c478,2 +np.float64,0x3fef7e358d7efc6c,0x3fc6d0ba71a542a8,2 +np.float64,0xbfef027eef7e04fe,0x400723291cb00a7a,2 +np.float64,0x3fac96da34392dc0,0x3ff83d260a936c6a,2 +np.float64,0x3fe9dba94a73b752,0x3fe428736b94885e,2 +np.float64,0x3fed37581efa6eb0,0x3fdae49dcadf1d90,2 +np.float64,0xbfe6e61037edcc20,0x4002f23031b8d522,2 +np.float64,0xbfdea7204dbd4e40,0x40008fe1f37918b7,2 +np.float64,0x3feb9f8edb773f1e,0x3fe0eef20bd4387b,2 +np.float64,0x3feeb0b6ed7d616e,0x3fd25fb3b7a525d6,2 +np.float64,0xbfd7ce9061af9d20,0x3fff3b25d531aa2b,2 +np.float64,0xbfc806b509300d6c,0x3ffc2768743a8360,2 +np.float64,0xbfa283882c250710,0x3ff9b61fda28914a,2 +np.float64,0x3fdec70050bd8e00,0x3ff11b1d769b578f,2 +np.float64,0xbfc858a44930b148,0x3ffc31d6758b4721,2 +np.float64,0x3fdc321150b86424,0x3ff1d5504c3c91e4,2 +np.float64,0x3fd9416870b282d0,0x3ff2a46f3a850f5b,2 +np.float64,0x3fdd756968baead4,0x3ff17ac510a5573f,2 +np.float64,0xbfedfd632cfbfac6,0x400648345a2f89b0,2 +np.float64,0x3fd6874285ad0e84,0x3ff36098ebff763f,2 +np.float64,0x3fe6daacc9edb55a,0x3fe8cf75fae1e35f,2 +np.float64,0x3fe53f19766a7e32,0x3feb07d0e97cd55b,2 +np.float64,0x3fd13cc36ca27988,0x3ff4c4ff801b1faa,2 
+np.float64,0x3fe4f21cbce9e43a,0x3feb6e34a72ef529,2 +np.float64,0xbfc21c1cc9243838,0x3ffb67726394ca89,2 +np.float64,0x3fe947a3f2728f48,0x3fe51eae4660e23c,2 +np.float64,0xbfce78cd653cf19c,0x3ffcfa89194b3f5e,2 +np.float64,0x3fe756f049eeade0,0x3fe81be7f2d399e2,2 +np.float64,0xbfcc727cf138e4f8,0x3ffcb7f547841bb0,2 +np.float64,0xbfc2d8d58f25b1ac,0x3ffb7f496cc72458,2 +np.float64,0xbfcfd0e4653fa1c8,0x3ffd26e1309bc80b,2 +np.float64,0xbfe2126c106424d8,0x40015e0e01db6a4a,2 +np.float64,0x3fe580e4306b01c8,0x3feaaf683ce51aa5,2 +np.float64,0x3fcea8a1b93d5140,0x3ff543456c0d28c7,2 +np.float64,0xfff0000000000000,0x7ff8000000000000,2 +np.float64,0xbfd9d5da72b3abb4,0x3fffc8013113f968,2 +np.float64,0xbfe1fdfcea63fbfa,0x400157def2e4808d,2 +np.float64,0xbfc0022e0720045c,0x3ffb239963e7cbf2,2 diff --git a/.venv/lib/python3.12/site-packages/numpy/core/tests/data/umath-validation-set-arccosh.csv b/.venv/lib/python3.12/site-packages/numpy/core/tests/data/umath-validation-set-arccosh.csv new file mode 100644 index 00000000..0defe50b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/data/umath-validation-set-arccosh.csv @@ -0,0 +1,1429 @@ +dtype,input,output,ulperrortol +np.float32,0x3f83203f,0x3e61d9d6,2 +np.float32,0x3f98dea1,0x3f1d1af6,2 +np.float32,0x7fa00000,0x7fe00000,2 +np.float32,0x7eba99af,0x42b0d032,2 +np.float32,0x3fc95a13,0x3f833650,2 +np.float32,0x3fce9a45,0x3f8771e1,2 +np.float32,0x3fc1bd96,0x3f797811,2 +np.float32,0x7eba2391,0x42b0ceed,2 +np.float32,0x7d4e8f15,0x42acdb8c,2 +np.float32,0x3feca42e,0x3f9cc88e,2 +np.float32,0x7e2b314e,0x42af412e,2 +np.float32,0x7f7fffff,0x42b2d4fc,2 +np.float32,0x3f803687,0x3d6c4380,2 +np.float32,0x3fa0edbd,0x3f33e706,2 +np.float32,0x3faa8074,0x3f4b3d3c,2 +np.float32,0x3fa0c49e,0x3f337af3,2 +np.float32,0x3f8c9ec4,0x3ee18812,2 +np.float32,0x7efef78e,0x42b17006,2 +np.float32,0x3fc75720,0x3f818aa4,2 +np.float32,0x7f52d4c8,0x42b27198,2 +np.float32,0x3f88f21e,0x3ebe52b0,2 +np.float32,0x3ff7a042,0x3fa3a07a,2 +np.float32,0x7f52115c,0x42b26fbd,2 +np.float32,0x3fc6bf6f,0x3f810b42,2 +np.float32,0x3fd105d0,0x3f895649,2 +np.float32,0x3fee7c2a,0x3f9df66e,2 +np.float32,0x7f0ff9a5,0x42b1ae4f,2 +np.float32,0x7e81f075,0x42b016e7,2 +np.float32,0x3fa57d65,0x3f3f70c6,2 +np.float32,0x80800000,0xffc00000,2 +np.float32,0x7da239f5,0x42adc2bf,2 +np.float32,0x3f9e432c,0x3f2cbd80,2 +np.float32,0x3ff2839b,0x3fa07ee4,2 +np.float32,0x3fec8aef,0x3f9cb850,2 +np.float32,0x7d325893,0x42ac905b,2 +np.float32,0x3fa27431,0x3f37dade,2 +np.float32,0x3fce7408,0x3f8753ae,2 +np.float32,0x3fde6684,0x3f93353f,2 +np.float32,0x3feb9a3e,0x3f9c1cff,2 +np.float32,0x7deb34bb,0x42ae80f0,2 +np.float32,0x3fed9300,0x3f9d61b7,2 +np.float32,0x7f35e253,0x42b225fb,2 +np.float32,0x7e6db57f,0x42afe93f,2 +np.float32,0x3fa41f08,0x3f3c10bc,2 +np.float32,0x3fb0d4da,0x3f590de3,2 +np.float32,0x3fb5c690,0x3f632351,2 +np.float32,0x3fcde9ce,0x3f86e638,2 +np.float32,0x3f809c7b,0x3dc81161,2 +np.float32,0x3fd77291,0x3f8e3226,2 +np.float32,0x3fc21a06,0x3f7a1a82,2 +np.float32,0x3fba177e,0x3f6b8139,2 +np.float32,0x7f370dff,0x42b22944,2 +np.float32,0x3fe5bfcc,0x3f9841c1,2 +np.float32,0x3feb0caa,0x3f9bc139,2 +np.float32,0x7f4fe5c3,0x42b26a6c,2 +np.float32,0x7f1e1419,0x42b1de28,2 +np.float32,0x7f5e3c96,0x42b28c92,2 +np.float32,0x3f8cd313,0x3ee3521e,2 +np.float32,0x3fa97824,0x3f48e049,2 +np.float32,0x7d8ca281,0x42ad799e,2 +np.float32,0x3f96b51b,0x3f165193,2 +np.float32,0x3f81328a,0x3e0bf504,2 +np.float32,0x3ff60bf3,0x3fa2ab45,2 +np.float32,0x3ff9b629,0x3fa4e107,2 +np.float32,0x3fecacfc,0x3f9cce37,2 
+np.float32,0x3fba8804,0x3f6c5600,2 +np.float32,0x3f81f752,0x3e333fdd,2 +np.float32,0x3fb5b262,0x3f62fb46,2 +np.float32,0x3fa21bc0,0x3f36f7e6,2 +np.float32,0x3fbc87bb,0x3f7011dc,2 +np.float32,0x3fe18b32,0x3f9565ae,2 +np.float32,0x7dfb6dd5,0x42aea316,2 +np.float32,0x3fb7c602,0x3f670ee3,2 +np.float32,0x7efeb6a2,0x42b16f84,2 +np.float32,0x3fa56180,0x3f3f2ca4,2 +np.float32,0x3f8dcaff,0x3eeb9ac0,2 +np.float32,0x7e876238,0x42b02beb,2 +np.float32,0x7f0bb67d,0x42b19eec,2 +np.float32,0x3faca01c,0x3f4fffa5,2 +np.float32,0x3fdb57ee,0x3f9108b8,2 +np.float32,0x3fe3bade,0x3f96e4b7,2 +np.float32,0x7f7aa2dd,0x42b2ca25,2 +np.float32,0x3fed92ec,0x3f9d61aa,2 +np.float32,0x7eb789b1,0x42b0c7b9,2 +np.float32,0x7f7f16e4,0x42b2d329,2 +np.float32,0x3fb6647e,0x3f645b84,2 +np.float32,0x3f99335e,0x3f1e1d96,2 +np.float32,0x7e690a11,0x42afdf17,2 +np.float32,0x7dff2f95,0x42aeaaae,2 +np.float32,0x7f70adfd,0x42b2b564,2 +np.float32,0x3fe92252,0x3f9a80fe,2 +np.float32,0x3fef54ce,0x3f9e7fe5,2 +np.float32,0x3ff24eaa,0x3fa05df9,2 +np.float32,0x7f04565a,0x42b18328,2 +np.float32,0x3fcb8b80,0x3f85007f,2 +np.float32,0x3fcd4d0a,0x3f866983,2 +np.float32,0x3fbe7d82,0x3f73a911,2 +np.float32,0x3f8a7a8a,0x3ecdc8f6,2 +np.float32,0x3f912441,0x3f030d56,2 +np.float32,0x3f9b29d6,0x3f23f663,2 +np.float32,0x3fab7f36,0x3f4d7c6c,2 +np.float32,0x7dfedafc,0x42aeaa04,2 +np.float32,0x3fe190c0,0x3f956982,2 +np.float32,0x3f927515,0x3f07e0bb,2 +np.float32,0x3ff6442a,0x3fa2cd7e,2 +np.float32,0x7f6656d0,0x42b29ee8,2 +np.float32,0x3fe29aa0,0x3f96201f,2 +np.float32,0x3fa4a247,0x3f3d5687,2 +np.float32,0x3fa1cf19,0x3f363226,2 +np.float32,0x3fc20037,0x3f79ed36,2 +np.float32,0x7cc1241a,0x42ab5645,2 +np.float32,0x3fafd540,0x3f56f25a,2 +np.float32,0x7e5b3f5f,0x42afbfdb,2 +np.float32,0x7f48de5f,0x42b258d0,2 +np.float32,0x3fce1ca0,0x3f870e85,2 +np.float32,0x7ee40bb2,0x42b136e4,2 +np.float32,0x7ecdb133,0x42b10212,2 +np.float32,0x3f9f181c,0x3f2f02ca,2 +np.float32,0x3f936cbf,0x3f0b4f63,2 +np.float32,0x3fa4f8ea,0x3f3e2c2f,2 +np.float32,0x3fcc03e2,0x3f8561ac,2 +np.float32,0x3fb801f2,0x3f67831b,2 +np.float32,0x7e141dad,0x42aef70c,2 +np.float32,0x3fe8c04e,0x3f9a4087,2 +np.float32,0x3f8548d5,0x3e929f37,2 +np.float32,0x7f148d7d,0x42b1be56,2 +np.float32,0x3fd2c9a2,0x3f8ab1ed,2 +np.float32,0x7eb374fd,0x42b0bc36,2 +np.float32,0x7f296d36,0x42b201a7,2 +np.float32,0x3ff138e2,0x3f9fb09d,2 +np.float32,0x3ff42898,0x3fa18347,2 +np.float32,0x7da8c5e1,0x42add700,2 +np.float32,0x7dcf72c4,0x42ae40a4,2 +np.float32,0x7ea571fc,0x42b09296,2 +np.float32,0x3fc0953d,0x3f776ba3,2 +np.float32,0x7f1773dd,0x42b1c83c,2 +np.float32,0x7ef53b68,0x42b15c17,2 +np.float32,0x3f85d69f,0x3e9a0f3a,2 +np.float32,0x7e8b9a05,0x42b03ba0,2 +np.float32,0x3ff07d20,0x3f9f3ad2,2 +np.float32,0x7e8da32c,0x42b0430a,2 +np.float32,0x7ef96004,0x42b164ab,2 +np.float32,0x3fdfaa62,0x3f941837,2 +np.float32,0x7f0057c5,0x42b17377,2 +np.float32,0x3fb2663f,0x3f5c5065,2 +np.float32,0x3fd3d8c3,0x3f8b8055,2 +np.float32,0x1,0xffc00000,2 +np.float32,0x3fd536c1,0x3f8c8862,2 +np.float32,0x3f91b953,0x3f053619,2 +np.float32,0x3fb3305c,0x3f5deee1,2 +np.float32,0x7ecd86b9,0x42b101a8,2 +np.float32,0x3fbf71c5,0x3f75624d,2 +np.float32,0x3ff5f0f4,0x3fa29ad2,2 +np.float32,0x3fe50389,0x3f97c328,2 +np.float32,0x3fa325a1,0x3f399e69,2 +np.float32,0x3fe4397a,0x3f973a9f,2 +np.float32,0x3f8684c6,0x3ea2b784,2 +np.float32,0x7f25ae00,0x42b1f634,2 +np.float32,0x3ff7cbf7,0x3fa3badb,2 +np.float32,0x7f73f0e0,0x42b2bc48,2 +np.float32,0x3fc88b70,0x3f828b92,2 +np.float32,0x3fb01c16,0x3f578886,2 +np.float32,0x7e557623,0x42afb229,2 
+np.float32,0x3fcbcd5b,0x3f8535b4,2 +np.float32,0x7f7157e4,0x42b2b6cd,2 +np.float32,0x7f51d9d4,0x42b26f36,2 +np.float32,0x7f331a3b,0x42b21e17,2 +np.float32,0x7f777fb5,0x42b2c3b2,2 +np.float32,0x3f832001,0x3e61d11f,2 +np.float32,0x7f2cd055,0x42b20bca,2 +np.float32,0x3f89831f,0x3ec42f76,2 +np.float32,0x7f21da33,0x42b1ea3d,2 +np.float32,0x3f99e416,0x3f20330a,2 +np.float32,0x7f2c8ea1,0x42b20b07,2 +np.float32,0x7f462c98,0x42b251e6,2 +np.float32,0x7f4fdb3f,0x42b26a52,2 +np.float32,0x3fcc1338,0x3f856e07,2 +np.float32,0x3f823673,0x3e3e20da,2 +np.float32,0x7dbfe89d,0x42ae18c6,2 +np.float32,0x3fc9b04c,0x3f837d38,2 +np.float32,0x7dba3213,0x42ae094d,2 +np.float32,0x7ec5a483,0x42b0eda1,2 +np.float32,0x3fbc4d14,0x3f6fa543,2 +np.float32,0x3fc85ce2,0x3f8264f1,2 +np.float32,0x7f77c816,0x42b2c447,2 +np.float32,0x3f9c9281,0x3f280492,2 +np.float32,0x7f49b3e2,0x42b25aef,2 +np.float32,0x3fa7e4da,0x3f45347c,2 +np.float32,0x7e0c9df5,0x42aedc72,2 +np.float32,0x7f21fd1a,0x42b1eaab,2 +np.float32,0x7f7c63ad,0x42b2cdb6,2 +np.float32,0x7f4eb80a,0x42b26783,2 +np.float32,0x7e98038c,0x42b0673c,2 +np.float32,0x7e89ba08,0x42b034b4,2 +np.float32,0x3ffc06ba,0x3fa64094,2 +np.float32,0x3fae63f6,0x3f53db36,2 +np.float32,0x3fbc2d30,0x3f6f6a1c,2 +np.float32,0x7de0e5e5,0x42ae69fe,2 +np.float32,0x7e09ed18,0x42aed28d,2 +np.float32,0x3fea78f8,0x3f9b6129,2 +np.float32,0x7dfe0bcc,0x42aea863,2 +np.float32,0x7ee21d03,0x42b13289,2 +np.float32,0x3fcc3aed,0x3f858dfc,2 +np.float32,0x3fe6b3ba,0x3f98e4ea,2 +np.float32,0x3f90f25f,0x3f025225,2 +np.float32,0x7f1bcaf4,0x42b1d6b3,2 +np.float32,0x3f83ac81,0x3e74c20e,2 +np.float32,0x3f98681d,0x3f1bae16,2 +np.float32,0x3fe1f2d9,0x3f95ad08,2 +np.float32,0x3fa279d7,0x3f37e951,2 +np.float32,0x3feb922a,0x3f9c17c4,2 +np.float32,0x7f1c72e8,0x42b1d8da,2 +np.float32,0x3fea156b,0x3f9b2038,2 +np.float32,0x3fed6bda,0x3f9d48aa,2 +np.float32,0x3fa86142,0x3f46589c,2 +np.float32,0x3ff16bc2,0x3f9fd072,2 +np.float32,0x3fbebf65,0x3f74207b,2 +np.float32,0x7e7b78b5,0x42b00610,2 +np.float32,0x3ff51ab8,0x3fa217f0,2 +np.float32,0x3f8361bb,0x3e6adf07,2 +np.float32,0x7edbceed,0x42b1240e,2 +np.float32,0x7f10e2c0,0x42b1b18a,2 +np.float32,0x3fa7bc58,0x3f44d4ef,2 +np.float32,0x3f813bde,0x3e0e1138,2 +np.float32,0x7f30d5b9,0x42b21791,2 +np.float32,0x3fb4f450,0x3f61806a,2 +np.float32,0x7eee02c4,0x42b14cca,2 +np.float32,0x7ec74b62,0x42b0f1e4,2 +np.float32,0x3ff96bca,0x3fa4b498,2 +np.float32,0x7f50e304,0x42b26cda,2 +np.float32,0x7eb14c57,0x42b0b603,2 +np.float32,0x7c3f0733,0x42a9edbf,2 +np.float32,0x7ea57acb,0x42b092b1,2 +np.float32,0x7f2788dc,0x42b1fbe7,2 +np.float32,0x3fa39f14,0x3f3ad09b,2 +np.float32,0x3fc3a7e0,0x3f7ccfa0,2 +np.float32,0x3fe70a73,0x3f991eb0,2 +np.float32,0x7f4831f7,0x42b25718,2 +np.float32,0x3fe947d0,0x3f9a999c,2 +np.float32,0x7ef2b1c7,0x42b156c4,2 +np.float32,0x3fede0ea,0x3f9d937f,2 +np.float32,0x3f9fef8e,0x3f314637,2 +np.float32,0x3fc313c5,0x3f7bcebd,2 +np.float32,0x7ee99337,0x42b14328,2 +np.float32,0x7eb9042e,0x42b0cbd5,2 +np.float32,0x3fc9d3dc,0x3f839a69,2 +np.float32,0x3fb2c018,0x3f5d091d,2 +np.float32,0x3fcc4e8f,0x3f859dc5,2 +np.float32,0x3fa9363b,0x3f484819,2 +np.float32,0x7f72ce2e,0x42b2b9e4,2 +np.float32,0x7e639326,0x42afd2f1,2 +np.float32,0x7f4595d3,0x42b25060,2 +np.float32,0x7f6d0ac4,0x42b2ad97,2 +np.float32,0x7f1bda0d,0x42b1d6e5,2 +np.float32,0x3fd85ffd,0x3f8ee0ed,2 +np.float32,0x3f91d53f,0x3f059c8e,2 +np.float32,0x7d06e103,0x42ac0155,2 +np.float32,0x3fb83126,0x3f67de6e,2 +np.float32,0x7d81ce1f,0x42ad5097,2 +np.float32,0x7f79cb3b,0x42b2c86b,2 +np.float32,0x7f800000,0x7f800000,2 
+np.float32,0x3fdbfffd,0x3f918137,2 +np.float32,0x7f4ecb1c,0x42b267b2,2 +np.float32,0x3fc2c122,0x3f7b3ed3,2 +np.float32,0x7f415854,0x42b24544,2 +np.float32,0x7e3d988b,0x42af7575,2 +np.float32,0x3f83ca99,0x3e789fcb,2 +np.float32,0x7f274f70,0x42b1fb38,2 +np.float32,0x7f0d20e6,0x42b1a416,2 +np.float32,0x3fdf3a1d,0x3f93c9c1,2 +np.float32,0x7efaa13e,0x42b1673d,2 +np.float32,0x3fb20b15,0x3f5b9434,2 +np.float32,0x3f86af9f,0x3ea4c664,2 +np.float32,0x3fe4fcb0,0x3f97be8a,2 +np.float32,0x3f920683,0x3f065085,2 +np.float32,0x3fa4b278,0x3f3d7e8b,2 +np.float32,0x3f8077a8,0x3daef77f,2 +np.float32,0x7e865be4,0x42b02807,2 +np.float32,0x3fcea7e2,0x3f877c9f,2 +np.float32,0x7e7e9db1,0x42b00c6d,2 +np.float32,0x3f9819aa,0x3f1aba7e,2 +np.float32,0x7f2b6c4b,0x42b207a7,2 +np.float32,0x7ef85e3e,0x42b16299,2 +np.float32,0x3fbd8290,0x3f71df8b,2 +np.float32,0x3fbbb615,0x3f6e8c8c,2 +np.float32,0x7f1bc7f5,0x42b1d6a9,2 +np.float32,0x3fbb4fea,0x3f6dcdad,2 +np.float32,0x3fb67e09,0x3f648dd1,2 +np.float32,0x3fc83495,0x3f824374,2 +np.float32,0x3fe52980,0x3f97dcbc,2 +np.float32,0x3f87d893,0x3eb25d7c,2 +np.float32,0x3fdb805a,0x3f9125c0,2 +np.float32,0x3fb33f0f,0x3f5e0ce1,2 +np.float32,0x3facc524,0x3f50516b,2 +np.float32,0x3ff40484,0x3fa16d0e,2 +np.float32,0x3ff078bf,0x3f9f3811,2 +np.float32,0x7f736747,0x42b2bb27,2 +np.float32,0x7f55768b,0x42b277f3,2 +np.float32,0x80000001,0xffc00000,2 +np.float32,0x7f6463d1,0x42b29a8e,2 +np.float32,0x3f8f8b59,0x3ef9d792,2 +np.float32,0x3f8a6f4d,0x3ecd5bf4,2 +np.float32,0x3fe958d9,0x3f9aa4ca,2 +np.float32,0x7f1e2ce2,0x42b1de78,2 +np.float32,0x3fb8584a,0x3f682a05,2 +np.float32,0x7dea3dc6,0x42ae7ed5,2 +np.float32,0x7f53a815,0x42b27399,2 +np.float32,0x7e0cf986,0x42aeddbf,2 +np.float32,0x7f3afb71,0x42b23422,2 +np.float32,0x3fd87d6e,0x3f8ef685,2 +np.float32,0x3ffcaa46,0x3fa6a0d7,2 +np.float32,0x7eecd276,0x42b14a3a,2 +np.float32,0x3ffc30b4,0x3fa65951,2 +np.float32,0x7e9c85e2,0x42b07634,2 +np.float32,0x3f95d862,0x3f1383de,2 +np.float32,0x7ef21410,0x42b15577,2 +np.float32,0x3fbfa1b5,0x3f75b86e,2 +np.float32,0x3fd6d90f,0x3f8dc086,2 +np.float32,0x0,0xffc00000,2 +np.float32,0x7e885dcd,0x42b02f9f,2 +np.float32,0x3fb3e057,0x3f5f54bf,2 +np.float32,0x7f40afdd,0x42b24385,2 +np.float32,0x3fb795c2,0x3f66b120,2 +np.float32,0x3fba7c11,0x3f6c3f73,2 +np.float32,0x3ffef620,0x3fa7f828,2 +np.float32,0x7d430508,0x42acbe1e,2 +np.float32,0x3f8d2892,0x3ee6369f,2 +np.float32,0x3fbea139,0x3f73e9d5,2 +np.float32,0x3ffaa928,0x3fa571b9,2 +np.float32,0x7fc00000,0x7fc00000,2 +np.float32,0x7f16f9ce,0x42b1c69f,2 +np.float32,0x3fa8f753,0x3f47b657,2 +np.float32,0x3fd48a63,0x3f8c06ac,2 +np.float32,0x7f13419e,0x42b1b9d9,2 +np.float32,0x3fdf1526,0x3f93afde,2 +np.float32,0x3f903c8b,0x3eff3be8,2 +np.float32,0x7f085323,0x42b1925b,2 +np.float32,0x7cdbe309,0x42ab98ac,2 +np.float32,0x3fba2cfd,0x3f6ba9f1,2 +np.float32,0x7f5a805d,0x42b283e4,2 +np.float32,0x7f6753dd,0x42b2a119,2 +np.float32,0x3fed9f02,0x3f9d6964,2 +np.float32,0x3f96422c,0x3f14ddba,2 +np.float32,0x7f22f2a9,0x42b1edb1,2 +np.float32,0x3fe3fcfd,0x3f97119d,2 +np.float32,0x7e018ad0,0x42aeb271,2 +np.float32,0x7db896f5,0x42ae04de,2 +np.float32,0x7e55c795,0x42afb2ec,2 +np.float32,0x7f58ef8d,0x42b28036,2 +np.float32,0x7f24a16a,0x42b1f2f3,2 +np.float32,0x3fcf714c,0x3f881b09,2 +np.float32,0x3fcdd056,0x3f86d200,2 +np.float32,0x7f02fad0,0x42b17de0,2 +np.float32,0x7eeab877,0x42b145a9,2 +np.float32,0x3fd6029d,0x3f8d20f7,2 +np.float32,0x3fd4f8cd,0x3f8c59d6,2 +np.float32,0x3fb29d4a,0x3f5cc1a5,2 +np.float32,0x3fb11e2d,0x3f59a77a,2 +np.float32,0x7eded576,0x42b12b0e,2 
+np.float32,0x7f26c2a5,0x42b1f988,2 +np.float32,0x3fb6165b,0x3f63c151,2 +np.float32,0x7f3bca47,0x42b23657,2 +np.float32,0x7d8c93bf,0x42ad7968,2 +np.float32,0x3f8ede02,0x3ef47176,2 +np.float32,0x3fbef762,0x3f7485b9,2 +np.float32,0x7f1419af,0x42b1bcc6,2 +np.float32,0x7d9e8c79,0x42adb701,2 +np.float32,0x3fa26336,0x3f37af63,2 +np.float32,0x7f5f5590,0x42b28f18,2 +np.float32,0x3fddc93a,0x3f92c651,2 +np.float32,0x3ff0a5fc,0x3f9f547f,2 +np.float32,0x3fb2f6b8,0x3f5d790e,2 +np.float32,0x3ffe59a4,0x3fa79d2c,2 +np.float32,0x7e4df848,0x42af9fde,2 +np.float32,0x3fb0ab3b,0x3f58b678,2 +np.float32,0x7ea54d47,0x42b09225,2 +np.float32,0x3fdd6404,0x3f927eb2,2 +np.float32,0x3f846dc0,0x3e864caa,2 +np.float32,0x7d046aee,0x42abf7e7,2 +np.float32,0x7f7c5a05,0x42b2cda3,2 +np.float32,0x3faf6126,0x3f55fb21,2 +np.float32,0x7f36a910,0x42b22829,2 +np.float32,0x3fdc7b36,0x3f91d938,2 +np.float32,0x3fff443e,0x3fa82577,2 +np.float32,0x7ee7154a,0x42b13daa,2 +np.float32,0x3f944742,0x3f0e435c,2 +np.float32,0x7f5b510a,0x42b285cc,2 +np.float32,0x3f9bc940,0x3f25c4d2,2 +np.float32,0x3fee4782,0x3f9dd4ea,2 +np.float32,0x3fcfc2dd,0x3f885aea,2 +np.float32,0x7eab65cf,0x42b0a4af,2 +np.float32,0x3f9cf908,0x3f292689,2 +np.float32,0x7ed35501,0x42b10feb,2 +np.float32,0x7dabb70a,0x42addfd9,2 +np.float32,0x7f348919,0x42b2222b,2 +np.float32,0x3fb137d4,0x3f59dd17,2 +np.float32,0x7e7b36c9,0x42b0058a,2 +np.float32,0x7e351fa4,0x42af5e0d,2 +np.float32,0x3f973c0c,0x3f18011e,2 +np.float32,0xff800000,0xffc00000,2 +np.float32,0x3f9b0a4b,0x3f239a33,2 +np.float32,0x3f87c4cf,0x3eb17e7e,2 +np.float32,0x7ef67760,0x42b15eaa,2 +np.float32,0x3fc4d2c8,0x3f7ed20f,2 +np.float32,0x7e940dac,0x42b059b8,2 +np.float32,0x7f6e6a52,0x42b2b08d,2 +np.float32,0x3f838752,0x3e6fe4b2,2 +np.float32,0x3fd8f046,0x3f8f4a94,2 +np.float32,0x3fa82112,0x3f45c223,2 +np.float32,0x3fd49b16,0x3f8c1345,2 +np.float32,0x7f02a941,0x42b17ca1,2 +np.float32,0x3f8a9d2c,0x3ecf1768,2 +np.float32,0x7c9372e3,0x42aacc0f,2 +np.float32,0x3fd260b3,0x3f8a619a,2 +np.float32,0x3f8a1b88,0x3eca27cb,2 +np.float32,0x7d25d510,0x42ac6b1c,2 +np.float32,0x7ef5a578,0x42b15cf5,2 +np.float32,0x3fe6625d,0x3f98ae9a,2 +np.float32,0x3ff53240,0x3fa22658,2 +np.float32,0x3f8bb2e6,0x3ed944cf,2 +np.float32,0x7f4679b1,0x42b252ad,2 +np.float32,0x3fa8db30,0x3f4774fc,2 +np.float32,0x7ee5fafd,0x42b13b37,2 +np.float32,0x3fc405e0,0x3f7d71fb,2 +np.float32,0x3f9303cd,0x3f09ddfd,2 +np.float32,0x7f486e67,0x42b257b2,2 +np.float32,0x7e73f12b,0x42aff680,2 +np.float32,0x3fe80f8b,0x3f99cbe4,2 +np.float32,0x3f84200a,0x3e81a3f3,2 +np.float32,0x3fa14e5c,0x3f34e3ce,2 +np.float32,0x3fda22ec,0x3f9029bb,2 +np.float32,0x3f801772,0x3d1aef98,2 +np.float32,0x7eaa1428,0x42b0a0bb,2 +np.float32,0x3feae0b3,0x3f9ba4aa,2 +np.float32,0x7ea439b4,0x42b08ecc,2 +np.float32,0x3fa28b1c,0x3f381579,2 +np.float32,0x7e8af247,0x42b03937,2 +np.float32,0x3fd19216,0x3f89c2b7,2 +np.float32,0x7f6ea033,0x42b2b100,2 +np.float32,0x3fad4fbf,0x3f518224,2 +np.float32,0x3febd940,0x3f9c45bd,2 +np.float32,0x7f4643a3,0x42b25221,2 +np.float32,0x7ec34478,0x42b0e771,2 +np.float32,0x7f18c83b,0x42b1ccb5,2 +np.float32,0x3fc665ad,0x3f80bf94,2 +np.float32,0x3ff0a999,0x3f9f56c4,2 +np.float32,0x3faf1cd2,0x3f5568fe,2 +np.float32,0x7ecd9dc6,0x42b101e1,2 +np.float32,0x3faad282,0x3f4bf754,2 +np.float32,0x3ff905a0,0x3fa47771,2 +np.float32,0x7f596481,0x42b28149,2 +np.float32,0x7f1cb31f,0x42b1d9ac,2 +np.float32,0x7e266719,0x42af32a6,2 +np.float32,0x7eccce06,0x42b0ffdb,2 +np.float32,0x3f9b6f71,0x3f24c102,2 +np.float32,0x3f80e4ba,0x3df1d6bc,2 +np.float32,0x3f843d51,0x3e836a60,2 
+np.float32,0x7f70bd88,0x42b2b585,2 +np.float32,0x3fe4cc96,0x3f979e18,2 +np.float32,0x3ff737c7,0x3fa36151,2 +np.float32,0x3ff1197e,0x3f9f9cf4,2 +np.float32,0x7f08e190,0x42b19471,2 +np.float32,0x3ff1542e,0x3f9fc1b2,2 +np.float32,0x3ff6673c,0x3fa2e2d2,2 +np.float32,0xbf800000,0xffc00000,2 +np.float32,0x7e3f9ba7,0x42af7add,2 +np.float32,0x7f658ff6,0x42b29d2d,2 +np.float32,0x3f93441c,0x3f0ac0d9,2 +np.float32,0x7f526a74,0x42b27096,2 +np.float32,0x7f5b00c8,0x42b28511,2 +np.float32,0x3ff212f8,0x3fa038cf,2 +np.float32,0x7e0bd60d,0x42aed998,2 +np.float32,0x7f71ef7f,0x42b2b80e,2 +np.float32,0x7f7a897e,0x42b2c9f1,2 +np.float32,0x7e8b76a6,0x42b03b1e,2 +np.float32,0x7efa0da3,0x42b1660f,2 +np.float32,0x3fce9166,0x3f876ae0,2 +np.float32,0x3fc4163d,0x3f7d8e30,2 +np.float32,0x3fdb3784,0x3f90f16b,2 +np.float32,0x7c5f177b,0x42aa3d30,2 +np.float32,0x3fc6276d,0x3f808af5,2 +np.float32,0x7bac9cc2,0x42a856f4,2 +np.float32,0x3fe5876f,0x3f981bea,2 +np.float32,0x3fef60e3,0x3f9e878a,2 +np.float32,0x3fb23cd8,0x3f5bfb06,2 +np.float32,0x3fe114e2,0x3f951402,2 +np.float32,0x7ca8ef04,0x42ab11b4,2 +np.float32,0x7d93c2ad,0x42ad92ec,2 +np.float32,0x3fe5bb8a,0x3f983ee6,2 +np.float32,0x7f0182fd,0x42b1781b,2 +np.float32,0x7da63bb2,0x42adcf3d,2 +np.float32,0x3fac46b7,0x3f4f399e,2 +np.float32,0x7f7a5d8f,0x42b2c997,2 +np.float32,0x7f76572e,0x42b2c14b,2 +np.float32,0x7f42d53e,0x42b24931,2 +np.float32,0x7f7ffd00,0x42b2d4f6,2 +np.float32,0x3fc346c3,0x3f7c2756,2 +np.float32,0x7f1f6ae3,0x42b1e27a,2 +np.float32,0x3f87fb56,0x3eb3e2ee,2 +np.float32,0x3fed17a2,0x3f9d12b4,2 +np.float32,0x7f5ea903,0x42b28d8c,2 +np.float32,0x3f967f82,0x3f15a4ab,2 +np.float32,0x7d3b540c,0x42aca984,2 +np.float32,0x7f56711a,0x42b27a4a,2 +np.float32,0x7f122223,0x42b1b5ee,2 +np.float32,0x3fd6fa34,0x3f8dd919,2 +np.float32,0x3fadd62e,0x3f52a7b3,2 +np.float32,0x3fb7bf0c,0x3f67015f,2 +np.float32,0x7edf4ba7,0x42b12c1d,2 +np.float32,0x7e33cc65,0x42af5a4b,2 +np.float32,0x3fa6be17,0x3f427831,2 +np.float32,0x3fa07aa8,0x3f32b7d4,2 +np.float32,0x3fa4a3af,0x3f3d5a01,2 +np.float32,0x3fdbb267,0x3f9149a8,2 +np.float32,0x7ed45e25,0x42b1126c,2 +np.float32,0x3fe3f432,0x3f970ba6,2 +np.float32,0x7f752080,0x42b2bec3,2 +np.float32,0x3f872747,0x3eaa62ea,2 +np.float32,0x7e52175d,0x42afaa03,2 +np.float32,0x3fdc766c,0x3f91d5ce,2 +np.float32,0x7ecd6841,0x42b1015c,2 +np.float32,0x7f3d6c40,0x42b23ac6,2 +np.float32,0x3fb80c14,0x3f6796b9,2 +np.float32,0x3ff6ad56,0x3fa30d68,2 +np.float32,0x3fda44c3,0x3f90423e,2 +np.float32,0x3fdcba0c,0x3f9205fc,2 +np.float32,0x7e14a720,0x42aef8e6,2 +np.float32,0x3fe9e489,0x3f9b0047,2 +np.float32,0x7e69f933,0x42afe123,2 +np.float32,0x3ff3ee6d,0x3fa15f71,2 +np.float32,0x3f8538cd,0x3e91c1a7,2 +np.float32,0x3fdc3f07,0x3f91ae46,2 +np.float32,0x3fba2ef0,0x3f6bada2,2 +np.float32,0x7da64cd8,0x42adcf71,2 +np.float32,0x3fc34bd2,0x3f7c301d,2 +np.float32,0x3fa273aa,0x3f37d984,2 +np.float32,0x3ff0338c,0x3f9f0c86,2 +np.float32,0x7ed62cef,0x42b116c3,2 +np.float32,0x3f911e7e,0x3f02f7c6,2 +np.float32,0x7c8514c9,0x42aa9792,2 +np.float32,0x3fea2a74,0x3f9b2df5,2 +np.float32,0x3fe036f8,0x3f947a25,2 +np.float32,0x7c5654bf,0x42aa28ad,2 +np.float32,0x3fd9e423,0x3f8ffc32,2 +np.float32,0x7eec0439,0x42b1487b,2 +np.float32,0x3fc580f4,0x3f7ffb62,2 +np.float32,0x3fb0e316,0x3f592bbe,2 +np.float32,0x7c4cfb7d,0x42aa11d8,2 +np.float32,0x3faf9704,0x3f566e00,2 +np.float32,0x3fa7cf8a,0x3f45023d,2 +np.float32,0x7f7b724d,0x42b2cbcc,2 +np.float32,0x7f05bfe3,0x42b18897,2 +np.float32,0x3f90bde3,0x3f018bf3,2 +np.float32,0x7c565479,0x42aa28ad,2 +np.float32,0x3f94b517,0x3f0fb8e5,2 
+np.float32,0x3fd6aadd,0x3f8d9e3c,2 +np.float32,0x7f09b37c,0x42b1977f,2 +np.float32,0x7f2b45ea,0x42b20734,2 +np.float32,0x3ff1d15e,0x3fa00fe9,2 +np.float32,0x3f99bce6,0x3f1fbd6c,2 +np.float32,0x7ecd1f76,0x42b100a7,2 +np.float32,0x7f443e2b,0x42b24ce2,2 +np.float32,0x7da7d6a5,0x42add428,2 +np.float32,0x7ebe0193,0x42b0d975,2 +np.float32,0x7ee13c43,0x42b1308b,2 +np.float32,0x3f8adf1b,0x3ed18e0c,2 +np.float32,0x7f76ce65,0x42b2c242,2 +np.float32,0x7e34f43d,0x42af5d92,2 +np.float32,0x7f306b76,0x42b2165d,2 +np.float32,0x7e1fd07f,0x42af1df7,2 +np.float32,0x3fab9a41,0x3f4db909,2 +np.float32,0x3fc23d1a,0x3f7a5803,2 +np.float32,0x3f8b7403,0x3ed70245,2 +np.float32,0x3f8c4dd6,0x3edebbae,2 +np.float32,0x3fe5f411,0x3f9864cd,2 +np.float32,0x3f88128b,0x3eb4e508,2 +np.float32,0x3fcb09de,0x3f84976f,2 +np.float32,0x7f32f2f5,0x42b21da6,2 +np.float32,0x3fe75610,0x3f9950f6,2 +np.float32,0x3f993edf,0x3f1e408d,2 +np.float32,0x3fc4a9d7,0x3f7e8be9,2 +np.float32,0x7f74551a,0x42b2bd1a,2 +np.float32,0x7de87129,0x42ae7ae2,2 +np.float32,0x7f18bbbd,0x42b1cc8c,2 +np.float32,0x7e7e1dd4,0x42b00b6c,2 +np.float32,0x3ff6e55b,0x3fa32f64,2 +np.float32,0x3fa634c8,0x3f412df3,2 +np.float32,0x3fd0fb7c,0x3f894e49,2 +np.float32,0x3ff4f6a6,0x3fa201d7,2 +np.float32,0x7f69d418,0x42b2a69a,2 +np.float32,0x7cb9632d,0x42ab414a,2 +np.float32,0x3fc57d36,0x3f7ff503,2 +np.float32,0x7e9e2ed7,0x42b07b9b,2 +np.float32,0x7f2e6868,0x42b2107d,2 +np.float32,0x3fa3169a,0x3f39785d,2 +np.float32,0x7f03cde0,0x42b18117,2 +np.float32,0x7f6d75d2,0x42b2ae7f,2 +np.float32,0x3ff483f2,0x3fa1bb75,2 +np.float32,0x7f1b39f7,0x42b1d4d6,2 +np.float32,0x3f8c7a7d,0x3ee0481e,2 +np.float32,0x3f989095,0x3f1c2b19,2 +np.float32,0x3fa4cbfd,0x3f3dbd87,2 +np.float32,0x7f75b00f,0x42b2bfef,2 +np.float32,0x3f940724,0x3f0d6756,2 +np.float32,0x7f5e5a1a,0x42b28cd6,2 +np.float32,0x800000,0xffc00000,2 +np.float32,0x7edd1d29,0x42b12716,2 +np.float32,0x3fa3e9e4,0x3f3b8c16,2 +np.float32,0x7e46d70e,0x42af8dd5,2 +np.float32,0x3f824745,0x3e40ec1e,2 +np.float32,0x3fd67623,0x3f8d770a,2 +np.float32,0x3fe9a6f3,0x3f9ad7fa,2 +np.float32,0x3fdda67c,0x3f92adc1,2 +np.float32,0x7ccb6c9a,0x42ab70d4,2 +np.float32,0x3ffd364a,0x3fa6f2fe,2 +np.float32,0x7e02424c,0x42aeb545,2 +np.float32,0x3fb6d2f2,0x3f6534a1,2 +np.float32,0x3fe1fe26,0x3f95b4cc,2 +np.float32,0x7e93ac57,0x42b05867,2 +np.float32,0x7f7b3433,0x42b2cb4d,2 +np.float32,0x3fb76803,0x3f66580d,2 +np.float32,0x3f9af881,0x3f23661b,2 +np.float32,0x3fd58062,0x3f8cbf98,2 +np.float32,0x80000000,0xffc00000,2 +np.float32,0x7f1af8f4,0x42b1d3ff,2 +np.float32,0x3fe66bba,0x3f98b4dc,2 +np.float32,0x7f6bd7bf,0x42b2aaff,2 +np.float32,0x3f84f79a,0x3e8e2e49,2 +np.float32,0x7e475b06,0x42af8f28,2 +np.float32,0x3faff89b,0x3f573d5e,2 +np.float32,0x7de5aa77,0x42ae74bb,2 +np.float32,0x3f8e9e42,0x3ef26cd2,2 +np.float32,0x3fb1cec3,0x3f5b1740,2 +np.float32,0x3f8890d6,0x3eba4821,2 +np.float32,0x3f9b39e9,0x3f242547,2 +np.float32,0x3fc895a4,0x3f829407,2 +np.float32,0x7f77943c,0x42b2c3dc,2 +np.float32,0x7f390d58,0x42b22ed2,2 +np.float32,0x3fe7e160,0x3f99ad58,2 +np.float32,0x3f93d2a0,0x3f0cb205,2 +np.float32,0x7f29499b,0x42b2013c,2 +np.float32,0x3f8c11b2,0x3edca10f,2 +np.float32,0x7e898ef8,0x42b03413,2 +np.float32,0x3fdff942,0x3f944f34,2 +np.float32,0x7f3d602f,0x42b23aa5,2 +np.float32,0x3f8a50f3,0x3ecc345b,2 +np.float32,0x3fa1f86d,0x3f369ce4,2 +np.float32,0x3f97ad95,0x3f19681d,2 +np.float32,0x3ffad1e0,0x3fa589e5,2 +np.float32,0x3fa70590,0x3f432311,2 +np.float32,0x7e6840cb,0x42afdd5c,2 +np.float32,0x3fd4036d,0x3f8ba0aa,2 +np.float32,0x7f7cc953,0x42b2ce84,2 
+np.float32,0x7f228e1e,0x42b1ec74,2 +np.float32,0x7e37a866,0x42af652a,2 +np.float32,0x3fda22d0,0x3f9029a7,2 +np.float32,0x7f736bff,0x42b2bb31,2 +np.float32,0x3f9833b6,0x3f1b0b8e,2 +np.float32,0x7f466001,0x42b2526a,2 +np.float32,0xff7fffff,0xffc00000,2 +np.float32,0x7dd62bcd,0x42ae50f8,2 +np.float32,0x7f1d2bfe,0x42b1db36,2 +np.float32,0x7ecffe9e,0x42b107c5,2 +np.float32,0x7ebefe0a,0x42b0dc1b,2 +np.float32,0x7f45c63d,0x42b250dd,2 +np.float32,0x7f601af0,0x42b290db,2 +np.float32,0x3fcbb88a,0x3f8524e5,2 +np.float32,0x7ede55ff,0x42b129e8,2 +np.float32,0x7ea5dd5a,0x42b093e2,2 +np.float32,0x3ff53857,0x3fa22a12,2 +np.float32,0x3f8dbd6a,0x3eeb28a4,2 +np.float32,0x3fd1b467,0x3f89dd2c,2 +np.float32,0x3fe0423f,0x3f9481fc,2 +np.float32,0x3f84b421,0x3e8a6174,2 +np.float32,0x7f4efc97,0x42b2682c,2 +np.float32,0x7f601b33,0x42b290dc,2 +np.float32,0x3f94f240,0x3f108719,2 +np.float32,0x7decd251,0x42ae8471,2 +np.float32,0x3fdc457c,0x3f91b2e2,2 +np.float32,0x3f92a966,0x3f089c5a,2 +np.float32,0x3fc9732f,0x3f834afc,2 +np.float32,0x3f97948f,0x3f19194e,2 +np.float32,0x7f0824a1,0x42b191ac,2 +np.float32,0x7f0365a5,0x42b17f81,2 +np.float32,0x3f800000,0x0,2 +np.float32,0x7f0054c6,0x42b1736b,2 +np.float32,0x3fe86544,0x3f9a0484,2 +np.float32,0x7e95f844,0x42b0604e,2 +np.float32,0x3fce8602,0x3f8761e2,2 +np.float32,0x3fc726c8,0x3f81621d,2 +np.float32,0x3fcf6b03,0x3f88161b,2 +np.float32,0x3fceb843,0x3f87898a,2 +np.float32,0x3fe2f8b2,0x3f966071,2 +np.float32,0x7f3c8e7f,0x42b2386d,2 +np.float32,0x3fcee13a,0x3f87a9d2,2 +np.float32,0x3fc4df27,0x3f7ee73c,2 +np.float32,0x3ffde486,0x3fa758e3,2 +np.float32,0x3fa91be0,0x3f480b17,2 +np.float32,0x7f2a5a7d,0x42b20472,2 +np.float32,0x7e278d80,0x42af362d,2 +np.float32,0x3f96d091,0x3f16a9d5,2 +np.float32,0x7e925225,0x42b053b2,2 +np.float32,0x7f7ef83a,0x42b2d2ec,2 +np.float32,0x7eb4923a,0x42b0bf61,2 +np.float32,0x7e98bf19,0x42b069b3,2 +np.float32,0x3fac93a2,0x3f4fe410,2 +np.float32,0x7f46389c,0x42b25205,2 +np.float32,0x3f9fd447,0x3f30fd54,2 +np.float32,0x3fef42d4,0x3f9e7483,2 +np.float32,0x7f482174,0x42b256ed,2 +np.float32,0x3f97aedb,0x3f196c1e,2 +np.float32,0x7f764edd,0x42b2c13a,2 +np.float32,0x3f9117b5,0x3f02de5c,2 +np.float32,0x3fc7984e,0x3f81c12d,2 +np.float64,0x3ff1e2cb7463c597,0x3fdec6caf39e0c0e,2 +np.float64,0x3ffe4f89789c9f13,0x3ff40f4b1da0f3e9,2 +np.float64,0x7f6a5c9ac034b935,0x408605e51703c145,2 +np.float64,0x7fdcb6ece3b96dd9,0x40862d6521e16d60,2 +np.float64,0x3ff6563e182cac7c,0x3feb9d8210f3fa88,2 +np.float64,0x7fde32025f3c6404,0x40862dcc1d1a9b7f,2 +np.float64,0x7fd755ed35aeabd9,0x40862bbc5522b779,2 +np.float64,0x3ff5c81f4bcb903e,0x3fea71f10b954ea3,2 +np.float64,0x3fffe805d35fd00c,0x3ff50463a1ba2938,2 +np.float64,0x7fd045a1c1a08b43,0x408628d9f431f2f5,2 +np.float64,0x3ff49f7dd9893efc,0x3fe7c6736e17ea8e,2 +np.float64,0x7fccfbc1fd39f783,0x408627eca79acf51,2 +np.float64,0x3ff1af0a00035e14,0x3fdd1c0e7d5706ea,2 +np.float64,0x7fe7bd17162f7a2d,0x4086316af683502b,2 +np.float64,0x3ff0941b8d012837,0x3fd128d274065ac0,2 +np.float64,0x3ffa0c5d98b418bb,0x3ff11af9c8edd17f,2 +np.float64,0x3ffad9733355b2e6,0x3ff1b6d1307acb42,2 +np.float64,0x3ffabb2a33d57654,0x3ff1a0442b034e50,2 +np.float64,0x3ff36118b0c6c231,0x3fe472b7dfb23516,2 +np.float64,0x3ff2441d3664883a,0x3fe0d61145608f0c,2 +np.float64,0x7fe039862d20730b,0x40862e5f8ed752d3,2 +np.float64,0x7fb1dde24023bbc4,0x40861e824cdb0664,2 +np.float64,0x7face6335839cc66,0x40861ccf90a26e16,2 +np.float64,0x3ffb5d0e1af6ba1c,0x3ff2170f6f42fafe,2 +np.float64,0x3ff5c2c6a50b858d,0x3fea665aabf04407,2 +np.float64,0x3ffabb409db57681,0x3ff1a054ea32bfc3,2 
+np.float64,0x3ff1e054e983c0aa,0x3fdeb30c17286cb6,2 +np.float64,0x7fe467f73268cfed,0x4086303529e52e9b,2 +np.float64,0x7fe0e86bf961d0d7,0x40862eb40788b04a,2 +np.float64,0x3ffb743542f6e86a,0x3ff227b4ea5acee0,2 +np.float64,0x3ff2de6826e5bcd0,0x3fe2e31fcde0a96c,2 +np.float64,0x7fd6b27ccfad64f9,0x40862b8385697c31,2 +np.float64,0x7fe0918e8d21231c,0x40862e8a82d9517a,2 +np.float64,0x7fd0ca0395a19406,0x4086291a0696ed33,2 +np.float64,0x3ffb042496960849,0x3ff1d658c928abfc,2 +np.float64,0x3ffcd0409799a081,0x3ff31877df0cb245,2 +np.float64,0x7fe429bd06685379,0x4086301c9f259934,2 +np.float64,0x3ff933076092660f,0x3ff06d2e5f4d9ab7,2 +np.float64,0x7feaefcb28f5df95,0x4086326dccf88e6f,2 +np.float64,0x7fb5f2c1f82be583,0x40862027ac02a39d,2 +np.float64,0x3ffb5d9e3bd6bb3c,0x3ff21777501d097e,2 +np.float64,0x10000000000000,0xfff8000000000000,2 +np.float64,0x3ff70361596e06c3,0x3fecf675ceda7e19,2 +np.float64,0x3ff71a21b5ee3444,0x3fed224fa048d9a9,2 +np.float64,0x3ffb102b86762057,0x3ff1df2cc9390518,2 +np.float64,0x7feaaeb35c355d66,0x4086325a60704a90,2 +np.float64,0x7fd9a3d0a93347a0,0x40862c7d300fc076,2 +np.float64,0x7fabcf159c379e2a,0x40861c80cdbbff27,2 +np.float64,0x7fd1c066ec2380cd,0x4086298c3006fee6,2 +np.float64,0x3ff3d5ae2d67ab5c,0x3fe5bc16447428db,2 +np.float64,0x3ff4b76add696ed6,0x3fe800f5bbf21376,2 +np.float64,0x3ff60d89ee0c1b14,0x3feb063fdebe1a68,2 +np.float64,0x7f1d2648003a4c8f,0x4085eaf9238af95a,2 +np.float64,0x7fe8b45f6df168be,0x408631bca5abf6d6,2 +np.float64,0x7fe9ea5308f3d4a5,0x4086321ea2bd3af9,2 +np.float64,0x7fcb6ba5a636d74a,0x4086277b208075ed,2 +np.float64,0x3ff621cfd74c43a0,0x3feb30d59baf5919,2 +np.float64,0x3ff7bc8ca0af7919,0x3fee524da8032896,2 +np.float64,0x7fda22dd0c3445b9,0x40862ca47326d063,2 +np.float64,0x7fd02ed4b2a05da8,0x408628ceb6919421,2 +np.float64,0x3ffe64309fdcc861,0x3ff41c1b18940709,2 +np.float64,0x3ffee4042abdc808,0x3ff46a6005bccb41,2 +np.float64,0x3ff078145b00f029,0x3fceeb3d6bfae0eb,2 +np.float64,0x7fda20fd20b441f9,0x40862ca3e03b990b,2 +np.float64,0x3ffa9e9e9af53d3d,0x3ff18ade3cbee789,2 +np.float64,0x3ff0a1062501420c,0x3fd1e32de6d18c0d,2 +np.float64,0x3ff3bdf118477be2,0x3fe57ad89b7fdf8b,2 +np.float64,0x3ff101c0d5c20382,0x3fd6965d3539be47,2 +np.float64,0x7feba3b53b774769,0x408632a28c7aca4d,2 +np.float64,0x3ff598db5d4b31b7,0x3fea0aa65c0b421a,2 +np.float64,0x3ff5fdfbb72bfbf8,0x3feae55accde4a5e,2 +np.float64,0x7fe5bae53aab75c9,0x408630b5e7a5b92a,2 +np.float64,0x3ff8f668afd1ecd2,0x3ff03af686666c9c,2 +np.float64,0x3ff5ba72dd2b74e6,0x3fea5441f223c093,2 +np.float64,0x3ff8498147109302,0x3fef4e45d501601d,2 +np.float64,0x7feddcfa5efbb9f4,0x4086334106a6e76b,2 +np.float64,0x7fd1a30200234603,0x4086297ee5cc562c,2 +np.float64,0x3ffffa8ee07ff51e,0x3ff50f1dc46f1303,2 +np.float64,0x7fef7ed00ebefd9f,0x408633ae01dabe52,2 +np.float64,0x3ffb6e062276dc0c,0x3ff22344c58c2016,2 +np.float64,0x7fcf2b59943e56b2,0x4086288190dd5eeb,2 +np.float64,0x3ffa589f9254b13f,0x3ff155cc081eee0b,2 +np.float64,0x3ff05415ca60a82c,0x3fc9e45565baef0a,2 +np.float64,0x7feb34bed576697d,0x408632822d5a178c,2 +np.float64,0x3ff3993845c73270,0x3fe51423baf246c3,2 +np.float64,0x3ff88367aaf106d0,0x3fefb2d9ca9f1192,2 +np.float64,0x7fef364304fe6c85,0x4086339b7ed82997,2 +np.float64,0x7fcba2c317374585,0x4086278b24e42934,2 +np.float64,0x3ff1aef885e35df1,0x3fdd1b79f55b20c0,2 +np.float64,0x7fe19367886326ce,0x40862f035f867445,2 +np.float64,0x3ff3c8295e279053,0x3fe5970aa670d32e,2 +np.float64,0x3ff6edda164ddbb4,0x3feccca9eb59d6b9,2 +np.float64,0x7fdeaea940bd5d52,0x40862dece02d151b,2 +np.float64,0x7fea9d6324353ac5,0x408632552ddf0d4f,2 
+np.float64,0x7fe60e39e66c1c73,0x408630d45b1ad0c4,2 +np.float64,0x7fde06325abc0c64,0x40862dc07910038c,2 +np.float64,0x7f9ec89d303d9139,0x408617c55ea4c576,2 +np.float64,0x3ff9801930530032,0x3ff0abe5be046051,2 +np.float64,0x3ff4d5859689ab0b,0x3fe849a7f7a19fa3,2 +np.float64,0x3ff38afbc48715f8,0x3fe4ebb7710cbab9,2 +np.float64,0x3ffd88a0e77b1142,0x3ff3916964407e21,2 +np.float64,0x1,0xfff8000000000000,2 +np.float64,0x3ff5db59e58bb6b4,0x3fea9b6b5ccc116f,2 +np.float64,0x3ffd4b05b15a960c,0x3ff369792f661a90,2 +np.float64,0x7fdcebc4fb39d789,0x40862d73cd623378,2 +np.float64,0x3ff5b56f944b6adf,0x3fea4955d6b06ca3,2 +np.float64,0x7fd4e4abf2a9c957,0x40862ad9e9da3c61,2 +np.float64,0x7fe08e0d6aa11c1a,0x40862e88d17ef277,2 +np.float64,0x3ff0dfc97da1bf93,0x3fd50f9004136d8f,2 +np.float64,0x7fdec38eaebd871c,0x40862df2511e26b4,2 +np.float64,0x7ff8000000000000,0x7ff8000000000000,2 +np.float64,0x3ff21865504430cb,0x3fe033fe3cf3947a,2 +np.float64,0x7fdc139708b8272d,0x40862d371cfbad03,2 +np.float64,0x7fe1fe3be3a3fc77,0x40862f336e3ba63a,2 +np.float64,0x7fd9fa2493b3f448,0x40862c97f2960be9,2 +np.float64,0x3ff0a027db414050,0x3fd1d6e54a707c87,2 +np.float64,0x3ff568b16f4ad163,0x3fe99f5c6d7b6e18,2 +np.float64,0x3ffe2f82877c5f05,0x3ff3fb54bd0da753,2 +np.float64,0x7fbaf5778435eaee,0x408621ccc9e2c1be,2 +np.float64,0x7fc5aaf8362b55ef,0x40862598e7072a49,2 +np.float64,0x7fe0ebfdd4a1d7fb,0x40862eb5b7bf99d5,2 +np.float64,0x7fd8efeb5931dfd6,0x40862c444636f408,2 +np.float64,0x3ff361a308c6c346,0x3fe4744cae63e6df,2 +np.float64,0x7fef287d39be50f9,0x40863397f65c807e,2 +np.float64,0x7fe72c4a14ae5893,0x4086313992e52082,2 +np.float64,0x3ffd1be44cba37c8,0x3ff34a9a45239eb9,2 +np.float64,0x3ff50369c18a06d4,0x3fe8b69319f091f1,2 +np.float64,0x3ffb333c25766678,0x3ff1f8c78eeb28f1,2 +np.float64,0x7fe12050416240a0,0x40862ece4e2f2f24,2 +np.float64,0x7fe348f5526691ea,0x40862fc16fbe7b6c,2 +np.float64,0x3ff343cc4d068799,0x3fe41c2a30cab7d2,2 +np.float64,0x7fd1b0daaa2361b4,0x408629852b3104ff,2 +np.float64,0x3ff6a41f37ad483e,0x3fec3b36ee6c6d4a,2 +np.float64,0x3ffad9439435b287,0x3ff1b6add9a1b3d7,2 +np.float64,0x7fbeb9a2f23d7345,0x408622d89ac1eaba,2 +np.float64,0x3ffab3d39fb567a7,0x3ff19ac75b4427f3,2 +np.float64,0x3ff890003ed12000,0x3fefc8844471c6ad,2 +np.float64,0x3ffc9f595e593eb2,0x3ff2f7a8699f06d8,2 +np.float64,0x7fe2224ef6e4449d,0x40862f43684a154a,2 +np.float64,0x3ffa67ba08d4cf74,0x3ff161525778df99,2 +np.float64,0x7fe87e24b570fc48,0x408631ab02b159fb,2 +np.float64,0x7fd6e99be92dd337,0x40862b96dba73685,2 +np.float64,0x7fe90f39fdf21e73,0x408631d9dbd36c1e,2 +np.float64,0x3ffb7806abd6f00e,0x3ff22a719b0f4c46,2 +np.float64,0x3ffa511ba3d4a238,0x3ff1500c124f6e17,2 +np.float64,0x3ff5d7a569abaf4b,0x3fea937391c280e8,2 +np.float64,0x7fc4279d20284f39,0x40862504a5cdcb96,2 +np.float64,0x3ffe8791b1fd0f24,0x3ff431f1ed7eaba0,2 +np.float64,0x7fe3b2f5276765e9,0x40862fecf15e2535,2 +np.float64,0x7feeab0e7abd561c,0x408633778044cfbc,2 +np.float64,0x7fdba88531375109,0x40862d1860306d7a,2 +np.float64,0x7fe7b19b3def6335,0x4086316716d6890b,2 +np.float64,0x3ff9e9437413d287,0x3ff0ff89431c748c,2 +np.float64,0x3ff960716a52c0e3,0x3ff092498028f802,2 +np.float64,0x3ff271bf56a4e37f,0x3fe1786fc8dd775d,2 +np.float64,0x3fff2a6578be54cb,0x3ff494bbe303eeb5,2 +np.float64,0x3ffd842eb5fb085e,0x3ff38e8b7ba42bc5,2 +np.float64,0x3ff91600e5d22c02,0x3ff0553c6a6b3d93,2 +np.float64,0x3ff9153f45f22a7e,0x3ff0549c0eaecf95,2 +np.float64,0x7fe0ab319da15662,0x40862e96da3b19f9,2 +np.float64,0x3ff06acd1f60d59a,0x3fcd2aca543d2772,2 +np.float64,0x3ffb3e7a54d67cf4,0x3ff200f288cd391b,2 
+np.float64,0x3ffd01356f1a026b,0x3ff339003462a56c,2 +np.float64,0x3ffacd35def59a6c,0x3ff1adb8d32b3ec0,2 +np.float64,0x3ff6f953264df2a6,0x3fece2f992948d6e,2 +np.float64,0x3ff0fa91f5a1f524,0x3fd64609a28f1590,2 +np.float64,0x7fd1b7610ca36ec1,0x408629881e03dc7d,2 +np.float64,0x3ff4317fb7c86300,0x3fe6b086ed265887,2 +np.float64,0x3ff3856198070ac3,0x3fe4dbb6bc88b9e3,2 +np.float64,0x7fed7fc4573aff88,0x40863327e7013a81,2 +np.float64,0x3ffe53cbbf5ca798,0x3ff411f07a29b1f4,2 +np.float64,0x3ff092195b012433,0x3fd10b1c0b4b14fe,2 +np.float64,0x3ff1a3171163462e,0x3fdcb5c301d5d40d,2 +np.float64,0x3ffa1401f1742804,0x3ff120eb319e9faa,2 +np.float64,0x7fd352f6f426a5ed,0x40862a3a048feb6d,2 +np.float64,0x7fd4ee246fa9dc48,0x40862add895d808f,2 +np.float64,0x3ff0675cfa00ceba,0x3fccb2222c5493ca,2 +np.float64,0x3ffe5cb38f3cb967,0x3ff417773483d161,2 +np.float64,0x7fe11469ea2228d3,0x40862ec8bd3e497f,2 +np.float64,0x3fff13cba67e2798,0x3ff4872fe2c26104,2 +np.float64,0x3ffb73d3d316e7a8,0x3ff2276f08612ea2,2 +np.float64,0x7febfb70f237f6e1,0x408632bbc9450721,2 +np.float64,0x3ff84a0d87b0941b,0x3fef4f3b707e3145,2 +np.float64,0x7fd71fd5082e3fa9,0x40862ba9b4091172,2 +np.float64,0x3ff560737d8ac0e7,0x3fe98cc9c9ba2f61,2 +np.float64,0x3ff46a266ae8d44d,0x3fe74190e5234822,2 +np.float64,0x7fe8cc9225719923,0x408631c477db9708,2 +np.float64,0x3ff871de5930e3bc,0x3fef948f7d00fbef,2 +np.float64,0x3ffd0bc7895a178f,0x3ff33ffc18357721,2 +np.float64,0x3ff66099f9ccc134,0x3febb2bc775b4720,2 +np.float64,0x7fe91f1be9723e37,0x408631deec3a5c9e,2 +np.float64,0x7fd60462f12c08c5,0x40862b4537e1c1c6,2 +np.float64,0x3ff053100ba0a620,0x3fc9bc0c21e2284f,2 +np.float64,0x7fd864c611b0c98b,0x40862c1724506255,2 +np.float64,0x7fd191decb2323bd,0x408629771bfb68cc,2 +np.float64,0x3ff792a1656f2543,0x3fee054f2e135fcf,2 +np.float64,0x7fd03625cea06c4b,0x408628d253b840e3,2 +np.float64,0x7fc3967716272ced,0x408624ca35451042,2 +np.float64,0x7fe6636cb32cc6d8,0x408630f3073a22a7,2 +np.float64,0x3ffc2d3976585a73,0x3ff2a9d4c0dae607,2 +np.float64,0x3fffd10ee79fa21e,0x3ff4f70db69888be,2 +np.float64,0x3ff1d4fcae23a9f9,0x3fde57675007b23c,2 +np.float64,0x3ffa5da19e14bb43,0x3ff1599f74d1c113,2 +np.float64,0x3ff7f4eb0d6fe9d6,0x3feeb85189659e99,2 +np.float64,0x7fbcca44d8399489,0x408622536234f7c1,2 +np.float64,0x7fef5f97ec3ebf2f,0x408633a60fdde0d7,2 +np.float64,0x7fde4a66da3c94cd,0x40862dd290ebc184,2 +np.float64,0x3ff072957a40e52b,0x3fce34d913d87613,2 +np.float64,0x3ff2bc4c9dc57899,0x3fe27497e6ebe27d,2 +np.float64,0x7fd7d152b4afa2a4,0x40862be63469eecd,2 +np.float64,0x3ff957d768f2afaf,0x3ff08b4ad8062a73,2 +np.float64,0x7fe4bc5f45a978be,0x40863055fd66e4eb,2 +np.float64,0x7fc90de345321bc6,0x408626c24ce7e370,2 +np.float64,0x3ff2d7a37d85af47,0x3fe2cd6a40b544a0,2 +np.float64,0x7fe536ea1f6a6dd3,0x40863084bade76a3,2 +np.float64,0x3fff970c9cdf2e19,0x3ff4d524572356dd,2 +np.float64,0x3ffe173ae63c2e76,0x3ff3ec1ee35ad28c,2 +np.float64,0x3ff714025cce2805,0x3fed168aedff4a2b,2 +np.float64,0x7fce7b414c3cf682,0x40862853dcdd19d4,2 +np.float64,0x3ff019623f2032c4,0x3fbc7c602df0bbaf,2 +np.float64,0x3ff72f57fd0e5eb0,0x3fed4ae75f697432,2 +np.float64,0x3ff283778e8506ef,0x3fe1b5c5725b0dfd,2 +np.float64,0x3ff685a29aed0b45,0x3febfdfdedd581e2,2 +np.float64,0x3ff942d24fb285a4,0x3ff07a224c3ecfaf,2 +np.float64,0x3ff2e4a9f465c954,0x3fe2f71905399e8f,2 +np.float64,0x7fdfa1c7fa3f438f,0x40862e2b4e06f098,2 +np.float64,0x3ff49b59c26936b4,0x3fe7bc41c8c1e59d,2 +np.float64,0x3ff2102d3704205a,0x3fe014bf7e28924e,2 +np.float64,0x3ff88de3b8311bc8,0x3fefc4e3e0a15a89,2 +np.float64,0x7fea5ba25374b744,0x40863241519c9b66,2 
+np.float64,0x3fffe5df637fcbbf,0x3ff5032488f570f9,2 +np.float64,0x7fe67cfefe6cf9fd,0x408630fc25333cb4,2 +np.float64,0x3ff090bf2b01217e,0x3fd0f6fcf1092b4a,2 +np.float64,0x7fecd75bc5f9aeb7,0x408632f9b6c2e013,2 +np.float64,0x7fe15df38c62bbe6,0x40862eeae5ac944b,2 +np.float64,0x3ff4757875a8eaf1,0x3fe75e0eafbe28ce,2 +np.float64,0x7fecca8a51b99514,0x408632f627c23923,2 +np.float64,0x3ff91ca529d2394a,0x3ff05abb327fd1ca,2 +np.float64,0x3ffb962993b72c53,0x3ff23ff831717579,2 +np.float64,0x3ffd548a2c7aa914,0x3ff36fac7f56d716,2 +np.float64,0x7fbafb5cb035f6b8,0x408621ce898a02fb,2 +np.float64,0x3ff1d86daca3b0db,0x3fde73536c29218c,2 +np.float64,0x7fa8d0f8f431a1f1,0x40861b97a03c3a18,2 +np.float64,0x3ff44f1067489e21,0x3fe6fcbd8144ab2a,2 +np.float64,0x7fec062b07380c55,0x408632bed9c6ce85,2 +np.float64,0x3ff7e11e0fcfc23c,0x3fee94ada7efaac4,2 +np.float64,0x7fe77505c1aeea0b,0x4086315287dda0ba,2 +np.float64,0x7fc465af2728cb5d,0x4086251d236107f7,2 +np.float64,0x3ffe811c4a7d0238,0x3ff42df7e8b6cf2d,2 +np.float64,0x7fe05a471260b48d,0x40862e6fa502738b,2 +np.float64,0x7fec32cd9778659a,0x408632cb8d98c5a3,2 +np.float64,0x7fd203a220a40743,0x408629aa43b010c0,2 +np.float64,0x7fed71f7d17ae3ef,0x4086332428207101,2 +np.float64,0x3ff3918999e72313,0x3fe4fe5e8991402f,2 +np.float64,0x3ff3ecae38c7d95c,0x3fe5fa787d887981,2 +np.float64,0x7fd65345b82ca68a,0x40862b61aed8c64e,2 +np.float64,0x3ff1efdd01c3dfba,0x3fdf2eae36139204,2 +np.float64,0x3ffba9344f375268,0x3ff24d7fdcfc313b,2 +np.float64,0x7fd0469b35208d35,0x408628da6ed24bdd,2 +np.float64,0x7fe525782daa4aef,0x4086307e240c8b30,2 +np.float64,0x3ff8e473d371c8e8,0x3ff02beebd4171c7,2 +np.float64,0x3ff59a43898b3487,0x3fea0dc0a6acea0a,2 +np.float64,0x7fef50c7263ea18d,0x408633a247d7cd42,2 +np.float64,0x7fe8b5a301f16b45,0x408631bd0e71c855,2 +np.float64,0x3ff209369de4126d,0x3fdff4264334446b,2 +np.float64,0x3ffbe2ff4437c5fe,0x3ff2763b356814c7,2 +np.float64,0x3ff55938156ab270,0x3fe97c70514f91bf,2 +np.float64,0x3fff5d8bf81ebb18,0x3ff4b333b230672a,2 +np.float64,0x3ff16a317bc2d463,0x3fdab84e7faa468f,2 +np.float64,0x3ff7e64f8dafcc9f,0x3fee9e0bd57e9566,2 +np.float64,0x7fef4dc065be9b80,0x408633a181e25abb,2 +np.float64,0x3ff64a24a62c9449,0x3feb849ced76437e,2 +np.float64,0x7fc3cb85ef27970b,0x408624dfc39c8f74,2 +np.float64,0x7fec2162a77842c4,0x408632c69b0d43b6,2 +np.float64,0x7feccee6dc399dcd,0x408632f75de98c46,2 +np.float64,0x7faff4f5f43fe9eb,0x40861d9d89be14c9,2 +np.float64,0x7fee82df60fd05be,0x4086336cfdeb7317,2 +np.float64,0x3ffe54588d9ca8b1,0x3ff41247eb2f75ca,2 +np.float64,0x3ffe5615b55cac2c,0x3ff4135c4eb11620,2 +np.float64,0x3ffdaf9a6a1b5f35,0x3ff3aa70e50d1692,2 +np.float64,0x3ff69c045f4d3809,0x3fec2b00734e2cde,2 +np.float64,0x7fd049239aa09246,0x408628dbad6dd995,2 +np.float64,0x3ff2acbe8465597d,0x3fe24138652195e1,2 +np.float64,0x3ffb288302365106,0x3ff1f0f86ca7e5d1,2 +np.float64,0x3fff6fe8d87edfd2,0x3ff4be136acf53c5,2 +np.float64,0x3ffc87c8bfb90f92,0x3ff2e7bbd65867cb,2 +np.float64,0x3ff173327ca2e665,0x3fdb0b945abb00d7,2 +np.float64,0x3ff9a5cf7a134b9f,0x3ff0ca2450f07c78,2 +np.float64,0x7faf782b043ef055,0x40861d7e0e9b35ef,2 +np.float64,0x3ffa0874975410e9,0x3ff117ee3dc8f5ba,2 +np.float64,0x7fc710fc7f2e21f8,0x40862618fed167fb,2 +np.float64,0x7feb73f4c876e7e9,0x40863294ae3ac1eb,2 +np.float64,0x8000000000000000,0xfff8000000000000,2 +np.float64,0x7fb46615c028cc2b,0x40861f91bade4dad,2 +np.float64,0x7fc26b064624d60c,0x4086244c1b76c938,2 +np.float64,0x3ff06ab9fa40d574,0x3fcd282fd971d1b4,2 +np.float64,0x3ff61da7410c3b4e,0x3feb28201031af02,2 +np.float64,0x3ffec7ba1b9d8f74,0x3ff459342511f952,2 
+np.float64,0x7ff4000000000000,0x7ffc000000000000,2 +np.float64,0x7fe5d570422baae0,0x408630bfa75008c9,2 +np.float64,0x3ffa895832f512b0,0x3ff17ad41555dccb,2 +np.float64,0x7fd343ac21a68757,0x40862a33ad59947a,2 +np.float64,0x3ffc1eeb37383dd6,0x3ff29ff29e55a006,2 +np.float64,0x7fee3c5c507c78b8,0x4086335a6b768090,2 +np.float64,0x7fe96d774a32daee,0x408631f7b9937e36,2 +np.float64,0x7fb878362430f06b,0x40862106603497b6,2 +np.float64,0x7fec0a79c03814f3,0x408632c01479905e,2 +np.float64,0x3ffa2f143c145e28,0x3ff135e25d902e1a,2 +np.float64,0x3ff14ccff80299a0,0x3fd9a0cd3397b14c,2 +np.float64,0x3ff97980dcb2f302,0x3ff0a6942a8133ab,2 +np.float64,0x3ff872e2d1f0e5c6,0x3fef96526eb2f756,2 +np.float64,0x7fdf1c9b46be3936,0x40862e0957fee329,2 +np.float64,0x7fcab6525d356ca4,0x408627458791f029,2 +np.float64,0x3ff964e74a52c9ce,0x3ff095e8845d523c,2 +np.float64,0x3ffb3aa23c967544,0x3ff1fe282d897c13,2 +np.float64,0x7fdd8a36afbb146c,0x40862d9f2b05f61b,2 +np.float64,0x3ffea39f42fd473e,0x3ff4432a48176399,2 +np.float64,0x7fea614f68b4c29e,0x408632430a750385,2 +np.float64,0x7feeafb86abd5f70,0x40863378b79f70cf,2 +np.float64,0x3ff80bc94eb01792,0x3feee138e9d626bd,2 +np.float64,0x7fcaca74743594e8,0x4086274b8ce4d1e1,2 +np.float64,0x3ff8b14815316290,0x3ff000b3526c8321,2 +np.float64,0x7fc698eb5f2d31d6,0x408625eeec86cd2b,2 +np.float64,0x7fe15429a3e2a852,0x40862ee6621205b8,2 +np.float64,0x7fee37f81b7c6fef,0x4086335941ed80dd,2 +np.float64,0x3ff8097ab3f012f6,0x3feedd1bafc3196e,2 +np.float64,0x7fe7c889ceaf9113,0x4086316ed13f2394,2 +np.float64,0x7fceca94513d9528,0x4086286893a06824,2 +np.float64,0x3ff593a103cb2742,0x3fe9ff1af4f63cc9,2 +np.float64,0x7fee237d24bc46f9,0x40863353d4142c87,2 +np.float64,0x3ffbf71e4777ee3c,0x3ff2844c0ed9f4d9,2 +np.float64,0x3ff490c65c09218d,0x3fe7a2216d9f69fd,2 +np.float64,0x3fff5ceaf1feb9d6,0x3ff4b2d430a90110,2 +np.float64,0x3ff55baecceab75e,0x3fe98203980666c4,2 +np.float64,0x3ff511bc306a2378,0x3fe8d81ce7be7b50,2 +np.float64,0x3ff38f83dcc71f08,0x3fe4f89f130d5f87,2 +np.float64,0x3ff73a3676ee746d,0x3fed5f98a65107ee,2 +np.float64,0x7fc27e50c824fca1,0x408624547828bc49,2 +np.float64,0xfff0000000000000,0xfff8000000000000,2 +np.float64,0x3fff38959ebe712b,0x3ff49d362c7ba16a,2 +np.float64,0x3ffad6d23a75ada4,0x3ff1b4dda6394ed0,2 +np.float64,0x3ffe77c6c2dcef8e,0x3ff4283698835ecb,2 +np.float64,0x3fff5feb413ebfd6,0x3ff4b49bcbdb3aa9,2 +np.float64,0x3ff0d30aa161a615,0x3fd4751bcdd7d727,2 +np.float64,0x3ff51e07e00a3c10,0x3fe8f4bd1408d694,2 +np.float64,0x8010000000000000,0xfff8000000000000,2 +np.float64,0x7fd231d2fe2463a5,0x408629beaceafcba,2 +np.float64,0x3fff6b4aee1ed696,0x3ff4bb58544bf8eb,2 +np.float64,0x3ff91fcd2f323f9a,0x3ff05d56e33db6b3,2 +np.float64,0x3ff3b889ab477113,0x3fe56bdeab74cce5,2 +np.float64,0x3ff99bfe30d337fc,0x3ff0c24bbf265561,2 +np.float64,0x3ffbe9e5eaf7d3cc,0x3ff27b0fe60f827a,2 +np.float64,0x7fd65678e92cacf1,0x40862b62d44fe8b6,2 +np.float64,0x7fd9cc477233988e,0x40862c89c638ee48,2 +np.float64,0x3ffc123c72d82479,0x3ff297294d05cbc0,2 +np.float64,0x3ff58abad58b1576,0x3fe9eb65da2a867a,2 +np.float64,0x7fe534887b2a6910,0x40863083d4ec2877,2 +np.float64,0x7fe1d3dcb123a7b8,0x40862f208116c55e,2 +np.float64,0x7fd4d570dba9aae1,0x40862ad412c413cd,2 +np.float64,0x3fffce7d3fdf9cfa,0x3ff4f58f02451928,2 +np.float64,0x3ffa76901c74ed20,0x3ff16c9a5851539c,2 +np.float64,0x7fdd88ffa23b11fe,0x40862d9ed6c6f426,2 +np.float64,0x3ff09fdbb9e13fb7,0x3fd1d2ae4fcbf713,2 +np.float64,0x7fe64567772c8ace,0x408630e845dbc290,2 +np.float64,0x7fb1a849ba235092,0x40861e6a291535b2,2 +np.float64,0x3ffaddb105f5bb62,0x3ff1b9f68f4c419b,2 
+np.float64,0x7fd2fc3d5025f87a,0x40862a15cbc1df75,2 +np.float64,0x7fdea7d872bd4fb0,0x40862deb190b2c50,2 +np.float64,0x7fd50ea97eaa1d52,0x40862ae9edc4c812,2 +np.float64,0x3fff659c245ecb38,0x3ff4b7fb18b31aea,2 +np.float64,0x3ff3f1fbb7c7e3f7,0x3fe608bd9d76268c,2 +np.float64,0x3ff76869d9aed0d4,0x3fedb6c23d3a317b,2 +np.float64,0x7fedd4efe93ba9df,0x4086333edeecaa43,2 +np.float64,0x3ff9a5bd4eb34b7a,0x3ff0ca15d02bc960,2 +np.float64,0x3ffd9359cc5b26b4,0x3ff39850cb1a6b6c,2 +np.float64,0x7fe912d0427225a0,0x408631db00e46272,2 +np.float64,0x3ffb3802fe567006,0x3ff1fc4093646465,2 +np.float64,0x3ff02cc38a205987,0x3fc2e8182802a07b,2 +np.float64,0x3ffda953dd1b52a8,0x3ff3a66c504cf207,2 +np.float64,0x7fe0a487e4a1490f,0x40862e93a6f20152,2 +np.float64,0x7fed265ed1fa4cbd,0x4086330f838ae431,2 +np.float64,0x7fd0000114200001,0x408628b76ec48b5c,2 +np.float64,0x3ff2c262786584c5,0x3fe288860d354b0f,2 +np.float64,0x8000000000000001,0xfff8000000000000,2 +np.float64,0x3ffdae9f075b5d3e,0x3ff3a9d006ae55c1,2 +np.float64,0x3ffb69c72156d38e,0x3ff22037cbb85e5b,2 +np.float64,0x7feeae255f7d5c4a,0x408633784e89bc05,2 +np.float64,0x7feb13927c362724,0x408632786630c55d,2 +np.float64,0x7fef49e072be93c0,0x408633a08451d476,2 +np.float64,0x3fff23d6337e47ac,0x3ff490ceb6e634ae,2 +np.float64,0x3ffba82cf8f7505a,0x3ff24cc51c73234d,2 +np.float64,0x7fe948719ef290e2,0x408631ec0b36476e,2 +np.float64,0x3ff41926c5e8324e,0x3fe670e14bbda8cd,2 +np.float64,0x3ff91f09c1523e14,0x3ff05cb5731878da,2 +np.float64,0x3ff6ae6afccd5cd6,0x3fec4fbeca764086,2 +np.float64,0x3ff927f7e0f24ff0,0x3ff06413eeb8eb1e,2 +np.float64,0x3ff19dd2b9e33ba5,0x3fdc882f97994600,2 +np.float64,0x7fe8e502c5b1ca05,0x408631cc56526fff,2 +np.float64,0x7feb49f70fb693ed,0x4086328868486fcd,2 +np.float64,0x3ffd942d535b285a,0x3ff398d8d89f52ca,2 +np.float64,0x7fc3b9c5c627738b,0x408624d893e692ca,2 +np.float64,0x7fea0780ff340f01,0x408632279fa46704,2 +np.float64,0x7fe4c90066a99200,0x4086305adb47a598,2 +np.float64,0x7fdb209113364121,0x40862cf0ab64fd7d,2 +np.float64,0x3ff38617e5470c30,0x3fe4ddc0413b524f,2 +np.float64,0x7fea1b5b803436b6,0x4086322db767f091,2 +np.float64,0x7fe2004898e40090,0x40862f3457795dc5,2 +np.float64,0x3ff3c4360ac7886c,0x3fe58c29843a4c75,2 +np.float64,0x3ff504bc168a0978,0x3fe8b9ada7f698e6,2 +np.float64,0x3ffd3e936fda7d27,0x3ff3615912c5b4ac,2 +np.float64,0x3ffbdc52fb97b8a6,0x3ff2718dae5f1f2b,2 +np.float64,0x3fffef6d84ffdedb,0x3ff508adbc8556cf,2 +np.float64,0x3ff23b65272476ca,0x3fe0b646ed2579eb,2 +np.float64,0x7fe4633068a8c660,0x408630334a4b7ff7,2 +np.float64,0x3ff769b754aed36f,0x3fedb932af0223f9,2 +np.float64,0x7fe7482d92ee905a,0x408631432de1b057,2 +np.float64,0x3ff5dd682aabbad0,0x3fea9fd5e506a86d,2 +np.float64,0x7fd68399a2ad0732,0x40862b72ed89805d,2 +np.float64,0x3ffad7acc3d5af5a,0x3ff1b57fe632c948,2 +np.float64,0x3ffc68e43698d1c8,0x3ff2d2be6f758761,2 +np.float64,0x3ff4e517fbc9ca30,0x3fe86eddf5e63a58,2 +np.float64,0x3ff34c63c56698c8,0x3fe435b74ccd6a13,2 +np.float64,0x7fea9456c17528ad,0x4086325275237015,2 +np.float64,0x7fee6573f2fccae7,0x4086336543760346,2 +np.float64,0x7fd5496fb9aa92de,0x40862b0023235667,2 +np.float64,0x7ff0000000000000,0x7ff0000000000000,2 +np.float64,0x3ffb70e31256e1c6,0x3ff22552f54b13e0,2 +np.float64,0x3ff66a33988cd467,0x3febc656da46a1ca,2 +np.float64,0x3fff0af2eb1e15e6,0x3ff481dec325f5c8,2 +np.float64,0x3ff6a0233d0d4046,0x3fec33400958eda1,2 +np.float64,0x7fdb11e2d5b623c5,0x40862cec55e405f9,2 +np.float64,0x3ffb8a015ad71402,0x3ff2374d7b563a72,2 +np.float64,0x3ff1807d8ce300fb,0x3fdb849e4bce8335,2 +np.float64,0x3ffefd535e3dfaa6,0x3ff479aaac6ffe79,2 
+np.float64,0x3ff701e23a6e03c4,0x3fecf39072d96fc7,2 +np.float64,0x3ff4ac809f895901,0x3fe7e6598f2335a5,2 +np.float64,0x3ff0309f26a0613e,0x3fc3b3f4b2783690,2 +np.float64,0x3ff241dd0ce483ba,0x3fe0cde2cb639144,2 +np.float64,0x3ffabce63fb579cc,0x3ff1a18fe2a2da59,2 +np.float64,0x3ffd84b967db0973,0x3ff38ee4f240645d,2 +np.float64,0x7fc3f88b9a27f116,0x408624f1e10cdf3f,2 +np.float64,0x7fe1d5fd5923abfa,0x40862f2175714a3a,2 +np.float64,0x7fe487b145690f62,0x4086304190700183,2 +np.float64,0x7fe7997feaef32ff,0x4086315eeefdddd2,2 +np.float64,0x3ff8f853b671f0a8,0x3ff03c907353a8da,2 +np.float64,0x7fca4c23b5349846,0x408627257ace5778,2 +np.float64,0x7fe0c9bf3a21937d,0x40862ea576c3ea43,2 +np.float64,0x7fc442b389288566,0x4086250f5f126ec9,2 +np.float64,0x7fc6d382ed2da705,0x40862603900431b0,2 +np.float64,0x7fe40b069068160c,0x4086301066468124,2 +np.float64,0x3ff7f62a146fec54,0x3feeba8dfc4363fe,2 +np.float64,0x3ff721e8e94e43d2,0x3fed313a6755d34f,2 +np.float64,0x7fe579feaf2af3fc,0x4086309ddefb6112,2 +np.float64,0x3ffe2c6bde5c58d8,0x3ff3f9665dc9a16e,2 +np.float64,0x7fcf9998ed3f3331,0x4086289dab274788,2 +np.float64,0x7fdb03af2236075d,0x40862ce82252e490,2 +np.float64,0x7fe72799392e4f31,0x40863137f428ee71,2 +np.float64,0x7f9f2190603e4320,0x408617dc5b3b3c3c,2 +np.float64,0x3ff69c56d52d38ae,0x3fec2ba59fe938b2,2 +np.float64,0x7fdcde27bf39bc4e,0x40862d70086cd06d,2 +np.float64,0x3ff654d6b8eca9ae,0x3feb9aa0107609a6,2 +np.float64,0x7fdf69d967bed3b2,0x40862e1d1c2b94c2,2 +np.float64,0xffefffffffffffff,0xfff8000000000000,2 +np.float64,0x7fedfd073f3bfa0d,0x40863349980c2c8b,2 +np.float64,0x7f7c1856803830ac,0x40860bf312b458c7,2 +np.float64,0x7fe9553f1bb2aa7d,0x408631f0173eadd5,2 +np.float64,0x3ff6e92efc2dd25e,0x3fecc38f98e7e1a7,2 +np.float64,0x7fe9719ac532e335,0x408631f906cd79c3,2 +np.float64,0x3ff60e56ae4c1cad,0x3feb07ef8637ec7e,2 +np.float64,0x3ff0d0803501a100,0x3fd455c0af195a9c,2 +np.float64,0x7fe75248a3eea490,0x40863146a614aec1,2 +np.float64,0x7fdff61ead3fec3c,0x40862e408643d7aa,2 +np.float64,0x7fed4ac7a4fa958e,0x408633197b5cf6ea,2 +np.float64,0x7fe58d44562b1a88,0x408630a5098d1bbc,2 +np.float64,0x7fd89dcdb1b13b9a,0x40862c29c2979288,2 +np.float64,0x3ff205deda240bbe,0x3fdfda67c84fd3a8,2 +np.float64,0x7fdf84c15abf0982,0x40862e23f361923d,2 +np.float64,0x3ffe012b3afc0256,0x3ff3de3dfa5f47ce,2 +np.float64,0x3ffe2f3512dc5e6a,0x3ff3fb245206398e,2 +np.float64,0x7fed6174c2bac2e9,0x4086331faa699617,2 +np.float64,0x3ff1f30f8783e61f,0x3fdf47e06f2c40d1,2 +np.float64,0x3ff590da9eab21b5,0x3fe9f8f7b4baf3c2,2 +np.float64,0x3ffb3ca1eb967944,0x3ff1ff9baf66d704,2 +np.float64,0x7fe50ba9a5aa1752,0x408630745ab7fd3c,2 +np.float64,0x3ff43743a4a86e87,0x3fe6bf7ae80b1dda,2 +np.float64,0x3ff47e1a24e8fc34,0x3fe773acca44c7d6,2 +np.float64,0x3ff589ede9eb13dc,0x3fe9e99f28fab3a4,2 +np.float64,0x3ff72f2cbf8e5e5a,0x3fed4a94e7edbf24,2 +np.float64,0x3ffa4f9bbc549f38,0x3ff14ee60aea45d3,2 +np.float64,0x3ff975dae732ebb6,0x3ff0a3a1fbd7284a,2 +np.float64,0x7fbcf14ee039e29d,0x4086225e33f3793e,2 +np.float64,0x3ff10e027f621c05,0x3fd71cce2452b4e0,2 +np.float64,0x3ff33ea193067d43,0x3fe40cbac4daaddc,2 +np.float64,0x7fbef8f2263df1e3,0x408622e905c8e1b4,2 +np.float64,0x3fff7f5bfe3efeb8,0x3ff4c732e83df253,2 +np.float64,0x3ff5700a6b4ae015,0x3fe9afdd7b8b82b0,2 +np.float64,0x3ffd5099da5aa134,0x3ff36d1bf26e55bf,2 +np.float64,0x3ffed8e0f89db1c2,0x3ff4639ff065107a,2 +np.float64,0x3fff9d0c463f3a18,0x3ff4d8a9f297cf52,2 +np.float64,0x3ff23db5b2e47b6b,0x3fe0bebdd48f961a,2 +np.float64,0x3ff042bff1e08580,0x3fc713bf24cc60ef,2 +np.float64,0x7feb4fe97a769fd2,0x4086328a26675646,2 
+np.float64,0x3ffeafbfeedd5f80,0x3ff44a955a553b1c,2 +np.float64,0x3ff83fb524507f6a,0x3fef3d1729ae0976,2 +np.float64,0x3ff1992294433245,0x3fdc5f5ce53dd197,2 +np.float64,0x7fe89fe629b13fcb,0x408631b601a83867,2 +np.float64,0x7fe53e4d74aa7c9a,0x40863087839b52f1,2 +np.float64,0x3ff113713e6226e2,0x3fd757631ca7cd09,2 +np.float64,0x7fd4a0b7a629416e,0x40862abfba27a09b,2 +np.float64,0x3ff184c6e2a3098e,0x3fdbab2e3966ae57,2 +np.float64,0x3ffafbbf77f5f77f,0x3ff1d02bb331d9f9,2 +np.float64,0x3ffc6099a358c134,0x3ff2cd16941613d1,2 +np.float64,0x3ffb7c441ef6f888,0x3ff22d7b12e31432,2 +np.float64,0x3ff625ba5eec4b75,0x3feb39060e55fb79,2 +np.float64,0x7fde879acbbd0f35,0x40862de2aab4d72d,2 +np.float64,0x7f930aed982615da,0x408613edb6df8528,2 +np.float64,0x7fa4b82dac29705a,0x40861a261c0a9aae,2 +np.float64,0x7fced5c16b3dab82,0x4086286b7a73e611,2 +np.float64,0x7fe133749d2266e8,0x40862ed73a41b112,2 +np.float64,0x3ff2d8146ea5b029,0x3fe2ced55dbf997d,2 +np.float64,0x3ff60dac77ac1b59,0x3feb0688b0e54c7b,2 +np.float64,0x3ff275d9b024ebb3,0x3fe186b87258b834,2 +np.float64,0x3ff533e6500a67cd,0x3fe92746c8b50ddd,2 +np.float64,0x7fe370896666e112,0x40862fd1ca144736,2 +np.float64,0x7fee7695357ced29,0x40863369c459420e,2 +np.float64,0x7fd1e0528023c0a4,0x4086299a85caffd0,2 +np.float64,0x7fd05c7b24a0b8f5,0x408628e52824386f,2 +np.float64,0x3ff11dcc3b023b98,0x3fd7c56c8cef1be1,2 +np.float64,0x7fc9d9fae933b3f5,0x408627027404bc5f,2 +np.float64,0x7fe2359981246b32,0x40862f4be675e90d,2 +np.float64,0x3ffb10a949962152,0x3ff1df88f83b8cde,2 +np.float64,0x3ffa65b53654cb6a,0x3ff15fc8956ccc87,2 +np.float64,0x3ff0000000000000,0x0,2 +np.float64,0x7fad97ef703b2fde,0x40861d002f3d02da,2 +np.float64,0x3ff57aaf93aaf55f,0x3fe9c7b01f194edb,2 +np.float64,0x7fe9ecd73f33d9ad,0x4086321f69917205,2 +np.float64,0x3ff0dcb79c61b96f,0x3fd4eac86a7a9c38,2 +np.float64,0x7fee9c12ffbd3825,0x4086337396cd706d,2 +np.float64,0x3ff52c40af4a5881,0x3fe915a8a7de8f00,2 +np.float64,0x3ffbcfff59779ffe,0x3ff268e523fe8dda,2 +np.float64,0x7fe014cb4b602996,0x40862e4d5de42a03,2 +np.float64,0x7fae2370e83c46e1,0x40861d258dd5b3ee,2 +np.float64,0x7fe9e33602f3c66b,0x4086321c704ac2bb,2 +np.float64,0x3ff648acd74c915a,0x3feb8195ca53bcaa,2 +np.float64,0x7fe385f507670be9,0x40862fda95ebaf44,2 +np.float64,0x3ffb0e382c361c70,0x3ff1ddbea963e0a7,2 +np.float64,0x3ff47d6b6ae8fad7,0x3fe771f80ad37cd2,2 +np.float64,0x3ffca7d538f94faa,0x3ff2fd5f62e851ac,2 +np.float64,0x3ff83e949c107d29,0x3fef3b1c5bbac99b,2 +np.float64,0x7fc6fb933a2df725,0x408626118e51a286,2 +np.float64,0x7fe43a1454e87428,0x4086302318512d9b,2 +np.float64,0x7fe51fe32aaa3fc5,0x4086307c07271348,2 +np.float64,0x3ff35e563966bcac,0x3fe46aa2856ef85f,2 +np.float64,0x3ff84dd4e4909baa,0x3fef55d86d1d5c2e,2 +np.float64,0x7febe3d84077c7b0,0x408632b507686f03,2 +np.float64,0x3ff6aca2e32d5946,0x3fec4c32a2368ee3,2 +np.float64,0x7fe7070e3e6e0e1b,0x4086312caddb0454,2 +np.float64,0x7fd3657f2aa6cafd,0x40862a41acf47e70,2 +np.float64,0x3ff61534456c2a68,0x3feb1663900af13b,2 +np.float64,0x3ff8bc556eb178ab,0x3ff00a16b5403f88,2 +np.float64,0x3ffa7782e3f4ef06,0x3ff16d529c94a438,2 +np.float64,0x7fc15785ed22af0b,0x408623d0cd94fb86,2 +np.float64,0x3ff2e3eeb6e5c7dd,0x3fe2f4c4876d3edf,2 +np.float64,0x3ff2e4e17e85c9c3,0x3fe2f7c9e437b22e,2 +np.float64,0x7feb3aaf67f6755e,0x40863283ec4a0d76,2 +np.float64,0x7fe89efcf7313df9,0x408631b5b5e41263,2 +np.float64,0x3ffcc6fad4f98df6,0x3ff31245778dff6d,2 +np.float64,0x3ff356114466ac22,0x3fe45253d040a024,2 +np.float64,0x3ff81c70d2d038e2,0x3feefed71ebac776,2 +np.float64,0x7fdb75c96136eb92,0x40862d09a603f03e,2 
+np.float64,0x3ff340f91b8681f2,0x3fe413bb6e6d4a54,2 +np.float64,0x3fff906079df20c1,0x3ff4d13869d16bc7,2 +np.float64,0x3ff226a42d644d48,0x3fe0698d316f1ac0,2 +np.float64,0x3ff948abc3b29158,0x3ff07eeb0b3c81ba,2 +np.float64,0x3ffc25df1fb84bbe,0x3ff2a4c13ad4edad,2 +np.float64,0x7fe07ea3b960fd46,0x40862e815b4cf43d,2 +np.float64,0x3ff497d3dae92fa8,0x3fe7b3917bf10311,2 +np.float64,0x7fea561db1f4ac3a,0x4086323fa4aef2a9,2 +np.float64,0x7fd1b49051236920,0x40862986d8759ce5,2 +np.float64,0x7f7ba3bd6037477a,0x40860bd19997fd90,2 +np.float64,0x3ff01126dd00224e,0x3fb76b67938dfb11,2 +np.float64,0x3ff29e1105053c22,0x3fe2102a4c5fa102,2 +np.float64,0x3ff9de2a6553bc55,0x3ff0f6cfe4dea30e,2 +np.float64,0x7fc558e7d42ab1cf,0x4086257a608fc055,2 +np.float64,0x3ff79830a74f3061,0x3fee0f93db153d65,2 +np.float64,0x7fe2661648e4cc2c,0x40862f6117a71eb2,2 +np.float64,0x3ff140cf4262819e,0x3fd92aefedae1ab4,2 +np.float64,0x3ff5f36251abe6c5,0x3feaced481ceaee3,2 +np.float64,0x7fc80911d5301223,0x4086266d4757f768,2 +np.float64,0x3ff9079a6c320f35,0x3ff04949d21ebe1e,2 +np.float64,0x3ffde8d2e09bd1a6,0x3ff3cedca8a5db5d,2 +np.float64,0x3ffadd1de375ba3c,0x3ff1b989790e8d93,2 +np.float64,0x3ffdbc40ee1b7882,0x3ff3b286b1c7da57,2 +np.float64,0x3ff8ff514771fea2,0x3ff04264add00971,2 +np.float64,0x7fefd7d0e63fafa1,0x408633c47d9f7ae4,2 +np.float64,0x3ffc47798c588ef3,0x3ff2bbe441fa783a,2 +np.float64,0x7fe6ebc55b6dd78a,0x408631232d9abf31,2 +np.float64,0xbff0000000000000,0xfff8000000000000,2 +np.float64,0x7fd378e4afa6f1c8,0x40862a49a8f98cb4,2 +np.float64,0x0,0xfff8000000000000,2 +np.float64,0x3ffe88ed7efd11db,0x3ff432c7ecb95492,2 +np.float64,0x3ff4f5509289eaa1,0x3fe8955a11656323,2 +np.float64,0x7fda255b41344ab6,0x40862ca53676a23e,2 +np.float64,0x3ffebe85b9bd7d0c,0x3ff453992cd55dea,2 +np.float64,0x3ff5d6180b8bac30,0x3fea901c2160c3bc,2 +np.float64,0x3ffcdfb8fcf9bf72,0x3ff322c83b3bc735,2 +np.float64,0x3ff3c91c26679238,0x3fe599a652b7cf59,2 +np.float64,0x7fc389f7a62713ee,0x408624c518edef93,2 +np.float64,0x3ffe1245ba1c248c,0x3ff3e901b2c4a47a,2 +np.float64,0x7fe1e76e95e3cedc,0x40862f29446f9eff,2 +np.float64,0x3ff02ae4f92055ca,0x3fc28221abd63daa,2 +np.float64,0x7fbf648a143ec913,0x40862304a0619d03,2 +np.float64,0x3ff2be7ef8657cfe,0x3fe27bcc6c97522e,2 +np.float64,0x3ffa7595e514eb2c,0x3ff16bdc64249ad1,2 +np.float64,0x3ff4ee130049dc26,0x3fe884354cbad8c9,2 +np.float64,0x3ff19211fc232424,0x3fdc2160bf3eae40,2 +np.float64,0x3ffec215aedd842c,0x3ff455c4cdd50c32,2 +np.float64,0x7fe7cb50ffaf96a1,0x4086316fc06a53af,2 +np.float64,0x3fffa679161f4cf2,0x3ff4de30ba7ac5b8,2 +np.float64,0x7fdcb459763968b2,0x40862d646a21011d,2 +np.float64,0x3ff9f338d6d3e672,0x3ff1075835d8f64e,2 +np.float64,0x3ff8de3319d1bc66,0x3ff026ae858c0458,2 +np.float64,0x7fee0199d33c0333,0x4086334ad03ac683,2 +np.float64,0x3ffc06076c380c0f,0x3ff28eaec3814faa,2 +np.float64,0x3ffe9e2e235d3c5c,0x3ff43fd4d2191a7f,2 +np.float64,0x3ffd93b06adb2761,0x3ff398888239cde8,2 +np.float64,0x7fefe4b71cffc96d,0x408633c7ba971b92,2 +np.float64,0x7fb2940352252806,0x40861ed244bcfed6,2 +np.float64,0x3ffba4647e3748c9,0x3ff24a15f02e11b9,2 +np.float64,0x7fd2d9543725b2a7,0x40862a0708446596,2 +np.float64,0x7fc04997f120932f,0x4086235055d35251,2 +np.float64,0x3ff6d14313ada286,0x3fec94b177f5d3fc,2 +np.float64,0x3ff279fc8684f3f9,0x3fe19511c3e5b9a8,2 +np.float64,0x3ff42f4609085e8c,0x3fe6aabe526ce2bc,2 +np.float64,0x7fc1c6c62a238d8b,0x408624037de7f6ec,2 +np.float64,0x7fe31ff4b8e63fe8,0x40862fb05b40fd16,2 +np.float64,0x7fd2a8825fa55104,0x408629f234d460d6,2 +np.float64,0x3ffe8c1d725d183b,0x3ff434bdc444143f,2 
+np.float64,0x3ff0e9dc3e21d3b8,0x3fd58676e2c13fc9,2 +np.float64,0x3ffed03172fda063,0x3ff45e59f7aa6c8b,2 +np.float64,0x7fd74621962e8c42,0x40862bb6e90d66f8,2 +np.float64,0x3ff1faa29663f545,0x3fdf833a2c5efde1,2 +np.float64,0x7fda02834db40506,0x40862c9a860d6747,2 +np.float64,0x7f709b2fc021365f,0x408607be328eb3eb,2 +np.float64,0x7fec0d58aa381ab0,0x408632c0e61a1af6,2 +np.float64,0x3ff524d1720a49a3,0x3fe90479968d40fd,2 +np.float64,0x7fd64cb3b32c9966,0x40862b5f53c4b0b4,2 +np.float64,0x3ff9593e3ed2b27c,0x3ff08c6eea5f6e8b,2 +np.float64,0x3ff7de8b1f6fbd16,0x3fee9007abcfdf7b,2 +np.float64,0x7fe8d816d6b1b02d,0x408631c82e38a894,2 +np.float64,0x7fd726bbe22e4d77,0x40862bac16ee8d52,2 +np.float64,0x7fa70b07d42e160f,0x40861affcc4265e2,2 +np.float64,0x7fe18b4091e31680,0x40862effa8bce66f,2 +np.float64,0x3ff830253010604a,0x3fef21b2eaa75758,2 +np.float64,0x3fffcade407f95bc,0x3ff4f3734b24c419,2 +np.float64,0x3ff8c17cecb182fa,0x3ff00e75152d7bda,2 +np.float64,0x7fdad9b9d035b373,0x40862cdbabb793ba,2 +np.float64,0x3ff9f9e154f3f3c2,0x3ff10c8dfdbd2510,2 +np.float64,0x3ff465e162e8cbc3,0x3fe736c751c75b73,2 +np.float64,0x3ff9b4cd8493699b,0x3ff0d616235544b8,2 +np.float64,0x7fe557c4a56aaf88,0x4086309114ed12d9,2 +np.float64,0x7fe5999133eb3321,0x408630a9991a9b54,2 +np.float64,0x7fe7c9009e2f9200,0x4086316ef9359a47,2 +np.float64,0x3ff8545cabd0a8ba,0x3fef6141f1030c36,2 +np.float64,0x3ffa1f1712943e2e,0x3ff129849d492ce3,2 +np.float64,0x7fea803a14750073,0x4086324c652c276c,2 +np.float64,0x3ff5b6f97fcb6df3,0x3fea4cb0b97b18e9,2 +np.float64,0x7fc2efdfc425dfbf,0x40862485036a5c6e,2 +np.float64,0x7fe2c78e5be58f1c,0x40862f8b0a5e7baf,2 +np.float64,0x7fe80d7fff301aff,0x40863185e234060a,2 +np.float64,0x3ffd895d457b12ba,0x3ff391e2cac7a3f8,2 +np.float64,0x3ff44c9764a8992f,0x3fe6f6690396c232,2 +np.float64,0x3ff731688b8e62d1,0x3fed4ed70fac3839,2 +np.float64,0x3ff060200460c040,0x3fcbad4a07d97f0e,2 +np.float64,0x3ffbd2f70a17a5ee,0x3ff26afb46ade929,2 +np.float64,0x7febe9e841f7d3d0,0x408632b6c465ddd9,2 +np.float64,0x3ff2532f8be4a65f,0x3fe10c6cd8d64cf4,2 +np.float64,0x7fefffffffffffff,0x408633ce8fb9f87e,2 +np.float64,0x3ff3a1ae3a47435c,0x3fe52c00210cc459,2 +np.float64,0x7fe9c34ae6b38695,0x408632128d150149,2 +np.float64,0x3fff311029fe6220,0x3ff498b852f30bff,2 +np.float64,0x3ffd4485a1ba890c,0x3ff3653b6fa701cd,2 +np.float64,0x7fd52718b1aa4e30,0x40862af330d9c68c,2 +np.float64,0x3ff10b695a4216d3,0x3fd7009294e367b7,2 +np.float64,0x3ffdf73de59bee7c,0x3ff3d7fa96d2c1ae,2 +np.float64,0x3ff2f1c75965e38f,0x3fe320aaff3db882,2 +np.float64,0x3ff2a56a5a854ad5,0x3fe228cc4ad7e7a5,2 +np.float64,0x7fe60cd1cf6c19a3,0x408630d3d87a04b3,2 +np.float64,0x3ff89fa65c113f4c,0x3fefe3543773180c,2 +np.float64,0x3ffd253130ba4a62,0x3ff350b76ba692a0,2 +np.float64,0x7feaad7051f55ae0,0x40863259ff932d62,2 +np.float64,0x7fd9cc37cf33986f,0x40862c89c15f963b,2 +np.float64,0x3ff8c08de771811c,0x3ff00daa9c17acd7,2 +np.float64,0x7fea58b25d34b164,0x408632406d54cc6f,2 +np.float64,0x7fe5f161fd2be2c3,0x408630c9ddf272a5,2 +np.float64,0x3ff5840dbf8b081c,0x3fe9dc9117b4cbc7,2 +np.float64,0x3ff3fd762307faec,0x3fe6277cd530c640,2 +np.float64,0x3ff9095c98b212b9,0x3ff04abff170ac24,2 +np.float64,0x7feaac66017558cb,0x40863259afb4f8ce,2 +np.float64,0x7fd78f96bcaf1f2c,0x40862bd00175fdf9,2 +np.float64,0x3ffaca27e0959450,0x3ff1ab72b8f8633e,2 +np.float64,0x3ffb7f18cb96fe32,0x3ff22f81bcb8907b,2 +np.float64,0x3ffcce48d1199c92,0x3ff317276f62c0b2,2 +np.float64,0x3ffcb9a7f3797350,0x3ff30958e0d6a34d,2 +np.float64,0x7fda569ef6b4ad3d,0x40862cb43b33275a,2 +np.float64,0x7fde9f0893bd3e10,0x40862de8cc036283,2 
+np.float64,0x3ff428be3928517c,0x3fe699bb5ab58904,2 +np.float64,0x7fa4d3344029a668,0x40861a3084989291,2 +np.float64,0x3ff03607bd006c0f,0x3fc4c4840cf35f48,2 +np.float64,0x3ff2b1335c056267,0x3fe25000846b75a2,2 +np.float64,0x7fe0cb8bd8e19717,0x40862ea65237d496,2 +np.float64,0x3fff4b1b7b9e9637,0x3ff4a83fb08e7b24,2 +np.float64,0x7fe7526140aea4c2,0x40863146ae86069c,2 +np.float64,0x7fbfcfb7c23f9f6f,0x4086231fc246ede5,2 diff --git a/.venv/lib/python3.12/site-packages/numpy/core/tests/data/umath-validation-set-arcsin.csv b/.venv/lib/python3.12/site-packages/numpy/core/tests/data/umath-validation-set-arcsin.csv new file mode 100644 index 00000000..cb94c93c --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/data/umath-validation-set-arcsin.csv @@ -0,0 +1,1429 @@ +dtype,input,output,ulperrortol +np.float32,0xbe7d3a7c,0xbe7fe217,4 +np.float32,0x3dc102f0,0x3dc14c60,4 +np.float32,0xbe119c28,0xbe121aef,4 +np.float32,0xbe51cd68,0xbe534c75,4 +np.float32,0x3c04a300,0x3c04a35f,4 +np.float32,0xbf4f0b62,0xbf712a69,4 +np.float32,0x3ef61a5c,0x3f005cf6,4 +np.float32,0xbf13024c,0xbf1c97df,4 +np.float32,0x3e93b580,0x3e95d6b5,4 +np.float32,0x3e44e7b8,0x3e4623a5,4 +np.float32,0xbe35df20,0xbe36d773,4 +np.float32,0x3eecd2c0,0x3ef633cf,4 +np.float32,0x3f2772ba,0x3f36862a,4 +np.float32,0x3e211ea8,0x3e21cac5,4 +np.float32,0x3e3b3d90,0x3e3c4cc6,4 +np.float32,0x3f37c962,0x3f4d018c,4 +np.float32,0x3e92ad88,0x3e94c31a,4 +np.float32,0x3f356ffc,0x3f49a766,4 +np.float32,0x3f487ba2,0x3f665254,4 +np.float32,0x3f061c46,0x3f0d27ae,4 +np.float32,0xbee340a2,0xbeeb7722,4 +np.float32,0xbe85aede,0xbe874026,4 +np.float32,0x3f34cf9a,0x3f48c474,4 +np.float32,0x3e29a690,0x3e2a6fbd,4 +np.float32,0xbeb29428,0xbeb669d1,4 +np.float32,0xbe606d40,0xbe624370,4 +np.float32,0x3dae6860,0x3dae9e85,4 +np.float32,0xbf04872b,0xbf0b4d25,4 +np.float32,0x3f2080e2,0x3f2d7ab0,4 +np.float32,0xbec77dcc,0xbecceb27,4 +np.float32,0x3e0dda10,0x3e0e4f38,4 +np.float32,0xbefaf970,0xbf03262c,4 +np.float32,0x3f576a0c,0x3f7ffee6,4 +np.float32,0x3f222382,0x3f2f95d6,4 +np.float32,0x7fc00000,0x7fc00000,4 +np.float32,0x3e41c468,0x3e42f14e,4 +np.float32,0xbf2f64dd,0xbf4139a8,4 +np.float32,0xbf60ef90,0xbf895956,4 +np.float32,0xbf67c855,0xbf90eff0,4 +np.float32,0xbed35aee,0xbed9df00,4 +np.float32,0xbf2c7d92,0xbf3d448f,4 +np.float32,0x3f7b1604,0x3faff122,4 +np.float32,0xbf7c758b,0xbfb3bf87,4 +np.float32,0x3ecda1c8,0x3ed39acf,4 +np.float32,0x3f3af8ae,0x3f519fcb,4 +np.float32,0xbf16e6a3,0xbf2160fd,4 +np.float32,0x3f0c97d2,0x3f14d668,4 +np.float32,0x3f0a8060,0x3f1257b9,4 +np.float32,0x3f27905a,0x3f36ad57,4 +np.float32,0x3eeaeba4,0x3ef40efe,4 +np.float32,0x3e58dde0,0x3e5a8580,4 +np.float32,0xbf0cabe2,0xbf14ee6b,4 +np.float32,0xbe805ca8,0xbe81bf03,4 +np.float32,0x3f5462ba,0x3f7a7b85,4 +np.float32,0xbee235d0,0xbeea4d8b,4 +np.float32,0xbe880cb0,0xbe89b426,4 +np.float32,0x80000001,0x80000001,4 +np.float32,0x3f208c00,0x3f2d88f6,4 +np.float32,0xbf34f3d2,0xbf48f7a2,4 +np.float32,0x3f629428,0x3f8b1763,4 +np.float32,0xbf52a900,0xbf776b4a,4 +np.float32,0xbd17f8d0,0xbd1801be,4 +np.float32,0xbef7cada,0xbf0153d1,4 +np.float32,0x3f7d3b90,0x3fb63967,4 +np.float32,0xbd6a20b0,0xbd6a4160,4 +np.float32,0x3f740496,0x3fa1beb7,4 +np.float32,0x3ed8762c,0x3edf7dd9,4 +np.float32,0x3f53b066,0x3f793d42,4 +np.float32,0xbe9de718,0xbea084f9,4 +np.float32,0x3ea3ae90,0x3ea69b4b,4 +np.float32,0x3f1b8f00,0x3f273183,4 +np.float32,0x3f5cd6ac,0x3f852ead,4 +np.float32,0x3f29d510,0x3f39b169,4 +np.float32,0x3ee2a934,0x3eeace33,4 +np.float32,0x3eecac94,0x3ef608c2,4 +np.float32,0xbea915e2,0xbeac5203,4 
+np.float32,0xbd316e90,0xbd317cc8,4 +np.float32,0xbf70b495,0xbf9c97b6,4 +np.float32,0xbe80d976,0xbe823ff3,4 +np.float32,0x3e9205f8,0x3e94143f,4 +np.float32,0x3f49247e,0x3f676296,4 +np.float32,0x3d9030c0,0x3d904f50,4 +np.float32,0x3e4df058,0x3e4f5a5c,4 +np.float32,0xbe1fd360,0xbe207b58,4 +np.float32,0xbf69dc7c,0xbf937006,4 +np.float32,0x3f36babe,0x3f4b7df3,4 +np.float32,0xbe8c9758,0xbe8e6bb7,4 +np.float32,0xbf4de72d,0xbf6f3c20,4 +np.float32,0xbecdad68,0xbed3a780,4 +np.float32,0xbf73e2cf,0xbfa18702,4 +np.float32,0xbece16a8,0xbed41a75,4 +np.float32,0x3f618a96,0x3f89fc6d,4 +np.float32,0xbf325853,0xbf454ea9,4 +np.float32,0x3f138568,0x3f1d3828,4 +np.float32,0xbf56a6e9,0xbf7e9748,4 +np.float32,0x3ef5d594,0x3f0035bf,4 +np.float32,0xbf408220,0xbf59dfaa,4 +np.float32,0xbed120e6,0xbed76dd5,4 +np.float32,0xbf6dbda5,0xbf986cee,4 +np.float32,0x3f744a38,0x3fa23282,4 +np.float32,0xbe4b56d8,0xbe4cb329,4 +np.float32,0x3f54c5f2,0x3f7b2d97,4 +np.float32,0xbd8b1c90,0xbd8b3801,4 +np.float32,0x3ee19a48,0x3ee9a03b,4 +np.float32,0x3f48460e,0x3f65fc3d,4 +np.float32,0x3eb541c0,0x3eb9461e,4 +np.float32,0xbea7d098,0xbeaaf98c,4 +np.float32,0xbda99e40,0xbda9d00c,4 +np.float32,0xbefb2ca6,0xbf03438d,4 +np.float32,0x3f4256be,0x3f5cab0b,4 +np.float32,0xbdbdb198,0xbdbdf74d,4 +np.float32,0xbf325b5f,0xbf4552e9,4 +np.float32,0xbf704d1a,0xbf9c00b4,4 +np.float32,0x3ebb1d04,0x3ebf8cf8,4 +np.float32,0xbed03566,0xbed66bf1,4 +np.float32,0x3e8fcee8,0x3e91c501,4 +np.float32,0xbf2e1eec,0xbf3f7b9d,4 +np.float32,0x3f33c4d2,0x3f474cac,4 +np.float32,0x3f598ef4,0x3f8201b4,4 +np.float32,0x3e09bb30,0x3e0a2660,4 +np.float32,0x3ed4e228,0x3edb8cdb,4 +np.float32,0x3eb7a190,0x3ebbd0a1,4 +np.float32,0xbd9ae630,0xbd9b0c18,4 +np.float32,0x3f43020e,0x3f5db2d7,4 +np.float32,0xbec06ac0,0xbec542d4,4 +np.float32,0x3f3dfde0,0x3f561674,4 +np.float32,0xbf64084a,0xbf8cabe6,4 +np.float32,0xbd6f95b0,0xbd6fb8b7,4 +np.float32,0x3f268640,0x3f354e2d,4 +np.float32,0xbe72b4bc,0xbe7509b2,4 +np.float32,0xbf3414fa,0xbf47bd5a,4 +np.float32,0xbf375218,0xbf4c566b,4 +np.float32,0x3f203c1a,0x3f2d2273,4 +np.float32,0xbd503530,0xbd504c2b,4 +np.float32,0xbc45e540,0xbc45e67b,4 +np.float32,0xbf175c4f,0xbf21f2c6,4 +np.float32,0x3f7432a6,0x3fa20b2b,4 +np.float32,0xbf43367f,0xbf5e03d8,4 +np.float32,0x3eb3997c,0x3eb780c4,4 +np.float32,0x3e5574c8,0x3e570878,4 +np.float32,0xbf04b57b,0xbf0b8349,4 +np.float32,0x3f6216d8,0x3f8a914b,4 +np.float32,0xbf57a237,0xbf80337d,4 +np.float32,0xbee1403a,0xbee93bee,4 +np.float32,0xbeaf9b9a,0xbeb33f3b,4 +np.float32,0xbf109374,0xbf19a223,4 +np.float32,0xbeae6824,0xbeb1f810,4 +np.float32,0xbcff9320,0xbcff9dbe,4 +np.float32,0x3ed205c0,0x3ed868a9,4 +np.float32,0x3d897c30,0x3d8996ad,4 +np.float32,0xbf2899d2,0xbf380d4c,4 +np.float32,0xbf54cb0b,0xbf7b36c2,4 +np.float32,0x3ea8e8ec,0x3eac2262,4 +np.float32,0x3ef5e1a0,0x3f003c9d,4 +np.float32,0xbf00c81e,0xbf06f1e2,4 +np.float32,0xbf346775,0xbf483181,4 +np.float32,0x3f7a4fe4,0x3fae077c,4 +np.float32,0x3f00776e,0x3f06948f,4 +np.float32,0xbe0a3078,0xbe0a9cbc,4 +np.float32,0xbeba0b06,0xbebe66be,4 +np.float32,0xbdff4e38,0xbdfff8b2,4 +np.float32,0xbe927f70,0xbe9492ff,4 +np.float32,0x3ebb07e0,0x3ebf7642,4 +np.float32,0x3ebcf8e0,0x3ec18c95,4 +np.float32,0x3f49bdfc,0x3f685b51,4 +np.float32,0x3cbc29c0,0x3cbc2dfd,4 +np.float32,0xbe9e951a,0xbea13bf1,4 +np.float32,0xbe8c237c,0xbe8df33d,4 +np.float32,0x3e17f198,0x3e1881c4,4 +np.float32,0xbd0b5220,0xbd0b5902,4 +np.float32,0xbf34c4a2,0xbf48b4f5,4 +np.float32,0xbedaa814,0xbee1ea94,4 +np.float32,0x3ebf5d6c,0x3ec42053,4 +np.float32,0x3cd04b40,0x3cd050ff,4 
+np.float32,0xbec33fe0,0xbec85244,4 +np.float32,0xbf00b27a,0xbf06d8d8,4 +np.float32,0x3f15d7be,0x3f201243,4 +np.float32,0xbe3debd0,0xbe3f06f7,4 +np.float32,0xbea81704,0xbeab4418,4 +np.float32,0x1,0x1,4 +np.float32,0x3f49e6ba,0x3f689d8b,4 +np.float32,0x3f351030,0x3f491fc0,4 +np.float32,0x3e607de8,0x3e625482,4 +np.float32,0xbe8dbbe4,0xbe8f9c0e,4 +np.float32,0x3edbf350,0x3ee35924,4 +np.float32,0xbf0c84c4,0xbf14bf9c,4 +np.float32,0x3eb218b0,0x3eb5e61a,4 +np.float32,0x3e466dd0,0x3e47b138,4 +np.float32,0xbe8ece94,0xbe90ba01,4 +np.float32,0xbe82ec2a,0xbe84649a,4 +np.float32,0xbf7e1f10,0xbfb98b9e,4 +np.float32,0xbf2d00ea,0xbf3df688,4 +np.float32,0x3db7cdd0,0x3db80d36,4 +np.float32,0xbe388b98,0xbe398f25,4 +np.float32,0xbd86cb40,0xbd86e436,4 +np.float32,0x7f7fffff,0x7fc00000,4 +np.float32,0x3f472a60,0x3f6436c6,4 +np.float32,0xbf5b2c1d,0xbf838d87,4 +np.float32,0x3f0409ea,0x3f0abad8,4 +np.float32,0x3f47dd0e,0x3f6553f0,4 +np.float32,0x3e3eab00,0x3e3fc98a,4 +np.float32,0xbf7c2a7f,0xbfb2e19b,4 +np.float32,0xbeda0048,0xbee13112,4 +np.float32,0x3f46600a,0x3f62f5b2,4 +np.float32,0x3f45aef4,0x3f61de43,4 +np.float32,0x3dd40a50,0x3dd46bc4,4 +np.float32,0xbf6cdd0b,0xbf974191,4 +np.float32,0x3f78de4c,0x3faac725,4 +np.float32,0x3f3c39a4,0x3f53777f,4 +np.float32,0xbe2a30ec,0xbe2afc0b,4 +np.float32,0xbf3c0ef0,0xbf533887,4 +np.float32,0x3ecb6548,0x3ed12a53,4 +np.float32,0x3eb994e8,0x3ebde7fc,4 +np.float32,0x3d4c1ee0,0x3d4c3487,4 +np.float32,0xbf52cb6d,0xbf77a7eb,4 +np.float32,0x3eb905d4,0x3ebd4e80,4 +np.float32,0x3e712428,0x3e736d72,4 +np.float32,0xbf79ee6e,0xbfad22be,4 +np.float32,0x3de6f8b0,0x3de776c1,4 +np.float32,0x3e9b2898,0x3e9da325,4 +np.float32,0x3ea09b20,0x3ea35d20,4 +np.float32,0x3d0ea9a0,0x3d0eb103,4 +np.float32,0xbd911500,0xbd913423,4 +np.float32,0x3e004618,0x3e009c97,4 +np.float32,0x3f5e0e5a,0x3f86654c,4 +np.float32,0x3f2e6300,0x3f3fd88b,4 +np.float32,0x3e0cf5d0,0x3e0d68c3,4 +np.float32,0x3d6a16c0,0x3d6a376c,4 +np.float32,0x3f7174aa,0x3f9db53c,4 +np.float32,0xbe04bba0,0xbe051b81,4 +np.float32,0xbe6fdcb4,0xbe721c92,4 +np.float32,0x3f4379f0,0x3f5e6c31,4 +np.float32,0xbf680098,0xbf913257,4 +np.float32,0xbf3c31ca,0xbf536bea,4 +np.float32,0x3f59db58,0x3f824a4e,4 +np.float32,0xbf3ffc84,0xbf591554,4 +np.float32,0x3d1d5160,0x3d1d5b48,4 +np.float32,0x3f6c64ae,0x3f96a3da,4 +np.float32,0xbf1b49fd,0xbf26daaa,4 +np.float32,0x3ec80be0,0x3ecd8576,4 +np.float32,0x3f3becc0,0x3f530629,4 +np.float32,0xbea93890,0xbeac76c1,4 +np.float32,0x3f5b3acc,0x3f839bbd,4 +np.float32,0xbf5d6818,0xbf85bef9,4 +np.float32,0x3f794266,0x3fab9fa6,4 +np.float32,0xbee8eb7c,0xbef1cf3b,4 +np.float32,0xbf360a06,0xbf4a821e,4 +np.float32,0x3f441cf6,0x3f5f693d,4 +np.float32,0x3e60de40,0x3e62b742,4 +np.float32,0xbebb3d7e,0xbebfafdc,4 +np.float32,0x3e56a3a0,0x3e583e28,4 +np.float32,0x3f375bfe,0x3f4c6499,4 +np.float32,0xbf384d7d,0xbf4dbf9a,4 +np.float32,0x3efb03a4,0x3f032c06,4 +np.float32,0x3f1d5d10,0x3f29794d,4 +np.float32,0xbe25f7dc,0xbe26b41d,4 +np.float32,0x3f6d2f88,0x3f97aebb,4 +np.float32,0xbe9fa100,0xbea255cb,4 +np.float32,0xbf21dafa,0xbf2f382a,4 +np.float32,0x3d3870e0,0x3d3880d9,4 +np.float32,0x3eeaf00c,0x3ef413f4,4 +np.float32,0xbc884ea0,0xbc88503c,4 +np.float32,0xbf7dbdad,0xbfb80b6d,4 +np.float32,0xbf4eb713,0xbf709b46,4 +np.float32,0xbf1c0ad4,0xbf27cd92,4 +np.float32,0x3f323088,0x3f451737,4 +np.float32,0x3e405d88,0x3e4183e1,4 +np.float32,0x3d7ad580,0x3d7afdb4,4 +np.float32,0xbf207338,0xbf2d6927,4 +np.float32,0xbecf7948,0xbed59e1a,4 +np.float32,0x3f16ff94,0x3f217fde,4 +np.float32,0xbdf19588,0xbdf225dd,4 +np.float32,0xbf4d9654,0xbf6eb442,4 
+np.float32,0xbf390b9b,0xbf4ed220,4 +np.float32,0xbe155a74,0xbe15e354,4 +np.float32,0x3f519e4c,0x3f759850,4 +np.float32,0xbee3f08c,0xbeec3b84,4 +np.float32,0xbf478be7,0xbf64d23b,4 +np.float32,0xbefdee50,0xbf04d92a,4 +np.float32,0x3e8def78,0x3e8fd1bc,4 +np.float32,0x3e3df2a8,0x3e3f0dee,4 +np.float32,0xbf413e22,0xbf5afd97,4 +np.float32,0xbf1b8bc4,0xbf272d71,4 +np.float32,0xbf31e5be,0xbf44af22,4 +np.float32,0x3de7e080,0x3de86010,4 +np.float32,0xbf5ddf7e,0xbf863645,4 +np.float32,0x3f3eba6a,0x3f57306e,4 +np.float32,0xff7fffff,0x7fc00000,4 +np.float32,0x3ec22d5c,0x3ec72973,4 +np.float32,0x80800000,0x80800000,4 +np.float32,0x3f032e0c,0x3f09ba82,4 +np.float32,0x3d74bd60,0x3d74e2b7,4 +np.float32,0xbea0d61e,0xbea39b42,4 +np.float32,0xbefdfa78,0xbf04e02a,4 +np.float32,0x3e5cb220,0x3e5e70ec,4 +np.float32,0xbe239e54,0xbe2452a4,4 +np.float32,0x3f452738,0x3f61090e,4 +np.float32,0x3e99a2e0,0x3e9c0a66,4 +np.float32,0x3e4394d8,0x3e44ca5f,4 +np.float32,0x3f4472e2,0x3f5fef14,4 +np.float32,0xbf46bc70,0xbf638814,4 +np.float32,0xbf0b910f,0xbf139c7a,4 +np.float32,0x3f36b4a6,0x3f4b753f,4 +np.float32,0x3e0bf478,0x3e0c64f6,4 +np.float32,0x3ce02480,0x3ce02ba9,4 +np.float32,0xbd904b10,0xbd9069b1,4 +np.float32,0xbf7f5d72,0xbfc00b70,4 +np.float32,0x3f62127e,0x3f8a8ca8,4 +np.float32,0xbf320253,0xbf44d6e4,4 +np.float32,0x3f2507be,0x3f335833,4 +np.float32,0x3f299284,0x3f395887,4 +np.float32,0xbd8211b0,0xbd82281d,4 +np.float32,0xbd3374c0,0xbd338376,4 +np.float32,0x3f36c56a,0x3f4b8d30,4 +np.float32,0xbf51f704,0xbf76331f,4 +np.float32,0xbe9871ca,0xbe9acab2,4 +np.float32,0xbe818d8c,0xbe82fa0f,4 +np.float32,0x3f08b958,0x3f103c18,4 +np.float32,0x3f22559a,0x3f2fd698,4 +np.float32,0xbf11f388,0xbf1b4db8,4 +np.float32,0x3ebe1990,0x3ec2c359,4 +np.float32,0xbe75ab38,0xbe7816b6,4 +np.float32,0x3e96102c,0x3e984c99,4 +np.float32,0xbe80d9d2,0xbe824052,4 +np.float32,0x3ef47588,0x3efeda7f,4 +np.float32,0xbe45e524,0xbe4725ea,4 +np.float32,0x3f7f9e7a,0x3fc213ff,4 +np.float32,0x3f1d3c36,0x3f294faa,4 +np.float32,0xbf3c58db,0xbf53a591,4 +np.float32,0x3f0d3d20,0x3f159c69,4 +np.float32,0x3f744be6,0x3fa23552,4 +np.float32,0x3f2e0cea,0x3f3f630e,4 +np.float32,0x3e193c10,0x3e19cff7,4 +np.float32,0xbf4150ac,0xbf5b19dd,4 +np.float32,0xbf145f72,0xbf1e4355,4 +np.float32,0xbb76cc00,0xbb76cc26,4 +np.float32,0x3f756780,0x3fa41b3e,4 +np.float32,0x3ea9b868,0x3eacfe3c,4 +np.float32,0x3d07c920,0x3d07cf7f,4 +np.float32,0xbf2263d4,0xbf2fe8ff,4 +np.float32,0x3e53b3f8,0x3e553daa,4 +np.float32,0xbf785be8,0xbfa9b5ba,4 +np.float32,0x3f324f7a,0x3f454254,4 +np.float32,0xbf2188f2,0xbf2ece5b,4 +np.float32,0xbe33781c,0xbe3466a2,4 +np.float32,0xbd3cf120,0xbd3d024c,4 +np.float32,0x3f06b18a,0x3f0dd70f,4 +np.float32,0x3f40d63e,0x3f5a5f6a,4 +np.float32,0x3f752340,0x3fa3a41e,4 +np.float32,0xbe1cf1c0,0xbe1d90bc,4 +np.float32,0xbf02d948,0xbf0957d7,4 +np.float32,0x3f73bed0,0x3fa14bf7,4 +np.float32,0x3d914920,0x3d916864,4 +np.float32,0x7fa00000,0x7fe00000,4 +np.float32,0xbe67a5d8,0xbe69aba7,4 +np.float32,0x3f689c4a,0x3f91eb9f,4 +np.float32,0xbf196e00,0xbf248601,4 +np.float32,0xbf50dacb,0xbf7444fe,4 +np.float32,0x3f628b86,0x3f8b0e1e,4 +np.float32,0x3f6ee2f2,0x3f99fe7f,4 +np.float32,0x3ee5df40,0x3eee6492,4 +np.float32,0x3f501746,0x3f72f41b,4 +np.float32,0xbf1f0f18,0xbf2ba164,4 +np.float32,0xbf1a8bfd,0xbf25ec01,4 +np.float32,0xbd4926f0,0xbd493ba9,4 +np.float32,0xbf4e364f,0xbf6fc17b,4 +np.float32,0x3e50c578,0x3e523ed4,4 +np.float32,0x3f65bf10,0x3f8e95ce,4 +np.float32,0xbe8d75a2,0xbe8f52f2,4 +np.float32,0xbf3f557e,0xbf581962,4 +np.float32,0xbeff2bfc,0xbf05903a,4 
+np.float32,0x3f5e8bde,0x3f86e3d8,4 +np.float32,0xbf7a0012,0xbfad4b9b,4 +np.float32,0x3edefce0,0x3ee6b790,4 +np.float32,0xbf0003de,0xbf060f09,4 +np.float32,0x3efc4650,0x3f03e548,4 +np.float32,0x3f4582e4,0x3f6198f5,4 +np.float32,0x3f10086c,0x3f18f9d0,4 +np.float32,0x3f1cd304,0x3f28ca77,4 +np.float32,0x3f683366,0x3f916e8d,4 +np.float32,0xbed49392,0xbedb3675,4 +np.float32,0xbf6fe5f6,0xbf9b6c0e,4 +np.float32,0xbf59b416,0xbf8224f6,4 +np.float32,0x3d20c960,0x3d20d3f4,4 +np.float32,0x3f6b00d6,0x3f94dbe7,4 +np.float32,0x3f6c26ae,0x3f965352,4 +np.float32,0xbf370ea6,0xbf4bf5dd,4 +np.float32,0x3dfe7230,0x3dff1af1,4 +np.float32,0xbefc21a8,0xbf03d038,4 +np.float32,0x3f16a990,0x3f21156a,4 +np.float32,0xbef8ac0c,0xbf01d48f,4 +np.float32,0x3f170de8,0x3f21919d,4 +np.float32,0x3db9ef80,0x3dba3122,4 +np.float32,0x3d696400,0x3d698461,4 +np.float32,0x3f007aa2,0x3f069843,4 +np.float32,0x3f22827c,0x3f3010a9,4 +np.float32,0x3f3650dc,0x3f4ae6f1,4 +np.float32,0xbf1d8037,0xbf29a5e1,4 +np.float32,0xbf08fdc4,0xbf108d0e,4 +np.float32,0xbd8df350,0xbd8e1079,4 +np.float32,0xbf36bb32,0xbf4b7e98,4 +np.float32,0x3f2e3756,0x3f3f9ced,4 +np.float32,0x3d5a6f20,0x3d5a89aa,4 +np.float32,0x3f55d568,0x3f7d1889,4 +np.float32,0x3e1ed110,0x3e1f75d9,4 +np.float32,0x3e7386b8,0x3e75e1dc,4 +np.float32,0x3f48ea0e,0x3f670434,4 +np.float32,0x3e921fb0,0x3e942f14,4 +np.float32,0xbf0d4d0b,0xbf15af7f,4 +np.float32,0x3f179ed2,0x3f224549,4 +np.float32,0xbf3a328e,0xbf507e6d,4 +np.float32,0xbf74591a,0xbfa24b6e,4 +np.float32,0x3ec7d1c4,0x3ecd4657,4 +np.float32,0xbf6ecbed,0xbf99de85,4 +np.float32,0x3db0bd00,0x3db0f559,4 +np.float32,0x7f800000,0x7fc00000,4 +np.float32,0x3e0373b8,0x3e03d0d6,4 +np.float32,0xbf439784,0xbf5e9a04,4 +np.float32,0xbef97a9e,0xbf024ac6,4 +np.float32,0x3e4d71a8,0x3e4ed90a,4 +np.float32,0xbf14d868,0xbf1ed7e3,4 +np.float32,0xbf776870,0xbfa7ce37,4 +np.float32,0xbe32a500,0xbe339038,4 +np.float32,0xbf326d8a,0xbf456c3d,4 +np.float32,0xbe9b758c,0xbe9df3e7,4 +np.float32,0x3d9515a0,0x3d95376a,4 +np.float32,0x3e3f7320,0x3e40953e,4 +np.float32,0xbee57e7e,0xbeedf84f,4 +np.float32,0x3e821e94,0x3e838ffd,4 +np.float32,0x3f74beaa,0x3fa2f721,4 +np.float32,0xbe9b7672,0xbe9df4d9,4 +np.float32,0x3f4041fc,0x3f597e71,4 +np.float32,0xbe9ea7c4,0xbea14f92,4 +np.float32,0xbf800000,0xbfc90fdb,4 +np.float32,0x3e04fb90,0x3e055bfd,4 +np.float32,0xbf14d3d6,0xbf1ed245,4 +np.float32,0xbe84ebec,0xbe86763e,4 +np.float32,0x3f08e568,0x3f107039,4 +np.float32,0x3d8dc9e0,0x3d8de6ef,4 +np.float32,0x3ea4549c,0x3ea74a94,4 +np.float32,0xbebd2806,0xbec1bf51,4 +np.float32,0x3f311a26,0x3f439498,4 +np.float32,0xbf3d2222,0xbf54cf7e,4 +np.float32,0x3e00c500,0x3e011c81,4 +np.float32,0xbe35ed1c,0xbe36e5a9,4 +np.float32,0xbd4ec020,0xbd4ed6a0,4 +np.float32,0x3e1eb088,0x3e1f54eb,4 +np.float32,0x3cf94840,0x3cf9521a,4 +np.float32,0xbf010c5d,0xbf0740e0,4 +np.float32,0xbf3bd63b,0xbf52e502,4 +np.float32,0x3f233f30,0x3f310542,4 +np.float32,0x3ea24128,0x3ea519d7,4 +np.float32,0x3f478b38,0x3f64d124,4 +np.float32,0x3f1e0c6c,0x3f2a57ec,4 +np.float32,0xbf3ad294,0xbf51680a,4 +np.float32,0x3ede0554,0x3ee5a4b4,4 +np.float32,0x3e451a98,0x3e46577d,4 +np.float32,0x3f520164,0x3f764542,4 +np.float32,0x0,0x0,4 +np.float32,0xbd056cd0,0xbd0572db,4 +np.float32,0xbf58b018,0xbf812f5e,4 +np.float32,0x3e036eb0,0x3e03cbc3,4 +np.float32,0x3d1377a0,0x3d137fc9,4 +np.float32,0xbf692d3a,0xbf929a2c,4 +np.float32,0xbec60fb8,0xbecb5dea,4 +np.float32,0x3ed23340,0x3ed89a8e,4 +np.float32,0x3c87f040,0x3c87f1d9,4 +np.float32,0x3dac62f0,0x3dac9737,4 +np.float32,0xbed97c16,0xbee09f02,4 +np.float32,0xbf2d5f3c,0xbf3e769c,4 
+np.float32,0xbc3b7c40,0xbc3b7d4c,4 +np.float32,0x3ed998ec,0x3ee0bedd,4 +np.float32,0x3dd86630,0x3dd8cdcb,4 +np.float32,0x3e8b4304,0x3e8d09ea,4 +np.float32,0x3f51e6b0,0x3f761697,4 +np.float32,0x3ec51f24,0x3eca5923,4 +np.float32,0xbf647430,0xbf8d2307,4 +np.float32,0x3f253d9c,0x3f339eb2,4 +np.float32,0x3dc969d0,0x3dc9bd4b,4 +np.float32,0xbc2f1300,0xbc2f13da,4 +np.float32,0xbf170007,0xbf21806d,4 +np.float32,0x3f757d10,0x3fa4412e,4 +np.float32,0xbe7864ac,0xbe7ae564,4 +np.float32,0x3f2ffe90,0x3f420cfb,4 +np.float32,0xbe576138,0xbe590012,4 +np.float32,0xbf517a21,0xbf755959,4 +np.float32,0xbf159cfe,0xbf1fc9d5,4 +np.float32,0xbf638b2a,0xbf8c22cf,4 +np.float32,0xff800000,0x7fc00000,4 +np.float32,0x3ed19ca0,0x3ed7f569,4 +np.float32,0x3f7c4460,0x3fb32d26,4 +np.float32,0x3ebfae6c,0x3ec477ab,4 +np.float32,0x3dd452d0,0x3dd4b4a8,4 +np.float32,0x3f471482,0x3f6413fb,4 +np.float32,0xbf49d704,0xbf6883fe,4 +np.float32,0xbd42c4e0,0xbd42d7af,4 +np.float32,0xbeb02994,0xbeb3d668,4 +np.float32,0x3f4d1fd8,0x3f6dedd2,4 +np.float32,0x3efb591c,0x3f035d11,4 +np.float32,0x80000000,0x80000000,4 +np.float32,0xbf50f782,0xbf7476ad,4 +np.float32,0x3d7232c0,0x3d7256f0,4 +np.float32,0x3f649460,0x3f8d46bb,4 +np.float32,0x3f5561bc,0x3f7c46a9,4 +np.float32,0x3e64f6a0,0x3e66ea5d,4 +np.float32,0x3e5b0470,0x3e5cb8f9,4 +np.float32,0xbe9b6b2c,0xbe9de904,4 +np.float32,0x3f6c33f4,0x3f966486,4 +np.float32,0x3f5cee54,0x3f854613,4 +np.float32,0x3ed3e044,0x3eda716e,4 +np.float32,0xbf3cac7f,0xbf542131,4 +np.float32,0x3c723500,0x3c723742,4 +np.float32,0x3de59900,0x3de614d3,4 +np.float32,0xbdf292f8,0xbdf32517,4 +np.float32,0x3f05c8b2,0x3f0cc59b,4 +np.float32,0xbf1ab182,0xbf261b14,4 +np.float32,0xbda396f0,0xbda3c39a,4 +np.float32,0xbf270ed0,0xbf360231,4 +np.float32,0x3f2063e6,0x3f2d557e,4 +np.float32,0x3c550280,0x3c550409,4 +np.float32,0xbe103b48,0xbe10b679,4 +np.float32,0xbebae390,0xbebf4f40,4 +np.float32,0x3f3bc868,0x3f52d0aa,4 +np.float32,0xbd62f880,0xbd631647,4 +np.float32,0xbe7a38f4,0xbe7cc833,4 +np.float32,0x3f09d796,0x3f118f39,4 +np.float32,0xbf5fa558,0xbf8802d0,4 +np.float32,0x3f111cc8,0x3f1a48b0,4 +np.float32,0x3e831958,0x3e849356,4 +np.float32,0xbf614dbd,0xbf89bc3b,4 +np.float32,0xbd521510,0xbd522cac,4 +np.float32,0x3f05af22,0x3f0ca7a0,4 +np.float32,0xbf1ac60e,0xbf2634df,4 +np.float32,0xbf6bd05e,0xbf95e3fe,4 +np.float32,0xbd1fa6e0,0xbd1fb13b,4 +np.float32,0xbeb82f7a,0xbebc68b1,4 +np.float32,0xbd92aaf8,0xbd92cb23,4 +np.float32,0xbe073a54,0xbe079fbf,4 +np.float32,0xbf198655,0xbf24a468,4 +np.float32,0x3f62f6d8,0x3f8b81ba,4 +np.float32,0x3eef4310,0x3ef8f4f9,4 +np.float32,0x3e8988e0,0x3e8b3eae,4 +np.float32,0xbf3ddba5,0xbf55e367,4 +np.float32,0x3dc6d2e0,0x3dc7232b,4 +np.float32,0xbf31040e,0xbf437601,4 +np.float32,0x3f1bb74a,0x3f276442,4 +np.float32,0xbf0075d2,0xbf0692b3,4 +np.float32,0xbf606ce0,0xbf88d0ff,4 +np.float32,0xbf083856,0xbf0fa39d,4 +np.float32,0xbdb25b20,0xbdb2950a,4 +np.float32,0xbeb86860,0xbebca5ae,4 +np.float32,0x3de83160,0x3de8b176,4 +np.float32,0xbf33a98f,0xbf472664,4 +np.float32,0x3e7795f8,0x3e7a1058,4 +np.float32,0x3e0ca6f8,0x3e0d192a,4 +np.float32,0xbf1aef60,0xbf2668c3,4 +np.float32,0xbda53b58,0xbda5695e,4 +np.float32,0xbf178096,0xbf221fc5,4 +np.float32,0xbf0a4159,0xbf120ccf,4 +np.float32,0x3f7bca36,0x3fb1d0df,4 +np.float32,0xbef94360,0xbf022b26,4 +np.float32,0xbef16f36,0xbefb6ad6,4 +np.float32,0x3f53a7e6,0x3f792e25,4 +np.float32,0xbf7c536f,0xbfb35993,4 +np.float32,0xbe84aaa0,0xbe8632a2,4 +np.float32,0x3ecb3998,0x3ed0fab9,4 +np.float32,0x3f539304,0x3f79090a,4 +np.float32,0xbf3c7816,0xbf53d3b3,4 
+np.float32,0xbe7a387c,0xbe7cc7b7,4 +np.float32,0x3f7000e4,0x3f9b92b1,4 +np.float32,0x3e08fd70,0x3e0966e5,4 +np.float32,0x3db97ba0,0x3db9bcc8,4 +np.float32,0xbee99056,0xbef2886a,4 +np.float32,0xbf0668da,0xbf0d819e,4 +np.float32,0x3e58a408,0x3e5a4a51,4 +np.float32,0x3f3440b8,0x3f47faed,4 +np.float32,0xbf19a2ce,0xbf24c7ff,4 +np.float32,0xbe75e990,0xbe7856ee,4 +np.float32,0x3f3c865c,0x3f53e8cb,4 +np.float32,0x3e5e03d0,0x3e5fcac9,4 +np.float32,0x3edb8e34,0x3ee2e932,4 +np.float32,0xbf7e1f5f,0xbfb98ce4,4 +np.float32,0xbf7372ff,0xbfa0d0ae,4 +np.float32,0xbf3ee850,0xbf577548,4 +np.float32,0x3ef19658,0x3efb9737,4 +np.float32,0xbe8088de,0xbe81ecaf,4 +np.float32,0x800000,0x800000,4 +np.float32,0xbde39dd8,0xbde4167a,4 +np.float32,0xbf065d7a,0xbf0d7441,4 +np.float32,0xbde52c78,0xbde5a79b,4 +np.float32,0xbe3a28c0,0xbe3b333e,4 +np.float32,0x3f6e8b3c,0x3f998516,4 +np.float32,0x3f3485c2,0x3f485c39,4 +np.float32,0x3e6f2c68,0x3e71673e,4 +np.float32,0xbe4ec9cc,0xbe50385e,4 +np.float32,0xbf1c3bb0,0xbf280b39,4 +np.float32,0x3ec8ea18,0x3ece76f7,4 +np.float32,0x3e26b5f8,0x3e2774c9,4 +np.float32,0x3e1e4a38,0x3e1eed5c,4 +np.float32,0xbee7a106,0xbef05c6b,4 +np.float32,0xbf305928,0xbf4289d8,4 +np.float32,0x3f0c431c,0x3f147118,4 +np.float32,0xbe57ba6c,0xbe595b52,4 +np.float32,0x3eabc9cc,0x3eaf2fc7,4 +np.float32,0xbef1ed24,0xbefbf9ae,4 +np.float32,0xbf61b576,0xbf8a29cc,4 +np.float32,0x3e9c1ff4,0x3e9ea6cb,4 +np.float32,0x3f6c53b2,0x3f968dbe,4 +np.float32,0x3e2d1b80,0x3e2df156,4 +np.float32,0x3e9f2f70,0x3ea1de4a,4 +np.float32,0xbf5861ee,0xbf80e61a,4 +np.float32,0x3f429144,0x3f5d0505,4 +np.float32,0x3e235cc8,0x3e24103e,4 +np.float32,0xbf354879,0xbf496f6a,4 +np.float32,0xbf20a146,0xbf2da447,4 +np.float32,0x3e8d8968,0x3e8f6785,4 +np.float32,0x3f3fbc94,0x3f58b4c1,4 +np.float32,0x3f2c5f50,0x3f3d1b9f,4 +np.float32,0x3f7bf0f8,0x3fb23d23,4 +np.float32,0xbf218282,0xbf2ec60f,4 +np.float32,0x3f2545aa,0x3f33a93e,4 +np.float32,0xbf4b17be,0xbf6a9018,4 +np.float32,0xbb9df700,0xbb9df728,4 +np.float32,0x3f685d54,0x3f91a06c,4 +np.float32,0x3efdfe2c,0x3f04e24c,4 +np.float32,0x3ef1c5a0,0x3efbccd9,4 +np.float32,0xbf41d731,0xbf5be76e,4 +np.float32,0x3ebd1360,0x3ec1a919,4 +np.float32,0xbf706bd4,0xbf9c2d58,4 +np.float32,0x3ea525e4,0x3ea8279d,4 +np.float32,0xbe51f1b0,0xbe537186,4 +np.float32,0x3f5e8cf6,0x3f86e4f4,4 +np.float32,0xbdad2520,0xbdad5a19,4 +np.float32,0xbf5c5704,0xbf84b0e5,4 +np.float32,0x3f47b54e,0x3f65145e,4 +np.float32,0x3eb4fc78,0x3eb8fc0c,4 +np.float32,0x3dca1450,0x3dca68a1,4 +np.float32,0x3eb02a74,0x3eb3d757,4 +np.float32,0x3f74ae6a,0x3fa2db75,4 +np.float32,0x3f800000,0x3fc90fdb,4 +np.float32,0xbdb46a00,0xbdb4a5f2,4 +np.float32,0xbe9f2ba6,0xbea1da4e,4 +np.float32,0x3f0afa70,0x3f12e8f7,4 +np.float32,0xbf677b20,0xbf909547,4 +np.float32,0x3eff9188,0x3f05cacf,4 +np.float32,0x3f720562,0x3f9e911b,4 +np.float32,0xbf7180d8,0xbf9dc794,4 +np.float32,0xbee7d076,0xbef0919d,4 +np.float32,0x3f0432ce,0x3f0aea95,4 +np.float32,0x3f3bc4c8,0x3f52cb54,4 +np.float32,0xbea72f30,0xbeaa4ebe,4 +np.float32,0x3e90ed00,0x3e92ef33,4 +np.float32,0xbda63670,0xbda6654a,4 +np.float32,0xbf5a6f85,0xbf82d7e0,4 +np.float32,0x3e6e8808,0x3e70be34,4 +np.float32,0xbf4f3822,0xbf71768f,4 +np.float32,0x3e5c8a68,0x3e5e483f,4 +np.float32,0xbf0669d4,0xbf0d82c4,4 +np.float32,0xbf79f77c,0xbfad37b0,4 +np.float32,0x3f25c82c,0x3f345453,4 +np.float32,0x3f1b2948,0x3f26b188,4 +np.float32,0x3ef7e288,0x3f016159,4 +np.float32,0x3c274280,0x3c27433e,4 +np.float32,0xbf4c8fa0,0xbf6cfd5e,4 +np.float32,0x3ea4ccb4,0x3ea7c966,4 +np.float32,0xbf7b157e,0xbfafefca,4 
+np.float32,0xbee4c2b0,0xbeed264d,4 +np.float32,0xbc1fd640,0xbc1fd6e6,4 +np.float32,0x3e892308,0x3e8ad4f6,4 +np.float32,0xbf3f69c7,0xbf5837ed,4 +np.float32,0x3ec879e8,0x3ecdfd05,4 +np.float32,0x3f07a8c6,0x3f0efa30,4 +np.float32,0x3f67b880,0x3f90dd4d,4 +np.float32,0x3e8a11c8,0x3e8bccd5,4 +np.float32,0x3f7df6fc,0x3fb8e935,4 +np.float32,0xbef3e498,0xbefe3599,4 +np.float32,0xbf18ad7d,0xbf2395d8,4 +np.float32,0x3f2bce74,0x3f3c57f5,4 +np.float32,0xbf38086e,0xbf4d5c2e,4 +np.float32,0x3f772d7a,0x3fa75c35,4 +np.float32,0xbf3b6e24,0xbf524c00,4 +np.float32,0xbdd39108,0xbdd3f1d4,4 +np.float32,0xbf691f6b,0xbf928974,4 +np.float32,0x3f146188,0x3f1e45e4,4 +np.float32,0xbf56045b,0xbf7d6e03,4 +np.float32,0xbf4b2ee4,0xbf6ab622,4 +np.float32,0xbf3fa3f6,0xbf588f9d,4 +np.float32,0x3f127bb0,0x3f1bf398,4 +np.float32,0x3ed858a0,0x3edf5d3e,4 +np.float32,0xbd6de3b0,0xbd6e05fa,4 +np.float32,0xbecc662c,0xbed24261,4 +np.float32,0xbd6791d0,0xbd67b170,4 +np.float32,0xbf146016,0xbf1e441e,4 +np.float32,0xbf61f04c,0xbf8a6841,4 +np.float32,0xbe7f16d0,0xbe80e6e7,4 +np.float32,0xbebf93e6,0xbec45b10,4 +np.float32,0xbe8a59fc,0xbe8c17d1,4 +np.float32,0xbebc7a0c,0xbec10426,4 +np.float32,0xbf2a682e,0xbf3a7649,4 +np.float32,0xbe18d0cc,0xbe19637b,4 +np.float32,0x3d7f5100,0x3d7f7b66,4 +np.float32,0xbf10f5fa,0xbf1a1998,4 +np.float32,0x3f25e956,0x3f347fdc,4 +np.float32,0x3e6e8658,0x3e70bc78,4 +np.float32,0x3f21a5de,0x3f2ef3a5,4 +np.float32,0xbf4e71d4,0xbf702607,4 +np.float32,0xbf49d6b6,0xbf688380,4 +np.float32,0xbdb729c0,0xbdb7687c,4 +np.float32,0xbf63e1f4,0xbf8c81c7,4 +np.float32,0x3dda6cb0,0x3ddad73e,4 +np.float32,0x3ee1bc40,0x3ee9c612,4 +np.float32,0x3ebdb5f8,0x3ec2581b,4 +np.float32,0x3f7d9576,0x3fb77646,4 +np.float32,0x3e087140,0x3e08d971,4 +np.float64,0xbfdba523cfb74a48,0xbfdc960ddd9c0506,3 +np.float64,0x3fb51773622a2ee0,0x3fb51d93f77089d5,3 +np.float64,0x3fc839f6d33073f0,0x3fc85f9a47dfe8e6,3 +np.float64,0xbfecba2d82f9745b,0xbff1d55416c6c993,3 +np.float64,0x3fd520fe47aa41fc,0x3fd58867f1179634,3 +np.float64,0x3fe1b369c56366d4,0x3fe2c1ac9dd2c45a,3 +np.float64,0xbfec25a7cd784b50,0xbff133417389b12d,3 +np.float64,0xbfd286342ea50c68,0xbfd2cb0bca22e66d,3 +np.float64,0x3fd5f6fe5eabedfc,0x3fd66bad16680d08,3 +np.float64,0xbfe863a87570c751,0xbfebbb9b637eb6dc,3 +np.float64,0x3fc97f5b4d32feb8,0x3fc9ab5066d8eaec,3 +np.float64,0xbfcb667af936ccf4,0xbfcb9d3017047a1d,3 +np.float64,0xbfd1b7b9afa36f74,0xbfd1f3c175706154,3 +np.float64,0x3fef97385b7f2e70,0x3ff6922a1a6c709f,3 +np.float64,0xbfd13e4205a27c84,0xbfd1757c993cdb74,3 +np.float64,0xbfd18d88aca31b12,0xbfd1c7dd75068f7d,3 +np.float64,0x3fe040ce0f60819c,0x3fe10c59d2a27089,3 +np.float64,0xbfddc7deddbb8fbe,0xbfdef9de5baecdda,3 +np.float64,0xbfcf6e96193edd2c,0xbfcfc1bb7396b9a3,3 +np.float64,0x3fd544f494aa89e8,0x3fd5ae850e2b37dd,3 +np.float64,0x3fe15b381fe2b670,0x3fe25841c7bfe2af,3 +np.float64,0xbfde793420bcf268,0xbfdfc2ddc7b4a341,3 +np.float64,0x3fd0d5db30a1abb8,0x3fd1092cef4aa4fb,3 +np.float64,0x3fe386a08c670d42,0x3fe50059bbf7f491,3 +np.float64,0xbfe0aae3a96155c8,0xbfe1880ef13e95ce,3 +np.float64,0xbfe80eeb03f01dd6,0xbfeb39e9f107e944,3 +np.float64,0xbfd531af3caa635e,0xbfd59a178f17552a,3 +np.float64,0x3fcced14ab39da28,0x3fcd2d9a806337ef,3 +np.float64,0xbfdb4c71bcb698e4,0xbfdc33d9d9daf708,3 +np.float64,0xbfde7375ecbce6ec,0xbfdfbc5611bc48ff,3 +np.float64,0x3fecc5707a798ae0,0x3ff1e2268d778017,3 +np.float64,0x3fe8f210a1f1e422,0x3fec9b3349a5baa2,3 +np.float64,0x3fe357f9b8e6aff4,0x3fe4c5a0b89a9228,3 +np.float64,0xbfe0f863b761f0c8,0xbfe1e3283494c3d4,3 +np.float64,0x3fd017c395a02f88,0x3fd044761f2f4a66,3 
+np.float64,0x3febeb4746f7d68e,0x3ff0f6b955e7feb6,3 +np.float64,0xbfbdaaeeae3b55e0,0xbfbdbc0950109261,3 +np.float64,0xbfea013095f40261,0xbfee5b8fe8ad8593,3 +np.float64,0xbfe9f87b7973f0f7,0xbfee4ca3a8438d72,3 +np.float64,0x3fd37f77cfa6fef0,0x3fd3d018c825f057,3 +np.float64,0x3fb0799cee20f340,0x3fb07c879e7cb63f,3 +np.float64,0xbfdcfd581cb9fab0,0xbfde15e35314b52d,3 +np.float64,0xbfd49781b8a92f04,0xbfd4f6fa1516fefc,3 +np.float64,0x3fb3fcb6d627f970,0x3fb401ed44a713a8,3 +np.float64,0x3fd5737ef8aae6fc,0x3fd5dfe42d4416c7,3 +np.float64,0x7ff4000000000000,0x7ffc000000000000,3 +np.float64,0xbfe56ae780ead5cf,0xbfe776ea5721b900,3 +np.float64,0x3fd4567786a8acf0,0x3fd4b255421c161a,3 +np.float64,0x3fef6fb58cfedf6c,0x3ff62012dfcf0a33,3 +np.float64,0xbfd1dbcd3da3b79a,0xbfd2194fd628f74d,3 +np.float64,0x3fd9350016b26a00,0x3fd9e8b01eb023e9,3 +np.float64,0xbfe4fb3a69e9f675,0xbfe6e1d2c9eca56c,3 +np.float64,0x3fe9fe0f73f3fc1e,0x3fee5631cfd39772,3 +np.float64,0xbfd51c1bc6aa3838,0xbfd5833b3bd53543,3 +np.float64,0x3fc64158e12c82b0,0x3fc65e7352f237d7,3 +np.float64,0x3fd0d8ee1ba1b1dc,0x3fd10c5c99a16f0e,3 +np.float64,0x3fd5554e15aaaa9c,0x3fd5bfdb9ec9e873,3 +np.float64,0x3fe61ce209ec39c4,0x3fe869bc4c28437d,3 +np.float64,0xbfe4e42c8c69c859,0xbfe6c356dac7e2db,3 +np.float64,0xbfe157021062ae04,0xbfe2533ed39f4212,3 +np.float64,0x3fe844066cf0880c,0x3feb8aea0b7bd0a4,3 +np.float64,0x3fe55016586aa02c,0x3fe752e4b2a67b9f,3 +np.float64,0x3fdabce619b579cc,0x3fdb95809bc789d9,3 +np.float64,0x3fee03bae37c0776,0x3ff3778ba38ca882,3 +np.float64,0xbfeb2f5844f65eb0,0xbff03dd1b767d3c8,3 +np.float64,0x3fedcfdbaffb9fb8,0x3ff32e81d0639164,3 +np.float64,0x3fe06fc63ee0df8c,0x3fe142fc27f92eaf,3 +np.float64,0x3fe7ce90fd6f9d22,0x3fead8f832bbbf5d,3 +np.float64,0xbfbc0015ce380028,0xbfbc0e7470e06e86,3 +np.float64,0xbfe9b3de90f367bd,0xbfedd857931dfc6b,3 +np.float64,0xbfcb588f5936b120,0xbfcb8ef0124a4f21,3 +np.float64,0x3f8d376a503a6f00,0x3f8d37ab43e7988d,3 +np.float64,0xbfdb123a40b62474,0xbfdbf38b6cf5db92,3 +np.float64,0xbfee7da6be7cfb4e,0xbff433042cd9d5eb,3 +np.float64,0xbfc4c9e01b2993c0,0xbfc4e18dbafe37ef,3 +np.float64,0x3fedd42faffba860,0x3ff334790cd18a19,3 +np.float64,0x3fe9cdf772f39bee,0x3fee044f87b856ab,3 +np.float64,0x3fe0245881e048b2,0x3fe0eb5a1f739c8d,3 +np.float64,0xbfe4712bd9e8e258,0xbfe62cb3d82034aa,3 +np.float64,0x3fe9a16b46f342d6,0x3fedb972b2542551,3 +np.float64,0xbfe57ab4536af568,0xbfe78c34b03569c2,3 +np.float64,0x3fb6d6ceb22dada0,0x3fb6de976964d6dd,3 +np.float64,0x3fc3ac23a3275848,0x3fc3c02de53919b8,3 +np.float64,0xbfccb531e7396a64,0xbfccf43ec69f6281,3 +np.float64,0xbfd2f07fc8a5e100,0xbfd33a35a8c41b62,3 +np.float64,0xbfe3e5dd04e7cbba,0xbfe57940157c27ba,3 +np.float64,0x3feefe40757dfc80,0x3ff51bc72b846af6,3 +np.float64,0x8000000000000001,0x8000000000000001,3 +np.float64,0x3fecb7b766796f6e,0x3ff1d28972a0fc7e,3 +np.float64,0xbfea1bf1357437e2,0xbfee89a6532bfd71,3 +np.float64,0xbfca3983b7347308,0xbfca696463b791ef,3 +np.float64,0x10000000000000,0x10000000000000,3 +np.float64,0xbf886b45d030d680,0xbf886b6bbc04314b,3 +np.float64,0x3fd5224bb5aa4498,0x3fd589c92e82218f,3 +np.float64,0xbfec799874f8f331,0xbff18d5158b8e640,3 +np.float64,0xbf88124410302480,0xbf88126863350a16,3 +np.float64,0xbfe37feaaa66ffd6,0xbfe4f7e24382e79d,3 +np.float64,0x3fd777eca1aeefd8,0x3fd8076ead6d55dc,3 +np.float64,0x3fecaaeb3af955d6,0x3ff1c4159fa3e965,3 +np.float64,0xbfeb81e4e6f703ca,0xbff08d4e4c77fada,3 +np.float64,0xbfd7d0a0edafa142,0xbfd866e37010312e,3 +np.float64,0x3feda48c00fb4918,0x3ff2f3fd33c36307,3 +np.float64,0x3feb87ecc4770fda,0x3ff09336e490deda,3 
+np.float64,0xbfefd78ad27faf16,0xbff78abbafb50ac1,3 +np.float64,0x3fe58e918c6b1d24,0x3fe7a70b38cbf016,3 +np.float64,0x3fda163b95b42c78,0x3fdade86b88ba4ee,3 +np.float64,0x3fe8fc1aaf71f836,0x3fecab3f93b59df5,3 +np.float64,0xbf8de56f903bcac0,0xbf8de5b527cec797,3 +np.float64,0xbfec112db2f8225b,0xbff11dd648de706f,3 +np.float64,0x3fc3214713264290,0x3fc333b1c862f7d0,3 +np.float64,0xbfeb5e5836f6bcb0,0xbff06ac364b49177,3 +np.float64,0x3fc23d9777247b30,0x3fc24d8ae3bcb615,3 +np.float64,0xbfdf0eed65be1dda,0xbfe036cea9b9dfb6,3 +np.float64,0xbfb2d5c85a25ab90,0xbfb2da24bb409ff3,3 +np.float64,0xbfecdda0c3f9bb42,0xbff1fdf94fc6e89e,3 +np.float64,0x3fdfe79154bfcf24,0x3fe0b338e0476a9d,3 +np.float64,0xbfd712ac6bae2558,0xbfd79abde21f287b,3 +np.float64,0x3fea3f148a747e2a,0x3feec6bed9d4fa04,3 +np.float64,0x3fd4879e4ca90f3c,0x3fd4e632fa4e2edd,3 +np.float64,0x3fe9137a9e7226f6,0x3fecd0c441088d6a,3 +np.float64,0xbfc75bf4ef2eb7e8,0xbfc77da8347d742d,3 +np.float64,0xbfd94090a0b28122,0xbfd9f5458816ed5a,3 +np.float64,0x3fde439cbcbc8738,0x3fdf85fbf496b61f,3 +np.float64,0xbfe18bacdce3175a,0xbfe29210e01237f7,3 +np.float64,0xbfd58ec413ab1d88,0xbfd5fcd838f0a934,3 +np.float64,0xbfeae5af2d75cb5e,0xbfeff1de1b4a06be,3 +np.float64,0x3fb64d1a162c9a30,0x3fb65458fb831354,3 +np.float64,0x3fc18b1e15231640,0x3fc1994c6ffd7a6a,3 +np.float64,0xbfd7b881bcaf7104,0xbfd84ce89a9ee8c7,3 +np.float64,0x3feb916a40f722d4,0x3ff09c8aa851d7c4,3 +np.float64,0x3fdab5fbb5b56bf8,0x3fdb8de43961bbde,3 +np.float64,0x3fe4f35402e9e6a8,0x3fe6d75dc5082894,3 +np.float64,0x3fe2fdb2e5e5fb66,0x3fe454e32a5d2182,3 +np.float64,0x3fe8607195f0c0e4,0x3febb6a4c3bf6a5c,3 +np.float64,0x3fd543ca9aaa8794,0x3fd5ad49203ae572,3 +np.float64,0x3fe8e05ca1f1c0ba,0x3fec7eff123dcc58,3 +np.float64,0x3fe298b6ca65316e,0x3fe3d81d2927c4dd,3 +np.float64,0x3fcfecea733fd9d8,0x3fd0220f1d0faf78,3 +np.float64,0xbfe2e739f065ce74,0xbfe439004e73772a,3 +np.float64,0xbfd1ae6b82a35cd8,0xbfd1ea129a5ee756,3 +np.float64,0xbfeb7edff576fdc0,0xbff08a5a638b8a8b,3 +np.float64,0x3fe5b645ff6b6c8c,0x3fe7dcee1faefe3f,3 +np.float64,0xbfd478427ba8f084,0xbfd4d5fc7c239e60,3 +np.float64,0xbfe39904e3e7320a,0xbfe517972b30b1e5,3 +np.float64,0xbfd3b75b6ba76eb6,0xbfd40acf20a6e074,3 +np.float64,0x3fd596267aab2c4c,0x3fd604b01faeaf75,3 +np.float64,0x3fe134463762688c,0x3fe229fc36784a72,3 +np.float64,0x3fd25dadf7a4bb5c,0x3fd2a0b9e04ea060,3 +np.float64,0xbfc05d3e0b20ba7c,0xbfc068bd2bb9966f,3 +np.float64,0x3f8cf517b039ea00,0x3f8cf556ed74b163,3 +np.float64,0x3fda87361cb50e6c,0x3fdb5a75af897e7f,3 +np.float64,0x3fe53e1926ea7c32,0x3fe73acf01b8ff31,3 +np.float64,0x3fe2e94857e5d290,0x3fe43b8cc820f9c7,3 +np.float64,0x3fd81fe6acb03fcc,0x3fd8bc623c0068cf,3 +np.float64,0xbfddf662c3bbecc6,0xbfdf2e76dc90786e,3 +np.float64,0x3fece174fbf9c2ea,0x3ff2026a1a889580,3 +np.float64,0xbfdc83c5b8b9078c,0xbfdd8dcf6ee3b7da,3 +np.float64,0x3feaf5448f75ea8a,0x3ff0075b108bcd0d,3 +np.float64,0xbfebf32f7ef7e65f,0xbff0fed42aaa826a,3 +np.float64,0x3fe389e5e8e713cc,0x3fe5047ade055ccb,3 +np.float64,0x3f635cdcc026ba00,0x3f635cddeea082ce,3 +np.float64,0x3fae580f543cb020,0x3fae5c9d5108a796,3 +np.float64,0x3fec9fafce793f60,0x3ff1b77bec654f00,3 +np.float64,0x3fb19d226e233a40,0x3fb1a0b32531f7ee,3 +np.float64,0xbfdf9a71e7bf34e4,0xbfe086cef88626c7,3 +np.float64,0x8010000000000000,0x8010000000000000,3 +np.float64,0xbfef170ba2fe2e17,0xbff54ed4675f5b8a,3 +np.float64,0xbfcc6e2f8f38dc60,0xbfccab65fc34d183,3 +np.float64,0x3fee756c4bfcead8,0x3ff4258782c137e6,3 +np.float64,0xbfd461c218a8c384,0xbfd4be3e391f0ff4,3 +np.float64,0xbfe3b64686e76c8d,0xbfe53caa16d6c90f,3 
+np.float64,0xbfc1c65d8d238cbc,0xbfc1d51e58f82403,3 +np.float64,0x3fe6e06c63edc0d8,0x3fe97cb832eeb6a2,3 +np.float64,0xbfc9fc20b933f840,0xbfca2ab004312d85,3 +np.float64,0xbfe29aa6df65354e,0xbfe3da7ecf3ba466,3 +np.float64,0x3fea4df7d1749bf0,0x3feee0d448bd4746,3 +np.float64,0xbfedec6161fbd8c3,0xbff3563e1d943aa2,3 +np.float64,0x3fdb6f0437b6de08,0x3fdc5a1888b1213d,3 +np.float64,0xbfe270cbd3e4e198,0xbfe3a72ac27a0b0c,3 +np.float64,0xbfdfff8068bfff00,0xbfe0c1088e3b8983,3 +np.float64,0xbfd28edbe6a51db8,0xbfd2d416c8ed363e,3 +np.float64,0xbfb4e35f9229c6c0,0xbfb4e9531d2a737f,3 +np.float64,0xbfee6727e97cce50,0xbff40e7717576e46,3 +np.float64,0xbfddb5fbddbb6bf8,0xbfdee5aad78f5361,3 +np.float64,0xbfdf9d3e9dbf3a7e,0xbfe0886b191f2957,3 +np.float64,0x3fa57e77042afce0,0x3fa5801518ea9342,3 +np.float64,0x3f95c4e4882b89c0,0x3f95c55003c8e714,3 +np.float64,0x3fd9b10f61b36220,0x3fda6fe5d635a8aa,3 +np.float64,0xbfe2973411652e68,0xbfe3d641fe9885fd,3 +np.float64,0xbfee87bd5a7d0f7b,0xbff443bea81b3fff,3 +np.float64,0x3f9ea064c83d40c0,0x3f9ea19025085b2f,3 +np.float64,0xbfe4b823dfe97048,0xbfe689623d30dc75,3 +np.float64,0xbfa06a326c20d460,0xbfa06aeacbcd3eb8,3 +np.float64,0x3fe1e5c4c1e3cb8a,0x3fe2fe44b822f20e,3 +np.float64,0x3f99dafaa833b600,0x3f99dbaec10a1a0a,3 +np.float64,0xbfed7cb3877af967,0xbff2bfe9e556aaf9,3 +np.float64,0x3fd604f2e2ac09e4,0x3fd67a89408ce6ba,3 +np.float64,0x3fec57b60f78af6c,0x3ff16881f46d60f7,3 +np.float64,0xbfea2e3a17745c74,0xbfeea95c7190fd42,3 +np.float64,0xbfd60a7c37ac14f8,0xbfd6806ed642de35,3 +np.float64,0xbfe544b9726a8973,0xbfe743ac399d81d7,3 +np.float64,0xbfd13520faa26a42,0xbfd16c02034a8fe0,3 +np.float64,0xbfea9ea59ff53d4b,0xbfef70538ee12e00,3 +np.float64,0x3fd66633f8accc68,0x3fd6e23c13ab0e9e,3 +np.float64,0xbfe4071bd3e80e38,0xbfe5a3c9ba897d81,3 +np.float64,0xbfbe1659fa3c2cb0,0xbfbe2831d4fed196,3 +np.float64,0xbfd3312777a6624e,0xbfd37df09b9baeba,3 +np.float64,0x3fd13997caa27330,0x3fd170a4900c8907,3 +np.float64,0xbfe7cbc235ef9784,0xbfead4c4d6cbf129,3 +np.float64,0xbfe1456571628acb,0xbfe23e4ec768c8e2,3 +np.float64,0xbfedf1a044fbe340,0xbff35da96773e176,3 +np.float64,0x3fce38b1553c7160,0x3fce8270709774f9,3 +np.float64,0xbfecb01761f9602f,0xbff1c9e9d382f1f8,3 +np.float64,0xbfe0a03560e1406b,0xbfe17b8d5a1ca662,3 +np.float64,0x3fe50f37cbea1e70,0x3fe6fc55e1ae7da6,3 +np.float64,0xbfe12d64a0625aca,0xbfe221d3a7834e43,3 +np.float64,0xbf6fb288403f6500,0xbf6fb28d6f389db6,3 +np.float64,0x3fda831765b50630,0x3fdb55eecae58ca9,3 +np.float64,0x3fe1a0fe4c6341fc,0x3fe2ab9564304425,3 +np.float64,0xbfef2678a77e4cf1,0xbff56ff42b2797bb,3 +np.float64,0xbfab269c1c364d40,0xbfab29df1cd48779,3 +np.float64,0x3fe8ec82a271d906,0x3fec92567d7a6675,3 +np.float64,0xbfc235115f246a24,0xbfc244ee567682ea,3 +np.float64,0x3feef5bf8d7deb80,0x3ff50ad4875ee9bd,3 +np.float64,0x3fe768b5486ed16a,0x3fea421356160e65,3 +np.float64,0xbfd4255684a84aae,0xbfd47e8baf7ec7f6,3 +np.float64,0x3fc7f67f2b2fed00,0x3fc81ae83cf92dd5,3 +np.float64,0x3fe9b1b19a736364,0x3fedd4b0e24ee741,3 +np.float64,0x3fb27eb9e624fd70,0x3fb282dacd89ce28,3 +np.float64,0xbfd490b710a9216e,0xbfd4efcdeb213458,3 +np.float64,0xbfd1347b2ca268f6,0xbfd16b55dece2d38,3 +np.float64,0x3fc6a5668d2d4ad0,0x3fc6c41452c0c087,3 +np.float64,0xbfca7b209f34f640,0xbfcaac710486f6bd,3 +np.float64,0x3fc23a1a47247438,0x3fc24a047fd4c27a,3 +np.float64,0x3fdb1413a8b62828,0x3fdbf595e2d994bc,3 +np.float64,0xbfea69b396f4d367,0xbfef11bdd2b0709a,3 +np.float64,0x3fd14c9958a29934,0x3fd1846161b10422,3 +np.float64,0xbfe205f44be40be8,0xbfe325283aa3c6a8,3 +np.float64,0x3fecd03c9ef9a07a,0x3ff1ee85aaf52a01,3 
+np.float64,0x3fe34281d7e68504,0x3fe4aab63e6de816,3 +np.float64,0xbfe120e2376241c4,0xbfe213023ab03939,3 +np.float64,0xbfe951edc4f2a3dc,0xbfed3615e38576f8,3 +np.float64,0x3fe5a2286f6b4450,0x3fe7c196e0ec10ed,3 +np.float64,0xbfed7a3e1f7af47c,0xbff2bcc0793555d2,3 +np.float64,0x3fe050274960a04e,0x3fe11e2e256ea5cc,3 +np.float64,0xbfcfa71f653f4e40,0xbfcffc11483d6a06,3 +np.float64,0x3f6ead2e403d5a00,0x3f6ead32f314c052,3 +np.float64,0x3fe3a2a026674540,0x3fe523bfe085f6ec,3 +np.float64,0xbfe294a62e65294c,0xbfe3d31ebd0b4ca2,3 +np.float64,0xbfb4894d06291298,0xbfb48ef4b8e256b8,3 +np.float64,0xbfc0c042c1218084,0xbfc0cc98ac2767c4,3 +np.float64,0xbfc6a32cb52d4658,0xbfc6c1d1597ed06b,3 +np.float64,0xbfd30f7777a61eee,0xbfd35aa39fee34eb,3 +np.float64,0x3fe7fc2c2eeff858,0x3feb1d8a558b5537,3 +np.float64,0x7fefffffffffffff,0x7ff8000000000000,3 +np.float64,0xbfdadf917bb5bf22,0xbfdbbbae9a9f67a0,3 +np.float64,0xbfcf0395e13e072c,0xbfcf5366015f7362,3 +np.float64,0xbfe8644c9170c899,0xbfebbc98e74a227d,3 +np.float64,0x3fc3b2d8e52765b0,0x3fc3c6f7d44cffaa,3 +np.float64,0x3fc57407b92ae810,0x3fc58e12ccdd47a1,3 +np.float64,0x3fd56a560daad4ac,0x3fd5d62b8dfcc058,3 +np.float64,0x3fd595deefab2bbc,0x3fd6046420b2f79b,3 +np.float64,0xbfd5360f50aa6c1e,0xbfd59ebaacd815b8,3 +np.float64,0x3fdfb6aababf6d54,0x3fe0970b8aac9f61,3 +np.float64,0x3ff0000000000000,0x3ff921fb54442d18,3 +np.float64,0xbfeb3a8958f67513,0xbff04872e8278c79,3 +np.float64,0x3f9e1ea6683c3d40,0x3f9e1fc326186705,3 +np.float64,0x3fe6b6d5986d6dac,0x3fe94175bd60b19d,3 +np.float64,0xbfee4d90b77c9b21,0xbff3e60e9134edc2,3 +np.float64,0x3fd806ce0cb00d9c,0x3fd8a14c4855a8f5,3 +np.float64,0x3fd54acc75aa9598,0x3fd5b4b72fcbb5df,3 +np.float64,0xbfe59761f16b2ec4,0xbfe7b2fa5d0244ac,3 +np.float64,0xbfcd4fa3513a9f48,0xbfcd92d0814a5383,3 +np.float64,0xbfdc827523b904ea,0xbfdd8c577b53053c,3 +np.float64,0xbfd4bb7f34a976fe,0xbfd51d00d9a99360,3 +np.float64,0xbfe818bc87f03179,0xbfeb48d1ea0199c5,3 +np.float64,0xbfa8a2e15c3145c0,0xbfa8a5510ba0e45c,3 +np.float64,0xbfb6d15f422da2c0,0xbfb6d922689da015,3 +np.float64,0x3fcd04eaab3a09d8,0x3fcd46131746ef08,3 +np.float64,0x3fcfb5cfbb3f6ba0,0x3fd0059d308237f3,3 +np.float64,0x3fe8dcf609f1b9ec,0x3fec7997973010b6,3 +np.float64,0xbfdf1834d7be306a,0xbfe03c1d4e2b48f0,3 +np.float64,0x3fee82ae50fd055c,0x3ff43b545066fe1a,3 +np.float64,0xbfde039c08bc0738,0xbfdf3d6ed4d2ee5c,3 +np.float64,0x3fec07389bf80e72,0x3ff1137ed0acd161,3 +np.float64,0xbfef44c010fe8980,0xbff5b488ad22a4c5,3 +np.float64,0x3f76e722e02dce00,0x3f76e72ab2759d88,3 +np.float64,0xbfcaa9e6053553cc,0xbfcadc41125fca93,3 +np.float64,0x3fed6088147ac110,0x3ff29c06c4ef35fc,3 +np.float64,0x3fd32bd836a657b0,0x3fd3785fdb75909f,3 +np.float64,0xbfeedbb1d97db764,0xbff4d87f6c82a93c,3 +np.float64,0xbfe40f31d5e81e64,0xbfe5ae292cf258a2,3 +np.float64,0x7ff8000000000000,0x7ff8000000000000,3 +np.float64,0xbfeb2b25bc76564c,0xbff039d81388550c,3 +np.float64,0x3fec5008fa78a012,0x3ff1604195801da3,3 +np.float64,0x3fce2d4f293c5aa0,0x3fce76b99c2db4da,3 +np.float64,0xbfdc435412b886a8,0xbfdd45e7b7813f1e,3 +np.float64,0x3fdf2c9d06be593c,0x3fe047cb03c141b6,3 +np.float64,0x3fddefc61ebbdf8c,0x3fdf26fb8fad9fae,3 +np.float64,0x3fab50218436a040,0x3fab537395eaf3bb,3 +np.float64,0xbfd5b95a8fab72b6,0xbfd62a191a59343a,3 +np.float64,0x3fdbf803b4b7f008,0x3fdcf211578e98c3,3 +np.float64,0xbfec8c255979184b,0xbff1a1bee108ed30,3 +np.float64,0x3fe33cdaffe679b6,0x3fe4a3a318cd994f,3 +np.float64,0x3fd8cf585cb19eb0,0x3fd97a408bf3c38c,3 +np.float64,0x3fe919dde07233bc,0x3fecdb0ea13a2455,3 +np.float64,0xbfd5ba35e4ab746c,0xbfd62b024805542d,3 
+np.float64,0x3fd2f933e7a5f268,0x3fd343527565e97c,3 +np.float64,0xbfe5b9f8ddeb73f2,0xbfe7e1f772c3e438,3 +np.float64,0x3fe843cd92f0879c,0x3feb8a92d68eae3e,3 +np.float64,0xbfd096b234a12d64,0xbfd0c7beca2c6605,3 +np.float64,0xbfef3363da7e66c8,0xbff58c98dde6c27c,3 +np.float64,0x3fd51b01ddaa3604,0x3fd582109d89ead1,3 +np.float64,0x3fea0f10ff741e22,0x3fee736c2d2a2067,3 +np.float64,0x3fc276e7b724edd0,0x3fc28774520bc6d4,3 +np.float64,0xbfef9abc9f7f3579,0xbff69d49762b1889,3 +np.float64,0x3fe1539ec0e2a73e,0x3fe24f370b7687d0,3 +np.float64,0x3fad72350c3ae460,0x3fad765e7766682a,3 +np.float64,0x3fa289a47c251340,0x3fa28aae12f41646,3 +np.float64,0xbfe5c488e5eb8912,0xbfe7f05d7e7dcddb,3 +np.float64,0xbfc22ef1d7245de4,0xbfc23ebeb990a1b8,3 +np.float64,0x3fe59a0b80eb3418,0x3fe7b695fdcba1de,3 +np.float64,0xbfe9cad619f395ac,0xbfedff0514d91e2c,3 +np.float64,0x3fc8bc74eb3178e8,0x3fc8e48cb22da666,3 +np.float64,0xbfc5389a3f2a7134,0xbfc551cd6febc544,3 +np.float64,0x3fce82feb33d0600,0x3fceceecce2467ef,3 +np.float64,0x3fda346791b468d0,0x3fdaff95154a4ca6,3 +np.float64,0x3fd04501fea08a04,0x3fd073397b32607e,3 +np.float64,0xbfb6be498a2d7c90,0xbfb6c5f93aeb0e57,3 +np.float64,0x3fe1f030dd63e062,0x3fe30ad8fb97cce0,3 +np.float64,0xbfee3fb36dfc7f67,0xbff3d0a5e380b86f,3 +np.float64,0xbfa876773c30ecf0,0xbfa878d9d3df6a3f,3 +np.float64,0x3fdb58296eb6b054,0x3fdc40ceffb17f82,3 +np.float64,0xbfea16b5d8742d6c,0xbfee809b99fd6adc,3 +np.float64,0xbfdc5062b6b8a0c6,0xbfdd547623275fdb,3 +np.float64,0x3fef6db242fedb64,0x3ff61ab4cdaef467,3 +np.float64,0xbfc9f778f933eef0,0xbfca25eef1088167,3 +np.float64,0xbfd22063eba440c8,0xbfd260c8766c69cf,3 +np.float64,0x3fdd2379f2ba46f4,0x3fde40b025cb1ffa,3 +np.float64,0xbfea967af2f52cf6,0xbfef61a178774636,3 +np.float64,0x3fe4f5b49fe9eb6a,0x3fe6da8311a5520e,3 +np.float64,0x3feccde17b799bc2,0x3ff1ebd0ea228b71,3 +np.float64,0x3fe1bb76506376ec,0x3fe2cb56fca01840,3 +np.float64,0xbfef94e583ff29cb,0xbff68aeab8ba75a2,3 +np.float64,0x3fed024a55fa0494,0x3ff228ea5d456e9d,3 +np.float64,0xbfe877b2a8f0ef65,0xbfebdaa1a4712459,3 +np.float64,0x3fef687a8d7ed0f6,0x3ff60cf5fef8d448,3 +np.float64,0xbfeeb2dc8afd65b9,0xbff48dda6a906cd6,3 +np.float64,0x3fdb2e28aeb65c50,0x3fdc12620655eb7a,3 +np.float64,0x3fedc1863afb830c,0x3ff31ae823315e83,3 +np.float64,0xbfe6b1bb546d6376,0xbfe93a38163e3a59,3 +np.float64,0x3fe479c78468f390,0x3fe637e5c0fc5730,3 +np.float64,0x3fbad1fade35a3f0,0x3fbade9a43ca05cf,3 +np.float64,0xbfe2d1c563e5a38b,0xbfe41e712785900c,3 +np.float64,0xbfc08c33ed211868,0xbfc09817a752d500,3 +np.float64,0xbfecce0935f99c12,0xbff1ebfe84524037,3 +np.float64,0x3fce4ef0e73c9de0,0x3fce995638a3dc48,3 +np.float64,0xbfd2fb2343a5f646,0xbfd345592517ca18,3 +np.float64,0x3fd848f7cdb091f0,0x3fd8e8bee5f7b49a,3 +np.float64,0x3fe532b7d2ea6570,0x3fe72b9ac747926a,3 +np.float64,0x3fd616aadcac2d54,0x3fd68d692c5cad42,3 +np.float64,0x3fd7720eb3aee41c,0x3fd801206a0e1e43,3 +np.float64,0x3fee835a35fd06b4,0x3ff43c7175eb7a54,3 +np.float64,0xbfe2e8f70b65d1ee,0xbfe43b2800a947a7,3 +np.float64,0xbfed38f45d7a71e9,0xbff26acd6bde7174,3 +np.float64,0xbfc0c62661218c4c,0xbfc0d28964d66120,3 +np.float64,0x3fe97940bef2f282,0x3fed76b986a74ee3,3 +np.float64,0x3fc96f7dc532def8,0x3fc99b20044c8fcf,3 +np.float64,0xbfd60201eeac0404,0xbfd677675efaaedc,3 +np.float64,0x3fe63c0867ec7810,0x3fe894f060200140,3 +np.float64,0xbfef6144b37ec289,0xbff5fa589a515ba8,3 +np.float64,0xbfde2da0c8bc5b42,0xbfdf6d0b59e3232a,3 +np.float64,0xbfd7401612ae802c,0xbfd7cb74ddd413b9,3 +np.float64,0x3fe41c012de83802,0x3fe5be9d87da3f82,3 +np.float64,0x3fdf501609bea02c,0x3fe05c1d96a2270b,3 
+np.float64,0x3fcf9fa1233f3f40,0x3fcff45598e72f07,3 +np.float64,0x3fd4e3895ea9c714,0x3fd547580d8392a2,3 +np.float64,0x3fe1e8ff5fe3d1fe,0x3fe3022a0b86a2ab,3 +np.float64,0xbfe0aa55956154ab,0xbfe18768823da589,3 +np.float64,0x3fb2a0aa26254150,0x3fb2a4e1faff1c93,3 +np.float64,0x3fd3823417a70468,0x3fd3d2f808dbb167,3 +np.float64,0xbfaed323643da640,0xbfaed7e9bef69811,3 +np.float64,0x3fe661e8c4ecc3d2,0x3fe8c9c535f43c16,3 +np.float64,0xbfa429777c2852f0,0xbfa42acd38ba02a6,3 +np.float64,0x3fb5993ea22b3280,0x3fb59fd353e47397,3 +np.float64,0x3fee62d21efcc5a4,0x3ff40788f9278ade,3 +np.float64,0xbf813fb810227f80,0xbf813fc56d8f3c53,3 +np.float64,0x3fd56205deaac40c,0x3fd5cd59671ef193,3 +np.float64,0x3fd31a4de5a6349c,0x3fd365fe401b66e8,3 +np.float64,0xbfec7cc7a478f98f,0xbff190cf69703ca4,3 +np.float64,0xbf755881a02ab100,0xbf755887f52e7794,3 +np.float64,0x3fdd1c92e6ba3924,0x3fde38efb4e8605c,3 +np.float64,0x3fdf49da80be93b4,0x3fe0588af8dd4a34,3 +np.float64,0x3fe1fcdbf2e3f9b8,0x3fe31a27b9d273f2,3 +np.float64,0x3fe2a0f18be541e4,0x3fe3e23b159ce20f,3 +np.float64,0xbfed0f1561fa1e2b,0xbff23820fc0a54ca,3 +np.float64,0x3fe34a006c669400,0x3fe4b419b9ed2b83,3 +np.float64,0xbfd51be430aa37c8,0xbfd583005a4d62e7,3 +np.float64,0x3fe5ec4e336bd89c,0x3fe826caad6b0f65,3 +np.float64,0xbfdad71b1fb5ae36,0xbfdbb25bef8b53d8,3 +np.float64,0xbfe8eac2d871d586,0xbfec8f8cac7952f9,3 +np.float64,0xbfe1d5aef663ab5e,0xbfe2eae14b7ccdfd,3 +np.float64,0x3fec11d3157823a6,0x3ff11e8279506753,3 +np.float64,0xbfe67ff1166cffe2,0xbfe8f3e61c1dfd32,3 +np.float64,0xbfd101eecda203de,0xbfd136e0e9557022,3 +np.float64,0x3fde6c9e5cbcd93c,0x3fdfb48ee7efe134,3 +np.float64,0x3fec3ede9c787dbe,0x3ff14dead1e5cc1c,3 +np.float64,0x3fe7a022086f4044,0x3fea93ce2980b161,3 +np.float64,0xbfc3b2b1b7276564,0xbfc3c6d02d60bb21,3 +np.float64,0x7ff0000000000000,0x7ff8000000000000,3 +np.float64,0x3fe60b5647ec16ac,0x3fe8517ef0544b40,3 +np.float64,0xbfd20ab654a4156c,0xbfd24a2f1b8e4932,3 +np.float64,0xbfe4aa1e2f69543c,0xbfe677005cbd2646,3 +np.float64,0xbfc831cc0b306398,0xbfc8574910d0b86d,3 +np.float64,0xbfc3143495262868,0xbfc3267961b79198,3 +np.float64,0x3fc14d64c1229ac8,0x3fc15afea90a319d,3 +np.float64,0x3fc0a5a207214b48,0x3fc0b1bd2f15c1b0,3 +np.float64,0xbfc0b8351521706c,0xbfc0c4792672d6db,3 +np.float64,0xbfdc383600b8706c,0xbfdd398429e163bd,3 +np.float64,0x3fd9e17321b3c2e8,0x3fdaa4c4d140a622,3 +np.float64,0xbfd44f079ea89e10,0xbfd4aa7d6deff4ab,3 +np.float64,0xbfc3de52a927bca4,0xbfc3f2f8f65f4c3f,3 +np.float64,0x3fe7779d566eef3a,0x3fea57f8592dbaad,3 +np.float64,0xbfe309039e661207,0xbfe462f47f9a64e5,3 +np.float64,0x3fd8e06d08b1c0dc,0x3fd98cc946e440a6,3 +np.float64,0x3fdde66c9ebbccd8,0x3fdf1c68009a8dc1,3 +np.float64,0x3fd4369c6ba86d38,0x3fd490bf460a69e4,3 +np.float64,0xbfe132252fe2644a,0xbfe22775e109cc2e,3 +np.float64,0x3fee15483c7c2a90,0x3ff39111de89036f,3 +np.float64,0xbfc1d5ee8123abdc,0xbfc1e4d66c6871a5,3 +np.float64,0x3fc851c52b30a388,0x3fc877d93fb4ae1a,3 +np.float64,0x3fdaade707b55bd0,0x3fdb85001661fffe,3 +np.float64,0xbfe79fb7f96f3f70,0xbfea9330ec27ac10,3 +np.float64,0xbfe8b0f725f161ee,0xbfec3411c0e4517a,3 +np.float64,0xbfea79f5f374f3ec,0xbfef2e9dd9270488,3 +np.float64,0x3fe0b5fe5b616bfc,0x3fe19512a36a4534,3 +np.float64,0xbfad7c622c3af8c0,0xbfad808fea96a804,3 +np.float64,0xbfe3e24dbce7c49c,0xbfe574b4c1ea9818,3 +np.float64,0xbfe80b038af01607,0xbfeb33fec279576a,3 +np.float64,0xbfef69e2ea7ed3c6,0xbff610a5593a18bc,3 +np.float64,0x3fdcc0bb39b98178,0x3fddd1f8c9a46430,3 +np.float64,0xbfba39976a347330,0xbfba4563bb5369a4,3 +np.float64,0xbfebf9768ef7f2ed,0xbff10548ab725f74,3 
+np.float64,0xbfec21c066f84381,0xbff12f2803ba052f,3 +np.float64,0xbfca216a6b3442d4,0xbfca50c5e1e5748e,3 +np.float64,0x3fd5e40da4abc81c,0x3fd65783f9a22946,3 +np.float64,0x3fc235ca17246b98,0x3fc245a8f453173f,3 +np.float64,0x3fecb5b867796b70,0x3ff1d046a0bfda69,3 +np.float64,0x3fcb457fef368b00,0x3fcb7b6daa8165a7,3 +np.float64,0xbfa5ed6f7c2bdae0,0xbfa5ef27244e2e42,3 +np.float64,0x3fecf618a1f9ec32,0x3ff21a86cc104542,3 +np.float64,0x3fe9d95413f3b2a8,0x3fee178dcafa11fc,3 +np.float64,0xbfe93a5357f274a7,0xbfed0f9a565da84a,3 +np.float64,0xbfeb9e45ff773c8c,0xbff0a93cab8e258d,3 +np.float64,0x3fcbd9d0bd37b3a0,0x3fcc134e87cae241,3 +np.float64,0x3fe55d4db76aba9c,0x3fe764a0e028475a,3 +np.float64,0xbfc8a6fc71314df8,0xbfc8ceaafbfc59a7,3 +np.float64,0x3fe0615fa660c2c0,0x3fe1323611c4cbc2,3 +np.float64,0x3fb965558632cab0,0x3fb9700b84de20ab,3 +np.float64,0x8000000000000000,0x8000000000000000,3 +np.float64,0x3fe76776c6eeceee,0x3fea40403e24a9f1,3 +np.float64,0x3fe3b7f672676fec,0x3fe53ece71a1a1b1,3 +np.float64,0xbfa9b82ba4337050,0xbfa9baf15394ca64,3 +np.float64,0xbfe31faf49663f5e,0xbfe47f31b1ca73dc,3 +np.float64,0xbfcc4c6beb3898d8,0xbfcc88c5f814b2c1,3 +np.float64,0x3fd481530aa902a8,0x3fd4df8df03bc155,3 +np.float64,0x3fd47593b8a8eb28,0x3fd4d327ab78a1a8,3 +np.float64,0x3fd70e6ccbae1cd8,0x3fd7962fe8b63d46,3 +np.float64,0x3fd25191f7a4a324,0x3fd2941623c88e02,3 +np.float64,0x3fd0603ef0a0c07c,0x3fd08f64e97588dc,3 +np.float64,0xbfc653bae52ca774,0xbfc6711e5e0d8ea9,3 +np.float64,0xbfd11db8fea23b72,0xbfd153b63c6e8812,3 +np.float64,0xbfea9bde25f537bc,0xbfef6b52268e139a,3 +np.float64,0x1,0x1,3 +np.float64,0xbfefd3806d7fa701,0xbff776dcef9583ca,3 +np.float64,0xbfe0fb8cfde1f71a,0xbfe1e6e2e774a8f8,3 +np.float64,0x3fea384534f4708a,0x3feebadaa389be0d,3 +np.float64,0x3feff761c97feec4,0x3ff866157b9d072d,3 +np.float64,0x3fe7131ccb6e263a,0x3fe9c58b4389f505,3 +np.float64,0x3fe9084f7872109e,0x3fecbed0355dbc8f,3 +np.float64,0x3f708e89e0211d00,0x3f708e8cd4946b9e,3 +np.float64,0xbfe39185f067230c,0xbfe50e1cd178244d,3 +np.float64,0x3fd67cc1a9acf984,0x3fd6fa514784b48c,3 +np.float64,0xbfecaef005f95de0,0xbff1c89c9c3ef94a,3 +np.float64,0xbfe12eec81e25dd9,0xbfe223a4285bba9a,3 +np.float64,0x3fbe7f9faa3cff40,0x3fbe92363525068d,3 +np.float64,0xbfe1950b2b632a16,0xbfe29d45fc1e4ce9,3 +np.float64,0x3fe45049e6e8a094,0x3fe6020de759e383,3 +np.float64,0x3fe4d10c8969a21a,0x3fe6aa1fe42cbeb9,3 +np.float64,0xbfe9d04658f3a08d,0xbfee08370a0dbf0c,3 +np.float64,0x3fe14fb314e29f66,0x3fe24a8d73663521,3 +np.float64,0xbfef4abfe4fe9580,0xbff5c2c1ff1250ca,3 +np.float64,0xbfe6162b366c2c56,0xbfe86073ac3c6243,3 +np.float64,0x3feffe781e7ffcf0,0x3ff8d2cbedd6a1b5,3 +np.float64,0xbff0000000000000,0xbff921fb54442d18,3 +np.float64,0x3fc1dc45ad23b888,0x3fc1eb3d9bddda58,3 +np.float64,0xbfe793f6fcef27ee,0xbfea81c93d65aa64,3 +np.float64,0x3fdef6d2bbbdeda4,0x3fe029079d42efb5,3 +np.float64,0xbfdf0ac479be1588,0xbfe0346dbc95963f,3 +np.float64,0xbfd33927d7a67250,0xbfd38653f90a5b73,3 +np.float64,0xbfe248b072e49161,0xbfe37631ef6572e1,3 +np.float64,0xbfc8ceb6af319d6c,0xbfc8f7288657f471,3 +np.float64,0x3fdd7277fcbae4f0,0x3fde99886e6766ef,3 +np.float64,0xbfe0d30c6561a619,0xbfe1b72f90bf53d6,3 +np.float64,0xbfcb0fe07d361fc0,0xbfcb448e2eae9542,3 +np.float64,0xbfe351f57fe6a3eb,0xbfe4be13eef250f2,3 +np.float64,0x3fe85ec02cf0bd80,0x3febb407e2e52e4c,3 +np.float64,0x3fc8bc59b53178b0,0x3fc8e470f65800ec,3 +np.float64,0xbfd278d447a4f1a8,0xbfd2bd133c9c0620,3 +np.float64,0x3feda5cfd87b4ba0,0x3ff2f5ab4324f43f,3 +np.float64,0xbfd2b32a36a56654,0xbfd2fa09c36afd34,3 
+np.float64,0xbfed4a81cb7a9504,0xbff28077a4f4fff4,3 +np.float64,0x3fdf079bf9be0f38,0x3fe0329f7fb13f54,3 +np.float64,0x3fd14097f6a28130,0x3fd177e9834ec23f,3 +np.float64,0xbfaeab11843d5620,0xbfaeafc5531eb6b5,3 +np.float64,0xbfac3f8c14387f20,0xbfac433893d53360,3 +np.float64,0xbfc139d7ed2273b0,0xbfc14743adbbe660,3 +np.float64,0x3fe78cb02cef1960,0x3fea7707f76edba9,3 +np.float64,0x3fefe16b41ffc2d6,0x3ff7bff36a7aa7b8,3 +np.float64,0x3fec5260d378a4c2,0x3ff162c588b0da38,3 +np.float64,0x3fedb146f17b628e,0x3ff304f90d3a15d1,3 +np.float64,0x3fd1fd45f7a3fa8c,0x3fd23c2dc3929e20,3 +np.float64,0x3fe0898a5ee11314,0x3fe1610c63e726eb,3 +np.float64,0x3fe7719946eee332,0x3fea4f205eecb59f,3 +np.float64,0x3fe955218972aa44,0x3fed3b530c1f7651,3 +np.float64,0x3fe0ccbf4461997e,0x3fe1afc7b4587836,3 +np.float64,0xbfe9204314f24086,0xbfece5605780e346,3 +np.float64,0xbfe552017feaa403,0xbfe755773cbd74d5,3 +np.float64,0x3fd8ce4b32b19c98,0x3fd9791c8dd44eae,3 +np.float64,0x3fef89acd9ff135a,0x3ff668f78adf7ced,3 +np.float64,0x3fc9d713ad33ae28,0x3fca04da6c293bbd,3 +np.float64,0xbfe22d9c4de45b38,0xbfe3553effadcf92,3 +np.float64,0x3fa5cda38c2b9b40,0x3fa5cf53c5787482,3 +np.float64,0x3fa878ebdc30f1e0,0x3fa87b4f2bf1d4c3,3 +np.float64,0x3fe8030353700606,0x3feb27e196928789,3 +np.float64,0x3fb50607222a0c10,0x3fb50c188ce391e6,3 +np.float64,0x3fd9ba4ab4b37494,0x3fda79fa8bd40f45,3 +np.float64,0x3fb564598e2ac8b0,0x3fb56abe42d1ba13,3 +np.float64,0xbfd1177c83a22efa,0xbfd14d3d7ef30cc4,3 +np.float64,0xbfd952cec7b2a59e,0xbfda09215d17c0ac,3 +np.float64,0x3fe1d8066663b00c,0x3fe2edb35770b8dd,3 +np.float64,0xbfc89427a3312850,0xbfc8bb7a7c389497,3 +np.float64,0xbfe86ebfd3f0dd80,0xbfebccc2ba0f506c,3 +np.float64,0x3fc390578b2720b0,0x3fc3a40cb7f5f728,3 +np.float64,0xbfd122f9b8a245f4,0xbfd15929dc57a897,3 +np.float64,0x3f8d0636d03a0c80,0x3f8d06767de576df,3 +np.float64,0xbfe4b55d8b696abb,0xbfe685be537a9637,3 +np.float64,0xbfdfd51cf9bfaa3a,0xbfe0a894fcff0c76,3 +np.float64,0xbfd37c1f52a6f83e,0xbfd3cc9593c37aad,3 +np.float64,0x3fd0e8283ea1d050,0x3fd11c25c800785a,3 +np.float64,0x3fd3160784a62c10,0x3fd36183a6c2880c,3 +np.float64,0x3fd4c66e57a98cdc,0x3fd5288fe3394eff,3 +np.float64,0x3fee2f7e3afc5efc,0x3ff3b8063eb30cdc,3 +np.float64,0xbfe526773a6a4cee,0xbfe71b4364215b18,3 +np.float64,0x3fea01181e740230,0x3fee5b65eccfd130,3 +np.float64,0xbfe51c03f76a3808,0xbfe70d5919d37587,3 +np.float64,0x3fd97e1375b2fc28,0x3fda3845da40b22b,3 +np.float64,0x3fd5c14a14ab8294,0x3fd632890d07ed03,3 +np.float64,0xbfec9b474279368e,0xbff1b28f50584fe3,3 +np.float64,0x3fe0139ca860273a,0x3fe0d7fc377f001c,3 +np.float64,0x3fdb080c9db61018,0x3fdbe85056358fa0,3 +np.float64,0xbfdd72ceb1bae59e,0xbfde99ea171661eb,3 +np.float64,0xbfe64e934fec9d26,0xbfe8aec2ef24be63,3 +np.float64,0x3fd1036a93a206d4,0x3fd1386adabe01bd,3 +np.float64,0x3febc9d4a5f793aa,0x3ff0d4c069f1e67d,3 +np.float64,0xbfe547a16fea8f43,0xbfe747902fe6fb4d,3 +np.float64,0x3fc289b0f9251360,0x3fc29a709de6bdd9,3 +np.float64,0xbfe694494a6d2892,0xbfe9108f3dc133e2,3 +np.float64,0x3fd827dfe4b04fc0,0x3fd8c4fe40532b91,3 +np.float64,0xbfe8b89418f17128,0xbfec400c5a334b2e,3 +np.float64,0x3fed5605147aac0a,0x3ff28ed1f612814a,3 +np.float64,0xbfed36af31fa6d5e,0xbff26804e1f71af0,3 +np.float64,0x3fdbb01c02b76038,0x3fdca2381558bbf0,3 +np.float64,0x3fe2a951666552a2,0x3fe3ec88f780f9e6,3 +np.float64,0x3fe662defbecc5be,0x3fe8cb1dbfca98ab,3 +np.float64,0x3fd098b1b3a13164,0x3fd0c9d064e4eaf2,3 +np.float64,0x3fefa10edeff421e,0x3ff6b1c6187b18a8,3 +np.float64,0xbfec4feb7a789fd7,0xbff16021ef37a219,3 +np.float64,0x3fd8e415bbb1c82c,0x3fd990c1f8b786bd,3 
+np.float64,0xbfead5a09275ab41,0xbfefd44fab5b4f6e,3 +np.float64,0xbfe8666c16f0ccd8,0xbfebbfe0c9f2a9ae,3 +np.float64,0x3fdc962132b92c44,0x3fdda2525a6f406c,3 +np.float64,0xbfe2037f03e406fe,0xbfe3222ec2a3449e,3 +np.float64,0xbfec82c27e790585,0xbff197626ea9df1e,3 +np.float64,0x3fd2b4e03ca569c0,0x3fd2fbd3c7fda23e,3 +np.float64,0xbfe9b0dee5f361be,0xbfedd34f6d3dfe8a,3 +np.float64,0x3feef45cd17de8ba,0x3ff508180687b591,3 +np.float64,0x3f82c39bf0258700,0x3f82c3ad24c3b3f1,3 +np.float64,0xbfca848cfd350918,0xbfcab612ce258546,3 +np.float64,0x3fd6442aaaac8854,0x3fd6bdea54016e48,3 +np.float64,0x3fe550799e6aa0f4,0x3fe75369c9ea5b1e,3 +np.float64,0xbfe0e9d5a361d3ac,0xbfe1d20011139d89,3 +np.float64,0x3fbfc9ff1e3f9400,0x3fbfdf0ea6885c80,3 +np.float64,0xbfa187e8b4230fd0,0xbfa188c95072092e,3 +np.float64,0x3fcd28c9533a5190,0x3fcd6ae879c21b47,3 +np.float64,0x3fc6227ec52c4500,0x3fc63f1fbb441d29,3 +np.float64,0x3fe9b7a2ed736f46,0x3feddeab49b2d176,3 +np.float64,0x3fd4aee93da95dd4,0x3fd50fb3b71e0339,3 +np.float64,0xbfe164dacf62c9b6,0xbfe263bb2f7dd5d9,3 +np.float64,0x3fec62e525f8c5ca,0x3ff17496416d9921,3 +np.float64,0x3fdd363ee0ba6c7c,0x3fde55c6a49a5f86,3 +np.float64,0x3fe65cbf75ecb97e,0x3fe8c28d31ff3ebd,3 +np.float64,0xbfe76d27ca6eda50,0xbfea4899e3661425,3 +np.float64,0xbfc305738d260ae8,0xbfc3178dcfc9d30f,3 +np.float64,0xbfd3aa2a54a75454,0xbfd3fcf1e1ce8328,3 +np.float64,0x3fd1609fc9a2c140,0x3fd1992efa539b9f,3 +np.float64,0xbfac1291bc382520,0xbfac162cc7334b4d,3 +np.float64,0xbfedb461ea7b68c4,0xbff309247850455d,3 +np.float64,0xbfe8d2adf8f1a55c,0xbfec6947be90ba92,3 +np.float64,0xbfd7128965ae2512,0xbfd79a9855bcfc5a,3 +np.float64,0x3fe8deb09471bd62,0x3fec7c56b3aee531,3 +np.float64,0xbfe5f4d329ebe9a6,0xbfe8327ea8189af8,3 +np.float64,0xbfd3b46ac9a768d6,0xbfd407b80b12ff17,3 +np.float64,0x3fec899d7cf9133a,0x3ff19ef26baca36f,3 +np.float64,0xbfec192fd5783260,0xbff126306e507fd0,3 +np.float64,0x3fe945bdaef28b7c,0x3fed222f787310bf,3 +np.float64,0xbfeff9635d7ff2c7,0xbff87d6773f318eb,3 +np.float64,0xbfd604b81cac0970,0xbfd67a4aa852559a,3 +np.float64,0x3fcd1cc9d53a3990,0x3fcd5e962e237c24,3 +np.float64,0xbfed77b0fffaef62,0xbff2b97a1c9b6483,3 +np.float64,0xbfc9c69325338d28,0xbfc9f401500402fb,3 +np.float64,0xbfdf97e246bf2fc4,0xbfe0855601ea9db3,3 +np.float64,0x3fc7e6304f2fcc60,0x3fc80a4e718504cd,3 +np.float64,0x3fec3b599e7876b4,0x3ff14a2d1b9c68e6,3 +np.float64,0xbfe98618e1f30c32,0xbfed8bfbb31c394a,3 +np.float64,0xbfe59b3c0feb3678,0xbfe7b832d6df81de,3 +np.float64,0xbfe54ce2fe6a99c6,0xbfe74e9a85be4116,3 +np.float64,0x3fc9db49cb33b690,0x3fca092737ef500a,3 +np.float64,0xbfb4a922ae295248,0xbfb4aee4e39078a9,3 +np.float64,0xbfd0e542e0a1ca86,0xbfd11925208d66af,3 +np.float64,0x3fd70543f2ae0a88,0x3fd78c5e9238a3ee,3 +np.float64,0x3fd67f7a7facfef4,0x3fd6fd3998df8545,3 +np.float64,0xbfe40b643d6816c8,0xbfe5a947e427f298,3 +np.float64,0xbfcd85f69b3b0bec,0xbfcdcaa24b75f1a3,3 +np.float64,0x3fec705fb4f8e0c0,0x3ff1833c82163ee2,3 +np.float64,0x3fb37650ea26eca0,0x3fb37b20c16fb717,3 +np.float64,0x3fe5ebfa55ebd7f4,0x3fe826578d716e70,3 +np.float64,0x3fe991dfe5f323c0,0x3fed9f8a4bf1f588,3 +np.float64,0xbfd658bd0aacb17a,0xbfd6d3dd06e54900,3 +np.float64,0xbfc24860252490c0,0xbfc258701a0b9290,3 +np.float64,0xbfefb8d763ff71af,0xbff705b6ea4a569d,3 +np.float64,0x3fb8fcb4ae31f970,0x3fb906e809e7899f,3 +np.float64,0x3fce6343cb3cc688,0x3fceae41d1629625,3 +np.float64,0xbfd43d5a11a87ab4,0xbfd497da25687e07,3 +np.float64,0xbfe9568851f2ad11,0xbfed3d9e5fe83a76,3 +np.float64,0x3fe1b66153e36cc2,0x3fe2c53c7e016271,3 +np.float64,0x3fef27452bfe4e8a,0x3ff571b3486ed416,3 
+np.float64,0x3fca87c0a7350f80,0x3fcab958a7bb82d4,3 +np.float64,0xbfd8776a8fb0eed6,0xbfd91afaf2f50edf,3 +np.float64,0x3fe9522a76f2a454,0x3fed3679264e1525,3 +np.float64,0x3fea14ff2cf429fe,0x3fee7da6431cc316,3 +np.float64,0x3fe970618bf2e0c4,0x3fed68154d54dd97,3 +np.float64,0x3fd3410cfca68218,0x3fd38e9b21792240,3 +np.float64,0xbf6a8070c0350100,0xbf6a8073c7c34517,3 +np.float64,0xbfbe449de23c8938,0xbfbe56c8e5e4d98b,3 +np.float64,0x3fedbc92e27b7926,0x3ff314313216d8e6,3 +np.float64,0xbfe3be4706677c8e,0xbfe546d3ceb85aea,3 +np.float64,0x3fe30cd6d76619ae,0x3fe467b6f2664a8d,3 +np.float64,0x3fd7d69b21afad38,0x3fd86d54284d05ad,3 +np.float64,0xbfe501001fea0200,0xbfe6e978afcff4d9,3 +np.float64,0xbfe44ba3d8e89748,0xbfe5fc0a31cd1e3e,3 +np.float64,0x3fec52f7c078a5f0,0x3ff16367acb209b2,3 +np.float64,0xbfcb19efcb3633e0,0xbfcb4ed9235a7d47,3 +np.float64,0xbfab86796c370cf0,0xbfab89df7bf15710,3 +np.float64,0xbfb962feda32c600,0xbfb96db1e1679c98,3 +np.float64,0x3fe0dd14e861ba2a,0x3fe1c2fc72810567,3 +np.float64,0x3fe41bcc6de83798,0x3fe5be59b7f9003b,3 +np.float64,0x3fc82f4c4f305e98,0x3fc854bd9798939f,3 +np.float64,0xbfcd143a613a2874,0xbfcd55cbd1619d84,3 +np.float64,0xbfd52da61baa5b4c,0xbfd595d0b3543439,3 +np.float64,0xbfb71b4a8e2e3698,0xbfb7235a4ab8432f,3 +np.float64,0xbfec141a19782834,0xbff120e1e39fc856,3 +np.float64,0xbfdba9319db75264,0xbfdc9a8ca2578bb2,3 +np.float64,0xbfbce5d74639cbb0,0xbfbcf5a4878cfa51,3 +np.float64,0x3fde67f7b3bccff0,0x3fdfaf45a9f843ad,3 +np.float64,0xbfe12d87bc625b10,0xbfe221fd4476eb71,3 +np.float64,0x3fe35b8f6be6b71e,0x3fe4ca20f65179e1,3 +np.float64,0xbfdbada1d3b75b44,0xbfdc9f78b19f93d1,3 +np.float64,0xbfc60159c52c02b4,0xbfc61d79b879f598,3 +np.float64,0x3fd6b81c38ad7038,0x3fd739c27bfa16d8,3 +np.float64,0xbfd646a253ac8d44,0xbfd6c08c19612bbb,3 +np.float64,0xbfe6babef0ed757e,0xbfe94703d0bfa311,3 +np.float64,0xbfed5671f1faace4,0xbff28f5a3f3683d0,3 +np.float64,0x3fc01d1e85203a40,0x3fc02817ec0dfd38,3 +np.float64,0xbfe9188a61f23115,0xbfecd8eb5da84223,3 +np.float64,0x3fdca3bab9b94774,0x3fddb1868660c239,3 +np.float64,0xbfa255750c24aaf0,0xbfa25675f7b36343,3 +np.float64,0x3fb3602db626c060,0x3fb364ed2d5b2876,3 +np.float64,0xbfd30a14bda6142a,0xbfd354ff703b8862,3 +np.float64,0xbfe1cfe381639fc7,0xbfe2e3e720b968c8,3 +np.float64,0xbfd2af6a4fa55ed4,0xbfd2f61e190bcd1f,3 +np.float64,0xbfe93c50937278a1,0xbfed12d64bb10d73,3 +np.float64,0x3fddd8bc44bbb178,0x3fdf0ced7f9005cc,3 +np.float64,0x3fdb2bc73cb65790,0x3fdc0fc0e18e425e,3 +np.float64,0xbfd073f6aba0e7ee,0xbfd0a3cb5468a961,3 +np.float64,0x3fed4bad7b7a975a,0x3ff281ebeb75e414,3 +np.float64,0xbfdc75b50bb8eb6a,0xbfdd7e1a7631cb22,3 +np.float64,0x3fd458a90fa8b154,0x3fd4b4a5817248ce,3 +np.float64,0x3feead5db57d5abc,0x3ff484286fab55ff,3 +np.float64,0x3fb3894382271280,0x3fb38e217b4e7905,3 +np.float64,0xffefffffffffffff,0x7ff8000000000000,3 +np.float64,0xbfe428212ae85042,0xbfe5ce36f226bea8,3 +np.float64,0xbfc08b39f7211674,0xbfc0971b93ebc7ad,3 +np.float64,0xbfc2e7cf5525cfa0,0xbfc2f994eb72b623,3 +np.float64,0xbfdb0d85afb61b0c,0xbfdbee5a2de3c5db,3 +np.float64,0xfff0000000000000,0x7ff8000000000000,3 +np.float64,0xbfd0d36af7a1a6d6,0xbfd106a5f05ef6ff,3 +np.float64,0xbfc333d0912667a0,0xbfc3467162b7289a,3 +np.float64,0x3fcdababc53b5758,0x3fcdf16458c20fa8,3 +np.float64,0x3fd0821b38a10438,0x3fd0b26e3e0b9185,3 +np.float64,0x0,0x0,3 +np.float64,0x3feb7f70edf6fee2,0x3ff08ae81854bf20,3 +np.float64,0x3fe6e075716dc0ea,0x3fe97cc5254be6ff,3 +np.float64,0x3fea13b682f4276e,0x3fee7b6f18073b5b,3 diff --git 
a/.venv/lib/python3.12/site-packages/numpy/core/tests/data/umath-validation-set-arcsinh.csv b/.venv/lib/python3.12/site-packages/numpy/core/tests/data/umath-validation-set-arcsinh.csv new file mode 100644 index 00000000..1da29c82 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/data/umath-validation-set-arcsinh.csv @@ -0,0 +1,1429 @@ +dtype,input,output,ulperrortol +np.float32,0xbf24142a,0xbf1a85ef,2 +np.float32,0x3e71cf91,0x3e6f9e37,2 +np.float32,0xe52a7,0xe52a7,2 +np.float32,0x3ef1e074,0x3ee9add9,2 +np.float32,0x806160ac,0x806160ac,2 +np.float32,0x7e2d59a2,0x42af4798,2 +np.float32,0xbf32cac9,0xbf26bf96,2 +np.float32,0x3f081701,0x3f026142,2 +np.float32,0x3f23cc88,0x3f1a499c,2 +np.float32,0xbf090d94,0xbf033ad0,2 +np.float32,0x803af2fc,0x803af2fc,2 +np.float32,0x807eb17e,0x807eb17e,2 +np.float32,0x5c0d8e,0x5c0d8e,2 +np.float32,0x3f7b79d2,0x3f5e6b1d,2 +np.float32,0x806feeae,0x806feeae,2 +np.float32,0x3e4b423a,0x3e49f274,2 +np.float32,0x3f49e5ac,0x3f394a41,2 +np.float32,0x3f18cd4e,0x3f10ef35,2 +np.float32,0xbed75734,0xbed17322,2 +np.float32,0x7f591151,0x42b28085,2 +np.float32,0xfefe9da6,0xc2b16f51,2 +np.float32,0xfeac90fc,0xc2b0a82a,2 +np.float32,0x805c198e,0x805c198e,2 +np.float32,0x7f66d6df,0x42b2a004,2 +np.float32,0x505438,0x505438,2 +np.float32,0xbf39a209,0xbf2c5255,2 +np.float32,0x7fa00000,0x7fe00000,2 +np.float32,0xc84cb,0xc84cb,2 +np.float32,0x7f07d6f5,0x42b19088,2 +np.float32,0x79d7e4,0x79d7e4,2 +np.float32,0xff32f6a0,0xc2b21db1,2 +np.float32,0x7c005c05,0x42a9222e,2 +np.float32,0x3ec449aa,0x3ebfc5ae,2 +np.float32,0x800ec323,0x800ec323,2 +np.float32,0xff1c904c,0xc2b1d93a,2 +np.float32,0x7f4eca52,0x42b267b0,2 +np.float32,0x3ee06540,0x3ed9c514,2 +np.float32,0x6aab4,0x6aab4,2 +np.float32,0x3e298d8c,0x3e28c99e,2 +np.float32,0xbf38d162,0xbf2ba94a,2 +np.float32,0x2d9083,0x2d9083,2 +np.float32,0x7eae5032,0x42b0ad52,2 +np.float32,0x3ead5b3c,0x3eaa3443,2 +np.float32,0x806fef66,0x806fef66,2 +np.float32,0x3f5b614e,0x3f46ca71,2 +np.float32,0xbf4c906a,0xbf3b60fc,2 +np.float32,0x8049453e,0x8049453e,2 +np.float32,0x3d305220,0x3d304432,2 +np.float32,0x2e1a89,0x2e1a89,2 +np.float32,0xbf4e74ec,0xbf3cdacf,2 +np.float32,0x807a827a,0x807a827a,2 +np.float32,0x80070745,0x80070745,2 +np.float32,0xbe1ba2fc,0xbe1b0b28,2 +np.float32,0xbe5131d0,0xbe4fc421,2 +np.float32,0x5bfd98,0x5bfd98,2 +np.float32,0xbd8e1a48,0xbd8dfd27,2 +np.float32,0x8006c160,0x8006c160,2 +np.float32,0x346490,0x346490,2 +np.float32,0xbdbdf060,0xbdbdaaf0,2 +np.float32,0x3ea9d0c4,0x3ea6d8c7,2 +np.float32,0xbf2aaa28,0xbf200916,2 +np.float32,0xbf160c26,0xbf0e9047,2 +np.float32,0x80081fd4,0x80081fd4,2 +np.float32,0x7db44283,0x42adf8b6,2 +np.float32,0xbf1983f8,0xbf118bf5,2 +np.float32,0x2c4a35,0x2c4a35,2 +np.float32,0x6165a7,0x6165a7,2 +np.float32,0xbe776b44,0xbe75129f,2 +np.float32,0xfe81841a,0xc2b0153b,2 +np.float32,0xbf7d1b2f,0xbf5f9461,2 +np.float32,0x80602d36,0x80602d36,2 +np.float32,0xfe8d5046,0xc2b041dd,2 +np.float32,0xfe5037bc,0xc2afa56d,2 +np.float32,0x4bbea6,0x4bbea6,2 +np.float32,0xfea039de,0xc2b0822d,2 +np.float32,0x7ea627a4,0x42b094c7,2 +np.float32,0x3f556198,0x3f423591,2 +np.float32,0xfedbae04,0xc2b123c1,2 +np.float32,0xbe30432c,0xbe2f6744,2 +np.float32,0x80202c77,0x80202c77,2 +np.float32,0xff335cc1,0xc2b21ed5,2 +np.float32,0x3e1e1ebe,0x3e1d7f95,2 +np.float32,0x8021c9c0,0x8021c9c0,2 +np.float32,0x7dc978,0x7dc978,2 +np.float32,0xff6cfabc,0xc2b2ad75,2 +np.float32,0x7f2bd542,0x42b208e0,2 +np.float32,0x53bf33,0x53bf33,2 +np.float32,0x804e04bb,0x804e04bb,2 +np.float32,0x3f30d2f9,0x3f2521ca,2 
+np.float32,0x3dfde876,0x3dfd4316,2 +np.float32,0x46f8b1,0x46f8b1,2 +np.float32,0xbd5f9e20,0xbd5f81ba,2 +np.float32,0x807d6a22,0x807d6a22,2 +np.float32,0xff3881da,0xc2b22d50,2 +np.float32,0x1b1cb5,0x1b1cb5,2 +np.float32,0x3f75f2d0,0x3f5a7435,2 +np.float32,0xfee39c1a,0xc2b135e9,2 +np.float32,0x7f79f14a,0x42b2c8b9,2 +np.float32,0x8000e2d1,0x8000e2d1,2 +np.float32,0xab779,0xab779,2 +np.float32,0xbede6690,0xbed7f102,2 +np.float32,0x76e20d,0x76e20d,2 +np.float32,0x3ed714cb,0x3ed135e9,2 +np.float32,0xbeaa6f44,0xbea76f31,2 +np.float32,0x7f7dc8b1,0x42b2d089,2 +np.float32,0x108cb2,0x108cb2,2 +np.float32,0x7d37ba82,0x42ac9f94,2 +np.float32,0x3f31d068,0x3f25f221,2 +np.float32,0x8010a331,0x8010a331,2 +np.float32,0x3f2fdc7c,0x3f2456cd,2 +np.float32,0x7f7a9a67,0x42b2ca13,2 +np.float32,0x3f2acb31,0x3f202492,2 +np.float32,0x7f54fa94,0x42b276c9,2 +np.float32,0x3ebf8a70,0x3ebb553c,2 +np.float32,0x7f75b1a7,0x42b2bff2,2 +np.float32,0x7daebe07,0x42ade8cc,2 +np.float32,0xbd3a3ef0,0xbd3a2e86,2 +np.float32,0x8078ec9e,0x8078ec9e,2 +np.float32,0x3eda206a,0x3ed403ec,2 +np.float32,0x3f7248f2,0x3f57cd77,2 +np.float32,0x805d55ba,0x805d55ba,2 +np.float32,0xff30dc3e,0xc2b217a3,2 +np.float32,0xbe12b27c,0xbe123333,2 +np.float32,0xbf6ed9cf,0xbf554cd0,2 +np.float32,0xbed9eb5c,0xbed3d31c,2 +np.float32,0xbf1c9aea,0xbf14307b,2 +np.float32,0x3f540ac4,0x3f412de2,2 +np.float32,0x800333ac,0x800333ac,2 +np.float32,0x3f74cdb4,0x3f59a09a,2 +np.float32,0xbf41dc41,0xbf32ee6f,2 +np.float32,0xff2c7804,0xc2b20ac4,2 +np.float32,0x514493,0x514493,2 +np.float32,0xbddf1220,0xbddea1cf,2 +np.float32,0xfeaf74de,0xc2b0b0ab,2 +np.float32,0xfe5dfb30,0xc2afc633,2 +np.float32,0xbf4785c4,0xbf376bdb,2 +np.float32,0x80191cd3,0x80191cd3,2 +np.float32,0xfe44f708,0xc2af88fb,2 +np.float32,0x3d4cd8a0,0x3d4cc2ca,2 +np.float32,0x7f572eff,0x42b27c0f,2 +np.float32,0x8031bacb,0x8031bacb,2 +np.float32,0x7f2ea684,0x42b21133,2 +np.float32,0xbea1976a,0xbe9f05bb,2 +np.float32,0x3d677b41,0x3d675bc1,2 +np.float32,0x3f61bf24,0x3f4b9870,2 +np.float32,0x7ef55ddf,0x42b15c5f,2 +np.float32,0x3eabcb20,0x3ea8b91c,2 +np.float32,0xff73d9ec,0xc2b2bc18,2 +np.float32,0x77b9f5,0x77b9f5,2 +np.float32,0x4c6c6c,0x4c6c6c,2 +np.float32,0x7ed09c94,0x42b10949,2 +np.float32,0xdeeec,0xdeeec,2 +np.float32,0x7eac5858,0x42b0a782,2 +np.float32,0x7e190658,0x42af07bd,2 +np.float32,0xbe3c8980,0xbe3b7ce2,2 +np.float32,0x8059e86e,0x8059e86e,2 +np.float32,0xff201836,0xc2b1e4a5,2 +np.float32,0xbeac109c,0xbea8fafb,2 +np.float32,0x7edd1e2b,0x42b12718,2 +np.float32,0x639cd8,0x639cd8,2 +np.float32,0x3f5e4cae,0x3f490059,2 +np.float32,0x3d84c185,0x3d84a9c4,2 +np.float32,0xbe8c1130,0xbe8a605b,2 +np.float32,0x80000000,0x80000000,2 +np.float32,0x3f1da5e4,0x3f151404,2 +np.float32,0x7f75a873,0x42b2bfdf,2 +np.float32,0xbd873540,0xbd871c28,2 +np.float32,0xbe8e5e10,0xbe8c9808,2 +np.float32,0x7f004bf2,0x42b17347,2 +np.float32,0x800000,0x800000,2 +np.float32,0xbf6d6b79,0xbf544095,2 +np.float32,0x7ed7b563,0x42b11a6a,2 +np.float32,0x80693745,0x80693745,2 +np.float32,0x3ee0f608,0x3eda49a8,2 +np.float32,0xfe1285a4,0xc2aef181,2 +np.float32,0x72d946,0x72d946,2 +np.float32,0x6a0dca,0x6a0dca,2 +np.float32,0x3f5c9df6,0x3f47ba99,2 +np.float32,0xff002af6,0xc2b172c4,2 +np.float32,0x3f4ac98f,0x3f39fd0a,2 +np.float32,0x8066acf7,0x8066acf7,2 +np.float32,0xbcaa4e60,0xbcaa4b3c,2 +np.float32,0x80162813,0x80162813,2 +np.float32,0xff34b318,0xc2b222a2,2 +np.float32,0x7f1ce33c,0x42b1da49,2 +np.float32,0x3f0e55ab,0x3f07ddb0,2 +np.float32,0x7c75d996,0x42aa6eec,2 +np.float32,0xbf221bc6,0xbf18dc89,2 +np.float32,0x3f5a1a4c,0x3f45d1d4,2 
+np.float32,0x7f2451b8,0x42b1f1fb,2 +np.float32,0x3ec55ca0,0x3ec0c655,2 +np.float32,0x3f752dc2,0x3f59e600,2 +np.float32,0xbe33f638,0xbe330c4d,2 +np.float32,0x3e2a9148,0x3e29c9d8,2 +np.float32,0x3f3362a1,0x3f273c01,2 +np.float32,0x5f83b3,0x5f83b3,2 +np.float32,0x3e362488,0x3e353216,2 +np.float32,0x140bcf,0x140bcf,2 +np.float32,0x7e3e96df,0x42af7822,2 +np.float32,0xbebc7082,0xbeb86ce6,2 +np.float32,0xbe92a92e,0xbe90b9d2,2 +np.float32,0xff3d8afc,0xc2b23b19,2 +np.float32,0x804125e3,0x804125e3,2 +np.float32,0x3f3675d1,0x3f29bedb,2 +np.float32,0xff70bb09,0xc2b2b57f,2 +np.float32,0x3f29681c,0x3f1efcd2,2 +np.float32,0xbdc70380,0xbdc6b3a8,2 +np.float32,0x54e0dd,0x54e0dd,2 +np.float32,0x3d545de0,0x3d54458c,2 +np.float32,0x7f800000,0x7f800000,2 +np.float32,0x8014a4c2,0x8014a4c2,2 +np.float32,0xbe93f58a,0xbe91f938,2 +np.float32,0x17de33,0x17de33,2 +np.float32,0xfefb679a,0xc2b168d2,2 +np.float32,0xbf23423e,0xbf19d511,2 +np.float32,0x7e893fa1,0x42b032ec,2 +np.float32,0x3f44fe2d,0x3f356bda,2 +np.float32,0xbebb2e78,0xbeb73e8f,2 +np.float32,0x3f5632e0,0x3f42d633,2 +np.float32,0x3ddd8698,0x3ddd1896,2 +np.float32,0x80164ea7,0x80164ea7,2 +np.float32,0x80087b37,0x80087b37,2 +np.float32,0xbf06ab1e,0xbf011f95,2 +np.float32,0x3db95524,0x3db9149f,2 +np.float32,0x7aa1fbb3,0x42a570a1,2 +np.float32,0xbd84fc48,0xbd84e467,2 +np.float32,0x3d65c6f5,0x3d65a826,2 +np.float32,0xfe987800,0xc2b068c4,2 +np.float32,0x7ec59532,0x42b0ed7a,2 +np.float32,0x3ea0232c,0x3e9da29a,2 +np.float32,0x80292a08,0x80292a08,2 +np.float32,0x734cfe,0x734cfe,2 +np.float32,0x3f3b6d63,0x3f2dc596,2 +np.float32,0x3f27bcc1,0x3f1d97e6,2 +np.float32,0xfe1da554,0xc2af16f9,2 +np.float32,0x7c91f5,0x7c91f5,2 +np.float32,0xfe4e78cc,0xc2afa11e,2 +np.float32,0x7e4b4e08,0x42af9933,2 +np.float32,0xfe0949ec,0xc2aed02e,2 +np.float32,0x7e2f057f,0x42af4c81,2 +np.float32,0xbf200ae0,0xbf171ce1,2 +np.float32,0x3ebcc244,0x3eb8b99e,2 +np.float32,0xbf68f58d,0xbf50f7aa,2 +np.float32,0x4420b1,0x4420b1,2 +np.float32,0x3f5b61bf,0x3f46cac7,2 +np.float32,0x3fec78,0x3fec78,2 +np.float32,0x7f4183c8,0x42b245b7,2 +np.float32,0xbf10587c,0xbf099ee2,2 +np.float32,0x0,0x0,2 +np.float32,0x7ec84dc3,0x42b0f47a,2 +np.float32,0x3f5fbd7b,0x3f4a166d,2 +np.float32,0xbd884eb8,0xbd883502,2 +np.float32,0xfe3f10a4,0xc2af7969,2 +np.float32,0xff3f4920,0xc2b23fc9,2 +np.float32,0x8013900f,0x8013900f,2 +np.float32,0x8003529d,0x8003529d,2 +np.float32,0xbf032384,0xbefbfb3c,2 +np.float32,0xff418c7c,0xc2b245ce,2 +np.float32,0xbec0aad0,0xbebc633b,2 +np.float32,0xfdbff178,0xc2ae18de,2 +np.float32,0x68ab15,0x68ab15,2 +np.float32,0xbdfc4a88,0xbdfba848,2 +np.float32,0xbf5adec6,0xbf466747,2 +np.float32,0x807d5dcc,0x807d5dcc,2 +np.float32,0x61d144,0x61d144,2 +np.float32,0x807e3a03,0x807e3a03,2 +np.float32,0x1872f2,0x1872f2,2 +np.float32,0x7f2a272c,0x42b203d8,2 +np.float32,0xfe7f8314,0xc2b00e3a,2 +np.float32,0xbe42aeac,0xbe418737,2 +np.float32,0x8024b614,0x8024b614,2 +np.float32,0xbe41b6b8,0xbe40939a,2 +np.float32,0xa765c,0xa765c,2 +np.float32,0x7ea74f4b,0x42b09853,2 +np.float32,0x7f7ef631,0x42b2d2e7,2 +np.float32,0x7eaef5e6,0x42b0af38,2 +np.float32,0xff733d85,0xc2b2bacf,2 +np.float32,0x537ac0,0x537ac0,2 +np.float32,0xbeca4790,0xbec55b1d,2 +np.float32,0x80117314,0x80117314,2 +np.float32,0xfe958536,0xc2b05ec5,2 +np.float32,0x8066ecc2,0x8066ecc2,2 +np.float32,0xbf56baf3,0xbf433e82,2 +np.float32,0x1f7fd7,0x1f7fd7,2 +np.float32,0x3e942104,0x3e9222fc,2 +np.float32,0xfeaffe82,0xc2b0b23c,2 +np.float32,0xfe0e02b0,0xc2aee17e,2 +np.float32,0xbf800000,0xbf61a1b3,2 +np.float32,0x800b7e49,0x800b7e49,2 
+np.float32,0x6c514f,0x6c514f,2 +np.float32,0xff800000,0xff800000,2 +np.float32,0x7f7d9a45,0x42b2d02b,2 +np.float32,0x800c9c69,0x800c9c69,2 +np.float32,0x274b14,0x274b14,2 +np.float32,0xbf4b22b0,0xbf3a42e2,2 +np.float32,0x63e5ae,0x63e5ae,2 +np.float32,0xbe18facc,0xbe186a90,2 +np.float32,0x7e137351,0x42aef4bd,2 +np.float32,0x80518ffd,0x80518ffd,2 +np.float32,0xbf0a8ffc,0xbf048f0d,2 +np.float32,0x841d,0x841d,2 +np.float32,0x7edfdc9e,0x42b12d69,2 +np.float32,0xfd1092b0,0xc2ac24de,2 +np.float32,0x7e2c9bdf,0x42af4566,2 +np.float32,0x7f7fffff,0x42b2d4fc,2 +np.float32,0x3f4954a6,0x3f38d853,2 +np.float32,0xbe83efd2,0xbe8284c3,2 +np.float32,0x800e8e02,0x800e8e02,2 +np.float32,0x78ad39,0x78ad39,2 +np.float32,0x7eb0f967,0x42b0b514,2 +np.float32,0xbe39aa94,0xbe38a9ee,2 +np.float32,0x80194e7b,0x80194e7b,2 +np.float32,0x3cf3a340,0x3cf39a0f,2 +np.float32,0x3ed3117a,0x3ecd8173,2 +np.float32,0x7f530b11,0x42b2721c,2 +np.float32,0xff756ba2,0xc2b2bf60,2 +np.float32,0x15ea25,0x15ea25,2 +np.float32,0x803cbb64,0x803cbb64,2 +np.float32,0x3f34722d,0x3f281a2c,2 +np.float32,0x3ddd88e0,0x3ddd1adb,2 +np.float32,0x3f54244c,0x3f41418b,2 +np.float32,0x3e0adb98,0x3e0a6f8b,2 +np.float32,0x80800000,0x80800000,2 +np.float32,0x58902b,0x58902b,2 +np.float32,0xfe3b50b8,0xc2af6f43,2 +np.float32,0xfe0846d0,0xc2aecc64,2 +np.float32,0xbe0299d0,0xbe023fd4,2 +np.float32,0x18dde6,0x18dde6,2 +np.float32,0x8039fe8b,0x8039fe8b,2 +np.float32,0x8015d179,0x8015d179,2 +np.float32,0x3f551322,0x3f41f947,2 +np.float32,0x2ab387,0x2ab387,2 +np.float32,0xbf7e311e,0xbf6059d0,2 +np.float32,0xbdba58a8,0xbdba1713,2 +np.float32,0xbf1d008a,0xbf148724,2 +np.float32,0xbf6b9c97,0xbf52ec98,2 +np.float32,0x802acf04,0x802acf04,2 +np.float32,0x1,0x1,2 +np.float32,0xbe9e16d6,0xbe9bade3,2 +np.float32,0xbf048a14,0xbefe78c7,2 +np.float32,0x7e432ad3,0x42af8449,2 +np.float32,0xbdcc7fe0,0xbdcc2944,2 +np.float32,0x6dfc27,0x6dfc27,2 +np.float32,0xfef6eed8,0xc2b15fa1,2 +np.float32,0xbeeff6e8,0xbee7f2e4,2 +np.float32,0x7e3a6ca8,0x42af6cd2,2 +np.float32,0xff2c82e8,0xc2b20ae4,2 +np.float32,0x3e9f8d74,0x3e9d13b0,2 +np.float32,0x7ea36191,0x42b08c29,2 +np.float32,0x7f734bed,0x42b2baed,2 +np.float32,0x7f2df96d,0x42b20f37,2 +np.float32,0x5036fd,0x5036fd,2 +np.float32,0x806eab38,0x806eab38,2 +np.float32,0xbe9db90e,0xbe9b5446,2 +np.float32,0xfeef6fac,0xc2b14fd9,2 +np.float32,0xc2bf7,0xc2bf7,2 +np.float32,0xff53ec3d,0xc2b2743d,2 +np.float32,0x7e837637,0x42b01cde,2 +np.float32,0xbefb5934,0xbef23662,2 +np.float32,0x3f6cec80,0x3f53e371,2 +np.float32,0x3e86e7de,0x3e85643f,2 +np.float32,0x3f09cb42,0x3f03e1ef,2 +np.float32,0xbec3d236,0xbebf5620,2 +np.float32,0xfedef246,0xc2b12b50,2 +np.float32,0xbf08d6a8,0xbf030a62,2 +np.float32,0x8036cbf9,0x8036cbf9,2 +np.float32,0x3f74d3e3,0x3f59a512,2 +np.float32,0x6a600c,0x6a600c,2 +np.float32,0xfd1295b0,0xc2ac2bf1,2 +np.float32,0xbeb61142,0xbeb26efa,2 +np.float32,0x80216556,0x80216556,2 +np.float32,0xbf1fa0f6,0xbf16c30a,2 +np.float32,0x3e0af8e1,0x3e0a8c90,2 +np.float32,0x80434709,0x80434709,2 +np.float32,0x49efd9,0x49efd9,2 +np.float32,0x7f7cce6c,0x42b2ce8f,2 +np.float32,0x6e5450,0x6e5450,2 +np.float32,0x7f0fc115,0x42b1ad86,2 +np.float32,0x632db0,0x632db0,2 +np.float32,0x3f6f4c2a,0x3f55a064,2 +np.float32,0x7ec4f273,0x42b0ebd3,2 +np.float32,0x61ae1e,0x61ae1e,2 +np.float32,0x5f47c4,0x5f47c4,2 +np.float32,0xbf3c8f62,0xbf2eaf54,2 +np.float32,0xfca38900,0xc2ab0113,2 +np.float32,0x3ec89d52,0x3ec3ce78,2 +np.float32,0xbe0e3f70,0xbe0dcb53,2 +np.float32,0x805d3156,0x805d3156,2 +np.float32,0x3eee33f8,0x3ee65a4e,2 +np.float32,0xbeda7e9a,0xbed45a90,2 
+np.float32,0x7e2fac7b,0x42af4e69,2 +np.float32,0x7efd0e28,0x42b16c2c,2 +np.float32,0x3f0c7b17,0x3f063e46,2 +np.float32,0xbf395bec,0xbf2c198f,2 +np.float32,0xfdf1c3f8,0xc2ae8f05,2 +np.float32,0xbe11f4e4,0xbe117783,2 +np.float32,0x7eddc901,0x42b128a3,2 +np.float32,0x3f4bad09,0x3f3aaf33,2 +np.float32,0xfefb5d76,0xc2b168bd,2 +np.float32,0x3ed3a4cf,0x3ece09a3,2 +np.float32,0x7ec582e4,0x42b0ed4a,2 +np.float32,0x3dc2268a,0x3dc1dc64,2 +np.float32,0x3ef9b17c,0x3ef0b9c9,2 +np.float32,0x2748ac,0x2748ac,2 +np.float32,0xfed6a602,0xc2b117e4,2 +np.float32,0xbefc9c36,0xbef35832,2 +np.float32,0x7e0476,0x7e0476,2 +np.float32,0x804be1a0,0x804be1a0,2 +np.float32,0xbefbc1c2,0xbef2943a,2 +np.float32,0xbd4698f0,0xbd46850a,2 +np.float32,0x688627,0x688627,2 +np.float32,0x3f7f7685,0x3f61406f,2 +np.float32,0x827fb,0x827fb,2 +np.float32,0x3f503264,0x3f3e34fd,2 +np.float32,0x7f5458d1,0x42b27543,2 +np.float32,0x800ac01f,0x800ac01f,2 +np.float32,0x6188dd,0x6188dd,2 +np.float32,0x806ac0ba,0x806ac0ba,2 +np.float32,0xbe14493c,0xbe13c5cc,2 +np.float32,0x3f77542c,0x3f5b72ae,2 +np.float32,0xfeaacab6,0xc2b0a2df,2 +np.float32,0x7f2893d5,0x42b1ff15,2 +np.float32,0x66b528,0x66b528,2 +np.float32,0xbf653e24,0xbf4e3573,2 +np.float32,0x801a2853,0x801a2853,2 +np.float32,0x3f3d8c98,0x3f2f7b04,2 +np.float32,0xfdffbad8,0xc2aeabc5,2 +np.float32,0x3dd50f,0x3dd50f,2 +np.float32,0x3f325a4c,0x3f266353,2 +np.float32,0xfcc48ec0,0xc2ab5f3f,2 +np.float32,0x3e6f5b9a,0x3e6d3ae5,2 +np.float32,0x3dbcd62b,0x3dbc91ee,2 +np.float32,0xbf7458d9,0xbf594c1c,2 +np.float32,0xff5adb24,0xc2b284b9,2 +np.float32,0x807b246d,0x807b246d,2 +np.float32,0x3f800000,0x3f61a1b3,2 +np.float32,0x231a28,0x231a28,2 +np.float32,0xbdc66258,0xbdc61341,2 +np.float32,0x3c84b4b4,0x3c84b338,2 +np.float32,0xbf215894,0xbf183783,2 +np.float32,0xff4ee298,0xc2b267ec,2 +np.float32,0x801ef52e,0x801ef52e,2 +np.float32,0x1040b0,0x1040b0,2 +np.float32,0xff545582,0xc2b2753b,2 +np.float32,0x3f3b9dda,0x3f2decaf,2 +np.float32,0x730f99,0x730f99,2 +np.float32,0xff7fffff,0xc2b2d4fc,2 +np.float32,0xff24cc5e,0xc2b1f379,2 +np.float32,0xbe9b456a,0xbe98fc0b,2 +np.float32,0x188fb,0x188fb,2 +np.float32,0x3f5c7ce2,0x3f47a18a,2 +np.float32,0x7fc00000,0x7fc00000,2 +np.float32,0x806ea4da,0x806ea4da,2 +np.float32,0xfe810570,0xc2b01345,2 +np.float32,0x8036af89,0x8036af89,2 +np.float32,0x8043cec6,0x8043cec6,2 +np.float32,0x80342bb3,0x80342bb3,2 +np.float32,0x1a2bd4,0x1a2bd4,2 +np.float32,0x3f6248c2,0x3f4bff9a,2 +np.float32,0x8024eb35,0x8024eb35,2 +np.float32,0x7ea55872,0x42b09247,2 +np.float32,0x806d6e56,0x806d6e56,2 +np.float32,0x25c21a,0x25c21a,2 +np.float32,0x3f4e95f3,0x3f3cf483,2 +np.float32,0x15ca38,0x15ca38,2 +np.float32,0x803f01b2,0x803f01b2,2 +np.float32,0xbe731634,0xbe70dc10,2 +np.float32,0x3e80cee4,0x3e7ef933,2 +np.float32,0x3ef6dda5,0x3eee2e7b,2 +np.float32,0x3f3dfdc2,0x3f2fd5ed,2 +np.float32,0xff0492a7,0xc2b18411,2 +np.float32,0xbf1d0adf,0xbf148ff3,2 +np.float32,0xfcf75460,0xc2abd4e3,2 +np.float32,0x3f46fca6,0x3f36ffa6,2 +np.float32,0xbe63b5c0,0xbe61dfb3,2 +np.float32,0xff019bec,0xc2b1787d,2 +np.float32,0x801f14a9,0x801f14a9,2 +np.float32,0x3f176cfa,0x3f0fc051,2 +np.float32,0x3f69d976,0x3f51a015,2 +np.float32,0x3f4917cb,0x3f38a87a,2 +np.float32,0x3b2a0bea,0x3b2a0bdd,2 +np.float32,0xbf41d857,0xbf32eb50,2 +np.float32,0xbf08841a,0xbf02c18f,2 +np.float32,0x7ec86f14,0x42b0f4d0,2 +np.float32,0xbf7d15d1,0xbf5f9090,2 +np.float32,0xbd080550,0xbd07feea,2 +np.float32,0xbf6f1bef,0xbf557d26,2 +np.float32,0xfebc282c,0xc2b0d473,2 +np.float32,0x3e68d2f5,0x3e66dd03,2 +np.float32,0x3f3ed8fe,0x3f3085d5,2 
+np.float32,0xff2f78ae,0xc2b2139a,2 +np.float32,0xff647a70,0xc2b29ac1,2 +np.float32,0xfd0859a0,0xc2ac06e2,2 +np.float32,0x3ea578a8,0x3ea2b7e1,2 +np.float32,0x6c58c6,0x6c58c6,2 +np.float32,0xff23f26a,0xc2b1f0d2,2 +np.float32,0x800902a4,0x800902a4,2 +np.float32,0xfe8ba64e,0xc2b03bcd,2 +np.float32,0x3f091143,0x3f033e0f,2 +np.float32,0x8017c4bd,0x8017c4bd,2 +np.float32,0xbf708fd4,0xbf568c8c,2 +np.float32,0x3be1d8,0x3be1d8,2 +np.float32,0x80091f07,0x80091f07,2 +np.float32,0x68eabe,0x68eabe,2 +np.float32,0xfe9ab2c8,0xc2b07033,2 +np.float32,0x3eabe752,0x3ea8d3d7,2 +np.float32,0xbf7adcb2,0xbf5dfaf5,2 +np.float32,0x801ecc01,0x801ecc01,2 +np.float32,0xbf5570a9,0xbf424123,2 +np.float32,0x3e89eecd,0x3e88510e,2 +np.float32,0xfeb2feee,0xc2b0bae4,2 +np.float32,0xbeb25ec2,0xbeaef22b,2 +np.float32,0x201e49,0x201e49,2 +np.float32,0x800a35f6,0x800a35f6,2 +np.float32,0xbf02d449,0xbefb6e2a,2 +np.float32,0x3f062bea,0x3f00aef6,2 +np.float32,0x7f5219ff,0x42b26fd2,2 +np.float32,0xbd4561d0,0xbd454e47,2 +np.float32,0x3f6c4789,0x3f536a4b,2 +np.float32,0x7f58b06d,0x42b27fa1,2 +np.float32,0x7f132f39,0x42b1b999,2 +np.float32,0x3e05dcb4,0x3e057bd8,2 +np.float32,0x7f526045,0x42b2707d,2 +np.float32,0x3f6117d0,0x3f4b1adb,2 +np.float32,0xbf21f47d,0xbf18bb57,2 +np.float32,0x1a26d6,0x1a26d6,2 +np.float32,0x46b114,0x46b114,2 +np.float32,0x3eb24518,0x3eaed9ef,2 +np.float32,0xfe2139c8,0xc2af2278,2 +np.float32,0xbf7c36fb,0xbf5ef1f6,2 +np.float32,0x3f193834,0x3f114af7,2 +np.float32,0xff3ea650,0xc2b23e14,2 +np.float32,0xfeeb3bca,0xc2b146c7,2 +np.float32,0x7e8b8ca0,0x42b03b6f,2 +np.float32,0x3eed903d,0x3ee5c5d2,2 +np.float32,0xbdc73740,0xbdc6e72a,2 +np.float32,0x7e500307,0x42afa4ec,2 +np.float32,0xe003c,0xe003c,2 +np.float32,0x3e612bb4,0x3e5f64fd,2 +np.float32,0xfd81e248,0xc2ad50e6,2 +np.float32,0x766a4f,0x766a4f,2 +np.float32,0x3e8708c9,0x3e858414,2 +np.float32,0xbf206c58,0xbf176f7f,2 +np.float32,0x7e93aeb0,0x42b0586f,2 +np.float32,0xfd9d36b8,0xc2adb2ad,2 +np.float32,0xff1f4e0e,0xc2b1e21d,2 +np.float32,0x3f22bd5a,0x3f1964f8,2 +np.float32,0x7f6a517a,0x42b2a7ad,2 +np.float32,0xff6ca773,0xc2b2acc1,2 +np.float32,0x7f6bf453,0x42b2ab3d,2 +np.float32,0x3edfdd64,0x3ed9489f,2 +np.float32,0xbeafc5ba,0xbeac7daa,2 +np.float32,0x7d862039,0x42ad615b,2 +np.float32,0xbe9d2002,0xbe9ac1fc,2 +np.float32,0xbdcc54c0,0xbdcbfe5b,2 +np.float32,0xbf1bc0aa,0xbf13762a,2 +np.float32,0xbf4679ce,0xbf36984b,2 +np.float32,0x3ef45696,0x3eebe713,2 +np.float32,0xff6eb999,0xc2b2b137,2 +np.float32,0xbe4b2e4c,0xbe49dee8,2 +np.float32,0x3f498951,0x3f3901b7,2 +np.float32,0xbe9692f4,0xbe947be1,2 +np.float32,0xbf44ce26,0xbf3545c8,2 +np.float32,0x805787a8,0x805787a8,2 +np.float32,0xbf342650,0xbf27dc26,2 +np.float32,0x3edafbf0,0x3ed4cdd2,2 +np.float32,0x3f6fb858,0x3f55ef63,2 +np.float32,0xff227d0a,0xc2b1ec3f,2 +np.float32,0xfeb9a202,0xc2b0cd89,2 +np.float32,0x7f5b12c1,0x42b2853b,2 +np.float32,0x584578,0x584578,2 +np.float32,0x7ec0b76f,0x42b0e0b5,2 +np.float32,0x3f57f54b,0x3f442f10,2 +np.float32,0x7eef3620,0x42b14f5d,2 +np.float32,0x4525b5,0x4525b5,2 +np.float32,0x801bd407,0x801bd407,2 +np.float32,0xbed1f166,0xbecc7703,2 +np.float32,0x3f57e732,0x3f442449,2 +np.float32,0x80767cd5,0x80767cd5,2 +np.float32,0xbef1a7d2,0xbee97aa3,2 +np.float32,0x3dd5b1af,0x3dd54ee6,2 +np.float32,0x960c,0x960c,2 +np.float32,0x7c392d41,0x42a9ddd1,2 +np.float32,0x3f5c9a34,0x3f47b7c1,2 +np.float32,0x3f5cecee,0x3f47f667,2 +np.float32,0xbee482ce,0xbedd8899,2 +np.float32,0x8066ba7e,0x8066ba7e,2 +np.float32,0x7ed76127,0x42b119a2,2 +np.float32,0x805ca40b,0x805ca40b,2 +np.float32,0x7f5ed5d1,0x42b28df3,2 
+np.float32,0xfe9e1b1e,0xc2b07b5b,2 +np.float32,0x3f0201a2,0x3ef9f6c4,2 +np.float32,0xbf2e6430,0xbf232039,2 +np.float32,0x80326b4d,0x80326b4d,2 +np.float32,0x3f11dc7c,0x3f0af06e,2 +np.float32,0xbe89c42e,0xbe8827e6,2 +np.float32,0x3f3c69f8,0x3f2e9133,2 +np.float32,0x806326a9,0x806326a9,2 +np.float32,0x3f1c5286,0x3f13f2b6,2 +np.float32,0xff5c0ead,0xc2b28786,2 +np.float32,0xff32b952,0xc2b21d01,2 +np.float32,0x7dd27c4e,0x42ae4815,2 +np.float32,0xbf7a6816,0xbf5da7a2,2 +np.float32,0xfeac72f8,0xc2b0a7d1,2 +np.float32,0x335ad7,0x335ad7,2 +np.float32,0xbe682da4,0xbe663bcc,2 +np.float32,0x3f2df244,0x3f22c208,2 +np.float32,0x80686e8e,0x80686e8e,2 +np.float32,0x7f50120f,0x42b26ad9,2 +np.float32,0x3dbc596a,0x3dbc15b3,2 +np.float32,0xbf4f2868,0xbf3d666d,2 +np.float32,0x80000001,0x80000001,2 +np.float32,0xff66c059,0xc2b29fd2,2 +np.float32,0xfe8bbcaa,0xc2b03c1f,2 +np.float32,0x3ece6a51,0x3ec93271,2 +np.float32,0x7f06cd26,0x42b18c9a,2 +np.float32,0x7e41e6dc,0x42af80f5,2 +np.float32,0x7d878334,0x42ad669f,2 +np.float32,0xfe8c5c4c,0xc2b03e67,2 +np.float32,0x337a05,0x337a05,2 +np.float32,0x3e63801d,0x3e61ab58,2 +np.float32,0x62c315,0x62c315,2 +np.float32,0x802aa888,0x802aa888,2 +np.float32,0x80038b43,0x80038b43,2 +np.float32,0xff5c1271,0xc2b2878f,2 +np.float32,0xff4184a5,0xc2b245b9,2 +np.float32,0x7ef58f4b,0x42b15cc6,2 +np.float32,0x7f42d8ac,0x42b2493a,2 +np.float32,0x806609f2,0x806609f2,2 +np.float32,0x801e763b,0x801e763b,2 +np.float32,0x7f2bc073,0x42b208a2,2 +np.float32,0x801d7d7f,0x801d7d7f,2 +np.float32,0x7d415dc1,0x42acb9c2,2 +np.float32,0xbf624ff9,0xbf4c0502,2 +np.float32,0xbf603afd,0xbf4a74e2,2 +np.float32,0x8007fe42,0x8007fe42,2 +np.float32,0x800456db,0x800456db,2 +np.float32,0x620871,0x620871,2 +np.float32,0x3e9c6c1e,0x3e9a15fa,2 +np.float32,0x4245d,0x4245d,2 +np.float32,0x8035bde9,0x8035bde9,2 +np.float32,0xbf597418,0xbf45533c,2 +np.float32,0x3c730f80,0x3c730d38,2 +np.float32,0x3f7cd8ed,0x3f5f6540,2 +np.float32,0x807e49c3,0x807e49c3,2 +np.float32,0x3d6584c0,0x3d65660c,2 +np.float32,0xff42a744,0xc2b248b8,2 +np.float32,0xfedc6f56,0xc2b12583,2 +np.float32,0x806263a4,0x806263a4,2 +np.float32,0x175a17,0x175a17,2 +np.float32,0x3f1e8537,0x3f15d208,2 +np.float32,0x4055b5,0x4055b5,2 +np.float32,0x438aa6,0x438aa6,2 +np.float32,0x8038507f,0x8038507f,2 +np.float32,0xbed75348,0xbed16f85,2 +np.float32,0x7f07b7d6,0x42b19012,2 +np.float32,0xfe8b9d30,0xc2b03bac,2 +np.float32,0x805c501c,0x805c501c,2 +np.float32,0x3ef22b1d,0x3ee9f159,2 +np.float32,0x802b6759,0x802b6759,2 +np.float32,0x45281a,0x45281a,2 +np.float32,0xbf7e9970,0xbf60a3cf,2 +np.float32,0xbf14d152,0xbf0d8062,2 +np.float32,0x3d9ff950,0x3d9fcfc8,2 +np.float32,0x7865d9,0x7865d9,2 +np.float32,0xbee67fa4,0xbedf58eb,2 +np.float32,0x7dc822d1,0x42ae2e44,2 +np.float32,0x3f3af0fe,0x3f2d612c,2 +np.float32,0xbefea106,0xbef5274e,2 +np.float32,0xbf758a3f,0xbf5a28c5,2 +np.float32,0xbf331bdd,0xbf270209,2 +np.float32,0x7f51c901,0x42b26f0d,2 +np.float32,0x3f67c33b,0x3f5014d8,2 +np.float32,0xbbc9d980,0xbbc9d92c,2 +np.float32,0xbc407540,0xbc40741e,2 +np.float32,0x7eed9a3c,0x42b14be9,2 +np.float32,0x1be0fe,0x1be0fe,2 +np.float32,0xbf6b4913,0xbf52af1f,2 +np.float32,0xbda8eba8,0xbda8bac6,2 +np.float32,0x8004bcea,0x8004bcea,2 +np.float32,0xff6f6afe,0xc2b2b2b3,2 +np.float32,0xbf205810,0xbf175e50,2 +np.float32,0x80651944,0x80651944,2 +np.float32,0xbec73016,0xbec27a3f,2 +np.float32,0x5701b9,0x5701b9,2 +np.float32,0xbf1062ce,0xbf09a7df,2 +np.float32,0x3e0306ae,0x3e02abd1,2 +np.float32,0x7bfc62,0x7bfc62,2 +np.float32,0xbf48dd3c,0xbf387a6b,2 +np.float32,0x8009573e,0x8009573e,2 
+np.float32,0x660a2c,0x660a2c,2 +np.float32,0xff2280da,0xc2b1ec4b,2 +np.float32,0xbf7034fe,0xbf564a54,2 +np.float32,0xbeeb448e,0xbee3b045,2 +np.float32,0xff4e949c,0xc2b2672b,2 +np.float32,0xbf3c4486,0xbf2e7309,2 +np.float32,0x7eb086d8,0x42b0b3c8,2 +np.float32,0x7eac8aca,0x42b0a817,2 +np.float32,0xfd3d2d60,0xc2acae8b,2 +np.float32,0xbf363226,0xbf2987bd,2 +np.float32,0x7f02e524,0x42b17d8c,2 +np.float32,0x8049a148,0x8049a148,2 +np.float32,0x147202,0x147202,2 +np.float32,0x8031d3f6,0x8031d3f6,2 +np.float32,0xfe78bf68,0xc2b0007d,2 +np.float32,0x7ebd16d0,0x42b0d6fb,2 +np.float32,0xbdaed2e8,0xbdae9cbb,2 +np.float32,0x802833ae,0x802833ae,2 +np.float32,0x7f62adf6,0x42b296b5,2 +np.float32,0xff2841c0,0xc2b1fe1b,2 +np.float32,0xbeb2c47e,0xbeaf523b,2 +np.float32,0x7e42a36e,0x42af82e6,2 +np.float32,0x41ea29,0x41ea29,2 +np.float32,0xbcaaa800,0xbcaaa4d7,2 +np.float64,0x3fed71f27ebae3e5,0x3fea5c6095012ca6,2 +np.float64,0x224dc392449b9,0x224dc392449b9,2 +np.float64,0x3fdf897a7d3f12f5,0x3fde620339360992,2 +np.float64,0xbfe1f99a5123f334,0xbfe124a57cfaf556,2 +np.float64,0xbfd9725c3bb2e4b8,0xbfd8d1e3f75110c7,2 +np.float64,0x3fe38977546712ee,0x3fe27d9d37f4b91f,2 +np.float64,0xbfc36c29e526d854,0xbfc3594743ee45c4,2 +np.float64,0xbfe5cbec332b97d8,0xbfe4638802316849,2 +np.float64,0x2ff35efe5fe6d,0x2ff35efe5fe6d,2 +np.float64,0x7fd3f828e227f051,0x40862a7d4a40b1e0,2 +np.float64,0xffd06fc11620df82,0xc08628ee8f1bf6c8,2 +np.float64,0x3fe5321bf4aa6438,0x3fe3e3d9fa453199,2 +np.float64,0xffd07a323ca0f464,0xc08628f3a2930f8c,2 +np.float64,0x3fdf7abe7abef57c,0x3fde54cb193d49cb,2 +np.float64,0x40941f1881285,0x40941f1881285,2 +np.float64,0xffef18defc7e31bd,0xc0863393f2c9f061,2 +np.float64,0xbfe379f871e6f3f1,0xbfe270620cb68347,2 +np.float64,0xffec829848f90530,0xc08632e210edaa2b,2 +np.float64,0x80070c00574e1801,0x80070c00574e1801,2 +np.float64,0xffce7654b23ceca8,0xc086285291e89975,2 +np.float64,0x7fc9932daa33265a,0x408626ec6cc2b807,2 +np.float64,0x355ee98c6abde,0x355ee98c6abde,2 +np.float64,0x3fac54962c38a920,0x3fac50e40b6c19f2,2 +np.float64,0x800857984af0af31,0x800857984af0af31,2 +np.float64,0x7fea6a3d55f4d47a,0x40863245bf39f179,2 +np.float64,0x3fdb8fab33371f56,0x3fdac5ffc9e1c347,2 +np.float64,0x800a887a7bf510f5,0x800a887a7bf510f5,2 +np.float64,0xbfbdbda3c63b7b48,0xbfbdac9dd5a2d3e8,2 +np.float64,0xbfd4a2457b29448a,0xbfd44acb3b316d6d,2 +np.float64,0x7fd5329a502a6534,0x40862af789b528b5,2 +np.float64,0x3fd96a7bceb2d4f8,0x3fd8ca92104d6cd6,2 +np.float64,0x3fde6a0cd6bcd41a,0x3fdd5f4b85abf749,2 +np.float64,0xbfc7faaff32ff560,0xbfc7d7560b8c4a52,2 +np.float64,0x7fec381b2f787035,0x408632cd0e9c095c,2 +np.float64,0x1fc2eb543f85e,0x1fc2eb543f85e,2 +np.float64,0x7ac6000af58c1,0x7ac6000af58c1,2 +np.float64,0xffe060a87920c150,0xc0862e72c37d5a4e,2 +np.float64,0xbfb7d8c89e2fb190,0xbfb7cffd3c3f8e3a,2 +np.float64,0x3fd91033deb22068,0x3fd87695b067aa1e,2 +np.float64,0x3fec1aff01b835fe,0x3fe95d5cbd729af7,2 +np.float64,0x7fb97f69ec32fed3,0x4086215aaae5c697,2 +np.float64,0x7feaf1e4e5f5e3c9,0x4086326e6ca6a2bb,2 +np.float64,0x800537e44d0a6fc9,0x800537e44d0a6fc9,2 +np.float64,0x800b2a0d0d36541a,0x800b2a0d0d36541a,2 +np.float64,0x3fe2193846e43270,0x3fe140308550138e,2 +np.float64,0x5e2a0a32bc542,0x5e2a0a32bc542,2 +np.float64,0xffe5888b09eb1116,0xc08630a348783aa3,2 +np.float64,0xbfceb9b5033d736c,0xbfce701049c10435,2 +np.float64,0x7fe5d68589abad0a,0x408630c00ce63f23,2 +np.float64,0x8009b5457ff36a8b,0x8009b5457ff36a8b,2 +np.float64,0xbfb5518c2e2aa318,0xbfb54b42638ca718,2 +np.float64,0x3f9c58469838b080,0x3f9c575974fbcd7b,2 
+np.float64,0x3fe8db4b4731b697,0x3fe6dc9231587966,2 +np.float64,0x8007d0f77f4fa1f0,0x8007d0f77f4fa1f0,2 +np.float64,0x7fe79eef542f3dde,0x40863160c673c67f,2 +np.float64,0xffbdc0b6163b8170,0xc0862296be4bf032,2 +np.float64,0x3fbb8d3312371a66,0x3fbb7fa76fb4cf8d,2 +np.float64,0xffd8a0eedbb141de,0xc0862c2ac6e512f0,2 +np.float64,0x7fee99d8d87d33b1,0x4086337301c4c8df,2 +np.float64,0xffe7479b552e8f36,0xc0863142fba0f0ec,2 +np.float64,0xffedf8ef4abbf1de,0xc08633488068fe69,2 +np.float64,0x895c4d9f12b8a,0x895c4d9f12b8a,2 +np.float64,0x29b4caf05369a,0x29b4caf05369a,2 +np.float64,0xbfefb90d657f721b,0xbfec01efa2425b35,2 +np.float64,0xde07c3bdbc0f9,0xde07c3bdbc0f9,2 +np.float64,0x7feae9fd02f5d3f9,0x4086326c1368ed5a,2 +np.float64,0x3feab792da756f26,0x3fe84f6e15338ed7,2 +np.float64,0xbfeff8ed72fff1db,0xbfec2f35da06daaf,2 +np.float64,0x8004b2c132896583,0x8004b2c132896583,2 +np.float64,0xbf9fcb00103f9600,0xbf9fc9b1751c569e,2 +np.float64,0x4182b72e83058,0x4182b72e83058,2 +np.float64,0x90820d812105,0x90820d812105,2 +np.float64,0xbfdec9a0ba3d9342,0xbfddb585df607ce1,2 +np.float64,0x7fdc0a69a03814d2,0x40862d347f201b63,2 +np.float64,0xbfef0708937e0e11,0xbfeb82d27f8ea97f,2 +np.float64,0xffda57e4ddb4afca,0xc0862cb49e2e0c4c,2 +np.float64,0xbfa30b9af4261730,0xbfa30a7b4a633060,2 +np.float64,0x7feb57fcc4b6aff9,0x4086328c83957a0b,2 +np.float64,0x7fe6759153eceb22,0x408630f980433963,2 +np.float64,0x7fdd3278c8ba64f1,0x40862d87445243e9,2 +np.float64,0xd3b8e6b9a771d,0xd3b8e6b9a771d,2 +np.float64,0x6267dc88c4cfc,0x6267dc88c4cfc,2 +np.float64,0x7fedd3cf00bba79d,0x4086333e91712ff5,2 +np.float64,0xffbe512ce03ca258,0xc08622bd39314cea,2 +np.float64,0xbfe71742ca6e2e86,0xbfe572ccbf2d010d,2 +np.float64,0x8002fb048c65f60a,0x8002fb048c65f60a,2 +np.float64,0x800d9d9ddf7b3b3c,0x800d9d9ddf7b3b3c,2 +np.float64,0xbfeaf6230df5ec46,0xbfe87f5d751ec3d5,2 +np.float64,0xbfe69973a42d32e8,0xbfe50c680f7002fe,2 +np.float64,0x3fe309cf87e613a0,0x3fe21048714ce1ac,2 +np.float64,0x800435d17a286ba4,0x800435d17a286ba4,2 +np.float64,0x7fefffffffffffff,0x408633ce8fb9f87e,2 +np.float64,0x3fe36ade1766d5bc,0x3fe26379fb285dde,2 +np.float64,0x3f98d8d94831b1c0,0x3f98d839885dc527,2 +np.float64,0xbfd08f7ae5211ef6,0xbfd0618ab5293e1e,2 +np.float64,0xbfcf630bd53ec618,0xbfcf14a0cd20704d,2 +np.float64,0xbfe58f0ca6eb1e1a,0xbfe4312225df8e28,2 +np.float64,0xffef4f6406be9ec7,0xc08633a1ed1d27e5,2 +np.float64,0x7fe10120b3e20240,0x40862ebfaf94e6e8,2 +np.float64,0xffe96c52fbb2d8a5,0xc08631f75d9a59a0,2 +np.float64,0xbfe448a333e89146,0xbfe31fee44c3ec43,2 +np.float64,0x80045ff4e788bfeb,0x80045ff4e788bfeb,2 +np.float64,0x7fefaa2f823f545e,0x408633b8fea29524,2 +np.float64,0xffea6b8bf234d717,0xc0863246248e5960,2 +np.float64,0xbfdb085d80b610bc,0xbfda498b15b43eec,2 +np.float64,0xbfd5e12da3abc25c,0xbfd57970e2b8aecc,2 +np.float64,0x3fcc84928a390925,0x3fcc497c417a89f3,2 +np.float64,0xbfdcb713bf396e28,0xbfdbd46c5e731fd9,2 +np.float64,0xffdf50c0453ea180,0xc0862e16b5562f25,2 +np.float64,0x800342c2f7268587,0x800342c2f7268587,2 +np.float64,0x7feb8b6d743716da,0x4086329b8248de2c,2 +np.float64,0x800a9b18b4953632,0x800a9b18b4953632,2 +np.float64,0xffedaf0d12fb5e19,0xc0863334af82de1a,2 +np.float64,0x800aebda4ab5d7b5,0x800aebda4ab5d7b5,2 +np.float64,0xbfa9f5848433eb10,0xbfa9f2ac7ac065d4,2 +np.float64,0x3fea375928f46eb2,0x3fe7ec9f10eeac7d,2 +np.float64,0x3fd6c213fead8428,0x3fd64dcc1eff5f1b,2 +np.float64,0xbfa0476f44208ee0,0xbfa046bb986007ac,2 +np.float64,0x6c8e18aed91c4,0x6c8e18aed91c4,2 +np.float64,0x8000000000000001,0x8000000000000001,2 +np.float64,0x7fea86b5ba350d6a,0x4086324e59f13027,2 
+np.float64,0x2316c3b0462d9,0x2316c3b0462d9,2 +np.float64,0x3fec4e3281389c65,0x3fe983c5c9d65940,2 +np.float64,0x3fbb87c47f772,0x3fbb87c47f772,2 +np.float64,0x8004af00fdc95e03,0x8004af00fdc95e03,2 +np.float64,0xbfd316db9ba62db8,0xbfd2d12765b9d155,2 +np.float64,0x3fec1a7a99f834f6,0x3fe95cf941889b3d,2 +np.float64,0x3feff7e1477fefc3,0x3fec2e782392d4b9,2 +np.float64,0xbfc683ea042d07d4,0xbfc66698cfa5026e,2 +np.float64,0x3fdbc8aaa9b79154,0x3fdafa50e6fc3fff,2 +np.float64,0xfb3b630ff676d,0xfb3b630ff676d,2 +np.float64,0x7fe715ef8eae2bde,0x40863131d794b41f,2 +np.float64,0x7fefa06c11bf40d7,0x408633b686c7996a,2 +np.float64,0x80002a40f5205483,0x80002a40f5205483,2 +np.float64,0x7fe95f3c74b2be78,0x408631f33e37bf76,2 +np.float64,0x3fb2977b32252ef0,0x3fb2934eaf5a4be8,2 +np.float64,0x3fc0f3dbc821e7b8,0x3fc0e745288c84c3,2 +np.float64,0x3fda98da56b531b5,0x3fd9e2b19447dacc,2 +np.float64,0x3f95b9d5202b73aa,0x3f95b96a53282949,2 +np.float64,0x3fdc1ace7738359d,0x3fdb4597d31df7ff,2 +np.float64,0xffeac5bb2e358b76,0xc0863261452ab66c,2 +np.float64,0xbfefb1b78f7f636f,0xbfebfcb9be100ced,2 +np.float64,0xf5c9e191eb93c,0xf5c9e191eb93c,2 +np.float64,0x3fe83a977630752f,0x3fe65d0df90ff6ef,2 +np.float64,0x3fc317515d262ea0,0x3fc3056072b719f0,2 +np.float64,0x7fe2dcfab225b9f4,0x40862f94257c28a2,2 +np.float64,0xca2b115794562,0xca2b115794562,2 +np.float64,0x3fd495301aa92a60,0x3fd43e57108761d5,2 +np.float64,0x800ccc4293199885,0x800ccc4293199885,2 +np.float64,0xc8d3173d91a63,0xc8d3173d91a63,2 +np.float64,0xbf2541bb7e4a8,0xbf2541bb7e4a8,2 +np.float64,0xbfe9a330df334662,0xbfe779816573f5be,2 +np.float64,0xffd5e4c8252bc990,0xc0862b39b3ca5d72,2 +np.float64,0x3fe90f3a53721e75,0x3fe70585ae09531d,2 +np.float64,0xbfe2b5ddc7a56bbc,0xbfe1c7fa91a675ed,2 +np.float64,0xbf981a0360303400,0xbf9819719345073a,2 +np.float64,0x19174b0e322ea,0x19174b0e322ea,2 +np.float64,0xbfd2f71a1725ee34,0xbfd2b2b6f7cd10b1,2 +np.float64,0x80056e83236add07,0x80056e83236add07,2 +np.float64,0x7fe4bc41d9697883,0x40863055f20ce0cb,2 +np.float64,0xffe76e06c46edc0d,0xc086315024b25559,2 +np.float64,0x3fe3c4f0f96789e2,0x3fe2b04b584609bf,2 +np.float64,0x3fe6cfc533ed9f8a,0x3fe538b4d784d5ee,2 +np.float64,0x7fd234a640a4694c,0x408629bfead4f0b2,2 +np.float64,0x3fdbc49c9ab78939,0x3fdaf698a83d08e2,2 +np.float64,0x3fe4c5336ee98a66,0x3fe388c6ddb60e0a,2 +np.float64,0xf4b9497be9729,0xf4b9497be9729,2 +np.float64,0x3fb312be12262580,0x3fb30e3c847c1d16,2 +np.float64,0x3fe9554218f2aa84,0x3fe73c8b311c7a98,2 +np.float64,0xff899816a0333040,0xc08610bfb2cd8559,2 +np.float64,0x8006008ad52c0116,0x8006008ad52c0116,2 +np.float64,0x3fd7d47be4afa8f8,0x3fd74fa71ec17fd0,2 +np.float64,0x8010000000000000,0x8010000000000000,2 +np.float64,0xdf2a9943be553,0xdf2a9943be553,2 +np.float64,0xbfeb86bf1eb70d7e,0xbfe8ed797580ba5c,2 +np.float64,0x800e2c0c28bc5818,0x800e2c0c28bc5818,2 +np.float64,0xbfe2be65d4657ccc,0xbfe1cf578dec2323,2 +np.float64,0xbfedea3a5afbd475,0xbfeab490bf05e585,2 +np.float64,0xbfe04b1583a0962b,0xbfdf523dfd7be25c,2 +np.float64,0x75929bb4eb254,0x75929bb4eb254,2 +np.float64,0x3fd7b4968caf692d,0x3fd731c0938ff97c,2 +np.float64,0x60bd8fd2c17b3,0x60bd8fd2c17b3,2 +np.float64,0xbfdaf15e70b5e2bc,0xbfda345a95ce18fe,2 +np.float64,0x7fdd7c35c2baf86b,0x40862d9b5f40c6b2,2 +np.float64,0x7feeb4d2ab7d69a4,0x4086337a0c0dffaf,2 +np.float64,0xffe65b5a1decb6b4,0xc08630f024420efb,2 +np.float64,0x7feb272b30764e55,0x4086327e2e553aa2,2 +np.float64,0x3fd27513e8a4ea28,0x3fd235ea49670f6a,2 +np.float64,0x3fe6541a6aeca834,0x3fe4d3a5b69fd1b6,2 +np.float64,0xbfe0c6ca0f618d94,0xbfe017058259efdb,2 
+np.float64,0x7fc1bf07b7237e0e,0x4086240000fa5a52,2 +np.float64,0x7fe96af9c0f2d5f3,0x408631f6f0f4faa2,2 +np.float64,0x3fe0728be7a0e518,0x3fdf9881a5869de9,2 +np.float64,0xffe8ea4441b1d488,0xc08631ce0685ae7e,2 +np.float64,0xffd0b973f02172e8,0xc08629121e7fdf85,2 +np.float64,0xffe37b907a26f720,0xc0862fd6529401a0,2 +np.float64,0x3fe0ee826461dd05,0x3fe03a2a424a1b40,2 +np.float64,0xbfe8073c92300e79,0xbfe6340cbd179ac1,2 +np.float64,0x800768383f8ed071,0x800768383f8ed071,2 +np.float64,0x8002e467c7c5c8d0,0x8002e467c7c5c8d0,2 +np.float64,0xbfd8d53ea5b1aa7e,0xbfd83fa7243289d7,2 +np.float64,0xffebefce2bb7df9c,0xc08632b874f4f8dc,2 +np.float64,0xffe3be9eb9277d3d,0xc0862ff1ac70ad0b,2 +np.float64,0xffe2f8a82e65f150,0xc0862f9fd9e77d86,2 +np.float64,0xbfa01d151c203a30,0xbfa01c66dc13a70a,2 +np.float64,0x800877062d30ee0d,0x800877062d30ee0d,2 +np.float64,0xaade16a755bc3,0xaade16a755bc3,2 +np.float64,0xbfeb1abc70363579,0xbfe89b52c3b003aa,2 +np.float64,0x80097d0b2ad2fa17,0x80097d0b2ad2fa17,2 +np.float64,0x8001499907429333,0x8001499907429333,2 +np.float64,0x3fe8db2aaf71b656,0x3fe6dc7873f1b235,2 +np.float64,0x5cfeadc4b9fd6,0x5cfeadc4b9fd6,2 +np.float64,0xff3f77d1fe7ef,0xff3f77d1fe7ef,2 +np.float64,0xffeecd56f9bd9aad,0xc08633806cb1163d,2 +np.float64,0xbf96f3ca582de7a0,0xbf96f34c6b8e1c85,2 +np.float64,0x7ed6b44afdad7,0x7ed6b44afdad7,2 +np.float64,0x80071808da4e3012,0x80071808da4e3012,2 +np.float64,0x3feb8aee2bf715dc,0x3fe8f0a55516615c,2 +np.float64,0x800038f62e2071ed,0x800038f62e2071ed,2 +np.float64,0x3fb13f9af2227f30,0x3fb13c456ced8e08,2 +np.float64,0xffd584d1812b09a4,0xc0862b165558ec0c,2 +np.float64,0x800b20c30fb64186,0x800b20c30fb64186,2 +np.float64,0x80024f9646e49f2d,0x80024f9646e49f2d,2 +np.float64,0xffefffffffffffff,0xc08633ce8fb9f87e,2 +np.float64,0x3fdddbcb5bbbb797,0x3fdcde981111f650,2 +np.float64,0xffed14077f3a280e,0xc086330a795ad634,2 +np.float64,0x800fec2da7ffd85b,0x800fec2da7ffd85b,2 +np.float64,0x3fe8205ffc7040c0,0x3fe6482318d217f9,2 +np.float64,0x3013e5226027d,0x3013e5226027d,2 +np.float64,0xffe4e5aad469cb55,0xc0863065dc2fb4e3,2 +np.float64,0x5cb0f7b2b9620,0x5cb0f7b2b9620,2 +np.float64,0xbfeb4537d2768a70,0xbfe8bbb2c1d3bff9,2 +np.float64,0xbfd859e297b0b3c6,0xbfd7cc807948bf9d,2 +np.float64,0x71f00b8ce3e02,0x71f00b8ce3e02,2 +np.float64,0xf5c1b875eb837,0xf5c1b875eb837,2 +np.float64,0xa0f35c8141e8,0xa0f35c8141e8,2 +np.float64,0xffe24860b42490c1,0xc0862f54222f616e,2 +np.float64,0xffcd9ae8583b35d0,0xc08628181e643a42,2 +np.float64,0x7fe9b710c7736e21,0x4086320ec033490f,2 +np.float64,0x3fd2b9ca1d257394,0x3fd277e631f0c0b3,2 +np.float64,0x23559bfc46ab4,0x23559bfc46ab4,2 +np.float64,0x8002adf75e455bef,0x8002adf75e455bef,2 +np.float64,0xbfefa4d75cbf49af,0xbfebf392e51d6a1a,2 +np.float64,0xffcfef263e3fde4c,0xc08628b336adb611,2 +np.float64,0x80061acaa8ec3596,0x80061acaa8ec3596,2 +np.float64,0x7fc1b33be0236677,0x408623faaddcc17e,2 +np.float64,0x7fe3a84083675080,0x40862fe8972e41e1,2 +np.float64,0xbfe756c1276ead82,0xbfe5a6318b061e1b,2 +np.float64,0xbfae4b71b43c96e0,0xbfae46ed0b6203a4,2 +np.float64,0x800421c6d0a8438e,0x800421c6d0a8438e,2 +np.float64,0x8009ad56fe335aae,0x8009ad56fe335aae,2 +np.float64,0xbfe71afc976e35f9,0xbfe575d21f3d7193,2 +np.float64,0x7fec0bbe4c38177c,0x408632c0710f1d8a,2 +np.float64,0x750e1daeea1c4,0x750e1daeea1c4,2 +np.float64,0x800501d4240a03a9,0x800501d4240a03a9,2 +np.float64,0x800794955cef292b,0x800794955cef292b,2 +np.float64,0x3fdf8a87f5bf1510,0x3fde62f4f00cfa19,2 +np.float64,0xbfebebdbc7f7d7b8,0xbfe939e51ba1340c,2 +np.float64,0xbfe3a16217a742c4,0xbfe292039dd08a71,2 
+np.float64,0x3fed6cd04c3ad9a1,0x3fea58995973f74b,2 +np.float64,0xffcad8787335b0f0,0xc086274fbb35dd37,2 +np.float64,0x3fcb178e3d362f1c,0x3fcae4c9f3e6dddc,2 +np.float64,0xbfcadc669435b8cc,0xbfcaaae7cf075420,2 +np.float64,0x7fe0e3906321c720,0x40862eb1bacc5c43,2 +np.float64,0xff8ad5edb035abc0,0xc0861120b6404d0b,2 +np.float64,0x3fe175a21562eb44,0x3fe0b13120a46549,2 +np.float64,0xbfeb4c4a5f769895,0xbfe8c1147f1c9d8f,2 +np.float64,0x7fca22f4e63445e9,0x40862718e9b4094e,2 +np.float64,0x3fe4269d0c684d3a,0x3fe3032aa2015c53,2 +np.float64,0x3fef551c09beaa38,0x3febbabe03f49c83,2 +np.float64,0xffd843df9fb087c0,0xc0862c0c52d5e5d9,2 +np.float64,0x7fc497e2ca292fc5,0x40862530bbd9fcc7,2 +np.float64,0x3fee02919efc0523,0x3feac655588a4acd,2 +np.float64,0x7fed1e52c0fa3ca5,0x4086330d4ddd8a2c,2 +np.float64,0xba04d4ef7409b,0xba04d4ef7409b,2 +np.float64,0x3fee22d0937c45a2,0x3feaddd4ca66b447,2 +np.float64,0xffeb2558cf764ab1,0xc086327da4e84053,2 +np.float64,0xbfe103d987e207b3,0xbfe04d04818ad1ff,2 +np.float64,0x3f9fd7fed03faffe,0x3f9fd6ae9a45be84,2 +np.float64,0x800a53ec4c34a7d9,0x800a53ec4c34a7d9,2 +np.float64,0xbfe2feb17f65fd63,0xbfe206b9d33a78a2,2 +np.float64,0x989bdd613139,0x989bdd613139,2 +np.float64,0xbfdd0ad3fb3a15a8,0xbfdc20c32a530741,2 +np.float64,0xbfc4222163284444,0xbfc40d1c612784b5,2 +np.float64,0xc30cf5c78619f,0xc30cf5c78619f,2 +np.float64,0x3fe913bd6732277b,0x3fe70912f76bad71,2 +np.float64,0x98f175f531e2f,0x98f175f531e2f,2 +np.float64,0x3fed8c1f717b183f,0x3fea6f9fb3af3423,2 +np.float64,0x7fee46b085bc8d60,0x4086335d269eb7e9,2 +np.float64,0x8007480f564e901f,0x8007480f564e901f,2 +np.float64,0xc9b96e179372e,0xc9b96e179372e,2 +np.float64,0x3fe44deac4289bd6,0x3fe32463a74a69e7,2 +np.float64,0x80021d6c5c243ad9,0x80021d6c5c243ad9,2 +np.float64,0xbfebc805a6f7900b,0xbfe91edcf65a1c19,2 +np.float64,0x80044748adc88e92,0x80044748adc88e92,2 +np.float64,0x4007ee44800fe,0x4007ee44800fe,2 +np.float64,0xbfe24307a4648610,0xbfe1648ad5c47b6f,2 +np.float64,0xbfee6d3a93fcda75,0xbfeb13e1a3196e78,2 +np.float64,0x3fe49a287f293451,0x3fe364a11b9f0068,2 +np.float64,0x80052b37ceaa5670,0x80052b37ceaa5670,2 +np.float64,0xbfd42be893a857d2,0xbfd3da05dac7c286,2 +np.float64,0xffb4bbe4ac2977c8,0xc0861fb31bda6956,2 +np.float64,0xbfc732a4142e6548,0xbfc7129a4eafa399,2 +np.float64,0x7fd0696791a0d2ce,0x408628eb7756cb9c,2 +np.float64,0x3fe46c8f8d68d91f,0x3fe33e3df16187c1,2 +np.float64,0x3fe3a28f1ce7451e,0x3fe293043238d08c,2 +np.float64,0xffedc4eb723b89d6,0xc086333a92258c15,2 +np.float64,0x8000d15b4c41a2b7,0x8000d15b4c41a2b7,2 +np.float64,0xffeb73450236e689,0xc08632947b0148ab,2 +np.float64,0xffe68cf4722d19e8,0xc0863101d08d77bd,2 +np.float64,0x800c70eb4698e1d7,0x800c70eb4698e1d7,2 +np.float64,0xffa94387ff529,0xffa94387ff529,2 +np.float64,0x7fe3835d996706ba,0x40862fd985ff8e7d,2 +np.float64,0x3fe55e476feabc8e,0x3fe408a15594ec52,2 +np.float64,0xffc69672222d2ce4,0xc08625ee0c4c0f6a,2 +np.float64,0xbf9d900b883b2020,0xbf9d8efe811d36df,2 +np.float64,0xbfdb9b9755b7372e,0xbfdad0f2aa2cb110,2 +np.float64,0xffeade6073b5bcc0,0xc08632689f17a25d,2 +np.float64,0xffd1d6a6baa3ad4e,0xc086299630a93a7b,2 +np.float64,0x7fd05ba25620b744,0x408628e4be1ef845,2 +np.float64,0xbfc7d422d52fa844,0xbfc7b170a61531bf,2 +np.float64,0x3fd5196797aa32d0,0x3fd4bc0f0e7d8e1d,2 +np.float64,0x617594a4c2eb3,0x617594a4c2eb3,2 +np.float64,0x7fd779bc4caef378,0x40862bc89271b882,2 +np.float64,0xffd2fb262ba5f64c,0xc0862a15561e9524,2 +np.float64,0x72fd661ae5fad,0x72fd661ae5fad,2 +np.float64,0x3fecf441f339e884,0x3fe9ff880d584f64,2 +np.float64,0x7fc3a8968827512c,0x408624d198b05c61,2 
+np.float64,0x3fe7a25c56ef44b9,0x3fe5e32509a7c32d,2 +np.float64,0x7fd117d514222fa9,0x4086293ec640d5f2,2 +np.float64,0x3fe37dfe5ee6fbfc,0x3fe273d1bcaa1ef0,2 +np.float64,0xbfed4cd19d7a99a3,0xbfea41064cba4c8b,2 +np.float64,0x8003ff12aaa7fe26,0x8003ff12aaa7fe26,2 +np.float64,0x3fcbc3d1193787a2,0x3fcb8d39e3e88264,2 +np.float64,0xe9ba1a91d3744,0xe9ba1a91d3744,2 +np.float64,0x8002ab71998556e4,0x8002ab71998556e4,2 +np.float64,0x800110057922200c,0x800110057922200c,2 +np.float64,0xbfe3b7af19a76f5e,0xbfe2a502fc0a2882,2 +np.float64,0x7fd9de9d5e33bd3a,0x40862c8f73cccabf,2 +np.float64,0xbfba0f0a86341e18,0xbfba0392f44c2771,2 +np.float64,0x8000000000000000,0x8000000000000000,2 +np.float64,0x7fe5d162e96ba2c5,0x408630be2b15e01b,2 +np.float64,0x800b7f0eac76fe1e,0x800b7f0eac76fe1e,2 +np.float64,0xff98bed150317da0,0xc086160633164f5f,2 +np.float64,0x3fef91fd70ff23fb,0x3febe629709d0ae7,2 +np.float64,0x7fe5bea7f16b7d4f,0x408630b749f445e9,2 +np.float64,0xbfe3dc428467b885,0xbfe2c41ea93fab07,2 +np.float64,0xbfeba1fbfcf743f8,0xbfe9021b52851bb9,2 +np.float64,0x7fd2fb2108a5f641,0x40862a1553f45830,2 +np.float64,0x7feb8199a4370332,0x40863298a7169dad,2 +np.float64,0x800f97ff8d7f2fff,0x800f97ff8d7f2fff,2 +np.float64,0x3fd5e20b6b2bc417,0x3fd57a42bd1c0993,2 +np.float64,0x8006b4072dad680f,0x8006b4072dad680f,2 +np.float64,0x605dccf2c0bba,0x605dccf2c0bba,2 +np.float64,0x3fc705ed142e0bda,0x3fc6e69971d86f73,2 +np.float64,0xffd2ba1aad257436,0xc08629f9bc918f8b,2 +np.float64,0x8002954e23c52a9d,0x8002954e23c52a9d,2 +np.float64,0xbfecc65da7798cbb,0xbfe9dd745be18562,2 +np.float64,0x7fc66110482cc220,0x408625db0db57ef8,2 +np.float64,0x3fcd09446d3a1289,0x3fcccaf2dd0a41ea,2 +np.float64,0x3febe7095437ce13,0x3fe93642d1e73b2a,2 +np.float64,0x8004773c7da8ee7a,0x8004773c7da8ee7a,2 +np.float64,0x8001833241230665,0x8001833241230665,2 +np.float64,0x3fe6a262db6d44c6,0x3fe513b3dab5adce,2 +np.float64,0xe6282cc1cc506,0xe6282cc1cc506,2 +np.float64,0x800b9d8553973b0b,0x800b9d8553973b0b,2 +np.float64,0x3fdfbe0c7b3f7c19,0x3fde912375d867a8,2 +np.float64,0x7fd5ac11ebab5823,0x40862b24dfc6d08e,2 +np.float64,0x800e4b7cb1fc96f9,0x800e4b7cb1fc96f9,2 +np.float64,0x3fe14706da628e0e,0x3fe0883aec2a917a,2 +np.float64,0x7fc963f97532c7f2,0x408626dd9b0cafe1,2 +np.float64,0xbfe9c250b5b384a2,0xbfe791c5eabcb05d,2 +np.float64,0x3fe8d16e6c71a2dd,0x3fe6d4c7a33a0bf4,2 +np.float64,0x3fe474ae4628e95d,0x3fe34515c93f4733,2 +np.float64,0x3fbf3257ee3e64b0,0x3fbf1eb530e126ea,2 +np.float64,0x8005f089b3abe114,0x8005f089b3abe114,2 +np.float64,0x3fece07bccf9c0f8,0x3fe9f0dc228124d5,2 +np.float64,0xbfc52521632a4a44,0xbfc50ccebdf59c2c,2 +np.float64,0x7fdf53beb13ea77c,0x40862e177918195e,2 +np.float64,0x8003d9f6ad07b3ee,0x8003d9f6ad07b3ee,2 +np.float64,0xffeacf96bbb59f2d,0xc086326436b38b1a,2 +np.float64,0xdccaea29b995e,0xdccaea29b995e,2 +np.float64,0x5948d21eb291b,0x5948d21eb291b,2 +np.float64,0x10000000000000,0x10000000000000,2 +np.float64,0x7fef6d2c543eda58,0x408633a98593cdf5,2 +np.float64,0x7feda454f47b48a9,0x40863331cb6dc9f7,2 +np.float64,0x3fdd377cecba6ef8,0x3fdc4968f74a9c83,2 +np.float64,0x800644096d4c8814,0x800644096d4c8814,2 +np.float64,0xbfe33ca15ae67942,0xbfe23be5de832bd8,2 +np.float64,0xffce9582bd3d2b04,0xc086285abdf9bf9d,2 +np.float64,0x3fe6621e86acc43d,0x3fe4df231bfa93e1,2 +np.float64,0xee7d19e9dcfa3,0xee7d19e9dcfa3,2 +np.float64,0x800be5997277cb33,0x800be5997277cb33,2 +np.float64,0x82069041040e,0x82069041040e,2 +np.float64,0x800d6efdc19addfc,0x800d6efdc19addfc,2 +np.float64,0x7fb27770ee24eee1,0x40861ec5ed91b839,2 +np.float64,0x3fd506064caa0c0d,0x3fd4a9a66353fefd,2 
+np.float64,0xbfeca9b36bf95367,0xbfe9c81f03ba37b8,2 +np.float64,0xffeab1b7bab5636f,0xc086325b47f61f2b,2 +np.float64,0xffc99f5b2e333eb8,0xc08626f03b08b412,2 +np.float64,0x3fbf1a71bc3e34e3,0x3fbf06fbcaa5de58,2 +np.float64,0x3fe75015736ea02b,0x3fe5a0cd8d763d8d,2 +np.float64,0xffe6a7442fad4e88,0xc086310b20addba4,2 +np.float64,0x3fe5d62ff86bac60,0x3fe46c033195bf28,2 +np.float64,0x7fd0b1f0362163df,0x4086290e857dc1be,2 +np.float64,0xbe0353737c06b,0xbe0353737c06b,2 +np.float64,0x7fec912d8739225a,0x408632e627704635,2 +np.float64,0xded8ba2fbdb18,0xded8ba2fbdb18,2 +np.float64,0x7fec0b53fdf816a7,0x408632c052bc1bd2,2 +np.float64,0x7fe9640d12b2c819,0x408631f4c2ba54d8,2 +np.float64,0x800be714eeb7ce2a,0x800be714eeb7ce2a,2 +np.float64,0xbfcf444a793e8894,0xbfcef6c126b54853,2 +np.float64,0xffeb20cf1bf6419e,0xc086327c4e6ffe80,2 +np.float64,0xc07de22180fd,0xc07de22180fd,2 +np.float64,0xffed129d387a253a,0xc086330a15ad0adb,2 +np.float64,0x3fd9e94fedb3d2a0,0x3fd94049924706a8,2 +np.float64,0x7fe6ba488c2d7490,0x40863111d51e7861,2 +np.float64,0xbfebbdf25db77be5,0xbfe91740ad7ba521,2 +np.float64,0x7fbc6c3c4838d878,0x40862239160cb613,2 +np.float64,0xbfefa82ecebf505e,0xbfebf5f31957dffd,2 +np.float64,0x800bebeb7ad7d7d7,0x800bebeb7ad7d7d7,2 +np.float64,0x7fecccc6f8f9998d,0x408632f6c6da8aac,2 +np.float64,0xcbe4926197ca,0xcbe4926197ca,2 +np.float64,0x2c5d9fd858bb5,0x2c5d9fd858bb5,2 +np.float64,0xbfe9fb021073f604,0xbfe7bddc61f1151a,2 +np.float64,0xbfebb18572f7630b,0xbfe90ddc5002313f,2 +np.float64,0x13bb0d3227763,0x13bb0d3227763,2 +np.float64,0x3feefa5e5cbdf4bd,0x3feb79b9e8ce16bf,2 +np.float64,0x3fc97f086132fe10,0x3fc9549fc8e15ecb,2 +np.float64,0xffe70887c06e110f,0xc086312d30fd31cf,2 +np.float64,0xa00c113540182,0xa00c113540182,2 +np.float64,0x800950984772a131,0x800950984772a131,2 +np.float64,0x1,0x1,2 +np.float64,0x3fd83b4026b07680,0x3fd7afdc659d9a34,2 +np.float64,0xbfe32348fbe64692,0xbfe226292a706a1a,2 +np.float64,0x800b894dcc77129c,0x800b894dcc77129c,2 +np.float64,0xeb2ca419d6595,0xeb2ca419d6595,2 +np.float64,0xbff0000000000000,0xbfec34366179d427,2 +np.float64,0x3feb269e99f64d3d,0x3fe8a4634b927a21,2 +np.float64,0xbfe83149d7706294,0xbfe655a2b245254e,2 +np.float64,0xbfe6eef3ca6ddde8,0xbfe5521310e24d16,2 +np.float64,0x3fea89a4b7b51349,0x3fe82c1fc69edcec,2 +np.float64,0x800f2a8bf17e5518,0x800f2a8bf17e5518,2 +np.float64,0x800f71fac29ee3f6,0x800f71fac29ee3f6,2 +np.float64,0xe7cb31f1cf966,0xe7cb31f1cf966,2 +np.float64,0x3b0f8752761f2,0x3b0f8752761f2,2 +np.float64,0x3fea27dea3744fbd,0x3fe7e0a4705476b2,2 +np.float64,0xbfa97c019c32f800,0xbfa97950c1257b92,2 +np.float64,0xffeff13647ffe26c,0xc08633cadc7105ed,2 +np.float64,0x3feee162353dc2c4,0x3feb67c2da0fbce8,2 +np.float64,0x80088c0807911810,0x80088c0807911810,2 +np.float64,0x3fe936ab1db26d56,0x3fe72489bc69719d,2 +np.float64,0xa2f84bd545f0a,0xa2f84bd545f0a,2 +np.float64,0xbfed445ed27a88be,0xbfea3acac0aaf482,2 +np.float64,0x800faf3e69df5e7d,0x800faf3e69df5e7d,2 +np.float64,0x3fc145a330228b46,0x3fc13853f11b1c90,2 +np.float64,0xbfe25ec5abe4bd8c,0xbfe17c9e9b486f07,2 +np.float64,0x3fe119b160e23363,0x3fe0604b10178966,2 +np.float64,0x7fe0cbf2836197e4,0x40862ea6831e5f4a,2 +np.float64,0x3fe75dd3b4eebba8,0x3fe5abe80fd628fb,2 +np.float64,0x3f7c391000387220,0x3f7c39015d8f3a36,2 +np.float64,0x899d9cad133b4,0x899d9cad133b4,2 +np.float64,0x3fe5f0e34febe1c6,0x3fe4820cefe138fc,2 +np.float64,0x7fe060dfdba0c1bf,0x40862e72de8afcd0,2 +np.float64,0xbfae42f7103c85f0,0xbfae3e7630819c60,2 +np.float64,0x35f1f2c06be5,0x35f1f2c06be5,2 +np.float64,0xffc5194d362a329c,0xc086256266c8b7ad,2 
+np.float64,0xbfda034f1b34069e,0xbfd95860a44c43ad,2 +np.float64,0x32bcebca6579e,0x32bcebca6579e,2 +np.float64,0xbfd1751ebca2ea3e,0xbfd13f79f45bf75c,2 +np.float64,0x3fee4fa1e5bc9f44,0x3feafe69e0d6c1c7,2 +np.float64,0x7f9c03cd5038079a,0x4086170459172900,2 +np.float64,0x7fc5fb6d6d2bf6da,0x408625b6651cfc73,2 +np.float64,0x7ff8000000000000,0x7ff8000000000000,2 +np.float64,0xffd1a8162ca3502c,0xc0862981333931ad,2 +np.float64,0x7fc415c198282b82,0x408624fd8c155d1b,2 +np.float64,0xffda37fbe7b46ff8,0xc0862caae7865c43,2 +np.float64,0xbfef4312257e8624,0xbfebadd89f3ee31c,2 +np.float64,0xbfec45e1fd788bc4,0xbfe97d8b14db6274,2 +np.float64,0xbfe6fdcfd26dfba0,0xbfe55e25b770d00a,2 +np.float64,0x7feb66d424f6cda7,0x40863290d9ff7ea2,2 +np.float64,0x8b08a29916115,0x8b08a29916115,2 +np.float64,0xffe12ca25c625944,0xc0862ed40d769f72,2 +np.float64,0x7ff4000000000000,0x7ffc000000000000,2 +np.float64,0x804925e100925,0x804925e100925,2 +np.float64,0xcebf3e019d9,0xcebf3e019d9,2 +np.float64,0xbfd5d75d4aabaeba,0xbfd57027671dedf7,2 +np.float64,0x800b829ecd37053e,0x800b829ecd37053e,2 +np.float64,0x800b1205daf6240c,0x800b1205daf6240c,2 +np.float64,0x3fdf7e9889befd31,0x3fde583fdff406c3,2 +np.float64,0x7ff0000000000000,0x7ff0000000000000,2 +np.float64,0x3fdc09760d3812ec,0x3fdb35b55c8090c6,2 +np.float64,0x800c4d99e4f89b34,0x800c4d99e4f89b34,2 +np.float64,0xffbaa6772e354cf0,0xc08621b535badb2f,2 +np.float64,0xbfc91188fd322310,0xbfc8e933b5d25ea7,2 +np.float64,0xffc1b947f4237290,0xc08623fd69164251,2 +np.float64,0x3fc6ab3b252d5678,0x3fc68d50bbac106d,2 +np.float64,0xffac8eb968391d70,0xc0861cb734833355,2 +np.float64,0xffe29a35c365346b,0xc0862f77a1aed6d8,2 +np.float64,0x3fde14b9543c2973,0x3fdd122697779015,2 +np.float64,0xbf10f5400021e000,0xbf10f53fffef1383,2 +np.float64,0xffe0831aa3e10635,0xc0862e838553d0ca,2 +np.float64,0x3fccbadbcf3975b8,0x3fcc7e768d0154ec,2 +np.float64,0x3fe092ef66e125df,0x3fdfd212a7116c9b,2 +np.float64,0xbfd727f039ae4fe0,0xbfd6adad040b2334,2 +np.float64,0xbfe4223b93a84477,0xbfe2ff7587364db4,2 +np.float64,0x3f4e5c3a003cb874,0x3f4e5c39b75c70f7,2 +np.float64,0x800e76b1a87ced63,0x800e76b1a87ced63,2 +np.float64,0x3fed2b7368fa56e7,0x3fea2863b9131b8c,2 +np.float64,0xffadb76ec43b6ee0,0xc0861d08ae79f20c,2 +np.float64,0x800b6a0cd1f6d41a,0x800b6a0cd1f6d41a,2 +np.float64,0xffee6aa943fcd552,0xc0863366a24250d5,2 +np.float64,0xbfe68cbc4e6d1978,0xbfe502040591aa5b,2 +np.float64,0xff859a38002b3480,0xc0860f64726235cc,2 +np.float64,0x3474d13e68e9b,0x3474d13e68e9b,2 +np.float64,0xffc11d49f6223a94,0xc08623b5c2df9712,2 +np.float64,0x800d82d019bb05a0,0x800d82d019bb05a0,2 +np.float64,0xbfe2af0192255e03,0xbfe1c20e38106388,2 +np.float64,0x3fe97d13c032fa28,0x3fe75bba11a65f86,2 +np.float64,0x7fcd457e133a8afb,0x40862800e80f5863,2 +np.float64,0x9d7254cf3ae4b,0x9d7254cf3ae4b,2 +np.float64,0x8003047675a608ee,0x8003047675a608ee,2 +np.float64,0x3fead6cd7d75ad9a,0x3fe8676138e5ff93,2 +np.float64,0x3fea6ee3b0f4ddc7,0x3fe817838a2bcbe3,2 +np.float64,0x3feed0edea7da1dc,0x3feb5bea3cb12fe2,2 +np.float64,0x88003fe510008,0x88003fe510008,2 +np.float64,0x3fe64cadc56c995c,0x3fe4cd8ead87fc79,2 +np.float64,0xaae30c5955c62,0xaae30c5955c62,2 +np.float64,0x7fc8c97cae3192f8,0x408626ac579f4fc5,2 +np.float64,0xbfc2bc0e8b25781c,0xbfc2ab188fdab7dc,2 +np.float64,0xc8f8e5e791f1d,0xc8f8e5e791f1d,2 +np.float64,0x3fecfaa5d6f9f54c,0x3fea0444dabe5a15,2 +np.float64,0xbfeb93740ff726e8,0xbfe8f71a9ab13baf,2 +np.float64,0xffd951236c32a246,0xc0862c633a4661eb,2 +np.float64,0x3fddbc5fcd3b78c0,0x3fdcc21c1a0a9246,2 +np.float64,0xbfd242443da48488,0xbfd20512d91f7924,2 
+np.float64,0x2a3689b2546d2,0x2a3689b2546d2,2 +np.float64,0xffe24c67382498ce,0xc0862f55e4ea6283,2 +np.float64,0x800cbfce22197f9c,0x800cbfce22197f9c,2 +np.float64,0x8002269428044d29,0x8002269428044d29,2 +np.float64,0x7fd44babbd289756,0x40862a9e79b51c3b,2 +np.float64,0x3feea056a27d40ad,0x3feb38dcddb682f0,2 +np.float64,0xffeca8174b39502e,0xc08632ec8f88a5b2,2 +np.float64,0x7fbe0853a03c10a6,0x408622a9e8d53a9e,2 +np.float64,0xbfa9704b2432e090,0xbfa96d9dfc8c0cc2,2 +np.float64,0x800bda28fab7b452,0x800bda28fab7b452,2 +np.float64,0xbfb0ffa2f621ff48,0xbfb0fc71f405e82a,2 +np.float64,0xbfe66c04216cd808,0xbfe4e73ea3b58cf6,2 +np.float64,0x3fe336ea5d266dd5,0x3fe236ffcf078c62,2 +np.float64,0xbfe7729ae6aee536,0xbfe5bcad4b8ac62d,2 +np.float64,0x558cfc96ab1a0,0x558cfc96ab1a0,2 +np.float64,0xbfe7d792aaefaf26,0xbfe60de1b8f0279d,2 +np.float64,0xffd19ef6bda33dee,0xc086297d0ffee3c7,2 +np.float64,0x666b3ab4ccd68,0x666b3ab4ccd68,2 +np.float64,0xffa3d89e3c27b140,0xc08619cdeb2c1e49,2 +np.float64,0xbfb1728f7f62f,0xbfb1728f7f62f,2 +np.float64,0x3fc76319f32ec634,0x3fc74247bd005e20,2 +np.float64,0xbfbf1caee23e3960,0xbfbf0934c13d70e2,2 +np.float64,0x7fe79626f32f2c4d,0x4086315dcc68a5cb,2 +np.float64,0xffee78c4603cf188,0xc086336a572c05c2,2 +np.float64,0x3fce546eda3ca8de,0x3fce0d8d737fd31d,2 +np.float64,0xa223644d4446d,0xa223644d4446d,2 +np.float64,0x3fecea878b79d510,0x3fe9f850d50973f6,2 +np.float64,0x3fc20e0ea1241c1d,0x3fc1fedda87c5e75,2 +np.float64,0xffd1c5a99ca38b54,0xc086298e8e94cd47,2 +np.float64,0x7feb2c299d765852,0x4086327fa6db2808,2 +np.float64,0xcaf9d09595f3a,0xcaf9d09595f3a,2 +np.float64,0xbfe293bf21e5277e,0xbfe1aa7f6ac274ef,2 +np.float64,0xbfbaa3c8ce354790,0xbfba97891df19c01,2 +np.float64,0x3faf5784543eaf09,0x3faf5283acc7d71d,2 +np.float64,0x7fc014f8f62029f1,0x40862336531c662d,2 +np.float64,0xbfe0d9ac2d61b358,0xbfe027bce36699ca,2 +np.float64,0x8003e112ff27c227,0x8003e112ff27c227,2 +np.float64,0xffec0d4151381a82,0xc08632c0df718dd0,2 +np.float64,0x7fa2156fb0242ade,0x4086190f7587d708,2 +np.float64,0xd698358dad307,0xd698358dad307,2 +np.float64,0xbfed8d1b0efb1a36,0xbfea70588ef9ba18,2 +np.float64,0xbfd2cae6a92595ce,0xbfd28851e2185dee,2 +np.float64,0xffe7a36764ef46ce,0xc086316249c9287a,2 +np.float64,0xbfdb8ad8e5b715b2,0xbfdac19213c14315,2 +np.float64,0x3b5dba6076bc,0x3b5dba6076bc,2 +np.float64,0x800e6e8347bcdd07,0x800e6e8347bcdd07,2 +np.float64,0x800bea9f3fb7d53f,0x800bea9f3fb7d53f,2 +np.float64,0x7fb6d0e5fc2da1cb,0x4086207714c4ab85,2 +np.float64,0x0,0x0,2 +np.float64,0xbfe2aa1e1465543c,0xbfe1bdd550ef2966,2 +np.float64,0x7fd3f6a47fa7ed48,0x40862a7caea33055,2 +np.float64,0x800094e292c129c6,0x800094e292c129c6,2 +np.float64,0x800e1500ecbc2a02,0x800e1500ecbc2a02,2 +np.float64,0xbfd8ff6f97b1fee0,0xbfd866f84346ecdc,2 +np.float64,0x681457d0d028c,0x681457d0d028c,2 +np.float64,0x3feed0b5987da16b,0x3feb5bc1ab424984,2 +np.float64,0x3fdbcb34cdb79668,0x3fdafca540f32c06,2 +np.float64,0xbfdc9eacdcb93d5a,0xbfdbbe274aa8aeb0,2 +np.float64,0xffe6e35d526dc6ba,0xc08631203df38ed2,2 +np.float64,0x3fcac1cc65358398,0x3fca90de41889613,2 +np.float64,0xbfebf07a55b7e0f5,0xbfe93d6007db0c67,2 +np.float64,0xbfd7a7b1e7af4f64,0xbfd725a9081c22cb,2 +np.float64,0x800232bd7de4657c,0x800232bd7de4657c,2 +np.float64,0x7fb1dae43c23b5c7,0x40861e80f5c0a64e,2 +np.float64,0x8013ded70027c,0x8013ded70027c,2 +np.float64,0x7fc4373a59286e74,0x4086250ad60575d0,2 +np.float64,0xbfe9980fd6733020,0xbfe770d1352d0ed3,2 +np.float64,0x8008a66b8dd14cd7,0x8008a66b8dd14cd7,2 +np.float64,0xbfaebc67f83d78d0,0xbfaeb7b015848478,2 +np.float64,0xffd0c52762218a4e,0xc0862917b564afc6,2 
+np.float64,0xbfd503860aaa070c,0xbfd4a74618441561,2 +np.float64,0x5bdacabcb7b5a,0x5bdacabcb7b5a,2 +np.float64,0xf3623cffe6c48,0xf3623cffe6c48,2 +np.float64,0x7fe16c6c7ea2d8d8,0x40862ef18d90201f,2 +np.float64,0x3ff0000000000000,0x3fec34366179d427,2 +np.float64,0x7fe19cbc84233978,0x40862f079dcbc169,2 +np.float64,0x3fcfd3d6933fa7ad,0x3fcf822187907f6b,2 +np.float64,0x8007d65d672facbc,0x8007d65d672facbc,2 +np.float64,0xffca6115aa34c22c,0xc086272bd7728750,2 +np.float64,0xbfe77ab1556ef562,0xbfe5c332fb55b66e,2 +np.float64,0x8001ed797c23daf4,0x8001ed797c23daf4,2 +np.float64,0x7fdd3d16cb3a7a2d,0x40862d8a2c869281,2 +np.float64,0x75f36beaebe6e,0x75f36beaebe6e,2 +np.float64,0xffda3c2798b47850,0xc0862cac2d3435df,2 +np.float64,0xbfa37cc3c426f980,0xbfa37b8f9d3ec4b7,2 +np.float64,0x80030ea8bd061d52,0x80030ea8bd061d52,2 +np.float64,0xffe41f7617683eec,0xc08630188a3e135e,2 +np.float64,0x800e40590dfc80b2,0x800e40590dfc80b2,2 +np.float64,0x3fea950d80f52a1c,0x3fe834e74481e66f,2 +np.float64,0xffec95e39a792bc6,0xc08632e779150084,2 +np.float64,0xbfd54310ecaa8622,0xbfd4e39c4d767002,2 +np.float64,0xffd40c9971a81932,0xc0862a85764eb2f4,2 +np.float64,0xb0a2230761445,0xb0a2230761445,2 +np.float64,0x80092973661252e7,0x80092973661252e7,2 +np.float64,0x7fb13b030a227605,0x40861e380aeb5549,2 +np.float64,0x3fbd5d8db23abb1b,0x3fbd4d2a0b94af36,2 +np.float64,0xbfd6cb8567ad970a,0xbfd656b19ab8fa61,2 +np.float64,0xbfe7c0fd346f81fa,0xbfe5fbc28807c794,2 +np.float64,0xffd586579eab0cb0,0xc0862b16e65c0754,2 +np.float64,0x8000e52da461ca5c,0x8000e52da461ca5c,2 +np.float64,0x3fc69d17112d3a2e,0x3fc67f63fe1fea1c,2 +np.float64,0x3fd36ba892a6d750,0x3fd3225be1fa87af,2 +np.float64,0x7fe2850598e50a0a,0x40862f6e7fcd6c1a,2 +np.float64,0x80074a4dacce949c,0x80074a4dacce949c,2 +np.float64,0x3fe25eea4d64bdd5,0x3fe17cbe5fefbd4e,2 +np.float64,0xbfe250c08be4a181,0xbfe17074c520e5de,2 +np.float64,0x8000f5665481eacd,0x8000f5665481eacd,2 +np.float64,0x7fdb3172f83662e5,0x40862cf5a46764f1,2 +np.float64,0x7fd8ed82d631db05,0x40862c4380658afa,2 +np.float64,0xffec5163feb8a2c7,0xc08632d4366aab06,2 +np.float64,0x800ff14ac6ffe296,0x800ff14ac6ffe296,2 +np.float64,0xbfc7cc7aea2f98f4,0xbfc7a9e9cb38f023,2 +np.float64,0xbfd50cdfc32a19c0,0xbfd4b0282b452fb2,2 +np.float64,0xbfec256d75b84adb,0xbfe965328c1860b2,2 +np.float64,0xffe860c4cdb0c189,0xc08631a164b7059a,2 +np.float64,0xbfe23de164247bc3,0xbfe16011bffa4651,2 +np.float64,0xcc96b39d992d7,0xcc96b39d992d7,2 +np.float64,0xbfec43acf938875a,0xbfe97be3a13b50c3,2 +np.float64,0xc4f587bb89eb1,0xc4f587bb89eb1,2 +np.float64,0xbfcd971d9a3b2e3c,0xbfcd5537ad15dab4,2 +np.float64,0xffcaf00d8035e01c,0xc0862756bf2cdf8f,2 +np.float64,0x8008c26f93f184e0,0x8008c26f93f184e0,2 +np.float64,0xfff0000000000000,0xfff0000000000000,2 +np.float64,0xbfd13552c3a26aa6,0xbfd101e5e252eb7b,2 +np.float64,0x7fe497235e292e46,0x4086304792fb423a,2 +np.float64,0x7fd6dc0192adb802,0x40862b921a5e935d,2 +np.float64,0xf16d49a1e2da9,0xf16d49a1e2da9,2 +np.float64,0xffef6b1b71bed636,0xc08633a8feed0178,2 +np.float64,0x7fe15ec62f62bd8b,0x40862eeb46b193dc,2 +np.float64,0x3fef4369ec7e86d4,0x3febae1768be52cc,2 +np.float64,0x4f84e8e89f09e,0x4f84e8e89f09e,2 +np.float64,0xbfe19e71ade33ce4,0xbfe0d4fad05e0ebc,2 +np.float64,0xbfe7e1df1defc3be,0xbfe616233e15b3d0,2 +np.float64,0x7fe9349afdb26935,0x408631e5c1c5c6cd,2 +np.float64,0xff90c35ac82186c0,0xc08612e896a06467,2 +np.float64,0xbfe88bf8807117f1,0xbfe69dc786464422,2 +np.float64,0x3feaf9ff6475f3fe,0x3fe8825132410d18,2 +np.float64,0x9ff487a33fe91,0x9ff487a33fe91,2 +np.float64,0x7fedb30159bb6602,0x40863335c0419322,2 
+np.float64,0x800bddf6ed77bbee,0x800bddf6ed77bbee,2 +np.float64,0x3fd919df133233be,0x3fd87f963b9584ce,2 +np.float64,0x7fd64da3b52c9b46,0x40862b5fa9dd3b6d,2 +np.float64,0xbfce288db43c511c,0xbfcde2d953407ae8,2 +np.float64,0x3fe88bc72771178e,0x3fe69da05e9e9b4e,2 +np.float64,0x800feafe259fd5fc,0x800feafe259fd5fc,2 +np.float64,0x3febbbff4a7777ff,0x3fe915c78f6a280f,2 +np.float64,0xbfefbde4417f7bc9,0xbfec055f4fb2cd21,2 +np.float64,0xf13ca103e2794,0xf13ca103e2794,2 +np.float64,0x3fe6423884ec8471,0x3fe4c4f97eaa876a,2 +np.float64,0x800ca01c8cb94039,0x800ca01c8cb94039,2 +np.float64,0x3fbc5073f638a0e0,0x3fbc41c163ac0001,2 +np.float64,0xbfda0d83cfb41b08,0xbfd961d4cacc82cf,2 +np.float64,0x800f37b8f17e6f72,0x800f37b8f17e6f72,2 +np.float64,0x7fe0b08cd7216119,0x40862e996becb771,2 +np.float64,0xffd4222a40a84454,0xc0862a8e0c984917,2 +np.float64,0x7feb3df98ff67bf2,0x40863284e3a86ee6,2 +np.float64,0x8001d5d291e3aba6,0x8001d5d291e3aba6,2 +np.float64,0xbfd3c21629a7842c,0xbfd3750095a5894a,2 +np.float64,0xbfd069eb48a0d3d6,0xbfd03d2b1c2ae9db,2 +np.float64,0xffeb1be2973637c4,0xc086327ada954662,2 +np.float64,0x3fc659f97e2cb3f3,0x3fc63d497a451f10,2 +np.float64,0xbfeb624bc776c498,0xbfe8d1cf7c0626ca,2 +np.float64,0xffeedf26e23dbe4d,0xc08633850baab425,2 +np.float64,0xffe70da48a6e1b48,0xc086312ef75d5036,2 +np.float64,0x2b4f4830569ea,0x2b4f4830569ea,2 +np.float64,0xffe82e7fcfb05cff,0xc0863190d4771f75,2 +np.float64,0x3fcc2c1fd5385840,0x3fcbf3211ddc5123,2 +np.float64,0x7fe22ced5a6459da,0x40862f481629ee6a,2 +np.float64,0x7fe13d2895e27a50,0x40862edbbc411899,2 +np.float64,0x3fd54c4280aa9884,0x3fd4ec55a946c5d7,2 +np.float64,0xffd75b8e01aeb71c,0xc0862bbe42d76e5e,2 +np.float64,0x7f1d5376fe3ab,0x7f1d5376fe3ab,2 +np.float64,0x3fe6ec6c902dd8d9,0x3fe55004f35192bd,2 +np.float64,0x5634504aac68b,0x5634504aac68b,2 +np.float64,0x3feedb0d83bdb61b,0x3feb633467467ce6,2 +np.float64,0x3fddb1c0dcbb6380,0x3fdcb87a02daf1fa,2 +np.float64,0xbfa832da443065b0,0xbfa8308c70257209,2 +np.float64,0x87a9836b0f531,0x87a9836b0f531,2 diff --git a/.venv/lib/python3.12/site-packages/numpy/core/tests/data/umath-validation-set-arctan.csv b/.venv/lib/python3.12/site-packages/numpy/core/tests/data/umath-validation-set-arctan.csv new file mode 100644 index 00000000..1e92073d --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/data/umath-validation-set-arctan.csv @@ -0,0 +1,1429 @@ +dtype,input,output,ulperrortol +np.float32,0x3f338252,0x3f1c8d9c,3 +np.float32,0x7e569df2,0x3fc90fdb,3 +np.float32,0xbf347e25,0xbf1d361f,3 +np.float32,0xbf0a654e,0xbefdbfd2,3 +np.float32,0x8070968e,0x8070968e,3 +np.float32,0x803cfb27,0x803cfb27,3 +np.float32,0x8024362e,0x8024362e,3 +np.float32,0xfd55dca0,0xbfc90fdb,3 +np.float32,0x592b82,0x592b82,3 +np.float32,0x802eb8e1,0x802eb8e1,3 +np.float32,0xbc5fef40,0xbc5febae,3 +np.float32,0x3f1f6ce8,0x3f0e967c,3 +np.float32,0x20bedc,0x20bedc,3 +np.float32,0xbf058860,0xbef629c7,3 +np.float32,0x311504,0x311504,3 +np.float32,0xbd23f560,0xbd23defa,3 +np.float32,0x800ff4e8,0x800ff4e8,3 +np.float32,0x355009,0x355009,3 +np.float32,0x3f7be42e,0x3f46fdb3,3 +np.float32,0xbf225f7c,0xbf10b364,3 +np.float32,0x8074fa9e,0x8074fa9e,3 +np.float32,0xbea4b418,0xbe9f59ce,3 +np.float32,0xbe909c14,0xbe8cf045,3 +np.float32,0x80026bee,0x80026bee,3 +np.float32,0x3d789c20,0x3d784e25,3 +np.float32,0x7f56a4ba,0x3fc90fdb,3 +np.float32,0xbf70d141,0xbf413db7,3 +np.float32,0xbf2c4886,0xbf17a505,3 +np.float32,0x7e2993bf,0x3fc90fdb,3 +np.float32,0xbe2c8a30,0xbe2aef28,3 +np.float32,0x803f82d9,0x803f82d9,3 +np.float32,0x3f062fbc,0x3ef730a1,3 
+np.float32,0x3f349ee0,0x3f1d4bfa,3 +np.float32,0x3eccfb69,0x3ec2f9e8,3 +np.float32,0x7e8a85dd,0x3fc90fdb,3 +np.float32,0x25331,0x25331,3 +np.float32,0x464f19,0x464f19,3 +np.float32,0x8035c818,0x8035c818,3 +np.float32,0x802e5799,0x802e5799,3 +np.float32,0x64e1c0,0x64e1c0,3 +np.float32,0x701cc2,0x701cc2,3 +np.float32,0x265c57,0x265c57,3 +np.float32,0x807a053f,0x807a053f,3 +np.float32,0x3bd2c412,0x3bd2c354,3 +np.float32,0xff28f1c8,0xbfc90fdb,3 +np.float32,0x7f08f08b,0x3fc90fdb,3 +np.float32,0x800c50e4,0x800c50e4,3 +np.float32,0x369674,0x369674,3 +np.float32,0xbf5b7db3,0xbf3571bf,3 +np.float32,0x7edcf5e2,0x3fc90fdb,3 +np.float32,0x800e5d4b,0x800e5d4b,3 +np.float32,0x80722554,0x80722554,3 +np.float32,0x693f33,0x693f33,3 +np.float32,0x800844e4,0x800844e4,3 +np.float32,0xbf111b82,0xbf0402ec,3 +np.float32,0x7df9c9ac,0x3fc90fdb,3 +np.float32,0xbf6619a6,0xbf3b6f57,3 +np.float32,0x8002fafe,0x8002fafe,3 +np.float32,0xfe1e67f8,0xbfc90fdb,3 +np.float32,0x3f7f4bf8,0x3f48b5b7,3 +np.float32,0x7f017b20,0x3fc90fdb,3 +np.float32,0x2d9b07,0x2d9b07,3 +np.float32,0x803aa174,0x803aa174,3 +np.float32,0x7d530336,0x3fc90fdb,3 +np.float32,0x80662195,0x80662195,3 +np.float32,0xfd5ebcf0,0xbfc90fdb,3 +np.float32,0xbe7b8dcc,0xbe76ab59,3 +np.float32,0x7f2bacaf,0x3fc90fdb,3 +np.float32,0x3f194fc4,0x3f0a229e,3 +np.float32,0x7ee21cdf,0x3fc90fdb,3 +np.float32,0x3f5a17fc,0x3f34a307,3 +np.float32,0x7f100c58,0x3fc90fdb,3 +np.float32,0x7e9128f5,0x3fc90fdb,3 +np.float32,0xbf2107c6,0xbf0fbdb4,3 +np.float32,0xbd29c800,0xbd29af22,3 +np.float32,0xbf5af499,0xbf3522a6,3 +np.float32,0x801bde44,0x801bde44,3 +np.float32,0xfeb4761a,0xbfc90fdb,3 +np.float32,0x3d88aa1b,0x3d887650,3 +np.float32,0x7eba5e0b,0x3fc90fdb,3 +np.float32,0x803906bd,0x803906bd,3 +np.float32,0x80101512,0x80101512,3 +np.float32,0x7e898f83,0x3fc90fdb,3 +np.float32,0x806406d3,0x806406d3,3 +np.float32,0x7ed20fc0,0x3fc90fdb,3 +np.float32,0x20827d,0x20827d,3 +np.float32,0x3f361359,0x3f1e43fe,3 +np.float32,0xfe4ef8d8,0xbfc90fdb,3 +np.float32,0x805e7d2d,0x805e7d2d,3 +np.float32,0xbe4316b0,0xbe40c745,3 +np.float32,0xbf0a1c06,0xbefd4e5a,3 +np.float32,0x3e202860,0x3e1edee1,3 +np.float32,0xbeb32a2c,0xbeac5899,3 +np.float32,0xfe528838,0xbfc90fdb,3 +np.float32,0x2f73e2,0x2f73e2,3 +np.float32,0xbe16e010,0xbe15cc27,3 +np.float32,0x3f50d6c5,0x3f2f2d75,3 +np.float32,0xbe88a6a2,0xbe8589c7,3 +np.float32,0x3ee36060,0x3ed5fb36,3 +np.float32,0x6c978b,0x6c978b,3 +np.float32,0x7f1b735f,0x3fc90fdb,3 +np.float32,0x3dad8256,0x3dad1885,3 +np.float32,0x807f5094,0x807f5094,3 +np.float32,0x65c358,0x65c358,3 +np.float32,0xff315ce4,0xbfc90fdb,3 +np.float32,0x7411a6,0x7411a6,3 +np.float32,0x80757b04,0x80757b04,3 +np.float32,0x3eec73a6,0x3edd82f4,3 +np.float32,0xfe9f69e8,0xbfc90fdb,3 +np.float32,0x801f4fa8,0x801f4fa8,3 +np.float32,0xbf6f2fae,0xbf405f79,3 +np.float32,0xfea206b6,0xbfc90fdb,3 +np.float32,0x3f257301,0x3f12e1ee,3 +np.float32,0x7ea6a506,0x3fc90fdb,3 +np.float32,0x80800000,0x80800000,3 +np.float32,0xff735c2d,0xbfc90fdb,3 +np.float32,0x80197f95,0x80197f95,3 +np.float32,0x7f4a354f,0x3fc90fdb,3 +np.float32,0xff320c00,0xbfc90fdb,3 +np.float32,0x3f2659de,0x3f138484,3 +np.float32,0xbe5451bc,0xbe515a52,3 +np.float32,0x3f6e228c,0x3f3fcf7c,3 +np.float32,0x66855a,0x66855a,3 +np.float32,0x8034b3a3,0x8034b3a3,3 +np.float32,0xbe21a2fc,0xbe20505d,3 +np.float32,0x7f79e2dc,0x3fc90fdb,3 +np.float32,0xbe19a8e0,0xbe18858c,3 +np.float32,0x10802c,0x10802c,3 +np.float32,0xfeee579e,0xbfc90fdb,3 +np.float32,0x3f3292c8,0x3f1becc0,3 +np.float32,0xbf595a71,0xbf34350a,3 +np.float32,0xbf7c3373,0xbf4725f4,3 
+np.float32,0xbdd30938,0xbdd24b36,3 +np.float32,0x153a17,0x153a17,3 +np.float32,0x807282a0,0x807282a0,3 +np.float32,0xfe817322,0xbfc90fdb,3 +np.float32,0x3f1b3628,0x3f0b8771,3 +np.float32,0x41be8f,0x41be8f,3 +np.float32,0x7f4a8343,0x3fc90fdb,3 +np.float32,0x3dc4ea2b,0x3dc44fae,3 +np.float32,0x802aac25,0x802aac25,3 +np.float32,0xbf20e1d7,0xbf0fa284,3 +np.float32,0xfd91a1b0,0xbfc90fdb,3 +np.float32,0x3f0d5476,0x3f012265,3 +np.float32,0x21c916,0x21c916,3 +np.float32,0x807df399,0x807df399,3 +np.float32,0x7e207b4c,0x3fc90fdb,3 +np.float32,0x8055f8ff,0x8055f8ff,3 +np.float32,0x7edf3b01,0x3fc90fdb,3 +np.float32,0x803a8df3,0x803a8df3,3 +np.float32,0x3ce3b002,0x3ce3a101,3 +np.float32,0x3f62dd54,0x3f39a248,3 +np.float32,0xff33ae10,0xbfc90fdb,3 +np.float32,0x7e3de69d,0x3fc90fdb,3 +np.float32,0x8024581e,0x8024581e,3 +np.float32,0xbf4ac99d,0xbf2b807a,3 +np.float32,0x3f157d19,0x3f074d8c,3 +np.float32,0xfed383f4,0xbfc90fdb,3 +np.float32,0xbf5a39fa,0xbf34b6b8,3 +np.float32,0x800d757d,0x800d757d,3 +np.float32,0x807d606b,0x807d606b,3 +np.float32,0x3e828f89,0x3e7fac2d,3 +np.float32,0x7a6604,0x7a6604,3 +np.float32,0x7dc7e72b,0x3fc90fdb,3 +np.float32,0x80144146,0x80144146,3 +np.float32,0x7c2eed69,0x3fc90fdb,3 +np.float32,0x3f5b4d8c,0x3f3555fc,3 +np.float32,0xfd8b7778,0xbfc90fdb,3 +np.float32,0xfc9d9140,0xbfc90fdb,3 +np.float32,0xbea265d4,0xbe9d4232,3 +np.float32,0xbe9344d0,0xbe8f65da,3 +np.float32,0x3f71f19a,0x3f41d65b,3 +np.float32,0x804a3f59,0x804a3f59,3 +np.float32,0x3e596290,0x3e563476,3 +np.float32,0x3e994ee4,0x3e94f546,3 +np.float32,0xbc103e00,0xbc103d0c,3 +np.float32,0xbf1cd896,0xbf0cb889,3 +np.float32,0x7f52b080,0x3fc90fdb,3 +np.float32,0xff584452,0xbfc90fdb,3 +np.float32,0x58b26b,0x58b26b,3 +np.float32,0x3f23cd4c,0x3f11b799,3 +np.float32,0x707d7,0x707d7,3 +np.float32,0xff732cff,0xbfc90fdb,3 +np.float32,0x3e41c2a6,0x3e3f7f0f,3 +np.float32,0xbf7058e9,0xbf40fdcf,3 +np.float32,0x7dca9857,0x3fc90fdb,3 +np.float32,0x7f0eb44b,0x3fc90fdb,3 +np.float32,0x8000405c,0x8000405c,3 +np.float32,0x4916ab,0x4916ab,3 +np.float32,0x4811a8,0x4811a8,3 +np.float32,0x3d69bf,0x3d69bf,3 +np.float32,0xfeadcf1e,0xbfc90fdb,3 +np.float32,0x3e08dbbf,0x3e080d58,3 +np.float32,0xff031f88,0xbfc90fdb,3 +np.float32,0xbe09cab8,0xbe08f818,3 +np.float32,0x21d7cd,0x21d7cd,3 +np.float32,0x3f23230d,0x3f113ea9,3 +np.float32,0x7e8a48d4,0x3fc90fdb,3 +np.float32,0x413869,0x413869,3 +np.float32,0x7e832990,0x3fc90fdb,3 +np.float32,0x800f5c09,0x800f5c09,3 +np.float32,0x7f5893b6,0x3fc90fdb,3 +np.float32,0x7f06b5b1,0x3fc90fdb,3 +np.float32,0xbe1cbee8,0xbe1b89d6,3 +np.float32,0xbf279f14,0xbf1468a8,3 +np.float32,0xfea86060,0xbfc90fdb,3 +np.float32,0x3e828174,0x3e7f91bb,3 +np.float32,0xff682c82,0xbfc90fdb,3 +np.float32,0x4e20f3,0x4e20f3,3 +np.float32,0x7f17d7e9,0x3fc90fdb,3 +np.float32,0x80671f92,0x80671f92,3 +np.float32,0x7f6dd100,0x3fc90fdb,3 +np.float32,0x3f219a4d,0x3f102695,3 +np.float32,0x803c9808,0x803c9808,3 +np.float32,0x3c432ada,0x3c43287d,3 +np.float32,0xbd3db450,0xbd3d91a2,3 +np.float32,0x3baac135,0x3baac0d0,3 +np.float32,0xff7fffe1,0xbfc90fdb,3 +np.float32,0xfe38a6f4,0xbfc90fdb,3 +np.float32,0x3dfb0a04,0x3df9cb04,3 +np.float32,0x800b05c2,0x800b05c2,3 +np.float32,0x644163,0x644163,3 +np.float32,0xff03a025,0xbfc90fdb,3 +np.float32,0x3f7d506c,0x3f47b641,3 +np.float32,0xff0e682a,0xbfc90fdb,3 +np.float32,0x3e09b7b0,0x3e08e567,3 +np.float32,0x7f72a216,0x3fc90fdb,3 +np.float32,0x7f800000,0x3fc90fdb,3 +np.float32,0x8050a281,0x8050a281,3 +np.float32,0x7edafa2f,0x3fc90fdb,3 +np.float32,0x3f4e0df6,0x3f2d7f2f,3 +np.float32,0xbf6728e0,0xbf3c050f,3 
+np.float32,0x3e904ce4,0x3e8ca6eb,3 +np.float32,0x0,0x0,3 +np.float32,0xfd215070,0xbfc90fdb,3 +np.float32,0x7e406b15,0x3fc90fdb,3 +np.float32,0xbf2803c9,0xbf14af18,3 +np.float32,0x5950c8,0x5950c8,3 +np.float32,0xbeddcec8,0xbed14faa,3 +np.float32,0xbec6457e,0xbebd2aa5,3 +np.float32,0xbf42843c,0xbf2656db,3 +np.float32,0x3ee9cba8,0x3edb5163,3 +np.float32,0xbe30c954,0xbe2f0f90,3 +np.float32,0xbeee6b44,0xbedf216f,3 +np.float32,0xbe35d818,0xbe33f7cd,3 +np.float32,0xbe47c630,0xbe454bc6,3 +np.float32,0x801b146f,0x801b146f,3 +np.float32,0x7f6788da,0x3fc90fdb,3 +np.float32,0x3eaef088,0x3ea8927d,3 +np.float32,0x3eb5983e,0x3eae81fc,3 +np.float32,0x40b51d,0x40b51d,3 +np.float32,0xfebddd04,0xbfc90fdb,3 +np.float32,0x3e591aee,0x3e55efea,3 +np.float32,0xbe2b6b48,0xbe29d81f,3 +np.float32,0xff4a8826,0xbfc90fdb,3 +np.float32,0x3e791df0,0x3e745eac,3 +np.float32,0x7c8f681f,0x3fc90fdb,3 +np.float32,0xfe7a15c4,0xbfc90fdb,3 +np.float32,0x3c8963,0x3c8963,3 +np.float32,0x3f0afa0a,0x3efea5cc,3 +np.float32,0xbf0d2680,0xbf00ff29,3 +np.float32,0x3dc306b0,0x3dc27096,3 +np.float32,0x7f4cf105,0x3fc90fdb,3 +np.float32,0xbe196060,0xbe183ea4,3 +np.float32,0x5caf1c,0x5caf1c,3 +np.float32,0x801f2852,0x801f2852,3 +np.float32,0xbe01aa0c,0xbe00fa53,3 +np.float32,0x3f0cfd32,0x3f00df7a,3 +np.float32,0x7d82038e,0x3fc90fdb,3 +np.float32,0x7f7b927f,0x3fc90fdb,3 +np.float32,0xbe93b2e4,0xbe8fcb7f,3 +np.float32,0x1ffe8c,0x1ffe8c,3 +np.float32,0x3faaf6,0x3faaf6,3 +np.float32,0x3e32b1b8,0x3e30e9ab,3 +np.float32,0x802953c0,0x802953c0,3 +np.float32,0xfe5d9844,0xbfc90fdb,3 +np.float32,0x3e1a59d0,0x3e193292,3 +np.float32,0x801c6edc,0x801c6edc,3 +np.float32,0x1ecf41,0x1ecf41,3 +np.float32,0xfe56b09c,0xbfc90fdb,3 +np.float32,0x7e878351,0x3fc90fdb,3 +np.float32,0x3f401e2c,0x3f24cfcb,3 +np.float32,0xbf204a40,0xbf0f35bb,3 +np.float32,0x3e155a98,0x3e144ee1,3 +np.float32,0xbf34f929,0xbf1d8838,3 +np.float32,0x801bbf70,0x801bbf70,3 +np.float32,0x7e7c9730,0x3fc90fdb,3 +np.float32,0x7cc23432,0x3fc90fdb,3 +np.float32,0xbf351638,0xbf1d9b97,3 +np.float32,0x80152094,0x80152094,3 +np.float32,0x3f2d731c,0x3f187219,3 +np.float32,0x804ab0b7,0x804ab0b7,3 +np.float32,0x37d6db,0x37d6db,3 +np.float32,0xbf3ccc56,0xbf22acbf,3 +np.float32,0x3e546f8c,0x3e5176e7,3 +np.float32,0xbe90e87e,0xbe8d3707,3 +np.float32,0x48256c,0x48256c,3 +np.float32,0x7e2468d0,0x3fc90fdb,3 +np.float32,0x807af47e,0x807af47e,3 +np.float32,0x3ed4b221,0x3ec996f0,3 +np.float32,0x3d3b1956,0x3d3af811,3 +np.float32,0xbe69d93c,0xbe65e7f0,3 +np.float32,0xff03ff14,0xbfc90fdb,3 +np.float32,0x801e79dc,0x801e79dc,3 +np.float32,0x3f467c53,0x3f28d63d,3 +np.float32,0x3eab6baa,0x3ea56a1c,3 +np.float32,0xbf15519c,0xbf072d1c,3 +np.float32,0x7f0bd8e8,0x3fc90fdb,3 +np.float32,0xbe1e0d1c,0xbe1cd053,3 +np.float32,0x8016edab,0x8016edab,3 +np.float32,0x7ecaa09b,0x3fc90fdb,3 +np.float32,0x3f72e6d9,0x3f4257a8,3 +np.float32,0xbefe787e,0xbeec29a4,3 +np.float32,0xbee989e8,0xbedb1af9,3 +np.float32,0xbe662db0,0xbe626a45,3 +np.float32,0x495bf7,0x495bf7,3 +np.float32,0x26c379,0x26c379,3 +np.float32,0x7f54d41a,0x3fc90fdb,3 +np.float32,0x801e7dd9,0x801e7dd9,3 +np.float32,0x80000000,0x80000000,3 +np.float32,0xfa3d3000,0xbfc90fdb,3 +np.float32,0xfa3cb800,0xbfc90fdb,3 +np.float32,0x264894,0x264894,3 +np.float32,0xff6de011,0xbfc90fdb,3 +np.float32,0x7e9045b2,0x3fc90fdb,3 +np.float32,0x3f2253a8,0x3f10aaf4,3 +np.float32,0xbd462bf0,0xbd460469,3 +np.float32,0x7f1796af,0x3fc90fdb,3 +np.float32,0x3e718858,0x3e6d3279,3 +np.float32,0xff437d7e,0xbfc90fdb,3 +np.float32,0x805ae7cb,0x805ae7cb,3 +np.float32,0x807e32e9,0x807e32e9,3 
+np.float32,0x3ee0bafc,0x3ed3c453,3 +np.float32,0xbf721dee,0xbf41edc3,3 +np.float32,0xfec9f792,0xbfc90fdb,3 +np.float32,0x7f050720,0x3fc90fdb,3 +np.float32,0x182261,0x182261,3 +np.float32,0x3e39e678,0x3e37e5be,3 +np.float32,0x7e096e4b,0x3fc90fdb,3 +np.float32,0x103715,0x103715,3 +np.float32,0x3f7e7741,0x3f484ae4,3 +np.float32,0x3e29aea5,0x3e28277c,3 +np.float32,0x58c183,0x58c183,3 +np.float32,0xff72fdb2,0xbfc90fdb,3 +np.float32,0xbd9a9420,0xbd9a493c,3 +np.float32,0x7f1e07e7,0x3fc90fdb,3 +np.float32,0xff79f522,0xbfc90fdb,3 +np.float32,0x7c7d0e96,0x3fc90fdb,3 +np.float32,0xbeba9e8e,0xbeb2f504,3 +np.float32,0xfd880a80,0xbfc90fdb,3 +np.float32,0xff7f2a33,0xbfc90fdb,3 +np.float32,0x3e861ae0,0x3e83289c,3 +np.float32,0x7f0161c1,0x3fc90fdb,3 +np.float32,0xfe844ff8,0xbfc90fdb,3 +np.float32,0xbebf4b98,0xbeb7128e,3 +np.float32,0x652bee,0x652bee,3 +np.float32,0xff188a4b,0xbfc90fdb,3 +np.float32,0xbf800000,0xbf490fdb,3 +np.float32,0x80418711,0x80418711,3 +np.float32,0xbeb712d4,0xbeafd1f6,3 +np.float32,0xbf7cee28,0xbf478491,3 +np.float32,0xfe66c59c,0xbfc90fdb,3 +np.float32,0x4166a2,0x4166a2,3 +np.float32,0x3dfa1a2c,0x3df8deb5,3 +np.float32,0xbdbfbcb8,0xbdbf2e0f,3 +np.float32,0xfe60ef70,0xbfc90fdb,3 +np.float32,0xfe009444,0xbfc90fdb,3 +np.float32,0xfeb27aa0,0xbfc90fdb,3 +np.float32,0xbe99f7bc,0xbe95902b,3 +np.float32,0x8043d28d,0x8043d28d,3 +np.float32,0xfe5328c4,0xbfc90fdb,3 +np.float32,0x8017b27e,0x8017b27e,3 +np.float32,0x3ef1d2cf,0x3ee1ebd7,3 +np.float32,0x805ddd90,0x805ddd90,3 +np.float32,0xbf424263,0xbf262d17,3 +np.float32,0xfc99dde0,0xbfc90fdb,3 +np.float32,0xbf7ec13b,0xbf487015,3 +np.float32,0xbef727ea,0xbee64377,3 +np.float32,0xff15ce95,0xbfc90fdb,3 +np.float32,0x1fbba4,0x1fbba4,3 +np.float32,0x3f3b2368,0x3f2198a9,3 +np.float32,0xfefda26e,0xbfc90fdb,3 +np.float32,0x801519ad,0x801519ad,3 +np.float32,0x80473fa2,0x80473fa2,3 +np.float32,0x7e7a8bc1,0x3fc90fdb,3 +np.float32,0x3e8a9289,0x3e87548a,3 +np.float32,0x3ed68987,0x3ecb2872,3 +np.float32,0x805bca66,0x805bca66,3 +np.float32,0x8079c4e3,0x8079c4e3,3 +np.float32,0x3a2510,0x3a2510,3 +np.float32,0x7eedc598,0x3fc90fdb,3 +np.float32,0x80681956,0x80681956,3 +np.float32,0xff64c778,0xbfc90fdb,3 +np.float32,0x806bbc46,0x806bbc46,3 +np.float32,0x433643,0x433643,3 +np.float32,0x705b92,0x705b92,3 +np.float32,0xff359392,0xbfc90fdb,3 +np.float32,0xbee78672,0xbed96fa7,3 +np.float32,0x3e21717b,0x3e202010,3 +np.float32,0xfea13c34,0xbfc90fdb,3 +np.float32,0x2c8895,0x2c8895,3 +np.float32,0x3ed33290,0x3ec84f7c,3 +np.float32,0x3e63031e,0x3e5f662e,3 +np.float32,0x7e30907b,0x3fc90fdb,3 +np.float32,0xbe293708,0xbe27b310,3 +np.float32,0x3ed93738,0x3ecd6ea3,3 +np.float32,0x9db7e,0x9db7e,3 +np.float32,0x3f7cd1b8,0x3f47762c,3 +np.float32,0x3eb5143c,0x3eae0cb0,3 +np.float32,0xbe69b234,0xbe65c2d7,3 +np.float32,0x3f6e74de,0x3f3ffb97,3 +np.float32,0x5d0559,0x5d0559,3 +np.float32,0x3e1e8c30,0x3e1d4c70,3 +np.float32,0xbf2d1878,0xbf1833ef,3 +np.float32,0xff2adf82,0xbfc90fdb,3 +np.float32,0x8012e2c1,0x8012e2c1,3 +np.float32,0x7f031be3,0x3fc90fdb,3 +np.float32,0x805ff94e,0x805ff94e,3 +np.float32,0x3e9d5b27,0x3e98aa31,3 +np.float32,0x3f56d5cf,0x3f32bc9e,3 +np.float32,0x3eaa0412,0x3ea4267f,3 +np.float32,0xbe899ea4,0xbe86712f,3 +np.float32,0x800f2f48,0x800f2f48,3 +np.float32,0x3f1c2269,0x3f0c33ea,3 +np.float32,0x3f4a5f64,0x3f2b3f28,3 +np.float32,0x80739318,0x80739318,3 +np.float32,0x806e9b47,0x806e9b47,3 +np.float32,0x3c8cd300,0x3c8ccf73,3 +np.float32,0x7f39a39d,0x3fc90fdb,3 +np.float32,0x3ec95d61,0x3ebfd9dc,3 +np.float32,0xff351ff8,0xbfc90fdb,3 +np.float32,0xff3a8f58,0xbfc90fdb,3 
+np.float32,0x7f313ec0,0x3fc90fdb,3 +np.float32,0x803aed13,0x803aed13,3 +np.float32,0x7f771d9b,0x3fc90fdb,3 +np.float32,0x8045a6d6,0x8045a6d6,3 +np.float32,0xbc85f280,0xbc85ef72,3 +np.float32,0x7e9c68f5,0x3fc90fdb,3 +np.float32,0xbf0f9379,0xbf02d975,3 +np.float32,0x7e97bcb1,0x3fc90fdb,3 +np.float32,0x804a07d5,0x804a07d5,3 +np.float32,0x802e6117,0x802e6117,3 +np.float32,0x7ed5e388,0x3fc90fdb,3 +np.float32,0x80750455,0x80750455,3 +np.float32,0xff4a8325,0xbfc90fdb,3 +np.float32,0xbedb6866,0xbecf497c,3 +np.float32,0x52ea3b,0x52ea3b,3 +np.float32,0xff773172,0xbfc90fdb,3 +np.float32,0xbeaa8ff0,0xbea4a46e,3 +np.float32,0x7eef2058,0x3fc90fdb,3 +np.float32,0x3f712472,0x3f4169d3,3 +np.float32,0xff6c8608,0xbfc90fdb,3 +np.float32,0xbf6eaa41,0xbf40182a,3 +np.float32,0x3eb03c24,0x3ea9bb34,3 +np.float32,0xfe118cd4,0xbfc90fdb,3 +np.float32,0x3e5b03b0,0x3e57c378,3 +np.float32,0x7f34d92d,0x3fc90fdb,3 +np.float32,0x806c3418,0x806c3418,3 +np.float32,0x7f3074e3,0x3fc90fdb,3 +np.float32,0x8002df02,0x8002df02,3 +np.float32,0x3f6df63a,0x3f3fb7b7,3 +np.float32,0xfd2b4100,0xbfc90fdb,3 +np.float32,0x80363d5c,0x80363d5c,3 +np.float32,0xbeac1f98,0xbea60bd6,3 +np.float32,0xff7fffff,0xbfc90fdb,3 +np.float32,0x80045097,0x80045097,3 +np.float32,0xfe011100,0xbfc90fdb,3 +np.float32,0x80739ef5,0x80739ef5,3 +np.float32,0xff3976ed,0xbfc90fdb,3 +np.float32,0xbe18e3a0,0xbe17c49e,3 +np.float32,0xbe289294,0xbe2712f6,3 +np.float32,0x3f1d41e7,0x3f0d050e,3 +np.float32,0x39364a,0x39364a,3 +np.float32,0x8072b77e,0x8072b77e,3 +np.float32,0x3f7cfec0,0x3f478cf6,3 +np.float32,0x2f68f6,0x2f68f6,3 +np.float32,0xbf031fb8,0xbef25c84,3 +np.float32,0xbf0b842c,0xbeff7afc,3 +np.float32,0x3f081e7e,0x3efa3676,3 +np.float32,0x7f7fffff,0x3fc90fdb,3 +np.float32,0xff15da0e,0xbfc90fdb,3 +np.float32,0x3d2001b2,0x3d1fece1,3 +np.float32,0x7f76efef,0x3fc90fdb,3 +np.float32,0x3f2405dd,0x3f11dfb7,3 +np.float32,0xa0319,0xa0319,3 +np.float32,0x3e23d2bd,0x3e227255,3 +np.float32,0xbd4d4c50,0xbd4d205e,3 +np.float32,0x382344,0x382344,3 +np.float32,0x21bbf,0x21bbf,3 +np.float32,0xbf209e82,0xbf0f7239,3 +np.float32,0xff03bf9f,0xbfc90fdb,3 +np.float32,0x7b1789,0x7b1789,3 +np.float32,0xff314944,0xbfc90fdb,3 +np.float32,0x1a63eb,0x1a63eb,3 +np.float32,0x803dc983,0x803dc983,3 +np.float32,0x3f0ff558,0x3f0323dc,3 +np.float32,0x3f544f2c,0x3f313f58,3 +np.float32,0xff032948,0xbfc90fdb,3 +np.float32,0x7f4933cc,0x3fc90fdb,3 +np.float32,0x7f14c5ed,0x3fc90fdb,3 +np.float32,0x803aeebf,0x803aeebf,3 +np.float32,0xbf0d4c0f,0xbf011bf5,3 +np.float32,0xbeaf8de2,0xbea91f57,3 +np.float32,0xff3ae030,0xbfc90fdb,3 +np.float32,0xbb362d00,0xbb362ce1,3 +np.float32,0x3d1f79e0,0x3d1f6544,3 +np.float32,0x3f56e9d9,0x3f32c860,3 +np.float32,0x3f723e5e,0x3f41fee2,3 +np.float32,0x4c0179,0x4c0179,3 +np.float32,0xfee36132,0xbfc90fdb,3 +np.float32,0x619ae6,0x619ae6,3 +np.float32,0xfde5d670,0xbfc90fdb,3 +np.float32,0xff079ac5,0xbfc90fdb,3 +np.float32,0x3e974fbd,0x3e931fae,3 +np.float32,0x8020ae6b,0x8020ae6b,3 +np.float32,0x6b5af1,0x6b5af1,3 +np.float32,0xbeb57cd6,0xbeae69a3,3 +np.float32,0x806e7eb2,0x806e7eb2,3 +np.float32,0x7e666edb,0x3fc90fdb,3 +np.float32,0xbf458c18,0xbf283ff0,3 +np.float32,0x3e50518e,0x3e4d8399,3 +np.float32,0x3e9ce224,0x3e983b98,3 +np.float32,0x3e6bc067,0x3e67b6c6,3 +np.float32,0x13783d,0x13783d,3 +np.float32,0xff3d518c,0xbfc90fdb,3 +np.float32,0xfeba5968,0xbfc90fdb,3 +np.float32,0xbf0b9f76,0xbeffa50f,3 +np.float32,0xfe174900,0xbfc90fdb,3 +np.float32,0x3f38bb0a,0x3f200527,3 +np.float32,0x7e94a77d,0x3fc90fdb,3 +np.float32,0x29d776,0x29d776,3 +np.float32,0xbf4e058d,0xbf2d7a15,3 
+np.float32,0xbd94abc8,0xbd946923,3 +np.float32,0xbee62db0,0xbed85124,3 +np.float32,0x800000,0x800000,3 +np.float32,0xbef1df7e,0xbee1f636,3 +np.float32,0xbcf3cd20,0xbcf3bab5,3 +np.float32,0x80007b05,0x80007b05,3 +np.float32,0x3d9b3f2e,0x3d9af351,3 +np.float32,0xbf714a68,0xbf417dee,3 +np.float32,0xbf2a2d37,0xbf163069,3 +np.float32,0x8055104f,0x8055104f,3 +np.float32,0x7f5c40d7,0x3fc90fdb,3 +np.float32,0x1,0x1,3 +np.float32,0xff35f3a6,0xbfc90fdb,3 +np.float32,0xd9c7c,0xd9c7c,3 +np.float32,0xbf440cfc,0xbf274f22,3 +np.float32,0x8050ac43,0x8050ac43,3 +np.float32,0x63ee16,0x63ee16,3 +np.float32,0x7d90419b,0x3fc90fdb,3 +np.float32,0xfee22198,0xbfc90fdb,3 +np.float32,0xc2ead,0xc2ead,3 +np.float32,0x7f5cd6a6,0x3fc90fdb,3 +np.float32,0x3f6fab7e,0x3f40a184,3 +np.float32,0x3ecf998c,0x3ec53a73,3 +np.float32,0x7e5271f0,0x3fc90fdb,3 +np.float32,0x67c016,0x67c016,3 +np.float32,0x2189c8,0x2189c8,3 +np.float32,0x27d892,0x27d892,3 +np.float32,0x3f0d02c4,0x3f00e3c0,3 +np.float32,0xbf69ebca,0xbf3d8862,3 +np.float32,0x3e60c0d6,0x3e5d3ebb,3 +np.float32,0x3f45206c,0x3f27fc66,3 +np.float32,0xbf6b47dc,0xbf3e4592,3 +np.float32,0xfe9be2e2,0xbfc90fdb,3 +np.float32,0x7fa00000,0x7fe00000,3 +np.float32,0xff271562,0xbfc90fdb,3 +np.float32,0x3e2e5270,0x3e2caaaf,3 +np.float32,0x80222934,0x80222934,3 +np.float32,0xbd01d220,0xbd01c701,3 +np.float32,0x223aa0,0x223aa0,3 +np.float32,0x3f4b5a7e,0x3f2bd967,3 +np.float32,0x3f217d85,0x3f101200,3 +np.float32,0xbf57663a,0xbf331144,3 +np.float32,0x3f219862,0x3f102536,3 +np.float32,0x28a28c,0x28a28c,3 +np.float32,0xbf3f55f4,0xbf244f86,3 +np.float32,0xbf3de287,0xbf236092,3 +np.float32,0xbf1c1ce2,0xbf0c2fe3,3 +np.float32,0x80000001,0x80000001,3 +np.float32,0x3db695d0,0x3db61a90,3 +np.float32,0x6c39bf,0x6c39bf,3 +np.float32,0x7e33a12f,0x3fc90fdb,3 +np.float32,0x67623a,0x67623a,3 +np.float32,0x3e45dc54,0x3e4373b6,3 +np.float32,0x7f62fa68,0x3fc90fdb,3 +np.float32,0x3f0e1d01,0x3f01bbe5,3 +np.float32,0x3f13dc69,0x3f0615f5,3 +np.float32,0x246703,0x246703,3 +np.float32,0xbf1055b5,0xbf036d07,3 +np.float32,0x7f46d3d0,0x3fc90fdb,3 +np.float32,0x3d2b8086,0x3d2b66e5,3 +np.float32,0xbf03be44,0xbef35776,3 +np.float32,0x3f800000,0x3f490fdb,3 +np.float32,0xbec8d226,0xbebf613d,3 +np.float32,0x3d8faf00,0x3d8f72d4,3 +np.float32,0x170c4e,0x170c4e,3 +np.float32,0xff14c0f0,0xbfc90fdb,3 +np.float32,0xff16245d,0xbfc90fdb,3 +np.float32,0x7f44ce6d,0x3fc90fdb,3 +np.float32,0xbe8175d8,0xbe7d9aeb,3 +np.float32,0x3df7a4a1,0x3df67254,3 +np.float32,0xfe2cc46c,0xbfc90fdb,3 +np.float32,0x3f284e63,0x3f14e335,3 +np.float32,0x7e46e5d6,0x3fc90fdb,3 +np.float32,0x397be4,0x397be4,3 +np.float32,0xbf2560bc,0xbf12d50b,3 +np.float32,0x3ed9b8c1,0x3ecddc60,3 +np.float32,0xfec18c5a,0xbfc90fdb,3 +np.float32,0x64894d,0x64894d,3 +np.float32,0x36a65d,0x36a65d,3 +np.float32,0x804ffcd7,0x804ffcd7,3 +np.float32,0x800f79e4,0x800f79e4,3 +np.float32,0x5d45ac,0x5d45ac,3 +np.float32,0x6cdda0,0x6cdda0,3 +np.float32,0xbf7f2077,0xbf489fe5,3 +np.float32,0xbf152f78,0xbf0713a1,3 +np.float32,0x807bf344,0x807bf344,3 +np.float32,0x3f775023,0x3f44a4d8,3 +np.float32,0xbf3edf67,0xbf240365,3 +np.float32,0x7eed729c,0x3fc90fdb,3 +np.float32,0x14cc29,0x14cc29,3 +np.float32,0x7edd7b6b,0x3fc90fdb,3 +np.float32,0xbf3c6e2c,0xbf226fb7,3 +np.float32,0x51b9ad,0x51b9ad,3 +np.float32,0x3f617ee8,0x3f38dd7c,3 +np.float32,0xff800000,0xbfc90fdb,3 +np.float32,0x7f440ea0,0x3fc90fdb,3 +np.float32,0x3e639893,0x3e5ff49e,3 +np.float32,0xbd791bb0,0xbd78cd3c,3 +np.float32,0x8059fcbc,0x8059fcbc,3 +np.float32,0xbf7d1214,0xbf4796bd,3 +np.float32,0x3ef368fa,0x3ee33788,3 
+np.float32,0xbecec0f4,0xbec48055,3 +np.float32,0xbc83d940,0xbc83d656,3 +np.float32,0xbce01220,0xbce003d4,3 +np.float32,0x803192a5,0x803192a5,3 +np.float32,0xbe40e0c0,0xbe3ea4f0,3 +np.float32,0xfb692600,0xbfc90fdb,3 +np.float32,0x3f1bec65,0x3f0c0c88,3 +np.float32,0x7f042798,0x3fc90fdb,3 +np.float32,0xbe047374,0xbe03b83b,3 +np.float32,0x7f7c6630,0x3fc90fdb,3 +np.float32,0x7f58dae3,0x3fc90fdb,3 +np.float32,0x80691c92,0x80691c92,3 +np.float32,0x7dbe76,0x7dbe76,3 +np.float32,0xbf231384,0xbf11339d,3 +np.float32,0xbef4acf8,0xbee43f8b,3 +np.float32,0x3ee9f9d0,0x3edb7793,3 +np.float32,0x3f0064f6,0x3eee04a8,3 +np.float32,0x313732,0x313732,3 +np.float32,0xfd58cf80,0xbfc90fdb,3 +np.float32,0x3f7a2bc9,0x3f461d30,3 +np.float32,0x7f7681af,0x3fc90fdb,3 +np.float32,0x7f504211,0x3fc90fdb,3 +np.float32,0xfeae0c00,0xbfc90fdb,3 +np.float32,0xbee14396,0xbed436d1,3 +np.float32,0x7fc00000,0x7fc00000,3 +np.float32,0x693406,0x693406,3 +np.float32,0x3eb4a679,0x3eadab1b,3 +np.float32,0x550505,0x550505,3 +np.float32,0xfd493d10,0xbfc90fdb,3 +np.float32,0x3f4fc907,0x3f2e8b2c,3 +np.float32,0x80799aa4,0x80799aa4,3 +np.float32,0xff1ea89b,0xbfc90fdb,3 +np.float32,0xff424510,0xbfc90fdb,3 +np.float32,0x7f68d026,0x3fc90fdb,3 +np.float32,0xbea230ca,0xbe9d1200,3 +np.float32,0x7ea585da,0x3fc90fdb,3 +np.float32,0x3f3db211,0x3f23414c,3 +np.float32,0xfea4d964,0xbfc90fdb,3 +np.float32,0xbf17fe18,0xbf092984,3 +np.float32,0x7cc8a2,0x7cc8a2,3 +np.float32,0xff0330ba,0xbfc90fdb,3 +np.float32,0x3f769835,0x3f444592,3 +np.float32,0xeb0ac,0xeb0ac,3 +np.float32,0x7f7e45de,0x3fc90fdb,3 +np.float32,0xbdb510a8,0xbdb49873,3 +np.float32,0x3ebf900b,0x3eb74e9c,3 +np.float32,0xbf21bbce,0xbf103e89,3 +np.float32,0xbf3f4682,0xbf24459d,3 +np.float32,0x7eb6e9c8,0x3fc90fdb,3 +np.float32,0xbf42532d,0xbf2637be,3 +np.float32,0xbd3b2600,0xbd3b04b4,3 +np.float32,0x3f1fa9aa,0x3f0ec23e,3 +np.float32,0x7ed6a0f1,0x3fc90fdb,3 +np.float32,0xff4759a1,0xbfc90fdb,3 +np.float32,0x6d26e3,0x6d26e3,3 +np.float32,0xfe1108e0,0xbfc90fdb,3 +np.float32,0xfdf76900,0xbfc90fdb,3 +np.float32,0xfec66f22,0xbfc90fdb,3 +np.float32,0xbf3d097f,0xbf22d458,3 +np.float32,0x3d85be25,0x3d858d99,3 +np.float32,0x7f36739f,0x3fc90fdb,3 +np.float32,0x7bc0a304,0x3fc90fdb,3 +np.float32,0xff48dd90,0xbfc90fdb,3 +np.float32,0x48cab0,0x48cab0,3 +np.float32,0x3ed3943c,0x3ec8a2ef,3 +np.float32,0xbf61488e,0xbf38bede,3 +np.float32,0x3f543df5,0x3f313525,3 +np.float32,0x5cf2ca,0x5cf2ca,3 +np.float32,0x572686,0x572686,3 +np.float32,0x80369c7c,0x80369c7c,3 +np.float32,0xbd2c1d20,0xbd2c0338,3 +np.float32,0x3e255428,0x3e23ea0b,3 +np.float32,0xbeba9ee0,0xbeb2f54c,3 +np.float32,0x8015c165,0x8015c165,3 +np.float32,0x3d31f488,0x3d31d7e6,3 +np.float32,0x3f68591c,0x3f3cac43,3 +np.float32,0xf5ed5,0xf5ed5,3 +np.float32,0xbf3b1d34,0xbf21949e,3 +np.float32,0x1f0343,0x1f0343,3 +np.float32,0x3f0e52b5,0x3f01e4ef,3 +np.float32,0x7f57c596,0x3fc90fdb,3 +np.float64,0x7fd8e333ddb1c667,0x3ff921fb54442d18,3 +np.float64,0x800bcc9cdad7993a,0x800bcc9cdad7993a,3 +np.float64,0x3fcd6f81df3adf00,0x3fcceebbafc5d55e,3 +np.float64,0x3fed7338a57ae671,0x3fe7ce3e5811fc0a,3 +np.float64,0x7fe64994fcac9329,0x3ff921fb54442d18,3 +np.float64,0xfa5a6345f4b4d,0xfa5a6345f4b4d,3 +np.float64,0xe9dcd865d3b9b,0xe9dcd865d3b9b,3 +np.float64,0x7fea6cffabf4d9fe,0x3ff921fb54442d18,3 +np.float64,0xa9e1de6153c3c,0xa9e1de6153c3c,3 +np.float64,0xab6bdc5356d7c,0xab6bdc5356d7c,3 +np.float64,0x80062864a02c50ca,0x80062864a02c50ca,3 +np.float64,0xbfdac03aa7b58076,0xbfd9569f3230128d,3 +np.float64,0xbfe61b77752c36ef,0xbfe3588f51b8be8f,3 
+np.float64,0x800bc854c8d790aa,0x800bc854c8d790aa,3 +np.float64,0x3feed1a2da3da346,0x3fe887f9b8ea031f,3 +np.float64,0x3fe910d3697221a7,0x3fe54365a53d840e,3 +np.float64,0x7fe7ab4944ef5692,0x3ff921fb54442d18,3 +np.float64,0x3fa462f1a028c5e3,0x3fa460303a6a4e69,3 +np.float64,0x800794f1a3af29e4,0x800794f1a3af29e4,3 +np.float64,0x3fee6fe7fafcdfd0,0x3fe854f863816d55,3 +np.float64,0x8000000000000000,0x8000000000000000,3 +np.float64,0x7f336472fe66d,0x7f336472fe66d,3 +np.float64,0xffb1623ac822c478,0xbff921fb54442d18,3 +np.float64,0x3fbacd68ce359ad2,0x3fbab480b3638846,3 +np.float64,0xffd5c02706ab804e,0xbff921fb54442d18,3 +np.float64,0xbfd4daf03d29b5e0,0xbfd42928f069c062,3 +np.float64,0x800c6e85dbd8dd0c,0x800c6e85dbd8dd0c,3 +np.float64,0x800e3599c5bc6b34,0x800e3599c5bc6b34,3 +np.float64,0x2c0d654c581ad,0x2c0d654c581ad,3 +np.float64,0xbfdd3eb13fba7d62,0xbfdb6e8143302de7,3 +np.float64,0x800b60cb8776c197,0x800b60cb8776c197,3 +np.float64,0x80089819ad113034,0x80089819ad113034,3 +np.float64,0x29fe721453fcf,0x29fe721453fcf,3 +np.float64,0x3fe8722f4df0e45f,0x3fe4e026d9eadb4d,3 +np.float64,0xffd1fbcd01a3f79a,0xbff921fb54442d18,3 +np.float64,0x7fc74e1e982e9c3c,0x3ff921fb54442d18,3 +np.float64,0x800c09d3d15813a8,0x800c09d3d15813a8,3 +np.float64,0xbfeee4578b3dc8af,0xbfe891ab3d6c3ce4,3 +np.float64,0xffdd01a6f33a034e,0xbff921fb54442d18,3 +np.float64,0x7fcc130480382608,0x3ff921fb54442d18,3 +np.float64,0xffcbb6bd1d376d7c,0xbff921fb54442d18,3 +np.float64,0xc068a53780d15,0xc068a53780d15,3 +np.float64,0xbfc974f15532e9e4,0xbfc92100b355f3e7,3 +np.float64,0x3fe6da79442db4f3,0x3fe3d87393b082e7,3 +np.float64,0xd9d9be4db3b38,0xd9d9be4db3b38,3 +np.float64,0x5ea50a20bd4a2,0x5ea50a20bd4a2,3 +np.float64,0xbfe5597f7d2ab2ff,0xbfe2d3ccc544b52b,3 +np.float64,0x80019364e4e326cb,0x80019364e4e326cb,3 +np.float64,0x3fed2902c3fa5206,0x3fe7a5e1df07e5c1,3 +np.float64,0xbfa7b72b5c2f6e50,0xbfa7b2d545b3cc1f,3 +np.float64,0xffdb60dd43b6c1ba,0xbff921fb54442d18,3 +np.float64,0x81a65d8b034cc,0x81a65d8b034cc,3 +np.float64,0x8000c30385818608,0x8000c30385818608,3 +np.float64,0x6022f5f4c045f,0x6022f5f4c045f,3 +np.float64,0x8007a2bb810f4578,0x8007a2bb810f4578,3 +np.float64,0x7fdc68893238d111,0x3ff921fb54442d18,3 +np.float64,0x7fd443454ea8868a,0x3ff921fb54442d18,3 +np.float64,0xffe6b04209ed6084,0xbff921fb54442d18,3 +np.float64,0x7fcd9733d13b2e67,0x3ff921fb54442d18,3 +np.float64,0xf5ee80a9ebdd0,0xf5ee80a9ebdd0,3 +np.float64,0x3fe3788e8de6f11e,0x3fe17dec7e6843a0,3 +np.float64,0x3fee36f62f7c6dec,0x3fe836f832515b43,3 +np.float64,0xf6cb49aded969,0xf6cb49aded969,3 +np.float64,0x3fd2b15ea4a562bc,0x3fd22fdc09920e67,3 +np.float64,0x7fccf6aef139ed5d,0x3ff921fb54442d18,3 +np.float64,0x3fd396b8ce272d72,0x3fd3026118857bd4,3 +np.float64,0x7fe53d3c80ea7a78,0x3ff921fb54442d18,3 +np.float64,0x3feae88fc4f5d120,0x3fe65fb04b18ef7a,3 +np.float64,0x3fedc643747b8c86,0x3fe7fafa6c20e25a,3 +np.float64,0xffdb2dc0df365b82,0xbff921fb54442d18,3 +np.float64,0xbfa2af3658255e70,0xbfa2ad17348f4253,3 +np.float64,0x3f8aa77b30354ef6,0x3f8aa71892336a69,3 +np.float64,0xbfdd1b1efbba363e,0xbfdb510dcd186820,3 +np.float64,0x800f50d99c5ea1b3,0x800f50d99c5ea1b3,3 +np.float64,0xff6ed602403dac00,0xbff921fb54442d18,3 +np.float64,0x800477d71aa8efaf,0x800477d71aa8efaf,3 +np.float64,0xbfe729a9e86e5354,0xbfe40ca78d9eefcf,3 +np.float64,0x3fd81ab2d4303566,0x3fd70d7e3937ea22,3 +np.float64,0xb617cbab6c2fa,0xb617cbab6c2fa,3 +np.float64,0x7fefffffffffffff,0x3ff921fb54442d18,3 +np.float64,0xffa40933ac281260,0xbff921fb54442d18,3 +np.float64,0xbfe1ede621e3dbcc,0xbfe057bb2b341ced,3 
+np.float64,0xbfec700f03b8e01e,0xbfe73fb190bc722e,3 +np.float64,0x6e28af02dc517,0x6e28af02dc517,3 +np.float64,0x3fe37ad37ae6f5a7,0x3fe17f94674818a9,3 +np.float64,0x8000cbdeeae197bf,0x8000cbdeeae197bf,3 +np.float64,0x3fe8fd1f01f1fa3e,0x3fe5372bbec5d72c,3 +np.float64,0x3f8f9229103f2452,0x3f8f918531894256,3 +np.float64,0x800536858e0a6d0c,0x800536858e0a6d0c,3 +np.float64,0x7fe82bb4f9f05769,0x3ff921fb54442d18,3 +np.float64,0xffc1c2fb592385f8,0xbff921fb54442d18,3 +np.float64,0x7f924ddfc0249bbf,0x3ff921fb54442d18,3 +np.float64,0xffd5e125c52bc24c,0xbff921fb54442d18,3 +np.float64,0xbfef0d8738be1b0e,0xbfe8a6ef17b16c10,3 +np.float64,0x3fc9c8875233910f,0x3fc9715e708503cb,3 +np.float64,0xbfe2d926f4e5b24e,0xbfe108956e61cbb3,3 +np.float64,0x7fd61c496dac3892,0x3ff921fb54442d18,3 +np.float64,0x7fed545c6b7aa8b8,0x3ff921fb54442d18,3 +np.float64,0x8003746fea86e8e1,0x8003746fea86e8e1,3 +np.float64,0x3fdf515e75bea2bd,0x3fdd201a5585caa3,3 +np.float64,0xffda87c8ee350f92,0xbff921fb54442d18,3 +np.float64,0xffc675d8e22cebb0,0xbff921fb54442d18,3 +np.float64,0xffcdc173433b82e8,0xbff921fb54442d18,3 +np.float64,0xffed9df1517b3be2,0xbff921fb54442d18,3 +np.float64,0x3fd6a2eec72d45de,0x3fd5c1f1d7dcddcf,3 +np.float64,0xffec116a66f822d4,0xbff921fb54442d18,3 +np.float64,0x8007c2a2458f8545,0x8007c2a2458f8545,3 +np.float64,0x3fe4ee80d969dd02,0x3fe2895076094668,3 +np.float64,0x3fe3cae7116795ce,0x3fe1b9c07e0d03a7,3 +np.float64,0xbfd81bf8d8b037f2,0xbfd70e9bbbb4ca57,3 +np.float64,0x800c88ccd1f9119a,0x800c88ccd1f9119a,3 +np.float64,0xffdab2aee2b5655e,0xbff921fb54442d18,3 +np.float64,0x3fe743d227ee87a4,0x3fe41dcaef186d96,3 +np.float64,0x3fb060fd0220c1fa,0x3fb05b47f56ebbb4,3 +np.float64,0xbfd3f03772a7e06e,0xbfd3541522377291,3 +np.float64,0x190a5ae03216,0x190a5ae03216,3 +np.float64,0x3fe48c71916918e4,0x3fe24442f45b3183,3 +np.float64,0x800862470590c48e,0x800862470590c48e,3 +np.float64,0x7fd3ced89d279db0,0x3ff921fb54442d18,3 +np.float64,0x3feb3d9b4ab67b37,0x3fe69140cf2623f7,3 +np.float64,0xbc3f296b787e5,0xbc3f296b787e5,3 +np.float64,0xbfed6b905dfad721,0xbfe7ca1881a8c0fd,3 +np.float64,0xbfe621c2aaac4386,0xbfe35cd1969a82db,3 +np.float64,0x8009e7b17593cf63,0x8009e7b17593cf63,3 +np.float64,0x80045f580ca8beb1,0x80045f580ca8beb1,3 +np.float64,0xbfea2e177e745c2f,0xbfe5f13971633339,3 +np.float64,0x3fee655787fccab0,0x3fe84f6b98b6de26,3 +np.float64,0x3fc9cde92f339bd0,0x3fc9768a88b2c97c,3 +np.float64,0x3fc819c3b3303388,0x3fc7d25e1526e731,3 +np.float64,0x3fd3e848d2a7d090,0x3fd34cd9e6af558f,3 +np.float64,0x3fe19dacac633b5a,0x3fe01a6b4d27adc2,3 +np.float64,0x800b190da316321c,0x800b190da316321c,3 +np.float64,0xd5c69711ab8d3,0xd5c69711ab8d3,3 +np.float64,0xbfdc31bed7b8637e,0xbfda8ea3c1309d6d,3 +np.float64,0xbfd02ba007a05740,0xbfcfad86f0d756dc,3 +np.float64,0x3fe874473d70e88e,0x3fe4e1793cd82123,3 +np.float64,0xffb465585c28cab0,0xbff921fb54442d18,3 +np.float64,0xbfb5d8e13e2bb1c0,0xbfb5cb5c7807fc4d,3 +np.float64,0xffe80f933bf01f26,0xbff921fb54442d18,3 +np.float64,0x7feea783f5fd4f07,0x3ff921fb54442d18,3 +np.float64,0xbfae6665f43cccd0,0xbfae5d45b0a6f90a,3 +np.float64,0x800bd6ef5a77addf,0x800bd6ef5a77addf,3 +np.float64,0x800d145babda28b8,0x800d145babda28b8,3 +np.float64,0x39de155473bc3,0x39de155473bc3,3 +np.float64,0x3fefbd6bb1ff7ad8,0x3fe9008e73a3296e,3 +np.float64,0x3fc40bca3d281798,0x3fc3e2710e167007,3 +np.float64,0x3fcae0918335c120,0x3fca7e09e704a678,3 +np.float64,0x51287fbea2511,0x51287fbea2511,3 +np.float64,0x7fa6bc33a82d7866,0x3ff921fb54442d18,3 +np.float64,0xe72a2bebce546,0xe72a2bebce546,3 +np.float64,0x3fe1c8fd686391fa,0x3fe03b9622aeb4e3,3 
+np.float64,0x3fe2a73ac3654e76,0x3fe0e36bc1ee4ac4,3 +np.float64,0x59895218b312b,0x59895218b312b,3 +np.float64,0xc6dc25c78db85,0xc6dc25c78db85,3 +np.float64,0xbfc06cfac520d9f4,0xbfc0561f85d2c907,3 +np.float64,0xbfea912dc4f5225c,0xbfe62c3b1c01c793,3 +np.float64,0x3fb78ce89a2f19d0,0x3fb77bfcb65a67d3,3 +np.float64,0xbfece5cdea39cb9c,0xbfe78103d24099e5,3 +np.float64,0x30d3054e61a61,0x30d3054e61a61,3 +np.float64,0xbfd3fe26fba7fc4e,0xbfd360c8447c4f7a,3 +np.float64,0x800956072a92ac0f,0x800956072a92ac0f,3 +np.float64,0x7fe639b3b6ec7366,0x3ff921fb54442d18,3 +np.float64,0x800ee30240bdc605,0x800ee30240bdc605,3 +np.float64,0x7fef6af0d2bed5e1,0x3ff921fb54442d18,3 +np.float64,0xffefce8725ff9d0d,0xbff921fb54442d18,3 +np.float64,0x3fe2e311da65c624,0x3fe10ff1623089dc,3 +np.float64,0xbfe7e5cbe56fcb98,0xbfe486c3daeda67c,3 +np.float64,0x80095bc14472b783,0x80095bc14472b783,3 +np.float64,0xffef0cb4553e1968,0xbff921fb54442d18,3 +np.float64,0xe3e60567c7cc1,0xe3e60567c7cc1,3 +np.float64,0xffde919f06bd233e,0xbff921fb54442d18,3 +np.float64,0x3fe3f9632e27f2c6,0x3fe1db49ebd21c4e,3 +np.float64,0x9dee9a233bdd4,0x9dee9a233bdd4,3 +np.float64,0xbfe3bb0602e7760c,0xbfe1ae41b6d4c488,3 +np.float64,0x3fc46945a128d288,0x3fc43da54c6c6a2a,3 +np.float64,0x7fdef149ac3de292,0x3ff921fb54442d18,3 +np.float64,0x800a96c76d752d8f,0x800a96c76d752d8f,3 +np.float64,0x3f971a32382e3464,0x3f9719316b9e9baf,3 +np.float64,0x7fe97bcf15b2f79d,0x3ff921fb54442d18,3 +np.float64,0x7fea894558f5128a,0x3ff921fb54442d18,3 +np.float64,0x3fc9e3be1933c780,0x3fc98b847c3923eb,3 +np.float64,0x3f7accac40359959,0x3f7acc9330741b64,3 +np.float64,0xa80c136950183,0xa80c136950183,3 +np.float64,0x3fe408732b2810e6,0x3fe1e61e7cbc8824,3 +np.float64,0xffa775bc042eeb80,0xbff921fb54442d18,3 +np.float64,0x3fbf04bd223e0980,0x3fbede37b8fc697e,3 +np.float64,0x7fd999b34c333366,0x3ff921fb54442d18,3 +np.float64,0xe72146dfce429,0xe72146dfce429,3 +np.float64,0x4f511ee49ea24,0x4f511ee49ea24,3 +np.float64,0xffb3e6e58827cdc8,0xbff921fb54442d18,3 +np.float64,0x3fd1f180cfa3e300,0x3fd17e85b2871de2,3 +np.float64,0x97c8e45b2f91d,0x97c8e45b2f91d,3 +np.float64,0xbfeeb20e88fd641d,0xbfe8778f878440bf,3 +np.float64,0xbfe1fc6dee23f8dc,0xbfe062c815a93cde,3 +np.float64,0xab4bf71f5697f,0xab4bf71f5697f,3 +np.float64,0xa9675a2952cec,0xa9675a2952cec,3 +np.float64,0xbfef3ea4a33e7d49,0xbfe8c02743ebc1b6,3 +np.float64,0x3fe22a2eafa4545d,0x3fe08577afca52a9,3 +np.float64,0x3fe8a08daaf1411c,0x3fe4fd5a34f05305,3 +np.float64,0xbfc6cda77b2d9b50,0xbfc6910bcfa0cf4f,3 +np.float64,0x3fec398394387307,0x3fe7211dd5276500,3 +np.float64,0x3fe36c95c626d92c,0x3fe1752e5aa2357b,3 +np.float64,0xffd8b9e7073173ce,0xbff921fb54442d18,3 +np.float64,0xffe19f043ae33e08,0xbff921fb54442d18,3 +np.float64,0x800e3640709c6c81,0x800e3640709c6c81,3 +np.float64,0x3fe7d6c20aafad84,0x3fe47d1a3307d9c8,3 +np.float64,0x80093fd63b727fad,0x80093fd63b727fad,3 +np.float64,0xffe1a671a4634ce3,0xbff921fb54442d18,3 +np.float64,0xbfe53a6b386a74d6,0xbfe2be41859cb10d,3 +np.float64,0xbfed149a097a2934,0xbfe79ab7e3e93c1c,3 +np.float64,0x7fc2769a5724ed34,0x3ff921fb54442d18,3 +np.float64,0xffd01e4e99a03c9e,0xbff921fb54442d18,3 +np.float64,0xa61f38434c3e7,0xa61f38434c3e7,3 +np.float64,0x800ad4ac5195a959,0x800ad4ac5195a959,3 +np.float64,0x7ff8000000000000,0x7ff8000000000000,3 +np.float64,0x80034a45b6c6948c,0x80034a45b6c6948c,3 +np.float64,0x6350b218c6a17,0x6350b218c6a17,3 +np.float64,0xfff0000000000000,0xbff921fb54442d18,3 +np.float64,0x3fe363e759e6c7cf,0x3fe16ed58d80f9ce,3 +np.float64,0xffe3b98e59e7731c,0xbff921fb54442d18,3 
+np.float64,0x3fdbf7b40337ef68,0x3fda5df7ad3c80f9,3 +np.float64,0xbfe9cdf784739bef,0xbfe5b74f346ef93d,3 +np.float64,0xbfc321bea326437c,0xbfc2fdc0d4ff7561,3 +np.float64,0xbfe40f77d2a81ef0,0xbfe1eb28c4ae4dde,3 +np.float64,0x7fe071806960e300,0x3ff921fb54442d18,3 +np.float64,0x7fd269006ea4d200,0x3ff921fb54442d18,3 +np.float64,0x80017a56e0e2f4af,0x80017a56e0e2f4af,3 +np.float64,0x8004b4ea09a969d5,0x8004b4ea09a969d5,3 +np.float64,0xbfedbb01e63b7604,0xbfe7f4f0e84297df,3 +np.float64,0x3fe44454826888a9,0x3fe210ff6d005706,3 +np.float64,0xbfe0e77e6ea1cefd,0xbfdf1a977da33402,3 +np.float64,0xbfed6d4c8c3ada99,0xbfe7cb0932093f60,3 +np.float64,0x1d74cb9e3ae9a,0x1d74cb9e3ae9a,3 +np.float64,0x80082a785d1054f1,0x80082a785d1054f1,3 +np.float64,0x3fe58393266b0726,0x3fe2f0d8e91d4887,3 +np.float64,0xffe4028899680510,0xbff921fb54442d18,3 +np.float64,0x783a2e5af0746,0x783a2e5af0746,3 +np.float64,0x7fcdce88e73b9d11,0x3ff921fb54442d18,3 +np.float64,0x3fc58672a72b0ce5,0x3fc5535e090e56e2,3 +np.float64,0x800889c839b11391,0x800889c839b11391,3 +np.float64,0xffe5e05c466bc0b8,0xbff921fb54442d18,3 +np.float64,0xbfcbef6ebe37dedc,0xbfcb810752468f49,3 +np.float64,0xffe9408563b2810a,0xbff921fb54442d18,3 +np.float64,0xbfee4738367c8e70,0xbfe83f8e5dd7602f,3 +np.float64,0xbfe4aeb587295d6b,0xbfe25c7a0c76a454,3 +np.float64,0xffc9aea0a7335d40,0xbff921fb54442d18,3 +np.float64,0xe1e02199c3c04,0xe1e02199c3c04,3 +np.float64,0xbfbd9400783b2800,0xbfbd729345d1d14f,3 +np.float64,0x7a5418bcf4a84,0x7a5418bcf4a84,3 +np.float64,0x3fdc1c2fa5b83860,0x3fda7c935965ae72,3 +np.float64,0x80076a9f58ced53f,0x80076a9f58ced53f,3 +np.float64,0x3fedc4bf957b897f,0x3fe7fa2a83148f1c,3 +np.float64,0x800981b8a9d30372,0x800981b8a9d30372,3 +np.float64,0xffe1082311621046,0xbff921fb54442d18,3 +np.float64,0xe0091f89c0124,0xe0091f89c0124,3 +np.float64,0xbfce8d674f3d1ad0,0xbfcdfdbf2ddaa0ca,3 +np.float64,0x800516e72eaa2dcf,0x800516e72eaa2dcf,3 +np.float64,0xffe61ee64c6c3dcc,0xbff921fb54442d18,3 +np.float64,0x7fed2683cafa4d07,0x3ff921fb54442d18,3 +np.float64,0xffd4faf27729f5e4,0xbff921fb54442d18,3 +np.float64,0x7fe308fa842611f4,0x3ff921fb54442d18,3 +np.float64,0x3fc612a62b2c2550,0x3fc5db9ddbd4e159,3 +np.float64,0xbfe5b01e766b603d,0xbfe30f72a875e988,3 +np.float64,0x3fc2dd8b9a25bb17,0x3fc2bb06246b9f78,3 +np.float64,0x8170908102e12,0x8170908102e12,3 +np.float64,0x800c1c8a8a583915,0x800c1c8a8a583915,3 +np.float64,0xffe5d91e8b6bb23c,0xbff921fb54442d18,3 +np.float64,0xffd140adee22815c,0xbff921fb54442d18,3 +np.float64,0xbfe2f1f5f8e5e3ec,0xbfe11afa5d749952,3 +np.float64,0xbfed6d1d587ada3b,0xbfe7caef9ecf7651,3 +np.float64,0x3fe9b85e67f370bd,0x3fe5aa3474768982,3 +np.float64,0x7fdc8932edb91265,0x3ff921fb54442d18,3 +np.float64,0x7fd136bc54a26d78,0x3ff921fb54442d18,3 +np.float64,0x800a1ea12a343d43,0x800a1ea12a343d43,3 +np.float64,0x3fec6a5c1b78d4b8,0x3fe73c82235c3f8f,3 +np.float64,0x800fbf6a00df7ed4,0x800fbf6a00df7ed4,3 +np.float64,0xbfd0e6e0cda1cdc2,0xbfd0864bf8cad294,3 +np.float64,0x3fc716df482e2dbf,0x3fc6d7fbfd4a8470,3 +np.float64,0xbfe75990936eb321,0xbfe42bffec3fa0d7,3 +np.float64,0x3fd58e54a02b1ca9,0x3fd4cace1107a5cc,3 +np.float64,0xbfc9c04136338084,0xbfc9696ad2591d54,3 +np.float64,0xdd1f0147ba3e0,0xdd1f0147ba3e0,3 +np.float64,0x5c86a940b90e,0x5c86a940b90e,3 +np.float64,0xbfecae3b8e795c77,0xbfe7624d4988c612,3 +np.float64,0xffd0370595206e0c,0xbff921fb54442d18,3 +np.float64,0xbfdc26d443384da8,0xbfda857ecd33ba9f,3 +np.float64,0xbfd1c849d9a39094,0xbfd15849449cc378,3 +np.float64,0xffee04acdb3c0959,0xbff921fb54442d18,3 +np.float64,0xbfded1056dbda20a,0xbfdcb83b30e1528c,3 
+np.float64,0x7fb7b826622f704c,0x3ff921fb54442d18,3 +np.float64,0xbfee4df8ae7c9bf1,0xbfe8431df9dfd05d,3 +np.float64,0x7fe7f3670e2fe6cd,0x3ff921fb54442d18,3 +np.float64,0x8008ac9ae0d15936,0x8008ac9ae0d15936,3 +np.float64,0x800dce9f3b3b9d3f,0x800dce9f3b3b9d3f,3 +np.float64,0x7fbb19db203633b5,0x3ff921fb54442d18,3 +np.float64,0x3fe56c7f302ad8fe,0x3fe2e0eec3ad45fd,3 +np.float64,0x7fe82c05c570580b,0x3ff921fb54442d18,3 +np.float64,0xc0552b7780aa6,0xc0552b7780aa6,3 +np.float64,0x39d40e3073a83,0x39d40e3073a83,3 +np.float64,0x3fd8db54d731b6aa,0x3fd7b589b3ee9b20,3 +np.float64,0xffcdd355233ba6ac,0xbff921fb54442d18,3 +np.float64,0x3fbe97b3a43d2f67,0x3fbe72bca9be0348,3 +np.float64,0xbff0000000000000,0xbfe921fb54442d18,3 +np.float64,0xbfb4f55e6229eac0,0xbfb4e96df18a75a7,3 +np.float64,0xbfc66399ba2cc734,0xbfc62a3298bd96fc,3 +np.float64,0x3fd00988bb201311,0x3fcf6d67a9374c38,3 +np.float64,0x7fe471867d28e30c,0x3ff921fb54442d18,3 +np.float64,0xbfe38e0e64271c1d,0xbfe18d9888b7523b,3 +np.float64,0x8009dc127573b825,0x8009dc127573b825,3 +np.float64,0x800047bde4608f7d,0x800047bde4608f7d,3 +np.float64,0xffeede42c77dbc85,0xbff921fb54442d18,3 +np.float64,0xd8cf6d13b19ee,0xd8cf6d13b19ee,3 +np.float64,0xbfd08fb302a11f66,0xbfd034b1f8235e23,3 +np.float64,0x7fdb404c0b368097,0x3ff921fb54442d18,3 +np.float64,0xbfd6ba0438ad7408,0xbfd5d673e3276ec1,3 +np.float64,0xffd9568027b2ad00,0xbff921fb54442d18,3 +np.float64,0xbfb313b73e262770,0xbfb30ab4acb4fa67,3 +np.float64,0xbfe2dc1a15e5b834,0xbfe10ac5f8f3acd3,3 +np.float64,0xbfee426bf4bc84d8,0xbfe83d061df91edd,3 +np.float64,0xd9142c2fb2286,0xd9142c2fb2286,3 +np.float64,0x7feb0d11dff61a23,0x3ff921fb54442d18,3 +np.float64,0x800fea5b509fd4b7,0x800fea5b509fd4b7,3 +np.float64,0x3fe1a8818da35103,0x3fe022ba1bdf366e,3 +np.float64,0x8010000000000000,0x8010000000000000,3 +np.float64,0xbfd8fc6de6b1f8dc,0xbfd7d24726ed8dcc,3 +np.float64,0xf4b3dc2de967c,0xf4b3dc2de967c,3 +np.float64,0x8af0409b15e08,0x8af0409b15e08,3 +np.float64,0x3fb21e6934243cd2,0x3fb216b065f8709a,3 +np.float64,0x3fc53069392a60d2,0x3fc4ffa931211fb9,3 +np.float64,0xffc955812c32ab04,0xbff921fb54442d18,3 +np.float64,0xbfe3de42b1a7bc86,0xbfe1c7bd1324de75,3 +np.float64,0x1dc149a03b82a,0x1dc149a03b82a,3 +np.float64,0x8001bc5a24a378b5,0x8001bc5a24a378b5,3 +np.float64,0x3da14c407b44,0x3da14c407b44,3 +np.float64,0x80025e8da924bd1c,0x80025e8da924bd1c,3 +np.float64,0xbfcb0141c9360284,0xbfca9d572ea5e1f3,3 +np.float64,0xc90036fd92007,0xc90036fd92007,3 +np.float64,0x138312c427063,0x138312c427063,3 +np.float64,0x800dda3a963bb475,0x800dda3a963bb475,3 +np.float64,0x3fe9339934f26732,0x3fe558e723291f78,3 +np.float64,0xbfea8357027506ae,0xbfe6240826faaf48,3 +np.float64,0x7fe04735cae08e6b,0x3ff921fb54442d18,3 +np.float64,0x3fe29aca3c653594,0x3fe0da214c8bc6a4,3 +np.float64,0x3fbe1f09a03c3e13,0x3fbdfbbefef0155b,3 +np.float64,0x816ee4ad02ddd,0x816ee4ad02ddd,3 +np.float64,0xffddd1b31d3ba366,0xbff921fb54442d18,3 +np.float64,0x3fe2e01e0625c03c,0x3fe10dc0bd6677c2,3 +np.float64,0x3fec6bcf1978d79e,0x3fe73d518cddeb7c,3 +np.float64,0x7fe01aaaf8603555,0x3ff921fb54442d18,3 +np.float64,0xdf300cc5be602,0xdf300cc5be602,3 +np.float64,0xbfe71c01a36e3804,0xbfe403af80ce47b8,3 +np.float64,0xffa5be00ac2b7c00,0xbff921fb54442d18,3 +np.float64,0xbfda9ba711b5374e,0xbfd93775e3ac6bda,3 +np.float64,0xbfe56d8a27eadb14,0xbfe2e1a7185e8e6d,3 +np.float64,0x800f1bc937be3792,0x800f1bc937be3792,3 +np.float64,0x800a61d93c74c3b3,0x800a61d93c74c3b3,3 +np.float64,0x7fe71a52fcae34a5,0x3ff921fb54442d18,3 +np.float64,0x7fb4aef256295de4,0x3ff921fb54442d18,3 
+np.float64,0x3fe6c1e861ed83d1,0x3fe3c828f281a7ef,3 +np.float64,0x3fba128402342508,0x3fb9fb94cf141860,3 +np.float64,0x3fee55a7ecfcab50,0x3fe8472a9af893ee,3 +np.float64,0x3fe586f31b2b0de6,0x3fe2f32bce9e91bc,3 +np.float64,0xbfbb1d1442363a28,0xbfbb034c7729d5f2,3 +np.float64,0xc78b4d3f8f16a,0xc78b4d3f8f16a,3 +np.float64,0x7fdbc277d4b784ef,0x3ff921fb54442d18,3 +np.float64,0xbfa728ca2c2e5190,0xbfa724c04e73ccbd,3 +np.float64,0x7fefc7b2143f8f63,0x3ff921fb54442d18,3 +np.float64,0x3fd153a3dda2a748,0x3fd0ebccd33a4dca,3 +np.float64,0xbfe18a6eace314de,0xbfe00ba32ec89d30,3 +np.float64,0x7feef518537dea30,0x3ff921fb54442d18,3 +np.float64,0x8005f007cd4be010,0x8005f007cd4be010,3 +np.float64,0x7fd890b840b12170,0x3ff921fb54442d18,3 +np.float64,0x7feed0582ebda0af,0x3ff921fb54442d18,3 +np.float64,0x1013f53220280,0x1013f53220280,3 +np.float64,0xbfe77273986ee4e7,0xbfe43c375a8bf6de,3 +np.float64,0x7fe3ab8918675711,0x3ff921fb54442d18,3 +np.float64,0xbfc6ad515b2d5aa4,0xbfc671b2f7f86624,3 +np.float64,0x7fcd86231d3b0c45,0x3ff921fb54442d18,3 +np.float64,0xffe2523299a4a464,0xbff921fb54442d18,3 +np.float64,0x7fcadc5a1b35b8b3,0x3ff921fb54442d18,3 +np.float64,0x3fe5e020c4ebc042,0x3fe330418eec75bd,3 +np.float64,0x7fe332a9dc266553,0x3ff921fb54442d18,3 +np.float64,0xfa11dc21f425,0xfa11dc21f425,3 +np.float64,0xbec800177d900,0xbec800177d900,3 +np.float64,0x3fcadd057835ba0b,0x3fca7aa42face8bc,3 +np.float64,0xbfe6b9a206ad7344,0xbfe3c2a9719803de,3 +np.float64,0x3fbb4250b63684a0,0x3fbb281e9cefc519,3 +np.float64,0x7fef8787517f0f0e,0x3ff921fb54442d18,3 +np.float64,0x8001315c2d6262b9,0x8001315c2d6262b9,3 +np.float64,0xbfd94e3cf2b29c7a,0xbfd819257d36f56c,3 +np.float64,0xf1f325abe3e65,0xf1f325abe3e65,3 +np.float64,0x7fd6c07079ad80e0,0x3ff921fb54442d18,3 +np.float64,0x7fe328b075a65160,0x3ff921fb54442d18,3 +np.float64,0x7fe7998f812f331e,0x3ff921fb54442d18,3 +np.float64,0xffe026bb65604d76,0xbff921fb54442d18,3 +np.float64,0xffd6c06de8ad80dc,0xbff921fb54442d18,3 +np.float64,0x3fcd5a37bf3ab46f,0x3fccda82935d98ce,3 +np.float64,0xffc3e5a45227cb48,0xbff921fb54442d18,3 +np.float64,0x3febf7dd8177efbc,0x3fe6fc0bb999883e,3 +np.float64,0x7fd7047ea92e08fc,0x3ff921fb54442d18,3 +np.float64,0x35b3fc406b680,0x35b3fc406b680,3 +np.float64,0x7fd52e97632a5d2e,0x3ff921fb54442d18,3 +np.float64,0x3fd464d401a8c9a8,0x3fd3be2967fc97c3,3 +np.float64,0x800e815b2ebd02b6,0x800e815b2ebd02b6,3 +np.float64,0x3fca8428af350850,0x3fca257b466b8970,3 +np.float64,0x8007b7526f6f6ea6,0x8007b7526f6f6ea6,3 +np.float64,0x82f60a8f05ec2,0x82f60a8f05ec2,3 +np.float64,0x3fb71a5d0a2e34c0,0x3fb70a629ef8e2a2,3 +np.float64,0x7fc8570c7d30ae18,0x3ff921fb54442d18,3 +np.float64,0x7fe5528e77eaa51c,0x3ff921fb54442d18,3 +np.float64,0xffc20dbbf1241b78,0xbff921fb54442d18,3 +np.float64,0xeb13368fd6267,0xeb13368fd6267,3 +np.float64,0x7fe7d529056faa51,0x3ff921fb54442d18,3 +np.float64,0x3fecd02eabf9a05d,0x3fe77516f0ba1ac4,3 +np.float64,0x800fcba6a09f974d,0x800fcba6a09f974d,3 +np.float64,0x7fe7e8e015afd1bf,0x3ff921fb54442d18,3 +np.float64,0xbfd271a382a4e348,0xbfd1f513a191c595,3 +np.float64,0x9f1014013e21,0x9f1014013e21,3 +np.float64,0x3fc05da47f20bb49,0x3fc04708a13a3a47,3 +np.float64,0x3fe0f427dda1e850,0x3fdf2e60ba8678b9,3 +np.float64,0xbfecb29fa539653f,0xbfe764bc791c45dd,3 +np.float64,0x45881ec68b104,0x45881ec68b104,3 +np.float64,0x8000000000000001,0x8000000000000001,3 +np.float64,0x3fe9c67ee1338cfe,0x3fe5b2c7b3df6ce8,3 +np.float64,0x7fedb8fef6bb71fd,0x3ff921fb54442d18,3 +np.float64,0x3fe54f6aaaea9ed6,0x3fe2ccd1df2abaa9,3 +np.float64,0x7feff58a1bbfeb13,0x3ff921fb54442d18,3 
+np.float64,0x7fe3b62827276c4f,0x3ff921fb54442d18,3 +np.float64,0x3fe5feb682ebfd6d,0x3fe345105bc6d980,3 +np.float64,0x3fe49f38d9693e72,0x3fe2518b2824757f,3 +np.float64,0x8006bfd27c6d7fa6,0x8006bfd27c6d7fa6,3 +np.float64,0x3fc13409e2226814,0x3fc119ce0c01a5a2,3 +np.float64,0x95f8c7212bf19,0x95f8c7212bf19,3 +np.float64,0x3fd9f0fa6133e1f5,0x3fd8a567515edecf,3 +np.float64,0x3fef95cbe5ff2b98,0x3fe8ec88c768ba0b,3 +np.float64,0x3fbed28bba3da510,0x3fbeacbf136e51c2,3 +np.float64,0xbfd3987aeca730f6,0xbfd303fca58e3e60,3 +np.float64,0xbfed0f90cbfa1f22,0xbfe797f59249410d,3 +np.float64,0xffe55d8cbf2abb19,0xbff921fb54442d18,3 +np.float64,0x3feb4d9fc6769b40,0x3fe69a88131a1f1f,3 +np.float64,0x80085569acd0aad4,0x80085569acd0aad4,3 +np.float64,0x20557a6e40ab0,0x20557a6e40ab0,3 +np.float64,0x3fead2fd5df5a5fb,0x3fe653091f33b27f,3 +np.float64,0x3fe7b9983eaf7330,0x3fe46a50c4b5235e,3 +np.float64,0xffdad237ffb5a470,0xbff921fb54442d18,3 +np.float64,0xbfe5cc39a4eb9874,0xbfe322ad3a903f93,3 +np.float64,0x800ad6eecb35adde,0x800ad6eecb35adde,3 +np.float64,0xffec620f6438c41e,0xbff921fb54442d18,3 +np.float64,0xbfe5ef29122bde52,0xbfe33a7dfcc255e2,3 +np.float64,0x3fd451e7d0a8a3d0,0x3fd3acfa4939af10,3 +np.float64,0x8003ea93c127d528,0x8003ea93c127d528,3 +np.float64,0x800b48d37c9691a7,0x800b48d37c9691a7,3 +np.float64,0x3fe7e202acafc405,0x3fe484558246069b,3 +np.float64,0x80070c9b686e1938,0x80070c9b686e1938,3 +np.float64,0xbfda90bbc6352178,0xbfd92e25fcd12288,3 +np.float64,0x800e1ffebb1c3ffe,0x800e1ffebb1c3ffe,3 +np.float64,0x3ff0000000000000,0x3fe921fb54442d18,3 +np.float64,0xffd8cfdd46319fba,0xbff921fb54442d18,3 +np.float64,0x7fd8cd4182319a82,0x3ff921fb54442d18,3 +np.float64,0x3fed8bb778bb176f,0x3fe7db7c77c4c694,3 +np.float64,0x3fc74a70302e94e0,0x3fc709e95d6defec,3 +np.float64,0x3fe87269d070e4d4,0x3fe4e04bcc4a2137,3 +np.float64,0x7fb48223f6290447,0x3ff921fb54442d18,3 +np.float64,0xffe8ec444b71d888,0xbff921fb54442d18,3 +np.float64,0x7fde17d280bc2fa4,0x3ff921fb54442d18,3 +np.float64,0x3fd1cbde01a397bc,0x3fd15b9bb7b3147b,3 +np.float64,0x800883a64451074d,0x800883a64451074d,3 +np.float64,0x7fe3160a3f262c13,0x3ff921fb54442d18,3 +np.float64,0xbfe051d4d9a0a3aa,0xbfde2ecf14dc75fb,3 +np.float64,0xbfd89de689b13bce,0xbfd780176d1a28a3,3 +np.float64,0x3fecde2bf779bc58,0x3fe77ccf10bdd8e2,3 +np.float64,0xffe75774dc6eaee9,0xbff921fb54442d18,3 +np.float64,0x7fe834414d706882,0x3ff921fb54442d18,3 +np.float64,0x1,0x1,3 +np.float64,0xbfea5e4e4a74bc9c,0xbfe60e0601711835,3 +np.float64,0xffec248d4cb8491a,0xbff921fb54442d18,3 +np.float64,0xffd9942c2c332858,0xbff921fb54442d18,3 +np.float64,0xa9db36a553b67,0xa9db36a553b67,3 +np.float64,0x7fec630718b8c60d,0x3ff921fb54442d18,3 +np.float64,0xbfd062188f20c432,0xbfd009ecd652be89,3 +np.float64,0x8001b84e3023709d,0x8001b84e3023709d,3 +np.float64,0xbfe9e26d7cb3c4db,0xbfe5c3b157ecf668,3 +np.float64,0xbfef66ddf33ecdbc,0xbfe8d4b1f6410a24,3 +np.float64,0x3fd8d7109431ae21,0x3fd7b1d4860719a2,3 +np.float64,0xffee0f53107c1ea5,0xbff921fb54442d18,3 +np.float64,0x80000b4fd60016a0,0x80000b4fd60016a0,3 +np.float64,0xbfd99ff6e5333fee,0xbfd85fb3cbdaa049,3 +np.float64,0xbfe9cfd268339fa5,0xbfe5b86ef021a1b1,3 +np.float64,0xe32eace1c65d6,0xe32eace1c65d6,3 +np.float64,0xffc81f6627303ecc,0xbff921fb54442d18,3 +np.float64,0x7fe98dadde331b5b,0x3ff921fb54442d18,3 +np.float64,0xbfbcebd11e39d7a0,0xbfbccc8ec47883c7,3 +np.float64,0x7fe164880f22c90f,0x3ff921fb54442d18,3 +np.float64,0x800467c0cae8cf82,0x800467c0cae8cf82,3 +np.float64,0x800071e4b140e3ca,0x800071e4b140e3ca,3 +np.float64,0xbfc87a7eae30f4fc,0xbfc82fbc55bb0f24,3 
+np.float64,0xffb2e0e23225c1c8,0xbff921fb54442d18,3 +np.float64,0x20ef338041df,0x20ef338041df,3 +np.float64,0x7fe6de71ca6dbce3,0x3ff921fb54442d18,3 +np.float64,0x5d1fa026ba3f5,0x5d1fa026ba3f5,3 +np.float64,0xffd112a9ce222554,0xbff921fb54442d18,3 +np.float64,0x3fb351f66626a3ed,0x3fb3489ab578c452,3 +np.float64,0x7fef7b2bd3bef657,0x3ff921fb54442d18,3 +np.float64,0xffe144f5d4e289eb,0xbff921fb54442d18,3 +np.float64,0xffd63a6750ac74ce,0xbff921fb54442d18,3 +np.float64,0x7fd2d8bb25a5b175,0x3ff921fb54442d18,3 +np.float64,0x3fec5920a078b242,0x3fe732dcffcf6521,3 +np.float64,0x80009a8b7f813518,0x80009a8b7f813518,3 +np.float64,0x3fdea220893d4441,0x3fdc921edf6bf3d8,3 +np.float64,0x8006cee2208d9dc5,0x8006cee2208d9dc5,3 +np.float64,0xdd0b0081ba17,0xdd0b0081ba17,3 +np.float64,0x7ff4000000000000,0x7ffc000000000000,3 +np.float64,0xbfdac33955358672,0xbfd9592bce7daf1f,3 +np.float64,0x7fe8301d7170603a,0x3ff921fb54442d18,3 +np.float64,0xbfc1d34d8523a69c,0xbfc1b62449af9684,3 +np.float64,0x800c62239458c447,0x800c62239458c447,3 +np.float64,0xffd398c009a73180,0xbff921fb54442d18,3 +np.float64,0xbfe0c6d9ee218db4,0xbfdee777557f4401,3 +np.float64,0x3feccdd373799ba7,0x3fe773c9c2263f89,3 +np.float64,0xbfd21898bda43132,0xbfd1a2be8545fcc5,3 +np.float64,0x3fd77019b62ee033,0x3fd67793cabdf267,3 +np.float64,0x7fa609cad42c1395,0x3ff921fb54442d18,3 +np.float64,0x7fb4eaea5a29d5d4,0x3ff921fb54442d18,3 +np.float64,0x3fc570dc9a2ae1b9,0x3fc53e5f6218a799,3 +np.float64,0x800344ae8466895e,0x800344ae8466895e,3 +np.float64,0xbfc7c985252f930c,0xbfc784d60fa27bac,3 +np.float64,0xffaa2929fc345250,0xbff921fb54442d18,3 +np.float64,0xffe63e5ee9ac7cbe,0xbff921fb54442d18,3 +np.float64,0x73f0280ce7e06,0x73f0280ce7e06,3 +np.float64,0xffc525f8822a4bf0,0xbff921fb54442d18,3 +np.float64,0x7fd744d00aae899f,0x3ff921fb54442d18,3 +np.float64,0xbfe0fe590761fcb2,0xbfdf3e493e8b1f32,3 +np.float64,0xfae04ae7f5c0a,0xfae04ae7f5c0a,3 +np.float64,0xef821939df043,0xef821939df043,3 +np.float64,0x7fef6135843ec26a,0x3ff921fb54442d18,3 +np.float64,0xbfebf34dcbf7e69c,0xbfe6f97588a8f911,3 +np.float64,0xbfeec0b498fd8169,0xbfe87f2eceeead12,3 +np.float64,0x7fb67161b42ce2c2,0x3ff921fb54442d18,3 +np.float64,0x3fdcfd998639fb33,0x3fdb38934927c096,3 +np.float64,0xffda5960bc34b2c2,0xbff921fb54442d18,3 +np.float64,0xbfe11f8c71223f19,0xbfdf71fe770c96ab,3 +np.float64,0x3fe4ac1bab695838,0x3fe25aa4517b8322,3 +np.float64,0x3f730458a02608b1,0x3f73044fabb5e999,3 +np.float64,0x3fdb14ffcdb62a00,0x3fd99ea6c241a3ed,3 +np.float64,0xbfc93208cd326410,0xbfc8e09d78b6d4db,3 +np.float64,0x19e734dc33ce8,0x19e734dc33ce8,3 +np.float64,0x3fe5e98428abd308,0x3fe336a6a085eb55,3 +np.float64,0x7fec672a1378ce53,0x3ff921fb54442d18,3 +np.float64,0x800f8bd8d4ff17b2,0x800f8bd8d4ff17b2,3 +np.float64,0xbfe5a12e4e6b425c,0xbfe30533f99d5d06,3 +np.float64,0x75a34cb0eb46a,0x75a34cb0eb46a,3 +np.float64,0x7fe1d21d16a3a439,0x3ff921fb54442d18,3 +np.float64,0x7ff0000000000000,0x3ff921fb54442d18,3 +np.float64,0xffe0f50db261ea1b,0xbff921fb54442d18,3 +np.float64,0xbfd9dc22feb3b846,0xbfd8937ec965a501,3 +np.float64,0x8009d68e48d3ad1d,0x8009d68e48d3ad1d,3 +np.float64,0xbfe2eba620e5d74c,0xbfe1164d7d273c60,3 +np.float64,0x992efa09325e0,0x992efa09325e0,3 +np.float64,0x3fdab640ea356c82,0x3fd94e20cab88db2,3 +np.float64,0x69a6f04ad34df,0x69a6f04ad34df,3 +np.float64,0x3fe397df25272fbe,0x3fe194bd1a3a6192,3 +np.float64,0xebcce9fdd799d,0xebcce9fdd799d,3 +np.float64,0x3fbb49490c369292,0x3fbb2f02eccc497d,3 +np.float64,0xffd871f980b0e3f4,0xbff921fb54442d18,3 +np.float64,0x800348f6966691ee,0x800348f6966691ee,3 
+np.float64,0xbfebc270a7f784e1,0xbfe6dda8d0d80f26,3 +np.float64,0xffd6d559b1adaab4,0xbff921fb54442d18,3 +np.float64,0x3fec3635c0b86c6c,0x3fe71f420256e43e,3 +np.float64,0x7fbc82ad7039055a,0x3ff921fb54442d18,3 +np.float64,0x7f873050602e60a0,0x3ff921fb54442d18,3 +np.float64,0x3fca44b8c3348970,0x3fc9e8a1a1a2d96e,3 +np.float64,0x3fe0fc308fe1f861,0x3fdf3aeb469ea225,3 +np.float64,0x7fefc27de8bf84fb,0x3ff921fb54442d18,3 +np.float64,0x8005f3f3916be7e8,0x8005f3f3916be7e8,3 +np.float64,0xbfd4278c7c284f18,0xbfd38678988873b6,3 +np.float64,0x435eafc486bd7,0x435eafc486bd7,3 +np.float64,0xbfd01f5199203ea4,0xbfcf96631f2108a3,3 +np.float64,0xffd5ee9185abdd24,0xbff921fb54442d18,3 +np.float64,0xffedb363257b66c5,0xbff921fb54442d18,3 +np.float64,0x800d68e6e11ad1ce,0x800d68e6e11ad1ce,3 +np.float64,0xbfcf687f8e3ed100,0xbfceccb771b0d39a,3 +np.float64,0x7feb3b9ef2f6773d,0x3ff921fb54442d18,3 +np.float64,0x3fe15ec5ca62bd8c,0x3fdfd3fab9d96f81,3 +np.float64,0x10000000000000,0x10000000000000,3 +np.float64,0xd2386f81a470e,0xd2386f81a470e,3 +np.float64,0xb9feed4573fde,0xb9feed4573fde,3 +np.float64,0x3fe7ed25c9efda4c,0x3fe48b7b72db4014,3 +np.float64,0xbfe01478726028f1,0xbfddcd1f5a2efc59,3 +np.float64,0x9946d02f328da,0x9946d02f328da,3 +np.float64,0xbfe3bb67f06776d0,0xbfe1ae88aa81c5a6,3 +np.float64,0xbfd3fd8a4c27fb14,0xbfd3603982e3b78d,3 +np.float64,0xffd5c3ab912b8758,0xbff921fb54442d18,3 +np.float64,0xffd5f502b12bea06,0xbff921fb54442d18,3 +np.float64,0xbfc64981ec2c9304,0xbfc610e0382b1fa6,3 +np.float64,0xffec42e3413885c6,0xbff921fb54442d18,3 +np.float64,0x80084eb4ed109d6a,0x80084eb4ed109d6a,3 +np.float64,0xbfd17cac9fa2f95a,0xbfd112020588a4b3,3 +np.float64,0xbfd06c1359a0d826,0xbfd0134a28aa9a66,3 +np.float64,0x7fdc3d7c03b87af7,0x3ff921fb54442d18,3 +np.float64,0x7bdf5aaaf7bec,0x7bdf5aaaf7bec,3 +np.float64,0xbfee3cd966fc79b3,0xbfe83a14bc07ac3b,3 +np.float64,0x7fec910da3f9221a,0x3ff921fb54442d18,3 +np.float64,0xffb4ea667029d4d0,0xbff921fb54442d18,3 +np.float64,0x800103d7cce207b0,0x800103d7cce207b0,3 +np.float64,0x7fbb229a6c364534,0x3ff921fb54442d18,3 +np.float64,0x0,0x0,3 +np.float64,0xffd8fccd0331f99a,0xbff921fb54442d18,3 +np.float64,0xbfd0784ae1a0f096,0xbfd01ebff62e39ad,3 +np.float64,0xbfed2ec9b3ba5d93,0xbfe7a9099410bc76,3 +np.float64,0x800690b8d16d2172,0x800690b8d16d2172,3 +np.float64,0x7fc061b26520c364,0x3ff921fb54442d18,3 +np.float64,0x8007ec47054fd88f,0x8007ec47054fd88f,3 +np.float64,0x775546b6eeaa9,0x775546b6eeaa9,3 +np.float64,0x8005e00fb56bc020,0x8005e00fb56bc020,3 +np.float64,0xbfe510f8d0ea21f2,0xbfe2a16862b5a37f,3 +np.float64,0xffd87a6bf3b0f4d8,0xbff921fb54442d18,3 +np.float64,0x800906e3d0520dc8,0x800906e3d0520dc8,3 +np.float64,0x2296f000452f,0x2296f000452f,3 +np.float64,0xbfe3189fa2e63140,0xbfe1378c0e005be4,3 +np.float64,0xb4d2447f69a49,0xb4d2447f69a49,3 +np.float64,0xffd056a24a20ad44,0xbff921fb54442d18,3 +np.float64,0xbfe3b23fe4e76480,0xbfe1a7e5840fcbeb,3 +np.float64,0x80018ee270831dc6,0x80018ee270831dc6,3 +np.float64,0x800df89f245bf13e,0x800df89f245bf13e,3 +np.float64,0x3fee1409d7bc2814,0x3fe824779d133232,3 +np.float64,0xbfef8d81667f1b03,0xbfe8e85523620368,3 +np.float64,0xffd8a6519b314ca4,0xbff921fb54442d18,3 +np.float64,0x7fc7bc86f32f790d,0x3ff921fb54442d18,3 +np.float64,0xffea6159e674c2b3,0xbff921fb54442d18,3 +np.float64,0x3fe153c3fba2a788,0x3fdfc2f74769d300,3 +np.float64,0xffc4261ef3284c3c,0xbff921fb54442d18,3 +np.float64,0x7fe8a8961ff1512b,0x3ff921fb54442d18,3 +np.float64,0xbfe3fb1fd167f640,0xbfe1dc89dcb7ecdf,3 +np.float64,0x3fd88577c2b10af0,0x3fd76acc09660704,3 
+np.float64,0x3fe128ec27e251d8,0x3fdf808fc7ebcd8f,3 +np.float64,0xbfed6ca7c4fad950,0xbfe7caafe9a3e213,3 +np.float64,0xbf9a3912b8347220,0xbf9a379b3349352e,3 +np.float64,0xbfd724d7bcae49b0,0xbfd6351efa2a5fc5,3 +np.float64,0xbfed59700a7ab2e0,0xbfe7c043014c694c,3 +np.float64,0x8002ad435bc55a87,0x8002ad435bc55a87,3 +np.float64,0xffe46ed345a8dda6,0xbff921fb54442d18,3 +np.float64,0x7fd2f1d1d825e3a3,0x3ff921fb54442d18,3 +np.float64,0xbfea0265e23404cc,0xbfe5d6fb3fd30464,3 +np.float64,0xbfd17e049122fc0a,0xbfd113421078bbae,3 +np.float64,0xffea03b986b40772,0xbff921fb54442d18,3 +np.float64,0x800b55331a16aa67,0x800b55331a16aa67,3 +np.float64,0xbfc6fcafbf2df960,0xbfc6be9ecd0ebc1f,3 +np.float64,0xd6a36017ad46c,0xd6a36017ad46c,3 +np.float64,0xbfe9ba86dfb3750e,0xbfe5ab840cb0ef86,3 +np.float64,0x75c4a108eb895,0x75c4a108eb895,3 +np.float64,0x8008d6bc8051ad79,0x8008d6bc8051ad79,3 +np.float64,0xbfd3dc5984a7b8b4,0xbfd341f78e0528ec,3 +np.float64,0xffe1cbb01aa39760,0xbff921fb54442d18,3 +np.float64,0x3fc7e292f52fc526,0x3fc79d0ce9365767,3 +np.float64,0xbfcbeae2bd37d5c4,0xbfcb7cb034f82467,3 +np.float64,0x8000f0c62e21e18d,0x8000f0c62e21e18d,3 +np.float64,0xbfe23d8bc6247b18,0xbfe09418ee35c3c7,3 +np.float64,0x717394bae2e73,0x717394bae2e73,3 +np.float64,0xffa2ef1cc425de40,0xbff921fb54442d18,3 +np.float64,0x3fd938c229b27184,0x3fd806900735c99d,3 +np.float64,0x800bf3ec8a77e7d9,0x800bf3ec8a77e7d9,3 +np.float64,0xffeef41dd57de83b,0xbff921fb54442d18,3 +np.float64,0x8008df97e5b1bf30,0x8008df97e5b1bf30,3 +np.float64,0xffe9ab9d0db35739,0xbff921fb54442d18,3 +np.float64,0x99ff391333fe7,0x99ff391333fe7,3 +np.float64,0x3fb864b4a630c969,0x3fb851e883ea2cf9,3 +np.float64,0x22c1230a45825,0x22c1230a45825,3 +np.float64,0xff2336fbfe467,0xff2336fbfe467,3 +np.float64,0xbfd488f4cea911ea,0xbfd3def0490f5414,3 +np.float64,0x3fa379c78426f38f,0x3fa377607370800b,3 +np.float64,0xbfb0873302210e68,0xbfb08155b78dfd53,3 +np.float64,0xbfdf9ff7c2bf3ff0,0xbfdd5f658e357ad2,3 +np.float64,0x800978719192f0e4,0x800978719192f0e4,3 +np.float64,0xbfba8759ea350eb0,0xbfba6f325013b9e5,3 +np.float64,0xbfdd3e6b06ba7cd6,0xbfdb6e472b6091b0,3 +np.float64,0x7fe0c334a7a18668,0x3ff921fb54442d18,3 +np.float64,0xbfeb971feb772e40,0xbfe6c4e0f61404d1,3 +np.float64,0x3fe2a50968e54a13,0x3fe0e1c8b8d96e85,3 +np.float64,0x800fa9c5515f538b,0x800fa9c5515f538b,3 +np.float64,0x800f8532fbbf0a66,0x800f8532fbbf0a66,3 +np.float64,0x167d6f1e2cfaf,0x167d6f1e2cfaf,3 +np.float64,0xffee88e769fd11ce,0xbff921fb54442d18,3 +np.float64,0xbfeecc8529fd990a,0xbfe885520cdad8ea,3 +np.float64,0xffefffffffffffff,0xbff921fb54442d18,3 +np.float64,0xbfef6a566afed4ad,0xbfe8d6767b4c4235,3 +np.float64,0xffec12415af82482,0xbff921fb54442d18,3 +np.float64,0x3678a20a6cf15,0x3678a20a6cf15,3 +np.float64,0xffe468d54ee8d1aa,0xbff921fb54442d18,3 +np.float64,0x800ad6006795ac01,0x800ad6006795ac01,3 +np.float64,0x8001d5b61063ab6d,0x8001d5b61063ab6d,3 +np.float64,0x800dfcd1863bf9a3,0x800dfcd1863bf9a3,3 +np.float64,0xc9fbff6f93f80,0xc9fbff6f93f80,3 +np.float64,0xffe55c20f9eab842,0xbff921fb54442d18,3 +np.float64,0xbfcb596b6536b2d8,0xbfcaf1b339c5c615,3 +np.float64,0xbfe092689ea124d1,0xbfde94fa58946e51,3 +np.float64,0x3fe9ec733af3d8e6,0x3fe5c9bf5dee2623,3 +np.float64,0x3fe30f3d83261e7b,0x3fe1309fd6620e03,3 +np.float64,0xffd31d7f84263b00,0xbff921fb54442d18,3 +np.float64,0xbfe88d2d3e711a5a,0xbfe4f12b5a136178,3 +np.float64,0xffc81e4ce1303c98,0xbff921fb54442d18,3 +np.float64,0xffe5b96ebfab72dd,0xbff921fb54442d18,3 +np.float64,0x512f0502a25e1,0x512f0502a25e1,3 +np.float64,0x7fa3a376982746ec,0x3ff921fb54442d18,3 
+np.float64,0x80005b5f2f60b6bf,0x80005b5f2f60b6bf,3 +np.float64,0xc337cc69866fa,0xc337cc69866fa,3 +np.float64,0x3fe7719c4caee339,0x3fe43bab42b19e64,3 +np.float64,0x7fde7ec1d93cfd83,0x3ff921fb54442d18,3 +np.float64,0x3fd2f38f3825e71e,0x3fd26cc7b1dd0acb,3 +np.float64,0x7fce298b993c5316,0x3ff921fb54442d18,3 +np.float64,0x56ae3b2cad5c8,0x56ae3b2cad5c8,3 +np.float64,0x3fe9299f2bf2533e,0x3fe552bddd999e72,3 +np.float64,0x7feff3a4823fe748,0x3ff921fb54442d18,3 +np.float64,0xbfd05c670aa0b8ce,0xbfd00494d78e9e97,3 +np.float64,0xffe745323eae8a64,0xbff921fb54442d18,3 diff --git a/.venv/lib/python3.12/site-packages/numpy/core/tests/data/umath-validation-set-arctanh.csv b/.venv/lib/python3.12/site-packages/numpy/core/tests/data/umath-validation-set-arctanh.csv new file mode 100644 index 00000000..a655269d --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/data/umath-validation-set-arctanh.csv @@ -0,0 +1,1429 @@ +dtype,input,output,ulperrortol +np.float32,0x3ee82930,0x3efa60fd,2 +np.float32,0x3f0aa640,0x3f1b3e13,2 +np.float32,0x3ec1a21c,0x3ecbbf8d,2 +np.float32,0x3cdb1740,0x3cdb24a1,2 +np.float32,0xbf28b6f3,0xbf4a86ac,2 +np.float32,0xbe490dcc,0xbe4bb2eb,2 +np.float32,0x80000001,0x80000001,2 +np.float32,0xbf44f9dd,0xbf826ce1,2 +np.float32,0xbf1d66c4,0xbf37786b,2 +np.float32,0x3f0ad26a,0x3f1b7c9b,2 +np.float32,0x3f7b6c54,0x4016aab0,2 +np.float32,0xbf715bb8,0xbfe1a0bc,2 +np.float32,0xbee8a562,0xbefafd6a,2 +np.float32,0x3db94d00,0x3db9cf16,2 +np.float32,0x3ee2970c,0x3ef368b3,2 +np.float32,0x3f3f8614,0x3f77fdca,2 +np.float32,0xbf1fb5f0,0xbf3b3789,2 +np.float32,0x3f798dc0,0x400b96bb,2 +np.float32,0x3e975d64,0x3e9c0573,2 +np.float32,0xbe3f1908,0xbe415d1f,2 +np.float32,0x3f2cea38,0x3f52192e,2 +np.float32,0x3e82f1ac,0x3e85eaa1,2 +np.float32,0x3eab6b30,0x3eb24acd,2 +np.float32,0xbe9bb90c,0xbea0cf5f,2 +np.float32,0xbf43e847,0xbf81202f,2 +np.float32,0xbd232fa0,0xbd2345c0,2 +np.float32,0xbbabbc00,0xbbabbc67,2 +np.float32,0xbf0b2975,0xbf1bf808,2 +np.float32,0xbef5ab0a,0xbf05d305,2 +np.float32,0x3f2cad16,0x3f51a8e2,2 +np.float32,0xbef75940,0xbf06eb08,2 +np.float32,0xbf0c1216,0xbf1d4325,2 +np.float32,0x3e7bdc08,0x3e8090c2,2 +np.float32,0x3da14e10,0x3da1a3c5,2 +np.float32,0x3f627412,0x3fb2bf21,2 +np.float32,0xbd6d08c0,0xbd6d4ca0,2 +np.float32,0x3f3e2368,0x3f74df8b,2 +np.float32,0xbe0df104,0xbe0edc77,2 +np.float32,0x3e8a265c,0x3e8da833,2 +np.float32,0xbdccdbb0,0xbdcd8ba8,2 +np.float32,0x3eb080c4,0x3eb80a44,2 +np.float32,0x3e627800,0x3e6645fe,2 +np.float32,0xbd8be0b0,0xbd8c1886,2 +np.float32,0xbf3282ac,0xbf5cae8c,2 +np.float32,0xbe515910,0xbe545707,2 +np.float32,0xbf2e64ac,0xbf54d637,2 +np.float32,0x3e0fc230,0x3e10b6de,2 +np.float32,0x3eb13ca0,0x3eb8df94,2 +np.float32,0x3f07a3ca,0x3f170572,2 +np.float32,0x3f2c7026,0x3f513935,2 +np.float32,0x3f3c4ec8,0x3f70d67c,2 +np.float32,0xbee9cce8,0xbefc724f,2 +np.float32,0xbe53ca60,0xbe56e3f3,2 +np.float32,0x3dd9e9a0,0x3ddabd98,2 +np.float32,0x3f38b8d4,0x3f69319b,2 +np.float32,0xbe176dc4,0xbe188c1d,2 +np.float32,0xbf322f2e,0xbf5c0c51,2 +np.float32,0xbe9b8676,0xbea097a2,2 +np.float32,0xbca44280,0xbca44823,2 +np.float32,0xbe2b0248,0xbe2ca036,2 +np.float32,0x3d101e80,0x3d102dbd,2 +np.float32,0xbf4eb610,0xbf8f526d,2 +np.float32,0xbec32a50,0xbecd89d1,2 +np.float32,0x3d549100,0x3d54c1ee,2 +np.float32,0x3f78e55e,0x40087025,2 +np.float32,0x3e592798,0x3e5c802d,2 +np.float32,0x3de045d0,0x3de12cfb,2 +np.float32,0xbdad28e0,0xbdad92f7,2 +np.float32,0x3e9a69e0,0x3e9f5e59,2 +np.float32,0x3e809778,0x3e836716,2 +np.float32,0xbf3278d9,0xbf5c9b6d,2 +np.float32,0x3f39fa00,0x3f6bd4a5,2 
+np.float32,0xbec8143c,0xbed34ffa,2 +np.float32,0x3ddb7f40,0x3ddc57e6,2 +np.float32,0x3f0e8342,0x3f20c634,2 +np.float32,0x3f353dda,0x3f6213a4,2 +np.float32,0xbe96b400,0xbe9b4bea,2 +np.float32,0x3e626580,0x3e66328a,2 +np.float32,0xbde091c8,0xbde179df,2 +np.float32,0x3eb47b5c,0x3ebc91ca,2 +np.float32,0xbf282182,0xbf497f2f,2 +np.float32,0x3ea9f64c,0x3eb0a748,2 +np.float32,0x3f28dd4e,0x3f4aca86,2 +np.float32,0xbf71de18,0xbfe3f587,2 +np.float32,0x7fa00000,0x7fe00000,2 +np.float32,0xbf6696a6,0xbfbcf11a,2 +np.float32,0xbc853ae0,0xbc853de2,2 +np.float32,0xbeced246,0xbedb51b8,2 +np.float32,0x3f3472a4,0x3f607e00,2 +np.float32,0xbee90124,0xbefb7117,2 +np.float32,0x3eb45b90,0x3ebc6d7c,2 +np.float32,0xbe53ead0,0xbe5705d6,2 +np.float32,0x3f630c80,0x3fb420e2,2 +np.float32,0xbf408cd0,0xbf7a56a2,2 +np.float32,0x3dda4ed0,0x3ddb23f1,2 +np.float32,0xbf37ae88,0xbf67096b,2 +np.float32,0xbdd48c28,0xbdd550c9,2 +np.float32,0xbf5745b0,0xbf9cb4a4,2 +np.float32,0xbf44e6fc,0xbf8255c1,2 +np.float32,0x3f5c8e6a,0x3fa65020,2 +np.float32,0xbea45fe8,0xbeaa6630,2 +np.float32,0x3f08bdee,0x3f188ef5,2 +np.float32,0x3ec77e74,0x3ed29f4b,2 +np.float32,0xbf1a1d3c,0xbf324029,2 +np.float32,0x3cad7340,0x3cad79e3,2 +np.float32,0xbf4fac2e,0xbf90b72a,2 +np.float32,0x3f58516e,0x3f9e8330,2 +np.float32,0x3f442008,0x3f816391,2 +np.float32,0xbf6e0c6c,0xbfd42854,2 +np.float32,0xbf266f7a,0xbf4689b2,2 +np.float32,0x3eb7e2f0,0x3ec077ba,2 +np.float32,0xbf320fd0,0xbf5bcf83,2 +np.float32,0xbf6a76b9,0xbfc80a11,2 +np.float32,0xbf2a91b4,0xbf4dd526,2 +np.float32,0x3f176e30,0x3f2e150e,2 +np.float32,0xbdcccad0,0xbdcd7a9c,2 +np.float32,0x3f60a8a4,0x3faebbf7,2 +np.float32,0x3d9706f0,0x3d974d40,2 +np.float32,0x3ef3cd34,0x3f049d58,2 +np.float32,0xbf73c615,0xbfed79fe,2 +np.float32,0x3df1b170,0x3df2d31b,2 +np.float32,0x3f632a46,0x3fb466c7,2 +np.float32,0xbf3ea18e,0xbf75f9ce,2 +np.float32,0xbf3ea05c,0xbf75f71f,2 +np.float32,0xbdd76750,0xbdd83403,2 +np.float32,0xbca830c0,0xbca836cd,2 +np.float32,0x3f1d4162,0x3f373c59,2 +np.float32,0x3c115700,0x3c1157fa,2 +np.float32,0x3dae8ab0,0x3daef758,2 +np.float32,0xbcad5020,0xbcad56bf,2 +np.float32,0x3ee299c4,0x3ef36c15,2 +np.float32,0xbf7f566c,0xc054c3bd,2 +np.float32,0x3f0cc698,0x3f1e4557,2 +np.float32,0xbe75c648,0xbe7aaa04,2 +np.float32,0x3ea29238,0x3ea86417,2 +np.float32,0x3f09d9c0,0x3f1a1d61,2 +np.float32,0x3f67275c,0x3fbe74b3,2 +np.float32,0x3e1a4e18,0x3e1b7d3a,2 +np.float32,0xbef6e3fc,0xbf069e98,2 +np.float32,0xbf6038ac,0xbfadc9fd,2 +np.float32,0xbe46bdd4,0xbe494b7f,2 +np.float32,0xbf4df1f4,0xbf8e3a98,2 +np.float32,0x3d094dc0,0x3d095aed,2 +np.float32,0x3f44c7d2,0x3f822fa3,2 +np.float32,0xbea30816,0xbea8e737,2 +np.float32,0xbe3c27c4,0xbe3e511b,2 +np.float32,0x3f3bb47c,0x3f6f8789,2 +np.float32,0xbe423760,0xbe4498c3,2 +np.float32,0x3ece1a74,0x3eda7634,2 +np.float32,0x3f14d1f6,0x3f2a1a89,2 +np.float32,0xbf4d9e8f,0xbf8dc4c1,2 +np.float32,0xbe92968e,0xbe96cd7f,2 +np.float32,0x3e99e6c0,0x3e9ece26,2 +np.float32,0xbf397361,0xbf6ab878,2 +np.float32,0xbf4fcea4,0xbf90e99f,2 +np.float32,0x3de37640,0x3de46779,2 +np.float32,0x3eb1b604,0x3eb9698c,2 +np.float32,0xbf52d0a2,0xbf957361,2 +np.float32,0xbe20435c,0xbe21975a,2 +np.float32,0x3f437a58,0x3f809bf1,2 +np.float32,0x3f27d1cc,0x3f48f335,2 +np.float32,0x3f7d4ff2,0x4027d1e2,2 +np.float32,0xbef732e4,0xbf06d205,2 +np.float32,0x3f4a0ae6,0x3f88e18e,2 +np.float32,0x3f800000,0x7f800000,2 +np.float32,0x3e3e56a0,0x3e4093ba,2 +np.float32,0xbed2fcfa,0xbee0517d,2 +np.float32,0xbe0e0114,0xbe0eecd7,2 +np.float32,0xbe808574,0xbe8353db,2 +np.float32,0x3f572e2a,0x3f9c8c86,2 
+np.float32,0x80800000,0x80800000,2 +np.float32,0x3f3f3c82,0x3f775703,2 +np.float32,0xbf6e2482,0xbfd4818b,2 +np.float32,0xbf3943b0,0xbf6a5439,2 +np.float32,0x3f6e42ac,0x3fd4f1ea,2 +np.float32,0x3eb676c4,0x3ebed619,2 +np.float32,0xbe5e56c4,0xbe61ef6c,2 +np.float32,0x3eea200c,0x3efcdb65,2 +np.float32,0x3e3d2c78,0x3e3f5ef8,2 +np.float32,0xbdfd8fb0,0xbdfede71,2 +np.float32,0xbee69c8a,0xbef86e89,2 +np.float32,0x3e9efca0,0x3ea46a1c,2 +np.float32,0x3e4c2498,0x3e4ee9ee,2 +np.float32,0xbf3cc93c,0xbf71e21d,2 +np.float32,0x3ee0d77c,0x3ef13d2b,2 +np.float32,0xbefbcd2a,0xbf09d6a3,2 +np.float32,0x3f6dbe5c,0x3fd30a3e,2 +np.float32,0x3dae63e0,0x3daed03f,2 +np.float32,0xbd5001e0,0xbd502fb9,2 +np.float32,0x3f59632a,0x3fa067c8,2 +np.float32,0x3f0d355a,0x3f1ee452,2 +np.float32,0x3f2cbe5c,0x3f51c896,2 +np.float32,0x3c5e6e80,0x3c5e7200,2 +np.float32,0xbe8ac49c,0xbe8e52f0,2 +np.float32,0x3f54e576,0x3f98c0e6,2 +np.float32,0xbeaa0762,0xbeb0ba7c,2 +np.float32,0x3ec81e88,0x3ed35c21,2 +np.float32,0x3f5a6738,0x3fa23fb6,2 +np.float32,0xbf24a682,0xbf43784a,2 +np.float32,0x1,0x1,2 +np.float32,0x3ee6bc24,0x3ef89630,2 +np.float32,0x3f19444a,0x3f30ecf5,2 +np.float32,0x3ec1fc70,0x3ecc28fc,2 +np.float32,0xbf706e14,0xbfdd92fb,2 +np.float32,0x3eccb630,0x3ed8cd98,2 +np.float32,0xbcdf7aa0,0xbcdf88d3,2 +np.float32,0xbe450da8,0xbe478a8e,2 +np.float32,0x3ec9c210,0x3ed54c0b,2 +np.float32,0xbf3b86ca,0xbf6f24d1,2 +np.float32,0x3edcc7a0,0x3eec3a5c,2 +np.float32,0x3f075d5c,0x3f16a39a,2 +np.float32,0xbf5719ce,0xbf9c69de,2 +np.float32,0x3f62cb22,0x3fb3885a,2 +np.float32,0x3f639216,0x3fb55c93,2 +np.float32,0xbf473ee7,0xbf85413a,2 +np.float32,0xbf01b66c,0xbf0eea86,2 +np.float32,0x3e872d80,0x3e8a74f8,2 +np.float32,0xbf60957e,0xbfae925c,2 +np.float32,0xbf6847b2,0xbfc1929b,2 +np.float32,0x3f78bb94,0x4007b363,2 +np.float32,0xbf47efdb,0xbf8622db,2 +np.float32,0xbe1f2308,0xbe206fd6,2 +np.float32,0xbf414926,0xbf7c0a7e,2 +np.float32,0x3eecc268,0x3f00194d,2 +np.float32,0x3eb086d0,0x3eb81120,2 +np.float32,0xbef1af80,0xbf033ff5,2 +np.float32,0xbf454e56,0xbf82d4aa,2 +np.float32,0x3e622560,0x3e65ef20,2 +np.float32,0x3f50d2b2,0x3f926a83,2 +np.float32,0x3eb2c45c,0x3eba9d2c,2 +np.float32,0x3e42d1a0,0x3e4538c9,2 +np.float32,0xbf24cc5c,0xbf43b8e3,2 +np.float32,0x3e8c6464,0x3e90141a,2 +np.float32,0xbf3abff2,0xbf6d79c5,2 +np.float32,0xbec8f2e6,0xbed456fa,2 +np.float32,0xbf787b38,0xc00698b4,2 +np.float32,0xbf58d5cd,0xbf9f6c03,2 +np.float32,0x3df4ee20,0x3df61ba8,2 +np.float32,0xbf34581e,0xbf604951,2 +np.float32,0xbeba5cf4,0xbec35119,2 +np.float32,0xbf76c22d,0xbfffc51c,2 +np.float32,0x3ef63b2c,0x3f0630b4,2 +np.float32,0x3eeadb64,0x3efdc877,2 +np.float32,0x3dfd8c70,0x3dfedb24,2 +np.float32,0x3f441600,0x3f81576d,2 +np.float32,0x3f23a0d8,0x3f41bbf6,2 +np.float32,0x3cb84d40,0x3cb85536,2 +np.float32,0xbf25cb5c,0xbf456e38,2 +np.float32,0xbc108540,0xbc108636,2 +np.float32,0xbc5b9140,0xbc5b949e,2 +np.float32,0xbf62ff40,0xbfb401dd,2 +np.float32,0x3e8e0710,0x3e91d93e,2 +np.float32,0x3f1b6ae0,0x3f344dfd,2 +np.float32,0xbf4dbbbe,0xbf8dedea,2 +np.float32,0x3f1a5fb2,0x3f32a880,2 +np.float32,0xbe56bd00,0xbe59f8cb,2 +np.float32,0xbf490a5c,0xbf87902d,2 +np.float32,0xbf513072,0xbf92f717,2 +np.float32,0x3e73ee28,0x3e78b542,2 +np.float32,0x3f0a4c7a,0x3f1abf2c,2 +np.float32,0x3e10d5c8,0x3e11d00b,2 +np.float32,0xbf771aac,0xc001207e,2 +np.float32,0x3efe2f54,0x3f0b6a46,2 +np.float32,0xbea5f3ea,0xbeac291f,2 +np.float32,0xbf1a73e8,0xbf32c845,2 +np.float32,0x3ebcc82c,0x3ec61c4f,2 +np.float32,0xbf24f492,0xbf43fd9a,2 +np.float32,0x3ecbd908,0x3ed7c691,2 +np.float32,0x3f461c5e,0x3f83d3f0,2 
+np.float32,0x3eed0524,0x3f0043c1,2 +np.float32,0x3d06e840,0x3d06f4bf,2 +np.float32,0x3eb6c974,0x3ebf34d7,2 +np.float32,0xbf1c85e1,0xbf36100f,2 +np.float32,0x3ed697d0,0x3ee4ad04,2 +np.float32,0x3eab0484,0x3eb1d733,2 +np.float32,0xbf3b02f2,0xbf6e0935,2 +np.float32,0xbeeab154,0xbefd9334,2 +np.float32,0xbf695372,0xbfc49881,2 +np.float32,0x3e8aaa7c,0x3e8e36be,2 +np.float32,0xbf208754,0xbf3c8f7b,2 +np.float32,0xbe0dbf28,0xbe0ea9a1,2 +np.float32,0x3ca780c0,0x3ca786ba,2 +np.float32,0xbeb320b4,0xbebb065e,2 +np.float32,0x3f13c698,0x3f288821,2 +np.float32,0xbe8cbbec,0xbe9072c4,2 +np.float32,0x3f1ed534,0x3f39c8df,2 +np.float32,0x3e1ca450,0x3e1de190,2 +np.float32,0x3f54be1c,0x3f988134,2 +np.float32,0x3f34e4ee,0x3f6161b4,2 +np.float32,0xbf7e6913,0xc038b246,2 +np.float32,0x3d3c3f20,0x3d3c6119,2 +np.float32,0x3ca9dc80,0x3ca9e2bc,2 +np.float32,0xbf577ea2,0xbf9d161a,2 +np.float32,0xbedb22c8,0xbeea3644,2 +np.float32,0x3f22a044,0x3f400bfa,2 +np.float32,0xbe214b8c,0xbe22a637,2 +np.float32,0x3e8cd300,0x3e908bbc,2 +np.float32,0xbec4d214,0xbecf7a58,2 +np.float32,0x3e9399a4,0x3e97e7e4,2 +np.float32,0xbee6a1a2,0xbef874ed,2 +np.float32,0xbf323742,0xbf5c1bfd,2 +np.float32,0x3f48b882,0x3f8725ac,2 +np.float32,0xbf4d4dba,0xbf8d532e,2 +np.float32,0xbf59640a,0xbfa0695a,2 +np.float32,0xbf2ad562,0xbf4e4f03,2 +np.float32,0x3e317d98,0x3e334d03,2 +np.float32,0xbf6a5b71,0xbfc7b5a2,2 +np.float32,0x3e87b434,0x3e8b05cf,2 +np.float32,0xbf1c344c,0xbf358dee,2 +np.float32,0x3e449428,0x3e470c65,2 +np.float32,0xbf2c0f2f,0xbf508808,2 +np.float32,0xbec5b5ac,0xbed0859c,2 +np.float32,0xbf4aa956,0xbf89b4b1,2 +np.float32,0x3f6dd374,0x3fd35717,2 +np.float32,0x3f45f76c,0x3f83a5ef,2 +np.float32,0xbed1fba8,0xbedf1bd5,2 +np.float32,0xbd26b2d0,0xbd26ca66,2 +np.float32,0xbe9817c2,0xbe9cd1c3,2 +np.float32,0x3e725988,0x3e770875,2 +np.float32,0xbf1a8ded,0xbf32f132,2 +np.float32,0xbe695860,0xbe6d83d3,2 +np.float32,0x3d8cecd0,0x3d8d25ea,2 +np.float32,0x3f574706,0x3f9cb6ec,2 +np.float32,0xbf5c5a1f,0xbfa5eaf3,2 +np.float32,0x3e7a7c88,0x3e7fab83,2 +np.float32,0xff800000,0xffc00000,2 +np.float32,0x3f66396a,0x3fbbfbb0,2 +np.float32,0x3ed6e588,0x3ee50b53,2 +np.float32,0xbb56d500,0xbb56d532,2 +np.float32,0x3ebd23fc,0x3ec6869a,2 +np.float32,0xbf70d490,0xbfdf4af5,2 +np.float32,0x3e514f88,0x3e544d15,2 +np.float32,0x3e660f98,0x3e6a0dac,2 +np.float32,0xbf034da1,0xbf1110bb,2 +np.float32,0xbf60d9be,0xbfaf2714,2 +np.float32,0x3df67b10,0x3df7ae64,2 +np.float32,0xbeeedc0a,0xbf017010,2 +np.float32,0xbe149224,0xbe15a072,2 +np.float32,0x3f455084,0x3f82d759,2 +np.float32,0x3f210f9e,0x3f3d7093,2 +np.float32,0xbeaea3e0,0xbeb5edd3,2 +np.float32,0x3e0724b0,0x3e07efad,2 +np.float32,0x3f09a784,0x3f19d6ac,2 +np.float32,0xbf044340,0xbf125ee8,2 +np.float32,0xbf71adc9,0xbfe315fe,2 +np.float32,0x3efd3870,0x3f0ac6a8,2 +np.float32,0xbf53c7a6,0xbf96f6df,2 +np.float32,0xbf3cf784,0xbf7247af,2 +np.float32,0x3e0ce9e0,0x3e0dd035,2 +np.float32,0xbd3051a0,0xbd306d89,2 +np.float32,0x3ecab804,0x3ed66f77,2 +np.float32,0x3e984350,0x3e9d0189,2 +np.float32,0x3edd1c00,0x3eeca20b,2 +np.float32,0xbe8e22a0,0xbe91f71b,2 +np.float32,0x3ebebc18,0x3ec85fd6,2 +np.float32,0xba275c00,0xba275c01,2 +np.float32,0x3f1d8190,0x3f37a385,2 +np.float32,0x3f17343e,0x3f2dbbfe,2 +np.float32,0x3caa8000,0x3caa864e,2 +np.float32,0x3e7a7308,0x3e7fa168,2 +np.float32,0x3f7359a6,0x3feb3e1a,2 +np.float32,0xbf7ad15a,0xc012a743,2 +np.float32,0xbf122efb,0xbf262812,2 +np.float32,0xbf03ba04,0xbf11a3fa,2 +np.float32,0x3ed7a90c,0x3ee5f8d4,2 +np.float32,0xbe23e318,0xbe254eed,2 +np.float32,0xbe2866f4,0xbe29f20a,2 
+np.float32,0xbeaedff2,0xbeb631d0,2 +np.float32,0x0,0x0,2 +np.float32,0x3ef2a034,0x3f03dafd,2 +np.float32,0x3f35806c,0x3f62994e,2 +np.float32,0xbf655e19,0xbfb9c718,2 +np.float32,0x3f5d54ce,0x3fa7d4f4,2 +np.float32,0x3f33e64a,0x3f5f67e3,2 +np.float32,0x3ebf4010,0x3ec8f923,2 +np.float32,0xbe050dc8,0xbe05cf70,2 +np.float32,0x3f61693e,0x3fb063b0,2 +np.float32,0xbd94ac00,0xbd94ef12,2 +np.float32,0x3e9de008,0x3ea32f61,2 +np.float32,0xbe3d042c,0xbe3f3540,2 +np.float32,0x3e8fdfc0,0x3e93d9e4,2 +np.float32,0x3f28bc48,0x3f4a9019,2 +np.float32,0x3edea928,0x3eee8b09,2 +np.float32,0xbf05f673,0xbf14b362,2 +np.float32,0xbf360730,0xbf63a914,2 +np.float32,0xbe3fb454,0xbe41fe0a,2 +np.float32,0x3f6d99a8,0x3fd28552,2 +np.float32,0xbf3ae866,0xbf6dd052,2 +np.float32,0x3f5b1164,0x3fa37aec,2 +np.float32,0xbf64a451,0xbfb7f61b,2 +np.float32,0xbdd79bd0,0xbdd86919,2 +np.float32,0x3e89fc00,0x3e8d7a85,2 +np.float32,0x3f4bf690,0x3f8b77ea,2 +np.float32,0x3cbdf280,0x3cbdfb38,2 +np.float32,0x3f138f98,0x3f2835b4,2 +np.float32,0xbe33967c,0xbe3576bc,2 +np.float32,0xbf298164,0xbf4bedda,2 +np.float32,0x3e9955cc,0x3e9e2edb,2 +np.float32,0xbf79b383,0xc00c56c0,2 +np.float32,0x3ea0834c,0x3ea61aea,2 +np.float32,0xbf511184,0xbf92c89a,2 +np.float32,0x3f4d9fba,0x3f8dc666,2 +np.float32,0x3f3387c2,0x3f5ead80,2 +np.float32,0x3e3f7360,0x3e41babb,2 +np.float32,0xbf3cc4d6,0xbf71d879,2 +np.float32,0x3f2e4402,0x3f54994e,2 +np.float32,0x3e6a7118,0x3e6eabff,2 +np.float32,0xbf05d83e,0xbf1489cc,2 +np.float32,0xbdce4fd8,0xbdcf039a,2 +np.float32,0xbf03e2f4,0xbf11dbaf,2 +np.float32,0x3f1ea0a0,0x3f397375,2 +np.float32,0x3f7aff54,0x4013cb1b,2 +np.float32,0x3f5ef158,0x3fab1801,2 +np.float32,0xbe33bcc8,0xbe359e40,2 +np.float32,0xbf04dd0e,0xbf133111,2 +np.float32,0xbf14f887,0xbf2a54d1,2 +np.float32,0x3f75c37a,0x3ff9196e,2 +np.float32,0x3f35c3c8,0x3f6320f2,2 +np.float32,0x3f53bb94,0x3f96e3c3,2 +np.float32,0x3f4d473e,0x3f8d4a19,2 +np.float32,0xbdfe19e0,0xbdff6ac9,2 +np.float32,0xbf7f0cc4,0xc049342d,2 +np.float32,0xbdbfc778,0xbdc057bb,2 +np.float32,0xbf7575b7,0xbff73067,2 +np.float32,0xbe9df488,0xbea34609,2 +np.float32,0xbefbd3c6,0xbf09daff,2 +np.float32,0x3f19962c,0x3f316cbd,2 +np.float32,0x3f7acec6,0x40129732,2 +np.float32,0xbf5db7de,0xbfa89a21,2 +np.float32,0x3f62f444,0x3fb3e830,2 +np.float32,0xbf522adb,0xbf94737f,2 +np.float32,0xbef6ceb2,0xbf0690ba,2 +np.float32,0xbf57c41e,0xbf9d8db0,2 +np.float32,0x3eb3360c,0x3ebb1eb0,2 +np.float32,0x3f29327e,0x3f4b618e,2 +np.float32,0xbf08d099,0xbf18a916,2 +np.float32,0x3ea21014,0x3ea7d369,2 +np.float32,0x3f39e516,0x3f6ba861,2 +np.float32,0x3e7c4f28,0x3e80ce08,2 +np.float32,0xbec5a7f8,0xbed07582,2 +np.float32,0xbf0b1b46,0xbf1be3e7,2 +np.float32,0xbef0e0ec,0xbf02bb2e,2 +np.float32,0x3d835a30,0x3d838869,2 +np.float32,0x3f08aa40,0x3f18736e,2 +np.float32,0x3eb0e4c8,0x3eb87bcd,2 +np.float32,0x3eb3821c,0x3ebb7564,2 +np.float32,0xbe3a7320,0xbe3c8d5a,2 +np.float32,0x3e43f8c0,0x3e466b10,2 +np.float32,0x3e914288,0x3e955b69,2 +np.float32,0x3ec7d800,0x3ed308e7,2 +np.float32,0x3e603df8,0x3e63eef2,2 +np.float32,0x3f225cac,0x3f3f9ac6,2 +np.float32,0x3e3db8f0,0x3e3ff06b,2 +np.float32,0x3f358d78,0x3f62b38c,2 +np.float32,0xbed9bd64,0xbee88158,2 +np.float32,0x800000,0x800000,2 +np.float32,0x3f1adfce,0x3f337230,2 +np.float32,0xbefdc346,0xbf0b229d,2 +np.float32,0xbf091018,0xbf190208,2 +np.float32,0xbf800000,0xff800000,2 +np.float32,0x3f27c2c4,0x3f48d8db,2 +np.float32,0x3ef59c80,0x3f05c993,2 +np.float32,0x3e18a340,0x3e19c893,2 +np.float32,0x3f209610,0x3f3ca7c5,2 +np.float32,0x3f69cc22,0x3fc60087,2 +np.float32,0xbf66cf07,0xbfbd8721,2 
+np.float32,0xbf768098,0xbffdfcc4,2 +np.float32,0x3df27a40,0x3df39ec4,2 +np.float32,0x3daf5bd0,0x3dafca02,2 +np.float32,0x3f53f2be,0x3f973b41,2 +np.float32,0xbf7edcbc,0xc0436ce3,2 +np.float32,0xbdf61db8,0xbdf74fae,2 +np.float32,0x3e2c9328,0x3e2e3cb2,2 +np.float32,0x3f1a4570,0x3f327f41,2 +np.float32,0xbf766306,0xbffd32f1,2 +np.float32,0xbf468b9d,0xbf845f0f,2 +np.float32,0x3e398970,0x3e3b9bb1,2 +np.float32,0xbbefa900,0xbbefaa18,2 +np.float32,0xbf54c989,0xbf9893ad,2 +np.float32,0x3f262cf6,0x3f46169d,2 +np.float32,0x3f638a8a,0x3fb54a98,2 +np.float32,0xbeb36c78,0xbebb5cb8,2 +np.float32,0xbeac4d42,0xbeb34993,2 +np.float32,0x3f1d1942,0x3f36fbf2,2 +np.float32,0xbf5d49ba,0xbfa7bf07,2 +np.float32,0xbf182b5c,0xbf2f38d0,2 +np.float32,0x3f41a742,0x3f7ce5ef,2 +np.float32,0x3f0b9a6c,0x3f1c9898,2 +np.float32,0x3e847494,0x3e8788f3,2 +np.float32,0xbde41608,0xbde50941,2 +np.float32,0x3f693944,0x3fc44b5a,2 +np.float32,0x3f0386b2,0x3f115e37,2 +np.float32,0x3f3a08b0,0x3f6bf3c1,2 +np.float32,0xbf78ee64,0xc0089977,2 +np.float32,0xbf013a11,0xbf0e436e,2 +np.float32,0x3f00668e,0x3f0d2836,2 +np.float32,0x3e6d9850,0x3e720081,2 +np.float32,0x3eacf578,0x3eb4075d,2 +np.float32,0x3f18aef8,0x3f3004b4,2 +np.float32,0x3de342f0,0x3de43385,2 +np.float32,0x3e56cee8,0x3e5a0b85,2 +np.float32,0xbf287912,0xbf4a1966,2 +np.float32,0x3e92c948,0x3e9704c2,2 +np.float32,0x3c07d080,0x3c07d14c,2 +np.float32,0xbe90f6a0,0xbe9508e0,2 +np.float32,0x3e8b4f28,0x3e8ee884,2 +np.float32,0xbf35b56c,0xbf6303ff,2 +np.float32,0xbef512b8,0xbf057027,2 +np.float32,0x3e36c630,0x3e38c0cd,2 +np.float32,0x3f0b3ca8,0x3f1c134a,2 +np.float32,0x3e4cd610,0x3e4fa2c5,2 +np.float32,0xbf5a8372,0xbfa273a3,2 +np.float32,0xbecaad3c,0xbed662ae,2 +np.float32,0xbec372d2,0xbecddeac,2 +np.float32,0x3f6fb2b2,0x3fda8a22,2 +np.float32,0x3f365f28,0x3f645b5a,2 +np.float32,0xbecd00fa,0xbed926a4,2 +np.float32,0xbebafa32,0xbec40672,2 +np.float32,0xbf235b73,0xbf4146c4,2 +np.float32,0x3f7a4658,0x400f6e2c,2 +np.float32,0x3f35e824,0x3f636a54,2 +np.float32,0x3cb87640,0x3cb87e3c,2 +np.float32,0xbf296288,0xbf4bb6ee,2 +np.float32,0x7f800000,0xffc00000,2 +np.float32,0xbf4de86e,0xbf8e2d1a,2 +np.float32,0xbf4ace12,0xbf89e5f3,2 +np.float32,0x3d65a300,0x3d65e0b5,2 +np.float32,0xbe10c534,0xbe11bf21,2 +np.float32,0xbeba3c1c,0xbec32b3e,2 +np.float32,0x3e87eaf8,0x3e8b40b8,2 +np.float32,0x3d5c3bc0,0x3d5c722d,2 +np.float32,0x3e8c14b8,0x3e8fbdf8,2 +np.float32,0xbf06c6f0,0xbf15d327,2 +np.float32,0xbe0f1e30,0xbe100f96,2 +np.float32,0xbee244b0,0xbef30251,2 +np.float32,0x3f2a21b0,0x3f4d0c1d,2 +np.float32,0xbf5f7f81,0xbfac408e,2 +np.float32,0xbe3dba2c,0xbe3ff1b2,2 +np.float32,0x3f3ffc22,0x3f790abf,2 +np.float32,0x3edc3dac,0x3eeb90fd,2 +np.float32,0x7f7fffff,0xffc00000,2 +np.float32,0x3ecfaaac,0x3edc5485,2 +np.float32,0x3f0affbe,0x3f1bbcd9,2 +np.float32,0x3f5f2264,0x3fab7dca,2 +np.float32,0x3f37394c,0x3f66186c,2 +np.float32,0xbe6b2f6c,0xbe6f74e3,2 +np.float32,0x3f284772,0x3f49c1f1,2 +np.float32,0xbdf27bc8,0xbdf3a051,2 +np.float32,0xbc8b14e0,0xbc8b184c,2 +np.float32,0x3f6a867c,0x3fc83b07,2 +np.float32,0x3f1ec876,0x3f39b429,2 +np.float32,0x3f6fd9a8,0x3fdb28d6,2 +np.float32,0xbf473cca,0xbf853e8c,2 +np.float32,0x3e23eff8,0x3e255c23,2 +np.float32,0x3ebefdfc,0x3ec8ac5d,2 +np.float32,0x3f6c8c22,0x3fced2b1,2 +np.float32,0x3f168388,0x3f2cad44,2 +np.float32,0xbece2410,0xbeda81ac,2 +np.float32,0x3f5532f0,0x3f993eea,2 +np.float32,0x3ef1938c,0x3f032dfa,2 +np.float32,0xbef05268,0xbf025fba,2 +np.float32,0x3f552e4a,0x3f993754,2 +np.float32,0x3e9ed068,0x3ea4392d,2 +np.float32,0xbe1a0c24,0xbe1b39be,2 
+np.float32,0xbf2623aa,0xbf46068c,2 +np.float32,0xbe1cc300,0xbe1e00fc,2 +np.float32,0xbe9c0576,0xbea12397,2 +np.float32,0xbd827338,0xbd82a07e,2 +np.float32,0x3f0fc31a,0x3f229786,2 +np.float32,0x3e577810,0x3e5abc7d,2 +np.float32,0x3e0e1cb8,0x3e0f0906,2 +np.float32,0x3e84d344,0x3e87ee73,2 +np.float32,0xbf39c45e,0xbf6b6337,2 +np.float32,0x3edfb25c,0x3eefd273,2 +np.float32,0x3e016398,0x3e021596,2 +np.float32,0xbefeb1be,0xbf0bc0de,2 +np.float32,0x3f37e104,0x3f677196,2 +np.float32,0x3f545316,0x3f97d500,2 +np.float32,0xbefc165a,0xbf0a06ed,2 +np.float32,0xbf0923e6,0xbf191dcd,2 +np.float32,0xbf386508,0xbf68831f,2 +np.float32,0xbf3d4630,0xbf72f4e1,2 +np.float32,0x3f3dbe82,0x3f73ff13,2 +np.float32,0xbf703de4,0xbfdcc7e2,2 +np.float32,0xbf531482,0xbf95dd1a,2 +np.float32,0xbf0af1b6,0xbf1ba8f4,2 +np.float32,0xbec8fd9c,0xbed463a4,2 +np.float32,0xbe230320,0xbe24691a,2 +np.float32,0xbf7de541,0xc02faf38,2 +np.float32,0x3efd2360,0x3f0ab8b7,2 +np.float32,0x3db7f350,0x3db87291,2 +np.float32,0x3e74c510,0x3e799924,2 +np.float32,0x3da549c0,0x3da5a5fc,2 +np.float32,0x3e8a3bc4,0x3e8dbf4a,2 +np.float32,0xbf69f086,0xbfc66e84,2 +np.float32,0x3f323f8e,0x3f5c2c17,2 +np.float32,0x3ec0ae3c,0x3ecaa334,2 +np.float32,0xbebe8966,0xbec824fc,2 +np.float32,0x3f34691e,0x3f606b13,2 +np.float32,0x3f13790e,0x3f2813f5,2 +np.float32,0xbf61c027,0xbfb12618,2 +np.float32,0x3e90c690,0x3e94d4a1,2 +np.float32,0xbefce8f0,0xbf0a920e,2 +np.float32,0xbf5c0e8a,0xbfa559a7,2 +np.float32,0x3f374f60,0x3f6645b6,2 +np.float32,0x3f25f6fa,0x3f45b967,2 +np.float32,0x3f2421aa,0x3f42963a,2 +np.float32,0x3ebfa328,0x3ec96c57,2 +np.float32,0x3e3bef28,0x3e3e1685,2 +np.float32,0x3ea3fa3c,0x3ea9f4dd,2 +np.float32,0x3f362b8e,0x3f63f2b2,2 +np.float32,0xbedcef18,0xbeec6ada,2 +np.float32,0xbdd29c88,0xbdd35bd0,2 +np.float32,0x3f261aea,0x3f45f76f,2 +np.float32,0xbe62c470,0xbe66965e,2 +np.float32,0x7fc00000,0x7fc00000,2 +np.float32,0xbee991aa,0xbefc277b,2 +np.float32,0xbf571960,0xbf9c6923,2 +np.float32,0xbe6fb410,0xbe743b41,2 +np.float32,0x3eb1bed0,0x3eb9738d,2 +np.float32,0x80000000,0x80000000,2 +np.float32,0x3eddcbe4,0x3eed7a69,2 +np.float32,0xbf2a81ba,0xbf4db86d,2 +np.float32,0x3f74da54,0x3ff38737,2 +np.float32,0xbeb6bff4,0xbebf29f4,2 +np.float32,0x3f445752,0x3f81a698,2 +np.float32,0x3ed081b4,0x3edd5618,2 +np.float32,0xbee73802,0xbef931b4,2 +np.float32,0xbd13f2a0,0xbd14031c,2 +np.float32,0xbb4d1200,0xbb4d122c,2 +np.float32,0xbee8777a,0xbefac393,2 +np.float32,0x3f42047c,0x3f7dc06c,2 +np.float32,0xbd089270,0xbd089f67,2 +np.float32,0xbf628c16,0xbfb2f66b,2 +np.float32,0x3e72e098,0x3e77978d,2 +np.float32,0x3ed967cc,0x3ee818e4,2 +np.float32,0x3e284c80,0x3e29d6d9,2 +np.float32,0x3f74e8ba,0x3ff3dbef,2 +np.float32,0x3f013e86,0x3f0e4969,2 +np.float32,0xbf610d4f,0xbfaf983c,2 +np.float32,0xbf3c8d36,0xbf715eba,2 +np.float32,0xbedbc756,0xbeeaffdb,2 +np.float32,0x3e143ec8,0x3e154b4c,2 +np.float32,0xbe1c9808,0xbe1dd4fc,2 +np.float32,0xbe887a1e,0xbe8bdac5,2 +np.float32,0xbe85c4bc,0xbe88f17a,2 +np.float32,0x3f35967e,0x3f62c5b4,2 +np.float32,0x3ea2c4a4,0x3ea89c2d,2 +np.float32,0xbc8703c0,0xbc8706e1,2 +np.float32,0xbf13d52c,0xbf289dff,2 +np.float32,0xbf63bb56,0xbfb5bf29,2 +np.float32,0xbf61c5ef,0xbfb13319,2 +np.float32,0xbf128410,0xbf26a675,2 +np.float32,0x3f03fcf2,0x3f11ff13,2 +np.float32,0xbe49c924,0xbe4c75cd,2 +np.float32,0xbf211a9c,0xbf3d82c5,2 +np.float32,0x3f7e9d52,0x403d1b42,2 +np.float32,0x3edfefd4,0x3ef01e71,2 +np.float32,0x3ebc5bd8,0x3ec59efb,2 +np.float32,0x3d7b02e0,0x3d7b537f,2 +np.float32,0xbf1163ba,0xbf24fb43,2 +np.float32,0x3f5072f2,0x3f91dbf1,2 
+np.float32,0xbee700ce,0xbef8ec60,2 +np.float32,0x3f534168,0x3f962359,2 +np.float32,0x3e6d6c40,0x3e71d1ef,2 +np.float32,0x3def9d70,0x3df0b7a8,2 +np.float32,0x3e89cf80,0x3e8d4a8a,2 +np.float32,0xbf687ca7,0xbfc2290f,2 +np.float32,0x3f35e134,0x3f635c51,2 +np.float32,0x3e59eef8,0x3e5d50fa,2 +np.float32,0xbf65c9e1,0xbfbada61,2 +np.float32,0xbf759292,0xbff7e43d,2 +np.float32,0x3f4635a0,0x3f83f372,2 +np.float32,0x3f29baaa,0x3f4c53f1,2 +np.float32,0x3f6b15a6,0x3fc9fe04,2 +np.float32,0x3edabc88,0x3ee9b922,2 +np.float32,0x3ef382e0,0x3f046d4d,2 +np.float32,0xbe351310,0xbe36ff7f,2 +np.float32,0xbf05c935,0xbf14751c,2 +np.float32,0xbf0e7c50,0xbf20bc24,2 +np.float32,0xbf69bc94,0xbfc5d1b8,2 +np.float32,0xbed41aca,0xbee1aa23,2 +np.float32,0x3f518c08,0x3f938162,2 +np.float32,0xbf3d7974,0xbf73661a,2 +np.float32,0x3f1951a6,0x3f3101c9,2 +np.float32,0xbeb3f436,0xbebbf787,2 +np.float32,0xbf77a190,0xc0031d43,2 +np.float32,0x3eb5b3cc,0x3ebdf6e7,2 +np.float32,0xbed534b4,0xbee2fed2,2 +np.float32,0xbe53e1b8,0xbe56fc56,2 +np.float32,0x3f679e20,0x3fbfb91c,2 +np.float32,0xff7fffff,0xffc00000,2 +np.float32,0xbf7b9bcb,0xc0180073,2 +np.float32,0xbf5635e8,0xbf9aea15,2 +np.float32,0xbe5a3318,0xbe5d9856,2 +np.float32,0xbe003284,0xbe00df9a,2 +np.float32,0x3eb119a4,0x3eb8b7d6,2 +np.float32,0xbf3bccf8,0xbf6fbc84,2 +np.float32,0x3f36f600,0x3f658ea8,2 +np.float32,0x3f1ea834,0x3f397fc2,2 +np.float32,0xbe7cfb54,0xbe8129b3,2 +np.float32,0xbe9b3746,0xbea0406a,2 +np.float32,0x3edc0f90,0x3eeb586c,2 +np.float32,0x3e1842e8,0x3e19660c,2 +np.float32,0xbd8f10b0,0xbd8f4c70,2 +np.float32,0xbf064aca,0xbf1527a2,2 +np.float32,0x3e632e58,0x3e6705be,2 +np.float32,0xbef28ba4,0xbf03cdbb,2 +np.float32,0x3f27b21e,0x3f48bbaf,2 +np.float32,0xbe6f30d4,0xbe73b06e,2 +np.float32,0x3f3e6cb0,0x3f75834b,2 +np.float32,0xbf264aa5,0xbf4649f0,2 +np.float32,0xbf690775,0xbfc3b978,2 +np.float32,0xbf3e4a38,0xbf753632,2 +np.float64,0x3fe12bbe8c62577e,0x3fe32de8e5f961b0,2 +np.float64,0x3fc9b8909b337120,0x3fca1366da00efff,2 +np.float64,0x3feaee4245f5dc84,0x3ff3a011ea0432f3,2 +np.float64,0xbfe892c000f12580,0xbff03e5adaed6f0c,2 +np.float64,0xbf9be8de4837d1c0,0xbf9beaa367756bd1,2 +np.float64,0x3fe632e58fec65cc,0x3feb5ccc5114ca38,2 +np.float64,0x3fe78a0ef7ef141e,0x3fee1b4521d8eb6c,2 +np.float64,0x3feec27a65fd84f4,0x3fff643c8318e81e,2 +np.float64,0x3fbed6efce3dade0,0x3fbefd76cff00111,2 +np.float64,0xbfe3a05fab6740c0,0xbfe6db078aeeb0ca,2 +np.float64,0x3fdca11a56b94234,0x3fdece9e6eacff1b,2 +np.float64,0x3fe0fb15aae1f62c,0x3fe2e9e095ec2089,2 +np.float64,0x3fede12abf7bc256,0x3ffafd0ff4142807,2 +np.float64,0x3feb919edcf7233e,0x3ff4c9aa0bc2432f,2 +np.float64,0x3fd39633b5a72c68,0x3fd43c2e6d5f441c,2 +np.float64,0x3fd9efcbfeb3df98,0x3fdb83f03e58f91c,2 +np.float64,0x3fe2867a36650cf4,0x3fe525858c8ce72e,2 +np.float64,0x3fdacbb8f3b59770,0x3fdc8cd431b6e3ff,2 +np.float64,0x3fcc120503382408,0x3fcc88a8fa43e1c6,2 +np.float64,0xbfd99ff4eab33fea,0xbfdb24a20ae3687d,2 +np.float64,0xbfe8caf0157195e0,0xbff083b8dd0941d3,2 +np.float64,0x3fddc9bf92bb9380,0x3fe022aac0f761d5,2 +np.float64,0x3fe2dbb66e65b76c,0x3fe5a6e7caf3f1f2,2 +np.float64,0x3fe95f5c4a72beb8,0x3ff1444697e96138,2 +np.float64,0xbfc6b163d92d62c8,0xbfc6ef6e006658a1,2 +np.float64,0x3fdf1b2616be364c,0x3fe0fcbd2848c9e8,2 +np.float64,0xbfdca1ccf7b9439a,0xbfdecf7dc0eaa663,2 +np.float64,0x3fe078d6a260f1ae,0x3fe236a7c66ef6c2,2 +np.float64,0x3fdf471bb9be8e38,0x3fe11990ec74e704,2 +np.float64,0xbfe417626be82ec5,0xbfe79c9aa5ed2e2f,2 +np.float64,0xbfeb9cf5677739eb,0xbff4dfc24c012c90,2 +np.float64,0x3f8d9142b03b2280,0x3f8d91c9559d4779,2 
+np.float64,0x3fb052c67220a590,0x3fb05873c90d1cd6,2 +np.float64,0x3fd742e2c7ae85c4,0x3fd860128947d15d,2 +np.float64,0x3fec2e2a2bf85c54,0x3ff60eb554bb8d71,2 +np.float64,0xbfeb2b8bc8f65718,0xbff40b734679497a,2 +np.float64,0x3fe25f8e0d64bf1c,0x3fe4eb381d077803,2 +np.float64,0x3fe56426256ac84c,0x3fe9dafbe79370f0,2 +np.float64,0x3feecc1e5d7d983c,0x3fffa49bedc7aa25,2 +np.float64,0xbfc88ce94b3119d4,0xbfc8dbba0fdee2d2,2 +np.float64,0xbfabcf51ac379ea0,0xbfabd6552aa63da3,2 +np.float64,0xbfccc8b849399170,0xbfcd48d6ff057a4d,2 +np.float64,0x3fd2f831e8a5f064,0x3fd38e67b0dda905,2 +np.float64,0x3fcafdcd6135fb98,0x3fcb670ae2ef4d36,2 +np.float64,0x3feda6042efb4c08,0x3ffa219442ac4ea5,2 +np.float64,0x3fed382b157a7056,0x3ff8bc01bc6d10bc,2 +np.float64,0x3fed858a50fb0b14,0x3ff9b1c05cb6cc0f,2 +np.float64,0x3fcc3960653872c0,0x3fccb2045373a3d1,2 +np.float64,0xbfec5177e478a2f0,0xbff65eb4557d94eb,2 +np.float64,0x3feafe0d5e75fc1a,0x3ff3bb4a260a0dcb,2 +np.float64,0x3fe08bc87ee11790,0x3fe25078aac99d31,2 +np.float64,0xffefffffffffffff,0xfff8000000000000,2 +np.float64,0x3f79985ce0333100,0x3f799872b591d1cb,2 +np.float64,0xbfd4001cf9a8003a,0xbfd4b14b9035b94f,2 +np.float64,0x3fe54a17e6ea9430,0x3fe9ac0f18682343,2 +np.float64,0xbfb4e07fea29c100,0xbfb4ec6520dd0689,2 +np.float64,0xbfed2b6659fa56cd,0xbff895ed57dc1450,2 +np.float64,0xbfe81fc8b5f03f92,0xbfef6b95e72a7a7c,2 +np.float64,0xbfe6aced16ed59da,0xbfec4ce131ee3704,2 +np.float64,0xbfe599f30ceb33e6,0xbfea3d07c1cd78e2,2 +np.float64,0xbfe0ff278b61fe4f,0xbfe2ef8b5efa89ed,2 +np.float64,0xbfe3e9406467d281,0xbfe750e43e841736,2 +np.float64,0x3fcc6b52cf38d6a8,0x3fcce688f4fb2cf1,2 +np.float64,0xbfc890e8133121d0,0xbfc8dfdfee72d258,2 +np.float64,0x3fe46e81dbe8dd04,0x3fe82e09783811a8,2 +np.float64,0x3fd94455e5b288ac,0x3fdab7cef2de0b1f,2 +np.float64,0xbfe82151fff042a4,0xbfef6f254c9696ca,2 +np.float64,0x3fcee1ac1d3dc358,0x3fcf80a6ed07070a,2 +np.float64,0x3fcce8f90939d1f0,0x3fcd6ad18d34f8b5,2 +np.float64,0x3fd6afe56fad5fcc,0x3fd7b7567526b1fb,2 +np.float64,0x3fb1a77092234ee0,0x3fb1ae9fe0d176fc,2 +np.float64,0xbfeb758b0d76eb16,0xbff493d105652edc,2 +np.float64,0xbfb857c24e30af88,0xbfb86aa4da3be53f,2 +np.float64,0x3fe89064eff120ca,0x3ff03b7c5b3339a8,2 +np.float64,0xbfc1bd2fef237a60,0xbfc1da99893473ed,2 +np.float64,0xbfe5ad6e2eeb5adc,0xbfea60ed181b5c05,2 +np.float64,0x3fd5a66358ab4cc8,0x3fd6899e640aeb1f,2 +np.float64,0xbfe198e832e331d0,0xbfe3c8c9496d0de5,2 +np.float64,0xbfdaa5c0d7b54b82,0xbfdc5ed7d3c5ce49,2 +np.float64,0x3fcceccb6939d998,0x3fcd6ed88c2dd3a5,2 +np.float64,0xbfe44413eae88828,0xbfe7e6cd32b34046,2 +np.float64,0xbfc7cbeccf2f97d8,0xbfc8139a2626edae,2 +np.float64,0x3fbf31e4fa3e63d0,0x3fbf59c6e863255e,2 +np.float64,0x3fdf03fa05be07f4,0x3fe0ed953f7989ad,2 +np.float64,0x3fe7f4eaceefe9d6,0x3fef092ca7e2ac39,2 +np.float64,0xbfc084e9d92109d4,0xbfc09ca10fd6aaea,2 +np.float64,0xbf88cfbf70319f80,0xbf88d00effa6d897,2 +np.float64,0x7ff4000000000000,0x7ffc000000000000,2 +np.float64,0xbfa0176e9c202ee0,0xbfa018ca0a6ceef3,2 +np.float64,0xbfd88d0815b11a10,0xbfd9dfc6c6bcbe4e,2 +np.float64,0x3fe89f7730713eee,0x3ff04de52fb536f3,2 +np.float64,0xbfedc9707bfb92e1,0xbffaa25fcf9dd6da,2 +np.float64,0x3fe936d1a6726da4,0x3ff10e40c2d94bc9,2 +np.float64,0x3fdb64aec7b6c95c,0x3fdd473177317b3f,2 +np.float64,0xbfee4f9aaefc9f35,0xbffcdd212667003c,2 +np.float64,0x3fe3730067e6e600,0x3fe692b0a0babf5f,2 +np.float64,0xbfc257e58924afcc,0xbfc27871f8c218d7,2 +np.float64,0x3fe62db12dec5b62,0x3feb52c61b97d9f6,2 +np.float64,0xbfe3ff491367fe92,0xbfe774f1b3a96fd6,2 +np.float64,0x3fea43255274864a,0x3ff28b0c4b7b8d21,2 
+np.float64,0xbfea37923c746f24,0xbff27962159f2072,2 +np.float64,0x3fcd0ac3c73a1588,0x3fcd8e6f8de41755,2 +np.float64,0xbfdccafde6b995fc,0xbfdf030fea8a0630,2 +np.float64,0x3fdba35268b746a4,0x3fdd94094f6f50c1,2 +np.float64,0x3fc68ea1d92d1d40,0x3fc6cb8d07cbb0e4,2 +np.float64,0xbfb88b1f6e311640,0xbfb89e7af4e58778,2 +np.float64,0xbfedc7cadffb8f96,0xbffa9c3766227956,2 +np.float64,0x3fe7928d3eef251a,0x3fee2dcf2ac7961b,2 +np.float64,0xbfeff42ede7fe85e,0xc00cef6b0f1e8323,2 +np.float64,0xbfebf07fa477e0ff,0xbff5893f99e15236,2 +np.float64,0x3fe3002ab9660056,0x3fe5defba550c583,2 +np.float64,0x3feb8f4307f71e86,0x3ff4c517ec8d6de9,2 +np.float64,0x3fd3c16f49a782e0,0x3fd46becaacf74da,2 +np.float64,0x3fc7613df12ec278,0x3fc7a52b2a3c3368,2 +np.float64,0xbfe33af560e675eb,0xbfe63a6528ff1587,2 +np.float64,0xbfde86495abd0c92,0xbfe09bd7ba05b461,2 +np.float64,0x3fe1e7fb4ee3cff6,0x3fe43b04311c0ab6,2 +np.float64,0xbfc528b6bd2a516c,0xbfc55ae0a0c184c8,2 +np.float64,0xbfd81025beb0204c,0xbfd94dd72d804613,2 +np.float64,0x10000000000000,0x10000000000000,2 +np.float64,0x3fc1151c47222a38,0x3fc12f5aad80a6bf,2 +np.float64,0x3feafa136775f426,0x3ff3b46854da0b3a,2 +np.float64,0x3fed2da0747a5b40,0x3ff89c85b658459e,2 +np.float64,0x3fda2a4b51b45498,0x3fdbca0d908ddbbd,2 +np.float64,0xbfd04cf518a099ea,0xbfd0aae0033b9e4c,2 +np.float64,0xbfb9065586320ca8,0xbfb91adb7e31f322,2 +np.float64,0xbfd830b428b06168,0xbfd973ca3c484d8d,2 +np.float64,0x3fc952f7ed32a5f0,0x3fc9a9994561fc1a,2 +np.float64,0xbfeb06c83c760d90,0xbff3ca77b326df20,2 +np.float64,0xbfeb1c98ac763931,0xbff3f0d0900f6149,2 +np.float64,0x3fdf061dbebe0c3c,0x3fe0eefb32b48d17,2 +np.float64,0xbf9acbaf28359760,0xbf9acd4024be9fec,2 +np.float64,0x3fec0adde2f815bc,0x3ff5c1628423794d,2 +np.float64,0xbfc4bc750d2978ec,0xbfc4eba43f590b94,2 +np.float64,0x3fdbe47878b7c8f0,0x3fdde44a2b500d73,2 +np.float64,0x3fe160d18162c1a4,0x3fe378cff08f18f0,2 +np.float64,0x3fc3b58dfd276b18,0x3fc3de01d3802de9,2 +np.float64,0x3fa860343430c060,0x3fa864ecd07ec962,2 +np.float64,0x3fcaebfb4b35d7f8,0x3fcb546512d1b4c7,2 +np.float64,0x3fe3fda558e7fb4a,0x3fe772412e5776de,2 +np.float64,0xbfe8169f2c702d3e,0xbfef5666c9a10f6d,2 +np.float64,0x3feda78e9efb4f1e,0x3ffa270712ded769,2 +np.float64,0xbfda483161b49062,0xbfdbedfbf2e850ba,2 +np.float64,0x3fd7407cf3ae80f8,0x3fd85d4f52622743,2 +np.float64,0xbfd63de4d4ac7bca,0xbfd73550a33e3c32,2 +np.float64,0xbfd9c30b90b38618,0xbfdb4e7695c856f3,2 +np.float64,0x3fcd70c00b3ae180,0x3fcdfa0969e0a119,2 +np.float64,0x3feb4f127f769e24,0x3ff44bf42514e0f4,2 +np.float64,0xbfec1db44af83b69,0xbff5ea54aed1f8e9,2 +np.float64,0x3fd68ff051ad1fe0,0x3fd792d0ed6d6122,2 +np.float64,0x3fe0a048a5614092,0x3fe26c80a826b2a2,2 +np.float64,0x3fd59f3742ab3e70,0x3fd6818563fcaf80,2 +np.float64,0x3fca26ecf9344dd8,0x3fca867ceb5d7ba8,2 +np.float64,0x3fdc1d547ab83aa8,0x3fde2a9cea866484,2 +np.float64,0xbfc78df6312f1bec,0xbfc7d3719b698a39,2 +np.float64,0x3fe754e72b6ea9ce,0x3feda89ea844a2e5,2 +np.float64,0x3fe740c1a4ee8184,0x3fed7dc56ec0c425,2 +np.float64,0x3fe77566a9eeeace,0x3fedee6f408df6de,2 +np.float64,0xbfbbf5bf8e37eb80,0xbfbc126a223781b4,2 +np.float64,0xbfe0acb297615965,0xbfe27d86681ca2b5,2 +np.float64,0xbfc20a0487241408,0xbfc228f5f7d52ce8,2 +np.float64,0xfff0000000000000,0xfff8000000000000,2 +np.float64,0x3fef98a4dbff314a,0x40043cfb60bd46fa,2 +np.float64,0x3fd059102ca0b220,0x3fd0b7d2be6d7822,2 +np.float64,0x3fe89f18a1f13e32,0x3ff04d714bbbf400,2 +np.float64,0x3fd45b6275a8b6c4,0x3fd516a44a276a4b,2 +np.float64,0xbfe04463e86088c8,0xbfe1ef9dfc9f9a53,2 +np.float64,0xbfe086e279610dc5,0xbfe249c9c1040a13,2 
+np.float64,0x3f89c9b110339380,0x3f89ca0a641454b5,2 +np.float64,0xbfb5f5b4322beb68,0xbfb6038dc3fd1516,2 +np.float64,0x3fe6eae76f6dd5ce,0x3feccabae04d5c14,2 +np.float64,0x3fa9ef6c9c33dee0,0x3fa9f51c9a8c8a2f,2 +np.float64,0xbfe171b45f62e368,0xbfe390ccc4c01bf6,2 +np.float64,0x3fb2999442253330,0x3fb2a1fc006804b5,2 +np.float64,0x3fd124bf04a24980,0x3fd1927abb92472d,2 +np.float64,0xbfe6e05938edc0b2,0xbfecb519ba78114f,2 +np.float64,0x3fed466ee6fa8cde,0x3ff8e75405b50490,2 +np.float64,0xbfb999aa92333358,0xbfb9afa4f19f80a2,2 +np.float64,0xbfe98969ed7312d4,0xbff17d887b0303e7,2 +np.float64,0x3fe782843e6f0508,0x3fee0adbeebe3486,2 +np.float64,0xbfe232fcc26465fa,0xbfe4a90a68d46040,2 +np.float64,0x3fd190a90fa32154,0x3fd206f56ffcdca2,2 +np.float64,0xbfc4f8b75929f170,0xbfc5298b2d4e7740,2 +np.float64,0xbfba3a63d63474c8,0xbfba520835c2fdc2,2 +np.float64,0xbfb7708eea2ee120,0xbfb781695ec17846,2 +np.float64,0x3fed9fb7a5fb3f70,0x3ffa0b717bcd1609,2 +np.float64,0xbfc1b158cd2362b0,0xbfc1ce87345f3473,2 +np.float64,0x3f963478082c6900,0x3f96355c3000953b,2 +np.float64,0x3fc5050e532a0a20,0x3fc536397f38f616,2 +np.float64,0x3fe239f9eee473f4,0x3fe4b360da3b2faa,2 +np.float64,0xbfd66bd80eacd7b0,0xbfd769a29fd784c0,2 +np.float64,0x3fc57cdad52af9b8,0x3fc5b16b937f5f72,2 +np.float64,0xbfd3c36a0aa786d4,0xbfd46e1cd0b4eddc,2 +np.float64,0x3feff433487fe866,0x400cf0ea1def3161,2 +np.float64,0xbfed5577807aaaef,0xbff915e8f6bfdf22,2 +np.float64,0xbfca0dd3eb341ba8,0xbfca6c4d11836cb6,2 +np.float64,0x7ff8000000000000,0x7ff8000000000000,2 +np.float64,0xbf974deaa82e9be0,0xbf974ef26a3130d1,2 +np.float64,0xbfe7f425e1efe84c,0xbfef076cb00d649d,2 +np.float64,0xbfe4413605e8826c,0xbfe7e20448b8a4b1,2 +np.float64,0xbfdfad202cbf5a40,0xbfe15cd9eb2be707,2 +np.float64,0xbfe43261ee6864c4,0xbfe7c952c951fe33,2 +np.float64,0xbfec141225782824,0xbff5d54d33861d98,2 +np.float64,0x3fd0f47abaa1e8f4,0x3fd15e8691a7f1c2,2 +np.float64,0x3fd378f0baa6f1e0,0x3fd41bea4a599081,2 +np.float64,0xbfb52523462a4a48,0xbfb5317fa7f436e2,2 +np.float64,0x3fcb30797d3660f0,0x3fcb9c174ea401ff,2 +np.float64,0xbfd48480dea90902,0xbfd5446e02c8b329,2 +np.float64,0xbfee4ae3ab7c95c7,0xbffcc650340ba274,2 +np.float64,0xbfeab086d075610e,0xbff3387f4e83ae26,2 +np.float64,0x3fa17cddf422f9c0,0x3fa17e9bf1b25736,2 +np.float64,0xbfe3064536e60c8a,0xbfe5e86aa5244319,2 +np.float64,0x3feb2882c5765106,0x3ff40604c7d97d44,2 +np.float64,0xbfa6923ff42d2480,0xbfa695ff57b2fc3f,2 +np.float64,0xbfa8bdbdcc317b80,0xbfa8c2ada0d94aa7,2 +np.float64,0x3fe7f16b8e6fe2d8,0x3fef013948c391a6,2 +np.float64,0x3fe4e7169f69ce2e,0x3fe8fceef835050a,2 +np.float64,0x3fed877638fb0eec,0x3ff9b83694127959,2 +np.float64,0xbfe0cc9ecf61993e,0xbfe2a978234cbde5,2 +np.float64,0xbfe977e79672efcf,0xbff16589ea494a38,2 +np.float64,0xbfe240130ae48026,0xbfe4bc69113e0d7f,2 +np.float64,0x3feb1e9b70763d36,0x3ff3f4615938a491,2 +np.float64,0xbfdf197dfcbe32fc,0xbfe0fba78a0fc816,2 +np.float64,0xbfee0f8543fc1f0a,0xbffbb9d9a4ee5387,2 +np.float64,0x3fe88d2191f11a44,0x3ff037843b5b6313,2 +np.float64,0xbfd11bb850a23770,0xbfd188c1cef40007,2 +np.float64,0xbfa1b36e9c2366e0,0xbfa1b53d1d8a8bc4,2 +np.float64,0xbfea2d70d9f45ae2,0xbff26a0629e36b3e,2 +np.float64,0xbfd9188703b2310e,0xbfda83f9ddc18348,2 +np.float64,0xbfee194894fc3291,0xbffbe3c83b61e7cb,2 +np.float64,0xbfe093b4a9e1276a,0xbfe25b4ad6f8f83d,2 +np.float64,0x3fea031489f4062a,0x3ff22accc000082e,2 +np.float64,0xbfc6c0827b2d8104,0xbfc6ff0a94326381,2 +np.float64,0x3fef5cd340feb9a6,0x4002659c5a1b34af,2 +np.float64,0x8010000000000000,0x8010000000000000,2 +np.float64,0x3fd97cb533b2f96c,0x3fdafab28aaae8e3,2 
+np.float64,0x3fe2123334642466,0x3fe478bd83a8ce02,2 +np.float64,0xbfd9a69637b34d2c,0xbfdb2c87c6b6fb8c,2 +np.float64,0x3fc58def7f2b1be0,0x3fc5c2ff724a9f61,2 +np.float64,0xbfedd5da1f7babb4,0xbffad15949b7fb22,2 +np.float64,0x3fe90e92a0721d26,0x3ff0d9b64323efb8,2 +np.float64,0x3fd34b9442a69728,0x3fd3e9f8fe80654e,2 +np.float64,0xbfc5f509ab2bea14,0xbfc62d2ad325c59f,2 +np.float64,0x3feb245634f648ac,0x3ff3fe91a46acbe1,2 +np.float64,0x3fd101e539a203cc,0x3fd16cf52ae6d203,2 +np.float64,0xbfc51e9ba72a3d38,0xbfc5507d00521ba3,2 +np.float64,0x3fe5fe1683ebfc2e,0x3feaf7dd8b1f92b0,2 +np.float64,0x3fc362e59126c5c8,0x3fc389601814170b,2 +np.float64,0x3fea34dbd77469b8,0x3ff27542eb721e7e,2 +np.float64,0xbfc13ed241227da4,0xbfc159d42c0a35a9,2 +np.float64,0xbfe6df118cedbe23,0xbfecb27bb5d3f784,2 +np.float64,0x3fd92895f6b2512c,0x3fda96f5f94b625e,2 +np.float64,0xbfe7ea3aa76fd476,0xbfeef0e93939086e,2 +np.float64,0xbfc855498330aa94,0xbfc8a1ff690c9533,2 +np.float64,0x3fd9f27b3ab3e4f8,0x3fdb8726979afc3b,2 +np.float64,0x3fc65d52232cbaa8,0x3fc698ac4367afba,2 +np.float64,0x3fd1271dd0a24e3c,0x3fd195087649d54e,2 +np.float64,0xbfe983445df30689,0xbff175158b773b90,2 +np.float64,0xbfe0d9b13261b362,0xbfe2bb8908fc9e6e,2 +np.float64,0x3fd7671f2aaece40,0x3fd889dccbf21629,2 +np.float64,0x3fe748aebfee915e,0x3fed8e970d94c17d,2 +np.float64,0x3fea756e4e74eadc,0x3ff2d947ef3a54f4,2 +np.float64,0x3fde22311cbc4464,0x3fe05b4ce9df1fdd,2 +np.float64,0x3fe2b55ec1e56abe,0x3fe56c6849e3985a,2 +np.float64,0x3fed7b47437af68e,0x3ff98f8e82de99a0,2 +np.float64,0x3fec8184b179030a,0x3ff6d03aaf0135ba,2 +np.float64,0x3fc9ea825533d508,0x3fca4776d7190e71,2 +np.float64,0xbfe8ddd58b71bbab,0xbff09b770ed7bc9a,2 +np.float64,0xbfed41741bfa82e8,0xbff8d81c2a9fc615,2 +np.float64,0x3fe0a73888e14e72,0x3fe27602ad9a3726,2 +np.float64,0xbfe9d0a565f3a14b,0xbff1e1897b628f66,2 +np.float64,0x3fda12b381b42568,0x3fdbadbec22fbd5a,2 +np.float64,0x3fef0081187e0102,0x4000949eff8313c2,2 +np.float64,0x3fef6942b67ed286,0x4002b7913eb1ee76,2 +np.float64,0x3fda10f882b421f0,0x3fdbababa2d6659d,2 +np.float64,0x3fe5828971eb0512,0x3fea122b5088315a,2 +np.float64,0x3fe9d4b53ff3a96a,0x3ff1e75c148bda01,2 +np.float64,0x3fe95d246bf2ba48,0x3ff1414a61a136ec,2 +np.float64,0x3f9e575eb83caec0,0x3f9e59a4f17179e3,2 +np.float64,0x3fdb0a20b5b61440,0x3fdcd8a56178a17f,2 +np.float64,0xbfdef425e3bde84c,0xbfe0e33eeacf3861,2 +np.float64,0x3fd6afcf6bad5fa0,0x3fd7b73d47288347,2 +np.float64,0x3fe89256367124ac,0x3ff03dd9f36ce40e,2 +np.float64,0x3fe7e560fcefcac2,0x3feee5ef8688b60b,2 +np.float64,0x3fedef55e1fbdeac,0x3ffb350ee1df986b,2 +np.float64,0xbfe44b926de89725,0xbfe7f3539910c41f,2 +np.float64,0x3fc58310f32b0620,0x3fc5b7cfdba15bd0,2 +np.float64,0x3f736d256026da00,0x3f736d2eebe91a90,2 +np.float64,0x3feb012d2076025a,0x3ff3c0b5d21a7259,2 +np.float64,0xbfe466a6c468cd4e,0xbfe820c9c197601f,2 +np.float64,0x3fe1aba8aa635752,0x3fe3e3b73920f64c,2 +np.float64,0x3fe5597c336ab2f8,0x3fe9c7bc4b765b15,2 +np.float64,0x3fe1004ac5e20096,0x3fe2f12116e99821,2 +np.float64,0x3fecbc67477978ce,0x3ff76377434dbdad,2 +np.float64,0x3fe0e64515e1cc8a,0x3fe2ccf5447c1579,2 +np.float64,0x3febcfa874f79f50,0x3ff54528f0822144,2 +np.float64,0x3fc36915ed26d228,0x3fc38fb5b28d3f72,2 +np.float64,0xbfe01213e5e02428,0xbfe1ac0e1e7418f1,2 +np.float64,0x3fcd97875b3b2f10,0x3fce22fe3fc98702,2 +np.float64,0xbfe30383c5e60708,0xbfe5e427e62cc957,2 +np.float64,0xbfde339bf9bc6738,0xbfe0667f337924f5,2 +np.float64,0xbfda7c1c49b4f838,0xbfdc2c8801ce654a,2 +np.float64,0x3fb6b3489e2d6690,0x3fb6c29650387b92,2 +np.float64,0xbfe1fd4d76e3fa9b,0xbfe45a1f60077678,2 
+np.float64,0xbf67c5e0402f8c00,0xbf67c5e49fce115a,2 +np.float64,0xbfd4f9aa2da9f354,0xbfd5c759603d0b9b,2 +np.float64,0x3fe83c227bf07844,0x3fefada9f1bd7fa9,2 +np.float64,0xbf97f717982fee20,0xbf97f836701a8cd5,2 +np.float64,0x3fe9688a2472d114,0x3ff150aa575e7d51,2 +np.float64,0xbfc5a9779d2b52f0,0xbfc5df56509c48b1,2 +np.float64,0xbfe958d5f472b1ac,0xbff13b813f9bee20,2 +np.float64,0xbfd7b3b944af6772,0xbfd8e276c2b2920f,2 +np.float64,0x3fed10198e7a2034,0x3ff8469c817572f0,2 +np.float64,0xbfeeecc4517dd989,0xc000472b1f858be3,2 +np.float64,0xbfdbcce47eb799c8,0xbfddc734aa67812b,2 +np.float64,0xbfd013ee24a027dc,0xbfd06df3089384ca,2 +np.float64,0xbfd215f2bfa42be6,0xbfd29774ffe26a74,2 +np.float64,0x3fdfd0ae67bfa15c,0x3fe1746e3a963a9f,2 +np.float64,0xbfc84aa10b309544,0xbfc896f0d25b723a,2 +np.float64,0xbfcd0c627d3a18c4,0xbfcd9024c73747a9,2 +np.float64,0x3fd87df6dbb0fbec,0x3fd9ce1dde757f31,2 +np.float64,0xbfdad85e05b5b0bc,0xbfdc9c2addb6ce47,2 +np.float64,0xbfee4f8977fc9f13,0xbffcdccd68e514b3,2 +np.float64,0x3fa5c290542b8520,0x3fa5c5ebdf09ca70,2 +np.float64,0xbfd7e401d2afc804,0xbfd91a7e4eb5a026,2 +np.float64,0xbfe33ff73b667fee,0xbfe6423cc6eb07d7,2 +np.float64,0x3fdfb7d6c4bf6fac,0x3fe163f2e8175177,2 +np.float64,0xbfd515d69eaa2bae,0xbfd5e6eedd6a1598,2 +np.float64,0x3fb322232e264440,0x3fb32b49d91c3cbe,2 +np.float64,0xbfe20ac39e641587,0xbfe46dd4b3803f19,2 +np.float64,0x3fe282dc18e505b8,0x3fe520152120c297,2 +np.float64,0xbfc905a4cd320b48,0xbfc95929b74865fb,2 +np.float64,0x3fe0ae3b83615c78,0x3fe27fa1dafc825b,2 +np.float64,0xbfc1bfed0f237fdc,0xbfc1dd6466225cdf,2 +np.float64,0xbfeca4d47d7949a9,0xbff72761a34fb682,2 +np.float64,0xbfe8cf8c48f19f18,0xbff0897ebc003626,2 +np.float64,0xbfe1aaf0a36355e2,0xbfe3e2ae7b17a286,2 +np.float64,0x3fe2ca442e659488,0x3fe58c3a2fb4f14a,2 +np.float64,0xbfda3c2deeb4785c,0xbfdbdf89fe96a243,2 +np.float64,0xbfdc12bfecb82580,0xbfde1d81dea3c221,2 +np.float64,0xbfe2d6d877e5adb1,0xbfe59f73e22c1fc7,2 +np.float64,0x3fe5f930636bf260,0x3feaee96a462e4de,2 +np.float64,0x3fcf3c0ea53e7820,0x3fcfe0b0f92be7e9,2 +np.float64,0xbfa5bb90f42b7720,0xbfa5bee9424004cc,2 +np.float64,0xbfe2fb3a3265f674,0xbfe5d75b988bb279,2 +np.float64,0x3fcaec7aab35d8f8,0x3fcb54ea582fff6f,2 +np.float64,0xbfd8d3228db1a646,0xbfda322297747fbc,2 +np.float64,0x3fedd2e0ad7ba5c2,0x3ffac6002b65c424,2 +np.float64,0xbfd9edeca2b3dbda,0xbfdb81b2b7785e33,2 +np.float64,0xbfef5febb17ebfd7,0xc002796b15950960,2 +np.float64,0x3fde22f787bc45f0,0x3fe05bcc624b9ba2,2 +np.float64,0xbfc716a4ab2e2d48,0xbfc758073839dd44,2 +np.float64,0xbf9bed852837db00,0xbf9bef4b2a3f3bdc,2 +np.float64,0x3fef8f88507f1f10,0x4003e5e566444571,2 +np.float64,0xbfdc1bbed6b8377e,0xbfde28a64e174e60,2 +np.float64,0x3fe02d30eae05a62,0x3fe1d064ec027cd3,2 +np.float64,0x3fd9dbb500b3b76c,0x3fdb6bea40162279,2 +np.float64,0x3fe353ff1d66a7fe,0x3fe661b3358c925e,2 +np.float64,0x3fac3ebfb4387d80,0x3fac4618effff2b0,2 +np.float64,0x3fe63cf0ba6c79e2,0x3feb7030cff5f434,2 +np.float64,0x3fd0e915f8a1d22c,0x3fd152464597b510,2 +np.float64,0xbfd36987cda6d310,0xbfd40af049d7621e,2 +np.float64,0xbfdc5b4dc7b8b69c,0xbfde7790a35da2bc,2 +np.float64,0x3feee7ff4a7dcffe,0x40003545989e07c7,2 +np.float64,0xbfeb2c8308765906,0xbff40d2e6469249e,2 +np.float64,0x3fe535a894ea6b52,0x3fe98781648550d0,2 +np.float64,0xbfef168eb9fe2d1d,0xc000f274ed3cd312,2 +np.float64,0x3fc3e2d98927c5b0,0x3fc40c6991b8900c,2 +np.float64,0xbfcd8fe3e73b1fc8,0xbfce1aec7f9b7f7d,2 +np.float64,0xbfd55d8c3aaabb18,0xbfd6378132ee4892,2 +np.float64,0xbfe424a66168494d,0xbfe7b289d72c98b3,2 +np.float64,0x3fd81af13eb035e4,0x3fd95a6a9696ab45,2 
+np.float64,0xbfe3016722e602ce,0xbfe5e0e46db228cd,2 +np.float64,0x3fe9a20beff34418,0x3ff19faca17fc468,2 +np.float64,0xbfe2124bc7e42498,0xbfe478e19927e723,2 +np.float64,0x3fd96f8622b2df0c,0x3fdaeb08da6b08ae,2 +np.float64,0x3fecd6796579acf2,0x3ff7a7d02159e181,2 +np.float64,0x3fe60015df6c002c,0x3feafba6f2682a61,2 +np.float64,0x3fc7181cf72e3038,0x3fc7598c2cc3c3b4,2 +np.float64,0xbfce6e2e0b3cdc5c,0xbfcf0621b3e37115,2 +np.float64,0xbfe52a829e6a5505,0xbfe973a785980af9,2 +np.float64,0x3fed4bbac37a9776,0x3ff8f7a0e68a2bbe,2 +np.float64,0x3fabdfaacc37bf60,0x3fabe6bab42bd246,2 +np.float64,0xbfcd9598cb3b2b30,0xbfce20f3c4c2c261,2 +np.float64,0x3fd717d859ae2fb0,0x3fd82e88eca09ab1,2 +np.float64,0x3fe28ccb18e51996,0x3fe52f071d2694fd,2 +np.float64,0xbfe43f064ae87e0c,0xbfe7de5eab36b5b9,2 +np.float64,0x7fefffffffffffff,0xfff8000000000000,2 +np.float64,0xbfb39b045a273608,0xbfb3a4dd3395fdd5,2 +np.float64,0xbfb3358bae266b18,0xbfb33ece5e95970a,2 +np.float64,0xbfeeafb6717d5f6d,0xbffeec3f9695b575,2 +np.float64,0xbfe7a321afef4644,0xbfee522dd80f41f4,2 +np.float64,0x3fe3a17e5be742fc,0x3fe6dcd32af51e92,2 +np.float64,0xbfc61694bd2c2d28,0xbfc64fbbd835f6e7,2 +np.float64,0xbfd795906faf2b20,0xbfd8bf89b370655c,2 +np.float64,0xbfe4b39b59e96736,0xbfe8a3c5c645b6e3,2 +np.float64,0x3fd310af3ba62160,0x3fd3a9442e825e1c,2 +np.float64,0xbfd45198a6a8a332,0xbfd50bc10311a0a3,2 +np.float64,0x3fd0017eaaa002fc,0x3fd05a472a837999,2 +np.float64,0xbfea974d98752e9b,0xbff30f67f1835183,2 +np.float64,0xbf978f60582f1ec0,0xbf979070e1c2b59d,2 +np.float64,0x3fe1c715d4e38e2c,0x3fe40b479e1241a2,2 +np.float64,0xbfccb965cd3972cc,0xbfcd38b40c4a352d,2 +np.float64,0xbfd9897048b312e0,0xbfdb09d55624c2a3,2 +np.float64,0x3fe7f5de4befebbc,0x3fef0b56be259f9c,2 +np.float64,0x3fcc6c6d4338d8d8,0x3fcce7b20ed68a78,2 +np.float64,0xbfe63884046c7108,0xbfeb67a3b945c3ee,2 +np.float64,0xbfce64e2ad3cc9c4,0xbfcefc47fae2e81f,2 +np.float64,0x3fefeb57b27fd6b0,0x400ab2eac6321cfb,2 +np.float64,0x3fe679627e6cf2c4,0x3febe6451b6ee0c4,2 +np.float64,0x3fc5f710172bee20,0x3fc62f40f85cb040,2 +np.float64,0x3fc34975e52692e8,0x3fc36f58588c7fa2,2 +np.float64,0x3fe8a3784cf146f0,0x3ff052ced9bb9406,2 +np.float64,0x3fd11a607ca234c0,0x3fd1874f876233fe,2 +np.float64,0x3fb2d653f625aca0,0x3fb2df0f4c9633f3,2 +np.float64,0x3fe555f39eeaabe8,0x3fe9c15ee962a28c,2 +np.float64,0xbfea297e3bf452fc,0xbff264107117f709,2 +np.float64,0x3fe1581cdde2b03a,0x3fe36c79acedf99c,2 +np.float64,0x3fd4567063a8ace0,0x3fd51123dbd9106f,2 +np.float64,0x3fa3883aec271080,0x3fa38aa86ec71218,2 +np.float64,0x3fe40e5d7de81cba,0x3fe78dbb9b568850,2 +np.float64,0xbfe9a2f7347345ee,0xbff1a0f4faa05041,2 +np.float64,0x3f9eef03a83dde00,0x3f9ef16caa0c1478,2 +np.float64,0xbfcb4641d1368c84,0xbfcbb2e7ff8c266d,2 +np.float64,0xbfa8403b2c308070,0xbfa844e148b735b7,2 +np.float64,0xbfe1875cd6e30eba,0xbfe3afadc08369f5,2 +np.float64,0xbfdd3c3d26ba787a,0xbfdf919b3e296766,2 +np.float64,0x3fcd6c4c853ad898,0x3fcdf55647b518b8,2 +np.float64,0xbfe360a173e6c143,0xbfe6759eb3a08cf2,2 +np.float64,0x3fe5a13147eb4262,0x3fea4a5a060f5adb,2 +np.float64,0x3feb3cdd7af679ba,0x3ff42aae0cf61234,2 +np.float64,0x3fe5205128ea40a2,0x3fe9618f3d0c54af,2 +np.float64,0x3fce35343f3c6a68,0x3fcec9c4e612b050,2 +np.float64,0xbfc345724d268ae4,0xbfc36b3ce6338e6a,2 +np.float64,0x3fedc4fc0e7b89f8,0x3ffa91c1d775c1f7,2 +np.float64,0x3fe41fbf21683f7e,0x3fe7aa6c174a0e65,2 +np.float64,0xbfc7a1a5d32f434c,0xbfc7e7d27a4c5241,2 +np.float64,0x3fd3e33eaca7c67c,0x3fd4915264441e2f,2 +np.float64,0x3feb3f02f6f67e06,0x3ff42e942249e596,2 +np.float64,0x3fdb75fcb0b6ebf8,0x3fdd5c63f98b6275,2 
+np.float64,0x3fd6476603ac8ecc,0x3fd74020b164cf38,2 +np.float64,0x3fed535372faa6a6,0x3ff90f3791821841,2 +np.float64,0x3fe8648ead70c91e,0x3ff006a62befd7ed,2 +np.float64,0x3fd0f90760a1f210,0x3fd1636b39bb1525,2 +np.float64,0xbfca052443340a48,0xbfca633d6e777ae0,2 +np.float64,0xbfa6a5e3342d4bc0,0xbfa6a9ac6a488f5f,2 +np.float64,0x3fd5598038aab300,0x3fd632f35c0c3d52,2 +np.float64,0xbfdf66218fbecc44,0xbfe12df83b19f300,2 +np.float64,0x3fe78e15b56f1c2c,0x3fee240d12489cd1,2 +np.float64,0x3fe3d6a7b3e7ad50,0x3fe7329dcf7401e2,2 +np.float64,0xbfddb8e97bbb71d2,0xbfe017ed6d55a673,2 +np.float64,0xbfd57afd55aaf5fa,0xbfd658a9607c3370,2 +np.float64,0xbfdba4c9abb74994,0xbfdd95d69e5e8814,2 +np.float64,0xbfe71d8090ee3b01,0xbfed3390be6d2eef,2 +np.float64,0xbfc738ac0f2e7158,0xbfc77b3553b7c026,2 +np.float64,0x3f873656302e6c80,0x3f873697556ae011,2 +np.float64,0x3fe559491d6ab292,0x3fe9c7603b12c608,2 +np.float64,0xbfe262776864c4ef,0xbfe4ef905dda8599,2 +np.float64,0x3fe59d8917eb3b12,0x3fea439f44b7573f,2 +np.float64,0xbfd4b5afb5a96b60,0xbfd57b4e3df4dbc8,2 +np.float64,0x3fe81158447022b0,0x3fef4a3cea3eb6a9,2 +np.float64,0xbfeb023441f60468,0xbff3c27f0fc1a4dc,2 +np.float64,0x3fefb212eaff6426,0x40055fc6d949cf44,2 +np.float64,0xbfe1300ac1e26016,0xbfe333f297a1260e,2 +np.float64,0xbfeae0a2f575c146,0xbff388d58c380b8c,2 +np.float64,0xbfeddd8e55fbbb1d,0xbffaef045b2e21d9,2 +np.float64,0x3fec7c6c1d78f8d8,0x3ff6c3ebb019a8e5,2 +np.float64,0xbfe27e071f64fc0e,0xbfe518d2ff630f33,2 +np.float64,0x8000000000000001,0x8000000000000001,2 +np.float64,0x3fc5872abf2b0e58,0x3fc5bc083105db76,2 +np.float64,0x3fe65114baeca22a,0x3feb9745b82ef15a,2 +np.float64,0xbfc783abe52f0758,0xbfc7c8cb23f93e79,2 +np.float64,0x3fe4b7a5dd696f4c,0x3fe8aab9d492f0ca,2 +np.float64,0xbf91a8e8a82351e0,0xbf91a95b6ae806f1,2 +np.float64,0xbfee482eb77c905d,0xbffcb952830e715a,2 +np.float64,0x3fba0eee2a341de0,0x3fba261d495e3a1b,2 +np.float64,0xbfeb8876ae7710ed,0xbff4b7f7f4343506,2 +np.float64,0xbfe4d29e46e9a53c,0xbfe8d9547a601ba7,2 +np.float64,0xbfe12413b8e24828,0xbfe3232656541d10,2 +np.float64,0x3fc0bd8f61217b20,0x3fc0d63f937f0aa4,2 +np.float64,0xbfd3debafda7bd76,0xbfd48c534e5329e4,2 +np.float64,0x3fc0f92de921f258,0x3fc112eb7d47349b,2 +np.float64,0xbfe576b95f6aed72,0xbfe9fca859239b3c,2 +np.float64,0x3fd10e520da21ca4,0x3fd17a546e4152f7,2 +np.float64,0x3fcef917eb3df230,0x3fcf998677a8fa8f,2 +np.float64,0x3fdfcf863abf9f0c,0x3fe173a98af1cb13,2 +np.float64,0x3fc28c4b4f251898,0x3fc2adf43792e917,2 +np.float64,0x3fceb837ad3d7070,0x3fcf54a63b7d8c5c,2 +np.float64,0x3fc0140a05202818,0x3fc029e4f75330cb,2 +np.float64,0xbfd76c3362aed866,0xbfd88fb9e790b4e8,2 +np.float64,0xbfe475300868ea60,0xbfe8395334623e1f,2 +np.float64,0x3fea70b9b4f4e174,0x3ff2d1dad92173ba,2 +np.float64,0xbfe2edbd4965db7a,0xbfe5c29449a9365d,2 +np.float64,0xbfddf86f66bbf0de,0xbfe0408439cada9b,2 +np.float64,0xbfb443cdfa288798,0xbfb44eae796ad3ea,2 +np.float64,0xbf96a8a0482d5140,0xbf96a992b6ef073b,2 +np.float64,0xbfd279db2fa4f3b6,0xbfd3043db6acbd9e,2 +np.float64,0x3fe5d99088ebb322,0x3feab30be14e1605,2 +np.float64,0xbfe1a917abe35230,0xbfe3e0063d0f5f63,2 +np.float64,0x3fc77272f52ee4e8,0x3fc7b6f8ab6f4591,2 +np.float64,0x3fd6b62146ad6c44,0x3fd7be77eef8390a,2 +np.float64,0xbfe39fd9bc673fb4,0xbfe6da30dc4eadde,2 +np.float64,0x3fe35545c066aa8c,0x3fe663b5873e4d4b,2 +np.float64,0xbfcbbeffb3377e00,0xbfcc317edf7f6992,2 +np.float64,0xbfe28a58366514b0,0xbfe52b5734579ffa,2 +np.float64,0xbfbf0c87023e1910,0xbfbf33d970a0dfa5,2 +np.float64,0xbfd31144cba6228a,0xbfd3a9e84f9168f9,2 +np.float64,0xbfe5c044056b8088,0xbfea83d607c1a88a,2 
+np.float64,0x3fdaabdf18b557c0,0x3fdc663ee8eddc83,2 +np.float64,0xbfeb883006f71060,0xbff4b76feff615be,2 +np.float64,0xbfebaef41d775de8,0xbff5034111440754,2 +np.float64,0x3fd9b6eb3bb36dd8,0x3fdb3fff5071dacf,2 +np.float64,0x3fe4e33c45e9c678,0x3fe8f637779ddedf,2 +np.float64,0x3fe52213a06a4428,0x3fe964adeff5c14e,2 +np.float64,0x3fe799254cef324a,0x3fee3c3ecfd3cdc5,2 +np.float64,0x3fd0533f35a0a680,0x3fd0b19a003469d3,2 +np.float64,0x3fec7ef5c7f8fdec,0x3ff6ca0abe055048,2 +np.float64,0xbfd1b5da82a36bb6,0xbfd22f357acbee79,2 +np.float64,0xbfd8f9c652b1f38c,0xbfda5faacbce9cf9,2 +np.float64,0x3fc8fc818b31f900,0x3fc94fa9a6aa53c8,2 +np.float64,0x3fcf42cc613e8598,0x3fcfe7dc128f33f2,2 +np.float64,0x3fd393a995a72754,0x3fd4396127b19305,2 +np.float64,0x3fec7b7df9f8f6fc,0x3ff6c1ae51753ef2,2 +np.float64,0x3fc07f175b20fe30,0x3fc096b55c11568c,2 +np.float64,0xbf979170082f22e0,0xbf979280d9555f44,2 +np.float64,0xbfb9d110c633a220,0xbfb9e79ba19b3c4a,2 +np.float64,0x3fedcd7d417b9afa,0x3ffab19734e86d58,2 +np.float64,0xbfec116f27f822de,0xbff5cf9425cb415b,2 +np.float64,0xbfec4fa0bef89f42,0xbff65a771982c920,2 +np.float64,0x3f94d4452829a880,0x3f94d501789ad11c,2 +np.float64,0xbfefe5ede27fcbdc,0xc009c440d3c2a4ce,2 +np.float64,0xbfe7e5f7b5efcbf0,0xbfeee74449aee1db,2 +np.float64,0xbfeb71dc8976e3b9,0xbff48cd84ea54ed2,2 +np.float64,0xbfe4cdb65f699b6c,0xbfe8d0d3bce901ef,2 +np.float64,0x3fb78ef1ee2f1de0,0x3fb7a00e7d183c48,2 +np.float64,0x3fb681864a2d0310,0x3fb6906fe64b4cd7,2 +np.float64,0xbfd2ad3b31a55a76,0xbfd33c57b5985399,2 +np.float64,0x3fdcdaaa95b9b554,0x3fdf16b99628db1e,2 +np.float64,0x3fa4780b7428f020,0x3fa47ad6ce9b8081,2 +np.float64,0x3fc546b0ad2a8d60,0x3fc579b361b3b18f,2 +np.float64,0x3feaf98dd6f5f31c,0x3ff3b38189c3539c,2 +np.float64,0x3feb0b2eca76165e,0x3ff3d22797083f9a,2 +np.float64,0xbfdc02ae3ab8055c,0xbfde099ecb5dbacf,2 +np.float64,0x3fd248bf17a49180,0x3fd2ceb77b346d1d,2 +np.float64,0x3fe349d666e693ac,0x3fe651b9933a8853,2 +np.float64,0xbfca526fc534a4e0,0xbfcab3e83f0d9b93,2 +np.float64,0x3fc156421722ac88,0x3fc171b38826563b,2 +np.float64,0xbfe4244569e8488b,0xbfe7b1e93e7d4f92,2 +np.float64,0x3fe010faabe021f6,0x3fe1aa961338886d,2 +np.float64,0xbfc52dacb72a5b58,0xbfc55ffa50eba380,2 +np.float64,0x8000000000000000,0x8000000000000000,2 +np.float64,0x3fea1d4865f43a90,0x3ff251b839eb4817,2 +np.float64,0xbfa0f65c8421ecc0,0xbfa0f7f37c91be01,2 +np.float64,0x3fcab29c0b356538,0x3fcb1863edbee184,2 +np.float64,0x3fe7949162ef2922,0x3fee323821958b88,2 +np.float64,0x3fdaf9288ab5f250,0x3fdcc400190a4839,2 +np.float64,0xbfe13ece6be27d9d,0xbfe348ba07553179,2 +np.float64,0x3f8a0c4fd0341880,0x3f8a0cabdf710185,2 +np.float64,0x3fdd0442a2ba0884,0x3fdf4b016c4da452,2 +np.float64,0xbfaf06d2343e0da0,0xbfaf1090b1600422,2 +np.float64,0xbfd3b65225a76ca4,0xbfd45fa49ae76cca,2 +np.float64,0x3fef5d75fefebaec,0x400269a5e7c11891,2 +np.float64,0xbfe048e35ce091c6,0xbfe1f5af45dd64f8,2 +np.float64,0xbfe27d4599e4fa8b,0xbfe517b07843d04c,2 +np.float64,0xbfe6f2a637ede54c,0xbfecdaa730462576,2 +np.float64,0x3fc63fbb752c7f78,0x3fc67a2854974109,2 +np.float64,0x3fedda6bfbfbb4d8,0x3ffae2e6131f3475,2 +np.float64,0x3fe7a6f5286f4dea,0x3fee5a9b1ef46016,2 +np.float64,0xbfd4ea8bcea9d518,0xbfd5b66ab7e5cf00,2 +np.float64,0x3fdc116568b822cc,0x3fde1bd4d0d9fd6c,2 +np.float64,0x3fdc45cb1bb88b98,0x3fde5cd1d2751032,2 +np.float64,0x3feabd932f757b26,0x3ff34e06e56a62a1,2 +np.float64,0xbfae5dbe0c3cbb80,0xbfae66e062ac0d65,2 +np.float64,0xbfdb385a00b670b4,0xbfdd10fedf3a58a7,2 +np.float64,0xbfebb14755f7628f,0xbff507e123a2b47c,2 +np.float64,0x3fe6de2fdfedbc60,0x3fecb0ae6e131da2,2 
+np.float64,0xbfd86de640b0dbcc,0xbfd9bb4dbf0bf6af,2 +np.float64,0x3fe39e86d9e73d0e,0x3fe6d811c858d5d9,2 +np.float64,0x7ff0000000000000,0xfff8000000000000,2 +np.float64,0x3fa8101684302020,0x3fa814a12176e937,2 +np.float64,0x3fefdd5ad37fbab6,0x4008a08c0b76fbb5,2 +np.float64,0x3fe645c727ec8b8e,0x3feb814ebc470940,2 +np.float64,0x3fe3ba79dce774f4,0x3fe70500db564cb6,2 +np.float64,0xbfe0e5a254e1cb44,0xbfe2cc13940c6d9a,2 +np.float64,0x3fe2cac62465958c,0x3fe58d008c5e31f8,2 +np.float64,0xbfd3ffb531a7ff6a,0xbfd4b0d88cff2040,2 +np.float64,0x3fe0929104612522,0x3fe259bc42dce788,2 +np.float64,0x1,0x1,2 +np.float64,0xbfe7db77e6efb6f0,0xbfeecf93e8a61cb3,2 +np.float64,0xbfe37e9559e6fd2a,0xbfe6a514e29cb7aa,2 +np.float64,0xbfc53a843f2a7508,0xbfc56d2e9ad8b716,2 +np.float64,0xbfedb04485fb6089,0xbffa4615d4334ec3,2 +np.float64,0xbfc44349b1288694,0xbfc46f484b6f1cd6,2 +np.float64,0xbfe265188264ca31,0xbfe4f37d61cd9e17,2 +np.float64,0xbfd030351da0606a,0xbfd08c2537287ee1,2 +np.float64,0x3fd8fb131db1f628,0x3fda613363ca601e,2 +np.float64,0xbff0000000000000,0xfff0000000000000,2 +np.float64,0xbfe48d9a60691b35,0xbfe862c02d8fec1e,2 +np.float64,0x3fd185e050a30bc0,0x3fd1fb4c614ddb07,2 +np.float64,0xbfe4a5807e694b01,0xbfe88b8ff2d6caa7,2 +np.float64,0xbfc934d7ad3269b0,0xbfc98a405d25a666,2 +np.float64,0xbfea0e3c62741c79,0xbff23b4bd3a7b15d,2 +np.float64,0x3fe7244071ee4880,0x3fed41b27ba6bb22,2 +np.float64,0xbfd419f81ba833f0,0xbfd4cdf71b4533a3,2 +np.float64,0xbfe1e73a34e3ce74,0xbfe439eb15fa6baf,2 +np.float64,0x3fcdd9a63f3bb350,0x3fce68e1c401eff0,2 +np.float64,0x3fd1b5960ba36b2c,0x3fd22eeb566f1976,2 +np.float64,0x3fe9ad18e0735a32,0x3ff1af23c534260d,2 +np.float64,0xbfd537918aaa6f24,0xbfd60ccc8df0962b,2 +np.float64,0x3fcba3d3c73747a8,0x3fcc14fd5e5c49ad,2 +np.float64,0x3fd367e3c0a6cfc8,0x3fd40921b14e288e,2 +np.float64,0x3fe94303c6f28608,0x3ff11e62db2db6ac,2 +np.float64,0xbfcc5f77fd38bef0,0xbfccda110c087519,2 +np.float64,0xbfd63b74d7ac76ea,0xbfd7328af9f37402,2 +np.float64,0xbfe5321289ea6425,0xbfe9811ce96609ad,2 +np.float64,0xbfde910879bd2210,0xbfe0a2cd0ed1d368,2 +np.float64,0xbfcc9d9bad393b38,0xbfcd1b722a0b1371,2 +np.float64,0xbfe6dd39e16dba74,0xbfecaeb7c8c069f6,2 +np.float64,0xbfe98316eff3062e,0xbff174d7347d48bf,2 +np.float64,0xbfda88f8d1b511f2,0xbfdc3c0e75dad903,2 +np.float64,0x3fd400d8c2a801b0,0x3fd4b21bacff1f5d,2 +np.float64,0xbfe1ed335863da66,0xbfe4429e45e99779,2 +np.float64,0xbf3423a200284800,0xbf3423a20acb0342,2 +np.float64,0xbfe97bc59672f78b,0xbff16ad1adc44a33,2 +np.float64,0xbfeeca60d7fd94c2,0xbfff98d7f18f7728,2 +np.float64,0x3fd1eb13b2a3d628,0x3fd268e6ff4d56ce,2 +np.float64,0xbfa5594c242ab2a0,0xbfa55c77d6740a39,2 +np.float64,0x3fe72662006e4cc4,0x3fed462a9dedbfee,2 +np.float64,0x3fef4bb221fe9764,0x4001fe4f4cdfedb2,2 +np.float64,0xbfe938d417f271a8,0xbff110e78724ca2b,2 +np.float64,0xbfcc29ab2f385358,0xbfcca182140ef541,2 +np.float64,0x3fe18cd42c6319a8,0x3fe3b77e018165e7,2 +np.float64,0xbfec6c5cae78d8b9,0xbff69d8e01309b48,2 +np.float64,0xbfd5723da7aae47c,0xbfd64ecde17da471,2 +np.float64,0xbfe3096722e612ce,0xbfe5ed43634f37ff,2 +np.float64,0xbfdacaceb1b5959e,0xbfdc8bb826bbed39,2 +np.float64,0x3fc59a57cb2b34b0,0x3fc5cfc4a7c9bac8,2 +np.float64,0x3f84adce10295b80,0x3f84adfc1f1f6e97,2 +np.float64,0x3fdd5b28bbbab650,0x3fdfb8b906d77df4,2 +np.float64,0x3fdebf94c6bd7f28,0x3fe0c10188e1bc7c,2 +np.float64,0x3fdb30c612b6618c,0x3fdd07bf18597821,2 +np.float64,0x3fe7eeb3176fdd66,0x3feefb0be694b855,2 +np.float64,0x0,0x0,2 +np.float64,0xbfe10057e9e200b0,0xbfe2f13365e5b1c9,2 +np.float64,0xbfeb61a82376c350,0xbff46e665d3a60f5,2 
+np.float64,0xbfe7f54aec6fea96,0xbfef0a0759f726dc,2 +np.float64,0xbfe4f6da3de9edb4,0xbfe9187d85bd1ab5,2 +np.float64,0xbfeb8be1b3f717c4,0xbff4be8efaab2e75,2 +np.float64,0x3fed40bc31fa8178,0x3ff8d5ec4a7f3e9b,2 +np.float64,0xbfe40f8711681f0e,0xbfe78fa5c62b191b,2 +np.float64,0x3fd1034d94a2069c,0x3fd16e78e9efb85b,2 +np.float64,0x3fc74db15b2e9b60,0x3fc790f26e894098,2 +np.float64,0x3fd912a88cb22550,0x3fda7d0ab3b21308,2 +np.float64,0x3fd8948a3bb12914,0x3fd9e8950c7874c8,2 +np.float64,0xbfa7ada5242f5b50,0xbfa7b1f8db50c104,2 +np.float64,0x3feeb2e1c27d65c4,0x3fff000b7d09c9b7,2 +np.float64,0x3fe9d46cbbf3a8da,0x3ff1e6f405265a6e,2 +np.float64,0xbfe2480b77e49017,0xbfe4c83b9b37bf0c,2 +np.float64,0x3fe950ea9372a1d6,0x3ff130e62468bf2c,2 +np.float64,0x3fefa7272a7f4e4e,0x4004d8c9bf31ab58,2 +np.float64,0xbfe7309209ee6124,0xbfed5b94acef917a,2 +np.float64,0x3fd05e8c64a0bd18,0x3fd0bdb11e0903c6,2 +np.float64,0x3fd9236043b246c0,0x3fda90ccbe4bab1e,2 +np.float64,0xbfdc3d6805b87ad0,0xbfde5266e17154c3,2 +np.float64,0x3fe5e6bad76bcd76,0x3feacbc306c63445,2 +np.float64,0x3ff0000000000000,0x7ff0000000000000,2 +np.float64,0xbfde3d7390bc7ae8,0xbfe06cd480bd0196,2 +np.float64,0xbfd3e2e3c0a7c5c8,0xbfd490edc0a45e26,2 +np.float64,0x3fe39871d76730e4,0x3fe6ce54d1719953,2 +np.float64,0x3fdff00ebcbfe01c,0x3fe1894b6655a6d0,2 +np.float64,0x3f91b7ad58236f40,0x3f91b8213bcb8b0b,2 +np.float64,0xbfd99f48f7b33e92,0xbfdb23d544f62591,2 +np.float64,0x3fae3512cc3c6a20,0x3fae3e10939fd7b5,2 +np.float64,0x3fcc4cf3db3899e8,0x3fccc698a15176d6,2 +np.float64,0xbfd0927e39a124fc,0xbfd0f5522e2bc030,2 +np.float64,0x3fcee859633dd0b0,0x3fcf87bdef7a1e82,2 +np.float64,0xbfe2a8b69565516d,0xbfe5593437b6659a,2 +np.float64,0x3fecf61e20f9ec3c,0x3ff7fda16b0209d4,2 +np.float64,0xbfbf37571e3e6eb0,0xbfbf5f4e1379a64c,2 +np.float64,0xbfd54e1b75aa9c36,0xbfd626223b68971a,2 +np.float64,0x3fe1035a56e206b4,0x3fe2f5651ca0f4b0,2 +np.float64,0x3fe4992989e93254,0x3fe876751afa70dc,2 +np.float64,0x3fc8c313d3318628,0x3fc913faf15d1562,2 +np.float64,0x3f99f6ba8833ed80,0x3f99f8274fb94828,2 +np.float64,0xbfd4a58af0a94b16,0xbfd56947c276e04f,2 +np.float64,0x3fc66f8c872cdf18,0x3fc6ab7a14372a73,2 +np.float64,0x3fc41eee0d283de0,0x3fc449ff1ff0e7a6,2 +np.float64,0x3fefd04d287fa09a,0x4007585010cfa9b0,2 +np.float64,0x3fce9e746f3d3ce8,0x3fcf39514bbe5070,2 +np.float64,0xbfe8056f72700adf,0xbfef2ee2c13e67ba,2 +np.float64,0x3fdd6b1ec0bad63c,0x3fdfccf2ba144fa8,2 +np.float64,0x3fd92ee432b25dc8,0x3fda9e6b96b2b142,2 +np.float64,0xbfc4d18f9529a320,0xbfc50150fb4de0cc,2 +np.float64,0xbfe09939a7613274,0xbfe262d703c317af,2 +np.float64,0xbfd130b132a26162,0xbfd19f5a00ae29c4,2 +np.float64,0x3fa06e21d420dc40,0x3fa06f93aba415fb,2 +np.float64,0x3fc5c48fbd2b8920,0x3fc5fb3bfad3bf55,2 +np.float64,0xbfdfa2bacbbf4576,0xbfe155f839825308,2 +np.float64,0x3fe3e1fa0f67c3f4,0x3fe745081dd4fd03,2 +np.float64,0x3fdae58289b5cb04,0x3fdcac1f6789130a,2 +np.float64,0xbf8ed3ba103da780,0xbf8ed452a9cc1442,2 +np.float64,0xbfec06b46f780d69,0xbff5b86f30d70908,2 +np.float64,0xbfe990c13b732182,0xbff187a90ae611f8,2 +np.float64,0xbfdd46c738ba8d8e,0xbfdf9eee0a113230,2 +np.float64,0x3fe08b83f3611708,0x3fe2501b1c77035c,2 +np.float64,0xbfd501b65baa036c,0xbfd5d05de3fceac8,2 +np.float64,0xbfcf4fa21f3e9f44,0xbfcff5829582c0b6,2 +np.float64,0xbfefbc0bfbff7818,0xc005eca1a2c56b38,2 +np.float64,0xbfe1ba6959e374d2,0xbfe3f8f88d128ce5,2 +np.float64,0xbfd4e74ee3a9ce9e,0xbfd5b2cabeb45e6c,2 +np.float64,0xbfe77c38eaeef872,0xbfedfd332d6f1c75,2 +np.float64,0x3fa9b5e4fc336bc0,0x3fa9bb6f6b80b4af,2 +np.float64,0xbfecba63917974c7,0xbff75e44df7f8e81,2 
+np.float64,0x3fd6cf17b2ad9e30,0x3fd7db0b93b7f2b5,2 diff --git a/.venv/lib/python3.12/site-packages/numpy/core/tests/data/umath-validation-set-cbrt.csv b/.venv/lib/python3.12/site-packages/numpy/core/tests/data/umath-validation-set-cbrt.csv new file mode 100644 index 00000000..ad141cb4 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/data/umath-validation-set-cbrt.csv @@ -0,0 +1,1429 @@ +dtype,input,output,ulperrortol +np.float32,0x3ee7054c,0x3f4459ea,2 +np.float32,0x7d1e2489,0x54095925,2 +np.float32,0x7ee5edf5,0x549b992b,2 +np.float32,0x380607,0x2a425e72,2 +np.float32,0x34a8f3,0x2a3e6603,2 +np.float32,0x3eee2844,0x3f465a45,2 +np.float32,0x59e49c,0x2a638d0a,2 +np.float32,0xbf72c77a,0xbf7b83d4,2 +np.float32,0x7f2517b4,0x54af8bf0,2 +np.float32,0x80068a69,0xa9bdfe8b,2 +np.float32,0xbe8e3578,0xbf270775,2 +np.float32,0xbe4224dc,0xbf131119,2 +np.float32,0xbe0053b8,0xbf001be2,2 +np.float32,0x70e8d,0x29c2ddc5,2 +np.float32,0xff63f7b5,0xd4c37b7f,2 +np.float32,0x3f00bbed,0x3f4b9335,2 +np.float32,0x3f135f4e,0x3f54f5d4,2 +np.float32,0xbe13a488,0xbf063d13,2 +np.float32,0x3f14ec78,0x3f55b478,2 +np.float32,0x7ec35cfb,0x54935fbf,2 +np.float32,0x7d41c589,0x5412f904,2 +np.float32,0x3ef8a16e,0x3f4937f7,2 +np.float32,0x3f5d8464,0x3f73f279,2 +np.float32,0xbeec85ac,0xbf45e5cb,2 +np.float32,0x7f11f722,0x54a87cb1,2 +np.float32,0x8032c085,0xaa3c1219,2 +np.float32,0x80544bac,0xaa5eb9f2,2 +np.float32,0x3e944a10,0x3f296065,2 +np.float32,0xbf29fe50,0xbf5f5796,2 +np.float32,0x7e204d8d,0x545b03d5,2 +np.float32,0xfe1d0254,0xd4598127,2 +np.float32,0x80523129,0xaa5cdba9,2 +np.float32,0x806315fa,0xaa6b0eaf,2 +np.float32,0x3ed3d2a4,0x3f3ec117,2 +np.float32,0x7ee15007,0x549a8cc0,2 +np.float32,0x801ffb5e,0xaa213d4f,2 +np.float32,0x807f9f4a,0xaa7fbf76,2 +np.float32,0xbe45e854,0xbf1402d3,2 +np.float32,0x3d9e2e70,0x3eda0b64,2 +np.float32,0x51f404,0x2a5ca4d7,2 +np.float32,0xbe26a8b0,0xbf0bc54d,2 +np.float32,0x22c99a,0x2a25d2a7,2 +np.float32,0xbf71248b,0xbf7af2d5,2 +np.float32,0x7219fe,0x2a76608e,2 +np.float32,0x7f16fd7d,0x54aa6610,2 +np.float32,0x80716faa,0xaa75e5b9,2 +np.float32,0xbe24f9a4,0xbf0b4c65,2 +np.float32,0x800000,0x2a800000,2 +np.float32,0x80747456,0xaa780f27,2 +np.float32,0x68f9e8,0x2a6fa035,2 +np.float32,0x3f6a297e,0x3f7880d8,2 +np.float32,0x3f28b973,0x3f5ec8f6,2 +np.float32,0x7f58c577,0x54c03a70,2 +np.float32,0x804befcc,0xaa571b4f,2 +np.float32,0x3e2be027,0x3f0d36cf,2 +np.float32,0xfe7e80a4,0xd47f7ff7,2 +np.float32,0xfe9d444a,0xd489181b,2 +np.float32,0x3db3e790,0x3ee399d6,2 +np.float32,0xbf154c3e,0xbf55e23e,2 +np.float32,0x3d1096b7,0x3ea7f4aa,2 +np.float32,0x7fc00000,0x7fc00000,2 +np.float32,0x804e2521,0xaa592c06,2 +np.float32,0xbeda2f00,0xbf40a513,2 +np.float32,0x3f191788,0x3f57ae30,2 +np.float32,0x3ed24ade,0x3f3e4b34,2 +np.float32,0x807fadb4,0xaa7fc917,2 +np.float32,0xbe0a06dc,0xbf034234,2 +np.float32,0x3f250bba,0x3f5d276d,2 +np.float32,0x7e948b00,0x548682c8,2 +np.float32,0xfe65ecdc,0xd476fed2,2 +np.float32,0x6fdbdd,0x2a74c095,2 +np.float32,0x800112de,0xa9500fa6,2 +np.float32,0xfe63225c,0xd475fdee,2 +np.float32,0x7f3d9acd,0x54b7d648,2 +np.float32,0xfc46f480,0xd3bacf87,2 +np.float32,0xfe5deaac,0xd47417ff,2 +np.float32,0x60ce53,0x2a693d93,2 +np.float32,0x6a6e2f,0x2a70ba2c,2 +np.float32,0x7f43f0f1,0x54b9dcd0,2 +np.float32,0xbf6170c9,0xbf756104,2 +np.float32,0xbe5c9f74,0xbf197852,2 +np.float32,0xff1502b0,0xd4a9a693,2 +np.float32,0x8064f6af,0xaa6c886e,2 +np.float32,0xbf380564,0xbf6552e5,2 +np.float32,0xfeb9b7dc,0xd490e85f,2 +np.float32,0x7f34f941,0x54b5010d,2 +np.float32,0xbe9d4ca0,0xbf2cbd5f,2 
+np.float32,0x3f6e43d2,0x3f79f240,2 +np.float32,0xbdad0530,0xbee0a8f2,2 +np.float32,0x3da18459,0x3edb9105,2 +np.float32,0xfd968340,0xd42a3808,2 +np.float32,0x3ea03e64,0x3f2dcf96,2 +np.float32,0x801d2f5b,0xaa1c6525,2 +np.float32,0xbf47d92d,0xbf6bb7e9,2 +np.float32,0x55a6b9,0x2a5fe9fb,2 +np.float32,0x77a7c2,0x2a7a4fb8,2 +np.float32,0xfebbc16e,0xd4916f88,2 +np.float32,0x3f5d3d6e,0x3f73d86a,2 +np.float32,0xfccd2b60,0xd3edcacb,2 +np.float32,0xbd026460,0xbea244b0,2 +np.float32,0x3e55bd,0x2a4968e4,2 +np.float32,0xbe7b5708,0xbf20490d,2 +np.float32,0xfe413cf4,0xd469171f,2 +np.float32,0x7710e3,0x2a79e657,2 +np.float32,0xfc932520,0xd3d4d9ca,2 +np.float32,0xbf764a1b,0xbf7cb8aa,2 +np.float32,0x6b1923,0x2a713aca,2 +np.float32,0xfe4dcd04,0xd46e092d,2 +np.float32,0xff3085ac,0xd4b381f8,2 +np.float32,0x3f72c438,0x3f7b82b4,2 +np.float32,0xbf6f0c6e,0xbf7a3852,2 +np.float32,0x801d2b1b,0xaa1c5d8d,2 +np.float32,0x3e9db91e,0x3f2ce50d,2 +np.float32,0x3f684f9d,0x3f77d8c5,2 +np.float32,0x7dc784,0x2a7e82cc,2 +np.float32,0x7d2c88e9,0x540d64f8,2 +np.float32,0x807fb708,0xaa7fcf51,2 +np.float32,0x8003c49a,0xa99e16e0,2 +np.float32,0x3ee4f5b8,0x3f43c3ff,2 +np.float32,0xfe992c5e,0xd487e4ec,2 +np.float32,0x4b4dfa,0x2a568216,2 +np.float32,0x3d374c80,0x3eb5c6a8,2 +np.float32,0xbd3a4700,0xbeb6c15c,2 +np.float32,0xbf13cb80,0xbf5529e5,2 +np.float32,0xbe7306d4,0xbf1e7f91,2 +np.float32,0xbf800000,0xbf800000,2 +np.float32,0xbea42efe,0xbf2f394e,2 +np.float32,0x3e1981d0,0x3f07fe2c,2 +np.float32,0x3f17ea1d,0x3f572047,2 +np.float32,0x7dc1e0,0x2a7e7efe,2 +np.float32,0x80169c08,0xaa0fa320,2 +np.float32,0x3f3e1972,0x3f67d248,2 +np.float32,0xfe5d3c88,0xd473d815,2 +np.float32,0xbf677448,0xbf778aac,2 +np.float32,0x7e799b7d,0x547dd9e4,2 +np.float32,0x3f00bb2c,0x3f4b92cf,2 +np.float32,0xbeb29f9c,0xbf343798,2 +np.float32,0xbd6b7830,0xbec59a86,2 +np.float32,0x807a524a,0xaa7c282a,2 +np.float32,0xbe0a7a04,0xbf0366ab,2 +np.float32,0x80237470,0xaa26e061,2 +np.float32,0x3ccbc0f6,0x3e95744f,2 +np.float32,0x3edec6bc,0x3f41fcb6,2 +np.float32,0x3f635198,0x3f760efa,2 +np.float32,0x800eca4f,0xa9f960d8,2 +np.float32,0x3f800000,0x3f800000,2 +np.float32,0xff4eeb9e,0xd4bd456a,2 +np.float32,0x56f4e,0x29b29e70,2 +np.float32,0xff5383a0,0xd4bea95c,2 +np.float32,0x3f4c3a77,0x3f6d6d94,2 +np.float32,0x3f6c324a,0x3f79388c,2 +np.float32,0xbebdc092,0xbf37e27c,2 +np.float32,0xff258956,0xd4afb42e,2 +np.float32,0xdc78c,0x29f39012,2 +np.float32,0xbf2db06a,0xbf60f2f5,2 +np.float32,0xbe3c5808,0xbf119660,2 +np.float32,0xbf1ba866,0xbf58e0f4,2 +np.float32,0x80377640,0xaa41b79d,2 +np.float32,0x4fdc4d,0x2a5abfea,2 +np.float32,0x7f5e7560,0x54c1e516,2 +np.float32,0xfeb4d3f2,0xd48f9fde,2 +np.float32,0x3f12a622,0x3f549c7d,2 +np.float32,0x7f737ed7,0x54c7d2dc,2 +np.float32,0xa0ddc,0x29db456d,2 +np.float32,0xfe006740,0xd44b6689,2 +np.float32,0x3f17dfd4,0x3f571b6c,2 +np.float32,0x67546e,0x2a6e5dd1,2 +np.float32,0xff0d0f11,0xd4a693e2,2 +np.float32,0xbd170090,0xbeaa6738,2 +np.float32,0x5274a0,0x2a5d1806,2 +np.float32,0x3e154fe0,0x3f06be1a,2 +np.float32,0x7ddb302e,0x5440f0a7,2 +np.float32,0x3f579d10,0x3f71c2af,2 +np.float32,0xff2bc5bb,0xd4b1e20c,2 +np.float32,0xfee8fa6a,0xd49c4872,2 +np.float32,0xbea551b0,0xbf2fa07b,2 +np.float32,0xfeabc75c,0xd48d3004,2 +np.float32,0x7f50a5a8,0x54bdcbd1,2 +np.float32,0x50354b,0x2a5b110d,2 +np.float32,0x7d139f13,0x54063b6b,2 +np.float32,0xbeee1b08,0xbf465699,2 +np.float32,0xfe5e1650,0xd47427fe,2 +np.float32,0x7f7fffff,0x54cb2ff5,2 +np.float32,0xbf52ede8,0xbf6fff35,2 +np.float32,0x804bba81,0xaa56e8f1,2 +np.float32,0x6609e2,0x2a6d5e94,2 
+np.float32,0x692621,0x2a6fc1d6,2 +np.float32,0xbf288bb6,0xbf5eb4d3,2 +np.float32,0x804f28c4,0xaa5a1b82,2 +np.float32,0xbdaad2a8,0xbedfb46e,2 +np.float32,0x5e04f8,0x2a66fb13,2 +np.float32,0x804c10da,0xaa573a81,2 +np.float32,0xbe412764,0xbf12d0fd,2 +np.float32,0x801c35cc,0xaa1aa250,2 +np.float32,0x6364d4,0x2a6b4cf9,2 +np.float32,0xbf6d3cea,0xbf79962f,2 +np.float32,0x7e5a9935,0x5472defb,2 +np.float32,0xbe73a38c,0xbf1ea19c,2 +np.float32,0xbd35e950,0xbeb550f2,2 +np.float32,0x46cc16,0x2a5223d6,2 +np.float32,0x3f005288,0x3f4b5b97,2 +np.float32,0x8034e8b7,0xaa3eb2be,2 +np.float32,0xbea775fc,0xbf3061cf,2 +np.float32,0xea0e9,0x29f87751,2 +np.float32,0xbf38faaf,0xbf65b89d,2 +np.float32,0xbedf3184,0xbf421bb0,2 +np.float32,0xbe04250c,0xbf015def,2 +np.float32,0x7f56dae8,0x54bfa901,2 +np.float32,0xfebe3e04,0xd492132e,2 +np.float32,0x3e4dc326,0x3f15f19e,2 +np.float32,0x803da197,0xaa48a621,2 +np.float32,0x7eeb35aa,0x549cc7c6,2 +np.float32,0xfebb3eb6,0xd4914dc0,2 +np.float32,0xfed17478,0xd496d5e2,2 +np.float32,0x80243694,0xaa280ed2,2 +np.float32,0x8017e666,0xaa1251d3,2 +np.float32,0xbf07e942,0xbf4f4a3e,2 +np.float32,0xbf578fa6,0xbf71bdab,2 +np.float32,0x7ed8d80f,0x549896b6,2 +np.float32,0x3f2277ae,0x3f5bff11,2 +np.float32,0x7e6f195b,0x547a3cd4,2 +np.float32,0xbf441559,0xbf6a3a91,2 +np.float32,0x7f1fb427,0x54ad9d8d,2 +np.float32,0x71695f,0x2a75e12d,2 +np.float32,0xbd859588,0xbece19a1,2 +np.float32,0x7f5702fc,0x54bfb4eb,2 +np.float32,0x3f040008,0x3f4d4842,2 +np.float32,0x3de00ca5,0x3ef4df89,2 +np.float32,0x3eeabb03,0x3f45658c,2 +np.float32,0x3dfe5e65,0x3eff7480,2 +np.float32,0x1,0x26a14518,2 +np.float32,0x8065e400,0xaa6d4130,2 +np.float32,0xff50e1bb,0xd4bdde07,2 +np.float32,0xbe88635a,0xbf24b7e9,2 +np.float32,0x3f46bfab,0x3f6b4908,2 +np.float32,0xbd85c3c8,0xbece3168,2 +np.float32,0xbe633f64,0xbf1afdb1,2 +np.float32,0xff2c7706,0xd4b21f2a,2 +np.float32,0xbf02816c,0xbf4c812a,2 +np.float32,0x80653aeb,0xaa6cbdab,2 +np.float32,0x3eef1d10,0x3f469e24,2 +np.float32,0x3d9944bf,0x3ed7c36a,2 +np.float32,0x1b03d4,0x2a186b2b,2 +np.float32,0x3f251b7c,0x3f5d2e76,2 +np.float32,0x3edebab0,0x3f41f937,2 +np.float32,0xfefc2148,0xd4a073ff,2 +np.float32,0x7448ee,0x2a77f051,2 +np.float32,0x3bb8a400,0x3e3637ee,2 +np.float32,0x57df36,0x2a61d527,2 +np.float32,0xfd8b9098,0xd425fccb,2 +np.float32,0x7f67627e,0x54c4744d,2 +np.float32,0x801165d7,0xaa039fba,2 +np.float32,0x53aae5,0x2a5e2bfd,2 +np.float32,0x8014012b,0xaa09e4f1,2 +np.float32,0x3f7a2d53,0x3f7e0b4b,2 +np.float32,0x3f5fb700,0x3f74c052,2 +np.float32,0x7f192a06,0x54ab366c,2 +np.float32,0x3f569611,0x3f71603b,2 +np.float32,0x25e2dc,0x2a2a9b65,2 +np.float32,0x8036465e,0xaa405342,2 +np.float32,0x804118e1,0xaa4c5785,2 +np.float32,0xbef08d3e,0xbf4703e1,2 +np.float32,0x3447e2,0x2a3df0be,2 +np.float32,0xbf2a350b,0xbf5f6f8c,2 +np.float32,0xbec87e3e,0xbf3b4a73,2 +np.float32,0xbe99a4a8,0xbf2b6412,2 +np.float32,0x2ea2ae,0x2a36d77e,2 +np.float32,0xfcb69600,0xd3e4b9e3,2 +np.float32,0x717700,0x2a75eb06,2 +np.float32,0xbf4e81ce,0xbf6e4ecc,2 +np.float32,0xbe2021ac,0xbf09ebee,2 +np.float32,0xfef94eee,0xd49fda31,2 +np.float32,0x8563e,0x29ce0015,2 +np.float32,0x7f5d0ca5,0x54c17c0f,2 +np.float32,0x3f16459a,0x3f56590f,2 +np.float32,0xbe12f7bc,0xbf0608a0,2 +np.float32,0x3f10fd3d,0x3f53ce5f,2 +np.float32,0x3ca5e1b0,0x3e8b8d96,2 +np.float32,0xbe5288e0,0xbf17181f,2 +np.float32,0xbf7360f6,0xbf7bb8c9,2 +np.float32,0x7e989d33,0x5487ba88,2 +np.float32,0x3ea7b5dc,0x3f307839,2 +np.float32,0x7e8da0c9,0x548463f0,2 +np.float32,0xfeaf7888,0xd48e3122,2 +np.float32,0x7d90402d,0x5427d321,2 
+np.float32,0x72e309,0x2a76f0ee,2 +np.float32,0xbe1faa34,0xbf09c998,2 +np.float32,0xbf2b1652,0xbf5fd1f4,2 +np.float32,0x8051eb0c,0xaa5c9cca,2 +np.float32,0x7edf02bf,0x549a058e,2 +np.float32,0x7fa00000,0x7fe00000,2 +np.float32,0x3f67f873,0x3f77b9c1,2 +np.float32,0x3f276b63,0x3f5e358c,2 +np.float32,0x7eeb4bf2,0x549cccb9,2 +np.float32,0x3bfa2c,0x2a46d675,2 +np.float32,0x3e133c50,0x3f061d75,2 +np.float32,0x3ca302c0,0x3e8abe4a,2 +np.float32,0x802e152e,0xaa361dd5,2 +np.float32,0x3f504810,0x3f6efd0a,2 +np.float32,0xbf43e0b5,0xbf6a2599,2 +np.float32,0x80800000,0xaa800000,2 +np.float32,0x3f1c0980,0x3f590e03,2 +np.float32,0xbf0084f6,0xbf4b7638,2 +np.float32,0xfee72d32,0xd49be10d,2 +np.float32,0x3f3c00ed,0x3f66f763,2 +np.float32,0x80511e81,0xaa5be492,2 +np.float32,0xfdd1b8a0,0xd43e1f0d,2 +np.float32,0x7d877474,0x54245785,2 +np.float32,0x7f110bfe,0x54a82207,2 +np.float32,0xff800000,0xff800000,2 +np.float32,0x6b6a2,0x29bfa706,2 +np.float32,0xbf5bdfd9,0xbf7357b7,2 +np.float32,0x8025bfa3,0xaa2a6676,2 +np.float32,0x3a3581,0x2a44dd3a,2 +np.float32,0x542c2a,0x2a5e9e2f,2 +np.float32,0xbe1d5650,0xbf091d57,2 +np.float32,0x3e97760d,0x3f2a935e,2 +np.float32,0x7f5dcde2,0x54c1b460,2 +np.float32,0x800bde1e,0xa9e7bbaf,2 +np.float32,0x3e6b9e61,0x3f1cdf07,2 +np.float32,0x7d46c003,0x54143884,2 +np.float32,0x80073fbb,0xa9c49e67,2 +np.float32,0x503c23,0x2a5b1748,2 +np.float32,0x7eb7b070,0x549060c8,2 +np.float32,0xe9d8f,0x29f86456,2 +np.float32,0xbeedd4f0,0xbf464320,2 +np.float32,0x3f40d5d6,0x3f68eda1,2 +np.float32,0xff201f28,0xd4adc44b,2 +np.float32,0xbdf61e98,0xbefca9c7,2 +np.float32,0x3e8a0dc9,0x3f2562e3,2 +np.float32,0xbc0c0c80,0xbe515f61,2 +np.float32,0x2b3c15,0x2a3248e3,2 +np.float32,0x42a7bb,0x2a4df592,2 +np.float32,0x7f337947,0x54b480af,2 +np.float32,0xfec21db4,0xd4930f4b,2 +np.float32,0x7f4fdbf3,0x54bd8e94,2 +np.float32,0x1e2253,0x2a1e1286,2 +np.float32,0x800c4c80,0xa9ea819e,2 +np.float32,0x7e96f5b7,0x54873c88,2 +np.float32,0x7ce4e131,0x53f69ed4,2 +np.float32,0xbead8372,0xbf327b63,2 +np.float32,0x3e15ca7e,0x3f06e2f3,2 +np.float32,0xbf63e17b,0xbf7642da,2 +np.float32,0xff5bdbdb,0xd4c122f9,2 +np.float32,0x3f44411e,0x3f6a4bfd,2 +np.float32,0xfd007da0,0xd40029d2,2 +np.float32,0xbe940168,0xbf2944b7,2 +np.float32,0x80000000,0x80000000,2 +np.float32,0x3d28e356,0x3eb0e1b8,2 +np.float32,0x3eb9fcd8,0x3f36a918,2 +np.float32,0x4f6410,0x2a5a51eb,2 +np.float32,0xbdf18e30,0xbefb1775,2 +np.float32,0x32edbd,0x2a3c49e3,2 +np.float32,0x801f70a5,0xaa2052da,2 +np.float32,0x8045a045,0xaa50f98c,2 +np.float32,0xbdd6cb00,0xbef17412,2 +np.float32,0x3f118f2c,0x3f541557,2 +np.float32,0xbe65c378,0xbf1b8f95,2 +np.float32,0xfd9a9060,0xd42bbb8b,2 +np.float32,0x3f04244f,0x3f4d5b0f,2 +np.float32,0xff05214b,0xd4a3656f,2 +np.float32,0xfe342cd0,0xd463b706,2 +np.float32,0x3f3409a8,0x3f63a836,2 +np.float32,0x80205db2,0xaa21e1e5,2 +np.float32,0xbf37c982,0xbf653a03,2 +np.float32,0x3f36ce8f,0x3f64d17e,2 +np.float32,0x36ffda,0x2a412d61,2 +np.float32,0xff569752,0xd4bf94e6,2 +np.float32,0x802fdb0f,0xaa386c3a,2 +np.float32,0x7ec55a87,0x5493df71,2 +np.float32,0x7f2234c7,0x54ae847e,2 +np.float32,0xbf02df76,0xbf4cb23d,2 +np.float32,0x3d68731a,0x3ec4c156,2 +np.float32,0x8146,0x2921cd8e,2 +np.float32,0x80119364,0xaa041235,2 +np.float32,0xfe6c1c00,0xd47930b5,2 +np.float32,0x8070da44,0xaa757996,2 +np.float32,0xfefbf50c,0xd4a06a9d,2 +np.float32,0xbf01b6a8,0xbf4c170a,2 +np.float32,0x110702,0x2a02aedb,2 +np.float32,0xbf063cd4,0xbf4e6f87,2 +np.float32,0x3f1ff178,0x3f5ad9dd,2 +np.float32,0xbf76dcd4,0xbf7cead0,2 +np.float32,0x80527281,0xaa5d1620,2 
+np.float32,0xfea96df8,0xd48c8a7f,2 +np.float32,0x68db02,0x2a6f88b0,2 +np.float32,0x62d971,0x2a6adec7,2 +np.float32,0x3e816fe0,0x3f21df04,2 +np.float32,0x3f586379,0x3f720cc0,2 +np.float32,0x804a3718,0xaa5577ff,2 +np.float32,0x2e2506,0x2a3632b2,2 +np.float32,0x3f297d,0x2a4a4bf3,2 +np.float32,0xbe37aba8,0xbf105f88,2 +np.float32,0xbf18b264,0xbf577ea7,2 +np.float32,0x7f50d02d,0x54bdd8b5,2 +np.float32,0xfee296dc,0xd49ad757,2 +np.float32,0x7ec5137e,0x5493cdb1,2 +np.float32,0x3f4811f4,0x3f6bce3a,2 +np.float32,0xfdff32a0,0xd44af991,2 +np.float32,0x3f6ef140,0x3f7a2ed6,2 +np.float32,0x250838,0x2a2950b5,2 +np.float32,0x25c28e,0x2a2a6ada,2 +np.float32,0xbe875e50,0xbf244e90,2 +np.float32,0x3e3bdff8,0x3f11776a,2 +np.float32,0x3e9fe493,0x3f2daf17,2 +np.float32,0x804d8599,0xaa5897d9,2 +np.float32,0x3f0533da,0x3f4de759,2 +np.float32,0xbe63023c,0xbf1aefc8,2 +np.float32,0x80636e5e,0xaa6b547f,2 +np.float32,0xff112958,0xd4a82d5d,2 +np.float32,0x3e924112,0x3f28991f,2 +np.float32,0xbe996ffc,0xbf2b507a,2 +np.float32,0x802a7cda,0xaa314081,2 +np.float32,0x8022b524,0xaa25b21e,2 +np.float32,0x3f0808c8,0x3f4f5a43,2 +np.float32,0xbef0ec2a,0xbf471e0b,2 +np.float32,0xff4c2345,0xd4bc6b3c,2 +np.float32,0x25ccc8,0x2a2a7a3b,2 +np.float32,0x7f4467d6,0x54ba0260,2 +np.float32,0x7f506539,0x54bdb846,2 +np.float32,0x412ab4,0x2a4c6a2a,2 +np.float32,0x80672c4a,0xaa6e3ef0,2 +np.float32,0xbddfb7f8,0xbef4c0ac,2 +np.float32,0xbf250bb9,0xbf5d276c,2 +np.float32,0x807dca65,0xaa7e84bd,2 +np.float32,0xbf63b8e0,0xbf763438,2 +np.float32,0xbeed1b0c,0xbf460f6b,2 +np.float32,0x8021594f,0xaa238136,2 +np.float32,0xbebc74c8,0xbf377710,2 +np.float32,0x3e9f8e3b,0x3f2d8fce,2 +np.float32,0x7f50ca09,0x54bdd6d8,2 +np.float32,0x805797c1,0xaa6197df,2 +np.float32,0x3de198f9,0x3ef56f98,2 +np.float32,0xf154d,0x29fb0392,2 +np.float32,0xff7fffff,0xd4cb2ff5,2 +np.float32,0xfed22fa8,0xd49702c4,2 +np.float32,0xbf733736,0xbf7baa64,2 +np.float32,0xbf206a8a,0xbf5b1108,2 +np.float32,0xbca49680,0xbe8b3078,2 +np.float32,0xfecba794,0xd4956e1a,2 +np.float32,0x80126582,0xaa061886,2 +np.float32,0xfee5cc82,0xd49b919f,2 +np.float32,0xbf7ad6ae,0xbf7e4491,2 +np.float32,0x7ea88c81,0x548c4c0c,2 +np.float32,0xbf493a0d,0xbf6c4255,2 +np.float32,0xbf06dda0,0xbf4ec1d4,2 +np.float32,0xff3f6e84,0xd4b86cf6,2 +np.float32,0x3e4fe093,0x3f1674b0,2 +np.float32,0x8048ad60,0xaa53fbde,2 +np.float32,0x7ebb7112,0x54915ac5,2 +np.float32,0x5bd191,0x2a652a0d,2 +np.float32,0xfe3121d0,0xd4626cfb,2 +np.float32,0x7e4421c6,0x546a3f83,2 +np.float32,0x19975b,0x2a15b14f,2 +np.float32,0x801c8087,0xaa1b2a64,2 +np.float32,0xfdf6e950,0xd448c0f6,2 +np.float32,0x74e711,0x2a786083,2 +np.float32,0xbf2b2f2e,0xbf5fdccb,2 +np.float32,0x7ed19ece,0x5496e00b,2 +np.float32,0x7f6f8322,0x54c6ba63,2 +np.float32,0x3e90316d,0x3f27cd69,2 +np.float32,0x7ecb42ce,0x54955571,2 +np.float32,0x3f6d49be,0x3f799aaf,2 +np.float32,0x8053d327,0xaa5e4f9a,2 +np.float32,0x7ebd7361,0x5491df3e,2 +np.float32,0xfdb6eed0,0xd435a7aa,2 +np.float32,0x7f3e79f4,0x54b81e4b,2 +np.float32,0xfe83afa6,0xd4813794,2 +np.float32,0x37c443,0x2a421246,2 +np.float32,0xff075a10,0xd4a44cd8,2 +np.float32,0x3ebc5fe0,0x3f377047,2 +np.float32,0x739694,0x2a77714e,2 +np.float32,0xfe832946,0xd4810b91,2 +np.float32,0x7f2638e6,0x54aff235,2 +np.float32,0xfe87f7a6,0xd4829a3f,2 +np.float32,0x3f50f3f8,0x3f6f3eb8,2 +np.float32,0x3eafa3d0,0x3f333548,2 +np.float32,0xbec26ee6,0xbf39626f,2 +np.float32,0x7e6f924f,0x547a66ff,2 +np.float32,0x7f0baa46,0x54a606f8,2 +np.float32,0xbf6dfc49,0xbf79d939,2 +np.float32,0x7f005709,0x54a1699d,2 +np.float32,0x7ee3d7ef,0x549b2057,2 
+np.float32,0x803709a4,0xaa4138d7,2 +np.float32,0x3f7bf49a,0x3f7ea509,2 +np.float32,0x509db7,0x2a5b6ff5,2 +np.float32,0x7eb1b0d4,0x548ec9ff,2 +np.float32,0x7eb996ec,0x5490dfce,2 +np.float32,0xbf1fcbaa,0xbf5ac89e,2 +np.float32,0x3e2c9a98,0x3f0d69cc,2 +np.float32,0x3ea77994,0x3f306312,2 +np.float32,0x3f3cbfe4,0x3f67457c,2 +np.float32,0x8422a,0x29cd5a30,2 +np.float32,0xbd974558,0xbed6d264,2 +np.float32,0xfecee77a,0xd496387f,2 +np.float32,0x3f51876b,0x3f6f76f1,2 +np.float32,0x3b1a25,0x2a45ddad,2 +np.float32,0xfe9912f0,0xd487dd67,2 +np.float32,0x3f3ab13d,0x3f666d99,2 +np.float32,0xbf35565a,0xbf64341b,2 +np.float32,0x7d4e84aa,0x54162091,2 +np.float32,0x4c2570,0x2a574dea,2 +np.float32,0x7e82dca6,0x5480f26b,2 +np.float32,0x7f5503e7,0x54bf1c8d,2 +np.float32,0xbeb85034,0xbf361c59,2 +np.float32,0x80460a69,0xaa516387,2 +np.float32,0x805fbbab,0xaa68602c,2 +np.float32,0x7d4b4c1b,0x541557b8,2 +np.float32,0xbefa9a0a,0xbf49bfbc,2 +np.float32,0x3dbd233f,0x3ee76e09,2 +np.float32,0x58b6df,0x2a628d50,2 +np.float32,0xfcdcc180,0xd3f3aad9,2 +np.float32,0x423a37,0x2a4d8487,2 +np.float32,0xbed8b32a,0xbf403507,2 +np.float32,0x3f68e85d,0x3f780f0b,2 +np.float32,0x7ee13c4b,0x549a883d,2 +np.float32,0xff2ed4c5,0xd4b2eec1,2 +np.float32,0xbf54dadc,0xbf70b99a,2 +np.float32,0x3f78b0af,0x3f7d8a32,2 +np.float32,0x3f377372,0x3f651635,2 +np.float32,0xfdaa6178,0xd43166bc,2 +np.float32,0x8060c337,0xaa6934a6,2 +np.float32,0x7ec752c2,0x54945cf6,2 +np.float32,0xbd01a760,0xbea1f624,2 +np.float32,0x6f6599,0x2a746a35,2 +np.float32,0x3f6315b0,0x3f75f95b,2 +np.float32,0x7f2baf32,0x54b1da44,2 +np.float32,0x3e400353,0x3f1286d8,2 +np.float32,0x40d3bf,0x2a4c0f15,2 +np.float32,0x7f733aca,0x54c7c03d,2 +np.float32,0x7e5c5407,0x5473828b,2 +np.float32,0x80191703,0xaa14b56a,2 +np.float32,0xbf4fc144,0xbf6ec970,2 +np.float32,0xbf1137a7,0xbf53eacd,2 +np.float32,0x80575410,0xaa615db3,2 +np.float32,0xbd0911d0,0xbea4fe07,2 +np.float32,0x3e98534a,0x3f2ae643,2 +np.float32,0x3f3b089a,0x3f669185,2 +np.float32,0x4fc752,0x2a5aacc1,2 +np.float32,0xbef44ddc,0xbf480b6e,2 +np.float32,0x80464217,0xaa519af4,2 +np.float32,0x80445fae,0xaa4fb6de,2 +np.float32,0x80771cf4,0xaa79eec8,2 +np.float32,0xfd9182e8,0xd4284fed,2 +np.float32,0xff0a5d16,0xd4a58288,2 +np.float32,0x3f33e169,0x3f63973e,2 +np.float32,0x8021a247,0xaa23f820,2 +np.float32,0xbf362522,0xbf648ab8,2 +np.float32,0x3f457cd7,0x3f6ac95e,2 +np.float32,0xbcadf400,0xbe8dc7e2,2 +np.float32,0x80237210,0xaa26dca7,2 +np.float32,0xbf1293c9,0xbf54939f,2 +np.float32,0xbc5e73c0,0xbe744a37,2 +np.float32,0x3c03f980,0x3e4d44df,2 +np.float32,0x7da46f,0x2a7e6b20,2 +np.float32,0x5d4570,0x2a665dd0,2 +np.float32,0x3e93fbac,0x3f294287,2 +np.float32,0x7e6808fd,0x5477bfa4,2 +np.float32,0xff5aa9a6,0xd4c0c925,2 +np.float32,0xbf5206ba,0xbf6fa767,2 +np.float32,0xbf6e513e,0xbf79f6f1,2 +np.float32,0x3ed01c0f,0x3f3da20f,2 +np.float32,0xff47d93d,0xd4bb1704,2 +np.float32,0x7f466cfd,0x54baa514,2 +np.float32,0x665e10,0x2a6d9fc8,2 +np.float32,0x804d0629,0xaa5820e8,2 +np.float32,0x7e0beaa0,0x54514e7e,2 +np.float32,0xbf7fcb6c,0xbf7fee78,2 +np.float32,0x3f6c5b03,0x3f7946dd,2 +np.float32,0x3e941504,0x3f294c30,2 +np.float32,0xbf2749ad,0xbf5e26a1,2 +np.float32,0xfec2a00a,0xd493302d,2 +np.float32,0x3f15a358,0x3f560bce,2 +np.float32,0x3f15c4e7,0x3f561bcd,2 +np.float32,0xfedc8692,0xd499728c,2 +np.float32,0x7e8f6902,0x5484f180,2 +np.float32,0x7f663d62,0x54c42136,2 +np.float32,0x8027ea62,0xaa2d99b4,2 +np.float32,0x3f3d093d,0x3f67636d,2 +np.float32,0x7f118c33,0x54a85382,2 +np.float32,0x803e866a,0xaa499d43,2 +np.float32,0x80053632,0xa9b02407,2 
+np.float32,0xbf36dd66,0xbf64d7af,2 +np.float32,0xbf560358,0xbf71292b,2 +np.float32,0x139a8,0x29596bc0,2 +np.float32,0xbe04f75c,0xbf01a26c,2 +np.float32,0xfe1c3268,0xd45920fa,2 +np.float32,0x7ec77f72,0x5494680c,2 +np.float32,0xbedde724,0xbf41bbba,2 +np.float32,0x3e81dbe0,0x3f220bfd,2 +np.float32,0x800373ac,0xa99989d4,2 +np.float32,0x3f7f859a,0x3f7fd72d,2 +np.float32,0x3eb9dc7e,0x3f369e80,2 +np.float32,0xff5f8eb7,0xd4c236b1,2 +np.float32,0xff1c03cb,0xd4ac44ac,2 +np.float32,0x18cfe1,0x2a14285b,2 +np.float32,0x7f21b075,0x54ae54fd,2 +np.float32,0xff490bd8,0xd4bb7680,2 +np.float32,0xbf15dc22,0xbf5626de,2 +np.float32,0xfe1d5a10,0xd459a9a3,2 +np.float32,0x750544,0x2a7875e4,2 +np.float32,0x8023d5df,0xaa2778b3,2 +np.float32,0x3e42aa08,0x3f1332b2,2 +np.float32,0x3ecaa751,0x3f3bf60d,2 +np.float32,0x0,0x0,2 +np.float32,0x80416da6,0xaa4cb011,2 +np.float32,0x3f4ea9ae,0x3f6e5e22,2 +np.float32,0x2113f4,0x2a230f8e,2 +np.float32,0x3f35c2e6,0x3f64619a,2 +np.float32,0xbf50db8a,0xbf6f3564,2 +np.float32,0xff4d5cea,0xd4bccb8a,2 +np.float32,0x7ee54420,0x549b72d2,2 +np.float32,0x64ee68,0x2a6c81f7,2 +np.float32,0x5330da,0x2a5dbfc2,2 +np.float32,0x80047f88,0xa9a7b467,2 +np.float32,0xbda01078,0xbedae800,2 +np.float32,0xfe96d05a,0xd487315f,2 +np.float32,0x8003cc10,0xa99e7ef4,2 +np.float32,0x8007b4ac,0xa9c8aa3d,2 +np.float32,0x5d4bcf,0x2a66630e,2 +np.float32,0xfdd0c0b0,0xd43dd403,2 +np.float32,0xbf7a1d82,0xbf7e05f0,2 +np.float32,0x74ca33,0x2a784c0f,2 +np.float32,0x804f45e5,0xaa5a3640,2 +np.float32,0x7e6d16aa,0x547988c4,2 +np.float32,0x807d5762,0xaa7e3714,2 +np.float32,0xfecf93d0,0xd4966229,2 +np.float32,0xfecbd25c,0xd4957890,2 +np.float32,0xff7db31c,0xd4ca93b0,2 +np.float32,0x3dac9e18,0x3ee07c4a,2 +np.float32,0xbf4b2d28,0xbf6d0509,2 +np.float32,0xbd4f4c50,0xbebd62e0,2 +np.float32,0xbd2eac40,0xbeb2e0ee,2 +np.float32,0x3d01b69b,0x3ea1fc7b,2 +np.float32,0x7ec63902,0x549416ed,2 +np.float32,0xfcc47700,0xd3ea616d,2 +np.float32,0xbf5ddec2,0xbf7413a1,2 +np.float32,0xff6a6110,0xd4c54c52,2 +np.float32,0xfdfae2a0,0xd449d335,2 +np.float32,0x7e54868c,0x547099cd,2 +np.float32,0x802b5b88,0xaa327413,2 +np.float32,0x80440e72,0xaa4f647a,2 +np.float32,0x3e313c94,0x3f0eaad5,2 +np.float32,0x3ebb492a,0x3f3715a2,2 +np.float32,0xbef56286,0xbf4856d5,2 +np.float32,0x3f0154ba,0x3f4be3a0,2 +np.float32,0xff2df86c,0xd4b2a376,2 +np.float32,0x3ef6a850,0x3f48af57,2 +np.float32,0x3d8d33e1,0x3ed1f22d,2 +np.float32,0x4dd9b9,0x2a58e615,2 +np.float32,0x7f1caf83,0x54ac83c9,2 +np.float32,0xbf7286b3,0xbf7b6d73,2 +np.float32,0x80064f88,0xa9bbbd9f,2 +np.float32,0xbf1f55fa,0xbf5a92db,2 +np.float32,0x546a81,0x2a5ed516,2 +np.float32,0xbe912880,0xbf282d0a,2 +np.float32,0x5df587,0x2a66ee6e,2 +np.float32,0x801f706c,0xaa205279,2 +np.float32,0x58cb6d,0x2a629ece,2 +np.float32,0xfe754f8c,0xd47c62da,2 +np.float32,0xbefb6f4c,0xbf49f8e7,2 +np.float32,0x80000001,0xa6a14518,2 +np.float32,0xbf067837,0xbf4e8df4,2 +np.float32,0x3e8e715c,0x3f271ee4,2 +np.float32,0x8009de9b,0xa9d9ebc8,2 +np.float32,0xbf371ff1,0xbf64f36e,2 +np.float32,0x7f5ce661,0x54c170e4,2 +np.float32,0x3f3c47d1,0x3f671467,2 +np.float32,0xfea5e5a6,0xd48b8eb2,2 +np.float32,0xff62b17f,0xd4c31e15,2 +np.float32,0xff315932,0xd4b3c98f,2 +np.float32,0xbf1c3ca8,0xbf5925b9,2 +np.float32,0x7f800000,0x7f800000,2 +np.float32,0xfdf20868,0xd4476c3b,2 +np.float32,0x5b790e,0x2a64e052,2 +np.float32,0x3f5ddf4e,0x3f7413d4,2 +np.float32,0x7f1a3182,0x54ab9861,2 +np.float32,0x3f4b906e,0x3f6d2b9d,2 +np.float32,0x7ebac760,0x54912edb,2 +np.float32,0x7f626d3f,0x54c30a7e,2 +np.float32,0x3e27b058,0x3f0c0edc,2 
+np.float32,0x8041e69c,0xaa4d2de8,2 +np.float32,0x3f42cee0,0x3f69b84a,2 +np.float32,0x7ec5fe83,0x5494085b,2 +np.float32,0x9d3e6,0x29d99cde,2 +np.float32,0x3edc50c0,0x3f41452d,2 +np.float32,0xbf2c463a,0xbf60562c,2 +np.float32,0x800bfa33,0xa9e871e8,2 +np.float32,0x7c9f2c,0x2a7dba4d,2 +np.float32,0x7f2ef9fd,0x54b2fb73,2 +np.float32,0x80741847,0xaa77cdb9,2 +np.float32,0x7e9c462a,0x5488ce1b,2 +np.float32,0x3ea47ec1,0x3f2f55a9,2 +np.float32,0x7f311c43,0x54b3b4f5,2 +np.float32,0x3d8f4c73,0x3ed2facd,2 +np.float32,0x806d7bd2,0xaa7301ef,2 +np.float32,0xbf633d24,0xbf760799,2 +np.float32,0xff4f9a3f,0xd4bd7a99,2 +np.float32,0x3f6021ca,0x3f74e73d,2 +np.float32,0x7e447015,0x546a5eac,2 +np.float32,0x6bff3c,0x2a71e711,2 +np.float32,0xe9c9f,0x29f85f06,2 +np.float32,0x8009fe14,0xa9dad277,2 +np.float32,0x807cf79c,0xaa7df644,2 +np.float32,0xff440e1b,0xd4b9e608,2 +np.float32,0xbddf9a50,0xbef4b5db,2 +np.float32,0x7f3b1c39,0x54b706fc,2 +np.float32,0x3c7471a0,0x3e7c16a7,2 +np.float32,0x8065b02b,0xaa6d18ee,2 +np.float32,0x7f63a3b2,0x54c36379,2 +np.float32,0xbe9c9d92,0xbf2c7d33,2 +np.float32,0x3d93aad3,0x3ed51a2e,2 +np.float32,0xbf41b040,0xbf694571,2 +np.float32,0x80396b9e,0xaa43f899,2 +np.float64,0x800fa025695f404b,0xaaa4000ff64bb00c,2 +np.float64,0xbfecc00198f98003,0xbfeee0b623fbd94b,2 +np.float64,0x7f9eeb60b03dd6c0,0x55291bf8554bb303,2 +np.float64,0x3fba74485634e890,0x3fde08710bdb148d,2 +np.float64,0xbfdd9a75193b34ea,0xbfe8bf711660a2f5,2 +np.float64,0xbfcf92e17a3f25c4,0xbfe4119eda6f3773,2 +np.float64,0xbfe359e2ba66b3c6,0xbfeb0f7ae97ea142,2 +np.float64,0x20791a5640f24,0x2a9441f13d262bed,2 +np.float64,0x3fe455fbfae8abf8,0x3feb830d63e1022c,2 +np.float64,0xbd112b7b7a226,0x2aa238c097ec269a,2 +np.float64,0x93349ba126694,0x2aa0c363cd74465a,2 +np.float64,0x20300cd440602,0x2a9432b4f4081209,2 +np.float64,0x3fdcfae677b9f5cc,0x3fe892a9ee56fe8d,2 +np.float64,0xbfefaae3f7bf55c8,0xbfefe388066132c4,2 +np.float64,0x1a7d6eb634faf,0x2a92ed9851d29ab5,2 +np.float64,0x7fd5308d39aa6119,0x553be444e30326c6,2 +np.float64,0xff811c7390223900,0xd5205cb404952fa7,2 +np.float64,0x80083d24aff07a4a,0xaaa0285cf764d898,2 +np.float64,0x800633810ccc6703,0xaa9d65341419586b,2 +np.float64,0x800ff456223fe8ac,0xaaa423bbcc24dff1,2 +np.float64,0x7fde5c99aebcb932,0x553f71be7d6d9daa,2 +np.float64,0x3fed961c4b3b2c39,0x3fef2ca146270cac,2 +np.float64,0x7fe744d30c6e89a5,0x554220a4cdc78e62,2 +np.float64,0x3fd8f527c7b1ea50,0x3fe76101085be1cb,2 +np.float64,0xbfc96a14b232d428,0xbfe2ab1a8962606c,2 +np.float64,0xffe85f540cf0bea7,0xd54268dff964519a,2 +np.float64,0x800e3be0fe7c77c2,0xaaa3634efd7f020b,2 +np.float64,0x3feb90d032f721a0,0x3fee72a4579e8b12,2 +np.float64,0xffe05674aaa0ace9,0xd5401c9e3fb4abcf,2 +np.float64,0x3fefc2e32c3f85c6,0x3fefeb940924bf42,2 +np.float64,0xbfecfd89e9f9fb14,0xbfeef6addf73ee49,2 +np.float64,0xf5862717eb0c5,0x2aa3e1428780382d,2 +np.float64,0xffc3003b32260078,0xd53558f92202dcdb,2 +np.float64,0x3feb4c152c36982a,0x3fee5940f7da0825,2 +np.float64,0x3fe7147b002e28f6,0x3fecb2948f46d1e3,2 +np.float64,0x7fe00ad9b4a015b2,0x5540039d15e1da54,2 +np.float64,0x8010000000000000,0xaaa428a2f98d728b,2 +np.float64,0xbfd3a41bfea74838,0xbfe595ab45b1be91,2 +np.float64,0x7fdbfd6e5537fadc,0x553e9a6e1107b8d0,2 +np.float64,0x800151d9d9a2a3b4,0xaa918cd8fb63f40f,2 +np.float64,0x7fe6828401ad0507,0x5541eda05dcd1fcf,2 +np.float64,0x3fdae1e7a1b5c3d0,0x3fe7f711e72ecc35,2 +np.float64,0x7fdf4936133e926b,0x553fc29c8d5edea3,2 +np.float64,0x80079de12d4f3bc3,0xaa9f7b06a9286da4,2 +np.float64,0x3fe1261cade24c39,0x3fe9fe09488e417a,2 +np.float64,0xbfc20dce21241b9c,0xbfe0a842fb207a28,2 
+np.float64,0x3fe3285dfa2650bc,0x3feaf85215f59ef9,2 +np.float64,0x7fe42b93aea85726,0x554148c3c3bb35e3,2 +np.float64,0xffe6c74e7f6d8e9c,0xd541ffd13fa36dbd,2 +np.float64,0x3fe73ea139ee7d42,0x3fecc402242ab7d3,2 +np.float64,0xffbd4b46be3a9690,0xd53392de917c72e4,2 +np.float64,0x800caed8df395db2,0xaaa2a811a02e6be4,2 +np.float64,0x800aacdb6c9559b7,0xaaa19d6fbc8feebf,2 +np.float64,0x839fb4eb073f7,0x2aa0264b98327c12,2 +np.float64,0xffd0157ba9a02af8,0xd5397157a11c0d05,2 +np.float64,0x7fddc8ff173b91fd,0x553f3e7663fb2ac7,2 +np.float64,0x67b365facf66d,0x2a9dd4d838b0d853,2 +np.float64,0xffe12e7fc7225cff,0xd5406272a83a8e1b,2 +np.float64,0x7fea5b19a034b632,0x5542e567658b3e36,2 +np.float64,0x124989d824932,0x2a90ba8dc7a39532,2 +np.float64,0xffe12ef098225de0,0xd54062968450a078,2 +np.float64,0x3fea2f44a3f45e8a,0x3fedee3c461f4716,2 +np.float64,0x3fe6b033e66d6068,0x3fec88c8035e06b1,2 +np.float64,0x3fe928a2ccf25146,0x3fed88d4cde7a700,2 +np.float64,0x3feead27e97d5a50,0x3fef8d7537d82e60,2 +np.float64,0x8003ab80b6875702,0xaa98adfedd7715a9,2 +np.float64,0x45a405828b481,0x2a9a1fa99a4eff1e,2 +np.float64,0x8002ddebad85bbd8,0xaa96babfda4e0031,2 +np.float64,0x3fc278c32824f186,0x3fe0c8e7c979fbd5,2 +np.float64,0x2e10fffc5c221,0x2a96c30a766d06fa,2 +np.float64,0xffd6ba8c2ead7518,0xd53c8d1d92bc2788,2 +np.float64,0xbfeb5ec3a036bd87,0xbfee602bbf0a0d01,2 +np.float64,0x3fed5bd58f7ab7ab,0x3fef181bf591a4a7,2 +np.float64,0x7feb5274a5b6a4e8,0x55431fcf81876218,2 +np.float64,0xaf8fd6cf5f1fb,0x2aa1c6edbb1e2aaf,2 +np.float64,0x7fece718f179ce31,0x55437c74efb90933,2 +np.float64,0xbfa3c42d0c278860,0xbfd5a16407c77e73,2 +np.float64,0x800b5cff0576b9fe,0xaaa1fc4ecb0dec4f,2 +np.float64,0x800be89ae557d136,0xaaa244d115fc0963,2 +np.float64,0x800d2578f5ba4af2,0xaaa2e18a3a3fc134,2 +np.float64,0x80090ff93e321ff3,0xaaa0add578e3cc3c,2 +np.float64,0x28c5a240518c,0x2a81587cccd7e202,2 +np.float64,0x7fec066929780cd1,0x55434971435d1069,2 +np.float64,0x7fc84d4d15309a99,0x55372c204515694f,2 +np.float64,0xffe070a75de0e14e,0xd54025365046dad2,2 +np.float64,0x7fe5b27cc36b64f9,0x5541b5b822f0b6ca,2 +np.float64,0x3fdea35ac8bd46b6,0x3fe9086a0fb792c2,2 +np.float64,0xbfe79996f7af332e,0xbfece9571d37a5b3,2 +np.float64,0xffdfb47f943f6900,0xd53fe6c14c3366db,2 +np.float64,0xc015cf63802ba,0x2aa2517164d075f4,2 +np.float64,0x7feba98948375312,0x5543340b5b1f1181,2 +np.float64,0x8008678e6550cf1d,0xaaa043e7cea90da5,2 +np.float64,0x3fb11b92fa223726,0x3fd9f8b53be4d90b,2 +np.float64,0x7fc9b18cf0336319,0x55379b42da882047,2 +np.float64,0xbfe5043e736a087d,0xbfebd0c67db7a8e3,2 +np.float64,0x7fde88546a3d10a8,0x553f80cfe5bcf5fe,2 +np.float64,0x8006a6c82dcd4d91,0xaa9e171d182ba049,2 +np.float64,0xbfa0f707ac21ee10,0xbfd48e5d3faa1699,2 +np.float64,0xbfe7716bffaee2d8,0xbfecd8e6abfb8964,2 +np.float64,0x9511ccab2a23a,0x2aa0d56d748f0313,2 +np.float64,0x8003ddb9b847bb74,0xaa991ca06fd9d308,2 +np.float64,0x80030710fac60e23,0xaa9725845ac95fe8,2 +np.float64,0xffece5bbaeb9cb76,0xd5437c2670f894f4,2 +np.float64,0x3fd9be5c72b37cb9,0x3fe79f2e932a5708,2 +np.float64,0x1f050cca3e0a3,0x2a93f36499fe5228,2 +np.float64,0x3fd5422becaa8458,0x3fe6295d6150df58,2 +np.float64,0xffd72c050e2e580a,0xd53cbc52d73b495f,2 +np.float64,0xbfe66d5235ecdaa4,0xbfec6ca27e60bf23,2 +np.float64,0x17ac49a42f58a,0x2a923b5b757087a0,2 +np.float64,0xffd39edc40273db8,0xd53b2f7bb99b96bf,2 +np.float64,0x7fde6cf009bcd9df,0x553f77614eb30d75,2 +np.float64,0x80042b4c3fa85699,0xaa99c05fbdd057db,2 +np.float64,0xbfde5547f8bcaa90,0xbfe8f3147d67a940,2 +np.float64,0xbfdd02f9bf3a05f4,0xbfe894f2048aa3fe,2 +np.float64,0xbfa20ec82c241d90,0xbfd4fd02ee55aac7,2 
+np.float64,0x8002f670f8c5ece3,0xaa96fad7e53dd479,2 +np.float64,0x80059f24d7eb3e4a,0xaa9c7312dae0d7bc,2 +np.float64,0x7fe6ae7423ad5ce7,0x5541f9430be53062,2 +np.float64,0xe135ea79c26be,0x2aa350d8f8c526e1,2 +np.float64,0x3fec188ce4f8311a,0x3feea44d21c23f68,2 +np.float64,0x800355688286aad2,0xaa97e6ca51eb8357,2 +np.float64,0xa2d6530b45acb,0x2aa15635bbd366e8,2 +np.float64,0x600e0150c01c1,0x2a9d1456ea6c239c,2 +np.float64,0x8009c30863338611,0xaaa118f94b188bcf,2 +np.float64,0x3fe7e4c0dfefc982,0x3fed07e8480b8c07,2 +np.float64,0xbfddac6407bb58c8,0xbfe8c46f63a50225,2 +np.float64,0xbc85e977790bd,0x2aa2344636ed713d,2 +np.float64,0xfff0000000000000,0xfff0000000000000,2 +np.float64,0xffcd1570303a2ae0,0xd5389a27d5148701,2 +np.float64,0xbf937334d026e660,0xbfd113762e4e29a7,2 +np.float64,0x3fdbfdaa9b37fb55,0x3fe84a425fdff7df,2 +np.float64,0xffc10800f5221000,0xd5349535ffe12030,2 +np.float64,0xaf40f3755e81f,0x2aa1c443af16cd27,2 +np.float64,0x800f7da34f7efb47,0xaaa3f14bf25fc89f,2 +np.float64,0xffe4a60125a94c02,0xd5416b764a294128,2 +np.float64,0xbf8e25aa903c4b40,0xbfcf5ebc275b4789,2 +np.float64,0x3fca681bbb34d038,0x3fe2e882bcaee320,2 +np.float64,0xbfd0f3c9c1a1e794,0xbfe48d0df7b47572,2 +np.float64,0xffeb99b49d373368,0xd5433060dc641910,2 +np.float64,0x3fe554fb916aa9f8,0x3febf437cf30bd67,2 +np.float64,0x80079518d0af2a32,0xaa9f6ee87044745a,2 +np.float64,0x5e01a8a0bc036,0x2a9cdf0badf222c3,2 +np.float64,0xbfea9831b3f53064,0xbfee1601ee953ab3,2 +np.float64,0xbfc369d1a826d3a4,0xbfe110b675c311e0,2 +np.float64,0xa82e640d505cd,0x2aa1863d4e523b9c,2 +np.float64,0x3fe506d70a2a0dae,0x3febd1eba3aa83fa,2 +np.float64,0xcbacba7197598,0x2aa2adeb9927f1f2,2 +np.float64,0xc112d6038225b,0x2aa25978f12038b0,2 +np.float64,0xffa7f5f44c2febf0,0xd52d0ede02d4e18b,2 +np.float64,0x8006f218e34de433,0xaa9e870cf373b4eb,2 +np.float64,0xffe6d9a5d06db34b,0xd54204a4adc608c7,2 +np.float64,0x7fe717210eae2e41,0x554214bf3e2b5228,2 +np.float64,0xbfdd4b45cdba968c,0xbfe8a94c7f225f8e,2 +np.float64,0x883356571066b,0x2aa055ab0b2a8833,2 +np.float64,0x3fe307fc02a60ff8,0x3feae9175053288f,2 +np.float64,0x3fefa985f77f530c,0x3fefe31289446615,2 +np.float64,0x8005698a98aad316,0xaa9c17814ff7d630,2 +np.float64,0x3fea77333c74ee66,0x3fee098ba70e10fd,2 +np.float64,0xbfd1d00b0023a016,0xbfe4e497fd1cbea1,2 +np.float64,0x80009b0c39813619,0xaa8b130a6909cc3f,2 +np.float64,0x3fdbeb896fb7d714,0x3fe84502ba5437f8,2 +np.float64,0x3fb6e7e3562dcfc7,0x3fdca00d35c389ad,2 +np.float64,0xb2d46ebf65a8e,0x2aa1e2fe158d0838,2 +np.float64,0xbfd5453266aa8a64,0xbfe62a6a74c8ef6e,2 +np.float64,0x7fe993aa07732753,0x5542b5438bf31cb7,2 +np.float64,0xbfda5a098cb4b414,0xbfe7ce6d4d606203,2 +np.float64,0xbfe40c3ce068187a,0xbfeb61a32c57a6d0,2 +np.float64,0x3fcf17671d3e2ed0,0x3fe3f753170ab686,2 +np.float64,0xbfe4f814b6e9f02a,0xbfebcb67c60b7b08,2 +np.float64,0x800efedf59fdfdbf,0xaaa3ba4ed44ad45a,2 +np.float64,0x800420b556e8416b,0xaa99aa7fb14edeab,2 +np.float64,0xbf6e4ae6403c9600,0xbfc3cb2b29923989,2 +np.float64,0x3fda5c760a34b8ec,0x3fe7cf2821c52391,2 +np.float64,0x7f898faac0331f55,0x5522b44a01408188,2 +np.float64,0x3fd55af4b7aab5e9,0x3fe631f6d19503b3,2 +np.float64,0xbfa30a255c261450,0xbfd55caf0826361d,2 +np.float64,0x7fdfb801343f7001,0x553fe7ee50b9199a,2 +np.float64,0x7fa89ee91c313dd1,0x552d528ca2a4d659,2 +np.float64,0xffea72921d34e524,0xd542eb01af2e470d,2 +np.float64,0x3feddf0f33fbbe1e,0x3fef462b67fc0a91,2 +np.float64,0x3fe36700b566ce01,0x3feb1596caa8eff7,2 +np.float64,0x7fe6284a25ac5093,0x5541d58be3956601,2 +np.float64,0xffda16f7c8b42df0,0xd53de4f722485205,2 +np.float64,0x7f9355b94026ab72,0x552578cdeb41d2ca,2 
+np.float64,0xffd3a9b022275360,0xd53b347b02dcea21,2 +np.float64,0x3fcb7f4f4a36fe9f,0x3fe32a40e9f6c1aa,2 +np.float64,0x7fdb958836372b0f,0x553e746103f92111,2 +np.float64,0x3fd37761c0a6eec4,0x3fe5853c5654027e,2 +np.float64,0x3fe449f1a2e893e4,0x3feb7d9e4eacc356,2 +np.float64,0x80077dfbef0efbf9,0xaa9f4ed788d2fadd,2 +np.float64,0x4823aa7890476,0x2a9a6eb4b653bad5,2 +np.float64,0xbfede01a373bc034,0xbfef468895fbcd29,2 +np.float64,0xbfe2bac5f125758c,0xbfeac4811c4dd66f,2 +np.float64,0x3fec10373af8206e,0x3feea14529e0f178,2 +np.float64,0x3fe305e30ca60bc6,0x3feae81a2f9d0302,2 +np.float64,0xa9668c5f52cd2,0x2aa1910e3a8f2113,2 +np.float64,0xbfd98b1717b3162e,0xbfe78f75995335d2,2 +np.float64,0x800fa649c35f4c94,0xaaa402ae79026a8f,2 +np.float64,0xbfb07dacf620fb58,0xbfd9a7d33d93a30f,2 +np.float64,0x80015812f382b027,0xaa91a843e9c85c0e,2 +np.float64,0x3fc687d96c2d0fb3,0x3fe1ef0ac16319c5,2 +np.float64,0xbfecad2ecd795a5e,0xbfeed9f786697af0,2 +np.float64,0x1608c1242c119,0x2a91cd11e9b4ccd2,2 +np.float64,0x6df775e8dbeef,0x2a9e6ba8c71130eb,2 +np.float64,0xffe96e9332b2dd26,0xd542ac342d06299b,2 +np.float64,0x7fecb6a3b8396d46,0x5543718af8162472,2 +np.float64,0x800d379f893a6f3f,0xaaa2ea36bbcb9308,2 +np.float64,0x3f924cdb202499b6,0x3fd0bb90af8d1f79,2 +np.float64,0x0,0x0,2 +np.float64,0x7feaf3b365f5e766,0x5543099a160e2427,2 +np.float64,0x3fea169ed0742d3e,0x3fede4d526e404f8,2 +np.float64,0x7feaf5f2f775ebe5,0x55430a2196c5f35a,2 +np.float64,0xbfc80d4429301a88,0xbfe2541f2ddd3334,2 +np.float64,0xffc75203b32ea408,0xd536db2837068689,2 +np.float64,0xffed2850e63a50a1,0xd5438b1217b72b8a,2 +np.float64,0x7fc16b0e7f22d61c,0x5534bcd0bfddb6f0,2 +np.float64,0x7feee8ed09fdd1d9,0x5543ed5b3ca483ab,2 +np.float64,0x7fb6c7ee662d8fdc,0x5531fffb5d46dafb,2 +np.float64,0x3fd77cebf8aef9d8,0x3fe6e9242e2bd29d,2 +np.float64,0x3f81c33f70238680,0x3fca4c7f3c9848f7,2 +np.float64,0x3fd59fea92ab3fd5,0x3fe649c1558cadd5,2 +np.float64,0xffeba82d4bf7505a,0xd54333bad387f7bd,2 +np.float64,0xffd37630e1a6ec62,0xd53b1ca62818c670,2 +np.float64,0xffec2c1e70b8583c,0xd5435213dcd27c22,2 +np.float64,0x7fec206971f840d2,0x55434f6660a8ae41,2 +np.float64,0x3fed2964adba52c9,0x3fef0642fe72e894,2 +np.float64,0xffd08e30d6211c62,0xd539b060e0ae02da,2 +np.float64,0x3e5f976c7cbf4,0x2a992e6ff991a122,2 +np.float64,0xffe6eee761adddce,0xd5420a393c67182f,2 +np.float64,0xbfe8ec9a31f1d934,0xbfed714426f58147,2 +np.float64,0x7fefffffffffffff,0x554428a2f98d728b,2 +np.float64,0x3fb3ae8b2c275d16,0x3fdb36b81b18a546,2 +np.float64,0x800f73df4dfee7bf,0xaaa3ed1a3e2cf49c,2 +np.float64,0xffd0c8873b21910e,0xd539ce6a3eab5dfd,2 +np.float64,0x3facd6c49439ad80,0x3fd8886f46335df1,2 +np.float64,0x3935859c726b2,0x2a98775f6438dbb1,2 +np.float64,0x7feed879fbfdb0f3,0x5543e9d1ac239469,2 +np.float64,0xbfe84dd990f09bb3,0xbfed323af09543b1,2 +np.float64,0xbfe767cc5a6ecf98,0xbfecd4f39aedbacb,2 +np.float64,0xffd8bd91d5b17b24,0xd53d5eb3734a2609,2 +np.float64,0xbfe13edeb2a27dbe,0xbfea0a856f0b9656,2 +np.float64,0xd933dd53b267c,0x2aa3158784e428c9,2 +np.float64,0xbfef6fef987edfdf,0xbfefcfb1c160462b,2 +np.float64,0x8009eeda4893ddb5,0xaaa13268a41045b1,2 +np.float64,0xab48c7a156919,0x2aa1a1a9c124c87d,2 +np.float64,0xa997931d532f3,0x2aa192bfe5b7bbb4,2 +np.float64,0xffe39ce8b1e739d1,0xd5411fa1c5c2cbd8,2 +np.float64,0x7e7ac2f6fcf59,0x2a9fdf6f263a9e9f,2 +np.float64,0xbfee1e35a6fc3c6b,0xbfef5c25d32b4047,2 +np.float64,0xffe5589c626ab138,0xd5419d220cc9a6da,2 +np.float64,0x7fe12509bf224a12,0x55405f7036dc5932,2 +np.float64,0xa6f15ba94de2c,0x2aa17b3367b1fc1b,2 +np.float64,0x3fca8adbfa3515b8,0x3fe2f0ca775749e5,2 
+np.float64,0xbfcb03aa21360754,0xbfe30d5b90ca41f7,2 +np.float64,0x3fefafb2da7f5f66,0x3fefe5251aead4e7,2 +np.float64,0xffd90a59d23214b4,0xd53d7cf63a644f0e,2 +np.float64,0x3fba499988349333,0x3fddf84154fab7e5,2 +np.float64,0x800a76a0bc54ed42,0xaaa17f68cf67f2fa,2 +np.float64,0x3fea33d15bb467a3,0x3fedeff7f445b2ff,2 +np.float64,0x8005d9b0726bb362,0xaa9cd48624afeca9,2 +np.float64,0x7febf42e9a77e85c,0x55434541d8073376,2 +np.float64,0xbfedfc4469bbf889,0xbfef505989f7ee7d,2 +np.float64,0x8001211f1422423f,0xaa90a9889d865349,2 +np.float64,0x800e852f7fdd0a5f,0xaaa3845f11917f8e,2 +np.float64,0xffefd613c87fac27,0xd5441fd17ec669b4,2 +np.float64,0x7fed2a74543a54e8,0x55438b8c637da8b8,2 +np.float64,0xb83d50ff707aa,0x2aa210b4fc11e4b2,2 +np.float64,0x10000000000000,0x2aa428a2f98d728b,2 +np.float64,0x474ad9208e97,0x2a84e5a31530368a,2 +np.float64,0xffd0c5498ea18a94,0xd539ccc0e5cb425e,2 +np.float64,0x8001a8e9c82351d4,0xaa92f1aee6ca5b7c,2 +np.float64,0xd28db1e5a51b6,0x2aa2e328c0788f4a,2 +np.float64,0x3bf734ac77ee7,0x2a98da65c014b761,2 +np.float64,0x3fe56e17c96adc30,0x3febff2b6b829b7a,2 +np.float64,0x7783113eef063,0x2a9f46c3f09eb42c,2 +np.float64,0x3fd69d4e42ad3a9d,0x3fe69f83a21679f4,2 +np.float64,0x3fd34f4841a69e90,0x3fe5766b3c771616,2 +np.float64,0x3febb49895b76931,0x3fee7fcb603416c9,2 +np.float64,0x7fe8d6cb55f1ad96,0x554286c3b3bf4313,2 +np.float64,0xbfe67c6ba36cf8d8,0xbfec730218f2e284,2 +np.float64,0xffef9d97723f3b2e,0xd54413e38b6c29be,2 +np.float64,0x12d8cd2a25b1b,0x2a90e5ccd37b8563,2 +np.float64,0x81fe019103fc0,0x2aa01524155e73c5,2 +np.float64,0x7fe95d546f72baa8,0x5542a7fabfd425ff,2 +np.float64,0x800e742f1f9ce85e,0xaaa37cbe09e1f874,2 +np.float64,0xffd96bd3a732d7a8,0xd53da3086071264a,2 +np.float64,0x4ef2691e9de4e,0x2a9b3d316047fd6d,2 +np.float64,0x1a91684c3522e,0x2a92f25913c213de,2 +np.float64,0x3d5151b87aa2b,0x2a9909dbd9a44a84,2 +np.float64,0x800d9049435b2093,0xaaa31424e32d94a2,2 +np.float64,0xffe5b25fcc2b64bf,0xd541b5b0416b40b5,2 +np.float64,0xffe0eb784c21d6f0,0xd5404d083c3d6bc6,2 +np.float64,0x8007ceefbf0f9de0,0xaa9fbe0d739368b4,2 +np.float64,0xb78529416f0b,0x2a8ca3b29b5b3f18,2 +np.float64,0x7fba61130034c225,0x5532e6d4ca0f2918,2 +np.float64,0x3fba8d67ae351acf,0x3fde11efd6239b09,2 +np.float64,0x3fe7f24c576fe498,0x3fed0d63947a854d,2 +np.float64,0x2bb58dec576b3,0x2a965de7fca12aff,2 +np.float64,0xbfe86ceec4f0d9de,0xbfed3ea7f1d084e2,2 +np.float64,0x7fd1a7f7bca34fee,0x553a3f01b67fad2a,2 +np.float64,0x3fd9a43acfb34874,0x3fe7972dc5d8dfd6,2 +np.float64,0x7fd9861acdb30c35,0x553dad3b1bbb3b4d,2 +np.float64,0xffecc0c388398186,0xd54373d3b903deec,2 +np.float64,0x3fa6f86e9c2df0e0,0x3fd6bdbe40fcf710,2 +np.float64,0x800ddd99815bbb33,0xaaa33820d2f889bb,2 +np.float64,0x7fe087089b610e10,0x55402c868348a6d3,2 +np.float64,0x3fdf43d249be87a5,0x3fe933d29fbf7c23,2 +np.float64,0x7fe4f734c7a9ee69,0x5541822e56c40725,2 +np.float64,0x3feb39a9d3b67354,0x3fee526bf1f69f0e,2 +np.float64,0x3fe61454a0ec28a9,0x3fec46d7c36f7566,2 +np.float64,0xbfeafaa0a375f541,0xbfee3af2e49d457a,2 +np.float64,0x3fda7378e1b4e6f0,0x3fe7d613a3f92c40,2 +np.float64,0xe3e31c5fc7c64,0x2aa3645c12e26171,2 +np.float64,0xbfe97a556df2f4ab,0xbfeda8aa84cf3544,2 +np.float64,0xff612f9c80225f00,0xd514a51e5a2a8a97,2 +np.float64,0x800c51c8a0f8a391,0xaaa279fe7d40b50b,2 +np.float64,0xffd6f9d2312df3a4,0xd53ca783a5f8d110,2 +np.float64,0xbfead48bd7f5a918,0xbfee2cb2f89c5e57,2 +np.float64,0x800f5949e89eb294,0xaaa3e1a67a10cfef,2 +np.float64,0x800faf292b7f5e52,0xaaa40675e0c96cfd,2 +np.float64,0xbfedc238453b8470,0xbfef3c179d2d0209,2 +np.float64,0x3feb0443c5760888,0x3fee3e8bf29089c2,2 
+np.float64,0xb26f69e164ded,0x2aa1df9f3dd7d765,2 +np.float64,0x3fcacdc053359b80,0x3fe300a67765b667,2 +np.float64,0x3fe8b274647164e8,0x3fed5a4cd4da8155,2 +np.float64,0x291e6782523ce,0x2a95ea7ac1b13a68,2 +np.float64,0xbfc4fc094e29f814,0xbfe1838671fc8513,2 +np.float64,0x3fbf1301f23e2600,0x3fdfb03a6f13e597,2 +np.float64,0xffeb36554ab66caa,0xd543193d8181e4f9,2 +np.float64,0xbfd969a52db2d34a,0xbfe78528ae61f16d,2 +np.float64,0x800cccd04d3999a1,0xaaa2b6b7a2d2d2d6,2 +np.float64,0x808eb4cb011d7,0x2aa005effecb2b4a,2 +np.float64,0x7fe839b3f9b07367,0x55425f61e344cd6d,2 +np.float64,0xbfeb25b6ed764b6e,0xbfee4b0234fee365,2 +np.float64,0xffefffffffffffff,0xd54428a2f98d728b,2 +np.float64,0xbfe01305da60260c,0xbfe9700b784af7e9,2 +np.float64,0xffcbf36b0a37e6d8,0xd538474b1d74ffe1,2 +np.float64,0xffaeebe3e83dd7c0,0xd52fa2e8dabf7209,2 +np.float64,0xbfd9913bf0b32278,0xbfe7915907aab13c,2 +np.float64,0xbfe7d125d9efa24c,0xbfecfff563177706,2 +np.float64,0xbfee98d23cbd31a4,0xbfef867ae393e446,2 +np.float64,0x3fe30efb67e61df6,0x3feaec6344633d11,2 +np.float64,0x1,0x2990000000000000,2 +np.float64,0x7fd5524fd3aaa49f,0x553bf30d18ab877e,2 +np.float64,0xc98b403f93168,0x2aa29d2fadb13c07,2 +np.float64,0xffe57080046ae100,0xd541a3b1b687360e,2 +np.float64,0x7fe20bade5e4175b,0x5540a79b94294f40,2 +np.float64,0x3fe155400a22aa80,0x3fea15c45f5b5837,2 +np.float64,0x7fe428dc8f6851b8,0x554147fd2ce93cc1,2 +np.float64,0xffefb77eb67f6efc,0xd544195dcaff4980,2 +np.float64,0x3fe49e733b293ce6,0x3feba394b833452a,2 +np.float64,0x38e01e3e71c05,0x2a986b2c955bad21,2 +np.float64,0x7fe735eb376e6bd5,0x55421cc51290d92d,2 +np.float64,0xbfd81d8644b03b0c,0xbfe71ce6d6fbd51a,2 +np.float64,0x8009a32325134647,0xaaa10645d0e6b0d7,2 +np.float64,0x56031ab8ac064,0x2a9c074be40b1f80,2 +np.float64,0xff8989aa30331340,0xd522b2d319a0ac6e,2 +np.float64,0xbfd6c183082d8306,0xbfe6ab8ffb3a8293,2 +np.float64,0x7ff8000000000000,0x7ff8000000000000,2 +np.float64,0xbfe17b68b1e2f6d2,0xbfea28dac8e0c457,2 +np.float64,0x3fbb50e42236a1c8,0x3fde5b090d51e3bd,2 +np.float64,0xffc2bb7cbf2576f8,0xd5353f1b3571c17f,2 +np.float64,0xbfe7576bca6eaed8,0xbfecce388241f47c,2 +np.float64,0x3fe7b52b04ef6a56,0x3fecf495bef99e7e,2 +np.float64,0xffe5511af82aa236,0xd5419b11524e8350,2 +np.float64,0xbfe66d5edf2cdabe,0xbfec6ca7d7b5be8c,2 +np.float64,0xc84a0ba790942,0x2aa29346f16a2cb4,2 +np.float64,0x6db5e7a0db6be,0x2a9e659c0e8244a0,2 +np.float64,0x7fef8f7b647f1ef6,0x554410e67af75d27,2 +np.float64,0xbfe2b4ada7e5695c,0xbfeac1997ec5a064,2 +np.float64,0xbfe99372e03326e6,0xbfedb2662b287543,2 +np.float64,0x3fa45d352428ba6a,0x3fd5d8a895423abb,2 +np.float64,0x3fa029695c2052d3,0x3fd439f858998886,2 +np.float64,0xffe0a9bd3261537a,0xd54037d0cd8bfcda,2 +np.float64,0xbfef83e09a7f07c1,0xbfefd66a4070ce73,2 +np.float64,0x7fee3dcc31fc7b97,0x5543c8503869407e,2 +np.float64,0xffbd16f1603a2de0,0xd533872fa5be978b,2 +np.float64,0xbfe8173141b02e62,0xbfed1c478614c6f4,2 +np.float64,0xbfef57aa277eaf54,0xbfefc77fdab27771,2 +np.float64,0x7fe883a02f31073f,0x554271ff0e3208da,2 +np.float64,0xe3adb63bc75b7,0x2aa362d833d0e41c,2 +np.float64,0x8001c430bac38862,0xaa93575026d26510,2 +np.float64,0x12fb347225f67,0x2a90f00eb9edb3fe,2 +np.float64,0x3fe53f83cbaa7f08,0x3febead40de452c2,2 +np.float64,0xbfe7f67227efece4,0xbfed0f10e32ad220,2 +np.float64,0xb8c5b45d718b7,0x2aa2152912cda86d,2 +np.float64,0x3fd23bb734a4776e,0x3fe50e5d3008c095,2 +np.float64,0x8001fd558ee3faac,0xaa941faa1f7ed450,2 +np.float64,0xffe6bbeda9ed77db,0xd541fcd185a63afa,2 +np.float64,0x4361d79086c3c,0x2a99d692237c30b7,2 +np.float64,0xbfd012f004a025e0,0xbfe43093e290fd0d,2 
+np.float64,0xffe1d8850423b10a,0xd54097cf79d8d01e,2 +np.float64,0x3fccf4df7939e9bf,0x3fe37f8cf8be6436,2 +np.float64,0x8000546bc6c0a8d8,0xaa861bb3588556f2,2 +np.float64,0xbfecb4d6ba7969ae,0xbfeedcb6239135fe,2 +np.float64,0xbfaeb425cc3d6850,0xbfd90cfc103bb896,2 +np.float64,0x800ec037ec7d8070,0xaaa39eae8bde9774,2 +np.float64,0xbfeeaf863dfd5f0c,0xbfef8e4514772a8a,2 +np.float64,0xffec67c6c4b8cf8d,0xd5435fad89f900cf,2 +np.float64,0x3fda4498da348932,0x3fe7c7f6b3f84048,2 +np.float64,0xbfd05fd3dea0bfa8,0xbfe4509265a9b65f,2 +np.float64,0x3fe42cc713a8598e,0x3feb706ba9cd533c,2 +np.float64,0xec22d4d7d845b,0x2aa39f8cccb9711c,2 +np.float64,0x7fda30606c3460c0,0x553deea865065196,2 +np.float64,0xbfd58cba8bab1976,0xbfe64327ce32d611,2 +np.float64,0xadd521c75baa4,0x2aa1b7efce201a98,2 +np.float64,0x7fed43c1027a8781,0x55439131832b6429,2 +np.float64,0x800bee278fb7dc4f,0xaaa247a71e776db4,2 +np.float64,0xbfe9be5dd2737cbc,0xbfedc2f9501755b0,2 +np.float64,0x8003f4854447e90b,0xaa994d9b5372b13b,2 +np.float64,0xbfe5d0f867eba1f1,0xbfec29f8dd8b33a4,2 +np.float64,0x3fd79102d5af2206,0x3fe6efaa7a1efddb,2 +np.float64,0xbfeae783c835cf08,0xbfee33cdb4a44e81,2 +np.float64,0x3fcf1713e83e2e28,0x3fe3f7414753ddfb,2 +np.float64,0xffe5ab3cff2b567a,0xd541b3bf0213274a,2 +np.float64,0x7fe0fc65d8a1f8cb,0x554052761ac96386,2 +np.float64,0x7e81292efd026,0x2a9fdff8c01ae86f,2 +np.float64,0x80091176039222ec,0xaaa0aebf0565dfa6,2 +np.float64,0x800d2bf5ab5a57ec,0xaaa2e4a4c31e7e29,2 +np.float64,0xffd1912ea923225e,0xd53a33b2856726ab,2 +np.float64,0x800869918ed0d323,0xaaa0453408e1295d,2 +np.float64,0xffba0898fa341130,0xd532d19b202a9646,2 +np.float64,0xbfe09fac29613f58,0xbfe9b9687b5811a1,2 +np.float64,0xbfbd4ae82e3a95d0,0xbfdf1220f6f0fdfa,2 +np.float64,0xffea11d27bb423a4,0xd542d3d3e1522474,2 +np.float64,0xbfe6b05705ad60ae,0xbfec88d6bcab2683,2 +np.float64,0x3fe624a3f2ec4948,0x3fec4dcc78ddf871,2 +np.float64,0x53483018a6907,0x2a9bba8f92006b69,2 +np.float64,0xbfec0a6eeb7814de,0xbfee9f2a741248d7,2 +np.float64,0x3fe8c8ce6371919d,0x3fed63250c643482,2 +np.float64,0xbfe26b0ef964d61e,0xbfea9e511db83437,2 +np.float64,0xffa0408784208110,0xd52987f62c369ae9,2 +np.float64,0xffc153abc322a758,0xd534b384b5c5fe63,2 +np.float64,0xbfbdce88a63b9d10,0xbfdf4065ef0b01d4,2 +np.float64,0xffed4a4136fa9482,0xd54392a450f8b0af,2 +np.float64,0x8007aa18748f5432,0xaa9f8bd2226d4299,2 +np.float64,0xbfdab4d3e8b569a8,0xbfe7e9a5402540e5,2 +np.float64,0x7fe68914f92d1229,0x5541ef5e78fa35de,2 +np.float64,0x800a538bb1b4a718,0xaaa16bc487711295,2 +np.float64,0xffe02edbc8605db7,0xd5400f8f713df890,2 +np.float64,0xffe8968053712d00,0xd54276b9cc7f460a,2 +np.float64,0x800a4ce211d499c5,0xaaa1680491deb40c,2 +np.float64,0x3f988080f8310102,0x3fd2713691e99329,2 +np.float64,0xf64e42a7ec9c9,0x2aa3e6a7af780878,2 +np.float64,0xff73cc7100279900,0xd51b4478c3409618,2 +np.float64,0x71e6722ce3ccf,0x2a9ec76ddf296ce0,2 +np.float64,0x8006ca16ab0d942e,0xaa9e4bfd862af570,2 +np.float64,0x8000000000000000,0x8000000000000000,2 +np.float64,0xbfed373e02ba6e7c,0xbfef0b2b7bb767b3,2 +np.float64,0xa6cb0f694d962,0x2aa179dd16b0242b,2 +np.float64,0x7fec14626cf828c4,0x55434ca55b7c85d5,2 +np.float64,0x3fcda404513b4808,0x3fe3a68e8d977752,2 +np.float64,0xbfeb94995f772933,0xbfee74091d288b81,2 +np.float64,0x3fce2299a13c4530,0x3fe3c2603f28d23b,2 +np.float64,0xffd07f4534a0fe8a,0xd539a8a6ebc5a603,2 +np.float64,0x7fdb1c651e3638c9,0x553e478a6385c86b,2 +np.float64,0x3fec758336f8eb06,0x3feec5f3b92c8b28,2 +np.float64,0x796fc87cf2dfa,0x2a9f7184a4ad8c49,2 +np.float64,0x3fef9ba866ff3750,0x3fefde6a446fc2cd,2 
+np.float64,0x964d26c72c9a5,0x2aa0e143f1820179,2 +np.float64,0xbfef6af750bed5ef,0xbfefce04870a97bd,2 +np.float64,0x3fe2f3961aa5e72c,0x3feadf769321a3ff,2 +np.float64,0xbfd6b706e9ad6e0e,0xbfe6a8141c5c3b5d,2 +np.float64,0x7fe0ecc40a21d987,0x55404d72c2b46a82,2 +np.float64,0xbfe560d19deac1a3,0xbfebf962681a42a4,2 +np.float64,0xbfea37170ab46e2e,0xbfedf136ee9df02b,2 +np.float64,0xbfebf78947b7ef12,0xbfee9847ef160257,2 +np.float64,0x800551f8312aa3f1,0xaa9bee7d3aa5491b,2 +np.float64,0xffed2513897a4a26,0xd5438a58c4ae28ec,2 +np.float64,0x7fd962d75cb2c5ae,0x553d9f8a0c2016f3,2 +np.float64,0x3fefdd8512bfbb0a,0x3feff47d8da7424d,2 +np.float64,0xbfefa5b43bff4b68,0xbfefe1ca42867af0,2 +np.float64,0xbfc8a2853531450c,0xbfe279bb7b965729,2 +np.float64,0x800c8843bc391088,0xaaa2951344e7b29b,2 +np.float64,0x7fe22587bae44b0e,0x5540af8bb58cfe86,2 +np.float64,0xbfe159fae822b3f6,0xbfea182394eafd8d,2 +np.float64,0xbfe6fdfd50edfbfa,0xbfeca93f2a3597d0,2 +np.float64,0xbfe5cd5afaeb9ab6,0xbfec286a8ce0470f,2 +np.float64,0xbfc84bb97f309774,0xbfe263ef0f8f1f6e,2 +np.float64,0x7fd9c1e548b383ca,0x553dc4556874ecb9,2 +np.float64,0x7fda43d33bb487a5,0x553df60f61532fc0,2 +np.float64,0xbfe774bd25eee97a,0xbfecda42e8578c1f,2 +np.float64,0x800df1f5ab9be3ec,0xaaa34184712e69db,2 +np.float64,0xbff0000000000000,0xbff0000000000000,2 +np.float64,0x3fe14ec21b629d84,0x3fea128244215713,2 +np.float64,0x7fc1ce7843239cf0,0x5534e3fa8285b7b8,2 +np.float64,0xbfe922b204724564,0xbfed86818687d649,2 +np.float64,0x3fc58924fb2b1248,0x3fe1aa715ff6ebbf,2 +np.float64,0x8008b637e4d16c70,0xaaa0760b53abcf46,2 +np.float64,0xffbf55bd4c3eab78,0xd53404a23091a842,2 +np.float64,0x9f6b4a753ed6a,0x2aa136ef9fef9596,2 +np.float64,0xbfd11da7f8a23b50,0xbfe49deb493710d8,2 +np.float64,0x800a2f07fcd45e10,0xaaa157237c98b4f6,2 +np.float64,0x3fdd4defa4ba9bdf,0x3fe8aa0bcf895f4f,2 +np.float64,0x7fe9b0ab05f36155,0x5542bc5335414473,2 +np.float64,0x3fe89c97de313930,0x3fed51a1189b8982,2 +np.float64,0x3fdd45c8773a8b91,0x3fe8a7c2096fbf5a,2 +np.float64,0xbfeb6f64daf6deca,0xbfee665167ef43ad,2 +np.float64,0xffdf9da1c4bf3b44,0xd53fdf141944a983,2 +np.float64,0x3fde092ed0bc125c,0x3fe8de25bfbfc2db,2 +np.float64,0xbfcb21f96b3643f4,0xbfe3147904c258cf,2 +np.float64,0x800c9c934f993927,0xaaa29f17c43f021b,2 +np.float64,0x9b91814d37230,0x2aa11329e59bf6b0,2 +np.float64,0x3fe28a7e0b6514fc,0x3feaad6d23e2eadd,2 +np.float64,0xffecf38395f9e706,0xd5437f3ee1cd61e4,2 +np.float64,0x3fcade92a935bd25,0x3fe3049f4c1da1d0,2 +np.float64,0x800ab25d95d564bc,0xaaa1a076d7c66e04,2 +np.float64,0xffc0989e1e21313c,0xd53467f3b8158298,2 +np.float64,0x3fd81523eeb02a48,0x3fe71a38d2da8a82,2 +np.float64,0x7fe5b9dd402b73ba,0x5541b7b9b8631010,2 +np.float64,0x2c160d94582c3,0x2a966e51b503a3d1,2 +np.float64,0x2c416ffa5882f,0x2a9675aaef8b29c4,2 +np.float64,0x7fefe2ff01bfc5fd,0x55442289faf22b86,2 +np.float64,0xbfd469bf5d28d37e,0xbfe5dd239ffdc7eb,2 +np.float64,0xbfdd56f3eabaade8,0xbfe8ac93244ca17b,2 +np.float64,0xbfe057b89160af71,0xbfe9941557340bb3,2 +np.float64,0x800c50e140b8a1c3,0xaaa2798ace9097ee,2 +np.float64,0xbfda5a8984b4b514,0xbfe7ce93d65a56b0,2 +np.float64,0xbfcd6458323ac8b0,0xbfe39872514127bf,2 +np.float64,0x3fefb1f5ebff63ec,0x3fefe5e761b49b89,2 +np.float64,0x3fea3abc1df47578,0x3fedf29a1c997863,2 +np.float64,0x7fcb4a528e3694a4,0x553815f169667213,2 +np.float64,0x8c77da7b18efc,0x2aa080e52bdedb54,2 +np.float64,0x800e5dde4c5cbbbd,0xaaa372b16fd8b1ad,2 +np.float64,0x3fd2976038a52ec0,0x3fe5316b4f79fdbc,2 +np.float64,0x69413a0ed2828,0x2a9dfacd9cb44286,2 +np.float64,0xbfebbac0bdb77582,0xbfee820d9288b631,2 
+np.float64,0x1a12aa7c34256,0x2a92d407e073bbfe,2 +np.float64,0xbfc41a27c3283450,0xbfe143c8665b0d3c,2 +np.float64,0xffe4faa41369f548,0xd54183230e0ce613,2 +np.float64,0xbfdeae81f23d5d04,0xbfe90b734bf35b68,2 +np.float64,0x3fc984ba58330975,0x3fe2b19e9052008e,2 +np.float64,0x7fe6e51b8d2dca36,0x554207a74ae2bb39,2 +np.float64,0x80081a58a81034b2,0xaaa0117d4aff11c8,2 +np.float64,0x7fde3fddfe3c7fbb,0x553f67d0082acc67,2 +np.float64,0x3fac7c999038f933,0x3fd86ec2f5dc3aa4,2 +np.float64,0x7fa26b4c4c24d698,0x552a9e6ea8545c18,2 +np.float64,0x3fdacd06e6b59a0e,0x3fe7f0dc0e8f9c6d,2 +np.float64,0x80064b62cbec96c6,0xaa9d8ac0506fdd05,2 +np.float64,0xb858116170b1,0x2a8caea703d9ccc8,2 +np.float64,0xbfe8d94ccef1b29a,0xbfed69a8782cbf3d,2 +np.float64,0x8005607d6a6ac0fc,0xaa9c07cf8620b037,2 +np.float64,0xbfe66a52daacd4a6,0xbfec6b5e403e6864,2 +np.float64,0x7fc398c2e0273185,0x5535918245894606,2 +np.float64,0x74b2d7dce965c,0x2a9f077020defdbc,2 +np.float64,0x7fe8f7a4d9b1ef49,0x55428eeae210e8eb,2 +np.float64,0x80027deddc84fbdc,0xaa95b11ff9089745,2 +np.float64,0xffeba2a94e774552,0xd5433273f6568902,2 +np.float64,0x80002f8259405f05,0xaa8240b68d7b9dc4,2 +np.float64,0xbfdf0d84883e1b0a,0xbfe92532c69c5802,2 +np.float64,0xbfcdfa7b6b3bf4f8,0xbfe3b997a84d0914,2 +np.float64,0x800c18b04e183161,0xaaa25d46d60b15c6,2 +np.float64,0xffeaf1e37c35e3c6,0xd543092cd929ac19,2 +np.float64,0xbfc5aa07752b5410,0xbfe1b36ab5ec741f,2 +np.float64,0x3fe5c491d1eb8924,0x3fec24a1c3f6a178,2 +np.float64,0xbfeb736937f6e6d2,0xbfee67cd296e6fa9,2 +np.float64,0xffec3d5718787aad,0xd5435602e1a2cc43,2 +np.float64,0x7fe71e1da86e3c3a,0x55421691ead882cb,2 +np.float64,0x3fdd6ed0c93adda2,0x3fe8b341d066c43c,2 +np.float64,0x7fbe3d7a203c7af3,0x5533c83e53283430,2 +np.float64,0x3fdc20cb56384197,0x3fe854676360aba9,2 +np.float64,0xb7a1ac636f436,0x2aa20b9d40d66e78,2 +np.float64,0x3fb1491bb8229237,0x3fda0fabad1738ee,2 +np.float64,0xbfdf9c0ce73f381a,0xbfe94b716dbe35ee,2 +np.float64,0xbfbd4f0ad23a9e18,0xbfdf1397329a2dce,2 +np.float64,0xbfe4e0caac69c196,0xbfebc119b8a181cd,2 +np.float64,0x5753641aaea6d,0x2a9c2ba3e92b0cd2,2 +np.float64,0x72bb814ae5771,0x2a9eda92fada66de,2 +np.float64,0x57ed8f5aafdb3,0x2a9c3c2e1d42e609,2 +np.float64,0xffec33359c38666a,0xd54353b2acd0daf1,2 +np.float64,0x3fa5fe6e8c2bfce0,0x3fd66a0b3bf2720a,2 +np.float64,0xffe2dc8d7ca5b91a,0xd540e6ebc097d601,2 +np.float64,0x7fd99d260eb33a4b,0x553db626c9c75f78,2 +np.float64,0xbfe2dd73e425bae8,0xbfead4fc4b93a727,2 +np.float64,0xdcd4a583b9a95,0x2aa33094c9a17ad7,2 +np.float64,0x7fb0af6422215ec7,0x553039a606e8e64f,2 +np.float64,0x7fdfab6227bf56c3,0x553fe3b26164aeda,2 +np.float64,0x1e4d265e3c9a6,0x2a93cba8a1a8ae6d,2 +np.float64,0xbfdc7d097238fa12,0xbfe86ee2f24fd473,2 +np.float64,0x7fe5d35d29eba6b9,0x5541bea5878bce2b,2 +np.float64,0xffcb886a903710d4,0xd53828281710aab5,2 +np.float64,0xffe058c7ffe0b190,0xd5401d61e9a7cbcf,2 +np.float64,0x3ff0000000000000,0x3ff0000000000000,2 +np.float64,0xffd5b1c1132b6382,0xd53c1c839c098340,2 +np.float64,0x3fe2e7956725cf2b,0x3fead9c907b9d041,2 +np.float64,0x800a8ee293951dc6,0xaaa18ce3f079f118,2 +np.float64,0x7febcd3085b79a60,0x55433c47e1f822ad,2 +np.float64,0x3feb0e14cd761c2a,0x3fee423542102546,2 +np.float64,0x3fb45e6d0628bcda,0x3fdb86db67d0c992,2 +np.float64,0x7fa836e740306dce,0x552d2907cb8118b2,2 +np.float64,0x3fd15ba25b22b745,0x3fe4b6b018409d78,2 +np.float64,0xbfb59980ce2b3300,0xbfdc1206274cb51d,2 +np.float64,0x3fdef1b87fbde371,0x3fe91dafc62124a1,2 +np.float64,0x7fed37a4337a6f47,0x55438e7e0b50ae37,2 +np.float64,0xffe6c87633ad90ec,0xd542001f216ab448,2 
+np.float64,0x8008d2548ab1a4a9,0xaaa087ad272d8e17,2 +np.float64,0xbfd1d6744da3ace8,0xbfe4e71965adda74,2 +np.float64,0xbfb27f751224fee8,0xbfdaa82132775406,2 +np.float64,0x3fe2b336ae65666d,0x3feac0e6b13ec2d2,2 +np.float64,0xffc6bac2262d7584,0xd536a951a2eecb49,2 +np.float64,0x7fdb661321b6cc25,0x553e62dfd7fcd3f3,2 +np.float64,0xffe83567d5706acf,0xd5425e4bb5027568,2 +np.float64,0xbf7f0693e03e0d00,0xbfc9235314d53f82,2 +np.float64,0x3feb32b218766564,0x3fee4fd5847f3722,2 +np.float64,0x3fec25d33df84ba6,0x3feea91fcd4aebab,2 +np.float64,0x7fe17abecb22f57d,0x55407a8ba661207c,2 +np.float64,0xbfe5674b1eeace96,0xbfebfc351708dc70,2 +np.float64,0xbfe51a2d2f6a345a,0xbfebda702c9d302a,2 +np.float64,0x3fec05584af80ab0,0x3fee9d502a7bf54d,2 +np.float64,0xffda8871dcb510e4,0xd53e10105f0365b5,2 +np.float64,0xbfc279c31824f388,0xbfe0c9354d871484,2 +np.float64,0x1cbed61e397dc,0x2a937364712cd518,2 +np.float64,0x800787d198af0fa4,0xaa9f5c847affa1d2,2 +np.float64,0x80079f6d65af3edc,0xaa9f7d2863368bbd,2 +np.float64,0xb942f1e97285e,0x2aa2193e0c513b7f,2 +np.float64,0x7fe9078263320f04,0x554292d85dee2c18,2 +np.float64,0xbfe4de0761a9bc0f,0xbfebbfe04116b829,2 +np.float64,0xbfdbe6f3fc37cde8,0xbfe843aea59a0749,2 +np.float64,0xffcb6c0de136d81c,0xd5381fd9c525b813,2 +np.float64,0x9b6bda9336d7c,0x2aa111c924c35386,2 +np.float64,0x3fe17eece422fdda,0x3fea2a9bacd78607,2 +np.float64,0xd8011c49b0024,0x2aa30c87574fc0c6,2 +np.float64,0xbfc0a08b3f214118,0xbfe034d48f0d8dc0,2 +np.float64,0x3fd60adb1eac15b8,0x3fe66e42e4e7e6b5,2 +np.float64,0x80011d68ea023ad3,0xaa909733befbb962,2 +np.float64,0xffb35ac32426b588,0xd5310c4be1c37270,2 +np.float64,0x3fee8b56c9bd16ae,0x3fef81d8d15f6939,2 +np.float64,0x3fdc10a45e382149,0x3fe84fbe4cf11e68,2 +np.float64,0xbfc85dc45e30bb88,0xbfe2687b5518abde,2 +np.float64,0x3fd53b85212a770a,0x3fe6270d6d920d0f,2 +np.float64,0x800fc158927f82b1,0xaaa40e303239586f,2 +np.float64,0x11af5e98235ed,0x2a908b04a790083f,2 +np.float64,0xbfe2a097afe54130,0xbfeab80269eece99,2 +np.float64,0xbfd74ac588ae958c,0xbfe6d8ca3828d0b8,2 +np.float64,0xffea18ab2ef43156,0xd542d579ab31df1e,2 +np.float64,0xbfecda7058f9b4e1,0xbfeeea29c33b7913,2 +np.float64,0x3fc4ac56ed2958b0,0x3fe16d3e2bd7806d,2 +np.float64,0x3feccc898cb99913,0x3feee531f217dcfa,2 +np.float64,0xffeb3a64c5b674c9,0xd5431a30a41f0905,2 +np.float64,0x3fe5a7ee212b4fdc,0x3fec1844af9076fc,2 +np.float64,0x80080fdb52301fb7,0xaaa00a8b4274db67,2 +np.float64,0x800b3e7e47d67cfd,0xaaa1ec2876959852,2 +np.float64,0x80063fb8ee2c7f73,0xaa9d7875c9f20d6f,2 +np.float64,0x7fdacf80d0b59f01,0x553e2acede4c62a8,2 +np.float64,0x401e9b24803d4,0x2a996a0a75d0e093,2 +np.float64,0x3fe6c29505ed852a,0x3fec907a6d8c10af,2 +np.float64,0x8005c04ee2cb809f,0xaa9caa9813faef46,2 +np.float64,0xbfe1360f21e26c1e,0xbfea06155d6985b6,2 +np.float64,0xffc70606682e0c0c,0xd536c239b9d4be0a,2 +np.float64,0x800e639afefcc736,0xaaa37547d0229a26,2 +np.float64,0x3fe5589290aab125,0x3febf5c925c4e6db,2 +np.float64,0x8003b59330276b27,0xaa98c47e44524335,2 +np.float64,0x800d67ec22dacfd8,0xaaa301251b6a730a,2 +np.float64,0x7fdaeb5025b5d69f,0x553e35397dfe87eb,2 +np.float64,0x3fdae32a24b5c654,0x3fe7f771bc108f6c,2 +np.float64,0xffe6c1fc93ad83f8,0xd541fe6a6a716756,2 +np.float64,0xbfd7b9c1d32f7384,0xbfe6fcdae563d638,2 +np.float64,0x800e1bea06fc37d4,0xaaa354c0bf61449c,2 +np.float64,0xbfd78f097aaf1e12,0xbfe6ef068329bdf4,2 +np.float64,0x7fea6a400874d47f,0x5542e905978ad722,2 +np.float64,0x8008b4377cb1686f,0xaaa074c87eee29f9,2 +np.float64,0x8002f3fb8d45e7f8,0xaa96f47ac539b614,2 +np.float64,0xbfcf2b3fd13e5680,0xbfe3fb91c0cc66ad,2 
+np.float64,0xffecca2f5279945e,0xd54375f361075927,2 +np.float64,0x7ff0000000000000,0x7ff0000000000000,2 +np.float64,0x7f84d5a5a029ab4a,0x552178d1d4e8640e,2 +np.float64,0x3fea8a4b64351497,0x3fee10c332440eb2,2 +np.float64,0x800fe01ac1dfc036,0xaaa41b34d91a4bee,2 +np.float64,0x3fc0b3d8872167b1,0x3fe03b178d354f8d,2 +np.float64,0x5ee8b0acbdd17,0x2a9cf69f2e317729,2 +np.float64,0x8006ef0407adde09,0xaa9e82888f3dd83e,2 +np.float64,0x7fdbb08a07b76113,0x553e7e4e35b938b9,2 +np.float64,0x49663f9c92cc9,0x2a9a95e0affe5108,2 +np.float64,0x7fd9b87e79b370fc,0x553dc0b5cff3dc7d,2 +np.float64,0xbfd86ae657b0d5cc,0xbfe73584d02bdd2b,2 +np.float64,0x3fd4d4a13729a942,0x3fe6030a962aaaf8,2 +np.float64,0x7fcc246bcb3848d7,0x5538557309449bba,2 +np.float64,0xbfdc86a7d5b90d50,0xbfe871a2983c2a29,2 +np.float64,0xd2a6e995a54dd,0x2aa2e3e9c0fdd6c0,2 +np.float64,0x3f92eb447825d680,0x3fd0eb4fd2ba16d2,2 +np.float64,0x800d4001697a8003,0xaaa2ee358661b75c,2 +np.float64,0x3fd3705fd1a6e0c0,0x3fe582a6f321d7d6,2 +np.float64,0xbfcfdf51533fbea4,0xbfe421c3bdd9f2a3,2 +np.float64,0x3fe268e87964d1d1,0x3fea9d47e08aad8a,2 +np.float64,0x24b8901e49713,0x2a951adeefe7b31b,2 +np.float64,0x3fedb35d687b66bb,0x3fef36e440850bf8,2 +np.float64,0x3fb7ab5cbe2f56c0,0x3fdcf097380721c6,2 +np.float64,0x3f8c4eaa10389d54,0x3fceb7ecb605b73b,2 +np.float64,0xbfed831ed6fb063e,0xbfef25f462a336f1,2 +np.float64,0x7fd8c52112318a41,0x553d61b0ee609f58,2 +np.float64,0xbfe71c4ff76e38a0,0xbfecb5d32e789771,2 +np.float64,0xbfe35fb7b166bf70,0xbfeb12328e75ee6b,2 +np.float64,0x458e1a3a8b1c4,0x2a9a1cebadc81342,2 +np.float64,0x8003c1b3ad478368,0xaa98df5ed060b28c,2 +np.float64,0x7ff4000000000000,0x7ffc000000000000,2 +np.float64,0x7fe17098c162e131,0x5540775a9a3a104f,2 +np.float64,0xbfd95cb71732b96e,0xbfe7812acf7ea511,2 +np.float64,0x8000000000000001,0xa990000000000000,2 +np.float64,0xbfde0e7d9ebc1cfc,0xbfe8df9ca9e49a5b,2 +np.float64,0xffef4f67143e9ecd,0xd5440348a6a2f231,2 +np.float64,0x7fe37d23c826fa47,0x5541165de17caa03,2 +np.float64,0xbfcc0e5f85381cc0,0xbfe34b44b0deefe9,2 +np.float64,0x3fe858f1c470b1e4,0x3fed36ab90557d89,2 +np.float64,0x800e857278fd0ae5,0xaaa3847d13220545,2 +np.float64,0x3febd31a66f7a635,0x3fee8af90e66b043,2 +np.float64,0x7fd3fde1b127fbc2,0x553b5b186a49b968,2 +np.float64,0x3fd3dabb8b27b577,0x3fe5a99b446bed26,2 +np.float64,0xffeb4500f1768a01,0xd5431cab828e254a,2 +np.float64,0xffccca8fc6399520,0xd53884f8b505e79e,2 +np.float64,0xffeee9406b7dd280,0xd543ed6d27a1a899,2 +np.float64,0xffecdde0f0f9bbc1,0xd5437a6258b14092,2 +np.float64,0xe6b54005cd6a8,0x2aa378c25938dfda,2 +np.float64,0x7fe610f1022c21e1,0x5541cf460b972925,2 +np.float64,0xbfe5a170ec6b42e2,0xbfec1576081e3232,2 diff --git a/.venv/lib/python3.12/site-packages/numpy/core/tests/data/umath-validation-set-cos.csv b/.venv/lib/python3.12/site-packages/numpy/core/tests/data/umath-validation-set-cos.csv new file mode 100644 index 00000000..258ae48c --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/data/umath-validation-set-cos.csv @@ -0,0 +1,1375 @@ +dtype,input,output,ulperrortol +## +ve denormals ## +np.float32,0x004b4716,0x3f800000,2 +np.float32,0x007b2490,0x3f800000,2 +np.float32,0x007c99fa,0x3f800000,2 +np.float32,0x00734a0c,0x3f800000,2 +np.float32,0x0070de24,0x3f800000,2 +np.float32,0x007fffff,0x3f800000,2 +np.float32,0x00000001,0x3f800000,2 +## -ve denormals ## +np.float32,0x80495d65,0x3f800000,2 +np.float32,0x806894f6,0x3f800000,2 +np.float32,0x80555a76,0x3f800000,2 +np.float32,0x804e1fb8,0x3f800000,2 +np.float32,0x80687de9,0x3f800000,2 +np.float32,0x807fffff,0x3f800000,2 
+np.float32,0x80000001,0x3f800000,2 +## +/-0.0f, +/-FLT_MIN +/-FLT_MAX ## +np.float32,0x00000000,0x3f800000,2 +np.float32,0x80000000,0x3f800000,2 +np.float32,0x00800000,0x3f800000,2 +np.float32,0x80800000,0x3f800000,2 +## 1.00f + 0x00000001 ## +np.float32,0x3f800000,0x3f0a5140,2 +np.float32,0x3f800001,0x3f0a513f,2 +np.float32,0x3f800002,0x3f0a513d,2 +np.float32,0xc090a8b0,0xbe4332ce,2 +np.float32,0x41ce3184,0x3f4d1de1,2 +np.float32,0xc1d85848,0xbeaa8980,2 +np.float32,0x402b8820,0xbf653aa3,2 +np.float32,0x42b4e454,0xbf4a338b,2 +np.float32,0x42a67a60,0x3c58202e,2 +np.float32,0x41d92388,0xbed987c7,2 +np.float32,0x422dd66c,0x3f5dcab3,2 +np.float32,0xc28f5be6,0xbf5688d8,2 +np.float32,0x41ab2674,0xbf53aa3b,2 +np.float32,0x3f490fdb,0x3f3504f3,2 +np.float32,0xbf490fdb,0x3f3504f3,2 +np.float32,0x3fc90fdb,0xb33bbd2e,2 +np.float32,0xbfc90fdb,0xb33bbd2e,2 +np.float32,0x40490fdb,0xbf800000,2 +np.float32,0xc0490fdb,0xbf800000,2 +np.float32,0x3fc90fdb,0xb33bbd2e,2 +np.float32,0xbfc90fdb,0xb33bbd2e,2 +np.float32,0x40490fdb,0xbf800000,2 +np.float32,0xc0490fdb,0xbf800000,2 +np.float32,0x40c90fdb,0x3f800000,2 +np.float32,0xc0c90fdb,0x3f800000,2 +np.float32,0x4016cbe4,0xbf3504f3,2 +np.float32,0xc016cbe4,0xbf3504f3,2 +np.float32,0x4096cbe4,0x324cde2e,2 +np.float32,0xc096cbe4,0x324cde2e,2 +np.float32,0x4116cbe4,0xbf800000,2 +np.float32,0xc116cbe4,0xbf800000,2 +np.float32,0x40490fdb,0xbf800000,2 +np.float32,0xc0490fdb,0xbf800000,2 +np.float32,0x40c90fdb,0x3f800000,2 +np.float32,0xc0c90fdb,0x3f800000,2 +np.float32,0x41490fdb,0x3f800000,2 +np.float32,0xc1490fdb,0x3f800000,2 +np.float32,0x407b53d2,0xbf3504f1,2 +np.float32,0xc07b53d2,0xbf3504f1,2 +np.float32,0x40fb53d2,0xb4b5563d,2 +np.float32,0xc0fb53d2,0xb4b5563d,2 +np.float32,0x417b53d2,0xbf800000,2 +np.float32,0xc17b53d2,0xbf800000,2 +np.float32,0x4096cbe4,0x324cde2e,2 +np.float32,0xc096cbe4,0x324cde2e,2 +np.float32,0x4116cbe4,0xbf800000,2 +np.float32,0xc116cbe4,0xbf800000,2 +np.float32,0x4196cbe4,0x3f800000,2 +np.float32,0xc196cbe4,0x3f800000,2 +np.float32,0x40afede0,0x3f3504f7,2 +np.float32,0xc0afede0,0x3f3504f7,2 +np.float32,0x412fede0,0x353222c4,2 +np.float32,0xc12fede0,0x353222c4,2 +np.float32,0x41afede0,0xbf800000,2 +np.float32,0xc1afede0,0xbf800000,2 +np.float32,0x40c90fdb,0x3f800000,2 +np.float32,0xc0c90fdb,0x3f800000,2 +np.float32,0x41490fdb,0x3f800000,2 +np.float32,0xc1490fdb,0x3f800000,2 +np.float32,0x41c90fdb,0x3f800000,2 +np.float32,0xc1c90fdb,0x3f800000,2 +np.float32,0x40e231d6,0x3f3504f3,2 +np.float32,0xc0e231d6,0x3f3504f3,2 +np.float32,0x416231d6,0xb319a6a2,2 +np.float32,0xc16231d6,0xb319a6a2,2 +np.float32,0x41e231d6,0xbf800000,2 +np.float32,0xc1e231d6,0xbf800000,2 +np.float32,0x40fb53d2,0xb4b5563d,2 +np.float32,0xc0fb53d2,0xb4b5563d,2 +np.float32,0x417b53d2,0xbf800000,2 +np.float32,0xc17b53d2,0xbf800000,2 +np.float32,0x41fb53d2,0x3f800000,2 +np.float32,0xc1fb53d2,0x3f800000,2 +np.float32,0x410a3ae7,0xbf3504fb,2 +np.float32,0xc10a3ae7,0xbf3504fb,2 +np.float32,0x418a3ae7,0x35b08908,2 +np.float32,0xc18a3ae7,0x35b08908,2 +np.float32,0x420a3ae7,0xbf800000,2 +np.float32,0xc20a3ae7,0xbf800000,2 +np.float32,0x4116cbe4,0xbf800000,2 +np.float32,0xc116cbe4,0xbf800000,2 +np.float32,0x4196cbe4,0x3f800000,2 +np.float32,0xc196cbe4,0x3f800000,2 +np.float32,0x4216cbe4,0x3f800000,2 +np.float32,0xc216cbe4,0x3f800000,2 +np.float32,0x41235ce2,0xbf3504ef,2 +np.float32,0xc1235ce2,0xbf3504ef,2 +np.float32,0x41a35ce2,0xb53889b6,2 +np.float32,0xc1a35ce2,0xb53889b6,2 +np.float32,0x42235ce2,0xbf800000,2 +np.float32,0xc2235ce2,0xbf800000,2 
+np.float32,0x412fede0,0x353222c4,2 +np.float32,0xc12fede0,0x353222c4,2 +np.float32,0x41afede0,0xbf800000,2 +np.float32,0xc1afede0,0xbf800000,2 +np.float32,0x422fede0,0x3f800000,2 +np.float32,0xc22fede0,0x3f800000,2 +np.float32,0x413c7edd,0x3f3504f4,2 +np.float32,0xc13c7edd,0x3f3504f4,2 +np.float32,0x41bc7edd,0x33800add,2 +np.float32,0xc1bc7edd,0x33800add,2 +np.float32,0x423c7edd,0xbf800000,2 +np.float32,0xc23c7edd,0xbf800000,2 +np.float32,0x41490fdb,0x3f800000,2 +np.float32,0xc1490fdb,0x3f800000,2 +np.float32,0x41c90fdb,0x3f800000,2 +np.float32,0xc1c90fdb,0x3f800000,2 +np.float32,0x42490fdb,0x3f800000,2 +np.float32,0xc2490fdb,0x3f800000,2 +np.float32,0x4155a0d9,0x3f3504eb,2 +np.float32,0xc155a0d9,0x3f3504eb,2 +np.float32,0x41d5a0d9,0xb5b3bc81,2 +np.float32,0xc1d5a0d9,0xb5b3bc81,2 +np.float32,0x4255a0d9,0xbf800000,2 +np.float32,0xc255a0d9,0xbf800000,2 +np.float32,0x416231d6,0xb319a6a2,2 +np.float32,0xc16231d6,0xb319a6a2,2 +np.float32,0x41e231d6,0xbf800000,2 +np.float32,0xc1e231d6,0xbf800000,2 +np.float32,0x426231d6,0x3f800000,2 +np.float32,0xc26231d6,0x3f800000,2 +np.float32,0x416ec2d4,0xbf3504f7,2 +np.float32,0xc16ec2d4,0xbf3504f7,2 +np.float32,0x41eec2d4,0x353ef0a7,2 +np.float32,0xc1eec2d4,0x353ef0a7,2 +np.float32,0x426ec2d4,0xbf800000,2 +np.float32,0xc26ec2d4,0xbf800000,2 +np.float32,0x417b53d2,0xbf800000,2 +np.float32,0xc17b53d2,0xbf800000,2 +np.float32,0x41fb53d2,0x3f800000,2 +np.float32,0xc1fb53d2,0x3f800000,2 +np.float32,0x427b53d2,0x3f800000,2 +np.float32,0xc27b53d2,0x3f800000,2 +np.float32,0x4183f268,0xbf3504e7,2 +np.float32,0xc183f268,0xbf3504e7,2 +np.float32,0x4203f268,0xb6059a13,2 +np.float32,0xc203f268,0xb6059a13,2 +np.float32,0x4283f268,0xbf800000,2 +np.float32,0xc283f268,0xbf800000,2 +np.float32,0x418a3ae7,0x35b08908,2 +np.float32,0xc18a3ae7,0x35b08908,2 +np.float32,0x420a3ae7,0xbf800000,2 +np.float32,0xc20a3ae7,0xbf800000,2 +np.float32,0x428a3ae7,0x3f800000,2 +np.float32,0xc28a3ae7,0x3f800000,2 +np.float32,0x41908365,0x3f3504f0,2 +np.float32,0xc1908365,0x3f3504f0,2 +np.float32,0x42108365,0xb512200d,2 +np.float32,0xc2108365,0xb512200d,2 +np.float32,0x42908365,0xbf800000,2 +np.float32,0xc2908365,0xbf800000,2 +np.float32,0x4196cbe4,0x3f800000,2 +np.float32,0xc196cbe4,0x3f800000,2 +np.float32,0x4216cbe4,0x3f800000,2 +np.float32,0xc216cbe4,0x3f800000,2 +np.float32,0x4296cbe4,0x3f800000,2 +np.float32,0xc296cbe4,0x3f800000,2 +np.float32,0x419d1463,0x3f3504ef,2 +np.float32,0xc19d1463,0x3f3504ef,2 +np.float32,0x421d1463,0xb5455799,2 +np.float32,0xc21d1463,0xb5455799,2 +np.float32,0x429d1463,0xbf800000,2 +np.float32,0xc29d1463,0xbf800000,2 +np.float32,0x41a35ce2,0xb53889b6,2 +np.float32,0xc1a35ce2,0xb53889b6,2 +np.float32,0x42235ce2,0xbf800000,2 +np.float32,0xc2235ce2,0xbf800000,2 +np.float32,0x42a35ce2,0x3f800000,2 +np.float32,0xc2a35ce2,0x3f800000,2 +np.float32,0x41a9a561,0xbf3504ff,2 +np.float32,0xc1a9a561,0xbf3504ff,2 +np.float32,0x4229a561,0x360733d0,2 +np.float32,0xc229a561,0x360733d0,2 +np.float32,0x42a9a561,0xbf800000,2 +np.float32,0xc2a9a561,0xbf800000,2 +np.float32,0x41afede0,0xbf800000,2 +np.float32,0xc1afede0,0xbf800000,2 +np.float32,0x422fede0,0x3f800000,2 +np.float32,0xc22fede0,0x3f800000,2 +np.float32,0x42afede0,0x3f800000,2 +np.float32,0xc2afede0,0x3f800000,2 +np.float32,0x41b6365e,0xbf3504f6,2 +np.float32,0xc1b6365e,0xbf3504f6,2 +np.float32,0x4236365e,0x350bb91c,2 +np.float32,0xc236365e,0x350bb91c,2 +np.float32,0x42b6365e,0xbf800000,2 +np.float32,0xc2b6365e,0xbf800000,2 +np.float32,0x41bc7edd,0x33800add,2 +np.float32,0xc1bc7edd,0x33800add,2 
+np.float32,0x423c7edd,0xbf800000,2 +np.float32,0xc23c7edd,0xbf800000,2 +np.float32,0x42bc7edd,0x3f800000,2 +np.float32,0xc2bc7edd,0x3f800000,2 +np.float32,0x41c2c75c,0x3f3504f8,2 +np.float32,0xc1c2c75c,0x3f3504f8,2 +np.float32,0x4242c75c,0x354bbe8a,2 +np.float32,0xc242c75c,0x354bbe8a,2 +np.float32,0x42c2c75c,0xbf800000,2 +np.float32,0xc2c2c75c,0xbf800000,2 +np.float32,0x41c90fdb,0x3f800000,2 +np.float32,0xc1c90fdb,0x3f800000,2 +np.float32,0x42490fdb,0x3f800000,2 +np.float32,0xc2490fdb,0x3f800000,2 +np.float32,0x42c90fdb,0x3f800000,2 +np.float32,0xc2c90fdb,0x3f800000,2 +np.float32,0x41cf585a,0x3f3504e7,2 +np.float32,0xc1cf585a,0x3f3504e7,2 +np.float32,0x424f585a,0xb608cd8c,2 +np.float32,0xc24f585a,0xb608cd8c,2 +np.float32,0x42cf585a,0xbf800000,2 +np.float32,0xc2cf585a,0xbf800000,2 +np.float32,0x41d5a0d9,0xb5b3bc81,2 +np.float32,0xc1d5a0d9,0xb5b3bc81,2 +np.float32,0x4255a0d9,0xbf800000,2 +np.float32,0xc255a0d9,0xbf800000,2 +np.float32,0x42d5a0d9,0x3f800000,2 +np.float32,0xc2d5a0d9,0x3f800000,2 +np.float32,0x41dbe958,0xbf350507,2 +np.float32,0xc1dbe958,0xbf350507,2 +np.float32,0x425be958,0x365eab75,2 +np.float32,0xc25be958,0x365eab75,2 +np.float32,0x42dbe958,0xbf800000,2 +np.float32,0xc2dbe958,0xbf800000,2 +np.float32,0x41e231d6,0xbf800000,2 +np.float32,0xc1e231d6,0xbf800000,2 +np.float32,0x426231d6,0x3f800000,2 +np.float32,0xc26231d6,0x3f800000,2 +np.float32,0x42e231d6,0x3f800000,2 +np.float32,0xc2e231d6,0x3f800000,2 +np.float32,0x41e87a55,0xbf3504ef,2 +np.float32,0xc1e87a55,0xbf3504ef,2 +np.float32,0x42687a55,0xb552257b,2 +np.float32,0xc2687a55,0xb552257b,2 +np.float32,0x42e87a55,0xbf800000,2 +np.float32,0xc2e87a55,0xbf800000,2 +np.float32,0x41eec2d4,0x353ef0a7,2 +np.float32,0xc1eec2d4,0x353ef0a7,2 +np.float32,0x426ec2d4,0xbf800000,2 +np.float32,0xc26ec2d4,0xbf800000,2 +np.float32,0x42eec2d4,0x3f800000,2 +np.float32,0xc2eec2d4,0x3f800000,2 +np.float32,0x41f50b53,0x3f3504ff,2 +np.float32,0xc1f50b53,0x3f3504ff,2 +np.float32,0x42750b53,0x360a6748,2 +np.float32,0xc2750b53,0x360a6748,2 +np.float32,0x42f50b53,0xbf800000,2 +np.float32,0xc2f50b53,0xbf800000,2 +np.float32,0x41fb53d2,0x3f800000,2 +np.float32,0xc1fb53d2,0x3f800000,2 +np.float32,0x427b53d2,0x3f800000,2 +np.float32,0xc27b53d2,0x3f800000,2 +np.float32,0x42fb53d2,0x3f800000,2 +np.float32,0xc2fb53d2,0x3f800000,2 +np.float32,0x4200ce28,0x3f3504f6,2 +np.float32,0xc200ce28,0x3f3504f6,2 +np.float32,0x4280ce28,0x34fdd672,2 +np.float32,0xc280ce28,0x34fdd672,2 +np.float32,0x4300ce28,0xbf800000,2 +np.float32,0xc300ce28,0xbf800000,2 +np.float32,0x4203f268,0xb6059a13,2 +np.float32,0xc203f268,0xb6059a13,2 +np.float32,0x4283f268,0xbf800000,2 +np.float32,0xc283f268,0xbf800000,2 +np.float32,0x4303f268,0x3f800000,2 +np.float32,0xc303f268,0x3f800000,2 +np.float32,0x420716a7,0xbf3504f8,2 +np.float32,0xc20716a7,0xbf3504f8,2 +np.float32,0x428716a7,0x35588c6d,2 +np.float32,0xc28716a7,0x35588c6d,2 +np.float32,0x430716a7,0xbf800000,2 +np.float32,0xc30716a7,0xbf800000,2 +np.float32,0x420a3ae7,0xbf800000,2 +np.float32,0xc20a3ae7,0xbf800000,2 +np.float32,0x428a3ae7,0x3f800000,2 +np.float32,0xc28a3ae7,0x3f800000,2 +np.float32,0x430a3ae7,0x3f800000,2 +np.float32,0xc30a3ae7,0x3f800000,2 +np.float32,0x420d5f26,0xbf3504e7,2 +np.float32,0xc20d5f26,0xbf3504e7,2 +np.float32,0x428d5f26,0xb60c0105,2 +np.float32,0xc28d5f26,0xb60c0105,2 +np.float32,0x430d5f26,0xbf800000,2 +np.float32,0xc30d5f26,0xbf800000,2 +np.float32,0x42108365,0xb512200d,2 +np.float32,0xc2108365,0xb512200d,2 +np.float32,0x42908365,0xbf800000,2 +np.float32,0xc2908365,0xbf800000,2 
+np.float32,0x43108365,0x3f800000,2 +np.float32,0xc3108365,0x3f800000,2 +np.float32,0x4213a7a5,0x3f350507,2 +np.float32,0xc213a7a5,0x3f350507,2 +np.float32,0x4293a7a5,0x3661deee,2 +np.float32,0xc293a7a5,0x3661deee,2 +np.float32,0x4313a7a5,0xbf800000,2 +np.float32,0xc313a7a5,0xbf800000,2 +np.float32,0x4216cbe4,0x3f800000,2 +np.float32,0xc216cbe4,0x3f800000,2 +np.float32,0x4296cbe4,0x3f800000,2 +np.float32,0xc296cbe4,0x3f800000,2 +np.float32,0x4316cbe4,0x3f800000,2 +np.float32,0xc316cbe4,0x3f800000,2 +np.float32,0x4219f024,0x3f3504d8,2 +np.float32,0xc219f024,0x3f3504d8,2 +np.float32,0x4299f024,0xb69bde6c,2 +np.float32,0xc299f024,0xb69bde6c,2 +np.float32,0x4319f024,0xbf800000,2 +np.float32,0xc319f024,0xbf800000,2 +np.float32,0x421d1463,0xb5455799,2 +np.float32,0xc21d1463,0xb5455799,2 +np.float32,0x429d1463,0xbf800000,2 +np.float32,0xc29d1463,0xbf800000,2 +np.float32,0x431d1463,0x3f800000,2 +np.float32,0xc31d1463,0x3f800000,2 +np.float32,0x422038a3,0xbf350516,2 +np.float32,0xc22038a3,0xbf350516,2 +np.float32,0x42a038a3,0x36c6cd61,2 +np.float32,0xc2a038a3,0x36c6cd61,2 +np.float32,0x432038a3,0xbf800000,2 +np.float32,0xc32038a3,0xbf800000,2 +np.float32,0x42235ce2,0xbf800000,2 +np.float32,0xc2235ce2,0xbf800000,2 +np.float32,0x42a35ce2,0x3f800000,2 +np.float32,0xc2a35ce2,0x3f800000,2 +np.float32,0x43235ce2,0x3f800000,2 +np.float32,0xc3235ce2,0x3f800000,2 +np.float32,0x42268121,0xbf3504f6,2 +np.float32,0xc2268121,0xbf3504f6,2 +np.float32,0x42a68121,0x34e43aac,2 +np.float32,0xc2a68121,0x34e43aac,2 +np.float32,0x43268121,0xbf800000,2 +np.float32,0xc3268121,0xbf800000,2 +np.float32,0x4229a561,0x360733d0,2 +np.float32,0xc229a561,0x360733d0,2 +np.float32,0x42a9a561,0xbf800000,2 +np.float32,0xc2a9a561,0xbf800000,2 +np.float32,0x4329a561,0x3f800000,2 +np.float32,0xc329a561,0x3f800000,2 +np.float32,0x422cc9a0,0x3f3504f8,2 +np.float32,0xc22cc9a0,0x3f3504f8,2 +np.float32,0x42acc9a0,0x35655a50,2 +np.float32,0xc2acc9a0,0x35655a50,2 +np.float32,0x432cc9a0,0xbf800000,2 +np.float32,0xc32cc9a0,0xbf800000,2 +np.float32,0x422fede0,0x3f800000,2 +np.float32,0xc22fede0,0x3f800000,2 +np.float32,0x42afede0,0x3f800000,2 +np.float32,0xc2afede0,0x3f800000,2 +np.float32,0x432fede0,0x3f800000,2 +np.float32,0xc32fede0,0x3f800000,2 +np.float32,0x4233121f,0x3f3504e7,2 +np.float32,0xc233121f,0x3f3504e7,2 +np.float32,0x42b3121f,0xb60f347d,2 +np.float32,0xc2b3121f,0xb60f347d,2 +np.float32,0x4333121f,0xbf800000,2 +np.float32,0xc333121f,0xbf800000,2 +np.float32,0x4236365e,0x350bb91c,2 +np.float32,0xc236365e,0x350bb91c,2 +np.float32,0x42b6365e,0xbf800000,2 +np.float32,0xc2b6365e,0xbf800000,2 +np.float32,0x4336365e,0x3f800000,2 +np.float32,0xc336365e,0x3f800000,2 +np.float32,0x42395a9e,0xbf350507,2 +np.float32,0xc2395a9e,0xbf350507,2 +np.float32,0x42b95a9e,0x36651267,2 +np.float32,0xc2b95a9e,0x36651267,2 +np.float32,0x43395a9e,0xbf800000,2 +np.float32,0xc3395a9e,0xbf800000,2 +np.float32,0x423c7edd,0xbf800000,2 +np.float32,0xc23c7edd,0xbf800000,2 +np.float32,0x42bc7edd,0x3f800000,2 +np.float32,0xc2bc7edd,0x3f800000,2 +np.float32,0x433c7edd,0x3f800000,2 +np.float32,0xc33c7edd,0x3f800000,2 +np.float32,0x423fa31d,0xbf3504d7,2 +np.float32,0xc23fa31d,0xbf3504d7,2 +np.float32,0x42bfa31d,0xb69d7828,2 +np.float32,0xc2bfa31d,0xb69d7828,2 +np.float32,0x433fa31d,0xbf800000,2 +np.float32,0xc33fa31d,0xbf800000,2 +np.float32,0x4242c75c,0x354bbe8a,2 +np.float32,0xc242c75c,0x354bbe8a,2 +np.float32,0x42c2c75c,0xbf800000,2 +np.float32,0xc2c2c75c,0xbf800000,2 +np.float32,0x4342c75c,0x3f800000,2 +np.float32,0xc342c75c,0x3f800000,2 
+np.float32,0x4245eb9c,0x3f350517,2 +np.float32,0xc245eb9c,0x3f350517,2 +np.float32,0x42c5eb9c,0x36c8671d,2 +np.float32,0xc2c5eb9c,0x36c8671d,2 +np.float32,0x4345eb9c,0xbf800000,2 +np.float32,0xc345eb9c,0xbf800000,2 +np.float32,0x42490fdb,0x3f800000,2 +np.float32,0xc2490fdb,0x3f800000,2 +np.float32,0x42c90fdb,0x3f800000,2 +np.float32,0xc2c90fdb,0x3f800000,2 +np.float32,0x43490fdb,0x3f800000,2 +np.float32,0xc3490fdb,0x3f800000,2 +np.float32,0x424c341a,0x3f3504f5,2 +np.float32,0xc24c341a,0x3f3504f5,2 +np.float32,0x42cc341a,0x34ca9ee6,2 +np.float32,0xc2cc341a,0x34ca9ee6,2 +np.float32,0x434c341a,0xbf800000,2 +np.float32,0xc34c341a,0xbf800000,2 +np.float32,0x424f585a,0xb608cd8c,2 +np.float32,0xc24f585a,0xb608cd8c,2 +np.float32,0x42cf585a,0xbf800000,2 +np.float32,0xc2cf585a,0xbf800000,2 +np.float32,0x434f585a,0x3f800000,2 +np.float32,0xc34f585a,0x3f800000,2 +np.float32,0x42527c99,0xbf3504f9,2 +np.float32,0xc2527c99,0xbf3504f9,2 +np.float32,0x42d27c99,0x35722833,2 +np.float32,0xc2d27c99,0x35722833,2 +np.float32,0x43527c99,0xbf800000,2 +np.float32,0xc3527c99,0xbf800000,2 +np.float32,0x4255a0d9,0xbf800000,2 +np.float32,0xc255a0d9,0xbf800000,2 +np.float32,0x42d5a0d9,0x3f800000,2 +np.float32,0xc2d5a0d9,0x3f800000,2 +np.float32,0x4355a0d9,0x3f800000,2 +np.float32,0xc355a0d9,0x3f800000,2 +np.float32,0x4258c518,0xbf3504e6,2 +np.float32,0xc258c518,0xbf3504e6,2 +np.float32,0x42d8c518,0xb61267f6,2 +np.float32,0xc2d8c518,0xb61267f6,2 +np.float32,0x4358c518,0xbf800000,2 +np.float32,0xc358c518,0xbf800000,2 +np.float32,0x425be958,0x365eab75,2 +np.float32,0xc25be958,0x365eab75,2 +np.float32,0x42dbe958,0xbf800000,2 +np.float32,0xc2dbe958,0xbf800000,2 +np.float32,0x435be958,0x3f800000,2 +np.float32,0xc35be958,0x3f800000,2 +np.float32,0x425f0d97,0x3f350508,2 +np.float32,0xc25f0d97,0x3f350508,2 +np.float32,0x42df0d97,0x366845e0,2 +np.float32,0xc2df0d97,0x366845e0,2 +np.float32,0x435f0d97,0xbf800000,2 +np.float32,0xc35f0d97,0xbf800000,2 +np.float32,0x426231d6,0x3f800000,2 +np.float32,0xc26231d6,0x3f800000,2 +np.float32,0x42e231d6,0x3f800000,2 +np.float32,0xc2e231d6,0x3f800000,2 +np.float32,0x436231d6,0x3f800000,2 +np.float32,0xc36231d6,0x3f800000,2 +np.float32,0x42655616,0x3f3504d7,2 +np.float32,0xc2655616,0x3f3504d7,2 +np.float32,0x42e55616,0xb69f11e5,2 +np.float32,0xc2e55616,0xb69f11e5,2 +np.float32,0x43655616,0xbf800000,2 +np.float32,0xc3655616,0xbf800000,2 +np.float32,0x42687a55,0xb552257b,2 +np.float32,0xc2687a55,0xb552257b,2 +np.float32,0x42e87a55,0xbf800000,2 +np.float32,0xc2e87a55,0xbf800000,2 +np.float32,0x43687a55,0x3f800000,2 +np.float32,0xc3687a55,0x3f800000,2 +np.float32,0x426b9e95,0xbf350517,2 +np.float32,0xc26b9e95,0xbf350517,2 +np.float32,0x42eb9e95,0x36ca00d9,2 +np.float32,0xc2eb9e95,0x36ca00d9,2 +np.float32,0x436b9e95,0xbf800000,2 +np.float32,0xc36b9e95,0xbf800000,2 +np.float32,0x426ec2d4,0xbf800000,2 +np.float32,0xc26ec2d4,0xbf800000,2 +np.float32,0x42eec2d4,0x3f800000,2 +np.float32,0xc2eec2d4,0x3f800000,2 +np.float32,0x436ec2d4,0x3f800000,2 +np.float32,0xc36ec2d4,0x3f800000,2 +np.float32,0x4271e713,0xbf3504f5,2 +np.float32,0xc271e713,0xbf3504f5,2 +np.float32,0x42f1e713,0x34b10321,2 +np.float32,0xc2f1e713,0x34b10321,2 +np.float32,0x4371e713,0xbf800000,2 +np.float32,0xc371e713,0xbf800000,2 +np.float32,0x42750b53,0x360a6748,2 +np.float32,0xc2750b53,0x360a6748,2 +np.float32,0x42f50b53,0xbf800000,2 +np.float32,0xc2f50b53,0xbf800000,2 +np.float32,0x43750b53,0x3f800000,2 +np.float32,0xc3750b53,0x3f800000,2 +np.float32,0x42782f92,0x3f3504f9,2 +np.float32,0xc2782f92,0x3f3504f9,2 
+np.float32,0x42f82f92,0x357ef616,2 +np.float32,0xc2f82f92,0x357ef616,2 +np.float32,0x43782f92,0xbf800000,2 +np.float32,0xc3782f92,0xbf800000,2 +np.float32,0x427b53d2,0x3f800000,2 +np.float32,0xc27b53d2,0x3f800000,2 +np.float32,0x42fb53d2,0x3f800000,2 +np.float32,0xc2fb53d2,0x3f800000,2 +np.float32,0x437b53d2,0x3f800000,2 +np.float32,0xc37b53d2,0x3f800000,2 +np.float32,0x427e7811,0x3f3504e6,2 +np.float32,0xc27e7811,0x3f3504e6,2 +np.float32,0x42fe7811,0xb6159b6f,2 +np.float32,0xc2fe7811,0xb6159b6f,2 +np.float32,0x437e7811,0xbf800000,2 +np.float32,0xc37e7811,0xbf800000,2 +np.float32,0x4280ce28,0x34fdd672,2 +np.float32,0xc280ce28,0x34fdd672,2 +np.float32,0x4300ce28,0xbf800000,2 +np.float32,0xc300ce28,0xbf800000,2 +np.float32,0x4380ce28,0x3f800000,2 +np.float32,0xc380ce28,0x3f800000,2 +np.float32,0x42826048,0xbf350508,2 +np.float32,0xc2826048,0xbf350508,2 +np.float32,0x43026048,0x366b7958,2 +np.float32,0xc3026048,0x366b7958,2 +np.float32,0x43826048,0xbf800000,2 +np.float32,0xc3826048,0xbf800000,2 +np.float32,0x4283f268,0xbf800000,2 +np.float32,0xc283f268,0xbf800000,2 +np.float32,0x4303f268,0x3f800000,2 +np.float32,0xc303f268,0x3f800000,2 +np.float32,0x4383f268,0x3f800000,2 +np.float32,0xc383f268,0x3f800000,2 +np.float32,0x42858487,0xbf350504,2 +np.float32,0xc2858487,0xbf350504,2 +np.float32,0x43058487,0x363ea8be,2 +np.float32,0xc3058487,0x363ea8be,2 +np.float32,0x43858487,0xbf800000,2 +np.float32,0xc3858487,0xbf800000,2 +np.float32,0x428716a7,0x35588c6d,2 +np.float32,0xc28716a7,0x35588c6d,2 +np.float32,0x430716a7,0xbf800000,2 +np.float32,0xc30716a7,0xbf800000,2 +np.float32,0x438716a7,0x3f800000,2 +np.float32,0xc38716a7,0x3f800000,2 +np.float32,0x4288a8c7,0x3f350517,2 +np.float32,0xc288a8c7,0x3f350517,2 +np.float32,0x4308a8c7,0x36cb9a96,2 +np.float32,0xc308a8c7,0x36cb9a96,2 +np.float32,0x4388a8c7,0xbf800000,2 +np.float32,0xc388a8c7,0xbf800000,2 +np.float32,0x428a3ae7,0x3f800000,2 +np.float32,0xc28a3ae7,0x3f800000,2 +np.float32,0x430a3ae7,0x3f800000,2 +np.float32,0xc30a3ae7,0x3f800000,2 +np.float32,0x438a3ae7,0x3f800000,2 +np.float32,0xc38a3ae7,0x3f800000,2 +np.float32,0x428bcd06,0x3f3504f5,2 +np.float32,0xc28bcd06,0x3f3504f5,2 +np.float32,0x430bcd06,0x3497675b,2 +np.float32,0xc30bcd06,0x3497675b,2 +np.float32,0x438bcd06,0xbf800000,2 +np.float32,0xc38bcd06,0xbf800000,2 +np.float32,0x428d5f26,0xb60c0105,2 +np.float32,0xc28d5f26,0xb60c0105,2 +np.float32,0x430d5f26,0xbf800000,2 +np.float32,0xc30d5f26,0xbf800000,2 +np.float32,0x438d5f26,0x3f800000,2 +np.float32,0xc38d5f26,0x3f800000,2 +np.float32,0x428ef146,0xbf350526,2 +np.float32,0xc28ef146,0xbf350526,2 +np.float32,0x430ef146,0x3710bc40,2 +np.float32,0xc30ef146,0x3710bc40,2 +np.float32,0x438ef146,0xbf800000,2 +np.float32,0xc38ef146,0xbf800000,2 +np.float32,0x42908365,0xbf800000,2 +np.float32,0xc2908365,0xbf800000,2 +np.float32,0x43108365,0x3f800000,2 +np.float32,0xc3108365,0x3f800000,2 +np.float32,0x43908365,0x3f800000,2 +np.float32,0xc3908365,0x3f800000,2 +np.float32,0x42921585,0xbf3504e6,2 +np.float32,0xc2921585,0xbf3504e6,2 +np.float32,0x43121585,0xb618cee8,2 +np.float32,0xc3121585,0xb618cee8,2 +np.float32,0x43921585,0xbf800000,2 +np.float32,0xc3921585,0xbf800000,2 +np.float32,0x4293a7a5,0x3661deee,2 +np.float32,0xc293a7a5,0x3661deee,2 +np.float32,0x4313a7a5,0xbf800000,2 +np.float32,0xc313a7a5,0xbf800000,2 +np.float32,0x4393a7a5,0x3f800000,2 +np.float32,0xc393a7a5,0x3f800000,2 +np.float32,0x429539c5,0x3f350536,2 +np.float32,0xc29539c5,0x3f350536,2 +np.float32,0x431539c5,0x373bab34,2 +np.float32,0xc31539c5,0x373bab34,2 
+np.float32,0x439539c5,0xbf800000,2 +np.float32,0xc39539c5,0xbf800000,2 +np.float32,0x4296cbe4,0x3f800000,2 +np.float32,0xc296cbe4,0x3f800000,2 +np.float32,0x4316cbe4,0x3f800000,2 +np.float32,0xc316cbe4,0x3f800000,2 +np.float32,0x4396cbe4,0x3f800000,2 +np.float32,0xc396cbe4,0x3f800000,2 +np.float32,0x42985e04,0x3f3504d7,2 +np.float32,0xc2985e04,0x3f3504d7,2 +np.float32,0x43185e04,0xb6a2455d,2 +np.float32,0xc3185e04,0xb6a2455d,2 +np.float32,0x43985e04,0xbf800000,2 +np.float32,0xc3985e04,0xbf800000,2 +np.float32,0x4299f024,0xb69bde6c,2 +np.float32,0xc299f024,0xb69bde6c,2 +np.float32,0x4319f024,0xbf800000,2 +np.float32,0xc319f024,0xbf800000,2 +np.float32,0x4399f024,0x3f800000,2 +np.float32,0xc399f024,0x3f800000,2 +np.float32,0x429b8243,0xbf3504ea,2 +np.float32,0xc29b8243,0xbf3504ea,2 +np.float32,0x431b8243,0xb5cb2eb8,2 +np.float32,0xc31b8243,0xb5cb2eb8,2 +np.float32,0x439b8243,0xbf800000,2 +np.float32,0xc39b8243,0xbf800000,2 +np.float32,0x435b2047,0x3f3504c1,2 +np.float32,0x42a038a2,0xb5e4ca7e,2 +np.float32,0x432038a2,0xbf800000,2 +np.float32,0x4345eb9b,0xbf800000,2 +np.float32,0x42c5eb9b,0xb5de638c,2 +np.float32,0x42eb9e94,0xb5d7fc9b,2 +np.float32,0x4350ea79,0x3631dadb,2 +np.float32,0x42dbe957,0xbf800000,2 +np.float32,0x425be957,0xb505522a,2 +np.float32,0x435be957,0x3f800000,2 +np.float32,0x46027eb2,0x3e7d94c9,2 +np.float32,0x4477baed,0xbe7f1824,2 +np.float32,0x454b8024,0x3e7f5268,2 +np.float32,0x455d2c09,0x3e7f40cb,2 +np.float32,0x4768d3de,0xba14b4af,2 +np.float32,0x46c1e7cd,0x3e7fb102,2 +np.float32,0x44a52949,0xbe7dc9d5,2 +np.float32,0x4454633a,0x3e7dbc7d,2 +np.float32,0x4689810b,0x3e7eb02b,2 +np.float32,0x473473cd,0xbe7eef6f,2 +np.float32,0x44a5193f,0x3e7e1b1f,2 +np.float32,0x46004b36,0x3e7dac59,2 +np.float32,0x467f604b,0x3d7ffd3a,2 +np.float32,0x45ea1805,0x3dffd2e0,2 +np.float32,0x457b6af3,0x3dff7831,2 +np.float32,0x44996159,0xbe7d85f4,2 +np.float32,0x47883553,0xbb80584e,2 +np.float32,0x44e19f0c,0xbdffcfe6,2 +np.float32,0x472b3bf6,0xbe7f7a82,2 +np.float32,0x4600bb4e,0x3a135e33,2 +np.float32,0x449f4556,0x3e7e42e5,2 +np.float32,0x474e9420,0x3dff77b2,2 +np.float32,0x45cbdb23,0x3dff7240,2 +np.float32,0x44222747,0x3dffb039,2 +np.float32,0x4772e419,0xbdff74b8,2 +np.float64,0x1,0x3ff0000000000000,1 +np.float64,0x8000000000000001,0x3ff0000000000000,1 +np.float64,0x10000000000000,0x3ff0000000000000,1 +np.float64,0x8010000000000000,0x3ff0000000000000,1 +np.float64,0x7fefffffffffffff,0xbfefffe62ecfab75,1 +np.float64,0xffefffffffffffff,0xbfefffe62ecfab75,1 +np.float64,0x7ff0000000000000,0xfff8000000000000,1 +np.float64,0xfff0000000000000,0xfff8000000000000,1 +np.float64,0x7ff8000000000000,0x7ff8000000000000,1 +np.float64,0x7ff4000000000000,0x7ffc000000000000,1 +np.float64,0xbfc28bd9dd2517b4,0x3fefaa28ba13a702,1 +np.float64,0x3fb673c62e2ce790,0x3fefe083847a717f,1 +np.float64,0xbfe3e1dac7e7c3b6,0x3fea0500ba099f3a,1 +np.float64,0xbfbe462caa3c8c58,0x3fefc6c8b9c1c87c,1 +np.float64,0xbfb9353576326a68,0x3fefd8513e50e6b1,1 +np.float64,0xbfc05e798520bcf4,0x3fefbd1ad81cf089,1 +np.float64,0xbfe3ca3be2e79478,0x3fea12b995ea6574,1 +np.float64,0xbfde875d46bd0eba,0x3fec6d888662a824,1 +np.float64,0x3fafc4e02c3f89c0,0x3feff03c34bffd69,1 +np.float64,0xbf98855848310ac0,0x3feffda6c1588bdb,1 +np.float64,0x3fe66c51186cd8a2,0x3fe875c61c630ecb,1 +np.float64,0xbfedff1c3b7bfe38,0x3fe2f0c8c9e8fa39,1 +np.float64,0x3fd6082267ac1044,0x3fee1f6023695050,1 +np.float64,0xbfe78449b06f0894,0x3fe7bda2b223850e,1 +np.float64,0x3feedb8e63fdb71c,0x3fe23d5dfd2dd33f,1 +np.float64,0xbfc0a9de3d2153bc,0x3fefbaadf5e5285e,1 
+np.float64,0x3fc04c67432098d0,0x3fefbdae07b7de8d,1 +np.float64,0xbfeeef84c4fddf0a,0x3fe22cf37f309d88,1 +np.float64,0x3fc04bb025209760,0x3fefbdb3d7d34ecf,1 +np.float64,0x3fd6b84d48ad709c,0x3fee013403da6e2a,1 +np.float64,0x3fec1ae25d7835c4,0x3fe46e62195cf274,1 +np.float64,0xbfdc6fdf9bb8dfc0,0x3fece48dc78bbb2e,1 +np.float64,0x3fb4db2c9229b660,0x3fefe4d42f79bf49,1 +np.float64,0xbfc0ed698521dad4,0x3fefb8785ea658c9,1 +np.float64,0xbfee82772b7d04ee,0x3fe2864a80efe8e9,1 +np.float64,0x3fd575b664aaeb6c,0x3fee37c669a12879,1 +np.float64,0x3fe4afb1c5e95f64,0x3fe98b177194439c,1 +np.float64,0x3fd93962f9b272c4,0x3fed8bef61876294,1 +np.float64,0x3fd97ae025b2f5c0,0x3fed7f4cfbf4d300,1 +np.float64,0xbfd9afdb1bb35fb6,0x3fed74fdc44dabb1,1 +np.float64,0x3f8ae65e3035cc80,0x3fefff4b1a0ea62b,1 +np.float64,0xbfe7e58664efcb0d,0x3fe77c02a1cbb670,1 +np.float64,0x3fe5f68b37ebed16,0x3fe8c10f849a5d4d,1 +np.float64,0x3fd9137d61b226fc,0x3fed9330eb4815a1,1 +np.float64,0x3fc146d019228da0,0x3fefb57e2d4d52f8,1 +np.float64,0xbfda6036edb4c06e,0x3fed521b2b578679,1 +np.float64,0xbfe78ddfb0ef1bc0,0x3fe7b734319a77e4,1 +np.float64,0x3fe0877823610ef0,0x3febd33a993dd786,1 +np.float64,0x3fbc61af2e38c360,0x3fefcdb4f889756d,1 +np.float64,0x3fd4dcdca4a9b9b8,0x3fee50962ffea5ae,1 +np.float64,0xbfe03cb29f607965,0x3febf7dbf640a75a,1 +np.float64,0xbfc81de407303bc8,0x3fef6f066cef64bc,1 +np.float64,0x3fd8dea42db1bd48,0x3fed9d3e00dbe0b3,1 +np.float64,0x3feac75e94f58ebe,0x3fe56f1f47f97896,1 +np.float64,0x3fb3a1ea6e2743d0,0x3fefe7ec1247cdaa,1 +np.float64,0x3fd695c0f4ad2b80,0x3fee0730bd40883d,1 +np.float64,0xbfd2c631f5a58c64,0x3feea20cbd1105d7,1 +np.float64,0xbfe978a8e1f2f152,0x3fe663014d40ad7a,1 +np.float64,0x3fd8b6b76ab16d70,0x3feda4c879aacc19,1 +np.float64,0x3feaafd30e755fa6,0x3fe5809514c28453,1 +np.float64,0x3fe1e37dc263c6fc,0x3feb20f9ad1f3f5c,1 +np.float64,0x3fd0ec7c24a1d8f8,0x3feee34048f43b75,1 +np.float64,0xbfe3881cbf67103a,0x3fea38d7886e6f53,1 +np.float64,0xbfd7023957ae0472,0x3fedf4471c765a1c,1 +np.float64,0xbfebc51c4ef78a38,0x3fe4b01c424e297b,1 +np.float64,0xbfe20a93eae41528,0x3feb0c2aa321d2e0,1 +np.float64,0x3fef39be867e737e,0x3fe1efaba9164d27,1 +np.float64,0x3fe8ea9576f1d52a,0x3fe6c7a8826ce1be,1 +np.float64,0x3fea921d91f5243c,0x3fe5968c6cf78963,1 +np.float64,0x3fd7ee5d31afdcbc,0x3fedc9f19d43fe61,1 +np.float64,0xbfe3ed581767dab0,0x3fe9fe4ee2f2b1cd,1 +np.float64,0xbfc40923d5281248,0x3fef9bd8ee9f6e68,1 +np.float64,0x3fe411a834682350,0x3fe9e9103854f057,1 +np.float64,0xbfedf6ccdf7bed9a,0x3fe2f77ad6543246,1 +np.float64,0xbfe8788a44f0f114,0x3fe7172f3aa0c742,1 +np.float64,0xbfce728f173ce520,0x3fef1954083bea04,1 +np.float64,0xbfd64dd0acac9ba2,0x3fee138c3293c246,1 +np.float64,0xbfe00669f5600cd4,0x3fec121443945350,1 +np.float64,0xbfe7152ba2ee2a58,0x3fe8079465d09846,1 +np.float64,0x3fe8654d8f70ca9c,0x3fe7247c94f09596,1 +np.float64,0x3fea68045cf4d008,0x3fe5b58cfe81a243,1 +np.float64,0xbfcd4779073a8ef4,0x3fef2a9d78153fa5,1 +np.float64,0xbfdb4456e5b688ae,0x3fed23b11614203f,1 +np.float64,0x3fcb5d59cd36bab0,0x3fef45818216a515,1 +np.float64,0xbfd914ff5ab229fe,0x3fed92e73746fea8,1 +np.float64,0x3fe4d211db69a424,0x3fe97653f433d15f,1 +np.float64,0xbfdbbb9224b77724,0x3fed0adb593dde80,1 +np.float64,0x3fd424ceafa8499c,0x3fee6d9124795d33,1 +np.float64,0x3feb5968f976b2d2,0x3fe501d116efbf54,1 +np.float64,0x3fee7d92a2fcfb26,0x3fe28a479b6a9dcf,1 +np.float64,0x3fc308e9972611d0,0x3fefa595f4df0c89,1 +np.float64,0x3fda79cd77b4f39c,0x3fed4cf8e69ba1f8,1 +np.float64,0x3fcbcf42d5379e88,0x3fef3f6a6a77c187,1 +np.float64,0x3fe13a1da662743c,0x3feb79504faea888,1 
+np.float64,0xbfee4435f07c886c,0x3fe2b8ea98d2fc29,1 +np.float64,0x3fd65d68ccacbad0,0x3fee10e1ac7ada89,1 +np.float64,0x3fef2f89bb7e5f14,0x3fe1f81e882cc3f4,1 +np.float64,0xbfef0a7769fe14ef,0x3fe216bf384fc646,1 +np.float64,0x3fc065277320ca50,0x3fefbce44835c193,1 +np.float64,0x3fe9c1a74d73834e,0x3fe62e9ee0c2f2bf,1 +np.float64,0x3fd9d96e5db3b2dc,0x3fed6cd88eb51f6a,1 +np.float64,0x3fe02bf1c56057e4,0x3febfffc24b5a7ba,1 +np.float64,0xbfd6814350ad0286,0x3fee0ab9ad318b84,1 +np.float64,0x3f9fcbec583f97c0,0x3feffc0d0f1d8e75,1 +np.float64,0x3fe23524e5e46a4a,0x3feaf55372949a06,1 +np.float64,0xbfbdc95f6a3b92c0,0x3fefc89c21d44995,1 +np.float64,0x3fe961bb9cf2c378,0x3fe6735d6e1cca58,1 +np.float64,0xbfe8f1c370f1e387,0x3fe6c29d1be8bee9,1 +np.float64,0x3fd880d43ab101a8,0x3fedaee3c7ccfc96,1 +np.float64,0xbfedb37005fb66e0,0x3fe32d91ef2e3bd3,1 +np.float64,0xfdce287bfb9c5,0x3ff0000000000000,1 +np.float64,0x9aa1b9e735437,0x3ff0000000000000,1 +np.float64,0x6beac6e0d7d59,0x3ff0000000000000,1 +np.float64,0x47457aae8e8b0,0x3ff0000000000000,1 +np.float64,0x35ff13b46bfe3,0x3ff0000000000000,1 +np.float64,0xb9c0c82b73819,0x3ff0000000000000,1 +np.float64,0x1a8dc21a351b9,0x3ff0000000000000,1 +np.float64,0x7e87ef6afd0ff,0x3ff0000000000000,1 +np.float64,0x620a6588c414d,0x3ff0000000000000,1 +np.float64,0x7f366000fe6e,0x3ff0000000000000,1 +np.float64,0x787e39f4f0fc8,0x3ff0000000000000,1 +np.float64,0xf5134f1fea26a,0x3ff0000000000000,1 +np.float64,0xbce700ef79ce0,0x3ff0000000000000,1 +np.float64,0x144d7cc8289b1,0x3ff0000000000000,1 +np.float64,0xb9fbc5b973f79,0x3ff0000000000000,1 +np.float64,0xc3d6292d87ac5,0x3ff0000000000000,1 +np.float64,0xc1084e618210a,0x3ff0000000000000,1 +np.float64,0xb6b9eca56d73e,0x3ff0000000000000,1 +np.float64,0xc7ac4b858f58a,0x3ff0000000000000,1 +np.float64,0x516d75d2a2daf,0x3ff0000000000000,1 +np.float64,0x9dc089d93b811,0x3ff0000000000000,1 +np.float64,0x7b5f2840f6be6,0x3ff0000000000000,1 +np.float64,0x121d3ce8243a9,0x3ff0000000000000,1 +np.float64,0xf0be0337e17c1,0x3ff0000000000000,1 +np.float64,0xff58a5cbfeb15,0x3ff0000000000000,1 +np.float64,0xdaf1d07fb5e3a,0x3ff0000000000000,1 +np.float64,0x61d95382c3b2b,0x3ff0000000000000,1 +np.float64,0xe4df943fc9bf3,0x3ff0000000000000,1 +np.float64,0xf72ac2bdee559,0x3ff0000000000000,1 +np.float64,0x12dafbf625b60,0x3ff0000000000000,1 +np.float64,0xee11d427dc23b,0x3ff0000000000000,1 +np.float64,0xf4f8eb37e9f1e,0x3ff0000000000000,1 +np.float64,0xad7cb5df5af97,0x3ff0000000000000,1 +np.float64,0x59fc9b06b3f94,0x3ff0000000000000,1 +np.float64,0x3c3e65e4787ce,0x3ff0000000000000,1 +np.float64,0xe37bc993c6f79,0x3ff0000000000000,1 +np.float64,0x13bd6330277ad,0x3ff0000000000000,1 +np.float64,0x56cc2800ad986,0x3ff0000000000000,1 +np.float64,0x6203b8fcc4078,0x3ff0000000000000,1 +np.float64,0x75c7c8b8eb8fa,0x3ff0000000000000,1 +np.float64,0x5ebf8e00bd7f2,0x3ff0000000000000,1 +np.float64,0xda81f2f1b503f,0x3ff0000000000000,1 +np.float64,0x6adb17d6d5b64,0x3ff0000000000000,1 +np.float64,0x1ba68eee374d3,0x3ff0000000000000,1 +np.float64,0xeecf6fbbdd9ee,0x3ff0000000000000,1 +np.float64,0x24d6dd8e49add,0x3ff0000000000000,1 +np.float64,0xdf7cb81bbef97,0x3ff0000000000000,1 +np.float64,0xafd7be1b5faf8,0x3ff0000000000000,1 +np.float64,0xdb90ca35b721a,0x3ff0000000000000,1 +np.float64,0xa72903a14e521,0x3ff0000000000000,1 +np.float64,0x14533ee028a7,0x3ff0000000000000,1 +np.float64,0x7951540cf2a2b,0x3ff0000000000000,1 +np.float64,0x22882be045106,0x3ff0000000000000,1 +np.float64,0x136270d626c4f,0x3ff0000000000000,1 +np.float64,0x6a0f5744d41ec,0x3ff0000000000000,1 
+np.float64,0x21e0d1aa43c1b,0x3ff0000000000000,1 +np.float64,0xee544155dca88,0x3ff0000000000000,1 +np.float64,0xcbe8aac797d16,0x3ff0000000000000,1 +np.float64,0x6c065e80d80e,0x3ff0000000000000,1 +np.float64,0xe57f0411cafe1,0x3ff0000000000000,1 +np.float64,0xdec3a6bdbd875,0x3ff0000000000000,1 +np.float64,0xf4d23a0fe9a48,0x3ff0000000000000,1 +np.float64,0xda77ef47b4efe,0x3ff0000000000000,1 +np.float64,0x8c405c9b1880c,0x3ff0000000000000,1 +np.float64,0x4eced5149d9db,0x3ff0000000000000,1 +np.float64,0x16b6552c2d6cc,0x3ff0000000000000,1 +np.float64,0x6fbc262cdf785,0x3ff0000000000000,1 +np.float64,0x628c3844c5188,0x3ff0000000000000,1 +np.float64,0x6d827d2cdb050,0x3ff0000000000000,1 +np.float64,0xd1bfdf29a37fc,0x3ff0000000000000,1 +np.float64,0xd85400fdb0a80,0x3ff0000000000000,1 +np.float64,0xcc420b2d98842,0x3ff0000000000000,1 +np.float64,0xac41d21b5883b,0x3ff0000000000000,1 +np.float64,0x432f18d4865e4,0x3ff0000000000000,1 +np.float64,0xe7e89a1bcfd14,0x3ff0000000000000,1 +np.float64,0x9b1141d536228,0x3ff0000000000000,1 +np.float64,0x6805f662d00bf,0x3ff0000000000000,1 +np.float64,0xc76552358ecab,0x3ff0000000000000,1 +np.float64,0x4ae8ffee95d21,0x3ff0000000000000,1 +np.float64,0x4396c096872d9,0x3ff0000000000000,1 +np.float64,0x6e8e55d4dd1cb,0x3ff0000000000000,1 +np.float64,0x4c2e33dc985c7,0x3ff0000000000000,1 +np.float64,0xbce814a579d03,0x3ff0000000000000,1 +np.float64,0x911681b5222d0,0x3ff0000000000000,1 +np.float64,0x5f90a4b2bf215,0x3ff0000000000000,1 +np.float64,0x26f76be84deee,0x3ff0000000000000,1 +np.float64,0xb2f7536165eeb,0x3ff0000000000000,1 +np.float64,0x4de4e6089bc9d,0x3ff0000000000000,1 +np.float64,0xf2e016afe5c03,0x3ff0000000000000,1 +np.float64,0xb9b7b949736f7,0x3ff0000000000000,1 +np.float64,0x3363ea1866c7e,0x3ff0000000000000,1 +np.float64,0xd1a3bd6ba3478,0x3ff0000000000000,1 +np.float64,0xae89f3595d13f,0x3ff0000000000000,1 +np.float64,0xddbd9601bb7c,0x3ff0000000000000,1 +np.float64,0x5de41a06bbc84,0x3ff0000000000000,1 +np.float64,0xfd58c86dfab19,0x3ff0000000000000,1 +np.float64,0x24922e8c49247,0x3ff0000000000000,1 +np.float64,0xcda040339b408,0x3ff0000000000000,1 +np.float64,0x5fe500b2bfca1,0x3ff0000000000000,1 +np.float64,0x9214abb924296,0x3ff0000000000000,1 +np.float64,0x800609fe0a2c13fd,0x3ff0000000000000,1 +np.float64,0x800c7c6fe518f8e0,0x3ff0000000000000,1 +np.float64,0x800a1a9491b4352a,0x3ff0000000000000,1 +np.float64,0x800b45e0e8968bc2,0x3ff0000000000000,1 +np.float64,0x8008497e57d092fd,0x3ff0000000000000,1 +np.float64,0x800b9c0af0173816,0x3ff0000000000000,1 +np.float64,0x800194cccb43299a,0x3ff0000000000000,1 +np.float64,0x8001c91ef183923f,0x3ff0000000000000,1 +np.float64,0x800f25b5ccde4b6c,0x3ff0000000000000,1 +np.float64,0x800ce63ccc79cc7a,0x3ff0000000000000,1 +np.float64,0x800d8fb2e83b1f66,0x3ff0000000000000,1 +np.float64,0x80083cd06f7079a1,0x3ff0000000000000,1 +np.float64,0x800823598e9046b3,0x3ff0000000000000,1 +np.float64,0x8001c1319de38264,0x3ff0000000000000,1 +np.float64,0x800f2b68543e56d1,0x3ff0000000000000,1 +np.float64,0x80022a4f4364549f,0x3ff0000000000000,1 +np.float64,0x800f51badf7ea376,0x3ff0000000000000,1 +np.float64,0x8003fbf31e27f7e7,0x3ff0000000000000,1 +np.float64,0x800d4c00e2fa9802,0x3ff0000000000000,1 +np.float64,0x800023b974804774,0x3ff0000000000000,1 +np.float64,0x800860778990c0ef,0x3ff0000000000000,1 +np.float64,0x800a15c241542b85,0x3ff0000000000000,1 +np.float64,0x8003097d9dc612fc,0x3ff0000000000000,1 +np.float64,0x800d77d8541aefb1,0x3ff0000000000000,1 +np.float64,0x80093804ab52700a,0x3ff0000000000000,1 +np.float64,0x800d2b3bfd7a5678,0x3ff0000000000000,1 
+np.float64,0x800da24bcd5b4498,0x3ff0000000000000,1 +np.float64,0x8006eee1c28dddc4,0x3ff0000000000000,1 +np.float64,0x80005137fa40a271,0x3ff0000000000000,1 +np.float64,0x8007a3fbc22f47f8,0x3ff0000000000000,1 +np.float64,0x800dcd97071b9b2e,0x3ff0000000000000,1 +np.float64,0x80065b36048cb66d,0x3ff0000000000000,1 +np.float64,0x8004206ba72840d8,0x3ff0000000000000,1 +np.float64,0x8007e82b98cfd058,0x3ff0000000000000,1 +np.float64,0x8001a116ed23422f,0x3ff0000000000000,1 +np.float64,0x800c69e9ff18d3d4,0x3ff0000000000000,1 +np.float64,0x8003843688e7086e,0x3ff0000000000000,1 +np.float64,0x800335e3b8866bc8,0x3ff0000000000000,1 +np.float64,0x800e3308f0bc6612,0x3ff0000000000000,1 +np.float64,0x8002a9ec55c553d9,0x3ff0000000000000,1 +np.float64,0x80001c2084e03842,0x3ff0000000000000,1 +np.float64,0x800bc2bbd8d78578,0x3ff0000000000000,1 +np.float64,0x800ae6bcc555cd7a,0x3ff0000000000000,1 +np.float64,0x80083f7a13907ef5,0x3ff0000000000000,1 +np.float64,0x800d83ed76db07db,0x3ff0000000000000,1 +np.float64,0x800a12251974244b,0x3ff0000000000000,1 +np.float64,0x800a69c95714d393,0x3ff0000000000000,1 +np.float64,0x800cd5a85639ab51,0x3ff0000000000000,1 +np.float64,0x800e0e1837bc1c31,0x3ff0000000000000,1 +np.float64,0x8007b5ca39ef6b95,0x3ff0000000000000,1 +np.float64,0x800cf961cad9f2c4,0x3ff0000000000000,1 +np.float64,0x80066e8fc14cdd20,0x3ff0000000000000,1 +np.float64,0x8001cb8c7b43971a,0x3ff0000000000000,1 +np.float64,0x800002df68a005c0,0x3ff0000000000000,1 +np.float64,0x8003e6681567ccd1,0x3ff0000000000000,1 +np.float64,0x800b039126b60723,0x3ff0000000000000,1 +np.float64,0x800d2e1b663a5c37,0x3ff0000000000000,1 +np.float64,0x800188b3e2a31169,0x3ff0000000000000,1 +np.float64,0x8001f272e943e4e7,0x3ff0000000000000,1 +np.float64,0x800d7f53607afea7,0x3ff0000000000000,1 +np.float64,0x80092cafa4f25960,0x3ff0000000000000,1 +np.float64,0x800fc009f07f8014,0x3ff0000000000000,1 +np.float64,0x8003da896507b514,0x3ff0000000000000,1 +np.float64,0x800d4d1b4c3a9a37,0x3ff0000000000000,1 +np.float64,0x8007a835894f506c,0x3ff0000000000000,1 +np.float64,0x80057ba0522af741,0x3ff0000000000000,1 +np.float64,0x8009b7054b336e0b,0x3ff0000000000000,1 +np.float64,0x800b2c6c125658d9,0x3ff0000000000000,1 +np.float64,0x8008b1840ad16308,0x3ff0000000000000,1 +np.float64,0x8007ea0e3befd41d,0x3ff0000000000000,1 +np.float64,0x800dd658683bacb1,0x3ff0000000000000,1 +np.float64,0x8008cda48fd19b49,0x3ff0000000000000,1 +np.float64,0x8003acca14c75995,0x3ff0000000000000,1 +np.float64,0x8008bd152d717a2b,0x3ff0000000000000,1 +np.float64,0x80010d1ea3621a3e,0x3ff0000000000000,1 +np.float64,0x800130b78b826170,0x3ff0000000000000,1 +np.float64,0x8002cf3a46e59e75,0x3ff0000000000000,1 +np.float64,0x800b76e7fa76edd0,0x3ff0000000000000,1 +np.float64,0x800e065fe1dc0cc0,0x3ff0000000000000,1 +np.float64,0x8000dd527ea1baa6,0x3ff0000000000000,1 +np.float64,0x80032cb234665965,0x3ff0000000000000,1 +np.float64,0x800affc1acb5ff84,0x3ff0000000000000,1 +np.float64,0x80074be23fee97c5,0x3ff0000000000000,1 +np.float64,0x8004f83eafc9f07e,0x3ff0000000000000,1 +np.float64,0x800b02a115560543,0x3ff0000000000000,1 +np.float64,0x800b324a55766495,0x3ff0000000000000,1 +np.float64,0x800ffbcfd69ff7a0,0x3ff0000000000000,1 +np.float64,0x800830bc7b906179,0x3ff0000000000000,1 +np.float64,0x800cbafe383975fd,0x3ff0000000000000,1 +np.float64,0x8001ee42bfe3dc86,0x3ff0000000000000,1 +np.float64,0x8005b00fdc0b6020,0x3ff0000000000000,1 +np.float64,0x8005e7addd0bcf5c,0x3ff0000000000000,1 +np.float64,0x8001ae4cb0635c9a,0x3ff0000000000000,1 +np.float64,0x80098a9941131533,0x3ff0000000000000,1 
+np.float64,0x800334c929466993,0x3ff0000000000000,1 +np.float64,0x8009568239d2ad05,0x3ff0000000000000,1 +np.float64,0x800f0639935e0c73,0x3ff0000000000000,1 +np.float64,0x800cebce7499d79d,0x3ff0000000000000,1 +np.float64,0x800482ee4c2905dd,0x3ff0000000000000,1 +np.float64,0x8007b7bd9e2f6f7c,0x3ff0000000000000,1 +np.float64,0x3fe654469f2ca88d,0x3fe8853f6c01ffb3,1 +np.float64,0x3feb4d7297369ae5,0x3fe50ad5bb621408,1 +np.float64,0x3feef53ba43dea77,0x3fe2283f356f8658,1 +np.float64,0x3fddf564eabbeaca,0x3fec8ec0e0dead9c,1 +np.float64,0x3fd3a69078274d21,0x3fee80e05c320000,1 +np.float64,0x3fecdafe5d39b5fd,0x3fe3d91a5d440fd9,1 +np.float64,0x3fd93286bc32650d,0x3fed8d40696cd10e,1 +np.float64,0x3fc0d34eb821a69d,0x3fefb954023d4284,1 +np.float64,0x3fc7b4b9a02f6973,0x3fef73e8739787ce,1 +np.float64,0x3fe08c839a611907,0x3febd0bc6f5641cd,1 +np.float64,0x3fb3d1758627a2eb,0x3fefe776f6183f96,1 +np.float64,0x3fef93c9ff3f2794,0x3fe1a4d2f622627d,1 +np.float64,0x3fea8d0041351a01,0x3fe59a52a1c78c9e,1 +np.float64,0x3fe3e26a30e7c4d4,0x3fea04ad3e0bbf8d,1 +np.float64,0x3fe5a34c9f6b4699,0x3fe8f57c5ccd1eab,1 +np.float64,0x3fc21ef859243df1,0x3fefae0b68a3a2e7,1 +np.float64,0x3fed7dd585fafbab,0x3fe35860041e5b0d,1 +np.float64,0x3fe5abacf22b575a,0x3fe8f03d8b6ef0f2,1 +np.float64,0x3fe426451f284c8a,0x3fe9dcf21f13205b,1 +np.float64,0x3fc01f6456203ec9,0x3fefbf19e2a8e522,1 +np.float64,0x3fe1cf2772239e4f,0x3feb2bbd645c7697,1 +np.float64,0x3fd18c4ace231896,0x3feecdfdd086c110,1 +np.float64,0x3fe8387d5b7070fb,0x3fe74358f2ec4910,1 +np.float64,0x3fdce51c2239ca38,0x3feccb2ae5459632,1 +np.float64,0x3fe5b0f2e4eb61e6,0x3fe8ecef4dbe4277,1 +np.float64,0x3fe1ceeb08a39dd6,0x3feb2bdd4dcfb3df,1 +np.float64,0x3febc5899d778b13,0x3fe4afc8dd8ad228,1 +np.float64,0x3fe7a47fbe2f48ff,0x3fe7a7fd9b352ea5,1 +np.float64,0x3fe7f74e1fafee9c,0x3fe76feb2755b247,1 +np.float64,0x3fe2bfad04e57f5a,0x3feaa9b46adddaeb,1 +np.float64,0x3fd06a090320d412,0x3feef40c334f8fba,1 +np.float64,0x3fdc97297d392e53,0x3fecdc16a3e22fcb,1 +np.float64,0x3fdc1a3f3838347e,0x3fecf6db2769d404,1 +np.float64,0x3fcca90096395201,0x3fef338156fcd218,1 +np.float64,0x3fed464733fa8c8e,0x3fe38483f0465d91,1 +np.float64,0x3fe7e067d82fc0d0,0x3fe77f7c8c9de896,1 +np.float64,0x3fc014fa0b2029f4,0x3fefbf6d84c933f8,1 +np.float64,0x3fd3bf1524277e2a,0x3fee7d2997b74dec,1 +np.float64,0x3fec153b86782a77,0x3fe472bb5497bb2a,1 +np.float64,0x3fd3e4d9d5a7c9b4,0x3fee776842691902,1 +np.float64,0x3fea6c0e2c74d81c,0x3fe5b2954cb458d9,1 +np.float64,0x3fee8f6a373d1ed4,0x3fe27bb9e348125b,1 +np.float64,0x3fd30c6dd42618dc,0x3fee97d2cab2b0bc,1 +np.float64,0x3fe4f90e6d69f21d,0x3fe95ea3dd4007f2,1 +np.float64,0x3fe271d467e4e3a9,0x3fead470d6d4008b,1 +np.float64,0x3fef2983897e5307,0x3fe1fd1a4debe33b,1 +np.float64,0x3fe980cc83b30199,0x3fe65d2fb8a0eb46,1 +np.float64,0x3fdfdf53db3fbea8,0x3fec1cf95b2a1cc7,1 +np.float64,0x3fe4d5307ba9aa61,0x3fe974701b4156cb,1 +np.float64,0x3fdb4e2345b69c47,0x3fed21aa6c146512,1 +np.float64,0x3fe3f7830327ef06,0x3fe9f85f6c88c2a8,1 +np.float64,0x3fca915fb63522bf,0x3fef502b73a52ecf,1 +np.float64,0x3fe66d3709ecda6e,0x3fe87531d7372d7a,1 +np.float64,0x3fd86000bcb0c001,0x3fedb5018dd684ca,1 +np.float64,0x3fe516e5feea2dcc,0x3fe94c68b111404e,1 +np.float64,0x3fd83c53dd3078a8,0x3fedbb9e5dd9e165,1 +np.float64,0x3fedfeeb673bfdd7,0x3fe2f0f0253c5d5d,1 +np.float64,0x3fe0dc6f9c21b8df,0x3feba8e2452410c2,1 +np.float64,0x3fbe154d643c2a9b,0x3fefc780a9357457,1 +np.float64,0x3fe5f63986abec73,0x3fe8c1434951a40a,1 +np.float64,0x3fbce0e50839c1ca,0x3fefcbeeaa27de75,1 +np.float64,0x3fd7ef5c5c2fdeb9,0x3fedc9c3022495b3,1 
+np.float64,0x3fc1073914220e72,0x3fefb79de80fc0fd,1 +np.float64,0x3fe1a93c3d235278,0x3feb3fb21f86ac67,1 +np.float64,0x3fe321ee53e643dd,0x3fea72e2999f1e22,1 +np.float64,0x3fa881578c3102af,0x3feff69e6e51e0d6,1 +np.float64,0x3fd313482a262690,0x3fee96d161199495,1 +np.float64,0x3fe7272cd6ae4e5a,0x3fe7fbacbd0d8f43,1 +np.float64,0x3fd6cf4015ad9e80,0x3fedfd3513d544b8,1 +np.float64,0x3fc67b7e6d2cf6fd,0x3fef81f5c16923a4,1 +np.float64,0x3fa1999c14233338,0x3feffb2913a14184,1 +np.float64,0x3fc74eb8dd2e9d72,0x3fef78909a138e3c,1 +np.float64,0x3fc0b9274921724f,0x3fefba2ebd5f3e1c,1 +np.float64,0x3fd53fa156aa7f43,0x3fee40a18e952e88,1 +np.float64,0x3feaccbca4b59979,0x3fe56b22b33eb713,1 +np.float64,0x3fe6a01e3a2d403c,0x3fe8543fbd820ecc,1 +np.float64,0x3fd392a869a72551,0x3fee83e0ffe0e8de,1 +np.float64,0x3fe44d8928689b12,0x3fe9c5bf3c8fffdb,1 +np.float64,0x3fca3f209f347e41,0x3fef5461b6fa0924,1 +np.float64,0x3fee9e84b07d3d09,0x3fe26f638f733549,1 +np.float64,0x3faf49acb03e9359,0x3feff0b583cd8c48,1 +np.float64,0x3fea874b2af50e96,0x3fe59e882fa6febf,1 +np.float64,0x3fc50b72772a16e5,0x3fef918777dc41be,1 +np.float64,0x3fe861d1d4f0c3a4,0x3fe726e44d9d42c2,1 +np.float64,0x3fcadd2e2535ba5c,0x3fef4c3e2b56da38,1 +np.float64,0x3fea59c29cb4b385,0x3fe5c0043e586439,1 +np.float64,0x3fc1ffef0d23ffde,0x3fefaf22be452d13,1 +np.float64,0x3fc2d8dbc125b1b8,0x3fefa75b646d8e4e,1 +np.float64,0x3fd66c6471acd8c9,0x3fee0e5038b895c0,1 +np.float64,0x3fd0854adfa10a96,0x3feef0945bcc5c99,1 +np.float64,0x3feaac7076f558e1,0x3fe58316c23a82ad,1 +np.float64,0x3fdda49db3bb493b,0x3feca0e347c0ad6f,1 +np.float64,0x3fe43a539de874a7,0x3fe9d11d722d4822,1 +np.float64,0x3feeee3ebbfddc7d,0x3fe22dffd251e9af,1 +np.float64,0x3f8ee2c5b03dc58b,0x3fefff11855a7b6c,1 +np.float64,0x3fcd7107c63ae210,0x3fef2840bb55ca52,1 +np.float64,0x3f8d950d203b2a1a,0x3fefff253a08e40e,1 +np.float64,0x3fd40a5e57a814bd,0x3fee71a633c761fc,1 +np.float64,0x3fee836ec83d06de,0x3fe28580975be2fd,1 +np.float64,0x3fd7bbe87f2f77d1,0x3fedd31f661890cc,1 +np.float64,0xbfe05bf138a0b7e2,0x3febe8a000d96e47,1 +np.float64,0xbf88bddd90317bc0,0x3fefff66f6e2ff26,1 +np.float64,0xbfdc9cbb12393976,0x3fecdae2982335db,1 +np.float64,0xbfd85b4eccb0b69e,0x3fedb5e0dd87f702,1 +np.float64,0xbfe5c326cb2b864e,0x3fe8e180f525fa12,1 +np.float64,0xbfe381a0e4a70342,0x3fea3c8e5e3ab78e,1 +np.float64,0xbfe58d892c2b1b12,0x3fe9031551617aed,1 +np.float64,0xbfd7f3a52cafe74a,0x3fedc8fa97edd080,1 +np.float64,0xbfef3417bc7e682f,0x3fe1f45989f6a009,1 +np.float64,0xbfddfb8208bbf704,0x3fec8d5fa9970773,1 +np.float64,0xbfdab69bcc356d38,0x3fed40b2f6c347c6,1 +np.float64,0xbfed3f7cf17a7efa,0x3fe389e4ff4d9235,1 +np.float64,0xbfe47675d9a8ecec,0x3fe9ad6829a69e94,1 +np.float64,0xbfd030e2902061c6,0x3feefb3f811e024f,1 +np.float64,0xbfc376ac7226ed58,0x3fefa1798712b37e,1 +np.float64,0xbfdb7e54a0b6fcaa,0x3fed17a974c4bc28,1 +np.float64,0xbfdb7d5d5736faba,0x3fed17dcf31a8d84,1 +np.float64,0xbf876bd6502ed7c0,0x3fefff76dce6232c,1 +np.float64,0xbfd211e6c02423ce,0x3feebba41f0a1764,1 +np.float64,0xbfb443e3962887c8,0x3fefe658953629d4,1 +np.float64,0xbfe81b09e9b03614,0x3fe757882e4fdbae,1 +np.float64,0xbfdcb905d2b9720c,0x3fecd4c22cfe84e5,1 +np.float64,0xbfe3b62d99276c5b,0x3fea1e5520b3098d,1 +np.float64,0xbfbf05b25c3e0b68,0x3fefc3ecc04bca8e,1 +np.float64,0xbfdedc885b3db910,0x3fec59e22feb49f3,1 +np.float64,0xbfe33aa282667545,0x3fea64f2d55ec471,1 +np.float64,0xbfec84745a3908e9,0x3fe41cb3214e7044,1 +np.float64,0xbfddefdff1bbdfc0,0x3fec8fff88d4d0ec,1 +np.float64,0xbfd26ae6aca4d5ce,0x3feeaf208c7fedf6,1 +np.float64,0xbfee010591fc020b,0x3fe2ef3e57211a5e,1 
+np.float64,0xbfb8cfddca319fb8,0x3fefd98d8f7918ed,1 +np.float64,0xbfe991648f3322c9,0x3fe6514e54670bae,1 +np.float64,0xbfee63fd087cc7fa,0x3fe29f1bfa3297cc,1 +np.float64,0xbfe1685942a2d0b2,0x3feb617f5f839eee,1 +np.float64,0xbfc6fc2fd62df860,0x3fef7c4698fd58cf,1 +np.float64,0xbfe42723d3a84e48,0x3fe9dc6ef7243e90,1 +np.float64,0xbfc3a7e89d274fd0,0x3fef9f99e3314e77,1 +np.float64,0xbfeb4c9521f6992a,0x3fe50b7c919bc6d8,1 +np.float64,0xbf707b34e020f680,0x3fefffef05e30264,1 +np.float64,0xbfc078478e20f090,0x3fefbc479305d5aa,1 +np.float64,0xbfd494ac4ca92958,0x3fee5c11f1cd8269,1 +np.float64,0xbfdaf888a035f112,0x3fed3346ae600469,1 +np.float64,0xbfa5d8ed502bb1e0,0x3feff88b0f262609,1 +np.float64,0xbfeec0cbfffd8198,0x3fe253543b2371cb,1 +np.float64,0xbfe594b5986b296b,0x3fe8fe9b39fb3940,1 +np.float64,0xbfc8ece7c631d9d0,0x3fef652bd0611ac7,1 +np.float64,0xbfd8ffeca0b1ffda,0x3fed96ebdf9b65cb,1 +np.float64,0xbfba9b221e353648,0x3fefd3cc21e2f15c,1 +np.float64,0xbfca63a52c34c74c,0x3fef52848eb9ed3b,1 +np.float64,0xbfe588e9b06b11d4,0x3fe905f7403e8881,1 +np.float64,0xbfc76f82db2edf04,0x3fef77138fe9bbc2,1 +np.float64,0xbfeeb3f334bd67e6,0x3fe25ddadb1096d6,1 +np.float64,0xbfbf2b64ce3e56c8,0x3fefc35a9555f6df,1 +np.float64,0xbfe9920e4ff3241c,0x3fe650d4ab8f5c42,1 +np.float64,0xbfb4a54c02294a98,0x3fefe55fc85ae5e9,1 +np.float64,0xbfe353b0c766a762,0x3fea56c02d17e4b7,1 +np.float64,0xbfd99961a4b332c4,0x3fed795fcd00dbf9,1 +np.float64,0xbfef191ddabe323c,0x3fe20aa79524f636,1 +np.float64,0xbfb25d060224ba10,0x3fefeaeee5cc8c0b,1 +np.float64,0xbfe6022428ec0448,0x3fe8b9b46e776194,1 +np.float64,0xbfed1a236cba3447,0x3fe3a76bee0d9861,1 +np.float64,0xbfc59671e72b2ce4,0x3fef8bc4daef6f14,1 +np.float64,0xbfdf2711703e4e22,0x3fec4886a8c9ceb5,1 +np.float64,0xbfeb7e207536fc41,0x3fe4e610c783f168,1 +np.float64,0xbfe6cdf5bcad9bec,0x3fe8365f8a59bc81,1 +np.float64,0xbfe55294adaaa52a,0x3fe927b0af5ccd09,1 +np.float64,0xbfdf4a88913e9512,0x3fec4036df58ba74,1 +np.float64,0xbfebb7efe4376fe0,0x3fe4ba276006992d,1 +np.float64,0xbfe09f29cfa13e54,0x3febc77f4f9c95e7,1 +np.float64,0xbfdf8c75653f18ea,0x3fec30ac924e4f46,1 +np.float64,0xbfefd601c7ffac04,0x3fe16d6f21bcb9c1,1 +np.float64,0xbfeae97ff5f5d300,0x3fe555bb5b87efe9,1 +np.float64,0xbfed427f02fa84fe,0x3fe387830db093bc,1 +np.float64,0xbfa33909cc267210,0x3feffa3a1bcb50dd,1 +np.float64,0xbfe9aa4bf5f35498,0x3fe63f6e98f6aa0f,1 +np.float64,0xbfe2d7349b25ae69,0x3fea9caa7c331e7e,1 +np.float64,0xbfcdbb2a3a3b7654,0x3fef2401c9659e4b,1 +np.float64,0xbfc8a90919315214,0x3fef686fe7fc0513,1 +np.float64,0xbfe62a98df2c5532,0x3fe89ff22a02cc6b,1 +np.float64,0xbfdc0f67b3b81ed0,0x3fecf928b637798f,1 +np.float64,0xbfebb32bf6f76658,0x3fe4bdc893c09698,1 +np.float64,0xbfec067996380cf3,0x3fe47e132741db97,1 +np.float64,0xbfd9774e1d32ee9c,0x3fed7ffe1e87c434,1 +np.float64,0xbfef989890bf3131,0x3fe1a0d025c80cf4,1 +np.float64,0xbfe59887e62b3110,0x3fe8fc382a3d4197,1 +np.float64,0xbfdea0a11e3d4142,0x3fec67b987e236ec,1 +np.float64,0xbfe2ec495825d892,0x3fea90efb231602d,1 +np.float64,0xbfb329c5c2265388,0x3fefe90f1b8209c3,1 +np.float64,0xbfdcd2dcd339a5ba,0x3feccf24c60b1478,1 +np.float64,0xbfe537ea18aa6fd4,0x3fe938237e217fe0,1 +np.float64,0xbfe8675ce170ceba,0x3fe723105925ce3a,1 +np.float64,0xbfd70723acae0e48,0x3fedf369ac070e65,1 +np.float64,0xbfea9d8692b53b0d,0x3fe58e1ee42e3fdb,1 +np.float64,0xbfcfeb96653fd72c,0x3fef029770033bdc,1 +np.float64,0xbfcc06c92d380d94,0x3fef3c69797d9b0a,1 +np.float64,0xbfe16b7c4f62d6f8,0x3feb5fdf9f0a9a07,1 +np.float64,0xbfed4d7a473a9af4,0x3fe37ecee27b1eb7,1 +np.float64,0xbfe6a6f6942d4ded,0x3fe84fccdf762b19,1 
+np.float64,0xbfda46d867348db0,0x3fed572d928fa657,1 +np.float64,0xbfdbd9482db7b290,0x3fed049b5f907b52,1 +np.float64,0x7fe992ceb933259c,0xbfeb15af92aad70e,1 +np.float64,0x7fe3069204a60d23,0xbfe5eeff454240e9,1 +np.float64,0x7fe729dbf32e53b7,0xbfefe0528a330e4c,1 +np.float64,0x7fec504fb638a09e,0x3fd288e95dbedf65,1 +np.float64,0x7fe1d30167a3a602,0xbfeffc41f946fd02,1 +np.float64,0x7fed7f8ffd3aff1f,0x3fefe68ec604a19d,1 +np.float64,0x7fd2f23635a5e46b,0x3fea63032efbb447,1 +np.float64,0x7fd4c86db1a990da,0x3fdf6b9f7888db5d,1 +np.float64,0x7fe7554db6eeaa9a,0x3fe1b41476861bb0,1 +np.float64,0x7fe34e823ba69d03,0x3fefc435532e6294,1 +np.float64,0x7fec5c82fef8b905,0x3fef8f0c6473034f,1 +np.float64,0x7feba221bff74442,0xbfea95b81eb19b47,1 +np.float64,0x7fe74808a5ae9010,0xbfd3aa322917c3e5,1 +np.float64,0x7fdf41b7e0be836f,0x3fd14283c7147282,1 +np.float64,0x7fec09892f381311,0x3fe5240376ae484b,1 +np.float64,0x7faaf80bf435f017,0x3fe20227fa811423,1 +np.float64,0x7f8422d8402845b0,0x3fe911714593b8a0,1 +np.float64,0x7fd23a7fada474fe,0x3feff9f40aa37e9c,1 +np.float64,0x7fef4a4806fe948f,0x3fec6eca89cb4a62,1 +np.float64,0x7fe1e71cf763ce39,0xbfea6ac63f9ba457,1 +np.float64,0x7fe3e555be27caaa,0xbfe75b305d0dbbfd,1 +np.float64,0x7fcb8bac96371758,0xbfe8b126077f9d4c,1 +np.float64,0x7fc98e2c84331c58,0x3fef9092eb0bc85a,1 +np.float64,0x7fe947cf2b728f9d,0xbfebfff2c5b7d198,1 +np.float64,0x7feee8058c3dd00a,0xbfef21ebaae2eb17,1 +np.float64,0x7fef61d8d5bec3b1,0xbfdf1a032fb1c864,1 +np.float64,0x7fcf714b6f3ee296,0x3fe6fc89a8084098,1 +np.float64,0x7fa9a8b44c335168,0xbfeb16c149cea943,1 +np.float64,0x7fd175c482a2eb88,0xbfef64d341e73f88,1 +np.float64,0x7feab8e6a87571cc,0x3feb10069c397464,1 +np.float64,0x7fe3ade72de75bcd,0x3fd1753e333d5790,1 +np.float64,0x7fb26d87d224db0f,0xbfe753d36b18f4ca,1 +np.float64,0x7fdb7ef159b6fde2,0x3fe5c0a6044d3607,1 +np.float64,0x7fd5af86422b5f0c,0x3fe77193c95f6484,1 +np.float64,0x7fee9e00b07d3c00,0x3fe864d494596845,1 +np.float64,0x7fef927a147f24f3,0xbfe673b14715693d,1 +np.float64,0x7fd0aea63c215d4b,0xbfeff435f119fce9,1 +np.float64,0x7fd02e3796a05c6e,0x3fe4f7e3706e9a3d,1 +np.float64,0x7fd3ed61da27dac3,0xbfefef2f057f168c,1 +np.float64,0x7fefaca0d4ff5941,0x3fd3e8ad205cd4ab,1 +np.float64,0x7feb659e06f6cb3b,0x3fd64d803203e027,1 +np.float64,0x7fc94ccfaf32999e,0x3fee04922209369a,1 +np.float64,0x7feb4ec294f69d84,0xbfd102763a056c89,1 +np.float64,0x7fe2ada6ac655b4c,0x3fef4f6792aa6093,1 +np.float64,0x7fe5f40fdc2be81f,0xbfb4a6327186eee8,1 +np.float64,0x7fe7584bc3eeb097,0xbfd685b8ff94651d,1 +np.float64,0x7fe45d276be8ba4e,0x3fee53b13f7e442f,1 +np.float64,0x7fe6449b3d6c8935,0xbfe7e08bafa75251,1 +np.float64,0x7f8d62e6b03ac5cc,0x3fe73d30762f38fd,1 +np.float64,0x7fe3a76f72a74ede,0xbfeb48a28bc60968,1 +np.float64,0x7fd057706920aee0,0x3fdece8fa06f626c,1 +np.float64,0x7fe45ae158e8b5c2,0x3fe7a70f47b4d349,1 +np.float64,0x7fea8a5a983514b4,0x3fefb053d5f9ddd7,1 +np.float64,0x7fdd1e86ab3a3d0c,0x3fe3cded1b93816b,1 +np.float64,0x7fdb456108b68ac1,0xbfe37574c0b9bf8f,1 +np.float64,0x7fe972602432e4bf,0x3fef9a26e65ec01c,1 +np.float64,0x7fdbe2385637c470,0x3fed541df57969e1,1 +np.float64,0x7fe57f03602afe06,0x3fbd90f595cbbd94,1 +np.float64,0x7feb0ceb68f619d6,0xbfeae9cb8ee5261f,1 +np.float64,0x7fe6abfe6c6d57fc,0xbfef40a6edaca26f,1 +np.float64,0x7fe037ea08606fd3,0xbfda817d75858597,1 +np.float64,0x7fdd75a52dbaeb49,0x3feef2a0d91d6aa1,1 +np.float64,0x7fe8f9af66b1f35e,0xbfedfceef2a3bfc9,1 +np.float64,0x7fedf762b53beec4,0x3fd8b4f21ef69ee3,1 +np.float64,0x7fe99295b7f3252a,0x3feffc24d970383e,1 +np.float64,0x7fe797b0172f2f5f,0x3fee089aa56f7ce8,1 
+np.float64,0x7fed89dcc97b13b9,0xbfcfa2bb0c3ea41f,1 +np.float64,0x7fae9e8d5c3d3d1a,0xbfe512ffe16c6b08,1 +np.float64,0x7fefaecbe27f5d97,0x3fbfc718a5e972f1,1 +np.float64,0x7fce0236d93c046d,0xbfa9b7cd790db256,1 +np.float64,0x7fa9689aac32d134,0x3feced501946628a,1 +np.float64,0x7feb1469e93628d3,0x3fef2a988e7673ed,1 +np.float64,0x7fdba78344b74f06,0xbfe092e78965b30c,1 +np.float64,0x7fece54c3fb9ca97,0x3fd3cfd184bed2e6,1 +np.float64,0x7fdb84212b370841,0xbfe25ebf2db6ee55,1 +np.float64,0x7fbe3e8bf23c7d17,0x3fe2ee72df573345,1 +np.float64,0x7fe43d9803687b2f,0xbfed2eff6a9e66a0,1 +np.float64,0x7fb0f9c00a21f37f,0x3feff70f3276fdb7,1 +np.float64,0x7fea0c6cbbb418d8,0xbfefa612494798b2,1 +np.float64,0x7fe4b3239e296646,0xbfe74dd959af8cdc,1 +np.float64,0x7fe5c6a773eb8d4e,0xbfd06944048f8d2b,1 +np.float64,0x7fb1c1278223824e,0xbfeb533a34655bde,1 +np.float64,0x7fd21c09ee243813,0xbfe921ccbc9255c3,1 +np.float64,0x7fe051020c20a203,0x3fbd519d700c1f2f,1 +np.float64,0x7fe0c76845e18ed0,0x3fefb9595191a31b,1 +np.float64,0x7fe6b0b57b6d616a,0xbf8c59a8ba5fcd9a,1 +np.float64,0x7fd386c460270d88,0x3fe8ffea5d1a5c46,1 +np.float64,0x7feeb884713d7108,0x3fee9b2247ef6c0d,1 +np.float64,0x7fd85f71b6b0bee2,0xbfefc30ec3e28f07,1 +np.float64,0x7fc341366426826c,0x3fd4234d35386d3b,1 +np.float64,0x7fe56482dd6ac905,0x3fe7189de6a50668,1 +np.float64,0x7fec67a2e3f8cf45,0xbfef86d0b940f37f,1 +np.float64,0x7fe38b202fe7163f,0x3feb90b75caa2030,1 +np.float64,0x7fdcbc64883978c8,0x3fed4f758fbf64d4,1 +np.float64,0x7fea5f0598f4be0a,0x3fdd503a417b3d4d,1 +np.float64,0x7fda3b6bcf3476d7,0x3fea6e9af3f7f9f5,1 +np.float64,0x7fc7d7896c2faf12,0x3fda2bebc36a2363,1 +np.float64,0x7fe7e8e2626fd1c4,0xbfe7d5e390c4cc3f,1 +np.float64,0x7fde0f3d7abc1e7a,0xbfede7a0ecfa3606,1 +np.float64,0x7fc692b8f52d2571,0x3feff0cd7ab6f61b,1 +np.float64,0xff92d1fce825a400,0xbfc921c36fc014fa,1 +np.float64,0xffdec3af2fbd875e,0xbfed6a77e6a0364e,1 +np.float64,0xffef46e7d9be8dcf,0xbfed7d39476f7e27,1 +np.float64,0xffe2c2ce4525859c,0x3fe1757261316bc9,1 +np.float64,0xffe27c8b5864f916,0xbfefe017c0d43457,1 +np.float64,0xffe184d7442309ae,0x3fa1fb8c49dba596,1 +np.float64,0xffddf5f98d3bebf4,0x3fee4f8eaa5f847e,1 +np.float64,0xffee3ef354fc7de6,0xbfebfd60fa51b2ba,1 +np.float64,0xffdecb3e85bd967e,0x3fbfad2667a8b468,1 +np.float64,0xffe4ee900b29dd20,0xbfdc02dc626f91cd,1 +np.float64,0xffd3179f6da62f3e,0xbfe2cfe442511776,1 +np.float64,0xffe99ef7cef33def,0x3f50994542a7f303,1 +np.float64,0xffe2b66b1ae56cd6,0xbfefe3e066eb6329,1 +np.float64,0xff8f72aff03ee540,0x3fe9c46224cf5003,1 +np.float64,0xffd29beb85a537d8,0x3fefcb0b6166be71,1 +np.float64,0xffaef02d4c3de060,0xbfef5fb71028fc72,1 +np.float64,0xffd39a2a89273456,0x3fe6d4b183205dca,1 +np.float64,0xffef8a9392ff1526,0x3fedb99fbf402468,1 +np.float64,0xffb9b3f31e3367e8,0x3fee1005270fcf80,1 +np.float64,0xffed9d5c693b3ab8,0x3fd110f4b02365d5,1 +np.float64,0xffeaba45f9f5748b,0x3fe499e0a6f4afb2,1 +np.float64,0xffdba3f70d3747ee,0xbfca0c30493ae519,1 +np.float64,0xffa35b985426b730,0xbfdb625df56bcf45,1 +np.float64,0xffccbc9728397930,0x3fc53cbc59020704,1 +np.float64,0xffef73c942bee792,0xbfdc647a7a5e08be,1 +np.float64,0xffcb5acfb236b5a0,0x3feeb4ec038c39fc,1 +np.float64,0xffea116fe2b422df,0x3fefe03b6ae0b435,1 +np.float64,0xffe97de6e7b2fbcd,0xbfd2025698fab9eb,1 +np.float64,0xffdddba314bbb746,0x3fd31f0fdb8f93be,1 +np.float64,0xffd613a24a2c2744,0xbfebbb1efae884b3,1 +np.float64,0xffe3d938aa67b271,0xbfc2099cead3d3be,1 +np.float64,0xffdf08c2e33e1186,0xbfefd236839b900d,1 +np.float64,0xffea6ba8bd34d751,0x3fe8dfc032114719,1 +np.float64,0xffe3202083e64040,0x3fed513b81432a22,1 
+np.float64,0xffb2397db62472f8,0xbfee7d7fe1c3f76c,1 +np.float64,0xffd9d0682ab3a0d0,0x3fe0bcf9e531ad79,1 +np.float64,0xffc293df202527c0,0xbfe58d0bdece5e64,1 +np.float64,0xffe1422c7da28458,0xbf81bd72595f2341,1 +np.float64,0xffd64e4ed4ac9c9e,0x3fa4334cc011c703,1 +np.float64,0xffe40a970ae8152e,0x3fead3d258b55b7d,1 +np.float64,0xffc8c2f2223185e4,0xbfef685f07c8b9fd,1 +np.float64,0xffe4b2f7216965ee,0x3fe3861d3d896a83,1 +np.float64,0xffdb531db3b6a63c,0x3fe18cb8332dd59d,1 +np.float64,0xffe8e727a3b1ce4e,0xbfe57b15abb677b9,1 +np.float64,0xffe530c1e12a6184,0xbfb973ea5535e48f,1 +np.float64,0xffe6f7849cedef08,0x3fd39a37ec5af4b6,1 +np.float64,0xffead62a78b5ac54,0x3fe69b3f6c7aa24b,1 +np.float64,0xffeefdd725fdfbad,0xbfc08a456111fdd5,1 +np.float64,0xffe682182fed0430,0x3fecc7c1292761d2,1 +np.float64,0xffee0ca8dcbc1951,0x3fef6cc361ef2c19,1 +np.float64,0xffec9b338f393666,0x3fefa9ab8e0471b5,1 +np.float64,0xffe13c5e29a278bc,0xbfef8da74ad83398,1 +np.float64,0xffd7bd48c62f7a92,0x3fe3468cd4ac9d34,1 +np.float64,0xffedd0ed14bba1d9,0xbfd563a83477077b,1 +np.float64,0xffe86b83f3f0d707,0x3fe9eb3c658e4b2d,1 +np.float64,0xffd6a4db4bad49b6,0xbfc7e11276166e17,1 +np.float64,0xffc29e8404253d08,0x3fd35971961c789f,1 +np.float64,0xffe27cf3d664f9e7,0xbfeca0f73c72f810,1 +np.float64,0xffc34152352682a4,0x3fef384e564c002c,1 +np.float64,0xffe395728ba72ae4,0x3f8fe18c2de86eba,1 +np.float64,0xffed86c4fbbb0d89,0x3fef709db881c672,1 +np.float64,0xffe8a98d37f1531a,0x3fd4879c8f73c3dc,1 +np.float64,0xffb8ce9fea319d40,0xbfb853c8fe46b08d,1 +np.float64,0xffe7f26db8efe4db,0xbfec1cfd3e5c2ac1,1 +np.float64,0xffd7935b77af26b6,0x3fb7368c89b2a460,1 +np.float64,0xffc5840ed02b081c,0x3fd92220b56631f3,1 +np.float64,0xffc36a873926d510,0x3fa84d61baf61811,1 +np.float64,0xffe06ea583e0dd4a,0x3feb647e348b9e39,1 +np.float64,0xffe6a33031ed4660,0xbfe096b851dc1a0a,1 +np.float64,0xffe001c938e00392,0x3fe4eece77623e7a,1 +np.float64,0xffc1e4f23b23c9e4,0xbfdb9bb1f83f6ac4,1 +np.float64,0xffecd3ecbab9a7d9,0x3fbafb1f800f177d,1 +np.float64,0xffc2d3016825a604,0xbfef650e8b0d6afb,1 +np.float64,0xffe222cb68e44596,0x3fde3690e44de5bd,1 +np.float64,0xffe5bb145e2b7628,0x3fedbb98e23c9dc1,1 +np.float64,0xffe9e5823b73cb04,0xbfee41661016c03c,1 +np.float64,0xffd234a00ba46940,0x3fda0312cda580c2,1 +np.float64,0xffe0913ed6e1227d,0xbfed508bb529bd23,1 +np.float64,0xffe8e3596171c6b2,0xbfdc33e1c1d0310e,1 +np.float64,0xffef9c6835ff38cf,0x3fea8ce6d27dfba3,1 +np.float64,0xffdd3bcf66ba779e,0x3fe50523d2b6470e,1 +np.float64,0xffe57e8cf06afd1a,0xbfee600933347247,1 +np.float64,0xffe0d8c65fa1b18c,0x3fe75091f93d5e4c,1 +np.float64,0xffea7c8c16b4f918,0x3fee681724795198,1 +np.float64,0xffe34f7a05269ef4,0xbfe3c3e179676f13,1 +np.float64,0xffd28894a6a5112a,0xbfe5d1027aee615d,1 +np.float64,0xffc73be6f22e77cc,0x3fe469bbc08b472a,1 +np.float64,0xffe7f71b066fee36,0x3fe7ed136c8fdfaa,1 +np.float64,0xffebc13e29f7827c,0x3fefcdc6e677d314,1 +np.float64,0xffd53e9c942a7d3a,0x3fea5a02c7341749,1 +np.float64,0xffd7191b23ae3236,0x3fea419b66023443,1 +np.float64,0xffe9480325b29006,0xbfefeaff5fa38cd5,1 +np.float64,0xffba46dc0e348db8,0xbfefa54f4de28eba,1 +np.float64,0xffdd4cc31eba9986,0x3fe60bb41fe1c4da,1 +np.float64,0xffe13a70dea274e1,0xbfaa9192f7bd6c9b,1 +np.float64,0xffde25127bbc4a24,0x3f7c75f45e29be7d,1 +np.float64,0xffe4076543a80eca,0x3fea5aad50d2f687,1 +np.float64,0xffe61512acec2a25,0xbfefffeb67401649,1 +np.float64,0xffef812ec1ff025d,0xbfe919c7c073c766,1 +np.float64,0xffd5552aeaaaaa56,0x3fc89d38ab047396,1 diff --git a/.venv/lib/python3.12/site-packages/numpy/core/tests/data/umath-validation-set-cosh.csv 
b/.venv/lib/python3.12/site-packages/numpy/core/tests/data/umath-validation-set-cosh.csv new file mode 100644 index 00000000..c9e446c3 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/data/umath-validation-set-cosh.csv @@ -0,0 +1,1429 @@ +dtype,input,output,ulperrortol +np.float32,0xfe0ac238,0x7f800000,3 +np.float32,0xbf553b86,0x3faf079b,3 +np.float32,0xff4457da,0x7f800000,3 +np.float32,0xff7253f3,0x7f800000,3 +np.float32,0x5a5802,0x3f800000,3 +np.float32,0x3db03413,0x3f80795b,3 +np.float32,0x7f6795c9,0x7f800000,3 +np.float32,0x805b9142,0x3f800000,3 +np.float32,0xfeea581a,0x7f800000,3 +np.float32,0x3f7e2dba,0x3fc472f6,3 +np.float32,0x3d9c4d74,0x3f805f7a,3 +np.float32,0x7f18c665,0x7f800000,3 +np.float32,0x7f003e23,0x7f800000,3 +np.float32,0x3d936fa0,0x3f8054f3,3 +np.float32,0x3f32034f,0x3fa0368e,3 +np.float32,0xff087604,0x7f800000,3 +np.float32,0x380a5,0x3f800000,3 +np.float32,0x3f59694e,0x3fb10077,3 +np.float32,0x3e63e648,0x3f832ee4,3 +np.float32,0x80712f42,0x3f800000,3 +np.float32,0x3e169908,0x3f816302,3 +np.float32,0x3f2d766e,0x3f9e8692,3 +np.float32,0x3d6412e0,0x3f8032d0,3 +np.float32,0xbde689e8,0x3f80cfd4,3 +np.float32,0x483e2e,0x3f800000,3 +np.float32,0xff1ba2d0,0x7f800000,3 +np.float32,0x80136bff,0x3f800000,3 +np.float32,0x3f72534c,0x3fbdc1d4,3 +np.float32,0x3e9eb381,0x3f8632c6,3 +np.float32,0x3e142892,0x3f815795,3 +np.float32,0x0,0x3f800000,3 +np.float32,0x2f2528,0x3f800000,3 +np.float32,0x7f38be13,0x7f800000,3 +np.float32,0xfeee6896,0x7f800000,3 +np.float32,0x7f09095d,0x7f800000,3 +np.float32,0xbe94d,0x3f800000,3 +np.float32,0xbedcf8d4,0x3f8c1b74,3 +np.float32,0xbf694c02,0x3fb8ef07,3 +np.float32,0x3e2261f8,0x3f819cde,3 +np.float32,0xbf01d3ce,0x3f90d0e0,3 +np.float32,0xbeb7b3a2,0x3f8853de,3 +np.float32,0x8046de7b,0x3f800000,3 +np.float32,0xbcb45ea0,0x3f8007f1,3 +np.float32,0x3eef14af,0x3f8e35dd,3 +np.float32,0xbf047316,0x3f91846e,3 +np.float32,0x801cef45,0x3f800000,3 +np.float32,0x3e9ad891,0x3f85e609,3 +np.float32,0xff20e9cf,0x7f800000,3 +np.float32,0x80068434,0x3f800000,3 +np.float32,0xbe253020,0x3f81ab49,3 +np.float32,0x3f13f4b8,0x3f95fac9,3 +np.float32,0x804accd1,0x3f800000,3 +np.float32,0x3dee3e10,0x3f80ddf7,3 +np.float32,0xbe6c4690,0x3f836c29,3 +np.float32,0xff30d431,0x7f800000,3 +np.float32,0xbec82416,0x3f89e791,3 +np.float32,0x3f30bbcb,0x3f9fbbcc,3 +np.float32,0x3f5620a2,0x3faf72b8,3 +np.float32,0x807a8130,0x3f800000,3 +np.float32,0x3e3cb02d,0x3f822de0,3 +np.float32,0xff4839ac,0x7f800000,3 +np.float32,0x800a3e9c,0x3f800000,3 +np.float32,0x3dffd65b,0x3f810002,3 +np.float32,0xbf2b1492,0x3f9da987,3 +np.float32,0xbf21602c,0x3f9a48fe,3 +np.float32,0x512531,0x3f800000,3 +np.float32,0x24b99a,0x3f800000,3 +np.float32,0xbf53e345,0x3fae67b1,3 +np.float32,0xff2126ec,0x7f800000,3 +np.float32,0x7e79b49d,0x7f800000,3 +np.float32,0x3ea3cf04,0x3f869b6f,3 +np.float32,0x7f270059,0x7f800000,3 +np.float32,0x3f625b2f,0x3fb561e1,3 +np.float32,0xbf59947e,0x3fb11519,3 +np.float32,0xfe0d1c64,0x7f800000,3 +np.float32,0xbf3f3eae,0x3fa568e2,3 +np.float32,0x7c04d1,0x3f800000,3 +np.float32,0x7e66bd,0x3f800000,3 +np.float32,0x8011880d,0x3f800000,3 +np.float32,0x3f302f07,0x3f9f8759,3 +np.float32,0x4e3375,0x3f800000,3 +np.float32,0xfe67a134,0x7f800000,3 +np.float32,0xff670249,0x7f800000,3 +np.float32,0x7e19f27d,0x7f800000,3 +np.float32,0xbf36ce12,0x3fa20b81,3 +np.float32,0xbe6bcfc4,0x3f8368b5,3 +np.float32,0x76fcba,0x3f800000,3 +np.float32,0x7f30abaf,0x7f800000,3 +np.float32,0x3f4c1f6d,0x3faae43c,3 +np.float32,0x7f61f44a,0x7f800000,3 +np.float32,0xbf4bb3c9,0x3faab4af,3 
+np.float32,0xbda15ee0,0x3f8065c6,3 +np.float32,0xfbb4e800,0x7f800000,3 +np.float32,0x7fa00000,0x7fe00000,3 +np.float32,0x80568501,0x3f800000,3 +np.float32,0xfeb285e4,0x7f800000,3 +np.float32,0x804423a7,0x3f800000,3 +np.float32,0x7e6c0f21,0x7f800000,3 +np.float32,0x7f136b3c,0x7f800000,3 +np.float32,0x3f2d08e6,0x3f9e5e9c,3 +np.float32,0xbf6b454e,0x3fb9f7e6,3 +np.float32,0x3e6bceb0,0x3f8368ad,3 +np.float32,0xff1ad16a,0x7f800000,3 +np.float32,0x7cce1a04,0x7f800000,3 +np.float32,0xff7bcf95,0x7f800000,3 +np.float32,0x8049788d,0x3f800000,3 +np.float32,0x7ec45918,0x7f800000,3 +np.float32,0xff7fffff,0x7f800000,3 +np.float32,0x8039a1a0,0x3f800000,3 +np.float32,0x7e90cd72,0x7f800000,3 +np.float32,0xbf7dfd53,0x3fc456cc,3 +np.float32,0x3eeeb664,0x3f8e2a76,3 +np.float32,0x8055ef9b,0x3f800000,3 +np.float32,0x7ee06ddd,0x7f800000,3 +np.float32,0xba2cc000,0x3f800002,3 +np.float32,0x806da632,0x3f800000,3 +np.float32,0x7ecfaaf5,0x7f800000,3 +np.float32,0x3ddd12e6,0x3f80bf19,3 +np.float32,0xbf754394,0x3fbf60b1,3 +np.float32,0x6f3f19,0x3f800000,3 +np.float32,0x800a9af0,0x3f800000,3 +np.float32,0xfeef13ea,0x7f800000,3 +np.float32,0x7f74841f,0x7f800000,3 +np.float32,0xbeb9a2f0,0x3f888181,3 +np.float32,0x77cbb,0x3f800000,3 +np.float32,0xbf587f84,0x3fb0911b,3 +np.float32,0x210ba5,0x3f800000,3 +np.float32,0x3ee60a28,0x3f8d2367,3 +np.float32,0xbe3731ac,0x3f820dc7,3 +np.float32,0xbee8cfee,0x3f8d765e,3 +np.float32,0x7b2ef179,0x7f800000,3 +np.float32,0xfe81377c,0x7f800000,3 +np.float32,0x6ac98c,0x3f800000,3 +np.float32,0x3f51f144,0x3fad8288,3 +np.float32,0x80785750,0x3f800000,3 +np.float32,0x3f46615a,0x3fa864ff,3 +np.float32,0xbf35ac9e,0x3fa19b8e,3 +np.float32,0x7f0982ac,0x7f800000,3 +np.float32,0x1b2610,0x3f800000,3 +np.float32,0x3ed8bb25,0x3f8ba3df,3 +np.float32,0xbeb41bac,0x3f88006d,3 +np.float32,0xff48e89d,0x7f800000,3 +np.float32,0x3ed0ab8c,0x3f8ac755,3 +np.float32,0xbe64671c,0x3f833282,3 +np.float32,0x64bce4,0x3f800000,3 +np.float32,0x284f79,0x3f800000,3 +np.float32,0x7e09faa7,0x7f800000,3 +np.float32,0x4376c1,0x3f800000,3 +np.float32,0x805ca8c0,0x3f800000,3 +np.float32,0xff0859d5,0x7f800000,3 +np.float32,0xbed2f3b2,0x3f8b04dd,3 +np.float32,0x8045bd0c,0x3f800000,3 +np.float32,0x3f0e6216,0x3f94503f,3 +np.float32,0x3f41e3ae,0x3fa68035,3 +np.float32,0x80088ccc,0x3f800000,3 +np.float32,0x3f37fc19,0x3fa2812f,3 +np.float32,0x71c87d,0x3f800000,3 +np.float32,0x8024f4b2,0x3f800000,3 +np.float32,0xff78dd88,0x7f800000,3 +np.float32,0xbda66c90,0x3f806c40,3 +np.float32,0x7f33ef0d,0x7f800000,3 +np.float32,0x46a343,0x3f800000,3 +np.float32,0xff1dce38,0x7f800000,3 +np.float32,0x1b935d,0x3f800000,3 +np.float32,0x3ebec598,0x3f88fd0e,3 +np.float32,0xff115530,0x7f800000,3 +np.float32,0x803916aa,0x3f800000,3 +np.float32,0xff60a3e2,0x7f800000,3 +np.float32,0x3b8ddd48,0x3f80004f,3 +np.float32,0x3f761b6e,0x3fbfd8ea,3 +np.float32,0xbdf55b88,0x3f80eb70,3 +np.float32,0x37374,0x3f800000,3 +np.float32,0x3de150e0,0x3f80c682,3 +np.float32,0x3f343278,0x3fa10a83,3 +np.float32,0xbe9baefa,0x3f85f68b,3 +np.float32,0x3d8d43,0x3f800000,3 +np.float32,0x3e80994b,0x3f840f0c,3 +np.float32,0xbe573c6c,0x3f82d685,3 +np.float32,0x805b83b4,0x3f800000,3 +np.float32,0x683d88,0x3f800000,3 +np.float32,0x692465,0x3f800000,3 +np.float32,0xbdc345f8,0x3f809511,3 +np.float32,0x3f7c1c5a,0x3fc3406f,3 +np.float32,0xbf40bef3,0x3fa606df,3 +np.float32,0xff1e25b9,0x7f800000,3 +np.float32,0x3e4481e0,0x3f825d37,3 +np.float32,0x75d188,0x3f800000,3 +np.float32,0x3ea53cec,0x3f86b956,3 +np.float32,0xff105a54,0x7f800000,3 +np.float32,0x7f800000,0x7f800000,3 
+np.float32,0x7f11f0b0,0x7f800000,3 +np.float32,0xbf58a57d,0x3fb0a328,3 +np.float32,0xbdd11e38,0x3f80aaf8,3 +np.float32,0xbea94adc,0x3f870fa0,3 +np.float32,0x3e9dd780,0x3f862180,3 +np.float32,0xff1786b9,0x7f800000,3 +np.float32,0xfec46aa2,0x7f800000,3 +np.float32,0x7f4300c1,0x7f800000,3 +np.float32,0x29ba2b,0x3f800000,3 +np.float32,0x3f4112e2,0x3fa62993,3 +np.float32,0xbe6c9224,0x3f836e5d,3 +np.float32,0x7f0e42a3,0x7f800000,3 +np.float32,0xff6390ad,0x7f800000,3 +np.float32,0x3f54e374,0x3faede94,3 +np.float32,0x7f2642a2,0x7f800000,3 +np.float32,0x7f46b2be,0x7f800000,3 +np.float32,0xfe59095c,0x7f800000,3 +np.float32,0x7146a0,0x3f800000,3 +np.float32,0x3f07763d,0x3f925786,3 +np.float32,0x3d172780,0x3f801651,3 +np.float32,0xff66f1c5,0x7f800000,3 +np.float32,0xff025349,0x7f800000,3 +np.float32,0x6ce99d,0x3f800000,3 +np.float32,0xbf7e4f50,0x3fc48685,3 +np.float32,0xbeff8ca2,0x3f904708,3 +np.float32,0x3e6c8,0x3f800000,3 +np.float32,0x7f7153dc,0x7f800000,3 +np.float32,0xbedcf612,0x3f8c1b26,3 +np.float32,0xbbc2f180,0x3f800094,3 +np.float32,0xbf397399,0x3fa314b8,3 +np.float32,0x6c6e35,0x3f800000,3 +np.float32,0x7f50a88b,0x7f800000,3 +np.float32,0xfe84093e,0x7f800000,3 +np.float32,0x3f737b9d,0x3fbe6478,3 +np.float32,0x7f6a5340,0x7f800000,3 +np.float32,0xbde83c20,0x3f80d2e7,3 +np.float32,0xff769ce9,0x7f800000,3 +np.float32,0xfdd33c30,0x7f800000,3 +np.float32,0xbc95cb60,0x3f80057a,3 +np.float32,0x8007a40d,0x3f800000,3 +np.float32,0x3f55d90c,0x3faf5132,3 +np.float32,0x80282082,0x3f800000,3 +np.float32,0xbf43b1f2,0x3fa7418c,3 +np.float32,0x3f1dc7cb,0x3f991731,3 +np.float32,0xbd4346a0,0x3f80253f,3 +np.float32,0xbf5aa82a,0x3fb19946,3 +np.float32,0x3f4b8c22,0x3faaa333,3 +np.float32,0x3d13468c,0x3f80152f,3 +np.float32,0x7db77097,0x7f800000,3 +np.float32,0x4a00df,0x3f800000,3 +np.float32,0xbedea5e0,0x3f8c4b64,3 +np.float32,0x80482543,0x3f800000,3 +np.float32,0xbef344fe,0x3f8eb8dd,3 +np.float32,0x7ebd4044,0x7f800000,3 +np.float32,0xbf512c0e,0x3fad287e,3 +np.float32,0x3db28cce,0x3f807c9c,3 +np.float32,0xbd0f5ae0,0x3f801412,3 +np.float32,0xfe7ed9ac,0x7f800000,3 +np.float32,0x3eb1aa82,0x3f87c8b4,3 +np.float32,0xfef1679e,0x7f800000,3 +np.float32,0xff3629f2,0x7f800000,3 +np.float32,0xff3562b4,0x7f800000,3 +np.float32,0x3dcafe1d,0x3f80a118,3 +np.float32,0xfedf242a,0x7f800000,3 +np.float32,0xbf43102a,0x3fa6fda4,3 +np.float32,0x8028834e,0x3f800000,3 +np.float32,0x805c8513,0x3f800000,3 +np.float32,0x3f59306a,0x3fb0e550,3 +np.float32,0x3eda2c9c,0x3f8bcc4a,3 +np.float32,0x80023524,0x3f800000,3 +np.float32,0x7ef72879,0x7f800000,3 +np.float32,0x661c8a,0x3f800000,3 +np.float32,0xfec3ba6c,0x7f800000,3 +np.float32,0x805aaca6,0x3f800000,3 +np.float32,0xff5c1f13,0x7f800000,3 +np.float32,0x3f6ab3f4,0x3fb9ab6b,3 +np.float32,0x3f014896,0x3f90ac20,3 +np.float32,0x3f030584,0x3f91222a,3 +np.float32,0xbf74853d,0x3fbef71d,3 +np.float32,0xbf534ee0,0x3fae2323,3 +np.float32,0x2c90c3,0x3f800000,3 +np.float32,0x7f62ad25,0x7f800000,3 +np.float32,0x1c8847,0x3f800000,3 +np.float32,0x7e2a8d43,0x7f800000,3 +np.float32,0x807a09cd,0x3f800000,3 +np.float32,0x413871,0x3f800000,3 +np.float32,0x80063692,0x3f800000,3 +np.float32,0x3edaf29b,0x3f8be211,3 +np.float32,0xbf64a7ab,0x3fb68b2d,3 +np.float32,0xfe56a720,0x7f800000,3 +np.float32,0xbf54a8d4,0x3faec350,3 +np.float32,0x3ecbaef7,0x3f8a4350,3 +np.float32,0x3f413714,0x3fa63890,3 +np.float32,0x7d3aa8,0x3f800000,3 +np.float32,0xbea9a13c,0x3f8716e7,3 +np.float32,0x7ef7553e,0x7f800000,3 +np.float32,0x8056f29f,0x3f800000,3 +np.float32,0xff1f7ffe,0x7f800000,3 +np.float32,0x3f41953b,0x3fa65f9c,3 
+np.float32,0x3daa2f,0x3f800000,3 +np.float32,0xff0893e4,0x7f800000,3 +np.float32,0xbefc7ec6,0x3f8fe207,3 +np.float32,0xbb026800,0x3f800011,3 +np.float32,0x341e4f,0x3f800000,3 +np.float32,0x3e7b708a,0x3f83e0d1,3 +np.float32,0xa18cb,0x3f800000,3 +np.float32,0x7e290239,0x7f800000,3 +np.float32,0xbf4254f2,0x3fa6af62,3 +np.float32,0x80000000,0x3f800000,3 +np.float32,0x3f0a6c,0x3f800000,3 +np.float32,0xbec44d28,0x3f898609,3 +np.float32,0xf841f,0x3f800000,3 +np.float32,0x7f01a693,0x7f800000,3 +np.float32,0x8053340b,0x3f800000,3 +np.float32,0xfd4e7990,0x7f800000,3 +np.float32,0xbf782f1f,0x3fc10356,3 +np.float32,0xbe962118,0x3f858acc,3 +np.float32,0xfe8cd702,0x7f800000,3 +np.float32,0x7ecd986f,0x7f800000,3 +np.float32,0x3ebe775f,0x3f88f59b,3 +np.float32,0x8065524f,0x3f800000,3 +np.float32,0x3ede7fc4,0x3f8c471e,3 +np.float32,0x7f5e15ea,0x7f800000,3 +np.float32,0xbe871ada,0x3f847b78,3 +np.float32,0x3f21958b,0x3f9a5af7,3 +np.float32,0x3f64d480,0x3fb6a1fa,3 +np.float32,0xff18b0e9,0x7f800000,3 +np.float32,0xbf0840dd,0x3f928fd9,3 +np.float32,0x80104f5d,0x3f800000,3 +np.float32,0x643b94,0x3f800000,3 +np.float32,0xbc560a80,0x3f8002cc,3 +np.float32,0x3f5c75d6,0x3fb2786e,3 +np.float32,0x7f365fc9,0x7f800000,3 +np.float32,0x54e965,0x3f800000,3 +np.float32,0x6dcd4d,0x3f800000,3 +np.float32,0x3f2057a0,0x3f99f04d,3 +np.float32,0x272fa3,0x3f800000,3 +np.float32,0xff423dc9,0x7f800000,3 +np.float32,0x80273463,0x3f800000,3 +np.float32,0xfe21cc78,0x7f800000,3 +np.float32,0x7fc00000,0x7fc00000,3 +np.float32,0x802feb65,0x3f800000,3 +np.float32,0x3dc733d0,0x3f809b21,3 +np.float32,0x65d56b,0x3f800000,3 +np.float32,0x80351d8e,0x3f800000,3 +np.float32,0xbf244247,0x3f9b43dd,3 +np.float32,0x7f328e7e,0x7f800000,3 +np.float32,0x7f4d9712,0x7f800000,3 +np.float32,0x2c505d,0x3f800000,3 +np.float32,0xbf232ebe,0x3f9ae5a0,3 +np.float32,0x804a363a,0x3f800000,3 +np.float32,0x80417102,0x3f800000,3 +np.float32,0xbf48b170,0x3fa963d4,3 +np.float32,0x7ea3e3b6,0x7f800000,3 +np.float32,0xbf41415b,0x3fa63cd2,3 +np.float32,0xfe3af7c8,0x7f800000,3 +np.float32,0x7f478010,0x7f800000,3 +np.float32,0x80143113,0x3f800000,3 +np.float32,0x3f7626a7,0x3fbfdf2e,3 +np.float32,0xfea20b0a,0x7f800000,3 +np.float32,0x80144d64,0x3f800000,3 +np.float32,0x7db9ba47,0x7f800000,3 +np.float32,0x7f7fffff,0x7f800000,3 +np.float32,0xbe410834,0x3f8247ef,3 +np.float32,0x14a7af,0x3f800000,3 +np.float32,0x7eaebf9e,0x7f800000,3 +np.float32,0xff800000,0x7f800000,3 +np.float32,0x3f0a7d8e,0x3f9330fd,3 +np.float32,0x3ef780,0x3f800000,3 +np.float32,0x3f62253e,0x3fb546d1,3 +np.float32,0x3f4cbeac,0x3fab2acc,3 +np.float32,0x25db1,0x3f800000,3 +np.float32,0x65c54a,0x3f800000,3 +np.float32,0x800f0645,0x3f800000,3 +np.float32,0x3ed28c78,0x3f8af9f0,3 +np.float32,0x8040c6ce,0x3f800000,3 +np.float32,0x5e4e9a,0x3f800000,3 +np.float32,0xbd3fd2b0,0x3f8023f1,3 +np.float32,0xbf5d2d3f,0x3fb2d1b6,3 +np.float32,0x7ead999f,0x7f800000,3 +np.float32,0xbf30dc86,0x3f9fc805,3 +np.float32,0xff2b0a62,0x7f800000,3 +np.float32,0x3d5180e9,0x3f802adf,3 +np.float32,0x3f62716f,0x3fb56d0d,3 +np.float32,0x7e82ae9c,0x7f800000,3 +np.float32,0xfe2d4bdc,0x7f800000,3 +np.float32,0x805cc7d4,0x3f800000,3 +np.float32,0xfb50f700,0x7f800000,3 +np.float32,0xff57b684,0x7f800000,3 +np.float32,0x80344f01,0x3f800000,3 +np.float32,0x7f2af372,0x7f800000,3 +np.float32,0xfeab6204,0x7f800000,3 +np.float32,0x30b251,0x3f800000,3 +np.float32,0x3eed8cc4,0x3f8e0698,3 +np.float32,0x7eeb1c6a,0x7f800000,3 +np.float32,0x3f17ece6,0x3f9735b0,3 +np.float32,0x21e985,0x3f800000,3 +np.float32,0x3f3a7df3,0x3fa37e34,3 
+np.float32,0x802a14a2,0x3f800000,3 +np.float32,0x807d4d5b,0x3f800000,3 +np.float32,0x7f6093ce,0x7f800000,3 +np.float32,0x3f800000,0x3fc583ab,3 +np.float32,0x3da2c26e,0x3f806789,3 +np.float32,0xfe05f278,0x7f800000,3 +np.float32,0x800000,0x3f800000,3 +np.float32,0xbee63342,0x3f8d282e,3 +np.float32,0xbf225586,0x3f9a9bd4,3 +np.float32,0xbed60e86,0x3f8b59ba,3 +np.float32,0xbec99484,0x3f8a0ca3,3 +np.float32,0x3e967c71,0x3f859199,3 +np.float32,0x7f26ab62,0x7f800000,3 +np.float32,0xca7f4,0x3f800000,3 +np.float32,0xbf543790,0x3fae8ebc,3 +np.float32,0x3e4c1ed9,0x3f828d2d,3 +np.float32,0xbdf37f88,0x3f80e7e1,3 +np.float32,0xff0cc44e,0x7f800000,3 +np.float32,0x5dea48,0x3f800000,3 +np.float32,0x31023c,0x3f800000,3 +np.float32,0x3ea10733,0x3f866208,3 +np.float32,0x3e11e6f2,0x3f814d2e,3 +np.float32,0x80641960,0x3f800000,3 +np.float32,0x3ef779a8,0x3f8f3edb,3 +np.float32,0x3f2a5062,0x3f9d632a,3 +np.float32,0x2b7d34,0x3f800000,3 +np.float32,0x3eeb95c5,0x3f8dca67,3 +np.float32,0x805c1357,0x3f800000,3 +np.float32,0x3db3a79d,0x3f807e29,3 +np.float32,0xfded1900,0x7f800000,3 +np.float32,0x45f362,0x3f800000,3 +np.float32,0x451f38,0x3f800000,3 +np.float32,0x801d3ae5,0x3f800000,3 +np.float32,0x458d45,0x3f800000,3 +np.float32,0xfda9d298,0x7f800000,3 +np.float32,0x467439,0x3f800000,3 +np.float32,0x7f66554a,0x7f800000,3 +np.float32,0xfef2375a,0x7f800000,3 +np.float32,0xbf33fc47,0x3fa0f5d7,3 +np.float32,0x3f75ba69,0x3fbfa2d0,3 +np.float32,0xfeb625b2,0x7f800000,3 +np.float32,0x8066b371,0x3f800000,3 +np.float32,0x3f5cb4e9,0x3fb29718,3 +np.float32,0x7f3b6a58,0x7f800000,3 +np.float32,0x7f6b35ea,0x7f800000,3 +np.float32,0xbf6ee555,0x3fbbe5be,3 +np.float32,0x3d836e21,0x3f804380,3 +np.float32,0xff43cd0c,0x7f800000,3 +np.float32,0xff55c1fa,0x7f800000,3 +np.float32,0xbf0dfccc,0x3f9432a6,3 +np.float32,0x3ed92121,0x3f8baf00,3 +np.float32,0x80068cc1,0x3f800000,3 +np.float32,0xff0103f9,0x7f800000,3 +np.float32,0x7e51b175,0x7f800000,3 +np.float32,0x8012f214,0x3f800000,3 +np.float32,0x62d298,0x3f800000,3 +np.float32,0xbf3e1525,0x3fa4ef8d,3 +np.float32,0x806b4882,0x3f800000,3 +np.float32,0xbf38c146,0x3fa2ce7c,3 +np.float32,0xbed59c30,0x3f8b4d70,3 +np.float32,0x3d1910c0,0x3f8016e2,3 +np.float32,0x7f33d55b,0x7f800000,3 +np.float32,0x7f5800e3,0x7f800000,3 +np.float32,0x5b2c5d,0x3f800000,3 +np.float32,0x807be750,0x3f800000,3 +np.float32,0x7eb297c1,0x7f800000,3 +np.float32,0x7dafee62,0x7f800000,3 +np.float32,0x7d9e23f0,0x7f800000,3 +np.float32,0x3e580537,0x3f82dbd8,3 +np.float32,0xbf800000,0x3fc583ab,3 +np.float32,0x7f40f880,0x7f800000,3 +np.float32,0x775ad3,0x3f800000,3 +np.float32,0xbedacd36,0x3f8bddf3,3 +np.float32,0x2138f6,0x3f800000,3 +np.float32,0x52c3b7,0x3f800000,3 +np.float32,0x8041cfdd,0x3f800000,3 +np.float32,0x7bf16791,0x7f800000,3 +np.float32,0xbe95869c,0x3f857f55,3 +np.float32,0xbf199796,0x3f97bcaf,3 +np.float32,0x3ef8da38,0x3f8f6b45,3 +np.float32,0x803f3648,0x3f800000,3 +np.float32,0x80026fd2,0x3f800000,3 +np.float32,0x7eb3ac26,0x7f800000,3 +np.float32,0x3e49921b,0x3f827ce8,3 +np.float32,0xbf689aed,0x3fb892de,3 +np.float32,0x3f253509,0x3f9b9779,3 +np.float32,0xff17894a,0x7f800000,3 +np.float32,0x3cd12639,0x3f800aae,3 +np.float32,0x1db14b,0x3f800000,3 +np.float32,0x39a0bf,0x3f800000,3 +np.float32,0xfdfe1d08,0x7f800000,3 +np.float32,0xff416cd2,0x7f800000,3 +np.float32,0x8070d818,0x3f800000,3 +np.float32,0x3e516e12,0x3f82afb8,3 +np.float32,0x80536651,0x3f800000,3 +np.float32,0xbf2903d2,0x3f9cecb7,3 +np.float32,0x3e896ae4,0x3f84a353,3 +np.float32,0xbd6ba2c0,0x3f80363d,3 +np.float32,0x80126d3e,0x3f800000,3 
+np.float32,0xfd9d43d0,0x7f800000,3 +np.float32,0x7b56b6,0x3f800000,3 +np.float32,0xff04718e,0x7f800000,3 +np.float32,0x31440f,0x3f800000,3 +np.float32,0xbf7a1313,0x3fc215c9,3 +np.float32,0x7f43d6a0,0x7f800000,3 +np.float32,0x3f566503,0x3faf92cc,3 +np.float32,0xbf39eb0e,0x3fa343f1,3 +np.float32,0xbe35fd70,0x3f8206df,3 +np.float32,0x800c36ac,0x3f800000,3 +np.float32,0x60d061,0x3f800000,3 +np.float32,0x80453e12,0x3f800000,3 +np.float32,0xfe17c36c,0x7f800000,3 +np.float32,0x3d8c72,0x3f800000,3 +np.float32,0xfe8e9134,0x7f800000,3 +np.float32,0xff5d89de,0x7f800000,3 +np.float32,0x7f45020e,0x7f800000,3 +np.float32,0x3f28225e,0x3f9c9d01,3 +np.float32,0xbf3b6900,0x3fa3dbdd,3 +np.float32,0x80349023,0x3f800000,3 +np.float32,0xbf14d780,0x3f964042,3 +np.float32,0x3f56b5d2,0x3fafb8c3,3 +np.float32,0x800c639c,0x3f800000,3 +np.float32,0x7f7a19c8,0x7f800000,3 +np.float32,0xbf7a0815,0x3fc20f86,3 +np.float32,0xbec55926,0x3f89a06e,3 +np.float32,0x4b2cd2,0x3f800000,3 +np.float32,0xbf271eb2,0x3f9c41c8,3 +np.float32,0xff26e168,0x7f800000,3 +np.float32,0x800166b2,0x3f800000,3 +np.float32,0xbde97e38,0x3f80d532,3 +np.float32,0xbf1f93ec,0x3f99af1a,3 +np.float32,0x7f2896ed,0x7f800000,3 +np.float32,0x3da7d96d,0x3f806e1d,3 +np.float32,0x802b7237,0x3f800000,3 +np.float32,0xfdca6bc0,0x7f800000,3 +np.float32,0xbed2e300,0x3f8b0318,3 +np.float32,0x8079d9e8,0x3f800000,3 +np.float32,0x3f388c81,0x3fa2b9c2,3 +np.float32,0x3ed2607c,0x3f8af54a,3 +np.float32,0xff287de6,0x7f800000,3 +np.float32,0x3f55ed89,0x3faf5ac9,3 +np.float32,0x7f5b6af7,0x7f800000,3 +np.float32,0xbeb24730,0x3f87d698,3 +np.float32,0x1,0x3f800000,3 +np.float32,0x3f3a2350,0x3fa35a3b,3 +np.float32,0x8013b422,0x3f800000,3 +np.float32,0x3e9a6560,0x3f85dd35,3 +np.float32,0x80510631,0x3f800000,3 +np.float32,0xfeae39d6,0x7f800000,3 +np.float32,0x7eb437ad,0x7f800000,3 +np.float32,0x8047545b,0x3f800000,3 +np.float32,0x806a1c71,0x3f800000,3 +np.float32,0xbe5543f0,0x3f82c93b,3 +np.float32,0x40e8d,0x3f800000,3 +np.float32,0x63d18b,0x3f800000,3 +np.float32,0x1fa1ea,0x3f800000,3 +np.float32,0x801944e0,0x3f800000,3 +np.float32,0xbf4c7ac6,0x3fab0cae,3 +np.float32,0x7f2679d4,0x7f800000,3 +np.float32,0x3f0102fc,0x3f9099d0,3 +np.float32,0x7e44bdc1,0x7f800000,3 +np.float32,0xbf2072f6,0x3f99f970,3 +np.float32,0x5c7d38,0x3f800000,3 +np.float32,0x30a2e6,0x3f800000,3 +np.float32,0x805b9ca3,0x3f800000,3 +np.float32,0x7cc24ad5,0x7f800000,3 +np.float32,0x3f4f7920,0x3fac6357,3 +np.float32,0x111d62,0x3f800000,3 +np.float32,0xbf4de40a,0x3fabad77,3 +np.float32,0x805d0354,0x3f800000,3 +np.float32,0xbb3d2b00,0x3f800023,3 +np.float32,0x3ef229e7,0x3f8e960b,3 +np.float32,0x3f15754e,0x3f9670e0,3 +np.float32,0xbf689c6b,0x3fb893a5,3 +np.float32,0xbf3796c6,0x3fa2599b,3 +np.float32,0xbe95303c,0x3f8578f2,3 +np.float32,0xfee330de,0x7f800000,3 +np.float32,0xff0d9705,0x7f800000,3 +np.float32,0xbeb0ebd0,0x3f87b7dd,3 +np.float32,0xbf4d5a13,0x3fab6fe7,3 +np.float32,0x80142f5a,0x3f800000,3 +np.float32,0x7e01a87b,0x7f800000,3 +np.float32,0xbe45e5ec,0x3f8265d7,3 +np.float32,0x7f4ac255,0x7f800000,3 +np.float32,0x3ebf6a60,0x3f890ccb,3 +np.float32,0x7f771e16,0x7f800000,3 +np.float32,0x3f41834e,0x3fa6582b,3 +np.float32,0x3f7f6f98,0x3fc52ef0,3 +np.float32,0x7e4ad775,0x7f800000,3 +np.float32,0x3eb39991,0x3f87f4c4,3 +np.float32,0x1e3f4,0x3f800000,3 +np.float32,0x7e84ba19,0x7f800000,3 +np.float32,0x80640be4,0x3f800000,3 +np.float32,0x3f459fc8,0x3fa81272,3 +np.float32,0x3f554ed0,0x3faf109b,3 +np.float32,0x3c6617,0x3f800000,3 +np.float32,0x7f441158,0x7f800000,3 +np.float32,0x7f66e6d8,0x7f800000,3 
+np.float32,0x7f565152,0x7f800000,3 +np.float32,0x7f16d550,0x7f800000,3 +np.float32,0xbd4f1950,0x3f8029e5,3 +np.float32,0xcf722,0x3f800000,3 +np.float32,0x3f37d6fd,0x3fa272ad,3 +np.float32,0xff7324ea,0x7f800000,3 +np.float32,0x804bc246,0x3f800000,3 +np.float32,0x7f099ef8,0x7f800000,3 +np.float32,0x5f838b,0x3f800000,3 +np.float32,0x80523534,0x3f800000,3 +np.float32,0x3f595e84,0x3fb0fb50,3 +np.float32,0xfdef8ac8,0x7f800000,3 +np.float32,0x3d9a07,0x3f800000,3 +np.float32,0x410f61,0x3f800000,3 +np.float32,0xbf715dbb,0x3fbd3bcb,3 +np.float32,0xbedd4734,0x3f8c242f,3 +np.float32,0x7e86739a,0x7f800000,3 +np.float32,0x3e81f144,0x3f8424fe,3 +np.float32,0x7f6342d1,0x7f800000,3 +np.float32,0xff6919a3,0x7f800000,3 +np.float32,0xff051878,0x7f800000,3 +np.float32,0x800ba28f,0x3f800000,3 +np.float32,0xfefab3d8,0x7f800000,3 +np.float32,0xff612a84,0x7f800000,3 +np.float32,0x800cd5ab,0x3f800000,3 +np.float32,0x802a07ae,0x3f800000,3 +np.float32,0xfef6ee3a,0x7f800000,3 +np.float32,0x8037e896,0x3f800000,3 +np.float32,0x3ef2d86f,0x3f8eab7d,3 +np.float32,0x3eafe53d,0x3f87a0cb,3 +np.float32,0xba591c00,0x3f800003,3 +np.float32,0x3e9ed028,0x3f863508,3 +np.float32,0x4a12a8,0x3f800000,3 +np.float32,0xbee55c84,0x3f8d0f45,3 +np.float32,0x8038a8d3,0x3f800000,3 +np.float32,0xff055243,0x7f800000,3 +np.float32,0xbf659067,0x3fb701ca,3 +np.float32,0xbee36a86,0x3f8cd5e0,3 +np.float32,0x7f1d74c1,0x7f800000,3 +np.float32,0xbf7657df,0x3fbffaad,3 +np.float32,0x7e37ee34,0x7f800000,3 +np.float32,0xff04bc74,0x7f800000,3 +np.float32,0x806d194e,0x3f800000,3 +np.float32,0x7f5596c3,0x7f800000,3 +np.float32,0xbe09d268,0x3f81293e,3 +np.float32,0x79ff75,0x3f800000,3 +np.float32,0xbf55479c,0x3faf0d3e,3 +np.float32,0xbe5428ec,0x3f82c1d4,3 +np.float32,0x3f624134,0x3fb554d7,3 +np.float32,0x2ccb8a,0x3f800000,3 +np.float32,0xfc082040,0x7f800000,3 +np.float32,0xff315467,0x7f800000,3 +np.float32,0x3e6ea2d2,0x3f837dd5,3 +np.float32,0x8020fdd1,0x3f800000,3 +np.float32,0x7f0416a1,0x7f800000,3 +np.float32,0x710a1b,0x3f800000,3 +np.float32,0x3dfcd050,0x3f80f9fc,3 +np.float32,0xfe995e96,0x7f800000,3 +np.float32,0x3f020d00,0x3f90e006,3 +np.float32,0x8064263e,0x3f800000,3 +np.float32,0xfcee4160,0x7f800000,3 +np.float32,0x801b3a18,0x3f800000,3 +np.float32,0x3f62c984,0x3fb59955,3 +np.float32,0x806e8355,0x3f800000,3 +np.float32,0x7e94f65d,0x7f800000,3 +np.float32,0x1173de,0x3f800000,3 +np.float32,0x3e3ff3b7,0x3f824166,3 +np.float32,0x803b4aea,0x3f800000,3 +np.float32,0x804c5bcc,0x3f800000,3 +np.float32,0x509fe5,0x3f800000,3 +np.float32,0xbf33b5ee,0x3fa0db0b,3 +np.float32,0x3f2ac15c,0x3f9d8ba4,3 +np.float32,0x7f2c54f8,0x7f800000,3 +np.float32,0x7f33d933,0x7f800000,3 +np.float32,0xbf09b2b4,0x3f92f795,3 +np.float32,0x805db8d6,0x3f800000,3 +np.float32,0x6d6e66,0x3f800000,3 +np.float32,0x3ddfea92,0x3f80c40c,3 +np.float32,0xfda719b8,0x7f800000,3 +np.float32,0x5d657f,0x3f800000,3 +np.float32,0xbf005ba3,0x3f906df6,3 +np.float32,0xbf45e606,0x3fa8305c,3 +np.float32,0x5e9fd1,0x3f800000,3 +np.float32,0x8079dc45,0x3f800000,3 +np.float32,0x7e9c40e3,0x7f800000,3 +np.float32,0x6bd5f6,0x3f800000,3 +np.float32,0xbea14a0e,0x3f866761,3 +np.float32,0x7e7323f3,0x7f800000,3 +np.float32,0x7f0c0a79,0x7f800000,3 +np.float32,0xbf7d7aeb,0x3fc40b0f,3 +np.float32,0x437588,0x3f800000,3 +np.float32,0xbf356376,0x3fa17f63,3 +np.float32,0x7f129921,0x7f800000,3 +np.float32,0x7f47a52e,0x7f800000,3 +np.float32,0xba8cb400,0x3f800005,3 +np.float32,0x802284e0,0x3f800000,3 +np.float32,0xbe820f56,0x3f8426ec,3 +np.float32,0x7f2ef6cf,0x7f800000,3 +np.float32,0xbf70a090,0x3fbcd501,3 
+np.float32,0xbf173fea,0x3f96ff6d,3 +np.float32,0x3e19c489,0x3f817224,3 +np.float32,0x7f429b30,0x7f800000,3 +np.float32,0xbdae4118,0x3f8076af,3 +np.float32,0x3e70ad30,0x3f838d41,3 +np.float32,0x335fed,0x3f800000,3 +np.float32,0xff5359cf,0x7f800000,3 +np.float32,0xbf17e42b,0x3f9732f1,3 +np.float32,0xff3a950b,0x7f800000,3 +np.float32,0xbcca70c0,0x3f800a02,3 +np.float32,0x3f2cda62,0x3f9e4dad,3 +np.float32,0x3f50c185,0x3facf805,3 +np.float32,0x80000001,0x3f800000,3 +np.float32,0x807b86d2,0x3f800000,3 +np.float32,0x8010c2cf,0x3f800000,3 +np.float32,0x3f130fb8,0x3f95b519,3 +np.float32,0x807dc546,0x3f800000,3 +np.float32,0xbee20740,0x3f8cad3f,3 +np.float32,0x80800000,0x3f800000,3 +np.float32,0x3cbd90c0,0x3f8008c6,3 +np.float32,0x3e693488,0x3f835571,3 +np.float32,0xbe70cd44,0x3f838e35,3 +np.float32,0xbe348dc8,0x3f81feb1,3 +np.float32,0x3f31ea90,0x3fa02d3f,3 +np.float32,0xfcd7e180,0x7f800000,3 +np.float32,0xbe30a75c,0x3f81e8d0,3 +np.float32,0x3e552c5a,0x3f82c89d,3 +np.float32,0xff513f74,0x7f800000,3 +np.float32,0xbdb16248,0x3f807afd,3 +np.float64,0x7fbbf954e437f2a9,0x7ff0000000000000,2 +np.float64,0x581bbf0cb0379,0x3ff0000000000000,2 +np.float64,0x7ff8000000000000,0x7ff8000000000000,2 +np.float64,0xffb959a2a632b348,0x7ff0000000000000,2 +np.float64,0xbfdbd6baebb7ad76,0x3ff189a5ca25a6e1,2 +np.float64,0xbfd094ec9aa129da,0x3ff08a3f6b918065,2 +np.float64,0x3fe236753f646cea,0x3ff2a982660b8b43,2 +np.float64,0xbfe537fadfaa6ff6,0x3ff3a5f1c49c31bf,2 +np.float64,0xbfe31fa7dc663f50,0x3ff2f175374aef0e,2 +np.float64,0x3fc4b6569f296cb0,0x3ff035bde801bb53,2 +np.float64,0x800ce3c00f99c780,0x3ff0000000000000,2 +np.float64,0xbfebcde33e779bc6,0x3ff66de82cd30fc5,2 +np.float64,0x800dc09d3b7b813b,0x3ff0000000000000,2 +np.float64,0x80067d4c450cfa99,0x3ff0000000000000,2 +np.float64,0x1f6ade203ed7,0x3ff0000000000000,2 +np.float64,0xbfd4e311eca9c624,0x3ff0dc1383d6c3db,2 +np.float64,0x800649b3a54c9368,0x3ff0000000000000,2 +np.float64,0xcc14d1ab9829a,0x3ff0000000000000,2 +np.float64,0x3fc290c5bb25218b,0x3ff02b290f46dd6d,2 +np.float64,0x3fe78eb8376f1d70,0x3ff488f3bc259537,2 +np.float64,0xffc60f58e82c1eb0,0x7ff0000000000000,2 +np.float64,0x3fd35666ad26accd,0x3ff0bc6573da6bcd,2 +np.float64,0x7fc20257a62404ae,0x7ff0000000000000,2 +np.float64,0x80076d842e0edb09,0x3ff0000000000000,2 +np.float64,0x3fd8e44b08b1c898,0x3ff139b9a1f8428e,2 +np.float64,0x7fd6f6fc7a2dedf8,0x7ff0000000000000,2 +np.float64,0x3fa01b9f0820373e,0x3ff00206f8ad0f1b,2 +np.float64,0x69ed190ed3da4,0x3ff0000000000000,2 +np.float64,0xbfd997eb34b32fd6,0x3ff14be65a5db4a0,2 +np.float64,0x7feada2d0935b459,0x7ff0000000000000,2 +np.float64,0xbf80987120213100,0x3ff000226d29a9fc,2 +np.float64,0xbfef203e37fe407c,0x3ff82f51f04e8821,2 +np.float64,0xffe3dcf91fa7b9f2,0x7ff0000000000000,2 +np.float64,0x9a367283346cf,0x3ff0000000000000,2 +np.float64,0x800feb09f7bfd614,0x3ff0000000000000,2 +np.float64,0xbfe0319f9520633f,0x3ff217c5205c403f,2 +np.float64,0xbfa91eabd4323d50,0x3ff004ee4347f627,2 +np.float64,0x3fd19cbf7d23397f,0x3ff09c13e8e43571,2 +np.float64,0xffeb8945f0b7128b,0x7ff0000000000000,2 +np.float64,0x800a0eb4f2141d6a,0x3ff0000000000000,2 +np.float64,0xffe83e7312f07ce6,0x7ff0000000000000,2 +np.float64,0xffca53fee834a7fc,0x7ff0000000000000,2 +np.float64,0x800881cbf1710398,0x3ff0000000000000,2 +np.float64,0x80003e6abbe07cd6,0x3ff0000000000000,2 +np.float64,0xbfef6a998afed533,0x3ff859b7852d1b4d,2 +np.float64,0x3fd4eb7577a9d6eb,0x3ff0dcc601261aab,2 +np.float64,0xbfc9c12811338250,0x3ff05331268b05c8,2 +np.float64,0x7fddf84e5e3bf09c,0x7ff0000000000000,2 
+np.float64,0xbfd4d6fbbc29adf8,0x3ff0db12db19d187,2 +np.float64,0x80077892bfaef126,0x3ff0000000000000,2 +np.float64,0xffae9d49543d3a90,0x7ff0000000000000,2 +np.float64,0xbfd8bef219317de4,0x3ff136034e5d2f1b,2 +np.float64,0xffe89c74ddb138e9,0x7ff0000000000000,2 +np.float64,0x8003b6bbb7e76d78,0x3ff0000000000000,2 +np.float64,0x315a4e8462b4b,0x3ff0000000000000,2 +np.float64,0x800ee616edddcc2e,0x3ff0000000000000,2 +np.float64,0xdfb27f97bf650,0x3ff0000000000000,2 +np.float64,0x8004723dc328e47c,0x3ff0000000000000,2 +np.float64,0xbfe529500daa52a0,0x3ff3a0b9b33fc84c,2 +np.float64,0xbfe4e46a7ce9c8d5,0x3ff3886ce0f92612,2 +np.float64,0xbf52003680240000,0x3ff00000a203d61a,2 +np.float64,0xffd3400458268008,0x7ff0000000000000,2 +np.float64,0x80076deb444edbd7,0x3ff0000000000000,2 +np.float64,0xa612f6c14c27,0x3ff0000000000000,2 +np.float64,0xbfd41c74c9a838ea,0x3ff0cbe61e16aecf,2 +np.float64,0x43f464a887e8d,0x3ff0000000000000,2 +np.float64,0x800976e748b2edcf,0x3ff0000000000000,2 +np.float64,0xffc79d6ba12f3ad8,0x7ff0000000000000,2 +np.float64,0xffd6dbcb022db796,0x7ff0000000000000,2 +np.float64,0xffd6a9672a2d52ce,0x7ff0000000000000,2 +np.float64,0x3fe95dcfa632bb9f,0x3ff54bbad2ee919e,2 +np.float64,0x3febadd2e1375ba6,0x3ff65e336c47c018,2 +np.float64,0x7fd47c37d828f86f,0x7ff0000000000000,2 +np.float64,0xbfd4ea59e0a9d4b4,0x3ff0dcae6af3e443,2 +np.float64,0x2c112afc58226,0x3ff0000000000000,2 +np.float64,0x8008122bced02458,0x3ff0000000000000,2 +np.float64,0x7fe7105ab3ee20b4,0x7ff0000000000000,2 +np.float64,0x80089634df312c6a,0x3ff0000000000000,2 +np.float64,0x68e9fbc8d1d40,0x3ff0000000000000,2 +np.float64,0xbfec1e1032f83c20,0x3ff69590b9f18ea8,2 +np.float64,0xbfedf181623be303,0x3ff787ef48935dc6,2 +np.float64,0xffe8600457f0c008,0x7ff0000000000000,2 +np.float64,0x7a841ec6f5084,0x3ff0000000000000,2 +np.float64,0x459a572e8b34c,0x3ff0000000000000,2 +np.float64,0x3fe8a232bef14465,0x3ff4fac1780f731e,2 +np.float64,0x3fcb37597d366eb3,0x3ff05cf08ab14ebd,2 +np.float64,0xbfb0261d00204c38,0x3ff00826fb86ca8a,2 +np.float64,0x3fc6e7a6dd2dcf4e,0x3ff041c1222ffa79,2 +np.float64,0xee65dd03dccbc,0x3ff0000000000000,2 +np.float64,0xffe26fdc23e4dfb8,0x7ff0000000000000,2 +np.float64,0x7fe8d6c8cab1ad91,0x7ff0000000000000,2 +np.float64,0xbfeb64bf2676c97e,0x3ff63abb8607828c,2 +np.float64,0x3fd28417b425082f,0x3ff0ac9eb22a732b,2 +np.float64,0xbfd26835b3a4d06c,0x3ff0aa94c48fb6d2,2 +np.float64,0xffec617a01b8c2f3,0x7ff0000000000000,2 +np.float64,0xe1bfff01c3800,0x3ff0000000000000,2 +np.float64,0x3fd4def913a9bdf4,0x3ff0dbbc7271046f,2 +np.float64,0x94f4c17129e98,0x3ff0000000000000,2 +np.float64,0x8009b2eaa33365d6,0x3ff0000000000000,2 +np.float64,0x3fd9633b41b2c678,0x3ff1468388bdfb65,2 +np.float64,0xffe0ae5c80e15cb8,0x7ff0000000000000,2 +np.float64,0x7fdfc35996bf86b2,0x7ff0000000000000,2 +np.float64,0x3fcfc5bdc23f8b7c,0x3ff07ed5caa4545c,2 +np.float64,0xd48b4907a9169,0x3ff0000000000000,2 +np.float64,0xbfe0a2cc52614598,0x3ff2361665895d95,2 +np.float64,0xbfe9068f90720d1f,0x3ff525b82491a1a5,2 +np.float64,0x4238b9208472,0x3ff0000000000000,2 +np.float64,0x800e6b2bf69cd658,0x3ff0000000000000,2 +np.float64,0x7fb638b6ae2c716c,0x7ff0000000000000,2 +np.float64,0x7fe267641764cec7,0x7ff0000000000000,2 +np.float64,0xffc0933d3521267c,0x7ff0000000000000,2 +np.float64,0x7fddfdfb533bfbf6,0x7ff0000000000000,2 +np.float64,0xced2a8e99da55,0x3ff0000000000000,2 +np.float64,0x2a80d5165501b,0x3ff0000000000000,2 +np.float64,0xbfeead2ab63d5a55,0x3ff7eeb5cbcfdcab,2 +np.float64,0x80097f6f92f2fee0,0x3ff0000000000000,2 +np.float64,0x3fee1f29b77c3e54,0x3ff7a0a58c13df62,2 
+np.float64,0x3f9d06b8383a0d70,0x3ff001a54a2d8cf8,2 +np.float64,0xbfc8b41d3f31683c,0x3ff04c85379dd6b0,2 +np.float64,0xffd2a04c1e254098,0x7ff0000000000000,2 +np.float64,0xbfb71c01e02e3800,0x3ff010b34220e838,2 +np.float64,0xbfe69249ef6d2494,0x3ff425e48d1e938b,2 +np.float64,0xffefffffffffffff,0x7ff0000000000000,2 +np.float64,0x3feb1d52fbf63aa6,0x3ff618813ae922d7,2 +np.float64,0x7fb8d1a77e31a34e,0x7ff0000000000000,2 +np.float64,0xffc3cfc4ed279f88,0x7ff0000000000000,2 +np.float64,0x2164b9fc42c98,0x3ff0000000000000,2 +np.float64,0x3fbb868cee370d1a,0x3ff017b31b0d4d27,2 +np.float64,0x3fcd6dea583adbd5,0x3ff06cbd16bf44a0,2 +np.float64,0xbfecd041d479a084,0x3ff6efb25f61012d,2 +np.float64,0xbfb0552e6e20aa60,0x3ff00856ca83834a,2 +np.float64,0xe6293cbfcc528,0x3ff0000000000000,2 +np.float64,0x7fba58394034b072,0x7ff0000000000000,2 +np.float64,0x33bc96d467794,0x3ff0000000000000,2 +np.float64,0xffe90ea86bf21d50,0x7ff0000000000000,2 +np.float64,0xbfc626ea6d2c4dd4,0x3ff03d7e01ec3849,2 +np.float64,0x65b56fe4cb6af,0x3ff0000000000000,2 +np.float64,0x3fea409fb7f4813f,0x3ff5b171deab0ebd,2 +np.float64,0x3fe849c1df709384,0x3ff4d59063ff98c4,2 +np.float64,0x169073082d20f,0x3ff0000000000000,2 +np.float64,0xcc8b6add9916e,0x3ff0000000000000,2 +np.float64,0xbfef3d78d5fe7af2,0x3ff83fecc26abeea,2 +np.float64,0x3fe8c65a4a718cb4,0x3ff50a23bfeac7df,2 +np.float64,0x3fde9fa5c8bd3f4c,0x3ff1ddeb12b9d623,2 +np.float64,0xffe2af536da55ea6,0x7ff0000000000000,2 +np.float64,0x800186d0b0c30da2,0x3ff0000000000000,2 +np.float64,0x3fe9ba3c1d737478,0x3ff574ab2bf3a560,2 +np.float64,0xbfe1489c46a29138,0x3ff2641d36b30e21,2 +np.float64,0xbfe4b6b7c0e96d70,0x3ff37880ac8b0540,2 +np.float64,0x800e66ad82fccd5b,0x3ff0000000000000,2 +np.float64,0x7ff0000000000000,0x7ff0000000000000,2 +np.float64,0x7febb0fd477761fa,0x7ff0000000000000,2 +np.float64,0xbfdc433f2eb8867e,0x3ff195ec2a6cce27,2 +np.float64,0x3fe12c5a172258b4,0x3ff25c225b8a34bb,2 +np.float64,0xbfef6f116c3ede23,0x3ff85c47eaed49a0,2 +np.float64,0x800af6f60f35edec,0x3ff0000000000000,2 +np.float64,0xffe567999a2acf32,0x7ff0000000000000,2 +np.float64,0xbfc5ac5ae72b58b4,0x3ff03adb50ec04f3,2 +np.float64,0x3fea1b57e23436b0,0x3ff5a06f98541767,2 +np.float64,0x7fcc3e36fb387c6d,0x7ff0000000000000,2 +np.float64,0x8000c8dc698191ba,0x3ff0000000000000,2 +np.float64,0x3fee5085ed7ca10c,0x3ff7bb92f61245b8,2 +np.float64,0x7fbb9f803a373eff,0x7ff0000000000000,2 +np.float64,0xbfe1e5e806e3cbd0,0x3ff2918f2d773007,2 +np.float64,0x8008f8c3f3b1f188,0x3ff0000000000000,2 +np.float64,0x7fe53df515ea7be9,0x7ff0000000000000,2 +np.float64,0x7fdbb87fb3b770fe,0x7ff0000000000000,2 +np.float64,0x3fefcc0f50ff981f,0x3ff89210a6a04e6b,2 +np.float64,0x3fe33f87d0267f10,0x3ff2fb989ea4f2bc,2 +np.float64,0x1173992022e8,0x3ff0000000000000,2 +np.float64,0x3fef534632bea68c,0x3ff84c5ca9713ff9,2 +np.float64,0x3fc5991d552b3238,0x3ff03a72bfdb6e5f,2 +np.float64,0x3fdad90dc1b5b21c,0x3ff16db868180034,2 +np.float64,0xffe20b8078e41700,0x7ff0000000000000,2 +np.float64,0x7fdf409a82be8134,0x7ff0000000000000,2 +np.float64,0x3fccb7e691396fcd,0x3ff06786b6ccdbcb,2 +np.float64,0xffe416e0b7282dc1,0x7ff0000000000000,2 +np.float64,0xffe3a8a981275152,0x7ff0000000000000,2 +np.float64,0x3fd9c8bd31b3917c,0x3ff150ee6f5f692f,2 +np.float64,0xffeab6fef6356dfd,0x7ff0000000000000,2 +np.float64,0x3fe9c5e3faf38bc8,0x3ff579e18c9bd548,2 +np.float64,0x800b173e44762e7d,0x3ff0000000000000,2 +np.float64,0xffe2719db764e33b,0x7ff0000000000000,2 +np.float64,0x3fd1fcf31223f9e6,0x3ff0a2da7ad99856,2 +np.float64,0x80082c4afcd05896,0x3ff0000000000000,2 
+np.float64,0xa56e5e4b4adcc,0x3ff0000000000000,2 +np.float64,0xffbbbddab2377bb8,0x7ff0000000000000,2 +np.float64,0x3b3927c076726,0x3ff0000000000000,2 +np.float64,0x3fec03fd58f807fb,0x3ff6889b8a774728,2 +np.float64,0xbfaa891fb4351240,0x3ff00580987bd914,2 +np.float64,0x7fb4800c4a290018,0x7ff0000000000000,2 +np.float64,0xffbb5d2b6036ba58,0x7ff0000000000000,2 +np.float64,0x7fd6608076acc100,0x7ff0000000000000,2 +np.float64,0x31267e4c624d1,0x3ff0000000000000,2 +np.float64,0x33272266664e5,0x3ff0000000000000,2 +np.float64,0x47bb37f28f768,0x3ff0000000000000,2 +np.float64,0x3fe134bb4ee26977,0x3ff25e7ea647a928,2 +np.float64,0xbfe2b5f42ba56be8,0x3ff2d05cbdc7344b,2 +np.float64,0xbfe0e013fd61c028,0x3ff246dfce572914,2 +np.float64,0x7fecedcda4f9db9a,0x7ff0000000000000,2 +np.float64,0x8001816c2da302d9,0x3ff0000000000000,2 +np.float64,0xffced8b65b3db16c,0x7ff0000000000000,2 +np.float64,0xffdc1d4a0b383a94,0x7ff0000000000000,2 +np.float64,0x7fe94e7339f29ce5,0x7ff0000000000000,2 +np.float64,0x33fb846667f71,0x3ff0000000000000,2 +np.float64,0x800a1380e9542702,0x3ff0000000000000,2 +np.float64,0x800b74eaa776e9d6,0x3ff0000000000000,2 +np.float64,0x5681784aad030,0x3ff0000000000000,2 +np.float64,0xbfee0eb7917c1d6f,0x3ff797b949f7f6b4,2 +np.float64,0xffe4ec5fd2a9d8bf,0x7ff0000000000000,2 +np.float64,0xbfcd7401dd3ae804,0x3ff06cea52c792c0,2 +np.float64,0x800587563beb0ead,0x3ff0000000000000,2 +np.float64,0x3fc15c6f3322b8de,0x3ff025bbd030166d,2 +np.float64,0x7feb6b4caf76d698,0x7ff0000000000000,2 +np.float64,0x7fe136ef82a26dde,0x7ff0000000000000,2 +np.float64,0xf592dac3eb25c,0x3ff0000000000000,2 +np.float64,0x7fd300baf6a60175,0x7ff0000000000000,2 +np.float64,0x7fc880de9e3101bc,0x7ff0000000000000,2 +np.float64,0x7fe7a1aa5caf4354,0x7ff0000000000000,2 +np.float64,0x2f9b8e0e5f373,0x3ff0000000000000,2 +np.float64,0xffcc9071993920e4,0x7ff0000000000000,2 +np.float64,0x8009e151b313c2a4,0x3ff0000000000000,2 +np.float64,0xbfd46e2d18a8dc5a,0x3ff0d27a7b37c1ae,2 +np.float64,0x3fe65c7961acb8f3,0x3ff4116946062a4c,2 +np.float64,0x7fd31b371626366d,0x7ff0000000000000,2 +np.float64,0x98dc924d31b93,0x3ff0000000000000,2 +np.float64,0x268bef364d17f,0x3ff0000000000000,2 +np.float64,0x7fd883ba56310774,0x7ff0000000000000,2 +np.float64,0x3fc53f01a32a7e03,0x3ff0388dea9cd63e,2 +np.float64,0xffe1ea8c0563d518,0x7ff0000000000000,2 +np.float64,0x3fd0bf0e63a17e1d,0x3ff08d0577f5ffa6,2 +np.float64,0x7fef42418f7e8482,0x7ff0000000000000,2 +np.float64,0x8000bccd38c1799b,0x3ff0000000000000,2 +np.float64,0xbfe6c48766ed890f,0x3ff43936fa4048c8,2 +np.float64,0xbfb2a38f3a254720,0x3ff00adc7f7b2822,2 +np.float64,0x3fd5262b2eaa4c56,0x3ff0e1af492c08f5,2 +np.float64,0x80065b4691ecb68e,0x3ff0000000000000,2 +np.float64,0xfb6b9e9ff6d74,0x3ff0000000000000,2 +np.float64,0x8006c71e6ecd8e3e,0x3ff0000000000000,2 +np.float64,0x3fd0a3e43ca147c8,0x3ff08b3ad7b42485,2 +np.float64,0xbfc82d8607305b0c,0x3ff04949d6733ef6,2 +np.float64,0xde048c61bc092,0x3ff0000000000000,2 +np.float64,0xffcf73e0fa3ee7c0,0x7ff0000000000000,2 +np.float64,0xbfe8639d7830c73b,0x3ff4e05f97948376,2 +np.float64,0x8010000000000000,0x3ff0000000000000,2 +np.float64,0x67f01a2acfe04,0x3ff0000000000000,2 +np.float64,0x3fe222e803e445d0,0x3ff2a3a75e5f29d8,2 +np.float64,0xffef84c6387f098b,0x7ff0000000000000,2 +np.float64,0x3fe5969c1e6b2d38,0x3ff3c80130462bb2,2 +np.float64,0x8009f56953d3ead3,0x3ff0000000000000,2 +np.float64,0x3fe05c9b6360b937,0x3ff2232e1cba5617,2 +np.float64,0x3fd8888d63b1111b,0x3ff130a5b788d52f,2 +np.float64,0xffe3a9e6f26753ce,0x7ff0000000000000,2 +np.float64,0x800e2aaa287c5554,0x3ff0000000000000,2 
+np.float64,0x3fea8d6c82351ad9,0x3ff5d4d8cde9a11d,2 +np.float64,0x7feef700723dee00,0x7ff0000000000000,2 +np.float64,0x3fa5cb77242b96e0,0x3ff003b62b3e50f1,2 +np.float64,0x7fb68f0a862d1e14,0x7ff0000000000000,2 +np.float64,0x7fb97ee83432fdcf,0x7ff0000000000000,2 +np.float64,0x7fd74a78632e94f0,0x7ff0000000000000,2 +np.float64,0x7fcfe577713fcaee,0x7ff0000000000000,2 +np.float64,0xffe192ee5ea325dc,0x7ff0000000000000,2 +np.float64,0x477d6ae48efae,0x3ff0000000000000,2 +np.float64,0xffe34d5237669aa4,0x7ff0000000000000,2 +np.float64,0x7fe3ce8395a79d06,0x7ff0000000000000,2 +np.float64,0x80019c01ffa33805,0x3ff0000000000000,2 +np.float64,0x74b5b56ce96b7,0x3ff0000000000000,2 +np.float64,0x7fe05ecdeda0bd9b,0x7ff0000000000000,2 +np.float64,0xffe9693eb232d27d,0x7ff0000000000000,2 +np.float64,0xffd2be2c7da57c58,0x7ff0000000000000,2 +np.float64,0x800dbd5cbc1b7aba,0x3ff0000000000000,2 +np.float64,0xbfa36105d426c210,0x3ff002ef2e3a87f7,2 +np.float64,0x800b2d69fb765ad4,0x3ff0000000000000,2 +np.float64,0xbfdb81c9a9370394,0x3ff1802d409cbf7a,2 +np.float64,0x7fd481d014a9039f,0x7ff0000000000000,2 +np.float64,0xffe66c3c1fecd878,0x7ff0000000000000,2 +np.float64,0x3fc55865192ab0c8,0x3ff03915b51e8839,2 +np.float64,0xd6a78987ad4f1,0x3ff0000000000000,2 +np.float64,0x800c6cc80d58d990,0x3ff0000000000000,2 +np.float64,0x979435a12f29,0x3ff0000000000000,2 +np.float64,0xbfbd971e7a3b2e40,0x3ff01b647e45f5a6,2 +np.float64,0x80067565bfeceacc,0x3ff0000000000000,2 +np.float64,0x8001ad689ce35ad2,0x3ff0000000000000,2 +np.float64,0x7fa43253dc2864a7,0x7ff0000000000000,2 +np.float64,0xbfe3dda307e7bb46,0x3ff32ef99a2efe1d,2 +np.float64,0x3fe5d7b395ebaf68,0x3ff3dfd33cdc8ef4,2 +np.float64,0xd94cc9c3b2999,0x3ff0000000000000,2 +np.float64,0x3fee5a513fbcb4a2,0x3ff7c0f17b876ce5,2 +np.float64,0xffe27761fa64eec4,0x7ff0000000000000,2 +np.float64,0x3feb788119b6f102,0x3ff64446f67f4efa,2 +np.float64,0xbfed6e10dffadc22,0x3ff741d5ef610ca0,2 +np.float64,0x7fe73cf98b2e79f2,0x7ff0000000000000,2 +np.float64,0x7847d09af08fb,0x3ff0000000000000,2 +np.float64,0x29ded2da53bdb,0x3ff0000000000000,2 +np.float64,0xbfe51c1ec1aa383e,0x3ff39c0b7cf832e2,2 +np.float64,0xbfeafd5e65f5fabd,0x3ff609548a787f57,2 +np.float64,0x3fd872a26fb0e545,0x3ff12e7fbd95505c,2 +np.float64,0x7fed6b7c1b7ad6f7,0x7ff0000000000000,2 +np.float64,0xffe7ba9ec16f753d,0x7ff0000000000000,2 +np.float64,0x7f89b322f0336645,0x7ff0000000000000,2 +np.float64,0xbfad1677383a2cf0,0x3ff0069ca67e7baa,2 +np.float64,0x3fe0906d04a120da,0x3ff2311b04b7bfef,2 +np.float64,0xffe4b3c9d4296793,0x7ff0000000000000,2 +np.float64,0xbfe476bb0ce8ed76,0x3ff36277d2921a74,2 +np.float64,0x7fc35655cf26acab,0x7ff0000000000000,2 +np.float64,0x7fe9980f0373301d,0x7ff0000000000000,2 +np.float64,0x9e6e04cb3cdc1,0x3ff0000000000000,2 +np.float64,0x800b89e0afb713c2,0x3ff0000000000000,2 +np.float64,0x800bd951a3f7b2a4,0x3ff0000000000000,2 +np.float64,0x29644a9e52c8a,0x3ff0000000000000,2 +np.float64,0x3fe1be2843637c51,0x3ff285e90d8387e4,2 +np.float64,0x7fa233cce4246799,0x7ff0000000000000,2 +np.float64,0xbfcfb7bc2d3f6f78,0x3ff07e657de3e2ed,2 +np.float64,0xffd7c953e7af92a8,0x7ff0000000000000,2 +np.float64,0xbfc5bbaf772b7760,0x3ff03b2ee4febb1e,2 +np.float64,0x8007b7315a6f6e63,0x3ff0000000000000,2 +np.float64,0xbfe906d902320db2,0x3ff525d7e16acfe0,2 +np.float64,0x3fde33d8553c67b1,0x3ff1d09faa19aa53,2 +np.float64,0x61fe76a0c3fcf,0x3ff0000000000000,2 +np.float64,0xa75e355b4ebc7,0x3ff0000000000000,2 +np.float64,0x3fc9e6d86033cdb1,0x3ff05426299c7064,2 +np.float64,0x7fd83f489eb07e90,0x7ff0000000000000,2 +np.float64,0x8000000000000001,0x3ff0000000000000,2 
+np.float64,0x80014434ae62886a,0x3ff0000000000000,2 +np.float64,0xbfe21af9686435f3,0x3ff2a149338bdefe,2 +np.float64,0x9354e6cd26a9d,0x3ff0000000000000,2 +np.float64,0xb42b95f768573,0x3ff0000000000000,2 +np.float64,0xbfecb4481bb96890,0x3ff6e15d269dd651,2 +np.float64,0x3f97842ae82f0840,0x3ff0011485156f28,2 +np.float64,0xffdef63d90bdec7c,0x7ff0000000000000,2 +np.float64,0x7fe511a8d36a2351,0x7ff0000000000000,2 +np.float64,0xbf8cb638a0396c80,0x3ff000670c318fb6,2 +np.float64,0x3fe467e1f668cfc4,0x3ff35d65f93ccac6,2 +np.float64,0xbfce7d88f03cfb10,0x3ff074c22475fe5b,2 +np.float64,0x6d0a4994da14a,0x3ff0000000000000,2 +np.float64,0xbfb3072580260e48,0x3ff00b51d3913e9f,2 +np.float64,0x8008fcde36b1f9bd,0x3ff0000000000000,2 +np.float64,0x3fd984df66b309c0,0x3ff149f29125eca4,2 +np.float64,0xffee2a10fe7c5421,0x7ff0000000000000,2 +np.float64,0x80039168ace722d2,0x3ff0000000000000,2 +np.float64,0xffda604379b4c086,0x7ff0000000000000,2 +np.float64,0xffdc6a405bb8d480,0x7ff0000000000000,2 +np.float64,0x3fe62888b26c5111,0x3ff3fdda754c4372,2 +np.float64,0x8008b452cb5168a6,0x3ff0000000000000,2 +np.float64,0x6165d540c2cbb,0x3ff0000000000000,2 +np.float64,0xbfee0c04d17c180a,0x3ff796431c64bcbe,2 +np.float64,0x800609b8448c1371,0x3ff0000000000000,2 +np.float64,0x800fc3fca59f87f9,0x3ff0000000000000,2 +np.float64,0x77f64848efeca,0x3ff0000000000000,2 +np.float64,0x8007cf522d8f9ea5,0x3ff0000000000000,2 +np.float64,0xbfe9fb0b93f3f617,0x3ff591cb0052e22c,2 +np.float64,0x7fd569d5f0aad3ab,0x7ff0000000000000,2 +np.float64,0x7fe5cf489d6b9e90,0x7ff0000000000000,2 +np.float64,0x7fd6e193e92dc327,0x7ff0000000000000,2 +np.float64,0xf78988a5ef131,0x3ff0000000000000,2 +np.float64,0x3fe8f97562b1f2eb,0x3ff5201080fbc12d,2 +np.float64,0x7febfd69d7b7fad3,0x7ff0000000000000,2 +np.float64,0xffc07b5c1720f6b8,0x7ff0000000000000,2 +np.float64,0xbfd966926832cd24,0x3ff146da9adf492e,2 +np.float64,0x7fef5bd9edfeb7b3,0x7ff0000000000000,2 +np.float64,0xbfd2afbc96255f7a,0x3ff0afd601febf44,2 +np.float64,0x7fdd4ea6293a9d4b,0x7ff0000000000000,2 +np.float64,0xbfe8a1e916b143d2,0x3ff4faa23c2793e5,2 +np.float64,0x800188fcd8c311fa,0x3ff0000000000000,2 +np.float64,0xbfe30803f1661008,0x3ff2e9fc729baaee,2 +np.float64,0x7fefffffffffffff,0x7ff0000000000000,2 +np.float64,0x3fd287bec3250f7e,0x3ff0ace34d3102f6,2 +np.float64,0x1f0ee9443e1de,0x3ff0000000000000,2 +np.float64,0xbfd92f73da325ee8,0x3ff14143e4fa2c5a,2 +np.float64,0x3fed7c9bdffaf938,0x3ff74984168734d3,2 +np.float64,0x8002c4d1696589a4,0x3ff0000000000000,2 +np.float64,0xfe03011bfc060,0x3ff0000000000000,2 +np.float64,0x7f7a391e6034723c,0x7ff0000000000000,2 +np.float64,0xffd6fd46f82dfa8e,0x7ff0000000000000,2 +np.float64,0xbfd7520a742ea414,0x3ff112f1ba5d4f91,2 +np.float64,0x8009389d8812713b,0x3ff0000000000000,2 +np.float64,0x7fefb846aaff708c,0x7ff0000000000000,2 +np.float64,0x3fd98a0983331413,0x3ff14a79efb8adbf,2 +np.float64,0xbfd897158db12e2c,0x3ff132137902cf3e,2 +np.float64,0xffc4048d5928091c,0x7ff0000000000000,2 +np.float64,0x80036ae46046d5ca,0x3ff0000000000000,2 +np.float64,0x7faba7ed3c374fd9,0x7ff0000000000000,2 +np.float64,0xbfec4265e1f884cc,0x3ff6a7b8602422c9,2 +np.float64,0xaa195e0b5432c,0x3ff0000000000000,2 +np.float64,0x3feac15d317582ba,0x3ff5ed115758145f,2 +np.float64,0x6c13a5bcd8275,0x3ff0000000000000,2 +np.float64,0xbfed20b8883a4171,0x3ff7194dbd0dc988,2 +np.float64,0x800cde65c899bccc,0x3ff0000000000000,2 +np.float64,0x7c72912af8e53,0x3ff0000000000000,2 +np.float64,0x3fe49d2bb4e93a57,0x3ff36fab3aba15d4,2 +np.float64,0xbfd598fa02ab31f4,0x3ff0eb72fc472025,2 +np.float64,0x8007a191712f4324,0x3ff0000000000000,2 
+np.float64,0xbfdeb14872bd6290,0x3ff1e01ca83f35fd,2 +np.float64,0xbfe1da46b3e3b48e,0x3ff28e23ad2f5615,2 +np.float64,0x800a2f348e745e69,0x3ff0000000000000,2 +np.float64,0xbfee66928afccd25,0x3ff7c7ac7dbb3273,2 +np.float64,0xffd78a0a2b2f1414,0x7ff0000000000000,2 +np.float64,0x7fc5fa80b82bf500,0x7ff0000000000000,2 +np.float64,0x800e6d7260dcdae5,0x3ff0000000000000,2 +np.float64,0xbfd6cff2aaad9fe6,0x3ff106f78ee61642,2 +np.float64,0x7fe1041d1d220839,0x7ff0000000000000,2 +np.float64,0xbfdf75586cbeeab0,0x3ff1f8dbaa7e57f0,2 +np.float64,0xffdcaae410b955c8,0x7ff0000000000000,2 +np.float64,0x800fe5e0d1ffcbc2,0x3ff0000000000000,2 +np.float64,0x800d7999527af333,0x3ff0000000000000,2 +np.float64,0xbfe62c233bac5846,0x3ff3ff34220a204c,2 +np.float64,0x7fe99bbff8f3377f,0x7ff0000000000000,2 +np.float64,0x7feeaf471d3d5e8d,0x7ff0000000000000,2 +np.float64,0xd5904ff5ab20a,0x3ff0000000000000,2 +np.float64,0x3fd07aae3320f55c,0x3ff08888c227c968,2 +np.float64,0x7fea82b8dff50571,0x7ff0000000000000,2 +np.float64,0xffef2db9057e5b71,0x7ff0000000000000,2 +np.float64,0xbfe2077fef640f00,0x3ff29b7dd0d39d36,2 +np.float64,0xbfe09a4d7c61349b,0x3ff233c7e88881f4,2 +np.float64,0x3fda50c4cbb4a188,0x3ff15f28a71deee7,2 +np.float64,0x7fe7d9ee6b2fb3dc,0x7ff0000000000000,2 +np.float64,0x3febbf6faeb77edf,0x3ff666d13682ea93,2 +np.float64,0xc401a32988035,0x3ff0000000000000,2 +np.float64,0xbfeab30aa8f56615,0x3ff5e65dcc6603f8,2 +np.float64,0x92c8cea32591a,0x3ff0000000000000,2 +np.float64,0xbff0000000000000,0x3ff8b07551d9f550,2 +np.float64,0xbfbddfb4dc3bbf68,0x3ff01bebaec38faa,2 +np.float64,0xbfd8de3e2a31bc7c,0x3ff1391f4830d20b,2 +np.float64,0xffc83a8f8a307520,0x7ff0000000000000,2 +np.float64,0x3fee026ef53c04de,0x3ff7911337085827,2 +np.float64,0x7fbaf380b235e700,0x7ff0000000000000,2 +np.float64,0xffe5b89fa62b713f,0x7ff0000000000000,2 +np.float64,0xbfdc1ff54ab83fea,0x3ff191e8c0b60bb2,2 +np.float64,0x6ae3534cd5c6b,0x3ff0000000000000,2 +np.float64,0xbfea87e558750fcb,0x3ff5d24846013794,2 +np.float64,0xffe0f467bee1e8cf,0x7ff0000000000000,2 +np.float64,0x7fee3b0dc7bc761b,0x7ff0000000000000,2 +np.float64,0x3fed87521afb0ea4,0x3ff74f2f5cd36a5c,2 +np.float64,0x7b3c9882f6794,0x3ff0000000000000,2 +np.float64,0x7fdd1a62243a34c3,0x7ff0000000000000,2 +np.float64,0x800f1dc88d3e3b91,0x3ff0000000000000,2 +np.float64,0x7fc3213cfa264279,0x7ff0000000000000,2 +np.float64,0x3fe40e0f3d681c1e,0x3ff33f135e9d5ded,2 +np.float64,0x7febf14e51f7e29c,0x7ff0000000000000,2 +np.float64,0xffe96c630c72d8c5,0x7ff0000000000000,2 +np.float64,0x7fdd82fbe7bb05f7,0x7ff0000000000000,2 +np.float64,0xbf9a6a0b1034d420,0x3ff0015ce009f7d8,2 +np.float64,0xbfceb4f8153d69f0,0x3ff0766e3ecc77df,2 +np.float64,0x3fd9de31e633bc64,0x3ff15327b794a16e,2 +np.float64,0x3faa902a30352054,0x3ff00583848d1969,2 +np.float64,0x0,0x3ff0000000000000,2 +np.float64,0x3fbe3459c43c68b4,0x3ff01c8af6710ef6,2 +np.float64,0xbfa8df010031be00,0x3ff004d5632dc9f5,2 +np.float64,0x7fbcf6cf2a39ed9d,0x7ff0000000000000,2 +np.float64,0xffe4236202a846c4,0x7ff0000000000000,2 +np.float64,0x3fd35ed52e26bdaa,0x3ff0bd0b231f11f7,2 +np.float64,0x7fe7a2df532f45be,0x7ff0000000000000,2 +np.float64,0xffe32f8315665f06,0x7ff0000000000000,2 +np.float64,0x7fe1a69f03e34d3d,0x7ff0000000000000,2 +np.float64,0x7fa5542b742aa856,0x7ff0000000000000,2 +np.float64,0x3fe84e9f8ef09d3f,0x3ff4d79816359765,2 +np.float64,0x29076fe6520ef,0x3ff0000000000000,2 +np.float64,0xffd70894f7ae112a,0x7ff0000000000000,2 +np.float64,0x800188edcbe311dc,0x3ff0000000000000,2 +np.float64,0x3fe2c7acda258f5a,0x3ff2d5dad4617703,2 +np.float64,0x3f775d41a02ebb00,0x3ff000110f212445,2 
+np.float64,0x7fe8a084d1714109,0x7ff0000000000000,2 +np.float64,0x3fe31562d8a62ac6,0x3ff2ee35055741cd,2 +np.float64,0xbfd195d4d1a32baa,0x3ff09b98a50c151b,2 +np.float64,0xffaae9ff0c35d400,0x7ff0000000000000,2 +np.float64,0xff819866502330c0,0x7ff0000000000000,2 +np.float64,0x7fddc64815bb8c8f,0x7ff0000000000000,2 +np.float64,0xbfd442b428288568,0x3ff0cef70aa73ae6,2 +np.float64,0x8002e7625aa5cec5,0x3ff0000000000000,2 +np.float64,0x7fe8d4f70e71a9ed,0x7ff0000000000000,2 +np.float64,0xbfc3bd015f277a04,0x3ff030cbf16f29d9,2 +np.float64,0x3fd315d5baa62bab,0x3ff0b77a551a5335,2 +np.float64,0x7fa638b4642c7168,0x7ff0000000000000,2 +np.float64,0x3fdea8b795bd516f,0x3ff1df0bb70cdb79,2 +np.float64,0xbfd78754762f0ea8,0x3ff117ee0f29abed,2 +np.float64,0x8009f6a37633ed47,0x3ff0000000000000,2 +np.float64,0x3fea1daf75343b5f,0x3ff5a1804789bf13,2 +np.float64,0x3fd044b6c0a0896e,0x3ff0850b7297d02f,2 +np.float64,0x8003547a9c86a8f6,0x3ff0000000000000,2 +np.float64,0x3fa6c2cd782d859b,0x3ff0040c4ac8f44a,2 +np.float64,0x3fe225baaae44b76,0x3ff2a47f5e1f5e85,2 +np.float64,0x8000000000000000,0x3ff0000000000000,2 +np.float64,0x3fcb53da8736a7b8,0x3ff05db45af470ac,2 +np.float64,0x80079f8f140f3f1f,0x3ff0000000000000,2 +np.float64,0xbfcd1d7e2b3a3afc,0x3ff06a6b6845d05f,2 +np.float64,0x96df93672dbf3,0x3ff0000000000000,2 +np.float64,0xdef86e43bdf0e,0x3ff0000000000000,2 +np.float64,0xbfec05a09db80b41,0x3ff6896b768eea08,2 +np.float64,0x7fe3ff91d267ff23,0x7ff0000000000000,2 +np.float64,0xffea3eaa07347d53,0x7ff0000000000000,2 +np.float64,0xbfebde1cc1f7bc3a,0x3ff675e34ac2afc2,2 +np.float64,0x629bcde8c537a,0x3ff0000000000000,2 +np.float64,0xbfdde4fcff3bc9fa,0x3ff1c7061d21f0fe,2 +np.float64,0x3fee60fd003cc1fa,0x3ff7c49af3878a51,2 +np.float64,0x3fe5c92ac32b9256,0x3ff3da7a7929588b,2 +np.float64,0xbfe249c78f64938f,0x3ff2af52a06f1a50,2 +np.float64,0xbfc6de9dbe2dbd3c,0x3ff0418d284ee29f,2 +np.float64,0xffc8ef094631de14,0x7ff0000000000000,2 +np.float64,0x3fdef05f423de0bf,0x3ff1e800caba8ab5,2 +np.float64,0xffc1090731221210,0x7ff0000000000000,2 +np.float64,0xbfedec9b5fbbd937,0x3ff7854b6792a24a,2 +np.float64,0xbfb873507630e6a0,0x3ff012b23b3b7a67,2 +np.float64,0xbfe3cd6692679acd,0x3ff3299d6936ec4b,2 +np.float64,0xbfb107c890220f90,0x3ff0091122162472,2 +np.float64,0xbfe4e6ee48e9cddc,0x3ff3894e5a5e70a6,2 +np.float64,0xffe6fa3413edf468,0x7ff0000000000000,2 +np.float64,0x3fe2faf79b65f5ef,0x3ff2e5e11fae8b54,2 +np.float64,0xbfdfeb8df9bfd71c,0x3ff208189691b15f,2 +np.float64,0x75d2d03ceba5b,0x3ff0000000000000,2 +np.float64,0x3feb48c182b69183,0x3ff62d4462eba6cb,2 +np.float64,0xffcda9f7ff3b53f0,0x7ff0000000000000,2 +np.float64,0x7fcafbdcbd35f7b8,0x7ff0000000000000,2 +np.float64,0xbfd1895523a312aa,0x3ff09aba642a78d9,2 +np.float64,0x3fe3129c3f662538,0x3ff2ed546bbfafcf,2 +np.float64,0x3fb444dee02889be,0x3ff00cd86273b964,2 +np.float64,0xbf73b32d7ee77,0x3ff0000000000000,2 +np.float64,0x3fae19904c3c3321,0x3ff00714865c498a,2 +np.float64,0x7fefbfaef5bf7f5d,0x7ff0000000000000,2 +np.float64,0x8000dc3816e1b871,0x3ff0000000000000,2 +np.float64,0x8003f957ba47f2b0,0x3ff0000000000000,2 +np.float64,0xbfe3563c7ea6ac79,0x3ff302dcebc92856,2 +np.float64,0xbfdc80fbae3901f8,0x3ff19cfe73e58092,2 +np.float64,0x8009223b04524476,0x3ff0000000000000,2 +np.float64,0x3fd95f431c32be86,0x3ff1461c21cb03f0,2 +np.float64,0x7ff4000000000000,0x7ffc000000000000,2 +np.float64,0xbfe7c12ed3ef825e,0x3ff49d59c265efcd,2 +np.float64,0x10000000000000,0x3ff0000000000000,2 +np.float64,0x7fc5e2632f2bc4c5,0x7ff0000000000000,2 +np.float64,0xffd8f6b4c7b1ed6a,0x7ff0000000000000,2 
+np.float64,0x80034b93d4069728,0x3ff0000000000000,2 +np.float64,0xffdf5d4c1dbeba98,0x7ff0000000000000,2 +np.float64,0x800bc63d70178c7b,0x3ff0000000000000,2 +np.float64,0xbfeba31ea0f7463d,0x3ff658fa27073d2b,2 +np.float64,0xbfeebeede97d7ddc,0x3ff7f89a8e80dec4,2 +np.float64,0x7feb0f1f91361e3e,0x7ff0000000000000,2 +np.float64,0xffec3158d0b862b1,0x7ff0000000000000,2 +np.float64,0x3fde51cbfbbca398,0x3ff1d44c2ff15b3d,2 +np.float64,0xd58fb2b3ab1f7,0x3ff0000000000000,2 +np.float64,0x80028b9e32e5173d,0x3ff0000000000000,2 +np.float64,0x7fea77a56c74ef4a,0x7ff0000000000000,2 +np.float64,0x3fdaabbd4a35577b,0x3ff168d82edf2fe0,2 +np.float64,0xbfe69c39cc2d3874,0x3ff429b2f4cdb362,2 +np.float64,0x3b78f5d876f20,0x3ff0000000000000,2 +np.float64,0x7fa47d116428fa22,0x7ff0000000000000,2 +np.float64,0xbfe4118b0ce82316,0x3ff3403d989f780f,2 +np.float64,0x800482e793c905d0,0x3ff0000000000000,2 +np.float64,0xbfe48e5728e91cae,0x3ff36a9020bf9d20,2 +np.float64,0x7fe078ba8860f174,0x7ff0000000000000,2 +np.float64,0x3fd80843e5b01088,0x3ff1242f401e67da,2 +np.float64,0x3feb1f6965f63ed3,0x3ff6197fc590e143,2 +np.float64,0xffa41946d8283290,0x7ff0000000000000,2 +np.float64,0xffe30de129661bc2,0x7ff0000000000000,2 +np.float64,0x3fec9c8e1ab9391c,0x3ff6d542ea2f49b4,2 +np.float64,0x3fdc3e4490387c89,0x3ff1955ae18cac37,2 +np.float64,0xffef49d9c77e93b3,0x7ff0000000000000,2 +np.float64,0xfff0000000000000,0x7ff0000000000000,2 +np.float64,0x3fe0442455608849,0x3ff21cab90067d5c,2 +np.float64,0xbfed86aebd3b0d5e,0x3ff74ed8d4b75f50,2 +np.float64,0xffe4600d2b28c01a,0x7ff0000000000000,2 +np.float64,0x7fc1e8ccff23d199,0x7ff0000000000000,2 +np.float64,0x8008d49b0091a936,0x3ff0000000000000,2 +np.float64,0xbfe4139df028273c,0x3ff340ef3c86227c,2 +np.float64,0xbfe9ab4542b3568a,0x3ff56dfe32061247,2 +np.float64,0xbfd76dd365aedba6,0x3ff11589bab5fe71,2 +np.float64,0x3fd42cf829a859f0,0x3ff0cd3844bb0e11,2 +np.float64,0x7fd077cf2e20ef9d,0x7ff0000000000000,2 +np.float64,0x3fd7505760aea0b0,0x3ff112c937b3f088,2 +np.float64,0x1f93341a3f267,0x3ff0000000000000,2 +np.float64,0x7fe3c3c1b0678782,0x7ff0000000000000,2 +np.float64,0x800f85cec97f0b9e,0x3ff0000000000000,2 +np.float64,0xd93ab121b2756,0x3ff0000000000000,2 +np.float64,0xbfef8066fd7f00ce,0x3ff8663ed7d15189,2 +np.float64,0xffe31dd4af663ba9,0x7ff0000000000000,2 +np.float64,0xbfd7ff05a6affe0c,0x3ff1234c09bb686d,2 +np.float64,0xbfe718c31fee3186,0x3ff45a0c2d0ef7b0,2 +np.float64,0x800484bf33e9097f,0x3ff0000000000000,2 +np.float64,0xffd409dad02813b6,0x7ff0000000000000,2 +np.float64,0x3fe59679896b2cf4,0x3ff3c7f49e4fbbd3,2 +np.float64,0xbfd830c54d30618a,0x3ff1281729861390,2 +np.float64,0x1d4fc81c3a9fa,0x3ff0000000000000,2 +np.float64,0x3fd334e4272669c8,0x3ff0b9d5d82894f0,2 +np.float64,0xffc827e65c304fcc,0x7ff0000000000000,2 +np.float64,0xffe2d1814aa5a302,0x7ff0000000000000,2 +np.float64,0xffd7b5b8d32f6b72,0x7ff0000000000000,2 +np.float64,0xbfdbc9f077b793e0,0x3ff18836b9106ad0,2 +np.float64,0x7fc724c2082e4983,0x7ff0000000000000,2 +np.float64,0x3fa39ed72c273da0,0x3ff00302051ce17e,2 +np.float64,0xbfe3c4c209678984,0x3ff326c4fd16b5cd,2 +np.float64,0x7fe91f6d00f23ed9,0x7ff0000000000000,2 +np.float64,0x8004ee93fea9dd29,0x3ff0000000000000,2 +np.float64,0xbfe7c32d0eaf865a,0x3ff49e290ed2ca0e,2 +np.float64,0x800ea996b29d532d,0x3ff0000000000000,2 +np.float64,0x2df9ec1c5bf3e,0x3ff0000000000000,2 +np.float64,0xabb175df5762f,0x3ff0000000000000,2 +np.float64,0xffe3fc9c8e27f938,0x7ff0000000000000,2 +np.float64,0x7fb358a62826b14b,0x7ff0000000000000,2 +np.float64,0x800aedcccaf5db9a,0x3ff0000000000000,2 
+np.float64,0xffca530c5234a618,0x7ff0000000000000,2 +np.float64,0x40f91e9681f24,0x3ff0000000000000,2 +np.float64,0x80098f4572f31e8b,0x3ff0000000000000,2 +np.float64,0xbfdc58c21fb8b184,0x3ff1986115f8fe92,2 +np.float64,0xbfebeafd40b7d5fa,0x3ff67c3cf34036e3,2 +np.float64,0x7fd108861a22110b,0x7ff0000000000000,2 +np.float64,0xff8e499ae03c9340,0x7ff0000000000000,2 +np.float64,0xbfd2f58caa25eb1a,0x3ff0b50b1bffafdf,2 +np.float64,0x3fa040c9bc208193,0x3ff002105e95aefa,2 +np.float64,0xbfd2ebc0a5a5d782,0x3ff0b44ed5a11584,2 +np.float64,0xffe237bc93a46f78,0x7ff0000000000000,2 +np.float64,0x3fd557c5eeaaaf8c,0x3ff0e5e0a575e1ba,2 +np.float64,0x7abb419ef5769,0x3ff0000000000000,2 +np.float64,0xffefa1fe353f43fb,0x7ff0000000000000,2 +np.float64,0x3fa6f80ba02df017,0x3ff0041f51fa0d76,2 +np.float64,0xbfdce79488b9cf2a,0x3ff1a8e32877beb4,2 +np.float64,0x2285f3e4450bf,0x3ff0000000000000,2 +np.float64,0x3bf7eb7277efe,0x3ff0000000000000,2 +np.float64,0xbfd5925fd3ab24c0,0x3ff0eae1c2ac2e78,2 +np.float64,0xbfed6325227ac64a,0x3ff73c14a2ad5bfe,2 +np.float64,0x8000429c02408539,0x3ff0000000000000,2 +np.float64,0xb67c21e76cf84,0x3ff0000000000000,2 +np.float64,0x3fec3d3462f87a69,0x3ff6a51e4c027eb7,2 +np.float64,0x3feae69cbcf5cd3a,0x3ff5fe9387314afd,2 +np.float64,0x7fd0c9a0ec219341,0x7ff0000000000000,2 +np.float64,0x8004adb7f6295b71,0x3ff0000000000000,2 +np.float64,0xffd61fe8bb2c3fd2,0x7ff0000000000000,2 +np.float64,0xffe7fb3834aff670,0x7ff0000000000000,2 +np.float64,0x7fd1eef163a3dde2,0x7ff0000000000000,2 +np.float64,0x2e84547a5d08b,0x3ff0000000000000,2 +np.float64,0x8002d8875ee5b10f,0x3ff0000000000000,2 +np.float64,0x3fe1d1c5f763a38c,0x3ff28ba524fb6de8,2 +np.float64,0x8001dea0bc43bd42,0x3ff0000000000000,2 +np.float64,0xfecfad91fd9f6,0x3ff0000000000000,2 +np.float64,0xffed7965fa3af2cb,0x7ff0000000000000,2 +np.float64,0xbfe6102ccc2c205a,0x3ff3f4c082506686,2 +np.float64,0x3feff75b777feeb6,0x3ff8ab6222578e0c,2 +np.float64,0x3fb8a97bd43152f8,0x3ff013057f0a9d89,2 +np.float64,0xffe234b5e964696c,0x7ff0000000000000,2 +np.float64,0x984d9137309b2,0x3ff0000000000000,2 +np.float64,0xbfe42e9230e85d24,0x3ff349fb7d1a7560,2 +np.float64,0xbfecc8b249f99165,0x3ff6ebd0fea0ea72,2 +np.float64,0x8000840910410813,0x3ff0000000000000,2 +np.float64,0xbfd81db9e7303b74,0x3ff126402d3539ec,2 +np.float64,0x800548eb7fea91d8,0x3ff0000000000000,2 +np.float64,0xbfe4679ad0e8cf36,0x3ff35d4db89296a3,2 +np.float64,0x3fd4c55b5a298ab7,0x3ff0d99da31081f9,2 +np.float64,0xbfa8f5b38c31eb60,0x3ff004de3a23b32d,2 +np.float64,0x80005d348e80ba6a,0x3ff0000000000000,2 +np.float64,0x800c348d6118691b,0x3ff0000000000000,2 +np.float64,0xffd6b88f84ad7120,0x7ff0000000000000,2 +np.float64,0x3fc1aaaa82235555,0x3ff027136afd08e0,2 +np.float64,0x7fca7d081b34fa0f,0x7ff0000000000000,2 +np.float64,0x1,0x3ff0000000000000,2 +np.float64,0xbfdc810d1139021a,0x3ff19d007408cfe3,2 +np.float64,0xbfe5dce05f2bb9c0,0x3ff3e1bb9234617b,2 +np.float64,0xffecfe2c32b9fc58,0x7ff0000000000000,2 +np.float64,0x95b2891b2b651,0x3ff0000000000000,2 +np.float64,0x8000b60c6c616c1a,0x3ff0000000000000,2 +np.float64,0x4944f0889289f,0x3ff0000000000000,2 +np.float64,0x3fe6e508696dca10,0x3ff445d1b94863e9,2 +np.float64,0xbfe63355d0ec66ac,0x3ff401e74f16d16f,2 +np.float64,0xbfe9b9595af372b3,0x3ff57445e1b4d670,2 +np.float64,0x800e16f7313c2dee,0x3ff0000000000000,2 +np.float64,0xffe898f5f0b131eb,0x7ff0000000000000,2 +np.float64,0x3fe91ac651f2358d,0x3ff52e787c21c004,2 +np.float64,0x7fbfaac6783f558c,0x7ff0000000000000,2 +np.float64,0xd8ef3dfbb1de8,0x3ff0000000000000,2 +np.float64,0xbfc58c13a52b1828,0x3ff03a2c19d65019,2 
+np.float64,0xbfbde55e8a3bcac0,0x3ff01bf648a3e0a7,2 +np.float64,0xffc3034930260694,0x7ff0000000000000,2 +np.float64,0xea77a64dd4ef5,0x3ff0000000000000,2 +np.float64,0x800cfe7e7739fcfd,0x3ff0000000000000,2 +np.float64,0x4960f31a92c1f,0x3ff0000000000000,2 +np.float64,0x3fd9552c94b2aa58,0x3ff14515a29add09,2 +np.float64,0xffe8b3244c316648,0x7ff0000000000000,2 +np.float64,0x3fe8201e6a70403d,0x3ff4c444fa679cce,2 +np.float64,0xffe9ab7c20f356f8,0x7ff0000000000000,2 +np.float64,0x3fed8bba5f7b1774,0x3ff751853c4c95c5,2 +np.float64,0x8007639cb76ec73a,0x3ff0000000000000,2 +np.float64,0xbfe396db89672db7,0x3ff317bfd1d6fa8c,2 +np.float64,0xbfeb42f888f685f1,0x3ff62a7e0eee56b1,2 +np.float64,0x3fe894827c712904,0x3ff4f4f561d9ea13,2 +np.float64,0xb66b3caf6cd68,0x3ff0000000000000,2 +np.float64,0x800f8907fdbf1210,0x3ff0000000000000,2 +np.float64,0x7fe9b0cddb73619b,0x7ff0000000000000,2 +np.float64,0xbfda70c0e634e182,0x3ff1628c6fdffc53,2 +np.float64,0x3fe0b5f534a16bea,0x3ff23b4ed4c2b48e,2 +np.float64,0xbfe8eee93671ddd2,0x3ff51b85b3c50ae4,2 +np.float64,0xbfe8c22627f1844c,0x3ff50858787a3bfe,2 +np.float64,0x37bb83c86f771,0x3ff0000000000000,2 +np.float64,0xffb7827ffe2f0500,0x7ff0000000000000,2 +np.float64,0x64317940c864,0x3ff0000000000000,2 +np.float64,0x800430ecee6861db,0x3ff0000000000000,2 +np.float64,0x3fa4291fbc285240,0x3ff0032d0204f6dd,2 +np.float64,0xffec69f76af8d3ee,0x7ff0000000000000,2 +np.float64,0x3ff0000000000000,0x3ff8b07551d9f550,2 +np.float64,0x3fc4cf3c42299e79,0x3ff0363fb1d3c254,2 +np.float64,0x7fe0223a77e04474,0x7ff0000000000000,2 +np.float64,0x800a3d4fa4347aa0,0x3ff0000000000000,2 +np.float64,0x3fdd273f94ba4e7f,0x3ff1b05b686e6879,2 +np.float64,0x3feca79052f94f20,0x3ff6dadedfa283aa,2 +np.float64,0x5e7f6f80bcfef,0x3ff0000000000000,2 +np.float64,0xbfef035892fe06b1,0x3ff81efb39cbeba2,2 +np.float64,0x3fee6c08e07cd812,0x3ff7caad952860a1,2 +np.float64,0xffeda715877b4e2a,0x7ff0000000000000,2 +np.float64,0x800580286b0b0052,0x3ff0000000000000,2 +np.float64,0x800703a73fee074f,0x3ff0000000000000,2 +np.float64,0xbfccf96a6639f2d4,0x3ff0696330a60832,2 +np.float64,0x7feb408442368108,0x7ff0000000000000,2 +np.float64,0x3fedc87a46fb90f5,0x3ff771e3635649a9,2 +np.float64,0x3fd8297b773052f7,0x3ff12762bc0cea76,2 +np.float64,0x3fee41bb03fc8376,0x3ff7b37b2da48ab4,2 +np.float64,0xbfe2b05a226560b4,0x3ff2cea17ae7c528,2 +np.float64,0xbfd2e92cf2a5d25a,0x3ff0b41d605ced61,2 +np.float64,0x4817f03a902ff,0x3ff0000000000000,2 +np.float64,0x8c9d4f0d193aa,0x3ff0000000000000,2 diff --git a/.venv/lib/python3.12/site-packages/numpy/core/tests/data/umath-validation-set-exp.csv b/.venv/lib/python3.12/site-packages/numpy/core/tests/data/umath-validation-set-exp.csv new file mode 100644 index 00000000..071fb312 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/data/umath-validation-set-exp.csv @@ -0,0 +1,412 @@ +dtype,input,output,ulperrortol +## +ve denormals ## +np.float32,0x004b4716,0x3f800000,3 +np.float32,0x007b2490,0x3f800000,3 +np.float32,0x007c99fa,0x3f800000,3 +np.float32,0x00734a0c,0x3f800000,3 +np.float32,0x0070de24,0x3f800000,3 +np.float32,0x00495d65,0x3f800000,3 +np.float32,0x006894f6,0x3f800000,3 +np.float32,0x00555a76,0x3f800000,3 +np.float32,0x004e1fb8,0x3f800000,3 +np.float32,0x00687de9,0x3f800000,3 +## -ve denormals ## +np.float32,0x805b59af,0x3f800000,3 +np.float32,0x807ed8ed,0x3f800000,3 +np.float32,0x807142ad,0x3f800000,3 +np.float32,0x80772002,0x3f800000,3 +np.float32,0x8062abcb,0x3f800000,3 +np.float32,0x8045e31c,0x3f800000,3 +np.float32,0x805f01c2,0x3f800000,3 +np.float32,0x80506432,0x3f800000,3 
+np.float32,0x8060089d,0x3f800000,3 +np.float32,0x8071292f,0x3f800000,3 +## floats that output a denormal ## +np.float32,0xc2cf3fc1,0x00000001,3 +np.float32,0xc2c79726,0x00000021,3 +np.float32,0xc2cb295d,0x00000005,3 +np.float32,0xc2b49e6b,0x00068c4c,3 +np.float32,0xc2ca8116,0x00000008,3 +np.float32,0xc2c23f82,0x000001d7,3 +np.float32,0xc2cb69c0,0x00000005,3 +np.float32,0xc2cc1f4d,0x00000003,3 +np.float32,0xc2ae094e,0x00affc4c,3 +np.float32,0xc2c86c44,0x00000015,3 +## random floats between -87.0f and 88.0f ## +np.float32,0x4030d7e0,0x417d9a05,3 +np.float32,0x426f60e8,0x6aa1be2c,3 +np.float32,0x41a1b220,0x4e0efc11,3 +np.float32,0xc20cc722,0x26159da7,3 +np.float32,0x41c492bc,0x512ec79d,3 +np.float32,0x40980210,0x42e73a0e,3 +np.float32,0xbf1f7b80,0x3f094de3,3 +np.float32,0x42a678a4,0x7b87a383,3 +np.float32,0xc20f3cfd,0x25a1c304,3 +np.float32,0x423ff34c,0x6216467f,3 +np.float32,0x00000000,0x3f800000,3 +## floats that cause an overflow ## +np.float32,0x7f06d8c1,0x7f800000,3 +np.float32,0x7f451912,0x7f800000,3 +np.float32,0x7ecceac3,0x7f800000,3 +np.float32,0x7f643b45,0x7f800000,3 +np.float32,0x7e910ea0,0x7f800000,3 +np.float32,0x7eb4756b,0x7f800000,3 +np.float32,0x7f4ec708,0x7f800000,3 +np.float32,0x7f6b4551,0x7f800000,3 +np.float32,0x7d8edbda,0x7f800000,3 +np.float32,0x7f730718,0x7f800000,3 +np.float32,0x42b17217,0x7f7fff84,3 +np.float32,0x42b17218,0x7f800000,3 +np.float32,0x42b17219,0x7f800000,3 +np.float32,0xfef2b0bc,0x00000000,3 +np.float32,0xff69f83e,0x00000000,3 +np.float32,0xff4ecb12,0x00000000,3 +np.float32,0xfeac6d86,0x00000000,3 +np.float32,0xfde0cdb8,0x00000000,3 +np.float32,0xff26aef4,0x00000000,3 +np.float32,0xff6f9277,0x00000000,3 +np.float32,0xff7adfc4,0x00000000,3 +np.float32,0xff0ad40e,0x00000000,3 +np.float32,0xff6fd8f3,0x00000000,3 +np.float32,0xc2cff1b4,0x00000001,3 +np.float32,0xc2cff1b5,0x00000000,3 +np.float32,0xc2cff1b6,0x00000000,3 +np.float32,0x7f800000,0x7f800000,3 +np.float32,0xff800000,0x00000000,3 +np.float32,0x4292f27c,0x7480000a,3 +np.float32,0x42a920be,0x7c7fff94,3 +np.float32,0x41c214c9,0x50ffffd9,3 +np.float32,0x41abe686,0x4effffd9,3 +np.float32,0x4287db5a,0x707fffd3,3 +np.float32,0x41902cbb,0x4c800078,3 +np.float32,0x42609466,0x67ffffeb,3 +np.float32,0x41a65af5,0x4e7fffd1,3 +np.float32,0x417f13ff,0x4affffc9,3 +np.float32,0x426d0e6c,0x6a3504f2,3 +np.float32,0x41bc8934,0x507fff51,3 +np.float32,0x42a7bdde,0x7c0000d6,3 +np.float32,0x4120cf66,0x46b504f6,3 +np.float32,0x4244da8f,0x62ffff1a,3 +np.float32,0x41a0cf69,0x4e000034,3 +np.float32,0x41cd2bec,0x52000005,3 +np.float32,0x42893e41,0x7100009e,3 +np.float32,0x41b437e1,0x4fb50502,3 +np.float32,0x41d8430f,0x5300001d,3 +np.float32,0x4244da92,0x62ffffda,3 +np.float32,0x41a0cf63,0x4dffffa9,3 +np.float32,0x3eb17218,0x3fb504f3,3 +np.float32,0x428729e8,0x703504dc,3 +np.float32,0x41a0cf67,0x4e000014,3 +np.float32,0x4252b77d,0x65800011,3 +np.float32,0x41902cb9,0x4c800058,3 +np.float32,0x42a0cf67,0x79800052,3 +np.float32,0x4152b77b,0x48ffffe9,3 +np.float32,0x41265af3,0x46ffffc8,3 +np.float32,0x42187e0b,0x5affff9a,3 +np.float32,0xc0d2b77c,0x3ab504f6,3 +np.float32,0xc283b2ac,0x10000072,3 +np.float32,0xc1cff1b4,0x2cb504f5,3 +np.float32,0xc05dce9e,0x3d000000,3 +np.float32,0xc28ec9d2,0x0bfffea5,3 +np.float32,0xc23c893a,0x1d7fffde,3 +np.float32,0xc2a920c0,0x027fff6c,3 +np.float32,0xc1f9886f,0x2900002b,3 +np.float32,0xc2c42920,0x000000b5,3 +np.float32,0xc2893e41,0x0dfffec5,3 +np.float32,0xc2c4da93,0x00000080,3 +np.float32,0xc17f1401,0x3400000c,3 +np.float32,0xc1902cb6,0x327fffaf,3 +np.float32,0xc27c4e3b,0x11ffffc5,3 
+np.float32,0xc268e5c5,0x157ffe9d,3 +np.float32,0xc2b4e953,0x0005a826,3 +np.float32,0xc287db5a,0x0e800016,3 +np.float32,0xc207db5a,0x2700000b,3 +np.float32,0xc2b2d4fe,0x000ffff1,3 +np.float32,0xc268e5c0,0x157fffdd,3 +np.float32,0xc22920bd,0x2100003b,3 +np.float32,0xc2902caf,0x0b80011e,3 +np.float32,0xc1902cba,0x327fff2f,3 +np.float32,0xc2ca6625,0x00000008,3 +np.float32,0xc280ece8,0x10fffeb5,3 +np.float32,0xc2918f94,0x0b0000ea,3 +np.float32,0xc29b43d5,0x077ffffc,3 +np.float32,0xc1e61ff7,0x2ab504f5,3 +np.float32,0xc2867878,0x0effff15,3 +np.float32,0xc2a2324a,0x04fffff4,3 +#float64 +## near zero ## +np.float64,0x8000000000000000,0x3ff0000000000000,2 +np.float64,0x8010000000000000,0x3ff0000000000000,2 +np.float64,0x8000000000000001,0x3ff0000000000000,2 +np.float64,0x8360000000000000,0x3ff0000000000000,2 +np.float64,0x9a70000000000000,0x3ff0000000000000,2 +np.float64,0xb9b0000000000000,0x3ff0000000000000,2 +np.float64,0xb810000000000000,0x3ff0000000000000,2 +np.float64,0xbc30000000000000,0x3ff0000000000000,2 +np.float64,0xb6a0000000000000,0x3ff0000000000000,2 +np.float64,0x0000000000000000,0x3ff0000000000000,2 +np.float64,0x0010000000000000,0x3ff0000000000000,2 +np.float64,0x0000000000000001,0x3ff0000000000000,2 +np.float64,0x0360000000000000,0x3ff0000000000000,2 +np.float64,0x1a70000000000000,0x3ff0000000000000,2 +np.float64,0x3c30000000000000,0x3ff0000000000000,2 +np.float64,0x36a0000000000000,0x3ff0000000000000,2 +np.float64,0x39b0000000000000,0x3ff0000000000000,2 +np.float64,0x3810000000000000,0x3ff0000000000000,2 +## underflow ## +np.float64,0xc0c6276800000000,0x0000000000000000,2 +np.float64,0xc0c62d918ce2421d,0x0000000000000000,2 +np.float64,0xc0c62d918ce2421e,0x0000000000000000,2 +np.float64,0xc0c62d91a0000000,0x0000000000000000,2 +np.float64,0xc0c62d9180000000,0x0000000000000000,2 +np.float64,0xc0c62dea45ee3e06,0x0000000000000000,2 +np.float64,0xc0c62dea45ee3e07,0x0000000000000000,2 +np.float64,0xc0c62dea40000000,0x0000000000000000,2 +np.float64,0xc0c62dea60000000,0x0000000000000000,2 +np.float64,0xc0875f1120000000,0x0000000000000000,2 +np.float64,0xc0875f113c30b1c8,0x0000000000000000,2 +np.float64,0xc0875f1140000000,0x0000000000000000,2 +np.float64,0xc093480000000000,0x0000000000000000,2 +np.float64,0xffefffffffffffff,0x0000000000000000,2 +np.float64,0xc7efffffe0000000,0x0000000000000000,2 +## overflow ## +np.float64,0x40862e52fefa39ef,0x7ff0000000000000,2 +np.float64,0x40872e42fefa39ef,0x7ff0000000000000,2 +## +/- INF, +/- NAN ## +np.float64,0x7ff0000000000000,0x7ff0000000000000,2 +np.float64,0xfff0000000000000,0x0000000000000000,2 +np.float64,0x7ff8000000000000,0x7ff8000000000000,2 +np.float64,0xfff8000000000000,0xfff8000000000000,2 +## output denormal ## +np.float64,0xc087438520000000,0x0000000000000001,2 +np.float64,0xc08743853f2f4461,0x0000000000000001,2 +np.float64,0xc08743853f2f4460,0x0000000000000001,2 +np.float64,0xc087438540000000,0x0000000000000001,2 +## between -745.13321910 and 709.78271289 ## +np.float64,0xbff760cd14774bd9,0x3fcdb14ced00ceb6,2 +np.float64,0xbff760cd20000000,0x3fcdb14cd7993879,2 +np.float64,0xbff760cd00000000,0x3fcdb14d12fbd264,2 +np.float64,0xc07f1cf360000000,0x130c1b369af14fda,2 +np.float64,0xbeb0000000000000,0x3feffffe00001000,2 +np.float64,0xbd70000000000000,0x3fefffffffffe000,2 +np.float64,0xc084fd46e5c84952,0x0360000000000139,2 +np.float64,0xc084fd46e5c84953,0x035ffffffffffe71,2 +np.float64,0xc084fd46e0000000,0x0360000b9096d32c,2 +np.float64,0xc084fd4700000000,0x035fff9721d12104,2 +np.float64,0xc086232bc0000000,0x0010003af5e64635,2 
+np.float64,0xc086232bdd7abcd2,0x001000000000007c,2 +np.float64,0xc086232bdd7abcd3,0x000ffffffffffe7c,2 +np.float64,0xc086232be0000000,0x000ffffaf57a6fc9,2 +np.float64,0xc086233920000000,0x000fe590e3b45eb0,2 +np.float64,0xc086233938000000,0x000fe56133493c57,2 +np.float64,0xc086233940000000,0x000fe5514deffbbc,2 +np.float64,0xc086234c98000000,0x000fbf1024c32ccb,2 +np.float64,0xc086234ca0000000,0x000fbf0065bae78d,2 +np.float64,0xc086234c80000000,0x000fbf3f623a7724,2 +np.float64,0xc086234ec0000000,0x000fbad237c846f9,2 +np.float64,0xc086234ec8000000,0x000fbac27cfdec97,2 +np.float64,0xc086234ee0000000,0x000fba934cfd3dc2,2 +np.float64,0xc086234ef0000000,0x000fba73d7f618d9,2 +np.float64,0xc086234f00000000,0x000fba54632dddc0,2 +np.float64,0xc0862356e0000000,0x000faae0945b761a,2 +np.float64,0xc0862356f0000000,0x000faac13eb9a310,2 +np.float64,0xc086235700000000,0x000faaa1e9567b0a,2 +np.float64,0xc086236020000000,0x000f98cd75c11ed7,2 +np.float64,0xc086236ca0000000,0x000f8081b4d93f89,2 +np.float64,0xc086236cb0000000,0x000f8062b3f4d6c5,2 +np.float64,0xc086236cc0000000,0x000f8043b34e6f8c,2 +np.float64,0xc086238d98000000,0x000f41220d9b0d2c,2 +np.float64,0xc086238da0000000,0x000f4112cc80a01f,2 +np.float64,0xc086238d80000000,0x000f414fd145db5b,2 +np.float64,0xc08624fd00000000,0x000cbfce8ea1e6c4,2 +np.float64,0xc086256080000000,0x000c250747fcd46e,2 +np.float64,0xc08626c480000000,0x000a34f4bd975193,2 +np.float64,0xbf50000000000000,0x3feff800ffeaac00,2 +np.float64,0xbe10000000000000,0x3fefffffff800000,2 +np.float64,0xbcd0000000000000,0x3feffffffffffff8,2 +np.float64,0xc055d589e0000000,0x38100004bf94f63e,2 +np.float64,0xc055d58a00000000,0x380ffff97f292ce8,2 +np.float64,0xbfd962d900000000,0x3fe585a4b00110e1,2 +np.float64,0x3ff4bed280000000,0x400d411e7a58a303,2 +np.float64,0x3fff0b3620000000,0x401bd7737ffffcf3,2 +np.float64,0x3ff0000000000000,0x4005bf0a8b145769,2 +np.float64,0x3eb0000000000000,0x3ff0000100000800,2 +np.float64,0x3d70000000000000,0x3ff0000000001000,2 +np.float64,0x40862e42e0000000,0x7fefff841808287f,2 +np.float64,0x40862e42fefa39ef,0x7fefffffffffff2a,2 +np.float64,0x40862e0000000000,0x7feef85a11e73f2d,2 +np.float64,0x4000000000000000,0x401d8e64b8d4ddae,2 +np.float64,0x4009242920000000,0x40372a52c383a488,2 +np.float64,0x4049000000000000,0x44719103e4080b45,2 +np.float64,0x4008000000000000,0x403415e5bf6fb106,2 +np.float64,0x3f50000000000000,0x3ff00400800aab55,2 +np.float64,0x3e10000000000000,0x3ff0000000400000,2 +np.float64,0x3cd0000000000000,0x3ff0000000000004,2 +np.float64,0x40562e40a0000000,0x47effed088821c3f,2 +np.float64,0x40562e42e0000000,0x47effff082e6c7ff,2 +np.float64,0x40562e4300000000,0x47f00000417184b8,2 +np.float64,0x3fe8000000000000,0x4000ef9db467dcf8,2 +np.float64,0x402b12e8d4f33589,0x412718f68c71a6fe,2 +np.float64,0x402b12e8d4f3358a,0x412718f68c71a70a,2 +np.float64,0x402b12e8c0000000,0x412718f59a7f472e,2 +np.float64,0x402b12e8e0000000,0x412718f70c0eac62,2 +##use 1th entry +np.float64,0x40631659AE147CB4,0x4db3a95025a4890f,2 +np.float64,0xC061B87D2E85A4E2,0x332640c8e2de2c51,2 +np.float64,0x405A4A50BE243AF4,0x496a45e4b7f0339a,2 +np.float64,0xC0839898B98EC5C6,0x0764027828830df4,2 +#use 2th entry +np.float64,0xC072428C44B6537C,0x2596ade838b96f3e,2 +np.float64,0xC053057C5E1AE9BF,0x3912c8fad18fdadf,2 +np.float64,0x407E89C78328BAA3,0x6bfe35d5b9a1a194,2 +np.float64,0x4083501B6DD87112,0x77a855503a38924e,2 +#use 3th entry +np.float64,0x40832C6195F24540,0x7741e73c80e5eb2f,2 +np.float64,0xC083D4CD557C2EC9,0x06b61727c2d2508e,2 +np.float64,0x400C48F5F67C99BD,0x404128820f02b92e,2 
+np.float64,0x4056E36D9B2DF26A,0x4830f52ff34a8242,2 +#use 4th entry +np.float64,0x4080FF700D8CBD06,0x70fa70df9bc30f20,2 +np.float64,0x406C276D39E53328,0x543eb8e20a8f4741,2 +np.float64,0xC070D6159BBD8716,0x27a4a0548c904a75,2 +np.float64,0xC052EBCF8ED61F83,0x391c0e92368d15e4,2 +#use 5th entry +np.float64,0xC061F892A8AC5FBE,0x32f807a89efd3869,2 +np.float64,0x4021D885D2DBA085,0x40bd4dc86d3e3270,2 +np.float64,0x40767AEEEE7D4FCF,0x605e22851ee2afb7,2 +np.float64,0xC0757C5D75D08C80,0x20f0751599b992a2,2 +#use 6th entry +np.float64,0x405ACF7A284C4CE3,0x499a4e0b7a27027c,2 +np.float64,0xC085A6C9E80D7AF5,0x0175914009d62ec2,2 +np.float64,0xC07E4C02F86F1DAE,0x1439269b29a9231e,2 +np.float64,0x4080D80F9691CC87,0x7088a6cdafb041de,2 +#use 7th entry +np.float64,0x407FDFD84FBA0AC1,0x6deb1ae6f9bc4767,2 +np.float64,0x40630C06A1A2213D,0x4dac7a9d51a838b7,2 +np.float64,0x40685FDB30BB8B4F,0x5183f5cc2cac9e79,2 +np.float64,0x408045A2208F77F4,0x6ee299e08e2aa2f0,2 +#use 8th entry +np.float64,0xC08104E391F5078B,0x0ed397b7cbfbd230,2 +np.float64,0xC031501CAEFAE395,0x3e6040fd1ea35085,2 +np.float64,0xC079229124F6247C,0x1babf4f923306b1e,2 +np.float64,0x407FB65F44600435,0x6db03beaf2512b8a,2 +#use 9th entry +np.float64,0xC07EDEE8E8E8A5AC,0x136536cec9cbef48,2 +np.float64,0x4072BB4086099A14,0x5af4d3c3008b56cc,2 +np.float64,0x4050442A2EC42CB4,0x45cd393bd8fad357,2 +np.float64,0xC06AC28FB3D419B4,0x2ca1b9d3437df85f,2 +#use 10th entry +np.float64,0x40567FC6F0A68076,0x480c977fd5f3122e,2 +np.float64,0x40620A2F7EDA59BB,0x4cf278e96f4ce4d7,2 +np.float64,0xC085044707CD557C,0x034aad6c968a045a,2 +np.float64,0xC07374EA5AC516AA,0x23dd6afdc03e83d5,2 +#use 11th entry +np.float64,0x4073CC95332619C1,0x5c804b1498bbaa54,2 +np.float64,0xC0799FEBBE257F31,0x1af6a954c43b87d2,2 +np.float64,0x408159F19EA424F6,0x7200858efcbfc84d,2 +np.float64,0x404A81F6F24C0792,0x44b664a07ce5bbfa,2 +#use 12th entry +np.float64,0x40295FF1EFB9A741,0x4113c0e74c52d7b0,2 +np.float64,0x4073975F4CC411DA,0x5c32be40b4fec2c1,2 +np.float64,0x406E9DE52E82A77E,0x56049c9a3f1ae089,2 +np.float64,0x40748C2F52560ED9,0x5d93bc14fd4cd23b,2 +#use 13th entry +np.float64,0x4062A553CDC4D04C,0x4d6266bfde301318,2 +np.float64,0xC079EC1D63598AB7,0x1a88cb184dab224c,2 +np.float64,0xC0725C1CB3167427,0x25725b46f8a081f6,2 +np.float64,0x407888771D9B45F9,0x6353b1ec6bd7ce80,2 +#use 14th entry +np.float64,0xC082CBA03AA89807,0x09b383723831ce56,2 +np.float64,0xC083A8961BB67DD7,0x0735b118d5275552,2 +np.float64,0xC076BC6ECA12E7E3,0x1f2222679eaef615,2 +np.float64,0xC072752503AA1A5B,0x254eb832242c77e1,2 +#use 15th entry +np.float64,0xC058800792125DEC,0x371882372a0b48d4,2 +np.float64,0x4082909FD863E81C,0x7580d5f386920142,2 +np.float64,0xC071616F8FB534F9,0x26dbe20ef64a412b,2 +np.float64,0x406D1AB571CAA747,0x54ee0d55cb38ac20,2 +#use 16th entry +np.float64,0x406956428B7DAD09,0x52358682c271237f,2 +np.float64,0xC07EFC2D9D17B621,0x133b3e77c27a4d45,2 +np.float64,0xC08469BAC5BA3CCA,0x050863e5f42cc52f,2 +np.float64,0x407189D9626386A5,0x593cb1c0b3b5c1d3,2 +#use 17th entry +np.float64,0x4077E652E3DEB8C6,0x6269a10dcbd3c752,2 +np.float64,0x407674C97DB06878,0x605485dcc2426ec2,2 +np.float64,0xC07CE9969CF4268D,0x16386cf8996669f2,2 +np.float64,0x40780EE32D5847C4,0x62a436bd1abe108d,2 +#use 18th entry +np.float64,0x4076C3AA5E1E8DA1,0x60c62f56a5e72e24,2 +np.float64,0xC0730AFC7239B9BE,0x24758ead095cec1e,2 +np.float64,0xC085CC2B9C420DDB,0x0109cdaa2e5694c1,2 +np.float64,0x406D0765CB6D7AA4,0x54e06f8dd91bd945,2 +#use 19th entry +np.float64,0xC082D011F3B495E7,0x09a6647661d279c2,2 +np.float64,0xC072826AF8F6AFBC,0x253acd3cd224507e,2 
+np.float64,0x404EB9C4810CEA09,0x457933dbf07e8133,2 +np.float64,0x408284FBC97C58CE,0x755f6eb234aa4b98,2 +#use 20th entry +np.float64,0x40856008CF6EDC63,0x7d9c0b3c03f4f73c,2 +np.float64,0xC077CB2E9F013B17,0x1d9b3d3a166a55db,2 +np.float64,0xC0479CA3C20AD057,0x3bad40e081555b99,2 +np.float64,0x40844CD31107332A,0x7a821d70aea478e2,2 +#use 21th entry +np.float64,0xC07C8FCC0BFCC844,0x16ba1cc8c539d19b,2 +np.float64,0xC085C4E9A3ABA488,0x011ff675ba1a2217,2 +np.float64,0x4074D538B32966E5,0x5dfd9d78043c6ad9,2 +np.float64,0xC0630CA16902AD46,0x3231a446074cede6,2 +#use 22th entry +np.float64,0xC06C826733D7D0B7,0x2b5f1078314d41e1,2 +np.float64,0xC0520DF55B2B907F,0x396c13a6ce8e833e,2 +np.float64,0xC080712072B0F437,0x107eae02d11d98ea,2 +np.float64,0x40528A6150E19EFB,0x469fdabda02228c5,2 +#use 23th entry +np.float64,0xC07B1D74B6586451,0x18d1253883ae3b48,2 +np.float64,0x4045AFD7867DAEC0,0x43d7d634fc4c5d98,2 +np.float64,0xC07A08B91F9ED3E2,0x1a60973e6397fc37,2 +np.float64,0x407B3ECF0AE21C8C,0x673e03e9d98d7235,2 +#use 24th entry +np.float64,0xC078AEB6F30CEABF,0x1c530b93ab54a1b3,2 +np.float64,0x4084495006A41672,0x7a775b6dc7e63064,2 +np.float64,0x40830B1C0EBF95DD,0x76e1e6eed77cfb89,2 +np.float64,0x407D93E8F33D8470,0x6a9adbc9e1e4f1e5,2 +#use 25th entry +np.float64,0x4066B11A09EFD9E8,0x504dd528065c28a7,2 +np.float64,0x408545823723AEEB,0x7d504a9b1844f594,2 +np.float64,0xC068C711F2CA3362,0x2e104f3496ea118e,2 +np.float64,0x407F317FCC3CA873,0x6cf0732c9948ebf4,2 +#use 26th entry +np.float64,0x407AFB3EBA2ED50F,0x66dc28a129c868d5,2 +np.float64,0xC075377037708ADE,0x21531a329f3d793e,2 +np.float64,0xC07C30066A1F3246,0x174448baa16ded2b,2 +np.float64,0xC06689A75DE2ABD3,0x2fad70662fae230b,2 +#use 27th entry +np.float64,0x4081514E9FCCF1E0,0x71e673b9efd15f44,2 +np.float64,0xC0762C710AF68460,0x1ff1ed7d8947fe43,2 +np.float64,0xC0468102FF70D9C4,0x3be0c3a8ff3419a3,2 +np.float64,0xC07EA4CEEF02A83E,0x13b908f085102c61,2 +#use 28th entry +np.float64,0xC06290B04AE823C4,0x328a83da3c2e3351,2 +np.float64,0xC0770EB1D1C395FB,0x1eab281c1f1db5fe,2 +np.float64,0xC06F5D4D838A5BAE,0x29500ea32fb474ea,2 +np.float64,0x40723B3133B54C5D,0x5a3c82c7c3a2b848,2 +#use 29th entry +np.float64,0x4085E6454CE3B4AA,0x7f20319b9638d06a,2 +np.float64,0x408389F2A0585D4B,0x7850667c58aab3d0,2 +np.float64,0xC0382798F9C8AE69,0x3dc1c79fe8739d6d,2 +np.float64,0xC08299D827608418,0x0a4335f76cdbaeb5,2 +#use 30th entry +np.float64,0xC06F3DED43301BF1,0x2965670ae46750a8,2 +np.float64,0xC070CAF6BDD577D9,0x27b4aa4ffdd29981,2 +np.float64,0x4078529AD4B2D9F2,0x6305c12755d5e0a6,2 +np.float64,0xC055B14E75A31B96,0x381c2eda6d111e5d,2 +#use 31th entry +np.float64,0x407B13EE414FA931,0x6700772c7544564d,2 +np.float64,0x407EAFDE9DE3EC54,0x6c346a0e49724a3c,2 +np.float64,0xC08362F398B9530D,0x07ffeddbadf980cb,2 +np.float64,0x407E865CDD9EEB86,0x6bf866cac5e0d126,2 +#use 32th entry +np.float64,0x407FB62DBC794C86,0x6db009f708ac62cb,2 +np.float64,0xC063D0BAA68CDDDE,0x31a3b2a51ce50430,2 +np.float64,0xC05E7706A2231394,0x34f24bead6fab5c9,2 +np.float64,0x4083E3A06FDE444E,0x79527b7a386d1937,2 diff --git a/.venv/lib/python3.12/site-packages/numpy/core/tests/data/umath-validation-set-exp2.csv b/.venv/lib/python3.12/site-packages/numpy/core/tests/data/umath-validation-set-exp2.csv new file mode 100644 index 00000000..e19e9ebd --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/data/umath-validation-set-exp2.csv @@ -0,0 +1,1429 @@ +dtype,input,output,ulperrortol +np.float32,0xbdfe94b0,0x3f6adda6,2 +np.float32,0x3f20f8f8,0x3fc5ec69,2 +np.float32,0x7040b5,0x3f800000,2 
+np.float32,0x30ec5,0x3f800000,2 +np.float32,0x3eb63070,0x3fa3ce29,2 +np.float32,0xff4dda3d,0x0,2 +np.float32,0x805b832f,0x3f800000,2 +np.float32,0x3e883fb7,0x3f99ed8c,2 +np.float32,0x3f14d71f,0x3fbf8708,2 +np.float32,0xff7b1e55,0x0,2 +np.float32,0xbf691ac6,0x3f082fa2,2 +np.float32,0x7ee3e6ab,0x7f800000,2 +np.float32,0xbec6e2b4,0x3f439248,2 +np.float32,0xbf5f5ec2,0x3f0bd2c0,2 +np.float32,0x8025cc2c,0x3f800000,2 +np.float32,0x7e0d7672,0x7f800000,2 +np.float32,0xff4bbc5c,0x0,2 +np.float32,0xbd94fb30,0x3f73696b,2 +np.float32,0x6cc079,0x3f800000,2 +np.float32,0x803cf080,0x3f800000,2 +np.float32,0x71d418,0x3f800000,2 +np.float32,0xbf24a442,0x3f23ec1e,2 +np.float32,0xbe6c9510,0x3f5a1e1d,2 +np.float32,0xbe8fb284,0x3f52be38,2 +np.float32,0x7ea64754,0x7f800000,2 +np.float32,0x7fc00000,0x7fc00000,2 +np.float32,0x80620cfd,0x3f800000,2 +np.float32,0x3f3e20e8,0x3fd62e72,2 +np.float32,0x3f384600,0x3fd2d00e,2 +np.float32,0xff362150,0x0,2 +np.float32,0xbf349fa8,0x3f1cfaef,2 +np.float32,0xbf776cf2,0x3f0301a6,2 +np.float32,0x8021fc60,0x3f800000,2 +np.float32,0xbdb75280,0x3f70995c,2 +np.float32,0x7e9363a6,0x7f800000,2 +np.float32,0x7e728422,0x7f800000,2 +np.float32,0xfe91edc2,0x0,2 +np.float32,0x3f5f438c,0x3fea491d,2 +np.float32,0x3f2afae9,0x3fcb5c1f,2 +np.float32,0xbef8e766,0x3f36c448,2 +np.float32,0xba522c00,0x3f7fdb97,2 +np.float32,0xff18ee8c,0x0,2 +np.float32,0xbee8c5f4,0x3f3acd44,2 +np.float32,0x3e790448,0x3f97802c,2 +np.float32,0x3e8c9541,0x3f9ad571,2 +np.float32,0xbf03fa9f,0x3f331460,2 +np.float32,0x801ee053,0x3f800000,2 +np.float32,0xbf773230,0x3f03167f,2 +np.float32,0x356fd9,0x3f800000,2 +np.float32,0x8009cd88,0x3f800000,2 +np.float32,0x7f2bac51,0x7f800000,2 +np.float32,0x4d9eeb,0x3f800000,2 +np.float32,0x3133,0x3f800000,2 +np.float32,0x7f4290e0,0x7f800000,2 +np.float32,0xbf5e6523,0x3f0c3161,2 +np.float32,0x3f19182e,0x3fc1bf10,2 +np.float32,0x7e1248bb,0x7f800000,2 +np.float32,0xff5f7aae,0x0,2 +np.float32,0x7e8557b5,0x7f800000,2 +np.float32,0x26fc7f,0x3f800000,2 +np.float32,0x80397d61,0x3f800000,2 +np.float32,0x3cb1825d,0x3f81efe0,2 +np.float32,0x3ed808d0,0x3fab7c45,2 +np.float32,0xbf6f668a,0x3f05e259,2 +np.float32,0x3e3c7802,0x3f916abd,2 +np.float32,0xbd5ac5a0,0x3f76b21b,2 +np.float32,0x805aa6c9,0x3f800000,2 +np.float32,0xbe4d6f68,0x3f5ec3e1,2 +np.float32,0x3f3108b2,0x3fceb87f,2 +np.float32,0x3ec385cc,0x3fa6c9fb,2 +np.float32,0xbe9fc1ce,0x3f4e35e8,2 +np.float32,0x43b68,0x3f800000,2 +np.float32,0x3ef0cdcc,0x3fb15557,2 +np.float32,0x3e3f729b,0x3f91b5e1,2 +np.float32,0x7f52a4df,0x7f800000,2 +np.float32,0xbf56da96,0x3f0f15b9,2 +np.float32,0xbf161d2b,0x3f2a7faf,2 +np.float32,0x3e8df763,0x3f9b1fbe,2 +np.float32,0xff4f0780,0x0,2 +np.float32,0x8048f594,0x3f800000,2 +np.float32,0x3e62bb1d,0x3f953b7e,2 +np.float32,0xfe58e764,0x0,2 +np.float32,0x3dd2c922,0x3f897718,2 +np.float32,0x7fa00000,0x7fe00000,2 +np.float32,0xff07b4b2,0x0,2 +np.float32,0x7f6231a0,0x7f800000,2 +np.float32,0xb8d1d,0x3f800000,2 +np.float32,0x3ee01d24,0x3fad5f16,2 +np.float32,0xbf43f59f,0x3f169869,2 +np.float32,0x801f5257,0x3f800000,2 +np.float32,0x803c15d8,0x3f800000,2 +np.float32,0x3f171a08,0x3fc0b42a,2 +np.float32,0x127aef,0x3f800000,2 +np.float32,0xfd1c6,0x3f800000,2 +np.float32,0x3f1ed13e,0x3fc4c59a,2 +np.float32,0x57fd4f,0x3f800000,2 +np.float32,0x6e8c61,0x3f800000,2 +np.float32,0x804019ab,0x3f800000,2 +np.float32,0x3ef4e5c6,0x3fb251a1,2 +np.float32,0x5044c3,0x3f800000,2 +np.float32,0x3f04460f,0x3fb7204b,2 +np.float32,0x7e326b47,0x7f800000,2 +np.float32,0x800a7e4c,0x3f800000,2 +np.float32,0xbf47ec82,0x3f14fccc,2 
+np.float32,0xbedb1b3e,0x3f3e4a4d,2 +np.float32,0x3f741d86,0x3ff7e4b0,2 +np.float32,0xbe249d20,0x3f6501a6,2 +np.float32,0xbf2ea152,0x3f1f8c68,2 +np.float32,0x3ec6dbcc,0x3fa78b3f,2 +np.float32,0x7ebd9bb4,0x7f800000,2 +np.float32,0x3f61b574,0x3febd77a,2 +np.float32,0x3f3dfb2b,0x3fd61891,2 +np.float32,0x3c7d95,0x3f800000,2 +np.float32,0x8071e840,0x3f800000,2 +np.float32,0x15c6fe,0x3f800000,2 +np.float32,0xbf096601,0x3f307893,2 +np.float32,0x7f5c2ef9,0x7f800000,2 +np.float32,0xbe79f750,0x3f582689,2 +np.float32,0x1eb692,0x3f800000,2 +np.float32,0xbd8024f0,0x3f75226d,2 +np.float32,0xbf5a8be8,0x3f0da950,2 +np.float32,0xbf4d28f3,0x3f12e3e1,2 +np.float32,0x7f800000,0x7f800000,2 +np.float32,0xfea8a758,0x0,2 +np.float32,0x8075d2cf,0x3f800000,2 +np.float32,0xfd99af58,0x0,2 +np.float32,0x9e6a,0x3f800000,2 +np.float32,0x2fa19f,0x3f800000,2 +np.float32,0x3e9f4206,0x3f9ecc56,2 +np.float32,0xbee0b666,0x3f3cd9fc,2 +np.float32,0xbec558c4,0x3f43fab1,2 +np.float32,0x7e9a77df,0x7f800000,2 +np.float32,0xff3a9694,0x0,2 +np.float32,0x3f3b3708,0x3fd47f9a,2 +np.float32,0x807cd6d4,0x3f800000,2 +np.float32,0x804aa422,0x3f800000,2 +np.float32,0xfead7a70,0x0,2 +np.float32,0x3f08c610,0x3fb95efe,2 +np.float32,0xff390126,0x0,2 +np.float32,0x5d2d47,0x3f800000,2 +np.float32,0x8006849c,0x3f800000,2 +np.float32,0x654f6e,0x3f800000,2 +np.float32,0xff478a16,0x0,2 +np.float32,0x3f480b0c,0x3fdc024c,2 +np.float32,0xbc3b96c0,0x3f7df9f4,2 +np.float32,0xbcc96460,0x3f7bacb5,2 +np.float32,0x7f349f30,0x7f800000,2 +np.float32,0xbe08fa98,0x3f6954a1,2 +np.float32,0x4f3a13,0x3f800000,2 +np.float32,0x7f6a5ab4,0x7f800000,2 +np.float32,0x7eb85247,0x7f800000,2 +np.float32,0xbf287246,0x3f223e08,2 +np.float32,0x801584d0,0x3f800000,2 +np.float32,0x7ec25371,0x7f800000,2 +np.float32,0x3f002165,0x3fb51552,2 +np.float32,0x3e1108a8,0x3f8d3429,2 +np.float32,0x4f0f88,0x3f800000,2 +np.float32,0x7f67c1ce,0x7f800000,2 +np.float32,0xbf4348f8,0x3f16dedf,2 +np.float32,0xbe292b64,0x3f644d24,2 +np.float32,0xbf2bfa36,0x3f20b2d6,2 +np.float32,0xbf2a6e58,0x3f215f71,2 +np.float32,0x3e97d5d3,0x3f9d35df,2 +np.float32,0x31f597,0x3f800000,2 +np.float32,0x100544,0x3f800000,2 +np.float32,0x10a197,0x3f800000,2 +np.float32,0x3f44df50,0x3fda20d2,2 +np.float32,0x59916d,0x3f800000,2 +np.float32,0x707472,0x3f800000,2 +np.float32,0x8054194e,0x3f800000,2 +np.float32,0x80627b01,0x3f800000,2 +np.float32,0x7f4d5a5b,0x7f800000,2 +np.float32,0xbcecad00,0x3f7aeca5,2 +np.float32,0xff69c541,0x0,2 +np.float32,0xbe164e20,0x3f673c3a,2 +np.float32,0x3dd321de,0x3f897b39,2 +np.float32,0x3c9c4900,0x3f81b431,2 +np.float32,0x7f0efae3,0x7f800000,2 +np.float32,0xbf1b3ee6,0x3f282567,2 +np.float32,0x3ee858ac,0x3faf5083,2 +np.float32,0x3f0e6a39,0x3fbc3965,2 +np.float32,0x7f0c06d8,0x7f800000,2 +np.float32,0x801dd236,0x3f800000,2 +np.float32,0x564245,0x3f800000,2 +np.float32,0x7e99d3ad,0x7f800000,2 +np.float32,0xff3b0164,0x0,2 +np.float32,0x3f386f18,0x3fd2e785,2 +np.float32,0x7f603c39,0x7f800000,2 +np.float32,0x3cbd9b00,0x3f8211f0,2 +np.float32,0x2178e2,0x3f800000,2 +np.float32,0x5db226,0x3f800000,2 +np.float32,0xfec78d62,0x0,2 +np.float32,0x7f40bc1e,0x7f800000,2 +np.float32,0x80325064,0x3f800000,2 +np.float32,0x3f6068dc,0x3feb0377,2 +np.float32,0xfe8b95c6,0x0,2 +np.float32,0xbe496894,0x3f5f5f87,2 +np.float32,0xbf18722a,0x3f296cf4,2 +np.float32,0x332d0e,0x3f800000,2 +np.float32,0x3f6329dc,0x3fecc5c0,2 +np.float32,0x807d1802,0x3f800000,2 +np.float32,0x3e8afcee,0x3f9a7ff1,2 +np.float32,0x26a0a7,0x3f800000,2 +np.float32,0x7f13085d,0x7f800000,2 +np.float32,0x68d547,0x3f800000,2 
+np.float32,0x7e9b04ae,0x7f800000,2 +np.float32,0x3f3ecdfe,0x3fd692ea,2 +np.float32,0x805256f4,0x3f800000,2 +np.float32,0x3f312dc8,0x3fcecd42,2 +np.float32,0x23ca15,0x3f800000,2 +np.float32,0x3f53c455,0x3fe31ad6,2 +np.float32,0xbf21186c,0x3f2580fd,2 +np.float32,0x803b9bb1,0x3f800000,2 +np.float32,0xff6ae1fc,0x0,2 +np.float32,0x2103cf,0x3f800000,2 +np.float32,0xbedcec6c,0x3f3dd29d,2 +np.float32,0x7f520afa,0x7f800000,2 +np.float32,0x7e8b44f2,0x7f800000,2 +np.float32,0xfef7f6ce,0x0,2 +np.float32,0xbd5e7c30,0x3f768a6f,2 +np.float32,0xfeb36848,0x0,2 +np.float32,0xff49effb,0x0,2 +np.float32,0xbec207c0,0x3f44dc74,2 +np.float32,0x3e91147f,0x3f9bc77f,2 +np.float32,0xfe784cd4,0x0,2 +np.float32,0xfd1a7250,0x0,2 +np.float32,0xff3b3f48,0x0,2 +np.float32,0x3f685db5,0x3ff0219f,2 +np.float32,0x3f370976,0x3fd21bae,2 +np.float32,0xfed4cc20,0x0,2 +np.float32,0xbf41e337,0x3f17714a,2 +np.float32,0xbf4e8638,0x3f12593a,2 +np.float32,0x3edaf0f1,0x3fac295e,2 +np.float32,0x803cbb4f,0x3f800000,2 +np.float32,0x7f492043,0x7f800000,2 +np.float32,0x2cabcf,0x3f800000,2 +np.float32,0x17f8ac,0x3f800000,2 +np.float32,0x3e846478,0x3f99205a,2 +np.float32,0x76948f,0x3f800000,2 +np.float32,0x1,0x3f800000,2 +np.float32,0x7ea6419e,0x7f800000,2 +np.float32,0xa5315,0x3f800000,2 +np.float32,0xff3a8e32,0x0,2 +np.float32,0xbe5714e8,0x3f5d50b7,2 +np.float32,0xfeadf960,0x0,2 +np.float32,0x3ebbd1a8,0x3fa50efc,2 +np.float32,0x7f31dce7,0x7f800000,2 +np.float32,0x80314999,0x3f800000,2 +np.float32,0x8017f41b,0x3f800000,2 +np.float32,0x7ed6d051,0x7f800000,2 +np.float32,0x7f525688,0x7f800000,2 +np.float32,0x7f7fffff,0x7f800000,2 +np.float32,0x3e8b0461,0x3f9a8180,2 +np.float32,0x3d9fe46e,0x3f871e1f,2 +np.float32,0x5e6d8f,0x3f800000,2 +np.float32,0xbf09ae55,0x3f305608,2 +np.float32,0xfe7028c4,0x0,2 +np.float32,0x7f3ade56,0x7f800000,2 +np.float32,0xff4c9ef9,0x0,2 +np.float32,0x7e3199cf,0x7f800000,2 +np.float32,0x8048652f,0x3f800000,2 +np.float32,0x805e1237,0x3f800000,2 +np.float32,0x189ed8,0x3f800000,2 +np.float32,0xbea7c094,0x3f4bfd98,2 +np.float32,0xbf2f109c,0x3f1f5c5c,2 +np.float32,0xbf0e7f4c,0x3f2e0d2c,2 +np.float32,0x8005981f,0x3f800000,2 +np.float32,0xbf762005,0x3f0377f3,2 +np.float32,0xbf0f60ab,0x3f2da317,2 +np.float32,0xbf4aa3e7,0x3f13e54e,2 +np.float32,0xbf348fd2,0x3f1d01aa,2 +np.float32,0x3e530b50,0x3f93a7fb,2 +np.float32,0xbf0b05a4,0x3f2fb26a,2 +np.float32,0x3eea416c,0x3fafc4aa,2 +np.float32,0x805ad04d,0x3f800000,2 +np.float32,0xbf6328d8,0x3f0a655e,2 +np.float32,0x3f7347b9,0x3ff75558,2 +np.float32,0xfda3ca68,0x0,2 +np.float32,0x80497d21,0x3f800000,2 +np.float32,0x3e740452,0x3f96fd22,2 +np.float32,0x3e528e57,0x3f939b7e,2 +np.float32,0x3e9e19fa,0x3f9e8cbd,2 +np.float32,0x8078060b,0x3f800000,2 +np.float32,0x3f3fea7a,0x3fd73872,2 +np.float32,0xfcfa30a0,0x0,2 +np.float32,0x7f4eb4bf,0x7f800000,2 +np.float32,0x3f712618,0x3ff5e900,2 +np.float32,0xbf668f0e,0x3f0920c6,2 +np.float32,0x3f3001e9,0x3fce259d,2 +np.float32,0xbe9b6fac,0x3f4f6b9c,2 +np.float32,0xbf61fcf3,0x3f0ad5ec,2 +np.float32,0xff08a55c,0x0,2 +np.float32,0x3e805014,0x3f984872,2 +np.float32,0x6ce04c,0x3f800000,2 +np.float32,0x7f7cbc07,0x7f800000,2 +np.float32,0x3c87dc,0x3f800000,2 +np.float32,0x3f2ee498,0x3fcd869a,2 +np.float32,0x4b1116,0x3f800000,2 +np.float32,0x3d382d06,0x3f840d5f,2 +np.float32,0xff7de21e,0x0,2 +np.float32,0x3f2f1d6d,0x3fcda63c,2 +np.float32,0xbf1c1618,0x3f27c38a,2 +np.float32,0xff4264b1,0x0,2 +np.float32,0x8026e5e7,0x3f800000,2 +np.float32,0xbe6fa180,0x3f59ab02,2 +np.float32,0xbe923c02,0x3f52053b,2 +np.float32,0xff3aa453,0x0,2 +np.float32,0x3f77a7ac,0x3ffa47d0,2 
+np.float32,0xbed15f36,0x3f40d08a,2 +np.float32,0xa62d,0x3f800000,2 +np.float32,0xbf342038,0x3f1d3123,2 +np.float32,0x7f2f7f80,0x7f800000,2 +np.float32,0x7f2b6fc1,0x7f800000,2 +np.float32,0xff323540,0x0,2 +np.float32,0x3f1a2b6e,0x3fc24faa,2 +np.float32,0x800cc1d2,0x3f800000,2 +np.float32,0xff38fa01,0x0,2 +np.float32,0x80800000,0x3f800000,2 +np.float32,0xbf3d22e0,0x3f196745,2 +np.float32,0x7f40fd62,0x7f800000,2 +np.float32,0x7e1785c7,0x7f800000,2 +np.float32,0x807408c4,0x3f800000,2 +np.float32,0xbf300192,0x3f1ef485,2 +np.float32,0x351e3d,0x3f800000,2 +np.float32,0x7f5ab736,0x7f800000,2 +np.float32,0x2f1696,0x3f800000,2 +np.float32,0x806ac5d7,0x3f800000,2 +np.float32,0x42ec59,0x3f800000,2 +np.float32,0x7f79f52d,0x7f800000,2 +np.float32,0x44ad28,0x3f800000,2 +np.float32,0xbf49dc9c,0x3f143532,2 +np.float32,0x3f6c1f1f,0x3ff295e7,2 +np.float32,0x1589b3,0x3f800000,2 +np.float32,0x3f49b44e,0x3fdd0031,2 +np.float32,0x7f5942c9,0x7f800000,2 +np.float32,0x3f2dab28,0x3fccd877,2 +np.float32,0xff7fffff,0x0,2 +np.float32,0x80578eb2,0x3f800000,2 +np.float32,0x3f39ba67,0x3fd3a50b,2 +np.float32,0x8020340d,0x3f800000,2 +np.float32,0xbf6025b2,0x3f0b8783,2 +np.float32,0x8015ccfe,0x3f800000,2 +np.float32,0x3f6b9762,0x3ff23cd0,2 +np.float32,0xfeeb0c86,0x0,2 +np.float32,0x802779bc,0x3f800000,2 +np.float32,0xbf32bf64,0x3f1dc796,2 +np.float32,0xbf577eb6,0x3f0ed631,2 +np.float32,0x0,0x3f800000,2 +np.float32,0xfe99de6c,0x0,2 +np.float32,0x7a4e53,0x3f800000,2 +np.float32,0x1a15d3,0x3f800000,2 +np.float32,0x8035fe16,0x3f800000,2 +np.float32,0x3e845784,0x3f991dab,2 +np.float32,0x43d688,0x3f800000,2 +np.float32,0xbd447cc0,0x3f77a0b7,2 +np.float32,0x3f83fa,0x3f800000,2 +np.float32,0x3f141df2,0x3fbf2719,2 +np.float32,0x805c586a,0x3f800000,2 +np.float32,0x14c47e,0x3f800000,2 +np.float32,0x3d3bed00,0x3f8422d4,2 +np.float32,0x7f6f4ecd,0x7f800000,2 +np.float32,0x3f0a5e5a,0x3fba2c5c,2 +np.float32,0x523ecf,0x3f800000,2 +np.float32,0xbef4a6e8,0x3f37d262,2 +np.float32,0xff54eb58,0x0,2 +np.float32,0xff3fc875,0x0,2 +np.float32,0x8067c392,0x3f800000,2 +np.float32,0xfedae910,0x0,2 +np.float32,0x80595979,0x3f800000,2 +np.float32,0x3ee87d1d,0x3faf5929,2 +np.float32,0x7f5bad33,0x7f800000,2 +np.float32,0xbf45b868,0x3f15e109,2 +np.float32,0x3ef2277d,0x3fb1a868,2 +np.float32,0x3ca5a950,0x3f81ce8c,2 +np.float32,0x3e70f4e6,0x3f96ad25,2 +np.float32,0xfe3515bc,0x0,2 +np.float32,0xfe4af088,0x0,2 +np.float32,0xff3c78b2,0x0,2 +np.float32,0x7f50f51a,0x7f800000,2 +np.float32,0x3e3a232a,0x3f913009,2 +np.float32,0x7dfec6ff,0x7f800000,2 +np.float32,0x3e1bbaec,0x3f8e3ad6,2 +np.float32,0xbd658fa0,0x3f763ee7,2 +np.float32,0xfe958684,0x0,2 +np.float32,0x503670,0x3f800000,2 +np.float32,0x3f800000,0x40000000,2 +np.float32,0x1bbec6,0x3f800000,2 +np.float32,0xbea7bb7c,0x3f4bff00,2 +np.float32,0xff3a24a2,0x0,2 +np.float32,0xbf416240,0x3f17a635,2 +np.float32,0xbf800000,0x3f000000,2 +np.float32,0xff0c965c,0x0,2 +np.float32,0x80000000,0x3f800000,2 +np.float32,0xbec2c69a,0x3f44a99e,2 +np.float32,0x5b68d4,0x3f800000,2 +np.float32,0xb9a93000,0x3f7ff158,2 +np.float32,0x3d5a0dd8,0x3f84cfbc,2 +np.float32,0xbeaf7a28,0x3f49de4e,2 +np.float32,0x3ee83555,0x3faf4820,2 +np.float32,0xfd320330,0x0,2 +np.float32,0xe1af2,0x3f800000,2 +np.float32,0x7cf28caf,0x7f800000,2 +np.float32,0x80781009,0x3f800000,2 +np.float32,0xbf1e0baf,0x3f26e04d,2 +np.float32,0x7edb05b1,0x7f800000,2 +np.float32,0x3de004,0x3f800000,2 +np.float32,0xff436af6,0x0,2 +np.float32,0x802a9408,0x3f800000,2 +np.float32,0x7ed82205,0x7f800000,2 +np.float32,0x3e3f8212,0x3f91b767,2 +np.float32,0x16a2b2,0x3f800000,2 
+np.float32,0xff1e5af3,0x0,2 +np.float32,0xbf1c860c,0x3f2790b7,2 +np.float32,0x3f3bc5da,0x3fd4d1d6,2 +np.float32,0x7f5f7085,0x7f800000,2 +np.float32,0x7f68e409,0x7f800000,2 +np.float32,0x7f4b3388,0x7f800000,2 +np.float32,0x7ecaf440,0x7f800000,2 +np.float32,0x80078785,0x3f800000,2 +np.float32,0x3ebd800d,0x3fa56f45,2 +np.float32,0xbe39a140,0x3f61c58e,2 +np.float32,0x803b587e,0x3f800000,2 +np.float32,0xbeaaa418,0x3f4b31c4,2 +np.float32,0xff7e2b9f,0x0,2 +np.float32,0xff5180a3,0x0,2 +np.float32,0xbf291394,0x3f21f73c,2 +np.float32,0x7f7b9698,0x7f800000,2 +np.float32,0x4218da,0x3f800000,2 +np.float32,0x7f135262,0x7f800000,2 +np.float32,0x804c10e8,0x3f800000,2 +np.float32,0xbf1c2a54,0x3f27ba5a,2 +np.float32,0x7f41fd32,0x7f800000,2 +np.float32,0x3e5cc464,0x3f94a195,2 +np.float32,0xff7a2fa7,0x0,2 +np.float32,0x3e05dc30,0x3f8c23c9,2 +np.float32,0x7f206d99,0x7f800000,2 +np.float32,0xbe9ae520,0x3f4f9287,2 +np.float32,0xfe4f4d58,0x0,2 +np.float32,0xbf44db42,0x3f163ae3,2 +np.float32,0x3f65ac48,0x3fee6300,2 +np.float32,0x3ebfaf36,0x3fa5ecb0,2 +np.float32,0x3f466719,0x3fdb08b0,2 +np.float32,0x80000001,0x3f800000,2 +np.float32,0xff4b3c7b,0x0,2 +np.float32,0x3df44374,0x3f8b0819,2 +np.float32,0xfea4b540,0x0,2 +np.float32,0x7f358e3d,0x7f800000,2 +np.float32,0x801f5e63,0x3f800000,2 +np.float32,0x804ae77e,0x3f800000,2 +np.float32,0xdbb5,0x3f800000,2 +np.float32,0x7f0a7e3b,0x7f800000,2 +np.float32,0xbe4152e4,0x3f609953,2 +np.float32,0x4b9579,0x3f800000,2 +np.float32,0x3ece0bd4,0x3fa92ea5,2 +np.float32,0x7e499d9a,0x7f800000,2 +np.float32,0x80637d8a,0x3f800000,2 +np.float32,0x3e50a425,0x3f936a8b,2 +np.float32,0xbf0e8cb0,0x3f2e06dd,2 +np.float32,0x802763e2,0x3f800000,2 +np.float32,0xff73041b,0x0,2 +np.float32,0xfea466da,0x0,2 +np.float32,0x80064c73,0x3f800000,2 +np.float32,0xbef29222,0x3f385728,2 +np.float32,0x8029c215,0x3f800000,2 +np.float32,0xbd3994e0,0x3f7815d1,2 +np.float32,0xbe6ac9e4,0x3f5a61f3,2 +np.float32,0x804b58b0,0x3f800000,2 +np.float32,0xbdb83be0,0x3f70865c,2 +np.float32,0x7ee18da2,0x7f800000,2 +np.float32,0xfd4ca010,0x0,2 +np.float32,0x807c668b,0x3f800000,2 +np.float32,0xbd40ed90,0x3f77c6e9,2 +np.float32,0x7efc6881,0x7f800000,2 +np.float32,0xfe633bfc,0x0,2 +np.float32,0x803ce363,0x3f800000,2 +np.float32,0x7ecba81e,0x7f800000,2 +np.float32,0xfdcb2378,0x0,2 +np.float32,0xbebc5524,0x3f4662b2,2 +np.float32,0xfaa30000,0x0,2 +np.float32,0x805d451b,0x3f800000,2 +np.float32,0xbee85600,0x3f3ae996,2 +np.float32,0xfefb0a54,0x0,2 +np.float32,0xbdfc6690,0x3f6b0a08,2 +np.float32,0x58a57,0x3f800000,2 +np.float32,0x3b41b7,0x3f800000,2 +np.float32,0x7c99812d,0x7f800000,2 +np.float32,0xbd3ae740,0x3f78079d,2 +np.float32,0xbf4a48a7,0x3f1409dd,2 +np.float32,0xfdeaad58,0x0,2 +np.float32,0xbe9aa65a,0x3f4fa42c,2 +np.float32,0x3f79d78c,0x3ffbc458,2 +np.float32,0x805e7389,0x3f800000,2 +np.float32,0x7ebb3612,0x7f800000,2 +np.float32,0x2e27dc,0x3f800000,2 +np.float32,0x80726dec,0x3f800000,2 +np.float32,0xfe8fb738,0x0,2 +np.float32,0xff1ff3bd,0x0,2 +np.float32,0x7f5264a2,0x7f800000,2 +np.float32,0x3f5a6893,0x3fe739ca,2 +np.float32,0xbec4029c,0x3f44558d,2 +np.float32,0xbef65cfa,0x3f37657e,2 +np.float32,0x63aba1,0x3f800000,2 +np.float32,0xfbb6e200,0x0,2 +np.float32,0xbf3466fc,0x3f1d1307,2 +np.float32,0x3f258844,0x3fc861d7,2 +np.float32,0xbf5f29a7,0x3f0be6dc,2 +np.float32,0x802b51cd,0x3f800000,2 +np.float32,0xbe9094dc,0x3f527dae,2 +np.float32,0xfec2e68c,0x0,2 +np.float32,0x807b38bd,0x3f800000,2 +np.float32,0xbf594662,0x3f0e2663,2 +np.float32,0x7cbcf747,0x7f800000,2 +np.float32,0xbe4b88f0,0x3f5f0d47,2 
+np.float32,0x3c53c4,0x3f800000,2 +np.float32,0xbe883562,0x3f54e3f7,2 +np.float32,0xbf1efaf0,0x3f267456,2 +np.float32,0x3e22cd3e,0x3f8ee98b,2 +np.float32,0x80434875,0x3f800000,2 +np.float32,0xbf000b44,0x3f34ff6e,2 +np.float32,0x7f311c3a,0x7f800000,2 +np.float32,0x802f7f3f,0x3f800000,2 +np.float32,0x805155fe,0x3f800000,2 +np.float32,0x7f5d7485,0x7f800000,2 +np.float32,0x80119197,0x3f800000,2 +np.float32,0x3f445b8b,0x3fd9d30d,2 +np.float32,0xbf638eb3,0x3f0a3f38,2 +np.float32,0x402410,0x3f800000,2 +np.float32,0xbc578a40,0x3f7dad1d,2 +np.float32,0xbeecbf8a,0x3f39cc9e,2 +np.float32,0x7f2935a4,0x7f800000,2 +np.float32,0x3f570fea,0x3fe523e2,2 +np.float32,0xbf06bffa,0x3f31bdb6,2 +np.float32,0xbf2afdfd,0x3f2120ba,2 +np.float32,0x7f76f7ab,0x7f800000,2 +np.float32,0xfee2d1e8,0x0,2 +np.float32,0x800b026d,0x3f800000,2 +np.float32,0xff0eda75,0x0,2 +np.float32,0x3d4c,0x3f800000,2 +np.float32,0xbed538a2,0x3f3fcffb,2 +np.float32,0x3f73f4f9,0x3ff7c979,2 +np.float32,0x2aa9fc,0x3f800000,2 +np.float32,0x806a45b3,0x3f800000,2 +np.float32,0xff770d35,0x0,2 +np.float32,0x7e999be3,0x7f800000,2 +np.float32,0x80741128,0x3f800000,2 +np.float32,0xff6aac34,0x0,2 +np.float32,0x470f74,0x3f800000,2 +np.float32,0xff423b7b,0x0,2 +np.float32,0x17dfdd,0x3f800000,2 +np.float32,0x7f029e12,0x7f800000,2 +np.float32,0x803fcb9d,0x3f800000,2 +np.float32,0x3f3dc3,0x3f800000,2 +np.float32,0x7f3a27bc,0x7f800000,2 +np.float32,0x3e473108,0x3f9279ec,2 +np.float32,0x7f4add5d,0x7f800000,2 +np.float32,0xfd9736e0,0x0,2 +np.float32,0x805f1df2,0x3f800000,2 +np.float32,0x6c49c1,0x3f800000,2 +np.float32,0x7ec733c7,0x7f800000,2 +np.float32,0x804c1abf,0x3f800000,2 +np.float32,0x3de2e887,0x3f8a37a5,2 +np.float32,0x3f51630a,0x3fe1a561,2 +np.float32,0x3de686a8,0x3f8a62ff,2 +np.float32,0xbedb3538,0x3f3e439c,2 +np.float32,0xbf3aa892,0x3f1a6f9e,2 +np.float32,0x7ee5fb32,0x7f800000,2 +np.float32,0x7e916c9b,0x7f800000,2 +np.float32,0x3f033f1c,0x3fb69e19,2 +np.float32,0x25324b,0x3f800000,2 +np.float32,0x3f348d1d,0x3fd0b2e2,2 +np.float32,0x3f5797e8,0x3fe57851,2 +np.float32,0xbf69c316,0x3f07f1a0,2 +np.float32,0xbe8b7fb0,0x3f53f1bf,2 +np.float32,0xbdbbc190,0x3f703d00,2 +np.float32,0xff6c4fc0,0x0,2 +np.float32,0x7f29fcbe,0x7f800000,2 +np.float32,0x3f678d19,0x3fef9a23,2 +np.float32,0x73d140,0x3f800000,2 +np.float32,0x3e25bdd2,0x3f8f326b,2 +np.float32,0xbeb775ec,0x3f47b2c6,2 +np.float32,0xff451c4d,0x0,2 +np.float32,0x8072c466,0x3f800000,2 +np.float32,0x3f65e836,0x3fee89b2,2 +np.float32,0x52ca7a,0x3f800000,2 +np.float32,0x62cfed,0x3f800000,2 +np.float32,0xbf583dd0,0x3f0e8c5c,2 +np.float32,0xbf683842,0x3f088342,2 +np.float32,0x3f1a7828,0x3fc2780c,2 +np.float32,0x800ea979,0x3f800000,2 +np.float32,0xbeb9133c,0x3f474328,2 +np.float32,0x3ef09fc7,0x3fb14a4b,2 +np.float32,0x7ebbcb75,0x7f800000,2 +np.float32,0xff316c0e,0x0,2 +np.float32,0x805b84e3,0x3f800000,2 +np.float32,0x3d6a55e0,0x3f852d8a,2 +np.float32,0x3e755788,0x3f971fd1,2 +np.float32,0x3ee7aacb,0x3faf2743,2 +np.float32,0x7f714039,0x7f800000,2 +np.float32,0xff70bad8,0x0,2 +np.float32,0xbe0b74c8,0x3f68f08c,2 +np.float32,0xbf6cb170,0x3f06de86,2 +np.float32,0x7ec1fbff,0x7f800000,2 +np.float32,0x8014b1f6,0x3f800000,2 +np.float32,0xfe8b45fe,0x0,2 +np.float32,0x6e2220,0x3f800000,2 +np.float32,0x3ed1777d,0x3fa9f7ab,2 +np.float32,0xff48e467,0x0,2 +np.float32,0xff76c5aa,0x0,2 +np.float32,0x3e9bd330,0x3f9e0fd7,2 +np.float32,0x3f17de4f,0x3fc11aae,2 +np.float32,0x7eeaa2fd,0x7f800000,2 +np.float32,0xbf572746,0x3f0ef806,2 +np.float32,0x7e235554,0x7f800000,2 +np.float32,0xfe24fc1c,0x0,2 +np.float32,0x7daf71ad,0x7f800000,2 
+np.float32,0x800d4a6b,0x3f800000,2 +np.float32,0xbf6fc31d,0x3f05c0ce,2 +np.float32,0x1c4d93,0x3f800000,2 +np.float32,0x7ee9200c,0x7f800000,2 +np.float32,0x3f54b4da,0x3fe3aeec,2 +np.float32,0x2b37b1,0x3f800000,2 +np.float32,0x3f7468bd,0x3ff81731,2 +np.float32,0x3f2850ea,0x3fc9e5f4,2 +np.float32,0xbe0d47ac,0x3f68a6f9,2 +np.float32,0x314877,0x3f800000,2 +np.float32,0x802700c3,0x3f800000,2 +np.float32,0x7e2c915f,0x7f800000,2 +np.float32,0x800d0059,0x3f800000,2 +np.float32,0x3f7f3c25,0x3fff7862,2 +np.float32,0xff735d31,0x0,2 +np.float32,0xff7e339e,0x0,2 +np.float32,0xbef96cf0,0x3f36a340,2 +np.float32,0x3db6ea21,0x3f882cb2,2 +np.float32,0x67cb3d,0x3f800000,2 +np.float32,0x801f349d,0x3f800000,2 +np.float32,0x3f1390ec,0x3fbede29,2 +np.float32,0x7f13644a,0x7f800000,2 +np.float32,0x804a369b,0x3f800000,2 +np.float32,0x80262666,0x3f800000,2 +np.float32,0x7e850fbc,0x7f800000,2 +np.float32,0x18b002,0x3f800000,2 +np.float32,0x8051f1ed,0x3f800000,2 +np.float32,0x3eba48f6,0x3fa4b753,2 +np.float32,0xbf3f4130,0x3f1886a9,2 +np.float32,0xbedac006,0x3f3e61cf,2 +np.float32,0xbf097c70,0x3f306ddc,2 +np.float32,0x4aba6d,0x3f800000,2 +np.float32,0x580078,0x3f800000,2 +np.float32,0x3f64d82e,0x3fedda40,2 +np.float32,0x7f781fd6,0x7f800000,2 +np.float32,0x6aff3d,0x3f800000,2 +np.float32,0xff25e074,0x0,2 +np.float32,0x7ea9ec89,0x7f800000,2 +np.float32,0xbf63b816,0x3f0a2fbb,2 +np.float32,0x133f07,0x3f800000,2 +np.float32,0xff800000,0x0,2 +np.float32,0x8013dde7,0x3f800000,2 +np.float32,0xff770b95,0x0,2 +np.float32,0x806154e8,0x3f800000,2 +np.float32,0x3f1e7bce,0x3fc4981a,2 +np.float32,0xff262c78,0x0,2 +np.float32,0x3f59a652,0x3fe6c04c,2 +np.float32,0x7f220166,0x7f800000,2 +np.float32,0x7eb24939,0x7f800000,2 +np.float32,0xbed58bb0,0x3f3fba6a,2 +np.float32,0x3c2ad000,0x3f80eda7,2 +np.float32,0x2adb2e,0x3f800000,2 +np.float32,0xfe8b213e,0x0,2 +np.float32,0xbf2e0c1e,0x3f1fccea,2 +np.float32,0x7e1716be,0x7f800000,2 +np.float32,0x80184e73,0x3f800000,2 +np.float32,0xbf254743,0x3f23a3d5,2 +np.float32,0x8063a722,0x3f800000,2 +np.float32,0xbe50adf0,0x3f5e46c7,2 +np.float32,0x3f614158,0x3feb8d60,2 +np.float32,0x8014bbc8,0x3f800000,2 +np.float32,0x283bc7,0x3f800000,2 +np.float32,0x3ffb5c,0x3f800000,2 +np.float32,0xfe8de6bc,0x0,2 +np.float32,0xbea6e086,0x3f4c3b82,2 +np.float32,0xfee64b92,0x0,2 +np.float32,0x506c1a,0x3f800000,2 +np.float32,0xff342af8,0x0,2 +np.float32,0x6b6f4c,0x3f800000,2 +np.float32,0xfeb42b1e,0x0,2 +np.float32,0x3e49384a,0x3f92ad71,2 +np.float32,0x152d08,0x3f800000,2 +np.float32,0x804c8f09,0x3f800000,2 +np.float32,0xff5e927d,0x0,2 +np.float32,0x6374da,0x3f800000,2 +np.float32,0x3f48f011,0x3fdc8ae4,2 +np.float32,0xbf446a30,0x3f1668e8,2 +np.float32,0x3ee77073,0x3faf196e,2 +np.float32,0xff4caa40,0x0,2 +np.float32,0x7efc9363,0x7f800000,2 +np.float32,0xbf706dcc,0x3f05830d,2 +np.float32,0xfe29c7e8,0x0,2 +np.float32,0x803cfe58,0x3f800000,2 +np.float32,0x3ec34c7c,0x3fa6bd0a,2 +np.float32,0x3eb85b62,0x3fa44968,2 +np.float32,0xfda1b9d8,0x0,2 +np.float32,0x802932cd,0x3f800000,2 +np.float32,0xbf5cde78,0x3f0cc5fa,2 +np.float32,0x3f31bf44,0x3fcf1ec8,2 +np.float32,0x803a0882,0x3f800000,2 +np.float32,0x800000,0x3f800000,2 +np.float32,0x3f54110e,0x3fe34a08,2 +np.float32,0x80645ea9,0x3f800000,2 +np.float32,0xbd8c1070,0x3f7425c3,2 +np.float32,0x801a006a,0x3f800000,2 +np.float32,0x7f5d161e,0x7f800000,2 +np.float32,0x805b5df3,0x3f800000,2 +np.float32,0xbf71a7c0,0x3f0511be,2 +np.float32,0xbe9a55c0,0x3f4fbad6,2 +np.float64,0xde7e2fd9bcfc6,0x3ff0000000000000,2 +np.float64,0xbfd8cd88eb319b12,0x3fe876349efbfa2b,2 
+np.float64,0x3fe4fa13ace9f428,0x3ff933fbb117d196,2 +np.float64,0x475b3d048eb68,0x3ff0000000000000,2 +np.float64,0x7fef39ed07be73d9,0x7ff0000000000000,2 +np.float64,0x80026b84d904d70a,0x3ff0000000000000,2 +np.float64,0xebd60627d7ac1,0x3ff0000000000000,2 +np.float64,0xbfd7cbefdbaf97e0,0x3fe8bad30f6cf8e1,2 +np.float64,0x7fc17c605a22f8c0,0x7ff0000000000000,2 +np.float64,0x8cdac05119b58,0x3ff0000000000000,2 +np.float64,0x3fc45cd60a28b9ac,0x3ff1dd8028ec3f41,2 +np.float64,0x7fef4fce137e9f9b,0x7ff0000000000000,2 +np.float64,0xe5a2b819cb457,0x3ff0000000000000,2 +np.float64,0xe3bcfd4dc77a0,0x3ff0000000000000,2 +np.float64,0x68f0b670d1e17,0x3ff0000000000000,2 +np.float64,0xae69a6455cd35,0x3ff0000000000000,2 +np.float64,0xffe7007a0c6e00f4,0x0,2 +np.float64,0x59fc57a8b3f8c,0x3ff0000000000000,2 +np.float64,0xbfeee429c0bdc854,0x3fe0638fa62bed9f,2 +np.float64,0x80030bb6e206176f,0x3ff0000000000000,2 +np.float64,0x8006967a36ad2cf5,0x3ff0000000000000,2 +np.float64,0x3fe128176a22502f,0x3ff73393301e5dc8,2 +np.float64,0x218de20c431bd,0x3ff0000000000000,2 +np.float64,0x3fe7dbc48aafb789,0x3ffad38989b5955c,2 +np.float64,0xffda1ef411343de8,0x0,2 +np.float64,0xc6b392838d673,0x3ff0000000000000,2 +np.float64,0x7fe6d080c1ada101,0x7ff0000000000000,2 +np.float64,0xbfed36dd67fa6dbb,0x3fe0fec342c4ee89,2 +np.float64,0x3fee2bb6a3fc576e,0x3ffec1c149f1f092,2 +np.float64,0xbfd1f785eb23ef0c,0x3fea576eb01233cb,2 +np.float64,0x7fdad29a1f35a533,0x7ff0000000000000,2 +np.float64,0xffe8928c4fb12518,0x0,2 +np.float64,0x7fb123160022462b,0x7ff0000000000000,2 +np.float64,0x8007ab56cfaf56ae,0x3ff0000000000000,2 +np.float64,0x7fda342d6634685a,0x7ff0000000000000,2 +np.float64,0xbfe3b7e42c676fc8,0x3fe4e05cf8685b8a,2 +np.float64,0xffa708be7c2e1180,0x0,2 +np.float64,0xbfe8ffbece31ff7e,0x3fe29eb84077a34a,2 +np.float64,0xbf91002008220040,0x3fefa245058f05cb,2 +np.float64,0x8000281f0ee0503f,0x3ff0000000000000,2 +np.float64,0x8005617adc2ac2f6,0x3ff0000000000000,2 +np.float64,0x7fa84fec60309fd8,0x7ff0000000000000,2 +np.float64,0x8d00c0231a018,0x3ff0000000000000,2 +np.float64,0xbfdfe52ca63fca5a,0x3fe6a7324cc00d57,2 +np.float64,0x7fcc81073d39020d,0x7ff0000000000000,2 +np.float64,0x800134ff5a6269ff,0x3ff0000000000000,2 +np.float64,0xffc7fff98d2ffff4,0x0,2 +np.float64,0x8000925ce50124bb,0x3ff0000000000000,2 +np.float64,0xffe2530c66a4a618,0x0,2 +np.float64,0x7fc99070673320e0,0x7ff0000000000000,2 +np.float64,0xbfddd5c1f13bab84,0x3fe72a0c80f8df39,2 +np.float64,0x3fe1c220fee38442,0x3ff7817ec66aa55b,2 +np.float64,0x3fb9a1e1043343c2,0x3ff1265e575e6404,2 +np.float64,0xffef72e0833ee5c0,0x0,2 +np.float64,0x3fe710c0416e2181,0x3ffa5e93588aaa69,2 +np.float64,0xbfd8d23cbab1a47a,0x3fe874f5b9d99885,2 +np.float64,0x7fe9628ebd72c51c,0x7ff0000000000000,2 +np.float64,0xdd5fa611babf5,0x3ff0000000000000,2 +np.float64,0x8002bafac86575f6,0x3ff0000000000000,2 +np.float64,0x68acea44d159e,0x3ff0000000000000,2 +np.float64,0xffd776695eaeecd2,0x0,2 +np.float64,0x80059b59bb4b36b4,0x3ff0000000000000,2 +np.float64,0xbdcdd2af7b9bb,0x3ff0000000000000,2 +np.float64,0x8002b432ee856867,0x3ff0000000000000,2 +np.float64,0xcbc72f09978e6,0x3ff0000000000000,2 +np.float64,0xbfee8f4bf6fd1e98,0x3fe081cc0318b170,2 +np.float64,0xffc6e2892d2dc514,0x0,2 +np.float64,0x7feb682e4db6d05c,0x7ff0000000000000,2 +np.float64,0x8004b70a04296e15,0x3ff0000000000000,2 +np.float64,0x42408a4284812,0x3ff0000000000000,2 +np.float64,0xbfe9b8b197f37163,0x3fe254b4c003ce0a,2 +np.float64,0x3fcaadf5f5355bec,0x3ff27ca7876a8d20,2 +np.float64,0xfff0000000000000,0x0,2 +np.float64,0x7fea8376d33506ed,0x7ff0000000000000,2 
+np.float64,0xffef73c2d63ee785,0x0,2 +np.float64,0xffe68b2bae2d1657,0x0,2 +np.float64,0x3fd8339cb2306739,0x3ff4cb774d616f90,2 +np.float64,0xbfc6d1db4d2da3b8,0x3fec47bb873a309c,2 +np.float64,0x7fe858016230b002,0x7ff0000000000000,2 +np.float64,0x7fe74cb99d2e9972,0x7ff0000000000000,2 +np.float64,0xffec2e96dc385d2d,0x0,2 +np.float64,0xb762a9876ec55,0x3ff0000000000000,2 +np.float64,0x3feca230c5794462,0x3ffdbfe62a572f52,2 +np.float64,0xbfb5ebad3a2bd758,0x3fee27eed86dcc39,2 +np.float64,0x471c705a8e38f,0x3ff0000000000000,2 +np.float64,0x7fc79bb5cf2f376b,0x7ff0000000000000,2 +np.float64,0xbfe53d6164ea7ac3,0x3fe4331b3beb73bd,2 +np.float64,0xbfe375a3f766eb48,0x3fe4fe67edb516e6,2 +np.float64,0x3fe1c7686ca38ed1,0x3ff7842f04770ba9,2 +np.float64,0x242e74dc485cf,0x3ff0000000000000,2 +np.float64,0x8009c06ab71380d6,0x3ff0000000000000,2 +np.float64,0x3fd08505efa10a0c,0x3ff3227b735b956d,2 +np.float64,0xffe3dfcecda7bf9d,0x0,2 +np.float64,0x8001f079bbc3e0f4,0x3ff0000000000000,2 +np.float64,0x3fddc706b6bb8e0c,0x3ff616d927987363,2 +np.float64,0xbfd151373ea2a26e,0x3fea870ba53ec126,2 +np.float64,0x7fe89533bfb12a66,0x7ff0000000000000,2 +np.float64,0xffed302cbc3a6059,0x0,2 +np.float64,0x3fd871cc28b0e398,0x3ff4d97d58c16ae2,2 +np.float64,0x7fbe9239683d2472,0x7ff0000000000000,2 +np.float64,0x848a445909149,0x3ff0000000000000,2 +np.float64,0x8007b104ce2f620a,0x3ff0000000000000,2 +np.float64,0x7fc2cd6259259ac4,0x7ff0000000000000,2 +np.float64,0xbfeadb640df5b6c8,0x3fe1e2b068de10af,2 +np.float64,0x800033b2f1a06767,0x3ff0000000000000,2 +np.float64,0x7fe54e5b7caa9cb6,0x7ff0000000000000,2 +np.float64,0x4f928f209f26,0x3ff0000000000000,2 +np.float64,0x8003c3dc6f2787ba,0x3ff0000000000000,2 +np.float64,0xbfd55a59daaab4b4,0x3fe9649d57b32b5d,2 +np.float64,0xffe3e2968d67c52c,0x0,2 +np.float64,0x80087434d550e86a,0x3ff0000000000000,2 +np.float64,0xffdde800083bd000,0x0,2 +np.float64,0xffe291f0542523e0,0x0,2 +np.float64,0xbfe1419bc3e28338,0x3fe6051d4f95a34a,2 +np.float64,0x3fd9d00ee1b3a01e,0x3ff5292bb8d5f753,2 +np.float64,0x3fdb720b60b6e417,0x3ff589d133625374,2 +np.float64,0xbfe3e21f0967c43e,0x3fe4cd4d02e3ef9a,2 +np.float64,0x7fd7e27f3dafc4fd,0x7ff0000000000000,2 +np.float64,0x3fd1cc2620a3984c,0x3ff366befbc38e3e,2 +np.float64,0x3fe78d05436f1a0b,0x3ffaa5ee4ea54b79,2 +np.float64,0x7e2acc84fc55a,0x3ff0000000000000,2 +np.float64,0x800ffb861c5ff70c,0x3ff0000000000000,2 +np.float64,0xffb2b0db1a2561b8,0x0,2 +np.float64,0xbfe80c2363701847,0x3fe301fdfe789576,2 +np.float64,0x7fe383c1c3e70783,0x7ff0000000000000,2 +np.float64,0xbfeefc02e6fdf806,0x3fe05b1a8528bf6c,2 +np.float64,0xbfe42c9268285925,0x3fe4abdc14793cb8,2 +np.float64,0x1,0x3ff0000000000000,2 +np.float64,0xa71c7ce94e390,0x3ff0000000000000,2 +np.float64,0x800ed4e6777da9cd,0x3ff0000000000000,2 +np.float64,0x3fde11b35d3c2367,0x3ff628bdc6dd1b78,2 +np.float64,0x3fef3964dbfe72ca,0x3fff777cae357608,2 +np.float64,0x3fefe369b7ffc6d4,0x3fffec357be508a3,2 +np.float64,0xbfdef1855f3de30a,0x3fe6e348c58e3fed,2 +np.float64,0x3fee0e2bc13c1c58,0x3ffeae1909c1b973,2 +np.float64,0xbfd31554ffa62aaa,0x3fea06628b2f048a,2 +np.float64,0x800dc56bcc7b8ad8,0x3ff0000000000000,2 +np.float64,0x7fbba01b8e374036,0x7ff0000000000000,2 +np.float64,0x7fd9737a92b2e6f4,0x7ff0000000000000,2 +np.float64,0x3feeae0fac3d5c1f,0x3fff1913705f1f07,2 +np.float64,0x3fdcc64fcdb98ca0,0x3ff5d9c3e5862972,2 +np.float64,0x3fdad9f83db5b3f0,0x3ff56674e81c1bd1,2 +np.float64,0x32b8797065710,0x3ff0000000000000,2 +np.float64,0x3fd20deae6241bd6,0x3ff37495bc057394,2 +np.float64,0x7fc899f0763133e0,0x7ff0000000000000,2 
+np.float64,0x80045805fc08b00d,0x3ff0000000000000,2 +np.float64,0xbfcd8304cb3b0608,0x3feb4611f1eaa30c,2 +np.float64,0x3fd632a2fcac6544,0x3ff4592e1ea14fb0,2 +np.float64,0xffeeb066007d60cb,0x0,2 +np.float64,0x800bb12a42b76255,0x3ff0000000000000,2 +np.float64,0xbfe060fe1760c1fc,0x3fe6714640ab2574,2 +np.float64,0x80067ed737acfdaf,0x3ff0000000000000,2 +np.float64,0x3fd5ec3211abd864,0x3ff449adea82e73e,2 +np.float64,0x7fc4b2fdc22965fb,0x7ff0000000000000,2 +np.float64,0xff656afd002ad600,0x0,2 +np.float64,0xffeadefcdcb5bdf9,0x0,2 +np.float64,0x80052f18610a5e32,0x3ff0000000000000,2 +np.float64,0xbfd5b75c78ab6eb8,0x3fe94b15e0f39194,2 +np.float64,0xa4d3de2b49a7c,0x3ff0000000000000,2 +np.float64,0xbfe321c93de64392,0x3fe524ac7bbee401,2 +np.float64,0x3feb32f5def665ec,0x3ffcd6e4e5f9c271,2 +np.float64,0x7fe6b07e4ced60fc,0x7ff0000000000000,2 +np.float64,0x3fe013bb2de02776,0x3ff6aa4c32ab5ba4,2 +np.float64,0xbfeadd81d375bb04,0x3fe1e1de89b4aebf,2 +np.float64,0xffece7678079cece,0x0,2 +np.float64,0x3fe3d87b8467b0f8,0x3ff897cf22505e4d,2 +np.float64,0xffc4e3a05129c740,0x0,2 +np.float64,0xbfddee6b03bbdcd6,0x3fe723dd83ab49bd,2 +np.float64,0x3fcc4e2672389c4d,0x3ff2a680db769116,2 +np.float64,0x3fd8ed221ab1da44,0x3ff4f569aec8b850,2 +np.float64,0x80000a3538a0146b,0x3ff0000000000000,2 +np.float64,0x8004832eb109065e,0x3ff0000000000000,2 +np.float64,0xffdca83c60395078,0x0,2 +np.float64,0xffef551cda3eaa39,0x0,2 +np.float64,0x800fd95dd65fb2bc,0x3ff0000000000000,2 +np.float64,0x3ff0000000000000,0x4000000000000000,2 +np.float64,0xbfc06f5c4f20deb8,0x3fed466c17305ad8,2 +np.float64,0xbfeb01b5f476036c,0x3fe1d3de0f4211f4,2 +np.float64,0xbfdb2b9284365726,0x3fe7d7b02f790b05,2 +np.float64,0xff76ba83202d7500,0x0,2 +np.float64,0x3fd3f1c59ea7e38c,0x3ff3db96b3a0aaad,2 +np.float64,0x8b99ff6d17340,0x3ff0000000000000,2 +np.float64,0xbfeb383aa0f67075,0x3fe1bedcf2531c08,2 +np.float64,0x3fe321e35fa643c7,0x3ff83749a5d686ee,2 +np.float64,0xbfd863eb2130c7d6,0x3fe8923fcc39bac7,2 +np.float64,0x9e71dd333ce3c,0x3ff0000000000000,2 +np.float64,0x9542962b2a853,0x3ff0000000000000,2 +np.float64,0xba2c963b74593,0x3ff0000000000000,2 +np.float64,0x80019f4d0ca33e9b,0x3ff0000000000000,2 +np.float64,0xffde3e39a73c7c74,0x0,2 +np.float64,0x800258ae02c4b15d,0x3ff0000000000000,2 +np.float64,0xbfd99a535a3334a6,0x3fe8402f3a0662a5,2 +np.float64,0xe6c62143cd8c4,0x3ff0000000000000,2 +np.float64,0x7fbcc828f0399051,0x7ff0000000000000,2 +np.float64,0xbfe42e3596285c6b,0x3fe4ab2066d66071,2 +np.float64,0xffe2ee42d365dc85,0x0,2 +np.float64,0x3fe1f98abea3f315,0x3ff79dc68002a80b,2 +np.float64,0x7fd7225891ae44b0,0x7ff0000000000000,2 +np.float64,0x477177408ee30,0x3ff0000000000000,2 +np.float64,0xbfe16a7e2162d4fc,0x3fe5f1a5c745385d,2 +np.float64,0xbf98aaee283155e0,0x3fef785952e9c089,2 +np.float64,0x7fd7c14a8daf8294,0x7ff0000000000000,2 +np.float64,0xf7e7713defcee,0x3ff0000000000000,2 +np.float64,0x800769aa11aed355,0x3ff0000000000000,2 +np.float64,0xbfed30385e3a6071,0x3fe10135a3bd9ae6,2 +np.float64,0x3fe6dd7205edbae4,0x3ffa4155899efd70,2 +np.float64,0x800d705d26bae0ba,0x3ff0000000000000,2 +np.float64,0xa443ac1f48876,0x3ff0000000000000,2 +np.float64,0xbfec8cfec43919fe,0x3fe13dbf966e6633,2 +np.float64,0x7fd246efaa248dde,0x7ff0000000000000,2 +np.float64,0x800f2ad14afe55a3,0x3ff0000000000000,2 +np.float64,0x800487a894c90f52,0x3ff0000000000000,2 +np.float64,0x80014c4f19e2989f,0x3ff0000000000000,2 +np.float64,0x3fc11f265f223e4d,0x3ff18def05c971e5,2 +np.float64,0xffeb6d565776daac,0x0,2 +np.float64,0x7fd5ca5df8ab94bb,0x7ff0000000000000,2 +np.float64,0xbfe33de4fde67bca,0x3fe517d0e212cd1c,2 
+np.float64,0xbfd1c738e5a38e72,0x3fea6539e9491693,2 +np.float64,0xbfec1d8c33b83b18,0x3fe16790fbca0c65,2 +np.float64,0xbfeecb464b7d968d,0x3fe06c67e2aefa55,2 +np.float64,0xbfd621dbf1ac43b8,0x3fe92dfa32d93846,2 +np.float64,0x80069a02860d3406,0x3ff0000000000000,2 +np.float64,0xbfe84f650e309eca,0x3fe2e661300f1975,2 +np.float64,0x7fc1d2cec523a59d,0x7ff0000000000000,2 +np.float64,0x3fd7706d79aee0db,0x3ff49fb033353dfe,2 +np.float64,0xffd94ba458329748,0x0,2 +np.float64,0x7fea98ba1a753173,0x7ff0000000000000,2 +np.float64,0xbfe756ba092ead74,0x3fe34d428d1857bc,2 +np.float64,0xffecfbd836b9f7b0,0x0,2 +np.float64,0x3fd211fbe5a423f8,0x3ff375711a3641e0,2 +np.float64,0x7fee24f7793c49ee,0x7ff0000000000000,2 +np.float64,0x7fe6a098886d4130,0x7ff0000000000000,2 +np.float64,0xbfd4ade909a95bd2,0x3fe99436524db1f4,2 +np.float64,0xbfeb704e6476e09d,0x3fe1a95be4a21bc6,2 +np.float64,0xffefc0f6627f81ec,0x0,2 +np.float64,0x7feff3f896ffe7f0,0x7ff0000000000000,2 +np.float64,0xa3f74edb47eea,0x3ff0000000000000,2 +np.float64,0xbfe0a551cf214aa4,0x3fe65027a7ff42e3,2 +np.float64,0x3fe164b23622c964,0x3ff7521c6225f51d,2 +np.float64,0x7fc258752324b0e9,0x7ff0000000000000,2 +np.float64,0x4739b3348e737,0x3ff0000000000000,2 +np.float64,0xb0392b1d60726,0x3ff0000000000000,2 +np.float64,0x7fe26f42e5e4de85,0x7ff0000000000000,2 +np.float64,0x8004601f87e8c040,0x3ff0000000000000,2 +np.float64,0xffe92ce37b3259c6,0x0,2 +np.float64,0x3fe620da3a6c41b4,0x3ff9d6ee3d005466,2 +np.float64,0x3fd850cfa2b0a1a0,0x3ff4d20bd249d411,2 +np.float64,0xffdcdfdfb5b9bfc0,0x0,2 +np.float64,0x800390297d672054,0x3ff0000000000000,2 +np.float64,0x3fde5864f6bcb0ca,0x3ff639bb9321f5ef,2 +np.float64,0x3fee484cec7c909a,0x3ffed4d2c6274219,2 +np.float64,0x7fe9b9a064b37340,0x7ff0000000000000,2 +np.float64,0xffe50028b8aa0051,0x0,2 +np.float64,0x3fe37774ade6eee9,0x3ff864558498a9a8,2 +np.float64,0x7fef83c724bf078d,0x7ff0000000000000,2 +np.float64,0xbfeb58450fb6b08a,0x3fe1b290556be73d,2 +np.float64,0x7fd7161475ae2c28,0x7ff0000000000000,2 +np.float64,0x3fece09621f9c12c,0x3ffde836a583bbdd,2 +np.float64,0x3fd045790ea08af2,0x3ff31554778fd4e2,2 +np.float64,0xbfe7c7dd6cef8fbb,0x3fe31e2eeda857fc,2 +np.float64,0xffe9632f5372c65e,0x0,2 +np.float64,0x800d4f3a703a9e75,0x3ff0000000000000,2 +np.float64,0xffea880e4df5101c,0x0,2 +np.float64,0xbfeb7edc4ff6fdb8,0x3fe1a3cb5dc33594,2 +np.float64,0xbfcaae4bab355c98,0x3febb1ee65e16b58,2 +np.float64,0xbfde598a19bcb314,0x3fe709145eafaaf8,2 +np.float64,0x3feefb6d78fdf6db,0x3fff4d5c8c68e39a,2 +np.float64,0x13efc75427dfa,0x3ff0000000000000,2 +np.float64,0xffe26f65c064decb,0x0,2 +np.float64,0xbfed5c1addfab836,0x3fe0f1133bd2189a,2 +np.float64,0x7fe7a7cf756f4f9e,0x7ff0000000000000,2 +np.float64,0xffc681702e2d02e0,0x0,2 +np.float64,0x8003d6ab5067ad57,0x3ff0000000000000,2 +np.float64,0xffa695f1342d2be0,0x0,2 +np.float64,0xbfcf8857db3f10b0,0x3feafa14da8c29a4,2 +np.float64,0xbfe8ca06be71940e,0x3fe2b46f6d2c64b4,2 +np.float64,0x3451c74468a3a,0x3ff0000000000000,2 +np.float64,0x3fde47d5f6bc8fac,0x3ff635bf8e024716,2 +np.float64,0xffda159d5db42b3a,0x0,2 +np.float64,0x7fef9fecaa3f3fd8,0x7ff0000000000000,2 +np.float64,0x3fd4e745e3a9ce8c,0x3ff410a9cb6fd8bf,2 +np.float64,0xffef57019b3eae02,0x0,2 +np.float64,0xbfe6604f4f6cc09e,0x3fe3b55de43c626d,2 +np.float64,0xffe066a424a0cd48,0x0,2 +np.float64,0x3fd547de85aa8fbc,0x3ff425b2a7a16675,2 +np.float64,0xffb3c69280278d28,0x0,2 +np.float64,0xffebe0b759f7c16e,0x0,2 +np.float64,0x3fefc84106ff9082,0x3fffd973687337d8,2 +np.float64,0x501c42a4a0389,0x3ff0000000000000,2 +np.float64,0x7feb45d13eb68ba1,0x7ff0000000000000,2 
+np.float64,0xbfb16a8c2e22d518,0x3fee86a9c0f9291a,2 +np.float64,0x3be327b877c66,0x3ff0000000000000,2 +np.float64,0x7fe4a58220694b03,0x7ff0000000000000,2 +np.float64,0x3fe0286220a050c4,0x3ff6b472157ab8f2,2 +np.float64,0x3fc9381825327030,0x3ff2575fbea2bf5d,2 +np.float64,0xbfd1af7ee8a35efe,0x3fea6c032cf7e669,2 +np.float64,0xbfea9b0f39b5361e,0x3fe1fbae14b40b4d,2 +np.float64,0x39efe4aa73dfd,0x3ff0000000000000,2 +np.float64,0xffeb06fdc8360dfb,0x0,2 +np.float64,0xbfda481e72b4903c,0x3fe812b4b08d4884,2 +np.float64,0xbfd414ba5ba82974,0x3fe9bec9474bdfe6,2 +np.float64,0x7fe707177b6e0e2e,0x7ff0000000000000,2 +np.float64,0x8000000000000001,0x3ff0000000000000,2 +np.float64,0xbfede6a75bbbcd4f,0x3fe0be874cccd399,2 +np.float64,0x8006cdb577cd9b6c,0x3ff0000000000000,2 +np.float64,0x800051374f20a26f,0x3ff0000000000000,2 +np.float64,0x3fe5cba8c96b9752,0x3ff9a76b3adcc122,2 +np.float64,0xbfee3933487c7267,0x3fe0a0b190f9609a,2 +np.float64,0x3fd574b8d8aae970,0x3ff42f7e83de1af9,2 +np.float64,0xba5db72b74bb7,0x3ff0000000000000,2 +np.float64,0x3fa9bf512c337ea0,0x3ff0914a7f743a94,2 +np.float64,0xffe8cb736c3196e6,0x0,2 +np.float64,0x3761b2f06ec37,0x3ff0000000000000,2 +np.float64,0x8b4d4433169a9,0x3ff0000000000000,2 +np.float64,0x800f0245503e048b,0x3ff0000000000000,2 +np.float64,0x7fb20d54ac241aa8,0x7ff0000000000000,2 +np.float64,0x3fdf26666b3e4ccd,0x3ff66b8995142017,2 +np.float64,0xbfcbf2a83737e550,0x3feb8173a7b9d6b5,2 +np.float64,0x3fd31572a0a62ae5,0x3ff3ac6c94313dcd,2 +np.float64,0x7fb6c2807a2d8500,0x7ff0000000000000,2 +np.float64,0x800799758f2f32ec,0x3ff0000000000000,2 +np.float64,0xe72f1f6bce5e4,0x3ff0000000000000,2 +np.float64,0x3fe0e0f223a1c1e4,0x3ff70fed5b761673,2 +np.float64,0x3fe6d4f133eda9e2,0x3ffa3c8000c169eb,2 +np.float64,0xbfe1ccc3d8639988,0x3fe5c32148bedbda,2 +np.float64,0x3fea71c53574e38a,0x3ffc5f31201fe9be,2 +np.float64,0x9e0323eb3c065,0x3ff0000000000000,2 +np.float64,0x8005cc79a5cb98f4,0x3ff0000000000000,2 +np.float64,0x1dace1f83b59d,0x3ff0000000000000,2 +np.float64,0x10000000000000,0x3ff0000000000000,2 +np.float64,0xbfdef50830bdea10,0x3fe6e269fc17ebef,2 +np.float64,0x8010000000000000,0x3ff0000000000000,2 +np.float64,0xbfdfa82192bf5044,0x3fe6b6313ee0a095,2 +np.float64,0x3fd9398fe2b27320,0x3ff506ca2093c060,2 +np.float64,0x8002721fe664e441,0x3ff0000000000000,2 +np.float64,0x800c04166ad8082d,0x3ff0000000000000,2 +np.float64,0xffec3918b3387230,0x0,2 +np.float64,0x3fec62d5dfb8c5ac,0x3ffd972ea4a54b32,2 +np.float64,0x3fe7e42a0b6fc854,0x3ffad86b0443181d,2 +np.float64,0x3fc0aff5f3215fec,0x3ff1836058d4d210,2 +np.float64,0xbf82ff68a025fec0,0x3fefcb7f06862dce,2 +np.float64,0xae2e35195c5c7,0x3ff0000000000000,2 +np.float64,0x3fece3bddf79c77c,0x3ffdea41fb1ba8fa,2 +np.float64,0xbfa97b947832f730,0x3feeea34ebedbbd2,2 +np.float64,0xbfdfb1b1ce3f6364,0x3fe6b3d72871335c,2 +np.float64,0xbfe61a4f24ac349e,0x3fe3d356bf991b06,2 +np.float64,0x7fe23117a5e4622e,0x7ff0000000000000,2 +np.float64,0x800552a8cccaa552,0x3ff0000000000000,2 +np.float64,0x625b4d0ac4b6a,0x3ff0000000000000,2 +np.float64,0x3f86cf15702d9e00,0x3ff01fbe0381676d,2 +np.float64,0x800d7d1b685afa37,0x3ff0000000000000,2 +np.float64,0x3fe2cb6e40a596dd,0x3ff80a1a562f7fc9,2 +np.float64,0x3fe756eb8e2eadd7,0x3ffa86c638aad07d,2 +np.float64,0x800dc9a5513b934b,0x3ff0000000000000,2 +np.float64,0xbfbbdd118a37ba20,0x3fedacb4624f3cee,2 +np.float64,0x800de01f8efbc03f,0x3ff0000000000000,2 +np.float64,0x800da1a3fe9b4348,0x3ff0000000000000,2 +np.float64,0xbf87d8c7602fb180,0x3fefbe2614998ab6,2 +np.float64,0xbfdfff6141bffec2,0x3fe6a0c54d9f1bc8,2 
+np.float64,0xee8fbba5dd1f8,0x3ff0000000000000,2 +np.float64,0x3fe79dc93e6f3b92,0x3ffaaf9d7d955b2c,2 +np.float64,0xffedd4b3d07ba967,0x0,2 +np.float64,0x800905dfc1720bc0,0x3ff0000000000000,2 +np.float64,0x3fd9e483b8b3c907,0x3ff52ddc6c950e7f,2 +np.float64,0xe34ffefdc6a00,0x3ff0000000000000,2 +np.float64,0x2168e62242d1e,0x3ff0000000000000,2 +np.float64,0x800349950e26932b,0x3ff0000000000000,2 +np.float64,0x7fc50da8532a1b50,0x7ff0000000000000,2 +np.float64,0xae1a4d115c34a,0x3ff0000000000000,2 +np.float64,0xa020f0b74041e,0x3ff0000000000000,2 +np.float64,0x3fd2aa2f77a5545f,0x3ff3959f09519a25,2 +np.float64,0x3fbfefc3223fdf86,0x3ff171f3df2d408b,2 +np.float64,0xbfea9fc340b53f86,0x3fe1f9d92b712654,2 +np.float64,0xffe9b920a5337240,0x0,2 +np.float64,0xbfe2eb0265e5d605,0x3fe53dd195782de3,2 +np.float64,0x7fb932c70e32658d,0x7ff0000000000000,2 +np.float64,0x3fda816bfcb502d8,0x3ff551f8d5c84c82,2 +np.float64,0x3fed68cbe9fad198,0x3ffe40f6692d5693,2 +np.float64,0x32df077665be2,0x3ff0000000000000,2 +np.float64,0x7fdc9c2f3539385d,0x7ff0000000000000,2 +np.float64,0x7fe71091a2ee2122,0x7ff0000000000000,2 +np.float64,0xbfe68106c46d020e,0x3fe3a76b56024c2c,2 +np.float64,0xffcf0572823e0ae4,0x0,2 +np.float64,0xbfeeab341fbd5668,0x3fe077d496941cda,2 +np.float64,0x7fe7ada0d2af5b41,0x7ff0000000000000,2 +np.float64,0xffacdef2a439bde0,0x0,2 +np.float64,0x3fe4200f3128401e,0x3ff8be0ddf30fd1e,2 +np.float64,0xffd9022a69320454,0x0,2 +np.float64,0xbfe8e06914f1c0d2,0x3fe2ab5fe7fffb5a,2 +np.float64,0x3fc4b976602972ed,0x3ff1e6786fa7a890,2 +np.float64,0xbfd784c105af0982,0x3fe8cdeb1cdbd57e,2 +np.float64,0x7feb20a20eb64143,0x7ff0000000000000,2 +np.float64,0xbfc87dd83630fbb0,0x3fec067c1e7e6983,2 +np.float64,0x7fe5400cbe6a8018,0x7ff0000000000000,2 +np.float64,0xbfb4a1f5e22943e8,0x3fee42e6c81559a9,2 +np.float64,0x3fe967c575f2cf8a,0x3ffbbd8bc0d5c50d,2 +np.float64,0xbfeb059cf4760b3a,0x3fe1d25c592c4dab,2 +np.float64,0xbfeef536d5bdea6e,0x3fe05d832c15c64a,2 +np.float64,0x3fa90b3f6432167f,0x3ff08d410dd732cc,2 +np.float64,0xbfeaff265e75fe4d,0x3fe1d4db3fb3208d,2 +np.float64,0x6d93d688db27b,0x3ff0000000000000,2 +np.float64,0x800ab9b4ea55736a,0x3ff0000000000000,2 +np.float64,0x3fd444b39d288967,0x3ff3ed749d48d444,2 +np.float64,0xbfd5f2c0d0abe582,0x3fe93ad6124d88e7,2 +np.float64,0x3fea8fd915f51fb2,0x3ffc71b32cb92d60,2 +np.float64,0xbfd23d6491a47aca,0x3fea43875709b0f0,2 +np.float64,0xffe76f75ce6edeeb,0x0,2 +np.float64,0x1f5670da3eacf,0x3ff0000000000000,2 +np.float64,0x8000d89c9621b13a,0x3ff0000000000000,2 +np.float64,0x3fedb51c52bb6a39,0x3ffe732279c228ff,2 +np.float64,0x7f99215ac83242b5,0x7ff0000000000000,2 +np.float64,0x742a6864e854e,0x3ff0000000000000,2 +np.float64,0xbfe02fb340205f66,0x3fe689495f9164e3,2 +np.float64,0x7fef4c12b0fe9824,0x7ff0000000000000,2 +np.float64,0x3fd40e17c2a81c30,0x3ff3e1aee8ed972f,2 +np.float64,0x7fdcd264e939a4c9,0x7ff0000000000000,2 +np.float64,0x3fdb675838b6ceb0,0x3ff587526241c550,2 +np.float64,0x3fdf1a4081be3480,0x3ff66896a18c2385,2 +np.float64,0xbfea5082b874a106,0x3fe218cf8f11be13,2 +np.float64,0xffe1a0ebf7e341d8,0x0,2 +np.float64,0x3fed0a2222ba1444,0x3ffe032ce928ae7d,2 +np.float64,0xffeae036da75c06d,0x0,2 +np.float64,0x5b05fc8ab60c0,0x3ff0000000000000,2 +np.float64,0x7fd8aae5f03155cb,0x7ff0000000000000,2 +np.float64,0xbfd0b4d9fda169b4,0x3feab41e58b6ccb7,2 +np.float64,0xffdcaffa57395ff4,0x0,2 +np.float64,0xbfcbf1455437e28c,0x3feb81a884182c5d,2 +np.float64,0x3f9d6700b83ace01,0x3ff0525657db35d4,2 +np.float64,0x4fd5b0b29fab7,0x3ff0000000000000,2 +np.float64,0x3fe9af2df5b35e5c,0x3ffbe895684df916,2 
+np.float64,0x800dfd41f9dbfa84,0x3ff0000000000000,2 +np.float64,0xbf2a30457e546,0x3ff0000000000000,2 +np.float64,0x7fc6be37182d7c6d,0x7ff0000000000000,2 +np.float64,0x800e0f9788dc1f2f,0x3ff0000000000000,2 +np.float64,0x8006890c704d121a,0x3ff0000000000000,2 +np.float64,0xffecb1a7cbb9634f,0x0,2 +np.float64,0xffb35c330426b868,0x0,2 +np.float64,0x7fe8f2ba8a71e574,0x7ff0000000000000,2 +np.float64,0xf3ccff8fe79a0,0x3ff0000000000000,2 +np.float64,0x3fdf19a84e3e3351,0x3ff66871b17474c1,2 +np.float64,0x80049a662d0934cd,0x3ff0000000000000,2 +np.float64,0xdf5bb4bbbeb77,0x3ff0000000000000,2 +np.float64,0x8005eca030cbd941,0x3ff0000000000000,2 +np.float64,0xffe5f239586be472,0x0,2 +np.float64,0xbfc4526a0728a4d4,0x3fecaa52fbf5345e,2 +np.float64,0xbfe8f1ecda31e3da,0x3fe2a44c080848b3,2 +np.float64,0x3feebd32f4bd7a66,0x3fff234788938c3e,2 +np.float64,0xffd6ca04e9ad940a,0x0,2 +np.float64,0x7ff0000000000000,0x7ff0000000000000,2 +np.float64,0xbfd4c560a9a98ac2,0x3fe98db6d97442fc,2 +np.float64,0x8005723471cae46a,0x3ff0000000000000,2 +np.float64,0xbfeb278299764f05,0x3fe1c54b48f8ba4b,2 +np.float64,0x8007907b376f20f7,0x3ff0000000000000,2 +np.float64,0x7fe9c2fd01b385f9,0x7ff0000000000000,2 +np.float64,0x7fdaa37368b546e6,0x7ff0000000000000,2 +np.float64,0xbfe6d0f3786da1e7,0x3fe38582271cada7,2 +np.float64,0xbfea9b77823536ef,0x3fe1fb8575cd1b7d,2 +np.float64,0xbfe90ac38bf21587,0x3fe29a471b47a2e8,2 +np.float64,0xbfe9c51844738a30,0x3fe24fc8de03ea84,2 +np.float64,0x3fe45a9013a8b520,0x3ff8dd7c80f1cf75,2 +np.float64,0xbfe5780551eaf00a,0x3fe419832a6a4c56,2 +np.float64,0xffefffffffffffff,0x0,2 +np.float64,0x7fe3778c84a6ef18,0x7ff0000000000000,2 +np.float64,0xbfdc8a60413914c0,0x3fe77dc55b85028f,2 +np.float64,0xef47ae2fde8f6,0x3ff0000000000000,2 +np.float64,0x8001269fa4c24d40,0x3ff0000000000000,2 +np.float64,0x3fe9d2d39e73a5a7,0x3ffbfe2a66c4148e,2 +np.float64,0xffee61f528fcc3e9,0x0,2 +np.float64,0x3fe8a259ab7144b3,0x3ffb47e797a34bd2,2 +np.float64,0x3f906d610820dac0,0x3ff02dccda8e1a75,2 +np.float64,0x3fe70739f32e0e74,0x3ffa59232f4fcd07,2 +np.float64,0x3fe6b7f5e6ad6fec,0x3ffa2c0cc54f2c16,2 +np.float64,0x95a91a792b524,0x3ff0000000000000,2 +np.float64,0xbfedf6fcf57bedfa,0x3fe0b89bb40081cc,2 +np.float64,0xbfa4d2de9c29a5c0,0x3fef1c485678d657,2 +np.float64,0x3fe130470d22608e,0x3ff737b0be409a38,2 +np.float64,0x3fcf8035423f006b,0x3ff2f9d7c3c6a302,2 +np.float64,0xffe5995a3eab32b4,0x0,2 +np.float64,0xffca68c63034d18c,0x0,2 +np.float64,0xff9d53af903aa760,0x0,2 +np.float64,0x800563f1de6ac7e4,0x3ff0000000000000,2 +np.float64,0x7fce284fa63c509e,0x7ff0000000000000,2 +np.float64,0x7fb2a3959a25472a,0x7ff0000000000000,2 +np.float64,0x7fdbe2652f37c4c9,0x7ff0000000000000,2 +np.float64,0x800d705bbc1ae0b8,0x3ff0000000000000,2 +np.float64,0x7fd9bd2347b37a46,0x7ff0000000000000,2 +np.float64,0x3fcac3c0fb358782,0x3ff27ed62d6c8221,2 +np.float64,0x800110691ec220d3,0x3ff0000000000000,2 +np.float64,0x3fef79a8157ef350,0x3fffa368513eb909,2 +np.float64,0x7fe8bd2f0e317a5d,0x7ff0000000000000,2 +np.float64,0x7fd3040e60a6081c,0x7ff0000000000000,2 +np.float64,0xffea50723234a0e4,0x0,2 +np.float64,0xbfe6220054ac4400,0x3fe3d00961238a93,2 +np.float64,0x3f9eddd8c83dbbc0,0x3ff0567b0c73005a,2 +np.float64,0xbfa4a062c42940c0,0x3fef1e68badde324,2 +np.float64,0xbfd077ad4720ef5a,0x3feac5d577581d07,2 +np.float64,0x7fdfd4b025bfa95f,0x7ff0000000000000,2 +np.float64,0xd00d3cf3a01a8,0x3ff0000000000000,2 +np.float64,0x7fe3010427260207,0x7ff0000000000000,2 +np.float64,0x22ea196645d44,0x3ff0000000000000,2 +np.float64,0x7fd747e8cd2e8fd1,0x7ff0000000000000,2 
+np.float64,0xd50665e7aa0cd,0x3ff0000000000000,2 +np.float64,0x7fe1da580ae3b4af,0x7ff0000000000000,2 +np.float64,0xffeb218ecfb6431d,0x0,2 +np.float64,0xbf887d0dd030fa00,0x3fefbc6252c8b354,2 +np.float64,0x3fcaa31067354621,0x3ff27b904c07e07f,2 +np.float64,0x7fe698cc4ded3198,0x7ff0000000000000,2 +np.float64,0x1c40191a38804,0x3ff0000000000000,2 +np.float64,0x80086fd20e30dfa4,0x3ff0000000000000,2 +np.float64,0x7fed34d5eaba69ab,0x7ff0000000000000,2 +np.float64,0xffd00b52622016a4,0x0,2 +np.float64,0x3f80abcdb021579b,0x3ff0172d27945851,2 +np.float64,0x3fe614cfd66c29a0,0x3ff9d031e1839191,2 +np.float64,0x80021d71c8843ae4,0x3ff0000000000000,2 +np.float64,0x800bc2adc657855c,0x3ff0000000000000,2 +np.float64,0x6b9fec1cd73fe,0x3ff0000000000000,2 +np.float64,0xffd9093b5f321276,0x0,2 +np.float64,0x800d3c6c77fa78d9,0x3ff0000000000000,2 +np.float64,0xffe80fc1cbf01f83,0x0,2 +np.float64,0xffbffbaf2a3ff760,0x0,2 +np.float64,0x3fea1ed29eb43da5,0x3ffc2c64ec0e17a3,2 +np.float64,0x7ff4000000000000,0x7ffc000000000000,2 +np.float64,0x3fd944a052328941,0x3ff5094f4c43ecca,2 +np.float64,0x800b1f9416163f29,0x3ff0000000000000,2 +np.float64,0x800f06bf33de0d7e,0x3ff0000000000000,2 +np.float64,0xbfdbf0d226b7e1a4,0x3fe7a4f73793d95b,2 +np.float64,0xffe7306c30ae60d8,0x0,2 +np.float64,0x7fe991accfb32359,0x7ff0000000000000,2 +np.float64,0x3fcc0040d2380082,0x3ff29ea47e4f07d4,2 +np.float64,0x7fefffffffffffff,0x7ff0000000000000,2 +np.float64,0x0,0x3ff0000000000000,2 +np.float64,0x3fe1423f7be2847e,0x3ff740bc1d3b20f8,2 +np.float64,0xbfeae3a3cab5c748,0x3fe1df7e936f8504,2 +np.float64,0x800b2da7d6165b50,0x3ff0000000000000,2 +np.float64,0x800b2404fcd6480a,0x3ff0000000000000,2 +np.float64,0x6fcbcf88df97b,0x3ff0000000000000,2 +np.float64,0xa248c0e14492,0x3ff0000000000000,2 +np.float64,0xffd255776824aaee,0x0,2 +np.float64,0x80057b3effeaf67f,0x3ff0000000000000,2 +np.float64,0x3feb0b07d7761610,0x3ffcbdfe1be5a594,2 +np.float64,0x924e1019249c2,0x3ff0000000000000,2 +np.float64,0x80074307e80e8611,0x3ff0000000000000,2 +np.float64,0xffb207fa46240ff8,0x0,2 +np.float64,0x95ac388d2b587,0x3ff0000000000000,2 +np.float64,0xbff0000000000000,0x3fe0000000000000,2 +np.float64,0x3fd38b6a492716d5,0x3ff3c59f62b5add5,2 +np.float64,0x7fe49362c3e926c5,0x7ff0000000000000,2 +np.float64,0x7fe842889db08510,0x7ff0000000000000,2 +np.float64,0xbfba6003e834c008,0x3fedcb620a2d9856,2 +np.float64,0xffe7e782bd6fcf05,0x0,2 +np.float64,0x7fd9b93d9433727a,0x7ff0000000000000,2 +np.float64,0x7fc8fcb61d31f96b,0x7ff0000000000000,2 +np.float64,0xbfef9be8db3f37d2,0x3fe022d603b81dc2,2 +np.float64,0x6f4fc766de9fa,0x3ff0000000000000,2 +np.float64,0xbfe93016f132602e,0x3fe28b42d782d949,2 +np.float64,0x3fe10e52b8e21ca5,0x3ff726a38b0bb895,2 +np.float64,0x3fbbba0ae6377416,0x3ff13f56084a9da3,2 +np.float64,0x3fe09e42ece13c86,0x3ff6eeb57e775e24,2 +np.float64,0x800942e39fb285c8,0x3ff0000000000000,2 +np.float64,0xffe5964370eb2c86,0x0,2 +np.float64,0x3fde479f32bc8f3e,0x3ff635b2619ba53a,2 +np.float64,0x3fe826e187f04dc3,0x3ffaff52b79c3a08,2 +np.float64,0x3febcbf1eab797e4,0x3ffd37152e5e2598,2 +np.float64,0x3fa0816a202102d4,0x3ff05c8e6a8b00d5,2 +np.float64,0xbd005ccb7a00c,0x3ff0000000000000,2 +np.float64,0x44c12fdc89827,0x3ff0000000000000,2 +np.float64,0xffc8fdffa431fc00,0x0,2 +np.float64,0xffeb4f5a87b69eb4,0x0,2 +np.float64,0xbfb07e7f8420fd00,0x3fee9a32924fe6a0,2 +np.float64,0xbfbd9d1bb63b3a38,0x3fed88ca81e5771c,2 +np.float64,0x8008682a74f0d055,0x3ff0000000000000,2 +np.float64,0x3fdeedbc7b3ddb79,0x3ff65dcb7c55f4dc,2 +np.float64,0x8009e889c613d114,0x3ff0000000000000,2 
+np.float64,0x3faea831f43d5064,0x3ff0ad935e890e49,2 +np.float64,0xf0af1703e15e3,0x3ff0000000000000,2 +np.float64,0xffec06c4a5f80d88,0x0,2 +np.float64,0x53a1cc0ca743a,0x3ff0000000000000,2 +np.float64,0x7fd10c9eea22193d,0x7ff0000000000000,2 +np.float64,0xbfd48a6bf0a914d8,0x3fe99e0d109f2bac,2 +np.float64,0x3fd6dfe931adbfd4,0x3ff47f81c2dfc5d3,2 +np.float64,0x3fed20e86b7a41d0,0x3ffe11fecc7bc686,2 +np.float64,0xbfea586818b4b0d0,0x3fe215b7747d5cb8,2 +np.float64,0xbfd4ad3e20295a7c,0x3fe99465ab8c3275,2 +np.float64,0x3fd6619ee4acc33e,0x3ff4638b7b80c08a,2 +np.float64,0x3fdf6fcb63bedf97,0x3ff67d62fd3d560c,2 +np.float64,0x800a9191e7152324,0x3ff0000000000000,2 +np.float64,0x3fd2ff3c0da5fe78,0x3ff3a7b17e892a28,2 +np.float64,0x8003dbf1f327b7e5,0x3ff0000000000000,2 +np.float64,0xffea6b89a934d712,0x0,2 +np.float64,0x7fcfb879043f70f1,0x7ff0000000000000,2 +np.float64,0xea6a84dbd4d51,0x3ff0000000000000,2 +np.float64,0x800ec97a815d92f5,0x3ff0000000000000,2 +np.float64,0xffe304c3a8660987,0x0,2 +np.float64,0xbfefe24dd3ffc49c,0x3fe00a4e065be96d,2 +np.float64,0xffd3cc8c00a79918,0x0,2 +np.float64,0x95be8b7b2b7d2,0x3ff0000000000000,2 +np.float64,0x7fe20570cba40ae1,0x7ff0000000000000,2 +np.float64,0x7f97a06da02f40da,0x7ff0000000000000,2 +np.float64,0xffe702b9522e0572,0x0,2 +np.float64,0x3fada2d8543b45b1,0x3ff0a7adc4201e08,2 +np.float64,0x235e6acc46bce,0x3ff0000000000000,2 +np.float64,0x3fea6bc28ef4d786,0x3ffc5b7fc68fddac,2 +np.float64,0xffdbc9f505b793ea,0x0,2 +np.float64,0xffe98b137ff31626,0x0,2 +np.float64,0x800e26c6721c4d8d,0x3ff0000000000000,2 +np.float64,0x80080de445301bc9,0x3ff0000000000000,2 +np.float64,0x37e504a86fca1,0x3ff0000000000000,2 +np.float64,0x8002f5f60325ebed,0x3ff0000000000000,2 +np.float64,0x5c8772feb90ef,0x3ff0000000000000,2 +np.float64,0xbfe021abb4604358,0x3fe69023a51d22b8,2 +np.float64,0x3fde744f8fbce8a0,0x3ff64074dc84edd7,2 +np.float64,0xbfdd92899f3b2514,0x3fe73aefd9701858,2 +np.float64,0x7fc1ad5c51235ab8,0x7ff0000000000000,2 +np.float64,0xaae2f98955c5f,0x3ff0000000000000,2 +np.float64,0x7f9123d5782247aa,0x7ff0000000000000,2 +np.float64,0xbfe3f8e94b67f1d2,0x3fe4c30ab28e9cb7,2 +np.float64,0x7fdaba8b4cb57516,0x7ff0000000000000,2 +np.float64,0x7fefc85cfeff90b9,0x7ff0000000000000,2 +np.float64,0xffb83b4f523076a0,0x0,2 +np.float64,0xbfe888a68c71114d,0x3fe2ceff17c203d1,2 +np.float64,0x800de1dac4bbc3b6,0x3ff0000000000000,2 +np.float64,0xbfe4f27f09e9e4fe,0x3fe453f9af407eac,2 +np.float64,0xffe3d2713467a4e2,0x0,2 +np.float64,0xbfebaab840375570,0x3fe1931131b98842,2 +np.float64,0x93892a1b27126,0x3ff0000000000000,2 +np.float64,0x1e8e7f983d1d1,0x3ff0000000000000,2 +np.float64,0x3fecc950627992a0,0x3ffdd926f036add0,2 +np.float64,0xbfd41dfb1aa83bf6,0x3fe9bc34ece35b94,2 +np.float64,0x800aebfc6555d7f9,0x3ff0000000000000,2 +np.float64,0x7fe33ba52ca67749,0x7ff0000000000000,2 +np.float64,0xffe57c9b3feaf936,0x0,2 +np.float64,0x3fdd12464fba248c,0x3ff5ebc5598e6bd0,2 +np.float64,0xffe06d7f0fe0dafe,0x0,2 +np.float64,0x800e55b7fe9cab70,0x3ff0000000000000,2 +np.float64,0x3fd33803c8267008,0x3ff3b3cb78b2d642,2 +np.float64,0xe9cab8a1d3957,0x3ff0000000000000,2 +np.float64,0x3fb38ac166271580,0x3ff0de906947c0f0,2 +np.float64,0xbfd67aa552acf54a,0x3fe915cf64a389fd,2 +np.float64,0x1db96daa3b72f,0x3ff0000000000000,2 +np.float64,0xbfee9f08f4fd3e12,0x3fe07c2c615add3c,2 +np.float64,0xf14f6d65e29ee,0x3ff0000000000000,2 +np.float64,0x800bce089e179c12,0x3ff0000000000000,2 +np.float64,0xffc42dcc37285b98,0x0,2 +np.float64,0x7fd5f37063abe6e0,0x7ff0000000000000,2 +np.float64,0xbfd943c2cbb28786,0x3fe856f6452ec753,2 
+np.float64,0x8ddfbc091bbf8,0x3ff0000000000000,2 +np.float64,0xbfe153491e22a692,0x3fe5fcb075dbbd5d,2 +np.float64,0xffe7933999ef2672,0x0,2 +np.float64,0x7ff8000000000000,0x7ff8000000000000,2 +np.float64,0x8000000000000000,0x3ff0000000000000,2 +np.float64,0xbfe9154580b22a8b,0x3fe2960bac3a8220,2 +np.float64,0x800dc6dda21b8dbb,0x3ff0000000000000,2 +np.float64,0xbfb26225a824c448,0x3fee7239a457df81,2 +np.float64,0xbfd7b68c83af6d1a,0x3fe8c08e351ab468,2 +np.float64,0xffde01f7213c03ee,0x0,2 +np.float64,0x3fe54cbe0faa997c,0x3ff9614527191d72,2 +np.float64,0xbfd6bec3732d7d86,0x3fe90354909493de,2 +np.float64,0xbfef3c85bd7e790b,0x3fe0444f8c489ca6,2 +np.float64,0x899501b7132a0,0x3ff0000000000000,2 +np.float64,0xbfe17a456462f48b,0x3fe5ea2719a9a84b,2 +np.float64,0xffe34003b8668007,0x0,2 +np.float64,0x7feff6a3633fed46,0x7ff0000000000000,2 +np.float64,0x3fba597ecc34b2fe,0x3ff12ee72e4de474,2 +np.float64,0x4084c7b68109a,0x3ff0000000000000,2 +np.float64,0x3fad23bf4c3a4780,0x3ff0a4d06193ff6d,2 +np.float64,0xffd0fe2707a1fc4e,0x0,2 +np.float64,0xb96cb43f72d97,0x3ff0000000000000,2 +np.float64,0x7fc4d684d829ad09,0x7ff0000000000000,2 +np.float64,0x7fdc349226b86923,0x7ff0000000000000,2 +np.float64,0x7fd82851cd3050a3,0x7ff0000000000000,2 +np.float64,0x800cde0041b9bc01,0x3ff0000000000000,2 +np.float64,0x4e8caa1e9d196,0x3ff0000000000000,2 +np.float64,0xbfed06a6d2fa0d4e,0x3fe1108c3682b05a,2 +np.float64,0xffe8908122312102,0x0,2 +np.float64,0xffe56ed6d9aaddad,0x0,2 +np.float64,0x3fedd6db00fbadb6,0x3ffe896c68c4b26e,2 +np.float64,0x3fde31f9b4bc63f4,0x3ff6307e08f8b6ba,2 +np.float64,0x6bb963c2d772d,0x3ff0000000000000,2 +np.float64,0x787b7142f0f6f,0x3ff0000000000000,2 +np.float64,0x3fe6e4147c6dc829,0x3ffa451bbdece240,2 +np.float64,0x8003857401470ae9,0x3ff0000000000000,2 +np.float64,0xbfeae82c3c75d058,0x3fe1ddbd66e65aab,2 +np.float64,0x7fe174707c62e8e0,0x7ff0000000000000,2 +np.float64,0x80008d2545e11a4b,0x3ff0000000000000,2 +np.float64,0xbfecc2dce17985ba,0x3fe129ad4325985a,2 +np.float64,0xbfe1fa1daf63f43c,0x3fe5adcb0731a44b,2 +np.float64,0x7fcf2530203e4a5f,0x7ff0000000000000,2 +np.float64,0xbfea5cefe874b9e0,0x3fe213f134b61f4a,2 +np.float64,0x800103729f2206e6,0x3ff0000000000000,2 +np.float64,0xbfe8442ff7708860,0x3fe2eaf850faa169,2 +np.float64,0x8006c78e19ed8f1d,0x3ff0000000000000,2 +np.float64,0x3fc259589c24b2b1,0x3ff1abe6a4d28816,2 +np.float64,0xffed02b7b5ba056e,0x0,2 +np.float64,0xbfce0aa4fe3c1548,0x3feb32115d92103e,2 +np.float64,0x7fec06e78bf80dce,0x7ff0000000000000,2 +np.float64,0xbfe0960bbc612c18,0x3fe6578ab29b70d4,2 +np.float64,0x3fee45841cbc8b08,0x3ffed2f6ca808ad3,2 +np.float64,0xbfeb0f8ebef61f1e,0x3fe1ce86003044cd,2 +np.float64,0x8002c357358586af,0x3ff0000000000000,2 +np.float64,0x3fe9aa10cc735422,0x3ffbe57e294ce68b,2 +np.float64,0x800256c0a544ad82,0x3ff0000000000000,2 +np.float64,0x4de6e1449bcdd,0x3ff0000000000000,2 +np.float64,0x65e9bc9ccbd38,0x3ff0000000000000,2 +np.float64,0xbfe53b0fa9aa7620,0x3fe4341f0aa29bbc,2 +np.float64,0xbfcdd94cd13bb298,0x3feb3956acd2e2dd,2 +np.float64,0x8004a49b65a94938,0x3ff0000000000000,2 +np.float64,0x800d3d05deba7a0c,0x3ff0000000000000,2 +np.float64,0x3fe4e05bce69c0b8,0x3ff925f55602a7e0,2 +np.float64,0xffe391e3256723c6,0x0,2 +np.float64,0xbfe92f0f37b25e1e,0x3fe28bacc76ae753,2 +np.float64,0x3f990238d8320472,0x3ff045edd36e2d62,2 +np.float64,0xffed8d15307b1a2a,0x0,2 +np.float64,0x3fee82e01afd05c0,0x3ffefc09e8b9c2b7,2 +np.float64,0xffb2d94b2225b298,0x0,2 diff --git a/.venv/lib/python3.12/site-packages/numpy/core/tests/data/umath-validation-set-expm1.csv 
b/.venv/lib/python3.12/site-packages/numpy/core/tests/data/umath-validation-set-expm1.csv new file mode 100644 index 00000000..732ae865 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/data/umath-validation-set-expm1.csv @@ -0,0 +1,1429 @@ +dtype,input,output,ulperrortol +np.float32,0x80606724,0x80606724,3 +np.float32,0xbf16790f,0xbee38e14,3 +np.float32,0xbf1778a1,0xbee4a97f,3 +np.float32,0x7d4fc610,0x7f800000,3 +np.float32,0xbec30a20,0xbea230d5,3 +np.float32,0x3eae8a36,0x3ecffac5,3 +np.float32,0xbf1f08f1,0xbeece93c,3 +np.float32,0x80374376,0x80374376,3 +np.float32,0x3f2e04ca,0x3f793115,3 +np.float32,0x7e2c7e36,0x7f800000,3 +np.float32,0xbf686cae,0xbf18bcf0,3 +np.float32,0xbf5518cd,0xbf10a3da,3 +np.float32,0x807e233c,0x807e233c,3 +np.float32,0x7f4edd54,0x7f800000,3 +np.float32,0x7ed70088,0x7f800000,3 +np.float32,0x801675da,0x801675da,3 +np.float32,0x806735d5,0x806735d5,3 +np.float32,0xfe635fec,0xbf800000,3 +np.float32,0xfed88a0a,0xbf800000,3 +np.float32,0xff52c052,0xbf800000,3 +np.float32,0x7fc00000,0x7fc00000,3 +np.float32,0xff4f65f9,0xbf800000,3 +np.float32,0xfe0f6c20,0xbf800000,3 +np.float32,0x80322b30,0x80322b30,3 +np.float32,0xfb757000,0xbf800000,3 +np.float32,0x3c81e0,0x3c81e0,3 +np.float32,0x79d56a,0x79d56a,3 +np.float32,0x8029d7af,0x8029d7af,3 +np.float32,0x8058a593,0x8058a593,3 +np.float32,0x3f3a13c7,0x3f88c75c,3 +np.float32,0x2a6b05,0x2a6b05,3 +np.float32,0xbd64c960,0xbd5e83ae,3 +np.float32,0x80471052,0x80471052,3 +np.float32,0xbe5dd950,0xbe47766c,3 +np.float32,0xfd8f88f0,0xbf800000,3 +np.float32,0x75a4b7,0x75a4b7,3 +np.float32,0x3f726f2e,0x3fc9fb7d,3 +np.float32,0x3ed6795c,0x3f053115,3 +np.float32,0x17d7f5,0x17d7f5,3 +np.float32,0xbf4cf19b,0xbf0d094f,3 +np.float32,0x3e0ec532,0x3e1933c6,3 +np.float32,0xff084016,0xbf800000,3 +np.float32,0x800829aa,0x800829aa,3 +np.float32,0x806d7302,0x806d7302,3 +np.float32,0x7f59d9da,0x7f800000,3 +np.float32,0x15f8b9,0x15f8b9,3 +np.float32,0x803befb3,0x803befb3,3 +np.float32,0x525043,0x525043,3 +np.float32,0x51a647,0x51a647,3 +np.float32,0xbf1cfce4,0xbeeab3d9,3 +np.float32,0x3f1f27a4,0x3f5cb1d2,3 +np.float32,0xbebc3a04,0xbe9d8142,3 +np.float32,0xbeea548c,0xbebc07e5,3 +np.float32,0x3f47401c,0x3f96c2a3,3 +np.float32,0x806b1ea3,0x806b1ea3,3 +np.float32,0x3ea56bb8,0x3ec3450c,3 +np.float32,0x3f7b4963,0x3fd597b5,3 +np.float32,0x7f051fa0,0x7f800000,3 +np.float32,0x1d411c,0x1d411c,3 +np.float32,0xff0b6a35,0xbf800000,3 +np.float32,0xbead63c0,0xbe9314f7,3 +np.float32,0x3738be,0x3738be,3 +np.float32,0x3f138cc8,0x3f479155,3 +np.float32,0x800a539f,0x800a539f,3 +np.float32,0x801b0ebd,0x801b0ebd,3 +np.float32,0x318fcd,0x318fcd,3 +np.float32,0x3ed67556,0x3f052e06,3 +np.float32,0x702886,0x702886,3 +np.float32,0x80000001,0x80000001,3 +np.float32,0x70a174,0x70a174,3 +np.float32,0x4f9c66,0x4f9c66,3 +np.float32,0x3e3e1927,0x3e50e351,3 +np.float32,0x7eac9a4d,0x7f800000,3 +np.float32,0x4b7407,0x4b7407,3 +np.float32,0x7f5bd2fd,0x7f800000,3 +np.float32,0x3eaafc58,0x3ecaffbd,3 +np.float32,0xbc989360,0xbc9729e2,3 +np.float32,0x3f470e5c,0x3f968c7b,3 +np.float32,0x4c5672,0x4c5672,3 +np.float32,0xff2b2ee2,0xbf800000,3 +np.float32,0xbf28a104,0xbef7079b,3 +np.float32,0x2c6175,0x2c6175,3 +np.float32,0x3d7e4fb0,0x3d832f9f,3 +np.float32,0x763276,0x763276,3 +np.float32,0x3cf364,0x3cf364,3 +np.float32,0xbf7ace75,0xbf1fe48c,3 +np.float32,0xff19e858,0xbf800000,3 +np.float32,0x80504c70,0x80504c70,3 +np.float32,0xff390210,0xbf800000,3 +np.float32,0x8046a743,0x8046a743,3 +np.float32,0x80000000,0x80000000,3 +np.float32,0x806c51da,0x806c51da,3 
+np.float32,0x806ab38f,0x806ab38f,3 +np.float32,0x3f3de863,0x3f8cc538,3 +np.float32,0x7f6d45bb,0x7f800000,3 +np.float32,0xfd16ec60,0xbf800000,3 +np.float32,0x80513cba,0x80513cba,3 +np.float32,0xbf68996b,0xbf18cefa,3 +np.float32,0xfe039f2c,0xbf800000,3 +np.float32,0x3f013207,0x3f280c55,3 +np.float32,0x7ef4bc07,0x7f800000,3 +np.float32,0xbe8b65ac,0xbe741069,3 +np.float32,0xbf7a8186,0xbf1fc7a6,3 +np.float32,0x802532e5,0x802532e5,3 +np.float32,0x32c7df,0x32c7df,3 +np.float32,0x3ce4dceb,0x3ce81701,3 +np.float32,0xfe801118,0xbf800000,3 +np.float32,0x3d905f20,0x3d9594fb,3 +np.float32,0xbe11ed28,0xbe080168,3 +np.float32,0x59e773,0x59e773,3 +np.float32,0x3e9a2547,0x3eb3dd57,3 +np.float32,0x7ecb7c67,0x7f800000,3 +np.float32,0x7f69a67e,0x7f800000,3 +np.float32,0xff121e11,0xbf800000,3 +np.float32,0x3f7917cb,0x3fd2ad8c,3 +np.float32,0xbf1a7da8,0xbee7fc0c,3 +np.float32,0x3f077e66,0x3f329c40,3 +np.float32,0x3ce8e040,0x3cec37b3,3 +np.float32,0xbf3f0b8e,0xbf069f4d,3 +np.float32,0x3f52f194,0x3fa3c9d6,3 +np.float32,0xbf0e7422,0xbeda80f2,3 +np.float32,0xfd67e230,0xbf800000,3 +np.float32,0xff14d9a9,0xbf800000,3 +np.float32,0x3f3546e3,0x3f83dc2b,3 +np.float32,0x3e152e3a,0x3e20983d,3 +np.float32,0x4a89a3,0x4a89a3,3 +np.float32,0x63217,0x63217,3 +np.float32,0xbeb9e2a8,0xbe9be153,3 +np.float32,0x7e9fa049,0x7f800000,3 +np.float32,0x7f58110c,0x7f800000,3 +np.float32,0x3e88290c,0x3e9bfba9,3 +np.float32,0xbf2cb206,0xbefb3494,3 +np.float32,0xff5880c4,0xbf800000,3 +np.float32,0x7ecff3ac,0x7f800000,3 +np.float32,0x3f4b3de6,0x3f9b23fd,3 +np.float32,0xbebd2048,0xbe9e208c,3 +np.float32,0xff08f7a2,0xbf800000,3 +np.float32,0xff473330,0xbf800000,3 +np.float32,0x1,0x1,3 +np.float32,0xbf5dc239,0xbf14584b,3 +np.float32,0x458e3f,0x458e3f,3 +np.float32,0xbdb8a650,0xbdb091f8,3 +np.float32,0xff336ffc,0xbf800000,3 +np.float32,0x3c60bd00,0x3c624966,3 +np.float32,0xbe16a4f8,0xbe0c1664,3 +np.float32,0x3f214246,0x3f60a0f0,3 +np.float32,0x7fa00000,0x7fe00000,3 +np.float32,0x7e08737e,0x7f800000,3 +np.float32,0x3f70574c,0x3fc74b8e,3 +np.float32,0xbed5745c,0xbeae8c77,3 +np.float32,0x361752,0x361752,3 +np.float32,0x3eb276d6,0x3ed584ea,3 +np.float32,0x3f03fc1e,0x3f2cb1a5,3 +np.float32,0x3fafd1,0x3fafd1,3 +np.float32,0x7e50d74c,0x7f800000,3 +np.float32,0x3eeca5,0x3eeca5,3 +np.float32,0x5dc963,0x5dc963,3 +np.float32,0x7f0e63ae,0x7f800000,3 +np.float32,0x8021745f,0x8021745f,3 +np.float32,0xbf5881a9,0xbf121d07,3 +np.float32,0x7dadc7fd,0x7f800000,3 +np.float32,0xbf2c0798,0xbefa86bb,3 +np.float32,0x3e635f50,0x3e7e97a9,3 +np.float32,0xbf2053fa,0xbeee4c0e,3 +np.float32,0x3e8eee2b,0x3ea4dfcc,3 +np.float32,0xfc8a03c0,0xbf800000,3 +np.float32,0xfd9e4948,0xbf800000,3 +np.float32,0x801e817e,0x801e817e,3 +np.float32,0xbf603a27,0xbf1560c3,3 +np.float32,0x7f729809,0x7f800000,3 +np.float32,0x3f5a1864,0x3fac0e04,3 +np.float32,0x3e7648b8,0x3e8b3677,3 +np.float32,0x3edade24,0x3f088bc1,3 +np.float32,0x65e16e,0x65e16e,3 +np.float32,0x3f24aa50,0x3f671117,3 +np.float32,0x803cb1d0,0x803cb1d0,3 +np.float32,0xbe7b1858,0xbe5eadcc,3 +np.float32,0xbf19bb27,0xbee726fb,3 +np.float32,0xfd1f6e60,0xbf800000,3 +np.float32,0xfeb0de60,0xbf800000,3 +np.float32,0xff511a52,0xbf800000,3 +np.float32,0xff7757f7,0xbf800000,3 +np.float32,0x463ff5,0x463ff5,3 +np.float32,0x3f770d12,0x3fcffcc2,3 +np.float32,0xbf208562,0xbeee80dc,3 +np.float32,0x6df204,0x6df204,3 +np.float32,0xbf62d24f,0xbf1673fb,3 +np.float32,0x3dfcf210,0x3e069d5f,3 +np.float32,0xbef26002,0xbec114d7,3 +np.float32,0x7f800000,0x7f800000,3 +np.float32,0x7f30fb85,0x7f800000,3 +np.float32,0x7ee5dfef,0x7f800000,3 
+np.float32,0x3f317829,0x3f800611,3 +np.float32,0x3f4b0bbd,0x3f9aec88,3 +np.float32,0x7edf708c,0x7f800000,3 +np.float32,0xff071260,0xbf800000,3 +np.float32,0x3e7b8c30,0x3e8e9198,3 +np.float32,0x3f33778b,0x3f82077f,3 +np.float32,0x3e8cd11d,0x3ea215fd,3 +np.float32,0x8004483d,0x8004483d,3 +np.float32,0x801633e3,0x801633e3,3 +np.float32,0x7e76eb15,0x7f800000,3 +np.float32,0x3c1571,0x3c1571,3 +np.float32,0x7de3de52,0x7f800000,3 +np.float32,0x804ae906,0x804ae906,3 +np.float32,0x7f3a2616,0x7f800000,3 +np.float32,0xff7fffff,0xbf800000,3 +np.float32,0xff5d17e4,0xbf800000,3 +np.float32,0xbeaa6704,0xbe90f252,3 +np.float32,0x7e6a43af,0x7f800000,3 +np.float32,0x2a0f35,0x2a0f35,3 +np.float32,0xfd8fece0,0xbf800000,3 +np.float32,0xfeef2e2a,0xbf800000,3 +np.float32,0xff800000,0xbf800000,3 +np.float32,0xbeefcc52,0xbebf78e4,3 +np.float32,0x3db6c490,0x3dbf2bd5,3 +np.float32,0x8290f,0x8290f,3 +np.float32,0xbeace648,0xbe92bb7f,3 +np.float32,0x801fea79,0x801fea79,3 +np.float32,0x3ea6c230,0x3ec51ebf,3 +np.float32,0x3e5f2ca3,0x3e795c8a,3 +np.float32,0x3eb6f634,0x3edbeb9f,3 +np.float32,0xff790b45,0xbf800000,3 +np.float32,0x3d82e240,0x3d872816,3 +np.float32,0x3f0d6a57,0x3f3cc7db,3 +np.float32,0x7f08531a,0x7f800000,3 +np.float32,0x702b6d,0x702b6d,3 +np.float32,0x7d3a3c38,0x7f800000,3 +np.float32,0x3d0a7fb3,0x3d0cddf3,3 +np.float32,0xff28084c,0xbf800000,3 +np.float32,0xfeee8804,0xbf800000,3 +np.float32,0x804094eb,0x804094eb,3 +np.float32,0x7acb39,0x7acb39,3 +np.float32,0x3f01c07a,0x3f28f88c,3 +np.float32,0x3e05c500,0x3e0ee674,3 +np.float32,0xbe6f7c38,0xbe558ac1,3 +np.float32,0x803b1f4b,0x803b1f4b,3 +np.float32,0xbf76561f,0xbf1e332b,3 +np.float32,0xff30d368,0xbf800000,3 +np.float32,0x7e2e1f38,0x7f800000,3 +np.float32,0x3ee085b8,0x3f0ce7c0,3 +np.float32,0x8064c4a7,0x8064c4a7,3 +np.float32,0xa7c1d,0xa7c1d,3 +np.float32,0x3f27498a,0x3f6c14bc,3 +np.float32,0x137ca,0x137ca,3 +np.float32,0x3d0a5c60,0x3d0cb969,3 +np.float32,0x80765f1f,0x80765f1f,3 +np.float32,0x80230a71,0x80230a71,3 +np.float32,0x3f321ed2,0x3f80acf4,3 +np.float32,0x7d61e7f4,0x7f800000,3 +np.float32,0xbf39f7f2,0xbf0430f7,3 +np.float32,0xbe2503f8,0xbe1867e8,3 +np.float32,0x29333d,0x29333d,3 +np.float32,0x7edc5a0e,0x7f800000,3 +np.float32,0xbe81a8a2,0xbe651663,3 +np.float32,0x7f76ab6d,0x7f800000,3 +np.float32,0x7f46111f,0x7f800000,3 +np.float32,0xff0fc888,0xbf800000,3 +np.float32,0x805ece89,0x805ece89,3 +np.float32,0xc390b,0xc390b,3 +np.float32,0xff64bdee,0xbf800000,3 +np.float32,0x3dd07e4e,0x3ddb79bd,3 +np.float32,0xfecc1f10,0xbf800000,3 +np.float32,0x803f5177,0x803f5177,3 +np.float32,0x802a24d2,0x802a24d2,3 +np.float32,0x7f27d0cc,0x7f800000,3 +np.float32,0x3ef57c98,0x3f1d7e88,3 +np.float32,0x7b848d,0x7b848d,3 +np.float32,0x7f7fffff,0x7f800000,3 +np.float32,0xfe889c46,0xbf800000,3 +np.float32,0xff2d6dc5,0xbf800000,3 +np.float32,0x3f53a186,0x3fa492a6,3 +np.float32,0xbf239c94,0xbef1c90c,3 +np.float32,0xff7c0f4e,0xbf800000,3 +np.float32,0x3e7c69a9,0x3e8f1f3a,3 +np.float32,0xbf47c9e9,0xbf0ab2a9,3 +np.float32,0xbc1eaf00,0xbc1deae9,3 +np.float32,0x3f4a6d39,0x3f9a3d8e,3 +np.float32,0x3f677930,0x3fbc26eb,3 +np.float32,0x3f45eea1,0x3f955418,3 +np.float32,0x7f61a1f8,0x7f800000,3 +np.float32,0xff58c7c6,0xbf800000,3 +np.float32,0x80239801,0x80239801,3 +np.float32,0xff56e616,0xbf800000,3 +np.float32,0xff62052c,0xbf800000,3 +np.float32,0x8009b615,0x8009b615,3 +np.float32,0x293d6b,0x293d6b,3 +np.float32,0xfe9e585c,0xbf800000,3 +np.float32,0x7f58ff4b,0x7f800000,3 +np.float32,0x10937c,0x10937c,3 +np.float32,0x7f5cc13f,0x7f800000,3 +np.float32,0x110c5d,0x110c5d,3 
+np.float32,0x805e51fc,0x805e51fc,3 +np.float32,0xbedcf70a,0xbeb3766c,3 +np.float32,0x3f4d5e42,0x3f9d8091,3 +np.float32,0xff5925a0,0xbf800000,3 +np.float32,0x7e87cafa,0x7f800000,3 +np.float32,0xbf6474b2,0xbf171fee,3 +np.float32,0x4b39b2,0x4b39b2,3 +np.float32,0x8020cc28,0x8020cc28,3 +np.float32,0xff004ed8,0xbf800000,3 +np.float32,0xbf204cf5,0xbeee448d,3 +np.float32,0x3e30cf10,0x3e40fdb1,3 +np.float32,0x80202bee,0x80202bee,3 +np.float32,0xbf55a985,0xbf10e2bc,3 +np.float32,0xbe297dd8,0xbe1c351c,3 +np.float32,0x5780d9,0x5780d9,3 +np.float32,0x7ef729fa,0x7f800000,3 +np.float32,0x8039a3b5,0x8039a3b5,3 +np.float32,0x7cdd3f,0x7cdd3f,3 +np.float32,0x7ef0145a,0x7f800000,3 +np.float32,0x807ad7ae,0x807ad7ae,3 +np.float32,0x7f6c2643,0x7f800000,3 +np.float32,0xbec56124,0xbea3c929,3 +np.float32,0x512c3b,0x512c3b,3 +np.float32,0xbed3effe,0xbead8c1e,3 +np.float32,0x7f5e0a4d,0x7f800000,3 +np.float32,0x3f315316,0x3f7fc200,3 +np.float32,0x7eca5727,0x7f800000,3 +np.float32,0x7f4834f3,0x7f800000,3 +np.float32,0x8004af6d,0x8004af6d,3 +np.float32,0x3f223ca4,0x3f6277e3,3 +np.float32,0x7eea4fdd,0x7f800000,3 +np.float32,0x3e7143e8,0x3e880763,3 +np.float32,0xbf737008,0xbf1d160e,3 +np.float32,0xfc408b00,0xbf800000,3 +np.float32,0x803912ca,0x803912ca,3 +np.float32,0x7db31f4e,0x7f800000,3 +np.float32,0xff578b54,0xbf800000,3 +np.float32,0x3f068ec4,0x3f31062b,3 +np.float32,0x35f64f,0x35f64f,3 +np.float32,0x80437df4,0x80437df4,3 +np.float32,0x568059,0x568059,3 +np.float32,0x8005f8ba,0x8005f8ba,3 +np.float32,0x6824ad,0x6824ad,3 +np.float32,0xff3fdf30,0xbf800000,3 +np.float32,0xbf6f7682,0xbf1b89d6,3 +np.float32,0x3dcea8a0,0x3dd971f5,3 +np.float32,0x3ee32a62,0x3f0ef5a9,3 +np.float32,0xbf735bcd,0xbf1d0e3d,3 +np.float32,0x7e8c7c28,0x7f800000,3 +np.float32,0x3ed552bc,0x3f045161,3 +np.float32,0xfed90a8a,0xbf800000,3 +np.float32,0xbe454368,0xbe336d2a,3 +np.float32,0xbf171d26,0xbee4442d,3 +np.float32,0x80652bf9,0x80652bf9,3 +np.float32,0xbdbaaa20,0xbdb26914,3 +np.float32,0x3f56063d,0x3fa7522e,3 +np.float32,0x3d3d4fd3,0x3d41c13f,3 +np.float32,0x80456040,0x80456040,3 +np.float32,0x3dc15586,0x3dcac0ef,3 +np.float32,0x7f753060,0x7f800000,3 +np.float32,0x7f7d8039,0x7f800000,3 +np.float32,0xfdebf280,0xbf800000,3 +np.float32,0xbf1892c3,0xbee5e116,3 +np.float32,0xbf0f1468,0xbedb3878,3 +np.float32,0x40d85c,0x40d85c,3 +np.float32,0x3f93dd,0x3f93dd,3 +np.float32,0xbf5730fd,0xbf118c24,3 +np.float32,0xfe17aa44,0xbf800000,3 +np.float32,0x3dc0baf4,0x3dca1716,3 +np.float32,0xbf3433d8,0xbf015efb,3 +np.float32,0x1c59f5,0x1c59f5,3 +np.float32,0x802b1540,0x802b1540,3 +np.float32,0xbe47df6c,0xbe35936e,3 +np.float32,0xbe8e7070,0xbe78af32,3 +np.float32,0xfe7057f4,0xbf800000,3 +np.float32,0x80668b69,0x80668b69,3 +np.float32,0xbe677810,0xbe4f2c2d,3 +np.float32,0xbe7a2f1c,0xbe5df733,3 +np.float32,0xfeb79e3c,0xbf800000,3 +np.float32,0xbeb6e320,0xbe99c9e8,3 +np.float32,0xfea188f2,0xbf800000,3 +np.float32,0x7dcaeb15,0x7f800000,3 +np.float32,0x1be567,0x1be567,3 +np.float32,0xbf4041cc,0xbf07320d,3 +np.float32,0x3f721aa7,0x3fc98e9a,3 +np.float32,0x7f5aa835,0x7f800000,3 +np.float32,0x15180e,0x15180e,3 +np.float32,0x3f73d739,0x3fcbccdb,3 +np.float32,0xbeecd380,0xbebd9b36,3 +np.float32,0x3f2caec7,0x3f768fea,3 +np.float32,0xbeaf65f2,0xbe9482bb,3 +np.float32,0xfe6aa384,0xbf800000,3 +np.float32,0xbf4f2c0a,0xbf0e085e,3 +np.float32,0xbf2b5907,0xbef9d431,3 +np.float32,0x3e855e0d,0x3e985960,3 +np.float32,0x8056cc64,0x8056cc64,3 +np.float32,0xff746bb5,0xbf800000,3 +np.float32,0x3e0332f6,0x3e0bf986,3 +np.float32,0xff637720,0xbf800000,3 +np.float32,0xbf330676,0xbf00c990,3 
+np.float32,0x3ec449a1,0x3eef3862,3 +np.float32,0x766541,0x766541,3 +np.float32,0xfe2edf6c,0xbf800000,3 +np.float32,0xbebb28ca,0xbe9cc3e2,3 +np.float32,0x3f16c930,0x3f4d5ce4,3 +np.float32,0x7f1a9a4a,0x7f800000,3 +np.float32,0x3e9ba1,0x3e9ba1,3 +np.float32,0xbf73d5f6,0xbf1d3d69,3 +np.float32,0xfdc8a8b0,0xbf800000,3 +np.float32,0x50f051,0x50f051,3 +np.float32,0xff0add02,0xbf800000,3 +np.float32,0x1e50bf,0x1e50bf,3 +np.float32,0x3f04d287,0x3f2e1948,3 +np.float32,0x7f1e50,0x7f1e50,3 +np.float32,0x2affb3,0x2affb3,3 +np.float32,0x80039f07,0x80039f07,3 +np.float32,0x804ba79e,0x804ba79e,3 +np.float32,0x7b5a8eed,0x7f800000,3 +np.float32,0x3e1a8b28,0x3e26d0a7,3 +np.float32,0x3ea95f29,0x3ec8bfa4,3 +np.float32,0x7e09fa55,0x7f800000,3 +np.float32,0x7eacb1b3,0x7f800000,3 +np.float32,0x3e8ad7c0,0x3e9f7dec,3 +np.float32,0x7e0e997c,0x7f800000,3 +np.float32,0x3f4422b4,0x3f936398,3 +np.float32,0x806bd222,0x806bd222,3 +np.float32,0x677ae6,0x677ae6,3 +np.float32,0x62cf68,0x62cf68,3 +np.float32,0x7e4e594e,0x7f800000,3 +np.float32,0x80445fd1,0x80445fd1,3 +np.float32,0xff3a0d04,0xbf800000,3 +np.float32,0x8052b256,0x8052b256,3 +np.float32,0x3cb34440,0x3cb53e11,3 +np.float32,0xbf0e3865,0xbeda3c6d,3 +np.float32,0x3f49f5df,0x3f99ba17,3 +np.float32,0xbed75a22,0xbeafcc09,3 +np.float32,0xbf7aec64,0xbf1fefc8,3 +np.float32,0x7f35a62d,0x7f800000,3 +np.float32,0xbf787b03,0xbf1f03fc,3 +np.float32,0x8006a62a,0x8006a62a,3 +np.float32,0x3f6419e7,0x3fb803c7,3 +np.float32,0x3ecea2e5,0x3efe8f01,3 +np.float32,0x80603577,0x80603577,3 +np.float32,0xff73198c,0xbf800000,3 +np.float32,0x7def110a,0x7f800000,3 +np.float32,0x544efd,0x544efd,3 +np.float32,0x3f052340,0x3f2ea0fc,3 +np.float32,0xff306666,0xbf800000,3 +np.float32,0xbf800000,0xbf21d2a7,3 +np.float32,0xbed3e150,0xbead826a,3 +np.float32,0x3f430c99,0x3f92390f,3 +np.float32,0xbf4bffa4,0xbf0c9c73,3 +np.float32,0xfd97a710,0xbf800000,3 +np.float32,0x3cadf0fe,0x3cafcd1a,3 +np.float32,0x807af7b4,0x807af7b4,3 +np.float32,0xbc508600,0xbc4f33bc,3 +np.float32,0x7f3e0ec7,0x7f800000,3 +np.float32,0xbe51334c,0xbe3d36f7,3 +np.float32,0xfe7b7fb4,0xbf800000,3 +np.float32,0xfed9c45e,0xbf800000,3 +np.float32,0x3da024eb,0x3da6926a,3 +np.float32,0x7eed9e76,0x7f800000,3 +np.float32,0xbf2b8f1f,0xbefa0b91,3 +np.float32,0x3f2b9286,0x3f746318,3 +np.float32,0xfe8af49c,0xbf800000,3 +np.float32,0x9c4f7,0x9c4f7,3 +np.float32,0x801d7543,0x801d7543,3 +np.float32,0xbf66474a,0xbf17de66,3 +np.float32,0xbf562155,0xbf1116b1,3 +np.float32,0x46a8de,0x46a8de,3 +np.float32,0x8053fe6b,0x8053fe6b,3 +np.float32,0xbf6ee842,0xbf1b51f3,3 +np.float32,0xbf6ad78e,0xbf19b565,3 +np.float32,0xbf012574,0xbecad7ff,3 +np.float32,0x748364,0x748364,3 +np.float32,0x8073f59b,0x8073f59b,3 +np.float32,0xff526825,0xbf800000,3 +np.float32,0xfeb02dc4,0xbf800000,3 +np.float32,0x8033eb1c,0x8033eb1c,3 +np.float32,0x3f3685ea,0x3f8520cc,3 +np.float32,0x7f657902,0x7f800000,3 +np.float32,0xbf75eac4,0xbf1e0a1f,3 +np.float32,0xfe67f384,0xbf800000,3 +np.float32,0x3f56d3cc,0x3fa83faf,3 +np.float32,0x44a4ce,0x44a4ce,3 +np.float32,0x1dc4b3,0x1dc4b3,3 +np.float32,0x4fb3b2,0x4fb3b2,3 +np.float32,0xbea904a4,0xbe8ff3ed,3 +np.float32,0x7e668f16,0x7f800000,3 +np.float32,0x7f538378,0x7f800000,3 +np.float32,0x80541709,0x80541709,3 +np.float32,0x80228040,0x80228040,3 +np.float32,0x7ef9694e,0x7f800000,3 +np.float32,0x3f5fca9b,0x3fb2ce54,3 +np.float32,0xbe9c43c2,0xbe86ab84,3 +np.float32,0xfecee000,0xbf800000,3 +np.float32,0x5a65c2,0x5a65c2,3 +np.float32,0x3f736572,0x3fcb3985,3 +np.float32,0xbf2a03f7,0xbef87600,3 +np.float32,0xfe96b488,0xbf800000,3 
+np.float32,0xfedd8800,0xbf800000,3 +np.float32,0x80411804,0x80411804,3 +np.float32,0x7edcb0a6,0x7f800000,3 +np.float32,0x2bb882,0x2bb882,3 +np.float32,0x3f800000,0x3fdbf0a9,3 +np.float32,0x764b27,0x764b27,3 +np.float32,0x7e92035d,0x7f800000,3 +np.float32,0x3e80facb,0x3e92ae1d,3 +np.float32,0x8040b81a,0x8040b81a,3 +np.float32,0x7f487fe4,0x7f800000,3 +np.float32,0xbc641780,0xbc6282ed,3 +np.float32,0x804b0bb9,0x804b0bb9,3 +np.float32,0x7d0b7c39,0x7f800000,3 +np.float32,0xff072080,0xbf800000,3 +np.float32,0xbed7aff8,0xbeb00462,3 +np.float32,0x35e247,0x35e247,3 +np.float32,0xbf7edd19,0xbf216766,3 +np.float32,0x8004a539,0x8004a539,3 +np.float32,0xfdfc1790,0xbf800000,3 +np.float32,0x8037a841,0x8037a841,3 +np.float32,0xfed0a8a8,0xbf800000,3 +np.float32,0x7f1f1697,0x7f800000,3 +np.float32,0x3f2ccc6e,0x3f76ca23,3 +np.float32,0x35eada,0x35eada,3 +np.float32,0xff111f42,0xbf800000,3 +np.float32,0x3ee1ab7f,0x3f0dcbbe,3 +np.float32,0xbf6e89ee,0xbf1b2cd4,3 +np.float32,0x3f58611c,0x3faa0cdc,3 +np.float32,0x1ac6a6,0x1ac6a6,3 +np.float32,0xbf1286fa,0xbedf2312,3 +np.float32,0x7e451137,0x7f800000,3 +np.float32,0xbe92c326,0xbe7f3405,3 +np.float32,0x3f2fdd16,0x3f7cd87b,3 +np.float32,0xbe5c0ea0,0xbe4604c2,3 +np.float32,0xbdb29968,0xbdab0883,3 +np.float32,0x3964,0x3964,3 +np.float32,0x3f0dc236,0x3f3d60a0,3 +np.float32,0x7c3faf06,0x7f800000,3 +np.float32,0xbef41f7a,0xbec22b16,3 +np.float32,0x3f4c0289,0x3f9bfdcc,3 +np.float32,0x806084e9,0x806084e9,3 +np.float32,0x3ed1d8dd,0x3f01b0c1,3 +np.float32,0x806d8d8b,0x806d8d8b,3 +np.float32,0x3f052180,0x3f2e9e0a,3 +np.float32,0x803d85d5,0x803d85d5,3 +np.float32,0x3e0afd70,0x3e14dd48,3 +np.float32,0x2fbc63,0x2fbc63,3 +np.float32,0x2e436f,0x2e436f,3 +np.float32,0xbf7b19e6,0xbf2000da,3 +np.float32,0x3f34022e,0x3f829362,3 +np.float32,0x3d2b40e0,0x3d2ee246,3 +np.float32,0x3f5298b4,0x3fa3649b,3 +np.float32,0xbdb01328,0xbda8b7de,3 +np.float32,0x7f693c81,0x7f800000,3 +np.float32,0xbeb1abc0,0xbe961edc,3 +np.float32,0x801d9b5d,0x801d9b5d,3 +np.float32,0x80628668,0x80628668,3 +np.float32,0x800f57dd,0x800f57dd,3 +np.float32,0x8017c94f,0x8017c94f,3 +np.float32,0xbf16f5f4,0xbee418b8,3 +np.float32,0x3e686476,0x3e827022,3 +np.float32,0xbf256796,0xbef3abd9,3 +np.float32,0x7f1b4485,0x7f800000,3 +np.float32,0xbea0b3cc,0xbe89ed21,3 +np.float32,0xfee08b2e,0xbf800000,3 +np.float32,0x523cb4,0x523cb4,3 +np.float32,0x3daf2cb2,0x3db6e273,3 +np.float32,0xbd531c40,0xbd4dc323,3 +np.float32,0x80078fe5,0x80078fe5,3 +np.float32,0x80800000,0x80800000,3 +np.float32,0x3f232438,0x3f642d1a,3 +np.float32,0x3ec29446,0x3eecb7c0,3 +np.float32,0x3dbcd2a4,0x3dc5cd1d,3 +np.float32,0x7f045b0d,0x7f800000,3 +np.float32,0x7f22e6d1,0x7f800000,3 +np.float32,0xbf5d3430,0xbf141c80,3 +np.float32,0xbe03ec70,0xbdf78ee6,3 +np.float32,0x3e93ec9a,0x3eab822f,3 +np.float32,0x7f3b9262,0x7f800000,3 +np.float32,0x65ac6a,0x65ac6a,3 +np.float32,0x3db9a8,0x3db9a8,3 +np.float32,0xbf37ab59,0xbf031306,3 +np.float32,0x33c40e,0x33c40e,3 +np.float32,0x7f7a478f,0x7f800000,3 +np.float32,0xbe8532d0,0xbe6a906f,3 +np.float32,0x801c081d,0x801c081d,3 +np.float32,0xbe4212a0,0xbe30ca73,3 +np.float32,0xff0b603e,0xbf800000,3 +np.float32,0x4554dc,0x4554dc,3 +np.float32,0x3dd324be,0x3dde695e,3 +np.float32,0x3f224c44,0x3f629557,3 +np.float32,0x8003cd79,0x8003cd79,3 +np.float32,0xbf31351c,0xbeffc2fd,3 +np.float32,0x8034603a,0x8034603a,3 +np.float32,0xbf6fcb70,0xbf1bab24,3 +np.float32,0x804eb67e,0x804eb67e,3 +np.float32,0xff05c00e,0xbf800000,3 +np.float32,0x3eb5b36f,0x3eda1ec7,3 +np.float32,0x3f1ed7f9,0x3f5c1d90,3 +np.float32,0x3f052d8a,0x3f2eb24b,3 
+np.float32,0x5ddf51,0x5ddf51,3 +np.float32,0x7e50c11c,0x7f800000,3 +np.float32,0xff74f55a,0xbf800000,3 +np.float32,0x4322d,0x4322d,3 +np.float32,0x3f16f8a9,0x3f4db27a,3 +np.float32,0x3f4f23d6,0x3f9f7c2c,3 +np.float32,0xbf706c1e,0xbf1bea0a,3 +np.float32,0x3f2cbd52,0x3f76ac77,3 +np.float32,0xf3043,0xf3043,3 +np.float32,0xfee79de0,0xbf800000,3 +np.float32,0x7e942f69,0x7f800000,3 +np.float32,0x180139,0x180139,3 +np.float32,0xff69c678,0xbf800000,3 +np.float32,0x3f46773f,0x3f95e840,3 +np.float32,0x804aae1c,0x804aae1c,3 +np.float32,0x3eb383b4,0x3ed7024c,3 +np.float32,0x8032624e,0x8032624e,3 +np.float32,0xbd0a0f80,0xbd07c27d,3 +np.float32,0xbf1c9b98,0xbeea4a61,3 +np.float32,0x7f370999,0x7f800000,3 +np.float32,0x801931f9,0x801931f9,3 +np.float32,0x3f6f45ce,0x3fc5eea0,3 +np.float32,0xff0ab4cc,0xbf800000,3 +np.float32,0x4c043d,0x4c043d,3 +np.float32,0x8002a599,0x8002a599,3 +np.float32,0xbc4a6080,0xbc4921d7,3 +np.float32,0x3f008d14,0x3f26fb72,3 +np.float32,0x7f48b3d9,0x7f800000,3 +np.float32,0x7cb2ec7e,0x7f800000,3 +np.float32,0xbf1338bd,0xbedfeb61,3 +np.float32,0x0,0x0,3 +np.float32,0xbf2f5b64,0xbefde71c,3 +np.float32,0xbe422974,0xbe30dd56,3 +np.float32,0x3f776be8,0x3fd07950,3 +np.float32,0xbf3e97a1,0xbf06684a,3 +np.float32,0x7d28cb26,0x7f800000,3 +np.float32,0x801618d2,0x801618d2,3 +np.float32,0x807e4f83,0x807e4f83,3 +np.float32,0x8006b07d,0x8006b07d,3 +np.float32,0xfea1c042,0xbf800000,3 +np.float32,0xff24ef74,0xbf800000,3 +np.float32,0xfef7ab16,0xbf800000,3 +np.float32,0x70b771,0x70b771,3 +np.float32,0x7daeb64e,0x7f800000,3 +np.float32,0xbe66e378,0xbe4eb59c,3 +np.float32,0xbead1534,0xbe92dcf7,3 +np.float32,0x7e6769b8,0x7f800000,3 +np.float32,0x7ecd0890,0x7f800000,3 +np.float32,0xbe7380d8,0xbe58b747,3 +np.float32,0x3efa6f2f,0x3f218265,3 +np.float32,0x3f59dada,0x3fabc5eb,3 +np.float32,0xff0f2d20,0xbf800000,3 +np.float32,0x8060210e,0x8060210e,3 +np.float32,0x3ef681e8,0x3f1e51c8,3 +np.float32,0x77a6dd,0x77a6dd,3 +np.float32,0xbebfdd0e,0xbea00399,3 +np.float32,0xfe889b72,0xbf800000,3 +np.float32,0x8049ed2c,0x8049ed2c,3 +np.float32,0x3b089dc4,0x3b08c23e,3 +np.float32,0xbf13c7c4,0xbee08c28,3 +np.float32,0x3efa13b9,0x3f2137d7,3 +np.float32,0x3e9385dc,0x3eaaf914,3 +np.float32,0x7e0e6a43,0x7f800000,3 +np.float32,0x7df6d63f,0x7f800000,3 +np.float32,0x3f3efead,0x3f8dea03,3 +np.float32,0xff52548c,0xbf800000,3 +np.float32,0x803ff9d8,0x803ff9d8,3 +np.float32,0x3c825823,0x3c836303,3 +np.float32,0xfc9e97a0,0xbf800000,3 +np.float32,0xfe644f48,0xbf800000,3 +np.float32,0x802f5017,0x802f5017,3 +np.float32,0x3d5753b9,0x3d5d1661,3 +np.float32,0x7f2a55d2,0x7f800000,3 +np.float32,0x7f4dabfe,0x7f800000,3 +np.float32,0x3f49492a,0x3f98fc47,3 +np.float32,0x3f4d1589,0x3f9d2f82,3 +np.float32,0xff016208,0xbf800000,3 +np.float32,0xbf571cb7,0xbf118365,3 +np.float32,0xbf1ef297,0xbeecd136,3 +np.float32,0x36266b,0x36266b,3 +np.float32,0xbed07b0e,0xbeab4129,3 +np.float32,0x7f553365,0x7f800000,3 +np.float32,0xfe9bb8c6,0xbf800000,3 +np.float32,0xbeb497d6,0xbe982e19,3 +np.float32,0xbf27af6c,0xbef60d16,3 +np.float32,0x55cf51,0x55cf51,3 +np.float32,0x3eab1db0,0x3ecb2e4f,3 +np.float32,0x3e777603,0x3e8bf62f,3 +np.float32,0x7f10e374,0x7f800000,3 +np.float32,0xbf1f6480,0xbeed4b8d,3 +np.float32,0x40479d,0x40479d,3 +np.float32,0x156259,0x156259,3 +np.float32,0x3d852e30,0x3d899b2d,3 +np.float32,0x80014ff3,0x80014ff3,3 +np.float32,0xbd812fa8,0xbd7a645c,3 +np.float32,0x800ab780,0x800ab780,3 +np.float32,0x3ea02ff4,0x3ebc13bd,3 +np.float32,0x7e858b8e,0x7f800000,3 +np.float32,0x75d63b,0x75d63b,3 +np.float32,0xbeb15c94,0xbe95e6e3,3 
+np.float32,0x3da0cee0,0x3da74a39,3 +np.float32,0xff21c01c,0xbf800000,3 +np.float32,0x8049b5eb,0x8049b5eb,3 +np.float32,0x80177ab0,0x80177ab0,3 +np.float32,0xff137a50,0xbf800000,3 +np.float32,0x3f7febba,0x3fdbd51c,3 +np.float32,0x8041e4dd,0x8041e4dd,3 +np.float32,0x99b8c,0x99b8c,3 +np.float32,0x5621ba,0x5621ba,3 +np.float32,0x14b534,0x14b534,3 +np.float32,0xbe2eb3a8,0xbe209c95,3 +np.float32,0x7e510c28,0x7f800000,3 +np.float32,0x804ec2f2,0x804ec2f2,3 +np.float32,0x3f662406,0x3fba82b0,3 +np.float32,0x800000,0x800000,3 +np.float32,0x3f3120d6,0x3f7f5d96,3 +np.float32,0x7f179b8e,0x7f800000,3 +np.float32,0x7f65278e,0x7f800000,3 +np.float32,0xfeb50f52,0xbf800000,3 +np.float32,0x7f051bd1,0x7f800000,3 +np.float32,0x7ea0558d,0x7f800000,3 +np.float32,0xbd0a96c0,0xbd08453f,3 +np.float64,0xee82da5ddd05c,0xee82da5ddd05c,3 +np.float64,0x800c3a22d7f87446,0x800c3a22d7f87446,3 +np.float64,0xbfd34b20eaa69642,0xbfd0a825e7688d3e,3 +np.float64,0x3fd6a0f2492d41e5,0x3fdb253b906057b3,3 +np.float64,0xbfda13d8783427b0,0xbfd56b1d76684332,3 +np.float64,0xbfe50b5a99ea16b5,0xbfded7dd82c6f746,3 +np.float64,0x3f82468fc0248d20,0x3f825b7fa9378ee9,3 +np.float64,0x7ff0000000000000,0x7ff0000000000000,3 +np.float64,0x856e50290adca,0x856e50290adca,3 +np.float64,0x7fde55a5fa3cab4b,0x7ff0000000000000,3 +np.float64,0x7fcf2c8dd93e591b,0x7ff0000000000000,3 +np.float64,0x8001b3a0e3236743,0x8001b3a0e3236743,3 +np.float64,0x8000fdb14821fb63,0x8000fdb14821fb63,3 +np.float64,0xbfe3645e08e6c8bc,0xbfdd161362a5e9ef,3 +np.float64,0x7feb34d28b3669a4,0x7ff0000000000000,3 +np.float64,0x80099dd810933bb1,0x80099dd810933bb1,3 +np.float64,0xbfedbcc1097b7982,0xbfe35d86414d53dc,3 +np.float64,0x7fdc406fbdb880de,0x7ff0000000000000,3 +np.float64,0x800c4bf85ab897f1,0x800c4bf85ab897f1,3 +np.float64,0x3fd8f7b0e0b1ef60,0x3fde89b497ae20d8,3 +np.float64,0xffe4fced5c69f9da,0xbff0000000000000,3 +np.float64,0xbfe54d421fea9a84,0xbfdf1be0cbfbfcba,3 +np.float64,0x800af72f3535ee5f,0x800af72f3535ee5f,3 +np.float64,0x3fe24e6570e49ccb,0x3fe8b3a86d970411,3 +np.float64,0xbfdd7b22d0baf646,0xbfd79fac2e4f7558,3 +np.float64,0xbfe6a7654c6d4eca,0xbfe03c1f13f3b409,3 +np.float64,0x3fe2c3eb662587d7,0x3fe98566e625d4f5,3 +np.float64,0x3b1ef71e763e0,0x3b1ef71e763e0,3 +np.float64,0xffed03c6baba078d,0xbff0000000000000,3 +np.float64,0x3febac19d0b75834,0x3ff5fdacc9d51bcd,3 +np.float64,0x800635d6794c6bae,0x800635d6794c6bae,3 +np.float64,0xbfe8cafc827195f9,0xbfe1411438608ae1,3 +np.float64,0x7feeb616a83d6c2c,0x7ff0000000000000,3 +np.float64,0x3fd52d62a2aa5ac5,0x3fd91a07a7f18f44,3 +np.float64,0x80036996b8a6d32e,0x80036996b8a6d32e,3 +np.float64,0x2b1945965632a,0x2b1945965632a,3 +np.float64,0xbfecb5e8c9796bd2,0xbfe2f40fca276aa2,3 +np.float64,0x3fe8669ed4f0cd3e,0x3ff24c89fc9cdbff,3 +np.float64,0x71e9f65ee3d3f,0x71e9f65ee3d3f,3 +np.float64,0xbfd5ab262bab564c,0xbfd261ae108ef79e,3 +np.float64,0xbfe7091342ee1226,0xbfe06bf5622d75f6,3 +np.float64,0x49e888d093d12,0x49e888d093d12,3 +np.float64,0x2272f3dc44e5f,0x2272f3dc44e5f,3 +np.float64,0x7fe98736e0b30e6d,0x7ff0000000000000,3 +np.float64,0x30fa9cde61f54,0x30fa9cde61f54,3 +np.float64,0x7fdc163fc0382c7f,0x7ff0000000000000,3 +np.float64,0xffb40d04ee281a08,0xbff0000000000000,3 +np.float64,0xffe624617f2c48c2,0xbff0000000000000,3 +np.float64,0x3febb582bd376b05,0x3ff608da584d1716,3 +np.float64,0xfc30a5a5f8615,0xfc30a5a5f8615,3 +np.float64,0x3fef202efd7e405e,0x3ffa52009319b069,3 +np.float64,0x8004d0259829a04c,0x8004d0259829a04c,3 +np.float64,0x800622dc71ec45ba,0x800622dc71ec45ba,3 +np.float64,0xffefffffffffffff,0xbff0000000000000,3 
+np.float64,0x800e89113c9d1223,0x800e89113c9d1223,3 +np.float64,0x7fba7fde3034ffbb,0x7ff0000000000000,3 +np.float64,0xbfeea31e807d463d,0xbfe3b7369b725915,3 +np.float64,0x3feb7c9589f6f92c,0x3ff5c56cf71b0dff,3 +np.float64,0x3fd52d3b59aa5a77,0x3fd919d0f683fd07,3 +np.float64,0x800de90a43fbd215,0x800de90a43fbd215,3 +np.float64,0x3fe7eb35a9efd66b,0x3ff1c940dbfc6ef9,3 +np.float64,0xbda0adcb7b416,0xbda0adcb7b416,3 +np.float64,0x7fc5753e3a2aea7b,0x7ff0000000000000,3 +np.float64,0xffdd101d103a203a,0xbff0000000000000,3 +np.float64,0x7fcb54f56836a9ea,0x7ff0000000000000,3 +np.float64,0xbfd61c8d6eac391a,0xbfd2b23bc0a2cef4,3 +np.float64,0x3feef55de37deabc,0x3ffa198639a0161d,3 +np.float64,0x7fe4ffbfaea9ff7e,0x7ff0000000000000,3 +np.float64,0x9d1071873a20e,0x9d1071873a20e,3 +np.float64,0x3fef1ecb863e3d97,0x3ffa502a81e09cfc,3 +np.float64,0xad2da12b5a5b4,0xad2da12b5a5b4,3 +np.float64,0xffe614b74c6c296e,0xbff0000000000000,3 +np.float64,0xffe60d3f286c1a7e,0xbff0000000000000,3 +np.float64,0x7fda7d91f4b4fb23,0x7ff0000000000000,3 +np.float64,0x800023f266a047e6,0x800023f266a047e6,3 +np.float64,0x7fdf5f9ad23ebf35,0x7ff0000000000000,3 +np.float64,0x3fa7459f002e8b3e,0x3fa7cf178dcf0af6,3 +np.float64,0x3fe9938d61f3271b,0x3ff39516a13caec3,3 +np.float64,0xbfd59314c3ab262a,0xbfd250830f73efd2,3 +np.float64,0xbfc7e193f72fc328,0xbfc5c924339dd7a8,3 +np.float64,0x7fec1965f17832cb,0x7ff0000000000000,3 +np.float64,0xbfd932908eb26522,0xbfd4d4312d272580,3 +np.float64,0xbfdf2d08e2be5a12,0xbfd8add1413b0b1b,3 +np.float64,0x7fdcf7cc74b9ef98,0x7ff0000000000000,3 +np.float64,0x7fc79300912f2600,0x7ff0000000000000,3 +np.float64,0xffd4bd8f23297b1e,0xbff0000000000000,3 +np.float64,0x41869ce0830e,0x41869ce0830e,3 +np.float64,0x3fe5dcec91ebb9da,0x3fef5e213598cbd4,3 +np.float64,0x800815d9c2902bb4,0x800815d9c2902bb4,3 +np.float64,0x800ba1a4b877434a,0x800ba1a4b877434a,3 +np.float64,0x80069d7bdc4d3af8,0x80069d7bdc4d3af8,3 +np.float64,0xcf00d4339e01b,0xcf00d4339e01b,3 +np.float64,0x80072b71bd4e56e4,0x80072b71bd4e56e4,3 +np.float64,0x80059ca6fbab394f,0x80059ca6fbab394f,3 +np.float64,0x3fe522fc092a45f8,0x3fedf212682bf894,3 +np.float64,0x7fe17f384ea2fe70,0x7ff0000000000000,3 +np.float64,0x0,0x0,3 +np.float64,0x3f72bb4c20257698,0x3f72c64766b52069,3 +np.float64,0x7fbc97c940392f92,0x7ff0000000000000,3 +np.float64,0xffc5904ebd2b209c,0xbff0000000000000,3 +np.float64,0xbfe34fb55b669f6a,0xbfdcff81dd30a49d,3 +np.float64,0x8007ccda006f99b5,0x8007ccda006f99b5,3 +np.float64,0x3fee50e4c8fca1ca,0x3ff9434c7750ad0f,3 +np.float64,0x7fee7b07c67cf60f,0x7ff0000000000000,3 +np.float64,0x3fdcce4a5a399c95,0x3fe230c83f28218a,3 +np.float64,0x7fee5187b37ca30e,0x7ff0000000000000,3 +np.float64,0x3fc48f6a97291ed8,0x3fc64db6200a9833,3 +np.float64,0xc7fec3498ffd9,0xc7fec3498ffd9,3 +np.float64,0x800769c59d2ed38c,0x800769c59d2ed38c,3 +np.float64,0xffe69ede782d3dbc,0xbff0000000000000,3 +np.float64,0x3fecd9770979b2ee,0x3ff76a1f2f0f08f2,3 +np.float64,0x5aa358a8b546c,0x5aa358a8b546c,3 +np.float64,0xbfe795a0506f2b40,0xbfe0afcc52c0166b,3 +np.float64,0xffd4ada1e8a95b44,0xbff0000000000000,3 +np.float64,0xffcac1dc213583b8,0xbff0000000000000,3 +np.float64,0xffe393c15fa72782,0xbff0000000000000,3 +np.float64,0xbfcd6a3c113ad478,0xbfca47a2157b9cdd,3 +np.float64,0xffedde20647bbc40,0xbff0000000000000,3 +np.float64,0x3fd0d011b1a1a024,0x3fd33a57945559f4,3 +np.float64,0x3fef27e29f7e4fc6,0x3ffa5c314e0e3d69,3 +np.float64,0xffe96ff71f72dfee,0xbff0000000000000,3 +np.float64,0xffe762414f2ec482,0xbff0000000000000,3 +np.float64,0x3fc2dcfd3d25b9fa,0x3fc452f41682a12e,3 
+np.float64,0xbfbdb125b63b6248,0xbfbc08e6553296d4,3 +np.float64,0x7b915740f724,0x7b915740f724,3 +np.float64,0x60b502b2c16a1,0x60b502b2c16a1,3 +np.float64,0xbfeb38b0be367162,0xbfe254f6782cfc47,3 +np.float64,0x800dc39a3edb8735,0x800dc39a3edb8735,3 +np.float64,0x3fea4fb433349f68,0x3ff468b97cf699f5,3 +np.float64,0xbfd49967962932d0,0xbfd19ceb41ff4cd0,3 +np.float64,0xbfebf75cd377eeba,0xbfe2a576bdbccccc,3 +np.float64,0xbfb653d65c2ca7b0,0xbfb561ab8fcb3f26,3 +np.float64,0xffe3f34b8727e696,0xbff0000000000000,3 +np.float64,0x3fdd798064baf301,0x3fe2b7c130a6fc63,3 +np.float64,0x3febe027e6b7c050,0x3ff63bac1b22e12d,3 +np.float64,0x7fcaa371af3546e2,0x7ff0000000000000,3 +np.float64,0xbfe6ee980a2ddd30,0xbfe05f0bc5dc80d2,3 +np.float64,0xc559c33f8ab39,0xc559c33f8ab39,3 +np.float64,0x84542c2b08a86,0x84542c2b08a86,3 +np.float64,0xbfe5645e046ac8bc,0xbfdf3398dc3cc1bd,3 +np.float64,0x3fee8c48ae7d1892,0x3ff9902899480526,3 +np.float64,0x3fb706471c2e0c8e,0x3fb817787aace8db,3 +np.float64,0x7fefe78f91ffcf1e,0x7ff0000000000000,3 +np.float64,0xbfcf6d560b3edaac,0xbfcbddc72a2130df,3 +np.float64,0x7fd282bfd925057f,0x7ff0000000000000,3 +np.float64,0x3fb973dbee32e7b8,0x3fbac2c87cbd0215,3 +np.float64,0x3fd1ce38ff239c72,0x3fd4876de5164420,3 +np.float64,0x8008ac2e3c31585d,0x8008ac2e3c31585d,3 +np.float64,0x3fa05e06dc20bc00,0x3fa0a1b7de904dce,3 +np.float64,0x7fd925f215324be3,0x7ff0000000000000,3 +np.float64,0x3f949d95d0293b2c,0x3f94d31197d51874,3 +np.float64,0xffdded9e67bbdb3c,0xbff0000000000000,3 +np.float64,0x3fed390dcfba721c,0x3ff7e08c7a709240,3 +np.float64,0x7fe6e62300adcc45,0x7ff0000000000000,3 +np.float64,0xbfd779bc312ef378,0xbfd3a6cb64bb0181,3 +np.float64,0x3fe43e9877287d31,0x3fec3e100ef935fd,3 +np.float64,0x210b68e44216e,0x210b68e44216e,3 +np.float64,0x3fcdffc1e73bff84,0x3fd0e729d02ec539,3 +np.float64,0xcea10c0f9d422,0xcea10c0f9d422,3 +np.float64,0x7feb97a82d772f4f,0x7ff0000000000000,3 +np.float64,0x9b4b4d953696a,0x9b4b4d953696a,3 +np.float64,0x3fd1bd8e95237b1d,0x3fd4716dd34cf828,3 +np.float64,0x800fc273841f84e7,0x800fc273841f84e7,3 +np.float64,0xbfd2aef167255de2,0xbfd0340f30d82f18,3 +np.float64,0x800d021a551a0435,0x800d021a551a0435,3 +np.float64,0xffebf934a8b7f268,0xbff0000000000000,3 +np.float64,0x3fd819849fb03308,0x3fdd43bca0aac749,3 +np.float64,0x7ff8000000000000,0x7ff8000000000000,3 +np.float64,0x27c34b064f86a,0x27c34b064f86a,3 +np.float64,0x7fef4f5a373e9eb3,0x7ff0000000000000,3 +np.float64,0x7fd92fccce325f99,0x7ff0000000000000,3 +np.float64,0x800520869d6a410e,0x800520869d6a410e,3 +np.float64,0x3fccbcaddf397958,0x3fd01bf6b0c4d97f,3 +np.float64,0x80039ebfc4273d80,0x80039ebfc4273d80,3 +np.float64,0xbfed1f0b3c7a3e16,0xbfe31ea6e4c69141,3 +np.float64,0x7fee1bb7c4bc376f,0x7ff0000000000000,3 +np.float64,0xbfa8bee1d8317dc0,0xbfa8283b7dbf95a9,3 +np.float64,0x3fe797db606f2fb6,0x3ff171b1c2bc8fe5,3 +np.float64,0xbfee2ecfdbbc5da0,0xbfe38a3f0a43d14e,3 +np.float64,0x3fe815c7f1302b90,0x3ff1f65165c45d71,3 +np.float64,0xbfbb265c94364cb8,0xbfb9c27ec61a9a1d,3 +np.float64,0x3fcf1cab5d3e3957,0x3fd19c07444642f9,3 +np.float64,0xbfe6ae753f6d5cea,0xbfe03f99666dbe17,3 +np.float64,0xbfd18a2a73a31454,0xbfceaee204aca016,3 +np.float64,0x3fb8a1dffc3143c0,0x3fb9db38341ab1a3,3 +np.float64,0x7fd2a0376025406e,0x7ff0000000000000,3 +np.float64,0x7fe718c0e3ae3181,0x7ff0000000000000,3 +np.float64,0x3fb264d42424c9a8,0x3fb3121f071d4db4,3 +np.float64,0xd27190a7a4e32,0xd27190a7a4e32,3 +np.float64,0xbfe467668c68cecd,0xbfde2c4616738d5e,3 +np.float64,0x800ab9a2b9357346,0x800ab9a2b9357346,3 +np.float64,0x7fcbd108d537a211,0x7ff0000000000000,3 
+np.float64,0x3fb79bba6e2f3770,0x3fb8bb2c140d3445,3 +np.float64,0xffefa7165e3f4e2c,0xbff0000000000000,3 +np.float64,0x7fb40185a428030a,0x7ff0000000000000,3 +np.float64,0xbfe9e3d58e73c7ab,0xbfe1c04d51c83d69,3 +np.float64,0x7fef5b97b17eb72e,0x7ff0000000000000,3 +np.float64,0x800a2957683452af,0x800a2957683452af,3 +np.float64,0x800f54f1925ea9e3,0x800f54f1925ea9e3,3 +np.float64,0xeffa4e77dff4a,0xeffa4e77dff4a,3 +np.float64,0xffbe501aa03ca038,0xbff0000000000000,3 +np.float64,0x8006c651bced8ca4,0x8006c651bced8ca4,3 +np.float64,0x3fe159faff22b3f6,0x3fe708f78efbdbed,3 +np.float64,0x800e7d59a31cfab3,0x800e7d59a31cfab3,3 +np.float64,0x3fe6ac2f272d585e,0x3ff07ee5305385c3,3 +np.float64,0x7fd014c054202980,0x7ff0000000000000,3 +np.float64,0xbfe4800b11e90016,0xbfde4648c6f29ce5,3 +np.float64,0xbfe6738470ece709,0xbfe0227b5b42b713,3 +np.float64,0x3fed052add3a0a56,0x3ff7a01819e65c6e,3 +np.float64,0xffe03106f120620e,0xbff0000000000000,3 +np.float64,0x7fe11df4d4e23be9,0x7ff0000000000000,3 +np.float64,0xbfcea25d7b3d44bc,0xbfcb3e808e7ce852,3 +np.float64,0xd0807b03a1010,0xd0807b03a1010,3 +np.float64,0x8004eda4fec9db4b,0x8004eda4fec9db4b,3 +np.float64,0x3fceb5c98d3d6b90,0x3fd15a894b15dd9f,3 +np.float64,0xbfee27228afc4e45,0xbfe38741702f3c0b,3 +np.float64,0xbfe606278c6c0c4f,0xbfdfd7cb6093652d,3 +np.float64,0xbfd66f59bc2cdeb4,0xbfd2ecb2297f6afc,3 +np.float64,0x4aee390095dc8,0x4aee390095dc8,3 +np.float64,0xbfe391355d67226a,0xbfdd46ddc0997014,3 +np.float64,0xffd27765e7a4eecc,0xbff0000000000000,3 +np.float64,0xbfe795e20a2f2bc4,0xbfe0afebc66c4dbd,3 +np.float64,0x7fc9a62e81334c5c,0x7ff0000000000000,3 +np.float64,0xffe4e57e52a9cafc,0xbff0000000000000,3 +np.float64,0x7fac326c8c3864d8,0x7ff0000000000000,3 +np.float64,0x3fe8675f6370cebf,0x3ff24d5863029c15,3 +np.float64,0x7fcf4745e73e8e8b,0x7ff0000000000000,3 +np.float64,0x7fcc9aec9f3935d8,0x7ff0000000000000,3 +np.float64,0x3fec2e8fcab85d20,0x3ff699ccd0b2fed6,3 +np.float64,0x3fd110a968222153,0x3fd38e81a88c2d13,3 +np.float64,0xffb3a68532274d08,0xbff0000000000000,3 +np.float64,0xf0e562bbe1cad,0xf0e562bbe1cad,3 +np.float64,0xbfe815b9e5f02b74,0xbfe0ec9f5023aebc,3 +np.float64,0xbf5151d88022a400,0xbf514f80c465feea,3 +np.float64,0x2547e3144a8fd,0x2547e3144a8fd,3 +np.float64,0x3fedcc0c28fb9818,0x3ff899612fbeb4c5,3 +np.float64,0x3fdc3d1c0f387a38,0x3fe1bf6e2d39bd75,3 +np.float64,0x7fe544dbe62a89b7,0x7ff0000000000000,3 +np.float64,0x8001500e48e2a01d,0x8001500e48e2a01d,3 +np.float64,0xbfed3b2b09fa7656,0xbfe329f3e7bada64,3 +np.float64,0xbfe76a943aeed528,0xbfe09b24e3aa3f79,3 +np.float64,0x3fe944330e328866,0x3ff33d472dee70c5,3 +np.float64,0x8004bbbd6cc9777c,0x8004bbbd6cc9777c,3 +np.float64,0xbfe28133fb650268,0xbfdc1ac230ac4ef5,3 +np.float64,0xc1370af7826e2,0xc1370af7826e2,3 +np.float64,0x7fcfa47f5f3f48fe,0x7ff0000000000000,3 +np.float64,0xbfa3002a04260050,0xbfa2a703a538b54e,3 +np.float64,0xffef44f3903e89e6,0xbff0000000000000,3 +np.float64,0xc32cce298659a,0xc32cce298659a,3 +np.float64,0x7b477cc2f68f0,0x7b477cc2f68f0,3 +np.float64,0x40a7f4ec814ff,0x40a7f4ec814ff,3 +np.float64,0xffee38edf67c71db,0xbff0000000000000,3 +np.float64,0x3fe23f6f1ce47ede,0x3fe8992b8bb03499,3 +np.float64,0x7fc8edfe7f31dbfc,0x7ff0000000000000,3 +np.float64,0x800bb8e6fb3771ce,0x800bb8e6fb3771ce,3 +np.float64,0xbfe11d364ee23a6c,0xbfda82a0c2ef9e46,3 +np.float64,0xbfeb993cb4b7327a,0xbfe27df565da85dc,3 +np.float64,0x10000000000000,0x10000000000000,3 +np.float64,0x3fc1f997d723f330,0x3fc34c5cff060af1,3 +np.float64,0x6e326fa0dc64f,0x6e326fa0dc64f,3 +np.float64,0x800fa30c2c5f4618,0x800fa30c2c5f4618,3 
+np.float64,0x7fed16ad603a2d5a,0x7ff0000000000000,3 +np.float64,0x9411cf172823a,0x9411cf172823a,3 +np.float64,0xffece51d4cb9ca3a,0xbff0000000000000,3 +np.float64,0x3fdda3d1453b47a3,0x3fe2d954f7849890,3 +np.float64,0xffd58330172b0660,0xbff0000000000000,3 +np.float64,0xbfc6962ae52d2c54,0xbfc4b4bdf0069f17,3 +np.float64,0xbfb4010a8e280218,0xbfb33e1236f7efa0,3 +np.float64,0x7fd0444909208891,0x7ff0000000000000,3 +np.float64,0xbfe027a24de04f44,0xbfd95e9064101e7c,3 +np.float64,0xa6f3f3214de9,0xa6f3f3214de9,3 +np.float64,0xbfe112eb0fe225d6,0xbfda768f7cbdf346,3 +np.float64,0xbfe99e90d4b33d22,0xbfe1a153e45a382a,3 +np.float64,0xffecb34f8e79669e,0xbff0000000000000,3 +np.float64,0xbfdf32c9653e6592,0xbfd8b159caf5633d,3 +np.float64,0x3fe9519829b2a330,0x3ff34c0a8152e20f,3 +np.float64,0xffd08ec8a7a11d92,0xbff0000000000000,3 +np.float64,0xffd19b71b6a336e4,0xbff0000000000000,3 +np.float64,0x7feda6b9377b4d71,0x7ff0000000000000,3 +np.float64,0x800fda2956bfb453,0x800fda2956bfb453,3 +np.float64,0x3fe54f601bea9ec0,0x3fee483cb03cbde4,3 +np.float64,0xbfe2a8ad5ee5515a,0xbfdc46ee7a10bf0d,3 +np.float64,0xbfd336c8bd266d92,0xbfd09916d432274a,3 +np.float64,0xfff0000000000000,0xbff0000000000000,3 +np.float64,0x3fd9a811a9b35024,0x3fdf8fa68cc048e3,3 +np.float64,0x3fe078c68520f18d,0x3fe58aecc1f9649b,3 +np.float64,0xbfc6d5aa3a2dab54,0xbfc4e9ea84f3d73c,3 +np.float64,0xf9682007f2d04,0xf9682007f2d04,3 +np.float64,0x3fee54523dbca8a4,0x3ff947b826de81f4,3 +np.float64,0x80461e5d008c4,0x80461e5d008c4,3 +np.float64,0x3fdd6d12d5bada26,0x3fe2ade8dee2fa02,3 +np.float64,0x3fcd5f0dfd3abe18,0x3fd081d6cd25731d,3 +np.float64,0x7fa36475c826c8eb,0x7ff0000000000000,3 +np.float64,0xbfdf3ce052be79c0,0xbfd8b78baccfb908,3 +np.float64,0x7fcd890dd13b121b,0x7ff0000000000000,3 +np.float64,0x8000000000000001,0x8000000000000001,3 +np.float64,0x800ec0f4281d81e8,0x800ec0f4281d81e8,3 +np.float64,0xbfba960116352c00,0xbfb94085424496d9,3 +np.float64,0x3fdddedc9bbbbdb8,0x3fe30853fe4ef5ce,3 +np.float64,0x238092a847013,0x238092a847013,3 +np.float64,0xbfe38d4803271a90,0xbfdd429a955c46af,3 +np.float64,0xbfd4c9067329920c,0xbfd1bf6255ed91a4,3 +np.float64,0xbfbee213923dc428,0xbfbd17ce1bda6088,3 +np.float64,0xffd5a2d337ab45a6,0xbff0000000000000,3 +np.float64,0x7fe21bfcf82437f9,0x7ff0000000000000,3 +np.float64,0x3fe2a2714da544e3,0x3fe949594a74ea25,3 +np.float64,0x800e05cf8ebc0b9f,0x800e05cf8ebc0b9f,3 +np.float64,0x559a1526ab343,0x559a1526ab343,3 +np.float64,0xffe6a1b7906d436e,0xbff0000000000000,3 +np.float64,0xffef27d6253e4fab,0xbff0000000000000,3 +np.float64,0xbfe0f90ab0a1f216,0xbfda5828a1edde48,3 +np.float64,0x9675d2ab2cebb,0x9675d2ab2cebb,3 +np.float64,0xffee0f7eecfc1efd,0xbff0000000000000,3 +np.float64,0x2ec005625d801,0x2ec005625d801,3 +np.float64,0x7fde35ff14bc6bfd,0x7ff0000000000000,3 +np.float64,0xffe03f36d9e07e6d,0xbff0000000000000,3 +np.float64,0x7fe09ff7c4213fef,0x7ff0000000000000,3 +np.float64,0xffeac29dd1b5853b,0xbff0000000000000,3 +np.float64,0x3fb63120aa2c6241,0x3fb72ea3de98a853,3 +np.float64,0xffd079eb84a0f3d8,0xbff0000000000000,3 +np.float64,0xbfd3c2cc75a78598,0xbfd1005996880b3f,3 +np.float64,0x7fb80507ee300a0f,0x7ff0000000000000,3 +np.float64,0xffe8006105f000c1,0xbff0000000000000,3 +np.float64,0x8009138b0ab22716,0x8009138b0ab22716,3 +np.float64,0xbfd6dfb40b2dbf68,0xbfd33b8e4008e3b0,3 +np.float64,0xbfe7c2cf9bef859f,0xbfe0c55c807460df,3 +np.float64,0xbfe75fe4da6ebfca,0xbfe09600256d3b81,3 +np.float64,0xffd662fc73acc5f8,0xbff0000000000000,3 +np.float64,0x20b99dbc41735,0x20b99dbc41735,3 +np.float64,0x3fe10b38ade21671,0x3fe68229a9bbeefc,3 
+np.float64,0x3743b99c6e878,0x3743b99c6e878,3 +np.float64,0xff9eb5ed903d6be0,0xbff0000000000000,3 +np.float64,0x3ff0000000000000,0x3ffb7e151628aed3,3 +np.float64,0xffb9e0569e33c0b0,0xbff0000000000000,3 +np.float64,0x7fd39c804fa73900,0x7ff0000000000000,3 +np.float64,0x3fe881ef67f103df,0x3ff269dd704b7129,3 +np.float64,0x1b6eb40236dd7,0x1b6eb40236dd7,3 +np.float64,0xbfe734ea432e69d4,0xbfe0813e6355d02f,3 +np.float64,0xffcf48f3743e91e8,0xbff0000000000000,3 +np.float64,0xffed10bcf6fa2179,0xbff0000000000000,3 +np.float64,0x3fef07723b7e0ee4,0x3ffa3156123f3c15,3 +np.float64,0xffe45c704aa8b8e0,0xbff0000000000000,3 +np.float64,0xb7b818d96f703,0xb7b818d96f703,3 +np.float64,0x42fcc04085f99,0x42fcc04085f99,3 +np.float64,0xbfda7ced01b4f9da,0xbfd5b0ce1e5524ae,3 +np.float64,0xbfe1e5963d63cb2c,0xbfdb6a87b6c09185,3 +np.float64,0x7fdfa18003bf42ff,0x7ff0000000000000,3 +np.float64,0xbfe3790a43e6f214,0xbfdd2c9a38b4f089,3 +np.float64,0xffe0ff5b9ae1feb6,0xbff0000000000000,3 +np.float64,0x80085a7d3110b4fb,0x80085a7d3110b4fb,3 +np.float64,0xffd6bfa6622d7f4c,0xbff0000000000000,3 +np.float64,0xbfef5ddc7cfebbb9,0xbfe3fe170521593e,3 +np.float64,0x3fc21773fa242ee8,0x3fc36ebda1f91a72,3 +np.float64,0x7fc04d98da209b31,0x7ff0000000000000,3 +np.float64,0xbfeba3b535b7476a,0xbfe282602e3c322e,3 +np.float64,0xffd41fb5c1a83f6c,0xbff0000000000000,3 +np.float64,0xf87d206df0fa4,0xf87d206df0fa4,3 +np.float64,0x800060946fc0c12a,0x800060946fc0c12a,3 +np.float64,0x3fe69d5f166d3abe,0x3ff06fdddcf4ca93,3 +np.float64,0x7fe9b5793b336af1,0x7ff0000000000000,3 +np.float64,0x7fe0dd4143e1ba82,0x7ff0000000000000,3 +np.float64,0xbfa8eaea3c31d5d0,0xbfa8522e397da3bd,3 +np.float64,0x119f0078233e1,0x119f0078233e1,3 +np.float64,0xbfd78a207aaf1440,0xbfd3b225bbf2ab4f,3 +np.float64,0xc66a6d4d8cd4e,0xc66a6d4d8cd4e,3 +np.float64,0xe7fc4b57cff8a,0xe7fc4b57cff8a,3 +np.float64,0x800883e8091107d0,0x800883e8091107d0,3 +np.float64,0x3fa6520c842ca419,0x3fa6d06e1041743a,3 +np.float64,0x3fa563182c2ac630,0x3fa5d70e27a84c97,3 +np.float64,0xe6a30b61cd462,0xe6a30b61cd462,3 +np.float64,0x3fee85dac37d0bb6,0x3ff987cfa41a9778,3 +np.float64,0x3fe8f621db71ec44,0x3ff2e7b768a2e9d0,3 +np.float64,0x800f231d861e463b,0x800f231d861e463b,3 +np.float64,0xbfe22eb07c645d61,0xbfdbbdbb853ab4c6,3 +np.float64,0x7fd2dda2dea5bb45,0x7ff0000000000000,3 +np.float64,0xbfd09b79a0a136f4,0xbfcd4147606ffd27,3 +np.float64,0xca039cc394074,0xca039cc394074,3 +np.float64,0x8000000000000000,0x8000000000000000,3 +np.float64,0xcb34575d9668b,0xcb34575d9668b,3 +np.float64,0x3fea62c1f3f4c584,0x3ff47e6dc67ec89f,3 +np.float64,0x7fe544c8606a8990,0x7ff0000000000000,3 +np.float64,0xffe0a980c4615301,0xbff0000000000000,3 +np.float64,0x3fdd67d5f8bacfac,0x3fe2a9c3421830f1,3 +np.float64,0xffe41d3dda283a7b,0xbff0000000000000,3 +np.float64,0xffeed59e5ffdab3c,0xbff0000000000000,3 +np.float64,0xffeeae8326fd5d05,0xbff0000000000000,3 +np.float64,0x800d70b4fa7ae16a,0x800d70b4fa7ae16a,3 +np.float64,0xffec932e6839265c,0xbff0000000000000,3 +np.float64,0xee30b185dc616,0xee30b185dc616,3 +np.float64,0x7fc3cf4397279e86,0x7ff0000000000000,3 +np.float64,0xbfeab34f1875669e,0xbfe21b868229de7d,3 +np.float64,0xf45f5f7de8bec,0xf45f5f7de8bec,3 +np.float64,0x3fad2c4b203a5896,0x3fae0528b568f3cf,3 +np.float64,0xbfe2479543e48f2a,0xbfdbd9e57cf64028,3 +np.float64,0x3fd41a1473283429,0x3fd79df2bc60debb,3 +np.float64,0x3febb5155ef76a2a,0x3ff608585afd698b,3 +np.float64,0xffe21f5303e43ea6,0xbff0000000000000,3 +np.float64,0x7fe9ef390833de71,0x7ff0000000000000,3 +np.float64,0xffe8ee873d71dd0e,0xbff0000000000000,3 
+np.float64,0x7fd7cbc55e2f978a,0x7ff0000000000000,3 +np.float64,0x80081f9080d03f21,0x80081f9080d03f21,3 +np.float64,0x7fecbafc8b3975f8,0x7ff0000000000000,3 +np.float64,0x800b6c4b0b16d896,0x800b6c4b0b16d896,3 +np.float64,0xbfaa0fc2d4341f80,0xbfa968cdf32b98ad,3 +np.float64,0x3fec79fe4078f3fc,0x3ff6f5361a4a5d93,3 +np.float64,0xbfb14b79de2296f0,0xbfb0b93b75ecec11,3 +np.float64,0x800009d084c013a2,0x800009d084c013a2,3 +np.float64,0x4a4cdfe29499d,0x4a4cdfe29499d,3 +np.float64,0xbfe721c2d56e4386,0xbfe077f541987d76,3 +np.float64,0x3e5f539e7cbeb,0x3e5f539e7cbeb,3 +np.float64,0x3fd23f044c247e09,0x3fd51ceafcdd64aa,3 +np.float64,0x3fc70785b02e0f0b,0x3fc93b2a37eb342a,3 +np.float64,0xbfe7ab4ec7af569e,0xbfe0ba28eecbf6b0,3 +np.float64,0x800c1d4134583a83,0x800c1d4134583a83,3 +np.float64,0xffd9a73070334e60,0xbff0000000000000,3 +np.float64,0x68a4bf24d1499,0x68a4bf24d1499,3 +np.float64,0x7feba9d9507753b2,0x7ff0000000000000,3 +np.float64,0xbfe9d747db73ae90,0xbfe1bab53d932010,3 +np.float64,0x800a9a4aed953496,0x800a9a4aed953496,3 +np.float64,0xffcb89b0ad371360,0xbff0000000000000,3 +np.float64,0xbfc62388b82c4710,0xbfc4547be442a38c,3 +np.float64,0x800a006d187400db,0x800a006d187400db,3 +np.float64,0x3fcef2fbd33de5f8,0x3fd18177b2150148,3 +np.float64,0x8000b74e3da16e9d,0x8000b74e3da16e9d,3 +np.float64,0x25be536e4b7cb,0x25be536e4b7cb,3 +np.float64,0x3fa86e189430dc31,0x3fa905b4684c9f01,3 +np.float64,0xa7584b114eb0a,0xa7584b114eb0a,3 +np.float64,0x800331133c866227,0x800331133c866227,3 +np.float64,0x3fb52b48142a5690,0x3fb611a6f6e7c664,3 +np.float64,0x3fe825797cf04af2,0x3ff206fd60e98116,3 +np.float64,0x3fd0bec4e5217d8a,0x3fd323db3ffd59b2,3 +np.float64,0x907b43a120f7,0x907b43a120f7,3 +np.float64,0x3fed31eb1d3a63d6,0x3ff7d7a91c6930a4,3 +np.float64,0x7f97a13d782f427a,0x7ff0000000000000,3 +np.float64,0xffc7121a702e2434,0xbff0000000000000,3 +np.float64,0xbfe8bb4cbbf1769a,0xbfe139d7f46f1fb1,3 +np.float64,0xbfe3593cc5a6b27a,0xbfdd09ec91d6cd48,3 +np.float64,0x7fcff218ff9ff,0x7fcff218ff9ff,3 +np.float64,0x3fe73651d4ae6ca4,0x3ff10c5c1d21d127,3 +np.float64,0x80054e396eaa9c74,0x80054e396eaa9c74,3 +np.float64,0x3fe527d5f9aa4fac,0x3fedfb7743db9b53,3 +np.float64,0x7fec6f28c5f8de51,0x7ff0000000000000,3 +np.float64,0x3fcd2bbff53a5780,0x3fd061987416b49b,3 +np.float64,0xffd1f0046423e008,0xbff0000000000000,3 +np.float64,0x80034d97fac69b31,0x80034d97fac69b31,3 +np.float64,0x3faa803f14350080,0x3fab32e3f8073be4,3 +np.float64,0x3fcf8da0163f1b40,0x3fd1e42ba2354c8e,3 +np.float64,0x3fd573c2632ae785,0x3fd97c37609d18d7,3 +np.float64,0x7f922960482452c0,0x7ff0000000000000,3 +np.float64,0x800ebd0c5d3d7a19,0x800ebd0c5d3d7a19,3 +np.float64,0xbfee63b7807cc76f,0xbfe39ec7981035db,3 +np.float64,0xffdc023f8e380480,0xbff0000000000000,3 +np.float64,0x3fe3ffa02c67ff40,0x3febc7f8b900ceba,3 +np.float64,0x36c508b86d8a2,0x36c508b86d8a2,3 +np.float64,0x3fc9fbb0f133f760,0x3fcccee9f6ba801c,3 +np.float64,0x3fd75c1d5faeb83b,0x3fdc3150f9eff99e,3 +np.float64,0x3fe9a8d907b351b2,0x3ff3accc78a31df8,3 +np.float64,0x3fdd8fdcafbb1fb8,0x3fe2c97c97757994,3 +np.float64,0x3fb10c34ca22186a,0x3fb1a0cc42c76b86,3 +np.float64,0xbff0000000000000,0xbfe43a54e4e98864,3 +np.float64,0xffd046aefda08d5e,0xbff0000000000000,3 +np.float64,0x80067989758cf314,0x80067989758cf314,3 +np.float64,0x3fee9d77763d3aef,0x3ff9a67ff0841ba5,3 +np.float64,0xffe4d3cbf8e9a798,0xbff0000000000000,3 +np.float64,0x800f9cab273f3956,0x800f9cab273f3956,3 +np.float64,0x800a5c84f9f4b90a,0x800a5c84f9f4b90a,3 +np.float64,0x4fd377009fa8,0x4fd377009fa8,3 +np.float64,0xbfe7ba26af6f744e,0xbfe0c13ce45d6f95,3 
+np.float64,0x609c8a86c1392,0x609c8a86c1392,3 +np.float64,0x7fe4d0296ea9a052,0x7ff0000000000000,3 +np.float64,0x59847bccb3090,0x59847bccb3090,3 +np.float64,0xbfdf944157bf2882,0xbfd8ed092bacad43,3 +np.float64,0xbfe7560a632eac15,0xbfe091405ec34973,3 +np.float64,0x3fea0699f4340d34,0x3ff415eb72089230,3 +np.float64,0x800a5533f374aa68,0x800a5533f374aa68,3 +np.float64,0xbf8e8cdb103d19c0,0xbf8e52cffcb83774,3 +np.float64,0x3fe87d9e52f0fb3d,0x3ff2653952344b81,3 +np.float64,0x7fca3950f73472a1,0x7ff0000000000000,3 +np.float64,0xffd5d1068aaba20e,0xbff0000000000000,3 +np.float64,0x3fd1a5f169a34be4,0x3fd4524b6ef17f91,3 +np.float64,0x3fdc4b95a8b8972c,0x3fe1caafd8652bf7,3 +np.float64,0x3fe333f65a6667ed,0x3fea502fb1f8a578,3 +np.float64,0xbfc117aaac222f54,0xbfc00018a4b84b6e,3 +np.float64,0x7fecf2efdf39e5df,0x7ff0000000000000,3 +np.float64,0x4e99d83e9d33c,0x4e99d83e9d33c,3 +np.float64,0x800d18937bda3127,0x800d18937bda3127,3 +np.float64,0x3fd6c67778ad8cef,0x3fdb5aba70a3ea9e,3 +np.float64,0x3fdbb71770b76e2f,0x3fe157ae8da20bc5,3 +np.float64,0xbfe9faf6ebf3f5ee,0xbfe1ca963d83f17f,3 +np.float64,0x80038850ac0710a2,0x80038850ac0710a2,3 +np.float64,0x8006beb72f8d7d6f,0x8006beb72f8d7d6f,3 +np.float64,0x3feead67bffd5acf,0x3ff9bb43e8b15e2f,3 +np.float64,0xbfd1174b89222e98,0xbfcdff9972799907,3 +np.float64,0x7fee2c077cfc580e,0x7ff0000000000000,3 +np.float64,0xbfbdbd904e3b7b20,0xbfbc13f4916ed466,3 +np.float64,0xffee47b8fe3c8f71,0xbff0000000000000,3 +np.float64,0xffd161884222c310,0xbff0000000000000,3 +np.float64,0xbfd42f27c4a85e50,0xbfd14fa8d67ba5ee,3 +np.float64,0x7fefffffffffffff,0x7ff0000000000000,3 +np.float64,0x8008151791b02a30,0x8008151791b02a30,3 +np.float64,0xbfba79029234f208,0xbfb926616cf41755,3 +np.float64,0x8004c486be29890e,0x8004c486be29890e,3 +np.float64,0x7fe5325a252a64b3,0x7ff0000000000000,3 +np.float64,0x5a880f04b5103,0x5a880f04b5103,3 +np.float64,0xbfe6f4b7702de96f,0xbfe06209002dd72c,3 +np.float64,0xbfdf8b3739bf166e,0xbfd8e783efe3c30f,3 +np.float64,0xbfe32571c8e64ae4,0xbfdcd128b9aa49a1,3 +np.float64,0xbfe97c98c172f932,0xbfe1920ac0fc040f,3 +np.float64,0x3fd0b513a2a16a28,0x3fd31744e3a1bf0a,3 +np.float64,0xffe3ab70832756e0,0xbff0000000000000,3 +np.float64,0x80030f055ce61e0b,0x80030f055ce61e0b,3 +np.float64,0xffd5f3b21b2be764,0xbff0000000000000,3 +np.float64,0x800c1f2d6c783e5b,0x800c1f2d6c783e5b,3 +np.float64,0x80075f4f148ebe9f,0x80075f4f148ebe9f,3 +np.float64,0xbfa5a046f42b4090,0xbfa52cfbf8992256,3 +np.float64,0xffd6702583ace04c,0xbff0000000000000,3 +np.float64,0x800dc0a5cf1b814c,0x800dc0a5cf1b814c,3 +np.float64,0x14f2203a29e45,0x14f2203a29e45,3 +np.float64,0x800421a40ee84349,0x800421a40ee84349,3 +np.float64,0xbfea7c279df4f84f,0xbfe2037fff3ed877,3 +np.float64,0xbfe9b41ddcf3683c,0xbfe1aafe18a44bf8,3 +np.float64,0xffe7b037022f606e,0xbff0000000000000,3 +np.float64,0x800bafb648775f6d,0x800bafb648775f6d,3 +np.float64,0x800b81681d5702d1,0x800b81681d5702d1,3 +np.float64,0x3fe29f8dc8653f1c,0x3fe9442da1c32c6b,3 +np.float64,0xffef9a05dc7f340b,0xbff0000000000000,3 +np.float64,0x800c8c65a65918cb,0x800c8c65a65918cb,3 +np.float64,0xffe99df0d5f33be1,0xbff0000000000000,3 +np.float64,0x9afeb22535fd7,0x9afeb22535fd7,3 +np.float64,0x7fc620dd822c41ba,0x7ff0000000000000,3 +np.float64,0x29c2cdf25385b,0x29c2cdf25385b,3 +np.float64,0x2d92284e5b246,0x2d92284e5b246,3 +np.float64,0xffc794aa942f2954,0xbff0000000000000,3 +np.float64,0xbfe7ed907eafdb21,0xbfe0d9a7b1442497,3 +np.float64,0xbfd4e0d4aea9c1aa,0xbfd1d09366dba2a7,3 +np.float64,0xa70412c34e083,0xa70412c34e083,3 +np.float64,0x41dc0ee083b9,0x41dc0ee083b9,3 
+np.float64,0x8000ece20da1d9c5,0x8000ece20da1d9c5,3 +np.float64,0x3fdf3dae103e7b5c,0x3fe42314bf826bc5,3 +np.float64,0x3fe972533c72e4a6,0x3ff3703761e70f04,3 +np.float64,0xffba1d2b82343a58,0xbff0000000000000,3 +np.float64,0xe0086c83c010e,0xe0086c83c010e,3 +np.float64,0x3fe6fb0dde6df61c,0x3ff0cf5fae01aa08,3 +np.float64,0x3fcfaf057e3f5e0b,0x3fd1f98c1fd20139,3 +np.float64,0xbfdca19d9239433c,0xbfd7158745192ca9,3 +np.float64,0xffb17f394e22fe70,0xbff0000000000000,3 +np.float64,0x7fe40f05c7681e0b,0x7ff0000000000000,3 +np.float64,0x800b3c575d5678af,0x800b3c575d5678af,3 +np.float64,0x7fa4ab20ac295640,0x7ff0000000000000,3 +np.float64,0xbfd2fff4f6a5ffea,0xbfd07069bb50e1a6,3 +np.float64,0xbfef81b9147f0372,0xbfe40b845a749787,3 +np.float64,0x7fd7400e54ae801c,0x7ff0000000000000,3 +np.float64,0x3fd4401a17a88034,0x3fd7d20fb76a4f3d,3 +np.float64,0xbfd3e907fd27d210,0xbfd11c64b7577fc5,3 +np.float64,0x7fe34bed9ae697da,0x7ff0000000000000,3 +np.float64,0x80039119c0472234,0x80039119c0472234,3 +np.float64,0xbfe2e36ac565c6d6,0xbfdc88454ee997b3,3 +np.float64,0xbfec57204478ae40,0xbfe2cd3183de1d2d,3 +np.float64,0x7fed7e2a12fafc53,0x7ff0000000000000,3 +np.float64,0x7fd5c5fa7d2b8bf4,0x7ff0000000000000,3 +np.float64,0x3fdcf368d6b9e6d0,0x3fe24decce1ebd35,3 +np.float64,0xbfe0ebfcf2e1d7fa,0xbfda48c9247ae8cf,3 +np.float64,0xbfe10dbea2e21b7e,0xbfda707d68b59674,3 +np.float64,0xbfdf201b6ebe4036,0xbfd8a5df27742fdf,3 +np.float64,0xffe16555be62caab,0xbff0000000000000,3 +np.float64,0xffc23a5db22474bc,0xbff0000000000000,3 +np.float64,0xffe1cbb3f8a39768,0xbff0000000000000,3 +np.float64,0x8007b823be0f7048,0x8007b823be0f7048,3 +np.float64,0xbfa5d1f3042ba3e0,0xbfa55c97cd77bf6e,3 +np.float64,0xbfe316a074662d41,0xbfdcc0da4e7334d0,3 +np.float64,0xbfdfab2bf2bf5658,0xbfd8fb046b88b51f,3 +np.float64,0xfacc9dabf5994,0xfacc9dabf5994,3 +np.float64,0xffe7e420a4efc841,0xbff0000000000000,3 +np.float64,0x800bb986cd57730e,0x800bb986cd57730e,3 +np.float64,0xbfe314fa38e629f4,0xbfdcbf09302c3bf5,3 +np.float64,0x7fc56b17772ad62e,0x7ff0000000000000,3 +np.float64,0x8006a87d54ad50fb,0x8006a87d54ad50fb,3 +np.float64,0xbfe6633e4a6cc67c,0xbfe01a67c3b3ff32,3 +np.float64,0x3fe0ff56eb21feae,0x3fe66df01defb0fb,3 +np.float64,0xffc369cfc126d3a0,0xbff0000000000000,3 +np.float64,0x7fe8775d9a30eeba,0x7ff0000000000000,3 +np.float64,0x3fb53db13e2a7b60,0x3fb625a7279cdac3,3 +np.float64,0xffee76e7e6fcedcf,0xbff0000000000000,3 +np.float64,0xb45595b568ab3,0xb45595b568ab3,3 +np.float64,0xffa09a1d50213440,0xbff0000000000000,3 +np.float64,0x7d11dc16fa23c,0x7d11dc16fa23c,3 +np.float64,0x7fd4cc2928299851,0x7ff0000000000000,3 +np.float64,0x6a30e0ead461d,0x6a30e0ead461d,3 +np.float64,0x7fd3ee735a27dce6,0x7ff0000000000000,3 +np.float64,0x8008d7084b31ae11,0x8008d7084b31ae11,3 +np.float64,0x3fe469353fe8d26a,0x3fec8e7e2df38590,3 +np.float64,0x3fcecef2743d9de5,0x3fd16a888b715dfd,3 +np.float64,0x460130d68c027,0x460130d68c027,3 +np.float64,0xbfd76510c62eca22,0xbfd398766b741d6e,3 +np.float64,0x800ec88c2a5d9118,0x800ec88c2a5d9118,3 +np.float64,0x3fac969c6c392d40,0x3fad66ca6a1e583c,3 +np.float64,0x3fe5c616bf6b8c2e,0x3fef30f931e8dde5,3 +np.float64,0xb4cb6cd56996e,0xb4cb6cd56996e,3 +np.float64,0xffc3eacf8827d5a0,0xbff0000000000000,3 +np.float64,0x3fe1ceaf60e39d5f,0x3fe7d31e0a627cf9,3 +np.float64,0xffea69b42ff4d368,0xbff0000000000000,3 +np.float64,0x800ff8aef99ff15e,0x800ff8aef99ff15e,3 +np.float64,0x6c3953f0d872b,0x6c3953f0d872b,3 +np.float64,0x8007ca5a0d0f94b5,0x8007ca5a0d0f94b5,3 +np.float64,0x800993ce3ad3279d,0x800993ce3ad3279d,3 +np.float64,0x3fe5a4d1516b49a2,0x3feeef67b22ac65b,3 
+np.float64,0x8003d7512a67aea3,0x8003d7512a67aea3,3 +np.float64,0x33864430670c9,0x33864430670c9,3 +np.float64,0xbfdbf477e3b7e8f0,0xbfd6a63f1b36f424,3 +np.float64,0x3fb5da92582bb525,0x3fb6d04ef1a1d31a,3 +np.float64,0xe38aae71c7156,0xe38aae71c7156,3 +np.float64,0x3fcaf5590a35eab2,0x3fce01ed6eb6188e,3 +np.float64,0x800deba9b05bd754,0x800deba9b05bd754,3 +np.float64,0x7fee0cde287c19bb,0x7ff0000000000000,3 +np.float64,0xbfe0c2ae70e1855d,0xbfda17fa64d84fcf,3 +np.float64,0x518618faa30c4,0x518618faa30c4,3 +np.float64,0xbfeb4c49b8769894,0xbfe25d52cd7e529f,3 +np.float64,0xbfeb3aa21b367544,0xbfe255cae1df4cfd,3 +np.float64,0xffd23f1c5d247e38,0xbff0000000000000,3 +np.float64,0xff9a75132034ea20,0xbff0000000000000,3 +np.float64,0xbfef9d96307f3b2c,0xbfe415e8b6ce0e50,3 +np.float64,0x8004046f2f0808df,0x8004046f2f0808df,3 +np.float64,0x3fe15871aea2b0e3,0x3fe706532ea5c770,3 +np.float64,0x7fd86b1576b0d62a,0x7ff0000000000000,3 +np.float64,0xbfc240a5c724814c,0xbfc102c7971ca455,3 +np.float64,0xffd8ea670bb1d4ce,0xbff0000000000000,3 +np.float64,0xbfeb1ddd1ff63bba,0xbfe2497c4e27bb8e,3 +np.float64,0x3fcd47e0a33a8fc1,0x3fd0734444150d83,3 +np.float64,0xe00b6a65c016e,0xe00b6a65c016e,3 +np.float64,0xbfc7d582142fab04,0xbfc5bf1fbe755a4c,3 +np.float64,0x8cc91ca11993,0x8cc91ca11993,3 +np.float64,0x7fdbc530e3b78a61,0x7ff0000000000000,3 +np.float64,0x7fee437522bc86e9,0x7ff0000000000000,3 +np.float64,0xffe9e09ae2b3c135,0xbff0000000000000,3 +np.float64,0x8002841cada5083a,0x8002841cada5083a,3 +np.float64,0x3fd6b485f8ad690c,0x3fdb412135932699,3 +np.float64,0x80070e8d0b0e1d1b,0x80070e8d0b0e1d1b,3 +np.float64,0x7fed5df165babbe2,0x7ff0000000000000,3 +np.float64,0x7ff4000000000000,0x7ffc000000000000,3 +np.float64,0x7fe99d08cd333a11,0x7ff0000000000000,3 +np.float64,0xdfff4201bfff,0xdfff4201bfff,3 +np.float64,0x800ccf7aaf999ef6,0x800ccf7aaf999ef6,3 +np.float64,0x3fddb05aad3b60b5,0x3fe2e34bdd1dd9d5,3 +np.float64,0xbfe5e1c60e6bc38c,0xbfdfb3275cc1675f,3 +np.float64,0x8004fe674269fccf,0x8004fe674269fccf,3 +np.float64,0x7fe9280363325006,0x7ff0000000000000,3 +np.float64,0xf605b9f1ec0b7,0xf605b9f1ec0b7,3 +np.float64,0x800c7c214018f843,0x800c7c214018f843,3 +np.float64,0x7fd97eb6b9b2fd6c,0x7ff0000000000000,3 +np.float64,0x7fd03f8fb6207f1e,0x7ff0000000000000,3 +np.float64,0x7fc526b64d2a4d6c,0x7ff0000000000000,3 +np.float64,0xbfef1a7c42fe34f9,0xbfe3e4b4399e0fcf,3 +np.float64,0xffdde10a2fbbc214,0xbff0000000000000,3 +np.float64,0xbfdd274f72ba4e9e,0xbfd76aa73788863c,3 +np.float64,0xbfecf7f77af9efef,0xbfe30ee2ae03fed1,3 +np.float64,0xffde709322bce126,0xbff0000000000000,3 +np.float64,0x268b5dac4d16d,0x268b5dac4d16d,3 +np.float64,0x8005c099606b8134,0x8005c099606b8134,3 +np.float64,0xffcf54c1593ea984,0xbff0000000000000,3 +np.float64,0xbfee9b8ebabd371d,0xbfe3b44f2663139d,3 +np.float64,0x3faf0330643e0661,0x3faff88fab74b447,3 +np.float64,0x7fe1c6011be38c01,0x7ff0000000000000,3 +np.float64,0xbfe9d58053b3ab01,0xbfe1b9ea12242485,3 +np.float64,0xbfe15a80fee2b502,0xbfdaca2aa7d1231a,3 +np.float64,0x7fe0d766d8a1aecd,0x7ff0000000000000,3 +np.float64,0x800f65e6a21ecbcd,0x800f65e6a21ecbcd,3 +np.float64,0x7fc85e45a530bc8a,0x7ff0000000000000,3 +np.float64,0x3fcc240e5438481d,0x3fcf7954fc080ac3,3 +np.float64,0xffddd49da2bba93c,0xbff0000000000000,3 +np.float64,0x1376f36c26edf,0x1376f36c26edf,3 +np.float64,0x3feffb7af17ff6f6,0x3ffb77f0ead2f881,3 +np.float64,0x3fd9354ea9b26a9d,0x3fdee4e4c8db8239,3 +np.float64,0xffdf7beed4bef7de,0xbff0000000000000,3 +np.float64,0xbfdef256ecbde4ae,0xbfd889b0e213a019,3 +np.float64,0x800d78bd1e7af17a,0x800d78bd1e7af17a,3 
+np.float64,0xb66d66276cdad,0xb66d66276cdad,3 +np.float64,0x7fd8f51138b1ea21,0x7ff0000000000000,3 +np.float64,0xffe8c9c302b19385,0xbff0000000000000,3 +np.float64,0x8000be4cf5417c9b,0x8000be4cf5417c9b,3 +np.float64,0xbfe2293a25645274,0xbfdbb78a8c547c68,3 +np.float64,0xce8392c19d08,0xce8392c19d08,3 +np.float64,0xbfe075736b60eae7,0xbfd9bc0f6e34a283,3 +np.float64,0xbfe8d6fe6a71adfd,0xbfe1469ba80b4915,3 +np.float64,0xffe0c7993fa18f32,0xbff0000000000000,3 +np.float64,0x3fce5210fd3ca422,0x3fd11b40a1270a95,3 +np.float64,0x6c0534a8d80a7,0x6c0534a8d80a7,3 +np.float64,0x23c1823647831,0x23c1823647831,3 +np.float64,0x3fc901253732024a,0x3fcb9d264accb07c,3 +np.float64,0x3fe42b8997685714,0x3fec1a39e207b6e4,3 +np.float64,0x3fec4fd00fb89fa0,0x3ff6c1fdd0c262c8,3 +np.float64,0x8007b333caaf6668,0x8007b333caaf6668,3 +np.float64,0x800f9275141f24ea,0x800f9275141f24ea,3 +np.float64,0xffbba361a23746c0,0xbff0000000000000,3 +np.float64,0xbfee4effa9fc9dff,0xbfe396c11d0cd524,3 +np.float64,0x3e47e84c7c8fe,0x3e47e84c7c8fe,3 +np.float64,0x3fe80eb7b1301d6f,0x3ff1eed318a00153,3 +np.float64,0x7fd3f4c5b4a7e98a,0x7ff0000000000000,3 +np.float64,0x158abab02b158,0x158abab02b158,3 +np.float64,0x1,0x1,3 +np.float64,0x1f1797883e2f4,0x1f1797883e2f4,3 +np.float64,0x3feec055d03d80ac,0x3ff9d3fb0394de33,3 +np.float64,0x8010000000000000,0x8010000000000000,3 +np.float64,0xbfd070860ea0e10c,0xbfccfeec2828efef,3 +np.float64,0x80015c8b3e82b917,0x80015c8b3e82b917,3 +np.float64,0xffef9956d9ff32ad,0xbff0000000000000,3 +np.float64,0x7fe7f087dd2fe10f,0x7ff0000000000000,3 +np.float64,0x8002e7718665cee4,0x8002e7718665cee4,3 +np.float64,0x3fdfb9adb2bf735c,0x3fe4887a86214c1e,3 +np.float64,0xffc7747dfb2ee8fc,0xbff0000000000000,3 +np.float64,0x3fec309bb5386137,0x3ff69c44e1738547,3 +np.float64,0xffdbe2bf9ab7c580,0xbff0000000000000,3 +np.float64,0xbfe6a274daed44ea,0xbfe039aff2be9d48,3 +np.float64,0x7fd5a4e4efab49c9,0x7ff0000000000000,3 +np.float64,0xffbe6aaeb03cd560,0xbff0000000000000,3 diff --git a/.venv/lib/python3.12/site-packages/numpy/core/tests/data/umath-validation-set-log.csv b/.venv/lib/python3.12/site-packages/numpy/core/tests/data/umath-validation-set-log.csv new file mode 100644 index 00000000..7717745d --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/data/umath-validation-set-log.csv @@ -0,0 +1,271 @@ +dtype,input,output,ulperrortol +## +ve denormals ## +np.float32,0x004b4716,0xc2afbc1b,4 +np.float32,0x007b2490,0xc2aec01e,4 +np.float32,0x007c99fa,0xc2aeba17,4 +np.float32,0x00734a0c,0xc2aee1dc,4 +np.float32,0x0070de24,0xc2aeecba,4 +np.float32,0x007fffff,0xc2aeac50,4 +np.float32,0x00000001,0xc2ce8ed0,4 +## -ve denormals ## +np.float32,0x80495d65,0xffc00000,4 +np.float32,0x806894f6,0xffc00000,4 +np.float32,0x80555a76,0xffc00000,4 +np.float32,0x804e1fb8,0xffc00000,4 +np.float32,0x80687de9,0xffc00000,4 +np.float32,0x807fffff,0xffc00000,4 +np.float32,0x80000001,0xffc00000,4 +## +/-0.0f, +/-FLT_MIN +/-FLT_MAX ## +np.float32,0x00000000,0xff800000,4 +np.float32,0x80000000,0xff800000,4 +np.float32,0x7f7fffff,0x42b17218,4 +np.float32,0x80800000,0xffc00000,4 +np.float32,0xff7fffff,0xffc00000,4 +## 1.00f + 0x00000001 ## +np.float32,0x3f800000,0x00000000,4 +np.float32,0x3f800001,0x33ffffff,4 +np.float32,0x3f800002,0x347ffffe,4 +np.float32,0x3f7fffff,0xb3800000,4 +np.float32,0x3f7ffffe,0xb4000000,4 +np.float32,0x3f7ffffd,0xb4400001,4 +np.float32,0x402df853,0x3f7ffffe,4 +np.float32,0x402df854,0x3f7fffff,4 +np.float32,0x402df855,0x3f800000,4 +np.float32,0x402df856,0x3f800001,4 +np.float32,0x3ebc5ab0,0xbf800001,4 
+np.float32,0x3ebc5ab1,0xbf800000,4 +np.float32,0x3ebc5ab2,0xbf800000,4 +np.float32,0x3ebc5ab3,0xbf7ffffe,4 +np.float32,0x423ef575,0x407768ab,4 +np.float32,0x427b8c61,0x408485dd,4 +np.float32,0x4211e9ee,0x406630b0,4 +np.float32,0x424d5c41,0x407c0fed,4 +np.float32,0x42be722a,0x4091cc91,4 +np.float32,0x42b73d30,0x4090908b,4 +np.float32,0x427e48e2,0x4084de7f,4 +np.float32,0x428f759b,0x4088bba3,4 +np.float32,0x41629069,0x4029a0cc,4 +np.float32,0x4272c99d,0x40836379,4 +np.float32,0x4d1b7458,0x4197463d,4 +np.float32,0x4f10c594,0x41ace2b2,4 +np.float32,0x4ea397c2,0x41a85171,4 +np.float32,0x4fefa9d1,0x41b6769c,4 +np.float32,0x4ebac6ab,0x41a960dc,4 +np.float32,0x4f6efb42,0x41b0e535,4 +np.float32,0x4e9ab8e7,0x41a7df44,4 +np.float32,0x4e81b5d1,0x41a67625,4 +np.float32,0x5014d9f2,0x41b832bd,4 +np.float32,0x4f02175c,0x41ac07b8,4 +np.float32,0x7f034f89,0x42b01c47,4 +np.float32,0x7f56d00e,0x42b11849,4 +np.float32,0x7f1cd5f6,0x42b0773a,4 +np.float32,0x7e979174,0x42af02d7,4 +np.float32,0x7f23369f,0x42b08ba2,4 +np.float32,0x7f0637ae,0x42b0277d,4 +np.float32,0x7efcb6e8,0x42b00897,4 +np.float32,0x7f7907c8,0x42b163f6,4 +np.float32,0x7e95c4c2,0x42aefcba,4 +np.float32,0x7f4577b2,0x42b0ed2d,4 +np.float32,0x3f49c92e,0xbe73ae84,4 +np.float32,0x3f4a23d1,0xbe71e2f8,4 +np.float32,0x3f4abb67,0xbe6ee430,4 +np.float32,0x3f48169a,0xbe7c5532,4 +np.float32,0x3f47f5fa,0xbe7cfc37,4 +np.float32,0x3f488309,0xbe7a2ad8,4 +np.float32,0x3f479df4,0xbe7ebf5f,4 +np.float32,0x3f47cfff,0xbe7dbec9,4 +np.float32,0x3f496704,0xbe75a125,4 +np.float32,0x3f478ee8,0xbe7f0c92,4 +np.float32,0x3f4a763b,0xbe7041ce,4 +np.float32,0x3f47a108,0xbe7eaf94,4 +np.float32,0x3f48136c,0xbe7c6578,4 +np.float32,0x3f481c17,0xbe7c391c,4 +np.float32,0x3f47cd28,0xbe7dcd56,4 +np.float32,0x3f478be8,0xbe7f1bf7,4 +np.float32,0x3f4c1f8e,0xbe67e367,4 +np.float32,0x3f489b0c,0xbe79b03f,4 +np.float32,0x3f4934cf,0xbe76a08a,4 +np.float32,0x3f4954df,0xbe75fd6a,4 +np.float32,0x3f47a3f5,0xbe7ea093,4 +np.float32,0x3f4ba4fc,0xbe6a4b02,4 +np.float32,0x3f47a0e1,0xbe7eb05c,4 +np.float32,0x3f48c30a,0xbe78e42f,4 +np.float32,0x3f48cab8,0xbe78bd05,4 +np.float32,0x3f4b0569,0xbe6d6ea4,4 +np.float32,0x3f47de32,0xbe7d7607,4 +np.float32,0x3f477328,0xbe7f9b00,4 +np.float32,0x3f496dab,0xbe757f52,4 +np.float32,0x3f47662c,0xbe7fddac,4 +np.float32,0x3f48ddd8,0xbe785b80,4 +np.float32,0x3f481866,0xbe7c4bff,4 +np.float32,0x3f48b119,0xbe793fb6,4 +np.float32,0x3f48c7e8,0xbe78cb5c,4 +np.float32,0x3f4985f6,0xbe7503da,4 +np.float32,0x3f483fdf,0xbe7b8212,4 +np.float32,0x3f4b1c76,0xbe6cfa67,4 +np.float32,0x3f480b2e,0xbe7c8fa8,4 +np.float32,0x3f48745f,0xbe7a75bf,4 +np.float32,0x3f485bda,0xbe7af308,4 +np.float32,0x3f47a660,0xbe7e942c,4 +np.float32,0x3f47d4d5,0xbe7da600,4 +np.float32,0x3f4b0a26,0xbe6d56be,4 +np.float32,0x3f4a4883,0xbe712924,4 +np.float32,0x3f4769e7,0xbe7fca84,4 +np.float32,0x3f499702,0xbe74ad3f,4 +np.float32,0x3f494ab1,0xbe763131,4 +np.float32,0x3f476b69,0xbe7fc2c6,4 +np.float32,0x3f4884e8,0xbe7a214a,4 +np.float32,0x3f486945,0xbe7aae76,4 +#float64 +## +ve denormal ## +np.float64,0x0000000000000001,0xc0874385446d71c3,2 +np.float64,0x0001000000000000,0xc086395a2079b70c,2 +np.float64,0x000fffffffffffff,0xc086232bdd7abcd2,2 +np.float64,0x0007ad63e2168cb6,0xc086290bc0b2980f,2 +## -ve denormal ## +np.float64,0x8000000000000001,0xfff8000000000001,2 +np.float64,0x8001000000000000,0xfff8000000000001,2 +np.float64,0x800fffffffffffff,0xfff8000000000001,2 +np.float64,0x8007ad63e2168cb6,0xfff8000000000001,2 +## +/-0.0f, MAX, MIN## +np.float64,0x0000000000000000,0xfff0000000000000,2 
+np.float64,0x8000000000000000,0xfff0000000000000,2 +np.float64,0x7fefffffffffffff,0x40862e42fefa39ef,2 +np.float64,0xffefffffffffffff,0xfff8000000000001,2 +## near 1.0f ## +np.float64,0x3ff0000000000000,0x0000000000000000,2 +np.float64,0x3fe8000000000000,0xbfd269621134db92,2 +np.float64,0x3ff0000000000001,0x3cafffffffffffff,2 +np.float64,0x3ff0000020000000,0x3e7fffffe000002b,2 +np.float64,0x3ff0000000000001,0x3cafffffffffffff,2 +np.float64,0x3fefffffe0000000,0xbe70000008000005,2 +np.float64,0x3fefffffffffffff,0xbca0000000000000,2 +## random numbers ## +np.float64,0x02500186f3d9da56,0xc0855b8abf135773,2 +np.float64,0x09200815a3951173,0xc082ff1ad7131bdc,2 +np.float64,0x0da029623b0243d4,0xc0816fc994695bb5,2 +np.float64,0x48703b8ac483a382,0x40579213a313490b,2 +np.float64,0x09207b74c87c9860,0xc082fee20ff349ef,2 +np.float64,0x62c077698e8df947,0x407821c996d110f0,2 +np.float64,0x2350b45e87c3cfb0,0xc073d6b16b51d072,2 +np.float64,0x3990a23f9ff2b623,0xc051aa60eadd8c61,2 +np.float64,0x0d011386a116c348,0xc081a6cc7ea3b8fb,2 +np.float64,0x1fe0f0303ebe273a,0xc0763870b78a81ca,2 +np.float64,0x0cd1260121d387da,0xc081b7668d61a9d1,2 +np.float64,0x1e6135a8f581d422,0xc077425ac10f08c2,2 +np.float64,0x622168db5fe52d30,0x4077b3c669b9fadb,2 +np.float64,0x69f188e1ec6d1718,0x407d1e2f18c63889,2 +np.float64,0x3aa1bf1d9c4dd1a3,0xc04d682e24bde479,2 +np.float64,0x6c81c4011ce4f683,0x407ee5190e8a8e6a,2 +np.float64,0x2191fa55aa5a5095,0xc0750c0c318b5e2d,2 +np.float64,0x32a1f602a32bf360,0xc06270caa493fc17,2 +np.float64,0x16023c90ba93249b,0xc07d0f88e0801638,2 +np.float64,0x1c525fe6d71fa9ff,0xc078af49c66a5d63,2 +np.float64,0x1a927675815d65b7,0xc079e5bdd7fe376e,2 +np.float64,0x41227b8fe70da028,0x402aa0c9f9a84c71,2 +np.float64,0x4962bb6e853fe87d,0x405a34aa04c83747,2 +np.float64,0x23d2cda00b26b5a4,0xc0737c13a06d00ea,2 +np.float64,0x2d13083fd62987fa,0xc06a25055aeb474e,2 +np.float64,0x10e31e4c9b4579a1,0xc0804e181929418e,2 +np.float64,0x26d3247d556a86a9,0xc0716774171da7e8,2 +np.float64,0x6603379398d0d4ac,0x407a64f51f8a887b,2 +np.float64,0x02d38af17d9442ba,0xc0852d955ac9dd68,2 +np.float64,0x6a2382b4818dd967,0x407d4129d688e5d4,2 +np.float64,0x2ee3c403c79b3934,0xc067a091fefaf8b6,2 +np.float64,0x6493a699acdbf1a4,0x4079663c8602bfc5,2 +np.float64,0x1c8413c4f0de3100,0xc0788c99697059b6,2 +np.float64,0x4573f1ed350d9622,0x404e9bd1e4c08920,2 +np.float64,0x2f34265c9200b69c,0xc067310cfea4e986,2 +np.float64,0x19b43e65fa22029b,0xc07a7f8877de22d6,2 +np.float64,0x0af48ab7925ed6bc,0xc0825c4fbc0e5ade,2 +np.float64,0x4fa49699cad82542,0x4065c76d2a318235,2 +np.float64,0x7204a15e56ade492,0x40815bb87484dffb,2 +np.float64,0x4734aa08a230982d,0x40542a4bf7a361a9,2 +np.float64,0x1ae4ed296c2fd749,0xc079ac4921f20abb,2 +np.float64,0x472514ea4370289c,0x4053ff372bd8f18f,2 +np.float64,0x53a54b3f73820430,0x406b5411fc5f2e33,2 +np.float64,0x64754de5a15684fa,0x407951592e99a5ab,2 +np.float64,0x69358e279868a7c3,0x407c9c671a882c31,2 +np.float64,0x284579ec61215945,0xc0706688e55f0927,2 +np.float64,0x68b5c58806447adc,0x407c43d6f4eff760,2 +np.float64,0x1945a83f98b0e65d,0xc07acc15eeb032cc,2 +np.float64,0x0fc5eb98a16578bf,0xc080b0d02eddca0e,2 +np.float64,0x6a75e208f5784250,0x407d7a7383bf8f05,2 +np.float64,0x0fe63a029c47645d,0xc080a59ca1e98866,2 +np.float64,0x37963ac53f065510,0xc057236281f7bdb6,2 +np.float64,0x135661bb07067ff7,0xc07ee924930c21e4,2 +np.float64,0x4b4699469d458422,0x405f73843756e887,2 +np.float64,0x1a66d73e4bf4881b,0xc07a039ba1c63adf,2 +np.float64,0x12a6b9b119a7da59,0xc07f62e49c6431f3,2 +np.float64,0x24c719aa8fd1bdb5,0xc072d26da4bf84d3,2 
+np.float64,0x0fa6ff524ffef314,0xc080bb8514662e77,2 +np.float64,0x1db751d66fdd4a9a,0xc077b77cb50d7c92,2 +np.float64,0x4947374c516da82c,0x4059e9acfc7105bf,2 +np.float64,0x1b1771ab98f3afc8,0xc07989326b8e1f66,2 +np.float64,0x25e78805baac8070,0xc0720a818e6ef080,2 +np.float64,0x4bd7a148225d3687,0x406082d004ea3ee7,2 +np.float64,0x53d7d6b2bbbda00a,0x406b9a398967cbd5,2 +np.float64,0x6997fb9f4e1c685f,0x407ce0a703413eba,2 +np.float64,0x069802c2ff71b951,0xc083df39bf7acddc,2 +np.float64,0x4d683ac9890f66d8,0x4062ae21d8c2acf0,2 +np.float64,0x5a2825863ec14f4c,0x40722d718d549552,2 +np.float64,0x0398799a88f4db80,0xc084e93dab8e2158,2 +np.float64,0x5ed87a8b77e135a5,0x40756d7051777b33,2 +np.float64,0x5828cd6d79b9bede,0x4070cafb22fc6ca1,2 +np.float64,0x7b18ba2a5ec6f068,0x408481386b3ed6fe,2 +np.float64,0x4938fd60922198fe,0x4059c206b762ea7e,2 +np.float64,0x31b8f44fcdd1a46e,0xc063b2faa8b6434e,2 +np.float64,0x5729341c0d918464,0x407019cac0c4a7d7,2 +np.float64,0x13595e9228ee878e,0xc07ee7235a7d8088,2 +np.float64,0x17698b0dc9dd4135,0xc07c1627e3a5ad5f,2 +np.float64,0x63b977c283abb0cc,0x4078cf1ec6ed65be,2 +np.float64,0x7349cc0d4dc16943,0x4081cc697ce4cb53,2 +np.float64,0x4e49a80b732fb28d,0x4063e67e3c5cbe90,2 +np.float64,0x07ba14b848a8ae02,0xc0837ac032a094e0,2 +np.float64,0x3da9f17b691bfddc,0xc03929c25366acda,2 +np.float64,0x02ea39aa6c3ac007,0xc08525af6f21e1c4,2 +np.float64,0x3a6a42f04ed9563d,0xc04e98e825dca46b,2 +np.float64,0x1afa877cd7900be7,0xc0799d6648cb34a9,2 +np.float64,0x58ea986649e052c6,0x4071512e939ad790,2 +np.float64,0x691abbc04647f536,0x407c89aaae0fcb83,2 +np.float64,0x43aabc5063e6f284,0x4044b45d18106fd2,2 +np.float64,0x488b003c893e0bea,0x4057df012a2dafbe,2 +np.float64,0x77eb076ed67caee5,0x40836720de94769e,2 +np.float64,0x5c1b46974aba46f4,0x40738731ba256007,2 +np.float64,0x1a5b29ecb5d3c261,0xc07a0becc77040d6,2 +np.float64,0x5d8b6ccf868c6032,0x4074865c1865e2db,2 +np.float64,0x4cfb6690b4aaf5af,0x406216cd8c7e8ddb,2 +np.float64,0x76cbd8eb5c5fc39e,0x4083038dc66d682b,2 +np.float64,0x28bbd1fec5012814,0xc07014c2dd1b9711,2 +np.float64,0x33dc1b3a4fd6bf7a,0xc060bd0756e07d8a,2 +np.float64,0x52bbe89b37de99f3,0x406a10041aa7d343,2 +np.float64,0x07bc479d15eb2dd3,0xc0837a1a6e3a3b61,2 +np.float64,0x18fc5275711a901d,0xc07aff3e9d62bc93,2 +np.float64,0x114c9758e247dc71,0xc080299a7cf15b05,2 +np.float64,0x25ac8f6d60755148,0xc07233c4c0c511d4,2 +np.float64,0x260cae2bb9e9fd7e,0xc071f128c7e82eac,2 +np.float64,0x572ccdfe0241de82,0x40701bedc84bb504,2 +np.float64,0x0ddcef6c8d41f5ee,0xc0815a7e16d07084,2 +np.float64,0x6dad1d59c988af68,0x407fb4a0bc0142b1,2 +np.float64,0x025d200580d8b6d1,0xc08556c0bc32b1b2,2 +np.float64,0x7aad344b6aa74c18,0x40845bbc453f22be,2 +np.float64,0x5b5d9d6ad9d14429,0x4073036d2d21f382,2 +np.float64,0x49cd8d8dcdf19954,0x405b5c034f5c7353,2 +np.float64,0x63edb9483335c1e6,0x4078f2dd21378786,2 +np.float64,0x7b1dd64c9d2c26bd,0x408482b922017bc9,2 +np.float64,0x782e13e0b574be5f,0x40837e2a0090a5ad,2 +np.float64,0x592dfe18b9d6db2f,0x40717f777fbcb1ec,2 +np.float64,0x654e3232ac60d72c,0x4079e71a95a70446,2 +np.float64,0x7b8e42ad22091456,0x4084a9a6f1e61722,2 +np.float64,0x570e88dfd5860ae6,0x407006ae6c0d137a,2 +np.float64,0x294e98346cb98ef1,0xc06f5edaac12bd44,2 +np.float64,0x1adeaa4ab792e642,0xc079b1431d5e2633,2 +np.float64,0x7b6ead3377529ac8,0x40849eabc8c7683c,2 +np.float64,0x2b8eedae8a9b2928,0xc06c400054deef11,2 +np.float64,0x65defb45b2dcf660,0x407a4b53f181c05a,2 +np.float64,0x1baf582d475e7701,0xc07920bcad4a502c,2 +np.float64,0x461f39cf05a0f15a,0x405126368f984fa1,2 +np.float64,0x7e5f6f5dcfff005b,0x4085a37d610439b4,2 
+np.float64,0x136f66e4d09bd662,0xc07ed8a2719f2511,2 +np.float64,0x65afd8983fb6ca1f,0x407a2a7f48bf7fc1,2 +np.float64,0x572fa7f95ed22319,0x40701d706cf82e6f,2 diff --git a/.venv/lib/python3.12/site-packages/numpy/core/tests/data/umath-validation-set-log10.csv b/.venv/lib/python3.12/site-packages/numpy/core/tests/data/umath-validation-set-log10.csv new file mode 100644 index 00000000..7f5241a2 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/data/umath-validation-set-log10.csv @@ -0,0 +1,1629 @@ +dtype,input,output,ulperrortol +np.float32,0x3f6fd5c8,0xbce80e8e,4 +np.float32,0x3ea4ab17,0xbefc3deb,4 +np.float32,0x3e87a133,0xbf13b0b7,4 +np.float32,0x3f0d9069,0xbe83bb19,4 +np.float32,0x3f7b9269,0xbbf84f47,4 +np.float32,0x3f7a9ffa,0xbc16fd97,4 +np.float32,0x7f535d34,0x4219cb66,4 +np.float32,0x3e79ad7c,0xbf1ce857,4 +np.float32,0x7e8bfd3b,0x4217dfe9,4 +np.float32,0x3f2d2ee9,0xbe2dcec6,4 +np.float32,0x572e04,0xc21862e4,4 +np.float32,0x7f36f8,0xc217bad5,4 +np.float32,0x3f7982fb,0xbc36aaed,4 +np.float32,0x45b019,0xc218c67c,4 +np.float32,0x3f521c46,0xbdafb3e3,4 +np.float32,0x80000001,0x7fc00000,4 +np.float32,0x3f336c81,0xbe1e107f,4 +np.float32,0x3eac92d7,0xbef1d0bb,4 +np.float32,0x47bdfc,0xc218b990,4 +np.float32,0x7f2d94c8,0x421973d1,4 +np.float32,0x7d53ff8d,0x4214fbb6,4 +np.float32,0x3f581e4e,0xbd96a079,4 +np.float32,0x7ddaf20d,0x42163e4e,4 +np.float32,0x3f341d3c,0xbe1c5b4c,4 +np.float32,0x7ef04ba9,0x4218d032,4 +np.float32,0x620ed2,0xc2182e99,4 +np.float32,0x507850,0xc2188682,4 +np.float32,0x7d08f9,0xc217c284,4 +np.float32,0x7f0cf2aa,0x42191734,4 +np.float32,0x3f109a17,0xbe7e04fe,4 +np.float32,0x7f426152,0x4219a625,4 +np.float32,0x7f32d5a3,0x42198113,4 +np.float32,0x2e14b2,0xc2197e6f,4 +np.float32,0x3a5acd,0xc219156a,4 +np.float32,0x50a565,0xc2188589,4 +np.float32,0x5b751c,0xc2184d97,4 +np.float32,0x7e4149f6,0x42173b22,4 +np.float32,0x3dc34bf9,0xbf82a42a,4 +np.float32,0x3d12bc28,0xbfb910d6,4 +np.float32,0x7ebd2584,0x421865c1,4 +np.float32,0x7f6b3375,0x4219faeb,4 +np.float32,0x7fa00000,0x7fe00000,4 +np.float32,0x3f35fe7d,0xbe17bd33,4 +np.float32,0x7db45c87,0x4215e818,4 +np.float32,0x3efff366,0xbe9a2b8d,4 +np.float32,0x3eb331d0,0xbee971a3,4 +np.float32,0x3f259d5f,0xbe41ae2e,4 +np.float32,0x3eab85ec,0xbef32c4a,4 +np.float32,0x7f194b8a,0x42193c8c,4 +np.float32,0x3f11a614,0xbe7acfc7,4 +np.float32,0x5b17,0xc221f16b,4 +np.float32,0x3f33dadc,0xbe1cff4d,4 +np.float32,0x3cda1506,0xbfc9920f,4 +np.float32,0x3f6856f1,0xbd2c8290,4 +np.float32,0x7f3357fb,0x42198257,4 +np.float32,0x7f56f329,0x4219d2e1,4 +np.float32,0x3ef84108,0xbea0f595,4 +np.float32,0x3f72340f,0xbcc51916,4 +np.float32,0x3daf28,0xc218fcbd,4 +np.float32,0x131035,0xc21b06f4,4 +np.float32,0x3f275c3b,0xbe3d0487,4 +np.float32,0x3ef06130,0xbea82069,4 +np.float32,0x3f57f3b0,0xbd974fef,4 +np.float32,0x7f6c4a78,0x4219fcfa,4 +np.float32,0x7e8421d0,0x4217c639,4 +np.float32,0x3f17a479,0xbe68e08e,4 +np.float32,0x7f03774e,0x4218f83b,4 +np.float32,0x441a33,0xc218d0b8,4 +np.float32,0x539158,0xc21875b6,4 +np.float32,0x3e8fcc75,0xbf0d3018,4 +np.float32,0x7ef74130,0x4218dce4,4 +np.float32,0x3ea6f4fa,0xbef92c38,4 +np.float32,0x7f3948ab,0x421990d5,4 +np.float32,0x7db6f8f5,0x4215ee7c,4 +np.float32,0x3ee44a2f,0xbeb399e5,4 +np.float32,0x156c59,0xc21ad30d,4 +np.float32,0x3f21ee53,0xbe4baf16,4 +np.float32,0x3f2c08f4,0xbe30c424,4 +np.float32,0x3f49885c,0xbdd4c6a9,4 +np.float32,0x3eae0b9c,0xbeefed54,4 +np.float32,0x1b5c1f,0xc21a6646,4 +np.float32,0x3e7330e2,0xbf1fd592,4 +np.float32,0x3ebbeb4c,0xbededf82,4 +np.float32,0x427154,0xc218dbb1,4 
+np.float32,0x3f6b8b4b,0xbd142498,4 +np.float32,0x8e769,0xc21c5981,4 +np.float32,0x3e9db557,0xbf02ec1c,4 +np.float32,0x3f001bef,0xbe99f019,4 +np.float32,0x3e58b48c,0xbf2ca77a,4 +np.float32,0x3d46c16b,0xbfa8327c,4 +np.float32,0x7eeeb305,0x4218cd3b,4 +np.float32,0x3e3f163d,0xbf3aa446,4 +np.float32,0x3f66c872,0xbd3877d9,4 +np.float32,0x7f7162f8,0x421a0677,4 +np.float32,0x3edca3bc,0xbebb2e28,4 +np.float32,0x3dc1055b,0xbf834afa,4 +np.float32,0x12b16f,0xc21b0fad,4 +np.float32,0x3f733898,0xbcb62e16,4 +np.float32,0x3e617af8,0xbf283db0,4 +np.float32,0x7e86577a,0x4217cd99,4 +np.float32,0x3f0ba3c7,0xbe86c633,4 +np.float32,0x3f4cad25,0xbdc70247,4 +np.float32,0xb6cdf,0xc21bea9f,4 +np.float32,0x3f42971a,0xbdf3f49e,4 +np.float32,0x3e6ccad2,0xbf22cc78,4 +np.float32,0x7f2121b2,0x421952b8,4 +np.float32,0x3f6d3f55,0xbd075366,4 +np.float32,0x3f524f,0xc218f117,4 +np.float32,0x3e95b5d9,0xbf08b56a,4 +np.float32,0x7f6ae47d,0x4219fa56,4 +np.float32,0x267539,0xc219ceda,4 +np.float32,0x3ef72f6d,0xbea1eb2e,4 +np.float32,0x2100b2,0xc21a12e2,4 +np.float32,0x3d9777d1,0xbf90c4e7,4 +np.float32,0x44c6f5,0xc218cc56,4 +np.float32,0x7f2a613d,0x42196b8a,4 +np.float32,0x390a25,0xc2191f8d,4 +np.float32,0x3f1de5ad,0xbe56e703,4 +np.float32,0x2f59ce,0xc2197258,4 +np.float32,0x7f3b12a1,0x4219951b,4 +np.float32,0x3ecb66d4,0xbecd44ca,4 +np.float32,0x7e74ff,0xc217bd7d,4 +np.float32,0x7ed83f78,0x4218a14d,4 +np.float32,0x685994,0xc21812f1,4 +np.float32,0xbf800000,0x7fc00000,4 +np.float32,0x736f47,0xc217e60b,4 +np.float32,0x7f09c371,0x42190d0a,4 +np.float32,0x3f7ca51d,0xbbbbbce0,4 +np.float32,0x7f4b4d3b,0x4219ba1a,4 +np.float32,0x3f6c4471,0xbd0eb076,4 +np.float32,0xd944e,0xc21b9dcf,4 +np.float32,0x7cb06ffc,0x421375cd,4 +np.float32,0x586187,0xc2185cce,4 +np.float32,0x3f3cbf5b,0xbe078911,4 +np.float32,0x3f30b504,0xbe24d983,4 +np.float32,0x3f0a16ba,0xbe8941fd,4 +np.float32,0x5c43b0,0xc21849af,4 +np.float32,0x3dad74f6,0xbf893bd5,4 +np.float32,0x3c586958,0xbff087a6,4 +np.float32,0x3e8307a8,0xbf1786ba,4 +np.float32,0x7dcd1776,0x4216213d,4 +np.float32,0x3f44d107,0xbde9d662,4 +np.float32,0x3e2e6823,0xbf44cbec,4 +np.float32,0x3d87ea27,0xbf96caca,4 +np.float32,0x3e0c715b,0xbf5ce07e,4 +np.float32,0x7ec9cd5a,0x4218828e,4 +np.float32,0x3e26c0b4,0xbf49c93e,4 +np.float32,0x75b94e,0xc217dd50,4 +np.float32,0x3df7b9f5,0xbf6ad7f4,4 +np.float32,0x0,0xff800000,4 +np.float32,0x3f284795,0xbe3a94da,4 +np.float32,0x7ee49092,0x4218b9f0,4 +np.float32,0x7f4c20e0,0x4219bbe8,4 +np.float32,0x3efbbce8,0xbe9ddc4b,4 +np.float32,0x12274a,0xc21b1cb4,4 +np.float32,0x5fa1b1,0xc21839be,4 +np.float32,0x7f0b210e,0x4219116d,4 +np.float32,0x3f67092a,0xbd368545,4 +np.float32,0x3d572721,0xbfa3ca5b,4 +np.float32,0x3f7913ce,0xbc431028,4 +np.float32,0x3b0613,0xc2191059,4 +np.float32,0x3e1d16c0,0xbf506c6f,4 +np.float32,0xab130,0xc21c081d,4 +np.float32,0x3e23ac97,0xbf4bdb9d,4 +np.float32,0x7ef52368,0x4218d911,4 +np.float32,0x7f38e686,0x42198fe9,4 +np.float32,0x3f106a21,0xbe7e9897,4 +np.float32,0x3ecef8d5,0xbec96644,4 +np.float32,0x3ec37e02,0xbed61683,4 +np.float32,0x3efbd063,0xbe9dcb17,4 +np.float32,0x3f318fe3,0xbe22b402,4 +np.float32,0x7e5e5228,0x4217795d,4 +np.float32,0x72a046,0xc217e92c,4 +np.float32,0x7f6f970b,0x421a0324,4 +np.float32,0x3ed871b4,0xbebf72fb,4 +np.float32,0x7a2eaa,0xc217ccc8,4 +np.float32,0x3e819655,0xbf18c1d7,4 +np.float32,0x80800000,0x7fc00000,4 +np.float32,0x7eab0719,0x421838f9,4 +np.float32,0x7f0763cb,0x4219054f,4 +np.float32,0x3f191672,0xbe64a8af,4 +np.float32,0x7d4327,0xc217c1b6,4 +np.float32,0x3f724ba6,0xbcc3bea3,4 +np.float32,0x60fe06,0xc2183375,4 
+np.float32,0x48cd59,0xc218b30b,4 +np.float32,0x3f7fec2b,0xb909d3f3,4 +np.float32,0x1c7bb9,0xc21a5460,4 +np.float32,0x24d8a8,0xc219e1e4,4 +np.float32,0x3e727c52,0xbf20283c,4 +np.float32,0x4bc460,0xc218a14a,4 +np.float32,0x63e313,0xc2182661,4 +np.float32,0x7f625581,0x4219e9d4,4 +np.float32,0x3eeb3e77,0xbeacedc0,4 +np.float32,0x7ef27a47,0x4218d437,4 +np.float32,0x27105a,0xc219c7e6,4 +np.float32,0x22a10b,0xc219fd7d,4 +np.float32,0x3f41e907,0xbdf711ab,4 +np.float32,0x7c1fbf95,0x4212155b,4 +np.float32,0x7e5acceb,0x42177244,4 +np.float32,0x3e0892fa,0xbf5ffb83,4 +np.float32,0x3ea0e51d,0xbf00b2c0,4 +np.float32,0x3e56fc29,0xbf2d8a51,4 +np.float32,0x7ee724ed,0x4218beed,4 +np.float32,0x7ebf142b,0x42186a46,4 +np.float32,0x7f6cf35c,0x4219fe37,4 +np.float32,0x3f11abf7,0xbe7abdcd,4 +np.float32,0x588d7a,0xc2185bf1,4 +np.float32,0x3f6e81d2,0xbcfbcf97,4 +np.float32,0x3f1b6be8,0xbe5dee2b,4 +np.float32,0x7f3815e0,0x42198df2,4 +np.float32,0x3f5bfc88,0xbd86d93d,4 +np.float32,0x3f3775d0,0xbe142bbc,4 +np.float32,0x78a958,0xc217d25a,4 +np.float32,0x2ff7c3,0xc2196c96,4 +np.float32,0x4b9c0,0xc21d733c,4 +np.float32,0x3ec025af,0xbed9ecf3,4 +np.float32,0x6443f0,0xc21824b3,4 +np.float32,0x3f754e28,0xbc97d299,4 +np.float32,0x3eaa91d3,0xbef4699d,4 +np.float32,0x3e5f2837,0xbf296478,4 +np.float32,0xe5676,0xc21b85a4,4 +np.float32,0x3f6859f2,0xbd2c6b90,4 +np.float32,0x3f68686b,0xbd2bfcc6,4 +np.float32,0x4b39b8,0xc218a47b,4 +np.float32,0x630ac4,0xc2182a28,4 +np.float32,0x160980,0xc21ac67d,4 +np.float32,0x3ed91c4d,0xbebec3fd,4 +np.float32,0x7ec27b0d,0x4218721f,4 +np.float32,0x3f3c0a5f,0xbe09344b,4 +np.float32,0x3dbff9c1,0xbf839841,4 +np.float32,0x7f0e8ea7,0x42191c40,4 +np.float32,0x3f36b162,0xbe1608e4,4 +np.float32,0x228bb3,0xc219fe90,4 +np.float32,0x2fdd30,0xc2196d8c,4 +np.float32,0x3e8fce8e,0xbf0d2e79,4 +np.float32,0x3f36acc7,0xbe16141a,4 +np.float32,0x7f44b51c,0x4219ab70,4 +np.float32,0x3ec3371c,0xbed66736,4 +np.float32,0x4388a2,0xc218d473,4 +np.float32,0x3f5aa6c3,0xbd8c4344,4 +np.float32,0x7f09fce4,0x42190dc3,4 +np.float32,0x7ed7854a,0x42189fce,4 +np.float32,0x7f4da83a,0x4219bf3a,4 +np.float32,0x3db8da28,0xbf85b25a,4 +np.float32,0x7f449686,0x4219ab2b,4 +np.float32,0x2eb25,0xc21e498c,4 +np.float32,0x3f2bcc08,0xbe3161bd,4 +np.float32,0x36c923,0xc219317b,4 +np.float32,0x3d52a866,0xbfa4f6d2,4 +np.float32,0x3f7d6688,0xbb913e4e,4 +np.float32,0x3f5a6ba4,0xbd8d33e3,4 +np.float32,0x719740,0xc217ed35,4 +np.float32,0x78a472,0xc217d26c,4 +np.float32,0x7ee33d0c,0x4218b759,4 +np.float32,0x7f668c1d,0x4219f208,4 +np.float32,0x3e29c600,0xbf47ca46,4 +np.float32,0x3f3cefc3,0xbe071712,4 +np.float32,0x3e224ebd,0xbf4cca41,4 +np.float32,0x7f1417be,0x42192d31,4 +np.float32,0x7f29d7d5,0x42196a23,4 +np.float32,0x3338ce,0xc2194f65,4 +np.float32,0x2a7897,0xc219a2b6,4 +np.float32,0x3d6bc3d8,0xbf9eb468,4 +np.float32,0x3f6bd7bf,0xbd11e392,4 +np.float32,0x7f6d26bf,0x4219fe98,4 +np.float32,0x3f52d378,0xbdacadb5,4 +np.float32,0x3efac453,0xbe9eb84a,4 +np.float32,0x3f692eb7,0xbd261184,4 +np.float32,0x3f6a0bb5,0xbd1f7ec1,4 +np.float32,0x3f037a49,0xbe942aa8,4 +np.float32,0x3f465bd4,0xbde2e530,4 +np.float32,0x7ef0f47b,0x4218d16a,4 +np.float32,0x637127,0xc218285e,4 +np.float32,0x3f41e511,0xbdf723d7,4 +np.float32,0x7f800000,0x7f800000,4 +np.float32,0x3f3342d5,0xbe1e77d5,4 +np.float32,0x7f57cfe6,0x4219d4a9,4 +np.float32,0x3e4358ed,0xbf3830a7,4 +np.float32,0x3ce25f15,0xbfc77f2b,4 +np.float32,0x7ed057e7,0x421890be,4 +np.float32,0x7ce154d9,0x4213e295,4 +np.float32,0x3ee91984,0xbeaef703,4 +np.float32,0x7e4e919c,0x421758af,4 +np.float32,0x6830e7,0xc218139e,4 
+np.float32,0x3f12f08e,0xbe76e328,4 +np.float32,0x7f0a7a32,0x42190f56,4 +np.float32,0x7f38e,0xc21c8bd3,4 +np.float32,0x3e01def9,0xbf6593e3,4 +np.float32,0x3f5c8c6d,0xbd849432,4 +np.float32,0x3eed8747,0xbeaac7a3,4 +np.float32,0x3cadaa0e,0xbfd63b21,4 +np.float32,0x3f7532a9,0xbc996178,4 +np.float32,0x31f3ac,0xc2195a8f,4 +np.float32,0x3f0e0f97,0xbe82f3af,4 +np.float32,0x3f2a1f35,0xbe35bd3f,4 +np.float32,0x3f4547b2,0xbde7bebd,4 +np.float32,0x3f7988a6,0xbc36094c,4 +np.float32,0x74464c,0xc217e2d2,4 +np.float32,0x7f7518be,0x421a0d3f,4 +np.float32,0x7e97fa0a,0x42180473,4 +np.float32,0x584e3a,0xc2185d2f,4 +np.float32,0x3e7291f3,0xbf201e52,4 +np.float32,0xc0a05,0xc21bd359,4 +np.float32,0x3a3177,0xc21916a6,4 +np.float32,0x4f417f,0xc2188d45,4 +np.float32,0x263fce,0xc219d145,4 +np.float32,0x7e1d58,0xc217beb1,4 +np.float32,0x7f056af3,0x4218fec9,4 +np.float32,0x3f21c181,0xbe4c2a3f,4 +np.float32,0x7eca4956,0x4218839f,4 +np.float32,0x3e58afa8,0xbf2ca9fd,4 +np.float32,0x3f40d583,0xbdfc04ef,4 +np.float32,0x7f432fbb,0x4219a7fc,4 +np.float32,0x43aaa4,0xc218d393,4 +np.float32,0x7f2c9b62,0x42197150,4 +np.float32,0x5c3876,0xc21849e5,4 +np.float32,0x7f2034e8,0x42195029,4 +np.float32,0x7e5be772,0x42177481,4 +np.float32,0x80000000,0xff800000,4 +np.float32,0x3f5be03b,0xbd874bb0,4 +np.float32,0x3e32494f,0xbf4259be,4 +np.float32,0x3e1f4671,0xbf4ee30b,4 +np.float32,0x4606cc,0xc218c454,4 +np.float32,0x425cbc,0xc218dc3b,4 +np.float32,0x7dd9b8bf,0x42163bd0,4 +np.float32,0x3f0465d0,0xbe929db7,4 +np.float32,0x3f735077,0xbcb4d0fa,4 +np.float32,0x4d6a43,0xc21897b8,4 +np.float32,0x3e27d600,0xbf4910f5,4 +np.float32,0x3f06e0cc,0xbe8e7d24,4 +np.float32,0x3f3fd064,0xbe005e45,4 +np.float32,0x176f1,0xc21f7c2d,4 +np.float32,0x3eb64e6f,0xbee59d9c,4 +np.float32,0x7f0f075d,0x42191db8,4 +np.float32,0x3f718cbe,0xbcceb621,4 +np.float32,0x3ead7bda,0xbef0a54a,4 +np.float32,0x7f77c1a8,0x421a120c,4 +np.float32,0x3f6a79c5,0xbd1c3afd,4 +np.float32,0x3e992d1f,0xbf062a02,4 +np.float32,0x3e6f6335,0xbf219639,4 +np.float32,0x7f6d9a3e,0x4219ff70,4 +np.float32,0x557ed1,0xc2186b91,4 +np.float32,0x3f13a456,0xbe74c457,4 +np.float32,0x15c2dc,0xc21acc17,4 +np.float32,0x71f36f,0xc217ebcc,4 +np.float32,0x748dea,0xc217e1c1,4 +np.float32,0x7f0f32e0,0x42191e3f,4 +np.float32,0x5b1da8,0xc2184f41,4 +np.float32,0x3d865d3a,0xbf976e11,4 +np.float32,0x3f800000,0x0,4 +np.float32,0x7f67b56d,0x4219f444,4 +np.float32,0x6266a1,0xc2182d0c,4 +np.float32,0x3ec9c5e4,0xbecf0e6b,4 +np.float32,0x6a6a0e,0xc2180a3b,4 +np.float32,0x7e9db6fd,0x421814ef,4 +np.float32,0x3e7458f7,0xbf1f4e88,4 +np.float32,0x3ead8016,0xbef09fdc,4 +np.float32,0x3e263d1c,0xbf4a211e,4 +np.float32,0x7f6b3329,0x4219faeb,4 +np.float32,0x800000,0xc217b818,4 +np.float32,0x3f0654c7,0xbe8f6471,4 +np.float32,0x3f281b71,0xbe3b0990,4 +np.float32,0x7c4c8e,0xc217c524,4 +np.float32,0x7d113a87,0x4214537d,4 +np.float32,0x734b5f,0xc217e696,4 +np.float32,0x7f079d05,0x4219060b,4 +np.float32,0x3ee830b1,0xbeafd58b,4 +np.float32,0x3f1c3b8b,0xbe5b9d96,4 +np.float32,0x3f2bf0c6,0xbe3102aa,4 +np.float32,0x7ddffe22,0x42164871,4 +np.float32,0x3f1e58b4,0xbe55a37f,4 +np.float32,0x5f3edf,0xc2183b8a,4 +np.float32,0x7f1fb6ec,0x42194eca,4 +np.float32,0x3f78718e,0xbc55311e,4 +np.float32,0x3e574b7d,0xbf2d6152,4 +np.float32,0x7eab27c6,0x4218394e,4 +np.float32,0x7f34603c,0x421984e5,4 +np.float32,0x3f3a8b57,0xbe0cc1ca,4 +np.float32,0x3f744181,0xbca7134e,4 +np.float32,0x3f7e3bc4,0xbb45156b,4 +np.float32,0x93ab4,0xc21c498b,4 +np.float32,0x7ed5541e,0x42189b42,4 +np.float32,0x6bf8ec,0xc21803c4,4 +np.float32,0x757395,0xc217de58,4 
+np.float32,0x7f177214,0x42193726,4 +np.float32,0x59935f,0xc21856d6,4 +np.float32,0x2cd9ba,0xc2198a78,4 +np.float32,0x3ef6fd5c,0xbea2183c,4 +np.float32,0x3ebb6c63,0xbedf75e0,4 +np.float32,0x7f43272c,0x4219a7e9,4 +np.float32,0x7f42e67d,0x4219a755,4 +np.float32,0x3f3f744f,0xbe0133f6,4 +np.float32,0x7f5fddaa,0x4219e4f4,4 +np.float32,0x3dc9874f,0xbf80e529,4 +np.float32,0x3f2efe64,0xbe292ec8,4 +np.float32,0x3e0406a6,0xbf63bf7c,4 +np.float32,0x3cdbb0aa,0xbfc92984,4 +np.float32,0x3e6597e7,0xbf263b30,4 +np.float32,0x3f0c1153,0xbe861807,4 +np.float32,0x7fce16,0xc217b8c6,4 +np.float32,0x3f5f4e5f,0xbd730dc6,4 +np.float32,0x3ed41ffa,0xbec3ee69,4 +np.float32,0x3f216c78,0xbe4d1446,4 +np.float32,0x3f123ed7,0xbe78fe4b,4 +np.float32,0x7f7e0ca9,0x421a1d34,4 +np.float32,0x7e318af4,0x42171558,4 +np.float32,0x7f1e1659,0x42194a3d,4 +np.float32,0x34d12a,0xc21941c2,4 +np.float32,0x3d9566ad,0xbf918870,4 +np.float32,0x3e799a47,0xbf1cf0e5,4 +np.float32,0x3e89dd6f,0xbf11df76,4 +np.float32,0x32f0d3,0xc21951d8,4 +np.float32,0x7e89d17e,0x4217d8f6,4 +np.float32,0x1f3b38,0xc21a2b6b,4 +np.float32,0x7ee9e060,0x4218c427,4 +np.float32,0x31a673,0xc2195d41,4 +np.float32,0x5180f1,0xc21880d5,4 +np.float32,0x3cd36f,0xc21902f8,4 +np.float32,0x3bb63004,0xc01050cb,4 +np.float32,0x3e8ee9d1,0xbf0ddfde,4 +np.float32,0x3d2a7da3,0xbfb0b970,4 +np.float32,0x3ea58107,0xbefb1dc3,4 +np.float32,0x7f6760b0,0x4219f3a2,4 +np.float32,0x7f7f9e08,0x421a1ff0,4 +np.float32,0x37e7f1,0xc219287b,4 +np.float32,0x3ef7eb53,0xbea14267,4 +np.float32,0x3e2eb581,0xbf449aa5,4 +np.float32,0x3da7671c,0xbf8b3568,4 +np.float32,0x7af36f7b,0x420f33ee,4 +np.float32,0x3eb3602c,0xbee93823,4 +np.float32,0x3f68bcff,0xbd2975de,4 +np.float32,0x3ea7cefb,0xbef80a9d,4 +np.float32,0x3f329689,0xbe202414,4 +np.float32,0x7f0c7c80,0x421915be,4 +np.float32,0x7f4739b8,0x4219b118,4 +np.float32,0x73af58,0xc217e515,4 +np.float32,0x7f13eb2a,0x42192cab,4 +np.float32,0x30f2d9,0xc2196395,4 +np.float32,0x7ea7066c,0x42182e71,4 +np.float32,0x669fec,0xc2181a5b,4 +np.float32,0x3f7d6876,0xbb90d1ef,4 +np.float32,0x3f08a4ef,0xbe8b9897,4 +np.float32,0x7f2a906c,0x42196c05,4 +np.float32,0x3ed3ca42,0xbec44856,4 +np.float32,0x9d27,0xc220fee2,4 +np.float32,0x3e4508a1,0xbf373c03,4 +np.float32,0x3e41f8de,0xbf38f9bb,4 +np.float32,0x3e912714,0xbf0c255b,4 +np.float32,0xff800000,0x7fc00000,4 +np.float32,0x7eefd13d,0x4218cf4f,4 +np.float32,0x3f491674,0xbdd6bded,4 +np.float32,0x3ef49512,0xbea445c9,4 +np.float32,0x3f045b79,0xbe92af15,4 +np.float32,0x3ef6c412,0xbea24bd5,4 +np.float32,0x3e6f3c28,0xbf21a85d,4 +np.float32,0x3ef71839,0xbea2000e,4 +np.float32,0x1,0xc23369f4,4 +np.float32,0x3e3fcfe4,0xbf3a3876,4 +np.float32,0x3e9d7a65,0xbf0315b2,4 +np.float32,0x20b7c4,0xc21a16bd,4 +np.float32,0x7f707b10,0x421a04cb,4 +np.float32,0x7fc00000,0x7fc00000,4 +np.float32,0x3f285ebd,0xbe3a57ac,4 +np.float32,0x74c9ea,0xc217e0dc,4 +np.float32,0x3f6501f2,0xbd4634ab,4 +np.float32,0x3f248959,0xbe4495cc,4 +np.float32,0x7e915ff0,0x4217f0b3,4 +np.float32,0x7edbb910,0x4218a864,4 +np.float32,0x3f7042dd,0xbce1bddb,4 +np.float32,0x6f08c9,0xc217f754,4 +np.float32,0x7f423993,0x4219a5ca,4 +np.float32,0x3f125704,0xbe78b4cd,4 +np.float32,0x7ef7f5ae,0x4218de28,4 +np.float32,0x3f2dd940,0xbe2c1a33,4 +np.float32,0x3f1ca78e,0xbe5a6a8b,4 +np.float32,0x244863,0xc219e8be,4 +np.float32,0x3f2614fe,0xbe406d6b,4 +np.float32,0x3e75e7a3,0xbf1e99b5,4 +np.float32,0x2bdd6e,0xc2199459,4 +np.float32,0x7e49e279,0x42174e7b,4 +np.float32,0x3e3bb09a,0xbf3ca2cd,4 +np.float32,0x649f06,0xc2182320,4 +np.float32,0x7f4a44e1,0x4219b7d6,4 +np.float32,0x400473,0xc218ec3a,4 
+np.float32,0x3edb19ad,0xbebcbcad,4 +np.float32,0x3d8ee956,0xbf94006c,4 +np.float32,0x7e91c603,0x4217f1eb,4 +np.float32,0x221384,0xc21a04a6,4 +np.float32,0x7f7dd660,0x421a1cd5,4 +np.float32,0x7ef34609,0x4218d5ac,4 +np.float32,0x7f5ed529,0x4219e2e5,4 +np.float32,0x7f1bf685,0x42194438,4 +np.float32,0x3cdd094a,0xbfc8d294,4 +np.float32,0x7e87fc8e,0x4217d303,4 +np.float32,0x7f53d971,0x4219cc6b,4 +np.float32,0xabc8b,0xc21c0646,4 +np.float32,0x7f5011e6,0x4219c46a,4 +np.float32,0x7e460638,0x421745e5,4 +np.float32,0xa8126,0xc21c0ffd,4 +np.float32,0x3eec2a66,0xbeac0f2d,4 +np.float32,0x3f3a1213,0xbe0de340,4 +np.float32,0x7f5908db,0x4219d72c,4 +np.float32,0x7e0ad3c5,0x4216a7f3,4 +np.float32,0x3f2de40e,0xbe2bfe90,4 +np.float32,0x3d0463c5,0xbfbec8e4,4 +np.float32,0x7c7cde0b,0x4212e19a,4 +np.float32,0x74c24f,0xc217e0f9,4 +np.float32,0x3f14b4cb,0xbe71929b,4 +np.float32,0x3e94e192,0xbf09537f,4 +np.float32,0x3eebde71,0xbeac56bd,4 +np.float32,0x3f65e413,0xbd3f5b8a,4 +np.float32,0x7e109199,0x4216b9f9,4 +np.float32,0x3f22f5d0,0xbe48ddc0,4 +np.float32,0x3e22d3bc,0xbf4c6f4d,4 +np.float32,0x3f7a812f,0xbc1a680b,4 +np.float32,0x3f67f361,0xbd2f7d7c,4 +np.float32,0x3f1caa63,0xbe5a6281,4 +np.float32,0x3f306fde,0xbe2587ab,4 +np.float32,0x3e8df9d3,0xbf0e9b2f,4 +np.float32,0x3eaaccc4,0xbef41cd4,4 +np.float32,0x7f3f65ec,0x42199f45,4 +np.float32,0x3dc706e0,0xbf8196ec,4 +np.float32,0x3e14eaba,0xbf565cf6,4 +np.float32,0xcc60,0xc2208a09,4 +np.float32,0x358447,0xc2193be7,4 +np.float32,0x3dcecade,0xbf7eec70,4 +np.float32,0x3f20b4f8,0xbe4f0ef0,4 +np.float32,0x7e7c979f,0x4217b222,4 +np.float32,0x7f2387b9,0x4219594a,4 +np.float32,0x3f6f6e5c,0xbcee0e05,4 +np.float32,0x7f19ad81,0x42193da8,4 +np.float32,0x5635e1,0xc21867dd,4 +np.float32,0x4c5e97,0xc2189dc4,4 +np.float32,0x7f35f97f,0x421988d1,4 +np.float32,0x7f685224,0x4219f571,4 +np.float32,0x3eca0616,0xbecec7b8,4 +np.float32,0x3f436d0d,0xbdf024ca,4 +np.float32,0x12a97d,0xc21b106a,4 +np.float32,0x7f0fdc93,0x4219204d,4 +np.float32,0x3debfb42,0xbf703e65,4 +np.float32,0x3c6c54d2,0xbfeba291,4 +np.float32,0x7e5d7491,0x421777a1,4 +np.float32,0x3f4bd2f0,0xbdcab87d,4 +np.float32,0x3f7517f4,0xbc9ae510,4 +np.float32,0x3f71a59a,0xbccd480d,4 +np.float32,0x3f514653,0xbdb33f61,4 +np.float32,0x3f4e6ea4,0xbdbf694b,4 +np.float32,0x3eadadec,0xbef06526,4 +np.float32,0x3f3b41c1,0xbe0b0fbf,4 +np.float32,0xc35a,0xc2209e1e,4 +np.float32,0x384982,0xc2192575,4 +np.float32,0x3464c3,0xc2194556,4 +np.float32,0x7f5e20d9,0x4219e17d,4 +np.float32,0x3ea18b62,0xbf004016,4 +np.float32,0x63a02b,0xc218278c,4 +np.float32,0x7ef547ba,0x4218d953,4 +np.float32,0x3f2496fb,0xbe4470f4,4 +np.float32,0x7ea0c8c6,0x42181d81,4 +np.float32,0x3f42ba60,0xbdf35372,4 +np.float32,0x7e40d9,0xc217be34,4 +np.float32,0x3e95883b,0xbf08d750,4 +np.float32,0x3e0cddf3,0xbf5c8aa8,4 +np.float32,0x3f2305d5,0xbe48b20a,4 +np.float32,0x7f0d0941,0x4219177b,4 +np.float32,0x3f7b98d3,0xbbf6e477,4 +np.float32,0x3f687cdc,0xbd2b6057,4 +np.float32,0x3f42ce91,0xbdf2f73d,4 +np.float32,0x3ee00fc0,0xbeb7c217,4 +np.float32,0x7f3d483a,0x42199a53,4 +np.float32,0x3e1e08eb,0xbf4fc18d,4 +np.float32,0x7e202ff5,0x4216e798,4 +np.float32,0x582898,0xc2185ded,4 +np.float32,0x3e3552b1,0xbf40790c,4 +np.float32,0x3d3f7c87,0xbfaa44b6,4 +np.float32,0x669d8e,0xc2181a65,4 +np.float32,0x3f0e21b4,0xbe82d757,4 +np.float32,0x686f95,0xc2181293,4 +np.float32,0x3f48367f,0xbdda9ead,4 +np.float32,0x3dc27802,0xbf82e0a0,4 +np.float32,0x3f6ac40c,0xbd1a07d4,4 +np.float32,0x3bba6d,0xc2190b12,4 +np.float32,0x3ec7b6b0,0xbed15665,4 +np.float32,0x3f1f9ca4,0xbe521955,4 
+np.float32,0x3ef2f147,0xbea5c4b8,4 +np.float32,0x7c65f769,0x4212b762,4 +np.float32,0x7e98e162,0x42180716,4 +np.float32,0x3f0f0c09,0xbe8169ea,4 +np.float32,0x3d67f03b,0xbf9f9d48,4 +np.float32,0x7f3751e4,0x42198c18,4 +np.float32,0x7f1fac61,0x42194ead,4 +np.float32,0x3e9b698b,0xbf048d89,4 +np.float32,0x7e66507b,0x42178913,4 +np.float32,0x7f5cb680,0x4219dea5,4 +np.float32,0x234700,0xc219f53e,4 +np.float32,0x3d9984ad,0xbf900591,4 +np.float32,0x3f33a3f2,0xbe1d872a,4 +np.float32,0x3eaf52b6,0xbeee4cf4,4 +np.float32,0x7f078930,0x421905ca,4 +np.float32,0x3f083b39,0xbe8c44df,4 +np.float32,0x3e3823f8,0xbf3ec231,4 +np.float32,0x3eef6f5d,0xbea9008c,4 +np.float32,0x6145e1,0xc218322c,4 +np.float32,0x16d9ae,0xc21ab65f,4 +np.float32,0x7e543376,0x421764a5,4 +np.float32,0x3ef77ccb,0xbea1a5a0,4 +np.float32,0x3f4a443f,0xbdd18af5,4 +np.float32,0x8f209,0xc21c5770,4 +np.float32,0x3ecac126,0xbecdfa33,4 +np.float32,0x3e8662f9,0xbf14b6c7,4 +np.float32,0x23759a,0xc219f2f4,4 +np.float32,0xf256d,0xc21b6d3f,4 +np.float32,0x3f579f93,0xbd98aaa2,4 +np.float32,0x3ed4cc8e,0xbec339cb,4 +np.float32,0x3ed25400,0xbec5d2a1,4 +np.float32,0x3ed6f8ba,0xbec0f795,4 +np.float32,0x7f36efd9,0x42198b2a,4 +np.float32,0x7f5169dd,0x4219c746,4 +np.float32,0x7de18a20,0x42164b80,4 +np.float32,0x3e8de526,0xbf0eab61,4 +np.float32,0x3de0cbcd,0xbf75a47e,4 +np.float32,0xe265f,0xc21b8b82,4 +np.float32,0x3df3cdbd,0xbf6c9e40,4 +np.float32,0x3f38a25a,0xbe115589,4 +np.float32,0x7f01f2c0,0x4218f311,4 +np.float32,0x3da7d5f4,0xbf8b10a5,4 +np.float32,0x4d4fe8,0xc2189850,4 +np.float32,0x3cc96d9d,0xbfcdfc8d,4 +np.float32,0x259a88,0xc219d8d7,4 +np.float32,0x7f1d5102,0x42194810,4 +np.float32,0x7e17ca91,0x4216cfa7,4 +np.float32,0x3f73d110,0xbcad7a8f,4 +np.float32,0x3f009383,0xbe9920ed,4 +np.float32,0x7e22af,0xc217be9f,4 +np.float32,0x3f7de2ce,0xbb6c0394,4 +np.float32,0x3edd0cd2,0xbebac45a,4 +np.float32,0x3ec9b5c1,0xbecf2035,4 +np.float32,0x3168c5,0xc2195f6b,4 +np.float32,0x3e935522,0xbf0a7d18,4 +np.float32,0x3e494077,0xbf34e120,4 +np.float32,0x3f52ed06,0xbdac41ec,4 +np.float32,0x3f73d51e,0xbcad3f65,4 +np.float32,0x3f03d453,0xbe939295,4 +np.float32,0x7ef4ee68,0x4218d8b1,4 +np.float32,0x3ed0e2,0xc218f4a7,4 +np.float32,0x4efab8,0xc2188ed3,4 +np.float32,0x3dbd5632,0xbf845d3b,4 +np.float32,0x7eecad4f,0x4218c972,4 +np.float32,0x9d636,0xc21c2d32,4 +np.float32,0x3e5f3b6b,0xbf295ae7,4 +np.float32,0x7f4932df,0x4219b57a,4 +np.float32,0x4b59b5,0xc218a3be,4 +np.float32,0x3e5de97f,0xbf2a03b4,4 +np.float32,0x3f1c479d,0xbe5b7b3c,4 +np.float32,0x3f42e7e4,0xbdf283a5,4 +np.float32,0x2445,0xc2238af2,4 +np.float32,0x7aa71b43,0x420e8c9e,4 +np.float32,0x3ede6e4e,0xbeb961e1,4 +np.float32,0x7f05dd3b,0x42190045,4 +np.float32,0x3ef5b55c,0xbea3404b,4 +np.float32,0x7f738624,0x421a0a62,4 +np.float32,0x3e7d50a1,0xbf1b4cb4,4 +np.float32,0x3f44cc4a,0xbde9ebcc,4 +np.float32,0x7e1a7b0b,0x4216d777,4 +np.float32,0x3f1d9868,0xbe57c0da,4 +np.float32,0x1ebee2,0xc21a3263,4 +np.float32,0x31685f,0xc2195f6e,4 +np.float32,0x368a8e,0xc2193379,4 +np.float32,0xa9847,0xc21c0c2e,4 +np.float32,0x3bd3b3,0xc2190a56,4 +np.float32,0x3961e4,0xc2191ce3,4 +np.float32,0x7e13a243,0x4216c34e,4 +np.float32,0x7f7b1790,0x421a17ff,4 +np.float32,0x3e55f020,0xbf2e1545,4 +np.float32,0x3f513861,0xbdb37aa8,4 +np.float32,0x3dd9e754,0xbf791ad2,4 +np.float32,0x5e8d86,0xc2183ec9,4 +np.float32,0x26b796,0xc219cbdd,4 +np.float32,0x429daa,0xc218da89,4 +np.float32,0x3f477caa,0xbdddd9ba,4 +np.float32,0x3f0e5114,0xbe828d45,4 +np.float32,0x3f54f362,0xbda3c286,4 +np.float32,0x6eac1c,0xc217f8c8,4 +np.float32,0x3f04c479,0xbe91fef5,4 
+np.float32,0x3e993765,0xbf06228e,4 +np.float32,0x3eafd99f,0xbeeda21b,4 +np.float32,0x3f2a759e,0xbe34db96,4 +np.float32,0x3f05adfb,0xbe907937,4 +np.float32,0x3f6e2dfc,0xbd005980,4 +np.float32,0x3f2f2daa,0xbe28b6b5,4 +np.float32,0x15e746,0xc21ac931,4 +np.float32,0x7d34ca26,0x4214b4e5,4 +np.float32,0x7ebd175c,0x4218659f,4 +np.float32,0x7f1ed26b,0x42194c4c,4 +np.float32,0x2588b,0xc21eaab0,4 +np.float32,0x3f0065e3,0xbe996fe2,4 +np.float32,0x3f610376,0xbd658122,4 +np.float32,0x451995,0xc218ca41,4 +np.float32,0x70e083,0xc217f002,4 +np.float32,0x7e19821a,0x4216d4a8,4 +np.float32,0x3e7cd9a0,0xbf1b80fb,4 +np.float32,0x7f1a8f18,0x42194033,4 +np.float32,0x3f008fee,0xbe99271f,4 +np.float32,0xff7fffff,0x7fc00000,4 +np.float32,0x7f31d826,0x42197e9b,4 +np.float32,0x3f18cf12,0xbe657838,4 +np.float32,0x3e5c1bc7,0xbf2aebf9,4 +np.float32,0x3e3d3993,0xbf3bbaf8,4 +np.float32,0x68457a,0xc2181347,4 +np.float32,0x7ddf7561,0x42164761,4 +np.float32,0x7f47341b,0x4219b10c,4 +np.float32,0x4d3ecd,0xc21898b2,4 +np.float32,0x7f43dee8,0x4219a98b,4 +np.float32,0x3f0def7c,0xbe8325f5,4 +np.float32,0x3d5a551f,0xbfa2f994,4 +np.float32,0x7ed26602,0x4218951b,4 +np.float32,0x3ee7fa5b,0xbeb0099a,4 +np.float32,0x7ef74ea8,0x4218dcfc,4 +np.float32,0x6a3bb2,0xc2180afd,4 +np.float32,0x7f4c1e6e,0x4219bbe3,4 +np.float32,0x3e26f625,0xbf49a5a2,4 +np.float32,0xb8482,0xc21be70b,4 +np.float32,0x3f32f077,0xbe1f445b,4 +np.float32,0x7dd694b6,0x4216355a,4 +np.float32,0x7f3d62fd,0x42199a92,4 +np.float32,0x3f48e41a,0xbdd79cbf,4 +np.float32,0x338fc3,0xc2194c75,4 +np.float32,0x3e8355f0,0xbf174462,4 +np.float32,0x7f487e83,0x4219b3eb,4 +np.float32,0x2227f7,0xc21a039b,4 +np.float32,0x7e4383dd,0x4217403a,4 +np.float32,0x52d28b,0xc21879b2,4 +np.float32,0x12472c,0xc21b19a9,4 +np.float32,0x353530,0xc2193e7b,4 +np.float32,0x3f4e4728,0xbdc0137a,4 +np.float32,0x3bf169,0xc2190979,4 +np.float32,0x3eb3ee2e,0xbee8885f,4 +np.float32,0x3f03e3c0,0xbe937892,4 +np.float32,0x3c9f8408,0xbfdaf47f,4 +np.float32,0x40e792,0xc218e61b,4 +np.float32,0x5a6b29,0xc21852ab,4 +np.float32,0x7f268b83,0x4219616a,4 +np.float32,0x3ee25997,0xbeb57fa7,4 +np.float32,0x3f175324,0xbe69cf53,4 +np.float32,0x3f781d91,0xbc5e9827,4 +np.float32,0x7dba5210,0x4215f68c,4 +np.float32,0x7f1e66,0xc217bb2b,4 +np.float32,0x7f7fffff,0x421a209b,4 +np.float32,0x3f646202,0xbd4b10b8,4 +np.float32,0x575248,0xc218622b,4 +np.float32,0x7c67faa1,0x4212bb42,4 +np.float32,0x7f1683f2,0x42193469,4 +np.float32,0x1a3864,0xc21a7931,4 +np.float32,0x7f30ad75,0x42197bae,4 +np.float32,0x7f1c9d05,0x42194612,4 +np.float32,0x3e791795,0xbf1d2b2c,4 +np.float32,0x7e9ebc19,0x421817cd,4 +np.float32,0x4999b7,0xc218ae31,4 +np.float32,0x3d130e2c,0xbfb8f1cc,4 +np.float32,0x3f7e436f,0xbb41bb07,4 +np.float32,0x3ee00241,0xbeb7cf7d,4 +np.float32,0x7e496181,0x42174d5f,4 +np.float32,0x7efe58be,0x4218e978,4 +np.float32,0x3f5e5b0c,0xbd7aa43f,4 +np.float32,0x7ee4c6ab,0x4218ba59,4 +np.float32,0x3f6da8c6,0xbd043d7e,4 +np.float32,0x3e3e6e0f,0xbf3b064b,4 +np.float32,0x3f0143b3,0xbe97f10a,4 +np.float32,0x79170f,0xc217d0c6,4 +np.float32,0x517645,0xc218810f,4 +np.float32,0x3f1f9960,0xbe52226e,4 +np.float32,0x2a8df9,0xc219a1d6,4 +np.float32,0x2300a6,0xc219f8b8,4 +np.float32,0x3ee31355,0xbeb4c97a,4 +np.float32,0x3f20b05f,0xbe4f1ba9,4 +np.float32,0x3ee64249,0xbeb1b0ff,4 +np.float32,0x3a94b7,0xc21913b2,4 +np.float32,0x7ef7ef43,0x4218de1d,4 +np.float32,0x3f1abb5d,0xbe5fe872,4 +np.float32,0x7f65360b,0x4219ef72,4 +np.float32,0x3d315d,0xc219004c,4 +np.float32,0x3f26bbc4,0xbe3eafb9,4 +np.float32,0x3ee8c6e9,0xbeaf45de,4 +np.float32,0x7e5f1452,0x42177ae1,4 
+np.float32,0x3f32e777,0xbe1f5aba,4 +np.float32,0x4d39a1,0xc21898d0,4 +np.float32,0x3e59ad15,0xbf2c2841,4 +np.float32,0x3f4be746,0xbdca5fc4,4 +np.float32,0x72e4fd,0xc217e821,4 +np.float32,0x1af0b8,0xc21a6d25,4 +np.float32,0x3f311147,0xbe23f18d,4 +np.float32,0x3f1ecebb,0xbe545880,4 +np.float32,0x7e90d293,0x4217ef02,4 +np.float32,0x3e3b366a,0xbf3ceb46,4 +np.float32,0x3f133239,0xbe761c96,4 +np.float32,0x7541ab,0xc217df15,4 +np.float32,0x3d8c8275,0xbf94f1a1,4 +np.float32,0x483b92,0xc218b689,4 +np.float32,0x3eb0dbed,0xbeec5c6b,4 +np.float32,0x3f00c676,0xbe98c8e2,4 +np.float32,0x3f445ac2,0xbdebed7c,4 +np.float32,0x3d2af4,0xc219007a,4 +np.float32,0x7f196ee1,0x42193cf2,4 +np.float32,0x290c94,0xc219b1db,4 +np.float32,0x3f5dbdc9,0xbd7f9019,4 +np.float32,0x3e80c62e,0xbf1974fc,4 +np.float32,0x3ec9ed2c,0xbecee326,4 +np.float32,0x7f469d60,0x4219afbb,4 +np.float32,0x3f698413,0xbd2386ce,4 +np.float32,0x42163f,0xc218de14,4 +np.float32,0x67a554,0xc21815f4,4 +np.float32,0x3f4bff74,0xbdc9f651,4 +np.float32,0x16a743,0xc21aba39,4 +np.float32,0x2eb8b0,0xc219784b,4 +np.float32,0x3eed9be1,0xbeaab45b,4 +np.float64,0x7fe0d76873e1aed0,0x40733f9d783bad7a,2 +np.float64,0x3fe22626bb244c4d,0xbfcf86a59864eea2,2 +np.float64,0x7f874113d02e8227,0x407324f54c4015b8,2 +np.float64,0x3fe40a46a9e8148d,0xbfca0411f533fcb9,2 +np.float64,0x3fd03932eea07266,0xbfe312bc9cf5649e,2 +np.float64,0x7fee5d2a1b3cba53,0x407343b5f56367a0,2 +np.float64,0x3feb7bda4a76f7b5,0xbfb0ea2c6edc784a,2 +np.float64,0x3fd6cd831a2d9b06,0xbfdcaf2e1a5faf51,2 +np.float64,0x98324e273064a,0xc0733e0e4c6d11c6,2 +np.float64,0x7fe1dd63b363bac6,0x4073400667c405c3,2 +np.float64,0x3fec5971f178b2e4,0xbfaaef32a7d94563,2 +np.float64,0x17abc07e2f579,0xc0734afca4da721e,2 +np.float64,0x3feec6ab5cfd8d57,0xbf9157f3545a8235,2 +np.float64,0x3fe3ae9622a75d2c,0xbfcb04b5ad254581,2 +np.float64,0x7fea73d854b4e7b0,0x407342c0a548f4c5,2 +np.float64,0x7fe29babf4653757,0x4073404eeb5fe714,2 +np.float64,0x7fd3a55d85a74aba,0x40733bde72e86c27,2 +np.float64,0x3fe83ce305f079c6,0xbfbee3511e85e0f1,2 +np.float64,0x3fd72087ea2e4110,0xbfdc4ab30802d7c2,2 +np.float64,0x7feb54ddab76a9ba,0x407342facb6f3ede,2 +np.float64,0xc57e34a18afd,0xc0734f82ec815baa,2 +np.float64,0x7a8cb97ef5198,0xc0733f8fb3777a67,2 +np.float64,0x7fe801032c300205,0x40734213dbe4eda9,2 +np.float64,0x3aefb1f475df7,0xc07344a5f08a0584,2 +np.float64,0x7fee85f1dd3d0be3,0x407343bf4441c2a7,2 +np.float64,0x3fdc7f1055b8fe21,0xbfd67d300630e893,2 +np.float64,0xe8ecddb3d1d9c,0xc0733b194f18f466,2 +np.float64,0x3fdf2b23c73e5648,0xbfd3ff6872c1f887,2 +np.float64,0x3fdba4aef2b7495e,0xbfd7557205e18b7b,2 +np.float64,0x3fe2ac34c6e5586a,0xbfcdf1dac69bfa08,2 +np.float64,0x3fc9852628330a4c,0xbfe66914f0fb9b0a,2 +np.float64,0x7fda211acf344235,0x40733dd9c2177aeb,2 +np.float64,0x3fe9420eb432841d,0xbfba4dd969a32575,2 +np.float64,0xb2f9d1ed65f3a,0xc0733cedfb6527ff,2 +np.float64,0x3fe9768a68f2ed15,0xbfb967c39c35c435,2 +np.float64,0x7fe8268462b04d08,0x4073421eaed32734,2 +np.float64,0x3fcf331f063e663e,0xbfe39e2f4b427ca9,2 +np.float64,0x7fd4eb9e2b29d73b,0x40733c4e4141418d,2 +np.float64,0x7fd2bba658a5774c,0x40733b89cd53d5b1,2 +np.float64,0x3fdfdf04913fbe09,0xbfd360c7fd9d251b,2 +np.float64,0x3fca5bfd0534b7fa,0xbfe5f5f844b2b20c,2 +np.float64,0x3feacd5032f59aa0,0xbfb3b5234ba8bf7b,2 +np.float64,0x7fe9241cec724839,0x4073426631362cec,2 +np.float64,0x3fe57aca20eaf594,0xbfc628e3ac2c6387,2 +np.float64,0x3fec6553ca38caa8,0xbfaa921368d3b222,2 +np.float64,0x3fe1e9676563d2cf,0xbfd020f866ba9b24,2 +np.float64,0x3fd5590667aab20d,0xbfde8458af5a4fd6,2 
+np.float64,0x3fdf7528f43eea52,0xbfd3bdb438d6ba5e,2 +np.float64,0xb8dddc5571bbc,0xc0733cb4601e5bb2,2 +np.float64,0xe6d4e1fbcda9c,0xc0733b295ef4a4ba,2 +np.float64,0x3fe7019d962e033b,0xbfc257c0a6e8de16,2 +np.float64,0x3f94ef585029deb1,0xbffb07e5dfb0e936,2 +np.float64,0x7fc863b08030c760,0x4073388e28d7b354,2 +np.float64,0xf684443bed089,0xc0733ab46cfbff9a,2 +np.float64,0x7fe00e901d201d1f,0x40733f489c05a0f0,2 +np.float64,0x9e5c0a273cb82,0xc0733dc7af797e19,2 +np.float64,0x7fe49734f0692e69,0x4073410303680df0,2 +np.float64,0x7fb7b584442f6b08,0x4073338acff72502,2 +np.float64,0x3f99984c30333098,0xbff9a2642a6ed8cc,2 +np.float64,0x7fea2fcda8745f9a,0x407342aeae7f5e64,2 +np.float64,0xe580caadcb01a,0xc0733b33a3639217,2 +np.float64,0x1899ab3831336,0xc0734ab823729417,2 +np.float64,0x39bd4c76737aa,0xc07344ca6fac6d21,2 +np.float64,0xd755b2dbaeab7,0xc0733ba4fe19f2cc,2 +np.float64,0x3f952bebf82a57d8,0xbffaf3e7749c2512,2 +np.float64,0x3fe62ee5d72c5dcc,0xbfc45e3cb5baad08,2 +np.float64,0xb1264a7d624ca,0xc0733d003a1d0a66,2 +np.float64,0x3fc4bd1bcd297a38,0xbfe94b3058345c46,2 +np.float64,0x7fc5758bb32aeb16,0x407337aa7805497f,2 +np.float64,0x3fb0edcaf421db96,0xbff2dfb09c405294,2 +np.float64,0x3fd240fceaa481fa,0xbfe16f356bb36134,2 +np.float64,0x38c0c62a7181a,0xc07344e916d1e9b7,2 +np.float64,0x3fe98f2b3bf31e56,0xbfb8fc6eb622a820,2 +np.float64,0x3fe2bdf99c257bf3,0xbfcdbd0dbbae4d0b,2 +np.float64,0xce4b390d9c967,0xc0733bf14ada3134,2 +np.float64,0x3fd2ad607ba55ac1,0xbfe11da15167b37b,2 +np.float64,0x3fd8154f11b02a9e,0xbfdb2a6fabb9a026,2 +np.float64,0xf37849fde6f09,0xc0733aca8c64344c,2 +np.float64,0x3fcbae43b2375c87,0xbfe547f267c8e570,2 +np.float64,0x3fcd46fd7d3a8dfb,0xbfe48070f7232929,2 +np.float64,0x7fcdd245273ba489,0x407339f3d907b101,2 +np.float64,0x3fac75cd0838eb9a,0xbff4149d177b057b,2 +np.float64,0x7fe8ff3fd7f1fe7f,0x4073425bf968ba6f,2 +np.float64,0x7febadaa4df75b54,0x407343113a91f0e9,2 +np.float64,0x7fd5e4649c2bc8c8,0x40733c9f0620b065,2 +np.float64,0x903429812069,0xc07351b255e27887,2 +np.float64,0x3fe1d8c51c63b18a,0xbfd03ad448c1f1ee,2 +np.float64,0x3fe573ea646ae7d5,0xbfc63ab0bfd0e601,2 +np.float64,0x3f83b3f3c02767e8,0xc00022677e310649,2 +np.float64,0x7fd15d1582a2ba2a,0x40733b02c469c1d6,2 +np.float64,0x3fe63d3dabec7a7b,0xbfc43a56ee97b27e,2 +np.float64,0x7fe3a452fb2748a5,0x407340af1973c228,2 +np.float64,0x3fafac6b303f58d6,0xbff35651703ae9f2,2 +np.float64,0x513ddd24a27bc,0xc073426af96aaebb,2 +np.float64,0x3fef152246be2a45,0xbf89df79d7719282,2 +np.float64,0x3fe8c923e9f19248,0xbfbc67228e8db5f6,2 +np.float64,0x3fd6e2325fadc465,0xbfdc9602fb0b950f,2 +np.float64,0x3fe9616815f2c2d0,0xbfb9c4311a3b415b,2 +np.float64,0x2fe4e4005fc9d,0xc0734616fe294395,2 +np.float64,0x3fbceb02dc39d606,0xbfee4e68f1c7886f,2 +np.float64,0x7fe35e843d66bd07,0x407340963b066ad6,2 +np.float64,0x7fecd6c648f9ad8c,0x4073435a4c176e94,2 +np.float64,0x7fcbd72bf437ae57,0x4073397994b85665,2 +np.float64,0x3feff6443b3fec88,0xbf40eb380d5318ae,2 +np.float64,0x7fb9373cf6326e79,0x407333f869edef08,2 +np.float64,0x63790d9cc6f22,0xc0734102d4793cda,2 +np.float64,0x3f9de6efe83bcde0,0xbff88db6f0a6b56e,2 +np.float64,0xe00f2dc1c01f,0xc0734ea26ab84ff2,2 +np.float64,0xd7a9aa8baf536,0xc0733ba248fa33ab,2 +np.float64,0x3fee0089ea7c0114,0xbf9cab936ac31c4b,2 +np.float64,0x3fdec0d51cbd81aa,0xbfd45ed8878c5860,2 +np.float64,0x7fe91bf5e9f237eb,0x40734263f005081d,2 +np.float64,0x34ea7d1e69d50,0xc07345659dde7444,2 +np.float64,0x7fe67321a3ace642,0x4073419cc8130d95,2 +np.float64,0x9d1aeb2f3a35e,0xc0733dd5d506425c,2 +np.float64,0x7fbb01df003603bd,0x4073347282f1391d,2 
+np.float64,0x42b945b285729,0xc07343c92d1bbef9,2 +np.float64,0x7fc92799b8324f32,0x407338c51e3f0733,2 +np.float64,0x3fe119c19b223383,0xbfd16ab707f65686,2 +np.float64,0x3fc9f9ac5333f359,0xbfe62a2f91ec0dff,2 +np.float64,0x3fd820d5a8b041ab,0xbfdb1d2586fe7b18,2 +np.float64,0x10000000000000,0xc0733a7146f72a42,2 +np.float64,0x3fe7e1543eafc2a8,0xbfc045362889592d,2 +np.float64,0xcbc0e1819783,0xc0734f4b68e05b1c,2 +np.float64,0xeb57e411d6afd,0xc0733b06efec001a,2 +np.float64,0xa9b74b47536ea,0xc0733d4c7bd06ddc,2 +np.float64,0x3fe56d4022eada80,0xbfc64bf8c7e3dd59,2 +np.float64,0x3fd445ca27288b94,0xbfdff40aecd0f882,2 +np.float64,0x3fe5af1cf5ab5e3a,0xbfc5a21d83699a04,2 +np.float64,0x7fed3431eb7a6863,0x40734370aa6131e1,2 +np.float64,0x3fd878dea1b0f1bd,0xbfdab8730dc00517,2 +np.float64,0x7ff8000000000000,0x7ff8000000000000,2 +np.float64,0x3feba9fcc1f753fa,0xbfb03027dcecbf65,2 +np.float64,0x7fca4feed6349fdd,0x4073391526327eb0,2 +np.float64,0x3fe7748ddbaee91c,0xbfc144b438218065,2 +np.float64,0x3fb5fbd94c2bf7b3,0xbff10ee6342c21a0,2 +np.float64,0x3feb603b97f6c077,0xbfb15a1f99d6d25e,2 +np.float64,0x3fe2e6fc8ce5cdf9,0xbfcd43edd7f3b4e6,2 +np.float64,0x7feb2b31f7765663,0x407342f02b306688,2 +np.float64,0x3fe290e2282521c4,0xbfce436deb8dbcf3,2 +np.float64,0x3fe3d5adf9e7ab5c,0xbfca96b8aa55d942,2 +np.float64,0x691899f2d2314,0xc07340a1026897c8,2 +np.float64,0x7fe468b008e8d15f,0x407340f33eadc628,2 +np.float64,0x3fb3a4c416274988,0xbff1d71da539a56e,2 +np.float64,0x3fe2442b29e48856,0xbfcf2b0037322661,2 +np.float64,0x3f376fbc7e6ef,0xc073442939a84643,2 +np.float64,0x3fe7c78d65ef8f1b,0xbfc08157cff411de,2 +np.float64,0xd4f27acba9e50,0xc0733bb8d38daa50,2 +np.float64,0x5198919ea3313,0xc07342633ba7cbea,2 +np.float64,0x7fd09f66f0a13ecd,0x40733ab5310b4385,2 +np.float64,0x3fdfe5531dbfcaa6,0xbfd35b487c7e739f,2 +np.float64,0x3fc4b0fecc2961fe,0xbfe95350c38c1640,2 +np.float64,0x7fd5ae21962b5c42,0x40733c8db78b7250,2 +np.float64,0x3fa4a8fcd42951fa,0xbff64e62fe602b72,2 +np.float64,0x7fc8e0e25831c1c4,0x407338b179b91223,2 +np.float64,0x7fdde1df6f3bc3be,0x40733ec87f9f027e,2 +np.float64,0x3fd8b9ad86b1735b,0xbfda6f385532c41b,2 +np.float64,0x3fd9f20ee933e41e,0xbfd91872fd858597,2 +np.float64,0x7feb35332df66a65,0x407342f2b9c715f0,2 +np.float64,0x7fe783dc7eaf07b8,0x407341ef41873706,2 +np.float64,0x7fceee929f3ddd24,0x40733a34e3c660fd,2 +np.float64,0x985b58d730b6b,0xc0733e0c6cfbb6f8,2 +np.float64,0x3fef4bb55cfe976b,0xbf83cb246c6f2a78,2 +np.float64,0x3fe218014f243003,0xbfcfb20ac683e1f6,2 +np.float64,0x7fe43b9fbea8773e,0x407340e3d5d5d29e,2 +np.float64,0x7fe148c74c62918e,0x40733fcba4367b8b,2 +np.float64,0x3feea4ad083d495a,0xbf93443917f3c991,2 +np.float64,0x8bcf6311179ed,0xc0733ea54d59dd31,2 +np.float64,0xf4b7a2dbe96f5,0xc0733ac175182401,2 +np.float64,0x543338baa8668,0xc073422b59165fe4,2 +np.float64,0x3fdb467317368ce6,0xbfd7b4d515929635,2 +np.float64,0x7fe3bbbc89e77778,0x407340b75cdf3de7,2 +np.float64,0x7fe693377aad266e,0x407341a6af60a0f1,2 +np.float64,0x3fc66210502cc421,0xbfe83bb940610a24,2 +np.float64,0x7fa75638982eac70,0x40732e9da476b816,2 +np.float64,0x3fe0d72a4761ae55,0xbfd1d7c82c479fab,2 +np.float64,0x97dec0dd2fbd8,0xc0733e121e072804,2 +np.float64,0x3fef33ec8c7e67d9,0xbf86701be6be8df1,2 +np.float64,0x7fcfca9b423f9536,0x40733a65a51efb94,2 +np.float64,0x9f2215633e443,0xc0733dbf043de9ed,2 +np.float64,0x2469373e48d28,0xc07347fe9e904b77,2 +np.float64,0x7fecc2e18cb985c2,0x407343557f58dfa2,2 +np.float64,0x3fde4acbfdbc9598,0xbfd4ca559e575e74,2 +np.float64,0x3fd6b11cf1ad623a,0xbfdcd1e17ef36114,2 +np.float64,0x3fc19ec494233d89,0xbfeb8ef228e8826a,2 
+np.float64,0x4c89ee389913e,0xc07342d50c904f61,2 +np.float64,0x88c2046f11841,0xc0733ecc91369431,2 +np.float64,0x7fc88c13fd311827,0x40733899a125b392,2 +np.float64,0x3fcebd893a3d7b12,0xbfe3d2f35ab93765,2 +np.float64,0x3feb582a1476b054,0xbfb17ae8ec6a0465,2 +np.float64,0x7fd4369e5da86d3c,0x40733c1118b8cd67,2 +np.float64,0x3fda013fc1340280,0xbfd90831b85e98b2,2 +np.float64,0x7fed33d73fba67ad,0x4073437094ce1bd9,2 +np.float64,0x3fed3191053a6322,0xbfa468cc26a8f685,2 +np.float64,0x3fc04ed51c209daa,0xbfeca24a6f093bca,2 +np.float64,0x3fee4ac8763c9591,0xbf986458abbb90b5,2 +np.float64,0xa2d39dd145a74,0xc0733d9633651fbc,2 +np.float64,0x3fe7d9f86f2fb3f1,0xbfc0565a0b059f1c,2 +np.float64,0x3fe3250144e64a03,0xbfcc8eb2b9ae494b,2 +np.float64,0x7fe2b29507a56529,0x4073405774492075,2 +np.float64,0x7fdcdfcbe2b9bf97,0x40733e8b736b1bd8,2 +np.float64,0x3fc832730f3064e6,0xbfe7267ac9b2e7c3,2 +np.float64,0x3fc7e912e52fd226,0xbfe750dfc0aeae57,2 +np.float64,0x7fc960472f32c08d,0x407338d4b4cb3957,2 +np.float64,0x3fbdf182ea3be306,0xbfedd27150283ffb,2 +np.float64,0x3fd1e9359823d26b,0xbfe1b2ac7fd25f8d,2 +np.float64,0x7fbcf75f6039eebe,0x407334ef13eb16f8,2 +np.float64,0x3fe5a3c910eb4792,0xbfc5bf2f57c5d643,2 +np.float64,0x3fcf4f2a6e3e9e55,0xbfe391b6f065c4b8,2 +np.float64,0x3fee067873fc0cf1,0xbf9c53af0373fc0e,2 +np.float64,0xd3f08b85a7e12,0xc0733bc14357e686,2 +np.float64,0x7ff0000000000000,0x7ff0000000000000,2 +np.float64,0x3fc8635f6430c6bf,0xbfe70a7dc77749a7,2 +np.float64,0x3fe3ff5c52a7feb9,0xbfca22617c6636d5,2 +np.float64,0x3fbbae91fa375d24,0xbfeee9d4c300543f,2 +np.float64,0xe3f71b59c7ee4,0xc0733b3f99187375,2 +np.float64,0x7fca93d3be3527a6,0x40733926fd48ecd6,2 +np.float64,0x3fcd29f7223a53ee,0xbfe48e3edf32fe57,2 +np.float64,0x7fdc4ef6f8389ded,0x40733e68401cf2a6,2 +np.float64,0xe009bc81c014,0xc0734ea295ee3e5b,2 +np.float64,0x61f56c78c3eae,0xc073411e1dbd7c54,2 +np.float64,0x3fde131928bc2632,0xbfd4fda024f6927c,2 +np.float64,0x3fb21ee530243dca,0xbff266aaf0358129,2 +np.float64,0x7feaac82a4f55904,0x407342cf7809d9f9,2 +np.float64,0x3fe66ab177ecd563,0xbfc3c92d4d522819,2 +np.float64,0xfe9f9c2bfd3f4,0xc0733a7ade3a88a7,2 +np.float64,0x7fd0c5217c218a42,0x40733ac4e4c6dfa5,2 +np.float64,0x430f4ae6861ea,0xc07343c03d8a9442,2 +np.float64,0x494bff2a92981,0xc073432209d2fd16,2 +np.float64,0x3f8860e9d030c1d4,0xbffeca059ebf5e89,2 +np.float64,0x3fe43732dc286e66,0xbfc98800388bad2e,2 +np.float64,0x6443b60ec8877,0xc07340f4bab11827,2 +np.float64,0x3feda9be6d7b537d,0xbfa0dcb9a6914069,2 +np.float64,0x3fc5ceb6772b9d6d,0xbfe89868c881db70,2 +np.float64,0x3fbdf153023be2a6,0xbfedd2878c3b4949,2 +np.float64,0x7fe8f6b8e8f1ed71,0x407342599a30b273,2 +np.float64,0x3fea6fbdb8b4df7b,0xbfb53bf66f71ee96,2 +np.float64,0xc7ac3dbb8f588,0xc0733c2b525b7963,2 +np.float64,0x3fef3a91f77e7524,0xbf85b2bd3adbbe31,2 +np.float64,0x3f887cb97030f973,0xbffec21ccbb5d22a,2 +np.float64,0x8b2f1c9f165e4,0xc0733ead49300951,2 +np.float64,0x2c1cb32058397,0xc07346a951bd8d2b,2 +np.float64,0x3fe057edd620afdc,0xbfd2acf1881b7e99,2 +np.float64,0x7f82e9530025d2a5,0x4073238591dd52ce,2 +np.float64,0x3fe4e03dff69c07c,0xbfc7be96c5c006fc,2 +np.float64,0x52727b4aa4e50,0xc0734250c58ebbc1,2 +np.float64,0x3f99a62160334c43,0xbff99ea3ca09d8f9,2 +np.float64,0x3fd5314b4faa6297,0xbfdeb843daf01e03,2 +np.float64,0x3fefde89e13fbd14,0xbf5d1facb7a1e9de,2 +np.float64,0x7fb460f1a228c1e2,0x4073327d8cbc5f86,2 +np.float64,0xeb93efb3d727e,0xc0733b052a4990e4,2 +np.float64,0x3fe884baecf10976,0xbfbd9ba9cfe23713,2 +np.float64,0x7fefffffffffffff,0x40734413509f79ff,2 +np.float64,0x149dc7c6293ba,0xc0734bf26b1df025,2 
+np.float64,0x64188f88c8313,0xc07340f7b8e6f4b5,2 +np.float64,0x3fdfac314abf5863,0xbfd38d3e9dba1b0e,2 +np.float64,0x3fd72052a42e40a5,0xbfdc4af30ee0b245,2 +np.float64,0x7fdd951f743b2a3e,0x40733eb68fafa838,2 +np.float64,0x65a2dd5acb45c,0xc07340dc8ed625e1,2 +np.float64,0x7fe89a79997134f2,0x4073423fbceb1cbe,2 +np.float64,0x3fe70a000d6e1400,0xbfc24381e09d02f7,2 +np.float64,0x3fe2cec160259d83,0xbfcd8b5e92354129,2 +np.float64,0x3feb9ef77a773def,0xbfb05c7b2ee6f388,2 +np.float64,0xe0d66689c1acd,0xc0733b582c779620,2 +np.float64,0x3fee86bd0ffd0d7a,0xbf94f7870502c325,2 +np.float64,0x186afc6230d60,0xc0734ac55fb66d5d,2 +np.float64,0xc0631f4b80c64,0xc0733c6d7149d373,2 +np.float64,0x3fdad1b87735a371,0xbfd82cca73ec663b,2 +np.float64,0x7fe7f6d313efeda5,0x40734210e84576ab,2 +np.float64,0x7fd7b7fce6af6ff9,0x40733d2d92ffdaaf,2 +np.float64,0x3fe6f35a28ade6b4,0xbfc27a4239b540c3,2 +np.float64,0x7fdb0b834eb61706,0x40733e17073a61f3,2 +np.float64,0x82f4661105e8d,0xc0733f19b34adeed,2 +np.float64,0x3fc77230112ee460,0xbfe796a7603c0d16,2 +np.float64,0x8000000000000000,0xfff0000000000000,2 +np.float64,0x7fb8317bc63062f7,0x407333aec761a739,2 +np.float64,0x7fd165609a22cac0,0x40733b061541ff15,2 +np.float64,0x3fed394768fa728f,0xbfa42e1596e1faf6,2 +np.float64,0x7febab693d7756d1,0x40734310a9ac828e,2 +np.float64,0x7fe809a69230134c,0x407342165b9acb69,2 +np.float64,0x3fc091d38f2123a7,0xbfec69a70fc23548,2 +np.float64,0x3fb2a8f5dc2551ec,0xbff2327f2641dd0d,2 +np.float64,0x7fc60b6fe02c16df,0x407337da5adc342c,2 +np.float64,0x3fefa53c3bbf4a78,0xbf73d1be15b73b00,2 +np.float64,0x7fee09c1717c1382,0x407343a2c479e1cb,2 +np.float64,0x8000000000000001,0x7ff8000000000000,2 +np.float64,0x3fede0b2733bc165,0xbf9e848ac2ecf604,2 +np.float64,0x3fee2ac331bc5586,0xbf9a3b699b721c9a,2 +np.float64,0x3fd4db12d829b626,0xbfdf2a413d1e453a,2 +np.float64,0x7fe605230dec0a45,0x4073417a67db06be,2 +np.float64,0x3fe378b2bf26f165,0xbfcb9dbb2b6d6832,2 +np.float64,0xc1d4c1ab83a98,0xc0733c60244cadbf,2 +np.float64,0x3feb15500e762aa0,0xbfb28c071d5efc22,2 +np.float64,0x3fe36225a626c44b,0xbfcbde4259e9047e,2 +np.float64,0x3fe7c586a72f8b0d,0xbfc08614b13ed4b2,2 +np.float64,0x7fb0f2d8cc21e5b1,0x40733135b2c7dd99,2 +np.float64,0x5957f3feb2aff,0xc07341c1df75638c,2 +np.float64,0x3fca4851bd3490a3,0xbfe6005ae5279485,2 +np.float64,0x824217d904843,0xc0733f232fd58f0f,2 +np.float64,0x4f9332269f267,0xc073428fd8e9cb32,2 +np.float64,0x3fea6f087374de11,0xbfb53ef0d03918b2,2 +np.float64,0x3fd9409ab4328135,0xbfd9d9231381e2b8,2 +np.float64,0x3fdba03b00374076,0xbfd759ec94a7ab5b,2 +np.float64,0x3fe0ce3766619c6f,0xbfd1e6912582ccf0,2 +np.float64,0x3fabd45ddc37a8bc,0xbff43c78d3188423,2 +np.float64,0x3fc3cadd592795bb,0xbfe9f1576c9b2c79,2 +np.float64,0x3fe10df049621be1,0xbfd17df2f2c28022,2 +np.float64,0x945b5d1328b6c,0xc0733e3bc06f1e75,2 +np.float64,0x7fc1c3742b2386e7,0x4073365a403d1051,2 +np.float64,0x7fdc957138b92ae1,0x40733e7977717586,2 +np.float64,0x7f943fa1a0287f42,0x407328d01de143f5,2 +np.float64,0x3fec9631c4392c64,0xbfa914b176d8f9d2,2 +np.float64,0x3fd8e7c008b1cf80,0xbfda3b9d9b6da8f4,2 +np.float64,0x7222f9fee4460,0xc073400e371516cc,2 +np.float64,0x3fe890e43eb121c8,0xbfbd64921462e823,2 +np.float64,0x3fcfd7fe2a3faffc,0xbfe3557e2f207800,2 +np.float64,0x3fed5dd1c1babba4,0xbfa318bb20db64e6,2 +np.float64,0x3fe6aa34c66d546a,0xbfc32c8a8991c11e,2 +np.float64,0x8ca79801196,0xc0736522bd5adf6a,2 +np.float64,0x3feb274079364e81,0xbfb2427b24b0ca20,2 +np.float64,0x7fe04927e4a0924f,0x40733f61c96f7f89,2 +np.float64,0x7c05f656f80bf,0xc0733f7a70555b4e,2 +np.float64,0x7fe97819eff2f033,0x4073427d4169b0f8,2 
+np.float64,0x9def86e33bdf1,0xc0733dcc740b7175,2 +np.float64,0x7fedd1ef3f3ba3dd,0x40734395ceab8238,2 +np.float64,0x77bed86cef7dc,0xc0733fb8e0e9bf73,2 +np.float64,0x9274b41b24e97,0xc0733e52b16dff71,2 +np.float64,0x8010000000000000,0x7ff8000000000000,2 +np.float64,0x9c977855392ef,0xc0733ddba7d421d9,2 +np.float64,0xfb4560a3f68ac,0xc0733a9271e6a118,2 +np.float64,0xa67d9f394cfb4,0xc0733d6e9d58cc94,2 +np.float64,0x3fbfa766b03f4ecd,0xbfed0cccfecfc900,2 +np.float64,0x3fe177417522ee83,0xbfd0d45803bff01a,2 +np.float64,0x7fe85e077bb0bc0e,0x4073422e957a4aa3,2 +np.float64,0x7feeb0a6883d614c,0x407343c8f6568f7c,2 +np.float64,0xbab82edb75706,0xc0733ca2a2b20094,2 +np.float64,0xfadb44bdf5b69,0xc0733a9561b7ec04,2 +np.float64,0x3fefb9b82b3f7370,0xbf6ea776b2dcc3a9,2 +np.float64,0x7fe080ba8a610174,0x40733f795779b220,2 +np.float64,0x3f87faa1c02ff544,0xbffee76acafc92b7,2 +np.float64,0x7fed474108fa8e81,0x4073437531d4313e,2 +np.float64,0x3fdb7b229336f645,0xbfd77f583a4a067f,2 +np.float64,0x256dbf0c4adb9,0xc07347cd94e6fa81,2 +np.float64,0x3fd034ae25a0695c,0xbfe3169c15decdac,2 +np.float64,0x3a72177274e44,0xc07344b4cf7d68cd,2 +np.float64,0x7fa2522d5c24a45a,0x40732cef2f793470,2 +np.float64,0x3fb052bdde20a57c,0xbff3207fd413c848,2 +np.float64,0x3fdccfecbbb99fd9,0xbfd62ec04a1a687a,2 +np.float64,0x3fd403ac53280759,0xbfe027a31df2c8cc,2 +np.float64,0x3fab708e4036e11d,0xbff45591df4f2e8b,2 +np.float64,0x7fcfc001993f8002,0x40733a63539acf9d,2 +np.float64,0x3fd2b295dfa5652c,0xbfe119c1b476c536,2 +np.float64,0x7fe8061262b00c24,0x4073421552ae4538,2 +np.float64,0xffefffffffffffff,0x7ff8000000000000,2 +np.float64,0x7fed52093ffaa411,0x40734377c072a7e8,2 +np.float64,0xf3df902fe7bf2,0xc0733ac79a75ff7a,2 +np.float64,0x7fe13d382e227a6f,0x40733fc6fd0486bd,2 +np.float64,0x3621d5086c43b,0xc073453d31effbcd,2 +np.float64,0x3ff0000000000000,0x0,2 +np.float64,0x3fdaffea27b5ffd4,0xbfd7fd139dc1c2c5,2 +np.float64,0x7fea6536dc34ca6d,0x407342bccc564fdd,2 +np.float64,0x7fd478f00c28f1df,0x40733c27c0072fde,2 +np.float64,0x7fa72ef0502e5de0,0x40732e91e83db75c,2 +np.float64,0x7fd302970626052d,0x40733ba3ec6775f6,2 +np.float64,0x7fbb57ab0036af55,0x407334887348e613,2 +np.float64,0x3fda0ff722b41fee,0xbfd8f87b77930330,2 +np.float64,0x1e983ce23d309,0xc073493438f57e61,2 +np.float64,0x7fc90de97c321bd2,0x407338be01ffd4bd,2 +np.float64,0x7fe074b09c20e960,0x40733f7443f0dbe1,2 +np.float64,0x3fed5dec9fbabbd9,0xbfa317efb1fe8a95,2 +np.float64,0x7fdb877632b70eeb,0x40733e3697c88ba8,2 +np.float64,0x7fe4fb0067e9f600,0x40734124604b99e8,2 +np.float64,0x7fd447dc96288fb8,0x40733c1703ab2cce,2 +np.float64,0x3feb2d1e64f65a3d,0xbfb22a781df61c05,2 +np.float64,0xb6c8e6676d91d,0xc0733cc8859a0b91,2 +np.float64,0x3fdc3c2418387848,0xbfd6bec3a3c3cdb5,2 +np.float64,0x3fdecb9ccdbd973a,0xbfd4551c05721a8e,2 +np.float64,0x3feb1100e7762202,0xbfb29db911fe6768,2 +np.float64,0x3fe0444bc2a08898,0xbfd2ce69582e78c1,2 +np.float64,0x7fda403218b48063,0x40733de201d8340c,2 +np.float64,0x3fdc70421238e084,0xbfd68ba4bd48322b,2 +np.float64,0x3fe06e747c60dce9,0xbfd286bcac34a981,2 +np.float64,0x7fc1931d9623263a,0x407336473da54de4,2 +np.float64,0x229914da45323,0xc073485979ff141c,2 +np.float64,0x3fe142f92da285f2,0xbfd1280909992cb6,2 +np.float64,0xf1d02fa9e3a06,0xc0733ad6b19d71a0,2 +np.float64,0x3fb1fe9b0023fd36,0xbff27317d8252c16,2 +np.float64,0x3fa544b9242a8972,0xbff61ac38569bcfc,2 +np.float64,0x3feeb129d4fd6254,0xbf928f23ad20c1ee,2 +np.float64,0xa2510b7f44a22,0xc0733d9bc81ea0a1,2 +np.float64,0x3fca75694d34ead3,0xbfe5e8975b3646c2,2 +np.float64,0x7fece10621b9c20b,0x4073435cc3dd9a1b,2 
+np.float64,0x7fe98a57d3b314af,0x4073428239b6a135,2 +np.float64,0x3fe259c62a64b38c,0xbfcee96682a0f355,2 +np.float64,0x3feaaa9b9d755537,0xbfb445779f3359af,2 +np.float64,0xdaadecfdb55be,0xc0733b899338432a,2 +np.float64,0x3fed00eae4fa01d6,0xbfa5dc8d77be5991,2 +np.float64,0x7fcc96c773392d8e,0x407339a8c5cd786e,2 +np.float64,0x3fef7b8b203ef716,0xbf7cff655ecb6424,2 +np.float64,0x7fd4008113a80101,0x40733bfe6552acb7,2 +np.float64,0x7fe99ff035b33fdf,0x407342881753ee2e,2 +np.float64,0x3ee031e87dc07,0xc0734432d736e492,2 +np.float64,0x3fddfe390f3bfc72,0xbfd510f1d9ec3e36,2 +np.float64,0x3fd9ddce74b3bb9d,0xbfd92e2d75a061bb,2 +np.float64,0x7fe5f742edebee85,0x40734176058e3a77,2 +np.float64,0x3fdb04185b360831,0xbfd7f8c63aa5e1c4,2 +np.float64,0xea2b0f43d4562,0xc0733b0fd77c8118,2 +np.float64,0x7fc3f4973527e92d,0x407337293bbb22c4,2 +np.float64,0x3fb9adfb38335bf6,0xbfeff4f3ea85821a,2 +np.float64,0x87fb98750ff73,0xc0733ed6ad83c269,2 +np.float64,0x3fe005721a200ae4,0xbfd33a9f1ebfb0ac,2 +np.float64,0xd9e04fe7b3c0a,0xc0733b901ee257f3,2 +np.float64,0x2c39102658723,0xc07346a4db63bf55,2 +np.float64,0x3f7dc28e003b851c,0xc0011c1d1233d948,2 +np.float64,0x3430fd3868620,0xc073457e24e0b70d,2 +np.float64,0xbff0000000000000,0x7ff8000000000000,2 +np.float64,0x3fd23e45e0247c8c,0xbfe17146bcf87b57,2 +np.float64,0x6599df3ecb33d,0xc07340dd2c41644c,2 +np.float64,0x3fdf074f31be0e9e,0xbfd41f6e9dbb68a5,2 +np.float64,0x7fdd6233f3bac467,0x40733eaa8f674b72,2 +np.float64,0x7fe03e8481607d08,0x40733f5d3df3b087,2 +np.float64,0x3fcc3b79f13876f4,0xbfe501bf3b379b77,2 +np.float64,0xe5d97ae3cbb30,0xc0733b30f47cbd12,2 +np.float64,0x8acbc4a115979,0xc0733eb240a4d2c6,2 +np.float64,0x3fedbdbc48bb7b79,0xbfa0470fd70c4359,2 +np.float64,0x3fde1611103c2c22,0xbfd4fae1fa8e7e5e,2 +np.float64,0x3fe09478bd2128f1,0xbfd246b7e85711dc,2 +np.float64,0x3fd6dfe8f3adbfd2,0xbfdc98ca2f32c1ad,2 +np.float64,0x72ccf274e599f,0xc0734003e5b0da63,2 +np.float64,0xe27c7265c4f8f,0xc0733b4b2d808566,2 +np.float64,0x7fee3161703c62c2,0x407343abe90f5649,2 +np.float64,0xf54fb5c1eaa0,0xc0734e01384fcf78,2 +np.float64,0xcde5924d9bcb3,0xc0733bf4b83c66c2,2 +np.float64,0x3fc46fdbe528dfb8,0xbfe97f55ef5e9683,2 +np.float64,0x7fe513528a2a26a4,0x4073412c69baceca,2 +np.float64,0x3fd29eca4aa53d95,0xbfe128801cd33ed0,2 +np.float64,0x7febb21718b7642d,0x4073431256def857,2 +np.float64,0x3fcab536c0356a6e,0xbfe5c73c59f41578,2 +np.float64,0x7fc7e9f0d82fd3e1,0x4073386b213e5dfe,2 +np.float64,0xb5b121276b624,0xc0733cd33083941c,2 +np.float64,0x7e0dd9bcfc1bc,0xc0733f5d8bf35050,2 +np.float64,0x3fd1c75106238ea2,0xbfe1cd11cccda0f4,2 +np.float64,0x9f060e673e0c2,0xc0733dc03da71909,2 +np.float64,0x7fd915a2f3322b45,0x40733d912af07189,2 +np.float64,0x3fd8cbae4431975d,0xbfda5b02ca661139,2 +np.float64,0x3fde8b411f3d1682,0xbfd48f6f710a53b6,2 +np.float64,0x3fc17a780622f4f0,0xbfebabb10c55255f,2 +np.float64,0x3fde5cbe5f3cb97d,0xbfd4b9e2e0101fb1,2 +np.float64,0x7fd859036530b206,0x40733d5c2252ff81,2 +np.float64,0xb0f5040f61ea1,0xc0733d02292f527b,2 +np.float64,0x3fde5c49ae3cb893,0xbfd4ba4db3ce2cf3,2 +np.float64,0x3fecc4518df988a3,0xbfa7af0bfc98bc65,2 +np.float64,0x3feffee03cbffdc0,0xbf0f3ede6ca7d695,2 +np.float64,0xbc5eac9b78bd6,0xc0733c92fb51c8ae,2 +np.float64,0x3fe2bb4ef765769e,0xbfcdc4f70a65dadc,2 +np.float64,0x5089443ca1129,0xc073427a7d0cde4a,2 +np.float64,0x3fd0d6e29121adc5,0xbfe28e28ece1db86,2 +np.float64,0xbe171e397c2e4,0xc0733c82cede5d02,2 +np.float64,0x4ede27be9dbc6,0xc073429fba1a4af1,2 +np.float64,0x3fe2aff3af655fe7,0xbfcde6b52a8ed3c1,2 +np.float64,0x7fd85ca295b0b944,0x40733d5d2adcccf1,2 
+np.float64,0x24919bba49234,0xc07347f6ed704a6f,2 +np.float64,0x7fd74bc1eeae9783,0x40733d0d94a89011,2 +np.float64,0x3fc1cd12cb239a26,0xbfeb6a9c25c2a11d,2 +np.float64,0x3fdafbc0ac35f781,0xbfd8015ccf1f1b51,2 +np.float64,0x3fee01327c3c0265,0xbf9ca1d0d762dc18,2 +np.float64,0x3fe65bd7702cb7af,0xbfc3ee0de5c36b8d,2 +np.float64,0x7349c82ee693a,0xc0733ffc5b6eccf2,2 +np.float64,0x3fdc5906f738b20e,0xbfd6a26288eb5933,2 +np.float64,0x1,0xc07434e6420f4374,2 +np.float64,0x3fb966128a32cc25,0xbff00e0aa7273838,2 +np.float64,0x3fd501ff9a2a03ff,0xbfdef69133482121,2 +np.float64,0x194d4f3c329ab,0xc0734a861b44cfbe,2 +np.float64,0x3fec5d34f8f8ba6a,0xbfaad1b31510e70b,2 +np.float64,0x1635e4c22c6be,0xc0734b6dec650943,2 +np.float64,0x3fead2f8edb5a5f2,0xbfb39dac30a962cf,2 +np.float64,0x3f7dfa4ce03bf49a,0xc00115a112141aa7,2 +np.float64,0x3fef6827223ed04e,0xbf80a42c9edebfe9,2 +np.float64,0xe771f303cee3f,0xc0733b24a6269fe4,2 +np.float64,0x1160ccc622c1b,0xc0734d22604eacb9,2 +np.float64,0x3fc485cd08290b9a,0xbfe970723008c8c9,2 +np.float64,0x7fef99c518bf3389,0x407343fcf9ed202f,2 +np.float64,0x7fd8c1447a318288,0x40733d79a440b44d,2 +np.float64,0xaf219f955e434,0xc0733d149c13f440,2 +np.float64,0xcf45f6239e8bf,0xc0733be8ddda045d,2 +np.float64,0x7599394aeb328,0xc0733fd90fdbb0ea,2 +np.float64,0xc7f6390f8fec7,0xc0733c28bfbc66a3,2 +np.float64,0x3fd39ae96c2735d3,0xbfe0712274a8742b,2 +np.float64,0xa4d6c18f49ad8,0xc0733d805a0528f7,2 +np.float64,0x7fd9ea78d7b3d4f1,0x40733dcb2b74802a,2 +np.float64,0x3fecd251cb39a4a4,0xbfa742ed41d4ae57,2 +np.float64,0x7fed7a07cd7af40f,0x407343813476027e,2 +np.float64,0x3fd328ae7f26515d,0xbfe0c30b56a83c64,2 +np.float64,0x7fc937ff7a326ffe,0x407338c9a45b9140,2 +np.float64,0x3fcf1d31143e3a62,0xbfe3a7f760fbd6a8,2 +np.float64,0x7fb911dcbc3223b8,0x407333ee158cccc7,2 +np.float64,0x3fd352fc83a6a5f9,0xbfe0a47d2f74d283,2 +np.float64,0x7fd310753fa620e9,0x40733ba8fc4300dd,2 +np.float64,0x3febd64b4577ac97,0xbfaefd4a79f95c4b,2 +np.float64,0x6a6961a4d4d2d,0xc073408ae1687943,2 +np.float64,0x3fe4ba73d16974e8,0xbfc8239341b9e457,2 +np.float64,0x3fed8e7cac3b1cf9,0xbfa1a96a0cc5fcdc,2 +np.float64,0x7fd505ec04aa0bd7,0x40733c56f86e3531,2 +np.float64,0x3fdf166e9abe2cdd,0xbfd411e5f8569d70,2 +np.float64,0x7fe1bc6434e378c7,0x40733ff9861bdabb,2 +np.float64,0x3fd3b0b175a76163,0xbfe061ba5703f3c8,2 +np.float64,0x7fed75d7ffbaebaf,0x4073438037ba6f19,2 +np.float64,0x5a9e109cb53c3,0xc07341a8b04819c8,2 +np.float64,0x3fe14786b4e28f0d,0xbfd120b541bb880e,2 +np.float64,0x3fed4948573a9291,0xbfa3b471ff91614b,2 +np.float64,0x66aac5d8cd559,0xc07340ca9b18af46,2 +np.float64,0x3fdb48efd23691e0,0xbfd7b24c5694838b,2 +np.float64,0x7fe6da7d1eadb4f9,0x407341bc7d1fae43,2 +np.float64,0x7feb702cf336e059,0x40734301b96cc3c0,2 +np.float64,0x3fd1e60987a3cc13,0xbfe1b522cfcc3d0e,2 +np.float64,0x3feca57f50794aff,0xbfa89dc90625d39c,2 +np.float64,0x7fdc46dc56b88db8,0x40733e664294a0f9,2 +np.float64,0x8dc8fd811b920,0xc0733e8c5955df06,2 +np.float64,0xf01634abe02c7,0xc0733ae370a76d0c,2 +np.float64,0x3fc6f8d8ab2df1b1,0xbfe7df5093829464,2 +np.float64,0xda3d7597b47af,0xc0733b8d2702727a,2 +np.float64,0x7feefd53227dfaa5,0x407343da3d04db28,2 +np.float64,0x3fe2fbca3525f794,0xbfcd06e134417c08,2 +np.float64,0x7fd36d3ce226da79,0x40733bca7c322df1,2 +np.float64,0x7fec37e00b786fbf,0x4073433397b48a5b,2 +np.float64,0x3fbf133f163e267e,0xbfed4e72f1362a77,2 +np.float64,0x3fc11efbb9223df7,0xbfebf53002a561fe,2 +np.float64,0x3fc89c0e5431381d,0xbfe6ea562364bf81,2 +np.float64,0x3f9cd45da839a8bb,0xbff8ceb14669ee4b,2 +np.float64,0x23dc8fa647b93,0xc0734819aaa9b0ee,2 
+np.float64,0x3fe829110d305222,0xbfbf3e60c45e2399,2 +np.float64,0x7fed8144e57b0289,0x40734382e917a02a,2 +np.float64,0x7fe033fbf7a067f7,0x40733f58bb00b20f,2 +np.float64,0xe3807f45c7010,0xc0733b43379415d1,2 +np.float64,0x3fd708fb342e11f6,0xbfdc670ef9793782,2 +np.float64,0x3fe88c924b311925,0xbfbd78210d9e7164,2 +np.float64,0x3fe0a2a7c7614550,0xbfd22efaf0472c4a,2 +np.float64,0x7fe3a37501a746e9,0x407340aecaeade41,2 +np.float64,0x3fd05077ec20a0f0,0xbfe2fedbf07a5302,2 +np.float64,0x7fd33bf61da677eb,0x40733bb8c58912aa,2 +np.float64,0x3feb29bdae76537b,0xbfb2384a8f61b5f9,2 +np.float64,0x3fec0fc14ff81f83,0xbfad3423e7ade174,2 +np.float64,0x3fd0f8b1a1a1f163,0xbfe2725dd4ccea8b,2 +np.float64,0x3fe382d26a6705a5,0xbfcb80dba4218bdf,2 +np.float64,0x3fa873f2cc30e7e6,0xbff522911cb34279,2 +np.float64,0x7fed7fd7377affad,0x4073438292f6829b,2 +np.float64,0x3feeacd8067d59b0,0xbf92cdbeda94b35e,2 +np.float64,0x7fe464d62228c9ab,0x407340f1eee19aa9,2 +np.float64,0xe997648bd32ed,0xc0733b143aa0fad3,2 +np.float64,0x7fea4869f13490d3,0x407342b5333b54f7,2 +np.float64,0x935b871926b71,0xc0733e47c6683319,2 +np.float64,0x28a9d0c05155,0xc0735a7e3532af83,2 +np.float64,0x79026548f204d,0xc0733fa6339ffa2f,2 +np.float64,0x3fdb1daaabb63b55,0xbfd7de839c240ace,2 +np.float64,0x3fc0db73b421b6e7,0xbfec2c6e36c4f416,2 +np.float64,0xb8b50ac1716b,0xc0734ff9fc60ebce,2 +np.float64,0x7fdf13e0c6be27c1,0x40733f0e44f69437,2 +np.float64,0x3fcd0cb97b3a1973,0xbfe49c34ff531273,2 +np.float64,0x3fcbac034b375807,0xbfe54913d73f180d,2 +np.float64,0x3fe091d2a2e123a5,0xbfd24b290a9218de,2 +np.float64,0xede43627dbc87,0xc0733af3c7c7f716,2 +np.float64,0x7fc037e7ed206fcf,0x407335b85fb0fedb,2 +np.float64,0x3fce7ae4c63cf5ca,0xbfe3f1350fe03f28,2 +np.float64,0x7fcdd862263bb0c3,0x407339f5458bb20e,2 +np.float64,0x4d7adf709af5d,0xc07342bf4edfadb2,2 +np.float64,0xdc6c03f3b8d81,0xc0733b7b74d6a635,2 +np.float64,0x3fe72ae0a4ee55c1,0xbfc1f4665608b21f,2 +np.float64,0xcd62f19d9ac5e,0xc0733bf92235e4d8,2 +np.float64,0xe3a7b8fdc74f7,0xc0733b4204f8e166,2 +np.float64,0x3fdafd35adb5fa6b,0xbfd7ffdca0753b36,2 +np.float64,0x3fa023e8702047d1,0xbff8059150ea1464,2 +np.float64,0x99ff336933fe7,0xc0733df961197517,2 +np.float64,0x7feeb365b9bd66ca,0x407343c995864091,2 +np.float64,0x7fe449b49f689368,0x407340e8aa3369e3,2 +np.float64,0x7faf5843043eb085,0x407330aa700136ca,2 +np.float64,0x3fd47b2922a8f652,0xbfdfab3de86f09ee,2 +np.float64,0x7fd9fc3248b3f864,0x40733dcfea6f9b3e,2 +np.float64,0xe20b0d8dc4162,0xc0733b4ea8fe7b3f,2 +np.float64,0x7feff8e0e23ff1c1,0x40734411c490ed70,2 +np.float64,0x7fa58382d02b0705,0x40732e0cf28e14fe,2 +np.float64,0xb8ad9a1b715b4,0xc0733cb630b8f2d4,2 +np.float64,0xe90abcf1d2158,0xc0733b186b04eeee,2 +np.float64,0x7fd6aa6f32ad54dd,0x40733cdccc636604,2 +np.float64,0x3fd8f84eedb1f09e,0xbfda292909a5298a,2 +np.float64,0x7fecd6b1d9f9ad63,0x4073435a472b05b5,2 +np.float64,0x3fd9f47604b3e8ec,0xbfd915e028cbf4a6,2 +np.float64,0x3fd20d9398241b27,0xbfe19691363dd508,2 +np.float64,0x3fe5ed09bbabda13,0xbfc5043dfc9c8081,2 +np.float64,0x7fbe5265363ca4c9,0x407335406f8e4fac,2 +np.float64,0xac2878af5850f,0xc0733d3311be9786,2 +np.float64,0xac2074555840f,0xc0733d3364970018,2 +np.float64,0x3fcd49b96b3a9373,0xbfe47f24c8181d9c,2 +np.float64,0x3fd10caca6a21959,0xbfe2620ae5594f9a,2 +np.float64,0xec5b87e9d8b71,0xc0733aff499e72ca,2 +np.float64,0x9d5e9fad3abd4,0xc0733dd2d70eeb4a,2 +np.float64,0x7fe3d3a24227a744,0x407340bfc2072fdb,2 +np.float64,0x3fc5f7a77c2bef4f,0xbfe87e69d502d784,2 +np.float64,0x33161a66662c4,0xc07345a436308244,2 +np.float64,0xa27acdc744f5a,0xc0733d99feb3d8ea,2 
+np.float64,0x3fe2d9301565b260,0xbfcd6c914e204437,2 +np.float64,0x7fd5d111e12ba223,0x40733c98e14a6fd0,2 +np.float64,0x6c3387bed8672,0xc073406d3648171a,2 +np.float64,0x24d89fe849b15,0xc07347e97bec008c,2 +np.float64,0x3fefd763677faec7,0xbf61ae69caa9cad9,2 +np.float64,0x7fe0a4684ba148d0,0x40733f884d32c464,2 +np.float64,0x3fd5c3c939ab8792,0xbfddfaaefc1c7fca,2 +np.float64,0x3fec9b87a6b9370f,0xbfa8eb34efcc6b9b,2 +np.float64,0x3feb062431f60c48,0xbfb2ca6036698877,2 +np.float64,0x3fef97f6633f2fed,0xbf76bc742860a340,2 +np.float64,0x74477490e88ef,0xc0733fed220986bc,2 +np.float64,0x3fe4bea67ce97d4d,0xbfc818525292b0f6,2 +np.float64,0x3fc6add3a92d5ba7,0xbfe80cfdc9a90bda,2 +np.float64,0x847c9ce308f94,0xc0733f05026f5965,2 +np.float64,0x7fea53fd2eb4a7f9,0x407342b841fc4723,2 +np.float64,0x3fc55a16fc2ab42e,0xbfe8e3849130da34,2 +np.float64,0x3fbdf7d07c3befa1,0xbfedcf84b9c6c161,2 +np.float64,0x3fe5fb25aa6bf64b,0xbfc4e083ff96b116,2 +np.float64,0x61c776a8c38ef,0xc0734121611d84d7,2 +np.float64,0x3fec413164f88263,0xbfabadbd05131546,2 +np.float64,0x9bf06fe137e0e,0xc0733de315469ee0,2 +np.float64,0x2075eefc40ebf,0xc07348cae84de924,2 +np.float64,0x3fdd42e0143a85c0,0xbfd5c0b6f60b3cea,2 +np.float64,0xdbb1ab45b7636,0xc0733b8157329daf,2 +np.float64,0x3feac6d56bf58dab,0xbfb3d00771b28621,2 +np.float64,0x7fb2dc825025b904,0x407331f3e950751a,2 +np.float64,0x3fecea6efd79d4de,0xbfa689309cc0e3fe,2 +np.float64,0x3fd83abec7b0757e,0xbfdaff5c674a9c59,2 +np.float64,0x3fd396f7c0272df0,0xbfe073ee75c414ba,2 +np.float64,0x3fe10036c162006e,0xbfd1945a38342ae1,2 +np.float64,0x3fd5bbded52b77be,0xbfde04cca40d4156,2 +np.float64,0x3fe870945ab0e129,0xbfbdf72f0e6206fa,2 +np.float64,0x3fef72fddcbee5fc,0xbf7ee2dba88b1bad,2 +np.float64,0x4e111aa09c224,0xc07342b1e2b29643,2 +np.float64,0x3fd926d8b5b24db1,0xbfd9f58b78d6b061,2 +np.float64,0x3fc55679172aacf2,0xbfe8e5df687842e2,2 +np.float64,0x7f5f1749803e2e92,0x40731886e16cfc4d,2 +np.float64,0x7fea082b53b41056,0x407342a42227700e,2 +np.float64,0x3fece1d1d039c3a4,0xbfa6cb780988a469,2 +np.float64,0x3b2721d8764e5,0xc073449f6a5a4832,2 +np.float64,0x365cb7006cba,0xc0735879ba5f0b6e,2 +np.float64,0x7ff4000000000000,0x7ffc000000000000,2 +np.float64,0x7fe606ce92ac0d9c,0x4073417aeebe97e8,2 +np.float64,0x3fe237b544a46f6b,0xbfcf50f8f76d7df9,2 +np.float64,0x3fe7265e5eee4cbd,0xbfc1ff39089ec8d0,2 +np.float64,0x7fe2bb3c5ea57678,0x4073405aaad81cf2,2 +np.float64,0x3fd811df84b023bf,0xbfdb2e670ea8d8de,2 +np.float64,0x3f6a0efd00341dfa,0xc003fac1ae831241,2 +np.float64,0x3fd0d214afa1a429,0xbfe2922080a91c72,2 +np.float64,0x3feca6a350b94d47,0xbfa894eea3a96809,2 +np.float64,0x7fe23e5c76247cb8,0x4073402bbaaf71c7,2 +np.float64,0x3fe739a1fdae7344,0xbfc1d109f66efb5d,2 +np.float64,0x3fdf4b8e283e971c,0xbfd3e28f46169cc5,2 +np.float64,0x38f2535271e4b,0xc07344e3085219fa,2 +np.float64,0x7fd263a0f9a4c741,0x40733b68d945dae0,2 +np.float64,0x7fdd941863bb2830,0x40733eb651e3dca9,2 +np.float64,0xace7279159ce5,0xc0733d2b63b5947e,2 +np.float64,0x7fe34670b2268ce0,0x4073408d92770cb5,2 +np.float64,0x7fd11fa6dfa23f4d,0x40733aea02e76ea3,2 +np.float64,0x3fe6d9cbca6db398,0xbfc2b84b5c8c7eab,2 +np.float64,0x3fd69a0274ad3405,0xbfdcee3c7e52c463,2 +np.float64,0x3feb5af671f6b5ed,0xbfb16f88d739477f,2 +np.float64,0x3feea400163d4800,0xbf934e071c64fd0b,2 +np.float64,0x3fefd6bcf17fad7a,0xbf61f711c392b119,2 +np.float64,0x3fe148d43da291a8,0xbfd11e9cd3f91cd3,2 +np.float64,0x7fedf1308b7be260,0x4073439d135656da,2 +np.float64,0x3fe614c99c6c2993,0xbfc49fd1984dfd6d,2 +np.float64,0xd6e8d4e5add1b,0xc0733ba88256026e,2 +np.float64,0xfff0000000000000,0x7ff8000000000000,2 
+np.float64,0x3fb530b5562a616b,0xbff1504bcc5c8f73,2 +np.float64,0xb7da68396fb4d,0xc0733cbe2790f52e,2 +np.float64,0x7fad78e26c3af1c4,0x4073303cdbfb0a15,2 +np.float64,0x7fee5698447cad30,0x407343b474573a8b,2 +np.float64,0x3fd488325c291065,0xbfdf999296d901e7,2 +np.float64,0x2669283a4cd26,0xc073479f823109a4,2 +np.float64,0x7fef3b090afe7611,0x407343e805a3b264,2 +np.float64,0x7fe8b96ae0f172d5,0x4073424874a342ab,2 +np.float64,0x7fef409f56fe813e,0x407343e943c3cd44,2 +np.float64,0x3fed28073dfa500e,0xbfa4b17e4cd31a3a,2 +np.float64,0x7f87ecc4802fd988,0x40732527e027b24b,2 +np.float64,0x3fdda24da0bb449b,0xbfd566a43ac035af,2 +np.float64,0x179fc9e62f3fa,0xc0734b0028c80fc1,2 +np.float64,0x3fef85b0927f0b61,0xbf7ac27565d5ab4f,2 +np.float64,0x5631501aac62b,0xc0734201be12c5d4,2 +np.float64,0x3fd782e424af05c8,0xbfdbd57544f8a7c3,2 +np.float64,0x3fe603a9a6ac0753,0xbfc4caff04dc3caf,2 +np.float64,0x7fbd5225163aa449,0x40733504b88f0a56,2 +np.float64,0x3fecd27506b9a4ea,0xbfa741dd70e6b08c,2 +np.float64,0x9c99603b3932c,0xc0733ddb922dc5db,2 +np.float64,0x3fbeb57f1a3d6afe,0xbfed789ff217aa08,2 +np.float64,0x3fef9c0f85bf381f,0xbf75d5c3d6cb281a,2 +np.float64,0x3fde4afb613c95f7,0xbfd4ca2a231c9005,2 +np.float64,0x396233d472c47,0xc07344d56ee70631,2 +np.float64,0x3fb31ea1c6263d44,0xbff207356152138d,2 +np.float64,0x3fe50bdf78aa17bf,0xbfc74ae0cbffb735,2 +np.float64,0xef74c701dee99,0xc0733ae81e4bb443,2 +np.float64,0x9a3e13a1347c3,0xc0733df68b60afc7,2 +np.float64,0x33ba4f886774b,0xc073458e03f0c13e,2 +np.float64,0x3fe8ba0e9931741d,0xbfbcaadf974e8f64,2 +np.float64,0x3fe090a4cd61214a,0xbfd24d236cf365d6,2 +np.float64,0x7fd87d992930fb31,0x40733d668b73b820,2 +np.float64,0x3fe6422b296c8456,0xbfc42e070b695d01,2 +np.float64,0x3febe9334677d267,0xbfae667864606cfe,2 +np.float64,0x771a3ce4ee348,0xc0733fc274d12c97,2 +np.float64,0x3fe0413542e0826b,0xbfd2d3b08fb5b8a6,2 +np.float64,0x3fd00870ea2010e2,0xbfe33cc04cbd42e0,2 +np.float64,0x3fe74fb817ae9f70,0xbfc19c45dbf919e1,2 +np.float64,0x40382fa08071,0xc07357514ced5577,2 +np.float64,0xa14968474292d,0xc0733da71a990f3a,2 +np.float64,0x5487c740a90fa,0xc0734224622d5801,2 +np.float64,0x3fed7d8d14fafb1a,0xbfa228f7ecc2ac03,2 +np.float64,0x3fe39bb485e73769,0xbfcb3a235a722960,2 +np.float64,0x3fd01090b2202121,0xbfe335b752589a22,2 +np.float64,0x3fd21a3e7da4347d,0xbfe18cd435a7c582,2 +np.float64,0x3fe7fa855a2ff50b,0xbfc00ab0665709fe,2 +np.float64,0x3fedc0d4577b81a9,0xbfa02fef3ff553fc,2 +np.float64,0x3fe99d4906333a92,0xbfb8bf18220e5e8e,2 +np.float64,0x3fd944ee3c3289dc,0xbfd9d46071675e73,2 +np.float64,0x3fe3ed8d52e7db1b,0xbfca53f8d4aef484,2 +np.float64,0x7fe748623a6e90c3,0x407341dd97c9dd79,2 +np.float64,0x3fea1b4b98343697,0xbfb6a1560a56927f,2 +np.float64,0xe1215715c242b,0xc0733b55dbf1f0a8,2 +np.float64,0x3fd0d5bccca1ab7a,0xbfe28f1b66d7a470,2 +np.float64,0x881a962710353,0xc0733ed51848a30d,2 +np.float64,0x3fcf022afe3e0456,0xbfe3b40eabf24501,2 +np.float64,0x3fdf1ac6bbbe358d,0xbfd40e03e888288d,2 +np.float64,0x3fa51a5eac2a34bd,0xbff628a7c34d51b3,2 +np.float64,0x3fdbaf408d375e81,0xbfd74ad39d97c92a,2 +np.float64,0x3fcd2418ea3a4832,0xbfe4910b009d8b11,2 +np.float64,0x3fc7b3062a2f660c,0xbfe7706dc47993e1,2 +np.float64,0x7fb8232218304643,0x407333aaa7041a9f,2 +np.float64,0x7fd5f186362be30b,0x40733ca32fdf9cc6,2 +np.float64,0x3fe57ef1d6aafde4,0xbfc61e23d00210c7,2 +np.float64,0x7c6830baf8d07,0xc0733f74f19e9dad,2 +np.float64,0xcacbfd5595980,0xc0733c0fb49edca7,2 +np.float64,0x3fdfdeac873fbd59,0xbfd36114c56bed03,2 +np.float64,0x3fd31f0889263e11,0xbfe0ca0cc1250169,2 +np.float64,0x3fe839fbe47073f8,0xbfbef0a2abc3d63f,2 
+np.float64,0x3fc36af57e26d5eb,0xbfea3553f38770b7,2 +np.float64,0x3fe73dbc44ee7b79,0xbfc1c738f8fa6b3d,2 +np.float64,0x3fd3760e4da6ec1d,0xbfe08b5b609d11e5,2 +np.float64,0x3fee1cfa297c39f4,0xbf9b06d081bc9d5b,2 +np.float64,0xdfb01561bf61,0xc0734ea55e559888,2 +np.float64,0x687bd01cd0f7b,0xc07340ab67fe1816,2 +np.float64,0x3fefc88f4cbf911f,0xbf6828c359cf19dc,2 +np.float64,0x8ad34adb15a6a,0xc0733eb1e03811e5,2 +np.float64,0x3fe2b49c12e56938,0xbfcdd8dbdbc0ce59,2 +np.float64,0x6e05037adc0a1,0xc073404f91261635,2 +np.float64,0x3fe2fd737fe5fae7,0xbfcd020407ef4d78,2 +np.float64,0x3fd0f3c0dc21e782,0xbfe2766a1ab02eae,2 +np.float64,0x28564d9850acb,0xc073474875f87c5e,2 +np.float64,0x3fe4758015a8eb00,0xbfc8ddb45134a1bd,2 +np.float64,0x7fe7f19306efe325,0x4073420f626141a7,2 +np.float64,0x7fd27f34c0a4fe69,0x40733b733d2a5b50,2 +np.float64,0x92c2366325847,0xc0733e4f04f8195a,2 +np.float64,0x3fc21f8441243f09,0xbfeb2ad23bc1ab0b,2 +np.float64,0x3fc721d3e42e43a8,0xbfe7c69bb47b40c2,2 +np.float64,0x3fe2f11a1625e234,0xbfcd26363b9c36c3,2 +np.float64,0x3fdcb585acb96b0b,0xbfd648446237cb55,2 +np.float64,0x3fd4060bf2280c18,0xbfe025fd4c8a658b,2 +np.float64,0x7fb8ae2750315c4e,0x407333d23b025d08,2 +np.float64,0x3fe3a03119a74062,0xbfcb2d6c91b38552,2 +np.float64,0x7fdd2af92bba55f1,0x40733e9d737e16e6,2 +np.float64,0x3fe50b05862a160b,0xbfc74d20815fe36b,2 +np.float64,0x164409f82c882,0xc0734b6980e19c03,2 +np.float64,0x3fe4093712a8126e,0xbfca070367fda5e3,2 +np.float64,0xae3049935c609,0xc0733d1e3608797b,2 +np.float64,0x3fd71df4b4ae3be9,0xbfdc4dcb7637600d,2 +np.float64,0x7fca01e8023403cf,0x407339006c521c49,2 +np.float64,0x3fb0c5c43e218b88,0xbff2f03211c63f25,2 +np.float64,0x3fee757af83ceaf6,0xbf95f33a6e56b454,2 +np.float64,0x3f865f1f402cbe3f,0xbfff62d9c9072bd7,2 +np.float64,0x89864e95130ca,0xc0733ec29f1e32c6,2 +np.float64,0x3fe51482bcea2905,0xbfc73414ddc8f1b7,2 +np.float64,0x7fd802f8fa3005f1,0x40733d43684e460a,2 +np.float64,0x3fbeb86ca63d70d9,0xbfed774ccca9b8f5,2 +np.float64,0x3fb355dcc826abba,0xbff1f33f9339e7a3,2 +np.float64,0x3fe506c61eaa0d8c,0xbfc7585a3f7565a6,2 +np.float64,0x7fe393f25ba727e4,0x407340a94bcea73b,2 +np.float64,0xf66f532decdeb,0xc0733ab5041feb0f,2 +np.float64,0x3fe26e872be4dd0e,0xbfceaaab466f32e0,2 +np.float64,0x3fefd9e290bfb3c5,0xbf60977d24496295,2 +np.float64,0x7fe19c5f692338be,0x40733fecef53ad95,2 +np.float64,0x3fe80365ab3006cb,0xbfbfec4090ef76ec,2 +np.float64,0x3fe88ab39eb11567,0xbfbd8099388d054d,2 +np.float64,0x3fe68fb09fad1f61,0xbfc36db9de38c2c0,2 +np.float64,0x3fe9051883b20a31,0xbfbb5b75b8cb8f24,2 +np.float64,0x3fd4708683a8e10d,0xbfdfb9b085dd8a83,2 +np.float64,0x3fe00ac11a601582,0xbfd3316af3e43500,2 +np.float64,0xd16af30ba2d5f,0xc0733bd68e8252f9,2 +np.float64,0x3fb97d654632facb,0xbff007ac1257f575,2 +np.float64,0x7fd637c10fac6f81,0x40733cb949d76546,2 +np.float64,0x7fed2cab6dba5956,0x4073436edfc3764e,2 +np.float64,0x3fed04afbbba095f,0xbfa5bfaa5074b7f4,2 +np.float64,0x0,0xfff0000000000000,2 +np.float64,0x389a1dc671345,0xc07344edd4206338,2 +np.float64,0x3fbc9ba25a393745,0xbfee74c34f49b921,2 +np.float64,0x3feee749947dce93,0xbf8f032d9cf6b5ae,2 +np.float64,0xedc4cf89db89a,0xc0733af4b2a57920,2 +np.float64,0x3fe41629eba82c54,0xbfc9e321faf79e1c,2 +np.float64,0x3feb0bcbf7b61798,0xbfb2b31e5d952869,2 +np.float64,0xad60654b5ac0d,0xc0733d26860df676,2 +np.float64,0x3fe154e1ff22a9c4,0xbfd10b416e58c867,2 +np.float64,0x7fb20e9c8a241d38,0x407331a66453b8bc,2 +np.float64,0x7fcbbaaf7d37755e,0x4073397274f28008,2 +np.float64,0x187d0fbc30fa3,0xc0734ac03cc98cc9,2 +np.float64,0x7fd153afeaa2a75f,0x40733aff00b4311d,2 
+np.float64,0x3fe05310a5e0a621,0xbfd2b5386aeecaac,2 +np.float64,0x7fea863b2b750c75,0x407342c57807f700,2 +np.float64,0x3fed5f0c633abe19,0xbfa30f6cfbc4bf94,2 +np.float64,0xf227c8b3e44f9,0xc0733ad42daaec9f,2 +np.float64,0x3fe956524772aca5,0xbfb9f4cabed7081d,2 +np.float64,0xefd11af7dfa24,0xc0733ae570ed2552,2 +np.float64,0x1690fff02d221,0xc0734b51a56c2980,2 +np.float64,0x7fd2e547a825ca8e,0x40733b992d6d9635,2 diff --git a/.venv/lib/python3.12/site-packages/numpy/core/tests/data/umath-validation-set-log1p.csv b/.venv/lib/python3.12/site-packages/numpy/core/tests/data/umath-validation-set-log1p.csv new file mode 100644 index 00000000..6e4f88b3 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/data/umath-validation-set-log1p.csv @@ -0,0 +1,1429 @@ +dtype,input,output,ulperrortol +np.float32,0x3e10aca8,0x3e075347,2 +np.float32,0x3f776e66,0x3f2d2003,2 +np.float32,0xbf34e8ce,0xbf9cfd5c,2 +np.float32,0xbf0260ee,0xbf363f69,2 +np.float32,0x3ed285e8,0x3eb05870,2 +np.float32,0x262b88,0x262b88,2 +np.float32,0x3eeffd6c,0x3ec4cfdb,2 +np.float32,0x3ee86808,0x3ebf9f54,2 +np.float32,0x3f36eba8,0x3f0a0524,2 +np.float32,0xbf1c047a,0xbf70afc7,2 +np.float32,0x3ead2916,0x3e952902,2 +np.float32,0x61c9c9,0x61c9c9,2 +np.float32,0xff7fffff,0xffc00000,2 +np.float32,0x7f64ee52,0x42b138e0,2 +np.float32,0x7ed00b1e,0x42afa4ff,2 +np.float32,0x3db53340,0x3dada0b2,2 +np.float32,0x3e6b0a4a,0x3e5397a4,2 +np.float32,0x7ed5d64f,0x42afb310,2 +np.float32,0xbf12bc5f,0xbf59f5ee,2 +np.float32,0xbda12710,0xbda7d8b5,2 +np.float32,0xbe2e89d8,0xbe3f5a9f,2 +np.float32,0x3f5bee75,0x3f1ebea4,2 +np.float32,0x9317a,0x9317a,2 +np.float32,0x7ee00130,0x42afcad8,2 +np.float32,0x7ef0d16d,0x42afefe7,2 +np.float32,0xbec7463a,0xbefc6a44,2 +np.float32,0xbf760ecc,0xc04fe59c,2 +np.float32,0xbecacb3c,0xbf011ae3,2 +np.float32,0x3ead92be,0x3e9577f0,2 +np.float32,0xbf41510d,0xbfb41b3a,2 +np.float32,0x7f71d489,0x42b154f1,2 +np.float32,0x8023bcd5,0x8023bcd5,2 +np.float32,0x801d33d8,0x801d33d8,2 +np.float32,0x3f3f545d,0x3f0ee0d4,2 +np.float32,0xbf700682,0xc0318c25,2 +np.float32,0xbe54e990,0xbe6eb0a3,2 +np.float32,0x7f0289bf,0x42b01941,2 +np.float32,0xbd61ac90,0xbd682113,2 +np.float32,0xbf2ff310,0xbf94cd6f,2 +np.float32,0x7f10064a,0x42b04b98,2 +np.float32,0x804d0d6d,0x804d0d6d,2 +np.float32,0x80317b0a,0x80317b0a,2 +np.float32,0xbddfef18,0xbded2640,2 +np.float32,0x3f00c9ab,0x3ed0a5bd,2 +np.float32,0x7f04b905,0x42b021c1,2 +np.float32,0x7fc00000,0x7fc00000,2 +np.float32,0x6524c4,0x6524c4,2 +np.float32,0x3da08ae0,0x3d9a8f88,2 +np.float32,0x293ea9,0x293ea9,2 +np.float32,0x71499e,0x71499e,2 +np.float32,0xbf14f54d,0xbf5f38a5,2 +np.float32,0x806e60f5,0x806e60f5,2 +np.float32,0x3f5f34bb,0x3f207fff,2 +np.float32,0x80513427,0x80513427,2 +np.float32,0x7f379670,0x42b0c7dc,2 +np.float32,0x3efba888,0x3eccb20b,2 +np.float32,0x3eeadd1b,0x3ec14f4b,2 +np.float32,0x7ec5a27f,0x42af8ab8,2 +np.float32,0x3f2afe4e,0x3f02f7a2,2 +np.float32,0x5591c8,0x5591c8,2 +np.float32,0x3dbb7240,0x3db35bab,2 +np.float32,0x805b911b,0x805b911b,2 +np.float32,0x800000,0x800000,2 +np.float32,0x7e784c04,0x42ae9cab,2 +np.float32,0x7ebaae14,0x42af6d86,2 +np.float32,0xbec84f7a,0xbefe1d42,2 +np.float32,0x7cea8281,0x42aa56bf,2 +np.float32,0xbf542cf6,0xbfe1eb1b,2 +np.float32,0xbf6bfb13,0xc0231a5b,2 +np.float32,0x7d6eeaef,0x42abc32c,2 +np.float32,0xbf062f6b,0xbf3e2000,2 +np.float32,0x8073d8e9,0x8073d8e9,2 +np.float32,0xbea4db14,0xbec6f485,2 +np.float32,0x7d7e8d62,0x42abe3a0,2 +np.float32,0x7e8fc34e,0x42aee7c6,2 +np.float32,0x7dcbb0c3,0x42acd464,2 +np.float32,0x7e123c,0x7e123c,2 
+np.float32,0x3d77af62,0x3d707c34,2 +np.float32,0x498cc8,0x498cc8,2 +np.float32,0x7f4e2206,0x42b1032a,2 +np.float32,0x3f734e0a,0x3f2b04a1,2 +np.float32,0x8053a9d0,0x8053a9d0,2 +np.float32,0xbe8a67e0,0xbea15be9,2 +np.float32,0xbf78e0ea,0xc065409e,2 +np.float32,0x352bdd,0x352bdd,2 +np.float32,0x3ee42be7,0x3ebcb38a,2 +np.float32,0x7f482d10,0x42b0f427,2 +np.float32,0xbf23155e,0xbf81b993,2 +np.float32,0x594920,0x594920,2 +np.float32,0x63f53f,0x63f53f,2 +np.float32,0x363592,0x363592,2 +np.float32,0x7dafbb78,0x42ac88cc,2 +np.float32,0x7f69516c,0x42b14298,2 +np.float32,0x3e1d5be2,0x3e126131,2 +np.float32,0x410c23,0x410c23,2 +np.float32,0x7ec9563c,0x42af9439,2 +np.float32,0xbedd3a0e,0xbf10d705,2 +np.float32,0x7f7c4f1f,0x42b16aa8,2 +np.float32,0xbe99b34e,0xbeb6c2d3,2 +np.float32,0x6cdc84,0x6cdc84,2 +np.float32,0x5b3bbe,0x5b3bbe,2 +np.float32,0x252178,0x252178,2 +np.float32,0x7d531865,0x42ab83c8,2 +np.float32,0xbf565b44,0xbfe873bf,2 +np.float32,0x5977ce,0x5977ce,2 +np.float32,0x588a58,0x588a58,2 +np.float32,0x3eae7054,0x3e961d51,2 +np.float32,0x725049,0x725049,2 +np.float32,0x7f2b9386,0x42b0a538,2 +np.float32,0xbe674714,0xbe831245,2 +np.float32,0x8044f0d8,0x8044f0d8,2 +np.float32,0x800a3c21,0x800a3c21,2 +np.float32,0x807b275b,0x807b275b,2 +np.float32,0xbf2463b6,0xbf83896e,2 +np.float32,0x801cca42,0x801cca42,2 +np.float32,0xbf28f2d0,0xbf8a121a,2 +np.float32,0x3f4168c2,0x3f1010ce,2 +np.float32,0x6f91a1,0x6f91a1,2 +np.float32,0xbf2b9eeb,0xbf8e0fc5,2 +np.float32,0xbea4c858,0xbec6d8e4,2 +np.float32,0xbf7abba0,0xc0788e88,2 +np.float32,0x802f18f7,0x802f18f7,2 +np.float32,0xbf7f6c75,0xc0c3145c,2 +np.float32,0xbe988210,0xbeb50f5e,2 +np.float32,0xbf219b7e,0xbf7f6a3b,2 +np.float32,0x7f800000,0x7f800000,2 +np.float32,0x7f7fffff,0x42b17218,2 +np.float32,0xbdca8d90,0xbdd5487e,2 +np.float32,0xbef683b0,0xbf2821b0,2 +np.float32,0x8043e648,0x8043e648,2 +np.float32,0xbf4319a4,0xbfb7cd1b,2 +np.float32,0x62c2b2,0x62c2b2,2 +np.float32,0xbf479ccd,0xbfc1a7b1,2 +np.float32,0x806c8a32,0x806c8a32,2 +np.float32,0x7f004447,0x42b01045,2 +np.float32,0x3f737d36,0x3f2b1ccf,2 +np.float32,0x3ee71f24,0x3ebebced,2 +np.float32,0x3ea0b6b4,0x3e8bc606,2 +np.float32,0x358fd7,0x358fd7,2 +np.float32,0xbe69780c,0xbe847d17,2 +np.float32,0x7f6bed18,0x42b14849,2 +np.float32,0xbf6a5113,0xc01dfe1d,2 +np.float32,0xbf255693,0xbf84de88,2 +np.float32,0x7f34acac,0x42b0bfac,2 +np.float32,0xbe8a3b6a,0xbea11efe,2 +np.float32,0x3f470d84,0x3f1342ab,2 +np.float32,0xbf2cbde3,0xbf8fc602,2 +np.float32,0x47c103,0x47c103,2 +np.float32,0xe3c94,0xe3c94,2 +np.float32,0xbec07afa,0xbef1693a,2 +np.float32,0x6a9cfe,0x6a9cfe,2 +np.float32,0xbe4339e0,0xbe5899da,2 +np.float32,0x7ea9bf1e,0x42af3cd6,2 +np.float32,0x3f6378b4,0x3f22c4c4,2 +np.float32,0xbd989ff0,0xbd9e9c77,2 +np.float32,0xbe6f2f50,0xbe88343d,2 +np.float32,0x3f7f2ac5,0x3f310764,2 +np.float32,0x3f256704,0x3eff2fb2,2 +np.float32,0x80786aca,0x80786aca,2 +np.float32,0x65d02f,0x65d02f,2 +np.float32,0x50d1c3,0x50d1c3,2 +np.float32,0x3f4a9d76,0x3f1541b4,2 +np.float32,0x802cf491,0x802cf491,2 +np.float32,0x3e935cec,0x3e81829b,2 +np.float32,0x3e2ad478,0x3e1dfd81,2 +np.float32,0xbf107cbd,0xbf54bef2,2 +np.float32,0xbf58c02e,0xbff007fe,2 +np.float32,0x80090808,0x80090808,2 +np.float32,0x805d1f66,0x805d1f66,2 +np.float32,0x6aec95,0x6aec95,2 +np.float32,0xbee3fc6e,0xbf16dc73,2 +np.float32,0x7f63314b,0x42b134f9,2 +np.float32,0x550443,0x550443,2 +np.float32,0xbefa8174,0xbf2c026e,2 +np.float32,0x3f7fb380,0x3f314bd5,2 +np.float32,0x80171f2c,0x80171f2c,2 +np.float32,0x3f2f56ae,0x3f058f2d,2 +np.float32,0x3eacaecb,0x3e94cd97,2 
+np.float32,0xbe0c4f0c,0xbe16e69d,2 +np.float32,0x3f48e4cb,0x3f144b42,2 +np.float32,0x7f03efe2,0x42b01eb7,2 +np.float32,0xbf1019ac,0xbf53dbe9,2 +np.float32,0x3e958524,0x3e832eb5,2 +np.float32,0xbf1b23c6,0xbf6e72f2,2 +np.float32,0x12c554,0x12c554,2 +np.float32,0x7dee588c,0x42ad24d6,2 +np.float32,0xbe8c216c,0xbea3ba70,2 +np.float32,0x804553cb,0x804553cb,2 +np.float32,0xbe446324,0xbe5a0966,2 +np.float32,0xbef7150a,0xbf28adff,2 +np.float32,0xbf087282,0xbf42ec6e,2 +np.float32,0x3eeef15c,0x3ec41937,2 +np.float32,0x61bbd2,0x61bbd2,2 +np.float32,0x3e51b28d,0x3e3ec538,2 +np.float32,0x57e869,0x57e869,2 +np.float32,0x7e5e7711,0x42ae646c,2 +np.float32,0x8050b173,0x8050b173,2 +np.float32,0xbf63c90c,0xc00d2438,2 +np.float32,0xbeba774c,0xbee7dcf8,2 +np.float32,0x8016faac,0x8016faac,2 +np.float32,0xbe8b448c,0xbea28aaf,2 +np.float32,0x3e8cd448,0x3e78d29e,2 +np.float32,0x80484e02,0x80484e02,2 +np.float32,0x3f63ba68,0x3f22e78c,2 +np.float32,0x2e87bb,0x2e87bb,2 +np.float32,0x230496,0x230496,2 +np.float32,0x1327b2,0x1327b2,2 +np.float32,0xbf046c56,0xbf3a72d2,2 +np.float32,0x3ecefe60,0x3eadd69a,2 +np.float32,0x49c56e,0x49c56e,2 +np.float32,0x3df22d60,0x3de4e550,2 +np.float32,0x3f67c19d,0x3f250707,2 +np.float32,0x3f20eb9c,0x3ef9b624,2 +np.float32,0x3f05ca75,0x3ed742fa,2 +np.float32,0xbe8514f8,0xbe9a1d45,2 +np.float32,0x8070a003,0x8070a003,2 +np.float32,0x7e49650e,0x42ae317a,2 +np.float32,0x3de16ce9,0x3dd5dc3e,2 +np.float32,0xbf4ae952,0xbfc95f1f,2 +np.float32,0xbe44dd84,0xbe5aa0db,2 +np.float32,0x803c3bc0,0x803c3bc0,2 +np.float32,0x3eebb9e8,0x3ec1e692,2 +np.float32,0x80588275,0x80588275,2 +np.float32,0xbea1e69a,0xbec29d86,2 +np.float32,0x3f7b4bf8,0x3f2f154c,2 +np.float32,0x7eb47ecc,0x42af5c46,2 +np.float32,0x3d441e00,0x3d3f911a,2 +np.float32,0x7f54d40e,0x42b11388,2 +np.float32,0xbf47f17e,0xbfc26882,2 +np.float32,0x3ea7da57,0x3e912db4,2 +np.float32,0x3f59cc7b,0x3f1d984e,2 +np.float32,0x570e08,0x570e08,2 +np.float32,0x3e99560c,0x3e8620a2,2 +np.float32,0x3ecfbd14,0x3eae5e55,2 +np.float32,0x7e86be08,0x42aec698,2 +np.float32,0x3f10f28a,0x3ee5b5d3,2 +np.float32,0x7f228722,0x42b0897a,2 +np.float32,0x3f4b979b,0x3f15cd30,2 +np.float32,0xbf134283,0xbf5b30f9,2 +np.float32,0x3f2ae16a,0x3f02e64f,2 +np.float32,0x3e98e158,0x3e85c6cc,2 +np.float32,0x7ec39f27,0x42af857a,2 +np.float32,0x3effedb0,0x3ecf8cea,2 +np.float32,0xbd545620,0xbd5a09c1,2 +np.float32,0x503a28,0x503a28,2 +np.float32,0x3f712744,0x3f29e9a1,2 +np.float32,0x3edc6194,0x3eb748b1,2 +np.float32,0xbf4ec1e5,0xbfd2ff5f,2 +np.float32,0x3f46669e,0x3f12e4b5,2 +np.float32,0xabad3,0xabad3,2 +np.float32,0x80000000,0x80000000,2 +np.float32,0x803f2e6d,0x803f2e6d,2 +np.float32,0xbf431542,0xbfb7c3e6,2 +np.float32,0x3f6f2d53,0x3f28e496,2 +np.float32,0x546bd8,0x546bd8,2 +np.float32,0x25c80a,0x25c80a,2 +np.float32,0x3e50883c,0x3e3dcd7e,2 +np.float32,0xbf5fa2ba,0xc0045c14,2 +np.float32,0x80271c07,0x80271c07,2 +np.float32,0x8043755d,0x8043755d,2 +np.float32,0xbf3c5cea,0xbfaa5ee9,2 +np.float32,0x3f2fea38,0x3f05e6af,2 +np.float32,0x6da3dc,0x6da3dc,2 +np.float32,0xbf095945,0xbf44dc70,2 +np.float32,0xbe33d584,0xbe45c1f5,2 +np.float32,0x7eb41b2e,0x42af5b2b,2 +np.float32,0xbf0feb74,0xbf537242,2 +np.float32,0xbe96225a,0xbeb1b0b1,2 +np.float32,0x3f63b95f,0x3f22e700,2 +np.float32,0x0,0x0,2 +np.float32,0x3e20b0cc,0x3e154374,2 +np.float32,0xbf79880c,0xc06b6801,2 +np.float32,0xbea690b6,0xbec97b93,2 +np.float32,0xbf3e11ca,0xbfada449,2 +np.float32,0x7e7e6292,0x42aea912,2 +np.float32,0x3e793350,0x3e5f0b7b,2 +np.float32,0x802e7183,0x802e7183,2 +np.float32,0x3f1b3695,0x3ef2a788,2 
+np.float32,0x801efa20,0x801efa20,2 +np.float32,0x3f1ec43a,0x3ef70f42,2 +np.float32,0xbf12c5ed,0xbf5a0c52,2 +np.float32,0x8005e99c,0x8005e99c,2 +np.float32,0xbf79f5e7,0xc06fcca5,2 +np.float32,0x3ecbaf50,0x3eab7a03,2 +np.float32,0x46b0fd,0x46b0fd,2 +np.float32,0x3edb9023,0x3eb6b631,2 +np.float32,0x7f24bc41,0x42b09063,2 +np.float32,0xbd8d9328,0xbd92b4c6,2 +np.float32,0x3f2c5d7f,0x3f03c9d9,2 +np.float32,0x807bebc9,0x807bebc9,2 +np.float32,0x7f797a99,0x42b164e2,2 +np.float32,0x756e3c,0x756e3c,2 +np.float32,0x80416f8a,0x80416f8a,2 +np.float32,0x3e0d512a,0x3e04611a,2 +np.float32,0x3f7be3e6,0x3f2f61ec,2 +np.float32,0x80075c41,0x80075c41,2 +np.float32,0xbe850294,0xbe9a046c,2 +np.float32,0x684679,0x684679,2 +np.float32,0x3eb393c4,0x3e99eed2,2 +np.float32,0x3f4177c6,0x3f10195b,2 +np.float32,0x3dd1f402,0x3dc7dfe5,2 +np.float32,0x3ef484d4,0x3ec7e2e1,2 +np.float32,0x53eb8f,0x53eb8f,2 +np.float32,0x7f072cb6,0x42b02b20,2 +np.float32,0xbf1b6b55,0xbf6f28d4,2 +np.float32,0xbd8a98d8,0xbd8f827d,2 +np.float32,0x3eafb418,0x3e970e96,2 +np.float32,0x6555af,0x6555af,2 +np.float32,0x7dd5118e,0x42aceb6f,2 +np.float32,0x800a13f7,0x800a13f7,2 +np.float32,0x331a9d,0x331a9d,2 +np.float32,0x8063773f,0x8063773f,2 +np.float32,0x3e95e068,0x3e837553,2 +np.float32,0x80654b32,0x80654b32,2 +np.float32,0x3dabe0e0,0x3da50bb3,2 +np.float32,0xbf6283c3,0xc00a5280,2 +np.float32,0x80751cc5,0x80751cc5,2 +np.float32,0x3f668eb6,0x3f2465c0,2 +np.float32,0x3e13c058,0x3e0a048c,2 +np.float32,0x77780c,0x77780c,2 +np.float32,0x3f7d6e48,0x3f302868,2 +np.float32,0x7e31f9e3,0x42adf22f,2 +np.float32,0x246c7b,0x246c7b,2 +np.float32,0xbe915bf0,0xbeaafa6c,2 +np.float32,0xbf800000,0xff800000,2 +np.float32,0x3f698f42,0x3f25f8e0,2 +np.float32,0x7e698885,0x42ae7d48,2 +np.float32,0x3f5bbd42,0x3f1ea42c,2 +np.float32,0x5b8444,0x5b8444,2 +np.float32,0xbf6065f6,0xc005e2c6,2 +np.float32,0xbeb95036,0xbee60dad,2 +np.float32,0xbf44f846,0xbfbbcade,2 +np.float32,0xc96e5,0xc96e5,2 +np.float32,0xbf213e90,0xbf7e6eae,2 +np.float32,0xbeb309cc,0xbedc4fe6,2 +np.float32,0xbe781cf4,0xbe8e0fe6,2 +np.float32,0x7f0cf0db,0x42b04083,2 +np.float32,0xbf7b6143,0xc08078f9,2 +np.float32,0x80526fc6,0x80526fc6,2 +np.float32,0x3f092bf3,0x3edbaeec,2 +np.float32,0x3ecdf154,0x3ead16df,2 +np.float32,0x2fe85b,0x2fe85b,2 +np.float32,0xbf5100a0,0xbfd8f871,2 +np.float32,0xbec09d40,0xbef1a028,2 +np.float32,0x5e6a85,0x5e6a85,2 +np.float32,0xbec0e2a0,0xbef20f6b,2 +np.float32,0x3f72e788,0x3f2ad00d,2 +np.float32,0x880a6,0x880a6,2 +np.float32,0x3d9e90bf,0x3d98b9fc,2 +np.float32,0x15cf25,0x15cf25,2 +np.float32,0x10171b,0x10171b,2 +np.float32,0x805cf1aa,0x805cf1aa,2 +np.float32,0x3f19bd36,0x3ef0d0d2,2 +np.float32,0x3ebe2bda,0x3ea1b774,2 +np.float32,0xbecd8192,0xbf035c49,2 +np.float32,0x3e2ce508,0x3e1fc21b,2 +np.float32,0x290f,0x290f,2 +np.float32,0x803b679f,0x803b679f,2 +np.float32,0x1,0x1,2 +np.float32,0x807a9c76,0x807a9c76,2 +np.float32,0xbf65fced,0xc01257f8,2 +np.float32,0x3f783414,0x3f2d8475,2 +np.float32,0x3f2d9d92,0x3f0488da,2 +np.float32,0xbddb5798,0xbde80018,2 +np.float32,0x3e91afb8,0x3e8034e7,2 +np.float32,0xbf1b775a,0xbf6f476d,2 +np.float32,0xbf73a32c,0xc041f3ba,2 +np.float32,0xbea39364,0xbec5121b,2 +np.float32,0x80375b94,0x80375b94,2 +np.float32,0x3f331252,0x3f07c3e9,2 +np.float32,0xbf285774,0xbf892e74,2 +np.float32,0x3e699bb8,0x3e526d55,2 +np.float32,0x3f08208a,0x3eda523a,2 +np.float32,0xbf42fb4a,0xbfb78d60,2 +np.float32,0x8029c894,0x8029c894,2 +np.float32,0x3e926c0c,0x3e80c76e,2 +np.float32,0x801e4715,0x801e4715,2 +np.float32,0x3e4b36d8,0x3e395ffd,2 +np.float32,0x8041556b,0x8041556b,2 
+np.float32,0xbf2d99ba,0xbf9119bd,2 +np.float32,0x3ed83ea8,0x3eb46250,2 +np.float32,0xbe94a280,0xbeaf92b4,2 +np.float32,0x7f4c7a64,0x42b0ff0a,2 +np.float32,0x806d4022,0x806d4022,2 +np.float32,0xbed382f8,0xbf086d26,2 +np.float32,0x1846fe,0x1846fe,2 +np.float32,0xbe702558,0xbe88d4d8,2 +np.float32,0xbe650ee0,0xbe81a3cc,2 +np.float32,0x3ee9d088,0x3ec0970c,2 +np.float32,0x7f6d4498,0x42b14b30,2 +np.float32,0xbef9f9e6,0xbf2b7ddb,2 +np.float32,0xbf70c384,0xc0349370,2 +np.float32,0xbeff9e9e,0xbf3110c8,2 +np.float32,0xbef06372,0xbf224aa9,2 +np.float32,0xbf15a692,0xbf60e1fa,2 +np.float32,0x8058c117,0x8058c117,2 +np.float32,0xbd9f74b8,0xbda6017b,2 +np.float32,0x801bf130,0x801bf130,2 +np.float32,0x805da84c,0x805da84c,2 +np.float32,0xff800000,0xffc00000,2 +np.float32,0xbeb01de2,0xbed7d6d6,2 +np.float32,0x8077de08,0x8077de08,2 +np.float32,0x3e327668,0x3e2482c1,2 +np.float32,0xbe7add88,0xbe8fe1ab,2 +np.float32,0x805a3c2e,0x805a3c2e,2 +np.float32,0x80326a73,0x80326a73,2 +np.float32,0x800b8a34,0x800b8a34,2 +np.float32,0x8048c83a,0x8048c83a,2 +np.float32,0xbf3799d6,0xbfa1a975,2 +np.float32,0x807649c7,0x807649c7,2 +np.float32,0x3dfdbf90,0x3def3798,2 +np.float32,0xbf1b538a,0xbf6eec4c,2 +np.float32,0xbf1e5989,0xbf76baa0,2 +np.float32,0xc7a80,0xc7a80,2 +np.float32,0x8001be54,0x8001be54,2 +np.float32,0x3f435bbc,0x3f112c6d,2 +np.float32,0xbeabcff8,0xbed151d1,2 +np.float32,0x7de20c78,0x42ad09b7,2 +np.float32,0x3f0e6d2e,0x3ee27b1e,2 +np.float32,0xbf0cb352,0xbf4c3267,2 +np.float32,0x7f6ec06f,0x42b14e61,2 +np.float32,0x7f6fa8ef,0x42b15053,2 +np.float32,0xbf3d2a6a,0xbfabe623,2 +np.float32,0x7f077a4c,0x42b02c46,2 +np.float32,0xbf2a68dc,0xbf8c3cc4,2 +np.float32,0x802a5dbe,0x802a5dbe,2 +np.float32,0x807f631c,0x807f631c,2 +np.float32,0x3dc9b8,0x3dc9b8,2 +np.float32,0x3ebdc1b7,0x3ea16a0a,2 +np.float32,0x7ef29dab,0x42aff3b5,2 +np.float32,0x3e8ab1cc,0x3e757806,2 +np.float32,0x3f27e88e,0x3f011c6d,2 +np.float32,0x3cfd1455,0x3cf93fb5,2 +np.float32,0x7f7eebf5,0x42b16fef,2 +np.float32,0x3c9b2140,0x3c99ade9,2 +np.float32,0x7e928601,0x42aef183,2 +np.float32,0xbd7d2db0,0xbd82abae,2 +np.float32,0x3e6f0df3,0x3e56da20,2 +np.float32,0x7d36a2fc,0x42ab39a3,2 +np.float32,0xbf49d3a2,0xbfc6c859,2 +np.float32,0x7ee541d3,0x42afd6b6,2 +np.float32,0x80753dc0,0x80753dc0,2 +np.float32,0x3f4ce486,0x3f16865d,2 +np.float32,0x39e701,0x39e701,2 +np.float32,0x3f3d9ede,0x3f0de5fa,2 +np.float32,0x7fafb2,0x7fafb2,2 +np.float32,0x3e013fdc,0x3df37090,2 +np.float32,0x807b6a2c,0x807b6a2c,2 +np.float32,0xbe86800a,0xbe9c08c7,2 +np.float32,0x7f40f080,0x42b0e14d,2 +np.float32,0x7eef5afe,0x42afecc8,2 +np.float32,0x7ec30052,0x42af83da,2 +np.float32,0x3eacf768,0x3e9503e1,2 +np.float32,0x7f13ef0e,0x42b0594e,2 +np.float32,0x80419f4a,0x80419f4a,2 +np.float32,0xbf485932,0xbfc3562a,2 +np.float32,0xbe8a24d6,0xbea10011,2 +np.float32,0xbda791c0,0xbdaed2bc,2 +np.float32,0x3e9b5169,0x3e87a67d,2 +np.float32,0x807dd882,0x807dd882,2 +np.float32,0x7f40170e,0x42b0df0a,2 +np.float32,0x7f02f7f9,0x42b01af1,2 +np.float32,0x3ea38bf9,0x3e8decde,2 +np.float32,0x3e2e7ce8,0x3e211ed4,2 +np.float32,0x70a7a6,0x70a7a6,2 +np.float32,0x7d978592,0x42ac3ce7,2 +np.float32,0x804d12d0,0x804d12d0,2 +np.float32,0x80165dc8,0x80165dc8,2 +np.float32,0x80000001,0x80000001,2 +np.float32,0x3e325da0,0x3e246da6,2 +np.float32,0xbe063bb8,0xbe0fe281,2 +np.float32,0x160b8,0x160b8,2 +np.float32,0xbe5687a4,0xbe70bbef,2 +np.float32,0x7f11ab34,0x42b05168,2 +np.float32,0xc955c,0xc955c,2 +np.float32,0xbea0003a,0xbebfd826,2 +np.float32,0x3f7fbdd9,0x3f315102,2 +np.float32,0xbe61aefc,0xbe7ef121,2 
+np.float32,0xbf1b9873,0xbf6f9bc3,2 +np.float32,0x3a6d14,0x3a6d14,2 +np.float32,0xbf1ad3b4,0xbf6da808,2 +np.float32,0x3ed2dd24,0x3eb0963d,2 +np.float32,0xbe81a4ca,0xbe957d52,2 +np.float32,0x7f1be3e9,0x42b07421,2 +np.float32,0x7f5ce943,0x42b1269e,2 +np.float32,0x7eebcbdf,0x42afe51d,2 +np.float32,0x807181b5,0x807181b5,2 +np.float32,0xbecb03ba,0xbf0149ad,2 +np.float32,0x42edb8,0x42edb8,2 +np.float32,0xbf3aeec8,0xbfa7b13f,2 +np.float32,0xbd0c4f00,0xbd0ec4a0,2 +np.float32,0x3e48d260,0x3e376070,2 +np.float32,0x1a9731,0x1a9731,2 +np.float32,0x7f323be4,0x42b0b8b5,2 +np.float32,0x1a327f,0x1a327f,2 +np.float32,0x17f1fc,0x17f1fc,2 +np.float32,0xbf2f4f9b,0xbf93c91a,2 +np.float32,0x3ede8934,0x3eb8c9c3,2 +np.float32,0xbf56aaac,0xbfe968bb,2 +np.float32,0x3e22cb5a,0x3e17148c,2 +np.float32,0x7d9def,0x7d9def,2 +np.float32,0x8045b963,0x8045b963,2 +np.float32,0x77404f,0x77404f,2 +np.float32,0x7e2c9efb,0x42ade28b,2 +np.float32,0x8058ad89,0x8058ad89,2 +np.float32,0x7f4139,0x7f4139,2 +np.float32,0x8020e12a,0x8020e12a,2 +np.float32,0x800c9daa,0x800c9daa,2 +np.float32,0x7f2c5ac5,0x42b0a789,2 +np.float32,0x3f04a47b,0x3ed5c043,2 +np.float32,0x804692d5,0x804692d5,2 +np.float32,0xbf6e7fa4,0xc02bb493,2 +np.float32,0x80330756,0x80330756,2 +np.float32,0x7f3e29ad,0x42b0d9e1,2 +np.float32,0xbebf689a,0xbeefb24d,2 +np.float32,0x3f29a86c,0x3f022a56,2 +np.float32,0x3e3bd1c0,0x3e2c72b3,2 +np.float32,0x3f78f2e8,0x3f2de546,2 +np.float32,0x3f3709be,0x3f0a16af,2 +np.float32,0x3e11f150,0x3e086f97,2 +np.float32,0xbf5867ad,0xbfeee8a0,2 +np.float32,0xbebfb328,0xbef0296c,2 +np.float32,0x2f7f15,0x2f7f15,2 +np.float32,0x805cfe84,0x805cfe84,2 +np.float32,0xbf504e01,0xbfd71589,2 +np.float32,0x3ee0903c,0x3eba330c,2 +np.float32,0xbd838990,0xbd87f399,2 +np.float32,0x3f14444e,0x3ee9ee7d,2 +np.float32,0x7e352583,0x42adfb3a,2 +np.float32,0x7e76f824,0x42ae99ec,2 +np.float32,0x3f772d00,0x3f2cfebf,2 +np.float32,0x801f7763,0x801f7763,2 +np.float32,0x3f760bf5,0x3f2c6b87,2 +np.float32,0xbf0bb696,0xbf4a03a5,2 +np.float32,0x3f175d2c,0x3eedd6d2,2 +np.float32,0xbf5723f8,0xbfeae288,2 +np.float32,0x24de0a,0x24de0a,2 +np.float32,0x3cd73f80,0x3cd47801,2 +np.float32,0x7f013305,0x42b013fa,2 +np.float32,0x3e3ad425,0x3e2b9c50,2 +np.float32,0x7d3d16,0x7d3d16,2 +np.float32,0x3ef49738,0x3ec7ef54,2 +np.float32,0x3f5b8612,0x3f1e8678,2 +np.float32,0x7f0eeb5c,0x42b047a7,2 +np.float32,0x7e9d7cb0,0x42af1675,2 +np.float32,0xbdd1cfb0,0xbddd5aa0,2 +np.float32,0xbf645dba,0xc00e78fe,2 +np.float32,0x3f511174,0x3f18d56c,2 +np.float32,0x3d91ad00,0x3d8cba62,2 +np.float32,0x805298da,0x805298da,2 +np.float32,0xbedb6af4,0xbf0f4090,2 +np.float32,0x3d23b1ba,0x3d208205,2 +np.float32,0xbea5783e,0xbec7dc87,2 +np.float32,0x79d191,0x79d191,2 +np.float32,0x3e894413,0x3e7337da,2 +np.float32,0x80800000,0x80800000,2 +np.float32,0xbf34a8d3,0xbf9c907b,2 +np.float32,0x3bae779a,0x3bae011f,2 +np.float32,0x8049284d,0x8049284d,2 +np.float32,0x3eb42cc4,0x3e9a600b,2 +np.float32,0x3da1e2d0,0x3d9bce5f,2 +np.float32,0x3f364b8a,0x3f09a7af,2 +np.float32,0x3d930b10,0x3d8e0118,2 +np.float32,0x8061f8d7,0x8061f8d7,2 +np.float32,0x3f473213,0x3f13573b,2 +np.float32,0x3f1e2a38,0x3ef65102,2 +np.float32,0x8068f7d9,0x8068f7d9,2 +np.float32,0x3f181ef8,0x3eeeca2c,2 +np.float32,0x3eeb6168,0x3ec1a9f5,2 +np.float32,0xc2db6,0xc2db6,2 +np.float32,0x3ef7b578,0x3eca0a69,2 +np.float32,0xbf5b5a84,0xbff8d075,2 +np.float32,0x7f479d5f,0x42b0f2b7,2 +np.float32,0x3e6f3c24,0x3e56ff92,2 +np.float32,0x3f45543a,0x3f1249f0,2 +np.float32,0xbea7c1fa,0xbecb40d2,2 +np.float32,0x7de082,0x7de082,2 +np.float32,0x383729,0x383729,2 
+np.float32,0xbd91cb90,0xbd973eb3,2 +np.float32,0x7f320218,0x42b0b80f,2 +np.float32,0x5547f2,0x5547f2,2 +np.float32,0x291fe4,0x291fe4,2 +np.float32,0xbe078ba0,0xbe11655f,2 +np.float32,0x7e0c0658,0x42ad7764,2 +np.float32,0x7e129a2b,0x42ad8ee5,2 +np.float32,0x3f7c96d4,0x3f2fbc0c,2 +np.float32,0x3f800000,0x3f317218,2 +np.float32,0x7f131754,0x42b05662,2 +np.float32,0x15f833,0x15f833,2 +np.float32,0x80392ced,0x80392ced,2 +np.float32,0x3f7c141a,0x3f2f7a36,2 +np.float32,0xbf71c03f,0xc038dcfd,2 +np.float32,0xbe14fb2c,0xbe20fff3,2 +np.float32,0xbee0bac6,0xbf13f14c,2 +np.float32,0x801a32dd,0x801a32dd,2 +np.float32,0x8e12d,0x8e12d,2 +np.float32,0x3f48c606,0x3f143a04,2 +np.float32,0x7f418af5,0x42b0e2e6,2 +np.float32,0x3f1f2918,0x3ef78bb7,2 +np.float32,0x11141b,0x11141b,2 +np.float32,0x3e9fc9e8,0x3e8b11ad,2 +np.float32,0xbea5447a,0xbec79010,2 +np.float32,0xbe31d904,0xbe4359db,2 +np.float32,0x80184667,0x80184667,2 +np.float32,0xbf00503c,0xbf3212c2,2 +np.float32,0x3e0328cf,0x3df6d425,2 +np.float32,0x7ee8e1b7,0x42afdebe,2 +np.float32,0xbef95e24,0xbf2ae5db,2 +np.float32,0x7f3e4eed,0x42b0da45,2 +np.float32,0x3f43ee85,0x3f117fa0,2 +np.float32,0xbcfa2ac0,0xbcfe10fe,2 +np.float32,0x80162774,0x80162774,2 +np.float32,0x372e8b,0x372e8b,2 +np.float32,0x3f263802,0x3f0016b0,2 +np.float32,0x8008725f,0x8008725f,2 +np.float32,0x800beb40,0x800beb40,2 +np.float32,0xbe93308e,0xbead8a77,2 +np.float32,0x3d8a4240,0x3d85cab8,2 +np.float32,0x80179de0,0x80179de0,2 +np.float32,0x7f4a98f2,0x42b0fa4f,2 +np.float32,0x3f0d214e,0x3ee0cff1,2 +np.float32,0x80536c2c,0x80536c2c,2 +np.float32,0x7e7038ed,0x42ae8bbe,2 +np.float32,0x7f345af9,0x42b0bec4,2 +np.float32,0xbf243219,0xbf83442f,2 +np.float32,0x7e0d5555,0x42ad7c27,2 +np.float32,0x762e95,0x762e95,2 +np.float32,0x7ebf4548,0x42af79f6,2 +np.float32,0x8079639e,0x8079639e,2 +np.float32,0x3ef925c0,0x3ecb0260,2 +np.float32,0x3f708695,0x3f2996d6,2 +np.float32,0xfca9f,0xfca9f,2 +np.float32,0x8060dbf4,0x8060dbf4,2 +np.float32,0x4c8840,0x4c8840,2 +np.float32,0xbea922ee,0xbecd4ed5,2 +np.float32,0xbf4f28a9,0xbfd40b98,2 +np.float32,0xbe25ad48,0xbe34ba1b,2 +np.float32,0x3f2fb254,0x3f05c58c,2 +np.float32,0x3f73bcc2,0x3f2b3d5f,2 +np.float32,0xbf479a07,0xbfc1a165,2 +np.float32,0xbeb9a808,0xbee69763,2 +np.float32,0x7eb16a65,0x42af5376,2 +np.float32,0xbeb3e442,0xbedda042,2 +np.float32,0x3d8f439c,0x3d8a79ac,2 +np.float32,0x80347516,0x80347516,2 +np.float32,0x3e8a0c5d,0x3e74738c,2 +np.float32,0xbf0383a4,0xbf389289,2 +np.float32,0x806be8f5,0x806be8f5,2 +np.float32,0x8023f0c5,0x8023f0c5,2 +np.float32,0x2060e9,0x2060e9,2 +np.float32,0xbf759eba,0xc04d239f,2 +np.float32,0x3d84cc5a,0x3d80ab96,2 +np.float32,0xbf57746b,0xbfebdf87,2 +np.float32,0x3e418417,0x3e31401f,2 +np.float32,0xaecce,0xaecce,2 +np.float32,0x3cd1766f,0x3cced45c,2 +np.float32,0x53724a,0x53724a,2 +np.float32,0x3f773710,0x3f2d03de,2 +np.float32,0x8013d040,0x8013d040,2 +np.float32,0x4d0eb2,0x4d0eb2,2 +np.float32,0x8014364a,0x8014364a,2 +np.float32,0x7f3c56c9,0x42b0d4f2,2 +np.float32,0x3eee1e1c,0x3ec3891a,2 +np.float32,0xbdda3eb8,0xbde6c5a0,2 +np.float32,0x26ef4a,0x26ef4a,2 +np.float32,0x7ed3370c,0x42afacbf,2 +np.float32,0xbf06e31b,0xbf3f9ab7,2 +np.float32,0xbe3185f0,0xbe42f556,2 +np.float32,0x3dcf9abe,0x3dc5be41,2 +np.float32,0xbf3696d9,0xbf9fe2bd,2 +np.float32,0x3e68ee50,0x3e51e01a,2 +np.float32,0x3f3d4cc2,0x3f0db6ca,2 +np.float32,0x7fa00000,0x7fe00000,2 +np.float32,0xbf03070c,0xbf3792d0,2 +np.float32,0x3ea79e6c,0x3e910092,2 +np.float32,0xbf1a393a,0xbf6c2251,2 +np.float32,0x3f41eb0e,0x3f105afc,2 +np.float32,0x3ceadb2f,0x3ce78d79,2 
+np.float32,0xbf5dc105,0xc000be2c,2 +np.float32,0x7ebb5a0e,0x42af6f5c,2 +np.float32,0xbf7c44eb,0xc0875058,2 +np.float32,0x6aaaf4,0x6aaaf4,2 +np.float32,0x807d8f23,0x807d8f23,2 +np.float32,0xbee6b142,0xbf194fef,2 +np.float32,0xbe83f256,0xbe989526,2 +np.float32,0x7d588e,0x7d588e,2 +np.float32,0x7cc80131,0x42aa0542,2 +np.float32,0x3e0ab198,0x3e02124f,2 +np.float32,0xbf6e64db,0xc02b52eb,2 +np.float32,0x3d238b56,0x3d205d1b,2 +np.float32,0xbeb408e2,0xbeddd8bc,2 +np.float32,0x3f78340d,0x3f2d8471,2 +np.float32,0x806162a3,0x806162a3,2 +np.float32,0x804e484f,0x804e484f,2 +np.float32,0xbeb8c576,0xbee53466,2 +np.float32,0x807aab15,0x807aab15,2 +np.float32,0x3f523e20,0x3f197ab8,2 +np.float32,0xbf009190,0xbf3295de,2 +np.float32,0x3df43da5,0x3de6bd82,2 +np.float32,0x7f639aea,0x42b135e6,2 +np.float32,0x3f1e638a,0x3ef697da,2 +np.float32,0xbf4884de,0xbfc3bac3,2 +np.float32,0xbe9336b6,0xbead931b,2 +np.float32,0x6daf7f,0x6daf7f,2 +np.float32,0xbf1fc152,0xbf7a70b1,2 +np.float32,0x3f103720,0x3ee4c649,2 +np.float32,0x3eeaa227,0x3ec126df,2 +np.float32,0x7f7ea945,0x42b16f69,2 +np.float32,0x3d3cd800,0x3d389ead,2 +np.float32,0x3f3d7268,0x3f0dcc6e,2 +np.float32,0xbf3c1b41,0xbfa9e2e3,2 +np.float32,0x3ecf3818,0x3eadffb2,2 +np.float32,0x3f1af312,0x3ef25372,2 +np.float32,0x48fae4,0x48fae4,2 +np.float64,0x7fedaa1ee4fb543d,0x40862da7ca7c308e,2 +np.float64,0x8007d2d810efa5b1,0x8007d2d810efa5b1,2 +np.float64,0x3fc385e069270bc0,0x3fc22b8884cf2c3b,2 +np.float64,0x68ed4130d1da9,0x68ed4130d1da9,2 +np.float64,0x8008e93e58d1d27d,0x8008e93e58d1d27d,2 +np.float64,0xbfd3d62852a7ac50,0xbfd7be3a7ad1af02,2 +np.float64,0xbfc1fa0ba923f418,0xbfc35f0f19447df7,2 +np.float64,0xbfe01b8cec20371a,0xbfe6658c7e6c8e50,2 +np.float64,0xbfeda81a147b5034,0xc004e9c94f2b91c1,2 +np.float64,0xbfe1c36a97e386d5,0xbfe9ead4d6beaa92,2 +np.float64,0x3fe50be51f2a17ca,0x3fe02c8067d9e5c5,2 +np.float64,0x3febed4d3337da9a,0x3fe413956466134f,2 +np.float64,0x80068ea59ced1d4c,0x80068ea59ced1d4c,2 +np.float64,0x3febe77d5877cefb,0x3fe4107ac088bc71,2 +np.float64,0x800ae77617d5ceed,0x800ae77617d5ceed,2 +np.float64,0x3fd0546b60a0a8d7,0x3fcd16c2e995ab23,2 +np.float64,0xbfe33e1476667c29,0xbfed6d7faec4db2f,2 +np.float64,0x3fe9d2fd51b3a5fb,0x3fe2eef834310219,2 +np.float64,0x8004249878284932,0x8004249878284932,2 +np.float64,0xbfd5b485c72b690c,0xbfda828ccc6a7a5c,2 +np.float64,0x7fcd6e6b6b3adcd6,0x408622807f04768e,2 +np.float64,0x3fd7f9c32caff386,0x3fd45d024514b8da,2 +np.float64,0x7f87eb9d702fd73a,0x40860aa99fcff27f,2 +np.float64,0xbfc5d1f6fb2ba3ec,0xbfc7ec367cb3fecc,2 +np.float64,0x8008316a44d062d5,0x8008316a44d062d5,2 +np.float64,0xbfd54e4358aa9c86,0xbfd9e889d2998a4a,2 +np.float64,0xda65facdb4cc0,0xda65facdb4cc0,2 +np.float64,0x3fc5b4f6f32b69f0,0x3fc40d13aa8e248b,2 +np.float64,0x3fd825a5d5b04b4c,0x3fd47ce73e04d3ff,2 +np.float64,0x7ac9d56ef593b,0x7ac9d56ef593b,2 +np.float64,0xbfd0a51977214a32,0xbfd34702071428be,2 +np.float64,0x3fd21f620b243ec4,0x3fcfea0c02193640,2 +np.float64,0x3fe6fb3f1b2df67e,0x3fe151ffb18c983b,2 +np.float64,0x700de022e01bd,0x700de022e01bd,2 +np.float64,0xbfbb76b81236ed70,0xbfbd0d31deea1ec7,2 +np.float64,0x3fecfc3856f9f870,0x3fe4a2fcadf221e0,2 +np.float64,0x3fede286517bc50c,0x3fe51af2fbd6ef63,2 +np.float64,0x7fdc8da96c391b52,0x408627ce09cfef2b,2 +np.float64,0x8000edfcfb81dbfb,0x8000edfcfb81dbfb,2 +np.float64,0x8009ebc42af3d789,0x8009ebc42af3d789,2 +np.float64,0x7fd658aaf8acb155,0x408625d80cd1ccc9,2 +np.float64,0x3feea584a37d4b09,0x3fe57f29a73729cd,2 +np.float64,0x4cfe494699fca,0x4cfe494699fca,2 +np.float64,0xbfe9d96460b3b2c9,0xbffa62ecfa026c77,2 
+np.float64,0x7fdb3852c3b670a5,0x4086276c191dc9b1,2 +np.float64,0xbfe4d1fc9ee9a3f9,0xbff0d37ce37cf479,2 +np.float64,0xffefffffffffffff,0xfff8000000000000,2 +np.float64,0xbfd1c43d7fa3887a,0xbfd4cfbefb5f2c43,2 +np.float64,0x3fec4a8e0d78951c,0x3fe4453a82ca2570,2 +np.float64,0x7fafed74583fdae8,0x4086181017b8dac9,2 +np.float64,0x80076c4ebcced89e,0x80076c4ebcced89e,2 +np.float64,0x8001a9aa7b235356,0x8001a9aa7b235356,2 +np.float64,0x121260fe2424d,0x121260fe2424d,2 +np.float64,0x3fddd028e3bba052,0x3fd87998c4c43c5b,2 +np.float64,0x800ed1cf4a9da39f,0x800ed1cf4a9da39f,2 +np.float64,0xbfef2e63d7fe5cc8,0xc00d53480b16971b,2 +np.float64,0xbfedde3309fbbc66,0xc005ab55b7a7c127,2 +np.float64,0x3fda3e1e85b47c3d,0x3fd5fddafd8d6729,2 +np.float64,0x8007c6443c6f8c89,0x8007c6443c6f8c89,2 +np.float64,0xbfe101705f2202e0,0xbfe8420817665121,2 +np.float64,0x7fe0bff3c1e17fe7,0x4086291539c56d80,2 +np.float64,0x7fe6001dab6c003a,0x40862b43aa7cb060,2 +np.float64,0x7fbdecf7de3bd9ef,0x40861d170b1c51a5,2 +np.float64,0xbfc0fd508c21faa0,0xbfc23a5876e99fa3,2 +np.float64,0xbfcf6eb14f3edd64,0xbfd208cbf742c8ea,2 +np.float64,0x3f6d40ea403a81d5,0x3f6d33934ab8e799,2 +np.float64,0x7fc32600b6264c00,0x40861f10302357e0,2 +np.float64,0x3fd05870baa0b0e0,0x3fcd1d2af420fac7,2 +np.float64,0x80051d5120aa3aa3,0x80051d5120aa3aa3,2 +np.float64,0x3fdb783fcfb6f080,0x3fd6db229658c083,2 +np.float64,0x3fe0b61199e16c24,0x3fdae41e277be2eb,2 +np.float64,0x3daf62167b5ed,0x3daf62167b5ed,2 +np.float64,0xbfec3c53b6f878a7,0xc0011f0ce7a78a2a,2 +np.float64,0x800fc905161f920a,0x800fc905161f920a,2 +np.float64,0x3fdc7b9cc138f73a,0x3fd78f9c2360e661,2 +np.float64,0x7fe4079e97a80f3c,0x40862a83795f2443,2 +np.float64,0x8010000000000000,0x8010000000000000,2 +np.float64,0x7fe6da5345adb4a6,0x40862b9183c1e4b0,2 +np.float64,0xbfd0a76667214ecc,0xbfd34a1e0c1f6186,2 +np.float64,0x37fb0b906ff62,0x37fb0b906ff62,2 +np.float64,0x7fe170e59fa2e1ca,0x408629680a55e5c5,2 +np.float64,0x3fea900c77752019,0x3fe356eec75aa345,2 +np.float64,0x3fc575c63a2aeb8c,0x3fc3d701167d76b5,2 +np.float64,0x3fe8b45da87168bc,0x3fe24ecbb778fd44,2 +np.float64,0xbfcb990ab5373214,0xbfcf1596c076813c,2 +np.float64,0xf146fdfbe28e0,0xf146fdfbe28e0,2 +np.float64,0x8001fcd474c3f9aa,0x8001fcd474c3f9aa,2 +np.float64,0xbfe9b555eeb36aac,0xbffa0630c3bb485b,2 +np.float64,0x800f950be83f2a18,0x800f950be83f2a18,2 +np.float64,0x7feb0e03ab761c06,0x40862ceb30e36887,2 +np.float64,0x7fca51bd4a34a37a,0x4086219b9dfd35c9,2 +np.float64,0xbfdc27c34cb84f86,0xbfe28ccde8d6bc08,2 +np.float64,0x80009ce1714139c4,0x80009ce1714139c4,2 +np.float64,0x8005290fb1ea5220,0x8005290fb1ea5220,2 +np.float64,0xbfee81e6473d03cd,0xc00885972ca1699b,2 +np.float64,0x7fcfb11a373f6233,0x408623180b8f75d9,2 +np.float64,0xbfcb9c4bfd373898,0xbfcf19bd25881928,2 +np.float64,0x7feaec5885f5d8b0,0x40862ce136050e6c,2 +np.float64,0x8009e17a4a53c2f5,0x8009e17a4a53c2f5,2 +np.float64,0xbfe1cceb9e6399d7,0xbfea0038bd3def20,2 +np.float64,0x8009170bd7122e18,0x8009170bd7122e18,2 +np.float64,0xb2b6f7f1656df,0xb2b6f7f1656df,2 +np.float64,0x3fc75bfd1f2eb7f8,0x3fc574c858332265,2 +np.float64,0x3fa24c06ec249800,0x3fa1fa462ffcb8ec,2 +np.float64,0xaa9a4d2d5534a,0xaa9a4d2d5534a,2 +np.float64,0xbfd7b76208af6ec4,0xbfdda0c3200dcc9f,2 +np.float64,0x7f8cbab73039756d,0x40860c20cba57a94,2 +np.float64,0x3fdbcf9f48b79f3f,0x3fd71827a60e8b6d,2 +np.float64,0xbfdd60f71a3ac1ee,0xbfe3a94bc8cf134d,2 +np.float64,0xb9253589724a7,0xb9253589724a7,2 +np.float64,0xbfcf28e37e3e51c8,0xbfd1da9977b741e3,2 +np.float64,0x80011457f7e228b1,0x80011457f7e228b1,2 +np.float64,0x7fec33df737867be,0x40862d404a897122,2 
+np.float64,0xae55f8f95cabf,0xae55f8f95cabf,2 +np.float64,0xbfc1ab9397235728,0xbfc303e5533d4a5f,2 +np.float64,0x7fef0f84b3be1f08,0x40862e05f9ba7118,2 +np.float64,0x7fdc94f328b929e5,0x408627d01449d825,2 +np.float64,0x3fee1b598c7c36b3,0x3fe53847be166834,2 +np.float64,0x3fee8326f37d064e,0x3fe56d96f3fbcf43,2 +np.float64,0x3fe7b18a83ef6316,0x3fe1bb6a6d48c675,2 +np.float64,0x3fe5db969c6bb72e,0x3fe0a8d7d151996c,2 +np.float64,0x3e3391d27c673,0x3e3391d27c673,2 +np.float64,0x3fe79a46d76f348e,0x3fe1ae09a96ea628,2 +np.float64,0x7ff4000000000000,0x7ffc000000000000,2 +np.float64,0x7fe57d6505aafac9,0x40862b13925547f1,2 +np.float64,0x3fc433371d28666e,0x3fc2c196a764c47b,2 +np.float64,0x8008dbf69cd1b7ee,0x8008dbf69cd1b7ee,2 +np.float64,0xbfe744f459ee89e8,0xbff4c847ad3ee152,2 +np.float64,0x80098aa245331545,0x80098aa245331545,2 +np.float64,0x6747112ece8e3,0x6747112ece8e3,2 +np.float64,0x5d342a40ba69,0x5d342a40ba69,2 +np.float64,0xf7a17739ef42f,0xf7a17739ef42f,2 +np.float64,0x3fe1b34a9d236695,0x3fdc2d7c4e2c347a,2 +np.float64,0x7fb53bf5ec2a77eb,0x40861a585ec8f7ff,2 +np.float64,0xbfe6256f1cec4ade,0xbff2d89a36be65ae,2 +np.float64,0xb783bc9b6f078,0xb783bc9b6f078,2 +np.float64,0xbfedf74a3bfbee94,0xc0060bb6f2bc11ef,2 +np.float64,0x3fda2a5eccb454be,0x3fd5efd7f18b8e81,2 +np.float64,0xbfb3838ab2270718,0xbfb44c337fbca3c3,2 +np.float64,0x3fb4ac6dc22958e0,0x3fb3e194ca01a502,2 +np.float64,0x76c11aaaed824,0x76c11aaaed824,2 +np.float64,0x80025bb1af04b764,0x80025bb1af04b764,2 +np.float64,0x3fdc02740ab804e8,0x3fd73b8cd6f95f19,2 +np.float64,0x3fe71856f5ee30ae,0x3fe162e9fafb4428,2 +np.float64,0x800236f332646de7,0x800236f332646de7,2 +np.float64,0x7fe13fd9d2e27fb3,0x408629516b42a317,2 +np.float64,0x7fdf6bbd34bed779,0x40862892069d805c,2 +np.float64,0x3fd4727beba8e4f8,0x3fd1be5b48d9e282,2 +np.float64,0x800e0fac9e5c1f59,0x800e0fac9e5c1f59,2 +np.float64,0xfb54423ff6a89,0xfb54423ff6a89,2 +np.float64,0x800fbf7ed47f7efe,0x800fbf7ed47f7efe,2 +np.float64,0x3fe9d41fa2f3a840,0x3fe2ef98dc1fd463,2 +np.float64,0x800d733e805ae67d,0x800d733e805ae67d,2 +np.float64,0x3feebe4c46fd7c98,0x3fe58bcf7f47264e,2 +np.float64,0x7fe1ab77b5e356ee,0x40862982bb3dce34,2 +np.float64,0xbfdddac05abbb580,0xbfe41aa45f72d5a2,2 +np.float64,0x3fe14219dee28434,0x3fdb9b137d1f1220,2 +np.float64,0x3fe25d3d5a24ba7b,0x3fdd06e1cf32d35a,2 +np.float64,0x8000fa4fbe81f4a0,0x8000fa4fbe81f4a0,2 +np.float64,0x3fe303e23e6607c4,0x3fddd94982efa9f1,2 +np.float64,0x3fe89cf5d83139ec,0x3fe24193a2e12f75,2 +np.float64,0x3fe9b36ef87366de,0x3fe2dd7cdc25a4a5,2 +np.float64,0xbfdb8b38f8371672,0xbfe2023ba7e002bb,2 +np.float64,0xafc354955f86b,0xafc354955f86b,2 +np.float64,0xbfe2f3d49e65e7a9,0xbfecb557a94123d3,2 +np.float64,0x800496617c092cc4,0x800496617c092cc4,2 +np.float64,0x32db0cfa65b62,0x32db0cfa65b62,2 +np.float64,0xbfd893bfa2b12780,0xbfdf02a8c1e545aa,2 +np.float64,0x7fd5ac927d2b5924,0x408625997e7c1f9b,2 +np.float64,0x3fde9defb8bd3be0,0x3fd9056190986349,2 +np.float64,0x80030cfeb54619fe,0x80030cfeb54619fe,2 +np.float64,0x3fcba85b273750b8,0x3fc90a5ca976594f,2 +np.float64,0x3fe98f6f5cf31edf,0x3fe2c97fcb4eca25,2 +np.float64,0x3fe33dbf90667b80,0x3fde21b83321b993,2 +np.float64,0x3fe4686636e8d0cc,0x3fdf928cdca751b3,2 +np.float64,0x80018ade6ce315be,0x80018ade6ce315be,2 +np.float64,0x7fa9af70c8335ee1,0x408616528cd5a906,2 +np.float64,0x3fbeb460aa3d68c0,0x3fbcff96b00a2193,2 +np.float64,0x7fa82c869830590c,0x408615d6598d9368,2 +np.float64,0xd08c0e6fa1182,0xd08c0e6fa1182,2 +np.float64,0x3fef4eb750fe9d6f,0x3fe5d522fd4e7f64,2 +np.float64,0xbfc586f5492b0dec,0xbfc791eaae92aad1,2 
+np.float64,0x7fede64ac7bbcc95,0x40862db7f444fa7b,2 +np.float64,0x3fe540003d6a8000,0x3fe04bdfc2916a0b,2 +np.float64,0x8009417fe6f28300,0x8009417fe6f28300,2 +np.float64,0x3fe6959cf16d2b3a,0x3fe116a1ce01887b,2 +np.float64,0x3fb0a40036214800,0x3fb01f447778219a,2 +np.float64,0x3feff26e91ffe4dd,0x3fe627798fc859a7,2 +np.float64,0x7fed8e46cd7b1c8d,0x40862da044a1d102,2 +np.float64,0x7fec4eb774f89d6e,0x40862d47e43edb53,2 +np.float64,0x3fe800e5e07001cc,0x3fe1e8e2b9105fc2,2 +np.float64,0x800f4eb2f9be9d66,0x800f4eb2f9be9d66,2 +np.float64,0x800611659bcc22cc,0x800611659bcc22cc,2 +np.float64,0x3fd66e65d2acdccc,0x3fd33ad63a5e1000,2 +np.float64,0x800a9085b7f5210c,0x800a9085b7f5210c,2 +np.float64,0x7fdf933a3fbf2673,0x4086289c0e292f2b,2 +np.float64,0x1cd1ba7a39a38,0x1cd1ba7a39a38,2 +np.float64,0xbfefd0b10fffa162,0xc0149ded900ed851,2 +np.float64,0xbfe8c63485b18c69,0xbff7cf3078b1574f,2 +np.float64,0x3fecde56ca79bcae,0x3fe4934afbd7dda9,2 +np.float64,0x8006cd6888cd9ad2,0x8006cd6888cd9ad2,2 +np.float64,0x3fd7a391c2af4724,0x3fd41e2f74df2329,2 +np.float64,0x3fe6a8ad58ed515a,0x3fe121ccfb28e6f5,2 +np.float64,0x7fe18a80dd631501,0x40862973c09086b9,2 +np.float64,0xbf74fd6d8029fb00,0xbf750b3e368ebe6b,2 +np.float64,0x3fdd35e93dba6bd4,0x3fd810071faaffad,2 +np.float64,0x3feb0d8f57361b1f,0x3fe39b3abdef8b7a,2 +np.float64,0xbfd5ec7288abd8e6,0xbfdad764df0d2ca1,2 +np.float64,0x7fdc848272b90904,0x408627cb78f3fb9e,2 +np.float64,0x800ed3eda91da7db,0x800ed3eda91da7db,2 +np.float64,0x3fefac64857f58c9,0x3fe60459dbaad1ba,2 +np.float64,0x3fd1df7a5ba3bef4,0x3fcf864a39b926ff,2 +np.float64,0xfe26ca4bfc4da,0xfe26ca4bfc4da,2 +np.float64,0xbfd1099f8da21340,0xbfd3cf6e6efe934b,2 +np.float64,0xbfe15de9a7a2bbd4,0xbfe909cc895f8795,2 +np.float64,0x3fe89714ed712e2a,0x3fe23e40d31242a4,2 +np.float64,0x800387113e470e23,0x800387113e470e23,2 +np.float64,0x3fe4f80730e9f00e,0x3fe0208219314cf1,2 +np.float64,0x2f95a97c5f2b6,0x2f95a97c5f2b6,2 +np.float64,0x800ea7cdd87d4f9c,0x800ea7cdd87d4f9c,2 +np.float64,0xbf64b967c0297300,0xbf64c020a145b7a5,2 +np.float64,0xbfc5a91a342b5234,0xbfc7bafd77a61d81,2 +np.float64,0xbfe2226fe76444e0,0xbfeac33eb1d1b398,2 +np.float64,0x3fc6aaa8d42d5552,0x3fc4de79f5c68cd4,2 +np.float64,0x3fe54fd4c1ea9faa,0x3fe05561a9a5922b,2 +np.float64,0x80029c1f75653840,0x80029c1f75653840,2 +np.float64,0xbfcb4a84a2369508,0xbfceb1a23bac3995,2 +np.float64,0x80010abeff02157f,0x80010abeff02157f,2 +np.float64,0x7f92d12cf825a259,0x40860e49bde3a5b6,2 +np.float64,0x800933e7027267ce,0x800933e7027267ce,2 +np.float64,0x3fc022b12e204562,0x3fbe64acc53ed887,2 +np.float64,0xbfe35f938de6bf27,0xbfedc1f3e443c016,2 +np.float64,0x1f8d9bae3f1b4,0x1f8d9bae3f1b4,2 +np.float64,0x3fe552f22ceaa5e4,0x3fe057404072350f,2 +np.float64,0xbfa73753442e6ea0,0xbfa7c24a100190f1,2 +np.float64,0x7fb3e2982827c52f,0x408619d1efa676b6,2 +np.float64,0xbfd80cb7a5301970,0xbfde28e65f344f33,2 +np.float64,0xbfcde835973bd06c,0xbfd10806fba46c8f,2 +np.float64,0xbfd4e3c749a9c78e,0xbfd949aff65de39c,2 +np.float64,0x3fcb4b9d6f36973b,0x3fc8be02ad6dc0d3,2 +np.float64,0x1a63000034c7,0x1a63000034c7,2 +np.float64,0x7fdc9c751e3938e9,0x408627d22df71959,2 +np.float64,0x3fd74f3f712e9e7f,0x3fd3e07df0c37ec1,2 +np.float64,0xbfceab74d33d56e8,0xbfd187e99bf82903,2 +np.float64,0x7ff0000000000000,0x7ff0000000000000,2 +np.float64,0xbfb2cca466259948,0xbfb3868208e8de30,2 +np.float64,0x800204688b8408d2,0x800204688b8408d2,2 +np.float64,0x3e4547407c8aa,0x3e4547407c8aa,2 +np.float64,0xbfe4668846e8cd10,0xbff03c85189f3818,2 +np.float64,0x800dd350245ba6a0,0x800dd350245ba6a0,2 +np.float64,0xbfbc13c160382780,0xbfbdbd56ce996d16,2 
+np.float64,0x7fe25a628a24b4c4,0x408629d06eb2d64d,2 +np.float64,0x3fd19dabbc233b57,0x3fcf1f3ed1d34c8c,2 +np.float64,0x547e20faa8fc5,0x547e20faa8fc5,2 +np.float64,0xbfe19392c6232726,0xbfe97ffe4f303335,2 +np.float64,0x3f87f9f6702ff400,0x3f87d64fb471bb04,2 +np.float64,0x9dfc52db3bf8b,0x9dfc52db3bf8b,2 +np.float64,0x800e1f5a9adc3eb5,0x800e1f5a9adc3eb5,2 +np.float64,0xbfddbd09c8bb7a14,0xbfe3fed7d7cffc70,2 +np.float64,0xbfeda71af87b4e36,0xc004e6631c514544,2 +np.float64,0xbfdbfcfe1bb7f9fc,0xbfe266b5d4a56265,2 +np.float64,0x3fe4ee78cd69dcf2,0x3fe01abba4e81fc9,2 +np.float64,0x800f13b820de2770,0x800f13b820de2770,2 +np.float64,0x3f861e09702c3c00,0x3f85ffae83b02c4f,2 +np.float64,0xbfc0972479212e48,0xbfc1c4bf70b30cbc,2 +np.float64,0x7fef057ef57e0afd,0x40862e036479f6a9,2 +np.float64,0x8bdbabe517b76,0x8bdbabe517b76,2 +np.float64,0xbfec495417f892a8,0xc0013ade88746d18,2 +np.float64,0x3fec680ab3f8d015,0x3fe454dd304b560d,2 +np.float64,0xbfae7ce60c3cf9d0,0xbfaf6eef15bbe56b,2 +np.float64,0x3fec314124786282,0x3fe437ca06294f5a,2 +np.float64,0x7fd5ed05b82bda0a,0x408625b125518e58,2 +np.float64,0x3feac9f02f3593e0,0x3fe3768104dd5cb7,2 +np.float64,0x0,0x0,2 +np.float64,0xbfddd2abd5bba558,0xbfe41312b8ea20de,2 +np.float64,0xbfedf9558c7bf2ab,0xc00613c53e0bb33a,2 +np.float64,0x3fef245ffefe48c0,0x3fe5bfb4dfe3b7a5,2 +np.float64,0x7fe178604922f0c0,0x4086296b77d5eaef,2 +np.float64,0x10000000000000,0x10000000000000,2 +np.float64,0x7fed026766ba04ce,0x40862d7a0dc45643,2 +np.float64,0xbfde27d8c3bc4fb2,0xbfe46336b6447697,2 +np.float64,0x3fe9485d9cb290bb,0x3fe2a1e4b6419423,2 +np.float64,0xbfe27b8a7464f715,0xbfeb9382f5b16f65,2 +np.float64,0x5c34d274b869b,0x5c34d274b869b,2 +np.float64,0xbfeee0b7453dc16f,0xc00acdb46459b6e6,2 +np.float64,0x7fe3dfb4d4e7bf69,0x40862a73785fdf12,2 +np.float64,0xb4635eef68c6c,0xb4635eef68c6c,2 +np.float64,0xbfe522a2c82a4546,0xbff148912a59a1d6,2 +np.float64,0x8009ba38a9737472,0x8009ba38a9737472,2 +np.float64,0xbfc056ff3820ae00,0xbfc17b2205fa180d,2 +np.float64,0x7fe1c8b8a0239170,0x4086298feeee6133,2 +np.float64,0x3fe2d2c6b9e5a58e,0x3fdd9b907471031b,2 +np.float64,0x3fa0a161bc2142c0,0x3fa05db36f6a073b,2 +np.float64,0x3fdef4268ebde84c,0x3fd93f980794d1e7,2 +np.float64,0x800ecd9fe2fd9b40,0x800ecd9fe2fd9b40,2 +np.float64,0xbfc9fbd45e33f7a8,0xbfcd0afc47c340f6,2 +np.float64,0x3fe8c3035b718606,0x3fe2570eb65551a1,2 +np.float64,0xbfe78c4ad2ef1896,0xbff54d25b3328742,2 +np.float64,0x8006f5dcf8adebbb,0x8006f5dcf8adebbb,2 +np.float64,0x800301dca2a603ba,0x800301dca2a603ba,2 +np.float64,0xad4289e55a851,0xad4289e55a851,2 +np.float64,0x80037764f9e6eecb,0x80037764f9e6eecb,2 +np.float64,0xbfe73575b26e6aec,0xbff4abfb5e985c62,2 +np.float64,0xbfc6cb91652d9724,0xbfc91a8001b33ec2,2 +np.float64,0xbfe3a918ffe75232,0xbfee7e6e4fd34c53,2 +np.float64,0x9bc84e2b3790a,0x9bc84e2b3790a,2 +np.float64,0x7fdeec303cbdd85f,0x408628714a49d996,2 +np.float64,0x3fe1d1dcb763a3ba,0x3fdc54ce060dc7f4,2 +np.float64,0x8008ae6432b15cc9,0x8008ae6432b15cc9,2 +np.float64,0x3fd8022fa2b00460,0x3fd46322bf02a609,2 +np.float64,0xbfc55b64472ab6c8,0xbfc75d9568f462e0,2 +np.float64,0xbfe8b165437162ca,0xbff7a15e2ead645f,2 +np.float64,0x7f759330feeb3,0x7f759330feeb3,2 +np.float64,0xbfd504f68eaa09ee,0xbfd97b06c01d7473,2 +np.float64,0x54702d5aa8e06,0x54702d5aa8e06,2 +np.float64,0xbfed1779337a2ef2,0xc0032f7109ef5a51,2 +np.float64,0xe248bd4dc4918,0xe248bd4dc4918,2 +np.float64,0xbfd8c59150318b22,0xbfdf53bca6ca8b1e,2 +np.float64,0xbfe3b9d942e773b2,0xbfeea9fcad277ba7,2 +np.float64,0x800934ec127269d9,0x800934ec127269d9,2 +np.float64,0xbfbb7f535a36fea8,0xbfbd16d61b6c52b8,2 
+np.float64,0xccb185a199631,0xccb185a199631,2 +np.float64,0x3fe3dda76fe7bb4e,0x3fdee83bc6094301,2 +np.float64,0xbfe0c902f5e19206,0xbfe7ca7c0e888006,2 +np.float64,0xbfefeed08cbfdda1,0xc018aadc483c8724,2 +np.float64,0x7fd0c05c52a180b8,0x40862389daf64aac,2 +np.float64,0xbfd28e3323a51c66,0xbfd5e9ba278fb685,2 +np.float64,0xbef4103b7de82,0xbef4103b7de82,2 +np.float64,0x3fe7661fd12ecc40,0x3fe18ff7dfb696e2,2 +np.float64,0x3fddd5f2f0bbabe4,0x3fd87d8bb6719c3b,2 +np.float64,0x800b3914cfd6722a,0x800b3914cfd6722a,2 +np.float64,0xf3f09a97e7e14,0xf3f09a97e7e14,2 +np.float64,0x7f97092b502e1256,0x40860fe8054cf54e,2 +np.float64,0xbfdbec7917b7d8f2,0xbfe2580b4b792c79,2 +np.float64,0x7fe7ff215aaffe42,0x40862bf5887fa062,2 +np.float64,0x80080186e570030e,0x80080186e570030e,2 +np.float64,0xbfc27f05e624fe0c,0xbfc3fa214be4adc4,2 +np.float64,0x3fe4481be1689038,0x3fdf6b11e9c4ca72,2 +np.float64,0x3fd642cc9cac8598,0x3fd31a857fe70227,2 +np.float64,0xbef8782d7df0f,0xbef8782d7df0f,2 +np.float64,0x8003077dc2e60efc,0x8003077dc2e60efc,2 +np.float64,0x80083eb5a2507d6c,0x80083eb5a2507d6c,2 +np.float64,0x800e8d1eb77d1a3e,0x800e8d1eb77d1a3e,2 +np.float64,0xbfc7737cd22ee6f8,0xbfc9e7716f03f1fc,2 +np.float64,0xbfe9a2b4ddf3456a,0xbff9d71664a8fc78,2 +np.float64,0x7fe67c7d322cf8f9,0x40862b7066465194,2 +np.float64,0x3fec080ce2b8101a,0x3fe421dac225be46,2 +np.float64,0xbfe6d27beb6da4f8,0xbff3fbb1add521f7,2 +np.float64,0x3fdd4f96ceba9f2e,0x3fd821a638986dbe,2 +np.float64,0x3fbd89f1303b13e2,0x3fbbf49223a9d002,2 +np.float64,0xbfe94e2b9d329c57,0xbff907e549c534f5,2 +np.float64,0x3fe2f2cc51e5e599,0x3fddc3d6b4a834a1,2 +np.float64,0xfdcb5b49fb96c,0xfdcb5b49fb96c,2 +np.float64,0xbfea7108fa74e212,0xbffc01b392f4897b,2 +np.float64,0x3fd38baef7a7175c,0x3fd10e7fd3b958dd,2 +np.float64,0x3fa75bf9cc2eb800,0x3fa6d792ecdedb8e,2 +np.float64,0x7fd19fd20aa33fa3,0x408623f1e2cd04c3,2 +np.float64,0x3fd62c708dac58e0,0x3fd309ec7818d16e,2 +np.float64,0x3fdf489047be9120,0x3fd978640617c758,2 +np.float64,0x1,0x1,2 +np.float64,0xbfe21e7c3ea43cf8,0xbfeaba21320697d3,2 +np.float64,0xbfd3649047a6c920,0xbfd71a6f14223744,2 +np.float64,0xbfd68ca68c2d194e,0xbfdbcce6784e5d44,2 +np.float64,0x3fdb26b0ea364d62,0x3fd6a1f86f64ff74,2 +np.float64,0xbfd843821cb08704,0xbfde80e90805ab3f,2 +np.float64,0x3fd508a27aaa1144,0x3fd22fc203a7b9d8,2 +np.float64,0xbfdb951c7eb72a38,0xbfe20aeaec13699b,2 +np.float64,0x3fef556ba57eaad7,0x3fe5d8865cce0a6d,2 +np.float64,0x3fd0d224b3a1a448,0x3fcdde7be5d7e21e,2 +np.float64,0x8007ff272baffe4f,0x8007ff272baffe4f,2 +np.float64,0x3fe1c7bddf638f7c,0x3fdc47cc6cf2f5cd,2 +np.float64,0x7ff8000000000000,0x7ff8000000000000,2 +np.float64,0x2016d560402f,0x2016d560402f,2 +np.float64,0xbfcca10be9394218,0xbfd033f36b94fc54,2 +np.float64,0xbfdb833628b7066c,0xbfe1fb344b840c70,2 +np.float64,0x3fd8529cb3b0a539,0x3fd49d847fe77218,2 +np.float64,0xbfc0b0ebab2161d8,0xbfc1e260c60ffd1b,2 +np.float64,0xbfea8b9a79f51735,0xbffc4ee6be8a0fa2,2 +np.float64,0x7feca8fab7f951f4,0x40862d613e454646,2 +np.float64,0x7fd8c52d82318a5a,0x408626aaf37423a3,2 +np.float64,0xbfe364ad4526c95a,0xbfedcee39bc93ff5,2 +np.float64,0x800b78161256f02d,0x800b78161256f02d,2 +np.float64,0xbfd55f0153aabe02,0xbfda01a78f72d494,2 +np.float64,0x800315a5f0662b4d,0x800315a5f0662b4d,2 +np.float64,0x7fe4c0dca02981b8,0x40862acc27e4819f,2 +np.float64,0x8009825c703304b9,0x8009825c703304b9,2 +np.float64,0x3fe6e94e1cadd29c,0x3fe1478ccc634f49,2 +np.float64,0x7fe622d8586c45b0,0x40862b504177827e,2 +np.float64,0x3fe4458600688b0c,0x3fdf67e79a84b953,2 +np.float64,0xbfdd75d8a1baebb2,0xbfe3bc9e6ca1bbb5,2 
+np.float64,0x3fde789c6bbcf138,0x3fd8ec1d435531b3,2 +np.float64,0x3fe7052b94ee0a58,0x3fe157c5c4418dc1,2 +np.float64,0x7fef31652abe62c9,0x40862e0eaeabcfc0,2 +np.float64,0x3fe279691ee4f2d2,0x3fdd2aa41eb43cd4,2 +np.float64,0xbfd533fa95aa67f6,0xbfd9c12f516d29d7,2 +np.float64,0x3fe6d057f96da0b0,0x3fe138fd96693a6a,2 +np.float64,0x800bad984f775b31,0x800bad984f775b31,2 +np.float64,0x7fdd6fdba4badfb6,0x4086280c73d8ef97,2 +np.float64,0x7fe9b5c0eef36b81,0x40862c82c6f57a53,2 +np.float64,0x8000bc02ece17807,0x8000bc02ece17807,2 +np.float64,0xbff0000000000000,0xfff0000000000000,2 +np.float64,0xbfed430be3fa8618,0xc003aaf338c75b3c,2 +np.float64,0x3fee17b759fc2f6f,0x3fe53668696bf48b,2 +np.float64,0x3f8d4cf9d03a9a00,0x3f8d17d2f532afdc,2 +np.float64,0x8005d6257b8bac4c,0x8005d6257b8bac4c,2 +np.float64,0xbfd17a6df9a2f4dc,0xbfd469e3848adc6e,2 +np.float64,0xb28a293965145,0xb28a293965145,2 +np.float64,0xbfe7d011e42fa024,0xbff5cf818998c8ec,2 +np.float64,0xbfe74f0f136e9e1e,0xbff4dad6ebb0443c,2 +np.float64,0x800f249fc9be4940,0x800f249fc9be4940,2 +np.float64,0x2542f8fe4a860,0x2542f8fe4a860,2 +np.float64,0xc48d40cd891a8,0xc48d40cd891a8,2 +np.float64,0x3fe4e64bc8e9cc98,0x3fe015c9eb3caa53,2 +np.float64,0x3fd33881eca67104,0x3fd0cea886be2457,2 +np.float64,0xbfd01748fba02e92,0xbfd28875959e6901,2 +np.float64,0x7fb7ab01f22f5603,0x40861b369927bf53,2 +np.float64,0xbfe340274ce6804e,0xbfed72b39f0ebb24,2 +np.float64,0x7fc16c0c3422d817,0x40861e4eaf1a286c,2 +np.float64,0x3fc26944a324d288,0x3fc133a77b356ac4,2 +np.float64,0xa149d7134293b,0xa149d7134293b,2 +np.float64,0x800837382d106e71,0x800837382d106e71,2 +np.float64,0x797d1740f2fa4,0x797d1740f2fa4,2 +np.float64,0xc3f15b7787e2c,0xc3f15b7787e2c,2 +np.float64,0x80cad1b90195a,0x80cad1b90195a,2 +np.float64,0x3fdd8f1142bb1e23,0x3fd84d21490d1ce6,2 +np.float64,0xbfbde6c9123bcd90,0xbfbfcc030a86836a,2 +np.float64,0x8007f77e032feefd,0x8007f77e032feefd,2 +np.float64,0x3fe74fed1c6e9fda,0x3fe18322cf19cb61,2 +np.float64,0xbfd8a40bbcb14818,0xbfdf1d23520ba74b,2 +np.float64,0xbfeb7a0e6076f41d,0xbfff4ddfb926efa5,2 +np.float64,0xbfcb8c5f663718c0,0xbfcf0570f702bda9,2 +np.float64,0xf668cd97ecd1a,0xf668cd97ecd1a,2 +np.float64,0xbfe92accf572559a,0xbff8b4393878ffdb,2 +np.float64,0xbfeaa955567552ab,0xbffca70c7d73eee5,2 +np.float64,0xbfe083a14f610742,0xbfe739d84bc35077,2 +np.float64,0x78290568f0521,0x78290568f0521,2 +np.float64,0x3fe94bae2372975c,0x3fe2a3beac5c9858,2 +np.float64,0x3fca4fbab9349f78,0x3fc7edbca2492acb,2 +np.float64,0x8000000000000000,0x8000000000000000,2 +np.float64,0x7fb9eb505433d6a0,0x40861bf0adedb74d,2 +np.float64,0x7fdc66f72a38cded,0x408627c32aeecf0f,2 +np.float64,0x2e8e6f445d1cf,0x2e8e6f445d1cf,2 +np.float64,0xbfec43195af88633,0xc0012d7e3f91b7e8,2 +np.float64,0x7fcdb971e93b72e3,0x40862294c9e3a7bc,2 +np.float64,0x800cabc461195789,0x800cabc461195789,2 +np.float64,0x2c79709c58f2f,0x2c79709c58f2f,2 +np.float64,0x8005d772d3cbaee6,0x8005d772d3cbaee6,2 +np.float64,0x3fe84d8c03709b18,0x3fe21490ce3673dd,2 +np.float64,0x7fe5578adc2aaf15,0x40862b056e8437d4,2 +np.float64,0xbf91298c58225320,0xbf914ec86c32d11f,2 +np.float64,0xc7ed2b6d8fda6,0xc7ed2b6d8fda6,2 +np.float64,0x2761404c4ec29,0x2761404c4ec29,2 +np.float64,0x3fbad3c48835a789,0x3fb9833c02385305,2 +np.float64,0x3fa46fee5428dfe0,0x3fa40a357fb24c23,2 +np.float64,0xbfe3900c6fe72019,0xbfee3dba29dd9d43,2 +np.float64,0x3fe7a9e41a6f53c8,0x3fe1b704dfb9884b,2 +np.float64,0xbfe74a7a1eee94f4,0xbff4d269cacb1f29,2 +np.float64,0xbfee609c72fcc139,0xc007da8499d34123,2 +np.float64,0x3fef2d5fc23e5ac0,0x3fe5c44414e59cb4,2 
+np.float64,0xbfd7bdc0402f7b80,0xbfddaae1e7bb78fb,2 +np.float64,0xd71ee01dae3dc,0xd71ee01dae3dc,2 +np.float64,0x3fe98cbcdef3197a,0x3fe2c7ffe33c4541,2 +np.float64,0x8000f8dbb3a1f1b8,0x8000f8dbb3a1f1b8,2 +np.float64,0x3fe3e98ad567d316,0x3fdef6e58058313f,2 +np.float64,0x41ad0bfc835a2,0x41ad0bfc835a2,2 +np.float64,0x7fdcc2dc0d3985b7,0x408627dce39f77af,2 +np.float64,0xbfe47b980de8f730,0xbff059acdccd6e2b,2 +np.float64,0xbfef49b6577e936d,0xc00e714f46b2ccc1,2 +np.float64,0x3fac31816c386300,0x3fab71cb92b0db8f,2 +np.float64,0x3fe59097e76b2130,0x3fe07c299fd1127c,2 +np.float64,0xbfecf0df5cf9e1bf,0xc002c7ebdd65039c,2 +np.float64,0x3fd2b7d0b6a56fa1,0x3fd06b638990ae02,2 +np.float64,0xbfeb68deecf6d1be,0xbfff1187e042d3e4,2 +np.float64,0x3fd44a9771a8952f,0x3fd1a01867c5e302,2 +np.float64,0xf79a9dedef354,0xf79a9dedef354,2 +np.float64,0x800c25a170d84b43,0x800c25a170d84b43,2 +np.float64,0x3ff0000000000000,0x3fe62e42fefa39ef,2 +np.float64,0x3fbff4f7623fe9f0,0x3fbe1d3878f4c417,2 +np.float64,0xd284c845a5099,0xd284c845a5099,2 +np.float64,0xbfe3c7815f678f02,0xbfeecdab5ca2e651,2 +np.float64,0x3fc19c934e233927,0x3fc08036104b1f23,2 +np.float64,0x800b6096de16c12e,0x800b6096de16c12e,2 +np.float64,0xbfe962a67e32c54d,0xbff9392313a112a1,2 +np.float64,0x2b9d0116573a1,0x2b9d0116573a1,2 +np.float64,0x3fcab269ed3564d4,0x3fc83f7e1c3095b7,2 +np.float64,0x3fc8c78d86318f1b,0x3fc6a6cde5696f99,2 +np.float64,0xd5b1e9b5ab63d,0xd5b1e9b5ab63d,2 +np.float64,0xbfed802a47fb0054,0xc00465cad3b5b0ef,2 +np.float64,0xbfd73aaf08ae755e,0xbfdcdbd62b8af271,2 +np.float64,0xbfd4f13c0229e278,0xbfd95dacff79e570,2 +np.float64,0xbfe9622808f2c450,0xbff937f13c397e8d,2 +np.float64,0xbfeddfa62efbbf4c,0xc005b0c835eed829,2 +np.float64,0x3fd65663d4acacc8,0x3fd3290cd0e675dc,2 +np.float64,0x8005e890f1abd123,0x8005e890f1abd123,2 +np.float64,0xbfe924919fb24923,0xbff8a5a827a28756,2 +np.float64,0x3fe8cdf490719be9,0x3fe25d39535e8366,2 +np.float64,0x7fc229e6ff2453cd,0x40861ea40ef87a5a,2 +np.float64,0x3fe5cf53ceeb9ea8,0x3fe0a18e0b65f27e,2 +np.float64,0xa79cf6fb4f39f,0xa79cf6fb4f39f,2 +np.float64,0x7fddbb3c0f3b7677,0x40862820d5edf310,2 +np.float64,0x3e1011de7c203,0x3e1011de7c203,2 +np.float64,0x3fc0b59a83216b38,0x3fbf6916510ff411,2 +np.float64,0x8647f98d0c8ff,0x8647f98d0c8ff,2 +np.float64,0x8005dad33ecbb5a7,0x8005dad33ecbb5a7,2 +np.float64,0x8a80d0631501a,0x8a80d0631501a,2 +np.float64,0xbfe18f7d6ee31efb,0xbfe976f06713afc1,2 +np.float64,0xbfe06eaed560dd5e,0xbfe70eac696933e6,2 +np.float64,0xbfed8ef93c7b1df2,0xc00495bfa3195b53,2 +np.float64,0x3febe9c24677d385,0x3fe411b10db16c42,2 +np.float64,0x7fd5d80c1fabb017,0x408625a97a7787ba,2 +np.float64,0x3fca79b59334f368,0x3fc8108a521341dc,2 +np.float64,0xbfccf8db4339f1b8,0xbfd06c9a5424aadb,2 +np.float64,0xbfea5ac5a574b58b,0xbffbc21d1405d840,2 +np.float64,0x800ce2bf4b19c57f,0x800ce2bf4b19c57f,2 +np.float64,0xbfe8df896d31bf13,0xbff807ab38ac41ab,2 +np.float64,0x3feab83da9f5707c,0x3fe36cdd827c0eff,2 +np.float64,0x3fee717683bce2ed,0x3fe564879171719b,2 +np.float64,0x80025e5577c4bcac,0x80025e5577c4bcac,2 +np.float64,0x3fe3e5378e67ca70,0x3fdef1902c5d1efd,2 +np.float64,0x3fa014bb7c202980,0x3f9faacf9238d499,2 +np.float64,0x3fddbf5e16bb7ebc,0x3fd86e2311cb0f6d,2 +np.float64,0x3fd24e50e6a49ca0,0x3fd0198f04f82186,2 +np.float64,0x656b5214cad6b,0x656b5214cad6b,2 +np.float64,0x8b0a4bfd1614a,0x8b0a4bfd1614a,2 +np.float64,0xbfeeb6bd9e7d6d7b,0xc009b669285e319e,2 +np.float64,0x8000000000000001,0x8000000000000001,2 +np.float64,0xbfe719feceee33fe,0xbff47a4c8cbf0cca,2 +np.float64,0xbfd14fa8c8a29f52,0xbfd42f27b1aced39,2 
+np.float64,0x7fec9dcb80f93b96,0x40862d5e1e70bbb9,2 +np.float64,0x7fecacb826f9596f,0x40862d6249746915,2 +np.float64,0x973459f52e68b,0x973459f52e68b,2 +np.float64,0x7f40a59e00214b3b,0x4085f194f45f82b1,2 +np.float64,0x7fc5dbaec32bb75d,0x4086201f3e7065d9,2 +np.float64,0x82d0801305a10,0x82d0801305a10,2 +np.float64,0x7fec81c0f4790381,0x40862d5643c0fc85,2 +np.float64,0xbfe2d81e9ee5b03d,0xbfec71a8e864ea40,2 +np.float64,0x6c545c9ad8a8c,0x6c545c9ad8a8c,2 +np.float64,0x3f9be95a5037d2b5,0x3f9b89b48ac8f5d8,2 +np.float64,0x8000cae9702195d4,0x8000cae9702195d4,2 +np.float64,0xbfd375f45126ebe8,0xbfd733677e54a80d,2 +np.float64,0x3fd29a5b81a534b7,0x3fd05494bf200278,2 +np.float64,0xfff0000000000000,0xfff8000000000000,2 +np.float64,0x7fca8fc195351f82,0x408621ae61aa6c13,2 +np.float64,0x1b28e2ae3651d,0x1b28e2ae3651d,2 +np.float64,0x3fe7fdbd14effb7a,0x3fe1e714884b46a8,2 +np.float64,0x3fdf1ce068be39c0,0x3fd95b054e0fad3d,2 +np.float64,0x3fe79f9a636f3f34,0x3fe1b11a40c00b3e,2 +np.float64,0x3fe60eb7036c1d6e,0x3fe0c72a02176874,2 +np.float64,0x229da17e453b5,0x229da17e453b5,2 +np.float64,0x3fc1a921b5235240,0x3fc08b3f35e47fb1,2 +np.float64,0xbb92d2af7725b,0xbb92d2af7725b,2 +np.float64,0x3fe4110cb1e8221a,0x3fdf2787de6c73f7,2 +np.float64,0xbfbc87771a390ef0,0xbfbe3f6e95622363,2 +np.float64,0xbfe74025dfee804c,0xbff4bf7b1895e697,2 +np.float64,0x964eb6592c9d7,0x964eb6592c9d7,2 +np.float64,0x3f951689b82a2d00,0x3f94dfb38d746fdf,2 +np.float64,0x800356271be6ac4f,0x800356271be6ac4f,2 +np.float64,0x7fefffffffffffff,0x40862e42fefa39ef,2 +np.float64,0xbfed5ce250fab9c5,0xc003f7ddfeb94345,2 +np.float64,0x3fec3d5dc1387abc,0x3fe43e39c02d86f4,2 +np.float64,0x3999897e73332,0x3999897e73332,2 +np.float64,0xbfdcb57744b96aee,0xbfe30c4b98f3d088,2 +np.float64,0x7f961fb0b82c3f60,0x40860f9549c3a380,2 +np.float64,0x67d6efcacfadf,0x67d6efcacfadf,2 +np.float64,0x8002c9498f859294,0x8002c9498f859294,2 +np.float64,0xbfa3033800260670,0xbfa35fe3bf43e188,2 +np.float64,0xbfeab2fc157565f8,0xbffcc413c486b4eb,2 +np.float64,0x3fe25e62f364bcc6,0x3fdd0856e19e3430,2 +np.float64,0x7fb2f42dda25e85b,0x4086196fb34a65fd,2 +np.float64,0x3fe0f1a5af61e34c,0x3fdb3235a1786efb,2 +np.float64,0x800a340ca1f4681a,0x800a340ca1f4681a,2 +np.float64,0x7c20b9def8418,0x7c20b9def8418,2 +np.float64,0xdf0842a1be109,0xdf0842a1be109,2 +np.float64,0x3fe9f22cc2f3e45a,0x3fe300359b842bf0,2 +np.float64,0x3fe389ed73e713da,0x3fde809780fe4432,2 +np.float64,0x9500fb932a020,0x9500fb932a020,2 +np.float64,0x3fd8a21ffdb14440,0x3fd4d70862345d86,2 +np.float64,0x800d99c15cbb3383,0x800d99c15cbb3383,2 +np.float64,0x3fd96c98c932d932,0x3fd568959c9b028f,2 +np.float64,0x7fc228483a24508f,0x40861ea358420976,2 +np.float64,0x7fc6737bef2ce6f7,0x408620560ffc6a98,2 +np.float64,0xbfb2c27cee2584f8,0xbfb37b8cc7774b5f,2 +np.float64,0xbfd18409f9230814,0xbfd4771d1a9a24fb,2 +np.float64,0x3fb53cb3f42a7968,0x3fb466f06f88044b,2 +np.float64,0x3fef61d0187ec3a0,0x3fe5dec8a9d13dd9,2 +np.float64,0x3fe59a6ffd2b34e0,0x3fe0820a99c6143d,2 +np.float64,0x3fce18aff43c3160,0x3fcb07c7b523f0d1,2 +np.float64,0xbfb1319a62226338,0xbfb1cc62f31b2b40,2 +np.float64,0xa00cce6d4019a,0xa00cce6d4019a,2 +np.float64,0x80068ae8e0ed15d3,0x80068ae8e0ed15d3,2 +np.float64,0x3fecef353239de6a,0x3fe49c280adc607b,2 +np.float64,0x3fdf1a7fb0be34ff,0x3fd9596bafe2d766,2 +np.float64,0x3feb5e12eeb6bc26,0x3fe3c6be3ede8d07,2 +np.float64,0x3fdeff5cd43dfeba,0x3fd947262ec96b05,2 +np.float64,0x3f995e75e832bd00,0x3f990f511f4c7f1c,2 +np.float64,0xbfeb5b3ed0b6b67e,0xbffee24fc0fc2881,2 +np.float64,0x7fb82aad0a305559,0x40861b614d901182,2 
+np.float64,0xbfe5c3a4926b8749,0xbff23cd0ad144fe6,2 +np.float64,0x3fef47da373e8fb4,0x3fe5d1aaa4031993,2 +np.float64,0x7fc6a8c3872d5186,0x40862068f5ca84be,2 +np.float64,0x7fc0c2276221844e,0x40861dff2566d001,2 +np.float64,0x7fc9ce7d28339cf9,0x40862173541f84d1,2 +np.float64,0x3fce2c34933c5869,0x3fcb179428ad241d,2 +np.float64,0xbfcf864c293f0c98,0xbfd21872c4821cfc,2 +np.float64,0x3fc51fd1f82a3fa4,0x3fc38d4f1685c166,2 +np.float64,0xbfe2707b70a4e0f7,0xbfeb795fbd5bb444,2 +np.float64,0x46629b568cc54,0x46629b568cc54,2 +np.float64,0x7fe5f821f32bf043,0x40862b40c2cdea3f,2 +np.float64,0x3fedd2c9457ba592,0x3fe512ce92394526,2 +np.float64,0x7fe6dcb8ceadb971,0x40862b925a7dc05d,2 +np.float64,0x3fd1b983b4a37307,0x3fcf4ae2545cf64e,2 +np.float64,0xbfe1c93104639262,0xbfe9f7d28e4c0c82,2 +np.float64,0x995ebc2932bd8,0x995ebc2932bd8,2 +np.float64,0x800a4c3ee614987e,0x800a4c3ee614987e,2 +np.float64,0x3fbb58766e36b0f0,0x3fb9fb3b9810ec16,2 +np.float64,0xbfe36d636666dac7,0xbfede5080f69053c,2 +np.float64,0x3f4feee1003fddc2,0x3f4feae5f05443d1,2 +np.float64,0x3fed0b772ffa16ee,0x3fe4aafb924903c6,2 +np.float64,0x800bb3faef3767f6,0x800bb3faef3767f6,2 +np.float64,0x3fe285cda5e50b9c,0x3fdd3a58df06c427,2 +np.float64,0x7feb9d560bb73aab,0x40862d152362bb94,2 +np.float64,0x3fecd1f447f9a3e9,0x3fe48cc78288cb3f,2 +np.float64,0x3fca927b0c3524f6,0x3fc8250f49ba28df,2 +np.float64,0x7fcc19944e383328,0x40862221b02fcf43,2 +np.float64,0xbfd8ddf41db1bbe8,0xbfdf7b92073ff2fd,2 +np.float64,0x80006fe736e0dfcf,0x80006fe736e0dfcf,2 +np.float64,0x800bbeb66d577d6d,0x800bbeb66d577d6d,2 +np.float64,0xbfe4329353e86526,0xbfefeaf19ab92b42,2 +np.float64,0x2fad72805f5af,0x2fad72805f5af,2 +np.float64,0x3fe1b827aa637050,0x3fdc33bf46012c0d,2 +np.float64,0x3fc3f3f8e227e7f2,0x3fc28aeb86d65278,2 +np.float64,0x3fec018933780312,0x3fe41e619aa4285c,2 +np.float64,0xbfd92428e0b24852,0xbfdfeecb08d154df,2 +np.float64,0x2d7046845ae0a,0x2d7046845ae0a,2 +np.float64,0x7fde7fd2233cffa3,0x408628550f8a948f,2 +np.float64,0x8000a32cd241465a,0x8000a32cd241465a,2 +np.float64,0x8004267a45084cf5,0x8004267a45084cf5,2 +np.float64,0xbfe6b422556d6844,0xbff3c71f67661e6e,2 +np.float64,0x3fe3a37d922746fb,0x3fdea04e04d6195c,2 +np.float64,0xbfddcc54b53b98aa,0xbfe40d2389cdb848,2 +np.float64,0x3fe18b4b92a31697,0x3fdbf9e68cbf5794,2 +np.float64,0x7fc9c5b2ee338b65,0x408621709a17a47a,2 +np.float64,0x1ebd1ce03d7b,0x1ebd1ce03d7b,2 +np.float64,0x8008a6fc39d14df9,0x8008a6fc39d14df9,2 +np.float64,0x3fec11384c782270,0x3fe426bdaedd2965,2 +np.float64,0x3fefc28344ff8507,0x3fe60f75d34fc3d2,2 +np.float64,0xc35f379786be7,0xc35f379786be7,2 +np.float64,0x3feef51f4a7dea3e,0x3fe5a7b95d7786b5,2 +np.float64,0x3fec9b9f0379373e,0x3fe4702477abbb63,2 +np.float64,0x3fde94f8cdbd29f0,0x3fd8ff50f7df0a6f,2 +np.float64,0xbfed32d1cdfa65a4,0xc0037c1470f6f979,2 +np.float64,0x800d3ba44f5a7749,0x800d3ba44f5a7749,2 +np.float64,0x3fe3c56c8fe78ad9,0x3fdeca4eb9bb8918,2 +np.float64,0xbfe7c97242ef92e4,0xbff5c2950dfd6f69,2 +np.float64,0xbd9440057b288,0xbd9440057b288,2 +np.float64,0x7feb2fc111f65f81,0x40862cf524bd2001,2 +np.float64,0x800a431e2df4863d,0x800a431e2df4863d,2 +np.float64,0x80038a3b79e71478,0x80038a3b79e71478,2 +np.float64,0x80000c93d4601928,0x80000c93d4601928,2 +np.float64,0x7fe9fec022f3fd7f,0x40862c995db8ada0,2 +np.float64,0x3fead0129c35a025,0x3fe379d7a92c8f79,2 +np.float64,0x3fdd8cbaf7bb1974,0x3fd84b87ff0c26c7,2 +np.float64,0x3fe8fb7c60b1f6f9,0x3fe276d5339e7135,2 +np.float64,0x85a255e10b44b,0x85a255e10b44b,2 +np.float64,0xbfe507c23fea0f84,0xbff1212d2260022a,2 +np.float64,0x3fc5487c7b2a90f9,0x3fc3b03222d3d148,2 
+np.float64,0x7fec0bdcb8f817b8,0x40862d34e8fd11e7,2 +np.float64,0xbfc5f34b4f2be698,0xbfc8146a899c7a0c,2 +np.float64,0xbfa2a49c14254940,0xbfa2fdab2eae3826,2 +np.float64,0x800ec52f15dd8a5e,0x800ec52f15dd8a5e,2 +np.float64,0xbfe3ba4b12a77496,0xbfeeab256b3e9422,2 +np.float64,0x80034d6c7ba69ada,0x80034d6c7ba69ada,2 +np.float64,0x7fd394d4202729a7,0x408624c98a216742,2 +np.float64,0xbfd4493a38289274,0xbfd865d67af2de91,2 +np.float64,0xe47d6203c8fad,0xe47d6203c8fad,2 +np.float64,0x98eb4e4b31d6a,0x98eb4e4b31d6a,2 +np.float64,0x4507fb128a100,0x4507fb128a100,2 +np.float64,0xbfc77032e42ee064,0xbfc9e36ab747a14d,2 +np.float64,0xa1f8a03b43f14,0xa1f8a03b43f14,2 +np.float64,0xbfc3d4da8527a9b4,0xbfc58c27af2476b0,2 +np.float64,0x3fc0eb7d6921d6fb,0x3fbfc858a077ed61,2 +np.float64,0x7fddb2e9403b65d2,0x4086281e98443709,2 +np.float64,0xbfa7ea62942fd4c0,0xbfa87dfd06b05d2a,2 +np.float64,0xbfe7d5c5426fab8a,0xbff5daa969c6d9e5,2 +np.float64,0x3fbf7cba0c3ef974,0x3fbdb23cd8fe875b,2 +np.float64,0x7fe92021eb324043,0x40862c53aee8b154,2 +np.float64,0x7fefbaa1827f7542,0x40862e3194737072,2 +np.float64,0x3fc6f82c402df059,0x3fc520432cbc533f,2 +np.float64,0x7fb37679a826ecf2,0x408619a5f857e27f,2 +np.float64,0x79ec1528f3d83,0x79ec1528f3d83,2 +np.float64,0x3fbefe1d0c3dfc3a,0x3fbd41650ba2c893,2 +np.float64,0x3fc3e5e11827cbc2,0x3fc27eb9b47c9c42,2 +np.float64,0x16aed1922d5db,0x16aed1922d5db,2 +np.float64,0x800124f7e58249f1,0x800124f7e58249f1,2 +np.float64,0x8004f7d12489efa3,0x8004f7d12489efa3,2 +np.float64,0x3fef80b8e27f0172,0x3fe5ee5fd43322c6,2 +np.float64,0xbfe7740c88eee819,0xbff51f823c8da14d,2 +np.float64,0xbfe6e1f1f6edc3e4,0xbff416bcb1302e7c,2 +np.float64,0x8001a2c4a7e3458a,0x8001a2c4a7e3458a,2 +np.float64,0x3fe861e155f0c3c2,0x3fe2201d3000c329,2 +np.float64,0x3fd00a101a201420,0x3fcca01087dbd728,2 +np.float64,0x7fdf0eb1133e1d61,0x4086287a327839b8,2 +np.float64,0x95e3ffdb2bc80,0x95e3ffdb2bc80,2 +np.float64,0x3fd87a1e8230f43d,0x3fd4ba1eb9be1270,2 +np.float64,0x3fedc4792afb88f2,0x3fe50b6529080f73,2 +np.float64,0x7fc9e81fa833d03e,0x4086217b428cc6ff,2 +np.float64,0xbfd21f1ba5a43e38,0xbfd54e048b988e09,2 +np.float64,0xbfbf52af5a3ea560,0xbfc0b4ab3b81fafc,2 +np.float64,0x7fe475f8e268ebf1,0x40862aaf14fee029,2 +np.float64,0x3fcf56899f3ead10,0x3fcc081de28ae9cf,2 +np.float64,0x917d407122fa8,0x917d407122fa8,2 +np.float64,0x22e23e3245c49,0x22e23e3245c49,2 +np.float64,0xbfeec2814f3d8503,0xc00a00ecca27b426,2 +np.float64,0xbfd97fee1c32ffdc,0xbfe04351dfe306ec,2 diff --git a/.venv/lib/python3.12/site-packages/numpy/core/tests/data/umath-validation-set-log2.csv b/.venv/lib/python3.12/site-packages/numpy/core/tests/data/umath-validation-set-log2.csv new file mode 100644 index 00000000..179c6519 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/data/umath-validation-set-log2.csv @@ -0,0 +1,1629 @@ +dtype,input,output,ulperrortol +np.float32,0x80000000,0xff800000,3 +np.float32,0x7f12870a,0x42fe63db,3 +np.float32,0x3ef29cf5,0xbf89eb12,3 +np.float32,0x3d6ba8fb,0xc083d26c,3 +np.float32,0x3d9907e8,0xc06f8230,3 +np.float32,0x4ee592,0xc2fd656e,3 +np.float32,0x58d8b1,0xc2fd0db3,3 +np.float32,0x7ba103,0xc2fc19aa,3 +np.float32,0x7f52e90e,0x42ff70e4,3 +np.float32,0x7fcb15,0xc2fc0132,3 +np.float32,0x7cb7129f,0x42f50855,3 +np.float32,0x9faba,0xc301ae59,3 +np.float32,0x7f300a,0xc2fc04b4,3 +np.float32,0x3f0bf047,0xbf5f10cb,3 +np.float32,0x2fb1fb,0xc2fed934,3 +np.float32,0x3eedb0d1,0xbf8db417,3 +np.float32,0x3d7a0b40,0xc0811638,3 +np.float32,0x2e0bac,0xc2fef334,3 +np.float32,0x6278c1,0xc2fcc1b9,3 +np.float32,0x7f61ab2e,0x42ffa2d9,3 
+np.float32,0x8fe7c,0xc301d4be,3 +np.float32,0x3f25e6ee,0xbf203536,3 +np.float32,0x7efc78f0,0x42fdf5c0,3 +np.float32,0x6d7304,0xc2fc73a7,3 +np.float32,0x7f1a472a,0x42fe89ed,3 +np.float32,0x7dd029a6,0x42f96734,3 +np.float32,0x3e9b9327,0xbfdbf8f7,3 +np.float32,0x3f4eefc1,0xbe9d2942,3 +np.float32,0x7f5b9b64,0x42ff8ebc,3 +np.float32,0x3e458ee1,0xc017ed6e,3 +np.float32,0x3f7b766b,0xbcd35acf,3 +np.float32,0x3e616070,0xc00bc378,3 +np.float32,0x7f20e633,0x42fea8f8,3 +np.float32,0x3ee3b461,0xbf95a126,3 +np.float32,0x7e7722ba,0x42fbe5f8,3 +np.float32,0x3f0873d7,0xbf6861fa,3 +np.float32,0x7b4cb2,0xc2fc1ba3,3 +np.float32,0x3f0b6b02,0xbf60712e,3 +np.float32,0x9bff4,0xc301b6f2,3 +np.float32,0x3f07be25,0xbf6a4f0c,3 +np.float32,0x3ef10e57,0xbf8b1b75,3 +np.float32,0x46ad75,0xc2fdb6b1,3 +np.float32,0x3f7bc542,0xbcc4e3a9,3 +np.float32,0x3f6673d4,0xbe1b509c,3 +np.float32,0x7f19fe59,0x42fe8890,3 +np.float32,0x7f800000,0x7f800000,3 +np.float32,0x7f2fe696,0x42feead0,3 +np.float32,0x3dc9432d,0xc0563655,3 +np.float32,0x3ee47623,0xbf950446,3 +np.float32,0x3f1f8817,0xbf2eab51,3 +np.float32,0x7f220ec5,0x42feae44,3 +np.float32,0x2325e3,0xc2ffbab1,3 +np.float32,0x29dfc8,0xc2ff395a,3 +np.float32,0x7f524950,0x42ff6eb3,3 +np.float32,0x3e2234e0,0xc02a21c8,3 +np.float32,0x7f1c6f5a,0x42fe942f,3 +np.float32,0x3b6a61,0xc2fe36e7,3 +np.float32,0x3f1df90e,0xbf324ba9,3 +np.float32,0xb57f0,0xc3017f07,3 +np.float32,0x7d0eba,0xc2fc112e,3 +np.float32,0x403aa9,0xc2fdfd5c,3 +np.float32,0x3e74ecc7,0xc004155f,3 +np.float32,0x17509c,0xc30074f2,3 +np.float32,0x7f62196b,0x42ffa442,3 +np.float32,0x3ecef9a9,0xbfa7417a,3 +np.float32,0x7f14b158,0x42fe6eb1,3 +np.float32,0x3ede12be,0xbf9a40fe,3 +np.float32,0x42cfaa,0xc2fde03f,3 +np.float32,0x3f407b0f,0xbed2a6f5,3 +np.float32,0x7f7fffff,0x43000000,3 +np.float32,0x5467c6,0xc2fd3394,3 +np.float32,0x7ea6b80f,0x42fcc336,3 +np.float32,0x3f21e7b2,0xbf293704,3 +np.float32,0x3dc7e9eb,0xc056d542,3 +np.float32,0x7f3e6e67,0x42ff2571,3 +np.float32,0x3e3e809d,0xc01b4911,3 +np.float32,0x3f800000,0x0,3 +np.float32,0x3d8fd238,0xc0753d52,3 +np.float32,0x3f74aa65,0xbd85cd0e,3 +np.float32,0x7ec30305,0x42fd36ff,3 +np.float32,0x3e97bb93,0xbfe0971d,3 +np.float32,0x3e109d9c,0xc034bb1b,3 +np.float32,0x3f4a0b67,0xbeaed537,3 +np.float32,0x3f25a7aa,0xbf20c228,3 +np.float32,0x3ebc05eb,0xbfb8fd6b,3 +np.float32,0x3eebe749,0xbf8f18e5,3 +np.float32,0x3e9dc479,0xbfd96356,3 +np.float32,0x7f245200,0x42feb882,3 +np.float32,0x1573a8,0xc30093b5,3 +np.float32,0x3e66c4b9,0xc00994a6,3 +np.float32,0x3e73bffc,0xc0048709,3 +np.float32,0x3dfef8e5,0xc0405f16,3 +np.float32,0x403750,0xc2fdfd83,3 +np.float32,0x3ebedf17,0xbfb636a4,3 +np.float32,0x15cae6,0xc3008de2,3 +np.float32,0x3edf4d4e,0xbf993c24,3 +np.float32,0x3f7cc41e,0xbc963fb3,3 +np.float32,0x3e9e12a4,0xbfd907ee,3 +np.float32,0x7ded7b59,0x42f9c889,3 +np.float32,0x7f034878,0x42fe12b5,3 +np.float32,0x7ddce43f,0x42f9930b,3 +np.float32,0x3d82b257,0xc07e1333,3 +np.float32,0x3dae89c1,0xc0635dd4,3 +np.float32,0x6b1d00,0xc2fc8396,3 +np.float32,0x449a5a,0xc2fdccb3,3 +np.float32,0x4e89d2,0xc2fd68cb,3 +np.float32,0x7e1ae83f,0x42fa8cef,3 +np.float32,0x7e4bb22c,0x42fb572e,3 +np.float32,0x3de308ea,0xc04b1634,3 +np.float32,0x7f238c7a,0x42feb508,3 +np.float32,0x3f6c62a3,0xbdeb86f3,3 +np.float32,0x3e58cba6,0xc00f5908,3 +np.float32,0x7f7dd91f,0x42fff9c4,3 +np.float32,0x3d989376,0xc06fc88d,3 +np.float32,0x3dd013c5,0xc0532339,3 +np.float32,0x4b17e6,0xc2fd89ed,3 +np.float32,0x7f67f287,0x42ffb71e,3 +np.float32,0x3f69365e,0xbe09ba3c,3 +np.float32,0x3e4b8b21,0xc0152bf1,3 +np.float32,0x3a75b,0xc3032171,3 
+np.float32,0x7f303676,0x42feec1f,3 +np.float32,0x7f6570e5,0x42ffaf18,3 +np.float32,0x3f5ed61e,0xbe4cf676,3 +np.float32,0x3e9b22f9,0xbfdc7e4f,3 +np.float32,0x2c095e,0xc2ff1428,3 +np.float32,0x3f1b17c1,0xbf391754,3 +np.float32,0x422dc6,0xc2fde746,3 +np.float32,0x3f677c8d,0xbe14b365,3 +np.float32,0x3ef85d0c,0xbf8597a9,3 +np.float32,0x3ecaaa6b,0xbfab2430,3 +np.float32,0x3f0607d1,0xbf6eff3d,3 +np.float32,0x3f011fdb,0xbf7cc50d,3 +np.float32,0x6ed7c1,0xc2fc6a4e,3 +np.float32,0x7ec2d1a2,0x42fd3644,3 +np.float32,0x3f75b7fe,0xbd7238a2,3 +np.float32,0x3ef2d146,0xbf89c344,3 +np.float32,0x7ec2cd27,0x42fd3633,3 +np.float32,0x7ee1e55a,0x42fda397,3 +np.float32,0x7f464d6a,0x42ff435c,3 +np.float32,0x7f469a93,0x42ff447b,3 +np.float32,0x7ece752f,0x42fd6121,3 +np.float32,0x2ed878,0xc2fee67b,3 +np.float32,0x75b23,0xc3021eff,3 +np.float32,0x3e0f4be4,0xc03593b8,3 +np.float32,0x2778e1,0xc2ff64fc,3 +np.float32,0x5fe2b7,0xc2fcd561,3 +np.float32,0x19b8a9,0xc30050ab,3 +np.float32,0x7df303e5,0x42f9d98d,3 +np.float32,0x608b8d,0xc2fcd051,3 +np.float32,0x588f46,0xc2fd1017,3 +np.float32,0x3eec6a11,0xbf8eb2a1,3 +np.float32,0x3f714121,0xbdaf4906,3 +np.float32,0x7f4f7b9e,0x42ff64c9,3 +np.float32,0x3c271606,0xc0d3b29c,3 +np.float32,0x3f002fe0,0xbf7f75f6,3 +np.float32,0x7efa4798,0x42fdef4f,3 +np.float32,0x3f61a865,0xbe3a601a,3 +np.float32,0x7e8087aa,0x42fc030d,3 +np.float32,0x3f70f0c7,0xbdb321ba,3 +np.float32,0x5db898,0xc2fce63f,3 +np.float32,0x7a965f,0xc2fc1fea,3 +np.float32,0x7f68b112,0x42ffb97c,3 +np.float32,0x7ef0ed3d,0x42fdd32d,3 +np.float32,0x7f3156a1,0x42fef0d3,3 +np.float32,0x3f1d405f,0xbf33fc6e,3 +np.float32,0x3e3494cf,0xc0203945,3 +np.float32,0x6018de,0xc2fcd3c1,3 +np.float32,0x623e49,0xc2fcc370,3 +np.float32,0x3ea29f0f,0xbfd3cad4,3 +np.float32,0xa514,0xc305a20c,3 +np.float32,0x3e1b2ab1,0xc02e3a8f,3 +np.float32,0x3f450b6f,0xbec1578f,3 +np.float32,0x7eb12908,0x42fcf015,3 +np.float32,0x3f10b720,0xbf52ab48,3 +np.float32,0x3e0a93,0xc2fe16f6,3 +np.float32,0x93845,0xc301cb96,3 +np.float32,0x7f4e9ce3,0x42ff61af,3 +np.float32,0x3f6d4296,0xbde09ceb,3 +np.float32,0x6ddede,0xc2fc70d0,3 +np.float32,0x3f4fb6fd,0xbe9a636d,3 +np.float32,0x3f6d08de,0xbde36c0b,3 +np.float32,0x3f56f057,0xbe8122ad,3 +np.float32,0x334e95,0xc2fea349,3 +np.float32,0x7efadbcd,0x42fdf104,3 +np.float32,0x3db02e88,0xc0628046,3 +np.float32,0x3f3309d1,0xbf041066,3 +np.float32,0x2d8722,0xc2fefb8f,3 +np.float32,0x7e926cac,0x42fc6356,3 +np.float32,0x3e3674ab,0xc01f452e,3 +np.float32,0x1b46ce,0xc3003afc,3 +np.float32,0x3f06a338,0xbf6d53fc,3 +np.float32,0x1b1ba7,0xc3003d46,3 +np.float32,0x319dfb,0xc2febc06,3 +np.float32,0x3e2f126a,0xc02315a5,3 +np.float32,0x3f40fe65,0xbed0af9e,3 +np.float32,0x3f1d842f,0xbf335d4b,3 +np.float32,0x3d044e4f,0xc09e78f8,3 +np.float32,0x7f272674,0x42fec51f,3 +np.float32,0x3cda6d8f,0xc0a753db,3 +np.float32,0x3eb92f12,0xbfbbccbb,3 +np.float32,0x7e4318f4,0x42fb3752,3 +np.float32,0x3c5890,0xc2fe2b6d,3 +np.float32,0x3d1993c9,0xc09796f8,3 +np.float32,0x7f18ef24,0x42fe8377,3 +np.float32,0x3e30c3a0,0xc0223244,3 +np.float32,0x3f27cd27,0xbf1c00ef,3 +np.float32,0x3f150957,0xbf47cd6c,3 +np.float32,0x7e7178a3,0x42fbd4d8,3 +np.float32,0x3f298db8,0xbf182ac3,3 +np.float32,0x7cb3be,0xc2fc1348,3 +np.float32,0x3ef64266,0xbf8729de,3 +np.float32,0x3eeb06ce,0xbf8fc8f2,3 +np.float32,0x3f406e36,0xbed2d845,3 +np.float32,0x7f1e1bd3,0x42fe9c0b,3 +np.float32,0x478dcc,0xc2fdad97,3 +np.float32,0x7f7937b5,0x42ffec2b,3 +np.float32,0x3f20f350,0xbf2b6624,3 +np.float32,0x7f13661a,0x42fe683c,3 +np.float32,0x208177,0xc2fff46b,3 +np.float32,0x263cfb,0xc2ff7c72,3 
+np.float32,0x7f0bd28c,0x42fe4141,3 +np.float32,0x7230d8,0xc2fc5453,3 +np.float32,0x3f261bbf,0xbf1fbfb4,3 +np.float32,0x737b56,0xc2fc4c05,3 +np.float32,0x3ef88f33,0xbf857263,3 +np.float32,0x7e036464,0x42fa1352,3 +np.float32,0x4b5c4f,0xc2fd874d,3 +np.float32,0x3f77984d,0xbd454596,3 +np.float32,0x3f674202,0xbe162932,3 +np.float32,0x3e7157d9,0xc0057197,3 +np.float32,0x3f3f21da,0xbed7d861,3 +np.float32,0x7f1fb40f,0x42fea375,3 +np.float32,0x7ef0157f,0x42fdd096,3 +np.float32,0x3f71e88d,0xbda74962,3 +np.float32,0x3f174855,0xbf424728,3 +np.float32,0x3f3fdd2c,0xbed505d5,3 +np.float32,0x7b95d1,0xc2fc19ed,3 +np.float32,0x7f23f4e5,0x42feb6df,3 +np.float32,0x7d741925,0x42f7dcd6,3 +np.float32,0x60f81d,0xc2fccd14,3 +np.float32,0x3f17d267,0xbf40f6ae,3 +np.float32,0x3f036fc8,0xbf7636f8,3 +np.float32,0x167653,0xc30082b5,3 +np.float32,0x256d05,0xc2ff8c4f,3 +np.float32,0x3eccc63d,0xbfa93adb,3 +np.float32,0x7f6c91ea,0x42ffc5b2,3 +np.float32,0x2ee52a,0xc2fee5b3,3 +np.float32,0x3dc3579e,0xc058f80d,3 +np.float32,0x4c7170,0xc2fd7cc4,3 +np.float32,0x7f737f20,0x42ffdb03,3 +np.float32,0x3f2f9dbf,0xbf0b3119,3 +np.float32,0x3f4d0c54,0xbea3eec5,3 +np.float32,0x7e380862,0x42fb0c32,3 +np.float32,0x5d637f,0xc2fce8df,3 +np.float32,0x3f0aa623,0xbf627c27,3 +np.float32,0x3e4d5896,0xc0145b88,3 +np.float32,0x3f6cacdc,0xbde7e7ca,3 +np.float32,0x63a2c3,0xc2fcb90a,3 +np.float32,0x6c138c,0xc2fc7cfa,3 +np.float32,0x2063c,0xc303fb88,3 +np.float32,0x7e9e5a3e,0x42fc9d2f,3 +np.float32,0x56ec64,0xc2fd1ddd,3 +np.float32,0x7f1d6a35,0x42fe98cc,3 +np.float32,0x73dc96,0xc2fc4998,3 +np.float32,0x3e5d74e5,0xc00d6238,3 +np.float32,0x7f033cbb,0x42fe1273,3 +np.float32,0x3f5143fc,0xbe94e4e7,3 +np.float32,0x1d56d9,0xc3002010,3 +np.float32,0x2bf3e4,0xc2ff1591,3 +np.float32,0x3f2a6ef1,0xbf164170,3 +np.float32,0x3f33238b,0xbf03db58,3 +np.float32,0x22780e,0xc2ffc91a,3 +np.float32,0x7f00b873,0x42fe0425,3 +np.float32,0x3f7f6145,0xbb654706,3 +np.float32,0x7fc00000,0x7fc00000,3 +np.float32,0x63895a,0xc2fcb9c7,3 +np.float32,0x18a1b2,0xc30060a8,3 +np.float32,0x7e43c6a6,0x42fb39e3,3 +np.float32,0x78676e,0xc2fc2d30,3 +np.float32,0x3f16d839,0xbf435940,3 +np.float32,0x7eff78ba,0x42fdfe79,3 +np.float32,0x3f2e152c,0xbf0e6e54,3 +np.float32,0x3db20ced,0xc06186e1,3 +np.float32,0x3f0cd1d8,0xbf5cbf57,3 +np.float32,0x3fd7a8,0xc2fe01d2,3 +np.float32,0x3ebb075e,0xbfb9f816,3 +np.float32,0x7f94ef,0xc2fc026b,3 +np.float32,0x3d80ba0e,0xc07f7a2b,3 +np.float32,0x7f227e15,0x42feb03f,3 +np.float32,0x792264bf,0x42e6afcc,3 +np.float32,0x7f501576,0x42ff66ec,3 +np.float32,0x223629,0xc2ffcea3,3 +np.float32,0x40a79e,0xc2fdf87b,3 +np.float32,0x449483,0xc2fdccf2,3 +np.float32,0x3f4fa978,0xbe9a9382,3 +np.float32,0x7f148c53,0x42fe6df9,3 +np.float32,0x3ec98b3c,0xbfac2a98,3 +np.float32,0x3e4da320,0xc0143a0a,3 +np.float32,0x3d1d94bb,0xc09666d0,3 +np.float32,0x3c8e624e,0xc0bb155b,3 +np.float32,0x66a9af,0xc2fca2ef,3 +np.float32,0x3ec76ed7,0xbfae1c57,3 +np.float32,0x3f4b52f3,0xbeaa2b81,3 +np.float32,0x7e99bbb5,0x42fc8750,3 +np.float32,0x3f69a46b,0xbe0701be,3 +np.float32,0x3f775400,0xbd4ba495,3 +np.float32,0x131e56,0xc300be3c,3 +np.float32,0x3f30abb4,0xbf08fb10,3 +np.float32,0x7f7e528c,0x42fffb25,3 +np.float32,0x3eb89515,0xbfbc668a,3 +np.float32,0x7e9191b6,0x42fc5f02,3 +np.float32,0x7e80c7e9,0x42fc047e,3 +np.float32,0x3f77ef58,0xbd3d2995,3 +np.float32,0x7ddb1f8a,0x42f98d1b,3 +np.float32,0x7ebc6c4f,0x42fd1d9c,3 +np.float32,0x3f6638e0,0xbe1ccab8,3 +np.float32,0x7f4c45,0xc2fc0410,3 +np.float32,0x3e7d8aad,0xc000e414,3 +np.float32,0x3f4d148b,0xbea3d12e,3 +np.float32,0x3e98c45c,0xbfdf55f4,3 
+np.float32,0x3d754c78,0xc081f8a9,3 +np.float32,0x17e4cf,0xc3006be3,3 +np.float32,0x7eb65814,0x42fd0563,3 +np.float32,0x3f65e0d8,0xbe1f0008,3 +np.float32,0x3e99541f,0xbfdea87e,3 +np.float32,0x3f3cb80e,0xbee13b27,3 +np.float32,0x3e99f0c0,0xbfddec3b,3 +np.float32,0x3f43903e,0xbec6ea66,3 +np.float32,0x7e211cd4,0x42faa9f2,3 +np.float32,0x824af,0xc301f971,3 +np.float32,0x3e16a56e,0xc030f56c,3 +np.float32,0x542b3b,0xc2fd35a6,3 +np.float32,0x3eeea2d1,0xbf8cf873,3 +np.float32,0x232e93,0xc2ffb9fa,3 +np.float32,0x3e8c52b9,0xbfef06aa,3 +np.float32,0x7f69c7e3,0x42ffbcef,3 +np.float32,0x3f573e43,0xbe801714,3 +np.float32,0x43b009,0xc2fdd69f,3 +np.float32,0x3ee571ab,0xbf943966,3 +np.float32,0x3ee3d5d8,0xbf958604,3 +np.float32,0x338b12,0xc2fe9fe4,3 +np.float32,0x29cb1f,0xc2ff3ac6,3 +np.float32,0x3f0892b4,0xbf680e7a,3 +np.float32,0x3e8c4f7f,0xbfef0ae9,3 +np.float32,0x7c9d3963,0x42f497e6,3 +np.float32,0x3f26ba84,0xbf1e5f59,3 +np.float32,0x3dd0acc0,0xc052df6f,3 +np.float32,0x3e43fbda,0xc018aa8c,3 +np.float32,0x3ec4fd0f,0xbfb0635d,3 +np.float32,0x3f52c8c6,0xbe8f8d85,3 +np.float32,0x3f5fdc5d,0xbe462fdb,3 +np.float32,0x3f461920,0xbebd6743,3 +np.float32,0x6161ff,0xc2fcc9ef,3 +np.float32,0x7f7ed306,0x42fffc9a,3 +np.float32,0x3d212263,0xc0955f46,3 +np.float32,0x3eca5826,0xbfab6f36,3 +np.float32,0x7d6317ac,0x42f7a77e,3 +np.float32,0x3eb02063,0xbfc50f60,3 +np.float32,0x7f71a6f8,0x42ffd565,3 +np.float32,0x1a3efe,0xc3004935,3 +np.float32,0x3dc599c9,0xc057e856,3 +np.float32,0x3f3e1301,0xbedbf205,3 +np.float32,0xf17d4,0xc301158d,3 +np.float32,0x3f615f84,0xbe3c3d85,3 +np.float32,0x3de63be1,0xc049cb77,3 +np.float32,0x3e8d2f51,0xbfede541,3 +np.float32,0x3a5cdd,0xc2fe441c,3 +np.float32,0x3f443ec0,0xbec4586a,3 +np.float32,0x3eacbd00,0xbfc8a5ad,3 +np.float32,0x3f600f6a,0xbe44df1b,3 +np.float32,0x5f77a6,0xc2fcd89c,3 +np.float32,0x476706,0xc2fdaf28,3 +np.float32,0x2f469,0xc3036fde,3 +np.float32,0x7dc4ba24,0x42f93d77,3 +np.float32,0x3e2d6080,0xc023fb9b,3 +np.float32,0x7e8d7135,0x42fc49c3,3 +np.float32,0x3f589065,0xbe77247b,3 +np.float32,0x3f59e210,0xbe6e2c05,3 +np.float32,0x7f51d388,0x42ff6d15,3 +np.float32,0x7d9a5fda,0x42f88a63,3 +np.float32,0x3e67d5bc,0xc00927ab,3 +np.float32,0x61d72c,0xc2fcc679,3 +np.float32,0x3ef3351d,0xbf897766,3 +np.float32,0x1,0xc3150000,3 +np.float32,0x7f653429,0x42ffae54,3 +np.float32,0x7e1ad3e5,0x42fa8c8e,3 +np.float32,0x3f4ca01d,0xbea57500,3 +np.float32,0x3f7606db,0xbd6ad13e,3 +np.float32,0x7ec4a27d,0x42fd3d1f,3 +np.float32,0x3efe4fd5,0xbf8138c7,3 +np.float32,0x77c2f1,0xc2fc3124,3 +np.float32,0x7e4d3251,0x42fb5c9a,3 +np.float32,0x3f543ac7,0xbe8a8154,3 +np.float32,0x7c3dbe29,0x42f322c4,3 +np.float32,0x408e01,0xc2fdf9a0,3 +np.float32,0x45069b,0xc2fdc829,3 +np.float32,0x3d7ecab7,0xc08037e8,3 +np.float32,0xf8c22,0xc3010a99,3 +np.float32,0x7f69af63,0x42ffbca2,3 +np.float32,0x7ec7d228,0x42fd48fe,3 +np.float32,0xff800000,0xffc00000,3 +np.float32,0xdd7c5,0xc301357c,3 +np.float32,0x143f38,0xc300a90e,3 +np.float32,0x7e65c176,0x42fbb01b,3 +np.float32,0x2c1a9e,0xc2ff1307,3 +np.float32,0x7f6e9224,0x42ffcbeb,3 +np.float32,0x3d32ab39,0xc0909a77,3 +np.float32,0x3e150b42,0xc031f22b,3 +np.float32,0x1f84b4,0xc300059a,3 +np.float32,0x3f71ce21,0xbda88c2a,3 +np.float32,0x2625c4,0xc2ff7e33,3 +np.float32,0x3dd0b293,0xc052dcdc,3 +np.float32,0x625c11,0xc2fcc290,3 +np.float32,0x3f610297,0xbe3e9f24,3 +np.float32,0x7ebdd5e5,0x42fd2320,3 +np.float32,0x3e883458,0xbff486ff,3 +np.float32,0x782313,0xc2fc2ed4,3 +np.float32,0x7f39c843,0x42ff132f,3 +np.float32,0x7f326aa7,0x42fef54d,3 +np.float32,0x4d2c71,0xc2fd75be,3 
+np.float32,0x3f55747c,0xbe86409e,3 +np.float32,0x7f7f0867,0x42fffd34,3 +np.float32,0x321316,0xc2feb53f,3 +np.float32,0x3e1b37ed,0xc02e32b0,3 +np.float32,0x80edf,0xc301fd54,3 +np.float32,0x3f0b08ad,0xbf617607,3 +np.float32,0x7f3f4174,0x42ff28a2,3 +np.float32,0x3d79306d,0xc0813eb0,3 +np.float32,0x3f5f657a,0xbe49413d,3 +np.float32,0x3f56c63a,0xbe81b376,3 +np.float32,0x7f667123,0x42ffb24f,3 +np.float32,0x3f71021b,0xbdb24d43,3 +np.float32,0x7f434ab1,0x42ff380f,3 +np.float32,0x3dcae496,0xc055779c,3 +np.float32,0x3f5a7d88,0xbe6a0f5b,3 +np.float32,0x3cdf5c32,0xc0a64bf5,3 +np.float32,0x3e56222c,0xc0107d11,3 +np.float32,0x561a3a,0xc2fd24df,3 +np.float32,0x7ddd953c,0x42f9955a,3 +np.float32,0x7e35d839,0x42fb035c,3 +np.float32,0x3ec1816c,0xbfb3aeb2,3 +np.float32,0x7c87cfcd,0x42f42bc2,3 +np.float32,0xd9cd,0xc3053baf,3 +np.float32,0x3f388234,0xbef1e5b7,3 +np.float32,0x3edfcaca,0xbf98d47b,3 +np.float32,0x3ef28852,0xbf89fac8,3 +np.float32,0x7f7525df,0x42ffe001,3 +np.float32,0x7f6c33ef,0x42ffc48c,3 +np.float32,0x3ea4a881,0xbfd17e61,3 +np.float32,0x3f3e379f,0xbedb63c6,3 +np.float32,0x3f0524c1,0xbf717301,3 +np.float32,0x3db3e7f0,0xc06091d3,3 +np.float32,0x800000,0xc2fc0000,3 +np.float32,0x3f2f2897,0xbf0c27ce,3 +np.float32,0x7eb1776d,0x42fcf15c,3 +np.float32,0x3f039018,0xbf75dc37,3 +np.float32,0x3c4055,0xc2fe2c96,3 +np.float32,0x3f603653,0xbe43dea5,3 +np.float32,0x7f700d24,0x42ffd07c,3 +np.float32,0x3f4741a3,0xbeb918dc,3 +np.float32,0x3f5fe959,0xbe45da2d,3 +np.float32,0x3f3e4401,0xbedb33b1,3 +np.float32,0x7f0705ff,0x42fe2775,3 +np.float32,0x3ea85662,0xbfcd69b0,3 +np.float32,0x3f15f49f,0xbf458829,3 +np.float32,0x3f17c50e,0xbf411728,3 +np.float32,0x3e483f60,0xc016add2,3 +np.float32,0x3f1ab9e5,0xbf39f71b,3 +np.float32,0x3de0b6fb,0xc04c08fe,3 +np.float32,0x7e671225,0x42fbb452,3 +np.float32,0x80800000,0xffc00000,3 +np.float32,0xe2df3,0xc3012c9d,3 +np.float32,0x3ede1e3c,0xbf9a3770,3 +np.float32,0x3df2ffde,0xc044cfec,3 +np.float32,0x3eed8da5,0xbf8dcf6c,3 +np.float32,0x3ead15c3,0xbfc846e1,3 +np.float32,0x7ef3750a,0x42fddae4,3 +np.float32,0x7e6ab7c0,0x42fbbfe4,3 +np.float32,0x7ea4bbe5,0x42fcba5d,3 +np.float32,0x3f227706,0xbf27f0a1,3 +np.float32,0x3ef39bfd,0xbf89295a,3 +np.float32,0x3f289a20,0xbf1a3edd,3 +np.float32,0x7f225f82,0x42feafb4,3 +np.float32,0x768963,0xc2fc38bc,3 +np.float32,0x3f493c00,0xbeb1ccfc,3 +np.float32,0x3f4e7249,0xbe9ee9a7,3 +np.float32,0x1d0c3a,0xc30023c0,3 +np.float32,0x7f3c5f78,0x42ff1d6a,3 +np.float32,0xff7fffff,0xffc00000,3 +np.float32,0x3ee7896a,0xbf928c2a,3 +np.float32,0x3e788479,0xc002bd2e,3 +np.float32,0x3ee4df17,0xbf94af84,3 +np.float32,0x5e06d7,0xc2fce3d7,3 +np.float32,0x3d7b2776,0xc080e1dc,3 +np.float32,0x3e3d39d3,0xc01be7fd,3 +np.float32,0x7c81dece,0x42f40ab7,3 +np.float32,0x3f7d2085,0xbc856255,3 +np.float32,0x7f7f6627,0x42fffe44,3 +np.float32,0x7f5f2e94,0x42ff9aaa,3 +np.float32,0x7f5835f2,0x42ff8339,3 +np.float32,0x3f6a0e32,0xbe046580,3 +np.float32,0x7e16f586,0x42fa79dd,3 +np.float32,0x3f04a2f2,0xbf72dbc5,3 +np.float32,0x3f35e334,0xbefc7740,3 +np.float32,0x3f0d056e,0xbf5c3824,3 +np.float32,0x7ebeb95e,0x42fd2693,3 +np.float32,0x3c6192,0xc2fe2aff,3 +np.float32,0x3e892b4f,0xbff33958,3 +np.float32,0x3f61d694,0xbe3931df,3 +np.float32,0x29d183,0xc2ff3a56,3 +np.float32,0x7f0b0598,0x42fe3d04,3 +np.float32,0x7f743b28,0x42ffdd3d,3 +np.float32,0x3a2ed6,0xc2fe4663,3 +np.float32,0x3e27403a,0xc0274de8,3 +np.float32,0x3f58ee78,0xbe74a349,3 +np.float32,0x3eaa4b,0xc2fe0f92,3 +np.float32,0x3ecb613b,0xbfaa7de8,3 +np.float32,0x7f637d81,0x42ffa8c9,3 +np.float32,0x3f026e96,0xbf790c73,3 
+np.float32,0x386cdf,0xc2fe5d0c,3 +np.float32,0x35abd1,0xc2fe8202,3 +np.float32,0x3eac3cd1,0xbfc92ee8,3 +np.float32,0x3f567869,0xbe82bf47,3 +np.float32,0x3f65c643,0xbe1faae6,3 +np.float32,0x7f5422b9,0x42ff752b,3 +np.float32,0x7c26e9,0xc2fc168c,3 +np.float32,0x7eff5cfd,0x42fdfe29,3 +np.float32,0x3f728e7f,0xbd9f6142,3 +np.float32,0x3f10fd43,0xbf51f874,3 +np.float32,0x7e7ada08,0x42fbf0fe,3 +np.float32,0x3e82a611,0xbffc37be,3 +np.float32,0xbf800000,0xffc00000,3 +np.float32,0x3dbe2e12,0xc05b711c,3 +np.float32,0x7e768fa9,0x42fbe440,3 +np.float32,0x5e44e8,0xc2fce1f0,3 +np.float32,0x7f25071a,0x42febbae,3 +np.float32,0x3f54db5e,0xbe885339,3 +np.float32,0x3f0f2c26,0xbf56a0b8,3 +np.float32,0x22f9a7,0xc2ffbe55,3 +np.float32,0x7ed63dcb,0x42fd7c77,3 +np.float32,0x7ea4fae2,0x42fcbb78,3 +np.float32,0x3f1d7766,0xbf337b47,3 +np.float32,0x7f16d59f,0x42fe7941,3 +np.float32,0x3f3a1bb6,0xbeeb855c,3 +np.float32,0x3ef57128,0xbf87c709,3 +np.float32,0xb24ff,0xc3018591,3 +np.float32,0x3ef99e27,0xbf84a983,3 +np.float32,0x3eac2ccf,0xbfc94013,3 +np.float32,0x3e9d3e1e,0xbfda00dc,3 +np.float32,0x718213,0xc2fc58c1,3 +np.float32,0x7edbf509,0x42fd8fea,3 +np.float32,0x70c7f1,0xc2fc5d80,3 +np.float32,0x3f7012f5,0xbdbdc6cd,3 +np.float32,0x12cba,0xc304c487,3 +np.float32,0x7f5d445d,0x42ff944c,3 +np.float32,0x7f3e30bd,0x42ff2481,3 +np.float32,0x63b110,0xc2fcb8a0,3 +np.float32,0x3f39f728,0xbeec1680,3 +np.float32,0x3f5bea58,0xbe6074b1,3 +np.float32,0x3f350749,0xbefff679,3 +np.float32,0x3e91ab2c,0xbfe81f3e,3 +np.float32,0x7ec53fe0,0x42fd3f6d,3 +np.float32,0x3f6cbbdc,0xbde72c8e,3 +np.float32,0x3f4df49f,0xbea0abcf,3 +np.float32,0x3e9c9638,0xbfdac674,3 +np.float32,0x7f3b82ec,0x42ff1a07,3 +np.float32,0x7f612a09,0x42ffa132,3 +np.float32,0x7ea26650,0x42fcafd3,3 +np.float32,0x3a615138,0xc122f26d,3 +np.float32,0x3f1108bd,0xbf51db39,3 +np.float32,0x6f80f6,0xc2fc65ea,3 +np.float32,0x3f7cb578,0xbc98ecb1,3 +np.float32,0x7f54d31a,0x42ff7790,3 +np.float32,0x196868,0xc3005532,3 +np.float32,0x3f01ee0a,0xbf7a7925,3 +np.float32,0x3e184013,0xc02ffb11,3 +np.float32,0xadde3,0xc3018ee3,3 +np.float32,0x252a91,0xc2ff9173,3 +np.float32,0x3f0382c2,0xbf7601a9,3 +np.float32,0x6d818c,0xc2fc7345,3 +np.float32,0x3bfbfd,0xc2fe2fdd,3 +np.float32,0x7f3cad19,0x42ff1e9a,3 +np.float32,0x4169a7,0xc2fdefdf,3 +np.float32,0x3f615d96,0xbe3c4a2b,3 +np.float32,0x3f036480,0xbf7656ac,3 +np.float32,0x7f5fbda3,0x42ff9c83,3 +np.float32,0x3d202d,0xc2fe21f1,3 +np.float32,0x3d0f5e5d,0xc09ac3e9,3 +np.float32,0x3f0fff6e,0xbf548142,3 +np.float32,0x7f11ed32,0x42fe60d2,3 +np.float32,0x3e6f856b,0xc00624b6,3 +np.float32,0x7f7c4dd7,0x42fff542,3 +np.float32,0x3e76fb86,0xc0034fa0,3 +np.float32,0x3e8a0d6e,0xbff209e7,3 +np.float32,0x3eacad19,0xbfc8b6ad,3 +np.float32,0xa7776,0xc3019cbe,3 +np.float32,0x3dc84d74,0xc056a754,3 +np.float32,0x3efb8052,0xbf834626,3 +np.float32,0x3f0e55fc,0xbf58cacc,3 +np.float32,0x7e0e71e3,0x42fa4efb,3 +np.float32,0x3ed5a800,0xbfa1639c,3 +np.float32,0x3f33335b,0xbf03babf,3 +np.float32,0x38cad7,0xc2fe5842,3 +np.float32,0x3bc21256,0xc0ecc927,3 +np.float32,0x3f09522d,0xbf660a19,3 +np.float32,0xcbd5d,0xc3015428,3 +np.float32,0x492752,0xc2fd9d42,3 +np.float32,0x3f2b9b32,0xbf13b904,3 +np.float32,0x6544ac,0xc2fcad09,3 +np.float32,0x52eb12,0xc2fd40b5,3 +np.float32,0x3f66a7c0,0xbe1a03e8,3 +np.float32,0x7ab289,0xc2fc1f41,3 +np.float32,0x62af5e,0xc2fcc020,3 +np.float32,0x7f73e9cf,0x42ffdc46,3 +np.float32,0x3e5eca,0xc2fe130e,3 +np.float32,0x3e3a10f4,0xc01d7602,3 +np.float32,0x3f04db46,0xbf723f0d,3 +np.float32,0x18fc4a,0xc3005b63,3 +np.float32,0x525bcb,0xc2fd45b6,3 
+np.float32,0x3f6b9108,0xbdf5c769,3 +np.float32,0x3e992e8c,0xbfded5c5,3 +np.float32,0x7efea647,0x42fdfc18,3 +np.float32,0x7e8371db,0x42fc139e,3 +np.float32,0x3f397cfb,0xbeedfc69,3 +np.float32,0x7e46d233,0x42fb454a,3 +np.float32,0x7d5281ad,0x42f76f79,3 +np.float32,0x7f4c1878,0x42ff58a1,3 +np.float32,0x3e96ca5e,0xbfe1bd97,3 +np.float32,0x6a2743,0xc2fc8a3d,3 +np.float32,0x7f688781,0x42ffb8f8,3 +np.float32,0x7814b7,0xc2fc2f2d,3 +np.float32,0x3f2ffdc9,0xbf0a6756,3 +np.float32,0x3f766fa8,0xbd60fe24,3 +np.float32,0x4dc64e,0xc2fd7003,3 +np.float32,0x3a296f,0xc2fe46a8,3 +np.float32,0x3f2af942,0xbf15162e,3 +np.float32,0x7f702c32,0x42ffd0dc,3 +np.float32,0x7e61e318,0x42fba390,3 +np.float32,0x7f7d3bdb,0x42fff7fa,3 +np.float32,0x3ee87f3f,0xbf91c881,3 +np.float32,0x2bbc28,0xc2ff193c,3 +np.float32,0x3e01f918,0xc03e966e,3 +np.float32,0x7f0b39f4,0x42fe3e1a,3 +np.float32,0x3eaa4d64,0xbfcb4516,3 +np.float32,0x3e53901e,0xc0119a88,3 +np.float32,0x603cb,0xc3026957,3 +np.float32,0x7e81f926,0x42fc0b4d,3 +np.float32,0x5dab7c,0xc2fce6a6,3 +np.float32,0x3f46fefd,0xbeba1018,3 +np.float32,0x648448,0xc2fcb28a,3 +np.float32,0x3ec49470,0xbfb0c58b,3 +np.float32,0x3e8a5393,0xbff1ac2b,3 +np.float32,0x3f27ccfc,0xbf1c014e,3 +np.float32,0x3ed886e6,0xbf9eeca8,3 +np.float32,0x7cfbe06e,0x42f5f401,3 +np.float32,0x3f5aa7ba,0xbe68f229,3 +np.float32,0x9500d,0xc301c7e3,3 +np.float32,0x3f4861,0xc2fe0853,3 +np.float32,0x3e5ae104,0xc00e76f5,3 +np.float32,0x71253a,0xc2fc5b1e,3 +np.float32,0xcf7b8,0xc3014d9c,3 +np.float32,0x7f7edd2d,0x42fffcb7,3 +np.float32,0x3e9039ee,0xbfe9f5ab,3 +np.float32,0x2fd54e,0xc2fed712,3 +np.float32,0x3f600752,0xbe45147a,3 +np.float32,0x3f4da8f6,0xbea1bb5c,3 +np.float32,0x3f2d34a9,0xbf104bd9,3 +np.float32,0x3e1e66dd,0xc02c52d2,3 +np.float32,0x798276,0xc2fc2670,3 +np.float32,0xd55e2,0xc3014347,3 +np.float32,0x80000001,0xffc00000,3 +np.float32,0x3e7a5ead,0xc0020da6,3 +np.float32,0x7ec4c744,0x42fd3da9,3 +np.float32,0x597e00,0xc2fd085a,3 +np.float32,0x3dff6bf4,0xc0403575,3 +np.float32,0x5d6f1a,0xc2fce883,3 +np.float32,0x7e21faff,0x42faadea,3 +np.float32,0x3e570fea,0xc01016c6,3 +np.float32,0x28e6b6,0xc2ff4ab7,3 +np.float32,0x7e77062d,0x42fbe5a3,3 +np.float32,0x74cac4,0xc2fc43b0,3 +np.float32,0x3f707273,0xbdb93078,3 +np.float32,0x228e96,0xc2ffc737,3 +np.float32,0x686ac1,0xc2fc966b,3 +np.float32,0x3d76400d,0xc081cae8,3 +np.float32,0x3e9f502f,0xbfd7966b,3 +np.float32,0x3f6bc656,0xbdf32b1f,3 +np.float32,0x3edb828b,0xbf9c65d4,3 +np.float32,0x6c6e56,0xc2fc7a8e,3 +np.float32,0x3f04552e,0xbf73b48f,3 +np.float32,0x3f39cb69,0xbeecc457,3 +np.float32,0x7f681c44,0x42ffb7a3,3 +np.float32,0x7f5b44ee,0x42ff8d99,3 +np.float32,0x3e71430a,0xc005798d,3 +np.float32,0x3edcfde3,0xbf9b27c6,3 +np.float32,0x3f616a5a,0xbe3bf67f,3 +np.float32,0x3f523936,0xbe918548,3 +np.float32,0x3f39ce3a,0xbeecb925,3 +np.float32,0x3eac589a,0xbfc91120,3 +np.float32,0x7efc8d3d,0x42fdf5fc,3 +np.float32,0x5704b0,0xc2fd1d0f,3 +np.float32,0x7e7972e9,0x42fbecda,3 +np.float32,0x3eb0811c,0xbfc4aa13,3 +np.float32,0x7f1efcbb,0x42fea023,3 +np.float32,0x3e0b9e32,0xc037fa6b,3 +np.float32,0x7eef6a48,0x42fdce87,3 +np.float32,0x3cc0a373,0xc0ad20c0,3 +np.float32,0x3f2a75bb,0xbf1632ba,3 +np.float32,0x0,0xff800000,3 +np.float32,0x7ecdb6f4,0x42fd5e77,3 +np.float32,0x7f2e2dfd,0x42fee38d,3 +np.float32,0x3ee17f6e,0xbf976d8c,3 +np.float32,0x3f51e7ee,0xbe92a319,3 +np.float32,0x3f06942f,0xbf6d7d3c,3 +np.float32,0x3f7ba528,0xbccac6f1,3 +np.float32,0x3f413787,0xbecfd513,3 +np.float32,0x3e085e48,0xc03a2716,3 +np.float32,0x7e4c5e0e,0x42fb599c,3 +np.float32,0x306f76,0xc2fecdd4,3 
+np.float32,0x7f5c2203,0x42ff9081,3 +np.float32,0x3d5355b4,0xc088da05,3 +np.float32,0x9a2a,0xc305bb4f,3 +np.float32,0x3db93a1f,0xc05de0db,3 +np.float32,0x4e50c6,0xc2fd6ae4,3 +np.float32,0x7ec4afed,0x42fd3d51,3 +np.float32,0x3a8f27,0xc2fe41a0,3 +np.float32,0x7f213caf,0x42feaa84,3 +np.float32,0x7e7b5f00,0x42fbf286,3 +np.float32,0x7e367194,0x42fb05ca,3 +np.float32,0x7f56e6de,0x42ff7ebd,3 +np.float32,0x3ed7383e,0xbfa00aef,3 +np.float32,0x7e844752,0x42fc184a,3 +np.float32,0x15157,0xc3049a19,3 +np.float32,0x3f78cd92,0xbd28824a,3 +np.float32,0x7ecddb16,0x42fd5ef9,3 +np.float32,0x3e479f16,0xc016f7d8,3 +np.float32,0x3f5cb418,0xbe5b2bd3,3 +np.float32,0x7c0934cb,0x42f2334e,3 +np.float32,0x3ebe5505,0xbfb6bc69,3 +np.float32,0x3eb1335a,0xbfc3eff5,3 +np.float32,0x3f2488a3,0xbf234444,3 +np.float32,0x642906,0xc2fcb52a,3 +np.float32,0x3da635fa,0xc067e15a,3 +np.float32,0x7e0d80db,0x42fa4a15,3 +np.float32,0x4f0b9d,0xc2fd640a,3 +np.float32,0x7e083806,0x42fa2df8,3 +np.float32,0x7f77f8c6,0x42ffe877,3 +np.float32,0x3e7bb46a,0xc0018ff5,3 +np.float32,0x3f06eb2e,0xbf6c8eca,3 +np.float32,0x7eae8f7c,0x42fce52a,3 +np.float32,0x3de481a0,0xc04a7d7f,3 +np.float32,0x3eed4311,0xbf8e096f,3 +np.float32,0x3f7b0300,0xbce8903d,3 +np.float32,0x3811b,0xc30330dd,3 +np.float32,0x3eb6f8e1,0xbfbe04bc,3 +np.float32,0x3ec35210,0xbfb1f55a,3 +np.float32,0x3d386916,0xc08f24a5,3 +np.float32,0x3f1fa197,0xbf2e704d,3 +np.float32,0x7f2020a5,0x42fea56a,3 +np.float32,0x7e1ea53f,0x42fa9e8c,3 +np.float32,0x3f148903,0xbf490bf9,3 +np.float32,0x3f2f56a0,0xbf0bc6c9,3 +np.float32,0x7da9fc,0xc2fc0d9b,3 +np.float32,0x3d802134,0xc07fe810,3 +np.float32,0x3f6cb927,0xbde74e57,3 +np.float32,0x7e05b125,0x42fa2023,3 +np.float32,0x3f3307f9,0xbf041433,3 +np.float32,0x5666bf,0xc2fd2250,3 +np.float32,0x3f51c93b,0xbe930f28,3 +np.float32,0x3eb5dcfe,0xbfbf241e,3 +np.float32,0xb2773,0xc301853f,3 +np.float32,0x7f4dee96,0x42ff5f3f,3 +np.float32,0x3e3f5c33,0xc01adee1,3 +np.float32,0x3f2ed29a,0xbf0cdd4a,3 +np.float32,0x3e3c01ef,0xc01c80ab,3 +np.float32,0x3ec2236e,0xbfb31458,3 +np.float32,0x7e841dc4,0x42fc1761,3 +np.float32,0x3df2cd8e,0xc044e30c,3 +np.float32,0x3f010901,0xbf7d0670,3 +np.float32,0x3c05ceaa,0xc0ddf39b,3 +np.float32,0x3f517226,0xbe944206,3 +np.float32,0x3f23c83d,0xbf24f522,3 +np.float32,0x7fc9da,0xc2fc0139,3 +np.float32,0x7f1bde53,0x42fe9181,3 +np.float32,0x3ea3786c,0xbfd2d4a5,3 +np.float32,0x3e83a71b,0xbffacdd2,3 +np.float32,0x3f6f0d4f,0xbdca61d5,3 +np.float32,0x7f5ab613,0x42ff8bb7,3 +np.float32,0x3ab1ec,0xc2fe3fea,3 +np.float32,0x4fbf58,0xc2fd5d82,3 +np.float32,0x3dea141b,0xc0484403,3 +np.float32,0x7d86ad3b,0x42f8258f,3 +np.float32,0x7f345315,0x42fefd29,3 +np.float32,0x3f3752fe,0xbef6a780,3 +np.float32,0x64830d,0xc2fcb293,3 +np.float32,0x3d9dc1eb,0xc06cb32a,3 +np.float32,0x3f2f935a,0xbf0b46f6,3 +np.float32,0xb90a4,0xc30177e3,3 +np.float32,0x4111dd,0xc2fdf3c1,3 +np.float32,0x3d4cd078,0xc08a4c68,3 +np.float32,0x3e95c3f1,0xbfe30011,3 +np.float32,0x3ec9f356,0xbfabcb4e,3 +np.float32,0x1b90d5,0xc3003717,3 +np.float32,0xee70f,0xc3011a3e,3 +np.float32,0x7fa00000,0x7fe00000,3 +np.float32,0x3f74cdb6,0xbd8422af,3 +np.float32,0x3d9b56fe,0xc06e2037,3 +np.float32,0x3f1853df,0xbf3fbc40,3 +np.float32,0x7d86a011,0x42f82547,3 +np.float32,0x3dff9629,0xc0402634,3 +np.float32,0x46f8c9,0xc2fdb39f,3 +np.float32,0x3e9b410b,0xbfdc5a87,3 +np.float32,0x3f5aed42,0xbe671cac,3 +np.float32,0x3b739886,0xc101257f,3 +np.float64,0x3fe2f58d6565eb1b,0xbfe82a641138e19a,2 +np.float64,0x3fee7f0642fcfe0d,0xbfb1c702f6974932,2 +np.float64,0x25b71f244b6e5,0xc090030d3b3c5d2b,2 
+np.float64,0x8c9cc8e1193b,0xc0900b752a678fa8,2 +np.float64,0x3fd329b5d326536c,0xbffbd607f6db945c,2 +np.float64,0x3fb5109b3a2a2136,0xc00cd36bd15dfb18,2 +np.float64,0x3fd5393ae12a7276,0xbff97a7e4a157154,2 +np.float64,0x3fd374d1b926e9a3,0xbffb7c3e1a3a7ed3,2 +np.float64,0x3fe2c7f4e2658fea,0xbfe899f15ca78fcb,2 +np.float64,0x7fe3d6b81ee7ad6f,0x408ffa7b63d407ee,2 +np.float64,0x3fe086d097e10da1,0xbfee81456ce8dd03,2 +np.float64,0x7fd374a64ca6e94c,0x408ff241c7306d39,2 +np.float64,0x3fc0709a5b20e135,0xc007afdede31b29c,2 +np.float64,0x3fd4218f4b28431f,0xbffab2c696966e2d,2 +np.float64,0x143134c828628,0xc09006a8372c4d8a,2 +np.float64,0x3f8bd0aa0037a154,0xc018cf0e8b9c3107,2 +np.float64,0x7fe0ce905ee19d20,0x408ff8915e71bd67,2 +np.float64,0x3fda0f5f32b41ebe,0xbff4bd5e0869e820,2 +np.float64,0x7fe9ae63d0b35cc7,0x408ffd760ca4f292,2 +np.float64,0x3fe75abd9eeeb57b,0xbfdd1476fc8b3089,2 +np.float64,0x786c3110f0d87,0xc08ff8b44cedbeea,2 +np.float64,0x22c5fe80458d,0xc09013853591c2f2,2 +np.float64,0x3fdc250797384a0f,0xbff2f6a02c961f0b,2 +np.float64,0x3fa2b367b02566cf,0xc013199238485054,2 +np.float64,0x3fd26a910ca4d522,0xbffcc0e2089b1c0c,2 +np.float64,0x8068d3b300d1b,0xc08ff7f690210aac,2 +np.float64,0x3fe663bfa9ecc77f,0xbfe07cd95a43a5ce,2 +np.float64,0x3fd0ddb07321bb61,0xbffec886665e895e,2 +np.float64,0x3f91c730b0238e61,0xc0176452badc8d22,2 +np.float64,0x4dd10d309ba22,0xc08ffdbe738b1d8d,2 +np.float64,0x7fe322afa4a6455e,0x408ffa10c038f9de,2 +np.float64,0x7fdf7f7c42befef8,0x408ff7d147ddaad5,2 +np.float64,0x7fd673f386ace7e6,0x408ff3e920d00eef,2 +np.float64,0x3feaebfcadb5d7f9,0xbfcfe8ec27083478,2 +np.float64,0x3fdc6dc23738db84,0xbff2bb46794f07b8,2 +np.float64,0xcd8819599b103,0xc08ff288c5b2cf0f,2 +np.float64,0xfda00e77fb402,0xc08ff01b895d2236,2 +np.float64,0x840b02ff08161,0xc08ff7a41e41114c,2 +np.float64,0x3fbdce3a383b9c74,0xc008d1e61903a289,2 +np.float64,0x3fd24ed3c4a49da8,0xbffce3c12136b6d3,2 +np.float64,0x3fe8d0834131a107,0xbfd77b194e7051d4,2 +np.float64,0x3fdd0cb11aba1962,0xbff23b9dbd554455,2 +np.float64,0x1a32d97e3465c,0xc090052781a37271,2 +np.float64,0x3fdb09d2b1b613a5,0xbff3e396b862bd83,2 +np.float64,0x3fe04c848aa09909,0xbfef2540dd90103a,2 +np.float64,0x3fce0c48613c1891,0xc000b9f76877d744,2 +np.float64,0x3fc37109a226e213,0xc005c05d8b2b9a2f,2 +np.float64,0x81cf3837039e7,0xc08ff7d686517dff,2 +np.float64,0xd9342c29b2686,0xc08ff1e591c9a895,2 +np.float64,0x7fec731b0638e635,0x408ffea4884550a9,2 +np.float64,0x3fba0fc138341f82,0xc00a5e839b085f64,2 +np.float64,0x7fdda893b03b5126,0x408ff71f7c5a2797,2 +np.float64,0xd2a4bb03a5498,0xc08ff2402f7a907c,2 +np.float64,0x3fea61fb0d34c3f6,0xbfd1d293fbe76183,2 +np.float64,0x3fed5cf486fab9e9,0xbfbfc2e01a7ffff1,2 +np.float64,0x3fcbabc2bf375785,0xc001ad7750c9dbdf,2 +np.float64,0x3fdb5fff53b6bfff,0xbff39a7973a0c6a5,2 +np.float64,0x7feef05a00bde0b3,0x408fff9c5cbc8651,2 +np.float64,0xb1cf24f1639e5,0xc08ff434de10fffb,2 +np.float64,0x3fa583989c2b0731,0xc0124a8a3bbf18ce,2 +np.float64,0x7feae90bf9f5d217,0x408ffe002e7bbbea,2 +np.float64,0x3fe9ef41c4b3de84,0xbfd367878ae4528e,2 +np.float64,0x9be24ce337c4a,0xc08ff5b9b1c31cf9,2 +np.float64,0x3fe916894cb22d13,0xbfd677f915d58503,2 +np.float64,0x3fec1bab20f83756,0xbfc7f2777aabe8ee,2 +np.float64,0x3feaabf2873557e5,0xbfd0d11f28341233,2 +np.float64,0x3fd4d3c3b529a787,0xbff9e9e47acc8ca9,2 +np.float64,0x3fe4cfe96c699fd3,0xbfe3dc53fa739169,2 +np.float64,0xccfdb97399fb7,0xc08ff2908d893400,2 +np.float64,0x3fec7598be78eb31,0xbfc5a750f8f3441a,2 +np.float64,0x355be5fc6ab7e,0xc090010ca315b50b,2 +np.float64,0x3fba9f9074353f21,0xc00a1f80eaf5e581,2 
+np.float64,0x7fdcaff189395fe2,0x408ff6bd1c5b90d9,2 +np.float64,0x3fd94d3b64b29a77,0xbff56be1b43d25f3,2 +np.float64,0x4e5f29949cbe6,0xc08ffda972da1d73,2 +np.float64,0x3fe654e2d9aca9c6,0xbfe09b88dcd8f15d,2 +np.float64,0x7fdc130190b82602,0x408ff67d496c1a27,2 +np.float64,0x3fbcd4701e39a8e0,0xc009343e36627e80,2 +np.float64,0x7fdaa4d38f3549a6,0x408ff5e2c6d8678f,2 +np.float64,0x3febe95e5237d2bd,0xbfc93e16d453fe3a,2 +np.float64,0x9ef5ca553deba,0xc08ff57ff4f7883d,2 +np.float64,0x7fe878e91170f1d1,0x408ffce795868fc8,2 +np.float64,0x3fe63dff466c7bff,0xbfe0caf2b79c9e5f,2 +np.float64,0x6561446ccac29,0xc08ffab0e383834c,2 +np.float64,0x30c6c2ae618d9,0xc09001914b30381b,2 +np.float64,0x7ff0000000000000,0x7ff0000000000000,2 +np.float64,0x3fe5c9daf1ab93b6,0xbfe1be81baf4dbdb,2 +np.float64,0x3fe0a03e24a1407c,0xbfee3a73c4c0e8f8,2 +np.float64,0xff2a2cf3fe546,0xc08ff009a7e6e782,2 +np.float64,0x7fcf0332213e0663,0x408fefa36235e210,2 +np.float64,0x3fb612affc2c2560,0xc00c494be9c8c33b,2 +np.float64,0x3fd2b259702564b3,0xbffc67967f077e75,2 +np.float64,0x7fcb63685d36c6d0,0x408fee343343f913,2 +np.float64,0x3fe369f1d5a6d3e4,0xbfe71251139939ad,2 +np.float64,0x3fdd17c618ba2f8c,0xbff232d11c986251,2 +np.float64,0x3f92cc8040259901,0xc01711d8e06b52ee,2 +np.float64,0x69a81dc2d3504,0xc08ffa36cdaf1141,2 +np.float64,0x3fea0fad99b41f5b,0xbfd2f4625a652645,2 +np.float64,0xd1cd5799a39ab,0xc08ff24c02b90d26,2 +np.float64,0x324e59ce649cc,0xc0900163ad091c76,2 +np.float64,0x3fc3d460a227a8c1,0xc00585f903dc7a7f,2 +np.float64,0xa7185ec74e30c,0xc08ff4ec7d65ccd9,2 +np.float64,0x3fa254eaac24a9d5,0xc01337053963321a,2 +np.float64,0x3feaeb112435d622,0xbfcfef3be17f81f6,2 +np.float64,0x60144c3ac028a,0xc08ffb4f8eb94595,2 +np.float64,0x7fa4d2ec6829a5d8,0x408fdb0a9670ab83,2 +np.float64,0x3fed1372f97a26e6,0xbfc1b1fe50d48a55,2 +np.float64,0x3fd5ade5972b5bcb,0xbff8fcf28f525031,2 +np.float64,0x7fe72e335bee5c66,0x408ffc4759236437,2 +np.float64,0x7fdfafab143f5f55,0x408ff7e2e22a8129,2 +np.float64,0x3fe90d0db9321a1b,0xbfd69ae5fe10eb9e,2 +np.float64,0x7fe20a59072414b1,0x408ff962a2492484,2 +np.float64,0x3fed853690bb0a6d,0xbfbdc9dc5f199d2b,2 +np.float64,0x3fd709d469ae13a9,0xbff795a218deb700,2 +np.float64,0x3fe21c35f5e4386c,0xbfea47d71789329b,2 +np.float64,0x9ea5ec053d4be,0xc08ff585c2f6b7a3,2 +np.float64,0x3fc0580f9e20b01f,0xc007c1268f49d037,2 +np.float64,0xd99127abb3225,0xc08ff1e0a1ff339d,2 +np.float64,0x3fdc8c9bbfb91937,0xbff2a2478354effb,2 +np.float64,0x3fe15fc6b162bf8d,0xbfec323ac358e008,2 +np.float64,0xffefffffffffffff,0x7ff8000000000000,2 +np.float64,0x3fee341afb3c6836,0xbfb556b6faee9a84,2 +np.float64,0x3fe4b64c56296c99,0xbfe4154835ad2afe,2 +np.float64,0x85de22810bbc5,0xc08ff77b914fe5b5,2 +np.float64,0x3fd22c72e3a458e6,0xbffd0f4269d20bb9,2 +np.float64,0xc090e5218123,0xc09009a4a65a8a8f,2 +np.float64,0x7fd9641692b2c82c,0x408ff5547782bdfc,2 +np.float64,0x3fd9b9cb28b37396,0xbff509a8fb59a9f1,2 +np.float64,0x3fcd2726f93a4e4e,0xc001135059a22117,2 +np.float64,0x3fa4b493d4296928,0xc0128323c7a55f4a,2 +np.float64,0x47455e788e8ac,0xc08ffec2101c1e82,2 +np.float64,0x3fe0d7e2e261afc6,0xbfeda0f1e2d0f4bd,2 +np.float64,0x3fe860fc5b70c1f9,0xbfd91dc42eaf72c2,2 +np.float64,0xa5d7805b4baf0,0xc08ff502bc819ff6,2 +np.float64,0xd83395b1b0673,0xc08ff1f33c3f94c2,2 +np.float64,0x3f865972e02cb2e6,0xc01a1243651565c8,2 +np.float64,0x52fc6952a5f8e,0xc08ffd006b158179,2 +np.float64,0x7fecac6c793958d8,0x408ffebbb1c09a70,2 +np.float64,0x7fe621ff606c43fe,0x408ffbbeb2b1473a,2 +np.float64,0x3fdb9f3f9db73e7f,0xbff365610c52bda7,2 +np.float64,0x7feab92992757252,0x408ffdeb92a04813,2 
+np.float64,0xcc46c79f988d9,0xc08ff29adf03fb7c,2 +np.float64,0x3fe3156a03262ad4,0xbfe7dd0f598781c7,2 +np.float64,0x3fc00e3a61201c75,0xc007f5c121a87302,2 +np.float64,0x3fdce8e9f739d1d4,0xbff2581d41ef50ef,2 +np.float64,0x0,0xfff0000000000000,2 +np.float64,0x7d373ac4fa6e8,0xc08ff840fa8beaec,2 +np.float64,0x3fee41e0653c83c1,0xbfb4ae786f2a0d54,2 +np.float64,0x3ff0000000000000,0x0,2 +np.float64,0x7feca6fff9794dff,0x408ffeb982a70556,2 +np.float64,0x7fc532716d2a64e2,0x408feb3f0f6c095b,2 +np.float64,0x3fe4ec2954a9d853,0xbfe39dd44aa5a040,2 +np.float64,0x7fd3321d52a6643a,0x408ff21a0ab9cd85,2 +np.float64,0x7fd8f1b2dfb1e365,0x408ff52001fa7922,2 +np.float64,0x3fee5e58cabcbcb2,0xbfb3539734a24d8b,2 +np.float64,0x3feebf6e7dfd7edd,0xbfad7c648f025102,2 +np.float64,0x6008026ec0101,0xc08ffb5108b54a93,2 +np.float64,0x3fea06f5e2340dec,0xbfd3134a48283360,2 +np.float64,0x41cad13c8395b,0xc08fffae654b2426,2 +np.float64,0x7fedb5c9353b6b91,0x408fff249f1f32b6,2 +np.float64,0xe00c5af9c018c,0xc08ff189e68c655f,2 +np.float64,0x7feac398ddf58731,0x408ffdf01374de9f,2 +np.float64,0x3fed21127c7a4225,0xbfc15b8cf55628fa,2 +np.float64,0x3fd3446711a688ce,0xbffbb5f7252a9fa3,2 +np.float64,0x7fe75fa07a6ebf40,0x408ffc5fdb096018,2 +np.float64,0x3feeb1618cbd62c3,0xbfaece3bd0863070,2 +np.float64,0x7f5226e180244dc2,0x408fb174d506e52f,2 +np.float64,0x3fcd67deca3acfbe,0xc000f9cd7a490749,2 +np.float64,0xdc6f30efb8de6,0xc08ff1b9f2a22d2e,2 +np.float64,0x9c14931338293,0xc08ff5b5f975ec5d,2 +np.float64,0x7fe93e802df27cff,0x408ffd4354eba0e0,2 +np.float64,0x3feb92ae5077255d,0xbfcb7f2084e44dbb,2 +np.float64,0xd78dbfddaf1b8,0xc08ff1fc19fa5a13,2 +np.float64,0x7fe14c301fa2985f,0x408ff8e666cb6592,2 +np.float64,0xbda3d8b77b47b,0xc08ff37689f4b2e5,2 +np.float64,0x8a42953b14853,0xc08ff71c2db3b8cf,2 +np.float64,0x7fe4ca7e186994fb,0x408ffb05e94254a7,2 +np.float64,0x7fe92ffc5e325ff8,0x408ffd3cb0265b12,2 +np.float64,0x91b262912364d,0xc08ff681619be214,2 +np.float64,0x33fe2b0667fc6,0xc0900132f3fab55e,2 +np.float64,0x3fde10e9183c21d2,0xbff17060fb4416c7,2 +np.float64,0xb6b811cb6d702,0xc08ff3e46303b541,2 +np.float64,0x3fe4a7bda0a94f7b,0xbfe435c6481cd0e3,2 +np.float64,0x7fd9fe6057b3fcc0,0x408ff599c79a822c,2 +np.float64,0x3fef44bf917e897f,0xbfa11484e351a6e9,2 +np.float64,0x3fe57d701daafae0,0xbfe2618ab40fc01b,2 +np.float64,0x7fe52d2adbaa5a55,0x408ffb3c2fb1c99d,2 +np.float64,0xb432f66d6865f,0xc08ff40d6b4084fe,2 +np.float64,0xbff0000000000000,0x7ff8000000000000,2 +np.float64,0x7fecd2292bf9a451,0x408ffecad860de6f,2 +np.float64,0x3fddd2ae153ba55c,0xbff1a059adaca33e,2 +np.float64,0x3fee55d6e5bcabae,0xbfb3bb1c6179d820,2 +np.float64,0x7fc1d0085623a010,0x408fe93d16ada7a7,2 +np.float64,0x829b000105360,0xc08ff7c47629a68f,2 +np.float64,0x7fe1e0257523c04a,0x408ff94782cf0717,2 +np.float64,0x7fd652f9ad2ca5f2,0x408ff3d820ec892e,2 +np.float64,0x3fef2246203e448c,0xbfa444ab6209d8cd,2 +np.float64,0x3fec6c0ae178d816,0xbfc5e559ebd4e790,2 +np.float64,0x3fe6ddfee92dbbfe,0xbfdf06dd7d3fa7a8,2 +np.float64,0x3fb7fbcbea2ff798,0xc00b5404d859d148,2 +np.float64,0x7feb9a154d37342a,0x408ffe4b26c29e55,2 +np.float64,0x3fe4db717aa9b6e3,0xbfe3c2c6b3ef13bc,2 +np.float64,0x3fbae17dda35c2fc,0xc00a030f7f4b37e7,2 +np.float64,0x7fd632b9082c6571,0x408ff3c76826ef19,2 +np.float64,0x7fc4184a15283093,0x408feaa14adf00be,2 +np.float64,0x3fe052d19920a5a3,0xbfef136b5df81a3e,2 +np.float64,0x7fe38b872b67170d,0x408ffa4f51aafc86,2 +np.float64,0x3fef9842d03f3086,0xbf92d3d2a21d4be2,2 +np.float64,0x9cea662139d4d,0xc08ff5a634810daa,2 +np.float64,0x3fe35f0855e6be11,0xbfe72c4b564e62aa,2 
+np.float64,0x3fecee3d3779dc7a,0xbfc29ee942f8729e,2 +np.float64,0x3fe7903fd72f2080,0xbfdc41db9b5f4048,2 +np.float64,0xb958889572b11,0xc08ff3ba366cf84b,2 +np.float64,0x3fcb3a67c53674d0,0xc001dd21081ad1ea,2 +np.float64,0xe3b1b53fc7637,0xc08ff15a3505e1ce,2 +np.float64,0xe5954ae9cb2aa,0xc08ff141cbbf0ae4,2 +np.float64,0x3fe394af74e7295f,0xbfe6ad1d13f206e8,2 +np.float64,0x7fe21dd704643bad,0x408ff96f13f80c1a,2 +np.float64,0x3fd23a7cf02474fa,0xbffcfd7454117a05,2 +np.float64,0x7fe257515e24aea2,0x408ff99378764d52,2 +np.float64,0x7fe4c5d0a6e98ba0,0x408ffb03503cf939,2 +np.float64,0x3fadc2c1603b8583,0xc0106b2c17550e3a,2 +np.float64,0x3fc0f7f02421efe0,0xc007525ac446864c,2 +np.float64,0x3feaf0b27275e165,0xbfcfc8a03eaa32ad,2 +np.float64,0x5ce7503cb9ceb,0xc08ffbb2de365fa8,2 +np.float64,0x2a0014f654003,0xc090026e41761a0d,2 +np.float64,0x7fe2c848a8e59090,0x408ff9d9b723ee89,2 +np.float64,0x7f66f54bc02dea97,0x408fbc2ae0ec5623,2 +np.float64,0xa35a890146b6,0xc0900a97b358ddbd,2 +np.float64,0x7fee267ded7c4cfb,0x408fff501560c9f5,2 +np.float64,0x3fe07c328520f865,0xbfee9ef7c3435b58,2 +np.float64,0x3fe67122cf6ce246,0xbfe06147001932ba,2 +np.float64,0x3fdacc8925359912,0xbff41824cece219e,2 +np.float64,0xffa3047fff461,0xc08ff00431ec9be3,2 +np.float64,0x3e1af43e7c35f,0xc090002c6573d29b,2 +np.float64,0x86fa94590df53,0xc08ff7632525ed92,2 +np.float64,0x7fec4c76227898eb,0x408ffe94d032c657,2 +np.float64,0x7fe2274ce1e44e99,0x408ff975194cfdff,2 +np.float64,0x7fe670e1b4ace1c2,0x408ffbe78cc451de,2 +np.float64,0x7fe853871db0a70d,0x408ffcd5e6a6ff47,2 +np.float64,0x3fcbf265db37e4cc,0xc0019026336e1176,2 +np.float64,0x3fef033cef3e067a,0xbfa726712eaae7f0,2 +np.float64,0x5d74973abae94,0xc08ffba15e6bb992,2 +np.float64,0x7fdd9c99b6bb3932,0x408ff71ad24a7ae0,2 +np.float64,0xbdc8e09b7b91c,0xc08ff3744939e9a3,2 +np.float64,0xdbfcff71b7fa0,0xc08ff1bfeecc9dfb,2 +np.float64,0xf9b38cf5f3672,0xc08ff0499af34a43,2 +np.float64,0x3fea820aa6b50415,0xbfd162a38e1927b1,2 +np.float64,0x3fe67f59a12cfeb3,0xbfe04412adca49dc,2 +np.float64,0x3feb301d9c76603b,0xbfce17e6edeb92d5,2 +np.float64,0x828ce00b0519c,0xc08ff7c5b5c57cde,2 +np.float64,0x4f935e229f26c,0xc08ffd7c67c1c54f,2 +np.float64,0x7fcd139e023a273b,0x408feee4f12ff11e,2 +np.float64,0x666a9944ccd54,0xc08ffa92d5e5cd64,2 +np.float64,0x3fe792f0fa6f25e2,0xbfdc374fda28f470,2 +np.float64,0xe996029bd32c1,0xc08ff10eb9b47a11,2 +np.float64,0x3fe7b0dd1eef61ba,0xbfdbc2676dc77db0,2 +np.float64,0x7fd3ec0127a7d801,0x408ff287bf47e27d,2 +np.float64,0x3fe793a8ea6f2752,0xbfdc347f7717e48d,2 +np.float64,0x7fdb89d15e3713a2,0x408ff64457a13ea2,2 +np.float64,0x3fe35b3cbbe6b679,0xbfe73557c8321b70,2 +np.float64,0x66573c94ccae8,0xc08ffa9504af7eb5,2 +np.float64,0x3fc620a2302c4144,0xc00442036b944a67,2 +np.float64,0x49b2fe0693660,0xc08ffe5f131c3c7e,2 +np.float64,0x7fda936cdfb526d9,0x408ff5db3ab3f701,2 +np.float64,0xc774ceef8ee9a,0xc08ff2e16d082fa1,2 +np.float64,0x4da9f8a09b55,0xc0900ee2206d0c88,2 +np.float64,0x3fe2ca5d5ae594bb,0xbfe89406611a5f1a,2 +np.float64,0x7fe0832497e10648,0x408ff85d1de6056e,2 +np.float64,0x3fe6a9e3222d53c6,0xbfdfda35a9bc2de1,2 +np.float64,0x3fed3d92c8ba7b26,0xbfc0a73620db8b98,2 +np.float64,0x3fdd2ec093ba5d81,0xbff2209cf78ce3f1,2 +np.float64,0x62fcb968c5f98,0xc08ffaf775a593c7,2 +np.float64,0xfcfb019ff9f60,0xc08ff0230e95bd16,2 +np.float64,0x3fd7a63e8f2f4c7d,0xbff6faf4fff7dbe0,2 +np.float64,0x3fef23b0ec3e4762,0xbfa4230cb176f917,2 +np.float64,0x340d1e6a681a5,0xc09001314b68a0a2,2 +np.float64,0x7fc0b85ba02170b6,0x408fe8821487b802,2 +np.float64,0x7fe9976e84f32edc,0x408ffd6bb6aaf467,2 
+np.float64,0x329a0e9e65343,0xc090015b044e3270,2 +np.float64,0x3fea4928d3f49252,0xbfd2299b05546eab,2 +np.float64,0x3f188c70003118e0,0xc02ac3ce23bc5d5a,2 +np.float64,0x3fecce5020b99ca0,0xbfc36b23153d5f50,2 +np.float64,0x3fe203873e24070e,0xbfea86edb3690830,2 +np.float64,0x3fe02d9eaa205b3d,0xbfef7d18c54a76d2,2 +np.float64,0xef7537ebdeea7,0xc08ff0c55e9d89e7,2 +np.float64,0x3fedf7572efbeeae,0xbfb840af357cf07c,2 +np.float64,0xd1a97a61a354,0xc0900926fdfb96cc,2 +np.float64,0x7fe6a0daeced41b5,0x408ffc001edf1407,2 +np.float64,0x3fe5063625aa0c6c,0xbfe3647cfb949d62,2 +np.float64,0x7fe9b28d31736519,0x408ffd77eb4a922b,2 +np.float64,0x7feea90d033d5219,0x408fff81a4bbff62,2 +np.float64,0x3fe9494d17f2929a,0xbfd5bde02eb5287a,2 +np.float64,0x7feee17a8cbdc2f4,0x408fff96cf0dc16a,2 +np.float64,0xb2ad18ef655a3,0xc08ff4267eda8af8,2 +np.float64,0x3fad3b52683a76a5,0xc01085ab75b797ce,2 +np.float64,0x2300a65846016,0xc090037b81ce9500,2 +np.float64,0x3feb1041f9b62084,0xbfcef0c87d8b3249,2 +np.float64,0x3fdd887d3e3b10fa,0xbff1da0e1ede6db2,2 +np.float64,0x3fd3e410eb27c822,0xbffaf9b5fc9cc8cc,2 +np.float64,0x3fe0aa53e3e154a8,0xbfee1e7b5c486578,2 +np.float64,0x7fe33e389aa67c70,0x408ffa214fe50961,2 +np.float64,0x3fd27e3a43a4fc75,0xbffca84a79e8adeb,2 +np.float64,0x3fb309e0082613c0,0xc00dfe407b77a508,2 +np.float64,0x7feaf2ed8cf5e5da,0x408ffe046a9d1ba9,2 +np.float64,0x1e76167a3cec4,0xc0900448cd35ec67,2 +np.float64,0x3fe0a18e1721431c,0xbfee36cf1165a0d4,2 +np.float64,0x3fa73b78c02e76f2,0xc011d9069823b172,2 +np.float64,0x3fef6d48287eda90,0xbf9ab2d08722c101,2 +np.float64,0x8fdf0da31fbe2,0xc08ff6a6a2accaa1,2 +np.float64,0x3fc3638db826c71b,0xc005c86191688826,2 +np.float64,0xaa9c09c555381,0xc08ff4aefe1d9473,2 +np.float64,0x7fccb0f4523961e8,0x408feebd84773f23,2 +np.float64,0xede75dcfdbcec,0xc08ff0d89ba887d1,2 +np.float64,0x7f8a051520340a29,0x408fcd9cc17f0d95,2 +np.float64,0x3fef5ca2babeb945,0xbf9dc221f3618e6a,2 +np.float64,0x7fea0ff4bcf41fe8,0x408ffda193359f22,2 +np.float64,0x7fe05c53fd20b8a7,0x408ff841dc7123e8,2 +np.float64,0x3fc625664b2c4acd,0xc0043f8749b9a1d8,2 +np.float64,0x7fed58f98f7ab1f2,0x408fff00585f48c2,2 +np.float64,0x3fb3e5e51427cbca,0xc00d7bcb6528cafe,2 +np.float64,0x3fe728bd3d6e517a,0xbfdddafa72bd0f60,2 +np.float64,0x3fe3f005dd27e00c,0xbfe5d7b3ec93bca0,2 +np.float64,0x3fd74fbd1a2e9f7a,0xbff750001b63ce81,2 +np.float64,0x3fd3af6d85a75edb,0xbffb371d678d11b4,2 +np.float64,0x7fa690ad8c2d215a,0x408fdbf7db9c7640,2 +np.float64,0x3fbdfd38e23bfa72,0xc008bfc1c5c9b89e,2 +np.float64,0x3fe2374684a46e8d,0xbfea030c4595dfba,2 +np.float64,0x7fc0806c372100d7,0x408fe85b36fee334,2 +np.float64,0x3fef3ac47b7e7589,0xbfa2007195c5213f,2 +np.float64,0x3fb55473922aa8e7,0xc00cae7af8230e0c,2 +np.float64,0x7fe018dc152031b7,0x408ff811e0d712fa,2 +np.float64,0x3fe3b3fca56767f9,0xbfe6638ae2c99c62,2 +np.float64,0x7fac79818c38f302,0x408fdea720b39c3c,2 +np.float64,0x7fefffffffffffff,0x4090000000000000,2 +np.float64,0xd2b290cba5652,0xc08ff23f6d7152a6,2 +np.float64,0x7fc5848eb52b091c,0x408feb6b6f8b77d0,2 +np.float64,0xf399f62de733f,0xc08ff092ae319ad8,2 +np.float64,0x7fdec56c12bd8ad7,0x408ff78c4ddbc667,2 +np.float64,0x3fca640f1e34c81e,0xc0023969c5cbfa4c,2 +np.float64,0x3fd55225db2aa44c,0xbff95f7442a2189e,2 +np.float64,0x7fefa009a97f4012,0x408fffdd2f42ef9f,2 +np.float64,0x4a3b70609478,0xc0900f24e449bc3d,2 +np.float64,0x7fe3738b1ba6e715,0x408ffa411f2cb5e7,2 +np.float64,0x7fe5e53f0b6bca7d,0x408ffb9ed8d95cea,2 +np.float64,0x3fe274dd24a4e9ba,0xbfe967fb114b2a83,2 +np.float64,0x3fcbc58b8c378b17,0xc001a2bb1e158bcc,2 +np.float64,0x3fefc2c0043f8580,0xbf862c9b464dcf38,2 
+np.float64,0xc2c4fafd858a0,0xc08ff327aecc409b,2 +np.float64,0x3fd8bc39a9b17873,0xbff5f1ad46e5a51c,2 +np.float64,0x3fdf341656be682d,0xbff094f41e7cb4c4,2 +np.float64,0x3fef8495c13f092c,0xbf966cf6313bae4c,2 +np.float64,0x3fe14e0f05229c1e,0xbfec6166f26b7161,2 +np.float64,0x3fed42d3b2ba85a7,0xbfc0860b773d35d8,2 +np.float64,0x7fd92bbac5b25775,0x408ff53abcb3fe0c,2 +np.float64,0xb1635b6f62c6c,0xc08ff43bdf47accf,2 +np.float64,0x4a3a2dbc94746,0xc08ffe49fabddb36,2 +np.float64,0x87d831290fb06,0xc08ff750419dc6fb,2 +np.float64,0x3fec4713f7f88e28,0xbfc6d6217c9f5cf9,2 +np.float64,0x7fed43ba2d3a8773,0x408ffef7fa2fc303,2 +np.float64,0x7fd1ec5b56a3d8b6,0x408ff14f62615f1e,2 +np.float64,0x3fee534b6c7ca697,0xbfb3da1951aa3e68,2 +np.float64,0x3febb564c2b76aca,0xbfca9737062e55e7,2 +np.float64,0x943e6b0f287ce,0xc08ff64e2d09335c,2 +np.float64,0xf177d957e2efb,0xc08ff0acab2999fa,2 +np.float64,0x7fb5b881a82b7102,0x408fe3872b4fde5e,2 +np.float64,0x3fdb2b4a97b65695,0xbff3c715c91359bc,2 +np.float64,0x3fac0a17e4381430,0xc010c330967309fb,2 +np.float64,0x7fd8057990b00af2,0x408ff4b0a287a348,2 +np.float64,0x1f9026a23f206,0xc09004144f3a19dd,2 +np.float64,0x3fdb2977243652ee,0xbff3c8a2fd05803d,2 +np.float64,0x3fe0f6e74b21edcf,0xbfed4c3bb956bae0,2 +np.float64,0xde9cc3bbbd399,0xc08ff19ce5c1e762,2 +np.float64,0x3fe72ce106ae59c2,0xbfddca7ab14ceba2,2 +np.float64,0x3fa8ee14e031dc2a,0xc01170d54ca88e86,2 +np.float64,0x3fe0b09bbb216137,0xbfee0d189a95b877,2 +np.float64,0x7fdfdcb157bfb962,0x408ff7f33cf2afea,2 +np.float64,0x3fef84d5f53f09ac,0xbf966134e2a154f4,2 +np.float64,0x3fea0e0b1bb41c16,0xbfd2fa2d36637d19,2 +np.float64,0x1ab76fd6356ef,0xc090050a9616ffbd,2 +np.float64,0x7fd0ccf79a2199ee,0x408ff09045af2dee,2 +np.float64,0x7fea929345f52526,0x408ffddadc322b07,2 +np.float64,0x3fe9ef629cf3dec5,0xbfd367129c166838,2 +np.float64,0x3feedf0ea2fdbe1d,0xbfaa862afca44c00,2 +np.float64,0x7fce725f723ce4be,0x408fef6cfd2769a8,2 +np.float64,0x7fe4313b3ca86275,0x408ffaaf9557ef8c,2 +np.float64,0xe2d46463c5a8d,0xc08ff165725c6b08,2 +np.float64,0x7fbacb4ace359695,0x408fe5f3647bd0d5,2 +np.float64,0x3fbafd009635fa01,0xc009f745a7a5c5d5,2 +np.float64,0x3fe3cea66ce79d4d,0xbfe6253b895e2838,2 +np.float64,0x7feaa71484354e28,0x408ffde3c0bad2a6,2 +np.float64,0x3fd755b8b42eab71,0xbff74a1444c6e654,2 +np.float64,0x3fc313e2172627c4,0xc005f830e77940c3,2 +np.float64,0x12d699a225ad4,0xc090070ec00f2338,2 +np.float64,0x3fa975fe8432ebfd,0xc01151b3da48b3f9,2 +np.float64,0x7fdce3103b39c61f,0x408ff6d19b3326fa,2 +np.float64,0x7fd341cbba268396,0x408ff2237490fdca,2 +np.float64,0x3fd8405885b080b1,0xbff6666d8802a7d5,2 +np.float64,0x3fe0f0cca3a1e199,0xbfed5cdb3e600791,2 +np.float64,0x7fbd56680c3aaccf,0x408fe6ff55bf378d,2 +np.float64,0x3f939c4f3027389e,0xc016d364dd6313fb,2 +np.float64,0x3fe9e87fac73d0ff,0xbfd37f9a2be4fe38,2 +np.float64,0x7fc93c6a883278d4,0x408fed4260e614f1,2 +np.float64,0x7fa88c0ff031181f,0x408fdcf09a46bd3a,2 +np.float64,0xd5487f99aa910,0xc08ff21b6390ab3b,2 +np.float64,0x3fe34acc96e69599,0xbfe75c9d290428fb,2 +np.float64,0x3fd17f5964a2feb3,0xbffdef50b524137b,2 +np.float64,0xe23dec0dc47be,0xc08ff16d1ce61dcb,2 +np.float64,0x3fec8bd64fb917ad,0xbfc5173941614b8f,2 +np.float64,0x3fc81d97d7303b30,0xc00343ccb791401d,2 +np.float64,0x7fe79ad18e2f35a2,0x408ffc7cf0ab0f2a,2 +np.float64,0x3f96306b402c60d7,0xc0161ce54754cac1,2 +np.float64,0xfb09fc97f6140,0xc08ff039d1d30123,2 +np.float64,0x3fec9c4afa793896,0xbfc4ace43ee46079,2 +np.float64,0x3f9262dac824c5b6,0xc01732a3a7eeb598,2 +np.float64,0x3fa5cd33f42b9a68,0xc01236ed4d315a3a,2 +np.float64,0x3fe7bb336caf7667,0xbfdb9a268a82e267,2 
+np.float64,0xc6c338f98d867,0xc08ff2ebb8475bbc,2 +np.float64,0x3fd50714482a0e29,0xbff9b14a9f84f2c2,2 +np.float64,0xfff0000000000000,0x7ff8000000000000,2 +np.float64,0x3fde2cd0f93c59a2,0xbff15afe35a43a37,2 +np.float64,0xf1719cb9e2e34,0xc08ff0acf77b06d3,2 +np.float64,0xfd3caaf9fa796,0xc08ff020101771bd,2 +np.float64,0x7f750d63a02a1ac6,0x408fc32ad0caa362,2 +np.float64,0x7fcc50f4e238a1e9,0x408fee96a5622f1a,2 +np.float64,0x421d1da0843a4,0xc08fff9ffe62d869,2 +np.float64,0x3fd9e17023b3c2e0,0xbff4e631d687ee8e,2 +np.float64,0x3fe4999a09693334,0xbfe4556b3734c215,2 +np.float64,0xd619ef03ac33e,0xc08ff21013c85529,2 +np.float64,0x3fc4da522229b4a4,0xc004f150b2c573aa,2 +np.float64,0x3feb04b053b60961,0xbfcf3fc9e00ebc40,2 +np.float64,0x3fbedec5ea3dbd8c,0xc0086a33dc22fab5,2 +np.float64,0x7fec3b217ab87642,0x408ffe8dbc8ca041,2 +np.float64,0xdb257d33b64b0,0xc08ff1cb42d3c182,2 +np.float64,0x7fa2d92ec025b25d,0x408fd9e414d11cb0,2 +np.float64,0x3fa425c550284b8b,0xc012ab7cbf83be12,2 +np.float64,0x10b4869021692,0xc09007c0487d648a,2 +np.float64,0x7f97918c902f2318,0x408fd47867806574,2 +np.float64,0x3fe4f91238e9f224,0xbfe38160b4e99919,2 +np.float64,0x3fc2b1af6125635f,0xc00634343bc58461,2 +np.float64,0x3fc2a98071255301,0xc0063942bc8301be,2 +np.float64,0x3fe4cfc585299f8b,0xbfe3dca39f114f34,2 +np.float64,0x3fd1ea75b3a3d4eb,0xbffd63acd02c5406,2 +np.float64,0x3fd6bf48492d7e91,0xbff7e0cd249f80f9,2 +np.float64,0x76643d36ecc88,0xc08ff8e68f13b38c,2 +np.float64,0x7feeabab3e7d5755,0x408fff82a0fd4501,2 +np.float64,0x46c0d4a68d81b,0xc08ffed79abaddc9,2 +np.float64,0x3fd088d57ca111ab,0xbfff3dd0ed7128ea,2 +np.float64,0x3fed25887cba4b11,0xbfc13f47639bd645,2 +np.float64,0x7fd90984b4b21308,0x408ff52b022c7fb4,2 +np.float64,0x3fe6ef31daadde64,0xbfdec185760cbf21,2 +np.float64,0x3fe48dbe83291b7d,0xbfe47005b99920bd,2 +np.float64,0x3fdce8422f39d084,0xbff258a33a96cc8e,2 +np.float64,0xb8ecdef771d9c,0xc08ff3c0eca61b10,2 +np.float64,0x3fe9bbf9a03377f3,0xbfd41ecfdcc336b9,2 +np.float64,0x7fe2565339a4aca5,0x408ff992d8851eaf,2 +np.float64,0x3fe1693e3822d27c,0xbfec1919da2ca697,2 +np.float64,0x3fd3680488a6d009,0xbffb8b7330275947,2 +np.float64,0x7fbe4f3d2c3c9e79,0x408fe75fa3f4e600,2 +np.float64,0x7fd4cfef3ca99fdd,0x408ff308ee3ab50f,2 +np.float64,0x3fd9c9a51cb3934a,0xbff4fb7440055ce6,2 +np.float64,0x3fe08a9640a1152d,0xbfee76bd1bfbf5c2,2 +np.float64,0x3fef012c41fe0259,0xbfa757a2da7f9707,2 +np.float64,0x3fee653fe2fcca80,0xbfb2ffae0c95025c,2 +np.float64,0x7fd0776933a0eed1,0x408ff054e7b43d41,2 +np.float64,0x4c94e5c09929d,0xc08ffdedb7f49e5e,2 +np.float64,0xca3e3d17947c8,0xc08ff2b86dce2f7a,2 +np.float64,0x3fb528e1342a51c2,0xc00cc626c8e2d9ba,2 +np.float64,0xd774df81aee9c,0xc08ff1fd6f0a7548,2 +np.float64,0x3fc47a9b6128f537,0xc00526c577b80849,2 +np.float64,0x3fe29a6f6a6534df,0xbfe90a5f83644911,2 +np.float64,0x3fecda4f59f9b49f,0xbfc31e4a80c4cbb6,2 +np.float64,0x7fe51d44f5aa3a89,0x408ffb3382437426,2 +np.float64,0x3fd677fc412ceff9,0xbff82999086977e7,2 +np.float64,0x3fe2a3c7e7254790,0xbfe8f33415cdba9d,2 +np.float64,0x3fe6d8d1dc6db1a4,0xbfdf1bc61bc24dff,2 +np.float64,0x7febb32d8ef7665a,0x408ffe55a043ded1,2 +np.float64,0x60677860c0d0,0xc0900da2caa7d571,2 +np.float64,0x7390c2e0e7219,0xc08ff92df18bb5d2,2 +np.float64,0x3fca53711b34a6e2,0xc00240b07a9b529b,2 +np.float64,0x7fe7ce6dd8ef9cdb,0x408ffc961164ead9,2 +np.float64,0x7fc0c9de0d2193bb,0x408fe88e245767f6,2 +np.float64,0xc0ee217981dc4,0xc08ff343b77ea770,2 +np.float64,0x72bd4668e57a9,0xc08ff94323fd74fc,2 +np.float64,0x7fd6970e252d2e1b,0x408ff3fb1e2fead2,2 +np.float64,0x7fdcb61040396c20,0x408ff6bf926bc98f,2 
+np.float64,0xda4faa25b49f6,0xc08ff1d68b3877f0,2 +np.float64,0x3feb344749f6688f,0xbfcdfba2d66c72c5,2 +np.float64,0x3fe2aa4284e55485,0xbfe8e32ae0683f57,2 +np.float64,0x3f8e8fcfd03d1fa0,0xc01843efb2129908,2 +np.float64,0x8000000000000000,0xfff0000000000000,2 +np.float64,0x3fd8e01155b1c023,0xbff5d0529dae9515,2 +np.float64,0x3fe8033f3370067e,0xbfda837c80b87e7c,2 +np.float64,0x7fc5bf831e2b7f05,0x408feb8ae3b039a0,2 +np.float64,0x3fd8dcdf5331b9bf,0xbff5d349e1ed422a,2 +np.float64,0x3fe58b4e302b169c,0xbfe243c9cbccde44,2 +np.float64,0x3fea8a2e47b5145d,0xbfd1464e37221894,2 +np.float64,0x75cd1e88eb9a4,0xc08ff8f553ef0475,2 +np.float64,0x7fcfc876e23f90ed,0x408fefebe6cc95e6,2 +np.float64,0x7f51aceb002359d5,0x408fb1263f9003fb,2 +np.float64,0x7fc2a1b877254370,0x408fe9c1ec52f8b9,2 +np.float64,0x7fd495810e292b01,0x408ff2e859414d31,2 +np.float64,0x7fd72048632e4090,0x408ff440690cebdb,2 +np.float64,0x7fd7aafaffaf6,0xc08ff803a390779f,2 +np.float64,0x7fe18067d4a300cf,0x408ff9090a02693f,2 +np.float64,0x3fdc1080f8b82102,0xbff3077bf44a89bd,2 +np.float64,0x3fc34a462f26948c,0xc005d777b3cdf139,2 +np.float64,0x3fe21e4a1fe43c94,0xbfea428acfbc6ea9,2 +np.float64,0x1f0d79083e1b0,0xc090042c65a7abf2,2 +np.float64,0x3fe8d0d15931a1a3,0xbfd779f6bbd4db78,2 +np.float64,0x3fe74578022e8af0,0xbfdd68b6c15e9f5e,2 +np.float64,0x50995dd0a132c,0xc08ffd56a5c8accf,2 +np.float64,0x3f9a6342b034c685,0xc0151ce1973c62bd,2 +np.float64,0x3f30856a00210ad4,0xc027e852f4d1fcbc,2 +np.float64,0x3febcf7646b79eed,0xbfc9e9cc9d12425c,2 +np.float64,0x8010000000000000,0x7ff8000000000000,2 +np.float64,0x3fdf520c02bea418,0xbff07ed5013f3062,2 +np.float64,0x3fe5433ecbea867e,0xbfe2df38968b6d14,2 +np.float64,0x3fb933a84e326751,0xc00ac1a144ad26c5,2 +np.float64,0x7b6d72c2f6daf,0xc08ff86b7a67f962,2 +np.float64,0xaef5dae75debc,0xc08ff46496bb2932,2 +np.float64,0x522d869aa45b1,0xc08ffd1d55281e98,2 +np.float64,0xa2462b05448c6,0xc08ff542fe0ac5fd,2 +np.float64,0x3fe2b71dd6e56e3c,0xbfe8c3690cf15415,2 +np.float64,0x3fe5778231aaef04,0xbfe26e495d09b783,2 +np.float64,0x3fe9b8d564f371ab,0xbfd42a161132970d,2 +np.float64,0x3f89ebc34033d787,0xc019373f90bfc7f1,2 +np.float64,0x3fe438ddc6e871bc,0xbfe53039341b0a93,2 +np.float64,0x873c75250e78f,0xc08ff75d8478dccd,2 +np.float64,0x807134cb00e27,0xc08ff7f5cf59c57a,2 +np.float64,0x3fac459878388b31,0xc010b6fe803bcdc2,2 +np.float64,0xca9dc7eb953b9,0xc08ff2b2fb480784,2 +np.float64,0x7feb38587bb670b0,0x408ffe21ff6d521e,2 +np.float64,0x7fd70e9b782e1d36,0x408ff437936b393a,2 +np.float64,0x3fa4037bbc2806f7,0xc012b55744c65ab2,2 +np.float64,0x3fd3d4637427a8c7,0xbffb0beebf4311ef,2 +np.float64,0x7fdabbda5db577b4,0x408ff5ecbc0d4428,2 +np.float64,0x7fda9be0a2b537c0,0x408ff5dee5d03d5a,2 +np.float64,0x7fe9c74396338e86,0x408ffd813506a18a,2 +np.float64,0x3fd058243e20b048,0xbfff822ffd8a7f21,2 +np.float64,0x3fe6aa6ca9ed54d9,0xbfdfd805629ff49e,2 +np.float64,0x3fd91431d5322864,0xbff5a025eea8c78b,2 +np.float64,0x7fe4d7f02329afdf,0x408ffb0d5d9b7878,2 +np.float64,0x3fe2954a12252a94,0xbfe917266e3e22d5,2 +np.float64,0x3fb25f7c8224bef9,0xc00e6764c81b3718,2 +np.float64,0x3fda4bddeeb497bc,0xbff4880638908c81,2 +np.float64,0x55dfd12eabbfb,0xc08ffc9b54ff4002,2 +np.float64,0x3fe8f399e031e734,0xbfd6f8e5c4dcd93f,2 +np.float64,0x3fd954a24832a945,0xbff56521f4707a06,2 +np.float64,0x3fdea911f2bd5224,0xbff0fcb2d0c2b2e2,2 +np.float64,0x3fe6b4ff8a2d69ff,0xbfdfacfc85cafeab,2 +np.float64,0x3fc7fa02042ff404,0xc00354e13b0767ad,2 +np.float64,0x3fe955088c72aa11,0xbfd593130f29949e,2 +np.float64,0xd7e74ec1afcea,0xc08ff1f74f61721c,2 +np.float64,0x3fe9d69c1ab3ad38,0xbfd3bf710a337e06,2 
+np.float64,0x3fd85669a2b0acd3,0xbff65176143ccc1e,2 +np.float64,0x3fea99b285353365,0xbfd11062744783f2,2 +np.float64,0x3fe2c79f80a58f3f,0xbfe89ac33f990289,2 +np.float64,0x3f8332ba30266574,0xc01af2cb7b635783,2 +np.float64,0x30d0150061a1,0xc090119030f74c5d,2 +np.float64,0x3fdbf4cb06b7e996,0xbff31e5207aaa754,2 +np.float64,0x3fe6b56c216d6ad8,0xbfdfab42fb2941c5,2 +np.float64,0x7fc4dc239829b846,0x408feb0fb0e13fbe,2 +np.float64,0x3fd0ab85ef21570c,0xbfff0d95d6c7a35c,2 +np.float64,0x7fe13d75e5e27aeb,0x408ff8dc8efa476b,2 +np.float64,0x3fece3b832f9c770,0xbfc2e21b165d583f,2 +np.float64,0x3fe3a279c4e744f4,0xbfe68ca4fbb55dbf,2 +np.float64,0x3feb64659ef6c8cb,0xbfccb6204b6bf724,2 +np.float64,0x2279a6bc44f36,0xc0900391eeeb3e7c,2 +np.float64,0xb88046d571009,0xc08ff3c7b5b45300,2 +np.float64,0x7ff4000000000000,0x7ffc000000000000,2 +np.float64,0x3fe49af059a935e1,0xbfe4526c294f248f,2 +np.float64,0xa3e5508147cc,0xc0900a92ce5924b1,2 +np.float64,0x7fc56def3d2adbdd,0x408feb5f46c360e8,2 +np.float64,0x7fd99f3574333e6a,0x408ff56f3807987c,2 +np.float64,0x3fdc38d56fb871ab,0xbff2e667cad8f36a,2 +np.float64,0xd0b03507a1607,0xc08ff25bbcf8aa9d,2 +np.float64,0xc493f9078927f,0xc08ff30c5fa4e759,2 +np.float64,0x3fc86ddbcb30dbb8,0xc0031da1fcb56d75,2 +np.float64,0x7fe75dc395aebb86,0x408ffc5eef841491,2 +np.float64,0x1647618a2c8ed,0xc0900616ef9479c1,2 +np.float64,0xdf144763be289,0xc08ff196b527f3c9,2 +np.float64,0x3fe0b29da6a1653b,0xbfee078b5f4d7744,2 +np.float64,0x3feb055852b60ab1,0xbfcf3b4db5779a7a,2 +np.float64,0x3fe8bc1625f1782c,0xbfd7c739ade904bc,2 +np.float64,0x7fd19bfb8ea337f6,0x408ff11b2b55699c,2 +np.float64,0x3fed1d80d1ba3b02,0xbfc1722e8d3ce094,2 +np.float64,0x2d9c65925b38e,0xc09001f46bcd3bc5,2 +np.float64,0x7fed6f4d857ade9a,0x408fff091cf6a3b4,2 +np.float64,0x3fd070cd6ba0e19b,0xbfff5f7609ca29e8,2 +np.float64,0x7fea3508b8f46a10,0x408ffdb1f30bd6be,2 +np.float64,0x508b897ca1172,0xc08ffd58a0eb3583,2 +np.float64,0x7feba367b07746ce,0x408ffe4f0bf4bd4e,2 +np.float64,0x3fefebd5c4bfd7ac,0xbf6d20b4fcf21b69,2 +np.float64,0x3fd8ef07b8b1de0f,0xbff5c2745c0795a5,2 +np.float64,0x3fd38ed518271daa,0xbffb5d75f00f6900,2 +np.float64,0x6de0fecedbc20,0xc08ff9c307bbc647,2 +np.float64,0xafc0ffc35f820,0xc08ff45737e5d6b4,2 +np.float64,0x7fd282097ca50412,0x408ff1ae3b27bf3b,2 +np.float64,0x3fe2f2d50b65e5aa,0xbfe831042e6a1e99,2 +np.float64,0x3faa437bac3486f7,0xc01123d8d962205a,2 +np.float64,0x3feea54434fd4a88,0xbfaff202cc456647,2 +np.float64,0x3fc9e65b8633ccb7,0xc00270e77ffd19da,2 +np.float64,0x7fee15af61fc2b5e,0x408fff49a49154a3,2 +np.float64,0x7fefe670a73fcce0,0x408ffff6c44c1005,2 +np.float64,0x3fc0832d0f21065a,0xc007a2dc2f25384a,2 +np.float64,0x3fecfc96bcb9f92d,0xbfc24367c3912620,2 +np.float64,0x3feb705682b6e0ad,0xbfcc65b1bb16f9c5,2 +np.float64,0x3fe185c4f9630b8a,0xbfebcdb401af67a4,2 +np.float64,0x3fb0a5a9f6214b54,0xc00f8ada2566a047,2 +np.float64,0x7fe2908cdda52119,0x408ff9b744861fb1,2 +np.float64,0x7fee776e183ceedb,0x408fff6ee7c2f86e,2 +np.float64,0x3fce1d608f3c3ac1,0xc000b3685d006474,2 +np.float64,0x7fecf92aa339f254,0x408ffeda6c998267,2 +np.float64,0xce13cb519c27a,0xc08ff280f02882a9,2 +np.float64,0x1,0xc090c80000000000,2 +np.float64,0x3fe485a8afa90b51,0xbfe4823265d5a50a,2 +np.float64,0x3feea60908bd4c12,0xbfafdf7ad7fe203f,2 +np.float64,0x3fd2253033a44a60,0xbffd187d0ec8d5b9,2 +np.float64,0x435338fc86a68,0xc08fff6a591059dd,2 +np.float64,0x7fce8763a73d0ec6,0x408fef74f1e715ff,2 +np.float64,0x3fbe5ddb783cbbb7,0xc0089acc5afa794b,2 +np.float64,0x7fe4cf19ada99e32,0x408ffb0877ca302b,2 +np.float64,0x3fe94c9ea1b2993d,0xbfd5b1c2e867b911,2 
+np.float64,0x3fe75541c72eaa84,0xbfdd2a27aa117699,2 +np.float64,0x8000000000000001,0x7ff8000000000000,2 +np.float64,0x7fdbec7f2c37d8fd,0x408ff66d69a7f818,2 +np.float64,0x8ef10d091de22,0xc08ff6b9ca5094f8,2 +np.float64,0x3fea69025b74d205,0xbfd1b9fe2c252c70,2 +np.float64,0x562376d0ac46f,0xc08ffc924111cd31,2 +np.float64,0x8e8097ab1d013,0xc08ff6c2e2706f67,2 +np.float64,0x3fca6803ed34d008,0xc00237aef808825b,2 +np.float64,0x7fe8fe9067b1fd20,0x408ffd25f459a7d1,2 +np.float64,0x3f918e8c7f233,0xc0900009fe011d54,2 +np.float64,0x3fdfe773833fcee7,0xbff011bc1af87bb9,2 +np.float64,0xefffef6fdfffe,0xc08ff0beb0f09eb0,2 +np.float64,0x7fe64610282c8c1f,0x408ffbd17209db18,2 +np.float64,0xe66be8c1ccd7d,0xc08ff13706c056e1,2 +np.float64,0x2837e570506fd,0xc09002ae4dae0c1a,2 +np.float64,0x3febe3a081f7c741,0xbfc964171f2a5a47,2 +np.float64,0x3fe21ed09a243da1,0xbfea41342d29c3ff,2 +np.float64,0x3fe1596c8162b2d9,0xbfec431eee30823a,2 +np.float64,0x8f2b9a131e574,0xc08ff6b51104ed4e,2 +np.float64,0x3fe88ed179711da3,0xbfd870d08a4a4b0c,2 +np.float64,0x34159bc2682b4,0xc09001305a885f94,2 +np.float64,0x1ed31e543da65,0xc0900437481577f8,2 +np.float64,0x3feafbe9de75f7d4,0xbfcf7bcdbacf1c61,2 +np.float64,0xfb16fb27f62e0,0xc08ff03938e682a2,2 +np.float64,0x3fe5cd5ba7eb9ab7,0xbfe1b7165771af3c,2 +np.float64,0x7fe72905e76e520b,0x408ffc44c4e7e80c,2 +np.float64,0x7fb7136e2e2e26db,0x408fe439fd383fb7,2 +np.float64,0x8fa585e11f4c,0xc0900b55a08a486b,2 +np.float64,0x7fed985ce47b30b9,0x408fff192b596821,2 +np.float64,0x3feaaf0869755e11,0xbfd0c671571b3764,2 +np.float64,0x3fa40fd4ec281faa,0xc012b1c8dc0b9e5f,2 +np.float64,0x7fda2a70993454e0,0x408ff5ad47b0c68a,2 +np.float64,0x3fe5f7e931abefd2,0xbfe15d52b3605abf,2 +np.float64,0x3fe9fc6d3533f8da,0xbfd338b06a790994,2 +np.float64,0x3fe060649420c0c9,0xbfeeed1756111891,2 +np.float64,0x3fce8435e33d086c,0xc0008c41cea9ed40,2 +np.float64,0x7ff8000000000000,0x7ff8000000000000,2 +np.float64,0x617820aec2f05,0xc08ffb251e9af0f0,2 +np.float64,0x7fcc4ab6ee38956d,0x408fee9419c8f77d,2 +np.float64,0x7fdefda2fc3dfb45,0x408ff7a15063bc05,2 +np.float64,0x7fe5138ccaaa2719,0x408ffb2e30f3a46e,2 +np.float64,0x3fe3817a836702f5,0xbfe6da7c2b25e35a,2 +np.float64,0x3fb8a7dafa314fb6,0xc00b025bc0784ebe,2 +np.float64,0x349dc420693d,0xc09011215825d2c8,2 +np.float64,0x6b0e504ad61cb,0xc08ffa0fee9c5cd6,2 +np.float64,0x273987644e732,0xc09002d34294ed79,2 +np.float64,0x3fc0bd8a6e217b15,0xc0077a5828b4d2f5,2 +np.float64,0x758b48c4eb16a,0xc08ff8fbc8fbe46a,2 +np.float64,0x3fc8a9a52631534a,0xc00301854ec0ef81,2 +np.float64,0x7fe79d29a76f3a52,0x408ffc7e1607a4c1,2 +np.float64,0x3fd7d3ebce2fa7d8,0xbff6ce8a94aebcda,2 +np.float64,0x7fd1cb68a52396d0,0x408ff13a17533b2b,2 +np.float64,0x7fda514a5d34a294,0x408ff5be5e081578,2 +np.float64,0x3fc40b4382281687,0xc0056632c8067228,2 +np.float64,0x7feff1208c3fe240,0x408ffffaa180fa0d,2 +np.float64,0x8f58739f1eb0f,0xc08ff6b17402689d,2 +np.float64,0x1fdbe9a23fb7e,0xc090040685b2d24f,2 +np.float64,0xcb1d0e87963a2,0xc08ff2abbd903b82,2 +np.float64,0x3fc45a6a1a28b4d4,0xc00538f86c4aeaee,2 +np.float64,0x3fe61885b1ac310b,0xbfe118fd2251d2ec,2 +np.float64,0x3fedf584c8fbeb0a,0xbfb8572433ff67a9,2 +np.float64,0x7fb0bddd1a217bb9,0x408fe085e0d621db,2 +np.float64,0x72d8d3e0e5b3,0xc0900ca02f68c7a1,2 +np.float64,0x5cca6ff6b994f,0xc08ffbb6751fda01,2 +np.float64,0x7fe3197839a632ef,0x408ffa0b2fccfb68,2 +np.float64,0x3fcce4d9c139c9b4,0xc0012dae05baa91b,2 +np.float64,0x3fe76d00f62eda02,0xbfdccc5f12799be1,2 +np.float64,0x3fc53c22f72a7846,0xc004bbaa9cbc7958,2 +np.float64,0x7fdda02f1ebb405d,0x408ff71c37c71659,2 
+np.float64,0x3fe0844eaba1089d,0xbfee884722762583,2 +np.float64,0x3febb438dc776872,0xbfca9f05e1c691f1,2 +np.float64,0x3fdf4170cdbe82e2,0xbff08b1561c8d848,2 +np.float64,0x3fce1b8d6f3c371b,0xc000b41b69507671,2 +np.float64,0x8370e60706e1d,0xc08ff7b19ea0b4ca,2 +np.float64,0x7fa5bf92382b7f23,0x408fdb8aebb3df87,2 +np.float64,0x7fe4a59979a94b32,0x408ffaf15c1358cd,2 +np.float64,0x3faa66086034cc11,0xc0111c466b7835d6,2 +np.float64,0x7fb7a958262f52af,0x408fe48408b1e093,2 +np.float64,0x3fdaacc5f635598c,0xbff43390d06b5614,2 +np.float64,0x3fd2825b9e2504b7,0xbffca3234264f109,2 +np.float64,0x3fcede160a3dbc2c,0xc0006a759e29060c,2 +np.float64,0x7fd3b19603a7632b,0x408ff265b528371c,2 +np.float64,0x7fcf8a86ea3f150d,0x408fefd552e7f3b2,2 +np.float64,0xedbcc0f7db798,0xc08ff0daad12096b,2 +np.float64,0xf1e1683de3c2d,0xc08ff0a7a0a37e00,2 +np.float64,0xb6ebd9bf6dd7b,0xc08ff3e11e28378d,2 +np.float64,0x3fec8090d6f90122,0xbfc56031b72194cc,2 +np.float64,0x3fd3e10e37a7c21c,0xbffafd34a3ebc933,2 +np.float64,0x7fbb1c96aa36392c,0x408fe616347b3342,2 +np.float64,0x3fe2f3996f25e733,0xbfe82f25bc5d1bbd,2 +np.float64,0x7fe8709da870e13a,0x408ffce3ab6ce59a,2 +np.float64,0x7fea3233d1b46467,0x408ffdb0b3bbc6de,2 +np.float64,0x65fa4112cbf49,0xc08ffa9f85eb72b9,2 +np.float64,0x3fca2cae9f34595d,0xc00251bb275afb87,2 +np.float64,0x8135fd9f026c0,0xc08ff7e42e14dce7,2 +np.float64,0x7fe0a6f057e14de0,0x408ff876081a4bfe,2 +np.float64,0x10000000000000,0xc08ff00000000000,2 +np.float64,0x3fe1fd506263faa1,0xbfea96dd8c543b72,2 +np.float64,0xa5532c554aa66,0xc08ff50bf5bfc66d,2 +np.float64,0xc239d00b8473a,0xc08ff32ff0ea3f92,2 +np.float64,0x7fdb5314e336a629,0x408ff62d4ff60d82,2 +np.float64,0x3fe5f506e2abea0e,0xbfe16362a4682120,2 +np.float64,0x3fa20c60202418c0,0xc0134e08d82608b6,2 +np.float64,0x7fe03864b22070c8,0x408ff82866d65e9a,2 +np.float64,0x3fe72cf5656e59eb,0xbfddca298969effa,2 +np.float64,0x5c295386b852b,0xc08ffbca90b136c9,2 +np.float64,0x7fd71e5020ae3c9f,0x408ff43f6d58eb7c,2 +np.float64,0x3fd1905a842320b5,0xbffdd8ecd288159c,2 +np.float64,0x3fe6bddb256d7bb6,0xbfdf88fee1a820bb,2 +np.float64,0xe061b967c0c37,0xc08ff18581951561,2 +np.float64,0x3fe534f65cea69ed,0xbfe2fe45fe7d3040,2 +np.float64,0xdc7dae07b8fb6,0xc08ff1b93074ea76,2 +np.float64,0x3fd0425082a084a1,0xbfffa11838b21633,2 +np.float64,0xba723fc974e48,0xc08ff3a8b8d01c58,2 +np.float64,0x3fce42ffc73c8600,0xc000a5062678406e,2 +np.float64,0x3f2e6d3c7e5ce,0xc090001304cfd1c7,2 +np.float64,0x3fd4b2e5f7a965cc,0xbffa0e6e6bae0a68,2 +np.float64,0x3fe6db1d18edb63a,0xbfdf128158ee92d9,2 +np.float64,0x7fe4e5792f29caf1,0x408ffb14d9dbf133,2 +np.float64,0x3fc11cdf992239bf,0xc00739569619cd77,2 +np.float64,0x3fc05ea11220bd42,0xc007bc841b48a890,2 +np.float64,0x4bd592d497ab3,0xc08ffe0ab1c962e2,2 +np.float64,0x280068fc5000e,0xc09002b64955e865,2 +np.float64,0x7fe2f2637065e4c6,0x408ff9f379c1253a,2 +np.float64,0x3fefc38467ff8709,0xbf85e53e64b9a424,2 +np.float64,0x2d78ec5a5af1e,0xc09001f8ea8601e0,2 +np.float64,0x7feeef2b957dde56,0x408fff9bebe995f7,2 +np.float64,0x2639baf44c738,0xc09002f9618d623b,2 +np.float64,0x3fc562964d2ac52d,0xc004a6d76959ef78,2 +np.float64,0x3fe21b071fe4360e,0xbfea4adb2cd96ade,2 +np.float64,0x7fe56aa6802ad54c,0x408ffb5d81d1a898,2 +np.float64,0x4296b452852d7,0xc08fff8ad7fbcbe1,2 +np.float64,0x7fe3fac4ff27f589,0x408ffa9049eec479,2 +np.float64,0x7fe7a83e6caf507c,0x408ffc837f436604,2 +np.float64,0x3fc4ac5b872958b7,0xc0050add72381ac3,2 +np.float64,0x3fd6d697c02dad30,0xbff7c931a3eefb01,2 +np.float64,0x3f61e391c023c724,0xc021ad91e754f94b,2 +np.float64,0x10817f9c21031,0xc09007d20434d7bc,2 
+np.float64,0x3fdb9c4c4cb73899,0xbff367d8615c5ece,2 +np.float64,0x3fe26ead6b64dd5b,0xbfe977771def5989,2 +np.float64,0x3fc43ea5c3287d4c,0xc00548c2163ae631,2 +np.float64,0x3fe05bd8bba0b7b1,0xbfeef9ea0db91abc,2 +np.float64,0x3feac78369358f07,0xbfd071e2b0aeab39,2 +np.float64,0x7fe254922ca4a923,0x408ff991bdd4e5d3,2 +np.float64,0x3fe5a2f5842b45eb,0xbfe21135c9a71666,2 +np.float64,0x3fd5daf98c2bb5f3,0xbff8cd24f7c07003,2 +np.float64,0x3fcb2a1384365427,0xc001e40f0d04299a,2 +np.float64,0x3fe073974360e72f,0xbfeeb7183a9930b7,2 +np.float64,0xcf3440819e688,0xc08ff270d3a71001,2 +np.float64,0x3fd35656cda6acae,0xbffba083fba4939d,2 +np.float64,0x7fe6c59b4ded8b36,0x408ffc12ce725425,2 +np.float64,0x3fba896f943512df,0xc00a291cb6947701,2 +np.float64,0x7fe54917e86a922f,0x408ffb4b5e0fb848,2 +np.float64,0x7fed2a3f51ba547e,0x408ffeede945a948,2 +np.float64,0x3fdc72bd5038e57b,0xbff2b73b7e93e209,2 +np.float64,0x7fefdb3f9f3fb67e,0x408ffff2b702a768,2 +np.float64,0x3fb0184430203088,0xc00fee8c1351763c,2 +np.float64,0x7d6c3668fad87,0xc08ff83c195f2cca,2 +np.float64,0x3fd5aa254aab544b,0xbff900f16365991b,2 +np.float64,0x3f963daab02c7b55,0xc0161974495b1b71,2 +np.float64,0x3fa7a9c5982f538b,0xc011bde0f6052a89,2 +np.float64,0xb3a5a74b674b5,0xc08ff4167bc97c81,2 +np.float64,0x7fad0c14503a1828,0x408fdee1f2d56cd7,2 +np.float64,0x43e0e9d887c1e,0xc08fff522837b13b,2 +np.float64,0x3fe513b20aea2764,0xbfe346ea994100e6,2 +np.float64,0x7fe4e10393e9c206,0x408ffb12630f6a06,2 +np.float64,0x68b286e2d1651,0xc08ffa51c0d795d4,2 +np.float64,0x7fe8de453331bc89,0x408ffd17012b75ac,2 +np.float64,0x1b3d77d4367b0,0xc09004edea60aa36,2 +np.float64,0x3fd351cbc326a398,0xbffba5f0f4d5fdba,2 +np.float64,0x3fd264951b24c92a,0xbffcc8636788b9bf,2 +np.float64,0xd2465761a48cb,0xc08ff2455c9c53e5,2 +np.float64,0x7fe46a0ef028d41d,0x408ffacfe32c6f5d,2 +np.float64,0x3fafd8ac4c3fb159,0xc010071bf33195d0,2 +np.float64,0x902aec5d2055e,0xc08ff6a08e28aabc,2 +np.float64,0x3fcea61bb03d4c37,0xc0007f76e509b657,2 +np.float64,0x7fe8d90f9571b21e,0x408ffd1495f952e7,2 +np.float64,0x7fa650c9442ca192,0x408fdbd6ff22fdd8,2 +np.float64,0x3fe8ecfdf171d9fc,0xbfd7115df40e8580,2 +np.float64,0x7fd4e6fe7f29cdfc,0x408ff315b0dae183,2 +np.float64,0x77df4c52efbea,0xc08ff8c1d5c1df33,2 +np.float64,0xe200b0cfc4016,0xc08ff1703cfb8e79,2 +np.float64,0x3fe230ea7e2461d5,0xbfea132d2385160e,2 +np.float64,0x7fd1f7ced723ef9d,0x408ff156bfbf92a4,2 +np.float64,0x3fea762818f4ec50,0xbfd18c12a88e5f79,2 +np.float64,0x7feea4ba7c7d4974,0x408fff8004164054,2 +np.float64,0x833ec605067d9,0xc08ff7b606383841,2 +np.float64,0x7fd0c2d7fea185af,0x408ff0894f3a0cf4,2 +np.float64,0x3fe1d7d61d23afac,0xbfeaf76fee875d3e,2 +np.float64,0x65adecb0cb5be,0xc08ffaa82cb09d68,2 diff --git a/.venv/lib/python3.12/site-packages/numpy/core/tests/data/umath-validation-set-sin.csv b/.venv/lib/python3.12/site-packages/numpy/core/tests/data/umath-validation-set-sin.csv new file mode 100644 index 00000000..03e76ffc --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/data/umath-validation-set-sin.csv @@ -0,0 +1,1370 @@ +dtype,input,output,ulperrortol +## +ve denormals ## +np.float32,0x004b4716,0x004b4716,2 +np.float32,0x007b2490,0x007b2490,2 +np.float32,0x007c99fa,0x007c99fa,2 +np.float32,0x00734a0c,0x00734a0c,2 +np.float32,0x0070de24,0x0070de24,2 +np.float32,0x007fffff,0x007fffff,2 +np.float32,0x00000001,0x00000001,2 +## -ve denormals ## +np.float32,0x80495d65,0x80495d65,2 +np.float32,0x806894f6,0x806894f6,2 +np.float32,0x80555a76,0x80555a76,2 +np.float32,0x804e1fb8,0x804e1fb8,2 +np.float32,0x80687de9,0x80687de9,2 
+np.float32,0x807fffff,0x807fffff,2 +np.float32,0x80000001,0x80000001,2 +## +/-0.0f, +/-FLT_MIN +/-FLT_MAX ## +np.float32,0x00000000,0x00000000,2 +np.float32,0x80000000,0x80000000,2 +np.float32,0x00800000,0x00800000,2 +np.float32,0x80800000,0x80800000,2 +## 1.00f ## +np.float32,0x3f800000,0x3f576aa4,2 +np.float32,0x3f800001,0x3f576aa6,2 +np.float32,0x3f800002,0x3f576aa7,2 +np.float32,0xc090a8b0,0x3f7b4e48,2 +np.float32,0x41ce3184,0x3f192d43,2 +np.float32,0xc1d85848,0xbf7161cb,2 +np.float32,0x402b8820,0x3ee3f29f,2 +np.float32,0x42b4e454,0x3f1d0151,2 +np.float32,0x42a67a60,0x3f7ffa4c,2 +np.float32,0x41d92388,0x3f67beef,2 +np.float32,0x422dd66c,0xbeffb0c1,2 +np.float32,0xc28f5be6,0xbf0bae79,2 +np.float32,0x41ab2674,0x3f0ffe2b,2 +np.float32,0x3f490fdb,0x3f3504f3,2 +np.float32,0xbf490fdb,0xbf3504f3,2 +np.float32,0x3fc90fdb,0x3f800000,2 +np.float32,0xbfc90fdb,0xbf800000,2 +np.float32,0x40490fdb,0xb3bbbd2e,2 +np.float32,0xc0490fdb,0x33bbbd2e,2 +np.float32,0x3fc90fdb,0x3f800000,2 +np.float32,0xbfc90fdb,0xbf800000,2 +np.float32,0x40490fdb,0xb3bbbd2e,2 +np.float32,0xc0490fdb,0x33bbbd2e,2 +np.float32,0x40c90fdb,0x343bbd2e,2 +np.float32,0xc0c90fdb,0xb43bbd2e,2 +np.float32,0x4016cbe4,0x3f3504f3,2 +np.float32,0xc016cbe4,0xbf3504f3,2 +np.float32,0x4096cbe4,0xbf800000,2 +np.float32,0xc096cbe4,0x3f800000,2 +np.float32,0x4116cbe4,0xb2ccde2e,2 +np.float32,0xc116cbe4,0x32ccde2e,2 +np.float32,0x40490fdb,0xb3bbbd2e,2 +np.float32,0xc0490fdb,0x33bbbd2e,2 +np.float32,0x40c90fdb,0x343bbd2e,2 +np.float32,0xc0c90fdb,0xb43bbd2e,2 +np.float32,0x41490fdb,0x34bbbd2e,2 +np.float32,0xc1490fdb,0xb4bbbd2e,2 +np.float32,0x407b53d2,0xbf3504f5,2 +np.float32,0xc07b53d2,0x3f3504f5,2 +np.float32,0x40fb53d2,0x3f800000,2 +np.float32,0xc0fb53d2,0xbf800000,2 +np.float32,0x417b53d2,0xb535563d,2 +np.float32,0xc17b53d2,0x3535563d,2 +np.float32,0x4096cbe4,0xbf800000,2 +np.float32,0xc096cbe4,0x3f800000,2 +np.float32,0x4116cbe4,0xb2ccde2e,2 +np.float32,0xc116cbe4,0x32ccde2e,2 +np.float32,0x4196cbe4,0x334cde2e,2 +np.float32,0xc196cbe4,0xb34cde2e,2 +np.float32,0x40afede0,0xbf3504ef,2 +np.float32,0xc0afede0,0x3f3504ef,2 +np.float32,0x412fede0,0xbf800000,2 +np.float32,0xc12fede0,0x3f800000,2 +np.float32,0x41afede0,0xb5b222c4,2 +np.float32,0xc1afede0,0x35b222c4,2 +np.float32,0x40c90fdb,0x343bbd2e,2 +np.float32,0xc0c90fdb,0xb43bbd2e,2 +np.float32,0x41490fdb,0x34bbbd2e,2 +np.float32,0xc1490fdb,0xb4bbbd2e,2 +np.float32,0x41c90fdb,0x353bbd2e,2 +np.float32,0xc1c90fdb,0xb53bbd2e,2 +np.float32,0x40e231d6,0x3f3504f3,2 +np.float32,0xc0e231d6,0xbf3504f3,2 +np.float32,0x416231d6,0x3f800000,2 +np.float32,0xc16231d6,0xbf800000,2 +np.float32,0x41e231d6,0xb399a6a2,2 +np.float32,0xc1e231d6,0x3399a6a2,2 +np.float32,0x40fb53d2,0x3f800000,2 +np.float32,0xc0fb53d2,0xbf800000,2 +np.float32,0x417b53d2,0xb535563d,2 +np.float32,0xc17b53d2,0x3535563d,2 +np.float32,0x41fb53d2,0x35b5563d,2 +np.float32,0xc1fb53d2,0xb5b5563d,2 +np.float32,0x410a3ae7,0x3f3504eb,2 +np.float32,0xc10a3ae7,0xbf3504eb,2 +np.float32,0x418a3ae7,0xbf800000,2 +np.float32,0xc18a3ae7,0x3f800000,2 +np.float32,0x420a3ae7,0xb6308908,2 +np.float32,0xc20a3ae7,0x36308908,2 +np.float32,0x4116cbe4,0xb2ccde2e,2 +np.float32,0xc116cbe4,0x32ccde2e,2 +np.float32,0x4196cbe4,0x334cde2e,2 +np.float32,0xc196cbe4,0xb34cde2e,2 +np.float32,0x4216cbe4,0x33ccde2e,2 +np.float32,0xc216cbe4,0xb3ccde2e,2 +np.float32,0x41235ce2,0xbf3504f7,2 +np.float32,0xc1235ce2,0x3f3504f7,2 +np.float32,0x41a35ce2,0x3f800000,2 +np.float32,0xc1a35ce2,0xbf800000,2 +np.float32,0x42235ce2,0xb5b889b6,2 +np.float32,0xc2235ce2,0x35b889b6,2 
+np.float32,0x412fede0,0xbf800000,2 +np.float32,0xc12fede0,0x3f800000,2 +np.float32,0x41afede0,0xb5b222c4,2 +np.float32,0xc1afede0,0x35b222c4,2 +np.float32,0x422fede0,0x363222c4,2 +np.float32,0xc22fede0,0xb63222c4,2 +np.float32,0x413c7edd,0xbf3504f3,2 +np.float32,0xc13c7edd,0x3f3504f3,2 +np.float32,0x41bc7edd,0xbf800000,2 +np.float32,0xc1bc7edd,0x3f800000,2 +np.float32,0x423c7edd,0xb4000add,2 +np.float32,0xc23c7edd,0x34000add,2 +np.float32,0x41490fdb,0x34bbbd2e,2 +np.float32,0xc1490fdb,0xb4bbbd2e,2 +np.float32,0x41c90fdb,0x353bbd2e,2 +np.float32,0xc1c90fdb,0xb53bbd2e,2 +np.float32,0x42490fdb,0x35bbbd2e,2 +np.float32,0xc2490fdb,0xb5bbbd2e,2 +np.float32,0x4155a0d9,0x3f3504fb,2 +np.float32,0xc155a0d9,0xbf3504fb,2 +np.float32,0x41d5a0d9,0x3f800000,2 +np.float32,0xc1d5a0d9,0xbf800000,2 +np.float32,0x4255a0d9,0xb633bc81,2 +np.float32,0xc255a0d9,0x3633bc81,2 +np.float32,0x416231d6,0x3f800000,2 +np.float32,0xc16231d6,0xbf800000,2 +np.float32,0x41e231d6,0xb399a6a2,2 +np.float32,0xc1e231d6,0x3399a6a2,2 +np.float32,0x426231d6,0x3419a6a2,2 +np.float32,0xc26231d6,0xb419a6a2,2 +np.float32,0x416ec2d4,0x3f3504ef,2 +np.float32,0xc16ec2d4,0xbf3504ef,2 +np.float32,0x41eec2d4,0xbf800000,2 +np.float32,0xc1eec2d4,0x3f800000,2 +np.float32,0x426ec2d4,0xb5bef0a7,2 +np.float32,0xc26ec2d4,0x35bef0a7,2 +np.float32,0x417b53d2,0xb535563d,2 +np.float32,0xc17b53d2,0x3535563d,2 +np.float32,0x41fb53d2,0x35b5563d,2 +np.float32,0xc1fb53d2,0xb5b5563d,2 +np.float32,0x427b53d2,0x3635563d,2 +np.float32,0xc27b53d2,0xb635563d,2 +np.float32,0x4183f268,0xbf3504ff,2 +np.float32,0xc183f268,0x3f3504ff,2 +np.float32,0x4203f268,0x3f800000,2 +np.float32,0xc203f268,0xbf800000,2 +np.float32,0x4283f268,0xb6859a13,2 +np.float32,0xc283f268,0x36859a13,2 +np.float32,0x418a3ae7,0xbf800000,2 +np.float32,0xc18a3ae7,0x3f800000,2 +np.float32,0x420a3ae7,0xb6308908,2 +np.float32,0xc20a3ae7,0x36308908,2 +np.float32,0x428a3ae7,0x36b08908,2 +np.float32,0xc28a3ae7,0xb6b08908,2 +np.float32,0x41908365,0xbf3504f6,2 +np.float32,0xc1908365,0x3f3504f6,2 +np.float32,0x42108365,0xbf800000,2 +np.float32,0xc2108365,0x3f800000,2 +np.float32,0x42908365,0x3592200d,2 +np.float32,0xc2908365,0xb592200d,2 +np.float32,0x4196cbe4,0x334cde2e,2 +np.float32,0xc196cbe4,0xb34cde2e,2 +np.float32,0x4216cbe4,0x33ccde2e,2 +np.float32,0xc216cbe4,0xb3ccde2e,2 +np.float32,0x4296cbe4,0x344cde2e,2 +np.float32,0xc296cbe4,0xb44cde2e,2 +np.float32,0x419d1463,0x3f3504f8,2 +np.float32,0xc19d1463,0xbf3504f8,2 +np.float32,0x421d1463,0x3f800000,2 +np.float32,0xc21d1463,0xbf800000,2 +np.float32,0x429d1463,0xb5c55799,2 +np.float32,0xc29d1463,0x35c55799,2 +np.float32,0x41a35ce2,0x3f800000,2 +np.float32,0xc1a35ce2,0xbf800000,2 +np.float32,0x42235ce2,0xb5b889b6,2 +np.float32,0xc2235ce2,0x35b889b6,2 +np.float32,0x42a35ce2,0x363889b6,2 +np.float32,0xc2a35ce2,0xb63889b6,2 +np.float32,0x41a9a561,0x3f3504e7,2 +np.float32,0xc1a9a561,0xbf3504e7,2 +np.float32,0x4229a561,0xbf800000,2 +np.float32,0xc229a561,0x3f800000,2 +np.float32,0x42a9a561,0xb68733d0,2 +np.float32,0xc2a9a561,0x368733d0,2 +np.float32,0x41afede0,0xb5b222c4,2 +np.float32,0xc1afede0,0x35b222c4,2 +np.float32,0x422fede0,0x363222c4,2 +np.float32,0xc22fede0,0xb63222c4,2 +np.float32,0x42afede0,0x36b222c4,2 +np.float32,0xc2afede0,0xb6b222c4,2 +np.float32,0x41b6365e,0xbf3504f0,2 +np.float32,0xc1b6365e,0x3f3504f0,2 +np.float32,0x4236365e,0x3f800000,2 +np.float32,0xc236365e,0xbf800000,2 +np.float32,0x42b6365e,0x358bb91c,2 +np.float32,0xc2b6365e,0xb58bb91c,2 +np.float32,0x41bc7edd,0xbf800000,2 +np.float32,0xc1bc7edd,0x3f800000,2 
+np.float32,0x423c7edd,0xb4000add,2 +np.float32,0xc23c7edd,0x34000add,2 +np.float32,0x42bc7edd,0x34800add,2 +np.float32,0xc2bc7edd,0xb4800add,2 +np.float32,0x41c2c75c,0xbf3504ef,2 +np.float32,0xc1c2c75c,0x3f3504ef,2 +np.float32,0x4242c75c,0xbf800000,2 +np.float32,0xc242c75c,0x3f800000,2 +np.float32,0x42c2c75c,0xb5cbbe8a,2 +np.float32,0xc2c2c75c,0x35cbbe8a,2 +np.float32,0x41c90fdb,0x353bbd2e,2 +np.float32,0xc1c90fdb,0xb53bbd2e,2 +np.float32,0x42490fdb,0x35bbbd2e,2 +np.float32,0xc2490fdb,0xb5bbbd2e,2 +np.float32,0x42c90fdb,0x363bbd2e,2 +np.float32,0xc2c90fdb,0xb63bbd2e,2 +np.float32,0x41cf585a,0x3f3504ff,2 +np.float32,0xc1cf585a,0xbf3504ff,2 +np.float32,0x424f585a,0x3f800000,2 +np.float32,0xc24f585a,0xbf800000,2 +np.float32,0x42cf585a,0xb688cd8c,2 +np.float32,0xc2cf585a,0x3688cd8c,2 +np.float32,0x41d5a0d9,0x3f800000,2 +np.float32,0xc1d5a0d9,0xbf800000,2 +np.float32,0x4255a0d9,0xb633bc81,2 +np.float32,0xc255a0d9,0x3633bc81,2 +np.float32,0x42d5a0d9,0x36b3bc81,2 +np.float32,0xc2d5a0d9,0xb6b3bc81,2 +np.float32,0x41dbe958,0x3f3504e0,2 +np.float32,0xc1dbe958,0xbf3504e0,2 +np.float32,0x425be958,0xbf800000,2 +np.float32,0xc25be958,0x3f800000,2 +np.float32,0x42dbe958,0xb6deab75,2 +np.float32,0xc2dbe958,0x36deab75,2 +np.float32,0x41e231d6,0xb399a6a2,2 +np.float32,0xc1e231d6,0x3399a6a2,2 +np.float32,0x426231d6,0x3419a6a2,2 +np.float32,0xc26231d6,0xb419a6a2,2 +np.float32,0x42e231d6,0x3499a6a2,2 +np.float32,0xc2e231d6,0xb499a6a2,2 +np.float32,0x41e87a55,0xbf3504f8,2 +np.float32,0xc1e87a55,0x3f3504f8,2 +np.float32,0x42687a55,0x3f800000,2 +np.float32,0xc2687a55,0xbf800000,2 +np.float32,0x42e87a55,0xb5d2257b,2 +np.float32,0xc2e87a55,0x35d2257b,2 +np.float32,0x41eec2d4,0xbf800000,2 +np.float32,0xc1eec2d4,0x3f800000,2 +np.float32,0x426ec2d4,0xb5bef0a7,2 +np.float32,0xc26ec2d4,0x35bef0a7,2 +np.float32,0x42eec2d4,0x363ef0a7,2 +np.float32,0xc2eec2d4,0xb63ef0a7,2 +np.float32,0x41f50b53,0xbf3504e7,2 +np.float32,0xc1f50b53,0x3f3504e7,2 +np.float32,0x42750b53,0xbf800000,2 +np.float32,0xc2750b53,0x3f800000,2 +np.float32,0x42f50b53,0xb68a6748,2 +np.float32,0xc2f50b53,0x368a6748,2 +np.float32,0x41fb53d2,0x35b5563d,2 +np.float32,0xc1fb53d2,0xb5b5563d,2 +np.float32,0x427b53d2,0x3635563d,2 +np.float32,0xc27b53d2,0xb635563d,2 +np.float32,0x42fb53d2,0x36b5563d,2 +np.float32,0xc2fb53d2,0xb6b5563d,2 +np.float32,0x4200ce28,0x3f3504f0,2 +np.float32,0xc200ce28,0xbf3504f0,2 +np.float32,0x4280ce28,0x3f800000,2 +np.float32,0xc280ce28,0xbf800000,2 +np.float32,0x4300ce28,0x357dd672,2 +np.float32,0xc300ce28,0xb57dd672,2 +np.float32,0x4203f268,0x3f800000,2 +np.float32,0xc203f268,0xbf800000,2 +np.float32,0x4283f268,0xb6859a13,2 +np.float32,0xc283f268,0x36859a13,2 +np.float32,0x4303f268,0x37059a13,2 +np.float32,0xc303f268,0xb7059a13,2 +np.float32,0x420716a7,0x3f3504ee,2 +np.float32,0xc20716a7,0xbf3504ee,2 +np.float32,0x428716a7,0xbf800000,2 +np.float32,0xc28716a7,0x3f800000,2 +np.float32,0x430716a7,0xb5d88c6d,2 +np.float32,0xc30716a7,0x35d88c6d,2 +np.float32,0x420a3ae7,0xb6308908,2 +np.float32,0xc20a3ae7,0x36308908,2 +np.float32,0x428a3ae7,0x36b08908,2 +np.float32,0xc28a3ae7,0xb6b08908,2 +np.float32,0x430a3ae7,0x37308908,2 +np.float32,0xc30a3ae7,0xb7308908,2 +np.float32,0x420d5f26,0xbf350500,2 +np.float32,0xc20d5f26,0x3f350500,2 +np.float32,0x428d5f26,0x3f800000,2 +np.float32,0xc28d5f26,0xbf800000,2 +np.float32,0x430d5f26,0xb68c0105,2 +np.float32,0xc30d5f26,0x368c0105,2 +np.float32,0x42108365,0xbf800000,2 +np.float32,0xc2108365,0x3f800000,2 +np.float32,0x42908365,0x3592200d,2 +np.float32,0xc2908365,0xb592200d,2 
+np.float32,0x43108365,0xb612200d,2 +np.float32,0xc3108365,0x3612200d,2 +np.float32,0x4213a7a5,0xbf3504df,2 +np.float32,0xc213a7a5,0x3f3504df,2 +np.float32,0x4293a7a5,0xbf800000,2 +np.float32,0xc293a7a5,0x3f800000,2 +np.float32,0x4313a7a5,0xb6e1deee,2 +np.float32,0xc313a7a5,0x36e1deee,2 +np.float32,0x4216cbe4,0x33ccde2e,2 +np.float32,0xc216cbe4,0xb3ccde2e,2 +np.float32,0x4296cbe4,0x344cde2e,2 +np.float32,0xc296cbe4,0xb44cde2e,2 +np.float32,0x4316cbe4,0x34ccde2e,2 +np.float32,0xc316cbe4,0xb4ccde2e,2 +np.float32,0x4219f024,0x3f35050f,2 +np.float32,0xc219f024,0xbf35050f,2 +np.float32,0x4299f024,0x3f800000,2 +np.float32,0xc299f024,0xbf800000,2 +np.float32,0x4319f024,0xb71bde6c,2 +np.float32,0xc319f024,0x371bde6c,2 +np.float32,0x421d1463,0x3f800000,2 +np.float32,0xc21d1463,0xbf800000,2 +np.float32,0x429d1463,0xb5c55799,2 +np.float32,0xc29d1463,0x35c55799,2 +np.float32,0x431d1463,0x36455799,2 +np.float32,0xc31d1463,0xb6455799,2 +np.float32,0x422038a3,0x3f3504d0,2 +np.float32,0xc22038a3,0xbf3504d0,2 +np.float32,0x42a038a3,0xbf800000,2 +np.float32,0xc2a038a3,0x3f800000,2 +np.float32,0x432038a3,0xb746cd61,2 +np.float32,0xc32038a3,0x3746cd61,2 +np.float32,0x42235ce2,0xb5b889b6,2 +np.float32,0xc2235ce2,0x35b889b6,2 +np.float32,0x42a35ce2,0x363889b6,2 +np.float32,0xc2a35ce2,0xb63889b6,2 +np.float32,0x43235ce2,0x36b889b6,2 +np.float32,0xc3235ce2,0xb6b889b6,2 +np.float32,0x42268121,0xbf3504f1,2 +np.float32,0xc2268121,0x3f3504f1,2 +np.float32,0x42a68121,0x3f800000,2 +np.float32,0xc2a68121,0xbf800000,2 +np.float32,0x43268121,0x35643aac,2 +np.float32,0xc3268121,0xb5643aac,2 +np.float32,0x4229a561,0xbf800000,2 +np.float32,0xc229a561,0x3f800000,2 +np.float32,0x42a9a561,0xb68733d0,2 +np.float32,0xc2a9a561,0x368733d0,2 +np.float32,0x4329a561,0x370733d0,2 +np.float32,0xc329a561,0xb70733d0,2 +np.float32,0x422cc9a0,0xbf3504ee,2 +np.float32,0xc22cc9a0,0x3f3504ee,2 +np.float32,0x42acc9a0,0xbf800000,2 +np.float32,0xc2acc9a0,0x3f800000,2 +np.float32,0x432cc9a0,0xb5e55a50,2 +np.float32,0xc32cc9a0,0x35e55a50,2 +np.float32,0x422fede0,0x363222c4,2 +np.float32,0xc22fede0,0xb63222c4,2 +np.float32,0x42afede0,0x36b222c4,2 +np.float32,0xc2afede0,0xb6b222c4,2 +np.float32,0x432fede0,0x373222c4,2 +np.float32,0xc32fede0,0xb73222c4,2 +np.float32,0x4233121f,0x3f350500,2 +np.float32,0xc233121f,0xbf350500,2 +np.float32,0x42b3121f,0x3f800000,2 +np.float32,0xc2b3121f,0xbf800000,2 +np.float32,0x4333121f,0xb68f347d,2 +np.float32,0xc333121f,0x368f347d,2 +np.float32,0x4236365e,0x3f800000,2 +np.float32,0xc236365e,0xbf800000,2 +np.float32,0x42b6365e,0x358bb91c,2 +np.float32,0xc2b6365e,0xb58bb91c,2 +np.float32,0x4336365e,0xb60bb91c,2 +np.float32,0xc336365e,0x360bb91c,2 +np.float32,0x42395a9e,0x3f3504df,2 +np.float32,0xc2395a9e,0xbf3504df,2 +np.float32,0x42b95a9e,0xbf800000,2 +np.float32,0xc2b95a9e,0x3f800000,2 +np.float32,0x43395a9e,0xb6e51267,2 +np.float32,0xc3395a9e,0x36e51267,2 +np.float32,0x423c7edd,0xb4000add,2 +np.float32,0xc23c7edd,0x34000add,2 +np.float32,0x42bc7edd,0x34800add,2 +np.float32,0xc2bc7edd,0xb4800add,2 +np.float32,0x433c7edd,0x35000add,2 +np.float32,0xc33c7edd,0xb5000add,2 +np.float32,0x423fa31d,0xbf35050f,2 +np.float32,0xc23fa31d,0x3f35050f,2 +np.float32,0x42bfa31d,0x3f800000,2 +np.float32,0xc2bfa31d,0xbf800000,2 +np.float32,0x433fa31d,0xb71d7828,2 +np.float32,0xc33fa31d,0x371d7828,2 +np.float32,0x4242c75c,0xbf800000,2 +np.float32,0xc242c75c,0x3f800000,2 +np.float32,0x42c2c75c,0xb5cbbe8a,2 +np.float32,0xc2c2c75c,0x35cbbe8a,2 +np.float32,0x4342c75c,0x364bbe8a,2 +np.float32,0xc342c75c,0xb64bbe8a,2 
+np.float32,0x4245eb9c,0xbf3504d0,2 +np.float32,0xc245eb9c,0x3f3504d0,2 +np.float32,0x42c5eb9c,0xbf800000,2 +np.float32,0xc2c5eb9c,0x3f800000,2 +np.float32,0x4345eb9c,0xb748671d,2 +np.float32,0xc345eb9c,0x3748671d,2 +np.float32,0x42490fdb,0x35bbbd2e,2 +np.float32,0xc2490fdb,0xb5bbbd2e,2 +np.float32,0x42c90fdb,0x363bbd2e,2 +np.float32,0xc2c90fdb,0xb63bbd2e,2 +np.float32,0x43490fdb,0x36bbbd2e,2 +np.float32,0xc3490fdb,0xb6bbbd2e,2 +np.float32,0x424c341a,0x3f3504f1,2 +np.float32,0xc24c341a,0xbf3504f1,2 +np.float32,0x42cc341a,0x3f800000,2 +np.float32,0xc2cc341a,0xbf800000,2 +np.float32,0x434c341a,0x354a9ee6,2 +np.float32,0xc34c341a,0xb54a9ee6,2 +np.float32,0x424f585a,0x3f800000,2 +np.float32,0xc24f585a,0xbf800000,2 +np.float32,0x42cf585a,0xb688cd8c,2 +np.float32,0xc2cf585a,0x3688cd8c,2 +np.float32,0x434f585a,0x3708cd8c,2 +np.float32,0xc34f585a,0xb708cd8c,2 +np.float32,0x42527c99,0x3f3504ee,2 +np.float32,0xc2527c99,0xbf3504ee,2 +np.float32,0x42d27c99,0xbf800000,2 +np.float32,0xc2d27c99,0x3f800000,2 +np.float32,0x43527c99,0xb5f22833,2 +np.float32,0xc3527c99,0x35f22833,2 +np.float32,0x4255a0d9,0xb633bc81,2 +np.float32,0xc255a0d9,0x3633bc81,2 +np.float32,0x42d5a0d9,0x36b3bc81,2 +np.float32,0xc2d5a0d9,0xb6b3bc81,2 +np.float32,0x4355a0d9,0x3733bc81,2 +np.float32,0xc355a0d9,0xb733bc81,2 +np.float32,0x4258c518,0xbf350500,2 +np.float32,0xc258c518,0x3f350500,2 +np.float32,0x42d8c518,0x3f800000,2 +np.float32,0xc2d8c518,0xbf800000,2 +np.float32,0x4358c518,0xb69267f6,2 +np.float32,0xc358c518,0x369267f6,2 +np.float32,0x425be958,0xbf800000,2 +np.float32,0xc25be958,0x3f800000,2 +np.float32,0x42dbe958,0xb6deab75,2 +np.float32,0xc2dbe958,0x36deab75,2 +np.float32,0x435be958,0x375eab75,2 +np.float32,0xc35be958,0xb75eab75,2 +np.float32,0x425f0d97,0xbf3504df,2 +np.float32,0xc25f0d97,0x3f3504df,2 +np.float32,0x42df0d97,0xbf800000,2 +np.float32,0xc2df0d97,0x3f800000,2 +np.float32,0x435f0d97,0xb6e845e0,2 +np.float32,0xc35f0d97,0x36e845e0,2 +np.float32,0x426231d6,0x3419a6a2,2 +np.float32,0xc26231d6,0xb419a6a2,2 +np.float32,0x42e231d6,0x3499a6a2,2 +np.float32,0xc2e231d6,0xb499a6a2,2 +np.float32,0x436231d6,0x3519a6a2,2 +np.float32,0xc36231d6,0xb519a6a2,2 +np.float32,0x42655616,0x3f35050f,2 +np.float32,0xc2655616,0xbf35050f,2 +np.float32,0x42e55616,0x3f800000,2 +np.float32,0xc2e55616,0xbf800000,2 +np.float32,0x43655616,0xb71f11e5,2 +np.float32,0xc3655616,0x371f11e5,2 +np.float32,0x42687a55,0x3f800000,2 +np.float32,0xc2687a55,0xbf800000,2 +np.float32,0x42e87a55,0xb5d2257b,2 +np.float32,0xc2e87a55,0x35d2257b,2 +np.float32,0x43687a55,0x3652257b,2 +np.float32,0xc3687a55,0xb652257b,2 +np.float32,0x426b9e95,0x3f3504cf,2 +np.float32,0xc26b9e95,0xbf3504cf,2 +np.float32,0x42eb9e95,0xbf800000,2 +np.float32,0xc2eb9e95,0x3f800000,2 +np.float32,0x436b9e95,0xb74a00d9,2 +np.float32,0xc36b9e95,0x374a00d9,2 +np.float32,0x426ec2d4,0xb5bef0a7,2 +np.float32,0xc26ec2d4,0x35bef0a7,2 +np.float32,0x42eec2d4,0x363ef0a7,2 +np.float32,0xc2eec2d4,0xb63ef0a7,2 +np.float32,0x436ec2d4,0x36bef0a7,2 +np.float32,0xc36ec2d4,0xb6bef0a7,2 +np.float32,0x4271e713,0xbf3504f1,2 +np.float32,0xc271e713,0x3f3504f1,2 +np.float32,0x42f1e713,0x3f800000,2 +np.float32,0xc2f1e713,0xbf800000,2 +np.float32,0x4371e713,0x35310321,2 +np.float32,0xc371e713,0xb5310321,2 +np.float32,0x42750b53,0xbf800000,2 +np.float32,0xc2750b53,0x3f800000,2 +np.float32,0x42f50b53,0xb68a6748,2 +np.float32,0xc2f50b53,0x368a6748,2 +np.float32,0x43750b53,0x370a6748,2 +np.float32,0xc3750b53,0xb70a6748,2 +np.float32,0x42782f92,0xbf3504ee,2 +np.float32,0xc2782f92,0x3f3504ee,2 
+np.float32,0x42f82f92,0xbf800000,2 +np.float32,0xc2f82f92,0x3f800000,2 +np.float32,0x43782f92,0xb5fef616,2 +np.float32,0xc3782f92,0x35fef616,2 +np.float32,0x427b53d2,0x3635563d,2 +np.float32,0xc27b53d2,0xb635563d,2 +np.float32,0x42fb53d2,0x36b5563d,2 +np.float32,0xc2fb53d2,0xb6b5563d,2 +np.float32,0x437b53d2,0x3735563d,2 +np.float32,0xc37b53d2,0xb735563d,2 +np.float32,0x427e7811,0x3f350500,2 +np.float32,0xc27e7811,0xbf350500,2 +np.float32,0x42fe7811,0x3f800000,2 +np.float32,0xc2fe7811,0xbf800000,2 +np.float32,0x437e7811,0xb6959b6f,2 +np.float32,0xc37e7811,0x36959b6f,2 +np.float32,0x4280ce28,0x3f800000,2 +np.float32,0xc280ce28,0xbf800000,2 +np.float32,0x4300ce28,0x357dd672,2 +np.float32,0xc300ce28,0xb57dd672,2 +np.float32,0x4380ce28,0xb5fdd672,2 +np.float32,0xc380ce28,0x35fdd672,2 +np.float32,0x42826048,0x3f3504de,2 +np.float32,0xc2826048,0xbf3504de,2 +np.float32,0x43026048,0xbf800000,2 +np.float32,0xc3026048,0x3f800000,2 +np.float32,0x43826048,0xb6eb7958,2 +np.float32,0xc3826048,0x36eb7958,2 +np.float32,0x4283f268,0xb6859a13,2 +np.float32,0xc283f268,0x36859a13,2 +np.float32,0x4303f268,0x37059a13,2 +np.float32,0xc303f268,0xb7059a13,2 +np.float32,0x4383f268,0x37859a13,2 +np.float32,0xc383f268,0xb7859a13,2 +np.float32,0x42858487,0xbf3504e2,2 +np.float32,0xc2858487,0x3f3504e2,2 +np.float32,0x43058487,0x3f800000,2 +np.float32,0xc3058487,0xbf800000,2 +np.float32,0x43858487,0x36bea8be,2 +np.float32,0xc3858487,0xb6bea8be,2 +np.float32,0x428716a7,0xbf800000,2 +np.float32,0xc28716a7,0x3f800000,2 +np.float32,0x430716a7,0xb5d88c6d,2 +np.float32,0xc30716a7,0x35d88c6d,2 +np.float32,0x438716a7,0x36588c6d,2 +np.float32,0xc38716a7,0xb6588c6d,2 +np.float32,0x4288a8c7,0xbf3504cf,2 +np.float32,0xc288a8c7,0x3f3504cf,2 +np.float32,0x4308a8c7,0xbf800000,2 +np.float32,0xc308a8c7,0x3f800000,2 +np.float32,0x4388a8c7,0xb74b9a96,2 +np.float32,0xc388a8c7,0x374b9a96,2 +np.float32,0x428a3ae7,0x36b08908,2 +np.float32,0xc28a3ae7,0xb6b08908,2 +np.float32,0x430a3ae7,0x37308908,2 +np.float32,0xc30a3ae7,0xb7308908,2 +np.float32,0x438a3ae7,0x37b08908,2 +np.float32,0xc38a3ae7,0xb7b08908,2 +np.float32,0x428bcd06,0x3f3504f2,2 +np.float32,0xc28bcd06,0xbf3504f2,2 +np.float32,0x430bcd06,0x3f800000,2 +np.float32,0xc30bcd06,0xbf800000,2 +np.float32,0x438bcd06,0x3517675b,2 +np.float32,0xc38bcd06,0xb517675b,2 +np.float32,0x428d5f26,0x3f800000,2 +np.float32,0xc28d5f26,0xbf800000,2 +np.float32,0x430d5f26,0xb68c0105,2 +np.float32,0xc30d5f26,0x368c0105,2 +np.float32,0x438d5f26,0x370c0105,2 +np.float32,0xc38d5f26,0xb70c0105,2 +np.float32,0x428ef146,0x3f3504c0,2 +np.float32,0xc28ef146,0xbf3504c0,2 +np.float32,0x430ef146,0xbf800000,2 +np.float32,0xc30ef146,0x3f800000,2 +np.float32,0x438ef146,0xb790bc40,2 +np.float32,0xc38ef146,0x3790bc40,2 +np.float32,0x42908365,0x3592200d,2 +np.float32,0xc2908365,0xb592200d,2 +np.float32,0x43108365,0xb612200d,2 +np.float32,0xc3108365,0x3612200d,2 +np.float32,0x43908365,0xb692200d,2 +np.float32,0xc3908365,0x3692200d,2 +np.float32,0x42921585,0xbf350501,2 +np.float32,0xc2921585,0x3f350501,2 +np.float32,0x43121585,0x3f800000,2 +np.float32,0xc3121585,0xbf800000,2 +np.float32,0x43921585,0xb698cee8,2 +np.float32,0xc3921585,0x3698cee8,2 +np.float32,0x4293a7a5,0xbf800000,2 +np.float32,0xc293a7a5,0x3f800000,2 +np.float32,0x4313a7a5,0xb6e1deee,2 +np.float32,0xc313a7a5,0x36e1deee,2 +np.float32,0x4393a7a5,0x3761deee,2 +np.float32,0xc393a7a5,0xb761deee,2 +np.float32,0x429539c5,0xbf3504b1,2 +np.float32,0xc29539c5,0x3f3504b1,2 +np.float32,0x431539c5,0xbf800000,2 +np.float32,0xc31539c5,0x3f800000,2 
+np.float32,0x439539c5,0xb7bbab34,2 +np.float32,0xc39539c5,0x37bbab34,2 +np.float32,0x4296cbe4,0x344cde2e,2 +np.float32,0xc296cbe4,0xb44cde2e,2 +np.float32,0x4316cbe4,0x34ccde2e,2 +np.float32,0xc316cbe4,0xb4ccde2e,2 +np.float32,0x4396cbe4,0x354cde2e,2 +np.float32,0xc396cbe4,0xb54cde2e,2 +np.float32,0x42985e04,0x3f350510,2 +np.float32,0xc2985e04,0xbf350510,2 +np.float32,0x43185e04,0x3f800000,2 +np.float32,0xc3185e04,0xbf800000,2 +np.float32,0x43985e04,0xb722455d,2 +np.float32,0xc3985e04,0x3722455d,2 +np.float32,0x4299f024,0x3f800000,2 +np.float32,0xc299f024,0xbf800000,2 +np.float32,0x4319f024,0xb71bde6c,2 +np.float32,0xc319f024,0x371bde6c,2 +np.float32,0x4399f024,0x379bde6c,2 +np.float32,0xc399f024,0xb79bde6c,2 +np.float32,0x429b8243,0x3f3504fc,2 +np.float32,0xc29b8243,0xbf3504fc,2 +np.float32,0x431b8243,0xbf800000,2 +np.float32,0xc31b8243,0x3f800000,2 +np.float32,0x439b8243,0x364b2eb8,2 +np.float32,0xc39b8243,0xb64b2eb8,2 +np.float32,0x435b2047,0xbf350525,2 +np.float32,0x42a038a2,0xbf800000,2 +np.float32,0x432038a2,0x3664ca7e,2 +np.float32,0x4345eb9b,0x365e638c,2 +np.float32,0x42c5eb9b,0xbf800000,2 +np.float32,0x42eb9e94,0xbf800000,2 +np.float32,0x4350ea79,0x3f800000,2 +np.float32,0x42dbe957,0x3585522a,2 +np.float32,0x425be957,0xbf800000,2 +np.float32,0x435be957,0xb605522a,2 +np.float32,0x476362a2,0xbd7ff911,2 +np.float32,0x464c99a4,0x3e7f4d41,2 +np.float32,0x4471f73d,0x3e7fe1b0,2 +np.float32,0x445a6752,0x3e7ef367,2 +np.float32,0x474fa400,0x3e7f9fcd,2 +np.float32,0x45c1e72f,0xbe7fc7af,2 +np.float32,0x4558c91d,0x3e7e9f31,2 +np.float32,0x43784f94,0xbdff6654,2 +np.float32,0x466e8500,0xbe7ea0a3,2 +np.float32,0x468e1c25,0x3e7e22fb,2 +np.float32,0x44ea6cfc,0x3dff70c3,2 +np.float32,0x4605126c,0x3e7f89ef,2 +np.float32,0x4788b3c6,0xbb87d853,2 +np.float32,0x4531b042,0x3dffd163,2 +np.float32,0x43f1f71d,0x3dfff387,2 +np.float32,0x462c3fa5,0xbd7fe13d,2 +np.float32,0x441c5354,0xbdff76b4,2 +np.float32,0x44908b69,0x3e7dcf0d,2 +np.float32,0x478813ad,0xbe7e9d80,2 +np.float32,0x441c4351,0x3dff937b,2 +np.float64,0x1,0x1,1 +np.float64,0x8000000000000001,0x8000000000000001,1 +np.float64,0x10000000000000,0x10000000000000,1 +np.float64,0x8010000000000000,0x8010000000000000,1 +np.float64,0x7fefffffffffffff,0x3f7452fc98b34e97,1 +np.float64,0xffefffffffffffff,0xbf7452fc98b34e97,1 +np.float64,0x7ff0000000000000,0xfff8000000000000,1 +np.float64,0xfff0000000000000,0xfff8000000000000,1 +np.float64,0x7ff8000000000000,0x7ff8000000000000,1 +np.float64,0x7ff4000000000000,0x7ffc000000000000,1 +np.float64,0xbfda51b226b4a364,0xbfd9956328ff876c,1 +np.float64,0xbfb4a65aee294cb8,0xbfb4a09fd744f8a5,1 +np.float64,0xbfd73b914fae7722,0xbfd6b9cce55af379,1 +np.float64,0xbfd90c12b4b21826,0xbfd869a3867b51c2,1 +np.float64,0x3fe649bb3d6c9376,0x3fe48778d9b48a21,1 +np.float64,0xbfd5944532ab288a,0xbfd52c30e1951b42,1 +np.float64,0x3fb150c45222a190,0x3fb14d633eb8275d,1 +np.float64,0x3fe4a6ffa9e94e00,0x3fe33f8a95c33299,1 +np.float64,0x3fe8d2157171a42a,0x3fe667d904ac95a6,1 +np.float64,0xbfa889f52c3113f0,0xbfa8878d90a23fa5,1 +np.float64,0x3feb3234bef6646a,0x3fe809d541d9017a,1 +np.float64,0x3fc6de266f2dbc50,0x3fc6bf0ee80a0d86,1 +np.float64,0x3fe8455368f08aa6,0x3fe6028254338ed5,1 +np.float64,0xbfe5576079eaaec1,0xbfe3cb4a8f6bc3f5,1 +np.float64,0xbfe9f822ff73f046,0xbfe7360d7d5cb887,1 +np.float64,0xbfb1960e7e232c20,0xbfb1928438258602,1 +np.float64,0xbfca75938d34eb28,0xbfca4570979bf2fa,1 +np.float64,0x3fd767dd15aecfbc,0x3fd6e33039018bab,1 +np.float64,0xbfe987750ef30eea,0xbfe6e7ed30ce77f0,1 +np.float64,0xbfe87f95a1f0ff2b,0xbfe62ca7e928bb2a,1 
+np.float64,0xbfd2465301a48ca6,0xbfd2070245775d76,1 +np.float64,0xbfb1306ed22260e0,0xbfb12d2088eaa4f9,1 +np.float64,0xbfd8089010b01120,0xbfd778f9db77f2f3,1 +np.float64,0x3fbf9cf4ee3f39f0,0x3fbf88674fde1ca2,1 +np.float64,0x3fe6d8468a6db08e,0x3fe4f403f38b7bec,1 +np.float64,0xbfd9e5deefb3cbbe,0xbfd932692c722351,1 +np.float64,0x3fd1584d55a2b09c,0x3fd122253eeecc2e,1 +np.float64,0x3fe857979cf0af30,0x3fe60fc12b5ba8db,1 +np.float64,0x3fe3644149e6c882,0x3fe239f47013cfe6,1 +np.float64,0xbfe22ea62be45d4c,0xbfe13834c17d56fe,1 +np.float64,0xbfe8d93e1df1b27c,0xbfe66cf4ee467fd2,1 +np.float64,0xbfe9c497c9f38930,0xbfe7127417da4204,1 +np.float64,0x3fd6791cecacf238,0x3fd6039ccb5a7fde,1 +np.float64,0xbfc1dc1b1523b838,0xbfc1cd48edd9ae19,1 +np.float64,0xbfc92a8491325508,0xbfc901176e0158a5,1 +np.float64,0x3fa8649b3430c940,0x3fa8623e82d9504f,1 +np.float64,0x3fe0bed6a1617dae,0x3fdffbb307fb1abe,1 +np.float64,0x3febdf7765f7beee,0x3fe87ad01a89b74a,1 +np.float64,0xbfd3a56d46a74ada,0xbfd356cf41bf83cd,1 +np.float64,0x3fd321d824a643b0,0x3fd2d93846a224b3,1 +np.float64,0xbfc6a49fb52d4940,0xbfc686704906e7d3,1 +np.float64,0xbfdd4103c9ba8208,0xbfdc3ef0c03615b4,1 +np.float64,0xbfe0b78a51e16f14,0xbfdfef0d9ffc38b5,1 +np.float64,0xbfdac7a908b58f52,0xbfda0158956ceecf,1 +np.float64,0xbfbfbf12f23f7e28,0xbfbfaa428989258c,1 +np.float64,0xbfd55f5aa2aabeb6,0xbfd4fa39de65f33a,1 +np.float64,0x3fe06969abe0d2d4,0x3fdf6744fafdd9cf,1 +np.float64,0x3fe56ab8be6ad572,0x3fe3da7a1986d543,1 +np.float64,0xbfeefbbec67df77e,0xbfea5d426132f4aa,1 +np.float64,0x3fe6e1f49cedc3ea,0x3fe4fb53f3d8e3d5,1 +np.float64,0x3feceb231c79d646,0x3fe923d3efa55414,1 +np.float64,0xbfd03dd08ea07ba2,0xbfd011549aa1998a,1 +np.float64,0xbfd688327aad1064,0xbfd611c61b56adbe,1 +np.float64,0xbfde3249d8bc6494,0xbfdd16a7237a39d5,1 +np.float64,0x3febd4b65677a96c,0x3fe873e1a401ef03,1 +np.float64,0xbfe46bd2b368d7a6,0xbfe31023c2467749,1 +np.float64,0x3fbf9f5cde3f3ec0,0x3fbf8aca8ec53c45,1 +np.float64,0x3fc20374032406e8,0x3fc1f43f1f2f4d5e,1 +np.float64,0xbfec143b16f82876,0xbfe89caa42582381,1 +np.float64,0xbfd14fa635a29f4c,0xbfd119ced11da669,1 +np.float64,0x3fe25236d4e4a46e,0x3fe156242d644b7a,1 +np.float64,0xbfe4ed793469daf2,0xbfe377a88928fd77,1 +np.float64,0xbfb363572626c6b0,0xbfb35e98d8fe87ae,1 +np.float64,0xbfb389d5aa2713a8,0xbfb384fae55565a7,1 +np.float64,0x3fca6e001934dc00,0x3fca3e0661eaca84,1 +np.float64,0x3fe748f3f76e91e8,0x3fe548ab2168aea6,1 +np.float64,0x3fef150efdfe2a1e,0x3fea6b92d74f60d3,1 +np.float64,0xbfd14b52b1a296a6,0xbfd115a387c0fa93,1 +np.float64,0x3fe3286b5ce650d6,0x3fe208a6469a7527,1 +np.float64,0xbfd57b4f4baaf69e,0xbfd514a12a9f7ab0,1 +np.float64,0xbfef14bd467e297b,0xbfea6b64bbfd42ce,1 +np.float64,0xbfe280bc90650179,0xbfe17d2c49955dba,1 +np.float64,0x3fca8759d7350eb0,0x3fca56d5c17bbc14,1 +np.float64,0xbfdf988f30bf311e,0xbfde53f96f69b05f,1 +np.float64,0x3f6b6eeb4036de00,0x3f6b6ee7e3f86f9a,1 +np.float64,0xbfed560be8faac18,0xbfe9656c5cf973d8,1 +np.float64,0x3fc6102c592c2058,0x3fc5f43efad5396d,1 +np.float64,0xbfdef64ed2bdec9e,0xbfddc4b7fbd45aea,1 +np.float64,0x3fe814acd570295a,0x3fe5df183d543bfe,1 +np.float64,0x3fca21313f344260,0x3fc9f2d47f64fbe2,1 +np.float64,0xbfe89932cc713266,0xbfe63f186a2f60ce,1 +np.float64,0x3fe4ffcff169ffa0,0x3fe386336115ee21,1 +np.float64,0x3fee6964087cd2c8,0x3fea093d31e2c2c5,1 +np.float64,0xbfbeea604e3dd4c0,0xbfbed72734852669,1 +np.float64,0xbfea1954fb7432aa,0xbfe74cdad8720032,1 +np.float64,0x3fea3e1a5ef47c34,0x3fe765ffba65a11d,1 +np.float64,0x3fcedb850b3db708,0x3fce8f39d92f00ba,1 +np.float64,0x3fd3b52d41a76a5c,0x3fd365d22b0003f9,1 
+np.float64,0xbfa4108a0c282110,0xbfa40f397fcd844f,1 +np.float64,0x3fd7454c57ae8a98,0x3fd6c2e5542c6c83,1 +np.float64,0xbfeecd3c7a7d9a79,0xbfea42ca943a1695,1 +np.float64,0xbfdddda397bbbb48,0xbfdccb27283d4c4c,1 +np.float64,0x3fe6b52cf76d6a5a,0x3fe4d96ff32925ff,1 +np.float64,0xbfa39a75ec2734f0,0xbfa3993c0da84f87,1 +np.float64,0x3fdd3fe6fdba7fcc,0x3fdc3df12fe9e525,1 +np.float64,0xbfb57a98162af530,0xbfb5742525d5fbe2,1 +np.float64,0xbfd3e166cfa7c2ce,0xbfd38ff2891be9b0,1 +np.float64,0x3fdb6a04f9b6d408,0x3fda955e5018e9dc,1 +np.float64,0x3fe4ab03a4e95608,0x3fe342bfa76e1aa8,1 +np.float64,0xbfe6c8480b6d9090,0xbfe4e7eaa935b3f5,1 +np.float64,0xbdd6b5a17bae,0xbdd6b5a17bae,1 +np.float64,0xd6591979acb23,0xd6591979acb23,1 +np.float64,0x5adbed90b5b7e,0x5adbed90b5b7e,1 +np.float64,0xa664c5314cc99,0xa664c5314cc99,1 +np.float64,0x1727fb162e500,0x1727fb162e500,1 +np.float64,0xdb49a93db6935,0xdb49a93db6935,1 +np.float64,0xb10c958d62193,0xb10c958d62193,1 +np.float64,0xad38276f5a705,0xad38276f5a705,1 +np.float64,0x1d5d0b983aba2,0x1d5d0b983aba2,1 +np.float64,0x915f48e122be9,0x915f48e122be9,1 +np.float64,0x475958ae8eb2c,0x475958ae8eb2c,1 +np.float64,0x3af8406675f09,0x3af8406675f09,1 +np.float64,0x655e88a4cabd2,0x655e88a4cabd2,1 +np.float64,0x40fee8ce81fde,0x40fee8ce81fde,1 +np.float64,0xab83103f57062,0xab83103f57062,1 +np.float64,0x7cf934b8f9f27,0x7cf934b8f9f27,1 +np.float64,0x29f7524853eeb,0x29f7524853eeb,1 +np.float64,0x4a5e954894bd3,0x4a5e954894bd3,1 +np.float64,0x24638f3a48c73,0x24638f3a48c73,1 +np.float64,0xa4f32fc749e66,0xa4f32fc749e66,1 +np.float64,0xf8e92df7f1d26,0xf8e92df7f1d26,1 +np.float64,0x292e9d50525d4,0x292e9d50525d4,1 +np.float64,0xe937e897d26fd,0xe937e897d26fd,1 +np.float64,0xd3bde1d5a77bc,0xd3bde1d5a77bc,1 +np.float64,0xa447ffd548900,0xa447ffd548900,1 +np.float64,0xa3b7b691476f7,0xa3b7b691476f7,1 +np.float64,0x490095c892013,0x490095c892013,1 +np.float64,0xfc853235f90a7,0xfc853235f90a7,1 +np.float64,0x5a8bc082b5179,0x5a8bc082b5179,1 +np.float64,0x1baca45a37595,0x1baca45a37595,1 +np.float64,0x2164120842c83,0x2164120842c83,1 +np.float64,0x66692bdeccd26,0x66692bdeccd26,1 +np.float64,0xf205bdd3e40b8,0xf205bdd3e40b8,1 +np.float64,0x7c3fff98f8801,0x7c3fff98f8801,1 +np.float64,0xccdf10e199bf,0xccdf10e199bf,1 +np.float64,0x92db8e8125b8,0x92db8e8125b8,1 +np.float64,0x5789a8d6af136,0x5789a8d6af136,1 +np.float64,0xbdda869d7bb51,0xbdda869d7bb51,1 +np.float64,0xb665e0596ccbc,0xb665e0596ccbc,1 +np.float64,0x74e6b46ee9cd7,0x74e6b46ee9cd7,1 +np.float64,0x4f39cf7c9e73b,0x4f39cf7c9e73b,1 +np.float64,0xfdbf3907fb7e7,0xfdbf3907fb7e7,1 +np.float64,0xafdef4d55fbdf,0xafdef4d55fbdf,1 +np.float64,0xb49858236930b,0xb49858236930b,1 +np.float64,0x3ebe21d47d7c5,0x3ebe21d47d7c5,1 +np.float64,0x5b620512b6c41,0x5b620512b6c41,1 +np.float64,0x31918cda63232,0x31918cda63232,1 +np.float64,0x68b5741ed16af,0x68b5741ed16af,1 +np.float64,0xa5c09a5b4b814,0xa5c09a5b4b814,1 +np.float64,0x55f51c14abea4,0x55f51c14abea4,1 +np.float64,0xda8a3e41b515,0xda8a3e41b515,1 +np.float64,0x9ea9c8513d539,0x9ea9c8513d539,1 +np.float64,0x7f23b964fe478,0x7f23b964fe478,1 +np.float64,0xf6e08c7bedc12,0xf6e08c7bedc12,1 +np.float64,0x7267aa24e4cf6,0x7267aa24e4cf6,1 +np.float64,0x236bb93a46d78,0x236bb93a46d78,1 +np.float64,0x9a98430b35309,0x9a98430b35309,1 +np.float64,0xbb683fef76d08,0xbb683fef76d08,1 +np.float64,0x1ff0eb6e3fe1e,0x1ff0eb6e3fe1e,1 +np.float64,0xf524038fea481,0xf524038fea481,1 +np.float64,0xd714e449ae29d,0xd714e449ae29d,1 +np.float64,0x4154fd7682aa0,0x4154fd7682aa0,1 +np.float64,0x5b8d2f6cb71a7,0x5b8d2f6cb71a7,1 
+np.float64,0xc91aa21d92355,0xc91aa21d92355,1 +np.float64,0xbd94fd117b2a0,0xbd94fd117b2a0,1 +np.float64,0x685b207ad0b65,0x685b207ad0b65,1 +np.float64,0xd2485b05a490c,0xd2485b05a490c,1 +np.float64,0x151ea5e62a3d6,0x151ea5e62a3d6,1 +np.float64,0x2635a7164c6b6,0x2635a7164c6b6,1 +np.float64,0x88ae3b5d115c8,0x88ae3b5d115c8,1 +np.float64,0x8a055a55140ac,0x8a055a55140ac,1 +np.float64,0x756f7694eadef,0x756f7694eadef,1 +np.float64,0x866d74630cdaf,0x866d74630cdaf,1 +np.float64,0x39e44f2873c8b,0x39e44f2873c8b,1 +np.float64,0x2a07ceb6540fb,0x2a07ceb6540fb,1 +np.float64,0xc52b96398a573,0xc52b96398a573,1 +np.float64,0x9546543b2a8cb,0x9546543b2a8cb,1 +np.float64,0x5b995b90b732c,0x5b995b90b732c,1 +np.float64,0x2de10a565bc22,0x2de10a565bc22,1 +np.float64,0x3b06ee94760df,0x3b06ee94760df,1 +np.float64,0xb18e77a5631cf,0xb18e77a5631cf,1 +np.float64,0x3b89ae3a77137,0x3b89ae3a77137,1 +np.float64,0xd9b0b6e5b3617,0xd9b0b6e5b3617,1 +np.float64,0x30b2310861647,0x30b2310861647,1 +np.float64,0x326a3ab464d48,0x326a3ab464d48,1 +np.float64,0x4c18610a9830d,0x4c18610a9830d,1 +np.float64,0x541dea42a83be,0x541dea42a83be,1 +np.float64,0xcd027dbf9a050,0xcd027dbf9a050,1 +np.float64,0x780a0f80f015,0x780a0f80f015,1 +np.float64,0x740ed5b2e81db,0x740ed5b2e81db,1 +np.float64,0xc226814d844d0,0xc226814d844d0,1 +np.float64,0xde958541bd2b1,0xde958541bd2b1,1 +np.float64,0xb563d3296ac7b,0xb563d3296ac7b,1 +np.float64,0x1db3b0b83b677,0x1db3b0b83b677,1 +np.float64,0xa7b0275d4f605,0xa7b0275d4f605,1 +np.float64,0x72f8d038e5f1b,0x72f8d038e5f1b,1 +np.float64,0x860ed1350c1da,0x860ed1350c1da,1 +np.float64,0x79f88262f3f11,0x79f88262f3f11,1 +np.float64,0x8817761f102ef,0x8817761f102ef,1 +np.float64,0xac44784b5888f,0xac44784b5888f,1 +np.float64,0x800fd594241fab28,0x800fd594241fab28,1 +np.float64,0x800ede32f8ddbc66,0x800ede32f8ddbc66,1 +np.float64,0x800de4c1121bc982,0x800de4c1121bc982,1 +np.float64,0x80076ebcddcedd7a,0x80076ebcddcedd7a,1 +np.float64,0x800b3fee06567fdc,0x800b3fee06567fdc,1 +np.float64,0x800b444426b68889,0x800b444426b68889,1 +np.float64,0x800b1c037a563807,0x800b1c037a563807,1 +np.float64,0x8001eb88c2a3d712,0x8001eb88c2a3d712,1 +np.float64,0x80058aae6dab155e,0x80058aae6dab155e,1 +np.float64,0x80083df2d4f07be6,0x80083df2d4f07be6,1 +np.float64,0x800e3b19d97c7634,0x800e3b19d97c7634,1 +np.float64,0x800a71c6f374e38e,0x800a71c6f374e38e,1 +np.float64,0x80048557f1490ab1,0x80048557f1490ab1,1 +np.float64,0x8000a00e6b01401e,0x8000a00e6b01401e,1 +np.float64,0x800766a3e2cecd49,0x800766a3e2cecd49,1 +np.float64,0x80015eb44602bd69,0x80015eb44602bd69,1 +np.float64,0x800bde885a77bd11,0x800bde885a77bd11,1 +np.float64,0x800224c53ea4498b,0x800224c53ea4498b,1 +np.float64,0x80048e8c6a291d1a,0x80048e8c6a291d1a,1 +np.float64,0x800b667e4af6ccfd,0x800b667e4af6ccfd,1 +np.float64,0x800ae3d7e395c7b0,0x800ae3d7e395c7b0,1 +np.float64,0x80086c245550d849,0x80086c245550d849,1 +np.float64,0x800d7d25f6fafa4c,0x800d7d25f6fafa4c,1 +np.float64,0x800f8d9ab0ff1b35,0x800f8d9ab0ff1b35,1 +np.float64,0x800690e949cd21d3,0x800690e949cd21d3,1 +np.float64,0x8003022381060448,0x8003022381060448,1 +np.float64,0x80085e0dad70bc1c,0x80085e0dad70bc1c,1 +np.float64,0x800e2ffc369c5ff9,0x800e2ffc369c5ff9,1 +np.float64,0x800b629b5af6c537,0x800b629b5af6c537,1 +np.float64,0x800fdc964b7fb92d,0x800fdc964b7fb92d,1 +np.float64,0x80036bb4b1c6d76a,0x80036bb4b1c6d76a,1 +np.float64,0x800b382f7f16705f,0x800b382f7f16705f,1 +np.float64,0x800ebac9445d7593,0x800ebac9445d7593,1 +np.float64,0x80015075c3e2a0ec,0x80015075c3e2a0ec,1 +np.float64,0x8002a6ec5ce54dd9,0x8002a6ec5ce54dd9,1 
+np.float64,0x8009fab74a93f56f,0x8009fab74a93f56f,1 +np.float64,0x800c94b9ea992974,0x800c94b9ea992974,1 +np.float64,0x800dc2efd75b85e0,0x800dc2efd75b85e0,1 +np.float64,0x800be6400d57cc80,0x800be6400d57cc80,1 +np.float64,0x80021f6858443ed1,0x80021f6858443ed1,1 +np.float64,0x800600e2ac4c01c6,0x800600e2ac4c01c6,1 +np.float64,0x800a2159e6b442b4,0x800a2159e6b442b4,1 +np.float64,0x800c912f4bb9225f,0x800c912f4bb9225f,1 +np.float64,0x800a863a9db50c76,0x800a863a9db50c76,1 +np.float64,0x800ac16851d582d1,0x800ac16851d582d1,1 +np.float64,0x8003f7d32e87efa7,0x8003f7d32e87efa7,1 +np.float64,0x800be4eee3d7c9de,0x800be4eee3d7c9de,1 +np.float64,0x80069ff0ac4d3fe2,0x80069ff0ac4d3fe2,1 +np.float64,0x80061c986d4c3932,0x80061c986d4c3932,1 +np.float64,0x8000737b4de0e6f7,0x8000737b4de0e6f7,1 +np.float64,0x8002066ef7440cdf,0x8002066ef7440cdf,1 +np.float64,0x8001007050c200e1,0x8001007050c200e1,1 +np.float64,0x8008df9fa351bf40,0x8008df9fa351bf40,1 +np.float64,0x800f8394ee5f072a,0x800f8394ee5f072a,1 +np.float64,0x80008e0b01c11c17,0x80008e0b01c11c17,1 +np.float64,0x800f7088ed3ee112,0x800f7088ed3ee112,1 +np.float64,0x800285b86f650b72,0x800285b86f650b72,1 +np.float64,0x8008ec18af51d832,0x8008ec18af51d832,1 +np.float64,0x800da08523bb410a,0x800da08523bb410a,1 +np.float64,0x800de853ca7bd0a8,0x800de853ca7bd0a8,1 +np.float64,0x8008c8aefad1915e,0x8008c8aefad1915e,1 +np.float64,0x80010c39d5821874,0x80010c39d5821874,1 +np.float64,0x8009208349724107,0x8009208349724107,1 +np.float64,0x800783783f0f06f1,0x800783783f0f06f1,1 +np.float64,0x80025caf9984b960,0x80025caf9984b960,1 +np.float64,0x800bc76fa6778ee0,0x800bc76fa6778ee0,1 +np.float64,0x80017e2f89a2fc60,0x80017e2f89a2fc60,1 +np.float64,0x800ef169843de2d3,0x800ef169843de2d3,1 +np.float64,0x80098a5f7db314bf,0x80098a5f7db314bf,1 +np.float64,0x800d646f971ac8df,0x800d646f971ac8df,1 +np.float64,0x800110d1dc6221a4,0x800110d1dc6221a4,1 +np.float64,0x800f8b422a1f1684,0x800f8b422a1f1684,1 +np.float64,0x800785c97dcf0b94,0x800785c97dcf0b94,1 +np.float64,0x800da201283b4403,0x800da201283b4403,1 +np.float64,0x800a117cc7b422fa,0x800a117cc7b422fa,1 +np.float64,0x80024731cfa48e64,0x80024731cfa48e64,1 +np.float64,0x800199d456c333a9,0x800199d456c333a9,1 +np.float64,0x8005f66bab8becd8,0x8005f66bab8becd8,1 +np.float64,0x8008e7227c11ce45,0x8008e7227c11ce45,1 +np.float64,0x8007b66cc42f6cda,0x8007b66cc42f6cda,1 +np.float64,0x800669e6f98cd3cf,0x800669e6f98cd3cf,1 +np.float64,0x800aed917375db23,0x800aed917375db23,1 +np.float64,0x8008b6dd15116dbb,0x8008b6dd15116dbb,1 +np.float64,0x800f49869cfe930d,0x800f49869cfe930d,1 +np.float64,0x800a712661b4e24d,0x800a712661b4e24d,1 +np.float64,0x800944e816f289d1,0x800944e816f289d1,1 +np.float64,0x800eba0f8a1d741f,0x800eba0f8a1d741f,1 +np.float64,0x800cf6ded139edbe,0x800cf6ded139edbe,1 +np.float64,0x80023100c6246202,0x80023100c6246202,1 +np.float64,0x800c5a94add8b52a,0x800c5a94add8b52a,1 +np.float64,0x800adf329b95be66,0x800adf329b95be66,1 +np.float64,0x800af9afc115f360,0x800af9afc115f360,1 +np.float64,0x800d66ce837acd9d,0x800d66ce837acd9d,1 +np.float64,0x8003ffb5e507ff6d,0x8003ffb5e507ff6d,1 +np.float64,0x80027d280024fa51,0x80027d280024fa51,1 +np.float64,0x800fc37e1d1f86fc,0x800fc37e1d1f86fc,1 +np.float64,0x800fc7258b9f8e4b,0x800fc7258b9f8e4b,1 +np.float64,0x8003fb5789e7f6b0,0x8003fb5789e7f6b0,1 +np.float64,0x800eb4e7a13d69cf,0x800eb4e7a13d69cf,1 +np.float64,0x800951850952a30a,0x800951850952a30a,1 +np.float64,0x3fed4071be3a80e3,0x3fe95842074431df,1 +np.float64,0x3f8d2341203a4682,0x3f8d2300b453bd9f,1 +np.float64,0x3fdc8ce332b919c6,0x3fdb9cdf1440c28f,1 
+np.float64,0x3fdc69bd84b8d37b,0x3fdb7d25c8166b7b,1 +np.float64,0x3fc4c22ad0298456,0x3fc4aae73e231b4f,1 +np.float64,0x3fea237809f446f0,0x3fe753cc6ca96193,1 +np.float64,0x3fd34cf6462699ed,0x3fd30268909bb47e,1 +np.float64,0x3fafce20643f9c41,0x3fafc8e41a240e35,1 +np.float64,0x3fdc6d416538da83,0x3fdb805262292863,1 +np.float64,0x3fe7d8362aefb06c,0x3fe5b2ce659db7fd,1 +np.float64,0x3fe290087de52011,0x3fe189f9a3eb123d,1 +np.float64,0x3fa62d2bf82c5a58,0x3fa62b65958ca2b8,1 +np.float64,0x3fafd134403fa269,0x3fafcbf670f8a6f3,1 +np.float64,0x3fa224e53c2449ca,0x3fa223ec5de1631b,1 +np.float64,0x3fb67e2c2c2cfc58,0x3fb676c445fb70a0,1 +np.float64,0x3fda358d01346b1a,0x3fd97b9441666eb2,1 +np.float64,0x3fdd30fc4bba61f9,0x3fdc308da423778d,1 +np.float64,0x3fc56e99c52add34,0x3fc5550004492621,1 +np.float64,0x3fe32d08de265a12,0x3fe20c761a73cec2,1 +np.float64,0x3fd46cf932a8d9f2,0x3fd414a7f3db03df,1 +np.float64,0x3fd94cfa2b3299f4,0x3fd8a5961b3e4bdd,1 +np.float64,0x3fed6ea3a6fadd47,0x3fe9745b2f6c9204,1 +np.float64,0x3fe4431d1768863a,0x3fe2ef61d0481de0,1 +np.float64,0x3fe1d8e00ea3b1c0,0x3fe0efab5050ee78,1 +np.float64,0x3fe56f37dcaade70,0x3fe3de00b0f392e0,1 +np.float64,0x3fde919a2dbd2334,0x3fdd6b6d2dcf2396,1 +np.float64,0x3fe251e3d4a4a3c8,0x3fe155de69605d60,1 +np.float64,0x3fe5e0ecc5abc1da,0x3fe436a5de5516cf,1 +np.float64,0x3fcd48780c3a90f0,0x3fcd073fa907ba9b,1 +np.float64,0x3fe4e8149229d029,0x3fe37360801d5b66,1 +np.float64,0x3fb9ef159633de2b,0x3fb9e3bc05a15d1d,1 +np.float64,0x3fc24a3f0424947e,0x3fc23a5432ca0e7c,1 +np.float64,0x3fe55ca196aab943,0x3fe3cf6b3143435a,1 +np.float64,0x3fe184544c2308a9,0x3fe0a7b49fa80aec,1 +np.float64,0x3fe2c76e83658edd,0x3fe1b8355c1ea771,1 +np.float64,0x3fea8d2c4ab51a59,0x3fe79ba85aabc099,1 +np.float64,0x3fd74f98abae9f31,0x3fd6cc85005d0593,1 +np.float64,0x3fec6de9a678dbd3,0x3fe8d59a1d23cdd1,1 +np.float64,0x3fec8a0e50f9141d,0x3fe8e7500f6f6a00,1 +np.float64,0x3fe9de6d08b3bcda,0x3fe7245319508767,1 +np.float64,0x3fe4461fd1688c40,0x3fe2f1cf0b93aba6,1 +np.float64,0x3fde342d9d3c685b,0x3fdd185609d5719d,1 +np.float64,0x3feb413fc8368280,0x3fe813c091d2519a,1 +np.float64,0x3fe64333156c8666,0x3fe48275b9a6a358,1 +np.float64,0x3fe03c65226078ca,0x3fdf18b26786be35,1 +np.float64,0x3fee11054dbc220b,0x3fe9d579a1cfa7ad,1 +np.float64,0x3fbaefccae35df99,0x3fbae314fef7c7ea,1 +np.float64,0x3feed4e3487da9c7,0x3fea4729241c8811,1 +np.float64,0x3fbb655df836cabc,0x3fbb57fcf9a097be,1 +np.float64,0x3fe68b0273ed1605,0x3fe4b96109afdf76,1 +np.float64,0x3fd216bfc3242d80,0x3fd1d957363f6a43,1 +np.float64,0x3fe01328d4a02652,0x3fded083bbf94aba,1 +np.float64,0x3fe3f9a61ae7f34c,0x3fe2b3f701b79028,1 +np.float64,0x3fed4e7cf8fa9cfa,0x3fe960d27084fb40,1 +np.float64,0x3faec08e343d811c,0x3faebbd2aa07ac1f,1 +np.float64,0x3fd2d1bbeea5a378,0x3fd28c9aefcf48ad,1 +np.float64,0x3fd92e941fb25d28,0x3fd889857f88410d,1 +np.float64,0x3fe43decb7e87bd9,0x3fe2eb32b4ee4667,1 +np.float64,0x3fef49cabcfe9395,0x3fea892f9a233f76,1 +np.float64,0x3fe3e96812e7d2d0,0x3fe2a6c6b45dd6ee,1 +np.float64,0x3fd24c0293a49805,0x3fd20c76d54473cb,1 +np.float64,0x3fb43d6b7e287ad7,0x3fb438060772795a,1 +np.float64,0x3fe87bf7d3f0f7f0,0x3fe62a0c47411c62,1 +np.float64,0x3fee82a2e07d0546,0x3fea17e27e752b7b,1 +np.float64,0x3fe40c01bbe81803,0x3fe2c2d9483f44d8,1 +np.float64,0x3fd686ccae2d0d99,0x3fd610763fb61097,1 +np.float64,0x3fe90fcf2af21f9e,0x3fe693c12df59ba9,1 +np.float64,0x3fefb3ce11ff679c,0x3feac3dd4787529d,1 +np.float64,0x3fcec53ff63d8a80,0x3fce79992af00c58,1 +np.float64,0x3fe599dd7bab33bb,0x3fe3ff5da7575d85,1 +np.float64,0x3fe9923b1a732476,0x3fe6ef71d13db456,1 
+np.float64,0x3febf76fcef7eee0,0x3fe88a3952e11373,1 +np.float64,0x3fc2cfd128259fa2,0x3fc2be7fd47fd811,1 +np.float64,0x3fe4d37ae269a6f6,0x3fe36300d45e3745,1 +np.float64,0x3fe23aa2e4247546,0x3fe1424e172f756f,1 +np.float64,0x3fe4f0596ca9e0b3,0x3fe379f0c49de7ef,1 +np.float64,0x3fe2e4802fe5c900,0x3fe1d062a8812601,1 +np.float64,0x3fe5989c79eb3139,0x3fe3fe6308552dec,1 +np.float64,0x3fe3c53cb4e78a79,0x3fe28956e573aca4,1 +np.float64,0x3fe6512beeeca258,0x3fe48d2d5ece979f,1 +np.float64,0x3fd8473ddb308e7c,0x3fd7b33e38adc6ad,1 +np.float64,0x3fecd09c9679a139,0x3fe91361fa0c5bcb,1 +np.float64,0x3fc991530e3322a6,0x3fc965e2c514a9e9,1 +np.float64,0x3f6d4508403a8a11,0x3f6d45042b68acc5,1 +np.float64,0x3fea1f198f743e33,0x3fe750ce918d9330,1 +np.float64,0x3fd0a0bb4da14177,0x3fd07100f9c71e1c,1 +np.float64,0x3fd30c45ffa6188c,0x3fd2c499f9961f66,1 +np.float64,0x3fcad98e7c35b31d,0x3fcaa74293cbc52e,1 +np.float64,0x3fec8e4a5eb91c95,0x3fe8e9f898d118db,1 +np.float64,0x3fd19fdb79233fb7,0x3fd1670c00febd24,1 +np.float64,0x3fea9fcbb1f53f97,0x3fe7a836b29c4075,1 +np.float64,0x3fc6d12ea12da25d,0x3fc6b24bd2f89f59,1 +np.float64,0x3fd6af3658ad5e6d,0x3fd636613e08df3f,1 +np.float64,0x3fe31bc385a63787,0x3fe1fe3081621213,1 +np.float64,0x3fc0dbba2221b774,0x3fc0cf42c9313dba,1 +np.float64,0x3fef639ce87ec73a,0x3fea9795454f1036,1 +np.float64,0x3fee5f29dcbcbe54,0x3fea0349b288f355,1 +np.float64,0x3fed46bdb37a8d7b,0x3fe95c199f5aa569,1 +np.float64,0x3fef176afa3e2ed6,0x3fea6ce78b2aa3aa,1 +np.float64,0x3fc841e7683083cf,0x3fc81cccb84848cc,1 +np.float64,0xbfda3ec9a2347d94,0xbfd9840d180e9de3,1 +np.float64,0xbfcd5967ae3ab2d0,0xbfcd17be13142bb9,1 +np.float64,0xbfedf816573bf02d,0xbfe9c6bb06476c60,1 +np.float64,0xbfd0d6e10e21adc2,0xbfd0a54f99d2f3dc,1 +np.float64,0xbfe282df096505be,0xbfe17ef5e2e80760,1 +np.float64,0xbfd77ae6e62ef5ce,0xbfd6f4f6b603ad8a,1 +np.float64,0xbfe37b171aa6f62e,0xbfe24cb4b2d0ade4,1 +np.float64,0xbfef9e5ed9bf3cbe,0xbfeab817b41000bd,1 +np.float64,0xbfe624d6f96c49ae,0xbfe46b1e9c9aff86,1 +np.float64,0xbfefb5da65ff6bb5,0xbfeac4fc9c982772,1 +np.float64,0xbfd29a65d52534cc,0xbfd2579df8ff87b9,1 +np.float64,0xbfd40270172804e0,0xbfd3af6471104aef,1 +np.float64,0xbfb729ee7a2e53e0,0xbfb721d7dbd2705e,1 +np.float64,0xbfb746f1382e8de0,0xbfb73ebc1207f8e3,1 +np.float64,0xbfd3c7e606a78fcc,0xbfd377a8aa1b0dd9,1 +np.float64,0xbfd18c4880231892,0xbfd1543506584ad5,1 +np.float64,0xbfea988080753101,0xbfe7a34cba0d0fa1,1 +np.float64,0xbf877400e02ee800,0xbf8773df47fa7e35,1 +np.float64,0xbfb07e050820fc08,0xbfb07b198d4a52c9,1 +np.float64,0xbfee0a3621fc146c,0xbfe9d1745a05ba77,1 +np.float64,0xbfe78de246ef1bc4,0xbfe57bf2baab91c8,1 +np.float64,0xbfcdbfd3bd3b7fa8,0xbfcd7b728a955a06,1 +np.float64,0xbfe855ea79b0abd5,0xbfe60e8a4a17b921,1 +np.float64,0xbfd86c8e3530d91c,0xbfd7d5e36c918dc1,1 +np.float64,0xbfe4543169e8a863,0xbfe2fd23d42f552e,1 +np.float64,0xbfe41efbf1283df8,0xbfe2d235a2faed1a,1 +np.float64,0xbfd9a55464b34aa8,0xbfd8f7083f7281e5,1 +np.float64,0xbfe5f5078d6bea0f,0xbfe44637d910c270,1 +np.float64,0xbfe6d83e3dedb07c,0xbfe4f3fdadd10552,1 +np.float64,0xbfdb767e70b6ecfc,0xbfdaa0b6c17f3fb1,1 +np.float64,0xbfdfc91b663f9236,0xbfde7eb0dfbeaa26,1 +np.float64,0xbfbfbd18783f7a30,0xbfbfa84bf2fa1c8d,1 +np.float64,0xbfe51199242a2332,0xbfe39447dbe066ae,1 +np.float64,0xbfdbb94814b77290,0xbfdadd63bd796972,1 +np.float64,0xbfd8c6272cb18c4e,0xbfd828f2d9e8607e,1 +np.float64,0xbfce51e0b63ca3c0,0xbfce097ee908083a,1 +np.float64,0xbfe99a177d73342f,0xbfe6f4ec776a57ae,1 +np.float64,0xbfefde2ab0ffbc55,0xbfeadafdcbf54733,1 +np.float64,0xbfcccb5c1c3996b8,0xbfcc8d586a73d126,1 
+np.float64,0xbfdf7ddcedbefbba,0xbfde3c749a906de7,1 +np.float64,0xbfef940516ff280a,0xbfeab26429e89f4b,1 +np.float64,0xbfe08009f1e10014,0xbfdf8eab352997eb,1 +np.float64,0xbfe9c02682b3804d,0xbfe70f5fd05f79ee,1 +np.float64,0xbfb3ca1732279430,0xbfb3c50bec5b453a,1 +np.float64,0xbfe368e81926d1d0,0xbfe23dc704d0887c,1 +np.float64,0xbfbd20cc2e3a4198,0xbfbd10b7e6d81c6c,1 +np.float64,0xbfd67ece4d2cfd9c,0xbfd608f527dcc5e7,1 +np.float64,0xbfdc02d1333805a2,0xbfdb20104454b79f,1 +np.float64,0xbfc007a626200f4c,0xbfbff9dc9dc70193,1 +np.float64,0xbfda9e4f8fb53ca0,0xbfd9db8af35dc630,1 +np.float64,0xbfd8173d77302e7a,0xbfd786a0cf3e2914,1 +np.float64,0xbfeb8fcbd0b71f98,0xbfe84734debc10fb,1 +np.float64,0xbfe4bf1cb7697e3a,0xbfe352c891113f29,1 +np.float64,0xbfc18624d5230c48,0xbfc178248e863b64,1 +np.float64,0xbfcf184bac3e3098,0xbfceca3b19be1ebe,1 +np.float64,0xbfd2269c42a44d38,0xbfd1e8920d72b694,1 +np.float64,0xbfe8808526b1010a,0xbfe62d5497292495,1 +np.float64,0xbfe498bd1da9317a,0xbfe334245eadea93,1 +np.float64,0xbfef0855aebe10ab,0xbfea6462f29aeaf9,1 +np.float64,0xbfdeb186c93d630e,0xbfdd87c37943c602,1 +np.float64,0xbfb29fe2ae253fc8,0xbfb29bae3c87efe4,1 +np.float64,0xbfddd9c6c3bbb38e,0xbfdcc7b400bf384b,1 +np.float64,0xbfe3506673e6a0cd,0xbfe2299f26295553,1 +np.float64,0xbfe765957a2ecb2b,0xbfe55e03cf22edab,1 +np.float64,0xbfecc9876c79930f,0xbfe90efaf15b6207,1 +np.float64,0xbfefb37a0a7f66f4,0xbfeac3af3898e7c2,1 +np.float64,0xbfeefa0da7bdf41b,0xbfea5c4cde53c1c3,1 +np.float64,0xbfe6639ee9ecc73e,0xbfe49b4e28a72482,1 +np.float64,0xbfef91a4bb7f2349,0xbfeab114ac9e25dd,1 +np.float64,0xbfc8b392bb316724,0xbfc88c657f4441a3,1 +np.float64,0xbfc88a358231146c,0xbfc863cb900970fe,1 +np.float64,0xbfef25a9d23e4b54,0xbfea74eda432aabe,1 +np.float64,0xbfe6aceea0ed59de,0xbfe4d32e54a3fd01,1 +np.float64,0xbfefe2b3e37fc568,0xbfeadd74f4605835,1 +np.float64,0xbfa9eecb8833dd90,0xbfa9ebf4f4cb2591,1 +np.float64,0xbfd42bad7428575a,0xbfd3d69de8e52d0a,1 +np.float64,0xbfbc366b4a386cd8,0xbfbc27ceee8f3019,1 +np.float64,0xbfd9bca7be337950,0xbfd90c80e6204e57,1 +np.float64,0xbfe8173f53f02e7f,0xbfe5e0f8d8ed329c,1 +np.float64,0xbfce22dbcb3c45b8,0xbfcddbc8159b63af,1 +np.float64,0xbfea2d7ba7345af7,0xbfe75aa62ad5b80a,1 +np.float64,0xbfc08b783e2116f0,0xbfc07faf8d501558,1 +np.float64,0xbfb8c4161c318830,0xbfb8ba33950748ec,1 +np.float64,0xbfddd930bcbbb262,0xbfdcc72dffdf51bb,1 +np.float64,0xbfd108ce8a22119e,0xbfd0d5801e7698bd,1 +np.float64,0xbfd5bd2b5dab7a56,0xbfd552c52c468c76,1 +np.float64,0xbfe7ffe67fefffcd,0xbfe5cfe96e35e6e5,1 +np.float64,0xbfa04ec6bc209d90,0xbfa04e120a2c25cc,1 +np.float64,0xbfef7752cc7eeea6,0xbfeaa28715addc4f,1 +np.float64,0xbfe7083c2eae1078,0xbfe5182bf8ddfc8e,1 +np.float64,0xbfe05dafd0a0bb60,0xbfdf52d397cfe5f6,1 +np.float64,0xbfacb4f2243969e0,0xbfacb118991ea235,1 +np.float64,0xbfc7d47e422fa8fc,0xbfc7b1504714a4fd,1 +np.float64,0xbfbd70b2243ae168,0xbfbd60182efb61de,1 +np.float64,0xbfe930e49cb261c9,0xbfe6ab272b3f9cfc,1 +np.float64,0xbfb5f537e62bea70,0xbfb5ee540dcdc635,1 +np.float64,0xbfbb0c8278361908,0xbfbaffa1f7642a87,1 +np.float64,0xbfe82af2447055e4,0xbfe5ef54ca8db9e8,1 +np.float64,0xbfe92245e6f2448c,0xbfe6a0d32168040b,1 +np.float64,0xbfb799a8522f3350,0xbfb7911a7ada3640,1 +np.float64,0x7faa8290c8350521,0x3fe5916f67209cd6,1 +np.float64,0x7f976597082ecb2d,0x3fcf94dce396bd37,1 +np.float64,0x7fede721237bce41,0x3fe3e7b1575b005f,1 +np.float64,0x7fd5f674d72bece9,0x3fe3210628eba199,1 +np.float64,0x7f9b0f1aa0361e34,0x3feffd34d15d1da7,1 +np.float64,0x7fec48346ab89068,0x3fe93dd84253d9a2,1 +np.float64,0x7f9cac76283958eb,0xbfec4cd999653868,1 
+np.float64,0x7fed51ab6bbaa356,0x3fecc27fb5f37bca,1 +np.float64,0x7fded3c116bda781,0xbfda473efee47cf1,1 +np.float64,0x7fd19c48baa33890,0xbfe25700cbfc0326,1 +np.float64,0x7fe5c8f478ab91e8,0xbfee4ab6d84806be,1 +np.float64,0x7fe53c64e46a78c9,0x3fee19c3f227f4e1,1 +np.float64,0x7fc2ad1936255a31,0xbfe56db9b877f807,1 +np.float64,0x7fe2b071b52560e2,0xbfce3990a8d390a9,1 +np.float64,0x7fc93f3217327e63,0xbfd1f6d7ef838d2b,1 +np.float64,0x7fec26df08784dbd,0x3fd5397be41c93d9,1 +np.float64,0x7fcf4770183e8edf,0x3fe6354f5a785016,1 +np.float64,0x7fdc9fcc0bb93f97,0xbfeeeae952e8267d,1 +np.float64,0x7feb21f29c7643e4,0x3fec20122e33f1bf,1 +np.float64,0x7fd0b51273216a24,0x3fefb09f8daba00b,1 +np.float64,0x7fe747a9d76e8f53,0x3feb46a3232842a4,1 +np.float64,0x7fd58885972b110a,0xbfce5ea57c186221,1 +np.float64,0x7fca3ce85c3479d0,0x3fef93a24548e8ca,1 +np.float64,0x7fe1528a46a2a514,0xbfb54bb578d9da91,1 +np.float64,0x7fcc58b21b38b163,0x3feffb5b741ffc2d,1 +np.float64,0x7fdabcaaf5357955,0x3fecbf855db524d1,1 +np.float64,0x7fdd27c6933a4f8c,0xbfef2f41bb80144b,1 +np.float64,0x7fbda4e1be3b49c2,0x3fdb9b33f84f5381,1 +np.float64,0x7fe53363362a66c5,0x3fe4daff3a6a4ed0,1 +np.float64,0x7fe5719d62eae33a,0xbfef761d98f625d5,1 +np.float64,0x7f982ce5a83059ca,0x3fd0b27c3365f0a8,1 +np.float64,0x7fe6db8c42edb718,0x3fe786f4b1fe11a6,1 +np.float64,0x7fe62cca1b2c5993,0x3fd425b6c4c9714a,1 +np.float64,0x7feea88850bd5110,0xbfd7bbb432017175,1 +np.float64,0x7fad6c6ae43ad8d5,0x3fe82e49098bc6de,1 +np.float64,0x7fe70542f02e0a85,0x3fec3017960b4822,1 +np.float64,0x7feaf0bcbb35e178,0xbfc3aac74dd322d5,1 +np.float64,0x7fb5e152fe2bc2a5,0x3fd4b27a4720614c,1 +np.float64,0x7fe456ee5be8addc,0xbfe9e15ab5cff229,1 +np.float64,0x7fd4b53a8d296a74,0xbfefff450f503326,1 +np.float64,0x7fd7149d7a2e293a,0x3fef4ef0a9009096,1 +np.float64,0x7fd43fc5a8a87f8a,0x3fe0c929fee9dce7,1 +np.float64,0x7fef97022aff2e03,0x3fd4ea52a813da20,1 +np.float64,0x7fe035950ae06b29,0x3fef4e125394fb05,1 +np.float64,0x7fecd0548979a0a8,0x3fe89d226244037b,1 +np.float64,0x7fc79b3ac22f3675,0xbfee9c9cf78c8270,1 +np.float64,0x7fd8b8e8263171cf,0x3fe8e24437961db0,1 +np.float64,0x7fc288c23e251183,0xbfbaf8eca50986ca,1 +np.float64,0x7fe436b4b6686d68,0xbfecd661741931c4,1 +np.float64,0x7fcdf99abe3bf334,0x3feaa75c90830b92,1 +np.float64,0x7fd9f9739233f2e6,0xbfebbfcb301b0da5,1 +np.float64,0x7fd6fcbd1b2df979,0xbfccf2c77cb65f56,1 +np.float64,0x7fe242a97b248552,0xbfe5b0f13bcbabc8,1 +np.float64,0x7fe38bf3e06717e7,0x3fbc8fa9004d2668,1 +np.float64,0x7fecd0e8d479a1d1,0xbfe886a6b4f73a4a,1 +np.float64,0x7fe958d60232b1ab,0xbfeb7c4cf0cee2dd,1 +np.float64,0x7f9d492b583a9256,0xbfebe975d00221cb,1 +np.float64,0x7fd6c9983bad932f,0xbfefe817621a31f6,1 +np.float64,0x7fed0d7239fa1ae3,0x3feac7e1b6455b4b,1 +np.float64,0x7fe61dac90ec3b58,0x3fef845b9efe8421,1 +np.float64,0x7f9acd3010359a5f,0xbfe460d376200130,1 +np.float64,0x7fedced9673b9db2,0xbfeeaf23445e1944,1 +np.float64,0x7fd9f271a733e4e2,0xbfd41544535ecb78,1 +np.float64,0x7fe703339bee0666,0x3fef93334626b56c,1 +np.float64,0x7fec7761b7b8eec2,0xbfe6da9179e8e714,1 +np.float64,0x7fdd9fff043b3ffd,0xbfc0761dfb8d94f9,1 +np.float64,0x7fdc10ed17b821d9,0x3fe1481e2a26c77f,1 +np.float64,0x7fe7681e72aed03c,0x3fefff94a6d47c84,1 +np.float64,0x7fe18c29e1e31853,0x3fe86ebd2fd89456,1 +np.float64,0x7fb2fb273c25f64d,0xbfefc136f57e06de,1 +np.float64,0x7fac2bbb90385776,0x3fe25d8e3cdae7e3,1 +np.float64,0x7fed16789efa2cf0,0x3fe94555091fdfd9,1 +np.float64,0x7fd8fe8f7831fd1e,0xbfed58d520361902,1 +np.float64,0x7fa59bde3c2b37bb,0x3fef585391c077ff,1 +np.float64,0x7fda981b53353036,0x3fde02ca08737b5f,1 
+np.float64,0x7fd29f388aa53e70,0xbfe04f5499246df2,1 +np.float64,0x7fcd0232513a0464,0xbfd9737f2f565829,1 +np.float64,0x7fe9a881bcf35102,0xbfe079cf285b35dd,1 +np.float64,0x7fdbe399a9b7c732,0x3fe965bc4220f340,1 +np.float64,0x7feb77414af6ee82,0xbfb7df2fcd491f55,1 +np.float64,0x7fa26e86c424dd0d,0xbfea474c3d65b9be,1 +np.float64,0x7feaee869e35dd0c,0xbfd7b333a888cd14,1 +np.float64,0x7fcbd67f6137acfe,0xbfe15a7a15dfcee6,1 +np.float64,0x7fe36991e766d323,0xbfeb288077c4ed9f,1 +np.float64,0x7fdcf4f4fcb9e9e9,0xbfea331ef7a75e7b,1 +np.float64,0x7fbe3445643c688a,0x3fedf21b94ae8e37,1 +np.float64,0x7fd984cfd2b3099f,0x3fc0d3ade71c395e,1 +np.float64,0x7fdec987b23d930e,0x3fe4af5e48f6c26e,1 +np.float64,0x7fde56a9953cad52,0x3fc8e7762cefb8b0,1 +np.float64,0x7fd39fb446273f68,0xbfe6c3443208f44d,1 +np.float64,0x7fc609c1a72c1382,0x3fe884e639571baa,1 +np.float64,0x7fe001be4b20037c,0xbfed0d90cbcb6010,1 +np.float64,0x7fce7ace283cf59b,0xbfd0303792e51f49,1 +np.float64,0x7fe27ba93da4f751,0x3fe548b5ce740d71,1 +np.float64,0x7fcc13c79b38278e,0xbfe2e14f5b64a1e9,1 +np.float64,0x7fc058550620b0a9,0x3fe44bb55ebd0590,1 +np.float64,0x7fa4ba8bf8297517,0x3fee59b39f9d08c4,1 +np.float64,0x7fe50d6872ea1ad0,0xbfea1eaa2d059e13,1 +np.float64,0x7feb7e33b476fc66,0xbfeff28a4424dd3e,1 +np.float64,0x7fe2d7d2a165afa4,0xbfdbaff0ba1ea460,1 +np.float64,0xffd126654b224cca,0xbfef0cd3031fb97c,1 +np.float64,0xffb5f884942bf108,0x3fe0de589bea2e4c,1 +np.float64,0xffe011b4bfe02369,0xbfe805a0edf1e1f2,1 +np.float64,0xffec13eae9b827d5,0x3fb5f30347d78447,1 +np.float64,0xffa6552ae82caa50,0x3fb1ecee60135f2f,1 +np.float64,0xffb62d38b02c5a70,0x3fbd35903148fd12,1 +np.float64,0xffe2c44ea425889d,0xbfd7616547f99a7d,1 +np.float64,0xffea24c61a74498c,0x3fef4a1b15ae9005,1 +np.float64,0xffd23a4ab2a47496,0x3fe933bfaa569ae9,1 +np.float64,0xffc34a073d269410,0xbfeec0f510bb7474,1 +np.float64,0xffeead84cfbd5b09,0x3feb2d635e5a78bd,1 +np.float64,0xffcfd8f3b43fb1e8,0xbfdd59625801771b,1 +np.float64,0xffd3c7f662a78fec,0x3f9cf3209edfbc4e,1 +np.float64,0xffe7b7e4f72f6fca,0xbfefdcff4925632c,1 +np.float64,0xffe48cab05e91956,0x3fe6b41217948423,1 +np.float64,0xffeb6980b336d301,0xbfca5de148f69324,1 +np.float64,0xffe3f15c4aa7e2b8,0xbfeb18efae892081,1 +np.float64,0xffcf290c713e5218,0x3fefe6f1a513ed26,1 +np.float64,0xffd80979b43012f4,0xbfde6c8df91af976,1 +np.float64,0xffc3181e0026303c,0x3fe7448f681def38,1 +np.float64,0xffedfa68f97bf4d1,0xbfeca6efb802d109,1 +np.float64,0xffca0931c0341264,0x3fe31b9f073b08cd,1 +np.float64,0xffe4c44934e98892,0x3feda393a2e8a0f7,1 +np.float64,0xffe65bb56f2cb76a,0xbfeffaf638a4b73e,1 +np.float64,0xffe406a332a80d46,0x3fe8151dadb853c1,1 +np.float64,0xffdb7eae9c36fd5e,0xbfeff89abf5ab16e,1 +np.float64,0xffe245a02da48b40,0x3fef1fb43e85f4b8,1 +np.float64,0xffe2bafa732575f4,0x3fcbab115c6fd86e,1 +np.float64,0xffe8b1eedb7163dd,0x3feff263df6f6b12,1 +np.float64,0xffe6c76c796d8ed8,0xbfe61a8668511293,1 +np.float64,0xffefe327d1ffc64f,0xbfd9b92887a84827,1 +np.float64,0xffa452180c28a430,0xbfa9b9e578a4e52f,1 +np.float64,0xffe9867d0bf30cf9,0xbfca577867588408,1 +np.float64,0xffdfe9b923bfd372,0x3fdab5c15f085c2d,1 +np.float64,0xffed590c6abab218,0xbfd7e7b6c5a120e6,1 +np.float64,0xffeaebcfbab5d79f,0x3fed58be8a9e2c3b,1 +np.float64,0xffe2ba83a8257507,0x3fe6c42a4ac1d4d9,1 +np.float64,0xffe01d5b0ee03ab6,0xbfe5dad6c9247db7,1 +np.float64,0xffe51095d52a212b,0x3fef822cebc32d8e,1 +np.float64,0xffebd7a901b7af51,0xbfe5e63f3e3b1185,1 +np.float64,0xffe4efdcde29dfb9,0xbfe811294dfa758f,1 +np.float64,0xffe3be1aa4a77c35,0x3fdd8dcfcd409bb1,1 +np.float64,0xffbe6f2f763cde60,0x3fd13766e43bd622,1 
+np.float64,0xffeed3d80fbda7af,0x3fec10a23c1b7a4a,1 +np.float64,0xffd6ebff37add7fe,0xbfe6177411607c86,1 +np.float64,0xffe85a90f4b0b521,0x3fc09fdd66c8fde9,1 +np.float64,0xffea3d58c2b47ab1,0x3feb5bd4a04b3562,1 +np.float64,0xffef675be6beceb7,0x3fecd840683d1044,1 +np.float64,0xff726a088024d400,0x3feff2b4f47b5214,1 +np.float64,0xffc90856733210ac,0xbfe3c6ffbf6840a5,1 +np.float64,0xffc0b58d9a216b1c,0xbfe10314267d0611,1 +np.float64,0xffee1f3d0abc3e79,0xbfd12ea7efea9067,1 +np.float64,0xffd988c41a331188,0x3febe83802d8a32e,1 +np.float64,0xffe8f1ac9bb1e358,0xbfdbf5fa7e84f2f2,1 +np.float64,0xffe47af279e8f5e4,0x3fef11e339e5fa78,1 +np.float64,0xff9960a7f832c140,0xbfa150363f8ec5b2,1 +np.float64,0xffcac40fa7358820,0xbfec3d5847a3df1d,1 +np.float64,0xffcb024a9d360494,0xbfd060fa31fd6b6a,1 +np.float64,0xffe385ffb3270bff,0xbfee6859e8dcd9e8,1 +np.float64,0xffef62f2c53ec5e5,0x3fe0a71ffddfc718,1 +np.float64,0xffed87ff20fb0ffd,0xbfe661db7c4098e3,1 +np.float64,0xffe369278526d24e,0x3fd64d89a41822fc,1 +np.float64,0xff950288c02a0520,0x3fe1df91d1ad7d5c,1 +np.float64,0xffe70e7c2cee1cf8,0x3fc9fece08df2fd8,1 +np.float64,0xffbaf020b635e040,0xbfc68c43ff9911a7,1 +np.float64,0xffee0120b0fc0240,0x3f9f792e17b490b0,1 +np.float64,0xffe1fa4be7a3f498,0xbfef4b18ab4b319e,1 +np.float64,0xffe61887bf2c310f,0x3fe846714826cb32,1 +np.float64,0xffdc3cf77f3879ee,0x3fe033b948a36125,1 +np.float64,0xffcc2b86f238570c,0xbfefdcceac3f220f,1 +np.float64,0xffe1f030c0a3e061,0x3fef502a808c359a,1 +np.float64,0xffb872c4ee30e588,0x3fef66ed8d3e6175,1 +np.float64,0xffeac8fc617591f8,0xbfe5d8448602aac9,1 +np.float64,0xffe5be16afab7c2d,0x3fee75ccde3cd14d,1 +np.float64,0xffae230ad83c4610,0xbfe49bbe6074d459,1 +np.float64,0xffc8fbeff531f7e0,0x3f77201e0c927f97,1 +np.float64,0xffdc314f48b8629e,0x3fef810dfc5db118,1 +np.float64,0xffec1f8970783f12,0x3fe15567102e042a,1 +np.float64,0xffc6995f902d32c0,0xbfecd5d2eedf342c,1 +np.float64,0xffdc7af76b38f5ee,0xbfd6e754476ab320,1 +np.float64,0xffb30cf8682619f0,0x3fd5ac3dfc4048d0,1 +np.float64,0xffd3a77695a74eee,0xbfefb5d6889e36e9,1 +np.float64,0xffd8b971803172e4,0xbfeb7f62f0b6c70b,1 +np.float64,0xffde4c0234bc9804,0xbfed50ba9e16d5e0,1 +np.float64,0xffb62b3f342c5680,0xbfeabc0de4069b84,1 +np.float64,0xff9af5674035eac0,0xbfed6c198b6b1bd8,1 +np.float64,0xffdfe20cb43fc41a,0x3fb11f8238f66306,1 +np.float64,0xffd2ecd7a0a5d9b0,0xbfec17ef1a62b1e3,1 +np.float64,0xffce60f7863cc1f0,0x3fe6dbcad3e3a006,1 +np.float64,0xffbbb8306a377060,0xbfbfd0fbef485c4c,1 +np.float64,0xffd1b2bd2b23657a,0xbfda3e046d987b99,1 +np.float64,0xffc480f4092901e8,0xbfeeff0427f6897b,1 +np.float64,0xffe6e02d926dc05a,0xbfcd59552778890b,1 +np.float64,0xffd302e5b7a605cc,0xbfee7c08641366b0,1 +np.float64,0xffec2eb92f785d72,0xbfef5c9c7f771050,1 +np.float64,0xffea3e31a9747c62,0xbfc49cd54755faf0,1 +np.float64,0xffce0a4e333c149c,0x3feeb9a6d0db4aee,1 +np.float64,0xffdc520a2db8a414,0x3fefc7b72613dcd0,1 +np.float64,0xffe056b968a0ad72,0xbfe47a9fe1f827fb,1 +np.float64,0xffe5a10f4cab421e,0x3fec2b1f74b73dec,1 diff --git a/.venv/lib/python3.12/site-packages/numpy/core/tests/data/umath-validation-set-sinh.csv b/.venv/lib/python3.12/site-packages/numpy/core/tests/data/umath-validation-set-sinh.csv new file mode 100644 index 00000000..5888c91c --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/data/umath-validation-set-sinh.csv @@ -0,0 +1,1429 @@ +dtype,input,output,ulperrortol +np.float32,0xfee27582,0xff800000,2 +np.float32,0xff19f092,0xff800000,2 +np.float32,0xbf393576,0xbf49cb31,2 +np.float32,0x8020fdea,0x8020fdea,2 +np.float32,0x455f4e,0x455f4e,2 
+np.float32,0xff718c35,0xff800000,2 +np.float32,0x3f3215e3,0x3f40cce5,2 +np.float32,0x19e833,0x19e833,2 +np.float32,0xff2dcd49,0xff800000,2 +np.float32,0x7e8f6c95,0x7f800000,2 +np.float32,0xbf159dac,0xbf1e47a5,2 +np.float32,0x100d3d,0x100d3d,2 +np.float32,0xff673441,0xff800000,2 +np.float32,0x80275355,0x80275355,2 +np.float32,0x4812d0,0x4812d0,2 +np.float32,0x8072b956,0x8072b956,2 +np.float32,0xff3bb918,0xff800000,2 +np.float32,0x0,0x0,2 +np.float32,0xfe327798,0xff800000,2 +np.float32,0x41d4e2,0x41d4e2,2 +np.float32,0xfe34b1b8,0xff800000,2 +np.float32,0x80199f72,0x80199f72,2 +np.float32,0x807242ce,0x807242ce,2 +np.float32,0x3ef4202d,0x3efd7b48,2 +np.float32,0x763529,0x763529,2 +np.float32,0x4f6662,0x4f6662,2 +np.float32,0x3f18efe9,0x3f2232b5,2 +np.float32,0x80701846,0x80701846,2 +np.float32,0x3f599948,0x3f74c393,2 +np.float32,0x5a3d69,0x5a3d69,2 +np.float32,0xbf4a7e65,0xbf6047a3,2 +np.float32,0xff0d4c82,0xff800000,2 +np.float32,0x7a74db,0x7a74db,2 +np.float32,0x803388e6,0x803388e6,2 +np.float32,0x7f4430bb,0x7f800000,2 +np.float32,0x14c5b1,0x14c5b1,2 +np.float32,0xfa113400,0xff800000,2 +np.float32,0x7f4b3209,0x7f800000,2 +np.float32,0x8038d88c,0x8038d88c,2 +np.float32,0xbef2f9de,0xbefc330b,2 +np.float32,0xbe147b38,0xbe15008f,2 +np.float32,0x2b61e6,0x2b61e6,2 +np.float32,0x80000001,0x80000001,2 +np.float32,0x8060456c,0x8060456c,2 +np.float32,0x3f30fa82,0x3f3f6a99,2 +np.float32,0xfd1f0220,0xff800000,2 +np.float32,0xbf2b7555,0xbf389151,2 +np.float32,0xff100b7a,0xff800000,2 +np.float32,0x70d3cd,0x70d3cd,2 +np.float32,0x2a8d4a,0x2a8d4a,2 +np.float32,0xbf7b733f,0xbf92f05f,2 +np.float32,0x3f7106dc,0x3f8b1fc6,2 +np.float32,0x3f39da7a,0x3f4a9d79,2 +np.float32,0x3f5dd73f,0x3f7aaab5,2 +np.float32,0xbe8c8754,0xbe8e4cba,2 +np.float32,0xbf6c74c9,0xbf87c556,2 +np.float32,0x800efbbb,0x800efbbb,2 +np.float32,0xff054ab5,0xff800000,2 +np.float32,0x800b4b46,0x800b4b46,2 +np.float32,0xff77fd74,0xff800000,2 +np.float32,0x257d0,0x257d0,2 +np.float32,0x7caa0c,0x7caa0c,2 +np.float32,0x8025d24d,0x8025d24d,2 +np.float32,0x3d9f1b60,0x3d9f445c,2 +np.float32,0xbe3bf6e8,0xbe3d0595,2 +np.float32,0x54bb93,0x54bb93,2 +np.float32,0xbf3e6a45,0xbf507716,2 +np.float32,0x3f4bb26e,0x3f61e1cd,2 +np.float32,0x3f698edc,0x3f85aac5,2 +np.float32,0xff7bd0ef,0xff800000,2 +np.float32,0xbed07b68,0xbed64a8e,2 +np.float32,0xbf237c72,0xbf2ed3d2,2 +np.float32,0x27b0fa,0x27b0fa,2 +np.float32,0x3f7606d1,0x3f8ed7d6,2 +np.float32,0x790dc0,0x790dc0,2 +np.float32,0x7f68f3ac,0x7f800000,2 +np.float32,0xbed39288,0xbed9a52f,2 +np.float32,0x3f6f8266,0x3f8a0187,2 +np.float32,0x3fbdca,0x3fbdca,2 +np.float32,0xbf7c3e5d,0xbf938b2c,2 +np.float32,0x802321a8,0x802321a8,2 +np.float32,0x3eecab66,0x3ef53031,2 +np.float32,0x62b324,0x62b324,2 +np.float32,0x3f13afac,0x3f1c03fe,2 +np.float32,0xff315ad7,0xff800000,2 +np.float32,0xbf1fac0d,0xbf2a3a63,2 +np.float32,0xbf543984,0xbf6d61d6,2 +np.float32,0x71a212,0x71a212,2 +np.float32,0x114fbe,0x114fbe,2 +np.float32,0x3f5b6ff2,0x3f77505f,2 +np.float32,0xff6ff89e,0xff800000,2 +np.float32,0xff4527a1,0xff800000,2 +np.float32,0x22cb3,0x22cb3,2 +np.float32,0x7f53bb6b,0x7f800000,2 +np.float32,0xff3d2dea,0xff800000,2 +np.float32,0xfd21dac0,0xff800000,2 +np.float32,0xfc486140,0xff800000,2 +np.float32,0x7e2b693a,0x7f800000,2 +np.float32,0x8022a9fb,0x8022a9fb,2 +np.float32,0x80765de0,0x80765de0,2 +np.float32,0x13d299,0x13d299,2 +np.float32,0x7ee53713,0x7f800000,2 +np.float32,0xbde1c770,0xbde23c96,2 +np.float32,0xbd473fc0,0xbd4753de,2 +np.float32,0x3f1cb455,0x3f26acf3,2 +np.float32,0x683e49,0x683e49,2 
+np.float32,0x3ed5a9fc,0x3edbeb79,2 +np.float32,0x3f4fe3f6,0x3f67814f,2 +np.float32,0x802a2bce,0x802a2bce,2 +np.float32,0x7e951b4c,0x7f800000,2 +np.float32,0xbe6eb260,0xbe70dd44,2 +np.float32,0xbe3daca8,0xbe3ec2cb,2 +np.float32,0xbe9c38b2,0xbe9ea822,2 +np.float32,0xff2e29dc,0xff800000,2 +np.float32,0x7f62c7cc,0x7f800000,2 +np.float32,0xbf6799a4,0xbf84416c,2 +np.float32,0xbe30a7f0,0xbe318898,2 +np.float32,0xc83d9,0xc83d9,2 +np.float32,0x3f05abf4,0x3f0bd447,2 +np.float32,0x7e9b018a,0x7f800000,2 +np.float32,0xbf0ed72e,0xbf165e5b,2 +np.float32,0x8011ac8c,0x8011ac8c,2 +np.float32,0xbeb7c706,0xbebbbfcb,2 +np.float32,0x803637f9,0x803637f9,2 +np.float32,0xfe787cc8,0xff800000,2 +np.float32,0x3f533d4b,0x3f6c0a50,2 +np.float32,0x3f5c0f1c,0x3f782dde,2 +np.float32,0x3f301f36,0x3f3e590d,2 +np.float32,0x2dc929,0x2dc929,2 +np.float32,0xff15018a,0xff800000,2 +np.float32,0x3f4d0c56,0x3f63afeb,2 +np.float32,0xbf7a2ae3,0xbf91f6e4,2 +np.float32,0xbe771b84,0xbe798346,2 +np.float32,0x80800000,0x80800000,2 +np.float32,0x7f5689ba,0x7f800000,2 +np.float32,0x3f1c3177,0x3f2610df,2 +np.float32,0x3f1b9664,0x3f255825,2 +np.float32,0x3f7e5066,0x3f9520d4,2 +np.float32,0xbf1935f8,0xbf2285ab,2 +np.float32,0x3f096cc7,0x3f101ef9,2 +np.float32,0x8030c180,0x8030c180,2 +np.float32,0x6627ed,0x6627ed,2 +np.float32,0x454595,0x454595,2 +np.float32,0x7de66a33,0x7f800000,2 +np.float32,0xbf800000,0xbf966cfe,2 +np.float32,0xbf35c0a8,0xbf456939,2 +np.float32,0x3f6a6266,0x3f8643e0,2 +np.float32,0x3f0cbcee,0x3f13ef6a,2 +np.float32,0x7efd1e58,0x7f800000,2 +np.float32,0xfe9a74c6,0xff800000,2 +np.float32,0x807ebe6c,0x807ebe6c,2 +np.float32,0x80656736,0x80656736,2 +np.float32,0x800e0608,0x800e0608,2 +np.float32,0xbf30e39a,0xbf3f4e00,2 +np.float32,0x802015fd,0x802015fd,2 +np.float32,0x3e3ce26d,0x3e3df519,2 +np.float32,0x7ec142ac,0x7f800000,2 +np.float32,0xbf68c9ce,0xbf851c78,2 +np.float32,0xfede8356,0xff800000,2 +np.float32,0xbf1507ce,0xbf1d978d,2 +np.float32,0x3e53914c,0x3e551374,2 +np.float32,0x7f3e1c14,0x7f800000,2 +np.float32,0x8070d2ba,0x8070d2ba,2 +np.float32,0xbf4eb793,0xbf65ecee,2 +np.float32,0x7365a6,0x7365a6,2 +np.float32,0x8045cba2,0x8045cba2,2 +np.float32,0x7e4af521,0x7f800000,2 +np.float32,0xbf228625,0xbf2da9e1,2 +np.float32,0x7ee0536c,0x7f800000,2 +np.float32,0x3e126607,0x3e12e5d5,2 +np.float32,0x80311d92,0x80311d92,2 +np.float32,0xbf386b8b,0xbf48ca54,2 +np.float32,0x7f800000,0x7f800000,2 +np.float32,0x8049ec7a,0x8049ec7a,2 +np.float32,0xbf1dfde4,0xbf2836be,2 +np.float32,0x7e719a8c,0x7f800000,2 +np.float32,0x3eb9c856,0x3ebde2e6,2 +np.float32,0xfe3efda8,0xff800000,2 +np.float32,0xbe89d60c,0xbe8b81d1,2 +np.float32,0x3eaad338,0x3eae0317,2 +np.float32,0x7f4e5217,0x7f800000,2 +np.float32,0x3e9d0f40,0x3e9f88ce,2 +np.float32,0xbe026708,0xbe02c155,2 +np.float32,0x5fc22f,0x5fc22f,2 +np.float32,0x1c4572,0x1c4572,2 +np.float32,0xbed89d96,0xbedf22c5,2 +np.float32,0xbf3debee,0xbf4fd441,2 +np.float32,0xbf465520,0xbf5ac6e5,2 +np.float32,0x3f797081,0x3f9169b3,2 +np.float32,0xbf250734,0xbf30b2aa,2 +np.float32,0x7f5068e9,0x7f800000,2 +np.float32,0x3f1b814e,0x3f253f0c,2 +np.float32,0xbf27c5d3,0xbf340b05,2 +np.float32,0x3f1b78ae,0x3f2534c8,2 +np.float32,0x8059b51a,0x8059b51a,2 +np.float32,0x8059f182,0x8059f182,2 +np.float32,0xbf1bb36e,0xbf257ab8,2 +np.float32,0x41ac35,0x41ac35,2 +np.float32,0x68f41f,0x68f41f,2 +np.float32,0xbea504dc,0xbea7e40f,2 +np.float32,0x1,0x1,2 +np.float32,0x3e96b5b0,0x3e98e542,2 +np.float32,0x7f7fffff,0x7f800000,2 +np.float32,0x3c557a80,0x3c557c0c,2 +np.float32,0x800ca3ec,0x800ca3ec,2 +np.float32,0x8077d4aa,0x8077d4aa,2 
+np.float32,0x3f000af0,0x3f0572d6,2 +np.float32,0x3e0434dd,0x3e0492f8,2 +np.float32,0x7d1a710a,0x7f800000,2 +np.float32,0x3f70f996,0x3f8b15f8,2 +np.float32,0x8033391d,0x8033391d,2 +np.float32,0x11927c,0x11927c,2 +np.float32,0x7f7784be,0x7f800000,2 +np.float32,0x7acb22af,0x7f800000,2 +np.float32,0x7e8b153c,0x7f800000,2 +np.float32,0x66d402,0x66d402,2 +np.float32,0xfed6e7b0,0xff800000,2 +np.float32,0x7f6872d3,0x7f800000,2 +np.float32,0x1bd49c,0x1bd49c,2 +np.float32,0xfdc4f1b8,0xff800000,2 +np.float32,0xbed8a466,0xbedf2a33,2 +np.float32,0x7ee789,0x7ee789,2 +np.float32,0xbece94b4,0xbed43b52,2 +np.float32,0x3cf3f734,0x3cf4006f,2 +np.float32,0x7e44aa00,0x7f800000,2 +np.float32,0x7f19e99c,0x7f800000,2 +np.float32,0x806ff1bc,0x806ff1bc,2 +np.float32,0x80296934,0x80296934,2 +np.float32,0x7f463363,0x7f800000,2 +np.float32,0xbf212ac3,0xbf2c06bb,2 +np.float32,0x3dc63778,0x3dc686ba,2 +np.float32,0x7f1b4328,0x7f800000,2 +np.float32,0x6311f6,0x6311f6,2 +np.float32,0xbf6b6fb6,0xbf870751,2 +np.float32,0xbf2c44cf,0xbf399155,2 +np.float32,0x3e7a67bc,0x3e7ce887,2 +np.float32,0x7f57c5f7,0x7f800000,2 +np.float32,0x7f2bb4ff,0x7f800000,2 +np.float32,0xbe9d448e,0xbe9fc0a4,2 +np.float32,0xbf4840f0,0xbf5d4f6b,2 +np.float32,0x7f1e1176,0x7f800000,2 +np.float32,0xff76638e,0xff800000,2 +np.float32,0xff055555,0xff800000,2 +np.float32,0x3f32b82b,0x3f419834,2 +np.float32,0xff363aa8,0xff800000,2 +np.float32,0x7f737fd0,0x7f800000,2 +np.float32,0x3da5d798,0x3da60602,2 +np.float32,0x3f1cc126,0x3f26bc3e,2 +np.float32,0x7eb07541,0x7f800000,2 +np.float32,0x3f7b2ff2,0x3f92bd2a,2 +np.float32,0x474f7,0x474f7,2 +np.float32,0x7fc00000,0x7fc00000,2 +np.float32,0xff2b0a4e,0xff800000,2 +np.float32,0xfeb24f16,0xff800000,2 +np.float32,0x2cb9fc,0x2cb9fc,2 +np.float32,0x67189d,0x67189d,2 +np.float32,0x8033d854,0x8033d854,2 +np.float32,0xbe85e94c,0xbe87717a,2 +np.float32,0x80767c6c,0x80767c6c,2 +np.float32,0x7ea84d65,0x7f800000,2 +np.float32,0x3f024bc7,0x3f07fead,2 +np.float32,0xbdcb0100,0xbdcb5625,2 +np.float32,0x3f160a9e,0x3f1ec7c9,2 +np.float32,0xff1734c8,0xff800000,2 +np.float32,0x7f424d5e,0x7f800000,2 +np.float32,0xbf75b215,0xbf8e9862,2 +np.float32,0x3f262a42,0x3f3214c4,2 +np.float32,0xbf4cfb53,0xbf639927,2 +np.float32,0x3f4ac8b8,0x3f60aa7c,2 +np.float32,0x3e90e593,0x3e92d6b3,2 +np.float32,0xbf66bccf,0xbf83a2d8,2 +np.float32,0x7d3d851a,0x7f800000,2 +np.float32,0x7bac783c,0x7f800000,2 +np.float32,0x8001c626,0x8001c626,2 +np.float32,0xbdffd480,0xbe003f7b,2 +np.float32,0x7f6680bf,0x7f800000,2 +np.float32,0xbecf448e,0xbed4f9bb,2 +np.float32,0x584c7,0x584c7,2 +np.float32,0x3f3e8ea0,0x3f50a5fb,2 +np.float32,0xbf5a5f04,0xbf75d56e,2 +np.float32,0x8065ae47,0x8065ae47,2 +np.float32,0xbf48dce3,0xbf5e1dba,2 +np.float32,0xbe8dae2e,0xbe8f7ed8,2 +np.float32,0x3f7ca6ab,0x3f93dace,2 +np.float32,0x4c3e81,0x4c3e81,2 +np.float32,0x80000000,0x80000000,2 +np.float32,0x3ee1f7d9,0x3ee96033,2 +np.float32,0x80588c6f,0x80588c6f,2 +np.float32,0x5ba34e,0x5ba34e,2 +np.float32,0x80095d28,0x80095d28,2 +np.float32,0xbe7ba198,0xbe7e2bdd,2 +np.float32,0xbe0bdcb4,0xbe0c4c22,2 +np.float32,0x1776f7,0x1776f7,2 +np.float32,0x80328b2a,0x80328b2a,2 +np.float32,0x3e978d37,0x3e99c63e,2 +np.float32,0x7ed50906,0x7f800000,2 +np.float32,0x3f776a54,0x3f8fe2bd,2 +np.float32,0xbed624c4,0xbedc7120,2 +np.float32,0x7f0b6a31,0x7f800000,2 +np.float32,0x7eb13913,0x7f800000,2 +np.float32,0xbe733684,0xbe758190,2 +np.float32,0x80016474,0x80016474,2 +np.float32,0x7a51ee,0x7a51ee,2 +np.float32,0x3f6cb91e,0x3f87f729,2 +np.float32,0xbd99b050,0xbd99d540,2 +np.float32,0x7c6e3cba,0x7f800000,2 
+np.float32,0xbf00179a,0xbf05811e,2 +np.float32,0x3e609b29,0x3e626954,2 +np.float32,0xff3fd71a,0xff800000,2 +np.float32,0x5d8c2,0x5d8c2,2 +np.float32,0x7ee93662,0x7f800000,2 +np.float32,0x4b0b31,0x4b0b31,2 +np.float32,0x3ec243b7,0x3ec6f594,2 +np.float32,0x804d60f1,0x804d60f1,2 +np.float32,0xbf0cb784,0xbf13e929,2 +np.float32,0x3f13b74d,0x3f1c0cee,2 +np.float32,0xfe37cb64,0xff800000,2 +np.float32,0x1a88,0x1a88,2 +np.float32,0x3e22a472,0x3e2353ba,2 +np.float32,0x7f07d6a0,0x7f800000,2 +np.float32,0x3f78f435,0x3f910bb5,2 +np.float32,0x555a4a,0x555a4a,2 +np.float32,0x3e306c1f,0x3e314be3,2 +np.float32,0x8005877c,0x8005877c,2 +np.float32,0x4df389,0x4df389,2 +np.float32,0x8069ffc7,0x8069ffc7,2 +np.float32,0x3f328f24,0x3f4164c6,2 +np.float32,0x53a31b,0x53a31b,2 +np.float32,0xbe4d6768,0xbe4ec8be,2 +np.float32,0x7fa00000,0x7fe00000,2 +np.float32,0x3f484c1b,0x3f5d5e2f,2 +np.float32,0x8038be05,0x8038be05,2 +np.float32,0x58ac0f,0x58ac0f,2 +np.float32,0x7ed7fb72,0x7f800000,2 +np.float32,0x5a22e1,0x5a22e1,2 +np.float32,0xbebb7394,0xbebfaad6,2 +np.float32,0xbda98160,0xbda9b2ef,2 +np.float32,0x7f3e5c42,0x7f800000,2 +np.float32,0xfed204ae,0xff800000,2 +np.float32,0xbf5ef782,0xbf7c3ec5,2 +np.float32,0xbef7a0a8,0xbf00b292,2 +np.float32,0xfee6e176,0xff800000,2 +np.float32,0xfe121140,0xff800000,2 +np.float32,0xfe9e13be,0xff800000,2 +np.float32,0xbf3c98b1,0xbf4e2003,2 +np.float32,0x77520d,0x77520d,2 +np.float32,0xf17b2,0xf17b2,2 +np.float32,0x724d2f,0x724d2f,2 +np.float32,0x7eb326f5,0x7f800000,2 +np.float32,0x3edd6bf2,0x3ee4636e,2 +np.float32,0x350f57,0x350f57,2 +np.float32,0xff7d4435,0xff800000,2 +np.float32,0x802b2b9d,0x802b2b9d,2 +np.float32,0xbf7fbeee,0xbf963acf,2 +np.float32,0x804f3100,0x804f3100,2 +np.float32,0x7c594a71,0x7f800000,2 +np.float32,0x3ef49340,0x3efdfbb6,2 +np.float32,0x2e0659,0x2e0659,2 +np.float32,0x8006d5fe,0x8006d5fe,2 +np.float32,0xfd2a00b0,0xff800000,2 +np.float32,0xbee1c016,0xbee922ed,2 +np.float32,0x3e3b7de8,0x3e3c8a8b,2 +np.float32,0x805e6bba,0x805e6bba,2 +np.float32,0x1a7da2,0x1a7da2,2 +np.float32,0x6caba4,0x6caba4,2 +np.float32,0x802f7eab,0x802f7eab,2 +np.float32,0xff68b16b,0xff800000,2 +np.float32,0x8064f5e5,0x8064f5e5,2 +np.float32,0x2e39b4,0x2e39b4,2 +np.float32,0x800000,0x800000,2 +np.float32,0xfd0334c0,0xff800000,2 +np.float32,0x3e952fc4,0x3e974e7e,2 +np.float32,0x80057d33,0x80057d33,2 +np.float32,0x3ed3ddc4,0x3ed9f6f1,2 +np.float32,0x3f74ce18,0x3f8dedf4,2 +np.float32,0xff6bb7c0,0xff800000,2 +np.float32,0xff43bc21,0xff800000,2 +np.float32,0x80207570,0x80207570,2 +np.float32,0x7e1dda75,0x7f800000,2 +np.float32,0x3efe335c,0x3f0462ff,2 +np.float32,0xbf252c0c,0xbf30df70,2 +np.float32,0x3ef4b8e3,0x3efe25ba,2 +np.float32,0x7c33938d,0x7f800000,2 +np.float32,0x3eb1593c,0x3eb4ea95,2 +np.float32,0xfe1d0068,0xff800000,2 +np.float32,0xbf10da9b,0xbf18b551,2 +np.float32,0xfeb65748,0xff800000,2 +np.float32,0xfe8c6014,0xff800000,2 +np.float32,0x3f0503e2,0x3f0b14e3,2 +np.float32,0xfe5e5248,0xff800000,2 +np.float32,0xbd10afa0,0xbd10b754,2 +np.float32,0xff64b609,0xff800000,2 +np.float32,0xbf674a96,0xbf84089c,2 +np.float32,0x7f5d200d,0x7f800000,2 +np.float32,0x3cf44900,0x3cf45245,2 +np.float32,0x8044445a,0x8044445a,2 +np.float32,0xff35b676,0xff800000,2 +np.float32,0x806452cd,0x806452cd,2 +np.float32,0xbf2930fb,0xbf35c7b4,2 +np.float32,0x7e500617,0x7f800000,2 +np.float32,0x543719,0x543719,2 +np.float32,0x3ed11068,0x3ed6ec1d,2 +np.float32,0xbd8db068,0xbd8dcd59,2 +np.float32,0x3ede62c8,0x3ee571d0,2 +np.float32,0xbf00a410,0xbf061f9c,2 +np.float32,0xbf44fa39,0xbf58ff5b,2 
+np.float32,0x3f1c3114,0x3f261069,2 +np.float32,0xbdea6210,0xbdeae521,2 +np.float32,0x80059f6d,0x80059f6d,2 +np.float32,0xbdba15f8,0xbdba578c,2 +np.float32,0x6d8a61,0x6d8a61,2 +np.float32,0x6f5428,0x6f5428,2 +np.float32,0x18d0e,0x18d0e,2 +np.float32,0x50e131,0x50e131,2 +np.float32,0x3f2f52be,0x3f3d5a7e,2 +np.float32,0x7399d8,0x7399d8,2 +np.float32,0x106524,0x106524,2 +np.float32,0x7ebf1c53,0x7f800000,2 +np.float32,0x80276458,0x80276458,2 +np.float32,0x3ebbde67,0x3ec01ceb,2 +np.float32,0x80144d9d,0x80144d9d,2 +np.float32,0x8017ea6b,0x8017ea6b,2 +np.float32,0xff38f201,0xff800000,2 +np.float32,0x7f2daa82,0x7f800000,2 +np.float32,0x3f3cb7c7,0x3f4e47ed,2 +np.float32,0x7f08c779,0x7f800000,2 +np.float32,0xbecc907a,0xbed20cec,2 +np.float32,0x7d440002,0x7f800000,2 +np.float32,0xbd410d80,0xbd411fcd,2 +np.float32,0x3d63ae07,0x3d63cc0c,2 +np.float32,0x805a9c13,0x805a9c13,2 +np.float32,0x803bdcdc,0x803bdcdc,2 +np.float32,0xbe88b354,0xbe8a5497,2 +np.float32,0x3f4eaf43,0x3f65e1c2,2 +np.float32,0x3f15e5b8,0x3f1e9c60,2 +np.float32,0x3e8a870c,0x3e8c394e,2 +np.float32,0x7e113de9,0x7f800000,2 +np.float32,0x7ee5ba41,0x7f800000,2 +np.float32,0xbe73d178,0xbe7620eb,2 +np.float32,0xfe972e6a,0xff800000,2 +np.float32,0xbf65567d,0xbf82a25a,2 +np.float32,0x3f38247e,0x3f487010,2 +np.float32,0xbece1c62,0xbed3b918,2 +np.float32,0x442c8d,0x442c8d,2 +np.float32,0x2dc52,0x2dc52,2 +np.float32,0x802ed923,0x802ed923,2 +np.float32,0x788cf8,0x788cf8,2 +np.float32,0x8024888e,0x8024888e,2 +np.float32,0x3f789bde,0x3f90c8fc,2 +np.float32,0x3f5de620,0x3f7abf88,2 +np.float32,0x3f0ffc45,0x3f17b2a7,2 +np.float32,0xbf709678,0xbf8accd4,2 +np.float32,0x12181f,0x12181f,2 +np.float32,0xfe54bbe4,0xff800000,2 +np.float32,0x7f1daba0,0x7f800000,2 +np.float32,0xbf6226df,0xbf805e3c,2 +np.float32,0xbd120610,0xbd120dfb,2 +np.float32,0x7f75e951,0x7f800000,2 +np.float32,0x80068048,0x80068048,2 +np.float32,0x45f04a,0x45f04a,2 +np.float32,0xff4c4f58,0xff800000,2 +np.float32,0x311604,0x311604,2 +np.float32,0x805e809c,0x805e809c,2 +np.float32,0x3d1d62c0,0x3d1d6caa,2 +np.float32,0x7f14ccf9,0x7f800000,2 +np.float32,0xff10017c,0xff800000,2 +np.float32,0xbf43ec48,0xbf579df4,2 +np.float32,0xff64da57,0xff800000,2 +np.float32,0x7f0622c5,0x7f800000,2 +np.float32,0x7f5460cd,0x7f800000,2 +np.float32,0xff0ef1c6,0xff800000,2 +np.float32,0xbece1146,0xbed3ad13,2 +np.float32,0x3f4d457f,0x3f63fc70,2 +np.float32,0xbdc1da28,0xbdc2244b,2 +np.float32,0xbe46d3f4,0xbe481463,2 +np.float32,0xff36b3d6,0xff800000,2 +np.float32,0xbec2e76c,0xbec7a540,2 +np.float32,0x8078fb81,0x8078fb81,2 +np.float32,0x7ec819cb,0x7f800000,2 +np.float32,0x39c4d,0x39c4d,2 +np.float32,0xbe8cddc2,0xbe8ea670,2 +np.float32,0xbf36dffb,0xbf46d48b,2 +np.float32,0xbf2302a3,0xbf2e4065,2 +np.float32,0x3e7b34a2,0x3e7dbb9a,2 +np.float32,0x3e3d87e1,0x3e3e9d62,2 +np.float32,0x7f3c94b1,0x7f800000,2 +np.float32,0x80455a85,0x80455a85,2 +np.float32,0xfd875568,0xff800000,2 +np.float32,0xbf618103,0xbf7fd1c8,2 +np.float32,0xbe332e3c,0xbe3418ac,2 +np.float32,0x80736b79,0x80736b79,2 +np.float32,0x3f705d9a,0x3f8aa2e6,2 +np.float32,0xbf3a36d2,0xbf4b134b,2 +np.float32,0xfddc55c0,0xff800000,2 +np.float32,0x805606fd,0x805606fd,2 +np.float32,0x3f4f0bc4,0x3f665e25,2 +np.float32,0xfebe7494,0xff800000,2 +np.float32,0xff0c541b,0xff800000,2 +np.float32,0xff0b8e7f,0xff800000,2 +np.float32,0xbcc51640,0xbcc51b1e,2 +np.float32,0x7ec1c4d0,0x7f800000,2 +np.float32,0xfc5c8e00,0xff800000,2 +np.float32,0x7f48d682,0x7f800000,2 +np.float32,0x7d5c7d8d,0x7f800000,2 +np.float32,0x8052ed03,0x8052ed03,2 +np.float32,0x7d4db058,0x7f800000,2 
+np.float32,0xff3a65ee,0xff800000,2 +np.float32,0x806eeb93,0x806eeb93,2 +np.float32,0x803f9733,0x803f9733,2 +np.float32,0xbf2d1388,0xbf3a90e3,2 +np.float32,0x68e260,0x68e260,2 +np.float32,0x3e47a69f,0x3e48eb0e,2 +np.float32,0x3f0c4623,0x3f136646,2 +np.float32,0x3f37a831,0x3f47d249,2 +np.float32,0xff153a0c,0xff800000,2 +np.float32,0x2e8086,0x2e8086,2 +np.float32,0xc3f5e,0xc3f5e,2 +np.float32,0x7f31dc14,0x7f800000,2 +np.float32,0xfee37d68,0xff800000,2 +np.float32,0x711d4,0x711d4,2 +np.float32,0x7ede2ce4,0x7f800000,2 +np.float32,0xbf5d76d0,0xbf7a23d0,2 +np.float32,0xbe2b9eb4,0xbe2c6cac,2 +np.float32,0x2b14d7,0x2b14d7,2 +np.float32,0x3ea1db72,0x3ea4910e,2 +np.float32,0x7f3f03f7,0x7f800000,2 +np.float32,0x92de5,0x92de5,2 +np.float32,0x80322e1b,0x80322e1b,2 +np.float32,0xbf5eb214,0xbf7bdd55,2 +np.float32,0xbf21bf87,0xbf2cba14,2 +np.float32,0xbf5d4b78,0xbf79e73a,2 +np.float32,0xbc302840,0xbc30291e,2 +np.float32,0xfee567c6,0xff800000,2 +np.float32,0x7f70ee14,0x7f800000,2 +np.float32,0x7e5c4b33,0x7f800000,2 +np.float32,0x3f1e7b64,0x3f28ccfd,2 +np.float32,0xbf6309f7,0xbf80ff3e,2 +np.float32,0x1c2fe3,0x1c2fe3,2 +np.float32,0x8e78d,0x8e78d,2 +np.float32,0x7f2fce73,0x7f800000,2 +np.float32,0x7f25f690,0x7f800000,2 +np.float32,0x8074cba5,0x8074cba5,2 +np.float32,0x16975f,0x16975f,2 +np.float32,0x8012cf5c,0x8012cf5c,2 +np.float32,0x7da72138,0x7f800000,2 +np.float32,0xbf563f35,0xbf7025be,2 +np.float32,0x3f69d3f5,0x3f85dcbe,2 +np.float32,0xbf15c148,0xbf1e7184,2 +np.float32,0xbe7a077c,0xbe7c8564,2 +np.float32,0x3ebb6ef1,0x3ebfa5e3,2 +np.float32,0xbe41fde4,0xbe43277b,2 +np.float32,0x7f10b479,0x7f800000,2 +np.float32,0x3e021ace,0x3e02747d,2 +np.float32,0x3e93d984,0x3e95e9be,2 +np.float32,0xfe17e924,0xff800000,2 +np.float32,0xfe21a7cc,0xff800000,2 +np.float32,0x8019b660,0x8019b660,2 +np.float32,0x7e954631,0x7f800000,2 +np.float32,0x7e7330d1,0x7f800000,2 +np.float32,0xbe007d98,0xbe00d3fb,2 +np.float32,0x3ef3870e,0x3efcd077,2 +np.float32,0x7f5bbde8,0x7f800000,2 +np.float32,0x14a5b3,0x14a5b3,2 +np.float32,0x3e84d23f,0x3e8650e8,2 +np.float32,0x80763017,0x80763017,2 +np.float32,0xfe871f36,0xff800000,2 +np.float32,0x7ed43150,0x7f800000,2 +np.float32,0x3cc44547,0x3cc44a16,2 +np.float32,0x3ef0c0fa,0x3ef9b97d,2 +np.float32,0xbede9944,0xbee5ad86,2 +np.float32,0xbf10f0b2,0xbf18cf0a,2 +np.float32,0x3ecdaa78,0x3ed33dd9,2 +np.float32,0x3f7cc058,0x3f93ee6b,2 +np.float32,0x2d952f,0x2d952f,2 +np.float32,0x3f2cf2de,0x3f3a687a,2 +np.float32,0x8029b33c,0x8029b33c,2 +np.float32,0xbf22c737,0xbf2df888,2 +np.float32,0xff53c84a,0xff800000,2 +np.float32,0x40a509,0x40a509,2 +np.float32,0x56abce,0x56abce,2 +np.float32,0xff7fffff,0xff800000,2 +np.float32,0xbf3e67f6,0xbf50741c,2 +np.float32,0xfde67580,0xff800000,2 +np.float32,0x3f103e9b,0x3f17ffc7,2 +np.float32,0x3f3f7232,0x3f51cbe2,2 +np.float32,0x803e6d78,0x803e6d78,2 +np.float32,0x3a61da,0x3a61da,2 +np.float32,0xbc04de80,0xbc04dedf,2 +np.float32,0x7f1e7c52,0x7f800000,2 +np.float32,0x8058ee88,0x8058ee88,2 +np.float32,0x806dd660,0x806dd660,2 +np.float32,0x7e4af9,0x7e4af9,2 +np.float32,0x80702d27,0x80702d27,2 +np.float32,0x802cdad1,0x802cdad1,2 +np.float32,0x3e9b5c23,0x3e9dc149,2 +np.float32,0x7f076e89,0x7f800000,2 +np.float32,0x7f129d68,0x7f800000,2 +np.float32,0x7f6f0b0a,0x7f800000,2 +np.float32,0x7eafafb5,0x7f800000,2 +np.float32,0xbf2ef2ca,0xbf3ce332,2 +np.float32,0xff34c000,0xff800000,2 +np.float32,0x7f559274,0x7f800000,2 +np.float32,0xfed08556,0xff800000,2 +np.float32,0xbf014621,0xbf06d6ad,2 +np.float32,0xff23086a,0xff800000,2 +np.float32,0x6cb33f,0x6cb33f,2 
+np.float32,0xfe6e3ffc,0xff800000,2 +np.float32,0x3e6bbec0,0x3e6dd546,2 +np.float32,0x8036afa6,0x8036afa6,2 +np.float32,0xff800000,0xff800000,2 +np.float32,0x3e0ed05c,0x3e0f46ff,2 +np.float32,0x3ec9215c,0x3ece57e6,2 +np.float32,0xbf449fa4,0xbf5888aa,2 +np.float32,0xff2c6640,0xff800000,2 +np.float32,0x7f08f4a7,0x7f800000,2 +np.float32,0xbf4f63e5,0xbf66d4c1,2 +np.float32,0x3f800000,0x3f966cfe,2 +np.float32,0xfe86c7d2,0xff800000,2 +np.float32,0x3f63f969,0x3f81a970,2 +np.float32,0xbd7022d0,0xbd704609,2 +np.float32,0xbead906c,0xbeb0e853,2 +np.float32,0x7ef149ee,0x7f800000,2 +np.float32,0xff0b9ff7,0xff800000,2 +np.float32,0x3f38380d,0x3f4888e7,2 +np.float32,0x3ef3a3e2,0x3efcf09e,2 +np.float32,0xff616477,0xff800000,2 +np.float32,0x3f3f83e4,0x3f51e2c3,2 +np.float32,0xbf79963c,0xbf918642,2 +np.float32,0x801416f4,0x801416f4,2 +np.float32,0xff75ce6d,0xff800000,2 +np.float32,0xbdbf3588,0xbdbf7cad,2 +np.float32,0xbe6ea938,0xbe70d3dc,2 +np.float32,0x8066f977,0x8066f977,2 +np.float32,0x3f5b5362,0x3f7728aa,2 +np.float32,0xbf72052c,0xbf8bdbd8,2 +np.float32,0xbe21ed74,0xbe229a6f,2 +np.float32,0x8062d19c,0x8062d19c,2 +np.float32,0x3ed8d01f,0x3edf59e6,2 +np.float32,0x803ed42b,0x803ed42b,2 +np.float32,0xbe099a64,0xbe0a0481,2 +np.float32,0xbe173eb4,0xbe17cba2,2 +np.float32,0xbebdcf02,0xbec22faf,2 +np.float32,0x7e3ff29e,0x7f800000,2 +np.float32,0x367c92,0x367c92,2 +np.float32,0xbf5c9db8,0xbf78f4a4,2 +np.float32,0xff0b49ea,0xff800000,2 +np.float32,0x3f4f9bc4,0x3f672001,2 +np.float32,0x85d4a,0x85d4a,2 +np.float32,0x80643e33,0x80643e33,2 +np.float32,0x8013aabd,0x8013aabd,2 +np.float32,0xff6997c3,0xff800000,2 +np.float32,0x3f4dd43c,0x3f64bbb6,2 +np.float32,0xff13bbb9,0xff800000,2 +np.float32,0x3f34efa2,0x3f446187,2 +np.float32,0x3e4b2f10,0x3e4c850d,2 +np.float32,0xfef695c6,0xff800000,2 +np.float32,0x7f7e0057,0x7f800000,2 +np.float32,0x3f6e1b9c,0x3f88fa40,2 +np.float32,0x806e46cf,0x806e46cf,2 +np.float32,0x3f15a88a,0x3f1e546c,2 +np.float32,0xbd2de7d0,0xbd2df530,2 +np.float32,0xbf63cae0,0xbf818854,2 +np.float32,0xbdc3e1a0,0xbdc42e1e,2 +np.float32,0xbf11a038,0xbf199b98,2 +np.float32,0xbec13706,0xbec5d56b,2 +np.float32,0x3f1c5f54,0x3f26478d,2 +np.float32,0x3e9ea97e,0x3ea136b4,2 +np.float32,0xfeb5a508,0xff800000,2 +np.float32,0x7f4698f4,0x7f800000,2 +np.float32,0xff51ee2c,0xff800000,2 +np.float32,0xff5994df,0xff800000,2 +np.float32,0x4b9fb9,0x4b9fb9,2 +np.float32,0xfda10d98,0xff800000,2 +np.float32,0x525555,0x525555,2 +np.float32,0x7ed571ef,0x7f800000,2 +np.float32,0xbf600d18,0xbf7dc50c,2 +np.float32,0x3ec674ca,0x3ecb768b,2 +np.float32,0x3cb69115,0x3cb694f3,2 +np.float32,0x7eac75f2,0x7f800000,2 +np.float32,0x804d4d75,0x804d4d75,2 +np.float32,0xfed5292e,0xff800000,2 +np.float32,0x800ed06a,0x800ed06a,2 +np.float32,0xfec37584,0xff800000,2 +np.float32,0x3ef96ac7,0x3f01b326,2 +np.float32,0x42f743,0x42f743,2 +np.float32,0x3f56f442,0x3f711e39,2 +np.float32,0xbf7ea726,0xbf956375,2 +np.float32,0x806c7202,0x806c7202,2 +np.float32,0xbd8ee980,0xbd8f0733,2 +np.float32,0xbdf2e930,0xbdf37b18,2 +np.float32,0x3f103910,0x3f17f955,2 +np.float32,0xff123e8f,0xff800000,2 +np.float32,0x806e4b5d,0x806e4b5d,2 +np.float32,0xbf4f3bfc,0xbf669f07,2 +np.float32,0xbf070c16,0xbf0d6609,2 +np.float32,0xff00e0ba,0xff800000,2 +np.float32,0xff49d828,0xff800000,2 +np.float32,0x7e47f04a,0x7f800000,2 +np.float32,0x7e984dac,0x7f800000,2 +np.float32,0x3f77473c,0x3f8fc858,2 +np.float32,0x3f017439,0x3f070ac8,2 +np.float32,0x118417,0x118417,2 +np.float32,0xbcf7a2c0,0xbcf7ac68,2 +np.float32,0xfee46fee,0xff800000,2 +np.float32,0x3e42a648,0x3e43d2e9,2 
+np.float32,0x80131916,0x80131916,2 +np.float32,0x806209d3,0x806209d3,2 +np.float32,0x807c1f12,0x807c1f12,2 +np.float32,0x2f3696,0x2f3696,2 +np.float32,0xff28722b,0xff800000,2 +np.float32,0x7f1416a1,0x7f800000,2 +np.float32,0x8054e7a1,0x8054e7a1,2 +np.float32,0xbddc39a0,0xbddca656,2 +np.float32,0x7dc60175,0x7f800000,2 +np.float64,0x7fd0ae584da15cb0,0x7ff0000000000000,2 +np.float64,0x7fd41d68e5283ad1,0x7ff0000000000000,2 +np.float64,0x7fe93073bb7260e6,0x7ff0000000000000,2 +np.float64,0x3fb4fd19d229fa34,0x3fb5031f57dbac0f,2 +np.float64,0x85609ce10ac2,0x85609ce10ac2,2 +np.float64,0xbfd7aa12ccaf5426,0xbfd8351003a320e2,2 +np.float64,0x8004487c9b4890fa,0x8004487c9b4890fa,2 +np.float64,0x7fe7584cfd2eb099,0x7ff0000000000000,2 +np.float64,0x800ea8edc6dd51dc,0x800ea8edc6dd51dc,2 +np.float64,0x3fe0924aa5a12495,0x3fe15276e271c6dc,2 +np.float64,0x3feb1abf6d36357f,0x3fee76b4d3d06964,2 +np.float64,0x3fa8c14534318280,0x3fa8c3bd5ce5923c,2 +np.float64,0x800b9f5915d73eb3,0x800b9f5915d73eb3,2 +np.float64,0xffc05aaa7820b554,0xfff0000000000000,2 +np.float64,0x800157eda8c2afdc,0x800157eda8c2afdc,2 +np.float64,0xffe8d90042b1b200,0xfff0000000000000,2 +np.float64,0x3feda02ea93b405d,0x3ff1057e61d08d59,2 +np.float64,0xffd03b7361a076e6,0xfff0000000000000,2 +np.float64,0x3fe1a8ecd7e351da,0x3fe291eda9080847,2 +np.float64,0xffc5bfdff82b7fc0,0xfff0000000000000,2 +np.float64,0xbfe6fb3d386df67a,0xbfe9022c05df0565,2 +np.float64,0x7fefffffffffffff,0x7ff0000000000000,2 +np.float64,0x7fa10c340c221867,0x7ff0000000000000,2 +np.float64,0x3fe55cbf1daab97e,0x3fe6fc1648258b75,2 +np.float64,0xbfddeb5f60bbd6be,0xbfdf056d4fb5825f,2 +np.float64,0xffddb1a8213b6350,0xfff0000000000000,2 +np.float64,0xbfb20545e4240a88,0xbfb2091579375176,2 +np.float64,0x3f735ded2026bbda,0x3f735df1dad4ee3a,2 +np.float64,0xbfd1eb91efa3d724,0xbfd227c044dead61,2 +np.float64,0xffd737c588ae6f8c,0xfff0000000000000,2 +np.float64,0x3fc46818ec28d032,0x3fc47e416c4237a6,2 +np.float64,0x0,0x0,2 +np.float64,0xffb632097a2c6410,0xfff0000000000000,2 +np.float64,0xbfcb5ae84b36b5d0,0xbfcb905613af55b8,2 +np.float64,0xbfe7b926402f724c,0xbfe9f4f0be6aacc3,2 +np.float64,0x80081840b3f03082,0x80081840b3f03082,2 +np.float64,0x3fe767a656eecf4d,0x3fe98c53b4779de7,2 +np.float64,0x8005834c088b0699,0x8005834c088b0699,2 +np.float64,0x80074e92658e9d26,0x80074e92658e9d26,2 +np.float64,0x80045d60c268bac2,0x80045d60c268bac2,2 +np.float64,0xffb9aecfe8335da0,0xfff0000000000000,2 +np.float64,0x7fcad3e1cd35a7c3,0x7ff0000000000000,2 +np.float64,0xbf881853d03030c0,0xbf8818783e28fc87,2 +np.float64,0xe18c6d23c318e,0xe18c6d23c318e,2 +np.float64,0x7fcb367b8f366cf6,0x7ff0000000000000,2 +np.float64,0x5c13436cb8269,0x5c13436cb8269,2 +np.float64,0xffe5399938aa7332,0xfff0000000000000,2 +np.float64,0xbfdc45dbc3b88bb8,0xbfdd33958222c27e,2 +np.float64,0xbfd714691bae28d2,0xbfd7954edbef810b,2 +np.float64,0xbfdf18b02b3e3160,0xbfe02ad13634c651,2 +np.float64,0x8003e6f276e7cde6,0x8003e6f276e7cde6,2 +np.float64,0x3febb6b412776d68,0x3fef4f753def31f9,2 +np.float64,0x7fe016a3b4a02d46,0x7ff0000000000000,2 +np.float64,0x3fdc899ac7b91336,0x3fdd7e1cee1cdfc8,2 +np.float64,0x800219271e24324f,0x800219271e24324f,2 +np.float64,0x1529d93e2a53c,0x1529d93e2a53c,2 +np.float64,0x800d5bc827fab790,0x800d5bc827fab790,2 +np.float64,0x3e1495107c293,0x3e1495107c293,2 +np.float64,0x3fe89da0f2b13b42,0x3feb1dc1f3015ad7,2 +np.float64,0x800ba8c17b975183,0x800ba8c17b975183,2 +np.float64,0x8002dacf0265b59f,0x8002dacf0265b59f,2 +np.float64,0xffe6d0a4cc2da149,0xfff0000000000000,2 +np.float64,0x3fdf23fe82be47fc,0x3fe03126d8e2b309,2 
+np.float64,0xffe41b1f1c28363e,0xfff0000000000000,2 +np.float64,0xbfd635c634ac6b8c,0xbfd6a8966da6adaa,2 +np.float64,0x800755bc08eeab79,0x800755bc08eeab79,2 +np.float64,0x800ba4c47c374989,0x800ba4c47c374989,2 +np.float64,0x7fec9f7649793eec,0x7ff0000000000000,2 +np.float64,0x7fdbf45738b7e8ad,0x7ff0000000000000,2 +np.float64,0x3f5597f07eab4,0x3f5597f07eab4,2 +np.float64,0xbfbf4599183e8b30,0xbfbf5985d8c65097,2 +np.float64,0xbf5b200580364000,0xbf5b2006501b21ae,2 +np.float64,0x7f91868370230d06,0x7ff0000000000000,2 +np.float64,0x3838e2a67071d,0x3838e2a67071d,2 +np.float64,0xffefe3ff5d3fc7fe,0xfff0000000000000,2 +np.float64,0xffe66b26d06cd64d,0xfff0000000000000,2 +np.float64,0xbfd830a571b0614a,0xbfd8c526927c742c,2 +np.float64,0x7fe8442122f08841,0x7ff0000000000000,2 +np.float64,0x800efa8c637df519,0x800efa8c637df519,2 +np.float64,0xf0026835e004d,0xf0026835e004d,2 +np.float64,0xffb11beefe2237e0,0xfff0000000000000,2 +np.float64,0x3fef9bbb327f3776,0x3ff2809f10641c32,2 +np.float64,0x350595306a0b3,0x350595306a0b3,2 +np.float64,0xf7f6538befecb,0xf7f6538befecb,2 +np.float64,0xffe36379c4a6c6f3,0xfff0000000000000,2 +np.float64,0x28b1d82e5163c,0x28b1d82e5163c,2 +np.float64,0x70a3d804e147c,0x70a3d804e147c,2 +np.float64,0xffd96c1bc9b2d838,0xfff0000000000000,2 +np.float64,0xffce8e00893d1c00,0xfff0000000000000,2 +np.float64,0x800f2bdcb25e57b9,0x800f2bdcb25e57b9,2 +np.float64,0xbfe0d9c63361b38c,0xbfe1a3eb02192b76,2 +np.float64,0xbfdc7b8711b8f70e,0xbfdd6e9db3a01e51,2 +np.float64,0x99e22ec133c46,0x99e22ec133c46,2 +np.float64,0xffeaef6ddab5dedb,0xfff0000000000000,2 +np.float64,0x7fe89c22c0f13845,0x7ff0000000000000,2 +np.float64,0x8002d5207de5aa42,0x8002d5207de5aa42,2 +np.float64,0x3fd1b13353236267,0x3fd1eb1b9345dfca,2 +np.float64,0x800ccae0a41995c1,0x800ccae0a41995c1,2 +np.float64,0x3fdbdaba38b7b574,0x3fdcbdfcbca37ce6,2 +np.float64,0x5b06d12cb60db,0x5b06d12cb60db,2 +np.float64,0xffd52262752a44c4,0xfff0000000000000,2 +np.float64,0x5a17f050b42ff,0x5a17f050b42ff,2 +np.float64,0x3d24205e7a485,0x3d24205e7a485,2 +np.float64,0x7fbed4dec63da9bd,0x7ff0000000000000,2 +np.float64,0xbfe56e9776aadd2f,0xbfe71212863c284f,2 +np.float64,0x7fea0bc952341792,0x7ff0000000000000,2 +np.float64,0x800f692d139ed25a,0x800f692d139ed25a,2 +np.float64,0xffdb63feab36c7fe,0xfff0000000000000,2 +np.float64,0x3fe1c2297fe38452,0x3fe2af21293c9571,2 +np.float64,0x7fede384747bc708,0x7ff0000000000000,2 +np.float64,0x800440169288802e,0x800440169288802e,2 +np.float64,0xffe3241eeb26483e,0xfff0000000000000,2 +np.float64,0xffe28f3879651e70,0xfff0000000000000,2 +np.float64,0xa435cbc1486d,0xa435cbc1486d,2 +np.float64,0x7fe55e08db6abc11,0x7ff0000000000000,2 +np.float64,0x1405e624280be,0x1405e624280be,2 +np.float64,0x3fd861bdf0b0c37c,0x3fd8f9d2e33e45e5,2 +np.float64,0x3feeb67cdc3d6cfa,0x3ff1d337d81d1c14,2 +np.float64,0x3fd159a10e22b342,0x3fd1903be7c2ea0c,2 +np.float64,0x3fd84626bc308c4d,0x3fd8dc373645e65b,2 +np.float64,0xffd3da81d9a7b504,0xfff0000000000000,2 +np.float64,0xbfd4a768b8294ed2,0xbfd503aa7c240051,2 +np.float64,0x3fe3059f2a660b3e,0x3fe42983e0c6bb2e,2 +np.float64,0x3fe3b8353827706a,0x3fe4fdd635c7269b,2 +np.float64,0xbfe4af0399695e07,0xbfe6277d9002b46c,2 +np.float64,0xbfd7e18a92afc316,0xbfd87066b54c4fe6,2 +np.float64,0x800432bcab48657a,0x800432bcab48657a,2 +np.float64,0x80033d609d267ac2,0x80033d609d267ac2,2 +np.float64,0x7fef5f758e7ebeea,0x7ff0000000000000,2 +np.float64,0xbfed7833dbfaf068,0xbff0e85bf45a5ebc,2 +np.float64,0x3fe2283985a45073,0x3fe325b0a9099c74,2 +np.float64,0xe820b4b3d0417,0xe820b4b3d0417,2 +np.float64,0x8003ecb72aa7d96f,0x8003ecb72aa7d96f,2 
+np.float64,0xbfeab2c755b5658f,0xbfede7c83e92a625,2 +np.float64,0xbfc7b287f72f6510,0xbfc7d53ef2ffe9dc,2 +np.float64,0xffd9a41d0f33483a,0xfff0000000000000,2 +np.float64,0x3fd3a5b6e3a74b6c,0x3fd3f516f39a4725,2 +np.float64,0x800bc72091578e42,0x800bc72091578e42,2 +np.float64,0x800ff405ce9fe80c,0x800ff405ce9fe80c,2 +np.float64,0x57918600af24,0x57918600af24,2 +np.float64,0x2a5be7fa54b7e,0x2a5be7fa54b7e,2 +np.float64,0xbfdca7886bb94f10,0xbfdd9f142b5b43e4,2 +np.float64,0xbfe216993ee42d32,0xbfe3112936590995,2 +np.float64,0xbfe06bd9cf20d7b4,0xbfe126cd353ab42f,2 +np.float64,0x8003e6c31827cd87,0x8003e6c31827cd87,2 +np.float64,0x8005f37d810be6fc,0x8005f37d810be6fc,2 +np.float64,0x800715b081ae2b62,0x800715b081ae2b62,2 +np.float64,0x3fef94c35bff2986,0x3ff27b4bed2f4051,2 +np.float64,0x6f5798e0deb0,0x6f5798e0deb0,2 +np.float64,0x3fcef1f05c3de3e1,0x3fcf3f557550598f,2 +np.float64,0xbf9a91c400352380,0xbf9a92876273b85c,2 +np.float64,0x3fc9143f7f322880,0x3fc93d678c05d26b,2 +np.float64,0x78ad847af15b1,0x78ad847af15b1,2 +np.float64,0x8000fdc088c1fb82,0x8000fdc088c1fb82,2 +np.float64,0x800200fd304401fb,0x800200fd304401fb,2 +np.float64,0x7fb8ab09dc315613,0x7ff0000000000000,2 +np.float64,0x3fe949771b7292ee,0x3fec00891c3fc5a2,2 +np.float64,0xbfc54cae0e2a995c,0xbfc565e0f3d0e3af,2 +np.float64,0xffd546161e2a8c2c,0xfff0000000000000,2 +np.float64,0x800fe1d1279fc3a2,0x800fe1d1279fc3a2,2 +np.float64,0x3fd9c45301b388a8,0x3fda77fa1f4c79bf,2 +np.float64,0x7fe10ff238221fe3,0x7ff0000000000000,2 +np.float64,0xbfbc2181ae384300,0xbfbc3002229155c4,2 +np.float64,0xbfe7bbfae4ef77f6,0xbfe9f895e91f468d,2 +np.float64,0x800d3d994f7a7b33,0x800d3d994f7a7b33,2 +np.float64,0xffe6e15a896dc2b4,0xfff0000000000000,2 +np.float64,0x800e6b6c8abcd6d9,0x800e6b6c8abcd6d9,2 +np.float64,0xbfd862c938b0c592,0xbfd8faf1cdcb09db,2 +np.float64,0xffe2411f8464823e,0xfff0000000000000,2 +np.float64,0xffd0b32efaa1665e,0xfff0000000000000,2 +np.float64,0x3ac4ace475896,0x3ac4ace475896,2 +np.float64,0xf9c3a7ebf3875,0xf9c3a7ebf3875,2 +np.float64,0xdb998ba5b7332,0xdb998ba5b7332,2 +np.float64,0xbfe438a14fe87142,0xbfe5981751e4c5cd,2 +np.float64,0xbfbcf48cbc39e918,0xbfbd045d60e65d3a,2 +np.float64,0x7fde499615bc932b,0x7ff0000000000000,2 +np.float64,0x800bba269057744e,0x800bba269057744e,2 +np.float64,0x3fc9bb1ba3337638,0x3fc9e78fdb6799c1,2 +np.float64,0xffd9f974fbb3f2ea,0xfff0000000000000,2 +np.float64,0x7fcf1ad1693e35a2,0x7ff0000000000000,2 +np.float64,0x7fe5dcedd32bb9db,0x7ff0000000000000,2 +np.float64,0xeb06500bd60ca,0xeb06500bd60ca,2 +np.float64,0x7fd73e7b592e7cf6,0x7ff0000000000000,2 +np.float64,0xbfe9d91ae873b236,0xbfecc08482849bcd,2 +np.float64,0xffc85338b730a670,0xfff0000000000000,2 +np.float64,0x7fbba41eee37483d,0x7ff0000000000000,2 +np.float64,0x3fed5624fb7aac4a,0x3ff0cf9f0de1fd54,2 +np.float64,0xffe566d80d6acdb0,0xfff0000000000000,2 +np.float64,0x3fd4477884a88ef1,0x3fd49ec7acdd25a0,2 +np.float64,0x3fcb98c5fd37318c,0x3fcbcfa20e2c2712,2 +np.float64,0xffdeba71d5bd74e4,0xfff0000000000000,2 +np.float64,0x8001edc59dc3db8c,0x8001edc59dc3db8c,2 +np.float64,0x3fe6b09e896d613e,0x3fe8a3bb541ec0e3,2 +np.float64,0x3fe8694b4970d296,0x3fead94d271d05cf,2 +np.float64,0xb52c27bf6a585,0xb52c27bf6a585,2 +np.float64,0x7fcb0a21d9361443,0x7ff0000000000000,2 +np.float64,0xbfd9efc68cb3df8e,0xbfdaa7058c0ccbd1,2 +np.float64,0x8007cd170fef9a2f,0x8007cd170fef9a2f,2 +np.float64,0x3fe83325e770664c,0x3fea92c55c9d567e,2 +np.float64,0x800bd0085537a011,0x800bd0085537a011,2 +np.float64,0xffe05b9e7820b73c,0xfff0000000000000,2 +np.float64,0x3fea4ce4347499c8,0x3fed5cea9fdc541b,2 
+np.float64,0x7fe08aae1921155b,0x7ff0000000000000,2 +np.float64,0x3fe7a5e7deef4bd0,0x3fe9dc2e20cfb61c,2 +np.float64,0xbfe0ccc8e6e19992,0xbfe195175f32ee3f,2 +np.float64,0xbfe8649717f0c92e,0xbfead3298974dcf0,2 +np.float64,0x7fed6c5308bad8a5,0x7ff0000000000000,2 +np.float64,0xffdbd8c7af37b190,0xfff0000000000000,2 +np.float64,0xbfb2bc4d06257898,0xbfb2c09569912839,2 +np.float64,0x3fc62eca512c5d95,0x3fc64b4251bce8f9,2 +np.float64,0xbfcae2ddbd35c5bc,0xbfcb15971fc61312,2 +np.float64,0x18d26ce831a4f,0x18d26ce831a4f,2 +np.float64,0x7fe38b279267164e,0x7ff0000000000000,2 +np.float64,0x97e1d9ab2fc3b,0x97e1d9ab2fc3b,2 +np.float64,0xbfee8e4785fd1c8f,0xbff1b52d16807627,2 +np.float64,0xbfb189b4a6231368,0xbfb18d37e83860ee,2 +np.float64,0xffd435761ea86aec,0xfff0000000000000,2 +np.float64,0x3fe6c48ebced891e,0x3fe8bcea189c3867,2 +np.float64,0x7fdadd3678b5ba6c,0x7ff0000000000000,2 +np.float64,0x7fea8f15b7b51e2a,0x7ff0000000000000,2 +np.float64,0xbff0000000000000,0xbff2cd9fc44eb982,2 +np.float64,0x80004c071120980f,0x80004c071120980f,2 +np.float64,0x8005367adfea6cf6,0x8005367adfea6cf6,2 +np.float64,0x3fbdc9139a3b9220,0x3fbdda4aba667ce5,2 +np.float64,0x7fed5ee3ad7abdc6,0x7ff0000000000000,2 +np.float64,0x51563fb2a2ac9,0x51563fb2a2ac9,2 +np.float64,0xbfba7d26ce34fa50,0xbfba894229c50ea1,2 +np.float64,0x6c10db36d821c,0x6c10db36d821c,2 +np.float64,0xbfbdaec0d03b5d80,0xbfbdbfca6ede64f4,2 +np.float64,0x800a1cbe7414397d,0x800a1cbe7414397d,2 +np.float64,0x800ae6e7f2d5cdd0,0x800ae6e7f2d5cdd0,2 +np.float64,0x3fea63d3fef4c7a8,0x3fed7c1356688ddc,2 +np.float64,0xbfde1e3a88bc3c76,0xbfdf3dfb09cc2260,2 +np.float64,0xbfd082d75a2105ae,0xbfd0b1e28c84877b,2 +np.float64,0x7fea1e5e85f43cbc,0x7ff0000000000000,2 +np.float64,0xffe2237a1a6446f4,0xfff0000000000000,2 +np.float64,0x3fd1e2be8523c57d,0x3fd21e93dfd1bbc4,2 +np.float64,0x3fd1acd428a359a8,0x3fd1e6916a42bc3a,2 +np.float64,0x61a152f0c342b,0x61a152f0c342b,2 +np.float64,0xbfc61a6b902c34d8,0xbfc6369557690ba0,2 +np.float64,0x7fd1a84b1f235095,0x7ff0000000000000,2 +np.float64,0x1c5cc7e638b9a,0x1c5cc7e638b9a,2 +np.float64,0x8008039755f0072f,0x8008039755f0072f,2 +np.float64,0x80097532d6f2ea66,0x80097532d6f2ea66,2 +np.float64,0xbfc6d979a12db2f4,0xbfc6f89777c53f8f,2 +np.float64,0x8004293ab1085276,0x8004293ab1085276,2 +np.float64,0x3fc2af5c21255eb8,0x3fc2c05dc0652554,2 +np.float64,0xbfd9a5ab87b34b58,0xbfda56d1076abc98,2 +np.float64,0xbfebd360ba77a6c2,0xbfef779fd6595f9b,2 +np.float64,0xffd5313c43aa6278,0xfff0000000000000,2 +np.float64,0xbfe994a262b32945,0xbfec64b969852ed5,2 +np.float64,0x3fce01a52e3c034a,0x3fce48324eb29c31,2 +np.float64,0x56bd74b2ad7af,0x56bd74b2ad7af,2 +np.float64,0xb84093ff70813,0xb84093ff70813,2 +np.float64,0x7fe776df946eedbe,0x7ff0000000000000,2 +np.float64,0xbfe294ac2e652958,0xbfe3a480938afa26,2 +np.float64,0x7fe741b4d0ee8369,0x7ff0000000000000,2 +np.float64,0x800b7e8a1056fd15,0x800b7e8a1056fd15,2 +np.float64,0x7fd28f1269251e24,0x7ff0000000000000,2 +np.float64,0x8009d4492e73a893,0x8009d4492e73a893,2 +np.float64,0x3fe3f27fca67e500,0x3fe543aff825e244,2 +np.float64,0x3fd12447e5a24890,0x3fd158efe43c0452,2 +np.float64,0xbfd58df0f2ab1be2,0xbfd5f6d908e3ebce,2 +np.float64,0xffc0a8e4642151c8,0xfff0000000000000,2 +np.float64,0xbfedb197787b632f,0xbff112367ec9d3e7,2 +np.float64,0xffdde07a7f3bc0f4,0xfff0000000000000,2 +np.float64,0x3fe91f3e5b723e7d,0x3febc886a1d48364,2 +np.float64,0x3fe50415236a082a,0x3fe68f43a5468d8c,2 +np.float64,0xd9a0c875b3419,0xd9a0c875b3419,2 +np.float64,0xbfee04ccf4bc099a,0xbff14f4740a114cf,2 +np.float64,0xbfd2bcc6a125798e,0xbfd30198b1e7d7ed,2 
+np.float64,0xbfeb3c16f8f6782e,0xbfeea4ce47d09f58,2 +np.float64,0xffd3ba19e4a77434,0xfff0000000000000,2 +np.float64,0x8010000000000000,0x8010000000000000,2 +np.float64,0x3fdef0a642bde14d,0x3fe0146677b3a488,2 +np.float64,0x3fdc3dd0a2b87ba0,0x3fdd2abe65651487,2 +np.float64,0x3fdbb1fd47b763fb,0x3fdc915a2fd19f4b,2 +np.float64,0x7fbaa375e63546eb,0x7ff0000000000000,2 +np.float64,0x433ef8ee867e0,0x433ef8ee867e0,2 +np.float64,0xf5345475ea68b,0xf5345475ea68b,2 +np.float64,0xa126419b424c8,0xa126419b424c8,2 +np.float64,0x3fe0057248200ae5,0x3fe0b2f488339709,2 +np.float64,0xffc5e3b82f2bc770,0xfff0000000000000,2 +np.float64,0xffb215c910242b90,0xfff0000000000000,2 +np.float64,0xbfeba4ae0837495c,0xbfef3642e4b54aac,2 +np.float64,0xffbb187ebe363100,0xfff0000000000000,2 +np.float64,0x3fe4c6a496a98d49,0x3fe64440cdf06aab,2 +np.float64,0x800767a28f6ecf46,0x800767a28f6ecf46,2 +np.float64,0x3fdbed63b1b7dac8,0x3fdcd27318c0b683,2 +np.float64,0x80006d8339e0db07,0x80006d8339e0db07,2 +np.float64,0x8000b504f0416a0b,0x8000b504f0416a0b,2 +np.float64,0xbfe88055bfb100ac,0xbfeaf767bd2767b9,2 +np.float64,0x3fefe503317fca06,0x3ff2b8d4057240c8,2 +np.float64,0x7fe307538b660ea6,0x7ff0000000000000,2 +np.float64,0x944963c12892d,0x944963c12892d,2 +np.float64,0xbfd2c20b38a58416,0xbfd30717900f8233,2 +np.float64,0x7feed04e3e3da09b,0x7ff0000000000000,2 +np.float64,0x3fe639619cac72c3,0x3fe80de7b8560a8d,2 +np.float64,0x3fde066c66bc0cd9,0x3fdf237fb759a652,2 +np.float64,0xbfc56b22b52ad644,0xbfc584c267a47ebd,2 +np.float64,0x3fc710d5b12e21ab,0x3fc730d817ba0d0c,2 +np.float64,0x3fee1dfc347c3bf8,0x3ff161d9c3e15f68,2 +np.float64,0x3fde400954bc8013,0x3fdf639e5cc9e7a9,2 +np.float64,0x56e701f8adce1,0x56e701f8adce1,2 +np.float64,0xbfe33bbc89e67779,0xbfe46996b39381fe,2 +np.float64,0x7fec89e2f87913c5,0x7ff0000000000000,2 +np.float64,0xbfdad58b40b5ab16,0xbfdba098cc0ad5d3,2 +np.float64,0x3fe99c76a13338ed,0x3fec6f31bae613e7,2 +np.float64,0x3fe4242a29a84854,0x3fe57f6b45e5c0ef,2 +np.float64,0xbfe79d3199ef3a63,0xbfe9d0fb96c846ba,2 +np.float64,0x7ff8000000000000,0x7ff8000000000000,2 +np.float64,0xbfeb35a6cf766b4e,0xbfee9be4e7e943f7,2 +np.float64,0x3e047f267c091,0x3e047f267c091,2 +np.float64,0x4bf1376a97e28,0x4bf1376a97e28,2 +np.float64,0x800ef419685de833,0x800ef419685de833,2 +np.float64,0x3fe0efa61a21df4c,0x3fe1bce98baf2f0f,2 +np.float64,0x3fcc13c4d738278a,0x3fcc4d8c778bcaf7,2 +np.float64,0x800f1d291afe3a52,0x800f1d291afe3a52,2 +np.float64,0x3fd3f10e6da7e21d,0x3fd444106761ea1d,2 +np.float64,0x800706d6d76e0dae,0x800706d6d76e0dae,2 +np.float64,0xffa1ffbc9023ff80,0xfff0000000000000,2 +np.float64,0xbfe098f26d6131e5,0xbfe15a08a5f3eac0,2 +np.float64,0x3fe984f9cc7309f4,0x3fec4fcdbdb1cb9b,2 +np.float64,0x7fd7c2f1eaaf85e3,0x7ff0000000000000,2 +np.float64,0x800a8adb64f515b7,0x800a8adb64f515b7,2 +np.float64,0x80060d3ffc8c1a81,0x80060d3ffc8c1a81,2 +np.float64,0xbfec37e4aef86fc9,0xbff0029a6a1d61e2,2 +np.float64,0x800b21bcfcf6437a,0x800b21bcfcf6437a,2 +np.float64,0xbfc08facc1211f58,0xbfc09b8380ea8032,2 +np.float64,0xffebb4b52577696a,0xfff0000000000000,2 +np.float64,0x800b08096df61013,0x800b08096df61013,2 +np.float64,0x8000000000000000,0x8000000000000000,2 +np.float64,0xffd2f0c9c8a5e194,0xfff0000000000000,2 +np.float64,0xffe78b2299af1644,0xfff0000000000000,2 +np.float64,0x7fd0444794a0888e,0x7ff0000000000000,2 +np.float64,0x307c47b460f8a,0x307c47b460f8a,2 +np.float64,0xffe6b4c851ad6990,0xfff0000000000000,2 +np.float64,0xffe1877224a30ee4,0xfff0000000000000,2 +np.float64,0x48d7b5c091af7,0x48d7b5c091af7,2 +np.float64,0xbfa1dc6b1c23b8d0,0xbfa1dd5889e1b7da,2 
+np.float64,0x3fe5004737ea008e,0x3fe68a9c310b08c1,2 +np.float64,0x7fec5f0742b8be0e,0x7ff0000000000000,2 +np.float64,0x3fd0a86285a150c5,0x3fd0d8b238d557fa,2 +np.float64,0x7fed60380efac06f,0x7ff0000000000000,2 +np.float64,0xeeca74dfdd94f,0xeeca74dfdd94f,2 +np.float64,0x3fda05aaa8b40b54,0x3fdabebdbf405e84,2 +np.float64,0x800e530ceb1ca61a,0x800e530ceb1ca61a,2 +np.float64,0x800b3866379670cd,0x800b3866379670cd,2 +np.float64,0xffedb3e7fa3b67cf,0xfff0000000000000,2 +np.float64,0xffdfa4c0713f4980,0xfff0000000000000,2 +np.float64,0x7fe4679e0728cf3b,0x7ff0000000000000,2 +np.float64,0xffe978611ef2f0c2,0xfff0000000000000,2 +np.float64,0x7fc9f4601f33e8bf,0x7ff0000000000000,2 +np.float64,0x3fd4942de6a9285c,0x3fd4ef6e089357dd,2 +np.float64,0x3faafe064435fc00,0x3fab0139cd6564dc,2 +np.float64,0x800d145a519a28b5,0x800d145a519a28b5,2 +np.float64,0xbfd82636f2304c6e,0xbfd8b9f75ddd2f02,2 +np.float64,0xbfdf2e975e3e5d2e,0xbfe037174280788c,2 +np.float64,0x7fd7051d7c2e0a3a,0x7ff0000000000000,2 +np.float64,0x8007933d452f267b,0x8007933d452f267b,2 +np.float64,0xb2043beb64088,0xb2043beb64088,2 +np.float64,0x3febfd9708f7fb2e,0x3fefb2ef090f18d2,2 +np.float64,0xffd9bc6bc83378d8,0xfff0000000000000,2 +np.float64,0xc10f9fd3821f4,0xc10f9fd3821f4,2 +np.float64,0x3fe3c83413a79068,0x3fe510fa1dd8edf7,2 +np.float64,0x3fbe26ccda3c4da0,0x3fbe38a892279975,2 +np.float64,0x3fcc1873103830e6,0x3fcc5257a6ae168d,2 +np.float64,0xe7e000e9cfc00,0xe7e000e9cfc00,2 +np.float64,0xffda73852bb4e70a,0xfff0000000000000,2 +np.float64,0xbfe831be19f0637c,0xbfea90f1b34da3e5,2 +np.float64,0xbfeb568f3076ad1e,0xbfeec97eebfde862,2 +np.float64,0x510a6ad0a214e,0x510a6ad0a214e,2 +np.float64,0x3fe6ba7e35ed74fc,0x3fe8b032a9a28c6a,2 +np.float64,0xffeb5cdcff76b9b9,0xfff0000000000000,2 +np.float64,0x4f0a23e89e145,0x4f0a23e89e145,2 +np.float64,0x446ec20288dd9,0x446ec20288dd9,2 +np.float64,0x7fe2521b02e4a435,0x7ff0000000000000,2 +np.float64,0x8001cd2969e39a54,0x8001cd2969e39a54,2 +np.float64,0x3fdfe90600bfd20c,0x3fe09fdcca10001c,2 +np.float64,0x7fd660c5762cc18a,0x7ff0000000000000,2 +np.float64,0xbfb11b23aa223648,0xbfb11e661949b377,2 +np.float64,0x800e025285fc04a5,0x800e025285fc04a5,2 +np.float64,0xffb180bb18230178,0xfff0000000000000,2 +np.float64,0xaaf590df55eb2,0xaaf590df55eb2,2 +np.float64,0xbfe8637d9df0c6fb,0xbfead1ba429462ec,2 +np.float64,0x7fd2577866a4aef0,0x7ff0000000000000,2 +np.float64,0xbfcfb2ab5a3f6558,0xbfd002ee87f272b9,2 +np.float64,0x7fdd64ae2f3ac95b,0x7ff0000000000000,2 +np.float64,0xffd1a502c9234a06,0xfff0000000000000,2 +np.float64,0x7fc4be4b60297c96,0x7ff0000000000000,2 +np.float64,0xbfb46b712a28d6e0,0xbfb470fca9919172,2 +np.float64,0xffdef913033df226,0xfff0000000000000,2 +np.float64,0x3fd94a3545b2946b,0x3fd9f40431ce9f9c,2 +np.float64,0x7fef88a0b6ff1140,0x7ff0000000000000,2 +np.float64,0xbfbcc81876399030,0xbfbcd7a0ab6cb388,2 +np.float64,0x800a4acfdd9495a0,0x800a4acfdd9495a0,2 +np.float64,0xffe270b3d5e4e167,0xfff0000000000000,2 +np.float64,0xbfd23f601e247ec0,0xbfd27eeca50a49eb,2 +np.float64,0x7fec6e796a78dcf2,0x7ff0000000000000,2 +np.float64,0x3fb85e0c9630bc19,0x3fb867791ccd6c72,2 +np.float64,0x7fe49fc424a93f87,0x7ff0000000000000,2 +np.float64,0xbfe75a99fbaeb534,0xbfe97ba37663de4c,2 +np.float64,0xffe85011b630a023,0xfff0000000000000,2 +np.float64,0xffe5962e492b2c5c,0xfff0000000000000,2 +np.float64,0x6f36ed4cde6de,0x6f36ed4cde6de,2 +np.float64,0x3feb72170af6e42e,0x3feeefbe6f1a2084,2 +np.float64,0x80014d8d60629b1c,0x80014d8d60629b1c,2 +np.float64,0xbfe0eb40d321d682,0xbfe1b7e31f252bf1,2 +np.float64,0x31fe305663fc7,0x31fe305663fc7,2 
+np.float64,0x3fd2cd6381a59ac7,0x3fd312edc9868a4d,2 +np.float64,0xffcf0720793e0e40,0xfff0000000000000,2 +np.float64,0xbfeef1ef133de3de,0xbff1ffd5e1a3b648,2 +np.float64,0xbfd01c787aa038f0,0xbfd0482be3158a01,2 +np.float64,0x3fda3607c5b46c10,0x3fdaf3301e217301,2 +np.float64,0xffda9a9911b53532,0xfff0000000000000,2 +np.float64,0x3fc0b37c392166f8,0x3fc0bfa076f3c43e,2 +np.float64,0xbfe06591c760cb24,0xbfe11fad179ea12c,2 +np.float64,0x8006e369c20dc6d4,0x8006e369c20dc6d4,2 +np.float64,0x3fdf2912a8be5224,0x3fe033ff74b92f4d,2 +np.float64,0xffc0feb07821fd60,0xfff0000000000000,2 +np.float64,0xa4b938c949727,0xa4b938c949727,2 +np.float64,0x8008fe676571fccf,0x8008fe676571fccf,2 +np.float64,0xbfdda68459bb4d08,0xbfdeb8faab34fcbc,2 +np.float64,0xbfda18b419343168,0xbfdad360ca52ec7c,2 +np.float64,0x3febcbae35b7975c,0x3fef6cd51c9ebc15,2 +np.float64,0x3fbec615f63d8c30,0x3fbed912ba729926,2 +np.float64,0x7f99a831c8335063,0x7ff0000000000000,2 +np.float64,0x3fe663e8826cc7d1,0x3fe84330bd9aada8,2 +np.float64,0x70a9f9e6e1540,0x70a9f9e6e1540,2 +np.float64,0x8a13a5db14275,0x8a13a5db14275,2 +np.float64,0x7fc4330a3b286613,0x7ff0000000000000,2 +np.float64,0xbfe580c6136b018c,0xbfe728806cc7a99a,2 +np.float64,0x8000000000000001,0x8000000000000001,2 +np.float64,0xffec079d5df80f3a,0xfff0000000000000,2 +np.float64,0x8e1173c31c22f,0x8e1173c31c22f,2 +np.float64,0x3fe088456d21108b,0x3fe14712ca414103,2 +np.float64,0x3fe1b76f73636edf,0x3fe2a2b658557112,2 +np.float64,0xbfd4a1dd162943ba,0xbfd4fdd45cae8fb8,2 +np.float64,0x7fd60b46c8ac168d,0x7ff0000000000000,2 +np.float64,0xffe36cc3b166d987,0xfff0000000000000,2 +np.float64,0x3fdc2ae0cfb855c0,0x3fdd15f026773151,2 +np.float64,0xbfc41aa203283544,0xbfc42fd1b145fdd5,2 +np.float64,0xffed90c55fbb218a,0xfff0000000000000,2 +np.float64,0x3fe67e3a9aecfc75,0x3fe86440db65b4f6,2 +np.float64,0x7fd12dbeaba25b7c,0x7ff0000000000000,2 +np.float64,0xbfe1267c0de24cf8,0xbfe1fbb611bdf1e9,2 +np.float64,0x22e5619645cad,0x22e5619645cad,2 +np.float64,0x7fe327c72ea64f8d,0x7ff0000000000000,2 +np.float64,0x7fd2c3f545a587ea,0x7ff0000000000000,2 +np.float64,0x7fc7b689372f6d11,0x7ff0000000000000,2 +np.float64,0xc5e140bd8bc28,0xc5e140bd8bc28,2 +np.float64,0x3fccb3627a3966c5,0x3fccf11b44fa4102,2 +np.float64,0xbfd2cf725c259ee4,0xbfd315138d0e5dca,2 +np.float64,0x10000000000000,0x10000000000000,2 +np.float64,0xbfd3dfa8b627bf52,0xbfd431d17b235477,2 +np.float64,0xbfb82124e6304248,0xbfb82a4b6d9c2663,2 +np.float64,0x3fdcd590d9b9ab22,0x3fddd1d548806347,2 +np.float64,0x7fdee0cd1b3dc199,0x7ff0000000000000,2 +np.float64,0x8004ebfc60a9d7fa,0x8004ebfc60a9d7fa,2 +np.float64,0x3fe8eb818b71d704,0x3feb842679806108,2 +np.float64,0xffdd5e8fe63abd20,0xfff0000000000000,2 +np.float64,0xbfe3efcbd9e7df98,0xbfe54071436645ee,2 +np.float64,0x3fd5102557aa204b,0x3fd57203d31a05b8,2 +np.float64,0x3fe6318af7ec6316,0x3fe8041a177cbf96,2 +np.float64,0x3fdf3cecdabe79da,0x3fe03f2084ffbc78,2 +np.float64,0x7fe0ab6673a156cc,0x7ff0000000000000,2 +np.float64,0x800037d5c6c06fac,0x800037d5c6c06fac,2 +np.float64,0xffce58b86a3cb170,0xfff0000000000000,2 +np.float64,0xbfe3455d6ce68abb,0xbfe475034cecb2b8,2 +np.float64,0x991b663d3236d,0x991b663d3236d,2 +np.float64,0x3fda82d37c3505a7,0x3fdb46973da05c12,2 +np.float64,0x3f9b736fa036e6df,0x3f9b74471c234411,2 +np.float64,0x8001c96525e392cb,0x8001c96525e392cb,2 +np.float64,0x7ff0000000000000,0x7ff0000000000000,2 +np.float64,0xbfaf59122c3eb220,0xbfaf5e15f8b272b0,2 +np.float64,0xbf9aa7d288354fa0,0xbf9aa897d2a40cb5,2 +np.float64,0x8004a43428694869,0x8004a43428694869,2 +np.float64,0x7feead476dbd5a8e,0x7ff0000000000000,2 
+np.float64,0xffca150f81342a20,0xfff0000000000000,2 +np.float64,0x80047ec3bc88fd88,0x80047ec3bc88fd88,2 +np.float64,0xbfee3e5b123c7cb6,0xbff179c8b8334278,2 +np.float64,0x3fd172359f22e46b,0x3fd1a9ba6b1420a1,2 +np.float64,0x3fe8e5e242f1cbc5,0x3feb7cbcaefc4d5c,2 +np.float64,0x8007fb059a6ff60c,0x8007fb059a6ff60c,2 +np.float64,0xe3899e71c7134,0xe3899e71c7134,2 +np.float64,0x7fe3b98326a77305,0x7ff0000000000000,2 +np.float64,0x7fec4e206cb89c40,0x7ff0000000000000,2 +np.float64,0xbfa3b012c4276020,0xbfa3b150c13b3cc5,2 +np.float64,0xffefffffffffffff,0xfff0000000000000,2 +np.float64,0xffe28a5b9aa514b6,0xfff0000000000000,2 +np.float64,0xbfd76a6cc2aed4da,0xbfd7f10f4d04e7f6,2 +np.float64,0xbc2b1c0178564,0xbc2b1c0178564,2 +np.float64,0x6d9d444adb3a9,0x6d9d444adb3a9,2 +np.float64,0xbfdcadd368395ba6,0xbfdda6037b5c429c,2 +np.float64,0x3fe11891fde23124,0x3fe1ebc1c204b14b,2 +np.float64,0x3fdd66c3eebacd88,0x3fde72526b5304c4,2 +np.float64,0xbfe79d85612f3b0b,0xbfe9d1673bd1f6d6,2 +np.float64,0x3fed60abdabac158,0x3ff0d7426b3800a2,2 +np.float64,0xbfb0ffa54021ff48,0xbfb102d81073a9f0,2 +np.float64,0xd2452af5a48a6,0xd2452af5a48a6,2 +np.float64,0xf4b835c1e971,0xf4b835c1e971,2 +np.float64,0x7e269cdafc4d4,0x7e269cdafc4d4,2 +np.float64,0x800097a21d812f45,0x800097a21d812f45,2 +np.float64,0x3fdfcc85e8bf990c,0x3fe08fcf770fd456,2 +np.float64,0xd8d53155b1aa6,0xd8d53155b1aa6,2 +np.float64,0x7fb8ed658831daca,0x7ff0000000000000,2 +np.float64,0xbfec865415b90ca8,0xbff03a4584d719f9,2 +np.float64,0xffd8cda62a319b4c,0xfff0000000000000,2 +np.float64,0x273598d84e6b4,0x273598d84e6b4,2 +np.float64,0x7fd566b5c32acd6b,0x7ff0000000000000,2 +np.float64,0xff61d9d48023b400,0xfff0000000000000,2 +np.float64,0xbfec5c3bf4f8b878,0xbff01c594243337c,2 +np.float64,0x7fd1be0561a37c0a,0x7ff0000000000000,2 +np.float64,0xffeaee3271b5dc64,0xfff0000000000000,2 +np.float64,0x800c0e1931b81c33,0x800c0e1931b81c33,2 +np.float64,0xbfad1171583a22e0,0xbfad1570e5c466d2,2 +np.float64,0x7fd783b0fe2f0761,0x7ff0000000000000,2 +np.float64,0x7fc39903e6273207,0x7ff0000000000000,2 +np.float64,0xffe00003c5600007,0xfff0000000000000,2 +np.float64,0x35a7b9c06b50,0x35a7b9c06b50,2 +np.float64,0x7fee441a22bc8833,0x7ff0000000000000,2 +np.float64,0xff6e47fbc03c9000,0xfff0000000000000,2 +np.float64,0xbfd3c3c9c8a78794,0xbfd41499b1912534,2 +np.float64,0x82c9c87f05939,0x82c9c87f05939,2 +np.float64,0xbfedeb0fe4fbd620,0xbff13c573ce9d3d0,2 +np.float64,0x2b79298656f26,0x2b79298656f26,2 +np.float64,0xbf5ee44f003dc800,0xbf5ee4503353c0ba,2 +np.float64,0xbfe1dd264e63ba4c,0xbfe2ce68116c7bf6,2 +np.float64,0x3fece10b7579c217,0x3ff07b21b11799c6,2 +np.float64,0x3fba47143a348e28,0x3fba52e601adf24c,2 +np.float64,0xffe9816e7a7302dc,0xfff0000000000000,2 +np.float64,0x8009a8047fd35009,0x8009a8047fd35009,2 +np.float64,0x800ac28e4e95851d,0x800ac28e4e95851d,2 +np.float64,0x80093facf4f27f5a,0x80093facf4f27f5a,2 +np.float64,0x3ff0000000000000,0x3ff2cd9fc44eb982,2 +np.float64,0x3fe76a9857eed530,0x3fe99018a5895a4f,2 +np.float64,0xbfd13c59a3a278b4,0xbfd171e133df0b16,2 +np.float64,0x7feb43bc83368778,0x7ff0000000000000,2 +np.float64,0xbfe2970c5fa52e18,0xbfe3a74a434c6efe,2 +np.float64,0xffd091c380212388,0xfff0000000000000,2 +np.float64,0x3febb3b9d2f76774,0x3fef4b4af2bd8580,2 +np.float64,0x7fec66787ef8ccf0,0x7ff0000000000000,2 +np.float64,0xbf935e185826bc40,0xbf935e640557a354,2 +np.float64,0x979df1552f3be,0x979df1552f3be,2 +np.float64,0x7fc096ee73212ddc,0x7ff0000000000000,2 +np.float64,0xbfe9de88faf3bd12,0xbfecc7d1ae691d1b,2 +np.float64,0x7fdc733f06b8e67d,0x7ff0000000000000,2 
+np.float64,0xffd71be1a0ae37c4,0xfff0000000000000,2 +np.float64,0xb50dabd36a1b6,0xb50dabd36a1b6,2 +np.float64,0x7fce3d94d63c7b29,0x7ff0000000000000,2 +np.float64,0x7fbaf95e4435f2bc,0x7ff0000000000000,2 +np.float64,0x81a32a6f03466,0x81a32a6f03466,2 +np.float64,0xa99b5b4d5336c,0xa99b5b4d5336c,2 +np.float64,0x7f97c1eeb82f83dc,0x7ff0000000000000,2 +np.float64,0x3fe761636d6ec2c6,0x3fe98451160d2ffb,2 +np.float64,0xbfe3224ef5e6449e,0xbfe44b73eeadac52,2 +np.float64,0x7fde6feb0dbcdfd5,0x7ff0000000000000,2 +np.float64,0xbfee87f9ca7d0ff4,0xbff1b079e9d7f706,2 +np.float64,0x3fe46f4c9828de99,0x3fe5da2ab9609ea5,2 +np.float64,0xffb92fe882325fd0,0xfff0000000000000,2 +np.float64,0x80054bc63cea978d,0x80054bc63cea978d,2 +np.float64,0x3d988bea7b312,0x3d988bea7b312,2 +np.float64,0x3fe6468e1d6c8d1c,0x3fe81e64d37d39a8,2 +np.float64,0x3fd68eefc22d1de0,0x3fd7074264faeead,2 +np.float64,0xffb218a074243140,0xfff0000000000000,2 +np.float64,0x3fdbcb3b6cb79678,0x3fdcad011de40b7d,2 +np.float64,0x7fe3c161772782c2,0x7ff0000000000000,2 +np.float64,0x25575c904aaec,0x25575c904aaec,2 +np.float64,0x800fa43a8f5f4875,0x800fa43a8f5f4875,2 +np.float64,0x3fe41fc9e1e83f94,0x3fe57a25dd1a37f1,2 +np.float64,0x3fd895f4a7b12be9,0x3fd931e7b721a08a,2 +np.float64,0xce31469f9c629,0xce31469f9c629,2 +np.float64,0xffea0f55ca341eab,0xfff0000000000000,2 +np.float64,0xffe831c9ba306393,0xfff0000000000000,2 +np.float64,0x7fe2056f03a40add,0x7ff0000000000000,2 +np.float64,0x7fd6b075e02d60eb,0x7ff0000000000000,2 +np.float64,0x3fdfbef4273f7de8,0x3fe0882c1f59efc0,2 +np.float64,0x8005b9e094ab73c2,0x8005b9e094ab73c2,2 +np.float64,0x3fea881ac6351036,0x3fedad7a319b887c,2 +np.float64,0xbfe2c61c7ee58c39,0xbfe3de9a99d8a9c6,2 +np.float64,0x30b0d3786161b,0x30b0d3786161b,2 +np.float64,0x3fa51d56a02a3aad,0x3fa51edee2d2ecef,2 +np.float64,0x79745732f2e8c,0x79745732f2e8c,2 +np.float64,0x800d55b4907aab69,0x800d55b4907aab69,2 +np.float64,0xbfbe8fcf0a3d1fa0,0xbfbea267fbb5bfdf,2 +np.float64,0xbfd04e2756a09c4e,0xbfd07b74d079f9a2,2 +np.float64,0x3fc65170552ca2e1,0x3fc66e6eb00c82ed,2 +np.float64,0xbfb0674b8020ce98,0xbfb06a2b4771b64c,2 +np.float64,0x2059975840b34,0x2059975840b34,2 +np.float64,0x33d1385467a28,0x33d1385467a28,2 +np.float64,0x3fea41b74ff4836f,0x3fed4dc1a09e53cc,2 +np.float64,0xbfe8e08c9d71c119,0xbfeb75b4c59a6bec,2 +np.float64,0x7fdbbf14d6377e29,0x7ff0000000000000,2 +np.float64,0x3fcd8b71513b16e0,0x3fcdcec80174f9ad,2 +np.float64,0x5c50bc94b8a18,0x5c50bc94b8a18,2 +np.float64,0x969a18f52d343,0x969a18f52d343,2 +np.float64,0x3fd7ae44462f5c89,0x3fd8398bc34e395c,2 +np.float64,0xffdd0f8617ba1f0c,0xfff0000000000000,2 +np.float64,0xfff0000000000000,0xfff0000000000000,2 +np.float64,0xbfe2f9badb65f376,0xbfe41b771320ece8,2 +np.float64,0x3fd140bc7fa29,0x3fd140bc7fa29,2 +np.float64,0xbfe14523b5628a48,0xbfe21ee850972043,2 +np.float64,0x3feedd0336bdba06,0x3ff1f01afc1f3a06,2 +np.float64,0x800de423ad7bc848,0x800de423ad7bc848,2 +np.float64,0x4cef857c99df1,0x4cef857c99df1,2 +np.float64,0xbfea55e0e374abc2,0xbfed691e41d648dd,2 +np.float64,0x3fe70d7a18ae1af4,0x3fe91955a34d8094,2 +np.float64,0xbfc62fc3032c5f88,0xbfc64c3ec25decb8,2 +np.float64,0x3fc915abb5322b58,0x3fc93edac5cc73fe,2 +np.float64,0x69aaff66d3561,0x69aaff66d3561,2 +np.float64,0x5c6a90f2b8d53,0x5c6a90f2b8d53,2 +np.float64,0x3fefe30dc1bfc61c,0x3ff2b752257bdacd,2 +np.float64,0x3fef15db15fe2bb6,0x3ff21aea05601396,2 +np.float64,0xbfe353e5ac66a7cc,0xbfe48644e6553d1a,2 +np.float64,0x3fe6d30cffada61a,0x3fe8cf3e4c61ddac,2 +np.float64,0x7fb7857eb62f0afc,0x7ff0000000000000,2 +np.float64,0xbfdd9b53d23b36a8,0xbfdeac91a7af1340,2 
+np.float64,0x3fd1456357228ac7,0x3fd17b3f7d39b27a,2 +np.float64,0x3fb57d10ae2afa21,0x3fb5838702b806f4,2 +np.float64,0x800c59c96c98b393,0x800c59c96c98b393,2 +np.float64,0x7fc1f2413823e481,0x7ff0000000000000,2 +np.float64,0xbfa3983624273070,0xbfa3996fa26c419a,2 +np.float64,0x7fb28874ae2510e8,0x7ff0000000000000,2 +np.float64,0x3fe826d02a304da0,0x3fea82bec50bc0b6,2 +np.float64,0x8008d6f0d3d1ade2,0x8008d6f0d3d1ade2,2 +np.float64,0xffe7c970ca2f92e1,0xfff0000000000000,2 +np.float64,0x7fcf42bcaa3e8578,0x7ff0000000000000,2 +np.float64,0x7fda1ab517343569,0x7ff0000000000000,2 +np.float64,0xbfe7926a65ef24d5,0xbfe9c323dd890d5b,2 +np.float64,0xbfcaf6282d35ec50,0xbfcb294f36a0a33d,2 +np.float64,0x800ca49df8d9493c,0x800ca49df8d9493c,2 +np.float64,0xffea18d26af431a4,0xfff0000000000000,2 +np.float64,0x3fb72f276e2e5e50,0x3fb7374539fd1221,2 +np.float64,0xffa6b613842d6c20,0xfff0000000000000,2 +np.float64,0xbfeb3c7263f678e5,0xbfeea54cdb60b54c,2 +np.float64,0x3fc976d2ba32eda5,0x3fc9a1e83a058de4,2 +np.float64,0xbfe4acd4b0e959aa,0xbfe624d5d4f9b9a6,2 +np.float64,0x7fca410a0f348213,0x7ff0000000000000,2 +np.float64,0xbfde368f77bc6d1e,0xbfdf5910c8c8bcb0,2 +np.float64,0xbfed7412937ae825,0xbff0e55afc428453,2 +np.float64,0xffef6b7b607ed6f6,0xfff0000000000000,2 +np.float64,0xbfb936f17e326de0,0xbfb941629a53c694,2 +np.float64,0x800dbb0c469b7619,0x800dbb0c469b7619,2 +np.float64,0x800f68b0581ed161,0x800f68b0581ed161,2 +np.float64,0x3fe25b2aad64b656,0x3fe361266fa9c5eb,2 +np.float64,0xbfb87e445a30fc88,0xbfb887d676910c3f,2 +np.float64,0x6e6ba9b6dcd76,0x6e6ba9b6dcd76,2 +np.float64,0x3fad27ce583a4f9d,0x3fad2bd72782ffdb,2 +np.float64,0xbfec0bc5d638178c,0xbfefc6e8c8f9095f,2 +np.float64,0x7fcba4a296374944,0x7ff0000000000000,2 +np.float64,0x8004ca237cc99448,0x8004ca237cc99448,2 +np.float64,0xffe85b8c3270b718,0xfff0000000000000,2 +np.float64,0x7fe7ee3eddafdc7d,0x7ff0000000000000,2 +np.float64,0xffd275967ca4eb2c,0xfff0000000000000,2 +np.float64,0xbfa95bc3a032b780,0xbfa95e6b288ecf43,2 +np.float64,0x3fc9e3214b33c643,0x3fca10667e7e7ff4,2 +np.float64,0x8001b89c5d837139,0x8001b89c5d837139,2 +np.float64,0xbf8807dfc0300fc0,0xbf880803e3badfbd,2 +np.float64,0x800aca94b895952a,0x800aca94b895952a,2 +np.float64,0x7fd79534a02f2a68,0x7ff0000000000000,2 +np.float64,0x3fe1b81179e37023,0x3fe2a371d8cc26f0,2 +np.float64,0x800699539d6d32a8,0x800699539d6d32a8,2 +np.float64,0xffe51dfbb3aa3bf7,0xfff0000000000000,2 +np.float64,0xbfdfb775abbf6eec,0xbfe083f48be2f98f,2 +np.float64,0x3fe87979d7b0f2f4,0x3feaee701d959079,2 +np.float64,0x3fd8e4e6a731c9cd,0x3fd986d29f25f982,2 +np.float64,0x3fe3dadaaf67b5b6,0x3fe527520fb02920,2 +np.float64,0x8003c2262bc7844d,0x8003c2262bc7844d,2 +np.float64,0x800c930add392616,0x800c930add392616,2 +np.float64,0xffb7a152a22f42a8,0xfff0000000000000,2 +np.float64,0x80028fe03dc51fc1,0x80028fe03dc51fc1,2 +np.float64,0xffe32ae60c6655cc,0xfff0000000000000,2 +np.float64,0x3fea3527e4746a50,0x3fed3cbbf47f18eb,2 +np.float64,0x800a53059e14a60c,0x800a53059e14a60c,2 +np.float64,0xbfd79e3b202f3c76,0xbfd828672381207b,2 +np.float64,0xffeed7e2eb7dafc5,0xfff0000000000000,2 +np.float64,0x3fec51ed6778a3db,0x3ff01509e34df61d,2 +np.float64,0xbfd84bc577b0978a,0xbfd8e23ec55e42e8,2 +np.float64,0x2483aff849077,0x2483aff849077,2 +np.float64,0x6f57883adeaf2,0x6f57883adeaf2,2 +np.float64,0xffd3fd74d927faea,0xfff0000000000000,2 +np.float64,0x7fca49ec773493d8,0x7ff0000000000000,2 +np.float64,0x7fd08fe2e8211fc5,0x7ff0000000000000,2 +np.float64,0x800852086db0a411,0x800852086db0a411,2 +np.float64,0x3fe5b1f2c9eb63e6,0x3fe7654f511bafc6,2 
+np.float64,0xbfe01e2a58e03c54,0xbfe0cedb68f021e6,2 +np.float64,0x800988421d331085,0x800988421d331085,2 +np.float64,0xffd5038b18aa0716,0xfff0000000000000,2 +np.float64,0x8002c9264c85924d,0x8002c9264c85924d,2 +np.float64,0x3fd21ca302243946,0x3fd25ac653a71aab,2 +np.float64,0xbfea60d6e6f4c1ae,0xbfed78031d9dfa2b,2 +np.float64,0xffef97b6263f2f6b,0xfff0000000000000,2 +np.float64,0xbfd524732faa48e6,0xbfd5876ecc415dcc,2 +np.float64,0x660387e8cc072,0x660387e8cc072,2 +np.float64,0x7fcfc108a33f8210,0x7ff0000000000000,2 +np.float64,0x7febe5b0f877cb61,0x7ff0000000000000,2 +np.float64,0xbfa55fdfac2abfc0,0xbfa56176991851a8,2 +np.float64,0x25250f4c4a4a3,0x25250f4c4a4a3,2 +np.float64,0xffe2f6a2f2a5ed46,0xfff0000000000000,2 +np.float64,0x7fa754fcc02ea9f9,0x7ff0000000000000,2 +np.float64,0x3febd19dea37a33c,0x3fef75279f75d3b8,2 +np.float64,0xc5ed55218bdab,0xc5ed55218bdab,2 +np.float64,0x3fe72ff6b3ee5fed,0x3fe945388b979882,2 +np.float64,0xbfe16b854e22d70a,0xbfe24b10fc0dff14,2 +np.float64,0xffb22cbe10245980,0xfff0000000000000,2 +np.float64,0xa54246b54a849,0xa54246b54a849,2 +np.float64,0x3fe7f4cda76fe99c,0x3fea41edc74888b6,2 +np.float64,0x1,0x1,2 +np.float64,0x800d84acce9b095a,0x800d84acce9b095a,2 +np.float64,0xb0eef04761dde,0xb0eef04761dde,2 +np.float64,0x7ff4000000000000,0x7ffc000000000000,2 +np.float64,0xffecaf1dbb795e3b,0xfff0000000000000,2 +np.float64,0x90dbab8d21b76,0x90dbab8d21b76,2 +np.float64,0x3fe79584a9ef2b09,0x3fe9c71fa9e40eb5,2 diff --git a/.venv/lib/python3.12/site-packages/numpy/core/tests/data/umath-validation-set-tan.csv b/.venv/lib/python3.12/site-packages/numpy/core/tests/data/umath-validation-set-tan.csv new file mode 100644 index 00000000..083cdb2f --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/data/umath-validation-set-tan.csv @@ -0,0 +1,1429 @@ +dtype,input,output,ulperrortol +np.float32,0xfd97ece0,0xc11186e9,4 +np.float32,0x8013bb34,0x8013bb34,4 +np.float32,0x316389,0x316389,4 +np.float32,0x7f7fffff,0xbf1c9eca,4 +np.float32,0x3f7674bb,0x3fb7e450,4 +np.float32,0x80800000,0x80800000,4 +np.float32,0x7f5995e8,0xbf94106c,4 +np.float32,0x74527,0x74527,4 +np.float32,0x7f08caea,0xbeceddb6,4 +np.float32,0x2d49b2,0x2d49b2,4 +np.float32,0x3f74e5e4,0x3fb58695,4 +np.float32,0x3f3fcd51,0x3f6e1e81,4 +np.float32,0xbf4f3608,0xbf864d3d,4 +np.float32,0xbed974a0,0xbee78c70,4 +np.float32,0xff5f483c,0x3ecf3cb2,4 +np.float32,0x7f4532f4,0xc0b96f7b,4 +np.float32,0x3f0a4f7c,0x3f198cc0,4 +np.float32,0x210193,0x210193,4 +np.float32,0xfeebad7a,0xbf92eba8,4 +np.float32,0xfed29f74,0xc134cab6,4 +np.float32,0x803433a0,0x803433a0,4 +np.float32,0x64eb46,0x64eb46,4 +np.float32,0xbf54ef22,0xbf8c757b,4 +np.float32,0x3f3d5fdd,0x3f69a17b,4 +np.float32,0x80000001,0x80000001,4 +np.float32,0x800a837a,0x800a837a,4 +np.float32,0x6ff0be,0x6ff0be,4 +np.float32,0xfe8f1186,0x3f518820,4 +np.float32,0x804963e5,0x804963e5,4 +np.float32,0xfebaa59a,0x3fa1dbb0,4 +np.float32,0x637970,0x637970,4 +np.float32,0x3e722a6b,0x3e76c89a,4 +np.float32,0xff2b0478,0xbddccb5f,4 +np.float32,0xbf7bd85b,0xbfc06821,4 +np.float32,0x3ec33600,0x3ecd4126,4 +np.float32,0x3e0a43b9,0x3e0b1c69,4 +np.float32,0x7f7511b6,0xbe427083,4 +np.float32,0x3f28c114,0x3f465a73,4 +np.float32,0x3f179e1c,0x3f2c3e7c,4 +np.float32,0x7b2963,0x7b2963,4 +np.float32,0x3f423d06,0x3f72b442,4 +np.float32,0x3f5a24c6,0x3f925508,4 +np.float32,0xff18c834,0xbf79b5c8,4 +np.float32,0x3f401ece,0x3f6eb6ac,4 +np.float32,0x7b8a3013,0xbffab968,4 +np.float32,0x80091ff0,0x80091ff0,4 +np.float32,0x3f389c51,0x3f610b47,4 +np.float32,0x5ea174,0x5ea174,4 
+np.float32,0x807a9eb2,0x807a9eb2,4 +np.float32,0x806ce61e,0x806ce61e,4 +np.float32,0xbe956acc,0xbe99cefc,4 +np.float32,0x7e60e247,0xbf5e64a5,4 +np.float32,0x7f398e24,0x404d12ed,4 +np.float32,0x3d9049f8,0x3d908735,4 +np.float32,0x7db17ffc,0xbf5b3d87,4 +np.float32,0xff453f78,0xc0239c9f,4 +np.float32,0x3f024aac,0x3f0ed802,4 +np.float32,0xbe781c30,0xbe7d1508,4 +np.float32,0x3f77962a,0x3fb9a28e,4 +np.float32,0xff7fffff,0x3f1c9eca,4 +np.float32,0x3f7152e3,0x3fb03f9d,4 +np.float32,0xff7cb167,0x3f9ce831,4 +np.float32,0x3e763e30,0x3e7b1a10,4 +np.float32,0xbf126527,0xbf24c253,4 +np.float32,0x803f6660,0x803f6660,4 +np.float32,0xbf79de38,0xbfbd38b1,4 +np.float32,0x8046c2f0,0x8046c2f0,4 +np.float32,0x6dc74e,0x6dc74e,4 +np.float32,0xbec9c45e,0xbed4e768,4 +np.float32,0x3f0eedb6,0x3f1fe610,4 +np.float32,0x7e031999,0xbcc13026,4 +np.float32,0x7efc2fd7,0x41e4b284,4 +np.float32,0xbeab7454,0xbeb22a1b,4 +np.float32,0x805ee67b,0x805ee67b,4 +np.float32,0x7f76e58e,0xc2436659,4 +np.float32,0xbe62b024,0xbe667718,4 +np.float32,0x3eea0808,0x3efbd182,4 +np.float32,0xbf7fd00c,0xbfc70719,4 +np.float32,0x7f27b640,0xbf0d97e0,4 +np.float32,0x3f1b58a4,0x3f31b6f4,4 +np.float32,0x252a9f,0x252a9f,4 +np.float32,0x7f65f95a,0xbead5de3,4 +np.float32,0xfc6ea780,0x42d15801,4 +np.float32,0x7eac4c52,0xc0682424,4 +np.float32,0xbe8a3f5a,0xbe8db54d,4 +np.float32,0xbf1644e2,0xbf2a4abd,4 +np.float32,0x3fc96a,0x3fc96a,4 +np.float32,0x7f38c0e4,0x3cc04af8,4 +np.float32,0x3f623d75,0x3f9c065d,4 +np.float32,0x3ee6a51a,0x3ef7a058,4 +np.float32,0x3dd11020,0x3dd1cacf,4 +np.float32,0xb6918,0xb6918,4 +np.float32,0xfdd7a540,0x3f22f081,4 +np.float32,0x80798563,0x80798563,4 +np.float32,0x3e9a8b7a,0x3e9f6a7e,4 +np.float32,0xbea515d4,0xbeab0df5,4 +np.float32,0xbea9b9f4,0xbeb03abe,4 +np.float32,0xbf11a5fa,0xbf23b478,4 +np.float32,0xfd6cadf0,0xbfa2a878,4 +np.float32,0xbf6edd07,0xbfacbb78,4 +np.float32,0xff5c5328,0x3e2d1552,4 +np.float32,0xbea2f788,0xbea8b3f5,4 +np.float32,0x802efaeb,0x802efaeb,4 +np.float32,0xff1c85e5,0x41f8560e,4 +np.float32,0x3f53b123,0x3f8b18e1,4 +np.float32,0xff798c4a,0x4092e66f,4 +np.float32,0x7f2e6fe7,0xbdcbd58f,4 +np.float32,0xfe8a8196,0x3fd7fc56,4 +np.float32,0x5e7ad4,0x5e7ad4,4 +np.float32,0xbf23a02d,0xbf3e4533,4 +np.float32,0x3f31c55c,0x3f5531bf,4 +np.float32,0x80331be3,0x80331be3,4 +np.float32,0x8056960a,0x8056960a,4 +np.float32,0xff1c06ae,0xbfd26992,4 +np.float32,0xbe0cc4b0,0xbe0da96c,4 +np.float32,0x7e925ad5,0xbf8dba54,4 +np.float32,0x2c8cec,0x2c8cec,4 +np.float32,0x8011951e,0x8011951e,4 +np.float32,0x3f2caf84,0x3f4cb89f,4 +np.float32,0xbd32c220,0xbd32df33,4 +np.float32,0xbec358d6,0xbecd6996,4 +np.float32,0x3f6e4930,0x3fabeb92,4 +np.float32,0xbf6a3afd,0xbfa65a3a,4 +np.float32,0x80067764,0x80067764,4 +np.float32,0x3d8df1,0x3d8df1,4 +np.float32,0x7ee51cf2,0x409e4061,4 +np.float32,0x435f5d,0x435f5d,4 +np.float32,0xbf5b17f7,0xbf936ebe,4 +np.float32,0x3ecaacb5,0x3ed5f81f,4 +np.float32,0x807b0aa5,0x807b0aa5,4 +np.float32,0x52b40b,0x52b40b,4 +np.float32,0x146a97,0x146a97,4 +np.float32,0x7f42b952,0xbfdcb413,4 +np.float32,0xbf1a1af2,0xbf2fe1bb,4 +np.float32,0x3f312034,0x3f541aa2,4 +np.float32,0x3f281d60,0x3f4554f9,4 +np.float32,0x50e451,0x50e451,4 +np.float32,0xbe45838c,0xbe480016,4 +np.float32,0xff7d0aeb,0x3eb0746e,4 +np.float32,0x7f32a489,0xbf96af6d,4 +np.float32,0xbf1b4e27,0xbf31a769,4 +np.float32,0x3f242936,0x3f3f1a44,4 +np.float32,0xbf7482ff,0xbfb4f201,4 +np.float32,0x4bda38,0x4bda38,4 +np.float32,0xbf022208,0xbf0ea2bb,4 +np.float32,0x7d08ca95,0xbe904602,4 +np.float32,0x7ed2f356,0xc02b55ad,4 +np.float32,0xbf131204,0xbf25b734,4 
+np.float32,0xff3464b4,0x3fb23706,4 +np.float32,0x5a97cf,0x5a97cf,4 +np.float32,0xbe52db70,0xbe55e388,4 +np.float32,0x3f52934f,0x3f89e2aa,4 +np.float32,0xfeea866a,0x40a2b33f,4 +np.float32,0x80333925,0x80333925,4 +np.float32,0xfef5d13e,0xc00139ec,4 +np.float32,0x3f4750ab,0x3f7c87ad,4 +np.float32,0x3e41bfdd,0x3e44185a,4 +np.float32,0xbf5b0572,0xbf935935,4 +np.float32,0xbe93c9da,0xbe9808d8,4 +np.float32,0x7f501f33,0xc0f9973c,4 +np.float32,0x800af035,0x800af035,4 +np.float32,0x3f29faf8,0x3f4852a8,4 +np.float32,0xbe1e4c20,0xbe1f920c,4 +np.float32,0xbf7e8616,0xbfc4d79d,4 +np.float32,0x43ffbf,0x43ffbf,4 +np.float32,0x7f28e8a9,0xbfa1ac24,4 +np.float32,0xbf1f9f92,0xbf3820bc,4 +np.float32,0x3f07e004,0x3f1641c4,4 +np.float32,0x3ef7ea7f,0x3f06a64a,4 +np.float32,0x7e013101,0x3f6080e6,4 +np.float32,0x7f122a4f,0xbf0a796f,4 +np.float32,0xfe096960,0x3ed7273a,4 +np.float32,0x3f06abf1,0x3f14a4b2,4 +np.float32,0x3e50ded3,0x3e53d0f1,4 +np.float32,0x7f50b346,0x3eabb536,4 +np.float32,0xff5adb0f,0xbd441972,4 +np.float32,0xbecefe46,0xbedb0f66,4 +np.float32,0x7da70bd4,0xbec66273,4 +np.float32,0x169811,0x169811,4 +np.float32,0xbee4dfee,0xbef5721a,4 +np.float32,0x3efbeae3,0x3f0936e6,4 +np.float32,0x8031bd61,0x8031bd61,4 +np.float32,0x8048e443,0x8048e443,4 +np.float32,0xff209aa6,0xbeb364cb,4 +np.float32,0xff477499,0x3c1b0041,4 +np.float32,0x803fe929,0x803fe929,4 +np.float32,0x3f70158b,0x3fae7725,4 +np.float32,0x7f795723,0x3e8e850a,4 +np.float32,0x3cba99,0x3cba99,4 +np.float32,0x80588d2a,0x80588d2a,4 +np.float32,0x805d1f05,0x805d1f05,4 +np.float32,0xff4ac09a,0xbefe614d,4 +np.float32,0x804af084,0x804af084,4 +np.float32,0x7c64ae63,0xc1a8b563,4 +np.float32,0x8078d793,0x8078d793,4 +np.float32,0x7f3e2436,0xbf8bf9d3,4 +np.float32,0x7ccec1,0x7ccec1,4 +np.float32,0xbf6462c7,0xbf9eb830,4 +np.float32,0x3f1002ca,0x3f216843,4 +np.float32,0xfe878ca6,0x409e73a5,4 +np.float32,0x3bd841d9,0x3bd842a7,4 +np.float32,0x7d406f41,0xbd9dcfa3,4 +np.float32,0x7c6d6,0x7c6d6,4 +np.float32,0x3f4ef360,0x3f86074b,4 +np.float32,0x805f534a,0x805f534a,4 +np.float32,0x1,0x1,4 +np.float32,0x3f739ee2,0x3fb39db2,4 +np.float32,0x3d0c2352,0x3d0c3153,4 +np.float32,0xfe8a4f2c,0x3edd8add,4 +np.float32,0x3e52eaa0,0x3e55f362,4 +np.float32,0x7bde9758,0xbf5ba5cf,4 +np.float32,0xff422654,0xbf41e487,4 +np.float32,0x385e5b,0x385e5b,4 +np.float32,0x5751dd,0x5751dd,4 +np.float32,0xff6c671c,0xc03e2d6d,4 +np.float32,0x1458be,0x1458be,4 +np.float32,0x80153d4d,0x80153d4d,4 +np.float32,0x7efd2adb,0x3e25458f,4 +np.float32,0xbe161880,0xbe172e12,4 +np.float32,0x7ecea1aa,0x40a66d79,4 +np.float32,0xbf5b02a2,0xbf9355f0,4 +np.float32,0x15d9ab,0x15d9ab,4 +np.float32,0x2dc7c7,0x2dc7c7,4 +np.float32,0xfebbf81a,0x4193f6e6,4 +np.float32,0xfe8e3594,0xc00a6695,4 +np.float32,0x185aa8,0x185aa8,4 +np.float32,0x3daea156,0x3daf0e00,4 +np.float32,0x3e071688,0x3e07e08e,4 +np.float32,0x802db9e6,0x802db9e6,4 +np.float32,0x7f7be2c4,0x3f1363dd,4 +np.float32,0x7eba3f5e,0xc13eb497,4 +np.float32,0x3de04a00,0x3de130a9,4 +np.float32,0xbf1022bc,0xbf2194eb,4 +np.float32,0xbf5b547e,0xbf93b53b,4 +np.float32,0x3e867bd6,0x3e89aa10,4 +np.float32,0xbea5eb5c,0xbeabfb73,4 +np.float32,0x7f1efae9,0x3ffca038,4 +np.float32,0xff5d0344,0xbe55dbbb,4 +np.float32,0x805167e7,0x805167e7,4 +np.float32,0xbdb3a020,0xbdb41667,4 +np.float32,0xbedea6b4,0xbeedd5fd,4 +np.float32,0x8053b45c,0x8053b45c,4 +np.float32,0x7ed370e9,0x3d90eba5,4 +np.float32,0xbefcd7da,0xbf09cf91,4 +np.float32,0x78b9ac,0x78b9ac,4 +np.float32,0xbf2f6dc0,0xbf5141ef,4 +np.float32,0x802d3a7b,0x802d3a7b,4 +np.float32,0xfd45d120,0x3fec31cc,4 
+np.float32,0xbf7e7020,0xbfc4b2af,4 +np.float32,0xf04da,0xf04da,4 +np.float32,0xbe9819d4,0xbe9cbd35,4 +np.float32,0x8075ab35,0x8075ab35,4 +np.float32,0xbf052fdc,0xbf12aa2c,4 +np.float32,0x3f1530d0,0x3f28bd9f,4 +np.float32,0x80791881,0x80791881,4 +np.float32,0x67f309,0x67f309,4 +np.float32,0x3f12f16a,0x3f2588f5,4 +np.float32,0x3ecdac47,0x3ed97ff8,4 +np.float32,0xbf297fb7,0xbf478c39,4 +np.float32,0x8069fa80,0x8069fa80,4 +np.float32,0x807f940e,0x807f940e,4 +np.float32,0xbf648dc8,0xbf9eeecb,4 +np.float32,0x3de873b0,0x3de9748d,4 +np.float32,0x3f1aa645,0x3f30af1f,4 +np.float32,0xff227a62,0x3d8283cc,4 +np.float32,0xbf37187d,0xbf5e5f4c,4 +np.float32,0x803b1b1f,0x803b1b1f,4 +np.float32,0x3f58142a,0x3f8ff8da,4 +np.float32,0x8004339e,0x8004339e,4 +np.float32,0xbf0f5654,0xbf2077a4,4 +np.float32,0x3f17e509,0x3f2ca598,4 +np.float32,0x3f800000,0x3fc75923,4 +np.float32,0xfdf79980,0x42f13047,4 +np.float32,0x7f111381,0x3f13c4c9,4 +np.float32,0xbea40c70,0xbea9e724,4 +np.float32,0x110520,0x110520,4 +np.float32,0x60490d,0x60490d,4 +np.float32,0x3f6703ec,0x3fa21951,4 +np.float32,0xbf098256,0xbf187652,4 +np.float32,0x658951,0x658951,4 +np.float32,0x3f53bf16,0x3f8b2818,4 +np.float32,0xff451811,0xc0026068,4 +np.float32,0x80777ee0,0x80777ee0,4 +np.float32,0x3e4fcc19,0x3e52b286,4 +np.float32,0x7f387ee0,0x3ce93eb6,4 +np.float32,0xff51181f,0xbfca3ee4,4 +np.float32,0xbf5655ae,0xbf8e0304,4 +np.float32,0xff2f1dcd,0x40025471,4 +np.float32,0x7f6e58e5,0xbe9930d5,4 +np.float32,0x7adf11,0x7adf11,4 +np.float32,0xbe9a2bc2,0xbe9f0185,4 +np.float32,0x8065d3a0,0x8065d3a0,4 +np.float32,0x3ed6e826,0x3ee47c45,4 +np.float32,0x80598ea0,0x80598ea0,4 +np.float32,0x7f10b90a,0x40437bd0,4 +np.float32,0x27b447,0x27b447,4 +np.float32,0x7ecd861c,0x3fce250f,4 +np.float32,0x0,0x0,4 +np.float32,0xbeba82d6,0xbec3394c,4 +np.float32,0xbf4958b0,0xbf8048ea,4 +np.float32,0x7c643e,0x7c643e,4 +np.float32,0x580770,0x580770,4 +np.float32,0x805bf54a,0x805bf54a,4 +np.float32,0x7f1f3cee,0xbe1a54d6,4 +np.float32,0xfefefdea,0x3fa84576,4 +np.float32,0x7f007b7a,0x3e8a6d25,4 +np.float32,0xbf177959,0xbf2c0919,4 +np.float32,0xbf30fda0,0xbf53e058,4 +np.float32,0x3f0576be,0x3f130861,4 +np.float32,0x3f49380e,0x3f80283a,4 +np.float32,0xebc56,0xebc56,4 +np.float32,0x654e3b,0x654e3b,4 +np.float32,0x14a4d8,0x14a4d8,4 +np.float32,0xff69b3cb,0xbf822a88,4 +np.float32,0xbe9b6c1c,0xbea06109,4 +np.float32,0xbefddd7e,0xbf0a787b,4 +np.float32,0x4c4ebb,0x4c4ebb,4 +np.float32,0x7d0a74,0x7d0a74,4 +np.float32,0xbebb5f80,0xbec43635,4 +np.float32,0x7ee79723,0xc1c7f3f3,4 +np.float32,0x7f2be4c7,0xbfa6c693,4 +np.float32,0x805bc7d5,0x805bc7d5,4 +np.float32,0x8042f12c,0x8042f12c,4 +np.float32,0x3ef91be8,0x3f07697b,4 +np.float32,0x3cf37ac0,0x3cf38d1c,4 +np.float32,0x800000,0x800000,4 +np.float32,0xbe1ebf4c,0xbe200806,4 +np.float32,0x7f380862,0xbeb512e8,4 +np.float32,0xbe320064,0xbe33d0fc,4 +np.float32,0xff300b0c,0xbfadb805,4 +np.float32,0x308a06,0x308a06,4 +np.float32,0xbf084f6e,0xbf16d7b6,4 +np.float32,0xff47cab6,0x3f892b65,4 +np.float32,0xbed99f4a,0xbee7bfd5,4 +np.float32,0xff7d74c0,0x3ee88c9a,4 +np.float32,0x3c3d23,0x3c3d23,4 +np.float32,0x8074bde8,0x8074bde8,4 +np.float32,0x80042164,0x80042164,4 +np.float32,0x3e97c92a,0x3e9c6500,4 +np.float32,0x3b80e0,0x3b80e0,4 +np.float32,0xbf16646a,0xbf2a783d,4 +np.float32,0x7f3b4cb1,0xc01339be,4 +np.float32,0xbf31f36e,0xbf557fd0,4 +np.float32,0x7f540618,0xbe5f6fc1,4 +np.float32,0x7eee47d0,0x40a27e94,4 +np.float32,0x7f12f389,0xbebed654,4 +np.float32,0x56cff5,0x56cff5,4 +np.float32,0x8056032b,0x8056032b,4 +np.float32,0x3ed34e40,0x3ee02e38,4 
+np.float32,0x7d51a908,0xbf19a90e,4 +np.float32,0x80000000,0x80000000,4 +np.float32,0xfdf73fd0,0xbf0f8cad,4 +np.float32,0x7ee4fe6d,0xbf1ea7e4,4 +np.float32,0x1f15ba,0x1f15ba,4 +np.float32,0xd18c3,0xd18c3,4 +np.float32,0x80797705,0x80797705,4 +np.float32,0x7ef07091,0x3f2f3b9a,4 +np.float32,0x7f552f41,0x3faf608c,4 +np.float32,0x3f779977,0x3fb9a7ad,4 +np.float32,0xfe1a7a50,0xbdadc4d1,4 +np.float32,0xbf449cf0,0xbf7740db,4 +np.float32,0xbe44e620,0xbe475cad,4 +np.float32,0x3f63a098,0x3f9dc2b5,4 +np.float32,0xfed40a12,0x4164533a,4 +np.float32,0x7a2bbb,0x7a2bbb,4 +np.float32,0xff7f7b9e,0xbeee8740,4 +np.float32,0x7ee27f8b,0x4233f53b,4 +np.float32,0xbf044c06,0xbf117c28,4 +np.float32,0xbeffde54,0xbf0bc49f,4 +np.float32,0xfeaef2e8,0x3ff258fe,4 +np.float32,0x527451,0x527451,4 +np.float32,0xbcef8d00,0xbcef9e7c,4 +np.float32,0xbf0e20c0,0xbf1ec9b2,4 +np.float32,0x8024afda,0x8024afda,4 +np.float32,0x7ef6cb3e,0x422cad0b,4 +np.float32,0x3c120,0x3c120,4 +np.float32,0xbf125c8f,0xbf24b62c,4 +np.float32,0x7e770a93,0x402c9d86,4 +np.float32,0xbd30a4e0,0xbd30c0ee,4 +np.float32,0xbf4d3388,0xbf843530,4 +np.float32,0x3f529072,0x3f89df92,4 +np.float32,0xff0270b1,0xbf81be9a,4 +np.float32,0x5e07e7,0x5e07e7,4 +np.float32,0x7bec32,0x7bec32,4 +np.float32,0x7fc00000,0x7fc00000,4 +np.float32,0x3e3ba5e0,0x3e3dc6e9,4 +np.float32,0x3ecb62d4,0x3ed6ce2c,4 +np.float32,0x3eb3dde8,0x3ebba68f,4 +np.float32,0x8063f952,0x8063f952,4 +np.float32,0x7f204aeb,0x3e88614e,4 +np.float32,0xbeae1ddc,0xbeb5278e,4 +np.float32,0x6829e9,0x6829e9,4 +np.float32,0xbf361a99,0xbf5ca354,4 +np.float32,0xbf24fbe6,0xbf406326,4 +np.float32,0x3f329d41,0x3f56a061,4 +np.float32,0xfed6d666,0x3e8f71a5,4 +np.float32,0x337f92,0x337f92,4 +np.float32,0xbe1c4970,0xbe1d8305,4 +np.float32,0xbe6b7e18,0xbe6fbbde,4 +np.float32,0x3f2267b9,0x3f3c61da,4 +np.float32,0xbee1ee94,0xbef1d628,4 +np.float32,0x7ecffc1a,0x3f02987e,4 +np.float32,0xbe9b1306,0xbe9fff3b,4 +np.float32,0xbeffacae,0xbf0ba468,4 +np.float32,0x7f800000,0xffc00000,4 +np.float32,0xfefc9aa8,0xc19de2a3,4 +np.float32,0x7d7185bb,0xbf9090ec,4 +np.float32,0x7edfbafd,0x3fe9352f,4 +np.float32,0x4ef2ec,0x4ef2ec,4 +np.float32,0x7f4cab2e,0xbff4e5dd,4 +np.float32,0xff3b1788,0x3e3c22e9,4 +np.float32,0x4e15ee,0x4e15ee,4 +np.float32,0xbf5451e6,0xbf8bc8a7,4 +np.float32,0x3f7f6d2e,0x3fc65e8b,4 +np.float32,0xbf1d9184,0xbf35071b,4 +np.float32,0xbf3a81cf,0xbf646d9b,4 +np.float32,0xbe71acc4,0xbe7643ab,4 +np.float32,0x528b7d,0x528b7d,4 +np.float32,0x2cb1d0,0x2cb1d0,4 +np.float32,0x3f324bf8,0x3f56161a,4 +np.float32,0x80709a21,0x80709a21,4 +np.float32,0x4bc448,0x4bc448,4 +np.float32,0x3e8bd600,0x3e8f6b7a,4 +np.float32,0xbeb97d30,0xbec20dd6,4 +np.float32,0x2a5669,0x2a5669,4 +np.float32,0x805f2689,0x805f2689,4 +np.float32,0xfe569f50,0x3fc51952,4 +np.float32,0x1de44c,0x1de44c,4 +np.float32,0x3ec7036c,0x3ed1ae67,4 +np.float32,0x8052b8e5,0x8052b8e5,4 +np.float32,0xff740a6b,0x3f4981a8,4 +np.float32,0xfee9bb70,0xc05e23be,4 +np.float32,0xff4e12c9,0x4002b4ad,4 +np.float32,0x803de0c2,0x803de0c2,4 +np.float32,0xbf433a07,0xbf74966f,4 +np.float32,0x803e60ca,0x803e60ca,4 +np.float32,0xbf19ee98,0xbf2fa07a,4 +np.float32,0x92929,0x92929,4 +np.float32,0x7f709c27,0x4257ba2d,4 +np.float32,0x803167c6,0x803167c6,4 +np.float32,0xbf095ead,0xbf184607,4 +np.float32,0x617060,0x617060,4 +np.float32,0x2d85b3,0x2d85b3,4 +np.float32,0x53d20b,0x53d20b,4 +np.float32,0x3e046838,0x3e052666,4 +np.float32,0xbe7c5fdc,0xbe80ce4b,4 +np.float32,0x3d18d060,0x3d18e289,4 +np.float32,0x804dc031,0x804dc031,4 +np.float32,0x3f224166,0x3f3c26cd,4 +np.float32,0x7d683e3c,0xbea24f25,4 
+np.float32,0xbf3a92aa,0xbf648be4,4 +np.float32,0x8072670b,0x8072670b,4 +np.float32,0xbe281aec,0xbe29a1bc,4 +np.float32,0x7f09d918,0xc0942490,4 +np.float32,0x7ca9fd07,0x4018b990,4 +np.float32,0x7d36ac5d,0x3cf57184,4 +np.float32,0x8039b62f,0x8039b62f,4 +np.float32,0x6cad7b,0x6cad7b,4 +np.float32,0x3c0fd9ab,0x3c0fda9d,4 +np.float32,0x80299883,0x80299883,4 +np.float32,0x3c2d0e3e,0x3c2d0fe4,4 +np.float32,0x8002cf62,0x8002cf62,4 +np.float32,0x801dde97,0x801dde97,4 +np.float32,0x80411856,0x80411856,4 +np.float32,0x6ebce8,0x6ebce8,4 +np.float32,0x7b7d1a,0x7b7d1a,4 +np.float32,0x8031d3de,0x8031d3de,4 +np.float32,0x8005c4ab,0x8005c4ab,4 +np.float32,0xbf7dd803,0xbfc3b3ef,4 +np.float32,0x8017ae60,0x8017ae60,4 +np.float32,0xfe9316ce,0xbfe0544a,4 +np.float32,0x3f136bfe,0x3f2636ff,4 +np.float32,0x3df87b80,0x3df9b57d,4 +np.float32,0xff44c356,0xbf11c7ad,4 +np.float32,0x4914ae,0x4914ae,4 +np.float32,0x80524c21,0x80524c21,4 +np.float32,0x805c7dc8,0x805c7dc8,4 +np.float32,0xfed3c0aa,0xbff0c0ab,4 +np.float32,0x7eb2bfbb,0xbf4600bc,4 +np.float32,0xfec8df84,0x3f5bd350,4 +np.float32,0x3e5431a4,0x3e5748c3,4 +np.float32,0xbee6a3a0,0xbef79e86,4 +np.float32,0xbf6cc9b2,0xbfa9d61a,4 +np.float32,0x3f132bd5,0x3f25dbd9,4 +np.float32,0x7e6d2e48,0x3f9d025b,4 +np.float32,0x3edf430c,0x3eee942d,4 +np.float32,0x3f0d1b8a,0x3f1d60e1,4 +np.float32,0xbdf2f688,0xbdf41bfb,4 +np.float32,0xbe47a284,0xbe4a33ff,4 +np.float32,0x3eaa9fbc,0x3eb13be7,4 +np.float32,0xfe98d45e,0x3eb84517,4 +np.float32,0x7efc23b3,0x3dcc1c99,4 +np.float32,0x3ca36242,0x3ca367ce,4 +np.float32,0x3f76a944,0x3fb834e3,4 +np.float32,0xbf45207c,0xbf783f9b,4 +np.float32,0x3e7c1220,0x3e80a4f8,4 +np.float32,0x3f018200,0x3f0dd14e,4 +np.float32,0x3f53cdde,0x3f8b3839,4 +np.float32,0xbdbacb58,0xbdbb5063,4 +np.float32,0x804af68d,0x804af68d,4 +np.float32,0x3e2c12fc,0x3e2db65b,4 +np.float32,0x3f039433,0x3f10895a,4 +np.float32,0x7ef5193d,0x3f4115f7,4 +np.float32,0x8030afbe,0x8030afbe,4 +np.float32,0x3f06fa2a,0x3f150d5d,4 +np.float32,0x3f124442,0x3f2493d2,4 +np.float32,0xbeb5b792,0xbebdc090,4 +np.float32,0xbedc90a4,0xbeeb4de9,4 +np.float32,0x3f3ff8,0x3f3ff8,4 +np.float32,0x3ee75bc5,0x3ef881e4,4 +np.float32,0xfe80e3de,0xbf5cd535,4 +np.float32,0xf52eb,0xf52eb,4 +np.float32,0x80660ee8,0x80660ee8,4 +np.float32,0x3e173a58,0x3e185648,4 +np.float32,0xfe49520c,0xbf728d7c,4 +np.float32,0xbecbb8ec,0xbed73373,4 +np.float32,0xbf027ae0,0xbf0f173e,4 +np.float32,0xbcab6740,0xbcab6da8,4 +np.float32,0xbf2a15e2,0xbf487e11,4 +np.float32,0x3b781b,0x3b781b,4 +np.float32,0x44f559,0x44f559,4 +np.float32,0xff6a0ca6,0xc174d7c3,4 +np.float32,0x6460ef,0x6460ef,4 +np.float32,0xfe58009c,0x3ee2bb30,4 +np.float32,0xfec3c038,0x3e30d617,4 +np.float32,0x7f0687c0,0xbf62c820,4 +np.float32,0xbf44655e,0xbf76d589,4 +np.float32,0xbf42968c,0xbf735e78,4 +np.float32,0x80385503,0x80385503,4 +np.float32,0xbea7e3a2,0xbeae2d59,4 +np.float32,0x3dd0b770,0x3dd17131,4 +np.float32,0xbf4bc185,0xbf82b907,4 +np.float32,0xfefd7d64,0xbee05650,4 +np.float32,0xfaac3c00,0xbff23bc9,4 +np.float32,0xbf562f0d,0xbf8dd7f4,4 +np.float32,0x7fa00000,0x7fe00000,4 +np.float32,0x3e01bdb8,0x3e027098,4 +np.float32,0x3e2868ab,0x3e29f19e,4 +np.float32,0xfec55f2e,0x3f39f304,4 +np.float32,0xed4e,0xed4e,4 +np.float32,0x3e2b7330,0x3e2d11fa,4 +np.float32,0x7f738542,0x40cbbe16,4 +np.float32,0x3f123521,0x3f247e71,4 +np.float32,0x73572c,0x73572c,4 +np.float32,0x804936c8,0x804936c8,4 +np.float32,0x803b80d8,0x803b80d8,4 +np.float32,0x7f566c57,0xbee2855a,4 +np.float32,0xff0e3bd8,0xbff0543f,4 +np.float32,0x7d2b2fe7,0xbf94ba4c,4 +np.float32,0xbf0da470,0xbf1e1dc2,4 
+np.float32,0xbd276500,0xbd277ce0,4 +np.float32,0xfcd15dc0,0x403ccc2a,4 +np.float32,0x80071e59,0x80071e59,4 +np.float32,0xbe9b0c34,0xbe9ff7be,4 +np.float32,0x3f4f9069,0x3f86ac50,4 +np.float32,0x80042a95,0x80042a95,4 +np.float32,0x7de28e39,0x3bc9b7f4,4 +np.float32,0xbf641935,0xbf9e5af8,4 +np.float32,0x8034f068,0x8034f068,4 +np.float32,0xff33a3d2,0xbf408e75,4 +np.float32,0xbcc51540,0xbcc51efc,4 +np.float32,0xff6d1ddf,0x3ef58f0e,4 +np.float32,0xbf64dfc4,0xbf9f5725,4 +np.float32,0xff068a06,0x3eea8987,4 +np.float32,0xff01c0af,0x3f24cdfe,4 +np.float32,0x3f4def7e,0x3f84f802,4 +np.float32,0xbf1b4ae7,0xbf31a299,4 +np.float32,0x8077df2d,0x8077df2d,4 +np.float32,0x3f0155c5,0x3f0d9785,4 +np.float32,0x5a54b2,0x5a54b2,4 +np.float32,0x7f271f9e,0x3efb2ef3,4 +np.float32,0xbf0ff2ec,0xbf215217,4 +np.float32,0x7f500130,0xbf8a7fdd,4 +np.float32,0xfed9891c,0xbf65c872,4 +np.float32,0xfecbfaae,0x403bdbc2,4 +np.float32,0x3f3a5aba,0x3f642772,4 +np.float32,0x7ebc681e,0xbd8df059,4 +np.float32,0xfe05e400,0xbfe35d74,4 +np.float32,0xbf295ace,0xbf4750ea,4 +np.float32,0x7ea055b2,0x3f62d6be,4 +np.float32,0xbd00b520,0xbd00bff9,4 +np.float32,0xbf7677aa,0xbfb7e8cf,4 +np.float32,0x3e83f788,0x3e86f816,4 +np.float32,0x801f6710,0x801f6710,4 +np.float32,0x801133cc,0x801133cc,4 +np.float32,0x41da2a,0x41da2a,4 +np.float32,0xff1622fd,0x3f023650,4 +np.float32,0x806c7a72,0x806c7a72,4 +np.float32,0x3f10779c,0x3f220bb4,4 +np.float32,0xbf08cf94,0xbf17848d,4 +np.float32,0xbecb55b4,0xbed6bebd,4 +np.float32,0xbf0a1528,0xbf193d7b,4 +np.float32,0x806a16bd,0x806a16bd,4 +np.float32,0xc222a,0xc222a,4 +np.float32,0x3930de,0x3930de,4 +np.float32,0x3f5c3588,0x3f94bca2,4 +np.float32,0x1215ad,0x1215ad,4 +np.float32,0x3ed15030,0x3eddcf67,4 +np.float32,0x7da83b2e,0x3fce0d39,4 +np.float32,0x32b0a8,0x32b0a8,4 +np.float32,0x805aed6b,0x805aed6b,4 +np.float32,0x3ef8e02f,0x3f074346,4 +np.float32,0xbdeb6780,0xbdec7250,4 +np.float32,0x3f6e3cec,0x3fabda61,4 +np.float32,0xfefd467a,0x3ef7821a,4 +np.float32,0xfef090fe,0x3bb752a2,4 +np.float32,0x8019c538,0x8019c538,4 +np.float32,0x3e8cf284,0x3e909e81,4 +np.float32,0xbe6c6618,0xbe70b0a2,4 +np.float32,0x7f50a539,0x3f367be1,4 +np.float32,0x8019fe2f,0x8019fe2f,4 +np.float32,0x800c3f48,0x800c3f48,4 +np.float32,0xfd054cc0,0xc0f52802,4 +np.float32,0x3d0cca20,0x3d0cd853,4 +np.float32,0xbf4a7c44,0xbf816e74,4 +np.float32,0x3f46fc40,0x3f7be153,4 +np.float32,0x807c5849,0x807c5849,4 +np.float32,0xd7e41,0xd7e41,4 +np.float32,0x70589b,0x70589b,4 +np.float32,0x80357b95,0x80357b95,4 +np.float32,0x3de239f0,0x3de326a5,4 +np.float32,0x800b08e3,0x800b08e3,4 +np.float32,0x807ec946,0x807ec946,4 +np.float32,0x3e2e4b83,0x3e2fff76,4 +np.float32,0x3f198e0f,0x3f2f12a6,4 +np.float32,0xbecb1aca,0xbed67979,4 +np.float32,0x80134082,0x80134082,4 +np.float32,0x3f3a269f,0x3f63ca05,4 +np.float32,0x3f1381e4,0x3f265622,4 +np.float32,0xff293080,0xbf10be6f,4 +np.float32,0xff800000,0xffc00000,4 +np.float32,0x37d196,0x37d196,4 +np.float32,0x7e57eea7,0x3e7d8138,4 +np.float32,0x804b1dae,0x804b1dae,4 +np.float32,0x7d9508f9,0xc1075b35,4 +np.float32,0x3f7bf468,0x3fc095e0,4 +np.float32,0x55472c,0x55472c,4 +np.float32,0x3ecdcd86,0x3ed9a738,4 +np.float32,0x3ed9be0f,0x3ee7e4e9,4 +np.float32,0x3e7e0ddb,0x3e81b2fe,4 +np.float32,0x7ee6c1d3,0x3f850634,4 +np.float32,0x800f6fad,0x800f6fad,4 +np.float32,0xfefb3bd6,0xbff68ecc,4 +np.float32,0x8013d6e2,0x8013d6e2,4 +np.float32,0x3f3a2cb6,0x3f63d4ee,4 +np.float32,0xff383c84,0x3e7854bb,4 +np.float32,0x3f21946e,0x3f3b1cea,4 +np.float32,0xff322ea2,0x3fb22f31,4 +np.float32,0x8065a024,0x8065a024,4 
+np.float32,0x7f395e30,0xbefe0de1,4 +np.float32,0x5b52db,0x5b52db,4 +np.float32,0x7f7caea7,0x3dac8ded,4 +np.float32,0xbf0431f8,0xbf1159b2,4 +np.float32,0x7f15b25b,0xc02a3833,4 +np.float32,0x80131abc,0x80131abc,4 +np.float32,0x7e829d81,0xbeb2e93d,4 +np.float32,0x3f2c64d7,0x3f4c3e4d,4 +np.float32,0x7f228d48,0xc1518c74,4 +np.float32,0xfc3c6f40,0xbf00d585,4 +np.float32,0x7f754f0f,0x3e2152f5,4 +np.float32,0xff65d32b,0xbe8bd56c,4 +np.float32,0xfea6b8c0,0x41608655,4 +np.float32,0x3f7d4b05,0x3fc2c96a,4 +np.float32,0x3f463230,0x3f7a54da,4 +np.float32,0x805117bb,0x805117bb,4 +np.float32,0xbf2ad4f7,0xbf49b30e,4 +np.float32,0x3eaa01ff,0x3eb08b56,4 +np.float32,0xff7a02bb,0x3f095f73,4 +np.float32,0x759176,0x759176,4 +np.float32,0x803c18d5,0x803c18d5,4 +np.float32,0xbe0722d8,0xbe07ed16,4 +np.float32,0x3f4b4a99,0x3f823fc6,4 +np.float32,0x3f7d0451,0x3fc25463,4 +np.float32,0xfee31e40,0xbfb41091,4 +np.float32,0xbf733d2c,0xbfb30cf1,4 +np.float32,0x7ed81015,0x417c380c,4 +np.float32,0x7daafc3e,0xbe2a37ed,4 +np.float32,0x3e44f82b,0x3e476f67,4 +np.float32,0x7c8d99,0x7c8d99,4 +np.float32,0x3f7aec5a,0x3fbee991,4 +np.float32,0xff09fd55,0x3e0709d3,4 +np.float32,0xff4ba4df,0x4173c01f,4 +np.float32,0x3f43d944,0x3f75c7bd,4 +np.float32,0xff6a9106,0x40a10eff,4 +np.float32,0x3bc8341c,0x3bc834bf,4 +np.float32,0x3eea82,0x3eea82,4 +np.float32,0xfea36a3c,0x435729b2,4 +np.float32,0x7dcc1fb0,0x3e330053,4 +np.float32,0x3f616ae6,0x3f9b01ae,4 +np.float32,0x8030963f,0x8030963f,4 +np.float32,0x10d1e2,0x10d1e2,4 +np.float32,0xfeb9a8a6,0x40e6daac,4 +np.float32,0xbe1aba00,0xbe1bea3a,4 +np.float32,0x3cb6b4ea,0x3cb6bcac,4 +np.float32,0x3d8b0b64,0x3d8b422f,4 +np.float32,0x7b6894,0x7b6894,4 +np.float32,0x3e89dcde,0x3e8d4b4b,4 +np.float32,0x3f12b952,0x3f253974,4 +np.float32,0x1c316c,0x1c316c,4 +np.float32,0x7e2da535,0x3f95fe6b,4 +np.float32,0x3ae9a494,0x3ae9a4a4,4 +np.float32,0xbc5f5500,0xbc5f588b,4 +np.float32,0x3e7850fc,0x3e7d4d0e,4 +np.float32,0xbf800000,0xbfc75923,4 +np.float32,0x3e652d69,0x3e691502,4 +np.float32,0xbf6bdd26,0xbfa89129,4 +np.float32,0x3f441cfc,0x3f764a02,4 +np.float32,0x7f5445ff,0xc0906191,4 +np.float32,0x807b2ee3,0x807b2ee3,4 +np.float32,0xbeb6cab8,0xbebef9c0,4 +np.float32,0xff737277,0xbf327011,4 +np.float32,0xfc832aa0,0x402fd52e,4 +np.float32,0xbf0c7538,0xbf1c7c0f,4 +np.float32,0x7e1301c7,0xbf0ee63e,4 +np.float64,0xbfe0ef7df7a1defc,0xbfe2b76a8d8aeb35,4 +np.float64,0x7fdd9c2eae3b385c,0xbfc00d6885485039,4 +np.float64,0xbfb484c710290990,0xbfb4900e0a527555,4 +np.float64,0x7fe73e5d6cee7cba,0x3fefbf70a56b60d3,4 +np.float64,0x800a110aa8d42216,0x800a110aa8d42216,4 +np.float64,0xffedd4f3f3bba9e7,0xbff076f8c4124919,4 +np.float64,0x800093407f812682,0x800093407f812682,4 +np.float64,0x800a23150e54462a,0x800a23150e54462a,4 +np.float64,0xbfb1076864220ed0,0xbfb10dd95a74b733,4 +np.float64,0x3fed1f8b37fa3f16,0x3ff496100985211f,4 +np.float64,0x3fdf762f84beec5f,0x3fe1223eb04a17e0,4 +np.float64,0x53fd4e0aa7faa,0x53fd4e0aa7faa,4 +np.float64,0x3fdbd283bdb7a507,0x3fddb7ec9856a546,4 +np.float64,0xbfe43f449d687e89,0xbfe77724a0d3072b,4 +np.float64,0x618b73bcc316f,0x618b73bcc316f,4 +np.float64,0x67759424ceeb3,0x67759424ceeb3,4 +np.float64,0xbfe4b6f7d9a96df0,0xbfe831371f3bd7a8,4 +np.float64,0x800a531b8b74a637,0x800a531b8b74a637,4 +np.float64,0xffeeffd5c37dffab,0x3fea140cbc2c3726,4 +np.float64,0x3fe648e2002c91c4,0x3feac1b8816f972a,4 +np.float64,0x800f16242a1e2c48,0x800f16242a1e2c48,4 +np.float64,0xffeeff8e1dbdff1b,0xc000b555f117dce7,4 +np.float64,0x3fdf1cf73fbe39f0,0x3fe0e9032401135b,4 +np.float64,0x7fe19c388b633870,0x3fd5271b69317d5b,4 
+np.float64,0x918f226d231e5,0x918f226d231e5,4 +np.float64,0x4cc19ab499834,0x4cc19ab499834,4 +np.float64,0xbd3121d57a624,0xbd3121d57a624,4 +np.float64,0xbfd145d334a28ba6,0xbfd1b468866124d6,4 +np.float64,0x8bdbf41517b7f,0x8bdbf41517b7f,4 +np.float64,0x3fd1b8cb3ea37198,0x3fd2306b13396cae,4 +np.float64,0xbfd632a959ac6552,0xbfd7220fcfb5ef78,4 +np.float64,0x1cdaafc639b57,0x1cdaafc639b57,4 +np.float64,0x3febdcce1577b99c,0x3ff2fe076195a2bc,4 +np.float64,0x7fca6e945934dd28,0x3ff43040df7024e8,4 +np.float64,0x3fbe08e78e3c11cf,0x3fbe2c60e6b48f75,4 +np.float64,0x7fc1ed0d0523da19,0x3ff55f8dcad9440f,4 +np.float64,0xbfdc729b8cb8e538,0xbfde7b6e15dd60c4,4 +np.float64,0x3fd219404f243281,0x3fd298d7b3546531,4 +np.float64,0x3fe715c3f56e2b88,0x3fec255b5a59456e,4 +np.float64,0x7fe8b88e74b1711c,0x3ff60efd2c81d13d,4 +np.float64,0xa1d2b9fd43a57,0xa1d2b9fd43a57,4 +np.float64,0xffc1818223230304,0xbfb85c6c1e8018e7,4 +np.float64,0x3fde38ac8b3c7159,0x3fe0580c7e228576,4 +np.float64,0x8008faf7b491f5f0,0x8008faf7b491f5f0,4 +np.float64,0xffe7a1d751af43ae,0xbf7114cd7bbcd981,4 +np.float64,0xffec2db1b4b85b62,0xbff5cae759667f83,4 +np.float64,0x7fefce1ae27f9c35,0x3ff4b8b88f4876cf,4 +np.float64,0x7fd1ff56a523feac,0xbff342ce192f14dd,4 +np.float64,0x80026b3e3f84d67d,0x80026b3e3f84d67d,4 +np.float64,0xffedee5879bbdcb0,0xc02fae11508b2be0,4 +np.float64,0x8003c0dc822781ba,0x8003c0dc822781ba,4 +np.float64,0xffe38a79eca714f4,0xc008aa23b7a63980,4 +np.float64,0xbfda70411eb4e082,0xbfdc0d7e29c89010,4 +np.float64,0x800a5e34f574bc6a,0x800a5e34f574bc6a,4 +np.float64,0x3fc19fac6e233f59,0x3fc1bc66ac0d73d4,4 +np.float64,0x3a8a61ea7514d,0x3a8a61ea7514d,4 +np.float64,0x3fb57b536e2af6a0,0x3fb588451f72f44c,4 +np.float64,0x7fd68c6d082d18d9,0xc032ac926b665c9a,4 +np.float64,0xd5b87cfdab710,0xd5b87cfdab710,4 +np.float64,0xfe80b20bfd017,0xfe80b20bfd017,4 +np.float64,0x3fef8781e37f0f04,0x3ff8215fe2c1315a,4 +np.float64,0xffedddbb9c3bbb76,0x3fd959b82258a32a,4 +np.float64,0x3fc7d41f382fa83e,0x3fc81b94c3a091ba,4 +np.float64,0xffc3275dcf264ebc,0x3fb2b3d4985c6078,4 +np.float64,0x7fe34d2b7ba69a56,0x40001f3618e3c7c9,4 +np.float64,0x3fd64ae35fac95c7,0x3fd73d77e0b730f8,4 +np.float64,0x800e53bf6b3ca77f,0x800e53bf6b3ca77f,4 +np.float64,0xbfddf7c9083bef92,0xbfe02f392744d2d1,4 +np.float64,0x1c237cc038471,0x1c237cc038471,4 +np.float64,0x3fe4172beea82e58,0x3fe739b4bf16bc7e,4 +np.float64,0xfa950523f52a1,0xfa950523f52a1,4 +np.float64,0xffc839a2c5307344,0xbff70ff8a3c9247f,4 +np.float64,0x264f828c4c9f1,0x264f828c4c9f1,4 +np.float64,0x148a650a2914e,0x148a650a2914e,4 +np.float64,0x3fe8d255c0b1a4ac,0x3fef623c3ea8d6e3,4 +np.float64,0x800f4fbb28be9f76,0x800f4fbb28be9f76,4 +np.float64,0x7fdca57bcfb94af7,0x3ff51207563fb6cb,4 +np.float64,0x3fe4944107692882,0x3fe7fad593235364,4 +np.float64,0x800119b4f1a2336b,0x800119b4f1a2336b,4 +np.float64,0xbfe734075e6e680e,0xbfec5b35381069f2,4 +np.float64,0xffeb3c00db767801,0xbfbbd7d22df7b4b3,4 +np.float64,0xbfe95c658cb2b8cb,0xbff03ad5e0bc888a,4 +np.float64,0xffeefeb58fbdfd6a,0xbfd5c9264deb0e11,4 +np.float64,0x7fccc80fde39901f,0xc012c60f914f3ca2,4 +np.float64,0x3fe5da289c2bb451,0x3fea07ad00a0ca63,4 +np.float64,0x800e364b0a5c6c96,0x800e364b0a5c6c96,4 +np.float64,0x3fcf9ea7d23f3d50,0x3fd023b72e8c9dcf,4 +np.float64,0x800a475cfc948eba,0x800a475cfc948eba,4 +np.float64,0xffd4e0d757a9c1ae,0xbfa89d573352e011,4 +np.float64,0xbfd4dbec8229b7da,0xbfd5a165f12c7c40,4 +np.float64,0xffe307ab51260f56,0x3fe6b1639da58c3f,4 +np.float64,0xbfe6955a546d2ab4,0xbfeb44ae2183fee9,4 +np.float64,0xbfca1f18f5343e30,0xbfca7d804ccccdf4,4 
+np.float64,0xe9f4dfebd3e9c,0xe9f4dfebd3e9c,4 +np.float64,0xfff0000000000000,0xfff8000000000000,4 +np.float64,0x8008e69c0fb1cd38,0x8008e69c0fb1cd38,4 +np.float64,0xbfead1ccf975a39a,0xbff1c84b3db8ca93,4 +np.float64,0x25a982424b531,0x25a982424b531,4 +np.float64,0x8010000000000000,0x8010000000000000,4 +np.float64,0x80056204ea0ac40b,0x80056204ea0ac40b,4 +np.float64,0x800d1442d07a2886,0x800d1442d07a2886,4 +np.float64,0xbfaef3dadc3de7b0,0xbfaefd85ae6205f0,4 +np.float64,0x7fe969ce4b32d39c,0xbff3c4364fc6778f,4 +np.float64,0x7fe418bac0a83175,0x402167d16b1efe0b,4 +np.float64,0x3fd7c82a25af9054,0x3fd8f0c701315672,4 +np.float64,0x80013782a7826f06,0x80013782a7826f06,4 +np.float64,0x7fc031c7ee20638f,0x400747ab705e6904,4 +np.float64,0x3fe8cf327ff19e65,0x3fef5c14f8aafa89,4 +np.float64,0xbfe331a416a66348,0xbfe5e2290a098dd4,4 +np.float64,0x800607b2116c0f65,0x800607b2116c0f65,4 +np.float64,0x7fb40448f0280891,0xbfd43d4f0ffa1d64,4 +np.float64,0x7fefffffffffffff,0xbf74530cfe729484,4 +np.float64,0x3fe39b5444a736a9,0x3fe67eaa0b6acf27,4 +np.float64,0x3fee4733c4fc8e68,0x3ff631eabeef9696,4 +np.float64,0xbfec840f3b79081e,0xbff3cc8563ab2e74,4 +np.float64,0xbfc8f6854c31ed0c,0xbfc948caacb3bba0,4 +np.float64,0xffbcf754a639eea8,0xbfc88d17cad3992b,4 +np.float64,0x8000bd3163417a64,0x8000bd3163417a64,4 +np.float64,0x3fe766d0eaeecda2,0x3fecb660882f7024,4 +np.float64,0xb6cc30156d986,0xb6cc30156d986,4 +np.float64,0xffc0161f9f202c40,0x3fe19bdefe5cf8b1,4 +np.float64,0xffe1e462caa3c8c5,0x3fe392c47feea17b,4 +np.float64,0x30a36a566146e,0x30a36a566146e,4 +np.float64,0x3fa996f580332deb,0x3fa99c6b4f2abebe,4 +np.float64,0x3fba71716e34e2e0,0x3fba899f35edba1d,4 +np.float64,0xbfe8f7e5e971efcc,0xbfefac431a0e3d55,4 +np.float64,0xf48f1803e91e3,0xf48f1803e91e3,4 +np.float64,0x7fe3edc0a127db80,0xc03d1a579a5d74a8,4 +np.float64,0xffeba82056375040,0x3fdfd701308700db,4 +np.float64,0xbfeb5a924cf6b524,0xbff2640de7cd107f,4 +np.float64,0xfa4cd1a9f499a,0xfa4cd1a9f499a,4 +np.float64,0x800de1be7b9bc37d,0x800de1be7b9bc37d,4 +np.float64,0xffd44e56ad289cae,0x3fdf4b8085db9b67,4 +np.float64,0xbfe4fb3aea69f676,0xbfe89d2cc46fcc50,4 +np.float64,0xbfe596495d6b2c92,0xbfe997a589a1f632,4 +np.float64,0x6f55a2b8deab5,0x6f55a2b8deab5,4 +np.float64,0x7fe72dc4712e5b88,0x4039c4586b28c2bc,4 +np.float64,0x89348bd712692,0x89348bd712692,4 +np.float64,0xffe062156120c42a,0x4005f0580973bc77,4 +np.float64,0xbfeabc714d7578e2,0xbff1b07e2fa57dc0,4 +np.float64,0x8003a56b3e874ad7,0x8003a56b3e874ad7,4 +np.float64,0x800eeadfb85dd5c0,0x800eeadfb85dd5c0,4 +np.float64,0x46d77a4c8daf0,0x46d77a4c8daf0,4 +np.float64,0x8000c06e7dc180de,0x8000c06e7dc180de,4 +np.float64,0x3fe428d211e851a4,0x3fe754b1c00a89bc,4 +np.float64,0xc5be11818b7c2,0xc5be11818b7c2,4 +np.float64,0x7fefc244893f8488,0x401133dc54f52de5,4 +np.float64,0x3fde30eee93c61de,0x3fe0532b827543a6,4 +np.float64,0xbfd447f48b288fea,0xbfd4fd0654f90718,4 +np.float64,0xbfde98dc7b3d31b8,0xbfe094df12f84a06,4 +np.float64,0x3fed2c1a1dfa5834,0x3ff4a6c4f3470a65,4 +np.float64,0xbfe992165073242d,0xbff071ab039c9177,4 +np.float64,0x3fd0145d1b2028ba,0x3fd06d3867b703dc,4 +np.float64,0x3fe179457362f28b,0x3fe3722f1d045fda,4 +np.float64,0x800e28964fbc512d,0x800e28964fbc512d,4 +np.float64,0x8004a5d785294bb0,0x8004a5d785294bb0,4 +np.float64,0xbfd652f2272ca5e4,0xbfd7469713125120,4 +np.float64,0x7fe61f49036c3e91,0xbf9b6ccdf2d87e70,4 +np.float64,0xffb7d47dd02fa8f8,0xc004449a82320b13,4 +np.float64,0x3feb82f996b705f3,0x3ff29336c738a4c5,4 +np.float64,0x3fbb7fceea36ffa0,0x3fbb9b02c8ad7f93,4 +np.float64,0x80004519fb208a35,0x80004519fb208a35,4 
+np.float64,0xbfe0539114e0a722,0xbfe1e86dc5aa039c,4 +np.float64,0x0,0x0,4 +np.float64,0xbfe99d1125f33a22,0xbff07cf8ec04300f,4 +np.float64,0xffd4fbeecc29f7de,0x3ffab76775a8455f,4 +np.float64,0xbfbf1c618e3e38c0,0xbfbf43d2764a8333,4 +np.float64,0x800cae02a9d95c06,0x800cae02a9d95c06,4 +np.float64,0x3febc47d3bf788fa,0x3ff2e0d7cf8ef509,4 +np.float64,0x3fef838f767f071f,0x3ff81aeac309bca0,4 +np.float64,0xbfd5e70716abce0e,0xbfd6ccb033ef7a35,4 +np.float64,0x3f9116fa60222df5,0x3f9117625f008e0b,4 +np.float64,0xffe02b1e5f20563c,0xbfe6b2ec293520b7,4 +np.float64,0xbf9b5aec3036b5e0,0xbf9b5c96c4c7f951,4 +np.float64,0xfdb0169bfb603,0xfdb0169bfb603,4 +np.float64,0x7fcdd1d51c3ba3a9,0x401f0e12fa0b7570,4 +np.float64,0xbfd088103fa11020,0xbfd0e8c4a333ffb2,4 +np.float64,0x3fe22df82ee45bf0,0x3fe46d03a7c14de2,4 +np.float64,0xbfd57b0c28aaf618,0xbfd65349a6191de5,4 +np.float64,0x3fe0a42f50a1485f,0x3fe252e26775d9a4,4 +np.float64,0x800fab4e363f569c,0x800fab4e363f569c,4 +np.float64,0xffe9f0ed63f3e1da,0xbfe278c341b171d5,4 +np.float64,0x7fe26c244664d848,0xbfb325269dad1996,4 +np.float64,0xffe830410bf06081,0xc00181a39f606e96,4 +np.float64,0x800c548a0c78a914,0x800c548a0c78a914,4 +np.float64,0x800f94761ebf28ec,0x800f94761ebf28ec,4 +np.float64,0x3fe5984845eb3091,0x3fe99aeb653c666d,4 +np.float64,0x7fe93e5bf8f27cb7,0xc010d159fa27396a,4 +np.float64,0xffefffffffffffff,0x3f74530cfe729484,4 +np.float64,0x4c83f1269907f,0x4c83f1269907f,4 +np.float64,0x3fde0065a8bc00cc,0x3fe034a1cdf026d4,4 +np.float64,0x800743810d6e8703,0x800743810d6e8703,4 +np.float64,0x80040662d5280cc6,0x80040662d5280cc6,4 +np.float64,0x3fed20b2c5ba4166,0x3ff497988519d7aa,4 +np.float64,0xffe8fa15e5f1f42b,0x3fff82ca76d797b4,4 +np.float64,0xbb72e22f76e5d,0xbb72e22f76e5d,4 +np.float64,0x7fc18ffa7c231ff4,0xbff4b8b4c3315026,4 +np.float64,0xbfe8d1ac44f1a358,0xbfef60efc4f821e3,4 +np.float64,0x3fd38c1fe8271840,0x3fd42dc37ff7262b,4 +np.float64,0xe577bee5caef8,0xe577bee5caef8,4 +np.float64,0xbff0000000000000,0xbff8eb245cbee3a6,4 +np.float64,0xffcb3a9dd436753c,0x3fcd1a3aff1c3fc7,4 +np.float64,0x7fe44bf2172897e3,0x3ff60bfe82a379f4,4 +np.float64,0x8009203823924071,0x8009203823924071,4 +np.float64,0x7fef8e0abc7f1c14,0x3fe90e4962d47ce5,4 +np.float64,0xffda50004434a000,0x3fb50dee03e1418b,4 +np.float64,0x7fe2ff276ea5fe4e,0xc0355b7d2a0a8d9d,4 +np.float64,0x3fd0711ba5a0e238,0x3fd0d03823d2d259,4 +np.float64,0xe7625b03cec4c,0xe7625b03cec4c,4 +np.float64,0xbfd492c8d7a92592,0xbfd55006cde8d300,4 +np.float64,0x8001fee99f23fdd4,0x8001fee99f23fdd4,4 +np.float64,0x7ff4000000000000,0x7ffc000000000000,4 +np.float64,0xfa15df97f42bc,0xfa15df97f42bc,4 +np.float64,0xbfec3fdca9787fb9,0xbff377164b13c7a9,4 +np.float64,0xbcec10e579d82,0xbcec10e579d82,4 +np.float64,0xbfc3b4e2132769c4,0xbfc3dd1fcc7150a6,4 +np.float64,0x80045b149ee8b62a,0x80045b149ee8b62a,4 +np.float64,0xffe044554c2088aa,0xbff741436d558785,4 +np.float64,0xffcc65f09f38cbe0,0xc0172b4adc2d317d,4 +np.float64,0xf68b2d3bed166,0xf68b2d3bed166,4 +np.float64,0x7fc7f44c572fe898,0x3fec69f3b1eca790,4 +np.float64,0x3fac51f61438a3ec,0x3fac595d34156002,4 +np.float64,0xbfeaa9f256f553e5,0xbff19bfdf5984326,4 +np.float64,0x800e4742149c8e84,0x800e4742149c8e84,4 +np.float64,0xbfc493df132927c0,0xbfc4c1ba4268ead9,4 +np.float64,0xbfbf0c56383e18b0,0xbfbf3389fcf50c72,4 +np.float64,0xbf978a0e082f1420,0xbf978b1dd1da3d3c,4 +np.float64,0xbfe04375356086ea,0xbfe1d34c57314dd1,4 +np.float64,0x3feaeeb29b75dd65,0x3ff1e8b772374979,4 +np.float64,0xbfe15e42c3a2bc86,0xbfe34d45d56c5c15,4 +np.float64,0x3fe507429a6a0e85,0x3fe8b058176b3225,4 
+np.float64,0x3feee2b26c3dc565,0x3ff71b73203de921,4 +np.float64,0xbfd496577aa92cae,0xbfd553fa7fe15a5f,4 +np.float64,0x7fe2c10953e58212,0x3fc8ead6a0d14bbf,4 +np.float64,0x800035b77aa06b70,0x800035b77aa06b70,4 +np.float64,0x2329201e46525,0x2329201e46525,4 +np.float64,0xbfe6225c9a6c44b9,0xbfea80861590fa02,4 +np.float64,0xbfd6925030ad24a0,0xbfd78e70b1c2215d,4 +np.float64,0xbfd82225c4b0444c,0xbfd958a60f845b39,4 +np.float64,0xbb03d8a17609,0xbb03d8a17609,4 +np.float64,0x7fc33967b12672ce,0x40001e00c9af4002,4 +np.float64,0xff9373c6d026e780,0xbff308654a459d3d,4 +np.float64,0x3feab1f9c5f563f4,0x3ff1a4e0fd2f093d,4 +np.float64,0xbf993ef768327de0,0xbf994046b64e308b,4 +np.float64,0xffb87382fc30e708,0xbfde0accb83c891b,4 +np.float64,0x800bb3a118176743,0x800bb3a118176743,4 +np.float64,0x800c810250d90205,0x800c810250d90205,4 +np.float64,0xbfd2c4eb9ba589d8,0xbfd3539508b4a4a8,4 +np.float64,0xbee1f5437dc3f,0xbee1f5437dc3f,4 +np.float64,0x3fc07aeab520f5d8,0x3fc0926272f9d8e2,4 +np.float64,0xbfe23747a3246e90,0xbfe47a20a6e98687,4 +np.float64,0x3fde1296debc252c,0x3fe0401143ff6b5c,4 +np.float64,0xbfcec8c2f73d9184,0xbfcf644e25ed3b74,4 +np.float64,0xff9314f2c82629e0,0x40559a0f9099dfd1,4 +np.float64,0xbfe27487afa4e910,0xbfe4d0e01200bde6,4 +np.float64,0xffb3d6637627acc8,0x3fe326d4b1e1834f,4 +np.float64,0xffe6f84d642df09a,0x3fc73fa9f57c3acb,4 +np.float64,0xffe67cf76fecf9ee,0xc01cf48c97937ef9,4 +np.float64,0x7fdc73fc12b8e7f7,0xbfcfcecde9331104,4 +np.float64,0xffdcf8789239f0f2,0x3fe345e3b8e28776,4 +np.float64,0x800a70af5314e15f,0x800a70af5314e15f,4 +np.float64,0xffc862300730c460,0x3fc4e9ea813beca7,4 +np.float64,0xbfcc6961bd38d2c4,0xbfcce33bfa6c6bd1,4 +np.float64,0xbfc9b76bbf336ed8,0xbfca117456ac37e5,4 +np.float64,0x7fb86e829430dd04,0x400a5bd7a18e302d,4 +np.float64,0x7fb9813ef833027d,0xbfe5a6494f143625,4 +np.float64,0x8005085e2c2a10bd,0x8005085e2c2a10bd,4 +np.float64,0xffe5af099d6b5e12,0x40369bbe31e03e06,4 +np.float64,0xffde03b1fd3c0764,0x3ff061120aa1f52a,4 +np.float64,0x7fa4eb6cdc29d6d9,0x3fe9defbe9010322,4 +np.float64,0x800803f4b11007ea,0x800803f4b11007ea,4 +np.float64,0x7febd50f6df7aa1e,0xbffcf540ccf220dd,4 +np.float64,0x7fed454f08fa8a9d,0xbffc2a8b81079403,4 +np.float64,0xbfed7e8c69bafd19,0xbff5161e51ba6634,4 +np.float64,0xffef92e78eff25ce,0xbffefeecddae0ad3,4 +np.float64,0x7fe5b9b413ab7367,0xbfc681ba29704176,4 +np.float64,0x29284e805252,0x29284e805252,4 +np.float64,0xffed3955bcfa72ab,0xbfc695acb5f468de,4 +np.float64,0x3fe464ee1ca8c9dc,0x3fe7b140ce50fdca,4 +np.float64,0xffe522ae4bea455c,0x3feb957c146e66ef,4 +np.float64,0x8000000000000000,0x8000000000000000,4 +np.float64,0x3fd0c353a2a186a8,0x3fd1283aaa43a411,4 +np.float64,0x3fdb30a749b6614f,0x3fdcf40df006ed10,4 +np.float64,0x800109213cc21243,0x800109213cc21243,4 +np.float64,0xbfe72aa0c5ee5542,0xbfec4a713f513bc5,4 +np.float64,0x800865344ad0ca69,0x800865344ad0ca69,4 +np.float64,0x7feb7df60eb6fbeb,0x3fb1df06a67aa22f,4 +np.float64,0x3fe83a5dd93074bc,0x3fee3d63cda72636,4 +np.float64,0xbfde70e548bce1ca,0xbfe07b8e19c9dac6,4 +np.float64,0xbfeea38d537d471b,0xbff6bb18c230c0be,4 +np.float64,0x3fefeebbc47fdd78,0x3ff8cdaa53b7c7b4,4 +np.float64,0x7fe6512e20eca25b,0xbff623cee44a22b5,4 +np.float64,0xf8fa5ca3f1f4c,0xf8fa5ca3f1f4c,4 +np.float64,0x7fd12d00ed225a01,0xbfe90d518ea61faf,4 +np.float64,0x80027db43504fb69,0x80027db43504fb69,4 +np.float64,0xffc10a01aa221404,0x3fcc2065b3d0157b,4 +np.float64,0xbfef8286e87f050e,0xbff8193a54449b59,4 +np.float64,0xbfc73178092e62f0,0xbfc7735072ba4593,4 +np.float64,0x3fc859d70630b3ae,0x3fc8a626522af1c0,4 +np.float64,0x3fe4654c4268ca99,0x3fe7b1d2913eda1a,4 
+np.float64,0xbfce93cd843d279c,0xbfcf2c2ef16a0957,4 +np.float64,0xffbcaa16d4395430,0xbfd511ced032d784,4 +np.float64,0xbfe91f980e723f30,0xbfeffb39cf8c7746,4 +np.float64,0x800556fb6f0aadf8,0x800556fb6f0aadf8,4 +np.float64,0xffd009cde520139c,0x3fe4fa83b1e93d28,4 +np.float64,0x7febc0675e3780ce,0x3feb53930c004dae,4 +np.float64,0xbfe7f975bdeff2ec,0xbfedc36e6729b010,4 +np.float64,0x45aff57c8b5ff,0x45aff57c8b5ff,4 +np.float64,0xbfec7ebd0138fd7a,0xbff3c5cab680aae0,4 +np.float64,0x8009448003b28900,0x8009448003b28900,4 +np.float64,0x3fca4b992d349732,0x3fcaabebcc86aa9c,4 +np.float64,0x3fca069161340d20,0x3fca63ecc742ff3a,4 +np.float64,0x80063bc80bec7791,0x80063bc80bec7791,4 +np.float64,0xbfe1764bffe2ec98,0xbfe36e1cb30cec94,4 +np.float64,0xffd0dba72f21b74e,0x3fb1834964d57ef6,4 +np.float64,0xbfe31848fc263092,0xbfe5bd066445cbc3,4 +np.float64,0xbfd1fb227323f644,0xbfd278334e27f02d,4 +np.float64,0xffdc59069fb8b20e,0xbfdfc363f559ea2c,4 +np.float64,0x3fdea52a52bd4a55,0x3fe09cada4e5344c,4 +np.float64,0x3f715e55a022bd00,0x3f715e5c72a2809e,4 +np.float64,0x1d1ac6023a35a,0x1d1ac6023a35a,4 +np.float64,0x7feacc71627598e2,0x400486b82121da19,4 +np.float64,0xa0287fa340510,0xa0287fa340510,4 +np.float64,0xffe352c5abe6a58b,0xc002623346060543,4 +np.float64,0x7fed577a23baaef3,0x3fda19bc8fa3b21f,4 +np.float64,0x3fde8dd5263d1baa,0x3fe08de0fedf7029,4 +np.float64,0x3feddd3be2bbba78,0x3ff599b2f3e018cc,4 +np.float64,0xc7a009f58f401,0xc7a009f58f401,4 +np.float64,0xbfef03d5a4fe07ab,0xbff74ee08681f47b,4 +np.float64,0x7fe2cf60eea59ec1,0x3fe905fb44f8cc60,4 +np.float64,0xbfe498fcab6931fa,0xbfe8023a6ff8becf,4 +np.float64,0xbfef7142acfee285,0xbff7fd196133a595,4 +np.float64,0xd214ffdba42a0,0xd214ffdba42a0,4 +np.float64,0x8006de7d78cdbcfc,0x8006de7d78cdbcfc,4 +np.float64,0xb247d34f648fb,0xb247d34f648fb,4 +np.float64,0xbfdd5bece6bab7da,0xbfdf9ba63ca2c5b2,4 +np.float64,0x7fe874650af0e8c9,0x3fe74204e122c10f,4 +np.float64,0x800768c49baed18a,0x800768c49baed18a,4 +np.float64,0x3fb4c0a192298140,0x3fb4cc4c8aa43300,4 +np.float64,0xbfa740531c2e80a0,0xbfa7446b7c74ae8e,4 +np.float64,0x7fe10d6edf221add,0x3fedbcd2eae26657,4 +np.float64,0xbfe9175d0f722eba,0xbfefeaca7f32c6e3,4 +np.float64,0x953e11d32a7c2,0x953e11d32a7c2,4 +np.float64,0x80032df90c465bf3,0x80032df90c465bf3,4 +np.float64,0xffec5b799638b6f2,0xbfe95cd2c69be12c,4 +np.float64,0xffe0c3cfa9a1879f,0x3fe20b99b0c108ce,4 +np.float64,0x3fb610d8e22c21b2,0x3fb61ee0d6c16df8,4 +np.float64,0xffe16bb39962d766,0xc016d370381b6b42,4 +np.float64,0xbfdc72edb238e5dc,0xbfde7bd2de10717a,4 +np.float64,0xffed52dee3baa5bd,0xc01994c08899129a,4 +np.float64,0xffa92aab08325550,0xbff2b881ce363cbd,4 +np.float64,0x7fe028282de0504f,0xc0157ff96c69a9c7,4 +np.float64,0xbfdb2151bf3642a4,0xbfdce196fcc35857,4 +np.float64,0x3fcffbd13c3ff7a2,0x3fd0554b5f0371ac,4 +np.float64,0x800d206bff1a40d8,0x800d206bff1a40d8,4 +np.float64,0x458f818c8b1f1,0x458f818c8b1f1,4 +np.float64,0x800a7b56a234f6ae,0x800a7b56a234f6ae,4 +np.float64,0xffe3d86161e7b0c2,0xbff58d0dbde9f188,4 +np.float64,0xe8ed82e3d1db1,0xe8ed82e3d1db1,4 +np.float64,0x3fe234e0176469c0,0x3fe476bd36b96a75,4 +np.float64,0xbfc7cb9c132f9738,0xbfc812c46e185e0b,4 +np.float64,0xbfeba116c1f7422e,0xbff2b6b7563ad854,4 +np.float64,0x7fe7041de62e083b,0x3f5d2b42aca47274,4 +np.float64,0xbfcf60f4ff3ec1e8,0xbfd002eb83406436,4 +np.float64,0xbfc06067a520c0d0,0xbfc0776e5839ecda,4 +np.float64,0x4384965a87093,0x4384965a87093,4 +np.float64,0xd2ed9d01a5db4,0xd2ed9d01a5db4,4 +np.float64,0x3fbea88cb63d5119,0x3fbece49cc34a379,4 +np.float64,0x3fe7e982ebefd306,0x3feda5bd4c435d43,4 
+np.float64,0xffdb60a3e036c148,0xbfcb7ed21e7a8f49,4 +np.float64,0x7fdba9231eb75245,0xbfd750cab1536398,4 +np.float64,0x800d593534dab26b,0x800d593534dab26b,4 +np.float64,0xffdf15fb683e2bf6,0x3fb3aaea23357f06,4 +np.float64,0xbfd6f8a2e5adf146,0xbfd802e509d67c67,4 +np.float64,0x3feeaa31513d5463,0x3ff6c52147dc053c,4 +np.float64,0xf2f6dfd3e5edc,0xf2f6dfd3e5edc,4 +np.float64,0x7fd58d8279ab1b04,0x403243f23d02af2a,4 +np.float64,0x8000000000000001,0x8000000000000001,4 +np.float64,0x3fdffb8e0ebff71c,0x3fe1786cb0a6b0f3,4 +np.float64,0xc999826b93331,0xc999826b93331,4 +np.float64,0xffc4966f19292ce0,0x3ff0836c75c56cc7,4 +np.float64,0x7fef95a4b2ff2b48,0xbfbbe2c27c78154f,4 +np.float64,0xb8f1307f71e26,0xb8f1307f71e26,4 +np.float64,0x3fe807bc7eb00f79,0x3fedde19f2d3c42d,4 +np.float64,0x5e4b6580bc98,0x5e4b6580bc98,4 +np.float64,0xffe19353576326a6,0xc0278c51fee07d36,4 +np.float64,0xbfb0ca6f3e2194e0,0xbfb0d09be673fa72,4 +np.float64,0x3fea724211b4e484,0x3ff15ee06f0a0a13,4 +np.float64,0xbfda21e1c4b443c4,0xbfdbb041f3c86832,4 +np.float64,0x8008082b24901057,0x8008082b24901057,4 +np.float64,0xbfd031aa4ea06354,0xbfd08c77729634bb,4 +np.float64,0xbfc407e153280fc4,0xbfc432275711df5f,4 +np.float64,0xbb4fa4b5769f5,0xbb4fa4b5769f5,4 +np.float64,0x7fed6d1daffada3a,0xc037a14bc7b41fab,4 +np.float64,0xffeee589943dcb12,0x3ff2abfe47037778,4 +np.float64,0x301379d260270,0x301379d260270,4 +np.float64,0xbfec2fefc2b85fe0,0xbff36362c0363e06,4 +np.float64,0xbfe0b1c82e216390,0xbfe264f503f7c22c,4 +np.float64,0xbfea2bce78f4579d,0xbff112d6f07935ea,4 +np.float64,0x18508ef230a13,0x18508ef230a13,4 +np.float64,0x800667a74d6ccf4f,0x800667a74d6ccf4f,4 +np.float64,0x79ce5c8cf39cc,0x79ce5c8cf39cc,4 +np.float64,0x3feda61c8efb4c39,0x3ff54c9ade076f54,4 +np.float64,0x3fe27e06b0e4fc0d,0x3fe4de665c1dc3ca,4 +np.float64,0xbfd15fea2722bfd4,0xbfd1d081c55813b0,4 +np.float64,0xbfe5222c4cea4458,0xbfe8db62deb7d2ad,4 +np.float64,0xbfe8a16c33b142d8,0xbfef02d5831592a8,4 +np.float64,0x3fdb60e7c4b6c1d0,0x3fdd2e4265c4c3b6,4 +np.float64,0x800076d62b60edad,0x800076d62b60edad,4 +np.float64,0xbfec8f1527791e2a,0xbff3da7ed3641e8d,4 +np.float64,0x2af03bfe55e08,0x2af03bfe55e08,4 +np.float64,0xa862ee0950c5e,0xa862ee0950c5e,4 +np.float64,0x7fea5a7c1eb4b4f7,0xbffa6f07d28ef211,4 +np.float64,0x90e118fb21c23,0x90e118fb21c23,4 +np.float64,0xbfead0721bf5a0e4,0xbff1c6c7a771a128,4 +np.float64,0x3f63f4a4c027e94a,0x3f63f4a75665da67,4 +np.float64,0x3fece0efa579c1e0,0x3ff443bec52f021e,4 +np.float64,0xbfdbe743b737ce88,0xbfddd129bff89c15,4 +np.float64,0x3fd48c9b8fa91938,0x3fd5492a630a8cb5,4 +np.float64,0x3ff0000000000000,0x3ff8eb245cbee3a6,4 +np.float64,0xbfd51ea33baa3d46,0xbfd5ebd5dc710204,4 +np.float64,0x3fcfbab0183f7560,0x3fd032a054580b00,4 +np.float64,0x8007abce13cf579d,0x8007abce13cf579d,4 +np.float64,0xbfef0f4723be1e8e,0xbff760c7008e8913,4 +np.float64,0x8006340f524c681f,0x8006340f524c681f,4 +np.float64,0x87b7d7010f71,0x87b7d7010f71,4 +np.float64,0x3fe9422da9b2845b,0x3ff02052e6148c45,4 +np.float64,0x7fddd259b93ba4b2,0xc000731aa33d84b6,4 +np.float64,0x3fe0156d12202ada,0x3fe1972ba309cb29,4 +np.float64,0x8004f1264b89e24d,0x8004f1264b89e24d,4 +np.float64,0x3fececdcacb9d9b9,0x3ff4534d5861f731,4 +np.float64,0x3fd1790ab822f215,0x3fd1eb97b1bb6fb4,4 +np.float64,0xffce5d11863cba24,0xbfcb4f38c17210da,4 +np.float64,0x800a30c32a546187,0x800a30c32a546187,4 +np.float64,0x3fa58cc61c2b198c,0x3fa59008add7233e,4 +np.float64,0xbfe0ac77d62158f0,0xbfe25de3dba0bc4a,4 +np.float64,0xeb8c5753d718b,0xeb8c5753d718b,4 +np.float64,0x3fee5438dafca872,0x3ff644fef7e7adb5,4 
+np.float64,0x3faad1eb2c35a3e0,0x3faad83499f94057,4 +np.float64,0x3fe39152c46722a6,0x3fe66fba0b96ab6e,4 +np.float64,0xffd6fd17712dfa2e,0xc010d697d1ab8731,4 +np.float64,0x5214a888a4296,0x5214a888a4296,4 +np.float64,0x8000127a5da024f5,0x8000127a5da024f5,4 +np.float64,0x7feb3a366cb6746c,0x3fbe49bd8d5f213a,4 +np.float64,0xca479501948f3,0xca479501948f3,4 +np.float64,0x7fe7c799ce6f8f33,0xbfd796cd98dc620c,4 +np.float64,0xffe20bcf30a4179e,0xbff8ca5453fa088f,4 +np.float64,0x3fe624638a6c48c7,0x3fea83f123832c3c,4 +np.float64,0xbfe5f1377c6be26f,0xbfea2e143a2d522c,4 +np.float64,0x7fd193f9f8a327f3,0xbfb04ee2602574d4,4 +np.float64,0xbfe7419d2fee833a,0xbfec737f140d363d,4 +np.float64,0x1,0x1,4 +np.float64,0x7fe2ac246c655848,0x3fd14fee3237727a,4 +np.float64,0xa459b42948b37,0xa459b42948b37,4 +np.float64,0x3fb26155ae24c2ab,0x3fb2696fc446d4c6,4 +np.float64,0xbfdd7b332e3af666,0xbfdfc296c21f1aa8,4 +np.float64,0xbfe00dbda4a01b7c,0xbfe18d2b060f0506,4 +np.float64,0x8003bb22d3e77646,0x8003bb22d3e77646,4 +np.float64,0x3fee21b0a57c4361,0x3ff5fb6a21dc911c,4 +np.float64,0x80ca69270194d,0x80ca69270194d,4 +np.float64,0xbfd6d80350adb006,0xbfd7ddb501edbde0,4 +np.float64,0xd2f8b801a5f2,0xd2f8b801a5f2,4 +np.float64,0xbfe856b3f170ad68,0xbfee7334fdc49296,4 +np.float64,0x3fed5c1b20bab836,0x3ff4e73ee5d5c7f3,4 +np.float64,0xbfd58085a5ab010c,0xbfd6596ddc381ffa,4 +np.float64,0x3fe4f0134b29e027,0x3fe88b70602fbd21,4 +np.float64,0xffc9098fdc321320,0x4011c334a74a92cf,4 +np.float64,0x794749bef28ea,0x794749bef28ea,4 +np.float64,0xbfc86b547f30d6a8,0xbfc8b84a4fafe0af,4 +np.float64,0x7fe1356b9da26ad6,0x3fd270bca208d899,4 +np.float64,0x7fca0ef1aa341de2,0xbff851044c0734fa,4 +np.float64,0x80064cb8b62c9972,0x80064cb8b62c9972,4 +np.float64,0xffd3a09a83a74136,0x3ffb66dae0accdf5,4 +np.float64,0x800e301aa15c6035,0x800e301aa15c6035,4 +np.float64,0x800e51f323bca3e6,0x800e51f323bca3e6,4 +np.float64,0x7ff0000000000000,0xfff8000000000000,4 +np.float64,0x800c4278c87884f2,0x800c4278c87884f2,4 +np.float64,0xbfe8481649f0902c,0xbfee576772695096,4 +np.float64,0xffe2344e3fa4689c,0x3fb10442ec0888de,4 +np.float64,0xbfeada313d75b462,0xbff1d1aee3fab3a9,4 +np.float64,0x8009ddfb1333bbf7,0x8009ddfb1333bbf7,4 +np.float64,0x7fed3314c93a6629,0x3ff7a9b12dc1cd37,4 +np.float64,0x3fd55c26da2ab84e,0x3fd630a7b8aac78a,4 +np.float64,0x800cdb5203f9b6a4,0x800cdb5203f9b6a4,4 +np.float64,0xffd04a875da0950e,0x4009a13810ab121d,4 +np.float64,0x800f1acb527e3597,0x800f1acb527e3597,4 +np.float64,0xbf9519bf282a3380,0xbf951a82e9b955ff,4 +np.float64,0x3fcd7a42fa3af486,0x3fce028f3c51072d,4 +np.float64,0xbfdd3e21b73a7c44,0xbfdf769f2ff2480b,4 +np.float64,0xffd4361e2aa86c3c,0xbfc211ce8e9f792c,4 +np.float64,0x7fccf97f6939f2fe,0xbff8464bad830f06,4 +np.float64,0x800ce47fb939c900,0x800ce47fb939c900,4 +np.float64,0xffe9e51df173ca3b,0xbfceaf990d652c4e,4 +np.float64,0x3fe05bba5b20b775,0x3fe1f326e4455442,4 +np.float64,0x800a29b4b134536a,0x800a29b4b134536a,4 +np.float64,0xe6f794b7cdef3,0xe6f794b7cdef3,4 +np.float64,0xffb5b688ce2b6d10,0x3ff924bb97ae2f6d,4 +np.float64,0x7fa74105d82e820b,0x3fd49643aaa9eee4,4 +np.float64,0x80020d15f7a41a2d,0x80020d15f7a41a2d,4 +np.float64,0x3fd6a983d5ad5308,0x3fd7a8cc8835b5b8,4 +np.float64,0x7fcd9798f03b2f31,0x3fc534c2f7bf4721,4 +np.float64,0xffdd31873a3a630e,0xbfe3171fcdffb3f7,4 +np.float64,0x80075183234ea307,0x80075183234ea307,4 +np.float64,0x82f3132505e63,0x82f3132505e63,4 +np.float64,0x3febfd9cb837fb39,0x3ff325bbf812515d,4 +np.float64,0xbfb4630fda28c620,0xbfb46e1f802ec278,4 +np.float64,0x3feeed7c89fddafa,0x3ff72c20ce5a9ee4,4 
+np.float64,0x7fd3dcb3c127b967,0x40123d27ec9ec31d,4 +np.float64,0xbfe923450c72468a,0xbff00149c5742725,4 +np.float64,0x7fdef7f91abdeff1,0xbfe02ceb21f7923d,4 +np.float64,0x7fdd70d28fbae1a4,0xbfefcc5c9d10cdfd,4 +np.float64,0x800ca445a8d9488c,0x800ca445a8d9488c,4 +np.float64,0x7fec2754e1f84ea9,0x40173f6c1c97f825,4 +np.float64,0x7fcbca31f7379463,0x401e26bd2667075b,4 +np.float64,0x8003fa1d0847f43b,0x8003fa1d0847f43b,4 +np.float64,0xffe95cf85932b9f0,0xc01308e60278aa11,4 +np.float64,0x8009c53948f38a73,0x8009c53948f38a73,4 +np.float64,0x3fdcca9226b99524,0x3fdee7a008f75d41,4 +np.float64,0xbfe9ee241f33dc48,0xbff0d16bfff6c8e9,4 +np.float64,0xbfb3365058266ca0,0xbfb33f9176ebb51d,4 +np.float64,0x7fa98e10f4331c21,0x3fdee04ffd31314e,4 +np.float64,0xbfe1a11aea634236,0xbfe3a8e3d84fda38,4 +np.float64,0xbfd8df051131be0a,0xbfda342805d1948b,4 +np.float64,0x3d49a2407a935,0x3d49a2407a935,4 +np.float64,0xfc51eefff8a3e,0xfc51eefff8a3e,4 +np.float64,0xda63950bb4c73,0xda63950bb4c73,4 +np.float64,0x80050f3d4fea1e7b,0x80050f3d4fea1e7b,4 +np.float64,0x3fcdbd6e453b7ae0,0x3fce497478c28e77,4 +np.float64,0x7ebd4932fd7aa,0x7ebd4932fd7aa,4 +np.float64,0x7fa3904eac27209c,0xc0015f3125efc151,4 +np.float64,0x7fc59f956b2b3f2a,0xc00c012e7a2c281f,4 +np.float64,0xbfd436d716a86dae,0xbfd4ea13533a942b,4 +np.float64,0x9347ae3d268f6,0x9347ae3d268f6,4 +np.float64,0xffd001764d2002ec,0xbffab3462e515623,4 +np.float64,0x3fe6f406662de80d,0x3febe9bac3954999,4 +np.float64,0x3f943ecaf8287d96,0x3f943f77dee5e77f,4 +np.float64,0x3fd6250efcac4a1c,0x3fd712afa947d56f,4 +np.float64,0xbfe849ff777093ff,0xbfee5b089d03391f,4 +np.float64,0xffd3b8ef8f2771e0,0x4000463ff7f29214,4 +np.float64,0xbfc3bae9252775d4,0xbfc3e34c133f1933,4 +np.float64,0xbfea93943df52728,0xbff18355e4fc341d,4 +np.float64,0x3fc4d922ad29b245,0x3fc508d66869ef29,4 +np.float64,0x4329694a8652e,0x4329694a8652e,4 +np.float64,0x8834f1a71069e,0x8834f1a71069e,4 +np.float64,0xe0e5be8dc1cb8,0xe0e5be8dc1cb8,4 +np.float64,0x7fef4d103afe9a1f,0xc0047b88b94554fe,4 +np.float64,0x3fe9b57af4f36af6,0x3ff0963831d51c3f,4 +np.float64,0x3fe081e2fa6103c6,0x3fe22572e41be655,4 +np.float64,0x3fd78cf7b42f19ef,0x3fd8acafa1ad776a,4 +np.float64,0x7fbffd58d43ffab1,0x3fb16092c7de6036,4 +np.float64,0xbfe1e8bfae23d180,0xbfe40c1c6277dd52,4 +np.float64,0x800a9f59fb153eb4,0x800a9f59fb153eb4,4 +np.float64,0xffebe14e33b7c29c,0x3fe0ec532f4deedd,4 +np.float64,0xffc36ca00426d940,0xc000806a712d6e83,4 +np.float64,0xbfcc2be82d3857d0,0xbfcca2a7d372ec64,4 +np.float64,0x800c03b908780772,0x800c03b908780772,4 +np.float64,0xf315a64be62b5,0xf315a64be62b5,4 +np.float64,0xbfe644043cec8808,0xbfeab974d3dc6d80,4 +np.float64,0x3fedb7de3cbb6fbc,0x3ff56549a5acd324,4 +np.float64,0xbfb1a875522350e8,0xbfb1afa41dee338d,4 +np.float64,0xffee8d4a407d1a94,0x3fead1749a636ff6,4 +np.float64,0x8004061c13080c39,0x8004061c13080c39,4 +np.float64,0x3fe650ae7feca15c,0x3feacefb8bc25f64,4 +np.float64,0x3fda8340e6b50682,0x3fdc24275cab1df8,4 +np.float64,0x8009084344321087,0x8009084344321087,4 +np.float64,0x7fdd19cb823a3396,0xbfd1d8fb35d89e3f,4 +np.float64,0xbfe893172571262e,0xbfeee716b592b93c,4 +np.float64,0x8ff5acc11fec,0x8ff5acc11fec,4 +np.float64,0xbfdca0c57cb9418a,0xbfdeb42465a1b59e,4 +np.float64,0xffd77bd2a3aef7a6,0x4012cd69e85b82d8,4 +np.float64,0xbfe6ea78982dd4f1,0xbfebd8ec61fb9e1f,4 +np.float64,0x7fe14b1d80a2963a,0xc02241642102cf71,4 +np.float64,0x3fe712bf286e257e,0x3fec20012329a7fb,4 +np.float64,0x7fcb6fa4d636df49,0x400b899d14a886b3,4 +np.float64,0x3fb82cb39a305960,0x3fb83f29c5f0822e,4 +np.float64,0x7fed694c8b3ad298,0xbfe2724373c69808,4 
+np.float64,0xbfcd21229f3a4244,0xbfcda497fc3e1245,4 +np.float64,0x564d3770ac9a8,0x564d3770ac9a8,4 +np.float64,0xf4409e13e8814,0xf4409e13e8814,4 +np.float64,0x80068dca9a8d1b96,0x80068dca9a8d1b96,4 +np.float64,0xbfe13f82afe27f06,0xbfe3236ddded353f,4 +np.float64,0x80023f8114647f03,0x80023f8114647f03,4 +np.float64,0xeafba7dfd5f75,0xeafba7dfd5f75,4 +np.float64,0x3feca74ddeb94e9c,0x3ff3f95dcce5a227,4 +np.float64,0x10000000000000,0x10000000000000,4 +np.float64,0xbfebdb4141f7b682,0xbff2fc29823ac64a,4 +np.float64,0xbfcd75ee2f3aebdc,0xbfcdfdfd87cc6a29,4 +np.float64,0x7fc010cda420219a,0x3fae4ca2cf1f2657,4 +np.float64,0x1a90209e35205,0x1a90209e35205,4 +np.float64,0x8008057d01900afa,0x8008057d01900afa,4 +np.float64,0x3f9cb5f280396be5,0x3f9cb7dfb4e4be4e,4 +np.float64,0xffe1bbb60b63776c,0xc00011b1ffcb2561,4 +np.float64,0xffda883f6fb5107e,0x4044238ef4e2a198,4 +np.float64,0x3fc07c0b4a20f817,0x3fc09387de9eebcf,4 +np.float64,0x8003a9ebc0c753d8,0x8003a9ebc0c753d8,4 +np.float64,0x1d7fd5923affc,0x1d7fd5923affc,4 +np.float64,0xbfe9cd8cf9b39b1a,0xbff0af43e567ba4a,4 +np.float64,0x11285cb42250c,0x11285cb42250c,4 +np.float64,0xffe81ae1ccb035c3,0xbfe038be7eb563a6,4 +np.float64,0xbfe56473b1eac8e8,0xbfe94654d8ab9e75,4 +np.float64,0x3fee904619fd208c,0x3ff69e198152fe17,4 +np.float64,0xbfeeb9a2cbfd7346,0xbff6dc8d96da78cd,4 +np.float64,0x8006cdfa59ed9bf5,0x8006cdfa59ed9bf5,4 +np.float64,0x8008f2366d31e46d,0x8008f2366d31e46d,4 +np.float64,0x8008d5f91e31abf3,0x8008d5f91e31abf3,4 +np.float64,0x3fe85886f8b0b10e,0x3fee76af16f5a126,4 +np.float64,0x3fefb9b2b73f7365,0x3ff8745128fa3e3b,4 +np.float64,0x7fdf3e721f3e7ce3,0xbfb19381541ca2a8,4 +np.float64,0x3fd2768c41a4ed18,0x3fd2fe2f85a3f3a6,4 +np.float64,0xbfcabe3c6a357c78,0xbfcb239fb88bc260,4 +np.float64,0xffdffb6a3dbff6d4,0xbff7af4759fd557c,4 +np.float64,0x800817f75f302fef,0x800817f75f302fef,4 +np.float64,0xbfe6a1d1762d43a3,0xbfeb5a399a095ef3,4 +np.float64,0x7fd6f32f912de65e,0x40016dedc51aabd0,4 +np.float64,0x3fc6cb26652d964d,0x3fc7099f047d924a,4 +np.float64,0x3fe8b975d67172ec,0x3fef31946123c0e7,4 +np.float64,0xffe44a09d1e89413,0x3fdee9e5eac6e540,4 +np.float64,0xbfece76d4cb9cedb,0xbff44c34849d07ba,4 +np.float64,0x7feb76027036ec04,0x3fe08595a5e263ac,4 +np.float64,0xffe194f591a329ea,0x3fbe5bd626400a70,4 +np.float64,0xbfc170698122e0d4,0xbfc18c3de8b63565,4 +np.float64,0x3fc82b2c0f305658,0x3fc875c3b5fbcd08,4 +np.float64,0x3fd5015634aa02ac,0x3fd5cb1df07213c3,4 +np.float64,0x7fe640884b6c8110,0xbff66255a420abb5,4 +np.float64,0x5a245206b448b,0x5a245206b448b,4 +np.float64,0xffe9d9fa2f73b3f4,0xc0272b0dd34ab9bf,4 +np.float64,0x3fd990e8aab321d0,0x3fdb04cd3a29bcc3,4 +np.float64,0xde9dda8bbd3bc,0xde9dda8bbd3bc,4 +np.float64,0xbfe81b32b4703666,0xbfee029937fa9f5a,4 +np.float64,0xbfe68116886d022d,0xbfeb21c62081cb73,4 +np.float64,0x3fb8da191231b432,0x3fb8ee28c71507d3,4 +np.float64,0x3fb111395a222273,0x3fb117b57de3dea4,4 +np.float64,0xffbafadc6a35f5b8,0x3ffcc6d2370297b9,4 +np.float64,0x8002ca475b05948f,0x8002ca475b05948f,4 +np.float64,0xbfeafef57875fdeb,0xbff1fb1315676f24,4 +np.float64,0x7fcda427d73b484f,0xbff9f70212694d17,4 +np.float64,0xffe2517b3ba4a2f6,0xc029ca6707305bf4,4 +np.float64,0x7fc5ee156b2bdc2a,0xbff8384b59e9056e,4 +np.float64,0xbfec22af3278455e,0xbff3530fe25816b4,4 +np.float64,0x6b5a8c2cd6b52,0x6b5a8c2cd6b52,4 +np.float64,0xffdaf6c4b935ed8a,0x4002f00ce58affcf,4 +np.float64,0x800a41813c748303,0x800a41813c748303,4 +np.float64,0xbfd09a1269213424,0xbfd0fc0a0c5de8eb,4 +np.float64,0x7fa2cb74d42596e9,0x3fc3d40e000fa69d,4 +np.float64,0x7ff8000000000000,0x7ff8000000000000,4 
+np.float64,0x3fbfbf8ed63f7f1e,0x3fbfe97bcad9f53a,4 +np.float64,0x7fe0ebba65a1d774,0x401b0f17b28618df,4 +np.float64,0x3fd02c3a25a05874,0x3fd086aa55b19c9c,4 +np.float64,0xec628f95d8c52,0xec628f95d8c52,4 +np.float64,0x3fd319329fa63264,0x3fd3afb04e0dec63,4 +np.float64,0x180e0ade301c2,0x180e0ade301c2,4 +np.float64,0xbfe8d78324f1af06,0xbfef6c66153064ee,4 +np.float64,0xffb89fa200313f48,0xbfeb96ff2d9358dc,4 +np.float64,0x7fe6abcf86ed579e,0xc0269f4de86365ec,4 +np.float64,0x7fdff8cd65bff19a,0xbfd0f7c6b9052c9a,4 +np.float64,0xbfd2e3a53d25c74a,0xbfd37520cda5f6b2,4 +np.float64,0x7fe844b096708960,0x3ff696a6182e5a7a,4 +np.float64,0x7fdce0c7a3b9c18e,0x3fd42875d69ed379,4 +np.float64,0xffba5a91cc34b520,0x4001b571e8991951,4 +np.float64,0xffe78fe4a6ef1fc9,0x3ff4507b31f5b3bc,4 +np.float64,0xbfd7047493ae08ea,0xbfd810618a53fffb,4 +np.float64,0xc6559def8cab4,0xc6559def8cab4,4 +np.float64,0x3fe75d67a76ebacf,0x3feca56817de65e4,4 +np.float64,0xffd24adbd6a495b8,0xc012c491addf2df5,4 +np.float64,0x7fed35e28dba6bc4,0x403a0fa555ff7ec6,4 +np.float64,0x80078c4afa0f1897,0x80078c4afa0f1897,4 +np.float64,0xa6ec39114dd87,0xa6ec39114dd87,4 +np.float64,0x7fb1bd33ba237a66,0x4010092bb6810fd4,4 +np.float64,0x800ecf215edd9e43,0x800ecf215edd9e43,4 +np.float64,0x3fb7c169242f82d2,0x3fb7d2ed30c462e6,4 +np.float64,0xbf71b46d60236900,0xbf71b4749a10c112,4 +np.float64,0x800d7851787af0a3,0x800d7851787af0a3,4 +np.float64,0x3fcb4a45e7369488,0x3fcbb61701a1bcec,4 +np.float64,0x3fd4e3682429c6d0,0x3fd5a9bcb916eb94,4 +np.float64,0x800497564c292ead,0x800497564c292ead,4 +np.float64,0xbfca3737a1346e70,0xbfca96a86ae5d687,4 +np.float64,0x19aa87e03356,0x19aa87e03356,4 +np.float64,0xffb2593fe624b280,0xc05fedb99b467ced,4 +np.float64,0xbfdd8748fbbb0e92,0xbfdfd1a7df17252c,4 +np.float64,0x8004c7afc7098f60,0x8004c7afc7098f60,4 +np.float64,0x7fde48b2bf3c9164,0xbfe36ef1158ed420,4 +np.float64,0xbfec8e0eb0f91c1d,0xbff3d9319705a602,4 +np.float64,0xffea1be204f437c3,0xc0144f67298c3e6f,4 +np.float64,0x7fdb906b593720d6,0xbfce99233396eda7,4 +np.float64,0x3fef0f114ffe1e22,0x3ff76072a258a51b,4 +np.float64,0x3fe3e284c8e7c50a,0x3fe6e9b05e17c999,4 +np.float64,0xbfbda9eef23b53e0,0xbfbdcc1abb443597,4 +np.float64,0x3feb6454d4f6c8aa,0x3ff26f65a85baba4,4 +np.float64,0x3fea317439f462e8,0x3ff118e2187ef33f,4 +np.float64,0x376ad0646ed5b,0x376ad0646ed5b,4 +np.float64,0x7fdd461a1c3a8c33,0x3f7ba20fb79e785f,4 +np.float64,0xebc520a3d78a4,0xebc520a3d78a4,4 +np.float64,0x3fca90fe53352200,0x3fcaf45c7fae234d,4 +np.float64,0xbfe80dd1de701ba4,0xbfede97e12cde9de,4 +np.float64,0x3fd242b00ea48560,0x3fd2c5cf9bf69a31,4 +np.float64,0x7fe46c057828d80a,0xbfe2f76837488f94,4 +np.float64,0x3fc162bea322c580,0x3fc17e517c958867,4 +np.float64,0xffebf0452ff7e08a,0x3ffc3fd95c257b54,4 +np.float64,0xffd88043c6310088,0x4008b05598d0d95f,4 +np.float64,0x800d8c49da5b1894,0x800d8c49da5b1894,4 +np.float64,0xbfed33b487ba6769,0xbff4b0ea941f8a6a,4 +np.float64,0x16b881e22d711,0x16b881e22d711,4 +np.float64,0x288bae0051177,0x288bae0051177,4 +np.float64,0xffc83a0fe8307420,0x4006eff03da17f86,4 +np.float64,0x3fc7868b252f0d18,0x3fc7cb4954290324,4 +np.float64,0xbfe195514b232aa2,0xbfe398aae6c8ed76,4 +np.float64,0x800c001ae7f80036,0x800c001ae7f80036,4 +np.float64,0x7feb82abe7370557,0xbff1e13fe6fad23c,4 +np.float64,0xffecf609cdf9ec13,0xc0112aa1805ae59e,4 +np.float64,0xffddd654f63bacaa,0x3fe46cce899f710d,4 +np.float64,0x3fe2163138642c62,0x3fe44b9c760acd4c,4 +np.float64,0x4e570dc09cae2,0x4e570dc09cae2,4 +np.float64,0x7fe9e8d091f3d1a0,0xc000fe20f8e9a4b5,4 +np.float64,0x7fe60042952c0084,0x3fd0aa740f394c2a,4 diff --git 
a/.venv/lib/python3.12/site-packages/numpy/core/tests/data/umath-validation-set-tanh.csv b/.venv/lib/python3.12/site-packages/numpy/core/tests/data/umath-validation-set-tanh.csv new file mode 100644 index 00000000..9e3ddc60 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/data/umath-validation-set-tanh.csv @@ -0,0 +1,1429 @@ +dtype,input,output,ulperrortol +np.float32,0xbe26ebb0,0xbe25752f,2 +np.float32,0xbe22ecc0,0xbe219054,2 +np.float32,0x8010a6b3,0x8010a6b3,2 +np.float32,0x3135da,0x3135da,2 +np.float32,0xbe982afc,0xbe93d727,2 +np.float32,0x16a51f,0x16a51f,2 +np.float32,0x491e56,0x491e56,2 +np.float32,0x4bf7ca,0x4bf7ca,2 +np.float32,0x3eebc21c,0x3edc65b2,2 +np.float32,0x80155c94,0x80155c94,2 +np.float32,0x3e14f626,0x3e13eb6a,2 +np.float32,0x801a238f,0x801a238f,2 +np.float32,0xbde33a80,0xbde24cf9,2 +np.float32,0xbef8439c,0xbee67a51,2 +np.float32,0x7f60d0a5,0x3f800000,2 +np.float32,0x190ee3,0x190ee3,2 +np.float32,0x80759113,0x80759113,2 +np.float32,0x800afa9f,0x800afa9f,2 +np.float32,0x7110cf,0x7110cf,2 +np.float32,0x3cf709f0,0x3cf6f6c6,2 +np.float32,0x3ef58da4,0x3ee44fa7,2 +np.float32,0xbf220ff2,0xbf0f662c,2 +np.float32,0xfd888078,0xbf800000,2 +np.float32,0xbe324734,0xbe307f9b,2 +np.float32,0x3eb5cb4f,0x3eae8560,2 +np.float32,0xbf7e7d02,0xbf425493,2 +np.float32,0x3ddcdcf0,0x3ddc02c2,2 +np.float32,0x8026d27a,0x8026d27a,2 +np.float32,0x3d4c0fb1,0x3d4be484,2 +np.float32,0xbf27d2c9,0xbf134d7c,2 +np.float32,0x8029ff80,0x8029ff80,2 +np.float32,0x7f046d2c,0x3f800000,2 +np.float32,0x13f94b,0x13f94b,2 +np.float32,0x7f4ff922,0x3f800000,2 +np.float32,0x3f4ea2ed,0x3f2b03e4,2 +np.float32,0x3e7211f0,0x3e6da8cf,2 +np.float32,0x7f39d0cf,0x3f800000,2 +np.float32,0xfee57fc6,0xbf800000,2 +np.float32,0xff6fb326,0xbf800000,2 +np.float32,0xff800000,0xbf800000,2 +np.float32,0x3f0437a4,0x3ef32fcd,2 +np.float32,0xff546d1e,0xbf800000,2 +np.float32,0x3eb5645b,0x3eae2a5c,2 +np.float32,0x3f08a6e5,0x3ef9ff8f,2 +np.float32,0x80800000,0x80800000,2 +np.float32,0x7f3413da,0x3f800000,2 +np.float32,0xfd760140,0xbf800000,2 +np.float32,0x7f3ad24a,0x3f800000,2 +np.float32,0xbf56e812,0xbf2f7f14,2 +np.float32,0xbece0338,0xbec3920a,2 +np.float32,0xbeede54a,0xbede22ae,2 +np.float32,0x7eaeb215,0x3f800000,2 +np.float32,0x3c213c00,0x3c213aab,2 +np.float32,0x7eaac217,0x3f800000,2 +np.float32,0xbf2f740e,0xbf1851a6,2 +np.float32,0x7f6ca5b8,0x3f800000,2 +np.float32,0xff42ce95,0xbf800000,2 +np.float32,0x802e4189,0x802e4189,2 +np.float32,0x80000001,0x80000001,2 +np.float32,0xbf31f298,0xbf19ebbe,2 +np.float32,0x3dcb0e6c,0x3dca64c1,2 +np.float32,0xbf29599c,0xbf145204,2 +np.float32,0x2e33f2,0x2e33f2,2 +np.float32,0x1c11e7,0x1c11e7,2 +np.float32,0x3f3b188d,0x3f1fa302,2 +np.float32,0x113300,0x113300,2 +np.float32,0x8054589e,0x8054589e,2 +np.float32,0x2a9e69,0x2a9e69,2 +np.float32,0xff513af7,0xbf800000,2 +np.float32,0x7f2e987a,0x3f800000,2 +np.float32,0x807cd426,0x807cd426,2 +np.float32,0x7f0dc4e4,0x3f800000,2 +np.float32,0x7e7c0d56,0x3f800000,2 +np.float32,0x5cb076,0x5cb076,2 +np.float32,0x80576426,0x80576426,2 +np.float32,0xff616222,0xbf800000,2 +np.float32,0xbf7accb5,0xbf40c005,2 +np.float32,0xfe4118c8,0xbf800000,2 +np.float32,0x804b9327,0x804b9327,2 +np.float32,0x3ed2b428,0x3ec79026,2 +np.float32,0x3f4a048f,0x3f286d41,2 +np.float32,0x800000,0x800000,2 +np.float32,0x7efceb9f,0x3f800000,2 +np.float32,0xbf5fe2d3,0xbf34246f,2 +np.float32,0x807e086a,0x807e086a,2 +np.float32,0x7ef5e856,0x3f800000,2 +np.float32,0xfc546f00,0xbf800000,2 +np.float32,0x3a65b890,0x3a65b88c,2 +np.float32,0x800cfa70,0x800cfa70,2 
+np.float32,0x80672ea7,0x80672ea7,2 +np.float32,0x3f2bf3f2,0x3f160a12,2 +np.float32,0xbf0ab67e,0xbefd2004,2 +np.float32,0x3f2a0bb4,0x3f14c824,2 +np.float32,0xbeff5374,0xbeec12d7,2 +np.float32,0xbf221b58,0xbf0f6dff,2 +np.float32,0x7cc1f3,0x7cc1f3,2 +np.float32,0x7f234e3c,0x3f800000,2 +np.float32,0x3f60ff10,0x3f34b37d,2 +np.float32,0xbdd957f0,0xbdd887fe,2 +np.float32,0x801ce048,0x801ce048,2 +np.float32,0x7f3a8f76,0x3f800000,2 +np.float32,0xfdd13d08,0xbf800000,2 +np.float32,0x3e9af4a4,0x3e966445,2 +np.float32,0x1e55f3,0x1e55f3,2 +np.float32,0x327905,0x327905,2 +np.float32,0xbf03cf0b,0xbef28dad,2 +np.float32,0x3f0223d3,0x3eeff4f4,2 +np.float32,0xfdd96ff8,0xbf800000,2 +np.float32,0x428db8,0x428db8,2 +np.float32,0xbd74a200,0xbd7457a5,2 +np.float32,0x2a63a3,0x2a63a3,2 +np.float32,0x7e8aa9d7,0x3f800000,2 +np.float32,0x7f50b810,0x3f800000,2 +np.float32,0xbce5ec80,0xbce5dd0d,2 +np.float32,0x54711,0x54711,2 +np.float32,0x8074212a,0x8074212a,2 +np.float32,0xbf13d0ec,0xbf0551b5,2 +np.float32,0x80217f89,0x80217f89,2 +np.float32,0x3f300824,0x3f18b12f,2 +np.float32,0x7d252462,0x3f800000,2 +np.float32,0x807a154c,0x807a154c,2 +np.float32,0x8064d4b9,0x8064d4b9,2 +np.float32,0x804543b4,0x804543b4,2 +np.float32,0x4c269e,0x4c269e,2 +np.float32,0xff39823b,0xbf800000,2 +np.float32,0x3f5040b1,0x3f2be80b,2 +np.float32,0xbf7028c1,0xbf3bfee5,2 +np.float32,0x3e94eb78,0x3e90db93,2 +np.float32,0x3ccc1b40,0x3ccc1071,2 +np.float32,0xbe8796f0,0xbe8481a1,2 +np.float32,0xfc767bc0,0xbf800000,2 +np.float32,0xbdd81ed0,0xbdd75259,2 +np.float32,0xbed31bfc,0xbec7e82d,2 +np.float32,0xbf350a9e,0xbf1be1c6,2 +np.float32,0x33d41f,0x33d41f,2 +np.float32,0x3f73e076,0x3f3db0b5,2 +np.float32,0x3f800000,0x3f42f7d6,2 +np.float32,0xfee27c14,0xbf800000,2 +np.float32,0x7f6e4388,0x3f800000,2 +np.float32,0x4ea19b,0x4ea19b,2 +np.float32,0xff2d75f2,0xbf800000,2 +np.float32,0x7ee225ca,0x3f800000,2 +np.float32,0x3f31cb4b,0x3f19d2a4,2 +np.float32,0x80554a9d,0x80554a9d,2 +np.float32,0x3f4d57fa,0x3f2a4c03,2 +np.float32,0x3eac6a88,0x3ea62e72,2 +np.float32,0x773520,0x773520,2 +np.float32,0x8079c20a,0x8079c20a,2 +np.float32,0xfeb1eb94,0xbf800000,2 +np.float32,0xfe8d81c0,0xbf800000,2 +np.float32,0xfeed6902,0xbf800000,2 +np.float32,0x8066bb65,0x8066bb65,2 +np.float32,0x7f800000,0x3f800000,2 +np.float32,0x1,0x1,2 +np.float32,0x3f2c66a4,0x3f16554a,2 +np.float32,0x3cd231,0x3cd231,2 +np.float32,0x3e932a64,0x3e8f3e0c,2 +np.float32,0xbf3ab1c3,0xbf1f6420,2 +np.float32,0xbc902b20,0xbc902751,2 +np.float32,0x7dac0a5b,0x3f800000,2 +np.float32,0x3f2b7e06,0x3f15bc93,2 +np.float32,0x75de0,0x75de0,2 +np.float32,0x8020b7bc,0x8020b7bc,2 +np.float32,0x3f257cda,0x3f11bb6b,2 +np.float32,0x807480e5,0x807480e5,2 +np.float32,0xfe00d758,0xbf800000,2 +np.float32,0xbd9b54e0,0xbd9b08cd,2 +np.float32,0x4dfbe3,0x4dfbe3,2 +np.float32,0xff645788,0xbf800000,2 +np.float32,0xbe92c80a,0xbe8ee360,2 +np.float32,0x3eb9b400,0x3eb1f77c,2 +np.float32,0xff20b69c,0xbf800000,2 +np.float32,0x623c28,0x623c28,2 +np.float32,0xff235748,0xbf800000,2 +np.float32,0xbf3bbc56,0xbf2006f3,2 +np.float32,0x7e6f78b1,0x3f800000,2 +np.float32,0x7e1584e9,0x3f800000,2 +np.float32,0xff463423,0xbf800000,2 +np.float32,0x8002861e,0x8002861e,2 +np.float32,0xbf0491d8,0xbef3bb6a,2 +np.float32,0x7ea3bc17,0x3f800000,2 +np.float32,0xbedde7ea,0xbed0fb49,2 +np.float32,0xbf4bac48,0xbf295c8b,2 +np.float32,0xff28e276,0xbf800000,2 +np.float32,0x7e8f3bf5,0x3f800000,2 +np.float32,0xbf0a4a73,0xbefc7c9d,2 +np.float32,0x7ec5bd96,0x3f800000,2 +np.float32,0xbf4c22e8,0xbf299f2c,2 +np.float32,0x3e3970a0,0x3e377064,2 
+np.float32,0x3ecb1118,0x3ec10c88,2 +np.float32,0xff548a7a,0xbf800000,2 +np.float32,0xfe8ec550,0xbf800000,2 +np.float32,0x3e158985,0x3e147bb2,2 +np.float32,0x7eb79ad7,0x3f800000,2 +np.float32,0xbe811384,0xbe7cd1ab,2 +np.float32,0xbdc4b9e8,0xbdc41f94,2 +np.float32,0xe0fd5,0xe0fd5,2 +np.float32,0x3f2485f2,0x3f11142b,2 +np.float32,0xfdd3c3d8,0xbf800000,2 +np.float32,0xfe8458e6,0xbf800000,2 +np.float32,0x3f06e398,0x3ef74dd8,2 +np.float32,0xff4752cf,0xbf800000,2 +np.float32,0x6998e3,0x6998e3,2 +np.float32,0x626751,0x626751,2 +np.float32,0x806631d6,0x806631d6,2 +np.float32,0xbf0c3cf4,0xbeff6c54,2 +np.float32,0x802860f8,0x802860f8,2 +np.float32,0xff2952cb,0xbf800000,2 +np.float32,0xff31d40b,0xbf800000,2 +np.float32,0x7c389473,0x3f800000,2 +np.float32,0x3dcd2f1b,0x3dcc8010,2 +np.float32,0x3d70c29f,0x3d707bbc,2 +np.float32,0x3f6bd386,0x3f39f979,2 +np.float32,0x1efec9,0x1efec9,2 +np.float32,0x3f675518,0x3f37d338,2 +np.float32,0x5fdbe3,0x5fdbe3,2 +np.float32,0x5d684e,0x5d684e,2 +np.float32,0xbedfe748,0xbed2a4c7,2 +np.float32,0x3f0cb07a,0x3f000cdc,2 +np.float32,0xbf77151e,0xbf3f1f5d,2 +np.float32,0x7f038ea0,0x3f800000,2 +np.float32,0x3ea91be9,0x3ea3376f,2 +np.float32,0xbdf20738,0xbdf0e861,2 +np.float32,0x807ea380,0x807ea380,2 +np.float32,0x2760ca,0x2760ca,2 +np.float32,0x7f20a544,0x3f800000,2 +np.float32,0x76ed83,0x76ed83,2 +np.float32,0x15a441,0x15a441,2 +np.float32,0x74c76d,0x74c76d,2 +np.float32,0xff3d5c2a,0xbf800000,2 +np.float32,0x7f6a76a6,0x3f800000,2 +np.float32,0x3eb87067,0x3eb0dabe,2 +np.float32,0xbf515cfa,0xbf2c83af,2 +np.float32,0xbdececc0,0xbdebdf9d,2 +np.float32,0x7f51b7c2,0x3f800000,2 +np.float32,0x3eb867ac,0x3eb0d30d,2 +np.float32,0xff50fd84,0xbf800000,2 +np.float32,0x806945e9,0x806945e9,2 +np.float32,0x298eed,0x298eed,2 +np.float32,0x441f53,0x441f53,2 +np.float32,0x8066d4b0,0x8066d4b0,2 +np.float32,0x3f6a479c,0x3f393dae,2 +np.float32,0xbf6ce2a7,0xbf3a7921,2 +np.float32,0x8064c3cf,0x8064c3cf,2 +np.float32,0xbf2d8146,0xbf170dfd,2 +np.float32,0x3b0e82,0x3b0e82,2 +np.float32,0xbea97574,0xbea387dc,2 +np.float32,0x67ad15,0x67ad15,2 +np.float32,0xbf68478f,0xbf38485a,2 +np.float32,0xff6f593b,0xbf800000,2 +np.float32,0xbeda26f2,0xbecdd806,2 +np.float32,0xbd216d50,0xbd2157ee,2 +np.float32,0x7a8544db,0x3f800000,2 +np.float32,0x801df20b,0x801df20b,2 +np.float32,0xbe14ba24,0xbe13b0a8,2 +np.float32,0xfdc6d8a8,0xbf800000,2 +np.float32,0x1d6b49,0x1d6b49,2 +np.float32,0x7f5ff1b8,0x3f800000,2 +np.float32,0x3f75e032,0x3f3e9625,2 +np.float32,0x7f2c5687,0x3f800000,2 +np.float32,0x3d95fb6c,0x3d95b6ee,2 +np.float32,0xbea515e4,0xbe9f97c8,2 +np.float32,0x7f2b2cd7,0x3f800000,2 +np.float32,0x3f076f7a,0x3ef8241e,2 +np.float32,0x5178ca,0x5178ca,2 +np.float32,0xbeb5976a,0xbeae5781,2 +np.float32,0x3e3c3563,0x3e3a1e13,2 +np.float32,0xbd208530,0xbd20702a,2 +np.float32,0x3eb03b04,0x3ea995ef,2 +np.float32,0x17fb9c,0x17fb9c,2 +np.float32,0xfca68e40,0xbf800000,2 +np.float32,0xbf5e7433,0xbf336a9f,2 +np.float32,0xff5b8d3d,0xbf800000,2 +np.float32,0x8003121d,0x8003121d,2 +np.float32,0xbe6dd344,0xbe69a3b0,2 +np.float32,0x67cc4,0x67cc4,2 +np.float32,0x9b01d,0x9b01d,2 +np.float32,0x127c13,0x127c13,2 +np.float32,0xfea5e3d6,0xbf800000,2 +np.float32,0xbdf5c610,0xbdf499c1,2 +np.float32,0x3aff4c00,0x3aff4beb,2 +np.float32,0x3b00afd0,0x3b00afc5,2 +np.float32,0x479618,0x479618,2 +np.float32,0x801cbd05,0x801cbd05,2 +np.float32,0x3ec9249f,0x3ebf6579,2 +np.float32,0x3535c4,0x3535c4,2 +np.float32,0xbeb4f662,0xbeadc915,2 +np.float32,0x8006fda6,0x8006fda6,2 +np.float32,0xbf4f3097,0xbf2b5239,2 +np.float32,0xbf3cb9a8,0xbf20a0e9,2 
+np.float32,0x32ced0,0x32ced0,2 +np.float32,0x7ea34e76,0x3f800000,2 +np.float32,0x80063046,0x80063046,2 +np.float32,0x80727e8b,0x80727e8b,2 +np.float32,0xfd6b5780,0xbf800000,2 +np.float32,0x80109815,0x80109815,2 +np.float32,0xfdcc8a78,0xbf800000,2 +np.float32,0x81562,0x81562,2 +np.float32,0x803dfacc,0x803dfacc,2 +np.float32,0xbe204318,0xbe1ef75f,2 +np.float32,0xbf745d34,0xbf3de8e2,2 +np.float32,0xff13fdcc,0xbf800000,2 +np.float32,0x7f75ba8c,0x3f800000,2 +np.float32,0x806c04b4,0x806c04b4,2 +np.float32,0x3ec61ca6,0x3ebcc877,2 +np.float32,0xbeaea984,0xbea8301f,2 +np.float32,0xbf4dcd0e,0xbf2a8d34,2 +np.float32,0x802a01d3,0x802a01d3,2 +np.float32,0xbf747be5,0xbf3df6ad,2 +np.float32,0xbf75cbd2,0xbf3e8d0f,2 +np.float32,0x7db86576,0x3f800000,2 +np.float32,0xff49a2c3,0xbf800000,2 +np.float32,0xbedc5314,0xbecfa978,2 +np.float32,0x8078877b,0x8078877b,2 +np.float32,0xbead4824,0xbea6f499,2 +np.float32,0xbf3926e3,0xbf1e716c,2 +np.float32,0x807f4a1c,0x807f4a1c,2 +np.float32,0x7f2cd8fd,0x3f800000,2 +np.float32,0x806cfcca,0x806cfcca,2 +np.float32,0xff1aa048,0xbf800000,2 +np.float32,0x7eb9ea08,0x3f800000,2 +np.float32,0xbf1034bc,0xbf02ab3a,2 +np.float32,0xbd087830,0xbd086b44,2 +np.float32,0x7e071034,0x3f800000,2 +np.float32,0xbefcc9de,0xbeea122f,2 +np.float32,0x80796d7a,0x80796d7a,2 +np.float32,0x33ce46,0x33ce46,2 +np.float32,0x8074a783,0x8074a783,2 +np.float32,0xbe95a56a,0xbe918691,2 +np.float32,0xbf2ff3f4,0xbf18a42d,2 +np.float32,0x1633e9,0x1633e9,2 +np.float32,0x7f0f104b,0x3f800000,2 +np.float32,0xbf800000,0xbf42f7d6,2 +np.float32,0x3d2cd6,0x3d2cd6,2 +np.float32,0xfed43e16,0xbf800000,2 +np.float32,0x3ee6faec,0x3ed87d2c,2 +np.float32,0x3f2c32d0,0x3f163352,2 +np.float32,0xff4290c0,0xbf800000,2 +np.float32,0xbf66500e,0xbf37546a,2 +np.float32,0x7dfb8fe3,0x3f800000,2 +np.float32,0x3f20ba5d,0x3f0e7b16,2 +np.float32,0xff30c7ae,0xbf800000,2 +np.float32,0x1728a4,0x1728a4,2 +np.float32,0x340d82,0x340d82,2 +np.float32,0xff7870b7,0xbf800000,2 +np.float32,0xbeac6ac4,0xbea62ea7,2 +np.float32,0xbef936fc,0xbee73c36,2 +np.float32,0x3ec7e12c,0x3ebe4ef8,2 +np.float32,0x80673488,0x80673488,2 +np.float32,0xfdf14c90,0xbf800000,2 +np.float32,0x3f182568,0x3f08726e,2 +np.float32,0x7ed7dcd0,0x3f800000,2 +np.float32,0x3de4da34,0x3de3e790,2 +np.float32,0xff7fffff,0xbf800000,2 +np.float32,0x4ff90c,0x4ff90c,2 +np.float32,0x3efb0d1c,0x3ee8b1d6,2 +np.float32,0xbf66e952,0xbf379ef4,2 +np.float32,0xba9dc,0xba9dc,2 +np.float32,0xff67c766,0xbf800000,2 +np.float32,0x7f1ffc29,0x3f800000,2 +np.float32,0x3f51c906,0x3f2cbe99,2 +np.float32,0x3f2e5792,0x3f179968,2 +np.float32,0x3ecb9750,0x3ec17fa0,2 +np.float32,0x7f3fcefc,0x3f800000,2 +np.float32,0xbe4e30fc,0xbe4b72f9,2 +np.float32,0x7e9bc4ce,0x3f800000,2 +np.float32,0x7e70aa1f,0x3f800000,2 +np.float32,0x14c6e9,0x14c6e9,2 +np.float32,0xbcf327c0,0xbcf3157a,2 +np.float32,0xff1fd204,0xbf800000,2 +np.float32,0x7d934a03,0x3f800000,2 +np.float32,0x8028bf1e,0x8028bf1e,2 +np.float32,0x7f0800b7,0x3f800000,2 +np.float32,0xfe04825c,0xbf800000,2 +np.float32,0x807210ac,0x807210ac,2 +np.float32,0x3f7faf7c,0x3f42d5fd,2 +np.float32,0x3e04a543,0x3e03e899,2 +np.float32,0x3e98ea15,0x3e94863e,2 +np.float32,0x3d2a2e48,0x3d2a153b,2 +np.float32,0x7fa00000,0x7fe00000,2 +np.float32,0x20a488,0x20a488,2 +np.float32,0x3f6ba86a,0x3f39e51a,2 +np.float32,0x0,0x0,2 +np.float32,0x3e892ddd,0x3e85fcfe,2 +np.float32,0x3e2da627,0x3e2c00e0,2 +np.float32,0xff000a50,0xbf800000,2 +np.float32,0x3eb749f4,0x3eafd739,2 +np.float32,0x8024c0ae,0x8024c0ae,2 +np.float32,0xfc8f3b40,0xbf800000,2 +np.float32,0xbf685fc7,0xbf385405,2 
+np.float32,0x3f1510e6,0x3f063a4f,2 +np.float32,0x3f68e8ad,0x3f3895d8,2 +np.float32,0x3dba8608,0x3dba0271,2 +np.float32,0xbf16ea10,0xbf079017,2 +np.float32,0xb3928,0xb3928,2 +np.float32,0xfe447c00,0xbf800000,2 +np.float32,0x3db9cd57,0x3db94b45,2 +np.float32,0x803b66b0,0x803b66b0,2 +np.float32,0x805b5e02,0x805b5e02,2 +np.float32,0x7ec93f61,0x3f800000,2 +np.float32,0x8005a126,0x8005a126,2 +np.float32,0x6d8888,0x6d8888,2 +np.float32,0x3e21b7de,0x3e206314,2 +np.float32,0xbec9c31e,0xbebfedc2,2 +np.float32,0xbea88aa8,0xbea2b4e5,2 +np.float32,0x3d8fc310,0x3d8f86bb,2 +np.float32,0xbf3cc68a,0xbf20a8b8,2 +np.float32,0x432690,0x432690,2 +np.float32,0xbe51d514,0xbe4ef1a3,2 +np.float32,0xbcda6d20,0xbcda5fe1,2 +np.float32,0xfe24e458,0xbf800000,2 +np.float32,0xfedc8c14,0xbf800000,2 +np.float32,0x7f7e9bd4,0x3f800000,2 +np.float32,0x3ebcc880,0x3eb4ab44,2 +np.float32,0xbe0aa490,0xbe09cd44,2 +np.float32,0x3dc9158c,0x3dc870c3,2 +np.float32,0x3e5c319e,0x3e58dc90,2 +np.float32,0x1d4527,0x1d4527,2 +np.float32,0x2dbf5,0x2dbf5,2 +np.float32,0xbf1f121f,0xbf0d5534,2 +np.float32,0x7e3e9ab5,0x3f800000,2 +np.float32,0x7f74b5c1,0x3f800000,2 +np.float32,0xbf6321ba,0xbf35c42b,2 +np.float32,0xbe5c7488,0xbe591c79,2 +np.float32,0x7e7b02cd,0x3f800000,2 +np.float32,0xfe7cbfa4,0xbf800000,2 +np.float32,0xbeace360,0xbea69a86,2 +np.float32,0x7e149b00,0x3f800000,2 +np.float32,0xbf61a700,0xbf35079a,2 +np.float32,0x7eb592a7,0x3f800000,2 +np.float32,0x3f2105e6,0x3f0eaf30,2 +np.float32,0xfd997a88,0xbf800000,2 +np.float32,0xff5d093b,0xbf800000,2 +np.float32,0x63aede,0x63aede,2 +np.float32,0x6907ee,0x6907ee,2 +np.float32,0xbf7578ee,0xbf3e680f,2 +np.float32,0xfea971e8,0xbf800000,2 +np.float32,0x3f21d0f5,0x3f0f3aed,2 +np.float32,0x3a50e2,0x3a50e2,2 +np.float32,0x7f0f5b1e,0x3f800000,2 +np.float32,0x805b9765,0x805b9765,2 +np.float32,0xbe764ab8,0xbe71a664,2 +np.float32,0x3eafac7f,0x3ea91701,2 +np.float32,0x807f4130,0x807f4130,2 +np.float32,0x7c5f31,0x7c5f31,2 +np.float32,0xbdbe0e30,0xbdbd8300,2 +np.float32,0x7ecfe4e0,0x3f800000,2 +np.float32,0xff7cb628,0xbf800000,2 +np.float32,0xff1842bc,0xbf800000,2 +np.float32,0xfd4163c0,0xbf800000,2 +np.float32,0x800e11f7,0x800e11f7,2 +np.float32,0x7f3adec8,0x3f800000,2 +np.float32,0x7f597514,0x3f800000,2 +np.float32,0xbe986e14,0xbe9414a4,2 +np.float32,0x800fa9d7,0x800fa9d7,2 +np.float32,0xff5b79c4,0xbf800000,2 +np.float32,0x80070565,0x80070565,2 +np.float32,0xbee5628e,0xbed72d60,2 +np.float32,0x3f438ef2,0x3f24b3ca,2 +np.float32,0xcda91,0xcda91,2 +np.float32,0x7e64151a,0x3f800000,2 +np.float32,0xbe95d584,0xbe91b2c7,2 +np.float32,0x8022c2a1,0x8022c2a1,2 +np.float32,0x7e7097bf,0x3f800000,2 +np.float32,0x80139035,0x80139035,2 +np.float32,0x804de2cb,0x804de2cb,2 +np.float32,0xfde5d178,0xbf800000,2 +np.float32,0x6d238,0x6d238,2 +np.float32,0x807abedc,0x807abedc,2 +np.float32,0x3f450a12,0x3f259129,2 +np.float32,0x3ef1c120,0x3ee141f2,2 +np.float32,0xfeb64dae,0xbf800000,2 +np.float32,0x8001732c,0x8001732c,2 +np.float32,0x3f76062e,0x3f3ea711,2 +np.float32,0x3eddd550,0x3ed0ebc8,2 +np.float32,0xff5ca1d4,0xbf800000,2 +np.float32,0xbf49dc5e,0xbf285673,2 +np.float32,0x7e9e5438,0x3f800000,2 +np.float32,0x7e83625e,0x3f800000,2 +np.float32,0x3f5dc41c,0x3f3310da,2 +np.float32,0x3f583efa,0x3f30342f,2 +np.float32,0xbe26bf88,0xbe254a2d,2 +np.float32,0xff1e0beb,0xbf800000,2 +np.float32,0xbe2244c8,0xbe20ec86,2 +np.float32,0xff0b1630,0xbf800000,2 +np.float32,0xff338dd6,0xbf800000,2 +np.float32,0x3eafc22c,0x3ea92a51,2 +np.float32,0x800ea07f,0x800ea07f,2 +np.float32,0x3f46f006,0x3f26aa7e,2 +np.float32,0x3e5f57cd,0x3e5bde16,2 
+np.float32,0xbf1b2d8e,0xbf0a9a93,2 +np.float32,0xfeacdbe0,0xbf800000,2 +np.float32,0x7e5ea4bc,0x3f800000,2 +np.float32,0xbf51cbe2,0xbf2cc027,2 +np.float32,0x8073644c,0x8073644c,2 +np.float32,0xff2d6bfe,0xbf800000,2 +np.float32,0x3f65f0f6,0x3f37260a,2 +np.float32,0xff4b37a6,0xbf800000,2 +np.float32,0x712df7,0x712df7,2 +np.float32,0x7f71ef17,0x3f800000,2 +np.float32,0x8042245c,0x8042245c,2 +np.float32,0x3e5dde7b,0x3e5a760d,2 +np.float32,0x8069317d,0x8069317d,2 +np.float32,0x807932dd,0x807932dd,2 +np.float32,0x802f847e,0x802f847e,2 +np.float32,0x7e9300,0x7e9300,2 +np.float32,0x8040b4ab,0x8040b4ab,2 +np.float32,0xff76ef8e,0xbf800000,2 +np.float32,0x4aae3a,0x4aae3a,2 +np.float32,0x8058de73,0x8058de73,2 +np.float32,0x7e4d58c0,0x3f800000,2 +np.float32,0x3d811b30,0x3d80ef79,2 +np.float32,0x7ec952cc,0x3f800000,2 +np.float32,0xfe162b1c,0xbf800000,2 +np.float32,0x3f0f1187,0x3f01d367,2 +np.float32,0xbf2f3458,0xbf182878,2 +np.float32,0x5ceb14,0x5ceb14,2 +np.float32,0xbec29476,0xbeb9b939,2 +np.float32,0x3e71f943,0x3e6d9176,2 +np.float32,0x3ededefc,0x3ed1c909,2 +np.float32,0x805df6ac,0x805df6ac,2 +np.float32,0x3e5ae2c8,0x3e579ca8,2 +np.float32,0x3f6ad2c3,0x3f397fdf,2 +np.float32,0x7d5f94d3,0x3f800000,2 +np.float32,0xbeec7fe4,0xbedd0037,2 +np.float32,0x3f645304,0x3f365b0d,2 +np.float32,0xbf69a087,0xbf38edef,2 +np.float32,0x8025102e,0x8025102e,2 +np.float32,0x800db486,0x800db486,2 +np.float32,0x4df6c7,0x4df6c7,2 +np.float32,0x806d8cdd,0x806d8cdd,2 +np.float32,0x7f0c78cc,0x3f800000,2 +np.float32,0x7e1cf70b,0x3f800000,2 +np.float32,0x3e0ae570,0x3e0a0cf7,2 +np.float32,0x80176ef8,0x80176ef8,2 +np.float32,0x3f38b60c,0x3f1e2bbb,2 +np.float32,0x3d3071e0,0x3d3055f5,2 +np.float32,0x3ebfcfdd,0x3eb750a9,2 +np.float32,0xfe2cdec0,0xbf800000,2 +np.float32,0x7eeb2eed,0x3f800000,2 +np.float32,0x8026c904,0x8026c904,2 +np.float32,0xbec79bde,0xbebe133a,2 +np.float32,0xbf7dfab6,0xbf421d47,2 +np.float32,0x805b3cfd,0x805b3cfd,2 +np.float32,0xfdfcfb68,0xbf800000,2 +np.float32,0xbd537ec0,0xbd534eaf,2 +np.float32,0x52ce73,0x52ce73,2 +np.float32,0xfeac6ea6,0xbf800000,2 +np.float32,0x3f2c2990,0x3f162d41,2 +np.float32,0x3e3354e0,0x3e318539,2 +np.float32,0x802db22b,0x802db22b,2 +np.float32,0x7f0faa83,0x3f800000,2 +np.float32,0x7f10e161,0x3f800000,2 +np.float32,0x7f165c60,0x3f800000,2 +np.float32,0xbf5a756f,0xbf315c82,2 +np.float32,0x7f5a4b68,0x3f800000,2 +np.float32,0xbd77fbf0,0xbd77ae7c,2 +np.float32,0x65d83c,0x65d83c,2 +np.float32,0x3e5f28,0x3e5f28,2 +np.float32,0x8040ec92,0x8040ec92,2 +np.float32,0xbf2b41a6,0xbf1594d5,2 +np.float32,0x7f2f88f1,0x3f800000,2 +np.float32,0xfdb64ab8,0xbf800000,2 +np.float32,0xbf7a3ff1,0xbf4082f5,2 +np.float32,0x1948fc,0x1948fc,2 +np.float32,0x802c1039,0x802c1039,2 +np.float32,0x80119274,0x80119274,2 +np.float32,0x7e885d7b,0x3f800000,2 +np.float32,0xfaf6a,0xfaf6a,2 +np.float32,0x3eba28c4,0x3eb25e1d,2 +np.float32,0x3e4df370,0x3e4b37da,2 +np.float32,0xbf19eff6,0xbf09b97d,2 +np.float32,0xbeddd3c6,0xbed0ea7f,2 +np.float32,0xff6fc971,0xbf800000,2 +np.float32,0x7e93de29,0x3f800000,2 +np.float32,0x3eb12332,0x3eaa6485,2 +np.float32,0x3eb7c6e4,0x3eb04563,2 +np.float32,0x4a67ee,0x4a67ee,2 +np.float32,0xff1cafde,0xbf800000,2 +np.float32,0x3f5e2812,0x3f3343da,2 +np.float32,0x3f060e04,0x3ef605d4,2 +np.float32,0x3e9027d8,0x3e8c76a6,2 +np.float32,0xe2d33,0xe2d33,2 +np.float32,0xff4c94fc,0xbf800000,2 +np.float32,0xbf574908,0xbf2fb26b,2 +np.float32,0xbf786c08,0xbf3fb68e,2 +np.float32,0x8011ecab,0x8011ecab,2 +np.float32,0xbf061c6a,0xbef61bfa,2 +np.float32,0x7eea5f9d,0x3f800000,2 +np.float32,0x3ea2e19c,0x3e9d99a5,2 
+np.float32,0x8071550c,0x8071550c,2 +np.float32,0x41c70b,0x41c70b,2 +np.float32,0x80291fc8,0x80291fc8,2 +np.float32,0x43b1ec,0x43b1ec,2 +np.float32,0x32f5a,0x32f5a,2 +np.float32,0xbe9310ec,0xbe8f2692,2 +np.float32,0x7f75f6bf,0x3f800000,2 +np.float32,0x3e6642a6,0x3e6274d2,2 +np.float32,0x3ecb88e0,0x3ec1733f,2 +np.float32,0x804011b6,0x804011b6,2 +np.float32,0x80629cca,0x80629cca,2 +np.float32,0x8016b914,0x8016b914,2 +np.float32,0xbdd05fc0,0xbdcfa870,2 +np.float32,0x807b824d,0x807b824d,2 +np.float32,0xfeec2576,0xbf800000,2 +np.float32,0xbf54bf22,0xbf2e584c,2 +np.float32,0xbf185eb0,0xbf089b6b,2 +np.float32,0xfbc09480,0xbf800000,2 +np.float32,0x3f413054,0x3f234e25,2 +np.float32,0x7e9e32b8,0x3f800000,2 +np.float32,0x266296,0x266296,2 +np.float32,0x460284,0x460284,2 +np.float32,0x3eb0b056,0x3ea9fe5a,2 +np.float32,0x1a7be5,0x1a7be5,2 +np.float32,0x7f099895,0x3f800000,2 +np.float32,0x3f3614f0,0x3f1c88ef,2 +np.float32,0x7e757dc2,0x3f800000,2 +np.float32,0x801fc91e,0x801fc91e,2 +np.float32,0x3f5ce37d,0x3f329ddb,2 +np.float32,0x3e664d70,0x3e627f15,2 +np.float32,0xbf38ed78,0xbf1e4dfa,2 +np.float32,0xbf5c563d,0xbf325543,2 +np.float32,0xbe91cc54,0xbe8dfb24,2 +np.float32,0x3d767fbe,0x3d7633ac,2 +np.float32,0xbf6aeb40,0xbf398b7f,2 +np.float32,0x7f40508b,0x3f800000,2 +np.float32,0x2650df,0x2650df,2 +np.float32,0xbe8cea3c,0xbe897628,2 +np.float32,0x80515af8,0x80515af8,2 +np.float32,0x7f423986,0x3f800000,2 +np.float32,0xbdf250e8,0xbdf1310c,2 +np.float32,0xfe89288a,0xbf800000,2 +np.float32,0x397b3b,0x397b3b,2 +np.float32,0x7e5e91b0,0x3f800000,2 +np.float32,0x6866e2,0x6866e2,2 +np.float32,0x7f4d8877,0x3f800000,2 +np.float32,0x3e6c4a21,0x3e682ee3,2 +np.float32,0xfc3d5980,0xbf800000,2 +np.float32,0x7eae2cd0,0x3f800000,2 +np.float32,0xbf241222,0xbf10c579,2 +np.float32,0xfebc02de,0xbf800000,2 +np.float32,0xff6e0645,0xbf800000,2 +np.float32,0x802030b6,0x802030b6,2 +np.float32,0x7ef9a441,0x3f800000,2 +np.float32,0x3fcf9f,0x3fcf9f,2 +np.float32,0xbf0ccf13,0xbf0023cc,2 +np.float32,0xfefee688,0xbf800000,2 +np.float32,0xbf6c8e0c,0xbf3a5160,2 +np.float32,0xfe749c28,0xbf800000,2 +np.float32,0x7f7fffff,0x3f800000,2 +np.float32,0x58c1a0,0x58c1a0,2 +np.float32,0x3f2de0a1,0x3f174c17,2 +np.float32,0xbf5f7138,0xbf33eb03,2 +np.float32,0x3da15270,0x3da0fd3c,2 +np.float32,0x3da66560,0x3da607e4,2 +np.float32,0xbf306f9a,0xbf18f3c6,2 +np.float32,0x3e81a4de,0x3e7de293,2 +np.float32,0xbebb5fb8,0xbeb36f1a,2 +np.float32,0x14bf64,0x14bf64,2 +np.float32,0xbeac46c6,0xbea60e73,2 +np.float32,0xbdcdf210,0xbdcd4111,2 +np.float32,0x3f7e3cd9,0x3f42395e,2 +np.float32,0xbc4be640,0xbc4be38e,2 +np.float32,0xff5f53b4,0xbf800000,2 +np.float32,0xbf1315ae,0xbf04c90b,2 +np.float32,0x80000000,0x80000000,2 +np.float32,0xbf6a4149,0xbf393aaa,2 +np.float32,0x3f66b8ee,0x3f378772,2 +np.float32,0xff29293e,0xbf800000,2 +np.float32,0xbcc989c0,0xbcc97f58,2 +np.float32,0xbd9a1b70,0xbd99d125,2 +np.float32,0xfef353cc,0xbf800000,2 +np.float32,0xbdc30cf0,0xbdc27683,2 +np.float32,0xfdfd6768,0xbf800000,2 +np.float32,0x7ebac44c,0x3f800000,2 +np.float32,0xff453cd6,0xbf800000,2 +np.float32,0x3ef07720,0x3ee03787,2 +np.float32,0x80219c14,0x80219c14,2 +np.float32,0x805553a8,0x805553a8,2 +np.float32,0x80703928,0x80703928,2 +np.float32,0xff16d3a7,0xbf800000,2 +np.float32,0x3f1472bc,0x3f05c77b,2 +np.float32,0x3eeea37a,0x3edebcf9,2 +np.float32,0x3db801e6,0x3db7838d,2 +np.float32,0x800870d2,0x800870d2,2 +np.float32,0xbea1172c,0xbe9bfa32,2 +np.float32,0x3f1f5e7c,0x3f0d8a42,2 +np.float32,0x123cdb,0x123cdb,2 +np.float32,0x7f6e6b06,0x3f800000,2 +np.float32,0x3ed80573,0x3ecc0def,2 
+np.float32,0xfea31b82,0xbf800000,2 +np.float32,0x6744e0,0x6744e0,2 +np.float32,0x695e8b,0x695e8b,2 +np.float32,0xbee3888a,0xbed5a67d,2 +np.float32,0x7f64bc2a,0x3f800000,2 +np.float32,0x7f204244,0x3f800000,2 +np.float32,0x7f647102,0x3f800000,2 +np.float32,0x3dd8ebc0,0x3dd81d03,2 +np.float32,0x801e7ab1,0x801e7ab1,2 +np.float32,0x7d034b56,0x3f800000,2 +np.float32,0x7fc00000,0x7fc00000,2 +np.float32,0x80194193,0x80194193,2 +np.float32,0xfe31c8d4,0xbf800000,2 +np.float32,0x7fc0c4,0x7fc0c4,2 +np.float32,0xd95bf,0xd95bf,2 +np.float32,0x7e4f991d,0x3f800000,2 +np.float32,0x7fc563,0x7fc563,2 +np.float32,0xbe3fcccc,0xbe3d968a,2 +np.float32,0xfdaaa1c8,0xbf800000,2 +np.float32,0xbf48e449,0xbf27c949,2 +np.float32,0x3eb6c584,0x3eaf625e,2 +np.float32,0xbea35a74,0xbe9e0702,2 +np.float32,0x3eeab47a,0x3edb89d5,2 +np.float32,0xbed99556,0xbecd5de5,2 +np.float64,0xbfb94a81e0329500,0xbfb935867ba761fe,2 +np.float64,0xbfec132f1678265e,0xbfe6900eb097abc3,2 +np.float64,0x5685ea72ad0be,0x5685ea72ad0be,2 +np.float64,0xbfd74d3169ae9a62,0xbfd652e09b9daf32,2 +np.float64,0xbfe28df53d651bea,0xbfe0b8a7f50ab433,2 +np.float64,0x0,0x0,2 +np.float64,0xbfed912738bb224e,0xbfe749e3732831ae,2 +np.float64,0x7fcc6faed838df5d,0x3ff0000000000000,2 +np.float64,0xbfe95fe9a432bfd3,0xbfe51f6349919910,2 +np.float64,0xbfc4d5900b29ab20,0xbfc4a6f496179b8b,2 +np.float64,0xbfcd6025033ac04c,0xbfccded7b34b49b0,2 +np.float64,0xbfdfa655b43f4cac,0xbfdd4ca1e5bb9db8,2 +np.float64,0xe7ea5c7fcfd4c,0xe7ea5c7fcfd4c,2 +np.float64,0xffa5449ca42a8940,0xbff0000000000000,2 +np.float64,0xffe63294c1ac6529,0xbff0000000000000,2 +np.float64,0x7feb9cbae7f73975,0x3ff0000000000000,2 +np.float64,0x800eb07c3e3d60f9,0x800eb07c3e3d60f9,2 +np.float64,0x3fc95777e932aef0,0x3fc9040391e20c00,2 +np.float64,0x800736052dee6c0b,0x800736052dee6c0b,2 +np.float64,0x3fe9ae4afd335c96,0x3fe54b569bab45c7,2 +np.float64,0x7fee4c94217c9927,0x3ff0000000000000,2 +np.float64,0x80094b594bd296b3,0x80094b594bd296b3,2 +np.float64,0xffe5adbcee6b5b7a,0xbff0000000000000,2 +np.float64,0x3fecb8eab47971d5,0x3fe6e236be6f27e9,2 +np.float64,0x44956914892ae,0x44956914892ae,2 +np.float64,0xbfe3bd18ef677a32,0xbfe190bf1e07200c,2 +np.float64,0x800104e5b46209cc,0x800104e5b46209cc,2 +np.float64,0x8008fbcecf71f79e,0x8008fbcecf71f79e,2 +np.float64,0x800f0a46a0be148d,0x800f0a46a0be148d,2 +np.float64,0x7fe657a0702caf40,0x3ff0000000000000,2 +np.float64,0xffd3ff1a9027fe36,0xbff0000000000000,2 +np.float64,0x3fe78bc87bef1790,0x3fe40d2e63aaf029,2 +np.float64,0x7feeabdc4c7d57b8,0x3ff0000000000000,2 +np.float64,0xbfabd28d8437a520,0xbfabcb8ce03a0e56,2 +np.float64,0xbfddc3a133bb8742,0xbfdbc9fdb2594451,2 +np.float64,0x7fec911565b9222a,0x3ff0000000000000,2 +np.float64,0x71302604e2605,0x71302604e2605,2 +np.float64,0xee919d2bdd234,0xee919d2bdd234,2 +np.float64,0xbfc04fcff3209fa0,0xbfc0395a739a2ce4,2 +np.float64,0xffe4668a36e8cd14,0xbff0000000000000,2 +np.float64,0xbfeeafeebefd5fde,0xbfe7cd5f3d61a3ec,2 +np.float64,0x7fddb34219bb6683,0x3ff0000000000000,2 +np.float64,0xbfd2cac6cba5958e,0xbfd24520abb2ff36,2 +np.float64,0xbfb857e49630afc8,0xbfb8452d5064dec2,2 +np.float64,0x3fd2dbf90b25b7f2,0x3fd254eaf48484c2,2 +np.float64,0x800af65c94f5ecba,0x800af65c94f5ecba,2 +np.float64,0xa0eef4bf41ddf,0xa0eef4bf41ddf,2 +np.float64,0xffd8e0a4adb1c14a,0xbff0000000000000,2 +np.float64,0xffe858f6e870b1ed,0xbff0000000000000,2 +np.float64,0x3f94c2c308298580,0x3f94c208a4bb006d,2 +np.float64,0xffb45f0d7428be18,0xbff0000000000000,2 +np.float64,0x800ed4f43dbda9e9,0x800ed4f43dbda9e9,2 +np.float64,0x8002dd697e85bad4,0x8002dd697e85bad4,2 
+np.float64,0x787ceab2f0f9e,0x787ceab2f0f9e,2 +np.float64,0xbfdff5fcc2bfebfa,0xbfdd8b736b128589,2 +np.float64,0x7fdb2b4294365684,0x3ff0000000000000,2 +np.float64,0xffe711e5e92e23cc,0xbff0000000000000,2 +np.float64,0x800b1c93f1163928,0x800b1c93f1163928,2 +np.float64,0x7fc524d2f22a49a5,0x3ff0000000000000,2 +np.float64,0x7fc88013b5310026,0x3ff0000000000000,2 +np.float64,0x3fe1a910c5e35222,0x3fe00fd779ebaa2a,2 +np.float64,0xbfb57ec9ca2afd90,0xbfb571e47ecb9335,2 +np.float64,0x7fd7594b20aeb295,0x3ff0000000000000,2 +np.float64,0x7fba4641ca348c83,0x3ff0000000000000,2 +np.float64,0xffe61393706c2726,0xbff0000000000000,2 +np.float64,0x7fd54f3c7baa9e78,0x3ff0000000000000,2 +np.float64,0xffe65ffb12ecbff6,0xbff0000000000000,2 +np.float64,0xbfba3b0376347608,0xbfba239cbbbd1b11,2 +np.float64,0x800200886d640112,0x800200886d640112,2 +np.float64,0xbfecf0ba4679e174,0xbfe6fd59de44a3ec,2 +np.float64,0xffe5c57e122b8afc,0xbff0000000000000,2 +np.float64,0x7fdaad0143355a02,0x3ff0000000000000,2 +np.float64,0x46ab32c08d567,0x46ab32c08d567,2 +np.float64,0x7ff8000000000000,0x7ff8000000000000,2 +np.float64,0xbfda7980fdb4f302,0xbfd90fa9c8066109,2 +np.float64,0x3fe237703c646ee0,0x3fe07969f8d8805a,2 +np.float64,0x8000e9fcfc21d3fb,0x8000e9fcfc21d3fb,2 +np.float64,0xbfdfe6e958bfcdd2,0xbfdd7f952fe87770,2 +np.float64,0xbd7baf217af8,0xbd7baf217af8,2 +np.float64,0xbfceba9e4b3d753c,0xbfce26e54359869a,2 +np.float64,0xb95a2caf72b46,0xb95a2caf72b46,2 +np.float64,0x3fb407e25a280fc5,0x3fb3fd71e457b628,2 +np.float64,0xa1da09d943b41,0xa1da09d943b41,2 +np.float64,0xbfe9c7271cf38e4e,0xbfe559296b471738,2 +np.float64,0x3fefae6170ff5cc3,0x3fe83c70ba82f0e1,2 +np.float64,0x7fe7375348ae6ea6,0x3ff0000000000000,2 +np.float64,0xffe18c9cc6e31939,0xbff0000000000000,2 +np.float64,0x800483d13a6907a3,0x800483d13a6907a3,2 +np.float64,0x7fe772a18caee542,0x3ff0000000000000,2 +np.float64,0xffefff64e7bffec9,0xbff0000000000000,2 +np.float64,0x7fcffc31113ff861,0x3ff0000000000000,2 +np.float64,0x3fd91e067e323c0d,0x3fd7e70bf365a7b3,2 +np.float64,0xb0a6673d614cd,0xb0a6673d614cd,2 +np.float64,0xffef9a297e3f3452,0xbff0000000000000,2 +np.float64,0xffe87cc15e70f982,0xbff0000000000000,2 +np.float64,0xffefd6ad8e7fad5a,0xbff0000000000000,2 +np.float64,0x7fe3aaa3a8a75546,0x3ff0000000000000,2 +np.float64,0xddab0341bb561,0xddab0341bb561,2 +np.float64,0x3fe996d6d7332dae,0x3fe53e3ed5be2922,2 +np.float64,0x3fdbe66a18b7ccd4,0x3fda41e6053c1512,2 +np.float64,0x8914775d1228f,0x8914775d1228f,2 +np.float64,0x3fe44621d4688c44,0x3fe1ef9c7225f8bd,2 +np.float64,0xffab29a2a4365340,0xbff0000000000000,2 +np.float64,0xffc8d4a0c431a940,0xbff0000000000000,2 +np.float64,0xbfd426e085284dc2,0xbfd382e2a9617b87,2 +np.float64,0xbfd3b0a525a7614a,0xbfd3176856faccf1,2 +np.float64,0x80036dedcb06dbdc,0x80036dedcb06dbdc,2 +np.float64,0x3feb13823b762704,0x3fe60ca3facdb696,2 +np.float64,0x3fd7246b7bae48d8,0x3fd62f08afded155,2 +np.float64,0x1,0x1,2 +np.float64,0x3fe8ade4b9715bc9,0x3fe4b97cc1387d27,2 +np.float64,0x3fdf2dbec53e5b7e,0x3fdcecfeee33de95,2 +np.float64,0x3fe4292bf9685258,0x3fe1dbb5a6704090,2 +np.float64,0xbfd21acbb8243598,0xbfd1a2ff42174cae,2 +np.float64,0xdd0d2d01ba1a6,0xdd0d2d01ba1a6,2 +np.float64,0x3fa3f3d2f427e7a0,0x3fa3f13d6f101555,2 +np.float64,0x7fdabf4aceb57e95,0x3ff0000000000000,2 +np.float64,0xd4d9e39ba9b3d,0xd4d9e39ba9b3d,2 +np.float64,0xffec773396f8ee66,0xbff0000000000000,2 +np.float64,0x3fa88cc79031198f,0x3fa887f7ade722ba,2 +np.float64,0xffe63a92066c7524,0xbff0000000000000,2 +np.float64,0xbfcf514e2e3ea29c,0xbfceb510e99aaa19,2 +np.float64,0x9d78c19d3af18,0x9d78c19d3af18,2 
+np.float64,0x7fdd748bfbbae917,0x3ff0000000000000,2 +np.float64,0xffb3594c4626b298,0xbff0000000000000,2 +np.float64,0x80068ce5b32d19cc,0x80068ce5b32d19cc,2 +np.float64,0x3fec63d60e78c7ac,0x3fe6b85536e44217,2 +np.float64,0x80080bad4dd0175b,0x80080bad4dd0175b,2 +np.float64,0xbfec6807baf8d010,0xbfe6ba69740f9687,2 +np.float64,0x7fedbae0bbfb75c0,0x3ff0000000000000,2 +np.float64,0x8001cb7aa3c396f6,0x8001cb7aa3c396f6,2 +np.float64,0x7fe1f1f03563e3df,0x3ff0000000000000,2 +np.float64,0x7fd83d3978307a72,0x3ff0000000000000,2 +np.float64,0xbfc05ffe9d20bffc,0xbfc049464e3f0af2,2 +np.float64,0xfe6e053ffcdc1,0xfe6e053ffcdc1,2 +np.float64,0xbfd3bdf39d277be8,0xbfd32386edf12726,2 +np.float64,0x800f41b27bde8365,0x800f41b27bde8365,2 +np.float64,0xbfe2c98390e59307,0xbfe0e3c9260fe798,2 +np.float64,0xffdd6206bcbac40e,0xbff0000000000000,2 +np.float64,0x67f35ef4cfe6c,0x67f35ef4cfe6c,2 +np.float64,0x800337e02ae66fc1,0x800337e02ae66fc1,2 +np.float64,0x3fe0ff70afe1fee1,0x3fdf1f46434330df,2 +np.float64,0x3fd7e0a1df2fc144,0x3fd6d3f82c8031e4,2 +np.float64,0x8008da5cd1b1b4ba,0x8008da5cd1b1b4ba,2 +np.float64,0x80065ec9e4ccbd95,0x80065ec9e4ccbd95,2 +np.float64,0x3fe1d1e559a3a3cb,0x3fe02e4f146aa1ab,2 +np.float64,0x7feb7d2f0836fa5d,0x3ff0000000000000,2 +np.float64,0xbfcb33ce9736679c,0xbfcaccd431b205bb,2 +np.float64,0x800e6d0adf5cda16,0x800e6d0adf5cda16,2 +np.float64,0x7fe46f272ca8de4d,0x3ff0000000000000,2 +np.float64,0x4fdfc73e9fbfa,0x4fdfc73e9fbfa,2 +np.float64,0x800958a13112b143,0x800958a13112b143,2 +np.float64,0xbfea01f877f403f1,0xbfe579a541594247,2 +np.float64,0xeefaf599ddf5f,0xeefaf599ddf5f,2 +np.float64,0x80038766c5e70ece,0x80038766c5e70ece,2 +np.float64,0x7fd31bc28ba63784,0x3ff0000000000000,2 +np.float64,0xbfe4df77eee9bef0,0xbfe257abe7083b77,2 +np.float64,0x7fe6790c78acf218,0x3ff0000000000000,2 +np.float64,0xffe7c66884af8cd0,0xbff0000000000000,2 +np.float64,0x800115e36f422bc8,0x800115e36f422bc8,2 +np.float64,0x3fc601945d2c0329,0x3fc5cab917bb20bc,2 +np.float64,0x3fd6ac9546ad592b,0x3fd5c55437ec3508,2 +np.float64,0xa7bd59294f7ab,0xa7bd59294f7ab,2 +np.float64,0x8005c26c8b8b84da,0x8005c26c8b8b84da,2 +np.float64,0x8257501704aea,0x8257501704aea,2 +np.float64,0x5b12aae0b6256,0x5b12aae0b6256,2 +np.float64,0x800232fe02c465fd,0x800232fe02c465fd,2 +np.float64,0x800dae28f85b5c52,0x800dae28f85b5c52,2 +np.float64,0x3fdade1ac135bc36,0x3fd964a2000ace25,2 +np.float64,0x3fed72ca04fae594,0x3fe73b9170d809f9,2 +np.float64,0x7fc6397e2b2c72fb,0x3ff0000000000000,2 +np.float64,0x3fe1f5296d23ea53,0x3fe048802d17621e,2 +np.float64,0xffe05544b920aa89,0xbff0000000000000,2 +np.float64,0xbfdb2e1588365c2c,0xbfd9a7e4113c713e,2 +np.float64,0xbfed6a06fa3ad40e,0xbfe7376be60535f8,2 +np.float64,0xbfe31dcaf5e63b96,0xbfe120417c46cac1,2 +np.float64,0xbfb7ed67ae2fdad0,0xbfb7dba14af33b00,2 +np.float64,0xffd32bb7eb265770,0xbff0000000000000,2 +np.float64,0x80039877b04730f0,0x80039877b04730f0,2 +np.float64,0x3f832e5630265cac,0x3f832e316f47f218,2 +np.float64,0xffe7fa7f732ff4fe,0xbff0000000000000,2 +np.float64,0x9649b87f2c937,0x9649b87f2c937,2 +np.float64,0xffaee447183dc890,0xbff0000000000000,2 +np.float64,0x7fe4e02dd869c05b,0x3ff0000000000000,2 +np.float64,0x3fe1d35e7463a6bd,0x3fe02f67bd21e86e,2 +np.float64,0xffe57f40fe2afe82,0xbff0000000000000,2 +np.float64,0xbfea1362b93426c6,0xbfe5833421dba8fc,2 +np.float64,0xffe9c689fe338d13,0xbff0000000000000,2 +np.float64,0xffc592dd102b25bc,0xbff0000000000000,2 +np.float64,0x3fd283c7aba5078f,0x3fd203d61d1398c3,2 +np.float64,0x8001d6820243ad05,0x8001d6820243ad05,2 +np.float64,0x3fe0ad5991e15ab4,0x3fdea14ef0d47fbd,2 
+np.float64,0x3fe3916f2ee722de,0x3fe1722684a9ffb1,2 +np.float64,0xffef9e54e03f3ca9,0xbff0000000000000,2 +np.float64,0x7fe864faebb0c9f5,0x3ff0000000000000,2 +np.float64,0xbfed3587c3fa6b10,0xbfe71e7112df8a68,2 +np.float64,0xbfdd9efc643b3df8,0xbfdbac3a16caf208,2 +np.float64,0xbfd5ac08feab5812,0xbfd4e14575a6e41b,2 +np.float64,0xffda90fae6b521f6,0xbff0000000000000,2 +np.float64,0x8001380ecf22701e,0x8001380ecf22701e,2 +np.float64,0x7fed266fa5fa4cde,0x3ff0000000000000,2 +np.float64,0xffec6c0ac3b8d815,0xbff0000000000000,2 +np.float64,0x3fe7de43c32fbc88,0x3fe43ef62821a5a6,2 +np.float64,0x800bf4ffc357ea00,0x800bf4ffc357ea00,2 +np.float64,0x3fe125c975624b93,0x3fdf59b2de3eff5d,2 +np.float64,0x8004714c1028e299,0x8004714c1028e299,2 +np.float64,0x3fef1bfbf5fe37f8,0x3fe7fd2ba1b63c8a,2 +np.float64,0x800cae15c3195c2c,0x800cae15c3195c2c,2 +np.float64,0x7fde708e083ce11b,0x3ff0000000000000,2 +np.float64,0x7fbcee5df639dcbb,0x3ff0000000000000,2 +np.float64,0x800b1467141628cf,0x800b1467141628cf,2 +np.float64,0x3fe525e0d36a4bc2,0x3fe286b6e59e30f5,2 +np.float64,0xffe987f8b8330ff1,0xbff0000000000000,2 +np.float64,0x7e0a8284fc151,0x7e0a8284fc151,2 +np.float64,0x8006f982442df305,0x8006f982442df305,2 +np.float64,0xbfd75a3cb62eb47a,0xbfd65e54cee981c9,2 +np.float64,0x258e91104b1d3,0x258e91104b1d3,2 +np.float64,0xbfecd0056779a00b,0xbfe6ed7ae97fff1b,2 +np.float64,0x7fc3a4f9122749f1,0x3ff0000000000000,2 +np.float64,0x6e2b1024dc563,0x6e2b1024dc563,2 +np.float64,0x800d575ad4daaeb6,0x800d575ad4daaeb6,2 +np.float64,0xbfceafb1073d5f64,0xbfce1c93023d8414,2 +np.float64,0xffe895cb5f312b96,0xbff0000000000000,2 +np.float64,0x7fe7811ed4ef023d,0x3ff0000000000000,2 +np.float64,0xbfd93f952f327f2a,0xbfd803e6b5576b99,2 +np.float64,0xffdd883a3fbb1074,0xbff0000000000000,2 +np.float64,0x7fee5624eefcac49,0x3ff0000000000000,2 +np.float64,0xbfe264bb2624c976,0xbfe09a9b7cc896e7,2 +np.float64,0xffef14b417be2967,0xbff0000000000000,2 +np.float64,0xbfecbd0d94397a1b,0xbfe6e43bef852d9f,2 +np.float64,0xbfe20d9e4ba41b3c,0xbfe05a98e05846d9,2 +np.float64,0x10000000000000,0x10000000000000,2 +np.float64,0x7fefde93f7bfbd27,0x3ff0000000000000,2 +np.float64,0x80076b9e232ed73d,0x80076b9e232ed73d,2 +np.float64,0xbfe80df52c701bea,0xbfe45b754b433792,2 +np.float64,0x7fe3b5a637676b4b,0x3ff0000000000000,2 +np.float64,0x2c81d14c5903b,0x2c81d14c5903b,2 +np.float64,0x80038945c767128c,0x80038945c767128c,2 +np.float64,0xffeebaf544bd75ea,0xbff0000000000000,2 +np.float64,0xffdb1867d2b630d0,0xbff0000000000000,2 +np.float64,0x3fe3376eaee66ede,0x3fe13285579763d8,2 +np.float64,0xffddf65ca43becba,0xbff0000000000000,2 +np.float64,0xffec8e3e04791c7b,0xbff0000000000000,2 +np.float64,0x80064f4bde2c9e98,0x80064f4bde2c9e98,2 +np.float64,0x7fe534a085ea6940,0x3ff0000000000000,2 +np.float64,0xbfcbabe31d3757c8,0xbfcb3f8e70adf7e7,2 +np.float64,0xbfe45ca11e28b942,0xbfe1ff04515ef809,2 +np.float64,0x65f4df02cbe9d,0x65f4df02cbe9d,2 +np.float64,0xb08b0cbb61162,0xb08b0cbb61162,2 +np.float64,0x3feae2e8b975c5d1,0x3fe5f302b5e8eda2,2 +np.float64,0x7fcf277ff93e4eff,0x3ff0000000000000,2 +np.float64,0x80010999c4821334,0x80010999c4821334,2 +np.float64,0xbfd7f65911afecb2,0xbfd6e6e9cd098f8b,2 +np.float64,0x800e0560ec3c0ac2,0x800e0560ec3c0ac2,2 +np.float64,0x7fec4152ba3882a4,0x3ff0000000000000,2 +np.float64,0xbfb5c77cd42b8ef8,0xbfb5ba1336084908,2 +np.float64,0x457ff1b68afff,0x457ff1b68afff,2 +np.float64,0x5323ec56a647e,0x5323ec56a647e,2 +np.float64,0xbfeed16cf8bda2da,0xbfe7dc49fc9ae549,2 +np.float64,0xffe8446106b088c1,0xbff0000000000000,2 +np.float64,0xffb93cd13c3279a0,0xbff0000000000000,2 
+np.float64,0x7fe515c2aeea2b84,0x3ff0000000000000,2 +np.float64,0x80099df83f933bf1,0x80099df83f933bf1,2 +np.float64,0x7fb3a375562746ea,0x3ff0000000000000,2 +np.float64,0x7fcd7efa243afdf3,0x3ff0000000000000,2 +np.float64,0xffe40cddb12819bb,0xbff0000000000000,2 +np.float64,0x8008b68eecd16d1e,0x8008b68eecd16d1e,2 +np.float64,0x2aec688055d8e,0x2aec688055d8e,2 +np.float64,0xffe23750bc646ea1,0xbff0000000000000,2 +np.float64,0x5adacf60b5b7,0x5adacf60b5b7,2 +np.float64,0x7fefb29b1cbf6535,0x3ff0000000000000,2 +np.float64,0xbfeadbf90175b7f2,0xbfe5ef55e2194794,2 +np.float64,0xeaad2885d55a5,0xeaad2885d55a5,2 +np.float64,0xffd7939fba2f2740,0xbff0000000000000,2 +np.float64,0x3fd187ea3aa30fd4,0x3fd11af023472386,2 +np.float64,0xbf6eb579c03d6b00,0xbf6eb57052f47019,2 +np.float64,0x3fefb67b3bff6cf6,0x3fe83fe4499969ac,2 +np.float64,0xbfe5183aacea3076,0xbfe27da1aa0b61a0,2 +np.float64,0xbfb83e47a2307c90,0xbfb82bcb0e12db42,2 +np.float64,0x80088849b1b11094,0x80088849b1b11094,2 +np.float64,0x800ceeed7399dddb,0x800ceeed7399dddb,2 +np.float64,0x80097cd90892f9b2,0x80097cd90892f9b2,2 +np.float64,0x7ec73feefd8e9,0x7ec73feefd8e9,2 +np.float64,0x7fe3291de5a6523b,0x3ff0000000000000,2 +np.float64,0xbfd537086daa6e10,0xbfd4787af5f60653,2 +np.float64,0x800e8ed4455d1da9,0x800e8ed4455d1da9,2 +np.float64,0x800ef8d19cbdf1a3,0x800ef8d19cbdf1a3,2 +np.float64,0x800dc4fa3a5b89f5,0x800dc4fa3a5b89f5,2 +np.float64,0xaa8b85cd55171,0xaa8b85cd55171,2 +np.float64,0xffd67a5f40acf4be,0xbff0000000000000,2 +np.float64,0xbfb7496db22e92d8,0xbfb7390a48130861,2 +np.float64,0x3fd86a8e7ab0d51d,0x3fd74bfba0f72616,2 +np.float64,0xffb7f5b7fc2feb70,0xbff0000000000000,2 +np.float64,0xbfea0960a7f412c1,0xbfe57db6d0ff4191,2 +np.float64,0x375f4fc26ebeb,0x375f4fc26ebeb,2 +np.float64,0x800c537e70b8a6fd,0x800c537e70b8a6fd,2 +np.float64,0x800b3f4506d67e8a,0x800b3f4506d67e8a,2 +np.float64,0x7fe61f2d592c3e5a,0x3ff0000000000000,2 +np.float64,0xffefffffffffffff,0xbff0000000000000,2 +np.float64,0x8005d0bb84eba178,0x8005d0bb84eba178,2 +np.float64,0x800c78b0ec18f162,0x800c78b0ec18f162,2 +np.float64,0xbfc42cccfb285998,0xbfc4027392f66b0d,2 +np.float64,0x3fd8fdc73fb1fb8e,0x3fd7cb46f928153f,2 +np.float64,0x800c71754298e2eb,0x800c71754298e2eb,2 +np.float64,0x3fe4aa7a96a954f5,0x3fe233f5d3bc1352,2 +np.float64,0x7fd53841f6aa7083,0x3ff0000000000000,2 +np.float64,0x3fd0a887b8a15110,0x3fd04ac3b9c0d1ca,2 +np.float64,0x8007b8e164cf71c4,0x8007b8e164cf71c4,2 +np.float64,0xbfddc35c66bb86b8,0xbfdbc9c5dddfb014,2 +np.float64,0x6a3756fed46eb,0x6a3756fed46eb,2 +np.float64,0xffd3dcd05527b9a0,0xbff0000000000000,2 +np.float64,0xbfd7dc75632fb8ea,0xbfd6d0538b340a98,2 +np.float64,0x17501f822ea05,0x17501f822ea05,2 +np.float64,0xbfe1f98b99a3f317,0xbfe04bbf8f8b6cb3,2 +np.float64,0x66ea65d2cdd4d,0x66ea65d2cdd4d,2 +np.float64,0xbfd12241e2224484,0xbfd0bc62f46ea5e1,2 +np.float64,0x3fed6e6fb3fadcdf,0x3fe7398249097285,2 +np.float64,0x3fe0b5ebeba16bd8,0x3fdeae84b3000a47,2 +np.float64,0x66d1bce8cda38,0x66d1bce8cda38,2 +np.float64,0x3fdd728db3bae51b,0x3fdb880f28c52713,2 +np.float64,0xffb45dbe5228bb80,0xbff0000000000000,2 +np.float64,0x1ff8990c3ff14,0x1ff8990c3ff14,2 +np.float64,0x800a68e8f294d1d2,0x800a68e8f294d1d2,2 +np.float64,0xbfe4d08b84a9a117,0xbfe24da40bff6be7,2 +np.float64,0x3fe0177f0ee02efe,0x3fddb83c5971df51,2 +np.float64,0xffc56893692ad128,0xbff0000000000000,2 +np.float64,0x51b44f6aa368b,0x51b44f6aa368b,2 +np.float64,0x2258ff4e44b21,0x2258ff4e44b21,2 +np.float64,0x3fe913649e7226c9,0x3fe4f3f119530f53,2 +np.float64,0xffe3767df766ecfc,0xbff0000000000000,2 
+np.float64,0xbfe62ae12fec55c2,0xbfe33108f1f22a94,2 +np.float64,0x7fb6a6308e2d4c60,0x3ff0000000000000,2 +np.float64,0xbfe00f2085e01e41,0xbfddab19b6fc77d1,2 +np.float64,0x3fb66447dc2cc890,0x3fb655b4f46844f0,2 +np.float64,0x3fd80238f6b00470,0x3fd6f143be1617d6,2 +np.float64,0xbfd05bfeb3a0b7fe,0xbfd0031ab3455e15,2 +np.float64,0xffc3a50351274a08,0xbff0000000000000,2 +np.float64,0xffd8f4241cb1e848,0xbff0000000000000,2 +np.float64,0xbfca72a88c34e550,0xbfca13ebe85f2aca,2 +np.float64,0x3fd47d683ba8fad0,0x3fd3d13f1176ed8c,2 +np.float64,0x3fb6418e642c831d,0x3fb6333ebe479ff2,2 +np.float64,0x800fde8e023fbd1c,0x800fde8e023fbd1c,2 +np.float64,0x8001fb01e323f605,0x8001fb01e323f605,2 +np.float64,0x3febb21ff9f76440,0x3fe65ed788d52fee,2 +np.float64,0x3fe47553ffe8eaa8,0x3fe20fe01f853603,2 +np.float64,0x7fca20b3f9344167,0x3ff0000000000000,2 +np.float64,0x3fe704f4ec6e09ea,0x3fe3ba7277201805,2 +np.float64,0xf864359df0c87,0xf864359df0c87,2 +np.float64,0x4d96b01c9b2d7,0x4d96b01c9b2d7,2 +np.float64,0x3fe8a09fe9f14140,0x3fe4b1c6a2d2e095,2 +np.float64,0xffc46c61b228d8c4,0xbff0000000000000,2 +np.float64,0x3fe680a837ed0150,0x3fe3679d6eeb6485,2 +np.float64,0xbfecedc20f39db84,0xbfe6fbe9ee978bf6,2 +np.float64,0x3fb2314eae24629d,0x3fb2297ba6d55d2d,2 +np.float64,0x3fe9f0b8e7b3e172,0x3fe57026eae36db3,2 +np.float64,0x80097a132ed2f427,0x80097a132ed2f427,2 +np.float64,0x800ae5a41955cb49,0x800ae5a41955cb49,2 +np.float64,0xbfd7527279aea4e4,0xbfd6577de356e1bd,2 +np.float64,0x3fe27d3e01e4fa7c,0x3fe0ac7dd96f9179,2 +np.float64,0x7fedd8cb01bbb195,0x3ff0000000000000,2 +np.float64,0x78f8695af1f0e,0x78f8695af1f0e,2 +np.float64,0x800d2d0e927a5a1d,0x800d2d0e927a5a1d,2 +np.float64,0xffe74b46fb2e968e,0xbff0000000000000,2 +np.float64,0xbfdd12d4c8ba25aa,0xbfdb39dae49e1c10,2 +np.float64,0xbfd6c14710ad828e,0xbfd5d79ef5a8d921,2 +np.float64,0x921f4e55243ea,0x921f4e55243ea,2 +np.float64,0x800b4e4c80969c99,0x800b4e4c80969c99,2 +np.float64,0x7fe08c6ab7e118d4,0x3ff0000000000000,2 +np.float64,0xbfed290014fa5200,0xbfe71871f7e859ed,2 +np.float64,0x8008c1d5c59183ac,0x8008c1d5c59183ac,2 +np.float64,0x3fd339e68c2673cd,0x3fd2aaff3f165a9d,2 +np.float64,0xbfdd20d8113a41b0,0xbfdb4553ea2cb2fb,2 +np.float64,0x3fe52a25deea544c,0x3fe2898d5bf4442c,2 +np.float64,0x498602d4930c1,0x498602d4930c1,2 +np.float64,0x3fd8c450113188a0,0x3fd799b0b2a6c43c,2 +np.float64,0xbfd72bc2f2ae5786,0xbfd6357e15ba7f70,2 +np.float64,0xbfd076188ea0ec32,0xbfd01b8fce44d1af,2 +np.float64,0x9aace1713559c,0x9aace1713559c,2 +np.float64,0x8008a730e8914e62,0x8008a730e8914e62,2 +np.float64,0x7fe9e9a3d833d347,0x3ff0000000000000,2 +np.float64,0x800d3a0d69da741b,0x800d3a0d69da741b,2 +np.float64,0xbfe3e28a29e7c514,0xbfe1aad7643a2d19,2 +np.float64,0x7fe9894c71331298,0x3ff0000000000000,2 +np.float64,0xbfe7c6acb5ef8d5a,0xbfe430c9e258ce62,2 +np.float64,0xffb5a520a62b4a40,0xbff0000000000000,2 +np.float64,0x7fc02109ae204212,0x3ff0000000000000,2 +np.float64,0xb5c58f196b8b2,0xb5c58f196b8b2,2 +np.float64,0x3feb4ee82e769dd0,0x3fe62bae9a39d8b1,2 +np.float64,0x3fec5c3cf278b87a,0x3fe6b49000f12441,2 +np.float64,0x81f64b8103eca,0x81f64b8103eca,2 +np.float64,0xbfeab00d73f5601b,0xbfe5d7f755ab73d9,2 +np.float64,0x3fd016bf28a02d7e,0x3fcf843ea23bcd3c,2 +np.float64,0xbfa1db617423b6c0,0xbfa1d9872ddeb5a8,2 +np.float64,0x3fe83c879d70790f,0x3fe4771502d8f012,2 +np.float64,0x6b267586d64cf,0x6b267586d64cf,2 +np.float64,0x3fc91b6d3f3236d8,0x3fc8ca3eb4da25a9,2 +np.float64,0x7fd4e3f8f3a9c7f1,0x3ff0000000000000,2 +np.float64,0x800a75899214eb14,0x800a75899214eb14,2 +np.float64,0x7fdb1f2e07b63e5b,0x3ff0000000000000,2 
+np.float64,0xffe7805a11ef00b4,0xbff0000000000000,2 +np.float64,0x3fc8e1b88a31c371,0x3fc892af45330818,2 +np.float64,0xbfe809fe447013fc,0xbfe45918f07da4d9,2 +np.float64,0xbfeb9d7f2ab73afe,0xbfe65446bfddc792,2 +np.float64,0x3fb47f0a5c28fe15,0x3fb473db9113e880,2 +np.float64,0x800a17ae3cb42f5d,0x800a17ae3cb42f5d,2 +np.float64,0xf5540945eaa81,0xf5540945eaa81,2 +np.float64,0xbfe577fc26aaeff8,0xbfe2bcfbf2cf69ff,2 +np.float64,0xbfb99b3e06333680,0xbfb98577b88e0515,2 +np.float64,0x7fd9290391b25206,0x3ff0000000000000,2 +np.float64,0x7fe1aa62ffa354c5,0x3ff0000000000000,2 +np.float64,0x7b0189a0f604,0x7b0189a0f604,2 +np.float64,0x3f9000ed602001db,0x3f900097fe168105,2 +np.float64,0x3fd576128d2aec25,0x3fd4b1002c92286f,2 +np.float64,0xffecc98ece79931d,0xbff0000000000000,2 +np.float64,0x800a1736c7f42e6e,0x800a1736c7f42e6e,2 +np.float64,0xbfed947548bb28eb,0xbfe74b71479ae739,2 +np.float64,0xa45c032148b9,0xa45c032148b9,2 +np.float64,0xbfc13d011c227a04,0xbfc1228447de5e9f,2 +np.float64,0xffed8baa6ebb1754,0xbff0000000000000,2 +np.float64,0x800ea2de243d45bc,0x800ea2de243d45bc,2 +np.float64,0x8001396be52272d9,0x8001396be52272d9,2 +np.float64,0xd018d1cda031a,0xd018d1cda031a,2 +np.float64,0x7fe1fece1fe3fd9b,0x3ff0000000000000,2 +np.float64,0x8009ac484c135891,0x8009ac484c135891,2 +np.float64,0x3fc560ad132ac15a,0x3fc52e5a9479f08e,2 +np.float64,0x3fd6f80ebe2df01d,0x3fd607f70ce8e3f4,2 +np.float64,0xbfd3e69e82a7cd3e,0xbfd34887c2a40699,2 +np.float64,0x3fe232d9baa465b3,0x3fe0760a822ada0c,2 +np.float64,0x3fe769bbc6eed378,0x3fe3f872680f6631,2 +np.float64,0xffe63dbd952c7b7a,0xbff0000000000000,2 +np.float64,0x4e0c00da9c181,0x4e0c00da9c181,2 +np.float64,0xffeae4d89735c9b0,0xbff0000000000000,2 +np.float64,0x3fe030bcbb606179,0x3fdddfc66660bfce,2 +np.float64,0x7fe35ca40d66b947,0x3ff0000000000000,2 +np.float64,0xbfd45bd66628b7ac,0xbfd3b2e04bfe7866,2 +np.float64,0x3fd1f0be2323e17c,0x3fd17c1c340d7a48,2 +np.float64,0x3fd7123b6cae2478,0x3fd61f0675aa9ae1,2 +np.float64,0xbfe918a377723147,0xbfe4f6efe66f5714,2 +np.float64,0x7fc400356f28006a,0x3ff0000000000000,2 +np.float64,0x7fd2dead70a5bd5a,0x3ff0000000000000,2 +np.float64,0xffe9c28f81f3851e,0xbff0000000000000,2 +np.float64,0x3fd09b1ec7a1363e,0x3fd03e3894320140,2 +np.float64,0x7fe6e80c646dd018,0x3ff0000000000000,2 +np.float64,0x7fec3760a4786ec0,0x3ff0000000000000,2 +np.float64,0x309eb6ee613d8,0x309eb6ee613d8,2 +np.float64,0x800731cb0ece6397,0x800731cb0ece6397,2 +np.float64,0xbfdb0c553db618aa,0xbfd98b8a4680ee60,2 +np.float64,0x3fd603a52eac074c,0x3fd52f6b53de7455,2 +np.float64,0x9ecb821b3d971,0x9ecb821b3d971,2 +np.float64,0x3feb7d64dc36faca,0x3fe643c2754bb7f4,2 +np.float64,0xffeb94825ef72904,0xbff0000000000000,2 +np.float64,0x24267418484cf,0x24267418484cf,2 +np.float64,0xbfa6b2fbac2d65f0,0xbfa6af2dca5bfa6f,2 +np.float64,0x8010000000000000,0x8010000000000000,2 +np.float64,0xffe6873978ed0e72,0xbff0000000000000,2 +np.float64,0x800447934ba88f27,0x800447934ba88f27,2 +np.float64,0x3fef305f09fe60be,0x3fe806156b8ca47c,2 +np.float64,0xffd441c697a8838e,0xbff0000000000000,2 +np.float64,0xbfa7684f6c2ed0a0,0xbfa764238d34830c,2 +np.float64,0xffb2c976142592f0,0xbff0000000000000,2 +np.float64,0xbfcc9d1585393a2c,0xbfcc25756bcbca1f,2 +np.float64,0xbfd477bb1ba8ef76,0xbfd3cc1d2114e77e,2 +np.float64,0xbfed1559983a2ab3,0xbfe70f03afd994ee,2 +np.float64,0xbfeb51139036a227,0xbfe62ccf56bc7fff,2 +np.float64,0x7d802890fb006,0x7d802890fb006,2 +np.float64,0x800e00af777c015f,0x800e00af777c015f,2 +np.float64,0x800647ce128c8f9d,0x800647ce128c8f9d,2 +np.float64,0x800a26da91d44db6,0x800a26da91d44db6,2 
+np.float64,0x3fdc727eddb8e4fe,0x3fdab5fd9db630b3,2 +np.float64,0x7fd06def2ba0dbdd,0x3ff0000000000000,2 +np.float64,0xffe23678c4a46cf1,0xbff0000000000000,2 +np.float64,0xbfe7198e42ee331c,0xbfe3c7326c9c7553,2 +np.float64,0xffae465f3c3c8cc0,0xbff0000000000000,2 +np.float64,0xff9aea7c5035d500,0xbff0000000000000,2 +np.float64,0xbfeae49c0f35c938,0xbfe5f3e9326cb08b,2 +np.float64,0x3f9a16f300342de6,0x3f9a1581212be50f,2 +np.float64,0x8d99e2c31b33d,0x8d99e2c31b33d,2 +np.float64,0xffd58af253ab15e4,0xbff0000000000000,2 +np.float64,0xbfd205cd25a40b9a,0xbfd18f97155f8b25,2 +np.float64,0xbfebe839bbf7d074,0xbfe67a6024e8fefe,2 +np.float64,0xbfe4fb3595a9f66b,0xbfe26a42f99819ea,2 +np.float64,0x800e867c739d0cf9,0x800e867c739d0cf9,2 +np.float64,0x8bc4274f17885,0x8bc4274f17885,2 +np.float64,0xaec8914b5d912,0xaec8914b5d912,2 +np.float64,0x7fd1d64473a3ac88,0x3ff0000000000000,2 +np.float64,0xbfe6d6f69cedaded,0xbfe39dd61bc7e23e,2 +np.float64,0x7fed05039d7a0a06,0x3ff0000000000000,2 +np.float64,0xbfc40eab0f281d58,0xbfc3e50d14b79265,2 +np.float64,0x45179aec8a2f4,0x45179aec8a2f4,2 +np.float64,0xbfe717e362ee2fc7,0xbfe3c62a95b07d13,2 +np.float64,0xbfe5b8df0d6b71be,0xbfe2e76c7ec5013d,2 +np.float64,0x5c67ba6eb8cf8,0x5c67ba6eb8cf8,2 +np.float64,0xbfda72ce4cb4e59c,0xbfd909fdc7ecfe20,2 +np.float64,0x7fdf59a1e2beb343,0x3ff0000000000000,2 +np.float64,0xc4f7897f89ef1,0xc4f7897f89ef1,2 +np.float64,0x8fcd0a351f9a2,0x8fcd0a351f9a2,2 +np.float64,0x3fb161761022c2ec,0x3fb15aa31c464de2,2 +np.float64,0x8008a985be71530c,0x8008a985be71530c,2 +np.float64,0x3fca4ddb5e349bb7,0x3fc9f0a3b60e49c6,2 +np.float64,0x7fcc10a2d9382145,0x3ff0000000000000,2 +np.float64,0x78902b3af1206,0x78902b3af1206,2 +np.float64,0x7fe1e2765f23c4ec,0x3ff0000000000000,2 +np.float64,0xc1d288cf83a51,0xc1d288cf83a51,2 +np.float64,0x7fe8af692bb15ed1,0x3ff0000000000000,2 +np.float64,0x80057d90fb8afb23,0x80057d90fb8afb23,2 +np.float64,0x3fdc136b8fb826d8,0x3fda6749582b2115,2 +np.float64,0x800ec8ea477d91d5,0x800ec8ea477d91d5,2 +np.float64,0x4c0f4796981ea,0x4c0f4796981ea,2 +np.float64,0xec34c4a5d8699,0xec34c4a5d8699,2 +np.float64,0x7fce343dfb3c687b,0x3ff0000000000000,2 +np.float64,0xbfc95a98a332b530,0xbfc90705b2cc2fec,2 +np.float64,0x800d118e1dba231c,0x800d118e1dba231c,2 +np.float64,0x3fd354f310a6a9e8,0x3fd2c3bb90054154,2 +np.float64,0xbfdac0d4fab581aa,0xbfd94bf37424928e,2 +np.float64,0x3fe7f5391fefea72,0x3fe44cb49d51985b,2 +np.float64,0xd4c3c329a9879,0xd4c3c329a9879,2 +np.float64,0x3fc53977692a72f0,0x3fc50835d85c9ed1,2 +np.float64,0xbfd6989538ad312a,0xbfd5b3a2c08511fe,2 +np.float64,0xbfe329f2906653e5,0xbfe128ec1525a1c0,2 +np.float64,0x7ff0000000000000,0x3ff0000000000000,2 +np.float64,0xbfea57c90974af92,0xbfe5a87b04aa3116,2 +np.float64,0x7fdfba94043f7527,0x3ff0000000000000,2 +np.float64,0x3feedabddafdb57c,0x3fe7e06c0661978d,2 +np.float64,0x4bd9f3b697b3f,0x4bd9f3b697b3f,2 +np.float64,0x3fdd15bbfc3a2b78,0x3fdb3c3b8d070f7e,2 +np.float64,0x3fbd89ccd23b13a0,0x3fbd686b825cff80,2 +np.float64,0x7ff4000000000000,0x7ffc000000000000,2 +np.float64,0x3f9baa8928375512,0x3f9ba8d01ddd5300,2 +np.float64,0x4a3ebdf2947d8,0x4a3ebdf2947d8,2 +np.float64,0x3fe698d5c06d31ac,0x3fe376dff48312c8,2 +np.float64,0xffd5323df12a647c,0xbff0000000000000,2 +np.float64,0xffea7f111174fe22,0xbff0000000000000,2 +np.float64,0x3feb4656a9b68cad,0x3fe627392eb2156f,2 +np.float64,0x7fc1260e9c224c1c,0x3ff0000000000000,2 +np.float64,0x80056e45e5eadc8d,0x80056e45e5eadc8d,2 +np.float64,0x7fd0958ef6a12b1d,0x3ff0000000000000,2 +np.float64,0x8001f85664e3f0ae,0x8001f85664e3f0ae,2 
+np.float64,0x3fe553853beaa70a,0x3fe2a4f5e7c83558,2 +np.float64,0xbfeb33ce6276679d,0xbfe61d8ec9e5ff8c,2 +np.float64,0xbfd1b24e21a3649c,0xbfd14245df6065e9,2 +np.float64,0x3fe286fc40650df9,0x3fe0b395c8059429,2 +np.float64,0xffed378058fa6f00,0xbff0000000000000,2 +np.float64,0xbfd0c4a2d7a18946,0xbfd06509a434d6a0,2 +np.float64,0xbfea31d581f463ab,0xbfe593d976139f94,2 +np.float64,0xbfe0705c85e0e0b9,0xbfde42efa978eb0c,2 +np.float64,0xe4c4c339c9899,0xe4c4c339c9899,2 +np.float64,0x3fd68befa9ad17df,0x3fd5a870b3f1f83e,2 +np.float64,0x8000000000000001,0x8000000000000001,2 +np.float64,0x3fe294256965284b,0x3fe0bd271e22d86b,2 +np.float64,0x8005327a862a64f6,0x8005327a862a64f6,2 +np.float64,0xbfdb8155ce3702ac,0xbfd9ed9ef97920f8,2 +np.float64,0xbff0000000000000,0xbfe85efab514f394,2 +np.float64,0xffe66988f1ecd312,0xbff0000000000000,2 +np.float64,0x3fb178a85e22f150,0x3fb171b9fbf95f1d,2 +np.float64,0x7f829b900025371f,0x3ff0000000000000,2 +np.float64,0x8000000000000000,0x8000000000000000,2 +np.float64,0x8006cb77f60d96f1,0x8006cb77f60d96f1,2 +np.float64,0x3fe0c5d53aa18baa,0x3fdec7012ab92b42,2 +np.float64,0x77266426ee4cd,0x77266426ee4cd,2 +np.float64,0xbfec95f468392be9,0xbfe6d11428f60136,2 +np.float64,0x3fedbf532dfb7ea6,0x3fe75f8436dd1d58,2 +np.float64,0x8002fadd3f85f5bb,0x8002fadd3f85f5bb,2 +np.float64,0xbfefebaa8d3fd755,0xbfe8566c6aa90fba,2 +np.float64,0xffc7dd2b712fba58,0xbff0000000000000,2 +np.float64,0x7fe5d3a6e8aba74d,0x3ff0000000000000,2 +np.float64,0x2da061525b40d,0x2da061525b40d,2 +np.float64,0x7fcb9b9953373732,0x3ff0000000000000,2 +np.float64,0x2ca2f6fc59460,0x2ca2f6fc59460,2 +np.float64,0xffeb84b05af70960,0xbff0000000000000,2 +np.float64,0xffe551e86c6aa3d0,0xbff0000000000000,2 +np.float64,0xbfdb311311366226,0xbfd9aa6688faafb9,2 +np.float64,0xbfd4f3875629e70e,0xbfd43bcd73534c66,2 +np.float64,0x7fe95666f932accd,0x3ff0000000000000,2 +np.float64,0x3fc73dfb482e7bf7,0x3fc6fd70c20ebf60,2 +np.float64,0x800cd9e40939b3c8,0x800cd9e40939b3c8,2 +np.float64,0x3fb0c9fa422193f0,0x3fb0c3d38879a2ac,2 +np.float64,0xffd59a38372b3470,0xbff0000000000000,2 +np.float64,0x3fa8320ef4306420,0x3fa82d739e937d35,2 +np.float64,0x3fd517f16caa2fe4,0x3fd45c8de1e93b37,2 +np.float64,0xaed921655db24,0xaed921655db24,2 +np.float64,0x93478fb9268f2,0x93478fb9268f2,2 +np.float64,0x1615e28a2c2bd,0x1615e28a2c2bd,2 +np.float64,0xbfead23010f5a460,0xbfe5ea24d5d8f820,2 +np.float64,0x774a6070ee94d,0x774a6070ee94d,2 +np.float64,0x3fdf5874bd3eb0e9,0x3fdd0ef121dd915c,2 +np.float64,0x8004b25f53a964bf,0x8004b25f53a964bf,2 +np.float64,0xbfddacdd2ebb59ba,0xbfdbb78198fab36b,2 +np.float64,0x8008a3acf271475a,0x8008a3acf271475a,2 +np.float64,0xbfdb537c8736a6fa,0xbfd9c741038bb8f0,2 +np.float64,0xbfe56a133f6ad426,0xbfe2b3d5b8d259a1,2 +np.float64,0xffda1db531343b6a,0xbff0000000000000,2 +np.float64,0x3fcbe05f3a37c0be,0x3fcb71a54a64ddfb,2 +np.float64,0x7fe1ccaa7da39954,0x3ff0000000000000,2 +np.float64,0x3faeadd8343d5bb0,0x3faea475608860e6,2 +np.float64,0x3fe662ba1c2cc574,0x3fe354a6176e90df,2 +np.float64,0xffe4d49f4e69a93e,0xbff0000000000000,2 +np.float64,0xbfeadbc424f5b788,0xbfe5ef39dbe66343,2 +np.float64,0x99cf66f1339ed,0x99cf66f1339ed,2 +np.float64,0x33af77a2675f0,0x33af77a2675f0,2 +np.float64,0x7fec7b32ecf8f665,0x3ff0000000000000,2 +np.float64,0xffef3e44993e7c88,0xbff0000000000000,2 +np.float64,0xffe8f8ceac31f19c,0xbff0000000000000,2 +np.float64,0x7fe0d15b6da1a2b6,0x3ff0000000000000,2 +np.float64,0x4ba795c2974f3,0x4ba795c2974f3,2 +np.float64,0x3fe361aa37a6c354,0x3fe15079021d6b15,2 +np.float64,0xffe709714f6e12e2,0xbff0000000000000,2 
+np.float64,0xffe7ea6a872fd4d4,0xbff0000000000000,2 +np.float64,0xffdb9441c8b72884,0xbff0000000000000,2 +np.float64,0xffd5e11ae9abc236,0xbff0000000000000,2 +np.float64,0xffe092a08b612540,0xbff0000000000000,2 +np.float64,0x3fe1f27e1ca3e4fc,0x3fe04685b5131207,2 +np.float64,0xbfe71ce1bdee39c4,0xbfe3c940809a7081,2 +np.float64,0xffe8c3aa68318754,0xbff0000000000000,2 +np.float64,0x800d4e2919da9c52,0x800d4e2919da9c52,2 +np.float64,0x7fe6c8bca76d9178,0x3ff0000000000000,2 +np.float64,0x7fced8751e3db0e9,0x3ff0000000000000,2 +np.float64,0xd61d0c8bac3a2,0xd61d0c8bac3a2,2 +np.float64,0x3fec57732938aee6,0x3fe6b22f15f38352,2 +np.float64,0xff9251cc7024a3a0,0xbff0000000000000,2 +np.float64,0xf4a68cb9e94d2,0xf4a68cb9e94d2,2 +np.float64,0x3feed76703bdaece,0x3fe7def0fc9a080c,2 +np.float64,0xbfe8971ff7712e40,0xbfe4ac3eb8ebff07,2 +np.float64,0x3fe4825f682904bf,0x3fe218c1952fe67d,2 +np.float64,0xbfd60f7698ac1eee,0xbfd539f0979b4b0c,2 +np.float64,0x3fcf0845993e1088,0x3fce7032f7180144,2 +np.float64,0x7fc83443f3306887,0x3ff0000000000000,2 +np.float64,0x3fe93123ae726247,0x3fe504e4fc437e89,2 +np.float64,0x3fbf9eb8363f3d70,0x3fbf75cdfa6828d5,2 +np.float64,0xbf8b45e5d0368bc0,0xbf8b457c29dfe1a9,2 +np.float64,0x8006c2853d0d850b,0x8006c2853d0d850b,2 +np.float64,0xffef26e25ffe4dc4,0xbff0000000000000,2 +np.float64,0x7fefffffffffffff,0x3ff0000000000000,2 +np.float64,0xbfde98f2c2bd31e6,0xbfdc761bfab1c4cb,2 +np.float64,0xffb725e6222e4bd0,0xbff0000000000000,2 +np.float64,0x800c63ead5d8c7d6,0x800c63ead5d8c7d6,2 +np.float64,0x3fea087e95f410fd,0x3fe57d3ab440706c,2 +np.float64,0xbfdf9f8a603f3f14,0xbfdd4742d77dfa57,2 +np.float64,0xfff0000000000000,0xbff0000000000000,2 +np.float64,0xbfcdc0841d3b8108,0xbfcd3a401debba9a,2 +np.float64,0x800f0c8f4f7e191f,0x800f0c8f4f7e191f,2 +np.float64,0x800ba6e75fd74dcf,0x800ba6e75fd74dcf,2 +np.float64,0x7fee4927e8bc924f,0x3ff0000000000000,2 +np.float64,0x3fadf141903be283,0x3fade8878d9d3551,2 +np.float64,0x3efb1a267df64,0x3efb1a267df64,2 +np.float64,0xffebf55f22b7eabe,0xbff0000000000000,2 +np.float64,0x7fbe8045663d008a,0x3ff0000000000000,2 +np.float64,0x3fefc0129f7f8026,0x3fe843f8b7d6cf38,2 +np.float64,0xbfe846b420f08d68,0xbfe47d1709e43937,2 +np.float64,0x7fe8e87043f1d0e0,0x3ff0000000000000,2 +np.float64,0x3fcfb718453f6e31,0x3fcf14ecee7b32b4,2 +np.float64,0x7fe4306b71a860d6,0x3ff0000000000000,2 +np.float64,0x7fee08459f7c108a,0x3ff0000000000000,2 +np.float64,0x3fed705165fae0a3,0x3fe73a66369c5700,2 +np.float64,0x7fd0e63f4da1cc7e,0x3ff0000000000000,2 +np.float64,0xffd1a40c2ea34818,0xbff0000000000000,2 +np.float64,0xbfa369795c26d2f0,0xbfa36718218d46b3,2 +np.float64,0xef70b9f5dee17,0xef70b9f5dee17,2 +np.float64,0x3fb50a0a6e2a1410,0x3fb4fdf27724560a,2 +np.float64,0x7fe30a0f6166141e,0x3ff0000000000000,2 +np.float64,0xbfd7b3ca7daf6794,0xbfd6accb81032b2d,2 +np.float64,0x3fc21dceb3243b9d,0x3fc1ff15d5d277a3,2 +np.float64,0x3fe483e445a907c9,0x3fe219ca0e269552,2 +np.float64,0x3fb2b1e2a22563c0,0x3fb2a96554900eaf,2 +np.float64,0x4b1ff6409641,0x4b1ff6409641,2 +np.float64,0xbfd92eabc9b25d58,0xbfd7f55d7776d64e,2 +np.float64,0x8003b8604c8770c1,0x8003b8604c8770c1,2 +np.float64,0x800d20a9df1a4154,0x800d20a9df1a4154,2 +np.float64,0xecf8a535d9f15,0xecf8a535d9f15,2 +np.float64,0x3fe92d15bab25a2b,0x3fe50296aa15ae85,2 +np.float64,0x800239c205a47385,0x800239c205a47385,2 +np.float64,0x3fc48664a9290cc8,0x3fc459d126320ef6,2 +np.float64,0x3fe7620625eec40c,0x3fe3f3bcbee3e8c6,2 +np.float64,0x3fd242ff4ca48600,0x3fd1c81ed7a971c8,2 +np.float64,0xbfe39bafcfa73760,0xbfe17959c7a279db,2 +np.float64,0x7fdcd2567239a4ac,0x3ff0000000000000,2 
+np.float64,0x3fe5f2f292ebe5e6,0x3fe30d12f05e2752,2 +np.float64,0x7fda3819d1347033,0x3ff0000000000000,2 +np.float64,0xffca5b4d4334b69c,0xbff0000000000000,2 +np.float64,0xb8a2b7cd71457,0xb8a2b7cd71457,2 +np.float64,0x3fee689603fcd12c,0x3fe7ad4ace26d6dd,2 +np.float64,0x7fe26541a564ca82,0x3ff0000000000000,2 +np.float64,0x3fe6912ee66d225e,0x3fe3720d242c4d82,2 +np.float64,0xffe6580c75ecb018,0xbff0000000000000,2 +np.float64,0x7fe01a3370603466,0x3ff0000000000000,2 +np.float64,0xffe84e3f84b09c7e,0xbff0000000000000,2 +np.float64,0x3ff0000000000000,0x3fe85efab514f394,2 +np.float64,0x3fe214d4266429a8,0x3fe05fec03a3c247,2 +np.float64,0x3fd00aec5da015d8,0x3fcf6e070ad4ad62,2 +np.float64,0x800aac8631f5590d,0x800aac8631f5590d,2 +np.float64,0xbfe7c4f5f76f89ec,0xbfe42fc1c57b4a13,2 +np.float64,0xaf146c7d5e28e,0xaf146c7d5e28e,2 +np.float64,0xbfe57188b66ae312,0xbfe2b8be4615ef75,2 +np.float64,0xffef8cb8e1ff1971,0xbff0000000000000,2 +np.float64,0x8001daf8aa63b5f2,0x8001daf8aa63b5f2,2 +np.float64,0x3fdddcc339bbb986,0x3fdbde5f3783538b,2 +np.float64,0xdd8c92c3bb193,0xdd8c92c3bb193,2 +np.float64,0xbfe861a148f0c342,0xbfe48cf1d228a336,2 +np.float64,0xffe260a32e24c146,0xbff0000000000000,2 +np.float64,0x1f7474b43ee8f,0x1f7474b43ee8f,2 +np.float64,0x3fe81dbd89703b7c,0x3fe464d78df92b7b,2 +np.float64,0x7fed0101177a0201,0x3ff0000000000000,2 +np.float64,0x7fd8b419a8316832,0x3ff0000000000000,2 +np.float64,0x3fe93debccf27bd8,0x3fe50c27727917f0,2 +np.float64,0xe5ead05bcbd5a,0xe5ead05bcbd5a,2 +np.float64,0xbfebbbc4cff7778a,0xbfe663c4ca003bbf,2 +np.float64,0xbfea343eb474687e,0xbfe59529f73ea151,2 +np.float64,0x3fbe74a5963ce94b,0x3fbe50123ed05d8d,2 +np.float64,0x3fd31d3a5d263a75,0x3fd290c026cb38a5,2 +np.float64,0xbfd79908acaf3212,0xbfd695620e31c3c6,2 +np.float64,0xbfc26a350324d46c,0xbfc249f335f3e465,2 +np.float64,0xbfac38d5583871b0,0xbfac31866d12a45e,2 +np.float64,0x3fe40cea672819d5,0x3fe1c83754e72c92,2 +np.float64,0xbfa74770642e8ee0,0xbfa74355fcf67332,2 +np.float64,0x7fc60942d32c1285,0x3ff0000000000000,2 diff --git a/.venv/lib/python3.12/site-packages/numpy/core/tests/examples/cython/checks.pyx b/.venv/lib/python3.12/site-packages/numpy/core/tests/examples/cython/checks.pyx new file mode 100644 index 00000000..c5529ee8 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/examples/cython/checks.pyx @@ -0,0 +1,35 @@ +#cython: language_level=3 + +""" +Functions in this module give python-space wrappers for cython functions +exposed in numpy/__init__.pxd, so they can be tested in test_cython.py +""" +cimport numpy as cnp +cnp.import_array() + + +def is_td64(obj): + return cnp.is_timedelta64_object(obj) + + +def is_dt64(obj): + return cnp.is_datetime64_object(obj) + + +def get_dt64_value(obj): + return cnp.get_datetime64_value(obj) + + +def get_td64_value(obj): + return cnp.get_timedelta64_value(obj) + + +def get_dt64_unit(obj): + return cnp.get_datetime64_unit(obj) + + +def is_integer(obj): + return isinstance(obj, (cnp.integer, int)) + +def conv_intp(cnp.intp_t val): + return val diff --git a/.venv/lib/python3.12/site-packages/numpy/core/tests/examples/cython/meson.build b/.venv/lib/python3.12/site-packages/numpy/core/tests/examples/cython/meson.build new file mode 100644 index 00000000..836b74ac --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/examples/cython/meson.build @@ -0,0 +1,36 @@ +project('checks', 'c', 'cython') + +py = import('python').find_installation(pure: false) + +cc = meson.get_compiler('c') +cy = meson.get_compiler('cython') + +if not 
cy.version().version_compare('>=0.29.35') + error('tests requires Cython >= 0.29.35') +endif + +npy_include_path = run_command(py, [ + '-c', + 'import os; os.chdir(".."); import numpy; print(os.path.abspath(numpy.get_include()))' + ], check: true).stdout().strip() + +npy_path = run_command(py, [ + '-c', + 'import os; os.chdir(".."); import numpy; print(os.path.dirname(numpy.__file__).removesuffix("numpy"))' + ], check: true).stdout().strip() + +# TODO: This is a hack due to gh-25135, where cython may not find the right +# __init__.pyd file. +add_project_arguments('-I', npy_path, language : 'cython') + +py.extension_module( + 'checks', + 'checks.pyx', + install: false, + c_args: [ + '-DNPY_NO_DEPRECATED_API=0', # Cython still uses old NumPy C API + # Require 1.25+ to test datetime additions + '-DNPY_TARGET_VERSION=NPY_2_0_API_VERSION', + ], + include_directories: [npy_include_path], +) diff --git a/.venv/lib/python3.12/site-packages/numpy/core/tests/examples/cython/setup.py b/.venv/lib/python3.12/site-packages/numpy/core/tests/examples/cython/setup.py new file mode 100644 index 00000000..6e34aa77 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/examples/cython/setup.py @@ -0,0 +1,25 @@ +""" +Provide python-space access to the functions exposed in numpy/__init__.pxd +for testing. +""" + +import numpy as np +from distutils.core import setup +from Cython.Build import cythonize +from setuptools.extension import Extension +import os + +macros = [("NPY_NO_DEPRECATED_API", 0)] + +checks = Extension( + "checks", + sources=[os.path.join('.', "checks.pyx")], + include_dirs=[np.get_include()], + define_macros=macros, +) + +extensions = [checks] + +setup( + ext_modules=cythonize(extensions) +) diff --git a/.venv/lib/python3.12/site-packages/numpy/core/tests/examples/limited_api/limited_api.c b/.venv/lib/python3.12/site-packages/numpy/core/tests/examples/limited_api/limited_api.c new file mode 100644 index 00000000..698c54c5 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/examples/limited_api/limited_api.c @@ -0,0 +1,17 @@ +#define Py_LIMITED_API 0x03060000 + +#include <Python.h> +#include <numpy/arrayobject.h> +#include <numpy/ufuncobject.h> + +static PyModuleDef moduledef = { + .m_base = PyModuleDef_HEAD_INIT, + .m_name = "limited_api" +}; + +PyMODINIT_FUNC PyInit_limited_api(void) +{ + import_array(); + import_umath(); + return PyModule_Create(&moduledef); +} diff --git a/.venv/lib/python3.12/site-packages/numpy/core/tests/examples/limited_api/setup.py b/.venv/lib/python3.12/site-packages/numpy/core/tests/examples/limited_api/setup.py new file mode 100644 index 00000000..18747dc8 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/examples/limited_api/setup.py @@ -0,0 +1,22 @@ +""" +Build an example package using the limited Python C API. 
+""" + +import numpy as np +from setuptools import setup, Extension +import os + +macros = [("NPY_NO_DEPRECATED_API", 0), ("Py_LIMITED_API", "0x03060000")] + +limited_api = Extension( + "limited_api", + sources=[os.path.join('.', "limited_api.c")], + include_dirs=[np.get_include()], + define_macros=macros, +) + +extensions = [limited_api] + +setup( + ext_modules=extensions +) diff --git a/.venv/lib/python3.12/site-packages/numpy/core/tests/test__exceptions.py b/.venv/lib/python3.12/site-packages/numpy/core/tests/test__exceptions.py new file mode 100644 index 00000000..10b87e05 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/test__exceptions.py @@ -0,0 +1,88 @@ +""" +Tests of the ._exceptions module. Primarily for exercising the __str__ methods. +""" + +import pickle + +import pytest +import numpy as np + +_ArrayMemoryError = np.core._exceptions._ArrayMemoryError +_UFuncNoLoopError = np.core._exceptions._UFuncNoLoopError + +class TestArrayMemoryError: + def test_pickling(self): + """ Test that _ArrayMemoryError can be pickled """ + error = _ArrayMemoryError((1023,), np.dtype(np.uint8)) + res = pickle.loads(pickle.dumps(error)) + assert res._total_size == error._total_size + + def test_str(self): + e = _ArrayMemoryError((1023,), np.dtype(np.uint8)) + str(e) # not crashing is enough + + # testing these properties is easier than testing the full string repr + def test__size_to_string(self): + """ Test e._size_to_string """ + f = _ArrayMemoryError._size_to_string + Ki = 1024 + assert f(0) == '0 bytes' + assert f(1) == '1 bytes' + assert f(1023) == '1023 bytes' + assert f(Ki) == '1.00 KiB' + assert f(Ki+1) == '1.00 KiB' + assert f(10*Ki) == '10.0 KiB' + assert f(int(999.4*Ki)) == '999. KiB' + assert f(int(1023.4*Ki)) == '1023. KiB' + assert f(int(1023.5*Ki)) == '1.00 MiB' + assert f(Ki*Ki) == '1.00 MiB' + + # 1023.9999 Mib should round to 1 GiB + assert f(int(Ki*Ki*Ki*0.9999)) == '1.00 GiB' + assert f(Ki*Ki*Ki*Ki*Ki*Ki) == '1.00 EiB' + # larger than sys.maxsize, adding larger prefixes isn't going to help + # anyway. + assert f(Ki*Ki*Ki*Ki*Ki*Ki*123456) == '123456. 
EiB' + + def test__total_size(self): + """ Test e._total_size """ + e = _ArrayMemoryError((1,), np.dtype(np.uint8)) + assert e._total_size == 1 + + e = _ArrayMemoryError((2, 4), np.dtype((np.uint64, 16))) + assert e._total_size == 1024 + + +class TestUFuncNoLoopError: + def test_pickling(self): + """ Test that _UFuncNoLoopError can be pickled """ + assert isinstance(pickle.dumps(_UFuncNoLoopError), bytes) + + +@pytest.mark.parametrize("args", [ + (2, 1, None), + (2, 1, "test_prefix"), + ("test message",), +]) +class TestAxisError: + def test_attr(self, args): + """Validate attribute types.""" + exc = np.AxisError(*args) + if len(args) == 1: + assert exc.axis is None + assert exc.ndim is None + else: + axis, ndim, *_ = args + assert exc.axis == axis + assert exc.ndim == ndim + + def test_pickling(self, args): + """Test that `AxisError` can be pickled.""" + exc = np.AxisError(*args) + exc2 = pickle.loads(pickle.dumps(exc)) + + assert type(exc) is type(exc2) + for name in ("axis", "ndim", "args"): + attr1 = getattr(exc, name) + attr2 = getattr(exc2, name) + assert attr1 == attr2, name diff --git a/.venv/lib/python3.12/site-packages/numpy/core/tests/test_abc.py b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_abc.py new file mode 100644 index 00000000..8b12d07a --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_abc.py @@ -0,0 +1,54 @@ +from numpy.testing import assert_ + +import numbers + +import numpy as np +from numpy.core.numerictypes import sctypes + +class TestABC: + def test_abstract(self): + assert_(issubclass(np.number, numbers.Number)) + + assert_(issubclass(np.inexact, numbers.Complex)) + assert_(issubclass(np.complexfloating, numbers.Complex)) + assert_(issubclass(np.floating, numbers.Real)) + + assert_(issubclass(np.integer, numbers.Integral)) + assert_(issubclass(np.signedinteger, numbers.Integral)) + assert_(issubclass(np.unsignedinteger, numbers.Integral)) + + def test_floats(self): + for t in sctypes['float']: + assert_(isinstance(t(), numbers.Real), + f"{t.__name__} is not instance of Real") + assert_(issubclass(t, numbers.Real), + f"{t.__name__} is not subclass of Real") + assert_(not isinstance(t(), numbers.Rational), + f"{t.__name__} is instance of Rational") + assert_(not issubclass(t, numbers.Rational), + f"{t.__name__} is subclass of Rational") + + def test_complex(self): + for t in sctypes['complex']: + assert_(isinstance(t(), numbers.Complex), + f"{t.__name__} is not instance of Complex") + assert_(issubclass(t, numbers.Complex), + f"{t.__name__} is not subclass of Complex") + assert_(not isinstance(t(), numbers.Real), + f"{t.__name__} is instance of Real") + assert_(not issubclass(t, numbers.Real), + f"{t.__name__} is subclass of Real") + + def test_int(self): + for t in sctypes['int']: + assert_(isinstance(t(), numbers.Integral), + f"{t.__name__} is not instance of Integral") + assert_(issubclass(t, numbers.Integral), + f"{t.__name__} is not subclass of Integral") + + def test_uint(self): + for t in sctypes['uint']: + assert_(isinstance(t(), numbers.Integral), + f"{t.__name__} is not instance of Integral") + assert_(issubclass(t, numbers.Integral), + f"{t.__name__} is not subclass of Integral") diff --git a/.venv/lib/python3.12/site-packages/numpy/core/tests/test_api.py b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_api.py new file mode 100644 index 00000000..0d922869 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_api.py @@ -0,0 +1,615 @@ +import sys + +import numpy as np +from 
numpy.core._rational_tests import rational +import pytest +from numpy.testing import ( + assert_, assert_equal, assert_array_equal, assert_raises, assert_warns, + HAS_REFCOUNT + ) + + +def test_array_array(): + tobj = type(object) + ones11 = np.ones((1, 1), np.float64) + tndarray = type(ones11) + # Test is_ndarray + assert_equal(np.array(ones11, dtype=np.float64), ones11) + if HAS_REFCOUNT: + old_refcount = sys.getrefcount(tndarray) + np.array(ones11) + assert_equal(old_refcount, sys.getrefcount(tndarray)) + + # test None + assert_equal(np.array(None, dtype=np.float64), + np.array(np.nan, dtype=np.float64)) + if HAS_REFCOUNT: + old_refcount = sys.getrefcount(tobj) + np.array(None, dtype=np.float64) + assert_equal(old_refcount, sys.getrefcount(tobj)) + + # test scalar + assert_equal(np.array(1.0, dtype=np.float64), + np.ones((), dtype=np.float64)) + if HAS_REFCOUNT: + old_refcount = sys.getrefcount(np.float64) + np.array(np.array(1.0, dtype=np.float64), dtype=np.float64) + assert_equal(old_refcount, sys.getrefcount(np.float64)) + + # test string + S2 = np.dtype((bytes, 2)) + S3 = np.dtype((bytes, 3)) + S5 = np.dtype((bytes, 5)) + assert_equal(np.array(b"1.0", dtype=np.float64), + np.ones((), dtype=np.float64)) + assert_equal(np.array(b"1.0").dtype, S3) + assert_equal(np.array(b"1.0", dtype=bytes).dtype, S3) + assert_equal(np.array(b"1.0", dtype=S2), np.array(b"1.")) + assert_equal(np.array(b"1", dtype=S5), np.ones((), dtype=S5)) + + # test string + U2 = np.dtype((str, 2)) + U3 = np.dtype((str, 3)) + U5 = np.dtype((str, 5)) + assert_equal(np.array("1.0", dtype=np.float64), + np.ones((), dtype=np.float64)) + assert_equal(np.array("1.0").dtype, U3) + assert_equal(np.array("1.0", dtype=str).dtype, U3) + assert_equal(np.array("1.0", dtype=U2), np.array(str("1."))) + assert_equal(np.array("1", dtype=U5), np.ones((), dtype=U5)) + + builtins = getattr(__builtins__, '__dict__', __builtins__) + assert_(hasattr(builtins, 'get')) + + # test memoryview + dat = np.array(memoryview(b'1.0'), dtype=np.float64) + assert_equal(dat, [49.0, 46.0, 48.0]) + assert_(dat.dtype.type is np.float64) + + dat = np.array(memoryview(b'1.0')) + assert_equal(dat, [49, 46, 48]) + assert_(dat.dtype.type is np.uint8) + + # test array interface + a = np.array(100.0, dtype=np.float64) + o = type("o", (object,), + dict(__array_interface__=a.__array_interface__)) + assert_equal(np.array(o, dtype=np.float64), a) + + # test array_struct interface + a = np.array([(1, 4.0, 'Hello'), (2, 6.0, 'World')], + dtype=[('f0', int), ('f1', float), ('f2', str)]) + o = type("o", (object,), + dict(__array_struct__=a.__array_struct__)) + ## wasn't what I expected... is np.array(o) supposed to equal a ? + ## instead we get a array([...], dtype=">V18") + assert_equal(bytes(np.array(o).data), bytes(a.data)) + + # test array + o = type("o", (object,), + dict(__array__=lambda *x: np.array(100.0, dtype=np.float64)))() + assert_equal(np.array(o, dtype=np.float64), np.array(100.0, np.float64)) + + # test recursion + nested = 1.5 + for i in range(np.MAXDIMS): + nested = [nested] + + # no error + np.array(nested) + + # Exceeds recursion limit + assert_raises(ValueError, np.array, [nested], dtype=np.float64) + + # Try with lists... 
+ # float32 + assert_equal(np.array([None] * 10, dtype=np.float32), + np.full((10,), np.nan, dtype=np.float32)) + assert_equal(np.array([[None]] * 10, dtype=np.float32), + np.full((10, 1), np.nan, dtype=np.float32)) + assert_equal(np.array([[None] * 10], dtype=np.float32), + np.full((1, 10), np.nan, dtype=np.float32)) + assert_equal(np.array([[None] * 10] * 10, dtype=np.float32), + np.full((10, 10), np.nan, dtype=np.float32)) + # float64 + assert_equal(np.array([None] * 10, dtype=np.float64), + np.full((10,), np.nan, dtype=np.float64)) + assert_equal(np.array([[None]] * 10, dtype=np.float64), + np.full((10, 1), np.nan, dtype=np.float64)) + assert_equal(np.array([[None] * 10], dtype=np.float64), + np.full((1, 10), np.nan, dtype=np.float64)) + assert_equal(np.array([[None] * 10] * 10, dtype=np.float64), + np.full((10, 10), np.nan, dtype=np.float64)) + + assert_equal(np.array([1.0] * 10, dtype=np.float64), + np.ones((10,), dtype=np.float64)) + assert_equal(np.array([[1.0]] * 10, dtype=np.float64), + np.ones((10, 1), dtype=np.float64)) + assert_equal(np.array([[1.0] * 10], dtype=np.float64), + np.ones((1, 10), dtype=np.float64)) + assert_equal(np.array([[1.0] * 10] * 10, dtype=np.float64), + np.ones((10, 10), dtype=np.float64)) + + # Try with tuples + assert_equal(np.array((None,) * 10, dtype=np.float64), + np.full((10,), np.nan, dtype=np.float64)) + assert_equal(np.array([(None,)] * 10, dtype=np.float64), + np.full((10, 1), np.nan, dtype=np.float64)) + assert_equal(np.array([(None,) * 10], dtype=np.float64), + np.full((1, 10), np.nan, dtype=np.float64)) + assert_equal(np.array([(None,) * 10] * 10, dtype=np.float64), + np.full((10, 10), np.nan, dtype=np.float64)) + + assert_equal(np.array((1.0,) * 10, dtype=np.float64), + np.ones((10,), dtype=np.float64)) + assert_equal(np.array([(1.0,)] * 10, dtype=np.float64), + np.ones((10, 1), dtype=np.float64)) + assert_equal(np.array([(1.0,) * 10], dtype=np.float64), + np.ones((1, 10), dtype=np.float64)) + assert_equal(np.array([(1.0,) * 10] * 10, dtype=np.float64), + np.ones((10, 10), dtype=np.float64)) + +@pytest.mark.parametrize("array", [True, False]) +def test_array_impossible_casts(array): + # All builtin types can be forcibly cast, at least theoretically, + # but user dtypes cannot necessarily. + rt = rational(1, 2) + if array: + rt = np.array(rt) + with assert_raises(TypeError): + np.array(rt, dtype="M8") + + +# TODO: remove when fastCopyAndTranspose deprecation expires +@pytest.mark.parametrize("a", + ( + np.array(2), # 0D array + np.array([3, 2, 7, 0]), # 1D array + np.arange(6).reshape(2, 3) # 2D array + ), +) +def test_fastCopyAndTranspose(a): + with pytest.deprecated_call(): + b = np.fastCopyAndTranspose(a) + assert_equal(b, a.T) + assert b.flags.owndata + + +def test_array_astype(): + a = np.arange(6, dtype='f4').reshape(2, 3) + # Default behavior: allows unsafe casts, keeps memory layout, + # always copies. 
+ b = a.astype('i4') + assert_equal(a, b) + assert_equal(b.dtype, np.dtype('i4')) + assert_equal(a.strides, b.strides) + b = a.T.astype('i4') + assert_equal(a.T, b) + assert_equal(b.dtype, np.dtype('i4')) + assert_equal(a.T.strides, b.strides) + b = a.astype('f4') + assert_equal(a, b) + assert_(not (a is b)) + + # copy=False parameter can sometimes skip a copy + b = a.astype('f4', copy=False) + assert_(a is b) + + # order parameter allows overriding of the memory layout, + # forcing a copy if the layout is wrong + b = a.astype('f4', order='F', copy=False) + assert_equal(a, b) + assert_(not (a is b)) + assert_(b.flags.f_contiguous) + + b = a.astype('f4', order='C', copy=False) + assert_equal(a, b) + assert_(a is b) + assert_(b.flags.c_contiguous) + + # casting parameter allows catching bad casts + b = a.astype('c8', casting='safe') + assert_equal(a, b) + assert_equal(b.dtype, np.dtype('c8')) + + assert_raises(TypeError, a.astype, 'i4', casting='safe') + + # subok=False passes through a non-subclassed array + b = a.astype('f4', subok=0, copy=False) + assert_(a is b) + + class MyNDArray(np.ndarray): + pass + + a = np.array([[0, 1, 2], [3, 4, 5]], dtype='f4').view(MyNDArray) + + # subok=True passes through a subclass + b = a.astype('f4', subok=True, copy=False) + assert_(a is b) + + # subok=True is default, and creates a subtype on a cast + b = a.astype('i4', copy=False) + assert_equal(a, b) + assert_equal(type(b), MyNDArray) + + # subok=False never returns a subclass + b = a.astype('f4', subok=False, copy=False) + assert_equal(a, b) + assert_(not (a is b)) + assert_(type(b) is not MyNDArray) + + # Make sure converting from string object to fixed length string + # does not truncate. + a = np.array([b'a'*100], dtype='O') + b = a.astype('S') + assert_equal(a, b) + assert_equal(b.dtype, np.dtype('S100')) + a = np.array(['a'*100], dtype='O') + b = a.astype('U') + assert_equal(a, b) + assert_equal(b.dtype, np.dtype('U100')) + + # Same test as above but for strings shorter than 64 characters + a = np.array([b'a'*10], dtype='O') + b = a.astype('S') + assert_equal(a, b) + assert_equal(b.dtype, np.dtype('S10')) + a = np.array(['a'*10], dtype='O') + b = a.astype('U') + assert_equal(a, b) + assert_equal(b.dtype, np.dtype('U10')) + + a = np.array(123456789012345678901234567890, dtype='O').astype('S') + assert_array_equal(a, np.array(b'1234567890' * 3, dtype='S30')) + a = np.array(123456789012345678901234567890, dtype='O').astype('U') + assert_array_equal(a, np.array('1234567890' * 3, dtype='U30')) + + a = np.array([123456789012345678901234567890], dtype='O').astype('S') + assert_array_equal(a, np.array(b'1234567890' * 3, dtype='S30')) + a = np.array([123456789012345678901234567890], dtype='O').astype('U') + assert_array_equal(a, np.array('1234567890' * 3, dtype='U30')) + + a = np.array(123456789012345678901234567890, dtype='S') + assert_array_equal(a, np.array(b'1234567890' * 3, dtype='S30')) + a = np.array(123456789012345678901234567890, dtype='U') + assert_array_equal(a, np.array('1234567890' * 3, dtype='U30')) + + a = np.array('a\u0140', dtype='U') + b = np.ndarray(buffer=a, dtype='uint32', shape=2) + assert_(b.size == 2) + + a = np.array([1000], dtype='i4') + assert_raises(TypeError, a.astype, 'S1', casting='safe') + + a = np.array(1000, dtype='i4') + assert_raises(TypeError, a.astype, 'U1', casting='safe') + + # gh-24023 + assert_raises(TypeError, a.astype) + +@pytest.mark.parametrize("dt", ["S", "U"]) +def test_array_astype_to_string_discovery_empty(dt): + # See also gh-19085 + arr = np.array([""], 
dtype=object) + # Note: the itemsize follows the `0 -> 1` logic, which should change. + # The important part of the test is rather that it does not error. + assert arr.astype(dt).dtype.itemsize == np.dtype(f"{dt}1").itemsize + + # check the same thing for `np.can_cast` (since it accepts arrays) + assert np.can_cast(arr, dt, casting="unsafe") + assert not np.can_cast(arr, dt, casting="same_kind") + # as well as for the object dtype as a descriptor: + assert np.can_cast("O", dt, casting="unsafe") + +@pytest.mark.parametrize("dt", ["d", "f", "S13", "U32"]) +def test_array_astype_to_void(dt): + dt = np.dtype(dt) + arr = np.array([], dtype=dt) + assert arr.astype("V").dtype.itemsize == dt.itemsize + +def test_object_array_astype_to_void(): + # This is different from `test_array_astype_to_void` as object arrays + # are inspected. The default void is "V8" (8 is the itemsize of a double). + arr = np.array([], dtype="O").astype("V") + assert arr.dtype == "V8" + +@pytest.mark.parametrize("t", + np.sctypes['uint'] + np.sctypes['int'] + np.sctypes['float'] +) +def test_array_astype_warning(t): + # test ComplexWarning when casting from complex to float or int + a = np.array(10, dtype=np.complex_) + assert_warns(np.ComplexWarning, a.astype, t) + +@pytest.mark.parametrize(["dtype", "out_dtype"], + [(np.bytes_, np.bool_), + (np.str_, np.bool_), + (np.dtype("S10,S9"), np.dtype("?,?"))]) +def test_string_to_boolean_cast(dtype, out_dtype): + """ + Currently, for `astype`, strings are cast to booleans effectively by + calling `bool(int(string))`. This is not consistent (see gh-9875) and + will eventually be deprecated. + """ + arr = np.array(["10", "10\0\0\0", "0\0\0", "0"], dtype=dtype) + expected = np.array([True, True, False, False], dtype=out_dtype) + assert_array_equal(arr.astype(out_dtype), expected) + +@pytest.mark.parametrize(["dtype", "out_dtype"], + [(np.bytes_, np.bool_), + (np.str_, np.bool_), + (np.dtype("S10,S9"), np.dtype("?,?"))]) +def test_string_to_boolean_cast_errors(dtype, out_dtype): + """ + These currently error out, since the cast to integers fails, but should not + error out in the future. 
+ """ + for invalid in ["False", "True", "", "\0", "non-empty"]: + arr = np.array([invalid], dtype=dtype) + with assert_raises(ValueError): + arr.astype(out_dtype) + +@pytest.mark.parametrize("str_type", [str, bytes, np.str_, np.unicode_]) +@pytest.mark.parametrize("scalar_type", + [np.complex64, np.complex128, np.clongdouble]) +def test_string_to_complex_cast(str_type, scalar_type): + value = scalar_type(b"1+3j") + assert scalar_type(value) == 1+3j + assert np.array([value], dtype=object).astype(scalar_type)[()] == 1+3j + assert np.array(value).astype(scalar_type)[()] == 1+3j + arr = np.zeros(1, dtype=scalar_type) + arr[0] = value + assert arr[0] == 1+3j + +@pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) +def test_none_to_nan_cast(dtype): + # Note that at the time of writing this test, the scalar constructors + # reject None + arr = np.zeros(1, dtype=dtype) + arr[0] = None + assert np.isnan(arr)[0] + assert np.isnan(np.array(None, dtype=dtype))[()] + assert np.isnan(np.array([None], dtype=dtype))[0] + assert np.isnan(np.array(None).astype(dtype))[()] + +def test_copyto_fromscalar(): + a = np.arange(6, dtype='f4').reshape(2, 3) + + # Simple copy + np.copyto(a, 1.5) + assert_equal(a, 1.5) + np.copyto(a.T, 2.5) + assert_equal(a, 2.5) + + # Where-masked copy + mask = np.array([[0, 1, 0], [0, 0, 1]], dtype='?') + np.copyto(a, 3.5, where=mask) + assert_equal(a, [[2.5, 3.5, 2.5], [2.5, 2.5, 3.5]]) + mask = np.array([[0, 1], [1, 1], [1, 0]], dtype='?') + np.copyto(a.T, 4.5, where=mask) + assert_equal(a, [[2.5, 4.5, 4.5], [4.5, 4.5, 3.5]]) + +def test_copyto(): + a = np.arange(6, dtype='i4').reshape(2, 3) + + # Simple copy + np.copyto(a, [[3, 1, 5], [6, 2, 1]]) + assert_equal(a, [[3, 1, 5], [6, 2, 1]]) + + # Overlapping copy should work + np.copyto(a[:, :2], a[::-1, 1::-1]) + assert_equal(a, [[2, 6, 5], [1, 3, 1]]) + + # Defaults to 'same_kind' casting + assert_raises(TypeError, np.copyto, a, 1.5) + + # Force a copy with 'unsafe' casting, truncating 1.5 to 1 + np.copyto(a, 1.5, casting='unsafe') + assert_equal(a, 1) + + # Copying with a mask + np.copyto(a, 3, where=[True, False, True]) + assert_equal(a, [[3, 1, 3], [3, 1, 3]]) + + # Casting rule still applies with a mask + assert_raises(TypeError, np.copyto, a, 3.5, where=[True, False, True]) + + # Lists of integer 0's and 1's is ok too + np.copyto(a, 4.0, casting='unsafe', where=[[0, 1, 1], [1, 0, 0]]) + assert_equal(a, [[3, 4, 4], [4, 1, 3]]) + + # Overlapping copy with mask should work + np.copyto(a[:, :2], a[::-1, 1::-1], where=[[0, 1], [1, 1]]) + assert_equal(a, [[3, 4, 4], [4, 3, 3]]) + + # 'dst' must be an array + assert_raises(TypeError, np.copyto, [1, 2, 3], [2, 3, 4]) + +def test_copyto_permut(): + # test explicit overflow case + pad = 500 + l = [True] * pad + [True, True, True, True] + r = np.zeros(len(l)-pad) + d = np.ones(len(l)-pad) + mask = np.array(l)[pad:] + np.copyto(r, d, where=mask[::-1]) + + # test all permutation of possible masks, 9 should be sufficient for + # current 4 byte unrolled code + power = 9 + d = np.ones(power) + for i in range(2**power): + r = np.zeros(power) + l = [(i & x) != 0 for x in range(power)] + mask = np.array(l) + np.copyto(r, d, where=mask) + assert_array_equal(r == 1, l) + assert_equal(r.sum(), sum(l)) + + r = np.zeros(power) + np.copyto(r, d, where=mask[::-1]) + assert_array_equal(r == 1, l[::-1]) + assert_equal(r.sum(), sum(l)) + + r = np.zeros(power) + np.copyto(r[::2], d[::2], where=mask[::2]) + assert_array_equal(r[::2] == 1, l[::2]) + assert_equal(r[::2].sum(), sum(l[::2])) + + r = 
np.zeros(power) + np.copyto(r[::2], d[::2], where=mask[::-2]) + assert_array_equal(r[::2] == 1, l[::-2]) + assert_equal(r[::2].sum(), sum(l[::-2])) + + for c in [0xFF, 0x7F, 0x02, 0x10]: + r = np.zeros(power) + mask = np.array(l) + imask = np.array(l).view(np.uint8) + imask[mask != 0] = c + np.copyto(r, d, where=mask) + assert_array_equal(r == 1, l) + assert_equal(r.sum(), sum(l)) + + r = np.zeros(power) + np.copyto(r, d, where=True) + assert_equal(r.sum(), r.size) + r = np.ones(power) + d = np.zeros(power) + np.copyto(r, d, where=False) + assert_equal(r.sum(), r.size) + +def test_copy_order(): + a = np.arange(24).reshape(2, 1, 3, 4) + b = a.copy(order='F') + c = np.arange(24).reshape(2, 1, 4, 3).swapaxes(2, 3) + + def check_copy_result(x, y, ccontig, fcontig, strides=False): + assert_(not (x is y)) + assert_equal(x, y) + assert_equal(res.flags.c_contiguous, ccontig) + assert_equal(res.flags.f_contiguous, fcontig) + + # Validate the initial state of a, b, and c + assert_(a.flags.c_contiguous) + assert_(not a.flags.f_contiguous) + assert_(not b.flags.c_contiguous) + assert_(b.flags.f_contiguous) + assert_(not c.flags.c_contiguous) + assert_(not c.flags.f_contiguous) + + # Copy with order='C' + res = a.copy(order='C') + check_copy_result(res, a, ccontig=True, fcontig=False, strides=True) + res = b.copy(order='C') + check_copy_result(res, b, ccontig=True, fcontig=False, strides=False) + res = c.copy(order='C') + check_copy_result(res, c, ccontig=True, fcontig=False, strides=False) + res = np.copy(a, order='C') + check_copy_result(res, a, ccontig=True, fcontig=False, strides=True) + res = np.copy(b, order='C') + check_copy_result(res, b, ccontig=True, fcontig=False, strides=False) + res = np.copy(c, order='C') + check_copy_result(res, c, ccontig=True, fcontig=False, strides=False) + + # Copy with order='F' + res = a.copy(order='F') + check_copy_result(res, a, ccontig=False, fcontig=True, strides=False) + res = b.copy(order='F') + check_copy_result(res, b, ccontig=False, fcontig=True, strides=True) + res = c.copy(order='F') + check_copy_result(res, c, ccontig=False, fcontig=True, strides=False) + res = np.copy(a, order='F') + check_copy_result(res, a, ccontig=False, fcontig=True, strides=False) + res = np.copy(b, order='F') + check_copy_result(res, b, ccontig=False, fcontig=True, strides=True) + res = np.copy(c, order='F') + check_copy_result(res, c, ccontig=False, fcontig=True, strides=False) + + # Copy with order='K' + res = a.copy(order='K') + check_copy_result(res, a, ccontig=True, fcontig=False, strides=True) + res = b.copy(order='K') + check_copy_result(res, b, ccontig=False, fcontig=True, strides=True) + res = c.copy(order='K') + check_copy_result(res, c, ccontig=False, fcontig=False, strides=True) + res = np.copy(a, order='K') + check_copy_result(res, a, ccontig=True, fcontig=False, strides=True) + res = np.copy(b, order='K') + check_copy_result(res, b, ccontig=False, fcontig=True, strides=True) + res = np.copy(c, order='K') + check_copy_result(res, c, ccontig=False, fcontig=False, strides=True) + +def test_contiguous_flags(): + a = np.ones((4, 4, 1))[::2,:,:] + a.strides = a.strides[:2] + (-123,) + b = np.ones((2, 2, 1, 2, 2)).swapaxes(3, 4) + + def check_contig(a, ccontig, fcontig): + assert_(a.flags.c_contiguous == ccontig) + assert_(a.flags.f_contiguous == fcontig) + + # Check if new arrays are correct: + check_contig(a, False, False) + check_contig(b, False, False) + check_contig(np.empty((2, 2, 0, 2, 2)), True, True) + check_contig(np.array([[[1], [2]]], order='F'), True, True) + 
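# (Editor's aside, illustrative only and not part of the original test file: under the relaxed strides checking used by this NumPy version, zero-sized arrays and arrays in which at most one axis is longer than one report both flags as True, which is what the two checks above rely on.) + assert np.ones((3, 1)).flags.c_contiguous and np.ones((3, 1)).flags.f_contiguous +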
check_contig(np.empty((2, 2)), True, False) + check_contig(np.empty((2, 2), order='F'), False, True) + + # Check that np.array creates correct contiguous flags: + check_contig(np.array(a, copy=False), False, False) + check_contig(np.array(a, copy=False, order='C'), True, False) + check_contig(np.array(a, ndmin=4, copy=False, order='F'), False, True) + + # Check slicing update of flags and : + check_contig(a[0], True, True) + check_contig(a[None, ::4, ..., None], True, True) + check_contig(b[0, 0, ...], False, True) + check_contig(b[:, :, 0:0, :, :], True, True) + + # Test ravel and squeeze. + check_contig(a.ravel(), True, True) + check_contig(np.ones((1, 3, 1)).squeeze(), True, True) + +def test_broadcast_arrays(): + # Test user defined dtypes + a = np.array([(1, 2, 3)], dtype='u4,u4,u4') + b = np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], dtype='u4,u4,u4') + result = np.broadcast_arrays(a, b) + assert_equal(result[0], np.array([(1, 2, 3), (1, 2, 3), (1, 2, 3)], dtype='u4,u4,u4')) + assert_equal(result[1], np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], dtype='u4,u4,u4')) + +@pytest.mark.parametrize(["shape", "fill_value", "expected_output"], + [((2, 2), [5.0, 6.0], np.array([[5.0, 6.0], [5.0, 6.0]])), + ((3, 2), [1.0, 2.0], np.array([[1.0, 2.0], [1.0, 2.0], [1.0, 2.0]]))]) +def test_full_from_list(shape, fill_value, expected_output): + output = np.full(shape, fill_value) + assert_equal(output, expected_output) + +def test_astype_copyflag(): + # test the various copyflag options + arr = np.arange(10, dtype=np.intp) + + res_true = arr.astype(np.intp, copy=True) + assert not np.may_share_memory(arr, res_true) + res_always = arr.astype(np.intp, copy=np._CopyMode.ALWAYS) + assert not np.may_share_memory(arr, res_always) + + res_false = arr.astype(np.intp, copy=False) + # `res_false is arr` currently, but check `may_share_memory`. + assert np.may_share_memory(arr, res_false) + res_if_needed = arr.astype(np.intp, copy=np._CopyMode.IF_NEEDED) + # `res_if_needed is arr` currently, but check `may_share_memory`. + assert np.may_share_memory(arr, res_if_needed) + + res_never = arr.astype(np.intp, copy=np._CopyMode.NEVER) + assert np.may_share_memory(arr, res_never) + + # Simple tests for when a copy is necessary: + res_false = arr.astype(np.float64, copy=False) + assert_array_equal(res_false, arr) + res_if_needed = arr.astype(np.float64, + copy=np._CopyMode.IF_NEEDED) + assert_array_equal(res_if_needed, arr) + assert_raises(ValueError, arr.astype, np.float64, + copy=np._CopyMode.NEVER) diff --git a/.venv/lib/python3.12/site-packages/numpy/core/tests/test_argparse.py b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_argparse.py new file mode 100644 index 00000000..fae22702 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_argparse.py @@ -0,0 +1,62 @@ +""" +Tests for the private NumPy argument parsing functionality. +They mainly exists to ensure good test coverage without having to try the +weirder cases on actual numpy functions but test them in one place. + +The test function is defined in C to be equivalent to (errors may not always +match exactly, and could be adjusted): + + def func(arg1, /, arg2, *, arg3): + i = integer(arg1) # reproducing the 'i' parsing in Python. + return None +""" + +import pytest + +import numpy as np +from numpy.core._multiarray_tests import argparse_example_function as func + + +def test_invalid_integers(): + with pytest.raises(TypeError, + match="integer argument expected, got float"): + func(1.) 
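+ # (Editor's aside, an illustrative sketch that is not part of the original
+ # test: the 'i' conversion reproduced by the C helper behaves much like
+ # `operator.index`, which likewise rejects floats.)
+ import operator
+ with pytest.raises(TypeError):
+ operator.index(1.)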
+ with pytest.raises(OverflowError): + func(2**100) + + +def test_missing_arguments(): + with pytest.raises(TypeError, + match="missing required positional argument 0"): + func() + with pytest.raises(TypeError, + match="missing required positional argument 0"): + func(arg2=1, arg3=4) + with pytest.raises(TypeError, + match=r"missing required argument \'arg2\' \(pos 1\)"): + func(1, arg3=5) + + +def test_too_many_positional(): + # the second argument is positional but can be passed as keyword. + with pytest.raises(TypeError, + match="takes from 2 to 3 positional arguments but 4 were given"): + func(1, 2, 3, 4) + + +def test_multiple_values(): + with pytest.raises(TypeError, + match=r"given by name \('arg2'\) and position \(position 1\)"): + func(1, 2, arg2=3) + + +def test_string_fallbacks(): + # We can (currently?) use numpy strings to test the "slow" fallbacks + # that should normally not be taken due to string interning. + arg2 = np.str_("arg2") + missing_arg = np.str_("missing_arg") + func(1, **{arg2: 3}) + with pytest.raises(TypeError, + match="got an unexpected keyword argument 'missing_arg'"): + func(2, **{missing_arg: 3}) + diff --git a/.venv/lib/python3.12/site-packages/numpy/core/tests/test_array_coercion.py b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_array_coercion.py new file mode 100644 index 00000000..629bfce5 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_array_coercion.py @@ -0,0 +1,898 @@ +""" +Tests for array coercion, mainly through testing `np.array` results directly. +Note that other such tests exist, e.g., in `test_api.py` and many corner-cases +are tested (sometimes indirectly) elsewhere. +""" + +from itertools import permutations, product + +import pytest +from pytest import param + +import numpy as np +from numpy.core._rational_tests import rational +from numpy.core._multiarray_umath import _discover_array_parameters + +from numpy.testing import ( + assert_array_equal, assert_warns, IS_PYPY) + + +def arraylikes(): + """ + Generator for functions converting an array into various array-likes. + If full is True (default) it includes array-likes not capable of handling + all dtypes. + """ + # base array: + def ndarray(a): + return a + + yield param(ndarray, id="ndarray") + + # subclass: + class MyArr(np.ndarray): + pass + + def subclass(a): + return a.view(MyArr) + + yield subclass + + class _SequenceLike(): + # Older NumPy versions, sometimes cared whether a protocol array was + # also _SequenceLike. This shouldn't matter, but keep it for now + # for __array__ and not the others. + def __len__(self): + raise TypeError + + def __getitem__(self): + raise TypeError + + # Array-interface + class ArrayDunder(_SequenceLike): + def __init__(self, a): + self.a = a + + def __array__(self, dtype=None): + return self.a + + yield param(ArrayDunder, id="__array__") + + # memory-view + yield param(memoryview, id="memoryview") + + # Array-interface + class ArrayInterface: + def __init__(self, a): + self.a = a # need to hold on to keep interface valid + self.__array_interface__ = a.__array_interface__ + + yield param(ArrayInterface, id="__array_interface__") + + # Array-Struct + class ArrayStruct: + def __init__(self, a): + self.a = a # need to hold on to keep struct valid + self.__array_struct__ = a.__array_struct__ + + yield param(ArrayStruct, id="__array_struct__") + + +def scalar_instances(times=True, extended_precision=True, user_dtype=True): + # Hard-coded list of scalar instances. 
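+ # (Editor's aside, illustrative only, not part of the original file: the
+ # square roots below presumably give the floating-point entries a full
+ # mantissa, so precision loss in later casts is observable; e.g. the
+ # float32 and float64 values of sqrt(5) differ.)
+ assert np.float32(np.sqrt(5)) != np.float64(np.sqrt(5))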
+ # Floats: + yield param(np.sqrt(np.float16(5)), id="float16") + yield param(np.sqrt(np.float32(5)), id="float32") + yield param(np.sqrt(np.float64(5)), id="float64") + if extended_precision: + yield param(np.sqrt(np.longdouble(5)), id="longdouble") + + # Complex: + yield param(np.sqrt(np.complex64(2+3j)), id="complex64") + yield param(np.sqrt(np.complex128(2+3j)), id="complex128") + if extended_precision: + yield param(np.sqrt(np.longcomplex(2+3j)), id="clongdouble") + + # Bool: + # XFAIL: Bool should be added, but has some bad properties when it + # comes to strings, see also gh-9875 + # yield param(np.bool_(0), id="bool") + + # Integers: + yield param(np.int8(2), id="int8") + yield param(np.int16(2), id="int16") + yield param(np.int32(2), id="int32") + yield param(np.int64(2), id="int64") + + yield param(np.uint8(2), id="uint8") + yield param(np.uint16(2), id="uint16") + yield param(np.uint32(2), id="uint32") + yield param(np.uint64(2), id="uint64") + + # Rational: + if user_dtype: + yield param(rational(1, 2), id="rational") + + # Cannot create a structured void scalar directly: + structured = np.array([(1, 3)], "i,i")[0] + assert isinstance(structured, np.void) + assert structured.dtype == np.dtype("i,i") + yield param(structured, id="structured") + + if times: + # Datetimes and timedelta + yield param(np.timedelta64(2), id="timedelta64[generic]") + yield param(np.timedelta64(23, "s"), id="timedelta64[s]") + yield param(np.timedelta64("NaT", "s"), id="timedelta64[s](NaT)") + + yield param(np.datetime64("NaT"), id="datetime64[generic](NaT)") + yield param(np.datetime64("2020-06-07 12:43", "ms"), id="datetime64[ms]") + + # Strings and unstructured void: + yield param(np.bytes_(b"1234"), id="bytes") + yield param(np.str_("2345"), id="unicode") + yield param(np.void(b"4321"), id="unstructured_void") + + +def is_parametric_dtype(dtype): + """Returns True if the dtype is a parametric legacy dtype (itemsize + is 0, or a datetime without units) + """ + if dtype.itemsize == 0: + return True + if issubclass(dtype.type, (np.datetime64, np.timedelta64)): + if dtype.name.endswith("64"): + # Generic time units + return True + return False + + +class TestStringDiscovery: + @pytest.mark.parametrize("obj", + [object(), 1.2, 10**43, None, "string"], + ids=["object", "1.2", "10**43", "None", "string"]) + def test_basic_stringlength(self, obj): + length = len(str(obj)) + expected = np.dtype(f"S{length}") + + assert np.array(obj, dtype="S").dtype == expected + assert np.array([obj], dtype="S").dtype == expected + + # A nested array is also discovered correctly + arr = np.array(obj, dtype="O") + assert np.array(arr, dtype="S").dtype == expected + # Also if we use the dtype class + assert np.array(arr, dtype=type(expected)).dtype == expected + # Check that .astype() behaves identical + assert arr.astype("S").dtype == expected + # The DType class is accepted by `.astype()` + assert arr.astype(type(np.dtype("S"))).dtype == expected + + @pytest.mark.parametrize("obj", + [object(), 1.2, 10**43, None, "string"], + ids=["object", "1.2", "10**43", "None", "string"]) + def test_nested_arrays_stringlength(self, obj): + length = len(str(obj)) + expected = np.dtype(f"S{length}") + arr = np.array(obj, dtype="O") + assert np.array([arr, arr], dtype="S").dtype == expected + + @pytest.mark.parametrize("arraylike", arraylikes()) + def test_unpack_first_level(self, arraylike): + # We unpack exactly one level of array likes + obj = np.array([None]) + obj[0] = np.array(1.2) + # the length of the included item, not of the 
float dtype + length = len(str(obj[0])) + expected = np.dtype(f"S{length}") + + obj = arraylike(obj) + # casting to string usually calls str(obj) + arr = np.array([obj], dtype="S") + assert arr.shape == (1, 1) + assert arr.dtype == expected + + +class TestScalarDiscovery: + def test_void_special_case(self): + # Void dtypes with structures discover tuples as elements + arr = np.array((1, 2, 3), dtype="i,i,i") + assert arr.shape == () + arr = np.array([(1, 2, 3)], dtype="i,i,i") + assert arr.shape == (1,) + + def test_char_special_case(self): + arr = np.array("string", dtype="c") + assert arr.shape == (6,) + assert arr.dtype.char == "c" + arr = np.array(["string"], dtype="c") + assert arr.shape == (1, 6) + assert arr.dtype.char == "c" + + def test_char_special_case_deep(self): + # Check that the character special case errors correctly if the + # array is too deep: + nested = ["string"] # 2 dimensions (due to string being sequence) + for i in range(np.MAXDIMS - 2): + nested = [nested] + + arr = np.array(nested, dtype='c') + assert arr.shape == (1,) * (np.MAXDIMS - 1) + (6,) + with pytest.raises(ValueError): + np.array([nested], dtype="c") + + def test_unknown_object(self): + arr = np.array(object()) + assert arr.shape == () + assert arr.dtype == np.dtype("O") + + @pytest.mark.parametrize("scalar", scalar_instances()) + def test_scalar(self, scalar): + arr = np.array(scalar) + assert arr.shape == () + assert arr.dtype == scalar.dtype + + arr = np.array([[scalar, scalar]]) + assert arr.shape == (1, 2) + assert arr.dtype == scalar.dtype + + # Additionally to string this test also runs into a corner case + # with datetime promotion (the difference is the promotion order). + @pytest.mark.filterwarnings("ignore:Promotion of numbers:FutureWarning") + def test_scalar_promotion(self): + for sc1, sc2 in product(scalar_instances(), scalar_instances()): + sc1, sc2 = sc1.values[0], sc2.values[0] + # test all combinations: + try: + arr = np.array([sc1, sc2]) + except (TypeError, ValueError): + # The promotion between two times can fail + # XFAIL (ValueError): Some object casts are currently undefined + continue + assert arr.shape == (2,) + try: + dt1, dt2 = sc1.dtype, sc2.dtype + expected_dtype = np.promote_types(dt1, dt2) + assert arr.dtype == expected_dtype + except TypeError as e: + # Will currently always go to object dtype + assert arr.dtype == np.dtype("O") + + @pytest.mark.parametrize("scalar", scalar_instances()) + def test_scalar_coercion(self, scalar): + # This tests various scalar coercion paths, mainly for the numerical + # types. It includes some paths not directly related to `np.array`. + if isinstance(scalar, np.inexact): + # Ensure we have a full-precision number if available + scalar = type(scalar)((scalar * 2)**0.5) + + if type(scalar) is rational: + # Rational generally fails due to a missing cast. In the future + # object casts should automatically be defined based on `setitem`. 
+ pytest.xfail("Rational to object cast is undefined currently.") + + # Use casting from object: + arr = np.array(scalar, dtype=object).astype(scalar.dtype) + + # Test various ways to create an array containing this scalar: + arr1 = np.array(scalar).reshape(1) + arr2 = np.array([scalar]) + arr3 = np.empty(1, dtype=scalar.dtype) + arr3[0] = scalar + arr4 = np.empty(1, dtype=scalar.dtype) + arr4[:] = [scalar] + # All of these methods should yield the same results + assert_array_equal(arr, arr1) + assert_array_equal(arr, arr2) + assert_array_equal(arr, arr3) + assert_array_equal(arr, arr4) + + @pytest.mark.xfail(IS_PYPY, reason="`int(np.complex128(3))` fails on PyPy") + @pytest.mark.filterwarnings("ignore::numpy.ComplexWarning") + @pytest.mark.parametrize("cast_to", scalar_instances()) + def test_scalar_coercion_same_as_cast_and_assignment(self, cast_to): + """ + Test that in most cases: + * `np.array(scalar, dtype=dtype)` + * `np.empty((), dtype=dtype)[()] = scalar` + * `np.array(scalar).astype(dtype)` + should behave the same. The only exceptions are parametric dtypes + (mainly datetime/timedelta without unit) and void without fields. + """ + dtype = cast_to.dtype # use to parametrize only the target dtype + + for scalar in scalar_instances(times=False): + scalar = scalar.values[0] + + if dtype.type == np.void: + if scalar.dtype.fields is not None and dtype.fields is None: + # Here, coercion to "V6" works, but the cast fails. + # Since the types are identical, SETITEM takes care of + # this, but has different rules than the cast. + with pytest.raises(TypeError): + np.array(scalar).astype(dtype) + np.array(scalar, dtype=dtype) + np.array([scalar], dtype=dtype) + continue + + # The main test, we first try to use casting and if it succeeds + # continue below testing that things are the same, otherwise + # test that the alternative paths at least also fail. + try: + cast = np.array(scalar).astype(dtype) + except (TypeError, ValueError, RuntimeError): + # coercion should also raise (error type may change) + with pytest.raises(Exception): + np.array(scalar, dtype=dtype) + + if (isinstance(scalar, rational) and + np.issubdtype(dtype, np.signedinteger)): + return + + with pytest.raises(Exception): + np.array([scalar], dtype=dtype) + # assignment should also raise + res = np.zeros((), dtype=dtype) + with pytest.raises(Exception): + res[()] = scalar + + return + + # Non error path: + arr = np.array(scalar, dtype=dtype) + assert_array_equal(arr, cast) + # assignment behaves the same + ass = np.zeros((), dtype=dtype) + ass[()] = scalar + assert_array_equal(ass, cast) + + @pytest.mark.parametrize("pyscalar", [10, 10.32, 10.14j, 10**100]) + def test_pyscalar_subclasses(self, pyscalar): + """NumPy arrays are read/write which means that anything but invariant + behaviour is on thin ice. However, we currently are happy to discover + subclasses of Python float, int, complex the same as the base classes. + This should potentially be deprecated. + """ + class MyScalar(type(pyscalar)): + pass + + res = np.array(MyScalar(pyscalar)) + expected = np.array(pyscalar) + assert_array_equal(res, expected) + + @pytest.mark.parametrize("dtype_char", np.typecodes["All"]) + def test_default_dtype_instance(self, dtype_char): + if dtype_char in "SU": + dtype = np.dtype(dtype_char + "1") + elif dtype_char == "V": + # Legacy behaviour was to use V8. The reason was float64 being the + # default dtype and that having 8 bytes. 
+ dtype = np.dtype("V8") + else: + dtype = np.dtype(dtype_char) + + discovered_dtype, _ = _discover_array_parameters([], type(dtype)) + + assert discovered_dtype == dtype + assert discovered_dtype.itemsize == dtype.itemsize + + @pytest.mark.parametrize("dtype", np.typecodes["Integer"]) + @pytest.mark.parametrize(["scalar", "error"], + [(np.float64(np.nan), ValueError), + (np.array(-1).astype(np.ulonglong)[()], OverflowError)]) + def test_scalar_to_int_coerce_does_not_cast(self, dtype, scalar, error): + """ + Signed integers are currently different in that they do not cast other + NumPy scalar, but instead use scalar.__int__(). The hardcoded + exception to this rule is `np.array(scalar, dtype=integer)`. + """ + dtype = np.dtype(dtype) + + # This is a special case using casting logic. It warns for the NaN + # but allows the cast (giving undefined behaviour). + with np.errstate(invalid="ignore"): + coerced = np.array(scalar, dtype=dtype) + cast = np.array(scalar).astype(dtype) + assert_array_equal(coerced, cast) + + # However these fail: + with pytest.raises(error): + np.array([scalar], dtype=dtype) + with pytest.raises(error): + cast[()] = scalar + + +class TestTimeScalars: + @pytest.mark.parametrize("dtype", [np.int64, np.float32]) + @pytest.mark.parametrize("scalar", + [param(np.timedelta64("NaT", "s"), id="timedelta64[s](NaT)"), + param(np.timedelta64(123, "s"), id="timedelta64[s]"), + param(np.datetime64("NaT", "generic"), id="datetime64[generic](NaT)"), + param(np.datetime64(1, "D"), id="datetime64[D]")],) + def test_coercion_basic(self, dtype, scalar): + # Note the `[scalar]` is there because np.array(scalar) uses stricter + # `scalar.__int__()` rules for backward compatibility right now. + arr = np.array(scalar, dtype=dtype) + cast = np.array(scalar).astype(dtype) + assert_array_equal(arr, cast) + + ass = np.ones((), dtype=dtype) + if issubclass(dtype, np.integer): + with pytest.raises(TypeError): + # raises, as would np.array([scalar], dtype=dtype), this is + # conversion from times, but behaviour of integers. + ass[()] = scalar + else: + ass[()] = scalar + assert_array_equal(ass, cast) + + @pytest.mark.parametrize("dtype", [np.int64, np.float32]) + @pytest.mark.parametrize("scalar", + [param(np.timedelta64(123, "ns"), id="timedelta64[ns]"), + param(np.timedelta64(12, "generic"), id="timedelta64[generic]")]) + def test_coercion_timedelta_convert_to_number(self, dtype, scalar): + # Only "ns" and "generic" timedeltas can be converted to numbers + # so these are slightly special. + arr = np.array(scalar, dtype=dtype) + cast = np.array(scalar).astype(dtype) + ass = np.ones((), dtype=dtype) + ass[()] = scalar # raises, as would np.array([scalar], dtype=dtype) + + assert_array_equal(arr, cast) + assert_array_equal(cast, cast) + + @pytest.mark.parametrize("dtype", ["S6", "U6"]) + @pytest.mark.parametrize(["val", "unit"], + [param(123, "s", id="[s]"), param(123, "D", id="[D]")]) + def test_coercion_assignment_datetime(self, val, unit, dtype): + # String from datetime64 assignment is currently special cased to + # never use casting. This is because casting will error in this + # case, and traditionally in most cases the behaviour is maintained + # like this. (`np.array(scalar, dtype="U6")` would have failed before) + # TODO: This discrepancy _should_ be resolved, either by relaxing the + # cast, or by deprecating the first part. 
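+ # (Editor's aside, illustrative only, not part of the original test: for the
+ # parametrized case val=123, unit="s" the scalar renders as the full ISO
+ # string, so only its first six characters survive the assignment below.)
+ assert str(np.datetime64(123, "s")) == "1970-01-01T00:02:03"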
+ scalar = np.datetime64(val, unit) + dtype = np.dtype(dtype) + cut_string = dtype.type(str(scalar)[:6]) + + arr = np.array(scalar, dtype=dtype) + assert arr[()] == cut_string + ass = np.ones((), dtype=dtype) + ass[()] = scalar + assert ass[()] == cut_string + + with pytest.raises(RuntimeError): + # However, unlike the above assignment using `str(scalar)[:6]` + # due to being handled by the string DType and not be casting + # the explicit cast fails: + np.array(scalar).astype(dtype) + + + @pytest.mark.parametrize(["val", "unit"], + [param(123, "s", id="[s]"), param(123, "D", id="[D]")]) + def test_coercion_assignment_timedelta(self, val, unit): + scalar = np.timedelta64(val, unit) + + # Unlike datetime64, timedelta allows the unsafe cast: + np.array(scalar, dtype="S6") + cast = np.array(scalar).astype("S6") + ass = np.ones((), dtype="S6") + ass[()] = scalar + expected = scalar.astype("S")[:6] + assert cast[()] == expected + assert ass[()] == expected + +class TestNested: + def test_nested_simple(self): + initial = [1.2] + nested = initial + for i in range(np.MAXDIMS - 1): + nested = [nested] + + arr = np.array(nested, dtype="float64") + assert arr.shape == (1,) * np.MAXDIMS + with pytest.raises(ValueError): + np.array([nested], dtype="float64") + + with pytest.raises(ValueError, match=".*would exceed the maximum"): + np.array([nested]) # user must ask for `object` explicitly + + arr = np.array([nested], dtype=object) + assert arr.dtype == np.dtype("O") + assert arr.shape == (1,) * np.MAXDIMS + assert arr.item() is initial + + def test_pathological_self_containing(self): + # Test that this also works for two nested sequences + l = [] + l.append(l) + arr = np.array([l, l, l], dtype=object) + assert arr.shape == (3,) + (1,) * (np.MAXDIMS - 1) + + # Also check a ragged case: + arr = np.array([l, [None], l], dtype=object) + assert arr.shape == (3, 1) + + @pytest.mark.parametrize("arraylike", arraylikes()) + def test_nested_arraylikes(self, arraylike): + # We try storing an array like into an array, but the array-like + # will have too many dimensions. This means the shape discovery + # decides that the array-like must be treated as an object (a special + # case of ragged discovery). The result will be an array with one + # dimension less than the maximum dimensions, and the array being + # assigned to it (which does work for object or if `float(arraylike)` + # works). + initial = arraylike(np.ones((1, 1))) + + nested = initial + for i in range(np.MAXDIMS - 1): + nested = [nested] + + with pytest.raises(ValueError, match=".*would exceed the maximum"): + # It will refuse to assign the array into + np.array(nested, dtype="float64") + + # If this is object, we end up assigning a (1, 1) array into (1,) + # (due to running out of dimensions), this is currently supported but + # a special case which is not ideal. + arr = np.array(nested, dtype=object) + assert arr.shape == (1,) * np.MAXDIMS + assert arr.item() == np.array(initial).item() + + @pytest.mark.parametrize("arraylike", arraylikes()) + def test_uneven_depth_ragged(self, arraylike): + arr = np.arange(4).reshape((2, 2)) + arr = arraylike(arr) + + # Array is ragged in the second dimension already: + out = np.array([arr, [arr]], dtype=object) + assert out.shape == (2,) + assert out[0] is arr + assert type(out[1]) is list + + # Array is ragged in the third dimension: + with pytest.raises(ValueError): + # This is a broadcast error during assignment, because + # the array shape would be (2, 2, 2) but `arr[0, 0] = arr` fails. 
+ np.array([arr, [arr, arr]], dtype=object) + + def test_empty_sequence(self): + arr = np.array([[], [1], [[1]]], dtype=object) + assert arr.shape == (3,) + + # The empty sequence stops further dimension discovery, so the + # result shape will be (0,) which leads to an error during: + with pytest.raises(ValueError): + np.array([[], np.empty((0, 1))], dtype=object) + + def test_array_of_different_depths(self): + # When multiple arrays (or array-likes) are included in a + # sequences and have different depth, we currently discover + # as many dimensions as they share. (see also gh-17224) + arr = np.zeros((3, 2)) + mismatch_first_dim = np.zeros((1, 2)) + mismatch_second_dim = np.zeros((3, 3)) + + dtype, shape = _discover_array_parameters( + [arr, mismatch_second_dim], dtype=np.dtype("O")) + assert shape == (2, 3) + + dtype, shape = _discover_array_parameters( + [arr, mismatch_first_dim], dtype=np.dtype("O")) + assert shape == (2,) + # The second case is currently supported because the arrays + # can be stored as objects: + res = np.asarray([arr, mismatch_first_dim], dtype=np.dtype("O")) + assert res[0] is arr + assert res[1] is mismatch_first_dim + + +class TestBadSequences: + # These are tests for bad objects passed into `np.array`, in general + # these have undefined behaviour. In the old code they partially worked + # when now they will fail. We could (and maybe should) create a copy + # of all sequences to be safe against bad-actors. + + def test_growing_list(self): + # List to coerce, `mylist` will append to it during coercion + obj = [] + class mylist(list): + def __len__(self): + obj.append([1, 2]) + return super().__len__() + + obj.append(mylist([1, 2])) + + with pytest.raises(RuntimeError): + np.array(obj) + + # Note: We do not test a shrinking list. These do very evil things + # and the only way to fix them would be to copy all sequences. + # (which may be a real option in the future). + + def test_mutated_list(self): + # List to coerce, `mylist` will mutate the first element + obj = [] + class mylist(list): + def __len__(self): + obj[0] = [2, 3] # replace with a different list. + return super().__len__() + + obj.append([2, 3]) + obj.append(mylist([1, 2])) + # Does not crash: + np.array(obj) + + def test_replace_0d_array(self): + # List to coerce, `mylist` will mutate the first element + obj = [] + class baditem: + def __len__(self): + obj[0][0] = 2 # replace with a different list. + raise ValueError("not actually a sequence!") + + def __getitem__(self): + pass + + # Runs into a corner case in the new code, the `array(2)` is cached + # so replacing it invalidates the cache. + obj.append([np.array(2), baditem()]) + with pytest.raises(RuntimeError): + np.array(obj) + + +class TestArrayLikes: + @pytest.mark.parametrize("arraylike", arraylikes()) + def test_0d_object_special_case(self, arraylike): + arr = np.array(0.) + obj = arraylike(arr) + # A single array-like is always converted: + res = np.array(obj, dtype=object) + assert_array_equal(arr, res) + + # But a single 0-D nested array-like never: + res = np.array([obj], dtype=object) + assert res[0] is obj + + @pytest.mark.parametrize("arraylike", arraylikes()) + @pytest.mark.parametrize("arr", [np.array(0.), np.arange(4)]) + def test_object_assignment_special_case(self, arraylike, arr): + obj = arraylike(arr) + empty = np.arange(1, dtype=object) + empty[:] = [obj] + assert empty[0] is obj + + def test_0d_generic_special_case(self): + class ArraySubclass(np.ndarray): + def __float__(self): + raise TypeError("e.g. 
quantities raise on this") + + arr = np.array(0.) + obj = arr.view(ArraySubclass) + res = np.array(obj) + # The subclass is simply cast: + assert_array_equal(arr, res) + + # If the 0-D array-like is included, __float__ is currently + # guaranteed to be used. We may want to change that, quantities + # and masked arrays half make use of this. + with pytest.raises(TypeError): + np.array([obj]) + + # The same holds for memoryview: + obj = memoryview(arr) + res = np.array(obj) + assert_array_equal(arr, res) + with pytest.raises(ValueError): + # The error type does not matter much here. + np.array([obj]) + + def test_arraylike_classes(self): + # The classes of array-likes should generally be acceptable to be + # stored inside a numpy (object) array. This tests all of the + # special attributes (since all are checked during coercion). + arr = np.array(np.int64) + assert arr[()] is np.int64 + arr = np.array([np.int64]) + assert arr[0] is np.int64 + + # This also works for properties/unbound methods: + class ArrayLike: + @property + def __array_interface__(self): + pass + + @property + def __array_struct__(self): + pass + + def __array__(self): + pass + + arr = np.array(ArrayLike) + assert arr[()] is ArrayLike + arr = np.array([ArrayLike]) + assert arr[0] is ArrayLike + + @pytest.mark.skipif( + np.dtype(np.intp).itemsize < 8, reason="Needs 64bit platform") + def test_too_large_array_error_paths(self): + """Test the error paths, including for memory leaks""" + arr = np.array(0, dtype="uint8") + # Guarantees that a contiguous copy won't work: + arr = np.broadcast_to(arr, 2**62) + + for i in range(5): + # repeat, to ensure caching cannot have an effect: + with pytest.raises(MemoryError): + np.array(arr) + with pytest.raises(MemoryError): + np.array([arr]) + + @pytest.mark.parametrize("attribute", + ["__array_interface__", "__array__", "__array_struct__"]) + @pytest.mark.parametrize("error", [RecursionError, MemoryError]) + def test_bad_array_like_attributes(self, attribute, error): + # RecursionError and MemoryError are considered fatal. All errors + # (except AttributeError) should probably be raised in the future, + # but shapely made use of it, so it will require a deprecation. + + class BadInterface: + def __getattr__(self, attr): + if attr == attribute: + raise error + super().__getattr__(attr) + + with pytest.raises(error): + np.array(BadInterface()) + + @pytest.mark.parametrize("error", [RecursionError, MemoryError]) + def test_bad_array_like_bad_length(self, error): + # RecursionError and MemoryError are considered "critical" in + # sequences. We could expand this more generally though. (NumPy 1.20) + class BadSequence: + def __len__(self): + raise error + def __getitem__(self): + # must have getitem to be a Sequence + return 1 + + with pytest.raises(error): + np.array(BadSequence()) + + +class TestAsArray: + """Test expected behaviors of ``asarray``.""" + + def test_dtype_identity(self): + """Confirm the intended behavior for *dtype* kwarg. + + The result of ``asarray()`` should have the dtype provided through the + keyword argument, when used. This forces unique array handles to be + produced for unique np.dtype objects, but (for equivalent dtypes), the + underlying data (the base object) is shared with the original array + object. + + Ref https://github.com/numpy/numpy/issues/1468 + """ + int_array = np.array([1, 2, 3], dtype='i') + assert np.asarray(int_array) is int_array + + # The character code resolves to the singleton dtype object provided + # by the numpy package. 
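+ # (Editor's aside, illustrative only, not part of the original test: builtin
+ # descriptors such as np.dtype('i') are cached singletons, so repeated
+ # lookups of the same character code return the identical object.)
+ assert np.dtype('i') is np.dtype('i')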
+ assert np.asarray(int_array, dtype='i') is int_array + + # Derive a dtype from n.dtype('i'), but add a metadata object to force + # the dtype to be distinct. + unequal_type = np.dtype('i', metadata={'spam': True}) + annotated_int_array = np.asarray(int_array, dtype=unequal_type) + assert annotated_int_array is not int_array + assert annotated_int_array.base is int_array + # Create an equivalent descriptor with a new and distinct dtype + # instance. + equivalent_requirement = np.dtype('i', metadata={'spam': True}) + annotated_int_array_alt = np.asarray(annotated_int_array, + dtype=equivalent_requirement) + assert unequal_type == equivalent_requirement + assert unequal_type is not equivalent_requirement + assert annotated_int_array_alt is not annotated_int_array + assert annotated_int_array_alt.dtype is equivalent_requirement + + # Check the same logic for a pair of C types whose equivalence may vary + # between computing environments. + # Find an equivalent pair. + integer_type_codes = ('i', 'l', 'q') + integer_dtypes = [np.dtype(code) for code in integer_type_codes] + typeA = None + typeB = None + for typeA, typeB in permutations(integer_dtypes, r=2): + if typeA == typeB: + assert typeA is not typeB + break + assert isinstance(typeA, np.dtype) and isinstance(typeB, np.dtype) + + # These ``asarray()`` calls may produce a new view or a copy, + # but never the same object. + long_int_array = np.asarray(int_array, dtype='l') + long_long_int_array = np.asarray(int_array, dtype='q') + assert long_int_array is not int_array + assert long_long_int_array is not int_array + assert np.asarray(long_int_array, dtype='q') is not long_int_array + array_a = np.asarray(int_array, dtype=typeA) + assert typeA == typeB + assert typeA is not typeB + assert array_a.dtype is typeA + assert array_a is not np.asarray(array_a, dtype=typeB) + assert np.asarray(array_a, dtype=typeB).dtype is typeB + assert array_a is np.asarray(array_a, dtype=typeB).base + + +class TestSpecialAttributeLookupFailure: + # An exception was raised while fetching the attribute + + class WeirdArrayLike: + @property + def __array__(self): + raise RuntimeError("oops!") + + class WeirdArrayInterface: + @property + def __array_interface__(self): + raise RuntimeError("oops!") + + def test_deprecated(self): + with pytest.raises(RuntimeError): + np.array(self.WeirdArrayLike()) + with pytest.raises(RuntimeError): + np.array(self.WeirdArrayInterface()) + + +def test_subarray_from_array_construction(): + # Arrays are more complex, since they "broadcast" on success: + arr = np.array([1, 2]) + + res = arr.astype("(2)i,") + assert_array_equal(res, [[1, 1], [2, 2]]) + + res = np.array(arr, dtype="(2)i,") + + assert_array_equal(res, [[1, 1], [2, 2]]) + + res = np.array([[(1,), (2,)], arr], dtype="(2)i,") + assert_array_equal(res, [[[1, 1], [2, 2]], [[1, 1], [2, 2]]]) + + # Also try a multi-dimensional example: + arr = np.arange(5 * 2).reshape(5, 2) + expected = np.broadcast_to(arr[:, :, np.newaxis, np.newaxis], (5, 2, 2, 2)) + + res = arr.astype("(2,2)f") + assert_array_equal(res, expected) + + res = np.array(arr, dtype="(2,2)f") + assert_array_equal(res, expected) + + +def test_empty_string(): + # Empty strings are unfortunately often converted to S1 and we need to + # make sure we are filling the S1 and not the (possibly) detected S0 + # result. This should likely just return S0 and if not maybe the decision + # to return S1 should be moved. 
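+ # (Editor's aside, illustrative only, not part of the original test: for
+ # non-empty input the discovered itemsize simply matches the data, e.g.
+ # np.array(["ab"], dtype="S") is discovered as "S2"; the empty-string case
+ # below is the S0-versus-S1 corner case described above.)
+ assert np.array(["ab"], dtype="S").dtype == np.dtype("S2")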
+ res = np.array([""] * 10, dtype="S") + assert_array_equal(res, np.array("\0", "S1")) + assert res.dtype == "S1" + + arr = np.array([""] * 10, dtype=object) + + res = arr.astype("S") + assert_array_equal(res, b"") + assert res.dtype == "S1" + + res = np.array(arr, dtype="S") + assert_array_equal(res, b"") + # TODO: This is arguably weird/wrong, but seems old: + assert res.dtype == f"S{np.dtype('O').itemsize}" + + res = np.array([[""] * 10, arr], dtype="S") + assert_array_equal(res, b"") + assert res.shape == (2, 10) + assert res.dtype == "S1" diff --git a/.venv/lib/python3.12/site-packages/numpy/core/tests/test_array_interface.py b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_array_interface.py new file mode 100644 index 00000000..16c719c5 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_array_interface.py @@ -0,0 +1,219 @@ +import sys +import pytest +import numpy as np +from numpy.testing import extbuild + + +@pytest.fixture +def get_module(tmp_path): + """ Some codes to generate data and manage temporary buffers use when + sharing with numpy via the array interface protocol. + """ + + if not sys.platform.startswith('linux'): + pytest.skip('link fails on cygwin') + + prologue = ''' + #include <Python.h> + #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION + #include <numpy/arrayobject.h> + #include <stdio.h> + #include <math.h> + + NPY_NO_EXPORT + void delete_array_struct(PyObject *cap) { + + /* get the array interface structure */ + PyArrayInterface *inter = (PyArrayInterface*) + PyCapsule_GetPointer(cap, NULL); + + /* get the buffer by which data was shared */ + double *ptr = (double*)PyCapsule_GetContext(cap); + + /* for the purposes of the regression test set the elements + to nan */ + for (npy_intp i = 0; i < inter->shape[0]; ++i) + ptr[i] = nan(""); + + /* free the shared buffer */ + free(ptr); + + /* free the array interface structure */ + free(inter->shape); + free(inter); + + fprintf(stderr, "delete_array_struct\\ncap = %ld inter = %ld" + " ptr = %ld\\n", (long)cap, (long)inter, (long)ptr); + } + ''' + + functions = [ + ("new_array_struct", "METH_VARARGS", """ + + long long n_elem = 0; + double value = 0.0; + + if (!PyArg_ParseTuple(args, "Ld", &n_elem, &value)) { + Py_RETURN_NONE; + } + + /* allocate and initialize the data to share with numpy */ + long long n_bytes = n_elem*sizeof(double); + double *data = (double*)malloc(n_bytes); + + if (!data) { + PyErr_Format(PyExc_MemoryError, + "Failed to malloc %lld bytes", n_bytes); + + Py_RETURN_NONE; + } + + for (long long i = 0; i < n_elem; ++i) { + data[i] = value; + } + + /* calculate the shape and stride */ + int nd = 1; + + npy_intp *ss = (npy_intp*)malloc(2*nd*sizeof(npy_intp)); + npy_intp *shape = ss; + npy_intp *stride = ss + nd; + + shape[0] = n_elem; + stride[0] = sizeof(double); + + /* construct the array interface */ + PyArrayInterface *inter = (PyArrayInterface*) + malloc(sizeof(PyArrayInterface)); + + memset(inter, 0, sizeof(PyArrayInterface)); + + inter->two = 2; + inter->nd = nd; + inter->typekind = 'f'; + inter->itemsize = sizeof(double); + inter->shape = shape; + inter->strides = stride; + inter->data = data; + inter->flags = NPY_ARRAY_WRITEABLE | NPY_ARRAY_NOTSWAPPED | + NPY_ARRAY_ALIGNED | NPY_ARRAY_C_CONTIGUOUS; + + /* package into a capsule */ + PyObject *cap = PyCapsule_New(inter, NULL, delete_array_struct); + + /* save the pointer to the data */ + PyCapsule_SetContext(cap, data); + + fprintf(stderr, "new_array_struct\\ncap = %ld inter = %ld" + " ptr = %ld\\n", (long)cap, 
(long)inter, (long)data); + + return cap; + """) + ] + + more_init = "import_array();" + + try: + import array_interface_testing + return array_interface_testing + except ImportError: + pass + + # if it does not exist, build and load it + return extbuild.build_and_import_extension('array_interface_testing', + functions, + prologue=prologue, + include_dirs=[np.get_include()], + build_dir=tmp_path, + more_init=more_init) + + +# FIXME: numpy.testing.extbuild uses `numpy.distutils`, so this won't work on +# Python 3.12 and up. +@pytest.mark.skipif(sys.version_info >= (3, 12), reason="no numpy.distutils") +@pytest.mark.slow +def test_cstruct(get_module): + + class data_source: + """ + This class is for testing the timing of the PyCapsule destructor + invoked when numpy release its reference to the shared data as part of + the numpy array interface protocol. If the PyCapsule destructor is + called early the shared data is freed and invalid memory accesses will + occur. + """ + + def __init__(self, size, value): + self.size = size + self.value = value + + @property + def __array_struct__(self): + return get_module.new_array_struct(self.size, self.value) + + # write to the same stream as the C code + stderr = sys.__stderr__ + + # used to validate the shared data. + expected_value = -3.1415 + multiplier = -10000.0 + + # create some data to share with numpy via the array interface + # assign the data an expected value. + stderr.write(' ---- create an object to share data ---- \n') + buf = data_source(256, expected_value) + stderr.write(' ---- OK!\n\n') + + # share the data + stderr.write(' ---- share data via the array interface protocol ---- \n') + arr = np.array(buf, copy=False) + stderr.write('arr.__array_interface___ = %s\n' % ( + str(arr.__array_interface__))) + stderr.write('arr.base = %s\n' % (str(arr.base))) + stderr.write(' ---- OK!\n\n') + + # release the source of the shared data. this will not release the data + # that was shared with numpy, that is done in the PyCapsule destructor. + stderr.write(' ---- destroy the object that shared data ---- \n') + buf = None + stderr.write(' ---- OK!\n\n') + + # check that we got the expected data. If the PyCapsule destructor we + # defined was prematurely called then this test will fail because our + # destructor sets the elements of the array to NaN before free'ing the + # buffer. Reading the values here may also cause a SEGV + assert np.allclose(arr, expected_value) + + # read the data. If the PyCapsule destructor we defined was prematurely + # called then reading the values here may cause a SEGV and will be reported + # as invalid reads by valgrind + stderr.write(' ---- read shared data ---- \n') + stderr.write('arr = %s\n' % (str(arr))) + stderr.write(' ---- OK!\n\n') + + # write to the shared buffer. If the shared data was prematurely deleted + # this will may cause a SEGV and valgrind will report invalid writes + stderr.write(' ---- modify shared data ---- \n') + arr *= multiplier + expected_value *= multiplier + stderr.write('arr.__array_interface___ = %s\n' % ( + str(arr.__array_interface__))) + stderr.write('arr.base = %s\n' % (str(arr.base))) + stderr.write(' ---- OK!\n\n') + + # read the data. If the shared data was prematurely deleted this + # will may cause a SEGV and valgrind will report invalid reads + stderr.write(' ---- read modified shared data ---- \n') + stderr.write('arr = %s\n' % (str(arr))) + stderr.write(' ---- OK!\n\n') + + # check that we got the expected data. 
If the PyCapsule destructor we + # defined was prematurely called then this test will fail because our + # destructor sets the elements of the array to NaN before free'ing the + # buffer. Reading the values here may also cause a SEGV + assert np.allclose(arr, expected_value) + + # free the shared data, the PyCapsule destructor should run here + stderr.write(' ---- free shared data ---- \n') + arr = None + stderr.write(' ---- OK!\n\n') diff --git a/.venv/lib/python3.12/site-packages/numpy/core/tests/test_arraymethod.py b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_arraymethod.py new file mode 100644 index 00000000..4fd4d555 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_arraymethod.py @@ -0,0 +1,85 @@ +""" +This file tests the generic aspects of ArrayMethod. At the time of writing +this is private API, but when added, public API may be added here. +""" + +from __future__ import annotations + +import sys +import types +from typing import Any + +import pytest + +import numpy as np +from numpy.core._multiarray_umath import _get_castingimpl as get_castingimpl + + +class TestResolveDescriptors: + # Test mainly error paths of the resolve_descriptors function, + # note that the `casting_unittests` tests exercise this non-error paths. + + # Casting implementations are the main/only current user: + method = get_castingimpl(type(np.dtype("d")), type(np.dtype("f"))) + + @pytest.mark.parametrize("args", [ + (True,), # Not a tuple. + ((None,)), # Too few elements + ((None, None, None),), # Too many + ((None, None),), # Input dtype is None, which is invalid. + ((np.dtype("d"), True),), # Output dtype is not a dtype + ((np.dtype("f"), None),), # Input dtype does not match method + ]) + def test_invalid_arguments(self, args): + with pytest.raises(TypeError): + self.method._resolve_descriptors(*args) + + +class TestSimpleStridedCall: + # Test mainly error paths of the resolve_descriptors function, + # note that the `casting_unittests` tests exercise this non-error paths. + + # Casting implementations are the main/only current user: + method = get_castingimpl(type(np.dtype("d")), type(np.dtype("f"))) + + @pytest.mark.parametrize(["args", "error"], [ + ((True,), TypeError), # Not a tuple + (((None,),), TypeError), # Too few elements + ((None, None), TypeError), # Inputs are not arrays. 
+ (((None, None, None),), TypeError), # Too many + (((np.arange(3), np.arange(3)),), TypeError), # Incorrect dtypes + (((np.ones(3, dtype=">d"), np.ones(3, dtype="<f")),), + TypeError), # Does not support byte-swapping + (((np.ones((2, 2), dtype="d"), np.ones((2, 2), dtype="f")),), + ValueError), # not 1-D + (((np.ones(3, dtype="d"), np.ones(4, dtype="f")),), + ValueError), # different length + (((np.frombuffer(b"\0x00"*3*2, dtype="d"), + np.frombuffer(b"\0x00"*3, dtype="f")),), + ValueError), # output not writeable + ]) + def test_invalid_arguments(self, args, error): + # This is private API, which may be modified freely + with pytest.raises(error): + self.method._simple_strided_call(*args) + + +@pytest.mark.parametrize( + "cls", [np.ndarray, np.recarray, np.chararray, np.matrix, np.memmap] +) +class TestClassGetItem: + def test_class_getitem(self, cls: type[np.ndarray]) -> None: + """Test `ndarray.__class_getitem__`.""" + alias = cls[Any, Any] + assert isinstance(alias, types.GenericAlias) + assert alias.__origin__ is cls + + @pytest.mark.parametrize("arg_len", range(4)) + def test_subscript_tup(self, cls: type[np.ndarray], arg_len: int) -> None: + arg_tup = (Any,) * arg_len + if arg_len in (1, 2): + assert cls[arg_tup] + else: + match = f"Too {'few' if arg_len == 0 else 'many'} arguments" + with pytest.raises(TypeError, match=match): + cls[arg_tup] diff --git a/.venv/lib/python3.12/site-packages/numpy/core/tests/test_arrayprint.py b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_arrayprint.py new file mode 100644 index 00000000..6796b407 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_arrayprint.py @@ -0,0 +1,1047 @@ +import sys +import gc +from hypothesis import given +from hypothesis.extra import numpy as hynp +import pytest + +import numpy as np +from numpy.testing import ( + assert_, assert_equal, assert_raises, assert_warns, HAS_REFCOUNT, + assert_raises_regex, + ) +from numpy.core.arrayprint import _typelessdata +import textwrap + +class TestArrayRepr: + def test_nan_inf(self): + x = np.array([np.nan, np.inf]) + assert_equal(repr(x), 'array([nan, inf])') + + def test_subclass(self): + class sub(np.ndarray): pass + + # one dimensional + x1d = np.array([1, 2]).view(sub) + assert_equal(repr(x1d), 'sub([1, 2])') + + # two dimensional + x2d = np.array([[1, 2], [3, 4]]).view(sub) + assert_equal(repr(x2d), + 'sub([[1, 2],\n' + ' [3, 4]])') + + # two dimensional with flexible dtype + xstruct = np.ones((2,2), dtype=[('a', '<i4')]).view(sub) + assert_equal(repr(xstruct), + "sub([[(1,), (1,)],\n" + " [(1,), (1,)]], dtype=[('a', '<i4')])" + ) + + @pytest.mark.xfail(reason="See gh-10544") + def test_object_subclass(self): + class sub(np.ndarray): + def __new__(cls, inp): + obj = np.asarray(inp).view(cls) + return obj + + def __getitem__(self, ind): + ret = super().__getitem__(ind) + return sub(ret) + + # test that object + subclass is OK: + x = sub([None, None]) + assert_equal(repr(x), 'sub([None, None], dtype=object)') + assert_equal(str(x), '[None None]') + + x = sub([None, sub([None, None])]) + assert_equal(repr(x), + 'sub([None, sub([None, None], dtype=object)], dtype=object)') + assert_equal(str(x), '[None sub([None, None], dtype=object)]') + + def test_0d_object_subclass(self): + # make sure that subclasses which return 0ds instead + # of scalars don't cause infinite recursion in str + class sub(np.ndarray): + def __new__(cls, inp): + obj = np.asarray(inp).view(cls) + return obj + + def __getitem__(self, ind): + ret = super().__getitem__(ind) + 
return sub(ret) + + x = sub(1) + assert_equal(repr(x), 'sub(1)') + assert_equal(str(x), '1') + + x = sub([1, 1]) + assert_equal(repr(x), 'sub([1, 1])') + assert_equal(str(x), '[1 1]') + + # check it works properly with object arrays too + x = sub(None) + assert_equal(repr(x), 'sub(None, dtype=object)') + assert_equal(str(x), 'None') + + # plus recursive object arrays (even depth > 1) + y = sub(None) + x[()] = y + y[()] = x + assert_equal(repr(x), + 'sub(sub(sub(..., dtype=object), dtype=object), dtype=object)') + assert_equal(str(x), '...') + x[()] = 0 # resolve circular references for garbage collector + + # nested 0d-subclass-object + x = sub(None) + x[()] = sub(None) + assert_equal(repr(x), 'sub(sub(None, dtype=object), dtype=object)') + assert_equal(str(x), 'None') + + # gh-10663 + class DuckCounter(np.ndarray): + def __getitem__(self, item): + result = super().__getitem__(item) + if not isinstance(result, DuckCounter): + result = result[...].view(DuckCounter) + return result + + def to_string(self): + return {0: 'zero', 1: 'one', 2: 'two'}.get(self.item(), 'many') + + def __str__(self): + if self.shape == (): + return self.to_string() + else: + fmt = {'all': lambda x: x.to_string()} + return np.array2string(self, formatter=fmt) + + dc = np.arange(5).view(DuckCounter) + assert_equal(str(dc), "[zero one two many many]") + assert_equal(str(dc[0]), "zero") + + def test_self_containing(self): + arr0d = np.array(None) + arr0d[()] = arr0d + assert_equal(repr(arr0d), + 'array(array(..., dtype=object), dtype=object)') + arr0d[()] = 0 # resolve recursion for garbage collector + + arr1d = np.array([None, None]) + arr1d[1] = arr1d + assert_equal(repr(arr1d), + 'array([None, array(..., dtype=object)], dtype=object)') + arr1d[1] = 0 # resolve recursion for garbage collector + + first = np.array(None) + second = np.array(None) + first[()] = second + second[()] = first + assert_equal(repr(first), + 'array(array(array(..., dtype=object), dtype=object), dtype=object)') + first[()] = 0 # resolve circular references for garbage collector + + def test_containing_list(self): + # printing square brackets directly would be ambiguuous + arr1d = np.array([None, None]) + arr1d[0] = [1, 2] + arr1d[1] = [3] + assert_equal(repr(arr1d), + 'array([list([1, 2]), list([3])], dtype=object)') + + def test_void_scalar_recursion(self): + # gh-9345 + repr(np.void(b'test')) # RecursionError ? 
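+ # (Editor's aside, illustrative only, not part of the original file: the
+ # gh-9345 check above only needs repr() to terminate; the expected
+ # bytes-style form is exercised precisely in
+ # TestArray2String.test_unstructured_void_repr further below.)
+ assert repr(np.void(b'test')).startswith("void(b'")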
+ + def test_fieldless_structured(self): + # gh-10366 + no_fields = np.dtype([]) + arr_no_fields = np.empty(4, dtype=no_fields) + assert_equal(repr(arr_no_fields), 'array([(), (), (), ()], dtype=[])') + + +class TestComplexArray: + def test_str(self): + rvals = [0, 1, -1, np.inf, -np.inf, np.nan] + cvals = [complex(rp, ip) for rp in rvals for ip in rvals] + dtypes = [np.complex64, np.cdouble, np.clongdouble] + actual = [str(np.array([c], dt)) for c in cvals for dt in dtypes] + wanted = [ + '[0.+0.j]', '[0.+0.j]', '[0.+0.j]', + '[0.+1.j]', '[0.+1.j]', '[0.+1.j]', + '[0.-1.j]', '[0.-1.j]', '[0.-1.j]', + '[0.+infj]', '[0.+infj]', '[0.+infj]', + '[0.-infj]', '[0.-infj]', '[0.-infj]', + '[0.+nanj]', '[0.+nanj]', '[0.+nanj]', + '[1.+0.j]', '[1.+0.j]', '[1.+0.j]', + '[1.+1.j]', '[1.+1.j]', '[1.+1.j]', + '[1.-1.j]', '[1.-1.j]', '[1.-1.j]', + '[1.+infj]', '[1.+infj]', '[1.+infj]', + '[1.-infj]', '[1.-infj]', '[1.-infj]', + '[1.+nanj]', '[1.+nanj]', '[1.+nanj]', + '[-1.+0.j]', '[-1.+0.j]', '[-1.+0.j]', + '[-1.+1.j]', '[-1.+1.j]', '[-1.+1.j]', + '[-1.-1.j]', '[-1.-1.j]', '[-1.-1.j]', + '[-1.+infj]', '[-1.+infj]', '[-1.+infj]', + '[-1.-infj]', '[-1.-infj]', '[-1.-infj]', + '[-1.+nanj]', '[-1.+nanj]', '[-1.+nanj]', + '[inf+0.j]', '[inf+0.j]', '[inf+0.j]', + '[inf+1.j]', '[inf+1.j]', '[inf+1.j]', + '[inf-1.j]', '[inf-1.j]', '[inf-1.j]', + '[inf+infj]', '[inf+infj]', '[inf+infj]', + '[inf-infj]', '[inf-infj]', '[inf-infj]', + '[inf+nanj]', '[inf+nanj]', '[inf+nanj]', + '[-inf+0.j]', '[-inf+0.j]', '[-inf+0.j]', + '[-inf+1.j]', '[-inf+1.j]', '[-inf+1.j]', + '[-inf-1.j]', '[-inf-1.j]', '[-inf-1.j]', + '[-inf+infj]', '[-inf+infj]', '[-inf+infj]', + '[-inf-infj]', '[-inf-infj]', '[-inf-infj]', + '[-inf+nanj]', '[-inf+nanj]', '[-inf+nanj]', + '[nan+0.j]', '[nan+0.j]', '[nan+0.j]', + '[nan+1.j]', '[nan+1.j]', '[nan+1.j]', + '[nan-1.j]', '[nan-1.j]', '[nan-1.j]', + '[nan+infj]', '[nan+infj]', '[nan+infj]', + '[nan-infj]', '[nan-infj]', '[nan-infj]', + '[nan+nanj]', '[nan+nanj]', '[nan+nanj]'] + + for res, val in zip(actual, wanted): + assert_equal(res, val) + +class TestArray2String: + def test_basic(self): + """Basic test of array2string.""" + a = np.arange(3) + assert_(np.array2string(a) == '[0 1 2]') + assert_(np.array2string(a, max_line_width=4, legacy='1.13') == '[0 1\n 2]') + assert_(np.array2string(a, max_line_width=4) == '[0\n 1\n 2]') + + def test_unexpected_kwarg(self): + # ensure than an appropriate TypeError + # is raised when array2string receives + # an unexpected kwarg + + with assert_raises_regex(TypeError, 'nonsense'): + np.array2string(np.array([1, 2, 3]), + nonsense=None) + + def test_format_function(self): + """Test custom format function for each element in array.""" + def _format_function(x): + if np.abs(x) < 1: + return '.' + elif np.abs(x) < 2: + return 'o' + else: + return 'O' + + x = np.arange(3) + x_hex = "[0x0 0x1 0x2]" + x_oct = "[0o0 0o1 0o2]" + assert_(np.array2string(x, formatter={'all':_format_function}) == + "[. o O]") + assert_(np.array2string(x, formatter={'int_kind':_format_function}) == + "[. o O]") + assert_(np.array2string(x, formatter={'all':lambda x: "%.4f" % x}) == + "[0.0000 1.0000 2.0000]") + assert_equal(np.array2string(x, formatter={'int':lambda x: hex(x)}), + x_hex) + assert_equal(np.array2string(x, formatter={'int':lambda x: oct(x)}), + x_oct) + + x = np.arange(3.) 
+ assert_(np.array2string(x, formatter={'float_kind':lambda x: "%.2f" % x}) == + "[0.00 1.00 2.00]") + assert_(np.array2string(x, formatter={'float':lambda x: "%.2f" % x}) == + "[0.00 1.00 2.00]") + + s = np.array(['abc', 'def']) + assert_(np.array2string(s, formatter={'numpystr':lambda s: s*2}) == + '[abcabc defdef]') + + def test_structure_format_mixed(self): + dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))]) + x = np.array([('Sarah', (8.0, 7.0)), ('John', (6.0, 7.0))], dtype=dt) + assert_equal(np.array2string(x), + "[('Sarah', [8., 7.]) ('John', [6., 7.])]") + + np.set_printoptions(legacy='1.13') + try: + # for issue #5692 + A = np.zeros(shape=10, dtype=[("A", "M8[s]")]) + A[5:].fill(np.datetime64('NaT')) + assert_equal( + np.array2string(A), + textwrap.dedent("""\ + [('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) + ('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) ('NaT',) ('NaT',) + ('NaT',) ('NaT',) ('NaT',)]""") + ) + finally: + np.set_printoptions(legacy=False) + + # same again, but with non-legacy behavior + assert_equal( + np.array2string(A), + textwrap.dedent("""\ + [('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) + ('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) + ('1970-01-01T00:00:00',) ( 'NaT',) + ( 'NaT',) ( 'NaT',) + ( 'NaT',) ( 'NaT',)]""") + ) + + # and again, with timedeltas + A = np.full(10, 123456, dtype=[("A", "m8[s]")]) + A[5:].fill(np.datetime64('NaT')) + assert_equal( + np.array2string(A), + textwrap.dedent("""\ + [(123456,) (123456,) (123456,) (123456,) (123456,) ( 'NaT',) ( 'NaT',) + ( 'NaT',) ( 'NaT',) ( 'NaT',)]""") + ) + + def test_structure_format_int(self): + # See #8160 + struct_int = np.array([([1, -1],), ([123, 1],)], dtype=[('B', 'i4', 2)]) + assert_equal(np.array2string(struct_int), + "[([ 1, -1],) ([123, 1],)]") + struct_2dint = np.array([([[0, 1], [2, 3]],), ([[12, 0], [0, 0]],)], + dtype=[('B', 'i4', (2, 2))]) + assert_equal(np.array2string(struct_2dint), + "[([[ 0, 1], [ 2, 3]],) ([[12, 0], [ 0, 0]],)]") + + def test_structure_format_float(self): + # See #8172 + array_scalar = np.array( + (1., 2.1234567890123456789, 3.), dtype=('f8,f8,f8')) + assert_equal(np.array2string(array_scalar), "(1., 2.12345679, 3.)") + + def test_unstructured_void_repr(self): + a = np.array([27, 91, 50, 75, 7, 65, 10, 8, + 27, 91, 51, 49,109, 82,101,100], dtype='u1').view('V8') + assert_equal(repr(a[0]), r"void(b'\x1B\x5B\x32\x4B\x07\x41\x0A\x08')") + assert_equal(str(a[0]), r"b'\x1B\x5B\x32\x4B\x07\x41\x0A\x08'") + assert_equal(repr(a), + r"array([b'\x1B\x5B\x32\x4B\x07\x41\x0A\x08'," "\n" + r" b'\x1B\x5B\x33\x31\x6D\x52\x65\x64'], dtype='|V8')") + + assert_equal(eval(repr(a), vars(np)), a) + assert_equal(eval(repr(a[0]), vars(np)), a[0]) + + def test_edgeitems_kwarg(self): + # previously the global print options would be taken over the kwarg + arr = np.zeros(3, int) + assert_equal( + np.array2string(arr, edgeitems=1, threshold=0), + "[0 ... 0]" + ) + + def test_summarize_1d(self): + A = np.arange(1001) + strA = '[ 0 1 2 ... 998 999 1000]' + assert_equal(str(A), strA) + + reprA = 'array([ 0, 1, 2, ..., 998, 999, 1000])' + assert_equal(repr(A), reprA) + + def test_summarize_2d(self): + A = np.arange(1002).reshape(2, 501) + strA = '[[ 0 1 2 ... 498 499 500]\n' \ + ' [ 501 502 503 ... 
999 1000 1001]]' + assert_equal(str(A), strA) + + reprA = 'array([[ 0, 1, 2, ..., 498, 499, 500],\n' \ + ' [ 501, 502, 503, ..., 999, 1000, 1001]])' + assert_equal(repr(A), reprA) + + def test_summarize_structure(self): + A = (np.arange(2002, dtype="<i8").reshape(2, 1001) + .view([('i', "<i8", (1001,))])) + strA = ("[[([ 0, 1, 2, ..., 998, 999, 1000],)]\n" + " [([1001, 1002, 1003, ..., 1999, 2000, 2001],)]]") + assert_equal(str(A), strA) + + reprA = ("array([[([ 0, 1, 2, ..., 998, 999, 1000],)],\n" + " [([1001, 1002, 1003, ..., 1999, 2000, 2001],)]],\n" + " dtype=[('i', '<i8', (1001,))])") + assert_equal(repr(A), reprA) + + B = np.ones(2002, dtype=">i8").view([('i', ">i8", (2, 1001))]) + strB = "[([[1, 1, 1, ..., 1, 1, 1], [1, 1, 1, ..., 1, 1, 1]],)]" + assert_equal(str(B), strB) + + reprB = ( + "array([([[1, 1, 1, ..., 1, 1, 1], [1, 1, 1, ..., 1, 1, 1]],)],\n" + " dtype=[('i', '>i8', (2, 1001))])" + ) + assert_equal(repr(B), reprB) + + C = (np.arange(22, dtype="<i8").reshape(2, 11) + .view([('i1', "<i8"), ('i10', "<i8", (10,))])) + strC = "[[( 0, [ 1, ..., 10])]\n [(11, [12, ..., 21])]]" + assert_equal(np.array2string(C, threshold=1, edgeitems=1), strC) + + def test_linewidth(self): + a = np.full(6, 1) + + def make_str(a, width, **kw): + return np.array2string(a, separator="", max_line_width=width, **kw) + + assert_equal(make_str(a, 8, legacy='1.13'), '[111111]') + assert_equal(make_str(a, 7, legacy='1.13'), '[111111]') + assert_equal(make_str(a, 5, legacy='1.13'), '[1111\n' + ' 11]') + + assert_equal(make_str(a, 8), '[111111]') + assert_equal(make_str(a, 7), '[11111\n' + ' 1]') + assert_equal(make_str(a, 5), '[111\n' + ' 111]') + + b = a[None,None,:] + + assert_equal(make_str(b, 12, legacy='1.13'), '[[[111111]]]') + assert_equal(make_str(b, 9, legacy='1.13'), '[[[111111]]]') + assert_equal(make_str(b, 8, legacy='1.13'), '[[[11111\n' + ' 1]]]') + + assert_equal(make_str(b, 12), '[[[111111]]]') + assert_equal(make_str(b, 9), '[[[111\n' + ' 111]]]') + assert_equal(make_str(b, 8), '[[[11\n' + ' 11\n' + ' 11]]]') + + def test_wide_element(self): + a = np.array(['xxxxx']) + assert_equal( + np.array2string(a, max_line_width=5), + "['xxxxx']" + ) + assert_equal( + np.array2string(a, max_line_width=5, legacy='1.13'), + "[ 'xxxxx']" + ) + + def test_multiline_repr(self): + class MultiLine: + def __repr__(self): + return "Line 1\nLine 2" + + a = np.array([[None, MultiLine()], [MultiLine(), None]]) + + assert_equal( + np.array2string(a), + '[[None Line 1\n' + ' Line 2]\n' + ' [Line 1\n' + ' Line 2 None]]' + ) + assert_equal( + np.array2string(a, max_line_width=5), + '[[None\n' + ' Line 1\n' + ' Line 2]\n' + ' [Line 1\n' + ' Line 2\n' + ' None]]' + ) + assert_equal( + repr(a), + 'array([[None, Line 1\n' + ' Line 2],\n' + ' [Line 1\n' + ' Line 2, None]], dtype=object)' + ) + + class MultiLineLong: + def __repr__(self): + return "Line 1\nLooooooooooongestLine2\nLongerLine 3" + + a = np.array([[None, MultiLineLong()], [MultiLineLong(), None]]) + assert_equal( + repr(a), + 'array([[None, Line 1\n' + ' LooooooooooongestLine2\n' + ' LongerLine 3 ],\n' + ' [Line 1\n' + ' LooooooooooongestLine2\n' + ' LongerLine 3 , None]], dtype=object)' + ) + assert_equal( + np.array_repr(a, 20), + 'array([[None,\n' + ' Line 1\n' + ' LooooooooooongestLine2\n' + ' LongerLine 3 ],\n' + ' [Line 1\n' + ' LooooooooooongestLine2\n' + ' LongerLine 3 ,\n' + ' None]],\n' + ' dtype=object)' + ) + + def test_nested_array_repr(self): + a = np.empty((2, 2), dtype=object) + a[0, 0] = np.eye(2) + a[0, 1] = np.eye(3) + a[1, 0] = None + 
a[1, 1] = np.ones((3, 1)) + assert_equal( + repr(a), + 'array([[array([[1., 0.],\n' + ' [0., 1.]]), array([[1., 0., 0.],\n' + ' [0., 1., 0.],\n' + ' [0., 0., 1.]])],\n' + ' [None, array([[1.],\n' + ' [1.],\n' + ' [1.]])]], dtype=object)' + ) + + @given(hynp.from_dtype(np.dtype("U"))) + def test_any_text(self, text): + # This test checks that, given any value that can be represented in an + # array of dtype("U") (i.e. unicode string), ... + a = np.array([text, text, text]) + # casting a list of them to an array does not e.g. truncate the value + assert_equal(a[0], text) + # and that np.array2string puts a newline in the expected location + expected_repr = "[{0!r} {0!r}\n {0!r}]".format(text) + result = np.array2string(a, max_line_width=len(repr(text)) * 2 + 3) + assert_equal(result, expected_repr) + + @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") + def test_refcount(self): + # make sure we do not hold references to the array due to a recursive + # closure (gh-10620) + gc.disable() + a = np.arange(2) + r1 = sys.getrefcount(a) + np.array2string(a) + np.array2string(a) + r2 = sys.getrefcount(a) + gc.collect() + gc.enable() + assert_(r1 == r2) + +class TestPrintOptions: + """Test getting and setting global print options.""" + + def setup_method(self): + self.oldopts = np.get_printoptions() + + def teardown_method(self): + np.set_printoptions(**self.oldopts) + + def test_basic(self): + x = np.array([1.5, 0, 1.234567890]) + assert_equal(repr(x), "array([1.5 , 0. , 1.23456789])") + np.set_printoptions(precision=4) + assert_equal(repr(x), "array([1.5 , 0. , 1.2346])") + + def test_precision_zero(self): + np.set_printoptions(precision=0) + for values, string in ( + ([0.], "0."), ([.3], "0."), ([-.3], "-0."), ([.7], "1."), + ([1.5], "2."), ([-1.5], "-2."), ([-15.34], "-15."), + ([100.], "100."), ([.2, -1, 122.51], " 0., -1., 123."), + ([0], "0"), ([-12], "-12"), ([complex(.3, -.7)], "0.-1.j")): + x = np.array(values) + assert_equal(repr(x), "array([%s])" % string) + + def test_formatter(self): + x = np.arange(3) + np.set_printoptions(formatter={'all':lambda x: str(x-1)}) + assert_equal(repr(x), "array([-1, 0, 1])") + + def test_formatter_reset(self): + x = np.arange(3) + np.set_printoptions(formatter={'all':lambda x: str(x-1)}) + assert_equal(repr(x), "array([-1, 0, 1])") + np.set_printoptions(formatter={'int':None}) + assert_equal(repr(x), "array([0, 1, 2])") + + np.set_printoptions(formatter={'all':lambda x: str(x-1)}) + assert_equal(repr(x), "array([-1, 0, 1])") + np.set_printoptions(formatter={'all':None}) + assert_equal(repr(x), "array([0, 1, 2])") + + np.set_printoptions(formatter={'int':lambda x: str(x-1)}) + assert_equal(repr(x), "array([-1, 0, 1])") + np.set_printoptions(formatter={'int_kind':None}) + assert_equal(repr(x), "array([0, 1, 2])") + + x = np.arange(3.) 
+ np.set_printoptions(formatter={'float':lambda x: str(x-1)}) + assert_equal(repr(x), "array([-1.0, 0.0, 1.0])") + np.set_printoptions(formatter={'float_kind':None}) + assert_equal(repr(x), "array([0., 1., 2.])") + + def test_0d_arrays(self): + assert_equal(str(np.array('café', '<U4')), 'café') + + assert_equal(repr(np.array('café', '<U4')), + "array('café', dtype='<U4')") + assert_equal(str(np.array('test', np.str_)), 'test') + + a = np.zeros(1, dtype=[('a', '<i4', (3,))]) + assert_equal(str(a[0]), '([0, 0, 0],)') + + assert_equal(repr(np.datetime64('2005-02-25')[...]), + "array('2005-02-25', dtype='datetime64[D]')") + + assert_equal(repr(np.timedelta64('10', 'Y')[...]), + "array(10, dtype='timedelta64[Y]')") + + # repr of 0d arrays is affected by printoptions + x = np.array(1) + np.set_printoptions(formatter={'all':lambda x: "test"}) + assert_equal(repr(x), "array(test)") + # str is unaffected + assert_equal(str(x), "1") + + # check `style` arg raises + assert_warns(DeprecationWarning, np.array2string, + np.array(1.), style=repr) + # but not in legacy mode + np.array2string(np.array(1.), style=repr, legacy='1.13') + # gh-10934 style was broken in legacy mode, check it works + np.array2string(np.array(1.), legacy='1.13') + + def test_float_spacing(self): + x = np.array([1., 2., 3.]) + y = np.array([1., 2., -10.]) + z = np.array([100., 2., -1.]) + w = np.array([-100., 2., 1.]) + + assert_equal(repr(x), 'array([1., 2., 3.])') + assert_equal(repr(y), 'array([ 1., 2., -10.])') + assert_equal(repr(np.array(y[0])), 'array(1.)') + assert_equal(repr(np.array(y[-1])), 'array(-10.)') + assert_equal(repr(z), 'array([100., 2., -1.])') + assert_equal(repr(w), 'array([-100., 2., 1.])') + + assert_equal(repr(np.array([np.nan, np.inf])), 'array([nan, inf])') + assert_equal(repr(np.array([np.nan, -np.inf])), 'array([ nan, -inf])') + + x = np.array([np.inf, 100000, 1.1234]) + y = np.array([np.inf, 100000, -1.1234]) + z = np.array([np.inf, 1.1234, -1e120]) + np.set_printoptions(precision=2) + assert_equal(repr(x), 'array([ inf, 1.00e+05, 1.12e+00])') + assert_equal(repr(y), 'array([ inf, 1.00e+05, -1.12e+00])') + assert_equal(repr(z), 'array([ inf, 1.12e+000, -1.00e+120])') + + def test_bool_spacing(self): + assert_equal(repr(np.array([True, True])), + 'array([ True, True])') + assert_equal(repr(np.array([True, False])), + 'array([ True, False])') + assert_equal(repr(np.array([True])), + 'array([ True])') + assert_equal(repr(np.array(True)), + 'array(True)') + assert_equal(repr(np.array(False)), + 'array(False)') + + def test_sign_spacing(self): + a = np.arange(4.) + b = np.array([1.234e9]) + c = np.array([1.0 + 1.0j, 1.123456789 + 1.123456789j], dtype='c16') + + assert_equal(repr(a), 'array([0., 1., 2., 3.])') + assert_equal(repr(np.array(1.)), 'array(1.)') + assert_equal(repr(b), 'array([1.234e+09])') + assert_equal(repr(np.array([0.])), 'array([0.])') + assert_equal(repr(c), + "array([1. +1.j , 1.12345679+1.12345679j])") + assert_equal(repr(np.array([0., -0.])), 'array([ 0., -0.])') + + np.set_printoptions(sign=' ') + assert_equal(repr(a), 'array([ 0., 1., 2., 3.])') + assert_equal(repr(np.array(1.)), 'array( 1.)') + assert_equal(repr(b), 'array([ 1.234e+09])') + assert_equal(repr(c), + "array([ 1. 
+1.j , 1.12345679+1.12345679j])") + assert_equal(repr(np.array([0., -0.])), 'array([ 0., -0.])') + + np.set_printoptions(sign='+') + assert_equal(repr(a), 'array([+0., +1., +2., +3.])') + assert_equal(repr(np.array(1.)), 'array(+1.)') + assert_equal(repr(b), 'array([+1.234e+09])') + assert_equal(repr(c), + "array([+1. +1.j , +1.12345679+1.12345679j])") + + np.set_printoptions(legacy='1.13') + assert_equal(repr(a), 'array([ 0., 1., 2., 3.])') + assert_equal(repr(b), 'array([ 1.23400000e+09])') + assert_equal(repr(-b), 'array([ -1.23400000e+09])') + assert_equal(repr(np.array(1.)), 'array(1.0)') + assert_equal(repr(np.array([0.])), 'array([ 0.])') + assert_equal(repr(c), + "array([ 1.00000000+1.j , 1.12345679+1.12345679j])") + # gh-10383 + assert_equal(str(np.array([-1., 10])), "[ -1. 10.]") + + assert_raises(TypeError, np.set_printoptions, wrongarg=True) + + def test_float_overflow_nowarn(self): + # make sure internal computations in FloatingFormat don't + # warn about overflow + repr(np.array([1e4, 0.1], dtype='f2')) + + def test_sign_spacing_structured(self): + a = np.ones(2, dtype='<f,<f') + assert_equal(repr(a), + "array([(1., 1.), (1., 1.)], dtype=[('f0', '<f4'), ('f1', '<f4')])") + assert_equal(repr(a[0]), "(1., 1.)") + + def test_floatmode(self): + x = np.array([0.6104, 0.922, 0.457, 0.0906, 0.3733, 0.007244, + 0.5933, 0.947, 0.2383, 0.4226], dtype=np.float16) + y = np.array([0.2918820979355541, 0.5064172631089138, + 0.2848750619642916, 0.4342965294660567, + 0.7326538397312751, 0.3459503329096204, + 0.0862072768214508, 0.39112753029631175], + dtype=np.float64) + z = np.arange(6, dtype=np.float16)/10 + c = np.array([1.0 + 1.0j, 1.123456789 + 1.123456789j], dtype='c16') + + # also make sure 1e23 is right (is between two fp numbers) + w = np.array(['1e{}'.format(i) for i in range(25)], dtype=np.float64) + # note: we construct w from the strings `1eXX` instead of doing + # `10.**arange(24)` because it turns out the two are not equivalent in + # python. On some architectures `1e23 != 10.**23`. + wp = np.array([1.234e1, 1e2, 1e123]) + + # unique mode + np.set_printoptions(floatmode='unique') + assert_equal(repr(x), + "array([0.6104 , 0.922 , 0.457 , 0.0906 , 0.3733 , 0.007244,\n" + " 0.5933 , 0.947 , 0.2383 , 0.4226 ], dtype=float16)") + assert_equal(repr(y), + "array([0.2918820979355541 , 0.5064172631089138 , 0.2848750619642916 ,\n" + " 0.4342965294660567 , 0.7326538397312751 , 0.3459503329096204 ,\n" + " 0.0862072768214508 , 0.39112753029631175])") + assert_equal(repr(z), + "array([0. , 0.1, 0.2, 0.3, 0.4, 0.5], dtype=float16)") + assert_equal(repr(w), + "array([1.e+00, 1.e+01, 1.e+02, 1.e+03, 1.e+04, 1.e+05, 1.e+06, 1.e+07,\n" + " 1.e+08, 1.e+09, 1.e+10, 1.e+11, 1.e+12, 1.e+13, 1.e+14, 1.e+15,\n" + " 1.e+16, 1.e+17, 1.e+18, 1.e+19, 1.e+20, 1.e+21, 1.e+22, 1.e+23,\n" + " 1.e+24])") + assert_equal(repr(wp), "array([1.234e+001, 1.000e+002, 1.000e+123])") + assert_equal(repr(c), + "array([1. +1.j , 1.123456789+1.123456789j])") + + # maxprec mode, precision=8 + np.set_printoptions(floatmode='maxprec', precision=8) + assert_equal(repr(x), + "array([0.6104 , 0.922 , 0.457 , 0.0906 , 0.3733 , 0.007244,\n" + " 0.5933 , 0.947 , 0.2383 , 0.4226 ], dtype=float16)") + assert_equal(repr(y), + "array([0.2918821 , 0.50641726, 0.28487506, 0.43429653, 0.73265384,\n" + " 0.34595033, 0.08620728, 0.39112753])") + assert_equal(repr(z), + "array([0. 
, 0.1, 0.2, 0.3, 0.4, 0.5], dtype=float16)") + assert_equal(repr(w[::5]), + "array([1.e+00, 1.e+05, 1.e+10, 1.e+15, 1.e+20])") + assert_equal(repr(wp), "array([1.234e+001, 1.000e+002, 1.000e+123])") + assert_equal(repr(c), + "array([1. +1.j , 1.12345679+1.12345679j])") + + # fixed mode, precision=4 + np.set_printoptions(floatmode='fixed', precision=4) + assert_equal(repr(x), + "array([0.6104, 0.9219, 0.4570, 0.0906, 0.3733, 0.0072, 0.5933, 0.9468,\n" + " 0.2383, 0.4226], dtype=float16)") + assert_equal(repr(y), + "array([0.2919, 0.5064, 0.2849, 0.4343, 0.7327, 0.3460, 0.0862, 0.3911])") + assert_equal(repr(z), + "array([0.0000, 0.1000, 0.2000, 0.3000, 0.3999, 0.5000], dtype=float16)") + assert_equal(repr(w[::5]), + "array([1.0000e+00, 1.0000e+05, 1.0000e+10, 1.0000e+15, 1.0000e+20])") + assert_equal(repr(wp), "array([1.2340e+001, 1.0000e+002, 1.0000e+123])") + assert_equal(repr(np.zeros(3)), "array([0.0000, 0.0000, 0.0000])") + assert_equal(repr(c), + "array([1.0000+1.0000j, 1.1235+1.1235j])") + # for larger precision, representation error becomes more apparent: + np.set_printoptions(floatmode='fixed', precision=8) + assert_equal(repr(z), + "array([0.00000000, 0.09997559, 0.19995117, 0.30004883, 0.39990234,\n" + " 0.50000000], dtype=float16)") + + # maxprec_equal mode, precision=8 + np.set_printoptions(floatmode='maxprec_equal', precision=8) + assert_equal(repr(x), + "array([0.610352, 0.921875, 0.457031, 0.090576, 0.373291, 0.007244,\n" + " 0.593262, 0.946777, 0.238281, 0.422607], dtype=float16)") + assert_equal(repr(y), + "array([0.29188210, 0.50641726, 0.28487506, 0.43429653, 0.73265384,\n" + " 0.34595033, 0.08620728, 0.39112753])") + assert_equal(repr(z), + "array([0.0, 0.1, 0.2, 0.3, 0.4, 0.5], dtype=float16)") + assert_equal(repr(w[::5]), + "array([1.e+00, 1.e+05, 1.e+10, 1.e+15, 1.e+20])") + assert_equal(repr(wp), "array([1.234e+001, 1.000e+002, 1.000e+123])") + assert_equal(repr(c), + "array([1.00000000+1.00000000j, 1.12345679+1.12345679j])") + + # test unique special case (gh-18609) + a = np.float64.fromhex('-1p-97') + assert_equal(np.float64(np.array2string(a, floatmode='unique')), a) + + def test_legacy_mode_scalars(self): + # in legacy mode, str of floats get truncated, and complex scalars + # use * for non-finite imaginary part + np.set_printoptions(legacy='1.13') + assert_equal(str(np.float64(1.123456789123456789)), '1.12345678912') + assert_equal(str(np.complex128(complex(1, np.nan))), '(1+nan*j)') + + np.set_printoptions(legacy=False) + assert_equal(str(np.float64(1.123456789123456789)), + '1.1234567891234568') + assert_equal(str(np.complex128(complex(1, np.nan))), '(1+nanj)') + + def test_legacy_stray_comma(self): + np.set_printoptions(legacy='1.13') + assert_equal(str(np.arange(10000)), '[ 0 1 2 ..., 9997 9998 9999]') + + np.set_printoptions(legacy=False) + assert_equal(str(np.arange(10000)), '[ 0 1 2 ... 
9997 9998 9999]') + + def test_dtype_linewidth_wrapping(self): + np.set_printoptions(linewidth=75) + assert_equal(repr(np.arange(10,20., dtype='f4')), + "array([10., 11., 12., 13., 14., 15., 16., 17., 18., 19.], dtype=float32)") + assert_equal(repr(np.arange(10,23., dtype='f4')), textwrap.dedent("""\ + array([10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22.], + dtype=float32)""")) + + styp = '<U4' + assert_equal(repr(np.ones(3, dtype=styp)), + "array(['1', '1', '1'], dtype='{}')".format(styp)) + assert_equal(repr(np.ones(12, dtype=styp)), textwrap.dedent("""\ + array(['1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1'], + dtype='{}')""".format(styp))) + + @pytest.mark.parametrize( + ['native'], + [ + ('bool',), + ('uint8',), + ('uint16',), + ('uint32',), + ('uint64',), + ('int8',), + ('int16',), + ('int32',), + ('int64',), + ('float16',), + ('float32',), + ('float64',), + ('U1',), # 4-byte width string + ], + ) + def test_dtype_endianness_repr(self, native): + ''' + there was an issue where + repr(array([0], dtype='<u2')) and repr(array([0], dtype='>u2')) + both returned the same thing: + array([0], dtype=uint16) + even though their dtypes have different endianness. + ''' + native_dtype = np.dtype(native) + non_native_dtype = native_dtype.newbyteorder() + non_native_repr = repr(np.array([1], non_native_dtype)) + native_repr = repr(np.array([1], native_dtype)) + # preserve the sensible default of only showing dtype if nonstandard + assert ('dtype' in native_repr) ^ (native_dtype in _typelessdata),\ + ("an array's repr should show dtype if and only if the type " + 'of the array is NOT one of the standard types ' + '(e.g., int32, bool, float64).') + if non_native_dtype.itemsize > 1: + # if the type is >1 byte, the non-native endian version + # must show endianness. 
+ assert non_native_repr != native_repr + assert f"dtype='{non_native_dtype.byteorder}" in non_native_repr + + def test_linewidth_repr(self): + a = np.full(7, fill_value=2) + np.set_printoptions(linewidth=17) + assert_equal( + repr(a), + textwrap.dedent("""\ + array([2, 2, 2, + 2, 2, 2, + 2])""") + ) + np.set_printoptions(linewidth=17, legacy='1.13') + assert_equal( + repr(a), + textwrap.dedent("""\ + array([2, 2, 2, + 2, 2, 2, 2])""") + ) + + a = np.full(8, fill_value=2) + + np.set_printoptions(linewidth=18, legacy=False) + assert_equal( + repr(a), + textwrap.dedent("""\ + array([2, 2, 2, + 2, 2, 2, + 2, 2])""") + ) + + np.set_printoptions(linewidth=18, legacy='1.13') + assert_equal( + repr(a), + textwrap.dedent("""\ + array([2, 2, 2, 2, + 2, 2, 2, 2])""") + ) + + def test_linewidth_str(self): + a = np.full(18, fill_value=2) + np.set_printoptions(linewidth=18) + assert_equal( + str(a), + textwrap.dedent("""\ + [2 2 2 2 2 2 2 2 + 2 2 2 2 2 2 2 2 + 2 2]""") + ) + np.set_printoptions(linewidth=18, legacy='1.13') + assert_equal( + str(a), + textwrap.dedent("""\ + [2 2 2 2 2 2 2 2 2 + 2 2 2 2 2 2 2 2 2]""") + ) + + def test_edgeitems(self): + np.set_printoptions(edgeitems=1, threshold=1) + a = np.arange(27).reshape((3, 3, 3)) + assert_equal( + repr(a), + textwrap.dedent("""\ + array([[[ 0, ..., 2], + ..., + [ 6, ..., 8]], + + ..., + + [[18, ..., 20], + ..., + [24, ..., 26]]])""") + ) + + b = np.zeros((3, 3, 1, 1)) + assert_equal( + repr(b), + textwrap.dedent("""\ + array([[[[0.]], + + ..., + + [[0.]]], + + + ..., + + + [[[0.]], + + ..., + + [[0.]]]])""") + ) + + # 1.13 had extra trailing spaces, and was missing newlines + np.set_printoptions(legacy='1.13') + + assert_equal( + repr(a), + textwrap.dedent("""\ + array([[[ 0, ..., 2], + ..., + [ 6, ..., 8]], + + ..., + [[18, ..., 20], + ..., + [24, ..., 26]]])""") + ) + + assert_equal( + repr(b), + textwrap.dedent("""\ + array([[[[ 0.]], + + ..., + [[ 0.]]], + + + ..., + [[[ 0.]], + + ..., + [[ 0.]]]])""") + ) + + def test_edgeitems_structured(self): + np.set_printoptions(edgeitems=1, threshold=1) + A = np.arange(5*2*3, dtype="<i8").view([('i', "<i8", (5, 2, 3))]) + reprA = ( + "array([([[[ 0, ..., 2], [ 3, ..., 5]], ..., " + "[[24, ..., 26], [27, ..., 29]]],)],\n" + " dtype=[('i', '<i8', (5, 2, 3))])" + ) + assert_equal(repr(A), reprA) + + def test_bad_args(self): + assert_raises(ValueError, np.set_printoptions, threshold=float('nan')) + assert_raises(TypeError, np.set_printoptions, threshold='1') + assert_raises(TypeError, np.set_printoptions, threshold=b'1') + + assert_raises(TypeError, np.set_printoptions, precision='1') + assert_raises(TypeError, np.set_printoptions, precision=1.5) + +def test_unicode_object_array(): + expected = "array(['é'], dtype=object)" + x = np.array(['\xe9'], dtype=object) + assert_equal(repr(x), expected) + + +class TestContextManager: + def test_ctx_mgr(self): + # test that context manager actually works + with np.printoptions(precision=2): + s = str(np.array([2.0]) / 3) + assert_equal(s, '[0.67]') + + def test_ctx_mgr_restores(self): + # test that print options are actually restrored + opts = np.get_printoptions() + with np.printoptions(precision=opts['precision'] - 1, + linewidth=opts['linewidth'] - 4): + pass + assert_equal(np.get_printoptions(), opts) + + def test_ctx_mgr_exceptions(self): + # test that print options are restored even if an exception is raised + opts = np.get_printoptions() + try: + with np.printoptions(precision=2, linewidth=11): + raise ValueError + except ValueError: + pass + 
assert_equal(np.get_printoptions(), opts) + + def test_ctx_mgr_as_smth(self): + opts = {"precision": 2} + with np.printoptions(**opts) as ctx: + saved_opts = ctx.copy() + assert_equal({k: saved_opts[k] for k in opts}, opts) diff --git a/.venv/lib/python3.12/site-packages/numpy/core/tests/test_casting_floatingpoint_errors.py b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_casting_floatingpoint_errors.py new file mode 100644 index 00000000..d8318017 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_casting_floatingpoint_errors.py @@ -0,0 +1,154 @@ +import pytest +from pytest import param +from numpy.testing import IS_WASM +import numpy as np + + +def values_and_dtypes(): + """ + Generate value+dtype pairs that generate floating point errors during + casts. The invalid casts to integers will generate "invalid" value + warnings, the float casts all generate "overflow". + + (The Python int/float paths don't need to get tested in all the same + situations, but it does not hurt.) + """ + # Casting to float16: + yield param(70000, "float16", id="int-to-f2") + yield param("70000", "float16", id="str-to-f2") + yield param(70000.0, "float16", id="float-to-f2") + yield param(np.longdouble(70000.), "float16", id="longdouble-to-f2") + yield param(np.float64(70000.), "float16", id="double-to-f2") + yield param(np.float32(70000.), "float16", id="float-to-f2") + # Casting to float32: + yield param(10**100, "float32", id="int-to-f4") + yield param(1e100, "float32", id="float-to-f2") + yield param(np.longdouble(1e300), "float32", id="longdouble-to-f2") + yield param(np.float64(1e300), "float32", id="double-to-f2") + # Casting to float64: + # If longdouble is double-double, its max can be rounded down to the double + # max. So we correct the double spacing (a bit weird, admittedly): + max_ld = np.finfo(np.longdouble).max + spacing = np.spacing(np.nextafter(np.finfo("f8").max, 0)) + if max_ld - spacing > np.finfo("f8").max: + yield param(np.finfo(np.longdouble).max, "float64", + id="longdouble-to-f8") + + # Cast to complex32: + yield param(2e300, "complex64", id="float-to-c8") + yield param(2e300+0j, "complex64", id="complex-to-c8") + yield param(2e300j, "complex64", id="complex-to-c8") + yield param(np.longdouble(2e300), "complex64", id="longdouble-to-c8") + + # Invalid float to integer casts: + with np.errstate(over="ignore"): + for to_dt in np.typecodes["AllInteger"]: + for value in [np.inf, np.nan]: + for from_dt in np.typecodes["AllFloat"]: + from_dt = np.dtype(from_dt) + from_val = from_dt.type(value) + + yield param(from_val, to_dt, id=f"{from_val}-to-{to_dt}") + + +def check_operations(dtype, value): + """ + There are many dedicated paths in NumPy which cast and should check for + floating point errors which occurred during those casts. 
+ """ + if dtype.kind != 'i': + # These assignments use the stricter setitem logic: + def assignment(): + arr = np.empty(3, dtype=dtype) + arr[0] = value + + yield assignment + + def fill(): + arr = np.empty(3, dtype=dtype) + arr.fill(value) + + yield fill + + def copyto_scalar(): + arr = np.empty(3, dtype=dtype) + np.copyto(arr, value, casting="unsafe") + + yield copyto_scalar + + def copyto(): + arr = np.empty(3, dtype=dtype) + np.copyto(arr, np.array([value, value, value]), casting="unsafe") + + yield copyto + + def copyto_scalar_masked(): + arr = np.empty(3, dtype=dtype) + np.copyto(arr, value, casting="unsafe", + where=[True, False, True]) + + yield copyto_scalar_masked + + def copyto_masked(): + arr = np.empty(3, dtype=dtype) + np.copyto(arr, np.array([value, value, value]), casting="unsafe", + where=[True, False, True]) + + yield copyto_masked + + def direct_cast(): + np.array([value, value, value]).astype(dtype) + + yield direct_cast + + def direct_cast_nd_strided(): + arr = np.full((5, 5, 5), fill_value=value)[:, ::2, :] + arr.astype(dtype) + + yield direct_cast_nd_strided + + def boolean_array_assignment(): + arr = np.empty(3, dtype=dtype) + arr[[True, False, True]] = np.array([value, value]) + + yield boolean_array_assignment + + def integer_array_assignment(): + arr = np.empty(3, dtype=dtype) + values = np.array([value, value]) + + arr[[0, 1]] = values + + yield integer_array_assignment + + def integer_array_assignment_with_subspace(): + arr = np.empty((5, 3), dtype=dtype) + values = np.array([value, value, value]) + + arr[[0, 2]] = values + + yield integer_array_assignment_with_subspace + + def flat_assignment(): + arr = np.empty((3,), dtype=dtype) + values = np.array([value, value, value]) + arr.flat[:] = values + + yield flat_assignment + +@pytest.mark.skipif(IS_WASM, reason="no wasm fp exception support") +@pytest.mark.parametrize(["value", "dtype"], values_and_dtypes()) +@pytest.mark.filterwarnings("ignore::numpy.ComplexWarning") +def test_floatingpoint_errors_casting(dtype, value): + dtype = np.dtype(dtype) + for operation in check_operations(dtype, value): + dtype = np.dtype(dtype) + + match = "invalid" if dtype.kind in 'iu' else "overflow" + with pytest.warns(RuntimeWarning, match=match): + operation() + + with np.errstate(all="raise"): + with pytest.raises(FloatingPointError, match=match): + operation() + diff --git a/.venv/lib/python3.12/site-packages/numpy/core/tests/test_casting_unittests.py b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_casting_unittests.py new file mode 100644 index 00000000..a49d876d --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_casting_unittests.py @@ -0,0 +1,819 @@ +""" +The tests exercise the casting machinery in a more low-level manner. +The reason is mostly to test a new implementation of the casting machinery. + +Unlike most tests in NumPy, these are closer to unit-tests rather +than integration tests. +""" + +import pytest +import textwrap +import enum +import random +import ctypes + +import numpy as np +from numpy.lib.stride_tricks import as_strided + +from numpy.testing import assert_array_equal +from numpy.core._multiarray_umath import _get_castingimpl as get_castingimpl + + +# Simple skips object, parametric and long double (unsupported by struct) +simple_dtypes = "?bhilqBHILQefdFD" +if np.dtype("l").itemsize != np.dtype("q").itemsize: + # Remove l and L, the table was generated with 64bit linux in mind. 
+ simple_dtypes = simple_dtypes.replace("l", "").replace("L", "") +simple_dtypes = [type(np.dtype(c)) for c in simple_dtypes] + + +def simple_dtype_instances(): + for dtype_class in simple_dtypes: + dt = dtype_class() + yield pytest.param(dt, id=str(dt)) + if dt.byteorder != "|": + dt = dt.newbyteorder() + yield pytest.param(dt, id=str(dt)) + + +def get_expected_stringlength(dtype): + """Returns the string length when casting the basic dtypes to strings. + """ + if dtype == np.bool_: + return 5 + if dtype.kind in "iu": + if dtype.itemsize == 1: + length = 3 + elif dtype.itemsize == 2: + length = 5 + elif dtype.itemsize == 4: + length = 10 + elif dtype.itemsize == 8: + length = 20 + else: + raise AssertionError(f"did not find expected length for {dtype}") + + if dtype.kind == "i": + length += 1 # adds one character for the sign + + return length + + # Note: Can't do dtype comparison for longdouble on windows + if dtype.char == "g": + return 48 + elif dtype.char == "G": + return 48 * 2 + elif dtype.kind == "f": + return 32 # also for half apparently. + elif dtype.kind == "c": + return 32 * 2 + + raise AssertionError(f"did not find expected length for {dtype}") + + +class Casting(enum.IntEnum): + no = 0 + equiv = 1 + safe = 2 + same_kind = 3 + unsafe = 4 + + +def _get_cancast_table(): + table = textwrap.dedent(""" + X ? b h i l q B H I L Q e f d g F D G S U V O M m + ? # = = = = = = = = = = = = = = = = = = = = = . = + b . # = = = = . . . . . = = = = = = = = = = = . = + h . ~ # = = = . . . . . ~ = = = = = = = = = = . = + i . ~ ~ # = = . . . . . ~ ~ = = ~ = = = = = = . = + l . ~ ~ ~ # # . . . . . ~ ~ = = ~ = = = = = = . = + q . ~ ~ ~ # # . . . . . ~ ~ = = ~ = = = = = = . = + B . ~ = = = = # = = = = = = = = = = = = = = = . = + H . ~ ~ = = = ~ # = = = ~ = = = = = = = = = = . = + I . ~ ~ ~ = = ~ ~ # = = ~ ~ = = ~ = = = = = = . = + L . ~ ~ ~ ~ ~ ~ ~ ~ # # ~ ~ = = ~ = = = = = = . ~ + Q . ~ ~ ~ ~ ~ ~ ~ ~ # # ~ ~ = = ~ = = = = = = . ~ + e . . . . . . . . . . . # = = = = = = = = = = . . + f . . . . . . . . . . . ~ # = = = = = = = = = . . + d . . . . . . . . . . . ~ ~ # = ~ = = = = = = . . + g . . . . . . . . . . . ~ ~ ~ # ~ ~ = = = = = . . + F . . . . . . . . . . . . . . . # = = = = = = . . + D . . . . . . . . . . . . . . . ~ # = = = = = . . + G . . . . . . . . . . . . . . . ~ ~ # = = = = . . + S . . . . . . . . . . . . . . . . . . # = = = . . + U . . . . . . . . . . . . . . . . . . . # = = . . + V . . . . . . . . . . . . . . . . . . . . # = . . + O . . . . . . . . . . . . . . . . . . . . = # . . + M . . . . . . . . . . . . . . . . . . . . = = # . + m . . . . . . . . . . . . . . . . . . . . = = . 
# + """).strip().split("\n") + dtypes = [type(np.dtype(c)) for c in table[0][2::2]] + + convert_cast = {".": Casting.unsafe, "~": Casting.same_kind, + "=": Casting.safe, "#": Casting.equiv, + " ": -1} + + cancast = {} + for from_dt, row in zip(dtypes, table[1:]): + cancast[from_dt] = {} + for to_dt, c in zip(dtypes, row[2::2]): + cancast[from_dt][to_dt] = convert_cast[c] + + return cancast + +CAST_TABLE = _get_cancast_table() + + +class TestChanges: + """ + These test cases exercise some behaviour changes + """ + @pytest.mark.parametrize("string", ["S", "U"]) + @pytest.mark.parametrize("floating", ["e", "f", "d", "g"]) + def test_float_to_string(self, floating, string): + assert np.can_cast(floating, string) + # 100 is long enough to hold any formatted floating + assert np.can_cast(floating, f"{string}100") + + def test_to_void(self): + # But in general, we do consider these safe: + assert np.can_cast("d", "V") + assert np.can_cast("S20", "V") + + # Do not consider it a safe cast if the void is too smaller: + assert not np.can_cast("d", "V1") + assert not np.can_cast("S20", "V1") + assert not np.can_cast("U1", "V1") + # Structured to unstructured is just like any other: + assert np.can_cast("d,i", "V", casting="same_kind") + # Unstructured void to unstructured is actually no cast at all: + assert np.can_cast("V3", "V", casting="no") + assert np.can_cast("V0", "V", casting="no") + + +class TestCasting: + size = 1500 # Best larger than NPY_LOWLEVEL_BUFFER_BLOCKSIZE * itemsize + + def get_data(self, dtype1, dtype2): + if dtype2 is None or dtype1.itemsize >= dtype2.itemsize: + length = self.size // dtype1.itemsize + else: + length = self.size // dtype2.itemsize + + # Assume that the base array is well enough aligned for all inputs. + arr1 = np.empty(length, dtype=dtype1) + assert arr1.flags.c_contiguous + assert arr1.flags.aligned + + values = [random.randrange(-128, 128) for _ in range(length)] + + for i, value in enumerate(values): + # Use item assignment to ensure this is not using casting: + if value < 0 and dtype1.kind == "u": + # Manually rollover unsigned integers (-1 -> int.max) + value = value + np.iinfo(dtype1).max + 1 + arr1[i] = value + + if dtype2 is None: + if dtype1.char == "?": + values = [bool(v) for v in values] + return arr1, values + + if dtype2.char == "?": + values = [bool(v) for v in values] + + arr2 = np.empty(length, dtype=dtype2) + assert arr2.flags.c_contiguous + assert arr2.flags.aligned + + for i, value in enumerate(values): + # Use item assignment to ensure this is not using casting: + if value < 0 and dtype2.kind == "u": + # Manually rollover unsigned integers (-1 -> int.max) + value = value + np.iinfo(dtype2).max + 1 + arr2[i] = value + + return arr1, arr2, values + + def get_data_variation(self, arr1, arr2, aligned=True, contig=True): + """ + Returns a copy of arr1 that may be non-contiguous or unaligned, and a + matching array for arr2 (although not a copy). 
+ """ + if contig: + stride1 = arr1.dtype.itemsize + stride2 = arr2.dtype.itemsize + elif aligned: + stride1 = 2 * arr1.dtype.itemsize + stride2 = 2 * arr2.dtype.itemsize + else: + stride1 = arr1.dtype.itemsize + 1 + stride2 = arr2.dtype.itemsize + 1 + + max_size1 = len(arr1) * 3 * arr1.dtype.itemsize + 1 + max_size2 = len(arr2) * 3 * arr2.dtype.itemsize + 1 + from_bytes = np.zeros(max_size1, dtype=np.uint8) + to_bytes = np.zeros(max_size2, dtype=np.uint8) + + # Sanity check that the above is large enough: + assert stride1 * len(arr1) <= from_bytes.nbytes + assert stride2 * len(arr2) <= to_bytes.nbytes + + if aligned: + new1 = as_strided(from_bytes[:-1].view(arr1.dtype), + arr1.shape, (stride1,)) + new2 = as_strided(to_bytes[:-1].view(arr2.dtype), + arr2.shape, (stride2,)) + else: + new1 = as_strided(from_bytes[1:].view(arr1.dtype), + arr1.shape, (stride1,)) + new2 = as_strided(to_bytes[1:].view(arr2.dtype), + arr2.shape, (stride2,)) + + new1[...] = arr1 + + if not contig: + # Ensure we did not overwrite bytes that should not be written: + offset = arr1.dtype.itemsize if aligned else 0 + buf = from_bytes[offset::stride1].tobytes() + assert buf.count(b"\0") == len(buf) + + if contig: + assert new1.flags.c_contiguous + assert new2.flags.c_contiguous + else: + assert not new1.flags.c_contiguous + assert not new2.flags.c_contiguous + + if aligned: + assert new1.flags.aligned + assert new2.flags.aligned + else: + assert not new1.flags.aligned or new1.dtype.alignment == 1 + assert not new2.flags.aligned or new2.dtype.alignment == 1 + + return new1, new2 + + @pytest.mark.parametrize("from_Dt", simple_dtypes) + def test_simple_cancast(self, from_Dt): + for to_Dt in simple_dtypes: + cast = get_castingimpl(from_Dt, to_Dt) + + for from_dt in [from_Dt(), from_Dt().newbyteorder()]: + default = cast._resolve_descriptors((from_dt, None))[1][1] + assert default == to_Dt() + del default + + for to_dt in [to_Dt(), to_Dt().newbyteorder()]: + casting, (from_res, to_res), view_off = ( + cast._resolve_descriptors((from_dt, to_dt))) + assert(type(from_res) == from_Dt) + assert(type(to_res) == to_Dt) + if view_off is not None: + # If a view is acceptable, this is "no" casting + # and byte order must be matching. + assert casting == Casting.no + # The above table lists this as "equivalent" + assert Casting.equiv == CAST_TABLE[from_Dt][to_Dt] + # Note that to_res may not be the same as from_dt + assert from_res.isnative == to_res.isnative + else: + if from_Dt == to_Dt: + # Note that to_res may not be the same as from_dt + assert from_res.isnative != to_res.isnative + assert casting == CAST_TABLE[from_Dt][to_Dt] + + if from_Dt is to_Dt: + assert(from_dt is from_res) + assert(to_dt is to_res) + + + @pytest.mark.filterwarnings("ignore::numpy.ComplexWarning") + @pytest.mark.parametrize("from_dt", simple_dtype_instances()) + def test_simple_direct_casts(self, from_dt): + """ + This test checks numeric direct casts for dtypes supported also by the + struct module (plus complex). It tries to be test a wide range of + inputs, but skips over possibly undefined behaviour (e.g. int rollover). + Longdouble and CLongdouble are tested, but only using double precision. + + If this test creates issues, it should possibly just be simplified + or even removed (checking whether unaligned/non-contiguous casts give + the same results is useful, though). 
+ """ + for to_dt in simple_dtype_instances(): + to_dt = to_dt.values[0] + cast = get_castingimpl(type(from_dt), type(to_dt)) + + casting, (from_res, to_res), view_off = cast._resolve_descriptors( + (from_dt, to_dt)) + + if from_res is not from_dt or to_res is not to_dt: + # Do not test this case, it is handled in multiple steps, + # each of which should is tested individually. + return + + safe = casting <= Casting.safe + del from_res, to_res, casting + + arr1, arr2, values = self.get_data(from_dt, to_dt) + + cast._simple_strided_call((arr1, arr2)) + + # Check via python list + assert arr2.tolist() == values + + # Check that the same results are achieved for strided loops + arr1_o, arr2_o = self.get_data_variation(arr1, arr2, True, False) + cast._simple_strided_call((arr1_o, arr2_o)) + + assert_array_equal(arr2_o, arr2) + assert arr2_o.tobytes() == arr2.tobytes() + + # Check if alignment makes a difference, but only if supported + # and only if the alignment can be wrong + if ((from_dt.alignment == 1 and to_dt.alignment == 1) or + not cast._supports_unaligned): + return + + arr1_o, arr2_o = self.get_data_variation(arr1, arr2, False, True) + cast._simple_strided_call((arr1_o, arr2_o)) + + assert_array_equal(arr2_o, arr2) + assert arr2_o.tobytes() == arr2.tobytes() + + arr1_o, arr2_o = self.get_data_variation(arr1, arr2, False, False) + cast._simple_strided_call((arr1_o, arr2_o)) + + assert_array_equal(arr2_o, arr2) + assert arr2_o.tobytes() == arr2.tobytes() + + del arr1_o, arr2_o, cast + + @pytest.mark.parametrize("from_Dt", simple_dtypes) + def test_numeric_to_times(self, from_Dt): + # We currently only implement contiguous loops, so only need to + # test those. + from_dt = from_Dt() + + time_dtypes = [np.dtype("M8"), np.dtype("M8[ms]"), np.dtype("M8[4D]"), + np.dtype("m8"), np.dtype("m8[ms]"), np.dtype("m8[4D]")] + for time_dt in time_dtypes: + cast = get_castingimpl(type(from_dt), type(time_dt)) + + casting, (from_res, to_res), view_off = cast._resolve_descriptors( + (from_dt, time_dt)) + + assert from_res is from_dt + assert to_res is time_dt + del from_res, to_res + + assert casting & CAST_TABLE[from_Dt][type(time_dt)] + assert view_off is None + + int64_dt = np.dtype(np.int64) + arr1, arr2, values = self.get_data(from_dt, int64_dt) + arr2 = arr2.view(time_dt) + arr2[...] = np.datetime64("NaT") + + if time_dt == np.dtype("M8"): + # This is a bit of a strange path, and could probably be removed + arr1[-1] = 0 # ensure at least one value is not NaT + + # The cast currently succeeds, but the values are invalid: + cast._simple_strided_call((arr1, arr2)) + with pytest.raises(ValueError): + str(arr2[-1]) # e.g. 
conversion to string fails + return + + cast._simple_strided_call((arr1, arr2)) + + assert [int(v) for v in arr2.tolist()] == values + + # Check that the same results are achieved for strided loops + arr1_o, arr2_o = self.get_data_variation(arr1, arr2, True, False) + cast._simple_strided_call((arr1_o, arr2_o)) + + assert_array_equal(arr2_o, arr2) + assert arr2_o.tobytes() == arr2.tobytes() + + @pytest.mark.parametrize( + ["from_dt", "to_dt", "expected_casting", "expected_view_off", + "nom", "denom"], + [("M8[ns]", None, Casting.no, 0, 1, 1), + (str(np.dtype("M8[ns]").newbyteorder()), None, + Casting.equiv, None, 1, 1), + ("M8", "M8[ms]", Casting.safe, 0, 1, 1), + # should be invalid cast: + ("M8[ms]", "M8", Casting.unsafe, None, 1, 1), + ("M8[5ms]", "M8[5ms]", Casting.no, 0, 1, 1), + ("M8[ns]", "M8[ms]", Casting.same_kind, None, 1, 10**6), + ("M8[ms]", "M8[ns]", Casting.safe, None, 10**6, 1), + ("M8[ms]", "M8[7ms]", Casting.same_kind, None, 1, 7), + ("M8[4D]", "M8[1M]", Casting.same_kind, None, None, + # give full values based on NumPy 1.19.x + [-2**63, 0, -1, 1314, -1315, 564442610]), + ("m8[ns]", None, Casting.no, 0, 1, 1), + (str(np.dtype("m8[ns]").newbyteorder()), None, + Casting.equiv, None, 1, 1), + ("m8", "m8[ms]", Casting.safe, 0, 1, 1), + # should be invalid cast: + ("m8[ms]", "m8", Casting.unsafe, None, 1, 1), + ("m8[5ms]", "m8[5ms]", Casting.no, 0, 1, 1), + ("m8[ns]", "m8[ms]", Casting.same_kind, None, 1, 10**6), + ("m8[ms]", "m8[ns]", Casting.safe, None, 10**6, 1), + ("m8[ms]", "m8[7ms]", Casting.same_kind, None, 1, 7), + ("m8[4D]", "m8[1M]", Casting.unsafe, None, None, + # give full values based on NumPy 1.19.x + [-2**63, 0, 0, 1314, -1315, 564442610])]) + def test_time_to_time(self, from_dt, to_dt, + expected_casting, expected_view_off, + nom, denom): + from_dt = np.dtype(from_dt) + if to_dt is not None: + to_dt = np.dtype(to_dt) + + # Test a few values for casting (results generated with NumPy 1.19) + values = np.array([-2**63, 1, 2**63-1, 10000, -10000, 2**32]) + values = values.astype(np.dtype("int64").newbyteorder(from_dt.byteorder)) + assert values.dtype.byteorder == from_dt.byteorder + assert np.isnat(values.view(from_dt)[0]) + + DType = type(from_dt) + cast = get_castingimpl(DType, DType) + casting, (from_res, to_res), view_off = cast._resolve_descriptors( + (from_dt, to_dt)) + assert from_res is from_dt + assert to_res is to_dt or to_dt is None + assert casting == expected_casting + assert view_off == expected_view_off + + if nom is not None: + expected_out = (values * nom // denom).view(to_res) + expected_out[0] = "NaT" + else: + expected_out = np.empty_like(values) + expected_out[...] = denom + expected_out = expected_out.view(to_dt) + + orig_arr = values.view(from_dt) + orig_out = np.empty_like(expected_out) + + if casting == Casting.unsafe and (to_dt == "m8" or to_dt == "M8"): + # Casting from non-generic to generic units is an error and should + # probably be reported as an invalid cast earlier. + with pytest.raises(ValueError): + cast._simple_strided_call((orig_arr, orig_out)) + return + + for aligned in [True, True]: + for contig in [True, True]: + arr, out = self.get_data_variation( + orig_arr, orig_out, aligned, contig) + out[...] 
= 0 + cast._simple_strided_call((arr, out)) + assert_array_equal(out.view("int64"), expected_out.view("int64")) + + def string_with_modified_length(self, dtype, change_length): + fact = 1 if dtype.char == "S" else 4 + length = dtype.itemsize // fact + change_length + return np.dtype(f"{dtype.byteorder}{dtype.char}{length}") + + @pytest.mark.parametrize("other_DT", simple_dtypes) + @pytest.mark.parametrize("string_char", ["S", "U"]) + def test_string_cancast(self, other_DT, string_char): + fact = 1 if string_char == "S" else 4 + + string_DT = type(np.dtype(string_char)) + cast = get_castingimpl(other_DT, string_DT) + + other_dt = other_DT() + expected_length = get_expected_stringlength(other_dt) + string_dt = np.dtype(f"{string_char}{expected_length}") + + safety, (res_other_dt, res_dt), view_off = cast._resolve_descriptors( + (other_dt, None)) + assert res_dt.itemsize == expected_length * fact + assert safety == Casting.safe # we consider to string casts "safe" + assert view_off is None + assert isinstance(res_dt, string_DT) + + # These casts currently implement changing the string length, so + # check the cast-safety for too long/fixed string lengths: + for change_length in [-1, 0, 1]: + if change_length >= 0: + expected_safety = Casting.safe + else: + expected_safety = Casting.same_kind + + to_dt = self.string_with_modified_length(string_dt, change_length) + safety, (_, res_dt), view_off = cast._resolve_descriptors( + (other_dt, to_dt)) + assert res_dt is to_dt + assert safety == expected_safety + assert view_off is None + + # The opposite direction is always considered unsafe: + cast = get_castingimpl(string_DT, other_DT) + + safety, _, view_off = cast._resolve_descriptors((string_dt, other_dt)) + assert safety == Casting.unsafe + assert view_off is None + + cast = get_castingimpl(string_DT, other_DT) + safety, (_, res_dt), view_off = cast._resolve_descriptors( + (string_dt, None)) + assert safety == Casting.unsafe + assert view_off is None + assert other_dt is res_dt # returns the singleton for simple dtypes + + @pytest.mark.parametrize("string_char", ["S", "U"]) + @pytest.mark.parametrize("other_dt", simple_dtype_instances()) + def test_simple_string_casts_roundtrip(self, other_dt, string_char): + """ + Tests casts from and to string by checking the roundtripping property. + + The test also covers some string to string casts (but not all). + + If this test creates issues, it should possibly just be simplified + or even removed (checking whether unaligned/non-contiguous casts give + the same results is useful, though). 
+ """ + string_DT = type(np.dtype(string_char)) + + cast = get_castingimpl(type(other_dt), string_DT) + cast_back = get_castingimpl(string_DT, type(other_dt)) + _, (res_other_dt, string_dt), _ = cast._resolve_descriptors( + (other_dt, None)) + + if res_other_dt is not other_dt: + # do not support non-native byteorder, skip test in that case + assert other_dt.byteorder != res_other_dt.byteorder + return + + orig_arr, values = self.get_data(other_dt, None) + str_arr = np.zeros(len(orig_arr), dtype=string_dt) + string_dt_short = self.string_with_modified_length(string_dt, -1) + str_arr_short = np.zeros(len(orig_arr), dtype=string_dt_short) + string_dt_long = self.string_with_modified_length(string_dt, 1) + str_arr_long = np.zeros(len(orig_arr), dtype=string_dt_long) + + assert not cast._supports_unaligned # if support is added, should test + assert not cast_back._supports_unaligned + + for contig in [True, False]: + other_arr, str_arr = self.get_data_variation( + orig_arr, str_arr, True, contig) + _, str_arr_short = self.get_data_variation( + orig_arr, str_arr_short.copy(), True, contig) + _, str_arr_long = self.get_data_variation( + orig_arr, str_arr_long, True, contig) + + cast._simple_strided_call((other_arr, str_arr)) + + cast._simple_strided_call((other_arr, str_arr_short)) + assert_array_equal(str_arr.astype(string_dt_short), str_arr_short) + + cast._simple_strided_call((other_arr, str_arr_long)) + assert_array_equal(str_arr, str_arr_long) + + if other_dt.kind == "b": + # Booleans do not roundtrip + continue + + other_arr[...] = 0 + cast_back._simple_strided_call((str_arr, other_arr)) + assert_array_equal(orig_arr, other_arr) + + other_arr[...] = 0 + cast_back._simple_strided_call((str_arr_long, other_arr)) + assert_array_equal(orig_arr, other_arr) + + @pytest.mark.parametrize("other_dt", ["S8", "<U8", ">U8"]) + @pytest.mark.parametrize("string_char", ["S", "U"]) + def test_string_to_string_cancast(self, other_dt, string_char): + other_dt = np.dtype(other_dt) + + fact = 1 if string_char == "S" else 4 + div = 1 if other_dt.char == "S" else 4 + + string_DT = type(np.dtype(string_char)) + cast = get_castingimpl(type(other_dt), string_DT) + + expected_length = other_dt.itemsize // div + string_dt = np.dtype(f"{string_char}{expected_length}") + + safety, (res_other_dt, res_dt), view_off = cast._resolve_descriptors( + (other_dt, None)) + assert res_dt.itemsize == expected_length * fact + assert isinstance(res_dt, string_DT) + + expected_view_off = None + if other_dt.char == string_char: + if other_dt.isnative: + expected_safety = Casting.no + expected_view_off = 0 + else: + expected_safety = Casting.equiv + elif string_char == "U": + expected_safety = Casting.safe + else: + expected_safety = Casting.unsafe + + assert view_off == expected_view_off + assert expected_safety == safety + + for change_length in [-1, 0, 1]: + to_dt = self.string_with_modified_length(string_dt, change_length) + safety, (_, res_dt), view_off = cast._resolve_descriptors( + (other_dt, to_dt)) + + assert res_dt is to_dt + if change_length <= 0: + assert view_off == expected_view_off + else: + assert view_off is None + if expected_safety == Casting.unsafe: + assert safety == expected_safety + elif change_length < 0: + assert safety == Casting.same_kind + elif change_length == 0: + assert safety == expected_safety + elif change_length > 0: + assert safety == Casting.safe + + @pytest.mark.parametrize("order1", [">", "<"]) + @pytest.mark.parametrize("order2", [">", "<"]) + def test_unicode_byteswapped_cast(self, order1, 
order2):
+        # Very specific tests (not using the castingimpl directly) that test
+        # unicode byteswaps, including for unaligned array data.
+        dtype1 = np.dtype(f"{order1}U30")
+        dtype2 = np.dtype(f"{order2}U30")
+        data1 = np.empty(30 * 4 + 1, dtype=np.uint8)[1:].view(dtype1)
+        data2 = np.empty(30 * 4 + 1, dtype=np.uint8)[1:].view(dtype2)
+        if dtype1.alignment != 1:
+            # alignment should always be >1, but skip the check if not
+            assert not data1.flags.aligned
+            assert not data2.flags.aligned
+
+        element = "this is a ünicode string‽"
+        data1[()] = element
+        # Test both `data1` and `data1.copy()` (which should be aligned)
+        for data in [data1, data1.copy()]:
+            data2[...] = data1
+            assert data2[()] == element
+            assert data2.copy()[()] == element
+
+    def test_void_to_string_special_case(self):
+        # Cover a small special case in void to string casting that could
+        # probably just as well be turned into an error (compare
+        # `test_object_to_parametric_internal_error` below).
+        assert np.array([], dtype="V5").astype("S").dtype.itemsize == 5
+        assert np.array([], dtype="V5").astype("U").dtype.itemsize == 4 * 5
+
+    def test_object_to_parametric_internal_error(self):
+        # We reject casting from object to a parametric type, without
+        # figuring out the correct instance first.
+        object_dtype = type(np.dtype(object))
+        other_dtype = type(np.dtype(str))
+        cast = get_castingimpl(object_dtype, other_dtype)
+        with pytest.raises(TypeError,
+                match="casting from object to the parametric DType"):
+            cast._resolve_descriptors((np.dtype("O"), None))
+
+    @pytest.mark.parametrize("dtype", simple_dtype_instances())
+    def test_object_and_simple_resolution(self, dtype):
+        # Simple test to exercise the cast when no instance is specified
+        object_dtype = type(np.dtype(object))
+        cast = get_castingimpl(object_dtype, type(dtype))
+
+        safety, (_, res_dt), view_off = cast._resolve_descriptors(
+            (np.dtype("O"), dtype))
+        assert safety == Casting.unsafe
+        assert view_off is None
+        assert res_dt is dtype
+
+        safety, (_, res_dt), view_off = cast._resolve_descriptors(
+            (np.dtype("O"), None))
+        assert safety == Casting.unsafe
+        assert view_off is None
+        assert res_dt == dtype.newbyteorder("=")
+
+    @pytest.mark.parametrize("dtype", simple_dtype_instances())
+    def test_simple_to_object_resolution(self, dtype):
+        # Simple test to exercise the cast when no instance is specified
+        object_dtype = type(np.dtype(object))
+        cast = get_castingimpl(type(dtype), object_dtype)
+
+        safety, (_, res_dt), view_off = cast._resolve_descriptors(
+            (dtype, None))
+        assert safety == Casting.safe
+        assert view_off is None
+        assert res_dt is np.dtype("O")
+
+    @pytest.mark.parametrize("casting", ["no", "unsafe"])
+    def test_void_and_structured_with_subarray(self, casting):
+        # test case corresponding to gh-19325
+        dtype = np.dtype([("foo", "<f4", (3, 2))])
+        expected = casting == "unsafe"
+        assert np.can_cast("V4", dtype, casting=casting) == expected
+        assert np.can_cast(dtype, "V4", casting=casting) == expected
+
+    @pytest.mark.parametrize(["to_dt", "expected_off"],
+            [  # Same as `from_dt` but with both fields shifted:
+             (np.dtype({"names": ["a", "b"], "formats": ["i4", "f4"],
+                        "offsets": [0, 4]}), 2),
+             # Additional change of the names
+             (np.dtype({"names": ["b", "a"], "formats": ["i4", "f4"],
+                        "offsets": [0, 4]}), 2),
+             # Incompatible field offset change
+             (np.dtype({"names": ["b", "a"], "formats": ["i4", "f4"],
+                        "offsets": [0, 6]}), None)])
+    def test_structured_field_offsets(self, to_dt, expected_off):
+        # This checks the cast-safety and view offset for swapped and "shifted"
+        # fields which are viewable
+        from_dt = np.dtype({"names": ["a", "b"],
+                            "formats": ["i4", "f4"],
+                            "offsets": [2, 6]})
+        cast = get_castingimpl(type(from_dt), type(to_dt))
+        safety, _, view_off = cast._resolve_descriptors((from_dt, to_dt))
+        if from_dt.names == to_dt.names:
+            assert safety == Casting.equiv
+        else:
+            assert safety == Casting.safe
+        # Shifting the original data pointer by -2 will align both by
+        # effectively adding 2 bytes of spacing before `from_dt`.
+        assert view_off == expected_off
+
+    @pytest.mark.parametrize(("from_dt", "to_dt", "expected_off"), [
+            # Subarray cases:
+            ("i", "(1,1)i", 0),
+            ("(1,1)i", "i", 0),
+            ("(2,1)i", "(2,1)i", 0),
+            # field cases (field to field is tested explicitly also):
+            # Not considered viewable, because a negative offset would allow
+            # a structured dtype to indirectly access invalid memory.
+            ("i", dict(names=["a"], formats=["i"], offsets=[2]), None),
+            (dict(names=["a"], formats=["i"], offsets=[2]), "i", 2),
+            # Currently considered not viewable, due to multiple fields
+            # even though they overlap (maybe we should not allow that?)
+            ("i", dict(names=["a", "b"], formats=["i", "i"], offsets=[2, 2]),
+             None),
+            # different number of fields can't work, should probably just fail
+            # so it never reports "viewable":
+            ("i,i", "i,i,i", None),
+            # Unstructured void cases:
+            ("i4", "V3", 0),  # void smaller or equal
+            ("i4", "V4", 0),  # void smaller or equal
+            ("i4", "V10", None),  # void is larger (no view)
+            ("O", "V4", None),  # currently reject objects for view here.
+            ("O", "V8", None),  # currently reject objects for view here.
+            ("V4", "V3", 0),
+            ("V4", "V4", 0),
+            ("V3", "V4", None),
+            # Note that currently void-to-other cast goes via byte-strings
+            # and is not a "view" based cast like the opposite direction:
+            ("V4", "i4", None),
+            # completely invalid/impossible cast:
+            ("i,i", "i,i,i", None),
+        ])
+    def test_structured_view_offsets_parametric(
+            self, from_dt, to_dt, expected_off):
+        # TODO: While this test is fairly thorough, right now, it does not
+        # really test some paths that may have nonzero offsets (they don't
+        # really exist).
+        from_dt = np.dtype(from_dt)
+        to_dt = np.dtype(to_dt)
+        cast = get_castingimpl(type(from_dt), type(to_dt))
+        _, _, view_off = cast._resolve_descriptors((from_dt, to_dt))
+        assert view_off == expected_off
+
+    @pytest.mark.parametrize("dtype", np.typecodes["All"])
+    def test_object_casts_NULL_None_equivalence(self, dtype):
+        # None to <other> casts may succeed or fail, but a NULL'ed array must
+        # behave the same as one filled with None's.
+        arr_normal = np.array([None] * 5)
+        arr_NULLs = np.empty_like(arr_normal)
+        ctypes.memset(arr_NULLs.ctypes.data, 0, arr_NULLs.nbytes)
+        # If the check fails (maybe it should) the test would lose its purpose:
+        assert arr_NULLs.tobytes() == b"\x00" * arr_NULLs.nbytes
+
+        try:
+            expected = arr_normal.astype(dtype)
+        except TypeError:
+            with pytest.raises(TypeError):
+                arr_NULLs.astype(dtype)
+        else:
+            assert_array_equal(expected, arr_NULLs.astype(dtype))
+
+    @pytest.mark.parametrize("dtype",
+            np.typecodes["AllInteger"] + np.typecodes["AllFloat"])
+    def test_nonstandard_bool_to_other(self, dtype):
+        # simple test for casting bool_ to numeric types, which should not
+        # expose the detail that NumPy bools can sometimes take values other
+        # than 0 and 1. See also gh-19514.
+ nonstandard_bools = np.array([0, 3, -7], dtype=np.int8).view(bool) + res = nonstandard_bools.astype(dtype) + expected = [0, 1, 1] + assert_array_equal(res, expected) + diff --git a/.venv/lib/python3.12/site-packages/numpy/core/tests/test_conversion_utils.py b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_conversion_utils.py new file mode 100644 index 00000000..c602eba4 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_conversion_utils.py @@ -0,0 +1,208 @@ +""" +Tests for numpy/core/src/multiarray/conversion_utils.c +""" +import re +import sys + +import pytest + +import numpy as np +import numpy.core._multiarray_tests as mt +from numpy.testing import assert_warns, IS_PYPY + + +class StringConverterTestCase: + allow_bytes = True + case_insensitive = True + exact_match = False + warn = True + + def _check_value_error(self, val): + pattern = r'\(got {}\)'.format(re.escape(repr(val))) + with pytest.raises(ValueError, match=pattern) as exc: + self.conv(val) + + def _check_conv_assert_warn(self, val, expected): + if self.warn: + with assert_warns(DeprecationWarning) as exc: + assert self.conv(val) == expected + else: + assert self.conv(val) == expected + + def _check(self, val, expected): + """Takes valid non-deprecated inputs for converters, + runs converters on inputs, checks correctness of outputs, + warnings and errors""" + assert self.conv(val) == expected + + if self.allow_bytes: + assert self.conv(val.encode('ascii')) == expected + else: + with pytest.raises(TypeError): + self.conv(val.encode('ascii')) + + if len(val) != 1: + if self.exact_match: + self._check_value_error(val[:1]) + self._check_value_error(val + '\0') + else: + self._check_conv_assert_warn(val[:1], expected) + + if self.case_insensitive: + if val != val.lower(): + self._check_conv_assert_warn(val.lower(), expected) + if val != val.upper(): + self._check_conv_assert_warn(val.upper(), expected) + else: + if val != val.lower(): + self._check_value_error(val.lower()) + if val != val.upper(): + self._check_value_error(val.upper()) + + def test_wrong_type(self): + # common cases which apply to all the below + with pytest.raises(TypeError): + self.conv({}) + with pytest.raises(TypeError): + self.conv([]) + + def test_wrong_value(self): + # nonsense strings + self._check_value_error('') + self._check_value_error('\N{greek small letter pi}') + + if self.allow_bytes: + self._check_value_error(b'') + # bytes which can't be converted to strings via utf8 + self._check_value_error(b"\xFF") + if self.exact_match: + self._check_value_error("there's no way this is supported") + + +class TestByteorderConverter(StringConverterTestCase): + """ Tests of PyArray_ByteorderConverter """ + conv = mt.run_byteorder_converter + warn = False + + def test_valid(self): + for s in ['big', '>']: + self._check(s, 'NPY_BIG') + for s in ['little', '<']: + self._check(s, 'NPY_LITTLE') + for s in ['native', '=']: + self._check(s, 'NPY_NATIVE') + for s in ['ignore', '|']: + self._check(s, 'NPY_IGNORE') + for s in ['swap']: + self._check(s, 'NPY_SWAP') + + +class TestSortkindConverter(StringConverterTestCase): + """ Tests of PyArray_SortkindConverter """ + conv = mt.run_sortkind_converter + warn = False + + def test_valid(self): + self._check('quicksort', 'NPY_QUICKSORT') + self._check('heapsort', 'NPY_HEAPSORT') + self._check('mergesort', 'NPY_STABLESORT') # alias + self._check('stable', 'NPY_STABLESORT') + + +class TestSelectkindConverter(StringConverterTestCase): + """ Tests of PyArray_SelectkindConverter """ + conv = 
mt.run_selectkind_converter + case_insensitive = False + exact_match = True + + def test_valid(self): + self._check('introselect', 'NPY_INTROSELECT') + + +class TestSearchsideConverter(StringConverterTestCase): + """ Tests of PyArray_SearchsideConverter """ + conv = mt.run_searchside_converter + def test_valid(self): + self._check('left', 'NPY_SEARCHLEFT') + self._check('right', 'NPY_SEARCHRIGHT') + + +class TestOrderConverter(StringConverterTestCase): + """ Tests of PyArray_OrderConverter """ + conv = mt.run_order_converter + warn = False + + def test_valid(self): + self._check('c', 'NPY_CORDER') + self._check('f', 'NPY_FORTRANORDER') + self._check('a', 'NPY_ANYORDER') + self._check('k', 'NPY_KEEPORDER') + + def test_flatten_invalid_order(self): + # invalid after gh-14596 + with pytest.raises(ValueError): + self.conv('Z') + for order in [False, True, 0, 8]: + with pytest.raises(TypeError): + self.conv(order) + + +class TestClipmodeConverter(StringConverterTestCase): + """ Tests of PyArray_ClipmodeConverter """ + conv = mt.run_clipmode_converter + def test_valid(self): + self._check('clip', 'NPY_CLIP') + self._check('wrap', 'NPY_WRAP') + self._check('raise', 'NPY_RAISE') + + # integer values allowed here + assert self.conv(np.CLIP) == 'NPY_CLIP' + assert self.conv(np.WRAP) == 'NPY_WRAP' + assert self.conv(np.RAISE) == 'NPY_RAISE' + + +class TestCastingConverter(StringConverterTestCase): + """ Tests of PyArray_CastingConverter """ + conv = mt.run_casting_converter + case_insensitive = False + exact_match = True + + def test_valid(self): + self._check("no", "NPY_NO_CASTING") + self._check("equiv", "NPY_EQUIV_CASTING") + self._check("safe", "NPY_SAFE_CASTING") + self._check("same_kind", "NPY_SAME_KIND_CASTING") + self._check("unsafe", "NPY_UNSAFE_CASTING") + + +class TestIntpConverter: + """ Tests of PyArray_IntpConverter """ + conv = mt.run_intp_converter + + def test_basic(self): + assert self.conv(1) == (1,) + assert self.conv((1, 2)) == (1, 2) + assert self.conv([1, 2]) == (1, 2) + assert self.conv(()) == () + + def test_none(self): + # once the warning expires, this will raise TypeError + with pytest.warns(DeprecationWarning): + assert self.conv(None) == () + + @pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), + reason="PyPy bug in error formatting") + def test_float(self): + with pytest.raises(TypeError): + self.conv(1.0) + with pytest.raises(TypeError): + self.conv([1, 1.0]) + + def test_too_large(self): + with pytest.raises(ValueError): + self.conv(2**64) + + def test_too_many_dims(self): + assert self.conv([1]*32) == (1,)*32 + with pytest.raises(ValueError): + self.conv([1]*33) diff --git a/.venv/lib/python3.12/site-packages/numpy/core/tests/test_cpu_dispatcher.py b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_cpu_dispatcher.py new file mode 100644 index 00000000..41a60d5c --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_cpu_dispatcher.py @@ -0,0 +1,43 @@ +from numpy.core._multiarray_umath import __cpu_features__, __cpu_baseline__, __cpu_dispatch__ +from numpy.core import _umath_tests +from numpy.testing import assert_equal + +def test_dispatcher(): + """ + Testing the utilities of the CPU dispatcher + """ + targets = ( + "SSE2", "SSE41", "AVX2", + "VSX", "VSX2", "VSX3", + "NEON", "ASIMD", "ASIMDHP", + "VX", "VXE" + ) + highest_sfx = "" # no suffix for the baseline + all_sfx = [] + for feature in reversed(targets): + # skip baseline features, by the default `CCompilerOpt` do not generate separated objects + # for the 
baseline, just one object combined all of them via 'baseline' option + # within the configuration statements. + if feature in __cpu_baseline__: + continue + # check compiler and running machine support + if feature not in __cpu_dispatch__ or not __cpu_features__[feature]: + continue + + if not highest_sfx: + highest_sfx = "_" + feature + all_sfx.append("func" + "_" + feature) + + test = _umath_tests.test_dispatch() + assert_equal(test["func"], "func" + highest_sfx) + assert_equal(test["var"], "var" + highest_sfx) + + if highest_sfx: + assert_equal(test["func_xb"], "func" + highest_sfx) + assert_equal(test["var_xb"], "var" + highest_sfx) + else: + assert_equal(test["func_xb"], "nobase") + assert_equal(test["var_xb"], "nobase") + + all_sfx.append("func") # add the baseline + assert_equal(test["all"], all_sfx) diff --git a/.venv/lib/python3.12/site-packages/numpy/core/tests/test_cpu_features.py b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_cpu_features.py new file mode 100644 index 00000000..48ab30a4 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_cpu_features.py @@ -0,0 +1,404 @@ +import sys, platform, re, pytest +from numpy.core._multiarray_umath import ( + __cpu_features__, + __cpu_baseline__, + __cpu_dispatch__, +) +import numpy as np +import subprocess +import pathlib +import os +import re + +def assert_features_equal(actual, desired, fname): + __tracebackhide__ = True # Hide traceback for py.test + actual, desired = str(actual), str(desired) + if actual == desired: + return + detected = str(__cpu_features__).replace("'", "") + try: + with open("/proc/cpuinfo") as fd: + cpuinfo = fd.read(2048) + except Exception as err: + cpuinfo = str(err) + + try: + import subprocess + auxv = subprocess.check_output(['/bin/true'], env=dict(LD_SHOW_AUXV="1")) + auxv = auxv.decode() + except Exception as err: + auxv = str(err) + + import textwrap + error_report = textwrap.indent( +""" +########################################### +### Extra debugging information +########################################### +------------------------------------------- +--- NumPy Detections +------------------------------------------- +%s +------------------------------------------- +--- SYS / CPUINFO +------------------------------------------- +%s.... 
+------------------------------------------- +--- SYS / AUXV +------------------------------------------- +%s +""" % (detected, cpuinfo, auxv), prefix='\r') + + raise AssertionError(( + "Failure Detection\n" + " NAME: '%s'\n" + " ACTUAL: %s\n" + " DESIRED: %s\n" + "%s" + ) % (fname, actual, desired, error_report)) + +def _text_to_list(txt): + out = txt.strip("][\n").replace("'", "").split(', ') + return None if out[0] == "" else out + +class AbstractTest: + features = [] + features_groups = {} + features_map = {} + features_flags = set() + + def load_flags(self): + # a hook + pass + def test_features(self): + self.load_flags() + for gname, features in self.features_groups.items(): + test_features = [self.cpu_have(f) for f in features] + assert_features_equal(__cpu_features__.get(gname), all(test_features), gname) + + for feature_name in self.features: + cpu_have = self.cpu_have(feature_name) + npy_have = __cpu_features__.get(feature_name) + assert_features_equal(npy_have, cpu_have, feature_name) + + def cpu_have(self, feature_name): + map_names = self.features_map.get(feature_name, feature_name) + if isinstance(map_names, str): + return map_names in self.features_flags + for f in map_names: + if f in self.features_flags: + return True + return False + + def load_flags_cpuinfo(self, magic_key): + self.features_flags = self.get_cpuinfo_item(magic_key) + + def get_cpuinfo_item(self, magic_key): + values = set() + with open('/proc/cpuinfo') as fd: + for line in fd: + if not line.startswith(magic_key): + continue + flags_value = [s.strip() for s in line.split(':', 1)] + if len(flags_value) == 2: + values = values.union(flags_value[1].upper().split()) + return values + + def load_flags_auxv(self): + auxv = subprocess.check_output(['/bin/true'], env=dict(LD_SHOW_AUXV="1")) + for at in auxv.split(b'\n'): + if not at.startswith(b"AT_HWCAP"): + continue + hwcap_value = [s.strip() for s in at.split(b':', 1)] + if len(hwcap_value) == 2: + self.features_flags = self.features_flags.union( + hwcap_value[1].upper().decode().split() + ) + +@pytest.mark.skipif( + sys.platform == 'emscripten', + reason= ( + "The subprocess module is not available on WASM platforms and" + " therefore this test class cannot be properly executed." 
+ ), +) +class TestEnvPrivation: + cwd = pathlib.Path(__file__).parent.resolve() + env = os.environ.copy() + _enable = os.environ.pop('NPY_ENABLE_CPU_FEATURES', None) + _disable = os.environ.pop('NPY_DISABLE_CPU_FEATURES', None) + SUBPROCESS_ARGS = dict(cwd=cwd, capture_output=True, text=True, check=True) + unavailable_feats = [ + feat for feat in __cpu_dispatch__ if not __cpu_features__[feat] + ] + UNAVAILABLE_FEAT = ( + None if len(unavailable_feats) == 0 + else unavailable_feats[0] + ) + BASELINE_FEAT = None if len(__cpu_baseline__) == 0 else __cpu_baseline__[0] + SCRIPT = """ +def main(): + from numpy.core._multiarray_umath import __cpu_features__, __cpu_dispatch__ + + detected = [feat for feat in __cpu_dispatch__ if __cpu_features__[feat]] + print(detected) + +if __name__ == "__main__": + main() + """ + + @pytest.fixture(autouse=True) + def setup_class(self, tmp_path_factory): + file = tmp_path_factory.mktemp("runtime_test_script") + file /= "_runtime_detect.py" + file.write_text(self.SCRIPT) + self.file = file + return + + def _run(self): + return subprocess.run( + [sys.executable, self.file], + env=self.env, + **self.SUBPROCESS_ARGS, + ) + + # Helper function mimicing pytest.raises for subprocess call + def _expect_error( + self, + msg, + err_type, + no_error_msg="Failed to generate error" + ): + try: + self._run() + except subprocess.CalledProcessError as e: + assertion_message = f"Expected: {msg}\nGot: {e.stderr}" + assert re.search(msg, e.stderr), assertion_message + + assertion_message = ( + f"Expected error of type: {err_type}; see full " + f"error:\n{e.stderr}" + ) + assert re.search(err_type, e.stderr), assertion_message + else: + assert False, no_error_msg + + def setup_method(self): + """Ensure that the environment is reset""" + self.env = os.environ.copy() + return + + def test_runtime_feature_selection(self): + """ + Ensure that when selecting `NPY_ENABLE_CPU_FEATURES`, only the + features exactly specified are dispatched. + """ + + # Capture runtime-enabled features + out = self._run() + non_baseline_features = _text_to_list(out.stdout) + + if non_baseline_features is None: + pytest.skip( + "No dispatchable features outside of baseline detected." 
+ ) + feature = non_baseline_features[0] + + # Capture runtime-enabled features when `NPY_ENABLE_CPU_FEATURES` is + # specified + self.env['NPY_ENABLE_CPU_FEATURES'] = feature + out = self._run() + enabled_features = _text_to_list(out.stdout) + + # Ensure that only one feature is enabled, and it is exactly the one + # specified by `NPY_ENABLE_CPU_FEATURES` + assert set(enabled_features) == {feature} + + if len(non_baseline_features) < 2: + pytest.skip("Only one non-baseline feature detected.") + # Capture runtime-enabled features when `NPY_ENABLE_CPU_FEATURES` is + # specified + self.env['NPY_ENABLE_CPU_FEATURES'] = ",".join(non_baseline_features) + out = self._run() + enabled_features = _text_to_list(out.stdout) + + # Ensure that both features are enabled, and they are exactly the ones + # specified by `NPY_ENABLE_CPU_FEATURES` + assert set(enabled_features) == set(non_baseline_features) + return + + @pytest.mark.parametrize("enabled, disabled", + [ + ("feature", "feature"), + ("feature", "same"), + ]) + def test_both_enable_disable_set(self, enabled, disabled): + """ + Ensure that when both environment variables are set then an + ImportError is thrown + """ + self.env['NPY_ENABLE_CPU_FEATURES'] = enabled + self.env['NPY_DISABLE_CPU_FEATURES'] = disabled + msg = "Both NPY_DISABLE_CPU_FEATURES and NPY_ENABLE_CPU_FEATURES" + err_type = "ImportError" + self._expect_error(msg, err_type) + + @pytest.mark.skipif( + not __cpu_dispatch__, + reason=( + "NPY_*_CPU_FEATURES only parsed if " + "`__cpu_dispatch__` is non-empty" + ) + ) + @pytest.mark.parametrize("action", ["ENABLE", "DISABLE"]) + def test_variable_too_long(self, action): + """ + Test that an error is thrown if the environment variables are too long + to be processed. Current limit is 1024, but this may change later. + """ + MAX_VAR_LENGTH = 1024 + # Actual length is MAX_VAR_LENGTH + 1 due to null-termination + self.env[f'NPY_{action}_CPU_FEATURES'] = "t" * MAX_VAR_LENGTH + msg = ( + f"Length of environment variable 'NPY_{action}_CPU_FEATURES' is " + f"{MAX_VAR_LENGTH + 1}, only {MAX_VAR_LENGTH} accepted" + ) + err_type = "RuntimeError" + self._expect_error(msg, err_type) + + @pytest.mark.skipif( + not __cpu_dispatch__, + reason=( + "NPY_*_CPU_FEATURES only parsed if " + "`__cpu_dispatch__` is non-empty" + ) + ) + def test_impossible_feature_disable(self): + """ + Test that a RuntimeError is thrown if an impossible feature-disabling + request is made. This includes disabling a baseline feature. + """ + + if self.BASELINE_FEAT is None: + pytest.skip("There are no unavailable features to test with") + bad_feature = self.BASELINE_FEAT + self.env['NPY_DISABLE_CPU_FEATURES'] = bad_feature + msg = ( + f"You cannot disable CPU feature '{bad_feature}', since it is " + "part of the baseline optimizations" + ) + err_type = "RuntimeError" + self._expect_error(msg, err_type) + + def test_impossible_feature_enable(self): + """ + Test that a RuntimeError is thrown if an impossible feature-enabling + request is made. This includes enabling a feature not supported by the + machine, or disabling a baseline optimization. + """ + + if self.UNAVAILABLE_FEAT is None: + pytest.skip("There are no unavailable features to test with") + bad_feature = self.UNAVAILABLE_FEAT + self.env['NPY_ENABLE_CPU_FEATURES'] = bad_feature + msg = ( + f"You cannot enable CPU features \\({bad_feature}\\), since " + "they are not supported by your machine." 
+ ) + err_type = "RuntimeError" + self._expect_error(msg, err_type) + + # Ensure that only the bad feature gets reported + feats = f"{bad_feature}, {self.BASELINE_FEAT}" + self.env['NPY_ENABLE_CPU_FEATURES'] = feats + msg = ( + f"You cannot enable CPU features \\({bad_feature}\\), since they " + "are not supported by your machine." + ) + self._expect_error(msg, err_type) + +is_linux = sys.platform.startswith('linux') +is_cygwin = sys.platform.startswith('cygwin') +machine = platform.machine() +is_x86 = re.match("^(amd64|x86|i386|i686)", machine, re.IGNORECASE) +@pytest.mark.skipif( + not (is_linux or is_cygwin) or not is_x86, reason="Only for Linux and x86" +) +class Test_X86_Features(AbstractTest): + features = [ + "MMX", "SSE", "SSE2", "SSE3", "SSSE3", "SSE41", "POPCNT", "SSE42", + "AVX", "F16C", "XOP", "FMA4", "FMA3", "AVX2", "AVX512F", "AVX512CD", + "AVX512ER", "AVX512PF", "AVX5124FMAPS", "AVX5124VNNIW", "AVX512VPOPCNTDQ", + "AVX512VL", "AVX512BW", "AVX512DQ", "AVX512VNNI", "AVX512IFMA", + "AVX512VBMI", "AVX512VBMI2", "AVX512BITALG", "AVX512FP16", + ] + features_groups = dict( + AVX512_KNL = ["AVX512F", "AVX512CD", "AVX512ER", "AVX512PF"], + AVX512_KNM = ["AVX512F", "AVX512CD", "AVX512ER", "AVX512PF", "AVX5124FMAPS", + "AVX5124VNNIW", "AVX512VPOPCNTDQ"], + AVX512_SKX = ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL"], + AVX512_CLX = ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL", "AVX512VNNI"], + AVX512_CNL = ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL", "AVX512IFMA", + "AVX512VBMI"], + AVX512_ICL = ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL", "AVX512IFMA", + "AVX512VBMI", "AVX512VNNI", "AVX512VBMI2", "AVX512BITALG", "AVX512VPOPCNTDQ"], + AVX512_SPR = ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", + "AVX512VL", "AVX512IFMA", "AVX512VBMI", "AVX512VNNI", + "AVX512VBMI2", "AVX512BITALG", "AVX512VPOPCNTDQ", + "AVX512FP16"], + ) + features_map = dict( + SSE3="PNI", SSE41="SSE4_1", SSE42="SSE4_2", FMA3="FMA", + AVX512VNNI="AVX512_VNNI", AVX512BITALG="AVX512_BITALG", AVX512VBMI2="AVX512_VBMI2", + AVX5124FMAPS="AVX512_4FMAPS", AVX5124VNNIW="AVX512_4VNNIW", AVX512VPOPCNTDQ="AVX512_VPOPCNTDQ", + AVX512FP16="AVX512_FP16", + ) + def load_flags(self): + self.load_flags_cpuinfo("flags") + +is_power = re.match("^(powerpc|ppc)64", machine, re.IGNORECASE) +@pytest.mark.skipif(not is_linux or not is_power, reason="Only for Linux and Power") +class Test_POWER_Features(AbstractTest): + features = ["VSX", "VSX2", "VSX3", "VSX4"] + features_map = dict(VSX2="ARCH_2_07", VSX3="ARCH_3_00", VSX4="ARCH_3_1") + + def load_flags(self): + self.load_flags_auxv() + + +is_zarch = re.match("^(s390x)", machine, re.IGNORECASE) +@pytest.mark.skipif(not is_linux or not is_zarch, + reason="Only for Linux and IBM Z") +class Test_ZARCH_Features(AbstractTest): + features = ["VX", "VXE", "VXE2"] + + def load_flags(self): + self.load_flags_auxv() + + +is_arm = re.match("^(arm|aarch64)", machine, re.IGNORECASE) +@pytest.mark.skipif(not is_linux or not is_arm, reason="Only for Linux and ARM") +class Test_ARM_Features(AbstractTest): + features = [ + "NEON", "ASIMD", "FPHP", "ASIMDHP", "ASIMDDP", "ASIMDFHM" + ] + features_groups = dict( + NEON_FP16 = ["NEON", "HALF"], + NEON_VFPV4 = ["NEON", "VFPV4"], + ) + def load_flags(self): + self.load_flags_cpuinfo("Features") + arch = self.get_cpuinfo_item("CPU architecture") + # in case of mounting virtual filesystem of aarch64 kernel + is_rootfs_v8 = int('0'+next(iter(arch))) > 7 if arch else 0 + if re.match("^(aarch64|AARCH64)", machine) 
or is_rootfs_v8: + self.features_map = dict( + NEON="ASIMD", HALF="ASIMD", VFPV4="ASIMD" + ) + else: + self.features_map = dict( + # ELF auxiliary vector and /proc/cpuinfo on Linux kernel(armv8 aarch32) + # doesn't provide information about ASIMD, so we assume that ASIMD is supported + # if the kernel reports any one of the following ARM8 features. + ASIMD=("AES", "SHA1", "SHA2", "PMULL", "CRC32") + ) diff --git a/.venv/lib/python3.12/site-packages/numpy/core/tests/test_custom_dtypes.py b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_custom_dtypes.py new file mode 100644 index 00000000..da6a4bd5 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_custom_dtypes.py @@ -0,0 +1,253 @@ +import pytest + +import numpy as np +from numpy.testing import assert_array_equal +from numpy.core._multiarray_umath import ( + _discover_array_parameters as discover_array_params, _get_sfloat_dtype) + + +SF = _get_sfloat_dtype() + + +class TestSFloat: + def _get_array(self, scaling, aligned=True): + if not aligned: + a = np.empty(3*8 + 1, dtype=np.uint8)[1:] + a = a.view(np.float64) + a[:] = [1., 2., 3.] + else: + a = np.array([1., 2., 3.]) + + a *= 1./scaling # the casting code also uses the reciprocal. + return a.view(SF(scaling)) + + def test_sfloat_rescaled(self): + sf = SF(1.) + sf2 = sf.scaled_by(2.) + assert sf2.get_scaling() == 2. + sf6 = sf2.scaled_by(3.) + assert sf6.get_scaling() == 6. + + def test_class_discovery(self): + # This does not test much, since we always discover the scaling as 1. + # But most of NumPy (when writing) does not understand DType classes + dt, _ = discover_array_params([1., 2., 3.], dtype=SF) + assert dt == SF(1.) + + @pytest.mark.parametrize("scaling", [1., -1., 2.]) + def test_scaled_float_from_floats(self, scaling): + a = np.array([1., 2., 3.], dtype=SF(scaling)) + + assert a.dtype.get_scaling() == scaling + assert_array_equal(scaling * a.view(np.float64), [1., 2., 3.]) + + def test_repr(self): + # Check the repr, mainly to cover the code paths: + assert repr(SF(scaling=1.)) == "_ScaledFloatTestDType(scaling=1.0)" + + def test_dtype_name(self): + assert SF(1.).name == "_ScaledFloatTestDType64" + + @pytest.mark.parametrize("scaling", [1., -1., 2.]) + def test_sfloat_from_float(self, scaling): + a = np.array([1., 2., 3.]).astype(dtype=SF(scaling)) + + assert a.dtype.get_scaling() == scaling + assert_array_equal(scaling * a.view(np.float64), [1., 2., 3.]) + + @pytest.mark.parametrize("aligned", [True, False]) + @pytest.mark.parametrize("scaling", [1., -1., 2.]) + def test_sfloat_getitem(self, aligned, scaling): + a = self._get_array(1., aligned) + assert a.tolist() == [1., 2., 3.] 
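+        # Illustrative note (an added sketch, not part of the upstream NumPy
+        # test file): `_get_array(scaling)` above stores the raw float64
+        # payload as the logical value divided by `scaling`, so reading the
+        # data back through the SF view multiplies the payload by the scaling
+        # factor again, roughly:
+        #
+        #     payload = np.array([1., 2., 3.]) * (1. / 2.)  # stored for SF(2.)
+        #     scaled = payload.view(SF(2.))
+        #     assert scaled.tolist() == [1., 2., 3.]         # logical values
+        #
+        # This is only a sketch; the actual getitem behaviour is defined by
+        # the _ScaledFloatTestDType returned by `_get_sfloat_dtype()`.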
+
+    @pytest.mark.parametrize("aligned", [True, False])
+    def test_sfloat_casts(self, aligned):
+        a = self._get_array(1., aligned)
+
+        assert np.can_cast(a, SF(-1.), casting="equiv")
+        assert not np.can_cast(a, SF(-1.), casting="no")
+        na = a.astype(SF(-1.))
+        assert_array_equal(-1 * na.view(np.float64), a.view(np.float64))
+
+        assert np.can_cast(a, SF(2.), casting="same_kind")
+        assert not np.can_cast(a, SF(2.), casting="safe")
+        a2 = a.astype(SF(2.))
+        assert_array_equal(2 * a2.view(np.float64), a.view(np.float64))
+
+    @pytest.mark.parametrize("aligned", [True, False])
+    def test_sfloat_cast_internal_errors(self, aligned):
+        a = self._get_array(2e300, aligned)
+
+        with pytest.raises(TypeError,
+                match="error raised inside the core-loop: non-finite factor!"):
+            a.astype(SF(2e-300))
+
+    def test_sfloat_promotion(self):
+        assert np.result_type(SF(2.), SF(3.)) == SF(3.)
+        assert np.result_type(SF(3.), SF(2.)) == SF(3.)
+        # Float64 -> SF(1.) and then promotes normally, so both of these work:
+        assert np.result_type(SF(3.), np.float64) == SF(3.)
+        assert np.result_type(np.float64, SF(0.5)) == SF(1.)
+
+        # Test an undefined promotion:
+        with pytest.raises(TypeError):
+            np.result_type(SF(1.), np.int64)
+
+    def test_basic_multiply(self):
+        a = self._get_array(2.)
+        b = self._get_array(4.)
+
+        res = a * b
+        # multiplies dtype scaling and content separately:
+        assert res.dtype.get_scaling() == 8.
+        expected_view = a.view(np.float64) * b.view(np.float64)
+        assert_array_equal(res.view(np.float64), expected_view)
+
+    def test_possible_and_impossible_reduce(self):
+        # For reductions to work, the first and last operand must have the
+        # same dtype. For this parametric DType that is not necessarily true.
+        a = self._get_array(2.)
+        # Addition reduction works (as of writing it requires passing
+        # `initial`, because setting a scaled-float from the default `0` fails).
+        res = np.add.reduce(a, initial=0.)
+        assert res == a.astype(np.float64).sum()
+
+        # But each multiplication changes the factor, so a reduction is not
+        # possible (the relaxed version of the old refusal to handle any
+        # flexible dtype).
+        with pytest.raises(TypeError,
+                match="the resolved dtypes are not compatible"):
+            np.multiply.reduce(a)
+
+    def test_basic_ufunc_at(self):
+        float_a = np.array([1., 2., 3.])
+        b = self._get_array(2.)
+
+        float_b = b.view(np.float64).copy()
+        np.multiply.at(float_b, [1, 1, 1], float_a)
+        np.multiply.at(b, [1, 1, 1], float_a)
+
+        assert_array_equal(b.view(np.float64), float_b)
+
+    def test_basic_multiply_promotion(self):
+        float_a = np.array([1., 2., 3.])
+        b = self._get_array(2.)
+
+        res1 = float_a * b
+        res2 = b * float_a
+
+        # one factor is one, so we get the factor of b:
+        assert res1.dtype == res2.dtype == b.dtype
+        expected_view = float_a * b.view(np.float64)
+        assert_array_equal(res1.view(np.float64), expected_view)
+        assert_array_equal(res2.view(np.float64), expected_view)
+
+        # Check that promotion works when `out` is used:
+        np.multiply(b, float_a, out=res2)
+        with pytest.raises(TypeError):
+            # The promoter accepts this (maybe it should not), but the SFloat
+            # result cannot be cast to integer:
+            np.multiply(b, float_a, out=np.arange(3))
+
+    def test_basic_addition(self):
+        a = self._get_array(2.)
+        b = self._get_array(4.)
+ + res = a + b + # addition uses the type promotion rules for the result: + assert res.dtype == np.result_type(a.dtype, b.dtype) + expected_view = (a.astype(res.dtype).view(np.float64) + + b.astype(res.dtype).view(np.float64)) + assert_array_equal(res.view(np.float64), expected_view) + + def test_addition_cast_safety(self): + """The addition method is special for the scaled float, because it + includes the "cast" between different factors, thus cast-safety + is influenced by the implementation. + """ + a = self._get_array(2.) + b = self._get_array(-2.) + c = self._get_array(3.) + + # sign change is "equiv": + np.add(a, b, casting="equiv") + with pytest.raises(TypeError): + np.add(a, b, casting="no") + + # Different factor is "same_kind" (default) so check that "safe" fails + with pytest.raises(TypeError): + np.add(a, c, casting="safe") + + # Check that casting the output fails also (done by the ufunc here) + with pytest.raises(TypeError): + np.add(a, a, out=c, casting="safe") + + @pytest.mark.parametrize("ufunc", + [np.logical_and, np.logical_or, np.logical_xor]) + def test_logical_ufuncs_casts_to_bool(self, ufunc): + a = self._get_array(2.) + a[0] = 0. # make sure first element is considered False. + + float_equiv = a.astype(float) + expected = ufunc(float_equiv, float_equiv) + res = ufunc(a, a) + assert_array_equal(res, expected) + + # also check that the same works for reductions: + expected = ufunc.reduce(float_equiv) + res = ufunc.reduce(a) + assert_array_equal(res, expected) + + # The output casting does not match the bool, bool -> bool loop: + with pytest.raises(TypeError): + ufunc(a, a, out=np.empty(a.shape, dtype=int), casting="equiv") + + def test_wrapped_and_wrapped_reductions(self): + a = self._get_array(2.) + float_equiv = a.astype(float) + + expected = np.hypot(float_equiv, float_equiv) + res = np.hypot(a, a) + assert res.dtype == a.dtype + res_float = res.view(np.float64) * 2 + assert_array_equal(res_float, expected) + + # Also check reduction (keepdims, due to incorrect getitem) + res = np.hypot.reduce(a, keepdims=True) + assert res.dtype == a.dtype + expected = np.hypot.reduce(float_equiv, keepdims=True) + assert res.view(np.float64) * 2 == expected + + def test_astype_class(self): + # Very simple test that we accept `.astype()` also on the class. + # ScaledFloat always returns the default descriptor, but it does + # check the relevant code paths. + arr = np.array([1., 2., 3.], dtype=object) + + res = arr.astype(SF) # passing the class class + expected = arr.astype(SF(1.)) # above will have discovered 1. scaling + assert_array_equal(res.view(np.float64), expected.view(np.float64)) + + def test_creation_class(self): + arr1 = np.array([1., 2., 3.], dtype=SF) + assert arr1.dtype == SF(1.) 
+ arr2 = np.array([1., 2., 3.], dtype=SF(1.)) + assert_array_equal(arr1.view(np.float64), arr2.view(np.float64)) + + +def test_type_pickle(): + # can't actually unpickle, but we can pickle (if in namespace) + import pickle + + np._ScaledFloatTestDType = SF + + s = pickle.dumps(SF) + res = pickle.loads(s) + assert res is SF + + del np._ScaledFloatTestDType + + +def test_is_numeric(): + assert SF._is_numeric diff --git a/.venv/lib/python3.12/site-packages/numpy/core/tests/test_cython.py b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_cython.py new file mode 100644 index 00000000..0e0d00c2 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_cython.py @@ -0,0 +1,135 @@ +import os +import shutil +import subprocess +import sys +import pytest + +import numpy as np +from numpy.testing import IS_WASM + +# This import is copied from random.tests.test_extending +try: + import cython + from Cython.Compiler.Version import version as cython_version +except ImportError: + cython = None +else: + from numpy._utils import _pep440 + + # Cython 0.29.30 is required for Python 3.11 and there are + # other fixes in the 0.29 series that are needed even for earlier + # Python versions. + # Note: keep in sync with the one in pyproject.toml + required_version = "0.29.30" + if _pep440.parse(cython_version) < _pep440.Version(required_version): + # too old or wrong cython, skip the test + cython = None + +pytestmark = pytest.mark.skipif(cython is None, reason="requires cython") + + +@pytest.fixture(scope='module') +def install_temp(tmpdir_factory): + # Based in part on test_cython from random.tests.test_extending + if IS_WASM: + pytest.skip("No subprocess") + + srcdir = os.path.join(os.path.dirname(__file__), 'examples', 'cython') + build_dir = tmpdir_factory.mktemp("cython_test") / "build" + os.makedirs(build_dir, exist_ok=True) + try: + subprocess.check_call(["meson", "--version"]) + except FileNotFoundError: + pytest.skip("No usable 'meson' found") + if sys.platform == "win32": + subprocess.check_call(["meson", "setup", + "--buildtype=release", + "--vsenv", str(srcdir)], + cwd=build_dir, + ) + else: + subprocess.check_call(["meson", "setup", str(srcdir)], + cwd=build_dir + ) + subprocess.check_call(["meson", "compile", "-vv"], cwd=build_dir) + + sys.path.append(str(build_dir)) + +def test_is_timedelta64_object(install_temp): + import checks + + assert checks.is_td64(np.timedelta64(1234)) + assert checks.is_td64(np.timedelta64(1234, "ns")) + assert checks.is_td64(np.timedelta64("NaT", "ns")) + + assert not checks.is_td64(1) + assert not checks.is_td64(None) + assert not checks.is_td64("foo") + assert not checks.is_td64(np.datetime64("now", "s")) + + +def test_is_datetime64_object(install_temp): + import checks + + assert checks.is_dt64(np.datetime64(1234, "ns")) + assert checks.is_dt64(np.datetime64("NaT", "ns")) + + assert not checks.is_dt64(1) + assert not checks.is_dt64(None) + assert not checks.is_dt64("foo") + assert not checks.is_dt64(np.timedelta64(1234)) + + +def test_get_datetime64_value(install_temp): + import checks + + dt64 = np.datetime64("2016-01-01", "ns") + + result = checks.get_dt64_value(dt64) + expected = dt64.view("i8") + + assert result == expected + + +def test_get_timedelta64_value(install_temp): + import checks + + td64 = np.timedelta64(12345, "h") + + result = checks.get_td64_value(td64) + expected = td64.view("i8") + + assert result == expected + + +def test_get_datetime64_unit(install_temp): + import checks + + dt64 = np.datetime64("2016-01-01", "ns") + 
result = checks.get_dt64_unit(dt64) + expected = 10 + assert result == expected + + td64 = np.timedelta64(12345, "h") + result = checks.get_dt64_unit(td64) + expected = 5 + assert result == expected + + +def test_abstract_scalars(install_temp): + import checks + + assert checks.is_integer(1) + assert checks.is_integer(np.int8(1)) + assert checks.is_integer(np.uint64(1)) + +def test_conv_intp(install_temp): + import checks + + class myint: + def __int__(self): + return 3 + + # These conversion passes via `__int__`, not `__index__`: + assert checks.conv_intp(3.) == 3 + assert checks.conv_intp(myint()) == 3 diff --git a/.venv/lib/python3.12/site-packages/numpy/core/tests/test_datetime.py b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_datetime.py new file mode 100644 index 00000000..547ebf9d --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_datetime.py @@ -0,0 +1,2569 @@ + +import numpy +import numpy as np +import datetime +import pytest +from numpy.testing import ( + IS_WASM, + assert_, assert_equal, assert_raises, assert_warns, suppress_warnings, + assert_raises_regex, assert_array_equal, + ) +from numpy.compat import pickle + +# Use pytz to test out various time zones if available +try: + from pytz import timezone as tz + _has_pytz = True +except ImportError: + _has_pytz = False + +try: + RecursionError +except NameError: + RecursionError = RuntimeError # python < 3.5 + + +class TestDateTime: + def test_datetime_dtype_creation(self): + for unit in ['Y', 'M', 'W', 'D', + 'h', 'm', 's', 'ms', 'us', + 'μs', # alias for us + 'ns', 'ps', 'fs', 'as']: + dt1 = np.dtype('M8[750%s]' % unit) + assert_(dt1 == np.dtype('datetime64[750%s]' % unit)) + dt2 = np.dtype('m8[%s]' % unit) + assert_(dt2 == np.dtype('timedelta64[%s]' % unit)) + + # Generic units shouldn't add [] to the end + assert_equal(str(np.dtype("M8")), "datetime64") + + # Should be possible to specify the endianness + assert_equal(np.dtype("=M8"), np.dtype("M8")) + assert_equal(np.dtype("=M8[s]"), np.dtype("M8[s]")) + assert_(np.dtype(">M8") == np.dtype("M8") or + np.dtype("<M8") == np.dtype("M8")) + assert_(np.dtype(">M8[D]") == np.dtype("M8[D]") or + np.dtype("<M8[D]") == np.dtype("M8[D]")) + assert_(np.dtype(">M8") != np.dtype("<M8")) + + assert_equal(np.dtype("=m8"), np.dtype("m8")) + assert_equal(np.dtype("=m8[s]"), np.dtype("m8[s]")) + assert_(np.dtype(">m8") == np.dtype("m8") or + np.dtype("<m8") == np.dtype("m8")) + assert_(np.dtype(">m8[D]") == np.dtype("m8[D]") or + np.dtype("<m8[D]") == np.dtype("m8[D]")) + assert_(np.dtype(">m8") != np.dtype("<m8")) + + # Check that the parser rejects bad datetime types + assert_raises(TypeError, np.dtype, 'M8[badunit]') + assert_raises(TypeError, np.dtype, 'm8[badunit]') + assert_raises(TypeError, np.dtype, 'M8[YY]') + assert_raises(TypeError, np.dtype, 'm8[YY]') + assert_raises(TypeError, np.dtype, 'm4') + assert_raises(TypeError, np.dtype, 'M7') + assert_raises(TypeError, np.dtype, 'm7') + assert_raises(TypeError, np.dtype, 'M16') + assert_raises(TypeError, np.dtype, 'm16') + assert_raises(TypeError, np.dtype, 'M8[3000000000ps]') + + def test_datetime_casting_rules(self): + # Cannot cast safely/same_kind between timedelta and datetime + assert_(not np.can_cast('m8', 'M8', casting='same_kind')) + assert_(not np.can_cast('M8', 'm8', casting='same_kind')) + assert_(not np.can_cast('m8', 'M8', casting='safe')) + assert_(not np.can_cast('M8', 'm8', casting='safe')) + + # Can cast safely/same_kind from integer to timedelta + assert_(np.can_cast('i8', 'm8', 
casting='same_kind')) + assert_(np.can_cast('i8', 'm8', casting='safe')) + assert_(np.can_cast('i4', 'm8', casting='same_kind')) + assert_(np.can_cast('i4', 'm8', casting='safe')) + assert_(np.can_cast('u4', 'm8', casting='same_kind')) + assert_(np.can_cast('u4', 'm8', casting='safe')) + + # Cannot cast safely from unsigned integer of the same size, which + # could overflow + assert_(np.can_cast('u8', 'm8', casting='same_kind')) + assert_(not np.can_cast('u8', 'm8', casting='safe')) + + # Cannot cast safely/same_kind from float to timedelta + assert_(not np.can_cast('f4', 'm8', casting='same_kind')) + assert_(not np.can_cast('f4', 'm8', casting='safe')) + + # Cannot cast safely/same_kind from integer to datetime + assert_(not np.can_cast('i8', 'M8', casting='same_kind')) + assert_(not np.can_cast('i8', 'M8', casting='safe')) + + # Cannot cast safely/same_kind from bool to datetime + assert_(not np.can_cast('b1', 'M8', casting='same_kind')) + assert_(not np.can_cast('b1', 'M8', casting='safe')) + # Can cast safely/same_kind from bool to timedelta + assert_(np.can_cast('b1', 'm8', casting='same_kind')) + assert_(np.can_cast('b1', 'm8', casting='safe')) + + # Can cast datetime safely from months/years to days + assert_(np.can_cast('M8[M]', 'M8[D]', casting='safe')) + assert_(np.can_cast('M8[Y]', 'M8[D]', casting='safe')) + # Cannot cast timedelta safely from months/years to days + assert_(not np.can_cast('m8[M]', 'm8[D]', casting='safe')) + assert_(not np.can_cast('m8[Y]', 'm8[D]', casting='safe')) + # Can cast datetime same_kind from months/years to days + assert_(np.can_cast('M8[M]', 'M8[D]', casting='same_kind')) + assert_(np.can_cast('M8[Y]', 'M8[D]', casting='same_kind')) + # Can't cast timedelta same_kind from months/years to days + assert_(not np.can_cast('m8[M]', 'm8[D]', casting='same_kind')) + assert_(not np.can_cast('m8[Y]', 'm8[D]', casting='same_kind')) + # Can cast datetime same_kind across the date/time boundary + assert_(np.can_cast('M8[D]', 'M8[h]', casting='same_kind')) + # Can cast timedelta same_kind across the date/time boundary + assert_(np.can_cast('m8[D]', 'm8[h]', casting='same_kind')) + assert_(np.can_cast('m8[h]', 'm8[D]', casting='same_kind')) + + # Cannot cast safely if the integer multiplier doesn't divide + assert_(not np.can_cast('M8[7h]', 'M8[3h]', casting='safe')) + assert_(not np.can_cast('M8[3h]', 'M8[6h]', casting='safe')) + # But can cast same_kind + assert_(np.can_cast('M8[7h]', 'M8[3h]', casting='same_kind')) + # Can cast safely if the integer multiplier does divide + assert_(np.can_cast('M8[6h]', 'M8[3h]', casting='safe')) + + # We can always cast types with generic units (corresponding to NaT) to + # more specific types + assert_(np.can_cast('m8', 'm8[h]', casting='same_kind')) + assert_(np.can_cast('m8', 'm8[h]', casting='safe')) + assert_(np.can_cast('M8', 'M8[h]', casting='same_kind')) + assert_(np.can_cast('M8', 'M8[h]', casting='safe')) + # but not the other way around + assert_(not np.can_cast('m8[h]', 'm8', casting='same_kind')) + assert_(not np.can_cast('m8[h]', 'm8', casting='safe')) + assert_(not np.can_cast('M8[h]', 'M8', casting='same_kind')) + assert_(not np.can_cast('M8[h]', 'M8', casting='safe')) + + def test_datetime_prefix_conversions(self): + # regression tests related to gh-19631; + # test metric prefixes from seconds down to + # attoseconds for bidirectional conversions + smaller_units = ['M8[7000ms]', + 'M8[2000us]', + 'M8[1000ns]', + 'M8[5000ns]', + 'M8[2000ps]', + 'M8[9000fs]', + 'M8[1000as]', + 'M8[2000000ps]', + 
'M8[1000000as]', + 'M8[2000000000ps]', + 'M8[1000000000as]'] + larger_units = ['M8[7s]', + 'M8[2ms]', + 'M8[us]', + 'M8[5us]', + 'M8[2ns]', + 'M8[9ps]', + 'M8[1fs]', + 'M8[2us]', + 'M8[1ps]', + 'M8[2ms]', + 'M8[1ns]'] + for larger_unit, smaller_unit in zip(larger_units, smaller_units): + assert np.can_cast(larger_unit, smaller_unit, casting='safe') + assert np.can_cast(smaller_unit, larger_unit, casting='safe') + + @pytest.mark.parametrize("unit", [ + "s", "ms", "us", "ns", "ps", "fs", "as"]) + def test_prohibit_negative_datetime(self, unit): + with assert_raises(TypeError): + np.array([1], dtype=f"M8[-1{unit}]") + + def test_compare_generic_nat(self): + # regression tests for gh-6452 + assert_(np.datetime64('NaT') != + np.datetime64('2000') + np.timedelta64('NaT')) + assert_(np.datetime64('NaT') != np.datetime64('NaT', 'us')) + assert_(np.datetime64('NaT', 'us') != np.datetime64('NaT')) + + @pytest.mark.parametrize("size", [ + 3, 21, 217, 1000]) + def test_datetime_nat_argsort_stability(self, size): + # NaT < NaT should be False internally for + # sort stability + expected = np.arange(size) + arr = np.tile(np.datetime64('NaT'), size) + assert_equal(np.argsort(arr, kind='mergesort'), expected) + + @pytest.mark.parametrize("size", [ + 3, 21, 217, 1000]) + def test_timedelta_nat_argsort_stability(self, size): + # NaT < NaT should be False internally for + # sort stability + expected = np.arange(size) + arr = np.tile(np.timedelta64('NaT'), size) + assert_equal(np.argsort(arr, kind='mergesort'), expected) + + @pytest.mark.parametrize("arr, expected", [ + # the example provided in gh-12629 + (['NaT', 1, 2, 3], + [1, 2, 3, 'NaT']), + # multiple NaTs + (['NaT', 9, 'NaT', -707], + [-707, 9, 'NaT', 'NaT']), + # this sort explores another code path for NaT + ([1, -2, 3, 'NaT'], + [-2, 1, 3, 'NaT']), + # 2-D array + ([[51, -220, 'NaT'], + [-17, 'NaT', -90]], + [[-220, 51, 'NaT'], + [-90, -17, 'NaT']]), + ]) + @pytest.mark.parametrize("dtype", [ + 'M8[ns]', 'M8[us]', + 'm8[ns]', 'm8[us]']) + def test_datetime_timedelta_sort_nat(self, arr, expected, dtype): + # fix for gh-12629 and gh-15063; NaT sorting to end of array + arr = np.array(arr, dtype=dtype) + expected = np.array(expected, dtype=dtype) + arr.sort() + assert_equal(arr, expected) + + def test_datetime_scalar_construction(self): + # Construct with different units + assert_equal(np.datetime64('1950-03-12', 'D'), + np.datetime64('1950-03-12')) + assert_equal(np.datetime64('1950-03-12T13', 's'), + np.datetime64('1950-03-12T13', 'm')) + + # Default construction means NaT + assert_equal(np.datetime64(), np.datetime64('NaT')) + + # Some basic strings and repr + assert_equal(str(np.datetime64('NaT')), 'NaT') + assert_equal(repr(np.datetime64('NaT')), + "numpy.datetime64('NaT')") + assert_equal(str(np.datetime64('2011-02')), '2011-02') + assert_equal(repr(np.datetime64('2011-02')), + "numpy.datetime64('2011-02')") + + # None gets constructed as NaT + assert_equal(np.datetime64(None), np.datetime64('NaT')) + + # Default construction of NaT is in generic units + assert_equal(np.datetime64().dtype, np.dtype('M8')) + assert_equal(np.datetime64('NaT').dtype, np.dtype('M8')) + + # Construction from integers requires a specified unit + assert_raises(ValueError, np.datetime64, 17) + + # When constructing from a scalar or zero-dimensional array, + # it either keeps the units or you can override them. 
+ a = np.datetime64('2000-03-18T16', 'h') + b = np.array('2000-03-18T16', dtype='M8[h]') + + assert_equal(a.dtype, np.dtype('M8[h]')) + assert_equal(b.dtype, np.dtype('M8[h]')) + + assert_equal(np.datetime64(a), a) + assert_equal(np.datetime64(a).dtype, np.dtype('M8[h]')) + + assert_equal(np.datetime64(b), a) + assert_equal(np.datetime64(b).dtype, np.dtype('M8[h]')) + + assert_equal(np.datetime64(a, 's'), a) + assert_equal(np.datetime64(a, 's').dtype, np.dtype('M8[s]')) + + assert_equal(np.datetime64(b, 's'), a) + assert_equal(np.datetime64(b, 's').dtype, np.dtype('M8[s]')) + + # Construction from datetime.date + assert_equal(np.datetime64('1945-03-25'), + np.datetime64(datetime.date(1945, 3, 25))) + assert_equal(np.datetime64('2045-03-25', 'D'), + np.datetime64(datetime.date(2045, 3, 25), 'D')) + # Construction from datetime.datetime + assert_equal(np.datetime64('1980-01-25T14:36:22.5'), + np.datetime64(datetime.datetime(1980, 1, 25, + 14, 36, 22, 500000))) + + # Construction with time units from a date is okay + assert_equal(np.datetime64('1920-03-13', 'h'), + np.datetime64('1920-03-13T00')) + assert_equal(np.datetime64('1920-03', 'm'), + np.datetime64('1920-03-01T00:00')) + assert_equal(np.datetime64('1920', 's'), + np.datetime64('1920-01-01T00:00:00')) + assert_equal(np.datetime64(datetime.date(2045, 3, 25), 'ms'), + np.datetime64('2045-03-25T00:00:00.000')) + + # Construction with date units from a datetime is also okay + assert_equal(np.datetime64('1920-03-13T18', 'D'), + np.datetime64('1920-03-13')) + assert_equal(np.datetime64('1920-03-13T18:33:12', 'M'), + np.datetime64('1920-03')) + assert_equal(np.datetime64('1920-03-13T18:33:12.5', 'Y'), + np.datetime64('1920')) + + def test_datetime_scalar_construction_timezone(self): + # verify that supplying an explicit timezone works, but is deprecated + with assert_warns(DeprecationWarning): + assert_equal(np.datetime64('2000-01-01T00Z'), + np.datetime64('2000-01-01T00')) + with assert_warns(DeprecationWarning): + assert_equal(np.datetime64('2000-01-01T00-08'), + np.datetime64('2000-01-01T08')) + + def test_datetime_array_find_type(self): + dt = np.datetime64('1970-01-01', 'M') + arr = np.array([dt]) + assert_equal(arr.dtype, np.dtype('M8[M]')) + + # at the moment, we don't automatically convert these to datetime64 + + dt = datetime.date(1970, 1, 1) + arr = np.array([dt]) + assert_equal(arr.dtype, np.dtype('O')) + + dt = datetime.datetime(1970, 1, 1, 12, 30, 40) + arr = np.array([dt]) + assert_equal(arr.dtype, np.dtype('O')) + + # find "supertype" for non-dates and dates + + b = np.bool_(True) + dm = np.datetime64('1970-01-01', 'M') + d = datetime.date(1970, 1, 1) + dt = datetime.datetime(1970, 1, 1, 12, 30, 40) + + arr = np.array([b, dm]) + assert_equal(arr.dtype, np.dtype('O')) + + arr = np.array([b, d]) + assert_equal(arr.dtype, np.dtype('O')) + + arr = np.array([b, dt]) + assert_equal(arr.dtype, np.dtype('O')) + + arr = np.array([d, d]).astype('datetime64') + assert_equal(arr.dtype, np.dtype('M8[D]')) + + arr = np.array([dt, dt]).astype('datetime64') + assert_equal(arr.dtype, np.dtype('M8[us]')) + + @pytest.mark.parametrize("unit", [ + # test all date / time units and use + # "generic" to select generic unit + ("Y"), ("M"), ("W"), ("D"), ("h"), ("m"), + ("s"), ("ms"), ("us"), ("ns"), ("ps"), + ("fs"), ("as"), ("generic") ]) + def test_timedelta_np_int_construction(self, unit): + # regression test for gh-7617 + if unit != "generic": + assert_equal(np.timedelta64(np.int64(123), unit), + np.timedelta64(123, unit)) + else: + 
assert_equal(np.timedelta64(np.int64(123)), + np.timedelta64(123)) + + def test_timedelta_scalar_construction(self): + # Construct with different units + assert_equal(np.timedelta64(7, 'D'), + np.timedelta64(1, 'W')) + assert_equal(np.timedelta64(120, 's'), + np.timedelta64(2, 'm')) + + # Default construction means 0 + assert_equal(np.timedelta64(), np.timedelta64(0)) + + # None gets constructed as NaT + assert_equal(np.timedelta64(None), np.timedelta64('NaT')) + + # Some basic strings and repr + assert_equal(str(np.timedelta64('NaT')), 'NaT') + assert_equal(repr(np.timedelta64('NaT')), + "numpy.timedelta64('NaT')") + assert_equal(str(np.timedelta64(3, 's')), '3 seconds') + assert_equal(repr(np.timedelta64(-3, 's')), + "numpy.timedelta64(-3,'s')") + assert_equal(repr(np.timedelta64(12)), + "numpy.timedelta64(12)") + + # Construction from an integer produces generic units + assert_equal(np.timedelta64(12).dtype, np.dtype('m8')) + + # When constructing from a scalar or zero-dimensional array, + # it either keeps the units or you can override them. + a = np.timedelta64(2, 'h') + b = np.array(2, dtype='m8[h]') + + assert_equal(a.dtype, np.dtype('m8[h]')) + assert_equal(b.dtype, np.dtype('m8[h]')) + + assert_equal(np.timedelta64(a), a) + assert_equal(np.timedelta64(a).dtype, np.dtype('m8[h]')) + + assert_equal(np.timedelta64(b), a) + assert_equal(np.timedelta64(b).dtype, np.dtype('m8[h]')) + + assert_equal(np.timedelta64(a, 's'), a) + assert_equal(np.timedelta64(a, 's').dtype, np.dtype('m8[s]')) + + assert_equal(np.timedelta64(b, 's'), a) + assert_equal(np.timedelta64(b, 's').dtype, np.dtype('m8[s]')) + + # Construction from datetime.timedelta + assert_equal(np.timedelta64(5, 'D'), + np.timedelta64(datetime.timedelta(days=5))) + assert_equal(np.timedelta64(102347621, 's'), + np.timedelta64(datetime.timedelta(seconds=102347621))) + assert_equal(np.timedelta64(-10234760000, 'us'), + np.timedelta64(datetime.timedelta( + microseconds=-10234760000))) + assert_equal(np.timedelta64(10234760000, 'us'), + np.timedelta64(datetime.timedelta( + microseconds=10234760000))) + assert_equal(np.timedelta64(1023476, 'ms'), + np.timedelta64(datetime.timedelta(milliseconds=1023476))) + assert_equal(np.timedelta64(10, 'm'), + np.timedelta64(datetime.timedelta(minutes=10))) + assert_equal(np.timedelta64(281, 'h'), + np.timedelta64(datetime.timedelta(hours=281))) + assert_equal(np.timedelta64(28, 'W'), + np.timedelta64(datetime.timedelta(weeks=28))) + + # Cannot construct across nonlinear time unit boundaries + a = np.timedelta64(3, 's') + assert_raises(TypeError, np.timedelta64, a, 'M') + assert_raises(TypeError, np.timedelta64, a, 'Y') + a = np.timedelta64(6, 'M') + assert_raises(TypeError, np.timedelta64, a, 'D') + assert_raises(TypeError, np.timedelta64, a, 'h') + a = np.timedelta64(1, 'Y') + assert_raises(TypeError, np.timedelta64, a, 'D') + assert_raises(TypeError, np.timedelta64, a, 'm') + a = datetime.timedelta(seconds=3) + assert_raises(TypeError, np.timedelta64, a, 'M') + assert_raises(TypeError, np.timedelta64, a, 'Y') + a = datetime.timedelta(weeks=3) + assert_raises(TypeError, np.timedelta64, a, 'M') + assert_raises(TypeError, np.timedelta64, a, 'Y') + a = datetime.timedelta() + assert_raises(TypeError, np.timedelta64, a, 'M') + assert_raises(TypeError, np.timedelta64, a, 'Y') + + def test_timedelta_object_array_conversion(self): + # Regression test for gh-11096 + inputs = [datetime.timedelta(28), + datetime.timedelta(30), + datetime.timedelta(31)] + expected = np.array([28, 30, 31], 
dtype='timedelta64[D]') + actual = np.array(inputs, dtype='timedelta64[D]') + assert_equal(expected, actual) + + def test_timedelta_0_dim_object_array_conversion(self): + # Regression test for gh-11151 + test = np.array(datetime.timedelta(seconds=20)) + actual = test.astype(np.timedelta64) + # expected value from the array constructor workaround + # described in above issue + expected = np.array(datetime.timedelta(seconds=20), + np.timedelta64) + assert_equal(actual, expected) + + def test_timedelta_nat_format(self): + # gh-17552 + assert_equal('NaT', '{0}'.format(np.timedelta64('nat'))) + + def test_timedelta_scalar_construction_units(self): + # String construction detecting units + assert_equal(np.datetime64('2010').dtype, + np.dtype('M8[Y]')) + assert_equal(np.datetime64('2010-03').dtype, + np.dtype('M8[M]')) + assert_equal(np.datetime64('2010-03-12').dtype, + np.dtype('M8[D]')) + assert_equal(np.datetime64('2010-03-12T17').dtype, + np.dtype('M8[h]')) + assert_equal(np.datetime64('2010-03-12T17:15').dtype, + np.dtype('M8[m]')) + assert_equal(np.datetime64('2010-03-12T17:15:08').dtype, + np.dtype('M8[s]')) + + assert_equal(np.datetime64('2010-03-12T17:15:08.1').dtype, + np.dtype('M8[ms]')) + assert_equal(np.datetime64('2010-03-12T17:15:08.12').dtype, + np.dtype('M8[ms]')) + assert_equal(np.datetime64('2010-03-12T17:15:08.123').dtype, + np.dtype('M8[ms]')) + + assert_equal(np.datetime64('2010-03-12T17:15:08.1234').dtype, + np.dtype('M8[us]')) + assert_equal(np.datetime64('2010-03-12T17:15:08.12345').dtype, + np.dtype('M8[us]')) + assert_equal(np.datetime64('2010-03-12T17:15:08.123456').dtype, + np.dtype('M8[us]')) + + assert_equal(np.datetime64('1970-01-01T00:00:02.1234567').dtype, + np.dtype('M8[ns]')) + assert_equal(np.datetime64('1970-01-01T00:00:02.12345678').dtype, + np.dtype('M8[ns]')) + assert_equal(np.datetime64('1970-01-01T00:00:02.123456789').dtype, + np.dtype('M8[ns]')) + + assert_equal(np.datetime64('1970-01-01T00:00:02.1234567890').dtype, + np.dtype('M8[ps]')) + assert_equal(np.datetime64('1970-01-01T00:00:02.12345678901').dtype, + np.dtype('M8[ps]')) + assert_equal(np.datetime64('1970-01-01T00:00:02.123456789012').dtype, + np.dtype('M8[ps]')) + + assert_equal(np.datetime64( + '1970-01-01T00:00:02.1234567890123').dtype, + np.dtype('M8[fs]')) + assert_equal(np.datetime64( + '1970-01-01T00:00:02.12345678901234').dtype, + np.dtype('M8[fs]')) + assert_equal(np.datetime64( + '1970-01-01T00:00:02.123456789012345').dtype, + np.dtype('M8[fs]')) + + assert_equal(np.datetime64( + '1970-01-01T00:00:02.1234567890123456').dtype, + np.dtype('M8[as]')) + assert_equal(np.datetime64( + '1970-01-01T00:00:02.12345678901234567').dtype, + np.dtype('M8[as]')) + assert_equal(np.datetime64( + '1970-01-01T00:00:02.123456789012345678').dtype, + np.dtype('M8[as]')) + + # Python date object + assert_equal(np.datetime64(datetime.date(2010, 4, 16)).dtype, + np.dtype('M8[D]')) + + # Python datetime object + assert_equal(np.datetime64( + datetime.datetime(2010, 4, 16, 13, 45, 18)).dtype, + np.dtype('M8[us]')) + + # 'today' special value + assert_equal(np.datetime64('today').dtype, + np.dtype('M8[D]')) + + # 'now' special value + assert_equal(np.datetime64('now').dtype, + np.dtype('M8[s]')) + + def test_datetime_nat_casting(self): + a = np.array('NaT', dtype='M8[D]') + b = np.datetime64('NaT', '[D]') + + # Arrays + assert_equal(a.astype('M8[s]'), np.array('NaT', dtype='M8[s]')) + assert_equal(a.astype('M8[ms]'), np.array('NaT', dtype='M8[ms]')) + assert_equal(a.astype('M8[M]'), np.array('NaT', 
dtype='M8[M]')) + assert_equal(a.astype('M8[Y]'), np.array('NaT', dtype='M8[Y]')) + assert_equal(a.astype('M8[W]'), np.array('NaT', dtype='M8[W]')) + + # Scalars -> Scalars + assert_equal(np.datetime64(b, '[s]'), np.datetime64('NaT', '[s]')) + assert_equal(np.datetime64(b, '[ms]'), np.datetime64('NaT', '[ms]')) + assert_equal(np.datetime64(b, '[M]'), np.datetime64('NaT', '[M]')) + assert_equal(np.datetime64(b, '[Y]'), np.datetime64('NaT', '[Y]')) + assert_equal(np.datetime64(b, '[W]'), np.datetime64('NaT', '[W]')) + + # Arrays -> Scalars + assert_equal(np.datetime64(a, '[s]'), np.datetime64('NaT', '[s]')) + assert_equal(np.datetime64(a, '[ms]'), np.datetime64('NaT', '[ms]')) + assert_equal(np.datetime64(a, '[M]'), np.datetime64('NaT', '[M]')) + assert_equal(np.datetime64(a, '[Y]'), np.datetime64('NaT', '[Y]')) + assert_equal(np.datetime64(a, '[W]'), np.datetime64('NaT', '[W]')) + + # NaN -> NaT + nan = np.array([np.nan] * 8) + fnan = nan.astype('f') + lnan = nan.astype('g') + cnan = nan.astype('D') + cfnan = nan.astype('F') + clnan = nan.astype('G') + + nat = np.array([np.datetime64('NaT')] * 8) + assert_equal(nan.astype('M8[ns]'), nat) + assert_equal(fnan.astype('M8[ns]'), nat) + assert_equal(lnan.astype('M8[ns]'), nat) + assert_equal(cnan.astype('M8[ns]'), nat) + assert_equal(cfnan.astype('M8[ns]'), nat) + assert_equal(clnan.astype('M8[ns]'), nat) + + nat = np.array([np.timedelta64('NaT')] * 8) + assert_equal(nan.astype('timedelta64[ns]'), nat) + assert_equal(fnan.astype('timedelta64[ns]'), nat) + assert_equal(lnan.astype('timedelta64[ns]'), nat) + assert_equal(cnan.astype('timedelta64[ns]'), nat) + assert_equal(cfnan.astype('timedelta64[ns]'), nat) + assert_equal(clnan.astype('timedelta64[ns]'), nat) + + def test_days_creation(self): + assert_equal(np.array('1599', dtype='M8[D]').astype('i8'), + (1600-1970)*365 - (1972-1600)/4 + 3 - 365) + assert_equal(np.array('1600', dtype='M8[D]').astype('i8'), + (1600-1970)*365 - (1972-1600)/4 + 3) + assert_equal(np.array('1601', dtype='M8[D]').astype('i8'), + (1600-1970)*365 - (1972-1600)/4 + 3 + 366) + assert_equal(np.array('1900', dtype='M8[D]').astype('i8'), + (1900-1970)*365 - (1970-1900)//4) + assert_equal(np.array('1901', dtype='M8[D]').astype('i8'), + (1900-1970)*365 - (1970-1900)//4 + 365) + assert_equal(np.array('1967', dtype='M8[D]').astype('i8'), -3*365 - 1) + assert_equal(np.array('1968', dtype='M8[D]').astype('i8'), -2*365 - 1) + assert_equal(np.array('1969', dtype='M8[D]').astype('i8'), -1*365) + assert_equal(np.array('1970', dtype='M8[D]').astype('i8'), 0*365) + assert_equal(np.array('1971', dtype='M8[D]').astype('i8'), 1*365) + assert_equal(np.array('1972', dtype='M8[D]').astype('i8'), 2*365) + assert_equal(np.array('1973', dtype='M8[D]').astype('i8'), 3*365 + 1) + assert_equal(np.array('1974', dtype='M8[D]').astype('i8'), 4*365 + 1) + assert_equal(np.array('2000', dtype='M8[D]').astype('i8'), + (2000 - 1970)*365 + (2000 - 1972)//4) + assert_equal(np.array('2001', dtype='M8[D]').astype('i8'), + (2000 - 1970)*365 + (2000 - 1972)//4 + 366) + assert_equal(np.array('2400', dtype='M8[D]').astype('i8'), + (2400 - 1970)*365 + (2400 - 1972)//4 - 3) + assert_equal(np.array('2401', dtype='M8[D]').astype('i8'), + (2400 - 1970)*365 + (2400 - 1972)//4 - 3 + 366) + + assert_equal(np.array('1600-02-29', dtype='M8[D]').astype('i8'), + (1600-1970)*365 - (1972-1600)//4 + 3 + 31 + 28) + assert_equal(np.array('1600-03-01', dtype='M8[D]').astype('i8'), + (1600-1970)*365 - (1972-1600)//4 + 3 + 31 + 29) + assert_equal(np.array('2000-02-29', 
dtype='M8[D]').astype('i8'), + (2000 - 1970)*365 + (2000 - 1972)//4 + 31 + 28) + assert_equal(np.array('2000-03-01', dtype='M8[D]').astype('i8'), + (2000 - 1970)*365 + (2000 - 1972)//4 + 31 + 29) + assert_equal(np.array('2001-03-22', dtype='M8[D]').astype('i8'), + (2000 - 1970)*365 + (2000 - 1972)//4 + 366 + 31 + 28 + 21) + + def test_days_to_pydate(self): + assert_equal(np.array('1599', dtype='M8[D]').astype('O'), + datetime.date(1599, 1, 1)) + assert_equal(np.array('1600', dtype='M8[D]').astype('O'), + datetime.date(1600, 1, 1)) + assert_equal(np.array('1601', dtype='M8[D]').astype('O'), + datetime.date(1601, 1, 1)) + assert_equal(np.array('1900', dtype='M8[D]').astype('O'), + datetime.date(1900, 1, 1)) + assert_equal(np.array('1901', dtype='M8[D]').astype('O'), + datetime.date(1901, 1, 1)) + assert_equal(np.array('2000', dtype='M8[D]').astype('O'), + datetime.date(2000, 1, 1)) + assert_equal(np.array('2001', dtype='M8[D]').astype('O'), + datetime.date(2001, 1, 1)) + assert_equal(np.array('1600-02-29', dtype='M8[D]').astype('O'), + datetime.date(1600, 2, 29)) + assert_equal(np.array('1600-03-01', dtype='M8[D]').astype('O'), + datetime.date(1600, 3, 1)) + assert_equal(np.array('2001-03-22', dtype='M8[D]').astype('O'), + datetime.date(2001, 3, 22)) + + def test_dtype_comparison(self): + assert_(not (np.dtype('M8[us]') == np.dtype('M8[ms]'))) + assert_(np.dtype('M8[us]') != np.dtype('M8[ms]')) + assert_(np.dtype('M8[2D]') != np.dtype('M8[D]')) + assert_(np.dtype('M8[D]') != np.dtype('M8[2D]')) + + def test_pydatetime_creation(self): + a = np.array(['1960-03-12', datetime.date(1960, 3, 12)], dtype='M8[D]') + assert_equal(a[0], a[1]) + a = np.array(['1999-12-31', datetime.date(1999, 12, 31)], dtype='M8[D]') + assert_equal(a[0], a[1]) + a = np.array(['2000-01-01', datetime.date(2000, 1, 1)], dtype='M8[D]') + assert_equal(a[0], a[1]) + # Will fail if the date changes during the exact right moment + a = np.array(['today', datetime.date.today()], dtype='M8[D]') + assert_equal(a[0], a[1]) + # datetime.datetime.now() returns local time, not UTC + #a = np.array(['now', datetime.datetime.now()], dtype='M8[s]') + #assert_equal(a[0], a[1]) + + # we can give a datetime.date time units + assert_equal(np.array(datetime.date(1960, 3, 12), dtype='M8[s]'), + np.array(np.datetime64('1960-03-12T00:00:00'))) + + def test_datetime_string_conversion(self): + a = ['2011-03-16', '1920-01-01', '2013-05-19'] + str_a = np.array(a, dtype='S') + uni_a = np.array(a, dtype='U') + dt_a = np.array(a, dtype='M') + + # String to datetime + assert_equal(dt_a, str_a.astype('M')) + assert_equal(dt_a.dtype, str_a.astype('M').dtype) + dt_b = np.empty_like(dt_a) + dt_b[...] = str_a + assert_equal(dt_a, dt_b) + + # Datetime to string + assert_equal(str_a, dt_a.astype('S0')) + str_b = np.empty_like(str_a) + str_b[...] = dt_a + assert_equal(str_a, str_b) + + # Unicode to datetime + assert_equal(dt_a, uni_a.astype('M')) + assert_equal(dt_a.dtype, uni_a.astype('M').dtype) + dt_b = np.empty_like(dt_a) + dt_b[...] = uni_a + assert_equal(dt_a, dt_b) + + # Datetime to unicode + assert_equal(uni_a, dt_a.astype('U')) + uni_b = np.empty_like(uni_a) + uni_b[...] = dt_a + assert_equal(uni_a, uni_b) + + # Datetime to long string - gh-9712 + assert_equal(str_a, dt_a.astype((np.bytes_, 128))) + str_b = np.empty(str_a.shape, dtype=(np.bytes_, 128)) + str_b[...] 
= dt_a + assert_equal(str_a, str_b) + + @pytest.mark.parametrize("time_dtype", ["m8[D]", "M8[Y]"]) + def test_time_byteswapping(self, time_dtype): + times = np.array(["2017", "NaT"], dtype=time_dtype) + times_swapped = times.astype(times.dtype.newbyteorder()) + assert_array_equal(times, times_swapped) + + unswapped = times_swapped.view(np.int64).newbyteorder() + assert_array_equal(unswapped, times.view(np.int64)) + + @pytest.mark.parametrize(["time1", "time2"], + [("M8[s]", "M8[D]"), ("m8[s]", "m8[ns]")]) + def test_time_byteswapped_cast(self, time1, time2): + dtype1 = np.dtype(time1) + dtype2 = np.dtype(time2) + times = np.array(["2017", "NaT"], dtype=dtype1) + expected = times.astype(dtype2) + + # Test that every byte-swapping combination also returns the same + # results (previous tests check that this comparison works fine). + res = times.astype(dtype1.newbyteorder()).astype(dtype2) + assert_array_equal(res, expected) + res = times.astype(dtype2.newbyteorder()) + assert_array_equal(res, expected) + res = times.astype(dtype1.newbyteorder()).astype(dtype2.newbyteorder()) + assert_array_equal(res, expected) + + @pytest.mark.parametrize("time_dtype", ["m8[D]", "M8[Y]"]) + @pytest.mark.parametrize("str_dtype", ["U", "S"]) + def test_datetime_conversions_byteorders(self, str_dtype, time_dtype): + times = np.array(["2017", "NaT"], dtype=time_dtype) + # Unfortunately, timedelta does not roundtrip: + from_strings = np.array(["2017", "NaT"], dtype=str_dtype) + to_strings = times.astype(str_dtype) # assume this is correct + + # Check that conversion from times to string works if src is swapped: + times_swapped = times.astype(times.dtype.newbyteorder()) + res = times_swapped.astype(str_dtype) + assert_array_equal(res, to_strings) + # And also if both are swapped: + res = times_swapped.astype(to_strings.dtype.newbyteorder()) + assert_array_equal(res, to_strings) + # only destination is swapped: + res = times.astype(to_strings.dtype.newbyteorder()) + assert_array_equal(res, to_strings) + + # Check that conversion from string to times works if src is swapped: + from_strings_swapped = from_strings.astype( + from_strings.dtype.newbyteorder()) + res = from_strings_swapped.astype(time_dtype) + assert_array_equal(res, times) + # And if both are swapped: + res = from_strings_swapped.astype(times.dtype.newbyteorder()) + assert_array_equal(res, times) + # Only destination is swapped: + res = from_strings.astype(times.dtype.newbyteorder()) + assert_array_equal(res, times) + + def test_datetime_array_str(self): + a = np.array(['2011-03-16', '1920-01-01', '2013-05-19'], dtype='M') + assert_equal(str(a), "['2011-03-16' '1920-01-01' '2013-05-19']") + + a = np.array(['2011-03-16T13:55', '1920-01-01T03:12'], dtype='M') + assert_equal(np.array2string(a, separator=', ', + formatter={'datetime': lambda x: + "'%s'" % np.datetime_as_string(x, timezone='UTC')}), + "['2011-03-16T13:55Z', '1920-01-01T03:12Z']") + + # Check that one NaT doesn't corrupt subsequent entries + a = np.array(['2010', 'NaT', '2030']).astype('M') + assert_equal(str(a), "['2010' 'NaT' '2030']") + + def test_timedelta_array_str(self): + a = np.array([-1, 0, 100], dtype='m') + assert_equal(str(a), "[ -1 0 100]") + a = np.array(['NaT', 'NaT'], dtype='m') + assert_equal(str(a), "['NaT' 'NaT']") + # Check right-alignment with NaTs + a = np.array([-1, 'NaT', 0], dtype='m') + assert_equal(str(a), "[ -1 'NaT' 0]") + a = np.array([-1, 'NaT', 1234567], dtype='m') + assert_equal(str(a), "[ -1 'NaT' 1234567]") + + # Test with other byteorder: + a = 
np.array([-1, 'NaT', 1234567], dtype='>m') + assert_equal(str(a), "[ -1 'NaT' 1234567]") + a = np.array([-1, 'NaT', 1234567], dtype='<m') + assert_equal(str(a), "[ -1 'NaT' 1234567]") + + def test_pickle(self): + # Check that pickle roundtripping works + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + dt = np.dtype('M8[7D]') + assert_equal(pickle.loads(pickle.dumps(dt, protocol=proto)), dt) + dt = np.dtype('M8[W]') + assert_equal(pickle.loads(pickle.dumps(dt, protocol=proto)), dt) + scalar = np.datetime64('2016-01-01T00:00:00.000000000') + assert_equal(pickle.loads(pickle.dumps(scalar, protocol=proto)), + scalar) + delta = scalar - np.datetime64('2015-01-01T00:00:00.000000000') + assert_equal(pickle.loads(pickle.dumps(delta, protocol=proto)), + delta) + + # Check that loading pickles from 1.6 works + pkl = b"cnumpy\ndtype\np0\n(S'M8'\np1\nI0\nI1\ntp2\nRp3\n" + \ + b"(I4\nS'<'\np4\nNNNI-1\nI-1\nI0\n((dp5\n(S'D'\np6\n" + \ + b"I7\nI1\nI1\ntp7\ntp8\ntp9\nb." + assert_equal(pickle.loads(pkl), np.dtype('<M8[7D]')) + pkl = b"cnumpy\ndtype\np0\n(S'M8'\np1\nI0\nI1\ntp2\nRp3\n" + \ + b"(I4\nS'<'\np4\nNNNI-1\nI-1\nI0\n((dp5\n(S'W'\np6\n" + \ + b"I1\nI1\nI1\ntp7\ntp8\ntp9\nb." + assert_equal(pickle.loads(pkl), np.dtype('<M8[W]')) + pkl = b"cnumpy\ndtype\np0\n(S'M8'\np1\nI0\nI1\ntp2\nRp3\n" + \ + b"(I4\nS'>'\np4\nNNNI-1\nI-1\nI0\n((dp5\n(S'us'\np6\n" + \ + b"I1\nI1\nI1\ntp7\ntp8\ntp9\nb." + assert_equal(pickle.loads(pkl), np.dtype('>M8[us]')) + + def test_setstate(self): + "Verify that datetime dtype __setstate__ can handle bad arguments" + dt = np.dtype('>M8[us]') + assert_raises(ValueError, dt.__setstate__, (4, '>', None, None, None, -1, -1, 0, 1)) + assert_(dt.__reduce__()[2] == np.dtype('>M8[us]').__reduce__()[2]) + assert_raises(TypeError, dt.__setstate__, (4, '>', None, None, None, -1, -1, 0, ({}, 'xxx'))) + assert_(dt.__reduce__()[2] == np.dtype('>M8[us]').__reduce__()[2]) + + def test_dtype_promotion(self): + # datetime <op> datetime computes the metadata gcd + # timedelta <op> timedelta computes the metadata gcd + for mM in ['m', 'M']: + assert_equal( + np.promote_types(np.dtype(mM+'8[2Y]'), np.dtype(mM+'8[2Y]')), + np.dtype(mM+'8[2Y]')) + assert_equal( + np.promote_types(np.dtype(mM+'8[12Y]'), np.dtype(mM+'8[15Y]')), + np.dtype(mM+'8[3Y]')) + assert_equal( + np.promote_types(np.dtype(mM+'8[62M]'), np.dtype(mM+'8[24M]')), + np.dtype(mM+'8[2M]')) + assert_equal( + np.promote_types(np.dtype(mM+'8[1W]'), np.dtype(mM+'8[2D]')), + np.dtype(mM+'8[1D]')) + assert_equal( + np.promote_types(np.dtype(mM+'8[W]'), np.dtype(mM+'8[13s]')), + np.dtype(mM+'8[s]')) + assert_equal( + np.promote_types(np.dtype(mM+'8[13W]'), np.dtype(mM+'8[49s]')), + np.dtype(mM+'8[7s]')) + # timedelta <op> timedelta raises when there is no reasonable gcd + assert_raises(TypeError, np.promote_types, + np.dtype('m8[Y]'), np.dtype('m8[D]')) + assert_raises(TypeError, np.promote_types, + np.dtype('m8[M]'), np.dtype('m8[W]')) + # timedelta and float cannot be safely cast with each other + assert_raises(TypeError, np.promote_types, "float32", "m8") + assert_raises(TypeError, np.promote_types, "m8", "float32") + assert_raises(TypeError, np.promote_types, "uint64", "m8") + assert_raises(TypeError, np.promote_types, "m8", "uint64") + + # timedelta <op> timedelta may overflow with big unit ranges + assert_raises(OverflowError, np.promote_types, + np.dtype('m8[W]'), np.dtype('m8[fs]')) + assert_raises(OverflowError, np.promote_types, + np.dtype('m8[s]'), np.dtype('m8[as]')) + + def test_cast_overflow(self): + # gh-4486 + def cast(): 
+ numpy.datetime64("1971-01-01 00:00:00.000000000000000").astype("<M8[D]") + assert_raises(OverflowError, cast) + + def cast2(): + numpy.datetime64("2014").astype("<M8[fs]") + assert_raises(OverflowError, cast2) + + def test_pyobject_roundtrip(self): + # All datetime types should be able to roundtrip through object + a = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, + -1020040340, -2942398, -1, 0, 1, 234523453, 1199164176], + dtype=np.int64) + # With date units + for unit in ['M8[D]', 'M8[W]', 'M8[M]', 'M8[Y]']: + b = a.copy().view(dtype=unit) + b[0] = '-0001-01-01' + b[1] = '-0001-12-31' + b[2] = '0000-01-01' + b[3] = '0001-01-01' + b[4] = '1969-12-31' + b[5] = '1970-01-01' + b[6] = '9999-12-31' + b[7] = '10000-01-01' + b[8] = 'NaT' + + assert_equal(b.astype(object).astype(unit), b, + "Error roundtripping unit %s" % unit) + # With time units + for unit in ['M8[as]', 'M8[16fs]', 'M8[ps]', 'M8[us]', + 'M8[300as]', 'M8[20us]']: + b = a.copy().view(dtype=unit) + b[0] = '-0001-01-01T00' + b[1] = '-0001-12-31T00' + b[2] = '0000-01-01T00' + b[3] = '0001-01-01T00' + b[4] = '1969-12-31T23:59:59.999999' + b[5] = '1970-01-01T00' + b[6] = '9999-12-31T23:59:59.999999' + b[7] = '10000-01-01T00' + b[8] = 'NaT' + + assert_equal(b.astype(object).astype(unit), b, + "Error roundtripping unit %s" % unit) + + def test_month_truncation(self): + # Make sure that months are truncating correctly + assert_equal(np.array('1945-03-01', dtype='M8[M]'), + np.array('1945-03-31', dtype='M8[M]')) + assert_equal(np.array('1969-11-01', dtype='M8[M]'), + np.array('1969-11-30T23:59:59.99999', dtype='M').astype('M8[M]')) + assert_equal(np.array('1969-12-01', dtype='M8[M]'), + np.array('1969-12-31T23:59:59.99999', dtype='M').astype('M8[M]')) + assert_equal(np.array('1970-01-01', dtype='M8[M]'), + np.array('1970-01-31T23:59:59.99999', dtype='M').astype('M8[M]')) + assert_equal(np.array('1980-02-01', dtype='M8[M]'), + np.array('1980-02-29T23:59:59.99999', dtype='M').astype('M8[M]')) + + def test_different_unit_comparison(self): + # Check some years with date units + for unit1 in ['Y', 'M', 'D']: + dt1 = np.dtype('M8[%s]' % unit1) + for unit2 in ['Y', 'M', 'D']: + dt2 = np.dtype('M8[%s]' % unit2) + assert_equal(np.array('1945', dtype=dt1), + np.array('1945', dtype=dt2)) + assert_equal(np.array('1970', dtype=dt1), + np.array('1970', dtype=dt2)) + assert_equal(np.array('9999', dtype=dt1), + np.array('9999', dtype=dt2)) + assert_equal(np.array('10000', dtype=dt1), + np.array('10000-01-01', dtype=dt2)) + assert_equal(np.datetime64('1945', unit1), + np.datetime64('1945', unit2)) + assert_equal(np.datetime64('1970', unit1), + np.datetime64('1970', unit2)) + assert_equal(np.datetime64('9999', unit1), + np.datetime64('9999', unit2)) + assert_equal(np.datetime64('10000', unit1), + np.datetime64('10000-01-01', unit2)) + # Check some datetimes with time units + for unit1 in ['6h', 'h', 'm', 's', '10ms', 'ms', 'us']: + dt1 = np.dtype('M8[%s]' % unit1) + for unit2 in ['h', 'm', 's', 'ms', 'us']: + dt2 = np.dtype('M8[%s]' % unit2) + assert_equal(np.array('1945-03-12T18', dtype=dt1), + np.array('1945-03-12T18', dtype=dt2)) + assert_equal(np.array('1970-03-12T18', dtype=dt1), + np.array('1970-03-12T18', dtype=dt2)) + assert_equal(np.array('9999-03-12T18', dtype=dt1), + np.array('9999-03-12T18', dtype=dt2)) + assert_equal(np.array('10000-01-01T00', dtype=dt1), + np.array('10000-01-01T00', dtype=dt2)) + assert_equal(np.datetime64('1945-03-12T18', unit1), + np.datetime64('1945-03-12T18', unit2)) + assert_equal(np.datetime64('1970-03-12T18', unit1), + 
np.datetime64('1970-03-12T18', unit2)) + assert_equal(np.datetime64('9999-03-12T18', unit1), + np.datetime64('9999-03-12T18', unit2)) + assert_equal(np.datetime64('10000-01-01T00', unit1), + np.datetime64('10000-01-01T00', unit2)) + # Check some days with units that won't overflow + for unit1 in ['D', '12h', 'h', 'm', 's', '4s', 'ms', 'us']: + dt1 = np.dtype('M8[%s]' % unit1) + for unit2 in ['D', 'h', 'm', 's', 'ms', 'us']: + dt2 = np.dtype('M8[%s]' % unit2) + assert_(np.equal(np.array('1932-02-17', dtype='M').astype(dt1), + np.array('1932-02-17T00:00:00', dtype='M').astype(dt2), + casting='unsafe')) + assert_(np.equal(np.array('10000-04-27', dtype='M').astype(dt1), + np.array('10000-04-27T00:00:00', dtype='M').astype(dt2), + casting='unsafe')) + + # Shouldn't be able to compare datetime and timedelta + a = np.array('2012-12-21', dtype='M8[D]') + b = np.array(3, dtype='m8[D]') + assert_raises(TypeError, np.less, a, b) + # not even if "unsafe" + assert_raises(TypeError, np.less, a, b, casting='unsafe') + + def test_datetime_like(self): + a = np.array([3], dtype='m8[4D]') + b = np.array(['2012-12-21'], dtype='M8[D]') + + assert_equal(np.ones_like(a).dtype, a.dtype) + assert_equal(np.zeros_like(a).dtype, a.dtype) + assert_equal(np.empty_like(a).dtype, a.dtype) + assert_equal(np.ones_like(b).dtype, b.dtype) + assert_equal(np.zeros_like(b).dtype, b.dtype) + assert_equal(np.empty_like(b).dtype, b.dtype) + + def test_datetime_unary(self): + for tda, tdb, tdzero, tdone, tdmone in \ + [ + # One-dimensional arrays + (np.array([3], dtype='m8[D]'), + np.array([-3], dtype='m8[D]'), + np.array([0], dtype='m8[D]'), + np.array([1], dtype='m8[D]'), + np.array([-1], dtype='m8[D]')), + # NumPy scalars + (np.timedelta64(3, '[D]'), + np.timedelta64(-3, '[D]'), + np.timedelta64(0, '[D]'), + np.timedelta64(1, '[D]'), + np.timedelta64(-1, '[D]'))]: + # negative ufunc + assert_equal(-tdb, tda) + assert_equal((-tdb).dtype, tda.dtype) + assert_equal(np.negative(tdb), tda) + assert_equal(np.negative(tdb).dtype, tda.dtype) + + # positive ufunc + assert_equal(np.positive(tda), tda) + assert_equal(np.positive(tda).dtype, tda.dtype) + assert_equal(np.positive(tdb), tdb) + assert_equal(np.positive(tdb).dtype, tdb.dtype) + + # absolute ufunc + assert_equal(np.absolute(tdb), tda) + assert_equal(np.absolute(tdb).dtype, tda.dtype) + + # sign ufunc + assert_equal(np.sign(tda), tdone) + assert_equal(np.sign(tdb), tdmone) + assert_equal(np.sign(tdzero), tdzero) + assert_equal(np.sign(tda).dtype, tda.dtype) + + # The ufuncs always produce native-endian results + assert_ + + def test_datetime_add(self): + for dta, dtb, dtc, dtnat, tda, tdb, tdc in \ + [ + # One-dimensional arrays + (np.array(['2012-12-21'], dtype='M8[D]'), + np.array(['2012-12-24'], dtype='M8[D]'), + np.array(['2012-12-21T11'], dtype='M8[h]'), + np.array(['NaT'], dtype='M8[D]'), + np.array([3], dtype='m8[D]'), + np.array([11], dtype='m8[h]'), + np.array([3*24 + 11], dtype='m8[h]')), + # NumPy scalars + (np.datetime64('2012-12-21', '[D]'), + np.datetime64('2012-12-24', '[D]'), + np.datetime64('2012-12-21T11', '[h]'), + np.datetime64('NaT', '[D]'), + np.timedelta64(3, '[D]'), + np.timedelta64(11, '[h]'), + np.timedelta64(3*24 + 11, '[h]'))]: + # m8 + m8 + assert_equal(tda + tdb, tdc) + assert_equal((tda + tdb).dtype, np.dtype('m8[h]')) + # m8 + bool + assert_equal(tdb + True, tdb + 1) + assert_equal((tdb + True).dtype, np.dtype('m8[h]')) + # m8 + int + assert_equal(tdb + 3*24, tdc) + assert_equal((tdb + 3*24).dtype, np.dtype('m8[h]')) + # bool + m8 + 
assert_equal(False + tdb, tdb) + assert_equal((False + tdb).dtype, np.dtype('m8[h]')) + # int + m8 + assert_equal(3*24 + tdb, tdc) + assert_equal((3*24 + tdb).dtype, np.dtype('m8[h]')) + # M8 + bool + assert_equal(dta + True, dta + 1) + assert_equal(dtnat + True, dtnat) + assert_equal((dta + True).dtype, np.dtype('M8[D]')) + # M8 + int + assert_equal(dta + 3, dtb) + assert_equal(dtnat + 3, dtnat) + assert_equal((dta + 3).dtype, np.dtype('M8[D]')) + # bool + M8 + assert_equal(False + dta, dta) + assert_equal(False + dtnat, dtnat) + assert_equal((False + dta).dtype, np.dtype('M8[D]')) + # int + M8 + assert_equal(3 + dta, dtb) + assert_equal(3 + dtnat, dtnat) + assert_equal((3 + dta).dtype, np.dtype('M8[D]')) + # M8 + m8 + assert_equal(dta + tda, dtb) + assert_equal(dtnat + tda, dtnat) + assert_equal((dta + tda).dtype, np.dtype('M8[D]')) + # m8 + M8 + assert_equal(tda + dta, dtb) + assert_equal(tda + dtnat, dtnat) + assert_equal((tda + dta).dtype, np.dtype('M8[D]')) + + # In M8 + m8, the result goes to higher precision + assert_equal(np.add(dta, tdb, casting='unsafe'), dtc) + assert_equal(np.add(dta, tdb, casting='unsafe').dtype, + np.dtype('M8[h]')) + assert_equal(np.add(tdb, dta, casting='unsafe'), dtc) + assert_equal(np.add(tdb, dta, casting='unsafe').dtype, + np.dtype('M8[h]')) + + # M8 + M8 + assert_raises(TypeError, np.add, dta, dtb) + + def test_datetime_subtract(self): + for dta, dtb, dtc, dtd, dte, dtnat, tda, tdb, tdc in \ + [ + # One-dimensional arrays + (np.array(['2012-12-21'], dtype='M8[D]'), + np.array(['2012-12-24'], dtype='M8[D]'), + np.array(['1940-12-24'], dtype='M8[D]'), + np.array(['1940-12-24T00'], dtype='M8[h]'), + np.array(['1940-12-23T13'], dtype='M8[h]'), + np.array(['NaT'], dtype='M8[D]'), + np.array([3], dtype='m8[D]'), + np.array([11], dtype='m8[h]'), + np.array([3*24 - 11], dtype='m8[h]')), + # NumPy scalars + (np.datetime64('2012-12-21', '[D]'), + np.datetime64('2012-12-24', '[D]'), + np.datetime64('1940-12-24', '[D]'), + np.datetime64('1940-12-24T00', '[h]'), + np.datetime64('1940-12-23T13', '[h]'), + np.datetime64('NaT', '[D]'), + np.timedelta64(3, '[D]'), + np.timedelta64(11, '[h]'), + np.timedelta64(3*24 - 11, '[h]'))]: + # m8 - m8 + assert_equal(tda - tdb, tdc) + assert_equal((tda - tdb).dtype, np.dtype('m8[h]')) + assert_equal(tdb - tda, -tdc) + assert_equal((tdb - tda).dtype, np.dtype('m8[h]')) + # m8 - bool + assert_equal(tdc - True, tdc - 1) + assert_equal((tdc - True).dtype, np.dtype('m8[h]')) + # m8 - int + assert_equal(tdc - 3*24, -tdb) + assert_equal((tdc - 3*24).dtype, np.dtype('m8[h]')) + # int - m8 + assert_equal(False - tdb, -tdb) + assert_equal((False - tdb).dtype, np.dtype('m8[h]')) + # int - m8 + assert_equal(3*24 - tdb, tdc) + assert_equal((3*24 - tdb).dtype, np.dtype('m8[h]')) + # M8 - bool + assert_equal(dtb - True, dtb - 1) + assert_equal(dtnat - True, dtnat) + assert_equal((dtb - True).dtype, np.dtype('M8[D]')) + # M8 - int + assert_equal(dtb - 3, dta) + assert_equal(dtnat - 3, dtnat) + assert_equal((dtb - 3).dtype, np.dtype('M8[D]')) + # M8 - m8 + assert_equal(dtb - tda, dta) + assert_equal(dtnat - tda, dtnat) + assert_equal((dtb - tda).dtype, np.dtype('M8[D]')) + + # In M8 - m8, the result goes to higher precision + assert_equal(np.subtract(dtc, tdb, casting='unsafe'), dte) + assert_equal(np.subtract(dtc, tdb, casting='unsafe').dtype, + np.dtype('M8[h]')) + + # M8 - M8 with different goes to higher precision + assert_equal(np.subtract(dtc, dtd, casting='unsafe'), + np.timedelta64(0, 'h')) + assert_equal(np.subtract(dtc, dtd, 
casting='unsafe').dtype, + np.dtype('m8[h]')) + assert_equal(np.subtract(dtd, dtc, casting='unsafe'), + np.timedelta64(0, 'h')) + assert_equal(np.subtract(dtd, dtc, casting='unsafe').dtype, + np.dtype('m8[h]')) + + # m8 - M8 + assert_raises(TypeError, np.subtract, tda, dta) + # bool - M8 + assert_raises(TypeError, np.subtract, False, dta) + # int - M8 + assert_raises(TypeError, np.subtract, 3, dta) + + def test_datetime_multiply(self): + for dta, tda, tdb, tdc in \ + [ + # One-dimensional arrays + (np.array(['2012-12-21'], dtype='M8[D]'), + np.array([6], dtype='m8[h]'), + np.array([9], dtype='m8[h]'), + np.array([12], dtype='m8[h]')), + # NumPy scalars + (np.datetime64('2012-12-21', '[D]'), + np.timedelta64(6, '[h]'), + np.timedelta64(9, '[h]'), + np.timedelta64(12, '[h]'))]: + # m8 * int + assert_equal(tda * 2, tdc) + assert_equal((tda * 2).dtype, np.dtype('m8[h]')) + # int * m8 + assert_equal(2 * tda, tdc) + assert_equal((2 * tda).dtype, np.dtype('m8[h]')) + # m8 * float + assert_equal(tda * 1.5, tdb) + assert_equal((tda * 1.5).dtype, np.dtype('m8[h]')) + # float * m8 + assert_equal(1.5 * tda, tdb) + assert_equal((1.5 * tda).dtype, np.dtype('m8[h]')) + + # m8 * m8 + assert_raises(TypeError, np.multiply, tda, tdb) + # m8 * M8 + assert_raises(TypeError, np.multiply, dta, tda) + # M8 * m8 + assert_raises(TypeError, np.multiply, tda, dta) + # M8 * int + assert_raises(TypeError, np.multiply, dta, 2) + # int * M8 + assert_raises(TypeError, np.multiply, 2, dta) + # M8 * float + assert_raises(TypeError, np.multiply, dta, 1.5) + # float * M8 + assert_raises(TypeError, np.multiply, 1.5, dta) + + # NaTs + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "invalid value encountered in multiply") + nat = np.timedelta64('NaT') + def check(a, b, res): + assert_equal(a * b, res) + assert_equal(b * a, res) + for tp in (int, float): + check(nat, tp(2), nat) + check(nat, tp(0), nat) + for f in (float('inf'), float('nan')): + check(np.timedelta64(1), f, nat) + check(np.timedelta64(0), f, nat) + check(nat, f, nat) + + @pytest.mark.parametrize("op1, op2, exp", [ + # m8 same units round down + (np.timedelta64(7, 's'), + np.timedelta64(4, 's'), + 1), + # m8 same units round down with negative + (np.timedelta64(7, 's'), + np.timedelta64(-4, 's'), + -2), + # m8 same units negative no round down + (np.timedelta64(8, 's'), + np.timedelta64(-4, 's'), + -2), + # m8 different units + (np.timedelta64(1, 'm'), + np.timedelta64(31, 's'), + 1), + # m8 generic units + (np.timedelta64(1890), + np.timedelta64(31), + 60), + # Y // M works + (np.timedelta64(2, 'Y'), + np.timedelta64('13', 'M'), + 1), + # handle 1D arrays + (np.array([1, 2, 3], dtype='m8'), + np.array([2], dtype='m8'), + np.array([0, 1, 1], dtype=np.int64)), + ]) + def test_timedelta_floor_divide(self, op1, op2, exp): + assert_equal(op1 // op2, exp) + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + @pytest.mark.parametrize("op1, op2", [ + # div by 0 + (np.timedelta64(10, 'us'), + np.timedelta64(0, 'us')), + # div with NaT + (np.timedelta64('NaT'), + np.timedelta64(50, 'us')), + # special case for int64 min + # in integer floor division + (np.timedelta64(np.iinfo(np.int64).min), + np.timedelta64(-1)), + ]) + def test_timedelta_floor_div_warnings(self, op1, op2): + with assert_warns(RuntimeWarning): + actual = op1 // op2 + assert_equal(actual, 0) + assert_equal(actual.dtype, np.int64) + + @pytest.mark.parametrize("val1, val2", [ + # the smallest integer that can't be represented + # exactly in a double should be preserved if we 
avoid + # casting to double in floordiv operation + (9007199254740993, 1), + # stress the alternate floordiv code path where + # operand signs don't match and remainder isn't 0 + (9007199254740999, -2), + ]) + def test_timedelta_floor_div_precision(self, val1, val2): + op1 = np.timedelta64(val1) + op2 = np.timedelta64(val2) + actual = op1 // op2 + # Python reference integer floor + expected = val1 // val2 + assert_equal(actual, expected) + + @pytest.mark.parametrize("val1, val2", [ + # years and months sometimes can't be unambiguously + # divided for floor division operation + (np.timedelta64(7, 'Y'), + np.timedelta64(3, 's')), + (np.timedelta64(7, 'M'), + np.timedelta64(1, 'D')), + ]) + def test_timedelta_floor_div_error(self, val1, val2): + with assert_raises_regex(TypeError, "common metadata divisor"): + val1 // val2 + + @pytest.mark.parametrize("op1, op2", [ + # reuse the test cases from floordiv + (np.timedelta64(7, 's'), + np.timedelta64(4, 's')), + # m8 same units round down with negative + (np.timedelta64(7, 's'), + np.timedelta64(-4, 's')), + # m8 same units negative no round down + (np.timedelta64(8, 's'), + np.timedelta64(-4, 's')), + # m8 different units + (np.timedelta64(1, 'm'), + np.timedelta64(31, 's')), + # m8 generic units + (np.timedelta64(1890), + np.timedelta64(31)), + # Y // M works + (np.timedelta64(2, 'Y'), + np.timedelta64('13', 'M')), + # handle 1D arrays + (np.array([1, 2, 3], dtype='m8'), + np.array([2], dtype='m8')), + ]) + def test_timedelta_divmod(self, op1, op2): + expected = (op1 // op2, op1 % op2) + assert_equal(divmod(op1, op2), expected) + + @pytest.mark.skipif(IS_WASM, reason="does not work in wasm") + @pytest.mark.parametrize("op1, op2", [ + # reuse cases from floordiv + # div by 0 + (np.timedelta64(10, 'us'), + np.timedelta64(0, 'us')), + # div with NaT + (np.timedelta64('NaT'), + np.timedelta64(50, 'us')), + # special case for int64 min + # in integer floor division + (np.timedelta64(np.iinfo(np.int64).min), + np.timedelta64(-1)), + ]) + def test_timedelta_divmod_warnings(self, op1, op2): + with assert_warns(RuntimeWarning): + expected = (op1 // op2, op1 % op2) + with assert_warns(RuntimeWarning): + actual = divmod(op1, op2) + assert_equal(actual, expected) + + def test_datetime_divide(self): + for dta, tda, tdb, tdc, tdd in \ + [ + # One-dimensional arrays + (np.array(['2012-12-21'], dtype='M8[D]'), + np.array([6], dtype='m8[h]'), + np.array([9], dtype='m8[h]'), + np.array([12], dtype='m8[h]'), + np.array([6], dtype='m8[m]')), + # NumPy scalars + (np.datetime64('2012-12-21', '[D]'), + np.timedelta64(6, '[h]'), + np.timedelta64(9, '[h]'), + np.timedelta64(12, '[h]'), + np.timedelta64(6, '[m]'))]: + # m8 / int + assert_equal(tdc / 2, tda) + assert_equal((tdc / 2).dtype, np.dtype('m8[h]')) + # m8 / float + assert_equal(tda / 0.5, tdc) + assert_equal((tda / 0.5).dtype, np.dtype('m8[h]')) + # m8 / m8 + assert_equal(tda / tdb, 6 / 9) + assert_equal(np.divide(tda, tdb), 6 / 9) + assert_equal(np.true_divide(tda, tdb), 6 / 9) + assert_equal(tdb / tda, 9 / 6) + assert_equal((tda / tdb).dtype, np.dtype('f8')) + assert_equal(tda / tdd, 60) + assert_equal(tdd / tda, 1 / 60) + + # int / m8 + assert_raises(TypeError, np.divide, 2, tdb) + # float / m8 + assert_raises(TypeError, np.divide, 0.5, tdb) + # m8 / M8 + assert_raises(TypeError, np.divide, dta, tda) + # M8 / m8 + assert_raises(TypeError, np.divide, tda, dta) + # M8 / int + assert_raises(TypeError, np.divide, dta, 2) + # int / M8 + assert_raises(TypeError, np.divide, 2, dta) + # M8 / float + 
assert_raises(TypeError, np.divide, dta, 1.5) + # float / M8 + assert_raises(TypeError, np.divide, 1.5, dta) + + # NaTs + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, r".*encountered in divide") + nat = np.timedelta64('NaT') + for tp in (int, float): + assert_equal(np.timedelta64(1) / tp(0), nat) + assert_equal(np.timedelta64(0) / tp(0), nat) + assert_equal(nat / tp(0), nat) + assert_equal(nat / tp(2), nat) + # Division by inf + assert_equal(np.timedelta64(1) / float('inf'), np.timedelta64(0)) + assert_equal(np.timedelta64(0) / float('inf'), np.timedelta64(0)) + assert_equal(nat / float('inf'), nat) + # Division by nan + assert_equal(np.timedelta64(1) / float('nan'), nat) + assert_equal(np.timedelta64(0) / float('nan'), nat) + assert_equal(nat / float('nan'), nat) + + def test_datetime_compare(self): + # Test all the comparison operators + a = np.datetime64('2000-03-12T18:00:00.000000') + b = np.array(['2000-03-12T18:00:00.000000', + '2000-03-12T17:59:59.999999', + '2000-03-12T18:00:00.000001', + '1970-01-11T12:00:00.909090', + '2016-01-11T12:00:00.909090'], + dtype='datetime64[us]') + assert_equal(np.equal(a, b), [1, 0, 0, 0, 0]) + assert_equal(np.not_equal(a, b), [0, 1, 1, 1, 1]) + assert_equal(np.less(a, b), [0, 0, 1, 0, 1]) + assert_equal(np.less_equal(a, b), [1, 0, 1, 0, 1]) + assert_equal(np.greater(a, b), [0, 1, 0, 1, 0]) + assert_equal(np.greater_equal(a, b), [1, 1, 0, 1, 0]) + + def test_datetime_compare_nat(self): + dt_nat = np.datetime64('NaT', 'D') + dt_other = np.datetime64('2000-01-01') + td_nat = np.timedelta64('NaT', 'h') + td_other = np.timedelta64(1, 'h') + + for op in [np.equal, np.less, np.less_equal, + np.greater, np.greater_equal]: + assert_(not op(dt_nat, dt_nat)) + assert_(not op(dt_nat, dt_other)) + assert_(not op(dt_other, dt_nat)) + + assert_(not op(td_nat, td_nat)) + assert_(not op(td_nat, td_other)) + assert_(not op(td_other, td_nat)) + + assert_(np.not_equal(dt_nat, dt_nat)) + assert_(np.not_equal(dt_nat, dt_other)) + assert_(np.not_equal(dt_other, dt_nat)) + + assert_(np.not_equal(td_nat, td_nat)) + assert_(np.not_equal(td_nat, td_other)) + assert_(np.not_equal(td_other, td_nat)) + + def test_datetime_minmax(self): + # The metadata of the result should become the GCD + # of the operand metadata + a = np.array('1999-03-12T13', dtype='M8[2m]') + b = np.array('1999-03-12T12', dtype='M8[s]') + assert_equal(np.minimum(a, b), b) + assert_equal(np.minimum(a, b).dtype, np.dtype('M8[s]')) + assert_equal(np.fmin(a, b), b) + assert_equal(np.fmin(a, b).dtype, np.dtype('M8[s]')) + assert_equal(np.maximum(a, b), a) + assert_equal(np.maximum(a, b).dtype, np.dtype('M8[s]')) + assert_equal(np.fmax(a, b), a) + assert_equal(np.fmax(a, b).dtype, np.dtype('M8[s]')) + # Viewed as integers, the comparison is opposite because + # of the units chosen + assert_equal(np.minimum(a.view('i8'), b.view('i8')), a.view('i8')) + + # Interaction with NaT + a = np.array('1999-03-12T13', dtype='M8[2m]') + dtnat = np.array('NaT', dtype='M8[h]') + assert_equal(np.minimum(a, dtnat), dtnat) + assert_equal(np.minimum(dtnat, a), dtnat) + assert_equal(np.maximum(a, dtnat), dtnat) + assert_equal(np.maximum(dtnat, a), dtnat) + assert_equal(np.fmin(dtnat, a), a) + assert_equal(np.fmin(a, dtnat), a) + assert_equal(np.fmax(dtnat, a), a) + assert_equal(np.fmax(a, dtnat), a) + + # Also do timedelta + a = np.array(3, dtype='m8[h]') + b = np.array(3*3600 - 3, dtype='m8[s]') + assert_equal(np.minimum(a, b), b) + assert_equal(np.minimum(a, b).dtype, np.dtype('m8[s]')) + assert_equal(np.fmin(a, 
b), b) + assert_equal(np.fmin(a, b).dtype, np.dtype('m8[s]')) + assert_equal(np.maximum(a, b), a) + assert_equal(np.maximum(a, b).dtype, np.dtype('m8[s]')) + assert_equal(np.fmax(a, b), a) + assert_equal(np.fmax(a, b).dtype, np.dtype('m8[s]')) + # Viewed as integers, the comparison is opposite because + # of the units chosen + assert_equal(np.minimum(a.view('i8'), b.view('i8')), a.view('i8')) + + # should raise between datetime and timedelta + # + # TODO: Allowing unsafe casting by + # default in ufuncs strikes again... :( + a = np.array(3, dtype='m8[h]') + b = np.array('1999-03-12T12', dtype='M8[s]') + #assert_raises(TypeError, np.minimum, a, b) + #assert_raises(TypeError, np.maximum, a, b) + #assert_raises(TypeError, np.fmin, a, b) + #assert_raises(TypeError, np.fmax, a, b) + assert_raises(TypeError, np.minimum, a, b, casting='same_kind') + assert_raises(TypeError, np.maximum, a, b, casting='same_kind') + assert_raises(TypeError, np.fmin, a, b, casting='same_kind') + assert_raises(TypeError, np.fmax, a, b, casting='same_kind') + + def test_hours(self): + t = np.ones(3, dtype='M8[s]') + t[0] = 60*60*24 + 60*60*10 + assert_(t[0].item().hour == 10) + + def test_divisor_conversion_year(self): + assert_(np.dtype('M8[Y/4]') == np.dtype('M8[3M]')) + assert_(np.dtype('M8[Y/13]') == np.dtype('M8[4W]')) + assert_(np.dtype('M8[3Y/73]') == np.dtype('M8[15D]')) + + def test_divisor_conversion_month(self): + assert_(np.dtype('M8[M/2]') == np.dtype('M8[2W]')) + assert_(np.dtype('M8[M/15]') == np.dtype('M8[2D]')) + assert_(np.dtype('M8[3M/40]') == np.dtype('M8[54h]')) + + def test_divisor_conversion_week(self): + assert_(np.dtype('m8[W/7]') == np.dtype('m8[D]')) + assert_(np.dtype('m8[3W/14]') == np.dtype('m8[36h]')) + assert_(np.dtype('m8[5W/140]') == np.dtype('m8[360m]')) + + def test_divisor_conversion_day(self): + assert_(np.dtype('M8[D/12]') == np.dtype('M8[2h]')) + assert_(np.dtype('M8[D/120]') == np.dtype('M8[12m]')) + assert_(np.dtype('M8[3D/960]') == np.dtype('M8[270s]')) + + def test_divisor_conversion_hour(self): + assert_(np.dtype('m8[h/30]') == np.dtype('m8[2m]')) + assert_(np.dtype('m8[3h/300]') == np.dtype('m8[36s]')) + + def test_divisor_conversion_minute(self): + assert_(np.dtype('m8[m/30]') == np.dtype('m8[2s]')) + assert_(np.dtype('m8[3m/300]') == np.dtype('m8[600ms]')) + + def test_divisor_conversion_second(self): + assert_(np.dtype('m8[s/100]') == np.dtype('m8[10ms]')) + assert_(np.dtype('m8[3s/10000]') == np.dtype('m8[300us]')) + + def test_divisor_conversion_fs(self): + assert_(np.dtype('M8[fs/100]') == np.dtype('M8[10as]')) + assert_raises(ValueError, lambda: np.dtype('M8[3fs/10000]')) + + def test_divisor_conversion_as(self): + assert_raises(ValueError, lambda: np.dtype('M8[as/10]')) + + def test_string_parser_variants(self): + # Allow space instead of 'T' between date and time + assert_equal(np.array(['1980-02-29T01:02:03'], np.dtype('M8[s]')), + np.array(['1980-02-29 01:02:03'], np.dtype('M8[s]'))) + # Allow positive years + assert_equal(np.array(['+1980-02-29T01:02:03'], np.dtype('M8[s]')), + np.array(['+1980-02-29 01:02:03'], np.dtype('M8[s]'))) + # Allow negative years + assert_equal(np.array(['-1980-02-29T01:02:03'], np.dtype('M8[s]')), + np.array(['-1980-02-29 01:02:03'], np.dtype('M8[s]'))) + # UTC specifier + with assert_warns(DeprecationWarning): + assert_equal( + np.array(['+1980-02-29T01:02:03'], np.dtype('M8[s]')), + np.array(['+1980-02-29 01:02:03Z'], np.dtype('M8[s]'))) + with assert_warns(DeprecationWarning): + assert_equal( + np.array(['-1980-02-29T01:02:03'], 
np.dtype('M8[s]')), + np.array(['-1980-02-29 01:02:03Z'], np.dtype('M8[s]'))) + # Time zone offset + with assert_warns(DeprecationWarning): + assert_equal( + np.array(['1980-02-29T02:02:03'], np.dtype('M8[s]')), + np.array(['1980-02-29 00:32:03-0130'], np.dtype('M8[s]'))) + with assert_warns(DeprecationWarning): + assert_equal( + np.array(['1980-02-28T22:32:03'], np.dtype('M8[s]')), + np.array(['1980-02-29 00:02:03+01:30'], np.dtype('M8[s]'))) + with assert_warns(DeprecationWarning): + assert_equal( + np.array(['1980-02-29T02:32:03.506'], np.dtype('M8[s]')), + np.array(['1980-02-29 00:32:03.506-02'], np.dtype('M8[s]'))) + with assert_warns(DeprecationWarning): + assert_equal(np.datetime64('1977-03-02T12:30-0230'), + np.datetime64('1977-03-02T15:00')) + + def test_string_parser_error_check(self): + # Arbitrary bad string + assert_raises(ValueError, np.array, ['badvalue'], np.dtype('M8[us]')) + # Character after year must be '-' + assert_raises(ValueError, np.array, ['1980X'], np.dtype('M8[us]')) + # Cannot have trailing '-' + assert_raises(ValueError, np.array, ['1980-'], np.dtype('M8[us]')) + # Month must be in range [1,12] + assert_raises(ValueError, np.array, ['1980-00'], np.dtype('M8[us]')) + assert_raises(ValueError, np.array, ['1980-13'], np.dtype('M8[us]')) + # Month must have two digits + assert_raises(ValueError, np.array, ['1980-1'], np.dtype('M8[us]')) + assert_raises(ValueError, np.array, ['1980-1-02'], np.dtype('M8[us]')) + # 'Mor' is not a valid month + assert_raises(ValueError, np.array, ['1980-Mor'], np.dtype('M8[us]')) + # Cannot have trailing '-' + assert_raises(ValueError, np.array, ['1980-01-'], np.dtype('M8[us]')) + # Day must be in range [1,len(month)] + assert_raises(ValueError, np.array, ['1980-01-0'], np.dtype('M8[us]')) + assert_raises(ValueError, np.array, ['1980-01-00'], np.dtype('M8[us]')) + assert_raises(ValueError, np.array, ['1980-01-32'], np.dtype('M8[us]')) + assert_raises(ValueError, np.array, ['1979-02-29'], np.dtype('M8[us]')) + assert_raises(ValueError, np.array, ['1980-02-30'], np.dtype('M8[us]')) + assert_raises(ValueError, np.array, ['1980-03-32'], np.dtype('M8[us]')) + assert_raises(ValueError, np.array, ['1980-04-31'], np.dtype('M8[us]')) + assert_raises(ValueError, np.array, ['1980-05-32'], np.dtype('M8[us]')) + assert_raises(ValueError, np.array, ['1980-06-31'], np.dtype('M8[us]')) + assert_raises(ValueError, np.array, ['1980-07-32'], np.dtype('M8[us]')) + assert_raises(ValueError, np.array, ['1980-08-32'], np.dtype('M8[us]')) + assert_raises(ValueError, np.array, ['1980-09-31'], np.dtype('M8[us]')) + assert_raises(ValueError, np.array, ['1980-10-32'], np.dtype('M8[us]')) + assert_raises(ValueError, np.array, ['1980-11-31'], np.dtype('M8[us]')) + assert_raises(ValueError, np.array, ['1980-12-32'], np.dtype('M8[us]')) + # Cannot have trailing characters + assert_raises(ValueError, np.array, ['1980-02-03%'], + np.dtype('M8[us]')) + assert_raises(ValueError, np.array, ['1980-02-03 q'], + np.dtype('M8[us]')) + + # Hours must be in range [0, 23] + assert_raises(ValueError, np.array, ['1980-02-03 25'], + np.dtype('M8[us]')) + assert_raises(ValueError, np.array, ['1980-02-03T25'], + np.dtype('M8[us]')) + assert_raises(ValueError, np.array, ['1980-02-03 24:01'], + np.dtype('M8[us]')) + assert_raises(ValueError, np.array, ['1980-02-03T24:01'], + np.dtype('M8[us]')) + assert_raises(ValueError, np.array, ['1980-02-03 -1'], + np.dtype('M8[us]')) + # No trailing ':' + assert_raises(ValueError, np.array, ['1980-02-03 01:'], + np.dtype('M8[us]')) + # Minutes 
must be in range [0, 59] + assert_raises(ValueError, np.array, ['1980-02-03 01:-1'], + np.dtype('M8[us]')) + assert_raises(ValueError, np.array, ['1980-02-03 01:60'], + np.dtype('M8[us]')) + # No trailing ':' + assert_raises(ValueError, np.array, ['1980-02-03 01:60:'], + np.dtype('M8[us]')) + # Seconds must be in range [0, 59] + assert_raises(ValueError, np.array, ['1980-02-03 01:10:-1'], + np.dtype('M8[us]')) + assert_raises(ValueError, np.array, ['1980-02-03 01:01:60'], + np.dtype('M8[us]')) + # Timezone offset must within a reasonable range + with assert_warns(DeprecationWarning): + assert_raises(ValueError, np.array, ['1980-02-03 01:01:00+0661'], + np.dtype('M8[us]')) + with assert_warns(DeprecationWarning): + assert_raises(ValueError, np.array, ['1980-02-03 01:01:00+2500'], + np.dtype('M8[us]')) + with assert_warns(DeprecationWarning): + assert_raises(ValueError, np.array, ['1980-02-03 01:01:00-0070'], + np.dtype('M8[us]')) + with assert_warns(DeprecationWarning): + assert_raises(ValueError, np.array, ['1980-02-03 01:01:00-3000'], + np.dtype('M8[us]')) + with assert_warns(DeprecationWarning): + assert_raises(ValueError, np.array, ['1980-02-03 01:01:00-25:00'], + np.dtype('M8[us]')) + + def test_creation_overflow(self): + date = '1980-03-23 20:00:00' + timesteps = np.array([date], dtype='datetime64[s]')[0].astype(np.int64) + for unit in ['ms', 'us', 'ns']: + timesteps *= 1000 + x = np.array([date], dtype='datetime64[%s]' % unit) + + assert_equal(timesteps, x[0].astype(np.int64), + err_msg='Datetime conversion error for unit %s' % unit) + + assert_equal(x[0].astype(np.int64), 322689600000000000) + + # gh-13062 + with pytest.raises(OverflowError): + np.datetime64(2**64, 'D') + with pytest.raises(OverflowError): + np.timedelta64(2**64, 'D') + + def test_datetime_as_string(self): + # Check all the units with default string conversion + date = '1959-10-13' + datetime = '1959-10-13T12:34:56.789012345678901234' + + assert_equal(np.datetime_as_string(np.datetime64(date, 'Y')), + '1959') + assert_equal(np.datetime_as_string(np.datetime64(date, 'M')), + '1959-10') + assert_equal(np.datetime_as_string(np.datetime64(date, 'D')), + '1959-10-13') + assert_equal(np.datetime_as_string(np.datetime64(datetime, 'h')), + '1959-10-13T12') + assert_equal(np.datetime_as_string(np.datetime64(datetime, 'm')), + '1959-10-13T12:34') + assert_equal(np.datetime_as_string(np.datetime64(datetime, 's')), + '1959-10-13T12:34:56') + assert_equal(np.datetime_as_string(np.datetime64(datetime, 'ms')), + '1959-10-13T12:34:56.789') + for us in ['us', 'μs', b'us']: # check non-ascii and bytes too + assert_equal(np.datetime_as_string(np.datetime64(datetime, us)), + '1959-10-13T12:34:56.789012') + + datetime = '1969-12-31T23:34:56.789012345678901234' + + assert_equal(np.datetime_as_string(np.datetime64(datetime, 'ns')), + '1969-12-31T23:34:56.789012345') + assert_equal(np.datetime_as_string(np.datetime64(datetime, 'ps')), + '1969-12-31T23:34:56.789012345678') + assert_equal(np.datetime_as_string(np.datetime64(datetime, 'fs')), + '1969-12-31T23:34:56.789012345678901') + + datetime = '1969-12-31T23:59:57.789012345678901234' + + assert_equal(np.datetime_as_string(np.datetime64(datetime, 'as')), + datetime) + datetime = '1970-01-01T00:34:56.789012345678901234' + + assert_equal(np.datetime_as_string(np.datetime64(datetime, 'ns')), + '1970-01-01T00:34:56.789012345') + assert_equal(np.datetime_as_string(np.datetime64(datetime, 'ps')), + '1970-01-01T00:34:56.789012345678') + assert_equal(np.datetime_as_string(np.datetime64(datetime, 
'fs')), + '1970-01-01T00:34:56.789012345678901') + + datetime = '1970-01-01T00:00:05.789012345678901234' + + assert_equal(np.datetime_as_string(np.datetime64(datetime, 'as')), + datetime) + + # String conversion with the unit= parameter + a = np.datetime64('2032-07-18T12:23:34.123456', 'us') + assert_equal(np.datetime_as_string(a, unit='Y', casting='unsafe'), + '2032') + assert_equal(np.datetime_as_string(a, unit='M', casting='unsafe'), + '2032-07') + assert_equal(np.datetime_as_string(a, unit='W', casting='unsafe'), + '2032-07-18') + assert_equal(np.datetime_as_string(a, unit='D', casting='unsafe'), + '2032-07-18') + assert_equal(np.datetime_as_string(a, unit='h'), '2032-07-18T12') + assert_equal(np.datetime_as_string(a, unit='m'), + '2032-07-18T12:23') + assert_equal(np.datetime_as_string(a, unit='s'), + '2032-07-18T12:23:34') + assert_equal(np.datetime_as_string(a, unit='ms'), + '2032-07-18T12:23:34.123') + assert_equal(np.datetime_as_string(a, unit='us'), + '2032-07-18T12:23:34.123456') + assert_equal(np.datetime_as_string(a, unit='ns'), + '2032-07-18T12:23:34.123456000') + assert_equal(np.datetime_as_string(a, unit='ps'), + '2032-07-18T12:23:34.123456000000') + assert_equal(np.datetime_as_string(a, unit='fs'), + '2032-07-18T12:23:34.123456000000000') + assert_equal(np.datetime_as_string(a, unit='as'), + '2032-07-18T12:23:34.123456000000000000') + + # unit='auto' parameter + assert_equal(np.datetime_as_string( + np.datetime64('2032-07-18T12:23:34.123456', 'us'), unit='auto'), + '2032-07-18T12:23:34.123456') + assert_equal(np.datetime_as_string( + np.datetime64('2032-07-18T12:23:34.12', 'us'), unit='auto'), + '2032-07-18T12:23:34.120') + assert_equal(np.datetime_as_string( + np.datetime64('2032-07-18T12:23:34', 'us'), unit='auto'), + '2032-07-18T12:23:34') + assert_equal(np.datetime_as_string( + np.datetime64('2032-07-18T12:23:00', 'us'), unit='auto'), + '2032-07-18T12:23') + # 'auto' doesn't split up hour and minute + assert_equal(np.datetime_as_string( + np.datetime64('2032-07-18T12:00:00', 'us'), unit='auto'), + '2032-07-18T12:00') + assert_equal(np.datetime_as_string( + np.datetime64('2032-07-18T00:00:00', 'us'), unit='auto'), + '2032-07-18') + # 'auto' doesn't split up the date + assert_equal(np.datetime_as_string( + np.datetime64('2032-07-01T00:00:00', 'us'), unit='auto'), + '2032-07-01') + assert_equal(np.datetime_as_string( + np.datetime64('2032-01-01T00:00:00', 'us'), unit='auto'), + '2032-01-01') + + @pytest.mark.skipif(not _has_pytz, reason="The pytz module is not available.") + def test_datetime_as_string_timezone(self): + # timezone='local' vs 'UTC' + a = np.datetime64('2010-03-15T06:30', 'm') + assert_equal(np.datetime_as_string(a), + '2010-03-15T06:30') + assert_equal(np.datetime_as_string(a, timezone='naive'), + '2010-03-15T06:30') + assert_equal(np.datetime_as_string(a, timezone='UTC'), + '2010-03-15T06:30Z') + assert_(np.datetime_as_string(a, timezone='local') != + '2010-03-15T06:30') + + b = np.datetime64('2010-02-15T06:30', 'm') + + assert_equal(np.datetime_as_string(a, timezone=tz('US/Central')), + '2010-03-15T01:30-0500') + assert_equal(np.datetime_as_string(a, timezone=tz('US/Eastern')), + '2010-03-15T02:30-0400') + assert_equal(np.datetime_as_string(a, timezone=tz('US/Pacific')), + '2010-03-14T23:30-0700') + + assert_equal(np.datetime_as_string(b, timezone=tz('US/Central')), + '2010-02-15T00:30-0600') + assert_equal(np.datetime_as_string(b, timezone=tz('US/Eastern')), + '2010-02-15T01:30-0500') + assert_equal(np.datetime_as_string(b, timezone=tz('US/Pacific')), + 
'2010-02-14T22:30-0800') + + # Dates to strings with a timezone attached is disabled by default + assert_raises(TypeError, np.datetime_as_string, a, unit='D', + timezone=tz('US/Pacific')) + # Check that we can print out the date in the specified time zone + assert_equal(np.datetime_as_string(a, unit='D', + timezone=tz('US/Pacific'), casting='unsafe'), + '2010-03-14') + assert_equal(np.datetime_as_string(b, unit='D', + timezone=tz('US/Central'), casting='unsafe'), + '2010-02-15') + + def test_datetime_arange(self): + # With two datetimes provided as strings + a = np.arange('2010-01-05', '2010-01-10', dtype='M8[D]') + assert_equal(a.dtype, np.dtype('M8[D]')) + assert_equal(a, + np.array(['2010-01-05', '2010-01-06', '2010-01-07', + '2010-01-08', '2010-01-09'], dtype='M8[D]')) + + a = np.arange('1950-02-10', '1950-02-06', -1, dtype='M8[D]') + assert_equal(a.dtype, np.dtype('M8[D]')) + assert_equal(a, + np.array(['1950-02-10', '1950-02-09', '1950-02-08', + '1950-02-07'], dtype='M8[D]')) + + # Unit should be detected as months here + a = np.arange('1969-05', '1970-05', 2, dtype='M8') + assert_equal(a.dtype, np.dtype('M8[M]')) + assert_equal(a, + np.datetime64('1969-05') + np.arange(12, step=2)) + + # datetime, integer|timedelta works as well + # produces arange (start, start + stop) in this case + a = np.arange('1969', 18, 3, dtype='M8') + assert_equal(a.dtype, np.dtype('M8[Y]')) + assert_equal(a, + np.datetime64('1969') + np.arange(18, step=3)) + a = np.arange('1969-12-19', 22, np.timedelta64(2), dtype='M8') + assert_equal(a.dtype, np.dtype('M8[D]')) + assert_equal(a, + np.datetime64('1969-12-19') + np.arange(22, step=2)) + + # Step of 0 is disallowed + assert_raises(ValueError, np.arange, np.datetime64('today'), + np.datetime64('today') + 3, 0) + # Promotion across nonlinear unit boundaries is disallowed + assert_raises(TypeError, np.arange, np.datetime64('2011-03-01', 'D'), + np.timedelta64(5, 'M')) + assert_raises(TypeError, np.arange, + np.datetime64('2012-02-03T14', 's'), + np.timedelta64(5, 'Y')) + + def test_datetime_arange_no_dtype(self): + d = np.array('2010-01-04', dtype="M8[D]") + assert_equal(np.arange(d, d + 1), d) + assert_raises(ValueError, np.arange, d) + + def test_timedelta_arange(self): + a = np.arange(3, 10, dtype='m8') + assert_equal(a.dtype, np.dtype('m8')) + assert_equal(a, np.timedelta64(0) + np.arange(3, 10)) + + a = np.arange(np.timedelta64(3, 's'), 10, 2, dtype='m8') + assert_equal(a.dtype, np.dtype('m8[s]')) + assert_equal(a, np.timedelta64(0, 's') + np.arange(3, 10, 2)) + + # Step of 0 is disallowed + assert_raises(ValueError, np.arange, np.timedelta64(0), + np.timedelta64(5), 0) + # Promotion across nonlinear unit boundaries is disallowed + assert_raises(TypeError, np.arange, np.timedelta64(0, 'D'), + np.timedelta64(5, 'M')) + assert_raises(TypeError, np.arange, np.timedelta64(0, 'Y'), + np.timedelta64(5, 'D')) + + @pytest.mark.parametrize("val1, val2, expected", [ + # case from gh-12092 + (np.timedelta64(7, 's'), + np.timedelta64(3, 's'), + np.timedelta64(1, 's')), + # negative value cases + (np.timedelta64(3, 's'), + np.timedelta64(-2, 's'), + np.timedelta64(-1, 's')), + (np.timedelta64(-3, 's'), + np.timedelta64(2, 's'), + np.timedelta64(1, 's')), + # larger value cases + (np.timedelta64(17, 's'), + np.timedelta64(22, 's'), + np.timedelta64(17, 's')), + (np.timedelta64(22, 's'), + np.timedelta64(17, 's'), + np.timedelta64(5, 's')), + # different units + (np.timedelta64(1, 'm'), + np.timedelta64(57, 's'), + np.timedelta64(3, 's')), + (np.timedelta64(1, 'us'), + 
np.timedelta64(727, 'ns'), + np.timedelta64(273, 'ns')), + # NaT is propagated + (np.timedelta64('NaT'), + np.timedelta64(50, 'ns'), + np.timedelta64('NaT')), + # Y % M works + (np.timedelta64(2, 'Y'), + np.timedelta64(22, 'M'), + np.timedelta64(2, 'M')), + ]) + def test_timedelta_modulus(self, val1, val2, expected): + assert_equal(val1 % val2, expected) + + @pytest.mark.parametrize("val1, val2", [ + # years and months sometimes can't be unambiguously + # divided for modulus operation + (np.timedelta64(7, 'Y'), + np.timedelta64(3, 's')), + (np.timedelta64(7, 'M'), + np.timedelta64(1, 'D')), + ]) + def test_timedelta_modulus_error(self, val1, val2): + with assert_raises_regex(TypeError, "common metadata divisor"): + val1 % val2 + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + def test_timedelta_modulus_div_by_zero(self): + with assert_warns(RuntimeWarning): + actual = np.timedelta64(10, 's') % np.timedelta64(0, 's') + assert_equal(actual, np.timedelta64('NaT')) + + @pytest.mark.parametrize("val1, val2", [ + # cases where one operand is not + # timedelta64 + (np.timedelta64(7, 'Y'), + 15,), + (7.5, + np.timedelta64(1, 'D')), + ]) + def test_timedelta_modulus_type_resolution(self, val1, val2): + # NOTE: some of the operations may be supported + # in the future + with assert_raises_regex(TypeError, + "'remainder' cannot use operands with types"): + val1 % val2 + + def test_timedelta_arange_no_dtype(self): + d = np.array(5, dtype="m8[D]") + assert_equal(np.arange(d, d + 1), d) + assert_equal(np.arange(d), np.arange(0, d)) + + def test_datetime_maximum_reduce(self): + a = np.array(['2010-01-02', '1999-03-14', '1833-03'], dtype='M8[D]') + assert_equal(np.maximum.reduce(a).dtype, np.dtype('M8[D]')) + assert_equal(np.maximum.reduce(a), + np.datetime64('2010-01-02')) + + a = np.array([1, 4, 0, 7, 2], dtype='m8[s]') + assert_equal(np.maximum.reduce(a).dtype, np.dtype('m8[s]')) + assert_equal(np.maximum.reduce(a), + np.timedelta64(7, 's')) + + def test_timedelta_correct_mean(self): + # test mainly because it worked only via a bug in that allowed: + # `timedelta.sum(dtype="f8")` to ignore the dtype request. + a = np.arange(1000, dtype="m8[s]") + assert_array_equal(a.mean(), a.sum() / len(a)) + + def test_datetime_no_subtract_reducelike(self): + # subtracting two datetime64 works, but we cannot reduce it, since + # the result of that subtraction will have a different dtype. 
+ arr = np.array(["2021-12-02", "2019-05-12"], dtype="M8[ms]") + msg = r"the resolved dtypes are not compatible" + + with pytest.raises(TypeError, match=msg): + np.subtract.reduce(arr) + + with pytest.raises(TypeError, match=msg): + np.subtract.accumulate(arr) + + with pytest.raises(TypeError, match=msg): + np.subtract.reduceat(arr, [0]) + + def test_datetime_busday_offset(self): + # First Monday in June + assert_equal( + np.busday_offset('2011-06', 0, roll='forward', weekmask='Mon'), + np.datetime64('2011-06-06')) + # Last Monday in June + assert_equal( + np.busday_offset('2011-07', -1, roll='forward', weekmask='Mon'), + np.datetime64('2011-06-27')) + assert_equal( + np.busday_offset('2011-07', -1, roll='forward', weekmask='Mon'), + np.datetime64('2011-06-27')) + + # Default M-F business days, different roll modes + assert_equal(np.busday_offset('2010-08', 0, roll='backward'), + np.datetime64('2010-07-30')) + assert_equal(np.busday_offset('2010-08', 0, roll='preceding'), + np.datetime64('2010-07-30')) + assert_equal(np.busday_offset('2010-08', 0, roll='modifiedpreceding'), + np.datetime64('2010-08-02')) + assert_equal(np.busday_offset('2010-08', 0, roll='modifiedfollowing'), + np.datetime64('2010-08-02')) + assert_equal(np.busday_offset('2010-08', 0, roll='forward'), + np.datetime64('2010-08-02')) + assert_equal(np.busday_offset('2010-08', 0, roll='following'), + np.datetime64('2010-08-02')) + assert_equal(np.busday_offset('2010-10-30', 0, roll='following'), + np.datetime64('2010-11-01')) + assert_equal( + np.busday_offset('2010-10-30', 0, roll='modifiedfollowing'), + np.datetime64('2010-10-29')) + assert_equal( + np.busday_offset('2010-10-30', 0, roll='modifiedpreceding'), + np.datetime64('2010-10-29')) + assert_equal( + np.busday_offset('2010-10-16', 0, roll='modifiedfollowing'), + np.datetime64('2010-10-18')) + assert_equal( + np.busday_offset('2010-10-16', 0, roll='modifiedpreceding'), + np.datetime64('2010-10-15')) + # roll='raise' by default + assert_raises(ValueError, np.busday_offset, '2011-06-04', 0) + + # Bigger offset values + assert_equal(np.busday_offset('2006-02-01', 25), + np.datetime64('2006-03-08')) + assert_equal(np.busday_offset('2006-03-08', -25), + np.datetime64('2006-02-01')) + assert_equal(np.busday_offset('2007-02-25', 11, weekmask='SatSun'), + np.datetime64('2007-04-07')) + assert_equal(np.busday_offset('2007-04-07', -11, weekmask='SatSun'), + np.datetime64('2007-02-25')) + + # NaT values when roll is not raise + assert_equal(np.busday_offset(np.datetime64('NaT'), 1, roll='nat'), + np.datetime64('NaT')) + assert_equal(np.busday_offset(np.datetime64('NaT'), 1, roll='following'), + np.datetime64('NaT')) + assert_equal(np.busday_offset(np.datetime64('NaT'), 1, roll='preceding'), + np.datetime64('NaT')) + + def test_datetime_busdaycalendar(self): + # Check that it removes NaT, duplicates, and weekends + # and sorts the result. + bdd = np.busdaycalendar( + holidays=['NaT', '2011-01-17', '2011-03-06', 'NaT', + '2011-12-26', '2011-05-30', '2011-01-17']) + assert_equal(bdd.holidays, + np.array(['2011-01-17', '2011-05-30', '2011-12-26'], dtype='M8')) + # Default M-F weekmask + assert_equal(bdd.weekmask, np.array([1, 1, 1, 1, 1, 0, 0], dtype='?')) + + # Check string weekmask with varying whitespace. 
+ bdd = np.busdaycalendar(weekmask="Sun TueWed Thu\tFri") + assert_equal(bdd.weekmask, np.array([0, 1, 1, 1, 1, 0, 1], dtype='?')) + + # Check length 7 0/1 string + bdd = np.busdaycalendar(weekmask="0011001") + assert_equal(bdd.weekmask, np.array([0, 0, 1, 1, 0, 0, 1], dtype='?')) + + # Check length 7 string weekmask. + bdd = np.busdaycalendar(weekmask="Mon Tue") + assert_equal(bdd.weekmask, np.array([1, 1, 0, 0, 0, 0, 0], dtype='?')) + + # All-zeros weekmask should raise + assert_raises(ValueError, np.busdaycalendar, weekmask=[0, 0, 0, 0, 0, 0, 0]) + # weekday names must be correct case + assert_raises(ValueError, np.busdaycalendar, weekmask="satsun") + # All-zeros weekmask should raise + assert_raises(ValueError, np.busdaycalendar, weekmask="") + # Invalid weekday name codes should raise + assert_raises(ValueError, np.busdaycalendar, weekmask="Mon Tue We") + assert_raises(ValueError, np.busdaycalendar, weekmask="Max") + assert_raises(ValueError, np.busdaycalendar, weekmask="Monday Tue") + + def test_datetime_busday_holidays_offset(self): + # With exactly one holiday + assert_equal( + np.busday_offset('2011-11-10', 1, holidays=['2011-11-11']), + np.datetime64('2011-11-14')) + assert_equal( + np.busday_offset('2011-11-04', 5, holidays=['2011-11-11']), + np.datetime64('2011-11-14')) + assert_equal( + np.busday_offset('2011-11-10', 5, holidays=['2011-11-11']), + np.datetime64('2011-11-18')) + assert_equal( + np.busday_offset('2011-11-14', -1, holidays=['2011-11-11']), + np.datetime64('2011-11-10')) + assert_equal( + np.busday_offset('2011-11-18', -5, holidays=['2011-11-11']), + np.datetime64('2011-11-10')) + assert_equal( + np.busday_offset('2011-11-14', -5, holidays=['2011-11-11']), + np.datetime64('2011-11-04')) + # With the holiday appearing twice + assert_equal( + np.busday_offset('2011-11-10', 1, + holidays=['2011-11-11', '2011-11-11']), + np.datetime64('2011-11-14')) + assert_equal( + np.busday_offset('2011-11-14', -1, + holidays=['2011-11-11', '2011-11-11']), + np.datetime64('2011-11-10')) + # With a NaT holiday + assert_equal( + np.busday_offset('2011-11-10', 1, + holidays=['2011-11-11', 'NaT']), + np.datetime64('2011-11-14')) + assert_equal( + np.busday_offset('2011-11-14', -1, + holidays=['NaT', '2011-11-11']), + np.datetime64('2011-11-10')) + # With another holiday after + assert_equal( + np.busday_offset('2011-11-10', 1, + holidays=['2011-11-11', '2011-11-24']), + np.datetime64('2011-11-14')) + assert_equal( + np.busday_offset('2011-11-14', -1, + holidays=['2011-11-11', '2011-11-24']), + np.datetime64('2011-11-10')) + # With another holiday before + assert_equal( + np.busday_offset('2011-11-10', 1, + holidays=['2011-10-10', '2011-11-11']), + np.datetime64('2011-11-14')) + assert_equal( + np.busday_offset('2011-11-14', -1, + holidays=['2011-10-10', '2011-11-11']), + np.datetime64('2011-11-10')) + # With another holiday before and after + assert_equal( + np.busday_offset('2011-11-10', 1, + holidays=['2011-10-10', '2011-11-11', '2011-11-24']), + np.datetime64('2011-11-14')) + assert_equal( + np.busday_offset('2011-11-14', -1, + holidays=['2011-10-10', '2011-11-11', '2011-11-24']), + np.datetime64('2011-11-10')) + + # A bigger forward jump across more than one week/holiday + holidays = ['2011-10-10', '2011-11-11', '2011-11-24', + '2011-12-25', '2011-05-30', '2011-02-21', + '2011-12-26', '2012-01-02'] + bdd = np.busdaycalendar(weekmask='1111100', holidays=holidays) + assert_equal( + np.busday_offset('2011-10-03', 4, holidays=holidays), + np.busday_offset('2011-10-03', 4)) + 
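+ # 2011-10-03 is a Monday, so 4 business days ahead is Friday 2011-10-07,
+ # still short of the 2011-10-10 holiday; from offset 5 onward every
+ # holiday crossed shifts the equivalent plain offset by one more day.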
assert_equal( + np.busday_offset('2011-10-03', 5, holidays=holidays), + np.busday_offset('2011-10-03', 5 + 1)) + assert_equal( + np.busday_offset('2011-10-03', 27, holidays=holidays), + np.busday_offset('2011-10-03', 27 + 1)) + assert_equal( + np.busday_offset('2011-10-03', 28, holidays=holidays), + np.busday_offset('2011-10-03', 28 + 2)) + assert_equal( + np.busday_offset('2011-10-03', 35, holidays=holidays), + np.busday_offset('2011-10-03', 35 + 2)) + assert_equal( + np.busday_offset('2011-10-03', 36, holidays=holidays), + np.busday_offset('2011-10-03', 36 + 3)) + assert_equal( + np.busday_offset('2011-10-03', 56, holidays=holidays), + np.busday_offset('2011-10-03', 56 + 3)) + assert_equal( + np.busday_offset('2011-10-03', 57, holidays=holidays), + np.busday_offset('2011-10-03', 57 + 4)) + assert_equal( + np.busday_offset('2011-10-03', 60, holidays=holidays), + np.busday_offset('2011-10-03', 60 + 4)) + assert_equal( + np.busday_offset('2011-10-03', 61, holidays=holidays), + np.busday_offset('2011-10-03', 61 + 5)) + assert_equal( + np.busday_offset('2011-10-03', 61, busdaycal=bdd), + np.busday_offset('2011-10-03', 61 + 5)) + # A bigger backward jump across more than one week/holiday + assert_equal( + np.busday_offset('2012-01-03', -1, holidays=holidays), + np.busday_offset('2012-01-03', -1 - 1)) + assert_equal( + np.busday_offset('2012-01-03', -4, holidays=holidays), + np.busday_offset('2012-01-03', -4 - 1)) + assert_equal( + np.busday_offset('2012-01-03', -5, holidays=holidays), + np.busday_offset('2012-01-03', -5 - 2)) + assert_equal( + np.busday_offset('2012-01-03', -25, holidays=holidays), + np.busday_offset('2012-01-03', -25 - 2)) + assert_equal( + np.busday_offset('2012-01-03', -26, holidays=holidays), + np.busday_offset('2012-01-03', -26 - 3)) + assert_equal( + np.busday_offset('2012-01-03', -33, holidays=holidays), + np.busday_offset('2012-01-03', -33 - 3)) + assert_equal( + np.busday_offset('2012-01-03', -34, holidays=holidays), + np.busday_offset('2012-01-03', -34 - 4)) + assert_equal( + np.busday_offset('2012-01-03', -56, holidays=holidays), + np.busday_offset('2012-01-03', -56 - 4)) + assert_equal( + np.busday_offset('2012-01-03', -57, holidays=holidays), + np.busday_offset('2012-01-03', -57 - 5)) + assert_equal( + np.busday_offset('2012-01-03', -57, busdaycal=bdd), + np.busday_offset('2012-01-03', -57 - 5)) + + # Can't supply both a weekmask/holidays and busdaycal + assert_raises(ValueError, np.busday_offset, '2012-01-03', -15, + weekmask='1111100', busdaycal=bdd) + assert_raises(ValueError, np.busday_offset, '2012-01-03', -15, + holidays=holidays, busdaycal=bdd) + + # Roll with the holidays + assert_equal( + np.busday_offset('2011-12-25', 0, + roll='forward', holidays=holidays), + np.datetime64('2011-12-27')) + assert_equal( + np.busday_offset('2011-12-26', 0, + roll='forward', holidays=holidays), + np.datetime64('2011-12-27')) + assert_equal( + np.busday_offset('2011-12-26', 0, + roll='backward', holidays=holidays), + np.datetime64('2011-12-23')) + assert_equal( + np.busday_offset('2012-02-27', 0, + roll='modifiedfollowing', + holidays=['2012-02-27', '2012-02-26', '2012-02-28', + '2012-03-01', '2012-02-29']), + np.datetime64('2012-02-24')) + assert_equal( + np.busday_offset('2012-03-06', 0, + roll='modifiedpreceding', + holidays=['2012-03-02', '2012-03-03', '2012-03-01', + '2012-03-05', '2012-03-07', '2012-03-06']), + np.datetime64('2012-03-08')) + + def test_datetime_busday_holidays_count(self): + holidays = ['2011-01-01', '2011-10-10', '2011-11-11', '2011-11-24', + 
'2011-12-25', '2011-05-30', '2011-02-21', '2011-01-17', + '2011-12-26', '2012-01-02', '2011-02-21', '2011-05-30', + '2011-07-01', '2011-07-04', '2011-09-05', '2011-10-10'] + bdd = np.busdaycalendar(weekmask='1111100', holidays=holidays) + + # Validate against busday_offset broadcast against + # a range of offsets + dates = np.busday_offset('2011-01-01', np.arange(366), + roll='forward', busdaycal=bdd) + assert_equal(np.busday_count('2011-01-01', dates, busdaycal=bdd), + np.arange(366)) + # Returns negative value when reversed + # -1 since the '2011-01-01' is not a busday + assert_equal(np.busday_count(dates, '2011-01-01', busdaycal=bdd), + -np.arange(366) - 1) + + # 2011-12-31 is a saturday + dates = np.busday_offset('2011-12-31', -np.arange(366), + roll='forward', busdaycal=bdd) + # only the first generated date is in the future of 2011-12-31 + expected = np.arange(366) + expected[0] = -1 + assert_equal(np.busday_count(dates, '2011-12-31', busdaycal=bdd), + expected) + # Returns negative value when reversed + expected = -np.arange(366)+1 + expected[0] = 0 + assert_equal(np.busday_count('2011-12-31', dates, busdaycal=bdd), + expected) + + # Can't supply both a weekmask/holidays and busdaycal + assert_raises(ValueError, np.busday_offset, '2012-01-03', '2012-02-03', + weekmask='1111100', busdaycal=bdd) + assert_raises(ValueError, np.busday_offset, '2012-01-03', '2012-02-03', + holidays=holidays, busdaycal=bdd) + + # Number of Mondays in March 2011 + assert_equal(np.busday_count('2011-03', '2011-04', weekmask='Mon'), 4) + # Returns negative value when reversed + assert_equal(np.busday_count('2011-04', '2011-03', weekmask='Mon'), -4) + + sunday = np.datetime64('2023-03-05') + monday = sunday + 1 + friday = sunday + 5 + saturday = sunday + 6 + assert_equal(np.busday_count(sunday, monday), 0) + assert_equal(np.busday_count(monday, sunday), -1) + + assert_equal(np.busday_count(friday, saturday), 1) + assert_equal(np.busday_count(saturday, friday), 0) + + + def test_datetime_is_busday(self): + holidays = ['2011-01-01', '2011-10-10', '2011-11-11', '2011-11-24', + '2011-12-25', '2011-05-30', '2011-02-21', '2011-01-17', + '2011-12-26', '2012-01-02', '2011-02-21', '2011-05-30', + '2011-07-01', '2011-07-04', '2011-09-05', '2011-10-10', + 'NaT'] + bdd = np.busdaycalendar(weekmask='1111100', holidays=holidays) + + # Weekend/weekday tests + assert_equal(np.is_busday('2011-01-01'), False) + assert_equal(np.is_busday('2011-01-02'), False) + assert_equal(np.is_busday('2011-01-03'), True) + + # All the holidays are not business days + assert_equal(np.is_busday(holidays, busdaycal=bdd), + np.zeros(len(holidays), dtype='?')) + + def test_datetime_y2038(self): + # Test parsing on either side of the Y2038 boundary + a = np.datetime64('2038-01-19T03:14:07') + assert_equal(a.view(np.int64), 2**31 - 1) + a = np.datetime64('2038-01-19T03:14:08') + assert_equal(a.view(np.int64), 2**31) + + # Test parsing on either side of the Y2038 boundary with + # a manually specified timezone offset + with assert_warns(DeprecationWarning): + a = np.datetime64('2038-01-19T04:14:07+0100') + assert_equal(a.view(np.int64), 2**31 - 1) + with assert_warns(DeprecationWarning): + a = np.datetime64('2038-01-19T04:14:08+0100') + assert_equal(a.view(np.int64), 2**31) + + # Test parsing a date after Y2038 + a = np.datetime64('2038-01-20T13:21:14') + assert_equal(str(a), '2038-01-20T13:21:14') + + def test_isnat(self): + assert_(np.isnat(np.datetime64('NaT', 'ms'))) + assert_(np.isnat(np.datetime64('NaT', 'ns'))) + assert_(not 
np.isnat(np.datetime64('2038-01-19T03:14:07'))) + + assert_(np.isnat(np.timedelta64('NaT', "ms"))) + assert_(not np.isnat(np.timedelta64(34, "ms"))) + + res = np.array([False, False, True]) + for unit in ['Y', 'M', 'W', 'D', + 'h', 'm', 's', 'ms', 'us', + 'ns', 'ps', 'fs', 'as']: + arr = np.array([123, -321, "NaT"], dtype='<datetime64[%s]' % unit) + assert_equal(np.isnat(arr), res) + arr = np.array([123, -321, "NaT"], dtype='>datetime64[%s]' % unit) + assert_equal(np.isnat(arr), res) + arr = np.array([123, -321, "NaT"], dtype='<timedelta64[%s]' % unit) + assert_equal(np.isnat(arr), res) + arr = np.array([123, -321, "NaT"], dtype='>timedelta64[%s]' % unit) + assert_equal(np.isnat(arr), res) + + def test_isnat_error(self): + # Test that only datetime dtype arrays are accepted + for t in np.typecodes["All"]: + if t in np.typecodes["Datetime"]: + continue + assert_raises(TypeError, np.isnat, np.zeros(10, t)) + + def test_isfinite_scalar(self): + assert_(not np.isfinite(np.datetime64('NaT', 'ms'))) + assert_(not np.isfinite(np.datetime64('NaT', 'ns'))) + assert_(np.isfinite(np.datetime64('2038-01-19T03:14:07'))) + + assert_(not np.isfinite(np.timedelta64('NaT', "ms"))) + assert_(np.isfinite(np.timedelta64(34, "ms"))) + + @pytest.mark.parametrize('unit', ['Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms', + 'us', 'ns', 'ps', 'fs', 'as']) + @pytest.mark.parametrize('dstr', ['<datetime64[%s]', '>datetime64[%s]', + '<timedelta64[%s]', '>timedelta64[%s]']) + def test_isfinite_isinf_isnan_units(self, unit, dstr): + '''check isfinite, isinf, isnan for all units of <M, >M, <m, >m dtypes + ''' + arr_val = [123, -321, "NaT"] + arr = np.array(arr_val, dtype= dstr % unit) + pos = np.array([True, True, False]) + neg = np.array([False, False, True]) + false = np.array([False, False, False]) + assert_equal(np.isfinite(arr), pos) + assert_equal(np.isinf(arr), false) + assert_equal(np.isnan(arr), neg) + + def test_assert_equal(self): + assert_raises(AssertionError, assert_equal, + np.datetime64('nat'), np.timedelta64('nat')) + + def test_corecursive_input(self): + # construct a co-recursive list + a, b = [], [] + a.append(b) + b.append(a) + obj_arr = np.array([None]) + obj_arr[0] = a + + # At some point this caused a stack overflow (gh-11154). Now raises + # ValueError since the nested list cannot be converted to a datetime. + assert_raises(ValueError, obj_arr.astype, 'M8') + assert_raises(ValueError, obj_arr.astype, 'm8') + + @pytest.mark.parametrize("shape", [(), (1,)]) + def test_discovery_from_object_array(self, shape): + arr = np.array("2020-10-10", dtype=object).reshape(shape) + res = np.array("2020-10-10", dtype="M8").reshape(shape) + assert res.dtype == np.dtype("M8[D]") + assert_equal(arr.astype("M8"), res) + arr[...] 
= np.bytes_("2020-10-10") # try a numpy string type + assert_equal(arr.astype("M8"), res) + arr = arr.astype("S") + assert_equal(arr.astype("S").astype("M8"), res) + + @pytest.mark.parametrize("time_unit", [ + "Y", "M", "W", "D", "h", "m", "s", "ms", "us", "ns", "ps", "fs", "as", + # compound units + "10D", "2M", + ]) + def test_limit_symmetry(self, time_unit): + """ + Dates should have symmetric limits around the unix epoch at +/-np.int64 + """ + epoch = np.datetime64(0, time_unit) + latest = np.datetime64(np.iinfo(np.int64).max, time_unit) + earliest = np.datetime64(-np.iinfo(np.int64).max, time_unit) + + # above should not have overflowed + assert earliest < epoch < latest + + @pytest.mark.parametrize("time_unit", [ + "Y", "M", + pytest.param("W", marks=pytest.mark.xfail(reason="gh-13197")), + "D", "h", "m", + "s", "ms", "us", "ns", "ps", "fs", "as", + pytest.param("10D", marks=pytest.mark.xfail(reason="similar to gh-13197")), + ]) + @pytest.mark.parametrize("sign", [-1, 1]) + def test_limit_str_roundtrip(self, time_unit, sign): + """ + Limits should roundtrip when converted to strings. + + This tests the conversion to and from npy_datetimestruct. + """ + # TODO: add absolute (gold standard) time span limit strings + limit = np.datetime64(np.iinfo(np.int64).max * sign, time_unit) + + # Convert to string and back. Explicit unit needed since the day and + # week reprs are not distinguishable. + limit_via_str = np.datetime64(str(limit), time_unit) + assert limit_via_str == limit + + +class TestDateTimeData: + + def test_basic(self): + a = np.array(['1980-03-23'], dtype=np.datetime64) + assert_equal(np.datetime_data(a.dtype), ('D', 1)) + + def test_bytes(self): + # byte units are converted to unicode + dt = np.datetime64('2000', (b'ms', 5)) + assert np.datetime_data(dt.dtype) == ('ms', 5) + + dt = np.datetime64('2000', b'5ms') + assert np.datetime_data(dt.dtype) == ('ms', 5) + + def test_non_ascii(self): + # μs is normalized to μ + dt = np.datetime64('2000', ('μs', 5)) + assert np.datetime_data(dt.dtype) == ('us', 5) + + dt = np.datetime64('2000', '5μs') + assert np.datetime_data(dt.dtype) == ('us', 5) + + +def test_comparisons_return_not_implemented(): + # GH#17017 + + class custom: + __array_priority__ = 10000 + + obj = custom() + + dt = np.datetime64('2000', 'ns') + td = dt - dt + + for item in [dt, td]: + assert item.__eq__(obj) is NotImplemented + assert item.__ne__(obj) is NotImplemented + assert item.__le__(obj) is NotImplemented + assert item.__lt__(obj) is NotImplemented + assert item.__ge__(obj) is NotImplemented + assert item.__gt__(obj) is NotImplemented diff --git a/.venv/lib/python3.12/site-packages/numpy/core/tests/test_defchararray.py b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_defchararray.py new file mode 100644 index 00000000..39699f45 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_defchararray.py @@ -0,0 +1,686 @@ +import pytest + +import numpy as np +from numpy.core.multiarray import _vec_string +from numpy.testing import ( + assert_, assert_equal, assert_array_equal, assert_raises, + assert_raises_regex + ) + +kw_unicode_true = {'unicode': True} # make 2to3 work properly +kw_unicode_false = {'unicode': False} + +class TestBasic: + def test_from_object_array(self): + A = np.array([['abc', 2], + ['long ', '0123456789']], dtype='O') + B = np.char.array(A) + assert_equal(B.dtype.itemsize, 10) + assert_array_equal(B, [[b'abc', b'2'], + [b'long', b'0123456789']]) + + def test_from_object_array_unicode(self): + A = 
np.array([['abc', 'Sigma \u03a3'], + ['long ', '0123456789']], dtype='O') + assert_raises(ValueError, np.char.array, (A,)) + B = np.char.array(A, **kw_unicode_true) + assert_equal(B.dtype.itemsize, 10 * np.array('a', 'U').dtype.itemsize) + assert_array_equal(B, [['abc', 'Sigma \u03a3'], + ['long', '0123456789']]) + + def test_from_string_array(self): + A = np.array([[b'abc', b'foo'], + [b'long ', b'0123456789']]) + assert_equal(A.dtype.type, np.bytes_) + B = np.char.array(A) + assert_array_equal(B, A) + assert_equal(B.dtype, A.dtype) + assert_equal(B.shape, A.shape) + B[0, 0] = 'changed' + assert_(B[0, 0] != A[0, 0]) + C = np.char.asarray(A) + assert_array_equal(C, A) + assert_equal(C.dtype, A.dtype) + C[0, 0] = 'changed again' + assert_(C[0, 0] != B[0, 0]) + assert_(C[0, 0] == A[0, 0]) + + def test_from_unicode_array(self): + A = np.array([['abc', 'Sigma \u03a3'], + ['long ', '0123456789']]) + assert_equal(A.dtype.type, np.str_) + B = np.char.array(A) + assert_array_equal(B, A) + assert_equal(B.dtype, A.dtype) + assert_equal(B.shape, A.shape) + B = np.char.array(A, **kw_unicode_true) + assert_array_equal(B, A) + assert_equal(B.dtype, A.dtype) + assert_equal(B.shape, A.shape) + + def fail(): + np.char.array(A, **kw_unicode_false) + + assert_raises(UnicodeEncodeError, fail) + + def test_unicode_upconvert(self): + A = np.char.array(['abc']) + B = np.char.array(['\u03a3']) + assert_(issubclass((A + B).dtype.type, np.str_)) + + def test_from_string(self): + A = np.char.array(b'abc') + assert_equal(len(A), 1) + assert_equal(len(A[0]), 3) + assert_(issubclass(A.dtype.type, np.bytes_)) + + def test_from_unicode(self): + A = np.char.array('\u03a3') + assert_equal(len(A), 1) + assert_equal(len(A[0]), 1) + assert_equal(A.itemsize, 4) + assert_(issubclass(A.dtype.type, np.str_)) + +class TestVecString: + def test_non_existent_method(self): + + def fail(): + _vec_string('a', np.bytes_, 'bogus') + + assert_raises(AttributeError, fail) + + def test_non_string_array(self): + + def fail(): + _vec_string(1, np.bytes_, 'strip') + + assert_raises(TypeError, fail) + + def test_invalid_args_tuple(self): + + def fail(): + _vec_string(['a'], np.bytes_, 'strip', 1) + + assert_raises(TypeError, fail) + + def test_invalid_type_descr(self): + + def fail(): + _vec_string(['a'], 'BOGUS', 'strip') + + assert_raises(TypeError, fail) + + def test_invalid_function_args(self): + + def fail(): + _vec_string(['a'], np.bytes_, 'strip', (1,)) + + assert_raises(TypeError, fail) + + def test_invalid_result_type(self): + + def fail(): + _vec_string(['a'], np.int_, 'strip') + + assert_raises(TypeError, fail) + + def test_broadcast_error(self): + + def fail(): + _vec_string([['abc', 'def']], np.int_, 'find', (['a', 'd', 'j'],)) + + assert_raises(ValueError, fail) + + +class TestWhitespace: + def setup_method(self): + self.A = np.array([['abc ', '123 '], + ['789 ', 'xyz ']]).view(np.chararray) + self.B = np.array([['abc', '123'], + ['789', 'xyz']]).view(np.chararray) + + def test1(self): + assert_(np.all(self.A == self.B)) + assert_(np.all(self.A >= self.B)) + assert_(np.all(self.A <= self.B)) + assert_(not np.any(self.A > self.B)) + assert_(not np.any(self.A < self.B)) + assert_(not np.any(self.A != self.B)) + +class TestChar: + def setup_method(self): + self.A = np.array('abc1', dtype='c').view(np.chararray) + + def test_it(self): + assert_equal(self.A.shape, (4,)) + assert_equal(self.A.upper()[:2].tobytes(), b'AB') + +class TestComparisons: + def setup_method(self): + self.A = np.array([['abc', '123'], + ['789', 
'xyz']]).view(np.chararray) + self.B = np.array([['efg', '123 '], + ['051', 'tuv']]).view(np.chararray) + + def test_not_equal(self): + assert_array_equal((self.A != self.B), [[True, False], [True, True]]) + + def test_equal(self): + assert_array_equal((self.A == self.B), [[False, True], [False, False]]) + + def test_greater_equal(self): + assert_array_equal((self.A >= self.B), [[False, True], [True, True]]) + + def test_less_equal(self): + assert_array_equal((self.A <= self.B), [[True, True], [False, False]]) + + def test_greater(self): + assert_array_equal((self.A > self.B), [[False, False], [True, True]]) + + def test_less(self): + assert_array_equal((self.A < self.B), [[True, False], [False, False]]) + + def test_type(self): + out1 = np.char.equal(self.A, self.B) + out2 = np.char.equal('a', 'a') + assert_(isinstance(out1, np.ndarray)) + assert_(isinstance(out2, np.ndarray)) + +class TestComparisonsMixed1(TestComparisons): + """Ticket #1276""" + + def setup_method(self): + TestComparisons.setup_method(self) + self.B = np.array([['efg', '123 '], + ['051', 'tuv']], np.str_).view(np.chararray) + +class TestComparisonsMixed2(TestComparisons): + """Ticket #1276""" + + def setup_method(self): + TestComparisons.setup_method(self) + self.A = np.array([['abc', '123'], + ['789', 'xyz']], np.str_).view(np.chararray) + +class TestInformation: + def setup_method(self): + self.A = np.array([[' abc ', ''], + ['12345', 'MixedCase'], + ['123 \t 345 \0 ', 'UPPER']]).view(np.chararray) + self.B = np.array([[' \u03a3 ', ''], + ['12345', 'MixedCase'], + ['123 \t 345 \0 ', 'UPPER']]).view(np.chararray) + + def test_len(self): + assert_(issubclass(np.char.str_len(self.A).dtype.type, np.integer)) + assert_array_equal(np.char.str_len(self.A), [[5, 0], [5, 9], [12, 5]]) + assert_array_equal(np.char.str_len(self.B), [[3, 0], [5, 9], [12, 5]]) + + def test_count(self): + assert_(issubclass(self.A.count('').dtype.type, np.integer)) + assert_array_equal(self.A.count('a'), [[1, 0], [0, 1], [0, 0]]) + assert_array_equal(self.A.count('123'), [[0, 0], [1, 0], [1, 0]]) + # Python doesn't seem to like counting NULL characters + # assert_array_equal(self.A.count('\0'), [[0, 0], [0, 0], [1, 0]]) + assert_array_equal(self.A.count('a', 0, 2), [[1, 0], [0, 0], [0, 0]]) + assert_array_equal(self.B.count('a'), [[0, 0], [0, 1], [0, 0]]) + assert_array_equal(self.B.count('123'), [[0, 0], [1, 0], [1, 0]]) + # assert_array_equal(self.B.count('\0'), [[0, 0], [0, 0], [1, 0]]) + + def test_endswith(self): + assert_(issubclass(self.A.endswith('').dtype.type, np.bool_)) + assert_array_equal(self.A.endswith(' '), [[1, 0], [0, 0], [1, 0]]) + assert_array_equal(self.A.endswith('3', 0, 3), [[0, 0], [1, 0], [1, 0]]) + + def fail(): + self.A.endswith('3', 'fdjk') + + assert_raises(TypeError, fail) + + def test_find(self): + assert_(issubclass(self.A.find('a').dtype.type, np.integer)) + assert_array_equal(self.A.find('a'), [[1, -1], [-1, 6], [-1, -1]]) + assert_array_equal(self.A.find('3'), [[-1, -1], [2, -1], [2, -1]]) + assert_array_equal(self.A.find('a', 0, 2), [[1, -1], [-1, -1], [-1, -1]]) + assert_array_equal(self.A.find(['1', 'P']), [[-1, -1], [0, -1], [0, 1]]) + + def test_index(self): + + def fail(): + self.A.index('a') + + assert_raises(ValueError, fail) + assert_(np.char.index('abcba', 'b') == 1) + assert_(issubclass(np.char.index('abcba', 'b').dtype.type, np.integer)) + + def test_isalnum(self): + assert_(issubclass(self.A.isalnum().dtype.type, np.bool_)) + assert_array_equal(self.A.isalnum(), [[False, False], [True, True], [False, 
True]]) + + def test_isalpha(self): + assert_(issubclass(self.A.isalpha().dtype.type, np.bool_)) + assert_array_equal(self.A.isalpha(), [[False, False], [False, True], [False, True]]) + + def test_isdigit(self): + assert_(issubclass(self.A.isdigit().dtype.type, np.bool_)) + assert_array_equal(self.A.isdigit(), [[False, False], [True, False], [False, False]]) + + def test_islower(self): + assert_(issubclass(self.A.islower().dtype.type, np.bool_)) + assert_array_equal(self.A.islower(), [[True, False], [False, False], [False, False]]) + + def test_isspace(self): + assert_(issubclass(self.A.isspace().dtype.type, np.bool_)) + assert_array_equal(self.A.isspace(), [[False, False], [False, False], [False, False]]) + + def test_istitle(self): + assert_(issubclass(self.A.istitle().dtype.type, np.bool_)) + assert_array_equal(self.A.istitle(), [[False, False], [False, False], [False, False]]) + + def test_isupper(self): + assert_(issubclass(self.A.isupper().dtype.type, np.bool_)) + assert_array_equal(self.A.isupper(), [[False, False], [False, False], [False, True]]) + + def test_rfind(self): + assert_(issubclass(self.A.rfind('a').dtype.type, np.integer)) + assert_array_equal(self.A.rfind('a'), [[1, -1], [-1, 6], [-1, -1]]) + assert_array_equal(self.A.rfind('3'), [[-1, -1], [2, -1], [6, -1]]) + assert_array_equal(self.A.rfind('a', 0, 2), [[1, -1], [-1, -1], [-1, -1]]) + assert_array_equal(self.A.rfind(['1', 'P']), [[-1, -1], [0, -1], [0, 2]]) + + def test_rindex(self): + + def fail(): + self.A.rindex('a') + + assert_raises(ValueError, fail) + assert_(np.char.rindex('abcba', 'b') == 3) + assert_(issubclass(np.char.rindex('abcba', 'b').dtype.type, np.integer)) + + def test_startswith(self): + assert_(issubclass(self.A.startswith('').dtype.type, np.bool_)) + assert_array_equal(self.A.startswith(' '), [[1, 0], [0, 0], [0, 0]]) + assert_array_equal(self.A.startswith('1', 0, 3), [[0, 0], [1, 0], [1, 0]]) + + def fail(): + self.A.startswith('3', 'fdjk') + + assert_raises(TypeError, fail) + + +class TestMethods: + def setup_method(self): + self.A = np.array([[' abc ', ''], + ['12345', 'MixedCase'], + ['123 \t 345 \0 ', 'UPPER']], + dtype='S').view(np.chararray) + self.B = np.array([[' \u03a3 ', ''], + ['12345', 'MixedCase'], + ['123 \t 345 \0 ', 'UPPER']]).view(np.chararray) + + def test_capitalize(self): + tgt = [[b' abc ', b''], + [b'12345', b'Mixedcase'], + [b'123 \t 345 \0 ', b'Upper']] + assert_(issubclass(self.A.capitalize().dtype.type, np.bytes_)) + assert_array_equal(self.A.capitalize(), tgt) + + tgt = [[' \u03c3 ', ''], + ['12345', 'Mixedcase'], + ['123 \t 345 \0 ', 'Upper']] + assert_(issubclass(self.B.capitalize().dtype.type, np.str_)) + assert_array_equal(self.B.capitalize(), tgt) + + def test_center(self): + assert_(issubclass(self.A.center(10).dtype.type, np.bytes_)) + C = self.A.center([10, 20]) + assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 20]]) + + C = self.A.center(20, b'#') + assert_(np.all(C.startswith(b'#'))) + assert_(np.all(C.endswith(b'#'))) + + C = np.char.center(b'FOO', [[10, 20], [15, 8]]) + tgt = [[b' FOO ', b' FOO '], + [b' FOO ', b' FOO ']] + assert_(issubclass(C.dtype.type, np.bytes_)) + assert_array_equal(C, tgt) + + def test_decode(self): + A = np.char.array([b'\\u03a3']) + assert_(A.decode('unicode-escape')[0] == '\u03a3') + + def test_encode(self): + B = self.B.encode('unicode_escape') + assert_(B[0][0] == str(' \\u03a3 ').encode('latin1')) + + def test_expandtabs(self): + T = self.A.expandtabs() + assert_(T[2, 0] == b'123 345 \0') + + def 
test_join(self): + # NOTE: list(b'123') == [49, 50, 51] + # so that b','.join(b'123') results to an error on Py3 + A0 = self.A.decode('ascii') + + A = np.char.join([',', '#'], A0) + assert_(issubclass(A.dtype.type, np.str_)) + tgt = np.array([[' ,a,b,c, ', ''], + ['1,2,3,4,5', 'M#i#x#e#d#C#a#s#e'], + ['1,2,3, ,\t, ,3,4,5, ,\x00, ', 'U#P#P#E#R']]) + assert_array_equal(np.char.join([',', '#'], A0), tgt) + + def test_ljust(self): + assert_(issubclass(self.A.ljust(10).dtype.type, np.bytes_)) + + C = self.A.ljust([10, 20]) + assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 20]]) + + C = self.A.ljust(20, b'#') + assert_array_equal(C.startswith(b'#'), [ + [False, True], [False, False], [False, False]]) + assert_(np.all(C.endswith(b'#'))) + + C = np.char.ljust(b'FOO', [[10, 20], [15, 8]]) + tgt = [[b'FOO ', b'FOO '], + [b'FOO ', b'FOO ']] + assert_(issubclass(C.dtype.type, np.bytes_)) + assert_array_equal(C, tgt) + + def test_lower(self): + tgt = [[b' abc ', b''], + [b'12345', b'mixedcase'], + [b'123 \t 345 \0 ', b'upper']] + assert_(issubclass(self.A.lower().dtype.type, np.bytes_)) + assert_array_equal(self.A.lower(), tgt) + + tgt = [[' \u03c3 ', ''], + ['12345', 'mixedcase'], + ['123 \t 345 \0 ', 'upper']] + assert_(issubclass(self.B.lower().dtype.type, np.str_)) + assert_array_equal(self.B.lower(), tgt) + + def test_lstrip(self): + tgt = [[b'abc ', b''], + [b'12345', b'MixedCase'], + [b'123 \t 345 \0 ', b'UPPER']] + assert_(issubclass(self.A.lstrip().dtype.type, np.bytes_)) + assert_array_equal(self.A.lstrip(), tgt) + + tgt = [[b' abc', b''], + [b'2345', b'ixedCase'], + [b'23 \t 345 \x00', b'UPPER']] + assert_array_equal(self.A.lstrip([b'1', b'M']), tgt) + + tgt = [['\u03a3 ', ''], + ['12345', 'MixedCase'], + ['123 \t 345 \0 ', 'UPPER']] + assert_(issubclass(self.B.lstrip().dtype.type, np.str_)) + assert_array_equal(self.B.lstrip(), tgt) + + def test_partition(self): + P = self.A.partition([b'3', b'M']) + tgt = [[(b' abc ', b'', b''), (b'', b'', b'')], + [(b'12', b'3', b'45'), (b'', b'M', b'ixedCase')], + [(b'12', b'3', b' \t 345 \0 '), (b'UPPER', b'', b'')]] + assert_(issubclass(P.dtype.type, np.bytes_)) + assert_array_equal(P, tgt) + + def test_replace(self): + R = self.A.replace([b'3', b'a'], + [b'##########', b'@']) + tgt = [[b' abc ', b''], + [b'12##########45', b'MixedC@se'], + [b'12########## \t ##########45 \x00', b'UPPER']] + assert_(issubclass(R.dtype.type, np.bytes_)) + assert_array_equal(R, tgt) + + def test_rjust(self): + assert_(issubclass(self.A.rjust(10).dtype.type, np.bytes_)) + + C = self.A.rjust([10, 20]) + assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 20]]) + + C = self.A.rjust(20, b'#') + assert_(np.all(C.startswith(b'#'))) + assert_array_equal(C.endswith(b'#'), + [[False, True], [False, False], [False, False]]) + + C = np.char.rjust(b'FOO', [[10, 20], [15, 8]]) + tgt = [[b' FOO', b' FOO'], + [b' FOO', b' FOO']] + assert_(issubclass(C.dtype.type, np.bytes_)) + assert_array_equal(C, tgt) + + def test_rpartition(self): + P = self.A.rpartition([b'3', b'M']) + tgt = [[(b'', b'', b' abc '), (b'', b'', b'')], + [(b'12', b'3', b'45'), (b'', b'M', b'ixedCase')], + [(b'123 \t ', b'3', b'45 \0 '), (b'', b'', b'UPPER')]] + assert_(issubclass(P.dtype.type, np.bytes_)) + assert_array_equal(P, tgt) + + def test_rsplit(self): + A = self.A.rsplit(b'3') + tgt = [[[b' abc '], [b'']], + [[b'12', b'45'], [b'MixedCase']], + [[b'12', b' \t ', b'45 \x00 '], [b'UPPER']]] + assert_(issubclass(A.dtype.type, np.object_)) + assert_equal(A.tolist(), tgt) + + def 
test_rstrip(self): + assert_(issubclass(self.A.rstrip().dtype.type, np.bytes_)) + + tgt = [[b' abc', b''], + [b'12345', b'MixedCase'], + [b'123 \t 345', b'UPPER']] + assert_array_equal(self.A.rstrip(), tgt) + + tgt = [[b' abc ', b''], + [b'1234', b'MixedCase'], + [b'123 \t 345 \x00', b'UPP'] + ] + assert_array_equal(self.A.rstrip([b'5', b'ER']), tgt) + + tgt = [[' \u03a3', ''], + ['12345', 'MixedCase'], + ['123 \t 345', 'UPPER']] + assert_(issubclass(self.B.rstrip().dtype.type, np.str_)) + assert_array_equal(self.B.rstrip(), tgt) + + def test_strip(self): + tgt = [[b'abc', b''], + [b'12345', b'MixedCase'], + [b'123 \t 345', b'UPPER']] + assert_(issubclass(self.A.strip().dtype.type, np.bytes_)) + assert_array_equal(self.A.strip(), tgt) + + tgt = [[b' abc ', b''], + [b'234', b'ixedCas'], + [b'23 \t 345 \x00', b'UPP']] + assert_array_equal(self.A.strip([b'15', b'EReM']), tgt) + + tgt = [['\u03a3', ''], + ['12345', 'MixedCase'], + ['123 \t 345', 'UPPER']] + assert_(issubclass(self.B.strip().dtype.type, np.str_)) + assert_array_equal(self.B.strip(), tgt) + + def test_split(self): + A = self.A.split(b'3') + tgt = [ + [[b' abc '], [b'']], + [[b'12', b'45'], [b'MixedCase']], + [[b'12', b' \t ', b'45 \x00 '], [b'UPPER']]] + assert_(issubclass(A.dtype.type, np.object_)) + assert_equal(A.tolist(), tgt) + + def test_splitlines(self): + A = np.char.array(['abc\nfds\nwer']).splitlines() + assert_(issubclass(A.dtype.type, np.object_)) + assert_(A.shape == (1,)) + assert_(len(A[0]) == 3) + + def test_swapcase(self): + tgt = [[b' ABC ', b''], + [b'12345', b'mIXEDcASE'], + [b'123 \t 345 \0 ', b'upper']] + assert_(issubclass(self.A.swapcase().dtype.type, np.bytes_)) + assert_array_equal(self.A.swapcase(), tgt) + + tgt = [[' \u03c3 ', ''], + ['12345', 'mIXEDcASE'], + ['123 \t 345 \0 ', 'upper']] + assert_(issubclass(self.B.swapcase().dtype.type, np.str_)) + assert_array_equal(self.B.swapcase(), tgt) + + def test_title(self): + tgt = [[b' Abc ', b''], + [b'12345', b'Mixedcase'], + [b'123 \t 345 \0 ', b'Upper']] + assert_(issubclass(self.A.title().dtype.type, np.bytes_)) + assert_array_equal(self.A.title(), tgt) + + tgt = [[' \u03a3 ', ''], + ['12345', 'Mixedcase'], + ['123 \t 345 \0 ', 'Upper']] + assert_(issubclass(self.B.title().dtype.type, np.str_)) + assert_array_equal(self.B.title(), tgt) + + def test_upper(self): + tgt = [[b' ABC ', b''], + [b'12345', b'MIXEDCASE'], + [b'123 \t 345 \0 ', b'UPPER']] + assert_(issubclass(self.A.upper().dtype.type, np.bytes_)) + assert_array_equal(self.A.upper(), tgt) + + tgt = [[' \u03a3 ', ''], + ['12345', 'MIXEDCASE'], + ['123 \t 345 \0 ', 'UPPER']] + assert_(issubclass(self.B.upper().dtype.type, np.str_)) + assert_array_equal(self.B.upper(), tgt) + + def test_isnumeric(self): + + def fail(): + self.A.isnumeric() + + assert_raises(TypeError, fail) + assert_(issubclass(self.B.isnumeric().dtype.type, np.bool_)) + assert_array_equal(self.B.isnumeric(), [ + [False, False], [True, False], [False, False]]) + + def test_isdecimal(self): + + def fail(): + self.A.isdecimal() + + assert_raises(TypeError, fail) + assert_(issubclass(self.B.isdecimal().dtype.type, np.bool_)) + assert_array_equal(self.B.isdecimal(), [ + [False, False], [True, False], [False, False]]) + + +class TestOperations: + def setup_method(self): + self.A = np.array([['abc', '123'], + ['789', 'xyz']]).view(np.chararray) + self.B = np.array([['efg', '456'], + ['051', 'tuv']]).view(np.chararray) + + def test_add(self): + AB = np.array([['abcefg', '123456'], + ['789051', 'xyztuv']]).view(np.chararray) + 
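+ # chararray '+' concatenates elementwise ('abc' + 'efg' -> 'abcefg'),
+ # widening the result itemsize to hold both operands.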
assert_array_equal(AB, (self.A + self.B)) + assert_(len((self.A + self.B)[0][0]) == 6) + + def test_radd(self): + QA = np.array([['qabc', 'q123'], + ['q789', 'qxyz']]).view(np.chararray) + assert_array_equal(QA, ('q' + self.A)) + + def test_mul(self): + A = self.A + for r in (2, 3, 5, 7, 197): + Ar = np.array([[A[0, 0]*r, A[0, 1]*r], + [A[1, 0]*r, A[1, 1]*r]]).view(np.chararray) + + assert_array_equal(Ar, (self.A * r)) + + for ob in [object(), 'qrs']: + with assert_raises_regex(ValueError, + 'Can only multiply by integers'): + A*ob + + def test_rmul(self): + A = self.A + for r in (2, 3, 5, 7, 197): + Ar = np.array([[A[0, 0]*r, A[0, 1]*r], + [A[1, 0]*r, A[1, 1]*r]]).view(np.chararray) + assert_array_equal(Ar, (r * self.A)) + + for ob in [object(), 'qrs']: + with assert_raises_regex(ValueError, + 'Can only multiply by integers'): + ob * A + + def test_mod(self): + """Ticket #856""" + F = np.array([['%d', '%f'], ['%s', '%r']]).view(np.chararray) + C = np.array([[3, 7], [19, 1]]) + FC = np.array([['3', '7.000000'], + ['19', '1']]).view(np.chararray) + assert_array_equal(FC, F % C) + + A = np.array([['%.3f', '%d'], ['%s', '%r']]).view(np.chararray) + A1 = np.array([['1.000', '1'], ['1', '1']]).view(np.chararray) + assert_array_equal(A1, (A % 1)) + + A2 = np.array([['1.000', '2'], ['3', '4']]).view(np.chararray) + assert_array_equal(A2, (A % [[1, 2], [3, 4]])) + + def test_rmod(self): + assert_(("%s" % self.A) == str(self.A)) + assert_(("%r" % self.A) == repr(self.A)) + + for ob in [42, object()]: + with assert_raises_regex( + TypeError, "unsupported operand type.* and 'chararray'"): + ob % self.A + + def test_slice(self): + """Regression test for https://github.com/numpy/numpy/issues/5982""" + + arr = np.array([['abc ', 'def '], ['geh ', 'ijk ']], + dtype='S4').view(np.chararray) + sl1 = arr[:] + assert_array_equal(sl1, arr) + assert_(sl1.base is arr) + assert_(sl1.base.base is arr.base) + + sl2 = arr[:, :] + assert_array_equal(sl2, arr) + assert_(sl2.base is arr) + assert_(sl2.base.base is arr.base) + + assert_(arr[0, 0] == b'abc') + + +def test_empty_indexing(): + """Regression test for ticket 1948.""" + # Check that indexing a chararray with an empty list/array returns an + # empty chararray instead of a chararray with a single empty string in it. + s = np.chararray((4,)) + assert_(s[[]].size == 0) + + +@pytest.mark.parametrize(["dt1", "dt2"], + [("S", "U"), ("U", "S"), ("S", "O"), ("U", "O"), + ("S", "d"), ("S", "V")]) +def test_add_types(dt1, dt2): + arr1 = np.array([1234234], dtype=dt1) + # If the following fails, e.g. use a number and test "V" explicitly + arr2 = np.array([b"423"], dtype=dt2) + with pytest.raises(TypeError, + match=f".*same dtype kind.*{arr1.dtype}.*{arr2.dtype}"): + np.char.add(arr1, arr2) diff --git a/.venv/lib/python3.12/site-packages/numpy/core/tests/test_deprecations.py b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_deprecations.py new file mode 100644 index 00000000..3ada39e9 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_deprecations.py @@ -0,0 +1,817 @@ +""" +Tests related to deprecation warnings. Also a convenient place +to document how deprecations should eventually be turned into errors. 
+ +""" +import datetime +import operator +import warnings +import pytest +import tempfile +import re +import sys + +import numpy as np +from numpy.testing import ( + assert_raises, assert_warns, assert_, assert_array_equal, SkipTest, + KnownFailureException, break_cycles, + ) + +from numpy.core._multiarray_tests import fromstring_null_term_c_api + +try: + import pytz + _has_pytz = True +except ImportError: + _has_pytz = False + + +class _DeprecationTestCase: + # Just as warning: warnings uses re.match, so the start of this message + # must match. + message = '' + warning_cls = DeprecationWarning + + def setup_method(self): + self.warn_ctx = warnings.catch_warnings(record=True) + self.log = self.warn_ctx.__enter__() + + # Do *not* ignore other DeprecationWarnings. Ignoring warnings + # can give very confusing results because of + # https://bugs.python.org/issue4180 and it is probably simplest to + # try to keep the tests cleanly giving only the right warning type. + # (While checking them set to "error" those are ignored anyway) + # We still have them show up, because otherwise they would be raised + warnings.filterwarnings("always", category=self.warning_cls) + warnings.filterwarnings("always", message=self.message, + category=self.warning_cls) + + def teardown_method(self): + self.warn_ctx.__exit__() + + def assert_deprecated(self, function, num=1, ignore_others=False, + function_fails=False, + exceptions=np._NoValue, + args=(), kwargs={}): + """Test if DeprecationWarnings are given and raised. + + This first checks if the function when called gives `num` + DeprecationWarnings, after that it tries to raise these + DeprecationWarnings and compares them with `exceptions`. + The exceptions can be different for cases where this code path + is simply not anticipated and the exception is replaced. + + Parameters + ---------- + function : callable + The function to test + num : int + Number of DeprecationWarnings to expect. This should normally be 1. + ignore_others : bool + Whether warnings of the wrong type should be ignored (note that + the message is not checked) + function_fails : bool + If the function would normally fail, setting this will check for + warnings inside a try/except block. + exceptions : Exception or tuple of Exceptions + Exception to expect when turning the warnings into an error. + The default checks for DeprecationWarnings. If exceptions is + empty the function is expected to run successfully. + args : tuple + Arguments for `function` + kwargs : dict + Keyword arguments for `function` + """ + __tracebackhide__ = True # Hide traceback for py.test + + # reset the log + self.log[:] = [] + + if exceptions is np._NoValue: + exceptions = (self.warning_cls,) + + try: + function(*args, **kwargs) + except (Exception if function_fails else tuple()): + pass + + # just in case, clear the registry + num_found = 0 + for warning in self.log: + if warning.category is self.warning_cls: + num_found += 1 + elif not ignore_others: + raise AssertionError( + "expected %s but got: %s" % + (self.warning_cls.__name__, warning.category)) + if num is not None and num_found != num: + msg = "%i warnings found but %i expected." 
% (len(self.log), num) + lst = [str(w) for w in self.log] + raise AssertionError("\n".join([msg] + lst)) + + with warnings.catch_warnings(): + warnings.filterwarnings("error", message=self.message, + category=self.warning_cls) + try: + function(*args, **kwargs) + if exceptions != tuple(): + raise AssertionError( + "No error raised during function call") + except exceptions: + if exceptions == tuple(): + raise AssertionError( + "Error raised during function call") + + def assert_not_deprecated(self, function, args=(), kwargs={}): + """Test that warnings are not raised. + + This is just a shorthand for: + + self.assert_deprecated(function, num=0, ignore_others=True, + exceptions=tuple(), args=args, kwargs=kwargs) + """ + self.assert_deprecated(function, num=0, ignore_others=True, + exceptions=tuple(), args=args, kwargs=kwargs) + + +class _VisibleDeprecationTestCase(_DeprecationTestCase): + warning_cls = np.VisibleDeprecationWarning + + +class TestDatetime64Timezone(_DeprecationTestCase): + """Parsing of datetime64 with timezones deprecated in 1.11.0, because + datetime64 is now timezone naive rather than UTC only. + + It will be quite a while before we can remove this, because, at the very + least, a lot of existing code uses the 'Z' modifier to avoid conversion + from local time to UTC, even if otherwise it handles time in a timezone + naive fashion. + """ + def test_string(self): + self.assert_deprecated(np.datetime64, args=('2000-01-01T00+01',)) + self.assert_deprecated(np.datetime64, args=('2000-01-01T00Z',)) + + @pytest.mark.skipif(not _has_pytz, + reason="The pytz module is not available.") + def test_datetime(self): + tz = pytz.timezone('US/Eastern') + dt = datetime.datetime(2000, 1, 1, 0, 0, tzinfo=tz) + self.assert_deprecated(np.datetime64, args=(dt,)) + + +class TestArrayDataAttributeAssignmentDeprecation(_DeprecationTestCase): + """Assigning the 'data' attribute of an ndarray is unsafe as pointed + out in gh-7093. Eventually, such assignment should NOT be allowed, but + in the interests of maintaining backwards compatibility, only a Deprecation- + Warning will be raised instead for the time being to give developers time to + refactor relevant code. + """ + + def test_data_attr_assignment(self): + a = np.arange(10) + b = np.linspace(0, 1, 10) + + self.message = ("Assigning the 'data' attribute is an " + "inherently unsafe operation and will " + "be removed in the future.") + self.assert_deprecated(a.__setattr__, args=('data', b.data)) + + +class TestBinaryReprInsufficientWidthParameterForRepresentation(_DeprecationTestCase): + """ + If a 'width' parameter is passed into ``binary_repr`` that is insufficient to + represent the number in base 2 (positive) or 2's complement (negative) form, + the function used to silently ignore the parameter and return a representation + using the minimal number of bits needed for the form in question. Such behavior + is now considered unsafe from a user perspective and will raise an error in the future. + """ + + def test_insufficient_width_positive(self): + args = (10,) + kwargs = {'width': 2} + + self.message = ("Insufficient bit width provided. This behavior " + "will raise an error in the future.") + self.assert_deprecated(np.binary_repr, args=args, kwargs=kwargs) + + def test_insufficient_width_negative(self): + args = (-5,) + kwargs = {'width': 2} + + self.message = ("Insufficient bit width provided. 
This behavior " + "will raise an error in the future.") + self.assert_deprecated(np.binary_repr, args=args, kwargs=kwargs) + + +class TestDTypeAttributeIsDTypeDeprecation(_DeprecationTestCase): + # Deprecated 2021-01-05, NumPy 1.21 + message = r".*`.dtype` attribute" + + def test_deprecation_dtype_attribute_is_dtype(self): + class dt: + dtype = "f8" + + class vdt(np.void): + dtype = "f,f" + + self.assert_deprecated(lambda: np.dtype(dt)) + self.assert_deprecated(lambda: np.dtype(dt())) + self.assert_deprecated(lambda: np.dtype(vdt)) + self.assert_deprecated(lambda: np.dtype(vdt(1))) + + +class TestTestDeprecated: + def test_assert_deprecated(self): + test_case_instance = _DeprecationTestCase() + test_case_instance.setup_method() + assert_raises(AssertionError, + test_case_instance.assert_deprecated, + lambda: None) + + def foo(): + warnings.warn("foo", category=DeprecationWarning, stacklevel=2) + + test_case_instance.assert_deprecated(foo) + test_case_instance.teardown_method() + + +class TestNonNumericConjugate(_DeprecationTestCase): + """ + Deprecate no-op behavior of ndarray.conjugate on non-numeric dtypes, + which conflicts with the error behavior of np.conjugate. + """ + def test_conjugate(self): + for a in np.array(5), np.array(5j): + self.assert_not_deprecated(a.conjugate) + for a in (np.array('s'), np.array('2016', 'M'), + np.array((1, 2), [('a', int), ('b', int)])): + self.assert_deprecated(a.conjugate) + + +class TestNPY_CHAR(_DeprecationTestCase): + # 2017-05-03, 1.13.0 + def test_npy_char_deprecation(self): + from numpy.core._multiarray_tests import npy_char_deprecation + self.assert_deprecated(npy_char_deprecation) + assert_(npy_char_deprecation() == 'S1') + + +class TestPyArray_AS1D(_DeprecationTestCase): + def test_npy_pyarrayas1d_deprecation(self): + from numpy.core._multiarray_tests import npy_pyarrayas1d_deprecation + assert_raises(NotImplementedError, npy_pyarrayas1d_deprecation) + + +class TestPyArray_AS2D(_DeprecationTestCase): + def test_npy_pyarrayas2d_deprecation(self): + from numpy.core._multiarray_tests import npy_pyarrayas2d_deprecation + assert_raises(NotImplementedError, npy_pyarrayas2d_deprecation) + + +class TestDatetimeEvent(_DeprecationTestCase): + # 2017-08-11, 1.14.0 + def test_3_tuple(self): + for cls in (np.datetime64, np.timedelta64): + # two valid uses - (unit, num) and (unit, num, den, None) + self.assert_not_deprecated(cls, args=(1, ('ms', 2))) + self.assert_not_deprecated(cls, args=(1, ('ms', 2, 1, None))) + + # trying to use the event argument, removed in 1.7.0, is deprecated + # it used to be a uint8 + self.assert_deprecated(cls, args=(1, ('ms', 2, 'event'))) + self.assert_deprecated(cls, args=(1, ('ms', 2, 63))) + self.assert_deprecated(cls, args=(1, ('ms', 2, 1, 'event'))) + self.assert_deprecated(cls, args=(1, ('ms', 2, 1, 63))) + + +class TestTruthTestingEmptyArrays(_DeprecationTestCase): + # 2017-09-25, 1.14.0 + message = '.*truth value of an empty array is ambiguous.*' + + def test_1d(self): + self.assert_deprecated(bool, args=(np.array([]),)) + + def test_2d(self): + self.assert_deprecated(bool, args=(np.zeros((1, 0)),)) + self.assert_deprecated(bool, args=(np.zeros((0, 1)),)) + self.assert_deprecated(bool, args=(np.zeros((0, 0)),)) + + +class TestBincount(_DeprecationTestCase): + # 2017-06-01, 1.14.0 + def test_bincount_minlength(self): + self.assert_deprecated(lambda: np.bincount([1, 2, 3], minlength=None)) + + + +class TestGeneratorSum(_DeprecationTestCase): + # 2018-02-25, 1.15.0 + def test_generator_sum(self): + 
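+ # np.sum of a generator currently falls back to the builtin sum; the 1.15
+ # deprecation steers users to sum(gen) or np.sum(np.fromiter(gen, dtype))
+ # so the eventual array-based behaviour is opted into explicitly.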
self.assert_deprecated(np.sum, args=((i for i in range(5)),)) + + +class TestFromstring(_DeprecationTestCase): + # 2017-10-19, 1.14 + def test_fromstring(self): + self.assert_deprecated(np.fromstring, args=('\x00'*80,)) + + +class TestFromStringAndFileInvalidData(_DeprecationTestCase): + # 2019-06-08, 1.17.0 + # Tests should be moved to real tests when deprecation is done. + message = "string or file could not be read to its end" + + @pytest.mark.parametrize("invalid_str", [",invalid_data", "invalid_sep"]) + def test_deprecate_unparsable_data_file(self, invalid_str): + x = np.array([1.51, 2, 3.51, 4], dtype=float) + + with tempfile.TemporaryFile(mode="w") as f: + x.tofile(f, sep=',', format='%.2f') + f.write(invalid_str) + + f.seek(0) + self.assert_deprecated(lambda: np.fromfile(f, sep=",")) + f.seek(0) + self.assert_deprecated(lambda: np.fromfile(f, sep=",", count=5)) + # Should not raise: + with warnings.catch_warnings(): + warnings.simplefilter("error", DeprecationWarning) + f.seek(0) + res = np.fromfile(f, sep=",", count=4) + assert_array_equal(res, x) + + @pytest.mark.parametrize("invalid_str", [",invalid_data", "invalid_sep"]) + def test_deprecate_unparsable_string(self, invalid_str): + x = np.array([1.51, 2, 3.51, 4], dtype=float) + x_str = "1.51,2,3.51,4{}".format(invalid_str) + + self.assert_deprecated(lambda: np.fromstring(x_str, sep=",")) + self.assert_deprecated(lambda: np.fromstring(x_str, sep=",", count=5)) + + # The C-level API can use not fixed size, but 0 terminated strings, + # so test that as well: + bytestr = x_str.encode("ascii") + self.assert_deprecated(lambda: fromstring_null_term_c_api(bytestr)) + + with assert_warns(DeprecationWarning): + # this is slightly strange, in that fromstring leaves data + # potentially uninitialized (would be good to error when all is + # read, but count is larger then actual data maybe). + res = np.fromstring(x_str, sep=",", count=5) + assert_array_equal(res[:-1], x) + + with warnings.catch_warnings(): + warnings.simplefilter("error", DeprecationWarning) + + # Should not raise: + res = np.fromstring(x_str, sep=",", count=4) + assert_array_equal(res, x) + + +class Test_GetSet_NumericOps(_DeprecationTestCase): + # 2018-09-20, 1.16.0 + def test_get_numeric_ops(self): + from numpy.core._multiarray_tests import getset_numericops + self.assert_deprecated(getset_numericops, num=2) + + # empty kwargs prevents any state actually changing which would break + # other tests. + self.assert_deprecated(np.set_numeric_ops, kwargs={}) + assert_raises(ValueError, np.set_numeric_ops, add='abc') + + +class TestShape1Fields(_DeprecationTestCase): + warning_cls = FutureWarning + + # 2019-05-20, 1.17.0 + def test_shape_1_fields(self): + self.assert_deprecated(np.dtype, args=([('a', int, 1)],)) + + +class TestNonZero(_DeprecationTestCase): + # 2019-05-26, 1.17.0 + def test_zerod(self): + self.assert_deprecated(lambda: np.nonzero(np.array(0))) + self.assert_deprecated(lambda: np.nonzero(np.array(1))) + + +class TestToString(_DeprecationTestCase): + # 2020-03-06 1.19.0 + message = re.escape("tostring() is deprecated. 
Use tobytes() instead.") + + def test_tostring(self): + arr = np.array(list(b"test\xFF"), dtype=np.uint8) + self.assert_deprecated(arr.tostring) + + def test_tostring_matches_tobytes(self): + arr = np.array(list(b"test\xFF"), dtype=np.uint8) + b = arr.tobytes() + with assert_warns(DeprecationWarning): + s = arr.tostring() + assert s == b + + +class TestDTypeCoercion(_DeprecationTestCase): + # 2020-02-06 1.19.0 + message = "Converting .* to a dtype .*is deprecated" + deprecated_types = [ + # The builtin scalar super types: + np.generic, np.flexible, np.number, + np.inexact, np.floating, np.complexfloating, + np.integer, np.unsignedinteger, np.signedinteger, + # character is a deprecated S1 special case: + np.character, + ] + + def test_dtype_coercion(self): + for scalar_type in self.deprecated_types: + self.assert_deprecated(np.dtype, args=(scalar_type,)) + + def test_array_construction(self): + for scalar_type in self.deprecated_types: + self.assert_deprecated(np.array, args=([], scalar_type,)) + + def test_not_deprecated(self): + # All specific types are not deprecated: + for group in np.sctypes.values(): + for scalar_type in group: + self.assert_not_deprecated(np.dtype, args=(scalar_type,)) + + for scalar_type in [type, dict, list, tuple]: + # Typical python types are coerced to object currently: + self.assert_not_deprecated(np.dtype, args=(scalar_type,)) + + +class BuiltInRoundComplexDType(_DeprecationTestCase): + # 2020-03-31 1.19.0 + deprecated_types = [np.csingle, np.cdouble, np.clongdouble] + not_deprecated_types = [ + np.int8, np.int16, np.int32, np.int64, + np.uint8, np.uint16, np.uint32, np.uint64, + np.float16, np.float32, np.float64, + ] + + def test_deprecated(self): + for scalar_type in self.deprecated_types: + scalar = scalar_type(0) + self.assert_deprecated(round, args=(scalar,)) + self.assert_deprecated(round, args=(scalar, 0)) + self.assert_deprecated(round, args=(scalar,), kwargs={'ndigits': 0}) + + def test_not_deprecated(self): + for scalar_type in self.not_deprecated_types: + scalar = scalar_type(0) + self.assert_not_deprecated(round, args=(scalar,)) + self.assert_not_deprecated(round, args=(scalar, 0)) + self.assert_not_deprecated(round, args=(scalar,), kwargs={'ndigits': 0}) + + +class TestIncorrectAdvancedIndexWithEmptyResult(_DeprecationTestCase): + # 2020-05-27, NumPy 1.20.0 + message = "Out of bound index found. This was previously ignored.*" + + @pytest.mark.parametrize("index", [([3, 0],), ([0, 0], [3, 0])]) + def test_empty_subspace(self, index): + # Test for both a single and two/multiple advanced indices. These + # This will raise an IndexError in the future. + arr = np.ones((2, 2, 0)) + self.assert_deprecated(arr.__getitem__, args=(index,)) + self.assert_deprecated(arr.__setitem__, args=(index, 0.)) + + # for this array, the subspace is only empty after applying the slice + arr2 = np.ones((2, 2, 1)) + index2 = (slice(0, 0),) + index + self.assert_deprecated(arr2.__getitem__, args=(index2,)) + self.assert_deprecated(arr2.__setitem__, args=(index2, 0.)) + + def test_empty_index_broadcast_not_deprecated(self): + arr = np.ones((2, 2, 2)) + + index = ([[3], [2]], []) # broadcast to an empty result. 
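+ # [[3], [2]] has shape (2, 1) and broadcasts with the empty list to shape
+ # (2, 0), so nothing is ever selected and the out-of-bound index 3 never
+ # needs checking; hence no deprecation warning.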
+ self.assert_not_deprecated(arr.__getitem__, args=(index,)) + self.assert_not_deprecated(arr.__setitem__, + args=(index, np.empty((2, 0, 2)))) + + +class TestNonExactMatchDeprecation(_DeprecationTestCase): + # 2020-04-22 + def test_non_exact_match(self): + arr = np.array([[3, 6, 6], [4, 5, 1]]) + # misspelt mode check + self.assert_deprecated(lambda: np.ravel_multi_index(arr, (7, 6), mode='Cilp')) + # using completely different word with first character as R + self.assert_deprecated(lambda: np.searchsorted(arr[0], 4, side='Random')) + + +class TestMatrixInOuter(_DeprecationTestCase): + # 2020-05-13 NumPy 1.20.0 + message = (r"add.outer\(\) was passed a numpy matrix as " + r"(first|second) argument.") + + def test_deprecated(self): + arr = np.array([1, 2, 3]) + m = np.array([1, 2, 3]).view(np.matrix) + self.assert_deprecated(np.add.outer, args=(m, m), num=2) + self.assert_deprecated(np.add.outer, args=(arr, m)) + self.assert_deprecated(np.add.outer, args=(m, arr)) + self.assert_not_deprecated(np.add.outer, args=(arr, arr)) + + +class FlatteningConcatenateUnsafeCast(_DeprecationTestCase): + # NumPy 1.20, 2020-09-03 + message = "concatenate with `axis=None` will use same-kind casting" + + def test_deprecated(self): + self.assert_deprecated(np.concatenate, + args=(([0.], [1.]),), + kwargs=dict(axis=None, out=np.empty(2, dtype=np.int64))) + + def test_not_deprecated(self): + self.assert_not_deprecated(np.concatenate, + args=(([0.], [1.]),), + kwargs={'axis': None, 'out': np.empty(2, dtype=np.int64), + 'casting': "unsafe"}) + + with assert_raises(TypeError): + # Tests should notice if the deprecation warning is given first... + np.concatenate(([0.], [1.]), out=np.empty(2, dtype=np.int64), + casting="same_kind") + + +class TestDeprecatedUnpickleObjectScalar(_DeprecationTestCase): + # Deprecated 2020-11-24, NumPy 1.20 + """ + Technically, it should be impossible to create numpy object scalars, + but there was an unpickle path that would in theory allow it. That + path is invalid and must lead to the warning. + """ + message = "Unpickling a scalar with object dtype is deprecated." 
+ + def test_deprecated(self): + ctor = np.core.multiarray.scalar + self.assert_deprecated(lambda: ctor(np.dtype("O"), 1)) + + +class TestSingleElementSignature(_DeprecationTestCase): + # Deprecated 2021-04-01, NumPy 1.21 + message = r"The use of a length 1" + + def test_deprecated(self): + self.assert_deprecated(lambda: np.add(1, 2, signature="d")) + self.assert_deprecated(lambda: np.add(1, 2, sig=(np.dtype("l"),))) + + +class TestCtypesGetter(_DeprecationTestCase): + # Deprecated 2021-05-18, Numpy 1.21.0 + warning_cls = DeprecationWarning + ctypes = np.array([1]).ctypes + + @pytest.mark.parametrize( + "name", ["get_data", "get_shape", "get_strides", "get_as_parameter"] + ) + def test_deprecated(self, name: str) -> None: + func = getattr(self.ctypes, name) + self.assert_deprecated(lambda: func()) + + @pytest.mark.parametrize( + "name", ["data", "shape", "strides", "_as_parameter_"] + ) + def test_not_deprecated(self, name: str) -> None: + self.assert_not_deprecated(lambda: getattr(self.ctypes, name)) + + +PARTITION_DICT = { + "partition method": np.arange(10).partition, + "argpartition method": np.arange(10).argpartition, + "partition function": lambda kth: np.partition(np.arange(10), kth), + "argpartition function": lambda kth: np.argpartition(np.arange(10), kth), +} + + +@pytest.mark.parametrize("func", PARTITION_DICT.values(), ids=PARTITION_DICT) +class TestPartitionBoolIndex(_DeprecationTestCase): + # Deprecated 2021-09-29, NumPy 1.22 + warning_cls = DeprecationWarning + message = "Passing booleans as partition index is deprecated" + + def test_deprecated(self, func): + self.assert_deprecated(lambda: func(True)) + self.assert_deprecated(lambda: func([False, True])) + + def test_not_deprecated(self, func): + self.assert_not_deprecated(lambda: func(1)) + self.assert_not_deprecated(lambda: func([0, 1])) + + +class TestMachAr(_DeprecationTestCase): + # Deprecated 2022-11-22, NumPy 1.25 + warning_cls = DeprecationWarning + + def test_deprecated_module(self): + self.assert_deprecated(lambda: getattr(np.core, "MachAr")) + + +class TestQuantileInterpolationDeprecation(_DeprecationTestCase): + # Deprecated 2021-11-08, NumPy 1.22 + @pytest.mark.parametrize("func", + [np.percentile, np.quantile, np.nanpercentile, np.nanquantile]) + def test_deprecated(self, func): + self.assert_deprecated( + lambda: func([0., 1.], 0., interpolation="linear")) + self.assert_deprecated( + lambda: func([0., 1.], 0., interpolation="nearest")) + + @pytest.mark.parametrize("func", + [np.percentile, np.quantile, np.nanpercentile, np.nanquantile]) + def test_both_passed(self, func): + with warnings.catch_warnings(): + # catch the DeprecationWarning so that it does not raise: + warnings.simplefilter("always", DeprecationWarning) + with pytest.raises(TypeError): + func([0., 1.], 0., interpolation="nearest", method="nearest") + + +class TestMemEventHook(_DeprecationTestCase): + # Deprecated 2021-11-18, NumPy 1.23 + def test_mem_seteventhook(self): + # The actual tests are within the C code in + # multiarray/_multiarray_tests.c.src + import numpy.core._multiarray_tests as ma_tests + with pytest.warns(DeprecationWarning, + match='PyDataMem_SetEventHook is deprecated'): + ma_tests.test_pydatamem_seteventhook_start() + # force an allocation and free of a numpy array + # needs to be larger then limit of small memory cacher in ctors.c + a = np.zeros(1000) + del a + break_cycles() + with pytest.warns(DeprecationWarning, + match='PyDataMem_SetEventHook is deprecated'): + ma_tests.test_pydatamem_seteventhook_end() + + +class 
TestArrayFinalizeNone(_DeprecationTestCase): + message = "Setting __array_finalize__ = None" + + def test_use_none_is_deprecated(self): + # Deprecated way that ndarray itself showed nothing needs finalizing. + class NoFinalize(np.ndarray): + __array_finalize__ = None + + self.assert_deprecated(lambda: np.array(1).view(NoFinalize)) + +class TestAxisNotMAXDIMS(_DeprecationTestCase): + # Deprecated 2022-01-08, NumPy 1.23 + message = r"Using `axis=32` \(MAXDIMS\) is deprecated" + + def test_deprecated(self): + a = np.zeros((1,)*32) + self.assert_deprecated(lambda: np.repeat(a, 1, axis=np.MAXDIMS)) + + +class TestLoadtxtParseIntsViaFloat(_DeprecationTestCase): + # Deprecated 2022-07-03, NumPy 1.23 + # This test can be removed without replacement after the deprecation. + # The tests: + # * numpy/lib/tests/test_loadtxt.py::test_integer_signs + # * lib/tests/test_loadtxt.py::test_implicit_cast_float_to_int_fails + # Have a warning filter that needs to be removed. + message = r"loadtxt\(\): Parsing an integer via a float is deprecated.*" + + @pytest.mark.parametrize("dtype", np.typecodes["AllInteger"]) + def test_deprecated_warning(self, dtype): + with pytest.warns(DeprecationWarning, match=self.message): + np.loadtxt(["10.5"], dtype=dtype) + + @pytest.mark.parametrize("dtype", np.typecodes["AllInteger"]) + def test_deprecated_raised(self, dtype): + # The DeprecationWarning is chained when raised, so test manually: + with warnings.catch_warnings(): + warnings.simplefilter("error", DeprecationWarning) + try: + np.loadtxt(["10.5"], dtype=dtype) + except ValueError as e: + assert isinstance(e.__cause__, DeprecationWarning) + + +class TestScalarConversion(_DeprecationTestCase): + # 2023-01-02, 1.25.0 + def test_float_conversion(self): + self.assert_deprecated(float, args=(np.array([3.14]),)) + + def test_behaviour(self): + b = np.array([[3.14]]) + c = np.zeros(5) + with pytest.warns(DeprecationWarning): + c[0] = b + + +class TestPyIntConversion(_DeprecationTestCase): + message = r".*stop allowing conversion of out-of-bound.*" + + @pytest.mark.parametrize("dtype", np.typecodes["AllInteger"]) + def test_deprecated_scalar(self, dtype): + dtype = np.dtype(dtype) + info = np.iinfo(dtype) + + # Cover the most common creation paths (all end up in the + # same place): + def scalar(value, dtype): + dtype.type(value) + + def assign(value, dtype): + arr = np.array([0, 0, 0], dtype=dtype) + arr[2] = value + + def create(value, dtype): + np.array([value], dtype=dtype) + + for creation_func in [scalar, assign, create]: + try: + self.assert_deprecated( + lambda: creation_func(info.min - 1, dtype)) + except OverflowError: + pass # OverflowErrors always happened also before and are OK. + + try: + self.assert_deprecated( + lambda: creation_func(info.max + 1, dtype)) + except OverflowError: + pass # OverflowErrors always happened also before and are OK. 
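Editorial aside: a minimal, self-contained sketch of the three creation paths that `TestPyIntConversion` exercises, shown here with an in-range value so the snippet runs cleanly on any recent NumPy; only out-of-bound values such as `info.max + 1` or `info.min - 1` trigger the deprecation tested above:

    import numpy as np

    info = np.iinfo(np.int8)                 # int8 covers -128..127
    assert (info.min, info.max) == (-128, 127)

    np.int8(info.max)                        # scalar constructor path
    arr = np.zeros(3, dtype=np.int8)
    arr[2] = info.max                        # item-assignment path
    np.array([info.max], dtype=np.int8)      # array-construction path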
+ + +class TestDeprecatedGlobals(_DeprecationTestCase): + # Deprecated 2022-11-17, NumPy 1.24 + def test_type_aliases(self): + # from builtins + self.assert_deprecated(lambda: np.bool8) + self.assert_deprecated(lambda: np.int0) + self.assert_deprecated(lambda: np.uint0) + self.assert_deprecated(lambda: np.bytes0) + self.assert_deprecated(lambda: np.str0) + self.assert_deprecated(lambda: np.object0) + + +@pytest.mark.parametrize("name", + ["bool", "long", "ulong", "str", "bytes", "object"]) +def test_future_scalar_attributes(name): + # FutureWarning added 2022-11-17, NumPy 1.24, + assert name not in dir(np) # we may want to not add them + with pytest.warns(FutureWarning, + match=f"In the future .*{name}"): + assert not hasattr(np, name) + + # Unfortunately, they are currently still valid via `np.dtype()` + np.dtype(name) + name in np.sctypeDict + + +# Ignore the above future attribute warning for this test. +@pytest.mark.filterwarnings("ignore:In the future:FutureWarning") +class TestRemovedGlobals: + # Removed 2023-01-12, NumPy 1.24.0 + # Not a deprecation, but the large error was added to aid those who missed + # the previous deprecation, and should be removed similarly to one + # (or faster). + @pytest.mark.parametrize("name", + ["object", "bool", "float", "complex", "str", "int"]) + def test_attributeerror_includes_info(self, name): + msg = f".*\n`np.{name}` was a deprecated alias for the builtin" + with pytest.raises(AttributeError, match=msg): + getattr(np, name) + + +class TestDeprecatedFinfo(_DeprecationTestCase): + # Deprecated in NumPy 1.25, 2023-01-16 + def test_deprecated_none(self): + self.assert_deprecated(np.finfo, args=(None,)) + +class TestFromnumeric(_DeprecationTestCase): + # 2023-02-28, 1.25.0 + def test_round_(self): + self.assert_deprecated(lambda: np.round_(np.array([1.5, 2.5, 3.5]))) + + # 2023-03-02, 1.25.0 + def test_cumproduct(self): + self.assert_deprecated(lambda: np.cumproduct(np.array([1, 2, 3]))) + + # 2023-03-02, 1.25.0 + def test_product(self): + self.assert_deprecated(lambda: np.product(np.array([1, 2, 3]))) + + # 2023-03-02, 1.25.0 + def test_sometrue(self): + self.assert_deprecated(lambda: np.sometrue(np.array([True, False]))) + + # 2023-03-02, 1.25.0 + def test_alltrue(self): + self.assert_deprecated(lambda: np.alltrue(np.array([True, False]))) + + +class TestMathAlias(_DeprecationTestCase): + # Deprecated in Numpy 1.25, 2023-04-06 + def test_deprecated_np_math(self): + self.assert_deprecated(lambda: np.math) + + def test_deprecated_np_lib_math(self): + self.assert_deprecated(lambda: np.lib.math) diff --git a/.venv/lib/python3.12/site-packages/numpy/core/tests/test_dlpack.py b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_dlpack.py new file mode 100644 index 00000000..49249bc6 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_dlpack.py @@ -0,0 +1,124 @@ +import sys +import pytest + +import numpy as np +from numpy.testing import assert_array_equal, IS_PYPY + + +class TestDLPack: + @pytest.mark.skipif(IS_PYPY, reason="PyPy can't get refcounts.") + def test_dunder_dlpack_refcount(self): + x = np.arange(5) + y = x.__dlpack__() + assert sys.getrefcount(x) == 3 + del y + assert sys.getrefcount(x) == 2 + + def test_dunder_dlpack_stream(self): + x = np.arange(5) + x.__dlpack__(stream=None) + + with pytest.raises(RuntimeError): + x.__dlpack__(stream=1) + + def test_strides_not_multiple_of_itemsize(self): + dt = np.dtype([('int', np.int32), ('char', np.int8)]) + y = np.zeros((5,), dtype=dt) + z = y['int'] + + with 
pytest.raises(BufferError): + np.from_dlpack(z) + + @pytest.mark.skipif(IS_PYPY, reason="PyPy can't get refcounts.") + def test_from_dlpack_refcount(self): + x = np.arange(5) + y = np.from_dlpack(x) + assert sys.getrefcount(x) == 3 + del y + assert sys.getrefcount(x) == 2 + + @pytest.mark.parametrize("dtype", [ + np.bool_, + np.int8, np.int16, np.int32, np.int64, + np.uint8, np.uint16, np.uint32, np.uint64, + np.float16, np.float32, np.float64, + np.complex64, np.complex128 + ]) + def test_dtype_passthrough(self, dtype): + x = np.arange(5).astype(dtype) + y = np.from_dlpack(x) + + assert y.dtype == x.dtype + assert_array_equal(x, y) + + def test_invalid_dtype(self): + x = np.asarray(np.datetime64('2021-05-27')) + + with pytest.raises(BufferError): + np.from_dlpack(x) + + def test_invalid_byte_swapping(self): + dt = np.dtype('=i8').newbyteorder() + x = np.arange(5, dtype=dt) + + with pytest.raises(BufferError): + np.from_dlpack(x) + + def test_non_contiguous(self): + x = np.arange(25).reshape((5, 5)) + + y1 = x[0] + assert_array_equal(y1, np.from_dlpack(y1)) + + y2 = x[:, 0] + assert_array_equal(y2, np.from_dlpack(y2)) + + y3 = x[1, :] + assert_array_equal(y3, np.from_dlpack(y3)) + + y4 = x[1] + assert_array_equal(y4, np.from_dlpack(y4)) + + y5 = np.diagonal(x).copy() + assert_array_equal(y5, np.from_dlpack(y5)) + + @pytest.mark.parametrize("ndim", range(33)) + def test_higher_dims(self, ndim): + shape = (1,) * ndim + x = np.zeros(shape, dtype=np.float64) + + assert shape == np.from_dlpack(x).shape + + def test_dlpack_device(self): + x = np.arange(5) + assert x.__dlpack_device__() == (1, 0) + y = np.from_dlpack(x) + assert y.__dlpack_device__() == (1, 0) + z = y[::2] + assert z.__dlpack_device__() == (1, 0) + + def dlpack_deleter_exception(self): + x = np.arange(5) + _ = x.__dlpack__() + raise RuntimeError + + def test_dlpack_destructor_exception(self): + with pytest.raises(RuntimeError): + self.dlpack_deleter_exception() + + def test_readonly(self): + x = np.arange(5) + x.flags.writeable = False + with pytest.raises(BufferError): + x.__dlpack__() + + def test_ndim0(self): + x = np.array(1.0) + y = np.from_dlpack(x) + assert_array_equal(x, y) + + def test_size1dims_arrays(self): + x = np.ndarray(dtype='f8', shape=(10, 5, 1), strides=(8, 80, 4), + buffer=np.ones(1000, dtype=np.uint8), order='F') + y = np.from_dlpack(x) + assert_array_equal(x, y) diff --git a/.venv/lib/python3.12/site-packages/numpy/core/tests/test_dtype.py b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_dtype.py new file mode 100644 index 00000000..ac155b67 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_dtype.py @@ -0,0 +1,1906 @@ +import sys +import operator +import pytest +import ctypes +import gc +import types +from typing import Any + +import numpy as np +import numpy.dtypes +from numpy.core._rational_tests import rational +from numpy.core._multiarray_tests import create_custom_field_dtype +from numpy.testing import ( + assert_, assert_equal, assert_array_equal, assert_raises, HAS_REFCOUNT, + IS_PYSTON, _OLD_PROMOTION) +from numpy.compat import pickle +from itertools import permutations +import random + +import hypothesis +from hypothesis.extra import numpy as hynp + + + +def assert_dtype_equal(a, b): + assert_equal(a, b) + assert_equal(hash(a), hash(b), + "two equivalent types do not hash to the same value !") + +def assert_dtype_not_equal(a, b): + assert_(a != b) + assert_(hash(a) != hash(b), + "two different types hash to the same value !") + +class TestBuiltin: + 
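Editorial aside: the DLPack tests in `test_dlpack.py` above revolve around `np.from_dlpack` importing an array through the DLPack protocol without copying. A small usage sketch, independent of the test suite, showing the shared memory:

    import numpy as np

    x = np.arange(5)
    y = np.from_dlpack(x)     # re-import x via the DLPack protocol (no copy)
    assert (x == y).all()

    x[0] = 42                 # mutate the exporting array ...
    assert y[0] == 42         # ... and the imported array sees the change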
@pytest.mark.parametrize('t', [int, float, complex, np.int32, str, object, + np.compat.unicode]) + def test_run(self, t): + """Only test hash runs at all.""" + dt = np.dtype(t) + hash(dt) + + @pytest.mark.parametrize('t', [int, float]) + def test_dtype(self, t): + # Make sure equivalent byte order char hash the same (e.g. < and = on + # little endian) + dt = np.dtype(t) + dt2 = dt.newbyteorder("<") + dt3 = dt.newbyteorder(">") + if dt == dt2: + assert_(dt.byteorder != dt2.byteorder, "bogus test") + assert_dtype_equal(dt, dt2) + else: + assert_(dt.byteorder != dt3.byteorder, "bogus test") + assert_dtype_equal(dt, dt3) + + def test_equivalent_dtype_hashing(self): + # Make sure equivalent dtypes with different type num hash equal + uintp = np.dtype(np.uintp) + if uintp.itemsize == 4: + left = uintp + right = np.dtype(np.uint32) + else: + left = uintp + right = np.dtype(np.ulonglong) + assert_(left == right) + assert_(hash(left) == hash(right)) + + def test_invalid_types(self): + # Make sure invalid type strings raise an error + + assert_raises(TypeError, np.dtype, 'O3') + assert_raises(TypeError, np.dtype, 'O5') + assert_raises(TypeError, np.dtype, 'O7') + assert_raises(TypeError, np.dtype, 'b3') + assert_raises(TypeError, np.dtype, 'h4') + assert_raises(TypeError, np.dtype, 'I5') + assert_raises(TypeError, np.dtype, 'e3') + assert_raises(TypeError, np.dtype, 'f5') + + if np.dtype('g').itemsize == 8 or np.dtype('g').itemsize == 16: + assert_raises(TypeError, np.dtype, 'g12') + elif np.dtype('g').itemsize == 12: + assert_raises(TypeError, np.dtype, 'g16') + + if np.dtype('l').itemsize == 8: + assert_raises(TypeError, np.dtype, 'l4') + assert_raises(TypeError, np.dtype, 'L4') + else: + assert_raises(TypeError, np.dtype, 'l8') + assert_raises(TypeError, np.dtype, 'L8') + + if np.dtype('q').itemsize == 8: + assert_raises(TypeError, np.dtype, 'q4') + assert_raises(TypeError, np.dtype, 'Q4') + else: + assert_raises(TypeError, np.dtype, 'q8') + assert_raises(TypeError, np.dtype, 'Q8') + + def test_richcompare_invalid_dtype_equality(self): + # Make sure objects that cannot be converted to valid + # dtypes results in False/True when compared to valid dtypes. + # Here 7 cannot be converted to dtype. No exceptions should be raised + + assert not np.dtype(np.int32) == 7, "dtype richcompare failed for ==" + assert np.dtype(np.int32) != 7, "dtype richcompare failed for !=" + + @pytest.mark.parametrize( + 'operation', + [operator.le, operator.lt, operator.ge, operator.gt]) + def test_richcompare_invalid_dtype_comparison(self, operation): + # Make sure TypeError is raised for comparison operators + # for invalid dtypes. Here 7 is an invalid dtype. 
+ + with pytest.raises(TypeError): + operation(np.dtype(np.int32), 7) + + @pytest.mark.parametrize("dtype", + ['Bool', 'Bytes0', 'Complex32', 'Complex64', + 'Datetime64', 'Float16', 'Float32', 'Float64', + 'Int8', 'Int16', 'Int32', 'Int64', + 'Object0', 'Str0', 'Timedelta64', + 'UInt8', 'UInt16', 'Uint32', 'UInt32', + 'Uint64', 'UInt64', 'Void0', + "Float128", "Complex128"]) + def test_numeric_style_types_are_invalid(self, dtype): + with assert_raises(TypeError): + np.dtype(dtype) + + def test_remaining_dtypes_with_bad_bytesize(self): + # The np.<name> aliases were deprecated, these probably should be too + assert np.dtype("int0") is np.dtype("intp") + assert np.dtype("uint0") is np.dtype("uintp") + assert np.dtype("bool8") is np.dtype("bool") + assert np.dtype("bytes0") is np.dtype("bytes") + assert np.dtype("str0") is np.dtype("str") + assert np.dtype("object0") is np.dtype("object") + + @pytest.mark.parametrize( + 'value', + ['m8', 'M8', 'datetime64', 'timedelta64', + 'i4, (2,3)f8, f4', 'a3, 3u8, (3,4)a10', + '>f', '<f', '=f', '|f', + ]) + def test_dtype_bytes_str_equivalence(self, value): + bytes_value = value.encode('ascii') + from_bytes = np.dtype(bytes_value) + from_str = np.dtype(value) + assert_dtype_equal(from_bytes, from_str) + + def test_dtype_from_bytes(self): + # Empty bytes object + assert_raises(TypeError, np.dtype, b'') + # Byte order indicator, but no type + assert_raises(TypeError, np.dtype, b'|') + + # Single character with ordinal < NPY_NTYPES returns + # type by index into _builtin_descrs + assert_dtype_equal(np.dtype(bytes([0])), np.dtype('bool')) + assert_dtype_equal(np.dtype(bytes([17])), np.dtype(object)) + + # Single character where value is a valid type code + assert_dtype_equal(np.dtype(b'f'), np.dtype('float32')) + + # Bytes with non-ascii values raise errors + assert_raises(TypeError, np.dtype, b'\xff') + assert_raises(TypeError, np.dtype, b's\xff') + + def test_bad_param(self): + # Can't give a size that's too small + assert_raises(ValueError, np.dtype, + {'names':['f0', 'f1'], + 'formats':['i4', 'i1'], + 'offsets':[0, 4], + 'itemsize':4}) + # If alignment is enabled, the alignment (4) must divide the itemsize + assert_raises(ValueError, np.dtype, + {'names':['f0', 'f1'], + 'formats':['i4', 'i1'], + 'offsets':[0, 4], + 'itemsize':9}, align=True) + # If alignment is enabled, the individual fields must be aligned + assert_raises(ValueError, np.dtype, + {'names':['f0', 'f1'], + 'formats':['i1', 'f4'], + 'offsets':[0, 2]}, align=True) + + def test_field_order_equality(self): + x = np.dtype({'names': ['A', 'B'], + 'formats': ['i4', 'f4'], + 'offsets': [0, 4]}) + y = np.dtype({'names': ['B', 'A'], + 'formats': ['i4', 'f4'], + 'offsets': [4, 0]}) + assert_equal(x == y, False) + # This is an safe cast (not equiv) due to the different names: + assert np.can_cast(x, y, casting="safe") + + @pytest.mark.parametrize( + ["type_char", "char_size", "scalar_type"], + [["U", 4, np.str_], + ["S", 1, np.bytes_]]) + def test_create_string_dtypes_directly( + self, type_char, char_size, scalar_type): + dtype_class = type(np.dtype(type_char)) + + dtype = dtype_class(8) + assert dtype.type is scalar_type + assert dtype.itemsize == 8*char_size + + def test_create_invalid_string_errors(self): + one_too_big = np.iinfo(np.intc).max + 1 + with pytest.raises(TypeError): + type(np.dtype("U"))(one_too_big // 4) + + with pytest.raises(TypeError): + # Code coverage for very large numbers: + type(np.dtype("U"))(np.iinfo(np.intp).max // 4 + 1) + + if one_too_big < sys.maxsize: + with 
pytest.raises(TypeError): + type(np.dtype("S"))(one_too_big) + + with pytest.raises(ValueError): + type(np.dtype("U"))(-1) + + +class TestRecord: + def test_equivalent_record(self): + """Test whether equivalent record dtypes hash the same.""" + a = np.dtype([('yo', int)]) + b = np.dtype([('yo', int)]) + assert_dtype_equal(a, b) + + def test_different_names(self): + # In theory, they may hash the same (collision) ? + a = np.dtype([('yo', int)]) + b = np.dtype([('ye', int)]) + assert_dtype_not_equal(a, b) + + def test_different_titles(self): + # In theory, they may hash the same (collision) ? + a = np.dtype({'names': ['r', 'b'], + 'formats': ['u1', 'u1'], + 'titles': ['Red pixel', 'Blue pixel']}) + b = np.dtype({'names': ['r', 'b'], + 'formats': ['u1', 'u1'], + 'titles': ['RRed pixel', 'Blue pixel']}) + assert_dtype_not_equal(a, b) + + @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") + def test_refcount_dictionary_setting(self): + names = ["name1"] + formats = ["f8"] + titles = ["t1"] + offsets = [0] + d = dict(names=names, formats=formats, titles=titles, offsets=offsets) + refcounts = {k: sys.getrefcount(i) for k, i in d.items()} + np.dtype(d) + refcounts_new = {k: sys.getrefcount(i) for k, i in d.items()} + assert refcounts == refcounts_new + + def test_mutate(self): + # Mutating a dtype should reset the cached hash value. + # NOTE: Mutating should be deprecated, but new API added to replace it. + a = np.dtype([('yo', int)]) + b = np.dtype([('yo', int)]) + c = np.dtype([('ye', int)]) + assert_dtype_equal(a, b) + assert_dtype_not_equal(a, c) + a.names = ['ye'] + assert_dtype_equal(a, c) + assert_dtype_not_equal(a, b) + state = b.__reduce__()[2] + a.__setstate__(state) + assert_dtype_equal(a, b) + assert_dtype_not_equal(a, c) + + def test_mutate_error(self): + # NOTE: Mutating should be deprecated, but new API added to replace it. + a = np.dtype("i,i") + + with pytest.raises(ValueError, match="must replace all names at once"): + a.names = ["f0"] + + with pytest.raises(ValueError, match=".*and not string"): + a.names = ["f0", b"not a unicode name"] + + def test_not_lists(self): + """Test if an appropriate exception is raised when passing bad values to + the dtype constructor. 
+ """ + assert_raises(TypeError, np.dtype, + dict(names={'A', 'B'}, formats=['f8', 'i4'])) + assert_raises(TypeError, np.dtype, + dict(names=['A', 'B'], formats={'f8', 'i4'})) + + def test_aligned_size(self): + # Check that structured dtypes get padded to an aligned size + dt = np.dtype('i4, i1', align=True) + assert_equal(dt.itemsize, 8) + dt = np.dtype([('f0', 'i4'), ('f1', 'i1')], align=True) + assert_equal(dt.itemsize, 8) + dt = np.dtype({'names':['f0', 'f1'], + 'formats':['i4', 'u1'], + 'offsets':[0, 4]}, align=True) + assert_equal(dt.itemsize, 8) + dt = np.dtype({'f0': ('i4', 0), 'f1':('u1', 4)}, align=True) + assert_equal(dt.itemsize, 8) + # Nesting should preserve that alignment + dt1 = np.dtype([('f0', 'i4'), + ('f1', [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')]), + ('f2', 'i1')], align=True) + assert_equal(dt1.itemsize, 20) + dt2 = np.dtype({'names':['f0', 'f1', 'f2'], + 'formats':['i4', + [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')], + 'i1'], + 'offsets':[0, 4, 16]}, align=True) + assert_equal(dt2.itemsize, 20) + dt3 = np.dtype({'f0': ('i4', 0), + 'f1': ([('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')], 4), + 'f2': ('i1', 16)}, align=True) + assert_equal(dt3.itemsize, 20) + assert_equal(dt1, dt2) + assert_equal(dt2, dt3) + # Nesting should preserve packing + dt1 = np.dtype([('f0', 'i4'), + ('f1', [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')]), + ('f2', 'i1')], align=False) + assert_equal(dt1.itemsize, 11) + dt2 = np.dtype({'names':['f0', 'f1', 'f2'], + 'formats':['i4', + [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')], + 'i1'], + 'offsets':[0, 4, 10]}, align=False) + assert_equal(dt2.itemsize, 11) + dt3 = np.dtype({'f0': ('i4', 0), + 'f1': ([('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')], 4), + 'f2': ('i1', 10)}, align=False) + assert_equal(dt3.itemsize, 11) + assert_equal(dt1, dt2) + assert_equal(dt2, dt3) + # Array of subtype should preserve alignment + dt1 = np.dtype([('a', '|i1'), + ('b', [('f0', '<i2'), + ('f1', '<f4')], 2)], align=True) + assert_equal(dt1.descr, [('a', '|i1'), ('', '|V3'), + ('b', [('f0', '<i2'), ('', '|V2'), + ('f1', '<f4')], (2,))]) + + def test_union_struct(self): + # Should be able to create union dtypes + dt = np.dtype({'names':['f0', 'f1', 'f2'], 'formats':['<u4', '<u2', '<u2'], + 'offsets':[0, 0, 2]}, align=True) + assert_equal(dt.itemsize, 4) + a = np.array([3], dtype='<u4').view(dt) + a['f1'] = 10 + a['f2'] = 36 + assert_equal(a['f0'], 10 + 36*256*256) + # Should be able to specify fields out of order + dt = np.dtype({'names':['f0', 'f1', 'f2'], 'formats':['<u4', '<u2', '<u2'], + 'offsets':[4, 0, 2]}, align=True) + assert_equal(dt.itemsize, 8) + # field name should not matter: assignment is by position + dt2 = np.dtype({'names':['f2', 'f0', 'f1'], + 'formats':['<u4', '<u2', '<u2'], + 'offsets':[4, 0, 2]}, align=True) + vals = [(0, 1, 2), (3, 2**15-1, 4)] + vals2 = [(0, 1, 2), (3, 2**15-1, 4)] + a = np.array(vals, dt) + b = np.array(vals2, dt2) + assert_equal(a.astype(dt2), b) + assert_equal(b.astype(dt), a) + assert_equal(a.view(dt2), b) + assert_equal(b.view(dt), a) + # Should not be able to overlap objects with other types + assert_raises(TypeError, np.dtype, + {'names':['f0', 'f1'], + 'formats':['O', 'i1'], + 'offsets':[0, 2]}) + assert_raises(TypeError, np.dtype, + {'names':['f0', 'f1'], + 'formats':['i4', 'O'], + 'offsets':[0, 3]}) + assert_raises(TypeError, np.dtype, + {'names':['f0', 'f1'], + 'formats':[[('a', 'O')], 'i1'], + 'offsets':[0, 2]}) + assert_raises(TypeError, np.dtype, + {'names':['f0', 'f1'], + 'formats':['i4', [('a', 'O')]], + 'offsets':[0, 3]}) + # 
Out of order should still be ok, however + dt = np.dtype({'names':['f0', 'f1'], + 'formats':['i1', 'O'], + 'offsets':[np.dtype('intp').itemsize, 0]}) + + @pytest.mark.parametrize(["obj", "dtype", "expected"], + [([], ("(2)f4,"), np.empty((0, 2), dtype="f4")), + (3, "(3)f4,", [3, 3, 3]), + (np.float64(2), "(2)f4,", [2, 2]), + ([((0, 1), (1, 2)), ((2,),)], '(2,2)f4', None), + (["1", "2"], "(2)i,", None)]) + def test_subarray_list(self, obj, dtype, expected): + dtype = np.dtype(dtype) + res = np.array(obj, dtype=dtype) + + if expected is None: + # iterate the 1-d list to fill the array + expected = np.empty(len(obj), dtype=dtype) + for i in range(len(expected)): + expected[i] = obj[i] + + assert_array_equal(res, expected) + + def test_comma_datetime(self): + dt = np.dtype('M8[D],datetime64[Y],i8') + assert_equal(dt, np.dtype([('f0', 'M8[D]'), + ('f1', 'datetime64[Y]'), + ('f2', 'i8')])) + + def test_from_dictproxy(self): + # Tests for PR #5920 + dt = np.dtype({'names': ['a', 'b'], 'formats': ['i4', 'f4']}) + assert_dtype_equal(dt, np.dtype(dt.fields)) + dt2 = np.dtype((np.void, dt.fields)) + assert_equal(dt2.fields, dt.fields) + + def test_from_dict_with_zero_width_field(self): + # Regression test for #6430 / #2196 + dt = np.dtype([('val1', np.float32, (0,)), ('val2', int)]) + dt2 = np.dtype({'names': ['val1', 'val2'], + 'formats': [(np.float32, (0,)), int]}) + + assert_dtype_equal(dt, dt2) + assert_equal(dt.fields['val1'][0].itemsize, 0) + assert_equal(dt.itemsize, dt.fields['val2'][0].itemsize) + + def test_bool_commastring(self): + d = np.dtype('?,?,?') # raises? + assert_equal(len(d.names), 3) + for n in d.names: + assert_equal(d.fields[n][0], np.dtype('?')) + + def test_nonint_offsets(self): + # gh-8059 + def make_dtype(off): + return np.dtype({'names': ['A'], 'formats': ['i4'], + 'offsets': [off]}) + + assert_raises(TypeError, make_dtype, 'ASD') + assert_raises(OverflowError, make_dtype, 2**70) + assert_raises(TypeError, make_dtype, 2.3) + assert_raises(ValueError, make_dtype, -10) + + # no errors here: + dt = make_dtype(np.uint32(0)) + np.zeros(1, dtype=dt)[0].item() + + def test_fields_by_index(self): + dt = np.dtype([('a', np.int8), ('b', np.float32, 3)]) + assert_dtype_equal(dt[0], np.dtype(np.int8)) + assert_dtype_equal(dt[1], np.dtype((np.float32, 3))) + assert_dtype_equal(dt[-1], dt[1]) + assert_dtype_equal(dt[-2], dt[0]) + assert_raises(IndexError, lambda: dt[-3]) + + assert_raises(TypeError, operator.getitem, dt, 3.0) + + assert_equal(dt[1], dt[np.int8(1)]) + + @pytest.mark.parametrize('align_flag',[False, True]) + def test_multifield_index(self, align_flag): + # indexing with a list produces subfields + # the align flag should be preserved + dt = np.dtype([ + (('title', 'col1'), '<U20'), ('A', '<f8'), ('B', '<f8') + ], align=align_flag) + + dt_sub = dt[['B', 'col1']] + assert_equal( + dt_sub, + np.dtype({ + 'names': ['B', 'col1'], + 'formats': ['<f8', '<U20'], + 'offsets': [88, 0], + 'titles': [None, 'title'], + 'itemsize': 96 + }) + ) + assert_equal(dt_sub.isalignedstruct, align_flag) + + dt_sub = dt[['B']] + assert_equal( + dt_sub, + np.dtype({ + 'names': ['B'], + 'formats': ['<f8'], + 'offsets': [88], + 'itemsize': 96 + }) + ) + assert_equal(dt_sub.isalignedstruct, align_flag) + + dt_sub = dt[[]] + assert_equal( + dt_sub, + np.dtype({ + 'names': [], + 'formats': [], + 'offsets': [], + 'itemsize': 96 + }) + ) + assert_equal(dt_sub.isalignedstruct, align_flag) + + assert_raises(TypeError, operator.getitem, dt, ()) + assert_raises(TypeError, operator.getitem, dt, [1, 2, 3]) + 
assert_raises(TypeError, operator.getitem, dt, ['col1', 2]) + assert_raises(KeyError, operator.getitem, dt, ['fake']) + assert_raises(KeyError, operator.getitem, dt, ['title']) + assert_raises(ValueError, operator.getitem, dt, ['col1', 'col1']) + + def test_partial_dict(self): + # 'names' is missing + assert_raises(ValueError, np.dtype, + {'formats': ['i4', 'i4'], 'f0': ('i4', 0), 'f1':('i4', 4)}) + + def test_fieldless_views(self): + a = np.zeros(2, dtype={'names':[], 'formats':[], 'offsets':[], + 'itemsize':8}) + assert_raises(ValueError, a.view, np.dtype([])) + + d = np.dtype((np.dtype([]), 10)) + assert_equal(d.shape, (10,)) + assert_equal(d.itemsize, 0) + assert_equal(d.base, np.dtype([])) + + arr = np.fromiter((() for i in range(10)), []) + assert_equal(arr.dtype, np.dtype([])) + assert_raises(ValueError, np.frombuffer, b'', dtype=[]) + assert_equal(np.frombuffer(b'', dtype=[], count=2), + np.empty(2, dtype=[])) + + assert_raises(ValueError, np.dtype, ([], 'f8')) + assert_raises(ValueError, np.zeros(1, dtype='i4').view, []) + + assert_equal(np.zeros(2, dtype=[]) == np.zeros(2, dtype=[]), + np.ones(2, dtype=bool)) + + assert_equal(np.zeros((1, 2), dtype=[]) == a, + np.ones((1, 2), dtype=bool)) + + def test_nonstructured_with_object(self): + # See gh-23277, the dtype here thinks it contain objects, if the + # assert about that fails, the test becomes meaningless (which is OK) + arr = np.recarray((0,), dtype="O") + assert arr.dtype.names is None # no fields + assert arr.dtype.hasobject # but claims to contain objects + del arr # the deletion failed previously. + + +class TestSubarray: + def test_single_subarray(self): + a = np.dtype((int, (2))) + b = np.dtype((int, (2,))) + assert_dtype_equal(a, b) + + assert_equal(type(a.subdtype[1]), tuple) + assert_equal(type(b.subdtype[1]), tuple) + + def test_equivalent_record(self): + """Test whether equivalent subarray dtypes hash the same.""" + a = np.dtype((int, (2, 3))) + b = np.dtype((int, (2, 3))) + assert_dtype_equal(a, b) + + def test_nonequivalent_record(self): + """Test whether different subarray dtypes hash differently.""" + a = np.dtype((int, (2, 3))) + b = np.dtype((int, (3, 2))) + assert_dtype_not_equal(a, b) + + a = np.dtype((int, (2, 3))) + b = np.dtype((int, (2, 2))) + assert_dtype_not_equal(a, b) + + a = np.dtype((int, (1, 2, 3))) + b = np.dtype((int, (1, 2))) + assert_dtype_not_equal(a, b) + + def test_shape_equal(self): + """Test some data types that are equal""" + assert_dtype_equal(np.dtype('f8'), np.dtype(('f8', tuple()))) + # FutureWarning during deprecation period; after it is passed this + # should instead check that "(1)f8" == "1f8" == ("f8", 1). 
+ with pytest.warns(FutureWarning): + assert_dtype_equal(np.dtype('f8'), np.dtype(('f8', 1))) + assert_dtype_equal(np.dtype((int, 2)), np.dtype((int, (2,)))) + assert_dtype_equal(np.dtype(('<f4', (3, 2))), np.dtype(('<f4', (3, 2)))) + d = ([('a', 'f4', (1, 2)), ('b', 'f8', (3, 1))], (3, 2)) + assert_dtype_equal(np.dtype(d), np.dtype(d)) + + def test_shape_simple(self): + """Test some simple cases that shouldn't be equal""" + assert_dtype_not_equal(np.dtype('f8'), np.dtype(('f8', (1,)))) + assert_dtype_not_equal(np.dtype(('f8', (1,))), np.dtype(('f8', (1, 1)))) + assert_dtype_not_equal(np.dtype(('f4', (3, 2))), np.dtype(('f4', (2, 3)))) + + def test_shape_monster(self): + """Test some more complicated cases that shouldn't be equal""" + assert_dtype_not_equal( + np.dtype(([('a', 'f4', (2, 1)), ('b', 'f8', (1, 3))], (2, 2))), + np.dtype(([('a', 'f4', (1, 2)), ('b', 'f8', (1, 3))], (2, 2)))) + assert_dtype_not_equal( + np.dtype(([('a', 'f4', (2, 1)), ('b', 'f8', (1, 3))], (2, 2))), + np.dtype(([('a', 'f4', (2, 1)), ('b', 'i8', (1, 3))], (2, 2)))) + assert_dtype_not_equal( + np.dtype(([('a', 'f4', (2, 1)), ('b', 'f8', (1, 3))], (2, 2))), + np.dtype(([('e', 'f8', (1, 3)), ('d', 'f4', (2, 1))], (2, 2)))) + assert_dtype_not_equal( + np.dtype(([('a', [('a', 'i4', 6)], (2, 1)), ('b', 'f8', (1, 3))], (2, 2))), + np.dtype(([('a', [('a', 'u4', 6)], (2, 1)), ('b', 'f8', (1, 3))], (2, 2)))) + + def test_shape_sequence(self): + # Any sequence of integers should work as shape, but the result + # should be a tuple (immutable) of base type integers. + a = np.array([1, 2, 3], dtype=np.int16) + l = [1, 2, 3] + # Array gets converted + dt = np.dtype([('a', 'f4', a)]) + assert_(isinstance(dt['a'].shape, tuple)) + assert_(isinstance(dt['a'].shape[0], int)) + # List gets converted + dt = np.dtype([('a', 'f4', l)]) + assert_(isinstance(dt['a'].shape, tuple)) + # + + class IntLike: + def __index__(self): + return 3 + + def __int__(self): + # (a PyNumber_Check fails without __int__) + return 3 + + dt = np.dtype([('a', 'f4', IntLike())]) + assert_(isinstance(dt['a'].shape, tuple)) + assert_(isinstance(dt['a'].shape[0], int)) + dt = np.dtype([('a', 'f4', (IntLike(),))]) + assert_(isinstance(dt['a'].shape, tuple)) + assert_(isinstance(dt['a'].shape[0], int)) + + def test_shape_matches_ndim(self): + dt = np.dtype([('a', 'f4', ())]) + assert_equal(dt['a'].shape, ()) + assert_equal(dt['a'].ndim, 0) + + dt = np.dtype([('a', 'f4')]) + assert_equal(dt['a'].shape, ()) + assert_equal(dt['a'].ndim, 0) + + dt = np.dtype([('a', 'f4', 4)]) + assert_equal(dt['a'].shape, (4,)) + assert_equal(dt['a'].ndim, 1) + + dt = np.dtype([('a', 'f4', (1, 2, 3))]) + assert_equal(dt['a'].shape, (1, 2, 3)) + assert_equal(dt['a'].ndim, 3) + + def test_shape_invalid(self): + # Check that the shape is valid. 
+ max_int = np.iinfo(np.intc).max + max_intp = np.iinfo(np.intp).max + # Too large values (the datatype is part of this) + assert_raises(ValueError, np.dtype, [('a', 'f4', max_int // 4 + 1)]) + assert_raises(ValueError, np.dtype, [('a', 'f4', max_int + 1)]) + assert_raises(ValueError, np.dtype, [('a', 'f4', (max_int, 2))]) + # Takes a different code path (fails earlier: + assert_raises(ValueError, np.dtype, [('a', 'f4', max_intp + 1)]) + # Negative values + assert_raises(ValueError, np.dtype, [('a', 'f4', -1)]) + assert_raises(ValueError, np.dtype, [('a', 'f4', (-1, -1))]) + + def test_alignment(self): + #Check that subarrays are aligned + t1 = np.dtype('(1,)i4', align=True) + t2 = np.dtype('2i4', align=True) + assert_equal(t1.alignment, t2.alignment) + + def test_aligned_empty(self): + # Mainly regression test for gh-19696: construction failed completely + dt = np.dtype([], align=True) + assert dt == np.dtype([]) + dt = np.dtype({"names": [], "formats": [], "itemsize": 0}, align=True) + assert dt == np.dtype([]) + + def test_subarray_base_item(self): + arr = np.ones(3, dtype=[("f", "i", 3)]) + # Extracting the field "absorbs" the subarray into a view: + assert arr["f"].base is arr + # Extract the structured item, and then check the tuple component: + item = arr.item(0) + assert type(item) is tuple and len(item) == 1 + assert item[0].base is arr + + def test_subarray_cast_copies(self): + # Older versions of NumPy did NOT copy, but they got the ownership + # wrong (not actually knowing the correct base!). Versions since 1.21 + # (I think) crashed fairly reliable. This defines the correct behavior + # as a copy. Keeping the ownership would be possible (but harder) + arr = np.ones(3, dtype=[("f", "i", 3)]) + cast = arr.astype(object) + for fields in cast: + assert type(fields) == tuple and len(fields) == 1 + subarr = fields[0] + assert subarr.base is None + assert subarr.flags.owndata + + +def iter_struct_object_dtypes(): + """ + Iterates over a few complex dtypes and object pattern which + fill the array with a given object (defaults to a singleton). + + Yields + ------ + dtype : dtype + pattern : tuple + Structured tuple for use with `np.array`. + count : int + Number of objects stored in the dtype. + singleton : object + A singleton object. The returned pattern is constructed so that + all objects inside the datatype are set to the singleton. + """ + obj = object() + + dt = np.dtype([('b', 'O', (2, 3))]) + p = ([[obj] * 3] * 2,) + yield pytest.param(dt, p, 6, obj, id="<subarray>") + + dt = np.dtype([('a', 'i4'), ('b', 'O', (2, 3))]) + p = (0, [[obj] * 3] * 2) + yield pytest.param(dt, p, 6, obj, id="<subarray in field>") + + dt = np.dtype([('a', 'i4'), + ('b', [('ba', 'O'), ('bb', 'i1')], (2, 3))]) + p = (0, [[(obj, 0)] * 3] * 2) + yield pytest.param(dt, p, 6, obj, id="<structured subarray 1>") + + dt = np.dtype([('a', 'i4'), + ('b', [('ba', 'O'), ('bb', 'O')], (2, 3))]) + p = (0, [[(obj, obj)] * 3] * 2) + yield pytest.param(dt, p, 12, obj, id="<structured subarray 2>") + + +@pytest.mark.skipif( + sys.version_info >= (3, 12), + reason="Python 3.12 has immortal refcounts, this test will no longer " + "work. See gh-23986" +) +@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") +class TestStructuredObjectRefcounting: + """These tests cover various uses of complicated structured types which + include objects and thus require reference counting. 
+ """ + @pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'], + iter_struct_object_dtypes()) + @pytest.mark.parametrize(["creation_func", "creation_obj"], [ + pytest.param(np.empty, None, + # None is probably used for too many things + marks=pytest.mark.skip("unreliable due to python's behaviour")), + (np.ones, 1), + (np.zeros, 0)]) + def test_structured_object_create_delete(self, dt, pat, count, singleton, + creation_func, creation_obj): + """Structured object reference counting in creation and deletion""" + # The test assumes that 0, 1, and None are singletons. + gc.collect() + before = sys.getrefcount(creation_obj) + arr = creation_func(3, dt) + + now = sys.getrefcount(creation_obj) + assert now - before == count * 3 + del arr + now = sys.getrefcount(creation_obj) + assert now == before + + @pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'], + iter_struct_object_dtypes()) + def test_structured_object_item_setting(self, dt, pat, count, singleton): + """Structured object reference counting for simple item setting""" + one = 1 + + gc.collect() + before = sys.getrefcount(singleton) + arr = np.array([pat] * 3, dt) + assert sys.getrefcount(singleton) - before == count * 3 + # Fill with `1` and check that it was replaced correctly: + before2 = sys.getrefcount(one) + arr[...] = one + after2 = sys.getrefcount(one) + assert after2 - before2 == count * 3 + del arr + gc.collect() + assert sys.getrefcount(one) == before2 + assert sys.getrefcount(singleton) == before + + @pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'], + iter_struct_object_dtypes()) + @pytest.mark.parametrize( + ['shape', 'index', 'items_changed'], + [((3,), ([0, 2],), 2), + ((3, 2), ([0, 2], slice(None)), 4), + ((3, 2), ([0, 2], [1]), 2), + ((3,), ([True, False, True]), 2)]) + def test_structured_object_indexing(self, shape, index, items_changed, + dt, pat, count, singleton): + """Structured object reference counting for advanced indexing.""" + # Use two small negative values (should be singletons, but less likely + # to run into race-conditions). This failed in some threaded envs + # When using 0 and 1. If it fails again, should remove all explicit + # checks, and rely on `pytest-leaks` reference count checker only. + val0 = -4 + val1 = -5 + + arr = np.full(shape, val0, dt) + + gc.collect() + before_val0 = sys.getrefcount(val0) + before_val1 = sys.getrefcount(val1) + # Test item getting: + part = arr[index] + after_val0 = sys.getrefcount(val0) + assert after_val0 - before_val0 == count * items_changed + del part + # Test item setting: + arr[index] = val1 + gc.collect() + after_val0 = sys.getrefcount(val0) + after_val1 = sys.getrefcount(val1) + assert before_val0 - after_val0 == count * items_changed + assert after_val1 - before_val1 == count * items_changed + + @pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'], + iter_struct_object_dtypes()) + def test_structured_object_take_and_repeat(self, dt, pat, count, singleton): + """Structured object reference counting for specialized functions. + The older functions such as take and repeat use different code paths + then item setting (when writing this). 
+ """ + indices = [0, 1] + + arr = np.array([pat] * 3, dt) + gc.collect() + before = sys.getrefcount(singleton) + res = arr.take(indices) + after = sys.getrefcount(singleton) + assert after - before == count * 2 + new = res.repeat(10) + gc.collect() + after_repeat = sys.getrefcount(singleton) + assert after_repeat - after == count * 2 * 10 + + +class TestStructuredDtypeSparseFields: + """Tests subarray fields which contain sparse dtypes so that + not all memory is used by the dtype work. Such dtype's should + leave the underlying memory unchanged. + """ + dtype = np.dtype([('a', {'names':['aa', 'ab'], 'formats':['f', 'f'], + 'offsets':[0, 4]}, (2, 3))]) + sparse_dtype = np.dtype([('a', {'names':['ab'], 'formats':['f'], + 'offsets':[4]}, (2, 3))]) + + def test_sparse_field_assignment(self): + arr = np.zeros(3, self.dtype) + sparse_arr = arr.view(self.sparse_dtype) + + sparse_arr[...] = np.finfo(np.float32).max + # dtype is reduced when accessing the field, so shape is (3, 2, 3): + assert_array_equal(arr["a"]["aa"], np.zeros((3, 2, 3))) + + def test_sparse_field_assignment_fancy(self): + # Fancy assignment goes to the copyswap function for complex types: + arr = np.zeros(3, self.dtype) + sparse_arr = arr.view(self.sparse_dtype) + + sparse_arr[[0, 1, 2]] = np.finfo(np.float32).max + # dtype is reduced when accessing the field, so shape is (3, 2, 3): + assert_array_equal(arr["a"]["aa"], np.zeros((3, 2, 3))) + + +class TestMonsterType: + """Test deeply nested subtypes.""" + + def test1(self): + simple1 = np.dtype({'names': ['r', 'b'], 'formats': ['u1', 'u1'], + 'titles': ['Red pixel', 'Blue pixel']}) + a = np.dtype([('yo', int), ('ye', simple1), + ('yi', np.dtype((int, (3, 2))))]) + b = np.dtype([('yo', int), ('ye', simple1), + ('yi', np.dtype((int, (3, 2))))]) + assert_dtype_equal(a, b) + + c = np.dtype([('yo', int), ('ye', simple1), + ('yi', np.dtype((a, (3, 2))))]) + d = np.dtype([('yo', int), ('ye', simple1), + ('yi', np.dtype((a, (3, 2))))]) + assert_dtype_equal(c, d) + + @pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking") + def test_list_recursion(self): + l = list() + l.append(('f', l)) + with pytest.raises(RecursionError): + np.dtype(l) + + @pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking") + def test_tuple_recursion(self): + d = np.int32 + for i in range(100000): + d = (d, (1,)) + with pytest.raises(RecursionError): + np.dtype(d) + + @pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking") + def test_dict_recursion(self): + d = dict(names=['self'], formats=[None], offsets=[0]) + d['formats'][0] = d + with pytest.raises(RecursionError): + np.dtype(d) + + +class TestMetadata: + def test_no_metadata(self): + d = np.dtype(int) + assert_(d.metadata is None) + + def test_metadata_takes_dict(self): + d = np.dtype(int, metadata={'datum': 1}) + assert_(d.metadata == {'datum': 1}) + + def test_metadata_rejects_nondict(self): + assert_raises(TypeError, np.dtype, int, metadata='datum') + assert_raises(TypeError, np.dtype, int, metadata=1) + assert_raises(TypeError, np.dtype, int, metadata=None) + + def test_nested_metadata(self): + d = np.dtype([('a', np.dtype(int, metadata={'datum': 1}))]) + assert_(d['a'].metadata == {'datum': 1}) + + def test_base_metadata_copied(self): + d = np.dtype((np.void, np.dtype('i4,i4', metadata={'datum': 1}))) + assert_(d.metadata == {'datum': 1}) + +class TestString: + def test_complex_dtype_str(self): + dt = np.dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)), + ('rtile', '>f4', (64, 36))], (3,)), 
+ ('bottom', [('bleft', ('>f4', (8, 64)), (1,)), + ('bright', '>f4', (8, 36))])]) + assert_equal(str(dt), + "[('top', [('tiles', ('>f4', (64, 64)), (1,)), " + "('rtile', '>f4', (64, 36))], (3,)), " + "('bottom', [('bleft', ('>f4', (8, 64)), (1,)), " + "('bright', '>f4', (8, 36))])]") + + # If the sticky aligned flag is set to True, it makes the + # str() function use a dict representation with an 'aligned' flag + dt = np.dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)), + ('rtile', '>f4', (64, 36))], + (3,)), + ('bottom', [('bleft', ('>f4', (8, 64)), (1,)), + ('bright', '>f4', (8, 36))])], + align=True) + assert_equal(str(dt), + "{'names': ['top', 'bottom']," + " 'formats': [([('tiles', ('>f4', (64, 64)), (1,)), " + "('rtile', '>f4', (64, 36))], (3,)), " + "[('bleft', ('>f4', (8, 64)), (1,)), " + "('bright', '>f4', (8, 36))]]," + " 'offsets': [0, 76800]," + " 'itemsize': 80000," + " 'aligned': True}") + with np.printoptions(legacy='1.21'): + assert_equal(str(dt), + "{'names':['top','bottom'], " + "'formats':[([('tiles', ('>f4', (64, 64)), (1,)), " + "('rtile', '>f4', (64, 36))], (3,))," + "[('bleft', ('>f4', (8, 64)), (1,)), " + "('bright', '>f4', (8, 36))]], " + "'offsets':[0,76800], " + "'itemsize':80000, " + "'aligned':True}") + assert_equal(np.dtype(eval(str(dt))), dt) + + dt = np.dtype({'names': ['r', 'g', 'b'], 'formats': ['u1', 'u1', 'u1'], + 'offsets': [0, 1, 2], + 'titles': ['Red pixel', 'Green pixel', 'Blue pixel']}) + assert_equal(str(dt), + "[(('Red pixel', 'r'), 'u1'), " + "(('Green pixel', 'g'), 'u1'), " + "(('Blue pixel', 'b'), 'u1')]") + + dt = np.dtype({'names': ['rgba', 'r', 'g', 'b'], + 'formats': ['<u4', 'u1', 'u1', 'u1'], + 'offsets': [0, 0, 1, 2], + 'titles': ['Color', 'Red pixel', + 'Green pixel', 'Blue pixel']}) + assert_equal(str(dt), + "{'names': ['rgba', 'r', 'g', 'b']," + " 'formats': ['<u4', 'u1', 'u1', 'u1']," + " 'offsets': [0, 0, 1, 2]," + " 'titles': ['Color', 'Red pixel', " + "'Green pixel', 'Blue pixel']," + " 'itemsize': 4}") + + dt = np.dtype({'names': ['r', 'b'], 'formats': ['u1', 'u1'], + 'offsets': [0, 2], + 'titles': ['Red pixel', 'Blue pixel']}) + assert_equal(str(dt), + "{'names': ['r', 'b']," + " 'formats': ['u1', 'u1']," + " 'offsets': [0, 2]," + " 'titles': ['Red pixel', 'Blue pixel']," + " 'itemsize': 3}") + + dt = np.dtype([('a', '<m8[D]'), ('b', '<M8[us]')]) + assert_equal(str(dt), + "[('a', '<m8[D]'), ('b', '<M8[us]')]") + + def test_repr_structured(self): + dt = np.dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)), + ('rtile', '>f4', (64, 36))], (3,)), + ('bottom', [('bleft', ('>f4', (8, 64)), (1,)), + ('bright', '>f4', (8, 36))])]) + assert_equal(repr(dt), + "dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)), " + "('rtile', '>f4', (64, 36))], (3,)), " + "('bottom', [('bleft', ('>f4', (8, 64)), (1,)), " + "('bright', '>f4', (8, 36))])])") + + dt = np.dtype({'names': ['r', 'g', 'b'], 'formats': ['u1', 'u1', 'u1'], + 'offsets': [0, 1, 2], + 'titles': ['Red pixel', 'Green pixel', 'Blue pixel']}, + align=True) + assert_equal(repr(dt), + "dtype([(('Red pixel', 'r'), 'u1'), " + "(('Green pixel', 'g'), 'u1'), " + "(('Blue pixel', 'b'), 'u1')], align=True)") + + def test_repr_structured_not_packed(self): + dt = np.dtype({'names': ['rgba', 'r', 'g', 'b'], + 'formats': ['<u4', 'u1', 'u1', 'u1'], + 'offsets': [0, 0, 1, 2], + 'titles': ['Color', 'Red pixel', + 'Green pixel', 'Blue pixel']}, align=True) + assert_equal(repr(dt), + "dtype({'names': ['rgba', 'r', 'g', 'b']," + " 'formats': ['<u4', 'u1', 'u1', 'u1']," + " 'offsets': [0, 0, 1, 2]," + " 
'titles': ['Color', 'Red pixel', " + "'Green pixel', 'Blue pixel']," + " 'itemsize': 4}, align=True)") + + dt = np.dtype({'names': ['r', 'b'], 'formats': ['u1', 'u1'], + 'offsets': [0, 2], + 'titles': ['Red pixel', 'Blue pixel'], + 'itemsize': 4}) + assert_equal(repr(dt), + "dtype({'names': ['r', 'b'], " + "'formats': ['u1', 'u1'], " + "'offsets': [0, 2], " + "'titles': ['Red pixel', 'Blue pixel'], " + "'itemsize': 4})") + + def test_repr_structured_datetime(self): + dt = np.dtype([('a', '<M8[D]'), ('b', '<m8[us]')]) + assert_equal(repr(dt), + "dtype([('a', '<M8[D]'), ('b', '<m8[us]')])") + + def test_repr_str_subarray(self): + dt = np.dtype(('<i2', (1,))) + assert_equal(repr(dt), "dtype(('<i2', (1,)))") + assert_equal(str(dt), "('<i2', (1,))") + + def test_base_dtype_with_object_type(self): + # Issue gh-2798, should not error. + np.array(['a'], dtype="O").astype(("O", [("name", "O")])) + + def test_empty_string_to_object(self): + # Pull request #4722 + np.array(["", ""]).astype(object) + + def test_void_subclass_unsized(self): + dt = np.dtype(np.record) + assert_equal(repr(dt), "dtype('V')") + assert_equal(str(dt), '|V0') + assert_equal(dt.name, 'record') + + def test_void_subclass_sized(self): + dt = np.dtype((np.record, 2)) + assert_equal(repr(dt), "dtype('V2')") + assert_equal(str(dt), '|V2') + assert_equal(dt.name, 'record16') + + def test_void_subclass_fields(self): + dt = np.dtype((np.record, [('a', '<u2')])) + assert_equal(repr(dt), "dtype((numpy.record, [('a', '<u2')]))") + assert_equal(str(dt), "(numpy.record, [('a', '<u2')])") + assert_equal(dt.name, 'record16') + + +class TestDtypeAttributeDeletion: + + def test_dtype_non_writable_attributes_deletion(self): + dt = np.dtype(np.double) + attr = ["subdtype", "descr", "str", "name", "base", "shape", + "isbuiltin", "isnative", "isalignedstruct", "fields", + "metadata", "hasobject"] + + for s in attr: + assert_raises(AttributeError, delattr, dt, s) + + def test_dtype_writable_attributes_deletion(self): + dt = np.dtype(np.double) + attr = ["names"] + for s in attr: + assert_raises(AttributeError, delattr, dt, s) + + +class TestDtypeAttributes: + def test_descr_has_trailing_void(self): + # see gh-6359 + dtype = np.dtype({ + 'names': ['A', 'B'], + 'formats': ['f4', 'f4'], + 'offsets': [0, 8], + 'itemsize': 16}) + new_dtype = np.dtype(dtype.descr) + assert_equal(new_dtype.itemsize, 16) + + def test_name_dtype_subclass(self): + # Ticket #4357 + class user_def_subcls(np.void): + pass + assert_equal(np.dtype(user_def_subcls).name, 'user_def_subcls') + + def test_zero_stride(self): + arr = np.ones(1, dtype="i8") + arr = np.broadcast_to(arr, 10) + assert arr.strides == (0,) + with pytest.raises(ValueError): + arr.dtype = "i1" + +class TestDTypeMakeCanonical: + def check_canonical(self, dtype, canonical): + """ + Check most properties relevant to "canonical" versions of a dtype, + which is mainly native byte order for datatypes supporting this. + + The main work is checking structured dtypes with fields, where we + reproduce most the actual logic used in the C-code. 
+ """ + assert type(dtype) is type(canonical) + + # a canonical DType should always have equivalent casting (both ways) + assert np.can_cast(dtype, canonical, casting="equiv") + assert np.can_cast(canonical, dtype, casting="equiv") + # a canonical dtype (and its fields) is always native (checks fields): + assert canonical.isnative + + # Check that canonical of canonical is the same (no casting): + assert np.result_type(canonical) == canonical + + if not dtype.names: + # The flags currently never change for unstructured dtypes + assert dtype.flags == canonical.flags + return + + # Must have all the needs API flag set: + assert dtype.flags & 0b10000 + + # Check that the fields are identical (including titles): + assert dtype.fields.keys() == canonical.fields.keys() + + def aligned_offset(offset, alignment): + # round up offset: + return - (-offset // alignment) * alignment + + totalsize = 0 + max_alignment = 1 + for name in dtype.names: + # each field is also canonical: + new_field_descr = canonical.fields[name][0] + self.check_canonical(dtype.fields[name][0], new_field_descr) + + # Must have the "inherited" object related flags: + expected = 0b11011 & new_field_descr.flags + assert (canonical.flags & expected) == expected + + if canonical.isalignedstruct: + totalsize = aligned_offset(totalsize, new_field_descr.alignment) + max_alignment = max(new_field_descr.alignment, max_alignment) + + assert canonical.fields[name][1] == totalsize + # if a title exists, they must match (otherwise empty tuple): + assert dtype.fields[name][2:] == canonical.fields[name][2:] + + totalsize += new_field_descr.itemsize + + if canonical.isalignedstruct: + totalsize = aligned_offset(totalsize, max_alignment) + assert canonical.itemsize == totalsize + assert canonical.alignment == max_alignment + + def test_simple(self): + dt = np.dtype(">i4") + assert np.result_type(dt).isnative + assert np.result_type(dt).num == dt.num + + # dtype with empty space: + struct_dt = np.dtype(">i4,<i1,i8,V3")[["f0", "f2"]] + canonical = np.result_type(struct_dt) + assert canonical.itemsize == 4+8 + assert canonical.isnative + + # aligned struct dtype with empty space: + struct_dt = np.dtype(">i1,<i4,i8,V3", align=True)[["f0", "f2"]] + canonical = np.result_type(struct_dt) + assert canonical.isalignedstruct + assert canonical.itemsize == np.dtype("i8").alignment + 8 + assert canonical.isnative + + def test_object_flag_not_inherited(self): + # The following dtype still indicates "object", because its included + # in the unaccessible space (maybe this could change at some point): + arr = np.ones(3, "i,O,i")[["f0", "f2"]] + assert arr.dtype.hasobject + canonical_dt = np.result_type(arr.dtype) + assert not canonical_dt.hasobject + + @pytest.mark.slow + @hypothesis.given(dtype=hynp.nested_dtypes()) + def test_make_canonical_hypothesis(self, dtype): + canonical = np.result_type(dtype) + self.check_canonical(dtype, canonical) + # result_type with two arguments should always give identical results: + two_arg_result = np.result_type(dtype, dtype) + assert np.can_cast(two_arg_result, canonical, casting="no") + + @pytest.mark.slow + @hypothesis.given( + dtype=hypothesis.extra.numpy.array_dtypes( + subtype_strategy=hypothesis.extra.numpy.array_dtypes(), + min_size=5, max_size=10, allow_subarrays=True)) + def test_structured(self, dtype): + # Pick 4 of the fields at random. This will leave empty space in the + # dtype (since we do not canonicalize it here). 
+ field_subset = random.sample(dtype.names, k=4) + dtype_with_empty_space = dtype[field_subset] + assert dtype_with_empty_space.itemsize == dtype.itemsize + canonicalized = np.result_type(dtype_with_empty_space) + self.check_canonical(dtype_with_empty_space, canonicalized) + # promotion with two arguments should always give identical results: + two_arg_result = np.promote_types( + dtype_with_empty_space, dtype_with_empty_space) + assert np.can_cast(two_arg_result, canonicalized, casting="no") + + # Ensure that we also check aligned struct (check the opposite, in + # case hypothesis grows support for `align`. Then repeat the test: + dtype_aligned = np.dtype(dtype.descr, align=not dtype.isalignedstruct) + dtype_with_empty_space = dtype_aligned[field_subset] + assert dtype_with_empty_space.itemsize == dtype_aligned.itemsize + canonicalized = np.result_type(dtype_with_empty_space) + self.check_canonical(dtype_with_empty_space, canonicalized) + # promotion with two arguments should always give identical results: + two_arg_result = np.promote_types( + dtype_with_empty_space, dtype_with_empty_space) + assert np.can_cast(two_arg_result, canonicalized, casting="no") + + +class TestPickling: + + def check_pickling(self, dtype): + for proto in range(pickle.HIGHEST_PROTOCOL + 1): + buf = pickle.dumps(dtype, proto) + # The dtype pickling itself pickles `np.dtype` if it is pickled + # as a singleton `dtype` should be stored in the buffer: + assert b"_DType_reconstruct" not in buf + assert b"dtype" in buf + pickled = pickle.loads(buf) + assert_equal(pickled, dtype) + assert_equal(pickled.descr, dtype.descr) + if dtype.metadata is not None: + assert_equal(pickled.metadata, dtype.metadata) + # Check the reconstructed dtype is functional + x = np.zeros(3, dtype=dtype) + y = np.zeros(3, dtype=pickled) + assert_equal(x, y) + assert_equal(x[0], y[0]) + + @pytest.mark.parametrize('t', [int, float, complex, np.int32, str, object, + np.compat.unicode, bool]) + def test_builtin(self, t): + self.check_pickling(np.dtype(t)) + + def test_structured(self): + dt = np.dtype(([('a', '>f4', (2, 1)), ('b', '<f8', (1, 3))], (2, 2))) + self.check_pickling(dt) + + def test_structured_aligned(self): + dt = np.dtype('i4, i1', align=True) + self.check_pickling(dt) + + def test_structured_unaligned(self): + dt = np.dtype('i4, i1', align=False) + self.check_pickling(dt) + + def test_structured_padded(self): + dt = np.dtype({ + 'names': ['A', 'B'], + 'formats': ['f4', 'f4'], + 'offsets': [0, 8], + 'itemsize': 16}) + self.check_pickling(dt) + + def test_structured_titles(self): + dt = np.dtype({'names': ['r', 'b'], + 'formats': ['u1', 'u1'], + 'titles': ['Red pixel', 'Blue pixel']}) + self.check_pickling(dt) + + @pytest.mark.parametrize('base', ['m8', 'M8']) + @pytest.mark.parametrize('unit', ['', 'Y', 'M', 'W', 'D', 'h', 'm', 's', + 'ms', 'us', 'ns', 'ps', 'fs', 'as']) + def test_datetime(self, base, unit): + dt = np.dtype('%s[%s]' % (base, unit) if unit else base) + self.check_pickling(dt) + if unit: + dt = np.dtype('%s[7%s]' % (base, unit)) + self.check_pickling(dt) + + def test_metadata(self): + dt = np.dtype(int, metadata={'datum': 1}) + self.check_pickling(dt) + + @pytest.mark.parametrize("DType", + [type(np.dtype(t)) for t in np.typecodes['All']] + + [np.dtype(rational), np.dtype]) + def test_pickle_types(self, DType): + # Check that DTypes (the classes/types) roundtrip when pickling + for proto in range(pickle.HIGHEST_PROTOCOL + 1): + roundtrip_DType = pickle.loads(pickle.dumps(DType, proto)) + assert roundtrip_DType is 
DType + + +class TestPromotion: + """Test cases related to more complex DType promotions. Further promotion + tests are defined in `test_numeric.py` + """ + @np._no_nep50_warning() + @pytest.mark.parametrize(["other", "expected", "expected_weak"], + [(2**16-1, np.complex64, None), + (2**32-1, np.complex128, np.complex64), + (np.float16(2), np.complex64, None), + (np.float32(2), np.complex64, None), + (np.longdouble(2), np.complex64, np.clongdouble), + # Base of the double value to sidestep any rounding issues: + (np.longdouble(np.nextafter(1.7e308, 0.)), + np.complex128, np.clongdouble), + # Additionally use "nextafter" so the cast can't round down: + (np.longdouble(np.nextafter(1.7e308, np.inf)), + np.clongdouble, None), + # repeat for complex scalars: + (np.complex64(2), np.complex64, None), + (np.clongdouble(2), np.complex64, np.clongdouble), + # Base of the double value to sidestep any rounding issues: + (np.clongdouble(np.nextafter(1.7e308, 0.) * 1j), + np.complex128, np.clongdouble), + # Additionally use "nextafter" so the cast can't round down: + (np.clongdouble(np.nextafter(1.7e308, np.inf)), + np.clongdouble, None), + ]) + def test_complex_other_value_based(self, + weak_promotion, other, expected, expected_weak): + if weak_promotion and expected_weak is not None: + expected = expected_weak + + # This would change if we modify the value based promotion + min_complex = np.dtype(np.complex64) + + res = np.result_type(other, min_complex) + assert res == expected + # Check the same for a simple ufunc call that uses the same logic: + res = np.minimum(other, np.ones(3, dtype=min_complex)).dtype + assert res == expected + + @pytest.mark.parametrize(["other", "expected"], + [(np.bool_, np.complex128), + (np.int64, np.complex128), + (np.float16, np.complex64), + (np.float32, np.complex64), + (np.float64, np.complex128), + (np.longdouble, np.clongdouble), + (np.complex64, np.complex64), + (np.complex128, np.complex128), + (np.clongdouble, np.clongdouble), + ]) + def test_complex_scalar_value_based(self, other, expected): + # This would change if we modify the value based promotion + complex_scalar = 1j + + res = np.result_type(other, complex_scalar) + assert res == expected + # Check the same for a simple ufunc call that uses the same logic: + res = np.minimum(np.ones(3, dtype=other), complex_scalar).dtype + assert res == expected + + def test_complex_pyscalar_promote_rational(self): + with pytest.raises(TypeError, + match=r".* no common DType exists for the given inputs"): + np.result_type(1j, rational) + + with pytest.raises(TypeError, + match=r".* no common DType exists for the given inputs"): + np.result_type(1j, rational(1, 2)) + + @pytest.mark.parametrize("val", [2, 2**32, 2**63, 2**64, 2*100]) + def test_python_integer_promotion(self, val): + # If we only path scalars (mainly python ones!), the result must take + # into account that the integer may be considered int32, int64, uint64, + # or object depending on the input value. So test those paths! 
+ expected_dtype = np.result_type(np.array(val).dtype, np.array(0).dtype) + assert np.result_type(val, 0) == expected_dtype + # For completeness sake, also check with a NumPy scalar as second arg: + assert np.result_type(val, np.int8(0)) == expected_dtype + + @pytest.mark.parametrize(["other", "expected"], + [(1, rational), (1., np.float64)]) + @np._no_nep50_warning() + def test_float_int_pyscalar_promote_rational( + self, weak_promotion, other, expected): + # Note that rationals are a bit akward as they promote with float64 + # or default ints, but not float16 or uint8/int8 (which looks + # inconsistent here). The new promotion fixes this (partially?) + if not weak_promotion and type(other) == float: + # The float version, checks float16 in the legacy path, which fails + # the integer version seems to check int8 (also), so it can + # pass. + with pytest.raises(TypeError, + match=r".* do not have a common DType"): + np.result_type(other, rational) + else: + assert np.result_type(other, rational) == expected + + assert np.result_type(other, rational(1, 2)) == expected + + @pytest.mark.parametrize(["dtypes", "expected"], [ + # These promotions are not associative/commutative: + ([np.uint16, np.int16, np.float16], np.float32), + ([np.uint16, np.int8, np.float16], np.float32), + ([np.uint8, np.int16, np.float16], np.float32), + # The following promotions are not ambiguous, but cover code + # paths of abstract promotion (no particular logic being tested) + ([1, 1, np.float64], np.float64), + ([1, 1., np.complex128], np.complex128), + ([1, 1j, np.float64], np.complex128), + ([1., 1., np.int64], np.float64), + ([1., 1j, np.float64], np.complex128), + ([1j, 1j, np.float64], np.complex128), + ([1, True, np.bool_], np.int_), + ]) + def test_permutations_do_not_influence_result(self, dtypes, expected): + # Tests that most permutations do not influence the result. In the + # above some uint and int combintations promote to a larger integer + # type, which would then promote to a larger than necessary float. 
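# (Editor-added sketch of the non-associativity noted above; outputs assume the
# promotion rules this test file exercises.)
import numpy as np
# Pairwise, left to right: uint16 with int16 needs int32, and int32 with
# float16 then promotes all the way to float64 ...
print(np.result_type(np.result_type(np.uint16, np.int16), np.float16))  # float64
# ... whereas promoting all three at once keeps the smaller float32:
print(np.result_type(np.uint16, np.int16, np.float16))                  # float32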
+ for perm in permutations(dtypes): + assert np.result_type(*perm) == expected + + +def test_rational_dtype(): + # test for bug gh-5719 + a = np.array([1111], dtype=rational).astype + assert_raises(OverflowError, a, 'int8') + + # test that dtype detection finds user-defined types + x = rational(1) + assert_equal(np.array([x,x]).dtype, np.dtype(rational)) + + +def test_dtypes_are_true(): + # test for gh-6294 + assert bool(np.dtype('f8')) + assert bool(np.dtype('i8')) + assert bool(np.dtype([('a', 'i8'), ('b', 'f4')])) + + +def test_invalid_dtype_string(): + # test for gh-10440 + assert_raises(TypeError, np.dtype, 'f8,i8,[f8,i8]') + assert_raises(TypeError, np.dtype, 'Fl\xfcgel') + + +def test_keyword_argument(): + # test for https://github.com/numpy/numpy/pull/16574#issuecomment-642660971 + assert np.dtype(dtype=np.float64) == np.dtype(np.float64) + + +def test_ulong_dtype(): + # test for gh-21063 + assert np.dtype("ulong") == np.dtype(np.uint) + + +class TestFromDTypeAttribute: + def test_simple(self): + class dt: + dtype = np.dtype("f8") + + assert np.dtype(dt) == np.float64 + assert np.dtype(dt()) == np.float64 + + @pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking") + def test_recursion(self): + class dt: + pass + + dt.dtype = dt + with pytest.raises(RecursionError): + np.dtype(dt) + + dt_instance = dt() + dt_instance.dtype = dt + with pytest.raises(RecursionError): + np.dtype(dt_instance) + + def test_void_subtype(self): + class dt(np.void): + # This code path is fully untested before, so it is unclear + # what this should be useful for. Note that if np.void is used + # numpy will think we are deallocating a base type [1.17, 2019-02]. + dtype = np.dtype("f,f") + + np.dtype(dt) + np.dtype(dt(1)) + + @pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking") + def test_void_subtype_recursion(self): + class vdt(np.void): + pass + + vdt.dtype = vdt + + with pytest.raises(RecursionError): + np.dtype(vdt) + + with pytest.raises(RecursionError): + np.dtype(vdt(1)) + + +class TestDTypeClasses: + @pytest.mark.parametrize("dtype", list(np.typecodes['All']) + [rational]) + def test_basic_dtypes_subclass_properties(self, dtype): + # Note: Except for the isinstance and type checks, these attributes + # are considered currently private and may change. + dtype = np.dtype(dtype) + assert isinstance(dtype, np.dtype) + assert type(dtype) is not np.dtype + if dtype.type.__name__ != "rational": + dt_name = type(dtype).__name__.lower().removesuffix("dtype") + if dt_name == "uint" or dt_name == "int": + # The scalar names has a `c` attached because "int" is Python + # int and that is long... 
+ dt_name += "c" + sc_name = dtype.type.__name__ + assert dt_name == sc_name.strip("_") + assert type(dtype).__module__ == "numpy.dtypes" + + assert getattr(numpy.dtypes, type(dtype).__name__) is type(dtype) + else: + assert type(dtype).__name__ == "dtype[rational]" + assert type(dtype).__module__ == "numpy" + + assert not type(dtype)._abstract + + # the flexible dtypes and datetime/timedelta have additional parameters + # which are more than just storage information, these would need to be + # given when creating a dtype: + parametric = (np.void, np.str_, np.bytes_, np.datetime64, np.timedelta64) + if dtype.type not in parametric: + assert not type(dtype)._parametric + assert type(dtype)() is dtype + else: + assert type(dtype)._parametric + with assert_raises(TypeError): + type(dtype)() + + def test_dtype_superclass(self): + assert type(np.dtype) is not type + assert isinstance(np.dtype, type) + + assert type(np.dtype).__name__ == "_DTypeMeta" + assert type(np.dtype).__module__ == "numpy" + assert np.dtype._abstract + + def test_is_numeric(self): + all_codes = set(np.typecodes['All']) + numeric_codes = set(np.typecodes['AllInteger'] + + np.typecodes['AllFloat'] + '?') + non_numeric_codes = all_codes - numeric_codes + + for code in numeric_codes: + assert type(np.dtype(code))._is_numeric + + for code in non_numeric_codes: + assert not type(np.dtype(code))._is_numeric + + @pytest.mark.parametrize("int_", ["UInt", "Int"]) + @pytest.mark.parametrize("size", [8, 16, 32, 64]) + def test_integer_alias_names(self, int_, size): + DType = getattr(numpy.dtypes, f"{int_}{size}DType") + sctype = getattr(numpy, f"{int_.lower()}{size}") + assert DType.type is sctype + assert DType.__name__.lower().removesuffix("dtype") == sctype.__name__ + + @pytest.mark.parametrize("name", + ["Half", "Float", "Double", "CFloat", "CDouble"]) + def test_float_alias_names(self, name): + with pytest.raises(AttributeError): + getattr(numpy.dtypes, name + "DType") is numpy.dtypes.Float16DType + + +class TestFromCTypes: + + @staticmethod + def check(ctype, dtype): + dtype = np.dtype(dtype) + assert_equal(np.dtype(ctype), dtype) + assert_equal(np.dtype(ctype()), dtype) + + def test_array(self): + c8 = ctypes.c_uint8 + self.check( 3 * c8, (np.uint8, (3,))) + self.check( 1 * c8, (np.uint8, (1,))) + self.check( 0 * c8, (np.uint8, (0,))) + self.check(1 * (3 * c8), ((np.uint8, (3,)), (1,))) + self.check(3 * (1 * c8), ((np.uint8, (1,)), (3,))) + + def test_padded_structure(self): + class PaddedStruct(ctypes.Structure): + _fields_ = [ + ('a', ctypes.c_uint8), + ('b', ctypes.c_uint16) + ] + expected = np.dtype([ + ('a', np.uint8), + ('b', np.uint16) + ], align=True) + self.check(PaddedStruct, expected) + + def test_bit_fields(self): + class BitfieldStruct(ctypes.Structure): + _fields_ = [ + ('a', ctypes.c_uint8, 7), + ('b', ctypes.c_uint8, 1) + ] + assert_raises(TypeError, np.dtype, BitfieldStruct) + assert_raises(TypeError, np.dtype, BitfieldStruct()) + + def test_pointer(self): + p_uint8 = ctypes.POINTER(ctypes.c_uint8) + assert_raises(TypeError, np.dtype, p_uint8) + + def test_void_pointer(self): + self.check(ctypes.c_void_p, np.uintp) + + def test_union(self): + class Union(ctypes.Union): + _fields_ = [ + ('a', ctypes.c_uint8), + ('b', ctypes.c_uint16), + ] + expected = np.dtype(dict( + names=['a', 'b'], + formats=[np.uint8, np.uint16], + offsets=[0, 0], + itemsize=2 + )) + self.check(Union, expected) + + def test_union_with_struct_packed(self): + class Struct(ctypes.Structure): + _pack_ = 1 + _fields_ = [ + ('one', 
ctypes.c_uint8), + ('two', ctypes.c_uint32) + ] + + class Union(ctypes.Union): + _fields_ = [ + ('a', ctypes.c_uint8), + ('b', ctypes.c_uint16), + ('c', ctypes.c_uint32), + ('d', Struct), + ] + expected = np.dtype(dict( + names=['a', 'b', 'c', 'd'], + formats=['u1', np.uint16, np.uint32, [('one', 'u1'), ('two', np.uint32)]], + offsets=[0, 0, 0, 0], + itemsize=ctypes.sizeof(Union) + )) + self.check(Union, expected) + + def test_union_packed(self): + class Struct(ctypes.Structure): + _fields_ = [ + ('one', ctypes.c_uint8), + ('two', ctypes.c_uint32) + ] + _pack_ = 1 + class Union(ctypes.Union): + _pack_ = 1 + _fields_ = [ + ('a', ctypes.c_uint8), + ('b', ctypes.c_uint16), + ('c', ctypes.c_uint32), + ('d', Struct), + ] + expected = np.dtype(dict( + names=['a', 'b', 'c', 'd'], + formats=['u1', np.uint16, np.uint32, [('one', 'u1'), ('two', np.uint32)]], + offsets=[0, 0, 0, 0], + itemsize=ctypes.sizeof(Union) + )) + self.check(Union, expected) + + def test_packed_structure(self): + class PackedStructure(ctypes.Structure): + _pack_ = 1 + _fields_ = [ + ('a', ctypes.c_uint8), + ('b', ctypes.c_uint16) + ] + expected = np.dtype([ + ('a', np.uint8), + ('b', np.uint16) + ]) + self.check(PackedStructure, expected) + + def test_large_packed_structure(self): + class PackedStructure(ctypes.Structure): + _pack_ = 2 + _fields_ = [ + ('a', ctypes.c_uint8), + ('b', ctypes.c_uint16), + ('c', ctypes.c_uint8), + ('d', ctypes.c_uint16), + ('e', ctypes.c_uint32), + ('f', ctypes.c_uint32), + ('g', ctypes.c_uint8) + ] + expected = np.dtype(dict( + formats=[np.uint8, np.uint16, np.uint8, np.uint16, np.uint32, np.uint32, np.uint8 ], + offsets=[0, 2, 4, 6, 8, 12, 16], + names=['a', 'b', 'c', 'd', 'e', 'f', 'g'], + itemsize=18)) + self.check(PackedStructure, expected) + + def test_big_endian_structure_packed(self): + class BigEndStruct(ctypes.BigEndianStructure): + _fields_ = [ + ('one', ctypes.c_uint8), + ('two', ctypes.c_uint32) + ] + _pack_ = 1 + expected = np.dtype([('one', 'u1'), ('two', '>u4')]) + self.check(BigEndStruct, expected) + + def test_little_endian_structure_packed(self): + class LittleEndStruct(ctypes.LittleEndianStructure): + _fields_ = [ + ('one', ctypes.c_uint8), + ('two', ctypes.c_uint32) + ] + _pack_ = 1 + expected = np.dtype([('one', 'u1'), ('two', '<u4')]) + self.check(LittleEndStruct, expected) + + def test_little_endian_structure(self): + class PaddedStruct(ctypes.LittleEndianStructure): + _fields_ = [ + ('a', ctypes.c_uint8), + ('b', ctypes.c_uint16) + ] + expected = np.dtype([ + ('a', '<B'), + ('b', '<H') + ], align=True) + self.check(PaddedStruct, expected) + + def test_big_endian_structure(self): + class PaddedStruct(ctypes.BigEndianStructure): + _fields_ = [ + ('a', ctypes.c_uint8), + ('b', ctypes.c_uint16) + ] + expected = np.dtype([ + ('a', '>B'), + ('b', '>H') + ], align=True) + self.check(PaddedStruct, expected) + + def test_simple_endian_types(self): + self.check(ctypes.c_uint16.__ctype_le__, np.dtype('<u2')) + self.check(ctypes.c_uint16.__ctype_be__, np.dtype('>u2')) + self.check(ctypes.c_uint8.__ctype_le__, np.dtype('u1')) + self.check(ctypes.c_uint8.__ctype_be__, np.dtype('u1')) + + all_types = set(np.typecodes['All']) + all_pairs = permutations(all_types, 2) + + @pytest.mark.parametrize("pair", all_pairs) + def test_pairs(self, pair): + """ + Check that np.dtype('x,y') matches [np.dtype('x'), np.dtype('y')] + Example: np.dtype('d,I') -> dtype([('f0', '<f8'), ('f1', '<u4')]) + """ + # gh-5645: check that np.dtype('i,L') can be used + pair_type = np.dtype('{},{}'.format(*pair)) + 
expected = np.dtype([('f0', pair[0]), ('f1', pair[1])]) + assert_equal(pair_type, expected) + + +class TestUserDType: + @pytest.mark.leaks_references(reason="dynamically creates custom dtype.") + def test_custom_structured_dtype(self): + class mytype: + pass + + blueprint = np.dtype([("field", object)]) + dt = create_custom_field_dtype(blueprint, mytype, 0) + assert dt.type == mytype + # We cannot (currently) *create* this dtype with `np.dtype` because + # mytype does not inherit from `np.generic`. This seems like an + # unnecessary restriction, but one that has been around forever: + assert np.dtype(mytype) == np.dtype("O") + + def test_custom_structured_dtype_errors(self): + class mytype: + pass + + blueprint = np.dtype([("field", object)]) + + with pytest.raises(ValueError): + # Tests what happens if fields are unset during creation + # which is currently rejected due to the containing object + # (see PyArray_RegisterDataType). + create_custom_field_dtype(blueprint, mytype, 1) + + with pytest.raises(RuntimeError): + # Tests that a dtype must have its type field set up to np.dtype + # or in this case a builtin instance. + create_custom_field_dtype(blueprint, mytype, 2) + + +class TestClassGetItem: + def test_dtype(self) -> None: + alias = np.dtype[Any] + assert isinstance(alias, types.GenericAlias) + assert alias.__origin__ is np.dtype + + @pytest.mark.parametrize("code", np.typecodes["All"]) + def test_dtype_subclass(self, code: str) -> None: + cls = type(np.dtype(code)) + alias = cls[Any] + assert isinstance(alias, types.GenericAlias) + assert alias.__origin__ is cls + + @pytest.mark.parametrize("arg_len", range(4)) + def test_subscript_tuple(self, arg_len: int) -> None: + arg_tup = (Any,) * arg_len + if arg_len == 1: + assert np.dtype[arg_tup] + else: + with pytest.raises(TypeError): + np.dtype[arg_tup] + + def test_subscript_scalar(self) -> None: + assert np.dtype[Any] + + +def test_result_type_integers_and_unitless_timedelta64(): + # Regression test for gh-20077. The following call of `result_type` + # would cause a seg. fault. + td = np.timedelta64(4) + result = np.result_type(0, td) + assert_dtype_equal(result, td.dtype) + + +def test_creating_dtype_with_dtype_class_errors(): + # Regression test for #25031, calling `np.dtype` with itself segfaulted. 
+ with pytest.raises(TypeError, match="Cannot convert np.dtype into a"): + np.array(np.ones(10), dtype=np.dtype) diff --git a/.venv/lib/python3.12/site-packages/numpy/core/tests/test_einsum.py b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_einsum.py new file mode 100644 index 00000000..702be248 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_einsum.py @@ -0,0 +1,1248 @@ +import itertools +import sys +import platform + +import pytest + +import numpy as np +from numpy.testing import ( + assert_, assert_equal, assert_array_equal, assert_almost_equal, + assert_raises, suppress_warnings, assert_raises_regex, assert_allclose + ) + +try: + COMPILERS = np.show_config(mode="dicts")["Compilers"] + USING_CLANG_CL = COMPILERS["c"]["name"] == "clang-cl" +except TypeError: + USING_CLANG_CL = False + +# Setup for optimize einsum +chars = 'abcdefghij' +sizes = np.array([2, 3, 4, 5, 4, 3, 2, 6, 5, 4, 3]) +global_size_dict = dict(zip(chars, sizes)) + + +class TestEinsum: + def test_einsum_errors(self): + for do_opt in [True, False]: + # Need enough arguments + assert_raises(ValueError, np.einsum, optimize=do_opt) + assert_raises(ValueError, np.einsum, "", optimize=do_opt) + + # subscripts must be a string + assert_raises(TypeError, np.einsum, 0, 0, optimize=do_opt) + + # out parameter must be an array + assert_raises(TypeError, np.einsum, "", 0, out='test', + optimize=do_opt) + + # order parameter must be a valid order + assert_raises(ValueError, np.einsum, "", 0, order='W', + optimize=do_opt) + + # casting parameter must be a valid casting + assert_raises(ValueError, np.einsum, "", 0, casting='blah', + optimize=do_opt) + + # dtype parameter must be a valid dtype + assert_raises(TypeError, np.einsum, "", 0, dtype='bad_data_type', + optimize=do_opt) + + # other keyword arguments are rejected + assert_raises(TypeError, np.einsum, "", 0, bad_arg=0, + optimize=do_opt) + + # issue 4528 revealed a segfault with this call + assert_raises(TypeError, np.einsum, *(None,)*63, optimize=do_opt) + + # number of operands must match count in subscripts string + assert_raises(ValueError, np.einsum, "", 0, 0, optimize=do_opt) + assert_raises(ValueError, np.einsum, ",", 0, [0], [0], + optimize=do_opt) + assert_raises(ValueError, np.einsum, ",", [0], optimize=do_opt) + + # can't have more subscripts than dimensions in the operand + assert_raises(ValueError, np.einsum, "i", 0, optimize=do_opt) + assert_raises(ValueError, np.einsum, "ij", [0, 0], optimize=do_opt) + assert_raises(ValueError, np.einsum, "...i", 0, optimize=do_opt) + assert_raises(ValueError, np.einsum, "i...j", [0, 0], optimize=do_opt) + assert_raises(ValueError, np.einsum, "i...", 0, optimize=do_opt) + assert_raises(ValueError, np.einsum, "ij...", [0, 0], optimize=do_opt) + + # invalid ellipsis + assert_raises(ValueError, np.einsum, "i..", [0, 0], optimize=do_opt) + assert_raises(ValueError, np.einsum, ".i...", [0, 0], optimize=do_opt) + assert_raises(ValueError, np.einsum, "j->..j", [0, 0], optimize=do_opt) + assert_raises(ValueError, np.einsum, "j->.j...", [0, 0], optimize=do_opt) + + # invalid subscript character + assert_raises(ValueError, np.einsum, "i%...", [0, 0], optimize=do_opt) + assert_raises(ValueError, np.einsum, "...j$", [0, 0], optimize=do_opt) + assert_raises(ValueError, np.einsum, "i->&", [0, 0], optimize=do_opt) + + # output subscripts must appear in input + assert_raises(ValueError, np.einsum, "i->ij", [0, 0], optimize=do_opt) + + # output subscripts may only be specified once + assert_raises(ValueError, 
np.einsum, "ij->jij", [[0, 0], [0, 0]], + optimize=do_opt) + + # dimensions much match when being collapsed + assert_raises(ValueError, np.einsum, "ii", + np.arange(6).reshape(2, 3), optimize=do_opt) + assert_raises(ValueError, np.einsum, "ii->i", + np.arange(6).reshape(2, 3), optimize=do_opt) + + # broadcasting to new dimensions must be enabled explicitly + assert_raises(ValueError, np.einsum, "i", np.arange(6).reshape(2, 3), + optimize=do_opt) + assert_raises(ValueError, np.einsum, "i->i", [[0, 1], [0, 1]], + out=np.arange(4).reshape(2, 2), optimize=do_opt) + with assert_raises_regex(ValueError, "'b'"): + # gh-11221 - 'c' erroneously appeared in the error message + a = np.ones((3, 3, 4, 5, 6)) + b = np.ones((3, 4, 5)) + np.einsum('aabcb,abc', a, b) + + # Check order kwarg, asanyarray allows 1d to pass through + assert_raises(ValueError, np.einsum, "i->i", np.arange(6).reshape(-1, 1), + optimize=do_opt, order='d') + + def test_einsum_object_errors(self): + # Exceptions created by object arithmetic should + # successfully propagate + + class CustomException(Exception): + pass + + class DestructoBox: + + def __init__(self, value, destruct): + self._val = value + self._destruct = destruct + + def __add__(self, other): + tmp = self._val + other._val + if tmp >= self._destruct: + raise CustomException + else: + self._val = tmp + return self + + def __radd__(self, other): + if other == 0: + return self + else: + return self.__add__(other) + + def __mul__(self, other): + tmp = self._val * other._val + if tmp >= self._destruct: + raise CustomException + else: + self._val = tmp + return self + + def __rmul__(self, other): + if other == 0: + return self + else: + return self.__mul__(other) + + a = np.array([DestructoBox(i, 5) for i in range(1, 10)], + dtype='object').reshape(3, 3) + + # raised from unbuffered_loop_nop1_ndim2 + assert_raises(CustomException, np.einsum, "ij->i", a) + + # raised from unbuffered_loop_nop1_ndim3 + b = np.array([DestructoBox(i, 100) for i in range(0, 27)], + dtype='object').reshape(3, 3, 3) + assert_raises(CustomException, np.einsum, "i...k->...", b) + + # raised from unbuffered_loop_nop2_ndim2 + b = np.array([DestructoBox(i, 55) for i in range(1, 4)], + dtype='object') + assert_raises(CustomException, np.einsum, "ij, j", a, b) + + # raised from unbuffered_loop_nop2_ndim3 + assert_raises(CustomException, np.einsum, "ij, jh", a, a) + + # raised from PyArray_EinsteinSum + assert_raises(CustomException, np.einsum, "ij->", a) + + def test_einsum_views(self): + # pass-through + for do_opt in [True, False]: + a = np.arange(6) + a.shape = (2, 3) + + b = np.einsum("...", a, optimize=do_opt) + assert_(b.base is a) + + b = np.einsum(a, [Ellipsis], optimize=do_opt) + assert_(b.base is a) + + b = np.einsum("ij", a, optimize=do_opt) + assert_(b.base is a) + assert_equal(b, a) + + b = np.einsum(a, [0, 1], optimize=do_opt) + assert_(b.base is a) + assert_equal(b, a) + + # output is writeable whenever input is writeable + b = np.einsum("...", a, optimize=do_opt) + assert_(b.flags['WRITEABLE']) + a.flags['WRITEABLE'] = False + b = np.einsum("...", a, optimize=do_opt) + assert_(not b.flags['WRITEABLE']) + + # transpose + a = np.arange(6) + a.shape = (2, 3) + + b = np.einsum("ji", a, optimize=do_opt) + assert_(b.base is a) + assert_equal(b, a.T) + + b = np.einsum(a, [1, 0], optimize=do_opt) + assert_(b.base is a) + assert_equal(b, a.T) + + # diagonal + a = np.arange(9) + a.shape = (3, 3) + + b = np.einsum("ii->i", a, optimize=do_opt) + assert_(b.base is a) + assert_equal(b, [a[i, i] for i 
in range(3)]) + + b = np.einsum(a, [0, 0], [0], optimize=do_opt) + assert_(b.base is a) + assert_equal(b, [a[i, i] for i in range(3)]) + + # diagonal with various ways of broadcasting an additional dimension + a = np.arange(27) + a.shape = (3, 3, 3) + + b = np.einsum("...ii->...i", a, optimize=do_opt) + assert_(b.base is a) + assert_equal(b, [[x[i, i] for i in range(3)] for x in a]) + + b = np.einsum(a, [Ellipsis, 0, 0], [Ellipsis, 0], optimize=do_opt) + assert_(b.base is a) + assert_equal(b, [[x[i, i] for i in range(3)] for x in a]) + + b = np.einsum("ii...->...i", a, optimize=do_opt) + assert_(b.base is a) + assert_equal(b, [[x[i, i] for i in range(3)] + for x in a.transpose(2, 0, 1)]) + + b = np.einsum(a, [0, 0, Ellipsis], [Ellipsis, 0], optimize=do_opt) + assert_(b.base is a) + assert_equal(b, [[x[i, i] for i in range(3)] + for x in a.transpose(2, 0, 1)]) + + b = np.einsum("...ii->i...", a, optimize=do_opt) + assert_(b.base is a) + assert_equal(b, [a[:, i, i] for i in range(3)]) + + b = np.einsum(a, [Ellipsis, 0, 0], [0, Ellipsis], optimize=do_opt) + assert_(b.base is a) + assert_equal(b, [a[:, i, i] for i in range(3)]) + + b = np.einsum("jii->ij", a, optimize=do_opt) + assert_(b.base is a) + assert_equal(b, [a[:, i, i] for i in range(3)]) + + b = np.einsum(a, [1, 0, 0], [0, 1], optimize=do_opt) + assert_(b.base is a) + assert_equal(b, [a[:, i, i] for i in range(3)]) + + b = np.einsum("ii...->i...", a, optimize=do_opt) + assert_(b.base is a) + assert_equal(b, [a.transpose(2, 0, 1)[:, i, i] for i in range(3)]) + + b = np.einsum(a, [0, 0, Ellipsis], [0, Ellipsis], optimize=do_opt) + assert_(b.base is a) + assert_equal(b, [a.transpose(2, 0, 1)[:, i, i] for i in range(3)]) + + b = np.einsum("i...i->i...", a, optimize=do_opt) + assert_(b.base is a) + assert_equal(b, [a.transpose(1, 0, 2)[:, i, i] for i in range(3)]) + + b = np.einsum(a, [0, Ellipsis, 0], [0, Ellipsis], optimize=do_opt) + assert_(b.base is a) + assert_equal(b, [a.transpose(1, 0, 2)[:, i, i] for i in range(3)]) + + b = np.einsum("i...i->...i", a, optimize=do_opt) + assert_(b.base is a) + assert_equal(b, [[x[i, i] for i in range(3)] + for x in a.transpose(1, 0, 2)]) + + b = np.einsum(a, [0, Ellipsis, 0], [Ellipsis, 0], optimize=do_opt) + assert_(b.base is a) + assert_equal(b, [[x[i, i] for i in range(3)] + for x in a.transpose(1, 0, 2)]) + + # triple diagonal + a = np.arange(27) + a.shape = (3, 3, 3) + + b = np.einsum("iii->i", a, optimize=do_opt) + assert_(b.base is a) + assert_equal(b, [a[i, i, i] for i in range(3)]) + + b = np.einsum(a, [0, 0, 0], [0], optimize=do_opt) + assert_(b.base is a) + assert_equal(b, [a[i, i, i] for i in range(3)]) + + # swap axes + a = np.arange(24) + a.shape = (2, 3, 4) + + b = np.einsum("ijk->jik", a, optimize=do_opt) + assert_(b.base is a) + assert_equal(b, a.swapaxes(0, 1)) + + b = np.einsum(a, [0, 1, 2], [1, 0, 2], optimize=do_opt) + assert_(b.base is a) + assert_equal(b, a.swapaxes(0, 1)) + + @np._no_nep50_warning() + def check_einsum_sums(self, dtype, do_opt=False): + dtype = np.dtype(dtype) + # Check various sums. Does many sizes to exercise unrolled loops. 
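# (Editor-added orientation sketch, not part of the original file.)  The
# sections below pair einsum subscripts with the reductions they reproduce:
import numpy as np
a = np.arange(6.).reshape(2, 3)
assert np.array_equal(np.einsum("ij->i", a), a.sum(axis=-1))  # sum over last axis
assert np.array_equal(np.einsum("ij->j", a), a.sum(axis=0))   # sum over first axis
assert np.einsum("ii", np.eye(3)) == np.trace(np.eye(3))      # trace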
+ + # sum(a, axis=-1) + for n in range(1, 17): + a = np.arange(n, dtype=dtype) + b = np.sum(a, axis=-1) + if hasattr(b, 'astype'): + b = b.astype(dtype) + assert_equal(np.einsum("i->", a, optimize=do_opt), b) + assert_equal(np.einsum(a, [0], [], optimize=do_opt), b) + + for n in range(1, 17): + a = np.arange(2*3*n, dtype=dtype).reshape(2, 3, n) + b = np.sum(a, axis=-1) + if hasattr(b, 'astype'): + b = b.astype(dtype) + assert_equal(np.einsum("...i->...", a, optimize=do_opt), b) + assert_equal(np.einsum(a, [Ellipsis, 0], [Ellipsis], optimize=do_opt), b) + + # sum(a, axis=0) + for n in range(1, 17): + a = np.arange(2*n, dtype=dtype).reshape(2, n) + b = np.sum(a, axis=0) + if hasattr(b, 'astype'): + b = b.astype(dtype) + assert_equal(np.einsum("i...->...", a, optimize=do_opt), b) + assert_equal(np.einsum(a, [0, Ellipsis], [Ellipsis], optimize=do_opt), b) + + for n in range(1, 17): + a = np.arange(2*3*n, dtype=dtype).reshape(2, 3, n) + b = np.sum(a, axis=0) + if hasattr(b, 'astype'): + b = b.astype(dtype) + assert_equal(np.einsum("i...->...", a, optimize=do_opt), b) + assert_equal(np.einsum(a, [0, Ellipsis], [Ellipsis], optimize=do_opt), b) + + # trace(a) + for n in range(1, 17): + a = np.arange(n*n, dtype=dtype).reshape(n, n) + b = np.trace(a) + if hasattr(b, 'astype'): + b = b.astype(dtype) + assert_equal(np.einsum("ii", a, optimize=do_opt), b) + assert_equal(np.einsum(a, [0, 0], optimize=do_opt), b) + + # gh-15961: should accept numpy int64 type in subscript list + np_array = np.asarray([0, 0]) + assert_equal(np.einsum(a, np_array, optimize=do_opt), b) + assert_equal(np.einsum(a, list(np_array), optimize=do_opt), b) + + # multiply(a, b) + assert_equal(np.einsum("..., ...", 3, 4), 12) # scalar case + for n in range(1, 17): + a = np.arange(3 * n, dtype=dtype).reshape(3, n) + b = np.arange(2 * 3 * n, dtype=dtype).reshape(2, 3, n) + assert_equal(np.einsum("..., ...", a, b, optimize=do_opt), + np.multiply(a, b)) + assert_equal(np.einsum(a, [Ellipsis], b, [Ellipsis], optimize=do_opt), + np.multiply(a, b)) + + # inner(a,b) + for n in range(1, 17): + a = np.arange(2 * 3 * n, dtype=dtype).reshape(2, 3, n) + b = np.arange(n, dtype=dtype) + assert_equal(np.einsum("...i, ...i", a, b, optimize=do_opt), np.inner(a, b)) + assert_equal(np.einsum(a, [Ellipsis, 0], b, [Ellipsis, 0], optimize=do_opt), + np.inner(a, b)) + + for n in range(1, 11): + a = np.arange(n * 3 * 2, dtype=dtype).reshape(n, 3, 2) + b = np.arange(n, dtype=dtype) + assert_equal(np.einsum("i..., i...", a, b, optimize=do_opt), + np.inner(a.T, b.T).T) + assert_equal(np.einsum(a, [0, Ellipsis], b, [0, Ellipsis], optimize=do_opt), + np.inner(a.T, b.T).T) + + # outer(a,b) + for n in range(1, 17): + a = np.arange(3, dtype=dtype)+1 + b = np.arange(n, dtype=dtype)+1 + assert_equal(np.einsum("i,j", a, b, optimize=do_opt), + np.outer(a, b)) + assert_equal(np.einsum(a, [0], b, [1], optimize=do_opt), + np.outer(a, b)) + + # Suppress the complex warnings for the 'as f8' tests + with suppress_warnings() as sup: + sup.filter(np.ComplexWarning) + + # matvec(a,b) / a.dot(b) where a is matrix, b is vector + for n in range(1, 17): + a = np.arange(4*n, dtype=dtype).reshape(4, n) + b = np.arange(n, dtype=dtype) + assert_equal(np.einsum("ij, j", a, b, optimize=do_opt), + np.dot(a, b)) + assert_equal(np.einsum(a, [0, 1], b, [1], optimize=do_opt), + np.dot(a, b)) + + c = np.arange(4, dtype=dtype) + np.einsum("ij,j", a, b, out=c, + dtype='f8', casting='unsafe', optimize=do_opt) + assert_equal(c, + np.dot(a.astype('f8'), + b.astype('f8')).astype(dtype)) + c[...] 
= 0 + np.einsum(a, [0, 1], b, [1], out=c, + dtype='f8', casting='unsafe', optimize=do_opt) + assert_equal(c, + np.dot(a.astype('f8'), + b.astype('f8')).astype(dtype)) + + for n in range(1, 17): + a = np.arange(4*n, dtype=dtype).reshape(4, n) + b = np.arange(n, dtype=dtype) + assert_equal(np.einsum("ji,j", a.T, b.T, optimize=do_opt), + np.dot(b.T, a.T)) + assert_equal(np.einsum(a.T, [1, 0], b.T, [1], optimize=do_opt), + np.dot(b.T, a.T)) + + c = np.arange(4, dtype=dtype) + np.einsum("ji,j", a.T, b.T, out=c, + dtype='f8', casting='unsafe', optimize=do_opt) + assert_equal(c, + np.dot(b.T.astype('f8'), + a.T.astype('f8')).astype(dtype)) + c[...] = 0 + np.einsum(a.T, [1, 0], b.T, [1], out=c, + dtype='f8', casting='unsafe', optimize=do_opt) + assert_equal(c, + np.dot(b.T.astype('f8'), + a.T.astype('f8')).astype(dtype)) + + # matmat(a,b) / a.dot(b) where a is matrix, b is matrix + for n in range(1, 17): + if n < 8 or dtype != 'f2': + a = np.arange(4*n, dtype=dtype).reshape(4, n) + b = np.arange(n*6, dtype=dtype).reshape(n, 6) + assert_equal(np.einsum("ij,jk", a, b, optimize=do_opt), + np.dot(a, b)) + assert_equal(np.einsum(a, [0, 1], b, [1, 2], optimize=do_opt), + np.dot(a, b)) + + for n in range(1, 17): + a = np.arange(4*n, dtype=dtype).reshape(4, n) + b = np.arange(n*6, dtype=dtype).reshape(n, 6) + c = np.arange(24, dtype=dtype).reshape(4, 6) + np.einsum("ij,jk", a, b, out=c, dtype='f8', casting='unsafe', + optimize=do_opt) + assert_equal(c, + np.dot(a.astype('f8'), + b.astype('f8')).astype(dtype)) + c[...] = 0 + np.einsum(a, [0, 1], b, [1, 2], out=c, + dtype='f8', casting='unsafe', optimize=do_opt) + assert_equal(c, + np.dot(a.astype('f8'), + b.astype('f8')).astype(dtype)) + + # matrix triple product (note this is not currently an efficient + # way to multiply 3 matrices) + a = np.arange(12, dtype=dtype).reshape(3, 4) + b = np.arange(20, dtype=dtype).reshape(4, 5) + c = np.arange(30, dtype=dtype).reshape(5, 6) + if dtype != 'f2': + assert_equal(np.einsum("ij,jk,kl", a, b, c, optimize=do_opt), + a.dot(b).dot(c)) + assert_equal(np.einsum(a, [0, 1], b, [1, 2], c, [2, 3], + optimize=do_opt), a.dot(b).dot(c)) + + d = np.arange(18, dtype=dtype).reshape(3, 6) + np.einsum("ij,jk,kl", a, b, c, out=d, + dtype='f8', casting='unsafe', optimize=do_opt) + tgt = a.astype('f8').dot(b.astype('f8')) + tgt = tgt.dot(c.astype('f8')).astype(dtype) + assert_equal(d, tgt) + + d[...] = 0 + np.einsum(a, [0, 1], b, [1, 2], c, [2, 3], out=d, + dtype='f8', casting='unsafe', optimize=do_opt) + tgt = a.astype('f8').dot(b.astype('f8')) + tgt = tgt.dot(c.astype('f8')).astype(dtype) + assert_equal(d, tgt) + + # tensordot(a, b) + if np.dtype(dtype) != np.dtype('f2'): + a = np.arange(60, dtype=dtype).reshape(3, 4, 5) + b = np.arange(24, dtype=dtype).reshape(4, 3, 2) + assert_equal(np.einsum("ijk, jil -> kl", a, b), + np.tensordot(a, b, axes=([1, 0], [0, 1]))) + assert_equal(np.einsum(a, [0, 1, 2], b, [1, 0, 3], [2, 3]), + np.tensordot(a, b, axes=([1, 0], [0, 1]))) + + c = np.arange(10, dtype=dtype).reshape(5, 2) + np.einsum("ijk,jil->kl", a, b, out=c, + dtype='f8', casting='unsafe', optimize=do_opt) + assert_equal(c, np.tensordot(a.astype('f8'), b.astype('f8'), + axes=([1, 0], [0, 1])).astype(dtype)) + c[...] 
= 0 + np.einsum(a, [0, 1, 2], b, [1, 0, 3], [2, 3], out=c, + dtype='f8', casting='unsafe', optimize=do_opt) + assert_equal(c, np.tensordot(a.astype('f8'), b.astype('f8'), + axes=([1, 0], [0, 1])).astype(dtype)) + + # logical_and(logical_and(a!=0, b!=0), c!=0) + neg_val = -2 if dtype.kind != "u" else np.iinfo(dtype).max - 1 + a = np.array([1, 3, neg_val, 0, 12, 13, 0, 1], dtype=dtype) + b = np.array([0, 3.5, 0., neg_val, 0, 1, 3, 12], dtype=dtype) + c = np.array([True, True, False, True, True, False, True, True]) + + assert_equal(np.einsum("i,i,i->i", a, b, c, + dtype='?', casting='unsafe', optimize=do_opt), + np.logical_and(np.logical_and(a != 0, b != 0), c != 0)) + assert_equal(np.einsum(a, [0], b, [0], c, [0], [0], + dtype='?', casting='unsafe'), + np.logical_and(np.logical_and(a != 0, b != 0), c != 0)) + + a = np.arange(9, dtype=dtype) + assert_equal(np.einsum(",i->", 3, a), 3*np.sum(a)) + assert_equal(np.einsum(3, [], a, [0], []), 3*np.sum(a)) + assert_equal(np.einsum("i,->", a, 3), 3*np.sum(a)) + assert_equal(np.einsum(a, [0], 3, [], []), 3*np.sum(a)) + + # Various stride0, contiguous, and SSE aligned variants + for n in range(1, 25): + a = np.arange(n, dtype=dtype) + if np.dtype(dtype).itemsize > 1: + assert_equal(np.einsum("...,...", a, a, optimize=do_opt), + np.multiply(a, a)) + assert_equal(np.einsum("i,i", a, a, optimize=do_opt), np.dot(a, a)) + assert_equal(np.einsum("i,->i", a, 2, optimize=do_opt), 2*a) + assert_equal(np.einsum(",i->i", 2, a, optimize=do_opt), 2*a) + assert_equal(np.einsum("i,->", a, 2, optimize=do_opt), 2*np.sum(a)) + assert_equal(np.einsum(",i->", 2, a, optimize=do_opt), 2*np.sum(a)) + + assert_equal(np.einsum("...,...", a[1:], a[:-1], optimize=do_opt), + np.multiply(a[1:], a[:-1])) + assert_equal(np.einsum("i,i", a[1:], a[:-1], optimize=do_opt), + np.dot(a[1:], a[:-1])) + assert_equal(np.einsum("i,->i", a[1:], 2, optimize=do_opt), 2*a[1:]) + assert_equal(np.einsum(",i->i", 2, a[1:], optimize=do_opt), 2*a[1:]) + assert_equal(np.einsum("i,->", a[1:], 2, optimize=do_opt), + 2*np.sum(a[1:])) + assert_equal(np.einsum(",i->", 2, a[1:], optimize=do_opt), + 2*np.sum(a[1:])) + + # An object array, summed as the data type + a = np.arange(9, dtype=object) + + b = np.einsum("i->", a, dtype=dtype, casting='unsafe') + assert_equal(b, np.sum(a)) + if hasattr(b, "dtype"): + # Can be a python object when dtype is object + assert_equal(b.dtype, np.dtype(dtype)) + + b = np.einsum(a, [0], [], dtype=dtype, casting='unsafe') + assert_equal(b, np.sum(a)) + if hasattr(b, "dtype"): + # Can be a python object when dtype is object + assert_equal(b.dtype, np.dtype(dtype)) + + # A case which was failing (ticket #1885) + p = np.arange(2) + 1 + q = np.arange(4).reshape(2, 2) + 3 + r = np.arange(4).reshape(2, 2) + 7 + assert_equal(np.einsum('z,mz,zm->', p, q, r), 253) + + # singleton dimensions broadcast (gh-10343) + p = np.ones((10,2)) + q = np.ones((1,2)) + assert_array_equal(np.einsum('ij,ij->j', p, q, optimize=True), + np.einsum('ij,ij->j', p, q, optimize=False)) + assert_array_equal(np.einsum('ij,ij->j', p, q, optimize=True), + [10.] * 2) + + # a blas-compatible contraction broadcasting case which was failing + # for optimize=True (ticket #10930) + x = np.array([2., 3.]) + y = np.array([4.]) + assert_array_equal(np.einsum("i, i", x, y, optimize=False), 20.) + assert_array_equal(np.einsum("i, i", x, y, optimize=True), 20.) 
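# (Editor-added sketch of the 0-d operand forms exercised around here.)  An
# empty subscript stands for a scalar operand that broadcasts against the
# other operands:
import numpy as np
a = np.arange(5.)
assert np.array_equal(np.einsum(",i->i", 2, a), 2 * a)   # scale every element
assert np.einsum("i,->", a, 2) == 2 * a.sum()             # scale, then reduce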
+ + # all-ones array was bypassing bug (ticket #10930) + p = np.ones((1, 5)) / 2 + q = np.ones((5, 5)) / 2 + for optimize in (True, False): + assert_array_equal(np.einsum("...ij,...jk->...ik", p, p, + optimize=optimize), + np.einsum("...ij,...jk->...ik", p, q, + optimize=optimize)) + assert_array_equal(np.einsum("...ij,...jk->...ik", p, q, + optimize=optimize), + np.full((1, 5), 1.25)) + + # Cases which were failing (gh-10899) + x = np.eye(2, dtype=dtype) + y = np.ones(2, dtype=dtype) + assert_array_equal(np.einsum("ji,i->", x, y, optimize=optimize), + [2.]) # contig_contig_outstride0_two + assert_array_equal(np.einsum("i,ij->", y, x, optimize=optimize), + [2.]) # stride0_contig_outstride0_two + assert_array_equal(np.einsum("ij,i->", x, y, optimize=optimize), + [2.]) # contig_stride0_outstride0_two + + def test_einsum_sums_int8(self): + if ( + (sys.platform == 'darwin' and platform.machine() == 'x86_64') + or + USING_CLANG_CL + ): + pytest.xfail('Fails on macOS x86-64 and when using clang-cl ' + 'with Meson, see gh-23838') + self.check_einsum_sums('i1') + + def test_einsum_sums_uint8(self): + if ( + (sys.platform == 'darwin' and platform.machine() == 'x86_64') + or + USING_CLANG_CL + ): + pytest.xfail('Fails on macOS x86-64 and when using clang-cl ' + 'with Meson, see gh-23838') + self.check_einsum_sums('u1') + + def test_einsum_sums_int16(self): + self.check_einsum_sums('i2') + + def test_einsum_sums_uint16(self): + self.check_einsum_sums('u2') + + def test_einsum_sums_int32(self): + self.check_einsum_sums('i4') + self.check_einsum_sums('i4', True) + + def test_einsum_sums_uint32(self): + self.check_einsum_sums('u4') + self.check_einsum_sums('u4', True) + + def test_einsum_sums_int64(self): + self.check_einsum_sums('i8') + + def test_einsum_sums_uint64(self): + self.check_einsum_sums('u8') + + def test_einsum_sums_float16(self): + self.check_einsum_sums('f2') + + def test_einsum_sums_float32(self): + self.check_einsum_sums('f4') + + def test_einsum_sums_float64(self): + self.check_einsum_sums('f8') + self.check_einsum_sums('f8', True) + + def test_einsum_sums_longdouble(self): + self.check_einsum_sums(np.longdouble) + + def test_einsum_sums_cfloat64(self): + self.check_einsum_sums('c8') + self.check_einsum_sums('c8', True) + + def test_einsum_sums_cfloat128(self): + self.check_einsum_sums('c16') + + def test_einsum_sums_clongdouble(self): + self.check_einsum_sums(np.clongdouble) + + def test_einsum_sums_object(self): + self.check_einsum_sums('object') + self.check_einsum_sums('object', True) + + def test_einsum_misc(self): + # This call used to crash because of a bug in + # PyArray_AssignZero + a = np.ones((1, 2)) + b = np.ones((2, 2, 1)) + assert_equal(np.einsum('ij...,j...->i...', a, b), [[[2], [2]]]) + assert_equal(np.einsum('ij...,j...->i...', a, b, optimize=True), [[[2], [2]]]) + + # Regression test for issue #10369 (test unicode inputs with Python 2) + assert_equal(np.einsum('ij...,j...->i...', a, b), [[[2], [2]]]) + assert_equal(np.einsum('...i,...i', [1, 2, 3], [2, 3, 4]), 20) + assert_equal(np.einsum('...i,...i', [1, 2, 3], [2, 3, 4], + optimize='greedy'), 20) + + # The iterator had an issue with buffering this reduction + a = np.ones((5, 12, 4, 2, 3), np.int64) + b = np.ones((5, 12, 11), np.int64) + assert_equal(np.einsum('ijklm,ijn,ijn->', a, b, b), + np.einsum('ijklm,ijn->', a, b)) + assert_equal(np.einsum('ijklm,ijn,ijn->', a, b, b, optimize=True), + np.einsum('ijklm,ijn->', a, b, optimize=True)) + + # Issue #2027, was a problem in the contiguous 3-argument + # inner loop 
implementation + a = np.arange(1, 3) + b = np.arange(1, 5).reshape(2, 2) + c = np.arange(1, 9).reshape(4, 2) + assert_equal(np.einsum('x,yx,zx->xzy', a, b, c), + [[[1, 3], [3, 9], [5, 15], [7, 21]], + [[8, 16], [16, 32], [24, 48], [32, 64]]]) + assert_equal(np.einsum('x,yx,zx->xzy', a, b, c, optimize=True), + [[[1, 3], [3, 9], [5, 15], [7, 21]], + [[8, 16], [16, 32], [24, 48], [32, 64]]]) + + # Ensure explicitly setting out=None does not cause an error + # see issue gh-15776 and issue gh-15256 + assert_equal(np.einsum('i,j', [1], [2], out=None), [[2]]) + + def test_object_loop(self): + + class Mult: + def __mul__(self, other): + return 42 + + objMult = np.array([Mult()]) + objNULL = np.ndarray(buffer = b'\0' * np.intp(0).itemsize, shape=1, dtype=object) + + with pytest.raises(TypeError): + np.einsum("i,j", [1], objNULL) + with pytest.raises(TypeError): + np.einsum("i,j", objNULL, [1]) + assert np.einsum("i,j", objMult, objMult) == 42 + + def test_subscript_range(self): + # Issue #7741, make sure that all letters of Latin alphabet (both uppercase & lowercase) can be used + # when creating a subscript from arrays + a = np.ones((2, 3)) + b = np.ones((3, 4)) + np.einsum(a, [0, 20], b, [20, 2], [0, 2], optimize=False) + np.einsum(a, [0, 27], b, [27, 2], [0, 2], optimize=False) + np.einsum(a, [0, 51], b, [51, 2], [0, 2], optimize=False) + assert_raises(ValueError, lambda: np.einsum(a, [0, 52], b, [52, 2], [0, 2], optimize=False)) + assert_raises(ValueError, lambda: np.einsum(a, [-1, 5], b, [5, 2], [-1, 2], optimize=False)) + + def test_einsum_broadcast(self): + # Issue #2455 change in handling ellipsis + # remove the 'middle broadcast' error + # only use the 'RIGHT' iteration in prepare_op_axes + # adds auto broadcast on left where it belongs + # broadcast on right has to be explicit + # We need to test the optimized parsing as well + + A = np.arange(2 * 3 * 4).reshape(2, 3, 4) + B = np.arange(3) + ref = np.einsum('ijk,j->ijk', A, B, optimize=False) + for opt in [True, False]: + assert_equal(np.einsum('ij...,j...->ij...', A, B, optimize=opt), ref) + assert_equal(np.einsum('ij...,...j->ij...', A, B, optimize=opt), ref) + assert_equal(np.einsum('ij...,j->ij...', A, B, optimize=opt), ref) # used to raise error + + A = np.arange(12).reshape((4, 3)) + B = np.arange(6).reshape((3, 2)) + ref = np.einsum('ik,kj->ij', A, B, optimize=False) + for opt in [True, False]: + assert_equal(np.einsum('ik...,k...->i...', A, B, optimize=opt), ref) + assert_equal(np.einsum('ik...,...kj->i...j', A, B, optimize=opt), ref) + assert_equal(np.einsum('...k,kj', A, B, optimize=opt), ref) # used to raise error + assert_equal(np.einsum('ik,k...->i...', A, B, optimize=opt), ref) # used to raise error + + dims = [2, 3, 4, 5] + a = np.arange(np.prod(dims)).reshape(dims) + v = np.arange(dims[2]) + ref = np.einsum('ijkl,k->ijl', a, v, optimize=False) + for opt in [True, False]: + assert_equal(np.einsum('ijkl,k', a, v, optimize=opt), ref) + assert_equal(np.einsum('...kl,k', a, v, optimize=opt), ref) # used to raise error + assert_equal(np.einsum('...kl,k...', a, v, optimize=opt), ref) + + J, K, M = 160, 160, 120 + A = np.arange(J * K * M).reshape(1, 1, 1, J, K, M) + B = np.arange(J * K * M * 3).reshape(J, K, M, 3) + ref = np.einsum('...lmn,...lmno->...o', A, B, optimize=False) + for opt in [True, False]: + assert_equal(np.einsum('...lmn,lmno->...o', A, B, + optimize=opt), ref) # used to raise error + + def test_einsum_fixedstridebug(self): + # Issue #4485 obscure einsum bug + # This case revealed a bug in nditer where it reported 
a stride + # as 'fixed' (0) when it was in fact not fixed during processing + # (0 or 4). The reason for the bug was that the check for a fixed + # stride was using the information from the 2D inner loop reuse + # to restrict the iteration dimensions it had to validate to be + # the same, but that 2D inner loop reuse logic is only triggered + # during the buffer copying step, and hence it was invalid to + # rely on those values. The fix is to check all the dimensions + # of the stride in question, which in the test case reveals that + # the stride is not fixed. + # + # NOTE: This test is triggered by the fact that the default buffersize, + # used by einsum, is 8192, and 3*2731 = 8193, is larger than that + # and results in a mismatch between the buffering and the + # striding for operand A. + A = np.arange(2 * 3).reshape(2, 3).astype(np.float32) + B = np.arange(2 * 3 * 2731).reshape(2, 3, 2731).astype(np.int16) + es = np.einsum('cl, cpx->lpx', A, B) + tp = np.tensordot(A, B, axes=(0, 0)) + assert_equal(es, tp) + # The following is the original test case from the bug report, + # made repeatable by changing random arrays to aranges. + A = np.arange(3 * 3).reshape(3, 3).astype(np.float64) + B = np.arange(3 * 3 * 64 * 64).reshape(3, 3, 64, 64).astype(np.float32) + es = np.einsum('cl, cpxy->lpxy', A, B) + tp = np.tensordot(A, B, axes=(0, 0)) + assert_equal(es, tp) + + def test_einsum_fixed_collapsingbug(self): + # Issue #5147. + # The bug only occurred when output argument of einssum was used. + x = np.random.normal(0, 1, (5, 5, 5, 5)) + y1 = np.zeros((5, 5)) + np.einsum('aabb->ab', x, out=y1) + idx = np.arange(5) + y2 = x[idx[:, None], idx[:, None], idx, idx] + assert_equal(y1, y2) + + def test_einsum_failed_on_p9_and_s390x(self): + # Issues gh-14692 and gh-12689 + # Bug with signed vs unsigned char errored on power9 and s390x Linux + tensor = np.random.random_sample((10, 10, 10, 10)) + x = np.einsum('ijij->', tensor) + y = tensor.trace(axis1=0, axis2=2).trace() + assert_allclose(x, y) + + def test_einsum_all_contig_non_contig_output(self): + # Issue gh-5907, tests that the all contiguous special case + # actually checks the contiguity of the output + x = np.ones((5, 5)) + out = np.ones(10)[::2] + correct_base = np.ones(10) + correct_base[::2] = 5 + # Always worked (inner iteration is done with 0-stride): + np.einsum('mi,mi,mi->m', x, x, x, out=out) + assert_array_equal(out.base, correct_base) + # Example 1: + out = np.ones(10)[::2] + np.einsum('im,im,im->m', x, x, x, out=out) + assert_array_equal(out.base, correct_base) + # Example 2, buffering causes x to be contiguous but + # special cases do not catch the operation before: + out = np.ones((2, 2, 2))[..., 0] + correct_base = np.ones((2, 2, 2)) + correct_base[..., 0] = 2 + x = np.ones((2, 2), np.float32) + np.einsum('ij,jk->ik', x, x, out=out) + assert_array_equal(out.base, correct_base) + + @pytest.mark.parametrize("dtype", + np.typecodes["AllFloat"] + np.typecodes["AllInteger"]) + def test_different_paths(self, dtype): + # Test originally added to cover broken float16 path: gh-20305 + # Likely most are covered elsewhere, at least partially. + dtype = np.dtype(dtype) + # Simple test, designed to exercise most specialized code paths, + # note the +0.5 for floats. This makes sure we use a float value + # where the results must be exact. 
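# (Editor-added check of why the +0.5 keeps results exact.)  The values
# 0.5 .. 6.5 are exactly representable in every binary float format, even
# float16, so equality comparisons against exact references are safe:
import numpy as np
arr16 = (np.arange(7) + 0.5).astype(np.float16)
assert np.array_equal(arr16.astype(np.float64), np.arange(7) + 0.5)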
+ arr = (np.arange(7) + 0.5).astype(dtype) + scalar = np.array(2, dtype=dtype) + + # contig -> scalar: + res = np.einsum('i->', arr) + assert res == arr.sum() + # contig, contig -> contig: + res = np.einsum('i,i->i', arr, arr) + assert_array_equal(res, arr * arr) + # noncontig, noncontig -> contig: + res = np.einsum('i,i->i', arr.repeat(2)[::2], arr.repeat(2)[::2]) + assert_array_equal(res, arr * arr) + # contig + contig -> scalar + assert np.einsum('i,i->', arr, arr) == (arr * arr).sum() + # contig + scalar -> contig (with out) + out = np.ones(7, dtype=dtype) + res = np.einsum('i,->i', arr, dtype.type(2), out=out) + assert_array_equal(res, arr * dtype.type(2)) + # scalar + contig -> contig (with out) + res = np.einsum(',i->i', scalar, arr) + assert_array_equal(res, arr * dtype.type(2)) + # scalar + contig -> scalar + res = np.einsum(',i->', scalar, arr) + # Use einsum to compare to not have difference due to sum round-offs: + assert res == np.einsum('i->', scalar * arr) + # contig + scalar -> scalar + res = np.einsum('i,->', arr, scalar) + # Use einsum to compare to not have difference due to sum round-offs: + assert res == np.einsum('i->', scalar * arr) + # contig + contig + contig -> scalar + arr = np.array([0.5, 0.5, 0.25, 4.5, 3.], dtype=dtype) + res = np.einsum('i,i,i->', arr, arr, arr) + assert_array_equal(res, (arr * arr * arr).sum()) + # four arrays: + res = np.einsum('i,i,i,i->', arr, arr, arr, arr) + assert_array_equal(res, (arr * arr * arr * arr).sum()) + + def test_small_boolean_arrays(self): + # See gh-5946. + # Use array of True embedded in False. + a = np.zeros((16, 1, 1), dtype=np.bool_)[:2] + a[...] = True + out = np.zeros((16, 1, 1), dtype=np.bool_)[:2] + tgt = np.ones((2, 1, 1), dtype=np.bool_) + res = np.einsum('...ij,...jk->...ik', a, a, out=out) + assert_equal(res, tgt) + + def test_out_is_res(self): + a = np.arange(9).reshape(3, 3) + res = np.einsum('...ij,...jk->...ik', a, a, out=a) + assert res is a + + def optimize_compare(self, subscripts, operands=None): + # Tests all paths of the optimization function against + # conventional einsum + if operands is None: + args = [subscripts] + terms = subscripts.split('->')[0].split(',') + for term in terms: + dims = [global_size_dict[x] for x in term] + args.append(np.random.rand(*dims)) + else: + args = [subscripts] + operands + + noopt = np.einsum(*args, optimize=False) + opt = np.einsum(*args, optimize='greedy') + assert_almost_equal(opt, noopt) + opt = np.einsum(*args, optimize='optimal') + assert_almost_equal(opt, noopt) + + def test_hadamard_like_products(self): + # Hadamard outer products + self.optimize_compare('a,ab,abc->abc') + self.optimize_compare('a,b,ab->ab') + + def test_index_transformations(self): + # Simple index transformation cases + self.optimize_compare('ea,fb,gc,hd,abcd->efgh') + self.optimize_compare('ea,fb,abcd,gc,hd->efgh') + self.optimize_compare('abcd,ea,fb,gc,hd->efgh') + + def test_complex(self): + # Long test cases + self.optimize_compare('acdf,jbje,gihb,hfac,gfac,gifabc,hfac') + self.optimize_compare('acdf,jbje,gihb,hfac,gfac,gifabc,hfac') + self.optimize_compare('cd,bdhe,aidb,hgca,gc,hgibcd,hgac') + self.optimize_compare('abhe,hidj,jgba,hiab,gab') + self.optimize_compare('bde,cdh,agdb,hica,ibd,hgicd,hiac') + self.optimize_compare('chd,bde,agbc,hiad,hgc,hgi,hiad') + self.optimize_compare('chd,bde,agbc,hiad,bdi,cgh,agdb') + self.optimize_compare('bdhe,acad,hiab,agac,hibd') + + def test_collapse(self): + # Inner products + self.optimize_compare('ab,ab,c->') + self.optimize_compare('ab,ab,c->c') 
+ self.optimize_compare('ab,ab,cd,cd->') + self.optimize_compare('ab,ab,cd,cd->ac') + self.optimize_compare('ab,ab,cd,cd->cd') + self.optimize_compare('ab,ab,cd,cd,ef,ef->') + + def test_expand(self): + # Outer products + self.optimize_compare('ab,cd,ef->abcdef') + self.optimize_compare('ab,cd,ef->acdf') + self.optimize_compare('ab,cd,de->abcde') + self.optimize_compare('ab,cd,de->be') + self.optimize_compare('ab,bcd,cd->abcd') + self.optimize_compare('ab,bcd,cd->abd') + + def test_edge_cases(self): + # Difficult edge cases for optimization + self.optimize_compare('eb,cb,fb->cef') + self.optimize_compare('dd,fb,be,cdb->cef') + self.optimize_compare('bca,cdb,dbf,afc->') + self.optimize_compare('dcc,fce,ea,dbf->ab') + self.optimize_compare('fdf,cdd,ccd,afe->ae') + self.optimize_compare('abcd,ad') + self.optimize_compare('ed,fcd,ff,bcf->be') + self.optimize_compare('baa,dcf,af,cde->be') + self.optimize_compare('bd,db,eac->ace') + self.optimize_compare('fff,fae,bef,def->abd') + self.optimize_compare('efc,dbc,acf,fd->abe') + self.optimize_compare('ba,ac,da->bcd') + + def test_inner_product(self): + # Inner products + self.optimize_compare('ab,ab') + self.optimize_compare('ab,ba') + self.optimize_compare('abc,abc') + self.optimize_compare('abc,bac') + self.optimize_compare('abc,cba') + + def test_random_cases(self): + # Randomly built test cases + self.optimize_compare('aab,fa,df,ecc->bde') + self.optimize_compare('ecb,fef,bad,ed->ac') + self.optimize_compare('bcf,bbb,fbf,fc->') + self.optimize_compare('bb,ff,be->e') + self.optimize_compare('bcb,bb,fc,fff->') + self.optimize_compare('fbb,dfd,fc,fc->') + self.optimize_compare('afd,ba,cc,dc->bf') + self.optimize_compare('adb,bc,fa,cfc->d') + self.optimize_compare('bbd,bda,fc,db->acf') + self.optimize_compare('dba,ead,cad->bce') + self.optimize_compare('aef,fbc,dca->bde') + + def test_combined_views_mapping(self): + # gh-10792 + a = np.arange(9).reshape(1, 1, 3, 1, 3) + b = np.einsum('bbcdc->d', a) + assert_equal(b, [12]) + + def test_broadcasting_dot_cases(self): + # Ensures broadcasting cases are not mistaken for GEMM + + a = np.random.rand(1, 5, 4) + b = np.random.rand(4, 6) + c = np.random.rand(5, 6) + d = np.random.rand(10) + + self.optimize_compare('ijk,kl,jl', operands=[a, b, c]) + self.optimize_compare('ijk,kl,jl,i->i', operands=[a, b, c, d]) + + e = np.random.rand(1, 1, 5, 4) + f = np.random.rand(7, 7) + self.optimize_compare('abjk,kl,jl', operands=[e, b, c]) + self.optimize_compare('abjk,kl,jl,ab->ab', operands=[e, b, c, f]) + + # Edge case found in gh-11308 + g = np.arange(64).reshape(2, 4, 8) + self.optimize_compare('obk,ijk->ioj', operands=[g, g]) + + def test_output_order(self): + # Ensure output order is respected for optimize cases, the below + # conraction should yield a reshaped tensor view + # gh-16415 + + a = np.ones((2, 3, 5), order='F') + b = np.ones((4, 3), order='F') + + for opt in [True, False]: + tmp = np.einsum('...ft,mf->...mt', a, b, order='a', optimize=opt) + assert_(tmp.flags.f_contiguous) + + tmp = np.einsum('...ft,mf->...mt', a, b, order='f', optimize=opt) + assert_(tmp.flags.f_contiguous) + + tmp = np.einsum('...ft,mf->...mt', a, b, order='c', optimize=opt) + assert_(tmp.flags.c_contiguous) + + tmp = np.einsum('...ft,mf->...mt', a, b, order='k', optimize=opt) + assert_(tmp.flags.c_contiguous is False) + assert_(tmp.flags.f_contiguous is False) + + tmp = np.einsum('...ft,mf->...mt', a, b, optimize=opt) + assert_(tmp.flags.c_contiguous is False) + assert_(tmp.flags.f_contiguous is False) + + c = np.ones((4, 3), 
order='C') + for opt in [True, False]: + tmp = np.einsum('...ft,mf->...mt', a, c, order='a', optimize=opt) + assert_(tmp.flags.c_contiguous) + + d = np.ones((2, 3, 5), order='C') + for opt in [True, False]: + tmp = np.einsum('...ft,mf->...mt', d, c, order='a', optimize=opt) + assert_(tmp.flags.c_contiguous) + +class TestEinsumPath: + def build_operands(self, string, size_dict=global_size_dict): + + # Builds views based off initial operands + operands = [string] + terms = string.split('->')[0].split(',') + for term in terms: + dims = [size_dict[x] for x in term] + operands.append(np.random.rand(*dims)) + + return operands + + def assert_path_equal(self, comp, benchmark): + # Checks if list of tuples are equivalent + ret = (len(comp) == len(benchmark)) + assert_(ret) + for pos in range(len(comp) - 1): + ret &= isinstance(comp[pos + 1], tuple) + ret &= (comp[pos + 1] == benchmark[pos + 1]) + assert_(ret) + + def test_memory_contraints(self): + # Ensure memory constraints are satisfied + + outer_test = self.build_operands('a,b,c->abc') + + path, path_str = np.einsum_path(*outer_test, optimize=('greedy', 0)) + self.assert_path_equal(path, ['einsum_path', (0, 1, 2)]) + + path, path_str = np.einsum_path(*outer_test, optimize=('optimal', 0)) + self.assert_path_equal(path, ['einsum_path', (0, 1, 2)]) + + long_test = self.build_operands('acdf,jbje,gihb,hfac') + path, path_str = np.einsum_path(*long_test, optimize=('greedy', 0)) + self.assert_path_equal(path, ['einsum_path', (0, 1, 2, 3)]) + + path, path_str = np.einsum_path(*long_test, optimize=('optimal', 0)) + self.assert_path_equal(path, ['einsum_path', (0, 1, 2, 3)]) + + def test_long_paths(self): + # Long complex cases + + # Long test 1 + long_test1 = self.build_operands('acdf,jbje,gihb,hfac,gfac,gifabc,hfac') + path, path_str = np.einsum_path(*long_test1, optimize='greedy') + self.assert_path_equal(path, ['einsum_path', + (3, 6), (3, 4), (2, 4), (2, 3), (0, 2), (0, 1)]) + + path, path_str = np.einsum_path(*long_test1, optimize='optimal') + self.assert_path_equal(path, ['einsum_path', + (3, 6), (3, 4), (2, 4), (2, 3), (0, 2), (0, 1)]) + + # Long test 2 + long_test2 = self.build_operands('chd,bde,agbc,hiad,bdi,cgh,agdb') + path, path_str = np.einsum_path(*long_test2, optimize='greedy') + self.assert_path_equal(path, ['einsum_path', + (3, 4), (0, 3), (3, 4), (1, 3), (1, 2), (0, 1)]) + + path, path_str = np.einsum_path(*long_test2, optimize='optimal') + self.assert_path_equal(path, ['einsum_path', + (0, 5), (1, 4), (3, 4), (1, 3), (1, 2), (0, 1)]) + + def test_edge_paths(self): + # Difficult edge cases + + # Edge test1 + edge_test1 = self.build_operands('eb,cb,fb->cef') + path, path_str = np.einsum_path(*edge_test1, optimize='greedy') + self.assert_path_equal(path, ['einsum_path', (0, 2), (0, 1)]) + + path, path_str = np.einsum_path(*edge_test1, optimize='optimal') + self.assert_path_equal(path, ['einsum_path', (0, 2), (0, 1)]) + + # Edge test2 + edge_test2 = self.build_operands('dd,fb,be,cdb->cef') + path, path_str = np.einsum_path(*edge_test2, optimize='greedy') + self.assert_path_equal(path, ['einsum_path', (0, 3), (0, 1), (0, 1)]) + + path, path_str = np.einsum_path(*edge_test2, optimize='optimal') + self.assert_path_equal(path, ['einsum_path', (0, 3), (0, 1), (0, 1)]) + + # Edge test3 + edge_test3 = self.build_operands('bca,cdb,dbf,afc->') + path, path_str = np.einsum_path(*edge_test3, optimize='greedy') + self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 2), (0, 1)]) + + path, path_str = np.einsum_path(*edge_test3, optimize='optimal') 
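# (Editor-added usage sketch of np.einsum_path; the operand shapes here are
# made up purely for illustration.)
import numpy as np
a, b, c = np.ones((5, 4)), np.ones((4, 6)), np.ones((6, 3))
path, descr = np.einsum_path('ij,jk,kl->il', a, b, c, optimize='greedy')
# `path` is a list: the 'einsum_path' marker followed by the pair of operand
# indices contracted at each step.  It can be fed back through `optimize=`,
# which is what test_path_type_input below double-checks.
out = np.einsum('ij,jk,kl->il', a, b, c, optimize=path)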
+ self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 2), (0, 1)]) + + # Edge test4 + edge_test4 = self.build_operands('dcc,fce,ea,dbf->ab') + path, path_str = np.einsum_path(*edge_test4, optimize='greedy') + self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 1), (0, 1)]) + + path, path_str = np.einsum_path(*edge_test4, optimize='optimal') + self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 2), (0, 1)]) + + # Edge test5 + edge_test4 = self.build_operands('a,ac,ab,ad,cd,bd,bc->', + size_dict={"a": 20, "b": 20, "c": 20, "d": 20}) + path, path_str = np.einsum_path(*edge_test4, optimize='greedy') + self.assert_path_equal(path, ['einsum_path', (0, 1), (0, 1, 2, 3, 4, 5)]) + + path, path_str = np.einsum_path(*edge_test4, optimize='optimal') + self.assert_path_equal(path, ['einsum_path', (0, 1), (0, 1, 2, 3, 4, 5)]) + + def test_path_type_input(self): + # Test explicit path handling + path_test = self.build_operands('dcc,fce,ea,dbf->ab') + + path, path_str = np.einsum_path(*path_test, optimize=False) + self.assert_path_equal(path, ['einsum_path', (0, 1, 2, 3)]) + + path, path_str = np.einsum_path(*path_test, optimize=True) + self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 1), (0, 1)]) + + exp_path = ['einsum_path', (0, 2), (0, 2), (0, 1)] + path, path_str = np.einsum_path(*path_test, optimize=exp_path) + self.assert_path_equal(path, exp_path) + + # Double check einsum works on the input path + noopt = np.einsum(*path_test, optimize=False) + opt = np.einsum(*path_test, optimize=exp_path) + assert_almost_equal(noopt, opt) + + def test_path_type_input_internal_trace(self): + #gh-20962 + path_test = self.build_operands('cab,cdd->ab') + exp_path = ['einsum_path', (1,), (0, 1)] + + path, path_str = np.einsum_path(*path_test, optimize=exp_path) + self.assert_path_equal(path, exp_path) + + # Double check einsum works on the input path + noopt = np.einsum(*path_test, optimize=False) + opt = np.einsum(*path_test, optimize=exp_path) + assert_almost_equal(noopt, opt) + + def test_path_type_input_invalid(self): + path_test = self.build_operands('ab,bc,cd,de->ae') + exp_path = ['einsum_path', (2, 3), (0, 1)] + assert_raises(RuntimeError, np.einsum, *path_test, optimize=exp_path) + assert_raises( + RuntimeError, np.einsum_path, *path_test, optimize=exp_path) + + path_test = self.build_operands('a,a,a->a') + exp_path = ['einsum_path', (1,), (0, 1)] + assert_raises(RuntimeError, np.einsum, *path_test, optimize=exp_path) + assert_raises( + RuntimeError, np.einsum_path, *path_test, optimize=exp_path) + + def test_spaces(self): + #gh-10794 + arr = np.array([[1]]) + for sp in itertools.product(['', ' '], repeat=4): + # no error for any spacing + np.einsum('{}...a{}->{}...a{}'.format(*sp), arr) + +def test_overlap(): + a = np.arange(9, dtype=int).reshape(3, 3) + b = np.arange(9, dtype=int).reshape(3, 3) + d = np.dot(a, b) + # sanity check + c = np.einsum('ij,jk->ik', a, b) + assert_equal(c, d) + #gh-10080, out overlaps one of the operands + c = np.einsum('ij,jk->ik', a, b, out=b) + assert_equal(c, d) diff --git a/.venv/lib/python3.12/site-packages/numpy/core/tests/test_errstate.py b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_errstate.py new file mode 100644 index 00000000..3a5647f6 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_errstate.py @@ -0,0 +1,61 @@ +import pytest +import sysconfig + +import numpy as np +from numpy.testing import assert_, assert_raises, IS_WASM + +# The floating point emulation on ARM EABI systems lacking a hardware FPU is 
+# known to be buggy. This is an attempt to identify these hosts. It may not +# catch all possible cases, but it catches the known cases of gh-413 and +# gh-15562. +hosttype = sysconfig.get_config_var('HOST_GNU_TYPE') +arm_softfloat = False if hosttype is None else hosttype.endswith('gnueabi') + +class TestErrstate: + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + @pytest.mark.skipif(arm_softfloat, + reason='platform/cpu issue with FPU (gh-413,-15562)') + def test_invalid(self): + with np.errstate(all='raise', under='ignore'): + a = -np.arange(3) + # This should work + with np.errstate(invalid='ignore'): + np.sqrt(a) + # While this should fail! + with assert_raises(FloatingPointError): + np.sqrt(a) + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + @pytest.mark.skipif(arm_softfloat, + reason='platform/cpu issue with FPU (gh-15562)') + def test_divide(self): + with np.errstate(all='raise', under='ignore'): + a = -np.arange(3) + # This should work + with np.errstate(divide='ignore'): + a // 0 + # While this should fail! + with assert_raises(FloatingPointError): + a // 0 + # As should this, see gh-15562 + with assert_raises(FloatingPointError): + a // a + + def test_errcall(self): + def foo(*args): + print(args) + + olderrcall = np.geterrcall() + with np.errstate(call=foo): + assert_(np.geterrcall() is foo, 'call is not foo') + with np.errstate(call=None): + assert_(np.geterrcall() is None, 'call is not None') + assert_(np.geterrcall() is olderrcall, 'call is not olderrcall') + + def test_errstate_decorator(self): + @np.errstate(all='ignore') + def foo(): + a = -np.arange(3) + a // 0 + + foo() diff --git a/.venv/lib/python3.12/site-packages/numpy/core/tests/test_extint128.py b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_extint128.py new file mode 100644 index 00000000..3b64915f --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_extint128.py @@ -0,0 +1,219 @@ +import itertools +import contextlib +import operator +import pytest + +import numpy as np +import numpy.core._multiarray_tests as mt + +from numpy.testing import assert_raises, assert_equal + + +INT64_MAX = np.iinfo(np.int64).max +INT64_MIN = np.iinfo(np.int64).min +INT64_MID = 2**32 + +# int128 is not two's complement, the sign bit is separate +INT128_MAX = 2**128 - 1 +INT128_MIN = -INT128_MAX +INT128_MID = 2**64 + +INT64_VALUES = ( + [INT64_MIN + j for j in range(20)] + + [INT64_MAX - j for j in range(20)] + + [INT64_MID + j for j in range(-20, 20)] + + [2*INT64_MID + j for j in range(-20, 20)] + + [INT64_MID//2 + j for j in range(-20, 20)] + + list(range(-70, 70)) +) + +INT128_VALUES = ( + [INT128_MIN + j for j in range(20)] + + [INT128_MAX - j for j in range(20)] + + [INT128_MID + j for j in range(-20, 20)] + + [2*INT128_MID + j for j in range(-20, 20)] + + [INT128_MID//2 + j for j in range(-20, 20)] + + list(range(-70, 70)) + + [False] # negative zero +) + +INT64_POS_VALUES = [x for x in INT64_VALUES if x > 0] + + +@contextlib.contextmanager +def exc_iter(*args): + """ + Iterate over Cartesian product of *args, and if an exception is raised, + add information of the current iterate. 
+ """ + + value = [None] + + def iterate(): + for v in itertools.product(*args): + value[0] = v + yield v + + try: + yield iterate() + except Exception: + import traceback + msg = "At: %r\n%s" % (repr(value[0]), + traceback.format_exc()) + raise AssertionError(msg) + + +def test_safe_binop(): + # Test checked arithmetic routines + + ops = [ + (operator.add, 1), + (operator.sub, 2), + (operator.mul, 3) + ] + + with exc_iter(ops, INT64_VALUES, INT64_VALUES) as it: + for xop, a, b in it: + pyop, op = xop + c = pyop(a, b) + + if not (INT64_MIN <= c <= INT64_MAX): + assert_raises(OverflowError, mt.extint_safe_binop, a, b, op) + else: + d = mt.extint_safe_binop(a, b, op) + if c != d: + # assert_equal is slow + assert_equal(d, c) + + +def test_to_128(): + with exc_iter(INT64_VALUES) as it: + for a, in it: + b = mt.extint_to_128(a) + if a != b: + assert_equal(b, a) + + +def test_to_64(): + with exc_iter(INT128_VALUES) as it: + for a, in it: + if not (INT64_MIN <= a <= INT64_MAX): + assert_raises(OverflowError, mt.extint_to_64, a) + else: + b = mt.extint_to_64(a) + if a != b: + assert_equal(b, a) + + +def test_mul_64_64(): + with exc_iter(INT64_VALUES, INT64_VALUES) as it: + for a, b in it: + c = a * b + d = mt.extint_mul_64_64(a, b) + if c != d: + assert_equal(d, c) + + +def test_add_128(): + with exc_iter(INT128_VALUES, INT128_VALUES) as it: + for a, b in it: + c = a + b + if not (INT128_MIN <= c <= INT128_MAX): + assert_raises(OverflowError, mt.extint_add_128, a, b) + else: + d = mt.extint_add_128(a, b) + if c != d: + assert_equal(d, c) + + +def test_sub_128(): + with exc_iter(INT128_VALUES, INT128_VALUES) as it: + for a, b in it: + c = a - b + if not (INT128_MIN <= c <= INT128_MAX): + assert_raises(OverflowError, mt.extint_sub_128, a, b) + else: + d = mt.extint_sub_128(a, b) + if c != d: + assert_equal(d, c) + + +def test_neg_128(): + with exc_iter(INT128_VALUES) as it: + for a, in it: + b = -a + c = mt.extint_neg_128(a) + if b != c: + assert_equal(c, b) + + +def test_shl_128(): + with exc_iter(INT128_VALUES) as it: + for a, in it: + if a < 0: + b = -(((-a) << 1) & (2**128-1)) + else: + b = (a << 1) & (2**128-1) + c = mt.extint_shl_128(a) + if b != c: + assert_equal(c, b) + + +def test_shr_128(): + with exc_iter(INT128_VALUES) as it: + for a, in it: + if a < 0: + b = -((-a) >> 1) + else: + b = a >> 1 + c = mt.extint_shr_128(a) + if b != c: + assert_equal(c, b) + + +def test_gt_128(): + with exc_iter(INT128_VALUES, INT128_VALUES) as it: + for a, b in it: + c = a > b + d = mt.extint_gt_128(a, b) + if c != d: + assert_equal(d, c) + + +@pytest.mark.slow +def test_divmod_128_64(): + with exc_iter(INT128_VALUES, INT64_POS_VALUES) as it: + for a, b in it: + if a >= 0: + c, cr = divmod(a, b) + else: + c, cr = divmod(-a, b) + c = -c + cr = -cr + + d, dr = mt.extint_divmod_128_64(a, b) + + if c != d or d != dr or b*d + dr != a: + assert_equal(d, c) + assert_equal(dr, cr) + assert_equal(b*d + dr, a) + + +def test_floordiv_128_64(): + with exc_iter(INT128_VALUES, INT64_POS_VALUES) as it: + for a, b in it: + c = a // b + d = mt.extint_floordiv_128_64(a, b) + + if c != d: + assert_equal(d, c) + + +def test_ceildiv_128_64(): + with exc_iter(INT128_VALUES, INT64_POS_VALUES) as it: + for a, b in it: + c = (a + b - 1) // b + d = mt.extint_ceildiv_128_64(a, b) + + if c != d: + assert_equal(d, c) diff --git a/.venv/lib/python3.12/site-packages/numpy/core/tests/test_function_base.py b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_function_base.py new file mode 100644 index 00000000..79f1ecfc --- 
/dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_function_base.py @@ -0,0 +1,446 @@ +import pytest +from numpy import ( + logspace, linspace, geomspace, dtype, array, sctypes, arange, isnan, + ndarray, sqrt, nextafter, stack, errstate + ) +from numpy.testing import ( + assert_, assert_equal, assert_raises, assert_array_equal, assert_allclose, + ) + + +class PhysicalQuantity(float): + def __new__(cls, value): + return float.__new__(cls, value) + + def __add__(self, x): + assert_(isinstance(x, PhysicalQuantity)) + return PhysicalQuantity(float(x) + float(self)) + __radd__ = __add__ + + def __sub__(self, x): + assert_(isinstance(x, PhysicalQuantity)) + return PhysicalQuantity(float(self) - float(x)) + + def __rsub__(self, x): + assert_(isinstance(x, PhysicalQuantity)) + return PhysicalQuantity(float(x) - float(self)) + + def __mul__(self, x): + return PhysicalQuantity(float(x) * float(self)) + __rmul__ = __mul__ + + def __div__(self, x): + return PhysicalQuantity(float(self) / float(x)) + + def __rdiv__(self, x): + return PhysicalQuantity(float(x) / float(self)) + + +class PhysicalQuantity2(ndarray): + __array_priority__ = 10 + + +class TestLogspace: + + def test_basic(self): + y = logspace(0, 6) + assert_(len(y) == 50) + y = logspace(0, 6, num=100) + assert_(y[-1] == 10 ** 6) + y = logspace(0, 6, endpoint=False) + assert_(y[-1] < 10 ** 6) + y = logspace(0, 6, num=7) + assert_array_equal(y, [1, 10, 100, 1e3, 1e4, 1e5, 1e6]) + + def test_start_stop_array(self): + start = array([0., 1.]) + stop = array([6., 7.]) + t1 = logspace(start, stop, 6) + t2 = stack([logspace(_start, _stop, 6) + for _start, _stop in zip(start, stop)], axis=1) + assert_equal(t1, t2) + t3 = logspace(start, stop[0], 6) + t4 = stack([logspace(_start, stop[0], 6) + for _start in start], axis=1) + assert_equal(t3, t4) + t5 = logspace(start, stop, 6, axis=-1) + assert_equal(t5, t2.T) + + @pytest.mark.parametrize("axis", [0, 1, -1]) + def test_base_array(self, axis: int): + start = 1 + stop = 2 + num = 6 + base = array([1, 2]) + t1 = logspace(start, stop, num=num, base=base, axis=axis) + t2 = stack( + [logspace(start, stop, num=num, base=_base) for _base in base], + axis=(axis + 1) % t1.ndim, + ) + assert_equal(t1, t2) + + @pytest.mark.parametrize("axis", [0, 1, -1]) + def test_stop_base_array(self, axis: int): + start = 1 + stop = array([2, 3]) + num = 6 + base = array([1, 2]) + t1 = logspace(start, stop, num=num, base=base, axis=axis) + t2 = stack( + [logspace(start, _stop, num=num, base=_base) + for _stop, _base in zip(stop, base)], + axis=(axis + 1) % t1.ndim, + ) + assert_equal(t1, t2) + + def test_dtype(self): + y = logspace(0, 6, dtype='float32') + assert_equal(y.dtype, dtype('float32')) + y = logspace(0, 6, dtype='float64') + assert_equal(y.dtype, dtype('float64')) + y = logspace(0, 6, dtype='int32') + assert_equal(y.dtype, dtype('int32')) + + def test_physical_quantities(self): + a = PhysicalQuantity(1.0) + b = PhysicalQuantity(5.0) + assert_equal(logspace(a, b), logspace(1.0, 5.0)) + + def test_subclass(self): + a = array(1).view(PhysicalQuantity2) + b = array(7).view(PhysicalQuantity2) + ls = logspace(a, b) + assert type(ls) is PhysicalQuantity2 + assert_equal(ls, logspace(1.0, 7.0)) + ls = logspace(a, b, 1) + assert type(ls) is PhysicalQuantity2 + assert_equal(ls, logspace(1.0, 7.0, 1)) + + +class TestGeomspace: + + def test_basic(self): + y = geomspace(1, 1e6) + assert_(len(y) == 50) + y = geomspace(1, 1e6, num=100) + assert_(y[-1] == 10 ** 6) + y = geomspace(1, 1e6, endpoint=False) + 
assert_(y[-1] < 10 ** 6) + y = geomspace(1, 1e6, num=7) + assert_array_equal(y, [1, 10, 100, 1e3, 1e4, 1e5, 1e6]) + + y = geomspace(8, 2, num=3) + assert_allclose(y, [8, 4, 2]) + assert_array_equal(y.imag, 0) + + y = geomspace(-1, -100, num=3) + assert_array_equal(y, [-1, -10, -100]) + assert_array_equal(y.imag, 0) + + y = geomspace(-100, -1, num=3) + assert_array_equal(y, [-100, -10, -1]) + assert_array_equal(y.imag, 0) + + def test_boundaries_match_start_and_stop_exactly(self): + # make sure that the boundaries of the returned array exactly + # equal 'start' and 'stop' - this isn't obvious because + # np.exp(np.log(x)) isn't necessarily exactly equal to x + start = 0.3 + stop = 20.3 + + y = geomspace(start, stop, num=1) + assert_equal(y[0], start) + + y = geomspace(start, stop, num=1, endpoint=False) + assert_equal(y[0], start) + + y = geomspace(start, stop, num=3) + assert_equal(y[0], start) + assert_equal(y[-1], stop) + + y = geomspace(start, stop, num=3, endpoint=False) + assert_equal(y[0], start) + + def test_nan_interior(self): + with errstate(invalid='ignore'): + y = geomspace(-3, 3, num=4) + + assert_equal(y[0], -3.0) + assert_(isnan(y[1:-1]).all()) + assert_equal(y[3], 3.0) + + with errstate(invalid='ignore'): + y = geomspace(-3, 3, num=4, endpoint=False) + + assert_equal(y[0], -3.0) + assert_(isnan(y[1:]).all()) + + def test_complex(self): + # Purely imaginary + y = geomspace(1j, 16j, num=5) + assert_allclose(y, [1j, 2j, 4j, 8j, 16j]) + assert_array_equal(y.real, 0) + + y = geomspace(-4j, -324j, num=5) + assert_allclose(y, [-4j, -12j, -36j, -108j, -324j]) + assert_array_equal(y.real, 0) + + y = geomspace(1+1j, 1000+1000j, num=4) + assert_allclose(y, [1+1j, 10+10j, 100+100j, 1000+1000j]) + + y = geomspace(-1+1j, -1000+1000j, num=4) + assert_allclose(y, [-1+1j, -10+10j, -100+100j, -1000+1000j]) + + # Logarithmic spirals + y = geomspace(-1, 1, num=3, dtype=complex) + assert_allclose(y, [-1, 1j, +1]) + + y = geomspace(0+3j, -3+0j, 3) + assert_allclose(y, [0+3j, -3/sqrt(2)+3j/sqrt(2), -3+0j]) + y = geomspace(0+3j, 3+0j, 3) + assert_allclose(y, [0+3j, 3/sqrt(2)+3j/sqrt(2), 3+0j]) + y = geomspace(-3+0j, 0-3j, 3) + assert_allclose(y, [-3+0j, -3/sqrt(2)-3j/sqrt(2), 0-3j]) + y = geomspace(0+3j, -3+0j, 3) + assert_allclose(y, [0+3j, -3/sqrt(2)+3j/sqrt(2), -3+0j]) + y = geomspace(-2-3j, 5+7j, 7) + assert_allclose(y, [-2-3j, -0.29058977-4.15771027j, + 2.08885354-4.34146838j, 4.58345529-3.16355218j, + 6.41401745-0.55233457j, 6.75707386+3.11795092j, + 5+7j]) + + # Type promotion should prevent the -5 from becoming a NaN + y = geomspace(3j, -5, 2) + assert_allclose(y, [3j, -5]) + y = geomspace(-5, 3j, 2) + assert_allclose(y, [-5, 3j]) + + def test_dtype(self): + y = geomspace(1, 1e6, dtype='float32') + assert_equal(y.dtype, dtype('float32')) + y = geomspace(1, 1e6, dtype='float64') + assert_equal(y.dtype, dtype('float64')) + y = geomspace(1, 1e6, dtype='int32') + assert_equal(y.dtype, dtype('int32')) + + # Native types + y = geomspace(1, 1e6, dtype=float) + assert_equal(y.dtype, dtype('float_')) + y = geomspace(1, 1e6, dtype=complex) + assert_equal(y.dtype, dtype('complex')) + + def test_start_stop_array_scalar(self): + lim1 = array([120, 100], dtype="int8") + lim2 = array([-120, -100], dtype="int8") + lim3 = array([1200, 1000], dtype="uint16") + t1 = geomspace(lim1[0], lim1[1], 5) + t2 = geomspace(lim2[0], lim2[1], 5) + t3 = geomspace(lim3[0], lim3[1], 5) + t4 = geomspace(120.0, 100.0, 5) + t5 = geomspace(-120.0, -100.0, 5) + t6 = geomspace(1200.0, 1000.0, 5) + + # t3 uses float32, t6 uses 
float64 + assert_allclose(t1, t4, rtol=1e-2) + assert_allclose(t2, t5, rtol=1e-2) + assert_allclose(t3, t6, rtol=1e-5) + + def test_start_stop_array(self): + # Try to use all special cases. + start = array([1.e0, 32., 1j, -4j, 1+1j, -1]) + stop = array([1.e4, 2., 16j, -324j, 10000+10000j, 1]) + t1 = geomspace(start, stop, 5) + t2 = stack([geomspace(_start, _stop, 5) + for _start, _stop in zip(start, stop)], axis=1) + assert_equal(t1, t2) + t3 = geomspace(start, stop[0], 5) + t4 = stack([geomspace(_start, stop[0], 5) + for _start in start], axis=1) + assert_equal(t3, t4) + t5 = geomspace(start, stop, 5, axis=-1) + assert_equal(t5, t2.T) + + def test_physical_quantities(self): + a = PhysicalQuantity(1.0) + b = PhysicalQuantity(5.0) + assert_equal(geomspace(a, b), geomspace(1.0, 5.0)) + + def test_subclass(self): + a = array(1).view(PhysicalQuantity2) + b = array(7).view(PhysicalQuantity2) + gs = geomspace(a, b) + assert type(gs) is PhysicalQuantity2 + assert_equal(gs, geomspace(1.0, 7.0)) + gs = geomspace(a, b, 1) + assert type(gs) is PhysicalQuantity2 + assert_equal(gs, geomspace(1.0, 7.0, 1)) + + def test_bounds(self): + assert_raises(ValueError, geomspace, 0, 10) + assert_raises(ValueError, geomspace, 10, 0) + assert_raises(ValueError, geomspace, 0, 0) + + +class TestLinspace: + + def test_basic(self): + y = linspace(0, 10) + assert_(len(y) == 50) + y = linspace(2, 10, num=100) + assert_(y[-1] == 10) + y = linspace(2, 10, endpoint=False) + assert_(y[-1] < 10) + assert_raises(ValueError, linspace, 0, 10, num=-1) + + def test_corner(self): + y = list(linspace(0, 1, 1)) + assert_(y == [0.0], y) + assert_raises(TypeError, linspace, 0, 1, num=2.5) + + def test_type(self): + t1 = linspace(0, 1, 0).dtype + t2 = linspace(0, 1, 1).dtype + t3 = linspace(0, 1, 2).dtype + assert_equal(t1, t2) + assert_equal(t2, t3) + + def test_dtype(self): + y = linspace(0, 6, dtype='float32') + assert_equal(y.dtype, dtype('float32')) + y = linspace(0, 6, dtype='float64') + assert_equal(y.dtype, dtype('float64')) + y = linspace(0, 6, dtype='int32') + assert_equal(y.dtype, dtype('int32')) + + def test_start_stop_array_scalar(self): + lim1 = array([-120, 100], dtype="int8") + lim2 = array([120, -100], dtype="int8") + lim3 = array([1200, 1000], dtype="uint16") + t1 = linspace(lim1[0], lim1[1], 5) + t2 = linspace(lim2[0], lim2[1], 5) + t3 = linspace(lim3[0], lim3[1], 5) + t4 = linspace(-120.0, 100.0, 5) + t5 = linspace(120.0, -100.0, 5) + t6 = linspace(1200.0, 1000.0, 5) + assert_equal(t1, t4) + assert_equal(t2, t5) + assert_equal(t3, t6) + + def test_start_stop_array(self): + start = array([-120, 120], dtype="int8") + stop = array([100, -100], dtype="int8") + t1 = linspace(start, stop, 5) + t2 = stack([linspace(_start, _stop, 5) + for _start, _stop in zip(start, stop)], axis=1) + assert_equal(t1, t2) + t3 = linspace(start, stop[0], 5) + t4 = stack([linspace(_start, stop[0], 5) + for _start in start], axis=1) + assert_equal(t3, t4) + t5 = linspace(start, stop, 5, axis=-1) + assert_equal(t5, t2.T) + + def test_complex(self): + lim1 = linspace(1 + 2j, 3 + 4j, 5) + t1 = array([1.0+2.j, 1.5+2.5j, 2.0+3j, 2.5+3.5j, 3.0+4j]) + lim2 = linspace(1j, 10, 5) + t2 = array([0.0+1.j, 2.5+0.75j, 5.0+0.5j, 7.5+0.25j, 10.0+0j]) + assert_equal(lim1, t1) + assert_equal(lim2, t2) + + def test_physical_quantities(self): + a = PhysicalQuantity(0.0) + b = PhysicalQuantity(1.0) + assert_equal(linspace(a, b), linspace(0.0, 1.0)) + + def test_subclass(self): + a = array(0).view(PhysicalQuantity2) + b = array(1).view(PhysicalQuantity2) + ls = 
linspace(a, b) + assert type(ls) is PhysicalQuantity2 + assert_equal(ls, linspace(0.0, 1.0)) + ls = linspace(a, b, 1) + assert type(ls) is PhysicalQuantity2 + assert_equal(ls, linspace(0.0, 1.0, 1)) + + def test_array_interface(self): + # Regression test for https://github.com/numpy/numpy/pull/6659 + # Ensure that start/stop can be objects that implement + # __array_interface__ and are convertible to numeric scalars + + class Arrayish: + """ + A generic object that supports the __array_interface__ and hence + can in principle be converted to a numeric scalar, but is not + otherwise recognized as numeric, but also happens to support + multiplication by floats. + + Data should be an object that implements the buffer interface, + and contains at least 4 bytes. + """ + + def __init__(self, data): + self._data = data + + @property + def __array_interface__(self): + return {'shape': (), 'typestr': '<i4', 'data': self._data, + 'version': 3} + + def __mul__(self, other): + # For the purposes of this test any multiplication is an + # identity operation :) + return self + + one = Arrayish(array(1, dtype='<i4')) + five = Arrayish(array(5, dtype='<i4')) + + assert_equal(linspace(one, five), linspace(1, 5)) + + def test_denormal_numbers(self): + # Regression test for gh-5437. Will probably fail when compiled + # with ICC, which flushes denormals to zero + for ftype in sctypes['float']: + stop = nextafter(ftype(0), ftype(1)) * 5 # A denormal number + assert_(any(linspace(0, stop, 10, endpoint=False, dtype=ftype))) + + def test_equivalent_to_arange(self): + for j in range(1000): + assert_equal(linspace(0, j, j+1, dtype=int), + arange(j+1, dtype=int)) + + def test_retstep(self): + for num in [0, 1, 2]: + for ept in [False, True]: + y = linspace(0, 1, num, endpoint=ept, retstep=True) + assert isinstance(y, tuple) and len(y) == 2 + if num == 2: + y0_expect = [0.0, 1.0] if ept else [0.0, 0.5] + assert_array_equal(y[0], y0_expect) + assert_equal(y[1], y0_expect[1]) + elif num == 1 and not ept: + assert_array_equal(y[0], [0.0]) + assert_equal(y[1], 1.0) + else: + assert_array_equal(y[0], [0.0][:num]) + assert isnan(y[1]) + + def test_object(self): + start = array(1, dtype='O') + stop = array(2, dtype='O') + y = linspace(start, stop, 3) + assert_array_equal(y, array([1., 1.5, 2.])) + + def test_round_negative(self): + y = linspace(-1, 3, num=8, dtype=int) + t = array([-1, -1, 0, 0, 1, 1, 2, 3], dtype=int) + assert_array_equal(y, t) + + def test_any_step_zero_and_not_mult_inplace(self): + # any_step_zero is True, _mult_inplace is False + start = array([0.0, 1.0]) + stop = array([2.0, 1.0]) + y = linspace(start, stop, 3) + assert_array_equal(y, array([[0.0, 1.0], [1.0, 1.0], [2.0, 1.0]])) + + diff --git a/.venv/lib/python3.12/site-packages/numpy/core/tests/test_getlimits.py b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_getlimits.py new file mode 100644 index 00000000..f646e2bd --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_getlimits.py @@ -0,0 +1,194 @@ +""" Test functions for limits module. 
+ +""" +import warnings +import numpy as np +import pytest +from numpy.core import finfo, iinfo +from numpy import half, single, double, longdouble +from numpy.testing import assert_equal, assert_, assert_raises +from numpy.core.getlimits import _discovered_machar, _float_ma + +################################################## + +class TestPythonFloat: + def test_singleton(self): + ftype = finfo(float) + ftype2 = finfo(float) + assert_equal(id(ftype), id(ftype2)) + +class TestHalf: + def test_singleton(self): + ftype = finfo(half) + ftype2 = finfo(half) + assert_equal(id(ftype), id(ftype2)) + +class TestSingle: + def test_singleton(self): + ftype = finfo(single) + ftype2 = finfo(single) + assert_equal(id(ftype), id(ftype2)) + +class TestDouble: + def test_singleton(self): + ftype = finfo(double) + ftype2 = finfo(double) + assert_equal(id(ftype), id(ftype2)) + +class TestLongdouble: + def test_singleton(self): + ftype = finfo(longdouble) + ftype2 = finfo(longdouble) + assert_equal(id(ftype), id(ftype2)) + +def assert_finfo_equal(f1, f2): + # assert two finfo instances have the same attributes + for attr in ('bits', 'eps', 'epsneg', 'iexp', 'machep', + 'max', 'maxexp', 'min', 'minexp', 'negep', 'nexp', + 'nmant', 'precision', 'resolution', 'tiny', + 'smallest_normal', 'smallest_subnormal'): + assert_equal(getattr(f1, attr), getattr(f2, attr), + f'finfo instances {f1} and {f2} differ on {attr}') + +def assert_iinfo_equal(i1, i2): + # assert two iinfo instances have the same attributes + for attr in ('bits', 'min', 'max'): + assert_equal(getattr(i1, attr), getattr(i2, attr), + f'iinfo instances {i1} and {i2} differ on {attr}') + +class TestFinfo: + def test_basic(self): + dts = list(zip(['f2', 'f4', 'f8', 'c8', 'c16'], + [np.float16, np.float32, np.float64, np.complex64, + np.complex128])) + for dt1, dt2 in dts: + assert_finfo_equal(finfo(dt1), finfo(dt2)) + + assert_raises(ValueError, finfo, 'i4') + + def test_regression_gh23108(self): + # np.float32(1.0) and np.float64(1.0) have the same hash and are + # equal under the == operator + f1 = np.finfo(np.float32(1.0)) + f2 = np.finfo(np.float64(1.0)) + assert f1 != f2 + + def test_regression_gh23867(self): + class NonHashableWithDtype: + __hash__ = None + dtype = np.dtype('float32') + + x = NonHashableWithDtype() + assert np.finfo(x) == np.finfo(x.dtype) + + +class TestIinfo: + def test_basic(self): + dts = list(zip(['i1', 'i2', 'i4', 'i8', + 'u1', 'u2', 'u4', 'u8'], + [np.int8, np.int16, np.int32, np.int64, + np.uint8, np.uint16, np.uint32, np.uint64])) + for dt1, dt2 in dts: + assert_iinfo_equal(iinfo(dt1), iinfo(dt2)) + + assert_raises(ValueError, iinfo, 'f4') + + def test_unsigned_max(self): + types = np.sctypes['uint'] + for T in types: + with np.errstate(over="ignore"): + max_calculated = T(0) - T(1) + assert_equal(iinfo(T).max, max_calculated) + +class TestRepr: + def test_iinfo_repr(self): + expected = "iinfo(min=-32768, max=32767, dtype=int16)" + assert_equal(repr(np.iinfo(np.int16)), expected) + + def test_finfo_repr(self): + expected = "finfo(resolution=1e-06, min=-3.4028235e+38," + \ + " max=3.4028235e+38, dtype=float32)" + assert_equal(repr(np.finfo(np.float32)), expected) + + +def test_instances(): + # Test the finfo and iinfo results on numeric instances agree with + # the results on the corresponding types + + for c in [int, np.int16, np.int32, np.int64]: + class_iinfo = iinfo(c) + instance_iinfo = iinfo(c(12)) + + assert_iinfo_equal(class_iinfo, instance_iinfo) + + for c in [float, np.float16, np.float32, np.float64]: + 
class_finfo = finfo(c) + instance_finfo = finfo(c(1.2)) + assert_finfo_equal(class_finfo, instance_finfo) + + with pytest.raises(ValueError): + iinfo(10.) + + with pytest.raises(ValueError): + iinfo('hi') + + with pytest.raises(ValueError): + finfo(np.int64(1)) + + +def assert_ma_equal(discovered, ma_like): + # Check MachAr-like objects same as calculated MachAr instances + for key, value in discovered.__dict__.items(): + assert_equal(value, getattr(ma_like, key)) + if hasattr(value, 'shape'): + assert_equal(value.shape, getattr(ma_like, key).shape) + assert_equal(value.dtype, getattr(ma_like, key).dtype) + + +def test_known_types(): + # Test we are correctly compiling parameters for known types + for ftype, ma_like in ((np.float16, _float_ma[16]), + (np.float32, _float_ma[32]), + (np.float64, _float_ma[64])): + assert_ma_equal(_discovered_machar(ftype), ma_like) + # Suppress warning for broken discovery of double double on PPC + with np.errstate(all='ignore'): + ld_ma = _discovered_machar(np.longdouble) + bytes = np.dtype(np.longdouble).itemsize + if (ld_ma.it, ld_ma.maxexp) == (63, 16384) and bytes in (12, 16): + # 80-bit extended precision + assert_ma_equal(ld_ma, _float_ma[80]) + elif (ld_ma.it, ld_ma.maxexp) == (112, 16384) and bytes == 16: + # IEE 754 128-bit + assert_ma_equal(ld_ma, _float_ma[128]) + + +def test_subnormal_warning(): + """Test that the subnormal is zero warning is not being raised.""" + with np.errstate(all='ignore'): + ld_ma = _discovered_machar(np.longdouble) + bytes = np.dtype(np.longdouble).itemsize + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + if (ld_ma.it, ld_ma.maxexp) == (63, 16384) and bytes in (12, 16): + # 80-bit extended precision + ld_ma.smallest_subnormal + assert len(w) == 0 + elif (ld_ma.it, ld_ma.maxexp) == (112, 16384) and bytes == 16: + # IEE 754 128-bit + ld_ma.smallest_subnormal + assert len(w) == 0 + else: + # Double double + ld_ma.smallest_subnormal + # This test may fail on some platforms + assert len(w) == 0 + + +def test_plausible_finfo(): + # Assert that finfo returns reasonable results for all types + for ftype in np.sctypes['float'] + np.sctypes['complex']: + info = np.finfo(ftype) + assert_(info.nmant > 1) + assert_(info.minexp < -1) + assert_(info.maxexp > 1) diff --git a/.venv/lib/python3.12/site-packages/numpy/core/tests/test_half.py b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_half.py new file mode 100644 index 00000000..fbc1bf6a --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_half.py @@ -0,0 +1,572 @@ +import platform +import pytest + +import numpy as np +from numpy import uint16, float16, float32, float64 +from numpy.testing import assert_, assert_equal, _OLD_PROMOTION, IS_WASM + + +def assert_raises_fpe(strmatch, callable, *args, **kwargs): + try: + callable(*args, **kwargs) + except FloatingPointError as exc: + assert_(str(exc).find(strmatch) >= 0, + "Did not raise floating point %s error" % strmatch) + else: + assert_(False, + "Did not raise floating point %s error" % strmatch) + +class TestHalf: + def setup_method(self): + # An array of all possible float16 values + self.all_f16 = np.arange(0x10000, dtype=uint16) + self.all_f16.dtype = float16 + + # NaN value can cause an invalid FP exception if HW is been used + with np.errstate(invalid='ignore'): + self.all_f32 = np.array(self.all_f16, dtype=float32) + self.all_f64 = np.array(self.all_f16, dtype=float64) + + # An array of all non-NaN float16 values, in sorted order + self.nonan_f16 = 
np.concatenate(
+ (np.arange(0xfc00, 0x7fff, -1, dtype=uint16),
+ np.arange(0x0000, 0x7c01, 1, dtype=uint16)))
+ self.nonan_f16.dtype = float16
+ self.nonan_f32 = np.array(self.nonan_f16, dtype=float32)
+ self.nonan_f64 = np.array(self.nonan_f16, dtype=float64)
+
+ # An array of all finite float16 values, in sorted order
+ self.finite_f16 = self.nonan_f16[1:-1]
+ self.finite_f32 = self.nonan_f32[1:-1]
+ self.finite_f64 = self.nonan_f64[1:-1]
+
+ def test_half_conversions(self):
+ """Checks that all 16-bit values survive conversion
+ to/from 32-bit and 64-bit float"""
+ # Because the underlying routines preserve the NaN bits, every
+ # value is preserved when converting to/from other floats.
+
+ # Convert from float32 back to float16
+ with np.errstate(invalid='ignore'):
+ b = np.array(self.all_f32, dtype=float16)
+ # avoid testing NaNs due to differing bits between Q/SNaNs
+ b_nn = b == b
+ assert_equal(self.all_f16[b_nn].view(dtype=uint16),
+ b[b_nn].view(dtype=uint16))
+
+ # Convert from float64 back to float16
+ with np.errstate(invalid='ignore'):
+ b = np.array(self.all_f64, dtype=float16)
+ b_nn = b == b
+ assert_equal(self.all_f16[b_nn].view(dtype=uint16),
+ b[b_nn].view(dtype=uint16))
+
+ # Convert float16 to longdouble and back
+ # This doesn't necessarily preserve the extra NaN bits,
+ # so exclude NaNs.
+ a_ld = np.array(self.nonan_f16, dtype=np.longdouble)
+ b = np.array(a_ld, dtype=float16)
+ assert_equal(self.nonan_f16.view(dtype=uint16),
+ b.view(dtype=uint16))
+
+ # Check the range for which all integers can be represented
+ i_int = np.arange(-2048, 2049)
+ i_f16 = np.array(i_int, dtype=float16)
+ j = np.array(i_f16, dtype=int)
+ assert_equal(i_int, j)
+
+ @pytest.mark.parametrize("string_dt", ["S", "U"])
+ def test_half_conversion_to_string(self, string_dt):
+ # Currently uses S/U32 (which is sufficient for float32)
+ expected_dt = np.dtype(f"{string_dt}32")
+ assert np.promote_types(np.float16, string_dt) == expected_dt
+ assert np.promote_types(string_dt, np.float16) == expected_dt
+
+ arr = np.ones(3, dtype=np.float16).astype(string_dt)
+ assert arr.dtype == expected_dt
+
+ @pytest.mark.parametrize("string_dt", ["S", "U"])
+ def test_half_conversion_from_string(self, string_dt):
+ string = np.array("3.1416", dtype=string_dt)
+ assert string.astype(np.float16) == np.array(3.1416, dtype=np.float16)
+
+ @pytest.mark.parametrize("offset", [None, "up", "down"])
+ @pytest.mark.parametrize("shift", [None, "up", "down"])
+ @pytest.mark.parametrize("float_t", [np.float32, np.float64])
+ @np._no_nep50_warning()
+ def test_half_conversion_rounding(self, float_t, shift, offset):
+ # Assumes that round to even is used during casting.
+ max_pattern = np.float16(np.finfo(np.float16).max).view(np.uint16) + + # Test all (positive) finite numbers, denormals are most interesting + # however: + f16s_patterns = np.arange(0, max_pattern+1, dtype=np.uint16) + f16s_float = f16s_patterns.view(np.float16).astype(float_t) + + # Shift the values by half a bit up or a down (or do not shift), + if shift == "up": + f16s_float = 0.5 * (f16s_float[:-1] + f16s_float[1:])[1:] + elif shift == "down": + f16s_float = 0.5 * (f16s_float[:-1] + f16s_float[1:])[:-1] + else: + f16s_float = f16s_float[1:-1] + + # Increase the float by a minimal value: + if offset == "up": + f16s_float = np.nextafter(f16s_float, float_t(np.inf)) + elif offset == "down": + f16s_float = np.nextafter(f16s_float, float_t(-np.inf)) + + # Convert back to float16 and its bit pattern: + res_patterns = f16s_float.astype(np.float16).view(np.uint16) + + # The above calculations tries the original values, or the exact + # mid points between the float16 values. It then further offsets them + # by as little as possible. If no offset occurs, "round to even" + # logic will be necessary, an arbitrarily small offset should cause + # normal up/down rounding always. + + # Calculate the expected pattern: + cmp_patterns = f16s_patterns[1:-1].copy() + + if shift == "down" and offset != "up": + shift_pattern = -1 + elif shift == "up" and offset != "down": + shift_pattern = 1 + else: + # There cannot be a shift, either shift is None, so all rounding + # will go back to original, or shift is reduced by offset too much. + shift_pattern = 0 + + # If rounding occurs, is it normal rounding or round to even? + if offset is None: + # Round to even occurs, modify only non-even, cast to allow + (-1) + cmp_patterns[0::2].view(np.int16)[...] += shift_pattern + else: + cmp_patterns.view(np.int16)[...] += shift_pattern + + assert_equal(res_patterns, cmp_patterns) + + @pytest.mark.parametrize(["float_t", "uint_t", "bits"], + [(np.float32, np.uint32, 23), + (np.float64, np.uint64, 52)]) + def test_half_conversion_denormal_round_even(self, float_t, uint_t, bits): + # Test specifically that all bits are considered when deciding + # whether round to even should occur (i.e. no bits are lost at the + # end. Compare also gh-12721. 
The most bits can get lost for the + # smallest denormal: + smallest_value = np.uint16(1).view(np.float16).astype(float_t) + assert smallest_value == 2**-24 + + # Will be rounded to zero based on round to even rule: + rounded_to_zero = smallest_value / float_t(2) + assert rounded_to_zero.astype(np.float16) == 0 + + # The significand will be all 0 for the float_t, test that we do not + # lose the lower ones of these: + for i in range(bits): + # slightly increasing the value should make it round up: + larger_pattern = rounded_to_zero.view(uint_t) | uint_t(1 << i) + larger_value = larger_pattern.view(float_t) + assert larger_value.astype(np.float16) == smallest_value + + def test_nans_infs(self): + with np.errstate(all='ignore'): + # Check some of the ufuncs + assert_equal(np.isnan(self.all_f16), np.isnan(self.all_f32)) + assert_equal(np.isinf(self.all_f16), np.isinf(self.all_f32)) + assert_equal(np.isfinite(self.all_f16), np.isfinite(self.all_f32)) + assert_equal(np.signbit(self.all_f16), np.signbit(self.all_f32)) + assert_equal(np.spacing(float16(65504)), np.inf) + + # Check comparisons of all values with NaN + nan = float16(np.nan) + + assert_(not (self.all_f16 == nan).any()) + assert_(not (nan == self.all_f16).any()) + + assert_((self.all_f16 != nan).all()) + assert_((nan != self.all_f16).all()) + + assert_(not (self.all_f16 < nan).any()) + assert_(not (nan < self.all_f16).any()) + + assert_(not (self.all_f16 <= nan).any()) + assert_(not (nan <= self.all_f16).any()) + + assert_(not (self.all_f16 > nan).any()) + assert_(not (nan > self.all_f16).any()) + + assert_(not (self.all_f16 >= nan).any()) + assert_(not (nan >= self.all_f16).any()) + + def test_half_values(self): + """Confirms a small number of known half values""" + a = np.array([1.0, -1.0, + 2.0, -2.0, + 0.0999755859375, 0.333251953125, # 1/10, 1/3 + 65504, -65504, # Maximum magnitude + 2.0**(-14), -2.0**(-14), # Minimum normal + 2.0**(-24), -2.0**(-24), # Minimum subnormal + 0, -1/1e1000, # Signed zeros + np.inf, -np.inf]) + b = np.array([0x3c00, 0xbc00, + 0x4000, 0xc000, + 0x2e66, 0x3555, + 0x7bff, 0xfbff, + 0x0400, 0x8400, + 0x0001, 0x8001, + 0x0000, 0x8000, + 0x7c00, 0xfc00], dtype=uint16) + b.dtype = float16 + assert_equal(a, b) + + def test_half_rounding(self): + """Checks that rounding when converting to half is correct""" + a = np.array([2.0**-25 + 2.0**-35, # Rounds to minimum subnormal + 2.0**-25, # Underflows to zero (nearest even mode) + 2.0**-26, # Underflows to zero + 1.0+2.0**-11 + 2.0**-16, # rounds to 1.0+2**(-10) + 1.0+2.0**-11, # rounds to 1.0 (nearest even mode) + 1.0+2.0**-12, # rounds to 1.0 + 65519, # rounds to 65504 + 65520], # rounds to inf + dtype=float64) + rounded = [2.0**-24, + 0.0, + 0.0, + 1.0+2.0**(-10), + 1.0, + 1.0, + 65504, + np.inf] + + # Check float64->float16 rounding + with np.errstate(over="ignore"): + b = np.array(a, dtype=float16) + assert_equal(b, rounded) + + # Check float32->float16 rounding + a = np.array(a, dtype=float32) + with np.errstate(over="ignore"): + b = np.array(a, dtype=float16) + assert_equal(b, rounded) + + def test_half_correctness(self): + """Take every finite float16, and check the casting functions with + a manual conversion.""" + + # Create an array of all finite float16s + a_bits = self.finite_f16.view(dtype=uint16) + + # Convert to 64-bit float manually + a_sgn = (-1.0)**((a_bits & 0x8000) >> 15) + a_exp = np.array((a_bits & 0x7c00) >> 10, dtype=np.int32) - 15 + a_man = (a_bits & 0x03ff) * 2.0**(-10) + # Implicit bit of normalized floats + a_man[a_exp != -15] += 1 + 
# Denormalized exponent is -14 + a_exp[a_exp == -15] = -14 + + a_manual = a_sgn * a_man * 2.0**a_exp + + a32_fail = np.nonzero(self.finite_f32 != a_manual)[0] + if len(a32_fail) != 0: + bad_index = a32_fail[0] + assert_equal(self.finite_f32, a_manual, + "First non-equal is half value 0x%x -> %g != %g" % + (a_bits[bad_index], + self.finite_f32[bad_index], + a_manual[bad_index])) + + a64_fail = np.nonzero(self.finite_f64 != a_manual)[0] + if len(a64_fail) != 0: + bad_index = a64_fail[0] + assert_equal(self.finite_f64, a_manual, + "First non-equal is half value 0x%x -> %g != %g" % + (a_bits[bad_index], + self.finite_f64[bad_index], + a_manual[bad_index])) + + def test_half_ordering(self): + """Make sure comparisons are working right""" + + # All non-NaN float16 values in reverse order + a = self.nonan_f16[::-1].copy() + + # 32-bit float copy + b = np.array(a, dtype=float32) + + # Should sort the same + a.sort() + b.sort() + assert_equal(a, b) + + # Comparisons should work + assert_((a[:-1] <= a[1:]).all()) + assert_(not (a[:-1] > a[1:]).any()) + assert_((a[1:] >= a[:-1]).all()) + assert_(not (a[1:] < a[:-1]).any()) + # All != except for +/-0 + assert_equal(np.nonzero(a[:-1] < a[1:])[0].size, a.size-2) + assert_equal(np.nonzero(a[1:] > a[:-1])[0].size, a.size-2) + + def test_half_funcs(self): + """Test the various ArrFuncs""" + + # fill + assert_equal(np.arange(10, dtype=float16), + np.arange(10, dtype=float32)) + + # fillwithscalar + a = np.zeros((5,), dtype=float16) + a.fill(1) + assert_equal(a, np.ones((5,), dtype=float16)) + + # nonzero and copyswap + a = np.array([0, 0, -1, -1/1e20, 0, 2.0**-24, 7.629e-6], dtype=float16) + assert_equal(a.nonzero()[0], + [2, 5, 6]) + a = a.byteswap() + a = a.view(a.dtype.newbyteorder()) + assert_equal(a.nonzero()[0], + [2, 5, 6]) + + # dot + a = np.arange(0, 10, 0.5, dtype=float16) + b = np.ones((20,), dtype=float16) + assert_equal(np.dot(a, b), + 95) + + # argmax + a = np.array([0, -np.inf, -2, 0.5, 12.55, 7.3, 2.1, 12.4], dtype=float16) + assert_equal(a.argmax(), + 4) + a = np.array([0, -np.inf, -2, np.inf, 12.55, np.nan, 2.1, 12.4], dtype=float16) + assert_equal(a.argmax(), + 5) + + # getitem + a = np.arange(10, dtype=float16) + for i in range(10): + assert_equal(a.item(i), i) + + def test_spacing_nextafter(self): + """Test np.spacing and np.nextafter""" + # All non-negative finite #'s + a = np.arange(0x7c00, dtype=uint16) + hinf = np.array((np.inf,), dtype=float16) + hnan = np.array((np.nan,), dtype=float16) + a_f16 = a.view(dtype=float16) + + assert_equal(np.spacing(a_f16[:-1]), a_f16[1:]-a_f16[:-1]) + + assert_equal(np.nextafter(a_f16[:-1], hinf), a_f16[1:]) + assert_equal(np.nextafter(a_f16[0], -hinf), -a_f16[1]) + assert_equal(np.nextafter(a_f16[1:], -hinf), a_f16[:-1]) + + assert_equal(np.nextafter(hinf, a_f16), a_f16[-1]) + assert_equal(np.nextafter(-hinf, a_f16), -a_f16[-1]) + + assert_equal(np.nextafter(hinf, hinf), hinf) + assert_equal(np.nextafter(hinf, -hinf), a_f16[-1]) + assert_equal(np.nextafter(-hinf, hinf), -a_f16[-1]) + assert_equal(np.nextafter(-hinf, -hinf), -hinf) + + assert_equal(np.nextafter(a_f16, hnan), hnan[0]) + assert_equal(np.nextafter(hnan, a_f16), hnan[0]) + + assert_equal(np.nextafter(hnan, hnan), hnan) + assert_equal(np.nextafter(hinf, hnan), hnan) + assert_equal(np.nextafter(hnan, hinf), hnan) + + # switch to negatives + a |= 0x8000 + + assert_equal(np.spacing(a_f16[0]), np.spacing(a_f16[1])) + assert_equal(np.spacing(a_f16[1:]), a_f16[:-1]-a_f16[1:]) + + assert_equal(np.nextafter(a_f16[0], hinf), -a_f16[1]) + 
assert_equal(np.nextafter(a_f16[1:], hinf), a_f16[:-1]) + assert_equal(np.nextafter(a_f16[:-1], -hinf), a_f16[1:]) + + assert_equal(np.nextafter(hinf, a_f16), -a_f16[-1]) + assert_equal(np.nextafter(-hinf, a_f16), a_f16[-1]) + + assert_equal(np.nextafter(a_f16, hnan), hnan[0]) + assert_equal(np.nextafter(hnan, a_f16), hnan[0]) + + def test_half_ufuncs(self): + """Test the various ufuncs""" + + a = np.array([0, 1, 2, 4, 2], dtype=float16) + b = np.array([-2, 5, 1, 4, 3], dtype=float16) + c = np.array([0, -1, -np.inf, np.nan, 6], dtype=float16) + + assert_equal(np.add(a, b), [-2, 6, 3, 8, 5]) + assert_equal(np.subtract(a, b), [2, -4, 1, 0, -1]) + assert_equal(np.multiply(a, b), [0, 5, 2, 16, 6]) + assert_equal(np.divide(a, b), [0, 0.199951171875, 2, 1, 0.66650390625]) + + assert_equal(np.equal(a, b), [False, False, False, True, False]) + assert_equal(np.not_equal(a, b), [True, True, True, False, True]) + assert_equal(np.less(a, b), [False, True, False, False, True]) + assert_equal(np.less_equal(a, b), [False, True, False, True, True]) + assert_equal(np.greater(a, b), [True, False, True, False, False]) + assert_equal(np.greater_equal(a, b), [True, False, True, True, False]) + assert_equal(np.logical_and(a, b), [False, True, True, True, True]) + assert_equal(np.logical_or(a, b), [True, True, True, True, True]) + assert_equal(np.logical_xor(a, b), [True, False, False, False, False]) + assert_equal(np.logical_not(a), [True, False, False, False, False]) + + assert_equal(np.isnan(c), [False, False, False, True, False]) + assert_equal(np.isinf(c), [False, False, True, False, False]) + assert_equal(np.isfinite(c), [True, True, False, False, True]) + assert_equal(np.signbit(b), [True, False, False, False, False]) + + assert_equal(np.copysign(b, a), [2, 5, 1, 4, 3]) + + assert_equal(np.maximum(a, b), [0, 5, 2, 4, 3]) + + x = np.maximum(b, c) + assert_(np.isnan(x[3])) + x[3] = 0 + assert_equal(x, [0, 5, 1, 0, 6]) + + assert_equal(np.minimum(a, b), [-2, 1, 1, 4, 2]) + + x = np.minimum(b, c) + assert_(np.isnan(x[3])) + x[3] = 0 + assert_equal(x, [-2, -1, -np.inf, 0, 3]) + + assert_equal(np.fmax(a, b), [0, 5, 2, 4, 3]) + assert_equal(np.fmax(b, c), [0, 5, 1, 4, 6]) + assert_equal(np.fmin(a, b), [-2, 1, 1, 4, 2]) + assert_equal(np.fmin(b, c), [-2, -1, -np.inf, 4, 3]) + + assert_equal(np.floor_divide(a, b), [0, 0, 2, 1, 0]) + assert_equal(np.remainder(a, b), [0, 1, 0, 0, 2]) + assert_equal(np.divmod(a, b), ([0, 0, 2, 1, 0], [0, 1, 0, 0, 2])) + assert_equal(np.square(b), [4, 25, 1, 16, 9]) + assert_equal(np.reciprocal(b), [-0.5, 0.199951171875, 1, 0.25, 0.333251953125]) + assert_equal(np.ones_like(b), [1, 1, 1, 1, 1]) + assert_equal(np.conjugate(b), b) + assert_equal(np.absolute(b), [2, 5, 1, 4, 3]) + assert_equal(np.negative(b), [2, -5, -1, -4, -3]) + assert_equal(np.positive(b), b) + assert_equal(np.sign(b), [-1, 1, 1, 1, 1]) + assert_equal(np.modf(b), ([0, 0, 0, 0, 0], b)) + assert_equal(np.frexp(b), ([-0.5, 0.625, 0.5, 0.5, 0.75], [2, 3, 1, 3, 2])) + assert_equal(np.ldexp(b, [0, 1, 2, 4, 2]), [-2, 10, 4, 64, 12]) + + @np._no_nep50_warning() + def test_half_coercion(self, weak_promotion): + """Test that half gets coerced properly with the other types""" + a16 = np.array((1,), dtype=float16) + a32 = np.array((1,), dtype=float32) + b16 = float16(1) + b32 = float32(1) + + assert np.power(a16, 2).dtype == float16 + assert np.power(a16, 2.0).dtype == float16 + assert np.power(a16, b16).dtype == float16 + expected_dt = float32 if weak_promotion else float16 + assert np.power(a16, b32).dtype == expected_dt + 
assert np.power(a16, a16).dtype == float16 + assert np.power(a16, a32).dtype == float32 + + expected_dt = float16 if weak_promotion else float64 + assert np.power(b16, 2).dtype == expected_dt + assert np.power(b16, 2.0).dtype == expected_dt + assert np.power(b16, b16).dtype, float16 + assert np.power(b16, b32).dtype, float32 + assert np.power(b16, a16).dtype, float16 + assert np.power(b16, a32).dtype, float32 + + assert np.power(a32, a16).dtype == float32 + assert np.power(a32, b16).dtype == float32 + expected_dt = float32 if weak_promotion else float16 + assert np.power(b32, a16).dtype == expected_dt + assert np.power(b32, b16).dtype == float32 + + @pytest.mark.skipif(platform.machine() == "armv5tel", + reason="See gh-413.") + @pytest.mark.skipif(IS_WASM, + reason="fp exceptions don't work in wasm.") + def test_half_fpe(self): + with np.errstate(all='raise'): + sx16 = np.array((1e-4,), dtype=float16) + bx16 = np.array((1e4,), dtype=float16) + sy16 = float16(1e-4) + by16 = float16(1e4) + + # Underflow errors + assert_raises_fpe('underflow', lambda a, b:a*b, sx16, sx16) + assert_raises_fpe('underflow', lambda a, b:a*b, sx16, sy16) + assert_raises_fpe('underflow', lambda a, b:a*b, sy16, sx16) + assert_raises_fpe('underflow', lambda a, b:a*b, sy16, sy16) + assert_raises_fpe('underflow', lambda a, b:a/b, sx16, bx16) + assert_raises_fpe('underflow', lambda a, b:a/b, sx16, by16) + assert_raises_fpe('underflow', lambda a, b:a/b, sy16, bx16) + assert_raises_fpe('underflow', lambda a, b:a/b, sy16, by16) + assert_raises_fpe('underflow', lambda a, b:a/b, + float16(2.**-14), float16(2**11)) + assert_raises_fpe('underflow', lambda a, b:a/b, + float16(-2.**-14), float16(2**11)) + assert_raises_fpe('underflow', lambda a, b:a/b, + float16(2.**-14+2**-24), float16(2)) + assert_raises_fpe('underflow', lambda a, b:a/b, + float16(-2.**-14-2**-24), float16(2)) + assert_raises_fpe('underflow', lambda a, b:a/b, + float16(2.**-14+2**-23), float16(4)) + + # Overflow errors + assert_raises_fpe('overflow', lambda a, b:a*b, bx16, bx16) + assert_raises_fpe('overflow', lambda a, b:a*b, bx16, by16) + assert_raises_fpe('overflow', lambda a, b:a*b, by16, bx16) + assert_raises_fpe('overflow', lambda a, b:a*b, by16, by16) + assert_raises_fpe('overflow', lambda a, b:a/b, bx16, sx16) + assert_raises_fpe('overflow', lambda a, b:a/b, bx16, sy16) + assert_raises_fpe('overflow', lambda a, b:a/b, by16, sx16) + assert_raises_fpe('overflow', lambda a, b:a/b, by16, sy16) + assert_raises_fpe('overflow', lambda a, b:a+b, + float16(65504), float16(17)) + assert_raises_fpe('overflow', lambda a, b:a-b, + float16(-65504), float16(17)) + assert_raises_fpe('overflow', np.nextafter, float16(65504), float16(np.inf)) + assert_raises_fpe('overflow', np.nextafter, float16(-65504), float16(-np.inf)) + assert_raises_fpe('overflow', np.spacing, float16(65504)) + + # Invalid value errors + assert_raises_fpe('invalid', np.divide, float16(np.inf), float16(np.inf)) + assert_raises_fpe('invalid', np.spacing, float16(np.inf)) + assert_raises_fpe('invalid', np.spacing, float16(np.nan)) + + # These should not raise + float16(65472)+float16(32) + float16(2**-13)/float16(2) + float16(2**-14)/float16(2**10) + np.spacing(float16(-65504)) + np.nextafter(float16(65504), float16(-np.inf)) + np.nextafter(float16(-65504), float16(np.inf)) + np.nextafter(float16(np.inf), float16(0)) + np.nextafter(float16(-np.inf), float16(0)) + np.nextafter(float16(0), float16(np.nan)) + np.nextafter(float16(np.nan), float16(0)) + float16(2**-14)/float16(2**10) + 
float16(-2**-14)/float16(2**10) + float16(2**-14+2**-23)/float16(2) + float16(-2**-14-2**-23)/float16(2) + + def test_half_array_interface(self): + """Test that half is compatible with __array_interface__""" + class Dummy: + pass + + a = np.ones((1,), dtype=float16) + b = Dummy() + b.__array_interface__ = a.__array_interface__ + c = np.array(b) + assert_(c.dtype == float16) + assert_equal(a, c) diff --git a/.venv/lib/python3.12/site-packages/numpy/core/tests/test_hashtable.py b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_hashtable.py new file mode 100644 index 00000000..bace4c05 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_hashtable.py @@ -0,0 +1,30 @@ +import pytest + +import random +from numpy.core._multiarray_tests import identityhash_tester + + +@pytest.mark.parametrize("key_length", [1, 3, 6]) +@pytest.mark.parametrize("length", [1, 16, 2000]) +def test_identity_hashtable(key_length, length): + # use a 30 object pool for everything (duplicates will happen) + pool = [object() for i in range(20)] + keys_vals = [] + for i in range(length): + keys = tuple(random.choices(pool, k=key_length)) + keys_vals.append((keys, random.choice(pool))) + + dictionary = dict(keys_vals) + + # add a random item at the end: + keys_vals.append(random.choice(keys_vals)) + # the expected one could be different with duplicates: + expected = dictionary[keys_vals[-1][0]] + + res = identityhash_tester(key_length, keys_vals, replace=True) + assert res is expected + + # check that ensuring one duplicate definitely raises: + keys_vals.insert(0, keys_vals[-2]) + with pytest.raises(RuntimeError): + identityhash_tester(key_length, keys_vals) diff --git a/.venv/lib/python3.12/site-packages/numpy/core/tests/test_indexerrors.py b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_indexerrors.py new file mode 100644 index 00000000..a0e9a8c5 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_indexerrors.py @@ -0,0 +1,133 @@ +import numpy as np +from numpy.testing import ( + assert_raises, assert_raises_regex, + ) + + +class TestIndexErrors: + '''Tests to exercise indexerrors not covered by other tests.''' + + def test_arraytypes_fasttake(self): + 'take from a 0-length dimension' + x = np.empty((2, 3, 0, 4)) + assert_raises(IndexError, x.take, [0], axis=2) + assert_raises(IndexError, x.take, [1], axis=2) + assert_raises(IndexError, x.take, [0], axis=2, mode='wrap') + assert_raises(IndexError, x.take, [0], axis=2, mode='clip') + + def test_take_from_object(self): + # Check exception taking from object array + d = np.zeros(5, dtype=object) + assert_raises(IndexError, d.take, [6]) + + # Check exception taking from 0-d array + d = np.zeros((5, 0), dtype=object) + assert_raises(IndexError, d.take, [1], axis=1) + assert_raises(IndexError, d.take, [0], axis=1) + assert_raises(IndexError, d.take, [0]) + assert_raises(IndexError, d.take, [0], mode='wrap') + assert_raises(IndexError, d.take, [0], mode='clip') + + def test_multiindex_exceptions(self): + a = np.empty(5, dtype=object) + assert_raises(IndexError, a.item, 20) + a = np.empty((5, 0), dtype=object) + assert_raises(IndexError, a.item, (0, 0)) + + a = np.empty(5, dtype=object) + assert_raises(IndexError, a.itemset, 20, 0) + a = np.empty((5, 0), dtype=object) + assert_raises(IndexError, a.itemset, (0, 0), 0) + + def test_put_exceptions(self): + a = np.zeros((5, 5)) + assert_raises(IndexError, a.put, 100, 0) + a = np.zeros((5, 5), dtype=object) + assert_raises(IndexError, a.put, 100, 0) + a = np.zeros((5, 
5, 0)) + assert_raises(IndexError, a.put, 100, 0) + a = np.zeros((5, 5, 0), dtype=object) + assert_raises(IndexError, a.put, 100, 0) + + def test_iterators_exceptions(self): + "cases in iterators.c" + def assign(obj, ind, val): + obj[ind] = val + + a = np.zeros([1, 2, 3]) + assert_raises(IndexError, lambda: a[0, 5, None, 2]) + assert_raises(IndexError, lambda: a[0, 5, 0, 2]) + assert_raises(IndexError, lambda: assign(a, (0, 5, None, 2), 1)) + assert_raises(IndexError, lambda: assign(a, (0, 5, 0, 2), 1)) + + a = np.zeros([1, 0, 3]) + assert_raises(IndexError, lambda: a[0, 0, None, 2]) + assert_raises(IndexError, lambda: assign(a, (0, 0, None, 2), 1)) + + a = np.zeros([1, 2, 3]) + assert_raises(IndexError, lambda: a.flat[10]) + assert_raises(IndexError, lambda: assign(a.flat, 10, 5)) + a = np.zeros([1, 0, 3]) + assert_raises(IndexError, lambda: a.flat[10]) + assert_raises(IndexError, lambda: assign(a.flat, 10, 5)) + + a = np.zeros([1, 2, 3]) + assert_raises(IndexError, lambda: a.flat[np.array(10)]) + assert_raises(IndexError, lambda: assign(a.flat, np.array(10), 5)) + a = np.zeros([1, 0, 3]) + assert_raises(IndexError, lambda: a.flat[np.array(10)]) + assert_raises(IndexError, lambda: assign(a.flat, np.array(10), 5)) + + a = np.zeros([1, 2, 3]) + assert_raises(IndexError, lambda: a.flat[np.array([10])]) + assert_raises(IndexError, lambda: assign(a.flat, np.array([10]), 5)) + a = np.zeros([1, 0, 3]) + assert_raises(IndexError, lambda: a.flat[np.array([10])]) + assert_raises(IndexError, lambda: assign(a.flat, np.array([10]), 5)) + + def test_mapping(self): + "cases from mapping.c" + + def assign(obj, ind, val): + obj[ind] = val + + a = np.zeros((0, 10)) + assert_raises(IndexError, lambda: a[12]) + + a = np.zeros((3, 5)) + assert_raises(IndexError, lambda: a[(10, 20)]) + assert_raises(IndexError, lambda: assign(a, (10, 20), 1)) + a = np.zeros((3, 0)) + assert_raises(IndexError, lambda: a[(1, 0)]) + assert_raises(IndexError, lambda: assign(a, (1, 0), 1)) + + a = np.zeros((10,)) + assert_raises(IndexError, lambda: assign(a, 10, 1)) + a = np.zeros((0,)) + assert_raises(IndexError, lambda: assign(a, 10, 1)) + + a = np.zeros((3, 5)) + assert_raises(IndexError, lambda: a[(1, [1, 20])]) + assert_raises(IndexError, lambda: assign(a, (1, [1, 20]), 1)) + a = np.zeros((3, 0)) + assert_raises(IndexError, lambda: a[(1, [0, 1])]) + assert_raises(IndexError, lambda: assign(a, (1, [0, 1]), 1)) + + def test_mapping_error_message(self): + a = np.zeros((3, 5)) + index = (1, 2, 3, 4, 5) + assert_raises_regex( + IndexError, + "too many indices for array: " + "array is 2-dimensional, but 5 were indexed", + lambda: a[index]) + + def test_methods(self): + "cases from methods.c" + + a = np.zeros((3, 3)) + assert_raises(IndexError, lambda: a.item(100)) + assert_raises(IndexError, lambda: a.itemset(100, 1)) + a = np.zeros((0, 3)) + assert_raises(IndexError, lambda: a.item(100)) + assert_raises(IndexError, lambda: a.itemset(100, 1)) diff --git a/.venv/lib/python3.12/site-packages/numpy/core/tests/test_indexing.py b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_indexing.py new file mode 100644 index 00000000..04293670 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_indexing.py @@ -0,0 +1,1417 @@ +import sys +import warnings +import functools +import operator + +import pytest + +import numpy as np +from numpy.core._multiarray_tests import array_indexing +from itertools import product +from numpy.testing import ( + assert_, assert_equal, assert_raises, assert_raises_regex, + 
assert_array_equal, assert_warns, HAS_REFCOUNT, IS_WASM + ) + + +class TestIndexing: + def test_index_no_floats(self): + a = np.array([[[5]]]) + + assert_raises(IndexError, lambda: a[0.0]) + assert_raises(IndexError, lambda: a[0, 0.0]) + assert_raises(IndexError, lambda: a[0.0, 0]) + assert_raises(IndexError, lambda: a[0.0,:]) + assert_raises(IndexError, lambda: a[:, 0.0]) + assert_raises(IndexError, lambda: a[:, 0.0,:]) + assert_raises(IndexError, lambda: a[0.0,:,:]) + assert_raises(IndexError, lambda: a[0, 0, 0.0]) + assert_raises(IndexError, lambda: a[0.0, 0, 0]) + assert_raises(IndexError, lambda: a[0, 0.0, 0]) + assert_raises(IndexError, lambda: a[-1.4]) + assert_raises(IndexError, lambda: a[0, -1.4]) + assert_raises(IndexError, lambda: a[-1.4, 0]) + assert_raises(IndexError, lambda: a[-1.4,:]) + assert_raises(IndexError, lambda: a[:, -1.4]) + assert_raises(IndexError, lambda: a[:, -1.4,:]) + assert_raises(IndexError, lambda: a[-1.4,:,:]) + assert_raises(IndexError, lambda: a[0, 0, -1.4]) + assert_raises(IndexError, lambda: a[-1.4, 0, 0]) + assert_raises(IndexError, lambda: a[0, -1.4, 0]) + assert_raises(IndexError, lambda: a[0.0:, 0.0]) + assert_raises(IndexError, lambda: a[0.0:, 0.0,:]) + + def test_slicing_no_floats(self): + a = np.array([[5]]) + + # start as float. + assert_raises(TypeError, lambda: a[0.0:]) + assert_raises(TypeError, lambda: a[0:, 0.0:2]) + assert_raises(TypeError, lambda: a[0.0::2, :0]) + assert_raises(TypeError, lambda: a[0.0:1:2,:]) + assert_raises(TypeError, lambda: a[:, 0.0:]) + # stop as float. + assert_raises(TypeError, lambda: a[:0.0]) + assert_raises(TypeError, lambda: a[:0, 1:2.0]) + assert_raises(TypeError, lambda: a[:0.0:2, :0]) + assert_raises(TypeError, lambda: a[:0.0,:]) + assert_raises(TypeError, lambda: a[:, 0:4.0:2]) + # step as float. + assert_raises(TypeError, lambda: a[::1.0]) + assert_raises(TypeError, lambda: a[0:, :2:2.0]) + assert_raises(TypeError, lambda: a[1::4.0, :0]) + assert_raises(TypeError, lambda: a[::5.0,:]) + assert_raises(TypeError, lambda: a[:, 0:4:2.0]) + # mixed. + assert_raises(TypeError, lambda: a[1.0:2:2.0]) + assert_raises(TypeError, lambda: a[1.0::2.0]) + assert_raises(TypeError, lambda: a[0:, :2.0:2.0]) + assert_raises(TypeError, lambda: a[1.0:1:4.0, :0]) + assert_raises(TypeError, lambda: a[1.0:5.0:5.0,:]) + assert_raises(TypeError, lambda: a[:, 0.4:4.0:2.0]) + # should still get the DeprecationWarning if step = 0. + assert_raises(TypeError, lambda: a[::0.0]) + + def test_index_no_array_to_index(self): + # No non-scalar arrays. + a = np.array([[[1]]]) + + assert_raises(TypeError, lambda: a[a:a:a]) + + def test_none_index(self): + # `None` index adds newaxis + a = np.array([1, 2, 3]) + assert_equal(a[None], a[np.newaxis]) + assert_equal(a[None].ndim, a.ndim + 1) + + def test_empty_tuple_index(self): + # Empty tuple index creates a view + a = np.array([1, 2, 3]) + assert_equal(a[()], a) + assert_(a[()].base is a) + a = np.array(0) + assert_(isinstance(a[()], np.int_)) + + def test_void_scalar_empty_tuple(self): + s = np.zeros((), dtype='V4') + assert_equal(s[()].dtype, s.dtype) + assert_equal(s[()], s) + assert_equal(type(s[...]), np.ndarray) + + def test_same_kind_index_casting(self): + # Indexes should be cast with same-kind and not safe, even if that + # is somewhat unsafe. So test various different code paths. 
+ index = np.arange(5) + u_index = index.astype(np.uintp) + arr = np.arange(10) + + assert_array_equal(arr[index], arr[u_index]) + arr[u_index] = np.arange(5) + assert_array_equal(arr, np.arange(10)) + + arr = np.arange(10).reshape(5, 2) + assert_array_equal(arr[index], arr[u_index]) + + arr[u_index] = np.arange(5)[:,None] + assert_array_equal(arr, np.arange(5)[:,None].repeat(2, axis=1)) + + arr = np.arange(25).reshape(5, 5) + assert_array_equal(arr[u_index, u_index], arr[index, index]) + + def test_empty_fancy_index(self): + # Empty list index creates an empty array + # with the same dtype (but with weird shape) + a = np.array([1, 2, 3]) + assert_equal(a[[]], []) + assert_equal(a[[]].dtype, a.dtype) + + b = np.array([], dtype=np.intp) + assert_equal(a[[]], []) + assert_equal(a[[]].dtype, a.dtype) + + b = np.array([]) + assert_raises(IndexError, a.__getitem__, b) + + def test_ellipsis_index(self): + a = np.array([[1, 2, 3], + [4, 5, 6], + [7, 8, 9]]) + assert_(a[...] is not a) + assert_equal(a[...], a) + # `a[...]` was `a` in numpy <1.9. + assert_(a[...].base is a) + + # Slicing with ellipsis can skip an + # arbitrary number of dimensions + assert_equal(a[0, ...], a[0]) + assert_equal(a[0, ...], a[0,:]) + assert_equal(a[..., 0], a[:, 0]) + + # Slicing with ellipsis always results + # in an array, not a scalar + assert_equal(a[0, ..., 1], np.array(2)) + + # Assignment with `(Ellipsis,)` on 0-d arrays + b = np.array(1) + b[(Ellipsis,)] = 2 + assert_equal(b, 2) + + def test_single_int_index(self): + # Single integer index selects one row + a = np.array([[1, 2, 3], + [4, 5, 6], + [7, 8, 9]]) + + assert_equal(a[0], [1, 2, 3]) + assert_equal(a[-1], [7, 8, 9]) + + # Index out of bounds produces IndexError + assert_raises(IndexError, a.__getitem__, 1 << 30) + # Index overflow produces IndexError + assert_raises(IndexError, a.__getitem__, 1 << 64) + + def test_single_bool_index(self): + # Single boolean index + a = np.array([[1, 2, 3], + [4, 5, 6], + [7, 8, 9]]) + + assert_equal(a[np.array(True)], a[None]) + assert_equal(a[np.array(False)], a[None][0:0]) + + def test_boolean_shape_mismatch(self): + arr = np.ones((5, 4, 3)) + + index = np.array([True]) + assert_raises(IndexError, arr.__getitem__, index) + + index = np.array([False] * 6) + assert_raises(IndexError, arr.__getitem__, index) + + index = np.zeros((4, 4), dtype=bool) + assert_raises(IndexError, arr.__getitem__, index) + + assert_raises(IndexError, arr.__getitem__, (slice(None), index)) + + def test_boolean_indexing_onedim(self): + # Indexing a 2-dimensional array with + # boolean array of length one + a = np.array([[ 0., 0., 0.]]) + b = np.array([ True], dtype=bool) + assert_equal(a[b], a) + # boolean assignment + a[b] = 1. + assert_equal(a, [[1., 1., 1.]]) + + def test_boolean_assignment_value_mismatch(self): + # A boolean assignment should fail when the shape of the values + # cannot be broadcast to the subscription. 
(see also gh-3458) + a = np.arange(4) + + def f(a, v): + a[a > -1] = v + + assert_raises(ValueError, f, a, []) + assert_raises(ValueError, f, a, [1, 2, 3]) + assert_raises(ValueError, f, a[:1], [1, 2, 3]) + + def test_boolean_assignment_needs_api(self): + # See also gh-7666 + # This caused a segfault on Python 2 due to the GIL not being + # held when the iterator does not need it, but the transfer function + # does + arr = np.zeros(1000) + indx = np.zeros(1000, dtype=bool) + indx[:100] = True + arr[indx] = np.ones(100, dtype=object) + + expected = np.zeros(1000) + expected[:100] = 1 + assert_array_equal(arr, expected) + + def test_boolean_indexing_twodim(self): + # Indexing a 2-dimensional array with + # 2-dimensional boolean array + a = np.array([[1, 2, 3], + [4, 5, 6], + [7, 8, 9]]) + b = np.array([[ True, False, True], + [False, True, False], + [ True, False, True]]) + assert_equal(a[b], [1, 3, 5, 7, 9]) + assert_equal(a[b[1]], [[4, 5, 6]]) + assert_equal(a[b[0]], a[b[2]]) + + # boolean assignment + a[b] = 0 + assert_equal(a, [[0, 2, 0], + [4, 0, 6], + [0, 8, 0]]) + + def test_boolean_indexing_list(self): + # Regression test for #13715. It's a use-after-free bug which the + # test won't directly catch, but it will show up in valgrind. + a = np.array([1, 2, 3]) + b = [True, False, True] + # Two variants of the test because the first takes a fast path + assert_equal(a[b], [1, 3]) + assert_equal(a[None, b], [[1, 3]]) + + def test_reverse_strides_and_subspace_bufferinit(self): + # This tests that the strides are not reversed for simple and + # subspace fancy indexing. + a = np.ones(5) + b = np.zeros(5, dtype=np.intp)[::-1] + c = np.arange(5)[::-1] + + a[b] = c + # If the strides are not reversed, the 0 in the arange comes last. + assert_equal(a[0], 0) + + # This also tests that the subspace buffer is initialized: + a = np.ones((5, 2)) + c = np.arange(10).reshape(5, 2)[::-1] + a[b, :] = c + assert_equal(a[0], [0, 1]) + + def test_reversed_strides_result_allocation(self): + # Test a bug when calculating the output strides for a result array + # when the subspace size was 1 (and test other cases as well) + a = np.arange(10)[:, None] + i = np.arange(10)[::-1] + assert_array_equal(a[i], a[i.copy('C')]) + + a = np.arange(20).reshape(-1, 2) + + def test_uncontiguous_subspace_assignment(self): + # During development there was a bug activating a skip logic + # based on ndim instead of size. + a = np.full((3, 4, 2), -1) + b = np.full((3, 4, 2), -1) + + a[[0, 1]] = np.arange(2 * 4 * 2).reshape(2, 4, 2).T + b[[0, 1]] = np.arange(2 * 4 * 2).reshape(2, 4, 2).T.copy() + + assert_equal(a, b) + + def test_too_many_fancy_indices_special_case(self): + # Just documents behaviour, this is a small limitation. + a = np.ones((1,) * 32) # 32 is NPY_MAXDIMS + assert_raises(IndexError, a.__getitem__, (np.array([0]),) * 32) + + def test_scalar_array_bool(self): + # NumPy bools can be used as boolean index (python ones as of yet not) + a = np.array(1) + assert_equal(a[np.bool_(True)], a[np.array(True)]) + assert_equal(a[np.bool_(False)], a[np.array(False)]) + + # After deprecating bools as integers: + #a = np.array([0,1,2]) + #assert_equal(a[True, :], a[None, :]) + #assert_equal(a[:, True], a[:, None]) + # + #assert_(not np.may_share_memory(a, a[True, :])) + + def test_everything_returns_views(self): + # Before `...` would return a itself. 
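For illustration, a minimal sketch (not part of the vendored test file) of the view semantics checked next: per the comment above, `a[...]` now returns a fresh view of `a` rather than `a` itself, and writes through that view reach the original array.

import numpy as np

a = np.arange(5)
v = a[...]
assert v is not a and v.base is a   # a new view, with `a` recorded as its base
v[0] = 99
assert a[0] == 99                   # writes through the view are visible in `a`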
+ a = np.arange(5) + + assert_(a is not a[()]) + assert_(a is not a[...]) + assert_(a is not a[:]) + + def test_broaderrors_indexing(self): + a = np.zeros((5, 5)) + assert_raises(IndexError, a.__getitem__, ([0, 1], [0, 1, 2])) + assert_raises(IndexError, a.__setitem__, ([0, 1], [0, 1, 2]), 0) + + def test_trivial_fancy_out_of_bounds(self): + a = np.zeros(5) + ind = np.ones(20, dtype=np.intp) + ind[-1] = 10 + assert_raises(IndexError, a.__getitem__, ind) + assert_raises(IndexError, a.__setitem__, ind, 0) + ind = np.ones(20, dtype=np.intp) + ind[0] = 11 + assert_raises(IndexError, a.__getitem__, ind) + assert_raises(IndexError, a.__setitem__, ind, 0) + + def test_trivial_fancy_not_possible(self): + # Test that the fast path for trivial assignment is not incorrectly + # used when the index is not contiguous or 1D, see also gh-11467. + a = np.arange(6) + idx = np.arange(6, dtype=np.intp).reshape(2, 1, 3)[:, :, 0] + assert_array_equal(a[idx], idx) + + # this case must not go into the fast path, note that idx is + # a non-contiuguous none 1D array here. + a[idx] = -1 + res = np.arange(6) + res[0] = -1 + res[3] = -1 + assert_array_equal(a, res) + + def test_nonbaseclass_values(self): + class SubClass(np.ndarray): + def __array_finalize__(self, old): + # Have array finalize do funny things + self.fill(99) + + a = np.zeros((5, 5)) + s = a.copy().view(type=SubClass) + s.fill(1) + + a[[0, 1, 2, 3, 4], :] = s + assert_((a == 1).all()) + + # Subspace is last, so transposing might want to finalize + a[:, [0, 1, 2, 3, 4]] = s + assert_((a == 1).all()) + + a.fill(0) + a[...] = s + assert_((a == 1).all()) + + def test_array_like_values(self): + # Similar to the above test, but use a memoryview instead + a = np.zeros((5, 5)) + s = np.arange(25, dtype=np.float64).reshape(5, 5) + + a[[0, 1, 2, 3, 4], :] = memoryview(s) + assert_array_equal(a, s) + + a[:, [0, 1, 2, 3, 4]] = memoryview(s) + assert_array_equal(a, s) + + a[...] = memoryview(s) + assert_array_equal(a, s) + + def test_subclass_writeable(self): + d = np.rec.array([('NGC1001', 11), ('NGC1002', 1.), ('NGC1003', 1.)], + dtype=[('target', 'S20'), ('V_mag', '>f4')]) + ind = np.array([False, True, True], dtype=bool) + assert_(d[ind].flags.writeable) + ind = np.array([0, 1]) + assert_(d[ind].flags.writeable) + assert_(d[...].flags.writeable) + assert_(d[0].flags.writeable) + + def test_memory_order(self): + # This is not necessary to preserve. Memory layouts for + # more complex indices are not as simple. 
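For illustration, a standalone sketch (not part of the vendored test file) of the implementation detail described in the comment above: the result of a fancy index currently follows the memory order of the index array, so a Fortran-ordered index yields a Fortran-contiguous result.

import numpy as np

a = np.arange(10)
b = np.arange(10).reshape(5, 2).T   # shape (2, 5), F-contiguous index array
assert a[b].shape == (2, 5)
assert a[b].flags.f_contiguous      # the result inherits the index's layout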
+ a = np.arange(10) + b = np.arange(10).reshape(5,2).T + assert_(a[b].flags.f_contiguous) + + # Takes a different implementation branch: + a = a.reshape(-1, 1) + assert_(a[b, 0].flags.f_contiguous) + + def test_scalar_return_type(self): + # Full scalar indices should return scalars and object + # arrays should not call PyArray_Return on their items + class Zero: + # The most basic valid indexing + def __index__(self): + return 0 + + z = Zero() + + class ArrayLike: + # Simple array, should behave like the array + def __array__(self): + return np.array(0) + + a = np.zeros(()) + assert_(isinstance(a[()], np.float_)) + a = np.zeros(1) + assert_(isinstance(a[z], np.float_)) + a = np.zeros((1, 1)) + assert_(isinstance(a[z, np.array(0)], np.float_)) + assert_(isinstance(a[z, ArrayLike()], np.float_)) + + # And object arrays do not call it too often: + b = np.array(0) + a = np.array(0, dtype=object) + a[()] = b + assert_(isinstance(a[()], np.ndarray)) + a = np.array([b, None]) + assert_(isinstance(a[z], np.ndarray)) + a = np.array([[b, None]]) + assert_(isinstance(a[z, np.array(0)], np.ndarray)) + assert_(isinstance(a[z, ArrayLike()], np.ndarray)) + + def test_small_regressions(self): + # Reference count of intp for index checks + a = np.array([0]) + if HAS_REFCOUNT: + refcount = sys.getrefcount(np.dtype(np.intp)) + # item setting always checks indices in separate function: + a[np.array([0], dtype=np.intp)] = 1 + a[np.array([0], dtype=np.uint8)] = 1 + assert_raises(IndexError, a.__setitem__, + np.array([1], dtype=np.intp), 1) + assert_raises(IndexError, a.__setitem__, + np.array([1], dtype=np.uint8), 1) + + if HAS_REFCOUNT: + assert_equal(sys.getrefcount(np.dtype(np.intp)), refcount) + + def test_unaligned(self): + v = (np.zeros(64, dtype=np.int8) + ord('a'))[1:-7] + d = v.view(np.dtype("S8")) + # unaligned source + x = (np.zeros(16, dtype=np.int8) + ord('a'))[1:-7] + x = x.view(np.dtype("S8")) + x[...] = np.array("b" * 8, dtype="S") + b = np.arange(d.size) + #trivial + assert_equal(d[b], d) + d[b] = x + # nontrivial + # unaligned index array + b = np.zeros(d.size + 1).view(np.int8)[1:-(np.intp(0).itemsize - 1)] + b = b.view(np.intp)[:d.size] + b[...] 
= np.arange(d.size) + assert_equal(d[b.astype(np.int16)], d) + d[b.astype(np.int16)] = x + # boolean + d[b % 2 == 0] + d[b % 2 == 0] = x[::2] + + def test_tuple_subclass(self): + arr = np.ones((5, 5)) + + # A tuple subclass should also be an nd-index + class TupleSubclass(tuple): + pass + index = ([1], [1]) + index = TupleSubclass(index) + assert_(arr[index].shape == (1,)) + # Unlike the non nd-index: + assert_(arr[index,].shape != (1,)) + + def test_broken_sequence_not_nd_index(self): + # See gh-5063: + # If we have an object which claims to be a sequence, but fails + # on item getting, this should not be converted to an nd-index (tuple) + # If this object happens to be a valid index otherwise, it should work + # This object here is very dubious and probably bad though: + class SequenceLike: + def __index__(self): + return 0 + + def __len__(self): + return 1 + + def __getitem__(self, item): + raise IndexError('Not possible') + + arr = np.arange(10) + assert_array_equal(arr[SequenceLike()], arr[SequenceLike(),]) + + # also test that field indexing does not segfault + # for a similar reason, by indexing a structured array + arr = np.zeros((1,), dtype=[('f1', 'i8'), ('f2', 'i8')]) + assert_array_equal(arr[SequenceLike()], arr[SequenceLike(),]) + + def test_indexing_array_weird_strides(self): + # See also gh-6221 + # the shapes used here come from the issue and create the correct + # size for the iterator buffering size. + x = np.ones(10) + x2 = np.ones((10, 2)) + ind = np.arange(10)[:, None, None, None] + ind = np.broadcast_to(ind, (10, 55, 4, 4)) + + # single advanced index case + assert_array_equal(x[ind], x[ind.copy()]) + # higher dimensional advanced index + zind = np.zeros(4, dtype=np.intp) + assert_array_equal(x2[ind, zind], x2[ind.copy(), zind]) + + def test_indexing_array_negative_strides(self): + # From gh-8264, + # core dumps if negative strides are used in iteration + arro = np.zeros((4, 4)) + arr = arro[::-1, ::-1] + + slices = (slice(None), [0, 1, 2, 3]) + arr[slices] = 10 + assert_array_equal(arr, 10.) + + def test_character_assignment(self): + # This is an example a function going through CopyObject which + # used to have an untested special path for scalars + # (the character special dtype case, should be deprecated probably) + arr = np.zeros((1, 5), dtype="c") + arr[0] = np.str_("asdfg") # must assign as a sequence + assert_array_equal(arr[0], np.array("asdfg", dtype="c")) + assert arr[0, 1] == b"s" # make sure not all were set to "a" for both + + @pytest.mark.parametrize("index", + [True, False, np.array([0])]) + @pytest.mark.parametrize("num", [32, 40]) + @pytest.mark.parametrize("original_ndim", [1, 32]) + def test_too_many_advanced_indices(self, index, num, original_ndim): + # These are limitations based on the number of arguments we can process. + # For `num=32` (and all boolean cases), the result is actually define; + # but the use of NpyIter (NPY_MAXARGS) limits it for technical reasons. + arr = np.ones((1,) * original_ndim) + with pytest.raises(IndexError): + arr[(index,) * num] + with pytest.raises(IndexError): + arr[(index,) * num] = 1. + + @pytest.mark.skipif(IS_WASM, reason="no threading") + def test_structured_advanced_indexing(self): + # Test that copyswap(n) used by integer array indexing is threadsafe + # for structured datatypes, see gh-15387. This test can behave randomly. 
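The test below hammers integer-array indexing of a nested structured dtype from several threads because the element copy routine must be thread-safe (gh-15387). A single-threaded sketch of the same access pattern (not part of the vendored test file; the sizes here are illustrative) looks like this:

import numpy as np

dt = np.dtype([("", "f8")])
dt = np.dtype([("", dt)] * 2)   # empty field names are auto-renamed f0, f1
dt = np.dtype([("", dt)] * 2)   # final itemsize: 32 bytes
arr = np.random.uniform(size=(100, 8)).view(dt)[:, 0]   # shape (100,) structured array
indx = np.random.default_rng().integers(0, len(arr), size=100, dtype=np.intp)
_ = arr[indx]                   # the integer-array indexing being stressed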
+ from concurrent.futures import ThreadPoolExecutor + + # Create a deeply nested dtype to make a failure more likely: + dt = np.dtype([("", "f8")]) + dt = np.dtype([("", dt)] * 2) + dt = np.dtype([("", dt)] * 2) + # The array should be large enough to likely run into threading issues + arr = np.random.uniform(size=(6000, 8)).view(dt)[:, 0] + + rng = np.random.default_rng() + def func(arr): + indx = rng.integers(0, len(arr), size=6000, dtype=np.intp) + arr[indx] + + tpe = ThreadPoolExecutor(max_workers=8) + futures = [tpe.submit(func, arr) for _ in range(10)] + for f in futures: + f.result() + + assert arr.dtype is dt + + def test_nontuple_ndindex(self): + a = np.arange(25).reshape((5, 5)) + assert_equal(a[[0, 1]], np.array([a[0], a[1]])) + assert_equal(a[[0, 1], [0, 1]], np.array([0, 6])) + assert_raises(IndexError, a.__getitem__, [slice(None)]) + + +class TestFieldIndexing: + def test_scalar_return_type(self): + # Field access on an array should return an array, even if it + # is 0-d. + a = np.zeros((), [('a','f8')]) + assert_(isinstance(a['a'], np.ndarray)) + assert_(isinstance(a[['a']], np.ndarray)) + + +class TestBroadcastedAssignments: + def assign(self, a, ind, val): + a[ind] = val + return a + + def test_prepending_ones(self): + a = np.zeros((3, 2)) + + a[...] = np.ones((1, 3, 2)) + # Fancy with subspace with and without transpose + a[[0, 1, 2], :] = np.ones((1, 3, 2)) + a[:, [0, 1]] = np.ones((1, 3, 2)) + # Fancy without subspace (with broadcasting) + a[[[0], [1], [2]], [0, 1]] = np.ones((1, 3, 2)) + + def test_prepend_not_one(self): + assign = self.assign + s_ = np.s_ + a = np.zeros(5) + + # Too large and not only ones. + assert_raises(ValueError, assign, a, s_[...], np.ones((2, 1))) + assert_raises(ValueError, assign, a, s_[[1, 2, 3],], np.ones((2, 1))) + assert_raises(ValueError, assign, a, s_[[[1], [2]],], np.ones((2,2,1))) + + def test_simple_broadcasting_errors(self): + assign = self.assign + s_ = np.s_ + a = np.zeros((5, 1)) + + assert_raises(ValueError, assign, a, s_[...], np.zeros((5, 2))) + assert_raises(ValueError, assign, a, s_[...], np.zeros((5, 0))) + assert_raises(ValueError, assign, a, s_[:, [0]], np.zeros((5, 2))) + assert_raises(ValueError, assign, a, s_[:, [0]], np.zeros((5, 0))) + assert_raises(ValueError, assign, a, s_[[0], :], np.zeros((2, 1))) + + @pytest.mark.parametrize("index", [ + (..., [1, 2], slice(None)), + ([0, 1], ..., 0), + (..., [1, 2], [1, 2])]) + def test_broadcast_error_reports_correct_shape(self, index): + values = np.zeros((100, 100)) # will never broadcast below + + arr = np.zeros((3, 4, 5, 6, 7)) + # We currently report without any spaces (could be changed) + shape_str = str(arr[index].shape).replace(" ", "") + + with pytest.raises(ValueError) as e: + arr[index] = values + + assert str(e.value).endswith(shape_str) + + def test_index_is_larger(self): + # Simple case of fancy index broadcasting of the index. + a = np.zeros((5, 5)) + a[[[0], [1], [2]], [0, 1, 2]] = [2, 3, 4] + + assert_((a[:3, :3] == [2, 3, 4]).all()) + + def test_broadcast_subspace(self): + a = np.zeros((100, 100)) + v = np.arange(100)[:,None] + b = np.arange(100)[::-1] + a[b] = v + assert_((a[::-1] == v).all()) + + +class TestSubclasses: + def test_basic(self): + # Test that indexing in various ways produces SubClass instances, + # and that the base is set up correctly: the original subclass + # instance for views, and a new ndarray for advanced/boolean indexing + # where a copy was made (latter a regression test for gh-11983). 
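For illustration, a standalone sketch (not part of the vendored test file; MyArray is an illustrative name) of the rule stated in the comment above: slicing a subclass returns a view whose base is the subclass instance, while advanced/boolean indexing copies, so the base is a plain ndarray (gh-11983).

import numpy as np

class MyArray(np.ndarray):
    pass

s = np.arange(5).view(MyArray)
assert type(s[:3]) is MyArray and s[:3].base is s        # basic slice: a view of s
f = s[[0, 1]]
assert type(f) is MyArray                                # still the subclass...
assert type(f.base) is np.ndarray and f.base is not s    # ...but backed by a fresh ndarray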
+ class SubClass(np.ndarray): + pass + + a = np.arange(5) + s = a.view(SubClass) + s_slice = s[:3] + assert_(type(s_slice) is SubClass) + assert_(s_slice.base is s) + assert_array_equal(s_slice, a[:3]) + + s_fancy = s[[0, 1, 2]] + assert_(type(s_fancy) is SubClass) + assert_(s_fancy.base is not s) + assert_(type(s_fancy.base) is np.ndarray) + assert_array_equal(s_fancy, a[[0, 1, 2]]) + assert_array_equal(s_fancy.base, a[[0, 1, 2]]) + + s_bool = s[s > 0] + assert_(type(s_bool) is SubClass) + assert_(s_bool.base is not s) + assert_(type(s_bool.base) is np.ndarray) + assert_array_equal(s_bool, a[a > 0]) + assert_array_equal(s_bool.base, a[a > 0]) + + def test_fancy_on_read_only(self): + # Test that fancy indexing on read-only SubClass does not make a + # read-only copy (gh-14132) + class SubClass(np.ndarray): + pass + + a = np.arange(5) + s = a.view(SubClass) + s.flags.writeable = False + s_fancy = s[[0, 1, 2]] + assert_(s_fancy.flags.writeable) + + + def test_finalize_gets_full_info(self): + # Array finalize should be called on the filled array. + class SubClass(np.ndarray): + def __array_finalize__(self, old): + self.finalize_status = np.array(self) + self.old = old + + s = np.arange(10).view(SubClass) + new_s = s[:3] + assert_array_equal(new_s.finalize_status, new_s) + assert_array_equal(new_s.old, s) + + new_s = s[[0,1,2,3]] + assert_array_equal(new_s.finalize_status, new_s) + assert_array_equal(new_s.old, s) + + new_s = s[s > 0] + assert_array_equal(new_s.finalize_status, new_s) + assert_array_equal(new_s.old, s) + + +class TestFancyIndexingCast: + def test_boolean_index_cast_assign(self): + # Setup the boolean index and float arrays. + shape = (8, 63) + bool_index = np.zeros(shape).astype(bool) + bool_index[0, 1] = True + zero_array = np.zeros(shape) + + # Assigning float is fine. + zero_array[bool_index] = np.array([1]) + assert_equal(zero_array[0, 1], 1) + + # Fancy indexing works, although we get a cast warning. + assert_warns(np.ComplexWarning, + zero_array.__setitem__, ([0], [1]), np.array([2 + 1j])) + assert_equal(zero_array[0, 1], 2) # No complex part + + # Cast complex to float, throwing away the imaginary portion. + assert_warns(np.ComplexWarning, + zero_array.__setitem__, bool_index, np.array([1j])) + assert_equal(zero_array[0, 1], 0) + +class TestFancyIndexingEquivalence: + def test_object_assign(self): + # Check that the field and object special case using copyto is active. + # The right hand side cannot be converted to an array here. + a = np.arange(5, dtype=object) + b = a.copy() + a[:3] = [1, (1,2), 3] + b[[0, 1, 2]] = [1, (1,2), 3] + assert_array_equal(a, b) + + # test same for subspace fancy indexing + b = np.arange(5, dtype=object)[None, :] + b[[0], :3] = [[1, (1,2), 3]] + assert_array_equal(a, b[0]) + + # Check that swapping of axes works. + # There was a bug that made the later assignment throw a ValueError + # do to an incorrectly transposed temporary right hand side (gh-5714) + b = b.T + b[:3, [0]] = [[1], [(1,2)], [3]] + assert_array_equal(a, b[:, 0]) + + # Another test for the memory order of the subspace + arr = np.ones((3, 4, 5), dtype=object) + # Equivalent slicing assignment for comparison + cmp_arr = arr.copy() + cmp_arr[:1, ...] = [[[1], [2], [3], [4]]] + arr[[0], ...] = [[[1], [2], [3], [4]]] + assert_array_equal(arr, cmp_arr) + arr = arr.copy('F') + arr[[0], ...] = [[[1], [2], [3], [4]]] + assert_array_equal(arr, cmp_arr) + + def test_cast_equivalence(self): + # Yes, normal slicing uses unsafe casting. 
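For illustration, a small sketch (not part of the vendored test file) of the point made in the comment above: assignment through a basic slice already uses 'unsafe' casting, so even a string array can be written into an integer array; the test then checks that fancy-index assignment casts the same way.

import numpy as np

a = np.zeros(3, dtype=np.intp)
a[:3] = np.array(['2', '-3', '-1'])   # string -> integer via unsafe casting
assert (a == [2, -3, -1]).all()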
+ a = np.arange(5) + b = a.copy() + + a[:3] = np.array(['2', '-3', '-1']) + b[[0, 2, 1]] = np.array(['2', '-1', '-3']) + assert_array_equal(a, b) + + # test the same for subspace fancy indexing + b = np.arange(5)[None, :] + b[[0], :3] = np.array([['2', '-3', '-1']]) + assert_array_equal(a, b[0]) + + +class TestMultiIndexingAutomated: + """ + These tests use code to mimic the C-Code indexing for selection. + + NOTE: + + * This still lacks tests for complex item setting. + * If you change behavior of indexing, you might want to modify + these tests to try more combinations. + * Behavior was written to match numpy version 1.8. (though a + first version matched 1.7.) + * Only tuple indices are supported by the mimicking code. + (and tested as of writing this) + * Error types should match most of the time as long as there + is only one error. For multiple errors, what gets raised + will usually not be the same one. They are *not* tested. + + Update 2016-11-30: It is probably not worth maintaining this test + indefinitely and it can be dropped if maintenance becomes a burden. + + """ + + def setup_method(self): + self.a = np.arange(np.prod([3, 1, 5, 6])).reshape(3, 1, 5, 6) + self.b = np.empty((3, 0, 5, 6)) + self.complex_indices = ['skip', Ellipsis, + 0, + # Boolean indices, up to 3-d for some special cases of eating up + # dimensions, also need to test all False + np.array([True, False, False]), + np.array([[True, False], [False, True]]), + np.array([[[False, False], [False, False]]]), + # Some slices: + slice(-5, 5, 2), + slice(1, 1, 100), + slice(4, -1, -2), + slice(None, None, -3), + # Some Fancy indexes: + np.empty((0, 1, 1), dtype=np.intp), # empty and can be broadcast + np.array([0, 1, -2]), + np.array([[2], [0], [1]]), + np.array([[0, -1], [0, 1]], dtype=np.dtype('intp').newbyteorder()), + np.array([2, -1], dtype=np.int8), + np.zeros([1]*31, dtype=int), # trigger too large array. + np.array([0., 1.])] # invalid datatype + # Some simpler indices that still cover a bit more + self.simple_indices = [Ellipsis, None, -1, [1], np.array([True]), + 'skip'] + # Very simple ones to fill the rest: + self.fill_indices = [slice(None, None), 0] + + def _get_multi_index(self, arr, indices): + """Mimic multi dimensional indexing. + + Parameters + ---------- + arr : ndarray + Array to be indexed. + indices : tuple of index objects + + Returns + ------- + out : ndarray + An array equivalent to the indexing operation (but always a copy). + `arr[indices]` should be identical. + no_copy : bool + Whether the indexing operation requires a copy. If this is `True`, + `np.may_share_memory(arr, arr[indices])` should be `True` (with + some exceptions for scalars and possibly 0-d arrays). + + Notes + ----- + While the function may mostly match the errors of normal indexing this + is generally not the case. + """ + in_indices = list(indices) + indices = [] + # if False, this is a fancy or boolean index + no_copy = True + # number of fancy/scalar indexes that are not consecutive + num_fancy = 0 + # number of dimensions indexed by a "fancy" index + fancy_dim = 0 + # NOTE: This is a funny twist (and probably OK to change). + # The boolean array has illegal indexes, but this is + # allowed if the broadcast fancy-indices are 0-sized. + # This variable is to catch that case. + error_unless_broadcast_to_empty = False + + # We need to handle Ellipsis and make arrays from indices, also + # check if this is fancy indexing (set no_copy). + ndim = 0 + ellipsis_pos = None # define here mostly to replace all but first. 
+ for i, indx in enumerate(in_indices): + if indx is None: + continue + if isinstance(indx, np.ndarray) and indx.dtype == bool: + no_copy = False + if indx.ndim == 0: + raise IndexError + # boolean indices can have higher dimensions + ndim += indx.ndim + fancy_dim += indx.ndim + continue + if indx is Ellipsis: + if ellipsis_pos is None: + ellipsis_pos = i + continue # do not increment ndim counter + raise IndexError + if isinstance(indx, slice): + ndim += 1 + continue + if not isinstance(indx, np.ndarray): + # This could be open for changes in numpy. + # numpy should maybe raise an error if casting to intp + # is not safe. It rejects np.array([1., 2.]) but not + # [1., 2.] as index (same for ie. np.take). + # (Note the importance of empty lists if changing this here) + try: + indx = np.array(indx, dtype=np.intp) + except ValueError: + raise IndexError + in_indices[i] = indx + elif indx.dtype.kind != 'b' and indx.dtype.kind != 'i': + raise IndexError('arrays used as indices must be of ' + 'integer (or boolean) type') + if indx.ndim != 0: + no_copy = False + ndim += 1 + fancy_dim += 1 + + if arr.ndim - ndim < 0: + # we can't take more dimensions then we have, not even for 0-d + # arrays. since a[()] makes sense, but not a[(),]. We will + # raise an error later on, unless a broadcasting error occurs + # first. + raise IndexError + + if ndim == 0 and None not in in_indices: + # Well we have no indexes or one Ellipsis. This is legal. + return arr.copy(), no_copy + + if ellipsis_pos is not None: + in_indices[ellipsis_pos:ellipsis_pos+1] = ([slice(None, None)] * + (arr.ndim - ndim)) + + for ax, indx in enumerate(in_indices): + if isinstance(indx, slice): + # convert to an index array + indx = np.arange(*indx.indices(arr.shape[ax])) + indices.append(['s', indx]) + continue + elif indx is None: + # this is like taking a slice with one element from a new axis: + indices.append(['n', np.array([0], dtype=np.intp)]) + arr = arr.reshape((arr.shape[:ax] + (1,) + arr.shape[ax:])) + continue + if isinstance(indx, np.ndarray) and indx.dtype == bool: + if indx.shape != arr.shape[ax:ax+indx.ndim]: + raise IndexError + + try: + flat_indx = np.ravel_multi_index(np.nonzero(indx), + arr.shape[ax:ax+indx.ndim], mode='raise') + except Exception: + error_unless_broadcast_to_empty = True + # fill with 0s instead, and raise error later + flat_indx = np.array([0]*indx.sum(), dtype=np.intp) + # concatenate axis into a single one: + if indx.ndim != 0: + arr = arr.reshape((arr.shape[:ax] + + (np.prod(arr.shape[ax:ax+indx.ndim]),) + + arr.shape[ax+indx.ndim:])) + indx = flat_indx + else: + # This could be changed, a 0-d boolean index can + # make sense (even outside the 0-d indexed array case) + # Note that originally this is could be interpreted as + # integer in the full integer special case. + raise IndexError + else: + # If the index is a singleton, the bounds check is done + # before the broadcasting. This used to be different in <1.9 + if indx.ndim == 0: + if indx >= arr.shape[ax] or indx < -arr.shape[ax]: + raise IndexError + if indx.ndim == 0: + # The index is a scalar. This used to be two fold, but if + # fancy indexing was active, the check was done later, + # possibly after broadcasting it away (1.7. or earlier). + # Now it is always done. + if indx >= arr.shape[ax] or indx < - arr.shape[ax]: + raise IndexError + if (len(indices) > 0 and + indices[-1][0] == 'f' and + ax != ellipsis_pos): + # NOTE: There could still have been a 0-sized Ellipsis + # between them. Checked that with ellipsis_pos. 
+ indices[-1].append(indx) + else: + # We have a fancy index that is not after an existing one. + # NOTE: A 0-d array triggers this as well, while one may + # expect it to not trigger it, since a scalar would not be + # considered fancy indexing. + num_fancy += 1 + indices.append(['f', indx]) + + if num_fancy > 1 and not no_copy: + # We have to flush the fancy indexes left + new_indices = indices[:] + axes = list(range(arr.ndim)) + fancy_axes = [] + new_indices.insert(0, ['f']) + ni = 0 + ai = 0 + for indx in indices: + ni += 1 + if indx[0] == 'f': + new_indices[0].extend(indx[1:]) + del new_indices[ni] + ni -= 1 + for ax in range(ai, ai + len(indx[1:])): + fancy_axes.append(ax) + axes.remove(ax) + ai += len(indx) - 1 # axis we are at + indices = new_indices + # and now we need to transpose arr: + arr = arr.transpose(*(fancy_axes + axes)) + + # We only have one 'f' index now and arr is transposed accordingly. + # Now handle newaxis by reshaping... + ax = 0 + for indx in indices: + if indx[0] == 'f': + if len(indx) == 1: + continue + # First of all, reshape arr to combine fancy axes into one: + orig_shape = arr.shape + orig_slice = orig_shape[ax:ax + len(indx[1:])] + arr = arr.reshape((arr.shape[:ax] + + (np.prod(orig_slice).astype(int),) + + arr.shape[ax + len(indx[1:]):])) + + # Check if broadcasting works + res = np.broadcast(*indx[1:]) + # unfortunately the indices might be out of bounds. So check + # that first, and use mode='wrap' then. However only if + # there are any indices... + if res.size != 0: + if error_unless_broadcast_to_empty: + raise IndexError + for _indx, _size in zip(indx[1:], orig_slice): + if _indx.size == 0: + continue + if np.any(_indx >= _size) or np.any(_indx < -_size): + raise IndexError + if len(indx[1:]) == len(orig_slice): + if np.prod(orig_slice) == 0: + # Work around for a crash or IndexError with 'wrap' + # in some 0-sized cases. + try: + mi = np.ravel_multi_index(indx[1:], orig_slice, + mode='raise') + except Exception: + # This happens with 0-sized orig_slice (sometimes?) + # here it is a ValueError, but indexing gives a: + raise IndexError('invalid index into 0-sized') + else: + mi = np.ravel_multi_index(indx[1:], orig_slice, + mode='wrap') + else: + # Maybe never happens... + raise ValueError + arr = arr.take(mi.ravel(), axis=ax) + try: + arr = arr.reshape((arr.shape[:ax] + + mi.shape + + arr.shape[ax+1:])) + except ValueError: + # too many dimensions, probably + raise IndexError + ax += mi.ndim + continue + + # If we are here, we have a 1D array for take: + arr = arr.take(indx[1], axis=ax) + ax += 1 + + return arr, no_copy + + def _check_multi_index(self, arr, index): + """Check a multi index item getting and simple setting. + + Parameters + ---------- + arr : ndarray + Array to be indexed, must be a reshaped arange. + index : tuple of indexing objects + Index being tested. + """ + # Test item getting + try: + mimic_get, no_copy = self._get_multi_index(arr, index) + except Exception as e: + if HAS_REFCOUNT: + prev_refcount = sys.getrefcount(arr) + assert_raises(type(e), arr.__getitem__, index) + assert_raises(type(e), arr.__setitem__, index, 0) + if HAS_REFCOUNT: + assert_equal(prev_refcount, sys.getrefcount(arr)) + return + + self._compare_index_result(arr, index, mimic_get, no_copy) + + def _check_single_index(self, arr, index): + """Check a single index item getting and simple setting. + + Parameters + ---------- + arr : ndarray + Array to be indexed, must be an arange. + index : indexing object + Index being tested. 
Must be a single index and not a tuple + of indexing objects (see also `_check_multi_index`). + """ + try: + mimic_get, no_copy = self._get_multi_index(arr, (index,)) + except Exception as e: + if HAS_REFCOUNT: + prev_refcount = sys.getrefcount(arr) + assert_raises(type(e), arr.__getitem__, index) + assert_raises(type(e), arr.__setitem__, index, 0) + if HAS_REFCOUNT: + assert_equal(prev_refcount, sys.getrefcount(arr)) + return + + self._compare_index_result(arr, index, mimic_get, no_copy) + + def _compare_index_result(self, arr, index, mimic_get, no_copy): + """Compare mimicked result to indexing result. + """ + arr = arr.copy() + indexed_arr = arr[index] + assert_array_equal(indexed_arr, mimic_get) + # Check if we got a view, unless its a 0-sized or 0-d array. + # (then its not a view, and that does not matter) + if indexed_arr.size != 0 and indexed_arr.ndim != 0: + assert_(np.may_share_memory(indexed_arr, arr) == no_copy) + # Check reference count of the original array + if HAS_REFCOUNT: + if no_copy: + # refcount increases by one: + assert_equal(sys.getrefcount(arr), 3) + else: + assert_equal(sys.getrefcount(arr), 2) + + # Test non-broadcast setitem: + b = arr.copy() + b[index] = mimic_get + 1000 + if b.size == 0: + return # nothing to compare here... + if no_copy and indexed_arr.ndim != 0: + # change indexed_arr in-place to manipulate original: + indexed_arr += 1000 + assert_array_equal(arr, b) + return + # Use the fact that the array is originally an arange: + arr.flat[indexed_arr.ravel()] += 1000 + assert_array_equal(arr, b) + + def test_boolean(self): + a = np.array(5) + assert_equal(a[np.array(True)], 5) + a[np.array(True)] = 1 + assert_equal(a, 1) + # NOTE: This is different from normal broadcasting, as + # arr[boolean_array] works like in a multi index. Which means + # it is aligned to the left. This is probably correct for + # consistency with arr[boolean_array,] also no broadcasting + # is done at all + self._check_multi_index( + self.a, (np.zeros_like(self.a, dtype=bool),)) + self._check_multi_index( + self.a, (np.zeros_like(self.a, dtype=bool)[..., 0],)) + self._check_multi_index( + self.a, (np.zeros_like(self.a, dtype=bool)[None, ...],)) + + def test_multidim(self): + # Automatically test combinations with complex indexes on 2nd (or 1st) + # spot and the simple ones in one other spot. + with warnings.catch_warnings(): + # This is so that np.array(True) is not accepted in a full integer + # index, when running the file separately. 
+ warnings.filterwarnings('error', '', DeprecationWarning) + warnings.filterwarnings('error', '', np.VisibleDeprecationWarning) + + def isskip(idx): + return isinstance(idx, str) and idx == "skip" + + for simple_pos in [0, 2, 3]: + tocheck = [self.fill_indices, self.complex_indices, + self.fill_indices, self.fill_indices] + tocheck[simple_pos] = self.simple_indices + for index in product(*tocheck): + index = tuple(i for i in index if not isskip(i)) + self._check_multi_index(self.a, index) + self._check_multi_index(self.b, index) + + # Check very simple item getting: + self._check_multi_index(self.a, (0, 0, 0, 0)) + self._check_multi_index(self.b, (0, 0, 0, 0)) + # Also check (simple cases of) too many indices: + assert_raises(IndexError, self.a.__getitem__, (0, 0, 0, 0, 0)) + assert_raises(IndexError, self.a.__setitem__, (0, 0, 0, 0, 0), 0) + assert_raises(IndexError, self.a.__getitem__, (0, 0, [1], 0, 0)) + assert_raises(IndexError, self.a.__setitem__, (0, 0, [1], 0, 0), 0) + + def test_1d(self): + a = np.arange(10) + for index in self.complex_indices: + self._check_single_index(a, index) + +class TestFloatNonIntegerArgument: + """ + These test that ``TypeError`` is raised when you try to use + non-integers as arguments to for indexing and slicing e.g. ``a[0.0:5]`` + and ``a[0.5]``, or other functions like ``array.reshape(1., -1)``. + + """ + def test_valid_indexing(self): + # These should raise no errors. + a = np.array([[[5]]]) + + a[np.array([0])] + a[[0, 0]] + a[:, [0, 0]] + a[:, 0,:] + a[:,:,:] + + def test_valid_slicing(self): + # These should raise no errors. + a = np.array([[[5]]]) + + a[::] + a[0:] + a[:2] + a[0:2] + a[::2] + a[1::2] + a[:2:2] + a[1:2:2] + + def test_non_integer_argument_errors(self): + a = np.array([[5]]) + + assert_raises(TypeError, np.reshape, a, (1., 1., -1)) + assert_raises(TypeError, np.reshape, a, (np.array(1.), -1)) + assert_raises(TypeError, np.take, a, [0], 1.) + assert_raises(TypeError, np.take, a, [0], np.float64(1.)) + + def test_non_integer_sequence_multiplication(self): + # NumPy scalar sequence multiply should not work with non-integers + def mult(a, b): + return a * b + + assert_raises(TypeError, mult, [1], np.float_(3)) + # following should be OK + mult([1], np.int_(3)) + + def test_reduce_axis_float_index(self): + d = np.zeros((3,3,3)) + assert_raises(TypeError, np.min, d, 0.5) + assert_raises(TypeError, np.min, d, (0.5, 1)) + assert_raises(TypeError, np.min, d, (1, 2.2)) + assert_raises(TypeError, np.min, d, (.2, 1.2)) + + +class TestBooleanIndexing: + # Using a boolean as integer argument/indexing is an error. + def test_bool_as_int_argument_errors(self): + a = np.array([[[1]]]) + + assert_raises(TypeError, np.reshape, a, (True, -1)) + assert_raises(TypeError, np.reshape, a, (np.bool_(True), -1)) + # Note that operator.index(np.array(True)) does not work, a boolean + # array is thus also deprecated, but not with the same message: + assert_raises(TypeError, operator.index, np.array(True)) + assert_warns(DeprecationWarning, operator.index, np.True_) + assert_raises(TypeError, np.take, args=(a, [0], False)) + + def test_boolean_indexing_weirdness(self): + # Weird boolean indexing things + a = np.ones((2, 3, 4)) + assert a[False, True, ...].shape == (0, 2, 3, 4) + assert a[True, [0, 1], True, True, [1], [[2]]].shape == (1, 2) + assert_raises(IndexError, lambda: a[False, [0, 1], ...]) + + def test_boolean_indexing_fast_path(self): + # These used to either give the wrong error, or incorrectly give no + # error. 
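For illustration, a minimal sketch (not part of the vendored test file) of the rule the checks below enforce: a boolean index must match the shape of the dimensions it consumes, and a mismatch raises IndexError whether or not the fast path is taken.

import numpy as np

a = np.ones((3, 3))
idx = np.array([[False] * 9])   # shape (1, 9) does not match (3, 3)
try:
    a[idx]
    raise AssertionError("expected IndexError")
except IndexError as e:
    assert "boolean index did not match" in str(e)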
+ a = np.ones((3, 3)) + + # This used to incorrectly work (and give an array of shape (0,)) + idx1 = np.array([[False]*9]) + assert_raises_regex(IndexError, + "boolean index did not match indexed array along dimension 0; " + "dimension is 3 but corresponding boolean dimension is 1", + lambda: a[idx1]) + + # This used to incorrectly give a ValueError: operands could not be broadcast together + idx2 = np.array([[False]*8 + [True]]) + assert_raises_regex(IndexError, + "boolean index did not match indexed array along dimension 0; " + "dimension is 3 but corresponding boolean dimension is 1", + lambda: a[idx2]) + + # This is the same as it used to be. The above two should work like this. + idx3 = np.array([[False]*10]) + assert_raises_regex(IndexError, + "boolean index did not match indexed array along dimension 0; " + "dimension is 3 but corresponding boolean dimension is 1", + lambda: a[idx3]) + + # This used to give ValueError: non-broadcastable operand + a = np.ones((1, 1, 2)) + idx = np.array([[[True], [False]]]) + assert_raises_regex(IndexError, + "boolean index did not match indexed array along dimension 1; " + "dimension is 1 but corresponding boolean dimension is 2", + lambda: a[idx]) + + +class TestArrayToIndexDeprecation: + """Creating an index from array not 0-D is an error. + + """ + def test_array_to_index_error(self): + # so no exception is expected. The raising is effectively tested above. + a = np.array([[[1]]]) + + assert_raises(TypeError, operator.index, np.array([1])) + assert_raises(TypeError, np.reshape, a, (a, -1)) + assert_raises(TypeError, np.take, a, [0], a) + + +class TestNonIntegerArrayLike: + """Tests that array_likes only valid if can safely cast to integer. + + For instance, lists give IndexError when they cannot be safely cast to + an integer. + + """ + def test_basic(self): + a = np.arange(10) + + assert_raises(IndexError, a.__getitem__, [0.5, 1.5]) + assert_raises(IndexError, a.__getitem__, (['1', '2'],)) + + # The following is valid + a.__getitem__([]) + + +class TestMultipleEllipsisError: + """An index can only have a single ellipsis. 
+ + """ + def test_basic(self): + a = np.arange(10) + assert_raises(IndexError, lambda: a[..., ...]) + assert_raises(IndexError, a.__getitem__, ((Ellipsis,) * 2,)) + assert_raises(IndexError, a.__getitem__, ((Ellipsis,) * 3,)) + + +class TestCApiAccess: + def test_getitem(self): + subscript = functools.partial(array_indexing, 0) + + # 0-d arrays don't work: + assert_raises(IndexError, subscript, np.ones(()), 0) + # Out of bound values: + assert_raises(IndexError, subscript, np.ones(10), 11) + assert_raises(IndexError, subscript, np.ones(10), -11) + assert_raises(IndexError, subscript, np.ones((10, 10)), 11) + assert_raises(IndexError, subscript, np.ones((10, 10)), -11) + + a = np.arange(10) + assert_array_equal(a[4], subscript(a, 4)) + a = a.reshape(5, 2) + assert_array_equal(a[-4], subscript(a, -4)) + + def test_setitem(self): + assign = functools.partial(array_indexing, 1) + + # Deletion is impossible: + assert_raises(ValueError, assign, np.ones(10), 0) + # 0-d arrays don't work: + assert_raises(IndexError, assign, np.ones(()), 0, 0) + # Out of bound values: + assert_raises(IndexError, assign, np.ones(10), 11, 0) + assert_raises(IndexError, assign, np.ones(10), -11, 0) + assert_raises(IndexError, assign, np.ones((10, 10)), 11, 0) + assert_raises(IndexError, assign, np.ones((10, 10)), -11, 0) + + a = np.arange(10) + assign(a, 4, 10) + assert_(a[4] == 10) + + a = a.reshape(5, 2) + assign(a, 4, 10) + assert_array_equal(a[-1], [10, 10]) diff --git a/.venv/lib/python3.12/site-packages/numpy/core/tests/test_item_selection.py b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_item_selection.py new file mode 100644 index 00000000..5660ef58 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_item_selection.py @@ -0,0 +1,165 @@ +import sys + +import pytest + +import numpy as np +from numpy.testing import ( + assert_, assert_raises, assert_array_equal, HAS_REFCOUNT + ) + + +class TestTake: + def test_simple(self): + a = [[1, 2], [3, 4]] + a_str = [[b'1', b'2'], [b'3', b'4']] + modes = ['raise', 'wrap', 'clip'] + indices = [-1, 4] + index_arrays = [np.empty(0, dtype=np.intp), + np.empty(tuple(), dtype=np.intp), + np.empty((1, 1), dtype=np.intp)] + real_indices = {'raise': {-1: 1, 4: IndexError}, + 'wrap': {-1: 1, 4: 0}, + 'clip': {-1: 0, 4: 1}} + # Currently all types but object, use the same function generation. + # So it should not be necessary to test all. However test also a non + # refcounted struct on top of object, which has a size that hits the + # default (non-specialized) path. 
+ types = int, object, np.dtype([('', 'i2', 3)]) + for t in types: + # ta works, even if the array may be odd if buffer interface is used + ta = np.array(a if np.issubdtype(t, np.number) else a_str, dtype=t) + tresult = list(ta.T.copy()) + for index_array in index_arrays: + if index_array.size != 0: + tresult[0].shape = (2,) + index_array.shape + tresult[1].shape = (2,) + index_array.shape + for mode in modes: + for index in indices: + real_index = real_indices[mode][index] + if real_index is IndexError and index_array.size != 0: + index_array.put(0, index) + assert_raises(IndexError, ta.take, index_array, + mode=mode, axis=1) + elif index_array.size != 0: + index_array.put(0, index) + res = ta.take(index_array, mode=mode, axis=1) + assert_array_equal(res, tresult[real_index]) + else: + res = ta.take(index_array, mode=mode, axis=1) + assert_(res.shape == (2,) + index_array.shape) + + def test_refcounting(self): + objects = [object() for i in range(10)] + for mode in ('raise', 'clip', 'wrap'): + a = np.array(objects) + b = np.array([2, 2, 4, 5, 3, 5]) + a.take(b, out=a[:6], mode=mode) + del a + if HAS_REFCOUNT: + assert_(all(sys.getrefcount(o) == 3 for o in objects)) + # not contiguous, example: + a = np.array(objects * 2)[::2] + a.take(b, out=a[:6], mode=mode) + del a + if HAS_REFCOUNT: + assert_(all(sys.getrefcount(o) == 3 for o in objects)) + + def test_unicode_mode(self): + d = np.arange(10) + k = b'\xc3\xa4'.decode("UTF8") + assert_raises(ValueError, d.take, 5, mode=k) + + def test_empty_partition(self): + # In reference to github issue #6530 + a_original = np.array([0, 2, 4, 6, 8, 10]) + a = a_original.copy() + + # An empty partition should be a successful no-op + a.partition(np.array([], dtype=np.int16)) + + assert_array_equal(a, a_original) + + def test_empty_argpartition(self): + # In reference to github issue #6530 + a = np.array([0, 2, 4, 6, 8, 10]) + a = a.argpartition(np.array([], dtype=np.int16)) + + b = np.array([0, 1, 2, 3, 4, 5]) + assert_array_equal(a, b) + + +class TestPutMask: + @pytest.mark.parametrize("dtype", list(np.typecodes["All"]) + ["i,O"]) + def test_simple(self, dtype): + if dtype.lower() == "m": + dtype += "8[ns]" + + # putmask is weird and doesn't care about value length (even shorter) + vals = np.arange(1001).astype(dtype=dtype) + + mask = np.random.randint(2, size=1000).astype(bool) + # Use vals.dtype in case of flexible dtype (i.e. string) + arr = np.zeros(1000, dtype=vals.dtype) + zeros = arr.copy() + + np.putmask(arr, mask, vals) + assert_array_equal(arr[mask], vals[:len(mask)][mask]) + assert_array_equal(arr[~mask], zeros[~mask]) + + @pytest.mark.parametrize("dtype", list(np.typecodes["All"])[1:] + ["i,O"]) + @pytest.mark.parametrize("mode", ["raise", "wrap", "clip"]) + def test_empty(self, dtype, mode): + arr = np.zeros(1000, dtype=dtype) + arr_copy = arr.copy() + mask = np.random.randint(2, size=1000).astype(bool) + + # Allowing empty values like this is weird... + np.put(arr, mask, []) + assert_array_equal(arr, arr_copy) + + +class TestPut: + @pytest.mark.parametrize("dtype", list(np.typecodes["All"])[1:] + ["i,O"]) + @pytest.mark.parametrize("mode", ["raise", "wrap", "clip"]) + def test_simple(self, dtype, mode): + if dtype.lower() == "m": + dtype += "8[ns]" + + # put is weird and doesn't care about value length (even shorter) + vals = np.arange(1001).astype(dtype=dtype) + + # Use vals.dtype in case of flexible dtype (i.e. 
string) + arr = np.zeros(1000, dtype=vals.dtype) + zeros = arr.copy() + + if mode == "clip": + # Special because 0 and -1 value are "reserved" for clip test + indx = np.random.permutation(len(arr) - 2)[:-500] + 1 + + indx[-1] = 0 + indx[-2] = len(arr) - 1 + indx_put = indx.copy() + indx_put[-1] = -1389 + indx_put[-2] = 1321 + else: + # Avoid duplicates (for simplicity) and fill half only + indx = np.random.permutation(len(arr) - 3)[:-500] + indx_put = indx + if mode == "wrap": + indx_put = indx_put + len(arr) + + np.put(arr, indx_put, vals, mode=mode) + assert_array_equal(arr[indx], vals[:len(indx)]) + untouched = np.ones(len(arr), dtype=bool) + untouched[indx] = False + assert_array_equal(arr[untouched], zeros[:untouched.sum()]) + + @pytest.mark.parametrize("dtype", list(np.typecodes["All"])[1:] + ["i,O"]) + @pytest.mark.parametrize("mode", ["raise", "wrap", "clip"]) + def test_empty(self, dtype, mode): + arr = np.zeros(1000, dtype=dtype) + arr_copy = arr.copy() + + # Allowing empty values like this is weird... + np.put(arr, [1, 2, 3], []) + assert_array_equal(arr, arr_copy) diff --git a/.venv/lib/python3.12/site-packages/numpy/core/tests/test_limited_api.py b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_limited_api.py new file mode 100644 index 00000000..725de19b --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_limited_api.py @@ -0,0 +1,44 @@ +import os +import shutil +import subprocess +import sys +import sysconfig +import pytest + +from numpy.testing import IS_WASM + + +@pytest.mark.skipif(IS_WASM, reason="Can't start subprocess") +@pytest.mark.xfail( + sysconfig.get_config_var("Py_DEBUG"), + reason=( + "Py_LIMITED_API is incompatible with Py_DEBUG, Py_TRACE_REFS, " + "and Py_REF_DEBUG" + ), +) +def test_limited_api(tmp_path): + """Test building a third-party C extension with the limited API.""" + # Based in part on test_cython from random.tests.test_extending + + here = os.path.dirname(__file__) + ext_dir = os.path.join(here, "examples", "limited_api") + + cytest = str(tmp_path / "limited_api") + + shutil.copytree(ext_dir, cytest) + # build the examples and "install" them into a temporary directory + + install_log = str(tmp_path / "tmp_install_log.txt") + subprocess.check_output( + [ + sys.executable, + "setup.py", + "build", + "install", + "--prefix", str(tmp_path / "installdir"), + "--single-version-externally-managed", + "--record", + install_log, + ], + cwd=cytest, + ) diff --git a/.venv/lib/python3.12/site-packages/numpy/core/tests/test_longdouble.py b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_longdouble.py new file mode 100644 index 00000000..45721950 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_longdouble.py @@ -0,0 +1,395 @@ +import warnings +import platform +import pytest + +import numpy as np +from numpy.testing import ( + assert_, assert_equal, assert_raises, assert_warns, assert_array_equal, + temppath, IS_MUSL + ) +from numpy.core.tests._locales import CommaDecimalPointLocale + + +LD_INFO = np.finfo(np.longdouble) +longdouble_longer_than_double = (LD_INFO.eps < np.finfo(np.double).eps) + + +_o = 1 + LD_INFO.eps +string_to_longdouble_inaccurate = (_o != np.longdouble(repr(_o))) +del _o + + +def test_scalar_extraction(): + """Confirm that extracting a value doesn't convert to python float""" + o = 1 + LD_INFO.eps + a = np.array([o, o, o]) + assert_equal(a[1], o) + + +# Conversions string -> long double + +# 0.1 not exactly representable in base 2 floating point. 
+repr_precision = len(repr(np.longdouble(0.1))) +# +2 from macro block starting around line 842 in scalartypes.c.src. + + +@pytest.mark.skipif(IS_MUSL, + reason="test flaky on musllinux") +@pytest.mark.skipif(LD_INFO.precision + 2 >= repr_precision, + reason="repr precision not enough to show eps") +def test_repr_roundtrip(): + # We will only see eps in repr if within printing precision. + o = 1 + LD_INFO.eps + assert_equal(np.longdouble(repr(o)), o, "repr was %s" % repr(o)) + + +@pytest.mark.skipif(string_to_longdouble_inaccurate, reason="Need strtold_l") +def test_repr_roundtrip_bytes(): + o = 1 + LD_INFO.eps + assert_equal(np.longdouble(repr(o).encode("ascii")), o) + + +@pytest.mark.skipif(string_to_longdouble_inaccurate, reason="Need strtold_l") +@pytest.mark.parametrize("strtype", (np.str_, np.bytes_, str, bytes)) +def test_array_and_stringlike_roundtrip(strtype): + """ + Test that string representations of long-double roundtrip both + for array casting and scalar coercion, see also gh-15608. + """ + o = 1 + LD_INFO.eps + + if strtype in (np.bytes_, bytes): + o_str = strtype(repr(o).encode("ascii")) + else: + o_str = strtype(repr(o)) + + # Test that `o` is correctly coerced from the string-like + assert o == np.longdouble(o_str) + + # Test that arrays also roundtrip correctly: + o_strarr = np.asarray([o] * 3, dtype=strtype) + assert (o == o_strarr.astype(np.longdouble)).all() + + # And array coercion and casting to string give the same as scalar repr: + assert (o_strarr == o_str).all() + assert (np.asarray([o] * 3).astype(strtype) == o_str).all() + + +def test_bogus_string(): + assert_raises(ValueError, np.longdouble, "spam") + assert_raises(ValueError, np.longdouble, "1.0 flub") + + +@pytest.mark.skipif(string_to_longdouble_inaccurate, reason="Need strtold_l") +def test_fromstring(): + o = 1 + LD_INFO.eps + s = (" " + repr(o))*5 + a = np.array([o]*5) + assert_equal(np.fromstring(s, sep=" ", dtype=np.longdouble), a, + err_msg="reading '%s'" % s) + + +def test_fromstring_complex(): + for ctype in ["complex", "cdouble", "cfloat"]: + # Check spacing between separator + assert_equal(np.fromstring("1, 2 , 3 ,4", sep=",", dtype=ctype), + np.array([1., 2., 3., 4.])) + # Real component not specified + assert_equal(np.fromstring("1j, -2j, 3j, 4e1j", sep=",", dtype=ctype), + np.array([1.j, -2.j, 3.j, 40.j])) + # Both components specified + assert_equal(np.fromstring("1+1j,2-2j, -3+3j, -4e1+4j", sep=",", dtype=ctype), + np.array([1. + 1.j, 2. - 2.j, - 3. + 3.j, - 40. + 4j])) + # Spaces at wrong places + with assert_warns(DeprecationWarning): + assert_equal(np.fromstring("1+2 j,3", dtype=ctype, sep=","), + np.array([1.])) + with assert_warns(DeprecationWarning): + assert_equal(np.fromstring("1+ 2j,3", dtype=ctype, sep=","), + np.array([1.])) + with assert_warns(DeprecationWarning): + assert_equal(np.fromstring("1 +2j,3", dtype=ctype, sep=","), + np.array([1.])) + with assert_warns(DeprecationWarning): + assert_equal(np.fromstring("1+j", dtype=ctype, sep=","), + np.array([1.])) + with assert_warns(DeprecationWarning): + assert_equal(np.fromstring("1+", dtype=ctype, sep=","), + np.array([1.])) + with assert_warns(DeprecationWarning): + assert_equal(np.fromstring("1j+1", dtype=ctype, sep=","), + np.array([1j])) + + +def test_fromstring_bogus(): + with assert_warns(DeprecationWarning): + assert_equal(np.fromstring("1. 2. 3. 
flop 4.", dtype=float, sep=" "), + np.array([1., 2., 3.])) + + +def test_fromstring_empty(): + with assert_warns(DeprecationWarning): + assert_equal(np.fromstring("xxxxx", sep="x"), + np.array([])) + + +def test_fromstring_missing(): + with assert_warns(DeprecationWarning): + assert_equal(np.fromstring("1xx3x4x5x6", sep="x"), + np.array([1])) + + +class TestFileBased: + + ldbl = 1 + LD_INFO.eps + tgt = np.array([ldbl]*5) + out = ''.join([repr(t) + '\n' for t in tgt]) + + def test_fromfile_bogus(self): + with temppath() as path: + with open(path, 'w') as f: + f.write("1. 2. 3. flop 4.\n") + + with assert_warns(DeprecationWarning): + res = np.fromfile(path, dtype=float, sep=" ") + assert_equal(res, np.array([1., 2., 3.])) + + def test_fromfile_complex(self): + for ctype in ["complex", "cdouble", "cfloat"]: + # Check spacing between separator and only real component specified + with temppath() as path: + with open(path, 'w') as f: + f.write("1, 2 , 3 ,4\n") + + res = np.fromfile(path, dtype=ctype, sep=",") + assert_equal(res, np.array([1., 2., 3., 4.])) + + # Real component not specified + with temppath() as path: + with open(path, 'w') as f: + f.write("1j, -2j, 3j, 4e1j\n") + + res = np.fromfile(path, dtype=ctype, sep=",") + assert_equal(res, np.array([1.j, -2.j, 3.j, 40.j])) + + # Both components specified + with temppath() as path: + with open(path, 'w') as f: + f.write("1+1j,2-2j, -3+3j, -4e1+4j\n") + + res = np.fromfile(path, dtype=ctype, sep=",") + assert_equal(res, np.array([1. + 1.j, 2. - 2.j, - 3. + 3.j, - 40. + 4j])) + + # Spaces at wrong places + with temppath() as path: + with open(path, 'w') as f: + f.write("1+2 j,3\n") + + with assert_warns(DeprecationWarning): + res = np.fromfile(path, dtype=ctype, sep=",") + assert_equal(res, np.array([1.])) + + # Spaces at wrong places + with temppath() as path: + with open(path, 'w') as f: + f.write("1+ 2j,3\n") + + with assert_warns(DeprecationWarning): + res = np.fromfile(path, dtype=ctype, sep=",") + assert_equal(res, np.array([1.])) + + # Spaces at wrong places + with temppath() as path: + with open(path, 'w') as f: + f.write("1 +2j,3\n") + + with assert_warns(DeprecationWarning): + res = np.fromfile(path, dtype=ctype, sep=",") + assert_equal(res, np.array([1.])) + + # Spaces at wrong places + with temppath() as path: + with open(path, 'w') as f: + f.write("1+j\n") + + with assert_warns(DeprecationWarning): + res = np.fromfile(path, dtype=ctype, sep=",") + assert_equal(res, np.array([1.])) + + # Spaces at wrong places + with temppath() as path: + with open(path, 'w') as f: + f.write("1+\n") + + with assert_warns(DeprecationWarning): + res = np.fromfile(path, dtype=ctype, sep=",") + assert_equal(res, np.array([1.])) + + # Spaces at wrong places + with temppath() as path: + with open(path, 'w') as f: + f.write("1j+1\n") + + with assert_warns(DeprecationWarning): + res = np.fromfile(path, dtype=ctype, sep=",") + assert_equal(res, np.array([1.j])) + + + + @pytest.mark.skipif(string_to_longdouble_inaccurate, + reason="Need strtold_l") + def test_fromfile(self): + with temppath() as path: + with open(path, 'w') as f: + f.write(self.out) + res = np.fromfile(path, dtype=np.longdouble, sep="\n") + assert_equal(res, self.tgt) + + @pytest.mark.skipif(string_to_longdouble_inaccurate, + reason="Need strtold_l") + def test_genfromtxt(self): + with temppath() as path: + with open(path, 'w') as f: + f.write(self.out) + res = np.genfromtxt(path, dtype=np.longdouble) + assert_equal(res, self.tgt) + + @pytest.mark.skipif(string_to_longdouble_inaccurate, + 
reason="Need strtold_l") + def test_loadtxt(self): + with temppath() as path: + with open(path, 'w') as f: + f.write(self.out) + res = np.loadtxt(path, dtype=np.longdouble) + assert_equal(res, self.tgt) + + @pytest.mark.skipif(string_to_longdouble_inaccurate, + reason="Need strtold_l") + def test_tofile_roundtrip(self): + with temppath() as path: + self.tgt.tofile(path, sep=" ") + res = np.fromfile(path, dtype=np.longdouble, sep=" ") + assert_equal(res, self.tgt) + + +# Conversions long double -> string + + +def test_repr_exact(): + o = 1 + LD_INFO.eps + assert_(repr(o) != '1') + + +@pytest.mark.skipif(longdouble_longer_than_double, reason="BUG #2376") +@pytest.mark.skipif(string_to_longdouble_inaccurate, + reason="Need strtold_l") +def test_format(): + o = 1 + LD_INFO.eps + assert_("{0:.40g}".format(o) != '1') + + +@pytest.mark.skipif(longdouble_longer_than_double, reason="BUG #2376") +@pytest.mark.skipif(string_to_longdouble_inaccurate, + reason="Need strtold_l") +def test_percent(): + o = 1 + LD_INFO.eps + assert_("%.40g" % o != '1') + + +@pytest.mark.skipif(longdouble_longer_than_double, + reason="array repr problem") +@pytest.mark.skipif(string_to_longdouble_inaccurate, + reason="Need strtold_l") +def test_array_repr(): + o = 1 + LD_INFO.eps + a = np.array([o]) + b = np.array([1], dtype=np.longdouble) + if not np.all(a != b): + raise ValueError("precision loss creating arrays") + assert_(repr(a) != repr(b)) + +# +# Locale tests: scalar types formatting should be independent of the locale +# + +class TestCommaDecimalPointLocale(CommaDecimalPointLocale): + + def test_repr_roundtrip_foreign(self): + o = 1.5 + assert_equal(o, np.longdouble(repr(o))) + + def test_fromstring_foreign_repr(self): + f = 1.234 + a = np.fromstring(repr(f), dtype=float, sep=" ") + assert_equal(a[0], f) + + def test_fromstring_best_effort_float(self): + with assert_warns(DeprecationWarning): + assert_equal(np.fromstring("1,234", dtype=float, sep=" "), + np.array([1.])) + + def test_fromstring_best_effort(self): + with assert_warns(DeprecationWarning): + assert_equal(np.fromstring("1,234", dtype=np.longdouble, sep=" "), + np.array([1.])) + + def test_fromstring_foreign(self): + s = "1.234" + a = np.fromstring(s, dtype=np.longdouble, sep=" ") + assert_equal(a[0], np.longdouble(s)) + + def test_fromstring_foreign_sep(self): + a = np.array([1, 2, 3, 4]) + b = np.fromstring("1,2,3,4,", dtype=np.longdouble, sep=",") + assert_array_equal(a, b) + + def test_fromstring_foreign_value(self): + with assert_warns(DeprecationWarning): + b = np.fromstring("1,234", dtype=np.longdouble, sep=" ") + assert_array_equal(b[0], 1) + + +@pytest.mark.parametrize("int_val", [ + # cases discussed in gh-10723 + # and gh-9968 + 2 ** 1024, 0]) +def test_longdouble_from_int(int_val): + # for issue gh-9968 + str_val = str(int_val) + # we'll expect a RuntimeWarning on platforms + # with np.longdouble equivalent to np.double + # for large integer input + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', RuntimeWarning) + # can be inf==inf on some platforms + assert np.longdouble(int_val) == np.longdouble(str_val) + # we can't directly compare the int and + # max longdouble value on all platforms + if np.allclose(np.finfo(np.longdouble).max, + np.finfo(np.double).max) and w: + assert w[0].category is RuntimeWarning + +@pytest.mark.parametrize("bool_val", [ + True, False]) +def test_longdouble_from_bool(bool_val): + assert np.longdouble(bool_val) == np.longdouble(int(bool_val)) + + +@pytest.mark.skipif( + not 
(IS_MUSL and platform.machine() == "x86_64"), + reason="only need to run on musllinux_x86_64" +) +def test_musllinux_x86_64_signature(): + # this test may fail if you're emulating musllinux_x86_64 on a different + # architecture, but should pass natively. + known_sigs = [b'\xcd\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xfb\xbf'] + sig = (np.longdouble(-1.0) / np.longdouble(10.0) + ).newbyteorder('<').tobytes()[:10] + assert sig in known_sigs + + +def test_eps_positive(): + # np.finfo('g').eps should be positive on all platforms. If this isn't true + # then something may have gone wrong with the MachArLike, e.g. if + # np.core.getlimits._discovered_machar didn't work properly + assert np.finfo(np.longdouble).eps > 0. diff --git a/.venv/lib/python3.12/site-packages/numpy/core/tests/test_machar.py b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_machar.py new file mode 100644 index 00000000..3a66ec51 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_machar.py @@ -0,0 +1,30 @@ +""" +Test machar. Given recent changes to hardcode type data, we might want to get +rid of both MachAr and this test at some point. + +""" +from numpy.core._machar import MachAr +import numpy.core.numerictypes as ntypes +from numpy import errstate, array + + +class TestMachAr: + def _run_machar_highprec(self): + # Instantiate MachAr instance with high enough precision to cause + # underflow + try: + hiprec = ntypes.float96 + MachAr(lambda v: array(v, hiprec)) + except AttributeError: + # Fixme, this needs to raise a 'skip' exception. + "Skipping test: no ntypes.float96 available on this platform." + + def test_underlow(self): + # Regression test for #759: + # instantiating MachAr for dtype = np.float96 raises spurious warning. + with errstate(all='raise'): + try: + self._run_machar_highprec() + except FloatingPointError as e: + msg = "Caught %s exception, should not have been raised." % e + raise AssertionError(msg) diff --git a/.venv/lib/python3.12/site-packages/numpy/core/tests/test_mem_overlap.py b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_mem_overlap.py new file mode 100644 index 00000000..1fd4c4d4 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_mem_overlap.py @@ -0,0 +1,931 @@ +import itertools +import pytest + +import numpy as np +from numpy.core._multiarray_tests import solve_diophantine, internal_overlap +from numpy.core import _umath_tests +from numpy.lib.stride_tricks import as_strided +from numpy.testing import ( + assert_, assert_raises, assert_equal, assert_array_equal + ) + + +ndims = 2 +size = 10 +shape = tuple([size] * ndims) + +MAY_SHARE_BOUNDS = 0 +MAY_SHARE_EXACT = -1 + + +def _indices_for_nelems(nelems): + """Returns slices of length nelems, from start onwards, in direction sign.""" + + if nelems == 0: + return [size // 2] # int index + + res = [] + for step in (1, 2): + for sign in (-1, 1): + start = size // 2 - nelems * step * sign // 2 + stop = start + nelems * step * sign + res.append(slice(start, stop, step * sign)) + + return res + + +def _indices_for_axis(): + """Returns (src, dst) pairs of indices.""" + + res = [] + for nelems in (0, 2, 3): + ind = _indices_for_nelems(nelems) + res.extend(itertools.product(ind, ind)) # all assignments of size "nelems" + + return res + + +def _indices(ndims): + """Returns ((axis0_src, axis0_dst), (axis1_src, axis1_dst), ... 
) index pairs.""" + + ind = _indices_for_axis() + return itertools.product(ind, repeat=ndims) + + +def _check_assignment(srcidx, dstidx): + """Check assignment arr[dstidx] = arr[srcidx] works.""" + + arr = np.arange(np.prod(shape)).reshape(shape) + + cpy = arr.copy() + + cpy[dstidx] = arr[srcidx] + arr[dstidx] = arr[srcidx] + + assert_(np.all(arr == cpy), + 'assigning arr[%s] = arr[%s]' % (dstidx, srcidx)) + + +def test_overlapping_assignments(): + # Test automatically generated assignments which overlap in memory. + + inds = _indices(ndims) + + for ind in inds: + srcidx = tuple([a[0] for a in ind]) + dstidx = tuple([a[1] for a in ind]) + + _check_assignment(srcidx, dstidx) + + +@pytest.mark.slow +def test_diophantine_fuzz(): + # Fuzz test the diophantine solver + rng = np.random.RandomState(1234) + + max_int = np.iinfo(np.intp).max + + for ndim in range(10): + feasible_count = 0 + infeasible_count = 0 + + min_count = 500//(ndim + 1) + + while min(feasible_count, infeasible_count) < min_count: + # Ensure big and small integer problems + A_max = 1 + rng.randint(0, 11, dtype=np.intp)**6 + U_max = rng.randint(0, 11, dtype=np.intp)**6 + + A_max = min(max_int, A_max) + U_max = min(max_int-1, U_max) + + A = tuple(int(rng.randint(1, A_max+1, dtype=np.intp)) + for j in range(ndim)) + U = tuple(int(rng.randint(0, U_max+2, dtype=np.intp)) + for j in range(ndim)) + + b_ub = min(max_int-2, sum(a*ub for a, ub in zip(A, U))) + b = int(rng.randint(-1, b_ub+2, dtype=np.intp)) + + if ndim == 0 and feasible_count < min_count: + b = 0 + + X = solve_diophantine(A, U, b) + + if X is None: + # Check the simplified decision problem agrees + X_simplified = solve_diophantine(A, U, b, simplify=1) + assert_(X_simplified is None, (A, U, b, X_simplified)) + + # Check no solution exists (provided the problem is + # small enough so that brute force checking doesn't + # take too long) + ranges = tuple(range(0, a*ub+1, a) for a, ub in zip(A, U)) + + size = 1 + for r in ranges: + size *= len(r) + if size < 100000: + assert_(not any(sum(w) == b for w in itertools.product(*ranges))) + infeasible_count += 1 + else: + # Check the simplified decision problem agrees + X_simplified = solve_diophantine(A, U, b, simplify=1) + assert_(X_simplified is not None, (A, U, b, X_simplified)) + + # Check validity + assert_(sum(a*x for a, x in zip(A, X)) == b) + assert_(all(0 <= x <= ub for x, ub in zip(X, U))) + feasible_count += 1 + + +def test_diophantine_overflow(): + # Smoke test integer overflow detection + max_intp = np.iinfo(np.intp).max + max_int64 = np.iinfo(np.int64).max + + if max_int64 <= max_intp: + # Check that the algorithm works internally in 128-bit; + # solving this problem requires large intermediate numbers + A = (max_int64//2, max_int64//2 - 10) + U = (max_int64//2, max_int64//2 - 10) + b = 2*(max_int64//2) - 10 + + assert_equal(solve_diophantine(A, U, b), (1, 1)) + + +def check_may_share_memory_exact(a, b): + got = np.may_share_memory(a, b, max_work=MAY_SHARE_EXACT) + + assert_equal(np.may_share_memory(a, b), + np.may_share_memory(a, b, max_work=MAY_SHARE_BOUNDS)) + + a.fill(0) + b.fill(0) + a.fill(1) + exact = b.any() + + err_msg = "" + if got != exact: + err_msg = " " + "\n ".join([ + "base_a - base_b = %r" % (a.__array_interface__['data'][0] - b.__array_interface__['data'][0],), + "shape_a = %r" % (a.shape,), + "shape_b = %r" % (b.shape,), + "strides_a = %r" % (a.strides,), + "strides_b = %r" % (b.strides,), + "size_a = %r" % (a.size,), + "size_b = %r" % (b.size,) + ]) + + assert_equal(got, exact, err_msg=err_msg) + 
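+
+# Editor's note: the following helper is an illustrative sketch added during
+# editing and is not part of the upstream NumPy test suite.  It restates, in
+# miniature, the brute-force oracle used by check_may_share_memory_exact()
+# above: zero both views, write ones through one of them, and see whether the
+# write becomes observable through the other; the exact overlap solver used by
+# np.shares_memory should agree with that oracle.
+def test_fill_and_observe_sketch():
+    x = np.zeros(10, dtype=np.int8)
+    a, b = x[:6], x[4:]               # overlapping views of a single buffer
+    a.fill(0)
+    b.fill(0)
+    a.fill(1)
+    assert_(b.any())                  # writes through `a` are visible via `b`
+    assert_(np.shares_memory(a, b))   # the exact solver agrees with the oracle
+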
+ +def test_may_share_memory_manual(): + # Manual test cases for may_share_memory + + # Base arrays + xs0 = [ + np.zeros([13, 21, 23, 22], dtype=np.int8), + np.zeros([13, 21, 23*2, 22], dtype=np.int8)[:,:,::2,:] + ] + + # Generate all negative stride combinations + xs = [] + for x in xs0: + for ss in itertools.product(*(([slice(None), slice(None, None, -1)],)*4)): + xp = x[ss] + xs.append(xp) + + for x in xs: + # The default is a simple extent check + assert_(np.may_share_memory(x[:,0,:], x[:,1,:])) + assert_(np.may_share_memory(x[:,0,:], x[:,1,:], max_work=None)) + + # Exact checks + check_may_share_memory_exact(x[:,0,:], x[:,1,:]) + check_may_share_memory_exact(x[:,::7], x[:,3::3]) + + try: + xp = x.ravel() + if xp.flags.owndata: + continue + xp = xp.view(np.int16) + except ValueError: + continue + + # 0-size arrays cannot overlap + check_may_share_memory_exact(x.ravel()[6:6], + xp.reshape(13, 21, 23, 11)[:,::7]) + + # Test itemsize is dealt with + check_may_share_memory_exact(x[:,::7], + xp.reshape(13, 21, 23, 11)) + check_may_share_memory_exact(x[:,::7], + xp.reshape(13, 21, 23, 11)[:,3::3]) + check_may_share_memory_exact(x.ravel()[6:7], + xp.reshape(13, 21, 23, 11)[:,::7]) + + # Check unit size + x = np.zeros([1], dtype=np.int8) + check_may_share_memory_exact(x, x) + check_may_share_memory_exact(x, x.copy()) + + +def iter_random_view_pairs(x, same_steps=True, equal_size=False): + rng = np.random.RandomState(1234) + + if equal_size and same_steps: + raise ValueError() + + def random_slice(n, step): + start = rng.randint(0, n+1, dtype=np.intp) + stop = rng.randint(start, n+1, dtype=np.intp) + if rng.randint(0, 2, dtype=np.intp) == 0: + stop, start = start, stop + step *= -1 + return slice(start, stop, step) + + def random_slice_fixed_size(n, step, size): + start = rng.randint(0, n+1 - size*step) + stop = start + (size-1)*step + 1 + if rng.randint(0, 2) == 0: + stop, start = start-1, stop-1 + if stop < 0: + stop = None + step *= -1 + return slice(start, stop, step) + + # First a few regular views + yield x, x + for j in range(1, 7, 3): + yield x[j:], x[:-j] + yield x[...,j:], x[...,:-j] + + # An array with zero stride internal overlap + strides = list(x.strides) + strides[0] = 0 + xp = as_strided(x, shape=x.shape, strides=strides) + yield x, xp + yield xp, xp + + # An array with non-zero stride internal overlap + strides = list(x.strides) + if strides[0] > 1: + strides[0] = 1 + xp = as_strided(x, shape=x.shape, strides=strides) + yield x, xp + yield xp, xp + + # Then discontiguous views + while True: + steps = tuple(rng.randint(1, 11, dtype=np.intp) + if rng.randint(0, 5, dtype=np.intp) == 0 else 1 + for j in range(x.ndim)) + s1 = tuple(random_slice(p, s) for p, s in zip(x.shape, steps)) + + t1 = np.arange(x.ndim) + rng.shuffle(t1) + + if equal_size: + t2 = t1 + else: + t2 = np.arange(x.ndim) + rng.shuffle(t2) + + a = x[s1] + + if equal_size: + if a.size == 0: + continue + + steps2 = tuple(rng.randint(1, max(2, p//(1+pa))) + if rng.randint(0, 5) == 0 else 1 + for p, s, pa in zip(x.shape, s1, a.shape)) + s2 = tuple(random_slice_fixed_size(p, s, pa) + for p, s, pa in zip(x.shape, steps2, a.shape)) + elif same_steps: + steps2 = steps + else: + steps2 = tuple(rng.randint(1, 11, dtype=np.intp) + if rng.randint(0, 5, dtype=np.intp) == 0 else 1 + for j in range(x.ndim)) + + if not equal_size: + s2 = tuple(random_slice(p, s) for p, s in zip(x.shape, steps2)) + + a = a.transpose(t1) + b = x[s2].transpose(t2) + + yield a, b + + +def check_may_share_memory_easy_fuzz(get_max_work, same_steps, 
min_count): + # Check that overlap problems with common strides are solved with + # little work. + x = np.zeros([17,34,71,97], dtype=np.int16) + + feasible = 0 + infeasible = 0 + + pair_iter = iter_random_view_pairs(x, same_steps) + + while min(feasible, infeasible) < min_count: + a, b = next(pair_iter) + + bounds_overlap = np.may_share_memory(a, b) + may_share_answer = np.may_share_memory(a, b) + easy_answer = np.may_share_memory(a, b, max_work=get_max_work(a, b)) + exact_answer = np.may_share_memory(a, b, max_work=MAY_SHARE_EXACT) + + if easy_answer != exact_answer: + # assert_equal is slow... + assert_equal(easy_answer, exact_answer) + + if may_share_answer != bounds_overlap: + assert_equal(may_share_answer, bounds_overlap) + + if bounds_overlap: + if exact_answer: + feasible += 1 + else: + infeasible += 1 + + +@pytest.mark.slow +def test_may_share_memory_easy_fuzz(): + # Check that overlap problems with common strides are always + # solved with little work. + + check_may_share_memory_easy_fuzz(get_max_work=lambda a, b: 1, + same_steps=True, + min_count=2000) + + +@pytest.mark.slow +def test_may_share_memory_harder_fuzz(): + # Overlap problems with not necessarily common strides take more + # work. + # + # The work bound below can't be reduced much. Harder problems can + # also exist but not be detected here, as the set of problems + # comes from RNG. + + check_may_share_memory_easy_fuzz(get_max_work=lambda a, b: max(a.size, b.size)//2, + same_steps=False, + min_count=2000) + + +def test_shares_memory_api(): + x = np.zeros([4, 5, 6], dtype=np.int8) + + assert_equal(np.shares_memory(x, x), True) + assert_equal(np.shares_memory(x, x.copy()), False) + + a = x[:,::2,::3] + b = x[:,::3,::2] + assert_equal(np.shares_memory(a, b), True) + assert_equal(np.shares_memory(a, b, max_work=None), True) + assert_raises(np.TooHardError, np.shares_memory, a, b, max_work=1) + + +def test_may_share_memory_bad_max_work(): + x = np.zeros([1]) + assert_raises(OverflowError, np.may_share_memory, x, x, max_work=10**100) + assert_raises(OverflowError, np.shares_memory, x, x, max_work=10**100) + + +def test_internal_overlap_diophantine(): + def check(A, U, exists=None): + X = solve_diophantine(A, U, 0, require_ub_nontrivial=1) + + if exists is None: + exists = (X is not None) + + if X is not None: + assert_(sum(a*x for a, x in zip(A, X)) == sum(a*u//2 for a, u in zip(A, U))) + assert_(all(0 <= x <= u for x, u in zip(X, U))) + assert_(any(x != u//2 for x, u in zip(X, U))) + + if exists: + assert_(X is not None, repr(X)) + else: + assert_(X is None, repr(X)) + + # Smoke tests + check((3, 2), (2*2, 3*2), exists=True) + check((3*2, 2), (15*2, (3-1)*2), exists=False) + + +def test_internal_overlap_slices(): + # Slicing an array never generates internal overlap + + x = np.zeros([17,34,71,97], dtype=np.int16) + + rng = np.random.RandomState(1234) + + def random_slice(n, step): + start = rng.randint(0, n+1, dtype=np.intp) + stop = rng.randint(start, n+1, dtype=np.intp) + if rng.randint(0, 2, dtype=np.intp) == 0: + stop, start = start, stop + step *= -1 + return slice(start, stop, step) + + cases = 0 + min_count = 5000 + + while cases < min_count: + steps = tuple(rng.randint(1, 11, dtype=np.intp) + if rng.randint(0, 5, dtype=np.intp) == 0 else 1 + for j in range(x.ndim)) + t1 = np.arange(x.ndim) + rng.shuffle(t1) + s1 = tuple(random_slice(p, s) for p, s in zip(x.shape, steps)) + a = x[s1].transpose(t1) + + assert_(not internal_overlap(a)) + cases += 1 + + +def check_internal_overlap(a, manual_expected=None): + got = 
internal_overlap(a) + + # Brute-force check + m = set() + ranges = tuple(range(n) for n in a.shape) + for v in itertools.product(*ranges): + offset = sum(s*w for s, w in zip(a.strides, v)) + if offset in m: + expected = True + break + else: + m.add(offset) + else: + expected = False + + # Compare + if got != expected: + assert_equal(got, expected, err_msg=repr((a.strides, a.shape))) + if manual_expected is not None and expected != manual_expected: + assert_equal(expected, manual_expected) + return got + + +def test_internal_overlap_manual(): + # Stride tricks can construct arrays with internal overlap + + # We don't care about memory bounds, the array is not + # read/write accessed + x = np.arange(1).astype(np.int8) + + # Check low-dimensional special cases + + check_internal_overlap(x, False) # 1-dim + check_internal_overlap(x.reshape([]), False) # 0-dim + + a = as_strided(x, strides=(3, 4), shape=(4, 4)) + check_internal_overlap(a, False) + + a = as_strided(x, strides=(3, 4), shape=(5, 4)) + check_internal_overlap(a, True) + + a = as_strided(x, strides=(0,), shape=(0,)) + check_internal_overlap(a, False) + + a = as_strided(x, strides=(0,), shape=(1,)) + check_internal_overlap(a, False) + + a = as_strided(x, strides=(0,), shape=(2,)) + check_internal_overlap(a, True) + + a = as_strided(x, strides=(0, -9993), shape=(87, 22)) + check_internal_overlap(a, True) + + a = as_strided(x, strides=(0, -9993), shape=(1, 22)) + check_internal_overlap(a, False) + + a = as_strided(x, strides=(0, -9993), shape=(0, 22)) + check_internal_overlap(a, False) + + +def test_internal_overlap_fuzz(): + # Fuzz check; the brute-force check is fairly slow + + x = np.arange(1).astype(np.int8) + + overlap = 0 + no_overlap = 0 + min_count = 100 + + rng = np.random.RandomState(1234) + + while min(overlap, no_overlap) < min_count: + ndim = rng.randint(1, 4, dtype=np.intp) + + strides = tuple(rng.randint(-1000, 1000, dtype=np.intp) + for j in range(ndim)) + shape = tuple(rng.randint(1, 30, dtype=np.intp) + for j in range(ndim)) + + a = as_strided(x, strides=strides, shape=shape) + result = check_internal_overlap(a) + + if result: + overlap += 1 + else: + no_overlap += 1 + + +def test_non_ndarray_inputs(): + # Regression check for gh-5604 + + class MyArray: + def __init__(self, data): + self.data = data + + @property + def __array_interface__(self): + return self.data.__array_interface__ + + class MyArray2: + def __init__(self, data): + self.data = data + + def __array__(self): + return self.data + + for cls in [MyArray, MyArray2]: + x = np.arange(5) + + assert_(np.may_share_memory(cls(x[::2]), x[1::2])) + assert_(not np.shares_memory(cls(x[::2]), x[1::2])) + + assert_(np.shares_memory(cls(x[1::3]), x[::2])) + assert_(np.may_share_memory(cls(x[1::3]), x[::2])) + + +def view_element_first_byte(x): + """Construct an array viewing the first byte of each element of `x`""" + from numpy.lib.stride_tricks import DummyArray + interface = dict(x.__array_interface__) + interface['typestr'] = '|b1' + interface['descr'] = [('', '|b1')] + return np.asarray(DummyArray(interface, x)) + + +def assert_copy_equivalent(operation, args, out, **kwargs): + """ + Check that operation(*args, out=out) produces results + equivalent to out[...] = operation(*args, out=out.copy()) + """ + + kwargs['out'] = out + kwargs2 = dict(kwargs) + kwargs2['out'] = out.copy() + + out_orig = out.copy() + out[...] = operation(*args, **kwargs2) + expected = out.copy() + out[...] 
= out_orig + + got = operation(*args, **kwargs).copy() + + if (got != expected).any(): + assert_equal(got, expected) + + +class TestUFunc: + """ + Test ufunc call memory overlap handling + """ + + def check_unary_fuzz(self, operation, get_out_axis_size, dtype=np.int16, + count=5000): + shapes = [7, 13, 8, 21, 29, 32] + + rng = np.random.RandomState(1234) + + for ndim in range(1, 6): + x = rng.randint(0, 2**16, size=shapes[:ndim]).astype(dtype) + + it = iter_random_view_pairs(x, same_steps=False, equal_size=True) + + min_count = count // (ndim + 1)**2 + + overlapping = 0 + while overlapping < min_count: + a, b = next(it) + + a_orig = a.copy() + b_orig = b.copy() + + if get_out_axis_size is None: + assert_copy_equivalent(operation, [a], out=b) + + if np.shares_memory(a, b): + overlapping += 1 + else: + for axis in itertools.chain(range(ndim), [None]): + a[...] = a_orig + b[...] = b_orig + + # Determine size for reduction axis (None if scalar) + outsize, scalarize = get_out_axis_size(a, b, axis) + if outsize == 'skip': + continue + + # Slice b to get an output array of the correct size + sl = [slice(None)] * ndim + if axis is None: + if outsize is None: + sl = [slice(0, 1)] + [0]*(ndim - 1) + else: + sl = [slice(0, outsize)] + [0]*(ndim - 1) + else: + if outsize is None: + k = b.shape[axis]//2 + if ndim == 1: + sl[axis] = slice(k, k + 1) + else: + sl[axis] = k + else: + assert b.shape[axis] >= outsize + sl[axis] = slice(0, outsize) + b_out = b[tuple(sl)] + + if scalarize: + b_out = b_out.reshape([]) + + if np.shares_memory(a, b_out): + overlapping += 1 + + # Check result + assert_copy_equivalent(operation, [a], out=b_out, axis=axis) + + @pytest.mark.slow + def test_unary_ufunc_call_fuzz(self): + self.check_unary_fuzz(np.invert, None, np.int16) + + @pytest.mark.slow + def test_unary_ufunc_call_complex_fuzz(self): + # Complex typically has a smaller alignment than itemsize + self.check_unary_fuzz(np.negative, None, np.complex128, count=500) + + def test_binary_ufunc_accumulate_fuzz(self): + def get_out_axis_size(a, b, axis): + if axis is None: + if a.ndim == 1: + return a.size, False + else: + return 'skip', False # accumulate doesn't support this + else: + return a.shape[axis], False + + self.check_unary_fuzz(np.add.accumulate, get_out_axis_size, + dtype=np.int16, count=500) + + def test_binary_ufunc_reduce_fuzz(self): + def get_out_axis_size(a, b, axis): + return None, (axis is None or a.ndim == 1) + + self.check_unary_fuzz(np.add.reduce, get_out_axis_size, + dtype=np.int16, count=500) + + def test_binary_ufunc_reduceat_fuzz(self): + def get_out_axis_size(a, b, axis): + if axis is None: + if a.ndim == 1: + return a.size, False + else: + return 'skip', False # reduceat doesn't support this + else: + return a.shape[axis], False + + def do_reduceat(a, out, axis): + if axis is None: + size = len(a) + step = size//len(out) + else: + size = a.shape[axis] + step = a.shape[axis] // out.shape[axis] + idx = np.arange(0, size, step) + return np.add.reduceat(a, idx, out=out, axis=axis) + + self.check_unary_fuzz(do_reduceat, get_out_axis_size, + dtype=np.int16, count=500) + + def test_binary_ufunc_reduceat_manual(self): + def check(ufunc, a, ind, out): + c1 = ufunc.reduceat(a.copy(), ind.copy(), out=out.copy()) + c2 = ufunc.reduceat(a, ind, out=out) + assert_array_equal(c1, c2) + + # Exactly same input/output arrays + a = np.arange(10000, dtype=np.int16) + check(np.add, a, a[::-1].copy(), a) + + # Overlap with index + a = np.arange(10000, dtype=np.int16) + check(np.add, a, a[::-1], a) + + 
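+
+    # Editor's note: the following method is an illustrative sketch added
+    # during editing and is not part of the upstream suite.  It spells out the
+    # property that assert_copy_equivalent() exercises throughout this class:
+    # recent NumPy (the change is attributed to 1.13) gives a ufunc call whose
+    # `out=` argument overlaps its input the same result as if the output had
+    # been a non-overlapping copy.
+    def test_overlap_copy_equivalence_sketch(self):
+        a = np.arange(10, dtype=np.int16)
+        expected = np.negative(a[:-1].copy())   # result with a non-overlapping out
+        got = np.negative(a[:-1], out=a[1:])    # out= overlaps the input view
+        assert_array_equal(got, expected)
+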
@pytest.mark.slow + def test_unary_gufunc_fuzz(self): + shapes = [7, 13, 8, 21, 29, 32] + gufunc = _umath_tests.euclidean_pdist + + rng = np.random.RandomState(1234) + + for ndim in range(2, 6): + x = rng.rand(*shapes[:ndim]) + + it = iter_random_view_pairs(x, same_steps=False, equal_size=True) + + min_count = 500 // (ndim + 1)**2 + + overlapping = 0 + while overlapping < min_count: + a, b = next(it) + + if min(a.shape[-2:]) < 2 or min(b.shape[-2:]) < 2 or a.shape[-1] < 2: + continue + + # Ensure the shapes are so that euclidean_pdist is happy + if b.shape[-1] > b.shape[-2]: + b = b[...,0,:] + else: + b = b[...,:,0] + + n = a.shape[-2] + p = n * (n - 1) // 2 + if p <= b.shape[-1] and p > 0: + b = b[...,:p] + else: + n = max(2, int(np.sqrt(b.shape[-1]))//2) + p = n * (n - 1) // 2 + a = a[...,:n,:] + b = b[...,:p] + + # Call + if np.shares_memory(a, b): + overlapping += 1 + + with np.errstate(over='ignore', invalid='ignore'): + assert_copy_equivalent(gufunc, [a], out=b) + + def test_ufunc_at_manual(self): + def check(ufunc, a, ind, b=None): + a0 = a.copy() + if b is None: + ufunc.at(a0, ind.copy()) + c1 = a0.copy() + ufunc.at(a, ind) + c2 = a.copy() + else: + ufunc.at(a0, ind.copy(), b.copy()) + c1 = a0.copy() + ufunc.at(a, ind, b) + c2 = a.copy() + assert_array_equal(c1, c2) + + # Overlap with index + a = np.arange(10000, dtype=np.int16) + check(np.invert, a[::-1], a) + + # Overlap with second data array + a = np.arange(100, dtype=np.int16) + ind = np.arange(0, 100, 2, dtype=np.int16) + check(np.add, a, ind, a[25:75]) + + def test_unary_ufunc_1d_manual(self): + # Exercise ufunc fast-paths (that avoid creation of an `np.nditer`) + + def check(a, b): + a_orig = a.copy() + b_orig = b.copy() + + b0 = b.copy() + c1 = ufunc(a, out=b0) + c2 = ufunc(a, out=b) + assert_array_equal(c1, c2) + + # Trigger "fancy ufunc loop" code path + mask = view_element_first_byte(b).view(np.bool_) + + a[...] = a_orig + b[...] = b_orig + c1 = ufunc(a, out=b.copy(), where=mask.copy()).copy() + + a[...] = a_orig + b[...] = b_orig + c2 = ufunc(a, out=b, where=mask.copy()).copy() + + # Also, mask overlapping with output + a[...] = a_orig + b[...] 
= b_orig + c3 = ufunc(a, out=b, where=mask).copy() + + assert_array_equal(c1, c2) + assert_array_equal(c1, c3) + + dtypes = [np.int8, np.int16, np.int32, np.int64, np.float32, + np.float64, np.complex64, np.complex128] + dtypes = [np.dtype(x) for x in dtypes] + + for dtype in dtypes: + if np.issubdtype(dtype, np.integer): + ufunc = np.invert + else: + ufunc = np.reciprocal + + n = 1000 + k = 10 + indices = [ + np.index_exp[:n], + np.index_exp[k:k+n], + np.index_exp[n-1::-1], + np.index_exp[k+n-1:k-1:-1], + np.index_exp[:2*n:2], + np.index_exp[k:k+2*n:2], + np.index_exp[2*n-1::-2], + np.index_exp[k+2*n-1:k-1:-2], + ] + + for xi, yi in itertools.product(indices, indices): + v = np.arange(1, 1 + n*2 + k, dtype=dtype) + x = v[xi] + y = v[yi] + + with np.errstate(all='ignore'): + check(x, y) + + # Scalar cases + check(x[:1], y) + check(x[-1:], y) + check(x[:1].reshape([]), y) + check(x[-1:].reshape([]), y) + + def test_unary_ufunc_where_same(self): + # Check behavior at wheremask overlap + ufunc = np.invert + + def check(a, out, mask): + c1 = ufunc(a, out=out.copy(), where=mask.copy()) + c2 = ufunc(a, out=out, where=mask) + assert_array_equal(c1, c2) + + # Check behavior with same input and output arrays + x = np.arange(100).astype(np.bool_) + check(x, x, x) + check(x, x.copy(), x) + check(x, x, x.copy()) + + @pytest.mark.slow + def test_binary_ufunc_1d_manual(self): + ufunc = np.add + + def check(a, b, c): + c0 = c.copy() + c1 = ufunc(a, b, out=c0) + c2 = ufunc(a, b, out=c) + assert_array_equal(c1, c2) + + for dtype in [np.int8, np.int16, np.int32, np.int64, + np.float32, np.float64, np.complex64, np.complex128]: + # Check different data dependency orders + + n = 1000 + k = 10 + + indices = [] + for p in [1, 2]: + indices.extend([ + np.index_exp[:p*n:p], + np.index_exp[k:k+p*n:p], + np.index_exp[p*n-1::-p], + np.index_exp[k+p*n-1:k-1:-p], + ]) + + for x, y, z in itertools.product(indices, indices, indices): + v = np.arange(6*n).astype(dtype) + x = v[x] + y = v[y] + z = v[z] + + check(x, y, z) + + # Scalar cases + check(x[:1], y, z) + check(x[-1:], y, z) + check(x[:1].reshape([]), y, z) + check(x[-1:].reshape([]), y, z) + check(x, y[:1], z) + check(x, y[-1:], z) + check(x, y[:1].reshape([]), z) + check(x, y[-1:].reshape([]), z) + + def test_inplace_op_simple_manual(self): + rng = np.random.RandomState(1234) + x = rng.rand(200, 200) # bigger than bufsize + + x += x.T + assert_array_equal(x - x.T, 0) diff --git a/.venv/lib/python3.12/site-packages/numpy/core/tests/test_mem_policy.py b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_mem_policy.py new file mode 100644 index 00000000..a381fa1d --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_mem_policy.py @@ -0,0 +1,443 @@ +import asyncio +import gc +import os +import pytest +import numpy as np +import threading +import warnings +from numpy.testing import extbuild, assert_warns, IS_WASM +import sys + + +# FIXME: numpy.testing.extbuild uses `numpy.distutils`, so this won't work on +# Python 3.12 and up. It's an internal test utility, so for now we just skip +# these tests. + + +@pytest.fixture +def get_module(tmp_path): + """ Add a memory policy that returns a false pointer 64 bytes into the + actual allocation, and fill the prefix with some text. Then check at each + memory manipulation that the prefix exists, to make sure all alloc/realloc/ + free/calloc go via the functions here. 
+ """ + if sys.platform.startswith('cygwin'): + pytest.skip('link fails on cygwin') + if IS_WASM: + pytest.skip("Can't build module inside Wasm") + functions = [ + ("get_default_policy", "METH_NOARGS", """ + Py_INCREF(PyDataMem_DefaultHandler); + return PyDataMem_DefaultHandler; + """), + ("set_secret_data_policy", "METH_NOARGS", """ + PyObject *secret_data = + PyCapsule_New(&secret_data_handler, "mem_handler", NULL); + if (secret_data == NULL) { + return NULL; + } + PyObject *old = PyDataMem_SetHandler(secret_data); + Py_DECREF(secret_data); + return old; + """), + ("set_old_policy", "METH_O", """ + PyObject *old; + if (args != NULL && PyCapsule_CheckExact(args)) { + old = PyDataMem_SetHandler(args); + } + else { + old = PyDataMem_SetHandler(NULL); + } + return old; + """), + ("get_array", "METH_NOARGS", """ + char *buf = (char *)malloc(20); + npy_intp dims[1]; + dims[0] = 20; + PyArray_Descr *descr = PyArray_DescrNewFromType(NPY_UINT8); + return PyArray_NewFromDescr(&PyArray_Type, descr, 1, dims, NULL, + buf, NPY_ARRAY_WRITEABLE, NULL); + """), + ("set_own", "METH_O", """ + if (!PyArray_Check(args)) { + PyErr_SetString(PyExc_ValueError, + "need an ndarray"); + return NULL; + } + PyArray_ENABLEFLAGS((PyArrayObject*)args, NPY_ARRAY_OWNDATA); + // Maybe try this too? + // PyArray_BASE(PyArrayObject *)args) = NULL; + Py_RETURN_NONE; + """), + ("get_array_with_base", "METH_NOARGS", """ + char *buf = (char *)malloc(20); + npy_intp dims[1]; + dims[0] = 20; + PyArray_Descr *descr = PyArray_DescrNewFromType(NPY_UINT8); + PyObject *arr = PyArray_NewFromDescr(&PyArray_Type, descr, 1, dims, + NULL, buf, + NPY_ARRAY_WRITEABLE, NULL); + if (arr == NULL) return NULL; + PyObject *obj = PyCapsule_New(buf, "buf capsule", + (PyCapsule_Destructor)&warn_on_free); + if (obj == NULL) { + Py_DECREF(arr); + return NULL; + } + if (PyArray_SetBaseObject((PyArrayObject *)arr, obj) < 0) { + Py_DECREF(arr); + Py_DECREF(obj); + return NULL; + } + return arr; + + """), + ] + prologue = ''' + #define NPY_TARGET_VERSION NPY_1_22_API_VERSION + #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION + #include <numpy/arrayobject.h> + /* + * This struct allows the dynamic configuration of the allocator funcs + * of the `secret_data_allocator`. It is provided here for + * demonstration purposes, as a valid `ctx` use-case scenario. 
+ */ + typedef struct { + void *(*malloc)(size_t); + void *(*calloc)(size_t, size_t); + void *(*realloc)(void *, size_t); + void (*free)(void *); + } SecretDataAllocatorFuncs; + + NPY_NO_EXPORT void * + shift_alloc(void *ctx, size_t sz) { + SecretDataAllocatorFuncs *funcs = (SecretDataAllocatorFuncs *)ctx; + char *real = (char *)funcs->malloc(sz + 64); + if (real == NULL) { + return NULL; + } + snprintf(real, 64, "originally allocated %ld", (unsigned long)sz); + return (void *)(real + 64); + } + NPY_NO_EXPORT void * + shift_zero(void *ctx, size_t sz, size_t cnt) { + SecretDataAllocatorFuncs *funcs = (SecretDataAllocatorFuncs *)ctx; + char *real = (char *)funcs->calloc(sz + 64, cnt); + if (real == NULL) { + return NULL; + } + snprintf(real, 64, "originally allocated %ld via zero", + (unsigned long)sz); + return (void *)(real + 64); + } + NPY_NO_EXPORT void + shift_free(void *ctx, void * p, npy_uintp sz) { + SecretDataAllocatorFuncs *funcs = (SecretDataAllocatorFuncs *)ctx; + if (p == NULL) { + return ; + } + char *real = (char *)p - 64; + if (strncmp(real, "originally allocated", 20) != 0) { + fprintf(stdout, "uh-oh, unmatched shift_free, " + "no appropriate prefix\\n"); + /* Make C runtime crash by calling free on the wrong address */ + funcs->free((char *)p + 10); + /* funcs->free(real); */ + } + else { + npy_uintp i = (npy_uintp)atoi(real +20); + if (i != sz) { + fprintf(stderr, "uh-oh, unmatched shift_free" + "(ptr, %ld) but allocated %ld\\n", sz, i); + /* This happens in some places, only print */ + funcs->free(real); + } + else { + funcs->free(real); + } + } + } + NPY_NO_EXPORT void * + shift_realloc(void *ctx, void * p, npy_uintp sz) { + SecretDataAllocatorFuncs *funcs = (SecretDataAllocatorFuncs *)ctx; + if (p != NULL) { + char *real = (char *)p - 64; + if (strncmp(real, "originally allocated", 20) != 0) { + fprintf(stdout, "uh-oh, unmatched shift_realloc\\n"); + return realloc(p, sz); + } + return (void *)((char *)funcs->realloc(real, sz + 64) + 64); + } + else { + char *real = (char *)funcs->realloc(p, sz + 64); + if (real == NULL) { + return NULL; + } + snprintf(real, 64, "originally allocated " + "%ld via realloc", (unsigned long)sz); + return (void *)(real + 64); + } + } + /* As an example, we use the standard {m|c|re}alloc/free funcs. 
*/ + static SecretDataAllocatorFuncs secret_data_handler_ctx = { + malloc, + calloc, + realloc, + free + }; + static PyDataMem_Handler secret_data_handler = { + "secret_data_allocator", + 1, + { + &secret_data_handler_ctx, /* ctx */ + shift_alloc, /* malloc */ + shift_zero, /* calloc */ + shift_realloc, /* realloc */ + shift_free /* free */ + } + }; + void warn_on_free(void *capsule) { + PyErr_WarnEx(PyExc_UserWarning, "in warn_on_free", 1); + void * obj = PyCapsule_GetPointer(capsule, + PyCapsule_GetName(capsule)); + free(obj); + }; + ''' + more_init = "import_array();" + try: + import mem_policy + return mem_policy + except ImportError: + pass + # if it does not exist, build and load it + return extbuild.build_and_import_extension('mem_policy', + functions, + prologue=prologue, + include_dirs=[np.get_include()], + build_dir=tmp_path, + more_init=more_init) + + +@pytest.mark.skipif(sys.version_info >= (3, 12), reason="no numpy.distutils") +def test_set_policy(get_module): + + get_handler_name = np.core.multiarray.get_handler_name + get_handler_version = np.core.multiarray.get_handler_version + orig_policy_name = get_handler_name() + + a = np.arange(10).reshape((2, 5)) # a doesn't own its own data + assert get_handler_name(a) is None + assert get_handler_version(a) is None + assert get_handler_name(a.base) == orig_policy_name + assert get_handler_version(a.base) == 1 + + orig_policy = get_module.set_secret_data_policy() + + b = np.arange(10).reshape((2, 5)) # b doesn't own its own data + assert get_handler_name(b) is None + assert get_handler_version(b) is None + assert get_handler_name(b.base) == 'secret_data_allocator' + assert get_handler_version(b.base) == 1 + + if orig_policy_name == 'default_allocator': + get_module.set_old_policy(None) # tests PyDataMem_SetHandler(NULL) + assert get_handler_name() == 'default_allocator' + else: + get_module.set_old_policy(orig_policy) + assert get_handler_name() == orig_policy_name + + +@pytest.mark.skipif(sys.version_info >= (3, 12), reason="no numpy.distutils") +def test_default_policy_singleton(get_module): + get_handler_name = np.core.multiarray.get_handler_name + + # set the policy to default + orig_policy = get_module.set_old_policy(None) + + assert get_handler_name() == 'default_allocator' + + # re-set the policy to default + def_policy_1 = get_module.set_old_policy(None) + + assert get_handler_name() == 'default_allocator' + + # set the policy to original + def_policy_2 = get_module.set_old_policy(orig_policy) + + # since default policy is a singleton, + # these should be the same object + assert def_policy_1 is def_policy_2 is get_module.get_default_policy() + + +@pytest.mark.skipif(sys.version_info >= (3, 12), reason="no numpy.distutils") +def test_policy_propagation(get_module): + # The memory policy goes hand-in-hand with flags.owndata + + class MyArr(np.ndarray): + pass + + get_handler_name = np.core.multiarray.get_handler_name + orig_policy_name = get_handler_name() + a = np.arange(10).view(MyArr).reshape((2, 5)) + assert get_handler_name(a) is None + assert a.flags.owndata is False + + assert get_handler_name(a.base) is None + assert a.base.flags.owndata is False + + assert get_handler_name(a.base.base) == orig_policy_name + assert a.base.base.flags.owndata is True + + +async def concurrent_context1(get_module, orig_policy_name, event): + if orig_policy_name == 'default_allocator': + get_module.set_secret_data_policy() + assert np.core.multiarray.get_handler_name() == 'secret_data_allocator' + else: + get_module.set_old_policy(None) + 
assert np.core.multiarray.get_handler_name() == 'default_allocator' + event.set() + + +async def concurrent_context2(get_module, orig_policy_name, event): + await event.wait() + # the policy is not affected by changes in parallel contexts + assert np.core.multiarray.get_handler_name() == orig_policy_name + # change policy in the child context + if orig_policy_name == 'default_allocator': + get_module.set_secret_data_policy() + assert np.core.multiarray.get_handler_name() == 'secret_data_allocator' + else: + get_module.set_old_policy(None) + assert np.core.multiarray.get_handler_name() == 'default_allocator' + + +async def async_test_context_locality(get_module): + orig_policy_name = np.core.multiarray.get_handler_name() + + event = asyncio.Event() + # the child contexts inherit the parent policy + concurrent_task1 = asyncio.create_task( + concurrent_context1(get_module, orig_policy_name, event)) + concurrent_task2 = asyncio.create_task( + concurrent_context2(get_module, orig_policy_name, event)) + await concurrent_task1 + await concurrent_task2 + + # the parent context is not affected by child policy changes + assert np.core.multiarray.get_handler_name() == orig_policy_name + + +@pytest.mark.skipif(sys.version_info >= (3, 12), reason="no numpy.distutils") +def test_context_locality(get_module): + if (sys.implementation.name == 'pypy' + and sys.pypy_version_info[:3] < (7, 3, 6)): + pytest.skip('no context-locality support in PyPy < 7.3.6') + asyncio.run(async_test_context_locality(get_module)) + + +def concurrent_thread1(get_module, event): + get_module.set_secret_data_policy() + assert np.core.multiarray.get_handler_name() == 'secret_data_allocator' + event.set() + + +def concurrent_thread2(get_module, event): + event.wait() + # the policy is not affected by changes in parallel threads + assert np.core.multiarray.get_handler_name() == 'default_allocator' + # change policy in the child thread + get_module.set_secret_data_policy() + + +@pytest.mark.skipif(sys.version_info >= (3, 12), reason="no numpy.distutils") +def test_thread_locality(get_module): + orig_policy_name = np.core.multiarray.get_handler_name() + + event = threading.Event() + # the child threads do not inherit the parent policy + concurrent_task1 = threading.Thread(target=concurrent_thread1, + args=(get_module, event)) + concurrent_task2 = threading.Thread(target=concurrent_thread2, + args=(get_module, event)) + concurrent_task1.start() + concurrent_task2.start() + concurrent_task1.join() + concurrent_task2.join() + + # the parent thread is not affected by child policy changes + assert np.core.multiarray.get_handler_name() == orig_policy_name + + +@pytest.mark.skipif(sys.version_info >= (3, 12), reason="no numpy.distutils") +@pytest.mark.skip(reason="too slow, see gh-23975") +def test_new_policy(get_module): + a = np.arange(10) + orig_policy_name = np.core.multiarray.get_handler_name(a) + + orig_policy = get_module.set_secret_data_policy() + + b = np.arange(10) + assert np.core.multiarray.get_handler_name(b) == 'secret_data_allocator' + + # test array manipulation. 
This is slow + if orig_policy_name == 'default_allocator': + # when the np.core.test tests recurse into this test, the + # policy will be set so this "if" will be false, preventing + # infinite recursion + # + # if needed, debug this by + # - running tests with -- -s (to not capture stdout/stderr + # - setting verbose=2 + # - setting extra_argv=['-vv'] here + assert np.core.test('full', verbose=1, extra_argv=[]) + # also try the ma tests, the pickling test is quite tricky + assert np.ma.test('full', verbose=1, extra_argv=[]) + + get_module.set_old_policy(orig_policy) + + c = np.arange(10) + assert np.core.multiarray.get_handler_name(c) == orig_policy_name + + +@pytest.mark.skipif(sys.version_info >= (3, 12), reason="no numpy.distutils") +@pytest.mark.xfail(sys.implementation.name == "pypy", + reason=("bad interaction between getenv and " + "os.environ inside pytest")) +@pytest.mark.parametrize("policy", ["0", "1", None]) +def test_switch_owner(get_module, policy): + a = get_module.get_array() + assert np.core.multiarray.get_handler_name(a) is None + get_module.set_own(a) + + if policy is None: + # See what we expect to be set based on the env variable + policy = os.getenv("NUMPY_WARN_IF_NO_MEM_POLICY", "0") == "1" + oldval = None + else: + policy = policy == "1" + oldval = np.core._multiarray_umath._set_numpy_warn_if_no_mem_policy( + policy) + try: + # The policy should be NULL, so we have to assume we can call + # "free". A warning is given if the policy == "1" + if policy: + with assert_warns(RuntimeWarning) as w: + del a + gc.collect() + else: + del a + gc.collect() + + finally: + if oldval is not None: + np.core._multiarray_umath._set_numpy_warn_if_no_mem_policy(oldval) + + +@pytest.mark.skipif(sys.version_info >= (3, 12), reason="no numpy.distutils") +def test_owner_is_base(get_module): + a = get_module.get_array_with_base() + with pytest.warns(UserWarning, match='warn_on_free'): + del a + gc.collect() + gc.collect() diff --git a/.venv/lib/python3.12/site-packages/numpy/core/tests/test_memmap.py b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_memmap.py new file mode 100644 index 00000000..ad074b31 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_memmap.py @@ -0,0 +1,215 @@ +import sys +import os +import mmap +import pytest +from pathlib import Path +from tempfile import NamedTemporaryFile, TemporaryFile + +from numpy import ( + memmap, sum, average, prod, ndarray, isscalar, add, subtract, multiply) + +from numpy import arange, allclose, asarray +from numpy.testing import ( + assert_, assert_equal, assert_array_equal, suppress_warnings, IS_PYPY, + break_cycles + ) + +class TestMemmap: + def setup_method(self): + self.tmpfp = NamedTemporaryFile(prefix='mmap') + self.shape = (3, 4) + self.dtype = 'float32' + self.data = arange(12, dtype=self.dtype) + self.data.resize(self.shape) + + def teardown_method(self): + self.tmpfp.close() + self.data = None + if IS_PYPY: + break_cycles() + break_cycles() + + def test_roundtrip(self): + # Write data to file + fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+', + shape=self.shape) + fp[:] = self.data[:] + del fp # Test __del__ machinery, which handles cleanup + + # Read data back from file + newfp = memmap(self.tmpfp, dtype=self.dtype, mode='r', + shape=self.shape) + assert_(allclose(self.data, newfp)) + assert_array_equal(self.data, newfp) + assert_equal(newfp.flags.writeable, False) + + def test_open_with_filename(self, tmp_path): + tmpname = tmp_path / 'mmap' + fp = memmap(tmpname, dtype=self.dtype, 
mode='w+', + shape=self.shape) + fp[:] = self.data[:] + del fp + + def test_unnamed_file(self): + with TemporaryFile() as f: + fp = memmap(f, dtype=self.dtype, shape=self.shape) + del fp + + def test_attributes(self): + offset = 1 + mode = "w+" + fp = memmap(self.tmpfp, dtype=self.dtype, mode=mode, + shape=self.shape, offset=offset) + assert_equal(offset, fp.offset) + assert_equal(mode, fp.mode) + del fp + + def test_filename(self, tmp_path): + tmpname = tmp_path / "mmap" + fp = memmap(tmpname, dtype=self.dtype, mode='w+', + shape=self.shape) + abspath = Path(os.path.abspath(tmpname)) + fp[:] = self.data[:] + assert_equal(abspath, fp.filename) + b = fp[:1] + assert_equal(abspath, b.filename) + del b + del fp + + def test_path(self, tmp_path): + tmpname = tmp_path / "mmap" + fp = memmap(Path(tmpname), dtype=self.dtype, mode='w+', + shape=self.shape) + # os.path.realpath does not resolve symlinks on Windows + # see: https://bugs.python.org/issue9949 + # use Path.resolve, just as memmap class does internally + abspath = str(Path(tmpname).resolve()) + fp[:] = self.data[:] + assert_equal(abspath, str(fp.filename.resolve())) + b = fp[:1] + assert_equal(abspath, str(b.filename.resolve())) + del b + del fp + + def test_filename_fileobj(self): + fp = memmap(self.tmpfp, dtype=self.dtype, mode="w+", + shape=self.shape) + assert_equal(fp.filename, self.tmpfp.name) + + @pytest.mark.skipif(sys.platform == 'gnu0', + reason="Known to fail on hurd") + def test_flush(self): + fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+', + shape=self.shape) + fp[:] = self.data[:] + assert_equal(fp[0], self.data[0]) + fp.flush() + + def test_del(self): + # Make sure a view does not delete the underlying mmap + fp_base = memmap(self.tmpfp, dtype=self.dtype, mode='w+', + shape=self.shape) + fp_base[0] = 5 + fp_view = fp_base[0:1] + assert_equal(fp_view[0], 5) + del fp_view + # Should still be able to access and assign values after + # deleting the view + assert_equal(fp_base[0], 5) + fp_base[0] = 6 + assert_equal(fp_base[0], 6) + + def test_arithmetic_drops_references(self): + fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+', + shape=self.shape) + tmp = (fp + 10) + if isinstance(tmp, memmap): + assert_(tmp._mmap is not fp._mmap) + + def test_indexing_drops_references(self): + fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+', + shape=self.shape) + tmp = fp[(1, 2), (2, 3)] + if isinstance(tmp, memmap): + assert_(tmp._mmap is not fp._mmap) + + def test_slicing_keeps_references(self): + fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+', + shape=self.shape) + assert_(fp[:2, :2]._mmap is fp._mmap) + + def test_view(self): + fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape) + new1 = fp.view() + new2 = new1.view() + assert_(new1.base is fp) + assert_(new2.base is fp) + new_array = asarray(fp) + assert_(new_array.base is fp) + + def test_ufunc_return_ndarray(self): + fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape) + fp[:] = self.data + + with suppress_warnings() as sup: + sup.filter(FutureWarning, "np.average currently does not preserve") + for unary_op in [sum, average, prod]: + result = unary_op(fp) + assert_(isscalar(result)) + assert_(result.__class__ is self.data[0, 0].__class__) + + assert_(unary_op(fp, axis=0).__class__ is ndarray) + assert_(unary_op(fp, axis=1).__class__ is ndarray) + + for binary_op in [add, subtract, multiply]: + assert_(binary_op(fp, self.data).__class__ is ndarray) + assert_(binary_op(self.data, fp).__class__ is ndarray) + assert_(binary_op(fp, fp).__class__ is ndarray) 
+ + fp += 1 + assert(fp.__class__ is memmap) + add(fp, 1, out=fp) + assert(fp.__class__ is memmap) + + def test_getitem(self): + fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape) + fp[:] = self.data + + assert_(fp[1:, :-1].__class__ is memmap) + # Fancy indexing returns a copy that is not memmapped + assert_(fp[[0, 1]].__class__ is ndarray) + + def test_memmap_subclass(self): + class MemmapSubClass(memmap): + pass + + fp = MemmapSubClass(self.tmpfp, dtype=self.dtype, shape=self.shape) + fp[:] = self.data + + # We keep previous behavior for subclasses of memmap, i.e. the + # ufunc and __getitem__ output is never turned into a ndarray + assert_(sum(fp, axis=0).__class__ is MemmapSubClass) + assert_(sum(fp).__class__ is MemmapSubClass) + assert_(fp[1:, :-1].__class__ is MemmapSubClass) + assert(fp[[0, 1]].__class__ is MemmapSubClass) + + def test_mmap_offset_greater_than_allocation_granularity(self): + size = 5 * mmap.ALLOCATIONGRANULARITY + offset = mmap.ALLOCATIONGRANULARITY + 1 + fp = memmap(self.tmpfp, shape=size, mode='w+', offset=offset) + assert_(fp.offset == offset) + + def test_no_shape(self): + self.tmpfp.write(b'a'*16) + mm = memmap(self.tmpfp, dtype='float64') + assert_equal(mm.shape, (2,)) + + def test_empty_array(self): + # gh-12653 + with pytest.raises(ValueError, match='empty file'): + memmap(self.tmpfp, shape=(0,4), mode='w+') + + self.tmpfp.write(b'\0') + + # ok now the file is not empty + memmap(self.tmpfp, shape=(0,4), mode='w+') diff --git a/.venv/lib/python3.12/site-packages/numpy/core/tests/test_multiarray.py b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_multiarray.py new file mode 100644 index 00000000..ace40049 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_multiarray.py @@ -0,0 +1,10054 @@ +from __future__ import annotations + +import collections.abc +import tempfile +import sys +import warnings +import operator +import io +import itertools +import functools +import ctypes +import os +import gc +import re +import weakref +import pytest +from contextlib import contextmanager + +from numpy.compat import pickle + +import pathlib +import builtins +from decimal import Decimal +import mmap + +import numpy as np +import numpy.core._multiarray_tests as _multiarray_tests +from numpy.core._rational_tests import rational +from numpy.testing import ( + assert_, assert_raises, assert_warns, assert_equal, assert_almost_equal, + assert_array_equal, assert_raises_regex, assert_array_almost_equal, + assert_allclose, IS_PYPY, IS_PYSTON, HAS_REFCOUNT, assert_array_less, + runstring, temppath, suppress_warnings, break_cycles, _SUPPORTS_SVE, + ) +from numpy.testing._private.utils import requires_memory, _no_tracing +from numpy.core.tests._locales import CommaDecimalPointLocale +from numpy.lib.recfunctions import repack_fields +from numpy.core.multiarray import _get_ndarray_c_version + +# Need to test an object that does not fully implement math interface +from datetime import timedelta, datetime + + +def assert_arg_sorted(arr, arg): + # resulting array should be sorted and arg values should be unique + assert_equal(arr[arg], np.sort(arr)) + assert_equal(np.sort(arg), np.arange(len(arg))) + + +def _aligned_zeros(shape, dtype=float, order="C", align=None): + """ + Allocate a new ndarray with aligned memory. + + The ndarray is guaranteed *not* aligned to twice the requested alignment. + Eg, if align=4, guarantees it is not aligned to 8. 
If align=None uses + dtype.alignment.""" + dtype = np.dtype(dtype) + if dtype == np.dtype(object): + # Can't do this, fall back to standard allocation (which + # should always be sufficiently aligned) + if align is not None: + raise ValueError("object array alignment not supported") + return np.zeros(shape, dtype=dtype, order=order) + if align is None: + align = dtype.alignment + if not hasattr(shape, '__len__'): + shape = (shape,) + size = functools.reduce(operator.mul, shape) * dtype.itemsize + buf = np.empty(size + 2*align + 1, np.uint8) + + ptr = buf.__array_interface__['data'][0] + offset = ptr % align + if offset != 0: + offset = align - offset + if (ptr % (2*align)) == 0: + offset += align + + # Note: slices producing 0-size arrays do not necessarily change + # data pointer --- so we use and allocate size+1 + buf = buf[offset:offset+size+1][:-1] + buf.fill(0) + data = np.ndarray(shape, dtype, buf, order=order) + return data + + +class TestFlags: + def setup_method(self): + self.a = np.arange(10) + + def test_writeable(self): + mydict = locals() + self.a.flags.writeable = False + assert_raises(ValueError, runstring, 'self.a[0] = 3', mydict) + assert_raises(ValueError, runstring, 'self.a[0:1].itemset(3)', mydict) + self.a.flags.writeable = True + self.a[0] = 5 + self.a[0] = 0 + + def test_writeable_any_base(self): + # Ensure that any base being writeable is sufficient to change flag; + # this is especially interesting for arrays from an array interface. + arr = np.arange(10) + + class subclass(np.ndarray): + pass + + # Create subclass so base will not be collapsed, this is OK to change + view1 = arr.view(subclass) + view2 = view1[...] + arr.flags.writeable = False + view2.flags.writeable = False + view2.flags.writeable = True # Can be set to True again. + + arr = np.arange(10) + + class frominterface: + def __init__(self, arr): + self.arr = arr + self.__array_interface__ = arr.__array_interface__ + + view1 = np.asarray(frominterface) + view2 = view1[...] + view2.flags.writeable = False + view2.flags.writeable = True + + view1.flags.writeable = False + view2.flags.writeable = False + with assert_raises(ValueError): + # Must assume not writeable, since only base is not: + view2.flags.writeable = True + + def test_writeable_from_readonly(self): + # gh-9440 - make sure fromstring, from buffer on readonly buffers + # set writeable False + data = b'\x00' * 100 + vals = np.frombuffer(data, 'B') + assert_raises(ValueError, vals.setflags, write=True) + types = np.dtype( [('vals', 'u1'), ('res3', 'S4')] ) + values = np.core.records.fromstring(data, types) + vals = values['vals'] + assert_raises(ValueError, vals.setflags, write=True) + + def test_writeable_from_buffer(self): + data = bytearray(b'\x00' * 100) + vals = np.frombuffer(data, 'B') + assert_(vals.flags.writeable) + vals.setflags(write=False) + assert_(vals.flags.writeable is False) + vals.setflags(write=True) + assert_(vals.flags.writeable) + types = np.dtype( [('vals', 'u1'), ('res3', 'S4')] ) + values = np.core.records.fromstring(data, types) + vals = values['vals'] + assert_(vals.flags.writeable) + vals.setflags(write=False) + assert_(vals.flags.writeable is False) + vals.setflags(write=True) + assert_(vals.flags.writeable) + + @pytest.mark.skipif(IS_PYPY, reason="PyPy always copies") + def test_writeable_pickle(self): + import pickle + # Small arrays will be copied without setting base. + # See condition for using PyArray_SetBaseObject in + # array_setstate. 
+ a = np.arange(1000) + for v in range(pickle.HIGHEST_PROTOCOL): + vals = pickle.loads(pickle.dumps(a, v)) + assert_(vals.flags.writeable) + assert_(isinstance(vals.base, bytes)) + + def test_writeable_from_c_data(self): + # Test that the writeable flag can be changed for an array wrapping + # low level C-data, but not owning its data. + # Also see that this is deprecated to change from python. + from numpy.core._multiarray_tests import get_c_wrapping_array + + arr_writeable = get_c_wrapping_array(True) + assert not arr_writeable.flags.owndata + assert arr_writeable.flags.writeable + view = arr_writeable[...] + + # Toggling the writeable flag works on the view: + view.flags.writeable = False + assert not view.flags.writeable + view.flags.writeable = True + assert view.flags.writeable + # Flag can be unset on the arr_writeable: + arr_writeable.flags.writeable = False + + arr_readonly = get_c_wrapping_array(False) + assert not arr_readonly.flags.owndata + assert not arr_readonly.flags.writeable + + for arr in [arr_writeable, arr_readonly]: + view = arr[...] + view.flags.writeable = False # make sure it is readonly + arr.flags.writeable = False + assert not arr.flags.writeable + + with assert_raises(ValueError): + view.flags.writeable = True + + with warnings.catch_warnings(): + warnings.simplefilter("error", DeprecationWarning) + with assert_raises(DeprecationWarning): + arr.flags.writeable = True + + with assert_warns(DeprecationWarning): + arr.flags.writeable = True + + def test_warnonwrite(self): + a = np.arange(10) + a.flags._warn_on_write = True + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always') + a[1] = 10 + a[2] = 10 + # only warn once + assert_(len(w) == 1) + + @pytest.mark.parametrize(["flag", "flag_value", "writeable"], + [("writeable", True, True), + # Delete _warn_on_write after deprecation and simplify + # the parameterization: + ("_warn_on_write", True, False), + ("writeable", False, False)]) + def test_readonly_flag_protocols(self, flag, flag_value, writeable): + a = np.arange(10) + setattr(a.flags, flag, flag_value) + + class MyArr(): + __array_struct__ = a.__array_struct__ + + assert memoryview(a).readonly is not writeable + assert a.__array_interface__['data'][1] is not writeable + assert np.asarray(MyArr()).flags.writeable is writeable + + def test_otherflags(self): + assert_equal(self.a.flags.carray, True) + assert_equal(self.a.flags['C'], True) + assert_equal(self.a.flags.farray, False) + assert_equal(self.a.flags.behaved, True) + assert_equal(self.a.flags.fnc, False) + assert_equal(self.a.flags.forc, True) + assert_equal(self.a.flags.owndata, True) + assert_equal(self.a.flags.writeable, True) + assert_equal(self.a.flags.aligned, True) + assert_equal(self.a.flags.writebackifcopy, False) + assert_equal(self.a.flags['X'], False) + assert_equal(self.a.flags['WRITEBACKIFCOPY'], False) + + def test_string_align(self): + a = np.zeros(4, dtype=np.dtype('|S4')) + assert_(a.flags.aligned) + # not power of two are accessed byte-wise and thus considered aligned + a = np.zeros(5, dtype=np.dtype('|S4')) + assert_(a.flags.aligned) + + def test_void_align(self): + a = np.zeros(4, dtype=np.dtype([("a", "i4"), ("b", "i4")])) + assert_(a.flags.aligned) + + +class TestHash: + # see #3793 + def test_int(self): + for st, ut, s in [(np.int8, np.uint8, 8), + (np.int16, np.uint16, 16), + (np.int32, np.uint32, 32), + (np.int64, np.uint64, 64)]: + for i in range(1, s): + assert_equal(hash(st(-2**i)), hash(-2**i), + err_msg="%r: -2**%d" % (st, i)) + 
assert_equal(hash(st(2**(i - 1))), hash(2**(i - 1)), + err_msg="%r: 2**%d" % (st, i - 1)) + assert_equal(hash(st(2**i - 1)), hash(2**i - 1), + err_msg="%r: 2**%d - 1" % (st, i)) + + i = max(i - 1, 1) + assert_equal(hash(ut(2**(i - 1))), hash(2**(i - 1)), + err_msg="%r: 2**%d" % (ut, i - 1)) + assert_equal(hash(ut(2**i - 1)), hash(2**i - 1), + err_msg="%r: 2**%d - 1" % (ut, i)) + + +class TestAttributes: + def setup_method(self): + self.one = np.arange(10) + self.two = np.arange(20).reshape(4, 5) + self.three = np.arange(60, dtype=np.float64).reshape(2, 5, 6) + + def test_attributes(self): + assert_equal(self.one.shape, (10,)) + assert_equal(self.two.shape, (4, 5)) + assert_equal(self.three.shape, (2, 5, 6)) + self.three.shape = (10, 3, 2) + assert_equal(self.three.shape, (10, 3, 2)) + self.three.shape = (2, 5, 6) + assert_equal(self.one.strides, (self.one.itemsize,)) + num = self.two.itemsize + assert_equal(self.two.strides, (5*num, num)) + num = self.three.itemsize + assert_equal(self.three.strides, (30*num, 6*num, num)) + assert_equal(self.one.ndim, 1) + assert_equal(self.two.ndim, 2) + assert_equal(self.three.ndim, 3) + num = self.two.itemsize + assert_equal(self.two.size, 20) + assert_equal(self.two.nbytes, 20*num) + assert_equal(self.two.itemsize, self.two.dtype.itemsize) + assert_equal(self.two.base, np.arange(20)) + + def test_dtypeattr(self): + assert_equal(self.one.dtype, np.dtype(np.int_)) + assert_equal(self.three.dtype, np.dtype(np.float_)) + assert_equal(self.one.dtype.char, 'l') + assert_equal(self.three.dtype.char, 'd') + assert_(self.three.dtype.str[0] in '<>') + assert_equal(self.one.dtype.str[1], 'i') + assert_equal(self.three.dtype.str[1], 'f') + + def test_int_subclassing(self): + # Regression test for https://github.com/numpy/numpy/pull/3526 + + numpy_int = np.int_(0) + + # int_ doesn't inherit from Python int, because it's not fixed-width + assert_(not isinstance(numpy_int, int)) + + def test_stridesattr(self): + x = self.one + + def make_array(size, offset, strides): + return np.ndarray(size, buffer=x, dtype=int, + offset=offset*x.itemsize, + strides=strides*x.itemsize) + + assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1])) + assert_raises(ValueError, make_array, 4, 4, -2) + assert_raises(ValueError, make_array, 4, 2, -1) + assert_raises(ValueError, make_array, 8, 3, 1) + assert_equal(make_array(8, 3, 0), np.array([3]*8)) + # Check behavior reported in gh-2503: + assert_raises(ValueError, make_array, (2, 3), 5, np.array([-2, -3])) + make_array(0, 0, 10) + + def test_set_stridesattr(self): + x = self.one + + def make_array(size, offset, strides): + try: + r = np.ndarray([size], dtype=int, buffer=x, + offset=offset*x.itemsize) + except Exception as e: + raise RuntimeError(e) + r.strides = strides = strides*x.itemsize + return r + + assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1])) + assert_equal(make_array(7, 3, 1), np.array([3, 4, 5, 6, 7, 8, 9])) + assert_raises(ValueError, make_array, 4, 4, -2) + assert_raises(ValueError, make_array, 4, 2, -1) + assert_raises(RuntimeError, make_array, 8, 3, 1) + # Check that the true extent of the array is used. + # Test relies on as_strided base not exposing a buffer. 
+ x = np.lib.stride_tricks.as_strided(np.arange(1), (10, 10), (0, 0)) + + def set_strides(arr, strides): + arr.strides = strides + + assert_raises(ValueError, set_strides, x, (10*x.itemsize, x.itemsize)) + + # Test for offset calculations: + x = np.lib.stride_tricks.as_strided(np.arange(10, dtype=np.int8)[-1], + shape=(10,), strides=(-1,)) + assert_raises(ValueError, set_strides, x[::-1], -1) + a = x[::-1] + a.strides = 1 + a[::2].strides = 2 + + # test 0d + arr_0d = np.array(0) + arr_0d.strides = () + assert_raises(TypeError, set_strides, arr_0d, None) + + def test_fill(self): + for t in "?bhilqpBHILQPfdgFDGO": + x = np.empty((3, 2, 1), t) + y = np.empty((3, 2, 1), t) + x.fill(1) + y[...] = 1 + assert_equal(x, y) + + def test_fill_max_uint64(self): + x = np.empty((3, 2, 1), dtype=np.uint64) + y = np.empty((3, 2, 1), dtype=np.uint64) + value = 2**64 - 1 + y[...] = value + x.fill(value) + assert_array_equal(x, y) + + def test_fill_struct_array(self): + # Filling from a scalar + x = np.array([(0, 0.0), (1, 1.0)], dtype='i4,f8') + x.fill(x[0]) + assert_equal(x['f1'][1], x['f1'][0]) + # Filling from a tuple that can be converted + # to a scalar + x = np.zeros(2, dtype=[('a', 'f8'), ('b', 'i4')]) + x.fill((3.5, -2)) + assert_array_equal(x['a'], [3.5, 3.5]) + assert_array_equal(x['b'], [-2, -2]) + + def test_fill_readonly(self): + # gh-22922 + a = np.zeros(11) + a.setflags(write=False) + with pytest.raises(ValueError, match=".*read-only"): + a.fill(0) + + +class TestArrayConstruction: + def test_array(self): + d = np.ones(6) + r = np.array([d, d]) + assert_equal(r, np.ones((2, 6))) + + d = np.ones(6) + tgt = np.ones((2, 6)) + r = np.array([d, d]) + assert_equal(r, tgt) + tgt[1] = 2 + r = np.array([d, d + 1]) + assert_equal(r, tgt) + + d = np.ones(6) + r = np.array([[d, d]]) + assert_equal(r, np.ones((1, 2, 6))) + + d = np.ones(6) + r = np.array([[d, d], [d, d]]) + assert_equal(r, np.ones((2, 2, 6))) + + d = np.ones((6, 6)) + r = np.array([d, d]) + assert_equal(r, np.ones((2, 6, 6))) + + d = np.ones((6, )) + r = np.array([[d, d + 1], d + 2], dtype=object) + assert_equal(len(r), 2) + assert_equal(r[0], [d, d + 1]) + assert_equal(r[1], d + 2) + + tgt = np.ones((2, 3), dtype=bool) + tgt[0, 2] = False + tgt[1, 0:2] = False + r = np.array([[True, True, False], [False, False, True]]) + assert_equal(r, tgt) + r = np.array([[True, False], [True, False], [False, True]]) + assert_equal(r, tgt.T) + + def test_array_empty(self): + assert_raises(TypeError, np.array) + + def test_0d_array_shape(self): + assert np.ones(np.array(3)).shape == (3,) + + def test_array_copy_false(self): + d = np.array([1, 2, 3]) + e = np.array(d, copy=False) + d[1] = 3 + assert_array_equal(e, [1, 3, 3]) + e = np.array(d, copy=False, order='F') + d[1] = 4 + assert_array_equal(e, [1, 4, 3]) + e[2] = 7 + assert_array_equal(d, [1, 4, 7]) + + def test_array_copy_true(self): + d = np.array([[1,2,3], [1, 2, 3]]) + e = np.array(d, copy=True) + d[0, 1] = 3 + e[0, 2] = -7 + assert_array_equal(e, [[1, 2, -7], [1, 2, 3]]) + assert_array_equal(d, [[1, 3, 3], [1, 2, 3]]) + e = np.array(d, copy=True, order='F') + d[0, 1] = 5 + e[0, 2] = 7 + assert_array_equal(e, [[1, 3, 7], [1, 2, 3]]) + assert_array_equal(d, [[1, 5, 3], [1,2,3]]) + + def test_array_cont(self): + d = np.ones(10)[::2] + assert_(np.ascontiguousarray(d).flags.c_contiguous) + assert_(np.ascontiguousarray(d).flags.f_contiguous) + assert_(np.asfortranarray(d).flags.c_contiguous) + assert_(np.asfortranarray(d).flags.f_contiguous) + d = np.ones((10, 10))[::2,::2] + 
assert_(np.ascontiguousarray(d).flags.c_contiguous) + assert_(np.asfortranarray(d).flags.f_contiguous) + + @pytest.mark.parametrize("func", + [np.array, + np.asarray, + np.asanyarray, + np.ascontiguousarray, + np.asfortranarray]) + def test_bad_arguments_error(self, func): + with pytest.raises(TypeError): + func(3, dtype="bad dtype") + with pytest.raises(TypeError): + func() # missing arguments + with pytest.raises(TypeError): + func(1, 2, 3, 4, 5, 6, 7, 8) # too many arguments + + @pytest.mark.parametrize("func", + [np.array, + np.asarray, + np.asanyarray, + np.ascontiguousarray, + np.asfortranarray]) + def test_array_as_keyword(self, func): + # This should likely be made positional only, but do not change + # the name accidentally. + if func is np.array: + func(object=3) + else: + func(a=3) + + +class TestAssignment: + def test_assignment_broadcasting(self): + a = np.arange(6).reshape(2, 3) + + # Broadcasting the input to the output + a[...] = np.arange(3) + assert_equal(a, [[0, 1, 2], [0, 1, 2]]) + a[...] = np.arange(2).reshape(2, 1) + assert_equal(a, [[0, 0, 0], [1, 1, 1]]) + + # For compatibility with <= 1.5, a limited version of broadcasting + # the output to the input. + # + # This behavior is inconsistent with NumPy broadcasting + # in general, because it only uses one of the two broadcasting + # rules (adding a new "1" dimension to the left of the shape), + # applied to the output instead of an input. In NumPy 2.0, this kind + # of broadcasting assignment will likely be disallowed. + a[...] = np.arange(6)[::-1].reshape(1, 2, 3) + assert_equal(a, [[5, 4, 3], [2, 1, 0]]) + # The other type of broadcasting would require a reduction operation. + + def assign(a, b): + a[...] = b + + assert_raises(ValueError, assign, a, np.arange(12).reshape(2, 2, 3)) + + def test_assignment_errors(self): + # Address issue #2276 + class C: + pass + a = np.zeros(1) + + def assign(v): + a[0] = v + + assert_raises((AttributeError, TypeError), assign, C()) + assert_raises(ValueError, assign, [1]) + + def test_unicode_assignment(self): + # gh-5049 + from numpy.core.numeric import set_string_function + + @contextmanager + def inject_str(s): + """ replace ndarray.__str__ temporarily """ + set_string_function(lambda x: s, repr=False) + try: + yield + finally: + set_string_function(None, repr=False) + + a1d = np.array(['test']) + a0d = np.array('done') + with inject_str('bad'): + a1d[0] = a0d # previously this would invoke __str__ + assert_equal(a1d[0], 'done') + + # this would crash for the same reason + np.array([np.array('\xe5\xe4\xf6')]) + + def test_stringlike_empty_list(self): + # gh-8902 + u = np.array(['done']) + b = np.array([b'done']) + + class bad_sequence: + def __getitem__(self): pass + def __len__(self): raise RuntimeError + + assert_raises(ValueError, operator.setitem, u, 0, []) + assert_raises(ValueError, operator.setitem, b, 0, []) + + assert_raises(ValueError, operator.setitem, u, 0, bad_sequence()) + assert_raises(ValueError, operator.setitem, b, 0, bad_sequence()) + + def test_longdouble_assignment(self): + # only relevant if longdouble is larger than float + # we're looking for loss of precision + + for dtype in (np.longdouble, np.longcomplex): + # gh-8902 + tinyb = np.nextafter(np.longdouble(0), 1).astype(dtype) + tinya = np.nextafter(np.longdouble(0), -1).astype(dtype) + + # construction + tiny1d = np.array([tinya]) + assert_equal(tiny1d[0], tinya) + + # scalar = scalar + tiny1d[0] = tinyb + assert_equal(tiny1d[0], tinyb) + + # 0d = scalar + tiny1d[0, ...] 
= tinya + assert_equal(tiny1d[0], tinya) + + # 0d = 0d + tiny1d[0, ...] = tinyb[...] + assert_equal(tiny1d[0], tinyb) + + # scalar = 0d + tiny1d[0] = tinyb[...] + assert_equal(tiny1d[0], tinyb) + + arr = np.array([np.array(tinya)]) + assert_equal(arr[0], tinya) + + def test_cast_to_string(self): + # cast to str should do "str(scalar)", not "str(scalar.item())" + # Example: In python2, str(float) is truncated, so we want to avoid + # str(np.float64(...).item()) as this would incorrectly truncate. + a = np.zeros(1, dtype='S20') + a[:] = np.array(['1.12345678901234567890'], dtype='f8') + assert_equal(a[0], b"1.1234567890123457") + + +class TestDtypedescr: + def test_construction(self): + d1 = np.dtype('i4') + assert_equal(d1, np.dtype(np.int32)) + d2 = np.dtype('f8') + assert_equal(d2, np.dtype(np.float64)) + + def test_byteorders(self): + assert_(np.dtype('<i4') != np.dtype('>i4')) + assert_(np.dtype([('a', '<i4')]) != np.dtype([('a', '>i4')])) + + def test_structured_non_void(self): + fields = [('a', '<i2'), ('b', '<i2')] + dt_int = np.dtype(('i4', fields)) + assert_equal(str(dt_int), "(numpy.int32, [('a', '<i2'), ('b', '<i2')])") + + # gh-9821 + arr_int = np.zeros(4, dt_int) + assert_equal(repr(arr_int), + "array([0, 0, 0, 0], dtype=(numpy.int32, [('a', '<i2'), ('b', '<i2')]))") + + +class TestZeroRank: + def setup_method(self): + self.d = np.array(0), np.array('x', object) + + def test_ellipsis_subscript(self): + a, b = self.d + assert_equal(a[...], 0) + assert_equal(b[...], 'x') + assert_(a[...].base is a) # `a[...] is a` in numpy <1.9. + assert_(b[...].base is b) # `b[...] is b` in numpy <1.9. + + def test_empty_subscript(self): + a, b = self.d + assert_equal(a[()], 0) + assert_equal(b[()], 'x') + assert_(type(a[()]) is a.dtype.type) + assert_(type(b[()]) is str) + + def test_invalid_subscript(self): + a, b = self.d + assert_raises(IndexError, lambda x: x[0], a) + assert_raises(IndexError, lambda x: x[0], b) + assert_raises(IndexError, lambda x: x[np.array([], int)], a) + assert_raises(IndexError, lambda x: x[np.array([], int)], b) + + def test_ellipsis_subscript_assignment(self): + a, b = self.d + a[...] = 42 + assert_equal(a, 42) + b[...] 
= '' + assert_equal(b.item(), '') + + def test_empty_subscript_assignment(self): + a, b = self.d + a[()] = 42 + assert_equal(a, 42) + b[()] = '' + assert_equal(b.item(), '') + + def test_invalid_subscript_assignment(self): + a, b = self.d + + def assign(x, i, v): + x[i] = v + + assert_raises(IndexError, assign, a, 0, 42) + assert_raises(IndexError, assign, b, 0, '') + assert_raises(ValueError, assign, a, (), '') + + def test_newaxis(self): + a, b = self.d + assert_equal(a[np.newaxis].shape, (1,)) + assert_equal(a[..., np.newaxis].shape, (1,)) + assert_equal(a[np.newaxis, ...].shape, (1,)) + assert_equal(a[..., np.newaxis].shape, (1,)) + assert_equal(a[np.newaxis, ..., np.newaxis].shape, (1, 1)) + assert_equal(a[..., np.newaxis, np.newaxis].shape, (1, 1)) + assert_equal(a[np.newaxis, np.newaxis, ...].shape, (1, 1)) + assert_equal(a[(np.newaxis,)*10].shape, (1,)*10) + + def test_invalid_newaxis(self): + a, b = self.d + + def subscript(x, i): + x[i] + + assert_raises(IndexError, subscript, a, (np.newaxis, 0)) + assert_raises(IndexError, subscript, a, (np.newaxis,)*50) + + def test_constructor(self): + x = np.ndarray(()) + x[()] = 5 + assert_equal(x[()], 5) + y = np.ndarray((), buffer=x) + y[()] = 6 + assert_equal(x[()], 6) + + # strides and shape must be the same length + with pytest.raises(ValueError): + np.ndarray((2,), strides=()) + with pytest.raises(ValueError): + np.ndarray((), strides=(2,)) + + def test_output(self): + x = np.array(2) + assert_raises(ValueError, np.add, x, [1], x) + + def test_real_imag(self): + # contiguity checks are for gh-11245 + x = np.array(1j) + xr = x.real + xi = x.imag + + assert_equal(xr, np.array(0)) + assert_(type(xr) is np.ndarray) + assert_equal(xr.flags.contiguous, True) + assert_equal(xr.flags.f_contiguous, True) + + assert_equal(xi, np.array(1)) + assert_(type(xi) is np.ndarray) + assert_equal(xi.flags.contiguous, True) + assert_equal(xi.flags.f_contiguous, True) + + +class TestScalarIndexing: + def setup_method(self): + self.d = np.array([0, 1])[0] + + def test_ellipsis_subscript(self): + a = self.d + assert_equal(a[...], 0) + assert_equal(a[...].shape, ()) + + def test_empty_subscript(self): + a = self.d + assert_equal(a[()], 0) + assert_equal(a[()].shape, ()) + + def test_invalid_subscript(self): + a = self.d + assert_raises(IndexError, lambda x: x[0], a) + assert_raises(IndexError, lambda x: x[np.array([], int)], a) + + def test_invalid_subscript_assignment(self): + a = self.d + + def assign(x, i, v): + x[i] = v + + assert_raises(TypeError, assign, a, 0, 42) + + def test_newaxis(self): + a = self.d + assert_equal(a[np.newaxis].shape, (1,)) + assert_equal(a[..., np.newaxis].shape, (1,)) + assert_equal(a[np.newaxis, ...].shape, (1,)) + assert_equal(a[..., np.newaxis].shape, (1,)) + assert_equal(a[np.newaxis, ..., np.newaxis].shape, (1, 1)) + assert_equal(a[..., np.newaxis, np.newaxis].shape, (1, 1)) + assert_equal(a[np.newaxis, np.newaxis, ...].shape, (1, 1)) + assert_equal(a[(np.newaxis,)*10].shape, (1,)*10) + + def test_invalid_newaxis(self): + a = self.d + + def subscript(x, i): + x[i] + + assert_raises(IndexError, subscript, a, (np.newaxis, 0)) + assert_raises(IndexError, subscript, a, (np.newaxis,)*50) + + def test_overlapping_assignment(self): + # With positive strides + a = np.arange(4) + a[:-1] = a[1:] + assert_equal(a, [1, 2, 3, 3]) + + a = np.arange(4) + a[1:] = a[:-1] + assert_equal(a, [0, 0, 1, 2]) + + # With positive and negative strides + a = np.arange(4) + a[:] = a[::-1] + assert_equal(a, [3, 2, 1, 0]) + + a = np.arange(6).reshape(2, 
3)
+        a[::-1,:] = a[:, ::-1]
+        assert_equal(a, [[5, 4, 3], [2, 1, 0]])
+
+        a = np.arange(6).reshape(2, 3)
+        a[::-1, ::-1] = a[:, ::-1]
+        assert_equal(a, [[3, 4, 5], [0, 1, 2]])
+
+        # With just one element overlapping
+        a = np.arange(5)
+        a[:3] = a[2:]
+        assert_equal(a, [2, 3, 4, 3, 4])
+
+        a = np.arange(5)
+        a[2:] = a[:3]
+        assert_equal(a, [0, 1, 0, 1, 2])
+
+        a = np.arange(5)
+        a[2::-1] = a[2:]
+        assert_equal(a, [4, 3, 2, 3, 4])
+
+        a = np.arange(5)
+        a[2:] = a[2::-1]
+        assert_equal(a, [0, 1, 2, 1, 0])
+
+        a = np.arange(5)
+        a[2::-1] = a[:1:-1]
+        assert_equal(a, [2, 3, 4, 3, 4])
+
+        a = np.arange(5)
+        a[:1:-1] = a[2::-1]
+        assert_equal(a, [0, 1, 0, 1, 2])
+
+
+class TestCreation:
+    """
+    Test the np.array constructor
+    """
+    def test_from_attribute(self):
+        class x:
+            def __array__(self, dtype=None):
+                pass
+
+        assert_raises(ValueError, np.array, x())
+
+    def test_from_string(self):
+        types = np.typecodes['AllInteger'] + np.typecodes['Float']
+        nstr = ['123', '123']
+        result = np.array([123, 123], dtype=int)
+        for type in types:
+            msg = 'String conversion for %s' % type
+            assert_equal(np.array(nstr, dtype=type), result, err_msg=msg)
+
+    def test_void(self):
+        arr = np.array([], dtype='V')
+        assert arr.dtype == 'V8' # current default
+        # Same length scalars (those that go to the same void) work:
+        arr = np.array([b"1234", b"1234"], dtype="V")
+        assert arr.dtype == "V4"
+
+        # Promoting different lengths will fail (pre 1.20 this worked)
+        # by going via S5 and casting to V5.
+        with pytest.raises(TypeError):
+            np.array([b"1234", b"12345"], dtype="V")
+        with pytest.raises(TypeError):
+            np.array([b"12345", b"1234"], dtype="V")
+
+        # Check the same for the casting path:
+        arr = np.array([b"1234", b"1234"], dtype="O").astype("V")
+        assert arr.dtype == "V4"
+        with pytest.raises(TypeError):
+            np.array([b"1234", b"12345"], dtype="O").astype("V")
+
+    @pytest.mark.parametrize("idx",
+            [pytest.param(Ellipsis, id="arr"), pytest.param((), id="scalar")])
+    def test_structured_void_promotion(self, idx):
+        arr = np.array(
+            [np.array(1, dtype="i,i")[idx], np.array(2, dtype='i,i')[idx]],
+            dtype="V")
+        assert_array_equal(arr, np.array([(1, 1), (2, 2)], dtype="i,i"))
+        # The following fails to promote the two dtypes, resulting in an error
+        with pytest.raises(TypeError):
+            np.array(
+                [np.array(1, dtype="i,i")[idx], np.array(2, dtype='i,i,i')[idx]],
+                dtype="V")
+
+
+    def test_too_big_error(self):
+        # 46341 is the smallest integer greater than sqrt(2**31 - 1).
+        # 3037000500 is the smallest integer greater than sqrt(2**63 - 1).
+        # We want to make sure that the square byte array with those dimensions
+        # is too big on 32 or 64 bit systems respectively.
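+        # Illustrative arithmetic for the shapes chosen below:
+        #   46341**2 = 2_147_488_281 > 2**31 - 1 = 2_147_483_647
+        #   3037000500**2 = 9_223_372_037_000_250_000 > 2**63 - 1
+        # so a square byte array of either shape has more elements than a
+        # 32-bit or 64-bit intp can address, respectively.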
+ if np.iinfo('intp').max == 2**31 - 1: + shape = (46341, 46341) + elif np.iinfo('intp').max == 2**63 - 1: + shape = (3037000500, 3037000500) + else: + return + assert_raises(ValueError, np.empty, shape, dtype=np.int8) + assert_raises(ValueError, np.zeros, shape, dtype=np.int8) + assert_raises(ValueError, np.ones, shape, dtype=np.int8) + + @pytest.mark.skipif(np.dtype(np.intp).itemsize != 8, + reason="malloc may not fail on 32 bit systems") + def test_malloc_fails(self): + # This test is guaranteed to fail due to a too large allocation + with assert_raises(np.core._exceptions._ArrayMemoryError): + np.empty(np.iinfo(np.intp).max, dtype=np.uint8) + + def test_zeros(self): + types = np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + for dt in types: + d = np.zeros((13,), dtype=dt) + assert_equal(np.count_nonzero(d), 0) + # true for ieee floats + assert_equal(d.sum(), 0) + assert_(not d.any()) + + d = np.zeros(2, dtype='(2,4)i4') + assert_equal(np.count_nonzero(d), 0) + assert_equal(d.sum(), 0) + assert_(not d.any()) + + d = np.zeros(2, dtype='4i4') + assert_equal(np.count_nonzero(d), 0) + assert_equal(d.sum(), 0) + assert_(not d.any()) + + d = np.zeros(2, dtype='(2,4)i4, (2,4)i4') + assert_equal(np.count_nonzero(d), 0) + + @pytest.mark.slow + def test_zeros_big(self): + # test big array as they might be allocated different by the system + types = np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + for dt in types: + d = np.zeros((30 * 1024**2,), dtype=dt) + assert_(not d.any()) + # This test can fail on 32-bit systems due to insufficient + # contiguous memory. Deallocating the previous array increases the + # chance of success. + del(d) + + def test_zeros_obj(self): + # test initialization from PyLong(0) + d = np.zeros((13,), dtype=object) + assert_array_equal(d, [0] * 13) + assert_equal(np.count_nonzero(d), 0) + + def test_zeros_obj_obj(self): + d = np.zeros(10, dtype=[('k', object, 2)]) + assert_array_equal(d['k'], 0) + + def test_zeros_like_like_zeros(self): + # test zeros_like returns the same as zeros + for c in np.typecodes['All']: + if c == 'V': + continue + d = np.zeros((3,3), dtype=c) + assert_array_equal(np.zeros_like(d), d) + assert_equal(np.zeros_like(d).dtype, d.dtype) + # explicitly check some special cases + d = np.zeros((3,3), dtype='S5') + assert_array_equal(np.zeros_like(d), d) + assert_equal(np.zeros_like(d).dtype, d.dtype) + d = np.zeros((3,3), dtype='U5') + assert_array_equal(np.zeros_like(d), d) + assert_equal(np.zeros_like(d).dtype, d.dtype) + + d = np.zeros((3,3), dtype='<i4') + assert_array_equal(np.zeros_like(d), d) + assert_equal(np.zeros_like(d).dtype, d.dtype) + d = np.zeros((3,3), dtype='>i4') + assert_array_equal(np.zeros_like(d), d) + assert_equal(np.zeros_like(d).dtype, d.dtype) + + d = np.zeros((3,3), dtype='<M8[s]') + assert_array_equal(np.zeros_like(d), d) + assert_equal(np.zeros_like(d).dtype, d.dtype) + d = np.zeros((3,3), dtype='>M8[s]') + assert_array_equal(np.zeros_like(d), d) + assert_equal(np.zeros_like(d).dtype, d.dtype) + + d = np.zeros((3,3), dtype='f4,f4') + assert_array_equal(np.zeros_like(d), d) + assert_equal(np.zeros_like(d).dtype, d.dtype) + + def test_empty_unicode(self): + # don't throw decode errors on garbage memory + for i in range(5, 100, 5): + d = np.empty(i, dtype='U') + str(d) + + def test_sequence_non_homogeneous(self): + assert_equal(np.array([4, 2**80]).dtype, object) + assert_equal(np.array([4, 2**80, 4]).dtype, object) + assert_equal(np.array([2**80, 4]).dtype, object) + assert_equal(np.array([2**80] * 3).dtype, 
object) + assert_equal(np.array([[1, 1],[1j, 1j]]).dtype, complex) + assert_equal(np.array([[1j, 1j],[1, 1]]).dtype, complex) + assert_equal(np.array([[1, 1, 1],[1, 1j, 1.], [1, 1, 1]]).dtype, complex) + + def test_non_sequence_sequence(self): + """Should not segfault. + + Class Fail breaks the sequence protocol for new style classes, i.e., + those derived from object. Class Map is a mapping type indicated by + raising a ValueError. At some point we may raise a warning instead + of an error in the Fail case. + + """ + class Fail: + def __len__(self): + return 1 + + def __getitem__(self, index): + raise ValueError() + + class Map: + def __len__(self): + return 1 + + def __getitem__(self, index): + raise KeyError() + + a = np.array([Map()]) + assert_(a.shape == (1,)) + assert_(a.dtype == np.dtype(object)) + assert_raises(ValueError, np.array, [Fail()]) + + def test_no_len_object_type(self): + # gh-5100, want object array from iterable object without len() + class Point2: + def __init__(self): + pass + + def __getitem__(self, ind): + if ind in [0, 1]: + return ind + else: + raise IndexError() + d = np.array([Point2(), Point2(), Point2()]) + assert_equal(d.dtype, np.dtype(object)) + + def test_false_len_sequence(self): + # gh-7264, segfault for this example + class C: + def __getitem__(self, i): + raise IndexError + def __len__(self): + return 42 + + a = np.array(C()) # segfault? + assert_equal(len(a), 0) + + def test_false_len_iterable(self): + # Special case where a bad __getitem__ makes us fall back on __iter__: + class C: + def __getitem__(self, x): + raise Exception + def __iter__(self): + return iter(()) + def __len__(self): + return 2 + + a = np.empty(2) + with assert_raises(ValueError): + a[:] = C() # Segfault! + + np.array(C()) == list(C()) + + def test_failed_len_sequence(self): + # gh-7393 + class A: + def __init__(self, data): + self._data = data + def __getitem__(self, item): + return type(self)(self._data[item]) + def __len__(self): + return len(self._data) + + # len(d) should give 3, but len(d[0]) will fail + d = A([1,2,3]) + assert_equal(len(np.array(d)), 3) + + def test_array_too_big(self): + # Test that array creation succeeds for arrays addressable by intp + # on the byte level and fails for too large arrays. 
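+        # Note on how the check below stays cheap: with strides=(0,) every
+        # element of the new view aliases the same bytes of ``buf``, so a
+        # shape of max_bytes // itemsize elements is addressable without
+        # allocating that much memory; one element more pushes the required
+        # extent past the intp limit and is expected to raise ValueError.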
+ buf = np.zeros(100) + + max_bytes = np.iinfo(np.intp).max + for dtype in ["intp", "S20", "b"]: + dtype = np.dtype(dtype) + itemsize = dtype.itemsize + + np.ndarray(buffer=buf, strides=(0,), + shape=(max_bytes//itemsize,), dtype=dtype) + assert_raises(ValueError, np.ndarray, buffer=buf, strides=(0,), + shape=(max_bytes//itemsize + 1,), dtype=dtype) + + def _ragged_creation(self, seq): + # without dtype=object, the ragged object raises + with pytest.raises(ValueError, match=".*detected shape was"): + a = np.array(seq) + + return np.array(seq, dtype=object) + + def test_ragged_ndim_object(self): + # Lists of mismatching depths are treated as object arrays + a = self._ragged_creation([[1], 2, 3]) + assert_equal(a.shape, (3,)) + assert_equal(a.dtype, object) + + a = self._ragged_creation([1, [2], 3]) + assert_equal(a.shape, (3,)) + assert_equal(a.dtype, object) + + a = self._ragged_creation([1, 2, [3]]) + assert_equal(a.shape, (3,)) + assert_equal(a.dtype, object) + + def test_ragged_shape_object(self): + # The ragged dimension of a list is turned into an object array + a = self._ragged_creation([[1, 1], [2], [3]]) + assert_equal(a.shape, (3,)) + assert_equal(a.dtype, object) + + a = self._ragged_creation([[1], [2, 2], [3]]) + assert_equal(a.shape, (3,)) + assert_equal(a.dtype, object) + + a = self._ragged_creation([[1], [2], [3, 3]]) + assert a.shape == (3,) + assert a.dtype == object + + def test_array_of_ragged_array(self): + outer = np.array([None, None]) + outer[0] = outer[1] = np.array([1, 2, 3]) + assert np.array(outer).shape == (2,) + assert np.array([outer]).shape == (1, 2) + + outer_ragged = np.array([None, None]) + outer_ragged[0] = np.array([1, 2, 3]) + outer_ragged[1] = np.array([1, 2, 3, 4]) + # should both of these emit deprecation warnings? + assert np.array(outer_ragged).shape == (2,) + assert np.array([outer_ragged]).shape == (1, 2,) + + def test_deep_nonragged_object(self): + # None of these should raise, even though they are missing dtype=object + a = np.array([[[Decimal(1)]]]) + a = np.array([1, Decimal(1)]) + a = np.array([[1], [Decimal(1)]]) + + @pytest.mark.parametrize("dtype", [object, "O,O", "O,(3)O", "(2,3)O"]) + @pytest.mark.parametrize("function", [ + np.ndarray, np.empty, + lambda shape, dtype: np.empty_like(np.empty(shape, dtype=dtype))]) + def test_object_initialized_to_None(self, function, dtype): + # NumPy has support for object fields to be NULL (meaning None) + # but generally, we should always fill with the proper None, and + # downstream may rely on that. (For fully initialized arrays!) 
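+        # The comparison below works on raw bytes: a properly initialized
+        # object array stores the pointer to the ``None`` singleton in every
+        # slot, so its buffer should equal ``np.array(None).tobytes()`` tiled
+        # over the whole array rather than containing NULL bytes.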
+ arr = function(3, dtype=dtype) + # We expect a fill value of None, which is not NULL: + expected = np.array(None).tobytes() + expected = expected * (arr.nbytes // len(expected)) + assert arr.tobytes() == expected + + @pytest.mark.parametrize("func", [ + np.array, np.asarray, np.asanyarray, np.ascontiguousarray, + np.asfortranarray]) + def test_creation_from_dtypemeta(self, func): + dtype = np.dtype('i') + arr1 = func([1, 2, 3], dtype=dtype) + arr2 = func([1, 2, 3], dtype=type(dtype)) + assert_array_equal(arr1, arr2) + assert arr2.dtype == dtype + + +class TestStructured: + def test_subarray_field_access(self): + a = np.zeros((3, 5), dtype=[('a', ('i4', (2, 2)))]) + a['a'] = np.arange(60).reshape(3, 5, 2, 2) + + # Since the subarray is always in C-order, a transpose + # does not swap the subarray: + assert_array_equal(a.T['a'], a['a'].transpose(1, 0, 2, 3)) + + # In Fortran order, the subarray gets appended + # like in all other cases, not prepended as a special case + b = a.copy(order='F') + assert_equal(a['a'].shape, b['a'].shape) + assert_equal(a.T['a'].shape, a.T.copy()['a'].shape) + + def test_subarray_comparison(self): + # Check that comparisons between record arrays with + # multi-dimensional field types work properly + a = np.rec.fromrecords( + [([1, 2, 3], 'a', [[1, 2], [3, 4]]), ([3, 3, 3], 'b', [[0, 0], [0, 0]])], + dtype=[('a', ('f4', 3)), ('b', object), ('c', ('i4', (2, 2)))]) + b = a.copy() + assert_equal(a == b, [True, True]) + assert_equal(a != b, [False, False]) + b[1].b = 'c' + assert_equal(a == b, [True, False]) + assert_equal(a != b, [False, True]) + for i in range(3): + b[0].a = a[0].a + b[0].a[i] = 5 + assert_equal(a == b, [False, False]) + assert_equal(a != b, [True, True]) + for i in range(2): + for j in range(2): + b = a.copy() + b[0].c[i, j] = 10 + assert_equal(a == b, [False, True]) + assert_equal(a != b, [True, False]) + + # Check that broadcasting with a subarray works, including cases that + # require promotion to work: + a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8')]) + b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8')]) + assert_equal(a == b, [[True, True, False], [False, False, True]]) + assert_equal(b == a, [[True, True, False], [False, False, True]]) + a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8', (1,))]) + b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8', (1,))]) + assert_equal(a == b, [[True, True, False], [False, False, True]]) + assert_equal(b == a, [[True, True, False], [False, False, True]]) + a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))]) + b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))]) + assert_equal(a == b, [[True, False, False], [False, False, True]]) + assert_equal(b == a, [[True, False, False], [False, False, True]]) + + # Check that broadcasting Fortran-style arrays with a subarray work + a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))], order='F') + b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))]) + assert_equal(a == b, [[True, False, False], [False, False, True]]) + assert_equal(b == a, [[True, False, False], [False, False, True]]) + + # Check that incompatible sub-array shapes don't result to broadcasting + x = np.zeros((1,), dtype=[('a', ('f4', (1, 2))), ('b', 'i1')]) + y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')]) + # The main importance is that it does not return True: + with pytest.raises(TypeError): + x == y + + x = np.zeros((1,), dtype=[('a', ('f4', (2, 1))), ('b', 'i1')]) + y = np.zeros((1,), dtype=[('a', ('f4', 
(2,))), ('b', 'i1')]) + # The main importance is that it does not return True: + with pytest.raises(TypeError): + x == y + + def test_empty_structured_array_comparison(self): + # Check that comparison works on empty arrays with nontrivially + # shaped fields + a = np.zeros(0, [('a', '<f8', (1, 1))]) + assert_equal(a, a) + a = np.zeros(0, [('a', '<f8', (1,))]) + assert_equal(a, a) + a = np.zeros((0, 0), [('a', '<f8', (1, 1))]) + assert_equal(a, a) + a = np.zeros((1, 0, 1), [('a', '<f8', (1, 1))]) + assert_equal(a, a) + + @pytest.mark.parametrize("op", [operator.eq, operator.ne]) + def test_structured_array_comparison_bad_broadcasts(self, op): + a = np.zeros(3, dtype='i,i') + b = np.array([], dtype="i,i") + with pytest.raises(ValueError): + op(a, b) + + def test_structured_comparisons_with_promotion(self): + # Check that structured arrays can be compared so long as their + # dtypes promote fine: + a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i8'), ('b', '<f8')]) + b = np.array([(5, 43), (10, 1)], dtype=[('a', '<i8'), ('b', '>f8')]) + assert_equal(a == b, [False, True]) + assert_equal(a != b, [True, False]) + + a = np.array([(5, 42), (10, 1)], dtype=[('a', '>f8'), ('b', '<f8')]) + b = np.array([(5, 43), (10, 1)], dtype=[('a', '<i8'), ('b', '>i8')]) + assert_equal(a == b, [False, True]) + assert_equal(a != b, [True, False]) + + # Including with embedded subarray dtype (although subarray comparison + # itself may still be a bit weird and compare the raw data) + a = np.array([(5, 42), (10, 1)], dtype=[('a', '10>f8'), ('b', '5<f8')]) + b = np.array([(5, 43), (10, 1)], dtype=[('a', '10<i8'), ('b', '5>i8')]) + assert_equal(a == b, [False, True]) + assert_equal(a != b, [True, False]) + + @pytest.mark.parametrize("op", [ + operator.eq, lambda x, y: operator.eq(y, x), + operator.ne, lambda x, y: operator.ne(y, x)]) + def test_void_comparison_failures(self, op): + # In principle, one could decide to return an array of False for some + # if comparisons are impossible. But right now we return TypeError + # when "void" dtype are involved. 
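+        # The three cases below cover: structured vs. plain dtype, a
+        # title-only mismatch that would still be castable, and two plain
+        # void dtypes of different itemsize; each is expected to raise
+        # TypeError for now.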
+ x = np.zeros(3, dtype=[('a', 'i1')]) + y = np.zeros(3) + # Cannot compare non-structured to structured: + with pytest.raises(TypeError): + op(x, y) + + # Added title prevents promotion, but casts are OK: + y = np.zeros(3, dtype=[(('title', 'a'), 'i1')]) + assert np.can_cast(y.dtype, x.dtype) + with pytest.raises(TypeError): + op(x, y) + + x = np.zeros(3, dtype="V7") + y = np.zeros(3, dtype="V8") + with pytest.raises(TypeError): + op(x, y) + + def test_casting(self): + # Check that casting a structured array to change its byte order + # works + a = np.array([(1,)], dtype=[('a', '<i4')]) + assert_(np.can_cast(a.dtype, [('a', '>i4')], casting='unsafe')) + b = a.astype([('a', '>i4')]) + assert_equal(b, a.byteswap().newbyteorder()) + assert_equal(a['a'][0], b['a'][0]) + + # Check that equality comparison works on structured arrays if + # they are 'equiv'-castable + a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i4'), ('b', '<f8')]) + b = np.array([(5, 42), (10, 1)], dtype=[('a', '<i4'), ('b', '>f8')]) + assert_(np.can_cast(a.dtype, b.dtype, casting='equiv')) + assert_equal(a == b, [True, True]) + + # Check that 'equiv' casting can change byte order + assert_(np.can_cast(a.dtype, b.dtype, casting='equiv')) + c = a.astype(b.dtype, casting='equiv') + assert_equal(a == c, [True, True]) + + # Check that 'safe' casting can change byte order and up-cast + # fields + t = [('a', '<i8'), ('b', '>f8')] + assert_(np.can_cast(a.dtype, t, casting='safe')) + c = a.astype(t, casting='safe') + assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)), + [True, True]) + + # Check that 'same_kind' casting can change byte order and + # change field widths within a "kind" + t = [('a', '<i4'), ('b', '>f4')] + assert_(np.can_cast(a.dtype, t, casting='same_kind')) + c = a.astype(t, casting='same_kind') + assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)), + [True, True]) + + # Check that casting fails if the casting rule should fail on + # any of the fields + t = [('a', '>i8'), ('b', '<f4')] + assert_(not np.can_cast(a.dtype, t, casting='safe')) + assert_raises(TypeError, a.astype, t, casting='safe') + t = [('a', '>i2'), ('b', '<f8')] + assert_(not np.can_cast(a.dtype, t, casting='equiv')) + assert_raises(TypeError, a.astype, t, casting='equiv') + t = [('a', '>i8'), ('b', '<i2')] + assert_(not np.can_cast(a.dtype, t, casting='same_kind')) + assert_raises(TypeError, a.astype, t, casting='same_kind') + assert_(not np.can_cast(a.dtype, b.dtype, casting='no')) + assert_raises(TypeError, a.astype, b.dtype, casting='no') + + # Check that non-'unsafe' casting can't change the set of field names + for casting in ['no', 'safe', 'equiv', 'same_kind']: + t = [('a', '>i4')] + assert_(not np.can_cast(a.dtype, t, casting=casting)) + t = [('a', '>i4'), ('b', '<f8'), ('c', 'i4')] + assert_(not np.can_cast(a.dtype, t, casting=casting)) + + def test_objview(self): + # https://github.com/numpy/numpy/issues/3286 + a = np.array([], dtype=[('a', 'f'), ('b', 'f'), ('c', 'O')]) + a[['a', 'b']] # TypeError? + + # https://github.com/numpy/numpy/issues/3253 + dat2 = np.zeros(3, [('A', 'i'), ('B', '|O')]) + dat2[['B', 'A']] # TypeError? 
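+        # (In recent NumPy versions, multi-field indexing such as
+        # ``a[['a', 'b']]`` should return a view whose dtype keeps the
+        # parent's itemsize and field offsets; the gh issues above tracked
+        # this raising TypeError when object fields were involved, which
+        # these smoke tests guard against.)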
+ + def test_setfield(self): + # https://github.com/numpy/numpy/issues/3126 + struct_dt = np.dtype([('elem', 'i4', 5),]) + dt = np.dtype([('field', 'i4', 10),('struct', struct_dt)]) + x = np.zeros(1, dt) + x[0]['field'] = np.ones(10, dtype='i4') + x[0]['struct'] = np.ones(1, dtype=struct_dt) + assert_equal(x[0]['field'], np.ones(10, dtype='i4')) + + def test_setfield_object(self): + # make sure object field assignment with ndarray value + # on void scalar mimics setitem behavior + b = np.zeros(1, dtype=[('x', 'O')]) + # next line should work identically to b['x'][0] = np.arange(3) + b[0]['x'] = np.arange(3) + assert_equal(b[0]['x'], np.arange(3)) + + # check that broadcasting check still works + c = np.zeros(1, dtype=[('x', 'O', 5)]) + + def testassign(): + c[0]['x'] = np.arange(3) + + assert_raises(ValueError, testassign) + + def test_zero_width_string(self): + # Test for PR #6430 / issues #473, #4955, #2585 + + dt = np.dtype([('I', int), ('S', 'S0')]) + + x = np.zeros(4, dtype=dt) + + assert_equal(x['S'], [b'', b'', b'', b'']) + assert_equal(x['S'].itemsize, 0) + + x['S'] = ['a', 'b', 'c', 'd'] + assert_equal(x['S'], [b'', b'', b'', b'']) + assert_equal(x['I'], [0, 0, 0, 0]) + + # Variation on test case from #4955 + x['S'][x['I'] == 0] = 'hello' + assert_equal(x['S'], [b'', b'', b'', b'']) + assert_equal(x['I'], [0, 0, 0, 0]) + + # Variation on test case from #2585 + x['S'] = 'A' + assert_equal(x['S'], [b'', b'', b'', b'']) + assert_equal(x['I'], [0, 0, 0, 0]) + + # Allow zero-width dtypes in ndarray constructor + y = np.ndarray(4, dtype=x['S'].dtype) + assert_equal(y.itemsize, 0) + assert_equal(x['S'], y) + + # More tests for indexing an array with zero-width fields + assert_equal(np.zeros(4, dtype=[('a', 'S0,S0'), + ('b', 'u1')])['a'].itemsize, 0) + assert_equal(np.empty(3, dtype='S0,S0').itemsize, 0) + assert_equal(np.zeros(4, dtype='S0,u1')['f0'].itemsize, 0) + + xx = x['S'].reshape((2, 2)) + assert_equal(xx.itemsize, 0) + assert_equal(xx, [[b'', b''], [b'', b'']]) + # check for no uninitialized memory due to viewing S0 array + assert_equal(xx[:].dtype, xx.dtype) + assert_array_equal(eval(repr(xx), dict(array=np.array)), xx) + + b = io.BytesIO() + np.save(b, xx) + + b.seek(0) + yy = np.load(b) + assert_equal(yy.itemsize, 0) + assert_equal(xx, yy) + + with temppath(suffix='.npy') as tmp: + np.save(tmp, xx) + yy = np.load(tmp) + assert_equal(yy.itemsize, 0) + assert_equal(xx, yy) + + def test_base_attr(self): + a = np.zeros(3, dtype='i4,f4') + b = a[0] + assert_(b.base is a) + + def test_assignment(self): + def testassign(arr, v): + c = arr.copy() + c[0] = v # assign using setitem + c[1:] = v # assign using "dtype_transfer" code paths + return c + + dt = np.dtype([('foo', 'i8'), ('bar', 'i8')]) + arr = np.ones(2, dt) + v1 = np.array([(2,3)], dtype=[('foo', 'i8'), ('bar', 'i8')]) + v2 = np.array([(2,3)], dtype=[('bar', 'i8'), ('foo', 'i8')]) + v3 = np.array([(2,3)], dtype=[('bar', 'i8'), ('baz', 'i8')]) + v4 = np.array([(2,)], dtype=[('bar', 'i8')]) + v5 = np.array([(2,3)], dtype=[('foo', 'f8'), ('bar', 'f8')]) + w = arr.view({'names': ['bar'], 'formats': ['i8'], 'offsets': [8]}) + + ans = np.array([(2,3),(2,3)], dtype=dt) + assert_equal(testassign(arr, v1), ans) + assert_equal(testassign(arr, v2), ans) + assert_equal(testassign(arr, v3), ans) + assert_raises(TypeError, lambda: testassign(arr, v4)) + assert_equal(testassign(arr, v5), ans) + w[:] = 4 + assert_equal(arr, np.array([(1,4),(1,4)], dtype=dt)) + + # test field-reordering, assignment by position, and self-assignment + a = 
np.array([(1,2,3)], + dtype=[('foo', 'i8'), ('bar', 'i8'), ('baz', 'f4')]) + a[['foo', 'bar']] = a[['bar', 'foo']] + assert_equal(a[0].item(), (2,1,3)) + + # test that this works even for 'simple_unaligned' structs + # (ie, that PyArray_EquivTypes cares about field order too) + a = np.array([(1,2)], dtype=[('a', 'i4'), ('b', 'i4')]) + a[['a', 'b']] = a[['b', 'a']] + assert_equal(a[0].item(), (2,1)) + + def test_scalar_assignment(self): + with assert_raises(ValueError): + arr = np.arange(25).reshape(5, 5) + arr.itemset(3) + + def test_structuredscalar_indexing(self): + # test gh-7262 + x = np.empty(shape=1, dtype="(2)3S,(2)3U") + assert_equal(x[["f0","f1"]][0], x[0][["f0","f1"]]) + assert_equal(x[0], x[0][()]) + + def test_multiindex_titles(self): + a = np.zeros(4, dtype=[(('a', 'b'), 'i'), ('c', 'i'), ('d', 'i')]) + assert_raises(KeyError, lambda : a[['a','c']]) + assert_raises(KeyError, lambda : a[['a','a']]) + assert_raises(ValueError, lambda : a[['b','b']]) # field exists, but repeated + a[['b','c']] # no exception + + def test_structured_cast_promotion_fieldorder(self): + # gh-15494 + # dtypes with different field names are not promotable + A = ("a", "<i8") + B = ("b", ">i8") + ab = np.array([(1, 2)], dtype=[A, B]) + ba = np.array([(1, 2)], dtype=[B, A]) + assert_raises(TypeError, np.concatenate, ab, ba) + assert_raises(TypeError, np.result_type, ab.dtype, ba.dtype) + assert_raises(TypeError, np.promote_types, ab.dtype, ba.dtype) + + # dtypes with same field names/order but different memory offsets + # and byte-order are promotable to packed nbo. + assert_equal(np.promote_types(ab.dtype, ba[['a', 'b']].dtype), + repack_fields(ab.dtype.newbyteorder('N'))) + + # gh-13667 + # dtypes with different fieldnames but castable field types are castable + assert_equal(np.can_cast(ab.dtype, ba.dtype), True) + assert_equal(ab.astype(ba.dtype).dtype, ba.dtype) + assert_equal(np.can_cast('f8,i8', [('f0', 'f8'), ('f1', 'i8')]), True) + assert_equal(np.can_cast('f8,i8', [('f1', 'f8'), ('f0', 'i8')]), True) + assert_equal(np.can_cast('f8,i8', [('f1', 'i8'), ('f0', 'f8')]), False) + assert_equal(np.can_cast('f8,i8', [('f1', 'i8'), ('f0', 'f8')], + casting='unsafe'), True) + + ab[:] = ba # make sure assignment still works + + # tests of type-promotion of corresponding fields + dt1 = np.dtype([("", "i4")]) + dt2 = np.dtype([("", "i8")]) + assert_equal(np.promote_types(dt1, dt2), np.dtype([('f0', 'i8')])) + assert_equal(np.promote_types(dt2, dt1), np.dtype([('f0', 'i8')])) + assert_raises(TypeError, np.promote_types, dt1, np.dtype([("", "V3")])) + assert_equal(np.promote_types('i4,f8', 'i8,f4'), + np.dtype([('f0', 'i8'), ('f1', 'f8')])) + # test nested case + dt1nest = np.dtype([("", dt1)]) + dt2nest = np.dtype([("", dt2)]) + assert_equal(np.promote_types(dt1nest, dt2nest), + np.dtype([('f0', np.dtype([('f0', 'i8')]))])) + + # note that offsets are lost when promoting: + dt = np.dtype({'names': ['x'], 'formats': ['i4'], 'offsets': [8]}) + a = np.ones(3, dtype=dt) + assert_equal(np.concatenate([a, a]).dtype, np.dtype([('x', 'i4')])) + + @pytest.mark.parametrize("dtype_dict", [ + dict(names=["a", "b"], formats=["i4", "f"], itemsize=100), + dict(names=["a", "b"], formats=["i4", "f"], + offsets=[0, 12])]) + @pytest.mark.parametrize("align", [True, False]) + def test_structured_promotion_packs(self, dtype_dict, align): + # Structured dtypes are packed when promoted (we consider the packed + # form to be "canonical"), so tere is no extra padding. 
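+        # Worked example of the packing described above: promoting a dtype
+        # with names=["a", "b"], formats=["i4", "f"] and itemsize=100 (or
+        # offsets=[0, 12]) should give the packed layout with "a" at offset 0,
+        # "b" at offset 4 and itemsize 8, i.e. the ``expected`` dtype built
+        # below.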
+ dtype = np.dtype(dtype_dict, align=align) + # Remove non "canonical" dtype options: + dtype_dict.pop("itemsize", None) + dtype_dict.pop("offsets", None) + expected = np.dtype(dtype_dict, align=align) + + res = np.promote_types(dtype, dtype) + assert res.itemsize == expected.itemsize + assert res.fields == expected.fields + + # But the "expected" one, should just be returned unchanged: + res = np.promote_types(expected, expected) + assert res is expected + + def test_structured_asarray_is_view(self): + # A scalar viewing an array preserves its view even when creating a + # new array. This test documents behaviour, it may not be the best + # desired behaviour. + arr = np.array([1], dtype="i,i") + scalar = arr[0] + assert not scalar.flags.owndata # view into the array + assert np.asarray(scalar).base is scalar + # But never when a dtype is passed in: + assert np.asarray(scalar, dtype=scalar.dtype).base is None + # A scalar which owns its data does not have this property. + # It is not easy to create one, one method is to use pickle: + scalar = pickle.loads(pickle.dumps(scalar)) + assert scalar.flags.owndata + assert np.asarray(scalar).base is None + +class TestBool: + def test_test_interning(self): + a0 = np.bool_(0) + b0 = np.bool_(False) + assert_(a0 is b0) + a1 = np.bool_(1) + b1 = np.bool_(True) + assert_(a1 is b1) + assert_(np.array([True])[0] is a1) + assert_(np.array(True)[()] is a1) + + def test_sum(self): + d = np.ones(101, dtype=bool) + assert_equal(d.sum(), d.size) + assert_equal(d[::2].sum(), d[::2].size) + assert_equal(d[::-2].sum(), d[::-2].size) + + d = np.frombuffer(b'\xff\xff' * 100, dtype=bool) + assert_equal(d.sum(), d.size) + assert_equal(d[::2].sum(), d[::2].size) + assert_equal(d[::-2].sum(), d[::-2].size) + + def check_count_nonzero(self, power, length): + powers = [2 ** i for i in range(length)] + for i in range(2**power): + l = [(i & x) != 0 for x in powers] + a = np.array(l, dtype=bool) + c = builtins.sum(l) + assert_equal(np.count_nonzero(a), c) + av = a.view(np.uint8) + av *= 3 + assert_equal(np.count_nonzero(a), c) + av *= 4 + assert_equal(np.count_nonzero(a), c) + av[av != 0] = 0xFF + assert_equal(np.count_nonzero(a), c) + + def test_count_nonzero(self): + # check all 12 bit combinations in a length 17 array + # covers most cases of the 16 byte unrolled code + self.check_count_nonzero(12, 17) + + @pytest.mark.slow + def test_count_nonzero_all(self): + # check all combinations in a length 17 array + # covers all cases of the 16 byte unrolled code + self.check_count_nonzero(17, 17) + + def test_count_nonzero_unaligned(self): + # prevent mistakes as e.g. 
gh-4060 + for o in range(7): + a = np.zeros((18,), dtype=bool)[o+1:] + a[:o] = True + assert_equal(np.count_nonzero(a), builtins.sum(a.tolist())) + a = np.ones((18,), dtype=bool)[o+1:] + a[:o] = False + assert_equal(np.count_nonzero(a), builtins.sum(a.tolist())) + + def _test_cast_from_flexible(self, dtype): + # empty string -> false + for n in range(3): + v = np.array(b'', (dtype, n)) + assert_equal(bool(v), False) + assert_equal(bool(v[()]), False) + assert_equal(v.astype(bool), False) + assert_(isinstance(v.astype(bool), np.ndarray)) + assert_(v[()].astype(bool) is np.False_) + + # anything else -> true + for n in range(1, 4): + for val in [b'a', b'0', b' ']: + v = np.array(val, (dtype, n)) + assert_equal(bool(v), True) + assert_equal(bool(v[()]), True) + assert_equal(v.astype(bool), True) + assert_(isinstance(v.astype(bool), np.ndarray)) + assert_(v[()].astype(bool) is np.True_) + + def test_cast_from_void(self): + self._test_cast_from_flexible(np.void) + + @pytest.mark.xfail(reason="See gh-9847") + def test_cast_from_unicode(self): + self._test_cast_from_flexible(np.str_) + + @pytest.mark.xfail(reason="See gh-9847") + def test_cast_from_bytes(self): + self._test_cast_from_flexible(np.bytes_) + + +class TestZeroSizeFlexible: + @staticmethod + def _zeros(shape, dtype=str): + dtype = np.dtype(dtype) + if dtype == np.void: + return np.zeros(shape, dtype=(dtype, 0)) + + # not constructable directly + dtype = np.dtype([('x', dtype, 0)]) + return np.zeros(shape, dtype=dtype)['x'] + + def test_create(self): + zs = self._zeros(10, bytes) + assert_equal(zs.itemsize, 0) + zs = self._zeros(10, np.void) + assert_equal(zs.itemsize, 0) + zs = self._zeros(10, str) + assert_equal(zs.itemsize, 0) + + def _test_sort_partition(self, name, kinds, **kwargs): + # Previously, these would all hang + for dt in [bytes, np.void, str]: + zs = self._zeros(10, dt) + sort_method = getattr(zs, name) + sort_func = getattr(np, name) + for kind in kinds: + sort_method(kind=kind, **kwargs) + sort_func(zs, kind=kind, **kwargs) + + def test_sort(self): + self._test_sort_partition('sort', kinds='qhs') + + def test_argsort(self): + self._test_sort_partition('argsort', kinds='qhs') + + def test_partition(self): + self._test_sort_partition('partition', kinds=['introselect'], kth=2) + + def test_argpartition(self): + self._test_sort_partition('argpartition', kinds=['introselect'], kth=2) + + def test_resize(self): + # previously an error + for dt in [bytes, np.void, str]: + zs = self._zeros(10, dt) + zs.resize(25) + zs.resize((10, 10)) + + def test_view(self): + for dt in [bytes, np.void, str]: + zs = self._zeros(10, dt) + + # viewing as itself should be allowed + assert_equal(zs.view(dt).dtype, np.dtype(dt)) + + # viewing as any non-empty type gives an empty result + assert_equal(zs.view((dt, 1)).shape, (0,)) + + def test_dumps(self): + zs = self._zeros(10, int) + assert_equal(zs, pickle.loads(zs.dumps())) + + def test_pickle(self): + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + for dt in [bytes, np.void, str]: + zs = self._zeros(10, dt) + p = pickle.dumps(zs, protocol=proto) + zs2 = pickle.loads(p) + + assert_equal(zs.dtype, zs2.dtype) + + def test_pickle_empty(self): + """Checking if an empty array pickled and un-pickled will not cause a + segmentation fault""" + arr = np.array([]).reshape(999999, 0) + pk_dmp = pickle.dumps(arr) + pk_load = pickle.loads(pk_dmp) + + assert pk_load.size == 0 + + @pytest.mark.skipif(pickle.HIGHEST_PROTOCOL < 5, + reason="requires pickle protocol 5") + def 
test_pickle_with_buffercallback(self): + array = np.arange(10) + buffers = [] + bytes_string = pickle.dumps(array, buffer_callback=buffers.append, + protocol=5) + array_from_buffer = pickle.loads(bytes_string, buffers=buffers) + # when using pickle protocol 5 with buffer callbacks, + # array_from_buffer is reconstructed from a buffer holding a view + # to the initial array's data, so modifying an element in array + # should modify it in array_from_buffer too. + array[0] = -1 + assert array_from_buffer[0] == -1, array_from_buffer[0] + + +class TestMethods: + + sort_kinds = ['quicksort', 'heapsort', 'stable'] + + def test_all_where(self): + a = np.array([[True, False, True], + [False, False, False], + [True, True, True]]) + wh_full = np.array([[True, False, True], + [False, False, False], + [True, False, True]]) + wh_lower = np.array([[False], + [False], + [True]]) + for _ax in [0, None]: + assert_equal(a.all(axis=_ax, where=wh_lower), + np.all(a[wh_lower[:,0],:], axis=_ax)) + assert_equal(np.all(a, axis=_ax, where=wh_lower), + a[wh_lower[:,0],:].all(axis=_ax)) + + assert_equal(a.all(where=wh_full), True) + assert_equal(np.all(a, where=wh_full), True) + assert_equal(a.all(where=False), True) + assert_equal(np.all(a, where=False), True) + + def test_any_where(self): + a = np.array([[True, False, True], + [False, False, False], + [True, True, True]]) + wh_full = np.array([[False, True, False], + [True, True, True], + [False, False, False]]) + wh_middle = np.array([[False], + [True], + [False]]) + for _ax in [0, None]: + assert_equal(a.any(axis=_ax, where=wh_middle), + np.any(a[wh_middle[:,0],:], axis=_ax)) + assert_equal(np.any(a, axis=_ax, where=wh_middle), + a[wh_middle[:,0],:].any(axis=_ax)) + assert_equal(a.any(where=wh_full), False) + assert_equal(np.any(a, where=wh_full), False) + assert_equal(a.any(where=False), False) + assert_equal(np.any(a, where=False), False) + + def test_compress(self): + tgt = [[5, 6, 7, 8, 9]] + arr = np.arange(10).reshape(2, 5) + out = arr.compress([0, 1], axis=0) + assert_equal(out, tgt) + + tgt = [[1, 3], [6, 8]] + out = arr.compress([0, 1, 0, 1, 0], axis=1) + assert_equal(out, tgt) + + tgt = [[1], [6]] + arr = np.arange(10).reshape(2, 5) + out = arr.compress([0, 1], axis=1) + assert_equal(out, tgt) + + arr = np.arange(10).reshape(2, 5) + out = arr.compress([0, 1]) + assert_equal(out, 1) + + def test_choose(self): + x = 2*np.ones((3,), dtype=int) + y = 3*np.ones((3,), dtype=int) + x2 = 2*np.ones((2, 3), dtype=int) + y2 = 3*np.ones((2, 3), dtype=int) + ind = np.array([0, 0, 1]) + + A = ind.choose((x, y)) + assert_equal(A, [2, 2, 3]) + + A = ind.choose((x2, y2)) + assert_equal(A, [[2, 2, 3], [2, 2, 3]]) + + A = ind.choose((x, y2)) + assert_equal(A, [[2, 2, 3], [2, 2, 3]]) + + oned = np.ones(1) + # gh-12031, caused SEGFAULT + assert_raises(TypeError, oned.choose,np.void(0), [oned]) + + out = np.array(0) + ret = np.choose(np.array(1), [10, 20, 30], out=out) + assert out is ret + assert_equal(out[()], 20) + + # gh-6272 check overlap on out + x = np.arange(5) + y = np.choose([0,0,0], [x[:3], x[:3], x[:3]], out=x[1:4], mode='wrap') + assert_equal(y, np.array([0, 1, 2])) + + def test_prod(self): + ba = [1, 2, 10, 11, 6, 5, 4] + ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]] + + for ctype in [np.int16, np.uint16, np.int32, np.uint32, + np.float32, np.float64, np.complex64, np.complex128]: + a = np.array(ba, ctype) + a2 = np.array(ba2, ctype) + if ctype in ['1', 'b']: + assert_raises(ArithmeticError, a.prod) + assert_raises(ArithmeticError, a2.prod, axis=1) + else: 
+ assert_equal(a.prod(axis=0), 26400) + assert_array_equal(a2.prod(axis=0), + np.array([50, 36, 84, 180], ctype)) + assert_array_equal(a2.prod(axis=-1), + np.array([24, 1890, 600], ctype)) + + @pytest.mark.parametrize('dtype', [None, object]) + def test_repeat(self, dtype): + m = np.array([1, 2, 3, 4, 5, 6], dtype=dtype) + m_rect = m.reshape((2, 3)) + + A = m.repeat([1, 3, 2, 1, 1, 2]) + assert_equal(A, [1, 2, 2, 2, 3, + 3, 4, 5, 6, 6]) + + A = m.repeat(2) + assert_equal(A, [1, 1, 2, 2, 3, 3, + 4, 4, 5, 5, 6, 6]) + + A = m_rect.repeat([2, 1], axis=0) + assert_equal(A, [[1, 2, 3], + [1, 2, 3], + [4, 5, 6]]) + + A = m_rect.repeat([1, 3, 2], axis=1) + assert_equal(A, [[1, 2, 2, 2, 3, 3], + [4, 5, 5, 5, 6, 6]]) + + A = m_rect.repeat(2, axis=0) + assert_equal(A, [[1, 2, 3], + [1, 2, 3], + [4, 5, 6], + [4, 5, 6]]) + + A = m_rect.repeat(2, axis=1) + assert_equal(A, [[1, 1, 2, 2, 3, 3], + [4, 4, 5, 5, 6, 6]]) + + def test_reshape(self): + arr = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]) + + tgt = [[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]] + assert_equal(arr.reshape(2, 6), tgt) + + tgt = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]] + assert_equal(arr.reshape(3, 4), tgt) + + tgt = [[1, 10, 8, 6], [4, 2, 11, 9], [7, 5, 3, 12]] + assert_equal(arr.reshape((3, 4), order='F'), tgt) + + tgt = [[1, 4, 7, 10], [2, 5, 8, 11], [3, 6, 9, 12]] + assert_equal(arr.T.reshape((3, 4), order='C'), tgt) + + def test_round(self): + def check_round(arr, expected, *round_args): + assert_equal(arr.round(*round_args), expected) + # With output array + out = np.zeros_like(arr) + res = arr.round(*round_args, out=out) + assert_equal(out, expected) + assert out is res + + check_round(np.array([1.2, 1.5]), [1, 2]) + check_round(np.array(1.5), 2) + check_round(np.array([12.2, 15.5]), [10, 20], -1) + check_round(np.array([12.15, 15.51]), [12.2, 15.5], 1) + # Complex rounding + check_round(np.array([4.5 + 1.5j]), [4 + 2j]) + check_round(np.array([12.5 + 15.5j]), [10 + 20j], -1) + + def test_squeeze(self): + a = np.array([[[1], [2], [3]]]) + assert_equal(a.squeeze(), [1, 2, 3]) + assert_equal(a.squeeze(axis=(0,)), [[1], [2], [3]]) + assert_raises(ValueError, a.squeeze, axis=(1,)) + assert_equal(a.squeeze(axis=(2,)), [[1, 2, 3]]) + + def test_transpose(self): + a = np.array([[1, 2], [3, 4]]) + assert_equal(a.transpose(), [[1, 3], [2, 4]]) + assert_raises(ValueError, lambda: a.transpose(0)) + assert_raises(ValueError, lambda: a.transpose(0, 0)) + assert_raises(ValueError, lambda: a.transpose(0, 1, 2)) + + def test_sort(self): + # test ordering for floats and complex containing nans. It is only + # necessary to check the less-than comparison, so sorts that + # only follow the insertion sort path are sufficient. We only + # test doubles and complex doubles as the logic is the same. + + # check doubles + msg = "Test real sort order with nans" + a = np.array([np.nan, 1, 0]) + b = np.sort(a) + assert_equal(b, a[::-1], msg) + # check complex + msg = "Test complex sort order with nans" + a = np.zeros(9, dtype=np.complex128) + a.real += [np.nan, np.nan, np.nan, 1, 0, 1, 1, 0, 0] + a.imag += [np.nan, 1, 0, np.nan, np.nan, 1, 0, 1, 0] + b = np.sort(a) + assert_equal(b, a[::-1], msg) + + # all c scalar sorts use the same code with different types + # so it suffices to run a quick check with one type. The number + # of sorted items must be greater than ~50 to check the actual + # algorithm because quick and merge sort fall over to insertion + # sort for small arrays. 
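+    # Note on the kinds exercised below: in current NumPy 'quicksort' is an
+    # introsort (quicksort with a heapsort fallback) and 'stable' picks a
+    # timsort/radix-sort variant depending on the dtype, so the 101-element
+    # inputs above the ~50-element insertion-sort cutoff exercise the real
+    # algorithms rather than the small-array fallback.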
+ + @pytest.mark.parametrize('dtype', [np.uint8, np.uint16, np.uint32, np.uint64, + np.float16, np.float32, np.float64, + np.longdouble]) + def test_sort_unsigned(self, dtype): + a = np.arange(101, dtype=dtype) + b = a[::-1].copy() + for kind in self.sort_kinds: + msg = "scalar sort, kind=%s" % kind + c = a.copy() + c.sort(kind=kind) + assert_equal(c, a, msg) + c = b.copy() + c.sort(kind=kind) + assert_equal(c, a, msg) + + @pytest.mark.parametrize('dtype', + [np.int8, np.int16, np.int32, np.int64, np.float16, + np.float32, np.float64, np.longdouble]) + def test_sort_signed(self, dtype): + a = np.arange(-50, 51, dtype=dtype) + b = a[::-1].copy() + for kind in self.sort_kinds: + msg = "scalar sort, kind=%s" % (kind) + c = a.copy() + c.sort(kind=kind) + assert_equal(c, a, msg) + c = b.copy() + c.sort(kind=kind) + assert_equal(c, a, msg) + + @pytest.mark.parametrize('dtype', [np.float32, np.float64, np.longdouble]) + @pytest.mark.parametrize('part', ['real', 'imag']) + def test_sort_complex(self, part, dtype): + # test complex sorts. These use the same code as the scalars + # but the compare function differs. + cdtype = { + np.single: np.csingle, + np.double: np.cdouble, + np.longdouble: np.clongdouble, + }[dtype] + a = np.arange(-50, 51, dtype=dtype) + b = a[::-1].copy() + ai = (a * (1+1j)).astype(cdtype) + bi = (b * (1+1j)).astype(cdtype) + setattr(ai, part, 1) + setattr(bi, part, 1) + for kind in self.sort_kinds: + msg = "complex sort, %s part == 1, kind=%s" % (part, kind) + c = ai.copy() + c.sort(kind=kind) + assert_equal(c, ai, msg) + c = bi.copy() + c.sort(kind=kind) + assert_equal(c, ai, msg) + + def test_sort_complex_byte_swapping(self): + # test sorting of complex arrays requiring byte-swapping, gh-5441 + for endianness in '<>': + for dt in np.typecodes['Complex']: + arr = np.array([1+3.j, 2+2.j, 3+1.j], dtype=endianness + dt) + c = arr.copy() + c.sort() + msg = 'byte-swapped complex sort, dtype={0}'.format(dt) + assert_equal(c, arr, msg) + + @pytest.mark.parametrize('dtype', [np.bytes_, np.str_]) + def test_sort_string(self, dtype): + # np.array will perform the encoding to bytes for us in the bytes test + a = np.array(['aaaaaaaa' + chr(i) for i in range(101)], dtype=dtype) + b = a[::-1].copy() + for kind in self.sort_kinds: + msg = "kind=%s" % kind + c = a.copy() + c.sort(kind=kind) + assert_equal(c, a, msg) + c = b.copy() + c.sort(kind=kind) + assert_equal(c, a, msg) + + def test_sort_object(self): + # test object array sorts. + a = np.empty((101,), dtype=object) + a[:] = list(range(101)) + b = a[::-1] + for kind in ['q', 'h', 'm']: + msg = "kind=%s" % kind + c = a.copy() + c.sort(kind=kind) + assert_equal(c, a, msg) + c = b.copy() + c.sort(kind=kind) + assert_equal(c, a, msg) + + @pytest.mark.parametrize("dt", [ + np.dtype([('f', float), ('i', int)]), + np.dtype([('f', float), ('i', object)])]) + @pytest.mark.parametrize("step", [1, 2]) + def test_sort_structured(self, dt, step): + # test record array sorts. 
+ a = np.array([(i, i) for i in range(101*step)], dtype=dt) + b = a[::-1] + for kind in ['q', 'h', 'm']: + msg = "kind=%s" % kind + c = a.copy()[::step] + indx = c.argsort(kind=kind) + c.sort(kind=kind) + assert_equal(c, a[::step], msg) + assert_equal(a[::step][indx], a[::step], msg) + c = b.copy()[::step] + indx = c.argsort(kind=kind) + c.sort(kind=kind) + assert_equal(c, a[step-1::step], msg) + assert_equal(b[::step][indx], a[step-1::step], msg) + + @pytest.mark.parametrize('dtype', ['datetime64[D]', 'timedelta64[D]']) + def test_sort_time(self, dtype): + # test datetime64 and timedelta64 sorts. + a = np.arange(0, 101, dtype=dtype) + b = a[::-1] + for kind in ['q', 'h', 'm']: + msg = "kind=%s" % kind + c = a.copy() + c.sort(kind=kind) + assert_equal(c, a, msg) + c = b.copy() + c.sort(kind=kind) + assert_equal(c, a, msg) + + def test_sort_axis(self): + # check axis handling. This should be the same for all type + # specific sorts, so we only check it for one type and one kind + a = np.array([[3, 2], [1, 0]]) + b = np.array([[1, 0], [3, 2]]) + c = np.array([[2, 3], [0, 1]]) + d = a.copy() + d.sort(axis=0) + assert_equal(d, b, "test sort with axis=0") + d = a.copy() + d.sort(axis=1) + assert_equal(d, c, "test sort with axis=1") + d = a.copy() + d.sort() + assert_equal(d, c, "test sort with default axis") + + def test_sort_size_0(self): + # check axis handling for multidimensional empty arrays + a = np.array([]) + a.shape = (3, 2, 1, 0) + for axis in range(-a.ndim, a.ndim): + msg = 'test empty array sort with axis={0}'.format(axis) + assert_equal(np.sort(a, axis=axis), a, msg) + msg = 'test empty array sort with axis=None' + assert_equal(np.sort(a, axis=None), a.ravel(), msg) + + def test_sort_bad_ordering(self): + # test generic class with bogus ordering, + # should not segfault. 
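+        # ``Boom.__lt__`` below always returns True, i.e. an inconsistent
+        # ordering; since every element is the same instance, the assertion
+        # only checks that the sort terminates cleanly, not that any
+        # meaningful order is produced.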
+ class Boom: + def __lt__(self, other): + return True + + a = np.array([Boom()] * 100, dtype=object) + for kind in self.sort_kinds: + msg = "kind=%s" % kind + c = a.copy() + c.sort(kind=kind) + assert_equal(c, a, msg) + + def test_void_sort(self): + # gh-8210 - previously segfaulted + for i in range(4): + rand = np.random.randint(256, size=4000, dtype=np.uint8) + arr = rand.view('V4') + arr[::-1].sort() + + dt = np.dtype([('val', 'i4', (1,))]) + for i in range(4): + rand = np.random.randint(256, size=4000, dtype=np.uint8) + arr = rand.view(dt) + arr[::-1].sort() + + def test_sort_raises(self): + #gh-9404 + arr = np.array([0, datetime.now(), 1], dtype=object) + for kind in self.sort_kinds: + assert_raises(TypeError, arr.sort, kind=kind) + #gh-3879 + class Raiser: + def raises_anything(*args, **kwargs): + raise TypeError("SOMETHING ERRORED") + __eq__ = __ne__ = __lt__ = __gt__ = __ge__ = __le__ = raises_anything + arr = np.array([[Raiser(), n] for n in range(10)]).reshape(-1) + np.random.shuffle(arr) + for kind in self.sort_kinds: + assert_raises(TypeError, arr.sort, kind=kind) + + def test_sort_degraded(self): + # test degraded dataset would take minutes to run with normal qsort + d = np.arange(1000000) + do = d.copy() + x = d + # create a median of 3 killer where each median is the sorted second + # last element of the quicksort partition + while x.size > 3: + mid = x.size // 2 + x[mid], x[-2] = x[-2], x[mid] + x = x[:-2] + + assert_equal(np.sort(d), do) + assert_equal(d[np.argsort(d)], do) + + def test_copy(self): + def assert_fortran(arr): + assert_(arr.flags.fortran) + assert_(arr.flags.f_contiguous) + assert_(not arr.flags.c_contiguous) + + def assert_c(arr): + assert_(not arr.flags.fortran) + assert_(not arr.flags.f_contiguous) + assert_(arr.flags.c_contiguous) + + a = np.empty((2, 2), order='F') + # Test copying a Fortran array + assert_c(a.copy()) + assert_c(a.copy('C')) + assert_fortran(a.copy('F')) + assert_fortran(a.copy('A')) + + # Now test starting with a C array. 
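+ # (order='A' preserves the source layout: the Fortran-ordered array above
+ # copies to Fortran order, while the C-ordered array below copies to C
+ # order; 'C' and 'F' force the respective layouts regardless of input.)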
+ a = np.empty((2, 2), order='C') + assert_c(a.copy()) + assert_c(a.copy('C')) + assert_fortran(a.copy('F')) + assert_c(a.copy('A')) + + @pytest.mark.parametrize("dtype", ['O', np.int32, 'i,O']) + def test__deepcopy__(self, dtype): + # Force the entry of NULLs into array + a = np.empty(4, dtype=dtype) + ctypes.memset(a.ctypes.data, 0, a.nbytes) + + # Ensure no error is raised, see gh-21833 + b = a.__deepcopy__({}) + + a[0] = 42 + with pytest.raises(AssertionError): + assert_array_equal(a, b) + + def test__deepcopy__catches_failure(self): + class MyObj: + def __deepcopy__(self, *args, **kwargs): + raise RuntimeError + + arr = np.array([1, MyObj(), 3], dtype='O') + with pytest.raises(RuntimeError): + arr.__deepcopy__({}) + + def test_sort_order(self): + # Test sorting an array with fields + x1 = np.array([21, 32, 14]) + x2 = np.array(['my', 'first', 'name']) + x3 = np.array([3.1, 4.5, 6.2]) + r = np.rec.fromarrays([x1, x2, x3], names='id,word,number') + + r.sort(order=['id']) + assert_equal(r.id, np.array([14, 21, 32])) + assert_equal(r.word, np.array(['name', 'my', 'first'])) + assert_equal(r.number, np.array([6.2, 3.1, 4.5])) + + r.sort(order=['word']) + assert_equal(r.id, np.array([32, 21, 14])) + assert_equal(r.word, np.array(['first', 'my', 'name'])) + assert_equal(r.number, np.array([4.5, 3.1, 6.2])) + + r.sort(order=['number']) + assert_equal(r.id, np.array([21, 32, 14])) + assert_equal(r.word, np.array(['my', 'first', 'name'])) + assert_equal(r.number, np.array([3.1, 4.5, 6.2])) + + assert_raises_regex(ValueError, 'duplicate', + lambda: r.sort(order=['id', 'id'])) + + if sys.byteorder == 'little': + strtype = '>i2' + else: + strtype = '<i2' + mydtype = [('name', 'U5'), ('col2', strtype)] + r = np.array([('a', 1), ('b', 255), ('c', 3), ('d', 258)], + dtype=mydtype) + r.sort(order='col2') + assert_equal(r['col2'], [1, 3, 255, 258]) + assert_equal(r, np.array([('a', 1), ('c', 3), ('b', 255), ('d', 258)], + dtype=mydtype)) + + def test_argsort(self): + # all c scalar argsorts use the same code with different types + # so it suffices to run a quick check with one type. The number + # of sorted items must be greater than ~50 to check the actual + # algorithm because quick and merge sort fall over to insertion + # sort for small arrays. + + for dtype in [np.int32, np.uint32, np.float32]: + a = np.arange(101, dtype=dtype) + b = a[::-1].copy() + for kind in self.sort_kinds: + msg = "scalar argsort, kind=%s, dtype=%s" % (kind, dtype) + assert_equal(a.copy().argsort(kind=kind), a, msg) + assert_equal(b.copy().argsort(kind=kind), b, msg) + + # test complex argsorts. These use the same code as the scalars + # but the compare function differs. + ai = a*1j + 1 + bi = b*1j + 1 + for kind in self.sort_kinds: + msg = "complex argsort, kind=%s" % kind + assert_equal(ai.copy().argsort(kind=kind), a, msg) + assert_equal(bi.copy().argsort(kind=kind), b, msg) + ai = a + 1j + bi = b + 1j + for kind in self.sort_kinds: + msg = "complex argsort, kind=%s" % kind + assert_equal(ai.copy().argsort(kind=kind), a, msg) + assert_equal(bi.copy().argsort(kind=kind), b, msg) + + # test argsort of complex arrays requiring byte-swapping, gh-5441 + for endianness in '<>': + for dt in np.typecodes['Complex']: + arr = np.array([1+3.j, 2+2.j, 3+1.j], dtype=endianness + dt) + msg = 'byte-swapped complex argsort, dtype={0}'.format(dt) + assert_equal(arr.argsort(), + np.arange(len(arr), dtype=np.intp), msg) + + # test string argsorts. 
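+ # (The string and unicode arrays below are constructed already sorted: every
+ # element shares the prefix 'aaaaaaaa' and differs only in its final
+ # character chr(i), so the expected argsort is simply np.arange(101).)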
+ s = 'aaaaaaaa' + a = np.array([s + chr(i) for i in range(101)]) + b = a[::-1].copy() + r = np.arange(101) + rr = r[::-1] + for kind in self.sort_kinds: + msg = "string argsort, kind=%s" % kind + assert_equal(a.copy().argsort(kind=kind), r, msg) + assert_equal(b.copy().argsort(kind=kind), rr, msg) + + # test unicode argsorts. + s = 'aaaaaaaa' + a = np.array([s + chr(i) for i in range(101)], dtype=np.str_) + b = a[::-1] + r = np.arange(101) + rr = r[::-1] + for kind in self.sort_kinds: + msg = "unicode argsort, kind=%s" % kind + assert_equal(a.copy().argsort(kind=kind), r, msg) + assert_equal(b.copy().argsort(kind=kind), rr, msg) + + # test object array argsorts. + a = np.empty((101,), dtype=object) + a[:] = list(range(101)) + b = a[::-1] + r = np.arange(101) + rr = r[::-1] + for kind in self.sort_kinds: + msg = "object argsort, kind=%s" % kind + assert_equal(a.copy().argsort(kind=kind), r, msg) + assert_equal(b.copy().argsort(kind=kind), rr, msg) + + # test structured array argsorts. + dt = np.dtype([('f', float), ('i', int)]) + a = np.array([(i, i) for i in range(101)], dtype=dt) + b = a[::-1] + r = np.arange(101) + rr = r[::-1] + for kind in self.sort_kinds: + msg = "structured array argsort, kind=%s" % kind + assert_equal(a.copy().argsort(kind=kind), r, msg) + assert_equal(b.copy().argsort(kind=kind), rr, msg) + + # test datetime64 argsorts. + a = np.arange(0, 101, dtype='datetime64[D]') + b = a[::-1] + r = np.arange(101) + rr = r[::-1] + for kind in ['q', 'h', 'm']: + msg = "datetime64 argsort, kind=%s" % kind + assert_equal(a.copy().argsort(kind=kind), r, msg) + assert_equal(b.copy().argsort(kind=kind), rr, msg) + + # test timedelta64 argsorts. + a = np.arange(0, 101, dtype='timedelta64[D]') + b = a[::-1] + r = np.arange(101) + rr = r[::-1] + for kind in ['q', 'h', 'm']: + msg = "timedelta64 argsort, kind=%s" % kind + assert_equal(a.copy().argsort(kind=kind), r, msg) + assert_equal(b.copy().argsort(kind=kind), rr, msg) + + # check axis handling. 
This should be the same for all type + # specific argsorts, so we only check it for one type and one kind + a = np.array([[3, 2], [1, 0]]) + b = np.array([[1, 1], [0, 0]]) + c = np.array([[1, 0], [1, 0]]) + assert_equal(a.copy().argsort(axis=0), b) + assert_equal(a.copy().argsort(axis=1), c) + assert_equal(a.copy().argsort(), c) + + # check axis handling for multidimensional empty arrays + a = np.array([]) + a.shape = (3, 2, 1, 0) + for axis in range(-a.ndim, a.ndim): + msg = 'test empty array argsort with axis={0}'.format(axis) + assert_equal(np.argsort(a, axis=axis), + np.zeros_like(a, dtype=np.intp), msg) + msg = 'test empty array argsort with axis=None' + assert_equal(np.argsort(a, axis=None), + np.zeros_like(a.ravel(), dtype=np.intp), msg) + + # check that stable argsorts are stable + r = np.arange(100) + # scalars + a = np.zeros(100) + assert_equal(a.argsort(kind='m'), r) + # complex + a = np.zeros(100, dtype=complex) + assert_equal(a.argsort(kind='m'), r) + # string + a = np.array(['aaaaaaaaa' for i in range(100)]) + assert_equal(a.argsort(kind='m'), r) + # unicode + a = np.array(['aaaaaaaaa' for i in range(100)], dtype=np.str_) + assert_equal(a.argsort(kind='m'), r) + + def test_sort_unicode_kind(self): + d = np.arange(10) + k = b'\xc3\xa4'.decode("UTF8") + assert_raises(ValueError, d.sort, kind=k) + assert_raises(ValueError, d.argsort, kind=k) + + @pytest.mark.parametrize('a', [ + np.array([0, 1, np.nan], dtype=np.float16), + np.array([0, 1, np.nan], dtype=np.float32), + np.array([0, 1, np.nan]), + ]) + def test_searchsorted_floats(self, a): + # test for floats arrays containing nans. Explicitly test + # half, single, and double precision floats to verify that + # the NaN-handling is correct. + msg = "Test real (%s) searchsorted with nans, side='l'" % a.dtype + b = a.searchsorted(a, side='left') + assert_equal(b, np.arange(3), msg) + msg = "Test real (%s) searchsorted with nans, side='r'" % a.dtype + b = a.searchsorted(a, side='right') + assert_equal(b, np.arange(1, 4), msg) + # check keyword arguments + a.searchsorted(v=1) + x = np.array([0, 1, np.nan], dtype='float32') + y = np.searchsorted(x, x[-1]) + assert_equal(y, 2) + + def test_searchsorted_complex(self): + # test for complex arrays containing nans. + # The search sorted routines use the compare functions for the + # array type, so this checks if that is consistent with the sort + # order. 
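+ # (Complex values order lexicographically, first by real part and then by
+ # imaginary part, with nan entries sorted to the end -- e.g.
+ # np.sort(np.array([2+1j, 1+2j, 1+1j])) gives [1+1j, 1+2j, 2+1j] -- and
+ # searchsorted must agree with that ordering.)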
+ # check double complex + a = np.zeros(9, dtype=np.complex128) + a.real += [0, 0, 1, 1, 0, 1, np.nan, np.nan, np.nan] + a.imag += [0, 1, 0, 1, np.nan, np.nan, 0, 1, np.nan] + msg = "Test complex searchsorted with nans, side='l'" + b = a.searchsorted(a, side='left') + assert_equal(b, np.arange(9), msg) + msg = "Test complex searchsorted with nans, side='r'" + b = a.searchsorted(a, side='right') + assert_equal(b, np.arange(1, 10), msg) + msg = "Test searchsorted with little endian, side='l'" + a = np.array([0, 128], dtype='<i4') + b = a.searchsorted(np.array(128, dtype='<i4')) + assert_equal(b, 1, msg) + msg = "Test searchsorted with big endian, side='l'" + a = np.array([0, 128], dtype='>i4') + b = a.searchsorted(np.array(128, dtype='>i4')) + assert_equal(b, 1, msg) + + def test_searchsorted_n_elements(self): + # Check 0 elements + a = np.ones(0) + b = a.searchsorted([0, 1, 2], 'left') + assert_equal(b, [0, 0, 0]) + b = a.searchsorted([0, 1, 2], 'right') + assert_equal(b, [0, 0, 0]) + a = np.ones(1) + # Check 1 element + b = a.searchsorted([0, 1, 2], 'left') + assert_equal(b, [0, 0, 1]) + b = a.searchsorted([0, 1, 2], 'right') + assert_equal(b, [0, 1, 1]) + # Check all elements equal + a = np.ones(2) + b = a.searchsorted([0, 1, 2], 'left') + assert_equal(b, [0, 0, 2]) + b = a.searchsorted([0, 1, 2], 'right') + assert_equal(b, [0, 2, 2]) + + def test_searchsorted_unaligned_array(self): + # Test searching unaligned array + a = np.arange(10) + aligned = np.empty(a.itemsize * a.size + 1, 'uint8') + unaligned = aligned[1:].view(a.dtype) + unaligned[:] = a + # Test searching unaligned array + b = unaligned.searchsorted(a, 'left') + assert_equal(b, a) + b = unaligned.searchsorted(a, 'right') + assert_equal(b, a + 1) + # Test searching for unaligned keys + b = a.searchsorted(unaligned, 'left') + assert_equal(b, a) + b = a.searchsorted(unaligned, 'right') + assert_equal(b, a + 1) + + def test_searchsorted_resetting(self): + # Test smart resetting of binsearch indices + a = np.arange(5) + b = a.searchsorted([6, 5, 4], 'left') + assert_equal(b, [5, 5, 4]) + b = a.searchsorted([6, 5, 4], 'right') + assert_equal(b, [5, 5, 5]) + + def test_searchsorted_type_specific(self): + # Test all type specific binary search functions + types = ''.join((np.typecodes['AllInteger'], np.typecodes['AllFloat'], + np.typecodes['Datetime'], '?O')) + for dt in types: + if dt == 'M': + dt = 'M8[D]' + if dt == '?': + a = np.arange(2, dtype=dt) + out = np.arange(2) + else: + a = np.arange(0, 5, dtype=dt) + out = np.arange(5) + b = a.searchsorted(a, 'left') + assert_equal(b, out) + b = a.searchsorted(a, 'right') + assert_equal(b, out + 1) + # Test empty array, use a fresh array to get warnings in + # valgrind if access happens. + e = np.ndarray(shape=0, buffer=b'', dtype=dt) + b = e.searchsorted(a, 'left') + assert_array_equal(b, np.zeros(len(a), dtype=np.intp)) + b = a.searchsorted(e, 'left') + assert_array_equal(b, np.zeros(0, dtype=np.intp)) + + def test_searchsorted_unicode(self): + # Test searchsorted on unicode strings. + + # 1.6.1 contained a string length miscalculation in + # arraytypes.c.src:UNICODE_compare() which manifested as + # incorrect/inconsistent results from searchsorted. 
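+ # (The strings below share a long common prefix and differ only near the
+ # end; a correct compare must locate each element at exactly its own index,
+ # with side='left' giving i and side='right' giving i + 1.)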
+ a = np.array(['P:\\20x_dapi_cy3\\20x_dapi_cy3_20100185_1', + 'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100186_1', + 'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100187_1', + 'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100189_1', + 'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100190_1', + 'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100191_1', + 'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100192_1', + 'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100193_1', + 'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100194_1', + 'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100195_1', + 'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100196_1', + 'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100197_1', + 'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100198_1', + 'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100199_1'], + dtype=np.str_) + ind = np.arange(len(a)) + assert_equal([a.searchsorted(v, 'left') for v in a], ind) + assert_equal([a.searchsorted(v, 'right') for v in a], ind + 1) + assert_equal([a.searchsorted(a[i], 'left') for i in ind], ind) + assert_equal([a.searchsorted(a[i], 'right') for i in ind], ind + 1) + + def test_searchsorted_with_invalid_sorter(self): + a = np.array([5, 2, 1, 3, 4]) + s = np.argsort(a) + assert_raises(TypeError, np.searchsorted, a, 0, + sorter=np.array((1, (2, 3)), dtype=object)) + assert_raises(TypeError, np.searchsorted, a, 0, sorter=[1.1]) + assert_raises(ValueError, np.searchsorted, a, 0, sorter=[1, 2, 3, 4]) + assert_raises(ValueError, np.searchsorted, a, 0, sorter=[1, 2, 3, 4, 5, 6]) + + # bounds check + assert_raises(ValueError, np.searchsorted, a, 4, sorter=[0, 1, 2, 3, 5]) + assert_raises(ValueError, np.searchsorted, a, 0, sorter=[-1, 0, 1, 2, 3]) + assert_raises(ValueError, np.searchsorted, a, 0, sorter=[4, 0, -1, 2, 3]) + + def test_searchsorted_with_sorter(self): + a = np.random.rand(300) + s = a.argsort() + b = np.sort(a) + k = np.linspace(0, 1, 20) + assert_equal(b.searchsorted(k), a.searchsorted(k, sorter=s)) + + a = np.array([0, 1, 2, 3, 5]*20) + s = a.argsort() + k = [0, 1, 2, 3, 5] + expected = [0, 20, 40, 60, 80] + assert_equal(a.searchsorted(k, side='left', sorter=s), expected) + expected = [20, 40, 60, 80, 100] + assert_equal(a.searchsorted(k, side='right', sorter=s), expected) + + # Test searching unaligned array + keys = np.arange(10) + a = keys.copy() + np.random.shuffle(s) + s = a.argsort() + aligned = np.empty(a.itemsize * a.size + 1, 'uint8') + unaligned = aligned[1:].view(a.dtype) + # Test searching unaligned array + unaligned[:] = a + b = unaligned.searchsorted(keys, 'left', s) + assert_equal(b, keys) + b = unaligned.searchsorted(keys, 'right', s) + assert_equal(b, keys + 1) + # Test searching for unaligned keys + unaligned[:] = keys + b = a.searchsorted(unaligned, 'left', s) + assert_equal(b, keys) + b = a.searchsorted(unaligned, 'right', s) + assert_equal(b, keys + 1) + + # Test all type specific indirect binary search functions + types = ''.join((np.typecodes['AllInteger'], np.typecodes['AllFloat'], + np.typecodes['Datetime'], '?O')) + for dt in types: + if dt == 'M': + dt = 'M8[D]' + if dt == '?': + a = np.array([1, 0], dtype=dt) + # We want the sorter array to be of a type that is different + # from np.intp in all platforms, to check for #4698 + s = np.array([1, 0], dtype=np.int16) + out = np.array([1, 0]) + else: + a = np.array([3, 4, 1, 2, 0], dtype=dt) + # We want the sorter array to be of a type that is different + # from np.intp in all platforms, to check for #4698 + s = np.array([4, 2, 3, 0, 1], dtype=np.int16) + out = np.array([3, 4, 1, 2, 0], dtype=np.intp) + b = a.searchsorted(a, 'left', s) + assert_equal(b, out) + b = a.searchsorted(a, 'right', s) + assert_equal(b, 
out + 1) + # Test empty array, use a fresh array to get warnings in + # valgrind if access happens. + e = np.ndarray(shape=0, buffer=b'', dtype=dt) + b = e.searchsorted(a, 'left', s[:0]) + assert_array_equal(b, np.zeros(len(a), dtype=np.intp)) + b = a.searchsorted(e, 'left', s) + assert_array_equal(b, np.zeros(0, dtype=np.intp)) + + # Test non-contiguous sorter array + a = np.array([3, 4, 1, 2, 0]) + srt = np.empty((10,), dtype=np.intp) + srt[1::2] = -1 + srt[::2] = [4, 2, 3, 0, 1] + s = srt[::2] + out = np.array([3, 4, 1, 2, 0], dtype=np.intp) + b = a.searchsorted(a, 'left', s) + assert_equal(b, out) + b = a.searchsorted(a, 'right', s) + assert_equal(b, out + 1) + + def test_searchsorted_return_type(self): + # Functions returning indices should always return base ndarrays + class A(np.ndarray): + pass + a = np.arange(5).view(A) + b = np.arange(1, 3).view(A) + s = np.arange(5).view(A) + assert_(not isinstance(a.searchsorted(b, 'left'), A)) + assert_(not isinstance(a.searchsorted(b, 'right'), A)) + assert_(not isinstance(a.searchsorted(b, 'left', s), A)) + assert_(not isinstance(a.searchsorted(b, 'right', s), A)) + + @pytest.mark.parametrize("dtype", np.typecodes["All"]) + def test_argpartition_out_of_range(self, dtype): + # Test out of range values in kth raise an error, gh-5469 + d = np.arange(10).astype(dtype=dtype) + assert_raises(ValueError, d.argpartition, 10) + assert_raises(ValueError, d.argpartition, -11) + + @pytest.mark.parametrize("dtype", np.typecodes["All"]) + def test_partition_out_of_range(self, dtype): + # Test out of range values in kth raise an error, gh-5469 + d = np.arange(10).astype(dtype=dtype) + assert_raises(ValueError, d.partition, 10) + assert_raises(ValueError, d.partition, -11) + + def test_argpartition_integer(self): + # Test non-integer values in kth raise an error/ + d = np.arange(10) + assert_raises(TypeError, d.argpartition, 9.) + # Test also for generic type argpartition, which uses sorting + # and used to not bound check kth + d_obj = np.arange(10, dtype=object) + assert_raises(TypeError, d_obj.argpartition, 9.) + + def test_partition_integer(self): + # Test out of range values in kth raise an error, gh-5469 + d = np.arange(10) + assert_raises(TypeError, d.partition, 9.) + # Test also for generic type partition, which uses sorting + # and used to not bound check kth + d_obj = np.arange(10, dtype=object) + assert_raises(TypeError, d_obj.partition, 9.) 
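+
+ # (For reference in the partition tests below: np.partition(a, kth) only
+ # guarantees that the element at each requested kth index lands in its
+ # sorted position, with everything before it <= and everything after it >=
+ # that element; ordering within each side is unspecified. For example,
+ # np.partition([3, 4, 2, 1], 2) may come back as [2, 1, 3, 4] rather than
+ # the fully sorted array.)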
+ + @pytest.mark.parametrize("kth_dtype", np.typecodes["AllInteger"]) + def test_partition_empty_array(self, kth_dtype): + # check axis handling for multidimensional empty arrays + kth = np.array(0, dtype=kth_dtype)[()] + a = np.array([]) + a.shape = (3, 2, 1, 0) + for axis in range(-a.ndim, a.ndim): + msg = 'test empty array partition with axis={0}'.format(axis) + assert_equal(np.partition(a, kth, axis=axis), a, msg) + msg = 'test empty array partition with axis=None' + assert_equal(np.partition(a, kth, axis=None), a.ravel(), msg) + + @pytest.mark.parametrize("kth_dtype", np.typecodes["AllInteger"]) + def test_argpartition_empty_array(self, kth_dtype): + # check axis handling for multidimensional empty arrays + kth = np.array(0, dtype=kth_dtype)[()] + a = np.array([]) + a.shape = (3, 2, 1, 0) + for axis in range(-a.ndim, a.ndim): + msg = 'test empty array argpartition with axis={0}'.format(axis) + assert_equal(np.partition(a, kth, axis=axis), + np.zeros_like(a, dtype=np.intp), msg) + msg = 'test empty array argpartition with axis=None' + assert_equal(np.partition(a, kth, axis=None), + np.zeros_like(a.ravel(), dtype=np.intp), msg) + + def test_partition(self): + d = np.arange(10) + assert_raises(TypeError, np.partition, d, 2, kind=1) + assert_raises(ValueError, np.partition, d, 2, kind="nonsense") + assert_raises(ValueError, np.argpartition, d, 2, kind="nonsense") + assert_raises(ValueError, d.partition, 2, axis=0, kind="nonsense") + assert_raises(ValueError, d.argpartition, 2, axis=0, kind="nonsense") + for k in ("introselect",): + d = np.array([]) + assert_array_equal(np.partition(d, 0, kind=k), d) + assert_array_equal(np.argpartition(d, 0, kind=k), d) + d = np.ones(1) + assert_array_equal(np.partition(d, 0, kind=k)[0], d) + assert_array_equal(d[np.argpartition(d, 0, kind=k)], + np.partition(d, 0, kind=k)) + + # kth not modified + kth = np.array([30, 15, 5]) + okth = kth.copy() + np.partition(np.arange(40), kth) + assert_array_equal(kth, okth) + + for r in ([2, 1], [1, 2], [1, 1]): + d = np.array(r) + tgt = np.sort(d) + assert_array_equal(np.partition(d, 0, kind=k)[0], tgt[0]) + assert_array_equal(np.partition(d, 1, kind=k)[1], tgt[1]) + assert_array_equal(d[np.argpartition(d, 0, kind=k)], + np.partition(d, 0, kind=k)) + assert_array_equal(d[np.argpartition(d, 1, kind=k)], + np.partition(d, 1, kind=k)) + for i in range(d.size): + d[i:].partition(0, kind=k) + assert_array_equal(d, tgt) + + for r in ([3, 2, 1], [1, 2, 3], [2, 1, 3], [2, 3, 1], + [1, 1, 1], [1, 2, 2], [2, 2, 1], [1, 2, 1]): + d = np.array(r) + tgt = np.sort(d) + assert_array_equal(np.partition(d, 0, kind=k)[0], tgt[0]) + assert_array_equal(np.partition(d, 1, kind=k)[1], tgt[1]) + assert_array_equal(np.partition(d, 2, kind=k)[2], tgt[2]) + assert_array_equal(d[np.argpartition(d, 0, kind=k)], + np.partition(d, 0, kind=k)) + assert_array_equal(d[np.argpartition(d, 1, kind=k)], + np.partition(d, 1, kind=k)) + assert_array_equal(d[np.argpartition(d, 2, kind=k)], + np.partition(d, 2, kind=k)) + for i in range(d.size): + d[i:].partition(0, kind=k) + assert_array_equal(d, tgt) + + d = np.ones(50) + assert_array_equal(np.partition(d, 0, kind=k), d) + assert_array_equal(d[np.argpartition(d, 0, kind=k)], + np.partition(d, 0, kind=k)) + + # sorted + d = np.arange(49) + assert_equal(np.partition(d, 5, kind=k)[5], 5) + assert_equal(np.partition(d, 15, kind=k)[15], 15) + assert_array_equal(d[np.argpartition(d, 5, kind=k)], + np.partition(d, 5, kind=k)) + assert_array_equal(d[np.argpartition(d, 15, kind=k)], + np.partition(d, 15, kind=k)) 
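+
+ # (In the reversed-input checks that follow, negative kth values wrap the
+ # same way indices do: for the length-47 array, kth=-6 is equivalent to
+ # kth=41 and kth=-16 to kth=31.)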
+ + # rsorted + d = np.arange(47)[::-1] + assert_equal(np.partition(d, 6, kind=k)[6], 6) + assert_equal(np.partition(d, 16, kind=k)[16], 16) + assert_array_equal(d[np.argpartition(d, 6, kind=k)], + np.partition(d, 6, kind=k)) + assert_array_equal(d[np.argpartition(d, 16, kind=k)], + np.partition(d, 16, kind=k)) + + assert_array_equal(np.partition(d, -6, kind=k), + np.partition(d, 41, kind=k)) + assert_array_equal(np.partition(d, -16, kind=k), + np.partition(d, 31, kind=k)) + assert_array_equal(d[np.argpartition(d, -6, kind=k)], + np.partition(d, 41, kind=k)) + + # median of 3 killer, O(n^2) on pure median 3 pivot quickselect + # exercises the median of median of 5 code used to keep O(n) + d = np.arange(1000000) + x = np.roll(d, d.size // 2) + mid = x.size // 2 + 1 + assert_equal(np.partition(x, mid)[mid], mid) + d = np.arange(1000001) + x = np.roll(d, d.size // 2 + 1) + mid = x.size // 2 + 1 + assert_equal(np.partition(x, mid)[mid], mid) + + # max + d = np.ones(10) + d[1] = 4 + assert_equal(np.partition(d, (2, -1))[-1], 4) + assert_equal(np.partition(d, (2, -1))[2], 1) + assert_equal(d[np.argpartition(d, (2, -1))][-1], 4) + assert_equal(d[np.argpartition(d, (2, -1))][2], 1) + d[1] = np.nan + assert_(np.isnan(d[np.argpartition(d, (2, -1))][-1])) + assert_(np.isnan(np.partition(d, (2, -1))[-1])) + + # equal elements + d = np.arange(47) % 7 + tgt = np.sort(np.arange(47) % 7) + np.random.shuffle(d) + for i in range(d.size): + assert_equal(np.partition(d, i, kind=k)[i], tgt[i]) + assert_array_equal(d[np.argpartition(d, 6, kind=k)], + np.partition(d, 6, kind=k)) + assert_array_equal(d[np.argpartition(d, 16, kind=k)], + np.partition(d, 16, kind=k)) + for i in range(d.size): + d[i:].partition(0, kind=k) + assert_array_equal(d, tgt) + + d = np.array([0, 1, 2, 3, 4, 5, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 9]) + kth = [0, 3, 19, 20] + assert_equal(np.partition(d, kth, kind=k)[kth], (0, 3, 7, 7)) + assert_equal(d[np.argpartition(d, kth, kind=k)][kth], (0, 3, 7, 7)) + + d = np.array([2, 1]) + d.partition(0, kind=k) + assert_raises(ValueError, d.partition, 2) + assert_raises(np.AxisError, d.partition, 3, axis=1) + assert_raises(ValueError, np.partition, d, 2) + assert_raises(np.AxisError, np.partition, d, 2, axis=1) + assert_raises(ValueError, d.argpartition, 2) + assert_raises(np.AxisError, d.argpartition, 3, axis=1) + assert_raises(ValueError, np.argpartition, d, 2) + assert_raises(np.AxisError, np.argpartition, d, 2, axis=1) + d = np.arange(10).reshape((2, 5)) + d.partition(1, axis=0, kind=k) + d.partition(4, axis=1, kind=k) + np.partition(d, 1, axis=0, kind=k) + np.partition(d, 4, axis=1, kind=k) + np.partition(d, 1, axis=None, kind=k) + np.partition(d, 9, axis=None, kind=k) + d.argpartition(1, axis=0, kind=k) + d.argpartition(4, axis=1, kind=k) + np.argpartition(d, 1, axis=0, kind=k) + np.argpartition(d, 4, axis=1, kind=k) + np.argpartition(d, 1, axis=None, kind=k) + np.argpartition(d, 9, axis=None, kind=k) + assert_raises(ValueError, d.partition, 2, axis=0) + assert_raises(ValueError, d.partition, 11, axis=1) + assert_raises(TypeError, d.partition, 2, axis=None) + assert_raises(ValueError, np.partition, d, 9, axis=1) + assert_raises(ValueError, np.partition, d, 11, axis=None) + assert_raises(ValueError, d.argpartition, 2, axis=0) + assert_raises(ValueError, d.argpartition, 11, axis=1) + assert_raises(ValueError, np.argpartition, d, 9, axis=1) + assert_raises(ValueError, np.argpartition, d, 11, axis=None) + + td = [(dt, s) for dt in [np.int32, np.float32, np.complex64] + for s in (9, 
16)] + for dt, s in td: + aae = assert_array_equal + at = assert_ + + d = np.arange(s, dtype=dt) + np.random.shuffle(d) + d1 = np.tile(np.arange(s, dtype=dt), (4, 1)) + map(np.random.shuffle, d1) + d0 = np.transpose(d1) + for i in range(d.size): + p = np.partition(d, i, kind=k) + assert_equal(p[i], i) + # all before are smaller + assert_array_less(p[:i], p[i]) + # all after are larger + assert_array_less(p[i], p[i + 1:]) + aae(p, d[np.argpartition(d, i, kind=k)]) + + p = np.partition(d1, i, axis=1, kind=k) + aae(p[:, i], np.array([i] * d1.shape[0], dtype=dt)) + # array_less does not seem to work right + at((p[:, :i].T <= p[:, i]).all(), + msg="%d: %r <= %r" % (i, p[:, i], p[:, :i].T)) + at((p[:, i + 1:].T > p[:, i]).all(), + msg="%d: %r < %r" % (i, p[:, i], p[:, i + 1:].T)) + aae(p, d1[np.arange(d1.shape[0])[:, None], + np.argpartition(d1, i, axis=1, kind=k)]) + + p = np.partition(d0, i, axis=0, kind=k) + aae(p[i, :], np.array([i] * d1.shape[0], dtype=dt)) + # array_less does not seem to work right + at((p[:i, :] <= p[i, :]).all(), + msg="%d: %r <= %r" % (i, p[i, :], p[:i, :])) + at((p[i + 1:, :] > p[i, :]).all(), + msg="%d: %r < %r" % (i, p[i, :], p[:, i + 1:])) + aae(p, d0[np.argpartition(d0, i, axis=0, kind=k), + np.arange(d0.shape[1])[None, :]]) + + # check inplace + dc = d.copy() + dc.partition(i, kind=k) + assert_equal(dc, np.partition(d, i, kind=k)) + dc = d0.copy() + dc.partition(i, axis=0, kind=k) + assert_equal(dc, np.partition(d0, i, axis=0, kind=k)) + dc = d1.copy() + dc.partition(i, axis=1, kind=k) + assert_equal(dc, np.partition(d1, i, axis=1, kind=k)) + + def assert_partitioned(self, d, kth): + prev = 0 + for k in np.sort(kth): + assert_array_less(d[prev:k], d[k], err_msg='kth %d' % k) + assert_((d[k:] >= d[k]).all(), + msg="kth %d, %r not greater equal %d" % (k, d[k:], d[k])) + prev = k + 1 + + def test_partition_iterative(self): + d = np.arange(17) + kth = (0, 1, 2, 429, 231) + assert_raises(ValueError, d.partition, kth) + assert_raises(ValueError, d.argpartition, kth) + d = np.arange(10).reshape((2, 5)) + assert_raises(ValueError, d.partition, kth, axis=0) + assert_raises(ValueError, d.partition, kth, axis=1) + assert_raises(ValueError, np.partition, d, kth, axis=1) + assert_raises(ValueError, np.partition, d, kth, axis=None) + + d = np.array([3, 4, 2, 1]) + p = np.partition(d, (0, 3)) + self.assert_partitioned(p, (0, 3)) + self.assert_partitioned(d[np.argpartition(d, (0, 3))], (0, 3)) + + assert_array_equal(p, np.partition(d, (-3, -1))) + assert_array_equal(p, d[np.argpartition(d, (-3, -1))]) + + d = np.arange(17) + np.random.shuffle(d) + d.partition(range(d.size)) + assert_array_equal(np.arange(17), d) + np.random.shuffle(d) + assert_array_equal(np.arange(17), d[d.argpartition(range(d.size))]) + + # test unsorted kth + d = np.arange(17) + np.random.shuffle(d) + keys = np.array([1, 3, 8, -2]) + np.random.shuffle(d) + p = np.partition(d, keys) + self.assert_partitioned(p, keys) + p = d[np.argpartition(d, keys)] + self.assert_partitioned(p, keys) + np.random.shuffle(keys) + assert_array_equal(np.partition(d, keys), p) + assert_array_equal(d[np.argpartition(d, keys)], p) + + # equal kth + d = np.arange(20)[::-1] + self.assert_partitioned(np.partition(d, [5]*4), [5]) + self.assert_partitioned(np.partition(d, [5]*4 + [6, 13]), + [5]*4 + [6, 13]) + self.assert_partitioned(d[np.argpartition(d, [5]*4)], [5]) + self.assert_partitioned(d[np.argpartition(d, [5]*4 + [6, 13])], + [5]*4 + [6, 13]) + + d = np.arange(12) + np.random.shuffle(d) + d1 = np.tile(np.arange(12), (4, 1)) + 
map(np.random.shuffle, d1) + d0 = np.transpose(d1) + + kth = (1, 6, 7, -1) + p = np.partition(d1, kth, axis=1) + pa = d1[np.arange(d1.shape[0])[:, None], + d1.argpartition(kth, axis=1)] + assert_array_equal(p, pa) + for i in range(d1.shape[0]): + self.assert_partitioned(p[i,:], kth) + p = np.partition(d0, kth, axis=0) + pa = d0[np.argpartition(d0, kth, axis=0), + np.arange(d0.shape[1])[None,:]] + assert_array_equal(p, pa) + for i in range(d0.shape[1]): + self.assert_partitioned(p[:, i], kth) + + def test_partition_cdtype(self): + d = np.array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41), + ('Lancelot', 1.9, 38)], + dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')]) + + tgt = np.sort(d, order=['age', 'height']) + assert_array_equal(np.partition(d, range(d.size), + order=['age', 'height']), + tgt) + assert_array_equal(d[np.argpartition(d, range(d.size), + order=['age', 'height'])], + tgt) + for k in range(d.size): + assert_equal(np.partition(d, k, order=['age', 'height'])[k], + tgt[k]) + assert_equal(d[np.argpartition(d, k, order=['age', 'height'])][k], + tgt[k]) + + d = np.array(['Galahad', 'Arthur', 'zebra', 'Lancelot']) + tgt = np.sort(d) + assert_array_equal(np.partition(d, range(d.size)), tgt) + for k in range(d.size): + assert_equal(np.partition(d, k)[k], tgt[k]) + assert_equal(d[np.argpartition(d, k)][k], tgt[k]) + + def test_partition_unicode_kind(self): + d = np.arange(10) + k = b'\xc3\xa4'.decode("UTF8") + assert_raises(ValueError, d.partition, 2, kind=k) + assert_raises(ValueError, d.argpartition, 2, kind=k) + + def test_partition_fuzz(self): + # a few rounds of random data testing + for j in range(10, 30): + for i in range(1, j - 2): + d = np.arange(j) + np.random.shuffle(d) + d = d % np.random.randint(2, 30) + idx = np.random.randint(d.size) + kth = [0, idx, i, i + 1] + tgt = np.sort(d)[kth] + assert_array_equal(np.partition(d, kth)[kth], tgt, + err_msg="data: %r\n kth: %r" % (d, kth)) + + @pytest.mark.parametrize("kth_dtype", np.typecodes["AllInteger"]) + def test_argpartition_gh5524(self, kth_dtype): + # A test for functionality of argpartition on lists. 
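+ # (np.argpartition accepts a plain Python list, converting it to an array
+ # internally; the returned indices, applied to np.array(d), must satisfy the
+ # same partition property that assert_partitioned checks.)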
+ kth = np.array(1, dtype=kth_dtype)[()] + d = [6, 7, 3, 2, 9, 0] + p = np.argpartition(d, kth) + self.assert_partitioned(np.array(d)[p],[1]) + + def test_flatten(self): + x0 = np.array([[1, 2, 3], [4, 5, 6]], np.int32) + x1 = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], np.int32) + y0 = np.array([1, 2, 3, 4, 5, 6], np.int32) + y0f = np.array([1, 4, 2, 5, 3, 6], np.int32) + y1 = np.array([1, 2, 3, 4, 5, 6, 7, 8], np.int32) + y1f = np.array([1, 5, 3, 7, 2, 6, 4, 8], np.int32) + assert_equal(x0.flatten(), y0) + assert_equal(x0.flatten('F'), y0f) + assert_equal(x0.flatten('F'), x0.T.flatten()) + assert_equal(x1.flatten(), y1) + assert_equal(x1.flatten('F'), y1f) + assert_equal(x1.flatten('F'), x1.T.flatten()) + + + @pytest.mark.parametrize('func', (np.dot, np.matmul)) + def test_arr_mult(self, func): + a = np.array([[1, 0], [0, 1]]) + b = np.array([[0, 1], [1, 0]]) + c = np.array([[9, 1], [1, -9]]) + d = np.arange(24).reshape(4, 6) + ddt = np.array( + [[ 55, 145, 235, 325], + [ 145, 451, 757, 1063], + [ 235, 757, 1279, 1801], + [ 325, 1063, 1801, 2539]] + ) + dtd = np.array( + [[504, 540, 576, 612, 648, 684], + [540, 580, 620, 660, 700, 740], + [576, 620, 664, 708, 752, 796], + [612, 660, 708, 756, 804, 852], + [648, 700, 752, 804, 856, 908], + [684, 740, 796, 852, 908, 964]] + ) + + + # gemm vs syrk optimizations + for et in [np.float32, np.float64, np.complex64, np.complex128]: + eaf = a.astype(et) + assert_equal(func(eaf, eaf), eaf) + assert_equal(func(eaf.T, eaf), eaf) + assert_equal(func(eaf, eaf.T), eaf) + assert_equal(func(eaf.T, eaf.T), eaf) + assert_equal(func(eaf.T.copy(), eaf), eaf) + assert_equal(func(eaf, eaf.T.copy()), eaf) + assert_equal(func(eaf.T.copy(), eaf.T.copy()), eaf) + + # syrk validations + for et in [np.float32, np.float64, np.complex64, np.complex128]: + eaf = a.astype(et) + ebf = b.astype(et) + assert_equal(func(ebf, ebf), eaf) + assert_equal(func(ebf.T, ebf), eaf) + assert_equal(func(ebf, ebf.T), eaf) + assert_equal(func(ebf.T, ebf.T), eaf) + + # syrk - different shape, stride, and view validations + for et in [np.float32, np.float64, np.complex64, np.complex128]: + edf = d.astype(et) + assert_equal( + func(edf[::-1, :], edf.T), + func(edf[::-1, :].copy(), edf.T.copy()) + ) + assert_equal( + func(edf[:, ::-1], edf.T), + func(edf[:, ::-1].copy(), edf.T.copy()) + ) + assert_equal( + func(edf, edf[::-1, :].T), + func(edf, edf[::-1, :].T.copy()) + ) + assert_equal( + func(edf, edf[:, ::-1].T), + func(edf, edf[:, ::-1].T.copy()) + ) + assert_equal( + func(edf[:edf.shape[0] // 2, :], edf[::2, :].T), + func(edf[:edf.shape[0] // 2, :].copy(), edf[::2, :].T.copy()) + ) + assert_equal( + func(edf[::2, :], edf[:edf.shape[0] // 2, :].T), + func(edf[::2, :].copy(), edf[:edf.shape[0] // 2, :].T.copy()) + ) + + # syrk - different shape + for et in [np.float32, np.float64, np.complex64, np.complex128]: + edf = d.astype(et) + eddtf = ddt.astype(et) + edtdf = dtd.astype(et) + assert_equal(func(edf, edf.T), eddtf) + assert_equal(func(edf.T, edf), edtdf) + + @pytest.mark.parametrize('func', (np.dot, np.matmul)) + @pytest.mark.parametrize('dtype', 'ifdFD') + def test_no_dgemv(self, func, dtype): + # check vector arg for contiguous before gemv + # gh-12156 + a = np.arange(8.0, dtype=dtype).reshape(2, 4) + b = np.broadcast_to(1., (4, 1)) + ret1 = func(a, b) + ret2 = func(a, b.copy()) + assert_equal(ret1, ret2) + + ret1 = func(b.T, a.T) + ret2 = func(b.T.copy(), a.T) + assert_equal(ret1, ret2) + + # check for unaligned data + dt = np.dtype(dtype) + a = np.zeros(8 * dt.itemsize // 2 + 
1, dtype='int16')[1:].view(dtype) + a = a.reshape(2, 4) + b = a[0] + # make sure it is not aligned + assert_(a.__array_interface__['data'][0] % dt.itemsize != 0) + ret1 = func(a, b) + ret2 = func(a.copy(), b.copy()) + assert_equal(ret1, ret2) + + ret1 = func(b.T, a.T) + ret2 = func(b.T.copy(), a.T.copy()) + assert_equal(ret1, ret2) + + def test_dot(self): + a = np.array([[1, 0], [0, 1]]) + b = np.array([[0, 1], [1, 0]]) + c = np.array([[9, 1], [1, -9]]) + # function versus methods + assert_equal(np.dot(a, b), a.dot(b)) + assert_equal(np.dot(np.dot(a, b), c), a.dot(b).dot(c)) + + # test passing in an output array + c = np.zeros_like(a) + a.dot(b, c) + assert_equal(c, np.dot(a, b)) + + # test keyword args + c = np.zeros_like(a) + a.dot(b=b, out=c) + assert_equal(c, np.dot(a, b)) + + def test_dot_type_mismatch(self): + c = 1. + A = np.array((1,1), dtype='i,i') + + assert_raises(TypeError, np.dot, c, A) + assert_raises(TypeError, np.dot, A, c) + + def test_dot_out_mem_overlap(self): + np.random.seed(1) + + # Test BLAS and non-BLAS code paths, including all dtypes + # that dot() supports + dtypes = [np.dtype(code) for code in np.typecodes['All'] + if code not in 'USVM'] + for dtype in dtypes: + a = np.random.rand(3, 3).astype(dtype) + + # Valid dot() output arrays must be aligned + b = _aligned_zeros((3, 3), dtype=dtype) + b[...] = np.random.rand(3, 3) + + y = np.dot(a, b) + x = np.dot(a, b, out=b) + assert_equal(x, y, err_msg=repr(dtype)) + + # Check invalid output array + assert_raises(ValueError, np.dot, a, b, out=b[::2]) + assert_raises(ValueError, np.dot, a, b, out=b.T) + + def test_dot_matmul_out(self): + # gh-9641 + class Sub(np.ndarray): + pass + a = np.ones((2, 2)).view(Sub) + b = np.ones((2, 2)).view(Sub) + out = np.ones((2, 2)) + + # make sure out can be any ndarray (not only subclass of inputs) + np.dot(a, b, out=out) + np.matmul(a, b, out=out) + + def test_dot_matmul_inner_array_casting_fails(self): + + class A: + def __array__(self, *args, **kwargs): + raise NotImplementedError + + # Don't override the error from calling __array__() + assert_raises(NotImplementedError, np.dot, A(), A()) + assert_raises(NotImplementedError, np.matmul, A(), A()) + assert_raises(NotImplementedError, np.inner, A(), A()) + + def test_matmul_out(self): + # overlapping memory + a = np.arange(18).reshape(2, 3, 3) + b = np.matmul(a, a) + c = np.matmul(a, a, out=a) + assert_(c is a) + assert_equal(c, b) + a = np.arange(18).reshape(2, 3, 3) + c = np.matmul(a, a, out=a[::-1, ...]) + assert_(c.base is a.base) + assert_equal(c, b) + + def test_diagonal(self): + a = np.arange(12).reshape((3, 4)) + assert_equal(a.diagonal(), [0, 5, 10]) + assert_equal(a.diagonal(0), [0, 5, 10]) + assert_equal(a.diagonal(1), [1, 6, 11]) + assert_equal(a.diagonal(-1), [4, 9]) + assert_raises(np.AxisError, a.diagonal, axis1=0, axis2=5) + assert_raises(np.AxisError, a.diagonal, axis1=5, axis2=0) + assert_raises(np.AxisError, a.diagonal, axis1=5, axis2=5) + assert_raises(ValueError, a.diagonal, axis1=1, axis2=1) + + b = np.arange(8).reshape((2, 2, 2)) + assert_equal(b.diagonal(), [[0, 6], [1, 7]]) + assert_equal(b.diagonal(0), [[0, 6], [1, 7]]) + assert_equal(b.diagonal(1), [[2], [3]]) + assert_equal(b.diagonal(-1), [[4], [5]]) + assert_raises(ValueError, b.diagonal, axis1=0, axis2=0) + assert_equal(b.diagonal(0, 1, 2), [[0, 3], [4, 7]]) + assert_equal(b.diagonal(0, 0, 1), [[0, 6], [1, 7]]) + assert_equal(b.diagonal(offset=1, axis1=0, axis2=2), [[1], [3]]) + # Order of axis argument doesn't matter: + assert_equal(b.diagonal(0, 2, 1), 
[[0, 3], [4, 7]]) + + def test_diagonal_view_notwriteable(self): + a = np.eye(3).diagonal() + assert_(not a.flags.writeable) + assert_(not a.flags.owndata) + + a = np.diagonal(np.eye(3)) + assert_(not a.flags.writeable) + assert_(not a.flags.owndata) + + a = np.diag(np.eye(3)) + assert_(not a.flags.writeable) + assert_(not a.flags.owndata) + + def test_diagonal_memleak(self): + # Regression test for a bug that crept in at one point + a = np.zeros((100, 100)) + if HAS_REFCOUNT: + assert_(sys.getrefcount(a) < 50) + for i in range(100): + a.diagonal() + if HAS_REFCOUNT: + assert_(sys.getrefcount(a) < 50) + + def test_size_zero_memleak(self): + # Regression test for issue 9615 + # Exercises a special-case code path for dot products of length + # zero in cblasfuncs (making it is specific to floating dtypes). + a = np.array([], dtype=np.float64) + x = np.array(2.0) + for _ in range(100): + np.dot(a, a, out=x) + if HAS_REFCOUNT: + assert_(sys.getrefcount(x) < 50) + + def test_trace(self): + a = np.arange(12).reshape((3, 4)) + assert_equal(a.trace(), 15) + assert_equal(a.trace(0), 15) + assert_equal(a.trace(1), 18) + assert_equal(a.trace(-1), 13) + + b = np.arange(8).reshape((2, 2, 2)) + assert_equal(b.trace(), [6, 8]) + assert_equal(b.trace(0), [6, 8]) + assert_equal(b.trace(1), [2, 3]) + assert_equal(b.trace(-1), [4, 5]) + assert_equal(b.trace(0, 0, 1), [6, 8]) + assert_equal(b.trace(0, 0, 2), [5, 9]) + assert_equal(b.trace(0, 1, 2), [3, 11]) + assert_equal(b.trace(offset=1, axis1=0, axis2=2), [1, 3]) + + out = np.array(1) + ret = a.trace(out=out) + assert ret is out + + def test_trace_subclass(self): + # The class would need to overwrite trace to ensure single-element + # output also has the right subclass. + class MyArray(np.ndarray): + pass + + b = np.arange(8).reshape((2, 2, 2)).view(MyArray) + t = b.trace() + assert_(isinstance(t, MyArray)) + + def test_put(self): + icodes = np.typecodes['AllInteger'] + fcodes = np.typecodes['AllFloat'] + for dt in icodes + fcodes + 'O': + tgt = np.array([0, 1, 0, 3, 0, 5], dtype=dt) + + # test 1-d + a = np.zeros(6, dtype=dt) + a.put([1, 3, 5], [1, 3, 5]) + assert_equal(a, tgt) + + # test 2-d + a = np.zeros((2, 3), dtype=dt) + a.put([1, 3, 5], [1, 3, 5]) + assert_equal(a, tgt.reshape(2, 3)) + + for dt in '?': + tgt = np.array([False, True, False, True, False, True], dtype=dt) + + # test 1-d + a = np.zeros(6, dtype=dt) + a.put([1, 3, 5], [True]*3) + assert_equal(a, tgt) + + # test 2-d + a = np.zeros((2, 3), dtype=dt) + a.put([1, 3, 5], [True]*3) + assert_equal(a, tgt.reshape(2, 3)) + + # check must be writeable + a = np.zeros(6) + a.flags.writeable = False + assert_raises(ValueError, a.put, [1, 3, 5], [1, 3, 5]) + + # when calling np.put, make sure a + # TypeError is raised if the object + # isn't an ndarray + bad_array = [1, 2, 3] + assert_raises(TypeError, np.put, bad_array, [0, 2], 5) + + def test_ravel(self): + a = np.array([[0, 1], [2, 3]]) + assert_equal(a.ravel(), [0, 1, 2, 3]) + assert_(not a.ravel().flags.owndata) + assert_equal(a.ravel('F'), [0, 2, 1, 3]) + assert_equal(a.ravel(order='C'), [0, 1, 2, 3]) + assert_equal(a.ravel(order='F'), [0, 2, 1, 3]) + assert_equal(a.ravel(order='A'), [0, 1, 2, 3]) + assert_(not a.ravel(order='A').flags.owndata) + assert_equal(a.ravel(order='K'), [0, 1, 2, 3]) + assert_(not a.ravel(order='K').flags.owndata) + assert_equal(a.ravel(), a.reshape(-1)) + + a = np.array([[0, 1], [2, 3]], order='F') + assert_equal(a.ravel(), [0, 1, 2, 3]) + assert_equal(a.ravel(order='A'), [0, 2, 1, 3]) + assert_equal(a.ravel(order='K'), 
[0, 2, 1, 3]) + assert_(not a.ravel(order='A').flags.owndata) + assert_(not a.ravel(order='K').flags.owndata) + assert_equal(a.ravel(), a.reshape(-1)) + assert_equal(a.ravel(order='A'), a.reshape(-1, order='A')) + + a = np.array([[0, 1], [2, 3]])[::-1, :] + assert_equal(a.ravel(), [2, 3, 0, 1]) + assert_equal(a.ravel(order='C'), [2, 3, 0, 1]) + assert_equal(a.ravel(order='F'), [2, 0, 3, 1]) + assert_equal(a.ravel(order='A'), [2, 3, 0, 1]) + # 'K' doesn't reverse the axes of negative strides + assert_equal(a.ravel(order='K'), [2, 3, 0, 1]) + assert_(a.ravel(order='K').flags.owndata) + + # Test simple 1-d copy behaviour: + a = np.arange(10)[::2] + assert_(a.ravel('K').flags.owndata) + assert_(a.ravel('C').flags.owndata) + assert_(a.ravel('F').flags.owndata) + + # Not contiguous and 1-sized axis with non matching stride + a = np.arange(2**3 * 2)[::2] + a = a.reshape(2, 1, 2, 2).swapaxes(-1, -2) + strides = list(a.strides) + strides[1] = 123 + a.strides = strides + assert_(a.ravel(order='K').flags.owndata) + assert_equal(a.ravel('K'), np.arange(0, 15, 2)) + + # contiguous and 1-sized axis with non matching stride works: + a = np.arange(2**3) + a = a.reshape(2, 1, 2, 2).swapaxes(-1, -2) + strides = list(a.strides) + strides[1] = 123 + a.strides = strides + assert_(np.may_share_memory(a.ravel(order='K'), a)) + assert_equal(a.ravel(order='K'), np.arange(2**3)) + + # Test negative strides (not very interesting since non-contiguous): + a = np.arange(4)[::-1].reshape(2, 2) + assert_(a.ravel(order='C').flags.owndata) + assert_(a.ravel(order='K').flags.owndata) + assert_equal(a.ravel('C'), [3, 2, 1, 0]) + assert_equal(a.ravel('K'), [3, 2, 1, 0]) + + # 1-element tidy strides test: + a = np.array([[1]]) + a.strides = (123, 432) + # If the following stride is not 8, NPY_RELAXED_STRIDES_DEBUG is + # messing them up on purpose: + if np.ones(1).strides == (8,): + assert_(np.may_share_memory(a.ravel('K'), a)) + assert_equal(a.ravel('K').strides, (a.dtype.itemsize,)) + + for order in ('C', 'F', 'A', 'K'): + # 0-d corner case: + a = np.array(0) + assert_equal(a.ravel(order), [0]) + assert_(np.may_share_memory(a.ravel(order), a)) + + # Test that certain non-inplace ravels work right (mostly) for 'K': + b = np.arange(2**4 * 2)[::2].reshape(2, 2, 2, 2) + a = b[..., ::2] + assert_equal(a.ravel('K'), [0, 4, 8, 12, 16, 20, 24, 28]) + assert_equal(a.ravel('C'), [0, 4, 8, 12, 16, 20, 24, 28]) + assert_equal(a.ravel('A'), [0, 4, 8, 12, 16, 20, 24, 28]) + assert_equal(a.ravel('F'), [0, 16, 8, 24, 4, 20, 12, 28]) + + a = b[::2, ...] 
+ assert_equal(a.ravel('K'), [0, 2, 4, 6, 8, 10, 12, 14]) + assert_equal(a.ravel('C'), [0, 2, 4, 6, 8, 10, 12, 14]) + assert_equal(a.ravel('A'), [0, 2, 4, 6, 8, 10, 12, 14]) + assert_equal(a.ravel('F'), [0, 8, 4, 12, 2, 10, 6, 14]) + + def test_ravel_subclass(self): + class ArraySubclass(np.ndarray): + pass + + a = np.arange(10).view(ArraySubclass) + assert_(isinstance(a.ravel('C'), ArraySubclass)) + assert_(isinstance(a.ravel('F'), ArraySubclass)) + assert_(isinstance(a.ravel('A'), ArraySubclass)) + assert_(isinstance(a.ravel('K'), ArraySubclass)) + + a = np.arange(10)[::2].view(ArraySubclass) + assert_(isinstance(a.ravel('C'), ArraySubclass)) + assert_(isinstance(a.ravel('F'), ArraySubclass)) + assert_(isinstance(a.ravel('A'), ArraySubclass)) + assert_(isinstance(a.ravel('K'), ArraySubclass)) + + def test_swapaxes(self): + a = np.arange(1*2*3*4).reshape(1, 2, 3, 4).copy() + idx = np.indices(a.shape) + assert_(a.flags['OWNDATA']) + b = a.copy() + # check exceptions + assert_raises(np.AxisError, a.swapaxes, -5, 0) + assert_raises(np.AxisError, a.swapaxes, 4, 0) + assert_raises(np.AxisError, a.swapaxes, 0, -5) + assert_raises(np.AxisError, a.swapaxes, 0, 4) + + for i in range(-4, 4): + for j in range(-4, 4): + for k, src in enumerate((a, b)): + c = src.swapaxes(i, j) + # check shape + shape = list(src.shape) + shape[i] = src.shape[j] + shape[j] = src.shape[i] + assert_equal(c.shape, shape, str((i, j, k))) + # check array contents + i0, i1, i2, i3 = [dim-1 for dim in c.shape] + j0, j1, j2, j3 = [dim-1 for dim in src.shape] + assert_equal(src[idx[j0], idx[j1], idx[j2], idx[j3]], + c[idx[i0], idx[i1], idx[i2], idx[i3]], + str((i, j, k))) + # check a view is always returned, gh-5260 + assert_(not c.flags['OWNDATA'], str((i, j, k))) + # check on non-contiguous input array + if k == 1: + b = c + + def test_conjugate(self): + a = np.array([1-1j, 1+1j, 23+23.0j]) + ac = a.conj() + assert_equal(a.real, ac.real) + assert_equal(a.imag, -ac.imag) + assert_equal(ac, a.conjugate()) + assert_equal(ac, np.conjugate(a)) + + a = np.array([1-1j, 1+1j, 23+23.0j], 'F') + ac = a.conj() + assert_equal(a.real, ac.real) + assert_equal(a.imag, -ac.imag) + assert_equal(ac, a.conjugate()) + assert_equal(ac, np.conjugate(a)) + + a = np.array([1, 2, 3]) + ac = a.conj() + assert_equal(a, ac) + assert_equal(ac, a.conjugate()) + assert_equal(ac, np.conjugate(a)) + + a = np.array([1.0, 2.0, 3.0]) + ac = a.conj() + assert_equal(a, ac) + assert_equal(ac, a.conjugate()) + assert_equal(ac, np.conjugate(a)) + + a = np.array([1-1j, 1+1j, 1, 2.0], object) + ac = a.conj() + assert_equal(ac, [k.conjugate() for k in a]) + assert_equal(ac, a.conjugate()) + assert_equal(ac, np.conjugate(a)) + + a = np.array([1-1j, 1, 2.0, 'f'], object) + assert_raises(TypeError, lambda: a.conj()) + assert_raises(TypeError, lambda: a.conjugate()) + + def test_conjugate_out(self): + # Minimal test for the out argument being passed on correctly + # NOTE: The ability to pass `out` is currently undocumented! 
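+ # (Here `out` is passed positionally; the expected behaviour mirrors
+ # ufunc-style out= handling: the conjugate is written into `out` and that
+ # same object is returned.)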
+ a = np.array([1-1j, 1+1j, 23+23.0j]) + out = np.empty_like(a) + res = a.conjugate(out) + assert res is out + assert_array_equal(out, a.conjugate()) + + def test__complex__(self): + dtypes = ['i1', 'i2', 'i4', 'i8', + 'u1', 'u2', 'u4', 'u8', + 'f', 'd', 'g', 'F', 'D', 'G', + '?', 'O'] + for dt in dtypes: + a = np.array(7, dtype=dt) + b = np.array([7], dtype=dt) + c = np.array([[[[[7]]]]], dtype=dt) + + msg = 'dtype: {0}'.format(dt) + ap = complex(a) + assert_equal(ap, a, msg) + + with assert_warns(DeprecationWarning): + bp = complex(b) + assert_equal(bp, b, msg) + + with assert_warns(DeprecationWarning): + cp = complex(c) + assert_equal(cp, c, msg) + + def test__complex__should_not_work(self): + dtypes = ['i1', 'i2', 'i4', 'i8', + 'u1', 'u2', 'u4', 'u8', + 'f', 'd', 'g', 'F', 'D', 'G', + '?', 'O'] + for dt in dtypes: + a = np.array([1, 2, 3], dtype=dt) + assert_raises(TypeError, complex, a) + + dt = np.dtype([('a', 'f8'), ('b', 'i1')]) + b = np.array((1.0, 3), dtype=dt) + assert_raises(TypeError, complex, b) + + c = np.array([(1.0, 3), (2e-3, 7)], dtype=dt) + assert_raises(TypeError, complex, c) + + d = np.array('1+1j') + assert_raises(TypeError, complex, d) + + e = np.array(['1+1j'], 'U') + with assert_warns(DeprecationWarning): + assert_raises(TypeError, complex, e) + +class TestCequenceMethods: + def test_array_contains(self): + assert_(4.0 in np.arange(16.).reshape(4,4)) + assert_(20.0 not in np.arange(16.).reshape(4,4)) + +class TestBinop: + def test_inplace(self): + # test refcount 1 inplace conversion + assert_array_almost_equal(np.array([0.5]) * np.array([1.0, 2.0]), + [0.5, 1.0]) + + d = np.array([0.5, 0.5])[::2] + assert_array_almost_equal(d * (d * np.array([1.0, 2.0])), + [0.25, 0.5]) + + a = np.array([0.5]) + b = np.array([0.5]) + c = a + b + c = a - b + c = a * b + c = a / b + assert_equal(a, b) + assert_almost_equal(c, 1.) + + c = a + b * 2. 
/ b * a - a / b + assert_equal(a, b) + assert_equal(c, 0.5) + + # true divide + a = np.array([5]) + b = np.array([3]) + c = (a * a) / b + + assert_almost_equal(c, 25 / 3) + assert_equal(a, 5) + assert_equal(b, 3) + + # ndarray.__rop__ always calls ufunc + # ndarray.__iop__ always calls ufunc + # ndarray.__op__, __rop__: + # - defer if other has __array_ufunc__ and it is None + # or other is not a subclass and has higher array priority + # - else, call ufunc + @pytest.mark.xfail(IS_PYPY, reason="Bug in pypy3.{9, 10}-v7.3.13, #24862") + def test_ufunc_binop_interaction(self): + # Python method name (without underscores) + # -> (numpy ufunc, has_in_place_version, preferred_dtype) + ops = { + 'add': (np.add, True, float), + 'sub': (np.subtract, True, float), + 'mul': (np.multiply, True, float), + 'truediv': (np.true_divide, True, float), + 'floordiv': (np.floor_divide, True, float), + 'mod': (np.remainder, True, float), + 'divmod': (np.divmod, False, float), + 'pow': (np.power, True, int), + 'lshift': (np.left_shift, True, int), + 'rshift': (np.right_shift, True, int), + 'and': (np.bitwise_and, True, int), + 'xor': (np.bitwise_xor, True, int), + 'or': (np.bitwise_or, True, int), + 'matmul': (np.matmul, True, float), + # 'ge': (np.less_equal, False), + # 'gt': (np.less, False), + # 'le': (np.greater_equal, False), + # 'lt': (np.greater, False), + # 'eq': (np.equal, False), + # 'ne': (np.not_equal, False), + } + + class Coerced(Exception): + pass + + def array_impl(self): + raise Coerced + + def op_impl(self, other): + return "forward" + + def rop_impl(self, other): + return "reverse" + + def iop_impl(self, other): + return "in-place" + + def array_ufunc_impl(self, ufunc, method, *args, **kwargs): + return ("__array_ufunc__", ufunc, method, args, kwargs) + + # Create an object with the given base, in the given module, with a + # bunch of placeholder __op__ methods, and optionally a + # __array_ufunc__ and __array_priority__. + def make_obj(base, array_priority=False, array_ufunc=False, + alleged_module="__main__"): + class_namespace = {"__array__": array_impl} + if array_priority is not False: + class_namespace["__array_priority__"] = array_priority + for op in ops: + class_namespace["__{0}__".format(op)] = op_impl + class_namespace["__r{0}__".format(op)] = rop_impl + class_namespace["__i{0}__".format(op)] = iop_impl + if array_ufunc is not False: + class_namespace["__array_ufunc__"] = array_ufunc + eval_namespace = {"base": base, + "class_namespace": class_namespace, + "__name__": alleged_module, + } + MyType = eval("type('MyType', (base,), class_namespace)", + eval_namespace) + if issubclass(MyType, np.ndarray): + # Use this range to avoid special case weirdnesses around + # divide-by-0, pow(x, 2), overflow due to pow(big, big), etc. 
+ return np.arange(3, 7).reshape(2, 2).view(MyType) + else: + return MyType() + + def check(obj, binop_override_expected, ufunc_override_expected, + inplace_override_expected, check_scalar=True): + for op, (ufunc, has_inplace, dtype) in ops.items(): + err_msg = ('op: %s, ufunc: %s, has_inplace: %s, dtype: %s' + % (op, ufunc, has_inplace, dtype)) + check_objs = [np.arange(3, 7, dtype=dtype).reshape(2, 2)] + if check_scalar: + check_objs.append(check_objs[0][0]) + for arr in check_objs: + arr_method = getattr(arr, "__{0}__".format(op)) + + def first_out_arg(result): + if op == "divmod": + assert_(isinstance(result, tuple)) + return result[0] + else: + return result + + # arr __op__ obj + if binop_override_expected: + assert_equal(arr_method(obj), NotImplemented, err_msg) + elif ufunc_override_expected: + assert_equal(arr_method(obj)[0], "__array_ufunc__", + err_msg) + else: + if (isinstance(obj, np.ndarray) and + (type(obj).__array_ufunc__ is + np.ndarray.__array_ufunc__)): + # __array__ gets ignored + res = first_out_arg(arr_method(obj)) + assert_(res.__class__ is obj.__class__, err_msg) + else: + assert_raises((TypeError, Coerced), + arr_method, obj, err_msg=err_msg) + # obj __op__ arr + arr_rmethod = getattr(arr, "__r{0}__".format(op)) + if ufunc_override_expected: + res = arr_rmethod(obj) + assert_equal(res[0], "__array_ufunc__", + err_msg=err_msg) + assert_equal(res[1], ufunc, err_msg=err_msg) + else: + if (isinstance(obj, np.ndarray) and + (type(obj).__array_ufunc__ is + np.ndarray.__array_ufunc__)): + # __array__ gets ignored + res = first_out_arg(arr_rmethod(obj)) + assert_(res.__class__ is obj.__class__, err_msg) + else: + # __array_ufunc__ = "asdf" creates a TypeError + assert_raises((TypeError, Coerced), + arr_rmethod, obj, err_msg=err_msg) + + # arr __iop__ obj + # array scalars don't have in-place operators + if has_inplace and isinstance(arr, np.ndarray): + arr_imethod = getattr(arr, "__i{0}__".format(op)) + if inplace_override_expected: + assert_equal(arr_method(obj), NotImplemented, + err_msg=err_msg) + elif ufunc_override_expected: + res = arr_imethod(obj) + assert_equal(res[0], "__array_ufunc__", err_msg) + assert_equal(res[1], ufunc, err_msg) + assert_(type(res[-1]["out"]) is tuple, err_msg) + assert_(res[-1]["out"][0] is arr, err_msg) + else: + if (isinstance(obj, np.ndarray) and + (type(obj).__array_ufunc__ is + np.ndarray.__array_ufunc__)): + # __array__ gets ignored + assert_(arr_imethod(obj) is arr, err_msg) + else: + assert_raises((TypeError, Coerced), + arr_imethod, obj, + err_msg=err_msg) + + op_fn = getattr(operator, op, None) + if op_fn is None: + op_fn = getattr(operator, op + "_", None) + if op_fn is None: + op_fn = getattr(builtins, op) + assert_equal(op_fn(obj, arr), "forward", err_msg) + if not isinstance(obj, np.ndarray): + if binop_override_expected: + assert_equal(op_fn(arr, obj), "reverse", err_msg) + elif ufunc_override_expected: + assert_equal(op_fn(arr, obj)[0], "__array_ufunc__", + err_msg) + if ufunc_override_expected: + assert_equal(ufunc(obj, arr)[0], "__array_ufunc__", + err_msg) + + # No array priority, no array_ufunc -> nothing called + check(make_obj(object), False, False, False) + # Negative array priority, no array_ufunc -> nothing called + # (has to be very negative, because scalar priority is -1000000.0) + check(make_obj(object, array_priority=-2**30), False, False, False) + # Positive array priority, no array_ufunc -> binops and iops only + check(make_obj(object, array_priority=1), True, False, True) + # ndarray ignores array_priority for 
ndarray subclasses + check(make_obj(np.ndarray, array_priority=1), False, False, False, + check_scalar=False) + # Positive array_priority and array_ufunc -> array_ufunc only + check(make_obj(object, array_priority=1, + array_ufunc=array_ufunc_impl), False, True, False) + check(make_obj(np.ndarray, array_priority=1, + array_ufunc=array_ufunc_impl), False, True, False) + # array_ufunc set to None -> defer binops only + check(make_obj(object, array_ufunc=None), True, False, False) + check(make_obj(np.ndarray, array_ufunc=None), True, False, False, + check_scalar=False) + + @pytest.mark.parametrize("priority", [None, "runtime error"]) + def test_ufunc_binop_bad_array_priority(self, priority): + # Mainly checks that this does not crash. The second array has a lower + # priority than -1 ("error value"). If the __radd__ actually exists, + # bad things can happen (I think via the scalar paths). + # In principle both of these can probably just be errors in the future. + class BadPriority: + @property + def __array_priority__(self): + if priority == "runtime error": + raise RuntimeError("RuntimeError in __array_priority__!") + return priority + + def __radd__(self, other): + return "result" + + class LowPriority(np.ndarray): + __array_priority__ = -1000 + + # Priority failure uses the same as scalars (smaller -1000). So the + # LowPriority wins with 'result' for each element (inner operation). + res = np.arange(3).view(LowPriority) + BadPriority() + assert res.shape == (3,) + assert res[0] == 'result' + + + def test_ufunc_override_normalize_signature(self): + # gh-5674 + class SomeClass: + def __array_ufunc__(self, ufunc, method, *inputs, **kw): + return kw + + a = SomeClass() + kw = np.add(a, [1]) + assert_('sig' not in kw and 'signature' not in kw) + kw = np.add(a, [1], sig='ii->i') + assert_('sig' not in kw and 'signature' in kw) + assert_equal(kw['signature'], 'ii->i') + kw = np.add(a, [1], signature='ii->i') + assert_('sig' not in kw and 'signature' in kw) + assert_equal(kw['signature'], 'ii->i') + + def test_array_ufunc_index(self): + # Check that index is set appropriately, also if only an output + # is passed on (latter is another regression tests for github bug 4753) + # This also checks implicitly that 'out' is always a tuple. + class CheckIndex: + def __array_ufunc__(self, ufunc, method, *inputs, **kw): + for i, a in enumerate(inputs): + if a is self: + return i + # calls below mean we must be in an output. + for j, a in enumerate(kw['out']): + if a is self: + return (j,) + + a = CheckIndex() + dummy = np.arange(2.) 
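+ # (CheckIndex.__array_ufunc__ reports where `a` appeared in the call: a bare
+ # integer i when it was the i-th positional input, or a one-element tuple
+ # (j,) when it was the j-th entry of the 'out' tuple; the expected values in
+ # the assertions below encode exactly that position.)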
+ # 1 input, 1 output + assert_equal(np.sin(a), 0) + assert_equal(np.sin(dummy, a), (0,)) + assert_equal(np.sin(dummy, out=a), (0,)) + assert_equal(np.sin(dummy, out=(a,)), (0,)) + assert_equal(np.sin(a, a), 0) + assert_equal(np.sin(a, out=a), 0) + assert_equal(np.sin(a, out=(a,)), 0) + # 1 input, 2 outputs + assert_equal(np.modf(dummy, a), (0,)) + assert_equal(np.modf(dummy, None, a), (1,)) + assert_equal(np.modf(dummy, dummy, a), (1,)) + assert_equal(np.modf(dummy, out=(a, None)), (0,)) + assert_equal(np.modf(dummy, out=(a, dummy)), (0,)) + assert_equal(np.modf(dummy, out=(None, a)), (1,)) + assert_equal(np.modf(dummy, out=(dummy, a)), (1,)) + assert_equal(np.modf(a, out=(dummy, a)), 0) + with assert_raises(TypeError): + # Out argument must be tuple, since there are multiple outputs + np.modf(dummy, out=a) + + assert_raises(ValueError, np.modf, dummy, out=(a,)) + + # 2 inputs, 1 output + assert_equal(np.add(a, dummy), 0) + assert_equal(np.add(dummy, a), 1) + assert_equal(np.add(dummy, dummy, a), (0,)) + assert_equal(np.add(dummy, a, a), 1) + assert_equal(np.add(dummy, dummy, out=a), (0,)) + assert_equal(np.add(dummy, dummy, out=(a,)), (0,)) + assert_equal(np.add(a, dummy, out=a), 0) + + def test_out_override(self): + # regression test for github bug 4753 + class OutClass(np.ndarray): + def __array_ufunc__(self, ufunc, method, *inputs, **kw): + if 'out' in kw: + tmp_kw = kw.copy() + tmp_kw.pop('out') + func = getattr(ufunc, method) + kw['out'][0][...] = func(*inputs, **tmp_kw) + + A = np.array([0]).view(OutClass) + B = np.array([5]) + C = np.array([6]) + np.multiply(C, B, A) + assert_equal(A[0], 30) + assert_(isinstance(A, OutClass)) + A[0] = 0 + np.multiply(C, B, out=A) + assert_equal(A[0], 30) + assert_(isinstance(A, OutClass)) + + def test_pow_override_with_errors(self): + # regression test for gh-9112 + class PowerOnly(np.ndarray): + def __array_ufunc__(self, ufunc, method, *inputs, **kw): + if ufunc is not np.power: + raise NotImplementedError + return "POWER!" + # explicit cast to float, to ensure the fast power path is taken. 
+ a = np.array(5., dtype=np.float64).view(PowerOnly) + assert_equal(a ** 2.5, "POWER!") + with assert_raises(NotImplementedError): + a ** 0.5 + with assert_raises(NotImplementedError): + a ** 0 + with assert_raises(NotImplementedError): + a ** 1 + with assert_raises(NotImplementedError): + a ** -1 + with assert_raises(NotImplementedError): + a ** 2 + + def test_pow_array_object_dtype(self): + # test pow on arrays of object dtype + class SomeClass: + def __init__(self, num=None): + self.num = num + + # want to ensure a fast pow path is not taken + def __mul__(self, other): + raise AssertionError('__mul__ should not be called') + + def __div__(self, other): + raise AssertionError('__div__ should not be called') + + def __pow__(self, exp): + return SomeClass(num=self.num ** exp) + + def __eq__(self, other): + if isinstance(other, SomeClass): + return self.num == other.num + + __rpow__ = __pow__ + + def pow_for(exp, arr): + return np.array([x ** exp for x in arr]) + + obj_arr = np.array([SomeClass(1), SomeClass(2), SomeClass(3)]) + + assert_equal(obj_arr ** 0.5, pow_for(0.5, obj_arr)) + assert_equal(obj_arr ** 0, pow_for(0, obj_arr)) + assert_equal(obj_arr ** 1, pow_for(1, obj_arr)) + assert_equal(obj_arr ** -1, pow_for(-1, obj_arr)) + assert_equal(obj_arr ** 2, pow_for(2, obj_arr)) + + def test_pos_array_ufunc_override(self): + class A(np.ndarray): + def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): + return getattr(ufunc, method)(*[i.view(np.ndarray) for + i in inputs], **kwargs) + tst = np.array('foo').view(A) + with assert_raises(TypeError): + +tst + + +class TestTemporaryElide: + # elision is only triggered on relatively large arrays + + def test_extension_incref_elide(self): + # test extension (e.g. cython) calling PyNumber_* slots without + # increasing the reference counts + # + # def incref_elide(a): + # d = input.copy() # refcount 1 + # return d, d + d # PyNumber_Add without increasing refcount + from numpy.core._multiarray_tests import incref_elide + d = np.ones(100000) + orig, res = incref_elide(d) + d + d + # the return original should not be changed to an inplace operation + assert_array_equal(orig, d) + assert_array_equal(res, d + d) + + def test_extension_incref_elide_stack(self): + # scanning if the refcount == 1 object is on the python stack to check + # that we are called directly from python is flawed as object may still + # be above the stack pointer and we have no access to the top of it + # + # def incref_elide_l(d): + # return l[4] + l[4] # PyNumber_Add without increasing refcount + from numpy.core._multiarray_tests import incref_elide_l + # padding with 1 makes sure the object on the stack is not overwritten + l = [1, 1, 1, 1, np.ones(100000)] + res = incref_elide_l(l) + # the return original should not be changed to an inplace operation + assert_array_equal(l[4], np.ones(100000)) + assert_array_equal(res, l[4] + l[4]) + + def test_temporary_with_cast(self): + # check that we don't elide into a temporary which would need casting + d = np.ones(200000, dtype=np.int64) + assert_equal(((d + d) + 2**222).dtype, np.dtype('O')) + + r = ((d + d) / 2) + assert_equal(r.dtype, np.dtype('f8')) + + r = np.true_divide((d + d), 2) + assert_equal(r.dtype, np.dtype('f8')) + + r = ((d + d) / 2.) 
+ assert_equal(r.dtype, np.dtype('f8')) + + r = ((d + d) // 2) + assert_equal(r.dtype, np.dtype(np.int64)) + + # commutative elision into the astype result + f = np.ones(100000, dtype=np.float32) + assert_equal(((f + f) + f.astype(np.float64)).dtype, np.dtype('f8')) + + # no elision into lower type + d = f.astype(np.float64) + assert_equal(((f + f) + d).dtype, d.dtype) + l = np.ones(100000, dtype=np.longdouble) + assert_equal(((d + d) + l).dtype, l.dtype) + + # test unary abs with different output dtype + for dt in (np.complex64, np.complex128, np.clongdouble): + c = np.ones(100000, dtype=dt) + r = abs(c * 2.0) + assert_equal(r.dtype, np.dtype('f%d' % (c.itemsize // 2))) + + def test_elide_broadcast(self): + # test no elision on broadcast to higher dimension + # only triggers elision code path in debug mode as triggering it in + # normal mode needs 256kb large matching dimension, so a lot of memory + d = np.ones((2000, 1), dtype=int) + b = np.ones((2000), dtype=bool) + r = (1 - d) + b + assert_equal(r, 1) + assert_equal(r.shape, (2000, 2000)) + + def test_elide_scalar(self): + # check inplace op does not create ndarray from scalars + a = np.bool_() + assert_(type(~(a & a)) is np.bool_) + + def test_elide_scalar_readonly(self): + # The imaginary part of a real array is readonly. This needs to go + # through fast_scalar_power which is only called for powers of + # +1, -1, 0, 0.5, and 2, so use 2. Also need valid refcount for + # elision which can be gotten for the imaginary part of a real + # array. Should not error. + a = np.empty(100000, dtype=np.float64) + a.imag ** 2 + + def test_elide_readonly(self): + # don't try to elide readonly temporaries + r = np.asarray(np.broadcast_to(np.zeros(1), 100000).flat) * 0.0 + assert_equal(r, 0) + + def test_elide_updateifcopy(self): + a = np.ones(2**20)[::2] + b = a.flat.__array__() + 1 + del b + assert_equal(a, 1) + + +class TestCAPI: + def test_IsPythonScalar(self): + from numpy.core._multiarray_tests import IsPythonScalar + assert_(IsPythonScalar(b'foobar')) + assert_(IsPythonScalar(1)) + assert_(IsPythonScalar(2**80)) + assert_(IsPythonScalar(2.)) + assert_(IsPythonScalar("a")) + + @pytest.mark.parametrize("converter", + [_multiarray_tests.run_scalar_intp_converter, + _multiarray_tests.run_scalar_intp_from_sequence]) + def test_intp_sequence_converters(self, converter): + # Test simple values (-1 is special for error return paths) + assert converter(10) == (10,) + assert converter(-1) == (-1,) + # A 0-D array looks a bit like a sequence but must take the integer + # path: + assert converter(np.array(123)) == (123,) + # Test simple sequences (intp_from_sequence only supports length 1): + assert converter((10,)) == (10,) + assert converter(np.array([11])) == (11,) + + @pytest.mark.parametrize("converter", + [_multiarray_tests.run_scalar_intp_converter, + _multiarray_tests.run_scalar_intp_from_sequence]) + @pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), + reason="PyPy bug in error formatting") + def test_intp_sequence_converters_errors(self, converter): + with pytest.raises(TypeError, + match="expected a sequence of integers or a single integer, "): + converter(object()) + with pytest.raises(TypeError, + match="expected a sequence of integers or a single integer, " + "got '32.0'"): + converter(32.) 
+ with pytest.raises(TypeError, + match="'float' object cannot be interpreted as an integer"): + converter([32.]) + with pytest.raises(ValueError, + match="Maximum allowed dimension"): + # These converters currently convert overflows to a ValueError + converter(2**64) + + +class TestSubscripting: + def test_test_zero_rank(self): + x = np.array([1, 2, 3]) + assert_(isinstance(x[0], np.int_)) + assert_(type(x[0, ...]) is np.ndarray) + + +class TestPickling: + @pytest.mark.skipif(pickle.HIGHEST_PROTOCOL >= 5, + reason=('this tests the error messages when trying to' + 'protocol 5 although it is not available')) + def test_correct_protocol5_error_message(self): + array = np.arange(10) + + def test_record_array_with_object_dtype(self): + my_object = object() + + arr_with_object = np.array( + [(my_object, 1, 2.0)], + dtype=[('a', object), ('b', int), ('c', float)]) + arr_without_object = np.array( + [('xxx', 1, 2.0)], + dtype=[('a', str), ('b', int), ('c', float)]) + + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + depickled_arr_with_object = pickle.loads( + pickle.dumps(arr_with_object, protocol=proto)) + depickled_arr_without_object = pickle.loads( + pickle.dumps(arr_without_object, protocol=proto)) + + assert_equal(arr_with_object.dtype, + depickled_arr_with_object.dtype) + assert_equal(arr_without_object.dtype, + depickled_arr_without_object.dtype) + + @pytest.mark.skipif(pickle.HIGHEST_PROTOCOL < 5, + reason="requires pickle protocol 5") + def test_f_contiguous_array(self): + f_contiguous_array = np.array([[1, 2, 3], [4, 5, 6]], order='F') + buffers = [] + + # When using pickle protocol 5, Fortran-contiguous arrays can be + # serialized using out-of-band buffers + bytes_string = pickle.dumps(f_contiguous_array, protocol=5, + buffer_callback=buffers.append) + + assert len(buffers) > 0 + + depickled_f_contiguous_array = pickle.loads(bytes_string, + buffers=buffers) + + assert_equal(f_contiguous_array, depickled_f_contiguous_array) + + def test_non_contiguous_array(self): + non_contiguous_array = np.arange(12).reshape(3, 4)[:, :2] + assert not non_contiguous_array.flags.c_contiguous + assert not non_contiguous_array.flags.f_contiguous + + # make sure non-contiguous arrays can be pickled-depickled + # using any protocol + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + depickled_non_contiguous_array = pickle.loads( + pickle.dumps(non_contiguous_array, protocol=proto)) + + assert_equal(non_contiguous_array, depickled_non_contiguous_array) + + def test_roundtrip(self): + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + carray = np.array([[2, 9], [7, 0], [3, 8]]) + DATA = [ + carray, + np.transpose(carray), + np.array([('xxx', 1, 2.0)], dtype=[('a', (str, 3)), ('b', int), + ('c', float)]) + ] + + refs = [weakref.ref(a) for a in DATA] + for a in DATA: + assert_equal( + a, pickle.loads(pickle.dumps(a, protocol=proto)), + err_msg="%r" % a) + del a, DATA, carray + break_cycles() + # check for reference leaks (gh-12793) + for ref in refs: + assert ref() is None + + def _loads(self, obj): + return pickle.loads(obj, encoding='latin1') + + # version 0 pickles, using protocol=2 to pickle + # version 0 doesn't have a version field + def test_version0_int8(self): + s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.' 
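# Illustrative sketch (not part of the test file): pickle protocol 5 lets
# the array data travel out-of-band through `buffer_callback` and be
# reattached with `buffers=` on load, which is what the
# test_f_contiguous_array case above relies on.
import pickle
import numpy as np

arr = np.arange(12.0).reshape(3, 4)
buffers = []
payload = pickle.dumps(arr, protocol=5, buffer_callback=buffers.append)
restored = pickle.loads(payload, buffers=buffers)
assert np.array_equal(arr, restored)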
+ a = np.array([1, 2, 3, 4], dtype=np.int8) + p = self._loads(s) + assert_equal(a, p) + + def test_version0_float32(self): + s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(U\x01<NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x10\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@tb.' + a = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32) + p = self._loads(s) + assert_equal(a, p) + + def test_version0_object(self): + s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x02\x85cnumpy\ndtype\nq\x04U\x02O8K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89]q\x06(}q\x07U\x01aK\x01s}q\x08U\x01bK\x02setb.' + a = np.array([{'a': 1}, {'b': 2}]) + p = self._loads(s) + assert_equal(a, p) + + # version 1 pickles, using protocol=2 to pickle + def test_version1_int8(self): + s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(K\x01U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.' + a = np.array([1, 2, 3, 4], dtype=np.int8) + p = self._loads(s) + assert_equal(a, p) + + def test_version1_float32(self): + s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(K\x01U\x01<NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x10\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@tb.' + a = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32) + p = self._loads(s) + assert_equal(a, p) + + def test_version1_object(self): + s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x02\x85cnumpy\ndtype\nq\x04U\x02O8K\x00K\x01\x87Rq\x05(K\x01U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89]q\x06(}q\x07U\x01aK\x01s}q\x08U\x01bK\x02setb.' + a = np.array([{'a': 1}, {'b': 2}]) + p = self._loads(s) + assert_equal(a, p) + + def test_subarray_int_shape(self): + s = b"cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\np1\n(I0\ntp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\np7\n(S'V6'\np8\nI0\nI1\ntp9\nRp10\n(I3\nS'|'\np11\nN(S'a'\np12\ng3\ntp13\n(dp14\ng12\n(g7\n(S'V4'\np15\nI0\nI1\ntp16\nRp17\n(I3\nS'|'\np18\n(g7\n(S'i1'\np19\nI0\nI1\ntp20\nRp21\n(I3\nS'|'\np22\nNNNI-1\nI-1\nI0\ntp23\nb(I2\nI2\ntp24\ntp25\nNNI4\nI1\nI0\ntp26\nbI0\ntp27\nsg3\n(g7\n(S'V2'\np28\nI0\nI1\ntp29\nRp30\n(I3\nS'|'\np31\n(g21\nI2\ntp32\nNNI2\nI1\nI0\ntp33\nbI4\ntp34\nsI6\nI1\nI0\ntp35\nbI00\nS'\\x01\\x01\\x01\\x01\\x01\\x02'\np36\ntp37\nb." 
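# Illustrative sketch (not from the file): the structured dtype rebuilt from
# the legacy pickle above uses subarray fields -- 'a' holds a 2x2 int8 block
# and 'b' a length-2 int8 vector per record.
import numpy as np

dt = np.dtype([('a', 'i1', (2, 2)), ('b', 'i1', 2)])
rec = np.array([(1, (1, 2))], dtype=dt)
print(rec['a'].shape)   # (1, 2, 2) -- subarray shape appended to the array shape
print(rec['b'][0])      # [1 2]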
+ a = np.array([(1, (1, 2))], dtype=[('a', 'i1', (2, 2)), ('b', 'i1', 2)]) + p = self._loads(s) + assert_equal(a, p) + + def test_datetime64_byteorder(self): + original = np.array([['2015-02-24T00:00:00.000000000']], dtype='datetime64[ns]') + + original_byte_reversed = original.copy(order='K') + original_byte_reversed.dtype = original_byte_reversed.dtype.newbyteorder('S') + original_byte_reversed.byteswap(inplace=True) + + new = pickle.loads(pickle.dumps(original_byte_reversed)) + + assert_equal(original.dtype, new.dtype) + + +class TestFancyIndexing: + def test_list(self): + x = np.ones((1, 1)) + x[:, [0]] = 2.0 + assert_array_equal(x, np.array([[2.0]])) + + x = np.ones((1, 1, 1)) + x[:, :, [0]] = 2.0 + assert_array_equal(x, np.array([[[2.0]]])) + + def test_tuple(self): + x = np.ones((1, 1)) + x[:, (0,)] = 2.0 + assert_array_equal(x, np.array([[2.0]])) + x = np.ones((1, 1, 1)) + x[:, :, (0,)] = 2.0 + assert_array_equal(x, np.array([[[2.0]]])) + + def test_mask(self): + x = np.array([1, 2, 3, 4]) + m = np.array([0, 1, 0, 0], bool) + assert_array_equal(x[m], np.array([2])) + + def test_mask2(self): + x = np.array([[1, 2, 3, 4], [5, 6, 7, 8]]) + m = np.array([0, 1], bool) + m2 = np.array([[0, 1, 0, 0], [1, 0, 0, 0]], bool) + m3 = np.array([[0, 1, 0, 0], [0, 0, 0, 0]], bool) + assert_array_equal(x[m], np.array([[5, 6, 7, 8]])) + assert_array_equal(x[m2], np.array([2, 5])) + assert_array_equal(x[m3], np.array([2])) + + def test_assign_mask(self): + x = np.array([1, 2, 3, 4]) + m = np.array([0, 1, 0, 0], bool) + x[m] = 5 + assert_array_equal(x, np.array([1, 5, 3, 4])) + + def test_assign_mask2(self): + xorig = np.array([[1, 2, 3, 4], [5, 6, 7, 8]]) + m = np.array([0, 1], bool) + m2 = np.array([[0, 1, 0, 0], [1, 0, 0, 0]], bool) + m3 = np.array([[0, 1, 0, 0], [0, 0, 0, 0]], bool) + x = xorig.copy() + x[m] = 10 + assert_array_equal(x, np.array([[1, 2, 3, 4], [10, 10, 10, 10]])) + x = xorig.copy() + x[m2] = 10 + assert_array_equal(x, np.array([[1, 10, 3, 4], [10, 6, 7, 8]])) + x = xorig.copy() + x[m3] = 10 + assert_array_equal(x, np.array([[1, 10, 3, 4], [5, 6, 7, 8]])) + + +class TestStringCompare: + def test_string(self): + g1 = np.array(["This", "is", "example"]) + g2 = np.array(["This", "was", "example"]) + assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0, 1, 2]]) + assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0, 1, 2]]) + assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0, 1, 2]]) + assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0, 1, 2]]) + assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]]) + assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]]) + + def test_mixed(self): + g1 = np.array(["spam", "spa", "spammer", "and eggs"]) + g2 = "spam" + assert_array_equal(g1 == g2, [x == g2 for x in g1]) + assert_array_equal(g1 != g2, [x != g2 for x in g1]) + assert_array_equal(g1 < g2, [x < g2 for x in g1]) + assert_array_equal(g1 > g2, [x > g2 for x in g1]) + assert_array_equal(g1 <= g2, [x <= g2 for x in g1]) + assert_array_equal(g1 >= g2, [x >= g2 for x in g1]) + + def test_unicode(self): + g1 = np.array(["This", "is", "example"]) + g2 = np.array(["This", "was", "example"]) + assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0, 1, 2]]) + assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0, 1, 2]]) + assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0, 1, 2]]) + assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0, 1, 2]]) + assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]]) + assert_array_equal(g1 > g2, [g1[i] > 
g2[i] for i in [0, 1, 2]]) + +class TestArgmaxArgminCommon: + + sizes = [(), (3,), (3, 2), (2, 3), + (3, 3), (2, 3, 4), (4, 3, 2), + (1, 2, 3, 4), (2, 3, 4, 1), + (3, 4, 1, 2), (4, 1, 2, 3), + (64,), (128,), (256,)] + + @pytest.mark.parametrize("size, axis", itertools.chain(*[[(size, axis) + for axis in list(range(-len(size), len(size))) + [None]] + for size in sizes])) + @pytest.mark.parametrize('method', [np.argmax, np.argmin]) + def test_np_argmin_argmax_keepdims(self, size, axis, method): + + arr = np.random.normal(size=size) + + # contiguous arrays + if axis is None: + new_shape = [1 for _ in range(len(size))] + else: + new_shape = list(size) + new_shape[axis] = 1 + new_shape = tuple(new_shape) + + _res_orig = method(arr, axis=axis) + res_orig = _res_orig.reshape(new_shape) + res = method(arr, axis=axis, keepdims=True) + assert_equal(res, res_orig) + assert_(res.shape == new_shape) + outarray = np.empty(res.shape, dtype=res.dtype) + res1 = method(arr, axis=axis, out=outarray, + keepdims=True) + assert_(res1 is outarray) + assert_equal(res, outarray) + + if len(size) > 0: + wrong_shape = list(new_shape) + if axis is not None: + wrong_shape[axis] = 2 + else: + wrong_shape[0] = 2 + wrong_outarray = np.empty(wrong_shape, dtype=res.dtype) + with pytest.raises(ValueError): + method(arr.T, axis=axis, + out=wrong_outarray, keepdims=True) + + # non-contiguous arrays + if axis is None: + new_shape = [1 for _ in range(len(size))] + else: + new_shape = list(size)[::-1] + new_shape[axis] = 1 + new_shape = tuple(new_shape) + + _res_orig = method(arr.T, axis=axis) + res_orig = _res_orig.reshape(new_shape) + res = method(arr.T, axis=axis, keepdims=True) + assert_equal(res, res_orig) + assert_(res.shape == new_shape) + outarray = np.empty(new_shape[::-1], dtype=res.dtype) + outarray = outarray.T + res1 = method(arr.T, axis=axis, out=outarray, + keepdims=True) + assert_(res1 is outarray) + assert_equal(res, outarray) + + if len(size) > 0: + # one dimension lesser for non-zero sized + # array should raise an error + with pytest.raises(ValueError): + method(arr[0], axis=axis, + out=outarray, keepdims=True) + + if len(size) > 0: + wrong_shape = list(new_shape) + if axis is not None: + wrong_shape[axis] = 2 + else: + wrong_shape[0] = 2 + wrong_outarray = np.empty(wrong_shape, dtype=res.dtype) + with pytest.raises(ValueError): + method(arr.T, axis=axis, + out=wrong_outarray, keepdims=True) + + @pytest.mark.parametrize('method', ['max', 'min']) + def test_all(self, method): + a = np.random.normal(0, 1, (4, 5, 6, 7, 8)) + arg_method = getattr(a, 'arg' + method) + val_method = getattr(a, method) + for i in range(a.ndim): + a_maxmin = val_method(i) + aarg_maxmin = arg_method(i) + axes = list(range(a.ndim)) + axes.remove(i) + assert_(np.all(a_maxmin == aarg_maxmin.choose( + *a.transpose(i, *axes)))) + + @pytest.mark.parametrize('method', ['argmax', 'argmin']) + def test_output_shape(self, method): + # see also gh-616 + a = np.ones((10, 5)) + arg_method = getattr(a, method) + # Check some simple shape mismatches + out = np.ones(11, dtype=np.int_) + assert_raises(ValueError, arg_method, -1, out) + + out = np.ones((2, 5), dtype=np.int_) + assert_raises(ValueError, arg_method, -1, out) + + # these could be relaxed possibly (used to allow even the previous) + out = np.ones((1, 10), dtype=np.int_) + assert_raises(ValueError, arg_method, -1, out) + + out = np.ones(10, dtype=np.int_) + arg_method(-1, out=out) + assert_equal(out, arg_method(-1)) + + @pytest.mark.parametrize('ndim', [0, 1]) + 
@pytest.mark.parametrize('method', ['argmax', 'argmin']) + def test_ret_is_out(self, ndim, method): + a = np.ones((4,) + (256,)*ndim) + arg_method = getattr(a, method) + out = np.empty((256,)*ndim, dtype=np.intp) + ret = arg_method(axis=0, out=out) + assert ret is out + + @pytest.mark.parametrize('np_array, method, idx, val', + [(np.zeros, 'argmax', 5942, "as"), + (np.ones, 'argmin', 6001, "0")]) + def test_unicode(self, np_array, method, idx, val): + d = np_array(6031, dtype='<U9') + arg_method = getattr(d, method) + d[idx] = val + assert_equal(arg_method(), idx) + + @pytest.mark.parametrize('arr_method, np_method', + [('argmax', np.argmax), + ('argmin', np.argmin)]) + def test_np_vs_ndarray(self, arr_method, np_method): + # make sure both ndarray.argmax/argmin and + # numpy.argmax/argmin support out/axis args + a = np.random.normal(size=(2, 3)) + arg_method = getattr(a, arr_method) + + # check positional args + out1 = np.zeros(2, dtype=int) + out2 = np.zeros(2, dtype=int) + assert_equal(arg_method(1, out1), np_method(a, 1, out2)) + assert_equal(out1, out2) + + # check keyword args + out1 = np.zeros(3, dtype=int) + out2 = np.zeros(3, dtype=int) + assert_equal(arg_method(out=out1, axis=0), + np_method(a, out=out2, axis=0)) + assert_equal(out1, out2) + + @pytest.mark.leaks_references(reason="replaces None with NULL.") + @pytest.mark.parametrize('method, vals', + [('argmax', (10, 30)), + ('argmin', (30, 10))]) + def test_object_with_NULLs(self, method, vals): + # See gh-6032 + a = np.empty(4, dtype='O') + arg_method = getattr(a, method) + ctypes.memset(a.ctypes.data, 0, a.nbytes) + assert_equal(arg_method(), 0) + a[3] = vals[0] + assert_equal(arg_method(), 3) + a[1] = vals[1] + assert_equal(arg_method(), 1) + +class TestArgmax: + usg_data = [ + ([1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], 0), + ([3, 3, 3, 3, 2, 2, 2, 2], 0), + ([0, 1, 2, 3, 4, 5, 6, 7], 7), + ([7, 6, 5, 4, 3, 2, 1, 0], 0) + ] + sg_data = usg_data + [ + ([1, 2, 3, 4, -4, -3, -2, -1], 3), + ([1, 2, 3, 4, -1, -2, -3, -4], 3) + ] + darr = [(np.array(d[0], dtype=t), d[1]) for d, t in ( + itertools.product(usg_data, ( + np.uint8, np.uint16, np.uint32, np.uint64 + )) + )] + darr = darr + [(np.array(d[0], dtype=t), d[1]) for d, t in ( + itertools.product(sg_data, ( + np.int8, np.int16, np.int32, np.int64, np.float32, np.float64 + )) + )] + darr = darr + [(np.array(d[0], dtype=t), d[1]) for d, t in ( + itertools.product(( + ([0, 1, 2, 3, np.nan], 4), + ([0, 1, 2, np.nan, 3], 3), + ([np.nan, 0, 1, 2, 3], 0), + ([np.nan, 0, np.nan, 2, 3], 0), + # To hit the tail of SIMD multi-level(x4, x1) inner loops + # on variant SIMD widthes + ([1] * (2*5-1) + [np.nan], 2*5-1), + ([1] * (4*5-1) + [np.nan], 4*5-1), + ([1] * (8*5-1) + [np.nan], 8*5-1), + ([1] * (16*5-1) + [np.nan], 16*5-1), + ([1] * (32*5-1) + [np.nan], 32*5-1) + ), ( + np.float32, np.float64 + )) + )] + nan_arr = darr + [ + ([0, 1, 2, 3, complex(0, np.nan)], 4), + ([0, 1, 2, 3, complex(np.nan, 0)], 4), + ([0, 1, 2, complex(np.nan, 0), 3], 3), + ([0, 1, 2, complex(0, np.nan), 3], 3), + ([complex(0, np.nan), 0, 1, 2, 3], 0), + ([complex(np.nan, np.nan), 0, 1, 2, 3], 0), + ([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0), + ([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0), + ([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0), + + ([complex(0, 0), complex(0, 2), complex(0, 1)], 1), + ([complex(1, 0), complex(0, 2), complex(0, 1)], 0), + ([complex(1, 0), complex(0, 2), complex(1, 1)], 2), + + 
([np.datetime64('1923-04-14T12:43:12'), + np.datetime64('1994-06-21T14:43:15'), + np.datetime64('2001-10-15T04:10:32'), + np.datetime64('1995-11-25T16:02:16'), + np.datetime64('2005-01-04T03:14:12'), + np.datetime64('2041-12-03T14:05:03')], 5), + ([np.datetime64('1935-09-14T04:40:11'), + np.datetime64('1949-10-12T12:32:11'), + np.datetime64('2010-01-03T05:14:12'), + np.datetime64('2015-11-20T12:20:59'), + np.datetime64('1932-09-23T10:10:13'), + np.datetime64('2014-10-10T03:50:30')], 3), + # Assorted tests with NaTs + ([np.datetime64('NaT'), + np.datetime64('NaT'), + np.datetime64('2010-01-03T05:14:12'), + np.datetime64('NaT'), + np.datetime64('2015-09-23T10:10:13'), + np.datetime64('1932-10-10T03:50:30')], 0), + ([np.datetime64('2059-03-14T12:43:12'), + np.datetime64('1996-09-21T14:43:15'), + np.datetime64('NaT'), + np.datetime64('2022-12-25T16:02:16'), + np.datetime64('1963-10-04T03:14:12'), + np.datetime64('2013-05-08T18:15:23')], 2), + ([np.timedelta64(2, 's'), + np.timedelta64(1, 's'), + np.timedelta64('NaT', 's'), + np.timedelta64(3, 's')], 2), + ([np.timedelta64('NaT', 's')] * 3, 0), + + ([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35), + timedelta(days=-1, seconds=23)], 0), + ([timedelta(days=1, seconds=43), timedelta(days=10, seconds=5), + timedelta(days=5, seconds=14)], 1), + ([timedelta(days=10, seconds=24), timedelta(days=10, seconds=5), + timedelta(days=10, seconds=43)], 2), + + ([False, False, False, False, True], 4), + ([False, False, False, True, False], 3), + ([True, False, False, False, False], 0), + ([True, False, True, False, False], 0), + ] + + @pytest.mark.parametrize('data', nan_arr) + def test_combinations(self, data): + arr, pos = data + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, + "invalid value encountered in reduce") + val = np.max(arr) + + assert_equal(np.argmax(arr), pos, err_msg="%r" % arr) + assert_equal(arr[np.argmax(arr)], val, err_msg="%r" % arr) + + # add padding to test SIMD loops + rarr = np.repeat(arr, 129) + rpos = pos * 129 + assert_equal(np.argmax(rarr), rpos, err_msg="%r" % rarr) + assert_equal(rarr[np.argmax(rarr)], val, err_msg="%r" % rarr) + + padd = np.repeat(np.min(arr), 513) + rarr = np.concatenate((arr, padd)) + rpos = pos + assert_equal(np.argmax(rarr), rpos, err_msg="%r" % rarr) + assert_equal(rarr[np.argmax(rarr)], val, err_msg="%r" % rarr) + + + def test_maximum_signed_integers(self): + + a = np.array([1, 2**7 - 1, -2**7], dtype=np.int8) + assert_equal(np.argmax(a), 1) + a = a.repeat(129) + assert_equal(np.argmax(a), 129) + + a = np.array([1, 2**15 - 1, -2**15], dtype=np.int16) + assert_equal(np.argmax(a), 1) + a = a.repeat(129) + assert_equal(np.argmax(a), 129) + + a = np.array([1, 2**31 - 1, -2**31], dtype=np.int32) + assert_equal(np.argmax(a), 1) + a = a.repeat(129) + assert_equal(np.argmax(a), 129) + + a = np.array([1, 2**63 - 1, -2**63], dtype=np.int64) + assert_equal(np.argmax(a), 1) + a = a.repeat(129) + assert_equal(np.argmax(a), 129) + +class TestArgmin: + usg_data = [ + ([1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], 8), + ([3, 3, 3, 3, 2, 2, 2, 2], 4), + ([0, 1, 2, 3, 4, 5, 6, 7], 0), + ([7, 6, 5, 4, 3, 2, 1, 0], 7) + ] + sg_data = usg_data + [ + ([1, 2, 3, 4, -4, -3, -2, -1], 4), + ([1, 2, 3, 4, -1, -2, -3, -4], 7) + ] + darr = [(np.array(d[0], dtype=t), d[1]) for d, t in ( + itertools.product(usg_data, ( + np.uint8, np.uint16, np.uint32, np.uint64 + )) + )] + darr = darr + [(np.array(d[0], dtype=t), d[1]) for d, t in ( + itertools.product(sg_data, ( + np.int8, np.int16, np.int32, np.int64, 
np.float32, np.float64 + )) + )] + darr = darr + [(np.array(d[0], dtype=t), d[1]) for d, t in ( + itertools.product(( + ([0, 1, 2, 3, np.nan], 4), + ([0, 1, 2, np.nan, 3], 3), + ([np.nan, 0, 1, 2, 3], 0), + ([np.nan, 0, np.nan, 2, 3], 0), + # To hit the tail of SIMD multi-level(x4, x1) inner loops + # on variant SIMD widthes + ([1] * (2*5-1) + [np.nan], 2*5-1), + ([1] * (4*5-1) + [np.nan], 4*5-1), + ([1] * (8*5-1) + [np.nan], 8*5-1), + ([1] * (16*5-1) + [np.nan], 16*5-1), + ([1] * (32*5-1) + [np.nan], 32*5-1) + ), ( + np.float32, np.float64 + )) + )] + nan_arr = darr + [ + ([0, 1, 2, 3, complex(0, np.nan)], 4), + ([0, 1, 2, 3, complex(np.nan, 0)], 4), + ([0, 1, 2, complex(np.nan, 0), 3], 3), + ([0, 1, 2, complex(0, np.nan), 3], 3), + ([complex(0, np.nan), 0, 1, 2, 3], 0), + ([complex(np.nan, np.nan), 0, 1, 2, 3], 0), + ([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0), + ([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0), + ([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0), + + ([complex(0, 0), complex(0, 2), complex(0, 1)], 0), + ([complex(1, 0), complex(0, 2), complex(0, 1)], 2), + ([complex(1, 0), complex(0, 2), complex(1, 1)], 1), + + ([np.datetime64('1923-04-14T12:43:12'), + np.datetime64('1994-06-21T14:43:15'), + np.datetime64('2001-10-15T04:10:32'), + np.datetime64('1995-11-25T16:02:16'), + np.datetime64('2005-01-04T03:14:12'), + np.datetime64('2041-12-03T14:05:03')], 0), + ([np.datetime64('1935-09-14T04:40:11'), + np.datetime64('1949-10-12T12:32:11'), + np.datetime64('2010-01-03T05:14:12'), + np.datetime64('2014-11-20T12:20:59'), + np.datetime64('2015-09-23T10:10:13'), + np.datetime64('1932-10-10T03:50:30')], 5), + # Assorted tests with NaTs + ([np.datetime64('NaT'), + np.datetime64('NaT'), + np.datetime64('2010-01-03T05:14:12'), + np.datetime64('NaT'), + np.datetime64('2015-09-23T10:10:13'), + np.datetime64('1932-10-10T03:50:30')], 0), + ([np.datetime64('2059-03-14T12:43:12'), + np.datetime64('1996-09-21T14:43:15'), + np.datetime64('NaT'), + np.datetime64('2022-12-25T16:02:16'), + np.datetime64('1963-10-04T03:14:12'), + np.datetime64('2013-05-08T18:15:23')], 2), + ([np.timedelta64(2, 's'), + np.timedelta64(1, 's'), + np.timedelta64('NaT', 's'), + np.timedelta64(3, 's')], 2), + ([np.timedelta64('NaT', 's')] * 3, 0), + + ([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35), + timedelta(days=-1, seconds=23)], 2), + ([timedelta(days=1, seconds=43), timedelta(days=10, seconds=5), + timedelta(days=5, seconds=14)], 0), + ([timedelta(days=10, seconds=24), timedelta(days=10, seconds=5), + timedelta(days=10, seconds=43)], 1), + + ([True, True, True, True, False], 4), + ([True, True, True, False, True], 3), + ([False, True, True, True, True], 0), + ([False, True, False, True, True], 0), + ] + + @pytest.mark.parametrize('data', nan_arr) + def test_combinations(self, data): + arr, pos = data + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, + "invalid value encountered in reduce") + min_val = np.min(arr) + + assert_equal(np.argmin(arr), pos, err_msg="%r" % arr) + assert_equal(arr[np.argmin(arr)], min_val, err_msg="%r" % arr) + + # add padding to test SIMD loops + rarr = np.repeat(arr, 129) + rpos = pos * 129 + assert_equal(np.argmin(rarr), rpos, err_msg="%r" % rarr) + assert_equal(rarr[np.argmin(rarr)], min_val, err_msg="%r" % rarr) + + padd = np.repeat(np.max(arr), 513) + rarr = np.concatenate((arr, padd)) + rpos = pos + assert_equal(np.argmin(rarr), rpos, err_msg="%r" % rarr) + 
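# Illustrative sketch (not from the file): argmax and argmin treat NaN as
# the extreme value, so both return the index of the *first* NaN, which is
# what the expected positions in nan_arr above encode.
import numpy as np

a = np.array([0.0, 1.0, np.nan, 3.0, np.nan])
print(np.argmax(a))   # 2 -- the first NaN wins over every finite value
print(np.argmin(a))   # 2 -- argmin picks the same first-NaN index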
assert_equal(rarr[np.argmin(rarr)], min_val, err_msg="%r" % rarr) + + def test_minimum_signed_integers(self): + + a = np.array([1, -2**7, -2**7 + 1, 2**7 - 1], dtype=np.int8) + assert_equal(np.argmin(a), 1) + a = a.repeat(129) + assert_equal(np.argmin(a), 129) + + a = np.array([1, -2**15, -2**15 + 1, 2**15 - 1], dtype=np.int16) + assert_equal(np.argmin(a), 1) + a = a.repeat(129) + assert_equal(np.argmin(a), 129) + + a = np.array([1, -2**31, -2**31 + 1, 2**31 - 1], dtype=np.int32) + assert_equal(np.argmin(a), 1) + a = a.repeat(129) + assert_equal(np.argmin(a), 129) + + a = np.array([1, -2**63, -2**63 + 1, 2**63 - 1], dtype=np.int64) + assert_equal(np.argmin(a), 1) + a = a.repeat(129) + assert_equal(np.argmin(a), 129) + +class TestMinMax: + + def test_scalar(self): + assert_raises(np.AxisError, np.amax, 1, 1) + assert_raises(np.AxisError, np.amin, 1, 1) + + assert_equal(np.amax(1, axis=0), 1) + assert_equal(np.amin(1, axis=0), 1) + assert_equal(np.amax(1, axis=None), 1) + assert_equal(np.amin(1, axis=None), 1) + + def test_axis(self): + assert_raises(np.AxisError, np.amax, [1, 2, 3], 1000) + assert_equal(np.amax([[1, 2, 3]], axis=1), 3) + + def test_datetime(self): + # Do not ignore NaT + for dtype in ('m8[s]', 'm8[Y]'): + a = np.arange(10).astype(dtype) + assert_equal(np.amin(a), a[0]) + assert_equal(np.amax(a), a[9]) + a[3] = 'NaT' + assert_equal(np.amin(a), a[3]) + assert_equal(np.amax(a), a[3]) + + +class TestNewaxis: + def test_basic(self): + sk = np.array([0, -0.1, 0.1]) + res = 250*sk[:, np.newaxis] + assert_almost_equal(res.ravel(), 250*sk) + + +class TestClip: + def _check_range(self, x, cmin, cmax): + assert_(np.all(x >= cmin)) + assert_(np.all(x <= cmax)) + + def _clip_type(self, type_group, array_max, + clip_min, clip_max, inplace=False, + expected_min=None, expected_max=None): + if expected_min is None: + expected_min = clip_min + if expected_max is None: + expected_max = clip_max + + for T in np.sctypes[type_group]: + if sys.byteorder == 'little': + byte_orders = ['=', '>'] + else: + byte_orders = ['<', '='] + + for byteorder in byte_orders: + dtype = np.dtype(T).newbyteorder(byteorder) + + x = (np.random.random(1000) * array_max).astype(dtype) + if inplace: + # The tests that call us pass clip_min and clip_max that + # might not fit in the destination dtype. They were written + # assuming the previous unsafe casting, which now must be + # passed explicitly to avoid a warning. 
+ x.clip(clip_min, clip_max, x, casting='unsafe') + else: + x = x.clip(clip_min, clip_max) + byteorder = '=' + + if x.dtype.byteorder == '|': + byteorder = '|' + assert_equal(x.dtype.byteorder, byteorder) + self._check_range(x, expected_min, expected_max) + return x + + def test_basic(self): + for inplace in [False, True]: + self._clip_type( + 'float', 1024, -12.8, 100.2, inplace=inplace) + self._clip_type( + 'float', 1024, 0, 0, inplace=inplace) + + self._clip_type( + 'int', 1024, -120, 100, inplace=inplace) + self._clip_type( + 'int', 1024, 0, 0, inplace=inplace) + + self._clip_type( + 'uint', 1024, 0, 0, inplace=inplace) + self._clip_type( + 'uint', 1024, -120, 100, inplace=inplace, expected_min=0) + + def test_record_array(self): + rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)], + dtype=[('x', '<f8'), ('y', '<f8'), ('z', '<f8')]) + y = rec['x'].clip(-0.3, 0.5) + self._check_range(y, -0.3, 0.5) + + def test_max_or_min(self): + val = np.array([0, 1, 2, 3, 4, 5, 6, 7]) + x = val.clip(3) + assert_(np.all(x >= 3)) + x = val.clip(min=3) + assert_(np.all(x >= 3)) + x = val.clip(max=4) + assert_(np.all(x <= 4)) + + def test_nan(self): + input_arr = np.array([-2., np.nan, 0.5, 3., 0.25, np.nan]) + result = input_arr.clip(-1, 1) + expected = np.array([-1., np.nan, 0.5, 1., 0.25, np.nan]) + assert_array_equal(result, expected) + + +class TestCompress: + def test_axis(self): + tgt = [[5, 6, 7, 8, 9]] + arr = np.arange(10).reshape(2, 5) + out = np.compress([0, 1], arr, axis=0) + assert_equal(out, tgt) + + tgt = [[1, 3], [6, 8]] + out = np.compress([0, 1, 0, 1, 0], arr, axis=1) + assert_equal(out, tgt) + + def test_truncate(self): + tgt = [[1], [6]] + arr = np.arange(10).reshape(2, 5) + out = np.compress([0, 1], arr, axis=1) + assert_equal(out, tgt) + + def test_flatten(self): + arr = np.arange(10).reshape(2, 5) + out = np.compress([0, 1], arr) + assert_equal(out, 1) + + +class TestPutmask: + def tst_basic(self, x, T, mask, val): + np.putmask(x, mask, val) + assert_equal(x[mask], np.array(val, T)) + + def test_ip_types(self): + unchecked_types = [bytes, str, np.void] + + x = np.random.random(1000)*100 + mask = x < 40 + + for val in [-100, 0, 15]: + for types in np.sctypes.values(): + for T in types: + if T not in unchecked_types: + if val < 0 and np.dtype(T).kind == "u": + val = np.iinfo(T).max - 99 + self.tst_basic(x.copy().astype(T), T, mask, val) + + # Also test string of a length which uses an untypical length + dt = np.dtype("S3") + self.tst_basic(x.astype(dt), dt.type, mask, dt.type(val)[:3]) + + def test_mask_size(self): + assert_raises(ValueError, np.putmask, np.array([1, 2, 3]), [True], 5) + + @pytest.mark.parametrize('dtype', ('>i4', '<i4')) + def test_byteorder(self, dtype): + x = np.array([1, 2, 3], dtype) + np.putmask(x, [True, False, True], -1) + assert_array_equal(x, [-1, 2, -1]) + + def test_record_array(self): + # Note mixed byteorder. 
+ rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)], + dtype=[('x', '<f8'), ('y', '>f8'), ('z', '<f8')]) + np.putmask(rec['x'], [True, False], 10) + assert_array_equal(rec['x'], [10, 5]) + assert_array_equal(rec['y'], [2, 4]) + assert_array_equal(rec['z'], [3, 3]) + np.putmask(rec['y'], [True, False], 11) + assert_array_equal(rec['x'], [10, 5]) + assert_array_equal(rec['y'], [11, 4]) + assert_array_equal(rec['z'], [3, 3]) + + def test_overlaps(self): + # gh-6272 check overlap + x = np.array([True, False, True, False]) + np.putmask(x[1:4], [True, True, True], x[:3]) + assert_equal(x, np.array([True, True, False, True])) + + x = np.array([True, False, True, False]) + np.putmask(x[1:4], x[:3], [True, False, True]) + assert_equal(x, np.array([True, True, True, True])) + + def test_writeable(self): + a = np.arange(5) + a.flags.writeable = False + + with pytest.raises(ValueError): + np.putmask(a, a >= 2, 3) + + def test_kwargs(self): + x = np.array([0, 0]) + np.putmask(x, [0, 1], [-1, -2]) + assert_array_equal(x, [0, -2]) + + x = np.array([0, 0]) + np.putmask(x, mask=[0, 1], values=[-1, -2]) + assert_array_equal(x, [0, -2]) + + x = np.array([0, 0]) + np.putmask(x, values=[-1, -2], mask=[0, 1]) + assert_array_equal(x, [0, -2]) + + with pytest.raises(TypeError): + np.putmask(a=x, values=[-1, -2], mask=[0, 1]) + + +class TestTake: + def tst_basic(self, x): + ind = list(range(x.shape[0])) + assert_array_equal(x.take(ind, axis=0), x) + + def test_ip_types(self): + unchecked_types = [bytes, str, np.void] + + x = np.random.random(24)*100 + x.shape = 2, 3, 4 + for types in np.sctypes.values(): + for T in types: + if T not in unchecked_types: + self.tst_basic(x.copy().astype(T)) + + # Also test string of a length which uses an untypical length + self.tst_basic(x.astype("S3")) + + def test_raise(self): + x = np.random.random(24)*100 + x.shape = 2, 3, 4 + assert_raises(IndexError, x.take, [0, 1, 2], axis=0) + assert_raises(IndexError, x.take, [-3], axis=0) + assert_array_equal(x.take([-1], axis=0)[0], x[1]) + + def test_clip(self): + x = np.random.random(24)*100 + x.shape = 2, 3, 4 + assert_array_equal(x.take([-1], axis=0, mode='clip')[0], x[0]) + assert_array_equal(x.take([2], axis=0, mode='clip')[0], x[1]) + + def test_wrap(self): + x = np.random.random(24)*100 + x.shape = 2, 3, 4 + assert_array_equal(x.take([-1], axis=0, mode='wrap')[0], x[1]) + assert_array_equal(x.take([2], axis=0, mode='wrap')[0], x[0]) + assert_array_equal(x.take([3], axis=0, mode='wrap')[0], x[1]) + + @pytest.mark.parametrize('dtype', ('>i4', '<i4')) + def test_byteorder(self, dtype): + x = np.array([1, 2, 3], dtype) + assert_array_equal(x.take([0, 2, 1]), [1, 3, 2]) + + def test_record_array(self): + # Note mixed byteorder. 
+ rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)], + dtype=[('x', '<f8'), ('y', '>f8'), ('z', '<f8')]) + rec1 = rec.take([1]) + assert_(rec1['x'] == 5.0 and rec1['y'] == 4.0) + + def test_out_overlap(self): + # gh-6272 check overlap on out + x = np.arange(5) + y = np.take(x, [1, 2, 3], out=x[2:5], mode='wrap') + assert_equal(y, np.array([1, 2, 3])) + + @pytest.mark.parametrize('shape', [(1, 2), (1,), ()]) + def test_ret_is_out(self, shape): + # 0d arrays should not be an exception to this rule + x = np.arange(5) + inds = np.zeros(shape, dtype=np.intp) + out = np.zeros(shape, dtype=x.dtype) + ret = np.take(x, inds, out=out) + assert ret is out + + +class TestLexsort: + @pytest.mark.parametrize('dtype',[ + np.uint8, np.uint16, np.uint32, np.uint64, + np.int8, np.int16, np.int32, np.int64, + np.float16, np.float32, np.float64 + ]) + def test_basic(self, dtype): + a = np.array([1, 2, 1, 3, 1, 5], dtype=dtype) + b = np.array([0, 4, 5, 6, 2, 3], dtype=dtype) + idx = np.lexsort((b, a)) + expected_idx = np.array([0, 4, 2, 1, 3, 5]) + assert_array_equal(idx, expected_idx) + assert_array_equal(a[idx], np.sort(a)) + + def test_mixed(self): + a = np.array([1, 2, 1, 3, 1, 5]) + b = np.array([0, 4, 5, 6, 2, 3], dtype='datetime64[D]') + + idx = np.lexsort((b, a)) + expected_idx = np.array([0, 4, 2, 1, 3, 5]) + assert_array_equal(idx, expected_idx) + + def test_datetime(self): + a = np.array([0,0,0], dtype='datetime64[D]') + b = np.array([2,1,0], dtype='datetime64[D]') + idx = np.lexsort((b, a)) + expected_idx = np.array([2, 1, 0]) + assert_array_equal(idx, expected_idx) + + a = np.array([0,0,0], dtype='timedelta64[D]') + b = np.array([2,1,0], dtype='timedelta64[D]') + idx = np.lexsort((b, a)) + expected_idx = np.array([2, 1, 0]) + assert_array_equal(idx, expected_idx) + + def test_object(self): # gh-6312 + a = np.random.choice(10, 1000) + b = np.random.choice(['abc', 'xy', 'wz', 'efghi', 'qwst', 'x'], 1000) + + for u in a, b: + left = np.lexsort((u.astype('O'),)) + right = np.argsort(u, kind='mergesort') + assert_array_equal(left, right) + + for u, v in (a, b), (b, a): + idx = np.lexsort((u, v)) + assert_array_equal(idx, np.lexsort((u.astype('O'), v))) + assert_array_equal(idx, np.lexsort((u, v.astype('O')))) + u, v = np.array(u, dtype='object'), np.array(v, dtype='object') + assert_array_equal(idx, np.lexsort((u, v))) + + def test_invalid_axis(self): # gh-7528 + x = np.linspace(0., 1., 42*3).reshape(42, 3) + assert_raises(np.AxisError, np.lexsort, x, axis=2) + +class TestIO: + """Test tofile, fromfile, tobytes, and fromstring""" + + @pytest.fixture() + def x(self): + shape = (2, 4, 3) + rand = np.random.random + x = rand(shape) + rand(shape).astype(complex) * 1j + x[0, :, 1] = [np.nan, np.inf, -np.inf, np.nan] + return x + + @pytest.fixture(params=["string", "path_obj"]) + def tmp_filename(self, tmp_path, request): + # This fixture covers two cases: + # one where the filename is a string and + # another where it is a pathlib object + filename = tmp_path / "file" + if request.param == "string": + filename = str(filename) + yield filename + + def test_nofile(self): + # this should probably be supported as a file + # but for now test for proper errors + b = io.BytesIO() + assert_raises(OSError, np.fromfile, b, np.uint8, 80) + d = np.ones(7) + assert_raises(OSError, lambda x: x.tofile(b), d) + + def test_bool_fromstring(self): + v = np.array([True, False, True, False], dtype=np.bool_) + y = np.fromstring('1 0 -2.3 0.0', sep=' ', dtype=np.bool_) + assert_array_equal(v, y) + + def 
test_uint64_fromstring(self): + d = np.fromstring("9923372036854775807 104783749223640", + dtype=np.uint64, sep=' ') + e = np.array([9923372036854775807, 104783749223640], dtype=np.uint64) + assert_array_equal(d, e) + + def test_int64_fromstring(self): + d = np.fromstring("-25041670086757 104783749223640", + dtype=np.int64, sep=' ') + e = np.array([-25041670086757, 104783749223640], dtype=np.int64) + assert_array_equal(d, e) + + def test_fromstring_count0(self): + d = np.fromstring("1,2", sep=",", dtype=np.int64, count=0) + assert d.shape == (0,) + + def test_empty_files_text(self, tmp_filename): + with open(tmp_filename, 'w') as f: + pass + y = np.fromfile(tmp_filename) + assert_(y.size == 0, "Array not empty") + + def test_empty_files_binary(self, tmp_filename): + with open(tmp_filename, 'wb') as f: + pass + y = np.fromfile(tmp_filename, sep=" ") + assert_(y.size == 0, "Array not empty") + + def test_roundtrip_file(self, x, tmp_filename): + with open(tmp_filename, 'wb') as f: + x.tofile(f) + # NB. doesn't work with flush+seek, due to use of C stdio + with open(tmp_filename, 'rb') as f: + y = np.fromfile(f, dtype=x.dtype) + assert_array_equal(y, x.flat) + + def test_roundtrip(self, x, tmp_filename): + x.tofile(tmp_filename) + y = np.fromfile(tmp_filename, dtype=x.dtype) + assert_array_equal(y, x.flat) + + def test_roundtrip_dump_pathlib(self, x, tmp_filename): + p = pathlib.Path(tmp_filename) + x.dump(p) + y = np.load(p, allow_pickle=True) + assert_array_equal(y, x) + + def test_roundtrip_binary_str(self, x): + s = x.tobytes() + y = np.frombuffer(s, dtype=x.dtype) + assert_array_equal(y, x.flat) + + s = x.tobytes('F') + y = np.frombuffer(s, dtype=x.dtype) + assert_array_equal(y, x.flatten('F')) + + def test_roundtrip_str(self, x): + x = x.real.ravel() + s = "@".join(map(str, x)) + y = np.fromstring(s, sep="@") + # NB. 
str imbues less precision + nan_mask = ~np.isfinite(x) + assert_array_equal(x[nan_mask], y[nan_mask]) + assert_array_almost_equal(x[~nan_mask], y[~nan_mask], decimal=5) + + def test_roundtrip_repr(self, x): + x = x.real.ravel() + s = "@".join(map(repr, x)) + y = np.fromstring(s, sep="@") + assert_array_equal(x, y) + + def test_unseekable_fromfile(self, x, tmp_filename): + # gh-6246 + x.tofile(tmp_filename) + + def fail(*args, **kwargs): + raise OSError('Can not tell or seek') + + with io.open(tmp_filename, 'rb', buffering=0) as f: + f.seek = fail + f.tell = fail + assert_raises(OSError, np.fromfile, f, dtype=x.dtype) + + def test_io_open_unbuffered_fromfile(self, x, tmp_filename): + # gh-6632 + x.tofile(tmp_filename) + with io.open(tmp_filename, 'rb', buffering=0) as f: + y = np.fromfile(f, dtype=x.dtype) + assert_array_equal(y, x.flat) + + def test_largish_file(self, tmp_filename): + # check the fallocate path on files > 16MB + d = np.zeros(4 * 1024 ** 2) + d.tofile(tmp_filename) + assert_equal(os.path.getsize(tmp_filename), d.nbytes) + assert_array_equal(d, np.fromfile(tmp_filename)) + # check offset + with open(tmp_filename, "r+b") as f: + f.seek(d.nbytes) + d.tofile(f) + assert_equal(os.path.getsize(tmp_filename), d.nbytes * 2) + # check append mode (gh-8329) + open(tmp_filename, "w").close() # delete file contents + with open(tmp_filename, "ab") as f: + d.tofile(f) + assert_array_equal(d, np.fromfile(tmp_filename)) + with open(tmp_filename, "ab") as f: + d.tofile(f) + assert_equal(os.path.getsize(tmp_filename), d.nbytes * 2) + + def test_io_open_buffered_fromfile(self, x, tmp_filename): + # gh-6632 + x.tofile(tmp_filename) + with io.open(tmp_filename, 'rb', buffering=-1) as f: + y = np.fromfile(f, dtype=x.dtype) + assert_array_equal(y, x.flat) + + def test_file_position_after_fromfile(self, tmp_filename): + # gh-4118 + sizes = [io.DEFAULT_BUFFER_SIZE//8, + io.DEFAULT_BUFFER_SIZE, + io.DEFAULT_BUFFER_SIZE*8] + + for size in sizes: + with open(tmp_filename, 'wb') as f: + f.seek(size-1) + f.write(b'\0') + + for mode in ['rb', 'r+b']: + err_msg = "%d %s" % (size, mode) + + with open(tmp_filename, mode) as f: + f.read(2) + np.fromfile(f, dtype=np.float64, count=1) + pos = f.tell() + assert_equal(pos, 10, err_msg=err_msg) + + def test_file_position_after_tofile(self, tmp_filename): + # gh-4118 + sizes = [io.DEFAULT_BUFFER_SIZE//8, + io.DEFAULT_BUFFER_SIZE, + io.DEFAULT_BUFFER_SIZE*8] + + for size in sizes: + err_msg = "%d" % (size,) + + with open(tmp_filename, 'wb') as f: + f.seek(size-1) + f.write(b'\0') + f.seek(10) + f.write(b'12') + np.array([0], dtype=np.float64).tofile(f) + pos = f.tell() + assert_equal(pos, 10 + 2 + 8, err_msg=err_msg) + + with open(tmp_filename, 'r+b') as f: + f.read(2) + f.seek(0, 1) # seek between read&write required by ANSI C + np.array([0], dtype=np.float64).tofile(f) + pos = f.tell() + assert_equal(pos, 10, err_msg=err_msg) + + def test_load_object_array_fromfile(self, tmp_filename): + # gh-12300 + with open(tmp_filename, 'w') as f: + # Ensure we have a file with consistent contents + pass + + with open(tmp_filename, 'rb') as f: + assert_raises_regex(ValueError, "Cannot read into object array", + np.fromfile, f, dtype=object) + + assert_raises_regex(ValueError, "Cannot read into object array", + np.fromfile, tmp_filename, dtype=object) + + def test_fromfile_offset(self, x, tmp_filename): + with open(tmp_filename, 'wb') as f: + x.tofile(f) + + with open(tmp_filename, 'rb') as f: + y = np.fromfile(f, dtype=x.dtype, offset=0) + assert_array_equal(y, x.flat) + + 
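# Illustrative sketch (not part of the test file): `offset` to np.fromfile
# is a byte count and is only permitted for binary reads, while `count` is a
# number of items; together they read a window out of the file, as the
# surrounding test does.  The scratch path below is hypothetical.
import numpy as np

data = np.arange(10, dtype=np.float64)
data.tofile("/tmp/fromfile_demo.bin")
window = np.fromfile("/tmp/fromfile_demo.bin", dtype=np.float64,
                     offset=2 * data.itemsize,   # skip the first two items
                     count=3)                    # then read three items
assert np.array_equal(window, data[2:5])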
with open(tmp_filename, 'rb') as f: + count_items = len(x.flat) // 8 + offset_items = len(x.flat) // 4 + offset_bytes = x.dtype.itemsize * offset_items + y = np.fromfile( + f, dtype=x.dtype, count=count_items, offset=offset_bytes + ) + assert_array_equal( + y, x.flat[offset_items:offset_items+count_items] + ) + + # subsequent seeks should stack + offset_bytes = x.dtype.itemsize + z = np.fromfile(f, dtype=x.dtype, offset=offset_bytes) + assert_array_equal(z, x.flat[offset_items+count_items+1:]) + + with open(tmp_filename, 'wb') as f: + x.tofile(f, sep=",") + + with open(tmp_filename, 'rb') as f: + assert_raises_regex( + TypeError, + "'offset' argument only permitted for binary files", + np.fromfile, tmp_filename, dtype=x.dtype, + sep=",", offset=1) + + @pytest.mark.skipif(IS_PYPY, reason="bug in PyPy's PyNumber_AsSsize_t") + def test_fromfile_bad_dup(self, x, tmp_filename): + def dup_str(fd): + return 'abc' + + def dup_bigint(fd): + return 2**68 + + old_dup = os.dup + try: + with open(tmp_filename, 'wb') as f: + x.tofile(f) + for dup, exc in ((dup_str, TypeError), (dup_bigint, OSError)): + os.dup = dup + assert_raises(exc, np.fromfile, f) + finally: + os.dup = old_dup + + def _check_from(self, s, value, filename, **kw): + if 'sep' not in kw: + y = np.frombuffer(s, **kw) + else: + y = np.fromstring(s, **kw) + assert_array_equal(y, value) + + with open(filename, 'wb') as f: + f.write(s) + y = np.fromfile(filename, **kw) + assert_array_equal(y, value) + + @pytest.fixture(params=["period", "comma"]) + def decimal_sep_localization(self, request): + """ + Including this fixture in a test will automatically + execute it with both types of decimal separator. + + So:: + + def test_decimal(decimal_sep_localization): + pass + + is equivalent to the following two tests:: + + def test_decimal_period_separator(): + pass + + def test_decimal_comma_separator(): + with CommaDecimalPointLocale(): + pass + """ + if request.param == "period": + yield + elif request.param == "comma": + with CommaDecimalPointLocale(): + yield + else: + assert False, request.param + + def test_nan(self, tmp_filename, decimal_sep_localization): + self._check_from( + b"nan +nan -nan NaN nan(foo) +NaN(BAR) -NAN(q_u_u_x_)", + [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan], + tmp_filename, + sep=' ') + + def test_inf(self, tmp_filename, decimal_sep_localization): + self._check_from( + b"inf +inf -inf infinity -Infinity iNfInItY -inF", + [np.inf, np.inf, -np.inf, np.inf, -np.inf, np.inf, -np.inf], + tmp_filename, + sep=' ') + + def test_numbers(self, tmp_filename, decimal_sep_localization): + self._check_from( + b"1.234 -1.234 .3 .3e55 -123133.1231e+133", + [1.234, -1.234, .3, .3e55, -123133.1231e+133], + tmp_filename, + sep=' ') + + def test_binary(self, tmp_filename): + self._check_from( + b'\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@', + np.array([1, 2, 3, 4]), + tmp_filename, + dtype='<f4') + + def test_string(self, tmp_filename): + self._check_from(b'1,2,3,4', [1., 2., 3., 4.], tmp_filename, sep=',') + + def test_counted_string(self, tmp_filename, decimal_sep_localization): + self._check_from( + b'1,2,3,4', [1., 2., 3., 4.], tmp_filename, count=4, sep=',') + self._check_from( + b'1,2,3,4', [1., 2., 3.], tmp_filename, count=3, sep=',') + self._check_from( + b'1,2,3,4', [1., 2., 3., 4.], tmp_filename, count=-1, sep=',') + + def test_string_with_ws(self, tmp_filename): + self._check_from( + b'1 2 3 4 ', [1, 2, 3, 4], tmp_filename, dtype=int, sep=' ') + + def test_counted_string_with_ws(self, tmp_filename): + 
self._check_from( + b'1 2 3 4 ', [1, 2, 3], tmp_filename, count=3, dtype=int, + sep=' ') + + def test_ascii(self, tmp_filename, decimal_sep_localization): + self._check_from( + b'1 , 2 , 3 , 4', [1., 2., 3., 4.], tmp_filename, sep=',') + self._check_from( + b'1,2,3,4', [1., 2., 3., 4.], tmp_filename, dtype=float, sep=',') + + def test_malformed(self, tmp_filename, decimal_sep_localization): + with assert_warns(DeprecationWarning): + self._check_from( + b'1.234 1,234', [1.234, 1.], tmp_filename, sep=' ') + + def test_long_sep(self, tmp_filename): + self._check_from( + b'1_x_3_x_4_x_5', [1, 3, 4, 5], tmp_filename, sep='_x_') + + def test_dtype(self, tmp_filename): + v = np.array([1, 2, 3, 4], dtype=np.int_) + self._check_from(b'1,2,3,4', v, tmp_filename, sep=',', dtype=np.int_) + + def test_dtype_bool(self, tmp_filename): + # can't use _check_from because fromstring can't handle True/False + v = np.array([True, False, True, False], dtype=np.bool_) + s = b'1,0,-2.3,0' + with open(tmp_filename, 'wb') as f: + f.write(s) + y = np.fromfile(tmp_filename, sep=',', dtype=np.bool_) + assert_(y.dtype == '?') + assert_array_equal(y, v) + + def test_tofile_sep(self, tmp_filename, decimal_sep_localization): + x = np.array([1.51, 2, 3.51, 4], dtype=float) + with open(tmp_filename, 'w') as f: + x.tofile(f, sep=',') + with open(tmp_filename, 'r') as f: + s = f.read() + #assert_equal(s, '1.51,2.0,3.51,4.0') + y = np.array([float(p) for p in s.split(',')]) + assert_array_equal(x,y) + + def test_tofile_format(self, tmp_filename, decimal_sep_localization): + x = np.array([1.51, 2, 3.51, 4], dtype=float) + with open(tmp_filename, 'w') as f: + x.tofile(f, sep=',', format='%.2f') + with open(tmp_filename, 'r') as f: + s = f.read() + assert_equal(s, '1.51,2.00,3.51,4.00') + + def test_tofile_cleanup(self, tmp_filename): + x = np.zeros((10), dtype=object) + with open(tmp_filename, 'wb') as f: + assert_raises(OSError, lambda: x.tofile(f, sep='')) + # Dup-ed file handle should be closed or remove will fail on Windows OS + os.remove(tmp_filename) + + # Also make sure that we close the Python handle + assert_raises(OSError, lambda: x.tofile(tmp_filename)) + os.remove(tmp_filename) + + def test_fromfile_subarray_binary(self, tmp_filename): + # Test subarray dtypes which are absorbed into the shape + x = np.arange(24, dtype="i4").reshape(2, 3, 4) + x.tofile(tmp_filename) + res = np.fromfile(tmp_filename, dtype="(3,4)i4") + assert_array_equal(x, res) + + x_str = x.tobytes() + with assert_warns(DeprecationWarning): + # binary fromstring is deprecated + res = np.fromstring(x_str, dtype="(3,4)i4") + assert_array_equal(x, res) + + def test_parsing_subarray_unsupported(self, tmp_filename): + # We currently do not support parsing subarray dtypes + data = "12,42,13," * 50 + with pytest.raises(ValueError): + expected = np.fromstring(data, dtype="(3,)i", sep=",") + + with open(tmp_filename, "w") as f: + f.write(data) + + with pytest.raises(ValueError): + np.fromfile(tmp_filename, dtype="(3,)i", sep=",") + + def test_read_shorter_than_count_subarray(self, tmp_filename): + # Test that requesting more values does not cause any problems + # in conjunction with subarray dimensions being absorbed into the + # array dimension. 
+ expected = np.arange(511 * 10, dtype="i").reshape(-1, 10) + + binary = expected.tobytes() + with pytest.raises(ValueError): + with pytest.warns(DeprecationWarning): + np.fromstring(binary, dtype="(10,)i", count=10000) + + expected.tofile(tmp_filename) + res = np.fromfile(tmp_filename, dtype="(10,)i", count=10000) + assert_array_equal(res, expected) + + +class TestFromBuffer: + @pytest.mark.parametrize('byteorder', ['<', '>']) + @pytest.mark.parametrize('dtype', [float, int, complex]) + def test_basic(self, byteorder, dtype): + dt = np.dtype(dtype).newbyteorder(byteorder) + x = (np.random.random((4, 7)) * 5).astype(dt) + buf = x.tobytes() + assert_array_equal(np.frombuffer(buf, dtype=dt), x.flat) + + @pytest.mark.parametrize("obj", [np.arange(10), b"12345678"]) + def test_array_base(self, obj): + # Objects (including NumPy arrays), which do not use the + # `release_buffer` slot should be directly used as a base object. + # See also gh-21612 + new = np.frombuffer(obj) + assert new.base is obj + + def test_empty(self): + assert_array_equal(np.frombuffer(b''), np.array([])) + + @pytest.mark.skipif(IS_PYPY, + reason="PyPy's memoryview currently does not track exports. See: " + "https://foss.heptapod.net/pypy/pypy/-/issues/3724") + def test_mmap_close(self): + # The old buffer protocol was not safe for some things that the new + # one is. But `frombuffer` always used the old one for a long time. + # Checks that it is safe with the new one (using memoryviews) + with tempfile.TemporaryFile(mode='wb') as tmp: + tmp.write(b"asdf") + tmp.flush() + mm = mmap.mmap(tmp.fileno(), 0) + arr = np.frombuffer(mm, dtype=np.uint8) + with pytest.raises(BufferError): + mm.close() # cannot close while array uses the buffer + del arr + mm.close() + +class TestFlat: + def setup_method(self): + a0 = np.arange(20.0) + a = a0.reshape(4, 5) + a0.shape = (4, 5) + a.flags.writeable = False + self.a = a + self.b = a[::2, ::2] + self.a0 = a0 + self.b0 = a0[::2, ::2] + + def test_contiguous(self): + testpassed = False + try: + self.a.flat[12] = 100.0 + except ValueError: + testpassed = True + assert_(testpassed) + assert_(self.a.flat[12] == 12.0) + + def test_discontiguous(self): + testpassed = False + try: + self.b.flat[4] = 100.0 + except ValueError: + testpassed = True + assert_(testpassed) + assert_(self.b.flat[4] == 12.0) + + def test___array__(self): + c = self.a.flat.__array__() + d = self.b.flat.__array__() + e = self.a0.flat.__array__() + f = self.b0.flat.__array__() + + assert_(c.flags.writeable is False) + assert_(d.flags.writeable is False) + assert_(e.flags.writeable is True) + assert_(f.flags.writeable is False) + assert_(c.flags.writebackifcopy is False) + assert_(d.flags.writebackifcopy is False) + assert_(e.flags.writebackifcopy is False) + assert_(f.flags.writebackifcopy is False) + + @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") + def test_refcount(self): + # includes regression test for reference count error gh-13165 + inds = [np.intp(0), np.array([True]*self.a.size), np.array([0]), None] + indtype = np.dtype(np.intp) + rc_indtype = sys.getrefcount(indtype) + for ind in inds: + rc_ind = sys.getrefcount(ind) + for _ in range(100): + try: + self.a.flat[ind] + except IndexError: + pass + assert_(abs(sys.getrefcount(ind) - rc_ind) < 50) + assert_(abs(sys.getrefcount(indtype) - rc_indtype) < 50) + + def test_index_getset(self): + it = np.arange(10).reshape(2, 1, 5).flat + with pytest.raises(AttributeError): + it.index = 10 + + for _ in it: + pass + # Check the value of `.index` is 
updated correctly (see also gh-19153) + # If the type was incorrect, this would show up on big-endian machines + assert it.index == it.base.size + + +class TestResize: + + @_no_tracing + def test_basic(self): + x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) + if IS_PYPY: + x.resize((5, 5), refcheck=False) + else: + x.resize((5, 5)) + assert_array_equal(x.flat[:9], + np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]).flat) + assert_array_equal(x[9:].flat, 0) + + def test_check_reference(self): + x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) + y = x + assert_raises(ValueError, x.resize, (5, 1)) + del y # avoid pyflakes unused variable warning. + + @_no_tracing + def test_int_shape(self): + x = np.eye(3) + if IS_PYPY: + x.resize(3, refcheck=False) + else: + x.resize(3) + assert_array_equal(x, np.eye(3)[0,:]) + + def test_none_shape(self): + x = np.eye(3) + x.resize(None) + assert_array_equal(x, np.eye(3)) + x.resize() + assert_array_equal(x, np.eye(3)) + + def test_0d_shape(self): + # to it multiple times to test it does not break alloc cache gh-9216 + for i in range(10): + x = np.empty((1,)) + x.resize(()) + assert_equal(x.shape, ()) + assert_equal(x.size, 1) + x = np.empty(()) + x.resize((1,)) + assert_equal(x.shape, (1,)) + assert_equal(x.size, 1) + + def test_invalid_arguments(self): + assert_raises(TypeError, np.eye(3).resize, 'hi') + assert_raises(ValueError, np.eye(3).resize, -1) + assert_raises(TypeError, np.eye(3).resize, order=1) + assert_raises(TypeError, np.eye(3).resize, refcheck='hi') + + @_no_tracing + def test_freeform_shape(self): + x = np.eye(3) + if IS_PYPY: + x.resize(3, 2, 1, refcheck=False) + else: + x.resize(3, 2, 1) + assert_(x.shape == (3, 2, 1)) + + @_no_tracing + def test_zeros_appended(self): + x = np.eye(3) + if IS_PYPY: + x.resize(2, 3, 3, refcheck=False) + else: + x.resize(2, 3, 3) + assert_array_equal(x[0], np.eye(3)) + assert_array_equal(x[1], np.zeros((3, 3))) + + @_no_tracing + def test_obj_obj(self): + # check memory is initialized on resize, gh-4857 + a = np.ones(10, dtype=[('k', object, 2)]) + if IS_PYPY: + a.resize(15, refcheck=False) + else: + a.resize(15,) + assert_equal(a.shape, (15,)) + assert_array_equal(a['k'][-5:], 0) + assert_array_equal(a['k'][:-5], 1) + + def test_empty_view(self): + # check that sizes containing a zero don't trigger a reallocate for + # already empty arrays + x = np.zeros((10, 0), int) + x_view = x[...] + x_view.resize((0, 10)) + x_view.resize((0, 100)) + + def test_check_weakref(self): + x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) + xref = weakref.ref(x) + assert_raises(ValueError, x.resize, (5, 1)) + del xref # avoid pyflakes unused variable warning. 
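
The TestResize cases above hinge on two behaviors of in-place ndarray.resize: with the default refcheck=True it refuses to reallocate while other references to the array exist, and any newly created elements are zero-filled. A minimal illustrative sketch of both, assuming a plain CPython run where no debugger or tracer holds extra references (the same concern the @_no_tracing decorator above guards against):

    import numpy as np

    x = np.arange(4)              # array([0, 1, 2, 3])
    y = x                         # a second reference to the same array
    try:
        x.resize(6)               # default refcheck=True detects the extra reference
    except ValueError:
        pass                      # resize refuses to reallocate a referenced array
    del y
    x.resize(6)                   # now allowed; the array grows in place
    assert x.tolist() == [0, 1, 2, 3, 0, 0]   # appended elements are zero-filled

On PyPy (as in the tests above) the reference-count check is unreliable, so refcheck=False has to be passed explicitly.
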
+ + +class TestRecord: + def test_field_rename(self): + dt = np.dtype([('f', float), ('i', int)]) + dt.names = ['p', 'q'] + assert_equal(dt.names, ['p', 'q']) + + def test_multiple_field_name_occurrence(self): + def test_dtype_init(): + np.dtype([("A", "f8"), ("B", "f8"), ("A", "f8")]) + + # Error raised when multiple fields have the same name + assert_raises(ValueError, test_dtype_init) + + def test_bytes_fields(self): + # Bytes are not allowed in field names and not recognized in titles + # on Py3 + assert_raises(TypeError, np.dtype, [(b'a', int)]) + assert_raises(TypeError, np.dtype, [(('b', b'a'), int)]) + + dt = np.dtype([((b'a', 'b'), int)]) + assert_raises(TypeError, dt.__getitem__, b'a') + + x = np.array([(1,), (2,), (3,)], dtype=dt) + assert_raises(IndexError, x.__getitem__, b'a') + + y = x[0] + assert_raises(IndexError, y.__getitem__, b'a') + + def test_multiple_field_name_unicode(self): + def test_dtype_unicode(): + np.dtype([("\u20B9", "f8"), ("B", "f8"), ("\u20B9", "f8")]) + + # Error raised when multiple fields have the same name(unicode included) + assert_raises(ValueError, test_dtype_unicode) + + def test_fromarrays_unicode(self): + # A single name string provided to fromarrays() is allowed to be unicode + # on both Python 2 and 3: + x = np.core.records.fromarrays( + [[0], [1]], names='a,b', formats='i4,i4') + assert_equal(x['a'][0], 0) + assert_equal(x['b'][0], 1) + + def test_unicode_order(self): + # Test that we can sort with order as a unicode field name in both Python 2 and + # 3: + name = 'b' + x = np.array([1, 3, 2], dtype=[(name, int)]) + x.sort(order=name) + assert_equal(x['b'], np.array([1, 2, 3])) + + def test_field_names(self): + # Test unicode and 8-bit / byte strings can be used + a = np.zeros((1,), dtype=[('f1', 'i4'), + ('f2', 'i4'), + ('f3', [('sf1', 'i4')])]) + # byte string indexing fails gracefully + assert_raises(IndexError, a.__setitem__, b'f1', 1) + assert_raises(IndexError, a.__getitem__, b'f1') + assert_raises(IndexError, a['f1'].__setitem__, b'sf1', 1) + assert_raises(IndexError, a['f1'].__getitem__, b'sf1') + b = a.copy() + fn1 = str('f1') + b[fn1] = 1 + assert_equal(b[fn1], 1) + fnn = str('not at all') + assert_raises(ValueError, b.__setitem__, fnn, 1) + assert_raises(ValueError, b.__getitem__, fnn) + b[0][fn1] = 2 + assert_equal(b[fn1], 2) + # Subfield + assert_raises(ValueError, b[0].__setitem__, fnn, 1) + assert_raises(ValueError, b[0].__getitem__, fnn) + # Subfield + fn3 = str('f3') + sfn1 = str('sf1') + b[fn3][sfn1] = 1 + assert_equal(b[fn3][sfn1], 1) + assert_raises(ValueError, b[fn3].__setitem__, fnn, 1) + assert_raises(ValueError, b[fn3].__getitem__, fnn) + # multiple subfields + fn2 = str('f2') + b[fn2] = 3 + + assert_equal(b[['f1', 'f2']][0].tolist(), (2, 3)) + assert_equal(b[['f2', 'f1']][0].tolist(), (3, 2)) + assert_equal(b[['f1', 'f3']][0].tolist(), (2, (1,))) + + # non-ascii unicode field indexing is well behaved + assert_raises(ValueError, a.__setitem__, '\u03e0', 1) + assert_raises(ValueError, a.__getitem__, '\u03e0') + + def test_record_hash(self): + a = np.array([(1, 2), (1, 2)], dtype='i1,i2') + a.flags.writeable = False + b = np.array([(1, 2), (3, 4)], dtype=[('num1', 'i1'), ('num2', 'i2')]) + b.flags.writeable = False + c = np.array([(1, 2), (3, 4)], dtype='i1,i2') + c.flags.writeable = False + assert_(hash(a[0]) == hash(a[1])) + assert_(hash(a[0]) == hash(b[0])) + assert_(hash(a[0]) != hash(b[1])) + assert_(hash(c[0]) == hash(a[0]) and c[0] == a[0]) + + def test_record_no_hash(self): + a = np.array([(1, 2), (1, 2)], 
dtype='i1,i2') + assert_raises(TypeError, hash, a[0]) + + def test_empty_structure_creation(self): + # make sure these do not raise errors (gh-5631) + np.array([()], dtype={'names': [], 'formats': [], + 'offsets': [], 'itemsize': 12}) + np.array([(), (), (), (), ()], dtype={'names': [], 'formats': [], + 'offsets': [], 'itemsize': 12}) + + def test_multifield_indexing_view(self): + a = np.ones(3, dtype=[('a', 'i4'), ('b', 'f4'), ('c', 'u4')]) + v = a[['a', 'c']] + assert_(v.base is a) + assert_(v.dtype == np.dtype({'names': ['a', 'c'], + 'formats': ['i4', 'u4'], + 'offsets': [0, 8]})) + v[:] = (4,5) + assert_equal(a[0].item(), (4, 1, 5)) + +class TestView: + def test_basic(self): + x = np.array([(1, 2, 3, 4), (5, 6, 7, 8)], + dtype=[('r', np.int8), ('g', np.int8), + ('b', np.int8), ('a', np.int8)]) + # We must be specific about the endianness here: + y = x.view(dtype='<i4') + # ... and again without the keyword. + z = x.view('<i4') + assert_array_equal(y, z) + assert_array_equal(y, [67305985, 134678021]) + + +def _mean(a, **args): + return a.mean(**args) + + +def _var(a, **args): + return a.var(**args) + + +def _std(a, **args): + return a.std(**args) + + +class TestStats: + + funcs = [_mean, _var, _std] + + def setup_method(self): + np.random.seed(range(3)) + self.rmat = np.random.random((4, 5)) + self.cmat = self.rmat + 1j * self.rmat + self.omat = np.array([Decimal(repr(r)) for r in self.rmat.flat]) + self.omat = self.omat.reshape(4, 5) + + def test_python_type(self): + for x in (np.float16(1.), 1, 1., 1+0j): + assert_equal(np.mean([x]), 1.) + assert_equal(np.std([x]), 0.) + assert_equal(np.var([x]), 0.) + + def test_keepdims(self): + mat = np.eye(3) + for f in self.funcs: + for axis in [0, 1]: + res = f(mat, axis=axis, keepdims=True) + assert_(res.ndim == mat.ndim) + assert_(res.shape[axis] == 1) + for axis in [None]: + res = f(mat, axis=axis, keepdims=True) + assert_(res.shape == (1, 1)) + + def test_out(self): + mat = np.eye(3) + for f in self.funcs: + out = np.zeros(3) + tgt = f(mat, axis=1) + res = f(mat, axis=1, out=out) + assert_almost_equal(res, out) + assert_almost_equal(res, tgt) + out = np.empty(2) + assert_raises(ValueError, f, mat, axis=1, out=out) + out = np.empty((2, 2)) + assert_raises(ValueError, f, mat, axis=1, out=out) + + def test_dtype_from_input(self): + + icodes = np.typecodes['AllInteger'] + fcodes = np.typecodes['AllFloat'] + + # object type + for f in self.funcs: + mat = np.array([[Decimal(1)]*3]*3) + tgt = mat.dtype.type + res = f(mat, axis=1).dtype.type + assert_(res is tgt) + # scalar case + res = type(f(mat, axis=None)) + assert_(res is Decimal) + + # integer types + for f in self.funcs: + for c in icodes: + mat = np.eye(3, dtype=c) + tgt = np.float64 + res = f(mat, axis=1).dtype.type + assert_(res is tgt) + # scalar case + res = f(mat, axis=None).dtype.type + assert_(res is tgt) + + # mean for float types + for f in [_mean]: + for c in fcodes: + mat = np.eye(3, dtype=c) + tgt = mat.dtype.type + res = f(mat, axis=1).dtype.type + assert_(res is tgt) + # scalar case + res = f(mat, axis=None).dtype.type + assert_(res is tgt) + + # var, std for float types + for f in [_var, _std]: + for c in fcodes: + mat = np.eye(3, dtype=c) + # deal with complex types + tgt = mat.real.dtype.type + res = f(mat, axis=1).dtype.type + assert_(res is tgt) + # scalar case + res = f(mat, axis=None).dtype.type + assert_(res is tgt) + + def test_dtype_from_dtype(self): + mat = np.eye(3) + + # stats for integer types + # FIXME: + # this needs definition as there are lots places along the 
line + # where type casting may take place. + + # for f in self.funcs: + # for c in np.typecodes['AllInteger']: + # tgt = np.dtype(c).type + # res = f(mat, axis=1, dtype=c).dtype.type + # assert_(res is tgt) + # # scalar case + # res = f(mat, axis=None, dtype=c).dtype.type + # assert_(res is tgt) + + # stats for float types + for f in self.funcs: + for c in np.typecodes['AllFloat']: + tgt = np.dtype(c).type + res = f(mat, axis=1, dtype=c).dtype.type + assert_(res is tgt) + # scalar case + res = f(mat, axis=None, dtype=c).dtype.type + assert_(res is tgt) + + def test_ddof(self): + for f in [_var]: + for ddof in range(3): + dim = self.rmat.shape[1] + tgt = f(self.rmat, axis=1) * dim + res = f(self.rmat, axis=1, ddof=ddof) * (dim - ddof) + for f in [_std]: + for ddof in range(3): + dim = self.rmat.shape[1] + tgt = f(self.rmat, axis=1) * np.sqrt(dim) + res = f(self.rmat, axis=1, ddof=ddof) * np.sqrt(dim - ddof) + assert_almost_equal(res, tgt) + assert_almost_equal(res, tgt) + + def test_ddof_too_big(self): + dim = self.rmat.shape[1] + for f in [_var, _std]: + for ddof in range(dim, dim + 2): + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + res = f(self.rmat, axis=1, ddof=ddof) + assert_(not (res < 0).any()) + assert_(len(w) > 0) + assert_(issubclass(w[0].category, RuntimeWarning)) + + def test_empty(self): + A = np.zeros((0, 3)) + for f in self.funcs: + for axis in [0, None]: + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + assert_(np.isnan(f(A, axis=axis)).all()) + assert_(len(w) > 0) + assert_(issubclass(w[0].category, RuntimeWarning)) + for axis in [1]: + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + assert_equal(f(A, axis=axis), np.zeros([])) + + def test_mean_values(self): + for mat in [self.rmat, self.cmat, self.omat]: + for axis in [0, 1]: + tgt = mat.sum(axis=axis) + res = _mean(mat, axis=axis) * mat.shape[axis] + assert_almost_equal(res, tgt) + for axis in [None]: + tgt = mat.sum(axis=axis) + res = _mean(mat, axis=axis) * np.prod(mat.shape) + assert_almost_equal(res, tgt) + + def test_mean_float16(self): + # This fail if the sum inside mean is done in float16 instead + # of float32. + assert_(_mean(np.ones(100000, dtype='float16')) == 1) + + def test_mean_axis_error(self): + # Ensure that AxisError is raised instead of IndexError when axis is + # out of bounds, see gh-15817. 
+ with assert_raises(np.exceptions.AxisError): + np.arange(10).mean(axis=2) + + def test_mean_where(self): + a = np.arange(16).reshape((4, 4)) + wh_full = np.array([[False, True, False, True], + [True, False, True, False], + [True, True, False, False], + [False, False, True, True]]) + wh_partial = np.array([[False], + [True], + [True], + [False]]) + _cases = [(1, True, [1.5, 5.5, 9.5, 13.5]), + (0, wh_full, [6., 5., 10., 9.]), + (1, wh_full, [2., 5., 8.5, 14.5]), + (0, wh_partial, [6., 7., 8., 9.])] + for _ax, _wh, _res in _cases: + assert_allclose(a.mean(axis=_ax, where=_wh), + np.array(_res)) + assert_allclose(np.mean(a, axis=_ax, where=_wh), + np.array(_res)) + + a3d = np.arange(16).reshape((2, 2, 4)) + _wh_partial = np.array([False, True, True, False]) + _res = [[1.5, 5.5], [9.5, 13.5]] + assert_allclose(a3d.mean(axis=2, where=_wh_partial), + np.array(_res)) + assert_allclose(np.mean(a3d, axis=2, where=_wh_partial), + np.array(_res)) + + with pytest.warns(RuntimeWarning) as w: + assert_allclose(a.mean(axis=1, where=wh_partial), + np.array([np.nan, 5.5, 9.5, np.nan])) + with pytest.warns(RuntimeWarning) as w: + assert_equal(a.mean(where=False), np.nan) + with pytest.warns(RuntimeWarning) as w: + assert_equal(np.mean(a, where=False), np.nan) + + def test_var_values(self): + for mat in [self.rmat, self.cmat, self.omat]: + for axis in [0, 1, None]: + msqr = _mean(mat * mat.conj(), axis=axis) + mean = _mean(mat, axis=axis) + tgt = msqr - mean * mean.conjugate() + res = _var(mat, axis=axis) + assert_almost_equal(res, tgt) + + @pytest.mark.parametrize(('complex_dtype', 'ndec'), ( + ('complex64', 6), + ('complex128', 7), + ('clongdouble', 7), + )) + def test_var_complex_values(self, complex_dtype, ndec): + # Test fast-paths for every builtin complex type + for axis in [0, 1, None]: + mat = self.cmat.copy().astype(complex_dtype) + msqr = _mean(mat * mat.conj(), axis=axis) + mean = _mean(mat, axis=axis) + tgt = msqr - mean * mean.conjugate() + res = _var(mat, axis=axis) + assert_almost_equal(res, tgt, decimal=ndec) + + def test_var_dimensions(self): + # _var paths for complex number introduce additions on views that + # increase dimensions. Ensure this generalizes to higher dims + mat = np.stack([self.cmat]*3) + for axis in [0, 1, 2, -1, None]: + msqr = _mean(mat * mat.conj(), axis=axis) + mean = _mean(mat, axis=axis) + tgt = msqr - mean * mean.conjugate() + res = _var(mat, axis=axis) + assert_almost_equal(res, tgt) + + def test_var_complex_byteorder(self): + # Test that var fast-path does not cause failures for complex arrays + # with non-native byteorder + cmat = self.cmat.copy().astype('complex128') + cmat_swapped = cmat.astype(cmat.dtype.newbyteorder()) + assert_almost_equal(cmat.var(), cmat_swapped.var()) + + def test_var_axis_error(self): + # Ensure that AxisError is raised instead of IndexError when axis is + # out of bounds, see gh-15817. 
+ with assert_raises(np.exceptions.AxisError): + np.arange(10).var(axis=2) + + def test_var_where(self): + a = np.arange(25).reshape((5, 5)) + wh_full = np.array([[False, True, False, True, True], + [True, False, True, True, False], + [True, True, False, False, True], + [False, True, True, False, True], + [True, False, True, True, False]]) + wh_partial = np.array([[False], + [True], + [True], + [False], + [True]]) + _cases = [(0, True, [50., 50., 50., 50., 50.]), + (1, True, [2., 2., 2., 2., 2.])] + for _ax, _wh, _res in _cases: + assert_allclose(a.var(axis=_ax, where=_wh), + np.array(_res)) + assert_allclose(np.var(a, axis=_ax, where=_wh), + np.array(_res)) + + a3d = np.arange(16).reshape((2, 2, 4)) + _wh_partial = np.array([False, True, True, False]) + _res = [[0.25, 0.25], [0.25, 0.25]] + assert_allclose(a3d.var(axis=2, where=_wh_partial), + np.array(_res)) + assert_allclose(np.var(a3d, axis=2, where=_wh_partial), + np.array(_res)) + + assert_allclose(np.var(a, axis=1, where=wh_full), + np.var(a[wh_full].reshape((5, 3)), axis=1)) + assert_allclose(np.var(a, axis=0, where=wh_partial), + np.var(a[wh_partial[:,0]], axis=0)) + with pytest.warns(RuntimeWarning) as w: + assert_equal(a.var(where=False), np.nan) + with pytest.warns(RuntimeWarning) as w: + assert_equal(np.var(a, where=False), np.nan) + + def test_std_values(self): + for mat in [self.rmat, self.cmat, self.omat]: + for axis in [0, 1, None]: + tgt = np.sqrt(_var(mat, axis=axis)) + res = _std(mat, axis=axis) + assert_almost_equal(res, tgt) + + def test_std_where(self): + a = np.arange(25).reshape((5,5))[::-1] + whf = np.array([[False, True, False, True, True], + [True, False, True, False, True], + [True, True, False, True, False], + [True, False, True, True, False], + [False, True, False, True, True]]) + whp = np.array([[False], + [False], + [True], + [True], + [False]]) + _cases = [ + (0, True, 7.07106781*np.ones((5))), + (1, True, 1.41421356*np.ones((5))), + (0, whf, + np.array([4.0824829 , 8.16496581, 5., 7.39509973, 8.49836586])), + (0, whp, 2.5*np.ones((5))) + ] + for _ax, _wh, _res in _cases: + assert_allclose(a.std(axis=_ax, where=_wh), _res) + assert_allclose(np.std(a, axis=_ax, where=_wh), _res) + + a3d = np.arange(16).reshape((2, 2, 4)) + _wh_partial = np.array([False, True, True, False]) + _res = [[0.5, 0.5], [0.5, 0.5]] + assert_allclose(a3d.std(axis=2, where=_wh_partial), + np.array(_res)) + assert_allclose(np.std(a3d, axis=2, where=_wh_partial), + np.array(_res)) + + assert_allclose(a.std(axis=1, where=whf), + np.std(a[whf].reshape((5,3)), axis=1)) + assert_allclose(np.std(a, axis=1, where=whf), + (a[whf].reshape((5,3))).std(axis=1)) + assert_allclose(a.std(axis=0, where=whp), + np.std(a[whp[:,0]], axis=0)) + assert_allclose(np.std(a, axis=0, where=whp), + (a[whp[:,0]]).std(axis=0)) + with pytest.warns(RuntimeWarning) as w: + assert_equal(a.std(where=False), np.nan) + with pytest.warns(RuntimeWarning) as w: + assert_equal(np.std(a, where=False), np.nan) + + def test_subclass(self): + class TestArray(np.ndarray): + def __new__(cls, data, info): + result = np.array(data) + result = result.view(cls) + result.info = info + return result + + def __array_finalize__(self, obj): + self.info = getattr(obj, "info", '') + + dat = TestArray([[1, 2, 3, 4], [5, 6, 7, 8]], 'jubba') + res = dat.mean(1) + assert_(res.info == dat.info) + res = dat.std(1) + assert_(res.info == dat.info) + res = dat.var(1) + assert_(res.info == dat.info) + + +class TestVdot: + def test_basic(self): + dt_numeric = np.typecodes['AllFloat'] + 
np.typecodes['AllInteger'] + dt_complex = np.typecodes['Complex'] + + # test real + a = np.eye(3) + for dt in dt_numeric + 'O': + b = a.astype(dt) + res = np.vdot(b, b) + assert_(np.isscalar(res)) + assert_equal(np.vdot(b, b), 3) + + # test complex + a = np.eye(3) * 1j + for dt in dt_complex + 'O': + b = a.astype(dt) + res = np.vdot(b, b) + assert_(np.isscalar(res)) + assert_equal(np.vdot(b, b), 3) + + # test boolean + b = np.eye(3, dtype=bool) + res = np.vdot(b, b) + assert_(np.isscalar(res)) + assert_equal(np.vdot(b, b), True) + + def test_vdot_array_order(self): + a = np.array([[1, 2], [3, 4]], order='C') + b = np.array([[1, 2], [3, 4]], order='F') + res = np.vdot(a, a) + + # integer arrays are exact + assert_equal(np.vdot(a, b), res) + assert_equal(np.vdot(b, a), res) + assert_equal(np.vdot(b, b), res) + + def test_vdot_uncontiguous(self): + for size in [2, 1000]: + # Different sizes match different branches in vdot. + a = np.zeros((size, 2, 2)) + b = np.zeros((size, 2, 2)) + a[:, 0, 0] = np.arange(size) + b[:, 0, 0] = np.arange(size) + 1 + # Make a and b uncontiguous: + a = a[..., 0] + b = b[..., 0] + + assert_equal(np.vdot(a, b), + np.vdot(a.flatten(), b.flatten())) + assert_equal(np.vdot(a, b.copy()), + np.vdot(a.flatten(), b.flatten())) + assert_equal(np.vdot(a.copy(), b), + np.vdot(a.flatten(), b.flatten())) + assert_equal(np.vdot(a.copy('F'), b), + np.vdot(a.flatten(), b.flatten())) + assert_equal(np.vdot(a, b.copy('F')), + np.vdot(a.flatten(), b.flatten())) + + +class TestDot: + def setup_method(self): + np.random.seed(128) + self.A = np.random.rand(4, 2) + self.b1 = np.random.rand(2, 1) + self.b2 = np.random.rand(2) + self.b3 = np.random.rand(1, 2) + self.b4 = np.random.rand(4) + self.N = 7 + + def test_dotmatmat(self): + A = self.A + res = np.dot(A.transpose(), A) + tgt = np.array([[1.45046013, 0.86323640], + [0.86323640, 0.84934569]]) + assert_almost_equal(res, tgt, decimal=self.N) + + def test_dotmatvec(self): + A, b1 = self.A, self.b1 + res = np.dot(A, b1) + tgt = np.array([[0.32114320], [0.04889721], + [0.15696029], [0.33612621]]) + assert_almost_equal(res, tgt, decimal=self.N) + + def test_dotmatvec2(self): + A, b2 = self.A, self.b2 + res = np.dot(A, b2) + tgt = np.array([0.29677940, 0.04518649, 0.14468333, 0.31039293]) + assert_almost_equal(res, tgt, decimal=self.N) + + def test_dotvecmat(self): + A, b4 = self.A, self.b4 + res = np.dot(b4, A) + tgt = np.array([1.23495091, 1.12222648]) + assert_almost_equal(res, tgt, decimal=self.N) + + def test_dotvecmat2(self): + b3, A = self.b3, self.A + res = np.dot(b3, A.transpose()) + tgt = np.array([[0.58793804, 0.08957460, 0.30605758, 0.62716383]]) + assert_almost_equal(res, tgt, decimal=self.N) + + def test_dotvecmat3(self): + A, b4 = self.A, self.b4 + res = np.dot(A.transpose(), b4) + tgt = np.array([1.23495091, 1.12222648]) + assert_almost_equal(res, tgt, decimal=self.N) + + def test_dotvecvecouter(self): + b1, b3 = self.b1, self.b3 + res = np.dot(b1, b3) + tgt = np.array([[0.20128610, 0.08400440], [0.07190947, 0.03001058]]) + assert_almost_equal(res, tgt, decimal=self.N) + + def test_dotvecvecinner(self): + b1, b3 = self.b1, self.b3 + res = np.dot(b3, b1) + tgt = np.array([[ 0.23129668]]) + assert_almost_equal(res, tgt, decimal=self.N) + + def test_dotcolumnvect1(self): + b1 = np.ones((3, 1)) + b2 = [5.3] + res = np.dot(b1, b2) + tgt = np.array([5.3, 5.3, 5.3]) + assert_almost_equal(res, tgt, decimal=self.N) + + def test_dotcolumnvect2(self): + b1 = np.ones((3, 1)).transpose() + b2 = [6.2] + res = np.dot(b2, b1) + tgt = 
np.array([6.2, 6.2, 6.2]) + assert_almost_equal(res, tgt, decimal=self.N) + + def test_dotvecscalar(self): + np.random.seed(100) + b1 = np.random.rand(1, 1) + b2 = np.random.rand(1, 4) + res = np.dot(b1, b2) + tgt = np.array([[0.15126730, 0.23068496, 0.45905553, 0.00256425]]) + assert_almost_equal(res, tgt, decimal=self.N) + + def test_dotvecscalar2(self): + np.random.seed(100) + b1 = np.random.rand(4, 1) + b2 = np.random.rand(1, 1) + res = np.dot(b1, b2) + tgt = np.array([[0.00256425],[0.00131359],[0.00200324],[ 0.00398638]]) + assert_almost_equal(res, tgt, decimal=self.N) + + def test_all(self): + dims = [(), (1,), (1, 1)] + dout = [(), (1,), (1, 1), (1,), (), (1,), (1, 1), (1,), (1, 1)] + for dim, (dim1, dim2) in zip(dout, itertools.product(dims, dims)): + b1 = np.zeros(dim1) + b2 = np.zeros(dim2) + res = np.dot(b1, b2) + tgt = np.zeros(dim) + assert_(res.shape == tgt.shape) + assert_almost_equal(res, tgt, decimal=self.N) + + def test_vecobject(self): + class Vec: + def __init__(self, sequence=None): + if sequence is None: + sequence = [] + self.array = np.array(sequence) + + def __add__(self, other): + out = Vec() + out.array = self.array + other.array + return out + + def __sub__(self, other): + out = Vec() + out.array = self.array - other.array + return out + + def __mul__(self, other): # with scalar + out = Vec(self.array.copy()) + out.array *= other + return out + + def __rmul__(self, other): + return self*other + + U_non_cont = np.transpose([[1., 1.], [1., 2.]]) + U_cont = np.ascontiguousarray(U_non_cont) + x = np.array([Vec([1., 0.]), Vec([0., 1.])]) + zeros = np.array([Vec([0., 0.]), Vec([0., 0.])]) + zeros_test = np.dot(U_cont, x) - np.dot(U_non_cont, x) + assert_equal(zeros[0].array, zeros_test[0].array) + assert_equal(zeros[1].array, zeros_test[1].array) + + def test_dot_2args(self): + from numpy.core.multiarray import dot + + a = np.array([[1, 2], [3, 4]], dtype=float) + b = np.array([[1, 0], [1, 1]], dtype=float) + c = np.array([[3, 2], [7, 4]], dtype=float) + + d = dot(a, b) + assert_allclose(c, d) + + def test_dot_3args(self): + from numpy.core.multiarray import dot + + np.random.seed(22) + f = np.random.random_sample((1024, 16)) + v = np.random.random_sample((16, 32)) + + r = np.empty((1024, 32)) + for i in range(12): + dot(f, v, r) + if HAS_REFCOUNT: + assert_equal(sys.getrefcount(r), 2) + r2 = dot(f, v, out=None) + assert_array_equal(r2, r) + assert_(r is dot(f, v, out=r)) + + v = v[:, 0].copy() # v.shape == (16,) + r = r[:, 0].copy() # r.shape == (1024,) + r2 = dot(f, v) + assert_(r is dot(f, v, r)) + assert_array_equal(r2, r) + + def test_dot_3args_errors(self): + from numpy.core.multiarray import dot + + np.random.seed(22) + f = np.random.random_sample((1024, 16)) + v = np.random.random_sample((16, 32)) + + r = np.empty((1024, 31)) + assert_raises(ValueError, dot, f, v, r) + + r = np.empty((1024,)) + assert_raises(ValueError, dot, f, v, r) + + r = np.empty((32,)) + assert_raises(ValueError, dot, f, v, r) + + r = np.empty((32, 1024)) + assert_raises(ValueError, dot, f, v, r) + assert_raises(ValueError, dot, f, v, r.T) + + r = np.empty((1024, 64)) + assert_raises(ValueError, dot, f, v, r[:, ::2]) + assert_raises(ValueError, dot, f, v, r[:, :32]) + + r = np.empty((1024, 32), dtype=np.float32) + assert_raises(ValueError, dot, f, v, r) + + r = np.empty((1024, 32), dtype=int) + assert_raises(ValueError, dot, f, v, r) + + def test_dot_out_result(self): + x = np.ones((), dtype=np.float16) + y = np.ones((5,), dtype=np.float16) + z = np.zeros((5,), dtype=np.float16) + res = 
x.dot(y, out=z) + assert np.array_equal(res, y) + assert np.array_equal(z, y) + + def test_dot_out_aliasing(self): + x = np.ones((), dtype=np.float16) + y = np.ones((5,), dtype=np.float16) + z = np.zeros((5,), dtype=np.float16) + res = x.dot(y, out=z) + z[0] = 2 + assert np.array_equal(res, z) + + def test_dot_array_order(self): + a = np.array([[1, 2], [3, 4]], order='C') + b = np.array([[1, 2], [3, 4]], order='F') + res = np.dot(a, a) + + # integer arrays are exact + assert_equal(np.dot(a, b), res) + assert_equal(np.dot(b, a), res) + assert_equal(np.dot(b, b), res) + + def test_accelerate_framework_sgemv_fix(self): + + def aligned_array(shape, align, dtype, order='C'): + d = dtype(0) + N = np.prod(shape) + tmp = np.zeros(N * d.nbytes + align, dtype=np.uint8) + address = tmp.__array_interface__["data"][0] + for offset in range(align): + if (address + offset) % align == 0: + break + tmp = tmp[offset:offset+N*d.nbytes].view(dtype=dtype) + return tmp.reshape(shape, order=order) + + def as_aligned(arr, align, dtype, order='C'): + aligned = aligned_array(arr.shape, align, dtype, order) + aligned[:] = arr[:] + return aligned + + def assert_dot_close(A, X, desired): + assert_allclose(np.dot(A, X), desired, rtol=1e-5, atol=1e-7) + + m = aligned_array(100, 15, np.float32) + s = aligned_array((100, 100), 15, np.float32) + np.dot(s, m) # this will always segfault if the bug is present + + testdata = itertools.product((15, 32), (10000,), (200, 89), ('C', 'F')) + for align, m, n, a_order in testdata: + # Calculation in double precision + A_d = np.random.rand(m, n) + X_d = np.random.rand(n) + desired = np.dot(A_d, X_d) + # Calculation with aligned single precision + A_f = as_aligned(A_d, align, np.float32, order=a_order) + X_f = as_aligned(X_d, align, np.float32) + assert_dot_close(A_f, X_f, desired) + # Strided A rows + A_d_2 = A_d[::2] + desired = np.dot(A_d_2, X_d) + A_f_2 = A_f[::2] + assert_dot_close(A_f_2, X_f, desired) + # Strided A columns, strided X vector + A_d_22 = A_d_2[:, ::2] + X_d_2 = X_d[::2] + desired = np.dot(A_d_22, X_d_2) + A_f_22 = A_f_2[:, ::2] + X_f_2 = X_f[::2] + assert_dot_close(A_f_22, X_f_2, desired) + # Check the strides are as expected + if a_order == 'F': + assert_equal(A_f_22.strides, (8, 8 * m)) + else: + assert_equal(A_f_22.strides, (8 * n, 8)) + assert_equal(X_f_2.strides, (8,)) + # Strides in A rows + cols only + X_f_2c = as_aligned(X_f_2, align, np.float32) + assert_dot_close(A_f_22, X_f_2c, desired) + # Strides just in A cols + A_d_12 = A_d[:, ::2] + desired = np.dot(A_d_12, X_d_2) + A_f_12 = A_f[:, ::2] + assert_dot_close(A_f_12, X_f_2c, desired) + # Strides in A cols and X + assert_dot_close(A_f_12, X_f_2, desired) + + @pytest.mark.slow + @pytest.mark.parametrize("dtype", [np.float64, np.complex128]) + @requires_memory(free_bytes=18e9) # complex case needs 18GiB+ + def test_huge_vectordot(self, dtype): + # Large vector multiplications are chunked with 32bit BLAS + # Test that the chunking does the right thing, see also gh-22262 + data = np.ones(2**30+100, dtype=dtype) + res = np.dot(data, data) + assert res == 2**30+100 + + def test_dtype_discovery_fails(self): + # See gh-14247, error checking was missing for failed dtype discovery + class BadObject(object): + def __array__(self): + raise TypeError("just this tiny mint leaf") + + with pytest.raises(TypeError): + np.dot(BadObject(), BadObject()) + + with pytest.raises(TypeError): + np.dot(3.0, BadObject()) + + +class MatmulCommon: + """Common tests for '@' operator and numpy.matmul. 
+ + """ + # Should work with these types. Will want to add + # "O" at some point + types = "?bhilqBHILQefdgFDGO" + + def test_exceptions(self): + dims = [ + ((1,), (2,)), # mismatched vector vector + ((2, 1,), (2,)), # mismatched matrix vector + ((2,), (1, 2)), # mismatched vector matrix + ((1, 2), (3, 1)), # mismatched matrix matrix + ((1,), ()), # vector scalar + ((), (1)), # scalar vector + ((1, 1), ()), # matrix scalar + ((), (1, 1)), # scalar matrix + ((2, 2, 1), (3, 1, 2)), # cannot broadcast + ] + + for dt, (dm1, dm2) in itertools.product(self.types, dims): + a = np.ones(dm1, dtype=dt) + b = np.ones(dm2, dtype=dt) + assert_raises(ValueError, self.matmul, a, b) + + def test_shapes(self): + dims = [ + ((1, 1), (2, 1, 1)), # broadcast first argument + ((2, 1, 1), (1, 1)), # broadcast second argument + ((2, 1, 1), (2, 1, 1)), # matrix stack sizes match + ] + + for dt, (dm1, dm2) in itertools.product(self.types, dims): + a = np.ones(dm1, dtype=dt) + b = np.ones(dm2, dtype=dt) + res = self.matmul(a, b) + assert_(res.shape == (2, 1, 1)) + + # vector vector returns scalars. + for dt in self.types: + a = np.ones((2,), dtype=dt) + b = np.ones((2,), dtype=dt) + c = self.matmul(a, b) + assert_(np.array(c).shape == ()) + + def test_result_types(self): + mat = np.ones((1,1)) + vec = np.ones((1,)) + for dt in self.types: + m = mat.astype(dt) + v = vec.astype(dt) + for arg in [(m, v), (v, m), (m, m)]: + res = self.matmul(*arg) + assert_(res.dtype == dt) + + # vector vector returns scalars + if dt != "O": + res = self.matmul(v, v) + assert_(type(res) is np.dtype(dt).type) + + def test_scalar_output(self): + vec1 = np.array([2]) + vec2 = np.array([3, 4]).reshape(1, -1) + tgt = np.array([6, 8]) + for dt in self.types[1:]: + v1 = vec1.astype(dt) + v2 = vec2.astype(dt) + res = self.matmul(v1, v2) + assert_equal(res, tgt) + res = self.matmul(v2.T, v1) + assert_equal(res, tgt) + + # boolean type + vec = np.array([True, True], dtype='?').reshape(1, -1) + res = self.matmul(vec[:, 0], vec) + assert_equal(res, True) + + def test_vector_vector_values(self): + vec1 = np.array([1, 2]) + vec2 = np.array([3, 4]).reshape(-1, 1) + tgt1 = np.array([11]) + tgt2 = np.array([[3, 6], [4, 8]]) + for dt in self.types[1:]: + v1 = vec1.astype(dt) + v2 = vec2.astype(dt) + res = self.matmul(v1, v2) + assert_equal(res, tgt1) + # no broadcast, we must make v1 into a 2d ndarray + res = self.matmul(v2, v1.reshape(1, -1)) + assert_equal(res, tgt2) + + # boolean type + vec = np.array([True, True], dtype='?') + res = self.matmul(vec, vec) + assert_equal(res, True) + + def test_vector_matrix_values(self): + vec = np.array([1, 2]) + mat1 = np.array([[1, 2], [3, 4]]) + mat2 = np.stack([mat1]*2, axis=0) + tgt1 = np.array([7, 10]) + tgt2 = np.stack([tgt1]*2, axis=0) + for dt in self.types[1:]: + v = vec.astype(dt) + m1 = mat1.astype(dt) + m2 = mat2.astype(dt) + res = self.matmul(v, m1) + assert_equal(res, tgt1) + res = self.matmul(v, m2) + assert_equal(res, tgt2) + + # boolean type + vec = np.array([True, False]) + mat1 = np.array([[True, False], [False, True]]) + mat2 = np.stack([mat1]*2, axis=0) + tgt1 = np.array([True, False]) + tgt2 = np.stack([tgt1]*2, axis=0) + + res = self.matmul(vec, mat1) + assert_equal(res, tgt1) + res = self.matmul(vec, mat2) + assert_equal(res, tgt2) + + def test_matrix_vector_values(self): + vec = np.array([1, 2]) + mat1 = np.array([[1, 2], [3, 4]]) + mat2 = np.stack([mat1]*2, axis=0) + tgt1 = np.array([5, 11]) + tgt2 = np.stack([tgt1]*2, axis=0) + for dt in self.types[1:]: + v = vec.astype(dt) + m1 = 
mat1.astype(dt) + m2 = mat2.astype(dt) + res = self.matmul(m1, v) + assert_equal(res, tgt1) + res = self.matmul(m2, v) + assert_equal(res, tgt2) + + # boolean type + vec = np.array([True, False]) + mat1 = np.array([[True, False], [False, True]]) + mat2 = np.stack([mat1]*2, axis=0) + tgt1 = np.array([True, False]) + tgt2 = np.stack([tgt1]*2, axis=0) + + res = self.matmul(vec, mat1) + assert_equal(res, tgt1) + res = self.matmul(vec, mat2) + assert_equal(res, tgt2) + + def test_matrix_matrix_values(self): + mat1 = np.array([[1, 2], [3, 4]]) + mat2 = np.array([[1, 0], [1, 1]]) + mat12 = np.stack([mat1, mat2], axis=0) + mat21 = np.stack([mat2, mat1], axis=0) + tgt11 = np.array([[7, 10], [15, 22]]) + tgt12 = np.array([[3, 2], [7, 4]]) + tgt21 = np.array([[1, 2], [4, 6]]) + tgt12_21 = np.stack([tgt12, tgt21], axis=0) + tgt11_12 = np.stack((tgt11, tgt12), axis=0) + tgt11_21 = np.stack((tgt11, tgt21), axis=0) + for dt in self.types[1:]: + m1 = mat1.astype(dt) + m2 = mat2.astype(dt) + m12 = mat12.astype(dt) + m21 = mat21.astype(dt) + + # matrix @ matrix + res = self.matmul(m1, m2) + assert_equal(res, tgt12) + res = self.matmul(m2, m1) + assert_equal(res, tgt21) + + # stacked @ matrix + res = self.matmul(m12, m1) + assert_equal(res, tgt11_21) + + # matrix @ stacked + res = self.matmul(m1, m12) + assert_equal(res, tgt11_12) + + # stacked @ stacked + res = self.matmul(m12, m21) + assert_equal(res, tgt12_21) + + # boolean type + m1 = np.array([[1, 1], [0, 0]], dtype=np.bool_) + m2 = np.array([[1, 0], [1, 1]], dtype=np.bool_) + m12 = np.stack([m1, m2], axis=0) + m21 = np.stack([m2, m1], axis=0) + tgt11 = m1 + tgt12 = m1 + tgt21 = np.array([[1, 1], [1, 1]], dtype=np.bool_) + tgt12_21 = np.stack([tgt12, tgt21], axis=0) + tgt11_12 = np.stack((tgt11, tgt12), axis=0) + tgt11_21 = np.stack((tgt11, tgt21), axis=0) + + # matrix @ matrix + res = self.matmul(m1, m2) + assert_equal(res, tgt12) + res = self.matmul(m2, m1) + assert_equal(res, tgt21) + + # stacked @ matrix + res = self.matmul(m12, m1) + assert_equal(res, tgt11_21) + + # matrix @ stacked + res = self.matmul(m1, m12) + assert_equal(res, tgt11_12) + + # stacked @ stacked + res = self.matmul(m12, m21) + assert_equal(res, tgt12_21) + + +class TestMatmul(MatmulCommon): + matmul = np.matmul + + def test_out_arg(self): + a = np.ones((5, 2), dtype=float) + b = np.array([[1, 3], [5, 7]], dtype=float) + tgt = np.dot(a, b) + + # test as positional argument + msg = "out positional argument" + out = np.zeros((5, 2), dtype=float) + self.matmul(a, b, out) + assert_array_equal(out, tgt, err_msg=msg) + + # test as keyword argument + msg = "out keyword argument" + out = np.zeros((5, 2), dtype=float) + self.matmul(a, b, out=out) + assert_array_equal(out, tgt, err_msg=msg) + + # test out with not allowed type cast (safe casting) + msg = "Cannot cast ufunc .* output" + out = np.zeros((5, 2), dtype=np.int32) + assert_raises_regex(TypeError, msg, self.matmul, a, b, out=out) + + # test out with type upcast to complex + out = np.zeros((5, 2), dtype=np.complex128) + c = self.matmul(a, b, out=out) + assert_(c is out) + with suppress_warnings() as sup: + sup.filter(np.ComplexWarning, '') + c = c.astype(tgt.dtype) + assert_array_equal(c, tgt) + + def test_empty_out(self): + # Check that the output cannot be broadcast, so that it cannot be + # size zero when the outer dimensions (iterator size) has size zero. 
+ arr = np.ones((0, 1, 1)) + out = np.ones((1, 1, 1)) + assert self.matmul(arr, arr).shape == (0, 1, 1) + + with pytest.raises(ValueError, match=r"non-broadcastable"): + self.matmul(arr, arr, out=out) + + def test_out_contiguous(self): + a = np.ones((5, 2), dtype=float) + b = np.array([[1, 3], [5, 7]], dtype=float) + v = np.array([1, 3], dtype=float) + tgt = np.dot(a, b) + tgt_mv = np.dot(a, v) + + # test out non-contiguous + out = np.ones((5, 2, 2), dtype=float) + c = self.matmul(a, b, out=out[..., 0]) + assert c.base is out + assert_array_equal(c, tgt) + c = self.matmul(a, v, out=out[:, 0, 0]) + assert_array_equal(c, tgt_mv) + c = self.matmul(v, a.T, out=out[:, 0, 0]) + assert_array_equal(c, tgt_mv) + + # test out contiguous in only last dim + out = np.ones((10, 2), dtype=float) + c = self.matmul(a, b, out=out[::2, :]) + assert_array_equal(c, tgt) + + # test transposes of out, args + out = np.ones((5, 2), dtype=float) + c = self.matmul(b.T, a.T, out=out.T) + assert_array_equal(out, tgt) + + m1 = np.arange(15.).reshape(5, 3) + m2 = np.arange(21.).reshape(3, 7) + m3 = np.arange(30.).reshape(5, 6)[:, ::2] # non-contiguous + vc = np.arange(10.) + vr = np.arange(6.) + m0 = np.zeros((3, 0)) + @pytest.mark.parametrize('args', ( + # matrix-matrix + (m1, m2), (m2.T, m1.T), (m2.T.copy(), m1.T), (m2.T, m1.T.copy()), + # matrix-matrix-transpose, contiguous and non + (m1, m1.T), (m1.T, m1), (m1, m3.T), (m3, m1.T), + (m3, m3.T), (m3.T, m3), + # matrix-matrix non-contiguous + (m3, m2), (m2.T, m3.T), (m2.T.copy(), m3.T), + # vector-matrix, matrix-vector, contiguous + (m1, vr[:3]), (vc[:5], m1), (m1.T, vc[:5]), (vr[:3], m1.T), + # vector-matrix, matrix-vector, vector non-contiguous + (m1, vr[::2]), (vc[::2], m1), (m1.T, vc[::2]), (vr[::2], m1.T), + # vector-matrix, matrix-vector, matrix non-contiguous + (m3, vr[:3]), (vc[:5], m3), (m3.T, vc[:5]), (vr[:3], m3.T), + # vector-matrix, matrix-vector, both non-contiguous + (m3, vr[::2]), (vc[::2], m3), (m3.T, vc[::2]), (vr[::2], m3.T), + # size == 0 + (m0, m0.T), (m0.T, m0), (m1, m0), (m0.T, m1.T), + )) + def test_dot_equivalent(self, args): + r1 = np.matmul(*args) + r2 = np.dot(*args) + assert_equal(r1, r2) + + r3 = np.matmul(args[0].copy(), args[1].copy()) + assert_equal(r1, r3) + + def test_matmul_object(self): + import fractions + + f = np.vectorize(fractions.Fraction) + def random_ints(): + return np.random.randint(1, 1000, size=(10, 3, 3)) + M1 = f(random_ints(), random_ints()) + M2 = f(random_ints(), random_ints()) + + M3 = self.matmul(M1, M2) + + [N1, N2, N3] = [a.astype(float) for a in [M1, M2, M3]] + + assert_allclose(N3, self.matmul(N1, N2)) + + def test_matmul_object_type_scalar(self): + from fractions import Fraction as F + v = np.array([F(2,3), F(5,7)]) + res = self.matmul(v, v) + assert_(type(res) is F) + + def test_matmul_empty(self): + a = np.empty((3, 0), dtype=object) + b = np.empty((0, 3), dtype=object) + c = np.zeros((3, 3)) + assert_array_equal(np.matmul(a, b), c) + + def test_matmul_exception_multiply(self): + # test that matmul fails if `__mul__` is missing + class add_not_multiply(): + def __add__(self, other): + return self + a = np.full((3,3), add_not_multiply()) + with assert_raises(TypeError): + b = np.matmul(a, a) + + def test_matmul_exception_add(self): + # test that matmul fails if `__add__` is missing + class multiply_not_add(): + def __mul__(self, other): + return self + a = np.full((3,3), multiply_not_add()) + with assert_raises(TypeError): + b = np.matmul(a, a) + + def test_matmul_bool(self): + # gh-14439 + a = np.array([[1, 
0],[1, 1]], dtype=bool) + assert np.max(a.view(np.uint8)) == 1 + b = np.matmul(a, a) + # matmul with boolean output should always be 0, 1 + assert np.max(b.view(np.uint8)) == 1 + + rg = np.random.default_rng(np.random.PCG64(43)) + d = rg.integers(2, size=4*5, dtype=np.int8) + d = d.reshape(4, 5) > 0 + out1 = np.matmul(d, d.reshape(5, 4)) + out2 = np.dot(d, d.reshape(5, 4)) + assert_equal(out1, out2) + + c = np.matmul(np.zeros((2, 0), dtype=bool), np.zeros(0, dtype=bool)) + assert not np.any(c) + + +class TestMatmulOperator(MatmulCommon): + import operator + matmul = operator.matmul + + def test_array_priority_override(self): + + class A: + __array_priority__ = 1000 + + def __matmul__(self, other): + return "A" + + def __rmatmul__(self, other): + return "A" + + a = A() + b = np.ones(2) + assert_equal(self.matmul(a, b), "A") + assert_equal(self.matmul(b, a), "A") + + def test_matmul_raises(self): + assert_raises(TypeError, self.matmul, np.int8(5), np.int8(5)) + assert_raises(TypeError, self.matmul, np.void(b'abc'), np.void(b'abc')) + assert_raises(TypeError, self.matmul, np.arange(10), np.void(b'abc')) + + +class TestMatmulInplace: + DTYPES = {} + for i in MatmulCommon.types: + for j in MatmulCommon.types: + if np.can_cast(j, i): + DTYPES[f"{i}-{j}"] = (np.dtype(i), np.dtype(j)) + + @pytest.mark.parametrize("dtype1,dtype2", DTYPES.values(), ids=DTYPES) + def test_basic(self, dtype1: np.dtype, dtype2: np.dtype) -> None: + a = np.arange(10).reshape(5, 2).astype(dtype1) + a_id = id(a) + b = np.ones((2, 2), dtype=dtype2) + + ref = a @ b + a @= b + + assert id(a) == a_id + assert a.dtype == dtype1 + assert a.shape == (5, 2) + if dtype1.kind in "fc": + np.testing.assert_allclose(a, ref) + else: + np.testing.assert_array_equal(a, ref) + + SHAPES = { + "2d_large": ((10**5, 10), (10, 10)), + "3d_large": ((10**4, 10, 10), (1, 10, 10)), + "1d": ((3,), (3,)), + "2d_1d": ((3, 3), (3,)), + "1d_2d": ((3,), (3, 3)), + "2d_broadcast": ((3, 3), (3, 1)), + "2d_broadcast_reverse": ((1, 3), (3, 3)), + "3d_broadcast1": ((3, 3, 3), (1, 3, 1)), + "3d_broadcast2": ((3, 3, 3), (1, 3, 3)), + "3d_broadcast3": ((3, 3, 3), (3, 3, 1)), + "3d_broadcast_reverse1": ((1, 3, 3), (3, 3, 3)), + "3d_broadcast_reverse2": ((3, 1, 3), (3, 3, 3)), + "3d_broadcast_reverse3": ((1, 1, 3), (3, 3, 3)), + } + + @pytest.mark.parametrize("a_shape,b_shape", SHAPES.values(), ids=SHAPES) + def test_shapes(self, a_shape: tuple[int, ...], b_shape: tuple[int, ...]): + a_size = np.prod(a_shape) + a = np.arange(a_size).reshape(a_shape).astype(np.float64) + a_id = id(a) + + b_size = np.prod(b_shape) + b = np.arange(b_size).reshape(b_shape) + + ref = a @ b + if ref.shape != a_shape: + with pytest.raises(ValueError): + a @= b + return + else: + a @= b + + assert id(a) == a_id + assert a.dtype.type == np.float64 + assert a.shape == a_shape + np.testing.assert_allclose(a, ref) + + +def test_matmul_axes(): + a = np.arange(3*4*5).reshape(3, 4, 5) + c = np.matmul(a, a, axes=[(-2, -1), (-1, -2), (1, 2)]) + assert c.shape == (3, 4, 4) + d = np.matmul(a, a, axes=[(-2, -1), (-1, -2), (0, 1)]) + assert d.shape == (4, 4, 3) + e = np.swapaxes(d, 0, 2) + assert_array_equal(e, c) + f = np.matmul(a, np.arange(3), axes=[(1, 0), (0), (0)]) + assert f.shape == (4, 5) + + +class TestInner: + + def test_inner_type_mismatch(self): + c = 1. 
+ A = np.array((1,1), dtype='i,i') + + assert_raises(TypeError, np.inner, c, A) + assert_raises(TypeError, np.inner, A, c) + + def test_inner_scalar_and_vector(self): + for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?': + sca = np.array(3, dtype=dt)[()] + vec = np.array([1, 2], dtype=dt) + desired = np.array([3, 6], dtype=dt) + assert_equal(np.inner(vec, sca), desired) + assert_equal(np.inner(sca, vec), desired) + + def test_vecself(self): + # Ticket 844. + # Inner product of a vector with itself segfaults or give + # meaningless result + a = np.zeros(shape=(1, 80), dtype=np.float64) + p = np.inner(a, a) + assert_almost_equal(p, 0, decimal=14) + + def test_inner_product_with_various_contiguities(self): + # github issue 6532 + for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?': + # check an inner product involving a matrix transpose + A = np.array([[1, 2], [3, 4]], dtype=dt) + B = np.array([[1, 3], [2, 4]], dtype=dt) + C = np.array([1, 1], dtype=dt) + desired = np.array([4, 6], dtype=dt) + assert_equal(np.inner(A.T, C), desired) + assert_equal(np.inner(C, A.T), desired) + assert_equal(np.inner(B, C), desired) + assert_equal(np.inner(C, B), desired) + # check a matrix product + desired = np.array([[7, 10], [15, 22]], dtype=dt) + assert_equal(np.inner(A, B), desired) + # check the syrk vs. gemm paths + desired = np.array([[5, 11], [11, 25]], dtype=dt) + assert_equal(np.inner(A, A), desired) + assert_equal(np.inner(A, A.copy()), desired) + # check an inner product involving an aliased and reversed view + a = np.arange(5).astype(dt) + b = a[::-1] + desired = np.array(10, dtype=dt).item() + assert_equal(np.inner(b, a), desired) + + def test_3d_tensor(self): + for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?': + a = np.arange(24).reshape(2,3,4).astype(dt) + b = np.arange(24, 48).reshape(2,3,4).astype(dt) + desired = np.array( + [[[[ 158, 182, 206], + [ 230, 254, 278]], + + [[ 566, 654, 742], + [ 830, 918, 1006]], + + [[ 974, 1126, 1278], + [1430, 1582, 1734]]], + + [[[1382, 1598, 1814], + [2030, 2246, 2462]], + + [[1790, 2070, 2350], + [2630, 2910, 3190]], + + [[2198, 2542, 2886], + [3230, 3574, 3918]]]] + ).astype(dt) + assert_equal(np.inner(a, b), desired) + assert_equal(np.inner(b, a).transpose(2,3,0,1), desired) + + +class TestChoose: + def setup_method(self): + self.x = 2*np.ones((3,), dtype=int) + self.y = 3*np.ones((3,), dtype=int) + self.x2 = 2*np.ones((2, 3), dtype=int) + self.y2 = 3*np.ones((2, 3), dtype=int) + self.ind = [0, 0, 1] + + def test_basic(self): + A = np.choose(self.ind, (self.x, self.y)) + assert_equal(A, [2, 2, 3]) + + def test_broadcast1(self): + A = np.choose(self.ind, (self.x2, self.y2)) + assert_equal(A, [[2, 2, 3], [2, 2, 3]]) + + def test_broadcast2(self): + A = np.choose(self.ind, (self.x, self.y2)) + assert_equal(A, [[2, 2, 3], [2, 2, 3]]) + + @pytest.mark.parametrize("ops", + [(1000, np.array([1], dtype=np.uint8)), + (-1, np.array([1], dtype=np.uint8)), + (1., np.float32(3)), + (1., np.array([3], dtype=np.float32))],) + def test_output_dtype(self, ops): + expected_dt = np.result_type(*ops) + assert(np.choose([0], ops).dtype == expected_dt) + + +class TestRepeat: + def setup_method(self): + self.m = np.array([1, 2, 3, 4, 5, 6]) + self.m_rect = self.m.reshape((2, 3)) + + def test_basic(self): + A = np.repeat(self.m, [1, 3, 2, 1, 1, 2]) + assert_equal(A, [1, 2, 2, 2, 3, + 3, 4, 5, 6, 6]) + + def test_broadcast1(self): + A = np.repeat(self.m, 2) + assert_equal(A, [1, 1, 2, 2, 3, 3, + 4, 4, 5, 5, 6, 6]) + + def 
test_axis_spec(self): + A = np.repeat(self.m_rect, [2, 1], axis=0) + assert_equal(A, [[1, 2, 3], + [1, 2, 3], + [4, 5, 6]]) + + A = np.repeat(self.m_rect, [1, 3, 2], axis=1) + assert_equal(A, [[1, 2, 2, 2, 3, 3], + [4, 5, 5, 5, 6, 6]]) + + def test_broadcast2(self): + A = np.repeat(self.m_rect, 2, axis=0) + assert_equal(A, [[1, 2, 3], + [1, 2, 3], + [4, 5, 6], + [4, 5, 6]]) + + A = np.repeat(self.m_rect, 2, axis=1) + assert_equal(A, [[1, 1, 2, 2, 3, 3], + [4, 4, 5, 5, 6, 6]]) + + +# TODO: test for multidimensional +NEIGH_MODE = {'zero': 0, 'one': 1, 'constant': 2, 'circular': 3, 'mirror': 4} + + +@pytest.mark.parametrize('dt', [float, Decimal], ids=['float', 'object']) +class TestNeighborhoodIter: + # Simple, 2d tests + def test_simple2d(self, dt): + # Test zero and one padding for simple data type + x = np.array([[0, 1], [2, 3]], dtype=dt) + r = [np.array([[0, 0, 0], [0, 0, 1]], dtype=dt), + np.array([[0, 0, 0], [0, 1, 0]], dtype=dt), + np.array([[0, 0, 1], [0, 2, 3]], dtype=dt), + np.array([[0, 1, 0], [2, 3, 0]], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator( + x, [-1, 0, -1, 1], x[0], NEIGH_MODE['zero']) + assert_array_equal(l, r) + + r = [np.array([[1, 1, 1], [1, 0, 1]], dtype=dt), + np.array([[1, 1, 1], [0, 1, 1]], dtype=dt), + np.array([[1, 0, 1], [1, 2, 3]], dtype=dt), + np.array([[0, 1, 1], [2, 3, 1]], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator( + x, [-1, 0, -1, 1], x[0], NEIGH_MODE['one']) + assert_array_equal(l, r) + + r = [np.array([[4, 4, 4], [4, 0, 1]], dtype=dt), + np.array([[4, 4, 4], [0, 1, 4]], dtype=dt), + np.array([[4, 0, 1], [4, 2, 3]], dtype=dt), + np.array([[0, 1, 4], [2, 3, 4]], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator( + x, [-1, 0, -1, 1], 4, NEIGH_MODE['constant']) + assert_array_equal(l, r) + + # Test with start in the middle + r = [np.array([[4, 0, 1], [4, 2, 3]], dtype=dt), + np.array([[0, 1, 4], [2, 3, 4]], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator( + x, [-1, 0, -1, 1], 4, NEIGH_MODE['constant'], 2) + assert_array_equal(l, r) + + def test_mirror2d(self, dt): + x = np.array([[0, 1], [2, 3]], dtype=dt) + r = [np.array([[0, 0, 1], [0, 0, 1]], dtype=dt), + np.array([[0, 1, 1], [0, 1, 1]], dtype=dt), + np.array([[0, 0, 1], [2, 2, 3]], dtype=dt), + np.array([[0, 1, 1], [2, 3, 3]], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator( + x, [-1, 0, -1, 1], x[0], NEIGH_MODE['mirror']) + assert_array_equal(l, r) + + # Simple, 1d tests + def test_simple(self, dt): + # Test padding with constant values + x = np.linspace(1, 5, 5).astype(dt) + r = [[0, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 0]] + l = _multiarray_tests.test_neighborhood_iterator( + x, [-1, 1], x[0], NEIGH_MODE['zero']) + assert_array_equal(l, r) + + r = [[1, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 1]] + l = _multiarray_tests.test_neighborhood_iterator( + x, [-1, 1], x[0], NEIGH_MODE['one']) + assert_array_equal(l, r) + + r = [[x[4], 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, x[4]]] + l = _multiarray_tests.test_neighborhood_iterator( + x, [-1, 1], x[4], NEIGH_MODE['constant']) + assert_array_equal(l, r) + + # Test mirror modes + def test_mirror(self, dt): + x = np.linspace(1, 5, 5).astype(dt) + r = np.array([[2, 1, 1, 2, 3], [1, 1, 2, 3, 4], [1, 2, 3, 4, 5], + [2, 3, 4, 5, 5], [3, 4, 5, 5, 4]], dtype=dt) + l = _multiarray_tests.test_neighborhood_iterator( + x, [-2, 2], x[1], NEIGH_MODE['mirror']) + assert_([i.dtype == dt for i in l]) + assert_array_equal(l, r) + + # Circular mode + def test_circular(self, 
dt): + x = np.linspace(1, 5, 5).astype(dt) + r = np.array([[4, 5, 1, 2, 3], [5, 1, 2, 3, 4], [1, 2, 3, 4, 5], + [2, 3, 4, 5, 1], [3, 4, 5, 1, 2]], dtype=dt) + l = _multiarray_tests.test_neighborhood_iterator( + x, [-2, 2], x[0], NEIGH_MODE['circular']) + assert_array_equal(l, r) + + +# Test stacking neighborhood iterators +class TestStackedNeighborhoodIter: + # Simple, 1d test: stacking 2 constant-padded neigh iterators + def test_simple_const(self): + dt = np.float64 + # Test zero and one padding for simple data type + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([0], dtype=dt), + np.array([0], dtype=dt), + np.array([1], dtype=dt), + np.array([2], dtype=dt), + np.array([3], dtype=dt), + np.array([0], dtype=dt), + np.array([0], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator_oob( + x, [-2, 4], NEIGH_MODE['zero'], [0, 0], NEIGH_MODE['zero']) + assert_array_equal(l, r) + + r = [np.array([1, 0, 1], dtype=dt), + np.array([0, 1, 2], dtype=dt), + np.array([1, 2, 3], dtype=dt), + np.array([2, 3, 0], dtype=dt), + np.array([3, 0, 1], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator_oob( + x, [-1, 3], NEIGH_MODE['zero'], [-1, 1], NEIGH_MODE['one']) + assert_array_equal(l, r) + + # 2nd simple, 1d test: stacking 2 neigh iterators, mixing const padding and + # mirror padding + def test_simple_mirror(self): + dt = np.float64 + # Stacking zero on top of mirror + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([0, 1, 1], dtype=dt), + np.array([1, 1, 2], dtype=dt), + np.array([1, 2, 3], dtype=dt), + np.array([2, 3, 3], dtype=dt), + np.array([3, 3, 0], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator_oob( + x, [-1, 3], NEIGH_MODE['mirror'], [-1, 1], NEIGH_MODE['zero']) + assert_array_equal(l, r) + + # Stacking mirror on top of zero + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([1, 0, 0], dtype=dt), + np.array([0, 0, 1], dtype=dt), + np.array([0, 1, 2], dtype=dt), + np.array([1, 2, 3], dtype=dt), + np.array([2, 3, 0], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator_oob( + x, [-1, 3], NEIGH_MODE['zero'], [-2, 0], NEIGH_MODE['mirror']) + assert_array_equal(l, r) + + # Stacking mirror on top of zero: 2nd + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([0, 1, 2], dtype=dt), + np.array([1, 2, 3], dtype=dt), + np.array([2, 3, 0], dtype=dt), + np.array([3, 0, 0], dtype=dt), + np.array([0, 0, 3], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator_oob( + x, [-1, 3], NEIGH_MODE['zero'], [0, 2], NEIGH_MODE['mirror']) + assert_array_equal(l, r) + + # Stacking mirror on top of zero: 3rd + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([1, 0, 0, 1, 2], dtype=dt), + np.array([0, 0, 1, 2, 3], dtype=dt), + np.array([0, 1, 2, 3, 0], dtype=dt), + np.array([1, 2, 3, 0, 0], dtype=dt), + np.array([2, 3, 0, 0, 3], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator_oob( + x, [-1, 3], NEIGH_MODE['zero'], [-2, 2], NEIGH_MODE['mirror']) + assert_array_equal(l, r) + + # 3rd simple, 1d test: stacking 2 neigh iterators, mixing const padding and + # circular padding + def test_simple_circular(self): + dt = np.float64 + # Stacking zero on top of mirror + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([0, 3, 1], dtype=dt), + np.array([3, 1, 2], dtype=dt), + np.array([1, 2, 3], dtype=dt), + np.array([2, 3, 1], dtype=dt), + np.array([3, 1, 0], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator_oob( + x, [-1, 3], NEIGH_MODE['circular'], [-1, 1], NEIGH_MODE['zero']) + assert_array_equal(l, r) + + # Stacking mirror on top of zero + x = 
np.array([1, 2, 3], dtype=dt) + r = [np.array([3, 0, 0], dtype=dt), + np.array([0, 0, 1], dtype=dt), + np.array([0, 1, 2], dtype=dt), + np.array([1, 2, 3], dtype=dt), + np.array([2, 3, 0], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator_oob( + x, [-1, 3], NEIGH_MODE['zero'], [-2, 0], NEIGH_MODE['circular']) + assert_array_equal(l, r) + + # Stacking mirror on top of zero: 2nd + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([0, 1, 2], dtype=dt), + np.array([1, 2, 3], dtype=dt), + np.array([2, 3, 0], dtype=dt), + np.array([3, 0, 0], dtype=dt), + np.array([0, 0, 1], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator_oob( + x, [-1, 3], NEIGH_MODE['zero'], [0, 2], NEIGH_MODE['circular']) + assert_array_equal(l, r) + + # Stacking mirror on top of zero: 3rd + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([3, 0, 0, 1, 2], dtype=dt), + np.array([0, 0, 1, 2, 3], dtype=dt), + np.array([0, 1, 2, 3, 0], dtype=dt), + np.array([1, 2, 3, 0, 0], dtype=dt), + np.array([2, 3, 0, 0, 1], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator_oob( + x, [-1, 3], NEIGH_MODE['zero'], [-2, 2], NEIGH_MODE['circular']) + assert_array_equal(l, r) + + # 4th simple, 1d test: stacking 2 neigh iterators, but with lower iterator + # being strictly within the array + def test_simple_strict_within(self): + dt = np.float64 + # Stacking zero on top of zero, first neighborhood strictly inside the + # array + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([1, 2, 3, 0], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator_oob( + x, [1, 1], NEIGH_MODE['zero'], [-1, 2], NEIGH_MODE['zero']) + assert_array_equal(l, r) + + # Stacking mirror on top of zero, first neighborhood strictly inside the + # array + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([1, 2, 3, 3], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator_oob( + x, [1, 1], NEIGH_MODE['zero'], [-1, 2], NEIGH_MODE['mirror']) + assert_array_equal(l, r) + + # Stacking mirror on top of zero, first neighborhood strictly inside the + # array + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([1, 2, 3, 1], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator_oob( + x, [1, 1], NEIGH_MODE['zero'], [-1, 2], NEIGH_MODE['circular']) + assert_array_equal(l, r) + +class TestWarnings: + + def test_complex_warning(self): + x = np.array([1, 2]) + y = np.array([1-2j, 1+2j]) + + with warnings.catch_warnings(): + warnings.simplefilter("error", np.ComplexWarning) + assert_raises(np.ComplexWarning, x.__setitem__, slice(None), y) + assert_equal(x, [1, 2]) + + +class TestMinScalarType: + + def test_usigned_shortshort(self): + dt = np.min_scalar_type(2**8-1) + wanted = np.dtype('uint8') + assert_equal(wanted, dt) + + def test_usigned_short(self): + dt = np.min_scalar_type(2**16-1) + wanted = np.dtype('uint16') + assert_equal(wanted, dt) + + def test_usigned_int(self): + dt = np.min_scalar_type(2**32-1) + wanted = np.dtype('uint32') + assert_equal(wanted, dt) + + def test_usigned_longlong(self): + dt = np.min_scalar_type(2**63-1) + wanted = np.dtype('uint64') + assert_equal(wanted, dt) + + def test_object(self): + dt = np.min_scalar_type(2**64) + wanted = np.dtype('O') + assert_equal(wanted, dt) + + +from numpy.core._internal import _dtype_from_pep3118 + + +class TestPEP3118Dtype: + def _check(self, spec, wanted): + dt = np.dtype(wanted) + actual = _dtype_from_pep3118(spec) + assert_equal(actual, dt, + err_msg="spec %r != dtype %r" % (spec, wanted)) + + def test_native_padding(self): + align = np.dtype('i').alignment + for j 
in range(8): + if j == 0: + s = 'bi' + else: + s = 'b%dxi' % j + self._check('@'+s, {'f0': ('i1', 0), + 'f1': ('i', align*(1 + j//align))}) + self._check('='+s, {'f0': ('i1', 0), + 'f1': ('i', 1+j)}) + + def test_native_padding_2(self): + # Native padding should work also for structs and sub-arrays + self._check('x3T{xi}', {'f0': (({'f0': ('i', 4)}, (3,)), 4)}) + self._check('^x3T{xi}', {'f0': (({'f0': ('i', 1)}, (3,)), 1)}) + + def test_trailing_padding(self): + # Trailing padding should be included, *and*, the item size + # should match the alignment if in aligned mode + align = np.dtype('i').alignment + size = np.dtype('i').itemsize + + def aligned(n): + return align*(1 + (n-1)//align) + + base = dict(formats=['i'], names=['f0']) + + self._check('ix', dict(itemsize=aligned(size + 1), **base)) + self._check('ixx', dict(itemsize=aligned(size + 2), **base)) + self._check('ixxx', dict(itemsize=aligned(size + 3), **base)) + self._check('ixxxx', dict(itemsize=aligned(size + 4), **base)) + self._check('i7x', dict(itemsize=aligned(size + 7), **base)) + + self._check('^ix', dict(itemsize=size + 1, **base)) + self._check('^ixx', dict(itemsize=size + 2, **base)) + self._check('^ixxx', dict(itemsize=size + 3, **base)) + self._check('^ixxxx', dict(itemsize=size + 4, **base)) + self._check('^i7x', dict(itemsize=size + 7, **base)) + + def test_native_padding_3(self): + dt = np.dtype( + [('a', 'b'), ('b', 'i'), + ('sub', np.dtype('b,i')), ('c', 'i')], + align=True) + self._check("T{b:a:xxxi:b:T{b:f0:=i:f1:}:sub:xxxi:c:}", dt) + + dt = np.dtype( + [('a', 'b'), ('b', 'i'), ('c', 'b'), ('d', 'b'), + ('e', 'b'), ('sub', np.dtype('b,i', align=True))]) + self._check("T{b:a:=i:b:b:c:b:d:b:e:T{b:f0:xxxi:f1:}:sub:}", dt) + + def test_padding_with_array_inside_struct(self): + dt = np.dtype( + [('a', 'b'), ('b', 'i'), ('c', 'b', (3,)), + ('d', 'i')], + align=True) + self._check("T{b:a:xxxi:b:3b:c:xi:d:}", dt) + + def test_byteorder_inside_struct(self): + # The byte order after @T{=i} should be '=', not '@'. + # Check this by noting the absence of native alignment. 
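+ # (Illustrative aside, assuming the common case np.dtype('i').alignment == 4:
+ #  if native '@' padding still applied after the embedded struct, the trailing
+ #  'i' would land at offset 8; the expected offset of 5 below is what shows
+ #  that standard-size '=' rules take over instead.)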
+ self._check('@T{^i}xi', {'f0': ({'f0': ('i', 0)}, 0), + 'f1': ('i', 5)}) + + def test_intra_padding(self): + # Natively aligned sub-arrays may require some internal padding + align = np.dtype('i').alignment + size = np.dtype('i').itemsize + + def aligned(n): + return (align*(1 + (n-1)//align)) + + self._check('(3)T{ix}', (dict( + names=['f0'], + formats=['i'], + offsets=[0], + itemsize=aligned(size + 1) + ), (3,))) + + def test_char_vs_string(self): + dt = np.dtype('c') + self._check('c', dt) + + dt = np.dtype([('f0', 'S1', (4,)), ('f1', 'S4')]) + self._check('4c4s', dt) + + def test_field_order(self): + # gh-9053 - previously, we relied on dictionary key order + self._check("(0)I:a:f:b:", [('a', 'I', (0,)), ('b', 'f')]) + self._check("(0)I:b:f:a:", [('b', 'I', (0,)), ('a', 'f')]) + + def test_unnamed_fields(self): + self._check('ii', [('f0', 'i'), ('f1', 'i')]) + self._check('ii:f0:', [('f1', 'i'), ('f0', 'i')]) + + self._check('i', 'i') + self._check('i:f0:', [('f0', 'i')]) + + +class TestNewBufferProtocol: + """ Test PEP3118 buffers """ + + def _check_roundtrip(self, obj): + obj = np.asarray(obj) + x = memoryview(obj) + y = np.asarray(x) + y2 = np.array(x) + assert_(not y.flags.owndata) + assert_(y2.flags.owndata) + + assert_equal(y.dtype, obj.dtype) + assert_equal(y.shape, obj.shape) + assert_array_equal(obj, y) + + assert_equal(y2.dtype, obj.dtype) + assert_equal(y2.shape, obj.shape) + assert_array_equal(obj, y2) + + def test_roundtrip(self): + x = np.array([1, 2, 3, 4, 5], dtype='i4') + self._check_roundtrip(x) + + x = np.array([[1, 2], [3, 4]], dtype=np.float64) + self._check_roundtrip(x) + + x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:] + self._check_roundtrip(x) + + dt = [('a', 'b'), + ('b', 'h'), + ('c', 'i'), + ('d', 'l'), + ('dx', 'q'), + ('e', 'B'), + ('f', 'H'), + ('g', 'I'), + ('h', 'L'), + ('hx', 'Q'), + ('i', np.single), + ('j', np.double), + ('k', np.longdouble), + ('ix', np.csingle), + ('jx', np.cdouble), + ('kx', np.clongdouble), + ('l', 'S4'), + ('m', 'U4'), + ('n', 'V3'), + ('o', '?'), + ('p', np.half), + ] + x = np.array( + [(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + b'aaaa', 'bbbb', b'xxx', True, 1.0)], + dtype=dt) + self._check_roundtrip(x) + + x = np.array(([[1, 2], [3, 4]],), dtype=[('a', (int, (2, 2)))]) + self._check_roundtrip(x) + + x = np.array([1, 2, 3], dtype='>i2') + self._check_roundtrip(x) + + x = np.array([1, 2, 3], dtype='<i2') + self._check_roundtrip(x) + + x = np.array([1, 2, 3], dtype='>i4') + self._check_roundtrip(x) + + x = np.array([1, 2, 3], dtype='<i4') + self._check_roundtrip(x) + + # check long long can be represented as non-native + x = np.array([1, 2, 3], dtype='>q') + self._check_roundtrip(x) + + # Native-only data types can be passed through the buffer interface + # only in native byte order + if sys.byteorder == 'little': + x = np.array([1, 2, 3], dtype='>g') + assert_raises(ValueError, self._check_roundtrip, x) + x = np.array([1, 2, 3], dtype='<g') + self._check_roundtrip(x) + else: + x = np.array([1, 2, 3], dtype='>g') + self._check_roundtrip(x) + x = np.array([1, 2, 3], dtype='<g') + assert_raises(ValueError, self._check_roundtrip, x) + + def test_roundtrip_half(self): + half_list = [ + 1.0, + -2.0, + 6.5504 * 10**4, # (max half precision) + 2**-14, # ~= 6.10352 * 10**-5 (minimum positive normal) + 2**-24, # ~= 5.96046 * 10**-8 (minimum strictly positive subnormal) + 0.0, + -0.0, + float('+inf'), + float('-inf'), + 0.333251953125, # ~= 1/3 + ] + + x = np.array(half_list, dtype='>e') + self._check_roundtrip(x) + x = 
np.array(half_list, dtype='<e') + self._check_roundtrip(x) + + def test_roundtrip_single_types(self): + for typ in np.sctypeDict.values(): + dtype = np.dtype(typ) + + if dtype.char in 'Mm': + # datetimes cannot be used in buffers + continue + if dtype.char == 'V': + # skip void + continue + + x = np.zeros(4, dtype=dtype) + self._check_roundtrip(x) + + if dtype.char not in 'qQgG': + dt = dtype.newbyteorder('<') + x = np.zeros(4, dtype=dt) + self._check_roundtrip(x) + + dt = dtype.newbyteorder('>') + x = np.zeros(4, dtype=dt) + self._check_roundtrip(x) + + def test_roundtrip_scalar(self): + # Issue #4015. + self._check_roundtrip(0) + + def test_invalid_buffer_format(self): + # datetime64 cannot be used fully in a buffer yet + # Should be fixed in the next Numpy major release + dt = np.dtype([('a', 'uint16'), ('b', 'M8[s]')]) + a = np.empty(3, dt) + assert_raises((ValueError, BufferError), memoryview, a) + assert_raises((ValueError, BufferError), memoryview, np.array((3), 'M8[D]')) + + def test_export_simple_1d(self): + x = np.array([1, 2, 3, 4, 5], dtype='i') + y = memoryview(x) + assert_equal(y.format, 'i') + assert_equal(y.shape, (5,)) + assert_equal(y.ndim, 1) + assert_equal(y.strides, (4,)) + assert_equal(y.suboffsets, ()) + assert_equal(y.itemsize, 4) + + def test_export_simple_nd(self): + x = np.array([[1, 2], [3, 4]], dtype=np.float64) + y = memoryview(x) + assert_equal(y.format, 'd') + assert_equal(y.shape, (2, 2)) + assert_equal(y.ndim, 2) + assert_equal(y.strides, (16, 8)) + assert_equal(y.suboffsets, ()) + assert_equal(y.itemsize, 8) + + def test_export_discontiguous(self): + x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:] + y = memoryview(x) + assert_equal(y.format, 'f') + assert_equal(y.shape, (3, 3)) + assert_equal(y.ndim, 2) + assert_equal(y.strides, (36, 4)) + assert_equal(y.suboffsets, ()) + assert_equal(y.itemsize, 4) + + def test_export_record(self): + dt = [('a', 'b'), + ('b', 'h'), + ('c', 'i'), + ('d', 'l'), + ('dx', 'q'), + ('e', 'B'), + ('f', 'H'), + ('g', 'I'), + ('h', 'L'), + ('hx', 'Q'), + ('i', np.single), + ('j', np.double), + ('k', np.longdouble), + ('ix', np.csingle), + ('jx', np.cdouble), + ('kx', np.clongdouble), + ('l', 'S4'), + ('m', 'U4'), + ('n', 'V3'), + ('o', '?'), + ('p', np.half), + ] + x = np.array( + [(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + b'aaaa', 'bbbb', b' ', True, 1.0)], + dtype=dt) + y = memoryview(x) + assert_equal(y.shape, (1,)) + assert_equal(y.ndim, 1) + assert_equal(y.suboffsets, ()) + + sz = sum([np.dtype(b).itemsize for a, b in dt]) + if np.dtype('l').itemsize == 4: + assert_equal(y.format, 'T{b:a:=h:b:i:c:l:d:q:dx:B:e:@H:f:=I:g:L:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}') + else: + assert_equal(y.format, 'T{b:a:=h:b:i:c:q:d:q:dx:B:e:@H:f:=I:g:Q:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}') + # Cannot test if NPY_RELAXED_STRIDES_DEBUG changes the strides + if not (np.ones(1).strides[0] == np.iinfo(np.intp).max): + assert_equal(y.strides, (sz,)) + assert_equal(y.itemsize, sz) + + def test_export_subarray(self): + x = np.array(([[1, 2], [3, 4]],), dtype=[('a', ('i', (2, 2)))]) + y = memoryview(x) + assert_equal(y.format, 'T{(2,2)i:a:}') + assert_equal(y.shape, ()) + assert_equal(y.ndim, 0) + assert_equal(y.strides, ()) + assert_equal(y.suboffsets, ()) + assert_equal(y.itemsize, 16) + + def test_export_endian(self): + x = np.array([1, 2, 3], dtype='>i') + y = memoryview(x) + if sys.byteorder == 'little': + assert_equal(y.format, '>i') + else: + assert_equal(y.format, 'i') + + x 
= np.array([1, 2, 3], dtype='<i') + y = memoryview(x) + if sys.byteorder == 'little': + assert_equal(y.format, 'i') + else: + assert_equal(y.format, '<i') + + def test_export_flags(self): + # Check SIMPLE flag, see also gh-3613 (exception should be BufferError) + assert_raises(ValueError, + _multiarray_tests.get_buffer_info, + np.arange(5)[::2], ('SIMPLE',)) + + @pytest.mark.parametrize(["obj", "error"], [ + pytest.param(np.array([1, 2], dtype=rational), ValueError, id="array"), + pytest.param(rational(1, 2), TypeError, id="scalar")]) + def test_export_and_pickle_user_dtype(self, obj, error): + # User dtypes should export successfully when FORMAT was not requested. + with pytest.raises(error): + _multiarray_tests.get_buffer_info(obj, ("STRIDED_RO", "FORMAT")) + + _multiarray_tests.get_buffer_info(obj, ("STRIDED_RO",)) + + # This is currently also necessary to implement pickling: + pickle_obj = pickle.dumps(obj) + res = pickle.loads(pickle_obj) + assert_array_equal(res, obj) + + def test_padding(self): + for j in range(8): + x = np.array([(1,), (2,)], dtype={'f0': (int, j)}) + self._check_roundtrip(x) + + def test_reference_leak(self): + if HAS_REFCOUNT: + count_1 = sys.getrefcount(np.core._internal) + a = np.zeros(4) + b = memoryview(a) + c = np.asarray(b) + if HAS_REFCOUNT: + count_2 = sys.getrefcount(np.core._internal) + assert_equal(count_1, count_2) + del c # avoid pyflakes unused variable warning. + + def test_padded_struct_array(self): + dt1 = np.dtype( + [('a', 'b'), ('b', 'i'), ('sub', np.dtype('b,i')), ('c', 'i')], + align=True) + x1 = np.arange(dt1.itemsize, dtype=np.int8).view(dt1) + self._check_roundtrip(x1) + + dt2 = np.dtype( + [('a', 'b'), ('b', 'i'), ('c', 'b', (3,)), ('d', 'i')], + align=True) + x2 = np.arange(dt2.itemsize, dtype=np.int8).view(dt2) + self._check_roundtrip(x2) + + dt3 = np.dtype( + [('a', 'b'), ('b', 'i'), ('c', 'b'), ('d', 'b'), + ('e', 'b'), ('sub', np.dtype('b,i', align=True))]) + x3 = np.arange(dt3.itemsize, dtype=np.int8).view(dt3) + self._check_roundtrip(x3) + + @pytest.mark.valgrind_error(reason="leaks buffer info cache temporarily.") + def test_relaxed_strides(self, c=np.ones((1, 10, 10), dtype='i8')): + # Note: c defined as parameter so that it is persistent and leak + # checks will notice gh-16934 (buffer info cache leak). + c.strides = (-1, 80, 8) # strides need to be fixed at export + + assert_(memoryview(c).strides == (800, 80, 8)) + + # Writing C-contiguous data to a BytesIO buffer should work + fd = io.BytesIO() + fd.write(c.data) + + fortran = c.T + assert_(memoryview(fortran).strides == (8, 80, 800)) + + arr = np.ones((1, 10)) + if arr.flags.f_contiguous: + shape, strides = _multiarray_tests.get_buffer_info( + arr, ['F_CONTIGUOUS']) + assert_(strides[0] == 8) + arr = np.ones((10, 1), order='F') + shape, strides = _multiarray_tests.get_buffer_info( + arr, ['C_CONTIGUOUS']) + assert_(strides[-1] == 8) + + @pytest.mark.valgrind_error(reason="leaks buffer info cache temporarily.") + @pytest.mark.skipif(not np.ones((10, 1), order="C").flags.f_contiguous, + reason="Test is unnecessary (but fails) without relaxed strides.") + def test_relaxed_strides_buffer_info_leak(self, arr=np.ones((1, 10))): + """Test that alternating export of C- and F-order buffers from + an array which is both C- and F-order when relaxed strides is + active works. + This test defines array in the signature to ensure leaking more + references every time the test is run (catching the leak with + pytest-leaks). 
+ """ + for i in range(10): + _, s = _multiarray_tests.get_buffer_info(arr, ['F_CONTIGUOUS']) + assert s == (8, 8) + _, s = _multiarray_tests.get_buffer_info(arr, ['C_CONTIGUOUS']) + assert s == (80, 8) + + def test_out_of_order_fields(self): + dt = np.dtype(dict( + formats=['<i4', '<i4'], + names=['one', 'two'], + offsets=[4, 0], + itemsize=8 + )) + + # overlapping fields cannot be represented by PEP3118 + arr = np.empty(1, dt) + with assert_raises(ValueError): + memoryview(arr) + + def test_max_dims(self): + a = np.ones((1,) * 32) + self._check_roundtrip(a) + + @pytest.mark.slow + def test_error_too_many_dims(self): + def make_ctype(shape, scalar_type): + t = scalar_type + for dim in shape[::-1]: + t = dim * t + return t + + # construct a memoryview with 33 dimensions + c_u8_33d = make_ctype((1,)*33, ctypes.c_uint8) + m = memoryview(c_u8_33d()) + assert_equal(m.ndim, 33) + + assert_raises_regex( + RuntimeError, "ndim", + np.array, m) + + # The above seems to create some deep cycles, clean them up for + # easier reference count debugging: + del c_u8_33d, m + for i in range(33): + if gc.collect() == 0: + break + + def test_error_pointer_type(self): + # gh-6741 + m = memoryview(ctypes.pointer(ctypes.c_uint8())) + assert_('&' in m.format) + + assert_raises_regex( + ValueError, "format string", + np.array, m) + + def test_error_message_unsupported(self): + # wchar has no corresponding numpy type - if this changes in future, we + # need a better way to construct an invalid memoryview format. + t = ctypes.c_wchar * 4 + with assert_raises(ValueError) as cm: + np.array(t()) + + exc = cm.exception + with assert_raises_regex( + NotImplementedError, + r"Unrepresentable .* 'u' \(UCS-2 strings\)" + ): + raise exc.__cause__ + + def test_ctypes_integer_via_memoryview(self): + # gh-11150, due to bpo-10746 + for c_integer in {ctypes.c_int, ctypes.c_long, ctypes.c_longlong}: + value = c_integer(42) + with warnings.catch_warnings(record=True): + warnings.filterwarnings('always', r'.*\bctypes\b', RuntimeWarning) + np.asarray(value) + + def test_ctypes_struct_via_memoryview(self): + # gh-10528 + class foo(ctypes.Structure): + _fields_ = [('a', ctypes.c_uint8), ('b', ctypes.c_uint32)] + f = foo(a=1, b=2) + + with warnings.catch_warnings(record=True): + warnings.filterwarnings('always', r'.*\bctypes\b', RuntimeWarning) + arr = np.asarray(f) + + assert_equal(arr['a'], 1) + assert_equal(arr['b'], 2) + f.a = 3 + assert_equal(arr['a'], 3) + + @pytest.mark.parametrize("obj", [np.ones(3), np.ones(1, dtype="i,i")[()]]) + def test_error_if_stored_buffer_info_is_corrupted(self, obj): + """ + If a user extends a NumPy array before 1.20 and then runs it + on NumPy 1.20+. A C-subclassed array might in theory modify + the new buffer-info field. This checks that an error is raised + if this happens (for buffer export), an error is written on delete. + This is a sanity check to help users transition to safe code, it + may be deleted at any point. 
+ """ + # corrupt buffer info: + _multiarray_tests.corrupt_or_fix_bufferinfo(obj) + name = type(obj) + with pytest.raises(RuntimeError, + match=f".*{name} appears to be C subclassed"): + memoryview(obj) + # Fix buffer info again before we delete (or we lose the memory) + _multiarray_tests.corrupt_or_fix_bufferinfo(obj) + + def test_no_suboffsets(self): + try: + import _testbuffer + except ImportError: + raise pytest.skip("_testbuffer is not available") + + for shape in [(2, 3), (2, 3, 4)]: + data = list(range(np.prod(shape))) + buffer = _testbuffer.ndarray(data, shape, format='i', + flags=_testbuffer.ND_PIL) + msg = "NumPy currently does not support.*suboffsets" + with pytest.raises(BufferError, match=msg): + np.asarray(buffer) + with pytest.raises(BufferError, match=msg): + np.asarray([buffer]) + + # Also check (unrelated and more limited but similar) frombuffer: + with pytest.raises(BufferError): + np.frombuffer(buffer) + + +class TestArrayCreationCopyArgument(object): + + class RaiseOnBool: + + def __bool__(self): + raise ValueError + + true_vals = [True, np._CopyMode.ALWAYS, np.True_] + false_vals = [False, np._CopyMode.IF_NEEDED, np.False_] + + def test_scalars(self): + # Test both numpy and python scalars + for dtype in np.typecodes["All"]: + arr = np.zeros((), dtype=dtype) + scalar = arr[()] + pyscalar = arr.item(0) + + # Test never-copy raises error: + assert_raises(ValueError, np.array, scalar, + copy=np._CopyMode.NEVER) + assert_raises(ValueError, np.array, pyscalar, + copy=np._CopyMode.NEVER) + assert_raises(ValueError, np.array, pyscalar, + copy=self.RaiseOnBool()) + assert_raises(ValueError, _multiarray_tests.npy_ensurenocopy, + [1]) + # Casting with a dtype (to unsigned integers) can be special: + with pytest.raises(ValueError): + np.array(pyscalar, dtype=np.int64, copy=np._CopyMode.NEVER) + + def test_compatible_cast(self): + + # Some types are compatible even though they are different, no + # copy is necessary for them. This is mostly true for some integers + def int_types(byteswap=False): + int_types = (np.typecodes["Integer"] + + np.typecodes["UnsignedInteger"]) + for int_type in int_types: + yield np.dtype(int_type) + if byteswap: + yield np.dtype(int_type).newbyteorder() + + for int1 in int_types(): + for int2 in int_types(True): + arr = np.arange(10, dtype=int1) + + for copy in self.true_vals: + res = np.array(arr, copy=copy, dtype=int2) + assert res is not arr and res.flags.owndata + assert_array_equal(res, arr) + + if int1 == int2: + # Casting is not necessary, base check is sufficient here + for copy in self.false_vals: + res = np.array(arr, copy=copy, dtype=int2) + assert res is arr or res.base is arr + + res = np.array(arr, + copy=np._CopyMode.NEVER, + dtype=int2) + assert res is arr or res.base is arr + + else: + # Casting is necessary, assert copy works: + for copy in self.false_vals: + res = np.array(arr, copy=copy, dtype=int2) + assert res is not arr and res.flags.owndata + assert_array_equal(res, arr) + + assert_raises(ValueError, np.array, + arr, copy=np._CopyMode.NEVER, + dtype=int2) + assert_raises(ValueError, np.array, + arr, copy=None, + dtype=int2) + + def test_buffer_interface(self): + + # Buffer interface gives direct memory access (no copy) + arr = np.arange(10) + view = memoryview(arr) + + # Checking bases is a bit tricky since numpy creates another + # memoryview, so use may_share_memory. 
+ for copy in self.true_vals: + res = np.array(view, copy=copy) + assert not np.may_share_memory(arr, res) + for copy in self.false_vals: + res = np.array(view, copy=copy) + assert np.may_share_memory(arr, res) + res = np.array(view, copy=np._CopyMode.NEVER) + assert np.may_share_memory(arr, res) + + def test_array_interfaces(self): + # Array interface gives direct memory access (much like a memoryview) + base_arr = np.arange(10) + + class ArrayLike: + __array_interface__ = base_arr.__array_interface__ + + arr = ArrayLike() + + for copy, val in [(True, None), (np._CopyMode.ALWAYS, None), + (False, arr), (np._CopyMode.IF_NEEDED, arr), + (np._CopyMode.NEVER, arr)]: + res = np.array(arr, copy=copy) + assert res.base is val + + def test___array__(self): + base_arr = np.arange(10) + + class ArrayLike: + def __array__(self): + # __array__ should return a copy, numpy cannot know this + # however. + return base_arr + + arr = ArrayLike() + + for copy in self.true_vals: + res = np.array(arr, copy=copy) + assert_array_equal(res, base_arr) + # An additional copy is currently forced by numpy in this case, + # you could argue, numpy does not trust the ArrayLike. This + # may be open for change: + assert res is not base_arr + + for copy in self.false_vals: + res = np.array(arr, copy=False) + assert_array_equal(res, base_arr) + assert res is base_arr # numpy trusts the ArrayLike + + with pytest.raises(ValueError): + np.array(arr, copy=np._CopyMode.NEVER) + + @pytest.mark.parametrize( + "arr", [np.ones(()), np.arange(81).reshape((9, 9))]) + @pytest.mark.parametrize("order1", ["C", "F", None]) + @pytest.mark.parametrize("order2", ["C", "F", "A", "K"]) + def test_order_mismatch(self, arr, order1, order2): + # The order is the main (python side) reason that can cause + # a never-copy to fail. + # Prepare C-order, F-order and non-contiguous arrays: + arr = arr.copy(order1) + if order1 == "C": + assert arr.flags.c_contiguous + elif order1 == "F": + assert arr.flags.f_contiguous + elif arr.ndim != 0: + # Make array non-contiguous + arr = arr[::2, ::2] + assert not arr.flags.forc + + # Whether a copy is necessary depends on the order of arr: + if order2 == "C": + no_copy_necessary = arr.flags.c_contiguous + elif order2 == "F": + no_copy_necessary = arr.flags.f_contiguous + else: + # Keeporder and Anyorder are OK with non-contiguous output. + # This is not consistent with the `astype` behaviour which + # enforces contiguity for "A". It is probably historic from when + # "K" did not exist. 
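+ # (Illustrative aside: for np.array, order='A' only chooses between C and F
+ #  when a copy is actually made, and order='K' tries to keep the input's own
+ #  strides, so unlike 'C' and 'F' neither of them can force a copy here.)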
+ no_copy_necessary = True + + # Test it for both the array and a memoryview + for view in [arr, memoryview(arr)]: + for copy in self.true_vals: + res = np.array(view, copy=copy, order=order2) + assert res is not arr and res.flags.owndata + assert_array_equal(arr, res) + + if no_copy_necessary: + for copy in self.false_vals: + res = np.array(view, copy=copy, order=order2) + # res.base.obj refers to the memoryview + if not IS_PYPY: + assert res is arr or res.base.obj is arr + + res = np.array(view, copy=np._CopyMode.NEVER, + order=order2) + if not IS_PYPY: + assert res is arr or res.base.obj is arr + else: + for copy in self.false_vals: + res = np.array(arr, copy=copy, order=order2) + assert_array_equal(arr, res) + assert_raises(ValueError, np.array, + view, copy=np._CopyMode.NEVER, + order=order2) + assert_raises(ValueError, np.array, + view, copy=None, + order=order2) + + def test_striding_not_ok(self): + arr = np.array([[1, 2, 4], [3, 4, 5]]) + assert_raises(ValueError, np.array, + arr.T, copy=np._CopyMode.NEVER, + order='C') + assert_raises(ValueError, np.array, + arr.T, copy=np._CopyMode.NEVER, + order='C', dtype=np.int64) + assert_raises(ValueError, np.array, + arr, copy=np._CopyMode.NEVER, + order='F') + assert_raises(ValueError, np.array, + arr, copy=np._CopyMode.NEVER, + order='F', dtype=np.int64) + + +class TestArrayAttributeDeletion: + + def test_multiarray_writable_attributes_deletion(self): + # ticket #2046, should not seqfault, raise AttributeError + a = np.ones(2) + attr = ['shape', 'strides', 'data', 'dtype', 'real', 'imag', 'flat'] + with suppress_warnings() as sup: + sup.filter(DeprecationWarning, "Assigning the 'data' attribute") + for s in attr: + assert_raises(AttributeError, delattr, a, s) + + def test_multiarray_not_writable_attributes_deletion(self): + a = np.ones(2) + attr = ["ndim", "flags", "itemsize", "size", "nbytes", "base", + "ctypes", "T", "__array_interface__", "__array_struct__", + "__array_priority__", "__array_finalize__"] + for s in attr: + assert_raises(AttributeError, delattr, a, s) + + def test_multiarray_flags_writable_attribute_deletion(self): + a = np.ones(2).flags + attr = ['writebackifcopy', 'updateifcopy', 'aligned', 'writeable'] + for s in attr: + assert_raises(AttributeError, delattr, a, s) + + def test_multiarray_flags_not_writable_attribute_deletion(self): + a = np.ones(2).flags + attr = ["contiguous", "c_contiguous", "f_contiguous", "fortran", + "owndata", "fnc", "forc", "behaved", "carray", "farray", + "num"] + for s in attr: + assert_raises(AttributeError, delattr, a, s) + + +class TestArrayInterface(): + class Foo: + def __init__(self, value): + self.value = value + self.iface = {'typestr': 'f8'} + + def __float__(self): + return float(self.value) + + @property + def __array_interface__(self): + return self.iface + + + f = Foo(0.5) + + @pytest.mark.parametrize('val, iface, expected', [ + (f, {}, 0.5), + ([f], {}, [0.5]), + ([f, f], {}, [0.5, 0.5]), + (f, {'shape': ()}, 0.5), + (f, {'shape': None}, TypeError), + (f, {'shape': (1, 1)}, [[0.5]]), + (f, {'shape': (2,)}, ValueError), + (f, {'strides': ()}, 0.5), + (f, {'strides': (2,)}, ValueError), + (f, {'strides': 16}, TypeError), + ]) + def test_scalar_interface(self, val, iface, expected): + # Test scalar coercion within the array interface + self.f.iface = {'typestr': 'f8'} + self.f.iface.update(iface) + if HAS_REFCOUNT: + pre_cnt = sys.getrefcount(np.dtype('f8')) + if isinstance(expected, type): + assert_raises(expected, np.array, val) + else: + result = np.array(val) + 
assert_equal(np.array(val), expected) + assert result.dtype == 'f8' + del result + if HAS_REFCOUNT: + post_cnt = sys.getrefcount(np.dtype('f8')) + assert_equal(pre_cnt, post_cnt) + +def test_interface_no_shape(): + class ArrayLike: + array = np.array(1) + __array_interface__ = array.__array_interface__ + assert_equal(np.array(ArrayLike()), 1) + + +def test_array_interface_itemsize(): + # See gh-6361 + my_dtype = np.dtype({'names': ['A', 'B'], 'formats': ['f4', 'f4'], + 'offsets': [0, 8], 'itemsize': 16}) + a = np.ones(10, dtype=my_dtype) + descr_t = np.dtype(a.__array_interface__['descr']) + typestr_t = np.dtype(a.__array_interface__['typestr']) + assert_equal(descr_t.itemsize, typestr_t.itemsize) + + +def test_array_interface_empty_shape(): + # See gh-7994 + arr = np.array([1, 2, 3]) + interface1 = dict(arr.__array_interface__) + interface1['shape'] = () + + class DummyArray1: + __array_interface__ = interface1 + + # NOTE: Because Py2 str/Py3 bytes supports the buffer interface, setting + # the interface data to bytes would invoke the bug this tests for, that + # __array_interface__ with shape=() is not allowed if the data is an object + # exposing the buffer interface + interface2 = dict(interface1) + interface2['data'] = arr[0].tobytes() + + class DummyArray2: + __array_interface__ = interface2 + + arr1 = np.asarray(DummyArray1()) + arr2 = np.asarray(DummyArray2()) + arr3 = arr[:1].reshape(()) + assert_equal(arr1, arr2) + assert_equal(arr1, arr3) + +def test_array_interface_offset(): + arr = np.array([1, 2, 3], dtype='int32') + interface = dict(arr.__array_interface__) + interface['data'] = memoryview(arr) + interface['shape'] = (2,) + interface['offset'] = 4 + + + class DummyArray: + __array_interface__ = interface + + arr1 = np.asarray(DummyArray()) + assert_equal(arr1, arr[1:]) + +def test_array_interface_unicode_typestr(): + arr = np.array([1, 2, 3], dtype='int32') + interface = dict(arr.__array_interface__) + interface['typestr'] = '\N{check mark}' + + class DummyArray: + __array_interface__ = interface + + # should not be UnicodeEncodeError + with pytest.raises(TypeError): + np.asarray(DummyArray()) + +def test_flat_element_deletion(): + it = np.ones(3).flat + try: + del it[1] + del it[1:2] + except TypeError: + pass + except Exception: + raise AssertionError + + +def test_scalar_element_deletion(): + a = np.zeros(2, dtype=[('x', 'int'), ('y', 'int')]) + assert_raises(ValueError, a[0].__delitem__, 'x') + + +class TestMapIter: + def test_mapiter(self): + # The actual tests are within the C code in + # multiarray/_multiarray_tests.c.src + + a = np.arange(12).reshape((3, 4)).astype(float) + index = ([1, 1, 2, 0], + [0, 0, 2, 3]) + vals = [50, 50, 30, 16] + + _multiarray_tests.test_inplace_increment(a, index, vals) + assert_equal(a, [[0.00, 1., 2.0, 19.], + [104., 5., 6.0, 7.0], + [8.00, 9., 40., 11.]]) + + b = np.arange(6).astype(float) + index = (np.array([1, 2, 0]),) + vals = [50, 4, 100.1] + _multiarray_tests.test_inplace_increment(b, index, vals) + assert_equal(b, [100.1, 51., 6., 3., 4., 5.]) + + +class TestAsCArray: + def test_1darray(self): + array = np.arange(24, dtype=np.double) + from_c = _multiarray_tests.test_as_c_array(array, 3) + assert_equal(array[3], from_c) + + def test_2darray(self): + array = np.arange(24, dtype=np.double).reshape(3, 8) + from_c = _multiarray_tests.test_as_c_array(array, 2, 4) + assert_equal(array[2, 4], from_c) + + def test_3darray(self): + array = np.arange(24, dtype=np.double).reshape(2, 3, 4) + from_c = _multiarray_tests.test_as_c_array(array, 
1, 2, 3) + assert_equal(array[1, 2, 3], from_c) + + +class TestConversion: + def test_array_scalar_relational_operation(self): + # All integer + for dt1 in np.typecodes['AllInteger']: + assert_(1 > np.array(0, dtype=dt1), "type %s failed" % (dt1,)) + assert_(not 1 < np.array(0, dtype=dt1), "type %s failed" % (dt1,)) + + for dt2 in np.typecodes['AllInteger']: + assert_(np.array(1, dtype=dt1) > np.array(0, dtype=dt2), + "type %s and %s failed" % (dt1, dt2)) + assert_(not np.array(1, dtype=dt1) < np.array(0, dtype=dt2), + "type %s and %s failed" % (dt1, dt2)) + + # Unsigned integers + for dt1 in 'BHILQP': + assert_(-1 < np.array(1, dtype=dt1), "type %s failed" % (dt1,)) + assert_(not -1 > np.array(1, dtype=dt1), "type %s failed" % (dt1,)) + assert_(-1 != np.array(1, dtype=dt1), "type %s failed" % (dt1,)) + + # Unsigned vs signed + for dt2 in 'bhilqp': + assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2), + "type %s and %s failed" % (dt1, dt2)) + assert_(not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2), + "type %s and %s failed" % (dt1, dt2)) + assert_(np.array(1, dtype=dt1) != np.array(-1, dtype=dt2), + "type %s and %s failed" % (dt1, dt2)) + + # Signed integers and floats + for dt1 in 'bhlqp' + np.typecodes['Float']: + assert_(1 > np.array(-1, dtype=dt1), "type %s failed" % (dt1,)) + assert_(not 1 < np.array(-1, dtype=dt1), "type %s failed" % (dt1,)) + assert_(-1 == np.array(-1, dtype=dt1), "type %s failed" % (dt1,)) + + for dt2 in 'bhlqp' + np.typecodes['Float']: + assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2), + "type %s and %s failed" % (dt1, dt2)) + assert_(not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2), + "type %s and %s failed" % (dt1, dt2)) + assert_(np.array(-1, dtype=dt1) == np.array(-1, dtype=dt2), + "type %s and %s failed" % (dt1, dt2)) + + def test_to_bool_scalar(self): + assert_equal(bool(np.array([False])), False) + assert_equal(bool(np.array([True])), True) + assert_equal(bool(np.array([[42]])), True) + assert_raises(ValueError, bool, np.array([1, 2])) + + class NotConvertible: + def __bool__(self): + raise NotImplementedError + + assert_raises(NotImplementedError, bool, np.array(NotConvertible())) + assert_raises(NotImplementedError, bool, np.array([NotConvertible()])) + if IS_PYSTON: + pytest.skip("Pyston disables recursion checking") + + self_containing = np.array([None]) + self_containing[0] = self_containing + + Error = RecursionError + + assert_raises(Error, bool, self_containing) # previously stack overflow + self_containing[0] = None # resolve circular reference + + def test_to_int_scalar(self): + # gh-9972 means that these aren't always the same + int_funcs = (int, lambda x: x.__int__()) + for int_func in int_funcs: + assert_equal(int_func(np.array(0)), 0) + with assert_warns(DeprecationWarning): + assert_equal(int_func(np.array([1])), 1) + with assert_warns(DeprecationWarning): + assert_equal(int_func(np.array([[42]])), 42) + assert_raises(TypeError, int_func, np.array([1, 2])) + + # gh-9972 + assert_equal(4, int_func(np.array('4'))) + assert_equal(5, int_func(np.bytes_(b'5'))) + assert_equal(6, int_func(np.str_('6'))) + + # The delegation of int() to __trunc__ was deprecated in + # Python 3.11. 
+ if sys.version_info < (3, 11): + class HasTrunc: + def __trunc__(self): + return 3 + assert_equal(3, int_func(np.array(HasTrunc()))) + with assert_warns(DeprecationWarning): + assert_equal(3, int_func(np.array([HasTrunc()]))) + else: + pass + + class NotConvertible: + def __int__(self): + raise NotImplementedError + assert_raises(NotImplementedError, + int_func, np.array(NotConvertible())) + with assert_warns(DeprecationWarning): + assert_raises(NotImplementedError, + int_func, np.array([NotConvertible()])) + + +class TestWhere: + def test_basic(self): + dts = [bool, np.int16, np.int32, np.int64, np.double, np.complex128, + np.longdouble, np.clongdouble] + for dt in dts: + c = np.ones(53, dtype=bool) + assert_equal(np.where( c, dt(0), dt(1)), dt(0)) + assert_equal(np.where(~c, dt(0), dt(1)), dt(1)) + assert_equal(np.where(True, dt(0), dt(1)), dt(0)) + assert_equal(np.where(False, dt(0), dt(1)), dt(1)) + d = np.ones_like(c).astype(dt) + e = np.zeros_like(d) + r = d.astype(dt) + c[7] = False + r[7] = e[7] + assert_equal(np.where(c, e, e), e) + assert_equal(np.where(c, d, e), r) + assert_equal(np.where(c, d, e[0]), r) + assert_equal(np.where(c, d[0], e), r) + assert_equal(np.where(c[::2], d[::2], e[::2]), r[::2]) + assert_equal(np.where(c[1::2], d[1::2], e[1::2]), r[1::2]) + assert_equal(np.where(c[::3], d[::3], e[::3]), r[::3]) + assert_equal(np.where(c[1::3], d[1::3], e[1::3]), r[1::3]) + assert_equal(np.where(c[::-2], d[::-2], e[::-2]), r[::-2]) + assert_equal(np.where(c[::-3], d[::-3], e[::-3]), r[::-3]) + assert_equal(np.where(c[1::-3], d[1::-3], e[1::-3]), r[1::-3]) + + def test_exotic(self): + # object + assert_array_equal(np.where(True, None, None), np.array(None)) + # zero sized + m = np.array([], dtype=bool).reshape(0, 3) + b = np.array([], dtype=np.float64).reshape(0, 3) + assert_array_equal(np.where(m, 0, b), np.array([]).reshape(0, 3)) + + # object cast + d = np.array([-1.34, -0.16, -0.54, -0.31, -0.08, -0.95, 0.000, 0.313, + 0.547, -0.18, 0.876, 0.236, 1.969, 0.310, 0.699, 1.013, + 1.267, 0.229, -1.39, 0.487]) + nan = float('NaN') + e = np.array(['5z', '0l', nan, 'Wz', nan, nan, 'Xq', 'cs', nan, nan, + 'QN', nan, nan, 'Fd', nan, nan, 'kp', nan, '36', 'i1'], + dtype=object) + m = np.array([0, 0, 1, 0, 1, 1, 0, 0, 1, 1, + 0, 1, 1, 0, 1, 1, 0, 1, 0, 0], dtype=bool) + + r = e[:] + r[np.where(m)] = d[np.where(m)] + assert_array_equal(np.where(m, d, e), r) + + r = e[:] + r[np.where(~m)] = d[np.where(~m)] + assert_array_equal(np.where(m, e, d), r) + + assert_array_equal(np.where(m, e, e), e) + + # minimal dtype result with NaN scalar (e.g required by pandas) + d = np.array([1., 2.], dtype=np.float32) + e = float('NaN') + assert_equal(np.where(True, d, e).dtype, np.float32) + e = float('Infinity') + assert_equal(np.where(True, d, e).dtype, np.float32) + e = float('-Infinity') + assert_equal(np.where(True, d, e).dtype, np.float32) + # also check upcast + e = float(1e150) + assert_equal(np.where(True, d, e).dtype, np.float64) + + def test_ndim(self): + c = [True, False] + a = np.zeros((2, 25)) + b = np.ones((2, 25)) + r = np.where(np.array(c)[:,np.newaxis], a, b) + assert_array_equal(r[0], a[0]) + assert_array_equal(r[1], b[0]) + + a = a.T + b = b.T + r = np.where(c, a, b) + assert_array_equal(r[:,0], a[:,0]) + assert_array_equal(r[:,1], b[:,0]) + + def test_dtype_mix(self): + c = np.array([False, True, False, False, False, False, True, False, + False, False, True, False]) + a = np.uint32(1) + b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.], + dtype=np.float64) + r = 
np.array([5., 1., 3., 2., -1., -4., 1., -10., 10., 1., 1., 3.], + dtype=np.float64) + assert_equal(np.where(c, a, b), r) + + a = a.astype(np.float32) + b = b.astype(np.int64) + assert_equal(np.where(c, a, b), r) + + # non bool mask + c = c.astype(int) + c[c != 0] = 34242324 + assert_equal(np.where(c, a, b), r) + # invert + tmpmask = c != 0 + c[c == 0] = 41247212 + c[tmpmask] = 0 + assert_equal(np.where(c, b, a), r) + + def test_foreign(self): + c = np.array([False, True, False, False, False, False, True, False, + False, False, True, False]) + r = np.array([5., 1., 3., 2., -1., -4., 1., -10., 10., 1., 1., 3.], + dtype=np.float64) + a = np.ones(1, dtype='>i4') + b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.], + dtype=np.float64) + assert_equal(np.where(c, a, b), r) + + b = b.astype('>f8') + assert_equal(np.where(c, a, b), r) + + a = a.astype('<i4') + assert_equal(np.where(c, a, b), r) + + c = c.astype('>i4') + assert_equal(np.where(c, a, b), r) + + def test_error(self): + c = [True, True] + a = np.ones((4, 5)) + b = np.ones((5, 5)) + assert_raises(ValueError, np.where, c, a, a) + assert_raises(ValueError, np.where, c[0], a, b) + + def test_string(self): + # gh-4778 check strings are properly filled with nulls + a = np.array("abc") + b = np.array("x" * 753) + assert_equal(np.where(True, a, b), "abc") + assert_equal(np.where(False, b, a), "abc") + + # check native datatype sized strings + a = np.array("abcd") + b = np.array("x" * 8) + assert_equal(np.where(True, a, b), "abcd") + assert_equal(np.where(False, b, a), "abcd") + + def test_empty_result(self): + # pass empty where result through an assignment which reads the data of + # empty arrays, error detectable with valgrind, see gh-8922 + x = np.zeros((1, 1)) + ibad = np.vstack(np.where(x == 99.)) + assert_array_equal(ibad, + np.atleast_2d(np.array([[],[]], dtype=np.intp))) + + def test_largedim(self): + # invalid read regression gh-9304 + shape = [10, 2, 3, 4, 5, 6] + np.random.seed(2) + array = np.random.rand(*shape) + + for i in range(10): + benchmark = array.nonzero() + result = array.nonzero() + assert_array_equal(benchmark, result) + + def test_kwargs(self): + a = np.zeros(1) + with assert_raises(TypeError): + np.where(a, x=a, y=a) + + +if not IS_PYPY: + # sys.getsizeof() is not valid on PyPy + class TestSizeOf: + + def test_empty_array(self): + x = np.array([]) + assert_(sys.getsizeof(x) > 0) + + def check_array(self, dtype): + elem_size = dtype(0).itemsize + + for length in [10, 50, 100, 500]: + x = np.arange(length, dtype=dtype) + assert_(sys.getsizeof(x) > length * elem_size) + + def test_array_int32(self): + self.check_array(np.int32) + + def test_array_int64(self): + self.check_array(np.int64) + + def test_array_float32(self): + self.check_array(np.float32) + + def test_array_float64(self): + self.check_array(np.float64) + + def test_view(self): + d = np.ones(100) + assert_(sys.getsizeof(d[...]) < sys.getsizeof(d)) + + def test_reshape(self): + d = np.ones(100) + assert_(sys.getsizeof(d) < sys.getsizeof(d.reshape(100, 1, 1).copy())) + + @_no_tracing + def test_resize(self): + d = np.ones(100) + old = sys.getsizeof(d) + d.resize(50) + assert_(old > sys.getsizeof(d)) + d.resize(150) + assert_(old < sys.getsizeof(d)) + + def test_error(self): + d = np.ones(100) + assert_raises(TypeError, d.__sizeof__, "a") + + +class TestHashing: + + def test_arrays_not_hashable(self): + x = np.ones(3) + assert_raises(TypeError, hash, x) + + def test_collections_hashable(self): + x = np.array([]) + assert_(not isinstance(x, 
collections.abc.Hashable)) + + +class TestArrayPriority: + # This will go away when __array_priority__ is settled, meanwhile + # it serves to check unintended changes. + op = operator + binary_ops = [ + op.pow, op.add, op.sub, op.mul, op.floordiv, op.truediv, op.mod, + op.and_, op.or_, op.xor, op.lshift, op.rshift, op.mod, op.gt, + op.ge, op.lt, op.le, op.ne, op.eq + ] + + class Foo(np.ndarray): + __array_priority__ = 100. + + def __new__(cls, *args, **kwargs): + return np.array(*args, **kwargs).view(cls) + + class Bar(np.ndarray): + __array_priority__ = 101. + + def __new__(cls, *args, **kwargs): + return np.array(*args, **kwargs).view(cls) + + class Other: + __array_priority__ = 1000. + + def _all(self, other): + return self.__class__() + + __add__ = __radd__ = _all + __sub__ = __rsub__ = _all + __mul__ = __rmul__ = _all + __pow__ = __rpow__ = _all + __div__ = __rdiv__ = _all + __mod__ = __rmod__ = _all + __truediv__ = __rtruediv__ = _all + __floordiv__ = __rfloordiv__ = _all + __and__ = __rand__ = _all + __xor__ = __rxor__ = _all + __or__ = __ror__ = _all + __lshift__ = __rlshift__ = _all + __rshift__ = __rrshift__ = _all + __eq__ = _all + __ne__ = _all + __gt__ = _all + __ge__ = _all + __lt__ = _all + __le__ = _all + + def test_ndarray_subclass(self): + a = np.array([1, 2]) + b = self.Bar([1, 2]) + for f in self.binary_ops: + msg = repr(f) + assert_(isinstance(f(a, b), self.Bar), msg) + assert_(isinstance(f(b, a), self.Bar), msg) + + def test_ndarray_other(self): + a = np.array([1, 2]) + b = self.Other() + for f in self.binary_ops: + msg = repr(f) + assert_(isinstance(f(a, b), self.Other), msg) + assert_(isinstance(f(b, a), self.Other), msg) + + def test_subclass_subclass(self): + a = self.Foo([1, 2]) + b = self.Bar([1, 2]) + for f in self.binary_ops: + msg = repr(f) + assert_(isinstance(f(a, b), self.Bar), msg) + assert_(isinstance(f(b, a), self.Bar), msg) + + def test_subclass_other(self): + a = self.Foo([1, 2]) + b = self.Other() + for f in self.binary_ops: + msg = repr(f) + assert_(isinstance(f(a, b), self.Other), msg) + assert_(isinstance(f(b, a), self.Other), msg) + + +class TestBytestringArrayNonzero: + + def test_empty_bstring_array_is_falsey(self): + assert_(not np.array([''], dtype=str)) + + def test_whitespace_bstring_array_is_falsey(self): + a = np.array(['spam'], dtype=str) + a[0] = ' \0\0' + assert_(not a) + + def test_all_null_bstring_array_is_falsey(self): + a = np.array(['spam'], dtype=str) + a[0] = '\0\0\0\0' + assert_(not a) + + def test_null_inside_bstring_array_is_truthy(self): + a = np.array(['spam'], dtype=str) + a[0] = ' \0 \0' + assert_(a) + + +class TestUnicodeEncoding: + """ + Tests for encoding related bugs, such as UCS2 vs UCS4, round-tripping + issues, etc + """ + def test_round_trip(self): + """ Tests that GETITEM, SETITEM, and PyArray_Scalar roundtrip """ + # gh-15363 + arr = np.zeros(shape=(), dtype="U1") + for i in range(1, sys.maxunicode + 1): + expected = chr(i) + arr[()] = expected + assert arr[()] == expected + assert arr.item() == expected + + def test_assign_scalar(self): + # gh-3258 + l = np.array(['aa', 'bb']) + l[:] = np.str_('cc') + assert_equal(l, ['cc', 'cc']) + + def test_fill_scalar(self): + # gh-7227 + l = np.array(['aa', 'bb']) + l.fill(np.str_('cc')) + assert_equal(l, ['cc', 'cc']) + + +class TestUnicodeArrayNonzero: + + def test_empty_ustring_array_is_falsey(self): + assert_(not np.array([''], dtype=np.str_)) + + def test_whitespace_ustring_array_is_falsey(self): + a = np.array(['eggs'], dtype=np.str_) + a[0] = ' \0\0' + assert_(not 
a) + + def test_all_null_ustring_array_is_falsey(self): + a = np.array(['eggs'], dtype=np.str_) + a[0] = '\0\0\0\0' + assert_(not a) + + def test_null_inside_ustring_array_is_truthy(self): + a = np.array(['eggs'], dtype=np.str_) + a[0] = ' \0 \0' + assert_(a) + + +class TestFormat: + + def test_0d(self): + a = np.array(np.pi) + assert_equal('{:0.3g}'.format(a), '3.14') + assert_equal('{:0.3g}'.format(a[()]), '3.14') + + def test_1d_no_format(self): + a = np.array([np.pi]) + assert_equal('{}'.format(a), str(a)) + + def test_1d_format(self): + # until gh-5543, ensure that the behaviour matches what it used to be + a = np.array([np.pi]) + assert_raises(TypeError, '{:30}'.format, a) + +from numpy.testing import IS_PYPY + +class TestCTypes: + + def test_ctypes_is_available(self): + test_arr = np.array([[1, 2, 3], [4, 5, 6]]) + + assert_equal(ctypes, test_arr.ctypes._ctypes) + assert_equal(tuple(test_arr.ctypes.shape), (2, 3)) + + def test_ctypes_is_not_available(self): + from numpy.core import _internal + _internal.ctypes = None + try: + test_arr = np.array([[1, 2, 3], [4, 5, 6]]) + + assert_(isinstance(test_arr.ctypes._ctypes, + _internal._missing_ctypes)) + assert_equal(tuple(test_arr.ctypes.shape), (2, 3)) + finally: + _internal.ctypes = ctypes + + def _make_readonly(x): + x.flags.writeable = False + return x + + @pytest.mark.parametrize('arr', [ + np.array([1, 2, 3]), + np.array([['one', 'two'], ['three', 'four']]), + np.array((1, 2), dtype='i4,i4'), + np.zeros((2,), dtype= + np.dtype(dict( + formats=['<i4', '<i4'], + names=['a', 'b'], + offsets=[0, 2], + itemsize=6 + )) + ), + np.array([None], dtype=object), + np.array([]), + np.empty((0, 0)), + _make_readonly(np.array([1, 2, 3])), + ], ids=[ + '1d', + '2d', + 'structured', + 'overlapping', + 'object', + 'empty', + 'empty-2d', + 'readonly' + ]) + def test_ctypes_data_as_holds_reference(self, arr): + # gh-9647 + # create a copy to ensure that pytest does not mess with the refcounts + arr = arr.copy() + + arr_ref = weakref.ref(arr) + + ctypes_ptr = arr.ctypes.data_as(ctypes.c_void_p) + + # `ctypes_ptr` should hold onto `arr` + del arr + break_cycles() + assert_(arr_ref() is not None, "ctypes pointer did not hold onto a reference") + + # but when the `ctypes_ptr` object dies, so should `arr` + del ctypes_ptr + if IS_PYPY: + # Pypy does not recycle arr objects immediately. Trigger gc to + # release arr. Cpython uses refcounts. An explicit call to gc + # should not be needed here. 
+ break_cycles() + assert_(arr_ref() is None, "unknowable whether ctypes pointer holds a reference") + + def test_ctypes_as_parameter_holds_reference(self): + arr = np.array([None]).copy() + + arr_ref = weakref.ref(arr) + + ctypes_ptr = arr.ctypes._as_parameter_ + + # `ctypes_ptr` should hold onto `arr` + del arr + break_cycles() + assert_(arr_ref() is not None, "ctypes pointer did not hold onto a reference") + + # but when the `ctypes_ptr` object dies, so should `arr` + del ctypes_ptr + if IS_PYPY: + break_cycles() + assert_(arr_ref() is None, "unknowable whether ctypes pointer holds a reference") + + +class TestWritebackIfCopy: + # all these tests use the WRITEBACKIFCOPY mechanism + def test_argmax_with_out(self): + mat = np.eye(5) + out = np.empty(5, dtype='i2') + res = np.argmax(mat, 0, out=out) + assert_equal(res, range(5)) + + def test_argmin_with_out(self): + mat = -np.eye(5) + out = np.empty(5, dtype='i2') + res = np.argmin(mat, 0, out=out) + assert_equal(res, range(5)) + + def test_insert_noncontiguous(self): + a = np.arange(6).reshape(2,3).T # force non-c-contiguous + # uses arr_insert + np.place(a, a>2, [44, 55]) + assert_equal(a, np.array([[0, 44], [1, 55], [2, 44]])) + # hit one of the failing paths + assert_raises(ValueError, np.place, a, a>20, []) + + def test_put_noncontiguous(self): + a = np.arange(6).reshape(2,3).T # force non-c-contiguous + np.put(a, [0, 2], [44, 55]) + assert_equal(a, np.array([[44, 3], [55, 4], [2, 5]])) + + def test_putmask_noncontiguous(self): + a = np.arange(6).reshape(2,3).T # force non-c-contiguous + # uses arr_putmask + np.putmask(a, a>2, a**2) + assert_equal(a, np.array([[0, 9], [1, 16], [2, 25]])) + + def test_take_mode_raise(self): + a = np.arange(6, dtype='int') + out = np.empty(2, dtype='int') + np.take(a, [0, 2], out=out, mode='raise') + assert_equal(out, np.array([0, 2])) + + def test_choose_mod_raise(self): + a = np.array([[1, 0, 1], [0, 1, 0], [1, 0, 1]]) + out = np.empty((3,3), dtype='int') + choices = [-10, 10] + np.choose(a, choices, out=out, mode='raise') + assert_equal(out, np.array([[ 10, -10, 10], + [-10, 10, -10], + [ 10, -10, 10]])) + + def test_flatiter__array__(self): + a = np.arange(9).reshape(3,3) + b = a.T.flat + c = b.__array__() + # triggers the WRITEBACKIFCOPY resolution, assuming refcount semantics + del c + + def test_dot_out(self): + # if HAVE_CBLAS, will use WRITEBACKIFCOPY + a = np.arange(9, dtype=float).reshape(3,3) + b = np.dot(a, a, out=a) + assert_equal(b, np.array([[15, 18, 21], [42, 54, 66], [69, 90, 111]])) + + def test_view_assign(self): + from numpy.core._multiarray_tests import npy_create_writebackifcopy, npy_resolve + + arr = np.arange(9).reshape(3, 3).T + arr_wb = npy_create_writebackifcopy(arr) + assert_(arr_wb.flags.writebackifcopy) + assert_(arr_wb.base is arr) + arr_wb[...] = -100 + npy_resolve(arr_wb) + # arr changes after resolve, even though we assigned to arr_wb + assert_equal(arr, -100) + # after resolve, the two arrays no longer reference each other + assert_(arr_wb.ctypes.data != 0) + assert_equal(arr_wb.base, None) + # assigning to arr_wb does not get transferred to arr + arr_wb[...] 
= 100 + assert_equal(arr, -100) + + @pytest.mark.leaks_references( + reason="increments self in dealloc; ignore since deprecated path.") + def test_dealloc_warning(self): + with suppress_warnings() as sup: + sup.record(RuntimeWarning) + arr = np.arange(9).reshape(3, 3) + v = arr.T + _multiarray_tests.npy_abuse_writebackifcopy(v) + assert len(sup.log) == 1 + + def test_view_discard_refcount(self): + from numpy.core._multiarray_tests import npy_create_writebackifcopy, npy_discard + + arr = np.arange(9).reshape(3, 3).T + orig = arr.copy() + if HAS_REFCOUNT: + arr_cnt = sys.getrefcount(arr) + arr_wb = npy_create_writebackifcopy(arr) + assert_(arr_wb.flags.writebackifcopy) + assert_(arr_wb.base is arr) + arr_wb[...] = -100 + npy_discard(arr_wb) + # arr remains unchanged after discard + assert_equal(arr, orig) + # after discard, the two arrays no longer reference each other + assert_(arr_wb.ctypes.data != 0) + assert_equal(arr_wb.base, None) + if HAS_REFCOUNT: + assert_equal(arr_cnt, sys.getrefcount(arr)) + # assigning to arr_wb does not get transferred to arr + arr_wb[...] = 100 + assert_equal(arr, orig) + + +class TestArange: + def test_infinite(self): + assert_raises_regex( + ValueError, "size exceeded", + np.arange, 0, np.inf + ) + + def test_nan_step(self): + assert_raises_regex( + ValueError, "cannot compute length", + np.arange, 0, 1, np.nan + ) + + def test_zero_step(self): + assert_raises(ZeroDivisionError, np.arange, 0, 10, 0) + assert_raises(ZeroDivisionError, np.arange, 0.0, 10.0, 0.0) + + # empty range + assert_raises(ZeroDivisionError, np.arange, 0, 0, 0) + assert_raises(ZeroDivisionError, np.arange, 0.0, 0.0, 0.0) + + def test_require_range(self): + assert_raises(TypeError, np.arange) + assert_raises(TypeError, np.arange, step=3) + assert_raises(TypeError, np.arange, dtype='int64') + assert_raises(TypeError, np.arange, start=4) + + def test_start_stop_kwarg(self): + keyword_stop = np.arange(stop=3) + keyword_zerotostop = np.arange(start=0, stop=3) + keyword_start_stop = np.arange(start=3, stop=9) + + assert len(keyword_stop) == 3 + assert len(keyword_zerotostop) == 3 + assert len(keyword_start_stop) == 6 + assert_array_equal(keyword_stop, keyword_zerotostop) + + def test_arange_booleans(self): + # Arange makes some sense for booleans and works up to length 2. + # But it is weird since `arange(2, 4, dtype=bool)` works. + # Arguably, much or all of this could be deprecated/removed. 
+ res = np.arange(False, dtype=bool) + assert_array_equal(res, np.array([], dtype="bool")) + + res = np.arange(True, dtype="bool") + assert_array_equal(res, [False]) + + res = np.arange(2, dtype="bool") + assert_array_equal(res, [False, True]) + + # This case is especially weird, but drops out without special case: + res = np.arange(6, 8, dtype="bool") + assert_array_equal(res, [True, True]) + + with pytest.raises(TypeError): + np.arange(3, dtype="bool") + + @pytest.mark.parametrize("dtype", ["S3", "U", "5i"]) + def test_rejects_bad_dtypes(self, dtype): + dtype = np.dtype(dtype) + DType_name = re.escape(str(type(dtype))) + with pytest.raises(TypeError, + match=rf"arange\(\) not supported for inputs .* {DType_name}"): + np.arange(2, dtype=dtype) + + def test_rejects_strings(self): + # Explicitly test error for strings which may call "b" - "a": + DType_name = re.escape(str(type(np.array("a").dtype))) + with pytest.raises(TypeError, + match=rf"arange\(\) not supported for inputs .* {DType_name}"): + np.arange("a", "b") + + def test_byteswapped(self): + res_be = np.arange(1, 1000, dtype=">i4") + res_le = np.arange(1, 1000, dtype="<i4") + assert res_be.dtype == ">i4" + assert res_le.dtype == "<i4" + assert_array_equal(res_le, res_be) + + @pytest.mark.parametrize("which", [0, 1, 2]) + def test_error_paths_and_promotion(self, which): + args = [0, 1, 2] # start, stop, and step + args[which] = np.float64(2.) # should ensure float64 output + + assert np.arange(*args).dtype == np.float64 + + # Cover stranger error path, test only to achieve code coverage! + args[which] = [None, []] + with pytest.raises(ValueError): + # Fails discovering start dtype + np.arange(*args) + + +class TestArrayFinalize: + """ Tests __array_finalize__ """ + + def test_receives_base(self): + # gh-11237 + class SavesBase(np.ndarray): + def __array_finalize__(self, obj): + self.saved_base = self.base + + a = np.array(1).view(SavesBase) + assert_(a.saved_base is a.base) + + def test_bad_finalize1(self): + class BadAttributeArray(np.ndarray): + @property + def __array_finalize__(self): + raise RuntimeError("boohoo!") + + with pytest.raises(TypeError, match="not callable"): + np.arange(10).view(BadAttributeArray) + + def test_bad_finalize2(self): + class BadAttributeArray(np.ndarray): + def __array_finalize__(self): + raise RuntimeError("boohoo!") + + with pytest.raises(TypeError, match="takes 1 positional"): + np.arange(10).view(BadAttributeArray) + + def test_bad_finalize3(self): + class BadAttributeArray(np.ndarray): + def __array_finalize__(self, obj): + raise RuntimeError("boohoo!") + + with pytest.raises(RuntimeError, match="boohoo!"): + np.arange(10).view(BadAttributeArray) + + def test_lifetime_on_error(self): + # gh-11237 + class RaisesInFinalize(np.ndarray): + def __array_finalize__(self, obj): + # crash, but keep this object alive + raise Exception(self) + + # a plain object can't be weakref'd + class Dummy: pass + + # get a weak reference to an object within an array + obj_arr = np.array(Dummy()) + obj_ref = weakref.ref(obj_arr[()]) + + # get an array that crashed in __array_finalize__ + with assert_raises(Exception) as e: + obj_arr.view(RaisesInFinalize) + + obj_subarray = e.exception.args[0] + del e + assert_(isinstance(obj_subarray, RaisesInFinalize)) + + # reference should still be held by obj_arr + break_cycles() + assert_(obj_ref() is not None, "object should not already be dead") + + del obj_arr + break_cycles() + assert_(obj_ref() is not None, "obj_arr should not hold the last reference") + + del obj_subarray + 
break_cycles() + assert_(obj_ref() is None, "no references should remain") + + def test_can_use_super(self): + class SuperFinalize(np.ndarray): + def __array_finalize__(self, obj): + self.saved_result = super().__array_finalize__(obj) + + a = np.array(1).view(SuperFinalize) + assert_(a.saved_result is None) + + +def test_orderconverter_with_nonASCII_unicode_ordering(): + # gh-7475 + a = np.arange(5) + assert_raises(ValueError, a.flatten, order='\xe2') + + +def test_equal_override(): + # gh-9153: ndarray.__eq__ uses special logic for structured arrays, which + # did not respect overrides with __array_priority__ or __array_ufunc__. + # The PR fixed this for __array_priority__ and __array_ufunc__ = None. + class MyAlwaysEqual: + def __eq__(self, other): + return "eq" + + def __ne__(self, other): + return "ne" + + class MyAlwaysEqualOld(MyAlwaysEqual): + __array_priority__ = 10000 + + class MyAlwaysEqualNew(MyAlwaysEqual): + __array_ufunc__ = None + + array = np.array([(0, 1), (2, 3)], dtype='i4,i4') + for my_always_equal_cls in MyAlwaysEqualOld, MyAlwaysEqualNew: + my_always_equal = my_always_equal_cls() + assert_equal(my_always_equal == array, 'eq') + assert_equal(array == my_always_equal, 'eq') + assert_equal(my_always_equal != array, 'ne') + assert_equal(array != my_always_equal, 'ne') + + +@pytest.mark.parametrize("op", [operator.eq, operator.ne]) +@pytest.mark.parametrize(["dt1", "dt2"], [ + ([("f", "i")], [("f", "i")]), # structured comparison (successful) + ("M8", "d"), # impossible comparison: result is all True or False + ("d", "d"), # valid comparison + ]) +def test_equal_subclass_no_override(op, dt1, dt2): + # Test how the three different possible code-paths deal with subclasses + + class MyArr(np.ndarray): + called_wrap = 0 + + def __array_wrap__(self, new): + type(self).called_wrap += 1 + return super().__array_wrap__(new) + + numpy_arr = np.zeros(5, dtype=dt1) + my_arr = np.zeros(5, dtype=dt2).view(MyArr) + + assert type(op(numpy_arr, my_arr)) is MyArr + assert type(op(my_arr, numpy_arr)) is MyArr + # We expect 2 calls (more if there were more fields): + assert MyArr.called_wrap == 2 + + +@pytest.mark.parametrize(["dt1", "dt2"], [ + ("M8[ns]", "d"), + ("M8[s]", "l"), + ("m8[ns]", "d"), + # Missing: ("m8[ns]", "l") as timedelta currently promotes ints + ("M8[s]", "m8[s]"), + ("S5", "U5"), + # Structured/void dtypes have explicit paths not tested here. 
+]) +def test_no_loop_gives_all_true_or_false(dt1, dt2): + # Make sure they broadcast to test result shape, use random values, since + # the actual value should be ignored + arr1 = np.random.randint(5, size=100).astype(dt1) + arr2 = np.random.randint(5, size=99)[:, np.newaxis].astype(dt2) + + res = arr1 == arr2 + assert res.shape == (99, 100) + assert res.dtype == bool + assert not res.any() + + res = arr1 != arr2 + assert res.shape == (99, 100) + assert res.dtype == bool + assert res.all() + + # incompatible shapes raise though + arr2 = np.random.randint(5, size=99).astype(dt2) + with pytest.raises(ValueError): + arr1 == arr2 + + with pytest.raises(ValueError): + arr1 != arr2 + + # Basic test with another operation: + with pytest.raises(np.core._exceptions._UFuncNoLoopError): + arr1 > arr2 + + +@pytest.mark.parametrize("op", [ + operator.eq, operator.ne, operator.le, operator.lt, operator.ge, + operator.gt]) +def test_comparisons_forwards_error(op): + class NotArray: + def __array__(self): + raise TypeError("run you fools") + + with pytest.raises(TypeError, match="run you fools"): + op(np.arange(2), NotArray()) + + with pytest.raises(TypeError, match="run you fools"): + op(NotArray(), np.arange(2)) + + +def test_richcompare_scalar_boolean_singleton_return(): + # These are currently guaranteed to be the boolean singletons, but maybe + # returning NumPy booleans would also be OK: + assert (np.array(0) == "a") is False + assert (np.array(0) != "a") is True + assert (np.int16(0) == "a") is False + assert (np.int16(0) != "a") is True + + +@pytest.mark.parametrize("op", [ + operator.eq, operator.ne, operator.le, operator.lt, operator.ge, + operator.gt]) +def test_ragged_comparison_fails(op): + # This needs to convert the internal array to True/False, which fails: + a = np.array([1, np.array([1, 2, 3])], dtype=object) + b = np.array([1, np.array([1, 2, 3])], dtype=object) + + with pytest.raises(ValueError, match="The truth value.*ambiguous"): + op(a, b) + + +@pytest.mark.parametrize( + ["fun", "npfun"], + [ + (_multiarray_tests.npy_cabs, np.absolute), + (_multiarray_tests.npy_carg, np.angle) + ] +) +@pytest.mark.parametrize("x", [1, np.inf, -np.inf, np.nan]) +@pytest.mark.parametrize("y", [1, np.inf, -np.inf, np.nan]) +@pytest.mark.parametrize("test_dtype", np.complexfloating.__subclasses__()) +def test_npymath_complex(fun, npfun, x, y, test_dtype): + # Smoketest npymath functions + z = test_dtype(complex(x, y)) + with np.errstate(invalid='ignore'): + # Fallback implementations may emit a warning for +-inf (see gh-24876): + # RuntimeWarning: invalid value encountered in absolute + got = fun(z) + expected = npfun(z) + assert_allclose(got, expected) + + +def test_npymath_real(): + # Smoketest npymath functions + from numpy.core._multiarray_tests import ( + npy_log10, npy_cosh, npy_sinh, npy_tan, npy_tanh) + + funcs = {npy_log10: np.log10, + npy_cosh: np.cosh, + npy_sinh: np.sinh, + npy_tan: np.tan, + npy_tanh: np.tanh} + vals = (1, np.inf, -np.inf, np.nan) + types = (np.float32, np.float64, np.longdouble) + + with np.errstate(all='ignore'): + for fun, npfun in funcs.items(): + for x, t in itertools.product(vals, types): + z = t(x) + got = fun(z) + expected = npfun(z) + assert_allclose(got, expected) + +def test_uintalignment_and_alignment(): + # alignment code needs to satisfy these requirements: + # 1. numpy structs match C struct layout + # 2. ufuncs/casting is safe wrt to aligned access + # 3. 
copy code is safe wrt to "uint aligned" access + # + # Complex types are the main problem, whose alignment may not be the same + # as their "uint alignment". + # + # This test might only fail on certain platforms, where uint64 alignment is + # not equal to complex64 alignment. The second 2 tests will only fail + # for DEBUG=1. + + d1 = np.dtype('u1,c8', align=True) + d2 = np.dtype('u4,c8', align=True) + d3 = np.dtype({'names': ['a', 'b'], 'formats': ['u1', d1]}, align=True) + + assert_equal(np.zeros(1, dtype=d1)['f1'].flags['ALIGNED'], True) + assert_equal(np.zeros(1, dtype=d2)['f1'].flags['ALIGNED'], True) + assert_equal(np.zeros(1, dtype='u1,c8')['f1'].flags['ALIGNED'], False) + + # check that C struct matches numpy struct size + s = _multiarray_tests.get_struct_alignments() + for d, (alignment, size) in zip([d1,d2,d3], s): + assert_equal(d.alignment, alignment) + assert_equal(d.itemsize, size) + + # check that ufuncs don't complain in debug mode + # (this is probably OK if the aligned flag is true above) + src = np.zeros((2,2), dtype=d1)['f1'] # 4-byte aligned, often + np.exp(src) # assert fails? + + # check that copy code doesn't complain in debug mode + dst = np.zeros((2,2), dtype='c8') + dst[:,1] = src[:,1] # assert in lowlevel_strided_loops fails? + +class TestAlignment: + # adapted from scipy._lib.tests.test__util.test__aligned_zeros + # Checks that unusual memory alignments don't trip up numpy. + # In particular, check RELAXED_STRIDES don't trip alignment assertions in + # NDEBUG mode for size-0 arrays (gh-12503) + + def check(self, shape, dtype, order, align): + err_msg = repr((shape, dtype, order, align)) + x = _aligned_zeros(shape, dtype, order, align=align) + if align is None: + align = np.dtype(dtype).alignment + assert_equal(x.__array_interface__['data'][0] % align, 0) + if hasattr(shape, '__len__'): + assert_equal(x.shape, shape, err_msg) + else: + assert_equal(x.shape, (shape,), err_msg) + assert_equal(x.dtype, dtype) + if order == "C": + assert_(x.flags.c_contiguous, err_msg) + elif order == "F": + if x.size > 0: + assert_(x.flags.f_contiguous, err_msg) + elif order is None: + assert_(x.flags.c_contiguous, err_msg) + else: + raise ValueError() + + def test_various_alignments(self): + for align in [1, 2, 3, 4, 8, 12, 16, 32, 64, None]: + for n in [0, 1, 3, 11]: + for order in ["C", "F", None]: + for dtype in list(np.typecodes["All"]) + ['i4,i4,i4']: + if dtype == 'O': + # object dtype can't be misaligned + continue + for shape in [n, (1, 2, 3, n)]: + self.check(shape, np.dtype(dtype), order, align) + + def test_strided_loop_alignments(self): + # particularly test that complex64 and float128 use right alignment + # code-paths, since these are particularly problematic. It is useful to + # turn on USE_DEBUG for this test, so lowlevel-loop asserts are run.
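+ # (Note: align values below the natural complex64/longdouble alignment,
+ # e.g. align=1 or align=2, can make _aligned_zeros hand back deliberately
+ # misaligned buffers, so the casts and copies below may have to take the
+ # unaligned code paths.)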
+ for align in [1, 2, 4, 8, 12, 16, None]: + xf64 = _aligned_zeros(3, np.float64) + + xc64 = _aligned_zeros(3, np.complex64, align=align) + xf128 = _aligned_zeros(3, np.longdouble, align=align) + + # test casting, both to and from misaligned + with suppress_warnings() as sup: + sup.filter(np.ComplexWarning, "Casting complex values") + xc64.astype('f8') + xf64.astype(np.complex64) + test = xc64 + xf64 + + xf128.astype('f8') + xf64.astype(np.longdouble) + test = xf128 + xf64 + + test = xf128 + xc64 + + # test copy, both to and from misaligned + # contig copy + xf64[:] = xf64.copy() + xc64[:] = xc64.copy() + xf128[:] = xf128.copy() + # strided copy + xf64[::2] = xf64[::2].copy() + xc64[::2] = xc64[::2].copy() + xf128[::2] = xf128[::2].copy() + +def test_getfield(): + a = np.arange(32, dtype='uint16') + if sys.byteorder == 'little': + i = 0 + j = 1 + else: + i = 1 + j = 0 + b = a.getfield('int8', i) + assert_equal(b, a) + b = a.getfield('int8', j) + assert_equal(b, 0) + pytest.raises(ValueError, a.getfield, 'uint8', -1) + pytest.raises(ValueError, a.getfield, 'uint8', 16) + pytest.raises(ValueError, a.getfield, 'uint64', 0) + + +class TestViewDtype: + """ + Verify that making a view of a non-contiguous array works as expected. + """ + def test_smaller_dtype_multiple(self): + # x is non-contiguous + x = np.arange(10, dtype='<i4')[::2] + with pytest.raises(ValueError, + match='the last axis must be contiguous'): + x.view('<i2') + expected = [[0, 0], [2, 0], [4, 0], [6, 0], [8, 0]] + assert_array_equal(x[:, np.newaxis].view('<i2'), expected) + + def test_smaller_dtype_not_multiple(self): + # x is non-contiguous + x = np.arange(5, dtype='<i4')[::2] + + with pytest.raises(ValueError, + match='the last axis must be contiguous'): + x.view('S3') + with pytest.raises(ValueError, + match='When changing to a smaller dtype'): + x[:, np.newaxis].view('S3') + + # Make sure the problem is because of the dtype size + expected = [[b''], [b'\x02'], [b'\x04']] + assert_array_equal(x[:, np.newaxis].view('S4'), expected) + + def test_larger_dtype_multiple(self): + # x is non-contiguous in the first dimension, contiguous in the last + x = np.arange(20, dtype='<i2').reshape(10, 2)[::2, :] + expected = np.array([[65536], [327684], [589832], + [851980], [1114128]], dtype='<i4') + assert_array_equal(x.view('<i4'), expected) + + def test_larger_dtype_not_multiple(self): + # x is non-contiguous in the first dimension, contiguous in the last + x = np.arange(20, dtype='<i2').reshape(10, 2)[::2, :] + with pytest.raises(ValueError, + match='When changing to a larger dtype'): + x.view('S3') + # Make sure the problem is because of the dtype size + expected = [[b'\x00\x00\x01'], [b'\x04\x00\x05'], [b'\x08\x00\t'], + [b'\x0c\x00\r'], [b'\x10\x00\x11']] + assert_array_equal(x.view('S4'), expected) + + def test_f_contiguous(self): + # x is F-contiguous + x = np.arange(4 * 3, dtype='<i4').reshape(4, 3).T + with pytest.raises(ValueError, + match='the last axis must be contiguous'): + x.view('<i2') + + def test_non_c_contiguous(self): + # x is contiguous in axis=-1, but not C-contiguous in other axes + x = np.arange(2 * 3 * 4, dtype='i1').\ + reshape(2, 3, 4).transpose(1, 0, 2) + expected = [[[256, 770], [3340, 3854]], + [[1284, 1798], [4368, 4882]], + [[2312, 2826], [5396, 5910]]] + assert_array_equal(x.view('<i2'), expected) + + +@pytest.mark.xfail(_SUPPORTS_SVE, reason="gh-22982") +# Test various array sizes that hit different code paths in quicksort-avx512 +@pytest.mark.parametrize("N", np.arange(1, 512)) 
+@pytest.mark.parametrize("dtype", ['e', 'f', 'd']) +def test_sort_float(N, dtype): + # Regular data with nan sprinkled + np.random.seed(42) + arr = -0.5 + np.random.sample(N).astype(dtype) + arr[np.random.choice(arr.shape[0], 3)] = np.nan + assert_equal(np.sort(arr, kind='quick'), np.sort(arr, kind='heap')) + + # (2) with +INF + infarr = np.inf*np.ones(N, dtype=dtype) + infarr[np.random.choice(infarr.shape[0], 5)] = -1.0 + assert_equal(np.sort(infarr, kind='quick'), np.sort(infarr, kind='heap')) + + # (3) with -INF + neginfarr = -np.inf*np.ones(N, dtype=dtype) + neginfarr[np.random.choice(neginfarr.shape[0], 5)] = 1.0 + assert_equal(np.sort(neginfarr, kind='quick'), + np.sort(neginfarr, kind='heap')) + + # (4) with +/-INF + infarr = np.inf*np.ones(N, dtype=dtype) + infarr[np.random.choice(infarr.shape[0], (int)(N/2))] = -np.inf + assert_equal(np.sort(infarr, kind='quick'), np.sort(infarr, kind='heap')) + +def test_sort_float16(): + arr = np.arange(65536, dtype=np.int16) + temp = np.frombuffer(arr.tobytes(), dtype=np.float16) + data = np.copy(temp) + np.random.shuffle(data) + data_backup = data + assert_equal(np.sort(data, kind='quick'), + np.sort(data_backup, kind='heap')) + + +@pytest.mark.parametrize("N", np.arange(1, 512)) +@pytest.mark.parametrize("dtype", ['h', 'H', 'i', 'I', 'l', 'L']) +def test_sort_int(N, dtype): + # Random data with MAX and MIN sprinkled + minv = np.iinfo(dtype).min + maxv = np.iinfo(dtype).max + arr = np.random.randint(low=minv, high=maxv-1, size=N, dtype=dtype) + arr[np.random.choice(arr.shape[0], 10)] = minv + arr[np.random.choice(arr.shape[0], 10)] = maxv + assert_equal(np.sort(arr, kind='quick'), np.sort(arr, kind='heap')) + + +def test_sort_uint(): + # Random data with NPY_MAX_UINT32 sprinkled + rng = np.random.default_rng(42) + N = 2047 + maxv = np.iinfo(np.uint32).max + arr = rng.integers(low=0, high=maxv, size=N).astype('uint32') + arr[np.random.choice(arr.shape[0], 10)] = maxv + assert_equal(np.sort(arr, kind='quick'), np.sort(arr, kind='heap')) + +def test_private_get_ndarray_c_version(): + assert isinstance(_get_ndarray_c_version(), int) + + +@pytest.mark.parametrize("N", np.arange(1, 512)) +@pytest.mark.parametrize("dtype", [np.float32, np.float64]) +def test_argsort_float(N, dtype): + rnd = np.random.RandomState(116112) + # (1) Regular data with a few nan: doesn't use vectorized sort + arr = -0.5 + rnd.random(N).astype(dtype) + arr[rnd.choice(arr.shape[0], 3)] = np.nan + assert_arg_sorted(arr, np.argsort(arr, kind='quick')) + + # (2) Random data with inf at the end of array + # See: https://github.com/intel/x86-simd-sort/pull/39 + arr = -0.5 + rnd.rand(N).astype(dtype) + arr[N-1] = np.inf + assert_arg_sorted(arr, np.argsort(arr, kind='quick')) + + +@pytest.mark.parametrize("N", np.arange(2, 512)) +@pytest.mark.parametrize("dtype", [np.int32, np.uint32, np.int64, np.uint64]) +def test_argsort_int(N, dtype): + rnd = np.random.RandomState(1100710816) + # (1) random data with min and max values + minv = np.iinfo(dtype).min + maxv = np.iinfo(dtype).max + arr = rnd.randint(low=minv, high=maxv, size=N, dtype=dtype) + i, j = rnd.choice(N, 2, replace=False) + arr[i] = minv + arr[j] = maxv + assert_arg_sorted(arr, np.argsort(arr, kind='quick')) + + # (2) random data with max value at the end of array + # See: https://github.com/intel/x86-simd-sort/pull/39 + arr = rnd.randint(low=minv, high=maxv, size=N, dtype=dtype) + arr[N-1] = maxv + assert_arg_sorted(arr, np.argsort(arr, kind='quick')) + + +@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks 
refcounts") +def test_gh_22683(): + b = 777.68760986 + a = np.array([b] * 10000, dtype=object) + refc_start = sys.getrefcount(b) + np.choose(np.zeros(10000, dtype=int), [a], out=a) + np.choose(np.zeros(10000, dtype=int), [a], out=a) + refc_end = sys.getrefcount(b) + assert refc_end - refc_start < 10 + + +def test_gh_24459(): + a = np.zeros((50, 3), dtype=np.float64) + with pytest.raises(TypeError): + np.choose(a, [3, -1]) diff --git a/.venv/lib/python3.12/site-packages/numpy/core/tests/test_nditer.py b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_nditer.py new file mode 100644 index 00000000..9f639c4c --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_nditer.py @@ -0,0 +1,3348 @@ +import sys +import pytest + +import textwrap +import subprocess + +import numpy as np +import numpy.core._multiarray_tests as _multiarray_tests +from numpy import array, arange, nditer, all +from numpy.testing import ( + assert_, assert_equal, assert_array_equal, assert_raises, + IS_WASM, HAS_REFCOUNT, suppress_warnings, break_cycles + ) + + +def iter_multi_index(i): + ret = [] + while not i.finished: + ret.append(i.multi_index) + i.iternext() + return ret + +def iter_indices(i): + ret = [] + while not i.finished: + ret.append(i.index) + i.iternext() + return ret + +def iter_iterindices(i): + ret = [] + while not i.finished: + ret.append(i.iterindex) + i.iternext() + return ret + +@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") +def test_iter_refcount(): + # Make sure the iterator doesn't leak + + # Basic + a = arange(6) + dt = np.dtype('f4').newbyteorder() + rc_a = sys.getrefcount(a) + rc_dt = sys.getrefcount(dt) + with nditer(a, [], + [['readwrite', 'updateifcopy']], + casting='unsafe', + op_dtypes=[dt]) as it: + assert_(not it.iterationneedsapi) + assert_(sys.getrefcount(a) > rc_a) + assert_(sys.getrefcount(dt) > rc_dt) + # del 'it' + it = None + assert_equal(sys.getrefcount(a), rc_a) + assert_equal(sys.getrefcount(dt), rc_dt) + + # With a copy + a = arange(6, dtype='f4') + dt = np.dtype('f4') + rc_a = sys.getrefcount(a) + rc_dt = sys.getrefcount(dt) + it = nditer(a, [], + [['readwrite']], + op_dtypes=[dt]) + rc2_a = sys.getrefcount(a) + rc2_dt = sys.getrefcount(dt) + it2 = it.copy() + assert_(sys.getrefcount(a) > rc2_a) + assert_(sys.getrefcount(dt) > rc2_dt) + it = None + assert_equal(sys.getrefcount(a), rc2_a) + assert_equal(sys.getrefcount(dt), rc2_dt) + it2 = None + assert_equal(sys.getrefcount(a), rc_a) + assert_equal(sys.getrefcount(dt), rc_dt) + + del it2 # avoid pyflakes unused variable warning + +def test_iter_best_order(): + # The iterator should always find the iteration order + # with increasing memory addresses + + # Test the ordering for 1-D to 5-D shapes + for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]: + a = arange(np.prod(shape)) + # Test each combination of positive and negative strides + for dirs in range(2**len(shape)): + dirs_index = [slice(None)]*len(shape) + for bit in range(len(shape)): + if ((2**bit) & dirs): + dirs_index[bit] = slice(None, None, -1) + dirs_index = tuple(dirs_index) + + aview = a.reshape(shape)[dirs_index] + # C-order + i = nditer(aview, [], [['readonly']]) + assert_equal([x for x in i], a) + # Fortran-order + i = nditer(aview.T, [], [['readonly']]) + assert_equal([x for x in i], a) + # Other order + if len(shape) > 2: + i = nditer(aview.swapaxes(0, 1), [], [['readonly']]) + assert_equal([x for x in i], a) + +def test_iter_c_order(): + # Test forcing C order + + # Test the ordering for 
1-D to 5-D shapes + for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]: + a = arange(np.prod(shape)) + # Test each combination of positive and negative strides + for dirs in range(2**len(shape)): + dirs_index = [slice(None)]*len(shape) + for bit in range(len(shape)): + if ((2**bit) & dirs): + dirs_index[bit] = slice(None, None, -1) + dirs_index = tuple(dirs_index) + + aview = a.reshape(shape)[dirs_index] + # C-order + i = nditer(aview, order='C') + assert_equal([x for x in i], aview.ravel(order='C')) + # Fortran-order + i = nditer(aview.T, order='C') + assert_equal([x for x in i], aview.T.ravel(order='C')) + # Other order + if len(shape) > 2: + i = nditer(aview.swapaxes(0, 1), order='C') + assert_equal([x for x in i], + aview.swapaxes(0, 1).ravel(order='C')) + +def test_iter_f_order(): + # Test forcing F order + + # Test the ordering for 1-D to 5-D shapes + for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]: + a = arange(np.prod(shape)) + # Test each combination of positive and negative strides + for dirs in range(2**len(shape)): + dirs_index = [slice(None)]*len(shape) + for bit in range(len(shape)): + if ((2**bit) & dirs): + dirs_index[bit] = slice(None, None, -1) + dirs_index = tuple(dirs_index) + + aview = a.reshape(shape)[dirs_index] + # C-order + i = nditer(aview, order='F') + assert_equal([x for x in i], aview.ravel(order='F')) + # Fortran-order + i = nditer(aview.T, order='F') + assert_equal([x for x in i], aview.T.ravel(order='F')) + # Other order + if len(shape) > 2: + i = nditer(aview.swapaxes(0, 1), order='F') + assert_equal([x for x in i], + aview.swapaxes(0, 1).ravel(order='F')) + +def test_iter_c_or_f_order(): + # Test forcing any contiguous (C or F) order + + # Test the ordering for 1-D to 5-D shapes + for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]: + a = arange(np.prod(shape)) + # Test each combination of positive and negative strides + for dirs in range(2**len(shape)): + dirs_index = [slice(None)]*len(shape) + for bit in range(len(shape)): + if ((2**bit) & dirs): + dirs_index[bit] = slice(None, None, -1) + dirs_index = tuple(dirs_index) + + aview = a.reshape(shape)[dirs_index] + # C-order + i = nditer(aview, order='A') + assert_equal([x for x in i], aview.ravel(order='A')) + # Fortran-order + i = nditer(aview.T, order='A') + assert_equal([x for x in i], aview.T.ravel(order='A')) + # Other order + if len(shape) > 2: + i = nditer(aview.swapaxes(0, 1), order='A') + assert_equal([x for x in i], + aview.swapaxes(0, 1).ravel(order='A')) + +def test_nditer_multi_index_set(): + # Test the multi_index set + a = np.arange(6).reshape(2, 3) + it = np.nditer(a, flags=['multi_index']) + + # Removes the iteration on two first elements of a[0] + it.multi_index = (0, 2,) + + assert_equal([i for i in it], [2, 3, 4, 5]) + +@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") +def test_nditer_multi_index_set_refcount(): + # Test if the reference count on index variable is decreased + + index = 0 + i = np.nditer(np.array([111, 222, 333, 444]), flags=['multi_index']) + + start_count = sys.getrefcount(index) + i.multi_index = (index,) + end_count = sys.getrefcount(index) + + assert_equal(start_count, end_count) + +def test_iter_best_order_multi_index_1d(): + # The multi-indices should be correct with any reordering + + a = arange(4) + # 1D order + i = nditer(a, ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), [(0,), (1,), (2,), (3,)]) + # 1D reversed order + i = nditer(a[::-1], ['multi_index'], 
[['readonly']]) + assert_equal(iter_multi_index(i), [(3,), (2,), (1,), (0,)]) + +def test_iter_best_order_multi_index_2d(): + # The multi-indices should be correct with any reordering + + a = arange(6) + # 2D C-order + i = nditer(a.reshape(2, 3), ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), [(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2)]) + # 2D Fortran-order + i = nditer(a.reshape(2, 3).copy(order='F'), ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), [(0, 0), (1, 0), (0, 1), (1, 1), (0, 2), (1, 2)]) + # 2D reversed C-order + i = nditer(a.reshape(2, 3)[::-1], ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), [(1, 0), (1, 1), (1, 2), (0, 0), (0, 1), (0, 2)]) + i = nditer(a.reshape(2, 3)[:, ::-1], ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), [(0, 2), (0, 1), (0, 0), (1, 2), (1, 1), (1, 0)]) + i = nditer(a.reshape(2, 3)[::-1, ::-1], ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), [(1, 2), (1, 1), (1, 0), (0, 2), (0, 1), (0, 0)]) + # 2D reversed Fortran-order + i = nditer(a.reshape(2, 3).copy(order='F')[::-1], ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), [(1, 0), (0, 0), (1, 1), (0, 1), (1, 2), (0, 2)]) + i = nditer(a.reshape(2, 3).copy(order='F')[:, ::-1], + ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), [(0, 2), (1, 2), (0, 1), (1, 1), (0, 0), (1, 0)]) + i = nditer(a.reshape(2, 3).copy(order='F')[::-1, ::-1], + ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), [(1, 2), (0, 2), (1, 1), (0, 1), (1, 0), (0, 0)]) + +def test_iter_best_order_multi_index_3d(): + # The multi-indices should be correct with any reordering + + a = arange(12) + # 3D C-order + i = nditer(a.reshape(2, 3, 2), ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), + [(0, 0, 0), (0, 0, 1), (0, 1, 0), (0, 1, 1), (0, 2, 0), (0, 2, 1), + (1, 0, 0), (1, 0, 1), (1, 1, 0), (1, 1, 1), (1, 2, 0), (1, 2, 1)]) + # 3D Fortran-order + i = nditer(a.reshape(2, 3, 2).copy(order='F'), ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), + [(0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0, 2, 0), (1, 2, 0), + (0, 0, 1), (1, 0, 1), (0, 1, 1), (1, 1, 1), (0, 2, 1), (1, 2, 1)]) + # 3D reversed C-order + i = nditer(a.reshape(2, 3, 2)[::-1], ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), + [(1, 0, 0), (1, 0, 1), (1, 1, 0), (1, 1, 1), (1, 2, 0), (1, 2, 1), + (0, 0, 0), (0, 0, 1), (0, 1, 0), (0, 1, 1), (0, 2, 0), (0, 2, 1)]) + i = nditer(a.reshape(2, 3, 2)[:, ::-1], ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), + [(0, 2, 0), (0, 2, 1), (0, 1, 0), (0, 1, 1), (0, 0, 0), (0, 0, 1), + (1, 2, 0), (1, 2, 1), (1, 1, 0), (1, 1, 1), (1, 0, 0), (1, 0, 1)]) + i = nditer(a.reshape(2, 3, 2)[:,:, ::-1], ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), + [(0, 0, 1), (0, 0, 0), (0, 1, 1), (0, 1, 0), (0, 2, 1), (0, 2, 0), + (1, 0, 1), (1, 0, 0), (1, 1, 1), (1, 1, 0), (1, 2, 1), (1, 2, 0)]) + # 3D reversed Fortran-order + i = nditer(a.reshape(2, 3, 2).copy(order='F')[::-1], + ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), + [(1, 0, 0), (0, 0, 0), (1, 1, 0), (0, 1, 0), (1, 2, 0), (0, 2, 0), + (1, 0, 1), (0, 0, 1), (1, 1, 1), (0, 1, 1), (1, 2, 1), (0, 2, 1)]) + i = nditer(a.reshape(2, 3, 2).copy(order='F')[:, ::-1], + ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), + [(0, 2, 0), (1, 2, 0), (0, 1, 0), (1, 1, 0), (0, 0, 0), (1, 0, 0), + (0, 2, 1), (1, 2, 1), (0, 
1, 1), (1, 1, 1), (0, 0, 1), (1, 0, 1)]) + i = nditer(a.reshape(2, 3, 2).copy(order='F')[:,:, ::-1], + ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), + [(0, 0, 1), (1, 0, 1), (0, 1, 1), (1, 1, 1), (0, 2, 1), (1, 2, 1), + (0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0, 2, 0), (1, 2, 0)]) + +def test_iter_best_order_c_index_1d(): + # The C index should be correct with any reordering + + a = arange(4) + # 1D order + i = nditer(a, ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), [0, 1, 2, 3]) + # 1D reversed order + i = nditer(a[::-1], ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), [3, 2, 1, 0]) + +def test_iter_best_order_c_index_2d(): + # The C index should be correct with any reordering + + a = arange(6) + # 2D C-order + i = nditer(a.reshape(2, 3), ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), [0, 1, 2, 3, 4, 5]) + # 2D Fortran-order + i = nditer(a.reshape(2, 3).copy(order='F'), + ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), [0, 3, 1, 4, 2, 5]) + # 2D reversed C-order + i = nditer(a.reshape(2, 3)[::-1], ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), [3, 4, 5, 0, 1, 2]) + i = nditer(a.reshape(2, 3)[:, ::-1], ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), [2, 1, 0, 5, 4, 3]) + i = nditer(a.reshape(2, 3)[::-1, ::-1], ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), [5, 4, 3, 2, 1, 0]) + # 2D reversed Fortran-order + i = nditer(a.reshape(2, 3).copy(order='F')[::-1], + ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), [3, 0, 4, 1, 5, 2]) + i = nditer(a.reshape(2, 3).copy(order='F')[:, ::-1], + ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), [2, 5, 1, 4, 0, 3]) + i = nditer(a.reshape(2, 3).copy(order='F')[::-1, ::-1], + ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), [5, 2, 4, 1, 3, 0]) + +def test_iter_best_order_c_index_3d(): + # The C index should be correct with any reordering + + a = arange(12) + # 3D C-order + i = nditer(a.reshape(2, 3, 2), ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]) + # 3D Fortran-order + i = nditer(a.reshape(2, 3, 2).copy(order='F'), + ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), + [0, 6, 2, 8, 4, 10, 1, 7, 3, 9, 5, 11]) + # 3D reversed C-order + i = nditer(a.reshape(2, 3, 2)[::-1], ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), + [6, 7, 8, 9, 10, 11, 0, 1, 2, 3, 4, 5]) + i = nditer(a.reshape(2, 3, 2)[:, ::-1], ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), + [4, 5, 2, 3, 0, 1, 10, 11, 8, 9, 6, 7]) + i = nditer(a.reshape(2, 3, 2)[:,:, ::-1], ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), + [1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10]) + # 3D reversed Fortran-order + i = nditer(a.reshape(2, 3, 2).copy(order='F')[::-1], + ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), + [6, 0, 8, 2, 10, 4, 7, 1, 9, 3, 11, 5]) + i = nditer(a.reshape(2, 3, 2).copy(order='F')[:, ::-1], + ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), + [4, 10, 2, 8, 0, 6, 5, 11, 3, 9, 1, 7]) + i = nditer(a.reshape(2, 3, 2).copy(order='F')[:,:, ::-1], + ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), + [1, 7, 3, 9, 5, 11, 0, 6, 2, 8, 4, 10]) + +def test_iter_best_order_f_index_1d(): + # The Fortran index should be correct with any reordering + + a = arange(4) + # 1D order + i = nditer(a, ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), [0, 1, 2, 3]) + # 1D reversed order + i = nditer(a[::-1], 
['f_index'], [['readonly']]) + assert_equal(iter_indices(i), [3, 2, 1, 0]) + +def test_iter_best_order_f_index_2d(): + # The Fortran index should be correct with any reordering + + a = arange(6) + # 2D C-order + i = nditer(a.reshape(2, 3), ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), [0, 2, 4, 1, 3, 5]) + # 2D Fortran-order + i = nditer(a.reshape(2, 3).copy(order='F'), + ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), [0, 1, 2, 3, 4, 5]) + # 2D reversed C-order + i = nditer(a.reshape(2, 3)[::-1], ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), [1, 3, 5, 0, 2, 4]) + i = nditer(a.reshape(2, 3)[:, ::-1], ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), [4, 2, 0, 5, 3, 1]) + i = nditer(a.reshape(2, 3)[::-1, ::-1], ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), [5, 3, 1, 4, 2, 0]) + # 2D reversed Fortran-order + i = nditer(a.reshape(2, 3).copy(order='F')[::-1], + ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), [1, 0, 3, 2, 5, 4]) + i = nditer(a.reshape(2, 3).copy(order='F')[:, ::-1], + ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), [4, 5, 2, 3, 0, 1]) + i = nditer(a.reshape(2, 3).copy(order='F')[::-1, ::-1], + ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), [5, 4, 3, 2, 1, 0]) + +def test_iter_best_order_f_index_3d(): + # The Fortran index should be correct with any reordering + + a = arange(12) + # 3D C-order + i = nditer(a.reshape(2, 3, 2), ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), + [0, 6, 2, 8, 4, 10, 1, 7, 3, 9, 5, 11]) + # 3D Fortran-order + i = nditer(a.reshape(2, 3, 2).copy(order='F'), + ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]) + # 3D reversed C-order + i = nditer(a.reshape(2, 3, 2)[::-1], ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), + [1, 7, 3, 9, 5, 11, 0, 6, 2, 8, 4, 10]) + i = nditer(a.reshape(2, 3, 2)[:, ::-1], ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), + [4, 10, 2, 8, 0, 6, 5, 11, 3, 9, 1, 7]) + i = nditer(a.reshape(2, 3, 2)[:,:, ::-1], ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), + [6, 0, 8, 2, 10, 4, 7, 1, 9, 3, 11, 5]) + # 3D reversed Fortran-order + i = nditer(a.reshape(2, 3, 2).copy(order='F')[::-1], + ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), + [1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10]) + i = nditer(a.reshape(2, 3, 2).copy(order='F')[:, ::-1], + ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), + [4, 5, 2, 3, 0, 1, 10, 11, 8, 9, 6, 7]) + i = nditer(a.reshape(2, 3, 2).copy(order='F')[:,:, ::-1], + ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), + [6, 7, 8, 9, 10, 11, 0, 1, 2, 3, 4, 5]) + +def test_iter_no_inner_full_coalesce(): + # Check no_inner iterators which coalesce into a single inner loop + + for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]: + size = np.prod(shape) + a = arange(size) + # Test each combination of forward and backwards indexing + for dirs in range(2**len(shape)): + dirs_index = [slice(None)]*len(shape) + for bit in range(len(shape)): + if ((2**bit) & dirs): + dirs_index[bit] = slice(None, None, -1) + dirs_index = tuple(dirs_index) + + aview = a.reshape(shape)[dirs_index] + # C-order + i = nditer(aview, ['external_loop'], [['readonly']]) + assert_equal(i.ndim, 1) + assert_equal(i[0].shape, (size,)) + # Fortran-order + i = nditer(aview.T, ['external_loop'], [['readonly']]) + assert_equal(i.ndim, 1) + assert_equal(i[0].shape, (size,)) + # Other order + if 
len(shape) > 2: + i = nditer(aview.swapaxes(0, 1), + ['external_loop'], [['readonly']]) + assert_equal(i.ndim, 1) + assert_equal(i[0].shape, (size,)) + +def test_iter_no_inner_dim_coalescing(): + # Check no_inner iterators whose dimensions may not coalesce completely + + # Skipping the last element in a dimension prevents coalescing + # with the next-bigger dimension + a = arange(24).reshape(2, 3, 4)[:,:, :-1] + i = nditer(a, ['external_loop'], [['readonly']]) + assert_equal(i.ndim, 2) + assert_equal(i[0].shape, (3,)) + a = arange(24).reshape(2, 3, 4)[:, :-1,:] + i = nditer(a, ['external_loop'], [['readonly']]) + assert_equal(i.ndim, 2) + assert_equal(i[0].shape, (8,)) + a = arange(24).reshape(2, 3, 4)[:-1,:,:] + i = nditer(a, ['external_loop'], [['readonly']]) + assert_equal(i.ndim, 1) + assert_equal(i[0].shape, (12,)) + + # Even with lots of 1-sized dimensions, should still coalesce + a = arange(24).reshape(1, 1, 2, 1, 1, 3, 1, 1, 4, 1, 1) + i = nditer(a, ['external_loop'], [['readonly']]) + assert_equal(i.ndim, 1) + assert_equal(i[0].shape, (24,)) + +def test_iter_dim_coalescing(): + # Check that the correct number of dimensions are coalesced + + # Tracking a multi-index disables coalescing + a = arange(24).reshape(2, 3, 4) + i = nditer(a, ['multi_index'], [['readonly']]) + assert_equal(i.ndim, 3) + + # A tracked index can allow coalescing if it's compatible with the array + a3d = arange(24).reshape(2, 3, 4) + i = nditer(a3d, ['c_index'], [['readonly']]) + assert_equal(i.ndim, 1) + i = nditer(a3d.swapaxes(0, 1), ['c_index'], [['readonly']]) + assert_equal(i.ndim, 3) + i = nditer(a3d.T, ['c_index'], [['readonly']]) + assert_equal(i.ndim, 3) + i = nditer(a3d.T, ['f_index'], [['readonly']]) + assert_equal(i.ndim, 1) + i = nditer(a3d.T.swapaxes(0, 1), ['f_index'], [['readonly']]) + assert_equal(i.ndim, 3) + + # When C or F order is forced, coalescing may still occur + a3d = arange(24).reshape(2, 3, 4) + i = nditer(a3d, order='C') + assert_equal(i.ndim, 1) + i = nditer(a3d.T, order='C') + assert_equal(i.ndim, 3) + i = nditer(a3d, order='F') + assert_equal(i.ndim, 3) + i = nditer(a3d.T, order='F') + assert_equal(i.ndim, 1) + i = nditer(a3d, order='A') + assert_equal(i.ndim, 1) + i = nditer(a3d.T, order='A') + assert_equal(i.ndim, 1) + +def test_iter_broadcasting(): + # Standard NumPy broadcasting rules + + # 1D with scalar + i = nditer([arange(6), np.int32(2)], ['multi_index'], [['readonly']]*2) + assert_equal(i.itersize, 6) + assert_equal(i.shape, (6,)) + + # 2D with scalar + i = nditer([arange(6).reshape(2, 3), np.int32(2)], + ['multi_index'], [['readonly']]*2) + assert_equal(i.itersize, 6) + assert_equal(i.shape, (2, 3)) + # 2D with 1D + i = nditer([arange(6).reshape(2, 3), arange(3)], + ['multi_index'], [['readonly']]*2) + assert_equal(i.itersize, 6) + assert_equal(i.shape, (2, 3)) + i = nditer([arange(2).reshape(2, 1), arange(3)], + ['multi_index'], [['readonly']]*2) + assert_equal(i.itersize, 6) + assert_equal(i.shape, (2, 3)) + # 2D with 2D + i = nditer([arange(2).reshape(2, 1), arange(3).reshape(1, 3)], + ['multi_index'], [['readonly']]*2) + assert_equal(i.itersize, 6) + assert_equal(i.shape, (2, 3)) + + # 3D with scalar + i = nditer([np.int32(2), arange(24).reshape(4, 2, 3)], + ['multi_index'], [['readonly']]*2) + assert_equal(i.itersize, 24) + assert_equal(i.shape, (4, 2, 3)) + # 3D with 1D + i = nditer([arange(3), arange(24).reshape(4, 2, 3)], + ['multi_index'], [['readonly']]*2) + assert_equal(i.itersize, 24) + assert_equal(i.shape, (4, 2, 3)) + i = nditer([arange(3), 
arange(8).reshape(4, 2, 1)], + ['multi_index'], [['readonly']]*2) + assert_equal(i.itersize, 24) + assert_equal(i.shape, (4, 2, 3)) + # 3D with 2D + i = nditer([arange(6).reshape(2, 3), arange(24).reshape(4, 2, 3)], + ['multi_index'], [['readonly']]*2) + assert_equal(i.itersize, 24) + assert_equal(i.shape, (4, 2, 3)) + i = nditer([arange(2).reshape(2, 1), arange(24).reshape(4, 2, 3)], + ['multi_index'], [['readonly']]*2) + assert_equal(i.itersize, 24) + assert_equal(i.shape, (4, 2, 3)) + i = nditer([arange(3).reshape(1, 3), arange(8).reshape(4, 2, 1)], + ['multi_index'], [['readonly']]*2) + assert_equal(i.itersize, 24) + assert_equal(i.shape, (4, 2, 3)) + # 3D with 3D + i = nditer([arange(2).reshape(1, 2, 1), arange(3).reshape(1, 1, 3), + arange(4).reshape(4, 1, 1)], + ['multi_index'], [['readonly']]*3) + assert_equal(i.itersize, 24) + assert_equal(i.shape, (4, 2, 3)) + i = nditer([arange(6).reshape(1, 2, 3), arange(4).reshape(4, 1, 1)], + ['multi_index'], [['readonly']]*2) + assert_equal(i.itersize, 24) + assert_equal(i.shape, (4, 2, 3)) + i = nditer([arange(24).reshape(4, 2, 3), arange(12).reshape(4, 1, 3)], + ['multi_index'], [['readonly']]*2) + assert_equal(i.itersize, 24) + assert_equal(i.shape, (4, 2, 3)) + +def test_iter_itershape(): + # Check that allocated outputs work with a specified shape + a = np.arange(6, dtype='i2').reshape(2, 3) + i = nditer([a, None], [], [['readonly'], ['writeonly', 'allocate']], + op_axes=[[0, 1, None], None], + itershape=(-1, -1, 4)) + assert_equal(i.operands[1].shape, (2, 3, 4)) + assert_equal(i.operands[1].strides, (24, 8, 2)) + + i = nditer([a.T, None], [], [['readonly'], ['writeonly', 'allocate']], + op_axes=[[0, 1, None], None], + itershape=(-1, -1, 4)) + assert_equal(i.operands[1].shape, (3, 2, 4)) + assert_equal(i.operands[1].strides, (8, 24, 2)) + + i = nditer([a.T, None], [], [['readonly'], ['writeonly', 'allocate']], + order='F', + op_axes=[[0, 1, None], None], + itershape=(-1, -1, 4)) + assert_equal(i.operands[1].shape, (3, 2, 4)) + assert_equal(i.operands[1].strides, (2, 6, 12)) + + # If we specify 1 in the itershape, it shouldn't allow broadcasting + # of that dimension to a bigger value + assert_raises(ValueError, nditer, [a, None], [], + [['readonly'], ['writeonly', 'allocate']], + op_axes=[[0, 1, None], None], + itershape=(-1, 1, 4)) + # Test bug that for no op_axes but itershape, they are NULLed correctly + i = np.nditer([np.ones(2), None, None], itershape=(2,)) + +def test_iter_broadcasting_errors(): + # Check that errors are thrown for bad broadcasting shapes + + # 1D with 1D + assert_raises(ValueError, nditer, [arange(2), arange(3)], + [], [['readonly']]*2) + # 2D with 1D + assert_raises(ValueError, nditer, + [arange(6).reshape(2, 3), arange(2)], + [], [['readonly']]*2) + # 2D with 2D + assert_raises(ValueError, nditer, + [arange(6).reshape(2, 3), arange(9).reshape(3, 3)], + [], [['readonly']]*2) + assert_raises(ValueError, nditer, + [arange(6).reshape(2, 3), arange(4).reshape(2, 2)], + [], [['readonly']]*2) + # 3D with 3D + assert_raises(ValueError, nditer, + [arange(36).reshape(3, 3, 4), arange(24).reshape(2, 3, 4)], + [], [['readonly']]*2) + assert_raises(ValueError, nditer, + [arange(8).reshape(2, 4, 1), arange(24).reshape(2, 3, 4)], + [], [['readonly']]*2) + + # Verify that the error message mentions the right shapes + try: + nditer([arange(2).reshape(1, 2, 1), + arange(3).reshape(1, 3), + arange(6).reshape(2, 3)], + [], + [['readonly'], ['readonly'], ['writeonly', 'no_broadcast']]) + raise AssertionError('Should have raised a 
broadcast error') + except ValueError as e: + msg = str(e) + # The message should contain the shape of the 3rd operand + assert_(msg.find('(2,3)') >= 0, + 'Message "%s" doesn\'t contain operand shape (2,3)' % msg) + # The message should contain the broadcast shape + assert_(msg.find('(1,2,3)') >= 0, + 'Message "%s" doesn\'t contain broadcast shape (1,2,3)' % msg) + + try: + nditer([arange(6).reshape(2, 3), arange(2)], + [], + [['readonly'], ['readonly']], + op_axes=[[0, 1], [0, np.newaxis]], + itershape=(4, 3)) + raise AssertionError('Should have raised a broadcast error') + except ValueError as e: + msg = str(e) + # The message should contain "shape->remappedshape" for each operand + assert_(msg.find('(2,3)->(2,3)') >= 0, + 'Message "%s" doesn\'t contain operand shape (2,3)->(2,3)' % msg) + assert_(msg.find('(2,)->(2,newaxis)') >= 0, + ('Message "%s" doesn\'t contain remapped operand shape' + + '(2,)->(2,newaxis)') % msg) + # The message should contain the itershape parameter + assert_(msg.find('(4,3)') >= 0, + 'Message "%s" doesn\'t contain itershape parameter (4,3)' % msg) + + try: + nditer([np.zeros((2, 1, 1)), np.zeros((2,))], + [], + [['writeonly', 'no_broadcast'], ['readonly']]) + raise AssertionError('Should have raised a broadcast error') + except ValueError as e: + msg = str(e) + # The message should contain the shape of the bad operand + assert_(msg.find('(2,1,1)') >= 0, + 'Message "%s" doesn\'t contain operand shape (2,1,1)' % msg) + # The message should contain the broadcast shape + assert_(msg.find('(2,1,2)') >= 0, + 'Message "%s" doesn\'t contain the broadcast shape (2,1,2)' % msg) + +def test_iter_flags_errors(): + # Check that bad combinations of flags produce errors + + a = arange(6) + + # Not enough operands + assert_raises(ValueError, nditer, [], [], []) + # Too many operands + assert_raises(ValueError, nditer, [a]*100, [], [['readonly']]*100) + # Bad global flag + assert_raises(ValueError, nditer, [a], ['bad flag'], [['readonly']]) + # Bad op flag + assert_raises(ValueError, nditer, [a], [], [['readonly', 'bad flag']]) + # Bad order parameter + assert_raises(ValueError, nditer, [a], [], [['readonly']], order='G') + # Bad casting parameter + assert_raises(ValueError, nditer, [a], [], [['readonly']], casting='noon') + # op_flags must match ops + assert_raises(ValueError, nditer, [a]*3, [], [['readonly']]*2) + # Cannot track both a C and an F index + assert_raises(ValueError, nditer, a, + ['c_index', 'f_index'], [['readonly']]) + # Inner iteration and multi-indices/indices are incompatible + assert_raises(ValueError, nditer, a, + ['external_loop', 'multi_index'], [['readonly']]) + assert_raises(ValueError, nditer, a, + ['external_loop', 'c_index'], [['readonly']]) + assert_raises(ValueError, nditer, a, + ['external_loop', 'f_index'], [['readonly']]) + # Must specify exactly one of readwrite/readonly/writeonly per operand + assert_raises(ValueError, nditer, a, [], [[]]) + assert_raises(ValueError, nditer, a, [], [['readonly', 'writeonly']]) + assert_raises(ValueError, nditer, a, [], [['readonly', 'readwrite']]) + assert_raises(ValueError, nditer, a, [], [['writeonly', 'readwrite']]) + assert_raises(ValueError, nditer, a, + [], [['readonly', 'writeonly', 'readwrite']]) + # Python scalars are always readonly + assert_raises(TypeError, nditer, 1.5, [], [['writeonly']]) + assert_raises(TypeError, nditer, 1.5, [], [['readwrite']]) + # Array scalars are always readonly + assert_raises(TypeError, nditer, np.int32(1), [], [['writeonly']]) + assert_raises(TypeError, nditer, 
np.int32(1), [], [['readwrite']]) + # Check readonly array + a.flags.writeable = False + assert_raises(ValueError, nditer, a, [], [['writeonly']]) + assert_raises(ValueError, nditer, a, [], [['readwrite']]) + a.flags.writeable = True + # Multi-indices available only with the multi_index flag + i = nditer(arange(6), [], [['readonly']]) + assert_raises(ValueError, lambda i:i.multi_index, i) + # Index available only with an index flag + assert_raises(ValueError, lambda i:i.index, i) + # GotoCoords and GotoIndex incompatible with buffering or no_inner + + def assign_multi_index(i): + i.multi_index = (0,) + + def assign_index(i): + i.index = 0 + + def assign_iterindex(i): + i.iterindex = 0 + + def assign_iterrange(i): + i.iterrange = (0, 1) + i = nditer(arange(6), ['external_loop']) + assert_raises(ValueError, assign_multi_index, i) + assert_raises(ValueError, assign_index, i) + assert_raises(ValueError, assign_iterindex, i) + assert_raises(ValueError, assign_iterrange, i) + i = nditer(arange(6), ['buffered']) + assert_raises(ValueError, assign_multi_index, i) + assert_raises(ValueError, assign_index, i) + assert_raises(ValueError, assign_iterrange, i) + # Can't iterate if size is zero + assert_raises(ValueError, nditer, np.array([])) + +def test_iter_slice(): + a, b, c = np.arange(3), np.arange(3), np.arange(3.) + i = nditer([a, b, c], [], ['readwrite']) + with i: + i[0:2] = (3, 3) + assert_equal(a, [3, 1, 2]) + assert_equal(b, [3, 1, 2]) + assert_equal(c, [0, 1, 2]) + i[1] = 12 + assert_equal(i[0:2], [3, 12]) + +def test_iter_assign_mapping(): + a = np.arange(24, dtype='f8').reshape(2, 3, 4).T + it = np.nditer(a, [], [['readwrite', 'updateifcopy']], + casting='same_kind', op_dtypes=[np.dtype('f4')]) + with it: + it.operands[0][...] = 3 + it.operands[0][...] = 14 + assert_equal(a, 14) + it = np.nditer(a, [], [['readwrite', 'updateifcopy']], + casting='same_kind', op_dtypes=[np.dtype('f4')]) + with it: + x = it.operands[0][-1:1] + x[...] = 14 + it.operands[0][...] 
= -1234 + assert_equal(a, -1234) + # check for no warnings on dealloc + x = None + it = None + +def test_iter_nbo_align_contig(): + # Check that byte order, alignment, and contig changes work + + # Byte order change by requesting a specific dtype + a = np.arange(6, dtype='f4') + au = a.byteswap().newbyteorder() + assert_(a.dtype.byteorder != au.dtype.byteorder) + i = nditer(au, [], [['readwrite', 'updateifcopy']], + casting='equiv', + op_dtypes=[np.dtype('f4')]) + with i: + # context manager triggers WRITEBACKIFCOPY on i at exit + assert_equal(i.dtypes[0].byteorder, a.dtype.byteorder) + assert_equal(i.operands[0].dtype.byteorder, a.dtype.byteorder) + assert_equal(i.operands[0], a) + i.operands[0][:] = 2 + assert_equal(au, [2]*6) + del i # should not raise a warning + # Byte order change by requesting NBO + a = np.arange(6, dtype='f4') + au = a.byteswap().newbyteorder() + assert_(a.dtype.byteorder != au.dtype.byteorder) + with nditer(au, [], [['readwrite', 'updateifcopy', 'nbo']], + casting='equiv') as i: + # context manager triggers UPDATEIFCOPY on i at exit + assert_equal(i.dtypes[0].byteorder, a.dtype.byteorder) + assert_equal(i.operands[0].dtype.byteorder, a.dtype.byteorder) + assert_equal(i.operands[0], a) + i.operands[0][:] = 12345 + i.operands[0][:] = 2 + assert_equal(au, [2]*6) + + # Unaligned input + a = np.zeros((6*4+1,), dtype='i1')[1:] + a.dtype = 'f4' + a[:] = np.arange(6, dtype='f4') + assert_(not a.flags.aligned) + # Without 'aligned', shouldn't copy + i = nditer(a, [], [['readonly']]) + assert_(not i.operands[0].flags.aligned) + assert_equal(i.operands[0], a) + # With 'aligned', should make a copy + with nditer(a, [], [['readwrite', 'updateifcopy', 'aligned']]) as i: + assert_(i.operands[0].flags.aligned) + # context manager triggers UPDATEIFCOPY on i at exit + assert_equal(i.operands[0], a) + i.operands[0][:] = 3 + assert_equal(a, [3]*6) + + # Discontiguous input + a = arange(12) + # If it is contiguous, shouldn't copy + i = nditer(a[:6], [], [['readonly']]) + assert_(i.operands[0].flags.contiguous) + assert_equal(i.operands[0], a[:6]) + # If it isn't contiguous, should buffer + i = nditer(a[::2], ['buffered', 'external_loop'], + [['readonly', 'contig']], + buffersize=10) + assert_(i[0].flags.contiguous) + assert_equal(i[0], a[::2]) + +def test_iter_array_cast(): + # Check that arrays are cast as requested + + # No cast 'f4' -> 'f4' + a = np.arange(6, dtype='f4').reshape(2, 3) + i = nditer(a, [], [['readwrite']], op_dtypes=[np.dtype('f4')]) + with i: + assert_equal(i.operands[0], a) + assert_equal(i.operands[0].dtype, np.dtype('f4')) + + # Byte-order cast '<f4' -> '>f4' + a = np.arange(6, dtype='<f4').reshape(2, 3) + with nditer(a, [], [['readwrite', 'updateifcopy']], + casting='equiv', + op_dtypes=[np.dtype('>f4')]) as i: + assert_equal(i.operands[0], a) + assert_equal(i.operands[0].dtype, np.dtype('>f4')) + + # Safe case 'f4' -> 'f8' + a = np.arange(24, dtype='f4').reshape(2, 3, 4).swapaxes(1, 2) + i = nditer(a, [], [['readonly', 'copy']], + casting='safe', + op_dtypes=[np.dtype('f8')]) + assert_equal(i.operands[0], a) + assert_equal(i.operands[0].dtype, np.dtype('f8')) + # The memory layout of the temporary should match a (a is (48,4,16)) + # except negative strides get flipped to positive strides. 
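+ # (a's 'f4' strides are (48, 4, 16); the copy keeps the same element order
+ # but uses an 8-byte itemsize, so each stride doubles to (96, 8, 32).)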
+ assert_equal(i.operands[0].strides, (96, 8, 32)) + a = a[::-1,:, ::-1] + i = nditer(a, [], [['readonly', 'copy']], + casting='safe', + op_dtypes=[np.dtype('f8')]) + assert_equal(i.operands[0], a) + assert_equal(i.operands[0].dtype, np.dtype('f8')) + assert_equal(i.operands[0].strides, (96, 8, 32)) + + # Same-kind cast 'f8' -> 'f4' -> 'f8' + a = np.arange(24, dtype='f8').reshape(2, 3, 4).T + with nditer(a, [], + [['readwrite', 'updateifcopy']], + casting='same_kind', + op_dtypes=[np.dtype('f4')]) as i: + assert_equal(i.operands[0], a) + assert_equal(i.operands[0].dtype, np.dtype('f4')) + assert_equal(i.operands[0].strides, (4, 16, 48)) + # Check that WRITEBACKIFCOPY is activated at exit + i.operands[0][2, 1, 1] = -12.5 + assert_(a[2, 1, 1] != -12.5) + assert_equal(a[2, 1, 1], -12.5) + + a = np.arange(6, dtype='i4')[::-2] + with nditer(a, [], + [['writeonly', 'updateifcopy']], + casting='unsafe', + op_dtypes=[np.dtype('f4')]) as i: + assert_equal(i.operands[0].dtype, np.dtype('f4')) + # Even though the stride was negative in 'a', it + # becomes positive in the temporary + assert_equal(i.operands[0].strides, (4,)) + i.operands[0][:] = [1, 2, 3] + assert_equal(a, [1, 2, 3]) + +def test_iter_array_cast_errors(): + # Check that invalid casts are caught + + # Need to enable copying for casts to occur + assert_raises(TypeError, nditer, arange(2, dtype='f4'), [], + [['readonly']], op_dtypes=[np.dtype('f8')]) + # Also need to allow casting for casts to occur + assert_raises(TypeError, nditer, arange(2, dtype='f4'), [], + [['readonly', 'copy']], casting='no', + op_dtypes=[np.dtype('f8')]) + assert_raises(TypeError, nditer, arange(2, dtype='f4'), [], + [['readonly', 'copy']], casting='equiv', + op_dtypes=[np.dtype('f8')]) + assert_raises(TypeError, nditer, arange(2, dtype='f8'), [], + [['writeonly', 'updateifcopy']], + casting='no', + op_dtypes=[np.dtype('f4')]) + assert_raises(TypeError, nditer, arange(2, dtype='f8'), [], + [['writeonly', 'updateifcopy']], + casting='equiv', + op_dtypes=[np.dtype('f4')]) + # '<f4' -> '>f4' should not work with casting='no' + assert_raises(TypeError, nditer, arange(2, dtype='<f4'), [], + [['readonly', 'copy']], casting='no', + op_dtypes=[np.dtype('>f4')]) + # 'f4' -> 'f8' is a safe cast, but 'f8' -> 'f4' isn't + assert_raises(TypeError, nditer, arange(2, dtype='f4'), [], + [['readwrite', 'updateifcopy']], + casting='safe', + op_dtypes=[np.dtype('f8')]) + assert_raises(TypeError, nditer, arange(2, dtype='f8'), [], + [['readwrite', 'updateifcopy']], + casting='safe', + op_dtypes=[np.dtype('f4')]) + # 'f4' -> 'i4' is neither a safe nor a same-kind cast + assert_raises(TypeError, nditer, arange(2, dtype='f4'), [], + [['readonly', 'copy']], + casting='same_kind', + op_dtypes=[np.dtype('i4')]) + assert_raises(TypeError, nditer, arange(2, dtype='i4'), [], + [['writeonly', 'updateifcopy']], + casting='same_kind', + op_dtypes=[np.dtype('f4')]) + +def test_iter_scalar_cast(): + # Check that scalars are cast as requested + + # No cast 'f4' -> 'f4' + i = nditer(np.float32(2.5), [], [['readonly']], + op_dtypes=[np.dtype('f4')]) + assert_equal(i.dtypes[0], np.dtype('f4')) + assert_equal(i.value.dtype, np.dtype('f4')) + assert_equal(i.value, 2.5) + # Safe cast 'f4' -> 'f8' + i = nditer(np.float32(2.5), [], + [['readonly', 'copy']], + casting='safe', + op_dtypes=[np.dtype('f8')]) + assert_equal(i.dtypes[0], np.dtype('f8')) + assert_equal(i.value.dtype, np.dtype('f8')) + assert_equal(i.value, 2.5) + # Same-kind cast 'f8' -> 'f4' + i = nditer(np.float64(2.5), [], + [['readonly', 
'copy']], + casting='same_kind', + op_dtypes=[np.dtype('f4')]) + assert_equal(i.dtypes[0], np.dtype('f4')) + assert_equal(i.value.dtype, np.dtype('f4')) + assert_equal(i.value, 2.5) + # Unsafe cast 'f8' -> 'i4' + i = nditer(np.float64(3.0), [], + [['readonly', 'copy']], + casting='unsafe', + op_dtypes=[np.dtype('i4')]) + assert_equal(i.dtypes[0], np.dtype('i4')) + assert_equal(i.value.dtype, np.dtype('i4')) + assert_equal(i.value, 3) + # Readonly scalars may be cast even without setting COPY or BUFFERED + i = nditer(3, [], [['readonly']], op_dtypes=[np.dtype('f8')]) + assert_equal(i[0].dtype, np.dtype('f8')) + assert_equal(i[0], 3.) + +def test_iter_scalar_cast_errors(): + # Check that invalid casts are caught + + # Need to allow copying/buffering for write casts of scalars to occur + assert_raises(TypeError, nditer, np.float32(2), [], + [['readwrite']], op_dtypes=[np.dtype('f8')]) + assert_raises(TypeError, nditer, 2.5, [], + [['readwrite']], op_dtypes=[np.dtype('f4')]) + # 'f8' -> 'f4' isn't a safe cast if the value would overflow + assert_raises(TypeError, nditer, np.float64(1e60), [], + [['readonly']], + casting='safe', + op_dtypes=[np.dtype('f4')]) + # 'f4' -> 'i4' is neither a safe nor a same-kind cast + assert_raises(TypeError, nditer, np.float32(2), [], + [['readonly']], + casting='same_kind', + op_dtypes=[np.dtype('i4')]) + +def test_iter_object_arrays_basic(): + # Check that object arrays work + + obj = {'a':3,'b':'d'} + a = np.array([[1, 2, 3], None, obj, None], dtype='O') + if HAS_REFCOUNT: + rc = sys.getrefcount(obj) + + # Need to allow references for object arrays + assert_raises(TypeError, nditer, a) + if HAS_REFCOUNT: + assert_equal(sys.getrefcount(obj), rc) + + i = nditer(a, ['refs_ok'], ['readonly']) + vals = [x_[()] for x_ in i] + assert_equal(np.array(vals, dtype='O'), a) + vals, i, x = [None]*3 + if HAS_REFCOUNT: + assert_equal(sys.getrefcount(obj), rc) + + i = nditer(a.reshape(2, 2).T, ['refs_ok', 'buffered'], + ['readonly'], order='C') + assert_(i.iterationneedsapi) + vals = [x_[()] for x_ in i] + assert_equal(np.array(vals, dtype='O'), a.reshape(2, 2).ravel(order='F')) + vals, i, x = [None]*3 + if HAS_REFCOUNT: + assert_equal(sys.getrefcount(obj), rc) + + i = nditer(a.reshape(2, 2).T, ['refs_ok', 'buffered'], + ['readwrite'], order='C') + with i: + for x in i: + x[...] = None + vals, i, x = [None]*3 + if HAS_REFCOUNT: + assert_(sys.getrefcount(obj) == rc-1) + assert_equal(a, np.array([None]*4, dtype='O')) + +def test_iter_object_arrays_conversions(): + # Conversions to/from objects + a = np.arange(6, dtype='O') + i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'], + casting='unsafe', op_dtypes='i4') + with i: + for x in i: + x[...] += 1 + assert_equal(a, np.arange(6)+1) + + a = np.arange(6, dtype='i4') + i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'], + casting='unsafe', op_dtypes='O') + with i: + for x in i: + x[...] += 1 + assert_equal(a, np.arange(6)+1) + + # Non-contiguous object array + a = np.zeros((6,), dtype=[('p', 'i1'), ('a', 'O')]) + a = a['a'] + a[:] = np.arange(6) + i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'], + casting='unsafe', op_dtypes='i4') + with i: + for x in i: + x[...] += 1 + assert_equal(a, np.arange(6)+1) + + #Non-contiguous value array + a = np.zeros((6,), dtype=[('p', 'i1'), ('a', 'i4')]) + a = a['a'] + a[:] = np.arange(6) + 98172488 + i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'], + casting='unsafe', op_dtypes='O') + with i: + ob = i[0][()] + if HAS_REFCOUNT: + rc = sys.getrefcount(ob) + for x in i: + x[...] 
+= 1 + if HAS_REFCOUNT: + assert_(sys.getrefcount(ob) == rc-1) + assert_equal(a, np.arange(6)+98172489) + +def test_iter_common_dtype(): + # Check that the iterator finds a common data type correctly + + i = nditer([array([3], dtype='f4'), array([0], dtype='f8')], + ['common_dtype'], + [['readonly', 'copy']]*2, + casting='safe') + assert_equal(i.dtypes[0], np.dtype('f8')) + assert_equal(i.dtypes[1], np.dtype('f8')) + i = nditer([array([3], dtype='i4'), array([0], dtype='f4')], + ['common_dtype'], + [['readonly', 'copy']]*2, + casting='safe') + assert_equal(i.dtypes[0], np.dtype('f8')) + assert_equal(i.dtypes[1], np.dtype('f8')) + i = nditer([array([3], dtype='f4'), array(0, dtype='f8')], + ['common_dtype'], + [['readonly', 'copy']]*2, + casting='same_kind') + assert_equal(i.dtypes[0], np.dtype('f4')) + assert_equal(i.dtypes[1], np.dtype('f4')) + i = nditer([array([3], dtype='u4'), array(0, dtype='i4')], + ['common_dtype'], + [['readonly', 'copy']]*2, + casting='safe') + assert_equal(i.dtypes[0], np.dtype('u4')) + assert_equal(i.dtypes[1], np.dtype('u4')) + i = nditer([array([3], dtype='u4'), array(-12, dtype='i4')], + ['common_dtype'], + [['readonly', 'copy']]*2, + casting='safe') + assert_equal(i.dtypes[0], np.dtype('i8')) + assert_equal(i.dtypes[1], np.dtype('i8')) + i = nditer([array([3], dtype='u4'), array(-12, dtype='i4'), + array([2j], dtype='c8'), array([9], dtype='f8')], + ['common_dtype'], + [['readonly', 'copy']]*4, + casting='safe') + assert_equal(i.dtypes[0], np.dtype('c16')) + assert_equal(i.dtypes[1], np.dtype('c16')) + assert_equal(i.dtypes[2], np.dtype('c16')) + assert_equal(i.dtypes[3], np.dtype('c16')) + assert_equal(i.value, (3, -12, 2j, 9)) + + # When allocating outputs, other outputs aren't factored in + i = nditer([array([3], dtype='i4'), None, array([2j], dtype='c16')], [], + [['readonly', 'copy'], + ['writeonly', 'allocate'], + ['writeonly']], + casting='safe') + assert_equal(i.dtypes[0], np.dtype('i4')) + assert_equal(i.dtypes[1], np.dtype('i4')) + assert_equal(i.dtypes[2], np.dtype('c16')) + # But, if common data types are requested, they are + i = nditer([array([3], dtype='i4'), None, array([2j], dtype='c16')], + ['common_dtype'], + [['readonly', 'copy'], + ['writeonly', 'allocate'], + ['writeonly']], + casting='safe') + assert_equal(i.dtypes[0], np.dtype('c16')) + assert_equal(i.dtypes[1], np.dtype('c16')) + assert_equal(i.dtypes[2], np.dtype('c16')) + +def test_iter_copy_if_overlap(): + # Ensure the iterator makes copies on read/write overlap, if requested + + # Copy not needed, 1 op + for flag in ['readonly', 'writeonly', 'readwrite']: + a = arange(10) + i = nditer([a], ['copy_if_overlap'], [[flag]]) + with i: + assert_(i.operands[0] is a) + + # Copy needed, 2 ops, read-write overlap + x = arange(10) + a = x[1:] + b = x[:-1] + with nditer([a, b], ['copy_if_overlap'], [['readonly'], ['readwrite']]) as i: + assert_(not np.shares_memory(*i.operands)) + + # Copy not needed with elementwise, 2 ops, exactly same arrays + x = arange(10) + a = x + b = x + i = nditer([a, b], ['copy_if_overlap'], [['readonly', 'overlap_assume_elementwise'], + ['readwrite', 'overlap_assume_elementwise']]) + with i: + assert_(i.operands[0] is a and i.operands[1] is b) + with nditer([a, b], ['copy_if_overlap'], [['readonly'], ['readwrite']]) as i: + assert_(i.operands[0] is a and not np.shares_memory(i.operands[1], b)) + + # Copy not needed, 2 ops, no overlap + x = arange(10) + a = x[::2] + b = x[1::2] + i = nditer([a, b], ['copy_if_overlap'], [['readonly'], ['writeonly']]) + 
assert_(i.operands[0] is a and i.operands[1] is b) + + # Copy needed, 2 ops, read-write overlap + x = arange(4, dtype=np.int8) + a = x[3:] + b = x.view(np.int32)[:1] + with nditer([a, b], ['copy_if_overlap'], [['readonly'], ['writeonly']]) as i: + assert_(not np.shares_memory(*i.operands)) + + # Copy needed, 3 ops, read-write overlap + for flag in ['writeonly', 'readwrite']: + x = np.ones([10, 10]) + a = x + b = x.T + c = x + with nditer([a, b, c], ['copy_if_overlap'], + [['readonly'], ['readonly'], [flag]]) as i: + a2, b2, c2 = i.operands + assert_(not np.shares_memory(a2, c2)) + assert_(not np.shares_memory(b2, c2)) + + # Copy not needed, 3 ops, read-only overlap + x = np.ones([10, 10]) + a = x + b = x.T + c = x + i = nditer([a, b, c], ['copy_if_overlap'], + [['readonly'], ['readonly'], ['readonly']]) + a2, b2, c2 = i.operands + assert_(a is a2) + assert_(b is b2) + assert_(c is c2) + + # Copy not needed, 3 ops, read-only overlap + x = np.ones([10, 10]) + a = x + b = np.ones([10, 10]) + c = x.T + i = nditer([a, b, c], ['copy_if_overlap'], + [['readonly'], ['writeonly'], ['readonly']]) + a2, b2, c2 = i.operands + assert_(a is a2) + assert_(b is b2) + assert_(c is c2) + + # Copy not needed, 3 ops, write-only overlap + x = np.arange(7) + a = x[:3] + b = x[3:6] + c = x[4:7] + i = nditer([a, b, c], ['copy_if_overlap'], + [['readonly'], ['writeonly'], ['writeonly']]) + a2, b2, c2 = i.operands + assert_(a is a2) + assert_(b is b2) + assert_(c is c2) + +def test_iter_op_axes(): + # Check that custom axes work + + # Reverse the axes + a = arange(6).reshape(2, 3) + i = nditer([a, a.T], [], [['readonly']]*2, op_axes=[[0, 1], [1, 0]]) + assert_(all([x == y for (x, y) in i])) + a = arange(24).reshape(2, 3, 4) + i = nditer([a.T, a], [], [['readonly']]*2, op_axes=[[2, 1, 0], None]) + assert_(all([x == y for (x, y) in i])) + + # Broadcast 1D to any dimension + a = arange(1, 31).reshape(2, 3, 5) + b = arange(1, 3) + i = nditer([a, b], [], [['readonly']]*2, op_axes=[None, [0, -1, -1]]) + assert_equal([x*y for (x, y) in i], (a*b.reshape(2, 1, 1)).ravel()) + b = arange(1, 4) + i = nditer([a, b], [], [['readonly']]*2, op_axes=[None, [-1, 0, -1]]) + assert_equal([x*y for (x, y) in i], (a*b.reshape(1, 3, 1)).ravel()) + b = arange(1, 6) + i = nditer([a, b], [], [['readonly']]*2, + op_axes=[None, [np.newaxis, np.newaxis, 0]]) + assert_equal([x*y for (x, y) in i], (a*b.reshape(1, 1, 5)).ravel()) + + # Inner product-style broadcasting + a = arange(24).reshape(2, 3, 4) + b = arange(40).reshape(5, 2, 4) + i = nditer([a, b], ['multi_index'], [['readonly']]*2, + op_axes=[[0, 1, -1, -1], [-1, -1, 0, 1]]) + assert_equal(i.shape, (2, 3, 5, 2)) + + # Matrix product-style broadcasting + a = arange(12).reshape(3, 4) + b = arange(20).reshape(4, 5) + i = nditer([a, b], ['multi_index'], [['readonly']]*2, + op_axes=[[0, -1], [-1, 1]]) + assert_equal(i.shape, (3, 5)) + +def test_iter_op_axes_errors(): + # Check that custom axes throws errors for bad inputs + + # Wrong number of items in op_axes + a = arange(6).reshape(2, 3) + assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2, + op_axes=[[0], [1], [0]]) + # Out of bounds items in op_axes + assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2, + op_axes=[[2, 1], [0, 1]]) + assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2, + op_axes=[[0, 1], [2, -1]]) + # Duplicate items in op_axes + assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2, + op_axes=[[0, 0], [0, 1]]) + assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2, + 
op_axes=[[0, 1], [1, 1]]) + + # Different sized arrays in op_axes + assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2, + op_axes=[[0, 1], [0, 1, 0]]) + + # Non-broadcastable dimensions in the result + assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2, + op_axes=[[0, 1], [1, 0]]) + +def test_iter_copy(): + # Check that copying the iterator works correctly + a = arange(24).reshape(2, 3, 4) + + # Simple iterator + i = nditer(a) + j = i.copy() + assert_equal([x[()] for x in i], [x[()] for x in j]) + + i.iterindex = 3 + j = i.copy() + assert_equal([x[()] for x in i], [x[()] for x in j]) + + # Buffered iterator + i = nditer(a, ['buffered', 'ranged'], order='F', buffersize=3) + j = i.copy() + assert_equal([x[()] for x in i], [x[()] for x in j]) + + i.iterindex = 3 + j = i.copy() + assert_equal([x[()] for x in i], [x[()] for x in j]) + + i.iterrange = (3, 9) + j = i.copy() + assert_equal([x[()] for x in i], [x[()] for x in j]) + + i.iterrange = (2, 18) + next(i) + next(i) + j = i.copy() + assert_equal([x[()] for x in i], [x[()] for x in j]) + + # Casting iterator + with nditer(a, ['buffered'], order='F', casting='unsafe', + op_dtypes='f8', buffersize=5) as i: + j = i.copy() + assert_equal([x[()] for x in j], a.ravel(order='F')) + + a = arange(24, dtype='<i4').reshape(2, 3, 4) + with nditer(a, ['buffered'], order='F', casting='unsafe', + op_dtypes='>f8', buffersize=5) as i: + j = i.copy() + assert_equal([x[()] for x in j], a.ravel(order='F')) + + +@pytest.mark.parametrize("dtype", np.typecodes["All"]) +@pytest.mark.parametrize("loop_dtype", np.typecodes["All"]) +@pytest.mark.filterwarnings("ignore::numpy.ComplexWarning") +def test_iter_copy_casts(dtype, loop_dtype): + # Ensure the dtype is never flexible: + if loop_dtype.lower() == "m": + loop_dtype = loop_dtype + "[ms]" + elif np.dtype(loop_dtype).itemsize == 0: + loop_dtype = loop_dtype + "50" + + # Make things a bit more interesting by requiring a byte-swap as well: + arr = np.ones(1000, dtype=np.dtype(dtype).newbyteorder()) + try: + expected = arr.astype(loop_dtype) + except Exception: + # Some casts are not possible, do not worry about them + return + + it = np.nditer((arr,), ["buffered", "external_loop", "refs_ok"], + op_dtypes=[loop_dtype], casting="unsafe") + + if np.issubdtype(np.dtype(loop_dtype), np.number): + # Casting to strings may be strange, but for simple dtypes do not rely + # on the cast being correct: + assert_array_equal(expected, np.ones(1000, dtype=loop_dtype)) + + it_copy = it.copy() + res = next(it) + del it + res_copy = next(it_copy) + del it_copy + + assert_array_equal(res, expected) + assert_array_equal(res_copy, expected) + + +def test_iter_copy_casts_structured(): + # Test a complicated structured dtype for casting, as it requires + # both multiple steps and a more complex casting setup. 
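+
+# --- Illustrative sketch (not part of the upstream test file) ---
+# A compact example of the op_axes mapping exercised by test_iter_op_axes
+# above: -1 entries mark iteration axes an operand does not have, so the
+# operand broadcasts along them. The helper name is local to this sketch.
+import numpy as np
+
+def _sketch_op_axes_broadcast():
+    a = np.arange(6).reshape(2, 3)
+    b = np.arange(2)
+    it = np.nditer([a, b], ['multi_index'], [['readonly']] * 2,
+                   op_axes=[None, [0, -1]])
+    assert it.shape == (2, 3)
+    products = [int(x) * int(y) for x, y in it]
+    assert products == (a * b[:, None]).ravel().tolist()
+    return products
+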
+ # Includes a structured -> unstructured (any to object), and many other + # casts, which cause this to require all steps in the casting machinery + # one level down as well as the iterator copy (which uses NpyAuxData clone) + in_dtype = np.dtype([("a", np.dtype("i,")), + ("b", np.dtype(">i,<i,>d,S17,>d,(3)f,O,i1"))]) + out_dtype = np.dtype([("a", np.dtype("O")), + ("b", np.dtype(">i,>i,S17,>d,>U3,(3)d,i1,O"))]) + arr = np.ones(1000, dtype=in_dtype) + + it = np.nditer((arr,), ["buffered", "external_loop", "refs_ok"], + op_dtypes=[out_dtype], casting="unsafe") + it_copy = it.copy() + + res1 = next(it) + del it + res2 = next(it_copy) + del it_copy + + expected = arr["a"].astype(out_dtype["a"]) + assert_array_equal(res1["a"], expected) + assert_array_equal(res2["a"], expected) + + for field in in_dtype["b"].names: + # Note that the .base avoids the subarray field + expected = arr["b"][field].astype(out_dtype["b"][field].base) + assert_array_equal(res1["b"][field], expected) + assert_array_equal(res2["b"][field], expected) + + +def test_iter_copy_casts_structured2(): + # Similar to the above, this is a fairly arcane test to cover internals + in_dtype = np.dtype([("a", np.dtype("O,O")), + ("b", np.dtype("(5)O,(3)O,(1,)O,(1,)i,(1,)O"))]) + out_dtype = np.dtype([("a", np.dtype("O")), + ("b", np.dtype("O,(3)i,(4)O,(4)O,(4)i"))]) + + arr = np.ones(1, dtype=in_dtype) + it = np.nditer((arr,), ["buffered", "external_loop", "refs_ok"], + op_dtypes=[out_dtype], casting="unsafe") + it_copy = it.copy() + + res1 = next(it) + del it + res2 = next(it_copy) + del it_copy + + # Array of two structured scalars: + for res in res1, res2: + # Cast to tuple by getitem, which may be weird and changable?: + assert type(res["a"][0]) == tuple + assert res["a"][0] == (1, 1) + + for res in res1, res2: + assert_array_equal(res["b"]["f0"][0], np.ones(5, dtype=object)) + assert_array_equal(res["b"]["f1"], np.ones((1, 3), dtype="i")) + assert res["b"]["f2"].shape == (1, 4) + assert_array_equal(res["b"]["f2"][0], np.ones(4, dtype=object)) + assert_array_equal(res["b"]["f3"][0], np.ones(4, dtype=object)) + assert_array_equal(res["b"]["f3"][0], np.ones(4, dtype="i")) + + +def test_iter_allocate_output_simple(): + # Check that the iterator will properly allocate outputs + + # Simple case + a = arange(6) + i = nditer([a, None], [], [['readonly'], ['writeonly', 'allocate']], + op_dtypes=[None, np.dtype('f4')]) + assert_equal(i.operands[1].shape, a.shape) + assert_equal(i.operands[1].dtype, np.dtype('f4')) + +def test_iter_allocate_output_buffered_readwrite(): + # Allocated output with buffering + delay_bufalloc + + a = arange(6) + i = nditer([a, None], ['buffered', 'delay_bufalloc'], + [['readonly'], ['allocate', 'readwrite']]) + with i: + i.operands[1][:] = 1 + i.reset() + for x in i: + x[1][...] += x[0][...] 
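+
+# --- Illustrative sketch (not part of the upstream test file) ---
+# Minimal example of the output allocation tested in
+# test_iter_allocate_output_simple above: a None operand flagged
+# 'allocate' is created by the iterator with the broadcast shape and the
+# requested dtype. The helper name is local to this sketch.
+import numpy as np
+
+def _sketch_allocate_output():
+    a = np.arange(6)
+    it = np.nditer([a, None], [],
+                   [['readonly'], ['writeonly', 'allocate']],
+                   op_dtypes=[None, np.dtype('f4')])
+    with it:
+        for x, y in it:
+            y[...] = x * 2
+        out = it.operands[1]
+    assert out.dtype == np.dtype('f4')
+    assert out.shape == a.shape
+    return out
+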
+ assert_equal(i.operands[1], a+1) + +def test_iter_allocate_output_itorder(): + # The allocated output should match the iteration order + + # C-order input, best iteration order + a = arange(6, dtype='i4').reshape(2, 3) + i = nditer([a, None], [], [['readonly'], ['writeonly', 'allocate']], + op_dtypes=[None, np.dtype('f4')]) + assert_equal(i.operands[1].shape, a.shape) + assert_equal(i.operands[1].strides, a.strides) + assert_equal(i.operands[1].dtype, np.dtype('f4')) + # F-order input, best iteration order + a = arange(24, dtype='i4').reshape(2, 3, 4).T + i = nditer([a, None], [], [['readonly'], ['writeonly', 'allocate']], + op_dtypes=[None, np.dtype('f4')]) + assert_equal(i.operands[1].shape, a.shape) + assert_equal(i.operands[1].strides, a.strides) + assert_equal(i.operands[1].dtype, np.dtype('f4')) + # Non-contiguous input, C iteration order + a = arange(24, dtype='i4').reshape(2, 3, 4).swapaxes(0, 1) + i = nditer([a, None], [], + [['readonly'], ['writeonly', 'allocate']], + order='C', + op_dtypes=[None, np.dtype('f4')]) + assert_equal(i.operands[1].shape, a.shape) + assert_equal(i.operands[1].strides, (32, 16, 4)) + assert_equal(i.operands[1].dtype, np.dtype('f4')) + +def test_iter_allocate_output_opaxes(): + # Specifying op_axes should work + + a = arange(24, dtype='i4').reshape(2, 3, 4) + i = nditer([None, a], [], [['writeonly', 'allocate'], ['readonly']], + op_dtypes=[np.dtype('u4'), None], + op_axes=[[1, 2, 0], None]) + assert_equal(i.operands[0].shape, (4, 2, 3)) + assert_equal(i.operands[0].strides, (4, 48, 16)) + assert_equal(i.operands[0].dtype, np.dtype('u4')) + +def test_iter_allocate_output_types_promotion(): + # Check type promotion of automatic outputs + + i = nditer([array([3], dtype='f4'), array([0], dtype='f8'), None], [], + [['readonly']]*2+[['writeonly', 'allocate']]) + assert_equal(i.dtypes[2], np.dtype('f8')) + i = nditer([array([3], dtype='i4'), array([0], dtype='f4'), None], [], + [['readonly']]*2+[['writeonly', 'allocate']]) + assert_equal(i.dtypes[2], np.dtype('f8')) + i = nditer([array([3], dtype='f4'), array(0, dtype='f8'), None], [], + [['readonly']]*2+[['writeonly', 'allocate']]) + assert_equal(i.dtypes[2], np.dtype('f4')) + i = nditer([array([3], dtype='u4'), array(0, dtype='i4'), None], [], + [['readonly']]*2+[['writeonly', 'allocate']]) + assert_equal(i.dtypes[2], np.dtype('u4')) + i = nditer([array([3], dtype='u4'), array(-12, dtype='i4'), None], [], + [['readonly']]*2+[['writeonly', 'allocate']]) + assert_equal(i.dtypes[2], np.dtype('i8')) + +def test_iter_allocate_output_types_byte_order(): + # Verify the rules for byte order changes + + # When there's just one input, the output type exactly matches + a = array([3], dtype='u4').newbyteorder() + i = nditer([a, None], [], + [['readonly'], ['writeonly', 'allocate']]) + assert_equal(i.dtypes[0], i.dtypes[1]) + # With two or more inputs, the output type is in native byte order + i = nditer([a, a, None], [], + [['readonly'], ['readonly'], ['writeonly', 'allocate']]) + assert_(i.dtypes[0] != i.dtypes[2]) + assert_equal(i.dtypes[0].newbyteorder('='), i.dtypes[2]) + +def test_iter_allocate_output_types_scalar(): + # If the inputs are all scalars, the output should be a scalar + + i = nditer([None, 1, 2.3, np.float32(12), np.complex128(3)], [], + [['writeonly', 'allocate']] + [['readonly']]*4) + assert_equal(i.operands[0].dtype, np.dtype('complex128')) + assert_equal(i.operands[0].ndim, 0) + +def test_iter_allocate_output_subtype(): + # Make sure that the subtype with priority wins + class 
MyNDArray(np.ndarray): + __array_priority__ = 15 + + # subclass vs ndarray + a = np.array([[1, 2], [3, 4]]).view(MyNDArray) + b = np.arange(4).reshape(2, 2).T + i = nditer([a, b, None], [], + [['readonly'], ['readonly'], ['writeonly', 'allocate']]) + assert_equal(type(a), type(i.operands[2])) + assert_(type(b) is not type(i.operands[2])) + assert_equal(i.operands[2].shape, (2, 2)) + + # If subtypes are disabled, we should get back an ndarray. + i = nditer([a, b, None], [], + [['readonly'], ['readonly'], + ['writeonly', 'allocate', 'no_subtype']]) + assert_equal(type(b), type(i.operands[2])) + assert_(type(a) is not type(i.operands[2])) + assert_equal(i.operands[2].shape, (2, 2)) + +def test_iter_allocate_output_errors(): + # Check that the iterator will throw errors for bad output allocations + + # Need an input if no output data type is specified + a = arange(6) + assert_raises(TypeError, nditer, [a, None], [], + [['writeonly'], ['writeonly', 'allocate']]) + # Allocated output should be flagged for writing + assert_raises(ValueError, nditer, [a, None], [], + [['readonly'], ['allocate', 'readonly']]) + # Allocated output can't have buffering without delayed bufalloc + assert_raises(ValueError, nditer, [a, None], ['buffered'], + ['allocate', 'readwrite']) + # Must specify dtype if there are no inputs (cannot promote existing ones; + # maybe this should use the 'f4' here, but it does not historically.) + assert_raises(TypeError, nditer, [None, None], [], + [['writeonly', 'allocate'], + ['writeonly', 'allocate']], + op_dtypes=[None, np.dtype('f4')]) + # If using op_axes, must specify all the axes + a = arange(24, dtype='i4').reshape(2, 3, 4) + assert_raises(ValueError, nditer, [a, None], [], + [['readonly'], ['writeonly', 'allocate']], + op_dtypes=[None, np.dtype('f4')], + op_axes=[None, [0, np.newaxis, 1]]) + # If using op_axes, the axes must be within bounds + assert_raises(ValueError, nditer, [a, None], [], + [['readonly'], ['writeonly', 'allocate']], + op_dtypes=[None, np.dtype('f4')], + op_axes=[None, [0, 3, 1]]) + # If using op_axes, there can't be duplicates + assert_raises(ValueError, nditer, [a, None], [], + [['readonly'], ['writeonly', 'allocate']], + op_dtypes=[None, np.dtype('f4')], + op_axes=[None, [0, 2, 1, 0]]) + # Not all axes may be specified if a reduction. If there is a hole + # in op_axes, this is an error. + a = arange(24, dtype='i4').reshape(2, 3, 4) + assert_raises(ValueError, nditer, [a, None], ["reduce_ok"], + [['readonly'], ['readwrite', 'allocate']], + op_dtypes=[None, np.dtype('f4')], + op_axes=[None, [0, np.newaxis, 2]]) + +def test_all_allocated(): + # When no output and no shape is given, `()` is used as shape. 
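+
+# --- Illustrative sketch (not part of the upstream test file) ---
+# A small example of the promotion rule asserted in
+# test_iter_allocate_output_types_promotion above: an allocated output
+# gets the promoted dtype of the inputs, matching np.result_type
+# (int32 with float32 gives float64). The helper name is local to this
+# sketch.
+import numpy as np
+
+def _sketch_allocated_output_promotion():
+    a = np.array([3], dtype='i4')
+    b = np.array([0], dtype='f4')
+    it = np.nditer([a, b, None], [],
+                   [['readonly'], ['readonly'], ['writeonly', 'allocate']])
+    assert it.dtypes[2] == np.dtype('f8')
+    assert it.dtypes[2] == np.result_type(a, b)
+    return it.dtypes[2]
+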
+ i = np.nditer([None], op_dtypes=["int64"]) + assert i.operands[0].shape == () + assert i.dtypes == (np.dtype("int64"),) + + i = np.nditer([None], op_dtypes=["int64"], itershape=(2, 3, 4)) + assert i.operands[0].shape == (2, 3, 4) + +def test_iter_remove_axis(): + a = arange(24).reshape(2, 3, 4) + + i = nditer(a, ['multi_index']) + i.remove_axis(1) + assert_equal([x for x in i], a[:, 0,:].ravel()) + + a = a[::-1,:,:] + i = nditer(a, ['multi_index']) + i.remove_axis(0) + assert_equal([x for x in i], a[0,:,:].ravel()) + +def test_iter_remove_multi_index_inner_loop(): + # Check that removing multi-index support works + + a = arange(24).reshape(2, 3, 4) + + i = nditer(a, ['multi_index']) + assert_equal(i.ndim, 3) + assert_equal(i.shape, (2, 3, 4)) + assert_equal(i.itviews[0].shape, (2, 3, 4)) + + # Removing the multi-index tracking causes all dimensions to coalesce + before = [x for x in i] + i.remove_multi_index() + after = [x for x in i] + + assert_equal(before, after) + assert_equal(i.ndim, 1) + assert_raises(ValueError, lambda i:i.shape, i) + assert_equal(i.itviews[0].shape, (24,)) + + # Removing the inner loop means there's just one iteration + i.reset() + assert_equal(i.itersize, 24) + assert_equal(i[0].shape, tuple()) + i.enable_external_loop() + assert_equal(i.itersize, 24) + assert_equal(i[0].shape, (24,)) + assert_equal(i.value, arange(24)) + +def test_iter_iterindex(): + # Make sure iterindex works + + buffersize = 5 + a = arange(24).reshape(4, 3, 2) + for flags in ([], ['buffered']): + i = nditer(a, flags, buffersize=buffersize) + assert_equal(iter_iterindices(i), list(range(24))) + i.iterindex = 2 + assert_equal(iter_iterindices(i), list(range(2, 24))) + + i = nditer(a, flags, order='F', buffersize=buffersize) + assert_equal(iter_iterindices(i), list(range(24))) + i.iterindex = 5 + assert_equal(iter_iterindices(i), list(range(5, 24))) + + i = nditer(a[::-1], flags, order='F', buffersize=buffersize) + assert_equal(iter_iterindices(i), list(range(24))) + i.iterindex = 9 + assert_equal(iter_iterindices(i), list(range(9, 24))) + + i = nditer(a[::-1, ::-1], flags, order='C', buffersize=buffersize) + assert_equal(iter_iterindices(i), list(range(24))) + i.iterindex = 13 + assert_equal(iter_iterindices(i), list(range(13, 24))) + + i = nditer(a[::1, ::-1], flags, buffersize=buffersize) + assert_equal(iter_iterindices(i), list(range(24))) + i.iterindex = 23 + assert_equal(iter_iterindices(i), list(range(23, 24))) + i.reset() + i.iterindex = 2 + assert_equal(iter_iterindices(i), list(range(2, 24))) + +def test_iter_iterrange(): + # Make sure getting and resetting the iterrange works + + buffersize = 5 + a = arange(24, dtype='i4').reshape(4, 3, 2) + a_fort = a.ravel(order='F') + + i = nditer(a, ['ranged'], ['readonly'], order='F', + buffersize=buffersize) + assert_equal(i.iterrange, (0, 24)) + assert_equal([x[()] for x in i], a_fort) + for r in [(0, 24), (1, 2), (3, 24), (5, 5), (0, 20), (23, 24)]: + i.iterrange = r + assert_equal(i.iterrange, r) + assert_equal([x[()] for x in i], a_fort[r[0]:r[1]]) + + i = nditer(a, ['ranged', 'buffered'], ['readonly'], order='F', + op_dtypes='f8', buffersize=buffersize) + assert_equal(i.iterrange, (0, 24)) + assert_equal([x[()] for x in i], a_fort) + for r in [(0, 24), (1, 2), (3, 24), (5, 5), (0, 20), (23, 24)]: + i.iterrange = r + assert_equal(i.iterrange, r) + assert_equal([x[()] for x in i], a_fort[r[0]:r[1]]) + + def get_array(i): + val = np.array([], dtype='f8') + for x in i: + val = np.concatenate((val, x)) + return val + + i = nditer(a, ['ranged', 
'buffered', 'external_loop'], + ['readonly'], order='F', + op_dtypes='f8', buffersize=buffersize) + assert_equal(i.iterrange, (0, 24)) + assert_equal(get_array(i), a_fort) + for r in [(0, 24), (1, 2), (3, 24), (5, 5), (0, 20), (23, 24)]: + i.iterrange = r + assert_equal(i.iterrange, r) + assert_equal(get_array(i), a_fort[r[0]:r[1]]) + +def test_iter_buffering(): + # Test buffering with several buffer sizes and types + arrays = [] + # F-order swapped array + arrays.append(np.arange(24, + dtype='c16').reshape(2, 3, 4).T.newbyteorder().byteswap()) + # Contiguous 1-dimensional array + arrays.append(np.arange(10, dtype='f4')) + # Unaligned array + a = np.zeros((4*16+1,), dtype='i1')[1:] + a.dtype = 'i4' + a[:] = np.arange(16, dtype='i4') + arrays.append(a) + # 4-D F-order array + arrays.append(np.arange(120, dtype='i4').reshape(5, 3, 2, 4).T) + for a in arrays: + for buffersize in (1, 2, 3, 5, 8, 11, 16, 1024): + vals = [] + i = nditer(a, ['buffered', 'external_loop'], + [['readonly', 'nbo', 'aligned']], + order='C', + casting='equiv', + buffersize=buffersize) + while not i.finished: + assert_(i[0].size <= buffersize) + vals.append(i[0].copy()) + i.iternext() + assert_equal(np.concatenate(vals), a.ravel(order='C')) + +def test_iter_write_buffering(): + # Test that buffering of writes is working + + # F-order swapped array + a = np.arange(24).reshape(2, 3, 4).T.newbyteorder().byteswap() + i = nditer(a, ['buffered'], + [['readwrite', 'nbo', 'aligned']], + casting='equiv', + order='C', + buffersize=16) + x = 0 + with i: + while not i.finished: + i[0] = x + x += 1 + i.iternext() + assert_equal(a.ravel(order='C'), np.arange(24)) + +def test_iter_buffering_delayed_alloc(): + # Test that delaying buffer allocation works + + a = np.arange(6) + b = np.arange(1, dtype='f4') + i = nditer([a, b], ['buffered', 'delay_bufalloc', 'multi_index', 'reduce_ok'], + ['readwrite'], + casting='unsafe', + op_dtypes='f4') + assert_(i.has_delayed_bufalloc) + assert_raises(ValueError, lambda i:i.multi_index, i) + assert_raises(ValueError, lambda i:i[0], i) + assert_raises(ValueError, lambda i:i[0:2], i) + + def assign_iter(i): + i[0] = 0 + assert_raises(ValueError, assign_iter, i) + + i.reset() + assert_(not i.has_delayed_bufalloc) + assert_equal(i.multi_index, (0,)) + with i: + assert_equal(i[0], 0) + i[1] = 1 + assert_equal(i[0:2], [0, 1]) + assert_equal([[x[0][()], x[1][()]] for x in i], list(zip(range(6), [1]*6))) + +def test_iter_buffered_cast_simple(): + # Test that buffering can handle a simple cast + + a = np.arange(10, dtype='f4') + i = nditer(a, ['buffered', 'external_loop'], + [['readwrite', 'nbo', 'aligned']], + casting='same_kind', + op_dtypes=[np.dtype('f8')], + buffersize=3) + with i: + for v in i: + v[...] *= 2 + + assert_equal(a, 2*np.arange(10, dtype='f4')) + +def test_iter_buffered_cast_byteswapped(): + # Test that buffering can handle a cast which requires swap->cast->swap + + a = np.arange(10, dtype='f4').newbyteorder().byteswap() + i = nditer(a, ['buffered', 'external_loop'], + [['readwrite', 'nbo', 'aligned']], + casting='same_kind', + op_dtypes=[np.dtype('f8').newbyteorder()], + buffersize=3) + with i: + for v in i: + v[...] 
*= 2 + + assert_equal(a, 2*np.arange(10, dtype='f4')) + + with suppress_warnings() as sup: + sup.filter(np.ComplexWarning) + + a = np.arange(10, dtype='f8').newbyteorder().byteswap() + i = nditer(a, ['buffered', 'external_loop'], + [['readwrite', 'nbo', 'aligned']], + casting='unsafe', + op_dtypes=[np.dtype('c8').newbyteorder()], + buffersize=3) + with i: + for v in i: + v[...] *= 2 + + assert_equal(a, 2*np.arange(10, dtype='f8')) + +def test_iter_buffered_cast_byteswapped_complex(): + # Test that buffering can handle a cast which requires swap->cast->copy + + a = np.arange(10, dtype='c8').newbyteorder().byteswap() + a += 2j + i = nditer(a, ['buffered', 'external_loop'], + [['readwrite', 'nbo', 'aligned']], + casting='same_kind', + op_dtypes=[np.dtype('c16')], + buffersize=3) + with i: + for v in i: + v[...] *= 2 + assert_equal(a, 2*np.arange(10, dtype='c8') + 4j) + + a = np.arange(10, dtype='c8') + a += 2j + i = nditer(a, ['buffered', 'external_loop'], + [['readwrite', 'nbo', 'aligned']], + casting='same_kind', + op_dtypes=[np.dtype('c16').newbyteorder()], + buffersize=3) + with i: + for v in i: + v[...] *= 2 + assert_equal(a, 2*np.arange(10, dtype='c8') + 4j) + + a = np.arange(10, dtype=np.clongdouble).newbyteorder().byteswap() + a += 2j + i = nditer(a, ['buffered', 'external_loop'], + [['readwrite', 'nbo', 'aligned']], + casting='same_kind', + op_dtypes=[np.dtype('c16')], + buffersize=3) + with i: + for v in i: + v[...] *= 2 + assert_equal(a, 2*np.arange(10, dtype=np.clongdouble) + 4j) + + a = np.arange(10, dtype=np.longdouble).newbyteorder().byteswap() + i = nditer(a, ['buffered', 'external_loop'], + [['readwrite', 'nbo', 'aligned']], + casting='same_kind', + op_dtypes=[np.dtype('f4')], + buffersize=7) + with i: + for v in i: + v[...] *= 2 + assert_equal(a, 2*np.arange(10, dtype=np.longdouble)) + +def test_iter_buffered_cast_structured_type(): + # Tests buffering of structured types + + # simple -> struct type (duplicates the value) + sdt = [('a', 'f4'), ('b', 'i8'), ('c', 'c8', (2, 3)), ('d', 'O')] + a = np.arange(3, dtype='f4') + 0.5 + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes=sdt) + vals = [np.array(x) for x in i] + assert_equal(vals[0]['a'], 0.5) + assert_equal(vals[0]['b'], 0) + assert_equal(vals[0]['c'], [[(0.5)]*3]*2) + assert_equal(vals[0]['d'], 0.5) + assert_equal(vals[1]['a'], 1.5) + assert_equal(vals[1]['b'], 1) + assert_equal(vals[1]['c'], [[(1.5)]*3]*2) + assert_equal(vals[1]['d'], 1.5) + assert_equal(vals[0].dtype, np.dtype(sdt)) + + # object -> struct type + sdt = [('a', 'f4'), ('b', 'i8'), ('c', 'c8', (2, 3)), ('d', 'O')] + a = np.zeros((3,), dtype='O') + a[0] = (0.5, 0.5, [[0.5, 0.5, 0.5], [0.5, 0.5, 0.5]], 0.5) + a[1] = (1.5, 1.5, [[1.5, 1.5, 1.5], [1.5, 1.5, 1.5]], 1.5) + a[2] = (2.5, 2.5, [[2.5, 2.5, 2.5], [2.5, 2.5, 2.5]], 2.5) + if HAS_REFCOUNT: + rc = sys.getrefcount(a[0]) + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes=sdt) + vals = [x.copy() for x in i] + assert_equal(vals[0]['a'], 0.5) + assert_equal(vals[0]['b'], 0) + assert_equal(vals[0]['c'], [[(0.5)]*3]*2) + assert_equal(vals[0]['d'], 0.5) + assert_equal(vals[1]['a'], 1.5) + assert_equal(vals[1]['b'], 1) + assert_equal(vals[1]['c'], [[(1.5)]*3]*2) + assert_equal(vals[1]['d'], 1.5) + assert_equal(vals[0].dtype, np.dtype(sdt)) + vals, i, x = [None]*3 + if HAS_REFCOUNT: + assert_equal(sys.getrefcount(a[0]), rc) + + # single-field struct type -> simple + sdt = [('a', 'f4')] + a = np.array([(5.5,), (8,)], dtype=sdt) + i = 
nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes='i4') + assert_equal([x_[()] for x_ in i], [5, 8]) + + # make sure multi-field struct type -> simple doesn't work + sdt = [('a', 'f4'), ('b', 'i8'), ('d', 'O')] + a = np.array([(5.5, 7, 'test'), (8, 10, 11)], dtype=sdt) + assert_raises(TypeError, lambda: ( + nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes='i4'))) + + # struct type -> struct type (field-wise copy) + sdt1 = [('a', 'f4'), ('b', 'i8'), ('d', 'O')] + sdt2 = [('d', 'u2'), ('a', 'O'), ('b', 'f8')] + a = np.array([(1, 2, 3), (4, 5, 6)], dtype=sdt1) + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes=sdt2) + assert_equal(i[0].dtype, np.dtype(sdt2)) + assert_equal([np.array(x_) for x_ in i], + [np.array((1, 2, 3), dtype=sdt2), + np.array((4, 5, 6), dtype=sdt2)]) + + +def test_iter_buffered_cast_structured_type_failure_with_cleanup(): + # make sure struct type -> struct type with different + # number of fields fails + sdt1 = [('a', 'f4'), ('b', 'i8'), ('d', 'O')] + sdt2 = [('b', 'O'), ('a', 'f8')] + a = np.array([(1, 2, 3), (4, 5, 6)], dtype=sdt1) + + for intent in ["readwrite", "readonly", "writeonly"]: + # This test was initially designed to test an error at a different + # place, but will now raise earlier to to the cast not being possible: + # `assert np.can_cast(a.dtype, sdt2, casting="unsafe")` fails. + # Without a faulty DType, there is probably no reliable + # way to get the initial tested behaviour. + simple_arr = np.array([1, 2], dtype="i,i") # requires clean up + with pytest.raises(TypeError): + nditer((simple_arr, a), ['buffered', 'refs_ok'], [intent, intent], + casting='unsafe', op_dtypes=["f,f", sdt2]) + + +def test_buffered_cast_error_paths(): + with pytest.raises(ValueError): + # The input is cast into an `S3` buffer + np.nditer((np.array("a", dtype="S1"),), op_dtypes=["i"], + casting="unsafe", flags=["buffered"]) + + # The `M8[ns]` is cast into the `S3` output + it = np.nditer((np.array(1, dtype="i"),), op_dtypes=["S1"], + op_flags=["writeonly"], casting="unsafe", flags=["buffered"]) + with pytest.raises(ValueError): + with it: + buf = next(it) + buf[...] = "a" # cannot be converted to int. + +@pytest.mark.skipif(IS_WASM, reason="Cannot start subprocess") +@pytest.mark.skipif(not HAS_REFCOUNT, reason="PyPy seems to not hit this.") +def test_buffered_cast_error_paths_unraisable(): + # The following gives an unraisable error. Pytest sometimes captures that + # (depending python and/or pytest version). So with Python>=3.8 this can + # probably be cleaned out in the future to check for + # pytest.PytestUnraisableExceptionWarning: + code = textwrap.dedent(""" + import numpy as np + + it = np.nditer((np.array(1, dtype="i"),), op_dtypes=["S1"], + op_flags=["writeonly"], casting="unsafe", flags=["buffered"]) + buf = next(it) + buf[...] = "a" + del buf, it # Flushing only happens during deallocate right now. 
+ """) + res = subprocess.check_output([sys.executable, "-c", code], + stderr=subprocess.STDOUT, text=True) + assert "ValueError" in res + + +def test_iter_buffered_cast_subarray(): + # Tests buffering of subarrays + + # one element -> many (copies it to all) + sdt1 = [('a', 'f4')] + sdt2 = [('a', 'f8', (3, 2, 2))] + a = np.zeros((6,), dtype=sdt1) + a['a'] = np.arange(6) + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes=sdt2) + assert_equal(i[0].dtype, np.dtype(sdt2)) + for x, count in zip(i, list(range(6))): + assert_(np.all(x['a'] == count)) + + # one element -> many -> back (copies it to all) + sdt1 = [('a', 'O', (1, 1))] + sdt2 = [('a', 'O', (3, 2, 2))] + a = np.zeros((6,), dtype=sdt1) + a['a'][:, 0, 0] = np.arange(6) + i = nditer(a, ['buffered', 'refs_ok'], ['readwrite'], + casting='unsafe', + op_dtypes=sdt2) + with i: + assert_equal(i[0].dtype, np.dtype(sdt2)) + count = 0 + for x in i: + assert_(np.all(x['a'] == count)) + x['a'][0] += 2 + count += 1 + assert_equal(a['a'], np.arange(6).reshape(6, 1, 1)+2) + + # many -> one element -> back (copies just element 0) + sdt1 = [('a', 'O', (3, 2, 2))] + sdt2 = [('a', 'O', (1,))] + a = np.zeros((6,), dtype=sdt1) + a['a'][:, 0, 0, 0] = np.arange(6) + i = nditer(a, ['buffered', 'refs_ok'], ['readwrite'], + casting='unsafe', + op_dtypes=sdt2) + with i: + assert_equal(i[0].dtype, np.dtype(sdt2)) + count = 0 + for x in i: + assert_equal(x['a'], count) + x['a'] += 2 + count += 1 + assert_equal(a['a'], np.arange(6).reshape(6, 1, 1, 1)*np.ones((1, 3, 2, 2))+2) + + # many -> one element -> back (copies just element 0) + sdt1 = [('a', 'f8', (3, 2, 2))] + sdt2 = [('a', 'O', (1,))] + a = np.zeros((6,), dtype=sdt1) + a['a'][:, 0, 0, 0] = np.arange(6) + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes=sdt2) + assert_equal(i[0].dtype, np.dtype(sdt2)) + count = 0 + for x in i: + assert_equal(x['a'], count) + count += 1 + + # many -> one element (copies just element 0) + sdt1 = [('a', 'O', (3, 2, 2))] + sdt2 = [('a', 'f4', (1,))] + a = np.zeros((6,), dtype=sdt1) + a['a'][:, 0, 0, 0] = np.arange(6) + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes=sdt2) + assert_equal(i[0].dtype, np.dtype(sdt2)) + count = 0 + for x in i: + assert_equal(x['a'], count) + count += 1 + + # many -> matching shape (straightforward copy) + sdt1 = [('a', 'O', (3, 2, 2))] + sdt2 = [('a', 'f4', (3, 2, 2))] + a = np.zeros((6,), dtype=sdt1) + a['a'] = np.arange(6*3*2*2).reshape(6, 3, 2, 2) + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes=sdt2) + assert_equal(i[0].dtype, np.dtype(sdt2)) + count = 0 + for x in i: + assert_equal(x['a'], a[count]['a']) + count += 1 + + # vector -> smaller vector (truncates) + sdt1 = [('a', 'f8', (6,))] + sdt2 = [('a', 'f4', (2,))] + a = np.zeros((6,), dtype=sdt1) + a['a'] = np.arange(6*6).reshape(6, 6) + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes=sdt2) + assert_equal(i[0].dtype, np.dtype(sdt2)) + count = 0 + for x in i: + assert_equal(x['a'], a[count]['a'][:2]) + count += 1 + + # vector -> bigger vector (pads with zeros) + sdt1 = [('a', 'f8', (2,))] + sdt2 = [('a', 'f4', (6,))] + a = np.zeros((6,), dtype=sdt1) + a['a'] = np.arange(6*2).reshape(6, 2) + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes=sdt2) + assert_equal(i[0].dtype, np.dtype(sdt2)) + count = 0 + for x in i: + assert_equal(x['a'][:2], a[count]['a']) + 
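+
+# --- Illustrative sketch (not part of the upstream test file) ---
+# A standalone example of the buffered-cast pattern used in
+# test_iter_buffered_cast_structured_type / _subarray above: casting a
+# plain float operand to a structured dtype duplicates each value into
+# every field, including subarray fields. The dtype and helper name here
+# are choices of this sketch.
+import numpy as np
+
+def _sketch_buffered_struct_cast():
+    sdt = [('a', 'f4'), ('b', 'f8', (2,))]
+    a = np.arange(3, dtype='f4')
+    it = np.nditer(a, ['buffered'], ['readonly'],
+                   casting='unsafe', op_dtypes=sdt)
+    vals = [np.array(x) for x in it]
+    assert vals[1]['a'] == 1.0
+    assert (vals[1]['b'] == [1.0, 1.0]).all()
+    return vals
+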
assert_equal(x['a'][2:], [0, 0, 0, 0]) + count += 1 + + # vector -> matrix (broadcasts) + sdt1 = [('a', 'f8', (2,))] + sdt2 = [('a', 'f4', (2, 2))] + a = np.zeros((6,), dtype=sdt1) + a['a'] = np.arange(6*2).reshape(6, 2) + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes=sdt2) + assert_equal(i[0].dtype, np.dtype(sdt2)) + count = 0 + for x in i: + assert_equal(x['a'][0], a[count]['a']) + assert_equal(x['a'][1], a[count]['a']) + count += 1 + + # vector -> matrix (broadcasts and zero-pads) + sdt1 = [('a', 'f8', (2, 1))] + sdt2 = [('a', 'f4', (3, 2))] + a = np.zeros((6,), dtype=sdt1) + a['a'] = np.arange(6*2).reshape(6, 2, 1) + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes=sdt2) + assert_equal(i[0].dtype, np.dtype(sdt2)) + count = 0 + for x in i: + assert_equal(x['a'][:2, 0], a[count]['a'][:, 0]) + assert_equal(x['a'][:2, 1], a[count]['a'][:, 0]) + assert_equal(x['a'][2,:], [0, 0]) + count += 1 + + # matrix -> matrix (truncates and zero-pads) + sdt1 = [('a', 'f8', (2, 3))] + sdt2 = [('a', 'f4', (3, 2))] + a = np.zeros((6,), dtype=sdt1) + a['a'] = np.arange(6*2*3).reshape(6, 2, 3) + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes=sdt2) + assert_equal(i[0].dtype, np.dtype(sdt2)) + count = 0 + for x in i: + assert_equal(x['a'][:2, 0], a[count]['a'][:, 0]) + assert_equal(x['a'][:2, 1], a[count]['a'][:, 1]) + assert_equal(x['a'][2,:], [0, 0]) + count += 1 + +def test_iter_buffering_badwriteback(): + # Writing back from a buffer cannot combine elements + + # a needs write buffering, but had a broadcast dimension + a = np.arange(6).reshape(2, 3, 1) + b = np.arange(12).reshape(2, 3, 2) + assert_raises(ValueError, nditer, [a, b], + ['buffered', 'external_loop'], + [['readwrite'], ['writeonly']], + order='C') + + # But if a is readonly, it's fine + nditer([a, b], ['buffered', 'external_loop'], + [['readonly'], ['writeonly']], + order='C') + + # If a has just one element, it's fine too (constant 0 stride, a reduction) + a = np.arange(1).reshape(1, 1, 1) + nditer([a, b], ['buffered', 'external_loop', 'reduce_ok'], + [['readwrite'], ['writeonly']], + order='C') + + # check that it fails on other dimensions too + a = np.arange(6).reshape(1, 3, 2) + assert_raises(ValueError, nditer, [a, b], + ['buffered', 'external_loop'], + [['readwrite'], ['writeonly']], + order='C') + a = np.arange(4).reshape(2, 1, 2) + assert_raises(ValueError, nditer, [a, b], + ['buffered', 'external_loop'], + [['readwrite'], ['writeonly']], + order='C') + +def test_iter_buffering_string(): + # Safe casting disallows shrinking strings + a = np.array(['abc', 'a', 'abcd'], dtype=np.bytes_) + assert_equal(a.dtype, np.dtype('S4')) + assert_raises(TypeError, nditer, a, ['buffered'], ['readonly'], + op_dtypes='S2') + i = nditer(a, ['buffered'], ['readonly'], op_dtypes='S6') + assert_equal(i[0], b'abc') + assert_equal(i[0].dtype, np.dtype('S6')) + + a = np.array(['abc', 'a', 'abcd'], dtype=np.str_) + assert_equal(a.dtype, np.dtype('U4')) + assert_raises(TypeError, nditer, a, ['buffered'], ['readonly'], + op_dtypes='U2') + i = nditer(a, ['buffered'], ['readonly'], op_dtypes='U6') + assert_equal(i[0], 'abc') + assert_equal(i[0].dtype, np.dtype('U6')) + +def test_iter_buffering_growinner(): + # Test that the inner loop grows when no buffering is needed + a = np.arange(30) + i = nditer(a, ['buffered', 'growinner', 'external_loop'], + buffersize=5) + # Should end up with just one inner loop here + assert_equal(i[0].size, a.size) + + 
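+
+# --- Illustrative sketch (not part of the upstream test file) ---
+# Minimal illustration of 'growinner' with 'external_loop', as asserted in
+# test_iter_buffering_growinner above: when no buffering is actually
+# needed, the inner loop may grow past buffersize and the whole array
+# comes back as a single 1-D chunk. The helper name is local to this
+# sketch.
+import numpy as np
+
+def _sketch_growinner():
+    a = np.arange(30)
+    it = np.nditer(a, ['buffered', 'growinner', 'external_loop'],
+                   buffersize=5)
+    chunk = next(it)
+    assert chunk.size == a.size   # one chunk covering the whole array
+    return chunk
+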
+@pytest.mark.slow +def test_iter_buffered_reduce_reuse(): + # large enough array for all views, including negative strides. + a = np.arange(2*3**5)[3**5:3**5+1] + flags = ['buffered', 'delay_bufalloc', 'multi_index', 'reduce_ok', 'refs_ok'] + op_flags = [('readonly',), ('readwrite', 'allocate')] + op_axes_list = [[(0, 1, 2), (0, 1, -1)], [(0, 1, 2), (0, -1, -1)]] + # wrong dtype to force buffering + op_dtypes = [float, a.dtype] + + def get_params(): + for xs in range(-3**2, 3**2 + 1): + for ys in range(xs, 3**2 + 1): + for op_axes in op_axes_list: + # last stride is reduced and because of that not + # important for this test, as it is the inner stride. + strides = (xs * a.itemsize, ys * a.itemsize, a.itemsize) + arr = np.lib.stride_tricks.as_strided(a, (3, 3, 3), strides) + + for skip in [0, 1]: + yield arr, op_axes, skip + + for arr, op_axes, skip in get_params(): + nditer2 = np.nditer([arr.copy(), None], + op_axes=op_axes, flags=flags, op_flags=op_flags, + op_dtypes=op_dtypes) + with nditer2: + nditer2.operands[-1][...] = 0 + nditer2.reset() + nditer2.iterindex = skip + + for (a2_in, b2_in) in nditer2: + b2_in += a2_in.astype(np.int_) + + comp_res = nditer2.operands[-1] + + for bufsize in range(0, 3**3): + nditer1 = np.nditer([arr, None], + op_axes=op_axes, flags=flags, op_flags=op_flags, + buffersize=bufsize, op_dtypes=op_dtypes) + with nditer1: + nditer1.operands[-1][...] = 0 + nditer1.reset() + nditer1.iterindex = skip + + for (a1_in, b1_in) in nditer1: + b1_in += a1_in.astype(np.int_) + + res = nditer1.operands[-1] + assert_array_equal(res, comp_res) + + +def test_iter_no_broadcast(): + # Test that the no_broadcast flag works + a = np.arange(24).reshape(2, 3, 4) + b = np.arange(6).reshape(2, 3, 1) + c = np.arange(12).reshape(3, 4) + + nditer([a, b, c], [], + [['readonly', 'no_broadcast'], + ['readonly'], ['readonly']]) + assert_raises(ValueError, nditer, [a, b, c], [], + [['readonly'], ['readonly', 'no_broadcast'], ['readonly']]) + assert_raises(ValueError, nditer, [a, b, c], [], + [['readonly'], ['readonly'], ['readonly', 'no_broadcast']]) + + +class TestIterNested: + + def test_basic(self): + # Test nested iteration basic usage + a = arange(12).reshape(2, 3, 2) + + i, j = np.nested_iters(a, [[0], [1, 2]]) + vals = [list(j) for _ in i] + assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]]) + + i, j = np.nested_iters(a, [[0, 1], [2]]) + vals = [list(j) for _ in i] + assert_equal(vals, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]]) + + i, j = np.nested_iters(a, [[0, 2], [1]]) + vals = [list(j) for _ in i] + assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]]) + + def test_reorder(self): + # Test nested iteration basic usage + a = arange(12).reshape(2, 3, 2) + + # In 'K' order (default), it gets reordered + i, j = np.nested_iters(a, [[0], [2, 1]]) + vals = [list(j) for _ in i] + assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]]) + + i, j = np.nested_iters(a, [[1, 0], [2]]) + vals = [list(j) for _ in i] + assert_equal(vals, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]]) + + i, j = np.nested_iters(a, [[2, 0], [1]]) + vals = [list(j) for _ in i] + assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]]) + + # In 'C' order, it doesn't + i, j = np.nested_iters(a, [[0], [2, 1]], order='C') + vals = [list(j) for _ in i] + assert_equal(vals, [[0, 2, 4, 1, 3, 5], [6, 8, 10, 7, 9, 11]]) + + i, j = np.nested_iters(a, [[1, 0], [2]], order='C') + vals = [list(j) for _ in i] + assert_equal(vals, [[0, 1], [6, 7], [2, 3], [8, 9], [4, 5], [10, 
11]]) + + i, j = np.nested_iters(a, [[2, 0], [1]], order='C') + vals = [list(j) for _ in i] + assert_equal(vals, [[0, 2, 4], [6, 8, 10], [1, 3, 5], [7, 9, 11]]) + + def test_flip_axes(self): + # Test nested iteration with negative axes + a = arange(12).reshape(2, 3, 2)[::-1, ::-1, ::-1] + + # In 'K' order (default), the axes all get flipped + i, j = np.nested_iters(a, [[0], [1, 2]]) + vals = [list(j) for _ in i] + assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]]) + + i, j = np.nested_iters(a, [[0, 1], [2]]) + vals = [list(j) for _ in i] + assert_equal(vals, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]]) + + i, j = np.nested_iters(a, [[0, 2], [1]]) + vals = [list(j) for _ in i] + assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]]) + + # In 'C' order, flipping axes is disabled + i, j = np.nested_iters(a, [[0], [1, 2]], order='C') + vals = [list(j) for _ in i] + assert_equal(vals, [[11, 10, 9, 8, 7, 6], [5, 4, 3, 2, 1, 0]]) + + i, j = np.nested_iters(a, [[0, 1], [2]], order='C') + vals = [list(j) for _ in i] + assert_equal(vals, [[11, 10], [9, 8], [7, 6], [5, 4], [3, 2], [1, 0]]) + + i, j = np.nested_iters(a, [[0, 2], [1]], order='C') + vals = [list(j) for _ in i] + assert_equal(vals, [[11, 9, 7], [10, 8, 6], [5, 3, 1], [4, 2, 0]]) + + def test_broadcast(self): + # Test nested iteration with broadcasting + a = arange(2).reshape(2, 1) + b = arange(3).reshape(1, 3) + + i, j = np.nested_iters([a, b], [[0], [1]]) + vals = [list(j) for _ in i] + assert_equal(vals, [[[0, 0], [0, 1], [0, 2]], [[1, 0], [1, 1], [1, 2]]]) + + i, j = np.nested_iters([a, b], [[1], [0]]) + vals = [list(j) for _ in i] + assert_equal(vals, [[[0, 0], [1, 0]], [[0, 1], [1, 1]], [[0, 2], [1, 2]]]) + + def test_dtype_copy(self): + # Test nested iteration with a copy to change dtype + + # copy + a = arange(6, dtype='i4').reshape(2, 3) + i, j = np.nested_iters(a, [[0], [1]], + op_flags=['readonly', 'copy'], + op_dtypes='f8') + assert_equal(j[0].dtype, np.dtype('f8')) + vals = [list(j) for _ in i] + assert_equal(vals, [[0, 1, 2], [3, 4, 5]]) + vals = None + + # writebackifcopy - using context manager + a = arange(6, dtype='f4').reshape(2, 3) + i, j = np.nested_iters(a, [[0], [1]], + op_flags=['readwrite', 'updateifcopy'], + casting='same_kind', + op_dtypes='f8') + with i, j: + assert_equal(j[0].dtype, np.dtype('f8')) + for x in i: + for y in j: + y[...] += 1 + assert_equal(a, [[0, 1, 2], [3, 4, 5]]) + assert_equal(a, [[1, 2, 3], [4, 5, 6]]) + + # writebackifcopy - using close() + a = arange(6, dtype='f4').reshape(2, 3) + i, j = np.nested_iters(a, [[0], [1]], + op_flags=['readwrite', 'updateifcopy'], + casting='same_kind', + op_dtypes='f8') + assert_equal(j[0].dtype, np.dtype('f8')) + for x in i: + for y in j: + y[...] += 1 + assert_equal(a, [[0, 1, 2], [3, 4, 5]]) + i.close() + j.close() + assert_equal(a, [[1, 2, 3], [4, 5, 6]]) + + def test_dtype_buffered(self): + # Test nested iteration with buffering to change dtype + + a = arange(6, dtype='f4').reshape(2, 3) + i, j = np.nested_iters(a, [[0], [1]], + flags=['buffered'], + op_flags=['readwrite'], + casting='same_kind', + op_dtypes='f8') + assert_equal(j[0].dtype, np.dtype('f8')) + for x in i: + for y in j: + y[...] 
+= 1 + assert_equal(a, [[1, 2, 3], [4, 5, 6]]) + + def test_0d(self): + a = np.arange(12).reshape(2, 3, 2) + i, j = np.nested_iters(a, [[], [1, 0, 2]]) + vals = [list(j) for _ in i] + assert_equal(vals, [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]]) + + i, j = np.nested_iters(a, [[1, 0, 2], []]) + vals = [list(j) for _ in i] + assert_equal(vals, [[0], [1], [2], [3], [4], [5], [6], [7], [8], [9], [10], [11]]) + + i, j, k = np.nested_iters(a, [[2, 0], [], [1]]) + vals = [] + for x in i: + for y in j: + vals.append([z for z in k]) + assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]]) + + def test_iter_nested_iters_dtype_buffered(self): + # Test nested iteration with buffering to change dtype + + a = arange(6, dtype='f4').reshape(2, 3) + i, j = np.nested_iters(a, [[0], [1]], + flags=['buffered'], + op_flags=['readwrite'], + casting='same_kind', + op_dtypes='f8') + with i, j: + assert_equal(j[0].dtype, np.dtype('f8')) + for x in i: + for y in j: + y[...] += 1 + assert_equal(a, [[1, 2, 3], [4, 5, 6]]) + +def test_iter_reduction_error(): + + a = np.arange(6) + assert_raises(ValueError, nditer, [a, None], [], + [['readonly'], ['readwrite', 'allocate']], + op_axes=[[0], [-1]]) + + a = np.arange(6).reshape(2, 3) + assert_raises(ValueError, nditer, [a, None], ['external_loop'], + [['readonly'], ['readwrite', 'allocate']], + op_axes=[[0, 1], [-1, -1]]) + +def test_iter_reduction(): + # Test doing reductions with the iterator + + a = np.arange(6) + i = nditer([a, None], ['reduce_ok'], + [['readonly'], ['readwrite', 'allocate']], + op_axes=[[0], [-1]]) + # Need to initialize the output operand to the addition unit + with i: + i.operands[1][...] = 0 + # Do the reduction + for x, y in i: + y[...] += x + # Since no axes were specified, should have allocated a scalar + assert_equal(i.operands[1].ndim, 0) + assert_equal(i.operands[1], np.sum(a)) + + a = np.arange(6).reshape(2, 3) + i = nditer([a, None], ['reduce_ok', 'external_loop'], + [['readonly'], ['readwrite', 'allocate']], + op_axes=[[0, 1], [-1, -1]]) + # Need to initialize the output operand to the addition unit + with i: + i.operands[1][...] = 0 + # Reduction shape/strides for the output + assert_equal(i[1].shape, (6,)) + assert_equal(i[1].strides, (0,)) + # Do the reduction + for x, y in i: + # Use a for loop instead of ``y[...] += x`` + # (equivalent to ``y[...] = y[...].copy() + x``), + # because y has zero strides we use for the reduction + for j in range(len(y)): + y[j] += x[j] + # Since no axes were specified, should have allocated a scalar + assert_equal(i.operands[1].ndim, 0) + assert_equal(i.operands[1], np.sum(a)) + + # This is a tricky reduction case for the buffering double loop + # to handle + a = np.ones((2, 3, 5)) + it1 = nditer([a, None], ['reduce_ok', 'external_loop'], + [['readonly'], ['readwrite', 'allocate']], + op_axes=[None, [0, -1, 1]]) + it2 = nditer([a, None], ['reduce_ok', 'external_loop', + 'buffered', 'delay_bufalloc'], + [['readonly'], ['readwrite', 'allocate']], + op_axes=[None, [0, -1, 1]], buffersize=10) + with it1, it2: + it1.operands[1].fill(0) + it2.operands[1].fill(0) + it2.reset() + for x in it1: + x[1][...] += x[0] + for x in it2: + x[1][...] 
+= x[0] + assert_equal(it1.operands[1], it2.operands[1]) + assert_equal(it2.operands[1].sum(), a.size) + +def test_iter_buffering_reduction(): + # Test doing buffered reductions with the iterator + + a = np.arange(6) + b = np.array(0., dtype='f8').byteswap().newbyteorder() + i = nditer([a, b], ['reduce_ok', 'buffered'], + [['readonly'], ['readwrite', 'nbo']], + op_axes=[[0], [-1]]) + with i: + assert_equal(i[1].dtype, np.dtype('f8')) + assert_(i[1].dtype != b.dtype) + # Do the reduction + for x, y in i: + y[...] += x + # Since no axes were specified, should have allocated a scalar + assert_equal(b, np.sum(a)) + + a = np.arange(6).reshape(2, 3) + b = np.array([0, 0], dtype='f8').byteswap().newbyteorder() + i = nditer([a, b], ['reduce_ok', 'external_loop', 'buffered'], + [['readonly'], ['readwrite', 'nbo']], + op_axes=[[0, 1], [0, -1]]) + # Reduction shape/strides for the output + with i: + assert_equal(i[1].shape, (3,)) + assert_equal(i[1].strides, (0,)) + # Do the reduction + for x, y in i: + # Use a for loop instead of ``y[...] += x`` + # (equivalent to ``y[...] = y[...].copy() + x``), + # because y has zero strides we use for the reduction + for j in range(len(y)): + y[j] += x[j] + assert_equal(b, np.sum(a, axis=1)) + + # Iterator inner double loop was wrong on this one + p = np.arange(2) + 1 + it = np.nditer([p, None], + ['delay_bufalloc', 'reduce_ok', 'buffered', 'external_loop'], + [['readonly'], ['readwrite', 'allocate']], + op_axes=[[-1, 0], [-1, -1]], + itershape=(2, 2)) + with it: + it.operands[1].fill(0) + it.reset() + assert_equal(it[0], [1, 2, 1, 2]) + + # Iterator inner loop should take argument contiguity into account + x = np.ones((7, 13, 8), np.int8)[4:6,1:11:6,1:5].transpose(1, 2, 0) + x[...] = np.arange(x.size).reshape(x.shape) + y_base = np.arange(4*4, dtype=np.int8).reshape(4, 4) + y_base_copy = y_base.copy() + y = y_base[::2,:,None] + + it = np.nditer([y, x], + ['buffered', 'external_loop', 'reduce_ok'], + [['readwrite'], ['readonly']]) + with it: + for a, b in it: + a.fill(2) + + assert_equal(y_base[1::2], y_base_copy[1::2]) + assert_equal(y_base[::2], 2) + +def test_iter_buffering_reduction_reuse_reduce_loops(): + # There was a bug triggering reuse of the reduce loop inappropriately, + # which caused processing to happen in unnecessarily small chunks + # and overran the buffer. 
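+
+# --- Illustrative sketch (not part of the upstream test file) ---
+# A compact version of the reduction pattern from test_iter_reduction
+# above: with 'reduce_ok', an allocated output mapped to no iteration
+# axes (op_axes [-1]) broadcasts against every input element, so
+# accumulating into it computes a sum. The helper name is local to this
+# sketch.
+import numpy as np
+
+def _sketch_reduce_ok_sum():
+    a = np.arange(6)
+    it = np.nditer([a, None], ['reduce_ok'],
+                   [['readonly'], ['readwrite', 'allocate']],
+                   op_axes=[[0], [-1]])
+    with it:
+        it.operands[1][...] = 0        # initialise the reduction target
+        for x, y in it:
+            y[...] += x
+        total = int(it.operands[1])
+    assert total == a.sum()
+    return total
+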
+ + a = np.zeros((2, 7)) + b = np.zeros((1, 7)) + it = np.nditer([a, b], flags=['reduce_ok', 'external_loop', 'buffered'], + op_flags=[['readonly'], ['readwrite']], + buffersize=5) + + with it: + bufsizes = [x.shape[0] for x, y in it] + assert_equal(bufsizes, [5, 2, 5, 2]) + assert_equal(sum(bufsizes), a.size) + +def test_iter_writemasked_badinput(): + a = np.zeros((2, 3)) + b = np.zeros((3,)) + m = np.array([[True, True, False], [False, True, False]]) + m2 = np.array([True, True, False]) + m3 = np.array([0, 1, 1], dtype='u1') + mbad1 = np.array([0, 1, 1], dtype='i1') + mbad2 = np.array([0, 1, 1], dtype='f4') + + # Need an 'arraymask' if any operand is 'writemasked' + assert_raises(ValueError, nditer, [a, m], [], + [['readwrite', 'writemasked'], ['readonly']]) + + # A 'writemasked' operand must not be readonly + assert_raises(ValueError, nditer, [a, m], [], + [['readonly', 'writemasked'], ['readonly', 'arraymask']]) + + # 'writemasked' and 'arraymask' may not be used together + assert_raises(ValueError, nditer, [a, m], [], + [['readonly'], ['readwrite', 'arraymask', 'writemasked']]) + + # 'arraymask' may only be specified once + assert_raises(ValueError, nditer, [a, m, m2], [], + [['readwrite', 'writemasked'], + ['readonly', 'arraymask'], + ['readonly', 'arraymask']]) + + # An 'arraymask' with nothing 'writemasked' also doesn't make sense + assert_raises(ValueError, nditer, [a, m], [], + [['readwrite'], ['readonly', 'arraymask']]) + + # A writemasked reduction requires a similarly smaller mask + assert_raises(ValueError, nditer, [a, b, m], ['reduce_ok'], + [['readonly'], + ['readwrite', 'writemasked'], + ['readonly', 'arraymask']]) + # But this should work with a smaller/equal mask to the reduction operand + np.nditer([a, b, m2], ['reduce_ok'], + [['readonly'], + ['readwrite', 'writemasked'], + ['readonly', 'arraymask']]) + # The arraymask itself cannot be a reduction + assert_raises(ValueError, nditer, [a, b, m2], ['reduce_ok'], + [['readonly'], + ['readwrite', 'writemasked'], + ['readwrite', 'arraymask']]) + + # A uint8 mask is ok too + np.nditer([a, m3], ['buffered'], + [['readwrite', 'writemasked'], + ['readonly', 'arraymask']], + op_dtypes=['f4', None], + casting='same_kind') + # An int8 mask isn't ok + assert_raises(TypeError, np.nditer, [a, mbad1], ['buffered'], + [['readwrite', 'writemasked'], + ['readonly', 'arraymask']], + op_dtypes=['f4', None], + casting='same_kind') + # A float32 mask isn't ok + assert_raises(TypeError, np.nditer, [a, mbad2], ['buffered'], + [['readwrite', 'writemasked'], + ['readonly', 'arraymask']], + op_dtypes=['f4', None], + casting='same_kind') + + +def _is_buffered(iterator): + try: + iterator.itviews + except ValueError: + return True + return False + +@pytest.mark.parametrize("a", + [np.zeros((3,), dtype='f8'), + np.zeros((9876, 3*5), dtype='f8')[::2, :], + np.zeros((4, 312, 124, 3), dtype='f8')[::2, :, ::2, :], + # Also test with the last dimension strided (so it does not fit if + # there is repeated access) + np.zeros((9,), dtype='f8')[::3], + np.zeros((9876, 3*10), dtype='f8')[::2, ::5], + np.zeros((4, 312, 124, 3), dtype='f8')[::2, :, ::2, ::-1]]) +def test_iter_writemasked(a): + # Note, the slicing above is to ensure that nditer cannot combine multiple + # axes into one. The repetition is just to make things a bit more + # interesting. + shape = a.shape + reps = shape[-1] // 3 + msk = np.empty(shape, dtype=bool) + msk[...] = [True, True, False] * reps + + # When buffering is unused, 'writemasked' effectively does nothing. 
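+
+# --- Illustrative sketch (not part of the upstream test file) ---
+# A minimal example of the 'writemasked'/'arraymask' combination exercised
+# by test_iter_writemasked: when buffering is forced by a cast (see the
+# op_dtypes case further down), only the elements selected by the mask are
+# copied back from the buffer. The helper name and array sizes are choices
+# of this sketch.
+import numpy as np
+
+def _sketch_writemasked():
+    a = np.zeros(6, dtype='f8')
+    mask = np.array([True, False, True, False, True, False])
+    it = np.nditer([a, mask], ['buffered'],
+                   [['readwrite', 'writemasked'],
+                    ['readonly', 'arraymask']],
+                   op_dtypes=['i8', None], casting='unsafe')
+    with it:
+        for x, m in it:
+            x[...] = 7
+    assert (a == np.where(mask, 7.0, 0.0)).all()
+    return a
+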
+ # It's up to the user of the iterator to obey the requested semantics. + it = np.nditer([a, msk], [], + [['readwrite', 'writemasked'], + ['readonly', 'arraymask']]) + with it: + for x, m in it: + x[...] = 1 + # Because we violated the semantics, all the values became 1 + assert_equal(a, np.broadcast_to([1, 1, 1] * reps, shape)) + + # Even if buffering is enabled, we still may be accessing the array + # directly. + it = np.nditer([a, msk], ['buffered'], + [['readwrite', 'writemasked'], + ['readonly', 'arraymask']]) + # @seberg: I honestly don't currently understand why a "buffered" iterator + # would end up not using a buffer for the small array here at least when + # "writemasked" is used, that seems confusing... Check by testing for + # actual memory overlap! + is_buffered = True + with it: + for x, m in it: + x[...] = 2.5 + if np.may_share_memory(x, a): + is_buffered = False + + if not is_buffered: + # Because we violated the semantics, all the values became 2.5 + assert_equal(a, np.broadcast_to([2.5, 2.5, 2.5] * reps, shape)) + else: + # For large sizes, the iterator may be buffered: + assert_equal(a, np.broadcast_to([2.5, 2.5, 1] * reps, shape)) + a[...] = 2.5 + + # If buffering will definitely happening, for instance because of + # a cast, only the items selected by the mask will be copied back from + # the buffer. + it = np.nditer([a, msk], ['buffered'], + [['readwrite', 'writemasked'], + ['readonly', 'arraymask']], + op_dtypes=['i8', None], + casting='unsafe') + with it: + for x, m in it: + x[...] = 3 + # Even though we violated the semantics, only the selected values + # were copied back + assert_equal(a, np.broadcast_to([3, 3, 2.5] * reps, shape)) + + +@pytest.mark.parametrize(["mask", "mask_axes"], [ + # Allocated operand (only broadcasts with -1) + (None, [-1, 0]), + # Reduction along the first dimension (with and without op_axes) + (np.zeros((1, 4), dtype="bool"), [0, 1]), + (np.zeros((1, 4), dtype="bool"), None), + # Test 0-D and -1 op_axes + (np.zeros(4, dtype="bool"), [-1, 0]), + (np.zeros((), dtype="bool"), [-1, -1]), + (np.zeros((), dtype="bool"), None)]) +def test_iter_writemasked_broadcast_error(mask, mask_axes): + # This assumes that a readwrite mask makes sense. This is likely not the + # case and should simply be deprecated. + arr = np.zeros((3, 4)) + itflags = ["reduce_ok"] + mask_flags = ["arraymask", "readwrite", "allocate"] + a_flags = ["writeonly", "writemasked"] + if mask_axes is None: + op_axes = None + else: + op_axes = [mask_axes, [0, 1]] + + with assert_raises(ValueError): + np.nditer((mask, arr), flags=itflags, op_flags=[mask_flags, a_flags], + op_axes=op_axes) + + +def test_iter_writemasked_decref(): + # force casting (to make it interesting) by using a structured dtype. + arr = np.arange(10000).astype(">i,O") + original = arr.copy() + mask = np.random.randint(0, 2, size=10000).astype(bool) + + it = np.nditer([arr, mask], ['buffered', "refs_ok"], + [['readwrite', 'writemasked'], + ['readonly', 'arraymask']], + op_dtypes=["<i,O", "?"]) + singleton = object() + if HAS_REFCOUNT: + count = sys.getrefcount(singleton) + for buf, mask_buf in it: + buf[...] 
= (3, singleton) + + del buf, mask_buf, it # delete everything to ensure correct cleanup + + if HAS_REFCOUNT: + # The buffer would have included additional items, they must be + # cleared correctly: + assert sys.getrefcount(singleton) - count == np.count_nonzero(mask) + + assert_array_equal(arr[~mask], original[~mask]) + assert (arr[mask] == np.array((3, singleton), arr.dtype)).all() + del arr + + if HAS_REFCOUNT: + assert sys.getrefcount(singleton) == count + + +def test_iter_non_writable_attribute_deletion(): + it = np.nditer(np.ones(2)) + attr = ["value", "shape", "operands", "itviews", "has_delayed_bufalloc", + "iterationneedsapi", "has_multi_index", "has_index", "dtypes", + "ndim", "nop", "itersize", "finished"] + + for s in attr: + assert_raises(AttributeError, delattr, it, s) + + +def test_iter_writable_attribute_deletion(): + it = np.nditer(np.ones(2)) + attr = [ "multi_index", "index", "iterrange", "iterindex"] + for s in attr: + assert_raises(AttributeError, delattr, it, s) + + +def test_iter_element_deletion(): + it = np.nditer(np.ones(3)) + try: + del it[1] + del it[1:2] + except TypeError: + pass + except Exception: + raise AssertionError + +def test_iter_allocated_array_dtypes(): + # If the dtype of an allocated output has a shape, the shape gets + # tacked onto the end of the result. + it = np.nditer(([1, 3, 20], None), op_dtypes=[None, ('i4', (2,))]) + for a, b in it: + b[0] = a - 1 + b[1] = a + 1 + assert_equal(it.operands[1], [[0, 2], [2, 4], [19, 21]]) + + # Check the same (less sensitive) thing when `op_axes` with -1 is given. + it = np.nditer(([[1, 3, 20]], None), op_dtypes=[None, ('i4', (2,))], + flags=["reduce_ok"], op_axes=[None, (-1, 0)]) + for a, b in it: + b[0] = a - 1 + b[1] = a + 1 + assert_equal(it.operands[1], [[0, 2], [2, 4], [19, 21]]) + + # Make sure this works for scalars too + it = np.nditer((10, 2, None), op_dtypes=[None, None, ('i4', (2, 2))]) + for a, b, c in it: + c[0, 0] = a - b + c[0, 1] = a + b + c[1, 0] = a * b + c[1, 1] = a / b + assert_equal(it.operands[2], [[8, 12], [20, 5]]) + + +def test_0d_iter(): + # Basic test for iteration of 0-d arrays: + i = nditer([2, 3], ['multi_index'], [['readonly']]*2) + assert_equal(i.ndim, 0) + assert_equal(next(i), (2, 3)) + assert_equal(i.multi_index, ()) + assert_equal(i.iterindex, 0) + assert_raises(StopIteration, next, i) + # test reset: + i.reset() + assert_equal(next(i), (2, 3)) + assert_raises(StopIteration, next, i) + + # test forcing to 0-d + i = nditer(np.arange(5), ['multi_index'], [['readonly']], op_axes=[()]) + assert_equal(i.ndim, 0) + assert_equal(len(i), 1) + + i = nditer(np.arange(5), ['multi_index'], [['readonly']], + op_axes=[()], itershape=()) + assert_equal(i.ndim, 0) + assert_equal(len(i), 1) + + # passing an itershape alone is not enough, the op_axes are also needed + with assert_raises(ValueError): + nditer(np.arange(5), ['multi_index'], [['readonly']], itershape=()) + + # Test a more complex buffered casting case (same as another test above) + sdt = [('a', 'f4'), ('b', 'i8'), ('c', 'c8', (2, 3)), ('d', 'O')] + a = np.array(0.5, dtype='f4') + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', op_dtypes=sdt) + vals = next(i) + assert_equal(vals['a'], 0.5) + assert_equal(vals['b'], 0) + assert_equal(vals['c'], [[(0.5)]*3]*2) + assert_equal(vals['d'], 0.5) + +def test_object_iter_cleanup(): + # see gh-18450 + # object arrays can raise a python exception in ufunc inner loops using + # nditer, which should cause iteration to stop & cleanup. 
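+
+# --- Illustrative sketch (not part of the upstream test file) ---
+# A standalone copy of the pattern asserted in
+# test_iter_allocated_array_dtypes above: when an allocated output's dtype
+# has a subarray shape, that shape is appended to the result. The helper
+# name is local to this sketch.
+import numpy as np
+
+def _sketch_allocated_subarray_dtype():
+    it = np.nditer(([1, 3, 20], None), op_dtypes=[None, ('i4', (2,))])
+    for a, b in it:
+        b[0] = a - 1
+        b[1] = a + 1
+    out = it.operands[1]
+    assert out.shape == (3, 2)
+    assert (out == [[0, 2], [2, 4], [19, 21]]).all()
+    return out
+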
There were bugs + # in the nditer cleanup when decref'ing object arrays. + # This test would trigger valgrind "uninitialized read" before the bugfix. + assert_raises(TypeError, lambda: np.zeros((17000, 2), dtype='f4') * None) + + # this more explicit code also triggers the invalid access + arr = np.arange(np.BUFSIZE * 10).reshape(10, -1).astype(str) + oarr = arr.astype(object) + oarr[:, -1] = None + assert_raises(TypeError, lambda: np.add(oarr[:, ::-1], arr[:, ::-1])) + + # followup: this tests for a bug introduced in the first pass of gh-18450, + # caused by an incorrect fallthrough of the TypeError + class T: + def __bool__(self): + raise TypeError("Ambiguous") + assert_raises(TypeError, np.logical_or.reduce, + np.array([T(), T()], dtype='O')) + +def test_object_iter_cleanup_reduce(): + # Similar as above, but a complex reduction case that was previously + # missed (see gh-18810). + # The following array is special in that it cannot be flattened: + arr = np.array([[None, 1], [-1, -1], [None, 2], [-1, -1]])[::2] + with pytest.raises(TypeError): + np.sum(arr) + +@pytest.mark.parametrize("arr", [ + np.ones((8000, 4, 2), dtype=object)[:, ::2, :], + np.ones((8000, 4, 2), dtype=object, order="F")[:, ::2, :], + np.ones((8000, 4, 2), dtype=object)[:, ::2, :].copy("F")]) +def test_object_iter_cleanup_large_reduce(arr): + # More complicated calls are possible for large arrays: + out = np.ones(8000, dtype=np.intp) + # force casting with `dtype=object` + res = np.sum(arr, axis=(1, 2), dtype=object, out=out) + assert_array_equal(res, np.full(8000, 4, dtype=object)) + +def test_iter_too_large(): + # The total size of the iterator must not exceed the maximum intp due + # to broadcasting. Dividing by 1024 will keep it small enough to + # give a legal array. + size = np.iinfo(np.intp).max // 1024 + arr = np.lib.stride_tricks.as_strided(np.zeros(1), (size,), (0,)) + assert_raises(ValueError, nditer, (arr, arr[:, None])) + # test the same for multiindex. That may get more interesting when + # removing 0 dimensional axis is allowed (since an iterator can grow then) + assert_raises(ValueError, nditer, + (arr, arr[:, None]), flags=['multi_index']) + + +def test_iter_too_large_with_multiindex(): + # When a multi index is being tracked, the error is delayed this + # checks the delayed error messages and getting below that by + # removing an axis. + base_size = 2**10 + num = 1 + while base_size**num < np.iinfo(np.intp).max: + num += 1 + + shape_template = [1, 1] * num + arrays = [] + for i in range(num): + shape = shape_template[:] + shape[i * 2] = 2**10 + arrays.append(np.empty(shape)) + arrays = tuple(arrays) + + # arrays are now too large to be broadcast. The different modes test + # different nditer functionality with or without GIL. 
+ for mode in range(6): + with assert_raises(ValueError): + _multiarray_tests.test_nditer_too_large(arrays, -1, mode) + # but if we do nothing with the nditer, it can be constructed: + _multiarray_tests.test_nditer_too_large(arrays, -1, 7) + + # When an axis is removed, things should work again (half the time): + for i in range(num): + for mode in range(6): + # an axis with size 1024 is removed: + _multiarray_tests.test_nditer_too_large(arrays, i*2, mode) + # an axis with size 1 is removed: + with assert_raises(ValueError): + _multiarray_tests.test_nditer_too_large(arrays, i*2 + 1, mode) + +def test_writebacks(): + a = np.arange(6, dtype='f4') + au = a.byteswap().newbyteorder() + assert_(a.dtype.byteorder != au.dtype.byteorder) + it = nditer(au, [], [['readwrite', 'updateifcopy']], + casting='equiv', op_dtypes=[np.dtype('f4')]) + with it: + it.operands[0][:] = 100 + assert_equal(au, 100) + # do it again, this time raise an error, + it = nditer(au, [], [['readwrite', 'updateifcopy']], + casting='equiv', op_dtypes=[np.dtype('f4')]) + try: + with it: + assert_equal(au.flags.writeable, False) + it.operands[0][:] = 0 + raise ValueError('exit context manager on exception') + except: + pass + assert_equal(au, 0) + assert_equal(au.flags.writeable, True) + # cannot reuse i outside context manager + assert_raises(ValueError, getattr, it, 'operands') + + it = nditer(au, [], [['readwrite', 'updateifcopy']], + casting='equiv', op_dtypes=[np.dtype('f4')]) + with it: + x = it.operands[0] + x[:] = 6 + assert_(x.flags.writebackifcopy) + assert_equal(au, 6) + assert_(not x.flags.writebackifcopy) + x[:] = 123 # x.data still valid + assert_equal(au, 6) # but not connected to au + + it = nditer(au, [], + [['readwrite', 'updateifcopy']], + casting='equiv', op_dtypes=[np.dtype('f4')]) + # reentering works + with it: + with it: + for x in it: + x[...] = 123 + + it = nditer(au, [], + [['readwrite', 'updateifcopy']], + casting='equiv', op_dtypes=[np.dtype('f4')]) + # make sure exiting the inner context manager closes the iterator + with it: + with it: + for x in it: + x[...] = 123 + assert_raises(ValueError, getattr, it, 'operands') + # do not crash if original data array is decrefed + it = nditer(au, [], + [['readwrite', 'updateifcopy']], + casting='equiv', op_dtypes=[np.dtype('f4')]) + del au + with it: + for x in it: + x[...] 
= 123 + # make sure we cannot reenter the closed iterator + enter = it.__enter__ + assert_raises(RuntimeError, enter) + +def test_close_equivalent(): + ''' using a context amanger and using nditer.close are equivalent + ''' + def add_close(x, y, out=None): + addop = np.add + it = np.nditer([x, y, out], [], + [['readonly'], ['readonly'], ['writeonly','allocate']]) + for (a, b, c) in it: + addop(a, b, out=c) + ret = it.operands[2] + it.close() + return ret + + def add_context(x, y, out=None): + addop = np.add + it = np.nditer([x, y, out], [], + [['readonly'], ['readonly'], ['writeonly','allocate']]) + with it: + for (a, b, c) in it: + addop(a, b, out=c) + return it.operands[2] + z = add_close(range(5), range(5)) + assert_equal(z, range(0, 10, 2)) + z = add_context(range(5), range(5)) + assert_equal(z, range(0, 10, 2)) + +def test_close_raises(): + it = np.nditer(np.arange(3)) + assert_equal (next(it), 0) + it.close() + assert_raises(StopIteration, next, it) + assert_raises(ValueError, getattr, it, 'operands') + +def test_close_parameters(): + it = np.nditer(np.arange(3)) + assert_raises(TypeError, it.close, 1) + +@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") +def test_warn_noclose(): + a = np.arange(6, dtype='f4') + au = a.byteswap().newbyteorder() + with suppress_warnings() as sup: + sup.record(RuntimeWarning) + it = np.nditer(au, [], [['readwrite', 'updateifcopy']], + casting='equiv', op_dtypes=[np.dtype('f4')]) + del it + assert len(sup.log) == 1 + + +@pytest.mark.skipif(sys.version_info[:2] == (3, 9) and sys.platform == "win32", + reason="Errors with Python 3.9 on Windows") +@pytest.mark.parametrize(["in_dtype", "buf_dtype"], + [("i", "O"), ("O", "i"), # most simple cases + ("i,O", "O,O"), # structured partially only copying O + ("O,i", "i,O"), # structured casting to and from O + ]) +@pytest.mark.parametrize("steps", [1, 2, 3]) +def test_partial_iteration_cleanup(in_dtype, buf_dtype, steps): + """ + Checks for reference counting leaks during cleanup. Using explicit + reference counts lead to occasional false positives (at least in parallel + test setups). This test now should still test leaks correctly when + run e.g. 
with pytest-valgrind or pytest-leaks + """ + value = 2**30 + 1 # just a random value that Python won't intern + arr = np.full(int(np.BUFSIZE * 2.5), value).astype(in_dtype) + + it = np.nditer(arr, op_dtypes=[np.dtype(buf_dtype)], + flags=["buffered", "external_loop", "refs_ok"], casting="unsafe") + for step in range(steps): + # The iteration finishes in 3 steps, the first two are partial + next(it) + + del it # not necessary, but we test the cleanup + + # Repeat the test with `iternext` + it = np.nditer(arr, op_dtypes=[np.dtype(buf_dtype)], + flags=["buffered", "external_loop", "refs_ok"], casting="unsafe") + for step in range(steps): + it.iternext() + + del it # not necessary, but we test the cleanup + +@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") +@pytest.mark.parametrize(["in_dtype", "buf_dtype"], + [("O", "i"), # most simple cases + ("O,i", "i,O"), # structured casting to and from O + ]) +def test_partial_iteration_error(in_dtype, buf_dtype): + value = 123 # relies on python cache (leak-check will still find it) + arr = np.full(int(np.BUFSIZE * 2.5), value).astype(in_dtype) + if in_dtype == "O": + arr[int(np.BUFSIZE * 1.5)] = None + else: + arr[int(np.BUFSIZE * 1.5)]["f0"] = None + + count = sys.getrefcount(value) + + it = np.nditer(arr, op_dtypes=[np.dtype(buf_dtype)], + flags=["buffered", "external_loop", "refs_ok"], casting="unsafe") + with pytest.raises(TypeError): + # pytest.raises seems to have issues with the error originating + # in the for loop, so manually unravel: + next(it) + next(it) # raises TypeError + + # Repeat the test with `iternext` after resetting, the buffers should + # already be cleared from any references, so resetting is sufficient. + it.reset() + with pytest.raises(TypeError): + it.iternext() + it.iternext() + + assert count == sys.getrefcount(value) + + +def test_debug_print(capfd): + """ + Matches the expected output of a debug print with the actual output. + Note that the iterator dump should not be considered stable API, + this test is mainly to ensure the print does not crash. + + Currently uses a subprocess to avoid dealing with the C level `printf`s. + """ + # the expected output with all addresses and sizes stripped (they vary + # and/or are platform dependent). + expected = """ + ------ BEGIN ITERATOR DUMP ------ + | Iterator Address: + | ItFlags: BUFFER REDUCE REUSE_REDUCE_LOOPS + | NDim: 2 + | NOp: 2 + | IterSize: 50 + | IterStart: 0 + | IterEnd: 50 + | IterIndex: 0 + | Iterator SizeOf: + | BufferData SizeOf: + | AxisData SizeOf: + | + | Perm: 0 1 + | DTypes: + | DTypes: dtype('float64') dtype('int32') + | InitDataPtrs: + | BaseOffsets: 0 0 + | Operands: + | Operand DTypes: dtype('int64') dtype('float64') + | OpItFlags: + | Flags[0]: READ CAST ALIGNED + | Flags[1]: READ WRITE CAST ALIGNED REDUCE + | + | BufferData: + | BufferSize: 50 + | Size: 5 + | BufIterEnd: 5 + | REDUCE Pos: 0 + | REDUCE OuterSize: 10 + | REDUCE OuterDim: 1 + | Strides: 8 4 + | Ptrs: + | REDUCE Outer Strides: 40 0 + | REDUCE Outer Ptrs: + | ReadTransferFn: + | ReadTransferData: + | WriteTransferFn: + | WriteTransferData: + | Buffers: + | + | AxisData[0]: + | Shape: 5 + | Index: 0 + | Strides: 16 8 + | Ptrs: + | AxisData[1]: + | Shape: 10 + | Index: 0 + | Strides: 80 0 + | Ptrs: + ------- END ITERATOR DUMP ------- + """.strip().splitlines() + + arr1 = np.arange(100, dtype=np.int64).reshape(10, 10)[:, ::2] + arr2 = np.arange(5.) 
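# A small sketch of what "buffered" + "external_loop" iteration (as used by the
# partial-iteration cleanup tests above) hands back: 1-d chunks of at most the
# buffer size, rather than scalar elements. Values here are illustrative only.
import numpy as np

arr = np.arange(3 * np.BUFSIZE, dtype=np.int64)
it = np.nditer(arr, flags=["buffered", "external_loop"],
               op_dtypes=[np.dtype("f8")], casting="unsafe")
sizes = [chunk.size for chunk in it]
assert sum(sizes) == arr.size and max(sizes) <= np.BUFSIZE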
+ it = np.nditer((arr1, arr2), op_dtypes=["d", "i4"], casting="unsafe", + flags=["reduce_ok", "buffered"], + op_flags=[["readonly"], ["readwrite"]]) + it.debug_print() + res = capfd.readouterr().out + res = res.strip().splitlines() + + assert len(res) == len(expected) + for res_line, expected_line in zip(res, expected): + # The actual output may have additional pointers listed that are + # stripped from the example output: + assert res_line.startswith(expected_line.strip()) diff --git a/.venv/lib/python3.12/site-packages/numpy/core/tests/test_nep50_promotions.py b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_nep50_promotions.py new file mode 100644 index 00000000..74a18a8d --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_nep50_promotions.py @@ -0,0 +1,246 @@ +""" +This file adds basic tests to test the NEP 50 style promotion compatibility +mode. Most of these test are likely to be simply deleted again once NEP 50 +is adopted in the main test suite. A few may be moved elsewhere. +""" + +import operator + +import numpy as np + +import pytest +from numpy.testing import IS_WASM + + +@pytest.fixture(scope="module", autouse=True) +def _weak_promotion_enabled(): + state = np._get_promotion_state() + np._set_promotion_state("weak_and_warn") + yield + np._set_promotion_state(state) + + +@pytest.mark.skipif(IS_WASM, reason="wasm doesn't have support for fp errors") +def test_nep50_examples(): + with pytest.warns(UserWarning, match="result dtype changed"): + res = np.uint8(1) + 2 + assert res.dtype == np.uint8 + + with pytest.warns(UserWarning, match="result dtype changed"): + res = np.array([1], np.uint8) + np.int64(1) + assert res.dtype == np.int64 + + with pytest.warns(UserWarning, match="result dtype changed"): + res = np.array([1], np.uint8) + np.array(1, dtype=np.int64) + assert res.dtype == np.int64 + + with pytest.warns(UserWarning, match="result dtype changed"): + # Note: For "weak_and_warn" promotion state the overflow warning is + # unfortunately not given (because we use the full array path). 
+ with np.errstate(over="raise"): + res = np.uint8(100) + 200 + assert res.dtype == np.uint8 + + with pytest.warns(Warning) as recwarn: + res = np.float32(1) + 3e100 + + # Check that both warnings were given in the one call: + warning = str(recwarn.pop(UserWarning).message) + assert warning.startswith("result dtype changed") + warning = str(recwarn.pop(RuntimeWarning).message) + assert warning.startswith("overflow") + assert len(recwarn) == 0 # no further warnings + assert np.isinf(res) + assert res.dtype == np.float32 + + # Changes, but we don't warn for it (too noisy) + res = np.array([0.1], np.float32) == np.float64(0.1) + assert res[0] == False + + # Additional test, since the above silences the warning: + with pytest.warns(UserWarning, match="result dtype changed"): + res = np.array([0.1], np.float32) + np.float64(0.1) + assert res.dtype == np.float64 + + with pytest.warns(UserWarning, match="result dtype changed"): + res = np.array([1.], np.float32) + np.int64(3) + assert res.dtype == np.float64 + + +@pytest.mark.parametrize("dtype", np.typecodes["AllInteger"]) +def test_nep50_weak_integers(dtype): + # Avoids warning (different code path for scalars) + np._set_promotion_state("weak") + scalar_type = np.dtype(dtype).type + + maxint = int(np.iinfo(dtype).max) + + with np.errstate(over="warn"): + with pytest.warns(RuntimeWarning): + res = scalar_type(100) + maxint + assert res.dtype == dtype + + # Array operations are not expected to warn, but should give the same + # result dtype. + res = np.array(100, dtype=dtype) + maxint + assert res.dtype == dtype + + +@pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) +def test_nep50_weak_integers_with_inexact(dtype): + # Avoids warning (different code path for scalars) + np._set_promotion_state("weak") + scalar_type = np.dtype(dtype).type + + too_big_int = int(np.finfo(dtype).max) * 2 + + if dtype in "dDG": + # These dtypes currently convert to Python float internally, which + # raises an OverflowError, while the other dtypes overflow to inf. + # NOTE: It may make sense to normalize the behavior! + with pytest.raises(OverflowError): + scalar_type(1) + too_big_int + + with pytest.raises(OverflowError): + np.array(1, dtype=dtype) + too_big_int + else: + # NumPy uses (or used) `int -> string -> longdouble` for the + # conversion. But Python may refuse `str(int)` for huge ints. + # In that case, RuntimeWarning would be correct, but conversion + # fails earlier (seems to happen on 32bit linux, possibly only debug). + if dtype in "gG": + try: + str(too_big_int) + except ValueError: + pytest.skip("`huge_int -> string -> longdouble` failed") + + # Otherwise, we overflow to infinity: + with pytest.warns(RuntimeWarning): + res = scalar_type(1) + too_big_int + assert res.dtype == dtype + assert res == np.inf + + with pytest.warns(RuntimeWarning): + # We force the dtype here, since windows may otherwise pick the + # double instead of the longdouble loop. That leads to slightly + # different results (conversion of the int fails as above). + res = np.add(np.array(1, dtype=dtype), too_big_int, dtype=dtype) + assert res.dtype == dtype + assert res == np.inf + + +@pytest.mark.parametrize("op", [operator.add, operator.pow, operator.eq]) +def test_weak_promotion_scalar_path(op): + # Some additional paths exercising the weak scalars. 
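# A minimal sketch of the "weak" promotion state probed throughout this file.
# np._set_promotion_state/_get_promotion_state are the private hooks the tests
# themselves use; they are version-dependent, so treat this as illustrative.
import numpy as np

saved = np._get_promotion_state()
np._set_promotion_state("weak")
assert (np.uint8(1) + 2).dtype == np.uint8                         # Python int stays weak
assert (np.array([1], np.uint8) + np.int64(1)).dtype == np.int64   # both operands typed
np._set_promotion_state(saved)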
+ np._set_promotion_state("weak") + + # Integer path: + res = op(np.uint8(3), 5) + assert res == op(3, 5) + assert res.dtype == np.uint8 or res.dtype == bool + + with pytest.raises(OverflowError): + op(np.uint8(3), 1000) + + # Float path: + res = op(np.float32(3), 5.) + assert res == op(3., 5.) + assert res.dtype == np.float32 or res.dtype == bool + + +def test_nep50_complex_promotion(): + np._set_promotion_state("weak") + + with pytest.warns(RuntimeWarning, match=".*overflow"): + res = np.complex64(3) + complex(2**300) + + assert type(res) == np.complex64 + + +def test_nep50_integer_conversion_errors(): + # Do not worry about warnings here (auto-fixture will reset). + np._set_promotion_state("weak") + # Implementation for error paths is mostly missing (as of writing) + with pytest.raises(OverflowError, match=".*uint8"): + np.array([1], np.uint8) + 300 + + with pytest.raises(OverflowError, match=".*uint8"): + np.uint8(1) + 300 + + # Error message depends on platform (maybe unsigned int or unsigned long) + with pytest.raises(OverflowError, + match="Python integer -1 out of bounds for uint8"): + np.uint8(1) + -1 + + +def test_nep50_integer_regression(): + # Test the old integer promotion rules. When the integer is too large, + # we need to keep using the old-style promotion. + np._set_promotion_state("legacy") + arr = np.array(1) + assert (arr + 2**63).dtype == np.float64 + assert (arr[()] + 2**63).dtype == np.float64 + + +def test_nep50_with_axisconcatenator(): + # I promised that this will be an error in the future in the 1.25 + # release notes; test this (NEP 50 opt-in makes the deprecation an error). + np._set_promotion_state("weak") + + with pytest.raises(OverflowError): + np.r_[np.arange(5, dtype=np.int8), 255] + + +@pytest.mark.parametrize("ufunc", [np.add, np.power]) +@pytest.mark.parametrize("state", ["weak", "weak_and_warn"]) +def test_nep50_huge_integers(ufunc, state): + # Very large integers are complicated, because they go to uint64 or + # object dtype. This tests covers a few possible paths (some of which + # cannot give the NEP 50 warnings). 
+ np._set_promotion_state(state) + + with pytest.raises(OverflowError): + ufunc(np.int64(0), 2**63) # 2**63 too large for int64 + + if state == "weak_and_warn": + with pytest.warns(UserWarning, + match="result dtype changed.*float64.*uint64"): + with pytest.raises(OverflowError): + ufunc(np.uint64(0), 2**64) + else: + with pytest.raises(OverflowError): + ufunc(np.uint64(0), 2**64) # 2**64 cannot be represented by uint64 + + # However, 2**63 can be represented by the uint64 (and that is used): + if state == "weak_and_warn": + with pytest.warns(UserWarning, + match="result dtype changed.*float64.*uint64"): + res = ufunc(np.uint64(1), 2**63) + else: + res = ufunc(np.uint64(1), 2**63) + + assert res.dtype == np.uint64 + assert res == ufunc(1, 2**63, dtype=object) + + # The following paths fail to warn correctly about the change: + with pytest.raises(OverflowError): + ufunc(np.int64(1), 2**63) # np.array(2**63) would go to uint + + with pytest.raises(OverflowError): + ufunc(np.int64(1), 2**100) # np.array(2**100) would go to object + + # This would go to object and thus a Python float, not a NumPy one: + res = ufunc(1.0, 2**100) + assert isinstance(res, np.float64) + + +def test_nep50_in_concat_and_choose(): + np._set_promotion_state("weak_and_warn") + + with pytest.warns(UserWarning, match="result dtype changed"): + res = np.concatenate([np.float32(1), 1.], axis=None) + assert res.dtype == "float32" + + with pytest.warns(UserWarning, match="result dtype changed"): + res = np.choose(1, [np.float32(1), 1.]) + assert res.dtype == "float32" diff --git a/.venv/lib/python3.12/site-packages/numpy/core/tests/test_numeric.py b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_numeric.py new file mode 100644 index 00000000..e5edd3ef --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_numeric.py @@ -0,0 +1,3593 @@ +import sys +import warnings +import itertools +import platform +import pytest +import math +from decimal import Decimal + +import numpy as np +from numpy.core import umath +from numpy.random import rand, randint, randn +from numpy.testing import ( + assert_, assert_equal, assert_raises, assert_raises_regex, + assert_array_equal, assert_almost_equal, assert_array_almost_equal, + assert_warns, assert_array_max_ulp, HAS_REFCOUNT, IS_WASM + ) +from numpy.core._rational_tests import rational + +from hypothesis import given, strategies as st +from hypothesis.extra import numpy as hynp + + +class TestResize: + def test_copies(self): + A = np.array([[1, 2], [3, 4]]) + Ar1 = np.array([[1, 2, 3, 4], [1, 2, 3, 4]]) + assert_equal(np.resize(A, (2, 4)), Ar1) + + Ar2 = np.array([[1, 2], [3, 4], [1, 2], [3, 4]]) + assert_equal(np.resize(A, (4, 2)), Ar2) + + Ar3 = np.array([[1, 2, 3], [4, 1, 2], [3, 4, 1], [2, 3, 4]]) + assert_equal(np.resize(A, (4, 3)), Ar3) + + def test_repeats(self): + A = np.array([1, 2, 3]) + Ar1 = np.array([[1, 2, 3, 1], [2, 3, 1, 2]]) + assert_equal(np.resize(A, (2, 4)), Ar1) + + Ar2 = np.array([[1, 2], [3, 1], [2, 3], [1, 2]]) + assert_equal(np.resize(A, (4, 2)), Ar2) + + Ar3 = np.array([[1, 2, 3], [1, 2, 3], [1, 2, 3], [1, 2, 3]]) + assert_equal(np.resize(A, (4, 3)), Ar3) + + def test_zeroresize(self): + A = np.array([[1, 2], [3, 4]]) + Ar = np.resize(A, (0,)) + assert_array_equal(Ar, np.array([])) + assert_equal(A.dtype, Ar.dtype) + + Ar = np.resize(A, (0, 2)) + assert_equal(Ar.shape, (0, 2)) + + Ar = np.resize(A, (2, 0)) + assert_equal(Ar.shape, (2, 0)) + + def test_reshape_from_zero(self): + # See also gh-6740 + A = np.zeros(0, dtype=[('a', 
np.float32)]) + Ar = np.resize(A, (2, 1)) + assert_array_equal(Ar, np.zeros((2, 1), Ar.dtype)) + assert_equal(A.dtype, Ar.dtype) + + def test_negative_resize(self): + A = np.arange(0, 10, dtype=np.float32) + new_shape = (-10, -1) + with pytest.raises(ValueError, match=r"negative"): + np.resize(A, new_shape=new_shape) + + def test_subclass(self): + class MyArray(np.ndarray): + __array_priority__ = 1. + + my_arr = np.array([1]).view(MyArray) + assert type(np.resize(my_arr, 5)) is MyArray + assert type(np.resize(my_arr, 0)) is MyArray + + my_arr = np.array([]).view(MyArray) + assert type(np.resize(my_arr, 5)) is MyArray + + +class TestNonarrayArgs: + # check that non-array arguments to functions wrap them in arrays + def test_choose(self): + choices = [[0, 1, 2], + [3, 4, 5], + [5, 6, 7]] + tgt = [5, 1, 5] + a = [2, 0, 1] + + out = np.choose(a, choices) + assert_equal(out, tgt) + + def test_clip(self): + arr = [-1, 5, 2, 3, 10, -4, -9] + out = np.clip(arr, 2, 7) + tgt = [2, 5, 2, 3, 7, 2, 2] + assert_equal(out, tgt) + + def test_compress(self): + arr = [[0, 1, 2, 3, 4], + [5, 6, 7, 8, 9]] + tgt = [[5, 6, 7, 8, 9]] + out = np.compress([0, 1], arr, axis=0) + assert_equal(out, tgt) + + def test_count_nonzero(self): + arr = [[0, 1, 7, 0, 0], + [3, 0, 0, 2, 19]] + tgt = np.array([2, 3]) + out = np.count_nonzero(arr, axis=1) + assert_equal(out, tgt) + + def test_cumproduct(self): + A = [[1, 2, 3], [4, 5, 6]] + with assert_warns(DeprecationWarning): + expected = np.array([1, 2, 6, 24, 120, 720]) + assert_(np.all(np.cumproduct(A) == expected)) + + def test_diagonal(self): + a = [[0, 1, 2, 3], + [4, 5, 6, 7], + [8, 9, 10, 11]] + out = np.diagonal(a) + tgt = [0, 5, 10] + + assert_equal(out, tgt) + + def test_mean(self): + A = [[1, 2, 3], [4, 5, 6]] + assert_(np.mean(A) == 3.5) + assert_(np.all(np.mean(A, 0) == np.array([2.5, 3.5, 4.5]))) + assert_(np.all(np.mean(A, 1) == np.array([2., 5.]))) + + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', RuntimeWarning) + assert_(np.isnan(np.mean([]))) + assert_(w[0].category is RuntimeWarning) + + def test_ptp(self): + a = [3, 4, 5, 10, -3, -5, 6.0] + assert_equal(np.ptp(a, axis=0), 15.0) + + def test_prod(self): + arr = [[1, 2, 3, 4], + [5, 6, 7, 9], + [10, 3, 4, 5]] + tgt = [24, 1890, 600] + + assert_equal(np.prod(arr, axis=-1), tgt) + + def test_ravel(self): + a = [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]] + tgt = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12] + assert_equal(np.ravel(a), tgt) + + def test_repeat(self): + a = [1, 2, 3] + tgt = [1, 1, 2, 2, 3, 3] + + out = np.repeat(a, 2) + assert_equal(out, tgt) + + def test_reshape(self): + arr = [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]] + tgt = [[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]] + assert_equal(np.reshape(arr, (2, 6)), tgt) + + def test_round(self): + arr = [1.56, 72.54, 6.35, 3.25] + tgt = [1.6, 72.5, 6.4, 3.2] + assert_equal(np.around(arr, decimals=1), tgt) + s = np.float64(1.) + assert_(isinstance(s.round(), np.float64)) + assert_equal(s.round(), 1.) 
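# The pattern this class checks, in short: the free functions coerce plain
# Python sequences to arrays internally, so lists work wherever arrays do.
# A minimal sketch using the same data as test_clip above:
import numpy as np

out = np.clip([-1, 5, 2, 3, 10, -4, -9], 2, 7)
assert isinstance(out, np.ndarray)
assert out.tolist() == [2, 5, 2, 3, 7, 2, 2]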
+ + @pytest.mark.parametrize('dtype', [ + np.int8, np.int16, np.int32, np.int64, + np.uint8, np.uint16, np.uint32, np.uint64, + np.float16, np.float32, np.float64, + ]) + def test_dunder_round(self, dtype): + s = dtype(1) + assert_(isinstance(round(s), int)) + assert_(isinstance(round(s, None), int)) + assert_(isinstance(round(s, ndigits=None), int)) + assert_equal(round(s), 1) + assert_equal(round(s, None), 1) + assert_equal(round(s, ndigits=None), 1) + + @pytest.mark.parametrize('val, ndigits', [ + pytest.param(2**31 - 1, -1, + marks=pytest.mark.xfail(reason="Out of range of int32") + ), + (2**31 - 1, 1-math.ceil(math.log10(2**31 - 1))), + (2**31 - 1, -math.ceil(math.log10(2**31 - 1))) + ]) + def test_dunder_round_edgecases(self, val, ndigits): + assert_equal(round(val, ndigits), round(np.int32(val), ndigits)) + + def test_dunder_round_accuracy(self): + f = np.float64(5.1 * 10**73) + assert_(isinstance(round(f, -73), np.float64)) + assert_array_max_ulp(round(f, -73), 5.0 * 10**73) + assert_(isinstance(round(f, ndigits=-73), np.float64)) + assert_array_max_ulp(round(f, ndigits=-73), 5.0 * 10**73) + + i = np.int64(501) + assert_(isinstance(round(i, -2), np.int64)) + assert_array_max_ulp(round(i, -2), 500) + assert_(isinstance(round(i, ndigits=-2), np.int64)) + assert_array_max_ulp(round(i, ndigits=-2), 500) + + @pytest.mark.xfail(raises=AssertionError, reason="gh-15896") + def test_round_py_consistency(self): + f = 5.1 * 10**73 + assert_equal(round(np.float64(f), -73), round(f, -73)) + + def test_searchsorted(self): + arr = [-8, -5, -1, 3, 6, 10] + out = np.searchsorted(arr, 0) + assert_equal(out, 3) + + def test_size(self): + A = [[1, 2, 3], [4, 5, 6]] + assert_(np.size(A) == 6) + assert_(np.size(A, 0) == 2) + assert_(np.size(A, 1) == 3) + + def test_squeeze(self): + A = [[[1, 1, 1], [2, 2, 2], [3, 3, 3]]] + assert_equal(np.squeeze(A).shape, (3, 3)) + assert_equal(np.squeeze(np.zeros((1, 3, 1))).shape, (3,)) + assert_equal(np.squeeze(np.zeros((1, 3, 1)), axis=0).shape, (3, 1)) + assert_equal(np.squeeze(np.zeros((1, 3, 1)), axis=-1).shape, (1, 3)) + assert_equal(np.squeeze(np.zeros((1, 3, 1)), axis=2).shape, (1, 3)) + assert_equal(np.squeeze([np.zeros((3, 1))]).shape, (3,)) + assert_equal(np.squeeze([np.zeros((3, 1))], axis=0).shape, (3, 1)) + assert_equal(np.squeeze([np.zeros((3, 1))], axis=2).shape, (1, 3)) + assert_equal(np.squeeze([np.zeros((3, 1))], axis=-1).shape, (1, 3)) + + def test_std(self): + A = [[1, 2, 3], [4, 5, 6]] + assert_almost_equal(np.std(A), 1.707825127659933) + assert_almost_equal(np.std(A, 0), np.array([1.5, 1.5, 1.5])) + assert_almost_equal(np.std(A, 1), np.array([0.81649658, 0.81649658])) + + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', RuntimeWarning) + assert_(np.isnan(np.std([]))) + assert_(w[0].category is RuntimeWarning) + + def test_swapaxes(self): + tgt = [[[0, 4], [2, 6]], [[1, 5], [3, 7]]] + a = [[[0, 1], [2, 3]], [[4, 5], [6, 7]]] + out = np.swapaxes(a, 0, 2) + assert_equal(out, tgt) + + def test_sum(self): + m = [[1, 2, 3], + [4, 5, 6], + [7, 8, 9]] + tgt = [[6], [15], [24]] + out = np.sum(m, axis=1, keepdims=True) + + assert_equal(tgt, out) + + def test_take(self): + tgt = [2, 3, 5] + indices = [1, 2, 4] + a = [1, 2, 3, 4, 5] + + out = np.take(a, indices) + assert_equal(out, tgt) + + def test_trace(self): + c = [[1, 2], [3, 4], [5, 6]] + assert_equal(np.trace(c), 5) + + def test_transpose(self): + arr = [[1, 2], [3, 4], [5, 6]] + tgt = [[1, 3, 5], [2, 4, 6]] + assert_equal(np.transpose(arr, (1, 0)), tgt) + + 
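# The __round__ protocol exercised just above, in brief: round(x) with no
# ndigits returns a Python int, while round(x, n) keeps the NumPy scalar type.
# A small sketch with made-up values:
import numpy as np

assert isinstance(round(np.float64(1.6)), int)
assert round(np.float64(1.6)) == 2
assert isinstance(round(np.float64(1.2345), 2), np.float64)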
def test_var(self): + A = [[1, 2, 3], [4, 5, 6]] + assert_almost_equal(np.var(A), 2.9166666666666665) + assert_almost_equal(np.var(A, 0), np.array([2.25, 2.25, 2.25])) + assert_almost_equal(np.var(A, 1), np.array([0.66666667, 0.66666667])) + + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', RuntimeWarning) + assert_(np.isnan(np.var([]))) + assert_(w[0].category is RuntimeWarning) + + B = np.array([None, 0]) + B[0] = 1j + assert_almost_equal(np.var(B), 0.25) + + +class TestIsscalar: + def test_isscalar(self): + assert_(np.isscalar(3.1)) + assert_(np.isscalar(np.int16(12345))) + assert_(np.isscalar(False)) + assert_(np.isscalar('numpy')) + assert_(not np.isscalar([3.1])) + assert_(not np.isscalar(None)) + + # PEP 3141 + from fractions import Fraction + assert_(np.isscalar(Fraction(5, 17))) + from numbers import Number + assert_(np.isscalar(Number())) + + +class TestBoolScalar: + def test_logical(self): + f = np.False_ + t = np.True_ + s = "xyz" + assert_((t and s) is s) + assert_((f and s) is f) + + def test_bitwise_or(self): + f = np.False_ + t = np.True_ + assert_((t | t) is t) + assert_((f | t) is t) + assert_((t | f) is t) + assert_((f | f) is f) + + def test_bitwise_and(self): + f = np.False_ + t = np.True_ + assert_((t & t) is t) + assert_((f & t) is f) + assert_((t & f) is f) + assert_((f & f) is f) + + def test_bitwise_xor(self): + f = np.False_ + t = np.True_ + assert_((t ^ t) is f) + assert_((f ^ t) is t) + assert_((t ^ f) is t) + assert_((f ^ f) is f) + + +class TestBoolArray: + def setup_method(self): + # offset for simd tests + self.t = np.array([True] * 41, dtype=bool)[1::] + self.f = np.array([False] * 41, dtype=bool)[1::] + self.o = np.array([False] * 42, dtype=bool)[2::] + self.nm = self.f.copy() + self.im = self.t.copy() + self.nm[3] = True + self.nm[-2] = True + self.im[3] = False + self.im[-2] = False + + def test_all_any(self): + assert_(self.t.all()) + assert_(self.t.any()) + assert_(not self.f.all()) + assert_(not self.f.any()) + assert_(self.nm.any()) + assert_(self.im.any()) + assert_(not self.nm.all()) + assert_(not self.im.all()) + # check bad element in all positions + for i in range(256 - 7): + d = np.array([False] * 256, dtype=bool)[7::] + d[i] = True + assert_(np.any(d)) + e = np.array([True] * 256, dtype=bool)[7::] + e[i] = False + assert_(not np.all(e)) + assert_array_equal(e, ~d) + # big array test for blocked libc loops + for i in list(range(9, 6000, 507)) + [7764, 90021, -10]: + d = np.array([False] * 100043, dtype=bool) + d[i] = True + assert_(np.any(d), msg="%r" % i) + e = np.array([True] * 100043, dtype=bool) + e[i] = False + assert_(not np.all(e), msg="%r" % i) + + def test_logical_not_abs(self): + assert_array_equal(~self.t, self.f) + assert_array_equal(np.abs(~self.t), self.f) + assert_array_equal(np.abs(~self.f), self.t) + assert_array_equal(np.abs(self.f), self.f) + assert_array_equal(~np.abs(self.f), self.t) + assert_array_equal(~np.abs(self.t), self.f) + assert_array_equal(np.abs(~self.nm), self.im) + np.logical_not(self.t, out=self.o) + assert_array_equal(self.o, self.f) + np.abs(self.t, out=self.o) + assert_array_equal(self.o, self.t) + + def test_logical_and_or_xor(self): + assert_array_equal(self.t | self.t, self.t) + assert_array_equal(self.f | self.f, self.f) + assert_array_equal(self.t | self.f, self.t) + assert_array_equal(self.f | self.t, self.t) + np.logical_or(self.t, self.t, out=self.o) + assert_array_equal(self.o, self.t) + assert_array_equal(self.t & self.t, self.t) + assert_array_equal(self.f 
& self.f, self.f) + assert_array_equal(self.t & self.f, self.f) + assert_array_equal(self.f & self.t, self.f) + np.logical_and(self.t, self.t, out=self.o) + assert_array_equal(self.o, self.t) + assert_array_equal(self.t ^ self.t, self.f) + assert_array_equal(self.f ^ self.f, self.f) + assert_array_equal(self.t ^ self.f, self.t) + assert_array_equal(self.f ^ self.t, self.t) + np.logical_xor(self.t, self.t, out=self.o) + assert_array_equal(self.o, self.f) + + assert_array_equal(self.nm & self.t, self.nm) + assert_array_equal(self.im & self.f, False) + assert_array_equal(self.nm & True, self.nm) + assert_array_equal(self.im & False, self.f) + assert_array_equal(self.nm | self.t, self.t) + assert_array_equal(self.im | self.f, self.im) + assert_array_equal(self.nm | True, self.t) + assert_array_equal(self.im | False, self.im) + assert_array_equal(self.nm ^ self.t, self.im) + assert_array_equal(self.im ^ self.f, self.im) + assert_array_equal(self.nm ^ True, self.im) + assert_array_equal(self.im ^ False, self.im) + + +class TestBoolCmp: + def setup_method(self): + self.f = np.ones(256, dtype=np.float32) + self.ef = np.ones(self.f.size, dtype=bool) + self.d = np.ones(128, dtype=np.float64) + self.ed = np.ones(self.d.size, dtype=bool) + # generate values for all permutation of 256bit simd vectors + s = 0 + for i in range(32): + self.f[s:s+8] = [i & 2**x for x in range(8)] + self.ef[s:s+8] = [(i & 2**x) != 0 for x in range(8)] + s += 8 + s = 0 + for i in range(16): + self.d[s:s+4] = [i & 2**x for x in range(4)] + self.ed[s:s+4] = [(i & 2**x) != 0 for x in range(4)] + s += 4 + + self.nf = self.f.copy() + self.nd = self.d.copy() + self.nf[self.ef] = np.nan + self.nd[self.ed] = np.nan + + self.inff = self.f.copy() + self.infd = self.d.copy() + self.inff[::3][self.ef[::3]] = np.inf + self.infd[::3][self.ed[::3]] = np.inf + self.inff[1::3][self.ef[1::3]] = -np.inf + self.infd[1::3][self.ed[1::3]] = -np.inf + self.inff[2::3][self.ef[2::3]] = np.nan + self.infd[2::3][self.ed[2::3]] = np.nan + self.efnonan = self.ef.copy() + self.efnonan[2::3] = False + self.ednonan = self.ed.copy() + self.ednonan[2::3] = False + + self.signf = self.f.copy() + self.signd = self.d.copy() + self.signf[self.ef] *= -1. + self.signd[self.ed] *= -1. + self.signf[1::6][self.ef[1::6]] = -np.inf + self.signd[1::6][self.ed[1::6]] = -np.inf + # On RISC-V, many operations that produce NaNs, such as converting + # a -NaN from f64 to f32, return a canonical NaN. The canonical + # NaNs are always positive. See section 11.3 NaN Generation and + # Propagation of the RISC-V Unprivileged ISA for more details. + # We disable the float32 sign test on riscv64 for -np.nan as the sign + # of the NaN will be lost when it's converted to a float32. + if platform.processor() != 'riscv64': + self.signf[3::6][self.ef[3::6]] = -np.nan + self.signd[3::6][self.ed[3::6]] = -np.nan + self.signf[4::6][self.ef[4::6]] = -0. + self.signd[4::6][self.ed[4::6]] = -0. 
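# A quick sketch of the predicates the test_float/test_double methods below
# exercise: signbit sees the sign of negative zero and -inf, which an ordinary
# "< 0" comparison does not. Values here are illustrative only.
import numpy as np

x = np.array([1.0, -1.0, -0.0, np.inf, -np.inf])
assert np.signbit(x).tolist() == [False, True, True, False, True]
assert np.isfinite(x).tolist() == [True, True, True, False, False]
assert (x < 0).tolist() == [False, True, False, False, True]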
+ + def test_float(self): + # offset for alignment test + for i in range(4): + assert_array_equal(self.f[i:] > 0, self.ef[i:]) + assert_array_equal(self.f[i:] - 1 >= 0, self.ef[i:]) + assert_array_equal(self.f[i:] == 0, ~self.ef[i:]) + assert_array_equal(-self.f[i:] < 0, self.ef[i:]) + assert_array_equal(-self.f[i:] + 1 <= 0, self.ef[i:]) + r = self.f[i:] != 0 + assert_array_equal(r, self.ef[i:]) + r2 = self.f[i:] != np.zeros_like(self.f[i:]) + r3 = 0 != self.f[i:] + assert_array_equal(r, r2) + assert_array_equal(r, r3) + # check bool == 0x1 + assert_array_equal(r.view(np.int8), r.astype(np.int8)) + assert_array_equal(r2.view(np.int8), r2.astype(np.int8)) + assert_array_equal(r3.view(np.int8), r3.astype(np.int8)) + + # isnan on amd64 takes the same code path + assert_array_equal(np.isnan(self.nf[i:]), self.ef[i:]) + assert_array_equal(np.isfinite(self.nf[i:]), ~self.ef[i:]) + assert_array_equal(np.isfinite(self.inff[i:]), ~self.ef[i:]) + assert_array_equal(np.isinf(self.inff[i:]), self.efnonan[i:]) + assert_array_equal(np.signbit(self.signf[i:]), self.ef[i:]) + + def test_double(self): + # offset for alignment test + for i in range(2): + assert_array_equal(self.d[i:] > 0, self.ed[i:]) + assert_array_equal(self.d[i:] - 1 >= 0, self.ed[i:]) + assert_array_equal(self.d[i:] == 0, ~self.ed[i:]) + assert_array_equal(-self.d[i:] < 0, self.ed[i:]) + assert_array_equal(-self.d[i:] + 1 <= 0, self.ed[i:]) + r = self.d[i:] != 0 + assert_array_equal(r, self.ed[i:]) + r2 = self.d[i:] != np.zeros_like(self.d[i:]) + r3 = 0 != self.d[i:] + assert_array_equal(r, r2) + assert_array_equal(r, r3) + # check bool == 0x1 + assert_array_equal(r.view(np.int8), r.astype(np.int8)) + assert_array_equal(r2.view(np.int8), r2.astype(np.int8)) + assert_array_equal(r3.view(np.int8), r3.astype(np.int8)) + + # isnan on amd64 takes the same code path + assert_array_equal(np.isnan(self.nd[i:]), self.ed[i:]) + assert_array_equal(np.isfinite(self.nd[i:]), ~self.ed[i:]) + assert_array_equal(np.isfinite(self.infd[i:]), ~self.ed[i:]) + assert_array_equal(np.isinf(self.infd[i:]), self.ednonan[i:]) + assert_array_equal(np.signbit(self.signd[i:]), self.ed[i:]) + + +class TestSeterr: + def test_default(self): + err = np.geterr() + assert_equal(err, + dict(divide='warn', + invalid='warn', + over='warn', + under='ignore') + ) + + def test_set(self): + with np.errstate(): + err = np.seterr() + old = np.seterr(divide='print') + assert_(err == old) + new = np.seterr() + assert_(new['divide'] == 'print') + np.seterr(over='raise') + assert_(np.geterr()['over'] == 'raise') + assert_(new['divide'] == 'print') + np.seterr(**old) + assert_(np.geterr() == old) + + @pytest.mark.skipif(IS_WASM, reason="no wasm fp exception support") + @pytest.mark.skipif(platform.machine() == "armv5tel", reason="See gh-413.") + def test_divide_err(self): + with np.errstate(divide='raise'): + with assert_raises(FloatingPointError): + np.array([1.]) / np.array([0.]) + + np.seterr(divide='ignore') + np.array([1.]) / np.array([0.]) + + @pytest.mark.skipif(IS_WASM, reason="no wasm fp exception support") + def test_errobj(self): + olderrobj = np.geterrobj() + self.called = 0 + try: + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + with np.errstate(divide='warn'): + np.seterrobj([20000, 1, None]) + np.array([1.]) / np.array([0.]) + assert_equal(len(w), 1) + + def log_err(*args): + self.called += 1 + extobj_err = args + assert_(len(extobj_err) == 2) + assert_("divide" in extobj_err[0]) + + with np.errstate(divide='ignore'): + 
np.seterrobj([20000, 3, log_err]) + np.array([1.]) / np.array([0.]) + assert_equal(self.called, 1) + + np.seterrobj(olderrobj) + with np.errstate(divide='ignore'): + np.divide(1., 0., extobj=[20000, 3, log_err]) + assert_equal(self.called, 2) + finally: + np.seterrobj(olderrobj) + del self.called + + def test_errobj_noerrmask(self): + # errmask = 0 has a special code path for the default + olderrobj = np.geterrobj() + try: + # set errobj to something non default + np.seterrobj([umath.UFUNC_BUFSIZE_DEFAULT, + umath.ERR_DEFAULT + 1, None]) + # call a ufunc + np.isnan(np.array([6])) + # same with the default, lots of times to get rid of possible + # pre-existing stack in the code + for i in range(10000): + np.seterrobj([umath.UFUNC_BUFSIZE_DEFAULT, umath.ERR_DEFAULT, + None]) + np.isnan(np.array([6])) + finally: + np.seterrobj(olderrobj) + + +class TestFloatExceptions: + def assert_raises_fpe(self, fpeerr, flop, x, y): + ftype = type(x) + try: + flop(x, y) + assert_(False, + "Type %s did not raise fpe error '%s'." % (ftype, fpeerr)) + except FloatingPointError as exc: + assert_(str(exc).find(fpeerr) >= 0, + "Type %s raised wrong fpe error '%s'." % (ftype, exc)) + + def assert_op_raises_fpe(self, fpeerr, flop, sc1, sc2): + # Check that fpe exception is raised. + # + # Given a floating operation `flop` and two scalar values, check that + # the operation raises the floating point exception specified by + # `fpeerr`. Tests all variants with 0-d array scalars as well. + + self.assert_raises_fpe(fpeerr, flop, sc1, sc2) + self.assert_raises_fpe(fpeerr, flop, sc1[()], sc2) + self.assert_raises_fpe(fpeerr, flop, sc1, sc2[()]) + self.assert_raises_fpe(fpeerr, flop, sc1[()], sc2[()]) + + # Test for all real and complex float types + @pytest.mark.skipif(IS_WASM, reason="no wasm fp exception support") + @pytest.mark.parametrize("typecode", np.typecodes["AllFloat"]) + def test_floating_exceptions(self, typecode): + if 'bsd' in sys.platform and typecode in 'gG': + pytest.skip(reason="Fallback impl for (c)longdouble may not raise " + "FPE errors as expected on BSD OSes, " + "see gh-24876, gh-23379") + + # Test basic arithmetic function errors + with np.errstate(all='raise'): + ftype = np.obj2sctype(typecode) + if np.dtype(ftype).kind == 'f': + # Get some extreme values for the type + fi = np.finfo(ftype) + ft_tiny = fi._machar.tiny + ft_max = fi.max + ft_eps = fi.eps + underflow = 'underflow' + divbyzero = 'divide by zero' + else: + # 'c', complex, corresponding real dtype + rtype = type(ftype(0).real) + fi = np.finfo(rtype) + ft_tiny = ftype(fi._machar.tiny) + ft_max = ftype(fi.max) + ft_eps = ftype(fi.eps) + # The complex types raise different exceptions + underflow = '' + divbyzero = '' + overflow = 'overflow' + invalid = 'invalid' + + # The value of tiny for double double is NaN, so we need to + # pass the assert + if not np.isnan(ft_tiny): + self.assert_raises_fpe(underflow, + lambda a, b: a/b, ft_tiny, ft_max) + self.assert_raises_fpe(underflow, + lambda a, b: a*b, ft_tiny, ft_tiny) + self.assert_raises_fpe(overflow, + lambda a, b: a*b, ft_max, ftype(2)) + self.assert_raises_fpe(overflow, + lambda a, b: a/b, ft_max, ftype(0.5)) + self.assert_raises_fpe(overflow, + lambda a, b: a+b, ft_max, ft_max*ft_eps) + self.assert_raises_fpe(overflow, + lambda a, b: a-b, -ft_max, ft_max*ft_eps) + self.assert_raises_fpe(overflow, + np.power, ftype(2), ftype(2**fi.nexp)) + self.assert_raises_fpe(divbyzero, + lambda a, b: a/b, ftype(1), ftype(0)) + self.assert_raises_fpe( + invalid, lambda a, b: a/b, ftype(np.inf), 
ftype(np.inf) + ) + self.assert_raises_fpe(invalid, + lambda a, b: a/b, ftype(0), ftype(0)) + self.assert_raises_fpe( + invalid, lambda a, b: a-b, ftype(np.inf), ftype(np.inf) + ) + self.assert_raises_fpe( + invalid, lambda a, b: a+b, ftype(np.inf), ftype(-np.inf) + ) + self.assert_raises_fpe(invalid, + lambda a, b: a*b, ftype(0), ftype(np.inf)) + + @pytest.mark.skipif(IS_WASM, reason="no wasm fp exception support") + def test_warnings(self): + # test warning code path + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + with np.errstate(all="warn"): + np.divide(1, 0.) + assert_equal(len(w), 1) + assert_("divide by zero" in str(w[0].message)) + np.array(1e300) * np.array(1e300) + assert_equal(len(w), 2) + assert_("overflow" in str(w[-1].message)) + np.array(np.inf) - np.array(np.inf) + assert_equal(len(w), 3) + assert_("invalid value" in str(w[-1].message)) + np.array(1e-300) * np.array(1e-300) + assert_equal(len(w), 4) + assert_("underflow" in str(w[-1].message)) + + +class TestTypes: + def check_promotion_cases(self, promote_func): + # tests that the scalars get coerced correctly. + b = np.bool_(0) + i8, i16, i32, i64 = np.int8(0), np.int16(0), np.int32(0), np.int64(0) + u8, u16, u32, u64 = np.uint8(0), np.uint16(0), np.uint32(0), np.uint64(0) + f32, f64, fld = np.float32(0), np.float64(0), np.longdouble(0) + c64, c128, cld = np.complex64(0), np.complex128(0), np.clongdouble(0) + + # coercion within the same kind + assert_equal(promote_func(i8, i16), np.dtype(np.int16)) + assert_equal(promote_func(i32, i8), np.dtype(np.int32)) + assert_equal(promote_func(i16, i64), np.dtype(np.int64)) + assert_equal(promote_func(u8, u32), np.dtype(np.uint32)) + assert_equal(promote_func(f32, f64), np.dtype(np.float64)) + assert_equal(promote_func(fld, f32), np.dtype(np.longdouble)) + assert_equal(promote_func(f64, fld), np.dtype(np.longdouble)) + assert_equal(promote_func(c128, c64), np.dtype(np.complex128)) + assert_equal(promote_func(cld, c128), np.dtype(np.clongdouble)) + assert_equal(promote_func(c64, fld), np.dtype(np.clongdouble)) + + # coercion between kinds + assert_equal(promote_func(b, i32), np.dtype(np.int32)) + assert_equal(promote_func(b, u8), np.dtype(np.uint8)) + assert_equal(promote_func(i8, u8), np.dtype(np.int16)) + assert_equal(promote_func(u8, i32), np.dtype(np.int32)) + assert_equal(promote_func(i64, u32), np.dtype(np.int64)) + assert_equal(promote_func(u64, i32), np.dtype(np.float64)) + assert_equal(promote_func(i32, f32), np.dtype(np.float64)) + assert_equal(promote_func(i64, f32), np.dtype(np.float64)) + assert_equal(promote_func(f32, i16), np.dtype(np.float32)) + assert_equal(promote_func(f32, u32), np.dtype(np.float64)) + assert_equal(promote_func(f32, c64), np.dtype(np.complex64)) + assert_equal(promote_func(c128, f32), np.dtype(np.complex128)) + assert_equal(promote_func(cld, f64), np.dtype(np.clongdouble)) + + # coercion between scalars and 1-D arrays + assert_equal(promote_func(np.array([b]), i8), np.dtype(np.int8)) + assert_equal(promote_func(np.array([b]), u8), np.dtype(np.uint8)) + assert_equal(promote_func(np.array([b]), i32), np.dtype(np.int32)) + assert_equal(promote_func(np.array([b]), u32), np.dtype(np.uint32)) + assert_equal(promote_func(np.array([i8]), i64), np.dtype(np.int8)) + assert_equal(promote_func(u64, np.array([i32])), np.dtype(np.int32)) + assert_equal(promote_func(i64, np.array([u32])), np.dtype(np.uint32)) + assert_equal(promote_func(np.int32(-1), np.array([u64])), + np.dtype(np.float64)) + assert_equal(promote_func(f64, 
np.array([f32])), np.dtype(np.float32)) + assert_equal(promote_func(fld, np.array([f32])), np.dtype(np.float32)) + assert_equal(promote_func(np.array([f64]), fld), np.dtype(np.float64)) + assert_equal(promote_func(fld, np.array([c64])), + np.dtype(np.complex64)) + assert_equal(promote_func(c64, np.array([f64])), + np.dtype(np.complex128)) + assert_equal(promote_func(np.complex64(3j), np.array([f64])), + np.dtype(np.complex128)) + + # coercion between scalars and 1-D arrays, where + # the scalar has greater kind than the array + assert_equal(promote_func(np.array([b]), f64), np.dtype(np.float64)) + assert_equal(promote_func(np.array([b]), i64), np.dtype(np.int64)) + assert_equal(promote_func(np.array([b]), u64), np.dtype(np.uint64)) + assert_equal(promote_func(np.array([i8]), f64), np.dtype(np.float64)) + assert_equal(promote_func(np.array([u16]), f64), np.dtype(np.float64)) + + # uint and int are treated as the same "kind" for + # the purposes of array-scalar promotion. + assert_equal(promote_func(np.array([u16]), i32), np.dtype(np.uint16)) + + # float and complex are treated as the same "kind" for + # the purposes of array-scalar promotion, so that you can do + # (0j + float32array) to get a complex64 array instead of + # a complex128 array. + assert_equal(promote_func(np.array([f32]), c128), + np.dtype(np.complex64)) + + def test_coercion(self): + def res_type(a, b): + return np.add(a, b).dtype + + self.check_promotion_cases(res_type) + + # Use-case: float/complex scalar * bool/int8 array + # shouldn't narrow the float/complex type + for a in [np.array([True, False]), np.array([-3, 12], dtype=np.int8)]: + b = 1.234 * a + assert_equal(b.dtype, np.dtype('f8'), "array type %s" % a.dtype) + b = np.longdouble(1.234) * a + assert_equal(b.dtype, np.dtype(np.longdouble), + "array type %s" % a.dtype) + b = np.float64(1.234) * a + assert_equal(b.dtype, np.dtype('f8'), "array type %s" % a.dtype) + b = np.float32(1.234) * a + assert_equal(b.dtype, np.dtype('f4'), "array type %s" % a.dtype) + b = np.float16(1.234) * a + assert_equal(b.dtype, np.dtype('f2'), "array type %s" % a.dtype) + + b = 1.234j * a + assert_equal(b.dtype, np.dtype('c16'), "array type %s" % a.dtype) + b = np.clongdouble(1.234j) * a + assert_equal(b.dtype, np.dtype(np.clongdouble), + "array type %s" % a.dtype) + b = np.complex128(1.234j) * a + assert_equal(b.dtype, np.dtype('c16'), "array type %s" % a.dtype) + b = np.complex64(1.234j) * a + assert_equal(b.dtype, np.dtype('c8'), "array type %s" % a.dtype) + + # The following use-case is problematic, and to resolve its + # tricky side-effects requires more changes. 
+ # + # Use-case: (1-t)*a, where 't' is a boolean array and 'a' is + # a float32, shouldn't promote to float64 + # + # a = np.array([1.0, 1.5], dtype=np.float32) + # t = np.array([True, False]) + # b = t*a + # assert_equal(b, [1.0, 0.0]) + # assert_equal(b.dtype, np.dtype('f4')) + # b = (1-t)*a + # assert_equal(b, [0.0, 1.5]) + # assert_equal(b.dtype, np.dtype('f4')) + # + # Probably ~t (bitwise negation) is more proper to use here, + # but this is arguably less intuitive to understand at a glance, and + # would fail if 't' is actually an integer array instead of boolean: + # + # b = (~t)*a + # assert_equal(b, [0.0, 1.5]) + # assert_equal(b.dtype, np.dtype('f4')) + + def test_result_type(self): + self.check_promotion_cases(np.result_type) + assert_(np.result_type(None) == np.dtype(None)) + + def test_promote_types_endian(self): + # promote_types should always return native-endian types + assert_equal(np.promote_types('<i8', '<i8'), np.dtype('i8')) + assert_equal(np.promote_types('>i8', '>i8'), np.dtype('i8')) + + assert_equal(np.promote_types('>i8', '>U16'), np.dtype('U21')) + assert_equal(np.promote_types('<i8', '<U16'), np.dtype('U21')) + assert_equal(np.promote_types('>U16', '>i8'), np.dtype('U21')) + assert_equal(np.promote_types('<U16', '<i8'), np.dtype('U21')) + + assert_equal(np.promote_types('<S5', '<U8'), np.dtype('U8')) + assert_equal(np.promote_types('>S5', '>U8'), np.dtype('U8')) + assert_equal(np.promote_types('<U8', '<S5'), np.dtype('U8')) + assert_equal(np.promote_types('>U8', '>S5'), np.dtype('U8')) + assert_equal(np.promote_types('<U5', '<U8'), np.dtype('U8')) + assert_equal(np.promote_types('>U8', '>U5'), np.dtype('U8')) + + assert_equal(np.promote_types('<M8', '<M8'), np.dtype('M8')) + assert_equal(np.promote_types('>M8', '>M8'), np.dtype('M8')) + assert_equal(np.promote_types('<m8', '<m8'), np.dtype('m8')) + assert_equal(np.promote_types('>m8', '>m8'), np.dtype('m8')) + + def test_can_cast_and_promote_usertypes(self): + # The rational type defines safe casting for signed integers, + # boolean. Rational itself *does* cast safely to double. + # (rational does not actually cast to all signed integers, e.g. 
+ # int64 can be both long and longlong and it registers only the first) + valid_types = ["int8", "int16", "int32", "int64", "bool"] + invalid_types = "BHILQP" + "FDG" + "mM" + "f" + "V" + + rational_dt = np.dtype(rational) + for numpy_dtype in valid_types: + numpy_dtype = np.dtype(numpy_dtype) + assert np.can_cast(numpy_dtype, rational_dt) + assert np.promote_types(numpy_dtype, rational_dt) is rational_dt + + for numpy_dtype in invalid_types: + numpy_dtype = np.dtype(numpy_dtype) + assert not np.can_cast(numpy_dtype, rational_dt) + with pytest.raises(TypeError): + np.promote_types(numpy_dtype, rational_dt) + + double_dt = np.dtype("double") + assert np.can_cast(rational_dt, double_dt) + assert np.promote_types(double_dt, rational_dt) is double_dt + + @pytest.mark.parametrize("swap", ["", "swap"]) + @pytest.mark.parametrize("string_dtype", ["U", "S"]) + def test_promote_types_strings(self, swap, string_dtype): + if swap == "swap": + promote_types = lambda a, b: np.promote_types(b, a) + else: + promote_types = np.promote_types + + S = string_dtype + + # Promote numeric with unsized string: + assert_equal(promote_types('bool', S), np.dtype(S+'5')) + assert_equal(promote_types('b', S), np.dtype(S+'4')) + assert_equal(promote_types('u1', S), np.dtype(S+'3')) + assert_equal(promote_types('u2', S), np.dtype(S+'5')) + assert_equal(promote_types('u4', S), np.dtype(S+'10')) + assert_equal(promote_types('u8', S), np.dtype(S+'20')) + assert_equal(promote_types('i1', S), np.dtype(S+'4')) + assert_equal(promote_types('i2', S), np.dtype(S+'6')) + assert_equal(promote_types('i4', S), np.dtype(S+'11')) + assert_equal(promote_types('i8', S), np.dtype(S+'21')) + # Promote numeric with sized string: + assert_equal(promote_types('bool', S+'1'), np.dtype(S+'5')) + assert_equal(promote_types('bool', S+'30'), np.dtype(S+'30')) + assert_equal(promote_types('b', S+'1'), np.dtype(S+'4')) + assert_equal(promote_types('b', S+'30'), np.dtype(S+'30')) + assert_equal(promote_types('u1', S+'1'), np.dtype(S+'3')) + assert_equal(promote_types('u1', S+'30'), np.dtype(S+'30')) + assert_equal(promote_types('u2', S+'1'), np.dtype(S+'5')) + assert_equal(promote_types('u2', S+'30'), np.dtype(S+'30')) + assert_equal(promote_types('u4', S+'1'), np.dtype(S+'10')) + assert_equal(promote_types('u4', S+'30'), np.dtype(S+'30')) + assert_equal(promote_types('u8', S+'1'), np.dtype(S+'20')) + assert_equal(promote_types('u8', S+'30'), np.dtype(S+'30')) + # Promote with object: + assert_equal(promote_types('O', S+'30'), np.dtype('O')) + + @pytest.mark.parametrize(["dtype1", "dtype2"], + [[np.dtype("V6"), np.dtype("V10")], # mismatch shape + # Mismatching names: + [np.dtype([("name1", "i8")]), np.dtype([("name2", "i8")])], + ]) + def test_invalid_void_promotion(self, dtype1, dtype2): + with pytest.raises(TypeError): + np.promote_types(dtype1, dtype2) + + @pytest.mark.parametrize(["dtype1", "dtype2"], + [[np.dtype("V10"), np.dtype("V10")], + [np.dtype([("name1", "i8")]), + np.dtype([("name1", np.dtype("i8").newbyteorder())])], + [np.dtype("i8,i8"), np.dtype("i8,>i8")], + [np.dtype("i8,i8"), np.dtype("i4,i4")], + ]) + def test_valid_void_promotion(self, dtype1, dtype2): + assert np.promote_types(dtype1, dtype2) == dtype1 + + @pytest.mark.parametrize("dtype", + list(np.typecodes["All"]) + + ["i,i", "10i", "S3", "S100", "U3", "U100", rational]) + def test_promote_identical_types_metadata(self, dtype): + # The same type passed in twice to promote types always + # preserves metadata + metadata = {1: 1} + dtype = np.dtype(dtype, metadata=metadata) 
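# A small sketch of the dtype metadata feature probed here. As the surrounding
# tests note, metadata handling in promotion is descriptive rather than
# specified, so only the identical-dtype case (which preserves it) is shown:
import numpy as np

dt = np.dtype("float64", metadata={"unit": "m"})
assert dt.metadata == {"unit": "m"}
assert np.promote_types(dt, dt).metadata == {"unit": "m"}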
+ + res = np.promote_types(dtype, dtype) + assert res.metadata == dtype.metadata + + # byte-swapping preserves and makes the dtype native: + dtype = dtype.newbyteorder() + if dtype.isnative: + # The type does not have byte swapping + return + + res = np.promote_types(dtype, dtype) + + # Metadata is (currently) generally lost on byte-swapping (except for + # unicode. + if dtype.char != "U": + assert res.metadata is None + else: + assert res.metadata == metadata + assert res.isnative + + @pytest.mark.slow + @pytest.mark.filterwarnings('ignore:Promotion of numbers:FutureWarning') + @pytest.mark.parametrize(["dtype1", "dtype2"], + itertools.product( + list(np.typecodes["All"]) + + ["i,i", "S3", "S100", "U3", "U100", rational], + repeat=2)) + def test_promote_types_metadata(self, dtype1, dtype2): + """Metadata handling in promotion does not appear formalized + right now in NumPy. This test should thus be considered to + document behaviour, rather than test the correct definition of it. + + This test is very ugly, it was useful for rewriting part of the + promotion, but probably should eventually be replaced/deleted + (i.e. when metadata handling in promotion is better defined). + """ + metadata1 = {1: 1} + metadata2 = {2: 2} + dtype1 = np.dtype(dtype1, metadata=metadata1) + dtype2 = np.dtype(dtype2, metadata=metadata2) + + try: + res = np.promote_types(dtype1, dtype2) + except TypeError: + # Promotion failed, this test only checks metadata + return + + if res.char not in "USV" or res.names is not None or res.shape != (): + # All except string dtypes (and unstructured void) lose metadata + # on promotion (unless both dtypes are identical). + # At some point structured ones did not, but were restrictive. + assert res.metadata is None + elif res == dtype1: + # If one result is the result, it is usually returned unchanged: + assert res is dtype1 + elif res == dtype2: + # dtype1 may have been cast to the same type/kind as dtype2. 
+ # If the resulting dtype is identical we currently pick the cast + # version of dtype1, which lost the metadata: + if np.promote_types(dtype1, dtype2.kind) == dtype2: + res.metadata is None + else: + res.metadata == metadata2 + else: + assert res.metadata is None + + # Try again for byteswapped version + dtype1 = dtype1.newbyteorder() + assert dtype1.metadata == metadata1 + res_bs = np.promote_types(dtype1, dtype2) + assert res_bs == res + assert res_bs.metadata == res.metadata + + def test_can_cast(self): + assert_(np.can_cast(np.int32, np.int64)) + assert_(np.can_cast(np.float64, complex)) + assert_(not np.can_cast(complex, float)) + + assert_(np.can_cast('i8', 'f8')) + assert_(not np.can_cast('i8', 'f4')) + assert_(np.can_cast('i4', 'S11')) + + assert_(np.can_cast('i8', 'i8', 'no')) + assert_(not np.can_cast('<i8', '>i8', 'no')) + + assert_(np.can_cast('<i8', '>i8', 'equiv')) + assert_(not np.can_cast('<i4', '>i8', 'equiv')) + + assert_(np.can_cast('<i4', '>i8', 'safe')) + assert_(not np.can_cast('<i8', '>i4', 'safe')) + + assert_(np.can_cast('<i8', '>i4', 'same_kind')) + assert_(not np.can_cast('<i8', '>u4', 'same_kind')) + + assert_(np.can_cast('<i8', '>u4', 'unsafe')) + + assert_(np.can_cast('bool', 'S5')) + assert_(not np.can_cast('bool', 'S4')) + + assert_(np.can_cast('b', 'S4')) + assert_(not np.can_cast('b', 'S3')) + + assert_(np.can_cast('u1', 'S3')) + assert_(not np.can_cast('u1', 'S2')) + assert_(np.can_cast('u2', 'S5')) + assert_(not np.can_cast('u2', 'S4')) + assert_(np.can_cast('u4', 'S10')) + assert_(not np.can_cast('u4', 'S9')) + assert_(np.can_cast('u8', 'S20')) + assert_(not np.can_cast('u8', 'S19')) + + assert_(np.can_cast('i1', 'S4')) + assert_(not np.can_cast('i1', 'S3')) + assert_(np.can_cast('i2', 'S6')) + assert_(not np.can_cast('i2', 'S5')) + assert_(np.can_cast('i4', 'S11')) + assert_(not np.can_cast('i4', 'S10')) + assert_(np.can_cast('i8', 'S21')) + assert_(not np.can_cast('i8', 'S20')) + + assert_(np.can_cast('bool', 'S5')) + assert_(not np.can_cast('bool', 'S4')) + + assert_(np.can_cast('b', 'U4')) + assert_(not np.can_cast('b', 'U3')) + + assert_(np.can_cast('u1', 'U3')) + assert_(not np.can_cast('u1', 'U2')) + assert_(np.can_cast('u2', 'U5')) + assert_(not np.can_cast('u2', 'U4')) + assert_(np.can_cast('u4', 'U10')) + assert_(not np.can_cast('u4', 'U9')) + assert_(np.can_cast('u8', 'U20')) + assert_(not np.can_cast('u8', 'U19')) + + assert_(np.can_cast('i1', 'U4')) + assert_(not np.can_cast('i1', 'U3')) + assert_(np.can_cast('i2', 'U6')) + assert_(not np.can_cast('i2', 'U5')) + assert_(np.can_cast('i4', 'U11')) + assert_(not np.can_cast('i4', 'U10')) + assert_(np.can_cast('i8', 'U21')) + assert_(not np.can_cast('i8', 'U20')) + + assert_raises(TypeError, np.can_cast, 'i4', None) + assert_raises(TypeError, np.can_cast, None, 'i4') + + # Also test keyword arguments + assert_(np.can_cast(from_=np.int32, to=np.int64)) + + def test_can_cast_simple_to_structured(self): + # Non-structured can only be cast to structured in 'unsafe' mode. + assert_(not np.can_cast('i4', 'i4,i4')) + assert_(not np.can_cast('i4', 'i4,i2')) + assert_(np.can_cast('i4', 'i4,i4', casting='unsafe')) + assert_(np.can_cast('i4', 'i4,i2', casting='unsafe')) + # Even if there is just a single field which is OK. + assert_(not np.can_cast('i2', [('f1', 'i4')])) + assert_(not np.can_cast('i2', [('f1', 'i4')], casting='same_kind')) + assert_(np.can_cast('i2', [('f1', 'i4')], casting='unsafe')) + # It should be the same for recursive structured or subarrays. 
+ assert_(not np.can_cast('i2', [('f1', 'i4,i4')])) + assert_(np.can_cast('i2', [('f1', 'i4,i4')], casting='unsafe')) + assert_(not np.can_cast('i2', [('f1', '(2,3)i4')])) + assert_(np.can_cast('i2', [('f1', '(2,3)i4')], casting='unsafe')) + + def test_can_cast_structured_to_simple(self): + # Need unsafe casting for structured to simple. + assert_(not np.can_cast([('f1', 'i4')], 'i4')) + assert_(np.can_cast([('f1', 'i4')], 'i4', casting='unsafe')) + assert_(np.can_cast([('f1', 'i4')], 'i2', casting='unsafe')) + # Since it is unclear what is being cast, multiple fields to + # single should not work even for unsafe casting. + assert_(not np.can_cast('i4,i4', 'i4', casting='unsafe')) + # But a single field inside a single field is OK. + assert_(not np.can_cast([('f1', [('x', 'i4')])], 'i4')) + assert_(np.can_cast([('f1', [('x', 'i4')])], 'i4', casting='unsafe')) + # And a subarray is fine too - it will just take the first element + # (arguably not very consistently; might also take the first field). + assert_(not np.can_cast([('f0', '(3,)i4')], 'i4')) + assert_(np.can_cast([('f0', '(3,)i4')], 'i4', casting='unsafe')) + # But a structured subarray with multiple fields should fail. + assert_(not np.can_cast([('f0', ('i4,i4'), (2,))], 'i4', + casting='unsafe')) + + def test_can_cast_values(self): + # gh-5917 + for dt in np.sctypes['int'] + np.sctypes['uint']: + ii = np.iinfo(dt) + assert_(np.can_cast(ii.min, dt)) + assert_(np.can_cast(ii.max, dt)) + assert_(not np.can_cast(ii.min - 1, dt)) + assert_(not np.can_cast(ii.max + 1, dt)) + + for dt in np.sctypes['float']: + fi = np.finfo(dt) + assert_(np.can_cast(fi.min, dt)) + assert_(np.can_cast(fi.max, dt)) + + +# Custom exception class to test exception propagation in fromiter +class NIterError(Exception): + pass + + +class TestFromiter: + def makegen(self): + return (x**2 for x in range(24)) + + def test_types(self): + ai32 = np.fromiter(self.makegen(), np.int32) + ai64 = np.fromiter(self.makegen(), np.int64) + af = np.fromiter(self.makegen(), float) + assert_(ai32.dtype == np.dtype(np.int32)) + assert_(ai64.dtype == np.dtype(np.int64)) + assert_(af.dtype == np.dtype(float)) + + def test_lengths(self): + expected = np.array(list(self.makegen())) + a = np.fromiter(self.makegen(), int) + a20 = np.fromiter(self.makegen(), int, 20) + assert_(len(a) == len(expected)) + assert_(len(a20) == 20) + assert_raises(ValueError, np.fromiter, + self.makegen(), int, len(expected) + 10) + + def test_values(self): + expected = np.array(list(self.makegen())) + a = np.fromiter(self.makegen(), int) + a20 = np.fromiter(self.makegen(), int, 20) + assert_(np.all(a == expected, axis=0)) + assert_(np.all(a20 == expected[:20], axis=0)) + + def load_data(self, n, eindex): + # Utility method for the issue 2592 tests. + # Raise an exception at the desired index in the iterator. + for e in range(n): + if e == eindex: + raise NIterError('error at index %s' % eindex) + yield e + + @pytest.mark.parametrize("dtype", [int, object]) + @pytest.mark.parametrize(["count", "error_index"], [(10, 5), (10, 9)]) + def test_2592(self, count, error_index, dtype): + # Test iteration exceptions are correctly raised. 
The data/generator + # has `count` elements but errors at `error_index` + iterable = self.load_data(count, error_index) + with pytest.raises(NIterError): + np.fromiter(iterable, dtype=dtype, count=count) + + @pytest.mark.parametrize("dtype", ["S", "S0", "V0", "U0"]) + def test_empty_not_structured(self, dtype): + # Note, "S0" could be allowed at some point, so long "S" (without + # any length) is rejected. + with pytest.raises(ValueError, match="Must specify length"): + np.fromiter([], dtype=dtype) + + @pytest.mark.parametrize(["dtype", "data"], + [("d", [1, 2, 3, 4, 5, 6, 7, 8, 9]), + ("O", [1, 2, 3, 4, 5, 6, 7, 8, 9]), + ("i,O", [(1, 2), (5, 4), (2, 3), (9, 8), (6, 7)]), + # subarray dtypes (important because their dimensions end up + # in the result arrays dimension: + ("2i", [(1, 2), (5, 4), (2, 3), (9, 8), (6, 7)]), + (np.dtype(("O", (2, 3))), + [((1, 2, 3), (3, 4, 5)), ((3, 2, 1), (5, 4, 3))])]) + @pytest.mark.parametrize("length_hint", [0, 1]) + def test_growth_and_complicated_dtypes(self, dtype, data, length_hint): + dtype = np.dtype(dtype) + + data = data * 100 # make sure we realloc a bit + + class MyIter: + # Class/example from gh-15789 + def __length_hint__(self): + # only required to be an estimate, this is legal + return length_hint # 0 or 1 + + def __iter__(self): + return iter(data) + + res = np.fromiter(MyIter(), dtype=dtype) + expected = np.array(data, dtype=dtype) + + assert_array_equal(res, expected) + + def test_empty_result(self): + class MyIter: + def __length_hint__(self): + return 10 + + def __iter__(self): + return iter([]) # actual iterator is empty. + + res = np.fromiter(MyIter(), dtype="d") + assert res.shape == (0,) + assert res.dtype == "d" + + def test_too_few_items(self): + msg = "iterator too short: Expected 10 but iterator had only 3 items." 
+ with pytest.raises(ValueError, match=msg): + np.fromiter([1, 2, 3], count=10, dtype=int) + + def test_failed_itemsetting(self): + with pytest.raises(TypeError): + np.fromiter([1, None, 3], dtype=int) + + # The following manages to hit somewhat trickier code paths: + iterable = ((2, 3, 4) for i in range(5)) + with pytest.raises(ValueError): + np.fromiter(iterable, dtype=np.dtype((int, 2))) + +class TestNonzero: + def test_nonzero_trivial(self): + assert_equal(np.count_nonzero(np.array([])), 0) + assert_equal(np.count_nonzero(np.array([], dtype='?')), 0) + assert_equal(np.nonzero(np.array([])), ([],)) + + assert_equal(np.count_nonzero(np.array([0])), 0) + assert_equal(np.count_nonzero(np.array([0], dtype='?')), 0) + assert_equal(np.nonzero(np.array([0])), ([],)) + + assert_equal(np.count_nonzero(np.array([1])), 1) + assert_equal(np.count_nonzero(np.array([1], dtype='?')), 1) + assert_equal(np.nonzero(np.array([1])), ([0],)) + + def test_nonzero_zerod(self): + assert_equal(np.count_nonzero(np.array(0)), 0) + assert_equal(np.count_nonzero(np.array(0, dtype='?')), 0) + with assert_warns(DeprecationWarning): + assert_equal(np.nonzero(np.array(0)), ([],)) + + assert_equal(np.count_nonzero(np.array(1)), 1) + assert_equal(np.count_nonzero(np.array(1, dtype='?')), 1) + with assert_warns(DeprecationWarning): + assert_equal(np.nonzero(np.array(1)), ([0],)) + + def test_nonzero_onedim(self): + x = np.array([1, 0, 2, -1, 0, 0, 8]) + assert_equal(np.count_nonzero(x), 4) + assert_equal(np.count_nonzero(x), 4) + assert_equal(np.nonzero(x), ([0, 2, 3, 6],)) + + # x = np.array([(1, 2), (0, 0), (1, 1), (-1, 3), (0, 7)], + # dtype=[('a', 'i4'), ('b', 'i2')]) + x = np.array([(1, 2, -5, -3), (0, 0, 2, 7), (1, 1, 0, 1), (-1, 3, 1, 0), (0, 7, 0, 4)], + dtype=[('a', 'i4'), ('b', 'i2'), ('c', 'i1'), ('d', 'i8')]) + assert_equal(np.count_nonzero(x['a']), 3) + assert_equal(np.count_nonzero(x['b']), 4) + assert_equal(np.count_nonzero(x['c']), 3) + assert_equal(np.count_nonzero(x['d']), 4) + assert_equal(np.nonzero(x['a']), ([0, 2, 3],)) + assert_equal(np.nonzero(x['b']), ([0, 2, 3, 4],)) + + def test_nonzero_twodim(self): + x = np.array([[0, 1, 0], [2, 0, 3]]) + assert_equal(np.count_nonzero(x.astype('i1')), 3) + assert_equal(np.count_nonzero(x.astype('i2')), 3) + assert_equal(np.count_nonzero(x.astype('i4')), 3) + assert_equal(np.count_nonzero(x.astype('i8')), 3) + assert_equal(np.nonzero(x), ([0, 1, 1], [1, 0, 2])) + + x = np.eye(3) + assert_equal(np.count_nonzero(x.astype('i1')), 3) + assert_equal(np.count_nonzero(x.astype('i2')), 3) + assert_equal(np.count_nonzero(x.astype('i4')), 3) + assert_equal(np.count_nonzero(x.astype('i8')), 3) + assert_equal(np.nonzero(x), ([0, 1, 2], [0, 1, 2])) + + x = np.array([[(0, 1), (0, 0), (1, 11)], + [(1, 1), (1, 0), (0, 0)], + [(0, 0), (1, 5), (0, 1)]], dtype=[('a', 'f4'), ('b', 'u1')]) + assert_equal(np.count_nonzero(x['a']), 4) + assert_equal(np.count_nonzero(x['b']), 5) + assert_equal(np.nonzero(x['a']), ([0, 1, 1, 2], [2, 0, 1, 1])) + assert_equal(np.nonzero(x['b']), ([0, 0, 1, 2, 2], [0, 2, 0, 1, 2])) + + assert_(not x['a'].T.flags.aligned) + assert_equal(np.count_nonzero(x['a'].T), 4) + assert_equal(np.count_nonzero(x['b'].T), 5) + assert_equal(np.nonzero(x['a'].T), ([0, 1, 1, 2], [1, 1, 2, 0])) + assert_equal(np.nonzero(x['b'].T), ([0, 0, 1, 2, 2], [0, 1, 2, 0, 2])) + + def test_sparse(self): + # test special sparse condition boolean code path + for i in range(20): + c = np.zeros(200, dtype=bool) + c[i::20] = True + assert_equal(np.nonzero(c)[0], np.arange(i, 200 + i, 
20)) + + c = np.zeros(400, dtype=bool) + c[10 + i:20 + i] = True + c[20 + i*2] = True + assert_equal(np.nonzero(c)[0], + np.concatenate((np.arange(10 + i, 20 + i), [20 + i*2]))) + + def test_return_type(self): + class C(np.ndarray): + pass + + for view in (C, np.ndarray): + for nd in range(1, 4): + shape = tuple(range(2, 2+nd)) + x = np.arange(np.prod(shape)).reshape(shape).view(view) + for nzx in (np.nonzero(x), x.nonzero()): + for nzx_i in nzx: + assert_(type(nzx_i) is np.ndarray) + assert_(nzx_i.flags.writeable) + + def test_count_nonzero_axis(self): + # Basic check of functionality + m = np.array([[0, 1, 7, 0, 0], [3, 0, 0, 2, 19]]) + + expected = np.array([1, 1, 1, 1, 1]) + assert_equal(np.count_nonzero(m, axis=0), expected) + + expected = np.array([2, 3]) + assert_equal(np.count_nonzero(m, axis=1), expected) + + assert_raises(ValueError, np.count_nonzero, m, axis=(1, 1)) + assert_raises(TypeError, np.count_nonzero, m, axis='foo') + assert_raises(np.AxisError, np.count_nonzero, m, axis=3) + assert_raises(TypeError, np.count_nonzero, + m, axis=np.array([[1], [2]])) + + def test_count_nonzero_axis_all_dtypes(self): + # More thorough test that the axis argument is respected + # for all dtypes and responds correctly when presented with + # either integer or tuple arguments for axis + msg = "Mismatch for dtype: %s" + + def assert_equal_w_dt(a, b, err_msg): + assert_equal(a.dtype, b.dtype, err_msg=err_msg) + assert_equal(a, b, err_msg=err_msg) + + for dt in np.typecodes['All']: + err_msg = msg % (np.dtype(dt).name,) + + if dt != 'V': + if dt != 'M': + m = np.zeros((3, 3), dtype=dt) + n = np.ones(1, dtype=dt) + + m[0, 0] = n[0] + m[1, 0] = n[0] + + else: # np.zeros doesn't work for np.datetime64 + m = np.array(['1970-01-01'] * 9) + m = m.reshape((3, 3)) + + m[0, 0] = '1970-01-12' + m[1, 0] = '1970-01-12' + m = m.astype(dt) + + expected = np.array([2, 0, 0], dtype=np.intp) + assert_equal_w_dt(np.count_nonzero(m, axis=0), + expected, err_msg=err_msg) + + expected = np.array([1, 1, 0], dtype=np.intp) + assert_equal_w_dt(np.count_nonzero(m, axis=1), + expected, err_msg=err_msg) + + expected = np.array(2) + assert_equal(np.count_nonzero(m, axis=(0, 1)), + expected, err_msg=err_msg) + assert_equal(np.count_nonzero(m, axis=None), + expected, err_msg=err_msg) + assert_equal(np.count_nonzero(m), + expected, err_msg=err_msg) + + if dt == 'V': + # There are no 'nonzero' objects for np.void, so the testing + # setup is slightly different for this dtype + m = np.array([np.void(1)] * 6).reshape((2, 3)) + + expected = np.array([0, 0, 0], dtype=np.intp) + assert_equal_w_dt(np.count_nonzero(m, axis=0), + expected, err_msg=err_msg) + + expected = np.array([0, 0], dtype=np.intp) + assert_equal_w_dt(np.count_nonzero(m, axis=1), + expected, err_msg=err_msg) + + expected = np.array(0) + assert_equal(np.count_nonzero(m, axis=(0, 1)), + expected, err_msg=err_msg) + assert_equal(np.count_nonzero(m, axis=None), + expected, err_msg=err_msg) + assert_equal(np.count_nonzero(m), + expected, err_msg=err_msg) + + def test_count_nonzero_axis_consistent(self): + # Check that the axis behaviour for valid axes in + # non-special cases is consistent (and therefore + # correct) by checking it against an integer array + # that is then casted to the generic object dtype + from itertools import combinations, permutations + + axis = (0, 1, 2, 3) + size = (5, 5, 5, 5) + msg = "Mismatch for axis: %s" + + rng = np.random.RandomState(1234) + m = rng.randint(-100, 100, size=size) + n = m.astype(object) + + for length in range(len(axis)): 
+ for combo in combinations(axis, length): + for perm in permutations(combo): + assert_equal( + np.count_nonzero(m, axis=perm), + np.count_nonzero(n, axis=perm), + err_msg=msg % (perm,)) + + def test_countnonzero_axis_empty(self): + a = np.array([[0, 0, 1], [1, 0, 1]]) + assert_equal(np.count_nonzero(a, axis=()), a.astype(bool)) + + def test_countnonzero_keepdims(self): + a = np.array([[0, 0, 1, 0], + [0, 3, 5, 0], + [7, 9, 2, 0]]) + assert_equal(np.count_nonzero(a, axis=0, keepdims=True), + [[1, 2, 3, 0]]) + assert_equal(np.count_nonzero(a, axis=1, keepdims=True), + [[1], [2], [3]]) + assert_equal(np.count_nonzero(a, keepdims=True), + [[6]]) + + def test_array_method(self): + # Tests that the array method + # call to nonzero works + m = np.array([[1, 0, 0], [4, 0, 6]]) + tgt = [[0, 1, 1], [0, 0, 2]] + + assert_equal(m.nonzero(), tgt) + + def test_nonzero_invalid_object(self): + # gh-9295 + a = np.array([np.array([1, 2]), 3], dtype=object) + assert_raises(ValueError, np.nonzero, a) + + class BoolErrors: + def __bool__(self): + raise ValueError("Not allowed") + + assert_raises(ValueError, np.nonzero, np.array([BoolErrors()])) + + def test_nonzero_sideeffect_safety(self): + # gh-13631 + class FalseThenTrue: + _val = False + def __bool__(self): + try: + return self._val + finally: + self._val = True + + class TrueThenFalse: + _val = True + def __bool__(self): + try: + return self._val + finally: + self._val = False + + # result grows on the second pass + a = np.array([True, FalseThenTrue()]) + assert_raises(RuntimeError, np.nonzero, a) + + a = np.array([[True], [FalseThenTrue()]]) + assert_raises(RuntimeError, np.nonzero, a) + + # result shrinks on the second pass + a = np.array([False, TrueThenFalse()]) + assert_raises(RuntimeError, np.nonzero, a) + + a = np.array([[False], [TrueThenFalse()]]) + assert_raises(RuntimeError, np.nonzero, a) + + def test_nonzero_sideffects_structured_void(self): + # Checks that structured void does not mutate alignment flag of + # original array. + arr = np.zeros(5, dtype="i1,i8,i8") # `ones` may short-circuit + assert arr.flags.aligned # structs are considered "aligned" + assert not arr["f2"].flags.aligned + # make sure that nonzero/count_nonzero do not flip the flag: + np.nonzero(arr) + assert arr.flags.aligned + np.count_nonzero(arr) + assert arr.flags.aligned + + def test_nonzero_exception_safe(self): + # gh-13930 + + class ThrowsAfter: + def __init__(self, iters): + self.iters_left = iters + + def __bool__(self): + if self.iters_left == 0: + raise ValueError("called `iters` times") + + self.iters_left -= 1 + return True + + """ + Test that a ValueError is raised instead of a SystemError + + If the __bool__ function is called after the error state is set, + Python (cpython) will raise a SystemError. + """ + + # assert that an exception in first pass is handled correctly + a = np.array([ThrowsAfter(5)]*10) + assert_raises(ValueError, np.nonzero, a) + + # raise exception in second pass for 1-dimensional loop + a = np.array([ThrowsAfter(15)]*10) + assert_raises(ValueError, np.nonzero, a) + + # raise exception in second pass for n-dimensional loop + a = np.array([[ThrowsAfter(15)]]*10) + assert_raises(ValueError, np.nonzero, a) + + @pytest.mark.skipif(IS_WASM, reason="wasm doesn't have threads") + def test_structured_threadsafety(self): + # Nonzero (and some other functions) should be threadsafe for + # structured datatypes, see gh-15387. This test can behave randomly. 
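# A small, self-contained illustration of the axis/keepdims semantics relied on
# by test_countnonzero_keepdims and test_countnonzero_axis_empty above; the 3x4
# array and the per-axis counts are taken from test_countnonzero_keepdims.

import numpy as np

a = np.array([[0, 0, 1, 0],
              [0, 3, 5, 0],
              [7, 9, 2, 0]])

assert np.count_nonzero(a, axis=0).tolist() == [1, 2, 3, 0]                    # per column
assert np.count_nonzero(a, axis=1, keepdims=True).tolist() == [[1], [2], [3]]  # per row, axis kept
assert np.count_nonzero(a) == 6                                                # full reduction
# axis=() reduces over nothing, so each element is tested on its own:
assert (np.count_nonzero(a, axis=()) == a.astype(bool)).all()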
+ from concurrent.futures import ThreadPoolExecutor + + # Create a deeply nested dtype to make a failure more likely: + dt = np.dtype([("", "f8")]) + dt = np.dtype([("", dt)]) + dt = np.dtype([("", dt)] * 2) + # The array should be large enough to likely run into threading issues + arr = np.random.uniform(size=(5000, 4)).view(dt)[:, 0] + def func(arr): + arr.nonzero() + + tpe = ThreadPoolExecutor(max_workers=8) + futures = [tpe.submit(func, arr) for _ in range(10)] + for f in futures: + f.result() + + assert arr.dtype is dt + + +class TestIndex: + def test_boolean(self): + a = rand(3, 5, 8) + V = rand(5, 8) + g1 = randint(0, 5, size=15) + g2 = randint(0, 8, size=15) + V[g1, g2] = -V[g1, g2] + assert_((np.array([a[0][V > 0], a[1][V > 0], a[2][V > 0]]) == a[:, V > 0]).all()) + + def test_boolean_edgecase(self): + a = np.array([], dtype='int32') + b = np.array([], dtype='bool') + c = a[b] + assert_equal(c, []) + assert_equal(c.dtype, np.dtype('int32')) + + +class TestBinaryRepr: + def test_zero(self): + assert_equal(np.binary_repr(0), '0') + + def test_positive(self): + assert_equal(np.binary_repr(10), '1010') + assert_equal(np.binary_repr(12522), + '11000011101010') + assert_equal(np.binary_repr(10736848), + '101000111101010011010000') + + def test_negative(self): + assert_equal(np.binary_repr(-1), '-1') + assert_equal(np.binary_repr(-10), '-1010') + assert_equal(np.binary_repr(-12522), + '-11000011101010') + assert_equal(np.binary_repr(-10736848), + '-101000111101010011010000') + + def test_sufficient_width(self): + assert_equal(np.binary_repr(0, width=5), '00000') + assert_equal(np.binary_repr(10, width=7), '0001010') + assert_equal(np.binary_repr(-5, width=7), '1111011') + + def test_neg_width_boundaries(self): + # see gh-8670 + + # Ensure that the example in the issue does not + # break before proceeding to a more thorough test. + assert_equal(np.binary_repr(-128, width=8), '10000000') + + for width in range(1, 11): + num = -2**(width - 1) + exp = '1' + (width - 1) * '0' + assert_equal(np.binary_repr(num, width=width), exp) + + def test_large_neg_int64(self): + # See gh-14289. 
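# A standalone sketch of the two's-complement behaviour that TestBinaryRepr
# checks above: with an explicit width, negative numbers are rendered as bit
# patterns rather than with a leading minus sign.  All values reuse cases
# asserted above except the width-4 example, which follows the same
# -2**(width - 1) pattern as test_neg_width_boundaries.

import numpy as np

assert np.binary_repr(-10) == '-1010'                # no width: explicit sign
assert np.binary_repr(-5, width=7) == '1111011'      # with width: two's complement
assert np.binary_repr(-128, width=8) == '10000000'   # most negative 8-bit value
assert np.binary_repr(-2**3, width=4) == '1000'      # -2**(width - 1) in general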
+ assert_equal(np.binary_repr(np.int64(-2**62), width=64), + '11' + '0'*62) + + +class TestBaseRepr: + def test_base3(self): + assert_equal(np.base_repr(3**5, 3), '100000') + + def test_positive(self): + assert_equal(np.base_repr(12, 10), '12') + assert_equal(np.base_repr(12, 10, 4), '000012') + assert_equal(np.base_repr(12, 4), '30') + assert_equal(np.base_repr(3731624803700888, 36), '10QR0ROFCEW') + + def test_negative(self): + assert_equal(np.base_repr(-12, 10), '-12') + assert_equal(np.base_repr(-12, 10, 4), '-000012') + assert_equal(np.base_repr(-12, 4), '-30') + + def test_base_range(self): + with assert_raises(ValueError): + np.base_repr(1, 1) + with assert_raises(ValueError): + np.base_repr(1, 37) + + +class TestArrayComparisons: + def test_array_equal(self): + res = np.array_equal(np.array([1, 2]), np.array([1, 2])) + assert_(res) + assert_(type(res) is bool) + res = np.array_equal(np.array([1, 2]), np.array([1, 2, 3])) + assert_(not res) + assert_(type(res) is bool) + res = np.array_equal(np.array([1, 2]), np.array([3, 4])) + assert_(not res) + assert_(type(res) is bool) + res = np.array_equal(np.array([1, 2]), np.array([1, 3])) + assert_(not res) + assert_(type(res) is bool) + res = np.array_equal(np.array(['a'], dtype='S1'), np.array(['a'], dtype='S1')) + assert_(res) + assert_(type(res) is bool) + res = np.array_equal(np.array([('a', 1)], dtype='S1,u4'), + np.array([('a', 1)], dtype='S1,u4')) + assert_(res) + assert_(type(res) is bool) + + def test_array_equal_equal_nan(self): + # Test array_equal with equal_nan kwarg + a1 = np.array([1, 2, np.nan]) + a2 = np.array([1, np.nan, 2]) + a3 = np.array([1, 2, np.inf]) + + # equal_nan=False by default + assert_(not np.array_equal(a1, a1)) + assert_(np.array_equal(a1, a1, equal_nan=True)) + assert_(not np.array_equal(a1, a2, equal_nan=True)) + # nan's not conflated with inf's + assert_(not np.array_equal(a1, a3, equal_nan=True)) + # 0-D arrays + a = np.array(np.nan) + assert_(not np.array_equal(a, a)) + assert_(np.array_equal(a, a, equal_nan=True)) + # Non-float dtype - equal_nan should have no effect + a = np.array([1, 2, 3], dtype=int) + assert_(np.array_equal(a, a)) + assert_(np.array_equal(a, a, equal_nan=True)) + # Multi-dimensional array + a = np.array([[0, 1], [np.nan, 1]]) + assert_(not np.array_equal(a, a)) + assert_(np.array_equal(a, a, equal_nan=True)) + # Complex values + a, b = [np.array([1 + 1j])]*2 + a.real, b.imag = np.nan, np.nan + assert_(not np.array_equal(a, b, equal_nan=False)) + assert_(np.array_equal(a, b, equal_nan=True)) + + def test_none_compares_elementwise(self): + a = np.array([None, 1, None], dtype=object) + assert_equal(a == None, [True, False, True]) + assert_equal(a != None, [False, True, False]) + + a = np.ones(3) + assert_equal(a == None, [False, False, False]) + assert_equal(a != None, [True, True, True]) + + def test_array_equiv(self): + res = np.array_equiv(np.array([1, 2]), np.array([1, 2])) + assert_(res) + assert_(type(res) is bool) + res = np.array_equiv(np.array([1, 2]), np.array([1, 2, 3])) + assert_(not res) + assert_(type(res) is bool) + res = np.array_equiv(np.array([1, 2]), np.array([3, 4])) + assert_(not res) + assert_(type(res) is bool) + res = np.array_equiv(np.array([1, 2]), np.array([1, 3])) + assert_(not res) + assert_(type(res) is bool) + + res = np.array_equiv(np.array([1, 1]), np.array([1])) + assert_(res) + assert_(type(res) is bool) + res = np.array_equiv(np.array([1, 1]), np.array([[1], [1]])) + assert_(res) + assert_(type(res) is bool) + res = np.array_equiv(np.array([1, 
2]), np.array([2])) + assert_(not res) + assert_(type(res) is bool) + res = np.array_equiv(np.array([1, 2]), np.array([[1], [2]])) + assert_(not res) + assert_(type(res) is bool) + res = np.array_equiv(np.array([1, 2]), np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])) + assert_(not res) + assert_(type(res) is bool) + + @pytest.mark.parametrize("dtype", ["V0", "V3", "V10"]) + def test_compare_unstructured_voids(self, dtype): + zeros = np.zeros(3, dtype=dtype) + + assert_array_equal(zeros, zeros) + assert not (zeros != zeros).any() + + if dtype == "V0": + # Can't test != of actually different data + return + + nonzeros = np.array([b"1", b"2", b"3"], dtype=dtype) + + assert not (zeros == nonzeros).any() + assert (zeros != nonzeros).all() + + +def assert_array_strict_equal(x, y): + assert_array_equal(x, y) + # Check flags, 32 bit arches typically don't provide 16 byte alignment + if ((x.dtype.alignment <= 8 or + np.intp().dtype.itemsize != 4) and + sys.platform != 'win32'): + assert_(x.flags == y.flags) + else: + assert_(x.flags.owndata == y.flags.owndata) + assert_(x.flags.writeable == y.flags.writeable) + assert_(x.flags.c_contiguous == y.flags.c_contiguous) + assert_(x.flags.f_contiguous == y.flags.f_contiguous) + assert_(x.flags.writebackifcopy == y.flags.writebackifcopy) + # check endianness + assert_(x.dtype.isnative == y.dtype.isnative) + + +class TestClip: + def setup_method(self): + self.nr = 5 + self.nc = 3 + + def fastclip(self, a, m, M, out=None, **kwargs): + return a.clip(m, M, out=out, **kwargs) + + def clip(self, a, m, M, out=None): + # use a.choose to verify fastclip result + selector = np.less(a, m) + 2*np.greater(a, M) + return selector.choose((a, m, M), out=out) + + # Handy functions + def _generate_data(self, n, m): + return randn(n, m) + + def _generate_data_complex(self, n, m): + return randn(n, m) + 1.j * rand(n, m) + + def _generate_flt_data(self, n, m): + return (randn(n, m)).astype(np.float32) + + def _neg_byteorder(self, a): + a = np.asarray(a) + if sys.byteorder == 'little': + a = a.astype(a.dtype.newbyteorder('>')) + else: + a = a.astype(a.dtype.newbyteorder('<')) + return a + + def _generate_non_native_data(self, n, m): + data = randn(n, m) + data = self._neg_byteorder(data) + assert_(not data.dtype.isnative) + return data + + def _generate_int_data(self, n, m): + return (10 * rand(n, m)).astype(np.int64) + + def _generate_int32_data(self, n, m): + return (10 * rand(n, m)).astype(np.int32) + + # Now the real test cases + + @pytest.mark.parametrize("dtype", '?bhilqpBHILQPefdgFDGO') + def test_ones_pathological(self, dtype): + # for preservation of behavior described in + # gh-12519; amin > amax behavior may still change + # in the future + arr = np.ones(10, dtype=dtype) + expected = np.zeros(10, dtype=dtype) + actual = np.clip(arr, 1, 0) + if dtype == 'O': + assert actual.tolist() == expected.tolist() + else: + assert_equal(actual, expected) + + def test_simple_double(self): + # Test native double input with scalar min/max. + a = self._generate_data(self.nr, self.nc) + m = 0.1 + M = 0.6 + ac = self.fastclip(a, m, M) + act = self.clip(a, m, M) + assert_array_strict_equal(ac, act) + + def test_simple_int(self): + # Test native int input with scalar min/max. + a = self._generate_int_data(self.nr, self.nc) + a = a.astype(int) + m = -2 + M = 4 + ac = self.fastclip(a, m, M) + act = self.clip(a, m, M) + assert_array_strict_equal(ac, act) + + def test_array_double(self): + # Test native double input with array min/max. 
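# The clip() helper defined above rebuilds clipping out of choose(): the
# selector is 0 where a value is already in range, 1 where it falls below the
# lower bound and 2 where it exceeds the upper bound, and choose() then picks
# from (a, m, M) accordingly.  A tiny walk-through; the numbers below are
# illustrative and not taken from the tests.

import numpy as np

a = np.array([-2.0, 0.3, 0.9, 5.0])
m, M = 0.0, 1.0

selector = np.less(a, m) + 2 * np.greater(a, M)   # [1, 0, 0, 2]
reference = selector.choose((a, m, M))            # picks m, a, a, M

assert selector.tolist() == [1, 0, 0, 2]
assert reference.tolist() == [0.0, 0.3, 0.9, 1.0]
assert np.array_equal(reference, np.clip(a, m, M))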
+ a = self._generate_data(self.nr, self.nc) + m = np.zeros(a.shape) + M = m + 0.5 + ac = self.fastclip(a, m, M) + act = self.clip(a, m, M) + assert_array_strict_equal(ac, act) + + def test_simple_nonnative(self): + # Test non native double input with scalar min/max. + # Test native double input with non native double scalar min/max. + a = self._generate_non_native_data(self.nr, self.nc) + m = -0.5 + M = 0.6 + ac = self.fastclip(a, m, M) + act = self.clip(a, m, M) + assert_array_equal(ac, act) + + # Test native double input with non native double scalar min/max. + a = self._generate_data(self.nr, self.nc) + m = -0.5 + M = self._neg_byteorder(0.6) + assert_(not M.dtype.isnative) + ac = self.fastclip(a, m, M) + act = self.clip(a, m, M) + assert_array_equal(ac, act) + + def test_simple_complex(self): + # Test native complex input with native double scalar min/max. + # Test native input with complex double scalar min/max. + a = 3 * self._generate_data_complex(self.nr, self.nc) + m = -0.5 + M = 1. + ac = self.fastclip(a, m, M) + act = self.clip(a, m, M) + assert_array_strict_equal(ac, act) + + # Test native input with complex double scalar min/max. + a = 3 * self._generate_data(self.nr, self.nc) + m = -0.5 + 1.j + M = 1. + 2.j + ac = self.fastclip(a, m, M) + act = self.clip(a, m, M) + assert_array_strict_equal(ac, act) + + def test_clip_complex(self): + # Address Issue gh-5354 for clipping complex arrays + # Test native complex input without explicit min/max + # ie, either min=None or max=None + a = np.ones(10, dtype=complex) + m = a.min() + M = a.max() + am = self.fastclip(a, m, None) + aM = self.fastclip(a, None, M) + assert_array_strict_equal(am, a) + assert_array_strict_equal(aM, a) + + def test_clip_non_contig(self): + # Test clip for non contiguous native input and native scalar min/max. + a = self._generate_data(self.nr * 2, self.nc * 3) + a = a[::2, ::3] + assert_(not a.flags['F_CONTIGUOUS']) + assert_(not a.flags['C_CONTIGUOUS']) + ac = self.fastclip(a, -1.6, 1.7) + act = self.clip(a, -1.6, 1.7) + assert_array_strict_equal(ac, act) + + def test_simple_out(self): + # Test native double input with scalar min/max. + a = self._generate_data(self.nr, self.nc) + m = -0.5 + M = 0.6 + ac = np.zeros(a.shape) + act = np.zeros(a.shape) + self.fastclip(a, m, M, ac) + self.clip(a, m, M, act) + assert_array_strict_equal(ac, act) + + @pytest.mark.parametrize("casting", [None, "unsafe"]) + def test_simple_int32_inout(self, casting): + # Test native int32 input with double min/max and int32 out. + a = self._generate_int32_data(self.nr, self.nc) + m = np.float64(0) + M = np.float64(2) + ac = np.zeros(a.shape, dtype=np.int32) + act = ac.copy() + if casting is None: + with pytest.raises(TypeError): + self.fastclip(a, m, M, ac, casting=casting) + else: + # explicitly passing "unsafe" will silence warning + self.fastclip(a, m, M, ac, casting=casting) + self.clip(a, m, M, act) + assert_array_strict_equal(ac, act) + + def test_simple_int64_out(self): + # Test native int32 input with int32 scalar min/max and int64 out. + a = self._generate_int32_data(self.nr, self.nc) + m = np.int32(-1) + M = np.int32(1) + ac = np.zeros(a.shape, dtype=np.int64) + act = ac.copy() + self.fastclip(a, m, M, ac) + self.clip(a, m, M, act) + assert_array_strict_equal(ac, act) + + def test_simple_int64_inout(self): + # Test native int32 input with double array min/max and int32 out. 
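# test_simple_int32_inout above leans on the ufunc-style `casting` argument of
# clip: writing float64 bounds into an int32 `out` needs an implicit downcast,
# which the default casting rule refuses.  A compact sketch with illustrative
# values (the arrays below are not taken from the tests):

import numpy as np

a = np.arange(6, dtype=np.int32)
out = np.zeros_like(a)                       # int32 output buffer
m, M = np.float64(1), np.float64(4)

try:
    a.clip(m, M, out=out)                    # float64 result into int32 out
except TypeError:
    pass                                     # rejected under the default casting
else:
    raise AssertionError("expected a TypeError for the implicit downcast")

a.clip(m, M, out=out, casting="unsafe")      # explicit opt-in works
assert out.tolist() == [1, 1, 2, 3, 4, 4]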
+ a = self._generate_int32_data(self.nr, self.nc) + m = np.zeros(a.shape, np.float64) + M = np.float64(1) + ac = np.zeros(a.shape, dtype=np.int32) + act = ac.copy() + self.fastclip(a, m, M, out=ac, casting="unsafe") + self.clip(a, m, M, act) + assert_array_strict_equal(ac, act) + + def test_simple_int32_out(self): + # Test native double input with scalar min/max and int out. + a = self._generate_data(self.nr, self.nc) + m = -1.0 + M = 2.0 + ac = np.zeros(a.shape, dtype=np.int32) + act = ac.copy() + self.fastclip(a, m, M, out=ac, casting="unsafe") + self.clip(a, m, M, act) + assert_array_strict_equal(ac, act) + + def test_simple_inplace_01(self): + # Test native double input with array min/max in-place. + a = self._generate_data(self.nr, self.nc) + ac = a.copy() + m = np.zeros(a.shape) + M = 1.0 + self.fastclip(a, m, M, a) + self.clip(a, m, M, ac) + assert_array_strict_equal(a, ac) + + def test_simple_inplace_02(self): + # Test native double input with scalar min/max in-place. + a = self._generate_data(self.nr, self.nc) + ac = a.copy() + m = -0.5 + M = 0.6 + self.fastclip(a, m, M, a) + self.clip(ac, m, M, ac) + assert_array_strict_equal(a, ac) + + def test_noncontig_inplace(self): + # Test non contiguous double input with double scalar min/max in-place. + a = self._generate_data(self.nr * 2, self.nc * 3) + a = a[::2, ::3] + assert_(not a.flags['F_CONTIGUOUS']) + assert_(not a.flags['C_CONTIGUOUS']) + ac = a.copy() + m = -0.5 + M = 0.6 + self.fastclip(a, m, M, a) + self.clip(ac, m, M, ac) + assert_array_equal(a, ac) + + def test_type_cast_01(self): + # Test native double input with scalar min/max. + a = self._generate_data(self.nr, self.nc) + m = -0.5 + M = 0.6 + ac = self.fastclip(a, m, M) + act = self.clip(a, m, M) + assert_array_strict_equal(ac, act) + + def test_type_cast_02(self): + # Test native int32 input with int32 scalar min/max. + a = self._generate_int_data(self.nr, self.nc) + a = a.astype(np.int32) + m = -2 + M = 4 + ac = self.fastclip(a, m, M) + act = self.clip(a, m, M) + assert_array_strict_equal(ac, act) + + def test_type_cast_03(self): + # Test native int32 input with float64 scalar min/max. + a = self._generate_int32_data(self.nr, self.nc) + m = -2 + M = 4 + ac = self.fastclip(a, np.float64(m), np.float64(M)) + act = self.clip(a, np.float64(m), np.float64(M)) + assert_array_strict_equal(ac, act) + + def test_type_cast_04(self): + # Test native int32 input with float32 scalar min/max. + a = self._generate_int32_data(self.nr, self.nc) + m = np.float32(-2) + M = np.float32(4) + act = self.fastclip(a, m, M) + ac = self.clip(a, m, M) + assert_array_strict_equal(ac, act) + + def test_type_cast_05(self): + # Test native int32 with double arrays min/max. + a = self._generate_int_data(self.nr, self.nc) + m = -0.5 + M = 1. + ac = self.fastclip(a, m * np.zeros(a.shape), M) + act = self.clip(a, m * np.zeros(a.shape), M) + assert_array_strict_equal(ac, act) + + def test_type_cast_06(self): + # Test native with NON native scalar min/max. + a = self._generate_data(self.nr, self.nc) + m = 0.5 + m_s = self._neg_byteorder(m) + M = 1. + act = self.clip(a, m_s, M) + ac = self.fastclip(a, m_s, M) + assert_array_strict_equal(ac, act) + + def test_type_cast_07(self): + # Test NON native with native array min/max. + a = self._generate_data(self.nr, self.nc) + m = -0.5 * np.ones(a.shape) + M = 1. 
+ a_s = self._neg_byteorder(a) + assert_(not a_s.dtype.isnative) + act = a_s.clip(m, M) + ac = self.fastclip(a_s, m, M) + assert_array_strict_equal(ac, act) + + def test_type_cast_08(self): + # Test NON native with native scalar min/max. + a = self._generate_data(self.nr, self.nc) + m = -0.5 + M = 1. + a_s = self._neg_byteorder(a) + assert_(not a_s.dtype.isnative) + ac = self.fastclip(a_s, m, M) + act = a_s.clip(m, M) + assert_array_strict_equal(ac, act) + + def test_type_cast_09(self): + # Test native with NON native array min/max. + a = self._generate_data(self.nr, self.nc) + m = -0.5 * np.ones(a.shape) + M = 1. + m_s = self._neg_byteorder(m) + assert_(not m_s.dtype.isnative) + ac = self.fastclip(a, m_s, M) + act = self.clip(a, m_s, M) + assert_array_strict_equal(ac, act) + + def test_type_cast_10(self): + # Test native int32 with float min/max and float out for output argument. + a = self._generate_int_data(self.nr, self.nc) + b = np.zeros(a.shape, dtype=np.float32) + m = np.float32(-0.5) + M = np.float32(1) + act = self.clip(a, m, M, out=b) + ac = self.fastclip(a, m, M, out=b) + assert_array_strict_equal(ac, act) + + def test_type_cast_11(self): + # Test non native with native scalar, min/max, out non native + a = self._generate_non_native_data(self.nr, self.nc) + b = a.copy() + b = b.astype(b.dtype.newbyteorder('>')) + bt = b.copy() + m = -0.5 + M = 1. + self.fastclip(a, m, M, out=b) + self.clip(a, m, M, out=bt) + assert_array_strict_equal(b, bt) + + def test_type_cast_12(self): + # Test native int32 input and min/max and float out + a = self._generate_int_data(self.nr, self.nc) + b = np.zeros(a.shape, dtype=np.float32) + m = np.int32(0) + M = np.int32(1) + act = self.clip(a, m, M, out=b) + ac = self.fastclip(a, m, M, out=b) + assert_array_strict_equal(ac, act) + + def test_clip_with_out_simple(self): + # Test native double input with scalar min/max + a = self._generate_data(self.nr, self.nc) + m = -0.5 + M = 0.6 + ac = np.zeros(a.shape) + act = np.zeros(a.shape) + self.fastclip(a, m, M, ac) + self.clip(a, m, M, act) + assert_array_strict_equal(ac, act) + + def test_clip_with_out_simple2(self): + # Test native int32 input with double min/max and int32 out + a = self._generate_int32_data(self.nr, self.nc) + m = np.float64(0) + M = np.float64(2) + ac = np.zeros(a.shape, dtype=np.int32) + act = ac.copy() + self.fastclip(a, m, M, out=ac, casting="unsafe") + self.clip(a, m, M, act) + assert_array_strict_equal(ac, act) + + def test_clip_with_out_simple_int32(self): + # Test native int32 input with int32 scalar min/max and int64 out + a = self._generate_int32_data(self.nr, self.nc) + m = np.int32(-1) + M = np.int32(1) + ac = np.zeros(a.shape, dtype=np.int64) + act = ac.copy() + self.fastclip(a, m, M, ac) + self.clip(a, m, M, act) + assert_array_strict_equal(ac, act) + + def test_clip_with_out_array_int32(self): + # Test native int32 input with double array min/max and int32 out + a = self._generate_int32_data(self.nr, self.nc) + m = np.zeros(a.shape, np.float64) + M = np.float64(1) + ac = np.zeros(a.shape, dtype=np.int32) + act = ac.copy() + self.fastclip(a, m, M, out=ac, casting="unsafe") + self.clip(a, m, M, act) + assert_array_strict_equal(ac, act) + + def test_clip_with_out_array_outint32(self): + # Test native double input with scalar min/max and int out + a = self._generate_data(self.nr, self.nc) + m = -1.0 + M = 2.0 + ac = np.zeros(a.shape, dtype=np.int32) + act = ac.copy() + self.fastclip(a, m, M, out=ac, casting="unsafe") + self.clip(a, m, M, act) + assert_array_strict_equal(ac, 
act) + + def test_clip_with_out_transposed(self): + # Test that the out argument works when transposed + a = np.arange(16).reshape(4, 4) + out = np.empty_like(a).T + a.clip(4, 10, out=out) + expected = self.clip(a, 4, 10) + assert_array_equal(out, expected) + + def test_clip_with_out_memory_overlap(self): + # Test that the out argument works when it has memory overlap + a = np.arange(16).reshape(4, 4) + ac = a.copy() + a[:-1].clip(4, 10, out=a[1:]) + expected = self.clip(ac[:-1], 4, 10) + assert_array_equal(a[1:], expected) + + def test_clip_inplace_array(self): + # Test native double input with array min/max + a = self._generate_data(self.nr, self.nc) + ac = a.copy() + m = np.zeros(a.shape) + M = 1.0 + self.fastclip(a, m, M, a) + self.clip(a, m, M, ac) + assert_array_strict_equal(a, ac) + + def test_clip_inplace_simple(self): + # Test native double input with scalar min/max + a = self._generate_data(self.nr, self.nc) + ac = a.copy() + m = -0.5 + M = 0.6 + self.fastclip(a, m, M, a) + self.clip(a, m, M, ac) + assert_array_strict_equal(a, ac) + + def test_clip_func_takes_out(self): + # Ensure that the clip() function takes an out=argument. + a = self._generate_data(self.nr, self.nc) + ac = a.copy() + m = -0.5 + M = 0.6 + a2 = np.clip(a, m, M, out=a) + self.clip(a, m, M, ac) + assert_array_strict_equal(a2, ac) + assert_(a2 is a) + + def test_clip_nan(self): + d = np.arange(7.) + assert_equal(d.clip(min=np.nan), np.nan) + assert_equal(d.clip(max=np.nan), np.nan) + assert_equal(d.clip(min=np.nan, max=np.nan), np.nan) + assert_equal(d.clip(min=-2, max=np.nan), np.nan) + assert_equal(d.clip(min=np.nan, max=10), np.nan) + + def test_object_clip(self): + a = np.arange(10, dtype=object) + actual = np.clip(a, 1, 5) + expected = np.array([1, 1, 2, 3, 4, 5, 5, 5, 5, 5]) + assert actual.tolist() == expected.tolist() + + def test_clip_all_none(self): + a = np.arange(10, dtype=object) + with assert_raises_regex(ValueError, 'max or min'): + np.clip(a, None, None) + + def test_clip_invalid_casting(self): + a = np.arange(10, dtype=object) + with assert_raises_regex(ValueError, + 'casting must be one of'): + self.fastclip(a, 1, 8, casting="garbage") + + @pytest.mark.parametrize("amin, amax", [ + # two scalars + (1, 0), + # mix scalar and array + (1, np.zeros(10)), + # two arrays + (np.ones(10), np.zeros(10)), + ]) + def test_clip_value_min_max_flip(self, amin, amax): + a = np.arange(10, dtype=np.int64) + # requirement from ufunc_docstrings.py + expected = np.minimum(np.maximum(a, amin), amax) + actual = np.clip(a, amin, amax) + assert_equal(actual, expected) + + @pytest.mark.parametrize("arr, amin, amax, exp", [ + # for a bug in npy_ObjectClip, based on a + # case produced by hypothesis + (np.zeros(10, dtype=np.int64), + 0, + -2**64+1, + np.full(10, -2**64+1, dtype=object)), + # for bugs in NPY_TIMEDELTA_MAX, based on a case + # produced by hypothesis + (np.zeros(10, dtype='m8') - 1, + 0, + 0, + np.zeros(10, dtype='m8')), + ]) + def test_clip_problem_cases(self, arr, amin, amax, exp): + actual = np.clip(arr, amin, amax) + assert_equal(actual, exp) + + @pytest.mark.parametrize("arr, amin, amax", [ + # problematic scalar nan case from hypothesis + (np.zeros(10, dtype=np.int64), + np.array(np.nan), + np.zeros(10, dtype=np.int32)), + ]) + def test_clip_scalar_nan_propagation(self, arr, amin, amax): + # enforcement of scalar nan propagation for comparisons + # called through clip() + expected = np.minimum(np.maximum(arr, amin), amax) + actual = np.clip(arr, amin, amax) + assert_equal(actual, expected) + + 
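# test_clip_value_min_max_flip above pins down what happens when amin > amax:
# per the requirement quoted from ufunc_docstrings.py, clip follows
# np.minimum(np.maximum(a, amin), amax), so every element collapses to amax
# (test_ones_pathological notes this amin > amax behaviour may still change).
# A short illustration with made-up values:

import numpy as np

a = np.arange(5)
assert np.clip(a, 3, 1).tolist() == [1, 1, 1, 1, 1]
assert np.minimum(np.maximum(a, 3), 1).tolist() == [1, 1, 1, 1, 1]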
@pytest.mark.xfail(reason="propagation doesn't match spec") + @pytest.mark.parametrize("arr, amin, amax", [ + (np.array([1] * 10, dtype='m8'), + np.timedelta64('NaT'), + np.zeros(10, dtype=np.int32)), + ]) + @pytest.mark.filterwarnings("ignore::DeprecationWarning") + def test_NaT_propagation(self, arr, amin, amax): + # NOTE: the expected function spec doesn't + # propagate NaT, but clip() now does + expected = np.minimum(np.maximum(arr, amin), amax) + actual = np.clip(arr, amin, amax) + assert_equal(actual, expected) + + @given( + data=st.data(), + arr=hynp.arrays( + dtype=hynp.integer_dtypes() | hynp.floating_dtypes(), + shape=hynp.array_shapes() + ) + ) + def test_clip_property(self, data, arr): + """A property-based test using Hypothesis. + + This aims for maximum generality: it could in principle generate *any* + valid inputs to np.clip, and in practice generates much more varied + inputs than human testers come up with. + + Because many of the inputs have tricky dependencies - compatible dtypes + and mutually-broadcastable shapes - we use `st.data()` strategy draw + values *inside* the test function, from strategies we construct based + on previous values. An alternative would be to define a custom strategy + with `@st.composite`, but until we have duplicated code inline is fine. + + That accounts for most of the function; the actual test is just three + lines to calculate and compare actual vs expected results! + """ + numeric_dtypes = hynp.integer_dtypes() | hynp.floating_dtypes() + # Generate shapes for the bounds which can be broadcast with each other + # and with the base shape. Below, we might decide to use scalar bounds, + # but it's clearer to generate these shapes unconditionally in advance. + in_shapes, result_shape = data.draw( + hynp.mutually_broadcastable_shapes( + num_shapes=2, base_shape=arr.shape + ) + ) + # Scalar `nan` is deprecated due to the differing behaviour it shows. + s = numeric_dtypes.flatmap( + lambda x: hynp.from_dtype(x, allow_nan=False)) + amin = data.draw(s | hynp.arrays(dtype=numeric_dtypes, + shape=in_shapes[0], elements={"allow_nan": False})) + amax = data.draw(s | hynp.arrays(dtype=numeric_dtypes, + shape=in_shapes[1], elements={"allow_nan": False})) + + # Then calculate our result and expected result and check that they're + # equal! See gh-12519 and gh-19457 for discussion deciding on this + # property and the result_type argument. + result = np.clip(arr, amin, amax) + t = np.result_type(arr, amin, amax) + expected = np.minimum(amax, np.maximum(arr, amin, dtype=t), dtype=t) + assert result.dtype == t + assert_array_equal(result, expected) + + +class TestAllclose: + rtol = 1e-5 + atol = 1e-8 + + def setup_method(self): + self.olderr = np.seterr(invalid='ignore') + + def teardown_method(self): + np.seterr(**self.olderr) + + def tst_allclose(self, x, y): + assert_(np.allclose(x, y), "%s and %s not close" % (x, y)) + + def tst_not_allclose(self, x, y): + assert_(not np.allclose(x, y), "%s and %s shouldn't be close" % (x, y)) + + def test_ip_allclose(self): + # Parametric test factory. + arr = np.array([100, 1000]) + aran = np.arange(125).reshape((5, 5, 5)) + + atol = self.atol + rtol = self.rtol + + data = [([1, 0], [1, 0]), + ([atol], [0]), + ([1], [1+rtol+atol]), + (arr, arr + arr*rtol), + (arr, arr + arr*rtol + atol*2), + (aran, aran + aran*rtol), + (np.inf, np.inf), + (np.inf, [np.inf])] + + for (x, y) in data: + self.tst_allclose(x, y) + + def test_ip_not_allclose(self): + # Parametric test factory. 
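# The close/not-close pairs above and below are built around the tolerance rule
# used by np.isclose and np.allclose: |a - b| <= atol + rtol * |b|.  A minimal
# numeric check with the same default tolerances (rtol=1e-5, atol=1e-8):

import numpy as np

rtol, atol = 1e-5, 1e-8

a, b = 1.0, 1.0 + rtol + atol          # just inside the tolerance
assert abs(a - b) <= atol + rtol * abs(b)
assert np.allclose(a, b)

b = 1.0 + rtol + 2 * atol              # one extra atol pushes it outside
assert not np.allclose(a, b)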
+ aran = np.arange(125).reshape((5, 5, 5)) + + atol = self.atol + rtol = self.rtol + + data = [([np.inf, 0], [1, np.inf]), + ([np.inf, 0], [1, 0]), + ([np.inf, np.inf], [1, np.inf]), + ([np.inf, np.inf], [1, 0]), + ([-np.inf, 0], [np.inf, 0]), + ([np.nan, 0], [np.nan, 0]), + ([atol*2], [0]), + ([1], [1+rtol+atol*2]), + (aran, aran + aran*atol + atol*2), + (np.array([np.inf, 1]), np.array([0, np.inf]))] + + for (x, y) in data: + self.tst_not_allclose(x, y) + + def test_no_parameter_modification(self): + x = np.array([np.inf, 1]) + y = np.array([0, np.inf]) + np.allclose(x, y) + assert_array_equal(x, np.array([np.inf, 1])) + assert_array_equal(y, np.array([0, np.inf])) + + def test_min_int(self): + # Could make problems because of abs(min_int) == min_int + min_int = np.iinfo(np.int_).min + a = np.array([min_int], dtype=np.int_) + assert_(np.allclose(a, a)) + + def test_equalnan(self): + x = np.array([1.0, np.nan]) + assert_(np.allclose(x, x, equal_nan=True)) + + def test_return_class_is_ndarray(self): + # Issue gh-6475 + # Check that allclose does not preserve subtypes + class Foo(np.ndarray): + def __new__(cls, *args, **kwargs): + return np.array(*args, **kwargs).view(cls) + + a = Foo([1]) + assert_(type(np.allclose(a, a)) is bool) + + +class TestIsclose: + rtol = 1e-5 + atol = 1e-8 + + def _setup(self): + atol = self.atol + rtol = self.rtol + arr = np.array([100, 1000]) + aran = np.arange(125).reshape((5, 5, 5)) + + self.all_close_tests = [ + ([1, 0], [1, 0]), + ([atol], [0]), + ([1], [1 + rtol + atol]), + (arr, arr + arr*rtol), + (arr, arr + arr*rtol + atol), + (aran, aran + aran*rtol), + (np.inf, np.inf), + (np.inf, [np.inf]), + ([np.inf, -np.inf], [np.inf, -np.inf]), + ] + self.none_close_tests = [ + ([np.inf, 0], [1, np.inf]), + ([np.inf, -np.inf], [1, 0]), + ([np.inf, np.inf], [1, -np.inf]), + ([np.inf, np.inf], [1, 0]), + ([np.nan, 0], [np.nan, -np.inf]), + ([atol*2], [0]), + ([1], [1 + rtol + atol*2]), + (aran, aran + rtol*1.1*aran + atol*1.1), + (np.array([np.inf, 1]), np.array([0, np.inf])), + ] + self.some_close_tests = [ + ([np.inf, 0], [np.inf, atol*2]), + ([atol, 1, 1e6*(1 + 2*rtol) + atol], [0, np.nan, 1e6]), + (np.arange(3), [0, 1, 2.1]), + (np.nan, [np.nan, np.nan, np.nan]), + ([0], [atol, np.inf, -np.inf, np.nan]), + (0, [atol, np.inf, -np.inf, np.nan]), + ] + self.some_close_results = [ + [True, False], + [True, False, False], + [True, True, False], + [False, False, False], + [True, False, False, False], + [True, False, False, False], + ] + + def test_ip_isclose(self): + self._setup() + tests = self.some_close_tests + results = self.some_close_results + for (x, y), result in zip(tests, results): + assert_array_equal(np.isclose(x, y), result) + + def tst_all_isclose(self, x, y): + assert_(np.all(np.isclose(x, y)), "%s and %s not close" % (x, y)) + + def tst_none_isclose(self, x, y): + msg = "%s and %s shouldn't be close" + assert_(not np.any(np.isclose(x, y)), msg % (x, y)) + + def tst_isclose_allclose(self, x, y): + msg = "isclose.all() and allclose aren't same for %s and %s" + msg2 = "isclose and allclose aren't same for %s and %s" + if np.isscalar(x) and np.isscalar(y): + assert_(np.isclose(x, y) == np.allclose(x, y), msg=msg2 % (x, y)) + else: + assert_array_equal(np.isclose(x, y).all(), np.allclose(x, y), msg % (x, y)) + + def test_ip_all_isclose(self): + self._setup() + for (x, y) in self.all_close_tests: + self.tst_all_isclose(x, y) + + def test_ip_none_isclose(self): + self._setup() + for (x, y) in self.none_close_tests: + self.tst_none_isclose(x, y) + + def 
test_ip_isclose_allclose(self): + self._setup() + tests = (self.all_close_tests + self.none_close_tests + + self.some_close_tests) + for (x, y) in tests: + self.tst_isclose_allclose(x, y) + + def test_equal_nan(self): + assert_array_equal(np.isclose(np.nan, np.nan, equal_nan=True), [True]) + arr = np.array([1.0, np.nan]) + assert_array_equal(np.isclose(arr, arr, equal_nan=True), [True, True]) + + def test_masked_arrays(self): + # Make sure to test the output type when arguments are interchanged. + + x = np.ma.masked_where([True, True, False], np.arange(3)) + assert_(type(x) is type(np.isclose(2, x))) + assert_(type(x) is type(np.isclose(x, 2))) + + x = np.ma.masked_where([True, True, False], [np.nan, np.inf, np.nan]) + assert_(type(x) is type(np.isclose(np.inf, x))) + assert_(type(x) is type(np.isclose(x, np.inf))) + + x = np.ma.masked_where([True, True, False], [np.nan, np.nan, np.nan]) + y = np.isclose(np.nan, x, equal_nan=True) + assert_(type(x) is type(y)) + # Ensure that the mask isn't modified... + assert_array_equal([True, True, False], y.mask) + y = np.isclose(x, np.nan, equal_nan=True) + assert_(type(x) is type(y)) + # Ensure that the mask isn't modified... + assert_array_equal([True, True, False], y.mask) + + x = np.ma.masked_where([True, True, False], [np.nan, np.nan, np.nan]) + y = np.isclose(x, x, equal_nan=True) + assert_(type(x) is type(y)) + # Ensure that the mask isn't modified... + assert_array_equal([True, True, False], y.mask) + + def test_scalar_return(self): + assert_(np.isscalar(np.isclose(1, 1))) + + def test_no_parameter_modification(self): + x = np.array([np.inf, 1]) + y = np.array([0, np.inf]) + np.isclose(x, y) + assert_array_equal(x, np.array([np.inf, 1])) + assert_array_equal(y, np.array([0, np.inf])) + + def test_non_finite_scalar(self): + # GH7014, when two scalars are compared the output should also be a + # scalar + assert_(np.isclose(np.inf, -np.inf) is np.False_) + assert_(np.isclose(0, np.inf) is np.False_) + assert_(type(np.isclose(0, np.inf)) is np.bool_) + + def test_timedelta(self): + # Allclose currently works for timedelta64 as long as `atol` is + # an integer or also a timedelta64 + a = np.array([[1, 2, 3, "NaT"]], dtype="m8[ns]") + assert np.isclose(a, a, atol=0, equal_nan=True).all() + assert np.isclose(a, a, atol=np.timedelta64(1, "ns"), equal_nan=True).all() + assert np.allclose(a, a, atol=0, equal_nan=True) + assert np.allclose(a, a, atol=np.timedelta64(1, "ns"), equal_nan=True) + + +class TestStdVar: + def setup_method(self): + self.A = np.array([1, -1, 1, -1]) + self.real_var = 1 + + def test_basic(self): + assert_almost_equal(np.var(self.A), self.real_var) + assert_almost_equal(np.std(self.A)**2, self.real_var) + + def test_scalars(self): + assert_equal(np.var(1), 0) + assert_equal(np.std(1), 0) + + def test_ddof1(self): + assert_almost_equal(np.var(self.A, ddof=1), + self.real_var * len(self.A) / (len(self.A) - 1)) + assert_almost_equal(np.std(self.A, ddof=1)**2, + self.real_var*len(self.A) / (len(self.A) - 1)) + + def test_ddof2(self): + assert_almost_equal(np.var(self.A, ddof=2), + self.real_var * len(self.A) / (len(self.A) - 2)) + assert_almost_equal(np.std(self.A, ddof=2)**2, + self.real_var * len(self.A) / (len(self.A) - 2)) + + def test_out_scalar(self): + d = np.arange(10) + out = np.array(0.) 
+ r = np.std(d, out=out) + assert_(r is out) + assert_array_equal(r, out) + r = np.var(d, out=out) + assert_(r is out) + assert_array_equal(r, out) + r = np.mean(d, out=out) + assert_(r is out) + assert_array_equal(r, out) + + +class TestStdVarComplex: + def test_basic(self): + A = np.array([1, 1.j, -1, -1.j]) + real_var = 1 + assert_almost_equal(np.var(A), real_var) + assert_almost_equal(np.std(A)**2, real_var) + + def test_scalars(self): + assert_equal(np.var(1j), 0) + assert_equal(np.std(1j), 0) + + +class TestCreationFuncs: + # Test ones, zeros, empty and full. + + def setup_method(self): + dtypes = {np.dtype(tp) for tp in itertools.chain(*np.sctypes.values())} + # void, bytes, str + variable_sized = {tp for tp in dtypes if tp.str.endswith('0')} + self.dtypes = sorted(dtypes - variable_sized | + {np.dtype(tp.str.replace("0", str(i))) + for tp in variable_sized for i in range(1, 10)}, + key=lambda dtype: dtype.str) + self.orders = {'C': 'c_contiguous', 'F': 'f_contiguous'} + self.ndims = 10 + + def check_function(self, func, fill_value=None): + par = ((0, 1, 2), + range(self.ndims), + self.orders, + self.dtypes) + fill_kwarg = {} + if fill_value is not None: + fill_kwarg = {'fill_value': fill_value} + + for size, ndims, order, dtype in itertools.product(*par): + shape = ndims * [size] + + # do not fill void type + if fill_kwarg and dtype.str.startswith('|V'): + continue + + arr = func(shape, order=order, dtype=dtype, + **fill_kwarg) + + assert_equal(arr.dtype, dtype) + assert_(getattr(arr.flags, self.orders[order])) + + if fill_value is not None: + if dtype.str.startswith('|S'): + val = str(fill_value) + else: + val = fill_value + assert_equal(arr, dtype.type(val)) + + def test_zeros(self): + self.check_function(np.zeros) + + def test_ones(self): + self.check_function(np.ones) + + def test_empty(self): + self.check_function(np.empty) + + def test_full(self): + self.check_function(np.full, 0) + self.check_function(np.full, 1) + + @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") + def test_for_reference_leak(self): + # Make sure we have an object for reference + dim = 1 + beg = sys.getrefcount(dim) + np.zeros([dim]*10) + assert_(sys.getrefcount(dim) == beg) + np.ones([dim]*10) + assert_(sys.getrefcount(dim) == beg) + np.empty([dim]*10) + assert_(sys.getrefcount(dim) == beg) + np.full([dim]*10, 0) + assert_(sys.getrefcount(dim) == beg) + + +class TestLikeFuncs: + '''Test ones_like, zeros_like, empty_like and full_like''' + + def setup_method(self): + self.data = [ + # Array scalars + (np.array(3.), None), + (np.array(3), 'f8'), + # 1D arrays + (np.arange(6, dtype='f4'), None), + (np.arange(6), 'c16'), + # 2D C-layout arrays + (np.arange(6).reshape(2, 3), None), + (np.arange(6).reshape(3, 2), 'i1'), + # 2D F-layout arrays + (np.arange(6).reshape((2, 3), order='F'), None), + (np.arange(6).reshape((3, 2), order='F'), 'i1'), + # 3D C-layout arrays + (np.arange(24).reshape(2, 3, 4), None), + (np.arange(24).reshape(4, 3, 2), 'f4'), + # 3D F-layout arrays + (np.arange(24).reshape((2, 3, 4), order='F'), None), + (np.arange(24).reshape((4, 3, 2), order='F'), 'f4'), + # 3D non-C/F-layout arrays + (np.arange(24).reshape(2, 3, 4).swapaxes(0, 1), None), + (np.arange(24).reshape(4, 3, 2).swapaxes(0, 1), '?'), + ] + self.shapes = [(), (5,), (5,6,), (5,6,7,)] + + def compare_array_value(self, dz, value, fill_value): + if value is not None: + if fill_value: + # Conversion is close to what np.full_like uses + # but we may want to convert directly in the future + # which may result in 
errors (where this does not). + z = np.array(value).astype(dz.dtype) + assert_(np.all(dz == z)) + else: + assert_(np.all(dz == value)) + + def check_like_function(self, like_function, value, fill_value=False): + if fill_value: + fill_kwarg = {'fill_value': value} + else: + fill_kwarg = {} + for d, dtype in self.data: + # default (K) order, dtype + dz = like_function(d, dtype=dtype, **fill_kwarg) + assert_equal(dz.shape, d.shape) + assert_equal(np.array(dz.strides)*d.dtype.itemsize, + np.array(d.strides)*dz.dtype.itemsize) + assert_equal(d.flags.c_contiguous, dz.flags.c_contiguous) + assert_equal(d.flags.f_contiguous, dz.flags.f_contiguous) + if dtype is None: + assert_equal(dz.dtype, d.dtype) + else: + assert_equal(dz.dtype, np.dtype(dtype)) + self.compare_array_value(dz, value, fill_value) + + # C order, default dtype + dz = like_function(d, order='C', dtype=dtype, **fill_kwarg) + assert_equal(dz.shape, d.shape) + assert_(dz.flags.c_contiguous) + if dtype is None: + assert_equal(dz.dtype, d.dtype) + else: + assert_equal(dz.dtype, np.dtype(dtype)) + self.compare_array_value(dz, value, fill_value) + + # F order, default dtype + dz = like_function(d, order='F', dtype=dtype, **fill_kwarg) + assert_equal(dz.shape, d.shape) + assert_(dz.flags.f_contiguous) + if dtype is None: + assert_equal(dz.dtype, d.dtype) + else: + assert_equal(dz.dtype, np.dtype(dtype)) + self.compare_array_value(dz, value, fill_value) + + # A order + dz = like_function(d, order='A', dtype=dtype, **fill_kwarg) + assert_equal(dz.shape, d.shape) + if d.flags.f_contiguous: + assert_(dz.flags.f_contiguous) + else: + assert_(dz.flags.c_contiguous) + if dtype is None: + assert_equal(dz.dtype, d.dtype) + else: + assert_equal(dz.dtype, np.dtype(dtype)) + self.compare_array_value(dz, value, fill_value) + + # Test the 'shape' parameter + for s in self.shapes: + for o in 'CFA': + sz = like_function(d, dtype=dtype, shape=s, order=o, + **fill_kwarg) + assert_equal(sz.shape, s) + if dtype is None: + assert_equal(sz.dtype, d.dtype) + else: + assert_equal(sz.dtype, np.dtype(dtype)) + if o == 'C' or (o == 'A' and d.flags.c_contiguous): + assert_(sz.flags.c_contiguous) + elif o == 'F' or (o == 'A' and d.flags.f_contiguous): + assert_(sz.flags.f_contiguous) + self.compare_array_value(sz, value, fill_value) + + if (d.ndim != len(s)): + assert_equal(np.argsort(like_function(d, dtype=dtype, + shape=s, order='K', + **fill_kwarg).strides), + np.argsort(np.empty(s, dtype=dtype, + order='C').strides)) + else: + assert_equal(np.argsort(like_function(d, dtype=dtype, + shape=s, order='K', + **fill_kwarg).strides), + np.argsort(d.strides)) + + # Test the 'subok' parameter + class MyNDArray(np.ndarray): + pass + + a = np.array([[1, 2], [3, 4]]).view(MyNDArray) + + b = like_function(a, **fill_kwarg) + assert_(type(b) is MyNDArray) + + b = like_function(a, subok=False, **fill_kwarg) + assert_(type(b) is not MyNDArray) + + def test_ones_like(self): + self.check_like_function(np.ones_like, 1) + + def test_zeros_like(self): + self.check_like_function(np.zeros_like, 0) + + def test_empty_like(self): + self.check_like_function(np.empty_like, None) + + def test_filled_like(self): + self.check_like_function(np.full_like, 0, True) + self.check_like_function(np.full_like, 1, True) + self.check_like_function(np.full_like, 1000, True) + self.check_like_function(np.full_like, 123.456, True) + # Inf to integer casts cause invalid-value errors: ignore them. 
+ with np.errstate(invalid="ignore"): + self.check_like_function(np.full_like, np.inf, True) + + @pytest.mark.parametrize('likefunc', [np.empty_like, np.full_like, + np.zeros_like, np.ones_like]) + @pytest.mark.parametrize('dtype', [str, bytes]) + def test_dtype_str_bytes(self, likefunc, dtype): + # Regression test for gh-19860 + a = np.arange(16).reshape(2, 8) + b = a[:, ::2] # Ensure b is not contiguous. + kwargs = {'fill_value': ''} if likefunc == np.full_like else {} + result = likefunc(b, dtype=dtype, **kwargs) + if dtype == str: + assert result.strides == (16, 4) + else: + # dtype is bytes + assert result.strides == (4, 1) + + +class TestCorrelate: + def _setup(self, dt): + self.x = np.array([1, 2, 3, 4, 5], dtype=dt) + self.xs = np.arange(1, 20)[::3] + self.y = np.array([-1, -2, -3], dtype=dt) + self.z1 = np.array([-3., -8., -14., -20., -26., -14., -5.], dtype=dt) + self.z1_4 = np.array([-2., -5., -8., -11., -14., -5.], dtype=dt) + self.z1r = np.array([-15., -22., -22., -16., -10., -4., -1.], dtype=dt) + self.z2 = np.array([-5., -14., -26., -20., -14., -8., -3.], dtype=dt) + self.z2r = np.array([-1., -4., -10., -16., -22., -22., -15.], dtype=dt) + self.zs = np.array([-3., -14., -30., -48., -66., -84., + -102., -54., -19.], dtype=dt) + + def test_float(self): + self._setup(float) + z = np.correlate(self.x, self.y, 'full') + assert_array_almost_equal(z, self.z1) + z = np.correlate(self.x, self.y[:-1], 'full') + assert_array_almost_equal(z, self.z1_4) + z = np.correlate(self.y, self.x, 'full') + assert_array_almost_equal(z, self.z2) + z = np.correlate(self.x[::-1], self.y, 'full') + assert_array_almost_equal(z, self.z1r) + z = np.correlate(self.y, self.x[::-1], 'full') + assert_array_almost_equal(z, self.z2r) + z = np.correlate(self.xs, self.y, 'full') + assert_array_almost_equal(z, self.zs) + + def test_object(self): + self._setup(Decimal) + z = np.correlate(self.x, self.y, 'full') + assert_array_almost_equal(z, self.z1) + z = np.correlate(self.y, self.x, 'full') + assert_array_almost_equal(z, self.z2) + + def test_no_overwrite(self): + d = np.ones(100) + k = np.ones(3) + np.correlate(d, k) + assert_array_equal(d, np.ones(100)) + assert_array_equal(k, np.ones(3)) + + def test_complex(self): + x = np.array([1, 2, 3, 4+1j], dtype=complex) + y = np.array([-1, -2j, 3+1j], dtype=complex) + r_z = np.array([3-1j, 6, 8+1j, 11+5j, -5+8j, -4-1j], dtype=complex) + r_z = r_z[::-1].conjugate() + z = np.correlate(y, x, mode='full') + assert_array_almost_equal(z, r_z) + + def test_zero_size(self): + with pytest.raises(ValueError): + np.correlate(np.array([]), np.ones(1000), mode='full') + with pytest.raises(ValueError): + np.correlate(np.ones(1000), np.array([]), mode='full') + + def test_mode(self): + d = np.ones(100) + k = np.ones(3) + default_mode = np.correlate(d, k, mode='valid') + with assert_warns(DeprecationWarning): + valid_mode = np.correlate(d, k, mode='v') + assert_array_equal(valid_mode, default_mode) + # integer mode + with assert_raises(ValueError): + np.correlate(d, k, mode=-1) + assert_array_equal(np.correlate(d, k, mode=0), valid_mode) + # illegal arguments + with assert_raises(TypeError): + np.correlate(d, k, mode=None) + + +class TestConvolve: + def test_object(self): + d = [1.] * 100 + k = [1.] 
* 3 + assert_array_almost_equal(np.convolve(d, k)[2:-2], np.full(98, 3)) + + def test_no_overwrite(self): + d = np.ones(100) + k = np.ones(3) + np.convolve(d, k) + assert_array_equal(d, np.ones(100)) + assert_array_equal(k, np.ones(3)) + + def test_mode(self): + d = np.ones(100) + k = np.ones(3) + default_mode = np.convolve(d, k, mode='full') + with assert_warns(DeprecationWarning): + full_mode = np.convolve(d, k, mode='f') + assert_array_equal(full_mode, default_mode) + # integer mode + with assert_raises(ValueError): + np.convolve(d, k, mode=-1) + assert_array_equal(np.convolve(d, k, mode=2), full_mode) + # illegal arguments + with assert_raises(TypeError): + np.convolve(d, k, mode=None) + + +class TestArgwhere: + + @pytest.mark.parametrize('nd', [0, 1, 2]) + def test_nd(self, nd): + # get an nd array with multiple elements in every dimension + x = np.empty((2,)*nd, bool) + + # none + x[...] = False + assert_equal(np.argwhere(x).shape, (0, nd)) + + # only one + x[...] = False + x.flat[0] = True + assert_equal(np.argwhere(x).shape, (1, nd)) + + # all but one + x[...] = True + x.flat[0] = False + assert_equal(np.argwhere(x).shape, (x.size - 1, nd)) + + # all + x[...] = True + assert_equal(np.argwhere(x).shape, (x.size, nd)) + + def test_2D(self): + x = np.arange(6).reshape((2, 3)) + assert_array_equal(np.argwhere(x > 1), + [[0, 2], + [1, 0], + [1, 1], + [1, 2]]) + + def test_list(self): + assert_equal(np.argwhere([4, 0, 2, 1, 3]), [[0], [2], [3], [4]]) + + +class TestStringFunction: + + def test_set_string_function(self): + a = np.array([1]) + np.set_string_function(lambda x: "FOO", repr=True) + assert_equal(repr(a), "FOO") + np.set_string_function(None, repr=True) + assert_equal(repr(a), "array([1])") + + np.set_string_function(lambda x: "FOO", repr=False) + assert_equal(str(a), "FOO") + np.set_string_function(None, repr=False) + assert_equal(str(a), "[1]") + + +class TestRoll: + def test_roll1d(self): + x = np.arange(10) + xr = np.roll(x, 2) + assert_equal(xr, np.array([8, 9, 0, 1, 2, 3, 4, 5, 6, 7])) + + def test_roll2d(self): + x2 = np.reshape(np.arange(10), (2, 5)) + x2r = np.roll(x2, 1) + assert_equal(x2r, np.array([[9, 0, 1, 2, 3], [4, 5, 6, 7, 8]])) + + x2r = np.roll(x2, 1, axis=0) + assert_equal(x2r, np.array([[5, 6, 7, 8, 9], [0, 1, 2, 3, 4]])) + + x2r = np.roll(x2, 1, axis=1) + assert_equal(x2r, np.array([[4, 0, 1, 2, 3], [9, 5, 6, 7, 8]])) + + # Roll multiple axes at once. + x2r = np.roll(x2, 1, axis=(0, 1)) + assert_equal(x2r, np.array([[9, 5, 6, 7, 8], [4, 0, 1, 2, 3]])) + + x2r = np.roll(x2, (1, 0), axis=(0, 1)) + assert_equal(x2r, np.array([[5, 6, 7, 8, 9], [0, 1, 2, 3, 4]])) + + x2r = np.roll(x2, (-1, 0), axis=(0, 1)) + assert_equal(x2r, np.array([[5, 6, 7, 8, 9], [0, 1, 2, 3, 4]])) + + x2r = np.roll(x2, (0, 1), axis=(0, 1)) + assert_equal(x2r, np.array([[4, 0, 1, 2, 3], [9, 5, 6, 7, 8]])) + + x2r = np.roll(x2, (0, -1), axis=(0, 1)) + assert_equal(x2r, np.array([[1, 2, 3, 4, 0], [6, 7, 8, 9, 5]])) + + x2r = np.roll(x2, (1, 1), axis=(0, 1)) + assert_equal(x2r, np.array([[9, 5, 6, 7, 8], [4, 0, 1, 2, 3]])) + + x2r = np.roll(x2, (-1, -1), axis=(0, 1)) + assert_equal(x2r, np.array([[6, 7, 8, 9, 5], [1, 2, 3, 4, 0]])) + + # Roll the same axis multiple times. + x2r = np.roll(x2, 1, axis=(0, 0)) + assert_equal(x2r, np.array([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]])) + + x2r = np.roll(x2, 1, axis=(1, 1)) + assert_equal(x2r, np.array([[3, 4, 0, 1, 2], [8, 9, 5, 6, 7]])) + + # Roll more than one turn in either direction. 
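# test_roll2d exercises np.roll's axis handling; a compact standalone recap of
# cases it asserts:

import numpy as np

x2 = np.arange(10).reshape(2, 5)

# No axis: the array is flattened, rolled, and reshaped back.
assert np.roll(x2, 1).tolist() == [[9, 0, 1, 2, 3], [4, 5, 6, 7, 8]]

# A tuple of shifts pairs element-wise with a tuple of axes.
assert np.roll(x2, (1, 1), axis=(0, 1)).tolist() == [[9, 5, 6, 7, 8], [4, 0, 1, 2, 3]]

# Shifts wrap around, so the "more than one turn" cases that follow reduce to
# smaller equivalent shifts (6 mod 5 == 1 and -4 mod 5 == 1 along axis 1).
assert np.roll(x2, 6, axis=1).tolist() == np.roll(x2, 1, axis=1).tolist()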
+ x2r = np.roll(x2, 6, axis=1) + assert_equal(x2r, np.array([[4, 0, 1, 2, 3], [9, 5, 6, 7, 8]])) + + x2r = np.roll(x2, -4, axis=1) + assert_equal(x2r, np.array([[4, 0, 1, 2, 3], [9, 5, 6, 7, 8]])) + + def test_roll_empty(self): + x = np.array([]) + assert_equal(np.roll(x, 1), np.array([])) + + +class TestRollaxis: + + # expected shape indexed by (axis, start) for array of + # shape (1, 2, 3, 4) + tgtshape = {(0, 0): (1, 2, 3, 4), (0, 1): (1, 2, 3, 4), + (0, 2): (2, 1, 3, 4), (0, 3): (2, 3, 1, 4), + (0, 4): (2, 3, 4, 1), + (1, 0): (2, 1, 3, 4), (1, 1): (1, 2, 3, 4), + (1, 2): (1, 2, 3, 4), (1, 3): (1, 3, 2, 4), + (1, 4): (1, 3, 4, 2), + (2, 0): (3, 1, 2, 4), (2, 1): (1, 3, 2, 4), + (2, 2): (1, 2, 3, 4), (2, 3): (1, 2, 3, 4), + (2, 4): (1, 2, 4, 3), + (3, 0): (4, 1, 2, 3), (3, 1): (1, 4, 2, 3), + (3, 2): (1, 2, 4, 3), (3, 3): (1, 2, 3, 4), + (3, 4): (1, 2, 3, 4)} + + def test_exceptions(self): + a = np.arange(1*2*3*4).reshape(1, 2, 3, 4) + assert_raises(np.AxisError, np.rollaxis, a, -5, 0) + assert_raises(np.AxisError, np.rollaxis, a, 0, -5) + assert_raises(np.AxisError, np.rollaxis, a, 4, 0) + assert_raises(np.AxisError, np.rollaxis, a, 0, 5) + + def test_results(self): + a = np.arange(1*2*3*4).reshape(1, 2, 3, 4).copy() + aind = np.indices(a.shape) + assert_(a.flags['OWNDATA']) + for (i, j) in self.tgtshape: + # positive axis, positive start + res = np.rollaxis(a, axis=i, start=j) + i0, i1, i2, i3 = aind[np.array(res.shape) - 1] + assert_(np.all(res[i0, i1, i2, i3] == a)) + assert_(res.shape == self.tgtshape[(i, j)], str((i,j))) + assert_(not res.flags['OWNDATA']) + + # negative axis, positive start + ip = i + 1 + res = np.rollaxis(a, axis=-ip, start=j) + i0, i1, i2, i3 = aind[np.array(res.shape) - 1] + assert_(np.all(res[i0, i1, i2, i3] == a)) + assert_(res.shape == self.tgtshape[(4 - ip, j)]) + assert_(not res.flags['OWNDATA']) + + # positive axis, negative start + jp = j + 1 if j < 4 else j + res = np.rollaxis(a, axis=i, start=-jp) + i0, i1, i2, i3 = aind[np.array(res.shape) - 1] + assert_(np.all(res[i0, i1, i2, i3] == a)) + assert_(res.shape == self.tgtshape[(i, 4 - jp)]) + assert_(not res.flags['OWNDATA']) + + # negative axis, negative start + ip = i + 1 + jp = j + 1 if j < 4 else j + res = np.rollaxis(a, axis=-ip, start=-jp) + i0, i1, i2, i3 = aind[np.array(res.shape) - 1] + assert_(np.all(res[i0, i1, i2, i3] == a)) + assert_(res.shape == self.tgtshape[(4 - ip, 4 - jp)]) + assert_(not res.flags['OWNDATA']) + + +class TestMoveaxis: + def test_move_to_end(self): + x = np.random.randn(5, 6, 7) + for source, expected in [(0, (6, 7, 5)), + (1, (5, 7, 6)), + (2, (5, 6, 7)), + (-1, (5, 6, 7))]: + actual = np.moveaxis(x, source, -1).shape + assert_(actual, expected) + + def test_move_new_position(self): + x = np.random.randn(1, 2, 3, 4) + for source, destination, expected in [ + (0, 1, (2, 1, 3, 4)), + (1, 2, (1, 3, 2, 4)), + (1, -1, (1, 3, 4, 2)), + ]: + actual = np.moveaxis(x, source, destination).shape + assert_(actual, expected) + + def test_preserve_order(self): + x = np.zeros((1, 2, 3, 4)) + for source, destination in [ + (0, 0), + (3, -1), + (-1, 3), + ([0, -1], [0, -1]), + ([2, 0], [2, 0]), + (range(4), range(4)), + ]: + actual = np.moveaxis(x, source, destination).shape + assert_(actual, (1, 2, 3, 4)) + + def test_move_multiples(self): + x = np.zeros((0, 1, 2, 3)) + for source, destination, expected in [ + ([0, 1], [2, 3], (2, 3, 0, 1)), + ([2, 3], [0, 1], (2, 3, 0, 1)), + ([0, 1, 2], [2, 3, 0], (2, 3, 0, 1)), + ([3, 0], [1, 0], (0, 3, 1, 2)), + ([0, 3], [0, 1], (0, 3, 1, 2)), + ]: 
+ actual = np.moveaxis(x, source, destination).shape + assert_(actual, expected) + + def test_errors(self): + x = np.random.randn(1, 2, 3) + assert_raises_regex(np.AxisError, 'source.*out of bounds', + np.moveaxis, x, 3, 0) + assert_raises_regex(np.AxisError, 'source.*out of bounds', + np.moveaxis, x, -4, 0) + assert_raises_regex(np.AxisError, 'destination.*out of bounds', + np.moveaxis, x, 0, 5) + assert_raises_regex(ValueError, 'repeated axis in `source`', + np.moveaxis, x, [0, 0], [0, 1]) + assert_raises_regex(ValueError, 'repeated axis in `destination`', + np.moveaxis, x, [0, 1], [1, 1]) + assert_raises_regex(ValueError, 'must have the same number', + np.moveaxis, x, 0, [0, 1]) + assert_raises_regex(ValueError, 'must have the same number', + np.moveaxis, x, [0, 1], [0]) + + def test_array_likes(self): + x = np.ma.zeros((1, 2, 3)) + result = np.moveaxis(x, 0, 0) + assert_(x.shape, result.shape) + assert_(isinstance(result, np.ma.MaskedArray)) + + x = [1, 2, 3] + result = np.moveaxis(x, 0, 0) + assert_(x, list(result)) + assert_(isinstance(result, np.ndarray)) + + +class TestCross: + def test_2x2(self): + u = [1, 2] + v = [3, 4] + z = -2 + cp = np.cross(u, v) + assert_equal(cp, z) + cp = np.cross(v, u) + assert_equal(cp, -z) + + def test_2x3(self): + u = [1, 2] + v = [3, 4, 5] + z = np.array([10, -5, -2]) + cp = np.cross(u, v) + assert_equal(cp, z) + cp = np.cross(v, u) + assert_equal(cp, -z) + + def test_3x3(self): + u = [1, 2, 3] + v = [4, 5, 6] + z = np.array([-3, 6, -3]) + cp = np.cross(u, v) + assert_equal(cp, z) + cp = np.cross(v, u) + assert_equal(cp, -z) + + def test_broadcasting(self): + # Ticket #2624 (Trac #2032) + u = np.tile([1, 2], (11, 1)) + v = np.tile([3, 4], (11, 1)) + z = -2 + assert_equal(np.cross(u, v), z) + assert_equal(np.cross(v, u), -z) + assert_equal(np.cross(u, u), 0) + + u = np.tile([1, 2], (11, 1)).T + v = np.tile([3, 4, 5], (11, 1)) + z = np.tile([10, -5, -2], (11, 1)) + assert_equal(np.cross(u, v, axisa=0), z) + assert_equal(np.cross(v, u.T), -z) + assert_equal(np.cross(v, v), 0) + + u = np.tile([1, 2, 3], (11, 1)).T + v = np.tile([3, 4], (11, 1)).T + z = np.tile([-12, 9, -2], (11, 1)) + assert_equal(np.cross(u, v, axisa=0, axisb=0), z) + assert_equal(np.cross(v.T, u.T), -z) + assert_equal(np.cross(u.T, u.T), 0) + + u = np.tile([1, 2, 3], (5, 1)) + v = np.tile([4, 5, 6], (5, 1)).T + z = np.tile([-3, 6, -3], (5, 1)) + assert_equal(np.cross(u, v, axisb=0), z) + assert_equal(np.cross(v.T, u), -z) + assert_equal(np.cross(u, u), 0) + + def test_broadcasting_shapes(self): + u = np.ones((2, 1, 3)) + v = np.ones((5, 3)) + assert_equal(np.cross(u, v).shape, (2, 5, 3)) + u = np.ones((10, 3, 5)) + v = np.ones((2, 5)) + assert_equal(np.cross(u, v, axisa=1, axisb=0).shape, (10, 5, 3)) + assert_raises(np.AxisError, np.cross, u, v, axisa=1, axisb=2) + assert_raises(np.AxisError, np.cross, u, v, axisa=3, axisb=0) + u = np.ones((10, 3, 5, 7)) + v = np.ones((5, 7, 2)) + assert_equal(np.cross(u, v, axisa=1, axisc=2).shape, (10, 5, 3, 7)) + assert_raises(np.AxisError, np.cross, u, v, axisa=-5, axisb=2) + assert_raises(np.AxisError, np.cross, u, v, axisa=1, axisb=-4) + # gh-5885 + u = np.ones((3, 4, 2)) + for axisc in range(-2, 2): + assert_equal(np.cross(u, u, axisc=axisc).shape, (3, 4)) + + def test_uint8_int32_mixed_dtypes(self): + # regression test for gh-19138 + u = np.array([[195, 8, 9]], np.uint8) + v = np.array([250, 166, 68], np.int32) + z = np.array([[950, 11010, -30370]], dtype=np.int32) + assert_equal(np.cross(v, u), z) + assert_equal(np.cross(u, v), -z) + + +def 
test_outer_out_param(): + arr1 = np.ones((5,)) + arr2 = np.ones((2,)) + arr3 = np.linspace(-2, 2, 5) + out1 = np.ndarray(shape=(5,5)) + out2 = np.ndarray(shape=(2, 5)) + res1 = np.outer(arr1, arr3, out1) + assert_equal(res1, out1) + assert_equal(np.outer(arr2, arr3, out2), out2) + + +class TestIndices: + + def test_simple(self): + [x, y] = np.indices((4, 3)) + assert_array_equal(x, np.array([[0, 0, 0], + [1, 1, 1], + [2, 2, 2], + [3, 3, 3]])) + assert_array_equal(y, np.array([[0, 1, 2], + [0, 1, 2], + [0, 1, 2], + [0, 1, 2]])) + + def test_single_input(self): + [x] = np.indices((4,)) + assert_array_equal(x, np.array([0, 1, 2, 3])) + + [x] = np.indices((4,), sparse=True) + assert_array_equal(x, np.array([0, 1, 2, 3])) + + def test_scalar_input(self): + assert_array_equal([], np.indices(())) + assert_array_equal([], np.indices((), sparse=True)) + assert_array_equal([[]], np.indices((0,))) + assert_array_equal([[]], np.indices((0,), sparse=True)) + + def test_sparse(self): + [x, y] = np.indices((4,3), sparse=True) + assert_array_equal(x, np.array([[0], [1], [2], [3]])) + assert_array_equal(y, np.array([[0, 1, 2]])) + + @pytest.mark.parametrize("dtype", [np.int32, np.int64, np.float32, np.float64]) + @pytest.mark.parametrize("dims", [(), (0,), (4, 3)]) + def test_return_type(self, dtype, dims): + inds = np.indices(dims, dtype=dtype) + assert_(inds.dtype == dtype) + + for arr in np.indices(dims, dtype=dtype, sparse=True): + assert_(arr.dtype == dtype) + + +class TestRequire: + flag_names = ['C', 'C_CONTIGUOUS', 'CONTIGUOUS', + 'F', 'F_CONTIGUOUS', 'FORTRAN', + 'A', 'ALIGNED', + 'W', 'WRITEABLE', + 'O', 'OWNDATA'] + + def generate_all_false(self, dtype): + arr = np.zeros((2, 2), [('junk', 'i1'), ('a', dtype)]) + arr.setflags(write=False) + a = arr['a'] + assert_(not a.flags['C']) + assert_(not a.flags['F']) + assert_(not a.flags['O']) + assert_(not a.flags['W']) + assert_(not a.flags['A']) + return a + + def set_and_check_flag(self, flag, dtype, arr): + if dtype is None: + dtype = arr.dtype + b = np.require(arr, dtype, [flag]) + assert_(b.flags[flag]) + assert_(b.dtype == dtype) + + # a further call to np.require ought to return the same array + # unless OWNDATA is specified. 
+ c = np.require(b, None, [flag]) + if flag[0] != 'O': + assert_(c is b) + else: + assert_(c.flags[flag]) + + def test_require_each(self): + + id = ['f8', 'i4'] + fd = [None, 'f8', 'c16'] + for idtype, fdtype, flag in itertools.product(id, fd, self.flag_names): + a = self.generate_all_false(idtype) + self.set_and_check_flag(flag, fdtype, a) + + def test_unknown_requirement(self): + a = self.generate_all_false('f8') + assert_raises(KeyError, np.require, a, None, 'Q') + + def test_non_array_input(self): + a = np.require([1, 2, 3, 4], 'i4', ['C', 'A', 'O']) + assert_(a.flags['O']) + assert_(a.flags['C']) + assert_(a.flags['A']) + assert_(a.dtype == 'i4') + assert_equal(a, [1, 2, 3, 4]) + + def test_C_and_F_simul(self): + a = self.generate_all_false('f8') + assert_raises(ValueError, np.require, a, None, ['C', 'F']) + + def test_ensure_array(self): + class ArraySubclass(np.ndarray): + pass + + a = ArraySubclass((2, 2)) + b = np.require(a, None, ['E']) + assert_(type(b) is np.ndarray) + + def test_preserve_subtype(self): + class ArraySubclass(np.ndarray): + pass + + for flag in self.flag_names: + a = ArraySubclass((2, 2)) + self.set_and_check_flag(flag, None, a) + + +class TestBroadcast: + def test_broadcast_in_args(self): + # gh-5881 + arrs = [np.empty((6, 7)), np.empty((5, 6, 1)), np.empty((7,)), + np.empty((5, 1, 7))] + mits = [np.broadcast(*arrs), + np.broadcast(np.broadcast(*arrs[:0]), np.broadcast(*arrs[0:])), + np.broadcast(np.broadcast(*arrs[:1]), np.broadcast(*arrs[1:])), + np.broadcast(np.broadcast(*arrs[:2]), np.broadcast(*arrs[2:])), + np.broadcast(arrs[0], np.broadcast(*arrs[1:-1]), arrs[-1])] + for mit in mits: + assert_equal(mit.shape, (5, 6, 7)) + assert_equal(mit.ndim, 3) + assert_equal(mit.nd, 3) + assert_equal(mit.numiter, 4) + for a, ia in zip(arrs, mit.iters): + assert_(a is ia.base) + + def test_broadcast_single_arg(self): + # gh-6899 + arrs = [np.empty((5, 6, 7))] + mit = np.broadcast(*arrs) + assert_equal(mit.shape, (5, 6, 7)) + assert_equal(mit.ndim, 3) + assert_equal(mit.nd, 3) + assert_equal(mit.numiter, 1) + assert_(arrs[0] is mit.iters[0].base) + + def test_number_of_arguments(self): + arr = np.empty((5,)) + for j in range(35): + arrs = [arr] * j + if j > 32: + assert_raises(ValueError, np.broadcast, *arrs) + else: + mit = np.broadcast(*arrs) + assert_equal(mit.numiter, j) + + def test_broadcast_error_kwargs(self): + #gh-13455 + arrs = [np.empty((5, 6, 7))] + mit = np.broadcast(*arrs) + mit2 = np.broadcast(*arrs, **{}) + assert_equal(mit.shape, mit2.shape) + assert_equal(mit.ndim, mit2.ndim) + assert_equal(mit.nd, mit2.nd) + assert_equal(mit.numiter, mit2.numiter) + assert_(mit.iters[0].base is mit2.iters[0].base) + + assert_raises(ValueError, np.broadcast, 1, **{'x': 1}) + + def test_shape_mismatch_error_message(self): + with pytest.raises(ValueError, match=r"arg 0 with shape \(1, 3\) and " + r"arg 2 with shape \(2,\)"): + np.broadcast([[1, 2, 3]], [[4], [5]], [6, 7]) + + +class TestKeepdims: + + class sub_array(np.ndarray): + def sum(self, axis=None, dtype=None, out=None): + return np.ndarray.sum(self, axis, dtype, out, keepdims=True) + + def test_raise(self): + sub_class = self.sub_array + x = np.arange(30).view(sub_class) + assert_raises(TypeError, np.sum, x, keepdims=True) + + +class TestTensordot: + + def test_zero_dimension(self): + # Test resolution to issue #5663 + a = np.ndarray((3,0)) + b = np.ndarray((0,4)) + td = np.tensordot(a, b, (1, 0)) + assert_array_equal(td, np.dot(a, b)) + assert_array_equal(td, np.einsum('ij,jk', a, b)) + + def 
test_zero_dimensional(self): + # gh-12130 + arr_0d = np.array(1) + ret = np.tensordot(arr_0d, arr_0d, ([], [])) # contracting no axes is well defined + assert_array_equal(ret, arr_0d) diff --git a/.venv/lib/python3.12/site-packages/numpy/core/tests/test_numerictypes.py b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_numerictypes.py new file mode 100644 index 00000000..bab5bf24 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_numerictypes.py @@ -0,0 +1,570 @@ +import sys +import itertools + +import pytest +import numpy as np +from numpy.testing import assert_, assert_equal, assert_raises, IS_PYPY + +# This is the structure of the table used for plain objects: +# +# +-+-+-+ +# |x|y|z| +# +-+-+-+ + +# Structure of a plain array description: +Pdescr = [ + ('x', 'i4', (2,)), + ('y', 'f8', (2, 2)), + ('z', 'u1')] + +# A plain list of tuples with values for testing: +PbufferT = [ + # x y z + ([3, 2], [[6., 4.], [6., 4.]], 8), + ([4, 3], [[7., 5.], [7., 5.]], 9), + ] + + +# This is the structure of the table used for nested objects (DON'T PANIC!): +# +# +-+---------------------------------+-----+----------+-+-+ +# |x|Info |color|info |y|z| +# | +-----+--+----------------+----+--+ +----+-----+ | | +# | |value|y2|Info2 |name|z2| |Name|Value| | | +# | | | +----+-----+--+--+ | | | | | | | +# | | | |name|value|y3|z3| | | | | | | | +# +-+-----+--+----+-----+--+--+----+--+-----+----+-----+-+-+ +# + +# The corresponding nested array description: +Ndescr = [ + ('x', 'i4', (2,)), + ('Info', [ + ('value', 'c16'), + ('y2', 'f8'), + ('Info2', [ + ('name', 'S2'), + ('value', 'c16', (2,)), + ('y3', 'f8', (2,)), + ('z3', 'u4', (2,))]), + ('name', 'S2'), + ('z2', 'b1')]), + ('color', 'S2'), + ('info', [ + ('Name', 'U8'), + ('Value', 'c16')]), + ('y', 'f8', (2, 2)), + ('z', 'u1')] + +NbufferT = [ + # x Info color info y z + # value y2 Info2 name z2 Name Value + # name value y3 z3 + ([3, 2], (6j, 6., (b'nn', [6j, 4j], [6., 4.], [1, 2]), b'NN', True), + b'cc', ('NN', 6j), [[6., 4.], [6., 4.]], 8), + ([4, 3], (7j, 7., (b'oo', [7j, 5j], [7., 5.], [2, 1]), b'OO', False), + b'dd', ('OO', 7j), [[7., 5.], [7., 5.]], 9), + ] + + +byteorder = {'little':'<', 'big':'>'}[sys.byteorder] + +def normalize_descr(descr): + "Normalize a description adding the platform byteorder." 
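+    # Only dtypes without an explicit byte-order prefix are touched: one-byte
+    # items and the 'S', 'V' and 'b' kinds get the '|' (not applicable)
+    # marker, everything else gets the platform's native byte-order character.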
+ + out = [] + for item in descr: + dtype = item[1] + if isinstance(dtype, str): + if dtype[0] not in ['|', '<', '>']: + onebyte = dtype[1:] == "1" + if onebyte or dtype[0] in ['S', 'V', 'b']: + dtype = "|" + dtype + else: + dtype = byteorder + dtype + if len(item) > 2 and np.prod(item[2]) > 1: + nitem = (item[0], dtype, item[2]) + else: + nitem = (item[0], dtype) + out.append(nitem) + elif isinstance(dtype, list): + l = normalize_descr(dtype) + out.append((item[0], l)) + else: + raise ValueError("Expected a str or list and got %s" % + (type(item))) + return out + + +############################################################ +# Creation tests +############################################################ + +class CreateZeros: + """Check the creation of heterogeneous arrays zero-valued""" + + def test_zeros0D(self): + """Check creation of 0-dimensional objects""" + h = np.zeros((), dtype=self._descr) + assert_(normalize_descr(self._descr) == h.dtype.descr) + assert_(h.dtype.fields['x'][0].name[:4] == 'void') + assert_(h.dtype.fields['x'][0].char == 'V') + assert_(h.dtype.fields['x'][0].type == np.void) + # A small check that data is ok + assert_equal(h['z'], np.zeros((), dtype='u1')) + + def test_zerosSD(self): + """Check creation of single-dimensional objects""" + h = np.zeros((2,), dtype=self._descr) + assert_(normalize_descr(self._descr) == h.dtype.descr) + assert_(h.dtype['y'].name[:4] == 'void') + assert_(h.dtype['y'].char == 'V') + assert_(h.dtype['y'].type == np.void) + # A small check that data is ok + assert_equal(h['z'], np.zeros((2,), dtype='u1')) + + def test_zerosMD(self): + """Check creation of multi-dimensional objects""" + h = np.zeros((2, 3), dtype=self._descr) + assert_(normalize_descr(self._descr) == h.dtype.descr) + assert_(h.dtype['z'].name == 'uint8') + assert_(h.dtype['z'].char == 'B') + assert_(h.dtype['z'].type == np.uint8) + # A small check that data is ok + assert_equal(h['z'], np.zeros((2, 3), dtype='u1')) + + +class TestCreateZerosPlain(CreateZeros): + """Check the creation of heterogeneous arrays zero-valued (plain)""" + _descr = Pdescr + +class TestCreateZerosNested(CreateZeros): + """Check the creation of heterogeneous arrays zero-valued (nested)""" + _descr = Ndescr + + +class CreateValues: + """Check the creation of heterogeneous arrays with values""" + + def test_tuple(self): + """Check creation from tuples""" + h = np.array(self._buffer, dtype=self._descr) + assert_(normalize_descr(self._descr) == h.dtype.descr) + if self.multiple_rows: + assert_(h.shape == (2,)) + else: + assert_(h.shape == ()) + + def test_list_of_tuple(self): + """Check creation from list of tuples""" + h = np.array([self._buffer], dtype=self._descr) + assert_(normalize_descr(self._descr) == h.dtype.descr) + if self.multiple_rows: + assert_(h.shape == (1, 2)) + else: + assert_(h.shape == (1,)) + + def test_list_of_list_of_tuple(self): + """Check creation from list of list of tuples""" + h = np.array([[self._buffer]], dtype=self._descr) + assert_(normalize_descr(self._descr) == h.dtype.descr) + if self.multiple_rows: + assert_(h.shape == (1, 1, 2)) + else: + assert_(h.shape == (1, 1)) + + +class TestCreateValuesPlainSingle(CreateValues): + """Check the creation of heterogeneous arrays (plain, single row)""" + _descr = Pdescr + multiple_rows = 0 + _buffer = PbufferT[0] + +class TestCreateValuesPlainMultiple(CreateValues): + """Check the creation of heterogeneous arrays (plain, multiple rows)""" + _descr = Pdescr + multiple_rows = 1 + _buffer = PbufferT + +class 
TestCreateValuesNestedSingle(CreateValues): + """Check the creation of heterogeneous arrays (nested, single row)""" + _descr = Ndescr + multiple_rows = 0 + _buffer = NbufferT[0] + +class TestCreateValuesNestedMultiple(CreateValues): + """Check the creation of heterogeneous arrays (nested, multiple rows)""" + _descr = Ndescr + multiple_rows = 1 + _buffer = NbufferT + + +############################################################ +# Reading tests +############################################################ + +class ReadValuesPlain: + """Check the reading of values in heterogeneous arrays (plain)""" + + def test_access_fields(self): + h = np.array(self._buffer, dtype=self._descr) + if not self.multiple_rows: + assert_(h.shape == ()) + assert_equal(h['x'], np.array(self._buffer[0], dtype='i4')) + assert_equal(h['y'], np.array(self._buffer[1], dtype='f8')) + assert_equal(h['z'], np.array(self._buffer[2], dtype='u1')) + else: + assert_(len(h) == 2) + assert_equal(h['x'], np.array([self._buffer[0][0], + self._buffer[1][0]], dtype='i4')) + assert_equal(h['y'], np.array([self._buffer[0][1], + self._buffer[1][1]], dtype='f8')) + assert_equal(h['z'], np.array([self._buffer[0][2], + self._buffer[1][2]], dtype='u1')) + + +class TestReadValuesPlainSingle(ReadValuesPlain): + """Check the creation of heterogeneous arrays (plain, single row)""" + _descr = Pdescr + multiple_rows = 0 + _buffer = PbufferT[0] + +class TestReadValuesPlainMultiple(ReadValuesPlain): + """Check the values of heterogeneous arrays (plain, multiple rows)""" + _descr = Pdescr + multiple_rows = 1 + _buffer = PbufferT + +class ReadValuesNested: + """Check the reading of values in heterogeneous arrays (nested)""" + + def test_access_top_fields(self): + """Check reading the top fields of a nested array""" + h = np.array(self._buffer, dtype=self._descr) + if not self.multiple_rows: + assert_(h.shape == ()) + assert_equal(h['x'], np.array(self._buffer[0], dtype='i4')) + assert_equal(h['y'], np.array(self._buffer[4], dtype='f8')) + assert_equal(h['z'], np.array(self._buffer[5], dtype='u1')) + else: + assert_(len(h) == 2) + assert_equal(h['x'], np.array([self._buffer[0][0], + self._buffer[1][0]], dtype='i4')) + assert_equal(h['y'], np.array([self._buffer[0][4], + self._buffer[1][4]], dtype='f8')) + assert_equal(h['z'], np.array([self._buffer[0][5], + self._buffer[1][5]], dtype='u1')) + + def test_nested1_acessors(self): + """Check reading the nested fields of a nested array (1st level)""" + h = np.array(self._buffer, dtype=self._descr) + if not self.multiple_rows: + assert_equal(h['Info']['value'], + np.array(self._buffer[1][0], dtype='c16')) + assert_equal(h['Info']['y2'], + np.array(self._buffer[1][1], dtype='f8')) + assert_equal(h['info']['Name'], + np.array(self._buffer[3][0], dtype='U2')) + assert_equal(h['info']['Value'], + np.array(self._buffer[3][1], dtype='c16')) + else: + assert_equal(h['Info']['value'], + np.array([self._buffer[0][1][0], + self._buffer[1][1][0]], + dtype='c16')) + assert_equal(h['Info']['y2'], + np.array([self._buffer[0][1][1], + self._buffer[1][1][1]], + dtype='f8')) + assert_equal(h['info']['Name'], + np.array([self._buffer[0][3][0], + self._buffer[1][3][0]], + dtype='U2')) + assert_equal(h['info']['Value'], + np.array([self._buffer[0][3][1], + self._buffer[1][3][1]], + dtype='c16')) + + def test_nested2_acessors(self): + """Check reading the nested fields of a nested array (2nd level)""" + h = np.array(self._buffer, dtype=self._descr) + if not self.multiple_rows: + assert_equal(h['Info']['Info2']['value'], + 
np.array(self._buffer[1][2][1], dtype='c16')) + assert_equal(h['Info']['Info2']['z3'], + np.array(self._buffer[1][2][3], dtype='u4')) + else: + assert_equal(h['Info']['Info2']['value'], + np.array([self._buffer[0][1][2][1], + self._buffer[1][1][2][1]], + dtype='c16')) + assert_equal(h['Info']['Info2']['z3'], + np.array([self._buffer[0][1][2][3], + self._buffer[1][1][2][3]], + dtype='u4')) + + def test_nested1_descriptor(self): + """Check access nested descriptors of a nested array (1st level)""" + h = np.array(self._buffer, dtype=self._descr) + assert_(h.dtype['Info']['value'].name == 'complex128') + assert_(h.dtype['Info']['y2'].name == 'float64') + assert_(h.dtype['info']['Name'].name == 'str256') + assert_(h.dtype['info']['Value'].name == 'complex128') + + def test_nested2_descriptor(self): + """Check access nested descriptors of a nested array (2nd level)""" + h = np.array(self._buffer, dtype=self._descr) + assert_(h.dtype['Info']['Info2']['value'].name == 'void256') + assert_(h.dtype['Info']['Info2']['z3'].name == 'void64') + + +class TestReadValuesNestedSingle(ReadValuesNested): + """Check the values of heterogeneous arrays (nested, single row)""" + _descr = Ndescr + multiple_rows = False + _buffer = NbufferT[0] + +class TestReadValuesNestedMultiple(ReadValuesNested): + """Check the values of heterogeneous arrays (nested, multiple rows)""" + _descr = Ndescr + multiple_rows = True + _buffer = NbufferT + +class TestEmptyField: + def test_assign(self): + a = np.arange(10, dtype=np.float32) + a.dtype = [("int", "<0i4"), ("float", "<2f4")] + assert_(a['int'].shape == (5, 0)) + assert_(a['float'].shape == (5, 2)) + +class TestCommonType: + def test_scalar_loses1(self): + with pytest.warns(DeprecationWarning, match="np.find_common_type"): + res = np.find_common_type(['f4', 'f4', 'i2'], ['f8']) + assert_(res == 'f4') + + def test_scalar_loses2(self): + with pytest.warns(DeprecationWarning, match="np.find_common_type"): + res = np.find_common_type(['f4', 'f4'], ['i8']) + assert_(res == 'f4') + + def test_scalar_wins(self): + with pytest.warns(DeprecationWarning, match="np.find_common_type"): + res = np.find_common_type(['f4', 'f4', 'i2'], ['c8']) + assert_(res == 'c8') + + def test_scalar_wins2(self): + with pytest.warns(DeprecationWarning, match="np.find_common_type"): + res = np.find_common_type(['u4', 'i4', 'i4'], ['f4']) + assert_(res == 'f8') + + def test_scalar_wins3(self): # doesn't go up to 'f16' on purpose + with pytest.warns(DeprecationWarning, match="np.find_common_type"): + res = np.find_common_type(['u8', 'i8', 'i8'], ['f8']) + assert_(res == 'f8') + +class TestMultipleFields: + def setup_method(self): + self.ary = np.array([(1, 2, 3, 4), (5, 6, 7, 8)], dtype='i4,f4,i2,c8') + + def _bad_call(self): + return self.ary['f0', 'f1'] + + def test_no_tuple(self): + assert_raises(IndexError, self._bad_call) + + def test_return(self): + res = self.ary[['f0', 'f2']].tolist() + assert_(res == [(1, 3), (5, 7)]) + + +class TestIsSubDType: + # scalar types can be promoted into dtypes + wrappers = [np.dtype, lambda x: x] + + def test_both_abstract(self): + assert_(np.issubdtype(np.floating, np.inexact)) + assert_(not np.issubdtype(np.inexact, np.floating)) + + def test_same(self): + for cls in (np.float32, np.int32): + for w1, w2 in itertools.product(self.wrappers, repeat=2): + assert_(np.issubdtype(w1(cls), w2(cls))) + + def test_subclass(self): + # note we cannot promote floating to a dtype, as it would turn into a + # concrete type + for w in self.wrappers: + 
assert_(np.issubdtype(w(np.float32), np.floating)) + assert_(np.issubdtype(w(np.float64), np.floating)) + + def test_subclass_backwards(self): + for w in self.wrappers: + assert_(not np.issubdtype(np.floating, w(np.float32))) + assert_(not np.issubdtype(np.floating, w(np.float64))) + + def test_sibling_class(self): + for w1, w2 in itertools.product(self.wrappers, repeat=2): + assert_(not np.issubdtype(w1(np.float32), w2(np.float64))) + assert_(not np.issubdtype(w1(np.float64), w2(np.float32))) + + def test_nondtype_nonscalartype(self): + # See gh-14619 and gh-9505 which introduced the deprecation to fix + # this. These tests are directly taken from gh-9505 + assert not np.issubdtype(np.float32, 'float64') + assert not np.issubdtype(np.float32, 'f8') + assert not np.issubdtype(np.int32, str) + assert not np.issubdtype(np.int32, 'int64') + assert not np.issubdtype(np.str_, 'void') + # for the following the correct spellings are + # np.integer, np.floating, or np.complexfloating respectively: + assert not np.issubdtype(np.int8, int) # np.int8 is never np.int_ + assert not np.issubdtype(np.float32, float) + assert not np.issubdtype(np.complex64, complex) + assert not np.issubdtype(np.float32, "float") + assert not np.issubdtype(np.float64, "f") + + # Test the same for the correct first datatype and abstract one + # in the case of int, float, complex: + assert np.issubdtype(np.float64, 'float64') + assert np.issubdtype(np.float64, 'f8') + assert np.issubdtype(np.str_, str) + assert np.issubdtype(np.int64, 'int64') + assert np.issubdtype(np.void, 'void') + assert np.issubdtype(np.int8, np.integer) + assert np.issubdtype(np.float32, np.floating) + assert np.issubdtype(np.complex64, np.complexfloating) + assert np.issubdtype(np.float64, "float") + assert np.issubdtype(np.float32, "f") + + +class TestSctypeDict: + def test_longdouble(self): + assert_(np.sctypeDict['f8'] is not np.longdouble) + assert_(np.sctypeDict['c16'] is not np.clongdouble) + + def test_ulong(self): + # Test that 'ulong' behaves like 'long'. np.sctypeDict['long'] is an + # alias for np.int_, but np.long is not supported for historical + # reasons (gh-21063) + assert_(np.sctypeDict['ulong'] is np.uint) + with pytest.warns(FutureWarning): + # We will probably allow this in the future: + assert not hasattr(np, 'ulong') + +class TestBitName: + def test_abstract(self): + assert_raises(ValueError, np.core.numerictypes.bitname, np.floating) + + +class TestMaximumSctype: + + # note that parametrizing with sctype['int'] and similar would skip types + # with the same size (gh-11923) + + @pytest.mark.parametrize('t', [np.byte, np.short, np.intc, np.int_, np.longlong]) + def test_int(self, t): + assert_equal(np.maximum_sctype(t), np.sctypes['int'][-1]) + + @pytest.mark.parametrize('t', [np.ubyte, np.ushort, np.uintc, np.uint, np.ulonglong]) + def test_uint(self, t): + assert_equal(np.maximum_sctype(t), np.sctypes['uint'][-1]) + + @pytest.mark.parametrize('t', [np.half, np.single, np.double, np.longdouble]) + def test_float(self, t): + assert_equal(np.maximum_sctype(t), np.sctypes['float'][-1]) + + @pytest.mark.parametrize('t', [np.csingle, np.cdouble, np.clongdouble]) + def test_complex(self, t): + assert_equal(np.maximum_sctype(t), np.sctypes['complex'][-1]) + + @pytest.mark.parametrize('t', [np.bool_, np.object_, np.str_, np.bytes_, + np.void]) + def test_other(self, t): + assert_equal(np.maximum_sctype(t), t) + + +class Test_sctype2char: + # This function is old enough that we're really just documenting the quirks + # at this point. 
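+    # np.sctype2char returns the single-character typecode for a scalar type
+    # (or for anything coercible to one); unregistered and abstract types
+    # raise KeyError, and non-types raise ValueError, as recorded below.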
+ + def test_scalar_type(self): + assert_equal(np.sctype2char(np.double), 'd') + assert_equal(np.sctype2char(np.int_), 'l') + assert_equal(np.sctype2char(np.str_), 'U') + assert_equal(np.sctype2char(np.bytes_), 'S') + + def test_other_type(self): + assert_equal(np.sctype2char(float), 'd') + assert_equal(np.sctype2char(list), 'O') + assert_equal(np.sctype2char(np.ndarray), 'O') + + def test_third_party_scalar_type(self): + from numpy.core._rational_tests import rational + assert_raises(KeyError, np.sctype2char, rational) + assert_raises(KeyError, np.sctype2char, rational(1)) + + def test_array_instance(self): + assert_equal(np.sctype2char(np.array([1.0, 2.0])), 'd') + + def test_abstract_type(self): + assert_raises(KeyError, np.sctype2char, np.floating) + + def test_non_type(self): + assert_raises(ValueError, np.sctype2char, 1) + +@pytest.mark.parametrize("rep, expected", [ + (np.int32, True), + (list, False), + (1.1, False), + (str, True), + (np.dtype(np.float64), True), + (np.dtype((np.int16, (3, 4))), True), + (np.dtype([('a', np.int8)]), True), + ]) +def test_issctype(rep, expected): + # ensure proper identification of scalar + # data-types by issctype() + actual = np.issctype(rep) + assert_equal(actual, expected) + + +@pytest.mark.skipif(sys.flags.optimize > 1, + reason="no docstrings present to inspect when PYTHONOPTIMIZE/Py_OptimizeFlag > 1") +@pytest.mark.xfail(IS_PYPY, + reason="PyPy cannot modify tp_doc after PyType_Ready") +class TestDocStrings: + def test_platform_dependent_aliases(self): + if np.int64 is np.int_: + assert_('int64' in np.int_.__doc__) + elif np.int64 is np.longlong: + assert_('int64' in np.longlong.__doc__) + + +class TestScalarTypeNames: + # gh-9799 + + numeric_types = [ + np.byte, np.short, np.intc, np.int_, np.longlong, + np.ubyte, np.ushort, np.uintc, np.uint, np.ulonglong, + np.half, np.single, np.double, np.longdouble, + np.csingle, np.cdouble, np.clongdouble, + ] + + def test_names_are_unique(self): + # none of the above may be aliases for each other + assert len(set(self.numeric_types)) == len(self.numeric_types) + + # names must be unique + names = [t.__name__ for t in self.numeric_types] + assert len(set(names)) == len(names) + + @pytest.mark.parametrize('t', numeric_types) + def test_names_reflect_attributes(self, t): + """ Test that names correspond to where the type is under ``np.`` """ + assert getattr(np, t.__name__) is t + + @pytest.mark.parametrize('t', numeric_types) + def test_names_are_undersood_by_dtype(self, t): + """ Test the dtype constructor maps names back to the type """ + assert np.dtype(t.__name__).type is t diff --git a/.venv/lib/python3.12/site-packages/numpy/core/tests/test_numpy_2_0_compat.py b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_numpy_2_0_compat.py new file mode 100644 index 00000000..5224261f --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_numpy_2_0_compat.py @@ -0,0 +1,48 @@ +from os import path +import pickle + +import numpy as np + + +class TestNumPy2Compatibility: + + data_dir = path.join(path.dirname(__file__), "data") + filename = path.join(data_dir, "numpy_2_0_array.pkl") + + def test_importable__core_stubs(self): + """ + Checks if stubs for `numpy._core` are importable. 
+ """ + from numpy._core.multiarray import _reconstruct + from numpy._core.umath import cos + from numpy._core._multiarray_umath import exp + from numpy._core._internal import ndarray + from numpy._core._dtype import _construction_repr + from numpy._core._dtype_ctypes import dtype_from_ctypes_type + + def test_unpickle_numpy_2_0_file(self): + """ + Checks that NumPy 1.26 and pickle is able to load pickles + created with NumPy 2.0 without errors/warnings. + """ + with open(self.filename, mode="rb") as file: + content = file.read() + + # Let's make sure that the pickle object we're loading + # was built with NumPy 2.0. + assert b"numpy._core.multiarray" in content + + arr = pickle.loads(content, encoding="latin1") + + assert isinstance(arr, np.ndarray) + assert arr.shape == (73,) and arr.dtype == np.float64 + + def test_numpy_load_numpy_2_0_file(self): + """ + Checks that `numpy.load` for NumPy 1.26 is able to load pickles + created with NumPy 2.0 without errors/warnings. + """ + arr = np.load(self.filename, encoding="latin1", allow_pickle=True) + + assert isinstance(arr, np.ndarray) + assert arr.shape == (73,) and arr.dtype == np.float64 diff --git a/.venv/lib/python3.12/site-packages/numpy/core/tests/test_overrides.py b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_overrides.py new file mode 100644 index 00000000..5924358e --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_overrides.py @@ -0,0 +1,759 @@ +import inspect +import sys +import os +import tempfile +from io import StringIO +from unittest import mock + +import numpy as np +from numpy.testing import ( + assert_, assert_equal, assert_raises, assert_raises_regex) +from numpy.core.overrides import ( + _get_implementing_args, array_function_dispatch, + verify_matching_signatures) +from numpy.compat import pickle +import pytest + + +def _return_not_implemented(self, *args, **kwargs): + return NotImplemented + + +# need to define this at the top level to test pickling +@array_function_dispatch(lambda array: (array,)) +def dispatched_one_arg(array): + """Docstring.""" + return 'original' + + +@array_function_dispatch(lambda array1, array2: (array1, array2)) +def dispatched_two_arg(array1, array2): + """Docstring.""" + return 'original' + + +class TestGetImplementingArgs: + + def test_ndarray(self): + array = np.array(1) + + args = _get_implementing_args([array]) + assert_equal(list(args), [array]) + + args = _get_implementing_args([array, array]) + assert_equal(list(args), [array]) + + args = _get_implementing_args([array, 1]) + assert_equal(list(args), [array]) + + args = _get_implementing_args([1, array]) + assert_equal(list(args), [array]) + + def test_ndarray_subclasses(self): + + class OverrideSub(np.ndarray): + __array_function__ = _return_not_implemented + + class NoOverrideSub(np.ndarray): + pass + + array = np.array(1).view(np.ndarray) + override_sub = np.array(1).view(OverrideSub) + no_override_sub = np.array(1).view(NoOverrideSub) + + args = _get_implementing_args([array, override_sub]) + assert_equal(list(args), [override_sub, array]) + + args = _get_implementing_args([array, no_override_sub]) + assert_equal(list(args), [no_override_sub, array]) + + args = _get_implementing_args( + [override_sub, no_override_sub]) + assert_equal(list(args), [override_sub, no_override_sub]) + + def test_ndarray_and_duck_array(self): + + class Other: + __array_function__ = _return_not_implemented + + array = np.array(1) + other = Other() + + args = _get_implementing_args([other, array]) + 
assert_equal(list(args), [other, array]) + + args = _get_implementing_args([array, other]) + assert_equal(list(args), [array, other]) + + def test_ndarray_subclass_and_duck_array(self): + + class OverrideSub(np.ndarray): + __array_function__ = _return_not_implemented + + class Other: + __array_function__ = _return_not_implemented + + array = np.array(1) + subarray = np.array(1).view(OverrideSub) + other = Other() + + assert_equal(_get_implementing_args([array, subarray, other]), + [subarray, array, other]) + assert_equal(_get_implementing_args([array, other, subarray]), + [subarray, array, other]) + + def test_many_duck_arrays(self): + + class A: + __array_function__ = _return_not_implemented + + class B(A): + __array_function__ = _return_not_implemented + + class C(A): + __array_function__ = _return_not_implemented + + class D: + __array_function__ = _return_not_implemented + + a = A() + b = B() + c = C() + d = D() + + assert_equal(_get_implementing_args([1]), []) + assert_equal(_get_implementing_args([a]), [a]) + assert_equal(_get_implementing_args([a, 1]), [a]) + assert_equal(_get_implementing_args([a, a, a]), [a]) + assert_equal(_get_implementing_args([a, d, a]), [a, d]) + assert_equal(_get_implementing_args([a, b]), [b, a]) + assert_equal(_get_implementing_args([b, a]), [b, a]) + assert_equal(_get_implementing_args([a, b, c]), [b, c, a]) + assert_equal(_get_implementing_args([a, c, b]), [c, b, a]) + + def test_too_many_duck_arrays(self): + namespace = dict(__array_function__=_return_not_implemented) + types = [type('A' + str(i), (object,), namespace) for i in range(33)] + relevant_args = [t() for t in types] + + actual = _get_implementing_args(relevant_args[:32]) + assert_equal(actual, relevant_args[:32]) + + with assert_raises_regex(TypeError, 'distinct argument types'): + _get_implementing_args(relevant_args) + + +class TestNDArrayArrayFunction: + + def test_method(self): + + class Other: + __array_function__ = _return_not_implemented + + class NoOverrideSub(np.ndarray): + pass + + class OverrideSub(np.ndarray): + __array_function__ = _return_not_implemented + + array = np.array([1]) + other = Other() + no_override_sub = array.view(NoOverrideSub) + override_sub = array.view(OverrideSub) + + result = array.__array_function__(func=dispatched_two_arg, + types=(np.ndarray,), + args=(array, 1.), kwargs={}) + assert_equal(result, 'original') + + result = array.__array_function__(func=dispatched_two_arg, + types=(np.ndarray, Other), + args=(array, other), kwargs={}) + assert_(result is NotImplemented) + + result = array.__array_function__(func=dispatched_two_arg, + types=(np.ndarray, NoOverrideSub), + args=(array, no_override_sub), + kwargs={}) + assert_equal(result, 'original') + + result = array.__array_function__(func=dispatched_two_arg, + types=(np.ndarray, OverrideSub), + args=(array, override_sub), + kwargs={}) + assert_equal(result, 'original') + + with assert_raises_regex(TypeError, 'no implementation found'): + np.concatenate((array, other)) + + expected = np.concatenate((array, array)) + result = np.concatenate((array, no_override_sub)) + assert_equal(result, expected.view(NoOverrideSub)) + result = np.concatenate((array, override_sub)) + assert_equal(result, expected.view(OverrideSub)) + + def test_no_wrapper(self): + # This shouldn't happen unless a user intentionally calls + # __array_function__ with invalid arguments, but check that we raise + # an appropriate error all the same. 
+ array = np.array(1) + func = lambda x: x + with assert_raises_regex(AttributeError, '_implementation'): + array.__array_function__(func=func, types=(np.ndarray,), + args=(array,), kwargs={}) + + +class TestArrayFunctionDispatch: + + def test_pickle(self): + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + roundtripped = pickle.loads( + pickle.dumps(dispatched_one_arg, protocol=proto)) + assert_(roundtripped is dispatched_one_arg) + + def test_name_and_docstring(self): + assert_equal(dispatched_one_arg.__name__, 'dispatched_one_arg') + if sys.flags.optimize < 2: + assert_equal(dispatched_one_arg.__doc__, 'Docstring.') + + def test_interface(self): + + class MyArray: + def __array_function__(self, func, types, args, kwargs): + return (self, func, types, args, kwargs) + + original = MyArray() + (obj, func, types, args, kwargs) = dispatched_one_arg(original) + assert_(obj is original) + assert_(func is dispatched_one_arg) + assert_equal(set(types), {MyArray}) + # assert_equal uses the overloaded np.iscomplexobj() internally + assert_(args == (original,)) + assert_equal(kwargs, {}) + + def test_not_implemented(self): + + class MyArray: + def __array_function__(self, func, types, args, kwargs): + return NotImplemented + + array = MyArray() + with assert_raises_regex(TypeError, 'no implementation found'): + dispatched_one_arg(array) + + def test_where_dispatch(self): + + class DuckArray: + def __array_function__(self, ufunc, method, *inputs, **kwargs): + return "overridden" + + array = np.array(1) + duck_array = DuckArray() + + result = np.std(array, where=duck_array) + + assert_equal(result, "overridden") + + +class TestVerifyMatchingSignatures: + + def test_verify_matching_signatures(self): + + verify_matching_signatures(lambda x: 0, lambda x: 0) + verify_matching_signatures(lambda x=None: 0, lambda x=None: 0) + verify_matching_signatures(lambda x=1: 0, lambda x=None: 0) + + with assert_raises(RuntimeError): + verify_matching_signatures(lambda a: 0, lambda b: 0) + with assert_raises(RuntimeError): + verify_matching_signatures(lambda x: 0, lambda x=None: 0) + with assert_raises(RuntimeError): + verify_matching_signatures(lambda x=None: 0, lambda y=None: 0) + with assert_raises(RuntimeError): + verify_matching_signatures(lambda x=1: 0, lambda y=1: 0) + + def test_array_function_dispatch(self): + + with assert_raises(RuntimeError): + @array_function_dispatch(lambda x: (x,)) + def f(y): + pass + + # should not raise + @array_function_dispatch(lambda x: (x,), verify=False) + def f(y): + pass + + +def _new_duck_type_and_implements(): + """Create a duck array type and implements functions.""" + HANDLED_FUNCTIONS = {} + + class MyArray: + def __array_function__(self, func, types, args, kwargs): + if func not in HANDLED_FUNCTIONS: + return NotImplemented + if not all(issubclass(t, MyArray) for t in types): + return NotImplemented + return HANDLED_FUNCTIONS[func](*args, **kwargs) + + def implements(numpy_function): + """Register an __array_function__ implementations.""" + def decorator(func): + HANDLED_FUNCTIONS[numpy_function] = func + return func + return decorator + + return (MyArray, implements) + + +class TestArrayFunctionImplementation: + + def test_one_arg(self): + MyArray, implements = _new_duck_type_and_implements() + + @implements(dispatched_one_arg) + def _(array): + return 'myarray' + + assert_equal(dispatched_one_arg(1), 'original') + assert_equal(dispatched_one_arg(MyArray()), 'myarray') + + def test_optional_args(self): + MyArray, implements = _new_duck_type_and_implements() + + 
@array_function_dispatch(lambda array, option=None: (array,)) + def func_with_option(array, option='default'): + return option + + @implements(func_with_option) + def my_array_func_with_option(array, new_option='myarray'): + return new_option + + # we don't need to implement every option on __array_function__ + # implementations + assert_equal(func_with_option(1), 'default') + assert_equal(func_with_option(1, option='extra'), 'extra') + assert_equal(func_with_option(MyArray()), 'myarray') + with assert_raises(TypeError): + func_with_option(MyArray(), option='extra') + + # but new options on implementations can't be used + result = my_array_func_with_option(MyArray(), new_option='yes') + assert_equal(result, 'yes') + with assert_raises(TypeError): + func_with_option(MyArray(), new_option='no') + + def test_not_implemented(self): + MyArray, implements = _new_duck_type_and_implements() + + @array_function_dispatch(lambda array: (array,), module='my') + def func(array): + return array + + array = np.array(1) + assert_(func(array) is array) + assert_equal(func.__module__, 'my') + + with assert_raises_regex( + TypeError, "no implementation found for 'my.func'"): + func(MyArray()) + + @pytest.mark.parametrize("name", ["concatenate", "mean", "asarray"]) + def test_signature_error_message_simple(self, name): + func = getattr(np, name) + try: + # all of these functions need an argument: + func() + except TypeError as e: + exc = e + + assert exc.args[0].startswith(f"{name}()") + + def test_signature_error_message(self): + # The lambda function will be named "<lambda>", but the TypeError + # should show the name as "func" + def _dispatcher(): + return () + + @array_function_dispatch(_dispatcher) + def func(): + pass + + try: + func._implementation(bad_arg=3) + except TypeError as e: + expected_exception = e + + try: + func(bad_arg=3) + raise AssertionError("must fail") + except TypeError as exc: + if exc.args[0].startswith("_dispatcher"): + # We replace the qualname currently, but it used `__name__` + # (relevant functions have the same name and qualname anyway) + pytest.skip("Python version is not using __qualname__ for " + "TypeError formatting.") + + assert exc.args == expected_exception.args + + @pytest.mark.parametrize("value", [234, "this func is not replaced"]) + def test_dispatcher_error(self, value): + # If the dispatcher raises an error, we must not attempt to mutate it + error = TypeError(value) + + def dispatcher(): + raise error + + @array_function_dispatch(dispatcher) + def func(): + return 3 + + try: + func() + raise AssertionError("must fail") + except TypeError as exc: + assert exc is error # unmodified exception + + def test_properties(self): + # Check that str and repr are sensible + func = dispatched_two_arg + assert str(func) == str(func._implementation) + repr_no_id = repr(func).split("at ")[0] + repr_no_id_impl = repr(func._implementation).split("at ")[0] + assert repr_no_id == repr_no_id_impl + + @pytest.mark.parametrize("func", [ + lambda x, y: 0, # no like argument + lambda like=None: 0, # not keyword only + lambda *, like=None, a=3: 0, # not last (not that it matters) + ]) + def test_bad_like_sig(self, func): + # We sanity check the signature, and these should fail. 
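+        # Without an explicit dispatcher the decorator only accepts functions
+        # whose last parameter is a keyword-only ``like`` argument, so each of
+        # the lambdas above is rejected with a RuntimeError at decoration time.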
+ with pytest.raises(RuntimeError): + array_function_dispatch()(func) + + def test_bad_like_passing(self): + # Cover internal sanity check for passing like as first positional arg + def func(*, like=None): + pass + + func_with_like = array_function_dispatch()(func) + with pytest.raises(TypeError): + func_with_like() + with pytest.raises(TypeError): + func_with_like(like=234) + + def test_too_many_args(self): + # Mainly a unit-test to increase coverage + objs = [] + for i in range(40): + class MyArr: + def __array_function__(self, *args, **kwargs): + return NotImplemented + + objs.append(MyArr()) + + def _dispatch(*args): + return args + + @array_function_dispatch(_dispatch) + def func(*args): + pass + + with pytest.raises(TypeError, match="maximum number"): + func(*objs) + + + +class TestNDArrayMethods: + + def test_repr(self): + # gh-12162: should still be defined even if __array_function__ doesn't + # implement np.array_repr() + + class MyArray(np.ndarray): + def __array_function__(*args, **kwargs): + return NotImplemented + + array = np.array(1).view(MyArray) + assert_equal(repr(array), 'MyArray(1)') + assert_equal(str(array), '1') + + +class TestNumPyFunctions: + + def test_set_module(self): + assert_equal(np.sum.__module__, 'numpy') + assert_equal(np.char.equal.__module__, 'numpy.char') + assert_equal(np.fft.fft.__module__, 'numpy.fft') + assert_equal(np.linalg.solve.__module__, 'numpy.linalg') + + def test_inspect_sum(self): + signature = inspect.signature(np.sum) + assert_('axis' in signature.parameters) + + def test_override_sum(self): + MyArray, implements = _new_duck_type_and_implements() + + @implements(np.sum) + def _(array): + return 'yes' + + assert_equal(np.sum(MyArray()), 'yes') + + def test_sum_on_mock_array(self): + + # We need a proxy for mocks because __array_function__ is only looked + # up in the class dict + class ArrayProxy: + def __init__(self, value): + self.value = value + def __array_function__(self, *args, **kwargs): + return self.value.__array_function__(*args, **kwargs) + def __array__(self, *args, **kwargs): + return self.value.__array__(*args, **kwargs) + + proxy = ArrayProxy(mock.Mock(spec=ArrayProxy)) + proxy.value.__array_function__.return_value = 1 + result = np.sum(proxy) + assert_equal(result, 1) + proxy.value.__array_function__.assert_called_once_with( + np.sum, (ArrayProxy,), (proxy,), {}) + proxy.value.__array__.assert_not_called() + + def test_sum_forwarding_implementation(self): + + class MyArray(np.ndarray): + + def sum(self, axis, out): + return 'summed' + + def __array_function__(self, func, types, args, kwargs): + return super().__array_function__(func, types, args, kwargs) + + # note: the internal implementation of np.sum() calls the .sum() method + array = np.array(1).view(MyArray) + assert_equal(np.sum(array), 'summed') + + +class TestArrayLike: + def setup_method(self): + class MyArray(): + def __init__(self, function=None): + self.function = function + + def __array_function__(self, func, types, args, kwargs): + assert func is getattr(np, func.__name__) + try: + my_func = getattr(self, func.__name__) + except AttributeError: + return NotImplemented + return my_func(*args, **kwargs) + + self.MyArray = MyArray + + class MyNoArrayFunctionArray(): + def __init__(self, function=None): + self.function = function + + self.MyNoArrayFunctionArray = MyNoArrayFunctionArray + + def add_method(self, name, arr_class, enable_value_error=False): + def _definition(*args, **kwargs): + # Check that `like=` isn't propagated downstream + assert 'like' not in 
kwargs + + if enable_value_error and 'value_error' in kwargs: + raise ValueError + + return arr_class(getattr(arr_class, name)) + setattr(arr_class, name, _definition) + + def func_args(*args, **kwargs): + return args, kwargs + + def test_array_like_not_implemented(self): + self.add_method('array', self.MyArray) + + ref = self.MyArray.array() + + with assert_raises_regex(TypeError, 'no implementation found'): + array_like = np.asarray(1, like=ref) + + _array_tests = [ + ('array', *func_args((1,))), + ('asarray', *func_args((1,))), + ('asanyarray', *func_args((1,))), + ('ascontiguousarray', *func_args((2, 3))), + ('asfortranarray', *func_args((2, 3))), + ('require', *func_args((np.arange(6).reshape(2, 3),), + requirements=['A', 'F'])), + ('empty', *func_args((1,))), + ('full', *func_args((1,), 2)), + ('ones', *func_args((1,))), + ('zeros', *func_args((1,))), + ('arange', *func_args(3)), + ('frombuffer', *func_args(b'\x00' * 8, dtype=int)), + ('fromiter', *func_args(range(3), dtype=int)), + ('fromstring', *func_args('1,2', dtype=int, sep=',')), + ('loadtxt', *func_args(lambda: StringIO('0 1\n2 3'))), + ('genfromtxt', *func_args(lambda: StringIO('1,2.1'), + dtype=[('int', 'i8'), ('float', 'f8')], + delimiter=',')), + ] + + @pytest.mark.parametrize('function, args, kwargs', _array_tests) + @pytest.mark.parametrize('numpy_ref', [True, False]) + def test_array_like(self, function, args, kwargs, numpy_ref): + self.add_method('array', self.MyArray) + self.add_method(function, self.MyArray) + np_func = getattr(np, function) + my_func = getattr(self.MyArray, function) + + if numpy_ref is True: + ref = np.array(1) + else: + ref = self.MyArray.array() + + like_args = tuple(a() if callable(a) else a for a in args) + array_like = np_func(*like_args, **kwargs, like=ref) + + if numpy_ref is True: + assert type(array_like) is np.ndarray + + np_args = tuple(a() if callable(a) else a for a in args) + np_arr = np_func(*np_args, **kwargs) + + # Special-case np.empty to ensure values match + if function == "empty": + np_arr.fill(1) + array_like.fill(1) + + assert_equal(array_like, np_arr) + else: + assert type(array_like) is self.MyArray + assert array_like.function is my_func + + @pytest.mark.parametrize('function, args, kwargs', _array_tests) + @pytest.mark.parametrize('ref', [1, [1], "MyNoArrayFunctionArray"]) + def test_no_array_function_like(self, function, args, kwargs, ref): + self.add_method('array', self.MyNoArrayFunctionArray) + self.add_method(function, self.MyNoArrayFunctionArray) + np_func = getattr(np, function) + + # Instantiate ref if it's the MyNoArrayFunctionArray class + if ref == "MyNoArrayFunctionArray": + ref = self.MyNoArrayFunctionArray.array() + + like_args = tuple(a() if callable(a) else a for a in args) + + with assert_raises_regex(TypeError, + 'The `like` argument must be an array-like that implements'): + np_func(*like_args, **kwargs, like=ref) + + @pytest.mark.parametrize('numpy_ref', [True, False]) + def test_array_like_fromfile(self, numpy_ref): + self.add_method('array', self.MyArray) + self.add_method("fromfile", self.MyArray) + + if numpy_ref is True: + ref = np.array(1) + else: + ref = self.MyArray.array() + + data = np.random.random(5) + + with tempfile.TemporaryDirectory() as tmpdir: + fname = os.path.join(tmpdir, "testfile") + data.tofile(fname) + + array_like = np.fromfile(fname, like=ref) + if numpy_ref is True: + assert type(array_like) is np.ndarray + np_res = np.fromfile(fname, like=ref) + assert_equal(np_res, data) + assert_equal(array_like, np_res) + else: + assert 
type(array_like) is self.MyArray + assert array_like.function is self.MyArray.fromfile + + def test_exception_handling(self): + self.add_method('array', self.MyArray, enable_value_error=True) + + ref = self.MyArray.array() + + with assert_raises(TypeError): + # Raises the error about `value_error` being invalid first + np.array(1, value_error=True, like=ref) + + @pytest.mark.parametrize('function, args, kwargs', _array_tests) + def test_like_as_none(self, function, args, kwargs): + self.add_method('array', self.MyArray) + self.add_method(function, self.MyArray) + np_func = getattr(np, function) + + like_args = tuple(a() if callable(a) else a for a in args) + # required for loadtxt and genfromtxt to init w/o error. + like_args_exp = tuple(a() if callable(a) else a for a in args) + + array_like = np_func(*like_args, **kwargs, like=None) + expected = np_func(*like_args_exp, **kwargs) + # Special-case np.empty to ensure values match + if function == "empty": + array_like.fill(1) + expected.fill(1) + assert_equal(array_like, expected) + + +def test_function_like(): + # We provide a `__get__` implementation, make sure it works + assert type(np.mean) is np.core._multiarray_umath._ArrayFunctionDispatcher + + class MyClass: + def __array__(self): + # valid argument to mean: + return np.arange(3) + + func1 = staticmethod(np.mean) + func2 = np.mean + func3 = classmethod(np.mean) + + m = MyClass() + assert m.func1([10]) == 10 + assert m.func2() == 1 # mean of the arange + with pytest.raises(TypeError, match="unsupported operand type"): + # Tries to operate on the class + m.func3() + + # Manual binding also works (the above may shortcut): + bound = np.mean.__get__(m, MyClass) + assert bound() == 1 + + bound = np.mean.__get__(None, MyClass) # unbound actually + assert bound([10]) == 10 + + bound = np.mean.__get__(MyClass) # classmethod + with pytest.raises(TypeError, match="unsupported operand type"): + bound() + + +def test_scipy_trapz_support_shim(): + # SciPy 1.10 and earlier "clone" trapz in this way, so we have a + # support shim in place: https://github.com/scipy/scipy/issues/17811 + # That should be removed eventually. This test copies what SciPy does. + # Hopefully removable 1 year after SciPy 1.11; shim added to NumPy 1.25. + import types + import functools + + def _copy_func(f): + # Based on http://stackoverflow.com/a/6528148/190597 (Glenn Maynard) + g = types.FunctionType(f.__code__, f.__globals__, name=f.__name__, + argdefs=f.__defaults__, closure=f.__closure__) + g = functools.update_wrapper(g, f) + g.__kwdefaults__ = f.__kwdefaults__ + return g + + trapezoid = _copy_func(np.trapz) + + assert np.trapz([1, 2]) == trapezoid([1, 2]) diff --git a/.venv/lib/python3.12/site-packages/numpy/core/tests/test_print.py b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_print.py new file mode 100644 index 00000000..162686ee --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_print.py @@ -0,0 +1,202 @@ +import sys + +import pytest + +import numpy as np +from numpy.testing import assert_, assert_equal, IS_MUSL +from numpy.core.tests._locales import CommaDecimalPointLocale + + +from io import StringIO + +_REF = {np.inf: 'inf', -np.inf: '-inf', np.nan: 'nan'} + + +@pytest.mark.parametrize('tp', [np.float32, np.double, np.longdouble]) +def test_float_types(tp): + """ Check formatting. + + This is only for the str function, and only for simple types. + The precision of np.float32 and np.longdouble aren't the same as the + python float precision. 
+ + """ + for x in [0, 1, -1, 1e20]: + assert_equal(str(tp(x)), str(float(x)), + err_msg='Failed str formatting for type %s' % tp) + + if tp(1e16).itemsize > 4: + assert_equal(str(tp(1e16)), str(float('1e16')), + err_msg='Failed str formatting for type %s' % tp) + else: + ref = '1e+16' + assert_equal(str(tp(1e16)), ref, + err_msg='Failed str formatting for type %s' % tp) + + +@pytest.mark.parametrize('tp', [np.float32, np.double, np.longdouble]) +def test_nan_inf_float(tp): + """ Check formatting of nan & inf. + + This is only for the str function, and only for simple types. + The precision of np.float32 and np.longdouble aren't the same as the + python float precision. + + """ + for x in [np.inf, -np.inf, np.nan]: + assert_equal(str(tp(x)), _REF[x], + err_msg='Failed str formatting for type %s' % tp) + + +@pytest.mark.parametrize('tp', [np.complex64, np.cdouble, np.clongdouble]) +def test_complex_types(tp): + """Check formatting of complex types. + + This is only for the str function, and only for simple types. + The precision of np.float32 and np.longdouble aren't the same as the + python float precision. + + """ + for x in [0, 1, -1, 1e20]: + assert_equal(str(tp(x)), str(complex(x)), + err_msg='Failed str formatting for type %s' % tp) + assert_equal(str(tp(x*1j)), str(complex(x*1j)), + err_msg='Failed str formatting for type %s' % tp) + assert_equal(str(tp(x + x*1j)), str(complex(x + x*1j)), + err_msg='Failed str formatting for type %s' % tp) + + if tp(1e16).itemsize > 8: + assert_equal(str(tp(1e16)), str(complex(1e16)), + err_msg='Failed str formatting for type %s' % tp) + else: + ref = '(1e+16+0j)' + assert_equal(str(tp(1e16)), ref, + err_msg='Failed str formatting for type %s' % tp) + + +@pytest.mark.parametrize('dtype', [np.complex64, np.cdouble, np.clongdouble]) +def test_complex_inf_nan(dtype): + """Check inf/nan formatting of complex types.""" + TESTS = { + complex(np.inf, 0): "(inf+0j)", + complex(0, np.inf): "infj", + complex(-np.inf, 0): "(-inf+0j)", + complex(0, -np.inf): "-infj", + complex(np.inf, 1): "(inf+1j)", + complex(1, np.inf): "(1+infj)", + complex(-np.inf, 1): "(-inf+1j)", + complex(1, -np.inf): "(1-infj)", + complex(np.nan, 0): "(nan+0j)", + complex(0, np.nan): "nanj", + complex(-np.nan, 0): "(nan+0j)", + complex(0, -np.nan): "nanj", + complex(np.nan, 1): "(nan+1j)", + complex(1, np.nan): "(1+nanj)", + complex(-np.nan, 1): "(nan+1j)", + complex(1, -np.nan): "(1+nanj)", + } + for c, s in TESTS.items(): + assert_equal(str(dtype(c)), s) + + +# print tests +def _test_redirected_print(x, tp, ref=None): + file = StringIO() + file_tp = StringIO() + stdout = sys.stdout + try: + sys.stdout = file_tp + print(tp(x)) + sys.stdout = file + if ref: + print(ref) + else: + print(x) + finally: + sys.stdout = stdout + + assert_equal(file.getvalue(), file_tp.getvalue(), + err_msg='print failed for type%s' % tp) + + +@pytest.mark.parametrize('tp', [np.float32, np.double, np.longdouble]) +def test_float_type_print(tp): + """Check formatting when using print """ + for x in [0, 1, -1, 1e20]: + _test_redirected_print(float(x), tp) + + for x in [np.inf, -np.inf, np.nan]: + _test_redirected_print(float(x), tp, _REF[x]) + + if tp(1e16).itemsize > 4: + _test_redirected_print(float(1e16), tp) + else: + ref = '1e+16' + _test_redirected_print(float(1e16), tp, ref) + + +@pytest.mark.parametrize('tp', [np.complex64, np.cdouble, np.clongdouble]) +def test_complex_type_print(tp): + """Check formatting when using print """ + # We do not create complex with inf/nan directly because the feature is + # 
missing in python < 2.6 + for x in [0, 1, -1, 1e20]: + _test_redirected_print(complex(x), tp) + + if tp(1e16).itemsize > 8: + _test_redirected_print(complex(1e16), tp) + else: + ref = '(1e+16+0j)' + _test_redirected_print(complex(1e16), tp, ref) + + _test_redirected_print(complex(np.inf, 1), tp, '(inf+1j)') + _test_redirected_print(complex(-np.inf, 1), tp, '(-inf+1j)') + _test_redirected_print(complex(-np.nan, 1), tp, '(nan+1j)') + + +def test_scalar_format(): + """Test the str.format method with NumPy scalar types""" + tests = [('{0}', True, np.bool_), + ('{0}', False, np.bool_), + ('{0:d}', 130, np.uint8), + ('{0:d}', 50000, np.uint16), + ('{0:d}', 3000000000, np.uint32), + ('{0:d}', 15000000000000000000, np.uint64), + ('{0:d}', -120, np.int8), + ('{0:d}', -30000, np.int16), + ('{0:d}', -2000000000, np.int32), + ('{0:d}', -7000000000000000000, np.int64), + ('{0:g}', 1.5, np.float16), + ('{0:g}', 1.5, np.float32), + ('{0:g}', 1.5, np.float64), + ('{0:g}', 1.5, np.longdouble), + ('{0:g}', 1.5+0.5j, np.complex64), + ('{0:g}', 1.5+0.5j, np.complex128), + ('{0:g}', 1.5+0.5j, np.clongdouble)] + + for (fmat, val, valtype) in tests: + try: + assert_equal(fmat.format(val), fmat.format(valtype(val)), + "failed with val %s, type %s" % (val, valtype)) + except ValueError as e: + assert_(False, + "format raised exception (fmt='%s', val=%s, type=%s, exc='%s')" % + (fmat, repr(val), repr(valtype), str(e))) + + +# +# Locale tests: scalar types formatting should be independent of the locale +# + +class TestCommaDecimalPointLocale(CommaDecimalPointLocale): + + def test_locale_single(self): + assert_equal(str(np.float32(1.2)), str(float(1.2))) + + def test_locale_double(self): + assert_equal(str(np.double(1.2)), str(float(1.2))) + + @pytest.mark.skipif(IS_MUSL, + reason="test flaky on musllinux") + def test_locale_longdouble(self): + assert_equal(str(np.longdouble('1.2')), str(float(1.2))) diff --git a/.venv/lib/python3.12/site-packages/numpy/core/tests/test_protocols.py b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_protocols.py new file mode 100644 index 00000000..55a2bcf7 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_protocols.py @@ -0,0 +1,44 @@ +import pytest +import warnings +import numpy as np + + +@pytest.mark.filterwarnings("error") +def test_getattr_warning(): + # issue gh-14735: make sure we clear only getattr errors, and let warnings + # through + class Wrapper: + def __init__(self, array): + self.array = array + + def __len__(self): + return len(self.array) + + def __getitem__(self, item): + return type(self)(self.array[item]) + + def __getattr__(self, name): + if name.startswith("__array_"): + warnings.warn("object got converted", UserWarning, stacklevel=1) + + return getattr(self.array, name) + + def __repr__(self): + return "<Wrapper({self.array})>".format(self=self) + + array = Wrapper(np.arange(10)) + with pytest.raises(UserWarning, match="object got converted"): + np.asarray(array) + + +def test_array_called(): + class Wrapper: + val = '0' * 100 + def __array__(self, result=None): + return np.array([self.val], dtype=object) + + + wrapped = Wrapper() + arr = np.array(wrapped, dtype=str) + assert arr.dtype == 'U100' + assert arr[0] == Wrapper.val diff --git a/.venv/lib/python3.12/site-packages/numpy/core/tests/test_records.py b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_records.py new file mode 100644 index 00000000..a76ae2d9 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_records.py @@ -0,0 +1,520 @@ 
+import collections.abc +import textwrap +from io import BytesIO +from os import path +from pathlib import Path +import pytest + +import numpy as np +from numpy.testing import ( + assert_, assert_equal, assert_array_equal, assert_array_almost_equal, + assert_raises, temppath, + ) +from numpy.compat import pickle + + +class TestFromrecords: + def test_fromrecords(self): + r = np.rec.fromrecords([[456, 'dbe', 1.2], [2, 'de', 1.3]], + names='col1,col2,col3') + assert_equal(r[0].item(), (456, 'dbe', 1.2)) + assert_equal(r['col1'].dtype.kind, 'i') + assert_equal(r['col2'].dtype.kind, 'U') + assert_equal(r['col2'].dtype.itemsize, 12) + assert_equal(r['col3'].dtype.kind, 'f') + + def test_fromrecords_0len(self): + """ Verify fromrecords works with a 0-length input """ + dtype = [('a', float), ('b', float)] + r = np.rec.fromrecords([], dtype=dtype) + assert_equal(r.shape, (0,)) + + def test_fromrecords_2d(self): + data = [ + [(1, 2), (3, 4), (5, 6)], + [(6, 5), (4, 3), (2, 1)] + ] + expected_a = [[1, 3, 5], [6, 4, 2]] + expected_b = [[2, 4, 6], [5, 3, 1]] + + # try with dtype + r1 = np.rec.fromrecords(data, dtype=[('a', int), ('b', int)]) + assert_equal(r1['a'], expected_a) + assert_equal(r1['b'], expected_b) + + # try with names + r2 = np.rec.fromrecords(data, names=['a', 'b']) + assert_equal(r2['a'], expected_a) + assert_equal(r2['b'], expected_b) + + assert_equal(r1, r2) + + def test_method_array(self): + r = np.rec.array(b'abcdefg' * 100, formats='i2,a3,i4', shape=3, byteorder='big') + assert_equal(r[1].item(), (25444, b'efg', 1633837924)) + + def test_method_array2(self): + r = np.rec.array([(1, 11, 'a'), (2, 22, 'b'), (3, 33, 'c'), (4, 44, 'd'), (5, 55, 'ex'), + (6, 66, 'f'), (7, 77, 'g')], formats='u1,f4,a1') + assert_equal(r[1].item(), (2, 22.0, b'b')) + + def test_recarray_slices(self): + r = np.rec.array([(1, 11, 'a'), (2, 22, 'b'), (3, 33, 'c'), (4, 44, 'd'), (5, 55, 'ex'), + (6, 66, 'f'), (7, 77, 'g')], formats='u1,f4,a1') + assert_equal(r[1::2][1].item(), (4, 44.0, b'd')) + + def test_recarray_fromarrays(self): + x1 = np.array([1, 2, 3, 4]) + x2 = np.array(['a', 'dd', 'xyz', '12']) + x3 = np.array([1.1, 2, 3, 4]) + r = np.rec.fromarrays([x1, x2, x3], names='a,b,c') + assert_equal(r[1].item(), (2, 'dd', 2.0)) + x1[1] = 34 + assert_equal(r.a, np.array([1, 2, 3, 4])) + + def test_recarray_fromfile(self): + data_dir = path.join(path.dirname(__file__), 'data') + filename = path.join(data_dir, 'recarray_from_file.fits') + fd = open(filename, 'rb') + fd.seek(2880 * 2) + r1 = np.rec.fromfile(fd, formats='f8,i4,a5', shape=3, byteorder='big') + fd.seek(2880 * 2) + r2 = np.rec.array(fd, formats='f8,i4,a5', shape=3, byteorder='big') + fd.seek(2880 * 2) + bytes_array = BytesIO() + bytes_array.write(fd.read()) + bytes_array.seek(0) + r3 = np.rec.fromfile(bytes_array, formats='f8,i4,a5', shape=3, byteorder='big') + fd.close() + assert_equal(r1, r2) + assert_equal(r2, r3) + + def test_recarray_from_obj(self): + count = 10 + a = np.zeros(count, dtype='O') + b = np.zeros(count, dtype='f8') + c = np.zeros(count, dtype='f8') + for i in range(len(a)): + a[i] = list(range(1, 10)) + + mine = np.rec.fromarrays([a, b, c], names='date,data1,data2') + for i in range(len(a)): + assert_((mine.date[i] == list(range(1, 10)))) + assert_((mine.data1[i] == 0.0)) + assert_((mine.data2[i] == 0.0)) + + def test_recarray_repr(self): + a = np.array([(1, 0.1), (2, 0.2)], + dtype=[('foo', '<i4'), ('bar', '<f8')]) + a = np.rec.array(a) + assert_equal( + repr(a), + textwrap.dedent("""\ + rec.array([(1, 0.1), (2, 0.2)], + 
dtype=[('foo', '<i4'), ('bar', '<f8')])""") + ) + + # make sure non-structured dtypes also show up as rec.array + a = np.array(np.ones(4, dtype='f8')) + assert_(repr(np.rec.array(a)).startswith('rec.array')) + + # check that the 'np.record' part of the dtype isn't shown + a = np.rec.array(np.ones(3, dtype='i4,i4')) + assert_equal(repr(a).find('numpy.record'), -1) + a = np.rec.array(np.ones(3, dtype='i4')) + assert_(repr(a).find('dtype=int32') != -1) + + def test_0d_recarray_repr(self): + arr_0d = np.rec.array((1, 2.0, '2003'), dtype='<i4,<f8,<M8[Y]') + assert_equal(repr(arr_0d), textwrap.dedent("""\ + rec.array((1, 2., '2003'), + dtype=[('f0', '<i4'), ('f1', '<f8'), ('f2', '<M8[Y]')])""")) + + record = arr_0d[()] + assert_equal(repr(record), "(1, 2., '2003')") + # 1.13 converted to python scalars before the repr + try: + np.set_printoptions(legacy='1.13') + assert_equal(repr(record), '(1, 2.0, datetime.date(2003, 1, 1))') + finally: + np.set_printoptions(legacy=False) + + def test_recarray_from_repr(self): + a = np.array([(1,'ABC'), (2, "DEF")], + dtype=[('foo', int), ('bar', 'S4')]) + recordarr = np.rec.array(a) + recarr = a.view(np.recarray) + recordview = a.view(np.dtype((np.record, a.dtype))) + + recordarr_r = eval("numpy." + repr(recordarr), {'numpy': np}) + recarr_r = eval("numpy." + repr(recarr), {'numpy': np}) + recordview_r = eval("numpy." + repr(recordview), {'numpy': np}) + + assert_equal(type(recordarr_r), np.recarray) + assert_equal(recordarr_r.dtype.type, np.record) + assert_equal(recordarr, recordarr_r) + + assert_equal(type(recarr_r), np.recarray) + assert_equal(recarr_r.dtype.type, np.record) + assert_equal(recarr, recarr_r) + + assert_equal(type(recordview_r), np.ndarray) + assert_equal(recordview.dtype.type, np.record) + assert_equal(recordview, recordview_r) + + def test_recarray_views(self): + a = np.array([(1,'ABC'), (2, "DEF")], + dtype=[('foo', int), ('bar', 'S4')]) + b = np.array([1,2,3,4,5], dtype=np.int64) + + #check that np.rec.array gives right dtypes + assert_equal(np.rec.array(a).dtype.type, np.record) + assert_equal(type(np.rec.array(a)), np.recarray) + assert_equal(np.rec.array(b).dtype.type, np.int64) + assert_equal(type(np.rec.array(b)), np.recarray) + + #check that viewing as recarray does the same + assert_equal(a.view(np.recarray).dtype.type, np.record) + assert_equal(type(a.view(np.recarray)), np.recarray) + assert_equal(b.view(np.recarray).dtype.type, np.int64) + assert_equal(type(b.view(np.recarray)), np.recarray) + + #check that view to non-structured dtype preserves type=np.recarray + r = np.rec.array(np.ones(4, dtype="f4,i4")) + rv = r.view('f8').view('f4,i4') + assert_equal(type(rv), np.recarray) + assert_equal(rv.dtype.type, np.record) + + #check that getitem also preserves np.recarray and np.record + r = np.rec.array(np.ones(4, dtype=[('a', 'i4'), ('b', 'i4'), + ('c', 'i4,i4')])) + assert_equal(r['c'].dtype.type, np.record) + assert_equal(type(r['c']), np.recarray) + + #and that it preserves subclasses (gh-6949) + class C(np.recarray): + pass + + c = r.view(C) + assert_equal(type(c['c']), C) + + # check that accessing nested structures keep record type, but + # not for subarrays, non-void structures, non-structured voids + test_dtype = [('a', 'f4,f4'), ('b', 'V8'), ('c', ('f4',2)), + ('d', ('i8', 'i4,i4'))] + r = np.rec.array([((1,1), b'11111111', [1,1], 1), + ((1,1), b'11111111', [1,1], 1)], dtype=test_dtype) + assert_equal(r.a.dtype.type, np.record) + assert_equal(r.b.dtype.type, np.void) + assert_equal(r.c.dtype.type, np.float32) + 
assert_equal(r.d.dtype.type, np.int64) + # check the same, but for views + r = np.rec.array(np.ones(4, dtype='i4,i4')) + assert_equal(r.view('f4,f4').dtype.type, np.record) + assert_equal(r.view(('i4',2)).dtype.type, np.int32) + assert_equal(r.view('V8').dtype.type, np.void) + assert_equal(r.view(('i8', 'i4,i4')).dtype.type, np.int64) + + #check that we can undo the view + arrs = [np.ones(4, dtype='f4,i4'), np.ones(4, dtype='f8')] + for arr in arrs: + rec = np.rec.array(arr) + # recommended way to view as an ndarray: + arr2 = rec.view(rec.dtype.fields or rec.dtype, np.ndarray) + assert_equal(arr2.dtype.type, arr.dtype.type) + assert_equal(type(arr2), type(arr)) + + def test_recarray_from_names(self): + ra = np.rec.array([ + (1, 'abc', 3.7000002861022949, 0), + (2, 'xy', 6.6999998092651367, 1), + (0, ' ', 0.40000000596046448, 0)], + names='c1, c2, c3, c4') + pa = np.rec.fromrecords([ + (1, 'abc', 3.7000002861022949, 0), + (2, 'xy', 6.6999998092651367, 1), + (0, ' ', 0.40000000596046448, 0)], + names='c1, c2, c3, c4') + assert_(ra.dtype == pa.dtype) + assert_(ra.shape == pa.shape) + for k in range(len(ra)): + assert_(ra[k].item() == pa[k].item()) + + def test_recarray_conflict_fields(self): + ra = np.rec.array([(1, 'abc', 2.3), (2, 'xyz', 4.2), + (3, 'wrs', 1.3)], + names='field, shape, mean') + ra.mean = [1.1, 2.2, 3.3] + assert_array_almost_equal(ra['mean'], [1.1, 2.2, 3.3]) + assert_(type(ra.mean) is type(ra.var)) + ra.shape = (1, 3) + assert_(ra.shape == (1, 3)) + ra.shape = ['A', 'B', 'C'] + assert_array_equal(ra['shape'], [['A', 'B', 'C']]) + ra.field = 5 + assert_array_equal(ra['field'], [[5, 5, 5]]) + assert_(isinstance(ra.field, collections.abc.Callable)) + + def test_fromrecords_with_explicit_dtype(self): + a = np.rec.fromrecords([(1, 'a'), (2, 'bbb')], + dtype=[('a', int), ('b', object)]) + assert_equal(a.a, [1, 2]) + assert_equal(a[0].a, 1) + assert_equal(a.b, ['a', 'bbb']) + assert_equal(a[-1].b, 'bbb') + # + ndtype = np.dtype([('a', int), ('b', object)]) + a = np.rec.fromrecords([(1, 'a'), (2, 'bbb')], dtype=ndtype) + assert_equal(a.a, [1, 2]) + assert_equal(a[0].a, 1) + assert_equal(a.b, ['a', 'bbb']) + assert_equal(a[-1].b, 'bbb') + + def test_recarray_stringtypes(self): + # Issue #3993 + a = np.array([('abc ', 1), ('abc', 2)], + dtype=[('foo', 'S4'), ('bar', int)]) + a = a.view(np.recarray) + assert_equal(a.foo[0] == a.foo[1], False) + + def test_recarray_returntypes(self): + qux_fields = {'C': (np.dtype('S5'), 0), 'D': (np.dtype('S5'), 6)} + a = np.rec.array([('abc ', (1,1), 1, ('abcde', 'fgehi')), + ('abc', (2,3), 1, ('abcde', 'jklmn'))], + dtype=[('foo', 'S4'), + ('bar', [('A', int), ('B', int)]), + ('baz', int), ('qux', qux_fields)]) + assert_equal(type(a.foo), np.ndarray) + assert_equal(type(a['foo']), np.ndarray) + assert_equal(type(a.bar), np.recarray) + assert_equal(type(a['bar']), np.recarray) + assert_equal(a.bar.dtype.type, np.record) + assert_equal(type(a['qux']), np.recarray) + assert_equal(a.qux.dtype.type, np.record) + assert_equal(dict(a.qux.dtype.fields), qux_fields) + assert_equal(type(a.baz), np.ndarray) + assert_equal(type(a['baz']), np.ndarray) + assert_equal(type(a[0].bar), np.record) + assert_equal(type(a[0]['bar']), np.record) + assert_equal(a[0].bar.A, 1) + assert_equal(a[0].bar['A'], 1) + assert_equal(a[0]['bar'].A, 1) + assert_equal(a[0]['bar']['A'], 1) + assert_equal(a[0].qux.D, b'fgehi') + assert_equal(a[0].qux['D'], b'fgehi') + assert_equal(a[0]['qux'].D, b'fgehi') + assert_equal(a[0]['qux']['D'], b'fgehi') + + def test_zero_width_strings(self): 
+ # Test for #6430, based on the test case from #1901 + + cols = [['test'] * 3, [''] * 3] + rec = np.rec.fromarrays(cols) + assert_equal(rec['f0'], ['test', 'test', 'test']) + assert_equal(rec['f1'], ['', '', '']) + + dt = np.dtype([('f0', '|S4'), ('f1', '|S')]) + rec = np.rec.fromarrays(cols, dtype=dt) + assert_equal(rec.itemsize, 4) + assert_equal(rec['f0'], [b'test', b'test', b'test']) + assert_equal(rec['f1'], [b'', b'', b'']) + + +class TestPathUsage: + # Test that pathlib.Path can be used + def test_tofile_fromfile(self): + with temppath(suffix='.bin') as path: + path = Path(path) + np.random.seed(123) + a = np.random.rand(10).astype('f8,i4,a5') + a[5] = (0.5,10,'abcde') + with path.open("wb") as fd: + a.tofile(fd) + x = np.core.records.fromfile(path, + formats='f8,i4,a5', + shape=10) + assert_array_equal(x, a) + + +class TestRecord: + def setup_method(self): + self.data = np.rec.fromrecords([(1, 2, 3), (4, 5, 6)], + dtype=[("col1", "<i4"), + ("col2", "<i4"), + ("col3", "<i4")]) + + def test_assignment1(self): + a = self.data + assert_equal(a.col1[0], 1) + a[0].col1 = 0 + assert_equal(a.col1[0], 0) + + def test_assignment2(self): + a = self.data + assert_equal(a.col1[0], 1) + a.col1[0] = 0 + assert_equal(a.col1[0], 0) + + def test_invalid_assignment(self): + a = self.data + + def assign_invalid_column(x): + x[0].col5 = 1 + + assert_raises(AttributeError, assign_invalid_column, a) + + def test_nonwriteable_setfield(self): + # gh-8171 + r = np.rec.array([(0,), (1,)], dtype=[('f', 'i4')]) + r.flags.writeable = False + with assert_raises(ValueError): + r.f = [2, 3] + with assert_raises(ValueError): + r.setfield([2,3], *r.dtype.fields['f']) + + def test_out_of_order_fields(self): + # names in the same order, padding added to descr + x = self.data[['col1', 'col2']] + assert_equal(x.dtype.names, ('col1', 'col2')) + assert_equal(x.dtype.descr, + [('col1', '<i4'), ('col2', '<i4'), ('', '|V4')]) + + # names change order to match indexing, as of 1.14 - descr can't + # represent that + y = self.data[['col2', 'col1']] + assert_equal(y.dtype.names, ('col2', 'col1')) + assert_raises(ValueError, lambda: y.dtype.descr) + + def test_pickle_1(self): + # Issue #1529 + a = np.array([(1, [])], dtype=[('a', np.int32), ('b', np.int32, 0)]) + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + assert_equal(a, pickle.loads(pickle.dumps(a, protocol=proto))) + assert_equal(a[0], pickle.loads(pickle.dumps(a[0], + protocol=proto))) + + def test_pickle_2(self): + a = self.data + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + assert_equal(a, pickle.loads(pickle.dumps(a, protocol=proto))) + assert_equal(a[0], pickle.loads(pickle.dumps(a[0], + protocol=proto))) + + def test_pickle_3(self): + # Issue #7140 + a = self.data + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + pa = pickle.loads(pickle.dumps(a[0], protocol=proto)) + assert_(pa.flags.c_contiguous) + assert_(pa.flags.f_contiguous) + assert_(pa.flags.writeable) + assert_(pa.flags.aligned) + + def test_pickle_void(self): + # issue gh-13593 + dt = np.dtype([('obj', 'O'), ('int', 'i')]) + a = np.empty(1, dtype=dt) + data = (bytearray(b'eman'),) + a['obj'] = data + a['int'] = 42 + ctor, args = a[0].__reduce__() + # check the constructor is what we expect before interpreting the arguments + assert ctor is np.core.multiarray.scalar + dtype, obj = args + # make sure we did not pickle the address + assert not isinstance(obj, bytes) + + assert_raises(RuntimeError, ctor, dtype, 13) + + # Test roundtrip: + dump = pickle.dumps(a[0]) + unpickled = 
pickle.loads(dump) + assert a[0] == unpickled + + # Also check the similar (impossible) "object scalar" path: + with pytest.warns(DeprecationWarning): + assert ctor(np.dtype("O"), data) is data + + def test_objview_record(self): + # https://github.com/numpy/numpy/issues/2599 + dt = np.dtype([('foo', 'i8'), ('bar', 'O')]) + r = np.zeros((1,3), dtype=dt).view(np.recarray) + r.foo = np.array([1, 2, 3]) # TypeError? + + # https://github.com/numpy/numpy/issues/3256 + ra = np.recarray((2,), dtype=[('x', object), ('y', float), ('z', int)]) + ra[['x','y']] # TypeError? + + def test_record_scalar_setitem(self): + # https://github.com/numpy/numpy/issues/3561 + rec = np.recarray(1, dtype=[('x', float, 5)]) + rec[0].x = 1 + assert_equal(rec[0].x, np.ones(5)) + + def test_missing_field(self): + # https://github.com/numpy/numpy/issues/4806 + arr = np.zeros((3,), dtype=[('x', int), ('y', int)]) + assert_raises(KeyError, lambda: arr[['nofield']]) + + def test_fromarrays_nested_structured_arrays(self): + arrays = [ + np.arange(10), + np.ones(10, dtype=[('a', '<u2'), ('b', '<f4')]), + ] + arr = np.rec.fromarrays(arrays) # ValueError? + + @pytest.mark.parametrize('nfields', [0, 1, 2]) + def test_assign_dtype_attribute(self, nfields): + dt = np.dtype([('a', np.uint8), ('b', np.uint8), ('c', np.uint8)][:nfields]) + data = np.zeros(3, dt).view(np.recarray) + + # the original and resulting dtypes differ on whether they are records + assert data.dtype.type == np.record + assert dt.type != np.record + + # ensure that the dtype remains a record even when assigned + data.dtype = dt + assert data.dtype.type == np.record + + @pytest.mark.parametrize('nfields', [0, 1, 2]) + def test_nested_fields_are_records(self, nfields): + """ Test that nested structured types are treated as records too """ + dt = np.dtype([('a', np.uint8), ('b', np.uint8), ('c', np.uint8)][:nfields]) + dt_outer = np.dtype([('inner', dt)]) + + data = np.zeros(3, dt_outer).view(np.recarray) + assert isinstance(data, np.recarray) + assert isinstance(data['inner'], np.recarray) + + data0 = data[0] + assert isinstance(data0, np.record) + assert isinstance(data0['inner'], np.record) + + def test_nested_dtype_padding(self): + """ test that trailing padding is preserved """ + # construct a dtype with padding at the end + dt = np.dtype([('a', np.uint8), ('b', np.uint8), ('c', np.uint8)]) + dt_padded_end = dt[['a', 'b']] + assert dt_padded_end.itemsize == dt.itemsize + + dt_outer = np.dtype([('inner', dt_padded_end)]) + + data = np.zeros(3, dt_outer).view(np.recarray) + assert_equal(data['inner'].dtype, dt_padded_end) + + data0 = data[0] + assert_equal(data0['inner'].dtype, dt_padded_end) + + +def test_find_duplicate(): + l1 = [1, 2, 3, 4, 5, 6] + assert_(np.rec.find_duplicate(l1) == []) + + l2 = [1, 2, 1, 4, 5, 6] + assert_(np.rec.find_duplicate(l2) == [1]) + + l3 = [1, 2, 1, 4, 1, 6, 2, 3] + assert_(np.rec.find_duplicate(l3) == [1, 2]) + + l3 = [2, 2, 1, 4, 1, 6, 2, 3] + assert_(np.rec.find_duplicate(l3) == [2, 1]) diff --git a/.venv/lib/python3.12/site-packages/numpy/core/tests/test_regression.py b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_regression.py new file mode 100644 index 00000000..678c727d --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_regression.py @@ -0,0 +1,2568 @@ +import copy +import sys +import gc +import tempfile +import pytest +from os import path +from io import BytesIO +from itertools import chain + +import numpy as np +from numpy.testing import ( + assert_, assert_equal, IS_PYPY, 
assert_almost_equal, + assert_array_equal, assert_array_almost_equal, assert_raises, + assert_raises_regex, assert_warns, suppress_warnings, + _assert_valid_refcount, HAS_REFCOUNT, IS_PYSTON, IS_WASM + ) +from numpy.testing._private.utils import _no_tracing, requires_memory +from numpy.compat import asbytes, asunicode, pickle + + +class TestRegression: + def test_invalid_round(self): + # Ticket #3 + v = 4.7599999999999998 + assert_array_equal(np.array([v]), np.array(v)) + + def test_mem_empty(self): + # Ticket #7 + np.empty((1,), dtype=[('x', np.int64)]) + + def test_pickle_transposed(self): + # Ticket #16 + a = np.transpose(np.array([[2, 9], [7, 0], [3, 8]])) + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + with BytesIO() as f: + pickle.dump(a, f, protocol=proto) + f.seek(0) + b = pickle.load(f) + assert_array_equal(a, b) + + def test_dtype_names(self): + # Ticket #35 + # Should succeed + np.dtype([(('name', 'label'), np.int32, 3)]) + + def test_reduce(self): + # Ticket #40 + assert_almost_equal(np.add.reduce([1., .5], dtype=None), 1.5) + + def test_zeros_order(self): + # Ticket #43 + np.zeros([3], int, 'C') + np.zeros([3], order='C') + np.zeros([3], int, order='C') + + def test_asarray_with_order(self): + # Check that nothing is done when order='F' and array C/F-contiguous + a = np.ones(2) + assert_(a is np.asarray(a, order='F')) + + def test_ravel_with_order(self): + # Check that ravel works when order='F' and array C/F-contiguous + a = np.ones(2) + assert_(not a.ravel('F').flags.owndata) + + def test_sort_bigendian(self): + # Ticket #47 + a = np.linspace(0, 10, 11) + c = a.astype(np.dtype('<f8')) + c.sort() + assert_array_almost_equal(c, a) + + def test_negative_nd_indexing(self): + # Ticket #49 + c = np.arange(125).reshape((5, 5, 5)) + origidx = np.array([-1, 0, 1]) + idx = np.array(origidx) + c[idx] + assert_array_equal(idx, origidx) + + def test_char_dump(self): + # Ticket #50 + ca = np.char.array(np.arange(1000, 1010), itemsize=4) + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + with BytesIO() as f: + pickle.dump(ca, f, protocol=proto) + f.seek(0) + ca = np.load(f, allow_pickle=True) + + def test_noncontiguous_fill(self): + # Ticket #58. + a = np.zeros((5, 3)) + b = a[:, :2,] + + def rs(): + b.shape = (10,) + + assert_raises(AttributeError, rs) + + def test_bool(self): + # Ticket #60 + np.bool_(1) # Should succeed + + def test_indexing1(self): + # Ticket #64 + descr = [('x', [('y', [('z', 'c16', (2,)),]),]),] + buffer = ((([6j, 4j],),),) + h = np.array(buffer, dtype=descr) + h['x']['y']['z'] + + def test_indexing2(self): + # Ticket #65 + descr = [('x', 'i4', (2,))] + buffer = ([3, 2],) + h = np.array(buffer, dtype=descr) + h['x'] + + def test_round(self): + # Ticket #67 + x = np.array([1+2j]) + assert_almost_equal(x**(-1), [1/(1+2j)]) + + def test_scalar_compare(self): + # Trac Ticket #72 + # https://github.com/numpy/numpy/issues/565 + a = np.array(['test', 'auto']) + assert_array_equal(a == 'auto', np.array([False, True])) + assert_(a[1] == 'auto') + assert_(a[0] != 'auto') + b = np.linspace(0, 10, 11) + assert_array_equal(b != 'auto', np.ones(11, dtype=bool)) + assert_(b[0] != 'auto') + + def test_unicode_swapping(self): + # Ticket #79 + ulen = 1 + ucs_value = '\U0010FFFF' + ua = np.array([[[ucs_value*ulen]*2]*3]*4, dtype='U%s' % ulen) + ua.newbyteorder() # Should succeed. 
+ + def test_object_array_fill(self): + # Ticket #86 + x = np.zeros(1, 'O') + x.fill([]) + + def test_mem_dtype_align(self): + # Ticket #93 + assert_raises(TypeError, np.dtype, + {'names':['a'], 'formats':['foo']}, align=1) + + def test_endian_bool_indexing(self): + # Ticket #105 + a = np.arange(10., dtype='>f8') + b = np.arange(10., dtype='<f8') + xa = np.where((a > 2) & (a < 6)) + xb = np.where((b > 2) & (b < 6)) + ya = ((a > 2) & (a < 6)) + yb = ((b > 2) & (b < 6)) + assert_array_almost_equal(xa, ya.nonzero()) + assert_array_almost_equal(xb, yb.nonzero()) + assert_(np.all(a[ya] > 0.5)) + assert_(np.all(b[yb] > 0.5)) + + def test_endian_where(self): + # GitHub issue #369 + net = np.zeros(3, dtype='>f4') + net[1] = 0.00458849 + net[2] = 0.605202 + max_net = net.max() + test = np.where(net <= 0., max_net, net) + correct = np.array([ 0.60520202, 0.00458849, 0.60520202]) + assert_array_almost_equal(test, correct) + + def test_endian_recarray(self): + # Ticket #2185 + dt = np.dtype([ + ('head', '>u4'), + ('data', '>u4', 2), + ]) + buf = np.recarray(1, dtype=dt) + buf[0]['head'] = 1 + buf[0]['data'][:] = [1, 1] + + h = buf[0]['head'] + d = buf[0]['data'][0] + buf[0]['head'] = h + buf[0]['data'][0] = d + assert_(buf[0]['head'] == 1) + + def test_mem_dot(self): + # Ticket #106 + x = np.random.randn(0, 1) + y = np.random.randn(10, 1) + # Dummy array to detect bad memory access: + _z = np.ones(10) + _dummy = np.empty((0, 10)) + z = np.lib.stride_tricks.as_strided(_z, _dummy.shape, _dummy.strides) + np.dot(x, np.transpose(y), out=z) + assert_equal(_z, np.ones(10)) + # Do the same for the built-in dot: + np.core.multiarray.dot(x, np.transpose(y), out=z) + assert_equal(_z, np.ones(10)) + + def test_arange_endian(self): + # Ticket #111 + ref = np.arange(10) + x = np.arange(10, dtype='<f8') + assert_array_equal(ref, x) + x = np.arange(10, dtype='>f8') + assert_array_equal(ref, x) + + def test_arange_inf_step(self): + ref = np.arange(0, 1, 10) + x = np.arange(0, 1, np.inf) + assert_array_equal(ref, x) + + ref = np.arange(0, 1, -10) + x = np.arange(0, 1, -np.inf) + assert_array_equal(ref, x) + + ref = np.arange(0, -1, -10) + x = np.arange(0, -1, -np.inf) + assert_array_equal(ref, x) + + ref = np.arange(0, -1, 10) + x = np.arange(0, -1, np.inf) + assert_array_equal(ref, x) + + def test_arange_underflow_stop_and_step(self): + finfo = np.finfo(np.float64) + + ref = np.arange(0, finfo.eps, 2 * finfo.eps) + x = np.arange(0, finfo.eps, finfo.max) + assert_array_equal(ref, x) + + ref = np.arange(0, finfo.eps, -2 * finfo.eps) + x = np.arange(0, finfo.eps, -finfo.max) + assert_array_equal(ref, x) + + ref = np.arange(0, -finfo.eps, -2 * finfo.eps) + x = np.arange(0, -finfo.eps, -finfo.max) + assert_array_equal(ref, x) + + ref = np.arange(0, -finfo.eps, 2 * finfo.eps) + x = np.arange(0, -finfo.eps, finfo.max) + assert_array_equal(ref, x) + + def test_argmax(self): + # Ticket #119 + a = np.random.normal(0, 1, (4, 5, 6, 7, 8)) + for i in range(a.ndim): + a.argmax(i) # Should succeed + + def test_mem_divmod(self): + # Ticket #126 + for i in range(10): + divmod(np.array([i])[0], 10) + + def test_hstack_invalid_dims(self): + # Ticket #128 + x = np.arange(9).reshape((3, 3)) + y = np.array([0, 0, 0]) + assert_raises(ValueError, np.hstack, (x, y)) + + def test_squeeze_type(self): + # Ticket #133 + a = np.array([3]) + b = np.array(3) + assert_(type(a.squeeze()) is np.ndarray) + assert_(type(b.squeeze()) is np.ndarray) + + def test_add_identity(self): + # Ticket #143 + assert_equal(0, np.add.identity) + + def 
test_numpy_float_python_long_addition(self): + # Check that numpy float and python longs can be added correctly. + a = np.float_(23.) + 2**135 + assert_equal(a, 23. + 2**135) + + def test_binary_repr_0(self): + # Ticket #151 + assert_equal('0', np.binary_repr(0)) + + def test_rec_iterate(self): + # Ticket #160 + descr = np.dtype([('i', int), ('f', float), ('s', '|S3')]) + x = np.rec.array([(1, 1.1, '1.0'), + (2, 2.2, '2.0')], dtype=descr) + x[0].tolist() + [i for i in x[0]] + + def test_unicode_string_comparison(self): + # Ticket #190 + a = np.array('hello', np.str_) + b = np.array('world') + a == b + + def test_tobytes_FORTRANORDER_discontiguous(self): + # Fix in r2836 + # Create non-contiguous Fortran ordered array + x = np.array(np.random.rand(3, 3), order='F')[:, :2] + assert_array_almost_equal(x.ravel(), np.frombuffer(x.tobytes())) + + def test_flat_assignment(self): + # Correct behaviour of ticket #194 + x = np.empty((3, 1)) + x.flat = np.arange(3) + assert_array_almost_equal(x, [[0], [1], [2]]) + x.flat = np.arange(3, dtype=float) + assert_array_almost_equal(x, [[0], [1], [2]]) + + def test_broadcast_flat_assignment(self): + # Ticket #194 + x = np.empty((3, 1)) + + def bfa(): + x[:] = np.arange(3) + + def bfb(): + x[:] = np.arange(3, dtype=float) + + assert_raises(ValueError, bfa) + assert_raises(ValueError, bfb) + + @pytest.mark.xfail(IS_WASM, reason="not sure why") + @pytest.mark.parametrize("index", + [np.ones(10, dtype=bool), np.arange(10)], + ids=["boolean-arr-index", "integer-arr-index"]) + def test_nonarray_assignment(self, index): + # See also Issue gh-2870, test for non-array assignment + # and equivalent unsafe casted array assignment + a = np.arange(10) + + with pytest.raises(ValueError): + a[index] = np.nan + + with np.errstate(invalid="warn"): + with pytest.warns(RuntimeWarning, match="invalid value"): + a[index] = np.array(np.nan) # Only warns + + def test_unpickle_dtype_with_object(self): + # Implemented in r2840 + dt = np.dtype([('x', int), ('y', np.object_), ('z', 'O')]) + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + with BytesIO() as f: + pickle.dump(dt, f, protocol=proto) + f.seek(0) + dt_ = pickle.load(f) + assert_equal(dt, dt_) + + def test_mem_array_creation_invalid_specification(self): + # Ticket #196 + dt = np.dtype([('x', int), ('y', np.object_)]) + # Wrong way + assert_raises(ValueError, np.array, [1, 'object'], dt) + # Correct way + np.array([(1, 'object')], dt) + + def test_recarray_single_element(self): + # Ticket #202 + a = np.array([1, 2, 3], dtype=np.int32) + b = a.copy() + r = np.rec.array(a, shape=1, formats=['3i4'], names=['d']) + assert_array_equal(a, b) + assert_equal(a, r[0][0]) + + def test_zero_sized_array_indexing(self): + # Ticket #205 + tmp = np.array([]) + + def index_tmp(): + tmp[np.array(10)] + + assert_raises(IndexError, index_tmp) + + def test_chararray_rstrip(self): + # Ticket #222 + x = np.chararray((1,), 5) + x[0] = b'a ' + x = x.rstrip() + assert_equal(x[0], b'a') + + def test_object_array_shape(self): + # Ticket #239 + assert_equal(np.array([[1, 2], 3, 4], dtype=object).shape, (3,)) + assert_equal(np.array([[1, 2], [3, 4]], dtype=object).shape, (2, 2)) + assert_equal(np.array([(1, 2), (3, 4)], dtype=object).shape, (2, 2)) + assert_equal(np.array([], dtype=object).shape, (0,)) + assert_equal(np.array([[], [], []], dtype=object).shape, (3, 0)) + assert_equal(np.array([[3, 4], [5, 6], None], dtype=object).shape, (3,)) + + def test_mem_around(self): + # Ticket #243 + x = np.zeros((1,)) + y = [0] + decimal = 6 + 
np.around(abs(x-y), decimal) <= 10.0**(-decimal) + + def test_character_array_strip(self): + # Ticket #246 + x = np.char.array(("x", "x ", "x ")) + for c in x: + assert_equal(c, "x") + + def test_lexsort(self): + # Lexsort memory error + v = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) + assert_equal(np.lexsort(v), 0) + + def test_lexsort_invalid_sequence(self): + # Issue gh-4123 + class BuggySequence: + def __len__(self): + return 4 + + def __getitem__(self, key): + raise KeyError + + assert_raises(KeyError, np.lexsort, BuggySequence()) + + def test_lexsort_zerolen_custom_strides(self): + # Ticket #14228 + xs = np.array([], dtype='i8') + assert np.lexsort((xs,)).shape[0] == 0 # Works + + xs.strides = (16,) + assert np.lexsort((xs,)).shape[0] == 0 # Was: MemoryError + + def test_lexsort_zerolen_custom_strides_2d(self): + xs = np.array([], dtype='i8') + + xs.shape = (0, 2) + xs.strides = (16, 16) + assert np.lexsort((xs,), axis=0).shape[0] == 0 + + xs.shape = (2, 0) + xs.strides = (16, 16) + assert np.lexsort((xs,), axis=0).shape[0] == 2 + + def test_lexsort_invalid_axis(self): + assert_raises(np.AxisError, np.lexsort, (np.arange(1),), axis=2) + assert_raises(np.AxisError, np.lexsort, (np.array([]),), axis=1) + assert_raises(np.AxisError, np.lexsort, (np.array(1),), axis=10) + + def test_lexsort_zerolen_element(self): + dt = np.dtype([]) # a void dtype with no fields + xs = np.empty(4, dt) + + assert np.lexsort((xs,)).shape[0] == xs.shape[0] + + def test_pickle_py2_bytes_encoding(self): + # Check that arrays and scalars pickled on Py2 are + # unpickleable on Py3 using encoding='bytes' + + test_data = [ + # (original, py2_pickle) + (np.str_('\u6f2c'), + b"cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n" + b"(S'U1'\np2\nI0\nI1\ntp3\nRp4\n(I3\nS'<'\np5\nNNNI4\nI4\n" + b"I0\ntp6\nbS',o\\x00\\x00'\np7\ntp8\nRp9\n."), + + (np.array([9e123], dtype=np.float64), + b"cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\n" + b"p1\n(I0\ntp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\n" + b"p7\n(S'f8'\np8\nI0\nI1\ntp9\nRp10\n(I3\nS'<'\np11\nNNNI-1\nI-1\n" + b"I0\ntp12\nbI00\nS'O\\x81\\xb7Z\\xaa:\\xabY'\np13\ntp14\nb."), + + (np.array([(9e123,)], dtype=[('name', float)]), + b"cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\np1\n" + b"(I0\ntp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\np7\n" + b"(S'V8'\np8\nI0\nI1\ntp9\nRp10\n(I3\nS'|'\np11\nN(S'name'\np12\ntp13\n" + b"(dp14\ng12\n(g7\n(S'f8'\np15\nI0\nI1\ntp16\nRp17\n(I3\nS'<'\np18\nNNNI-1\n" + b"I-1\nI0\ntp19\nbI0\ntp20\nsI8\nI1\nI0\ntp21\n" + b"bI00\nS'O\\x81\\xb7Z\\xaa:\\xabY'\np22\ntp23\nb."), + ] + + for original, data in test_data: + result = pickle.loads(data, encoding='bytes') + assert_equal(result, original) + + if isinstance(result, np.ndarray) and result.dtype.names is not None: + for name in result.dtype.names: + assert_(isinstance(name, str)) + + def test_pickle_dtype(self): + # Ticket #251 + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + pickle.dumps(float, protocol=proto) + + def test_swap_real(self): + # Ticket #265 + assert_equal(np.arange(4, dtype='>c8').imag.max(), 0.0) + assert_equal(np.arange(4, dtype='<c8').imag.max(), 0.0) + assert_equal(np.arange(4, dtype='>c8').real.max(), 3.0) + assert_equal(np.arange(4, dtype='<c8').real.max(), 3.0) + + def test_object_array_from_list(self): + # Ticket #270 (gh-868) + assert_(np.array([1, None, 'A']).shape == (3,)) + + def test_multiple_assign(self): + # Ticket #273 + a = np.zeros((3, 1), int) + a[[1, 2]] = 1 + + def test_empty_array_type(self): + 
assert_equal(np.array([]).dtype, np.zeros(0).dtype) + + def test_void_copyswap(self): + dt = np.dtype([('one', '<i4'), ('two', '<i4')]) + x = np.array((1, 2), dtype=dt) + x = x.byteswap() + assert_(x['one'] > 1 and x['two'] > 2) + + def test_method_args(self): + # Make sure methods and functions have same default axis + # keyword and arguments + funcs1 = ['argmax', 'argmin', 'sum', 'any', 'all', 'cumsum', + 'ptp', 'cumprod', 'prod', 'std', 'var', 'mean', + 'round', 'min', 'max', 'argsort', 'sort'] + funcs2 = ['compress', 'take', 'repeat'] + + for func in funcs1: + arr = np.random.rand(8, 7) + arr2 = arr.copy() + res1 = getattr(arr, func)() + res2 = getattr(np, func)(arr2) + if res1 is None: + res1 = arr + + if res1.dtype.kind in 'uib': + assert_((res1 == res2).all(), func) + else: + assert_(abs(res1-res2).max() < 1e-8, func) + + for func in funcs2: + arr1 = np.random.rand(8, 7) + arr2 = np.random.rand(8, 7) + res1 = None + if func == 'compress': + arr1 = arr1.ravel() + res1 = getattr(arr2, func)(arr1) + else: + arr2 = (15*arr2).astype(int).ravel() + if res1 is None: + res1 = getattr(arr1, func)(arr2) + res2 = getattr(np, func)(arr1, arr2) + assert_(abs(res1-res2).max() < 1e-8, func) + + def test_mem_lexsort_strings(self): + # Ticket #298 + lst = ['abc', 'cde', 'fgh'] + np.lexsort((lst,)) + + def test_fancy_index(self): + # Ticket #302 + x = np.array([1, 2])[np.array([0])] + assert_equal(x.shape, (1,)) + + def test_recarray_copy(self): + # Ticket #312 + dt = [('x', np.int16), ('y', np.float64)] + ra = np.array([(1, 2.3)], dtype=dt) + rb = np.rec.array(ra, dtype=dt) + rb['x'] = 2. + assert_(ra['x'] != rb['x']) + + def test_rec_fromarray(self): + # Ticket #322 + x1 = np.array([[1, 2], [3, 4], [5, 6]]) + x2 = np.array(['a', 'dd', 'xyz']) + x3 = np.array([1.1, 2, 3]) + np.rec.fromarrays([x1, x2, x3], formats="(2,)i4,a3,f8") + + def test_object_array_assign(self): + x = np.empty((2, 2), object) + x.flat[2] = (1, 2, 3) + assert_equal(x.flat[2], (1, 2, 3)) + + def test_ndmin_float64(self): + # Ticket #324 + x = np.array([1, 2, 3], dtype=np.float64) + assert_equal(np.array(x, dtype=np.float32, ndmin=2).ndim, 2) + assert_equal(np.array(x, dtype=np.float64, ndmin=2).ndim, 2) + + def test_ndmin_order(self): + # Issue #465 and related checks + assert_(np.array([1, 2], order='C', ndmin=3).flags.c_contiguous) + assert_(np.array([1, 2], order='F', ndmin=3).flags.f_contiguous) + assert_(np.array(np.ones((2, 2), order='F'), ndmin=3).flags.f_contiguous) + assert_(np.array(np.ones((2, 2), order='C'), ndmin=3).flags.c_contiguous) + + def test_mem_axis_minimization(self): + # Ticket #327 + data = np.arange(5) + data = np.add.outer(data, data) + + def test_mem_float_imag(self): + # Ticket #330 + np.float64(1.0).imag + + def test_dtype_tuple(self): + # Ticket #334 + assert_(np.dtype('i4') == np.dtype(('i4', ()))) + + def test_dtype_posttuple(self): + # Ticket #335 + np.dtype([('col1', '()i4')]) + + def test_numeric_carray_compare(self): + # Ticket #341 + assert_equal(np.array(['X'], 'c'), b'X') + + def test_string_array_size(self): + # Ticket #342 + assert_raises(ValueError, + np.array, [['X'], ['X', 'X', 'X']], '|S1') + + def test_dtype_repr(self): + # Ticket #344 + dt1 = np.dtype(('uint32', 2)) + dt2 = np.dtype(('uint32', (2,))) + assert_equal(dt1.__repr__(), dt2.__repr__()) + + def test_reshape_order(self): + # Make sure reshape order works. 
+ a = np.arange(6).reshape(2, 3, order='F') + assert_equal(a, [[0, 2, 4], [1, 3, 5]]) + a = np.array([[1, 2], [3, 4], [5, 6], [7, 8]]) + b = a[:, 1] + assert_equal(b.reshape(2, 2, order='F'), [[2, 6], [4, 8]]) + + def test_reshape_zero_strides(self): + # Issue #380, test reshaping of zero strided arrays + a = np.ones(1) + a = np.lib.stride_tricks.as_strided(a, shape=(5,), strides=(0,)) + assert_(a.reshape(5, 1).strides[0] == 0) + + def test_reshape_zero_size(self): + # GitHub Issue #2700, setting shape failed for 0-sized arrays + a = np.ones((0, 2)) + a.shape = (-1, 2) + + # Cannot test if NPY_RELAXED_STRIDES_DEBUG changes the strides. + # With NPY_RELAXED_STRIDES_DEBUG the test becomes superfluous. + @pytest.mark.skipif(np.ones(1).strides[0] == np.iinfo(np.intp).max, + reason="Using relaxed stride debug") + def test_reshape_trailing_ones_strides(self): + # GitHub issue gh-2949, bad strides for trailing ones of new shape + a = np.zeros(12, dtype=np.int32)[::2] # not contiguous + strides_c = (16, 8, 8, 8) + strides_f = (8, 24, 48, 48) + assert_equal(a.reshape(3, 2, 1, 1).strides, strides_c) + assert_equal(a.reshape(3, 2, 1, 1, order='F').strides, strides_f) + assert_equal(np.array(0, dtype=np.int32).reshape(1, 1).strides, (4, 4)) + + def test_repeat_discont(self): + # Ticket #352 + a = np.arange(12).reshape(4, 3)[:, 2] + assert_equal(a.repeat(3), [2, 2, 2, 5, 5, 5, 8, 8, 8, 11, 11, 11]) + + def test_array_index(self): + # Make sure optimization is not called in this case. + a = np.array([1, 2, 3]) + a2 = np.array([[1, 2, 3]]) + assert_equal(a[np.where(a == 3)], a2[np.where(a2 == 3)]) + + def test_object_argmax(self): + a = np.array([1, 2, 3], dtype=object) + assert_(a.argmax() == 2) + + def test_recarray_fields(self): + # Ticket #372 + dt0 = np.dtype([('f0', 'i4'), ('f1', 'i4')]) + dt1 = np.dtype([('f0', 'i8'), ('f1', 'i8')]) + for a in [np.array([(1, 2), (3, 4)], "i4,i4"), + np.rec.array([(1, 2), (3, 4)], "i4,i4"), + np.rec.array([(1, 2), (3, 4)]), + np.rec.fromarrays([(1, 2), (3, 4)], "i4,i4"), + np.rec.fromarrays([(1, 2), (3, 4)])]: + assert_(a.dtype in [dt0, dt1]) + + def test_random_shuffle(self): + # Ticket #374 + a = np.arange(5).reshape((5, 1)) + b = a.copy() + np.random.shuffle(b) + assert_equal(np.sort(b, axis=0), a) + + def test_refcount_vdot(self): + # Changeset #3443 + _assert_valid_refcount(np.vdot) + + def test_startswith(self): + ca = np.char.array(['Hi', 'There']) + assert_equal(ca.startswith('H'), [True, False]) + + def test_noncommutative_reduce_accumulate(self): + # Ticket #413 + tosubtract = np.arange(5) + todivide = np.array([2.0, 0.5, 0.25]) + assert_equal(np.subtract.reduce(tosubtract), -10) + assert_equal(np.divide.reduce(todivide), 16.0) + assert_array_equal(np.subtract.accumulate(tosubtract), + np.array([0, -1, -3, -6, -10])) + assert_array_equal(np.divide.accumulate(todivide), + np.array([2., 4., 16.])) + + def test_convolve_empty(self): + # Convolve should raise an error for empty input array. 
+ assert_raises(ValueError, np.convolve, [], [1]) + assert_raises(ValueError, np.convolve, [1], []) + + def test_multidim_byteswap(self): + # Ticket #449 + r = np.array([(1, (0, 1, 2))], dtype="i2,3i2") + assert_array_equal(r.byteswap(), + np.array([(256, (0, 256, 512))], r.dtype)) + + def test_string_NULL(self): + # Changeset 3557 + assert_equal(np.array("a\x00\x0b\x0c\x00").item(), + 'a\x00\x0b\x0c') + + def test_junk_in_string_fields_of_recarray(self): + # Ticket #483 + r = np.array([[b'abc']], dtype=[('var1', '|S20')]) + assert_(asbytes(r['var1'][0][0]) == b'abc') + + def test_take_output(self): + # Ensure that 'take' honours output parameter. + x = np.arange(12).reshape((3, 4)) + a = np.take(x, [0, 2], axis=1) + b = np.zeros_like(a) + np.take(x, [0, 2], axis=1, out=b) + assert_array_equal(a, b) + + def test_take_object_fail(self): + # Issue gh-3001 + d = 123. + a = np.array([d, 1], dtype=object) + if HAS_REFCOUNT: + ref_d = sys.getrefcount(d) + try: + a.take([0, 100]) + except IndexError: + pass + if HAS_REFCOUNT: + assert_(ref_d == sys.getrefcount(d)) + + def test_array_str_64bit(self): + # Ticket #501 + s = np.array([1, np.nan], dtype=np.float64) + with np.errstate(all='raise'): + np.array_str(s) # Should succeed + + def test_frompyfunc_endian(self): + # Ticket #503 + from math import radians + uradians = np.frompyfunc(radians, 1, 1) + big_endian = np.array([83.4, 83.5], dtype='>f8') + little_endian = np.array([83.4, 83.5], dtype='<f8') + assert_almost_equal(uradians(big_endian).astype(float), + uradians(little_endian).astype(float)) + + def test_mem_string_arr(self): + # Ticket #514 + s = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + t = [] + np.hstack((t, s)) + + def test_arr_transpose(self): + # Ticket #516 + x = np.random.rand(*(2,)*16) + x.transpose(list(range(16))) # Should succeed + + def test_string_mergesort(self): + # Ticket #540 + x = np.array(['a']*32) + assert_array_equal(x.argsort(kind='m'), np.arange(32)) + + def test_argmax_byteorder(self): + # Ticket #546 + a = np.arange(3, dtype='>f') + assert_(a[a.argmax()] == a.max()) + + def test_rand_seed(self): + # Ticket #555 + for l in np.arange(4): + np.random.seed(l) + + def test_mem_deallocation_leak(self): + # Ticket #562 + a = np.zeros(5, dtype=float) + b = np.array(a, dtype=float) + del a, b + + def test_mem_on_invalid_dtype(self): + "Ticket #583" + assert_raises(ValueError, np.fromiter, [['12', ''], ['13', '']], str) + + def test_dot_negative_stride(self): + # Ticket #588 + x = np.array([[1, 5, 25, 125., 625]]) + y = np.array([[20.], [160.], [640.], [1280.], [1024.]]) + z = y[::-1].copy() + y2 = y[::-1] + assert_equal(np.dot(x, z), np.dot(x, y2)) + + def test_object_casting(self): + # This used to trigger the object-type version of + # the bitwise_or operation, because float64 -> object + # casting succeeds + def rs(): + x = np.ones([484, 286]) + y = np.zeros([484, 286]) + x |= y + + assert_raises(TypeError, rs) + + def test_unicode_scalar(self): + # Ticket #600 + x = np.array(["DROND", "DROND1"], dtype="U6") + el = x[1] + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + new = pickle.loads(pickle.dumps(el, protocol=proto)) + assert_equal(new, el) + + def test_arange_non_native_dtype(self): + # Ticket #616 + for T in ('>f4', '<f4'): + dt = np.dtype(T) + assert_equal(np.arange(0, dtype=dt).dtype, dt) + assert_equal(np.arange(0.5, dtype=dt).dtype, dt) + assert_equal(np.arange(5, dtype=dt).dtype, dt) + + def test_bool_flat_indexing_invalid_nr_elements(self): + s = np.ones(10, dtype=float) + x = np.array((15,), 
dtype=float) + + def ia(x, s, v): + x[(s > 0)] = v + + assert_raises(IndexError, ia, x, s, np.zeros(9, dtype=float)) + assert_raises(IndexError, ia, x, s, np.zeros(11, dtype=float)) + + # Old special case (different code path): + assert_raises(ValueError, ia, x.flat, s, np.zeros(9, dtype=float)) + assert_raises(ValueError, ia, x.flat, s, np.zeros(11, dtype=float)) + + def test_mem_scalar_indexing(self): + # Ticket #603 + x = np.array([0], dtype=float) + index = np.array(0, dtype=np.int32) + x[index] + + def test_binary_repr_0_width(self): + assert_equal(np.binary_repr(0, width=3), '000') + + def test_fromstring(self): + assert_equal(np.fromstring("12:09:09", dtype=int, sep=":"), + [12, 9, 9]) + + def test_searchsorted_variable_length(self): + x = np.array(['a', 'aa', 'b']) + y = np.array(['d', 'e']) + assert_equal(x.searchsorted(y), [3, 3]) + + def test_string_argsort_with_zeros(self): + # Check argsort for strings containing zeros. + x = np.frombuffer(b"\x00\x02\x00\x01", dtype="|S2") + assert_array_equal(x.argsort(kind='m'), np.array([1, 0])) + assert_array_equal(x.argsort(kind='q'), np.array([1, 0])) + + def test_string_sort_with_zeros(self): + # Check sort for strings containing zeros. + x = np.frombuffer(b"\x00\x02\x00\x01", dtype="|S2") + y = np.frombuffer(b"\x00\x01\x00\x02", dtype="|S2") + assert_array_equal(np.sort(x, kind="q"), y) + + def test_copy_detection_zero_dim(self): + # Ticket #658 + np.indices((0, 3, 4)).T.reshape(-1, 3) + + def test_flat_byteorder(self): + # Ticket #657 + x = np.arange(10) + assert_array_equal(x.astype('>i4'), x.astype('<i4').flat[:]) + assert_array_equal(x.astype('>i4').flat[:], x.astype('<i4')) + + def test_sign_bit(self): + x = np.array([0, -0.0, 0]) + assert_equal(str(np.abs(x)), '[0. 0. 0.]') + + def test_flat_index_byteswap(self): + for dt in (np.dtype('<i4'), np.dtype('>i4')): + x = np.array([-1, 0, 1], dtype=dt) + assert_equal(x.flat[0].dtype, x[0].dtype) + + def test_copy_detection_corner_case(self): + # Ticket #658 + np.indices((0, 3, 4)).T.reshape(-1, 3) + + # Cannot test if NPY_RELAXED_STRIDES_DEBUG changes the strides. + # With NPY_RELAXED_STRIDES_DEBUG the test becomes superfluous, + # 0-sized reshape itself is tested elsewhere. + @pytest.mark.skipif(np.ones(1).strides[0] == np.iinfo(np.intp).max, + reason="Using relaxed stride debug") + def test_copy_detection_corner_case2(self): + # Ticket #771: strides are not set correctly when reshaping 0-sized + # arrays + b = np.indices((0, 3, 4)).T.reshape(-1, 3) + assert_equal(b.strides, (3 * b.itemsize, b.itemsize)) + + def test_object_array_refcounting(self): + # Ticket #633 + if not hasattr(sys, 'getrefcount'): + return + + # NB. 
this is probably CPython-specific + + cnt = sys.getrefcount + + a = object() + b = object() + c = object() + + cnt0_a = cnt(a) + cnt0_b = cnt(b) + cnt0_c = cnt(c) + + # -- 0d -> 1-d broadcast slice assignment + + arr = np.zeros(5, dtype=np.object_) + + arr[:] = a + assert_equal(cnt(a), cnt0_a + 5) + + arr[:] = b + assert_equal(cnt(a), cnt0_a) + assert_equal(cnt(b), cnt0_b + 5) + + arr[:2] = c + assert_equal(cnt(b), cnt0_b + 3) + assert_equal(cnt(c), cnt0_c + 2) + + del arr + + # -- 1-d -> 2-d broadcast slice assignment + + arr = np.zeros((5, 2), dtype=np.object_) + arr0 = np.zeros(2, dtype=np.object_) + + arr0[0] = a + assert_(cnt(a) == cnt0_a + 1) + arr0[1] = b + assert_(cnt(b) == cnt0_b + 1) + + arr[:, :] = arr0 + assert_(cnt(a) == cnt0_a + 6) + assert_(cnt(b) == cnt0_b + 6) + + arr[:, 0] = None + assert_(cnt(a) == cnt0_a + 1) + + del arr, arr0 + + # -- 2-d copying + flattening + + arr = np.zeros((5, 2), dtype=np.object_) + + arr[:, 0] = a + arr[:, 1] = b + assert_(cnt(a) == cnt0_a + 5) + assert_(cnt(b) == cnt0_b + 5) + + arr2 = arr.copy() + assert_(cnt(a) == cnt0_a + 10) + assert_(cnt(b) == cnt0_b + 10) + + arr2 = arr[:, 0].copy() + assert_(cnt(a) == cnt0_a + 10) + assert_(cnt(b) == cnt0_b + 5) + + arr2 = arr.flatten() + assert_(cnt(a) == cnt0_a + 10) + assert_(cnt(b) == cnt0_b + 10) + + del arr, arr2 + + # -- concatenate, repeat, take, choose + + arr1 = np.zeros((5, 1), dtype=np.object_) + arr2 = np.zeros((5, 1), dtype=np.object_) + + arr1[...] = a + arr2[...] = b + assert_(cnt(a) == cnt0_a + 5) + assert_(cnt(b) == cnt0_b + 5) + + tmp = np.concatenate((arr1, arr2)) + assert_(cnt(a) == cnt0_a + 5 + 5) + assert_(cnt(b) == cnt0_b + 5 + 5) + + tmp = arr1.repeat(3, axis=0) + assert_(cnt(a) == cnt0_a + 5 + 3*5) + + tmp = arr1.take([1, 2, 3], axis=0) + assert_(cnt(a) == cnt0_a + 5 + 3) + + x = np.array([[0], [1], [0], [1], [1]], int) + tmp = x.choose(arr1, arr2) + assert_(cnt(a) == cnt0_a + 5 + 2) + assert_(cnt(b) == cnt0_b + 5 + 3) + + del tmp # Avoid pyflakes unused variable warning + + def test_mem_custom_float_to_array(self): + # Ticket 702 + class MyFloat: + def __float__(self): + return 1.0 + + tmp = np.atleast_1d([MyFloat()]) + tmp.astype(float) # Should succeed + + def test_object_array_refcount_self_assign(self): + # Ticket #711 + class VictimObject: + deleted = False + + def __del__(self): + self.deleted = True + + d = VictimObject() + arr = np.zeros(5, dtype=np.object_) + arr[:] = d + del d + arr[:] = arr # refcount of 'd' might hit zero here + assert_(not arr[0].deleted) + arr[:] = arr # trying to induce a segfault by doing it again... + assert_(not arr[0].deleted) + + def test_mem_fromiter_invalid_dtype_string(self): + x = [1, 2, 3] + assert_raises(ValueError, + np.fromiter, [xi for xi in x], dtype='S') + + def test_reduce_big_object_array(self): + # Ticket #713 + oldsize = np.setbufsize(10*16) + a = np.array([None]*161, object) + assert_(not np.any(a)) + np.setbufsize(oldsize) + + def test_mem_0d_array_index(self): + # Ticket #714 + np.zeros(10)[np.array(0)] + + def test_nonnative_endian_fill(self): + # Non-native endian arrays were incorrectly filled with scalars + # before r5034. 
+ if sys.byteorder == 'little': + dtype = np.dtype('>i4') + else: + dtype = np.dtype('<i4') + x = np.empty([1], dtype=dtype) + x.fill(1) + assert_equal(x, np.array([1], dtype=dtype)) + + def test_dot_alignment_sse2(self): + # Test for ticket #551, changeset r5140 + x = np.zeros((30, 40)) + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + y = pickle.loads(pickle.dumps(x, protocol=proto)) + # y is now typically not aligned on a 8-byte boundary + z = np.ones((1, y.shape[0])) + # This shouldn't cause a segmentation fault: + np.dot(z, y) + + def test_astype_copy(self): + # Ticket #788, changeset r5155 + # The test data file was generated by scipy.io.savemat. + # The dtype is float64, but the isbuiltin attribute is 0. + data_dir = path.join(path.dirname(__file__), 'data') + filename = path.join(data_dir, "astype_copy.pkl") + with open(filename, 'rb') as f: + xp = pickle.load(f, encoding='latin1') + xpd = xp.astype(np.float64) + assert_((xp.__array_interface__['data'][0] != + xpd.__array_interface__['data'][0])) + + def test_compress_small_type(self): + # Ticket #789, changeset 5217. + # compress with out argument segfaulted if cannot cast safely + import numpy as np + a = np.array([[1, 2], [3, 4]]) + b = np.zeros((2, 1), dtype=np.single) + try: + a.compress([True, False], axis=1, out=b) + raise AssertionError("compress with an out which cannot be " + "safely casted should not return " + "successfully") + except TypeError: + pass + + def test_attributes(self): + # Ticket #791 + class TestArray(np.ndarray): + def __new__(cls, data, info): + result = np.array(data) + result = result.view(cls) + result.info = info + return result + + def __array_finalize__(self, obj): + self.info = getattr(obj, 'info', '') + + dat = TestArray([[1, 2, 3, 4], [5, 6, 7, 8]], 'jubba') + assert_(dat.info == 'jubba') + dat.resize((4, 2)) + assert_(dat.info == 'jubba') + dat.sort() + assert_(dat.info == 'jubba') + dat.fill(2) + assert_(dat.info == 'jubba') + dat.put([2, 3, 4], [6, 3, 4]) + assert_(dat.info == 'jubba') + dat.setfield(4, np.int32, 0) + assert_(dat.info == 'jubba') + dat.setflags() + assert_(dat.info == 'jubba') + assert_(dat.all(1).info == 'jubba') + assert_(dat.any(1).info == 'jubba') + assert_(dat.argmax(1).info == 'jubba') + assert_(dat.argmin(1).info == 'jubba') + assert_(dat.argsort(1).info == 'jubba') + assert_(dat.astype(TestArray).info == 'jubba') + assert_(dat.byteswap().info == 'jubba') + assert_(dat.clip(2, 7).info == 'jubba') + assert_(dat.compress([0, 1, 1]).info == 'jubba') + assert_(dat.conj().info == 'jubba') + assert_(dat.conjugate().info == 'jubba') + assert_(dat.copy().info == 'jubba') + dat2 = TestArray([2, 3, 1, 0], 'jubba') + choices = [[0, 1, 2, 3], [10, 11, 12, 13], + [20, 21, 22, 23], [30, 31, 32, 33]] + assert_(dat2.choose(choices).info == 'jubba') + assert_(dat.cumprod(1).info == 'jubba') + assert_(dat.cumsum(1).info == 'jubba') + assert_(dat.diagonal().info == 'jubba') + assert_(dat.flatten().info == 'jubba') + assert_(dat.getfield(np.int32, 0).info == 'jubba') + assert_(dat.imag.info == 'jubba') + assert_(dat.max(1).info == 'jubba') + assert_(dat.mean(1).info == 'jubba') + assert_(dat.min(1).info == 'jubba') + assert_(dat.newbyteorder().info == 'jubba') + assert_(dat.prod(1).info == 'jubba') + assert_(dat.ptp(1).info == 'jubba') + assert_(dat.ravel().info == 'jubba') + assert_(dat.real.info == 'jubba') + assert_(dat.repeat(2).info == 'jubba') + assert_(dat.reshape((2, 4)).info == 'jubba') + assert_(dat.round().info == 'jubba') + assert_(dat.squeeze().info == 'jubba') + 
assert_(dat.std(1).info == 'jubba') + assert_(dat.sum(1).info == 'jubba') + assert_(dat.swapaxes(0, 1).info == 'jubba') + assert_(dat.take([2, 3, 5]).info == 'jubba') + assert_(dat.transpose().info == 'jubba') + assert_(dat.T.info == 'jubba') + assert_(dat.var(1).info == 'jubba') + assert_(dat.view(TestArray).info == 'jubba') + # These methods do not preserve subclasses + assert_(type(dat.nonzero()[0]) is np.ndarray) + assert_(type(dat.nonzero()[1]) is np.ndarray) + + def test_recarray_tolist(self): + # Ticket #793, changeset r5215 + # Comparisons fail for NaN, so we can't use random memory + # for the test. + buf = np.zeros(40, dtype=np.int8) + a = np.recarray(2, formats="i4,f8,f8", names="id,x,y", buf=buf) + b = a.tolist() + assert_(a[0].tolist() == b[0]) + assert_(a[1].tolist() == b[1]) + + def test_nonscalar_item_method(self): + # Make sure that .item() fails gracefully when it should + a = np.arange(5) + assert_raises(ValueError, a.item) + + def test_char_array_creation(self): + a = np.array('123', dtype='c') + b = np.array([b'1', b'2', b'3']) + assert_equal(a, b) + + def test_unaligned_unicode_access(self): + # Ticket #825 + for i in range(1, 9): + msg = 'unicode offset: %d chars' % i + t = np.dtype([('a', 'S%d' % i), ('b', 'U2')]) + x = np.array([(b'a', 'b')], dtype=t) + assert_equal(str(x), "[(b'a', 'b')]", err_msg=msg) + + def test_sign_for_complex_nan(self): + # Ticket #794. + with np.errstate(invalid='ignore'): + C = np.array([-np.inf, -2+1j, 0, 2-1j, np.inf, np.nan]) + have = np.sign(C) + want = np.array([-1+0j, -1+0j, 0+0j, 1+0j, 1+0j, np.nan]) + assert_equal(have, want) + + def test_for_equal_names(self): + # Ticket #674 + dt = np.dtype([('foo', float), ('bar', float)]) + a = np.zeros(10, dt) + b = list(a.dtype.names) + b[0] = "notfoo" + a.dtype.names = b + assert_(a.dtype.names[0] == "notfoo") + assert_(a.dtype.names[1] == "bar") + + def test_for_object_scalar_creation(self): + # Ticket #816 + a = np.object_() + b = np.object_(3) + b2 = np.object_(3.0) + c = np.object_([4, 5]) + d = np.object_([None, {}, []]) + assert_(a is None) + assert_(type(b) is int) + assert_(type(b2) is float) + assert_(type(c) is np.ndarray) + assert_(c.dtype == object) + assert_(d.dtype == object) + + def test_array_resize_method_system_error(self): + # Ticket #840 - order should be an invalid keyword. + x = np.array([[0, 1], [2, 3]]) + assert_raises(TypeError, x.resize, (2, 2), order='C') + + def test_for_zero_length_in_choose(self): + "Ticket #882" + a = np.array(1) + assert_raises(ValueError, lambda x: x.choose([]), a) + + def test_array_ndmin_overflow(self): + "Ticket #947." + assert_raises(ValueError, lambda: np.array([1], ndmin=33)) + + def test_void_scalar_with_titles(self): + # No ticket + data = [('john', 4), ('mary', 5)] + dtype1 = [(('source:yy', 'name'), 'O'), (('source:xx', 'id'), int)] + arr = np.array(data, dtype=dtype1) + assert_(arr[0][0] == 'john') + assert_(arr[0][1] == 4) + + def test_void_scalar_constructor(self): + #Issue #1550 + + #Create test string data, construct void scalar from data and assert + #that void scalar contains original data. + test_string = np.array("test") + test_string_void_scalar = np.core.multiarray.scalar( + np.dtype(("V", test_string.dtype.itemsize)), test_string.tobytes()) + + assert_(test_string_void_scalar.view(test_string.dtype) == test_string) + + #Create record scalar, construct from data and assert that + #reconstructed scalar is correct. 
+ test_record = np.ones((), "i,i") + test_record_void_scalar = np.core.multiarray.scalar( + test_record.dtype, test_record.tobytes()) + + assert_(test_record_void_scalar == test_record) + + # Test pickle and unpickle of void and record scalars + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + assert_(pickle.loads( + pickle.dumps(test_string, protocol=proto)) == test_string) + assert_(pickle.loads( + pickle.dumps(test_record, protocol=proto)) == test_record) + + @_no_tracing + def test_blasdot_uninitialized_memory(self): + # Ticket #950 + for m in [0, 1, 2]: + for n in [0, 1, 2]: + for k in range(3): + # Try to ensure that x->data contains non-zero floats + x = np.array([123456789e199], dtype=np.float64) + if IS_PYPY: + x.resize((m, 0), refcheck=False) + else: + x.resize((m, 0)) + y = np.array([123456789e199], dtype=np.float64) + if IS_PYPY: + y.resize((0, n), refcheck=False) + else: + y.resize((0, n)) + + # `dot` should just return zero (m, n) matrix + z = np.dot(x, y) + assert_(np.all(z == 0)) + assert_(z.shape == (m, n)) + + def test_zeros(self): + # Regression test for #1061. + # Set a size which cannot fit into a 64 bits signed integer + sz = 2 ** 64 + with assert_raises_regex(ValueError, + 'Maximum allowed dimension exceeded'): + np.empty(sz) + + def test_huge_arange(self): + # Regression test for #1062. + # Set a size which cannot fit into a 64 bits signed integer + sz = 2 ** 64 + with assert_raises_regex(ValueError, + 'Maximum allowed size exceeded'): + np.arange(sz) + assert_(np.size == sz) + + def test_fromiter_bytes(self): + # Ticket #1058 + a = np.fromiter(list(range(10)), dtype='b') + b = np.fromiter(list(range(10)), dtype='B') + assert_(np.all(a == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]))) + assert_(np.all(b == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]))) + + def test_array_from_sequence_scalar_array(self): + # Ticket #1078: segfaults when creating an array with a sequence of + # 0d arrays. + a = np.array((np.ones(2), np.array(2)), dtype=object) + assert_equal(a.shape, (2,)) + assert_equal(a.dtype, np.dtype(object)) + assert_equal(a[0], np.ones(2)) + assert_equal(a[1], np.array(2)) + + a = np.array(((1,), np.array(1)), dtype=object) + assert_equal(a.shape, (2,)) + assert_equal(a.dtype, np.dtype(object)) + assert_equal(a[0], (1,)) + assert_equal(a[1], np.array(1)) + + def test_array_from_sequence_scalar_array2(self): + # Ticket #1081: weird array with strange input... + t = np.array([np.array([]), np.array(0, object)], dtype=object) + assert_equal(t.shape, (2,)) + assert_equal(t.dtype, np.dtype(object)) + + def test_array_too_big(self): + # Ticket #1080. + assert_raises(ValueError, np.zeros, [975]*7, np.int8) + assert_raises(ValueError, np.zeros, [26244]*5, np.int8) + + def test_dtype_keyerrors_(self): + # Ticket #1106. + dt = np.dtype([('f1', np.uint)]) + assert_raises(KeyError, dt.__getitem__, "f2") + assert_raises(IndexError, dt.__getitem__, 1) + assert_raises(TypeError, dt.__getitem__, 0.0) + + def test_lexsort_buffer_length(self): + # Ticket #1217, don't segfault. + a = np.ones(100, dtype=np.int8) + b = np.ones(100, dtype=np.int32) + i = np.lexsort((a[::-1], b)) + assert_equal(i, np.arange(100, dtype=int)) + + def test_object_array_to_fixed_string(self): + # Ticket #1235. 
+ a = np.array(['abcdefgh', 'ijklmnop'], dtype=np.object_) + b = np.array(a, dtype=(np.str_, 8)) + assert_equal(a, b) + c = np.array(a, dtype=(np.str_, 5)) + assert_equal(c, np.array(['abcde', 'ijklm'])) + d = np.array(a, dtype=(np.str_, 12)) + assert_equal(a, d) + e = np.empty((2, ), dtype=(np.str_, 8)) + e[:] = a[:] + assert_equal(a, e) + + def test_unicode_to_string_cast(self): + # Ticket #1240. + a = np.array([['abc', '\u03a3'], + ['asdf', 'erw']], + dtype='U') + assert_raises(UnicodeEncodeError, np.array, a, 'S4') + + def test_unicode_to_string_cast_error(self): + # gh-15790 + a = np.array(['\x80'] * 129, dtype='U3') + assert_raises(UnicodeEncodeError, np.array, a, 'S') + b = a.reshape(3, 43)[:-1, :-1] + assert_raises(UnicodeEncodeError, np.array, b, 'S') + + def test_mixed_string_byte_array_creation(self): + a = np.array(['1234', b'123']) + assert_(a.itemsize == 16) + a = np.array([b'123', '1234']) + assert_(a.itemsize == 16) + a = np.array(['1234', b'123', '12345']) + assert_(a.itemsize == 20) + a = np.array([b'123', '1234', b'12345']) + assert_(a.itemsize == 20) + a = np.array([b'123', '1234', b'1234']) + assert_(a.itemsize == 16) + + def test_misaligned_objects_segfault(self): + # Ticket #1198 and #1267 + a1 = np.zeros((10,), dtype='O,c') + a2 = np.array(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j'], 'S10') + a1['f0'] = a2 + repr(a1) + np.argmax(a1['f0']) + a1['f0'][1] = "FOO" + a1['f0'] = "FOO" + np.array(a1['f0'], dtype='S') + np.nonzero(a1['f0']) + a1.sort() + copy.deepcopy(a1) + + def test_misaligned_scalars_segfault(self): + # Ticket #1267 + s1 = np.array(('a', 'Foo'), dtype='c,O') + s2 = np.array(('b', 'Bar'), dtype='c,O') + s1['f1'] = s2['f1'] + s1['f1'] = 'Baz' + + def test_misaligned_dot_product_objects(self): + # Ticket #1267 + # This didn't require a fix, but it's worth testing anyway, because + # it may fail if .dot stops enforcing the arrays to be BEHAVED + a = np.array([[(1, 'a'), (0, 'a')], [(0, 'a'), (1, 'a')]], dtype='O,c') + b = np.array([[(4, 'a'), (1, 'a')], [(2, 'a'), (2, 'a')]], dtype='O,c') + np.dot(a['f0'], b['f0']) + + def test_byteswap_complex_scalar(self): + # Ticket #1259 and gh-441 + for dtype in [np.dtype('<'+t) for t in np.typecodes['Complex']]: + z = np.array([2.2-1.1j], dtype) + x = z[0] # always native-endian + y = x.byteswap() + if x.dtype.byteorder == z.dtype.byteorder: + # little-endian machine + assert_equal(x, np.frombuffer(y.tobytes(), dtype=dtype.newbyteorder())) + else: + # big-endian machine + assert_equal(x, np.frombuffer(y.tobytes(), dtype=dtype)) + # double check real and imaginary parts: + assert_equal(x.real, y.real.byteswap()) + assert_equal(x.imag, y.imag.byteswap()) + + def test_structured_arrays_with_objects1(self): + # Ticket #1299 + stra = 'aaaa' + strb = 'bbbb' + x = np.array([[(0, stra), (1, strb)]], 'i8,O') + x[x.nonzero()] = x.ravel()[:1] + assert_(x[0, 1] == x[0, 0]) + + @pytest.mark.skipif( + sys.version_info >= (3, 12), + reason="Python 3.12 has immortal refcounts, this test no longer works." 
+ ) + @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") + def test_structured_arrays_with_objects2(self): + # Ticket #1299 second test + stra = 'aaaa' + strb = 'bbbb' + numb = sys.getrefcount(strb) + numa = sys.getrefcount(stra) + x = np.array([[(0, stra), (1, strb)]], 'i8,O') + x[x.nonzero()] = x.ravel()[:1] + assert_(sys.getrefcount(strb) == numb) + assert_(sys.getrefcount(stra) == numa + 2) + + def test_duplicate_title_and_name(self): + # Ticket #1254 + dtspec = [(('a', 'a'), 'i'), ('b', 'i')] + assert_raises(ValueError, np.dtype, dtspec) + + def test_signed_integer_division_overflow(self): + # Ticket #1317. + def test_type(t): + min = np.array([np.iinfo(t).min]) + min //= -1 + + with np.errstate(over="ignore"): + for t in (np.int8, np.int16, np.int32, np.int64, int): + test_type(t) + + def test_buffer_hashlib(self): + from hashlib import sha256 + + x = np.array([1, 2, 3], dtype=np.dtype('<i4')) + assert_equal(sha256(x).hexdigest(), '4636993d3e1da4e9d6b8f87b79e8f7c6d018580d52661950eabc3845c5897a4d') + + def test_0d_string_scalar(self): + # Bug #1436; the following should succeed + np.asarray('x', '>c') + + def test_log1p_compiler_shenanigans(self): + # Check if log1p is behaving on 32 bit intel systems. + assert_(np.isfinite(np.log1p(np.exp2(-53)))) + + def test_fromiter_comparison(self): + a = np.fromiter(list(range(10)), dtype='b') + b = np.fromiter(list(range(10)), dtype='B') + assert_(np.all(a == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]))) + assert_(np.all(b == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]))) + + def test_fromstring_crash(self): + # Ticket #1345: the following should not cause a crash + with assert_warns(DeprecationWarning): + np.fromstring(b'aa, aa, 1.0', sep=',') + + def test_ticket_1539(self): + dtypes = [x for x in np.sctypeDict.values() + if (issubclass(x, np.number) + and not issubclass(x, np.timedelta64))] + a = np.array([], np.bool_) # not x[0] because it is unordered + failures = [] + + for x in dtypes: + b = a.astype(x) + for y in dtypes: + c = a.astype(y) + try: + d = np.dot(b, c) + except TypeError: + failures.append((x, y)) + else: + if d != 0: + failures.append((x, y)) + if failures: + raise AssertionError("Failures: %r" % failures) + + def test_ticket_1538(self): + x = np.finfo(np.float32) + for name in 'eps epsneg max min resolution tiny'.split(): + assert_equal(type(getattr(x, name)), np.float32, + err_msg=name) + + def test_ticket_1434(self): + # Check that the out= argument in var and std has an effect + data = np.array(((1, 2, 3), (4, 5, 6), (7, 8, 9))) + out = np.zeros((3,)) + + ret = data.var(axis=1, out=out) + assert_(ret is out) + assert_array_equal(ret, data.var(axis=1)) + + ret = data.std(axis=1, out=out) + assert_(ret is out) + assert_array_equal(ret, data.std(axis=1)) + + def test_complex_nan_maximum(self): + cnan = complex(0, np.nan) + assert_equal(np.maximum(1, cnan), cnan) + + def test_subclass_int_tuple_assignment(self): + # ticket #1563 + class Subclass(np.ndarray): + def __new__(cls, i): + return np.ones((i,)).view(cls) + + x = Subclass(5) + x[(0,)] = 2 # shouldn't raise an exception + assert_equal(x[0], 2) + + def test_ufunc_no_unnecessary_views(self): + # ticket #1548 + class Subclass(np.ndarray): + pass + x = np.array([1, 2, 3]).view(Subclass) + y = np.add(x, x, x) + assert_equal(id(x), id(y)) + + @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") + def test_take_refcount(self): + # ticket #939 + a = np.arange(16, dtype=float) + a.shape = (4, 4) + lut = np.ones((5 + 3, 4), float) + rgba = 
np.empty(shape=a.shape + (4,), dtype=lut.dtype) + c1 = sys.getrefcount(rgba) + try: + lut.take(a, axis=0, mode='clip', out=rgba) + except TypeError: + pass + c2 = sys.getrefcount(rgba) + assert_equal(c1, c2) + + def test_fromfile_tofile_seeks(self): + # On Python 3, tofile/fromfile used to get (#1610) the Python + # file handle out of sync + f0 = tempfile.NamedTemporaryFile() + f = f0.file + f.write(np.arange(255, dtype='u1').tobytes()) + + f.seek(20) + ret = np.fromfile(f, count=4, dtype='u1') + assert_equal(ret, np.array([20, 21, 22, 23], dtype='u1')) + assert_equal(f.tell(), 24) + + f.seek(40) + np.array([1, 2, 3], dtype='u1').tofile(f) + assert_equal(f.tell(), 43) + + f.seek(40) + data = f.read(3) + assert_equal(data, b"\x01\x02\x03") + + f.seek(80) + f.read(4) + data = np.fromfile(f, dtype='u1', count=4) + assert_equal(data, np.array([84, 85, 86, 87], dtype='u1')) + + f.close() + + def test_complex_scalar_warning(self): + for tp in [np.csingle, np.cdouble, np.clongdouble]: + x = tp(1+2j) + assert_warns(np.ComplexWarning, float, x) + with suppress_warnings() as sup: + sup.filter(np.ComplexWarning) + assert_equal(float(x), float(x.real)) + + def test_complex_scalar_complex_cast(self): + for tp in [np.csingle, np.cdouble, np.clongdouble]: + x = tp(1+2j) + assert_equal(complex(x), 1+2j) + + def test_complex_boolean_cast(self): + # Ticket #2218 + for tp in [np.csingle, np.cdouble, np.clongdouble]: + x = np.array([0, 0+0.5j, 0.5+0j], dtype=tp) + assert_equal(x.astype(bool), np.array([0, 1, 1], dtype=bool)) + assert_(np.any(x)) + assert_(np.all(x[1:])) + + def test_uint_int_conversion(self): + x = 2**64 - 1 + assert_equal(int(np.uint64(x)), x) + + def test_duplicate_field_names_assign(self): + ra = np.fromiter(((i*3, i*2) for i in range(10)), dtype='i8,f8') + ra.dtype.names = ('f1', 'f2') + repr(ra) # should not cause a segmentation fault + assert_raises(ValueError, setattr, ra.dtype, 'names', ('f1', 'f1')) + + def test_eq_string_and_object_array(self): + # From e-mail thread "__eq__ with str and object" (Keith Goodman) + a1 = np.array(['a', 'b'], dtype=object) + a2 = np.array(['a', 'c']) + assert_array_equal(a1 == a2, [True, False]) + assert_array_equal(a2 == a1, [True, False]) + + def test_nonzero_byteswap(self): + a = np.array([0x80000000, 0x00000080, 0], dtype=np.uint32) + a.dtype = np.float32 + assert_equal(a.nonzero()[0], [1]) + a = a.byteswap().newbyteorder() + assert_equal(a.nonzero()[0], [1]) # [0] if nonzero() ignores swap + + def test_find_common_type_boolean(self): + # Ticket #1695 + with pytest.warns(DeprecationWarning, match="np.find_common_type"): + res = np.find_common_type([], ['?', '?']) + assert res == '?' + + def test_empty_mul(self): + a = np.array([1.]) + a[1:1] *= 2 + assert_equal(a, [1.]) + + def test_array_side_effect(self): + # The second use of itemsize was throwing an exception because in + # ctors.c, discover_itemsize was calling PyObject_Length without + # checking the return code. This failed to get the length of the + # number 2, and the exception hung around until something checked + # PyErr_Occurred() and returned an error. 
+ assert_equal(np.dtype('S10').itemsize, 10) + np.array([['abc', 2], ['long ', '0123456789']], dtype=np.bytes_) + assert_equal(np.dtype('S10').itemsize, 10) + + def test_any_float(self): + # all and any for floats + a = np.array([0.1, 0.9]) + assert_(np.any(a)) + assert_(np.all(a)) + + def test_large_float_sum(self): + a = np.arange(10000, dtype='f') + assert_equal(a.sum(dtype='d'), a.astype('d').sum()) + + def test_ufunc_casting_out(self): + a = np.array(1.0, dtype=np.float32) + b = np.array(1.0, dtype=np.float64) + c = np.array(1.0, dtype=np.float32) + np.add(a, b, out=c) + assert_equal(c, 2.0) + + def test_array_scalar_contiguous(self): + # Array scalars are both C and Fortran contiguous + assert_(np.array(1.0).flags.c_contiguous) + assert_(np.array(1.0).flags.f_contiguous) + assert_(np.array(np.float32(1.0)).flags.c_contiguous) + assert_(np.array(np.float32(1.0)).flags.f_contiguous) + + def test_squeeze_contiguous(self): + # Similar to GitHub issue #387 + a = np.zeros((1, 2)).squeeze() + b = np.zeros((2, 2, 2), order='F')[:, :, ::2].squeeze() + assert_(a.flags.c_contiguous) + assert_(a.flags.f_contiguous) + assert_(b.flags.f_contiguous) + + def test_squeeze_axis_handling(self): + # Issue #10779 + # Ensure proper handling of objects + # that don't support axis specification + # when squeezing + + class OldSqueeze(np.ndarray): + + def __new__(cls, + input_array): + obj = np.asarray(input_array).view(cls) + return obj + + # it is perfectly reasonable that prior + # to numpy version 1.7.0 a subclass of ndarray + # might have been created that did not expect + # squeeze to have an axis argument + # NOTE: this example is somewhat artificial; + # it is designed to simulate an old API + # expectation to guard against regression + def squeeze(self): + return super().squeeze() + + oldsqueeze = OldSqueeze(np.array([[1],[2],[3]])) + + # if no axis argument is specified the old API + # expectation should give the correct result + assert_equal(np.squeeze(oldsqueeze), + np.array([1,2,3])) + + # likewise, axis=None should work perfectly well + # with the old API expectation + assert_equal(np.squeeze(oldsqueeze, axis=None), + np.array([1,2,3])) + + # however, specification of any particular axis + # should raise a TypeError in the context of the + # old API specification, even when using a valid + # axis specification like 1 for this array + with assert_raises(TypeError): + # this would silently succeed for array + # subclasses / objects that did not support + # squeeze axis argument handling before fixing + # Issue #10779 + np.squeeze(oldsqueeze, axis=1) + + # check for the same behavior when using an invalid + # axis specification -- in this case axis=0 does not + # have size 1, but the priority should be to raise + # a TypeError for the axis argument and NOT a + # ValueError for squeezing a non-empty dimension + with assert_raises(TypeError): + np.squeeze(oldsqueeze, axis=0) + + # the new API knows how to handle the axis + # argument and will return a ValueError if + # attempting to squeeze an axis that is not + # of length 1 + with assert_raises(ValueError): + np.squeeze(np.array([[1],[2],[3]]), axis=0) + + def test_reduce_contiguous(self): + # GitHub issue #387 + a = np.add.reduce(np.zeros((2, 1, 2)), (0, 1)) + b = np.add.reduce(np.zeros((2, 1, 2)), 1) + assert_(a.flags.c_contiguous) + assert_(a.flags.f_contiguous) + assert_(b.flags.c_contiguous) + + @pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking") + def test_object_array_self_reference(self): + # Object arrays with 
references to themselves can cause problems + a = np.array(0, dtype=object) + a[()] = a + assert_raises(RecursionError, int, a) + assert_raises(RecursionError, float, a) + a[()] = None + + @pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking") + def test_object_array_circular_reference(self): + # Test the same for a circular reference. + a = np.array(0, dtype=object) + b = np.array(0, dtype=object) + a[()] = b + b[()] = a + assert_raises(RecursionError, int, a) + # NumPy has no tp_traverse currently, so circular references + # cannot be detected. So resolve it: + a[()] = None + + # This was causing a to become like the above + a = np.array(0, dtype=object) + a[...] += 1 + assert_equal(a, 1) + + def test_object_array_nested(self): + # but is fine with a reference to a different array + a = np.array(0, dtype=object) + b = np.array(0, dtype=object) + a[()] = b + assert_equal(int(a), int(0)) + assert_equal(float(a), float(0)) + + def test_object_array_self_copy(self): + # An object array being copied into itself DECREF'ed before INCREF'ing + # causing segmentation faults (gh-3787) + a = np.array(object(), dtype=object) + np.copyto(a, a) + if HAS_REFCOUNT: + assert_(sys.getrefcount(a[()]) == 2) + a[()].__class__ # will segfault if object was deleted + + def test_zerosize_accumulate(self): + "Ticket #1733" + x = np.array([[42, 0]], dtype=np.uint32) + assert_equal(np.add.accumulate(x[:-1, 0]), []) + + def test_objectarray_setfield(self): + # Setfield should not overwrite Object fields with non-Object data + x = np.array([1, 2, 3], dtype=object) + assert_raises(TypeError, x.setfield, 4, np.int32, 0) + + def test_setting_rank0_string(self): + "Ticket #1736" + s1 = b"hello1" + s2 = b"hello2" + a = np.zeros((), dtype="S10") + a[()] = s1 + assert_equal(a, np.array(s1)) + a[()] = np.array(s2) + assert_equal(a, np.array(s2)) + + a = np.zeros((), dtype='f4') + a[()] = 3 + assert_equal(a, np.array(3)) + a[()] = np.array(4) + assert_equal(a, np.array(4)) + + def test_string_astype(self): + "Ticket #1748" + s1 = b'black' + s2 = b'white' + s3 = b'other' + a = np.array([[s1], [s2], [s3]]) + assert_equal(a.dtype, np.dtype('S5')) + b = a.astype(np.dtype('S0')) + assert_equal(b.dtype, np.dtype('S5')) + + def test_ticket_1756(self): + # Ticket #1756 + s = b'0123456789abcdef' + a = np.array([s]*5) + for i in range(1, 17): + a1 = np.array(a, "|S%d" % i) + a2 = np.array([s[:i]]*5) + assert_equal(a1, a2) + + def test_fields_strides(self): + "gh-2355" + r = np.frombuffer(b'abcdefghijklmnop'*4*3, dtype='i4,(2,3)u2') + assert_equal(r[0:3:2]['f1'], r['f1'][0:3:2]) + assert_equal(r[0:3:2]['f1'][0], r[0:3:2][0]['f1']) + assert_equal(r[0:3:2]['f1'][0][()], r[0:3:2][0]['f1'][()]) + assert_equal(r[0:3:2]['f1'][0].strides, r[0:3:2][0]['f1'].strides) + + def test_alignment_update(self): + # Check that alignment flag is updated on stride setting + a = np.arange(10) + assert_(a.flags.aligned) + a.strides = 3 + assert_(not a.flags.aligned) + + def test_ticket_1770(self): + "Should not segfault on python 3k" + import numpy as np + try: + a = np.zeros((1,), dtype=[('f1', 'f')]) + a['f1'] = 1 + a['f2'] = 1 + except ValueError: + pass + except Exception: + raise AssertionError + + def test_ticket_1608(self): + "x.flat shouldn't modify data" + x = np.array([[1, 2], [3, 4]]).T + np.array(x.flat) + assert_equal(x, [[1, 3], [2, 4]]) + + def test_pickle_string_overwrite(self): + import re + + data = np.array([1], dtype='b') + blob = pickle.dumps(data, protocol=1) + data = pickle.loads(blob) + + # Check that loads does 
not clobber interned strings + s = re.sub("a(.)", "\x01\\1", "a_") + assert_equal(s[0], "\x01") + data[0] = 0x6a + s = re.sub("a(.)", "\x01\\1", "a_") + assert_equal(s[0], "\x01") + + def test_pickle_bytes_overwrite(self): + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + data = np.array([1], dtype='b') + data = pickle.loads(pickle.dumps(data, protocol=proto)) + data[0] = 0x7d + bytestring = "\x01 ".encode('ascii') + assert_equal(bytestring[0:1], '\x01'.encode('ascii')) + + def test_pickle_py2_array_latin1_hack(self): + # Check that unpickling hacks in Py3 that support + # encoding='latin1' work correctly. + + # Python2 output for pickle.dumps(numpy.array([129], dtype='b')) + data = (b"cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\np1\n(I0\n" + b"tp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\np7\n(S'i1'\np8\n" + b"I0\nI1\ntp9\nRp10\n(I3\nS'|'\np11\nNNNI-1\nI-1\nI0\ntp12\nbI00\nS'\\x81'\n" + b"p13\ntp14\nb.") + # This should work: + result = pickle.loads(data, encoding='latin1') + assert_array_equal(result, np.array([129]).astype('b')) + # Should not segfault: + assert_raises(Exception, pickle.loads, data, encoding='koi8-r') + + def test_pickle_py2_scalar_latin1_hack(self): + # Check that scalar unpickling hack in Py3 that supports + # encoding='latin1' work correctly. + + # Python2 output for pickle.dumps(...) + datas = [ + # (original, python2_pickle, koi8r_validity) + (np.str_('\u6bd2'), + (b"cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n" + b"(S'U1'\np2\nI0\nI1\ntp3\nRp4\n(I3\nS'<'\np5\nNNNI4\nI4\nI0\n" + b"tp6\nbS'\\xd2k\\x00\\x00'\np7\ntp8\nRp9\n."), + 'invalid'), + + (np.float64(9e123), + (b"cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n(S'f8'\n" + b"p2\nI0\nI1\ntp3\nRp4\n(I3\nS'<'\np5\nNNNI-1\nI-1\nI0\ntp6\n" + b"bS'O\\x81\\xb7Z\\xaa:\\xabY'\np7\ntp8\nRp9\n."), + 'invalid'), + + (np.bytes_(b'\x9c'), # different 8-bit code point in KOI8-R vs latin1 + (b"cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n(S'S1'\np2\n" + b"I0\nI1\ntp3\nRp4\n(I3\nS'|'\np5\nNNNI1\nI1\nI0\ntp6\nbS'\\x9c'\np7\n" + b"tp8\nRp9\n."), + 'different'), + ] + for original, data, koi8r_validity in datas: + result = pickle.loads(data, encoding='latin1') + assert_equal(result, original) + + # Decoding under non-latin1 encoding (e.g.) KOI8-R can + # produce bad results, but should not segfault. + if koi8r_validity == 'different': + # Unicode code points happen to lie within latin1, + # but are different in koi8-r, resulting to silent + # bogus results + result = pickle.loads(data, encoding='koi8-r') + assert_(result != original) + elif koi8r_validity == 'invalid': + # Unicode code points outside latin1, so results + # to an encoding exception + assert_raises(ValueError, pickle.loads, data, encoding='koi8-r') + else: + raise ValueError(koi8r_validity) + + def test_structured_type_to_object(self): + a_rec = np.array([(0, 1), (3, 2)], dtype='i4,i8') + a_obj = np.empty((2,), dtype=object) + a_obj[0] = (0, 1) + a_obj[1] = (3, 2) + # astype records -> object + assert_equal(a_rec.astype(object), a_obj) + # '=' records -> object + b = np.empty_like(a_obj) + b[...] = a_rec + assert_equal(b, a_obj) + # '=' object -> records + b = np.empty_like(a_rec) + b[...] = a_obj + assert_equal(b, a_rec) + + def test_assign_obj_listoflists(self): + # Ticket # 1870 + # The inner list should get assigned to the object elements + a = np.zeros(4, dtype=object) + b = a.copy() + a[0] = [1] + a[1] = [2] + a[2] = [3] + a[3] = [4] + b[...] 
= [[1], [2], [3], [4]] + assert_equal(a, b) + # The first dimension should get broadcast + a = np.zeros((2, 2), dtype=object) + a[...] = [[1, 2]] + assert_equal(a, [[1, 2], [1, 2]]) + + @pytest.mark.slow_pypy + def test_memoryleak(self): + # Ticket #1917 - ensure that array data doesn't leak + for i in range(1000): + # 100MB times 1000 would give 100GB of memory usage if it leaks + a = np.empty((100000000,), dtype='i1') + del a + + @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") + def test_ufunc_reduce_memoryleak(self): + a = np.arange(6) + acnt = sys.getrefcount(a) + np.add.reduce(a) + assert_equal(sys.getrefcount(a), acnt) + + def test_search_sorted_invalid_arguments(self): + # Ticket #2021, should not segfault. + x = np.arange(0, 4, dtype='datetime64[D]') + assert_raises(TypeError, x.searchsorted, 1) + + def test_string_truncation(self): + # Ticket #1990 - Data can be truncated in creation of an array from a + # mixed sequence of numeric values and strings (gh-2583) + for val in [True, 1234, 123.4, complex(1, 234)]: + for tostr, dtype in [(asunicode, "U"), (asbytes, "S")]: + b = np.array([val, tostr('xx')], dtype=dtype) + assert_equal(tostr(b[0]), tostr(val)) + b = np.array([tostr('xx'), val], dtype=dtype) + assert_equal(tostr(b[1]), tostr(val)) + + # test also with longer strings + b = np.array([val, tostr('xxxxxxxxxx')], dtype=dtype) + assert_equal(tostr(b[0]), tostr(val)) + b = np.array([tostr('xxxxxxxxxx'), val], dtype=dtype) + assert_equal(tostr(b[1]), tostr(val)) + + def test_string_truncation_ucs2(self): + # Ticket #2081. Python compiled with two byte unicode + # can lead to truncation if itemsize is not properly + # adjusted for NumPy's four byte unicode. + a = np.array(['abcd']) + assert_equal(a.dtype.itemsize, 16) + + def test_unique_stable(self): + # Ticket #2063 must always choose stable sort for argsort to + # get consistent results + v = np.array(([0]*5 + [1]*6 + [2]*6)*4) + res = np.unique(v, return_index=True) + tgt = (np.array([0, 1, 2]), np.array([ 0, 5, 11])) + assert_equal(res, tgt) + + def test_unicode_alloc_dealloc_match(self): + # Ticket #1578, the mismatch only showed up when running + # python-debug for python versions >= 2.7, and then as + # a core dump and error message. + a = np.array(['abc'], dtype=np.str_)[0] + del a + + def test_refcount_error_in_clip(self): + # Ticket #1588 + a = np.zeros((2,), dtype='>i2').clip(min=0) + x = a + a + # This used to segfault: + y = str(x) + # Check the final string: + assert_(y == "[0 0]") + + def test_searchsorted_wrong_dtype(self): + # Ticket #2189, it used to segfault, so we check that it raises the + # proper exception. + a = np.array([('a', 1)], dtype='S1, int') + assert_raises(TypeError, np.searchsorted, a, 1.2) + # Ticket #2066, similar problem: + dtype = np.format_parser(['i4', 'i4'], [], []) + a = np.recarray((2,), dtype) + a[...] 
= [(1, 2), (3, 4)] + assert_raises(TypeError, np.searchsorted, a, 1) + + def test_complex64_alignment(self): + # Issue gh-2668 (trac 2076), segfault on sparc due to misalignment + dtt = np.complex64 + arr = np.arange(10, dtype=dtt) + # 2D array + arr2 = np.reshape(arr, (2, 5)) + # Fortran write followed by (C or F) read caused bus error + data_str = arr2.tobytes('F') + data_back = np.ndarray(arr2.shape, + arr2.dtype, + buffer=data_str, + order='F') + assert_array_equal(arr2, data_back) + + def test_structured_count_nonzero(self): + arr = np.array([0, 1]).astype('i4, (2)i4')[:1] + count = np.count_nonzero(arr) + assert_equal(count, 0) + + def test_copymodule_preserves_f_contiguity(self): + a = np.empty((2, 2), order='F') + b = copy.copy(a) + c = copy.deepcopy(a) + assert_(b.flags.fortran) + assert_(b.flags.f_contiguous) + assert_(c.flags.fortran) + assert_(c.flags.f_contiguous) + + def test_fortran_order_buffer(self): + import numpy as np + a = np.array([['Hello', 'Foob']], dtype='U5', order='F') + arr = np.ndarray(shape=[1, 2, 5], dtype='U1', buffer=a) + arr2 = np.array([[['H', 'e', 'l', 'l', 'o'], + ['F', 'o', 'o', 'b', '']]]) + assert_array_equal(arr, arr2) + + def test_assign_from_sequence_error(self): + # Ticket #4024. + arr = np.array([1, 2, 3]) + assert_raises(ValueError, arr.__setitem__, slice(None), [9, 9]) + arr.__setitem__(slice(None), [9]) + assert_equal(arr, [9, 9, 9]) + + def test_format_on_flex_array_element(self): + # Ticket #4369. + dt = np.dtype([('date', '<M8[D]'), ('val', '<f8')]) + arr = np.array([('2000-01-01', 1)], dt) + formatted = '{0}'.format(arr[0]) + assert_equal(formatted, str(arr[0])) + + def test_deepcopy_on_0d_array(self): + # Ticket #3311. + arr = np.array(3) + arr_cp = copy.deepcopy(arr) + + assert_equal(arr, arr_cp) + assert_equal(arr.shape, arr_cp.shape) + assert_equal(int(arr), int(arr_cp)) + assert_(arr is not arr_cp) + assert_(isinstance(arr_cp, type(arr))) + + def test_deepcopy_F_order_object_array(self): + # Ticket #6456. + a = {'a': 1} + b = {'b': 2} + arr = np.array([[a, b], [a, b]], order='F') + arr_cp = copy.deepcopy(arr) + + assert_equal(arr, arr_cp) + assert_(arr is not arr_cp) + # Ensure that we have actually copied the item. + assert_(arr[0, 1] is not arr_cp[1, 1]) + # Ensure we are allowed to have references to the same object. + assert_(arr[0, 1] is arr[1, 1]) + # Check the references hold for the copied objects. + assert_(arr_cp[0, 1] is arr_cp[1, 1]) + + def test_deepcopy_empty_object_array(self): + # Ticket #8536. 
+ # Deepcopy should succeed + a = np.array([], dtype=object) + b = copy.deepcopy(a) + assert_(a.shape == b.shape) + + def test_bool_subscript_crash(self): + # gh-4494 + c = np.rec.array([(1, 2, 3), (4, 5, 6)]) + masked = c[np.array([True, False])] + base = masked.base + del masked, c + base.dtype + + def test_richcompare_crash(self): + # gh-4613 + import operator as op + + # dummy class where __array__ throws exception + class Foo: + __array_priority__ = 1002 + + def __array__(self, *args, **kwargs): + raise Exception() + + rhs = Foo() + lhs = np.array(1) + for f in [op.lt, op.le, op.gt, op.ge]: + assert_raises(TypeError, f, lhs, rhs) + assert_(not op.eq(lhs, rhs)) + assert_(op.ne(lhs, rhs)) + + def test_richcompare_scalar_and_subclass(self): + # gh-4709 + class Foo(np.ndarray): + def __eq__(self, other): + return "OK" + + x = np.array([1, 2, 3]).view(Foo) + assert_equal(10 == x, "OK") + assert_equal(np.int32(10) == x, "OK") + assert_equal(np.array([10]) == x, "OK") + + def test_pickle_empty_string(self): + # gh-3926 + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + test_string = np.bytes_('') + assert_equal(pickle.loads( + pickle.dumps(test_string, protocol=proto)), test_string) + + def test_frompyfunc_many_args(self): + # gh-5672 + + def passer(*args): + pass + + assert_raises(ValueError, np.frompyfunc, passer, 32, 1) + + def test_repeat_broadcasting(self): + # gh-5743 + a = np.arange(60).reshape(3, 4, 5) + for axis in chain(range(-a.ndim, a.ndim), [None]): + assert_equal(a.repeat(2, axis=axis), a.repeat([2], axis=axis)) + + def test_frompyfunc_nout_0(self): + # gh-2014 + + def f(x): + x[0], x[-1] = x[-1], x[0] + + uf = np.frompyfunc(f, 1, 0) + a = np.array([[1, 2, 3], [4, 5], [6, 7, 8, 9]], dtype=object) + assert_equal(uf(a), ()) + expected = np.array([[3, 2, 1], [5, 4], [9, 7, 8, 6]], dtype=object) + assert_array_equal(a, expected) + + @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") + def test_leak_in_structured_dtype_comparison(self): + # gh-6250 + recordtype = np.dtype([('a', np.float64), + ('b', np.int32), + ('d', (str, 5))]) + + # Simple case + a = np.zeros(2, dtype=recordtype) + for i in range(100): + a == a + assert_(sys.getrefcount(a) < 10) + + # The case in the bug report. + before = sys.getrefcount(a) + u, v = a[0], a[1] + u == v + del u, v + gc.collect() + after = sys.getrefcount(a) + assert_equal(before, after) + + def test_empty_percentile(self): + # gh-6530 / gh-6553 + assert_array_equal(np.percentile(np.arange(10), []), np.array([])) + + def test_void_compare_segfault(self): + # gh-6922. The following should not segfault + a = np.ones(3, dtype=[('object', 'O'), ('int', '<i2')]) + a.sort() + + def test_reshape_size_overflow(self): + # gh-7455 + a = np.ones(20)[::2] + if np.dtype(np.intp).itemsize == 8: + # 64 bit. The following are the prime factors of 2**63 + 5, + # plus a leading 2, so when multiplied together as int64, + # the result overflows to a total size of 10. + new_shape = (2, 13, 419, 691, 823, 2977518503) + else: + # 32 bit. The following are the prime factors of 2**31 + 5, + # plus a leading 2, so when multiplied together as int32, + # the result overflows to a total size of 10. 
+ new_shape = (2, 7, 7, 43826197) + assert_raises(ValueError, a.reshape, new_shape) + + @pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), + reason="PyPy bug in error formatting") + def test_invalid_structured_dtypes(self): + # gh-2865 + # mapping python objects to other dtypes + assert_raises(ValueError, np.dtype, ('O', [('name', 'i8')])) + assert_raises(ValueError, np.dtype, ('i8', [('name', 'O')])) + assert_raises(ValueError, np.dtype, + ('i8', [('name', [('name', 'O')])])) + assert_raises(ValueError, np.dtype, ([('a', 'i4'), ('b', 'i4')], 'O')) + assert_raises(ValueError, np.dtype, ('i8', 'O')) + # wrong number/type of tuple elements in dict + assert_raises(ValueError, np.dtype, + ('i', {'name': ('i', 0, 'title', 'oops')})) + assert_raises(ValueError, np.dtype, + ('i', {'name': ('i', 'wrongtype', 'title')})) + # disallowed as of 1.13 + assert_raises(ValueError, np.dtype, + ([('a', 'O'), ('b', 'O')], [('c', 'O'), ('d', 'O')])) + # allowed as a special case due to existing use, see gh-2798 + a = np.ones(1, dtype=('O', [('name', 'O')])) + assert_equal(a[0], 1) + # In particular, the above union dtype (and union dtypes in general) + # should mainly behave like the main (object) dtype: + assert a[0] is a.item() + assert type(a[0]) is int + + def test_correct_hash_dict(self): + # gh-8887 - __hash__ would be None despite tp_hash being set + all_types = set(np.sctypeDict.values()) - {np.void} + for t in all_types: + val = t() + + try: + hash(val) + except TypeError as e: + assert_equal(t.__hash__, None) + else: + assert_(t.__hash__ != None) + + def test_scalar_copy(self): + scalar_types = set(np.sctypeDict.values()) + values = { + np.void: b"a", + np.bytes_: b"a", + np.str_: "a", + np.datetime64: "2017-08-25", + } + for sctype in scalar_types: + item = sctype(values.get(sctype, 1)) + item2 = copy.copy(item) + assert_equal(item, item2) + + def test_void_item_memview(self): + va = np.zeros(10, 'V4') + x = va[:1].item() + va[0] = b'\xff\xff\xff\xff' + del va + assert_equal(x, b'\x00\x00\x00\x00') + + def test_void_getitem(self): + # Test fix for gh-11668. 
+ assert_(np.array([b'a'], 'V1').astype('O') == b'a') + assert_(np.array([b'ab'], 'V2').astype('O') == b'ab') + assert_(np.array([b'abc'], 'V3').astype('O') == b'abc') + assert_(np.array([b'abcd'], 'V4').astype('O') == b'abcd') + + def test_structarray_title(self): + # The following used to segfault on pypy, due to NPY_TITLE_KEY + # not working properly and resulting to double-decref of the + # structured array field items: + # See: https://bitbucket.org/pypy/pypy/issues/2789 + for j in range(5): + structure = np.array([1], dtype=[(('x', 'X'), np.object_)]) + structure[0]['x'] = np.array([2]) + gc.collect() + + def test_dtype_scalar_squeeze(self): + # gh-11384 + values = { + 'S': b"a", + 'M': "2018-06-20", + } + for ch in np.typecodes['All']: + if ch in 'O': + continue + sctype = np.dtype(ch).type + scvalue = sctype(values.get(ch, 3)) + for axis in [None, ()]: + squeezed = scvalue.squeeze(axis=axis) + assert_equal(squeezed, scvalue) + assert_equal(type(squeezed), type(scvalue)) + + def test_field_access_by_title(self): + # gh-11507 + s = 'Some long field name' + if HAS_REFCOUNT: + base = sys.getrefcount(s) + t = np.dtype([((s, 'f1'), np.float64)]) + data = np.zeros(10, t) + for i in range(10): + str(data[['f1']]) + if HAS_REFCOUNT: + assert_(base <= sys.getrefcount(s)) + + @pytest.mark.parametrize('val', [ + # arrays and scalars + np.ones((10, 10), dtype='int32'), + np.uint64(10), + ]) + @pytest.mark.parametrize('protocol', + range(2, pickle.HIGHEST_PROTOCOL + 1) + ) + def test_pickle_module(self, protocol, val): + # gh-12837 + s = pickle.dumps(val, protocol) + assert b'_multiarray_umath' not in s + if protocol == 5 and len(val.shape) > 0: + # unpickling ndarray goes through _frombuffer for protocol 5 + assert b'numpy.core.numeric' in s + else: + assert b'numpy.core.multiarray' in s + + def test_object_casting_errors(self): + # gh-11993 update to ValueError (see gh-16909), since strings can in + # principle be converted to complex, but this string cannot. + arr = np.array(['AAAAA', 18465886.0, 18465886.0], dtype=object) + assert_raises(ValueError, arr.astype, 'c8') + + def test_eff1d_casting(self): + # gh-12711 + x = np.array([1, 2, 4, 7, 0], dtype=np.int16) + res = np.ediff1d(x, to_begin=-99, to_end=np.array([88, 99])) + assert_equal(res, [-99, 1, 2, 3, -7, 88, 99]) + + # The use of safe casting means, that 1<<20 is cast unsafely, an + # error may be better, but currently there is no mechanism for it. + res = np.ediff1d(x, to_begin=(1<<20), to_end=(1<<20)) + assert_equal(res, [0, 1, 2, 3, -7, 0]) + + def test_pickle_datetime64_array(self): + # gh-12745 (would fail with pickle5 installed) + d = np.datetime64('2015-07-04 12:59:59.50', 'ns') + arr = np.array([d]) + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + dumped = pickle.dumps(arr, protocol=proto) + assert_equal(pickle.loads(dumped), arr) + + def test_bad_array_interface(self): + class T: + __array_interface__ = {} + + with assert_raises(ValueError): + np.array([T()]) + + def test_2d__array__shape(self): + class T: + def __array__(self): + return np.ndarray(shape=(0,0)) + + # Make sure __array__ is used instead of Sequence methods. 
+ def __iter__(self): + return iter([]) + + def __getitem__(self, idx): + raise AssertionError("__getitem__ was called") + + def __len__(self): + return 0 + + + t = T() + # gh-13659, would raise in broadcasting [x=t for x in result] + arr = np.array([t]) + assert arr.shape == (1, 0, 0) + + @pytest.mark.skipif(sys.maxsize < 2 ** 31 + 1, reason='overflows 32-bit python') + def test_to_ctypes(self): + #gh-14214 + arr = np.zeros((2 ** 31 + 1,), 'b') + assert arr.size * arr.itemsize > 2 ** 31 + c_arr = np.ctypeslib.as_ctypes(arr) + assert_equal(c_arr._length_, arr.size) + + def test_complex_conversion_error(self): + # gh-17068 + with pytest.raises(TypeError, match=r"Unable to convert dtype.*"): + complex(np.array("now", np.datetime64)) + + def test__array_interface__descr(self): + # gh-17068 + dt = np.dtype(dict(names=['a', 'b'], + offsets=[0, 0], + formats=[np.int64, np.int64])) + descr = np.array((1, 1), dtype=dt).__array_interface__['descr'] + assert descr == [('', '|V8')] # instead of [(b'', '|V8')] + + @pytest.mark.skipif(sys.maxsize < 2 ** 31 + 1, reason='overflows 32-bit python') + @requires_memory(free_bytes=9e9) + def test_dot_big_stride(self): + # gh-17111 + # blas stride = stride//itemsize > int32 max + int32_max = np.iinfo(np.int32).max + n = int32_max + 3 + a = np.empty([n], dtype=np.float32) + b = a[::n-1] + b[...] = 1 + assert b.strides[0] > int32_max * b.dtype.itemsize + assert np.dot(b, b) == 2.0 + + def test_frompyfunc_name(self): + # name conversion was failing for python 3 strings + # resulting in the default '?' name. Also test utf-8 + # encoding using non-ascii name. + def cassé(x): + return x + + f = np.frompyfunc(cassé, 1, 1) + assert str(f) == "<ufunc 'cassé (vectorized)'>" + + @pytest.mark.parametrize("operation", [ + 'add', 'subtract', 'multiply', 'floor_divide', + 'conjugate', 'fmod', 'square', 'reciprocal', + 'power', 'absolute', 'negative', 'positive', + 'greater', 'greater_equal', 'less', + 'less_equal', 'equal', 'not_equal', 'logical_and', + 'logical_not', 'logical_or', 'bitwise_and', 'bitwise_or', + 'bitwise_xor', 'invert', 'left_shift', 'right_shift', + 'gcd', 'lcm' + ] + ) + @pytest.mark.parametrize("order", [ + ('b->', 'B->'), + ('h->', 'H->'), + ('i->', 'I->'), + ('l->', 'L->'), + ('q->', 'Q->'), + ] + ) + def test_ufunc_order(self, operation, order): + # gh-18075 + # Ensure signed types before unsigned + def get_idx(string, str_lst): + for i, s in enumerate(str_lst): + if string in s: + return i + raise ValueError(f"{string} not in list") + types = getattr(np, operation).types + assert get_idx(order[0], types) < get_idx(order[1], types), ( + f"Unexpected types order of ufunc in {operation}" + f"for {order}. Possible fix: Use signed before unsigned" + "in generate_umath.py") + + def test_nonbool_logical(self): + # gh-22845 + # create two arrays with bit patterns that do not overlap. 
+ # needs to be large enough to test both SIMD and scalar paths + size = 100 + a = np.frombuffer(b'\x01' * size, dtype=np.bool_) + b = np.frombuffer(b'\x80' * size, dtype=np.bool_) + expected = np.ones(size, dtype=np.bool_) + assert_array_equal(np.logical_and(a, b), expected) + diff --git a/.venv/lib/python3.12/site-packages/numpy/core/tests/test_scalar_ctors.py b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_scalar_ctors.py new file mode 100644 index 00000000..da976d64 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_scalar_ctors.py @@ -0,0 +1,186 @@ +""" +Test the scalar constructors, which also do type-coercion +""" +import pytest + +import numpy as np +from numpy.testing import ( + assert_equal, assert_almost_equal, assert_warns, + ) + +class TestFromString: + def test_floating(self): + # Ticket #640, floats from string + fsingle = np.single('1.234') + fdouble = np.double('1.234') + flongdouble = np.longdouble('1.234') + assert_almost_equal(fsingle, 1.234) + assert_almost_equal(fdouble, 1.234) + assert_almost_equal(flongdouble, 1.234) + + def test_floating_overflow(self): + """ Strings containing an unrepresentable float overflow """ + fhalf = np.half('1e10000') + assert_equal(fhalf, np.inf) + fsingle = np.single('1e10000') + assert_equal(fsingle, np.inf) + fdouble = np.double('1e10000') + assert_equal(fdouble, np.inf) + flongdouble = assert_warns(RuntimeWarning, np.longdouble, '1e10000') + assert_equal(flongdouble, np.inf) + + fhalf = np.half('-1e10000') + assert_equal(fhalf, -np.inf) + fsingle = np.single('-1e10000') + assert_equal(fsingle, -np.inf) + fdouble = np.double('-1e10000') + assert_equal(fdouble, -np.inf) + flongdouble = assert_warns(RuntimeWarning, np.longdouble, '-1e10000') + assert_equal(flongdouble, -np.inf) + + +class TestExtraArgs: + def test_superclass(self): + # try both positional and keyword arguments + s = np.str_(b'\\x61', encoding='unicode-escape') + assert s == 'a' + s = np.str_(b'\\x61', 'unicode-escape') + assert s == 'a' + + # previously this would return '\\xx' + with pytest.raises(UnicodeDecodeError): + np.str_(b'\\xx', encoding='unicode-escape') + with pytest.raises(UnicodeDecodeError): + np.str_(b'\\xx', 'unicode-escape') + + # superclass fails, but numpy succeeds + assert np.bytes_(-2) == b'-2' + + def test_datetime(self): + dt = np.datetime64('2000-01', ('M', 2)) + assert np.datetime_data(dt) == ('M', 2) + + with pytest.raises(TypeError): + np.datetime64('2000', garbage=True) + + def test_bool(self): + with pytest.raises(TypeError): + np.bool_(False, garbage=True) + + def test_void(self): + with pytest.raises(TypeError): + np.void(b'test', garbage=True) + + +class TestFromInt: + def test_intp(self): + # Ticket #99 + assert_equal(1024, np.intp(1024)) + + def test_uint64_from_negative(self): + with pytest.warns(DeprecationWarning): + assert_equal(np.uint64(-2), np.uint64(18446744073709551614)) + + +int_types = [np.byte, np.short, np.intc, np.int_, np.longlong] +uint_types = [np.ubyte, np.ushort, np.uintc, np.uint, np.ulonglong] +float_types = [np.half, np.single, np.double, np.longdouble] +cfloat_types = [np.csingle, np.cdouble, np.clongdouble] + + +class TestArrayFromScalar: + """ gh-15467 """ + + def _do_test(self, t1, t2): + x = t1(2) + arr = np.array(x, dtype=t2) + # type should be preserved exactly + if t2 is None: + assert arr.dtype.type is t1 + else: + assert arr.dtype.type is t2 + + @pytest.mark.parametrize('t1', int_types + uint_types) + @pytest.mark.parametrize('t2', int_types + uint_types + [None]) + def 
test_integers(self, t1, t2): + return self._do_test(t1, t2) + + @pytest.mark.parametrize('t1', float_types) + @pytest.mark.parametrize('t2', float_types + [None]) + def test_reals(self, t1, t2): + return self._do_test(t1, t2) + + @pytest.mark.parametrize('t1', cfloat_types) + @pytest.mark.parametrize('t2', cfloat_types + [None]) + def test_complex(self, t1, t2): + return self._do_test(t1, t2) + + +@pytest.mark.parametrize("length", + [5, np.int8(5), np.array(5, dtype=np.uint16)]) +def test_void_via_length(length): + res = np.void(length) + assert type(res) is np.void + assert res.item() == b"\0" * 5 + assert res.dtype == "V5" + +@pytest.mark.parametrize("bytes_", + [b"spam", np.array(567.)]) +def test_void_from_byteslike(bytes_): + res = np.void(bytes_) + expected = bytes(bytes_) + assert type(res) is np.void + assert res.item() == expected + + # Passing dtype can extend it (this is how filling works) + res = np.void(bytes_, dtype="V100") + assert type(res) is np.void + assert res.item()[:len(expected)] == expected + assert res.item()[len(expected):] == b"\0" * (res.nbytes - len(expected)) + # As well as shorten: + res = np.void(bytes_, dtype="V4") + assert type(res) is np.void + assert res.item() == expected[:4] + +def test_void_arraylike_trumps_byteslike(): + # The memoryview is converted as an array-like of shape (18,) + # rather than a single bytes-like of that length. + m = memoryview(b"just one mintleaf?") + res = np.void(m) + assert type(res) is np.ndarray + assert res.dtype == "V1" + assert res.shape == (18,) + +def test_void_dtype_arg(): + # Basic test for the dtype argument (positional and keyword) + res = np.void((1, 2), dtype="i,i") + assert res.item() == (1, 2) + res = np.void((2, 3), "i,i") + assert res.item() == (2, 3) + +@pytest.mark.parametrize("data", + [5, np.int8(5), np.array(5, dtype=np.uint16)]) +def test_void_from_integer_with_dtype(data): + # The "length" meaning is ignored, rather data is used: + res = np.void(data, dtype="i,i") + assert type(res) is np.void + assert res.dtype == "i,i" + assert res["f0"] == 5 and res["f1"] == 5 + +def test_void_from_structure(): + dtype = np.dtype([('s', [('f', 'f8'), ('u', 'U1')]), ('i', 'i2')]) + data = np.array(((1., 'a'), 2), dtype=dtype) + res = np.void(data[()], dtype=dtype) + assert type(res) is np.void + assert res.dtype == dtype + assert res == data[()] + +def test_void_bad_dtype(): + with pytest.raises(TypeError, + match="void: descr must be a `void.*int64"): + np.void(4, dtype="i8") + + # Subarray dtype (with shape `(4,)` is rejected): + with pytest.raises(TypeError, + match=r"void: descr must be a `void.*\(4,\)"): + np.void(4, dtype="4i") diff --git a/.venv/lib/python3.12/site-packages/numpy/core/tests/test_scalar_methods.py b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_scalar_methods.py new file mode 100644 index 00000000..18a7bc82 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_scalar_methods.py @@ -0,0 +1,204 @@ +""" +Test the scalar constructors, which also do type-coercion +""" +import fractions +import platform +import types +from typing import Any, Type + +import pytest +import numpy as np + +from numpy.testing import assert_equal, assert_raises, IS_MUSL + + +class TestAsIntegerRatio: + # derived in part from the cpython test "test_floatasratio" + + @pytest.mark.parametrize("ftype", [ + np.half, np.single, np.double, np.longdouble]) + @pytest.mark.parametrize("f, ratio", [ + (0.875, (7, 8)), + (-0.875, (-7, 8)), + (0.0, (0, 1)), + (11.5, (23, 2)), + ]) + def 
test_small(self, ftype, f, ratio): + assert_equal(ftype(f).as_integer_ratio(), ratio) + + @pytest.mark.parametrize("ftype", [ + np.half, np.single, np.double, np.longdouble]) + def test_simple_fractions(self, ftype): + R = fractions.Fraction + assert_equal(R(0, 1), + R(*ftype(0.0).as_integer_ratio())) + assert_equal(R(5, 2), + R(*ftype(2.5).as_integer_ratio())) + assert_equal(R(1, 2), + R(*ftype(0.5).as_integer_ratio())) + assert_equal(R(-2100, 1), + R(*ftype(-2100.0).as_integer_ratio())) + + @pytest.mark.parametrize("ftype", [ + np.half, np.single, np.double, np.longdouble]) + def test_errors(self, ftype): + assert_raises(OverflowError, ftype('inf').as_integer_ratio) + assert_raises(OverflowError, ftype('-inf').as_integer_ratio) + assert_raises(ValueError, ftype('nan').as_integer_ratio) + + def test_against_known_values(self): + R = fractions.Fraction + assert_equal(R(1075, 512), + R(*np.half(2.1).as_integer_ratio())) + assert_equal(R(-1075, 512), + R(*np.half(-2.1).as_integer_ratio())) + assert_equal(R(4404019, 2097152), + R(*np.single(2.1).as_integer_ratio())) + assert_equal(R(-4404019, 2097152), + R(*np.single(-2.1).as_integer_ratio())) + assert_equal(R(4728779608739021, 2251799813685248), + R(*np.double(2.1).as_integer_ratio())) + assert_equal(R(-4728779608739021, 2251799813685248), + R(*np.double(-2.1).as_integer_ratio())) + # longdouble is platform dependent + + @pytest.mark.parametrize("ftype, frac_vals, exp_vals", [ + # dtype test cases generated using hypothesis + # first five generated cases per dtype + (np.half, [0.0, 0.01154830649280303, 0.31082276347447274, + 0.527350517124794, 0.8308562335072596], + [0, 1, 0, -8, 12]), + (np.single, [0.0, 0.09248576989263226, 0.8160498218131407, + 0.17389442853722373, 0.7956044195067877], + [0, 12, 10, 17, -26]), + (np.double, [0.0, 0.031066908499895136, 0.5214135908877832, + 0.45780736035689296, 0.5906586745934036], + [0, -801, 51, 194, -653]), + pytest.param( + np.longdouble, + [0.0, 0.20492557202724854, 0.4277180662199366, 0.9888085019891495, + 0.9620175814461964], + [0, -7400, 14266, -7822, -8721], + marks=[ + pytest.mark.skipif( + np.finfo(np.double) == np.finfo(np.longdouble), + reason="long double is same as double"), + pytest.mark.skipif( + platform.machine().startswith("ppc"), + reason="IBM double double"), + ] + ) + ]) + def test_roundtrip(self, ftype, frac_vals, exp_vals): + for frac, exp in zip(frac_vals, exp_vals): + f = np.ldexp(ftype(frac), exp) + assert f.dtype == ftype + n, d = f.as_integer_ratio() + + try: + nf = np.longdouble(n) + df = np.longdouble(d) + if not np.isfinite(df): + raise OverflowError + except (OverflowError, RuntimeWarning): + # the values may not fit in any float type + pytest.skip("longdouble too small on this platform") + + assert_equal(nf / df, f, "{}/{}".format(n, d)) + + +class TestIsInteger: + @pytest.mark.parametrize("str_value", ["inf", "nan"]) + @pytest.mark.parametrize("code", np.typecodes["Float"]) + def test_special(self, code: str, str_value: str) -> None: + cls = np.dtype(code).type + value = cls(str_value) + assert not value.is_integer() + + @pytest.mark.parametrize( + "code", np.typecodes["Float"] + np.typecodes["AllInteger"] + ) + def test_true(self, code: str) -> None: + float_array = np.arange(-5, 5).astype(code) + for value in float_array: + assert value.is_integer() + + @pytest.mark.parametrize("code", np.typecodes["Float"]) + def test_false(self, code: str) -> None: + float_array = np.arange(-5, 5).astype(code) + float_array *= 1.1 + for value in float_array: + if value == 0: + 
continue + assert not value.is_integer() + + +class TestClassGetItem: + @pytest.mark.parametrize("cls", [ + np.number, + np.integer, + np.inexact, + np.unsignedinteger, + np.signedinteger, + np.floating, + ]) + def test_abc(self, cls: Type[np.number]) -> None: + alias = cls[Any] + assert isinstance(alias, types.GenericAlias) + assert alias.__origin__ is cls + + def test_abc_complexfloating(self) -> None: + alias = np.complexfloating[Any, Any] + assert isinstance(alias, types.GenericAlias) + assert alias.__origin__ is np.complexfloating + + @pytest.mark.parametrize("arg_len", range(4)) + def test_abc_complexfloating_subscript_tuple(self, arg_len: int) -> None: + arg_tup = (Any,) * arg_len + if arg_len in (1, 2): + assert np.complexfloating[arg_tup] + else: + match = f"Too {'few' if arg_len == 0 else 'many'} arguments" + with pytest.raises(TypeError, match=match): + np.complexfloating[arg_tup] + + @pytest.mark.parametrize("cls", [np.generic, np.flexible, np.character]) + def test_abc_non_numeric(self, cls: Type[np.generic]) -> None: + with pytest.raises(TypeError): + cls[Any] + + @pytest.mark.parametrize("code", np.typecodes["All"]) + def test_concrete(self, code: str) -> None: + cls = np.dtype(code).type + with pytest.raises(TypeError): + cls[Any] + + @pytest.mark.parametrize("arg_len", range(4)) + def test_subscript_tuple(self, arg_len: int) -> None: + arg_tup = (Any,) * arg_len + if arg_len == 1: + assert np.number[arg_tup] + else: + with pytest.raises(TypeError): + np.number[arg_tup] + + def test_subscript_scalar(self) -> None: + assert np.number[Any] + + +class TestBitCount: + # derived in part from the cpython test "test_bit_count" + + @pytest.mark.parametrize("itype", np.sctypes['int']+np.sctypes['uint']) + def test_small(self, itype): + for a in range(max(np.iinfo(itype).min, 0), 128): + msg = f"Smoke test for {itype}({a}).bit_count()" + assert itype(a).bit_count() == bin(a).count("1"), msg + + def test_bit_count(self): + for exp in [10, 17, 63]: + a = 2**exp + assert np.uint64(a).bit_count() == 1 + assert np.uint64(a - 1).bit_count() == exp + assert np.uint64(a ^ 63).bit_count() == 7 + assert np.uint64((a - 1) ^ 510).bit_count() == exp - 8 diff --git a/.venv/lib/python3.12/site-packages/numpy/core/tests/test_scalarbuffer.py b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_scalarbuffer.py new file mode 100644 index 00000000..31b0494c --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_scalarbuffer.py @@ -0,0 +1,153 @@ +""" +Test scalar buffer interface adheres to PEP 3118 +""" +import numpy as np +from numpy.core._rational_tests import rational +from numpy.core._multiarray_tests import get_buffer_info +import pytest + +from numpy.testing import assert_, assert_equal, assert_raises + +# PEP3118 format strings for native (standard alignment and byteorder) types +scalars_and_codes = [ + (np.bool_, '?'), + (np.byte, 'b'), + (np.short, 'h'), + (np.intc, 'i'), + (np.int_, 'l'), + (np.longlong, 'q'), + (np.ubyte, 'B'), + (np.ushort, 'H'), + (np.uintc, 'I'), + (np.uint, 'L'), + (np.ulonglong, 'Q'), + (np.half, 'e'), + (np.single, 'f'), + (np.double, 'd'), + (np.longdouble, 'g'), + (np.csingle, 'Zf'), + (np.cdouble, 'Zd'), + (np.clongdouble, 'Zg'), +] +scalars_only, codes_only = zip(*scalars_and_codes) + + +class TestScalarPEP3118: + + @pytest.mark.parametrize('scalar', scalars_only, ids=codes_only) + def test_scalar_match_array(self, scalar): + x = scalar() + a = np.array([], dtype=np.dtype(scalar)) + mv_x = memoryview(x) + mv_a = memoryview(a) + 
assert_equal(mv_x.format, mv_a.format) + + @pytest.mark.parametrize('scalar', scalars_only, ids=codes_only) + def test_scalar_dim(self, scalar): + x = scalar() + mv_x = memoryview(x) + assert_equal(mv_x.itemsize, np.dtype(scalar).itemsize) + assert_equal(mv_x.ndim, 0) + assert_equal(mv_x.shape, ()) + assert_equal(mv_x.strides, ()) + assert_equal(mv_x.suboffsets, ()) + + @pytest.mark.parametrize('scalar, code', scalars_and_codes, ids=codes_only) + def test_scalar_code_and_properties(self, scalar, code): + x = scalar() + expected = dict(strides=(), itemsize=x.dtype.itemsize, ndim=0, + shape=(), format=code, readonly=True) + + mv_x = memoryview(x) + assert self._as_dict(mv_x) == expected + + @pytest.mark.parametrize('scalar', scalars_only, ids=codes_only) + def test_scalar_buffers_readonly(self, scalar): + x = scalar() + with pytest.raises(BufferError, match="scalar buffer is readonly"): + get_buffer_info(x, ["WRITABLE"]) + + def test_void_scalar_structured_data(self): + dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))]) + x = np.array(('ndarray_scalar', (1.2, 3.0)), dtype=dt)[()] + assert_(isinstance(x, np.void)) + mv_x = memoryview(x) + expected_size = 16 * np.dtype((np.str_, 1)).itemsize + expected_size += 2 * np.dtype(np.float64).itemsize + assert_equal(mv_x.itemsize, expected_size) + assert_equal(mv_x.ndim, 0) + assert_equal(mv_x.shape, ()) + assert_equal(mv_x.strides, ()) + assert_equal(mv_x.suboffsets, ()) + + # check scalar format string against ndarray format string + a = np.array([('Sarah', (8.0, 7.0)), ('John', (6.0, 7.0))], dtype=dt) + assert_(isinstance(a, np.ndarray)) + mv_a = memoryview(a) + assert_equal(mv_x.itemsize, mv_a.itemsize) + assert_equal(mv_x.format, mv_a.format) + + # Check that we do not allow writeable buffer export (technically + # we could allow it sometimes here...) 
+ with pytest.raises(BufferError, match="scalar buffer is readonly"): + get_buffer_info(x, ["WRITABLE"]) + + def _as_dict(self, m): + return dict(strides=m.strides, shape=m.shape, itemsize=m.itemsize, + ndim=m.ndim, format=m.format, readonly=m.readonly) + + def test_datetime_memoryview(self): + # gh-11656 + # Values verified with v1.13.3, shape is not () as in test_scalar_dim + + dt1 = np.datetime64('2016-01-01') + dt2 = np.datetime64('2017-01-01') + expected = dict(strides=(1,), itemsize=1, ndim=1, shape=(8,), + format='B', readonly=True) + v = memoryview(dt1) + assert self._as_dict(v) == expected + + v = memoryview(dt2 - dt1) + assert self._as_dict(v) == expected + + dt = np.dtype([('a', 'uint16'), ('b', 'M8[s]')]) + a = np.empty(1, dt) + # Fails to create a PEP 3118 valid buffer + assert_raises((ValueError, BufferError), memoryview, a[0]) + + # Check that we do not allow writeable buffer export + with pytest.raises(BufferError, match="scalar buffer is readonly"): + get_buffer_info(dt1, ["WRITABLE"]) + + @pytest.mark.parametrize('s', [ + pytest.param("\x32\x32", id="ascii"), + pytest.param("\uFE0F\uFE0F", id="basic multilingual"), + pytest.param("\U0001f4bb\U0001f4bb", id="non-BMP"), + ]) + def test_str_ucs4(self, s): + s = np.str_(s) # only our subclass implements the buffer protocol + + # all the same, characters always encode as ucs4 + expected = dict(strides=(), itemsize=8, ndim=0, shape=(), format='2w', + readonly=True) + + v = memoryview(s) + assert self._as_dict(v) == expected + + # integers of the paltform-appropriate endianness + code_points = np.frombuffer(v, dtype='i4') + + assert_equal(code_points, [ord(c) for c in s]) + + # Check that we do not allow writeable buffer export + with pytest.raises(BufferError, match="scalar buffer is readonly"): + get_buffer_info(s, ["WRITABLE"]) + + def test_user_scalar_fails_buffer(self): + r = rational(1) + with assert_raises(TypeError): + memoryview(r) + + # Check that we do not allow writeable buffer export + with pytest.raises(BufferError, match="scalar buffer is readonly"): + get_buffer_info(r, ["WRITABLE"]) diff --git a/.venv/lib/python3.12/site-packages/numpy/core/tests/test_scalarinherit.py b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_scalarinherit.py new file mode 100644 index 00000000..f9c574d5 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_scalarinherit.py @@ -0,0 +1,98 @@ +""" Test printing of scalar types. + +""" +import pytest + +import numpy as np +from numpy.testing import assert_, assert_raises + + +class A: + pass +class B(A, np.float64): + pass + +class C(B): + pass +class D(C, B): + pass + +class B0(np.float64, A): + pass +class C0(B0): + pass + +class HasNew: + def __new__(cls, *args, **kwargs): + return cls, args, kwargs + +class B1(np.float64, HasNew): + pass + + +class TestInherit: + def test_init(self): + x = B(1.0) + assert_(str(x) == '1.0') + y = C(2.0) + assert_(str(y) == '2.0') + z = D(3.0) + assert_(str(z) == '3.0') + + def test_init2(self): + x = B0(1.0) + assert_(str(x) == '1.0') + y = C0(2.0) + assert_(str(y) == '2.0') + + def test_gh_15395(self): + # HasNew is the second base, so `np.float64` should have priority + x = B1(1.0) + assert_(str(x) == '1.0') + + # previously caused RecursionError!? 
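+        # (the extra positional argument below is the case that used to
+        # recurse; it is now expected to fail with a plain TypeError)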
+ with pytest.raises(TypeError): + B1(1.0, 2.0) + + +class TestCharacter: + def test_char_radd(self): + # GH issue 9620, reached gentype_add and raise TypeError + np_s = np.bytes_('abc') + np_u = np.str_('abc') + s = b'def' + u = 'def' + assert_(np_s.__radd__(np_s) is NotImplemented) + assert_(np_s.__radd__(np_u) is NotImplemented) + assert_(np_s.__radd__(s) is NotImplemented) + assert_(np_s.__radd__(u) is NotImplemented) + assert_(np_u.__radd__(np_s) is NotImplemented) + assert_(np_u.__radd__(np_u) is NotImplemented) + assert_(np_u.__radd__(s) is NotImplemented) + assert_(np_u.__radd__(u) is NotImplemented) + assert_(s + np_s == b'defabc') + assert_(u + np_u == 'defabc') + + class MyStr(str, np.generic): + # would segfault + pass + + with assert_raises(TypeError): + # Previously worked, but gave completely wrong result + ret = s + MyStr('abc') + + class MyBytes(bytes, np.generic): + # would segfault + pass + + ret = s + MyBytes(b'abc') + assert(type(ret) is type(s)) + assert ret == b"defabc" + + def test_char_repeat(self): + np_s = np.bytes_('abc') + np_u = np.str_('abc') + res_s = b'abc' * 5 + res_u = 'abc' * 5 + assert_(np_s * 5 == res_s) + assert_(np_u * 5 == res_u) diff --git a/.venv/lib/python3.12/site-packages/numpy/core/tests/test_scalarmath.py b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_scalarmath.py new file mode 100644 index 00000000..9977c8b1 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_scalarmath.py @@ -0,0 +1,1100 @@ +import contextlib +import sys +import warnings +import itertools +import operator +import platform +from numpy._utils import _pep440 +import pytest +from hypothesis import given, settings +from hypothesis.strategies import sampled_from +from hypothesis.extra import numpy as hynp + +import numpy as np +from numpy.testing import ( + assert_, assert_equal, assert_raises, assert_almost_equal, + assert_array_equal, IS_PYPY, suppress_warnings, _gen_alignment_data, + assert_warns, _SUPPORTS_SVE, + ) + +try: + COMPILERS = np.show_config(mode="dicts")["Compilers"] + USING_CLANG_CL = COMPILERS["c"]["name"] == "clang-cl" +except TypeError: + USING_CLANG_CL = False + +types = [np.bool_, np.byte, np.ubyte, np.short, np.ushort, np.intc, np.uintc, + np.int_, np.uint, np.longlong, np.ulonglong, + np.single, np.double, np.longdouble, np.csingle, + np.cdouble, np.clongdouble] + +floating_types = np.floating.__subclasses__() +complex_floating_types = np.complexfloating.__subclasses__() + +objecty_things = [object(), None] + +reasonable_operators_for_scalars = [ + operator.lt, operator.le, operator.eq, operator.ne, operator.ge, + operator.gt, operator.add, operator.floordiv, operator.mod, + operator.mul, operator.pow, operator.sub, operator.truediv, +] + + +# This compares scalarmath against ufuncs. + +class TestTypes: + def test_types(self): + for atype in types: + a = atype(1) + assert_(a == 1, "error with %r: got %r" % (atype, a)) + + def test_type_add(self): + # list of types + for k, atype in enumerate(types): + a_scalar = atype(3) + a_array = np.array([3], dtype=atype) + for l, btype in enumerate(types): + b_scalar = btype(1) + b_array = np.array([1], dtype=btype) + c_scalar = a_scalar + b_scalar + c_array = a_array + b_array + # It was comparing the type numbers, but the new ufunc + # function-finding mechanism finds the lowest function + # to which both inputs can be cast - which produces 'l' + # when you do 'q' + 'b'. 
The old function finding mechanism + # skipped ahead based on the first argument, but that + # does not produce properly symmetric results... + assert_equal(c_scalar.dtype, c_array.dtype, + "error with types (%d/'%c' + %d/'%c')" % + (k, np.dtype(atype).char, l, np.dtype(btype).char)) + + def test_type_create(self): + for k, atype in enumerate(types): + a = np.array([1, 2, 3], atype) + b = atype([1, 2, 3]) + assert_equal(a, b) + + def test_leak(self): + # test leak of scalar objects + # a leak would show up in valgrind as still-reachable of ~2.6MB + for i in range(200000): + np.add(1, 1) + + +def check_ufunc_scalar_equivalence(op, arr1, arr2): + scalar1 = arr1[()] + scalar2 = arr2[()] + assert isinstance(scalar1, np.generic) + assert isinstance(scalar2, np.generic) + + if arr1.dtype.kind == "c" or arr2.dtype.kind == "c": + comp_ops = {operator.ge, operator.gt, operator.le, operator.lt} + if op in comp_ops and (np.isnan(scalar1) or np.isnan(scalar2)): + pytest.xfail("complex comp ufuncs use sort-order, scalars do not.") + if op == operator.pow and arr2.item() in [-1, 0, 0.5, 1, 2]: + # array**scalar special case can have different result dtype + # (Other powers may have issues also, but are not hit here.) + # TODO: It would be nice to resolve this issue. + pytest.skip("array**2 can have incorrect/weird result dtype") + + # ignore fpe's since they may just mismatch for integers anyway. + with warnings.catch_warnings(), np.errstate(all="ignore"): + # Comparisons DeprecationWarnings replacing errors (2022-03): + warnings.simplefilter("error", DeprecationWarning) + try: + res = op(arr1, arr2) + except Exception as e: + with pytest.raises(type(e)): + op(scalar1, scalar2) + else: + scalar_res = op(scalar1, scalar2) + assert_array_equal(scalar_res, res, strict=True) + + +@pytest.mark.slow +@settings(max_examples=10000, deadline=2000) +@given(sampled_from(reasonable_operators_for_scalars), + hynp.arrays(dtype=hynp.scalar_dtypes(), shape=()), + hynp.arrays(dtype=hynp.scalar_dtypes(), shape=())) +def test_array_scalar_ufunc_equivalence(op, arr1, arr2): + """ + This is a thorough test attempting to cover important promotion paths + and ensuring that arrays and scalars stay as aligned as possible. + However, if it creates troubles, it should maybe just be removed. + """ + check_ufunc_scalar_equivalence(op, arr1, arr2) + + +@pytest.mark.slow +@given(sampled_from(reasonable_operators_for_scalars), + hynp.scalar_dtypes(), hynp.scalar_dtypes()) +def test_array_scalar_ufunc_dtypes(op, dt1, dt2): + # Same as above, but don't worry about sampling weird values so that we + # do not have to sample as much + arr1 = np.array(2, dtype=dt1) + arr2 = np.array(3, dtype=dt2) # some power do weird things. 
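+    # The helper asserts that e.g. np.int8(2) + np.int16(3) matches
+    # np.array(2, np.int8) + np.array(3, np.int16) in both value and dtype.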
+ + check_ufunc_scalar_equivalence(op, arr1, arr2) + + +@pytest.mark.parametrize("fscalar", [np.float16, np.float32]) +def test_int_float_promotion_truediv(fscalar): + # Promotion for mixed int and float32/float16 must not go to float64 + i = np.int8(1) + f = fscalar(1) + expected = np.result_type(i, f) + assert (i / f).dtype == expected + assert (f / i).dtype == expected + # But normal int / int true division goes to float64: + assert (i / i).dtype == np.dtype("float64") + # For int16, result has to be ast least float32 (takes ufunc path): + assert (np.int16(1) / f).dtype == np.dtype("float32") + + +class TestBaseMath: + @pytest.mark.xfail(_SUPPORTS_SVE, reason="gh-22982") + def test_blocked(self): + # test alignments offsets for simd instructions + # alignments for vz + 2 * (vs - 1) + 1 + for dt, sz in [(np.float32, 11), (np.float64, 7), (np.int32, 11)]: + for out, inp1, inp2, msg in _gen_alignment_data(dtype=dt, + type='binary', + max_size=sz): + exp1 = np.ones_like(inp1) + inp1[...] = np.ones_like(inp1) + inp2[...] = np.zeros_like(inp2) + assert_almost_equal(np.add(inp1, inp2), exp1, err_msg=msg) + assert_almost_equal(np.add(inp1, 2), exp1 + 2, err_msg=msg) + assert_almost_equal(np.add(1, inp2), exp1, err_msg=msg) + + np.add(inp1, inp2, out=out) + assert_almost_equal(out, exp1, err_msg=msg) + + inp2[...] += np.arange(inp2.size, dtype=dt) + 1 + assert_almost_equal(np.square(inp2), + np.multiply(inp2, inp2), err_msg=msg) + # skip true divide for ints + if dt != np.int32: + assert_almost_equal(np.reciprocal(inp2), + np.divide(1, inp2), err_msg=msg) + + inp1[...] = np.ones_like(inp1) + np.add(inp1, 2, out=out) + assert_almost_equal(out, exp1 + 2, err_msg=msg) + inp2[...] = np.ones_like(inp2) + np.add(2, inp2, out=out) + assert_almost_equal(out, exp1 + 2, err_msg=msg) + + def test_lower_align(self): + # check data that is not aligned to element size + # i.e doubles are aligned to 4 bytes on i386 + d = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64) + o = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64) + assert_almost_equal(d + d, d * 2) + np.add(d, d, out=o) + np.add(np.ones_like(d), d, out=o) + np.add(d, np.ones_like(d), out=o) + np.add(np.ones_like(d), d) + np.add(d, np.ones_like(d)) + + +class TestPower: + def test_small_types(self): + for t in [np.int8, np.int16, np.float16]: + a = t(3) + b = a ** 4 + assert_(b == 81, "error with %r: got %r" % (t, b)) + + def test_large_types(self): + for t in [np.int32, np.int64, np.float32, np.float64, np.longdouble]: + a = t(51) + b = a ** 4 + msg = "error with %r: got %r" % (t, b) + if np.issubdtype(t, np.integer): + assert_(b == 6765201, msg) + else: + assert_almost_equal(b, 6765201, err_msg=msg) + + def test_integers_to_negative_integer_power(self): + # Note that the combination of uint64 with a signed integer + # has common type np.float64. The other combinations should all + # raise a ValueError for integer ** negative integer. + exp = [np.array(-1, dt)[()] for dt in 'bhilq'] + + # 1 ** -1 possible special case + base = [np.array(1, dt)[()] for dt in 'bhilqBHILQ'] + for i1, i2 in itertools.product(base, exp): + if i1.dtype != np.uint64: + assert_raises(ValueError, operator.pow, i1, i2) + else: + res = operator.pow(i1, i2) + assert_(res.dtype.type is np.float64) + assert_almost_equal(res, 1.) 
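+        # For instance, np.uint64(1) ** np.int64(-1) promotes to float64 and
+        # gives 1.0, whereas np.int64(1) ** np.int64(-1) raises ValueError.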
+ + # -1 ** -1 possible special case + base = [np.array(-1, dt)[()] for dt in 'bhilq'] + for i1, i2 in itertools.product(base, exp): + if i1.dtype != np.uint64: + assert_raises(ValueError, operator.pow, i1, i2) + else: + res = operator.pow(i1, i2) + assert_(res.dtype.type is np.float64) + assert_almost_equal(res, -1.) + + # 2 ** -1 perhaps generic + base = [np.array(2, dt)[()] for dt in 'bhilqBHILQ'] + for i1, i2 in itertools.product(base, exp): + if i1.dtype != np.uint64: + assert_raises(ValueError, operator.pow, i1, i2) + else: + res = operator.pow(i1, i2) + assert_(res.dtype.type is np.float64) + assert_almost_equal(res, .5) + + def test_mixed_types(self): + typelist = [np.int8, np.int16, np.float16, + np.float32, np.float64, np.int8, + np.int16, np.int32, np.int64] + for t1 in typelist: + for t2 in typelist: + a = t1(3) + b = t2(2) + result = a**b + msg = ("error with %r and %r:" + "got %r, expected %r") % (t1, t2, result, 9) + if np.issubdtype(np.dtype(result), np.integer): + assert_(result == 9, msg) + else: + assert_almost_equal(result, 9, err_msg=msg) + + def test_modular_power(self): + # modular power is not implemented, so ensure it errors + a = 5 + b = 4 + c = 10 + expected = pow(a, b, c) # noqa: F841 + for t in (np.int32, np.float32, np.complex64): + # note that 3-operand power only dispatches on the first argument + assert_raises(TypeError, operator.pow, t(a), b, c) + assert_raises(TypeError, operator.pow, np.array(t(a)), b, c) + + +def floordiv_and_mod(x, y): + return (x // y, x % y) + + +def _signs(dt): + if dt in np.typecodes['UnsignedInteger']: + return (+1,) + else: + return (+1, -1) + + +class TestModulus: + + def test_modulus_basic(self): + dt = np.typecodes['AllInteger'] + np.typecodes['Float'] + for op in [floordiv_and_mod, divmod]: + for dt1, dt2 in itertools.product(dt, dt): + for sg1, sg2 in itertools.product(_signs(dt1), _signs(dt2)): + fmt = 'op: %s, dt1: %s, dt2: %s, sg1: %s, sg2: %s' + msg = fmt % (op.__name__, dt1, dt2, sg1, sg2) + a = np.array(sg1*71, dtype=dt1)[()] + b = np.array(sg2*19, dtype=dt2)[()] + div, rem = op(a, b) + assert_equal(div*b + rem, a, err_msg=msg) + if sg2 == -1: + assert_(b < rem <= 0, msg) + else: + assert_(b > rem >= 0, msg) + + def test_float_modulus_exact(self): + # test that float results are exact for small integers. This also + # holds for the same integers scaled by powers of two. + nlst = list(range(-127, 0)) + plst = list(range(1, 128)) + dividend = nlst + [0] + plst + divisor = nlst + plst + arg = list(itertools.product(dividend, divisor)) + tgt = list(divmod(*t) for t in arg) + + a, b = np.array(arg, dtype=int).T + # convert exact integer results from Python to float so that + # signed zero can be used, it is checked. 
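+        # (e.g. for dividend 0 and divisor -5 the integer reference gives
+        # (0, 0), while the float dtypes are expected to return (-0.0, -0.0))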
+ tgtdiv, tgtrem = np.array(tgt, dtype=float).T + tgtdiv = np.where((tgtdiv == 0.0) & ((b < 0) ^ (a < 0)), -0.0, tgtdiv) + tgtrem = np.where((tgtrem == 0.0) & (b < 0), -0.0, tgtrem) + + for op in [floordiv_and_mod, divmod]: + for dt in np.typecodes['Float']: + msg = 'op: %s, dtype: %s' % (op.__name__, dt) + fa = a.astype(dt) + fb = b.astype(dt) + # use list comprehension so a_ and b_ are scalars + div, rem = zip(*[op(a_, b_) for a_, b_ in zip(fa, fb)]) + assert_equal(div, tgtdiv, err_msg=msg) + assert_equal(rem, tgtrem, err_msg=msg) + + def test_float_modulus_roundoff(self): + # gh-6127 + dt = np.typecodes['Float'] + for op in [floordiv_and_mod, divmod]: + for dt1, dt2 in itertools.product(dt, dt): + for sg1, sg2 in itertools.product((+1, -1), (+1, -1)): + fmt = 'op: %s, dt1: %s, dt2: %s, sg1: %s, sg2: %s' + msg = fmt % (op.__name__, dt1, dt2, sg1, sg2) + a = np.array(sg1*78*6e-8, dtype=dt1)[()] + b = np.array(sg2*6e-8, dtype=dt2)[()] + div, rem = op(a, b) + # Equal assertion should hold when fmod is used + assert_equal(div*b + rem, a, err_msg=msg) + if sg2 == -1: + assert_(b < rem <= 0, msg) + else: + assert_(b > rem >= 0, msg) + + def test_float_modulus_corner_cases(self): + # Check remainder magnitude. + for dt in np.typecodes['Float']: + b = np.array(1.0, dtype=dt) + a = np.nextafter(np.array(0.0, dtype=dt), -b) + rem = operator.mod(a, b) + assert_(rem <= b, 'dt: %s' % dt) + rem = operator.mod(-a, -b) + assert_(rem >= -b, 'dt: %s' % dt) + + # Check nans, inf + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "invalid value encountered in remainder") + sup.filter(RuntimeWarning, "divide by zero encountered in remainder") + sup.filter(RuntimeWarning, "divide by zero encountered in floor_divide") + sup.filter(RuntimeWarning, "divide by zero encountered in divmod") + sup.filter(RuntimeWarning, "invalid value encountered in divmod") + for dt in np.typecodes['Float']: + fone = np.array(1.0, dtype=dt) + fzer = np.array(0.0, dtype=dt) + finf = np.array(np.inf, dtype=dt) + fnan = np.array(np.nan, dtype=dt) + rem = operator.mod(fone, fzer) + assert_(np.isnan(rem), 'dt: %s' % dt) + # MSVC 2008 returns NaN here, so disable the check. + #rem = operator.mod(fone, finf) + #assert_(rem == fone, 'dt: %s' % dt) + rem = operator.mod(fone, fnan) + assert_(np.isnan(rem), 'dt: %s' % dt) + rem = operator.mod(finf, fone) + assert_(np.isnan(rem), 'dt: %s' % dt) + for op in [floordiv_and_mod, divmod]: + div, mod = op(fone, fzer) + assert_(np.isinf(div)) and assert_(np.isnan(mod)) + + def test_inplace_floordiv_handling(self): + # issue gh-12927 + # this only applies to in-place floordiv //=, because the output type + # promotes to float which does not fit + a = np.array([1, 2], np.int64) + b = np.array([1, 2], np.uint64) + with pytest.raises(TypeError, + match=r"Cannot cast ufunc 'floor_divide' output from"): + a //= b + + +class TestComplexDivision: + def test_zero_division(self): + with np.errstate(all="ignore"): + for t in [np.complex64, np.complex128]: + a = t(0.0) + b = t(1.0) + assert_(np.isinf(b/a)) + b = t(complex(np.inf, np.inf)) + assert_(np.isinf(b/a)) + b = t(complex(np.inf, np.nan)) + assert_(np.isinf(b/a)) + b = t(complex(np.nan, np.inf)) + assert_(np.isinf(b/a)) + b = t(complex(np.nan, np.nan)) + assert_(np.isnan(b/a)) + b = t(0.) 
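+                # finally 0/0, which should be nan rather than inf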
+ assert_(np.isnan(b/a)) + + def test_signed_zeros(self): + with np.errstate(all="ignore"): + for t in [np.complex64, np.complex128]: + # tupled (numerator, denominator, expected) + # for testing as expected == numerator/denominator + data = ( + (( 0.0,-1.0), ( 0.0, 1.0), (-1.0,-0.0)), + (( 0.0,-1.0), ( 0.0,-1.0), ( 1.0,-0.0)), + (( 0.0,-1.0), (-0.0,-1.0), ( 1.0, 0.0)), + (( 0.0,-1.0), (-0.0, 1.0), (-1.0, 0.0)), + (( 0.0, 1.0), ( 0.0,-1.0), (-1.0, 0.0)), + (( 0.0,-1.0), ( 0.0,-1.0), ( 1.0,-0.0)), + ((-0.0,-1.0), ( 0.0,-1.0), ( 1.0,-0.0)), + ((-0.0, 1.0), ( 0.0,-1.0), (-1.0,-0.0)) + ) + for cases in data: + n = cases[0] + d = cases[1] + ex = cases[2] + result = t(complex(n[0], n[1])) / t(complex(d[0], d[1])) + # check real and imag parts separately to avoid comparison + # in array context, which does not account for signed zeros + assert_equal(result.real, ex[0]) + assert_equal(result.imag, ex[1]) + + def test_branches(self): + with np.errstate(all="ignore"): + for t in [np.complex64, np.complex128]: + # tupled (numerator, denominator, expected) + # for testing as expected == numerator/denominator + data = list() + + # trigger branch: real(fabs(denom)) > imag(fabs(denom)) + # followed by else condition as neither are == 0 + data.append((( 2.0, 1.0), ( 2.0, 1.0), (1.0, 0.0))) + + # trigger branch: real(fabs(denom)) > imag(fabs(denom)) + # followed by if condition as both are == 0 + # is performed in test_zero_division(), so this is skipped + + # trigger else if branch: real(fabs(denom)) < imag(fabs(denom)) + data.append((( 1.0, 2.0), ( 1.0, 2.0), (1.0, 0.0))) + + for cases in data: + n = cases[0] + d = cases[1] + ex = cases[2] + result = t(complex(n[0], n[1])) / t(complex(d[0], d[1])) + # check real and imag parts separately to avoid comparison + # in array context, which does not account for signed zeros + assert_equal(result.real, ex[0]) + assert_equal(result.imag, ex[1]) + + +class TestConversion: + def test_int_from_long(self): + l = [1e6, 1e12, 1e18, -1e6, -1e12, -1e18] + li = [10**6, 10**12, 10**18, -10**6, -10**12, -10**18] + for T in [None, np.float64, np.int64]: + a = np.array(l, dtype=T) + assert_equal([int(_m) for _m in a], li) + + a = np.array(l[:3], dtype=np.uint64) + assert_equal([int(_m) for _m in a], li[:3]) + + def test_iinfo_long_values(self): + for code in 'bBhH': + with pytest.warns(DeprecationWarning): + res = np.array(np.iinfo(code).max + 1, dtype=code) + tgt = np.iinfo(code).min + assert_(res == tgt) + + for code in np.typecodes['AllInteger']: + res = np.array(np.iinfo(code).max, dtype=code) + tgt = np.iinfo(code).max + assert_(res == tgt) + + for code in np.typecodes['AllInteger']: + res = np.dtype(code).type(np.iinfo(code).max) + tgt = np.iinfo(code).max + assert_(res == tgt) + + def test_int_raise_behaviour(self): + def overflow_error_func(dtype): + dtype(np.iinfo(dtype).max + 1) + + for code in [np.int_, np.uint, np.longlong, np.ulonglong]: + assert_raises(OverflowError, overflow_error_func, code) + + def test_int_from_infinite_longdouble(self): + # gh-627 + x = np.longdouble(np.inf) + assert_raises(OverflowError, int, x) + with suppress_warnings() as sup: + sup.record(np.ComplexWarning) + x = np.clongdouble(np.inf) + assert_raises(OverflowError, int, x) + assert_equal(len(sup.log), 1) + + @pytest.mark.skipif(not IS_PYPY, reason="Test is PyPy only (gh-9972)") + def test_int_from_infinite_longdouble___int__(self): + x = np.longdouble(np.inf) + assert_raises(OverflowError, x.__int__) + with suppress_warnings() as sup: + sup.record(np.ComplexWarning) + x = 
np.clongdouble(np.inf) + assert_raises(OverflowError, x.__int__) + assert_equal(len(sup.log), 1) + + @pytest.mark.skipif(np.finfo(np.double) == np.finfo(np.longdouble), + reason="long double is same as double") + @pytest.mark.skipif(platform.machine().startswith("ppc"), + reason="IBM double double") + def test_int_from_huge_longdouble(self): + # Produce a longdouble that would overflow a double, + # use exponent that avoids bug in Darwin pow function. + exp = np.finfo(np.double).maxexp - 1 + huge_ld = 2 * 1234 * np.longdouble(2) ** exp + huge_i = 2 * 1234 * 2 ** exp + assert_(huge_ld != np.inf) + assert_equal(int(huge_ld), huge_i) + + def test_int_from_longdouble(self): + x = np.longdouble(1.5) + assert_equal(int(x), 1) + x = np.longdouble(-10.5) + assert_equal(int(x), -10) + + def test_numpy_scalar_relational_operators(self): + # All integer + for dt1 in np.typecodes['AllInteger']: + assert_(1 > np.array(0, dtype=dt1)[()], "type %s failed" % (dt1,)) + assert_(not 1 < np.array(0, dtype=dt1)[()], "type %s failed" % (dt1,)) + + for dt2 in np.typecodes['AllInteger']: + assert_(np.array(1, dtype=dt1)[()] > np.array(0, dtype=dt2)[()], + "type %s and %s failed" % (dt1, dt2)) + assert_(not np.array(1, dtype=dt1)[()] < np.array(0, dtype=dt2)[()], + "type %s and %s failed" % (dt1, dt2)) + + #Unsigned integers + for dt1 in 'BHILQP': + assert_(-1 < np.array(1, dtype=dt1)[()], "type %s failed" % (dt1,)) + assert_(not -1 > np.array(1, dtype=dt1)[()], "type %s failed" % (dt1,)) + assert_(-1 != np.array(1, dtype=dt1)[()], "type %s failed" % (dt1,)) + + #unsigned vs signed + for dt2 in 'bhilqp': + assert_(np.array(1, dtype=dt1)[()] > np.array(-1, dtype=dt2)[()], + "type %s and %s failed" % (dt1, dt2)) + assert_(not np.array(1, dtype=dt1)[()] < np.array(-1, dtype=dt2)[()], + "type %s and %s failed" % (dt1, dt2)) + assert_(np.array(1, dtype=dt1)[()] != np.array(-1, dtype=dt2)[()], + "type %s and %s failed" % (dt1, dt2)) + + #Signed integers and floats + for dt1 in 'bhlqp' + np.typecodes['Float']: + assert_(1 > np.array(-1, dtype=dt1)[()], "type %s failed" % (dt1,)) + assert_(not 1 < np.array(-1, dtype=dt1)[()], "type %s failed" % (dt1,)) + assert_(-1 == np.array(-1, dtype=dt1)[()], "type %s failed" % (dt1,)) + + for dt2 in 'bhlqp' + np.typecodes['Float']: + assert_(np.array(1, dtype=dt1)[()] > np.array(-1, dtype=dt2)[()], + "type %s and %s failed" % (dt1, dt2)) + assert_(not np.array(1, dtype=dt1)[()] < np.array(-1, dtype=dt2)[()], + "type %s and %s failed" % (dt1, dt2)) + assert_(np.array(-1, dtype=dt1)[()] == np.array(-1, dtype=dt2)[()], + "type %s and %s failed" % (dt1, dt2)) + + def test_scalar_comparison_to_none(self): + # Scalars should just return False and not give a warnings. + # The comparisons are flagged by pep8, ignore that. + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', FutureWarning) + assert_(not np.float32(1) == None) + assert_(not np.str_('test') == None) + # This is dubious (see below): + assert_(not np.datetime64('NaT') == None) + + assert_(np.float32(1) != None) + assert_(np.str_('test') != None) + # This is dubious (see below): + assert_(np.datetime64('NaT') != None) + assert_(len(w) == 0) + + # For documentation purposes, this is why the datetime is dubious. + # At the time of deprecation this was no behaviour change, but + # it has to be considered when the deprecations are done. 
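+        # In other words, the ufunc currently disagrees with the operator:
+        # np.equal(np.datetime64('NaT'), None) is truthy (asserted below),
+        # while `np.datetime64('NaT') == None` is False (asserted above).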
+ assert_(np.equal(np.datetime64('NaT'), None)) + + +#class TestRepr: +# def test_repr(self): +# for t in types: +# val = t(1197346475.0137341) +# val_repr = repr(val) +# val2 = eval(val_repr) +# assert_equal( val, val2 ) + + +class TestRepr: + def _test_type_repr(self, t): + finfo = np.finfo(t) + last_fraction_bit_idx = finfo.nexp + finfo.nmant + last_exponent_bit_idx = finfo.nexp + storage_bytes = np.dtype(t).itemsize*8 + # could add some more types to the list below + for which in ['small denorm', 'small norm']: + # Values from https://en.wikipedia.org/wiki/IEEE_754 + constr = np.array([0x00]*storage_bytes, dtype=np.uint8) + if which == 'small denorm': + byte = last_fraction_bit_idx // 8 + bytebit = 7-(last_fraction_bit_idx % 8) + constr[byte] = 1 << bytebit + elif which == 'small norm': + byte = last_exponent_bit_idx // 8 + bytebit = 7-(last_exponent_bit_idx % 8) + constr[byte] = 1 << bytebit + else: + raise ValueError('hmm') + val = constr.view(t)[0] + val_repr = repr(val) + val2 = t(eval(val_repr)) + if not (val2 == 0 and val < 1e-100): + assert_equal(val, val2) + + def test_float_repr(self): + # long double test cannot work, because eval goes through a python + # float + for t in [np.float32, np.float64]: + self._test_type_repr(t) + + +if not IS_PYPY: + # sys.getsizeof() is not valid on PyPy + class TestSizeOf: + + def test_equal_nbytes(self): + for type in types: + x = type(0) + assert_(sys.getsizeof(x) > x.nbytes) + + def test_error(self): + d = np.float32() + assert_raises(TypeError, d.__sizeof__, "a") + + +class TestMultiply: + def test_seq_repeat(self): + # Test that basic sequences get repeated when multiplied with + # numpy integers. And errors are raised when multiplied with others. + # Some of this behaviour may be controversial and could be open for + # change. + accepted_types = set(np.typecodes["AllInteger"]) + deprecated_types = {'?'} + forbidden_types = ( + set(np.typecodes["All"]) - accepted_types - deprecated_types) + forbidden_types -= {'V'} # can't default-construct void scalars + + for seq_type in (list, tuple): + seq = seq_type([1, 2, 3]) + for numpy_type in accepted_types: + i = np.dtype(numpy_type).type(2) + assert_equal(seq * i, seq * int(i)) + assert_equal(i * seq, int(i) * seq) + + for numpy_type in deprecated_types: + i = np.dtype(numpy_type).type() + assert_equal( + assert_warns(DeprecationWarning, operator.mul, seq, i), + seq * int(i)) + assert_equal( + assert_warns(DeprecationWarning, operator.mul, i, seq), + int(i) * seq) + + for numpy_type in forbidden_types: + i = np.dtype(numpy_type).type() + assert_raises(TypeError, operator.mul, seq, i) + assert_raises(TypeError, operator.mul, i, seq) + + def test_no_seq_repeat_basic_array_like(self): + # Test that an array-like which does not know how to be multiplied + # does not attempt sequence repeat (raise TypeError). + # See also gh-7428. + class ArrayLike: + def __init__(self, arr): + self.arr = arr + def __array__(self): + return self.arr + + # Test for simple ArrayLike above and memoryviews (original report) + for arr_like in (ArrayLike(np.ones(3)), memoryview(np.ones(3))): + assert_array_equal(arr_like * np.float32(3.), np.full(3, 3.)) + assert_array_equal(np.float32(3.) 
* arr_like, np.full(3, 3.)) + assert_array_equal(arr_like * np.int_(3), np.full(3, 3)) + assert_array_equal(np.int_(3) * arr_like, np.full(3, 3)) + + +class TestNegative: + def test_exceptions(self): + a = np.ones((), dtype=np.bool_)[()] + assert_raises(TypeError, operator.neg, a) + + def test_result(self): + types = np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + with suppress_warnings() as sup: + sup.filter(RuntimeWarning) + for dt in types: + a = np.ones((), dtype=dt)[()] + if dt in np.typecodes['UnsignedInteger']: + st = np.dtype(dt).type + max = st(np.iinfo(dt).max) + assert_equal(operator.neg(a), max) + else: + assert_equal(operator.neg(a) + a, 0) + +class TestSubtract: + def test_exceptions(self): + a = np.ones((), dtype=np.bool_)[()] + assert_raises(TypeError, operator.sub, a, a) + + def test_result(self): + types = np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + with suppress_warnings() as sup: + sup.filter(RuntimeWarning) + for dt in types: + a = np.ones((), dtype=dt)[()] + assert_equal(operator.sub(a, a), 0) + + +class TestAbs: + def _test_abs_func(self, absfunc, test_dtype): + x = test_dtype(-1.5) + assert_equal(absfunc(x), 1.5) + x = test_dtype(0.0) + res = absfunc(x) + # assert_equal() checks zero signedness + assert_equal(res, 0.0) + x = test_dtype(-0.0) + res = absfunc(x) + assert_equal(res, 0.0) + + x = test_dtype(np.finfo(test_dtype).max) + assert_equal(absfunc(x), x.real) + + with suppress_warnings() as sup: + sup.filter(UserWarning) + x = test_dtype(np.finfo(test_dtype).tiny) + assert_equal(absfunc(x), x.real) + + x = test_dtype(np.finfo(test_dtype).min) + assert_equal(absfunc(x), -x.real) + + @pytest.mark.parametrize("dtype", floating_types + complex_floating_types) + def test_builtin_abs(self, dtype): + if ( + sys.platform == "cygwin" and dtype == np.clongdouble and + ( + _pep440.parse(platform.release().split("-")[0]) + < _pep440.Version("3.3.0") + ) + ): + pytest.xfail( + reason="absl is computed in double precision on cygwin < 3.3" + ) + self._test_abs_func(abs, dtype) + + @pytest.mark.parametrize("dtype", floating_types + complex_floating_types) + def test_numpy_abs(self, dtype): + if ( + sys.platform == "cygwin" and dtype == np.clongdouble and + ( + _pep440.parse(platform.release().split("-")[0]) + < _pep440.Version("3.3.0") + ) + ): + pytest.xfail( + reason="absl is computed in double precision on cygwin < 3.3" + ) + self._test_abs_func(np.abs, dtype) + +class TestBitShifts: + + @pytest.mark.parametrize('type_code', np.typecodes['AllInteger']) + @pytest.mark.parametrize('op', + [operator.rshift, operator.lshift], ids=['>>', '<<']) + def test_shift_all_bits(self, type_code, op): + """Shifts where the shift amount is the width of the type or wider """ + if ( + USING_CLANG_CL and + type_code in ("l", "L") and + op is operator.lshift + ): + pytest.xfail("Failing on clang-cl builds") + # gh-2449 + dt = np.dtype(type_code) + nbits = dt.itemsize * 8 + for val in [5, -5]: + for shift in [nbits, nbits + 4]: + val_scl = np.array(val).astype(dt)[()] + shift_scl = dt.type(shift) + res_scl = op(val_scl, shift_scl) + if val_scl < 0 and op is operator.rshift: + # sign bit is preserved + assert_equal(res_scl, -1) + else: + assert_equal(res_scl, 0) + + # Result on scalars should be the same as on arrays + val_arr = np.array([val_scl]*32, dtype=dt) + shift_arr = np.array([shift]*32, dtype=dt) + res_arr = op(val_arr, shift_arr) + assert_equal(res_arr, res_scl) + + +class TestHash: + @pytest.mark.parametrize("type_code", np.typecodes['AllInteger']) + def 
test_integer_hashes(self, type_code): + scalar = np.dtype(type_code).type + for i in range(128): + assert hash(i) == hash(scalar(i)) + + @pytest.mark.parametrize("type_code", np.typecodes['AllFloat']) + def test_float_and_complex_hashes(self, type_code): + scalar = np.dtype(type_code).type + for val in [np.pi, np.inf, 3, 6.]: + numpy_val = scalar(val) + # Cast back to Python, in case the NumPy scalar has less precision + if numpy_val.dtype.kind == 'c': + val = complex(numpy_val) + else: + val = float(numpy_val) + assert val == numpy_val + assert hash(val) == hash(numpy_val) + + if hash(float(np.nan)) != hash(float(np.nan)): + # If Python distinguishes different NaNs we do so too (gh-18833) + assert hash(scalar(np.nan)) != hash(scalar(np.nan)) + + @pytest.mark.parametrize("type_code", np.typecodes['Complex']) + def test_complex_hashes(self, type_code): + # Test some complex valued hashes specifically: + scalar = np.dtype(type_code).type + for val in [np.pi+1j, np.inf-3j, 3j, 6.+1j]: + numpy_val = scalar(val) + assert hash(complex(numpy_val)) == hash(numpy_val) + + +@contextlib.contextmanager +def recursionlimit(n): + o = sys.getrecursionlimit() + try: + sys.setrecursionlimit(n) + yield + finally: + sys.setrecursionlimit(o) + + +@given(sampled_from(objecty_things), + sampled_from(reasonable_operators_for_scalars), + sampled_from(types)) +def test_operator_object_left(o, op, type_): + try: + with recursionlimit(200): + op(o, type_(1)) + except TypeError: + pass + + +@given(sampled_from(objecty_things), + sampled_from(reasonable_operators_for_scalars), + sampled_from(types)) +def test_operator_object_right(o, op, type_): + try: + with recursionlimit(200): + op(type_(1), o) + except TypeError: + pass + + +@given(sampled_from(reasonable_operators_for_scalars), + sampled_from(types), + sampled_from(types)) +def test_operator_scalars(op, type1, type2): + try: + op(type1(1), type2(1)) + except TypeError: + pass + + +@pytest.mark.parametrize("op", reasonable_operators_for_scalars) +@pytest.mark.parametrize("val", [None, 2**64]) +def test_longdouble_inf_loop(op, val): + # Note: The 2**64 value will pass once NEP 50 is adopted. + try: + op(np.longdouble(3), val) + except TypeError: + pass + try: + op(val, np.longdouble(3)) + except TypeError: + pass + + +@pytest.mark.parametrize("op", reasonable_operators_for_scalars) +@pytest.mark.parametrize("val", [None, 2**64]) +def test_clongdouble_inf_loop(op, val): + # Note: The 2**64 value will pass once NEP 50 is adopted. 
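+    # As with the longdouble variant above, the point is that these calls
+    # terminate (either succeeding or raising TypeError) rather than
+    # looping forever.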
+ try: + op(np.clongdouble(3), val) + except TypeError: + pass + try: + op(val, np.longdouble(3)) + except TypeError: + pass + + +@pytest.mark.parametrize("dtype", np.typecodes["AllInteger"]) +@pytest.mark.parametrize("operation", [ + lambda min, max: max + max, + lambda min, max: min - max, + lambda min, max: max * max], ids=["+", "-", "*"]) +def test_scalar_integer_operation_overflow(dtype, operation): + st = np.dtype(dtype).type + min = st(np.iinfo(dtype).min) + max = st(np.iinfo(dtype).max) + + with pytest.warns(RuntimeWarning, match="overflow encountered"): + operation(min, max) + + +@pytest.mark.parametrize("dtype", np.typecodes["Integer"]) +@pytest.mark.parametrize("operation", [ + lambda min, neg_1: -min, + lambda min, neg_1: abs(min), + lambda min, neg_1: min * neg_1, + pytest.param(lambda min, neg_1: min // neg_1, + marks=pytest.mark.skip(reason="broken on some platforms"))], + ids=["neg", "abs", "*", "//"]) +def test_scalar_signed_integer_overflow(dtype, operation): + # The minimum signed integer can "overflow" for some additional operations + st = np.dtype(dtype).type + min = st(np.iinfo(dtype).min) + neg_1 = st(-1) + + with pytest.warns(RuntimeWarning, match="overflow encountered"): + operation(min, neg_1) + + +@pytest.mark.parametrize("dtype", np.typecodes["UnsignedInteger"]) +def test_scalar_unsigned_integer_overflow(dtype): + val = np.dtype(dtype).type(8) + with pytest.warns(RuntimeWarning, match="overflow encountered"): + -val + + zero = np.dtype(dtype).type(0) + -zero # does not warn + +@pytest.mark.parametrize("dtype", np.typecodes["AllInteger"]) +@pytest.mark.parametrize("operation", [ + lambda val, zero: val // zero, + lambda val, zero: val % zero, ], ids=["//", "%"]) +def test_scalar_integer_operation_divbyzero(dtype, operation): + st = np.dtype(dtype).type + val = st(100) + zero = st(0) + + with pytest.warns(RuntimeWarning, match="divide by zero"): + operation(val, zero) + + +ops_with_names = [ + ("__lt__", "__gt__", operator.lt, True), + ("__le__", "__ge__", operator.le, True), + ("__eq__", "__eq__", operator.eq, True), + # Note __op__ and __rop__ may be identical here: + ("__ne__", "__ne__", operator.ne, True), + ("__gt__", "__lt__", operator.gt, True), + ("__ge__", "__le__", operator.ge, True), + ("__floordiv__", "__rfloordiv__", operator.floordiv, False), + ("__truediv__", "__rtruediv__", operator.truediv, False), + ("__add__", "__radd__", operator.add, False), + ("__mod__", "__rmod__", operator.mod, False), + ("__mul__", "__rmul__", operator.mul, False), + ("__pow__", "__rpow__", operator.pow, False), + ("__sub__", "__rsub__", operator.sub, False), +] + + +@pytest.mark.parametrize(["__op__", "__rop__", "op", "cmp"], ops_with_names) +@pytest.mark.parametrize("sctype", [np.float32, np.float64, np.longdouble]) +def test_subclass_deferral(sctype, __op__, __rop__, op, cmp): + """ + This test covers scalar subclass deferral. Note that this is exceedingly + complicated, especially since it tends to fall back to the array paths and + these additionally add the "array priority" mechanism. + + The behaviour was modified subtly in 1.22 (to make it closer to how Python + scalars work). Due to its complexity and the fact that subclassing NumPy + scalars is probably a bad idea to begin with. There is probably room + for adjustments here. 
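+
+    The `op_func`/`rop_func` stand-ins below simply return the dunder name,
+    so each assert can tell which side's method handled the operation.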
+ """ + class myf_simple1(sctype): + pass + + class myf_simple2(sctype): + pass + + def op_func(self, other): + return __op__ + + def rop_func(self, other): + return __rop__ + + myf_op = type("myf_op", (sctype,), {__op__: op_func, __rop__: rop_func}) + + # inheritance has to override, or this is correctly lost: + res = op(myf_simple1(1), myf_simple2(2)) + assert type(res) == sctype or type(res) == np.bool_ + assert op(myf_simple1(1), myf_simple2(2)) == op(1, 2) # inherited + + # Two independent subclasses do not really define an order. This could + # be attempted, but we do not since Python's `int` does neither: + assert op(myf_op(1), myf_simple1(2)) == __op__ + assert op(myf_simple1(1), myf_op(2)) == op(1, 2) # inherited + + +def test_longdouble_complex(): + # Simple test to check longdouble and complex combinations, since these + # need to go through promotion, which longdouble needs to be careful about. + x = np.longdouble(1) + assert x + 1j == 1+1j + assert 1j + x == 1+1j + + +@pytest.mark.parametrize(["__op__", "__rop__", "op", "cmp"], ops_with_names) +@pytest.mark.parametrize("subtype", [float, int, complex, np.float16]) +@np._no_nep50_warning() +def test_pyscalar_subclasses(subtype, __op__, __rop__, op, cmp): + def op_func(self, other): + return __op__ + + def rop_func(self, other): + return __rop__ + + # Check that deferring is indicated using `__array_ufunc__`: + myt = type("myt", (subtype,), + {__op__: op_func, __rop__: rop_func, "__array_ufunc__": None}) + + # Just like normally, we should never presume we can modify the float. + assert op(myt(1), np.float64(2)) == __op__ + assert op(np.float64(1), myt(2)) == __rop__ + + if op in {operator.mod, operator.floordiv} and subtype == complex: + return # module is not support for complex. Do not test. + + if __rop__ == __op__: + return + + # When no deferring is indicated, subclasses are handled normally. + myt = type("myt", (subtype,), {__rop__: rop_func}) + + # Check for float32, as a float subclass float64 may behave differently + res = op(myt(1), np.float16(2)) + expected = op(subtype(1), np.float16(2)) + assert res == expected + assert type(res) == type(expected) + res = op(np.float32(2), myt(1)) + expected = op(np.float32(2), subtype(1)) + assert res == expected + assert type(res) == type(expected) + + # Same check for longdouble: + res = op(myt(1), np.longdouble(2)) + expected = op(subtype(1), np.longdouble(2)) + assert res == expected + assert type(res) == type(expected) + res = op(np.float32(2), myt(1)) + expected = op(np.longdouble(2), subtype(1)) + assert res == expected diff --git a/.venv/lib/python3.12/site-packages/numpy/core/tests/test_scalarprint.py b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_scalarprint.py new file mode 100644 index 00000000..98d1f4aa --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_scalarprint.py @@ -0,0 +1,382 @@ +""" Test printing of scalar types. 
+ +""" +import code +import platform +import pytest +import sys + +from tempfile import TemporaryFile +import numpy as np +from numpy.testing import assert_, assert_equal, assert_raises, IS_MUSL + +class TestRealScalars: + def test_str(self): + svals = [0.0, -0.0, 1, -1, np.inf, -np.inf, np.nan] + styps = [np.float16, np.float32, np.float64, np.longdouble] + wanted = [ + ['0.0', '0.0', '0.0', '0.0' ], + ['-0.0', '-0.0', '-0.0', '-0.0'], + ['1.0', '1.0', '1.0', '1.0' ], + ['-1.0', '-1.0', '-1.0', '-1.0'], + ['inf', 'inf', 'inf', 'inf' ], + ['-inf', '-inf', '-inf', '-inf'], + ['nan', 'nan', 'nan', 'nan']] + + for wants, val in zip(wanted, svals): + for want, styp in zip(wants, styps): + msg = 'for str({}({}))'.format(np.dtype(styp).name, repr(val)) + assert_equal(str(styp(val)), want, err_msg=msg) + + def test_scalar_cutoffs(self): + # test that both the str and repr of np.float64 behaves + # like python floats in python3. + def check(v): + assert_equal(str(np.float64(v)), str(v)) + assert_equal(str(np.float64(v)), repr(v)) + assert_equal(repr(np.float64(v)), repr(v)) + assert_equal(repr(np.float64(v)), str(v)) + + # check we use the same number of significant digits + check(1.12345678901234567890) + check(0.0112345678901234567890) + + # check switch from scientific output to positional and back + check(1e-5) + check(1e-4) + check(1e15) + check(1e16) + + def test_py2_float_print(self): + # gh-10753 + # In python2, the python float type implements an obsolete method + # tp_print, which overrides tp_repr and tp_str when using "print" to + # output to a "real file" (ie, not a StringIO). Make sure we don't + # inherit it. + x = np.double(0.1999999999999) + with TemporaryFile('r+t') as f: + print(x, file=f) + f.seek(0) + output = f.read() + assert_equal(output, str(x) + '\n') + # In python2 the value float('0.1999999999999') prints with reduced + # precision as '0.2', but we want numpy's np.double('0.1999999999999') + # to print the unique value, '0.1999999999999'. + + # gh-11031 + # Only in the python2 interactive shell and when stdout is a "real" + # file, the output of the last command is printed to stdout without + # Py_PRINT_RAW (unlike the print statement) so `>>> x` and `>>> print + # x` are potentially different. Make sure they are the same. The only + # way I found to get prompt-like output is using an actual prompt from + # the 'code' module. Again, must use tempfile to get a "real" file. + + # dummy user-input which enters one line and then ctrl-Ds. + def userinput(): + yield 'np.sqrt(2)' + raise EOFError + gen = userinput() + input_func = lambda prompt="": next(gen) + + with TemporaryFile('r+t') as fo, TemporaryFile('r+t') as fe: + orig_stdout, orig_stderr = sys.stdout, sys.stderr + sys.stdout, sys.stderr = fo, fe + + code.interact(local={'np': np}, readfunc=input_func, banner='') + + sys.stdout, sys.stderr = orig_stdout, orig_stderr + + fo.seek(0) + capture = fo.read().strip() + + assert_equal(capture, repr(np.sqrt(2))) + + def test_dragon4(self): + # these tests are adapted from Ryan Juckett's dragon4 implementation, + # see dragon4.c for details. 
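+        # The helpers below are just shorthands, e.g. fpos32('1.0') is
+        # np.format_float_positional(np.float32('1.0')) and fsci64 is the
+        # scientific-notation counterpart for float64.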
+ + fpos32 = lambda x, **k: np.format_float_positional(np.float32(x), **k) + fsci32 = lambda x, **k: np.format_float_scientific(np.float32(x), **k) + fpos64 = lambda x, **k: np.format_float_positional(np.float64(x), **k) + fsci64 = lambda x, **k: np.format_float_scientific(np.float64(x), **k) + + preckwd = lambda prec: {'unique': False, 'precision': prec} + + assert_equal(fpos32('1.0'), "1.") + assert_equal(fsci32('1.0'), "1.e+00") + assert_equal(fpos32('10.234'), "10.234") + assert_equal(fpos32('-10.234'), "-10.234") + assert_equal(fsci32('10.234'), "1.0234e+01") + assert_equal(fsci32('-10.234'), "-1.0234e+01") + assert_equal(fpos32('1000.0'), "1000.") + assert_equal(fpos32('1.0', precision=0), "1.") + assert_equal(fsci32('1.0', precision=0), "1.e+00") + assert_equal(fpos32('10.234', precision=0), "10.") + assert_equal(fpos32('-10.234', precision=0), "-10.") + assert_equal(fsci32('10.234', precision=0), "1.e+01") + assert_equal(fsci32('-10.234', precision=0), "-1.e+01") + assert_equal(fpos32('10.234', precision=2), "10.23") + assert_equal(fsci32('-10.234', precision=2), "-1.02e+01") + assert_equal(fsci64('9.9999999999999995e-08', **preckwd(16)), + '9.9999999999999995e-08') + assert_equal(fsci64('9.8813129168249309e-324', **preckwd(16)), + '9.8813129168249309e-324') + assert_equal(fsci64('9.9999999999999694e-311', **preckwd(16)), + '9.9999999999999694e-311') + + + # test rounding + # 3.1415927410 is closest float32 to np.pi + assert_equal(fpos32('3.14159265358979323846', **preckwd(10)), + "3.1415927410") + assert_equal(fsci32('3.14159265358979323846', **preckwd(10)), + "3.1415927410e+00") + assert_equal(fpos64('3.14159265358979323846', **preckwd(10)), + "3.1415926536") + assert_equal(fsci64('3.14159265358979323846', **preckwd(10)), + "3.1415926536e+00") + # 299792448 is closest float32 to 299792458 + assert_equal(fpos32('299792458.0', **preckwd(5)), "299792448.00000") + assert_equal(fsci32('299792458.0', **preckwd(5)), "2.99792e+08") + assert_equal(fpos64('299792458.0', **preckwd(5)), "299792458.00000") + assert_equal(fsci64('299792458.0', **preckwd(5)), "2.99792e+08") + + assert_equal(fpos32('3.14159265358979323846', **preckwd(25)), + "3.1415927410125732421875000") + assert_equal(fpos64('3.14159265358979323846', **preckwd(50)), + "3.14159265358979311599796346854418516159057617187500") + assert_equal(fpos64('3.14159265358979323846'), "3.141592653589793") + + + # smallest numbers + assert_equal(fpos32(0.5**(126 + 23), unique=False, precision=149), + "0.00000000000000000000000000000000000000000000140129846432" + "4817070923729583289916131280261941876515771757068283889791" + "08268586060148663818836212158203125") + + assert_equal(fpos64(5e-324, unique=False, precision=1074), + "0.00000000000000000000000000000000000000000000000000000000" + "0000000000000000000000000000000000000000000000000000000000" + "0000000000000000000000000000000000000000000000000000000000" + "0000000000000000000000000000000000000000000000000000000000" + "0000000000000000000000000000000000000000000000000000000000" + "0000000000000000000000000000000000049406564584124654417656" + "8792868221372365059802614324764425585682500675507270208751" + "8652998363616359923797965646954457177309266567103559397963" + "9877479601078187812630071319031140452784581716784898210368" + "8718636056998730723050006387409153564984387312473397273169" + "6151400317153853980741262385655911710266585566867681870395" + "6031062493194527159149245532930545654440112748012970999954" + "1931989409080416563324524757147869014726780159355238611550" + 
"1348035264934720193790268107107491703332226844753335720832" + "4319360923828934583680601060115061698097530783422773183292" + "4790498252473077637592724787465608477820373446969953364701" + "7972677717585125660551199131504891101451037862738167250955" + "8373897335989936648099411642057026370902792427675445652290" + "87538682506419718265533447265625") + + # largest numbers + f32x = np.finfo(np.float32).max + assert_equal(fpos32(f32x, **preckwd(0)), + "340282346638528859811704183484516925440.") + assert_equal(fpos64(np.finfo(np.float64).max, **preckwd(0)), + "1797693134862315708145274237317043567980705675258449965989" + "1747680315726078002853876058955863276687817154045895351438" + "2464234321326889464182768467546703537516986049910576551282" + "0762454900903893289440758685084551339423045832369032229481" + "6580855933212334827479782620414472316873817718091929988125" + "0404026184124858368.") + # Warning: In unique mode only the integer digits necessary for + # uniqueness are computed, the rest are 0. + assert_equal(fpos32(f32x), + "340282350000000000000000000000000000000.") + + # Further tests of zero-padding vs rounding in different combinations + # of unique, fractional, precision, min_digits + # precision can only reduce digits, not add them. + # min_digits can only extend digits, not reduce them. + assert_equal(fpos32(f32x, unique=True, fractional=True, precision=0), + "340282350000000000000000000000000000000.") + assert_equal(fpos32(f32x, unique=True, fractional=True, precision=4), + "340282350000000000000000000000000000000.") + assert_equal(fpos32(f32x, unique=True, fractional=True, min_digits=0), + "340282346638528859811704183484516925440.") + assert_equal(fpos32(f32x, unique=True, fractional=True, min_digits=4), + "340282346638528859811704183484516925440.0000") + assert_equal(fpos32(f32x, unique=True, fractional=True, + min_digits=4, precision=4), + "340282346638528859811704183484516925440.0000") + assert_raises(ValueError, fpos32, f32x, unique=True, fractional=False, + precision=0) + assert_equal(fpos32(f32x, unique=True, fractional=False, precision=4), + "340300000000000000000000000000000000000.") + assert_equal(fpos32(f32x, unique=True, fractional=False, precision=20), + "340282350000000000000000000000000000000.") + assert_equal(fpos32(f32x, unique=True, fractional=False, min_digits=4), + "340282350000000000000000000000000000000.") + assert_equal(fpos32(f32x, unique=True, fractional=False, + min_digits=20), + "340282346638528859810000000000000000000.") + assert_equal(fpos32(f32x, unique=True, fractional=False, + min_digits=15), + "340282346638529000000000000000000000000.") + assert_equal(fpos32(f32x, unique=False, fractional=False, precision=4), + "340300000000000000000000000000000000000.") + # test that unique rounding is preserved when precision is supplied + # but no extra digits need to be printed (gh-18609) + a = np.float64.fromhex('-1p-97') + assert_equal(fsci64(a, unique=True), '-6.310887241768095e-30') + assert_equal(fsci64(a, unique=False, precision=15), + '-6.310887241768094e-30') + assert_equal(fsci64(a, unique=True, precision=15), + '-6.310887241768095e-30') + assert_equal(fsci64(a, unique=True, min_digits=15), + '-6.310887241768095e-30') + assert_equal(fsci64(a, unique=True, precision=15, min_digits=15), + '-6.310887241768095e-30') + # adds/remove digits in unique mode with unbiased rnding + assert_equal(fsci64(a, unique=True, precision=14), + '-6.31088724176809e-30') + assert_equal(fsci64(a, unique=True, min_digits=16), + '-6.3108872417680944e-30') + 
assert_equal(fsci64(a, unique=True, precision=16), + '-6.310887241768095e-30') + assert_equal(fsci64(a, unique=True, min_digits=14), + '-6.310887241768095e-30') + # test min_digits in unique mode with different rounding cases + assert_equal(fsci64('1e120', min_digits=3), '1.000e+120') + assert_equal(fsci64('1e100', min_digits=3), '1.000e+100') + + # test trailing zeros + assert_equal(fpos32('1.0', unique=False, precision=3), "1.000") + assert_equal(fpos64('1.0', unique=False, precision=3), "1.000") + assert_equal(fsci32('1.0', unique=False, precision=3), "1.000e+00") + assert_equal(fsci64('1.0', unique=False, precision=3), "1.000e+00") + assert_equal(fpos32('1.5', unique=False, precision=3), "1.500") + assert_equal(fpos64('1.5', unique=False, precision=3), "1.500") + assert_equal(fsci32('1.5', unique=False, precision=3), "1.500e+00") + assert_equal(fsci64('1.5', unique=False, precision=3), "1.500e+00") + # gh-10713 + assert_equal(fpos64('324', unique=False, precision=5, + fractional=False), "324.00") + + def test_dragon4_interface(self): + tps = [np.float16, np.float32, np.float64] + # test is flaky for musllinux on np.float128 + if hasattr(np, 'float128') and not IS_MUSL: + tps.append(np.float128) + + fpos = np.format_float_positional + fsci = np.format_float_scientific + + for tp in tps: + # test padding + assert_equal(fpos(tp('1.0'), pad_left=4, pad_right=4), " 1. ") + assert_equal(fpos(tp('-1.0'), pad_left=4, pad_right=4), " -1. ") + assert_equal(fpos(tp('-10.2'), + pad_left=4, pad_right=4), " -10.2 ") + + # test exp_digits + assert_equal(fsci(tp('1.23e1'), exp_digits=5), "1.23e+00001") + + # test fixed (non-unique) mode + assert_equal(fpos(tp('1.0'), unique=False, precision=4), "1.0000") + assert_equal(fsci(tp('1.0'), unique=False, precision=4), + "1.0000e+00") + + # test trimming + # trim of 'k' or '.' only affects non-unique mode, since unique + # mode will not output trailing 0s. + assert_equal(fpos(tp('1.'), unique=False, precision=4, trim='k'), + "1.0000") + + assert_equal(fpos(tp('1.'), unique=False, precision=4, trim='.'), + "1.") + assert_equal(fpos(tp('1.2'), unique=False, precision=4, trim='.'), + "1.2" if tp != np.float16 else "1.2002") + + assert_equal(fpos(tp('1.'), unique=False, precision=4, trim='0'), + "1.0") + assert_equal(fpos(tp('1.2'), unique=False, precision=4, trim='0'), + "1.2" if tp != np.float16 else "1.2002") + assert_equal(fpos(tp('1.'), trim='0'), "1.0") + + assert_equal(fpos(tp('1.'), unique=False, precision=4, trim='-'), + "1") + assert_equal(fpos(tp('1.2'), unique=False, precision=4, trim='-'), + "1.2" if tp != np.float16 else "1.2002") + assert_equal(fpos(tp('1.'), trim='-'), "1") + assert_equal(fpos(tp('1.001'), precision=1, trim='-'), "1") + + @pytest.mark.skipif(not platform.machine().startswith("ppc64"), + reason="only applies to ppc float128 values") + def test_ppc64_ibm_double_double128(self): + # check that the precision decreases once we get into the subnormal + # range. Unlike float64, this starts around 1e-292 instead of 1e-308, + # which happens when the first double is normal and the second is + # subnormal. 
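+        # (Background: on these platforms np.float128 is IBM double-double,
+        # i.e. an unevaluated sum of two float64 values, so precision falls
+        # off as the low-order double goes subnormal.)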
+ x = np.float128('2.123123123123123123123123123123123e-286') + got = [str(x/np.float128('2e' + str(i))) for i in range(0,40)] + expected = [ + "1.06156156156156156156156156156157e-286", + "1.06156156156156156156156156156158e-287", + "1.06156156156156156156156156156159e-288", + "1.0615615615615615615615615615616e-289", + "1.06156156156156156156156156156157e-290", + "1.06156156156156156156156156156156e-291", + "1.0615615615615615615615615615616e-292", + "1.0615615615615615615615615615615e-293", + "1.061561561561561561561561561562e-294", + "1.06156156156156156156156156155e-295", + "1.0615615615615615615615615616e-296", + "1.06156156156156156156156156e-297", + "1.06156156156156156156156157e-298", + "1.0615615615615615615615616e-299", + "1.06156156156156156156156e-300", + "1.06156156156156156156155e-301", + "1.0615615615615615615616e-302", + "1.061561561561561561562e-303", + "1.06156156156156156156e-304", + "1.0615615615615615618e-305", + "1.06156156156156156e-306", + "1.06156156156156157e-307", + "1.0615615615615616e-308", + "1.06156156156156e-309", + "1.06156156156157e-310", + "1.0615615615616e-311", + "1.06156156156e-312", + "1.06156156154e-313", + "1.0615615616e-314", + "1.06156156e-315", + "1.06156155e-316", + "1.061562e-317", + "1.06156e-318", + "1.06155e-319", + "1.0617e-320", + "1.06e-321", + "1.04e-322", + "1e-323", + "0.0", + "0.0"] + assert_equal(got, expected) + + # Note: we follow glibc behavior, but it (or gcc) might not be right. + # In particular we can get two values that print the same but are not + # equal: + a = np.float128('2')/np.float128('3') + b = np.float128(str(a)) + assert_equal(str(a), str(b)) + assert_(a != b) + + def float32_roundtrip(self): + # gh-9360 + x = np.float32(1024 - 2**-14) + y = np.float32(1024 - 2**-13) + assert_(repr(x) != repr(y)) + assert_equal(np.float32(repr(x)), x) + assert_equal(np.float32(repr(y)), y) + + def float64_vs_python(self): + # gh-2643, gh-6136, gh-6908 + assert_equal(repr(np.float64(0.1)), repr(0.1)) + assert_(repr(np.float64(0.20000000000000004)) != repr(0.2)) diff --git a/.venv/lib/python3.12/site-packages/numpy/core/tests/test_shape_base.py b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_shape_base.py new file mode 100644 index 00000000..0428b95a --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_shape_base.py @@ -0,0 +1,825 @@ +import pytest +import numpy as np +from numpy.core import ( + array, arange, atleast_1d, atleast_2d, atleast_3d, block, vstack, hstack, + newaxis, concatenate, stack + ) +from numpy.core.shape_base import (_block_dispatcher, _block_setup, + _block_concatenate, _block_slicing) +from numpy.testing import ( + assert_, assert_raises, assert_array_equal, assert_equal, + assert_raises_regex, assert_warns, IS_PYPY + ) + + +class TestAtleast1d: + def test_0D_array(self): + a = array(1) + b = array(2) + res = [atleast_1d(a), atleast_1d(b)] + desired = [array([1]), array([2])] + assert_array_equal(res, desired) + + def test_1D_array(self): + a = array([1, 2]) + b = array([2, 3]) + res = [atleast_1d(a), atleast_1d(b)] + desired = [array([1, 2]), array([2, 3])] + assert_array_equal(res, desired) + + def test_2D_array(self): + a = array([[1, 2], [1, 2]]) + b = array([[2, 3], [2, 3]]) + res = [atleast_1d(a), atleast_1d(b)] + desired = [a, b] + assert_array_equal(res, desired) + + def test_3D_array(self): + a = array([[1, 2], [1, 2]]) + b = array([[2, 3], [2, 3]]) + a = array([a, a]) + b = array([b, b]) + res = [atleast_1d(a), atleast_1d(b)] + desired = [a, b] + 
assert_array_equal(res, desired) + + def test_r1array(self): + """ Test to make sure equivalent Travis O's r1array function + """ + assert_(atleast_1d(3).shape == (1,)) + assert_(atleast_1d(3j).shape == (1,)) + assert_(atleast_1d(3.0).shape == (1,)) + assert_(atleast_1d([[2, 3], [4, 5]]).shape == (2, 2)) + + +class TestAtleast2d: + def test_0D_array(self): + a = array(1) + b = array(2) + res = [atleast_2d(a), atleast_2d(b)] + desired = [array([[1]]), array([[2]])] + assert_array_equal(res, desired) + + def test_1D_array(self): + a = array([1, 2]) + b = array([2, 3]) + res = [atleast_2d(a), atleast_2d(b)] + desired = [array([[1, 2]]), array([[2, 3]])] + assert_array_equal(res, desired) + + def test_2D_array(self): + a = array([[1, 2], [1, 2]]) + b = array([[2, 3], [2, 3]]) + res = [atleast_2d(a), atleast_2d(b)] + desired = [a, b] + assert_array_equal(res, desired) + + def test_3D_array(self): + a = array([[1, 2], [1, 2]]) + b = array([[2, 3], [2, 3]]) + a = array([a, a]) + b = array([b, b]) + res = [atleast_2d(a), atleast_2d(b)] + desired = [a, b] + assert_array_equal(res, desired) + + def test_r2array(self): + """ Test to make sure equivalent Travis O's r2array function + """ + assert_(atleast_2d(3).shape == (1, 1)) + assert_(atleast_2d([3j, 1]).shape == (1, 2)) + assert_(atleast_2d([[[3, 1], [4, 5]], [[3, 5], [1, 2]]]).shape == (2, 2, 2)) + + +class TestAtleast3d: + def test_0D_array(self): + a = array(1) + b = array(2) + res = [atleast_3d(a), atleast_3d(b)] + desired = [array([[[1]]]), array([[[2]]])] + assert_array_equal(res, desired) + + def test_1D_array(self): + a = array([1, 2]) + b = array([2, 3]) + res = [atleast_3d(a), atleast_3d(b)] + desired = [array([[[1], [2]]]), array([[[2], [3]]])] + assert_array_equal(res, desired) + + def test_2D_array(self): + a = array([[1, 2], [1, 2]]) + b = array([[2, 3], [2, 3]]) + res = [atleast_3d(a), atleast_3d(b)] + desired = [a[:,:, newaxis], b[:,:, newaxis]] + assert_array_equal(res, desired) + + def test_3D_array(self): + a = array([[1, 2], [1, 2]]) + b = array([[2, 3], [2, 3]]) + a = array([a, a]) + b = array([b, b]) + res = [atleast_3d(a), atleast_3d(b)] + desired = [a, b] + assert_array_equal(res, desired) + + +class TestHstack: + def test_non_iterable(self): + assert_raises(TypeError, hstack, 1) + + def test_empty_input(self): + assert_raises(ValueError, hstack, ()) + + def test_0D_array(self): + a = array(1) + b = array(2) + res = hstack([a, b]) + desired = array([1, 2]) + assert_array_equal(res, desired) + + def test_1D_array(self): + a = array([1]) + b = array([2]) + res = hstack([a, b]) + desired = array([1, 2]) + assert_array_equal(res, desired) + + def test_2D_array(self): + a = array([[1], [2]]) + b = array([[1], [2]]) + res = hstack([a, b]) + desired = array([[1, 1], [2, 2]]) + assert_array_equal(res, desired) + + def test_generator(self): + with pytest.raises(TypeError, match="arrays to stack must be"): + hstack((np.arange(3) for _ in range(2))) + with pytest.raises(TypeError, match="arrays to stack must be"): + hstack(map(lambda x: x, np.ones((3, 2)))) + + def test_casting_and_dtype(self): + a = np.array([1, 2, 3]) + b = np.array([2.5, 3.5, 4.5]) + res = np.hstack((a, b), casting="unsafe", dtype=np.int64) + expected_res = np.array([1, 2, 3, 2, 3, 4]) + assert_array_equal(res, expected_res) + + def test_casting_and_dtype_type_error(self): + a = np.array([1, 2, 3]) + b = np.array([2.5, 3.5, 4.5]) + with pytest.raises(TypeError): + hstack((a, b), casting="safe", dtype=np.int64) + + +class TestVstack: + def test_non_iterable(self): + 
assert_raises(TypeError, vstack, 1) + + def test_empty_input(self): + assert_raises(ValueError, vstack, ()) + + def test_0D_array(self): + a = array(1) + b = array(2) + res = vstack([a, b]) + desired = array([[1], [2]]) + assert_array_equal(res, desired) + + def test_1D_array(self): + a = array([1]) + b = array([2]) + res = vstack([a, b]) + desired = array([[1], [2]]) + assert_array_equal(res, desired) + + def test_2D_array(self): + a = array([[1], [2]]) + b = array([[1], [2]]) + res = vstack([a, b]) + desired = array([[1], [2], [1], [2]]) + assert_array_equal(res, desired) + + def test_2D_array2(self): + a = array([1, 2]) + b = array([1, 2]) + res = vstack([a, b]) + desired = array([[1, 2], [1, 2]]) + assert_array_equal(res, desired) + + def test_generator(self): + with pytest.raises(TypeError, match="arrays to stack must be"): + vstack((np.arange(3) for _ in range(2))) + + def test_casting_and_dtype(self): + a = np.array([1, 2, 3]) + b = np.array([2.5, 3.5, 4.5]) + res = np.vstack((a, b), casting="unsafe", dtype=np.int64) + expected_res = np.array([[1, 2, 3], [2, 3, 4]]) + assert_array_equal(res, expected_res) + + def test_casting_and_dtype_type_error(self): + a = np.array([1, 2, 3]) + b = np.array([2.5, 3.5, 4.5]) + with pytest.raises(TypeError): + vstack((a, b), casting="safe", dtype=np.int64) + + + +class TestConcatenate: + def test_returns_copy(self): + a = np.eye(3) + b = np.concatenate([a]) + b[0, 0] = 2 + assert b[0, 0] != a[0, 0] + + def test_exceptions(self): + # test axis must be in bounds + for ndim in [1, 2, 3]: + a = np.ones((1,)*ndim) + np.concatenate((a, a), axis=0) # OK + assert_raises(np.AxisError, np.concatenate, (a, a), axis=ndim) + assert_raises(np.AxisError, np.concatenate, (a, a), axis=-(ndim + 1)) + + # Scalars cannot be concatenated + assert_raises(ValueError, concatenate, (0,)) + assert_raises(ValueError, concatenate, (np.array(0),)) + + # dimensionality must match + assert_raises_regex( + ValueError, + r"all the input arrays must have same number of dimensions, but " + r"the array at index 0 has 1 dimension\(s\) and the array at " + r"index 1 has 2 dimension\(s\)", + np.concatenate, (np.zeros(1), np.zeros((1, 1)))) + + # test shapes must match except for concatenation axis + a = np.ones((1, 2, 3)) + b = np.ones((2, 2, 3)) + axis = list(range(3)) + for i in range(3): + np.concatenate((a, b), axis=axis[0]) # OK + assert_raises_regex( + ValueError, + "all the input array dimensions except for the concatenation axis " + "must match exactly, but along dimension {}, the array at " + "index 0 has size 1 and the array at index 1 has size 2" + .format(i), + np.concatenate, (a, b), axis=axis[1]) + assert_raises(ValueError, np.concatenate, (a, b), axis=axis[2]) + a = np.moveaxis(a, -1, 0) + b = np.moveaxis(b, -1, 0) + axis.append(axis.pop(0)) + + # No arrays to concatenate raises ValueError + assert_raises(ValueError, concatenate, ()) + + def test_concatenate_axis_None(self): + a = np.arange(4, dtype=np.float64).reshape((2, 2)) + b = list(range(3)) + c = ['x'] + r = np.concatenate((a, a), axis=None) + assert_equal(r.dtype, a.dtype) + assert_equal(r.ndim, 1) + r = np.concatenate((a, b), axis=None) + assert_equal(r.size, a.size + len(b)) + assert_equal(r.dtype, a.dtype) + r = np.concatenate((a, b, c), axis=None, dtype="U") + d = array(['0.0', '1.0', '2.0', '3.0', + '0', '1', '2', 'x']) + assert_array_equal(r, d) + + out = np.zeros(a.size + len(b)) + r = np.concatenate((a, b), axis=None) + rout = np.concatenate((a, b), axis=None, out=out) + assert_(out is rout) + 
assert_equal(r, rout) + + def test_large_concatenate_axis_None(self): + # When no axis is given, concatenate uses flattened versions. + # This also had a bug with many arrays (see gh-5979). + x = np.arange(1, 100) + r = np.concatenate(x, None) + assert_array_equal(x, r) + + # This should probably be deprecated: + r = np.concatenate(x, 100) # axis is >= MAXDIMS + assert_array_equal(x, r) + + def test_concatenate(self): + # Test concatenate function + # One sequence returns unmodified (but as array) + r4 = list(range(4)) + assert_array_equal(concatenate((r4,)), r4) + # Any sequence + assert_array_equal(concatenate((tuple(r4),)), r4) + assert_array_equal(concatenate((array(r4),)), r4) + # 1D default concatenation + r3 = list(range(3)) + assert_array_equal(concatenate((r4, r3)), r4 + r3) + # Mixed sequence types + assert_array_equal(concatenate((tuple(r4), r3)), r4 + r3) + assert_array_equal(concatenate((array(r4), r3)), r4 + r3) + # Explicit axis specification + assert_array_equal(concatenate((r4, r3), 0), r4 + r3) + # Including negative + assert_array_equal(concatenate((r4, r3), -1), r4 + r3) + # 2D + a23 = array([[10, 11, 12], [13, 14, 15]]) + a13 = array([[0, 1, 2]]) + res = array([[10, 11, 12], [13, 14, 15], [0, 1, 2]]) + assert_array_equal(concatenate((a23, a13)), res) + assert_array_equal(concatenate((a23, a13), 0), res) + assert_array_equal(concatenate((a23.T, a13.T), 1), res.T) + assert_array_equal(concatenate((a23.T, a13.T), -1), res.T) + # Arrays much match shape + assert_raises(ValueError, concatenate, (a23.T, a13.T), 0) + # 3D + res = arange(2 * 3 * 7).reshape((2, 3, 7)) + a0 = res[..., :4] + a1 = res[..., 4:6] + a2 = res[..., 6:] + assert_array_equal(concatenate((a0, a1, a2), 2), res) + assert_array_equal(concatenate((a0, a1, a2), -1), res) + assert_array_equal(concatenate((a0.T, a1.T, a2.T), 0), res.T) + + out = res.copy() + rout = concatenate((a0, a1, a2), 2, out=out) + assert_(out is rout) + assert_equal(res, rout) + + @pytest.mark.skipif(IS_PYPY, reason="PYPY handles sq_concat, nb_add differently than cpython") + def test_operator_concat(self): + import operator + a = array([1, 2]) + b = array([3, 4]) + n = [1,2] + res = array([1, 2, 3, 4]) + assert_raises(TypeError, operator.concat, a, b) + assert_raises(TypeError, operator.concat, a, n) + assert_raises(TypeError, operator.concat, n, a) + assert_raises(TypeError, operator.concat, a, 1) + assert_raises(TypeError, operator.concat, 1, a) + + def test_bad_out_shape(self): + a = array([1, 2]) + b = array([3, 4]) + + assert_raises(ValueError, concatenate, (a, b), out=np.empty(5)) + assert_raises(ValueError, concatenate, (a, b), out=np.empty((4,1))) + assert_raises(ValueError, concatenate, (a, b), out=np.empty((1,4))) + concatenate((a, b), out=np.empty(4)) + + @pytest.mark.parametrize("axis", [None, 0]) + @pytest.mark.parametrize("out_dtype", ["c8", "f4", "f8", ">f8", "i8", "S4"]) + @pytest.mark.parametrize("casting", + ['no', 'equiv', 'safe', 'same_kind', 'unsafe']) + def test_out_and_dtype(self, axis, out_dtype, casting): + # Compare usage of `out=out` with `dtype=out.dtype` + out = np.empty(4, dtype=out_dtype) + to_concat = (array([1.1, 2.2]), array([3.3, 4.4])) + + if not np.can_cast(to_concat[0], out_dtype, casting=casting): + with assert_raises(TypeError): + concatenate(to_concat, out=out, axis=axis, casting=casting) + with assert_raises(TypeError): + concatenate(to_concat, dtype=out.dtype, + axis=axis, casting=casting) + else: + res_out = concatenate(to_concat, out=out, + axis=axis, casting=casting) + res_dtype = 
concatenate(to_concat, dtype=out.dtype, + axis=axis, casting=casting) + assert res_out is out + assert_array_equal(out, res_dtype) + assert res_dtype.dtype == out_dtype + + with assert_raises(TypeError): + concatenate(to_concat, out=out, dtype=out_dtype, axis=axis) + + @pytest.mark.parametrize("axis", [None, 0]) + @pytest.mark.parametrize("string_dt", ["S", "U", "S0", "U0"]) + @pytest.mark.parametrize("arrs", + [([0.],), ([0.], [1]), ([0], ["string"], [1.])]) + def test_dtype_with_promotion(self, arrs, string_dt, axis): + # Note that U0 and S0 should be deprecated eventually and changed to + # actually give the empty string result (together with `np.array`) + res = np.concatenate(arrs, axis=axis, dtype=string_dt, casting="unsafe") + # The actual dtype should be identical to a cast (of a double array): + assert res.dtype == np.array(1.).astype(string_dt).dtype + + @pytest.mark.parametrize("axis", [None, 0]) + def test_string_dtype_does_not_inspect(self, axis): + with pytest.raises(TypeError): + np.concatenate(([None], [1]), dtype="S", axis=axis) + with pytest.raises(TypeError): + np.concatenate(([None], [1]), dtype="U", axis=axis) + + @pytest.mark.parametrize("axis", [None, 0]) + def test_subarray_error(self, axis): + with pytest.raises(TypeError, match=".*subarray dtype"): + np.concatenate(([1], [1]), dtype="(2,)i", axis=axis) + + +def test_stack(): + # non-iterable input + assert_raises(TypeError, stack, 1) + + # 0d input + for input_ in [(1, 2, 3), + [np.int32(1), np.int32(2), np.int32(3)], + [np.array(1), np.array(2), np.array(3)]]: + assert_array_equal(stack(input_), [1, 2, 3]) + # 1d input examples + a = np.array([1, 2, 3]) + b = np.array([4, 5, 6]) + r1 = array([[1, 2, 3], [4, 5, 6]]) + assert_array_equal(np.stack((a, b)), r1) + assert_array_equal(np.stack((a, b), axis=1), r1.T) + # all input types + assert_array_equal(np.stack(list([a, b])), r1) + assert_array_equal(np.stack(array([a, b])), r1) + # all shapes for 1d input + arrays = [np.random.randn(3) for _ in range(10)] + axes = [0, 1, -1, -2] + expected_shapes = [(10, 3), (3, 10), (3, 10), (10, 3)] + for axis, expected_shape in zip(axes, expected_shapes): + assert_equal(np.stack(arrays, axis).shape, expected_shape) + assert_raises_regex(np.AxisError, 'out of bounds', stack, arrays, axis=2) + assert_raises_regex(np.AxisError, 'out of bounds', stack, arrays, axis=-3) + # all shapes for 2d input + arrays = [np.random.randn(3, 4) for _ in range(10)] + axes = [0, 1, 2, -1, -2, -3] + expected_shapes = [(10, 3, 4), (3, 10, 4), (3, 4, 10), + (3, 4, 10), (3, 10, 4), (10, 3, 4)] + for axis, expected_shape in zip(axes, expected_shapes): + assert_equal(np.stack(arrays, axis).shape, expected_shape) + # empty arrays + assert_(stack([[], [], []]).shape == (3, 0)) + assert_(stack([[], [], []], axis=1).shape == (0, 3)) + # out + out = np.zeros_like(r1) + np.stack((a, b), out=out) + assert_array_equal(out, r1) + # edge cases + assert_raises_regex(ValueError, 'need at least one array', stack, []) + assert_raises_regex(ValueError, 'must have the same shape', + stack, [1, np.arange(3)]) + assert_raises_regex(ValueError, 'must have the same shape', + stack, [np.arange(3), 1]) + assert_raises_regex(ValueError, 'must have the same shape', + stack, [np.arange(3), 1], axis=1) + assert_raises_regex(ValueError, 'must have the same shape', + stack, [np.zeros((3, 3)), np.zeros(3)], axis=1) + assert_raises_regex(ValueError, 'must have the same shape', + stack, [np.arange(2), np.arange(3)]) + + # do not accept generators + with pytest.raises(TypeError, 
match="arrays to stack must be"): + stack((x for x in range(3))) + + #casting and dtype test + a = np.array([1, 2, 3]) + b = np.array([2.5, 3.5, 4.5]) + res = np.stack((a, b), axis=1, casting="unsafe", dtype=np.int64) + expected_res = np.array([[1, 2], [2, 3], [3, 4]]) + assert_array_equal(res, expected_res) + #casting and dtype with TypeError + with assert_raises(TypeError): + stack((a, b), dtype=np.int64, axis=1, casting="safe") + + +@pytest.mark.parametrize("axis", [0]) +@pytest.mark.parametrize("out_dtype", ["c8", "f4", "f8", ">f8", "i8"]) +@pytest.mark.parametrize("casting", + ['no', 'equiv', 'safe', 'same_kind', 'unsafe']) +def test_stack_out_and_dtype(axis, out_dtype, casting): + to_concat = (array([1, 2]), array([3, 4])) + res = array([[1, 2], [3, 4]]) + out = np.zeros_like(res) + + if not np.can_cast(to_concat[0], out_dtype, casting=casting): + with assert_raises(TypeError): + stack(to_concat, dtype=out_dtype, + axis=axis, casting=casting) + else: + res_out = stack(to_concat, out=out, + axis=axis, casting=casting) + res_dtype = stack(to_concat, dtype=out_dtype, + axis=axis, casting=casting) + assert res_out is out + assert_array_equal(out, res_dtype) + assert res_dtype.dtype == out_dtype + + with assert_raises(TypeError): + stack(to_concat, out=out, dtype=out_dtype, axis=axis) + + +class TestBlock: + @pytest.fixture(params=['block', 'force_concatenate', 'force_slicing']) + def block(self, request): + # blocking small arrays and large arrays go through different paths. + # the algorithm is triggered depending on the number of element + # copies required. + # We define a test fixture that forces most tests to go through + # both code paths. + # Ultimately, this should be removed if a single algorithm is found + # to be faster for both small and large arrays. + def _block_force_concatenate(arrays): + arrays, list_ndim, result_ndim, _ = _block_setup(arrays) + return _block_concatenate(arrays, list_ndim, result_ndim) + + def _block_force_slicing(arrays): + arrays, list_ndim, result_ndim, _ = _block_setup(arrays) + return _block_slicing(arrays, list_ndim, result_ndim) + + if request.param == 'force_concatenate': + return _block_force_concatenate + elif request.param == 'force_slicing': + return _block_force_slicing + elif request.param == 'block': + return block + else: + raise ValueError('Unknown blocking request. 
There is a typo in the tests.') + + def test_returns_copy(self, block): + a = np.eye(3) + b = block(a) + b[0, 0] = 2 + assert b[0, 0] != a[0, 0] + + def test_block_total_size_estimate(self, block): + _, _, _, total_size = _block_setup([1]) + assert total_size == 1 + + _, _, _, total_size = _block_setup([[1]]) + assert total_size == 1 + + _, _, _, total_size = _block_setup([[1, 1]]) + assert total_size == 2 + + _, _, _, total_size = _block_setup([[1], [1]]) + assert total_size == 2 + + _, _, _, total_size = _block_setup([[1, 2], [3, 4]]) + assert total_size == 4 + + def test_block_simple_row_wise(self, block): + a_2d = np.ones((2, 2)) + b_2d = 2 * a_2d + desired = np.array([[1, 1, 2, 2], + [1, 1, 2, 2]]) + result = block([a_2d, b_2d]) + assert_equal(desired, result) + + def test_block_simple_column_wise(self, block): + a_2d = np.ones((2, 2)) + b_2d = 2 * a_2d + expected = np.array([[1, 1], + [1, 1], + [2, 2], + [2, 2]]) + result = block([[a_2d], [b_2d]]) + assert_equal(expected, result) + + def test_block_with_1d_arrays_row_wise(self, block): + # # # 1-D vectors are treated as row arrays + a = np.array([1, 2, 3]) + b = np.array([2, 3, 4]) + expected = np.array([1, 2, 3, 2, 3, 4]) + result = block([a, b]) + assert_equal(expected, result) + + def test_block_with_1d_arrays_multiple_rows(self, block): + a = np.array([1, 2, 3]) + b = np.array([2, 3, 4]) + expected = np.array([[1, 2, 3, 2, 3, 4], + [1, 2, 3, 2, 3, 4]]) + result = block([[a, b], [a, b]]) + assert_equal(expected, result) + + def test_block_with_1d_arrays_column_wise(self, block): + # # # 1-D vectors are treated as row arrays + a_1d = np.array([1, 2, 3]) + b_1d = np.array([2, 3, 4]) + expected = np.array([[1, 2, 3], + [2, 3, 4]]) + result = block([[a_1d], [b_1d]]) + assert_equal(expected, result) + + def test_block_mixed_1d_and_2d(self, block): + a_2d = np.ones((2, 2)) + b_1d = np.array([2, 2]) + result = block([[a_2d], [b_1d]]) + expected = np.array([[1, 1], + [1, 1], + [2, 2]]) + assert_equal(expected, result) + + def test_block_complicated(self, block): + # a bit more complicated + one_2d = np.array([[1, 1, 1]]) + two_2d = np.array([[2, 2, 2]]) + three_2d = np.array([[3, 3, 3, 3, 3, 3]]) + four_1d = np.array([4, 4, 4, 4, 4, 4]) + five_0d = np.array(5) + six_1d = np.array([6, 6, 6, 6, 6]) + zero_2d = np.zeros((2, 6)) + + expected = np.array([[1, 1, 1, 2, 2, 2], + [3, 3, 3, 3, 3, 3], + [4, 4, 4, 4, 4, 4], + [5, 6, 6, 6, 6, 6], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0]]) + + result = block([[one_2d, two_2d], + [three_2d], + [four_1d], + [five_0d, six_1d], + [zero_2d]]) + assert_equal(result, expected) + + def test_nested(self, block): + one = np.array([1, 1, 1]) + two = np.array([[2, 2, 2], [2, 2, 2], [2, 2, 2]]) + three = np.array([3, 3, 3]) + four = np.array([4, 4, 4]) + five = np.array(5) + six = np.array([6, 6, 6, 6, 6]) + zero = np.zeros((2, 6)) + + result = block([ + [ + block([ + [one], + [three], + [four] + ]), + two + ], + [five, six], + [zero] + ]) + expected = np.array([[1, 1, 1, 2, 2, 2], + [3, 3, 3, 2, 2, 2], + [4, 4, 4, 2, 2, 2], + [5, 6, 6, 6, 6, 6], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0]]) + + assert_equal(result, expected) + + def test_3d(self, block): + a000 = np.ones((2, 2, 2), int) * 1 + + a100 = np.ones((3, 2, 2), int) * 2 + a010 = np.ones((2, 3, 2), int) * 3 + a001 = np.ones((2, 2, 3), int) * 4 + + a011 = np.ones((2, 3, 3), int) * 5 + a101 = np.ones((3, 2, 3), int) * 6 + a110 = np.ones((3, 3, 2), int) * 7 + + a111 = np.ones((3, 3, 3), int) * 8 + + result = block([ + [ + [a000, a001], + [a010, a011], + ], + 
[ + [a100, a101], + [a110, a111], + ] + ]) + expected = array([[[1, 1, 4, 4, 4], + [1, 1, 4, 4, 4], + [3, 3, 5, 5, 5], + [3, 3, 5, 5, 5], + [3, 3, 5, 5, 5]], + + [[1, 1, 4, 4, 4], + [1, 1, 4, 4, 4], + [3, 3, 5, 5, 5], + [3, 3, 5, 5, 5], + [3, 3, 5, 5, 5]], + + [[2, 2, 6, 6, 6], + [2, 2, 6, 6, 6], + [7, 7, 8, 8, 8], + [7, 7, 8, 8, 8], + [7, 7, 8, 8, 8]], + + [[2, 2, 6, 6, 6], + [2, 2, 6, 6, 6], + [7, 7, 8, 8, 8], + [7, 7, 8, 8, 8], + [7, 7, 8, 8, 8]], + + [[2, 2, 6, 6, 6], + [2, 2, 6, 6, 6], + [7, 7, 8, 8, 8], + [7, 7, 8, 8, 8], + [7, 7, 8, 8, 8]]]) + + assert_array_equal(result, expected) + + def test_block_with_mismatched_shape(self, block): + a = np.array([0, 0]) + b = np.eye(2) + assert_raises(ValueError, block, [a, b]) + assert_raises(ValueError, block, [b, a]) + + to_block = [[np.ones((2,3)), np.ones((2,2))], + [np.ones((2,2)), np.ones((2,2))]] + assert_raises(ValueError, block, to_block) + def test_no_lists(self, block): + assert_equal(block(1), np.array(1)) + assert_equal(block(np.eye(3)), np.eye(3)) + + def test_invalid_nesting(self, block): + msg = 'depths are mismatched' + assert_raises_regex(ValueError, msg, block, [1, [2]]) + assert_raises_regex(ValueError, msg, block, [1, []]) + assert_raises_regex(ValueError, msg, block, [[1], 2]) + assert_raises_regex(ValueError, msg, block, [[], 2]) + assert_raises_regex(ValueError, msg, block, [ + [[1], [2]], + [[3, 4]], + [5] # missing brackets + ]) + + def test_empty_lists(self, block): + assert_raises_regex(ValueError, 'empty', block, []) + assert_raises_regex(ValueError, 'empty', block, [[]]) + assert_raises_regex(ValueError, 'empty', block, [[1], []]) + + def test_tuple(self, block): + assert_raises_regex(TypeError, 'tuple', block, ([1, 2], [3, 4])) + assert_raises_regex(TypeError, 'tuple', block, [(1, 2), (3, 4)]) + + def test_different_ndims(self, block): + a = 1. + b = 2 * np.ones((1, 2)) + c = 3 * np.ones((1, 1, 3)) + + result = block([a, b, c]) + expected = np.array([[[1., 2., 2., 3., 3., 3.]]]) + + assert_equal(result, expected) + + def test_different_ndims_depths(self, block): + a = 1. 
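        # Editor's note (behaviour recap, not part of the upstream test): np.block
        # promotes every input to the largest ndim by prepending length-1 axes, then
        # concatenates the innermost lists along the last axis, the next nesting
        # level along the second-to-last axis, and so on.  Here the scalar, the
        # (1, 2) array and the (1, 2, 3) array therefore combine into a (1, 3, 3)
        # result.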
+ b = 2 * np.ones((1, 2)) + c = 3 * np.ones((1, 2, 3)) + + result = block([[a, b], [c]]) + expected = np.array([[[1., 2., 2.], + [3., 3., 3.], + [3., 3., 3.]]]) + + assert_equal(result, expected) + + def test_block_memory_order(self, block): + # 3D + arr_c = np.zeros((3,)*3, order='C') + arr_f = np.zeros((3,)*3, order='F') + + b_c = [[[arr_c, arr_c], + [arr_c, arr_c]], + [[arr_c, arr_c], + [arr_c, arr_c]]] + + b_f = [[[arr_f, arr_f], + [arr_f, arr_f]], + [[arr_f, arr_f], + [arr_f, arr_f]]] + + assert block(b_c).flags['C_CONTIGUOUS'] + assert block(b_f).flags['F_CONTIGUOUS'] + + arr_c = np.zeros((3, 3), order='C') + arr_f = np.zeros((3, 3), order='F') + # 2D + b_c = [[arr_c, arr_c], + [arr_c, arr_c]] + + b_f = [[arr_f, arr_f], + [arr_f, arr_f]] + + assert block(b_c).flags['C_CONTIGUOUS'] + assert block(b_f).flags['F_CONTIGUOUS'] + + +def test_block_dispatcher(): + class ArrayLike: + pass + a = ArrayLike() + b = ArrayLike() + c = ArrayLike() + assert_equal(list(_block_dispatcher(a)), [a]) + assert_equal(list(_block_dispatcher([a])), [a]) + assert_equal(list(_block_dispatcher([a, b])), [a, b]) + assert_equal(list(_block_dispatcher([[a], [b, [c]]])), [a, b, c]) + # don't recurse into non-lists + assert_equal(list(_block_dispatcher((a, b))), [(a, b)]) diff --git a/.venv/lib/python3.12/site-packages/numpy/core/tests/test_simd.py b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_simd.py new file mode 100644 index 00000000..92b56744 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_simd.py @@ -0,0 +1,1333 @@ +# NOTE: Please avoid the use of numpy.testing since NPYV intrinsics +# may be involved in their functionality. +import pytest, math, re +import itertools +import operator +from numpy.core._simd import targets, clear_floatstatus, get_floatstatus +from numpy.core._multiarray_umath import __cpu_baseline__ + +def check_floatstatus(divbyzero=False, overflow=False, + underflow=False, invalid=False, + all=False): + #define NPY_FPE_DIVIDEBYZERO 1 + #define NPY_FPE_OVERFLOW 2 + #define NPY_FPE_UNDERFLOW 4 + #define NPY_FPE_INVALID 8 + err = get_floatstatus() + ret = (all or divbyzero) and (err & 1) != 0 + ret |= (all or overflow) and (err & 2) != 0 + ret |= (all or underflow) and (err & 4) != 0 + ret |= (all or invalid) and (err & 8) != 0 + return ret + +class _Test_Utility: + # submodule of the desired SIMD extension, e.g. targets["AVX512F"] + npyv = None + # the current data type suffix e.g. 's8' + sfx = None + # target name can be 'baseline' or one or more of CPU features + target_name = None + + def __getattr__(self, attr): + """ + To call NPV intrinsics without the attribute 'npyv' and + auto suffixing intrinsics according to class attribute 'sfx' + """ + return getattr(self.npyv, attr + "_" + self.sfx) + + def _x2(self, intrin_name): + return getattr(self.npyv, f"{intrin_name}_{self.sfx}x2") + + def _data(self, start=None, count=None, reverse=False): + """ + Create list of consecutive numbers according to number of vector's lanes. 
+ """ + if start is None: + start = 1 + if count is None: + count = self.nlanes + rng = range(start, start + count) + if reverse: + rng = reversed(rng) + if self._is_fp(): + return [x / 1.0 for x in rng] + return list(rng) + + def _is_unsigned(self): + return self.sfx[0] == 'u' + + def _is_signed(self): + return self.sfx[0] == 's' + + def _is_fp(self): + return self.sfx[0] == 'f' + + def _scalar_size(self): + return int(self.sfx[1:]) + + def _int_clip(self, seq): + if self._is_fp(): + return seq + max_int = self._int_max() + min_int = self._int_min() + return [min(max(v, min_int), max_int) for v in seq] + + def _int_max(self): + if self._is_fp(): + return None + max_u = self._to_unsigned(self.setall(-1))[0] + if self._is_signed(): + return max_u // 2 + return max_u + + def _int_min(self): + if self._is_fp(): + return None + if self._is_unsigned(): + return 0 + return -(self._int_max() + 1) + + def _true_mask(self): + max_unsig = getattr(self.npyv, "setall_u" + self.sfx[1:])(-1) + return max_unsig[0] + + def _to_unsigned(self, vector): + if isinstance(vector, (list, tuple)): + return getattr(self.npyv, "load_u" + self.sfx[1:])(vector) + else: + sfx = vector.__name__.replace("npyv_", "") + if sfx[0] == "b": + cvt_intrin = "cvt_u{0}_b{0}" + else: + cvt_intrin = "reinterpret_u{0}_{1}" + return getattr(self.npyv, cvt_intrin.format(sfx[1:], sfx))(vector) + + def _pinfinity(self): + return float("inf") + + def _ninfinity(self): + return -float("inf") + + def _nan(self): + return float("nan") + + def _cpu_features(self): + target = self.target_name + if target == "baseline": + target = __cpu_baseline__ + else: + target = target.split('__') # multi-target separator + return ' '.join(target) + +class _SIMD_BOOL(_Test_Utility): + """ + To test all boolean vector types at once + """ + def _nlanes(self): + return getattr(self.npyv, "nlanes_u" + self.sfx[1:]) + + def _data(self, start=None, count=None, reverse=False): + true_mask = self._true_mask() + rng = range(self._nlanes()) + if reverse: + rng = reversed(rng) + return [true_mask if x % 2 else 0 for x in rng] + + def _load_b(self, data): + len_str = self.sfx[1:] + load = getattr(self.npyv, "load_u" + len_str) + cvt = getattr(self.npyv, f"cvt_b{len_str}_u{len_str}") + return cvt(load(data)) + + def test_operators_logical(self): + """ + Logical operations for boolean types. 
+ Test intrinsics: + npyv_xor_##SFX, npyv_and_##SFX, npyv_or_##SFX, npyv_not_##SFX, + npyv_andc_b8, npvy_orc_b8, nvpy_xnor_b8 + """ + data_a = self._data() + data_b = self._data(reverse=True) + vdata_a = self._load_b(data_a) + vdata_b = self._load_b(data_b) + + data_and = [a & b for a, b in zip(data_a, data_b)] + vand = getattr(self, "and")(vdata_a, vdata_b) + assert vand == data_and + + data_or = [a | b for a, b in zip(data_a, data_b)] + vor = getattr(self, "or")(vdata_a, vdata_b) + assert vor == data_or + + data_xor = [a ^ b for a, b in zip(data_a, data_b)] + vxor = getattr(self, "xor")(vdata_a, vdata_b) + assert vxor == data_xor + + vnot = getattr(self, "not")(vdata_a) + assert vnot == data_b + + # among the boolean types, andc, orc and xnor only support b8 + if self.sfx not in ("b8"): + return + + data_andc = [(a & ~b) & 0xFF for a, b in zip(data_a, data_b)] + vandc = getattr(self, "andc")(vdata_a, vdata_b) + assert data_andc == vandc + + data_orc = [(a | ~b) & 0xFF for a, b in zip(data_a, data_b)] + vorc = getattr(self, "orc")(vdata_a, vdata_b) + assert data_orc == vorc + + data_xnor = [~(a ^ b) & 0xFF for a, b in zip(data_a, data_b)] + vxnor = getattr(self, "xnor")(vdata_a, vdata_b) + assert data_xnor == vxnor + + def test_tobits(self): + data2bits = lambda data: sum([int(x != 0) << i for i, x in enumerate(data, 0)]) + for data in (self._data(), self._data(reverse=True)): + vdata = self._load_b(data) + data_bits = data2bits(data) + tobits = self.tobits(vdata) + bin_tobits = bin(tobits) + assert bin_tobits == bin(data_bits) + + def test_pack(self): + """ + Pack multiple vectors into one + Test intrinsics: + npyv_pack_b8_b16 + npyv_pack_b8_b32 + npyv_pack_b8_b64 + """ + if self.sfx not in ("b16", "b32", "b64"): + return + # create the vectors + data = self._data() + rdata = self._data(reverse=True) + vdata = self._load_b(data) + vrdata = self._load_b(rdata) + pack_simd = getattr(self.npyv, f"pack_b8_{self.sfx}") + # for scalar execution, concatenate the elements of the multiple lists + # into a single list (spack) and then iterate over the elements of + # the created list applying a mask to capture the first byte of them. 
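        # Editor's note (not part of the upstream test): a "true" lane of b16/b32/b64
        # is an all-ones mask (0xFFFF, 0xFFFFFFFF, ...), and packing into b8 keeps
        # one byte per source lane, so the scalar reference below just truncates
        # every mask to its low byte with `& 0xFF`.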
+ if self.sfx == "b16": + spack = [(i & 0xFF) for i in (list(rdata) + list(data))] + vpack = pack_simd(vrdata, vdata) + elif self.sfx == "b32": + spack = [(i & 0xFF) for i in (2*list(rdata) + 2*list(data))] + vpack = pack_simd(vrdata, vrdata, vdata, vdata) + elif self.sfx == "b64": + spack = [(i & 0xFF) for i in (4*list(rdata) + 4*list(data))] + vpack = pack_simd(vrdata, vrdata, vrdata, vrdata, + vdata, vdata, vdata, vdata) + assert vpack == spack + + @pytest.mark.parametrize("intrin", ["any", "all"]) + @pytest.mark.parametrize("data", ( + [-1, 0], + [0, -1], + [-1], + [0] + )) + def test_operators_crosstest(self, intrin, data): + """ + Test intrinsics: + npyv_any_##SFX + npyv_all_##SFX + """ + data_a = self._load_b(data * self._nlanes()) + func = eval(intrin) + intrin = getattr(self, intrin) + desired = func(data_a) + simd = intrin(data_a) + assert not not simd == desired + +class _SIMD_INT(_Test_Utility): + """ + To test all integer vector types at once + """ + def test_operators_shift(self): + if self.sfx in ("u8", "s8"): + return + + data_a = self._data(self._int_max() - self.nlanes) + data_b = self._data(self._int_min(), reverse=True) + vdata_a, vdata_b = self.load(data_a), self.load(data_b) + + for count in range(self._scalar_size()): + # load to cast + data_shl_a = self.load([a << count for a in data_a]) + # left shift + shl = self.shl(vdata_a, count) + assert shl == data_shl_a + # load to cast + data_shr_a = self.load([a >> count for a in data_a]) + # right shift + shr = self.shr(vdata_a, count) + assert shr == data_shr_a + + # shift by zero or max or out-range immediate constant is not applicable and illogical + for count in range(1, self._scalar_size()): + # load to cast + data_shl_a = self.load([a << count for a in data_a]) + # left shift by an immediate constant + shli = self.shli(vdata_a, count) + assert shli == data_shl_a + # load to cast + data_shr_a = self.load([a >> count for a in data_a]) + # right shift by an immediate constant + shri = self.shri(vdata_a, count) + assert shri == data_shr_a + + def test_arithmetic_subadd_saturated(self): + if self.sfx in ("u32", "s32", "u64", "s64"): + return + + data_a = self._data(self._int_max() - self.nlanes) + data_b = self._data(self._int_min(), reverse=True) + vdata_a, vdata_b = self.load(data_a), self.load(data_b) + + data_adds = self._int_clip([a + b for a, b in zip(data_a, data_b)]) + adds = self.adds(vdata_a, vdata_b) + assert adds == data_adds + + data_subs = self._int_clip([a - b for a, b in zip(data_a, data_b)]) + subs = self.subs(vdata_a, vdata_b) + assert subs == data_subs + + def test_math_max_min(self): + data_a = self._data() + data_b = self._data(self.nlanes) + vdata_a, vdata_b = self.load(data_a), self.load(data_b) + + data_max = [max(a, b) for a, b in zip(data_a, data_b)] + simd_max = self.max(vdata_a, vdata_b) + assert simd_max == data_max + + data_min = [min(a, b) for a, b in zip(data_a, data_b)] + simd_min = self.min(vdata_a, vdata_b) + assert simd_min == data_min + + @pytest.mark.parametrize("start", [-100, -10000, 0, 100, 10000]) + def test_reduce_max_min(self, start): + """ + Test intrinsics: + npyv_reduce_max_##sfx + npyv_reduce_min_##sfx + """ + vdata_a = self.load(self._data(start)) + assert self.reduce_max(vdata_a) == max(vdata_a) + assert self.reduce_min(vdata_a) == min(vdata_a) + + +class _SIMD_FP32(_Test_Utility): + """ + To only test single precision + """ + def test_conversions(self): + """ + Round to nearest even integer, assume CPU control register is set to rounding. 
+ Test intrinsics: + npyv_round_s32_##SFX + """ + features = self._cpu_features() + if not self.npyv.simd_f64 and re.match(r".*(NEON|ASIMD)", features): + # very costly to emulate nearest even on Armv7 + # instead we round halves to up. e.g. 0.5 -> 1, -0.5 -> -1 + _round = lambda v: int(v + (0.5 if v >= 0 else -0.5)) + else: + _round = round + vdata_a = self.load(self._data()) + vdata_a = self.sub(vdata_a, self.setall(0.5)) + data_round = [_round(x) for x in vdata_a] + vround = self.round_s32(vdata_a) + assert vround == data_round + +class _SIMD_FP64(_Test_Utility): + """ + To only test double precision + """ + def test_conversions(self): + """ + Round to nearest even integer, assume CPU control register is set to rounding. + Test intrinsics: + npyv_round_s32_##SFX + """ + vdata_a = self.load(self._data()) + vdata_a = self.sub(vdata_a, self.setall(0.5)) + vdata_b = self.mul(vdata_a, self.setall(-1.5)) + data_round = [round(x) for x in list(vdata_a) + list(vdata_b)] + vround = self.round_s32(vdata_a, vdata_b) + assert vround == data_round + +class _SIMD_FP(_Test_Utility): + """ + To test all float vector types at once + """ + def test_arithmetic_fused(self): + vdata_a, vdata_b, vdata_c = [self.load(self._data())]*3 + vdata_cx2 = self.add(vdata_c, vdata_c) + # multiply and add, a*b + c + data_fma = self.load([a * b + c for a, b, c in zip(vdata_a, vdata_b, vdata_c)]) + fma = self.muladd(vdata_a, vdata_b, vdata_c) + assert fma == data_fma + # multiply and subtract, a*b - c + fms = self.mulsub(vdata_a, vdata_b, vdata_c) + data_fms = self.sub(data_fma, vdata_cx2) + assert fms == data_fms + # negate multiply and add, -(a*b) + c + nfma = self.nmuladd(vdata_a, vdata_b, vdata_c) + data_nfma = self.sub(vdata_cx2, data_fma) + assert nfma == data_nfma + # negate multiply and subtract, -(a*b) - c + nfms = self.nmulsub(vdata_a, vdata_b, vdata_c) + data_nfms = self.mul(data_fma, self.setall(-1)) + assert nfms == data_nfms + # multiply, add for odd elements and subtract even elements. 
+ # (a * b) -+ c + fmas = list(self.muladdsub(vdata_a, vdata_b, vdata_c)) + assert fmas[0::2] == list(data_fms)[0::2] + assert fmas[1::2] == list(data_fma)[1::2] + + def test_abs(self): + pinf, ninf, nan = self._pinfinity(), self._ninfinity(), self._nan() + data = self._data() + vdata = self.load(self._data()) + + abs_cases = ((-0, 0), (ninf, pinf), (pinf, pinf), (nan, nan)) + for case, desired in abs_cases: + data_abs = [desired]*self.nlanes + vabs = self.abs(self.setall(case)) + assert vabs == pytest.approx(data_abs, nan_ok=True) + + vabs = self.abs(self.mul(vdata, self.setall(-1))) + assert vabs == data + + def test_sqrt(self): + pinf, ninf, nan = self._pinfinity(), self._ninfinity(), self._nan() + data = self._data() + vdata = self.load(self._data()) + + sqrt_cases = ((-0.0, -0.0), (0.0, 0.0), (-1.0, nan), (ninf, nan), (pinf, pinf)) + for case, desired in sqrt_cases: + data_sqrt = [desired]*self.nlanes + sqrt = self.sqrt(self.setall(case)) + assert sqrt == pytest.approx(data_sqrt, nan_ok=True) + + data_sqrt = self.load([math.sqrt(x) for x in data]) # load to truncate precision + sqrt = self.sqrt(vdata) + assert sqrt == data_sqrt + + def test_square(self): + pinf, ninf, nan = self._pinfinity(), self._ninfinity(), self._nan() + data = self._data() + vdata = self.load(self._data()) + # square + square_cases = ((nan, nan), (pinf, pinf), (ninf, pinf)) + for case, desired in square_cases: + data_square = [desired]*self.nlanes + square = self.square(self.setall(case)) + assert square == pytest.approx(data_square, nan_ok=True) + + data_square = [x*x for x in data] + square = self.square(vdata) + assert square == data_square + + @pytest.mark.parametrize("intrin, func", [("ceil", math.ceil), + ("trunc", math.trunc), ("floor", math.floor), ("rint", round)]) + def test_rounding(self, intrin, func): + """ + Test intrinsics: + npyv_rint_##SFX + npyv_ceil_##SFX + npyv_trunc_##SFX + npyv_floor##SFX + """ + intrin_name = intrin + intrin = getattr(self, intrin) + pinf, ninf, nan = self._pinfinity(), self._ninfinity(), self._nan() + # special cases + round_cases = ((nan, nan), (pinf, pinf), (ninf, ninf)) + for case, desired in round_cases: + data_round = [desired]*self.nlanes + _round = intrin(self.setall(case)) + assert _round == pytest.approx(data_round, nan_ok=True) + + for x in range(0, 2**20, 256**2): + for w in (-1.05, -1.10, -1.15, 1.05, 1.10, 1.15): + data = self.load([(x+a)*w for a in range(self.nlanes)]) + data_round = [func(x) for x in data] + _round = intrin(data) + assert _round == data_round + + # test large numbers + for i in ( + 1.1529215045988576e+18, 4.6116860183954304e+18, + 5.902958103546122e+20, 2.3611832414184488e+21 + ): + x = self.setall(i) + y = intrin(x) + data_round = [func(n) for n in x] + assert y == data_round + + # signed zero + if intrin_name == "floor": + data_szero = (-0.0,) + else: + data_szero = (-0.0, -0.25, -0.30, -0.45, -0.5) + + for w in data_szero: + _round = self._to_unsigned(intrin(self.setall(w))) + data_round = self._to_unsigned(self.setall(-0.0)) + assert _round == data_round + + @pytest.mark.parametrize("intrin", [ + "max", "maxp", "maxn", "min", "minp", "minn" + ]) + def test_max_min(self, intrin): + """ + Test intrinsics: + npyv_max_##sfx + npyv_maxp_##sfx + npyv_maxn_##sfx + npyv_min_##sfx + npyv_minp_##sfx + npyv_minn_##sfx + npyv_reduce_max_##sfx + npyv_reduce_maxp_##sfx + npyv_reduce_maxn_##sfx + npyv_reduce_min_##sfx + npyv_reduce_minp_##sfx + npyv_reduce_minn_##sfx + """ + pinf, ninf, nan = self._pinfinity(), self._ninfinity(), self._nan() + chk_nan 
= {"xp": 1, "np": 1, "nn": 2, "xn": 2}.get(intrin[-2:], 0) + func = eval(intrin[:3]) + reduce_intrin = getattr(self, "reduce_" + intrin) + intrin = getattr(self, intrin) + hf_nlanes = self.nlanes//2 + + cases = ( + ([0.0, -0.0], [-0.0, 0.0]), + ([10, -10], [10, -10]), + ([pinf, 10], [10, ninf]), + ([10, pinf], [ninf, 10]), + ([10, -10], [10, -10]), + ([-10, 10], [-10, 10]) + ) + for op1, op2 in cases: + vdata_a = self.load(op1*hf_nlanes) + vdata_b = self.load(op2*hf_nlanes) + data = func(vdata_a, vdata_b) + simd = intrin(vdata_a, vdata_b) + assert simd == data + data = func(vdata_a) + simd = reduce_intrin(vdata_a) + assert simd == data + + if not chk_nan: + return + if chk_nan == 1: + test_nan = lambda a, b: ( + b if math.isnan(a) else a if math.isnan(b) else b + ) + else: + test_nan = lambda a, b: ( + nan if math.isnan(a) or math.isnan(b) else b + ) + cases = ( + (nan, 10), + (10, nan), + (nan, pinf), + (pinf, nan), + (nan, nan) + ) + for op1, op2 in cases: + vdata_ab = self.load([op1, op2]*hf_nlanes) + data = test_nan(op1, op2) + simd = reduce_intrin(vdata_ab) + assert simd == pytest.approx(data, nan_ok=True) + vdata_a = self.setall(op1) + vdata_b = self.setall(op2) + data = [data] * self.nlanes + simd = intrin(vdata_a, vdata_b) + assert simd == pytest.approx(data, nan_ok=True) + + def test_reciprocal(self): + pinf, ninf, nan = self._pinfinity(), self._ninfinity(), self._nan() + data = self._data() + vdata = self.load(self._data()) + + recip_cases = ((nan, nan), (pinf, 0.0), (ninf, -0.0), (0.0, pinf), (-0.0, ninf)) + for case, desired in recip_cases: + data_recip = [desired]*self.nlanes + recip = self.recip(self.setall(case)) + assert recip == pytest.approx(data_recip, nan_ok=True) + + data_recip = self.load([1/x for x in data]) # load to truncate precision + recip = self.recip(vdata) + assert recip == data_recip + + def test_special_cases(self): + """ + Compare Not NaN. 
Test intrinsics: + npyv_notnan_##SFX + """ + nnan = self.notnan(self.setall(self._nan())) + assert nnan == [0]*self.nlanes + + @pytest.mark.parametrize("intrin_name", [ + "rint", "trunc", "ceil", "floor" + ]) + def test_unary_invalid_fpexception(self, intrin_name): + intrin = getattr(self, intrin_name) + for d in [float("nan"), float("inf"), -float("inf")]: + v = self.setall(d) + clear_floatstatus() + intrin(v) + assert check_floatstatus(invalid=True) == False + + @pytest.mark.parametrize('py_comp,np_comp', [ + (operator.lt, "cmplt"), + (operator.le, "cmple"), + (operator.gt, "cmpgt"), + (operator.ge, "cmpge"), + (operator.eq, "cmpeq"), + (operator.ne, "cmpneq") + ]) + def test_comparison_with_nan(self, py_comp, np_comp): + pinf, ninf, nan = self._pinfinity(), self._ninfinity(), self._nan() + mask_true = self._true_mask() + + def to_bool(vector): + return [lane == mask_true for lane in vector] + + intrin = getattr(self, np_comp) + cmp_cases = ((0, nan), (nan, 0), (nan, nan), (pinf, nan), + (ninf, nan), (-0.0, +0.0)) + for case_operand1, case_operand2 in cmp_cases: + data_a = [case_operand1]*self.nlanes + data_b = [case_operand2]*self.nlanes + vdata_a = self.setall(case_operand1) + vdata_b = self.setall(case_operand2) + vcmp = to_bool(intrin(vdata_a, vdata_b)) + data_cmp = [py_comp(a, b) for a, b in zip(data_a, data_b)] + assert vcmp == data_cmp + + @pytest.mark.parametrize("intrin", ["any", "all"]) + @pytest.mark.parametrize("data", ( + [float("nan"), 0], + [0, float("nan")], + [float("nan"), 1], + [1, float("nan")], + [float("nan"), float("nan")], + [0.0, -0.0], + [-0.0, 0.0], + [1.0, -0.0] + )) + def test_operators_crosstest(self, intrin, data): + """ + Test intrinsics: + npyv_any_##SFX + npyv_all_##SFX + """ + data_a = self.load(data * self.nlanes) + func = eval(intrin) + intrin = getattr(self, intrin) + desired = func(data_a) + simd = intrin(data_a) + assert not not simd == desired + +class _SIMD_ALL(_Test_Utility): + """ + To test all vector types at once + """ + def test_memory_load(self): + data = self._data() + # unaligned load + load_data = self.load(data) + assert load_data == data + # aligned load + loada_data = self.loada(data) + assert loada_data == data + # stream load + loads_data = self.loads(data) + assert loads_data == data + # load lower part + loadl = self.loadl(data) + loadl_half = list(loadl)[:self.nlanes//2] + data_half = data[:self.nlanes//2] + assert loadl_half == data_half + assert loadl != data # detect overflow + + def test_memory_store(self): + data = self._data() + vdata = self.load(data) + # unaligned store + store = [0] * self.nlanes + self.store(store, vdata) + assert store == data + # aligned store + store_a = [0] * self.nlanes + self.storea(store_a, vdata) + assert store_a == data + # stream store + store_s = [0] * self.nlanes + self.stores(store_s, vdata) + assert store_s == data + # store lower part + store_l = [0] * self.nlanes + self.storel(store_l, vdata) + assert store_l[:self.nlanes//2] == data[:self.nlanes//2] + assert store_l != vdata # detect overflow + # store higher part + store_h = [0] * self.nlanes + self.storeh(store_h, vdata) + assert store_h[:self.nlanes//2] == data[self.nlanes//2:] + assert store_h != vdata # detect overflow + + @pytest.mark.parametrize("intrin, elsizes, scale, fill", [ + ("self.load_tillz, self.load_till", (32, 64), 1, [0xffff]), + ("self.load2_tillz, self.load2_till", (32, 64), 2, [0xffff, 0x7fff]), + ]) + def test_memory_partial_load(self, intrin, elsizes, scale, fill): + if self._scalar_size() not in elsizes: + 
return + npyv_load_tillz, npyv_load_till = eval(intrin) + data = self._data() + lanes = list(range(1, self.nlanes + 1)) + lanes += [self.nlanes**2, self.nlanes**4] # test out of range + for n in lanes: + load_till = npyv_load_till(data, n, *fill) + load_tillz = npyv_load_tillz(data, n) + n *= scale + data_till = data[:n] + fill * ((self.nlanes-n) // scale) + assert load_till == data_till + data_tillz = data[:n] + [0] * (self.nlanes-n) + assert load_tillz == data_tillz + + @pytest.mark.parametrize("intrin, elsizes, scale", [ + ("self.store_till", (32, 64), 1), + ("self.store2_till", (32, 64), 2), + ]) + def test_memory_partial_store(self, intrin, elsizes, scale): + if self._scalar_size() not in elsizes: + return + npyv_store_till = eval(intrin) + data = self._data() + data_rev = self._data(reverse=True) + vdata = self.load(data) + lanes = list(range(1, self.nlanes + 1)) + lanes += [self.nlanes**2, self.nlanes**4] + for n in lanes: + data_till = data_rev.copy() + data_till[:n*scale] = data[:n*scale] + store_till = self._data(reverse=True) + npyv_store_till(store_till, n, vdata) + assert store_till == data_till + + @pytest.mark.parametrize("intrin, elsizes, scale", [ + ("self.loadn", (32, 64), 1), + ("self.loadn2", (32, 64), 2), + ]) + def test_memory_noncont_load(self, intrin, elsizes, scale): + if self._scalar_size() not in elsizes: + return + npyv_loadn = eval(intrin) + for stride in range(-64, 64): + if stride < 0: + data = self._data(stride, -stride*self.nlanes) + data_stride = list(itertools.chain( + *zip(*[data[-i::stride] for i in range(scale, 0, -1)]) + )) + elif stride == 0: + data = self._data() + data_stride = data[0:scale] * (self.nlanes//scale) + else: + data = self._data(count=stride*self.nlanes) + data_stride = list(itertools.chain( + *zip(*[data[i::stride] for i in range(scale)])) + ) + data_stride = self.load(data_stride) # cast unsigned + loadn = npyv_loadn(data, stride) + assert loadn == data_stride + + @pytest.mark.parametrize("intrin, elsizes, scale, fill", [ + ("self.loadn_tillz, self.loadn_till", (32, 64), 1, [0xffff]), + ("self.loadn2_tillz, self.loadn2_till", (32, 64), 2, [0xffff, 0x7fff]), + ]) + def test_memory_noncont_partial_load(self, intrin, elsizes, scale, fill): + if self._scalar_size() not in elsizes: + return + npyv_loadn_tillz, npyv_loadn_till = eval(intrin) + lanes = list(range(1, self.nlanes + 1)) + lanes += [self.nlanes**2, self.nlanes**4] + for stride in range(-64, 64): + if stride < 0: + data = self._data(stride, -stride*self.nlanes) + data_stride = list(itertools.chain( + *zip(*[data[-i::stride] for i in range(scale, 0, -1)]) + )) + elif stride == 0: + data = self._data() + data_stride = data[0:scale] * (self.nlanes//scale) + else: + data = self._data(count=stride*self.nlanes) + data_stride = list(itertools.chain( + *zip(*[data[i::stride] for i in range(scale)]) + )) + data_stride = list(self.load(data_stride)) # cast unsigned + for n in lanes: + nscale = n * scale + llanes = self.nlanes - nscale + data_stride_till = ( + data_stride[:nscale] + fill * (llanes//scale) + ) + loadn_till = npyv_loadn_till(data, stride, n, *fill) + assert loadn_till == data_stride_till + data_stride_tillz = data_stride[:nscale] + [0] * llanes + loadn_tillz = npyv_loadn_tillz(data, stride, n) + assert loadn_tillz == data_stride_tillz + + @pytest.mark.parametrize("intrin, elsizes, scale", [ + ("self.storen", (32, 64), 1), + ("self.storen2", (32, 64), 2), + ]) + def test_memory_noncont_store(self, intrin, elsizes, scale): + if self._scalar_size() not in elsizes: + return + 
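        # Editor's note (not part of the upstream test): a non-contiguous store
        # writes lane i of the vector to element offset i * stride (scaled by the
        # interleave factor for the *2 variants); e.g. with stride=3 the lanes
        # [1, 2, 3, 4] land at indices 0, 3, 6 and 9, and a negative stride walks
        # backwards through the buffer.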
npyv_storen = eval(intrin) + data = self._data() + vdata = self.load(data) + hlanes = self.nlanes // scale + for stride in range(1, 64): + data_storen = [0xff] * stride * self.nlanes + for s in range(0, hlanes*stride, stride): + i = (s//stride)*scale + data_storen[s:s+scale] = data[i:i+scale] + storen = [0xff] * stride * self.nlanes + storen += [0x7f]*64 + npyv_storen(storen, stride, vdata) + assert storen[:-64] == data_storen + assert storen[-64:] == [0x7f]*64 # detect overflow + + for stride in range(-64, 0): + data_storen = [0xff] * -stride * self.nlanes + for s in range(0, hlanes*stride, stride): + i = (s//stride)*scale + data_storen[s-scale:s or None] = data[i:i+scale] + storen = [0x7f]*64 + storen += [0xff] * -stride * self.nlanes + npyv_storen(storen, stride, vdata) + assert storen[64:] == data_storen + assert storen[:64] == [0x7f]*64 # detect overflow + # stride 0 + data_storen = [0x7f] * self.nlanes + storen = data_storen.copy() + data_storen[0:scale] = data[-scale:] + npyv_storen(storen, 0, vdata) + assert storen == data_storen + + @pytest.mark.parametrize("intrin, elsizes, scale", [ + ("self.storen_till", (32, 64), 1), + ("self.storen2_till", (32, 64), 2), + ]) + def test_memory_noncont_partial_store(self, intrin, elsizes, scale): + if self._scalar_size() not in elsizes: + return + npyv_storen_till = eval(intrin) + data = self._data() + vdata = self.load(data) + lanes = list(range(1, self.nlanes + 1)) + lanes += [self.nlanes**2, self.nlanes**4] + hlanes = self.nlanes // scale + for stride in range(1, 64): + for n in lanes: + data_till = [0xff] * stride * self.nlanes + tdata = data[:n*scale] + [0xff] * (self.nlanes-n*scale) + for s in range(0, hlanes*stride, stride)[:n]: + i = (s//stride)*scale + data_till[s:s+scale] = tdata[i:i+scale] + storen_till = [0xff] * stride * self.nlanes + storen_till += [0x7f]*64 + npyv_storen_till(storen_till, stride, n, vdata) + assert storen_till[:-64] == data_till + assert storen_till[-64:] == [0x7f]*64 # detect overflow + + for stride in range(-64, 0): + for n in lanes: + data_till = [0xff] * -stride * self.nlanes + tdata = data[:n*scale] + [0xff] * (self.nlanes-n*scale) + for s in range(0, hlanes*stride, stride)[:n]: + i = (s//stride)*scale + data_till[s-scale:s or None] = tdata[i:i+scale] + storen_till = [0x7f]*64 + storen_till += [0xff] * -stride * self.nlanes + npyv_storen_till(storen_till, stride, n, vdata) + assert storen_till[64:] == data_till + assert storen_till[:64] == [0x7f]*64 # detect overflow + + # stride 0 + for n in lanes: + data_till = [0x7f] * self.nlanes + storen_till = data_till.copy() + data_till[0:scale] = data[:n*scale][-scale:] + npyv_storen_till(storen_till, 0, n, vdata) + assert storen_till == data_till + + @pytest.mark.parametrize("intrin, table_size, elsize", [ + ("self.lut32", 32, 32), + ("self.lut16", 16, 64) + ]) + def test_lut(self, intrin, table_size, elsize): + """ + Test lookup table intrinsics: + npyv_lut32_##sfx + npyv_lut16_##sfx + """ + if elsize != self._scalar_size(): + return + intrin = eval(intrin) + idx_itrin = getattr(self.npyv, f"setall_u{elsize}") + table = range(0, table_size) + for i in table: + broadi = self.setall(i) + idx = idx_itrin(i) + lut = intrin(table, idx) + assert lut == broadi + + def test_misc(self): + broadcast_zero = self.zero() + assert broadcast_zero == [0] * self.nlanes + for i in range(1, 10): + broadcasti = self.setall(i) + assert broadcasti == [i] * self.nlanes + + data_a, data_b = self._data(), self._data(reverse=True) + vdata_a, vdata_b = self.load(data_a), self.load(data_b) + 
+ # py level of npyv_set_* don't support ignoring the extra specified lanes or + # fill non-specified lanes with zero. + vset = self.set(*data_a) + assert vset == data_a + # py level of npyv_setf_* don't support ignoring the extra specified lanes or + # fill non-specified lanes with the specified scalar. + vsetf = self.setf(10, *data_a) + assert vsetf == data_a + + # We're testing the sanity of _simd's type-vector, + # reinterpret* intrinsics itself are tested via compiler + # during the build of _simd module + sfxes = ["u8", "s8", "u16", "s16", "u32", "s32", "u64", "s64"] + if self.npyv.simd_f64: + sfxes.append("f64") + if self.npyv.simd_f32: + sfxes.append("f32") + for sfx in sfxes: + vec_name = getattr(self, "reinterpret_" + sfx)(vdata_a).__name__ + assert vec_name == "npyv_" + sfx + + # select & mask operations + select_a = self.select(self.cmpeq(self.zero(), self.zero()), vdata_a, vdata_b) + assert select_a == data_a + select_b = self.select(self.cmpneq(self.zero(), self.zero()), vdata_a, vdata_b) + assert select_b == data_b + + # test extract elements + assert self.extract0(vdata_b) == vdata_b[0] + + # cleanup intrinsic is only used with AVX for + # zeroing registers to avoid the AVX-SSE transition penalty, + # so nothing to test here + self.npyv.cleanup() + + def test_reorder(self): + data_a, data_b = self._data(), self._data(reverse=True) + vdata_a, vdata_b = self.load(data_a), self.load(data_b) + # lower half part + data_a_lo = data_a[:self.nlanes//2] + data_b_lo = data_b[:self.nlanes//2] + # higher half part + data_a_hi = data_a[self.nlanes//2:] + data_b_hi = data_b[self.nlanes//2:] + # combine two lower parts + combinel = self.combinel(vdata_a, vdata_b) + assert combinel == data_a_lo + data_b_lo + # combine two higher parts + combineh = self.combineh(vdata_a, vdata_b) + assert combineh == data_a_hi + data_b_hi + # combine x2 + combine = self.combine(vdata_a, vdata_b) + assert combine == (data_a_lo + data_b_lo, data_a_hi + data_b_hi) + + # zip(interleave) + data_zipl = self.load([ + v for p in zip(data_a_lo, data_b_lo) for v in p + ]) + data_ziph = self.load([ + v for p in zip(data_a_hi, data_b_hi) for v in p + ]) + vzip = self.zip(vdata_a, vdata_b) + assert vzip == (data_zipl, data_ziph) + vzip = [0]*self.nlanes*2 + self._x2("store")(vzip, (vdata_a, vdata_b)) + assert vzip == list(data_zipl) + list(data_ziph) + + # unzip(deinterleave) + unzip = self.unzip(data_zipl, data_ziph) + assert unzip == (data_a, data_b) + unzip = self._x2("load")(list(data_zipl) + list(data_ziph)) + assert unzip == (data_a, data_b) + + def test_reorder_rev64(self): + # Reverse elements of each 64-bit lane + ssize = self._scalar_size() + if ssize == 64: + return + data_rev64 = [ + y for x in range(0, self.nlanes, 64//ssize) + for y in reversed(range(x, x + 64//ssize)) + ] + rev64 = self.rev64(self.load(range(self.nlanes))) + assert rev64 == data_rev64 + + def test_reorder_permi128(self): + """ + Test permuting elements for each 128-bit lane. 
+ npyv_permi128_##sfx + """ + ssize = self._scalar_size() + if ssize < 32: + return + data = self.load(self._data()) + permn = 128//ssize + permd = permn-1 + nlane128 = self.nlanes//permn + shfl = [0, 1] if ssize == 64 else [0, 2, 4, 6] + for i in range(permn): + indices = [(i >> shf) & permd for shf in shfl] + vperm = self.permi128(data, *indices) + data_vperm = [ + data[j + (e & -permn)] + for e, j in enumerate(indices*nlane128) + ] + assert vperm == data_vperm + + @pytest.mark.parametrize('func, intrin', [ + (operator.lt, "cmplt"), + (operator.le, "cmple"), + (operator.gt, "cmpgt"), + (operator.ge, "cmpge"), + (operator.eq, "cmpeq") + ]) + def test_operators_comparison(self, func, intrin): + if self._is_fp(): + data_a = self._data() + else: + data_a = self._data(self._int_max() - self.nlanes) + data_b = self._data(self._int_min(), reverse=True) + vdata_a, vdata_b = self.load(data_a), self.load(data_b) + intrin = getattr(self, intrin) + + mask_true = self._true_mask() + def to_bool(vector): + return [lane == mask_true for lane in vector] + + data_cmp = [func(a, b) for a, b in zip(data_a, data_b)] + cmp = to_bool(intrin(vdata_a, vdata_b)) + assert cmp == data_cmp + + def test_operators_logical(self): + if self._is_fp(): + data_a = self._data() + else: + data_a = self._data(self._int_max() - self.nlanes) + data_b = self._data(self._int_min(), reverse=True) + vdata_a, vdata_b = self.load(data_a), self.load(data_b) + + if self._is_fp(): + data_cast_a = self._to_unsigned(vdata_a) + data_cast_b = self._to_unsigned(vdata_b) + cast, cast_data = self._to_unsigned, self._to_unsigned + else: + data_cast_a, data_cast_b = data_a, data_b + cast, cast_data = lambda a: a, self.load + + data_xor = cast_data([a ^ b for a, b in zip(data_cast_a, data_cast_b)]) + vxor = cast(self.xor(vdata_a, vdata_b)) + assert vxor == data_xor + + data_or = cast_data([a | b for a, b in zip(data_cast_a, data_cast_b)]) + vor = cast(getattr(self, "or")(vdata_a, vdata_b)) + assert vor == data_or + + data_and = cast_data([a & b for a, b in zip(data_cast_a, data_cast_b)]) + vand = cast(getattr(self, "and")(vdata_a, vdata_b)) + assert vand == data_and + + data_not = cast_data([~a for a in data_cast_a]) + vnot = cast(getattr(self, "not")(vdata_a)) + assert vnot == data_not + + if self.sfx not in ("u8"): + return + data_andc = [a & ~b for a, b in zip(data_cast_a, data_cast_b)] + vandc = cast(getattr(self, "andc")(vdata_a, vdata_b)) + assert vandc == data_andc + + @pytest.mark.parametrize("intrin", ["any", "all"]) + @pytest.mark.parametrize("data", ( + [1, 2, 3, 4], + [-1, -2, -3, -4], + [0, 1, 2, 3, 4], + [0x7f, 0x7fff, 0x7fffffff, 0x7fffffffffffffff], + [0, -1, -2, -3, 4], + [0], + [1], + [-1] + )) + def test_operators_crosstest(self, intrin, data): + """ + Test intrinsics: + npyv_any_##SFX + npyv_all_##SFX + """ + data_a = self.load(data * self.nlanes) + func = eval(intrin) + intrin = getattr(self, intrin) + desired = func(data_a) + simd = intrin(data_a) + assert not not simd == desired + + def test_conversion_boolean(self): + bsfx = "b" + self.sfx[1:] + to_boolean = getattr(self.npyv, "cvt_%s_%s" % (bsfx, self.sfx)) + from_boolean = getattr(self.npyv, "cvt_%s_%s" % (self.sfx, bsfx)) + + false_vb = to_boolean(self.setall(0)) + true_vb = self.cmpeq(self.setall(0), self.setall(0)) + assert false_vb != true_vb + + false_vsfx = from_boolean(false_vb) + true_vsfx = from_boolean(true_vb) + assert false_vsfx != true_vsfx + + def test_conversion_expand(self): + """ + Test expand intrinsics: + npyv_expand_u16_u8 + npyv_expand_u32_u16 + """ 
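        # Editor's note (not part of the upstream test): expanding doubles the lane
        # width (u8 -> u16, u16 -> u32), so the result no longer fits in a single
        # register; the intrinsic returns a pair of vectors holding the widened
        # lower and upper halves of the input, which is what the tuple comparison
        # below checks.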
+ if self.sfx not in ("u8", "u16"): + return + totype = self.sfx[0]+str(int(self.sfx[1:])*2) + expand = getattr(self.npyv, f"expand_{totype}_{self.sfx}") + # close enough from the edge to detect any deviation + data = self._data(self._int_max() - self.nlanes) + vdata = self.load(data) + edata = expand(vdata) + # lower half part + data_lo = data[:self.nlanes//2] + # higher half part + data_hi = data[self.nlanes//2:] + assert edata == (data_lo, data_hi) + + def test_arithmetic_subadd(self): + if self._is_fp(): + data_a = self._data() + else: + data_a = self._data(self._int_max() - self.nlanes) + data_b = self._data(self._int_min(), reverse=True) + vdata_a, vdata_b = self.load(data_a), self.load(data_b) + + # non-saturated + data_add = self.load([a + b for a, b in zip(data_a, data_b)]) # load to cast + add = self.add(vdata_a, vdata_b) + assert add == data_add + data_sub = self.load([a - b for a, b in zip(data_a, data_b)]) + sub = self.sub(vdata_a, vdata_b) + assert sub == data_sub + + def test_arithmetic_mul(self): + if self.sfx in ("u64", "s64"): + return + + if self._is_fp(): + data_a = self._data() + else: + data_a = self._data(self._int_max() - self.nlanes) + data_b = self._data(self._int_min(), reverse=True) + vdata_a, vdata_b = self.load(data_a), self.load(data_b) + + data_mul = self.load([a * b for a, b in zip(data_a, data_b)]) + mul = self.mul(vdata_a, vdata_b) + assert mul == data_mul + + def test_arithmetic_div(self): + if not self._is_fp(): + return + + data_a, data_b = self._data(), self._data(reverse=True) + vdata_a, vdata_b = self.load(data_a), self.load(data_b) + + # load to truncate f64 to precision of f32 + data_div = self.load([a / b for a, b in zip(data_a, data_b)]) + div = self.div(vdata_a, vdata_b) + assert div == data_div + + def test_arithmetic_intdiv(self): + """ + Test integer division intrinsics: + npyv_divisor_##sfx + npyv_divc_##sfx + """ + if self._is_fp(): + return + + int_min = self._int_min() + def trunc_div(a, d): + """ + Divide towards zero works with large integers > 2^53, + and wrap around overflow similar to what C does. 
+ """ + if d == -1 and a == int_min: + return a + sign_a, sign_d = a < 0, d < 0 + if a == 0 or sign_a == sign_d: + return a // d + return (a + sign_d - sign_a) // d + 1 + + data = [1, -int_min] # to test overflow + data += range(0, 2**8, 2**5) + data += range(0, 2**8, 2**5-1) + bsize = self._scalar_size() + if bsize > 8: + data += range(2**8, 2**16, 2**13) + data += range(2**8, 2**16, 2**13-1) + if bsize > 16: + data += range(2**16, 2**32, 2**29) + data += range(2**16, 2**32, 2**29-1) + if bsize > 32: + data += range(2**32, 2**64, 2**61) + data += range(2**32, 2**64, 2**61-1) + # negate + data += [-x for x in data] + for dividend, divisor in itertools.product(data, data): + divisor = self.setall(divisor)[0] # cast + if divisor == 0: + continue + dividend = self.load(self._data(dividend)) + data_divc = [trunc_div(a, divisor) for a in dividend] + divisor_parms = self.divisor(divisor) + divc = self.divc(dividend, divisor_parms) + assert divc == data_divc + + def test_arithmetic_reduce_sum(self): + """ + Test reduce sum intrinsics: + npyv_sum_##sfx + """ + if self.sfx not in ("u32", "u64", "f32", "f64"): + return + # reduce sum + data = self._data() + vdata = self.load(data) + + data_sum = sum(data) + vsum = self.sum(vdata) + assert vsum == data_sum + + def test_arithmetic_reduce_sumup(self): + """ + Test extend reduce sum intrinsics: + npyv_sumup_##sfx + """ + if self.sfx not in ("u8", "u16"): + return + rdata = (0, self.nlanes, self._int_min(), self._int_max()-self.nlanes) + for r in rdata: + data = self._data(r) + vdata = self.load(data) + data_sum = sum(data) + vsum = self.sumup(vdata) + assert vsum == data_sum + + def test_mask_conditional(self): + """ + Conditional addition and subtraction for all supported data types. + Test intrinsics: + npyv_ifadd_##SFX, npyv_ifsub_##SFX + """ + vdata_a = self.load(self._data()) + vdata_b = self.load(self._data(reverse=True)) + true_mask = self.cmpeq(self.zero(), self.zero()) + false_mask = self.cmpneq(self.zero(), self.zero()) + + data_sub = self.sub(vdata_b, vdata_a) + ifsub = self.ifsub(true_mask, vdata_b, vdata_a, vdata_b) + assert ifsub == data_sub + ifsub = self.ifsub(false_mask, vdata_a, vdata_b, vdata_b) + assert ifsub == vdata_b + + data_add = self.add(vdata_b, vdata_a) + ifadd = self.ifadd(true_mask, vdata_b, vdata_a, vdata_b) + assert ifadd == data_add + ifadd = self.ifadd(false_mask, vdata_a, vdata_b, vdata_b) + assert ifadd == vdata_b + + if not self._is_fp(): + return + data_div = self.div(vdata_b, vdata_a) + ifdiv = self.ifdiv(true_mask, vdata_b, vdata_a, vdata_b) + assert ifdiv == data_div + ifdivz = self.ifdivz(true_mask, vdata_b, vdata_a) + assert ifdivz == data_div + ifdiv = self.ifdiv(false_mask, vdata_a, vdata_b, vdata_b) + assert ifdiv == vdata_b + ifdivz = self.ifdivz(false_mask, vdata_a, vdata_b) + assert ifdivz == self.zero() + +bool_sfx = ("b8", "b16", "b32", "b64") +int_sfx = ("u8", "s8", "u16", "s16", "u32", "s32", "u64", "s64") +fp_sfx = ("f32", "f64") +all_sfx = int_sfx + fp_sfx +tests_registry = { + bool_sfx: _SIMD_BOOL, + int_sfx : _SIMD_INT, + fp_sfx : _SIMD_FP, + ("f32",): _SIMD_FP32, + ("f64",): _SIMD_FP64, + all_sfx : _SIMD_ALL +} +for target_name, npyv in targets.items(): + simd_width = npyv.simd if npyv else '' + pretty_name = target_name.split('__') # multi-target separator + if len(pretty_name) > 1: + # multi-target + pretty_name = f"({' '.join(pretty_name)})" + else: + pretty_name = pretty_name[0] + + skip = "" + skip_sfx = dict() + if not npyv: + skip = f"target '{pretty_name}' isn't supported by current 
machine" + elif not npyv.simd: + skip = f"target '{pretty_name}' isn't supported by NPYV" + else: + if not npyv.simd_f32: + skip_sfx["f32"] = f"target '{pretty_name}' "\ + "doesn't support single-precision" + if not npyv.simd_f64: + skip_sfx["f64"] = f"target '{pretty_name}' doesn't"\ + "support double-precision" + + for sfxes, cls in tests_registry.items(): + for sfx in sfxes: + skip_m = skip_sfx.get(sfx, skip) + inhr = (cls,) + attr = dict(npyv=targets[target_name], sfx=sfx, target_name=target_name) + tcls = type(f"Test{cls.__name__}_{simd_width}_{target_name}_{sfx}", inhr, attr) + if skip_m: + pytest.mark.skip(reason=skip_m)(tcls) + globals()[tcls.__name__] = tcls diff --git a/.venv/lib/python3.12/site-packages/numpy/core/tests/test_simd_module.py b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_simd_module.py new file mode 100644 index 00000000..4fbaa9f3 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_simd_module.py @@ -0,0 +1,101 @@ +import pytest +from numpy.core._simd import targets +""" +This testing unit only for checking the sanity of common functionality, +therefore all we need is just to take one submodule that represents any +of enabled SIMD extensions to run the test on it and the second submodule +required to run only one check related to the possibility of mixing +the data types among each submodule. +""" +npyvs = [npyv_mod for npyv_mod in targets.values() if npyv_mod and npyv_mod.simd] +npyv, npyv2 = (npyvs + [None, None])[:2] + +unsigned_sfx = ["u8", "u16", "u32", "u64"] +signed_sfx = ["s8", "s16", "s32", "s64"] +fp_sfx = [] +if npyv and npyv.simd_f32: + fp_sfx.append("f32") +if npyv and npyv.simd_f64: + fp_sfx.append("f64") + +int_sfx = unsigned_sfx + signed_sfx +all_sfx = unsigned_sfx + int_sfx + +@pytest.mark.skipif(not npyv, reason="could not find any SIMD extension with NPYV support") +class Test_SIMD_MODULE: + + @pytest.mark.parametrize('sfx', all_sfx) + def test_num_lanes(self, sfx): + nlanes = getattr(npyv, "nlanes_" + sfx) + vector = getattr(npyv, "setall_" + sfx)(1) + assert len(vector) == nlanes + + @pytest.mark.parametrize('sfx', all_sfx) + def test_type_name(self, sfx): + vector = getattr(npyv, "setall_" + sfx)(1) + assert vector.__name__ == "npyv_" + sfx + + def test_raises(self): + a, b = [npyv.setall_u32(1)]*2 + for sfx in all_sfx: + vcb = lambda intrin: getattr(npyv, f"{intrin}_{sfx}") + pytest.raises(TypeError, vcb("add"), a) + pytest.raises(TypeError, vcb("add"), a, b, a) + pytest.raises(TypeError, vcb("setall")) + pytest.raises(TypeError, vcb("setall"), [1]) + pytest.raises(TypeError, vcb("load"), 1) + pytest.raises(ValueError, vcb("load"), [1]) + pytest.raises(ValueError, vcb("store"), [1], getattr(npyv, f"reinterpret_{sfx}_u32")(a)) + + @pytest.mark.skipif(not npyv2, reason=( + "could not find a second SIMD extension with NPYV support" + )) + def test_nomix(self): + # mix among submodules isn't allowed + a = npyv.setall_u32(1) + a2 = npyv2.setall_u32(1) + pytest.raises(TypeError, npyv.add_u32, a2, a2) + pytest.raises(TypeError, npyv2.add_u32, a, a) + + @pytest.mark.parametrize('sfx', unsigned_sfx) + def test_unsigned_overflow(self, sfx): + nlanes = getattr(npyv, "nlanes_" + sfx) + maxu = (1 << int(sfx[1:])) - 1 + maxu_72 = (1 << 72) - 1 + lane = getattr(npyv, "setall_" + sfx)(maxu_72)[0] + assert lane == maxu + lanes = getattr(npyv, "load_" + sfx)([maxu_72] * nlanes) + assert lanes == [maxu] * nlanes + lane = getattr(npyv, "setall_" + sfx)(-1)[0] + assert lane == maxu + lanes = getattr(npyv, "load_" + sfx)([-1] 
* nlanes) + assert lanes == [maxu] * nlanes + + @pytest.mark.parametrize('sfx', signed_sfx) + def test_signed_overflow(self, sfx): + nlanes = getattr(npyv, "nlanes_" + sfx) + maxs_72 = (1 << 71) - 1 + lane = getattr(npyv, "setall_" + sfx)(maxs_72)[0] + assert lane == -1 + lanes = getattr(npyv, "load_" + sfx)([maxs_72] * nlanes) + assert lanes == [-1] * nlanes + mins_72 = -1 << 71 + lane = getattr(npyv, "setall_" + sfx)(mins_72)[0] + assert lane == 0 + lanes = getattr(npyv, "load_" + sfx)([mins_72] * nlanes) + assert lanes == [0] * nlanes + + def test_truncate_f32(self): + if not npyv.simd_f32: + pytest.skip("F32 isn't support by the SIMD extension") + f32 = npyv.setall_f32(0.1)[0] + assert f32 != 0.1 + assert round(f32, 1) == 0.1 + + def test_compare(self): + data_range = range(0, npyv.nlanes_u32) + vdata = npyv.load_u32(data_range) + assert vdata == list(data_range) + assert vdata == tuple(data_range) + for i in data_range: + assert vdata[i] == data_range[i] diff --git a/.venv/lib/python3.12/site-packages/numpy/core/tests/test_strings.py b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_strings.py new file mode 100644 index 00000000..42f775e8 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_strings.py @@ -0,0 +1,99 @@ +import pytest + +import operator +import numpy as np + +from numpy.testing import assert_array_equal + + +COMPARISONS = [ + (operator.eq, np.equal, "=="), + (operator.ne, np.not_equal, "!="), + (operator.lt, np.less, "<"), + (operator.le, np.less_equal, "<="), + (operator.gt, np.greater, ">"), + (operator.ge, np.greater_equal, ">="), +] + + +@pytest.mark.parametrize(["op", "ufunc", "sym"], COMPARISONS) +def test_mixed_string_comparison_ufuncs_fail(op, ufunc, sym): + arr_string = np.array(["a", "b"], dtype="S") + arr_unicode = np.array(["a", "c"], dtype="U") + + with pytest.raises(TypeError, match="did not contain a loop"): + ufunc(arr_string, arr_unicode) + + with pytest.raises(TypeError, match="did not contain a loop"): + ufunc(arr_unicode, arr_string) + +@pytest.mark.parametrize(["op", "ufunc", "sym"], COMPARISONS) +def test_mixed_string_comparisons_ufuncs_with_cast(op, ufunc, sym): + arr_string = np.array(["a", "b"], dtype="S") + arr_unicode = np.array(["a", "c"], dtype="U") + + # While there is no loop, manual casting is acceptable: + res1 = ufunc(arr_string, arr_unicode, signature="UU->?", casting="unsafe") + res2 = ufunc(arr_string, arr_unicode, signature="SS->?", casting="unsafe") + + expected = op(arr_string.astype('U'), arr_unicode) + assert_array_equal(res1, expected) + assert_array_equal(res2, expected) + + +@pytest.mark.parametrize(["op", "ufunc", "sym"], COMPARISONS) +@pytest.mark.parametrize("dtypes", [ + ("S2", "S2"), ("S2", "S10"), + ("<U1", "<U1"), ("<U1", ">U1"), (">U1", ">U1"), + ("<U1", "<U10"), ("<U1", ">U10")]) +@pytest.mark.parametrize("aligned", [True, False]) +def test_string_comparisons(op, ufunc, sym, dtypes, aligned): + # ensure native byte-order for the first view to stay within unicode range + native_dt = np.dtype(dtypes[0]).newbyteorder("=") + arr = np.arange(2**15).view(native_dt).astype(dtypes[0]) + if not aligned: + # Make `arr` unaligned: + new = np.zeros(arr.nbytes + 1, dtype=np.uint8)[1:].view(dtypes[0]) + new[...] 
= arr + arr = new + + arr2 = arr.astype(dtypes[1], copy=True) + np.random.shuffle(arr2) + arr[0] = arr2[0] # make sure one matches + + expected = [op(d1, d2) for d1, d2 in zip(arr.tolist(), arr2.tolist())] + assert_array_equal(op(arr, arr2), expected) + assert_array_equal(ufunc(arr, arr2), expected) + assert_array_equal(np.compare_chararrays(arr, arr2, sym, False), expected) + + expected = [op(d2, d1) for d1, d2 in zip(arr.tolist(), arr2.tolist())] + assert_array_equal(op(arr2, arr), expected) + assert_array_equal(ufunc(arr2, arr), expected) + assert_array_equal(np.compare_chararrays(arr2, arr, sym, False), expected) + + +@pytest.mark.parametrize(["op", "ufunc", "sym"], COMPARISONS) +@pytest.mark.parametrize("dtypes", [ + ("S2", "S2"), ("S2", "S10"), ("<U1", "<U1"), ("<U1", ">U10")]) +def test_string_comparisons_empty(op, ufunc, sym, dtypes): + arr = np.empty((1, 0, 1, 5), dtype=dtypes[0]) + arr2 = np.empty((100, 1, 0, 1), dtype=dtypes[1]) + + expected = np.empty(np.broadcast_shapes(arr.shape, arr2.shape), dtype=bool) + assert_array_equal(op(arr, arr2), expected) + assert_array_equal(ufunc(arr, arr2), expected) + assert_array_equal(np.compare_chararrays(arr, arr2, sym, False), expected) + + +@pytest.mark.parametrize("str_dt", ["S", "U"]) +@pytest.mark.parametrize("float_dt", np.typecodes["AllFloat"]) +def test_float_to_string_cast(str_dt, float_dt): + float_dt = np.dtype(float_dt) + fi = np.finfo(float_dt) + arr = np.array([np.nan, np.inf, -np.inf, fi.max, fi.min], dtype=float_dt) + expected = ["nan", "inf", "-inf", repr(fi.max), repr(fi.min)] + if float_dt.kind == 'c': + expected = [f"({r}+0j)" for r in expected] + + res = arr.astype(str_dt) + assert_array_equal(res, np.array(expected, dtype=str_dt)) diff --git a/.venv/lib/python3.12/site-packages/numpy/core/tests/test_ufunc.py b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_ufunc.py new file mode 100644 index 00000000..9fbc4b2d --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_ufunc.py @@ -0,0 +1,2996 @@ +import warnings +import itertools +import sys +import ctypes as ct + +import pytest +from pytest import param + +import numpy as np +import numpy.core._umath_tests as umt +import numpy.linalg._umath_linalg as uml +import numpy.core._operand_flag_tests as opflag_tests +import numpy.core._rational_tests as _rational_tests +from numpy.testing import ( + assert_, assert_equal, assert_raises, assert_array_equal, + assert_almost_equal, assert_array_almost_equal, assert_no_warnings, + assert_allclose, HAS_REFCOUNT, suppress_warnings, IS_WASM, IS_PYPY, + ) +from numpy.testing._private.utils import requires_memory +from numpy.compat import pickle + + +UNARY_UFUNCS = [obj for obj in np.core.umath.__dict__.values() + if isinstance(obj, np.ufunc)] +UNARY_OBJECT_UFUNCS = [uf for uf in UNARY_UFUNCS if "O->O" in uf.types] + + +class TestUfuncKwargs: + def test_kwarg_exact(self): + assert_raises(TypeError, np.add, 1, 2, castingx='safe') + assert_raises(TypeError, np.add, 1, 2, dtypex=int) + assert_raises(TypeError, np.add, 1, 2, extobjx=[4096]) + assert_raises(TypeError, np.add, 1, 2, outx=None) + assert_raises(TypeError, np.add, 1, 2, sigx='ii->i') + assert_raises(TypeError, np.add, 1, 2, signaturex='ii->i') + assert_raises(TypeError, np.add, 1, 2, subokx=False) + assert_raises(TypeError, np.add, 1, 2, wherex=[True]) + + def test_sig_signature(self): + assert_raises(TypeError, np.add, 1, 2, sig='ii->i', + signature='ii->i') + + def test_sig_dtype(self): + assert_raises(TypeError, np.add, 1, 2, sig='ii->i', + 
dtype=int) + assert_raises(TypeError, np.add, 1, 2, signature='ii->i', + dtype=int) + + def test_extobj_refcount(self): + # Should not segfault with USE_DEBUG. + assert_raises(TypeError, np.add, 1, 2, extobj=[4096], parrot=True) + + +class TestUfuncGenericLoops: + """Test generic loops. + + The loops to be tested are: + + PyUFunc_ff_f_As_dd_d + PyUFunc_ff_f + PyUFunc_dd_d + PyUFunc_gg_g + PyUFunc_FF_F_As_DD_D + PyUFunc_DD_D + PyUFunc_FF_F + PyUFunc_GG_G + PyUFunc_OO_O + PyUFunc_OO_O_method + PyUFunc_f_f_As_d_d + PyUFunc_d_d + PyUFunc_f_f + PyUFunc_g_g + PyUFunc_F_F_As_D_D + PyUFunc_F_F + PyUFunc_D_D + PyUFunc_G_G + PyUFunc_O_O + PyUFunc_O_O_method + PyUFunc_On_Om + + Where: + + f -- float + d -- double + g -- long double + F -- complex float + D -- complex double + G -- complex long double + O -- python object + + It is difficult to assure that each of these loops is entered from the + Python level as the special cased loops are a moving target and the + corresponding types are architecture dependent. We probably need to + define C level testing ufuncs to get at them. For the time being, I've + just looked at the signatures registered in the build directory to find + relevant functions. + + """ + np_dtypes = [ + (np.single, np.single), (np.single, np.double), + (np.csingle, np.csingle), (np.csingle, np.cdouble), + (np.double, np.double), (np.longdouble, np.longdouble), + (np.cdouble, np.cdouble), (np.clongdouble, np.clongdouble)] + + @pytest.mark.parametrize('input_dtype,output_dtype', np_dtypes) + def test_unary_PyUFunc(self, input_dtype, output_dtype, f=np.exp, x=0, y=1): + xs = np.full(10, input_dtype(x), dtype=output_dtype) + ys = f(xs)[::2] + assert_allclose(ys, y) + assert_equal(ys.dtype, output_dtype) + + def f2(x, y): + return x**y + + @pytest.mark.parametrize('input_dtype,output_dtype', np_dtypes) + def test_binary_PyUFunc(self, input_dtype, output_dtype, f=f2, x=0, y=1): + xs = np.full(10, input_dtype(x), dtype=output_dtype) + ys = f(xs, xs)[::2] + assert_allclose(ys, y) + assert_equal(ys.dtype, output_dtype) + + # class to use in testing object method loops + class foo: + def conjugate(self): + return np.bool_(1) + + def logical_xor(self, obj): + return np.bool_(1) + + def test_unary_PyUFunc_O_O(self): + x = np.ones(10, dtype=object) + assert_(np.all(np.abs(x) == 1)) + + def test_unary_PyUFunc_O_O_method_simple(self, foo=foo): + x = np.full(10, foo(), dtype=object) + assert_(np.all(np.conjugate(x) == True)) + + def test_binary_PyUFunc_OO_O(self): + x = np.ones(10, dtype=object) + assert_(np.all(np.add(x, x) == 2)) + + def test_binary_PyUFunc_OO_O_method(self, foo=foo): + x = np.full(10, foo(), dtype=object) + assert_(np.all(np.logical_xor(x, x))) + + def test_binary_PyUFunc_On_Om_method(self, foo=foo): + x = np.full((10, 2, 3), foo(), dtype=object) + assert_(np.all(np.logical_xor(x, x))) + + def test_python_complex_conjugate(self): + # The conjugate ufunc should fall back to calling the method: + arr = np.array([1+2j, 3-4j], dtype="O") + assert isinstance(arr[0], complex) + res = np.conjugate(arr) + assert res.dtype == np.dtype("O") + assert_array_equal(res, np.array([1-2j, 3+4j], dtype="O")) + + @pytest.mark.parametrize("ufunc", UNARY_OBJECT_UFUNCS) + def test_unary_PyUFunc_O_O_method_full(self, ufunc): + """Compare the result of the object loop with non-object one""" + val = np.float64(np.pi/4) + + class MyFloat(np.float64): + def __getattr__(self, attr): + try: + return super().__getattr__(attr) + except AttributeError: + return lambda: getattr(np.core.umath, attr)(val) + + # 
Use 0-D arrays, to ensure the same element call + num_arr = np.array(val, dtype=np.float64) + obj_arr = np.array(MyFloat(val), dtype="O") + + with np.errstate(all="raise"): + try: + res_num = ufunc(num_arr) + except Exception as exc: + with assert_raises(type(exc)): + ufunc(obj_arr) + else: + res_obj = ufunc(obj_arr) + assert_array_almost_equal(res_num.astype("O"), res_obj) + + +def _pickleable_module_global(): + pass + + +class TestUfunc: + def test_pickle(self): + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + assert_(pickle.loads(pickle.dumps(np.sin, + protocol=proto)) is np.sin) + + # Check that ufunc not defined in the top level numpy namespace + # such as numpy.core._rational_tests.test_add can also be pickled + res = pickle.loads(pickle.dumps(_rational_tests.test_add, + protocol=proto)) + assert_(res is _rational_tests.test_add) + + def test_pickle_withstring(self): + astring = (b"cnumpy.core\n_ufunc_reconstruct\np0\n" + b"(S'numpy.core.umath'\np1\nS'cos'\np2\ntp3\nRp4\n.") + assert_(pickle.loads(astring) is np.cos) + + @pytest.mark.skipif(IS_PYPY, reason="'is' check does not work on PyPy") + def test_pickle_name_is_qualname(self): + # This tests that a simplification of our ufunc pickle code will + # lead to allowing qualnames as names. Future ufuncs should + # possible add a specific qualname, or a hook into pickling instead + # (dask+numba may benefit). + _pickleable_module_global.ufunc = umt._pickleable_module_global_ufunc + obj = pickle.loads(pickle.dumps(_pickleable_module_global.ufunc)) + assert obj is umt._pickleable_module_global_ufunc + + def test_reduceat_shifting_sum(self): + L = 6 + x = np.arange(L) + idx = np.array(list(zip(np.arange(L - 2), np.arange(L - 2) + 2))).ravel() + assert_array_equal(np.add.reduceat(x, idx)[::2], [1, 3, 5, 7]) + + def test_all_ufunc(self): + """Try to check presence and results of all ufuncs. 
+ + The list of ufuncs comes from generate_umath.py and is as follows: + + ===== ==== ============= =============== ======================== + done args function types notes + ===== ==== ============= =============== ======================== + n 1 conjugate nums + O + n 1 absolute nums + O complex -> real + n 1 negative nums + O + n 1 sign nums + O -> int + n 1 invert bool + ints + O flts raise an error + n 1 degrees real + M cmplx raise an error + n 1 radians real + M cmplx raise an error + n 1 arccos flts + M + n 1 arccosh flts + M + n 1 arcsin flts + M + n 1 arcsinh flts + M + n 1 arctan flts + M + n 1 arctanh flts + M + n 1 cos flts + M + n 1 sin flts + M + n 1 tan flts + M + n 1 cosh flts + M + n 1 sinh flts + M + n 1 tanh flts + M + n 1 exp flts + M + n 1 expm1 flts + M + n 1 log flts + M + n 1 log10 flts + M + n 1 log1p flts + M + n 1 sqrt flts + M real x < 0 raises error + n 1 ceil real + M + n 1 trunc real + M + n 1 floor real + M + n 1 fabs real + M + n 1 rint flts + M + n 1 isnan flts -> bool + n 1 isinf flts -> bool + n 1 isfinite flts -> bool + n 1 signbit real -> bool + n 1 modf real -> (frac, int) + n 1 logical_not bool + nums + M -> bool + n 2 left_shift ints + O flts raise an error + n 2 right_shift ints + O flts raise an error + n 2 add bool + nums + O boolean + is || + n 2 subtract bool + nums + O boolean - is ^ + n 2 multiply bool + nums + O boolean * is & + n 2 divide nums + O + n 2 floor_divide nums + O + n 2 true_divide nums + O bBhH -> f, iIlLqQ -> d + n 2 fmod nums + M + n 2 power nums + O + n 2 greater bool + nums + O -> bool + n 2 greater_equal bool + nums + O -> bool + n 2 less bool + nums + O -> bool + n 2 less_equal bool + nums + O -> bool + n 2 equal bool + nums + O -> bool + n 2 not_equal bool + nums + O -> bool + n 2 logical_and bool + nums + M -> bool + n 2 logical_or bool + nums + M -> bool + n 2 logical_xor bool + nums + M -> bool + n 2 maximum bool + nums + O + n 2 minimum bool + nums + O + n 2 bitwise_and bool + ints + O flts raise an error + n 2 bitwise_or bool + ints + O flts raise an error + n 2 bitwise_xor bool + ints + O flts raise an error + n 2 arctan2 real + M + n 2 remainder ints + real + O + n 2 hypot real + M + ===== ==== ============= =============== ======================== + + Types other than those listed will be accepted, but they are cast to + the smallest compatible type for which the function is defined. 
The + casting rules are: + + bool -> int8 -> float32 + ints -> double + + """ + pass + + # from include/numpy/ufuncobject.h + size_inferred = 2 + can_ignore = 4 + def test_signature0(self): + # the arguments to test_signature are: nin, nout, core_signature + enabled, num_dims, ixs, flags, sizes = umt.test_signature( + 2, 1, "(i),(i)->()") + assert_equal(enabled, 1) + assert_equal(num_dims, (1, 1, 0)) + assert_equal(ixs, (0, 0)) + assert_equal(flags, (self.size_inferred,)) + assert_equal(sizes, (-1,)) + + def test_signature1(self): + # empty core signature; treat as plain ufunc (with trivial core) + enabled, num_dims, ixs, flags, sizes = umt.test_signature( + 2, 1, "(),()->()") + assert_equal(enabled, 0) + assert_equal(num_dims, (0, 0, 0)) + assert_equal(ixs, ()) + assert_equal(flags, ()) + assert_equal(sizes, ()) + + def test_signature2(self): + # more complicated names for variables + enabled, num_dims, ixs, flags, sizes = umt.test_signature( + 2, 1, "(i1,i2),(J_1)->(_kAB)") + assert_equal(enabled, 1) + assert_equal(num_dims, (2, 1, 1)) + assert_equal(ixs, (0, 1, 2, 3)) + assert_equal(flags, (self.size_inferred,)*4) + assert_equal(sizes, (-1, -1, -1, -1)) + + def test_signature3(self): + enabled, num_dims, ixs, flags, sizes = umt.test_signature( + 2, 1, "(i1, i12), (J_1)->(i12, i2)") + assert_equal(enabled, 1) + assert_equal(num_dims, (2, 1, 2)) + assert_equal(ixs, (0, 1, 2, 1, 3)) + assert_equal(flags, (self.size_inferred,)*4) + assert_equal(sizes, (-1, -1, -1, -1)) + + def test_signature4(self): + # matrix_multiply signature from _umath_tests + enabled, num_dims, ixs, flags, sizes = umt.test_signature( + 2, 1, "(n,k),(k,m)->(n,m)") + assert_equal(enabled, 1) + assert_equal(num_dims, (2, 2, 2)) + assert_equal(ixs, (0, 1, 1, 2, 0, 2)) + assert_equal(flags, (self.size_inferred,)*3) + assert_equal(sizes, (-1, -1, -1)) + + def test_signature5(self): + # matmul signature from _umath_tests + enabled, num_dims, ixs, flags, sizes = umt.test_signature( + 2, 1, "(n?,k),(k,m?)->(n?,m?)") + assert_equal(enabled, 1) + assert_equal(num_dims, (2, 2, 2)) + assert_equal(ixs, (0, 1, 1, 2, 0, 2)) + assert_equal(flags, (self.size_inferred | self.can_ignore, + self.size_inferred, + self.size_inferred | self.can_ignore)) + assert_equal(sizes, (-1, -1, -1)) + + def test_signature6(self): + enabled, num_dims, ixs, flags, sizes = umt.test_signature( + 1, 1, "(3)->()") + assert_equal(enabled, 1) + assert_equal(num_dims, (1, 0)) + assert_equal(ixs, (0,)) + assert_equal(flags, (0,)) + assert_equal(sizes, (3,)) + + def test_signature7(self): + enabled, num_dims, ixs, flags, sizes = umt.test_signature( + 3, 1, "(3),(03,3),(n)->(9)") + assert_equal(enabled, 1) + assert_equal(num_dims, (1, 2, 1, 1)) + assert_equal(ixs, (0, 0, 0, 1, 2)) + assert_equal(flags, (0, self.size_inferred, 0)) + assert_equal(sizes, (3, -1, 9)) + + def test_signature8(self): + enabled, num_dims, ixs, flags, sizes = umt.test_signature( + 3, 1, "(3?),(3?,3?),(n)->(9)") + assert_equal(enabled, 1) + assert_equal(num_dims, (1, 2, 1, 1)) + assert_equal(ixs, (0, 0, 0, 1, 2)) + assert_equal(flags, (self.can_ignore, self.size_inferred, 0)) + assert_equal(sizes, (3, -1, 9)) + + def test_signature9(self): + enabled, num_dims, ixs, flags, sizes = umt.test_signature( + 1, 1, "( 3) -> ( )") + assert_equal(enabled, 1) + assert_equal(num_dims, (1, 0)) + assert_equal(ixs, (0,)) + assert_equal(flags, (0,)) + assert_equal(sizes, (3,)) + + def test_signature10(self): + enabled, num_dims, ixs, flags, sizes = umt.test_signature( + 3, 1, "( 3? ) , (3? , 3?) 
,(n )-> ( 9)")
+ assert_equal(enabled, 1)
+ assert_equal(num_dims, (1, 2, 1, 1))
+ assert_equal(ixs, (0, 0, 0, 1, 2))
+ assert_equal(flags, (self.can_ignore, self.size_inferred, 0))
+ assert_equal(sizes, (3, -1, 9))
+
+ def test_signature_failure_extra_parenthesis(self):
+ with assert_raises(ValueError):
+ umt.test_signature(2, 1, "((i)),(i)->()")
+
+ def test_signature_failure_mismatching_parenthesis(self):
+ with assert_raises(ValueError):
+ umt.test_signature(2, 1, "(i),)i(->()")
+
+ def test_signature_failure_signature_missing_input_arg(self):
+ with assert_raises(ValueError):
+ umt.test_signature(2, 1, "(i),->()")
+
+ def test_signature_failure_signature_missing_output_arg(self):
+ with assert_raises(ValueError):
+ umt.test_signature(2, 2, "(i),(i)->()")
+
+ def test_get_signature(self):
+ assert_equal(umt.inner1d.signature, "(i),(i)->()")
+
+ def test_forced_sig(self):
+ a = 0.5*np.arange(3, dtype='f8')
+ assert_equal(np.add(a, 0.5), [0.5, 1, 1.5])
+ with pytest.warns(DeprecationWarning):
+ assert_equal(np.add(a, 0.5, sig='i', casting='unsafe'), [0, 0, 1])
+ assert_equal(np.add(a, 0.5, sig='ii->i', casting='unsafe'), [0, 0, 1])
+ with pytest.warns(DeprecationWarning):
+ assert_equal(np.add(a, 0.5, sig=('i4',), casting='unsafe'),
+ [0, 0, 1])
+ assert_equal(np.add(a, 0.5, sig=('i4', 'i4', 'i4'),
+ casting='unsafe'), [0, 0, 1])
+
+ b = np.zeros((3,), dtype='f8')
+ np.add(a, 0.5, out=b)
+ assert_equal(b, [0.5, 1, 1.5])
+ b[:] = 0
+ with pytest.warns(DeprecationWarning):
+ np.add(a, 0.5, sig='i', out=b, casting='unsafe')
+ assert_equal(b, [0, 0, 1])
+ b[:] = 0
+ np.add(a, 0.5, sig='ii->i', out=b, casting='unsafe')
+ assert_equal(b, [0, 0, 1])
+ b[:] = 0
+ with pytest.warns(DeprecationWarning):
+ np.add(a, 0.5, sig=('i4',), out=b, casting='unsafe')
+ assert_equal(b, [0, 0, 1])
+ b[:] = 0
+ np.add(a, 0.5, sig=('i4', 'i4', 'i4'), out=b, casting='unsafe')
+ assert_equal(b, [0, 0, 1])
+
+ def test_signature_all_None(self):
+ # signature all None, is an acceptable alternative (since 1.21)
+ # to not providing a signature.
+ res1 = np.add([3], [4], sig=(None, None, None))
+ res2 = np.add([3], [4])
+ assert_array_equal(res1, res2)
+ res1 = np.maximum([3], [4], sig=(None, None, None))
+ res2 = np.maximum([3], [4])
+ assert_array_equal(res1, res2)
+
+ with pytest.raises(TypeError):
+ # special case, that would be deprecated anyway, so errors:
+ np.add(3, 4, signature=(None,))
+
+ def test_signature_dtype_type(self):
+ # Since that will be the normal behaviour (past NumPy 1.21)
+ # we do support the types already:
+ float_dtype = type(np.dtype(np.float64))
+ np.add(3, 4, signature=(float_dtype, float_dtype, None))
+
+ @pytest.mark.parametrize("get_kwarg", [
+ lambda dt: dict(dtype=dt),
+ lambda dt: dict(signature=(dt, None, None))])
+ def test_signature_dtype_instances_allowed(self, get_kwarg):
+ # We allow certain dtype instances when there is a clear singleton
+ # and the given one is equivalent; mainly for backcompat.
+ int64 = np.dtype("int64")
+ int64_2 = pickle.loads(pickle.dumps(int64))
+ # Relies on pickling behavior, if assert fails just remove test...
+ assert int64 is not int64_2
+
+ assert np.add(1, 2, **get_kwarg(int64_2)).dtype == int64
+ td = np.timedelta64(2, "s")
+ assert np.add(td, td, **get_kwarg("m8")).dtype == "m8[s]"
+
+ @pytest.mark.parametrize("get_kwarg", [
+ param(lambda x: dict(dtype=x), id="dtype"),
+ param(lambda x: dict(signature=(x, None, None)), id="signature")])
+ def test_signature_dtype_instances_rejected(self, get_kwarg):
+ msg = "The `dtype` and `signature` arguments to ufuncs"
+
+ with pytest.raises(TypeError, match=msg):
+ np.add(3, 5, **get_kwarg(np.dtype("int64").newbyteorder()))
+ with pytest.raises(TypeError, match=msg):
+ np.add(3, 5, **get_kwarg(np.dtype("m8[ns]")))
+ with pytest.raises(TypeError, match=msg):
+ np.add(3, 5, **get_kwarg("m8[ns]"))
+
+ @pytest.mark.parametrize("casting", ["unsafe", "same_kind", "safe"])
+ def test_partial_signature_mismatch(self, casting):
+ # If the second argument matches already, no need to specify it:
+ res = np.ldexp(np.float32(1.), np.int_(2), dtype="d")
+ assert res.dtype == "d"
+ res = np.ldexp(np.float32(1.), np.int_(2), signature=(None, None, "d"))
+ assert res.dtype == "d"
+
+ # ldexp only has a loop for long input as second argument, overriding
+ # the output cannot help with that (no matter the casting)
+ with pytest.raises(TypeError):
+ np.ldexp(1., np.uint64(3), dtype="d")
+ with pytest.raises(TypeError):
+ np.ldexp(1., np.uint64(3), signature=(None, None, "d"))
+
+ def test_partial_signature_mismatch_with_cache(self):
+ with pytest.raises(TypeError):
+ np.add(np.float16(1), np.uint64(2), sig=("e", "d", None))
+ # Ensure e,d->None is in the dispatching cache (double loop)
+ np.add(np.float16(1), np.float64(2))
+ # The error must still be raised:
+ with pytest.raises(TypeError):
+ np.add(np.float16(1), np.uint64(2), sig=("e", "d", None))
+
+ def test_use_output_signature_for_all_arguments(self):
+ # Test that providing only `dtype=` or `signature=(None, None, dtype)`
+ # is sufficient if falling back to a homogeneous signature works.
+ # In this case, the `intp, intp -> intp` loop is chosen.
+ res = np.power(1.5, 2.8, dtype=np.intp, casting="unsafe")
+ assert res == 1 # the cast happens first.
+ res = np.power(1.5, 2.8, signature=(None, None, np.intp),
+ casting="unsafe")
+ assert res == 1
+ with pytest.raises(TypeError):
+ # the unsafe casting would normally cause errors though:
+ np.power(1.5, 2.8, dtype=np.intp)
+
+ def test_signature_errors(self):
+ with pytest.raises(TypeError,
+ match="the signature object to ufunc must be a string or"):
+ np.add(3, 4, signature=123.)
# neither a string nor a tuple + + with pytest.raises(ValueError): + # bad symbols that do not translate to dtypes + np.add(3, 4, signature="%^->#") + + with pytest.raises(ValueError): + np.add(3, 4, signature=b"ii-i") # incomplete and byte string + + with pytest.raises(ValueError): + np.add(3, 4, signature="ii>i") # incomplete string + + with pytest.raises(ValueError): + np.add(3, 4, signature=(None, "f8")) # bad length + + with pytest.raises(UnicodeDecodeError): + np.add(3, 4, signature=b"\xff\xff->i") + + def test_forced_dtype_times(self): + # Signatures only set the type numbers (not the actual loop dtypes) + # so using `M` in a signature/dtype should generally work: + a = np.array(['2010-01-02', '1999-03-14', '1833-03'], dtype='>M8[D]') + np.maximum(a, a, dtype="M") + np.maximum.reduce(a, dtype="M") + + arr = np.arange(10, dtype="m8[s]") + np.add(arr, arr, dtype="m") + np.maximum(arr, arr, dtype="m") + + @pytest.mark.parametrize("ufunc", [np.add, np.sqrt]) + def test_cast_safety(self, ufunc): + """Basic test for the safest casts, because ufuncs inner loops can + indicate a cast-safety as well (which is normally always "no"). + """ + def call_ufunc(arr, **kwargs): + return ufunc(*(arr,) * ufunc.nin, **kwargs) + + arr = np.array([1., 2., 3.], dtype=np.float32) + arr_bs = arr.astype(arr.dtype.newbyteorder()) + expected = call_ufunc(arr) + # Normally, a "no" cast: + res = call_ufunc(arr, casting="no") + assert_array_equal(expected, res) + # Byte-swapping is not allowed with "no" though: + with pytest.raises(TypeError): + call_ufunc(arr_bs, casting="no") + + # But is allowed with "equiv": + res = call_ufunc(arr_bs, casting="equiv") + assert_array_equal(expected, res) + + # Casting to float64 is safe, but not equiv: + with pytest.raises(TypeError): + call_ufunc(arr_bs, dtype=np.float64, casting="equiv") + + # but it is safe cast: + res = call_ufunc(arr_bs, dtype=np.float64, casting="safe") + expected = call_ufunc(arr.astype(np.float64)) # upcast + assert_array_equal(expected, res) + + def test_true_divide(self): + a = np.array(10) + b = np.array(20) + tgt = np.array(0.5) + + for tc in 'bhilqBHILQefdgFDG': + dt = np.dtype(tc) + aa = a.astype(dt) + bb = b.astype(dt) + + # Check result value and dtype. + for x, y in itertools.product([aa, -aa], [bb, -bb]): + + # Check with no output type specified + if tc in 'FDG': + tgt = complex(x)/complex(y) + else: + tgt = float(x)/float(y) + + res = np.true_divide(x, y) + rtol = max(np.finfo(res).resolution, 1e-15) + assert_allclose(res, tgt, rtol=rtol) + + if tc in 'bhilqBHILQ': + assert_(res.dtype.name == 'float64') + else: + assert_(res.dtype.name == dt.name ) + + # Check with output type specified. This also checks for the + # incorrect casts in issue gh-3484 because the unary '-' does + # not change types, even for unsigned types, Hence casts in the + # ufunc from signed to unsigned and vice versa will lead to + # errors in the values. 
+ for tcout in 'bhilqBHILQ': + dtout = np.dtype(tcout) + assert_raises(TypeError, np.true_divide, x, y, dtype=dtout) + + for tcout in 'efdg': + dtout = np.dtype(tcout) + if tc in 'FDG': + # Casting complex to float is not allowed + assert_raises(TypeError, np.true_divide, x, y, dtype=dtout) + else: + tgt = float(x)/float(y) + rtol = max(np.finfo(dtout).resolution, 1e-15) + # The value of tiny for double double is NaN + with suppress_warnings() as sup: + sup.filter(UserWarning) + if not np.isnan(np.finfo(dtout).tiny): + atol = max(np.finfo(dtout).tiny, 3e-308) + else: + atol = 3e-308 + # Some test values result in invalid for float16 + # and the cast to it may overflow to inf. + with np.errstate(invalid='ignore', over='ignore'): + res = np.true_divide(x, y, dtype=dtout) + if not np.isfinite(res) and tcout == 'e': + continue + assert_allclose(res, tgt, rtol=rtol, atol=atol) + assert_(res.dtype.name == dtout.name) + + for tcout in 'FDG': + dtout = np.dtype(tcout) + tgt = complex(x)/complex(y) + rtol = max(np.finfo(dtout).resolution, 1e-15) + # The value of tiny for double double is NaN + with suppress_warnings() as sup: + sup.filter(UserWarning) + if not np.isnan(np.finfo(dtout).tiny): + atol = max(np.finfo(dtout).tiny, 3e-308) + else: + atol = 3e-308 + res = np.true_divide(x, y, dtype=dtout) + if not np.isfinite(res): + continue + assert_allclose(res, tgt, rtol=rtol, atol=atol) + assert_(res.dtype.name == dtout.name) + + # Check booleans + a = np.ones((), dtype=np.bool_) + res = np.true_divide(a, a) + assert_(res == 1.0) + assert_(res.dtype.name == 'float64') + res = np.true_divide(~a, a) + assert_(res == 0.0) + assert_(res.dtype.name == 'float64') + + def test_sum_stability(self): + a = np.ones(500, dtype=np.float32) + assert_almost_equal((a / 10.).sum() - a.size / 10., 0, 4) + + a = np.ones(500, dtype=np.float64) + assert_almost_equal((a / 10.).sum() - a.size / 10., 0, 13) + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + def test_sum(self): + for dt in (int, np.float16, np.float32, np.float64, np.longdouble): + for v in (0, 1, 2, 7, 8, 9, 15, 16, 19, 127, + 128, 1024, 1235): + # warning if sum overflows, which it does in float16 + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always", RuntimeWarning) + + tgt = dt(v * (v + 1) / 2) + overflow = not np.isfinite(tgt) + assert_equal(len(w), 1 * overflow) + + d = np.arange(1, v + 1, dtype=dt) + + assert_almost_equal(np.sum(d), tgt) + assert_equal(len(w), 2 * overflow) + + assert_almost_equal(np.sum(d[::-1]), tgt) + assert_equal(len(w), 3 * overflow) + + d = np.ones(500, dtype=dt) + assert_almost_equal(np.sum(d[::2]), 250.) + assert_almost_equal(np.sum(d[1::2]), 250.) + assert_almost_equal(np.sum(d[::3]), 167.) + assert_almost_equal(np.sum(d[1::3]), 167.) + assert_almost_equal(np.sum(d[::-2]), 250.) + assert_almost_equal(np.sum(d[-1::-2]), 250.) + assert_almost_equal(np.sum(d[::-3]), 167.) + assert_almost_equal(np.sum(d[-1::-3]), 167.) + # sum with first reduction entry != 0 + d = np.ones((1,), dtype=dt) + d += d + assert_almost_equal(d, 2.) 
+ + def test_sum_complex(self): + for dt in (np.complex64, np.complex128, np.clongdouble): + for v in (0, 1, 2, 7, 8, 9, 15, 16, 19, 127, + 128, 1024, 1235): + tgt = dt(v * (v + 1) / 2) - dt((v * (v + 1) / 2) * 1j) + d = np.empty(v, dtype=dt) + d.real = np.arange(1, v + 1) + d.imag = -np.arange(1, v + 1) + assert_almost_equal(np.sum(d), tgt) + assert_almost_equal(np.sum(d[::-1]), tgt) + + d = np.ones(500, dtype=dt) + 1j + assert_almost_equal(np.sum(d[::2]), 250. + 250j) + assert_almost_equal(np.sum(d[1::2]), 250. + 250j) + assert_almost_equal(np.sum(d[::3]), 167. + 167j) + assert_almost_equal(np.sum(d[1::3]), 167. + 167j) + assert_almost_equal(np.sum(d[::-2]), 250. + 250j) + assert_almost_equal(np.sum(d[-1::-2]), 250. + 250j) + assert_almost_equal(np.sum(d[::-3]), 167. + 167j) + assert_almost_equal(np.sum(d[-1::-3]), 167. + 167j) + # sum with first reduction entry != 0 + d = np.ones((1,), dtype=dt) + 1j + d += d + assert_almost_equal(d, 2. + 2j) + + def test_sum_initial(self): + # Integer, single axis + assert_equal(np.sum([3], initial=2), 5) + + # Floating point + assert_almost_equal(np.sum([0.2], initial=0.1), 0.3) + + # Multiple non-adjacent axes + assert_equal(np.sum(np.ones((2, 3, 5), dtype=np.int64), axis=(0, 2), initial=2), + [12, 12, 12]) + + def test_sum_where(self): + # More extensive tests done in test_reduction_with_where. + assert_equal(np.sum([[1., 2.], [3., 4.]], where=[True, False]), 4.) + assert_equal(np.sum([[1., 2.], [3., 4.]], axis=0, initial=5., + where=[True, False]), [9., 5.]) + + def test_inner1d(self): + a = np.arange(6).reshape((2, 3)) + assert_array_equal(umt.inner1d(a, a), np.sum(a*a, axis=-1)) + a = np.arange(6) + assert_array_equal(umt.inner1d(a, a), np.sum(a*a)) + + def test_broadcast(self): + msg = "broadcast" + a = np.arange(4).reshape((2, 1, 2)) + b = np.arange(4).reshape((1, 2, 2)) + assert_array_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1), err_msg=msg) + msg = "extend & broadcast loop dimensions" + b = np.arange(4).reshape((2, 2)) + assert_array_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1), err_msg=msg) + # Broadcast in core dimensions should fail + a = np.arange(8).reshape((4, 2)) + b = np.arange(4).reshape((4, 1)) + assert_raises(ValueError, umt.inner1d, a, b) + # Extend core dimensions should fail + a = np.arange(8).reshape((4, 2)) + b = np.array(7) + assert_raises(ValueError, umt.inner1d, a, b) + # Broadcast should fail + a = np.arange(2).reshape((2, 1, 1)) + b = np.arange(3).reshape((3, 1, 1)) + assert_raises(ValueError, umt.inner1d, a, b) + + # Writing to a broadcasted array with overlap should warn, gh-2705 + a = np.arange(2) + b = np.arange(4).reshape((2, 2)) + u, v = np.broadcast_arrays(a, b) + assert_equal(u.strides[0], 0) + x = u + v + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + u += v + assert_equal(len(w), 1) + assert_(x[0, 0] != u[0, 0]) + + # Output reduction should not be allowed. + # See gh-15139 + a = np.arange(6).reshape(3, 2) + b = np.ones(2) + out = np.empty(()) + assert_raises(ValueError, umt.inner1d, a, b, out) + out2 = np.empty(3) + c = umt.inner1d(a, b, out2) + assert_(c is out2) + + def test_out_broadcasts(self): + # For ufuncs and gufuncs (not for reductions), we currently allow + # the output to cause broadcasting of the input arrays. + # both along dimensions with shape 1 and dimensions which do not + # exist at all in the inputs. 
+ arr = np.arange(3).reshape(1, 3) + out = np.empty((5, 4, 3)) + np.add(arr, arr, out=out) + assert (out == np.arange(3) * 2).all() + + # The same holds for gufuncs (gh-16484) + umt.inner1d(arr, arr, out=out) + # the result would be just a scalar `5`, but is broadcast fully: + assert (out == 5).all() + + @pytest.mark.parametrize(["arr", "out"], [ + ([2], np.empty(())), + ([1, 2], np.empty(1)), + (np.ones((4, 3)), np.empty((4, 1)))], + ids=["(1,)->()", "(2,)->(1,)", "(4, 3)->(4, 1)"]) + def test_out_broadcast_errors(self, arr, out): + # Output is (currently) allowed to broadcast inputs, but it cannot be + # smaller than the actual result. + with pytest.raises(ValueError, match="non-broadcastable"): + np.positive(arr, out=out) + + with pytest.raises(ValueError, match="non-broadcastable"): + np.add(np.ones(()), arr, out=out) + + def test_type_cast(self): + msg = "type cast" + a = np.arange(6, dtype='short').reshape((2, 3)) + assert_array_equal(umt.inner1d(a, a), np.sum(a*a, axis=-1), + err_msg=msg) + msg = "type cast on one argument" + a = np.arange(6).reshape((2, 3)) + b = a + 0.1 + assert_array_almost_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1), + err_msg=msg) + + def test_endian(self): + msg = "big endian" + a = np.arange(6, dtype='>i4').reshape((2, 3)) + assert_array_equal(umt.inner1d(a, a), np.sum(a*a, axis=-1), + err_msg=msg) + msg = "little endian" + a = np.arange(6, dtype='<i4').reshape((2, 3)) + assert_array_equal(umt.inner1d(a, a), np.sum(a*a, axis=-1), + err_msg=msg) + + # Output should always be native-endian + Ba = np.arange(1, dtype='>f8') + La = np.arange(1, dtype='<f8') + assert_equal((Ba+Ba).dtype, np.dtype('f8')) + assert_equal((Ba+La).dtype, np.dtype('f8')) + assert_equal((La+Ba).dtype, np.dtype('f8')) + assert_equal((La+La).dtype, np.dtype('f8')) + + assert_equal(np.absolute(La).dtype, np.dtype('f8')) + assert_equal(np.absolute(Ba).dtype, np.dtype('f8')) + assert_equal(np.negative(La).dtype, np.dtype('f8')) + assert_equal(np.negative(Ba).dtype, np.dtype('f8')) + + def test_incontiguous_array(self): + msg = "incontiguous memory layout of array" + x = np.arange(64).reshape((2, 2, 2, 2, 2, 2)) + a = x[:, 0,:, 0,:, 0] + b = x[:, 1,:, 1,:, 1] + a[0, 0, 0] = -1 + msg2 = "make sure it references to the original array" + assert_equal(x[0, 0, 0, 0, 0, 0], -1, err_msg=msg2) + assert_array_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1), err_msg=msg) + x = np.arange(24).reshape(2, 3, 4) + a = x.T + b = x.T + a[0, 0, 0] = -1 + assert_equal(x[0, 0, 0], -1, err_msg=msg2) + assert_array_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1), err_msg=msg) + + def test_output_argument(self): + msg = "output argument" + a = np.arange(12).reshape((2, 3, 2)) + b = np.arange(4).reshape((2, 1, 2)) + 1 + c = np.zeros((2, 3), dtype='int') + umt.inner1d(a, b, c) + assert_array_equal(c, np.sum(a*b, axis=-1), err_msg=msg) + c[:] = -1 + umt.inner1d(a, b, out=c) + assert_array_equal(c, np.sum(a*b, axis=-1), err_msg=msg) + + msg = "output argument with type cast" + c = np.zeros((2, 3), dtype='int16') + umt.inner1d(a, b, c) + assert_array_equal(c, np.sum(a*b, axis=-1), err_msg=msg) + c[:] = -1 + umt.inner1d(a, b, out=c) + assert_array_equal(c, np.sum(a*b, axis=-1), err_msg=msg) + + msg = "output argument with incontiguous layout" + c = np.zeros((2, 3, 4), dtype='int16') + umt.inner1d(a, b, c[..., 0]) + assert_array_equal(c[..., 0], np.sum(a*b, axis=-1), err_msg=msg) + c[:] = -1 + umt.inner1d(a, b, out=c[..., 0]) + assert_array_equal(c[..., 0], np.sum(a*b, axis=-1), err_msg=msg) + + def test_axes_argument(self): 
+ # inner1d signature: '(i),(i)->()' + inner1d = umt.inner1d + a = np.arange(27.).reshape((3, 3, 3)) + b = np.arange(10., 19.).reshape((3, 1, 3)) + # basic tests on inputs (outputs tested below with matrix_multiply). + c = inner1d(a, b) + assert_array_equal(c, (a * b).sum(-1)) + # default + c = inner1d(a, b, axes=[(-1,), (-1,), ()]) + assert_array_equal(c, (a * b).sum(-1)) + # integers ok for single axis. + c = inner1d(a, b, axes=[-1, -1, ()]) + assert_array_equal(c, (a * b).sum(-1)) + # mix fine + c = inner1d(a, b, axes=[(-1,), -1, ()]) + assert_array_equal(c, (a * b).sum(-1)) + # can omit last axis. + c = inner1d(a, b, axes=[-1, -1]) + assert_array_equal(c, (a * b).sum(-1)) + # can pass in other types of integer (with __index__ protocol) + c = inner1d(a, b, axes=[np.int8(-1), np.array(-1, dtype=np.int32)]) + assert_array_equal(c, (a * b).sum(-1)) + # swap some axes + c = inner1d(a, b, axes=[0, 0]) + assert_array_equal(c, (a * b).sum(0)) + c = inner1d(a, b, axes=[0, 2]) + assert_array_equal(c, (a.transpose(1, 2, 0) * b).sum(-1)) + # Check errors for improperly constructed axes arguments. + # should have list. + assert_raises(TypeError, inner1d, a, b, axes=-1) + # needs enough elements + assert_raises(ValueError, inner1d, a, b, axes=[-1]) + # should pass in indices. + assert_raises(TypeError, inner1d, a, b, axes=[-1.0, -1.0]) + assert_raises(TypeError, inner1d, a, b, axes=[(-1.0,), -1]) + assert_raises(TypeError, inner1d, a, b, axes=[None, 1]) + # cannot pass an index unless there is only one dimension + # (output is wrong in this case) + assert_raises(np.AxisError, inner1d, a, b, axes=[-1, -1, -1]) + # or pass in generally the wrong number of axes + assert_raises(np.AxisError, inner1d, a, b, axes=[-1, -1, (-1,)]) + assert_raises(np.AxisError, inner1d, a, b, axes=[-1, (-2, -1), ()]) + # axes need to have same length. + assert_raises(ValueError, inner1d, a, b, axes=[0, 1]) + + # matrix_multiply signature: '(m,n),(n,p)->(m,p)' + mm = umt.matrix_multiply + a = np.arange(12).reshape((2, 3, 2)) + b = np.arange(8).reshape((2, 2, 2, 1)) + 1 + # Sanity check. + c = mm(a, b) + assert_array_equal(c, np.matmul(a, b)) + # Default axes. + c = mm(a, b, axes=[(-2, -1), (-2, -1), (-2, -1)]) + assert_array_equal(c, np.matmul(a, b)) + # Default with explicit axes. + c = mm(a, b, axes=[(1, 2), (2, 3), (2, 3)]) + assert_array_equal(c, np.matmul(a, b)) + # swap some axes. + c = mm(a, b, axes=[(0, -1), (1, 2), (-2, -1)]) + assert_array_equal(c, np.matmul(a.transpose(1, 0, 2), + b.transpose(0, 3, 1, 2))) + # Default with output array. + c = np.empty((2, 2, 3, 1)) + d = mm(a, b, out=c, axes=[(1, 2), (2, 3), (2, 3)]) + assert_(c is d) + assert_array_equal(c, np.matmul(a, b)) + # Transposed output array + c = np.empty((1, 2, 2, 3)) + d = mm(a, b, out=c, axes=[(-2, -1), (-2, -1), (3, 0)]) + assert_(c is d) + assert_array_equal(c, np.matmul(a, b).transpose(3, 0, 1, 2)) + # Check errors for improperly constructed axes arguments. 
+ # wrong argument + assert_raises(TypeError, mm, a, b, axis=1) + # axes should be list + assert_raises(TypeError, mm, a, b, axes=1) + assert_raises(TypeError, mm, a, b, axes=((-2, -1), (-2, -1), (-2, -1))) + # list needs to have right length + assert_raises(ValueError, mm, a, b, axes=[]) + assert_raises(ValueError, mm, a, b, axes=[(-2, -1)]) + # list should not contain None, or lists + assert_raises(TypeError, mm, a, b, axes=[None, None, None]) + assert_raises(TypeError, + mm, a, b, axes=[[-2, -1], [-2, -1], [-2, -1]]) + assert_raises(TypeError, + mm, a, b, axes=[(-2, -1), (-2, -1), [-2, -1]]) + assert_raises(TypeError, mm, a, b, axes=[(-2, -1), (-2, -1), None]) + # single integers are AxisErrors if more are required + assert_raises(np.AxisError, mm, a, b, axes=[-1, -1, -1]) + assert_raises(np.AxisError, mm, a, b, axes=[(-2, -1), (-2, -1), -1]) + # tuples should not have duplicated values + assert_raises(ValueError, mm, a, b, axes=[(-2, -1), (-2, -1), (-2, -2)]) + # arrays should have enough axes. + z = np.zeros((2, 2)) + assert_raises(ValueError, mm, z, z[0]) + assert_raises(ValueError, mm, z, z, out=z[:, 0]) + assert_raises(ValueError, mm, z[1], z, axes=[0, 1]) + assert_raises(ValueError, mm, z, z, out=z[0], axes=[0, 1]) + # Regular ufuncs should not accept axes. + assert_raises(TypeError, np.add, 1., 1., axes=[0]) + # should be able to deal with bad unrelated kwargs. + assert_raises(TypeError, mm, z, z, axes=[0, 1], parrot=True) + + def test_axis_argument(self): + # inner1d signature: '(i),(i)->()' + inner1d = umt.inner1d + a = np.arange(27.).reshape((3, 3, 3)) + b = np.arange(10., 19.).reshape((3, 1, 3)) + c = inner1d(a, b) + assert_array_equal(c, (a * b).sum(-1)) + c = inner1d(a, b, axis=-1) + assert_array_equal(c, (a * b).sum(-1)) + out = np.zeros_like(c) + d = inner1d(a, b, axis=-1, out=out) + assert_(d is out) + assert_array_equal(d, c) + c = inner1d(a, b, axis=0) + assert_array_equal(c, (a * b).sum(0)) + # Sanity checks on innerwt and cumsum. + a = np.arange(6).reshape((2, 3)) + b = np.arange(10, 16).reshape((2, 3)) + w = np.arange(20, 26).reshape((2, 3)) + assert_array_equal(umt.innerwt(a, b, w, axis=0), + np.sum(a * b * w, axis=0)) + assert_array_equal(umt.cumsum(a, axis=0), np.cumsum(a, axis=0)) + assert_array_equal(umt.cumsum(a, axis=-1), np.cumsum(a, axis=-1)) + out = np.empty_like(a) + b = umt.cumsum(a, out=out, axis=0) + assert_(out is b) + assert_array_equal(b, np.cumsum(a, axis=0)) + b = umt.cumsum(a, out=out, axis=1) + assert_(out is b) + assert_array_equal(b, np.cumsum(a, axis=-1)) + # Check errors. + # Cannot pass in both axis and axes. + assert_raises(TypeError, inner1d, a, b, axis=0, axes=[0, 0]) + # Not an integer. + assert_raises(TypeError, inner1d, a, b, axis=[0]) + # more than 1 core dimensions. + mm = umt.matrix_multiply + assert_raises(TypeError, mm, a, b, axis=1) + # Output wrong size in axis. + out = np.empty((1, 2, 3), dtype=a.dtype) + assert_raises(ValueError, umt.cumsum, a, out=out, axis=0) + # Regular ufuncs should not accept axis. 
+ assert_raises(TypeError, np.add, 1., 1., axis=0) + + def test_keepdims_argument(self): + # inner1d signature: '(i),(i)->()' + inner1d = umt.inner1d + a = np.arange(27.).reshape((3, 3, 3)) + b = np.arange(10., 19.).reshape((3, 1, 3)) + c = inner1d(a, b) + assert_array_equal(c, (a * b).sum(-1)) + c = inner1d(a, b, keepdims=False) + assert_array_equal(c, (a * b).sum(-1)) + c = inner1d(a, b, keepdims=True) + assert_array_equal(c, (a * b).sum(-1, keepdims=True)) + out = np.zeros_like(c) + d = inner1d(a, b, keepdims=True, out=out) + assert_(d is out) + assert_array_equal(d, c) + # Now combined with axis and axes. + c = inner1d(a, b, axis=-1, keepdims=False) + assert_array_equal(c, (a * b).sum(-1, keepdims=False)) + c = inner1d(a, b, axis=-1, keepdims=True) + assert_array_equal(c, (a * b).sum(-1, keepdims=True)) + c = inner1d(a, b, axis=0, keepdims=False) + assert_array_equal(c, (a * b).sum(0, keepdims=False)) + c = inner1d(a, b, axis=0, keepdims=True) + assert_array_equal(c, (a * b).sum(0, keepdims=True)) + c = inner1d(a, b, axes=[(-1,), (-1,), ()], keepdims=False) + assert_array_equal(c, (a * b).sum(-1)) + c = inner1d(a, b, axes=[(-1,), (-1,), (-1,)], keepdims=True) + assert_array_equal(c, (a * b).sum(-1, keepdims=True)) + c = inner1d(a, b, axes=[0, 0], keepdims=False) + assert_array_equal(c, (a * b).sum(0)) + c = inner1d(a, b, axes=[0, 0, 0], keepdims=True) + assert_array_equal(c, (a * b).sum(0, keepdims=True)) + c = inner1d(a, b, axes=[0, 2], keepdims=False) + assert_array_equal(c, (a.transpose(1, 2, 0) * b).sum(-1)) + c = inner1d(a, b, axes=[0, 2], keepdims=True) + assert_array_equal(c, (a.transpose(1, 2, 0) * b).sum(-1, + keepdims=True)) + c = inner1d(a, b, axes=[0, 2, 2], keepdims=True) + assert_array_equal(c, (a.transpose(1, 2, 0) * b).sum(-1, + keepdims=True)) + c = inner1d(a, b, axes=[0, 2, 0], keepdims=True) + assert_array_equal(c, (a * b.transpose(2, 0, 1)).sum(0, keepdims=True)) + # Hardly useful, but should work. + c = inner1d(a, b, axes=[0, 2, 1], keepdims=True) + assert_array_equal(c, (a.transpose(1, 0, 2) * b.transpose(0, 2, 1)) + .sum(1, keepdims=True)) + # Check with two core dimensions. + a = np.eye(3) * np.arange(4.)[:, np.newaxis, np.newaxis] + expected = uml.det(a) + c = uml.det(a, keepdims=False) + assert_array_equal(c, expected) + c = uml.det(a, keepdims=True) + assert_array_equal(c, expected[:, np.newaxis, np.newaxis]) + a = np.eye(3) * np.arange(4.)[:, np.newaxis, np.newaxis] + expected_s, expected_l = uml.slogdet(a) + cs, cl = uml.slogdet(a, keepdims=False) + assert_array_equal(cs, expected_s) + assert_array_equal(cl, expected_l) + cs, cl = uml.slogdet(a, keepdims=True) + assert_array_equal(cs, expected_s[:, np.newaxis, np.newaxis]) + assert_array_equal(cl, expected_l[:, np.newaxis, np.newaxis]) + # Sanity check on innerwt. + a = np.arange(6).reshape((2, 3)) + b = np.arange(10, 16).reshape((2, 3)) + w = np.arange(20, 26).reshape((2, 3)) + assert_array_equal(umt.innerwt(a, b, w, keepdims=True), + np.sum(a * b * w, axis=-1, keepdims=True)) + assert_array_equal(umt.innerwt(a, b, w, axis=0, keepdims=True), + np.sum(a * b * w, axis=0, keepdims=True)) + # Check errors. + # Not a boolean + assert_raises(TypeError, inner1d, a, b, keepdims='true') + # More than 1 core dimension, and core output dimensions. + mm = umt.matrix_multiply + assert_raises(TypeError, mm, a, b, keepdims=True) + assert_raises(TypeError, mm, a, b, keepdims=False) + # Regular ufuncs should not accept keepdims. 
+ assert_raises(TypeError, np.add, 1., 1., keepdims=False) + + def test_innerwt(self): + a = np.arange(6).reshape((2, 3)) + b = np.arange(10, 16).reshape((2, 3)) + w = np.arange(20, 26).reshape((2, 3)) + assert_array_equal(umt.innerwt(a, b, w), np.sum(a*b*w, axis=-1)) + a = np.arange(100, 124).reshape((2, 3, 4)) + b = np.arange(200, 224).reshape((2, 3, 4)) + w = np.arange(300, 324).reshape((2, 3, 4)) + assert_array_equal(umt.innerwt(a, b, w), np.sum(a*b*w, axis=-1)) + + def test_innerwt_empty(self): + """Test generalized ufunc with zero-sized operands""" + a = np.array([], dtype='f8') + b = np.array([], dtype='f8') + w = np.array([], dtype='f8') + assert_array_equal(umt.innerwt(a, b, w), np.sum(a*b*w, axis=-1)) + + def test_cross1d(self): + """Test with fixed-sized signature.""" + a = np.eye(3) + assert_array_equal(umt.cross1d(a, a), np.zeros((3, 3))) + out = np.zeros((3, 3)) + result = umt.cross1d(a[0], a, out) + assert_(result is out) + assert_array_equal(result, np.vstack((np.zeros(3), a[2], -a[1]))) + assert_raises(ValueError, umt.cross1d, np.eye(4), np.eye(4)) + assert_raises(ValueError, umt.cross1d, a, np.arange(4.)) + # Wrong output core dimension. + assert_raises(ValueError, umt.cross1d, a, np.arange(3.), np.zeros((3, 4))) + # Wrong output broadcast dimension (see gh-15139). + assert_raises(ValueError, umt.cross1d, a, np.arange(3.), np.zeros(3)) + + def test_can_ignore_signature(self): + # Comparing the effects of ? in signature: + # matrix_multiply: (m,n),(n,p)->(m,p) # all must be there. + # matmul: (m?,n),(n,p?)->(m?,p?) # allow missing m, p. + mat = np.arange(12).reshape((2, 3, 2)) + single_vec = np.arange(2) + col_vec = single_vec[:, np.newaxis] + col_vec_array = np.arange(8).reshape((2, 2, 2, 1)) + 1 + # matrix @ single column vector with proper dimension + mm_col_vec = umt.matrix_multiply(mat, col_vec) + # matmul does the same thing + matmul_col_vec = umt.matmul(mat, col_vec) + assert_array_equal(matmul_col_vec, mm_col_vec) + # matrix @ vector without dimension making it a column vector. + # matrix multiply fails -> missing core dim. + assert_raises(ValueError, umt.matrix_multiply, mat, single_vec) + # matmul mimicker passes, and returns a vector. + matmul_col = umt.matmul(mat, single_vec) + assert_array_equal(matmul_col, mm_col_vec.squeeze()) + # Now with a column array: same as for column vector, + # broadcasting sensibly. 
+ mm_col_vec = umt.matrix_multiply(mat, col_vec_array) + matmul_col_vec = umt.matmul(mat, col_vec_array) + assert_array_equal(matmul_col_vec, mm_col_vec) + # As above, but for row vector + single_vec = np.arange(3) + row_vec = single_vec[np.newaxis, :] + row_vec_array = np.arange(24).reshape((4, 2, 1, 1, 3)) + 1 + # row vector @ matrix + mm_row_vec = umt.matrix_multiply(row_vec, mat) + matmul_row_vec = umt.matmul(row_vec, mat) + assert_array_equal(matmul_row_vec, mm_row_vec) + # single row vector @ matrix + assert_raises(ValueError, umt.matrix_multiply, single_vec, mat) + matmul_row = umt.matmul(single_vec, mat) + assert_array_equal(matmul_row, mm_row_vec.squeeze()) + # row vector array @ matrix + mm_row_vec = umt.matrix_multiply(row_vec_array, mat) + matmul_row_vec = umt.matmul(row_vec_array, mat) + assert_array_equal(matmul_row_vec, mm_row_vec) + # Now for vector combinations + # row vector @ column vector + col_vec = row_vec.T + col_vec_array = row_vec_array.swapaxes(-2, -1) + mm_row_col_vec = umt.matrix_multiply(row_vec, col_vec) + matmul_row_col_vec = umt.matmul(row_vec, col_vec) + assert_array_equal(matmul_row_col_vec, mm_row_col_vec) + # single row vector @ single col vector + assert_raises(ValueError, umt.matrix_multiply, single_vec, single_vec) + matmul_row_col = umt.matmul(single_vec, single_vec) + assert_array_equal(matmul_row_col, mm_row_col_vec.squeeze()) + # row vector array @ matrix + mm_row_col_array = umt.matrix_multiply(row_vec_array, col_vec_array) + matmul_row_col_array = umt.matmul(row_vec_array, col_vec_array) + assert_array_equal(matmul_row_col_array, mm_row_col_array) + # Finally, check that things are *not* squeezed if one gives an + # output. + out = np.zeros_like(mm_row_col_array) + out = umt.matrix_multiply(row_vec_array, col_vec_array, out=out) + assert_array_equal(out, mm_row_col_array) + out[:] = 0 + out = umt.matmul(row_vec_array, col_vec_array, out=out) + assert_array_equal(out, mm_row_col_array) + # And check one cannot put missing dimensions back. + out = np.zeros_like(mm_row_col_vec) + assert_raises(ValueError, umt.matrix_multiply, single_vec, single_vec, + out) + # But fine for matmul, since it is just a broadcast. 
+ out = umt.matmul(single_vec, single_vec, out) + assert_array_equal(out, mm_row_col_vec.squeeze()) + + def test_matrix_multiply(self): + self.compare_matrix_multiply_results(np.int64) + self.compare_matrix_multiply_results(np.double) + + def test_matrix_multiply_umath_empty(self): + res = umt.matrix_multiply(np.ones((0, 10)), np.ones((10, 0))) + assert_array_equal(res, np.zeros((0, 0))) + res = umt.matrix_multiply(np.ones((10, 0)), np.ones((0, 10))) + assert_array_equal(res, np.zeros((10, 10))) + + def compare_matrix_multiply_results(self, tp): + d1 = np.array(np.random.rand(2, 3, 4), dtype=tp) + d2 = np.array(np.random.rand(2, 3, 4), dtype=tp) + msg = "matrix multiply on type %s" % d1.dtype.name + + def permute_n(n): + if n == 1: + return ([0],) + ret = () + base = permute_n(n-1) + for perm in base: + for i in range(n): + new = perm + [n-1] + new[n-1] = new[i] + new[i] = n-1 + ret += (new,) + return ret + + def slice_n(n): + if n == 0: + return ((),) + ret = () + base = slice_n(n-1) + for sl in base: + ret += (sl+(slice(None),),) + ret += (sl+(slice(0, 1),),) + return ret + + def broadcastable(s1, s2): + return s1 == s2 or s1 == 1 or s2 == 1 + + permute_3 = permute_n(3) + slice_3 = slice_n(3) + ((slice(None, None, -1),)*3,) + + ref = True + for p1 in permute_3: + for p2 in permute_3: + for s1 in slice_3: + for s2 in slice_3: + a1 = d1.transpose(p1)[s1] + a2 = d2.transpose(p2)[s2] + ref = ref and a1.base is not None + ref = ref and a2.base is not None + if (a1.shape[-1] == a2.shape[-2] and + broadcastable(a1.shape[0], a2.shape[0])): + assert_array_almost_equal( + umt.matrix_multiply(a1, a2), + np.sum(a2[..., np.newaxis].swapaxes(-3, -1) * + a1[..., np.newaxis,:], axis=-1), + err_msg=msg + ' %s %s' % (str(a1.shape), + str(a2.shape))) + + assert_equal(ref, True, err_msg="reference check") + + def test_euclidean_pdist(self): + a = np.arange(12, dtype=float).reshape(4, 3) + out = np.empty((a.shape[0] * (a.shape[0] - 1) // 2,), dtype=a.dtype) + umt.euclidean_pdist(a, out) + b = np.sqrt(np.sum((a[:, None] - a)**2, axis=-1)) + b = b[~np.tri(a.shape[0], dtype=bool)] + assert_almost_equal(out, b) + # An output array is required to determine p with signature (n,d)->(p) + assert_raises(ValueError, umt.euclidean_pdist, a) + + def test_cumsum(self): + a = np.arange(10) + result = umt.cumsum(a) + assert_array_equal(result, a.cumsum()) + + def test_object_logical(self): + a = np.array([3, None, True, False, "test", ""], dtype=object) + assert_equal(np.logical_or(a, None), + np.array([x or None for x in a], dtype=object)) + assert_equal(np.logical_or(a, True), + np.array([x or True for x in a], dtype=object)) + assert_equal(np.logical_or(a, 12), + np.array([x or 12 for x in a], dtype=object)) + assert_equal(np.logical_or(a, "blah"), + np.array([x or "blah" for x in a], dtype=object)) + + assert_equal(np.logical_and(a, None), + np.array([x and None for x in a], dtype=object)) + assert_equal(np.logical_and(a, True), + np.array([x and True for x in a], dtype=object)) + assert_equal(np.logical_and(a, 12), + np.array([x and 12 for x in a], dtype=object)) + assert_equal(np.logical_and(a, "blah"), + np.array([x and "blah" for x in a], dtype=object)) + + assert_equal(np.logical_not(a), + np.array([not x for x in a], dtype=object)) + + assert_equal(np.logical_or.reduce(a), 3) + assert_equal(np.logical_and.reduce(a), None) + + def test_object_comparison(self): + class HasComparisons: + def __eq__(self, other): + return '==' + + arr0d = np.array(HasComparisons()) + assert_equal(arr0d == arr0d, True) + 
assert_equal(np.equal(arr0d, arr0d), True) # normal behavior is a cast + + arr1d = np.array([HasComparisons()]) + assert_equal(arr1d == arr1d, np.array([True])) + assert_equal(np.equal(arr1d, arr1d), np.array([True])) # normal behavior is a cast + assert_equal(np.equal(arr1d, arr1d, dtype=object), np.array(['=='])) + + def test_object_array_reduction(self): + # Reductions on object arrays + a = np.array(['a', 'b', 'c'], dtype=object) + assert_equal(np.sum(a), 'abc') + assert_equal(np.max(a), 'c') + assert_equal(np.min(a), 'a') + a = np.array([True, False, True], dtype=object) + assert_equal(np.sum(a), 2) + assert_equal(np.prod(a), 0) + assert_equal(np.any(a), True) + assert_equal(np.all(a), False) + assert_equal(np.max(a), True) + assert_equal(np.min(a), False) + assert_equal(np.array([[1]], dtype=object).sum(), 1) + assert_equal(np.array([[[1, 2]]], dtype=object).sum((0, 1)), [1, 2]) + assert_equal(np.array([1], dtype=object).sum(initial=1), 2) + assert_equal(np.array([[1], [2, 3]], dtype=object) + .sum(initial=[0], where=[False, True]), [0, 2, 3]) + + def test_object_array_accumulate_inplace(self): + # Checks that in-place accumulates work, see also gh-7402 + arr = np.ones(4, dtype=object) + arr[:] = [[1] for i in range(4)] + # Twice reproduced also for tuples: + np.add.accumulate(arr, out=arr) + np.add.accumulate(arr, out=arr) + assert_array_equal(arr, + np.array([[1]*i for i in [1, 3, 6, 10]], dtype=object), + ) + + # And the same if the axis argument is used + arr = np.ones((2, 4), dtype=object) + arr[0, :] = [[2] for i in range(4)] + np.add.accumulate(arr, out=arr, axis=-1) + np.add.accumulate(arr, out=arr, axis=-1) + assert_array_equal(arr[0, :], + np.array([[2]*i for i in [1, 3, 6, 10]], dtype=object), + ) + + def test_object_array_accumulate_failure(self): + # Typical accumulation on object works as expected: + res = np.add.accumulate(np.array([1, 0, 2], dtype=object)) + assert_array_equal(res, np.array([1, 1, 3], dtype=object)) + # But errors are propagated from the inner-loop if they occur: + with pytest.raises(TypeError): + np.add.accumulate([1, None, 2]) + + def test_object_array_reduceat_inplace(self): + # Checks that in-place reduceats work, see also gh-7465 + arr = np.empty(4, dtype=object) + arr[:] = [[1] for i in range(4)] + out = np.empty(4, dtype=object) + out[:] = [[1] for i in range(4)] + np.add.reduceat(arr, np.arange(4), out=arr) + np.add.reduceat(arr, np.arange(4), out=arr) + assert_array_equal(arr, out) + + # And the same if the axis argument is used + arr = np.ones((2, 4), dtype=object) + arr[0, :] = [[2] for i in range(4)] + out = np.ones((2, 4), dtype=object) + out[0, :] = [[2] for i in range(4)] + np.add.reduceat(arr, np.arange(4), out=arr, axis=-1) + np.add.reduceat(arr, np.arange(4), out=arr, axis=-1) + assert_array_equal(arr, out) + + def test_object_array_reduceat_failure(self): + # Reduceat works as expected when no invalid operation occurs (None is + # not involved in an operation here) + res = np.add.reduceat(np.array([1, None, 2], dtype=object), [1, 2]) + assert_array_equal(res, np.array([None, 2], dtype=object)) + # But errors when None would be involved in an operation: + with pytest.raises(TypeError): + np.add.reduceat([1, None, 2], [0, 2]) + + def test_zerosize_reduction(self): + # Test with default dtype and object dtype + for a in [[], np.array([], dtype=object)]: + assert_equal(np.sum(a), 0) + assert_equal(np.prod(a), 1) + assert_equal(np.any(a), False) + assert_equal(np.all(a), True) + assert_raises(ValueError, np.max, a) + 
assert_raises(ValueError, np.min, a) + + def test_axis_out_of_bounds(self): + a = np.array([False, False]) + assert_raises(np.AxisError, a.all, axis=1) + a = np.array([False, False]) + assert_raises(np.AxisError, a.all, axis=-2) + + a = np.array([False, False]) + assert_raises(np.AxisError, a.any, axis=1) + a = np.array([False, False]) + assert_raises(np.AxisError, a.any, axis=-2) + + def test_scalar_reduction(self): + # The functions 'sum', 'prod', etc allow specifying axis=0 + # even for scalars + assert_equal(np.sum(3, axis=0), 3) + assert_equal(np.prod(3.5, axis=0), 3.5) + assert_equal(np.any(True, axis=0), True) + assert_equal(np.all(False, axis=0), False) + assert_equal(np.max(3, axis=0), 3) + assert_equal(np.min(2.5, axis=0), 2.5) + + # Check scalar behaviour for ufuncs without an identity + assert_equal(np.power.reduce(3), 3) + + # Make sure that scalars are coming out from this operation + assert_(type(np.prod(np.float32(2.5), axis=0)) is np.float32) + assert_(type(np.sum(np.float32(2.5), axis=0)) is np.float32) + assert_(type(np.max(np.float32(2.5), axis=0)) is np.float32) + assert_(type(np.min(np.float32(2.5), axis=0)) is np.float32) + + # check if scalars/0-d arrays get cast + assert_(type(np.any(0, axis=0)) is np.bool_) + + # assert that 0-d arrays get wrapped + class MyArray(np.ndarray): + pass + a = np.array(1).view(MyArray) + assert_(type(np.any(a)) is MyArray) + + def test_casting_out_param(self): + # Test that it's possible to do casts on output + a = np.ones((200, 100), np.int64) + b = np.ones((200, 100), np.int64) + c = np.ones((200, 100), np.float64) + np.add(a, b, out=c) + assert_equal(c, 2) + + a = np.zeros(65536) + b = np.zeros(65536, dtype=np.float32) + np.subtract(a, 0, out=b) + assert_equal(b, 0) + + def test_where_param(self): + # Test that the where= ufunc parameter works with regular arrays + a = np.arange(7) + b = np.ones(7) + c = np.zeros(7) + np.add(a, b, out=c, where=(a % 2 == 1)) + assert_equal(c, [0, 2, 0, 4, 0, 6, 0]) + + a = np.arange(4).reshape(2, 2) + 2 + np.power(a, [2, 3], out=a, where=[[0, 1], [1, 0]]) + assert_equal(a, [[2, 27], [16, 5]]) + # Broadcasting the where= parameter + np.subtract(a, 2, out=a, where=[True, False]) + assert_equal(a, [[0, 27], [14, 5]]) + + def test_where_param_buffer_output(self): + # This test is temporarily skipped because it requires + # adding masking features to the nditer to work properly + + # With casting on output + a = np.ones(10, np.int64) + b = np.ones(10, np.int64) + c = 1.5 * np.ones(10, np.float64) + np.add(a, b, out=c, where=[1, 0, 0, 1, 0, 0, 1, 1, 1, 0]) + assert_equal(c, [2, 1.5, 1.5, 2, 1.5, 1.5, 2, 2, 2, 1.5]) + + def test_where_param_alloc(self): + # With casting and allocated output + a = np.array([1], dtype=np.int64) + m = np.array([True], dtype=bool) + assert_equal(np.sqrt(a, where=m), [1]) + + # No casting and allocated output + a = np.array([1], dtype=np.float64) + m = np.array([True], dtype=bool) + assert_equal(np.sqrt(a, where=m), [1]) + + def test_where_with_broadcasting(self): + # See gh-17198 + a = np.random.random((5000, 4)) + b = np.random.random((5000, 1)) + + where = a > 0.3 + out = np.full_like(a, 0) + np.less(a, b, where=where, out=out) + b_where = np.broadcast_to(b, a.shape)[where] + assert_array_equal((a[where] < b_where), out[where].astype(bool)) + assert not out[~where].any() # outside mask, out remains all 0 + + def check_identityless_reduction(self, a): + # np.minimum.reduce is an identityless reduction + + # Verify that it sees the zero at various positions + a[...] 
= 1 + a[1, 0, 0] = 0 + assert_equal(np.minimum.reduce(a, axis=None), 0) + assert_equal(np.minimum.reduce(a, axis=(0, 1)), [0, 1, 1, 1]) + assert_equal(np.minimum.reduce(a, axis=(0, 2)), [0, 1, 1]) + assert_equal(np.minimum.reduce(a, axis=(1, 2)), [1, 0]) + assert_equal(np.minimum.reduce(a, axis=0), + [[0, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]) + assert_equal(np.minimum.reduce(a, axis=1), + [[1, 1, 1, 1], [0, 1, 1, 1]]) + assert_equal(np.minimum.reduce(a, axis=2), + [[1, 1, 1], [0, 1, 1]]) + assert_equal(np.minimum.reduce(a, axis=()), a) + + a[...] = 1 + a[0, 1, 0] = 0 + assert_equal(np.minimum.reduce(a, axis=None), 0) + assert_equal(np.minimum.reduce(a, axis=(0, 1)), [0, 1, 1, 1]) + assert_equal(np.minimum.reduce(a, axis=(0, 2)), [1, 0, 1]) + assert_equal(np.minimum.reduce(a, axis=(1, 2)), [0, 1]) + assert_equal(np.minimum.reduce(a, axis=0), + [[1, 1, 1, 1], [0, 1, 1, 1], [1, 1, 1, 1]]) + assert_equal(np.minimum.reduce(a, axis=1), + [[0, 1, 1, 1], [1, 1, 1, 1]]) + assert_equal(np.minimum.reduce(a, axis=2), + [[1, 0, 1], [1, 1, 1]]) + assert_equal(np.minimum.reduce(a, axis=()), a) + + a[...] = 1 + a[0, 0, 1] = 0 + assert_equal(np.minimum.reduce(a, axis=None), 0) + assert_equal(np.minimum.reduce(a, axis=(0, 1)), [1, 0, 1, 1]) + assert_equal(np.minimum.reduce(a, axis=(0, 2)), [0, 1, 1]) + assert_equal(np.minimum.reduce(a, axis=(1, 2)), [0, 1]) + assert_equal(np.minimum.reduce(a, axis=0), + [[1, 0, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]) + assert_equal(np.minimum.reduce(a, axis=1), + [[1, 0, 1, 1], [1, 1, 1, 1]]) + assert_equal(np.minimum.reduce(a, axis=2), + [[0, 1, 1], [1, 1, 1]]) + assert_equal(np.minimum.reduce(a, axis=()), a) + + @requires_memory(6 * 1024**3) + @pytest.mark.skipif(sys.maxsize < 2**32, + reason="test array too large for 32bit platform") + def test_identityless_reduction_huge_array(self): + # Regression test for gh-20921 (copying identity incorrectly failed) + arr = np.zeros((2, 2**31), 'uint8') + arr[:, 0] = [1, 3] + arr[:, -1] = [4, 1] + res = np.maximum.reduce(arr, axis=0) + del arr + assert res[0] == 3 + assert res[-1] == 4 + + def test_identityless_reduction_corder(self): + a = np.empty((2, 3, 4), order='C') + self.check_identityless_reduction(a) + + def test_identityless_reduction_forder(self): + a = np.empty((2, 3, 4), order='F') + self.check_identityless_reduction(a) + + def test_identityless_reduction_otherorder(self): + a = np.empty((2, 4, 3), order='C').swapaxes(1, 2) + self.check_identityless_reduction(a) + + def test_identityless_reduction_noncontig(self): + a = np.empty((3, 5, 4), order='C').swapaxes(1, 2) + a = a[1:, 1:, 1:] + self.check_identityless_reduction(a) + + def test_identityless_reduction_noncontig_unaligned(self): + a = np.empty((3*4*5*8 + 1,), dtype='i1') + a = a[1:].view(dtype='f8') + a.shape = (3, 4, 5) + a = a[1:, 1:, 1:] + self.check_identityless_reduction(a) + + def test_reduce_identity_depends_on_loop(self): + """ + The type of the result should always depend on the selected loop, not + necessarily the output (only relevant for object arrays). + """ + # For an object loop, the default value 0 with type int is used: + assert type(np.add.reduce([], dtype=object)) is int + out = np.array(None, dtype=object) + # When the loop is float64 but `out` is object this does not happen, + # the result is float64 cast to object (which gives Python `float`). 
+ np.add.reduce([], out=out, dtype=np.float64) + assert type(out[()]) is float + + def test_initial_reduction(self): + # np.minimum.reduce is an identityless reduction + + # For cases like np.maximum(np.abs(...), initial=0) + # More generally, a supremum over non-negative numbers. + assert_equal(np.maximum.reduce([], initial=0), 0) + + # For cases like reduction of an empty array over the reals. + assert_equal(np.minimum.reduce([], initial=np.inf), np.inf) + assert_equal(np.maximum.reduce([], initial=-np.inf), -np.inf) + + # Random tests + assert_equal(np.minimum.reduce([5], initial=4), 4) + assert_equal(np.maximum.reduce([4], initial=5), 5) + assert_equal(np.maximum.reduce([5], initial=4), 5) + assert_equal(np.minimum.reduce([4], initial=5), 4) + + # Check initial=None raises ValueError for both types of ufunc reductions + assert_raises(ValueError, np.minimum.reduce, [], initial=None) + assert_raises(ValueError, np.add.reduce, [], initial=None) + # Also in the somewhat special object case: + with pytest.raises(ValueError): + np.add.reduce([], initial=None, dtype=object) + + # Check that np._NoValue gives default behavior. + assert_equal(np.add.reduce([], initial=np._NoValue), 0) + + # Check that initial kwarg behaves as intended for dtype=object + a = np.array([10], dtype=object) + res = np.add.reduce(a, initial=5) + assert_equal(res, 15) + + def test_empty_reduction_and_identity(self): + arr = np.zeros((0, 5)) + # OK, since the reduction itself is *not* empty, the result is + assert np.true_divide.reduce(arr, axis=1).shape == (0,) + # Not OK, the reduction itself is empty and we have no identity + with pytest.raises(ValueError): + np.true_divide.reduce(arr, axis=0) + + # Test that an empty reduction fails also if the result is empty + arr = np.zeros((0, 0, 5)) + with pytest.raises(ValueError): + np.true_divide.reduce(arr, axis=1) + + # Division reduction makes sense with `initial=1` (empty or not): + res = np.true_divide.reduce(arr, axis=1, initial=1) + assert_array_equal(res, np.ones((0, 5))) + + @pytest.mark.parametrize('axis', (0, 1, None)) + @pytest.mark.parametrize('where', (np.array([False, True, True]), + np.array([[True], [False], [True]]), + np.array([[True, False, False], + [False, True, False], + [False, True, True]]))) + def test_reduction_with_where(self, axis, where): + a = np.arange(9.).reshape(3, 3) + a_copy = a.copy() + a_check = np.zeros_like(a) + np.positive(a, out=a_check, where=where) + + res = np.add.reduce(a, axis=axis, where=where) + check = a_check.sum(axis) + assert_equal(res, check) + # Check we do not overwrite elements of a internally.
+ assert_array_equal(a, a_copy) + + @pytest.mark.parametrize(('axis', 'where'), + ((0, np.array([True, False, True])), + (1, [True, True, False]), + (None, True))) + @pytest.mark.parametrize('initial', (-np.inf, 5.)) + def test_reduction_with_where_and_initial(self, axis, where, initial): + a = np.arange(9.).reshape(3, 3) + a_copy = a.copy() + a_check = np.full(a.shape, -np.inf) + np.positive(a, out=a_check, where=where) + + res = np.maximum.reduce(a, axis=axis, where=where, initial=initial) + check = a_check.max(axis, initial=initial) + assert_equal(res, check) + + def test_reduction_where_initial_needed(self): + a = np.arange(9.).reshape(3, 3) + m = [False, True, False] + assert_raises(ValueError, np.maximum.reduce, a, where=m) + + def test_identityless_reduction_nonreorderable(self): + a = np.array([[8.0, 2.0, 2.0], [1.0, 0.5, 0.25]]) + + res = np.divide.reduce(a, axis=0) + assert_equal(res, [8.0, 4.0, 8.0]) + + res = np.divide.reduce(a, axis=1) + assert_equal(res, [2.0, 8.0]) + + res = np.divide.reduce(a, axis=()) + assert_equal(res, a) + + assert_raises(ValueError, np.divide.reduce, a, axis=(0, 1)) + + def test_reduce_zero_axis(self): + # If we have a n x m array and do a reduction with axis=1, then we are + # doing n reductions, and each reduction takes an m-element array. For + # a reduction operation without an identity, then: + # n > 0, m > 0: fine + # n = 0, m > 0: fine, doing 0 reductions of m-element arrays + # n > 0, m = 0: can't reduce a 0-element array, ValueError + # n = 0, m = 0: can't reduce a 0-element array, ValueError (for + # consistency with the above case) + # This test doesn't actually look at return values, it just checks to + # make sure that error we get an error in exactly those cases where we + # expect one, and assumes the calculations themselves are done + # correctly. + + def ok(f, *args, **kwargs): + f(*args, **kwargs) + + def err(f, *args, **kwargs): + assert_raises(ValueError, f, *args, **kwargs) + + def t(expect, func, n, m): + expect(func, np.zeros((n, m)), axis=1) + expect(func, np.zeros((m, n)), axis=0) + expect(func, np.zeros((n // 2, n // 2, m)), axis=2) + expect(func, np.zeros((n // 2, m, n // 2)), axis=1) + expect(func, np.zeros((n, m // 2, m // 2)), axis=(1, 2)) + expect(func, np.zeros((m // 2, n, m // 2)), axis=(0, 2)) + expect(func, np.zeros((m // 3, m // 3, m // 3, + n // 2, n // 2)), + axis=(0, 1, 2)) + # Check what happens if the inner (resp. outer) dimensions are a + # mix of zero and non-zero: + expect(func, np.zeros((10, m, n)), axis=(0, 1)) + expect(func, np.zeros((10, n, m)), axis=(0, 2)) + expect(func, np.zeros((m, 10, n)), axis=0) + expect(func, np.zeros((10, m, n)), axis=1) + expect(func, np.zeros((10, n, m)), axis=2) + + # np.maximum is just an arbitrary ufunc with no reduction identity + assert_equal(np.maximum.identity, None) + t(ok, np.maximum.reduce, 30, 30) + t(ok, np.maximum.reduce, 0, 30) + t(err, np.maximum.reduce, 30, 0) + t(err, np.maximum.reduce, 0, 0) + err(np.maximum.reduce, []) + np.maximum.reduce(np.zeros((0, 0)), axis=()) + + # all of the combinations are fine for a reduction that has an + # identity + t(ok, np.add.reduce, 30, 30) + t(ok, np.add.reduce, 0, 30) + t(ok, np.add.reduce, 30, 0) + t(ok, np.add.reduce, 0, 0) + np.add.reduce([]) + np.add.reduce(np.zeros((0, 0)), axis=()) + + # OTOH, accumulate always makes sense for any combination of n and m, + # because it maps an m-element array to an m-element array. These + # tests are simpler because accumulate doesn't accept multiple axes. 
+ for uf in (np.maximum, np.add): + uf.accumulate(np.zeros((30, 0)), axis=0) + uf.accumulate(np.zeros((0, 30)), axis=0) + uf.accumulate(np.zeros((30, 30)), axis=0) + uf.accumulate(np.zeros((0, 0)), axis=0) + + def test_safe_casting(self): + # In old versions of numpy, in-place operations used the 'unsafe' + # casting rules. In versions >= 1.10, 'same_kind' is the + # default and an exception is raised instead of a warning. + # when 'same_kind' is not satisfied. + a = np.array([1, 2, 3], dtype=int) + # Non-in-place addition is fine + assert_array_equal(assert_no_warnings(np.add, a, 1.1), + [2.1, 3.1, 4.1]) + assert_raises(TypeError, np.add, a, 1.1, out=a) + + def add_inplace(a, b): + a += b + + assert_raises(TypeError, add_inplace, a, 1.1) + # Make sure that explicitly overriding the exception is allowed: + assert_no_warnings(np.add, a, 1.1, out=a, casting="unsafe") + assert_array_equal(a, [2, 3, 4]) + + def test_ufunc_custom_out(self): + # Test ufunc with built in input types and custom output type + + a = np.array([0, 1, 2], dtype='i8') + b = np.array([0, 1, 2], dtype='i8') + c = np.empty(3, dtype=_rational_tests.rational) + + # Output must be specified so numpy knows what + # ufunc signature to look for + result = _rational_tests.test_add(a, b, c) + target = np.array([0, 2, 4], dtype=_rational_tests.rational) + assert_equal(result, target) + + # The new resolution means that we can (usually) find custom loops + # as long as they match exactly: + result = _rational_tests.test_add(a, b) + assert_equal(result, target) + + # This works even more generally, so long the default common-dtype + # promoter works out: + result = _rational_tests.test_add(a, b.astype(np.uint16), out=c) + assert_equal(result, target) + + # But, it can be fooled, e.g. (use scalars, which forces legacy + # type resolution to kick in, which then fails): + with assert_raises(TypeError): + _rational_tests.test_add(a, np.uint16(2)) + + def test_operand_flags(self): + a = np.arange(16, dtype='l').reshape(4, 4) + b = np.arange(9, dtype='l').reshape(3, 3) + opflag_tests.inplace_add(a[:-1, :-1], b) + assert_equal(a, np.array([[0, 2, 4, 3], [7, 9, 11, 7], + [14, 16, 18, 11], [12, 13, 14, 15]], dtype='l')) + + a = np.array(0) + opflag_tests.inplace_add(a, 3) + assert_equal(a, 3) + opflag_tests.inplace_add(a, [3, 4]) + assert_equal(a, 10) + + def test_struct_ufunc(self): + import numpy.core._struct_ufunc_tests as struct_ufunc + + a = np.array([(1, 2, 3)], dtype='u8,u8,u8') + b = np.array([(1, 2, 3)], dtype='u8,u8,u8') + + result = struct_ufunc.add_triplet(a, b) + assert_equal(result, np.array([(2, 4, 6)], dtype='u8,u8,u8')) + assert_raises(RuntimeError, struct_ufunc.register_fail) + + def test_custom_ufunc(self): + a = np.array( + [_rational_tests.rational(1, 2), + _rational_tests.rational(1, 3), + _rational_tests.rational(1, 4)], + dtype=_rational_tests.rational) + b = np.array( + [_rational_tests.rational(1, 2), + _rational_tests.rational(1, 3), + _rational_tests.rational(1, 4)], + dtype=_rational_tests.rational) + + result = _rational_tests.test_add_rationals(a, b) + expected = np.array( + [_rational_tests.rational(1), + _rational_tests.rational(2, 3), + _rational_tests.rational(1, 2)], + dtype=_rational_tests.rational) + assert_equal(result, expected) + + def test_custom_ufunc_forced_sig(self): + # gh-9351 - looking for a non-first userloop would previously hang + with assert_raises(TypeError): + np.multiply(_rational_tests.rational(1), 1, + signature=(_rational_tests.rational, int, None)) + + def 
test_custom_array_like(self): + + class MyThing: + __array_priority__ = 1000 + + rmul_count = 0 + getitem_count = 0 + + def __init__(self, shape): + self.shape = shape + + def __len__(self): + return self.shape[0] + + def __getitem__(self, i): + MyThing.getitem_count += 1 + if not isinstance(i, tuple): + i = (i,) + if len(i) > self.ndim: + raise IndexError("boo") + + return MyThing(self.shape[len(i):]) + + def __rmul__(self, other): + MyThing.rmul_count += 1 + return self + + np.float64(5)*MyThing((3, 3)) + assert_(MyThing.rmul_count == 1, MyThing.rmul_count) + assert_(MyThing.getitem_count <= 2, MyThing.getitem_count) + + @pytest.mark.parametrize("a", ( + np.arange(10, dtype=int), + np.arange(10, dtype=_rational_tests.rational), + )) + def test_ufunc_at_basic(self, a): + + aa = a.copy() + np.add.at(aa, [2, 5, 2], 1) + assert_equal(aa, [0, 1, 4, 3, 4, 6, 6, 7, 8, 9]) + + with pytest.raises(ValueError): + # missing second operand + np.add.at(aa, [2, 5, 3]) + + aa = a.copy() + np.negative.at(aa, [2, 5, 3]) + assert_equal(aa, [0, 1, -2, -3, 4, -5, 6, 7, 8, 9]) + + aa = a.copy() + b = np.array([100, 100, 100]) + np.add.at(aa, [2, 5, 2], b) + assert_equal(aa, [0, 1, 202, 3, 4, 105, 6, 7, 8, 9]) + + with pytest.raises(ValueError): + # extraneous second operand + np.negative.at(a, [2, 5, 3], [1, 2, 3]) + + with pytest.raises(ValueError): + # second operand cannot be converted to an array + np.add.at(a, [2, 5, 3], [[1, 2], 1]) + + # ufuncs with indexed loops for performance in ufunc.at + indexed_ufuncs = [np.add, np.subtract, np.multiply, np.floor_divide, + np.maximum, np.minimum, np.fmax, np.fmin] + + @pytest.mark.parametrize( + "typecode", np.typecodes['AllInteger'] + np.typecodes['Float']) + @pytest.mark.parametrize("ufunc", indexed_ufuncs) + def test_ufunc_at_inner_loops(self, typecode, ufunc): + if ufunc is np.divide and typecode in np.typecodes['AllInteger']: + # Avoid divide-by-zero and inf for integer divide + a = np.ones(100, dtype=typecode) + indx = np.random.randint(100, size=30, dtype=np.intp) + vals = np.arange(1, 31, dtype=typecode) + else: + a = np.ones(1000, dtype=typecode) + indx = np.random.randint(1000, size=3000, dtype=np.intp) + vals = np.arange(3000, dtype=typecode) + atag = a.copy() + # Do the calculation twice and compare the answers + with warnings.catch_warnings(record=True) as w_at: + warnings.simplefilter('always') + ufunc.at(a, indx, vals) + with warnings.catch_warnings(record=True) as w_loop: + warnings.simplefilter('always') + for i, v in zip(indx, vals): + # Make sure all the work happens inside the ufunc + # in order to duplicate error/warning handling + ufunc(atag[i], v, out=atag[i:i+1], casting="unsafe") + assert_equal(atag, a) + # If w_loop warned, make sure w_at warned as well + if len(w_loop) > 0: + # + assert len(w_at) > 0 + assert w_at[0].category == w_loop[0].category + assert str(w_at[0].message)[:10] == str(w_loop[0].message)[:10] + + @pytest.mark.parametrize("typecode", np.typecodes['Complex']) + @pytest.mark.parametrize("ufunc", [np.add, np.subtract, np.multiply]) + def test_ufunc_at_inner_loops_complex(self, typecode, ufunc): + a = np.ones(10, dtype=typecode) + indx = np.concatenate([np.ones(6, dtype=np.intp), + np.full(18, 4, dtype=np.intp)]) + value = a.dtype.type(1j) + ufunc.at(a, indx, value) + expected = np.ones_like(a) + if ufunc is np.multiply: + expected[1] = expected[4] = -1 + else: + expected[1] += 6 * (value if ufunc is np.add else -value) + expected[4] += 18 * (value if ufunc is np.add else -value) + + assert_array_equal(a, expected) + + 
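+ # A minimal sketch of the `ufunc.at` semantics exercised above (illustrative only; + # assumes a small float array): `ufunc.at` performs an unbuffered, in-place + # operation, so repeated indices are applied repeatedly, unlike fancy-index + # assignment, which applies each duplicate index only once. + # + #     a = np.zeros(3) + #     a[[0, 0, 1]] += 1           # duplicates collapse    -> array([1., 1., 0.]) + #     b = np.zeros(3) + #     np.add.at(b, [0, 0, 1], 1)  # duplicates accumulate  -> array([2., 1., 0.]) + +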
def test_ufunc_at_ellipsis(self): + # Make sure the indexed loop check does not choke on iters + # with subspaces + arr = np.zeros(5) + np.add.at(arr, slice(None), np.ones(5)) + assert_array_equal(arr, np.ones(5)) + + def test_ufunc_at_negative(self): + arr = np.ones(5, dtype=np.int32) + indx = np.arange(5) + umt.indexed_negative.at(arr, indx) + # If it is [-1, -1, -1, -100, 0] then the regular strided loop was used + assert np.all(arr == [-1, -1, -1, -200, -1]) + + def test_ufunc_at_large(self): + # issue gh-23457 + indices = np.zeros(8195, dtype=np.int16) + b = np.zeros(8195, dtype=float) + b[0] = 10 + b[1] = 5 + b[8192:] = 100 + a = np.zeros(1, dtype=float) + np.add.at(a, indices, b) + assert a[0] == b.sum() + + def test_cast_index_fastpath(self): + arr = np.zeros(10) + values = np.ones(100000) + # index must be cast, which may be buffered in chunks: + index = np.zeros(len(values), dtype=np.uint8) + np.add.at(arr, index, values) + assert arr[0] == len(values) + + @pytest.mark.parametrize("value", [ + np.ones(1), np.ones(()), np.float64(1.), 1.]) + def test_ufunc_at_scalar_value_fastpath(self, value): + arr = np.zeros(1000) + # index must be cast, which may be buffered in chunks: + index = np.repeat(np.arange(1000), 2) + np.add.at(arr, index, value) + assert_array_equal(arr, np.full_like(arr, 2 * value)) + + def test_ufunc_at_multiD(self): + a = np.arange(9).reshape(3, 3) + b = np.array([[100, 100, 100], [200, 200, 200], [300, 300, 300]]) + np.add.at(a, (slice(None), [1, 2, 1]), b) + assert_equal(a, [[0, 201, 102], [3, 404, 205], [6, 607, 308]]) + + a = np.arange(27).reshape(3, 3, 3) + b = np.array([100, 200, 300]) + np.add.at(a, (slice(None), slice(None), [1, 2, 1]), b) + assert_equal(a, + [[[0, 401, 202], + [3, 404, 205], + [6, 407, 208]], + + [[9, 410, 211], + [12, 413, 214], + [15, 416, 217]], + + [[18, 419, 220], + [21, 422, 223], + [24, 425, 226]]]) + + a = np.arange(9).reshape(3, 3) + b = np.array([[100, 100, 100], [200, 200, 200], [300, 300, 300]]) + np.add.at(a, ([1, 2, 1], slice(None)), b) + assert_equal(a, [[0, 1, 2], [403, 404, 405], [206, 207, 208]]) + + a = np.arange(27).reshape(3, 3, 3) + b = np.array([100, 200, 300]) + np.add.at(a, (slice(None), [1, 2, 1], slice(None)), b) + assert_equal(a, + [[[0, 1, 2], + [203, 404, 605], + [106, 207, 308]], + + [[9, 10, 11], + [212, 413, 614], + [115, 216, 317]], + + [[18, 19, 20], + [221, 422, 623], + [124, 225, 326]]]) + + a = np.arange(9).reshape(3, 3) + b = np.array([100, 200, 300]) + np.add.at(a, (0, [1, 2, 1]), b) + assert_equal(a, [[0, 401, 202], [3, 4, 5], [6, 7, 8]]) + + a = np.arange(27).reshape(3, 3, 3) + b = np.array([100, 200, 300]) + np.add.at(a, ([1, 2, 1], 0, slice(None)), b) + assert_equal(a, + [[[0, 1, 2], + [3, 4, 5], + [6, 7, 8]], + + [[209, 410, 611], + [12, 13, 14], + [15, 16, 17]], + + [[118, 219, 320], + [21, 22, 23], + [24, 25, 26]]]) + + a = np.arange(27).reshape(3, 3, 3) + b = np.array([100, 200, 300]) + np.add.at(a, (slice(None), slice(None), slice(None)), b) + assert_equal(a, + [[[100, 201, 302], + [103, 204, 305], + [106, 207, 308]], + + [[109, 210, 311], + [112, 213, 314], + [115, 216, 317]], + + [[118, 219, 320], + [121, 222, 323], + [124, 225, 326]]]) + + def test_ufunc_at_0D(self): + a = np.array(0) + np.add.at(a, (), 1) + assert_equal(a, 1) + + assert_raises(IndexError, np.add.at, a, 0, 1) + assert_raises(IndexError, np.add.at, a, [], 1) + + def test_ufunc_at_dtypes(self): + # Test mixed dtypes + a = np.arange(10) + np.power.at(a, [1, 2, 3, 2], 3.5) + assert_equal(a, np.array([0, 1, 4414, 46, 4, 5, 6, 
7, 8, 9])) + + def test_ufunc_at_boolean(self): + # Test boolean indexing and boolean ufuncs + a = np.arange(10) + index = a % 2 == 0 + np.equal.at(a, index, [0, 2, 4, 6, 8]) + assert_equal(a, [1, 1, 1, 3, 1, 5, 1, 7, 1, 9]) + + # Test unary operator + a = np.arange(10, dtype='u4') + np.invert.at(a, [2, 5, 2]) + assert_equal(a, [0, 1, 2, 3, 4, 5 ^ 0xffffffff, 6, 7, 8, 9]) + + def test_ufunc_at_advanced(self): + # Test empty subspace + orig = np.arange(4) + a = orig[:, None][:, 0:0] + np.add.at(a, [0, 1], 3) + assert_array_equal(orig, np.arange(4)) + + # Test with swapped byte order + index = np.array([1, 2, 1], np.dtype('i').newbyteorder()) + values = np.array([1, 2, 3, 4], np.dtype('f').newbyteorder()) + np.add.at(values, index, 3) + assert_array_equal(values, [1, 8, 6, 4]) + + # Test exception thrown + values = np.array(['a', 1], dtype=object) + assert_raises(TypeError, np.add.at, values, [0, 1], 1) + assert_array_equal(values, np.array(['a', 1], dtype=object)) + + # Test multiple output ufuncs raise error, gh-5665 + assert_raises(ValueError, np.modf.at, np.arange(10), [1]) + + # Test maximum + a = np.array([1, 2, 3]) + np.maximum.at(a, [0], 0) + assert_equal(a, np.array([1, 2, 3])) + + @pytest.mark.parametrize("dtype", + np.typecodes['AllInteger'] + np.typecodes['Float']) + @pytest.mark.parametrize("ufunc", + [np.add, np.subtract, np.divide, np.minimum, np.maximum]) + def test_at_negative_indexes(self, dtype, ufunc): + a = np.arange(0, 10).astype(dtype) + indxs = np.array([-1, 1, -1, 2]).astype(np.intp) + vals = np.array([1, 5, 2, 10], dtype=a.dtype) + + expected = a.copy() + for i, v in zip(indxs, vals): + expected[i] = ufunc(expected[i], v) + + ufunc.at(a, indxs, vals) + assert_array_equal(a, expected) + assert np.all(indxs == [-1, 1, -1, 2]) + + def test_at_not_none_signature(self): + # Test ufuncs with non-trivial signature raise a TypeError + a = np.ones((2, 2, 2)) + b = np.ones((1, 2, 2)) + assert_raises(TypeError, np.matmul.at, a, [0], b) + + a = np.array([[[1, 2], [3, 4]]]) + assert_raises(TypeError, np.linalg._umath_linalg.det.at, a, [0]) + + def test_at_no_loop_for_op(self): + # str dtype does not have a ufunc loop for np.add + arr = np.ones(10, dtype=str) + with pytest.raises(np.core._exceptions._UFuncNoLoopError): + np.add.at(arr, [0, 1], [0, 1]) + + def test_at_output_casting(self): + arr = np.array([-1]) + np.equal.at(arr, [0], [0]) + assert arr[0] == 0 + + def test_at_broadcast_failure(self): + arr = np.arange(5) + with pytest.raises(ValueError): + np.add.at(arr, [0, 1], [1, 2, 3]) + + + def test_reduce_arguments(self): + f = np.add.reduce + d = np.ones((5,2), dtype=int) + o = np.ones((2,), dtype=d.dtype) + r = o * 5 + assert_equal(f(d), r) + # a, axis=0, dtype=None, out=None, keepdims=False + assert_equal(f(d, axis=0), r) + assert_equal(f(d, 0), r) + assert_equal(f(d, 0, dtype=None), r) + assert_equal(f(d, 0, dtype='i'), r) + assert_equal(f(d, 0, 'i'), r) + assert_equal(f(d, 0, None), r) + assert_equal(f(d, 0, None, out=None), r) + assert_equal(f(d, 0, None, out=o), r) + assert_equal(f(d, 0, None, o), r) + assert_equal(f(d, 0, None, None), r) + assert_equal(f(d, 0, None, None, keepdims=False), r) + assert_equal(f(d, 0, None, None, True), r.reshape((1,) + r.shape)) + assert_equal(f(d, 0, None, None, False, 0), r) + assert_equal(f(d, 0, None, None, False, initial=0), r) + assert_equal(f(d, 0, None, None, False, 0, True), r) + assert_equal(f(d, 0, None, None, False, 0, where=True), r) + # multiple keywords + assert_equal(f(d, axis=0, dtype=None, out=None, keepdims=False), 
r) + assert_equal(f(d, 0, dtype=None, out=None, keepdims=False), r) + assert_equal(f(d, 0, None, out=None, keepdims=False), r) + assert_equal(f(d, 0, None, out=None, keepdims=False, initial=0, + where=True), r) + + # too little + assert_raises(TypeError, f) + # too much + assert_raises(TypeError, f, d, 0, None, None, False, 0, True, 1) + # invalid axis + assert_raises(TypeError, f, d, "invalid") + assert_raises(TypeError, f, d, axis="invalid") + assert_raises(TypeError, f, d, axis="invalid", dtype=None, + keepdims=True) + # invalid dtype + assert_raises(TypeError, f, d, 0, "invalid") + assert_raises(TypeError, f, d, dtype="invalid") + assert_raises(TypeError, f, d, dtype="invalid", out=None) + # invalid out + assert_raises(TypeError, f, d, 0, None, "invalid") + assert_raises(TypeError, f, d, out="invalid") + assert_raises(TypeError, f, d, out="invalid", dtype=None) + # keepdims boolean, no invalid value + # assert_raises(TypeError, f, d, 0, None, None, "invalid") + # assert_raises(TypeError, f, d, keepdims="invalid", axis=0, dtype=None) + # invalid mix + assert_raises(TypeError, f, d, 0, keepdims="invalid", dtype="invalid", + out=None) + + # invalid keyword + assert_raises(TypeError, f, d, axis=0, dtype=None, invalid=0) + assert_raises(TypeError, f, d, invalid=0) + assert_raises(TypeError, f, d, 0, keepdims=True, invalid="invalid", + out=None) + assert_raises(TypeError, f, d, axis=0, dtype=None, keepdims=True, + out=None, invalid=0) + assert_raises(TypeError, f, d, axis=0, dtype=None, + out=None, invalid=0) + + def test_structured_equal(self): + # https://github.com/numpy/numpy/issues/4855 + + class MyA(np.ndarray): + def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): + return getattr(ufunc, method)(*(input.view(np.ndarray) + for input in inputs), **kwargs) + a = np.arange(12.).reshape(4,3) + ra = a.view(dtype=('f8,f8,f8')).squeeze() + mra = ra.view(MyA) + + target = np.array([ True, False, False, False], dtype=bool) + assert_equal(np.all(target == (mra == ra[0])), True) + + def test_scalar_equal(self): + # Scalar comparisons should always work, without deprecation warnings. + # even when the ufunc fails. + a = np.array(0.) + b = np.array('a') + assert_(a != b) + assert_(b != a) + assert_(not (a == b)) + assert_(not (b == a)) + + def test_NotImplemented_not_returned(self): + # See gh-5964 and gh-2091. Some of these functions are not operator + # related and were fixed for other reasons in the past. 
+ binary_funcs = [ + np.power, np.add, np.subtract, np.multiply, np.divide, + np.true_divide, np.floor_divide, np.bitwise_and, np.bitwise_or, + np.bitwise_xor, np.left_shift, np.right_shift, np.fmax, + np.fmin, np.fmod, np.hypot, np.logaddexp, np.logaddexp2, + np.maximum, np.minimum, np.mod, + np.greater, np.greater_equal, np.less, np.less_equal, + np.equal, np.not_equal] + + a = np.array('1') + b = 1 + c = np.array([1., 2.]) + for f in binary_funcs: + assert_raises(TypeError, f, a, b) + assert_raises(TypeError, f, c, a) + + @pytest.mark.parametrize("ufunc", + [np.logical_and, np.logical_or]) # logical_xor object loop is bad + @pytest.mark.parametrize("signature", + [(None, None, object), (object, None, None), + (None, object, None)]) + def test_logical_ufuncs_object_signatures(self, ufunc, signature): + a = np.array([True, None, False], dtype=object) + res = ufunc(a, a, signature=signature) + assert res.dtype == object + + @pytest.mark.parametrize("ufunc", + [np.logical_and, np.logical_or, np.logical_xor]) + @pytest.mark.parametrize("signature", + [(bool, None, object), (object, None, bool), + (None, object, bool)]) + def test_logical_ufuncs_mixed_object_signatures(self, ufunc, signature): + # Most mixed signatures fail (except those with bool out, e.g. `OO->?`) + a = np.array([True, None, False]) + with pytest.raises(TypeError): + ufunc(a, a, signature=signature) + + @pytest.mark.parametrize("ufunc", + [np.logical_and, np.logical_or, np.logical_xor]) + def test_logical_ufuncs_support_anything(self, ufunc): + # The logical ufuncs support even input that can't be promoted: + a = np.array(b'1', dtype="V3") + c = np.array([1., 2.]) + assert_array_equal(ufunc(a, c), ufunc([True, True], True)) + assert ufunc.reduce(a) == True + # check that the output has no effect: + out = np.zeros(2, dtype=np.int32) + expected = ufunc([True, True], True).astype(out.dtype) + assert_array_equal(ufunc(a, c, out=out), expected) + out = np.zeros((), dtype=np.int32) + assert ufunc.reduce(a, out=out) == True + # Last check, test reduction when out and a match (the complexity here + # is that the "i,i->?" may seem right, but should not match. + a = np.array([3], dtype="i") + out = np.zeros((), dtype=a.dtype) + assert ufunc.reduce(a, out=out) == 1 + + @pytest.mark.parametrize("ufunc", + [np.logical_and, np.logical_or, np.logical_xor]) + def test_logical_ufuncs_reject_string(self, ufunc): + """ + Logical ufuncs are normally well defined by working with the boolean + equivalent, i.e. casting all inputs to bools should work. + + However, casting strings to bools is *currently* weird, because it + actually uses `bool(int(str))`. Thus we explicitly reject strings. + This test should succeed (and can probably just be removed) as soon as + string to bool casts are well defined in NumPy. 
+ """ + with pytest.raises(TypeError, match="contain a loop with signature"): + ufunc(["1"], ["3"]) + with pytest.raises(TypeError, match="contain a loop with signature"): + ufunc.reduce(["1", "2", "0"]) + + @pytest.mark.parametrize("ufunc", + [np.logical_and, np.logical_or, np.logical_xor]) + def test_logical_ufuncs_out_cast_check(self, ufunc): + a = np.array('1') + c = np.array([1., 2.]) + out = a.copy() + with pytest.raises(TypeError): + # It would be safe, but not equiv casting: + ufunc(a, c, out=out, casting="equiv") + + def test_reducelike_byteorder_resolution(self): + # See gh-20699, byte-order changes need some extra care in the type + # resolution to make the following succeed: + arr_be = np.arange(10, dtype=">i8") + arr_le = np.arange(10, dtype="<i8") + + assert np.add.reduce(arr_be) == np.add.reduce(arr_le) + assert_array_equal(np.add.accumulate(arr_be), np.add.accumulate(arr_le)) + assert_array_equal( + np.add.reduceat(arr_be, [1]), np.add.reduceat(arr_le, [1])) + + def test_reducelike_out_promotes(self): + # Check that the out argument to reductions is considered for + # promotion. See also gh-20455. + # Note that these paths could prefer `initial=` in the future and + # do not up-cast to the default integer for add and prod + arr = np.ones(1000, dtype=np.uint8) + out = np.zeros((), dtype=np.uint16) + assert np.add.reduce(arr, out=out) == 1000 + arr[:10] = 2 + assert np.multiply.reduce(arr, out=out) == 2**10 + + # For legacy dtypes, the signature currently has to be forced if `out=` + # is passed. The two paths below should differ, without `dtype=` the + # expected result should be: `np.prod(arr.astype("f8")).astype("f4")`! + arr = np.full(5, 2**25-1, dtype=np.int64) + + # float32 and int64 promote to float64: + res = np.zeros((), dtype=np.float32) + # If `dtype=` is passed, the calculation is forced to float32: + single_res = np.zeros((), dtype=np.float32) + np.multiply.reduce(arr, out=single_res, dtype=np.float32) + assert single_res != res + + def test_reducelike_output_needs_identical_cast(self): + # Checks the case where the we have a simple byte-swap works, maily + # tests that this is not rejected directly. + # (interesting because we require descriptor identity in reducelikes). + arr = np.ones(20, dtype="f8") + out = np.empty((), dtype=arr.dtype.newbyteorder()) + expected = np.add.reduce(arr) + np.add.reduce(arr, out=out) + assert_array_equal(expected, out) + # Check reduceat: + out = np.empty(2, dtype=arr.dtype.newbyteorder()) + expected = np.add.reduceat(arr, [0, 1]) + np.add.reduceat(arr, [0, 1], out=out) + assert_array_equal(expected, out) + # And accumulate: + out = np.empty(arr.shape, dtype=arr.dtype.newbyteorder()) + expected = np.add.accumulate(arr) + np.add.accumulate(arr, out=out) + assert_array_equal(expected, out) + + def test_reduce_noncontig_output(self): + # Check that reduction deals with non-contiguous output arrays + # appropriately. 
+ # + # gh-8036 + + x = np.arange(7*13*8, dtype=np.int16).reshape(7, 13, 8) + x = x[4:6,1:11:6,1:5].transpose(1, 2, 0) + y_base = np.arange(4*4, dtype=np.int16).reshape(4, 4) + y = y_base[::2,:] + + y_base_copy = y_base.copy() + + r0 = np.add.reduce(x, out=y.copy(), axis=2) + r1 = np.add.reduce(x, out=y, axis=2) + + # The results should match, and y_base shouldn't get clobbered + assert_equal(r0, r1) + assert_equal(y_base[1,:], y_base_copy[1,:]) + assert_equal(y_base[3,:], y_base_copy[3,:]) + + @pytest.mark.parametrize("with_cast", [True, False]) + def test_reduceat_and_accumulate_out_shape_mismatch(self, with_cast): + # Should raise an error mentioning "shape" or "size" + arr = np.arange(5) + out = np.arange(3) # definitely wrong shape + if with_cast: + # If a cast is necessary on the output, we can be sure to use + # the generic NpyIter (non-fast) path. + out = out.astype(np.float64) + + with pytest.raises(ValueError, match="(shape|size)"): + np.add.reduceat(arr, [0, 3], out=out) + + with pytest.raises(ValueError, match="(shape|size)"): + np.add.accumulate(arr, out=out) + + @pytest.mark.parametrize('out_shape', + [(), (1,), (3,), (1, 1), (1, 3), (4, 3)]) + @pytest.mark.parametrize('keepdims', [True, False]) + @pytest.mark.parametrize('f_reduce', [np.add.reduce, np.minimum.reduce]) + def test_reduce_wrong_dimension_output(self, f_reduce, keepdims, out_shape): + # Test that we're not incorrectly broadcasting dimensions. + # See gh-15144 (failed for np.add.reduce previously). + a = np.arange(12.).reshape(4, 3) + out = np.empty(out_shape, a.dtype) + + correct_out = f_reduce(a, axis=0, keepdims=keepdims) + if out_shape != correct_out.shape: + with assert_raises(ValueError): + f_reduce(a, axis=0, out=out, keepdims=keepdims) + else: + check = f_reduce(a, axis=0, out=out, keepdims=keepdims) + assert_(check is out) + assert_array_equal(check, correct_out) + + def test_reduce_output_does_not_broadcast_input(self): + # Test that the output shape cannot broadcast an input dimension + # (it never can add dimensions, but it might expand an existing one) + a = np.ones((1, 10)) + out_correct = (np.empty((1, 1))) + out_incorrect = np.empty((3, 1)) + np.add.reduce(a, axis=-1, out=out_correct, keepdims=True) + np.add.reduce(a, axis=-1, out=out_correct[:, 0], keepdims=False) + with assert_raises(ValueError): + np.add.reduce(a, axis=-1, out=out_incorrect, keepdims=True) + with assert_raises(ValueError): + np.add.reduce(a, axis=-1, out=out_incorrect[:, 0], keepdims=False) + + def test_reduce_output_subclass_ok(self): + class MyArr(np.ndarray): + pass + + out = np.empty(()) + np.add.reduce(np.ones(5), out=out) # no subclass, all fine + out = out.view(MyArr) + assert np.add.reduce(np.ones(5), out=out) is out + assert type(np.add.reduce(out)) is MyArr + + def test_no_doc_string(self): + # gh-9337 + assert_('\n' not in umt.inner1d_no_doc.__doc__) + + def test_invalid_args(self): + # gh-7961 + exc = pytest.raises(TypeError, np.sqrt, None) + # minimally check the exception text + assert exc.match('loop of ufunc does not support') + + @pytest.mark.parametrize('nat', [np.datetime64('nat'), np.timedelta64('nat')]) + def test_nat_is_not_finite(self, nat): + try: + assert not np.isfinite(nat) + except TypeError: + pass # ok, just not implemented + + @pytest.mark.parametrize('nat', [np.datetime64('nat'), np.timedelta64('nat')]) + def test_nat_is_nan(self, nat): + try: + assert np.isnan(nat) + except TypeError: + pass # ok, just not implemented + + @pytest.mark.parametrize('nat', [np.datetime64('nat'), 
np.timedelta64('nat')]) + def test_nat_is_not_inf(self, nat): + try: + assert not np.isinf(nat) + except TypeError: + pass # ok, just not implemented + + +@pytest.mark.parametrize('ufunc', [getattr(np, x) for x in dir(np) + if isinstance(getattr(np, x), np.ufunc)]) +def test_ufunc_types(ufunc): + ''' + Check that all ufuncs return the correct type. Avoid + object and boolean types since many operations are not defined + for them. + + Choose the shape so even dot and matmul will succeed + ''' + for typ in ufunc.types: + # types is a list of strings like ii->i + if 'O' in typ or '?' in typ: + continue + inp, out = typ.split('->') + args = [np.ones((3, 3), t) for t in inp] + with warnings.catch_warnings(record=True): + warnings.filterwarnings("always") + res = ufunc(*args) + if isinstance(res, tuple): + outs = tuple(out) + assert len(res) == len(outs) + for r, t in zip(res, outs): + assert r.dtype == np.dtype(t) + else: + assert res.dtype == np.dtype(out) + +@pytest.mark.parametrize('ufunc', [getattr(np, x) for x in dir(np) + if isinstance(getattr(np, x), np.ufunc)]) +@np._no_nep50_warning() +def test_ufunc_noncontiguous(ufunc): + ''' + Check that contiguous and non-contiguous calls to ufuncs + have the same results for values in range(1, 7) + ''' + for typ in ufunc.types: + # types is a list of strings like ii->i + if any(set('O?mM') & set(typ)): + # bool, object, datetime are too irregular for this simple test + continue + inp, out = typ.split('->') + args_c = [np.empty(6, t) for t in inp] + args_n = [np.empty(18, t)[::3] for t in inp] + for a in args_c: + a.flat = range(1,7) + for a in args_n: + a.flat = range(1,7) + with warnings.catch_warnings(record=True): + warnings.filterwarnings("always") + res_c = ufunc(*args_c) + res_n = ufunc(*args_n) + if len(out) == 1: + res_c = (res_c,) + res_n = (res_n,) + for c_ar, n_ar in zip(res_c, res_n): + dt = c_ar.dtype + if np.issubdtype(dt, np.floating): + # for floating point results allow a small fuzz in comparisons + # since different algorithms (libm vs. intrinsics) can be used + # for different input strides + res_eps = np.finfo(dt).eps + tol = 2*res_eps + assert_allclose(res_c, res_n, atol=tol, rtol=tol) + else: + assert_equal(c_ar, n_ar) + + +@pytest.mark.parametrize('ufunc', [np.sign, np.equal]) +def test_ufunc_warn_with_nan(ufunc): + # issue gh-15127 + # test that calling certain ufuncs with a non-standard `nan` value does not + # emit a warning + # `b` holds a 64 bit signaling nan: the most significant bit of the + # significand is zero. + b = np.array([0x7ff0000000000001], 'i8').view('f8') + assert np.isnan(b) + if ufunc.nin == 1: + ufunc(b) + elif ufunc.nin == 2: + ufunc(b, b.copy()) + else: + raise ValueError('ufunc with more than 2 inputs') + + +@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") +def test_ufunc_out_casterrors(): + # Tests that casting errors are correctly reported and buffers are + # cleared.
+ # The following array can be added to itself as an object array, but + # the result cannot be cast to an integer output: + value = 123 # relies on python cache (leak-check will still find it) + arr = np.array([value] * int(np.BUFSIZE * 1.5) + + ["string"] + + [value] * int(1.5 * np.BUFSIZE), dtype=object) + out = np.ones(len(arr), dtype=np.intp) + + count = sys.getrefcount(value) + with pytest.raises(ValueError): + # Output casting failure: + np.add(arr, arr, out=out, casting="unsafe") + + assert count == sys.getrefcount(value) + # output is unchanged after the error, this shows that the iteration + # was aborted (this is not necessarily defined behaviour) + assert out[-1] == 1 + + with pytest.raises(ValueError): + # Input casting failure: + np.add(arr, arr, out=out, dtype=np.intp, casting="unsafe") + + assert count == sys.getrefcount(value) + # output is unchanged after the error, this shows that the iteration + # was aborted (this is not necessarily defined behaviour) + assert out[-1] == 1 + + +@pytest.mark.parametrize("bad_offset", [0, int(np.BUFSIZE * 1.5)]) +def test_ufunc_input_casterrors(bad_offset): + value = 123 + arr = np.array([value] * bad_offset + + ["string"] + + [value] * int(1.5 * np.BUFSIZE), dtype=object) + with pytest.raises(ValueError): + # Force cast inputs, but the buffered cast of `arr` to intp fails: + np.add(arr, arr, dtype=np.intp, casting="unsafe") + + +@pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") +@pytest.mark.parametrize("bad_offset", [0, int(np.BUFSIZE * 1.5)]) +def test_ufunc_input_floatingpoint_error(bad_offset): + value = 123 + arr = np.array([value] * bad_offset + + [np.nan] + + [value] * int(1.5 * np.BUFSIZE)) + with np.errstate(invalid="raise"), pytest.raises(FloatingPointError): + # Force cast inputs, but the buffered cast of `arr` to intp fails: + np.add(arr, arr, dtype=np.intp, casting="unsafe") + + +def test_trivial_loop_invalid_cast(): + # This tests the fast-path "invalid cast", see gh-19904. + with pytest.raises(TypeError, + match="cast ufunc 'add' input 0"): + # the void dtype definitely cannot cast to double: + np.add(np.array(1, "i,i"), 3, signature="dd->d") + + +@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") +@pytest.mark.parametrize("offset", + [0, np.BUFSIZE//2, int(1.5*np.BUFSIZE)]) +def test_reduce_casterrors(offset): + # Test reporting of casting errors in reductions, we test various + # offsets to where the casting error will occur, since these may occur + # at different places during the reduction procedure. For example + # the first item may be special. + value = 123 # relies on python cache (leak-check will still find it) + arr = np.array([value] * offset + + ["string"] + + [value] * int(1.5 * np.BUFSIZE), dtype=object) + out = np.array(-1, dtype=np.intp) + + count = sys.getrefcount(value) + with pytest.raises(ValueError, match="invalid literal"): + # This is an unsafe cast, but we currently always allow that. + # Note that the double loop is picked, but the cast fails. + # `initial=None` disables the use of an identity here to test failures + # while copying the first values path (not used when identity exists). + np.add.reduce(arr, dtype=np.intp, out=out, initial=None) + assert count == sys.getrefcount(value) + # If an error occurred during casting, the operation is done at most until + # the error occurs (the result of which would be `value * offset`) and -1 + # if the error happened immediately. 
+ # This does not define behaviour, the output is invalid and thus undefined + assert out[()] < value * offset + + +def test_object_reduce_cleanup_on_failure(): + # Test cleanup, including of the initial value (manually provided or not) + with pytest.raises(TypeError): + np.add.reduce([1, 2, None], initial=4) + + with pytest.raises(TypeError): + np.add.reduce([1, 2, None]) + + +@pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") +@pytest.mark.parametrize("method", + [np.add.accumulate, np.add.reduce, + pytest.param(lambda x: np.add.reduceat(x, [0]), id="reduceat"), + pytest.param(lambda x: np.log.at(x, [2]), id="at")]) +def test_ufunc_methods_floaterrors(method): + # adding inf and -inf (or log(-inf) creates an invalid float and warns + arr = np.array([np.inf, 0, -np.inf]) + with np.errstate(all="warn"): + with pytest.warns(RuntimeWarning, match="invalid value"): + method(arr) + + arr = np.array([np.inf, 0, -np.inf]) + with np.errstate(all="raise"): + with pytest.raises(FloatingPointError): + method(arr) + + +def _check_neg_zero(value): + if value != 0.0: + return False + if not np.signbit(value.real): + return False + if value.dtype.kind == "c": + return np.signbit(value.imag) + return True + +@pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) +def test_addition_negative_zero(dtype): + dtype = np.dtype(dtype) + if dtype.kind == "c": + neg_zero = dtype.type(complex(-0.0, -0.0)) + else: + neg_zero = dtype.type(-0.0) + + arr = np.array(neg_zero) + arr2 = np.array(neg_zero) + + assert _check_neg_zero(arr + arr2) + # In-place ops may end up on a different path (reduce path) see gh-21211 + arr += arr2 + assert _check_neg_zero(arr) + + +@pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) +@pytest.mark.parametrize("use_initial", [True, False]) +def test_addition_reduce_negative_zero(dtype, use_initial): + dtype = np.dtype(dtype) + if dtype.kind == "c": + neg_zero = dtype.type(complex(-0.0, -0.0)) + else: + neg_zero = dtype.type(-0.0) + + kwargs = {} + if use_initial: + kwargs["initial"] = neg_zero + else: + pytest.xfail("-0. propagation in sum currently requires initial") + + # Test various length, in case SIMD paths or chunking play a role. + # 150 extends beyond the pairwise blocksize; probably not important. 
+ for i in range(0, 150): + arr = np.array([neg_zero] * i, dtype=dtype) + res = np.sum(arr, **kwargs) + if i > 0 or use_initial: + assert _check_neg_zero(res) + else: + # `sum([])` should probably be 0.0 and not -0.0 like `sum([-0.0])` + assert not np.signbit(res.real) + assert not np.signbit(res.imag) + +class TestLowlevelAPIAccess: + def test_resolve_dtypes_basic(self): + # Basic test for dtype resolution: + i4 = np.dtype("i4") + f4 = np.dtype("f4") + f8 = np.dtype("f8") + + r = np.add.resolve_dtypes((i4, f4, None)) + assert r == (f8, f8, f8) + + # Signature uses the same logic to parse as ufunc (less strict) + # the following is "same-kind" casting so works: + r = np.add.resolve_dtypes(( + i4, i4, None), signature=(None, None, "f4")) + assert r == (f4, f4, f4) + + # Check NEP 50 "weak" promotion also: + r = np.add.resolve_dtypes((f4, int, None)) + assert r == (f4, f4, f4) + + with pytest.raises(TypeError): + np.add.resolve_dtypes((i4, f4, None), casting="no") + + def test_weird_dtypes(self): + S0 = np.dtype("S0") + # S0 is often converted by NumPy to S1, but not here: + r = np.equal.resolve_dtypes((S0, S0, None)) + assert r == (S0, S0, np.dtype(bool)) + + # Subarray dtypes are weird and may not work fully, we preserve them + # leading to a TypeError (currently no equal loop for void/structured) + dts = np.dtype("10i") + with pytest.raises(TypeError): + np.equal.resolve_dtypes((dts, dts, None)) + + def test_resolve_dtypes_reduction(self): + i4 = np.dtype("i4") + with pytest.raises(NotImplementedError): + np.add.resolve_dtypes((i4, i4, i4), reduction=True) + + @pytest.mark.parametrize("dtypes", [ + (np.dtype("i"), np.dtype("i")), + (None, np.dtype("i"), np.dtype("f")), + (np.dtype("i"), None, np.dtype("f")), + ("i4", "i4", None)]) + def test_resolve_dtypes_errors(self, dtypes): + with pytest.raises(TypeError): + np.add.resolve_dtypes(dtypes) + + def test_resolve_dtypes_reduction(self): + i2 = np.dtype("i2") + long_ = np.dtype("long") + # Check special addition resolution: + res = np.add.resolve_dtypes((None, i2, None), reduction=True) + assert res == (long_, long_, long_) + + def test_resolve_dtypes_reduction_errors(self): + i2 = np.dtype("i2") + + with pytest.raises(TypeError): + np.add.resolve_dtypes((None, i2, i2)) + + with pytest.raises(TypeError): + np.add.signature((None, None, "i4")) + + @pytest.mark.skipif(not hasattr(ct, "pythonapi"), + reason="`ctypes.pythonapi` required for capsule unpacking.") + def test_loop_access(self): + # This is a basic test for the full strided loop access + data_t = ct.ARRAY(ct.c_char_p, 2) + dim_t = ct.ARRAY(ct.c_ssize_t, 1) + strides_t = ct.ARRAY(ct.c_ssize_t, 2) + strided_loop_t = ct.CFUNCTYPE( + ct.c_int, ct.c_void_p, data_t, dim_t, strides_t, ct.c_void_p) + + class call_info_t(ct.Structure): + _fields_ = [ + ("strided_loop", strided_loop_t), + ("context", ct.c_void_p), + ("auxdata", ct.c_void_p), + ("requires_pyapi", ct.c_byte), + ("no_floatingpoint_errors", ct.c_byte), + ] + + i4 = np.dtype("i4") + dt, call_info_obj = np.negative._resolve_dtypes_and_context((i4, i4)) + assert dt == (i4, i4) # can be used without casting + + # Fill in the rest of the information: + np.negative._get_strided_loop(call_info_obj) + + ct.pythonapi.PyCapsule_GetPointer.restype = ct.c_void_p + call_info = ct.pythonapi.PyCapsule_GetPointer( + ct.py_object(call_info_obj), + ct.c_char_p(b"numpy_1.24_ufunc_call_info")) + + call_info = ct.cast(call_info, ct.POINTER(call_info_t)).contents + + arr = np.arange(10, dtype=i4) + call_info.strided_loop( + call_info.context, + 
data_t(arr.ctypes.data, arr.ctypes.data), + arr.ctypes.shape, # is a C-array with 10 here + strides_t(arr.ctypes.strides[0], arr.ctypes.strides[0]), + call_info.auxdata) + + # We just directly called the negative inner-loop in-place: + assert_array_equal(arr, -np.arange(10, dtype=i4)) + + @pytest.mark.parametrize("strides", [1, (1, 2, 3), (1, "2")]) + def test__get_strided_loop_errors_bad_strides(self, strides): + i4 = np.dtype("i4") + dt, call_info = np.negative._resolve_dtypes_and_context((i4, i4)) + + with pytest.raises(TypeError, match="fixed_strides.*tuple.*or None"): + np.negative._get_strided_loop(call_info, fixed_strides=strides) + + def test__get_strided_loop_errors_bad_call_info(self): + i4 = np.dtype("i4") + dt, call_info = np.negative._resolve_dtypes_and_context((i4, i4)) + + with pytest.raises(ValueError, match="PyCapsule"): + np.negative._get_strided_loop("not the capsule!") + + with pytest.raises(TypeError, match=".*incompatible context"): + np.add._get_strided_loop(call_info) + + np.negative._get_strided_loop(call_info) + with pytest.raises(TypeError): + # cannot call it a second time: + np.negative._get_strided_loop(call_info) + + def test_long_arrays(self): + t = np.zeros((1029, 917), dtype=np.single) + t[0][0] = 1 + t[28][414] = 1 + tc = np.cos(t) + assert_equal(tc[0][0], tc[28][414]) diff --git a/.venv/lib/python3.12/site-packages/numpy/core/tests/test_umath.py b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_umath.py new file mode 100644 index 00000000..963e740d --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_umath.py @@ -0,0 +1,4743 @@ +import platform +import warnings +import fnmatch +import itertools +import pytest +import sys +import os +import operator +from fractions import Fraction +from functools import reduce +from collections import namedtuple + +import numpy.core.umath as ncu +from numpy.core import _umath_tests as ncu_tests +import numpy as np +from numpy.testing import ( + assert_, assert_equal, assert_raises, assert_raises_regex, + assert_array_equal, assert_almost_equal, assert_array_almost_equal, + assert_array_max_ulp, assert_allclose, assert_no_warnings, suppress_warnings, + _gen_alignment_data, assert_array_almost_equal_nulp, IS_WASM, IS_MUSL, + IS_PYPY + ) +from numpy.testing._private.utils import _glibc_older_than + +UFUNCS = [obj for obj in np.core.umath.__dict__.values() + if isinstance(obj, np.ufunc)] + +UFUNCS_UNARY = [ + uf for uf in UFUNCS if uf.nin == 1 +] +UFUNCS_UNARY_FP = [ + uf for uf in UFUNCS_UNARY if 'f->f' in uf.types +] + +UFUNCS_BINARY = [ + uf for uf in UFUNCS if uf.nin == 2 +] +UFUNCS_BINARY_ACC = [ + uf for uf in UFUNCS_BINARY if hasattr(uf, "accumulate") and uf.nout == 1 +] + +def interesting_binop_operands(val1, val2, dtype): + """ + Helper to create "interesting" operands to cover common code paths: + * scalar inputs + * only first "values" is an array (e.g. scalar division fast-paths) + * Longer array (SIMD) placing the value of interest at different positions + * Oddly strided arrays which may not be SIMD compatible + + It does not attempt to cover unaligned access or mixed dtypes. + These are normally handled by the casting/buffering machinery. + + This is not a fixture (currently), since I believe a fixture normally + only yields once? + """ + fill_value = 1 # could be a parameter, but maybe not an optional one? 
+ + arr1 = np.full(10003, dtype=dtype, fill_value=fill_value) + arr2 = np.full(10003, dtype=dtype, fill_value=fill_value) + + arr1[0] = val1 + arr2[0] = val2 + + extractor = lambda res: res + yield arr1[0], arr2[0], extractor, "scalars" + + extractor = lambda res: res + yield arr1[0, ...], arr2[0, ...], extractor, "scalar-arrays" + + # reset array values to fill_value: + arr1[0] = fill_value + arr2[0] = fill_value + + for pos in [0, 1, 2, 3, 4, 5, -1, -2, -3, -4]: + arr1[pos] = val1 + arr2[pos] = val2 + + extractor = lambda res: res[pos] + yield arr1, arr2, extractor, f"off-{pos}" + yield arr1, arr2[pos], extractor, f"off-{pos}-with-scalar" + + arr1[pos] = fill_value + arr2[pos] = fill_value + + for stride in [-1, 113]: + op1 = arr1[::stride] + op2 = arr2[::stride] + op1[10] = val1 + op2[10] = val2 + + extractor = lambda res: res[10] + yield op1, op2, extractor, f"stride-{stride}" + + op1[10] = fill_value + op2[10] = fill_value + + +def on_powerpc(): + """ True if we are running on a Power PC platform.""" + return platform.processor() == 'powerpc' or \ + platform.machine().startswith('ppc') + + +def bad_arcsinh(): + """The blocklisted trig functions are not accurate on aarch64/PPC for + complex256. Rather than dig through the actual problem skip the + test. This should be fixed when we can move past glibc2.17 + which is the version in manylinux2014 + """ + if platform.machine() == 'aarch64': + x = 1.78e-10 + elif on_powerpc(): + x = 2.16e-10 + else: + return False + v1 = np.arcsinh(np.float128(x)) + v2 = np.arcsinh(np.complex256(x)).real + # The eps for float128 is 1-e33, so this is way bigger + return abs((v1 / v2) - 1.0) > 1e-23 + + +class _FilterInvalids: + def setup_method(self): + self.olderr = np.seterr(invalid='ignore') + + def teardown_method(self): + np.seterr(**self.olderr) + + +class TestConstants: + def test_pi(self): + assert_allclose(ncu.pi, 3.141592653589793, 1e-15) + + def test_e(self): + assert_allclose(ncu.e, 2.718281828459045, 1e-15) + + def test_euler_gamma(self): + assert_allclose(ncu.euler_gamma, 0.5772156649015329, 1e-15) + + +class TestOut: + def test_out_subok(self): + for subok in (True, False): + a = np.array(0.5) + o = np.empty(()) + + r = np.add(a, 2, o, subok=subok) + assert_(r is o) + r = np.add(a, 2, out=o, subok=subok) + assert_(r is o) + r = np.add(a, 2, out=(o,), subok=subok) + assert_(r is o) + + d = np.array(5.7) + o1 = np.empty(()) + o2 = np.empty((), dtype=np.int32) + + r1, r2 = np.frexp(d, o1, None, subok=subok) + assert_(r1 is o1) + r1, r2 = np.frexp(d, None, o2, subok=subok) + assert_(r2 is o2) + r1, r2 = np.frexp(d, o1, o2, subok=subok) + assert_(r1 is o1) + assert_(r2 is o2) + + r1, r2 = np.frexp(d, out=(o1, None), subok=subok) + assert_(r1 is o1) + r1, r2 = np.frexp(d, out=(None, o2), subok=subok) + assert_(r2 is o2) + r1, r2 = np.frexp(d, out=(o1, o2), subok=subok) + assert_(r1 is o1) + assert_(r2 is o2) + + with assert_raises(TypeError): + # Out argument must be tuple, since there are multiple outputs. 
+ r1, r2 = np.frexp(d, out=o1, subok=subok) + + assert_raises(TypeError, np.add, a, 2, o, o, subok=subok) + assert_raises(TypeError, np.add, a, 2, o, out=o, subok=subok) + assert_raises(TypeError, np.add, a, 2, None, out=o, subok=subok) + assert_raises(ValueError, np.add, a, 2, out=(o, o), subok=subok) + assert_raises(ValueError, np.add, a, 2, out=(), subok=subok) + assert_raises(TypeError, np.add, a, 2, [], subok=subok) + assert_raises(TypeError, np.add, a, 2, out=[], subok=subok) + assert_raises(TypeError, np.add, a, 2, out=([],), subok=subok) + o.flags.writeable = False + assert_raises(ValueError, np.add, a, 2, o, subok=subok) + assert_raises(ValueError, np.add, a, 2, out=o, subok=subok) + assert_raises(ValueError, np.add, a, 2, out=(o,), subok=subok) + + def test_out_wrap_subok(self): + class ArrayWrap(np.ndarray): + __array_priority__ = 10 + + def __new__(cls, arr): + return np.asarray(arr).view(cls).copy() + + def __array_wrap__(self, arr, context): + return arr.view(type(self)) + + for subok in (True, False): + a = ArrayWrap([0.5]) + + r = np.add(a, 2, subok=subok) + if subok: + assert_(isinstance(r, ArrayWrap)) + else: + assert_(type(r) == np.ndarray) + + r = np.add(a, 2, None, subok=subok) + if subok: + assert_(isinstance(r, ArrayWrap)) + else: + assert_(type(r) == np.ndarray) + + r = np.add(a, 2, out=None, subok=subok) + if subok: + assert_(isinstance(r, ArrayWrap)) + else: + assert_(type(r) == np.ndarray) + + r = np.add(a, 2, out=(None,), subok=subok) + if subok: + assert_(isinstance(r, ArrayWrap)) + else: + assert_(type(r) == np.ndarray) + + d = ArrayWrap([5.7]) + o1 = np.empty((1,)) + o2 = np.empty((1,), dtype=np.int32) + + r1, r2 = np.frexp(d, o1, subok=subok) + if subok: + assert_(isinstance(r2, ArrayWrap)) + else: + assert_(type(r2) == np.ndarray) + + r1, r2 = np.frexp(d, o1, None, subok=subok) + if subok: + assert_(isinstance(r2, ArrayWrap)) + else: + assert_(type(r2) == np.ndarray) + + r1, r2 = np.frexp(d, None, o2, subok=subok) + if subok: + assert_(isinstance(r1, ArrayWrap)) + else: + assert_(type(r1) == np.ndarray) + + r1, r2 = np.frexp(d, out=(o1, None), subok=subok) + if subok: + assert_(isinstance(r2, ArrayWrap)) + else: + assert_(type(r2) == np.ndarray) + + r1, r2 = np.frexp(d, out=(None, o2), subok=subok) + if subok: + assert_(isinstance(r1, ArrayWrap)) + else: + assert_(type(r1) == np.ndarray) + + with assert_raises(TypeError): + # Out argument must be tuple, since there are multiple outputs. 
+ r1, r2 = np.frexp(d, out=o1, subok=subok) + + +class TestComparisons: + import operator + + @pytest.mark.parametrize('dtype', np.sctypes['uint'] + np.sctypes['int'] + + np.sctypes['float'] + [np.bool_]) + @pytest.mark.parametrize('py_comp,np_comp', [ + (operator.lt, np.less), + (operator.le, np.less_equal), + (operator.gt, np.greater), + (operator.ge, np.greater_equal), + (operator.eq, np.equal), + (operator.ne, np.not_equal) + ]) + def test_comparison_functions(self, dtype, py_comp, np_comp): + # Initialize input arrays + if dtype == np.bool_: + a = np.random.choice(a=[False, True], size=1000) + b = np.random.choice(a=[False, True], size=1000) + scalar = True + else: + a = np.random.randint(low=1, high=10, size=1000).astype(dtype) + b = np.random.randint(low=1, high=10, size=1000).astype(dtype) + scalar = 5 + np_scalar = np.dtype(dtype).type(scalar) + a_lst = a.tolist() + b_lst = b.tolist() + + # (Binary) Comparison (x1=array, x2=array) + comp_b = np_comp(a, b).view(np.uint8) + comp_b_list = [int(py_comp(x, y)) for x, y in zip(a_lst, b_lst)] + + # (Scalar1) Comparison (x1=scalar, x2=array) + comp_s1 = np_comp(np_scalar, b).view(np.uint8) + comp_s1_list = [int(py_comp(scalar, x)) for x in b_lst] + + # (Scalar2) Comparison (x1=array, x2=scalar) + comp_s2 = np_comp(a, np_scalar).view(np.uint8) + comp_s2_list = [int(py_comp(x, scalar)) for x in a_lst] + + # Sequence: Binary, Scalar1 and Scalar2 + assert_(comp_b.tolist() == comp_b_list, + f"Failed comparison ({py_comp.__name__})") + assert_(comp_s1.tolist() == comp_s1_list, + f"Failed comparison ({py_comp.__name__})") + assert_(comp_s2.tolist() == comp_s2_list, + f"Failed comparison ({py_comp.__name__})") + + def test_ignore_object_identity_in_equal(self): + # Check comparing identical objects whose comparison + # is not a simple boolean, e.g., arrays that are compared elementwise. + a = np.array([np.array([1, 2, 3]), None], dtype=object) + assert_raises(ValueError, np.equal, a, a) + + # Check error raised when comparing identical non-comparable objects. + class FunkyType: + def __eq__(self, other): + raise TypeError("I won't compare") + + a = np.array([FunkyType()]) + assert_raises(TypeError, np.equal, a, a) + + # Check identity doesn't override comparison mismatch. + a = np.array([np.nan], dtype=object) + assert_equal(np.equal(a, a), [False]) + + def test_ignore_object_identity_in_not_equal(self): + # Check comparing identical objects whose comparison + # is not a simple boolean, e.g., arrays that are compared elementwise. + a = np.array([np.array([1, 2, 3]), None], dtype=object) + assert_raises(ValueError, np.not_equal, a, a) + + # Check error raised when comparing identical non-comparable objects. + class FunkyType: + def __ne__(self, other): + raise TypeError("I won't compare") + + a = np.array([FunkyType()]) + assert_raises(TypeError, np.not_equal, a, a) + + # Check identity doesn't override comparison mismatch. 
+ a = np.array([np.nan], dtype=object) + assert_equal(np.not_equal(a, a), [True]) + + def test_error_in_equal_reduce(self): + # gh-20929 + # make sure np.equal.reduce raises a TypeError if an array is passed + # without specifying the dtype + a = np.array([0, 0]) + assert_equal(np.equal.reduce(a, dtype=bool), True) + assert_raises(TypeError, np.equal.reduce, a) + + def test_object_dtype(self): + assert np.equal(1, [1], dtype=object).dtype == object + assert np.equal(1, [1], signature=(None, None, "O")).dtype == object + + def test_object_nonbool_dtype_error(self): + # bool output dtype is fine of course: + assert np.equal(1, [1], dtype=bool).dtype == bool + + # but the following are examples do not have a loop: + with pytest.raises(TypeError, match="No loop matching"): + np.equal(1, 1, dtype=np.int64) + + with pytest.raises(TypeError, match="No loop matching"): + np.equal(1, 1, sig=(None, None, "l")) + + @pytest.mark.parametrize("dtypes", ["qQ", "Qq"]) + @pytest.mark.parametrize('py_comp, np_comp', [ + (operator.lt, np.less), + (operator.le, np.less_equal), + (operator.gt, np.greater), + (operator.ge, np.greater_equal), + (operator.eq, np.equal), + (operator.ne, np.not_equal) + ]) + @pytest.mark.parametrize("vals", [(2**60, 2**60+1), (2**60+1, 2**60)]) + def test_large_integer_direct_comparison( + self, dtypes, py_comp, np_comp, vals): + # Note that float(2**60) + 1 == float(2**60). + a1 = np.array([2**60], dtype=dtypes[0]) + a2 = np.array([2**60 + 1], dtype=dtypes[1]) + expected = py_comp(2**60, 2**60+1) + + assert py_comp(a1, a2) == expected + assert np_comp(a1, a2) == expected + # Also check the scalars: + s1 = a1[0] + s2 = a2[0] + assert isinstance(s1, np.integer) + assert isinstance(s2, np.integer) + # The Python operator here is mainly interesting: + assert py_comp(s1, s2) == expected + assert np_comp(s1, s2) == expected + + @pytest.mark.parametrize("dtype", np.typecodes['UnsignedInteger']) + @pytest.mark.parametrize('py_comp_func, np_comp_func', [ + (operator.lt, np.less), + (operator.le, np.less_equal), + (operator.gt, np.greater), + (operator.ge, np.greater_equal), + (operator.eq, np.equal), + (operator.ne, np.not_equal) + ]) + @pytest.mark.parametrize("flip", [True, False]) + def test_unsigned_signed_direct_comparison( + self, dtype, py_comp_func, np_comp_func, flip): + if flip: + py_comp = lambda x, y: py_comp_func(y, x) + np_comp = lambda x, y: np_comp_func(y, x) + else: + py_comp = py_comp_func + np_comp = np_comp_func + + arr = np.array([np.iinfo(dtype).max], dtype=dtype) + expected = py_comp(int(arr[0]), -1) + + assert py_comp(arr, -1) == expected + assert np_comp(arr, -1) == expected + scalar = arr[0] + assert isinstance(scalar, np.integer) + # The Python operator here is mainly interesting: + assert py_comp(scalar, -1) == expected + assert np_comp(scalar, -1) == expected + + +class TestAdd: + def test_reduce_alignment(self): + # gh-9876 + # make sure arrays with weird strides work with the optimizations in + # pairwise_sum_@TYPE@. On x86, the 'b' field will count as aligned at a + # 4 byte offset, even though its itemsize is 8. 
+ a = np.zeros(2, dtype=[('a', np.int32), ('b', np.float64)]) + a['a'] = -1 + assert_equal(a['b'].sum(), 0) + + +class TestDivision: + def test_division_int(self): + # int division should follow Python + x = np.array([5, 10, 90, 100, -5, -10, -90, -100, -120]) + if 5 / 10 == 0.5: + assert_equal(x / 100, [0.05, 0.1, 0.9, 1, + -0.05, -0.1, -0.9, -1, -1.2]) + else: + assert_equal(x / 100, [0, 0, 0, 1, -1, -1, -1, -1, -2]) + assert_equal(x // 100, [0, 0, 0, 1, -1, -1, -1, -1, -2]) + assert_equal(x % 100, [5, 10, 90, 0, 95, 90, 10, 0, 80]) + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + @pytest.mark.parametrize("dtype,ex_val", itertools.product( + np.sctypes['int'] + np.sctypes['uint'], ( + ( + # dividend + "np.array(range(fo.max-lsize, fo.max)).astype(dtype)," + # divisors + "np.arange(lsize).astype(dtype)," + # scalar divisors + "range(15)" + ), + ( + # dividend + "np.arange(fo.min, fo.min+lsize).astype(dtype)," + # divisors + "np.arange(lsize//-2, lsize//2).astype(dtype)," + # scalar divisors + "range(fo.min, fo.min + 15)" + ), ( + # dividend + "np.array(range(fo.max-lsize, fo.max)).astype(dtype)," + # divisors + "np.arange(lsize).astype(dtype)," + # scalar divisors + "[1,3,9,13,neg, fo.min+1, fo.min//2, fo.max//3, fo.max//4]" + ) + ) + )) + def test_division_int_boundary(self, dtype, ex_val): + fo = np.iinfo(dtype) + neg = -1 if fo.min < 0 else 1 + # Large enough to test SIMD loops and remainder elements + lsize = 512 + 7 + a, b, divisors = eval(ex_val) + a_lst, b_lst = a.tolist(), b.tolist() + + c_div = lambda n, d: ( + 0 if d == 0 else ( + fo.min if (n and n == fo.min and d == -1) else n//d + ) + ) + with np.errstate(divide='ignore'): + ac = a.copy() + ac //= b + div_ab = a // b + div_lst = [c_div(x, y) for x, y in zip(a_lst, b_lst)] + + msg = "Integer arrays floor division check (//)" + assert all(div_ab == div_lst), msg + msg_eq = "Integer arrays floor division check (//=)" + assert all(ac == div_lst), msg_eq + + for divisor in divisors: + ac = a.copy() + with np.errstate(divide='ignore', over='ignore'): + div_a = a // divisor + ac //= divisor + div_lst = [c_div(i, divisor) for i in a_lst] + + assert all(div_a == div_lst), msg + assert all(ac == div_lst), msg_eq + + with np.errstate(divide='raise', over='raise'): + if 0 in b: + # Verify overflow case + with pytest.raises(FloatingPointError, + match="divide by zero encountered in floor_divide"): + a // b + else: + a // b + if fo.min and fo.min in a: + with pytest.raises(FloatingPointError, + match='overflow encountered in floor_divide'): + a // -1 + elif fo.min: + a // -1 + with pytest.raises(FloatingPointError, + match="divide by zero encountered in floor_divide"): + a // 0 + with pytest.raises(FloatingPointError, + match="divide by zero encountered in floor_divide"): + ac = a.copy() + ac //= 0 + + np.array([], dtype=dtype) // 0 + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + @pytest.mark.parametrize("dtype,ex_val", itertools.product( + np.sctypes['int'] + np.sctypes['uint'], ( + "np.array([fo.max, 1, 2, 1, 1, 2, 3], dtype=dtype)", + "np.array([fo.min, 1, -2, 1, 1, 2, -3]).astype(dtype)", + "np.arange(fo.min, fo.min+(100*10), 10, dtype=dtype)", + "np.array(range(fo.max-(100*7), fo.max, 7)).astype(dtype)", + ) + )) + def test_division_int_reduce(self, dtype, ex_val): + fo = np.iinfo(dtype) + a = eval(ex_val) + lst = a.tolist() + c_div = lambda n, d: ( + 0 if d == 0 or (n and n == fo.min and d == -1) else n//d + ) + + with np.errstate(divide='ignore'): + div_a = np.floor_divide.reduce(a) 
+ div_lst = reduce(c_div, lst) + msg = "Reduce floor integer division check" + assert div_a == div_lst, msg + + with np.errstate(divide='raise', over='raise'): + with pytest.raises(FloatingPointError, + match="divide by zero encountered in reduce"): + np.floor_divide.reduce(np.arange(-100, 100).astype(dtype)) + if fo.min: + with pytest.raises(FloatingPointError, + match='overflow encountered in reduce'): + np.floor_divide.reduce( + np.array([fo.min, 1, -1], dtype=dtype) + ) + + @pytest.mark.parametrize( + "dividend,divisor,quotient", + [(np.timedelta64(2,'Y'), np.timedelta64(2,'M'), 12), + (np.timedelta64(2,'Y'), np.timedelta64(-2,'M'), -12), + (np.timedelta64(-2,'Y'), np.timedelta64(2,'M'), -12), + (np.timedelta64(-2,'Y'), np.timedelta64(-2,'M'), 12), + (np.timedelta64(2,'M'), np.timedelta64(-2,'Y'), -1), + (np.timedelta64(2,'Y'), np.timedelta64(0,'M'), 0), + (np.timedelta64(2,'Y'), 2, np.timedelta64(1,'Y')), + (np.timedelta64(2,'Y'), -2, np.timedelta64(-1,'Y')), + (np.timedelta64(-2,'Y'), 2, np.timedelta64(-1,'Y')), + (np.timedelta64(-2,'Y'), -2, np.timedelta64(1,'Y')), + (np.timedelta64(-2,'Y'), -2, np.timedelta64(1,'Y')), + (np.timedelta64(-2,'Y'), -3, np.timedelta64(0,'Y')), + (np.timedelta64(-2,'Y'), 0, np.timedelta64('Nat','Y')), + ]) + def test_division_int_timedelta(self, dividend, divisor, quotient): + # If either divisor is 0 or quotient is Nat, check for division by 0 + if divisor and (isinstance(quotient, int) or not np.isnat(quotient)): + msg = "Timedelta floor division check" + assert dividend // divisor == quotient, msg + + # Test for arrays as well + msg = "Timedelta arrays floor division check" + dividend_array = np.array([dividend]*5) + quotient_array = np.array([quotient]*5) + assert all(dividend_array // divisor == quotient_array), msg + else: + if IS_WASM: + pytest.skip("fp errors don't work in wasm") + with np.errstate(divide='raise', invalid='raise'): + with pytest.raises(FloatingPointError): + dividend // divisor + + def test_division_complex(self): + # check that implementation is correct + msg = "Complex division implementation check" + x = np.array([1. + 1.*1j, 1. + .5*1j, 1. + 2.*1j], dtype=np.complex128) + assert_almost_equal(x**2/x, x, err_msg=msg) + # check overflow, underflow + msg = "Complex division overflow/underflow check" + x = np.array([1.e+110, 1.e-110], dtype=np.complex128) + y = x**2/x + assert_almost_equal(y/x, [1, 1], err_msg=msg) + + def test_zero_division_complex(self): + with np.errstate(invalid="ignore", divide="ignore"): + x = np.array([0.0], dtype=np.complex128) + y = 1.0/x + assert_(np.isinf(y)[0]) + y = complex(np.inf, np.nan)/x + assert_(np.isinf(y)[0]) + y = complex(np.nan, np.inf)/x + assert_(np.isinf(y)[0]) + y = complex(np.inf, np.inf)/x + assert_(np.isinf(y)[0]) + y = 0.0/x + assert_(np.isnan(y)[0]) + + def test_floor_division_complex(self): + # check that floor division, divmod and remainder raises type errors + x = np.array([.9 + 1j, -.1 + 1j, .9 + .5*1j, .9 + 2.*1j], dtype=np.complex128) + with pytest.raises(TypeError): + x // 7 + with pytest.raises(TypeError): + np.divmod(x, 7) + with pytest.raises(TypeError): + np.remainder(x, 7) + + def test_floor_division_signed_zero(self): + # Check that the sign bit is correctly set when dividing positive and + # negative zero by one. 
+        x = np.zeros(10)
+        assert_equal(np.signbit(x//1), 0)
+        assert_equal(np.signbit((-x)//1), 1)
+
+    @pytest.mark.skipif(hasattr(np.__config__, "blas_ssl2_info"),
+            reason="gh-22982")
+    @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
+    @pytest.mark.parametrize('dtype', np.typecodes['Float'])
+    def test_floor_division_errors(self, dtype):
+        fnan = np.array(np.nan, dtype=dtype)
+        fone = np.array(1.0, dtype=dtype)
+        fzer = np.array(0.0, dtype=dtype)
+        finf = np.array(np.inf, dtype=dtype)
+        # divide by zero error check
+        with np.errstate(divide='raise', invalid='ignore'):
+            assert_raises(FloatingPointError, np.floor_divide, fone, fzer)
+        with np.errstate(divide='ignore', invalid='raise'):
+            np.floor_divide(fone, fzer)
+
+        # The following already contain a NaN and should not warn
+        with np.errstate(all='raise'):
+            np.floor_divide(fnan, fone)
+            np.floor_divide(fone, fnan)
+            np.floor_divide(fnan, fzer)
+            np.floor_divide(fzer, fnan)
+
+    @pytest.mark.parametrize('dtype', np.typecodes['Float'])
+    def test_floor_division_corner_cases(self, dtype):
+        # test corner cases like 1.0//0.0 for errors and return vals
+        x = np.zeros(10, dtype=dtype)
+        y = np.ones(10, dtype=dtype)
+        fnan = np.array(np.nan, dtype=dtype)
+        fone = np.array(1.0, dtype=dtype)
+        fzer = np.array(0.0, dtype=dtype)
+        finf = np.array(np.inf, dtype=dtype)
+        with suppress_warnings() as sup:
+            sup.filter(RuntimeWarning, "invalid value encountered in floor_divide")
+            div = np.floor_divide(fnan, fone)
+            assert(np.isnan(div)), "dt: %s, div: %s" % (dtype, div)
+            div = np.floor_divide(fone, fnan)
+            assert(np.isnan(div)), "dt: %s, div: %s" % (dtype, div)
+            div = np.floor_divide(fnan, fzer)
+            assert(np.isnan(div)), "dt: %s, div: %s" % (dtype, div)
+        # verify 1.0//0.0 computations return inf
+        with np.errstate(divide='ignore'):
+            z = np.floor_divide(y, x)
+            assert_(np.isinf(z).all())
+
+def floor_divide_and_remainder(x, y):
+    return (np.floor_divide(x, y), np.remainder(x, y))
+
+
+def _signs(dt):
+    if dt in np.typecodes['UnsignedInteger']:
+        return (+1,)
+    else:
+        return (+1, -1)
+
+
+class TestRemainder:
+
+    def test_remainder_basic(self):
+        dt = np.typecodes['AllInteger'] + np.typecodes['Float']
+        for op in [floor_divide_and_remainder, np.divmod]:
+            for dt1, dt2 in itertools.product(dt, dt):
+                for sg1, sg2 in itertools.product(_signs(dt1), _signs(dt2)):
+                    fmt = 'op: %s, dt1: %s, dt2: %s, sg1: %s, sg2: %s'
+                    msg = fmt % (op.__name__, dt1, dt2, sg1, sg2)
+                    a = np.array(sg1*71, dtype=dt1)
+                    b = np.array(sg2*19, dtype=dt2)
+                    div, rem = op(a, b)
+                    assert_equal(div*b + rem, a, err_msg=msg)
+                    if sg2 == -1:
+                        assert_(b < rem <= 0, msg)
+                    else:
+                        assert_(b > rem >= 0, msg)
+
+    def test_float_remainder_exact(self):
+        # test that float results are exact for small integers. This also
+        # holds for the same integers scaled by powers of two.
+        nlst = list(range(-127, 0))
+        plst = list(range(1, 128))
+        dividend = nlst + [0] + plst
+        divisor = nlst + plst
+        arg = list(itertools.product(dividend, divisor))
+        tgt = list(divmod(*t) for t in arg)
+
+        a, b = np.array(arg, dtype=int).T
+        # convert exact integer results from Python to float so that
+        # signed zero can be used, it is checked.
+ tgtdiv, tgtrem = np.array(tgt, dtype=float).T + tgtdiv = np.where((tgtdiv == 0.0) & ((b < 0) ^ (a < 0)), -0.0, tgtdiv) + tgtrem = np.where((tgtrem == 0.0) & (b < 0), -0.0, tgtrem) + + for op in [floor_divide_and_remainder, np.divmod]: + for dt in np.typecodes['Float']: + msg = 'op: %s, dtype: %s' % (op.__name__, dt) + fa = a.astype(dt) + fb = b.astype(dt) + div, rem = op(fa, fb) + assert_equal(div, tgtdiv, err_msg=msg) + assert_equal(rem, tgtrem, err_msg=msg) + + def test_float_remainder_roundoff(self): + # gh-6127 + dt = np.typecodes['Float'] + for op in [floor_divide_and_remainder, np.divmod]: + for dt1, dt2 in itertools.product(dt, dt): + for sg1, sg2 in itertools.product((+1, -1), (+1, -1)): + fmt = 'op: %s, dt1: %s, dt2: %s, sg1: %s, sg2: %s' + msg = fmt % (op.__name__, dt1, dt2, sg1, sg2) + a = np.array(sg1*78*6e-8, dtype=dt1) + b = np.array(sg2*6e-8, dtype=dt2) + div, rem = op(a, b) + # Equal assertion should hold when fmod is used + assert_equal(div*b + rem, a, err_msg=msg) + if sg2 == -1: + assert_(b < rem <= 0, msg) + else: + assert_(b > rem >= 0, msg) + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + @pytest.mark.xfail(sys.platform.startswith("darwin"), + reason="MacOS seems to not give the correct 'invalid' warning for " + "`fmod`. Hopefully, others always do.") + @pytest.mark.parametrize('dtype', np.typecodes['Float']) + def test_float_divmod_errors(self, dtype): + # Check valid errors raised for divmod and remainder + fzero = np.array(0.0, dtype=dtype) + fone = np.array(1.0, dtype=dtype) + finf = np.array(np.inf, dtype=dtype) + fnan = np.array(np.nan, dtype=dtype) + # since divmod is combination of both remainder and divide + # ops it will set both dividebyzero and invalid flags + with np.errstate(divide='raise', invalid='ignore'): + assert_raises(FloatingPointError, np.divmod, fone, fzero) + with np.errstate(divide='ignore', invalid='raise'): + assert_raises(FloatingPointError, np.divmod, fone, fzero) + with np.errstate(invalid='raise'): + assert_raises(FloatingPointError, np.divmod, fzero, fzero) + with np.errstate(invalid='raise'): + assert_raises(FloatingPointError, np.divmod, finf, finf) + with np.errstate(divide='ignore', invalid='raise'): + assert_raises(FloatingPointError, np.divmod, finf, fzero) + with np.errstate(divide='raise', invalid='ignore'): + # inf / 0 does not set any flags, only the modulo creates a NaN + np.divmod(finf, fzero) + + @pytest.mark.skipif(hasattr(np.__config__, "blas_ssl2_info"), + reason="gh-22982") + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + @pytest.mark.xfail(sys.platform.startswith("darwin"), + reason="MacOS seems to not give the correct 'invalid' warning for " + "`fmod`. Hopefully, others always do.") + @pytest.mark.parametrize('dtype', np.typecodes['Float']) + @pytest.mark.parametrize('fn', [np.fmod, np.remainder]) + def test_float_remainder_errors(self, dtype, fn): + fzero = np.array(0.0, dtype=dtype) + fone = np.array(1.0, dtype=dtype) + finf = np.array(np.inf, dtype=dtype) + fnan = np.array(np.nan, dtype=dtype) + + # The following already contain a NaN and should not warn. 
+ with np.errstate(all='raise'): + with pytest.raises(FloatingPointError, + match="invalid value"): + fn(fone, fzero) + fn(fnan, fzero) + fn(fzero, fnan) + fn(fone, fnan) + fn(fnan, fone) + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + def test_float_remainder_overflow(self): + a = np.finfo(np.float64).tiny + with np.errstate(over='ignore', invalid='ignore'): + div, mod = np.divmod(4, a) + np.isinf(div) + assert_(mod == 0) + with np.errstate(over='raise', invalid='ignore'): + assert_raises(FloatingPointError, np.divmod, 4, a) + with np.errstate(invalid='raise', over='ignore'): + assert_raises(FloatingPointError, np.divmod, 4, a) + + def test_float_divmod_corner_cases(self): + # check nan cases + for dt in np.typecodes['Float']: + fnan = np.array(np.nan, dtype=dt) + fone = np.array(1.0, dtype=dt) + fzer = np.array(0.0, dtype=dt) + finf = np.array(np.inf, dtype=dt) + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "invalid value encountered in divmod") + sup.filter(RuntimeWarning, "divide by zero encountered in divmod") + div, rem = np.divmod(fone, fzer) + assert(np.isinf(div)), 'dt: %s, div: %s' % (dt, rem) + assert(np.isnan(rem)), 'dt: %s, rem: %s' % (dt, rem) + div, rem = np.divmod(fzer, fzer) + assert(np.isnan(rem)), 'dt: %s, rem: %s' % (dt, rem) + assert_(np.isnan(div)), 'dt: %s, rem: %s' % (dt, rem) + div, rem = np.divmod(finf, finf) + assert(np.isnan(div)), 'dt: %s, rem: %s' % (dt, rem) + assert(np.isnan(rem)), 'dt: %s, rem: %s' % (dt, rem) + div, rem = np.divmod(finf, fzer) + assert(np.isinf(div)), 'dt: %s, rem: %s' % (dt, rem) + assert(np.isnan(rem)), 'dt: %s, rem: %s' % (dt, rem) + div, rem = np.divmod(fnan, fone) + assert(np.isnan(rem)), "dt: %s, rem: %s" % (dt, rem) + assert(np.isnan(div)), "dt: %s, rem: %s" % (dt, rem) + div, rem = np.divmod(fone, fnan) + assert(np.isnan(rem)), "dt: %s, rem: %s" % (dt, rem) + assert(np.isnan(div)), "dt: %s, rem: %s" % (dt, rem) + div, rem = np.divmod(fnan, fzer) + assert(np.isnan(rem)), "dt: %s, rem: %s" % (dt, rem) + assert(np.isnan(div)), "dt: %s, rem: %s" % (dt, rem) + + def test_float_remainder_corner_cases(self): + # Check remainder magnitude. + for dt in np.typecodes['Float']: + fone = np.array(1.0, dtype=dt) + fzer = np.array(0.0, dtype=dt) + fnan = np.array(np.nan, dtype=dt) + b = np.array(1.0, dtype=dt) + a = np.nextafter(np.array(0.0, dtype=dt), -b) + rem = np.remainder(a, b) + assert_(rem <= b, 'dt: %s' % dt) + rem = np.remainder(-a, -b) + assert_(rem >= -b, 'dt: %s' % dt) + + # Check nans, inf + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "invalid value encountered in remainder") + sup.filter(RuntimeWarning, "invalid value encountered in fmod") + for dt in np.typecodes['Float']: + fone = np.array(1.0, dtype=dt) + fzer = np.array(0.0, dtype=dt) + finf = np.array(np.inf, dtype=dt) + fnan = np.array(np.nan, dtype=dt) + rem = np.remainder(fone, fzer) + assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem)) + # MSVC 2008 returns NaN here, so disable the check. 
+ #rem = np.remainder(fone, finf) + #assert_(rem == fone, 'dt: %s, rem: %s' % (dt, rem)) + rem = np.remainder(finf, fone) + fmod = np.fmod(finf, fone) + assert_(np.isnan(fmod), 'dt: %s, fmod: %s' % (dt, fmod)) + assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem)) + rem = np.remainder(finf, finf) + fmod = np.fmod(finf, fone) + assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem)) + assert_(np.isnan(fmod), 'dt: %s, fmod: %s' % (dt, fmod)) + rem = np.remainder(finf, fzer) + fmod = np.fmod(finf, fzer) + assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem)) + assert_(np.isnan(fmod), 'dt: %s, fmod: %s' % (dt, fmod)) + rem = np.remainder(fone, fnan) + fmod = np.fmod(fone, fnan) + assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem)) + assert_(np.isnan(fmod), 'dt: %s, fmod: %s' % (dt, fmod)) + rem = np.remainder(fnan, fzer) + fmod = np.fmod(fnan, fzer) + assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem)) + assert_(np.isnan(fmod), 'dt: %s, fmod: %s' % (dt, rem)) + rem = np.remainder(fnan, fone) + fmod = np.fmod(fnan, fone) + assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem)) + assert_(np.isnan(fmod), 'dt: %s, fmod: %s' % (dt, rem)) + + +class TestDivisionIntegerOverflowsAndDivideByZero: + result_type = namedtuple('result_type', + ['nocast', 'casted']) + helper_lambdas = { + 'zero': lambda dtype: 0, + 'min': lambda dtype: np.iinfo(dtype).min, + 'neg_min': lambda dtype: -np.iinfo(dtype).min, + 'min-zero': lambda dtype: (np.iinfo(dtype).min, 0), + 'neg_min-zero': lambda dtype: (-np.iinfo(dtype).min, 0), + } + overflow_results = { + np.remainder: result_type( + helper_lambdas['zero'], helper_lambdas['zero']), + np.fmod: result_type( + helper_lambdas['zero'], helper_lambdas['zero']), + operator.mod: result_type( + helper_lambdas['zero'], helper_lambdas['zero']), + operator.floordiv: result_type( + helper_lambdas['min'], helper_lambdas['neg_min']), + np.floor_divide: result_type( + helper_lambdas['min'], helper_lambdas['neg_min']), + np.divmod: result_type( + helper_lambdas['min-zero'], helper_lambdas['neg_min-zero']) + } + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + @pytest.mark.parametrize("dtype", np.typecodes["Integer"]) + def test_signed_division_overflow(self, dtype): + to_check = interesting_binop_operands(np.iinfo(dtype).min, -1, dtype) + for op1, op2, extractor, operand_identifier in to_check: + with pytest.warns(RuntimeWarning, match="overflow encountered"): + res = op1 // op2 + + assert res.dtype == op1.dtype + assert extractor(res) == np.iinfo(op1.dtype).min + + # Remainder is well defined though, and does not warn: + res = op1 % op2 + assert res.dtype == op1.dtype + assert extractor(res) == 0 + # Check fmod as well: + res = np.fmod(op1, op2) + assert extractor(res) == 0 + + # Divmod warns for the division part: + with pytest.warns(RuntimeWarning, match="overflow encountered"): + res1, res2 = np.divmod(op1, op2) + + assert res1.dtype == res2.dtype == op1.dtype + assert extractor(res1) == np.iinfo(op1.dtype).min + assert extractor(res2) == 0 + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + @pytest.mark.parametrize("dtype", np.typecodes["AllInteger"]) + def test_divide_by_zero(self, dtype): + # Note that the return value cannot be well defined here, but NumPy + # currently uses 0 consistently. This could be changed. 
+        to_check = interesting_binop_operands(1, 0, dtype)
+        for op1, op2, extractor, operand_identifier in to_check:
+            with pytest.warns(RuntimeWarning, match="divide by zero"):
+                res = op1 // op2
+
+            assert res.dtype == op1.dtype
+            assert extractor(res) == 0
+
+            with pytest.warns(RuntimeWarning, match="divide by zero"):
+                res1, res2 = np.divmod(op1, op2)
+
+            assert res1.dtype == res2.dtype == op1.dtype
+            assert extractor(res1) == 0
+            assert extractor(res2) == 0
+
+    @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
+    @pytest.mark.parametrize("dividend_dtype",
+                             np.sctypes['int'])
+    @pytest.mark.parametrize("divisor_dtype",
+                             np.sctypes['int'])
+    @pytest.mark.parametrize("operation",
+                             [np.remainder, np.fmod, np.divmod, np.floor_divide,
+                              operator.mod, operator.floordiv])
+    @np.errstate(divide='warn', over='warn')
+    def test_overflows(self, dividend_dtype, divisor_dtype, operation):
+        # SIMD tries to perform the operation on as many elements as possible
+        # that is a multiple of the register's size. We resort to the
+        # default implementation for the leftover elements.
+        # We try to cover all paths here.
+        arrays = [np.array([np.iinfo(dividend_dtype).min]*i,
+                           dtype=dividend_dtype) for i in range(1, 129)]
+        divisor = np.array([-1], dtype=divisor_dtype)
+        # If the dividend is a smaller type than the divisor (`else` case),
+        # the result will be a larger type than the dividend and will not
+        # result in an overflow for `divmod` and `floor_divide`.
+        if np.dtype(dividend_dtype).itemsize >= np.dtype(
+                divisor_dtype).itemsize and operation in (
+                    np.divmod, np.floor_divide, operator.floordiv):
+            with pytest.warns(
+                    RuntimeWarning,
+                    match="overflow encountered in"):
+                result = operation(
+                            dividend_dtype(np.iinfo(dividend_dtype).min),
+                            divisor_dtype(-1)
+                         )
+                assert result == self.overflow_results[operation].nocast(
+                        dividend_dtype)
+
+            # Arrays
+            for a in arrays:
+                # In case of divmod, we need to flatten the result
+                # column first as we get a column vector of quotient and
+                # remainder and a normal flatten of the expected result.
+ with pytest.warns( + RuntimeWarning, + match="overflow encountered in"): + result = np.array(operation(a, divisor)).flatten('f') + expected_array = np.array( + [self.overflow_results[operation].nocast( + dividend_dtype)]*len(a)).flatten() + assert_array_equal(result, expected_array) + else: + # Scalars + result = operation( + dividend_dtype(np.iinfo(dividend_dtype).min), + divisor_dtype(-1) + ) + assert result == self.overflow_results[operation].casted( + dividend_dtype) + + # Arrays + for a in arrays: + # See above comment on flatten + result = np.array(operation(a, divisor)).flatten('f') + expected_array = np.array( + [self.overflow_results[operation].casted( + dividend_dtype)]*len(a)).flatten() + assert_array_equal(result, expected_array) + + +class TestCbrt: + def test_cbrt_scalar(self): + assert_almost_equal((np.cbrt(np.float32(-2.5)**3)), -2.5) + + def test_cbrt(self): + x = np.array([1., 2., -3., np.inf, -np.inf]) + assert_almost_equal(np.cbrt(x**3), x) + + assert_(np.isnan(np.cbrt(np.nan))) + assert_equal(np.cbrt(np.inf), np.inf) + assert_equal(np.cbrt(-np.inf), -np.inf) + + +class TestPower: + def test_power_float(self): + x = np.array([1., 2., 3.]) + assert_equal(x**0, [1., 1., 1.]) + assert_equal(x**1, x) + assert_equal(x**2, [1., 4., 9.]) + y = x.copy() + y **= 2 + assert_equal(y, [1., 4., 9.]) + assert_almost_equal(x**(-1), [1., 0.5, 1./3]) + assert_almost_equal(x**(0.5), [1., ncu.sqrt(2), ncu.sqrt(3)]) + + for out, inp, msg in _gen_alignment_data(dtype=np.float32, + type='unary', + max_size=11): + exp = [ncu.sqrt(i) for i in inp] + assert_almost_equal(inp**(0.5), exp, err_msg=msg) + np.sqrt(inp, out=out) + assert_equal(out, exp, err_msg=msg) + + for out, inp, msg in _gen_alignment_data(dtype=np.float64, + type='unary', + max_size=7): + exp = [ncu.sqrt(i) for i in inp] + assert_almost_equal(inp**(0.5), exp, err_msg=msg) + np.sqrt(inp, out=out) + assert_equal(out, exp, err_msg=msg) + + def test_power_complex(self): + x = np.array([1+2j, 2+3j, 3+4j]) + assert_equal(x**0, [1., 1., 1.]) + assert_equal(x**1, x) + assert_almost_equal(x**2, [-3+4j, -5+12j, -7+24j]) + assert_almost_equal(x**3, [(1+2j)**3, (2+3j)**3, (3+4j)**3]) + assert_almost_equal(x**4, [(1+2j)**4, (2+3j)**4, (3+4j)**4]) + assert_almost_equal(x**(-1), [1/(1+2j), 1/(2+3j), 1/(3+4j)]) + assert_almost_equal(x**(-2), [1/(1+2j)**2, 1/(2+3j)**2, 1/(3+4j)**2]) + assert_almost_equal(x**(-3), [(-11+2j)/125, (-46-9j)/2197, + (-117-44j)/15625]) + assert_almost_equal(x**(0.5), [ncu.sqrt(1+2j), ncu.sqrt(2+3j), + ncu.sqrt(3+4j)]) + norm = 1./((x**14)[0]) + assert_almost_equal(x**14 * norm, + [i * norm for i in [-76443+16124j, 23161315+58317492j, + 5583548873 + 2465133864j]]) + + # Ticket #836 + def assert_complex_equal(x, y): + assert_array_equal(x.real, y.real) + assert_array_equal(x.imag, y.imag) + + for z in [complex(0, np.inf), complex(1, np.inf)]: + z = np.array([z], dtype=np.complex_) + with np.errstate(invalid="ignore"): + assert_complex_equal(z**1, z) + assert_complex_equal(z**2, z*z) + assert_complex_equal(z**3, z*z*z) + + def test_power_zero(self): + # ticket #1271 + zero = np.array([0j]) + one = np.array([1+0j]) + cnan = np.array([complex(np.nan, np.nan)]) + # FIXME cinf not tested. 
+ #cinf = np.array([complex(np.inf, 0)]) + + def assert_complex_equal(x, y): + x, y = np.asarray(x), np.asarray(y) + assert_array_equal(x.real, y.real) + assert_array_equal(x.imag, y.imag) + + # positive powers + for p in [0.33, 0.5, 1, 1.5, 2, 3, 4, 5, 6.6]: + assert_complex_equal(np.power(zero, p), zero) + + # zero power + assert_complex_equal(np.power(zero, 0), one) + with np.errstate(invalid="ignore"): + assert_complex_equal(np.power(zero, 0+1j), cnan) + + # negative power + for p in [0.33, 0.5, 1, 1.5, 2, 3, 4, 5, 6.6]: + assert_complex_equal(np.power(zero, -p), cnan) + assert_complex_equal(np.power(zero, -1+0.2j), cnan) + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + def test_zero_power_nonzero(self): + # Testing 0^{Non-zero} issue 18378 + zero = np.array([0.0+0.0j]) + cnan = np.array([complex(np.nan, np.nan)]) + + def assert_complex_equal(x, y): + assert_array_equal(x.real, y.real) + assert_array_equal(x.imag, y.imag) + + #Complex powers with positive real part will not generate a warning + assert_complex_equal(np.power(zero, 1+4j), zero) + assert_complex_equal(np.power(zero, 2-3j), zero) + #Testing zero values when real part is greater than zero + assert_complex_equal(np.power(zero, 1+1j), zero) + assert_complex_equal(np.power(zero, 1+0j), zero) + assert_complex_equal(np.power(zero, 1-1j), zero) + #Complex powers will negative real part or 0 (provided imaginary + # part is not zero) will generate a NAN and hence a RUNTIME warning + with pytest.warns(expected_warning=RuntimeWarning) as r: + assert_complex_equal(np.power(zero, -1+1j), cnan) + assert_complex_equal(np.power(zero, -2-3j), cnan) + assert_complex_equal(np.power(zero, -7+0j), cnan) + assert_complex_equal(np.power(zero, 0+1j), cnan) + assert_complex_equal(np.power(zero, 0-1j), cnan) + assert len(r) == 5 + + def test_fast_power(self): + x = np.array([1, 2, 3], np.int16) + res = x**2.0 + assert_((x**2.00001).dtype is res.dtype) + assert_array_equal(res, [1, 4, 9]) + # check the inplace operation on the casted copy doesn't mess with x + assert_(not np.may_share_memory(res, x)) + assert_array_equal(x, [1, 2, 3]) + + # Check that the fast path ignores 1-element not 0-d arrays + res = x ** np.array([[[2]]]) + assert_equal(res.shape, (1, 1, 3)) + + def test_integer_power(self): + a = np.array([15, 15], 'i8') + b = np.power(a, a) + assert_equal(b, [437893890380859375, 437893890380859375]) + + def test_integer_power_with_integer_zero_exponent(self): + dtypes = np.typecodes['Integer'] + for dt in dtypes: + arr = np.arange(-10, 10, dtype=dt) + assert_equal(np.power(arr, 0), np.ones_like(arr)) + + dtypes = np.typecodes['UnsignedInteger'] + for dt in dtypes: + arr = np.arange(10, dtype=dt) + assert_equal(np.power(arr, 0), np.ones_like(arr)) + + def test_integer_power_of_1(self): + dtypes = np.typecodes['AllInteger'] + for dt in dtypes: + arr = np.arange(10, dtype=dt) + assert_equal(np.power(1, arr), np.ones_like(arr)) + + def test_integer_power_of_zero(self): + dtypes = np.typecodes['AllInteger'] + for dt in dtypes: + arr = np.arange(1, 10, dtype=dt) + assert_equal(np.power(0, arr), np.zeros_like(arr)) + + def test_integer_to_negative_power(self): + dtypes = np.typecodes['Integer'] + for dt in dtypes: + a = np.array([0, 1, 2, 3], dtype=dt) + b = np.array([0, 1, 2, -3], dtype=dt) + one = np.array(1, dtype=dt) + minusone = np.array(-1, dtype=dt) + assert_raises(ValueError, np.power, a, b) + assert_raises(ValueError, np.power, a, minusone) + assert_raises(ValueError, np.power, one, b) + assert_raises(ValueError, 
np.power, one, minusone) + + def test_float_to_inf_power(self): + for dt in [np.float32, np.float64]: + a = np.array([1, 1, 2, 2, -2, -2, np.inf, -np.inf], dt) + b = np.array([np.inf, -np.inf, np.inf, -np.inf, + np.inf, -np.inf, np.inf, -np.inf], dt) + r = np.array([1, 1, np.inf, 0, np.inf, 0, np.inf, 0], dt) + assert_equal(np.power(a, b), r) + + +class TestFloat_power: + def test_type_conversion(self): + arg_type = '?bhilBHILefdgFDG' + res_type = 'ddddddddddddgDDG' + for dtin, dtout in zip(arg_type, res_type): + msg = "dtin: %s, dtout: %s" % (dtin, dtout) + arg = np.ones(1, dtype=dtin) + res = np.float_power(arg, arg) + assert_(res.dtype.name == np.dtype(dtout).name, msg) + + +class TestLog2: + @pytest.mark.parametrize('dt', ['f', 'd', 'g']) + def test_log2_values(self, dt): + x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024] + y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + xf = np.array(x, dtype=dt) + yf = np.array(y, dtype=dt) + assert_almost_equal(np.log2(xf), yf) + + @pytest.mark.parametrize("i", range(1, 65)) + def test_log2_ints(self, i): + # a good log2 implementation should provide this, + # might fail on OS with bad libm + v = np.log2(2.**i) + assert_equal(v, float(i), err_msg='at exponent %d' % i) + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + def test_log2_special(self): + assert_equal(np.log2(1.), 0.) + assert_equal(np.log2(np.inf), np.inf) + assert_(np.isnan(np.log2(np.nan))) + + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', RuntimeWarning) + assert_(np.isnan(np.log2(-1.))) + assert_(np.isnan(np.log2(-np.inf))) + assert_equal(np.log2(0.), -np.inf) + assert_(w[0].category is RuntimeWarning) + assert_(w[1].category is RuntimeWarning) + assert_(w[2].category is RuntimeWarning) + + +class TestExp2: + def test_exp2_values(self): + x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024] + y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + for dt in ['f', 'd', 'g']: + xf = np.array(x, dtype=dt) + yf = np.array(y, dtype=dt) + assert_almost_equal(np.exp2(yf), xf) + + +class TestLogAddExp2(_FilterInvalids): + # Need test for intermediate precisions + def test_logaddexp2_values(self): + x = [1, 2, 3, 4, 5] + y = [5, 4, 3, 2, 1] + z = [6, 6, 6, 6, 6] + for dt, dec_ in zip(['f', 'd', 'g'], [6, 15, 15]): + xf = np.log2(np.array(x, dtype=dt)) + yf = np.log2(np.array(y, dtype=dt)) + zf = np.log2(np.array(z, dtype=dt)) + assert_almost_equal(np.logaddexp2(xf, yf), zf, decimal=dec_) + + def test_logaddexp2_range(self): + x = [1000000, -1000000, 1000200, -1000200] + y = [1000200, -1000200, 1000000, -1000000] + z = [1000200, -1000000, 1000200, -1000000] + for dt in ['f', 'd', 'g']: + logxf = np.array(x, dtype=dt) + logyf = np.array(y, dtype=dt) + logzf = np.array(z, dtype=dt) + assert_almost_equal(np.logaddexp2(logxf, logyf), logzf) + + def test_inf(self): + inf = np.inf + x = [inf, -inf, inf, -inf, inf, 1, -inf, 1] + y = [inf, inf, -inf, -inf, 1, inf, 1, -inf] + z = [inf, inf, inf, -inf, inf, inf, 1, 1] + with np.errstate(invalid='raise'): + for dt in ['f', 'd', 'g']: + logxf = np.array(x, dtype=dt) + logyf = np.array(y, dtype=dt) + logzf = np.array(z, dtype=dt) + assert_equal(np.logaddexp2(logxf, logyf), logzf) + + def test_nan(self): + assert_(np.isnan(np.logaddexp2(np.nan, np.inf))) + assert_(np.isnan(np.logaddexp2(np.inf, np.nan))) + assert_(np.isnan(np.logaddexp2(np.nan, 0))) + assert_(np.isnan(np.logaddexp2(0, np.nan))) + assert_(np.isnan(np.logaddexp2(np.nan, np.nan))) + + def test_reduce(self): + assert_equal(np.logaddexp2.identity, -np.inf) 
+ assert_equal(np.logaddexp2.reduce([]), -np.inf) + assert_equal(np.logaddexp2.reduce([-np.inf]), -np.inf) + assert_equal(np.logaddexp2.reduce([-np.inf, 0]), 0) + + +class TestLog: + def test_log_values(self): + x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024] + y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + for dt in ['f', 'd', 'g']: + log2_ = 0.69314718055994530943 + xf = np.array(x, dtype=dt) + yf = np.array(y, dtype=dt)*log2_ + assert_almost_equal(np.log(xf), yf) + + # test aliasing(issue #17761) + x = np.array([2, 0.937500, 3, 0.947500, 1.054697]) + xf = np.log(x) + assert_almost_equal(np.log(x, out=x), xf) + + # test log() of max for dtype does not raise + for dt in ['f', 'd', 'g']: + try: + with np.errstate(all='raise'): + x = np.finfo(dt).max + np.log(x) + except FloatingPointError as exc: + if dt == 'g' and IS_MUSL: + # FloatingPointError is known to occur on longdouble + # for musllinux_x86_64 x is very large + pytest.skip( + "Overflow has occurred for" + " np.log(np.finfo(np.longdouble).max)" + ) + else: + raise exc + + def test_log_strides(self): + np.random.seed(42) + strides = np.array([-4,-3,-2,-1,1,2,3,4]) + sizes = np.arange(2,100) + for ii in sizes: + x_f64 = np.float64(np.random.uniform(low=0.01, high=100.0,size=ii)) + x_special = x_f64.copy() + x_special[3:-1:4] = 1.0 + y_true = np.log(x_f64) + y_special = np.log(x_special) + for jj in strides: + assert_array_almost_equal_nulp(np.log(x_f64[::jj]), y_true[::jj], nulp=2) + assert_array_almost_equal_nulp(np.log(x_special[::jj]), y_special[::jj], nulp=2) + +class TestExp: + def test_exp_values(self): + x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024] + y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + for dt in ['f', 'd', 'g']: + log2_ = 0.69314718055994530943 + xf = np.array(x, dtype=dt) + yf = np.array(y, dtype=dt)*log2_ + assert_almost_equal(np.exp(yf), xf) + + def test_exp_strides(self): + np.random.seed(42) + strides = np.array([-4,-3,-2,-1,1,2,3,4]) + sizes = np.arange(2,100) + for ii in sizes: + x_f64 = np.float64(np.random.uniform(low=0.01, high=709.1,size=ii)) + y_true = np.exp(x_f64) + for jj in strides: + assert_array_almost_equal_nulp(np.exp(x_f64[::jj]), y_true[::jj], nulp=2) + +class TestSpecialFloats: + def test_exp_values(self): + with np.errstate(under='raise', over='raise'): + x = [np.nan, np.nan, np.inf, 0.] 
+ y = [np.nan, -np.nan, np.inf, -np.inf] + for dt in ['e', 'f', 'd', 'g']: + xf = np.array(x, dtype=dt) + yf = np.array(y, dtype=dt) + assert_equal(np.exp(yf), xf) + + # See: https://github.com/numpy/numpy/issues/19192 + @pytest.mark.xfail( + _glibc_older_than("2.17"), + reason="Older glibc versions may not raise appropriate FP exceptions" + ) + def test_exp_exceptions(self): + with np.errstate(over='raise'): + assert_raises(FloatingPointError, np.exp, np.float16(11.0899)) + assert_raises(FloatingPointError, np.exp, np.float32(100.)) + assert_raises(FloatingPointError, np.exp, np.float32(1E19)) + assert_raises(FloatingPointError, np.exp, np.float64(800.)) + assert_raises(FloatingPointError, np.exp, np.float64(1E19)) + + with np.errstate(under='raise'): + assert_raises(FloatingPointError, np.exp, np.float16(-17.5)) + assert_raises(FloatingPointError, np.exp, np.float32(-1000.)) + assert_raises(FloatingPointError, np.exp, np.float32(-1E19)) + assert_raises(FloatingPointError, np.exp, np.float64(-1000.)) + assert_raises(FloatingPointError, np.exp, np.float64(-1E19)) + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + def test_log_values(self): + with np.errstate(all='ignore'): + x = [np.nan, np.nan, np.inf, np.nan, -np.inf, np.nan] + y = [np.nan, -np.nan, np.inf, -np.inf, 0.0, -1.0] + y1p = [np.nan, -np.nan, np.inf, -np.inf, -1.0, -2.0] + for dt in ['e', 'f', 'd', 'g']: + xf = np.array(x, dtype=dt) + yf = np.array(y, dtype=dt) + yf1p = np.array(y1p, dtype=dt) + assert_equal(np.log(yf), xf) + assert_equal(np.log2(yf), xf) + assert_equal(np.log10(yf), xf) + assert_equal(np.log1p(yf1p), xf) + + with np.errstate(divide='raise'): + for dt in ['e', 'f', 'd']: + assert_raises(FloatingPointError, np.log, + np.array(0.0, dtype=dt)) + assert_raises(FloatingPointError, np.log2, + np.array(0.0, dtype=dt)) + assert_raises(FloatingPointError, np.log10, + np.array(0.0, dtype=dt)) + assert_raises(FloatingPointError, np.log1p, + np.array(-1.0, dtype=dt)) + + with np.errstate(invalid='raise'): + for dt in ['e', 'f', 'd']: + assert_raises(FloatingPointError, np.log, + np.array(-np.inf, dtype=dt)) + assert_raises(FloatingPointError, np.log, + np.array(-1.0, dtype=dt)) + assert_raises(FloatingPointError, np.log2, + np.array(-np.inf, dtype=dt)) + assert_raises(FloatingPointError, np.log2, + np.array(-1.0, dtype=dt)) + assert_raises(FloatingPointError, np.log10, + np.array(-np.inf, dtype=dt)) + assert_raises(FloatingPointError, np.log10, + np.array(-1.0, dtype=dt)) + assert_raises(FloatingPointError, np.log1p, + np.array(-np.inf, dtype=dt)) + assert_raises(FloatingPointError, np.log1p, + np.array(-2.0, dtype=dt)) + + # See https://github.com/numpy/numpy/issues/18005 + with assert_no_warnings(): + a = np.array(1e9, dtype='float32') + np.log(a) + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + @pytest.mark.parametrize('dtype', ['e', 'f', 'd', 'g']) + def test_sincos_values(self, dtype): + with np.errstate(all='ignore'): + x = [np.nan, np.nan, np.nan, np.nan] + y = [np.nan, -np.nan, np.inf, -np.inf] + xf = np.array(x, dtype=dtype) + yf = np.array(y, dtype=dtype) + assert_equal(np.sin(yf), xf) + assert_equal(np.cos(yf), xf) + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + @pytest.mark.xfail( + sys.platform.startswith("darwin"), + reason="underflow is triggered for scalar 'sin'" + ) + def test_sincos_underflow(self): + with np.errstate(under='raise'): + underflow_trigger = np.array( + float.fromhex("0x1.f37f47a03f82ap-511"), + dtype=np.float64 + ) + 
np.sin(underflow_trigger) + np.cos(underflow_trigger) + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + @pytest.mark.parametrize('callable', [np.sin, np.cos]) + @pytest.mark.parametrize('dtype', ['e', 'f', 'd']) + @pytest.mark.parametrize('value', [np.inf, -np.inf]) + def test_sincos_errors(self, callable, dtype, value): + with np.errstate(invalid='raise'): + assert_raises(FloatingPointError, callable, + np.array([value], dtype=dtype)) + + @pytest.mark.parametrize('callable', [np.sin, np.cos]) + @pytest.mark.parametrize('dtype', ['f', 'd']) + @pytest.mark.parametrize('stride', [-1, 1, 2, 4, 5]) + def test_sincos_overlaps(self, callable, dtype, stride): + N = 100 + M = N // abs(stride) + rng = np.random.default_rng(42) + x = rng.standard_normal(N, dtype) + y = callable(x[::stride]) + callable(x[::stride], out=x[:M]) + assert_equal(x[:M], y) + + @pytest.mark.parametrize('dt', ['e', 'f', 'd', 'g']) + def test_sqrt_values(self, dt): + with np.errstate(all='ignore'): + x = [np.nan, np.nan, np.inf, np.nan, 0.] + y = [np.nan, -np.nan, np.inf, -np.inf, 0.] + xf = np.array(x, dtype=dt) + yf = np.array(y, dtype=dt) + assert_equal(np.sqrt(yf), xf) + + # with np.errstate(invalid='raise'): + # assert_raises( + # FloatingPointError, np.sqrt, np.array(-100., dtype=dt) + # ) + + def test_abs_values(self): + x = [np.nan, np.nan, np.inf, np.inf, 0., 0., 1.0, 1.0] + y = [np.nan, -np.nan, np.inf, -np.inf, 0., -0., -1.0, 1.0] + for dt in ['e', 'f', 'd', 'g']: + xf = np.array(x, dtype=dt) + yf = np.array(y, dtype=dt) + assert_equal(np.abs(yf), xf) + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + def test_square_values(self): + x = [np.nan, np.nan, np.inf, np.inf] + y = [np.nan, -np.nan, np.inf, -np.inf] + with np.errstate(all='ignore'): + for dt in ['e', 'f', 'd', 'g']: + xf = np.array(x, dtype=dt) + yf = np.array(y, dtype=dt) + assert_equal(np.square(yf), xf) + + with np.errstate(over='raise'): + assert_raises(FloatingPointError, np.square, + np.array(1E3, dtype='e')) + assert_raises(FloatingPointError, np.square, + np.array(1E32, dtype='f')) + assert_raises(FloatingPointError, np.square, + np.array(1E200, dtype='d')) + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + def test_reciprocal_values(self): + with np.errstate(all='ignore'): + x = [np.nan, np.nan, 0.0, -0.0, np.inf, -np.inf] + y = [np.nan, -np.nan, np.inf, -np.inf, 0., -0.] 
+ for dt in ['e', 'f', 'd', 'g']: + xf = np.array(x, dtype=dt) + yf = np.array(y, dtype=dt) + assert_equal(np.reciprocal(yf), xf) + + with np.errstate(divide='raise'): + for dt in ['e', 'f', 'd', 'g']: + assert_raises(FloatingPointError, np.reciprocal, + np.array(-0.0, dtype=dt)) + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + def test_tan(self): + with np.errstate(all='ignore'): + in_ = [np.nan, -np.nan, 0.0, -0.0, np.inf, -np.inf] + out = [np.nan, np.nan, 0.0, -0.0, np.nan, np.nan] + for dt in ['e', 'f', 'd']: + in_arr = np.array(in_, dtype=dt) + out_arr = np.array(out, dtype=dt) + assert_equal(np.tan(in_arr), out_arr) + + with np.errstate(invalid='raise'): + for dt in ['e', 'f', 'd']: + assert_raises(FloatingPointError, np.tan, + np.array(np.inf, dtype=dt)) + assert_raises(FloatingPointError, np.tan, + np.array(-np.inf, dtype=dt)) + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + def test_arcsincos(self): + with np.errstate(all='ignore'): + in_ = [np.nan, -np.nan, np.inf, -np.inf] + out = [np.nan, np.nan, np.nan, np.nan] + for dt in ['e', 'f', 'd']: + in_arr = np.array(in_, dtype=dt) + out_arr = np.array(out, dtype=dt) + assert_equal(np.arcsin(in_arr), out_arr) + assert_equal(np.arccos(in_arr), out_arr) + + for callable in [np.arcsin, np.arccos]: + for value in [np.inf, -np.inf, 2.0, -2.0]: + for dt in ['e', 'f', 'd']: + with np.errstate(invalid='raise'): + assert_raises(FloatingPointError, callable, + np.array(value, dtype=dt)) + + def test_arctan(self): + with np.errstate(all='ignore'): + in_ = [np.nan, -np.nan] + out = [np.nan, np.nan] + for dt in ['e', 'f', 'd']: + in_arr = np.array(in_, dtype=dt) + out_arr = np.array(out, dtype=dt) + assert_equal(np.arctan(in_arr), out_arr) + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + def test_sinh(self): + in_ = [np.nan, -np.nan, np.inf, -np.inf] + out = [np.nan, np.nan, np.inf, -np.inf] + for dt in ['e', 'f', 'd']: + in_arr = np.array(in_, dtype=dt) + out_arr = np.array(out, dtype=dt) + assert_equal(np.sinh(in_arr), out_arr) + + with np.errstate(over='raise'): + assert_raises(FloatingPointError, np.sinh, + np.array(12.0, dtype='e')) + assert_raises(FloatingPointError, np.sinh, + np.array(120.0, dtype='f')) + assert_raises(FloatingPointError, np.sinh, + np.array(1200.0, dtype='d')) + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + @pytest.mark.skipif('bsd' in sys.platform, + reason="fallback implementation may not raise, see gh-2487") + def test_cosh(self): + in_ = [np.nan, -np.nan, np.inf, -np.inf] + out = [np.nan, np.nan, np.inf, np.inf] + for dt in ['e', 'f', 'd']: + in_arr = np.array(in_, dtype=dt) + out_arr = np.array(out, dtype=dt) + assert_equal(np.cosh(in_arr), out_arr) + + with np.errstate(over='raise'): + assert_raises(FloatingPointError, np.cosh, + np.array(12.0, dtype='e')) + assert_raises(FloatingPointError, np.cosh, + np.array(120.0, dtype='f')) + assert_raises(FloatingPointError, np.cosh, + np.array(1200.0, dtype='d')) + + def test_tanh(self): + in_ = [np.nan, -np.nan, np.inf, -np.inf] + out = [np.nan, np.nan, 1.0, -1.0] + for dt in ['e', 'f', 'd']: + in_arr = np.array(in_, dtype=dt) + out_arr = np.array(out, dtype=dt) + assert_equal(np.tanh(in_arr), out_arr) + + def test_arcsinh(self): + in_ = [np.nan, -np.nan, np.inf, -np.inf] + out = [np.nan, np.nan, np.inf, -np.inf] + for dt in ['e', 'f', 'd']: + in_arr = np.array(in_, dtype=dt) + out_arr = np.array(out, dtype=dt) + assert_equal(np.arcsinh(in_arr), out_arr) + + 
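The special-value tests in this patch all lean on the same np.errstate pattern: with the floating-point error state ignored, invalid inputs (nan, inf, out-of-domain values) must quietly propagate as nan, and with the state set to 'raise' the same inputs must raise FloatingPointError. A minimal standalone sketch of that pattern, using illustrative values rather than anything taken from the patch:

import numpy as np

# With errors ignored, an out-of-domain input quietly produces nan.
with np.errstate(all='ignore'):
    print(np.arcsin(np.float32(2.0)))      # nan, no warning emitted

# With 'invalid' set to raise, the same operation raises instead,
# which is what assert_raises(FloatingPointError, ...) checks above.
with np.errstate(invalid='raise'):
    try:
        np.arcsin(np.float32(2.0))
    except FloatingPointError as exc:
        print("raised:", exc)

The tests simply wrap this pattern in pytest parametrization over dtypes and input arrays.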
@pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + def test_arccosh(self): + with np.errstate(all='ignore'): + in_ = [np.nan, -np.nan, np.inf, -np.inf, 1.0, 0.0] + out = [np.nan, np.nan, np.inf, np.nan, 0.0, np.nan] + for dt in ['e', 'f', 'd']: + in_arr = np.array(in_, dtype=dt) + out_arr = np.array(out, dtype=dt) + assert_equal(np.arccosh(in_arr), out_arr) + + for value in [0.0, -np.inf]: + with np.errstate(invalid='raise'): + for dt in ['e', 'f', 'd']: + assert_raises(FloatingPointError, np.arccosh, + np.array(value, dtype=dt)) + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + def test_arctanh(self): + with np.errstate(all='ignore'): + in_ = [np.nan, -np.nan, np.inf, -np.inf, 1.0, -1.0, 2.0] + out = [np.nan, np.nan, np.nan, np.nan, np.inf, -np.inf, np.nan] + for dt in ['e', 'f', 'd']: + in_arr = np.array(in_, dtype=dt) + out_arr = np.array(out, dtype=dt) + assert_equal(np.arctanh(in_arr), out_arr) + + for value in [1.01, np.inf, -np.inf, 1.0, -1.0]: + with np.errstate(invalid='raise', divide='raise'): + for dt in ['e', 'f', 'd']: + assert_raises(FloatingPointError, np.arctanh, + np.array(value, dtype=dt)) + + # Make sure glibc < 2.18 atanh is not used, issue 25087 + assert np.signbit(np.arctanh(-1j).real) + + # See: https://github.com/numpy/numpy/issues/20448 + @pytest.mark.xfail( + _glibc_older_than("2.17"), + reason="Older glibc versions may not raise appropriate FP exceptions" + ) + def test_exp2(self): + with np.errstate(all='ignore'): + in_ = [np.nan, -np.nan, np.inf, -np.inf] + out = [np.nan, np.nan, np.inf, 0.0] + for dt in ['e', 'f', 'd']: + in_arr = np.array(in_, dtype=dt) + out_arr = np.array(out, dtype=dt) + assert_equal(np.exp2(in_arr), out_arr) + + for value in [2000.0, -2000.0]: + with np.errstate(over='raise', under='raise'): + for dt in ['e', 'f', 'd']: + assert_raises(FloatingPointError, np.exp2, + np.array(value, dtype=dt)) + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + def test_expm1(self): + with np.errstate(all='ignore'): + in_ = [np.nan, -np.nan, np.inf, -np.inf] + out = [np.nan, np.nan, np.inf, -1.0] + for dt in ['e', 'f', 'd']: + in_arr = np.array(in_, dtype=dt) + out_arr = np.array(out, dtype=dt) + assert_equal(np.expm1(in_arr), out_arr) + + for value in [200.0, 2000.0]: + with np.errstate(over='raise'): + for dt in ['e', 'f']: + assert_raises(FloatingPointError, np.expm1, + np.array(value, dtype=dt)) + + # test to ensure no spurious FP exceptions are raised due to SIMD + INF_INVALID_ERR = [ + np.cos, np.sin, np.tan, np.arccos, np.arcsin, np.spacing, np.arctanh + ] + NEG_INVALID_ERR = [ + np.log, np.log2, np.log10, np.log1p, np.sqrt, np.arccosh, + np.arctanh + ] + ONE_INVALID_ERR = [ + np.arctanh, + ] + LTONE_INVALID_ERR = [ + np.arccosh, + ] + BYZERO_ERR = [ + np.log, np.log2, np.log10, np.reciprocal, np.arccosh + ] + + @pytest.mark.skipif(sys.platform == "win32" and sys.maxsize < 2**31 + 1, + reason='failures on 32-bit Python, see FIXME below') + @pytest.mark.parametrize("ufunc", UFUNCS_UNARY_FP) + @pytest.mark.parametrize("dtype", ('e', 'f', 'd')) + @pytest.mark.parametrize("data, escape", ( + ([0.03], LTONE_INVALID_ERR), + ([0.03]*32, LTONE_INVALID_ERR), + # neg + ([-1.0], NEG_INVALID_ERR), + ([-1.0]*32, NEG_INVALID_ERR), + # flat + ([1.0], ONE_INVALID_ERR), + ([1.0]*32, ONE_INVALID_ERR), + # zero + ([0.0], BYZERO_ERR), + ([0.0]*32, BYZERO_ERR), + ([-0.0], BYZERO_ERR), + ([-0.0]*32, BYZERO_ERR), + # nan + ([0.5, 0.5, 0.5, np.nan], LTONE_INVALID_ERR), + ([0.5, 0.5, 0.5, np.nan]*32, 
LTONE_INVALID_ERR), + ([np.nan, 1.0, 1.0, 1.0], ONE_INVALID_ERR), + ([np.nan, 1.0, 1.0, 1.0]*32, ONE_INVALID_ERR), + ([np.nan], []), + ([np.nan]*32, []), + # inf + ([0.5, 0.5, 0.5, np.inf], INF_INVALID_ERR + LTONE_INVALID_ERR), + ([0.5, 0.5, 0.5, np.inf]*32, INF_INVALID_ERR + LTONE_INVALID_ERR), + ([np.inf, 1.0, 1.0, 1.0], INF_INVALID_ERR), + ([np.inf, 1.0, 1.0, 1.0]*32, INF_INVALID_ERR), + ([np.inf], INF_INVALID_ERR), + ([np.inf]*32, INF_INVALID_ERR), + # ninf + ([0.5, 0.5, 0.5, -np.inf], + NEG_INVALID_ERR + INF_INVALID_ERR + LTONE_INVALID_ERR), + ([0.5, 0.5, 0.5, -np.inf]*32, + NEG_INVALID_ERR + INF_INVALID_ERR + LTONE_INVALID_ERR), + ([-np.inf, 1.0, 1.0, 1.0], NEG_INVALID_ERR + INF_INVALID_ERR), + ([-np.inf, 1.0, 1.0, 1.0]*32, NEG_INVALID_ERR + INF_INVALID_ERR), + ([-np.inf], NEG_INVALID_ERR + INF_INVALID_ERR), + ([-np.inf]*32, NEG_INVALID_ERR + INF_INVALID_ERR), + )) + def test_unary_spurious_fpexception(self, ufunc, dtype, data, escape): + if escape and ufunc in escape: + return + # FIXME: NAN raises FP invalid exception: + # - ceil/float16 on MSVC:32-bit + # - spacing/float16 on almost all platforms + # FIXME: skipped on MSVC:32-bit during switch to Meson, 10 cases fail + # when SIMD support not present / disabled + if ufunc in (np.spacing, np.ceil) and dtype == 'e': + return + array = np.array(data, dtype=dtype) + with assert_no_warnings(): + ufunc(array) + + @pytest.mark.parametrize("dtype", ('e', 'f', 'd')) + def test_divide_spurious_fpexception(self, dtype): + dt = np.dtype(dtype) + dt_info = np.finfo(dt) + subnorm = dt_info.smallest_subnormal + # Verify a bug fix caused due to filling the remaining lanes of the + # partially loaded dividend SIMD vector with ones, which leads to + # raising an overflow warning when the divisor is denormal. 
+ # see https://github.com/numpy/numpy/issues/25097 + with assert_no_warnings(): + np.zeros(128 + 1, dtype=dt) / subnorm + +class TestFPClass: + @pytest.mark.parametrize("stride", [-5, -4, -3, -2, -1, 1, + 2, 4, 5, 6, 7, 8, 9, 10]) + def test_fpclass(self, stride): + arr_f64 = np.array([np.nan, -np.nan, np.inf, -np.inf, -1.0, 1.0, -0.0, 0.0, 2.2251e-308, -2.2251e-308], dtype='d') + arr_f32 = np.array([np.nan, -np.nan, np.inf, -np.inf, -1.0, 1.0, -0.0, 0.0, 1.4013e-045, -1.4013e-045], dtype='f') + nan = np.array([True, True, False, False, False, False, False, False, False, False]) + inf = np.array([False, False, True, True, False, False, False, False, False, False]) + sign = np.array([False, True, False, True, True, False, True, False, False, True]) + finite = np.array([False, False, False, False, True, True, True, True, True, True]) + assert_equal(np.isnan(arr_f32[::stride]), nan[::stride]) + assert_equal(np.isnan(arr_f64[::stride]), nan[::stride]) + assert_equal(np.isinf(arr_f32[::stride]), inf[::stride]) + assert_equal(np.isinf(arr_f64[::stride]), inf[::stride]) + assert_equal(np.signbit(arr_f32[::stride]), sign[::stride]) + assert_equal(np.signbit(arr_f64[::stride]), sign[::stride]) + assert_equal(np.isfinite(arr_f32[::stride]), finite[::stride]) + assert_equal(np.isfinite(arr_f64[::stride]), finite[::stride]) + + @pytest.mark.parametrize("dtype", ['d', 'f']) + def test_fp_noncontiguous(self, dtype): + data = np.array([np.nan, -np.nan, np.inf, -np.inf, -1.0, + 1.0, -0.0, 0.0, 2.2251e-308, + -2.2251e-308], dtype=dtype) + nan = np.array([True, True, False, False, False, False, + False, False, False, False]) + inf = np.array([False, False, True, True, False, False, + False, False, False, False]) + sign = np.array([False, True, False, True, True, False, + True, False, False, True]) + finite = np.array([False, False, False, False, True, True, + True, True, True, True]) + out = np.ndarray(data.shape, dtype='bool') + ncontig_in = data[1::3] + ncontig_out = out[1::3] + contig_in = np.array(ncontig_in) + assert_equal(ncontig_in.flags.c_contiguous, False) + assert_equal(ncontig_out.flags.c_contiguous, False) + assert_equal(contig_in.flags.c_contiguous, True) + # ncontig in, ncontig out + assert_equal(np.isnan(ncontig_in, out=ncontig_out), nan[1::3]) + assert_equal(np.isinf(ncontig_in, out=ncontig_out), inf[1::3]) + assert_equal(np.signbit(ncontig_in, out=ncontig_out), sign[1::3]) + assert_equal(np.isfinite(ncontig_in, out=ncontig_out), finite[1::3]) + # contig in, ncontig out + assert_equal(np.isnan(contig_in, out=ncontig_out), nan[1::3]) + assert_equal(np.isinf(contig_in, out=ncontig_out), inf[1::3]) + assert_equal(np.signbit(contig_in, out=ncontig_out), sign[1::3]) + assert_equal(np.isfinite(contig_in, out=ncontig_out), finite[1::3]) + # ncontig in, contig out + assert_equal(np.isnan(ncontig_in), nan[1::3]) + assert_equal(np.isinf(ncontig_in), inf[1::3]) + assert_equal(np.signbit(ncontig_in), sign[1::3]) + assert_equal(np.isfinite(ncontig_in), finite[1::3]) + # contig in, contig out, nd stride + data_split = np.array(np.array_split(data, 2)) + nan_split = np.array(np.array_split(nan, 2)) + inf_split = np.array(np.array_split(inf, 2)) + sign_split = np.array(np.array_split(sign, 2)) + finite_split = np.array(np.array_split(finite, 2)) + assert_equal(np.isnan(data_split), nan_split) + assert_equal(np.isinf(data_split), inf_split) + assert_equal(np.signbit(data_split), sign_split) + assert_equal(np.isfinite(data_split), finite_split) + +class TestLDExp: + @pytest.mark.parametrize("stride", 
[-4,-2,-1,1,2,4]) + @pytest.mark.parametrize("dtype", ['f', 'd']) + def test_ldexp(self, dtype, stride): + mant = np.array([0.125, 0.25, 0.5, 1., 1., 2., 4., 8.], dtype=dtype) + exp = np.array([3, 2, 1, 0, 0, -1, -2, -3], dtype='i') + out = np.zeros(8, dtype=dtype) + assert_equal(np.ldexp(mant[::stride], exp[::stride], out=out[::stride]), np.ones(8, dtype=dtype)[::stride]) + assert_equal(out[::stride], np.ones(8, dtype=dtype)[::stride]) + +class TestFRExp: + @pytest.mark.parametrize("stride", [-4,-2,-1,1,2,4]) + @pytest.mark.parametrize("dtype", ['f', 'd']) + @pytest.mark.xfail(IS_MUSL, reason="gh23048") + @pytest.mark.skipif(not sys.platform.startswith('linux'), + reason="np.frexp gives different answers for NAN/INF on windows and linux") + def test_frexp(self, dtype, stride): + arr = np.array([np.nan, np.nan, np.inf, -np.inf, 0.0, -0.0, 1.0, -1.0], dtype=dtype) + mant_true = np.array([np.nan, np.nan, np.inf, -np.inf, 0.0, -0.0, 0.5, -0.5], dtype=dtype) + exp_true = np.array([0, 0, 0, 0, 0, 0, 1, 1], dtype='i') + out_mant = np.ones(8, dtype=dtype) + out_exp = 2*np.ones(8, dtype='i') + mant, exp = np.frexp(arr[::stride], out=(out_mant[::stride], out_exp[::stride])) + assert_equal(mant_true[::stride], mant) + assert_equal(exp_true[::stride], exp) + assert_equal(out_mant[::stride], mant_true[::stride]) + assert_equal(out_exp[::stride], exp_true[::stride]) + +# func : [maxulperror, low, high] +avx_ufuncs = {'sqrt' :[1, 0., 100.], + 'absolute' :[0, -100., 100.], + 'reciprocal' :[1, 1., 100.], + 'square' :[1, -100., 100.], + 'rint' :[0, -100., 100.], + 'floor' :[0, -100., 100.], + 'ceil' :[0, -100., 100.], + 'trunc' :[0, -100., 100.]} + +class TestAVXUfuncs: + def test_avx_based_ufunc(self): + strides = np.array([-4,-3,-2,-1,1,2,3,4]) + np.random.seed(42) + for func, prop in avx_ufuncs.items(): + maxulperr = prop[0] + minval = prop[1] + maxval = prop[2] + # various array sizes to ensure masking in AVX is tested + for size in range(1,32): + myfunc = getattr(np, func) + x_f32 = np.float32(np.random.uniform(low=minval, high=maxval, + size=size)) + x_f64 = np.float64(x_f32) + x_f128 = np.longdouble(x_f32) + y_true128 = myfunc(x_f128) + if maxulperr == 0: + assert_equal(myfunc(x_f32), np.float32(y_true128)) + assert_equal(myfunc(x_f64), np.float64(y_true128)) + else: + assert_array_max_ulp(myfunc(x_f32), np.float32(y_true128), + maxulp=maxulperr) + assert_array_max_ulp(myfunc(x_f64), np.float64(y_true128), + maxulp=maxulperr) + # various strides to test gather instruction + if size > 1: + y_true32 = myfunc(x_f32) + y_true64 = myfunc(x_f64) + for jj in strides: + assert_equal(myfunc(x_f64[::jj]), y_true64[::jj]) + assert_equal(myfunc(x_f32[::jj]), y_true32[::jj]) + +class TestAVXFloat32Transcendental: + def test_exp_float32(self): + np.random.seed(42) + x_f32 = np.float32(np.random.uniform(low=0.0,high=88.1,size=1000000)) + x_f64 = np.float64(x_f32) + assert_array_max_ulp(np.exp(x_f32), np.float32(np.exp(x_f64)), maxulp=3) + + def test_log_float32(self): + np.random.seed(42) + x_f32 = np.float32(np.random.uniform(low=0.0,high=1000,size=1000000)) + x_f64 = np.float64(x_f32) + assert_array_max_ulp(np.log(x_f32), np.float32(np.log(x_f64)), maxulp=4) + + def test_sincos_float32(self): + np.random.seed(42) + N = 1000000 + M = np.int_(N/20) + index = np.random.randint(low=0, high=N, size=M) + x_f32 = np.float32(np.random.uniform(low=-100.,high=100.,size=N)) + if not _glibc_older_than("2.17"): + # test coverage for elements > 117435.992f for which glibc is used + # this is known to be problematic on old 
glibc, so skip it there + x_f32[index] = np.float32(10E+10*np.random.rand(M)) + x_f64 = np.float64(x_f32) + assert_array_max_ulp(np.sin(x_f32), np.float32(np.sin(x_f64)), maxulp=2) + assert_array_max_ulp(np.cos(x_f32), np.float32(np.cos(x_f64)), maxulp=2) + # test aliasing(issue #17761) + tx_f32 = x_f32.copy() + assert_array_max_ulp(np.sin(x_f32, out=x_f32), np.float32(np.sin(x_f64)), maxulp=2) + assert_array_max_ulp(np.cos(tx_f32, out=tx_f32), np.float32(np.cos(x_f64)), maxulp=2) + + def test_strided_float32(self): + np.random.seed(42) + strides = np.array([-4,-3,-2,-1,1,2,3,4]) + sizes = np.arange(2,100) + for ii in sizes: + x_f32 = np.float32(np.random.uniform(low=0.01,high=88.1,size=ii)) + x_f32_large = x_f32.copy() + x_f32_large[3:-1:4] = 120000.0 + exp_true = np.exp(x_f32) + log_true = np.log(x_f32) + sin_true = np.sin(x_f32_large) + cos_true = np.cos(x_f32_large) + for jj in strides: + assert_array_almost_equal_nulp(np.exp(x_f32[::jj]), exp_true[::jj], nulp=2) + assert_array_almost_equal_nulp(np.log(x_f32[::jj]), log_true[::jj], nulp=2) + assert_array_almost_equal_nulp(np.sin(x_f32_large[::jj]), sin_true[::jj], nulp=2) + assert_array_almost_equal_nulp(np.cos(x_f32_large[::jj]), cos_true[::jj], nulp=2) + +class TestLogAddExp(_FilterInvalids): + def test_logaddexp_values(self): + x = [1, 2, 3, 4, 5] + y = [5, 4, 3, 2, 1] + z = [6, 6, 6, 6, 6] + for dt, dec_ in zip(['f', 'd', 'g'], [6, 15, 15]): + xf = np.log(np.array(x, dtype=dt)) + yf = np.log(np.array(y, dtype=dt)) + zf = np.log(np.array(z, dtype=dt)) + assert_almost_equal(np.logaddexp(xf, yf), zf, decimal=dec_) + + def test_logaddexp_range(self): + x = [1000000, -1000000, 1000200, -1000200] + y = [1000200, -1000200, 1000000, -1000000] + z = [1000200, -1000000, 1000200, -1000000] + for dt in ['f', 'd', 'g']: + logxf = np.array(x, dtype=dt) + logyf = np.array(y, dtype=dt) + logzf = np.array(z, dtype=dt) + assert_almost_equal(np.logaddexp(logxf, logyf), logzf) + + def test_inf(self): + inf = np.inf + x = [inf, -inf, inf, -inf, inf, 1, -inf, 1] + y = [inf, inf, -inf, -inf, 1, inf, 1, -inf] + z = [inf, inf, inf, -inf, inf, inf, 1, 1] + with np.errstate(invalid='raise'): + for dt in ['f', 'd', 'g']: + logxf = np.array(x, dtype=dt) + logyf = np.array(y, dtype=dt) + logzf = np.array(z, dtype=dt) + assert_equal(np.logaddexp(logxf, logyf), logzf) + + def test_nan(self): + assert_(np.isnan(np.logaddexp(np.nan, np.inf))) + assert_(np.isnan(np.logaddexp(np.inf, np.nan))) + assert_(np.isnan(np.logaddexp(np.nan, 0))) + assert_(np.isnan(np.logaddexp(0, np.nan))) + assert_(np.isnan(np.logaddexp(np.nan, np.nan))) + + def test_reduce(self): + assert_equal(np.logaddexp.identity, -np.inf) + assert_equal(np.logaddexp.reduce([]), -np.inf) + + +class TestLog1p: + def test_log1p(self): + assert_almost_equal(ncu.log1p(0.2), ncu.log(1.2)) + assert_almost_equal(ncu.log1p(1e-6), ncu.log(1+1e-6)) + + def test_special(self): + with np.errstate(invalid="ignore", divide="ignore"): + assert_equal(ncu.log1p(np.nan), np.nan) + assert_equal(ncu.log1p(np.inf), np.inf) + assert_equal(ncu.log1p(-1.), -np.inf) + assert_equal(ncu.log1p(-2.), np.nan) + assert_equal(ncu.log1p(-np.inf), np.nan) + + +class TestExpm1: + def test_expm1(self): + assert_almost_equal(ncu.expm1(0.2), ncu.exp(0.2)-1) + assert_almost_equal(ncu.expm1(1e-6), ncu.exp(1e-6)-1) + + def test_special(self): + assert_equal(ncu.expm1(np.inf), np.inf) + assert_equal(ncu.expm1(0.), 0.) + assert_equal(ncu.expm1(-0.), -0.) + assert_equal(ncu.expm1(np.inf), np.inf) + assert_equal(ncu.expm1(-np.inf), -1.) 
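The reason expm1 and log1p get dedicated ufuncs (and dedicated special-value tests here) is accuracy near zero, where the naive forms exp(x) - 1 and log(1 + x) lose most of their significant digits to cancellation. A short illustrative comparison, assuming ordinary IEEE-754 double behaviour and a hypothetical tiny input:

import numpy as np

x = 1e-12
# The naive forms are dominated by rounding error for tiny x ...
print(np.exp(x) - 1.0)   # noticeably off from 1e-12 due to cancellation
print(np.log(1.0 + x))   # same loss of precision
# ... while the dedicated ufuncs stay accurate to full precision.
print(np.expm1(x))       # ~1e-12
print(np.log1p(x))       # ~1e-12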
+ + def test_complex(self): + x = np.asarray(1e-12) + assert_allclose(x, ncu.expm1(x)) + x = x.astype(np.complex128) + assert_allclose(x, ncu.expm1(x)) + + +class TestHypot: + def test_simple(self): + assert_almost_equal(ncu.hypot(1, 1), ncu.sqrt(2)) + assert_almost_equal(ncu.hypot(0, 0), 0) + + def test_reduce(self): + assert_almost_equal(ncu.hypot.reduce([3.0, 4.0]), 5.0) + assert_almost_equal(ncu.hypot.reduce([3.0, 4.0, 0]), 5.0) + assert_almost_equal(ncu.hypot.reduce([9.0, 12.0, 20.0]), 25.0) + assert_equal(ncu.hypot.reduce([]), 0.0) + + +def assert_hypot_isnan(x, y): + with np.errstate(invalid='ignore'): + assert_(np.isnan(ncu.hypot(x, y)), + "hypot(%s, %s) is %s, not nan" % (x, y, ncu.hypot(x, y))) + + +def assert_hypot_isinf(x, y): + with np.errstate(invalid='ignore'): + assert_(np.isinf(ncu.hypot(x, y)), + "hypot(%s, %s) is %s, not inf" % (x, y, ncu.hypot(x, y))) + + +class TestHypotSpecialValues: + def test_nan_outputs(self): + assert_hypot_isnan(np.nan, np.nan) + assert_hypot_isnan(np.nan, 1) + + def test_nan_outputs2(self): + assert_hypot_isinf(np.nan, np.inf) + assert_hypot_isinf(np.inf, np.nan) + assert_hypot_isinf(np.inf, 0) + assert_hypot_isinf(0, np.inf) + assert_hypot_isinf(np.inf, np.inf) + assert_hypot_isinf(np.inf, 23.0) + + def test_no_fpe(self): + assert_no_warnings(ncu.hypot, np.inf, 0) + + +def assert_arctan2_isnan(x, y): + assert_(np.isnan(ncu.arctan2(x, y)), "arctan(%s, %s) is %s, not nan" % (x, y, ncu.arctan2(x, y))) + + +def assert_arctan2_ispinf(x, y): + assert_((np.isinf(ncu.arctan2(x, y)) and ncu.arctan2(x, y) > 0), "arctan(%s, %s) is %s, not +inf" % (x, y, ncu.arctan2(x, y))) + + +def assert_arctan2_isninf(x, y): + assert_((np.isinf(ncu.arctan2(x, y)) and ncu.arctan2(x, y) < 0), "arctan(%s, %s) is %s, not -inf" % (x, y, ncu.arctan2(x, y))) + + +def assert_arctan2_ispzero(x, y): + assert_((ncu.arctan2(x, y) == 0 and not np.signbit(ncu.arctan2(x, y))), "arctan(%s, %s) is %s, not +0" % (x, y, ncu.arctan2(x, y))) + + +def assert_arctan2_isnzero(x, y): + assert_((ncu.arctan2(x, y) == 0 and np.signbit(ncu.arctan2(x, y))), "arctan(%s, %s) is %s, not -0" % (x, y, ncu.arctan2(x, y))) + + +class TestArctan2SpecialValues: + def test_one_one(self): + # atan2(1, 1) returns pi/4. + assert_almost_equal(ncu.arctan2(1, 1), 0.25 * np.pi) + assert_almost_equal(ncu.arctan2(-1, 1), -0.25 * np.pi) + assert_almost_equal(ncu.arctan2(1, -1), 0.75 * np.pi) + + def test_zero_nzero(self): + # atan2(+-0, -0) returns +-pi. + assert_almost_equal(ncu.arctan2(np.PZERO, np.NZERO), np.pi) + assert_almost_equal(ncu.arctan2(np.NZERO, np.NZERO), -np.pi) + + def test_zero_pzero(self): + # atan2(+-0, +0) returns +-0. + assert_arctan2_ispzero(np.PZERO, np.PZERO) + assert_arctan2_isnzero(np.NZERO, np.PZERO) + + def test_zero_negative(self): + # atan2(+-0, x) returns +-pi for x < 0. + assert_almost_equal(ncu.arctan2(np.PZERO, -1), np.pi) + assert_almost_equal(ncu.arctan2(np.NZERO, -1), -np.pi) + + def test_zero_positive(self): + # atan2(+-0, x) returns +-0 for x > 0. + assert_arctan2_ispzero(np.PZERO, 1) + assert_arctan2_isnzero(np.NZERO, 1) + + def test_positive_zero(self): + # atan2(y, +-0) returns +pi/2 for y > 0. + assert_almost_equal(ncu.arctan2(1, np.PZERO), 0.5 * np.pi) + assert_almost_equal(ncu.arctan2(1, np.NZERO), 0.5 * np.pi) + + def test_negative_zero(self): + # atan2(y, +-0) returns -pi/2 for y < 0. 
+ assert_almost_equal(ncu.arctan2(-1, np.PZERO), -0.5 * np.pi) + assert_almost_equal(ncu.arctan2(-1, np.NZERO), -0.5 * np.pi) + + def test_any_ninf(self): + # atan2(+-y, -infinity) returns +-pi for finite y > 0. + assert_almost_equal(ncu.arctan2(1, np.NINF), np.pi) + assert_almost_equal(ncu.arctan2(-1, np.NINF), -np.pi) + + def test_any_pinf(self): + # atan2(+-y, +infinity) returns +-0 for finite y > 0. + assert_arctan2_ispzero(1, np.inf) + assert_arctan2_isnzero(-1, np.inf) + + def test_inf_any(self): + # atan2(+-infinity, x) returns +-pi/2 for finite x. + assert_almost_equal(ncu.arctan2( np.inf, 1), 0.5 * np.pi) + assert_almost_equal(ncu.arctan2(-np.inf, 1), -0.5 * np.pi) + + def test_inf_ninf(self): + # atan2(+-infinity, -infinity) returns +-3*pi/4. + assert_almost_equal(ncu.arctan2( np.inf, -np.inf), 0.75 * np.pi) + assert_almost_equal(ncu.arctan2(-np.inf, -np.inf), -0.75 * np.pi) + + def test_inf_pinf(self): + # atan2(+-infinity, +infinity) returns +-pi/4. + assert_almost_equal(ncu.arctan2( np.inf, np.inf), 0.25 * np.pi) + assert_almost_equal(ncu.arctan2(-np.inf, np.inf), -0.25 * np.pi) + + def test_nan_any(self): + # atan2(nan, x) returns nan for any x, including inf + assert_arctan2_isnan(np.nan, np.inf) + assert_arctan2_isnan(np.inf, np.nan) + assert_arctan2_isnan(np.nan, np.nan) + + +class TestLdexp: + def _check_ldexp(self, tp): + assert_almost_equal(ncu.ldexp(np.array(2., np.float32), + np.array(3, tp)), 16.) + assert_almost_equal(ncu.ldexp(np.array(2., np.float64), + np.array(3, tp)), 16.) + assert_almost_equal(ncu.ldexp(np.array(2., np.longdouble), + np.array(3, tp)), 16.) + + def test_ldexp(self): + # The default Python int type should work + assert_almost_equal(ncu.ldexp(2., 3), 16.) + # The following int types should all be accepted + self._check_ldexp(np.int8) + self._check_ldexp(np.int16) + self._check_ldexp(np.int32) + self._check_ldexp('i') + self._check_ldexp('l') + + def test_ldexp_overflow(self): + # silence warning emitted on overflow + with np.errstate(over="ignore"): + imax = np.iinfo(np.dtype('l')).max + imin = np.iinfo(np.dtype('l')).min + assert_equal(ncu.ldexp(2., imax), np.inf) + assert_equal(ncu.ldexp(2., imin), 0) + + +class TestMaximum(_FilterInvalids): + def test_reduce(self): + dflt = np.typecodes['AllFloat'] + dint = np.typecodes['AllInteger'] + seq1 = np.arange(11) + seq2 = seq1[::-1] + func = np.maximum.reduce + for dt in dint: + tmp1 = seq1.astype(dt) + tmp2 = seq2.astype(dt) + assert_equal(func(tmp1), 10) + assert_equal(func(tmp2), 10) + for dt in dflt: + tmp1 = seq1.astype(dt) + tmp2 = seq2.astype(dt) + assert_equal(func(tmp1), 10) + assert_equal(func(tmp2), 10) + tmp1[::2] = np.nan + tmp2[::2] = np.nan + assert_equal(func(tmp1), np.nan) + assert_equal(func(tmp2), np.nan) + + def test_reduce_complex(self): + assert_equal(np.maximum.reduce([1, 2j]), 1) + assert_equal(np.maximum.reduce([1+3j, 2j]), 1+3j) + + def test_float_nans(self): + nan = np.nan + arg1 = np.array([0, nan, nan]) + arg2 = np.array([nan, 0, nan]) + out = np.array([nan, nan, nan]) + assert_equal(np.maximum(arg1, arg2), out) + + def test_object_nans(self): + # Multiple checks to give this a chance to + # fail if cmp is used instead of rich compare. + # Failure cannot be guaranteed. 
+ for i in range(1): + x = np.array(float('nan'), object) + y = 1.0 + z = np.array(float('nan'), object) + assert_(np.maximum(x, y) == 1.0) + assert_(np.maximum(z, y) == 1.0) + + def test_complex_nans(self): + nan = np.nan + for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]: + arg1 = np.array([0, cnan, cnan], dtype=complex) + arg2 = np.array([cnan, 0, cnan], dtype=complex) + out = np.array([nan, nan, nan], dtype=complex) + assert_equal(np.maximum(arg1, arg2), out) + + def test_object_array(self): + arg1 = np.arange(5, dtype=object) + arg2 = arg1 + 1 + assert_equal(np.maximum(arg1, arg2), arg2) + + def test_strided_array(self): + arr1 = np.array([-4.0, 1.0, 10.0, 0.0, np.nan, -np.nan, np.inf, -np.inf]) + arr2 = np.array([-2.0,-1.0, np.nan, 1.0, 0.0, np.nan, 1.0, -3.0]) + maxtrue = np.array([-2.0, 1.0, np.nan, 1.0, np.nan, np.nan, np.inf, -3.0]) + out = np.ones(8) + out_maxtrue = np.array([-2.0, 1.0, 1.0, 10.0, 1.0, 1.0, np.nan, 1.0]) + assert_equal(np.maximum(arr1,arr2), maxtrue) + assert_equal(np.maximum(arr1[::2],arr2[::2]), maxtrue[::2]) + assert_equal(np.maximum(arr1[:4:], arr2[::2]), np.array([-2.0, np.nan, 10.0, 1.0])) + assert_equal(np.maximum(arr1[::3], arr2[:3:]), np.array([-2.0, 0.0, np.nan])) + assert_equal(np.maximum(arr1[:6:2], arr2[::3], out=out[::3]), np.array([-2.0, 10., np.nan])) + assert_equal(out, out_maxtrue) + + def test_precision(self): + dtypes = [np.float16, np.float32, np.float64, np.longdouble] + + for dt in dtypes: + dtmin = np.finfo(dt).min + dtmax = np.finfo(dt).max + d1 = dt(0.1) + d1_next = np.nextafter(d1, np.inf) + + test_cases = [ + # v1 v2 expected + (dtmin, -np.inf, dtmin), + (dtmax, -np.inf, dtmax), + (d1, d1_next, d1_next), + (dtmax, np.nan, np.nan), + ] + + for v1, v2, expected in test_cases: + assert_equal(np.maximum([v1], [v2]), [expected]) + assert_equal(np.maximum.reduce([v1, v2]), expected) + + +class TestMinimum(_FilterInvalids): + def test_reduce(self): + dflt = np.typecodes['AllFloat'] + dint = np.typecodes['AllInteger'] + seq1 = np.arange(11) + seq2 = seq1[::-1] + func = np.minimum.reduce + for dt in dint: + tmp1 = seq1.astype(dt) + tmp2 = seq2.astype(dt) + assert_equal(func(tmp1), 0) + assert_equal(func(tmp2), 0) + for dt in dflt: + tmp1 = seq1.astype(dt) + tmp2 = seq2.astype(dt) + assert_equal(func(tmp1), 0) + assert_equal(func(tmp2), 0) + tmp1[::2] = np.nan + tmp2[::2] = np.nan + assert_equal(func(tmp1), np.nan) + assert_equal(func(tmp2), np.nan) + + def test_reduce_complex(self): + assert_equal(np.minimum.reduce([1, 2j]), 2j) + assert_equal(np.minimum.reduce([1+3j, 2j]), 2j) + + def test_float_nans(self): + nan = np.nan + arg1 = np.array([0, nan, nan]) + arg2 = np.array([nan, 0, nan]) + out = np.array([nan, nan, nan]) + assert_equal(np.minimum(arg1, arg2), out) + + def test_object_nans(self): + # Multiple checks to give this a chance to + # fail if cmp is used instead of rich compare. + # Failure cannot be guaranteed. 
+ for i in range(1): + x = np.array(float('nan'), object) + y = 1.0 + z = np.array(float('nan'), object) + assert_(np.minimum(x, y) == 1.0) + assert_(np.minimum(z, y) == 1.0) + + def test_complex_nans(self): + nan = np.nan + for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]: + arg1 = np.array([0, cnan, cnan], dtype=complex) + arg2 = np.array([cnan, 0, cnan], dtype=complex) + out = np.array([nan, nan, nan], dtype=complex) + assert_equal(np.minimum(arg1, arg2), out) + + def test_object_array(self): + arg1 = np.arange(5, dtype=object) + arg2 = arg1 + 1 + assert_equal(np.minimum(arg1, arg2), arg1) + + def test_strided_array(self): + arr1 = np.array([-4.0, 1.0, 10.0, 0.0, np.nan, -np.nan, np.inf, -np.inf]) + arr2 = np.array([-2.0,-1.0, np.nan, 1.0, 0.0, np.nan, 1.0, -3.0]) + mintrue = np.array([-4.0, -1.0, np.nan, 0.0, np.nan, np.nan, 1.0, -np.inf]) + out = np.ones(8) + out_mintrue = np.array([-4.0, 1.0, 1.0, 1.0, 1.0, 1.0, np.nan, 1.0]) + assert_equal(np.minimum(arr1,arr2), mintrue) + assert_equal(np.minimum(arr1[::2],arr2[::2]), mintrue[::2]) + assert_equal(np.minimum(arr1[:4:], arr2[::2]), np.array([-4.0, np.nan, 0.0, 0.0])) + assert_equal(np.minimum(arr1[::3], arr2[:3:]), np.array([-4.0, -1.0, np.nan])) + assert_equal(np.minimum(arr1[:6:2], arr2[::3], out=out[::3]), np.array([-4.0, 1.0, np.nan])) + assert_equal(out, out_mintrue) + + def test_precision(self): + dtypes = [np.float16, np.float32, np.float64, np.longdouble] + + for dt in dtypes: + dtmin = np.finfo(dt).min + dtmax = np.finfo(dt).max + d1 = dt(0.1) + d1_next = np.nextafter(d1, np.inf) + + test_cases = [ + # v1 v2 expected + (dtmin, np.inf, dtmin), + (dtmax, np.inf, dtmax), + (d1, d1_next, d1), + (dtmin, np.nan, np.nan), + ] + + for v1, v2, expected in test_cases: + assert_equal(np.minimum([v1], [v2]), [expected]) + assert_equal(np.minimum.reduce([v1, v2]), expected) + + +class TestFmax(_FilterInvalids): + def test_reduce(self): + dflt = np.typecodes['AllFloat'] + dint = np.typecodes['AllInteger'] + seq1 = np.arange(11) + seq2 = seq1[::-1] + func = np.fmax.reduce + for dt in dint: + tmp1 = seq1.astype(dt) + tmp2 = seq2.astype(dt) + assert_equal(func(tmp1), 10) + assert_equal(func(tmp2), 10) + for dt in dflt: + tmp1 = seq1.astype(dt) + tmp2 = seq2.astype(dt) + assert_equal(func(tmp1), 10) + assert_equal(func(tmp2), 10) + tmp1[::2] = np.nan + tmp2[::2] = np.nan + assert_equal(func(tmp1), 9) + assert_equal(func(tmp2), 9) + + def test_reduce_complex(self): + assert_equal(np.fmax.reduce([1, 2j]), 1) + assert_equal(np.fmax.reduce([1+3j, 2j]), 1+3j) + + def test_float_nans(self): + nan = np.nan + arg1 = np.array([0, nan, nan]) + arg2 = np.array([nan, 0, nan]) + out = np.array([0, 0, nan]) + assert_equal(np.fmax(arg1, arg2), out) + + def test_complex_nans(self): + nan = np.nan + for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]: + arg1 = np.array([0, cnan, cnan], dtype=complex) + arg2 = np.array([cnan, 0, cnan], dtype=complex) + out = np.array([0, 0, nan], dtype=complex) + assert_equal(np.fmax(arg1, arg2), out) + + def test_precision(self): + dtypes = [np.float16, np.float32, np.float64, np.longdouble] + + for dt in dtypes: + dtmin = np.finfo(dt).min + dtmax = np.finfo(dt).max + d1 = dt(0.1) + d1_next = np.nextafter(d1, np.inf) + + test_cases = [ + # v1 v2 expected + (dtmin, -np.inf, dtmin), + (dtmax, -np.inf, dtmax), + (d1, d1_next, d1_next), + (dtmax, np.nan, dtmax), + ] + + for v1, v2, expected in test_cases: + assert_equal(np.fmax([v1], [v2]), [expected]) + assert_equal(np.fmax.reduce([v1, v2]), 
expected) + + +class TestFmin(_FilterInvalids): + def test_reduce(self): + dflt = np.typecodes['AllFloat'] + dint = np.typecodes['AllInteger'] + seq1 = np.arange(11) + seq2 = seq1[::-1] + func = np.fmin.reduce + for dt in dint: + tmp1 = seq1.astype(dt) + tmp2 = seq2.astype(dt) + assert_equal(func(tmp1), 0) + assert_equal(func(tmp2), 0) + for dt in dflt: + tmp1 = seq1.astype(dt) + tmp2 = seq2.astype(dt) + assert_equal(func(tmp1), 0) + assert_equal(func(tmp2), 0) + tmp1[::2] = np.nan + tmp2[::2] = np.nan + assert_equal(func(tmp1), 1) + assert_equal(func(tmp2), 1) + + def test_reduce_complex(self): + assert_equal(np.fmin.reduce([1, 2j]), 2j) + assert_equal(np.fmin.reduce([1+3j, 2j]), 2j) + + def test_float_nans(self): + nan = np.nan + arg1 = np.array([0, nan, nan]) + arg2 = np.array([nan, 0, nan]) + out = np.array([0, 0, nan]) + assert_equal(np.fmin(arg1, arg2), out) + + def test_complex_nans(self): + nan = np.nan + for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]: + arg1 = np.array([0, cnan, cnan], dtype=complex) + arg2 = np.array([cnan, 0, cnan], dtype=complex) + out = np.array([0, 0, nan], dtype=complex) + assert_equal(np.fmin(arg1, arg2), out) + + def test_precision(self): + dtypes = [np.float16, np.float32, np.float64, np.longdouble] + + for dt in dtypes: + dtmin = np.finfo(dt).min + dtmax = np.finfo(dt).max + d1 = dt(0.1) + d1_next = np.nextafter(d1, np.inf) + + test_cases = [ + # v1 v2 expected + (dtmin, np.inf, dtmin), + (dtmax, np.inf, dtmax), + (d1, d1_next, d1), + (dtmin, np.nan, dtmin), + ] + + for v1, v2, expected in test_cases: + assert_equal(np.fmin([v1], [v2]), [expected]) + assert_equal(np.fmin.reduce([v1, v2]), expected) + + +class TestBool: + def test_exceptions(self): + a = np.ones(1, dtype=np.bool_) + assert_raises(TypeError, np.negative, a) + assert_raises(TypeError, np.positive, a) + assert_raises(TypeError, np.subtract, a, a) + + def test_truth_table_logical(self): + # 2, 3 and 4 serves as true values + input1 = [0, 0, 3, 2] + input2 = [0, 4, 0, 2] + + typecodes = (np.typecodes['AllFloat'] + + np.typecodes['AllInteger'] + + '?') # boolean + for dtype in map(np.dtype, typecodes): + arg1 = np.asarray(input1, dtype=dtype) + arg2 = np.asarray(input2, dtype=dtype) + + # OR + out = [False, True, True, True] + for func in (np.logical_or, np.maximum): + assert_equal(func(arg1, arg2).astype(bool), out) + # AND + out = [False, False, False, True] + for func in (np.logical_and, np.minimum): + assert_equal(func(arg1, arg2).astype(bool), out) + # XOR + out = [False, True, True, False] + for func in (np.logical_xor, np.not_equal): + assert_equal(func(arg1, arg2).astype(bool), out) + + def test_truth_table_bitwise(self): + arg1 = [False, False, True, True] + arg2 = [False, True, False, True] + + out = [False, True, True, True] + assert_equal(np.bitwise_or(arg1, arg2), out) + + out = [False, False, False, True] + assert_equal(np.bitwise_and(arg1, arg2), out) + + out = [False, True, True, False] + assert_equal(np.bitwise_xor(arg1, arg2), out) + + def test_reduce(self): + none = np.array([0, 0, 0, 0], bool) + some = np.array([1, 0, 1, 1], bool) + every = np.array([1, 1, 1, 1], bool) + empty = np.array([], bool) + + arrs = [none, some, every, empty] + + for arr in arrs: + assert_equal(np.logical_and.reduce(arr), all(arr)) + + for arr in arrs: + assert_equal(np.logical_or.reduce(arr), any(arr)) + + for arr in arrs: + assert_equal(np.logical_xor.reduce(arr), arr.sum() % 2 == 1) + + +class TestBitwiseUFuncs: + + bitwise_types = [np.dtype(c) for c in '?' 
+ 'bBhHiIlLqQ' + 'O'] + + def test_values(self): + for dt in self.bitwise_types: + zeros = np.array([0], dtype=dt) + ones = np.array([-1]).astype(dt) + msg = "dt = '%s'" % dt.char + + assert_equal(np.bitwise_not(zeros), ones, err_msg=msg) + assert_equal(np.bitwise_not(ones), zeros, err_msg=msg) + + assert_equal(np.bitwise_or(zeros, zeros), zeros, err_msg=msg) + assert_equal(np.bitwise_or(zeros, ones), ones, err_msg=msg) + assert_equal(np.bitwise_or(ones, zeros), ones, err_msg=msg) + assert_equal(np.bitwise_or(ones, ones), ones, err_msg=msg) + + assert_equal(np.bitwise_xor(zeros, zeros), zeros, err_msg=msg) + assert_equal(np.bitwise_xor(zeros, ones), ones, err_msg=msg) + assert_equal(np.bitwise_xor(ones, zeros), ones, err_msg=msg) + assert_equal(np.bitwise_xor(ones, ones), zeros, err_msg=msg) + + assert_equal(np.bitwise_and(zeros, zeros), zeros, err_msg=msg) + assert_equal(np.bitwise_and(zeros, ones), zeros, err_msg=msg) + assert_equal(np.bitwise_and(ones, zeros), zeros, err_msg=msg) + assert_equal(np.bitwise_and(ones, ones), ones, err_msg=msg) + + def test_types(self): + for dt in self.bitwise_types: + zeros = np.array([0], dtype=dt) + ones = np.array([-1]).astype(dt) + msg = "dt = '%s'" % dt.char + + assert_(np.bitwise_not(zeros).dtype == dt, msg) + assert_(np.bitwise_or(zeros, zeros).dtype == dt, msg) + assert_(np.bitwise_xor(zeros, zeros).dtype == dt, msg) + assert_(np.bitwise_and(zeros, zeros).dtype == dt, msg) + + def test_identity(self): + assert_(np.bitwise_or.identity == 0, 'bitwise_or') + assert_(np.bitwise_xor.identity == 0, 'bitwise_xor') + assert_(np.bitwise_and.identity == -1, 'bitwise_and') + + def test_reduction(self): + binary_funcs = (np.bitwise_or, np.bitwise_xor, np.bitwise_and) + + for dt in self.bitwise_types: + zeros = np.array([0], dtype=dt) + ones = np.array([-1]).astype(dt) + for f in binary_funcs: + msg = "dt: '%s', f: '%s'" % (dt, f) + assert_equal(f.reduce(zeros), zeros, err_msg=msg) + assert_equal(f.reduce(ones), ones, err_msg=msg) + + # Test empty reduction, no object dtype + for dt in self.bitwise_types[:-1]: + # No object array types + empty = np.array([], dtype=dt) + for f in binary_funcs: + msg = "dt: '%s', f: '%s'" % (dt, f) + tgt = np.array(f.identity).astype(dt) + res = f.reduce(empty) + assert_equal(res, tgt, err_msg=msg) + assert_(res.dtype == tgt.dtype, msg) + + # Empty object arrays use the identity. Note that the types may + # differ, the actual type used is determined by the assign_identity + # function and is not the same as the type returned by the identity + # method. 
+ for f in binary_funcs: + msg = "dt: '%s'" % (f,) + empty = np.array([], dtype=object) + tgt = f.identity + res = f.reduce(empty) + assert_equal(res, tgt, err_msg=msg) + + # Non-empty object arrays do not use the identity + for f in binary_funcs: + msg = "dt: '%s'" % (f,) + btype = np.array([True], dtype=object) + assert_(type(f.reduce(btype)) is bool, msg) + + +class TestInt: + def test_logical_not(self): + x = np.ones(10, dtype=np.int16) + o = np.ones(10 * 2, dtype=bool) + tgt = o.copy() + tgt[::2] = False + os = o[::2] + assert_array_equal(np.logical_not(x, out=os), False) + assert_array_equal(o, tgt) + + +class TestFloatingPoint: + def test_floating_point(self): + assert_equal(ncu.FLOATING_POINT_SUPPORT, 1) + + +class TestDegrees: + def test_degrees(self): + assert_almost_equal(ncu.degrees(np.pi), 180.0) + assert_almost_equal(ncu.degrees(-0.5*np.pi), -90.0) + + +class TestRadians: + def test_radians(self): + assert_almost_equal(ncu.radians(180.0), np.pi) + assert_almost_equal(ncu.radians(-90.0), -0.5*np.pi) + + +class TestHeavside: + def test_heaviside(self): + x = np.array([[-30.0, -0.1, 0.0, 0.2], [7.5, np.nan, np.inf, -np.inf]]) + expectedhalf = np.array([[0.0, 0.0, 0.5, 1.0], [1.0, np.nan, 1.0, 0.0]]) + expected1 = expectedhalf.copy() + expected1[0, 2] = 1 + + h = ncu.heaviside(x, 0.5) + assert_equal(h, expectedhalf) + + h = ncu.heaviside(x, 1.0) + assert_equal(h, expected1) + + x = x.astype(np.float32) + + h = ncu.heaviside(x, np.float32(0.5)) + assert_equal(h, expectedhalf.astype(np.float32)) + + h = ncu.heaviside(x, np.float32(1.0)) + assert_equal(h, expected1.astype(np.float32)) + + +class TestSign: + def test_sign(self): + a = np.array([np.inf, -np.inf, np.nan, 0.0, 3.0, -3.0]) + out = np.zeros(a.shape) + tgt = np.array([1., -1., np.nan, 0.0, 1.0, -1.0]) + + with np.errstate(invalid='ignore'): + res = ncu.sign(a) + assert_equal(res, tgt) + res = ncu.sign(a, out) + assert_equal(res, tgt) + assert_equal(out, tgt) + + def test_sign_dtype_object(self): + # In reference to github issue #6229 + + foo = np.array([-.1, 0, .1]) + a = np.sign(foo.astype(object)) + b = np.sign(foo) + + assert_array_equal(a, b) + + def test_sign_dtype_nan_object(self): + # In reference to github issue #6229 + def test_nan(): + foo = np.array([np.nan]) + # FIXME: a not used + a = np.sign(foo.astype(object)) + + assert_raises(TypeError, test_nan) + +class TestMinMax: + def test_minmax_blocked(self): + # simd tests on max/min, test all alignments, slow but important + # for 2 * vz + 2 * (vs - 1) + 1 (unrolled once) + for dt, sz in [(np.float32, 15), (np.float64, 7)]: + for out, inp, msg in _gen_alignment_data(dtype=dt, type='unary', + max_size=sz): + for i in range(inp.size): + inp[:] = np.arange(inp.size, dtype=dt) + inp[i] = np.nan + emsg = lambda: '%r\n%s' % (inp, msg) + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, + "invalid value encountered in reduce") + assert_(np.isnan(inp.max()), msg=emsg) + assert_(np.isnan(inp.min()), msg=emsg) + + inp[i] = 1e10 + assert_equal(inp.max(), 1e10, err_msg=msg) + inp[i] = -1e10 + assert_equal(inp.min(), -1e10, err_msg=msg) + + def test_lower_align(self): + # check data that is not aligned to element size + # i.e doubles are aligned to 4 bytes on i386 + d = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64) + assert_equal(d.max(), d[0]) + assert_equal(d.min(), d[0]) + + def test_reduce_reorder(self): + # gh 10370, 11029 Some compilers reorder the call to npy_getfloatstatus + # and put it before the call to an intrisic function that causes + # 
invalid status to be set. Also make sure warnings are not emitted + for n in (2, 4, 8, 16, 32): + for dt in (np.float32, np.float16, np.complex64): + for r in np.diagflat(np.array([np.nan] * n, dtype=dt)): + assert_equal(np.min(r), np.nan) + + def test_minimize_no_warns(self): + a = np.minimum(np.nan, 1) + assert_equal(a, np.nan) + + +class TestAbsoluteNegative: + def test_abs_neg_blocked(self): + # simd tests on abs, test all alignments for vz + 2 * (vs - 1) + 1 + for dt, sz in [(np.float32, 11), (np.float64, 5)]: + for out, inp, msg in _gen_alignment_data(dtype=dt, type='unary', + max_size=sz): + tgt = [ncu.absolute(i) for i in inp] + np.absolute(inp, out=out) + assert_equal(out, tgt, err_msg=msg) + assert_((out >= 0).all()) + + tgt = [-1*(i) for i in inp] + np.negative(inp, out=out) + assert_equal(out, tgt, err_msg=msg) + + for v in [np.nan, -np.inf, np.inf]: + for i in range(inp.size): + d = np.arange(inp.size, dtype=dt) + inp[:] = -d + inp[i] = v + d[i] = -v if v == -np.inf else v + assert_array_equal(np.abs(inp), d, err_msg=msg) + np.abs(inp, out=out) + assert_array_equal(out, d, err_msg=msg) + + assert_array_equal(-inp, -1*inp, err_msg=msg) + d = -1 * inp + np.negative(inp, out=out) + assert_array_equal(out, d, err_msg=msg) + + def test_lower_align(self): + # check data that is not aligned to element size + # i.e doubles are aligned to 4 bytes on i386 + d = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64) + assert_equal(np.abs(d), d) + assert_equal(np.negative(d), -d) + np.negative(d, out=d) + np.negative(np.ones_like(d), out=d) + np.abs(d, out=d) + np.abs(np.ones_like(d), out=d) + + @pytest.mark.parametrize("dtype", ['d', 'f', 'int32', 'int64']) + @pytest.mark.parametrize("big", [True, False]) + def test_noncontiguous(self, dtype, big): + data = np.array([-1.0, 1.0, -0.0, 0.0, 2.2251e-308, -2.5, 2.5, -6, + 6, -2.2251e-308, -8, 10], dtype=dtype) + expect = np.array([1.0, -1.0, 0.0, -0.0, -2.2251e-308, 2.5, -2.5, 6, + -6, 2.2251e-308, 8, -10], dtype=dtype) + if big: + data = np.repeat(data, 10) + expect = np.repeat(expect, 10) + out = np.ndarray(data.shape, dtype=dtype) + ncontig_in = data[1::2] + ncontig_out = out[1::2] + contig_in = np.array(ncontig_in) + # contig in, contig out + assert_array_equal(np.negative(contig_in), expect[1::2]) + # contig in, ncontig out + assert_array_equal(np.negative(contig_in, out=ncontig_out), + expect[1::2]) + # ncontig in, contig out + assert_array_equal(np.negative(ncontig_in), expect[1::2]) + # ncontig in, ncontig out + assert_array_equal(np.negative(ncontig_in, out=ncontig_out), + expect[1::2]) + # contig in, contig out, nd stride + data_split = np.array(np.array_split(data, 2)) + expect_split = np.array(np.array_split(expect, 2)) + assert_equal(np.negative(data_split), expect_split) + + +class TestPositive: + def test_valid(self): + valid_dtypes = [int, float, complex, object] + for dtype in valid_dtypes: + x = np.arange(5, dtype=dtype) + result = np.positive(x) + assert_equal(x, result, err_msg=str(dtype)) + + def test_invalid(self): + with assert_raises(TypeError): + np.positive(True) + with assert_raises(TypeError): + np.positive(np.datetime64('2000-01-01')) + with assert_raises(TypeError): + np.positive(np.array(['foo'], dtype=str)) + with assert_raises(TypeError): + np.positive(np.array(['bar'], dtype=object)) + + +class TestSpecialMethods: + def test_wrap(self): + + class with_wrap: + def __array__(self): + return np.zeros(1) + + def __array_wrap__(self, arr, context): + r = with_wrap() + r.arr = arr + r.context = context + return r 
+ + a = with_wrap() + x = ncu.minimum(a, a) + assert_equal(x.arr, np.zeros(1)) + func, args, i = x.context + assert_(func is ncu.minimum) + assert_equal(len(args), 2) + assert_equal(args[0], a) + assert_equal(args[1], a) + assert_equal(i, 0) + + def test_wrap_and_prepare_out(self): + # Calling convention for out should not affect how special methods are + # called + + class StoreArrayPrepareWrap(np.ndarray): + _wrap_args = None + _prepare_args = None + def __new__(cls): + return np.zeros(()).view(cls) + def __array_wrap__(self, obj, context): + self._wrap_args = context[1] + return obj + def __array_prepare__(self, obj, context): + self._prepare_args = context[1] + return obj + @property + def args(self): + # We need to ensure these are fetched at the same time, before + # any other ufuncs are called by the assertions + return (self._prepare_args, self._wrap_args) + def __repr__(self): + return "a" # for short test output + + def do_test(f_call, f_expected): + a = StoreArrayPrepareWrap() + f_call(a) + p, w = a.args + expected = f_expected(a) + try: + assert_equal(p, expected) + assert_equal(w, expected) + except AssertionError as e: + # assert_equal produces truly useless error messages + raise AssertionError("\n".join([ + "Bad arguments passed in ufunc call", + " expected: {}".format(expected), + " __array_prepare__ got: {}".format(p), + " __array_wrap__ got: {}".format(w) + ])) + + # method not on the out argument + do_test(lambda a: np.add(a, 0), lambda a: (a, 0)) + do_test(lambda a: np.add(a, 0, None), lambda a: (a, 0)) + do_test(lambda a: np.add(a, 0, out=None), lambda a: (a, 0)) + do_test(lambda a: np.add(a, 0, out=(None,)), lambda a: (a, 0)) + + # method on the out argument + do_test(lambda a: np.add(0, 0, a), lambda a: (0, 0, a)) + do_test(lambda a: np.add(0, 0, out=a), lambda a: (0, 0, a)) + do_test(lambda a: np.add(0, 0, out=(a,)), lambda a: (0, 0, a)) + + # Also check the where mask handling: + do_test(lambda a: np.add(a, 0, where=False), lambda a: (a, 0)) + do_test(lambda a: np.add(0, 0, a, where=False), lambda a: (0, 0, a)) + + def test_wrap_with_iterable(self): + # test fix for bug #1026: + + class with_wrap(np.ndarray): + __array_priority__ = 10 + + def __new__(cls): + return np.asarray(1).view(cls).copy() + + def __array_wrap__(self, arr, context): + return arr.view(type(self)) + + a = with_wrap() + x = ncu.multiply(a, (1, 2, 3)) + assert_(isinstance(x, with_wrap)) + assert_array_equal(x, np.array((1, 2, 3))) + + def test_priority_with_scalar(self): + # test fix for bug #826: + + class A(np.ndarray): + __array_priority__ = 10 + + def __new__(cls): + return np.asarray(1.0, 'float64').view(cls).copy() + + a = A() + x = np.float64(1)*a + assert_(isinstance(x, A)) + assert_array_equal(x, np.array(1)) + + def test_old_wrap(self): + + class with_wrap: + def __array__(self): + return np.zeros(1) + + def __array_wrap__(self, arr): + r = with_wrap() + r.arr = arr + return r + + a = with_wrap() + x = ncu.minimum(a, a) + assert_equal(x.arr, np.zeros(1)) + + def test_priority(self): + + class A: + def __array__(self): + return np.zeros(1) + + def __array_wrap__(self, arr, context): + r = type(self)() + r.arr = arr + r.context = context + return r + + class B(A): + __array_priority__ = 20. + + class C(A): + __array_priority__ = 40. 
+ + x = np.zeros(1) + a = A() + b = B() + c = C() + f = ncu.minimum + assert_(type(f(x, x)) is np.ndarray) + assert_(type(f(x, a)) is A) + assert_(type(f(x, b)) is B) + assert_(type(f(x, c)) is C) + assert_(type(f(a, x)) is A) + assert_(type(f(b, x)) is B) + assert_(type(f(c, x)) is C) + + assert_(type(f(a, a)) is A) + assert_(type(f(a, b)) is B) + assert_(type(f(b, a)) is B) + assert_(type(f(b, b)) is B) + assert_(type(f(b, c)) is C) + assert_(type(f(c, b)) is C) + assert_(type(f(c, c)) is C) + + assert_(type(ncu.exp(a) is A)) + assert_(type(ncu.exp(b) is B)) + assert_(type(ncu.exp(c) is C)) + + def test_failing_wrap(self): + + class A: + def __array__(self): + return np.zeros(2) + + def __array_wrap__(self, arr, context): + raise RuntimeError + + a = A() + assert_raises(RuntimeError, ncu.maximum, a, a) + assert_raises(RuntimeError, ncu.maximum.reduce, a) + + def test_failing_out_wrap(self): + + singleton = np.array([1.0]) + + class Ok(np.ndarray): + def __array_wrap__(self, obj): + return singleton + + class Bad(np.ndarray): + def __array_wrap__(self, obj): + raise RuntimeError + + ok = np.empty(1).view(Ok) + bad = np.empty(1).view(Bad) + # double-free (segfault) of "ok" if "bad" raises an exception + for i in range(10): + assert_raises(RuntimeError, ncu.frexp, 1, ok, bad) + + def test_none_wrap(self): + # Tests that issue #8507 is resolved. Previously, this would segfault + + class A: + def __array__(self): + return np.zeros(1) + + def __array_wrap__(self, arr, context=None): + return None + + a = A() + assert_equal(ncu.maximum(a, a), None) + + def test_default_prepare(self): + + class with_wrap: + __array_priority__ = 10 + + def __array__(self): + return np.zeros(1) + + def __array_wrap__(self, arr, context): + return arr + + a = with_wrap() + x = ncu.minimum(a, a) + assert_equal(x, np.zeros(1)) + assert_equal(type(x), np.ndarray) + + @pytest.mark.parametrize("use_where", [True, False]) + def test_prepare(self, use_where): + + class with_prepare(np.ndarray): + __array_priority__ = 10 + + def __array_prepare__(self, arr, context): + # make sure we can return a new + return np.array(arr).view(type=with_prepare) + + a = np.array(1).view(type=with_prepare) + if use_where: + x = np.add(a, a, where=np.array(True)) + else: + x = np.add(a, a) + assert_equal(x, np.array(2)) + assert_equal(type(x), with_prepare) + + @pytest.mark.parametrize("use_where", [True, False]) + def test_prepare_out(self, use_where): + + class with_prepare(np.ndarray): + __array_priority__ = 10 + + def __array_prepare__(self, arr, context): + return np.array(arr).view(type=with_prepare) + + a = np.array([1]).view(type=with_prepare) + if use_where: + x = np.add(a, a, a, where=[True]) + else: + x = np.add(a, a, a) + # Returned array is new, because of the strange + # __array_prepare__ above + assert_(not np.shares_memory(x, a)) + assert_equal(x, np.array([2])) + assert_equal(type(x), with_prepare) + + def test_failing_prepare(self): + + class A: + def __array__(self): + return np.zeros(1) + + def __array_prepare__(self, arr, context=None): + raise RuntimeError + + a = A() + assert_raises(RuntimeError, ncu.maximum, a, a) + assert_raises(RuntimeError, ncu.maximum, a, a, where=False) + + def test_array_too_many_args(self): + + class A: + def __array__(self, dtype, context): + return np.zeros(1) + + a = A() + assert_raises_regex(TypeError, '2 required positional', np.sum, a) + + def test_ufunc_override(self): + # check override works even with instance with high priority. 
+ class A: + def __array_ufunc__(self, func, method, *inputs, **kwargs): + return self, func, method, inputs, kwargs + + class MyNDArray(np.ndarray): + __array_priority__ = 100 + + a = A() + b = np.array([1]).view(MyNDArray) + res0 = np.multiply(a, b) + res1 = np.multiply(b, b, out=a) + + # self + assert_equal(res0[0], a) + assert_equal(res1[0], a) + assert_equal(res0[1], np.multiply) + assert_equal(res1[1], np.multiply) + assert_equal(res0[2], '__call__') + assert_equal(res1[2], '__call__') + assert_equal(res0[3], (a, b)) + assert_equal(res1[3], (b, b)) + assert_equal(res0[4], {}) + assert_equal(res1[4], {'out': (a,)}) + + def test_ufunc_override_mro(self): + + # Some multi arg functions for testing. + def tres_mul(a, b, c): + return a * b * c + + def quatro_mul(a, b, c, d): + return a * b * c * d + + # Make these into ufuncs. + three_mul_ufunc = np.frompyfunc(tres_mul, 3, 1) + four_mul_ufunc = np.frompyfunc(quatro_mul, 4, 1) + + class A: + def __array_ufunc__(self, func, method, *inputs, **kwargs): + return "A" + + class ASub(A): + def __array_ufunc__(self, func, method, *inputs, **kwargs): + return "ASub" + + class B: + def __array_ufunc__(self, func, method, *inputs, **kwargs): + return "B" + + class C: + def __init__(self): + self.count = 0 + + def __array_ufunc__(self, func, method, *inputs, **kwargs): + self.count += 1 + return NotImplemented + + class CSub(C): + def __array_ufunc__(self, func, method, *inputs, **kwargs): + self.count += 1 + return NotImplemented + + a = A() + a_sub = ASub() + b = B() + c = C() + + # Standard + res = np.multiply(a, a_sub) + assert_equal(res, "ASub") + res = np.multiply(a_sub, b) + assert_equal(res, "ASub") + + # With 1 NotImplemented + res = np.multiply(c, a) + assert_equal(res, "A") + assert_equal(c.count, 1) + # Check our counter works, so we can trust tests below. + res = np.multiply(c, a) + assert_equal(c.count, 2) + + # Both NotImplemented. + c = C() + c_sub = CSub() + assert_raises(TypeError, np.multiply, c, c_sub) + assert_equal(c.count, 1) + assert_equal(c_sub.count, 1) + c.count = c_sub.count = 0 + assert_raises(TypeError, np.multiply, c_sub, c) + assert_equal(c.count, 1) + assert_equal(c_sub.count, 1) + c.count = 0 + assert_raises(TypeError, np.multiply, c, c) + assert_equal(c.count, 1) + c.count = 0 + assert_raises(TypeError, np.multiply, 2, c) + assert_equal(c.count, 1) + + # Ternary testing. + assert_equal(three_mul_ufunc(a, 1, 2), "A") + assert_equal(three_mul_ufunc(1, a, 2), "A") + assert_equal(three_mul_ufunc(1, 2, a), "A") + + assert_equal(three_mul_ufunc(a, a, 6), "A") + assert_equal(three_mul_ufunc(a, 2, a), "A") + assert_equal(three_mul_ufunc(a, 2, b), "A") + assert_equal(three_mul_ufunc(a, 2, a_sub), "ASub") + assert_equal(three_mul_ufunc(a, a_sub, 3), "ASub") + c.count = 0 + assert_equal(three_mul_ufunc(c, a_sub, 3), "ASub") + assert_equal(c.count, 1) + c.count = 0 + assert_equal(three_mul_ufunc(1, a_sub, c), "ASub") + assert_equal(c.count, 0) + + c.count = 0 + assert_equal(three_mul_ufunc(a, b, c), "A") + assert_equal(c.count, 0) + c_sub.count = 0 + assert_equal(three_mul_ufunc(a, b, c_sub), "A") + assert_equal(c_sub.count, 0) + assert_equal(three_mul_ufunc(1, 2, b), "B") + + assert_raises(TypeError, three_mul_ufunc, 1, 2, c) + assert_raises(TypeError, three_mul_ufunc, c_sub, 2, c) + assert_raises(TypeError, three_mul_ufunc, c_sub, 2, 3) + + # Quaternary testing. 
+ assert_equal(four_mul_ufunc(a, 1, 2, 3), "A") + assert_equal(four_mul_ufunc(1, a, 2, 3), "A") + assert_equal(four_mul_ufunc(1, 1, a, 3), "A") + assert_equal(four_mul_ufunc(1, 1, 2, a), "A") + + assert_equal(four_mul_ufunc(a, b, 2, 3), "A") + assert_equal(four_mul_ufunc(1, a, 2, b), "A") + assert_equal(four_mul_ufunc(b, 1, a, 3), "B") + assert_equal(four_mul_ufunc(a_sub, 1, 2, a), "ASub") + assert_equal(four_mul_ufunc(a, 1, 2, a_sub), "ASub") + + c = C() + c_sub = CSub() + assert_raises(TypeError, four_mul_ufunc, 1, 2, 3, c) + assert_equal(c.count, 1) + c.count = 0 + assert_raises(TypeError, four_mul_ufunc, 1, 2, c_sub, c) + assert_equal(c_sub.count, 1) + assert_equal(c.count, 1) + c2 = C() + c.count = c_sub.count = 0 + assert_raises(TypeError, four_mul_ufunc, 1, c, c_sub, c2) + assert_equal(c_sub.count, 1) + assert_equal(c.count, 1) + assert_equal(c2.count, 0) + c.count = c2.count = c_sub.count = 0 + assert_raises(TypeError, four_mul_ufunc, c2, c, c_sub, c) + assert_equal(c_sub.count, 1) + assert_equal(c.count, 0) + assert_equal(c2.count, 1) + + def test_ufunc_override_methods(self): + + class A: + def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): + return self, ufunc, method, inputs, kwargs + + # __call__ + a = A() + with assert_raises(TypeError): + np.multiply.__call__(1, a, foo='bar', answer=42) + res = np.multiply.__call__(1, a, subok='bar', where=42) + assert_equal(res[0], a) + assert_equal(res[1], np.multiply) + assert_equal(res[2], '__call__') + assert_equal(res[3], (1, a)) + assert_equal(res[4], {'subok': 'bar', 'where': 42}) + + # __call__, wrong args + assert_raises(TypeError, np.multiply, a) + assert_raises(TypeError, np.multiply, a, a, a, a) + assert_raises(TypeError, np.multiply, a, a, sig='a', signature='a') + assert_raises(TypeError, ncu_tests.inner1d, a, a, axis=0, axes=[0, 0]) + + # reduce, positional args + res = np.multiply.reduce(a, 'axis0', 'dtype0', 'out0', 'keep0') + assert_equal(res[0], a) + assert_equal(res[1], np.multiply) + assert_equal(res[2], 'reduce') + assert_equal(res[3], (a,)) + assert_equal(res[4], {'dtype':'dtype0', + 'out': ('out0',), + 'keepdims': 'keep0', + 'axis': 'axis0'}) + + # reduce, kwargs + res = np.multiply.reduce(a, axis='axis0', dtype='dtype0', out='out0', + keepdims='keep0', initial='init0', + where='where0') + assert_equal(res[0], a) + assert_equal(res[1], np.multiply) + assert_equal(res[2], 'reduce') + assert_equal(res[3], (a,)) + assert_equal(res[4], {'dtype':'dtype0', + 'out': ('out0',), + 'keepdims': 'keep0', + 'axis': 'axis0', + 'initial': 'init0', + 'where': 'where0'}) + + # reduce, output equal to None removed, but not other explicit ones, + # even if they are at their default value. + res = np.multiply.reduce(a, 0, None, None, False) + assert_equal(res[4], {'axis': 0, 'dtype': None, 'keepdims': False}) + res = np.multiply.reduce(a, out=None, axis=0, keepdims=True) + assert_equal(res[4], {'axis': 0, 'keepdims': True}) + res = np.multiply.reduce(a, None, out=(None,), dtype=None) + assert_equal(res[4], {'axis': None, 'dtype': None}) + res = np.multiply.reduce(a, 0, None, None, False, 2, True) + assert_equal(res[4], {'axis': 0, 'dtype': None, 'keepdims': False, + 'initial': 2, 'where': True}) + # np._NoValue ignored for initial + res = np.multiply.reduce(a, 0, None, None, False, + np._NoValue, True) + assert_equal(res[4], {'axis': 0, 'dtype': None, 'keepdims': False, + 'where': True}) + # None kept for initial, True for where. 
+ res = np.multiply.reduce(a, 0, None, None, False, None, True) + assert_equal(res[4], {'axis': 0, 'dtype': None, 'keepdims': False, + 'initial': None, 'where': True}) + + # reduce, wrong args + assert_raises(ValueError, np.multiply.reduce, a, out=()) + assert_raises(ValueError, np.multiply.reduce, a, out=('out0', 'out1')) + assert_raises(TypeError, np.multiply.reduce, a, 'axis0', axis='axis0') + + # accumulate, pos args + res = np.multiply.accumulate(a, 'axis0', 'dtype0', 'out0') + assert_equal(res[0], a) + assert_equal(res[1], np.multiply) + assert_equal(res[2], 'accumulate') + assert_equal(res[3], (a,)) + assert_equal(res[4], {'dtype':'dtype0', + 'out': ('out0',), + 'axis': 'axis0'}) + + # accumulate, kwargs + res = np.multiply.accumulate(a, axis='axis0', dtype='dtype0', + out='out0') + assert_equal(res[0], a) + assert_equal(res[1], np.multiply) + assert_equal(res[2], 'accumulate') + assert_equal(res[3], (a,)) + assert_equal(res[4], {'dtype':'dtype0', + 'out': ('out0',), + 'axis': 'axis0'}) + + # accumulate, output equal to None removed. + res = np.multiply.accumulate(a, 0, None, None) + assert_equal(res[4], {'axis': 0, 'dtype': None}) + res = np.multiply.accumulate(a, out=None, axis=0, dtype='dtype1') + assert_equal(res[4], {'axis': 0, 'dtype': 'dtype1'}) + res = np.multiply.accumulate(a, None, out=(None,), dtype=None) + assert_equal(res[4], {'axis': None, 'dtype': None}) + + # accumulate, wrong args + assert_raises(ValueError, np.multiply.accumulate, a, out=()) + assert_raises(ValueError, np.multiply.accumulate, a, + out=('out0', 'out1')) + assert_raises(TypeError, np.multiply.accumulate, a, + 'axis0', axis='axis0') + + # reduceat, pos args + res = np.multiply.reduceat(a, [4, 2], 'axis0', 'dtype0', 'out0') + assert_equal(res[0], a) + assert_equal(res[1], np.multiply) + assert_equal(res[2], 'reduceat') + assert_equal(res[3], (a, [4, 2])) + assert_equal(res[4], {'dtype':'dtype0', + 'out': ('out0',), + 'axis': 'axis0'}) + + # reduceat, kwargs + res = np.multiply.reduceat(a, [4, 2], axis='axis0', dtype='dtype0', + out='out0') + assert_equal(res[0], a) + assert_equal(res[1], np.multiply) + assert_equal(res[2], 'reduceat') + assert_equal(res[3], (a, [4, 2])) + assert_equal(res[4], {'dtype':'dtype0', + 'out': ('out0',), + 'axis': 'axis0'}) + + # reduceat, output equal to None removed. 
+ res = np.multiply.reduceat(a, [4, 2], 0, None, None) + assert_equal(res[4], {'axis': 0, 'dtype': None}) + res = np.multiply.reduceat(a, [4, 2], axis=None, out=None, dtype='dt') + assert_equal(res[4], {'axis': None, 'dtype': 'dt'}) + res = np.multiply.reduceat(a, [4, 2], None, None, out=(None,)) + assert_equal(res[4], {'axis': None, 'dtype': None}) + + # reduceat, wrong args + assert_raises(ValueError, np.multiply.reduce, a, [4, 2], out=()) + assert_raises(ValueError, np.multiply.reduce, a, [4, 2], + out=('out0', 'out1')) + assert_raises(TypeError, np.multiply.reduce, a, [4, 2], + 'axis0', axis='axis0') + + # outer + res = np.multiply.outer(a, 42) + assert_equal(res[0], a) + assert_equal(res[1], np.multiply) + assert_equal(res[2], 'outer') + assert_equal(res[3], (a, 42)) + assert_equal(res[4], {}) + + # outer, wrong args + assert_raises(TypeError, np.multiply.outer, a) + assert_raises(TypeError, np.multiply.outer, a, a, a, a) + assert_raises(TypeError, np.multiply.outer, a, a, sig='a', signature='a') + + # at + res = np.multiply.at(a, [4, 2], 'b0') + assert_equal(res[0], a) + assert_equal(res[1], np.multiply) + assert_equal(res[2], 'at') + assert_equal(res[3], (a, [4, 2], 'b0')) + + # at, wrong args + assert_raises(TypeError, np.multiply.at, a) + assert_raises(TypeError, np.multiply.at, a, a, a, a) + + def test_ufunc_override_out(self): + + class A: + def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): + return kwargs + + class B: + def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): + return kwargs + + a = A() + b = B() + res0 = np.multiply(a, b, 'out_arg') + res1 = np.multiply(a, b, out='out_arg') + res2 = np.multiply(2, b, 'out_arg') + res3 = np.multiply(3, b, out='out_arg') + res4 = np.multiply(a, 4, 'out_arg') + res5 = np.multiply(a, 5, out='out_arg') + + assert_equal(res0['out'][0], 'out_arg') + assert_equal(res1['out'][0], 'out_arg') + assert_equal(res2['out'][0], 'out_arg') + assert_equal(res3['out'][0], 'out_arg') + assert_equal(res4['out'][0], 'out_arg') + assert_equal(res5['out'][0], 'out_arg') + + # ufuncs with multiple output modf and frexp. + res6 = np.modf(a, 'out0', 'out1') + res7 = np.frexp(a, 'out0', 'out1') + assert_equal(res6['out'][0], 'out0') + assert_equal(res6['out'][1], 'out1') + assert_equal(res7['out'][0], 'out0') + assert_equal(res7['out'][1], 'out1') + + # While we're at it, check that default output is never passed on. + assert_(np.sin(a, None) == {}) + assert_(np.sin(a, out=None) == {}) + assert_(np.sin(a, out=(None,)) == {}) + assert_(np.modf(a, None) == {}) + assert_(np.modf(a, None, None) == {}) + assert_(np.modf(a, out=(None, None)) == {}) + with assert_raises(TypeError): + # Out argument must be tuple, since there are multiple outputs. + np.modf(a, out=None) + + # don't give positional and output argument, or too many arguments. + # wrong number of arguments in the tuple is an error too. 
+ assert_raises(TypeError, np.multiply, a, b, 'one', out='two') + assert_raises(TypeError, np.multiply, a, b, 'one', 'two') + assert_raises(ValueError, np.multiply, a, b, out=('one', 'two')) + assert_raises(TypeError, np.multiply, a, out=()) + assert_raises(TypeError, np.modf, a, 'one', out=('two', 'three')) + assert_raises(TypeError, np.modf, a, 'one', 'two', 'three') + assert_raises(ValueError, np.modf, a, out=('one', 'two', 'three')) + assert_raises(ValueError, np.modf, a, out=('one',)) + + def test_ufunc_override_where(self): + + class OverriddenArrayOld(np.ndarray): + + def _unwrap(self, objs): + cls = type(self) + result = [] + for obj in objs: + if isinstance(obj, cls): + obj = np.array(obj) + elif type(obj) != np.ndarray: + return NotImplemented + result.append(obj) + return result + + def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): + + inputs = self._unwrap(inputs) + if inputs is NotImplemented: + return NotImplemented + + kwargs = kwargs.copy() + if "out" in kwargs: + kwargs["out"] = self._unwrap(kwargs["out"]) + if kwargs["out"] is NotImplemented: + return NotImplemented + + r = super().__array_ufunc__(ufunc, method, *inputs, **kwargs) + if r is not NotImplemented: + r = r.view(type(self)) + + return r + + class OverriddenArrayNew(OverriddenArrayOld): + def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): + + kwargs = kwargs.copy() + if "where" in kwargs: + kwargs["where"] = self._unwrap((kwargs["where"], )) + if kwargs["where"] is NotImplemented: + return NotImplemented + else: + kwargs["where"] = kwargs["where"][0] + + r = super().__array_ufunc__(ufunc, method, *inputs, **kwargs) + if r is not NotImplemented: + r = r.view(type(self)) + + return r + + ufunc = np.negative + + array = np.array([1, 2, 3]) + where = np.array([True, False, True]) + expected = ufunc(array, where=where) + + with pytest.raises(TypeError): + ufunc(array, where=where.view(OverriddenArrayOld)) + + result_1 = ufunc( + array, + where=where.view(OverriddenArrayNew) + ) + assert isinstance(result_1, OverriddenArrayNew) + assert np.all(np.array(result_1) == expected, where=where) + + result_2 = ufunc( + array.view(OverriddenArrayNew), + where=where.view(OverriddenArrayNew) + ) + assert isinstance(result_2, OverriddenArrayNew) + assert np.all(np.array(result_2) == expected, where=where) + + def test_ufunc_override_exception(self): + + class A: + def __array_ufunc__(self, *a, **kwargs): + raise ValueError("oops") + + a = A() + assert_raises(ValueError, np.negative, 1, out=a) + assert_raises(ValueError, np.negative, a) + assert_raises(ValueError, np.divide, 1., a) + + def test_ufunc_override_not_implemented(self): + + class A: + def __array_ufunc__(self, *args, **kwargs): + return NotImplemented + + msg = ("operand type(s) all returned NotImplemented from " + "__array_ufunc__(<ufunc 'negative'>, '__call__', <*>): 'A'") + with assert_raises_regex(TypeError, fnmatch.translate(msg)): + np.negative(A()) + + msg = ("operand type(s) all returned NotImplemented from " + "__array_ufunc__(<ufunc 'add'>, '__call__', <*>, <object *>, " + "out=(1,)): 'A', 'object', 'int'") + with assert_raises_regex(TypeError, fnmatch.translate(msg)): + np.add(A(), object(), out=1) + + def test_ufunc_override_disabled(self): + + class OptOut: + __array_ufunc__ = None + + opt_out = OptOut() + + # ufuncs always raise + msg = "operand 'OptOut' does not support ufuncs" + with assert_raises_regex(TypeError, msg): + np.add(opt_out, 1) + with assert_raises_regex(TypeError, msg): + np.add(1, opt_out) + with 
assert_raises_regex(TypeError, msg): + np.negative(opt_out) + + # opt-outs still hold even when other arguments have pathological + # __array_ufunc__ implementations + + class GreedyArray: + def __array_ufunc__(self, *args, **kwargs): + return self + + greedy = GreedyArray() + assert_(np.negative(greedy) is greedy) + with assert_raises_regex(TypeError, msg): + np.add(greedy, opt_out) + with assert_raises_regex(TypeError, msg): + np.add(greedy, 1, out=opt_out) + + def test_gufunc_override(self): + # gufunc are just ufunc instances, but follow a different path, + # so check __array_ufunc__ overrides them properly. + class A: + def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): + return self, ufunc, method, inputs, kwargs + + inner1d = ncu_tests.inner1d + a = A() + res = inner1d(a, a) + assert_equal(res[0], a) + assert_equal(res[1], inner1d) + assert_equal(res[2], '__call__') + assert_equal(res[3], (a, a)) + assert_equal(res[4], {}) + + res = inner1d(1, 1, out=a) + assert_equal(res[0], a) + assert_equal(res[1], inner1d) + assert_equal(res[2], '__call__') + assert_equal(res[3], (1, 1)) + assert_equal(res[4], {'out': (a,)}) + + # wrong number of arguments in the tuple is an error too. + assert_raises(TypeError, inner1d, a, out='two') + assert_raises(TypeError, inner1d, a, a, 'one', out='two') + assert_raises(TypeError, inner1d, a, a, 'one', 'two') + assert_raises(ValueError, inner1d, a, a, out=('one', 'two')) + assert_raises(ValueError, inner1d, a, a, out=()) + + def test_ufunc_override_with_super(self): + # NOTE: this class is used in doc/source/user/basics.subclassing.rst + # if you make any changes here, do update it there too. + class A(np.ndarray): + def __array_ufunc__(self, ufunc, method, *inputs, out=None, **kwargs): + args = [] + in_no = [] + for i, input_ in enumerate(inputs): + if isinstance(input_, A): + in_no.append(i) + args.append(input_.view(np.ndarray)) + else: + args.append(input_) + + outputs = out + out_no = [] + if outputs: + out_args = [] + for j, output in enumerate(outputs): + if isinstance(output, A): + out_no.append(j) + out_args.append(output.view(np.ndarray)) + else: + out_args.append(output) + kwargs['out'] = tuple(out_args) + else: + outputs = (None,) * ufunc.nout + + info = {} + if in_no: + info['inputs'] = in_no + if out_no: + info['outputs'] = out_no + + results = super().__array_ufunc__(ufunc, method, + *args, **kwargs) + if results is NotImplemented: + return NotImplemented + + if method == 'at': + if isinstance(inputs[0], A): + inputs[0].info = info + return + + if ufunc.nout == 1: + results = (results,) + + results = tuple((np.asarray(result).view(A) + if output is None else output) + for result, output in zip(results, outputs)) + if results and isinstance(results[0], A): + results[0].info = info + + return results[0] if len(results) == 1 else results + + class B: + def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): + if any(isinstance(input_, A) for input_ in inputs): + return "A!" + else: + return NotImplemented + + d = np.arange(5.) 
+ # 1 input, 1 output + a = np.arange(5.).view(A) + b = np.sin(a) + check = np.sin(d) + assert_(np.all(check == b)) + assert_equal(b.info, {'inputs': [0]}) + b = np.sin(d, out=(a,)) + assert_(np.all(check == b)) + assert_equal(b.info, {'outputs': [0]}) + assert_(b is a) + a = np.arange(5.).view(A) + b = np.sin(a, out=a) + assert_(np.all(check == b)) + assert_equal(b.info, {'inputs': [0], 'outputs': [0]}) + + # 1 input, 2 outputs + a = np.arange(5.).view(A) + b1, b2 = np.modf(a) + assert_equal(b1.info, {'inputs': [0]}) + b1, b2 = np.modf(d, out=(None, a)) + assert_(b2 is a) + assert_equal(b1.info, {'outputs': [1]}) + a = np.arange(5.).view(A) + b = np.arange(5.).view(A) + c1, c2 = np.modf(a, out=(a, b)) + assert_(c1 is a) + assert_(c2 is b) + assert_equal(c1.info, {'inputs': [0], 'outputs': [0, 1]}) + + # 2 input, 1 output + a = np.arange(5.).view(A) + b = np.arange(5.).view(A) + c = np.add(a, b, out=a) + assert_(c is a) + assert_equal(c.info, {'inputs': [0, 1], 'outputs': [0]}) + # some tests with a non-ndarray subclass + a = np.arange(5.) + b = B() + assert_(a.__array_ufunc__(np.add, '__call__', a, b) is NotImplemented) + assert_(b.__array_ufunc__(np.add, '__call__', a, b) is NotImplemented) + assert_raises(TypeError, np.add, a, b) + a = a.view(A) + assert_(a.__array_ufunc__(np.add, '__call__', a, b) is NotImplemented) + assert_(b.__array_ufunc__(np.add, '__call__', a, b) == "A!") + assert_(np.add(a, b) == "A!") + # regression check for gh-9102 -- tests ufunc.reduce implicitly. + d = np.array([[1, 2, 3], [1, 2, 3]]) + a = d.view(A) + c = a.any() + check = d.any() + assert_equal(c, check) + assert_(c.info, {'inputs': [0]}) + c = a.max() + check = d.max() + assert_equal(c, check) + assert_(c.info, {'inputs': [0]}) + b = np.array(0).view(A) + c = a.max(out=b) + assert_equal(c, check) + assert_(c is b) + assert_(c.info, {'inputs': [0], 'outputs': [0]}) + check = a.max(axis=0) + b = np.zeros_like(check).view(A) + c = a.max(axis=0, out=b) + assert_equal(c, check) + assert_(c is b) + assert_(c.info, {'inputs': [0], 'outputs': [0]}) + # simple explicit tests of reduce, accumulate, reduceat + check = np.add.reduce(d, axis=1) + c = np.add.reduce(a, axis=1) + assert_equal(c, check) + assert_(c.info, {'inputs': [0]}) + b = np.zeros_like(c) + c = np.add.reduce(a, 1, None, b) + assert_equal(c, check) + assert_(c is b) + assert_(c.info, {'inputs': [0], 'outputs': [0]}) + check = np.add.accumulate(d, axis=0) + c = np.add.accumulate(a, axis=0) + assert_equal(c, check) + assert_(c.info, {'inputs': [0]}) + b = np.zeros_like(c) + c = np.add.accumulate(a, 0, None, b) + assert_equal(c, check) + assert_(c is b) + assert_(c.info, {'inputs': [0], 'outputs': [0]}) + indices = [0, 2, 1] + check = np.add.reduceat(d, indices, axis=1) + c = np.add.reduceat(a, indices, axis=1) + assert_equal(c, check) + assert_(c.info, {'inputs': [0]}) + b = np.zeros_like(c) + c = np.add.reduceat(a, indices, 1, None, b) + assert_equal(c, check) + assert_(c is b) + assert_(c.info, {'inputs': [0], 'outputs': [0]}) + # and a few tests for at + d = np.array([[1, 2, 3], [1, 2, 3]]) + check = d.copy() + a = d.copy().view(A) + np.add.at(check, ([0, 1], [0, 2]), 1.) + np.add.at(a, ([0, 1], [0, 2]), 1.) 
+ assert_equal(a, check) + assert_(a.info, {'inputs': [0]}) + b = np.array(1.).view(A) + a = d.copy().view(A) + np.add.at(a, ([0, 1], [0, 2]), b) + assert_equal(a, check) + assert_(a.info, {'inputs': [0, 2]}) + + def test_array_ufunc_direct_call(self): + # This is mainly a regression test for gh-24023 (shouldn't segfault) + a = np.array(1) + with pytest.raises(TypeError): + a.__array_ufunc__() + + # No kwargs means kwargs may be NULL on the C-level + with pytest.raises(TypeError): + a.__array_ufunc__(1, 2) + + # And the same with a valid call: + res = a.__array_ufunc__(np.add, "__call__", a, a) + assert_array_equal(res, a + a) + +class TestChoose: + def test_mixed(self): + c = np.array([True, True]) + a = np.array([True, True]) + assert_equal(np.choose(c, (a, 1)), np.array([1, 1])) + + +class TestRationalFunctions: + def test_lcm(self): + self._test_lcm_inner(np.int16) + self._test_lcm_inner(np.uint16) + + def test_lcm_object(self): + self._test_lcm_inner(np.object_) + + def test_gcd(self): + self._test_gcd_inner(np.int16) + self._test_lcm_inner(np.uint16) + + def test_gcd_object(self): + self._test_gcd_inner(np.object_) + + def _test_lcm_inner(self, dtype): + # basic use + a = np.array([12, 120], dtype=dtype) + b = np.array([20, 200], dtype=dtype) + assert_equal(np.lcm(a, b), [60, 600]) + + if not issubclass(dtype, np.unsignedinteger): + # negatives are ignored + a = np.array([12, -12, 12, -12], dtype=dtype) + b = np.array([20, 20, -20, -20], dtype=dtype) + assert_equal(np.lcm(a, b), [60]*4) + + # reduce + a = np.array([3, 12, 20], dtype=dtype) + assert_equal(np.lcm.reduce([3, 12, 20]), 60) + + # broadcasting, and a test including 0 + a = np.arange(6).astype(dtype) + b = 20 + assert_equal(np.lcm(a, b), [0, 20, 20, 60, 20, 20]) + + def _test_gcd_inner(self, dtype): + # basic use + a = np.array([12, 120], dtype=dtype) + b = np.array([20, 200], dtype=dtype) + assert_equal(np.gcd(a, b), [4, 40]) + + if not issubclass(dtype, np.unsignedinteger): + # negatives are ignored + a = np.array([12, -12, 12, -12], dtype=dtype) + b = np.array([20, 20, -20, -20], dtype=dtype) + assert_equal(np.gcd(a, b), [4]*4) + + # reduce + a = np.array([15, 25, 35], dtype=dtype) + assert_equal(np.gcd.reduce(a), 5) + + # broadcasting, and a test including 0 + a = np.arange(6).astype(dtype) + b = 20 + assert_equal(np.gcd(a, b), [20, 1, 2, 1, 4, 5]) + + def test_lcm_overflow(self): + # verify that we don't overflow when a*b does overflow + big = np.int32(np.iinfo(np.int32).max // 11) + a = 2*big + b = 5*big + assert_equal(np.lcm(a, b), 10*big) + + def test_gcd_overflow(self): + for dtype in (np.int32, np.int64): + # verify that we don't overflow when taking abs(x) + # not relevant for lcm, where the result is unrepresentable anyway + a = dtype(np.iinfo(dtype).min) # negative power of two + q = -(a // 4) + assert_equal(np.gcd(a, q*3), q) + assert_equal(np.gcd(a, -q*3), q) + + def test_decimal(self): + from decimal import Decimal + a = np.array([1, 1, -1, -1]) * Decimal('0.20') + b = np.array([1, -1, 1, -1]) * Decimal('0.12') + + assert_equal(np.gcd(a, b), 4*[Decimal('0.04')]) + assert_equal(np.lcm(a, b), 4*[Decimal('0.60')]) + + def test_float(self): + # not well-defined on float due to rounding errors + assert_raises(TypeError, np.gcd, 0.3, 0.4) + assert_raises(TypeError, np.lcm, 0.3, 0.4) + + def test_builtin_long(self): + # sanity check that array coercion is alright for builtin longs + assert_equal(np.array(2**200).item(), 2**200) + + # expressed as prime factors + a = np.array(2**100 * 3**5) + b = np.array([2**100 * 
5**7, 2**50 * 3**10]) + assert_equal(np.gcd(a, b), [2**100, 2**50 * 3**5]) + assert_equal(np.lcm(a, b), [2**100 * 3**5 * 5**7, 2**100 * 3**10]) + + assert_equal(np.gcd(2**100, 3**100), 1) + + +class TestRoundingFunctions: + + def test_object_direct(self): + """ test direct implementation of these magic methods """ + class C: + def __floor__(self): + return 1 + def __ceil__(self): + return 2 + def __trunc__(self): + return 3 + + arr = np.array([C(), C()]) + assert_equal(np.floor(arr), [1, 1]) + assert_equal(np.ceil(arr), [2, 2]) + assert_equal(np.trunc(arr), [3, 3]) + + def test_object_indirect(self): + """ test implementations via __float__ """ + class C: + def __float__(self): + return -2.5 + + arr = np.array([C(), C()]) + assert_equal(np.floor(arr), [-3, -3]) + assert_equal(np.ceil(arr), [-2, -2]) + with pytest.raises(TypeError): + np.trunc(arr) # consistent with math.trunc + + def test_fraction(self): + f = Fraction(-4, 3) + assert_equal(np.floor(f), -2) + assert_equal(np.ceil(f), -1) + assert_equal(np.trunc(f), -1) + + +class TestComplexFunctions: + funcs = [np.arcsin, np.arccos, np.arctan, np.arcsinh, np.arccosh, + np.arctanh, np.sin, np.cos, np.tan, np.exp, + np.exp2, np.log, np.sqrt, np.log10, np.log2, + np.log1p] + + def test_it(self): + for f in self.funcs: + if f is np.arccosh: + x = 1.5 + else: + x = .5 + fr = f(x) + fz = f(complex(x)) + assert_almost_equal(fz.real, fr, err_msg='real part %s' % f) + assert_almost_equal(fz.imag, 0., err_msg='imag part %s' % f) + + @pytest.mark.xfail(IS_MUSL, reason="gh23049") + @pytest.mark.xfail(IS_WASM, reason="doesn't work") + def test_precisions_consistent(self): + z = 1 + 1j + for f in self.funcs: + fcf = f(np.csingle(z)) + fcd = f(np.cdouble(z)) + fcl = f(np.clongdouble(z)) + assert_almost_equal(fcf, fcd, decimal=6, err_msg='fch-fcd %s' % f) + assert_almost_equal(fcl, fcd, decimal=15, err_msg='fch-fcl %s' % f) + + @pytest.mark.xfail(IS_MUSL, reason="gh23049") + @pytest.mark.xfail(IS_WASM, reason="doesn't work") + def test_branch_cuts(self): + # check branch cuts and continuity on them + _check_branch_cut(np.log, -0.5, 1j, 1, -1, True) + _check_branch_cut(np.log2, -0.5, 1j, 1, -1, True) + _check_branch_cut(np.log10, -0.5, 1j, 1, -1, True) + _check_branch_cut(np.log1p, -1.5, 1j, 1, -1, True) + _check_branch_cut(np.sqrt, -0.5, 1j, 1, -1, True) + + _check_branch_cut(np.arcsin, [ -2, 2], [1j, 1j], 1, -1, True) + _check_branch_cut(np.arccos, [ -2, 2], [1j, 1j], 1, -1, True) + _check_branch_cut(np.arctan, [0-2j, 2j], [1, 1], -1, 1, True) + + _check_branch_cut(np.arcsinh, [0-2j, 2j], [1, 1], -1, 1, True) + _check_branch_cut(np.arccosh, [ -1, 0.5], [1j, 1j], 1, -1, True) + _check_branch_cut(np.arctanh, [ -2, 2], [1j, 1j], 1, -1, True) + + # check against bogus branch cuts: assert continuity between quadrants + _check_branch_cut(np.arcsin, [0-2j, 2j], [ 1, 1], 1, 1) + _check_branch_cut(np.arccos, [0-2j, 2j], [ 1, 1], 1, 1) + _check_branch_cut(np.arctan, [ -2, 2], [1j, 1j], 1, 1) + + _check_branch_cut(np.arcsinh, [ -2, 2, 0], [1j, 1j, 1], 1, 1) + _check_branch_cut(np.arccosh, [0-2j, 2j, 2], [1, 1, 1j], 1, 1) + _check_branch_cut(np.arctanh, [0-2j, 2j, 0], [1, 1, 1j], 1, 1) + + @pytest.mark.xfail(IS_MUSL, reason="gh23049") + @pytest.mark.xfail(IS_WASM, reason="doesn't work") + def test_branch_cuts_complex64(self): + # check branch cuts and continuity on them + _check_branch_cut(np.log, -0.5, 1j, 1, -1, True, np.complex64) + _check_branch_cut(np.log2, -0.5, 1j, 1, -1, True, np.complex64) + _check_branch_cut(np.log10, -0.5, 1j, 1, -1, True, np.complex64) 
+ _check_branch_cut(np.log1p, -1.5, 1j, 1, -1, True, np.complex64) + _check_branch_cut(np.sqrt, -0.5, 1j, 1, -1, True, np.complex64) + + _check_branch_cut(np.arcsin, [ -2, 2], [1j, 1j], 1, -1, True, np.complex64) + _check_branch_cut(np.arccos, [ -2, 2], [1j, 1j], 1, -1, True, np.complex64) + _check_branch_cut(np.arctan, [0-2j, 2j], [1, 1], -1, 1, True, np.complex64) + + _check_branch_cut(np.arcsinh, [0-2j, 2j], [1, 1], -1, 1, True, np.complex64) + _check_branch_cut(np.arccosh, [ -1, 0.5], [1j, 1j], 1, -1, True, np.complex64) + _check_branch_cut(np.arctanh, [ -2, 2], [1j, 1j], 1, -1, True, np.complex64) + + # check against bogus branch cuts: assert continuity between quadrants + _check_branch_cut(np.arcsin, [0-2j, 2j], [ 1, 1], 1, 1, False, np.complex64) + _check_branch_cut(np.arccos, [0-2j, 2j], [ 1, 1], 1, 1, False, np.complex64) + _check_branch_cut(np.arctan, [ -2, 2], [1j, 1j], 1, 1, False, np.complex64) + + _check_branch_cut(np.arcsinh, [ -2, 2, 0], [1j, 1j, 1], 1, 1, False, np.complex64) + _check_branch_cut(np.arccosh, [0-2j, 2j, 2], [1, 1, 1j], 1, 1, False, np.complex64) + _check_branch_cut(np.arctanh, [0-2j, 2j, 0], [1, 1, 1j], 1, 1, False, np.complex64) + + def test_against_cmath(self): + import cmath + + points = [-1-1j, -1+1j, +1-1j, +1+1j] + name_map = {'arcsin': 'asin', 'arccos': 'acos', 'arctan': 'atan', + 'arcsinh': 'asinh', 'arccosh': 'acosh', 'arctanh': 'atanh'} + atol = 4*np.finfo(complex).eps + for func in self.funcs: + fname = func.__name__.split('.')[-1] + cname = name_map.get(fname, fname) + try: + cfunc = getattr(cmath, cname) + except AttributeError: + continue + for p in points: + a = complex(func(np.complex_(p))) + b = cfunc(p) + assert_( + abs(a - b) < atol, + "%s %s: %s; cmath: %s" % (fname, p, a, b) + ) + + @pytest.mark.xfail( + # manylinux2014 uses glibc2.17 + _glibc_older_than("2.18"), + reason="Older glibc versions are imprecise (maybe passes with SIMD?)" + ) + @pytest.mark.xfail(IS_MUSL, reason="gh23049") + @pytest.mark.xfail(IS_WASM, reason="doesn't work") + @pytest.mark.parametrize('dtype', [np.complex64, np.complex_, np.longcomplex]) + def test_loss_of_precision(self, dtype): + """Check loss of precision in complex arc* functions""" + + # Check against known-good functions + + info = np.finfo(dtype) + real_dtype = dtype(0.).real.dtype + eps = info.eps + + def check(x, rtol): + x = x.astype(real_dtype) + + z = x.astype(dtype) + d = np.absolute(np.arcsinh(x)/np.arcsinh(z).real - 1) + assert_(np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(), + 'arcsinh')) + + z = (1j*x).astype(dtype) + d = np.absolute(np.arcsinh(x)/np.arcsin(z).imag - 1) + assert_(np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(), + 'arcsin')) + + z = x.astype(dtype) + d = np.absolute(np.arctanh(x)/np.arctanh(z).real - 1) + assert_(np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(), + 'arctanh')) + + z = (1j*x).astype(dtype) + d = np.absolute(np.arctanh(x)/np.arctan(z).imag - 1) + assert_(np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(), + 'arctan')) + + # The switchover was chosen as 1e-3; hence there can be up to + # ~eps/1e-3 of relative cancellation error before it + + x_series = np.logspace(-20, -3.001, 200) + x_basic = np.logspace(-2.999, 0, 10, endpoint=False) + + if dtype is np.longcomplex: + if bad_arcsinh(): + pytest.skip("Trig functions of np.longcomplex values known " + "to be inaccurate on aarch64 and PPC for some " + "compilation configurations.") + # It's not guaranteed that the system-provided arc functions + # are accurate down to a few 
epsilons. (Eg. on Linux 64-bit) + # So, give more leeway for long complex tests here: + check(x_series, 50.0*eps) + else: + check(x_series, 2.1*eps) + check(x_basic, 2.0*eps/1e-3) + + # Check a few points + + z = np.array([1e-5*(1+1j)], dtype=dtype) + p = 9.999999999333333333e-6 + 1.000000000066666666e-5j + d = np.absolute(1-np.arctanh(z)/p) + assert_(np.all(d < 1e-15)) + + p = 1.0000000000333333333e-5 + 9.999999999666666667e-6j + d = np.absolute(1-np.arcsinh(z)/p) + assert_(np.all(d < 1e-15)) + + p = 9.999999999333333333e-6j + 1.000000000066666666e-5 + d = np.absolute(1-np.arctan(z)/p) + assert_(np.all(d < 1e-15)) + + p = 1.0000000000333333333e-5j + 9.999999999666666667e-6 + d = np.absolute(1-np.arcsin(z)/p) + assert_(np.all(d < 1e-15)) + + # Check continuity across switchover points + + def check(func, z0, d=1): + z0 = np.asarray(z0, dtype=dtype) + zp = z0 + abs(z0) * d * eps * 2 + zm = z0 - abs(z0) * d * eps * 2 + assert_(np.all(zp != zm), (zp, zm)) + + # NB: the cancellation error at the switchover is at least eps + good = (abs(func(zp) - func(zm)) < 2*eps) + assert_(np.all(good), (func, z0[~good])) + + for func in (np.arcsinh, np.arcsinh, np.arcsin, np.arctanh, np.arctan): + pts = [rp+1j*ip for rp in (-1e-3, 0, 1e-3) for ip in(-1e-3, 0, 1e-3) + if rp != 0 or ip != 0] + check(func, pts, 1) + check(func, pts, 1j) + check(func, pts, 1+1j) + + @np.errstate(all="ignore") + def test_promotion_corner_cases(self): + for func in self.funcs: + assert func(np.float16(1)).dtype == np.float16 + # Integer to low precision float promotion is a dubious choice: + assert func(np.uint8(1)).dtype == np.float16 + assert func(np.int16(1)).dtype == np.float32 + + +class TestAttributes: + def test_attributes(self): + add = ncu.add + assert_equal(add.__name__, 'add') + assert_(add.ntypes >= 18) # don't fail if types added + assert_('ii->i' in add.types) + assert_equal(add.nin, 2) + assert_equal(add.nout, 1) + assert_equal(add.identity, 0) + + def test_doc(self): + # don't bother checking the long list of kwargs, which are likely to + # change + assert_(ncu.add.__doc__.startswith( + "add(x1, x2, /, out=None, *, where=True")) + assert_(ncu.frexp.__doc__.startswith( + "frexp(x[, out1, out2], / [, out=(None, None)], *, where=True")) + + +class TestSubclass: + + def test_subclass_op(self): + + class simple(np.ndarray): + def __new__(subtype, shape): + self = np.ndarray.__new__(subtype, shape, dtype=object) + self.fill(0) + return self + + a = simple((3, 4)) + assert_equal(a+a, a) + + +class TestFrompyfunc: + + def test_identity(self): + def mul(a, b): + return a * b + + # with identity=value + mul_ufunc = np.frompyfunc(mul, nin=2, nout=1, identity=1) + assert_equal(mul_ufunc.reduce([2, 3, 4]), 24) + assert_equal(mul_ufunc.reduce(np.ones((2, 2)), axis=(0, 1)), 1) + assert_equal(mul_ufunc.reduce([]), 1) + + # with identity=None (reorderable) + mul_ufunc = np.frompyfunc(mul, nin=2, nout=1, identity=None) + assert_equal(mul_ufunc.reduce([2, 3, 4]), 24) + assert_equal(mul_ufunc.reduce(np.ones((2, 2)), axis=(0, 1)), 1) + assert_raises(ValueError, lambda: mul_ufunc.reduce([])) + + # with no identity (not reorderable) + mul_ufunc = np.frompyfunc(mul, nin=2, nout=1) + assert_equal(mul_ufunc.reduce([2, 3, 4]), 24) + assert_raises(ValueError, lambda: mul_ufunc.reduce(np.ones((2, 2)), axis=(0, 1))) + assert_raises(ValueError, lambda: mul_ufunc.reduce([])) + + +def _check_branch_cut(f, x0, dx, re_sign=1, im_sign=-1, sig_zero_ok=False, + dtype=complex): + """ + Check for a branch cut in a function. 
+ + Assert that `x0` lies on a branch cut of function `f` and `f` is + continuous from the direction `dx`. + + Parameters + ---------- + f : func + Function to check + x0 : array-like + Point on branch cut + dx : array-like + Direction to check continuity in + re_sign, im_sign : {1, -1} + Change of sign of the real or imaginary part expected + sig_zero_ok : bool + Whether to check if the branch cut respects signed zero (if applicable) + dtype : dtype + Dtype to check (should be complex) + + """ + x0 = np.atleast_1d(x0).astype(dtype) + dx = np.atleast_1d(dx).astype(dtype) + + if np.dtype(dtype).char == 'F': + scale = np.finfo(dtype).eps * 1e2 + atol = np.float32(1e-2) + else: + scale = np.finfo(dtype).eps * 1e3 + atol = 1e-4 + + y0 = f(x0) + yp = f(x0 + dx*scale*np.absolute(x0)/np.absolute(dx)) + ym = f(x0 - dx*scale*np.absolute(x0)/np.absolute(dx)) + + assert_(np.all(np.absolute(y0.real - yp.real) < atol), (y0, yp)) + assert_(np.all(np.absolute(y0.imag - yp.imag) < atol), (y0, yp)) + assert_(np.all(np.absolute(y0.real - ym.real*re_sign) < atol), (y0, ym)) + assert_(np.all(np.absolute(y0.imag - ym.imag*im_sign) < atol), (y0, ym)) + + if sig_zero_ok: + # check that signed zeros also work as a displacement + jr = (x0.real == 0) & (dx.real != 0) + ji = (x0.imag == 0) & (dx.imag != 0) + if np.any(jr): + x = x0[jr] + x.real = np.NZERO + ym = f(x) + assert_(np.all(np.absolute(y0[jr].real - ym.real*re_sign) < atol), (y0[jr], ym)) + assert_(np.all(np.absolute(y0[jr].imag - ym.imag*im_sign) < atol), (y0[jr], ym)) + + if np.any(ji): + x = x0[ji] + x.imag = np.NZERO + ym = f(x) + assert_(np.all(np.absolute(y0[ji].real - ym.real*re_sign) < atol), (y0[ji], ym)) + assert_(np.all(np.absolute(y0[ji].imag - ym.imag*im_sign) < atol), (y0[ji], ym)) + +def test_copysign(): + assert_(np.copysign(1, -1) == -1) + with np.errstate(divide="ignore"): + assert_(1 / np.copysign(0, -1) < 0) + assert_(1 / np.copysign(0, 1) > 0) + assert_(np.signbit(np.copysign(np.nan, -1))) + assert_(not np.signbit(np.copysign(np.nan, 1))) + +def _test_nextafter(t): + one = t(1) + two = t(2) + zero = t(0) + eps = np.finfo(t).eps + assert_(np.nextafter(one, two) - one == eps) + assert_(np.nextafter(one, zero) - one < 0) + assert_(np.isnan(np.nextafter(np.nan, one))) + assert_(np.isnan(np.nextafter(one, np.nan))) + assert_(np.nextafter(one, one) == one) + +def test_nextafter(): + return _test_nextafter(np.float64) + + +def test_nextafterf(): + return _test_nextafter(np.float32) + + +@pytest.mark.skipif(np.finfo(np.double) == np.finfo(np.longdouble), + reason="long double is same as double") +@pytest.mark.xfail(condition=platform.machine().startswith("ppc64"), + reason="IBM double double") +def test_nextafterl(): + return _test_nextafter(np.longdouble) + + +def test_nextafter_0(): + for t, direction in itertools.product(np.sctypes['float'], (1, -1)): + # The value of tiny for double double is NaN, so we need to pass the + # assert + with suppress_warnings() as sup: + sup.filter(UserWarning) + if not np.isnan(np.finfo(t).tiny): + tiny = np.finfo(t).tiny + assert_( + 0. 
< direction * np.nextafter(t(0), t(direction)) < tiny) + assert_equal(np.nextafter(t(0), t(direction)) / t(2.1), direction * 0.0) + +def _test_spacing(t): + one = t(1) + eps = np.finfo(t).eps + nan = t(np.nan) + inf = t(np.inf) + with np.errstate(invalid='ignore'): + assert_equal(np.spacing(one), eps) + assert_(np.isnan(np.spacing(nan))) + assert_(np.isnan(np.spacing(inf))) + assert_(np.isnan(np.spacing(-inf))) + assert_(np.spacing(t(1e30)) != 0) + +def test_spacing(): + return _test_spacing(np.float64) + +def test_spacingf(): + return _test_spacing(np.float32) + + +@pytest.mark.skipif(np.finfo(np.double) == np.finfo(np.longdouble), + reason="long double is same as double") +@pytest.mark.xfail(condition=platform.machine().startswith("ppc64"), + reason="IBM double double") +def test_spacingl(): + return _test_spacing(np.longdouble) + +def test_spacing_gfortran(): + # Reference from this fortran file, built with gfortran 4.3.3 on linux + # 32bits: + # PROGRAM test_spacing + # INTEGER, PARAMETER :: SGL = SELECTED_REAL_KIND(p=6, r=37) + # INTEGER, PARAMETER :: DBL = SELECTED_REAL_KIND(p=13, r=200) + # + # WRITE(*,*) spacing(0.00001_DBL) + # WRITE(*,*) spacing(1.0_DBL) + # WRITE(*,*) spacing(1000._DBL) + # WRITE(*,*) spacing(10500._DBL) + # + # WRITE(*,*) spacing(0.00001_SGL) + # WRITE(*,*) spacing(1.0_SGL) + # WRITE(*,*) spacing(1000._SGL) + # WRITE(*,*) spacing(10500._SGL) + # END PROGRAM + ref = {np.float64: [1.69406589450860068E-021, + 2.22044604925031308E-016, + 1.13686837721616030E-013, + 1.81898940354585648E-012], + np.float32: [9.09494702E-13, + 1.19209290E-07, + 6.10351563E-05, + 9.76562500E-04]} + + for dt, dec_ in zip([np.float32, np.float64], (10, 20)): + x = np.array([1e-5, 1, 1000, 10500], dtype=dt) + assert_array_almost_equal(np.spacing(x), ref[dt], decimal=dec_) + +def test_nextafter_vs_spacing(): + # XXX: spacing does not handle long double yet + for t in [np.float32, np.float64]: + for _f in [1, 1e-5, 1000]: + f = t(_f) + f1 = t(_f + 1) + assert_(np.nextafter(f, f1) - f == np.spacing(f)) + +def test_pos_nan(): + """Check np.nan is a positive nan.""" + assert_(np.signbit(np.nan) == 0) + +def test_reduceat(): + """Test bug in reduceat when structured arrays are not copied.""" + db = np.dtype([('name', 'S11'), ('time', np.int64), ('value', np.float32)]) + a = np.empty([100], dtype=db) + a['name'] = 'Simple' + a['time'] = 10 + a['value'] = 100 + indx = [0, 7, 15, 25] + + h2 = [] + val1 = indx[0] + for val2 in indx[1:]: + h2.append(np.add.reduce(a['value'][val1:val2])) + val1 = val2 + h2.append(np.add.reduce(a['value'][val1:])) + h2 = np.array(h2) + + # test buffered -- this should work + h1 = np.add.reduceat(a['value'], indx) + assert_array_almost_equal(h1, h2) + + # This is when the error occurs. 
+ # test no buffer + np.setbufsize(32) + h1 = np.add.reduceat(a['value'], indx) + np.setbufsize(np.UFUNC_BUFSIZE_DEFAULT) + assert_array_almost_equal(h1, h2) + +def test_reduceat_empty(): + """Reduceat should work with empty arrays""" + indices = np.array([], 'i4') + x = np.array([], 'f8') + result = np.add.reduceat(x, indices) + assert_equal(result.dtype, x.dtype) + assert_equal(result.shape, (0,)) + # Another case with a slightly different zero-sized shape + x = np.ones((5, 2)) + result = np.add.reduceat(x, [], axis=0) + assert_equal(result.dtype, x.dtype) + assert_equal(result.shape, (0, 2)) + result = np.add.reduceat(x, [], axis=1) + assert_equal(result.dtype, x.dtype) + assert_equal(result.shape, (5, 0)) + +def test_complex_nan_comparisons(): + nans = [complex(np.nan, 0), complex(0, np.nan), complex(np.nan, np.nan)] + fins = [complex(1, 0), complex(-1, 0), complex(0, 1), complex(0, -1), + complex(1, 1), complex(-1, -1), complex(0, 0)] + + with np.errstate(invalid='ignore'): + for x in nans + fins: + x = np.array([x]) + for y in nans + fins: + y = np.array([y]) + + if np.isfinite(x) and np.isfinite(y): + continue + + assert_equal(x < y, False, err_msg="%r < %r" % (x, y)) + assert_equal(x > y, False, err_msg="%r > %r" % (x, y)) + assert_equal(x <= y, False, err_msg="%r <= %r" % (x, y)) + assert_equal(x >= y, False, err_msg="%r >= %r" % (x, y)) + assert_equal(x == y, False, err_msg="%r == %r" % (x, y)) + + +def test_rint_big_int(): + # np.rint bug for large integer values on Windows 32-bit and MKL + # https://github.com/numpy/numpy/issues/6685 + val = 4607998452777363968 + # This is exactly representable in floating point + assert_equal(val, int(float(val))) + # Rint should not change the value + assert_equal(val, np.rint(val)) + + +@pytest.mark.parametrize('ftype', [np.float32, np.float64]) +def test_memoverlap_accumulate(ftype): + # Reproduces bug https://github.com/numpy/numpy/issues/15597 + arr = np.array([0.61, 0.60, 0.77, 0.41, 0.19], dtype=ftype) + out_max = np.array([0.61, 0.61, 0.77, 0.77, 0.77], dtype=ftype) + out_min = np.array([0.61, 0.60, 0.60, 0.41, 0.19], dtype=ftype) + assert_equal(np.maximum.accumulate(arr), out_max) + assert_equal(np.minimum.accumulate(arr), out_min) + +@pytest.mark.parametrize("ufunc, dtype", [ + (ufunc, t[0]) + for ufunc in UFUNCS_BINARY_ACC + for t in ufunc.types + if t[-1] == '?' and t[0] not in 'DFGMmO' +]) +def test_memoverlap_accumulate_cmp(ufunc, dtype): + if ufunc.signature: + pytest.skip('For generic signatures only') + for size in (2, 8, 32, 64, 128, 256): + arr = np.array([0, 1, 1]*size, dtype=dtype) + acc = ufunc.accumulate(arr, dtype='?') + acc_u8 = acc.view(np.uint8) + exp = np.array(list(itertools.accumulate(arr, ufunc)), dtype=np.uint8) + assert_equal(exp, acc_u8) + +@pytest.mark.parametrize("ufunc, dtype", [ + (ufunc, t[0]) + for ufunc in UFUNCS_BINARY_ACC + for t in ufunc.types + if t[0] == t[1] and t[0] == t[-1] and t[0] not in 'DFGMmO?' 
+]) +def test_memoverlap_accumulate_symmetric(ufunc, dtype): + if ufunc.signature: + pytest.skip('For generic signatures only') + with np.errstate(all='ignore'): + for size in (2, 8, 32, 64, 128, 256): + arr = np.array([0, 1, 2]*size).astype(dtype) + acc = ufunc.accumulate(arr, dtype=dtype) + exp = np.array(list(itertools.accumulate(arr, ufunc)), dtype=dtype) + assert_equal(exp, acc) + +def test_signaling_nan_exceptions(): + with assert_no_warnings(): + a = np.ndarray(shape=(), dtype='float32', buffer=b'\x00\xe0\xbf\xff') + np.isnan(a) + +@pytest.mark.parametrize("arr", [ + np.arange(2), + np.matrix([0, 1]), + np.matrix([[0, 1], [2, 5]]), + ]) +def test_outer_subclass_preserve(arr): + # for gh-8661 + class foo(np.ndarray): pass + actual = np.multiply.outer(arr.view(foo), arr.view(foo)) + assert actual.__class__.__name__ == 'foo' + +def test_outer_bad_subclass(): + class BadArr1(np.ndarray): + def __array_finalize__(self, obj): + # The outer call reshapes to 3 dims, try to do a bad reshape. + if self.ndim == 3: + self.shape = self.shape + (1,) + + def __array_prepare__(self, obj, context=None): + return obj + + class BadArr2(np.ndarray): + def __array_finalize__(self, obj): + if isinstance(obj, BadArr2): + # outer inserts 1-sized dims. In that case disturb them. + if self.shape[-1] == 1: + self.shape = self.shape[::-1] + + def __array_prepare__(self, obj, context=None): + return obj + + for cls in [BadArr1, BadArr2]: + arr = np.ones((2, 3)).view(cls) + with assert_raises(TypeError) as a: + # The first array gets reshaped (not the second one) + np.add.outer(arr, [1, 2]) + + # This actually works, since we only see the reshaping error: + arr = np.ones((2, 3)).view(cls) + assert type(np.add.outer([1, 2], arr)) is cls + +def test_outer_exceeds_maxdims(): + deep = np.ones((1,) * 17) + with assert_raises(ValueError): + np.add.outer(deep, deep) + +def test_bad_legacy_ufunc_silent_errors(): + # legacy ufuncs can't report errors and NumPy can't check if the GIL + # is released. So NumPy has to check after the GIL is released just to + # cover all bases. `np.power` uses/used to use this. + arr = np.arange(3).astype(np.float64) + + with pytest.raises(RuntimeError, match=r"How unexpected :\)!"): + ncu_tests.always_error(arr, arr) + + with pytest.raises(RuntimeError, match=r"How unexpected :\)!"): + # not contiguous means the fast-path cannot be taken + non_contig = arr.repeat(20).reshape(-1, 6)[:, ::2] + ncu_tests.always_error(non_contig, arr) + + with pytest.raises(RuntimeError, match=r"How unexpected :\)!"): + ncu_tests.always_error.outer(arr, arr) + + with pytest.raises(RuntimeError, match=r"How unexpected :\)!"): + ncu_tests.always_error.reduce(arr) + + with pytest.raises(RuntimeError, match=r"How unexpected :\)!"): + ncu_tests.always_error.reduceat(arr, [0, 1]) + + with pytest.raises(RuntimeError, match=r"How unexpected :\)!"): + ncu_tests.always_error.accumulate(arr) + + with pytest.raises(RuntimeError, match=r"How unexpected :\)!"): + ncu_tests.always_error.at(arr, [0, 1, 2], arr) + + +@pytest.mark.parametrize('x1', [np.arange(3.0), [0.0, 1.0, 2.0]]) +def test_bad_legacy_gufunc_silent_errors(x1): + # Verify that an exception raised in a gufunc loop propagates correctly. + # The signature of always_error_gufunc is '(i),()->()'. 
+ with pytest.raises(RuntimeError, match=r"How unexpected :\)!"): + ncu_tests.always_error_gufunc(x1, 0.0) diff --git a/.venv/lib/python3.12/site-packages/numpy/core/tests/test_umath_accuracy.py b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_umath_accuracy.py new file mode 100644 index 00000000..6ee4d2fe --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_umath_accuracy.py @@ -0,0 +1,75 @@ +import numpy as np +import os +from os import path +import sys +import pytest +from ctypes import c_longlong, c_double, c_float, c_int, cast, pointer, POINTER +from numpy.testing import assert_array_max_ulp +from numpy.testing._private.utils import _glibc_older_than +from numpy.core._multiarray_umath import __cpu_features__ + +UNARY_UFUNCS = [obj for obj in np.core.umath.__dict__.values() if + isinstance(obj, np.ufunc)] +UNARY_OBJECT_UFUNCS = [uf for uf in UNARY_UFUNCS if "O->O" in uf.types] +UNARY_OBJECT_UFUNCS.remove(getattr(np, 'invert')) + +IS_AVX = __cpu_features__.get('AVX512F', False) or \ + (__cpu_features__.get('FMA3', False) and __cpu_features__.get('AVX2', False)) +# only run on linux with AVX, also avoid old glibc (numpy/numpy#20448). +runtest = (sys.platform.startswith('linux') + and IS_AVX and not _glibc_older_than("2.17")) +platform_skip = pytest.mark.skipif(not runtest, + reason="avoid testing inconsistent platform " + "library implementations") + +# convert string to hex function taken from: +# https://stackoverflow.com/questions/1592158/convert-hex-to-float # +def convert(s, datatype="np.float32"): + i = int(s, 16) # convert from hex to a Python int + if (datatype == "np.float64"): + cp = pointer(c_longlong(i)) # make this into a c long long integer + fp = cast(cp, POINTER(c_double)) # cast the int pointer to a double pointer + else: + cp = pointer(c_int(i)) # make this into a c integer + fp = cast(cp, POINTER(c_float)) # cast the int pointer to a float pointer + + return fp.contents.value # dereference the pointer, get the float + +str_to_float = np.vectorize(convert) + +class TestAccuracy: + @platform_skip + def test_validate_transcendentals(self): + with np.errstate(all='ignore'): + data_dir = path.join(path.dirname(__file__), 'data') + files = os.listdir(data_dir) + files = list(filter(lambda f: f.endswith('.csv'), files)) + for filename in files: + filepath = path.join(data_dir, filename) + with open(filepath) as fid: + file_without_comments = (r for r in fid if not r[0] in ('$', '#')) + data = np.genfromtxt(file_without_comments, + dtype=('|S39','|S39','|S39',int), + names=('type','input','output','ulperr'), + delimiter=',', + skip_header=1) + npname = path.splitext(filename)[0].split('-')[3] + npfunc = getattr(np, npname) + for datatype in np.unique(data['type']): + data_subset = data[data['type'] == datatype] + inval = np.array(str_to_float(data_subset['input'].astype(str), data_subset['type'].astype(str)), dtype=eval(datatype)) + outval = np.array(str_to_float(data_subset['output'].astype(str), data_subset['type'].astype(str)), dtype=eval(datatype)) + perm = np.random.permutation(len(inval)) + inval = inval[perm] + outval = outval[perm] + maxulperr = data_subset['ulperr'].max() + assert_array_max_ulp(npfunc(inval), outval, maxulperr) + + @pytest.mark.parametrize("ufunc", UNARY_OBJECT_UFUNCS) + def test_validate_fp16_transcendentals(self, ufunc): + with np.errstate(all='ignore'): + arr = np.arange(65536, dtype=np.int16) + datafp16 = np.frombuffer(arr.tobytes(), dtype=np.float16) + datafp32 = datafp16.astype(np.float32) + 
assert_array_max_ulp(ufunc(datafp16), ufunc(datafp32), + maxulp=1, dtype=np.float16) diff --git a/.venv/lib/python3.12/site-packages/numpy/core/tests/test_umath_complex.py b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_umath_complex.py new file mode 100644 index 00000000..e5430058 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_umath_complex.py @@ -0,0 +1,622 @@ +import sys +import platform +import pytest + +import numpy as np +# import the c-extension module directly since _arg is not exported via umath +import numpy.core._multiarray_umath as ncu +from numpy.testing import ( + assert_raises, assert_equal, assert_array_equal, assert_almost_equal, assert_array_max_ulp + ) + +# TODO: branch cuts (use Pauli code) +# TODO: conj 'symmetry' +# TODO: FPU exceptions + +# At least on Windows the results of many complex functions are not conforming +# to the C99 standard. See ticket 1574. +# Ditto for Solaris (ticket 1642) and OS X on PowerPC. +#FIXME: this will probably change when we require full C99 campatibility +with np.errstate(all='ignore'): + functions_seem_flaky = ((np.exp(complex(np.inf, 0)).imag != 0) + or (np.log(complex(np.NZERO, 0)).imag != np.pi)) +# TODO: replace with a check on whether platform-provided C99 funcs are used +xfail_complex_tests = (not sys.platform.startswith('linux') or functions_seem_flaky) + +# TODO This can be xfail when the generator functions are got rid of. +platform_skip = pytest.mark.skipif(xfail_complex_tests, + reason="Inadequate C99 complex support") + + + +class TestCexp: + def test_simple(self): + check = check_complex_value + f = np.exp + + check(f, 1, 0, np.exp(1), 0, False) + check(f, 0, 1, np.cos(1), np.sin(1), False) + + ref = np.exp(1) * complex(np.cos(1), np.sin(1)) + check(f, 1, 1, ref.real, ref.imag, False) + + @platform_skip + def test_special_values(self): + # C99: Section G 6.3.1 + + check = check_complex_value + f = np.exp + + # cexp(+-0 + 0i) is 1 + 0i + check(f, np.PZERO, 0, 1, 0, False) + check(f, np.NZERO, 0, 1, 0, False) + + # cexp(x + infi) is nan + nani for finite x and raises 'invalid' FPU + # exception + check(f, 1, np.inf, np.nan, np.nan) + check(f, -1, np.inf, np.nan, np.nan) + check(f, 0, np.inf, np.nan, np.nan) + + # cexp(inf + 0i) is inf + 0i + check(f, np.inf, 0, np.inf, 0) + + # cexp(-inf + yi) is +0 * (cos(y) + i sin(y)) for finite y + check(f, -np.inf, 1, np.PZERO, np.PZERO) + check(f, -np.inf, 0.75 * np.pi, np.NZERO, np.PZERO) + + # cexp(inf + yi) is +inf * (cos(y) + i sin(y)) for finite y + check(f, np.inf, 1, np.inf, np.inf) + check(f, np.inf, 0.75 * np.pi, -np.inf, np.inf) + + # cexp(-inf + inf i) is +-0 +- 0i (signs unspecified) + def _check_ninf_inf(dummy): + msgform = "cexp(-inf, inf) is (%f, %f), expected (+-0, +-0)" + with np.errstate(invalid='ignore'): + z = f(np.array(complex(-np.inf, np.inf))) + if z.real != 0 or z.imag != 0: + raise AssertionError(msgform % (z.real, z.imag)) + + _check_ninf_inf(None) + + # cexp(inf + inf i) is +-inf + NaNi and raised invalid FPU ex. 
+ def _check_inf_inf(dummy): + msgform = "cexp(inf, inf) is (%f, %f), expected (+-inf, nan)" + with np.errstate(invalid='ignore'): + z = f(np.array(complex(np.inf, np.inf))) + if not np.isinf(z.real) or not np.isnan(z.imag): + raise AssertionError(msgform % (z.real, z.imag)) + + _check_inf_inf(None) + + # cexp(-inf + nan i) is +-0 +- 0i + def _check_ninf_nan(dummy): + msgform = "cexp(-inf, nan) is (%f, %f), expected (+-0, +-0)" + with np.errstate(invalid='ignore'): + z = f(np.array(complex(-np.inf, np.nan))) + if z.real != 0 or z.imag != 0: + raise AssertionError(msgform % (z.real, z.imag)) + + _check_ninf_nan(None) + + # cexp(inf + nan i) is +-inf + nan + def _check_inf_nan(dummy): + msgform = "cexp(-inf, nan) is (%f, %f), expected (+-inf, nan)" + with np.errstate(invalid='ignore'): + z = f(np.array(complex(np.inf, np.nan))) + if not np.isinf(z.real) or not np.isnan(z.imag): + raise AssertionError(msgform % (z.real, z.imag)) + + _check_inf_nan(None) + + # cexp(nan + yi) is nan + nani for y != 0 (optional: raises invalid FPU + # ex) + check(f, np.nan, 1, np.nan, np.nan) + check(f, np.nan, -1, np.nan, np.nan) + + check(f, np.nan, np.inf, np.nan, np.nan) + check(f, np.nan, -np.inf, np.nan, np.nan) + + # cexp(nan + nani) is nan + nani + check(f, np.nan, np.nan, np.nan, np.nan) + + # TODO This can be xfail when the generator functions are got rid of. + @pytest.mark.skip(reason="cexp(nan + 0I) is wrong on most platforms") + def test_special_values2(self): + # XXX: most implementations get it wrong here (including glibc <= 2.10) + # cexp(nan + 0i) is nan + 0i + check = check_complex_value + f = np.exp + + check(f, np.nan, 0, np.nan, 0) + +class TestClog: + def test_simple(self): + x = np.array([1+0j, 1+2j]) + y_r = np.log(np.abs(x)) + 1j * np.angle(x) + y = np.log(x) + assert_almost_equal(y, y_r) + + @platform_skip + @pytest.mark.skipif(platform.machine() == "armv5tel", reason="See gh-413.") + def test_special_values(self): + xl = [] + yl = [] + + # From C99 std (Sec 6.3.2) + # XXX: check exceptions raised + # --- raise for invalid fails. + + # clog(-0 + i0) returns -inf + i pi and raises the 'divide-by-zero' + # floating-point exception. + with np.errstate(divide='raise'): + x = np.array([np.NZERO], dtype=complex) + y = complex(-np.inf, np.pi) + assert_raises(FloatingPointError, np.log, x) + with np.errstate(divide='ignore'): + assert_almost_equal(np.log(x), y) + + xl.append(x) + yl.append(y) + + # clog(+0 + i0) returns -inf + i0 and raises the 'divide-by-zero' + # floating-point exception. + with np.errstate(divide='raise'): + x = np.array([0], dtype=complex) + y = complex(-np.inf, 0) + assert_raises(FloatingPointError, np.log, x) + with np.errstate(divide='ignore'): + assert_almost_equal(np.log(x), y) + + xl.append(x) + yl.append(y) + + # clog(x + i inf returns +inf + i pi /2, for finite x. + x = np.array([complex(1, np.inf)], dtype=complex) + y = complex(np.inf, 0.5 * np.pi) + assert_almost_equal(np.log(x), y) + xl.append(x) + yl.append(y) + + x = np.array([complex(-1, np.inf)], dtype=complex) + assert_almost_equal(np.log(x), y) + xl.append(x) + yl.append(y) + + # clog(x + iNaN) returns NaN + iNaN and optionally raises the + # 'invalid' floating- point exception, for finite x. 
+ with np.errstate(invalid='raise'): + x = np.array([complex(1., np.nan)], dtype=complex) + y = complex(np.nan, np.nan) + #assert_raises(FloatingPointError, np.log, x) + with np.errstate(invalid='ignore'): + assert_almost_equal(np.log(x), y) + + xl.append(x) + yl.append(y) + + with np.errstate(invalid='raise'): + x = np.array([np.inf + 1j * np.nan], dtype=complex) + #assert_raises(FloatingPointError, np.log, x) + with np.errstate(invalid='ignore'): + assert_almost_equal(np.log(x), y) + + xl.append(x) + yl.append(y) + + # clog(- inf + iy) returns +inf + ipi , for finite positive-signed y. + x = np.array([-np.inf + 1j], dtype=complex) + y = complex(np.inf, np.pi) + assert_almost_equal(np.log(x), y) + xl.append(x) + yl.append(y) + + # clog(+ inf + iy) returns +inf + i0, for finite positive-signed y. + x = np.array([np.inf + 1j], dtype=complex) + y = complex(np.inf, 0) + assert_almost_equal(np.log(x), y) + xl.append(x) + yl.append(y) + + # clog(- inf + i inf) returns +inf + i3pi /4. + x = np.array([complex(-np.inf, np.inf)], dtype=complex) + y = complex(np.inf, 0.75 * np.pi) + assert_almost_equal(np.log(x), y) + xl.append(x) + yl.append(y) + + # clog(+ inf + i inf) returns +inf + ipi /4. + x = np.array([complex(np.inf, np.inf)], dtype=complex) + y = complex(np.inf, 0.25 * np.pi) + assert_almost_equal(np.log(x), y) + xl.append(x) + yl.append(y) + + # clog(+/- inf + iNaN) returns +inf + iNaN. + x = np.array([complex(np.inf, np.nan)], dtype=complex) + y = complex(np.inf, np.nan) + assert_almost_equal(np.log(x), y) + xl.append(x) + yl.append(y) + + x = np.array([complex(-np.inf, np.nan)], dtype=complex) + assert_almost_equal(np.log(x), y) + xl.append(x) + yl.append(y) + + # clog(NaN + iy) returns NaN + iNaN and optionally raises the + # 'invalid' floating-point exception, for finite y. + x = np.array([complex(np.nan, 1)], dtype=complex) + y = complex(np.nan, np.nan) + assert_almost_equal(np.log(x), y) + xl.append(x) + yl.append(y) + + # clog(NaN + i inf) returns +inf + iNaN. + x = np.array([complex(np.nan, np.inf)], dtype=complex) + y = complex(np.inf, np.nan) + assert_almost_equal(np.log(x), y) + xl.append(x) + yl.append(y) + + # clog(NaN + iNaN) returns NaN + iNaN. + x = np.array([complex(np.nan, np.nan)], dtype=complex) + y = complex(np.nan, np.nan) + assert_almost_equal(np.log(x), y) + xl.append(x) + yl.append(y) + + # clog(conj(z)) = conj(clog(z)). 
+ xa = np.array(xl, dtype=complex) + ya = np.array(yl, dtype=complex) + with np.errstate(divide='ignore'): + for i in range(len(xa)): + assert_almost_equal(np.log(xa[i].conj()), ya[i].conj()) + + +class TestCsqrt: + + def test_simple(self): + # sqrt(1) + check_complex_value(np.sqrt, 1, 0, 1, 0) + + # sqrt(1i) + rres = 0.5*np.sqrt(2) + ires = rres + check_complex_value(np.sqrt, 0, 1, rres, ires, False) + + # sqrt(-1) + check_complex_value(np.sqrt, -1, 0, 0, 1) + + def test_simple_conjugate(self): + ref = np.conj(np.sqrt(complex(1, 1))) + + def f(z): + return np.sqrt(np.conj(z)) + + check_complex_value(f, 1, 1, ref.real, ref.imag, False) + + #def test_branch_cut(self): + # _check_branch_cut(f, -1, 0, 1, -1) + + @platform_skip + def test_special_values(self): + # C99: Sec G 6.4.2 + + check = check_complex_value + f = np.sqrt + + # csqrt(+-0 + 0i) is 0 + 0i + check(f, np.PZERO, 0, 0, 0) + check(f, np.NZERO, 0, 0, 0) + + # csqrt(x + infi) is inf + infi for any x (including NaN) + check(f, 1, np.inf, np.inf, np.inf) + check(f, -1, np.inf, np.inf, np.inf) + + check(f, np.PZERO, np.inf, np.inf, np.inf) + check(f, np.NZERO, np.inf, np.inf, np.inf) + check(f, np.inf, np.inf, np.inf, np.inf) + check(f, -np.inf, np.inf, np.inf, np.inf) + check(f, -np.nan, np.inf, np.inf, np.inf) + + # csqrt(x + nani) is nan + nani for any finite x + check(f, 1, np.nan, np.nan, np.nan) + check(f, -1, np.nan, np.nan, np.nan) + check(f, 0, np.nan, np.nan, np.nan) + + # csqrt(-inf + yi) is +0 + infi for any finite y > 0 + check(f, -np.inf, 1, np.PZERO, np.inf) + + # csqrt(inf + yi) is +inf + 0i for any finite y > 0 + check(f, np.inf, 1, np.inf, np.PZERO) + + # csqrt(-inf + nani) is nan +- infi (both +i infi are valid) + def _check_ninf_nan(dummy): + msgform = "csqrt(-inf, nan) is (%f, %f), expected (nan, +-inf)" + z = np.sqrt(np.array(complex(-np.inf, np.nan))) + #Fixme: ugly workaround for isinf bug. 
+ with np.errstate(invalid='ignore'): + if not (np.isnan(z.real) and np.isinf(z.imag)): + raise AssertionError(msgform % (z.real, z.imag)) + + _check_ninf_nan(None) + + # csqrt(+inf + nani) is inf + nani + check(f, np.inf, np.nan, np.inf, np.nan) + + # csqrt(nan + yi) is nan + nani for any finite y (infinite handled in x + # + nani) + check(f, np.nan, 0, np.nan, np.nan) + check(f, np.nan, 1, np.nan, np.nan) + check(f, np.nan, np.nan, np.nan, np.nan) + + # XXX: check for conj(csqrt(z)) == csqrt(conj(z)) (need to fix branch + # cuts first) + +class TestCpow: + def setup_method(self): + self.olderr = np.seterr(invalid='ignore') + + def teardown_method(self): + np.seterr(**self.olderr) + + def test_simple(self): + x = np.array([1+1j, 0+2j, 1+2j, np.inf, np.nan]) + y_r = x ** 2 + y = np.power(x, 2) + assert_almost_equal(y, y_r) + + def test_scalar(self): + x = np.array([1, 1j, 2, 2.5+.37j, np.inf, np.nan]) + y = np.array([1, 1j, -0.5+1.5j, -0.5+1.5j, 2, 3]) + lx = list(range(len(x))) + + # Hardcode the expected `builtins.complex` values, + # as complex exponentiation is broken as of bpo-44698 + p_r = [ + 1+0j, + 0.20787957635076193+0j, + 0.35812203996480685+0.6097119028618724j, + 0.12659112128185032+0.48847676699581527j, + complex(np.inf, np.nan), + complex(np.nan, np.nan), + ] + + n_r = [x[i] ** y[i] for i in lx] + for i in lx: + assert_almost_equal(n_r[i], p_r[i], err_msg='Loop %d\n' % i) + + def test_array(self): + x = np.array([1, 1j, 2, 2.5+.37j, np.inf, np.nan]) + y = np.array([1, 1j, -0.5+1.5j, -0.5+1.5j, 2, 3]) + lx = list(range(len(x))) + + # Hardcode the expected `builtins.complex` values, + # as complex exponentiation is broken as of bpo-44698 + p_r = [ + 1+0j, + 0.20787957635076193+0j, + 0.35812203996480685+0.6097119028618724j, + 0.12659112128185032+0.48847676699581527j, + complex(np.inf, np.nan), + complex(np.nan, np.nan), + ] + + n_r = x ** y + for i in lx: + assert_almost_equal(n_r[i], p_r[i], err_msg='Loop %d\n' % i) + +class TestCabs: + def setup_method(self): + self.olderr = np.seterr(invalid='ignore') + + def teardown_method(self): + np.seterr(**self.olderr) + + def test_simple(self): + x = np.array([1+1j, 0+2j, 1+2j, np.inf, np.nan]) + y_r = np.array([np.sqrt(2.), 2, np.sqrt(5), np.inf, np.nan]) + y = np.abs(x) + assert_almost_equal(y, y_r) + + def test_fabs(self): + # Test that np.abs(x +- 0j) == np.abs(x) (as mandated by C99 for cabs) + x = np.array([1+0j], dtype=complex) + assert_array_equal(np.abs(x), np.real(x)) + + x = np.array([complex(1, np.NZERO)], dtype=complex) + assert_array_equal(np.abs(x), np.real(x)) + + x = np.array([complex(np.inf, np.NZERO)], dtype=complex) + assert_array_equal(np.abs(x), np.real(x)) + + x = np.array([complex(np.nan, np.NZERO)], dtype=complex) + assert_array_equal(np.abs(x), np.real(x)) + + def test_cabs_inf_nan(self): + x, y = [], [] + + # cabs(+-nan + nani) returns nan + x.append(np.nan) + y.append(np.nan) + check_real_value(np.abs, np.nan, np.nan, np.nan) + + x.append(np.nan) + y.append(-np.nan) + check_real_value(np.abs, -np.nan, np.nan, np.nan) + + # According to C99 standard, if exactly one of the real/part is inf and + # the other nan, then cabs should return inf + x.append(np.inf) + y.append(np.nan) + check_real_value(np.abs, np.inf, np.nan, np.inf) + + x.append(-np.inf) + y.append(np.nan) + check_real_value(np.abs, -np.inf, np.nan, np.inf) + + # cabs(conj(z)) == conj(cabs(z)) (= cabs(z)) + def f(a): + return np.abs(np.conj(a)) + + def g(a, b): + return np.abs(complex(a, b)) + + xa = np.array(x, dtype=complex) + assert len(xa) == 
len(x) == len(y) + for xi, yi in zip(x, y): + ref = g(xi, yi) + check_real_value(f, xi, yi, ref) + +class TestCarg: + def test_simple(self): + check_real_value(ncu._arg, 1, 0, 0, False) + check_real_value(ncu._arg, 0, 1, 0.5*np.pi, False) + + check_real_value(ncu._arg, 1, 1, 0.25*np.pi, False) + check_real_value(ncu._arg, np.PZERO, np.PZERO, np.PZERO) + + # TODO: This can be xfailed once the generator functions are removed. + @pytest.mark.skip( + reason="Complex arithmetic with signed zero fails on most platforms") + def test_zero(self): + # carg(-0 +- 0i) returns +- pi + check_real_value(ncu._arg, np.NZERO, np.PZERO, np.pi, False) + check_real_value(ncu._arg, np.NZERO, np.NZERO, -np.pi, False) + + # carg(+0 +- 0i) returns +- 0 + check_real_value(ncu._arg, np.PZERO, np.PZERO, np.PZERO) + check_real_value(ncu._arg, np.PZERO, np.NZERO, np.NZERO) + + # carg(x +- 0i) returns +- 0 for x > 0 + check_real_value(ncu._arg, 1, np.PZERO, np.PZERO, False) + check_real_value(ncu._arg, 1, np.NZERO, np.NZERO, False) + + # carg(x +- 0i) returns +- pi for x < 0 + check_real_value(ncu._arg, -1, np.PZERO, np.pi, False) + check_real_value(ncu._arg, -1, np.NZERO, -np.pi, False) + + # carg(+- 0 + yi) returns pi/2 for y > 0 + check_real_value(ncu._arg, np.PZERO, 1, 0.5 * np.pi, False) + check_real_value(ncu._arg, np.NZERO, 1, 0.5 * np.pi, False) + + # carg(+- 0 + yi) returns -pi/2 for y < 0 + check_real_value(ncu._arg, np.PZERO, -1, -0.5 * np.pi, False) + check_real_value(ncu._arg, np.NZERO, -1, -0.5 * np.pi, False) + + #def test_branch_cuts(self): + # _check_branch_cut(ncu._arg, -1, 1j, -1, 1) + + def test_special_values(self): + # carg(-np.inf +- yi) returns +-pi for finite y > 0 + check_real_value(ncu._arg, -np.inf, 1, np.pi, False) + check_real_value(ncu._arg, -np.inf, -1, -np.pi, False) + + # carg(np.inf +- yi) returns +-0 for finite y > 0 + check_real_value(ncu._arg, np.inf, 1, np.PZERO, False) + check_real_value(ncu._arg, np.inf, -1, np.NZERO, False) + + # carg(x +- np.infi) returns +-pi/2 for finite x + check_real_value(ncu._arg, 1, np.inf, 0.5 * np.pi, False) + check_real_value(ncu._arg, 1, -np.inf, -0.5 * np.pi, False) + + # carg(-np.inf +- np.infi) returns +-3pi/4 + check_real_value(ncu._arg, -np.inf, np.inf, 0.75 * np.pi, False) + check_real_value(ncu._arg, -np.inf, -np.inf, -0.75 * np.pi, False) + + # carg(np.inf +- np.infi) returns +-pi/4 + check_real_value(ncu._arg, np.inf, np.inf, 0.25 * np.pi, False) + check_real_value(ncu._arg, np.inf, -np.inf, -0.25 * np.pi, False) + + # carg(x + yi) returns np.nan if x or y is nan + check_real_value(ncu._arg, np.nan, 0, np.nan, False) + check_real_value(ncu._arg, 0, np.nan, np.nan, False) + + check_real_value(ncu._arg, np.nan, np.inf, np.nan, False) + check_real_value(ncu._arg, np.inf, np.nan, np.nan, False) + + +def check_real_value(f, x1, y1, x, exact=True): + z1 = np.array([complex(x1, y1)]) + if exact: + assert_equal(f(z1), x) + else: + assert_almost_equal(f(z1), x) + + +def check_complex_value(f, x1, y1, x2, y2, exact=True): + z1 = np.array([complex(x1, y1)]) + z2 = complex(x2, y2) + with np.errstate(invalid='ignore'): + if exact: + assert_equal(f(z1), z2) + else: + assert_almost_equal(f(z1), z2) + +class TestSpecialComplexAVX: + @pytest.mark.parametrize("stride", [-4,-2,-1,1,2,4]) + @pytest.mark.parametrize("astype", [np.complex64, np.complex128]) + def test_array(self, stride, astype): + arr = np.array([complex(np.nan , np.nan), + complex(np.nan , np.inf), + complex(np.inf , np.nan), + complex(np.inf , np.inf), + complex(0.
, np.inf), + complex(np.inf , 0.), + complex(0. , 0.), + complex(0. , np.nan), + complex(np.nan , 0.)], dtype=astype) + abs_true = np.array([np.nan, np.inf, np.inf, np.inf, np.inf, np.inf, 0., np.nan, np.nan], dtype=arr.real.dtype) + sq_true = np.array([complex(np.nan, np.nan), + complex(np.nan, np.nan), + complex(np.nan, np.nan), + complex(np.nan, np.inf), + complex(-np.inf, np.nan), + complex(np.inf, np.nan), + complex(0., 0.), + complex(np.nan, np.nan), + complex(np.nan, np.nan)], dtype=astype) + with np.errstate(invalid='ignore'): + assert_equal(np.abs(arr[::stride]), abs_true[::stride]) + assert_equal(np.square(arr[::stride]), sq_true[::stride]) + +class TestComplexAbsoluteAVX: + @pytest.mark.parametrize("arraysize", [1,2,3,4,5,6,7,8,9,10,11,13,15,17,18,19]) + @pytest.mark.parametrize("stride", [-4,-3,-2,-1,1,2,3,4]) + @pytest.mark.parametrize("astype", [np.complex64, np.complex128]) + # test to ensure masking and strides work as intended in the AVX implementation + def test_array(self, arraysize, stride, astype): + arr = np.ones(arraysize, dtype=astype) + abs_true = np.ones(arraysize, dtype=arr.real.dtype) + assert_equal(np.abs(arr[::stride]), abs_true[::stride]) + +# Testcase taken as is from https://github.com/numpy/numpy/issues/16660 +class TestComplexAbsoluteMixedDTypes: + @pytest.mark.parametrize("stride", [-4,-3,-2,-1,1,2,3,4]) + @pytest.mark.parametrize("astype", [np.complex64, np.complex128]) + @pytest.mark.parametrize("func", ['abs', 'square', 'conjugate']) + + def test_array(self, stride, astype, func): + dtype = [('template_id', '<i8'), ('bank_chisq','<f4'), + ('bank_chisq_dof','<i8'), ('chisq', '<f4'), ('chisq_dof','<i8'), + ('cont_chisq', '<f4'), ('psd_var_val', '<f4'), ('sg_chisq','<f4'), + ('mycomplex', astype), ('time_index', '<i8')] + vec = np.array([ + (0, 0., 0, -31.666483, 200, 0., 0., 1. , 3.0+4.0j , 613090), + (1, 0., 0, 260.91525 , 42, 0., 0., 1. , 5.0+12.0j , 787315), + (1, 0., 0, 52.15155 , 42, 0., 0., 1. , 8.0+15.0j , 806641), + (1, 0., 0, 52.430195, 42, 0., 0., 1. , 7.0+24.0j , 1363540), + (2, 0., 0, 304.43646 , 58, 0., 0., 1. , 20.0+21.0j , 787323), + (3, 0., 0, 299.42108 , 52, 0., 0., 1. , 12.0+35.0j , 787332), + (4, 0., 0, 39.4836 , 28, 0., 0., 9.182192, 9.0+40.0j , 787304), + (4, 0., 0, 76.83787 , 28, 0., 0., 1. 
, 28.0+45.0j, 1321869), + (5, 0., 0, 143.26366 , 24, 0., 0., 10.996129, 11.0+60.0j , 787299)], dtype=dtype) + myfunc = getattr(np, func) + a = vec['mycomplex'] + g = myfunc(a[::stride]) + + b = vec['mycomplex'].copy() + h = myfunc(b[::stride]) + + assert_array_max_ulp(h.real, g.real, 1) + assert_array_max_ulp(h.imag, g.imag, 1) diff --git a/.venv/lib/python3.12/site-packages/numpy/core/tests/test_unicode.py b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_unicode.py new file mode 100644 index 00000000..e5454bd4 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/tests/test_unicode.py @@ -0,0 +1,368 @@ +import pytest + +import numpy as np +from numpy.testing import assert_, assert_equal, assert_array_equal + +def buffer_length(arr): + if isinstance(arr, str): + if not arr: + charmax = 0 + else: + charmax = max([ord(c) for c in arr]) + if charmax < 256: + size = 1 + elif charmax < 65536: + size = 2 + else: + size = 4 + return size * len(arr) + v = memoryview(arr) + if v.shape is None: + return len(v) * v.itemsize + else: + return np.prod(v.shape) * v.itemsize + + +# In both cases below we need to make sure that the byte swapped value (as +# UCS4) is still a valid unicode: +# Value that can be represented in UCS2 interpreters +ucs2_value = '\u0900' +# Value that cannot be represented in UCS2 interpreters (but can in UCS4) +ucs4_value = '\U00100900' + + +def test_string_cast(): + str_arr = np.array(["1234", "1234\0\0"], dtype='S') + uni_arr1 = str_arr.astype('>U') + uni_arr2 = str_arr.astype('<U') + + assert_array_equal(str_arr != uni_arr1, np.ones(2, dtype=bool)) + assert_array_equal(uni_arr1 != str_arr, np.ones(2, dtype=bool)) + assert_array_equal(str_arr == uni_arr1, np.zeros(2, dtype=bool)) + assert_array_equal(uni_arr1 == str_arr, np.zeros(2, dtype=bool)) + + assert_array_equal(uni_arr1, uni_arr2) + + +############################################################ +# Creation tests +############################################################ + +class CreateZeros: + """Check the creation of zero-valued arrays""" + + def content_check(self, ua, ua_scalar, nbytes): + + # Check the length of the unicode base type + assert_(int(ua.dtype.str[2:]) == self.ulen) + # Check the length of the data buffer + assert_(buffer_length(ua) == nbytes) + # Small check that data in array element is ok + assert_(ua_scalar == '') + # Encode to ascii and double check + assert_(ua_scalar.encode('ascii') == b'') + # Check buffer lengths for scalars + assert_(buffer_length(ua_scalar) == 0) + + def test_zeros0D(self): + # Check creation of 0-dimensional objects + ua = np.zeros((), dtype='U%s' % self.ulen) + self.content_check(ua, ua[()], 4*self.ulen) + + def test_zerosSD(self): + # Check creation of single-dimensional objects + ua = np.zeros((2,), dtype='U%s' % self.ulen) + self.content_check(ua, ua[0], 4*self.ulen*2) + self.content_check(ua, ua[1], 4*self.ulen*2) + + def test_zerosMD(self): + # Check creation of multi-dimensional objects + ua = np.zeros((2, 3, 4), dtype='U%s' % self.ulen) + self.content_check(ua, ua[0, 0, 0], 4*self.ulen*2*3*4) + self.content_check(ua, ua[-1, -1, -1], 4*self.ulen*2*3*4) + + +class TestCreateZeros_1(CreateZeros): + """Check the creation of zero-valued arrays (size 1)""" + ulen = 1 + + +class TestCreateZeros_2(CreateZeros): + """Check the creation of zero-valued arrays (size 2)""" + ulen = 2 + + +class TestCreateZeros_1009(CreateZeros): + """Check the creation of zero-valued arrays (size 1009)""" + ulen = 1009 + + +class CreateValues: + """Check the creation of 
unicode arrays with values""" + + def content_check(self, ua, ua_scalar, nbytes): + + # Check the length of the unicode base type + assert_(int(ua.dtype.str[2:]) == self.ulen) + # Check the length of the data buffer + assert_(buffer_length(ua) == nbytes) + # Small check that data in array element is ok + assert_(ua_scalar == self.ucs_value*self.ulen) + # Encode to UTF-8 and double check + assert_(ua_scalar.encode('utf-8') == + (self.ucs_value*self.ulen).encode('utf-8')) + # Check buffer lengths for scalars + if self.ucs_value == ucs4_value: + # In UCS2, the \U0010FFFF will be represented using a + # surrogate *pair* + assert_(buffer_length(ua_scalar) == 2*2*self.ulen) + else: + # In UCS2, the \uFFFF will be represented using a + # regular 2-byte word + assert_(buffer_length(ua_scalar) == 2*self.ulen) + + def test_values0D(self): + # Check creation of 0-dimensional objects with values + ua = np.array(self.ucs_value*self.ulen, dtype='U%s' % self.ulen) + self.content_check(ua, ua[()], 4*self.ulen) + + def test_valuesSD(self): + # Check creation of single-dimensional objects with values + ua = np.array([self.ucs_value*self.ulen]*2, dtype='U%s' % self.ulen) + self.content_check(ua, ua[0], 4*self.ulen*2) + self.content_check(ua, ua[1], 4*self.ulen*2) + + def test_valuesMD(self): + # Check creation of multi-dimensional objects with values + ua = np.array([[[self.ucs_value*self.ulen]*2]*3]*4, dtype='U%s' % self.ulen) + self.content_check(ua, ua[0, 0, 0], 4*self.ulen*2*3*4) + self.content_check(ua, ua[-1, -1, -1], 4*self.ulen*2*3*4) + + +class TestCreateValues_1_UCS2(CreateValues): + """Check the creation of valued arrays (size 1, UCS2 values)""" + ulen = 1 + ucs_value = ucs2_value + + +class TestCreateValues_1_UCS4(CreateValues): + """Check the creation of valued arrays (size 1, UCS4 values)""" + ulen = 1 + ucs_value = ucs4_value + + +class TestCreateValues_2_UCS2(CreateValues): + """Check the creation of valued arrays (size 2, UCS2 values)""" + ulen = 2 + ucs_value = ucs2_value + + +class TestCreateValues_2_UCS4(CreateValues): + """Check the creation of valued arrays (size 2, UCS4 values)""" + ulen = 2 + ucs_value = ucs4_value + + +class TestCreateValues_1009_UCS2(CreateValues): + """Check the creation of valued arrays (size 1009, UCS2 values)""" + ulen = 1009 + ucs_value = ucs2_value + + +class TestCreateValues_1009_UCS4(CreateValues): + """Check the creation of valued arrays (size 1009, UCS4 values)""" + ulen = 1009 + ucs_value = ucs4_value + + +############################################################ +# Assignment tests +############################################################ + +class AssignValues: + """Check the assignment of unicode arrays with values""" + + def content_check(self, ua, ua_scalar, nbytes): + + # Check the length of the unicode base type + assert_(int(ua.dtype.str[2:]) == self.ulen) + # Check the length of the data buffer + assert_(buffer_length(ua) == nbytes) + # Small check that data in array element is ok + assert_(ua_scalar == self.ucs_value*self.ulen) + # Encode to UTF-8 and double check + assert_(ua_scalar.encode('utf-8') == + (self.ucs_value*self.ulen).encode('utf-8')) + # Check buffer lengths for scalars + if self.ucs_value == ucs4_value: + # In UCS2, the \U0010FFFF will be represented using a + # surrogate *pair* + assert_(buffer_length(ua_scalar) == 2*2*self.ulen) + else: + # In UCS2, the \uFFFF will be represented using a + # regular 2-byte word + assert_(buffer_length(ua_scalar) == 2*self.ulen) + + def test_values0D(self): + # Check assignment of 
0-dimensional objects with values + ua = np.zeros((), dtype='U%s' % self.ulen) + ua[()] = self.ucs_value*self.ulen + self.content_check(ua, ua[()], 4*self.ulen) + + def test_valuesSD(self): + # Check assignment of single-dimensional objects with values + ua = np.zeros((2,), dtype='U%s' % self.ulen) + ua[0] = self.ucs_value*self.ulen + self.content_check(ua, ua[0], 4*self.ulen*2) + ua[1] = self.ucs_value*self.ulen + self.content_check(ua, ua[1], 4*self.ulen*2) + + def test_valuesMD(self): + # Check assignment of multi-dimensional objects with values + ua = np.zeros((2, 3, 4), dtype='U%s' % self.ulen) + ua[0, 0, 0] = self.ucs_value*self.ulen + self.content_check(ua, ua[0, 0, 0], 4*self.ulen*2*3*4) + ua[-1, -1, -1] = self.ucs_value*self.ulen + self.content_check(ua, ua[-1, -1, -1], 4*self.ulen*2*3*4) + + +class TestAssignValues_1_UCS2(AssignValues): + """Check the assignment of valued arrays (size 1, UCS2 values)""" + ulen = 1 + ucs_value = ucs2_value + + +class TestAssignValues_1_UCS4(AssignValues): + """Check the assignment of valued arrays (size 1, UCS4 values)""" + ulen = 1 + ucs_value = ucs4_value + + +class TestAssignValues_2_UCS2(AssignValues): + """Check the assignment of valued arrays (size 2, UCS2 values)""" + ulen = 2 + ucs_value = ucs2_value + + +class TestAssignValues_2_UCS4(AssignValues): + """Check the assignment of valued arrays (size 2, UCS4 values)""" + ulen = 2 + ucs_value = ucs4_value + + +class TestAssignValues_1009_UCS2(AssignValues): + """Check the assignment of valued arrays (size 1009, UCS2 values)""" + ulen = 1009 + ucs_value = ucs2_value + + +class TestAssignValues_1009_UCS4(AssignValues): + """Check the assignment of valued arrays (size 1009, UCS4 values)""" + ulen = 1009 + ucs_value = ucs4_value + + +############################################################ +# Byteorder tests +############################################################ + +class ByteorderValues: + """Check the byteorder of unicode arrays in round-trip conversions""" + + def test_values0D(self): + # Check byteorder of 0-dimensional objects + ua = np.array(self.ucs_value*self.ulen, dtype='U%s' % self.ulen) + ua2 = ua.newbyteorder() + # This changes the interpretation of the data region (but not the + # actual data), therefore the returned scalars are not + # the same (they are byte-swapped versions of each other). 
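+ # newbyteorder() reinterprets the same buffer under a swapped byte order;
+ # the raw data is untouched, so (as a sketch, not part of the upstream
+ # test) ua.tobytes() == ua2.tobytes() would still hold here even though
+ # the decoded scalars compare unequal.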
+ assert_(ua[()] != ua2[()]) + ua3 = ua2.newbyteorder() + # Arrays must be equal after the round-trip + assert_equal(ua, ua3) + + def test_valuesSD(self): + # Check byteorder of single-dimensional objects + ua = np.array([self.ucs_value*self.ulen]*2, dtype='U%s' % self.ulen) + ua2 = ua.newbyteorder() + assert_((ua != ua2).all()) + assert_(ua[-1] != ua2[-1]) + ua3 = ua2.newbyteorder() + # Arrays must be equal after the round-trip + assert_equal(ua, ua3) + + def test_valuesMD(self): + # Check byteorder of multi-dimensional objects + ua = np.array([[[self.ucs_value*self.ulen]*2]*3]*4, + dtype='U%s' % self.ulen) + ua2 = ua.newbyteorder() + assert_((ua != ua2).all()) + assert_(ua[-1, -1, -1] != ua2[-1, -1, -1]) + ua3 = ua2.newbyteorder() + # Arrays must be equal after the round-trip + assert_equal(ua, ua3) + + def test_values_cast(self): + # Check byteorder when casting the array for a strided and + # contiguous array: + test1 = np.array([self.ucs_value*self.ulen]*2, dtype='U%s' % self.ulen) + test2 = np.repeat(test1, 2)[::2] + for ua in (test1, test2): + ua2 = ua.astype(dtype=ua.dtype.newbyteorder()) + assert_((ua == ua2).all()) + assert_(ua[-1] == ua2[-1]) + ua3 = ua2.astype(dtype=ua.dtype) + # Arrays must be equal after the round-trip + assert_equal(ua, ua3) + + def test_values_updowncast(self): + # Check byteorder when casting the array to a longer and shorter + # string length for strided and contiguous arrays + test1 = np.array([self.ucs_value*self.ulen]*2, dtype='U%s' % self.ulen) + test2 = np.repeat(test1, 2)[::2] + for ua in (test1, test2): + # Cast to a longer type with zero padding + longer_type = np.dtype('U%s' % (self.ulen+1)).newbyteorder() + ua2 = ua.astype(dtype=longer_type) + assert_((ua == ua2).all()) + assert_(ua[-1] == ua2[-1]) + # Cast back again with truncation: + ua3 = ua2.astype(dtype=ua.dtype) + # Arrays must be equal after the round-trip + assert_equal(ua, ua3) + + +class TestByteorder_1_UCS2(ByteorderValues): + """Check the byteorder in unicode (size 1, UCS2 values)""" + ulen = 1 + ucs_value = ucs2_value + + +class TestByteorder_1_UCS4(ByteorderValues): + """Check the byteorder in unicode (size 1, UCS4 values)""" + ulen = 1 + ucs_value = ucs4_value + + +class TestByteorder_2_UCS2(ByteorderValues): + """Check the byteorder in unicode (size 2, UCS2 values)""" + ulen = 2 + ucs_value = ucs2_value + + +class TestByteorder_2_UCS4(ByteorderValues): + """Check the byteorder in unicode (size 2, UCS4 values)""" + ulen = 2 + ucs_value = ucs4_value + + +class TestByteorder_1009_UCS2(ByteorderValues): + """Check the byteorder in unicode (size 1009, UCS2 values)""" + ulen = 1009 + ucs_value = ucs2_value + + +class TestByteorder_1009_UCS4(ByteorderValues): + """Check the byteorder in unicode (size 1009, UCS4 values)""" + ulen = 1009 + ucs_value = ucs4_value diff --git a/.venv/lib/python3.12/site-packages/numpy/core/umath.py b/.venv/lib/python3.12/site-packages/numpy/core/umath.py new file mode 100644 index 00000000..6a5474ff --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/umath.py @@ -0,0 +1,36 @@ +""" +Create the numpy.core.umath namespace for backward compatibility. In v1.16 +the multiarray and umath c-extension modules were merged into a single +_multiarray_umath extension module. So we replicate the old namespace +by importing from the extension module. + +""" + +from . import _multiarray_umath +from ._multiarray_umath import * # noqa: F403 +# These imports are needed for backward compatibility, +# do not change them.
issue gh-11862 +# _ones_like is semi-public, on purpose not added to __all__ +from ._multiarray_umath import _UFUNC_API, _add_newdoc_ufunc, _ones_like + +__all__ = [ + '_UFUNC_API', 'ERR_CALL', 'ERR_DEFAULT', 'ERR_IGNORE', 'ERR_LOG', + 'ERR_PRINT', 'ERR_RAISE', 'ERR_WARN', 'FLOATING_POINT_SUPPORT', + 'FPE_DIVIDEBYZERO', 'FPE_INVALID', 'FPE_OVERFLOW', 'FPE_UNDERFLOW', 'NAN', + 'NINF', 'NZERO', 'PINF', 'PZERO', 'SHIFT_DIVIDEBYZERO', 'SHIFT_INVALID', + 'SHIFT_OVERFLOW', 'SHIFT_UNDERFLOW', 'UFUNC_BUFSIZE_DEFAULT', + 'UFUNC_PYVALS_NAME', '_add_newdoc_ufunc', 'absolute', 'add', + 'arccos', 'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctan2', 'arctanh', + 'bitwise_and', 'bitwise_or', 'bitwise_xor', 'cbrt', 'ceil', 'conj', + 'conjugate', 'copysign', 'cos', 'cosh', 'deg2rad', 'degrees', 'divide', + 'divmod', 'e', 'equal', 'euler_gamma', 'exp', 'exp2', 'expm1', 'fabs', + 'floor', 'floor_divide', 'float_power', 'fmax', 'fmin', 'fmod', 'frexp', + 'frompyfunc', 'gcd', 'geterrobj', 'greater', 'greater_equal', 'heaviside', + 'hypot', 'invert', 'isfinite', 'isinf', 'isnan', 'isnat', 'lcm', 'ldexp', + 'left_shift', 'less', 'less_equal', 'log', 'log10', 'log1p', 'log2', + 'logaddexp', 'logaddexp2', 'logical_and', 'logical_not', 'logical_or', + 'logical_xor', 'maximum', 'minimum', 'mod', 'modf', 'multiply', 'negative', + 'nextafter', 'not_equal', 'pi', 'positive', 'power', 'rad2deg', 'radians', + 'reciprocal', 'remainder', 'right_shift', 'rint', 'seterrobj', 'sign', + 'signbit', 'sin', 'sinh', 'spacing', 'sqrt', 'square', 'subtract', 'tan', + 'tanh', 'true_divide', 'trunc'] diff --git a/.venv/lib/python3.12/site-packages/numpy/core/umath_tests.py b/.venv/lib/python3.12/site-packages/numpy/core/umath_tests.py new file mode 100644 index 00000000..90ab17e6 --- /dev/null +++ b/.venv/lib/python3.12/site-packages/numpy/core/umath_tests.py @@ -0,0 +1,13 @@ +""" +Shim for _umath_tests to allow a deprecation period for the new name. + +""" +import warnings + +# 2018-04-04, numpy 1.15.0 +warnings.warn(("numpy.core.umath_tests is an internal NumPy " + "module and should not be imported. It will " + "be removed in a future NumPy release."), + category=DeprecationWarning, stacklevel=2) + +from ._umath_tests import *
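+# A minimal usage sketch (not part of the upstream shim): on first import the
+# DeprecationWarning above is emitted and can be observed with the standard
+# warnings machinery, e.g.
+#
+#   import warnings
+#   with warnings.catch_warnings(record=True) as caught:
+#       warnings.simplefilter("always")
+#       import numpy.core.umath_tests  # noqa: F401
+#   assert any(issubclass(w.category, DeprecationWarning) for w in caught)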
