author    blackhao <13851610112@163.com>  2025-08-22 02:51:50 -0500
committer blackhao <13851610112@163.com>  2025-08-22 02:51:50 -0500
commit    4aab4087dc97906d0b9890035401175cdaab32d4 (patch)
tree      4e2e9d88a711ec5b1cfa02e8ac72a55183b99123 /.venv/lib/python3.12/site-packages/numpy/lib
parent    afa8f50d1d21c721dabcb31ad244610946ab65a3 (diff)
2.0
Diffstat (limited to '.venv/lib/python3.12/site-packages/numpy/lib')
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/__init__.py  97
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/__init__.pyi  44
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/__pycache__/__init__.cpython-312.pyc  bin 0 -> 3010 bytes
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_array_utils_impl.cpython-312.pyc  bin 0 -> 2161 bytes
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_arraypad_impl.cpython-312.pyc  bin 0 -> 28978 bytes
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_arraysetops_impl.cpython-312.pyc  bin 0 -> 43158 bytes
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_arrayterator_impl.cpython-312.pyc  bin 0 -> 9627 bytes
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_datasource.cpython-312.pyc  bin 0 -> 25647 bytes
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_format_impl.cpython-312.pyc  bin 0 -> 35982 bytes
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_function_base_impl.cpython-312.pyc  bin 0 -> 207985 bytes
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_histograms_impl.cpython-312.pyc  bin 0 -> 40778 bytes
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_index_tricks_impl.cpython-312.pyc  bin 0 -> 36311 bytes
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_iotools.cpython-312.pyc  bin 0 -> 35190 bytes
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_nanfunctions_impl.cpython-312.pyc  bin 0 -> 76016 bytes
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_npyio_impl.cpython-312.pyc  bin 0 -> 96608 bytes
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_polynomial_impl.cpython-312.pyc  bin 0 -> 53606 bytes
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_scimath_impl.cpython-312.pyc  bin 0 -> 18335 bytes
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_shape_base_impl.cpython-312.pyc  bin 0 -> 44015 bytes
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_stride_tricks_impl.cpython-312.pyc  bin 0 -> 19596 bytes
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_twodim_base_impl.cpython-312.pyc  bin 0 -> 38482 bytes
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_type_check_impl.cpython-312.pyc  bin 0 -> 22208 bytes
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_ufunclike_impl.cpython-312.pyc  bin 0 -> 7270 bytes
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_user_array_impl.cpython-312.pyc  bin 0 -> 16244 bytes
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_utils_impl.cpython-312.pyc  bin 0 -> 25608 bytes
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_version.cpython-312.pyc  bin 0 -> 6643 bytes
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/__pycache__/array_utils.cpython-312.pyc  bin 0 -> 350 bytes
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/__pycache__/format.cpython-312.pyc  bin 0 -> 788 bytes
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/__pycache__/introspect.cpython-312.pyc  bin 0 -> 3290 bytes
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/__pycache__/mixins.cpython-312.pyc  bin 0 -> 8977 bytes
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/__pycache__/npyio.cpython-312.pyc  bin 0 -> 271 bytes
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/__pycache__/recfunctions.cpython-312.pyc  bin 0 -> 64014 bytes
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/__pycache__/scimath.cpython-312.pyc  bin 0 -> 411 bytes
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/__pycache__/stride_tricks.cpython-312.pyc  bin 0 -> 299 bytes
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/__pycache__/user_array.cpython-312.pyc  bin 0 -> 262 bytes
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/_array_utils_impl.py  62
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/_array_utils_impl.pyi  26
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/_arraypad_impl.py  890
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/_arraypad_impl.pyi  89
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/_arraysetops_impl.py  1260
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/_arraysetops_impl.pyi  444
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/_arrayterator_impl.py  224
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/_arrayterator_impl.pyi  46
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/_datasource.py  700
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/_datasource.pyi  31
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/_format_impl.py  1036
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/_format_impl.pyi  26
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/_function_base_impl.py  5844
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/_function_base_impl.pyi  985
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/_histograms_impl.py  1085
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/_histograms_impl.pyi  50
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/_index_tricks_impl.py  1067
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/_index_tricks_impl.pyi  196
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/_iotools.py  900
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/_iotools.pyi  114
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/_nanfunctions_impl.py  2024
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/_nanfunctions_impl.pyi  52
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/_npyio_impl.py  2596
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/_npyio_impl.pyi  301
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/_polynomial_impl.py  1465
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/_polynomial_impl.pyi  316
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/_scimath_impl.py  642
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/_scimath_impl.pyi  93
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/_shape_base_impl.py  1301
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/_shape_base_impl.pyi  235
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/_stride_tricks_impl.py  549
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/_stride_tricks_impl.pyi  74
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/_twodim_base_impl.py  1201
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/_twodim_base_impl.pyi  438
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/_type_check_impl.py  699
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/_type_check_impl.pyi  350
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/_ufunclike_impl.py  207
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/_ufunclike_impl.pyi  67
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/_user_array_impl.py  299
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/_user_array_impl.pyi  225
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/_utils_impl.py  779
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/_utils_impl.pyi  10
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/_version.py  154
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/_version.pyi  17
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/array_utils.py  7
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/array_utils.pyi  12
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/format.py  24
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/format.pyi  66
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/introspect.py  95
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/introspect.pyi  3
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/mixins.py  180
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/mixins.pyi  75
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/npyio.py  1
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/npyio.pyi  9
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/recfunctions.py  1681
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/recfunctions.pyi  435
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/scimath.py  13
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/scimath.pyi  30
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/stride_tricks.py  1
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/stride_tricks.pyi  6
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/tests/__init__.py  0
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/__init__.cpython-312.pyc  bin 0 -> 192 bytes
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test__datasource.cpython-312.pyc  bin 0 -> 20906 bytes
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test__iotools.cpython-312.pyc  bin 0 -> 18619 bytes
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test__version.cpython-312.pyc  bin 0 -> 3482 bytes
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_array_utils.cpython-312.pyc  bin 0 -> 2544 bytes
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_arraypad.cpython-312.pyc  bin 0 -> 72097 bytes
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_arraysetops.cpython-312.pyc  bin 0 -> 59087 bytes
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_arrayterator.cpython-312.pyc  bin 0 -> 2637 bytes
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_format.cpython-312.pyc  bin 0 -> 55254 bytes
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_function_base.cpython-312.pyc  bin 0 -> 301287 bytes
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_histograms.cpython-312.pyc  bin 0 -> 51401 bytes
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_index_tricks.cpython-312.pyc  bin 0 -> 35518 bytes
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_io.cpython-312.pyc  bin 0 -> 176840 bytes
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_loadtxt.cpython-312.pyc  bin 0 -> 67059 bytes
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_mixins.cpython-312.pyc  bin 0 -> 12707 bytes
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_nanfunctions.cpython-312.pyc  bin 0 -> 92780 bytes
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_packbits.cpython-312.pyc  bin 0 -> 23174 bytes
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_polynomial.cpython-312.pyc  bin 0 -> 22348 bytes
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_recfunctions.cpython-312.pyc  bin 0 -> 57938 bytes
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_regression.cpython-312.pyc  bin 0 -> 15294 bytes
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_shape_base.cpython-312.pyc  bin 0 -> 56388 bytes
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_stride_tricks.cpython-312.pyc  bin 0 -> 30055 bytes
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_twodim_base.cpython-312.pyc  bin 0 -> 29332 bytes
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_type_check.cpython-312.pyc  bin 0 -> 31613 bytes
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_ufunclike.cpython-312.pyc  bin 0 -> 6034 bytes
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_utils.cpython-312.pyc  bin 0 -> 4174 bytes
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/tests/data/py2-np0-objarr.npy  bin 0 -> 258 bytes
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/tests/data/py2-objarr.npy  bin 0 -> 258 bytes
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/tests/data/py2-objarr.npz  bin 0 -> 366 bytes
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/tests/data/py3-objarr.npy  bin 0 -> 325 bytes
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/tests/data/py3-objarr.npz  bin 0 -> 453 bytes
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/tests/data/python3.npy  bin 0 -> 96 bytes
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/tests/data/win64python2.npy  bin 0 -> 96 bytes
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/tests/test__datasource.py  352
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/tests/test__iotools.py  360
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/tests/test__version.py  64
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/tests/test_array_utils.py  32
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/tests/test_arraypad.py  1415
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/tests/test_arraysetops.py  1074
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/tests/test_arrayterator.py  46
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/tests/test_format.py  1054
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/tests/test_function_base.py  4573
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/tests/test_histograms.py  855
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/tests/test_index_tricks.py  568
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/tests/test_io.py  2848
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/tests/test_loadtxt.py  1101
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/tests/test_mixins.py  215
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/tests/test_nanfunctions.py  1438
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/tests/test_packbits.py  376
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/tests/test_polynomial.py  320
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/tests/test_recfunctions.py  1052
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/tests/test_regression.py  231
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/tests/test_shape_base.py  813
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/tests/test_stride_tricks.py  656
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/tests/test_twodim_base.py  559
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/tests/test_type_check.py  473
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/tests/test_ufunclike.py  97
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/tests/test_utils.py  80
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/user_array.py  1
-rw-r--r--  .venv/lib/python3.12/site-packages/numpy/lib/user_array.pyi  1
155 files changed, 52602 insertions, 0 deletions
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/__init__.py b/.venv/lib/python3.12/site-packages/numpy/lib/__init__.py
new file mode 100644
index 0000000..a248d04
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/__init__.py
@@ -0,0 +1,97 @@
+"""
+``numpy.lib`` is mostly a space for implementing functions that don't
+belong in core or in another NumPy submodule with a clear purpose
+(e.g. ``random``, ``fft``, ``linalg``, ``ma``).
+
+``numpy.lib``'s private submodules contain basic functions that are used by
+other public modules and are useful to have in the main namespace.
+
+"""
+
+# Public submodules
+# Note: recfunctions is public, but not imported
+from numpy._core._multiarray_umath import add_docstring, tracemalloc_domain
+from numpy._core.function_base import add_newdoc
+
+# Private submodules
+# load module names. See https://github.com/networkx/networkx/issues/5838
+from . import (
+ _arraypad_impl,
+ _arraysetops_impl,
+ _arrayterator_impl,
+ _function_base_impl,
+ _histograms_impl,
+ _index_tricks_impl,
+ _nanfunctions_impl,
+ _npyio_impl,
+ _polynomial_impl,
+ _shape_base_impl,
+ _stride_tricks_impl,
+ _twodim_base_impl,
+ _type_check_impl,
+ _ufunclike_impl,
+ _utils_impl,
+ _version,
+ array_utils,
+ format,
+ introspect,
+ mixins,
+ npyio,
+ scimath,
+ stride_tricks,
+)
+
+# numpy.lib namespace members
+from ._arrayterator_impl import Arrayterator
+from ._version import NumpyVersion
+
+__all__ = [
+ "Arrayterator", "add_docstring", "add_newdoc", "array_utils",
+ "format", "introspect", "mixins", "NumpyVersion", "npyio", "scimath",
+ "stride_tricks", "tracemalloc_domain",
+]
+
+add_newdoc.__module__ = "numpy.lib"
+
+from numpy._pytesttester import PytestTester
+
+test = PytestTester(__name__)
+del PytestTester
+
+def __getattr__(attr):
+ # Warn for deprecated/removed aliases
+ import math
+ import warnings
+
+ if attr == "math":
+ warnings.warn(
+ "`np.lib.math` is a deprecated alias for the standard library "
+            "`math` module (deprecated in NumPy 1.25). Replace usages of "
+ "`numpy.lib.math` with `math`", DeprecationWarning, stacklevel=2)
+ return math
+ elif attr == "emath":
+ raise AttributeError(
+            "numpy.lib.emath was an alias for the emath module that was removed "
+ "in NumPy 2.0. Replace usages of numpy.lib.emath with "
+ "numpy.emath.",
+ name=None
+ )
+ elif attr in (
+ "histograms", "type_check", "nanfunctions", "function_base",
+ "arraypad", "arraysetops", "ufunclike", "utils", "twodim_base",
+ "shape_base", "polynomial", "index_tricks",
+ ):
+ raise AttributeError(
+ f"numpy.lib.{attr} is now private. If you are using a public "
+ "function, it should be available in the main numpy namespace, "
+ "otherwise check the NumPy 2.0 migration guide.",
+ name=None
+ )
+ elif attr == "arrayterator":
+ raise AttributeError(
+ "numpy.lib.arrayterator submodule is now private. To access "
+ "Arrayterator class use numpy.lib.Arrayterator.",
+ name=None
+ )
+ else:
+ raise AttributeError(f"module {__name__!r} has no attribute {attr!r}")
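+
+# A minimal sketch of the attribute behaviour implemented above
+# (editor's illustration; assumes a NumPy 2.x install):
+#
+#     import numpy as np
+#     np.lib.math          # DeprecationWarning, then the stdlib math module
+#     np.lib.emath         # AttributeError pointing to numpy.emath
+#     np.lib.histograms    # AttributeError: submodule is now private
+#     np.lib.Arrayterator  # still public, re-exported above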
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/__init__.pyi b/.venv/lib/python3.12/site-packages/numpy/lib/__init__.pyi
new file mode 100644
index 0000000..6185a49
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/__init__.pyi
@@ -0,0 +1,44 @@
+from numpy._core.function_base import add_newdoc
+from numpy._core.multiarray import add_docstring, tracemalloc_domain
+
+# all submodules of `lib` are accessible at runtime through `__getattr__`,
+# so we implicitly re-export them here
+from . import _array_utils_impl as _array_utils_impl
+from . import _arraypad_impl as _arraypad_impl
+from . import _arraysetops_impl as _arraysetops_impl
+from . import _arrayterator_impl as _arrayterator_impl
+from . import _datasource as _datasource
+from . import _format_impl as _format_impl
+from . import _function_base_impl as _function_base_impl
+from . import _histograms_impl as _histograms_impl
+from . import _index_tricks_impl as _index_tricks_impl
+from . import _iotools as _iotools
+from . import _nanfunctions_impl as _nanfunctions_impl
+from . import _npyio_impl as _npyio_impl
+from . import _polynomial_impl as _polynomial_impl
+from . import _scimath_impl as _scimath_impl
+from . import _shape_base_impl as _shape_base_impl
+from . import _stride_tricks_impl as _stride_tricks_impl
+from . import _twodim_base_impl as _twodim_base_impl
+from . import _type_check_impl as _type_check_impl
+from . import _ufunclike_impl as _ufunclike_impl
+from . import _utils_impl as _utils_impl
+from . import _version as _version
+from . import array_utils, format, introspect, mixins, npyio, scimath, stride_tricks
+from ._arrayterator_impl import Arrayterator
+from ._version import NumpyVersion
+
+__all__ = [
+ "Arrayterator",
+ "add_docstring",
+ "add_newdoc",
+ "array_utils",
+ "format",
+ "introspect",
+ "mixins",
+ "NumpyVersion",
+ "npyio",
+ "scimath",
+ "stride_tricks",
+ "tracemalloc_domain",
+]
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000..1c5fea7
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/__init__.cpython-312.pyc
Binary files differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_array_utils_impl.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_array_utils_impl.cpython-312.pyc
new file mode 100644
index 0000000..9d1bad4
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_array_utils_impl.cpython-312.pyc
Binary files differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_arraypad_impl.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_arraypad_impl.cpython-312.pyc
new file mode 100644
index 0000000..cab5c63
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_arraypad_impl.cpython-312.pyc
Binary files differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_arraysetops_impl.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_arraysetops_impl.cpython-312.pyc
new file mode 100644
index 0000000..a234cd7
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_arraysetops_impl.cpython-312.pyc
Binary files differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_arrayterator_impl.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_arrayterator_impl.cpython-312.pyc
new file mode 100644
index 0000000..4cb830a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_arrayterator_impl.cpython-312.pyc
Binary files differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_datasource.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_datasource.cpython-312.pyc
new file mode 100644
index 0000000..5c27b16
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_datasource.cpython-312.pyc
Binary files differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_format_impl.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_format_impl.cpython-312.pyc
new file mode 100644
index 0000000..0948a7b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_format_impl.cpython-312.pyc
Binary files differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_function_base_impl.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_function_base_impl.cpython-312.pyc
new file mode 100644
index 0000000..124bec3
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_function_base_impl.cpython-312.pyc
Binary files differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_histograms_impl.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_histograms_impl.cpython-312.pyc
new file mode 100644
index 0000000..eb55724
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_histograms_impl.cpython-312.pyc
Binary files differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_index_tricks_impl.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_index_tricks_impl.cpython-312.pyc
new file mode 100644
index 0000000..d49d7a3
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_index_tricks_impl.cpython-312.pyc
Binary files differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_iotools.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_iotools.cpython-312.pyc
new file mode 100644
index 0000000..9c76d6c
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_iotools.cpython-312.pyc
Binary files differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_nanfunctions_impl.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_nanfunctions_impl.cpython-312.pyc
new file mode 100644
index 0000000..66783aa
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_nanfunctions_impl.cpython-312.pyc
Binary files differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_npyio_impl.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_npyio_impl.cpython-312.pyc
new file mode 100644
index 0000000..55e9a46
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_npyio_impl.cpython-312.pyc
Binary files differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_polynomial_impl.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_polynomial_impl.cpython-312.pyc
new file mode 100644
index 0000000..6dd6ded
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_polynomial_impl.cpython-312.pyc
Binary files differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_scimath_impl.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_scimath_impl.cpython-312.pyc
new file mode 100644
index 0000000..3b62648
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_scimath_impl.cpython-312.pyc
Binary files differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_shape_base_impl.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_shape_base_impl.cpython-312.pyc
new file mode 100644
index 0000000..e3c1fe4
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_shape_base_impl.cpython-312.pyc
Binary files differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_stride_tricks_impl.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_stride_tricks_impl.cpython-312.pyc
new file mode 100644
index 0000000..8cebc75
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_stride_tricks_impl.cpython-312.pyc
Binary files differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_twodim_base_impl.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_twodim_base_impl.cpython-312.pyc
new file mode 100644
index 0000000..753465d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_twodim_base_impl.cpython-312.pyc
Binary files differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_type_check_impl.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_type_check_impl.cpython-312.pyc
new file mode 100644
index 0000000..0baef8e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_type_check_impl.cpython-312.pyc
Binary files differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_ufunclike_impl.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_ufunclike_impl.cpython-312.pyc
new file mode 100644
index 0000000..2d4780d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_ufunclike_impl.cpython-312.pyc
Binary files differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_user_array_impl.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_user_array_impl.cpython-312.pyc
new file mode 100644
index 0000000..1ec40c3
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_user_array_impl.cpython-312.pyc
Binary files differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_utils_impl.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_utils_impl.cpython-312.pyc
new file mode 100644
index 0000000..f82bdde
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_utils_impl.cpython-312.pyc
Binary files differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_version.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_version.cpython-312.pyc
new file mode 100644
index 0000000..903b5a1
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/_version.cpython-312.pyc
Binary files differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/array_utils.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/array_utils.cpython-312.pyc
new file mode 100644
index 0000000..3b05c66
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/array_utils.cpython-312.pyc
Binary files differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/format.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/format.cpython-312.pyc
new file mode 100644
index 0000000..a5bf7ce
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/format.cpython-312.pyc
Binary files differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/introspect.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/introspect.cpython-312.pyc
new file mode 100644
index 0000000..202121b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/introspect.cpython-312.pyc
Binary files differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/mixins.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/mixins.cpython-312.pyc
new file mode 100644
index 0000000..a7e79bc
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/mixins.cpython-312.pyc
Binary files differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/npyio.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/npyio.cpython-312.pyc
new file mode 100644
index 0000000..c1d402b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/npyio.cpython-312.pyc
Binary files differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/recfunctions.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/recfunctions.cpython-312.pyc
new file mode 100644
index 0000000..a819167
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/recfunctions.cpython-312.pyc
Binary files differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/scimath.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/scimath.cpython-312.pyc
new file mode 100644
index 0000000..d3275b8
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/scimath.cpython-312.pyc
Binary files differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/stride_tricks.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/stride_tricks.cpython-312.pyc
new file mode 100644
index 0000000..a9dc97b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/stride_tricks.cpython-312.pyc
Binary files differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/user_array.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/user_array.cpython-312.pyc
new file mode 100644
index 0000000..f083806
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/__pycache__/user_array.cpython-312.pyc
Binary files differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/_array_utils_impl.py b/.venv/lib/python3.12/site-packages/numpy/lib/_array_utils_impl.py
new file mode 100644
index 0000000..c3996e1
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/_array_utils_impl.py
@@ -0,0 +1,62 @@
+"""
+Miscellaneous utils.
+"""
+from numpy._core import asarray
+from numpy._core.numeric import normalize_axis_index, normalize_axis_tuple
+from numpy._utils import set_module
+
+__all__ = ["byte_bounds", "normalize_axis_tuple", "normalize_axis_index"]
+
+
+@set_module("numpy.lib.array_utils")
+def byte_bounds(a):
+ """
+ Returns pointers to the end-points of an array.
+
+ Parameters
+ ----------
+ a : ndarray
+ Input array. It must conform to the Python-side of the array
+ interface.
+
+ Returns
+ -------
+ (low, high) : tuple of 2 integers
+ The first integer is the first byte of the array, the second
+ integer is just past the last byte of the array. If `a` is not
+ contiguous it will not use every byte between the (`low`, `high`)
+ values.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> I = np.eye(2, dtype='f'); I.dtype
+ dtype('float32')
+ >>> low, high = np.lib.array_utils.byte_bounds(I)
+ >>> high - low == I.size*I.itemsize
+ True
+ >>> I = np.eye(2); I.dtype
+ dtype('float64')
+ >>> low, high = np.lib.array_utils.byte_bounds(I)
+ >>> high - low == I.size*I.itemsize
+ True
+
+ """
+ ai = a.__array_interface__
+ a_data = ai['data'][0]
+ astrides = ai['strides']
+ ashape = ai['shape']
+ bytes_a = asarray(a).dtype.itemsize
+
+ a_low = a_high = a_data
+ if astrides is None:
+ # contiguous case
+ a_high += a.size * bytes_a
+ else:
+ for shape, stride in zip(ashape, astrides):
+ if stride < 0:
+ a_low += (shape - 1) * stride
+ else:
+ a_high += (shape - 1) * stride
+ a_high += bytes_a
+ return a_low, a_high
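+
+# A short usage sketch (editor's illustration; assumes NumPy 2.x, where the
+# public home of this function is numpy.lib.array_utils):
+#
+#     import numpy as np
+#     from numpy.lib.array_utils import byte_bounds
+#     a = np.arange(10, dtype=np.int64)
+#     low, high = byte_bounds(a)       # high - low == 80 == a.size * 8
+#     r = a[::-1]                      # negatively strided view
+#     byte_bounds(r) == (low, high)    # True: same memory block as `a`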
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/_array_utils_impl.pyi b/.venv/lib/python3.12/site-packages/numpy/lib/_array_utils_impl.pyi
new file mode 100644
index 0000000..d3e0714
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/_array_utils_impl.pyi
@@ -0,0 +1,26 @@
+from collections.abc import Iterable
+from typing import Any
+
+from numpy import generic
+from numpy.typing import NDArray
+
+__all__ = ["byte_bounds", "normalize_axis_tuple", "normalize_axis_index"]
+
+# NOTE: In practice `byte_bounds` can (potentially) take any object
+# implementing the `__array_interface__` protocol. The caveat is
+# that certain keys, marked as optional in the spec, must be present for
+# `byte_bounds`. This concerns `"strides"` and `"data"`.
+def byte_bounds(a: generic | NDArray[Any]) -> tuple[int, int]: ...
+
+def normalize_axis_tuple(
+ axis: int | Iterable[int],
+ ndim: int = ...,
+ argname: str | None = ...,
+ allow_duplicate: bool | None = ...,
+) -> tuple[int, ...]: ...
+
+def normalize_axis_index(
+ axis: int = ...,
+ ndim: int = ...,
+ msg_prefix: str | None = ...,
+) -> int: ...
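+
+# Runtime behaviour these stubs describe (editor's sketch, using only the
+# documented numpy.lib.array_utils functions):
+#
+#     from numpy.lib.array_utils import normalize_axis_index
+#     normalize_axis_index(-1, ndim=3)   # -> 2
+#     normalize_axis_index(3, ndim=3)    # raises AxisError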
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/_arraypad_impl.py b/.venv/lib/python3.12/site-packages/numpy/lib/_arraypad_impl.py
new file mode 100644
index 0000000..507a0ab
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/_arraypad_impl.py
@@ -0,0 +1,890 @@
+"""
+The arraypad module contains a group of functions to pad values onto the edges
+of an n-dimensional array.
+
+"""
+import numpy as np
+from numpy._core.overrides import array_function_dispatch
+from numpy.lib._index_tricks_impl import ndindex
+
+__all__ = ['pad']
+
+
+###############################################################################
+# Private utility functions.
+
+
+def _round_if_needed(arr, dtype):
+ """
+ Rounds arr inplace if destination dtype is integer.
+
+ Parameters
+ ----------
+ arr : ndarray
+ Input array.
+ dtype : dtype
+ The dtype of the destination array.
+ """
+ if np.issubdtype(dtype, np.integer):
+ arr.round(out=arr)
+
+
+def _slice_at_axis(sl, axis):
+ """
+ Construct tuple of slices to slice an array in the given dimension.
+
+ Parameters
+ ----------
+ sl : slice
+ The slice for the given dimension.
+ axis : int
+ The axis to which `sl` is applied. All other dimensions are left
+ "unsliced".
+
+ Returns
+ -------
+    sl : tuple of slices
+        A tuple of slices that applies `sl` along `axis` and leaves all
+        other dimensions unsliced.
+
+ Examples
+ --------
+    >>> _slice_at_axis(slice(None, 3, -1), 1)
+    (slice(None, None, None), slice(None, 3, -1), Ellipsis)
+ """
+ return (slice(None),) * axis + (sl,) + (...,)
+
+
+def _view_roi(array, original_area_slice, axis):
+ """
+ Get a view of the current region of interest during iterative padding.
+
+ When padding multiple dimensions iteratively corner values are
+ unnecessarily overwritten multiple times. This function reduces the
+ working area for the first dimensions so that corners are excluded.
+
+ Parameters
+ ----------
+ array : ndarray
+ The array with the region of interest.
+ original_area_slice : tuple of slices
+ Denotes the area with original values of the unpadded array.
+ axis : int
+ The currently padded dimension assuming that `axis` is padded before
+ `axis` + 1.
+
+ Returns
+ -------
+ roi : ndarray
+ The region of interest of the original `array`.
+ """
+ axis += 1
+ sl = (slice(None),) * axis + original_area_slice[axis:]
+ return array[sl]
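+
+# Editor's note (illustrative): for a padded (6, 7) array whose original
+# area is rows 2:4 and columns 2:5, _view_roi(padded, roi, axis=0) returns
+# padded[:, 2:5], i.e. every row but only the original columns, so the
+# corner blocks are written only once across the axis iteration.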
+
+
+def _pad_simple(array, pad_width, fill_value=None):
+ """
+ Pad array on all sides with either a single value or undefined values.
+
+ Parameters
+ ----------
+ array : ndarray
+ Array to grow.
+ pad_width : sequence of tuple[int, int]
+ Pad width on both sides for each dimension in `arr`.
+ fill_value : scalar, optional
+ If provided the padded area is filled with this value, otherwise
+        the pad area is left undefined.
+
+ Returns
+ -------
+ padded : ndarray
+        The padded array with the same dtype as `array`. Its order will default
+ to C-style if `array` is not F-contiguous.
+ original_area_slice : tuple
+ A tuple of slices pointing to the area of the original array.
+ """
+ # Allocate grown array
+ new_shape = tuple(
+ left + size + right
+ for size, (left, right) in zip(array.shape, pad_width)
+ )
+ order = 'F' if array.flags.fnc else 'C' # Fortran and not also C-order
+ padded = np.empty(new_shape, dtype=array.dtype, order=order)
+
+ if fill_value is not None:
+ padded.fill(fill_value)
+
+ # Copy old array into correct space
+ original_area_slice = tuple(
+ slice(left, left + size)
+ for size, (left, right) in zip(array.shape, pad_width)
+ )
+ padded[original_area_slice] = array
+
+ return padded, original_area_slice
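+
+# A small sketch (editor's illustration, using the private helper above):
+#
+#     import numpy as np
+#     arr = np.arange(6).reshape(2, 3)
+#     padded, roi = _pad_simple(arr, ((1, 1), (1, 1)), fill_value=0)
+#     padded.shape                       # (4, 5)
+#     bool((padded[roi] == arr).all())   # True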
+
+
+def _set_pad_area(padded, axis, width_pair, value_pair):
+ """
+ Set empty-padded area in given dimension.
+
+ Parameters
+ ----------
+ padded : ndarray
+ Array with the pad area which is modified inplace.
+ axis : int
+ Dimension with the pad area to set.
+ width_pair : (int, int)
+ Pair of widths that mark the pad area on both sides in the given
+ dimension.
+ value_pair : tuple of scalars or ndarrays
+ Values inserted into the pad area on each side. It must match or be
+ broadcastable to the shape of `arr`.
+ """
+ left_slice = _slice_at_axis(slice(None, width_pair[0]), axis)
+ padded[left_slice] = value_pair[0]
+
+ right_slice = _slice_at_axis(
+ slice(padded.shape[axis] - width_pair[1], None), axis)
+ padded[right_slice] = value_pair[1]
+
+
+def _get_edges(padded, axis, width_pair):
+ """
+ Retrieve edge values from empty-padded array in given dimension.
+
+ Parameters
+ ----------
+ padded : ndarray
+ Empty-padded array.
+ axis : int
+ Dimension in which the edges are considered.
+ width_pair : (int, int)
+ Pair of widths that mark the pad area on both sides in the given
+ dimension.
+
+ Returns
+ -------
+ left_edge, right_edge : ndarray
+ Edge values of the valid area in `padded` in the given dimension. Its
+ shape will always match `padded` except for the dimension given by
+ `axis` which will have a length of 1.
+ """
+ left_index = width_pair[0]
+ left_slice = _slice_at_axis(slice(left_index, left_index + 1), axis)
+ left_edge = padded[left_slice]
+
+ right_index = padded.shape[axis] - width_pair[1]
+ right_slice = _slice_at_axis(slice(right_index - 1, right_index), axis)
+ right_edge = padded[right_slice]
+
+ return left_edge, right_edge
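+
+# Editor's note (illustrative): with width_pair (2, 2) on axis 0 of a 6-row
+# empty-padded array, the valid area spans rows 2:4, so left_edge is
+# padded[2:3] and right_edge is padded[3:4], each keeping length 1 on axis 0.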
+
+
+def _get_linear_ramps(padded, axis, width_pair, end_value_pair):
+ """
+ Construct linear ramps for empty-padded array in given dimension.
+
+ Parameters
+ ----------
+ padded : ndarray
+ Empty-padded array.
+ axis : int
+ Dimension in which the ramps are constructed.
+ width_pair : (int, int)
+ Pair of widths that mark the pad area on both sides in the given
+ dimension.
+ end_value_pair : (scalar, scalar)
+ End values for the linear ramps which form the edge of the fully padded
+ array. These values are included in the linear ramps.
+
+ Returns
+ -------
+ left_ramp, right_ramp : ndarray
+ Linear ramps to set on both sides of `padded`.
+ """
+ edge_pair = _get_edges(padded, axis, width_pair)
+
+ left_ramp, right_ramp = (
+ np.linspace(
+ start=end_value,
+ stop=edge.squeeze(axis), # Dimension is replaced by linspace
+ num=width,
+ endpoint=False,
+ dtype=padded.dtype,
+ axis=axis
+ )
+ for end_value, edge, width in zip(
+ end_value_pair, edge_pair, width_pair
+ )
+ )
+
+ # Reverse linear space in appropriate dimension
+ right_ramp = right_ramp[_slice_at_axis(slice(None, None, -1), axis)]
+
+ return left_ramp, right_ramp
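+
+# Editor's sketch of the ramp construction (illustrative): with
+# endpoint=False the end value is included and the edge value is not, e.g.
+#
+#     np.linspace(0, 6, num=3, endpoint=False)   # array([0., 2., 4.])
+#
+# which becomes a left ramp running from the requested end_value toward
+# the edge of the valid area.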
+
+
+def _get_stats(padded, axis, width_pair, length_pair, stat_func):
+ """
+ Calculate statistic for the empty-padded array in given dimension.
+
+ Parameters
+ ----------
+ padded : ndarray
+ Empty-padded array.
+ axis : int
+ Dimension in which the statistic is calculated.
+ width_pair : (int, int)
+ Pair of widths that mark the pad area on both sides in the given
+ dimension.
+ length_pair : 2-element sequence of None or int
+ Gives the number of values in valid area from each side that is
+ taken into account when calculating the statistic. If None the entire
+ valid area in `padded` is considered.
+ stat_func : function
+ Function to compute statistic. The expected signature is
+ ``stat_func(x: ndarray, axis: int, keepdims: bool) -> ndarray``.
+
+ Returns
+ -------
+ left_stat, right_stat : ndarray
+ Calculated statistic for both sides of `padded`.
+ """
+ # Calculate indices of the edges of the area with original values
+ left_index = width_pair[0]
+ right_index = padded.shape[axis] - width_pair[1]
+ # as well as its length
+ max_length = right_index - left_index
+
+ # Limit stat_lengths to max_length
+ left_length, right_length = length_pair
+ if left_length is None or max_length < left_length:
+ left_length = max_length
+ if right_length is None or max_length < right_length:
+ right_length = max_length
+
+ if (left_length == 0 or right_length == 0) \
+ and stat_func in {np.amax, np.amin}:
+ # amax and amin can't operate on an empty array,
+        # raise a more descriptive error here instead of the default one
+ raise ValueError("stat_length of 0 yields no value for padding")
+
+ # Calculate statistic for the left side
+ left_slice = _slice_at_axis(
+ slice(left_index, left_index + left_length), axis)
+ left_chunk = padded[left_slice]
+ left_stat = stat_func(left_chunk, axis=axis, keepdims=True)
+ _round_if_needed(left_stat, padded.dtype)
+
+ if left_length == right_length == max_length:
+ # return early as right_stat must be identical to left_stat
+ return left_stat, left_stat
+
+ # Calculate statistic for the right side
+ right_slice = _slice_at_axis(
+ slice(right_index - right_length, right_index), axis)
+ right_chunk = padded[right_slice]
+ right_stat = stat_func(right_chunk, axis=axis, keepdims=True)
+ _round_if_needed(right_stat, padded.dtype)
+
+ return left_stat, right_stat
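+
+# Editor's sketch (illustrative): for valid values [1., 2., 3., 4.] on the
+# padded axis and length_pair (2, None), the left statistic is computed
+# over [1., 2.] and the right one over the whole valid area, so stat_func
+# np.mean would yield 1.5 on the left and 2.5 on the right.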
+
+
+def _set_reflect_both(padded, axis, width_pair, method,
+ original_period, include_edge=False):
+ """
+ Pad `axis` of `arr` with reflection.
+
+ Parameters
+ ----------
+ padded : ndarray
+ Input array of arbitrary shape.
+ axis : int
+ Axis along which to pad `arr`.
+ width_pair : (int, int)
+ Pair of widths that mark the pad area on both sides in the given
+ dimension.
+ method : str
+ Controls method of reflection; options are 'even' or 'odd'.
+ original_period : int
+ Original length of data on `axis` of `arr`.
+ include_edge : bool
+ If true, edge value is included in reflection, otherwise the edge
+ value forms the symmetric axis to the reflection.
+
+ Returns
+ -------
+ pad_amt : tuple of ints, length 2
+ New index positions of padding to do along the `axis`. If these are
+ both 0, padding is done in this dimension.
+ """
+ left_pad, right_pad = width_pair
+ old_length = padded.shape[axis] - right_pad - left_pad
+
+ if include_edge:
+ # Avoid wrapping with only a subset of the original area
+ # by ensuring period can only be a multiple of the original
+ # area's length.
+ old_length = old_length // original_period * original_period
+ # Edge is included, we need to offset the pad amount by 1
+ edge_offset = 1
+ else:
+ # Avoid wrapping with only a subset of the original area
+ # by ensuring period can only be a multiple of the original
+ # area's length.
+ old_length = ((old_length - 1) // (original_period - 1)
+ * (original_period - 1) + 1)
+ edge_offset = 0 # Edge is not included, no need to offset pad amount
+ old_length -= 1 # but must be omitted from the chunk
+
+ if left_pad > 0:
+ # Pad with reflected values on left side:
+ # First limit chunk size which can't be larger than pad area
+ chunk_length = min(old_length, left_pad)
+ # Slice right to left, stop on or next to edge, start relative to stop
+ stop = left_pad - edge_offset
+ start = stop + chunk_length
+ left_slice = _slice_at_axis(slice(start, stop, -1), axis)
+ left_chunk = padded[left_slice]
+
+ if method == "odd":
+ # Negate chunk and align with edge
+ edge_slice = _slice_at_axis(slice(left_pad, left_pad + 1), axis)
+ left_chunk = 2 * padded[edge_slice] - left_chunk
+
+ # Insert chunk into padded area
+ start = left_pad - chunk_length
+ stop = left_pad
+ pad_area = _slice_at_axis(slice(start, stop), axis)
+ padded[pad_area] = left_chunk
+ # Adjust pointer to left edge for next iteration
+ left_pad -= chunk_length
+
+ if right_pad > 0:
+ # Pad with reflected values on right side:
+ # First limit chunk size which can't be larger than pad area
+ chunk_length = min(old_length, right_pad)
+ # Slice right to left, start on or next to edge, stop relative to start
+ start = -right_pad + edge_offset - 2
+ stop = start - chunk_length
+ right_slice = _slice_at_axis(slice(start, stop, -1), axis)
+ right_chunk = padded[right_slice]
+
+ if method == "odd":
+ # Negate chunk and align with edge
+ edge_slice = _slice_at_axis(
+ slice(-right_pad - 1, -right_pad), axis)
+ right_chunk = 2 * padded[edge_slice] - right_chunk
+
+ # Insert chunk into padded area
+ start = padded.shape[axis] - right_pad
+ stop = start + chunk_length
+ pad_area = _slice_at_axis(slice(start, stop), axis)
+ padded[pad_area] = right_chunk
+ # Adjust pointer to right edge for next iteration
+ right_pad -= chunk_length
+
+ return left_pad, right_pad
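+
+# Public behaviour this helper implements (editor's illustration):
+#
+#     np.pad([1, 2, 3], 2, mode='reflect')    # [3, 2, 1, 2, 3, 2, 1]
+#     np.pad([1, 2, 3], 2, mode='symmetric')  # [2, 1, 1, 2, 3, 3, 2]
+#
+# 'reflect' mirrors about the edge values (include_edge=False), while
+# 'symmetric' repeats them (include_edge=True).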
+
+
+def _set_wrap_both(padded, axis, width_pair, original_period):
+ """
+ Pad `axis` of `arr` with wrapped values.
+
+ Parameters
+ ----------
+ padded : ndarray
+ Input array of arbitrary shape.
+ axis : int
+ Axis along which to pad `arr`.
+ width_pair : (int, int)
+ Pair of widths that mark the pad area on both sides in the given
+ dimension.
+ original_period : int
+ Original length of data on `axis` of `arr`.
+
+ Returns
+ -------
+ pad_amt : tuple of ints, length 2
+ New index positions of padding to do along the `axis`. If these are
+ both 0, padding is done in this dimension.
+ """
+ left_pad, right_pad = width_pair
+ period = padded.shape[axis] - right_pad - left_pad
+ # Avoid wrapping with only a subset of the original area by ensuring period
+ # can only be a multiple of the original area's length.
+ period = period // original_period * original_period
+
+ # If the current dimension of `arr` doesn't contain enough valid values
+ # (not part of the undefined pad area) we need to pad multiple times.
+ # Each time the pad area shrinks on both sides which is communicated with
+ # these variables.
+ new_left_pad = 0
+ new_right_pad = 0
+
+ if left_pad > 0:
+ # Pad with wrapped values on left side
+ # First slice chunk from left side of the non-pad area.
+ # Use min(period, left_pad) to ensure that chunk is not larger than
+ # pad area.
+ slice_end = left_pad + period
+ slice_start = slice_end - min(period, left_pad)
+ right_slice = _slice_at_axis(slice(slice_start, slice_end), axis)
+ right_chunk = padded[right_slice]
+
+ if left_pad > period:
+ # Chunk is smaller than pad area
+ pad_area = _slice_at_axis(slice(left_pad - period, left_pad), axis)
+ new_left_pad = left_pad - period
+ else:
+ # Chunk matches pad area
+ pad_area = _slice_at_axis(slice(None, left_pad), axis)
+ padded[pad_area] = right_chunk
+
+ if right_pad > 0:
+ # Pad with wrapped values on right side
+ # First slice chunk from right side of the non-pad area.
+ # Use min(period, right_pad) to ensure that chunk is not larger than
+ # pad area.
+ slice_start = -right_pad - period
+ slice_end = slice_start + min(period, right_pad)
+ left_slice = _slice_at_axis(slice(slice_start, slice_end), axis)
+ left_chunk = padded[left_slice]
+
+ if right_pad > period:
+ # Chunk is smaller than pad area
+ pad_area = _slice_at_axis(
+ slice(-right_pad, -right_pad + period), axis)
+ new_right_pad = right_pad - period
+ else:
+ # Chunk matches pad area
+ pad_area = _slice_at_axis(slice(-right_pad, None), axis)
+ padded[pad_area] = left_chunk
+
+ return new_left_pad, new_right_pad
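+
+# Public behaviour this helper implements (editor's illustration): when the
+# pad width exceeds the original period the helper is called repeatedly,
+# each call shrinking the remaining pad area.
+#
+#     np.pad([1, 2, 3], 4, mode='wrap')   # [3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1]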
+
+
+def _as_pairs(x, ndim, as_index=False):
+ """
+ Broadcast `x` to an array with the shape (`ndim`, 2).
+
+ A helper function for `pad` that prepares and validates arguments like
+ `pad_width` for iteration in pairs.
+
+ Parameters
+ ----------
+ x : {None, scalar, array-like}
+ The object to broadcast to the shape (`ndim`, 2).
+ ndim : int
+ Number of pairs the broadcasted `x` will have.
+ as_index : bool, optional
+ If `x` is not None, try to round each element of `x` to an integer
+        (dtype `np.intp`) and ensure every element is non-negative.
+
+ Returns
+ -------
+ pairs : nested iterables, shape (`ndim`, 2)
+ The broadcasted version of `x`.
+
+ Raises
+ ------
+ ValueError
+ If `as_index` is True and `x` contains negative elements.
+ Or if `x` is not broadcastable to the shape (`ndim`, 2).
+ """
+ if x is None:
+ # Pass through None as a special case, otherwise np.round(x) fails
+ # with an AttributeError
+ return ((None, None),) * ndim
+
+ x = np.array(x)
+ if as_index:
+ x = np.round(x).astype(np.intp, copy=False)
+
+ if x.ndim < 3:
+ # Optimization: Possibly use faster paths for cases where `x` has
+ # only 1 or 2 elements. `np.broadcast_to` could handle these as well
+ # but is currently slower
+
+ if x.size == 1:
+ # x was supplied as a single value
+ x = x.ravel() # Ensure x[0] works for x.ndim == 0, 1, 2
+ if as_index and x < 0:
+ raise ValueError("index can't contain negative values")
+ return ((x[0], x[0]),) * ndim
+
+ if x.size == 2 and x.shape != (2, 1):
+            # x was supplied with a single value for each side,
+            # except for the case when each dimension has a single value
+            # that should be broadcast to a pair,
+ # e.g. [[1], [2]] -> [[1, 1], [2, 2]] not [[1, 2], [1, 2]]
+ x = x.ravel() # Ensure x[0], x[1] works
+ if as_index and (x[0] < 0 or x[1] < 0):
+ raise ValueError("index can't contain negative values")
+ return ((x[0], x[1]),) * ndim
+
+ if as_index and x.min() < 0:
+ raise ValueError("index can't contain negative values")
+
+ # Converting the array with `tolist` seems to improve performance
+ # when iterating and indexing the result (see usage in `pad`)
+ return np.broadcast_to(x, (ndim, 2)).tolist()
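+
+# Editor's sketch (illustrative):
+#
+#     _as_pairs(3, 2, as_index=True)   # ((3, 3), (3, 3))
+#     _as_pairs((1, 2), 3)             # ((1, 2), (1, 2), (1, 2))
+#     _as_pairs([[1], [2]], 2)         # [[1, 1], [2, 2]] (per-axis scalars)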
+
+
+def _pad_dispatcher(array, pad_width, mode=None, **kwargs):
+ return (array,)
+
+
+###############################################################################
+# Public functions
+
+
+@array_function_dispatch(_pad_dispatcher, module='numpy')
+def pad(array, pad_width, mode='constant', **kwargs):
+ """
+ Pad an array.
+
+ Parameters
+ ----------
+ array : array_like of rank N
+ The array to pad.
+ pad_width : {sequence, array_like, int}
+ Number of values padded to the edges of each axis.
+ ``((before_1, after_1), ... (before_N, after_N))`` unique pad widths
+ for each axis.
+ ``(before, after)`` or ``((before, after),)`` yields same before
+ and after pad for each axis.
+ ``(pad,)`` or ``int`` is a shortcut for before = after = pad width
+ for all axes.
+ mode : str or function, optional
+ One of the following string values or a user supplied function.
+
+ 'constant' (default)
+ Pads with a constant value.
+ 'edge'
+ Pads with the edge values of array.
+ 'linear_ramp'
+ Pads with the linear ramp between end_value and the
+ array edge value.
+ 'maximum'
+ Pads with the maximum value of all or part of the
+ vector along each axis.
+ 'mean'
+ Pads with the mean value of all or part of the
+ vector along each axis.
+ 'median'
+ Pads with the median value of all or part of the
+ vector along each axis.
+ 'minimum'
+ Pads with the minimum value of all or part of the
+ vector along each axis.
+ 'reflect'
+ Pads with the reflection of the vector mirrored on
+ the first and last values of the vector along each
+ axis.
+ 'symmetric'
+ Pads with the reflection of the vector mirrored
+ along the edge of the array.
+ 'wrap'
+ Pads with the wrap of the vector along the axis.
+ The first values are used to pad the end and the
+ end values are used to pad the beginning.
+ 'empty'
+ Pads with undefined values.
+
+ <function>
+ Padding function, see Notes.
+ stat_length : sequence or int, optional
+ Used in 'maximum', 'mean', 'median', and 'minimum'. Number of
+ values at edge of each axis used to calculate the statistic value.
+
+ ``((before_1, after_1), ... (before_N, after_N))`` unique statistic
+ lengths for each axis.
+
+ ``(before, after)`` or ``((before, after),)`` yields same before
+ and after statistic lengths for each axis.
+
+        ``(stat_length,)`` or ``int`` is a shortcut for
+        ``before = after = stat_length`` for all axes.
+
+ Default is ``None``, to use the entire axis.
+ constant_values : sequence or scalar, optional
+ Used in 'constant'. The values to set the padded values for each
+ axis.
+
+ ``((before_1, after_1), ... (before_N, after_N))`` unique pad constants
+ for each axis.
+
+ ``(before, after)`` or ``((before, after),)`` yields same before
+ and after constants for each axis.
+
+ ``(constant,)`` or ``constant`` is a shortcut for
+ ``before = after = constant`` for all axes.
+
+ Default is 0.
+ end_values : sequence or scalar, optional
+ Used in 'linear_ramp'. The values used for the ending value of the
+ linear_ramp and that will form the edge of the padded array.
+
+ ``((before_1, after_1), ... (before_N, after_N))`` unique end values
+ for each axis.
+
+ ``(before, after)`` or ``((before, after),)`` yields same before
+ and after end values for each axis.
+
+ ``(constant,)`` or ``constant`` is a shortcut for
+ ``before = after = constant`` for all axes.
+
+ Default is 0.
+ reflect_type : {'even', 'odd'}, optional
+ Used in 'reflect', and 'symmetric'. The 'even' style is the
+ default with an unaltered reflection around the edge value. For
+ the 'odd' style, the extended part of the array is created by
+ subtracting the reflected values from two times the edge value.
+
+ Returns
+ -------
+ pad : ndarray
+ Padded array of rank equal to `array` with shape increased
+ according to `pad_width`.
+
+ Notes
+ -----
+ For an array with rank greater than 1, some of the padding of later
+ axes is calculated from padding of previous axes. This is easiest to
+ think about with a rank 2 array where the corners of the padded array
+ are calculated by using padded values from the first axis.
+
+ The padding function, if used, should modify a rank 1 array in-place. It
+ has the following signature::
+
+ padding_func(vector, iaxis_pad_width, iaxis, kwargs)
+
+ where
+
+ vector : ndarray
+ A rank 1 array already padded with zeros. Padded values are
+ vector[:iaxis_pad_width[0]] and vector[-iaxis_pad_width[1]:].
+ iaxis_pad_width : tuple
+ A 2-tuple of ints, iaxis_pad_width[0] represents the number of
+ values padded at the beginning of vector where
+ iaxis_pad_width[1] represents the number of values padded at
+ the end of vector.
+ iaxis : int
+ The axis currently being calculated.
+ kwargs : dict
+ Any keyword arguments the function requires.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> a = [1, 2, 3, 4, 5]
+ >>> np.pad(a, (2, 3), 'constant', constant_values=(4, 6))
+ array([4, 4, 1, ..., 6, 6, 6])
+
+ >>> np.pad(a, (2, 3), 'edge')
+ array([1, 1, 1, ..., 5, 5, 5])
+
+ >>> np.pad(a, (2, 3), 'linear_ramp', end_values=(5, -4))
+ array([ 5, 3, 1, 2, 3, 4, 5, 2, -1, -4])
+
+ >>> np.pad(a, (2,), 'maximum')
+ array([5, 5, 1, 2, 3, 4, 5, 5, 5])
+
+ >>> np.pad(a, (2,), 'mean')
+ array([3, 3, 1, 2, 3, 4, 5, 3, 3])
+
+ >>> np.pad(a, (2,), 'median')
+ array([3, 3, 1, 2, 3, 4, 5, 3, 3])
+
+ >>> a = [[1, 2], [3, 4]]
+ >>> np.pad(a, ((3, 2), (2, 3)), 'minimum')
+ array([[1, 1, 1, 2, 1, 1, 1],
+ [1, 1, 1, 2, 1, 1, 1],
+ [1, 1, 1, 2, 1, 1, 1],
+ [1, 1, 1, 2, 1, 1, 1],
+ [3, 3, 3, 4, 3, 3, 3],
+ [1, 1, 1, 2, 1, 1, 1],
+ [1, 1, 1, 2, 1, 1, 1]])
+
+ >>> a = [1, 2, 3, 4, 5]
+ >>> np.pad(a, (2, 3), 'reflect')
+ array([3, 2, 1, 2, 3, 4, 5, 4, 3, 2])
+
+ >>> np.pad(a, (2, 3), 'reflect', reflect_type='odd')
+ array([-1, 0, 1, 2, 3, 4, 5, 6, 7, 8])
+
+ >>> np.pad(a, (2, 3), 'symmetric')
+ array([2, 1, 1, 2, 3, 4, 5, 5, 4, 3])
+
+ >>> np.pad(a, (2, 3), 'symmetric', reflect_type='odd')
+ array([0, 1, 1, 2, 3, 4, 5, 5, 6, 7])
+
+ >>> np.pad(a, (2, 3), 'wrap')
+ array([4, 5, 1, 2, 3, 4, 5, 1, 2, 3])
+
+ >>> def pad_with(vector, pad_width, iaxis, kwargs):
+ ... pad_value = kwargs.get('padder', 10)
+ ... vector[:pad_width[0]] = pad_value
+ ... vector[-pad_width[1]:] = pad_value
+ >>> a = np.arange(6)
+ >>> a = a.reshape((2, 3))
+ >>> np.pad(a, 2, pad_with)
+ array([[10, 10, 10, 10, 10, 10, 10],
+ [10, 10, 10, 10, 10, 10, 10],
+ [10, 10, 0, 1, 2, 10, 10],
+ [10, 10, 3, 4, 5, 10, 10],
+ [10, 10, 10, 10, 10, 10, 10],
+ [10, 10, 10, 10, 10, 10, 10]])
+ >>> np.pad(a, 2, pad_with, padder=100)
+ array([[100, 100, 100, 100, 100, 100, 100],
+ [100, 100, 100, 100, 100, 100, 100],
+ [100, 100, 0, 1, 2, 100, 100],
+ [100, 100, 3, 4, 5, 100, 100],
+ [100, 100, 100, 100, 100, 100, 100],
+ [100, 100, 100, 100, 100, 100, 100]])
+ """
+ array = np.asarray(array)
+ pad_width = np.asarray(pad_width)
+
+ if pad_width.dtype.kind != 'i':
+ raise TypeError('`pad_width` must be of integral type.')
+
+ # Broadcast to shape (array.ndim, 2)
+ pad_width = _as_pairs(pad_width, array.ndim, as_index=True)
+
+ if callable(mode):
+ # Old behavior: Use user-supplied function with np.apply_along_axis
+ function = mode
+ # Create a new zero padded array
+ padded, _ = _pad_simple(array, pad_width, fill_value=0)
+ # And apply along each axis
+
+ for axis in range(padded.ndim):
+ # Iterate using ndindex as in apply_along_axis, but assuming that
+ # function operates inplace on the padded array.
+
+ # view with the iteration axis at the end
+ view = np.moveaxis(padded, axis, -1)
+
+ # compute indices for the iteration axes, and append a trailing
+ # ellipsis to prevent 0d arrays decaying to scalars (gh-8642)
+ inds = ndindex(view.shape[:-1])
+ inds = (ind + (Ellipsis,) for ind in inds)
+ for ind in inds:
+ function(view[ind], pad_width[axis], axis, kwargs)
+
+ return padded
+
+ # Make sure that no unsupported keywords were passed for the current mode
+ allowed_kwargs = {
+ 'empty': [], 'edge': [], 'wrap': [],
+ 'constant': ['constant_values'],
+ 'linear_ramp': ['end_values'],
+ 'maximum': ['stat_length'],
+ 'mean': ['stat_length'],
+ 'median': ['stat_length'],
+ 'minimum': ['stat_length'],
+ 'reflect': ['reflect_type'],
+ 'symmetric': ['reflect_type'],
+ }
+ try:
+ unsupported_kwargs = set(kwargs) - set(allowed_kwargs[mode])
+ except KeyError:
+ raise ValueError(f"mode '{mode}' is not supported") from None
+ if unsupported_kwargs:
+ raise ValueError("unsupported keyword arguments for mode "
+ f"'{mode}': {unsupported_kwargs}")
+
+ stat_functions = {"maximum": np.amax, "minimum": np.amin,
+ "mean": np.mean, "median": np.median}
+
+ # Create array with final shape and original values
+ # (padded area is undefined)
+ padded, original_area_slice = _pad_simple(array, pad_width)
+ # And prepare iteration over all dimensions
+ # (zipping may be more readable than using enumerate)
+ axes = range(padded.ndim)
+
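+ # Each of the mode branches below fills the still-undefined pad areas
+ # axis by axis; because earlier axes are completed first, the corner
+ # regions of later axes are computed from already-padded values (see
+ # the Notes section of the docstring).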
+ if mode == "constant":
+ values = kwargs.get("constant_values", 0)
+ values = _as_pairs(values, padded.ndim)
+ for axis, width_pair, value_pair in zip(axes, pad_width, values):
+ roi = _view_roi(padded, original_area_slice, axis)
+ _set_pad_area(roi, axis, width_pair, value_pair)
+
+ elif mode == "empty":
+ pass # Do nothing as _pad_simple already returned the correct result
+
+ elif array.size == 0:
+ # Only modes "constant" and "empty" can extend empty axes, all other
+ # modes depend on `array` not being empty
+ # -> ensure every empty axis is only "padded with 0"
+ for axis, width_pair in zip(axes, pad_width):
+ if array.shape[axis] == 0 and any(width_pair):
+ raise ValueError(
+ f"can't extend empty axis {axis} using modes other than "
+ "'constant' or 'empty'"
+ )
+ # passed, don't need to do anything more as _pad_simple already
+ # returned the correct result
+
+ elif mode == "edge":
+ for axis, width_pair in zip(axes, pad_width):
+ roi = _view_roi(padded, original_area_slice, axis)
+ edge_pair = _get_edges(roi, axis, width_pair)
+ _set_pad_area(roi, axis, width_pair, edge_pair)
+
+ elif mode == "linear_ramp":
+ end_values = kwargs.get("end_values", 0)
+ end_values = _as_pairs(end_values, padded.ndim)
+ for axis, width_pair, value_pair in zip(axes, pad_width, end_values):
+ roi = _view_roi(padded, original_area_slice, axis)
+ ramp_pair = _get_linear_ramps(roi, axis, width_pair, value_pair)
+ _set_pad_area(roi, axis, width_pair, ramp_pair)
+
+ elif mode in stat_functions:
+ func = stat_functions[mode]
+ length = kwargs.get("stat_length")
+ length = _as_pairs(length, padded.ndim, as_index=True)
+ for axis, width_pair, length_pair in zip(axes, pad_width, length):
+ roi = _view_roi(padded, original_area_slice, axis)
+ stat_pair = _get_stats(roi, axis, width_pair, length_pair, func)
+ _set_pad_area(roi, axis, width_pair, stat_pair)
+
+ elif mode in {"reflect", "symmetric"}:
+ method = kwargs.get("reflect_type", "even")
+ include_edge = mode == "symmetric"
+ for axis, (left_index, right_index) in zip(axes, pad_width):
+ if array.shape[axis] == 1 and (left_index > 0 or right_index > 0):
+ # Extending singleton dimension for 'reflect' is legacy
+ # behavior; it really should raise an error.
+ edge_pair = _get_edges(padded, axis, (left_index, right_index))
+ _set_pad_area(
+ padded, axis, (left_index, right_index), edge_pair)
+ continue
+
+ roi = _view_roi(padded, original_area_slice, axis)
+ while left_index > 0 or right_index > 0:
+ # Iteratively pad until dimension is filled with reflected
+ # values. This is necessary if the pad area is larger than
+ # the length of the original values in the current dimension.
+ left_index, right_index = _set_reflect_both(
+ roi, axis, (left_index, right_index),
+ method, array.shape[axis], include_edge
+ )
+
+ elif mode == "wrap":
+ for axis, (left_index, right_index) in zip(axes, pad_width):
+ roi = _view_roi(padded, original_area_slice, axis)
+ original_period = padded.shape[axis] - right_index - left_index
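+ # Length of the original data along this axis, i.e. the period with
+ # which the wrapped values must repeat.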
+ while left_index > 0 or right_index > 0:
+ # Iteratively pad until dimension is filled with wrapped
+ # values. This is necessary if the pad area is larger than
+ # the length of the original values in the current dimension.
+ left_index, right_index = _set_wrap_both(
+ roi, axis, (left_index, right_index), original_period)
+
+ return padded
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/_arraypad_impl.pyi b/.venv/lib/python3.12/site-packages/numpy/lib/_arraypad_impl.pyi
new file mode 100644
index 0000000..46b4376
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/_arraypad_impl.pyi
@@ -0,0 +1,89 @@
+from typing import (
+ Any,
+ Protocol,
+ TypeAlias,
+ TypeVar,
+ overload,
+ type_check_only,
+)
+from typing import (
+ Literal as L,
+)
+
+from numpy import generic
+from numpy._typing import (
+ ArrayLike,
+ NDArray,
+ _ArrayLike,
+ _ArrayLikeInt,
+)
+
+__all__ = ["pad"]
+
+_ScalarT = TypeVar("_ScalarT", bound=generic)
+
+@type_check_only
+class _ModeFunc(Protocol):
+ def __call__(
+ self,
+ vector: NDArray[Any],
+ iaxis_pad_width: tuple[int, int],
+ iaxis: int,
+ kwargs: dict[str, Any],
+ /,
+ ) -> None: ...
+
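+# For illustration, a conforming callable matches the ``pad_with`` example
+# from the implementation's docstring:
+#
+#     def pad_with(vector, iaxis_pad_width, iaxis, kwargs):
+#         vector[:iaxis_pad_width[0]] = kwargs.get('padder', 10)
+#         vector[-iaxis_pad_width[1]:] = kwargs.get('padder', 10)
+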
+_ModeKind: TypeAlias = L[
+ "constant",
+ "edge",
+ "linear_ramp",
+ "maximum",
+ "mean",
+ "median",
+ "minimum",
+ "reflect",
+ "symmetric",
+ "wrap",
+ "empty",
+]
+
+# TODO: In practice each keyword argument is exclusive to one or more
+# specific modes. Consider adding more overloads to express this in the future.
+
+# Expand `**kwargs` into explicit keyword-only arguments
+@overload
+def pad(
+ array: _ArrayLike[_ScalarT],
+ pad_width: _ArrayLikeInt,
+ mode: _ModeKind = ...,
+ *,
+ stat_length: _ArrayLikeInt | None = ...,
+ constant_values: ArrayLike = ...,
+ end_values: ArrayLike = ...,
+ reflect_type: L["odd", "even"] = ...,
+) -> NDArray[_ScalarT]: ...
+@overload
+def pad(
+ array: ArrayLike,
+ pad_width: _ArrayLikeInt,
+ mode: _ModeKind = ...,
+ *,
+ stat_length: _ArrayLikeInt | None = ...,
+ constant_values: ArrayLike = ...,
+ end_values: ArrayLike = ...,
+ reflect_type: L["odd", "even"] = ...,
+) -> NDArray[Any]: ...
+@overload
+def pad(
+ array: _ArrayLike[_ScalarT],
+ pad_width: _ArrayLikeInt,
+ mode: _ModeFunc,
+ **kwargs: Any,
+) -> NDArray[_ScalarT]: ...
+@overload
+def pad(
+ array: ArrayLike,
+ pad_width: _ArrayLikeInt,
+ mode: _ModeFunc,
+ **kwargs: Any,
+) -> NDArray[Any]: ...
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/_arraysetops_impl.py b/.venv/lib/python3.12/site-packages/numpy/lib/_arraysetops_impl.py
new file mode 100644
index 0000000..ef0739b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/_arraysetops_impl.py
@@ -0,0 +1,1260 @@
+"""
+Set operations for arrays based on sorting.
+
+Notes
+-----
+
+For floating point arrays, inaccurate results may appear due to the usual
+round-off and floating-point comparison issues.
+
+Speed could be gained in some operations by an implementation of
+`numpy.sort` that could directly provide the permutation vectors, thus
+avoiding calls to `numpy.argsort`.
+
+Original author: Robert Cimrman
+
+"""
+import functools
+import warnings
+from typing import NamedTuple
+
+import numpy as np
+from numpy._core import overrides
+from numpy._core._multiarray_umath import _array_converter, _unique_hash
+
+array_function_dispatch = functools.partial(
+ overrides.array_function_dispatch, module='numpy')
+
+
+__all__ = [
+ "ediff1d", "in1d", "intersect1d", "isin", "setdiff1d", "setxor1d",
+ "union1d", "unique", "unique_all", "unique_counts", "unique_inverse",
+ "unique_values"
+]
+
+
+def _ediff1d_dispatcher(ary, to_end=None, to_begin=None):
+ return (ary, to_end, to_begin)
+
+
+@array_function_dispatch(_ediff1d_dispatcher)
+def ediff1d(ary, to_end=None, to_begin=None):
+ """
+ The differences between consecutive elements of an array.
+
+ Parameters
+ ----------
+ ary : array_like
+ If necessary, will be flattened before the differences are taken.
+ to_end : array_like, optional
+ Number(s) to append at the end of the returned differences.
+ to_begin : array_like, optional
+ Number(s) to prepend at the beginning of the returned differences.
+
+ Returns
+ -------
+ ediff1d : ndarray
+ The differences. Loosely, this is ``ary.flat[1:] - ary.flat[:-1]``.
+
+ See Also
+ --------
+ diff, gradient
+
+ Notes
+ -----
+ When applied to masked arrays, this function drops the mask information
+ if the `to_begin` and/or `to_end` parameters are used.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> x = np.array([1, 2, 4, 7, 0])
+ >>> np.ediff1d(x)
+ array([ 1, 2, 3, -7])
+
+ >>> np.ediff1d(x, to_begin=-99, to_end=np.array([88, 99]))
+ array([-99, 1, 2, ..., -7, 88, 99])
+
+ The returned array is always 1D.
+
+ >>> y = [[1, 2, 4], [1, 6, 24]]
+ >>> np.ediff1d(y)
+ array([ 1, 2, -3, 5, 18])
+
+ """
+ conv = _array_converter(ary)
+ # Convert to (any) array and ravel:
+ ary = conv[0].ravel()
+
+ # enforce that the dtype of `ary` is used for the output
+ dtype_req = ary.dtype
+
+ # fast track default case
+ if to_begin is None and to_end is None:
+ return ary[1:] - ary[:-1]
+
+ if to_begin is None:
+ l_begin = 0
+ else:
+ to_begin = np.asanyarray(to_begin)
+ if not np.can_cast(to_begin, dtype_req, casting="same_kind"):
+ raise TypeError("dtype of `to_begin` must be compatible "
+ "with input `ary` under the `same_kind` rule.")
+
+ to_begin = to_begin.ravel()
+ l_begin = len(to_begin)
+
+ if to_end is None:
+ l_end = 0
+ else:
+ to_end = np.asanyarray(to_end)
+ if not np.can_cast(to_end, dtype_req, casting="same_kind"):
+ raise TypeError("dtype of `to_end` must be compatible "
+ "with input `ary` under the `same_kind` rule.")
+
+ to_end = to_end.ravel()
+ l_end = len(to_end)
+
+ # do the calculation in place and copy to_begin and to_end
+ l_diff = max(len(ary) - 1, 0)
+ result = np.empty_like(ary, shape=l_diff + l_begin + l_end)
+
+ if l_begin > 0:
+ result[:l_begin] = to_begin
+ if l_end > 0:
+ result[l_begin + l_diff:] = to_end
+ np.subtract(ary[1:], ary[:-1], result[l_begin:l_begin + l_diff])
+
+ return conv.wrap(result)
+
+
+def _unpack_tuple(x):
+ """ Unpacks one-element tuples for use as return values """
+ if len(x) == 1:
+ return x[0]
+ else:
+ return x
+
+
+def _unique_dispatcher(ar, return_index=None, return_inverse=None,
+ return_counts=None, axis=None, *, equal_nan=None,
+ sorted=True):
+ return (ar,)
+
+
+@array_function_dispatch(_unique_dispatcher)
+def unique(ar, return_index=False, return_inverse=False,
+ return_counts=False, axis=None, *, equal_nan=True,
+ sorted=True):
+ """
+ Find the unique elements of an array.
+
+ Returns the sorted unique elements of an array. There are three optional
+ outputs in addition to the unique elements:
+
+ * the indices of the input array that give the unique values
+ * the indices of the unique array that reconstruct the input array
+ * the number of times each unique value comes up in the input array
+
+ Parameters
+ ----------
+ ar : array_like
+ Input array. Unless `axis` is specified, this will be flattened if it
+ is not already 1-D.
+ return_index : bool, optional
+ If True, also return the indices of `ar` (along the specified axis,
+ if provided, or in the flattened array) that result in the unique array.
+ return_inverse : bool, optional
+ If True, also return the indices of the unique array (for the specified
+ axis, if provided) that can be used to reconstruct `ar`.
+ return_counts : bool, optional
+ If True, also return the number of times each unique item appears
+ in `ar`.
+ axis : int or None, optional
+ The axis to operate on. If None, `ar` will be flattened. If an integer,
+ the subarrays indexed by the given axis will be flattened and treated
+ as the elements of a 1-D array with the dimension of the given axis,
+ see the notes for more details. Object arrays or structured arrays
+ that contain objects are not supported if the `axis` kwarg is used. The
+ default is None.
+
+ equal_nan : bool, optional
+ If True, collapses multiple NaN values in the return array into one.
+
+ .. versionadded:: 1.24
+
+ sorted : bool, optional
+ If True, the unique elements are sorted. Elements may be sorted in
+ practice even if ``sorted=False``, but this could change without
+ notice.
+
+ .. versionadded:: 2.3
+
+ Returns
+ -------
+ unique : ndarray
+ The sorted unique values.
+ unique_indices : ndarray, optional
+ The indices of the first occurrences of the unique values in the
+ original array. Only provided if `return_index` is True.
+ unique_inverse : ndarray, optional
+ The indices to reconstruct the original array from the
+ unique array. Only provided if `return_inverse` is True.
+ unique_counts : ndarray, optional
+ The number of times each of the unique values comes up in the
+ original array. Only provided if `return_counts` is True.
+
+ See Also
+ --------
+ repeat : Repeat elements of an array.
+ sort : Return a sorted copy of an array.
+
+ Notes
+ -----
+ When an axis is specified the subarrays indexed by the axis are sorted.
+ This is done by making the specified axis the first dimension of the array
+ (move the axis to the first dimension to keep the order of the other axes)
+ and then flattening the subarrays in C order. The flattened subarrays are
+ then viewed as a structured type with each element given a label, with the
+ effect that we end up with a 1-D array of structured types that can be
+ treated in the same way as any other 1-D array. The result is that the
+ flattened subarrays are sorted in lexicographic order starting with the
+ first element.
+
+ .. versionchanged:: 1.21
+ Like np.sort, NaN will sort to the end of the values.
+ For complex arrays all NaN values are considered equivalent
+ (no matter whether the NaN is in the real or imaginary part).
+ As the representative for the returned array, the smallest one in
+ lexicographical order is chosen - see np.sort for how the
+ lexicographical order is defined for complex arrays.
+
+ .. versionchanged:: 2.0
+ For multi-dimensional inputs, ``unique_inverse`` is reshaped
+ such that the input can be reconstructed using
+ ``np.take(unique, unique_inverse, axis=axis)``. The result is
+ now not 1-dimensional when ``axis=None``.
+
+ Note that in NumPy 2.0.0 a higher dimensional array was also returned
+ when ``axis`` was not ``None``. This was reverted, but
+ ``inverse.reshape(-1)`` can be used to ensure compatibility with both
+ versions.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.unique([1, 1, 2, 2, 3, 3])
+ array([1, 2, 3])
+ >>> a = np.array([[1, 1], [2, 3]])
+ >>> np.unique(a)
+ array([1, 2, 3])
+
+ Return the unique rows of a 2D array
+
+ >>> a = np.array([[1, 0, 0], [1, 0, 0], [2, 3, 4]])
+ >>> np.unique(a, axis=0)
+ array([[1, 0, 0],
+ [2, 3, 4]])
+
+ Return the indices of the original array that give the unique values:
+
+ >>> a = np.array(['a', 'b', 'b', 'c', 'a'])
+ >>> u, indices = np.unique(a, return_index=True)
+ >>> u
+ array(['a', 'b', 'c'], dtype='<U1')
+ >>> indices
+ array([0, 1, 3])
+ >>> a[indices]
+ array(['a', 'b', 'c'], dtype='<U1')
+
+ Reconstruct the input array from the unique values and inverse:
+
+ >>> a = np.array([1, 2, 6, 4, 2, 3, 2])
+ >>> u, indices = np.unique(a, return_inverse=True)
+ >>> u
+ array([1, 2, 3, 4, 6])
+ >>> indices
+ array([0, 1, 4, 3, 1, 2, 1])
+ >>> u[indices]
+ array([1, 2, 6, 4, 2, 3, 2])
+
+ Reconstruct the input values from the unique values and counts:
+
+ >>> a = np.array([1, 2, 6, 4, 2, 3, 2])
+ >>> values, counts = np.unique(a, return_counts=True)
+ >>> values
+ array([1, 2, 3, 4, 6])
+ >>> counts
+ array([1, 3, 1, 1, 1])
+ >>> np.repeat(values, counts)
+ array([1, 2, 2, 2, 3, 4, 6]) # original order not preserved
+
+ """
+ ar = np.asanyarray(ar)
+ if axis is None:
+ ret = _unique1d(ar, return_index, return_inverse, return_counts,
+ equal_nan=equal_nan, inverse_shape=ar.shape, axis=None,
+ sorted=sorted)
+ return _unpack_tuple(ret)
+
+ # axis was specified and not None
+ try:
+ ar = np.moveaxis(ar, axis, 0)
+ except np.exceptions.AxisError:
+ # this removes the "axis1" or "axis2" prefix from the error message
+ raise np.exceptions.AxisError(axis, ar.ndim) from None
+ inverse_shape = [1] * ar.ndim
+ inverse_shape[axis] = ar.shape[0]
+
+ # Must reshape to a contiguous 2D array for this to work...
+ orig_shape, orig_dtype = ar.shape, ar.dtype
+ ar = ar.reshape(orig_shape[0], np.prod(orig_shape[1:], dtype=np.intp))
+ ar = np.ascontiguousarray(ar)
+ dtype = [(f'f{i}', ar.dtype) for i in range(ar.shape[1])]
+
+ # At this point, `ar` has shape `(n, m)`, and `dtype` is a structured
+ # data type with `m` fields where each field has the data type of `ar`.
+ # In the following, we create the array `consolidated`, which has
+ # shape `(n,)` with data type `dtype`.
+ try:
+ if ar.shape[1] > 0:
+ consolidated = ar.view(dtype)
+ else:
+ # If ar.shape[1] == 0, then dtype will be `np.dtype([])`, which is
+ # a data type with itemsize 0, and the call `ar.view(dtype)` will
+ # fail. Instead, we'll use `np.empty` to explicitly create the
+ # array with shape `(len(ar),)`. Because `dtype` in this case has
+ # itemsize 0, the total size of the result is still 0 bytes.
+ consolidated = np.empty(len(ar), dtype=dtype)
+ except TypeError as e:
+ # There's no good way to do this for object arrays, etc...
+ msg = 'The axis argument to unique is not supported for dtype {dt}'
+ raise TypeError(msg.format(dt=ar.dtype)) from e
+
+ def reshape_uniq(uniq):
+ n = len(uniq)
+ uniq = uniq.view(orig_dtype)
+ uniq = uniq.reshape(n, *orig_shape[1:])
+ uniq = np.moveaxis(uniq, 0, axis)
+ return uniq
+
+ output = _unique1d(consolidated, return_index,
+ return_inverse, return_counts,
+ equal_nan=equal_nan, inverse_shape=inverse_shape,
+ axis=axis, sorted=sorted)
+ output = (reshape_uniq(output[0]),) + output[1:]
+ return _unpack_tuple(output)
+
+
+def _unique1d(ar, return_index=False, return_inverse=False,
+ return_counts=False, *, equal_nan=True, inverse_shape=None,
+ axis=None, sorted=True):
+ """
+ Find the unique elements of an array, ignoring shape.
+
+ Uses a hash table to find the unique elements if possible.
+ """
+ ar = np.asanyarray(ar).flatten()
+ if len(ar.shape) != 1:
+ # np.matrix, and maybe some other array subclasses, insist on keeping
+ # two dimensions for all operations. Coerce to an ndarray in such cases.
+ ar = np.asarray(ar).flatten()
+
+ optional_indices = return_index or return_inverse
+
+ # masked arrays are not supported yet.
+ if not optional_indices and not return_counts and not np.ma.is_masked(ar):
+ # First we convert the array to a numpy array, later we wrap it back
+ # in case it was a subclass of numpy.ndarray.
+ conv = _array_converter(ar)
+ ar_, = conv
+
+ if (hash_unique := _unique_hash(ar_)) is not NotImplemented:
+ if sorted:
+ hash_unique.sort()
+ # We wrap the result back in case it was a subclass of numpy.ndarray.
+ return (conv.wrap(hash_unique),)
+
+ # If we don't use the hash map, we use the slower sorting method.
+ if optional_indices:
+ perm = ar.argsort(kind='mergesort' if return_index else 'quicksort')
+ aux = ar[perm]
+ else:
+ ar.sort()
+ aux = ar
+ mask = np.empty(aux.shape, dtype=np.bool)
+ mask[:1] = True
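+ # `aux` is sorted, so an element is unique iff it differs from its
+ # predecessor; the first element is always kept.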
+ if (equal_nan and aux.shape[0] > 0 and aux.dtype.kind in "cfmM" and
+ np.isnan(aux[-1])):
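+ # NaNs sort to the end, so they form a suffix of `aux`; locate the
+ # first NaN and keep a single representative of the whole run.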
+ if aux.dtype.kind == "c": # for complex all NaNs are considered equivalent
+ aux_firstnan = np.searchsorted(np.isnan(aux), True, side='left')
+ else:
+ aux_firstnan = np.searchsorted(aux, aux[-1], side='left')
+ if aux_firstnan > 0:
+ mask[1:aux_firstnan] = (
+ aux[1:aux_firstnan] != aux[:aux_firstnan - 1])
+ mask[aux_firstnan] = True
+ mask[aux_firstnan + 1:] = False
+ else:
+ mask[1:] = aux[1:] != aux[:-1]
+
+ ret = (aux[mask],)
+ if return_index:
+ ret += (perm[mask],)
+ if return_inverse:
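+ # A running count of the uniques seen so far assigns each sorted
+ # element its unique-value index; scattering through `perm` restores
+ # the original order.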
+ imask = np.cumsum(mask) - 1
+ inv_idx = np.empty(mask.shape, dtype=np.intp)
+ inv_idx[perm] = imask
+ ret += (inv_idx.reshape(inverse_shape) if axis is None else inv_idx,)
+ if return_counts:
+ idx = np.concatenate(np.nonzero(mask) + ([mask.size],))
+ ret += (np.diff(idx),)
+ return ret
+
+
+# Array API set functions
+
+class UniqueAllResult(NamedTuple):
+ values: np.ndarray
+ indices: np.ndarray
+ inverse_indices: np.ndarray
+ counts: np.ndarray
+
+
+class UniqueCountsResult(NamedTuple):
+ values: np.ndarray
+ counts: np.ndarray
+
+
+class UniqueInverseResult(NamedTuple):
+ values: np.ndarray
+ inverse_indices: np.ndarray
+
+
+def _unique_all_dispatcher(x, /):
+ return (x,)
+
+
+@array_function_dispatch(_unique_all_dispatcher)
+def unique_all(x):
+ """
+ Find the unique elements of an array, and counts, inverse, and indices.
+
+ This function is an Array API compatible alternative to::
+
+ np.unique(x, return_index=True, return_inverse=True,
+ return_counts=True, equal_nan=False, sorted=False)
+
+ but returns a namedtuple for easier access to each output.
+
+ .. note::
+ This function currently always returns a sorted result, however,
+ this could change in any NumPy minor release.
+
+ Parameters
+ ----------
+ x : array_like
+ Input array. It will be flattened if it is not already 1-D.
+
+ Returns
+ -------
+ out : namedtuple
+ The result containing:
+
+ * values - The unique elements of an input array.
+ * indices - The first occurring indices for each unique element.
+ * inverse_indices - The indices from the set of unique elements
+ that reconstruct `x`.
+ * counts - The corresponding counts for each unique element.
+
+ See Also
+ --------
+ unique : Find the unique elements of an array.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> x = [1, 1, 2]
+ >>> uniq = np.unique_all(x)
+ >>> uniq.values
+ array([1, 2])
+ >>> uniq.indices
+ array([0, 2])
+ >>> uniq.inverse_indices
+ array([0, 0, 1])
+ >>> uniq.counts
+ array([2, 1])
+ """
+ result = unique(
+ x,
+ return_index=True,
+ return_inverse=True,
+ return_counts=True,
+ equal_nan=False,
+ )
+ return UniqueAllResult(*result)
+
+
+def _unique_counts_dispatcher(x, /):
+ return (x,)
+
+
+@array_function_dispatch(_unique_counts_dispatcher)
+def unique_counts(x):
+ """
+ Find the unique elements and counts of an input array `x`.
+
+ This function is an Array API compatible alternative to::
+
+ np.unique(x, return_counts=True, equal_nan=False, sorted=False)
+
+ but returns a namedtuple for easier access to each output.
+
+ .. note::
+ This function currently always returns a sorted result, however,
+ this could change in any NumPy minor release.
+
+ Parameters
+ ----------
+ x : array_like
+ Input array. It will be flattened if it is not already 1-D.
+
+ Returns
+ -------
+ out : namedtuple
+ The result containing:
+
+ * values - The unique elements of an input array.
+ * counts - The corresponding counts for each unique element.
+
+ See Also
+ --------
+ unique : Find the unique elements of an array.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> x = [1, 1, 2]
+ >>> uniq = np.unique_counts(x)
+ >>> uniq.values
+ array([1, 2])
+ >>> uniq.counts
+ array([2, 1])
+ """
+ result = unique(
+ x,
+ return_index=False,
+ return_inverse=False,
+ return_counts=True,
+ equal_nan=False,
+ )
+ return UniqueCountsResult(*result)
+
+
+def _unique_inverse_dispatcher(x, /):
+ return (x,)
+
+
+@array_function_dispatch(_unique_inverse_dispatcher)
+def unique_inverse(x):
+ """
+ Find the unique elements of `x` and indices to reconstruct `x`.
+
+ This function is an Array API compatible alternative to::
+
+ np.unique(x, return_inverse=True, equal_nan=False, sorted=False)
+
+ but returns a namedtuple for easier access to each output.
+
+ .. note::
+ This function currently always returns a sorted result, however,
+ this could change in any NumPy minor release.
+
+ Parameters
+ ----------
+ x : array_like
+ Input array. It will be flattened if it is not already 1-D.
+
+ Returns
+ -------
+ out : namedtuple
+ The result containing:
+
+ * values - The unique elements of an input array.
+ * inverse_indices - The indices from the set of unique elements
+ that reconstruct `x`.
+
+ See Also
+ --------
+ unique : Find the unique elements of an array.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> x = [1, 1, 2]
+ >>> uniq = np.unique_inverse(x)
+ >>> uniq.values
+ array([1, 2])
+ >>> uniq.inverse_indices
+ array([0, 0, 1])
+ """
+ result = unique(
+ x,
+ return_index=False,
+ return_inverse=True,
+ return_counts=False,
+ equal_nan=False,
+ )
+ return UniqueInverseResult(*result)
+
+
+def _unique_values_dispatcher(x, /):
+ return (x,)
+
+
+@array_function_dispatch(_unique_values_dispatcher)
+def unique_values(x):
+ """
+ Returns the unique elements of an input array `x`.
+
+ This function is an Array API compatible alternative to::
+
+ np.unique(x, equal_nan=False, sorted=False)
+
+ .. versionchanged:: 2.3
+ The algorithm was changed to a faster one that does not rely on
+ sorting, and hence the results are no longer implicitly sorted.
+
+ Parameters
+ ----------
+ x : array_like
+ Input array. It will be flattened if it is not already 1-D.
+
+ Returns
+ -------
+ out : ndarray
+ The unique elements of an input array.
+
+ See Also
+ --------
+ unique : Find the unique elements of an array.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.unique_values([1, 1, 2])
+ array([1, 2]) # may vary
+
+ """
+ return unique(
+ x,
+ return_index=False,
+ return_inverse=False,
+ return_counts=False,
+ equal_nan=False,
+ sorted=False,
+ )
+
+
+def _intersect1d_dispatcher(
+ ar1, ar2, assume_unique=None, return_indices=None):
+ return (ar1, ar2)
+
+
+@array_function_dispatch(_intersect1d_dispatcher)
+def intersect1d(ar1, ar2, assume_unique=False, return_indices=False):
+ """
+ Find the intersection of two arrays.
+
+ Return the sorted, unique values that are in both of the input arrays.
+
+ Parameters
+ ----------
+ ar1, ar2 : array_like
+ Input arrays. Will be flattened if not already 1D.
+ assume_unique : bool
+ If True, the input arrays are both assumed to be unique, which
+ can speed up the calculation. If True but ``ar1`` or ``ar2`` are not
+ unique, incorrect results and out-of-bounds indices could result.
+ Default is False.
+ return_indices : bool
+ If True, the indices which correspond to the intersection of the two
+ arrays are returned. The first instance of a value is used if there are
+ multiple. Default is False.
+
+ Returns
+ -------
+ intersect1d : ndarray
+ Sorted 1D array of common and unique elements.
+ comm1 : ndarray
+ The indices of the first occurrences of the common values in `ar1`.
+ Only provided if `return_indices` is True.
+ comm2 : ndarray
+ The indices of the first occurrences of the common values in `ar2`.
+ Only provided if `return_indices` is True.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.intersect1d([1, 3, 4, 3], [3, 1, 2, 1])
+ array([1, 3])
+
+ To intersect more than two arrays, use functools.reduce:
+
+ >>> from functools import reduce
+ >>> reduce(np.intersect1d, ([1, 3, 4, 3], [3, 1, 2, 1], [6, 3, 4, 2]))
+ array([3])
+
+ To return the indices of the values common to the input arrays
+ along with the intersected values:
+
+ >>> x = np.array([1, 1, 2, 3, 4])
+ >>> y = np.array([2, 1, 4, 6])
+ >>> xy, x_ind, y_ind = np.intersect1d(x, y, return_indices=True)
+ >>> x_ind, y_ind
+ (array([0, 2, 4]), array([1, 0, 2]))
+ >>> xy, x[x_ind], y[y_ind]
+ (array([1, 2, 4]), array([1, 2, 4]), array([1, 2, 4]))
+
+ """
+ ar1 = np.asanyarray(ar1)
+ ar2 = np.asanyarray(ar2)
+
+ if not assume_unique:
+ if return_indices:
+ ar1, ind1 = unique(ar1, return_index=True)
+ ar2, ind2 = unique(ar2, return_index=True)
+ else:
+ ar1 = unique(ar1)
+ ar2 = unique(ar2)
+ else:
+ ar1 = ar1.ravel()
+ ar2 = ar2.ravel()
+
+ aux = np.concatenate((ar1, ar2))
+ if return_indices:
+ aux_sort_indices = np.argsort(aux, kind='mergesort')
+ aux = aux[aux_sort_indices]
+ else:
+ aux.sort()
+
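+ # Both inputs are unique per-array at this point, so any value occurs
+ # at most twice in `aux`; adjacent equal values mark the intersection.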
+ mask = aux[1:] == aux[:-1]
+ int1d = aux[:-1][mask]
+
+ if return_indices:
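+ # The mergesort above is stable, so within each duplicate pair the
+ # entry from `ar1` comes first; the second entry indexes into `ar2`
+ # after subtracting `ar1.size`.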
+ ar1_indices = aux_sort_indices[:-1][mask]
+ ar2_indices = aux_sort_indices[1:][mask] - ar1.size
+ if not assume_unique:
+ ar1_indices = ind1[ar1_indices]
+ ar2_indices = ind2[ar2_indices]
+
+ return int1d, ar1_indices, ar2_indices
+ else:
+ return int1d
+
+
+def _setxor1d_dispatcher(ar1, ar2, assume_unique=None):
+ return (ar1, ar2)
+
+
+@array_function_dispatch(_setxor1d_dispatcher)
+def setxor1d(ar1, ar2, assume_unique=False):
+ """
+ Find the set exclusive-or of two arrays.
+
+ Return the sorted, unique values that are in only one (not both) of the
+ input arrays.
+
+ Parameters
+ ----------
+ ar1, ar2 : array_like
+ Input arrays.
+ assume_unique : bool
+ If True, the input arrays are both assumed to be unique, which
+ can speed up the calculation. Default is False.
+
+ Returns
+ -------
+ setxor1d : ndarray
+ Sorted 1D array of unique values that are in only one of the input
+ arrays.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> a = np.array([1, 2, 3, 2, 4])
+ >>> b = np.array([2, 3, 5, 7, 5])
+ >>> np.setxor1d(a, b)
+ array([1, 4, 5, 7])
+
+ """
+ if not assume_unique:
+ ar1 = unique(ar1)
+ ar2 = unique(ar2)
+
+ aux = np.concatenate((ar1, ar2), axis=None)
+ if aux.size == 0:
+ return aux
+
+ aux.sort()
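+ # An element belongs to the symmetric difference iff it occurs exactly
+ # once in the sorted concatenation, i.e. differs from both neighbours.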
+ flag = np.concatenate(([True], aux[1:] != aux[:-1], [True]))
+ return aux[flag[1:] & flag[:-1]]
+
+
+def _in1d_dispatcher(ar1, ar2, assume_unique=None, invert=None, *,
+ kind=None):
+ return (ar1, ar2)
+
+
+@array_function_dispatch(_in1d_dispatcher)
+def in1d(ar1, ar2, assume_unique=False, invert=False, *, kind=None):
+ """
+ Test whether each element of a 1-D array is also present in a second array.
+
+ .. deprecated:: 2.0
+ Use :func:`isin` instead of `in1d` for new code.
+
+ Returns a boolean array the same length as `ar1` that is True
+ where an element of `ar1` is in `ar2` and False otherwise.
+
+ Parameters
+ ----------
+ ar1 : (M,) array_like
+ Input array.
+ ar2 : array_like
+ The values against which to test each value of `ar1`.
+ assume_unique : bool, optional
+ If True, the input arrays are both assumed to be unique, which
+ can speed up the calculation. Default is False.
+ invert : bool, optional
+ If True, the values in the returned array are inverted (that is,
+ False where an element of `ar1` is in `ar2` and True otherwise).
+ Default is False. ``np.in1d(a, b, invert=True)`` is equivalent
+ to (but faster than) ``np.invert(np.in1d(a, b))``.
+ kind : {None, 'sort', 'table'}, optional
+ The algorithm to use. This will not affect the final result,
+ but will affect the speed and memory use. The default, None,
+ will select automatically based on memory considerations.
+
+ * If 'sort', will use a mergesort-based approach. This will have
+ a memory usage of roughly 6 times the sum of the sizes of
+ `ar1` and `ar2`, not accounting for size of dtypes.
+ * If 'table', will use a lookup table approach similar
+ to a counting sort. This is only available for boolean and
+ integer arrays. This will have a memory usage of the
+ size of `ar1` plus the max-min value of `ar2`. `assume_unique`
+ has no effect when the 'table' option is used.
+ * If None, will automatically choose 'table' if
+ the required memory allocation is less than or equal to
+ 6 times the sum of the sizes of `ar1` and `ar2`,
+ otherwise will use 'sort'. This is done to not use
+ a large amount of memory by default, even though
+ 'table' may be faster in most cases. If 'table' is chosen,
+ `assume_unique` will have no effect.
+
+ Returns
+ -------
+ in1d : (M,) ndarray, bool
+ The values `ar1[in1d]` are in `ar2`.
+
+ See Also
+ --------
+ isin : Version of this function that preserves the
+ shape of ar1.
+
+ Notes
+ -----
+ `in1d` can be considered as an element-wise function version of the
+ python keyword `in`, for 1-D sequences. ``in1d(a, b)`` is roughly
+ equivalent to ``np.array([item in b for item in a])``.
+ However, this idea fails if `ar2` is a set, or similar (non-sequence)
+ container: as ``ar2`` is converted to an array, in those cases
+ ``asarray(ar2)`` is an object array rather than the expected array of
+ contained values.
+
+ Using ``kind='table'`` tends to be faster than ``kind='sort'`` if the
+ following relationship is true:
+ ``log10(len(ar2)) > (log10(max(ar2)-min(ar2)) - 2.27) / 0.927``,
+ but may use greater memory. The default value for `kind` will
+ be automatically selected based only on memory usage, so one may
+ manually set ``kind='table'`` if memory constraints can be relaxed.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> test = np.array([0, 1, 2, 5, 0])
+ >>> states = [0, 2]
+ >>> mask = np.in1d(test, states)
+ >>> mask
+ array([ True, False, True, False, True])
+ >>> test[mask]
+ array([0, 2, 0])
+ >>> mask = np.in1d(test, states, invert=True)
+ >>> mask
+ array([False, True, False, True, False])
+ >>> test[mask]
+ array([1, 5])
+ """
+
+ # Deprecated in NumPy 2.0, 2023-08-18
+ warnings.warn(
+ "`in1d` is deprecated. Use `np.isin` instead.",
+ DeprecationWarning,
+ stacklevel=2
+ )
+
+ return _in1d(ar1, ar2, assume_unique, invert, kind=kind)
+
+
+def _in1d(ar1, ar2, assume_unique=False, invert=False, *, kind=None):
+ # Ravel both arrays, behavior for the first array could be different
+ ar1 = np.asarray(ar1).ravel()
+ ar2 = np.asarray(ar2).ravel()
+
+ # Ensure that iteration through object arrays yields size-1 arrays
+ if ar2.dtype == object:
+ ar2 = ar2.reshape(-1, 1)
+
+ if kind not in {None, 'sort', 'table'}:
+ raise ValueError(
+ f"Invalid kind: '{kind}'. Please use None, 'sort' or 'table'.")
+
+ # Can use the table method if all arrays are integers or boolean:
+ is_int_arrays = all(ar.dtype.kind in ("u", "i", "b") for ar in (ar1, ar2))
+ use_table_method = is_int_arrays and kind in {None, 'table'}
+
+ if use_table_method:
+ if ar2.size == 0:
+ if invert:
+ return np.ones_like(ar1, dtype=bool)
+ else:
+ return np.zeros_like(ar1, dtype=bool)
+
+ # Convert booleans to uint8 so we can use the fast integer algorithm
+ if ar1.dtype == bool:
+ ar1 = ar1.astype(np.uint8)
+ if ar2.dtype == bool:
+ ar2 = ar2.astype(np.uint8)
+
+ ar2_min = int(np.min(ar2))
+ ar2_max = int(np.max(ar2))
+
+ ar2_range = ar2_max - ar2_min
+
+ # Constraints on whether we can actually use the table method:
+ # 1. Assert memory usage is not too large
+ below_memory_constraint = ar2_range <= 6 * (ar1.size + ar2.size)
+ # 2. Check overflows for (ar2 - ar2_min); dtype=ar2.dtype
+ range_safe_from_overflow = ar2_range <= np.iinfo(ar2.dtype).max
+
+ # Optimal performance is for approximately
+ # log10(size) > (log10(range) - 2.27) / 0.927.
+ # However, here we set the requirement that by default
+ # the intermediate array can only be 6x
+ # the combined memory allocation of the original
+ # arrays. See discussion on
+ # https://github.com/numpy/numpy/pull/12065.
+
+ if (
+ range_safe_from_overflow and
+ (below_memory_constraint or kind == 'table')
+ ):
+
+ if invert:
+ outgoing_array = np.ones_like(ar1, dtype=bool)
+ else:
+ outgoing_array = np.zeros_like(ar1, dtype=bool)
+
+ # Make elements 1 where the integer exists in ar2
+ if invert:
+ isin_helper_ar = np.ones(ar2_range + 1, dtype=bool)
+ isin_helper_ar[ar2 - ar2_min] = 0
+ else:
+ isin_helper_ar = np.zeros(ar2_range + 1, dtype=bool)
+ isin_helper_ar[ar2 - ar2_min] = 1
+
+ # Mask out elements we know won't work
+ basic_mask = (ar1 <= ar2_max) & (ar1 >= ar2_min)
+ in_range_ar1 = ar1[basic_mask]
+ if in_range_ar1.size == 0:
+ # Nothing more to do, since all values are out of range.
+ return outgoing_array
+
+ # Unfortunately, ar2_min can be out of range for `intp` even
+ # if the calculation result must fit in range (and be positive).
+ # In that case, use ar2.dtype which must work for all unmasked
+ # values.
+ try:
+ ar2_min = np.array(ar2_min, dtype=np.intp)
+ dtype = np.intp
+ except OverflowError:
+ dtype = ar2.dtype
+
+ out = np.empty_like(in_range_ar1, dtype=np.intp)
+ outgoing_array[basic_mask] = isin_helper_ar[
+ np.subtract(in_range_ar1, ar2_min, dtype=dtype,
+ out=out, casting="unsafe")]
+
+ return outgoing_array
+ elif kind == 'table': # not range_safe_from_overflow
+ raise RuntimeError(
+ "You have specified kind='table', "
+ "but the range of values in `ar2` or `ar1` exceed the "
+ "maximum integer of the datatype. "
+ "Please set `kind` to None or 'sort'."
+ )
+ elif kind == 'table':
+ raise ValueError(
+ "The 'table' method is only "
+ "supported for boolean or integer arrays. "
+ "Please select 'sort' or None for kind."
+ )
+
+ # Check if one of the arrays may contain arbitrary objects
+ contains_object = ar1.dtype.hasobject or ar2.dtype.hasobject
+
+ # This code is run when
+ # a) the first condition is true, making the code significantly faster
+ # b) the second condition is true (i.e. `ar1` or `ar2` may contain
+ # arbitrary objects), since then sorting is not guaranteed to work
+ if len(ar2) < 10 * len(ar1) ** 0.145 or contains_object:
+ if invert:
+ mask = np.ones(len(ar1), dtype=bool)
+ for a in ar2:
+ mask &= (ar1 != a)
+ else:
+ mask = np.zeros(len(ar1), dtype=bool)
+ for a in ar2:
+ mask |= (ar1 == a)
+ return mask
+
+ # Otherwise use sorting
+ if not assume_unique:
+ ar1, rev_idx = np.unique(ar1, return_inverse=True)
+ ar2 = np.unique(ar2)
+
+ ar = np.concatenate((ar1, ar2))
+ # We need this to be a stable sort, so always use 'mergesort'
+ # here. The values from the first array should always come before
+ # the values from the second array.
+ order = ar.argsort(kind='mergesort')
+ sar = ar[order]
+ if invert:
+ bool_ar = (sar[1:] != sar[:-1])
+ else:
+ bool_ar = (sar[1:] == sar[:-1])
+ flag = np.concatenate((bool_ar, [invert]))
+ ret = np.empty(ar.shape, dtype=bool)
+ ret[order] = flag
+
+ if assume_unique:
+ return ret[:len(ar1)]
+ else:
+ return ret[rev_idx]
+
+
+def _isin_dispatcher(element, test_elements, assume_unique=None, invert=None,
+ *, kind=None):
+ return (element, test_elements)
+
+
+@array_function_dispatch(_isin_dispatcher)
+def isin(element, test_elements, assume_unique=False, invert=False, *,
+ kind=None):
+ """
+ Calculates ``element in test_elements``, broadcasting over `element` only.
+ Returns a boolean array of the same shape as `element` that is True
+ where an element of `element` is in `test_elements` and False otherwise.
+
+ Parameters
+ ----------
+ element : array_like
+ Input array.
+ test_elements : array_like
+ The values against which to test each value of `element`.
+ This argument is flattened if it is an array or array_like.
+ See notes for behavior with non-array-like parameters.
+ assume_unique : bool, optional
+ If True, the input arrays are both assumed to be unique, which
+ can speed up the calculation. Default is False.
+ invert : bool, optional
+ If True, the values in the returned array are inverted, as if
+ calculating `element not in test_elements`. Default is False.
+ ``np.isin(a, b, invert=True)`` is equivalent to (but faster
+ than) ``np.invert(np.isin(a, b))``.
+ kind : {None, 'sort', 'table'}, optional
+ The algorithm to use. This will not affect the final result,
+ but will affect the speed and memory use. The default, None,
+ will select automatically based on memory considerations.
+
+ * If 'sort', will use a mergesort-based approach. This will have
+ a memory usage of roughly 6 times the sum of the sizes of
+ `element` and `test_elements`, not accounting for size of dtypes.
+ * If 'table', will use a lookup table approach similar
+ to a counting sort. This is only available for boolean and
+ integer arrays. This will have a memory usage of the
+ size of `element` plus the max-min value of `test_elements`.
+ `assume_unique` has no effect when the 'table' option is used.
+ * If None, will automatically choose 'table' if
+ the required memory allocation is less than or equal to
+ 6 times the sum of the sizes of `element` and `test_elements`,
+ otherwise will use 'sort'. This is done to not use
+ a large amount of memory by default, even though
+ 'table' may be faster in most cases. If 'table' is chosen,
+ `assume_unique` will have no effect.
+
+
+ Returns
+ -------
+ isin : ndarray, bool
+ Has the same shape as `element`. The values `element[isin]`
+ are in `test_elements`.
+
+ Notes
+ -----
+ `isin` is an element-wise function version of the python keyword `in`.
+ ``isin(a, b)`` is roughly equivalent to
+ ``np.array([item in b for item in a])`` if `a` and `b` are 1-D sequences.
+
+ `element` and `test_elements` are converted to arrays if they are not
+ already. If `test_elements` is a set (or other non-sequence collection)
+ it will be converted to an object array with one element, rather than an
+ array of the values contained in `test_elements`. This is a consequence
+ of the `array` constructor's way of handling non-sequence collections.
+ Converting the set to a list usually gives the desired behavior.
+
+ Using ``kind='table'`` tends to be faster than ``kind='sort'`` if the
+ following relationship is true:
+ ``log10(len(test_elements)) >
+ (log10(max(test_elements)-min(test_elements)) - 2.27) / 0.927``,
+ but may use greater memory. The default value for `kind` will
+ be automatically selected based only on memory usage, so one may
+ manually set ``kind='table'`` if memory constraints can be relaxed.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> element = 2*np.arange(4).reshape((2, 2))
+ >>> element
+ array([[0, 2],
+ [4, 6]])
+ >>> test_elements = [1, 2, 4, 8]
+ >>> mask = np.isin(element, test_elements)
+ >>> mask
+ array([[False, True],
+ [ True, False]])
+ >>> element[mask]
+ array([2, 4])
+
+ The indices of the matched values can be obtained with `nonzero`:
+
+ >>> np.nonzero(mask)
+ (array([0, 1]), array([1, 0]))
+
+ The test can also be inverted:
+
+ >>> mask = np.isin(element, test_elements, invert=True)
+ >>> mask
+ array([[ True, False],
+ [False, True]])
+ >>> element[mask]
+ array([0, 6])
+
+ Because of how `array` handles sets, the following does not
+ work as expected:
+
+ >>> test_set = {1, 2, 4, 8}
+ >>> np.isin(element, test_set)
+ array([[False, False],
+ [False, False]])
+
+ Casting the set to a list gives the expected result:
+
+ >>> np.isin(element, list(test_set))
+ array([[False, True],
+ [ True, False]])
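+
+ The `kind` keyword changes only the algorithm used, not the result; for
+ instance, the table method yields the same mask:
+
+ >>> np.isin(element, test_elements, kind='table')
+ array([[False, True],
+ [ True, False]])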
+ """
+ element = np.asarray(element)
+ return _in1d(element, test_elements, assume_unique=assume_unique,
+ invert=invert, kind=kind).reshape(element.shape)
+
+
+def _union1d_dispatcher(ar1, ar2):
+ return (ar1, ar2)
+
+
+@array_function_dispatch(_union1d_dispatcher)
+def union1d(ar1, ar2):
+ """
+ Find the union of two arrays.
+
+ Return the unique, sorted array of values that are in either of the two
+ input arrays.
+
+ Parameters
+ ----------
+ ar1, ar2 : array_like
+ Input arrays. They are flattened if they are not already 1D.
+
+ Returns
+ -------
+ union1d : ndarray
+ Unique, sorted union of the input arrays.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.union1d([-1, 0, 1], [-2, 0, 2])
+ array([-2, -1, 0, 1, 2])
+
+ To find the union of more than two arrays, use functools.reduce:
+
+ >>> from functools import reduce
+ >>> reduce(np.union1d, ([1, 3, 4, 3], [3, 1, 2, 1], [6, 3, 4, 2]))
+ array([1, 2, 3, 4, 6])
+ """
+ return unique(np.concatenate((ar1, ar2), axis=None))
+
+
+def _setdiff1d_dispatcher(ar1, ar2, assume_unique=None):
+ return (ar1, ar2)
+
+
+@array_function_dispatch(_setdiff1d_dispatcher)
+def setdiff1d(ar1, ar2, assume_unique=False):
+ """
+ Find the set difference of two arrays.
+
+ Return the unique values in `ar1` that are not in `ar2`.
+
+ Parameters
+ ----------
+ ar1 : array_like
+ Input array.
+ ar2 : array_like
+ Input comparison array.
+ assume_unique : bool
+ If True, the input arrays are both assumed to be unique, which
+ can speed up the calculation. Default is False.
+
+ Returns
+ -------
+ setdiff1d : ndarray
+ 1D array of values in `ar1` that are not in `ar2`. The result
+ is sorted when `assume_unique=False`, but otherwise only sorted
+ if the input is sorted.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> a = np.array([1, 2, 3, 2, 4, 1])
+ >>> b = np.array([3, 4, 5, 6])
+ >>> np.setdiff1d(a, b)
+ array([1, 2])
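+
+ With ``assume_unique=True`` the inputs are used as given, so the order
+ of `ar1` is preserved; for instance:
+
+ >>> np.setdiff1d(np.array([4, 1, 3]), np.array([3]), assume_unique=True)
+ array([4, 1])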
+
+ """
+ if assume_unique:
+ ar1 = np.asarray(ar1).ravel()
+ else:
+ ar1 = unique(ar1)
+ ar2 = unique(ar2)
+ return ar1[_in1d(ar1, ar2, assume_unique=True, invert=True)]
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/_arraysetops_impl.pyi b/.venv/lib/python3.12/site-packages/numpy/lib/_arraysetops_impl.pyi
new file mode 100644
index 0000000..a7ad5b9
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/_arraysetops_impl.pyi
@@ -0,0 +1,444 @@
+from typing import Any, Generic, NamedTuple, SupportsIndex, TypeAlias, overload
+from typing import Literal as L
+
+from typing_extensions import TypeVar, deprecated
+
+import numpy as np
+from numpy._typing import (
+ ArrayLike,
+ NDArray,
+ _ArrayLike,
+ _ArrayLikeBool_co,
+ _ArrayLikeNumber_co,
+)
+
+__all__ = [
+ "ediff1d",
+ "in1d",
+ "intersect1d",
+ "isin",
+ "setdiff1d",
+ "setxor1d",
+ "union1d",
+ "unique",
+ "unique_all",
+ "unique_counts",
+ "unique_inverse",
+ "unique_values",
+]
+
+_ScalarT = TypeVar("_ScalarT", bound=np.generic)
+_NumericT = TypeVar("_NumericT", bound=np.number | np.timedelta64 | np.object_)
+
+# Explicitly set all allowed values to prevent accidental casts to
+# abstract dtypes (their common super-type).
+# Only relevant if two or more arguments are parametrized (e.g. `setdiff1d`),
+# which could result in, for example, `int64` and `float64` producing a
+# `number[_64Bit]` array.
+_EitherSCT = TypeVar(
+ "_EitherSCT",
+ np.bool,
+ np.int8, np.int16, np.int32, np.int64, np.intp,
+ np.uint8, np.uint16, np.uint32, np.uint64, np.uintp,
+ np.float16, np.float32, np.float64, np.longdouble,
+ np.complex64, np.complex128, np.clongdouble,
+ np.timedelta64, np.datetime64,
+ np.bytes_, np.str_, np.void, np.object_,
+ np.integer, np.floating, np.complexfloating, np.character,
+) # fmt: skip
+
+_AnyArray: TypeAlias = NDArray[Any]
+_IntArray: TypeAlias = NDArray[np.intp]
+
+###
+
+class UniqueAllResult(NamedTuple, Generic[_ScalarT]):
+ values: NDArray[_ScalarT]
+ indices: _IntArray
+ inverse_indices: _IntArray
+ counts: _IntArray
+
+class UniqueCountsResult(NamedTuple, Generic[_ScalarT]):
+ values: NDArray[_ScalarT]
+ counts: _IntArray
+
+class UniqueInverseResult(NamedTuple, Generic[_ScalarT]):
+ values: NDArray[_ScalarT]
+ inverse_indices: _IntArray
+
+#
+@overload
+def ediff1d(
+ ary: _ArrayLikeBool_co,
+ to_end: ArrayLike | None = None,
+ to_begin: ArrayLike | None = None,
+) -> NDArray[np.int8]: ...
+@overload
+def ediff1d(
+ ary: _ArrayLike[_NumericT],
+ to_end: ArrayLike | None = None,
+ to_begin: ArrayLike | None = None,
+) -> NDArray[_NumericT]: ...
+@overload
+def ediff1d(
+ ary: _ArrayLike[np.datetime64[Any]],
+ to_end: ArrayLike | None = None,
+ to_begin: ArrayLike | None = None,
+) -> NDArray[np.timedelta64]: ...
+@overload
+def ediff1d(
+ ary: _ArrayLikeNumber_co,
+ to_end: ArrayLike | None = None,
+ to_begin: ArrayLike | None = None,
+) -> _AnyArray: ...
+
+#
+@overload # known scalar-type, FFF
+def unique(
+ ar: _ArrayLike[_ScalarT],
+ return_index: L[False] = False,
+ return_inverse: L[False] = False,
+ return_counts: L[False] = False,
+ axis: SupportsIndex | None = None,
+ *,
+ equal_nan: bool = True,
+) -> NDArray[_ScalarT]: ...
+@overload # unknown scalar-type, FFF
+def unique(
+ ar: ArrayLike,
+ return_index: L[False] = False,
+ return_inverse: L[False] = False,
+ return_counts: L[False] = False,
+ axis: SupportsIndex | None = None,
+ *,
+ equal_nan: bool = True,
+) -> _AnyArray: ...
+@overload # known scalar-type, TFF
+def unique(
+ ar: _ArrayLike[_ScalarT],
+ return_index: L[True],
+ return_inverse: L[False] = False,
+ return_counts: L[False] = False,
+ axis: SupportsIndex | None = None,
+ *,
+ equal_nan: bool = True,
+) -> tuple[NDArray[_ScalarT], _IntArray]: ...
+@overload # unknown scalar-type, TFF
+def unique(
+ ar: ArrayLike,
+ return_index: L[True],
+ return_inverse: L[False] = False,
+ return_counts: L[False] = False,
+ axis: SupportsIndex | None = None,
+ *,
+ equal_nan: bool = True,
+) -> tuple[_AnyArray, _IntArray]: ...
+@overload # known scalar-type, FTF (positional)
+def unique(
+ ar: _ArrayLike[_ScalarT],
+ return_index: L[False],
+ return_inverse: L[True],
+ return_counts: L[False] = False,
+ axis: SupportsIndex | None = None,
+ *,
+ equal_nan: bool = True,
+) -> tuple[NDArray[_ScalarT], _IntArray]: ...
+@overload # known scalar-type, FTF (keyword)
+def unique(
+ ar: _ArrayLike[_ScalarT],
+ return_index: L[False] = False,
+ *,
+ return_inverse: L[True],
+ return_counts: L[False] = False,
+ axis: SupportsIndex | None = None,
+ equal_nan: bool = True,
+) -> tuple[NDArray[_ScalarT], _IntArray]: ...
+@overload # unknown scalar-type, FTF (positional)
+def unique(
+ ar: ArrayLike,
+ return_index: L[False],
+ return_inverse: L[True],
+ return_counts: L[False] = False,
+ axis: SupportsIndex | None = None,
+ *,
+ equal_nan: bool = True,
+) -> tuple[_AnyArray, _IntArray]: ...
+@overload # unknown scalar-type, FTF (keyword)
+def unique(
+ ar: ArrayLike,
+ return_index: L[False] = False,
+ *,
+ return_inverse: L[True],
+ return_counts: L[False] = False,
+ axis: SupportsIndex | None = None,
+ equal_nan: bool = True,
+) -> tuple[_AnyArray, _IntArray]: ...
+@overload # known scalar-type, FFT (positional)
+def unique(
+ ar: _ArrayLike[_ScalarT],
+ return_index: L[False],
+ return_inverse: L[False],
+ return_counts: L[True],
+ axis: SupportsIndex | None = None,
+ *,
+ equal_nan: bool = True,
+) -> tuple[NDArray[_ScalarT], _IntArray]: ...
+@overload # known scalar-type, FFT (keyword)
+def unique(
+ ar: _ArrayLike[_ScalarT],
+ return_index: L[False] = False,
+ return_inverse: L[False] = False,
+ *,
+ return_counts: L[True],
+ axis: SupportsIndex | None = None,
+ equal_nan: bool = True,
+) -> tuple[NDArray[_ScalarT], _IntArray]: ...
+@overload # unknown scalar-type, FFT (positional)
+def unique(
+ ar: ArrayLike,
+ return_index: L[False],
+ return_inverse: L[False],
+ return_counts: L[True],
+ axis: SupportsIndex | None = None,
+ *,
+ equal_nan: bool = True,
+) -> tuple[_AnyArray, _IntArray]: ...
+@overload # unknown scalar-type, FFT (keyword)
+def unique(
+ ar: ArrayLike,
+ return_index: L[False] = False,
+ return_inverse: L[False] = False,
+ *,
+ return_counts: L[True],
+ axis: SupportsIndex | None = None,
+ equal_nan: bool = True,
+) -> tuple[_AnyArray, _IntArray]: ...
+@overload # known scalar-type, TTF
+def unique(
+ ar: _ArrayLike[_ScalarT],
+ return_index: L[True],
+ return_inverse: L[True],
+ return_counts: L[False] = False,
+ axis: SupportsIndex | None = None,
+ *,
+ equal_nan: bool = True,
+) -> tuple[NDArray[_ScalarT], _IntArray, _IntArray]: ...
+@overload # unknown scalar-type, TTF
+def unique(
+ ar: ArrayLike,
+ return_index: L[True],
+ return_inverse: L[True],
+ return_counts: L[False] = False,
+ axis: SupportsIndex | None = None,
+ *,
+ equal_nan: bool = True,
+) -> tuple[_AnyArray, _IntArray, _IntArray]: ...
+@overload # known scalar-type, TFT (positional)
+def unique(
+ ar: _ArrayLike[_ScalarT],
+ return_index: L[True],
+ return_inverse: L[False],
+ return_counts: L[True],
+ axis: SupportsIndex | None = None,
+ *,
+ equal_nan: bool = True,
+) -> tuple[NDArray[_ScalarT], _IntArray, _IntArray]: ...
+@overload # known scalar-type, TFT (keyword)
+def unique(
+ ar: _ArrayLike[_ScalarT],
+ return_index: L[True],
+ return_inverse: L[False] = False,
+ *,
+ return_counts: L[True],
+ axis: SupportsIndex | None = None,
+ equal_nan: bool = True,
+) -> tuple[NDArray[_ScalarT], _IntArray, _IntArray]: ...
+@overload # unknown scalar-type, TFT (positional)
+def unique(
+ ar: ArrayLike,
+ return_index: L[True],
+ return_inverse: L[False],
+ return_counts: L[True],
+ axis: SupportsIndex | None = None,
+ *,
+ equal_nan: bool = True,
+) -> tuple[_AnyArray, _IntArray, _IntArray]: ...
+@overload # unknown scalar-type, TFT (keyword)
+def unique(
+ ar: ArrayLike,
+ return_index: L[True],
+ return_inverse: L[False] = False,
+ *,
+ return_counts: L[True],
+ axis: SupportsIndex | None = None,
+ equal_nan: bool = True,
+) -> tuple[_AnyArray, _IntArray, _IntArray]: ...
+@overload # known scalar-type, FTT (positional)
+def unique(
+ ar: _ArrayLike[_ScalarT],
+ return_index: L[False],
+ return_inverse: L[True],
+ return_counts: L[True],
+ axis: SupportsIndex | None = None,
+ *,
+ equal_nan: bool = True,
+) -> tuple[NDArray[_ScalarT], _IntArray, _IntArray]: ...
+@overload # known scalar-type, FTT (keyword)
+def unique(
+ ar: _ArrayLike[_ScalarT],
+ return_index: L[False] = False,
+ *,
+ return_inverse: L[True],
+ return_counts: L[True],
+ axis: SupportsIndex | None = None,
+ equal_nan: bool = True,
+) -> tuple[NDArray[_ScalarT], _IntArray, _IntArray]: ...
+@overload # unknown scalar-type, FTT (positional)
+def unique(
+ ar: ArrayLike,
+ return_index: L[False],
+ return_inverse: L[True],
+ return_counts: L[True],
+ axis: SupportsIndex | None = None,
+ *,
+ equal_nan: bool = True,
+) -> tuple[_AnyArray, _IntArray, _IntArray]: ...
+@overload # unknown scalar-type, FTT (keyword)
+def unique(
+ ar: ArrayLike,
+ return_index: L[False] = False,
+ *,
+ return_inverse: L[True],
+ return_counts: L[True],
+ axis: SupportsIndex | None = None,
+ equal_nan: bool = True,
+) -> tuple[_AnyArray, _IntArray, _IntArray]: ...
+@overload # known scalar-type, TTT
+def unique(
+ ar: _ArrayLike[_ScalarT],
+ return_index: L[True],
+ return_inverse: L[True],
+ return_counts: L[True],
+ axis: SupportsIndex | None = None,
+ *,
+ equal_nan: bool = True,
+) -> tuple[NDArray[_ScalarT], _IntArray, _IntArray, _IntArray]: ...
+@overload # unknown scalar-type, TTT
+def unique(
+ ar: ArrayLike,
+ return_index: L[True],
+ return_inverse: L[True],
+ return_counts: L[True],
+ axis: SupportsIndex | None = None,
+ *,
+ equal_nan: bool = True,
+) -> tuple[_AnyArray, _IntArray, _IntArray, _IntArray]: ...
+
+#
+@overload
+def unique_all(x: _ArrayLike[_ScalarT]) -> UniqueAllResult[_ScalarT]: ...
+@overload
+def unique_all(x: ArrayLike) -> UniqueAllResult[Any]: ...
+
+#
+@overload
+def unique_counts(x: _ArrayLike[_ScalarT]) -> UniqueCountsResult[_ScalarT]: ...
+@overload
+def unique_counts(x: ArrayLike) -> UniqueCountsResult[Any]: ...
+
+#
+@overload
+def unique_inverse(x: _ArrayLike[_ScalarT]) -> UniqueInverseResult[_ScalarT]: ...
+@overload
+def unique_inverse(x: ArrayLike) -> UniqueInverseResult[Any]: ...
+
+#
+@overload
+def unique_values(x: _ArrayLike[_ScalarT]) -> NDArray[_ScalarT]: ...
+@overload
+def unique_values(x: ArrayLike) -> _AnyArray: ...
+
+#
+@overload # known scalar-type, return_indices=False (default)
+def intersect1d(
+ ar1: _ArrayLike[_EitherSCT],
+ ar2: _ArrayLike[_EitherSCT],
+ assume_unique: bool = False,
+ return_indices: L[False] = False,
+) -> NDArray[_EitherSCT]: ...
+@overload # known scalar-type, return_indices=True (positional)
+def intersect1d(
+ ar1: _ArrayLike[_EitherSCT],
+ ar2: _ArrayLike[_EitherSCT],
+ assume_unique: bool,
+ return_indices: L[True],
+) -> tuple[NDArray[_EitherSCT], _IntArray, _IntArray]: ...
+@overload # known scalar-type, return_indices=True (keyword)
+def intersect1d(
+ ar1: _ArrayLike[_EitherSCT],
+ ar2: _ArrayLike[_EitherSCT],
+ assume_unique: bool = False,
+ *,
+ return_indices: L[True],
+) -> tuple[NDArray[_EitherSCT], _IntArray, _IntArray]: ...
+@overload # unknown scalar-type, return_indices=False (default)
+def intersect1d(
+ ar1: ArrayLike,
+ ar2: ArrayLike,
+ assume_unique: bool = False,
+ return_indices: L[False] = False,
+) -> _AnyArray: ...
+@overload # unknown scalar-type, return_indices=True (positional)
+def intersect1d(
+ ar1: ArrayLike,
+ ar2: ArrayLike,
+ assume_unique: bool,
+ return_indices: L[True],
+) -> tuple[_AnyArray, _IntArray, _IntArray]: ...
+@overload # unknown scalar-type, return_indices=True (keyword)
+def intersect1d(
+ ar1: ArrayLike,
+ ar2: ArrayLike,
+ assume_unique: bool = False,
+ *,
+ return_indices: L[True],
+) -> tuple[_AnyArray, _IntArray, _IntArray]: ...
+
+#
+@overload
+def setxor1d(ar1: _ArrayLike[_EitherSCT], ar2: _ArrayLike[_EitherSCT], assume_unique: bool = False) -> NDArray[_EitherSCT]: ...
+@overload
+def setxor1d(ar1: ArrayLike, ar2: ArrayLike, assume_unique: bool = False) -> _AnyArray: ...
+
+#
+@overload
+def union1d(ar1: _ArrayLike[_EitherSCT], ar2: _ArrayLike[_EitherSCT]) -> NDArray[_EitherSCT]: ...
+@overload
+def union1d(ar1: ArrayLike, ar2: ArrayLike) -> _AnyArray: ...
+
+#
+@overload
+def setdiff1d(ar1: _ArrayLike[_EitherSCT], ar2: _ArrayLike[_EitherSCT], assume_unique: bool = False) -> NDArray[_EitherSCT]: ...
+@overload
+def setdiff1d(ar1: ArrayLike, ar2: ArrayLike, assume_unique: bool = False) -> _AnyArray: ...
+
+#
+def isin(
+ element: ArrayLike,
+ test_elements: ArrayLike,
+ assume_unique: bool = False,
+ invert: bool = False,
+ *,
+ kind: L["sort", "table"] | None = None,
+) -> NDArray[np.bool]: ...
+
+#
+@deprecated("Use 'isin' instead")
+def in1d(
+ element: ArrayLike,
+ test_elements: ArrayLike,
+ assume_unique: bool = False,
+ invert: bool = False,
+ *,
+ kind: L["sort", "table"] | None = None,
+) -> NDArray[np.bool]: ...
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/_arrayterator_impl.py b/.venv/lib/python3.12/site-packages/numpy/lib/_arrayterator_impl.py
new file mode 100644
index 0000000..5f7c5fc
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/_arrayterator_impl.py
@@ -0,0 +1,224 @@
+"""
+A buffered iterator for big arrays.
+
+This module solves the problem of iterating over a big file-based array
+without having to read it into memory. The `Arrayterator` class wraps
+an array object, and when iterated it will return sub-arrays with at most
+a user-specified number of elements.
+
+"""
+from functools import reduce
+from operator import mul
+
+__all__ = ['Arrayterator']
+
+
+class Arrayterator:
+ """
+ Buffered iterator for big arrays.
+
+ `Arrayterator` creates a buffered iterator for reading big arrays in small
+ contiguous blocks. The class is useful for objects stored in the
+    file system. It allows iteration over the object *without* reading
+    everything into memory; instead, small blocks are read and iterated over.
+
+ `Arrayterator` can be used with any object that supports multidimensional
+ slices. This includes NumPy arrays, but also variables from
+ Scientific.IO.NetCDF or pynetcdf for example.
+
+ Parameters
+ ----------
+ var : array_like
+ The object to iterate over.
+ buf_size : int, optional
+ The buffer size. If `buf_size` is supplied, the maximum amount of
+ data that will be read into memory is `buf_size` elements.
+        Default is None, which will read as many elements as possible
+ into memory.
+
+ Attributes
+ ----------
+ var
+ buf_size
+ start
+ stop
+ step
+ shape
+ flat
+
+ See Also
+ --------
+ numpy.ndenumerate : Multidimensional array iterator.
+ numpy.flatiter : Flat array iterator.
+ numpy.memmap : Create a memory-map to an array stored
+ in a binary file on disk.
+
+ Notes
+ -----
+ The algorithm works by first finding a "running dimension", along which
+ the blocks will be extracted. Given an array of dimensions
+    ``(d1, d2, ..., dn)``, if `buf_size` is smaller than ``d1``, the
+    first dimension will be used. If, on the other hand,
+    ``d1 < buf_size < d1*d2``, the second dimension will be used, and so on.
+ Blocks are extracted along this dimension, and when the last block is
+ returned the process continues from the next dimension, until all
+ elements have been read.
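+    For instance, iterating over an array of shape ``(3, 4, 5, 6)`` with
+    ``buf_size=30`` yields blocks of shape ``(1, 1, 5, 6)``, i.e. 30
+    elements at a time.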
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> a = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6)
+ >>> a_itor = np.lib.Arrayterator(a, 2)
+ >>> a_itor.shape
+ (3, 4, 5, 6)
+
+ Now we can iterate over ``a_itor``, and it will return arrays of size
+ two. Since `buf_size` was smaller than any dimension, the first
+ dimension will be iterated over first:
+
+ >>> for subarr in a_itor:
+ ... if not subarr.all():
+ ... print(subarr, subarr.shape) # doctest: +SKIP
+ >>> # [[[[0 1]]]] (1, 1, 1, 2)
+
+ """
+
+ __module__ = "numpy.lib"
+
+ def __init__(self, var, buf_size=None):
+ self.var = var
+ self.buf_size = buf_size
+
+ self.start = [0 for dim in var.shape]
+ self.stop = list(var.shape)
+ self.step = [1 for dim in var.shape]
+
+ def __getattr__(self, attr):
+ return getattr(self.var, attr)
+
+ def __getitem__(self, index):
+ """
+ Return a new arrayterator.
+
+ """
+ # Fix index, handling ellipsis and incomplete slices.
+ if not isinstance(index, tuple):
+ index = (index,)
+ fixed = []
+ length, dims = len(index), self.ndim
+ for slice_ in index:
+ if slice_ is Ellipsis:
+ fixed.extend([slice(None)] * (dims - length + 1))
+ length = len(fixed)
+ elif isinstance(slice_, int):
+ fixed.append(slice(slice_, slice_ + 1, 1))
+ else:
+ fixed.append(slice_)
+ index = tuple(fixed)
+ if len(index) < dims:
+ index += (slice(None),) * (dims - len(index))
+
+ # Return a new arrayterator object.
+ out = self.__class__(self.var, self.buf_size)
+ for i, (start, stop, step, slice_) in enumerate(
+ zip(self.start, self.stop, self.step, index)):
+ out.start[i] = start + (slice_.start or 0)
+ out.step[i] = step * (slice_.step or 1)
+ out.stop[i] = start + (slice_.stop or stop - start)
+ out.stop[i] = min(stop, out.stop[i])
+ return out
+
+ def __array__(self, dtype=None, copy=None):
+ """
+ Return corresponding data.
+
+ """
+ slice_ = tuple(slice(*t) for t in zip(
+ self.start, self.stop, self.step))
+ return self.var[slice_]
+
+ @property
+ def flat(self):
+ """
+ A 1-D flat iterator for Arrayterator objects.
+
+ This iterator returns elements of the array to be iterated over in
+ `~lib.Arrayterator` one by one.
+ It is similar to `flatiter`.
+
+ See Also
+ --------
+ lib.Arrayterator
+ flatiter
+
+ Examples
+ --------
+ >>> a = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6)
+ >>> a_itor = np.lib.Arrayterator(a, 2)
+
+ >>> for subarr in a_itor.flat:
+ ... if not subarr:
+ ... print(subarr, type(subarr))
+ ...
+ 0 <class 'numpy.int64'>
+
+ """
+ for block in self:
+ yield from block.flat
+
+ @property
+ def shape(self):
+ """
+ The shape of the array to be iterated over.
+
+ For an example, see `Arrayterator`.
+
+ """
+ return tuple(((stop - start - 1) // step + 1) for start, stop, step in
+ zip(self.start, self.stop, self.step))
+
+ def __iter__(self):
+ # Skip arrays with degenerate dimensions
+ if [dim for dim in self.shape if dim <= 0]:
+ return
+
+ start = self.start[:]
+ stop = self.stop[:]
+ step = self.step[:]
+ ndims = self.var.ndim
+
+ while True:
+ count = self.buf_size or reduce(mul, self.shape)
+
+ # iterate over each dimension, looking for the
+ # running dimension (ie, the dimension along which
+ # the blocks will be built from)
+ rundim = 0
+ for i in range(ndims - 1, -1, -1):
+ # if count is zero we ran out of elements to read
+ # along higher dimensions, so we read only a single position
+ if count == 0:
+ stop[i] = start[i] + 1
+ elif count <= self.shape[i]:
+ # limit along this dimension
+ stop[i] = start[i] + count * step[i]
+ rundim = i
+ else:
+ # read everything along this dimension
+ stop[i] = self.stop[i]
+ stop[i] = min(self.stop[i], stop[i])
+ count = count // self.shape[i]
+
+ # yield a block
+ slice_ = tuple(slice(*t) for t in zip(start, stop, step))
+ yield self.var[slice_]
+
+ # Update start position, taking care of overflow to
+ # other dimensions
+ start[rundim] = stop[rundim] # start where we stopped
+ for i in range(ndims - 1, 0, -1):
+ if start[i] >= self.stop[i]:
+ start[i] = self.start[i]
+ start[i - 1] += self.step[i - 1]
+ if start[0] >= self.stop[0]:
+ return
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/_arrayterator_impl.pyi b/.venv/lib/python3.12/site-packages/numpy/lib/_arrayterator_impl.pyi
new file mode 100644
index 0000000..e1a9e05
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/_arrayterator_impl.pyi
@@ -0,0 +1,46 @@
+# pyright: reportIncompatibleMethodOverride=false
+
+from collections.abc import Generator
+from types import EllipsisType
+from typing import Any, Final, TypeAlias, overload
+
+from typing_extensions import TypeVar
+
+import numpy as np
+from numpy._typing import _AnyShape, _Shape
+
+__all__ = ["Arrayterator"]
+
+_ShapeT_co = TypeVar("_ShapeT_co", bound=_Shape, default=_AnyShape, covariant=True)
+_DTypeT = TypeVar("_DTypeT", bound=np.dtype)
+_DTypeT_co = TypeVar("_DTypeT_co", bound=np.dtype, default=np.dtype, covariant=True)
+_ScalarT = TypeVar("_ScalarT", bound=np.generic)
+
+_AnyIndex: TypeAlias = EllipsisType | int | slice | tuple[EllipsisType | int | slice, ...]
+
+# NOTE: In reality `Arrayterator` does not actually inherit from `ndarray`,
+# but its `__getattr__` method wraps around the former and thus has
+# access to all of its methods.
+
+class Arrayterator(np.ndarray[_ShapeT_co, _DTypeT_co]):
+ var: np.ndarray[_ShapeT_co, _DTypeT_co] # type: ignore[assignment]
+ buf_size: Final[int | None]
+ start: Final[list[int]]
+ stop: Final[list[int]]
+ step: Final[list[int]]
+
+ @property # type: ignore[misc]
+ def shape(self) -> _ShapeT_co: ...
+ @property
+ def flat(self: Arrayterator[Any, np.dtype[_ScalarT]]) -> Generator[_ScalarT]: ... # type: ignore[override]
+
+ #
+ def __init__(self, /, var: np.ndarray[_ShapeT_co, _DTypeT_co], buf_size: int | None = None) -> None: ...
+ def __getitem__(self, index: _AnyIndex, /) -> Arrayterator[_AnyShape, _DTypeT_co]: ... # type: ignore[override]
+ def __iter__(self) -> Generator[np.ndarray[_AnyShape, _DTypeT_co]]: ...
+
+ #
+ @overload # type: ignore[override]
+ def __array__(self, /, dtype: None = None, copy: bool | None = None) -> np.ndarray[_ShapeT_co, _DTypeT_co]: ...
+ @overload
+ def __array__(self, /, dtype: _DTypeT, copy: bool | None = None) -> np.ndarray[_ShapeT_co, _DTypeT]: ...
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/_datasource.py b/.venv/lib/python3.12/site-packages/numpy/lib/_datasource.py
new file mode 100644
index 0000000..72398c5
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/_datasource.py
@@ -0,0 +1,700 @@
+"""A file interface for handling local and remote data files.
+
+The goal of datasource is to abstract some of the file system operations
+when dealing with data files so the researcher doesn't have to know all the
+low-level details. Through datasource, a researcher can obtain and use a
+file with one function call, regardless of location of the file.
+
+DataSource is meant to augment standard python libraries, not replace them.
+It should work seamlessly with standard file IO operations and the os
+module.
+
+DataSource files can originate locally or remotely:
+
+- local files : '/home/guido/src/local/data.txt'
+- URLs (http, ftp, ...) : 'http://www.scipy.org/not/real/data.txt'
+
+DataSource files can also be compressed or uncompressed. Currently only
+gzip, bz2 and xz are supported.
+
+Example::
+
+ >>> # Create a DataSource, use os.curdir (default) for local storage.
+ >>> from numpy import DataSource
+ >>> ds = DataSource()
+ >>>
+ >>> # Open a remote file.
+ >>> # DataSource downloads the file, stores it locally in:
+ >>> # './www.google.com/index.html'
+ >>> # opens the file and returns a file object.
+ >>> fp = ds.open('http://www.google.com/') # doctest: +SKIP
+ >>>
+ >>> # Use the file as you normally would
+ >>> fp.read() # doctest: +SKIP
+ >>> fp.close() # doctest: +SKIP
+
+"""
+import os
+
+from numpy._utils import set_module
+
+_open = open
+
+
+def _check_mode(mode, encoding, newline):
+ """Check mode and that encoding and newline are compatible.
+
+ Parameters
+ ----------
+ mode : str
+ File open mode.
+ encoding : str
+ File encoding.
+ newline : str
+ Newline for text files.
+
+ """
+ if "t" in mode:
+ if "b" in mode:
+ raise ValueError(f"Invalid mode: {mode!r}")
+ else:
+ if encoding is not None:
+ raise ValueError("Argument 'encoding' not supported in binary mode")
+ if newline is not None:
+ raise ValueError("Argument 'newline' not supported in binary mode")
+
+
+# Using a class instead of a module-level dictionary
+# to reduce the initial 'import numpy' overhead by
+# deferring the import of lzma, bz2 and gzip until needed
+
+# TODO: .zip support, .tar support?
+class _FileOpeners:
+ """
+ Container for different methods to open (un-)compressed files.
+
+ `_FileOpeners` contains a dictionary that holds one method for each
+ supported file format. Attribute lookup is implemented in such a way
+ that an instance of `_FileOpeners` itself can be indexed with the keys
+ of that dictionary. Currently uncompressed files as well as files
+ compressed with ``gzip``, ``bz2`` or ``xz`` compression are supported.
+
+ Notes
+ -----
+ `_file_openers`, an instance of `_FileOpeners`, is made available for
+ use in the `_datasource` module.
+
+ Examples
+ --------
+ >>> import gzip
+ >>> np.lib._datasource._file_openers.keys()
+ [None, '.bz2', '.gz', '.xz', '.lzma']
+ >>> np.lib._datasource._file_openers['.gz'] is gzip.open
+ True
+
+ """
+
+ def __init__(self):
+ self._loaded = False
+ self._file_openers = {None: open}
+
+ def _load(self):
+ if self._loaded:
+ return
+
+ try:
+ import bz2
+ self._file_openers[".bz2"] = bz2.open
+ except ImportError:
+ pass
+
+ try:
+ import gzip
+ self._file_openers[".gz"] = gzip.open
+ except ImportError:
+ pass
+
+ try:
+ import lzma
+ self._file_openers[".xz"] = lzma.open
+ self._file_openers[".lzma"] = lzma.open
+ except (ImportError, AttributeError):
+ # There are incompatible backports of lzma that do not have the
+ # lzma.open attribute, so catch that as well as ImportError.
+ pass
+
+ self._loaded = True
+
+ def keys(self):
+ """
+ Return the keys of currently supported file openers.
+
+ Parameters
+ ----------
+ None
+
+ Returns
+ -------
+ keys : list
+ The keys are None for uncompressed files and the file extension
+            strings (e.g. ``'.gz'``, ``'.xz'``) for supported compression
+ methods.
+
+ """
+ self._load()
+ return list(self._file_openers.keys())
+
+ def __getitem__(self, key):
+ self._load()
+ return self._file_openers[key]
+
+
+_file_openers = _FileOpeners()
+
+def open(path, mode='r', destpath=os.curdir, encoding=None, newline=None):
+ """
+ Open `path` with `mode` and return the file object.
+
+    If ``path`` is a URL, it will be downloaded, stored in the
+ `DataSource` `destpath` directory and opened from there.
+
+ Parameters
+ ----------
+ path : str or pathlib.Path
+ Local file path or URL to open.
+ mode : str, optional
+ Mode to open `path`. Mode 'r' for reading, 'w' for writing, 'a' to
+ append. Available modes depend on the type of object specified by
+ path. Default is 'r'.
+ destpath : str, optional
+ Path to the directory where the source file gets downloaded to for
+ use. If `destpath` is None, a temporary directory will be created.
+ The default path is the current directory.
+ encoding : {None, str}, optional
+ Open text file with given encoding. The default encoding will be
+ what `open` uses.
+ newline : {None, str}, optional
+ Newline to use when reading text file.
+
+ Returns
+ -------
+ out : file object
+ The opened file.
+
+ Notes
+ -----
+ This is a convenience function that instantiates a `DataSource` and
+ returns the file object from ``DataSource.open(path)``.
+
+ """
+
+ ds = DataSource(destpath)
+ return ds.open(path, mode, encoding=encoding, newline=newline)
+
+
+@set_module('numpy.lib.npyio')
+class DataSource:
+ """
+ DataSource(destpath='.')
+
+ A generic data source file (file, http, ftp, ...).
+
+ DataSources can be local files or remote files/URLs. The files may
+ also be compressed or uncompressed. DataSource hides some of the
+ low-level details of downloading the file, allowing you to simply pass
+ in a valid file path (or URL) and obtain a file object.
+
+ Parameters
+ ----------
+ destpath : str or None, optional
+ Path to the directory where the source file gets downloaded to for
+ use. If `destpath` is None, a temporary directory will be created.
+ The default path is the current directory.
+
+ Notes
+ -----
+    URLs require a scheme string (``http://``) to be used; without it they
+ will fail::
+
+ >>> repos = np.lib.npyio.DataSource()
+ >>> repos.exists('www.google.com/index.html')
+ False
+ >>> repos.exists('http://www.google.com/index.html')
+ True
+
+ Temporary directories are deleted when the DataSource is deleted.
+
+ Examples
+ --------
+ ::
+
+ >>> ds = np.lib.npyio.DataSource('/home/guido')
+ >>> urlname = 'http://www.google.com/'
+ >>> gfile = ds.open('http://www.google.com/')
+ >>> ds.abspath(urlname)
+ '/home/guido/www.google.com/index.html'
+
+ >>> ds = np.lib.npyio.DataSource(None) # use with temporary file
+ >>> ds.open('/home/guido/foobar.txt')
+        <open file '/home/guido/foobar.txt', mode 'r' at 0x91d4430>
+ >>> ds.abspath('/home/guido/foobar.txt')
+ '/tmp/.../home/guido/foobar.txt'
+
+ """
+
+ def __init__(self, destpath=os.curdir):
+ """Create a DataSource with a local path at destpath."""
+ if destpath:
+ self._destpath = os.path.abspath(destpath)
+ self._istmpdest = False
+ else:
+ import tempfile # deferring import to improve startup time
+ self._destpath = tempfile.mkdtemp()
+ self._istmpdest = True
+
+ def __del__(self):
+ # Remove temp directories
+ if hasattr(self, '_istmpdest') and self._istmpdest:
+ import shutil
+
+ shutil.rmtree(self._destpath)
+
+ def _iszip(self, filename):
+ """Test if the filename is a zip file by looking at the file extension.
+
+ """
+ fname, ext = os.path.splitext(filename)
+ return ext in _file_openers.keys()
+
+ def _iswritemode(self, mode):
+ """Test if the given mode will open a file for writing."""
+
+ # Currently only used to test the bz2 files.
+ _writemodes = ("w", "+")
+ return any(c in _writemodes for c in mode)
+
+ def _splitzipext(self, filename):
+ """Split zip extension from filename and return filename.
+
+ Returns
+ -------
+        base, zip_ext : tuple
+
+ """
+
+ if self._iszip(filename):
+ return os.path.splitext(filename)
+ else:
+ return filename, None
+
+ def _possible_names(self, filename):
+ """Return a tuple containing compressed filename variations."""
+ names = [filename]
+ if not self._iszip(filename):
+ for zipext in _file_openers.keys():
+ if zipext:
+ names.append(filename + zipext)
+ return names
+
+ def _isurl(self, path):
+ """Test if path is a net location. Tests the scheme and netloc."""
+
+ # We do this here to reduce the 'import numpy' initial import time.
+ from urllib.parse import urlparse
+
+ # BUG : URLs require a scheme string ('http://') to be used.
+ # www.google.com will fail.
+ # Should we prepend the scheme for those that don't have it and
+ # test that also? Similar to the way we append .gz and test for
+        # compressed versions of files.
+
+ scheme, netloc, upath, uparams, uquery, ufrag = urlparse(path)
+ return bool(scheme and netloc)
+
+ def _cache(self, path):
+ """Cache the file specified by path.
+
+ Creates a copy of the file in the datasource cache.
+
+ """
+ # We import these here because importing them is slow and
+ # a significant fraction of numpy's total import time.
+ import shutil
+ from urllib.request import urlopen
+
+ upath = self.abspath(path)
+
+ # ensure directory exists
+ if not os.path.exists(os.path.dirname(upath)):
+ os.makedirs(os.path.dirname(upath))
+
+ # TODO: Doesn't handle compressed files!
+ if self._isurl(path):
+ with urlopen(path) as openedurl:
+ with _open(upath, 'wb') as f:
+ shutil.copyfileobj(openedurl, f)
+ else:
+ shutil.copyfile(path, upath)
+ return upath
+
+ def _findfile(self, path):
+ """Searches for ``path`` and returns full path if found.
+
+        If path is a URL, _findfile will cache a local copy and return the
+ path to the cached file. If path is a local file, _findfile will
+ return a path to that local file.
+
+ The search will include possible compressed versions of the file
+ and return the first occurrence found.
+
+ """
+
+ # Build list of possible local file paths
+ if not self._isurl(path):
+ # Valid local paths
+ filelist = self._possible_names(path)
+ # Paths in self._destpath
+ filelist += self._possible_names(self.abspath(path))
+ else:
+ # Cached URLs in self._destpath
+ filelist = self._possible_names(self.abspath(path))
+ # Remote URLs
+ filelist = filelist + self._possible_names(path)
+
+ for name in filelist:
+ if self.exists(name):
+ if self._isurl(name):
+ name = self._cache(name)
+ return name
+ return None
+
+ def abspath(self, path):
+ """
+ Return absolute path of file in the DataSource directory.
+
+        If `path` is a URL, then `abspath` will return either the location
+        where the file exists locally or the location where it would exist
+        when opened using the `open` method.
+
+ Parameters
+ ----------
+ path : str or pathlib.Path
+ Can be a local file or a remote URL.
+
+ Returns
+ -------
+ out : str
+ Complete path, including the `DataSource` destination directory.
+
+ Notes
+ -----
+ The functionality is based on `os.path.abspath`.
+
+ """
+ # We do this here to reduce the 'import numpy' initial import time.
+ from urllib.parse import urlparse
+
+ # TODO: This should be more robust. Handles case where path includes
+ # the destpath, but not other sub-paths. Failing case:
+ # path = /home/guido/datafile.txt
+ # destpath = /home/alex/
+ # upath = self.abspath(path)
+ # upath == '/home/alex/home/guido/datafile.txt'
+
+ # handle case where path includes self._destpath
+ splitpath = path.split(self._destpath, 2)
+ if len(splitpath) > 1:
+ path = splitpath[1]
+ scheme, netloc, upath, uparams, uquery, ufrag = urlparse(path)
+ netloc = self._sanitize_relative_path(netloc)
+ upath = self._sanitize_relative_path(upath)
+ return os.path.join(self._destpath, netloc, upath)
+
+ def _sanitize_relative_path(self, path):
+ """Return a sanitised relative path for which
+ os.path.abspath(os.path.join(base, path)).startswith(base)
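+
+        For example (illustrative), both '../../etc/passwd' and
+        '/etc/passwd' sanitise to 'etc/passwd'.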
+ """
+ last = None
+ path = os.path.normpath(path)
+ while path != last:
+ last = path
+ # Note: os.path.join treats '/' as os.sep on Windows
+ path = path.lstrip(os.sep).lstrip('/')
+ path = path.lstrip(os.pardir).removeprefix('..')
+ drive, path = os.path.splitdrive(path) # for Windows
+ return path
+
+ def exists(self, path):
+ """
+ Test if path exists.
+
+ Test if `path` exists as (and in this order):
+
+ - a local file.
+ - a remote URL that has been downloaded and stored locally in the
+ `DataSource` directory.
+ - a remote URL that has not been downloaded, but is valid and
+ accessible.
+
+ Parameters
+ ----------
+ path : str or pathlib.Path
+ Can be a local file or a remote URL.
+
+ Returns
+ -------
+ out : bool
+ True if `path` exists.
+
+ Notes
+ -----
+        When `path` is a URL, `exists` will return True if it's either
+        stored locally in the `DataSource` directory, or is a valid remote
+        URL. `DataSource` does not discriminate between the two; the file
+        is accessible if it exists in either location.
+
+ """
+
+ # First test for local path
+ if os.path.exists(path):
+ return True
+
+ # We import this here because importing urllib is slow and
+ # a significant fraction of numpy's total import time.
+ from urllib.error import URLError
+ from urllib.request import urlopen
+
+ # Test cached url
+ upath = self.abspath(path)
+ if os.path.exists(upath):
+ return True
+
+ # Test remote url
+ if self._isurl(path):
+ try:
+ netfile = urlopen(path)
+ netfile.close()
+ del netfile
+ return True
+ except URLError:
+ return False
+ return False
+
+ def open(self, path, mode='r', encoding=None, newline=None):
+ """
+ Open and return file-like object.
+
+        If `path` is a URL, it will be downloaded, stored in the
+ `DataSource` directory and opened from there.
+
+ Parameters
+ ----------
+ path : str or pathlib.Path
+ Local file path or URL to open.
+ mode : {'r', 'w', 'a'}, optional
+ Mode to open `path`. Mode 'r' for reading, 'w' for writing,
+ 'a' to append. Available modes depend on the type of object
+ specified by `path`. Default is 'r'.
+ encoding : {None, str}, optional
+ Open text file with given encoding. The default encoding will be
+ what `open` uses.
+ newline : {None, str}, optional
+ Newline to use when reading text file.
+
+ Returns
+ -------
+ out : file object
+ File object.
+
+ """
+
+ # TODO: There is no support for opening a file for writing which
+ # doesn't exist yet (creating a file). Should there be?
+
+ # TODO: Add a ``subdir`` parameter for specifying the subdirectory
+ # used to store URLs in self._destpath.
+
+ if self._isurl(path) and self._iswritemode(mode):
+ raise ValueError("URLs are not writeable")
+
+ # NOTE: _findfile will fail on a new file opened for writing.
+ found = self._findfile(path)
+ if found:
+ _fname, ext = self._splitzipext(found)
+            if ext == '.bz2':
+                # Note: bz2 does not support "+" modes; drop it if present.
+                mode = mode.replace("+", "")
+ return _file_openers[ext](found, mode=mode,
+ encoding=encoding, newline=newline)
+ else:
+ raise FileNotFoundError(f"{path} not found.")
+
+
+class Repository(DataSource):
+ """
+ Repository(baseurl, destpath='.')
+
+    A data repository where multiple DataSources share a base
+    URL/directory.
+
+ `Repository` extends `DataSource` by prepending a base URL (or
+ directory) to all the files it handles. Use `Repository` when you will
+ be working with multiple files from one base URL. Initialize
+ `Repository` with the base URL, then refer to each file by its filename
+ only.
+
+ Parameters
+ ----------
+ baseurl : str
+ Path to the local directory or remote location that contains the
+ data files.
+ destpath : str or None, optional
+ Path to the directory where the source file gets downloaded to for
+ use. If `destpath` is None, a temporary directory will be created.
+ The default path is the current directory.
+
+ Examples
+ --------
+ To analyze all files in the repository, do something like this
+ (note: this is not self-contained code)::
+
+ >>> repos = np.lib._datasource.Repository('/home/user/data/dir/')
+ >>> for filename in filelist:
+ ... fp = repos.open(filename)
+ ... fp.analyze()
+ ... fp.close()
+
+ Similarly you could use a URL for a repository::
+
+ >>> repos = np.lib._datasource.Repository('http://www.xyz.edu/data')
+
+ """
+
+ def __init__(self, baseurl, destpath=os.curdir):
+ """Create a Repository with a shared url or directory of baseurl."""
+ DataSource.__init__(self, destpath=destpath)
+ self._baseurl = baseurl
+
+ def __del__(self):
+ DataSource.__del__(self)
+
+ def _fullpath(self, path):
+ """Return complete path for path. Prepends baseurl if necessary."""
+ splitpath = path.split(self._baseurl, 2)
+ if len(splitpath) == 1:
+ result = os.path.join(self._baseurl, path)
+ else:
+ result = path # path contains baseurl already
+ return result
+
+ def _findfile(self, path):
+ """Extend DataSource method to prepend baseurl to ``path``."""
+ return DataSource._findfile(self, self._fullpath(path))
+
+ def abspath(self, path):
+ """
+ Return absolute path of file in the Repository directory.
+
+        If `path` is a URL, then `abspath` will return either the location
+        where the file exists locally or the location where it would exist
+        when opened using the `open` method.
+
+ Parameters
+ ----------
+ path : str or pathlib.Path
+ Can be a local file or a remote URL. This may, but does not
+ have to, include the `baseurl` with which the `Repository` was
+ initialized.
+
+ Returns
+ -------
+ out : str
+ Complete path, including the `DataSource` destination directory.
+
+ """
+ return DataSource.abspath(self, self._fullpath(path))
+
+ def exists(self, path):
+ """
+ Test if path exists prepending Repository base URL to path.
+
+ Test if `path` exists as (and in this order):
+
+ - a local file.
+ - a remote URL that has been downloaded and stored locally in the
+ `DataSource` directory.
+ - a remote URL that has not been downloaded, but is valid and
+ accessible.
+
+ Parameters
+ ----------
+ path : str or pathlib.Path
+ Can be a local file or a remote URL. This may, but does not
+ have to, include the `baseurl` with which the `Repository` was
+ initialized.
+
+ Returns
+ -------
+ out : bool
+ True if `path` exists.
+
+ Notes
+ -----
+        When `path` is a URL, `exists` will return True if it's either
+        stored locally in the `DataSource` directory, or is a valid remote
+        URL. `DataSource` does not discriminate between the two; the file
+        is accessible if it exists in either location.
+
+ """
+ return DataSource.exists(self, self._fullpath(path))
+
+ def open(self, path, mode='r', encoding=None, newline=None):
+ """
+ Open and return file-like object prepending Repository base URL.
+
+        If `path` is a URL, it will be downloaded, stored in the
+ DataSource directory and opened from there.
+
+ Parameters
+ ----------
+ path : str or pathlib.Path
+ Local file path or URL to open. This may, but does not have to,
+ include the `baseurl` with which the `Repository` was
+ initialized.
+ mode : {'r', 'w', 'a'}, optional
+ Mode to open `path`. Mode 'r' for reading, 'w' for writing,
+ 'a' to append. Available modes depend on the type of object
+ specified by `path`. Default is 'r'.
+ encoding : {None, str}, optional
+ Open text file with given encoding. The default encoding will be
+ what `open` uses.
+ newline : {None, str}, optional
+ Newline to use when reading text file.
+
+ Returns
+ -------
+ out : file object
+ File object.
+
+ """
+ return DataSource.open(self, self._fullpath(path), mode,
+ encoding=encoding, newline=newline)
+
+ def listdir(self):
+ """
+ List files in the source Repository.
+
+ Returns
+ -------
+ files : list of str or pathlib.Path
+ List of file names (not containing a directory part).
+
+ Notes
+ -----
+ Does not currently work for remote repositories.
+
+ """
+ if self._isurl(self._baseurl):
+ raise NotImplementedError(
+ "Directory listing of URLs, not supported yet.")
+ else:
+ return os.listdir(self._baseurl)
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/_datasource.pyi b/.venv/lib/python3.12/site-packages/numpy/lib/_datasource.pyi
new file mode 100644
index 0000000..9f91fdf
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/_datasource.pyi
@@ -0,0 +1,31 @@
+from pathlib import Path
+from typing import IO, Any, TypeAlias
+
+from _typeshed import OpenBinaryMode, OpenTextMode
+
+_Mode: TypeAlias = OpenBinaryMode | OpenTextMode
+
+###
+
+# exported in numpy.lib.npyio
+class DataSource:
+ def __init__(self, /, destpath: Path | str | None = ...) -> None: ...
+ def __del__(self, /) -> None: ...
+ def abspath(self, /, path: str) -> str: ...
+ def exists(self, /, path: str) -> bool: ...
+
+    # Whether the file-object is opened in text or binary mode (by default)
+ # depends on the file-extension of `path`
+ def open(self, /, path: str, mode: _Mode = "r", encoding: str | None = None, newline: str | None = None) -> IO[Any]: ...
+
+class Repository(DataSource):
+ def __init__(self, /, baseurl: str, destpath: str | None = ...) -> None: ...
+ def listdir(self, /) -> list[str]: ...
+
+def open(
+ path: str,
+ mode: _Mode = "r",
+ destpath: str | None = ...,
+ encoding: str | None = None,
+ newline: str | None = None,
+) -> IO[Any]: ...
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/_format_impl.py b/.venv/lib/python3.12/site-packages/numpy/lib/_format_impl.py
new file mode 100644
index 0000000..7378ba5
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/_format_impl.py
@@ -0,0 +1,1036 @@
+"""
+Binary serialization
+
+NPY format
+==========
+
+A simple format for saving numpy arrays to disk with the full
+information about them.
+
+The ``.npy`` format is the standard binary file format in NumPy for
+persisting a *single* arbitrary NumPy array on disk. The format stores all
+of the shape and dtype information necessary to reconstruct the array
+correctly even on another machine with a different architecture.
+The format is designed to be as simple as possible while achieving
+its limited goals.
+
+The ``.npz`` format is the standard format for persisting *multiple* NumPy
+arrays on disk. A ``.npz`` file is a zip file containing multiple ``.npy``
+files, one for each array.
+
+Capabilities
+------------
+
+- Can represent all NumPy arrays including nested record arrays and
+ object arrays.
+
+- Represents the data in its native binary form.
+
+- Supports Fortran-contiguous arrays directly.
+
+- Stores all of the necessary information to reconstruct the array
+ including shape and dtype on a machine of a different
+ architecture. Both little-endian and big-endian arrays are
+ supported, and a file with little-endian numbers will yield
+ a little-endian array on any machine reading the file. The
+ types are described in terms of their actual sizes. For example,
+ if a machine with a 64-bit C "long int" writes out an array with
+ "long ints", a reading machine with 32-bit C "long ints" will yield
+ an array with 64-bit integers.
+
+- Is straightforward to reverse engineer. Datasets often live longer than
+ the programs that created them. A competent developer should be
+ able to create a solution in their preferred programming language to
+ read most ``.npy`` files that they have been given without much
+ documentation.
+
+- Allows memory-mapping of the data. See `open_memmap`.
+
+- Can be read from a filelike stream object instead of an actual file.
+
+- Stores object arrays, i.e. arrays containing elements that are arbitrary
+  Python objects. Files with object arrays are not mmapable, but
+ can be read and written to disk.
+
+Limitations
+-----------
+
+- Arbitrary subclasses of numpy.ndarray are not completely preserved.
+ Subclasses will be accepted for writing, but only the array data will
+ be written out. A regular numpy.ndarray object will be created
+ upon reading the file.
+
+.. warning::
+
+ Due to limitations in the interpretation of structured dtypes, dtypes
+ with fields with empty names will have the names replaced by 'f0', 'f1',
+ etc. Such arrays will not round-trip through the format entirely
+ accurately. The data is intact; only the field names will differ. We are
+ working on a fix for this. This fix will not require a change in the
+ file format. The arrays with such structures can still be saved and
+ restored, and the correct dtype may be restored by using the
+ ``loadedarray.view(correct_dtype)`` method.
+
+File extensions
+---------------
+
+We recommend using the ``.npy`` and ``.npz`` extensions for files saved
+in this format. This is by no means a requirement; applications may wish
+to use these file formats but use an extension specific to the
+application. In the absence of an obvious alternative, however,
+we suggest using ``.npy`` and ``.npz``.
+
+Version numbering
+-----------------
+
+The version numbering of these formats is independent of NumPy version
+numbering. If the format is upgraded, the code in `numpy.lib.format` will
+still be able to read and write Version 1.0 files.
+
+Format Version 1.0
+------------------
+
+The first 6 bytes are a magic string: exactly ``\\x93NUMPY``.
+
+The next 1 byte is an unsigned byte: the major version number of the file
+format, e.g. ``\\x01``.
+
+The next 1 byte is an unsigned byte: the minor version number of the file
+format, e.g. ``\\x00``. Note: the version of the file format is not tied
+to the version of the numpy package.
+
+The next 2 bytes form a little-endian unsigned short int: the length of
+the header data HEADER_LEN.
+
+The next HEADER_LEN bytes form the header data describing the array's
+format. It is an ASCII string which contains a Python literal expression
+of a dictionary. It is terminated by a newline (``\\n``) and padded with
+spaces (``\\x20``) to make the total of
+``len(magic string) + 2 + len(length) + HEADER_LEN`` be evenly divisible
+by 64 for alignment purposes.
+
+The dictionary contains three keys:
+
+ "descr" : dtype.descr
+ An object that can be passed as an argument to the `numpy.dtype`
+ constructor to create the array's dtype.
+ "fortran_order" : bool
+ Whether the array data is Fortran-contiguous or not. Since
+ Fortran-contiguous arrays are a common form of non-C-contiguity,
+ we allow them to be written directly to disk for efficiency.
+ "shape" : tuple of int
+ The shape of the array.
+
+For repeatability and readability, the dictionary keys are sorted in
+alphabetic order. This is for convenience only. A writer SHOULD implement
+this if possible. A reader MUST NOT depend on this.
+
+Following the header comes the array data. If the dtype contains Python
+objects (i.e. ``dtype.hasobject is True``), then the data is a Python
+pickle of the array. Otherwise the data is the contiguous (either C-
+or Fortran-, depending on ``fortran_order``) bytes of the array.
+Consumers can figure out the number of bytes by multiplying the number
+of elements given by the shape (noting that ``shape=()`` means there is
+1 element) by ``dtype.itemsize``.
+
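+For illustration, a version 1.0 header can be decoded by hand (a sketch
+assuming a well-formed file; `read_magic` and `read_array_header_1_0` below
+are the supported API)::
+
+    >>> import ast, io, struct
+    >>> import numpy as np
+    >>> buf = io.BytesIO()
+    >>> np.save(buf, np.arange(3))
+    >>> _ = buf.seek(0)
+    >>> buf.read(6) == b'\\x93NUMPY'         # the magic string
+    True
+    >>> major, minor = buf.read(2)          # format version bytes
+    >>> hlen = struct.unpack('<H', buf.read(2))[0]
+    >>> ast.literal_eval(buf.read(hlen).decode('latin1'))['shape']
+    (3,)
+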
+Format Version 2.0
+------------------
+
+The version 1.0 format only allowed the array header to have a total size of
+65535 bytes. This can be exceeded by structured arrays with a large number of
+columns. The version 2.0 format extends the header size to 4 GiB.
+`numpy.save` will automatically save in 2.0 format if the data requires it,
+else it will always use the more compatible 1.0 format.
+
+The description of the fourth element of the header therefore has become:
+"The next 4 bytes form a little-endian unsigned int: the length of the header
+data HEADER_LEN."
+
+Format Version 3.0
+------------------
+
+This version replaces the ASCII string (which in practice was latin1) with
+a utf8-encoded string, so supports structured types with any unicode field
+names.
+
+Notes
+-----
+The ``.npy`` format, including motivation for creating it and a comparison of
+alternatives, is described in the
+:doc:`"npy-format" NEP <neps:nep-0001-npy-format>`, however details have
+evolved with time and this document is more current.
+
+"""
+import io
+import os
+import pickle
+import warnings
+
+import numpy
+from numpy._utils import set_module
+from numpy.lib._utils_impl import drop_metadata
+
+__all__ = []
+
+drop_metadata.__module__ = "numpy.lib.format"
+
+EXPECTED_KEYS = {'descr', 'fortran_order', 'shape'}
+MAGIC_PREFIX = b'\x93NUMPY'
+MAGIC_LEN = len(MAGIC_PREFIX) + 2
+ARRAY_ALIGN = 64 # plausible values are powers of 2 between 16 and 4096
+BUFFER_SIZE = 2**18 # size of buffer for reading npz files in bytes
+# allow growth within the address space of a 64 bit machine along one axis
+GROWTH_AXIS_MAX_DIGITS = 21 # = len(str(8*2**64-1)) hypothetical int1 dtype
+
+# difference between version 1.0 and 2.0 is a 4 byte (I) header length
+# instead of 2 bytes (H) allowing storage of large structured arrays
+_header_size_info = {
+ (1, 0): ('<H', 'latin1'),
+ (2, 0): ('<I', 'latin1'),
+ (3, 0): ('<I', 'utf8'),
+}
+
+# Python's literal_eval is not actually safe for large inputs, since parsing
+# may become slow or even cause interpreter crashes.
+# This is an arbitrary, low limit which should make it safe in practice.
+_MAX_HEADER_SIZE = 10000
+
+
+def _check_version(version):
+ if version not in [(1, 0), (2, 0), (3, 0), None]:
+ msg = "we only support format version (1,0), (2,0), and (3,0), not %s"
+ raise ValueError(msg % (version,))
+
+
+@set_module("numpy.lib.format")
+def magic(major, minor):
+ """ Return the magic string for the given file format version.
+
+ Parameters
+ ----------
+ major : int in [0, 255]
+ minor : int in [0, 255]
+
+ Returns
+ -------
+ magic : str
+
+ Raises
+ ------
+    ValueError
+        If the version cannot be formatted.
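+
+    Examples
+    --------
+    The prefix plus the two version bytes (illustrative)::
+
+        >>> import numpy as np
+        >>> np.lib.format.magic(1, 0)
+        b'\\x93NUMPY\\x01\\x00'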
+ """
+ if major < 0 or major > 255:
+ raise ValueError("major version must be 0 <= major < 256")
+ if minor < 0 or minor > 255:
+ raise ValueError("minor version must be 0 <= minor < 256")
+ return MAGIC_PREFIX + bytes([major, minor])
+
+
+@set_module("numpy.lib.format")
+def read_magic(fp):
+ """ Read the magic string to get the version of the file format.
+
+ Parameters
+ ----------
+ fp : filelike object
+
+ Returns
+ -------
+ major : int
+ minor : int
+ """
+ magic_str = _read_bytes(fp, MAGIC_LEN, "magic string")
+ if magic_str[:-2] != MAGIC_PREFIX:
+ msg = "the magic string is not correct; expected %r, got %r"
+ raise ValueError(msg % (MAGIC_PREFIX, magic_str[:-2]))
+ major, minor = magic_str[-2:]
+ return major, minor
+
+
+@set_module("numpy.lib.format")
+def dtype_to_descr(dtype):
+ """
+ Get a serializable descriptor from the dtype.
+
+ The .descr attribute of a dtype object cannot be round-tripped through
+ the dtype() constructor. Simple types, like dtype('float32'), have
+ a descr which looks like a record array with one field with '' as
+ a name. The dtype() constructor interprets this as a request to give
+    a default name. Instead, we construct a descriptor that can be passed
+    to dtype().
+
+ Parameters
+ ----------
+ dtype : dtype
+ The dtype of the array that will be written to disk.
+
+ Returns
+ -------
+ descr : object
+ An object that can be passed to `numpy.dtype()` in order to
+ replicate the input dtype.
+
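+    Examples
+    --------
+    For simple dtypes the descriptor is just the dtype string (illustrative)::
+
+        >>> import numpy as np
+        >>> np.lib.format.dtype_to_descr(np.dtype('<f8'))
+        '<f8'
+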
+ """
+ # NOTE: that drop_metadata may not return the right dtype e.g. for user
+ # dtypes. In that case our code below would fail the same, though.
+ new_dtype = drop_metadata(dtype)
+ if new_dtype is not dtype:
+ warnings.warn("metadata on a dtype is not saved to an npy/npz. "
+ "Use another format (such as pickle) to store it.",
+ UserWarning, stacklevel=2)
+ dtype = new_dtype
+
+ if dtype.names is not None:
+ # This is a record array. The .descr is fine. XXX: parts of the
+ # record array with an empty name, like padding bytes, still get
+ # fiddled with. This needs to be fixed in the C implementation of
+ # dtype().
+ return dtype.descr
+ elif not type(dtype)._legacy:
+ # this must be a user-defined dtype since numpy does not yet expose any
+ # non-legacy dtypes in the public API
+ #
+ # non-legacy dtypes don't yet have __array_interface__
+ # support. Instead, as a hack, we use pickle to save the array, and lie
+ # that the dtype is object. When the array is loaded, the descriptor is
+ # unpickled with the array and the object dtype in the header is
+ # discarded.
+ #
+ # a future NEP should define a way to serialize user-defined
+ # descriptors and ideally work out the possible security implications
+ warnings.warn("Custom dtypes are saved as python objects using the "
+ "pickle protocol. Loading this file requires "
+ "allow_pickle=True to be set.",
+ UserWarning, stacklevel=2)
+ return "|O"
+ else:
+ return dtype.str
+
+
+@set_module("numpy.lib.format")
+def descr_to_dtype(descr):
+ """
+ Returns a dtype based off the given description.
+
+ This is essentially the reverse of `~lib.format.dtype_to_descr`. It will
+    remove the valueless padding fields that ``.descr`` creates for simple
+    dtypes like dtype('float32'), and then convert the description to its
+    corresponding dtype.
+
+ Parameters
+ ----------
+ descr : object
+ The object retrieved by dtype.descr. Can be passed to
+ `numpy.dtype` in order to replicate the input dtype.
+
+ Returns
+ -------
+ dtype : dtype
+ The dtype constructed by the description.
+
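+    Examples
+    --------
+    Round-tripping a structured dtype through its ``.descr`` (illustrative)::
+
+        >>> import numpy as np
+        >>> dt = np.dtype([('x', '<f8'), ('y', '<i4')])
+        >>> np.lib.format.descr_to_dtype(dt.descr) == dt
+        True
+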
+ """
+ if isinstance(descr, str):
+ # No padding removal needed
+ return numpy.dtype(descr)
+ elif isinstance(descr, tuple):
+ # subtype, will always have a shape descr[1]
+ dt = descr_to_dtype(descr[0])
+ return numpy.dtype((dt, descr[1]))
+
+ titles = []
+ names = []
+ formats = []
+ offsets = []
+ offset = 0
+ for field in descr:
+ if len(field) == 2:
+ name, descr_str = field
+ dt = descr_to_dtype(descr_str)
+ else:
+ name, descr_str, shape = field
+ dt = numpy.dtype((descr_to_dtype(descr_str), shape))
+
+ # Ignore padding bytes, which will be void bytes with '' as name
+        # (Once support for blank names is removed, only "if name == ''" is needed.)
+ is_pad = (name == '' and dt.type is numpy.void and dt.names is None)
+ if not is_pad:
+ title, name = name if isinstance(name, tuple) else (None, name)
+ titles.append(title)
+ names.append(name)
+ formats.append(dt)
+ offsets.append(offset)
+ offset += dt.itemsize
+
+ return numpy.dtype({'names': names, 'formats': formats, 'titles': titles,
+ 'offsets': offsets, 'itemsize': offset})
+
+
+@set_module("numpy.lib.format")
+def header_data_from_array_1_0(array):
+ """ Get the dictionary of header metadata from a numpy.ndarray.
+
+ Parameters
+ ----------
+ array : numpy.ndarray
+
+ Returns
+ -------
+ d : dict
+ This has the appropriate entries for writing its string representation
+ to the header of the file.
+ """
+ d = {'shape': array.shape}
+ if array.flags.c_contiguous:
+ d['fortran_order'] = False
+ elif array.flags.f_contiguous:
+ d['fortran_order'] = True
+ else:
+ # Totally non-contiguous data. We will have to make it C-contiguous
+ # before writing. Note that we need to test for C_CONTIGUOUS first
+ # because a 1-D array is both C_CONTIGUOUS and F_CONTIGUOUS.
+ d['fortran_order'] = False
+
+ d['descr'] = dtype_to_descr(array.dtype)
+ return d
+
+
+def _wrap_header(header, version):
+ """
+ Takes a stringified header, and attaches the prefix and padding to it
+ """
+ import struct
+ assert version is not None
+ fmt, encoding = _header_size_info[version]
+ header = header.encode(encoding)
+ hlen = len(header) + 1
+ padlen = ARRAY_ALIGN - ((MAGIC_LEN + struct.calcsize(fmt) + hlen) % ARRAY_ALIGN)
+ try:
+ header_prefix = magic(*version) + struct.pack(fmt, hlen + padlen)
+ except struct.error:
+ msg = f"Header length {hlen} too big for version={version}"
+ raise ValueError(msg) from None
+
+ # Pad the header with spaces and a final newline such that the magic
+ # string, the header-length short and the header are aligned on a
+ # ARRAY_ALIGN byte boundary. This supports memory mapping of dtypes
+ # aligned up to ARRAY_ALIGN on systems like Linux where mmap()
+ # offset must be page-aligned (i.e. the beginning of the file).
+ return header_prefix + header + b' ' * padlen + b'\n'
+
+
+def _wrap_header_guess_version(header):
+ """
+ Like `_wrap_header`, but chooses an appropriate version given the contents
+ """
+ try:
+ return _wrap_header(header, (1, 0))
+ except ValueError:
+ pass
+
+ try:
+ ret = _wrap_header(header, (2, 0))
+ except UnicodeEncodeError:
+ pass
+ else:
+ warnings.warn("Stored array in format 2.0. It can only be"
+ "read by NumPy >= 1.9", UserWarning, stacklevel=2)
+ return ret
+
+ header = _wrap_header(header, (3, 0))
+ warnings.warn("Stored array in format 3.0. It can only be "
+ "read by NumPy >= 1.17", UserWarning, stacklevel=2)
+ return header
+
+
+def _write_array_header(fp, d, version=None):
+ """ Write the header for an array and returns the version used
+
+ Parameters
+ ----------
+ fp : filelike object
+ d : dict
+ This has the appropriate entries for writing its string representation
+ to the header of the file.
+ version : tuple or None
+ None means use oldest that works. Providing an explicit version will
+ raise a ValueError if the format does not allow saving this data.
+ Default: None
+ """
+ header = ["{"]
+ for key, value in sorted(d.items()):
+ # Need to use repr here, since we eval these when reading
+ header.append(f"'{key}': {repr(value)}, ")
+ header.append("}")
+ header = "".join(header)
+
+ # Add some spare space so that the array header can be modified in-place
+ # when changing the array size, e.g. when growing it by appending data at
+ # the end.
+ shape = d['shape']
+ header += " " * ((GROWTH_AXIS_MAX_DIGITS - len(repr(
+ shape[-1 if d['fortran_order'] else 0]
+ ))) if len(shape) > 0 else 0)
+
+ if version is None:
+ header = _wrap_header_guess_version(header)
+ else:
+ header = _wrap_header(header, version)
+ fp.write(header)
+
+
+@set_module("numpy.lib.format")
+def write_array_header_1_0(fp, d):
+ """ Write the header for an array using the 1.0 format.
+
+ Parameters
+ ----------
+ fp : filelike object
+ d : dict
+ This has the appropriate entries for writing its string
+ representation to the header of the file.
+ """
+ _write_array_header(fp, d, (1, 0))
+
+
+@set_module("numpy.lib.format")
+def write_array_header_2_0(fp, d):
+ """ Write the header for an array using the 2.0 format.
+ The 2.0 format allows storing very large structured arrays.
+
+ Parameters
+ ----------
+ fp : filelike object
+ d : dict
+ This has the appropriate entries for writing its string
+ representation to the header of the file.
+ """
+ _write_array_header(fp, d, (2, 0))
+
+
+@set_module("numpy.lib.format")
+def read_array_header_1_0(fp, max_header_size=_MAX_HEADER_SIZE):
+ """
+ Read an array header from a filelike object using the 1.0 file format
+ version.
+
+ This will leave the file object located just after the header.
+
+ Parameters
+ ----------
+ fp : filelike object
+ A file object or something with a `.read()` method like a file.
+    max_header_size : int, optional
+        Maximum allowed size of the header. Large headers may not be safe
+        to load securely and thus require explicitly passing a larger value.
+        See :py:func:`ast.literal_eval()` for details.
+
+    Returns
+    -------
+    shape : tuple of int
+        The shape of the array.
+    fortran_order : bool
+        The array data will be written out directly if it is either
+        C-contiguous or Fortran-contiguous. Otherwise, it will be made
+        contiguous before writing it out.
+    dtype : dtype
+        The dtype of the file's data.
+
+ Raises
+ ------
+ ValueError
+ If the data is invalid.
+
+ """
+ return _read_array_header(
+ fp, version=(1, 0), max_header_size=max_header_size)
+
+
+@set_module("numpy.lib.format")
+def read_array_header_2_0(fp, max_header_size=_MAX_HEADER_SIZE):
+ """
+ Read an array header from a filelike object using the 2.0 file format
+ version.
+
+ This will leave the file object located just after the header.
+
+ Parameters
+ ----------
+ fp : filelike object
+ A file object or something with a `.read()` method like a file.
+ max_header_size : int, optional
+ Maximum allowed size of the header. Large headers may not be safe
+ to load securely and thus require explicitly passing a larger value.
+ See :py:func:`ast.literal_eval()` for details.
+
+ Returns
+ -------
+ shape : tuple of int
+ The shape of the array.
+ fortran_order : bool
+ The array data will be written out directly if it is either
+ C-contiguous or Fortran-contiguous. Otherwise, it will be made
+ contiguous before writing it out.
+ dtype : dtype
+ The dtype of the file's data.
+
+ Raises
+ ------
+ ValueError
+ If the data is invalid.
+
+ """
+ return _read_array_header(
+ fp, version=(2, 0), max_header_size=max_header_size)
+
+
+def _filter_header(s):
+ """Clean up 'L' in npz header ints.
+
+ Cleans up the 'L' in strings representing integers. Needed to allow npz
+ headers produced in Python2 to be read in Python3.
+
+ Parameters
+ ----------
+ s : string
+ Npy file header.
+
+ Returns
+ -------
+ header : str
+ Cleaned up header.
+
+ """
+ import tokenize
+ from io import StringIO
+
+ tokens = []
+ last_token_was_number = False
+ for token in tokenize.generate_tokens(StringIO(s).readline):
+ token_type = token[0]
+ token_string = token[1]
+ if (last_token_was_number and
+ token_type == tokenize.NAME and
+ token_string == "L"):
+ continue
+ else:
+ tokens.append(token)
+ last_token_was_number = (token_type == tokenize.NUMBER)
+ return tokenize.untokenize(tokens)
+
+
+def _read_array_header(fp, version, max_header_size=_MAX_HEADER_SIZE):
+ """
+ see read_array_header_1_0
+ """
+ # Read an unsigned, little-endian short int which has the length of the
+ # header.
+ import ast
+ import struct
+ hinfo = _header_size_info.get(version)
+ if hinfo is None:
+ raise ValueError(f"Invalid version {version!r}")
+ hlength_type, encoding = hinfo
+
+ hlength_str = _read_bytes(fp, struct.calcsize(hlength_type), "array header length")
+ header_length = struct.unpack(hlength_type, hlength_str)[0]
+ header = _read_bytes(fp, header_length, "array header")
+ header = header.decode(encoding)
+ if len(header) > max_header_size:
+ raise ValueError(
+ f"Header info length ({len(header)}) is large and may not be safe "
+ "to load securely.\n"
+ "To allow loading, adjust `max_header_size` or fully trust "
+ "the `.npy` file using `allow_pickle=True`.\n"
+ "For safety against large resource use or crashes, sandboxing "
+ "may be necessary.")
+
+ # The header is a pretty-printed string representation of a literal
+ # Python dictionary with trailing newlines padded to a ARRAY_ALIGN byte
+ # boundary. The keys are strings.
+ # "shape" : tuple of int
+ # "fortran_order" : bool
+ # "descr" : dtype.descr
+ # Versions (2, 0) and (1, 0) could have been created by a Python 2
+ # implementation before header filtering was implemented.
+ #
+ # For performance reasons, we try without _filter_header first though
+ try:
+ d = ast.literal_eval(header)
+ except SyntaxError as e:
+ if version <= (2, 0):
+ header = _filter_header(header)
+ try:
+ d = ast.literal_eval(header)
+ except SyntaxError as e2:
+ msg = "Cannot parse header: {!r}"
+ raise ValueError(msg.format(header)) from e2
+ else:
+ warnings.warn(
+ "Reading `.npy` or `.npz` file required additional "
+ "header parsing as it was created on Python 2. Save the "
+ "file again to speed up loading and avoid this warning.",
+ UserWarning, stacklevel=4)
+ else:
+ msg = "Cannot parse header: {!r}"
+ raise ValueError(msg.format(header)) from e
+ if not isinstance(d, dict):
+ msg = "Header is not a dictionary: {!r}"
+ raise ValueError(msg.format(d))
+
+ if EXPECTED_KEYS != d.keys():
+ keys = sorted(d.keys())
+ msg = "Header does not contain the correct keys: {!r}"
+ raise ValueError(msg.format(keys))
+
+ # Sanity-check the values.
+ if (not isinstance(d['shape'], tuple) or
+ not all(isinstance(x, int) for x in d['shape'])):
+ msg = "shape is not valid: {!r}"
+ raise ValueError(msg.format(d['shape']))
+ if not isinstance(d['fortran_order'], bool):
+ msg = "fortran_order is not a valid bool: {!r}"
+ raise ValueError(msg.format(d['fortran_order']))
+ try:
+ dtype = descr_to_dtype(d['descr'])
+ except TypeError as e:
+ msg = "descr is not a valid dtype descriptor: {!r}"
+ raise ValueError(msg.format(d['descr'])) from e
+
+ return d['shape'], d['fortran_order'], dtype
+
+
+@set_module("numpy.lib.format")
+def write_array(fp, array, version=None, allow_pickle=True, pickle_kwargs=None):
+ """
+ Write an array to an NPY file, including a header.
+
+ If the array is neither C-contiguous nor Fortran-contiguous AND the
+ file_like object is not a real file object, this function will have to
+ copy data in memory.
+
+ Parameters
+ ----------
+ fp : file_like object
+ An open, writable file object, or similar object with a
+ ``.write()`` method.
+ array : ndarray
+ The array to write to disk.
+ version : (int, int) or None, optional
+ The version number of the format. None means use the oldest
+ supported version that is able to store the data. Default: None
+ allow_pickle : bool, optional
+ Whether to allow writing pickled data. Default: True
+ pickle_kwargs : dict, optional
+ Additional keyword arguments to pass to pickle.dump, excluding
+ 'protocol'. These are only useful when pickling objects in object
+ arrays to Python 2 compatible format.
+
+ Raises
+ ------
+ ValueError
+ If the array cannot be persisted. This includes the case of
+ allow_pickle=False and array being an object array.
+ Various other errors
+ If the array contains Python objects as part of its dtype, the
+ process of pickling them may raise various errors if the objects
+ are not picklable.
+
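+    Examples
+    --------
+    Round-trip through an in-memory stream (illustrative)::
+
+        >>> import io
+        >>> import numpy as np
+        >>> buf = io.BytesIO()
+        >>> np.lib.format.write_array(buf, np.arange(4))
+        >>> _ = buf.seek(0)
+        >>> np.lib.format.read_array(buf)
+        array([0, 1, 2, 3])
+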
+ """
+ _check_version(version)
+ _write_array_header(fp, header_data_from_array_1_0(array), version)
+
+ if array.itemsize == 0:
+ buffersize = 0
+ else:
+ # Set buffer size to 16 MiB to hide the Python loop overhead.
+ buffersize = max(16 * 1024 ** 2 // array.itemsize, 1)
+
+ dtype_class = type(array.dtype)
+
+ if array.dtype.hasobject or not dtype_class._legacy:
+ # We contain Python objects so we cannot write out the data
+ # directly. Instead, we will pickle it out
+ if not allow_pickle:
+ if array.dtype.hasobject:
+ raise ValueError("Object arrays cannot be saved when "
+ "allow_pickle=False")
+ if not dtype_class._legacy:
+ raise ValueError("User-defined dtypes cannot be saved "
+ "when allow_pickle=False")
+ if pickle_kwargs is None:
+ pickle_kwargs = {}
+ pickle.dump(array, fp, protocol=4, **pickle_kwargs)
+ elif array.flags.f_contiguous and not array.flags.c_contiguous:
+ if isfileobj(fp):
+ array.T.tofile(fp)
+ else:
+ for chunk in numpy.nditer(
+ array, flags=['external_loop', 'buffered', 'zerosize_ok'],
+ buffersize=buffersize, order='F'):
+ fp.write(chunk.tobytes('C'))
+ elif isfileobj(fp):
+ array.tofile(fp)
+ else:
+ for chunk in numpy.nditer(
+ array, flags=['external_loop', 'buffered', 'zerosize_ok'],
+ buffersize=buffersize, order='C'):
+ fp.write(chunk.tobytes('C'))
+
+
+@set_module("numpy.lib.format")
+def read_array(fp, allow_pickle=False, pickle_kwargs=None, *,
+ max_header_size=_MAX_HEADER_SIZE):
+ """
+ Read an array from an NPY file.
+
+ Parameters
+ ----------
+ fp : file_like object
+ If this is not a real file object, then this may take extra memory
+ and time.
+ allow_pickle : bool, optional
+ Whether to allow reading pickled data. Default: False
+ pickle_kwargs : dict, optional
+ Additional keyword arguments to pass to pickle.load. These are only
+ useful when loading object arrays saved on Python 2.
+ max_header_size : int, optional
+ Maximum allowed size of the header. Large headers may not be safe
+ to load securely and thus require explicitly passing a larger value.
+ See :py:func:`ast.literal_eval()` for details.
+ This option is ignored when `allow_pickle` is passed. In that case
+ the file is by definition trusted and the limit is unnecessary.
+
+ Returns
+ -------
+ array : ndarray
+ The array from the data on disk.
+
+ Raises
+ ------
+ ValueError
+ If the data is invalid, or allow_pickle=False and the file contains
+ an object array.
+
+ """
+ if allow_pickle:
+ # Effectively ignore max_header_size, since `allow_pickle` indicates
+ # that the input is fully trusted.
+ max_header_size = 2**64
+
+ version = read_magic(fp)
+ _check_version(version)
+ shape, fortran_order, dtype = _read_array_header(
+ fp, version, max_header_size=max_header_size)
+ if len(shape) == 0:
+ count = 1
+ else:
+ count = numpy.multiply.reduce(shape, dtype=numpy.int64)
+
+ # Now read the actual data.
+ if dtype.hasobject:
+ # The array contained Python objects. We need to unpickle the data.
+ if not allow_pickle:
+ raise ValueError("Object arrays cannot be loaded when "
+ "allow_pickle=False")
+ if pickle_kwargs is None:
+ pickle_kwargs = {}
+ try:
+ array = pickle.load(fp, **pickle_kwargs)
+ except UnicodeError as err:
+ # Friendlier error message
+ raise UnicodeError("Unpickling a python object failed: %r\n"
+ "You may need to pass the encoding= option "
+ "to numpy.load" % (err,)) from err
+ else:
+ if isfileobj(fp):
+ # We can use the fast fromfile() function.
+ array = numpy.fromfile(fp, dtype=dtype, count=count)
+ else:
+ # This is not a real file. We have to read it the
+ # memory-intensive way.
+ # crc32 module fails on reads greater than 2 ** 32 bytes,
+ # breaking large reads from gzip streams. Chunk reads to
+ # BUFFER_SIZE bytes to avoid issue and reduce memory overhead
+ # of the read. In non-chunked case count < max_read_count, so
+ # only one read is performed.
+
+ # Use np.ndarray instead of np.empty since the latter does
+ # not correctly instantiate zero-width string dtypes; see
+ # https://github.com/numpy/numpy/pull/6430
+ array = numpy.ndarray(count, dtype=dtype)
+
+ if dtype.itemsize > 0:
+ # If dtype.itemsize == 0 then there's nothing more to read
+ max_read_count = BUFFER_SIZE // min(BUFFER_SIZE, dtype.itemsize)
+
+ for i in range(0, count, max_read_count):
+ read_count = min(max_read_count, count - i)
+ read_size = int(read_count * dtype.itemsize)
+ data = _read_bytes(fp, read_size, "array data")
+ array[i:i + read_count] = numpy.frombuffer(data, dtype=dtype,
+ count=read_count)
+
+ if array.size != count:
+ raise ValueError(
+ "Failed to read all data for array. "
+ f"Expected {shape} = {count} elements, "
+ f"could only read {array.size} elements. "
+ "(file seems not fully written?)"
+ )
+
+ if fortran_order:
+ array.shape = shape[::-1]
+ array = array.transpose()
+ else:
+ array.shape = shape
+
+ return array
+
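+# A minimal round-trip sketch for the two functions above, using an
+# in-memory buffer (illustrative only):
+#
+#     >>> import io
+#     >>> buf = io.BytesIO()
+#     >>> write_array(buf, numpy.arange(6).reshape(2, 3))
+#     >>> _ = buf.seek(0)
+#     >>> read_array(buf)
+#     array([[0, 1, 2],
+#            [3, 4, 5]])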
+
+@set_module("numpy.lib.format")
+def open_memmap(filename, mode='r+', dtype=None, shape=None,
+ fortran_order=False, version=None, *,
+ max_header_size=_MAX_HEADER_SIZE):
+ """
+ Open a .npy file as a memory-mapped array.
+
+ This may be used to read an existing file or create a new one.
+
+ Parameters
+ ----------
+ filename : str or path-like
+ The name of the file on disk. This may *not* be a file-like
+ object.
+ mode : str, optional
+ The mode in which to open the file; the default is 'r+'. In
+ addition to the standard file modes, 'c' is also accepted to mean
+ "copy on write." See `memmap` for the available mode strings.
+ dtype : data-type, optional
+ The data type of the array if we are creating a new file in "write"
+ mode; otherwise, `dtype` is ignored. The default value is None, which
+ results in a data-type of `float64`.
+ shape : tuple of int
+ The shape of the array if we are creating a new file in "write"
+ mode, in which case this parameter is required. Otherwise, this
+ parameter is ignored and is thus optional.
+ fortran_order : bool, optional
+ Whether the array should be Fortran-contiguous (True) or
+ C-contiguous (False, the default) if we are creating a new file in
+ "write" mode.
+ version : tuple of int (major, minor) or None
+ If the mode is a "write" mode, then this is the version of the file
+ format used to create the file. None means use the oldest
+ supported version that is able to store the data. Default: None
+ max_header_size : int, optional
+ Maximum allowed size of the header. Large headers may not be safe
+ to load securely and thus require explicitly passing a larger value.
+ See :py:func:`ast.literal_eval()` for details.
+
+ Returns
+ -------
+ marray : memmap
+ The memory-mapped array.
+
+ Raises
+ ------
+ ValueError
+ If the data or the mode is invalid.
+ OSError
+ If the file is not found or cannot be opened correctly.
+
+ See Also
+ --------
+ numpy.memmap
+
+ """
+ if isfileobj(filename):
+ raise ValueError("Filename must be a string or a path-like object."
+ " Memmap cannot use existing file handles.")
+
+ if 'w' in mode:
+ # We are creating the file, not reading it.
+ # Check if we ought to create the file.
+ _check_version(version)
+ # Ensure that the given dtype is an authentic dtype object rather
+ # than just something that can be interpreted as a dtype object.
+ dtype = numpy.dtype(dtype)
+ if dtype.hasobject:
+ msg = "Array can't be memory-mapped: Python objects in dtype."
+ raise ValueError(msg)
+ d = {
+ "descr": dtype_to_descr(dtype),
+ "fortran_order": fortran_order,
+ "shape": shape,
+ }
+ # If we got here, then it should be safe to create the file.
+ with open(os.fspath(filename), mode + 'b') as fp:
+ _write_array_header(fp, d, version)
+ offset = fp.tell()
+ else:
+ # Read the header of the file first.
+ with open(os.fspath(filename), 'rb') as fp:
+ version = read_magic(fp)
+ _check_version(version)
+
+ shape, fortran_order, dtype = _read_array_header(
+ fp, version, max_header_size=max_header_size)
+ if dtype.hasobject:
+ msg = "Array can't be memory-mapped: Python objects in dtype."
+ raise ValueError(msg)
+ offset = fp.tell()
+
+ if fortran_order:
+ order = 'F'
+ else:
+ order = 'C'
+
+ # We need to change a write-only mode to a read-write mode since we've
+ # already written data to the file.
+ if mode == 'w+':
+ mode = 'r+'
+
+ marray = numpy.memmap(filename, dtype=dtype, shape=shape, order=order,
+ mode=mode, offset=offset)
+
+ return marray
+
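+# Usage sketch for ``open_memmap`` (the path below is hypothetical): create
+# a new file in 'w+' mode, write through the memmap, then reopen read-only.
+#
+#     >>> import os, tempfile
+#     >>> path = os.path.join(tempfile.mkdtemp(), 'example.npy')
+#     >>> m = open_memmap(path, mode='w+', dtype=numpy.float64, shape=(3,))
+#     >>> m[:] = [1., 2., 3.]
+#     >>> m.flush()
+#     >>> open_memmap(path, mode='r')
+#     memmap([1., 2., 3.])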
+
+def _read_bytes(fp, size, error_template="ran out of data"):
+ """
+ Read from file-like object until size bytes are read.
+ Raises ValueError if EOF is encountered before size bytes are read.
+ Non-blocking objects are only supported if they derive from io objects.
+
+ Required as e.g. ZipExtFile in Python 2.6 can return less data than
+ requested.
+ """
+ data = b""
+ while True:
+ # io files (the default in Python 3) return None or raise on
+ # would-block; Python 2 files would truncate, and probably nothing
+ # can be done about that. Note that regular files can't be
+ # non-blocking.
+ try:
+ r = fp.read(size - len(data))
+ data += r
+ if len(r) == 0 or len(data) == size:
+ break
+ except BlockingIOError:
+ pass
+ if len(data) != size:
+ msg = "EOF: reading %s, expected %d bytes got %d"
+ raise ValueError(msg % (error_template, size, len(data)))
+ else:
+ return data
+
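+# Behavior sketch for ``_read_bytes``: it returns exactly ``size`` bytes or
+# raises ValueError on a short read.
+#
+#     >>> import io
+#     >>> _read_bytes(io.BytesIO(b"abcdef"), 4)
+#     b'abcd'
+#     >>> _read_bytes(io.BytesIO(b"ab"), 4, "array data")
+#     Traceback (most recent call last):
+#     ...
+#     ValueError: EOF: reading array data, expected 4 bytes got 2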
+
+@set_module("numpy.lib.format")
+def isfileobj(f):
+ if not isinstance(f, (io.FileIO, io.BufferedReader, io.BufferedWriter)):
+ return False
+ try:
+ # BufferedReader/Writer may raise OSError when
+ # fetching `fileno()` (e.g. when wrapping BytesIO).
+ f.fileno()
+ return True
+ except OSError:
+ return False
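+# Quick sketch: buffered objects backed by a real file descriptor qualify,
+# while in-memory buffers do not.
+#
+#     >>> import io, tempfile
+#     >>> isfileobj(io.BytesIO())
+#     False
+#     >>> with tempfile.TemporaryFile() as f:
+#     ...     isfileobj(f)
+#     True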
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/_format_impl.pyi b/.venv/lib/python3.12/site-packages/numpy/lib/_format_impl.pyi
new file mode 100644
index 0000000..f4898d9
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/_format_impl.pyi
@@ -0,0 +1,26 @@
+from typing import Final, Literal
+
+from numpy.lib._utils_impl import drop_metadata # noqa: F401
+
+__all__: list[str] = []
+
+EXPECTED_KEYS: Final[set[str]]
+MAGIC_PREFIX: Final[bytes]
+MAGIC_LEN: Literal[8]
+ARRAY_ALIGN: Literal[64]
+BUFFER_SIZE: Literal[262144] # 2**18
+GROWTH_AXIS_MAX_DIGITS: Literal[21]
+
+def magic(major, minor): ...
+def read_magic(fp): ...
+def dtype_to_descr(dtype): ...
+def descr_to_dtype(descr): ...
+def header_data_from_array_1_0(array): ...
+def write_array_header_1_0(fp, d): ...
+def write_array_header_2_0(fp, d): ...
+def read_array_header_1_0(fp): ...
+def read_array_header_2_0(fp): ...
+def write_array(fp, array, version=..., allow_pickle=..., pickle_kwargs=...): ...
+def read_array(fp, allow_pickle=..., pickle_kwargs=..., *, max_header_size=...): ...
+def open_memmap(filename, mode=..., dtype=..., shape=..., fortran_order=..., version=..., *, max_header_size=...): ...
+def isfileobj(f): ...
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/_function_base_impl.py b/.venv/lib/python3.12/site-packages/numpy/lib/_function_base_impl.py
new file mode 100644
index 0000000..9ee5944
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/_function_base_impl.py
@@ -0,0 +1,5844 @@
+import builtins
+import collections.abc
+import functools
+import re
+import sys
+import warnings
+
+import numpy as np
+import numpy._core.numeric as _nx
+from numpy._core import overrides, transpose
+from numpy._core._multiarray_umath import _array_converter
+from numpy._core.fromnumeric import any, mean, nonzero, partition, ravel, sum
+from numpy._core.multiarray import _monotonicity, _place, bincount, normalize_axis_index
+from numpy._core.multiarray import interp as compiled_interp
+from numpy._core.multiarray import interp_complex as compiled_interp_complex
+from numpy._core.numeric import (
+ absolute,
+ arange,
+ array,
+ asanyarray,
+ asarray,
+ concatenate,
+ dot,
+ empty,
+ integer,
+ intp,
+ isscalar,
+ ndarray,
+ ones,
+ take,
+ where,
+ zeros_like,
+)
+from numpy._core.numerictypes import typecodes
+from numpy._core.umath import (
+ add,
+ arctan2,
+ cos,
+ exp,
+ frompyfunc,
+ less_equal,
+ minimum,
+ mod,
+ not_equal,
+ pi,
+ sin,
+ sqrt,
+ subtract,
+)
+from numpy._utils import set_module
+
+# needed in this module for compatibility
+from numpy.lib._histograms_impl import histogram, histogramdd # noqa: F401
+from numpy.lib._twodim_base_impl import diag
+
+array_function_dispatch = functools.partial(
+ overrides.array_function_dispatch, module='numpy')
+
+
+__all__ = [
+ 'select', 'piecewise', 'trim_zeros', 'copy', 'iterable', 'percentile',
+ 'diff', 'gradient', 'angle', 'unwrap', 'sort_complex', 'flip',
+ 'rot90', 'extract', 'place', 'vectorize', 'asarray_chkfinite', 'average',
+ 'bincount', 'digitize', 'cov', 'corrcoef',
+ 'median', 'sinc', 'hamming', 'hanning', 'bartlett',
+ 'blackman', 'kaiser', 'trapezoid', 'trapz', 'i0',
+ 'meshgrid', 'delete', 'insert', 'append', 'interp',
+ 'quantile'
+ ]
+
+# _QuantileMethods is a dictionary listing all the supported methods to
+# compute quantile/percentile.
+#
+# Below, virtual_index refers to the index of the element where the percentile
+# would be found in the sorted sample.
+# When the sample contains exactly the percentile wanted, the virtual_index is
+# an integer equal to the index of this element.
+# When the percentile wanted is in between two elements, the virtual_index
+# is made of an integer part (a.k.a. 'i' or 'left') and a fractional part
+# (a.k.a. 'g' or 'gamma').
+#
+# Each method in _QuantileMethods has two properties
+# get_virtual_index : Callable
+# The function used to compute the virtual_index.
+# fix_gamma : Callable
+# A function used for discrete methods to force the index to a specific value.
+_QuantileMethods = {
+ # --- HYNDMAN and FAN METHODS
+ # Discrete methods
+ 'inverted_cdf': {
+ 'get_virtual_index': lambda n, quantiles: _inverted_cdf(n, quantiles), # noqa: PLW0108
+ 'fix_gamma': None, # should never be called
+ },
+ 'averaged_inverted_cdf': {
+ 'get_virtual_index': lambda n, quantiles: (n * quantiles) - 1,
+ 'fix_gamma': lambda gamma, _: _get_gamma_mask(
+ shape=gamma.shape,
+ default_value=1.,
+ conditioned_value=0.5,
+ where=gamma == 0),
+ },
+ 'closest_observation': {
+ 'get_virtual_index': lambda n, quantiles: _closest_observation(n, quantiles), # noqa: PLW0108
+ 'fix_gamma': None, # should never be called
+ },
+ # Continuous methods
+ 'interpolated_inverted_cdf': {
+ 'get_virtual_index': lambda n, quantiles:
+ _compute_virtual_index(n, quantiles, 0, 1),
+ 'fix_gamma': lambda gamma, _: gamma,
+ },
+ 'hazen': {
+ 'get_virtual_index': lambda n, quantiles:
+ _compute_virtual_index(n, quantiles, 0.5, 0.5),
+ 'fix_gamma': lambda gamma, _: gamma,
+ },
+ 'weibull': {
+ 'get_virtual_index': lambda n, quantiles:
+ _compute_virtual_index(n, quantiles, 0, 0),
+ 'fix_gamma': lambda gamma, _: gamma,
+ },
+ # Default method.
+ # To avoid some rounding issues, `(n-1) * quantiles` is preferred to
+ # `_compute_virtual_index(n, quantiles, 1, 1)`.
+ # They are mathematically equivalent.
+ 'linear': {
+ 'get_virtual_index': lambda n, quantiles: (n - 1) * quantiles,
+ 'fix_gamma': lambda gamma, _: gamma,
+ },
+ 'median_unbiased': {
+ 'get_virtual_index': lambda n, quantiles:
+ _compute_virtual_index(n, quantiles, 1 / 3.0, 1 / 3.0),
+ 'fix_gamma': lambda gamma, _: gamma,
+ },
+ 'normal_unbiased': {
+ 'get_virtual_index': lambda n, quantiles:
+ _compute_virtual_index(n, quantiles, 3 / 8.0, 3 / 8.0),
+ 'fix_gamma': lambda gamma, _: gamma,
+ },
+ # --- OTHER METHODS
+ 'lower': {
+ 'get_virtual_index': lambda n, quantiles: np.floor(
+ (n - 1) * quantiles).astype(np.intp),
+ 'fix_gamma': None, # should never be called, index dtype is int
+ },
+ 'higher': {
+ 'get_virtual_index': lambda n, quantiles: np.ceil(
+ (n - 1) * quantiles).astype(np.intp),
+ 'fix_gamma': None, # should never be called, index dtype is int
+ },
+ 'midpoint': {
+ 'get_virtual_index': lambda n, quantiles: 0.5 * (
+ np.floor((n - 1) * quantiles)
+ + np.ceil((n - 1) * quantiles)),
+ 'fix_gamma': lambda gamma, index: _get_gamma_mask(
+ shape=gamma.shape,
+ default_value=0.5,
+ conditioned_value=0.,
+ where=index % 1 == 0),
+ },
+ 'nearest': {
+ 'get_virtual_index': lambda n, quantiles: np.around(
+ (n - 1) * quantiles).astype(np.intp),
+ 'fix_gamma': None,
+ # should never be called, index dtype is int
+ }}
+
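+# Worked example for the default 'linear' method above: with n = 5 sorted
+# values and q = 0.3, virtual_index = (n - 1) * q = 1.2, i.e. left index
+# i = 1 and gamma = 0.2, so the result lies 20% of the way between the
+# second and third order statistics.
+#
+#     >>> np.quantile(np.array([10., 20., 30., 40., 50.]), 0.3)
+#     np.float64(22.0)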
+
+def _rot90_dispatcher(m, k=None, axes=None):
+ return (m,)
+
+
+@array_function_dispatch(_rot90_dispatcher)
+def rot90(m, k=1, axes=(0, 1)):
+ """
+ Rotate an array by 90 degrees in the plane specified by axes.
+
+ Rotation direction is from the first towards the second axis.
+ This means for a 2D array with the default `k` and `axes`, the
+ rotation will be counterclockwise.
+
+ Parameters
+ ----------
+ m : array_like
+ Array of two or more dimensions.
+ k : integer
+ Number of times the array is rotated by 90 degrees.
+ axes : (2,) array_like
+ The array is rotated in the plane defined by the axes.
+ Axes must be different.
+
+ Returns
+ -------
+ y : ndarray
+ A rotated view of `m`.
+
+ See Also
+ --------
+ flip : Reverse the order of elements in an array along the given axis.
+ fliplr : Flip an array horizontally.
+ flipud : Flip an array vertically.
+
+ Notes
+ -----
+ ``rot90(m, k=1, axes=(1,0))`` is the reverse of
+ ``rot90(m, k=1, axes=(0,1))``
+
+ ``rot90(m, k=1, axes=(1,0))`` is equivalent to
+ ``rot90(m, k=-1, axes=(0,1))``
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> m = np.array([[1,2],[3,4]], int)
+ >>> m
+ array([[1, 2],
+ [3, 4]])
+ >>> np.rot90(m)
+ array([[2, 4],
+ [1, 3]])
+ >>> np.rot90(m, 2)
+ array([[4, 3],
+ [2, 1]])
+ >>> m = np.arange(8).reshape((2,2,2))
+ >>> np.rot90(m, 1, (1,2))
+ array([[[1, 3],
+ [0, 2]],
+ [[5, 7],
+ [4, 6]]])
+
+ """
+ axes = tuple(axes)
+ if len(axes) != 2:
+ raise ValueError("len(axes) must be 2.")
+
+ m = asanyarray(m)
+
+ if axes[0] == axes[1] or absolute(axes[0] - axes[1]) == m.ndim:
+ raise ValueError("Axes must be different.")
+
+ if (axes[0] >= m.ndim or axes[0] < -m.ndim
+ or axes[1] >= m.ndim or axes[1] < -m.ndim):
+ raise ValueError(f"Axes={axes} out of range for array of ndim={m.ndim}.")
+
+ k %= 4
+
+ if k == 0:
+ return m[:]
+ if k == 2:
+ return flip(flip(m, axes[0]), axes[1])
+
+ axes_list = arange(0, m.ndim)
+ (axes_list[axes[0]], axes_list[axes[1]]) = (axes_list[axes[1]],
+ axes_list[axes[0]])
+
+ if k == 1:
+ return transpose(flip(m, axes[1]), axes_list)
+ else:
+ # k == 3
+ return flip(transpose(m, axes_list), axes[1])
+
+
+def _flip_dispatcher(m, axis=None):
+ return (m,)
+
+
+@array_function_dispatch(_flip_dispatcher)
+def flip(m, axis=None):
+ """
+ Reverse the order of elements in an array along the given axis.
+
+ The shape of the array is preserved, but the elements are reordered.
+
+ Parameters
+ ----------
+ m : array_like
+ Input array.
+ axis : None or int or tuple of ints, optional
+ Axis or axes along which to flip over. The default,
+ axis=None, will flip over all of the axes of the input array.
+ If axis is negative it counts from the last to the first axis.
+
+ If axis is a tuple of ints, flipping is performed on all of the axes
+ specified in the tuple.
+
+ Returns
+ -------
+ out : array_like
+ A view of `m` with the entries of axis reversed. Since a view is
+ returned, this operation is done in constant time.
+
+ See Also
+ --------
+ flipud : Flip an array vertically (axis=0).
+ fliplr : Flip an array horizontally (axis=1).
+
+ Notes
+ -----
+ flip(m, 0) is equivalent to flipud(m).
+
+ flip(m, 1) is equivalent to fliplr(m).
+
+ flip(m, n) corresponds to ``m[...,::-1,...]`` with ``::-1`` at position n.
+
+ flip(m) corresponds to ``m[::-1,::-1,...,::-1]`` with ``::-1`` at all
+ positions.
+
+ flip(m, (0, 1)) corresponds to ``m[::-1,::-1,...]`` with ``::-1`` at
+ position 0 and position 1.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> A = np.arange(8).reshape((2,2,2))
+ >>> A
+ array([[[0, 1],
+ [2, 3]],
+ [[4, 5],
+ [6, 7]]])
+ >>> np.flip(A, 0)
+ array([[[4, 5],
+ [6, 7]],
+ [[0, 1],
+ [2, 3]]])
+ >>> np.flip(A, 1)
+ array([[[2, 3],
+ [0, 1]],
+ [[6, 7],
+ [4, 5]]])
+ >>> np.flip(A)
+ array([[[7, 6],
+ [5, 4]],
+ [[3, 2],
+ [1, 0]]])
+ >>> np.flip(A, (0, 2))
+ array([[[5, 4],
+ [7, 6]],
+ [[1, 0],
+ [3, 2]]])
+ >>> rng = np.random.default_rng()
+ >>> A = rng.normal(size=(3,4,5))
+ >>> np.all(np.flip(A,2) == A[:,:,::-1,...])
+ True
+ """
+ if not hasattr(m, 'ndim'):
+ m = asarray(m)
+ if axis is None:
+ indexer = (np.s_[::-1],) * m.ndim
+ else:
+ axis = _nx.normalize_axis_tuple(axis, m.ndim)
+ indexer = [np.s_[:]] * m.ndim
+ for ax in axis:
+ indexer[ax] = np.s_[::-1]
+ indexer = tuple(indexer)
+ return m[indexer]
+
+
+@set_module('numpy')
+def iterable(y):
+ """
+ Check whether or not an object can be iterated over.
+
+ Parameters
+ ----------
+ y : object
+ Input object.
+
+ Returns
+ -------
+ b : bool
+ Return ``True`` if the object has an iterator method or is a
+ sequence and ``False`` otherwise.
+
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.iterable([1, 2, 3])
+ True
+ >>> np.iterable(2)
+ False
+
+ Notes
+ -----
+ In most cases, the results of ``np.iterable(obj)`` are consistent with
+ ``isinstance(obj, collections.abc.Iterable)``. One notable exception is
+ the treatment of 0-dimensional arrays::
+
+ >>> from collections.abc import Iterable
+ >>> a = np.array(1.0) # 0-dimensional numpy array
+ >>> isinstance(a, Iterable)
+ True
+ >>> np.iterable(a)
+ False
+
+ """
+ try:
+ iter(y)
+ except TypeError:
+ return False
+ return True
+
+
+def _weights_are_valid(weights, a, axis):
+ """Validate weights array.
+
+ We assume `weights` is not None.
+ """
+ wgt = np.asanyarray(weights)
+
+ # Sanity checks
+ if a.shape != wgt.shape:
+ if axis is None:
+ raise TypeError(
+ "Axis must be specified when shapes of a and weights "
+ "differ.")
+ if wgt.shape != tuple(a.shape[ax] for ax in axis):
+ raise ValueError(
+ "Shape of weights must be consistent with "
+ "shape of a along specified axis.")
+
+ # setup wgt to broadcast along axis
+ wgt = wgt.transpose(np.argsort(axis))
+ wgt = wgt.reshape(tuple((s if ax in axis else 1)
+ for ax, s in enumerate(a.shape)))
+ return wgt
+
+
+def _average_dispatcher(a, axis=None, weights=None, returned=None, *,
+ keepdims=None):
+ return (a, weights)
+
+
+@array_function_dispatch(_average_dispatcher)
+def average(a, axis=None, weights=None, returned=False, *,
+ keepdims=np._NoValue):
+ """
+ Compute the weighted average along the specified axis.
+
+ Parameters
+ ----------
+ a : array_like
+ Array containing data to be averaged. If `a` is not an array, a
+ conversion is attempted.
+ axis : None or int or tuple of ints, optional
+ Axis or axes along which to average `a`. The default,
+ `axis=None`, will average over all of the elements of the input array.
+ If axis is negative it counts from the last to the first axis.
+ If axis is a tuple of ints, averaging is performed on all of the axes
+ specified in the tuple instead of a single axis or all the axes as
+ before.
+ weights : array_like, optional
+ An array of weights associated with the values in `a`. Each value in
+ `a` contributes to the average according to its associated weight.
+ The array of weights must be the same shape as `a` if no axis is
+ specified, otherwise the weights must have dimensions and shape
+ consistent with `a` along the specified axis.
+ If `weights=None`, then all data in `a` are assumed to have a
+ weight equal to one.
+ The calculation is::
+
+ avg = sum(a * weights) / sum(weights)
+
+ where the sum is over all included elements.
+ The only constraint on the values of `weights` is that `sum(weights)`
+ must not be 0.
+ returned : bool, optional
+ Default is `False`. If `True`, the tuple (`average`, `sum_of_weights`)
+ is returned, otherwise only the average is returned.
+ If `weights=None`, `sum_of_weights` is equivalent to the number of
+ elements over which the average is taken.
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left
+ in the result as dimensions with size one. With this option,
+ the result will broadcast correctly against the original `a`.
+ *Note:* `keepdims` will not work with instances of `numpy.matrix`
+ or other classes whose methods do not support `keepdims`.
+
+ .. versionadded:: 1.23.0
+
+ Returns
+ -------
+ retval, [sum_of_weights] : array_type or double
+ Return the average along the specified axis. When `returned` is `True`,
+ return a tuple with the average as the first element and the sum
+ of the weights as the second element. `sum_of_weights` is of the
+ same type as `retval`. The result dtype follows a general pattern.
+ If `weights` is None, the result dtype will be that of `a`, or ``float64``
+ if `a` is integral. Otherwise, if `weights` is not None and `a` is
+ non-integral, the result type will be the type of lowest precision
+ capable of representing values of both `a` and `weights`. If `a` happens
+ to be integral, the previous rules still apply but the result dtype will
+ at least be ``float64``.
+
+ Raises
+ ------
+ ZeroDivisionError
+ When all weights along axis are zero. See `numpy.ma.average` for a
+ version robust to this type of error.
+ TypeError
+ When `weights` does not have the same shape as `a`, and `axis=None`.
+ ValueError
+ When `weights` does not have dimensions and shape consistent with `a`
+ along specified `axis`.
+
+ See Also
+ --------
+ mean
+
+ ma.average : average for masked arrays -- useful if your data contains
+ "missing" values
+ numpy.result_type : Returns the type that results from applying the
+ numpy type promotion rules to the arguments.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> data = np.arange(1, 5)
+ >>> data
+ array([1, 2, 3, 4])
+ >>> np.average(data)
+ 2.5
+ >>> np.average(np.arange(1, 11), weights=np.arange(10, 0, -1))
+ 4.0
+
+ >>> data = np.arange(6).reshape((3, 2))
+ >>> data
+ array([[0, 1],
+ [2, 3],
+ [4, 5]])
+ >>> np.average(data, axis=1, weights=[1./4, 3./4])
+ array([0.75, 2.75, 4.75])
+ >>> np.average(data, weights=[1./4, 3./4])
+ Traceback (most recent call last):
+ ...
+ TypeError: Axis must be specified when shapes of a and weights differ.
+
+ With ``keepdims=True``, the following result has shape (3, 1).
+
+ >>> np.average(data, axis=1, keepdims=True)
+ array([[0.5],
+ [2.5],
+ [4.5]])
+
+ >>> data = np.arange(8).reshape((2, 2, 2))
+ >>> data
+ array([[[0, 1],
+ [2, 3]],
+ [[4, 5],
+ [6, 7]]])
+ >>> np.average(data, axis=(0, 1), weights=[[1./4, 3./4], [1., 1./2]])
+ array([3.4, 4.4])
+ >>> np.average(data, axis=0, weights=[[1./4, 3./4], [1., 1./2]])
+ Traceback (most recent call last):
+ ...
+ ValueError: Shape of weights must be consistent
+ with shape of a along specified axis.
+ """
+ a = np.asanyarray(a)
+
+ if axis is not None:
+ axis = _nx.normalize_axis_tuple(axis, a.ndim, argname="axis")
+
+ if keepdims is np._NoValue:
+ # Don't pass on the keepdims argument if one wasn't given.
+ keepdims_kw = {}
+ else:
+ keepdims_kw = {'keepdims': keepdims}
+
+ if weights is None:
+ avg = a.mean(axis, **keepdims_kw)
+ avg_as_array = np.asanyarray(avg)
+ scl = avg_as_array.dtype.type(a.size / avg_as_array.size)
+ else:
+ wgt = _weights_are_valid(weights=weights, a=a, axis=axis)
+
+ if issubclass(a.dtype.type, (np.integer, np.bool)):
+ result_dtype = np.result_type(a.dtype, wgt.dtype, 'f8')
+ else:
+ result_dtype = np.result_type(a.dtype, wgt.dtype)
+
+ scl = wgt.sum(axis=axis, dtype=result_dtype, **keepdims_kw)
+ if np.any(scl == 0.0):
+ raise ZeroDivisionError(
+ "Weights sum to zero, can't be normalized")
+
+ avg = avg_as_array = np.multiply(a, wgt,
+ dtype=result_dtype).sum(axis, **keepdims_kw) / scl
+
+ if returned:
+ if scl.shape != avg_as_array.shape:
+ scl = np.broadcast_to(scl, avg_as_array.shape).copy()
+ return avg, scl
+ else:
+ return avg
+
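+# Dtype sketch for the promotion rules described in ``average`` above:
+# integral data with float32 weights still promotes to at least float64.
+#
+#     >>> np.average(np.arange(4), weights=np.ones(4, dtype=np.float32)).dtype
+#     dtype('float64')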
+
+@set_module('numpy')
+def asarray_chkfinite(a, dtype=None, order=None):
+ """Convert the input to an array, checking for NaNs or Infs.
+
+ Parameters
+ ----------
+ a : array_like
+ Input data, in any form that can be converted to an array. This
+ includes lists, lists of tuples, tuples, tuples of tuples, tuples
+ of lists and ndarrays. Success requires no NaNs or Infs.
+ dtype : data-type, optional
+ By default, the data-type is inferred from the input data.
+ order : {'C', 'F', 'A', 'K'}, optional
+ Memory layout. 'A' and 'K' depend on the order of input array a.
+ 'C' row-major (C-style),
+ 'F' column-major (Fortran-style) memory representation.
+ 'A' (any) means 'F' if `a` is Fortran contiguous, 'C' otherwise.
+ 'K' (keep) preserves the input order.
+ Defaults to 'C'.
+
+ Returns
+ -------
+ out : ndarray
+ Array interpretation of `a`. No copy is performed if the input
+ is already an ndarray. If `a` is a subclass of ndarray, a base
+ class ndarray is returned.
+
+ Raises
+ ------
+ ValueError
+ Raises ValueError if `a` contains NaN (Not a Number) or Inf (Infinity).
+
+ See Also
+ --------
+ asarray : Create an array.
+ asanyarray : Similar function which passes through subclasses.
+ ascontiguousarray : Convert input to a contiguous array.
+ asfortranarray : Convert input to an ndarray with column-major
+ memory order.
+ fromiter : Create an array from an iterator.
+ fromfunction : Construct an array by executing a function on grid
+ positions.
+
+ Examples
+ --------
+ >>> import numpy as np
+
+ Convert a list into an array. If all elements are finite, then
+ ``asarray_chkfinite`` is identical to ``asarray``.
+
+ >>> a = [1, 2]
+ >>> np.asarray_chkfinite(a, dtype=float)
+ array([1., 2.])
+
+ Raises ValueError if array_like contains Nans or Infs.
+
+ >>> a = [1, 2, np.inf]
+ >>> try:
+ ... np.asarray_chkfinite(a)
+ ... except ValueError:
+ ... print('ValueError')
+ ...
+ ValueError
+
+ """
+ a = asarray(a, dtype=dtype, order=order)
+ if a.dtype.char in typecodes['AllFloat'] and not np.isfinite(a).all():
+ raise ValueError(
+ "array must not contain infs or NaNs")
+ return a
+
+
+def _piecewise_dispatcher(x, condlist, funclist, *args, **kw):
+ yield x
+ # support the undocumented behavior of allowing scalars
+ if np.iterable(condlist):
+ yield from condlist
+
+
+@array_function_dispatch(_piecewise_dispatcher)
+def piecewise(x, condlist, funclist, *args, **kw):
+ """
+ Evaluate a piecewise-defined function.
+
+ Given a set of conditions and corresponding functions, evaluate each
+ function on the input data wherever its condition is true.
+
+ Parameters
+ ----------
+ x : ndarray or scalar
+ The input domain.
+ condlist : list of bool arrays or bool scalars
+ Each boolean array corresponds to a function in `funclist`. Wherever
+ `condlist[i]` is True, `funclist[i](x)` is used as the output value.
+
+ Each boolean array in `condlist` selects a piece of `x`,
+ and should therefore be of the same shape as `x`.
+
+ The length of `condlist` must correspond to that of `funclist`.
+ If one extra function is given, i.e. if
+ ``len(funclist) == len(condlist) + 1``, then that extra function
+ is the default value, used wherever all conditions are false.
+ funclist : list of callables, f(x,*args,**kw), or scalars
+ Each function is evaluated over `x` wherever its corresponding
+ condition is True. It should take a 1d array as input and give a 1d
+ array or a scalar value as output. If, instead of a callable,
+ a scalar is provided then a constant function (``lambda x: scalar``) is
+ assumed.
+ args : tuple, optional
+ Any further arguments given to `piecewise` are passed to the functions
+ upon execution, i.e., if called ``piecewise(..., ..., 1, 'a')``, then
+ each function is called as ``f(x, 1, 'a')``.
+ kw : dict, optional
+ Keyword arguments used in calling `piecewise` are passed to the
+ functions upon execution, i.e., if called
+ ``piecewise(..., ..., alpha=1)``, then each function is called as
+ ``f(x, alpha=1)``.
+
+ Returns
+ -------
+ out : ndarray
+ The output is the same shape and type as x and is found by
+ calling the functions in `funclist` on the appropriate portions of `x`,
+ as defined by the boolean arrays in `condlist`. Portions not covered
+ by any condition have a default value of 0.
+
+
+ See Also
+ --------
+ choose, select, where
+
+ Notes
+ -----
+ This is similar to choose or select, except that functions are
+ evaluated on elements of `x` that satisfy the corresponding condition from
+ `condlist`.
+
+ The result is::
+
+ |--
+ |funclist[0](x[condlist[0]])
+ out = |funclist[1](x[condlist[1]])
+ |...
+ |funclist[n2](x[condlist[n2]])
+ |--
+
+ Examples
+ --------
+ >>> import numpy as np
+
+ Define the signum function, which is -1 for ``x < 0`` and +1 for ``x >= 0``.
+
+ >>> x = np.linspace(-2.5, 2.5, 6)
+ >>> np.piecewise(x, [x < 0, x >= 0], [-1, 1])
+ array([-1., -1., -1., 1., 1., 1.])
+
+ Define the absolute value, which is ``-x`` for ``x < 0`` and ``x`` for
+ ``x >= 0``.
+
+ >>> np.piecewise(x, [x < 0, x >= 0], [lambda x: -x, lambda x: x])
+ array([2.5, 1.5, 0.5, 0.5, 1.5, 2.5])
+
+ Apply the same function to a scalar value.
+
+ >>> y = -2
+ >>> np.piecewise(y, [y < 0, y >= 0], [lambda x: -x, lambda x: x])
+ array(2)
+
+ """
+ x = asanyarray(x)
+ n2 = len(funclist)
+
+ # undocumented: single condition is promoted to a list of one condition
+ if isscalar(condlist) or (
+ not isinstance(condlist[0], (list, ndarray)) and x.ndim != 0):
+ condlist = [condlist]
+
+ condlist = asarray(condlist, dtype=bool)
+ n = len(condlist)
+
+ if n == n2 - 1: # compute the "otherwise" condition.
+ condelse = ~np.any(condlist, axis=0, keepdims=True)
+ condlist = np.concatenate([condlist, condelse], axis=0)
+ n += 1
+ elif n != n2:
+ raise ValueError(
+ f"with {n} condition(s), either {n} or {n + 1} functions are expected"
+ )
+
+ y = zeros_like(x)
+ for cond, func in zip(condlist, funclist):
+ if not isinstance(func, collections.abc.Callable):
+ y[cond] = func
+ else:
+ vals = x[cond]
+ if vals.size > 0:
+ y[cond] = func(vals, *args, **kw)
+
+ return y
+
+
+def _select_dispatcher(condlist, choicelist, default=None):
+ yield from condlist
+ yield from choicelist
+
+
+@array_function_dispatch(_select_dispatcher)
+def select(condlist, choicelist, default=0):
+ """
+ Return an array drawn from elements in choicelist, depending on conditions.
+
+ Parameters
+ ----------
+ condlist : list of bool ndarrays
+ The list of conditions which determine from which array in `choicelist`
+ the output elements are taken. When multiple conditions are satisfied,
+ the first one encountered in `condlist` is used.
+ choicelist : list of ndarrays
+ The list of arrays from which the output elements are taken. It has
+ to be of the same length as `condlist`.
+ default : scalar, optional
+ The element inserted in `output` when all conditions evaluate to False.
+
+ Returns
+ -------
+ output : ndarray
+ The output at position m is the m-th element of the array in
+ `choicelist` where the m-th element of the corresponding array in
+ `condlist` is True.
+
+ See Also
+ --------
+ where : Return elements from one of two arrays depending on condition.
+ take, choose, compress, diag, diagonal
+
+ Examples
+ --------
+ >>> import numpy as np
+
+ Beginning with an array of integers from 0 to 5 (inclusive),
+ elements less than ``3`` are negated, elements greater than ``3``
+ are squared, and elements not meeting either of these conditions
+ (exactly ``3``) are replaced with a `default` value of ``42``.
+
+ >>> x = np.arange(6)
+ >>> condlist = [x<3, x>3]
+ >>> choicelist = [-x, x**2]
+ >>> np.select(condlist, choicelist, 42)
+ array([ 0, -1, -2, 42, 16, 25])
+
+ When multiple conditions are satisfied, the first one encountered in
+ `condlist` is used.
+
+ >>> condlist = [x<=4, x>3]
+ >>> choicelist = [x, x**2]
+ >>> np.select(condlist, choicelist, 55)
+ array([ 0, 1, 2, 3, 4, 25])
+
+ """
+ # Check the size of condlist and choicelist are the same, or abort.
+ if len(condlist) != len(choicelist):
+ raise ValueError(
+ 'list of cases must be same length as list of conditions')
+
+ # Now that the dtype is known, handle the deprecated select([], []) case
+ if len(condlist) == 0:
+ raise ValueError("select with an empty condition list is not possible")
+
+ # TODO: This preserves the Python int, float, complex manually to get the
+ # right `result_type` with NEP 50. Most likely we will grow a better
+ # way to spell this (and this can be replaced).
+ choicelist = [
+ choice if type(choice) in (int, float, complex) else np.asarray(choice)
+ for choice in choicelist]
+ choicelist.append(default if type(default) in (int, float, complex)
+ else np.asarray(default))
+
+ try:
+ dtype = np.result_type(*choicelist)
+ except TypeError as e:
+ msg = f'Choicelist and default value do not have a common dtype: {e}'
+ raise TypeError(msg) from None
+
+ # Convert conditions to arrays and broadcast conditions and choices
+ # as the shape is needed for the result. Doing it separately optimizes
+ # for example when all choices are scalars.
+ condlist = np.broadcast_arrays(*condlist)
+ choicelist = np.broadcast_arrays(*choicelist)
+
+ # If cond array is not an ndarray in boolean format or scalar bool, abort.
+ for i, cond in enumerate(condlist):
+ if cond.dtype.type is not np.bool:
+ raise TypeError(
+ f'invalid entry {i} in condlist: should be boolean ndarray')
+
+ if choicelist[0].ndim == 0:
+ # This may be common, so avoid the call.
+ result_shape = condlist[0].shape
+ else:
+ result_shape = np.broadcast_arrays(condlist[0], choicelist[0])[0].shape
+
+ result = np.full(result_shape, choicelist[-1], dtype)
+
+ # Use np.copyto to burn each choicelist array onto result, using the
+ # corresponding condlist as a boolean mask. This is done in reverse
+ # order since the first choice should take precedence.
+ choicelist = choicelist[-2::-1]
+ condlist = condlist[::-1]
+ for choice, cond in zip(choicelist, condlist):
+ np.copyto(result, choice, where=cond)
+
+ return result
+
+
+def _copy_dispatcher(a, order=None, subok=None):
+ return (a,)
+
+
+@array_function_dispatch(_copy_dispatcher)
+def copy(a, order='K', subok=False):
+ """
+ Return an array copy of the given object.
+
+ Parameters
+ ----------
+ a : array_like
+ Input data.
+ order : {'C', 'F', 'A', 'K'}, optional
+ Controls the memory layout of the copy. 'C' means C-order,
+ 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
+ 'C' otherwise. 'K' means match the layout of `a` as closely
+ as possible. (Note that this function and :meth:`ndarray.copy` are very
+ similar, but have different default values for their order=
+ arguments.)
+ subok : bool, optional
+ If True, then sub-classes will be passed-through, otherwise the
+ returned array will be forced to be a base-class array (defaults to False).
+
+ Returns
+ -------
+ arr : ndarray
+ Array interpretation of `a`.
+
+ See Also
+ --------
+ ndarray.copy : Preferred method for creating an array copy
+
+ Notes
+ -----
+ This is equivalent to:
+
+ >>> np.array(a, copy=True) #doctest: +SKIP
+
+ The copy made of the data is shallow, i.e., for arrays with object dtype,
+ the new array will point to the same objects.
+ See Examples from `ndarray.copy`.
+
+ Examples
+ --------
+ >>> import numpy as np
+
+ Create an array x, with a reference y and a copy z:
+
+ >>> x = np.array([1, 2, 3])
+ >>> y = x
+ >>> z = np.copy(x)
+
+ Note that, when we modify x, y changes, but not z:
+
+ >>> x[0] = 10
+ >>> x[0] == y[0]
+ True
+ >>> x[0] == z[0]
+ False
+
+ Note that np.copy clears the previously set WRITEABLE=False flag.
+
+ >>> a = np.array([1, 2, 3])
+ >>> a.flags["WRITEABLE"] = False
+ >>> b = np.copy(a)
+ >>> b.flags["WRITEABLE"]
+ True
+ >>> b[0] = 3
+ >>> b
+ array([3, 2, 3])
+ """
+ return array(a, order=order, subok=subok, copy=True)
+
+# Basic operations
+
+
+def _gradient_dispatcher(f, *varargs, axis=None, edge_order=None):
+ yield f
+ yield from varargs
+
+
+@array_function_dispatch(_gradient_dispatcher)
+def gradient(f, *varargs, axis=None, edge_order=1):
+ """
+ Return the gradient of an N-dimensional array.
+
+ The gradient is computed using second order accurate central differences
+ in the interior points and either first or second order accurate one-sided
+ (forward or backward) differences at the boundaries.
+ The returned gradient hence has the same shape as the input array.
+
+ Parameters
+ ----------
+ f : array_like
+ An N-dimensional array containing samples of a scalar function.
+ varargs : list of scalar or array, optional
+ Spacing between f values. Default unitary spacing for all dimensions.
+ Spacing can be specified using:
+
+ 1. single scalar to specify a sample distance for all dimensions.
+ 2. N scalars to specify a constant sample distance for each dimension.
+ i.e. `dx`, `dy`, `dz`, ...
+ 3. N arrays to specify the coordinates of the values along each
+ dimension of F. The length of the array must match the size of
+ the corresponding dimension
+ 4. Any combination of N scalars/arrays with the meaning of 2. and 3.
+
+ If `axis` is given, the number of varargs must equal the number of axes
+ specified in the axis parameter.
+ Default: 1. (see Examples below).
+
+ edge_order : {1, 2}, optional
+ Gradient is calculated using N-th order accurate differences
+ at the boundaries. Default: 1.
+ axis : None or int or tuple of ints, optional
+ Gradient is calculated only along the given axis or axes
+ The default (axis = None) is to calculate the gradient for all the axes
+ of the input array. axis may be negative, in which case it counts from
+ the last to the first axis.
+
+ Returns
+ -------
+ gradient : ndarray or tuple of ndarray
+ A tuple of ndarrays (or a single ndarray if there is only one
+ dimension) corresponding to the derivatives of f with respect
+ to each dimension. Each derivative has the same shape as f.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> f = np.array([1, 2, 4, 7, 11, 16])
+ >>> np.gradient(f)
+ array([1. , 1.5, 2.5, 3.5, 4.5, 5. ])
+ >>> np.gradient(f, 2)
+ array([0.5 , 0.75, 1.25, 1.75, 2.25, 2.5 ])
+
+ Spacing can also be specified with an array that represents the coordinates
+ of the values F along the dimensions.
+ For instance a uniform spacing:
+
+ >>> x = np.arange(f.size)
+ >>> np.gradient(f, x)
+ array([1. , 1.5, 2.5, 3.5, 4.5, 5. ])
+
+ Or a non-uniform one:
+
+ >>> x = np.array([0., 1., 1.5, 3.5, 4., 6.])
+ >>> np.gradient(f, x)
+ array([1. , 3. , 3.5, 6.7, 6.9, 2.5])
+
+ For two dimensional arrays, the return will be two arrays ordered by
+ axis. In this example the first array stands for the gradient in the
+ row direction and the second one in the column direction:
+
+ >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]]))
+ (array([[ 2., 2., -1.],
+ [ 2., 2., -1.]]),
+ array([[1. , 2.5, 4. ],
+ [1. , 1. , 1. ]]))
+
+ In this example the spacing is also specified:
+ uniform for axis=0 and non uniform for axis=1
+
+ >>> dx = 2.
+ >>> y = [1., 1.5, 3.5]
+ >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]]), dx, y)
+ (array([[ 1. , 1. , -0.5],
+ [ 1. , 1. , -0.5]]),
+ array([[2. , 2. , 2. ],
+ [2. , 1.7, 0.5]]))
+
+ It is possible to specify how boundaries are treated using `edge_order`
+
+ >>> x = np.array([0, 1, 2, 3, 4])
+ >>> f = x**2
+ >>> np.gradient(f, edge_order=1)
+ array([1., 2., 4., 6., 7.])
+ >>> np.gradient(f, edge_order=2)
+ array([0., 2., 4., 6., 8.])
+
+ The `axis` keyword can be used to specify a subset of axes of which the
+ gradient is calculated
+
+ >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]]), axis=0)
+ array([[ 2., 2., -1.],
+ [ 2., 2., -1.]])
+
+ The `varargs` argument defines the spacing between sample points in the
+ input array. It can take two forms:
+
+ 1. An array, specifying coordinates, which may be unevenly spaced:
+
+ >>> x = np.array([0., 2., 3., 6., 8.])
+ >>> y = x ** 2
+ >>> np.gradient(y, x, edge_order=2)
+ array([ 0., 4., 6., 12., 16.])
+
+ 2. A scalar, representing the fixed sample distance:
+
+ >>> dx = 2
+ >>> x = np.array([0., 2., 4., 6., 8.])
+ >>> y = x ** 2
+ >>> np.gradient(y, dx, edge_order=2)
+ array([ 0., 4., 8., 12., 16.])
+
+ It's possible to provide different data for spacing along each dimension.
+ The number of arguments must match the number of dimensions in the input
+ data.
+
+ >>> dx = 2
+ >>> dy = 3
+ >>> x = np.arange(0, 6, dx)
+ >>> y = np.arange(0, 9, dy)
+ >>> xs, ys = np.meshgrid(x, y)
+ >>> zs = xs + 2 * ys
+ >>> np.gradient(zs, dy, dx) # Passing two scalars
+ (array([[2., 2., 2.],
+ [2., 2., 2.],
+ [2., 2., 2.]]),
+ array([[1., 1., 1.],
+ [1., 1., 1.],
+ [1., 1., 1.]]))
+
+ Mixing scalars and arrays is also allowed:
+
+ >>> np.gradient(zs, y, dx) # Passing one array and one scalar
+ (array([[2., 2., 2.],
+ [2., 2., 2.],
+ [2., 2., 2.]]),
+ array([[1., 1., 1.],
+ [1., 1., 1.],
+ [1., 1., 1.]]))
+
+ Notes
+ -----
+ Assuming that :math:`f\\in C^{3}` (i.e., :math:`f` has at least 3 continuous
+ derivatives) and let :math:`h_{*}` be a non-homogeneous stepsize, we
+ minimize the "consistency error" :math:`\\eta_{i}` between the true gradient
+ and its estimate from a linear combination of the neighboring grid-points:
+
+ .. math::
+
+ \\eta_{i} = f_{i}^{\\left(1\\right)} -
+ \\left[ \\alpha f\\left(x_{i}\\right) +
+ \\beta f\\left(x_{i} + h_{d}\\right) +
+ \\gamma f\\left(x_{i}-h_{s}\\right)
+ \\right]
+
+ By substituting :math:`f(x_{i} + h_{d})` and :math:`f(x_{i} - h_{s})`
+ with their Taylor series expansion, this translates into solving
+ the following the linear system:
+
+ .. math::
+
+ \\left\\{
+ \\begin{array}{r}
+ \\alpha+\\beta+\\gamma=0 \\\\
+ \\beta h_{d}-\\gamma h_{s}=1 \\\\
+ \\beta h_{d}^{2}+\\gamma h_{s}^{2}=0
+ \\end{array}
+ \\right.
+
+ The resulting approximation of :math:`f_{i}^{(1)}` is the following:
+
+ .. math::
+
+ \\hat f_{i}^{(1)} =
+ \\frac{
+ h_{s}^{2}f\\left(x_{i} + h_{d}\\right)
+ + \\left(h_{d}^{2} - h_{s}^{2}\\right)f\\left(x_{i}\\right)
+ - h_{d}^{2}f\\left(x_{i}-h_{s}\\right)}
+ { h_{s}h_{d}\\left(h_{d} + h_{s}\\right)}
+ + \\mathcal{O}\\left(\\frac{h_{d}h_{s}^{2}
+ + h_{s}h_{d}^{2}}{h_{d}
+ + h_{s}}\\right)
+
+ It is worth noting that if :math:`h_{s}=h_{d}`
+ (i.e., data are evenly spaced)
+ we find the standard second order approximation:
+
+ .. math::
+
+ \\hat f_{i}^{(1)}=
+ \\frac{f\\left(x_{i+1}\\right) - f\\left(x_{i-1}\\right)}{2h}
+ + \\mathcal{O}\\left(h^{2}\\right)
+
+ With a similar procedure the forward/backward approximations used for
+ boundaries can be derived.
+
+ References
+ ----------
+ .. [1] Quarteroni A., Sacco R., Saleri F. (2007) Numerical Mathematics
+ (Texts in Applied Mathematics). New York: Springer.
+ .. [2] Durran D. R. (1999) Numerical Methods for Wave Equations
+ in Geophysical Fluid Dynamics. New York: Springer.
+ .. [3] Fornberg B. (1988) Generation of Finite Difference Formulas on
+ Arbitrarily Spaced Grids,
+ Mathematics of Computation 51, no. 184 : 699-706.
+ `PDF <https://www.ams.org/journals/mcom/1988-51-184/
+ S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf>`_.
+ """
+ f = np.asanyarray(f)
+ N = f.ndim # number of dimensions
+
+ if axis is None:
+ axes = tuple(range(N))
+ else:
+ axes = _nx.normalize_axis_tuple(axis, N)
+
+ len_axes = len(axes)
+ n = len(varargs)
+ if n == 0:
+ # no spacing argument - use 1 in all axes
+ dx = [1.0] * len_axes
+ elif n == 1 and np.ndim(varargs[0]) == 0:
+ # single scalar for all axes
+ dx = varargs * len_axes
+ elif n == len_axes:
+ # scalar or 1d array for each axis
+ dx = list(varargs)
+ for i, distances in enumerate(dx):
+ distances = np.asanyarray(distances)
+ if distances.ndim == 0:
+ continue
+ elif distances.ndim != 1:
+ raise ValueError("distances must be either scalars or 1d")
+ if len(distances) != f.shape[axes[i]]:
+ raise ValueError("when 1d, distances must match "
+ "the length of the corresponding dimension")
+ if np.issubdtype(distances.dtype, np.integer):
+ # Convert numpy integer types to float64 to avoid modular
+ # arithmetic in np.diff(distances).
+ distances = distances.astype(np.float64)
+ diffx = np.diff(distances)
+ # if distances are constant reduce to the scalar case
+ # since it brings a consistent speedup
+ if (diffx == diffx[0]).all():
+ diffx = diffx[0]
+ dx[i] = diffx
+ else:
+ raise TypeError("invalid number of arguments")
+
+ if edge_order > 2:
+ raise ValueError("'edge_order' greater than 2 not supported")
+
+ # use central differences on interior and one-sided differences on the
+ # endpoints. This preserves second order-accuracy over the full domain.
+
+ outvals = []
+
+ # create slice objects --- initially all are [:, :, ..., :]
+ slice1 = [slice(None)] * N
+ slice2 = [slice(None)] * N
+ slice3 = [slice(None)] * N
+ slice4 = [slice(None)] * N
+
+ otype = f.dtype
+ if otype.type is np.datetime64:
+ # the timedelta dtype with the same unit information
+ otype = np.dtype(otype.name.replace('datetime', 'timedelta'))
+ # view as timedelta to allow addition
+ f = f.view(otype)
+ elif otype.type is np.timedelta64:
+ pass
+ elif np.issubdtype(otype, np.inexact):
+ pass
+ else:
+ # All other types convert to floating point.
+ # First check if f is a numpy integer type; if so, convert f to float64
+ # to avoid modular arithmetic when computing the changes in f.
+ if np.issubdtype(otype, np.integer):
+ f = f.astype(np.float64)
+ otype = np.float64
+
+ for axis, ax_dx in zip(axes, dx):
+ if f.shape[axis] < edge_order + 1:
+ raise ValueError(
+ "Shape of array too small to calculate a numerical gradient, "
+ "at least (edge_order + 1) elements are required.")
+ # result allocation
+ out = np.empty_like(f, dtype=otype)
+
+ # spacing for the current axis
+ uniform_spacing = np.ndim(ax_dx) == 0
+
+ # Numerical differentiation: 2nd order interior
+ slice1[axis] = slice(1, -1)
+ slice2[axis] = slice(None, -2)
+ slice3[axis] = slice(1, -1)
+ slice4[axis] = slice(2, None)
+
+ if uniform_spacing:
+ out[tuple(slice1)] = (f[tuple(slice4)] - f[tuple(slice2)]) / (2. * ax_dx)
+ else:
+ dx1 = ax_dx[0:-1]
+ dx2 = ax_dx[1:]
+ a = -(dx2) / (dx1 * (dx1 + dx2))
+ b = (dx2 - dx1) / (dx1 * dx2)
+ c = dx1 / (dx2 * (dx1 + dx2))
+ # fix the shape for broadcasting
+ shape = np.ones(N, dtype=int)
+ shape[axis] = -1
+ a.shape = b.shape = c.shape = shape
+ # 1D equivalent -- out[1:-1] = a * f[:-2] + b * f[1:-1] + c * f[2:]
+ out[tuple(slice1)] = a * f[tuple(slice2)] + b * f[tuple(slice3)] \
+ + c * f[tuple(slice4)]
+
+ # Numerical differentiation: 1st order edges
+ if edge_order == 1:
+ slice1[axis] = 0
+ slice2[axis] = 1
+ slice3[axis] = 0
+ dx_0 = ax_dx if uniform_spacing else ax_dx[0]
+ # 1D equivalent -- out[0] = (f[1] - f[0]) / (x[1] - x[0])
+ out[tuple(slice1)] = (f[tuple(slice2)] - f[tuple(slice3)]) / dx_0
+
+ slice1[axis] = -1
+ slice2[axis] = -1
+ slice3[axis] = -2
+ dx_n = ax_dx if uniform_spacing else ax_dx[-1]
+ # 1D equivalent -- out[-1] = (f[-1] - f[-2]) / (x[-1] - x[-2])
+ out[tuple(slice1)] = (f[tuple(slice2)] - f[tuple(slice3)]) / dx_n
+
+ # Numerical differentiation: 2nd order edges
+ else:
+ slice1[axis] = 0
+ slice2[axis] = 0
+ slice3[axis] = 1
+ slice4[axis] = 2
+ if uniform_spacing:
+ a = -1.5 / ax_dx
+ b = 2. / ax_dx
+ c = -0.5 / ax_dx
+ else:
+ dx1 = ax_dx[0]
+ dx2 = ax_dx[1]
+ a = -(2. * dx1 + dx2) / (dx1 * (dx1 + dx2))
+ b = (dx1 + dx2) / (dx1 * dx2)
+ c = - dx1 / (dx2 * (dx1 + dx2))
+ # 1D equivalent -- out[0] = a * f[0] + b * f[1] + c * f[2]
+ out[tuple(slice1)] = a * f[tuple(slice2)] + b * f[tuple(slice3)] \
+ + c * f[tuple(slice4)]
+
+ slice1[axis] = -1
+ slice2[axis] = -3
+ slice3[axis] = -2
+ slice4[axis] = -1
+ if uniform_spacing:
+ a = 0.5 / ax_dx
+ b = -2. / ax_dx
+ c = 1.5 / ax_dx
+ else:
+ dx1 = ax_dx[-2]
+ dx2 = ax_dx[-1]
+ a = (dx2) / (dx1 * (dx1 + dx2))
+ b = - (dx2 + dx1) / (dx1 * dx2)
+ c = (2. * dx2 + dx1) / (dx2 * (dx1 + dx2))
+ # 1D equivalent -- out[-1] = a * f[-3] + b * f[-2] + c * f[-1]
+ out[tuple(slice1)] = a * f[tuple(slice2)] + b * f[tuple(slice3)] \
+ + c * f[tuple(slice4)]
+
+ outvals.append(out)
+
+ # reset the slice object in this dimension to ":"
+ slice1[axis] = slice(None)
+ slice2[axis] = slice(None)
+ slice3[axis] = slice(None)
+ slice4[axis] = slice(None)
+
+ if len_axes == 1:
+ return outvals[0]
+ return tuple(outvals)
+
+
+def _diff_dispatcher(a, n=None, axis=None, prepend=None, append=None):
+ return (a, prepend, append)
+
+
+@array_function_dispatch(_diff_dispatcher)
+def diff(a, n=1, axis=-1, prepend=np._NoValue, append=np._NoValue):
+ """
+ Calculate the n-th discrete difference along the given axis.
+
+ The first difference is given by ``out[i] = a[i+1] - a[i]`` along
+ the given axis, higher differences are calculated by using `diff`
+ recursively.
+
+ Parameters
+ ----------
+ a : array_like
+ Input array
+ n : int, optional
+ The number of times values are differenced. If zero, the input
+ is returned as-is.
+ axis : int, optional
+ The axis along which the difference is taken, default is the
+ last axis.
+ prepend, append : array_like, optional
+ Values to prepend or append to `a` along axis prior to
+ performing the difference. Scalar values are expanded to
+ arrays with length 1 in the direction of axis and the shape
+ of the input array along all other axes. Otherwise the
+ dimension and shape must match `a` except along axis.
+
+ Returns
+ -------
+ diff : ndarray
+ The n-th differences. The shape of the output is the same as `a`
+ except along `axis` where the dimension is smaller by `n`. The
+ type of the output is the same as the type of the difference
+ between any two elements of `a`. This is the same as the type of
+ `a` in most cases. A notable exception is `datetime64`, which
+ results in a `timedelta64` output array.
+
+ See Also
+ --------
+ gradient, ediff1d, cumsum
+
+ Notes
+ -----
+ Type is preserved for boolean arrays, so the result will contain
+ `False` when consecutive elements are the same and `True` when they
+ differ.
+
+ For unsigned integer arrays, the results will also be unsigned. This
+ should not be surprising, as the result is consistent with
+ calculating the difference directly:
+
+ >>> u8_arr = np.array([1, 0], dtype=np.uint8)
+ >>> np.diff(u8_arr)
+ array([255], dtype=uint8)
+ >>> u8_arr[1,...] - u8_arr[0,...]
+ np.uint8(255)
+
+ If this is not desirable, then the array should be cast to a larger
+ integer type first:
+
+ >>> i16_arr = u8_arr.astype(np.int16)
+ >>> np.diff(i16_arr)
+ array([-1], dtype=int16)
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> x = np.array([1, 2, 4, 7, 0])
+ >>> np.diff(x)
+ array([ 1, 2, 3, -7])
+ >>> np.diff(x, n=2)
+ array([ 1, 1, -10])
+
+ >>> x = np.array([[1, 3, 6, 10], [0, 5, 6, 8]])
+ >>> np.diff(x)
+ array([[2, 3, 4],
+ [5, 1, 2]])
+ >>> np.diff(x, axis=0)
+ array([[-1, 2, 0, -2]])
+
+ >>> x = np.arange('1066-10-13', '1066-10-16', dtype=np.datetime64)
+ >>> np.diff(x)
+ array([1, 1], dtype='timedelta64[D]')
+
+ """
+ if n == 0:
+ return a
+ if n < 0:
+ raise ValueError(
+ "order must be non-negative but got " + repr(n))
+
+ a = asanyarray(a)
+ nd = a.ndim
+ if nd == 0:
+ raise ValueError("diff requires input that is at least one dimensional")
+ axis = normalize_axis_index(axis, nd)
+
+ combined = []
+ if prepend is not np._NoValue:
+ prepend = np.asanyarray(prepend)
+ if prepend.ndim == 0:
+ shape = list(a.shape)
+ shape[axis] = 1
+ prepend = np.broadcast_to(prepend, tuple(shape))
+ combined.append(prepend)
+
+ combined.append(a)
+
+ if append is not np._NoValue:
+ append = np.asanyarray(append)
+ if append.ndim == 0:
+ shape = list(a.shape)
+ shape[axis] = 1
+ append = np.broadcast_to(append, tuple(shape))
+ combined.append(append)
+
+ if len(combined) > 1:
+ a = np.concatenate(combined, axis)
+
+ slice1 = [slice(None)] * nd
+ slice2 = [slice(None)] * nd
+ slice1[axis] = slice(1, None)
+ slice2[axis] = slice(None, -1)
+ slice1 = tuple(slice1)
+ slice2 = tuple(slice2)
+
+ op = not_equal if a.dtype == np.bool else subtract
+ for _ in range(n):
+ a = op(a[slice1], a[slice2])
+
+ return a
+
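+# Sketch of the ``prepend`` behavior documented above: a scalar is broadcast
+# to length 1 along ``axis``, so the output keeps the input's length.
+#
+#     >>> np.diff(np.array([1, 2, 4, 7]), prepend=0)
+#     array([1, 1, 2, 3])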
+
+def _interp_dispatcher(x, xp, fp, left=None, right=None, period=None):
+ return (x, xp, fp)
+
+
+@array_function_dispatch(_interp_dispatcher)
+def interp(x, xp, fp, left=None, right=None, period=None):
+ """
+ One-dimensional linear interpolation for monotonically increasing sample points.
+
+ Returns the one-dimensional piecewise linear interpolant to a function
+ with given discrete data points (`xp`, `fp`), evaluated at `x`.
+
+ Parameters
+ ----------
+ x : array_like
+ The x-coordinates at which to evaluate the interpolated values.
+
+ xp : 1-D sequence of floats
+ The x-coordinates of the data points, must be increasing if argument
+ `period` is not specified. Otherwise, `xp` is internally sorted after
+ normalizing the periodic boundaries with ``xp = xp % period``.
+
+ fp : 1-D sequence of float or complex
+ The y-coordinates of the data points, same length as `xp`.
+
+ left : optional float or complex corresponding to fp
+ Value to return for `x < xp[0]`, default is `fp[0]`.
+
+ right : optional float or complex corresponding to fp
+ Value to return for `x > xp[-1]`, default is `fp[-1]`.
+
+ period : None or float, optional
+ A period for the x-coordinates. This parameter allows the proper
+ interpolation of angular x-coordinates. Parameters `left` and `right`
+ are ignored if `period` is specified.
+
+ Returns
+ -------
+ y : float or complex (corresponding to fp) or ndarray
+ The interpolated values, same shape as `x`.
+
+ Raises
+ ------
+ ValueError
+ If `xp` and `fp` have different length
+ If `xp` or `fp` are not 1-D sequences
+ If `period == 0`
+
+ See Also
+ --------
+ scipy.interpolate
+
+ Warnings
+ --------
+ The x-coordinate sequence is expected to be increasing, but this is not
+ explicitly enforced. However, if the sequence `xp` is non-increasing,
+ interpolation results are meaningless.
+
+ Note that, since NaN is unsortable, `xp` also cannot contain NaNs.
+
+ A simple check for `xp` being strictly increasing is::
+
+ np.all(np.diff(xp) > 0)
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> xp = [1, 2, 3]
+ >>> fp = [3, 2, 0]
+ >>> np.interp(2.5, xp, fp)
+ 1.0
+ >>> np.interp([0, 1, 1.5, 2.72, 3.14], xp, fp)
+ array([3. , 3. , 2.5 , 0.56, 0. ])
+ >>> UNDEF = -99.0
+ >>> np.interp(3.14, xp, fp, right=UNDEF)
+ -99.0
+
+ Plot an interpolant to the sine function:
+
+ >>> x = np.linspace(0, 2*np.pi, 10)
+ >>> y = np.sin(x)
+ >>> xvals = np.linspace(0, 2*np.pi, 50)
+ >>> yinterp = np.interp(xvals, x, y)
+ >>> import matplotlib.pyplot as plt
+ >>> plt.plot(x, y, 'o')
+ [<matplotlib.lines.Line2D object at 0x...>]
+ >>> plt.plot(xvals, yinterp, '-x')
+ [<matplotlib.lines.Line2D object at 0x...>]
+ >>> plt.show()
+
+ Interpolation with periodic x-coordinates:
+
+ >>> x = [-180, -170, -185, 185, -10, -5, 0, 365]
+ >>> xp = [190, -190, 350, -350]
+ >>> fp = [5, 10, 3, 4]
+ >>> np.interp(x, xp, fp, period=360)
+ array([7.5 , 5. , 8.75, 6.25, 3. , 3.25, 3.5 , 3.75])
+
+ Complex interpolation:
+
+ >>> x = [1.5, 4.0]
+ >>> xp = [2,3,5]
+ >>> fp = [1.0j, 0, 2+3j]
+ >>> np.interp(x, xp, fp)
+ array([0.+1.j , 1.+1.5j])
+
+ """
+
+ fp = np.asarray(fp)
+
+ if np.iscomplexobj(fp):
+ interp_func = compiled_interp_complex
+ input_dtype = np.complex128
+ else:
+ interp_func = compiled_interp
+ input_dtype = np.float64
+
+ if period is not None:
+ if period == 0:
+ raise ValueError("period must be a non-zero value")
+ period = abs(period)
+ left = None
+ right = None
+
+ x = np.asarray(x, dtype=np.float64)
+ xp = np.asarray(xp, dtype=np.float64)
+ fp = np.asarray(fp, dtype=input_dtype)
+
+ if xp.ndim != 1 or fp.ndim != 1:
+ raise ValueError("Data points must be 1-D sequences")
+ if xp.shape[0] != fp.shape[0]:
+ raise ValueError("fp and xp are not of the same length")
+ # normalizing periodic boundaries
+ x = x % period
+ xp = xp % period
+ asort_xp = np.argsort(xp)
+ xp = xp[asort_xp]
+ fp = fp[asort_xp]
+ xp = np.concatenate((xp[-1:] - period, xp, xp[0:1] + period))
+ fp = np.concatenate((fp[-1:], fp, fp[0:1]))
+
+ return interp_func(x, xp, fp, left, right)
+
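+# Illustrative sketch (not part of the library) of the periodic padding above:
+# after sorting `xp` modulo the period, one sample is wrapped around each end
+# so that every query point in [0, period) lies inside some interval, e.g.
+# with period = 4:
+#
+#     xp, fp = np.array([1., 3.]), np.array([10., 30.])
+#     xp_pad = np.concatenate((xp[-1:] - 4, xp, xp[:1] + 4))  # [-1., 1., 3., 5.]
+#     fp_pad = np.concatenate((fp[-1:], fp, fp[:1]))          # [30., 10., 30., 10.]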
+
+def _angle_dispatcher(z, deg=None):
+ return (z,)
+
+
+@array_function_dispatch(_angle_dispatcher)
+def angle(z, deg=False):
+ """
+ Return the angle of the complex argument.
+
+ Parameters
+ ----------
+ z : array_like
+ A complex number or sequence of complex numbers.
+ deg : bool, optional
+ Return angle in degrees if True, radians if False (default).
+
+ Returns
+ -------
+ angle : ndarray or scalar
+ The counterclockwise angle from the positive real axis on the complex
+ plane in the range ``(-pi, pi]``, with dtype as numpy.float64.
+
+ See Also
+ --------
+ arctan2
+ absolute
+
+ Notes
+ -----
+ This function passes the imaginary and real parts of the argument to
+ `arctan2` to compute the result; consequently, it follows the convention
+ of `arctan2` when the magnitude of the argument is zero. See example.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.angle([1.0, 1.0j, 1+1j]) # in radians
+ array([ 0. , 1.57079633, 0.78539816]) # may vary
+ >>> np.angle(1+1j, deg=True) # in degrees
+ 45.0
+ >>> np.angle([0., -0., complex(0., -0.), complex(-0., -0.)]) # convention
+ array([ 0. , 3.14159265, -0. , -3.14159265])
+
+ """
+ z = asanyarray(z)
+ if issubclass(z.dtype.type, _nx.complexfloating):
+ zimag = z.imag
+ zreal = z.real
+ else:
+ zimag = 0
+ zreal = z
+
+ a = arctan2(zimag, zreal)
+ if deg:
+ a *= 180 / pi
+ return a
+
+
+def _unwrap_dispatcher(p, discont=None, axis=None, *, period=None):
+ return (p,)
+
+
+@array_function_dispatch(_unwrap_dispatcher)
+def unwrap(p, discont=None, axis=-1, *, period=2 * pi):
+ r"""
+ Unwrap by taking the complement of large deltas with respect to the period.
+
+ This unwraps a signal `p` by changing elements which have an absolute
+ difference from their predecessor of more than ``max(discont, period/2)``
+ to their `period`-complementary values.
+
+ For the default case where `period` is :math:`2\pi` and `discont` is
+ :math:`\pi`, this unwraps a radian phase `p` such that adjacent differences
+ are never greater than :math:`\pi` by adding :math:`2k\pi` for some
+ integer :math:`k`.
+
+ Parameters
+ ----------
+ p : array_like
+ Input array.
+ discont : float, optional
+ Maximum discontinuity between values, default is ``period/2``.
+ Values below ``period/2`` are treated as if they were ``period/2``.
+ To have an effect different from the default, `discont` should be
+ larger than ``period/2``.
+ axis : int, optional
+ Axis along which unwrap will operate, default is the last axis.
+ period : float, optional
+ Size of the range over which the input wraps. By default, it is
+ ``2 pi``.
+
+ .. versionadded:: 1.21.0
+
+ Returns
+ -------
+ out : ndarray
+ Output array.
+
+ See Also
+ --------
+ rad2deg, deg2rad
+
+ Notes
+ -----
+ If the discontinuity in `p` is smaller than ``period/2``,
+ but larger than `discont`, no unwrapping is done because taking
+ the complement would only make the discontinuity larger.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> phase = np.linspace(0, np.pi, num=5)
+ >>> phase[3:] += np.pi
+ >>> phase
+ array([ 0. , 0.78539816, 1.57079633, 5.49778714, 6.28318531]) # may vary
+ >>> np.unwrap(phase)
+ array([ 0. , 0.78539816, 1.57079633, -0.78539816, 0. ]) # may vary
+ >>> np.unwrap([0, 1, 2, -1, 0], period=4)
+ array([0, 1, 2, 3, 4])
+ >>> np.unwrap([ 1, 2, 3, 4, 5, 6, 1, 2, 3], period=6)
+ array([1, 2, 3, 4, 5, 6, 7, 8, 9])
+ >>> np.unwrap([2, 3, 4, 5, 2, 3, 4, 5], period=4)
+ array([2, 3, 4, 5, 6, 7, 8, 9])
+ >>> phase_deg = np.mod(np.linspace(0, 720, 19), 360) - 180
+ >>> np.unwrap(phase_deg, period=360)
+ array([-180., -140., -100., -60., -20., 20., 60., 100., 140.,
+ 180., 220., 260., 300., 340., 380., 420., 460., 500.,
+ 540.])
+ """
+ p = asarray(p)
+ nd = p.ndim
+ dd = diff(p, axis=axis)
+ if discont is None:
+ discont = period / 2
+ slice1 = [slice(None, None)] * nd # full slices
+ slice1[axis] = slice(1, None)
+ slice1 = tuple(slice1)
+ dtype = np.result_type(dd, period)
+ if _nx.issubdtype(dtype, _nx.integer):
+ interval_high, rem = divmod(period, 2)
+ boundary_ambiguous = rem == 0
+ else:
+ interval_high = period / 2
+ boundary_ambiguous = True
+ interval_low = -interval_high
+ ddmod = mod(dd - interval_low, period) + interval_low
+ if boundary_ambiguous:
+ # for `mask = (abs(dd) == period/2)`, the above line made
+ # `ddmod[mask] == -period/2`. Correct these such that
+ # `ddmod[mask] == sign(dd[mask])*period/2`.
+ _nx.copyto(ddmod, interval_high,
+ where=(ddmod == interval_low) & (dd > 0))
+ ph_correct = ddmod - dd
+ _nx.copyto(ph_correct, 0, where=abs(dd) < discont)
+ up = array(p, copy=True, dtype=dtype)
+ up[slice1] = p[slice1] + ph_correct.cumsum(axis)
+ return up
+
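+# Illustrative sketch (not part of the library): the core step above maps each
+# first difference into the half-open interval (-period/2, period/2] before
+# the corrections are accumulated, e.g. for a radian jump of 5.5:
+#
+#     dd = 5.5
+#     ddmod = np.mod(dd - (-np.pi), 2 * np.pi) + (-np.pi)  # ~-0.783, same phase
+#     assert np.isclose(ddmod, dd - 2 * np.pi)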
+
+def _sort_complex(a):
+ return (a,)
+
+
+@array_function_dispatch(_sort_complex)
+def sort_complex(a):
+ """
+ Sort a complex array using the real part first, then the imaginary part.
+
+ Parameters
+ ----------
+ a : array_like
+ Input array
+
+ Returns
+ -------
+ out : complex ndarray
+ Always returns a sorted complex array.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.sort_complex([5, 3, 6, 2, 1])
+ array([1.+0.j, 2.+0.j, 3.+0.j, 5.+0.j, 6.+0.j])
+
+ >>> np.sort_complex([1 + 2j, 2 - 1j, 3 - 2j, 3 - 3j, 3 + 5j])
+ array([1.+2.j, 2.-1.j, 3.-3.j, 3.-2.j, 3.+5.j])
+
+ """
+ b = array(a, copy=True)
+ b.sort()
+ if not issubclass(b.dtype.type, _nx.complexfloating):
+ if b.dtype.char in 'bhBH':
+ return b.astype('F')
+ elif b.dtype.char == 'g':
+ return b.astype('G')
+ else:
+ return b.astype('D')
+ else:
+ return b
+
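+# Illustrative note (not part of the library): in the branches above, 'bhBH'
+# are the 8- and 16-bit integer typecodes, which fit exactly in complex64
+# ('F'); 'g' (long double) is widened to clongdouble ('G'); everything else
+# is promoted to complex128 ('D'), e.g.:
+#
+#     np.sort_complex(np.array([3, 1], dtype=np.int16)).dtype  # complex64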
+
+def _arg_trim_zeros(filt):
+ """Return indices of the first and last non-zero element.
+
+ Parameters
+ ----------
+ filt : array_like
+ Input array.
+
+ Returns
+ -------
+ start, stop : ndarray
+ Two arrays containing the indices of the first and last non-zero
+ element in each dimension.
+
+ See also
+ --------
+ trim_zeros
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> _arg_trim_zeros(np.array([0, 0, 1, 1, 0]))
+ (array([2]), array([3]))
+ """
+ nonzero = (
+ np.argwhere(filt)
+ if filt.dtype != np.object_
+ # Historically, `trim_zeros` treats `None` in an object array
+ # as non-zero while argwhere doesn't, account for that
+ else np.argwhere(filt != 0)
+ )
+ if nonzero.size == 0:
+ start = stop = np.array([], dtype=np.intp)
+ else:
+ start = nonzero.min(axis=0)
+ stop = nonzero.max(axis=0)
+ return start, stop
+
+
+def _trim_zeros(filt, trim=None, axis=None):
+ return (filt,)
+
+
+@array_function_dispatch(_trim_zeros)
+def trim_zeros(filt, trim='fb', axis=None):
+ """Remove values along a dimension which are zero along all other.
+
+ Parameters
+ ----------
+ filt : array_like
+ Input array.
+ trim : {"fb", "f", "b"}, optional
+ A string with 'f' representing trim from front and 'b' to trim from
+ back. By default, zeros are trimmed on both sides.
+ Front and back refer to the edges of a dimension, with "front" referring
+ to the side with the lowest index 0, and "back" referring to the highest
+ index (or index -1).
+ axis : int or sequence, optional
+ If None, `filt` is cropped such that the smallest bounding box is
+ returned that still contains all values which are not zero.
+ If an axis is specified, `filt` will be sliced in that dimension only
+ on the sides specified by `trim`. The remaining area will be the
+ smallest that still contains all values which are not zero.
+
+ .. versionadded:: 2.2.0
+
+ Returns
+ -------
+ trimmed : ndarray or sequence
+ The result of trimming the input. The number of dimensions and the
+ input data type are preserved.
+
+ Notes
+ -----
+ For all-zero arrays, the first axis is trimmed first.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> a = np.array((0, 0, 0, 1, 2, 3, 0, 2, 1, 0))
+ >>> np.trim_zeros(a)
+ array([1, 2, 3, 0, 2, 1])
+
+ >>> np.trim_zeros(a, trim='b')
+ array([0, 0, 0, ..., 0, 2, 1])
+
+ Multiple dimensions are supported.
+
+ >>> b = np.array([[0, 0, 2, 3, 0, 0],
+ ... [0, 1, 0, 3, 0, 0],
+ ... [0, 0, 0, 0, 0, 0]])
+ >>> np.trim_zeros(b)
+ array([[0, 2, 3],
+ [1, 0, 3]])
+
+ >>> np.trim_zeros(b, axis=-1)
+ array([[0, 2, 3],
+ [1, 0, 3],
+ [0, 0, 0]])
+
+ The input data type is preserved, list/tuple in means list/tuple out.
+
+ >>> np.trim_zeros([0, 1, 2, 0])
+ [1, 2]
+
+ """
+ filt_ = np.asarray(filt)
+
+ trim = trim.lower()
+ if trim not in {"fb", "bf", "f", "b"}:
+ raise ValueError(f"unexpected character(s) in `trim`: {trim!r}")
+
+ start, stop = _arg_trim_zeros(filt_)
+ stop += 1 # Adjust for slicing
+
+ if start.size == 0:
+ # filt is all-zero -> assign same values to start and stop so that
+ # resulting slice will be empty
+ start = stop = np.zeros(filt_.ndim, dtype=np.intp)
+ else:
+ if 'f' not in trim:
+ start = (None,) * filt_.ndim
+ if 'b' not in trim:
+ stop = (None,) * filt_.ndim
+
+ if len(start) == 1:
+ # filt is 1D -> don't use multi-dimensional slicing to preserve
+ # non-array input types
+ sl = slice(start[0], stop[0])
+ elif axis is None:
+ # trim all axes
+ sl = tuple(slice(*x) for x in zip(start, stop))
+ else:
+ # only trim single axis
+ axis = normalize_axis_index(axis, filt_.ndim)
+ sl = (slice(None),) * axis + (slice(start[axis], stop[axis]),) + (...,)
+
+ trimmed = filt[sl]
+ return trimmed
+
+
+def _extract_dispatcher(condition, arr):
+ return (condition, arr)
+
+
+@array_function_dispatch(_extract_dispatcher)
+def extract(condition, arr):
+ """
+ Return the elements of an array that satisfy some condition.
+
+ This is equivalent to ``np.compress(ravel(condition), ravel(arr))``. If
+ `condition` is boolean, ``np.extract`` is equivalent to ``arr[condition]``.
+
+ Note that `place` does the exact opposite of `extract`.
+
+ Parameters
+ ----------
+ condition : array_like
+ An array whose nonzero or True entries indicate the elements of `arr`
+ to extract.
+ arr : array_like
+ Input array of the same size as `condition`.
+
+ Returns
+ -------
+ extract : ndarray
+ Rank 1 array of values from `arr` where `condition` is True.
+
+ See Also
+ --------
+ take, put, copyto, compress, place
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> arr = np.arange(12).reshape((3, 4))
+ >>> arr
+ array([[ 0, 1, 2, 3],
+ [ 4, 5, 6, 7],
+ [ 8, 9, 10, 11]])
+ >>> condition = np.mod(arr, 3)==0
+ >>> condition
+ array([[ True, False, False, True],
+ [False, False, True, False],
+ [False, True, False, False]])
+ >>> np.extract(condition, arr)
+ array([0, 3, 6, 9])
+
+
+ If `condition` is boolean:
+
+ >>> arr[condition]
+ array([0, 3, 6, 9])
+
+ """
+ return _nx.take(ravel(arr), nonzero(ravel(condition))[0])
+
+
+def _place_dispatcher(arr, mask, vals):
+ return (arr, mask, vals)
+
+
+@array_function_dispatch(_place_dispatcher)
+def place(arr, mask, vals):
+ """
+ Change elements of an array based on conditional and input values.
+
+ Similar to ``np.copyto(arr, vals, where=mask)``, the difference is that
+ `place` uses the first N elements of `vals`, where N is the number of
+ True values in `mask`, while `copyto` uses the elements where `mask`
+ is True.
+
+ Note that `extract` does the exact opposite of `place`.
+
+ Parameters
+ ----------
+ arr : ndarray
+ Array to put data into.
+ mask : array_like
+ Boolean mask array. Must have the same size as `arr`.
+ vals : 1-D sequence
+ Values to put into `arr`. Only the first N elements are used, where
+ N is the number of True values in `mask`. If `vals` is smaller
+ than N, it will be repeated, and if elements of `arr` are to be masked,
+ this sequence must be non-empty.
+
+ See Also
+ --------
+ copyto, put, take, extract
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> arr = np.arange(6).reshape(2, 3)
+ >>> np.place(arr, arr>2, [44, 55])
+ >>> arr
+ array([[ 0, 1, 2],
+ [44, 55, 44]])
+
+ """
+ return _place(arr, mask, vals)
+
+
+def disp(mesg, device=None, linefeed=True):
+ """
+ Display a message on a device.
+
+ .. deprecated:: 2.0
+ Use your own printing function instead.
+
+ Parameters
+ ----------
+ mesg : str
+ Message to display.
+ device : object
+ Device to write message. If None, defaults to ``sys.stdout`` which is
+ very similar to ``print``. `device` needs to have ``write()`` and
+ ``flush()`` methods.
+ linefeed : bool, optional
+ Option whether to print a line feed or not. Defaults to True.
+
+ Raises
+ ------
+ AttributeError
+ If `device` does not have a ``write()`` or ``flush()`` method.
+
+ Examples
+ --------
+ >>> import numpy as np
+
+ Besides ``sys.stdout``, a file-like object can also be used as it has
+ both required methods:
+
+ >>> from io import StringIO
+ >>> buf = StringIO()
+ >>> np.disp('"Display" in a file', device=buf)
+ >>> buf.getvalue()
+ '"Display" in a file\\n'
+
+ """
+
+ # Deprecated in NumPy 2.0, 2023-07-11
+ warnings.warn(
+ "`disp` is deprecated, "
+ "use your own printing function instead. "
+ "(deprecated in NumPy 2.0)",
+ DeprecationWarning,
+ stacklevel=2
+ )
+
+ if device is None:
+ device = sys.stdout
+ if linefeed:
+ device.write(f'{mesg}\n')
+ else:
+ device.write(f'{mesg}')
+ device.flush()
+
+
+# See https://docs.scipy.org/doc/numpy/reference/c-api.generalized-ufuncs.html
+_DIMENSION_NAME = r'\w+'
+_CORE_DIMENSION_LIST = f'(?:{_DIMENSION_NAME}(?:,{_DIMENSION_NAME})*)?'
+_ARGUMENT = fr'\({_CORE_DIMENSION_LIST}\)'
+_ARGUMENT_LIST = f'{_ARGUMENT}(?:,{_ARGUMENT})*'
+_SIGNATURE = f'^{_ARGUMENT_LIST}->{_ARGUMENT_LIST}$'
+
+
+def _parse_gufunc_signature(signature):
+ """
+ Parse string signatures for a generalized universal function.
+
+ Arguments
+ ---------
+ signature : string
+ Generalized universal function signature, e.g., ``(m,n),(n,p)->(m,p)``
+ for ``np.matmul``.
+
+ Returns
+ -------
+ Tuple of input and output core dimensions parsed from the signature, each
+ of the form List[Tuple[str, ...]].
+ """
+ signature = re.sub(r'\s+', '', signature)
+
+ if not re.match(_SIGNATURE, signature):
+ raise ValueError(
+ f'not a valid gufunc signature: {signature}')
+ return tuple([tuple(re.findall(_DIMENSION_NAME, arg))
+ for arg in re.findall(_ARGUMENT, arg_list)]
+ for arg_list in signature.split('->'))
+
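+# Illustrative sketch (not part of the library) of the parsed structure:
+#
+#     _parse_gufunc_signature('(m,n),(n,p)->(m,p)')
+#     # -> ([('m', 'n'), ('n', 'p')], [('m', 'p')])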
+
+def _update_dim_sizes(dim_sizes, arg, core_dims):
+ """
+ Incrementally check and update core dimension sizes for a single argument.
+
+ Arguments
+ ---------
+ dim_sizes : Dict[str, int]
+ Sizes of existing core dimensions. Will be updated in-place.
+ arg : ndarray
+ Argument to examine.
+ core_dims : Tuple[str, ...]
+ Core dimensions for this argument.
+ """
+ if not core_dims:
+ return
+
+ num_core_dims = len(core_dims)
+ if arg.ndim < num_core_dims:
+ raise ValueError(
+ '%d-dimensional argument does not have enough '
+ 'dimensions for all core dimensions %r'
+ % (arg.ndim, core_dims))
+
+ core_shape = arg.shape[-num_core_dims:]
+ for dim, size in zip(core_dims, core_shape):
+ if dim in dim_sizes:
+ if size != dim_sizes[dim]:
+ raise ValueError(
+ 'inconsistent size for core dimension %r: %r vs %r'
+ % (dim, size, dim_sizes[dim]))
+ else:
+ dim_sizes[dim] = size
+
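+# Illustrative sketch (not part of the library): the trailing axes of each
+# argument are matched against its named core dimensions, e.g.:
+#
+#     dim_sizes = {}
+#     _update_dim_sizes(dim_sizes, np.empty((4, 2, 3)), ('n', 'p'))
+#     # dim_sizes == {'n': 2, 'p': 3}; a later (..., 5) arg with ('n',) raises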
+
+def _parse_input_dimensions(args, input_core_dims):
+ """
+ Parse broadcast and core dimensions for vectorize with a signature.
+
+ Arguments
+ ---------
+ args : Tuple[ndarray, ...]
+ Tuple of input arguments to examine.
+ input_core_dims : List[Tuple[str, ...]]
+ List of core dimensions corresponding to each input.
+
+ Returns
+ -------
+ broadcast_shape : Tuple[int, ...]
+ Common shape to broadcast all non-core dimensions to.
+ dim_sizes : Dict[str, int]
+ Common sizes for named core dimensions.
+ """
+ broadcast_args = []
+ dim_sizes = {}
+ for arg, core_dims in zip(args, input_core_dims):
+ _update_dim_sizes(dim_sizes, arg, core_dims)
+ ndim = arg.ndim - len(core_dims)
+ dummy_array = np.lib.stride_tricks.as_strided(0, arg.shape[:ndim])
+ broadcast_args.append(dummy_array)
+ broadcast_shape = np.lib._stride_tricks_impl._broadcast_shape(
+ *broadcast_args
+ )
+ return broadcast_shape, dim_sizes
+
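+# Illustrative note (not part of the library): `as_strided(0, shape)` above
+# creates a dummy view that only carries the loop (non-core) shape; its data
+# is never read, so the common broadcast shape is computed without copying:
+#
+#     dummy = np.lib.stride_tricks.as_strided(0, (3, 1))
+#     np.broadcast_shapes(dummy.shape, (1, 4))  # (3, 4)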
+
+def _calculate_shapes(broadcast_shape, dim_sizes, list_of_core_dims):
+ """Helper for calculating broadcast shapes with core dimensions."""
+ return [broadcast_shape + tuple(dim_sizes[dim] for dim in core_dims)
+ for core_dims in list_of_core_dims]
+
+
+def _create_arrays(broadcast_shape, dim_sizes, list_of_core_dims, dtypes,
+ results=None):
+ """Helper for creating output arrays in vectorize."""
+ shapes = _calculate_shapes(broadcast_shape, dim_sizes, list_of_core_dims)
+ if dtypes is None:
+ dtypes = [None] * len(shapes)
+ if results is None:
+ arrays = tuple(np.empty(shape=shape, dtype=dtype)
+ for shape, dtype in zip(shapes, dtypes))
+ else:
+ arrays = tuple(np.empty_like(result, shape=shape, dtype=dtype)
+ for result, shape, dtype
+ in zip(results, shapes, dtypes))
+ return arrays
+
+
+def _get_vectorize_dtype(dtype):
+ if dtype.char in "SU":
+ return dtype.char
+ return dtype
+
+
+@set_module('numpy')
+class vectorize:
+ """
+ vectorize(pyfunc=np._NoValue, otypes=None, doc=None, excluded=None,
+ cache=False, signature=None)
+
+ Returns an object that acts like pyfunc, but takes arrays as input.
+
+ Define a vectorized function which takes a nested sequence of objects or
+ numpy arrays as inputs and returns a single numpy array or a tuple of numpy
+ arrays. The vectorized function evaluates `pyfunc` over successive tuples
+ of the input arrays like the python map function, except it uses the
+ broadcasting rules of numpy.
+
+ The data type of the output of the vectorized function is determined by
+ calling `pyfunc` with the first element of the input. This can be avoided
+ by specifying the `otypes` argument.
+
+ Parameters
+ ----------
+ pyfunc : callable, optional
+ A python function or method.
+ Can be omitted to produce a decorator with keyword arguments.
+ otypes : str or list of dtypes, optional
+ The output data type. It must be specified as either a string of
+ typecode characters or a list of data type specifiers. There should
+ be one data type specifier for each output.
+ doc : str, optional
+ The docstring for the function. If None, the docstring will be the
+ ``pyfunc.__doc__``.
+ excluded : set, optional
+ Set of strings or integers representing the positional or keyword
+ arguments for which the function will not be vectorized. These will be
+ passed directly to `pyfunc` unmodified.
+
+ cache : bool, optional
+ If `True`, then cache the first function call that determines the number
+ of outputs if `otypes` is not provided.
+
+ signature : string, optional
+ Generalized universal function signature, e.g., ``(m,n),(n)->(m)`` for
+ vectorized matrix-vector multiplication. If provided, ``pyfunc`` will
+ be called with (and expected to return) arrays with shapes given by the
+ size of corresponding core dimensions. By default, ``pyfunc`` is
+ assumed to take scalars as input and output.
+
+ Returns
+ -------
+ out : callable
+ A vectorized function if ``pyfunc`` was provided,
+ a decorator otherwise.
+
+ See Also
+ --------
+ frompyfunc : Takes an arbitrary Python function and returns a ufunc
+
+ Notes
+ -----
+ The `vectorize` function is provided primarily for convenience, not for
+ performance. The implementation is essentially a for loop.
+
+ If `otypes` is not specified, then a call to the function with the
+ first argument will be used to determine the number of outputs. The
+ results of this call will be cached if `cache` is `True` to prevent
+ calling the function twice. However, to implement the cache, the
+ original function must be wrapped which will slow down subsequent
+ calls, so only do this if your function is expensive.
+
+ Support for the new keyword argument interface and for the
+ `excluded` argument further degrades performance.
+
+ References
+ ----------
+ .. [1] :doc:`/reference/c-api/generalized-ufuncs`
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> def myfunc(a, b):
+ ... "Return a-b if a>b, otherwise return a+b"
+ ... if a > b:
+ ... return a - b
+ ... else:
+ ... return a + b
+
+ >>> vfunc = np.vectorize(myfunc)
+ >>> vfunc([1, 2, 3, 4], 2)
+ array([3, 4, 1, 2])
+
+ The docstring is taken from the input function to `vectorize` unless it
+ is specified:
+
+ >>> vfunc.__doc__
+ 'Return a-b if a>b, otherwise return a+b'
+ >>> vfunc = np.vectorize(myfunc, doc='Vectorized `myfunc`')
+ >>> vfunc.__doc__
+ 'Vectorized `myfunc`'
+
+ The output type is determined by evaluating the first element of the input,
+ unless it is specified:
+
+ >>> out = vfunc([1, 2, 3, 4], 2)
+ >>> type(out[0])
+ <class 'numpy.int64'>
+ >>> vfunc = np.vectorize(myfunc, otypes=[float])
+ >>> out = vfunc([1, 2, 3, 4], 2)
+ >>> type(out[0])
+ <class 'numpy.float64'>
+
+ The `excluded` argument can be used to prevent vectorizing over certain
+ arguments. This can be useful for array-like arguments of a fixed length
+ such as the coefficients for a polynomial as in `polyval`:
+
+ >>> def mypolyval(p, x):
+ ... _p = list(p)
+ ... res = _p.pop(0)
+ ... while _p:
+ ... res = res*x + _p.pop(0)
+ ... return res
+
+ Here, we exclude the zeroth argument from vectorization whether it is
+ passed by position or keyword.
+
+ >>> vpolyval = np.vectorize(mypolyval, excluded={0, 'p'})
+ >>> vpolyval([1, 2, 3], x=[0, 1])
+ array([3, 6])
+ >>> vpolyval(p=[1, 2, 3], x=[0, 1])
+ array([3, 6])
+
+ The `signature` argument allows for vectorizing functions that act on
+ non-scalar arrays of fixed length. For example, you can use it for a
+ vectorized calculation of Pearson correlation coefficient and its p-value:
+
+ >>> import scipy.stats
+ >>> pearsonr = np.vectorize(scipy.stats.pearsonr,
+ ... signature='(n),(n)->(),()')
+ >>> pearsonr([[0, 1, 2, 3]], [[1, 2, 3, 4], [4, 3, 2, 1]])
+ (array([ 1., -1.]), array([ 0., 0.]))
+
+ Or for a vectorized convolution:
+
+ >>> convolve = np.vectorize(np.convolve, signature='(n),(m)->(k)')
+ >>> convolve(np.eye(4), [1, 2, 1])
+ array([[1., 2., 1., 0., 0., 0.],
+ [0., 1., 2., 1., 0., 0.],
+ [0., 0., 1., 2., 1., 0.],
+ [0., 0., 0., 1., 2., 1.]])
+
+ Decorator syntax is supported. The decorator can be called as
+ a function to provide keyword arguments:
+
+ >>> @np.vectorize
+ ... def identity(x):
+ ... return x
+ ...
+ >>> identity([0, 1, 2])
+ array([0, 1, 2])
+ >>> @np.vectorize(otypes=[float])
+ ... def as_float(x):
+ ... return x
+ ...
+ >>> as_float([0, 1, 2])
+ array([0., 1., 2.])
+ """
+ def __init__(self, pyfunc=np._NoValue, otypes=None, doc=None,
+ excluded=None, cache=False, signature=None):
+
+ if (pyfunc != np._NoValue) and (not callable(pyfunc)):
+ # Splitting the error message to keep
+ # the length below 79 characters.
+ part1 = "When used as a decorator, "
+ part2 = "only accepts keyword arguments."
+ raise TypeError(part1 + part2)
+
+ self.pyfunc = pyfunc
+ self.cache = cache
+ self.signature = signature
+ if pyfunc != np._NoValue and hasattr(pyfunc, '__name__'):
+ self.__name__ = pyfunc.__name__
+
+ self._ufunc = {} # Caching to improve default performance
+ self._doc = None
+ self.__doc__ = doc
+ if doc is None and hasattr(pyfunc, '__doc__'):
+ self.__doc__ = pyfunc.__doc__
+ else:
+ self._doc = doc
+
+ if isinstance(otypes, str):
+ for char in otypes:
+ if char not in typecodes['All']:
+ raise ValueError(f"Invalid otype specified: {char}")
+ elif iterable(otypes):
+ otypes = [_get_vectorize_dtype(_nx.dtype(x)) for x in otypes]
+ elif otypes is not None:
+ raise ValueError("Invalid otype specification")
+ self.otypes = otypes
+
+ # Excluded variable support
+ if excluded is None:
+ excluded = set()
+ self.excluded = set(excluded)
+
+ if signature is not None:
+ self._in_and_out_core_dims = _parse_gufunc_signature(signature)
+ else:
+ self._in_and_out_core_dims = None
+
+ def _init_stage_2(self, pyfunc, *args, **kwargs):
+ self.__name__ = pyfunc.__name__
+ self.pyfunc = pyfunc
+ if self._doc is None:
+ self.__doc__ = pyfunc.__doc__
+ else:
+ self.__doc__ = self._doc
+
+ def _call_as_normal(self, *args, **kwargs):
+ """
+ Return arrays with the results of `pyfunc` broadcast (vectorized) over
+ `args` and `kwargs` not in `excluded`.
+ """
+ excluded = self.excluded
+ if not kwargs and not excluded:
+ func = self.pyfunc
+ vargs = args
+ else:
+ # The wrapper accepts only positional arguments: we use `names` and
+ # `inds` to mutate `the_args` and `kwargs` to pass to the original
+ # function.
+ nargs = len(args)
+
+ names = [_n for _n in kwargs if _n not in excluded]
+ inds = [_i for _i in range(nargs) if _i not in excluded]
+ the_args = list(args)
+
+ def func(*vargs):
+ for _n, _i in enumerate(inds):
+ the_args[_i] = vargs[_n]
+ kwargs.update(zip(names, vargs[len(inds):]))
+ return self.pyfunc(*the_args, **kwargs)
+
+ vargs = [args[_i] for _i in inds]
+ vargs.extend([kwargs[_n] for _n in names])
+
+ return self._vectorize_call(func=func, args=vargs)
+
+ def __call__(self, *args, **kwargs):
+ if self.pyfunc is np._NoValue:
+ self._init_stage_2(*args, **kwargs)
+ return self
+
+ return self._call_as_normal(*args, **kwargs)
+
+ def _get_ufunc_and_otypes(self, func, args):
+ """Return (ufunc, otypes)."""
+ # frompyfunc will fail if args is empty
+ if not args:
+ raise ValueError('args cannot be empty')
+
+ if self.otypes is not None:
+ otypes = self.otypes
+
+ # self._ufunc is a dictionary whose keys are the number of
+ # arguments (i.e. len(args)) and whose values are ufuncs created
+ # by frompyfunc. len(args) can be different for different calls if
+ # self.pyfunc has parameters with default values. We only use the
+ # cache when func is self.pyfunc, which occurs when the call uses
+ # only positional arguments and no arguments are excluded.
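+# Illustrative note (not part of the library): _chbevl is the Clenshaw
+# recurrence b_k = x*b_{k+1} - b_{k+2} + c_k for a Chebyshev series whose
+# coefficients are ordered from highest to lowest degree, with the constant
+# term weighted by 1/2 (Cephes convention); the callers below map their
+# argument into the series' domain first, e.g. x/2 - 2 sends [0, 8] to [-2, 2].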
+
+ nin = len(args)
+ nout = len(self.otypes)
+ if func is not self.pyfunc or nin not in self._ufunc:
+ ufunc = frompyfunc(func, nin, nout)
+ else:
+ ufunc = None # We'll get it from self._ufunc
+ if func is self.pyfunc:
+ ufunc = self._ufunc.setdefault(nin, ufunc)
+ else:
+ # Get number of outputs and output types by calling the function on
+ # the first entries of args. We also cache the result to prevent
+ # the subsequent call when the ufunc is evaluated.
+ # Assumes that ufunc first evaluates the 0th elements in the input
+ # arrays (the input values are not checked to ensure this)
+ args = [asarray(a) for a in args]
+ if builtins.any(arg.size == 0 for arg in args):
+ raise ValueError('cannot call `vectorize` on size 0 inputs '
+ 'unless `otypes` is set')
+
+ inputs = [arg.flat[0] for arg in args]
+ outputs = func(*inputs)
+
+ # Performance note: profiling indicates that -- for simple
+ # functions at least -- this wrapping can almost double the
+ # execution time.
+ # Hence we make it optional.
+ if self.cache:
+ _cache = [outputs]
+
+ def _func(*vargs):
+ if _cache:
+ return _cache.pop()
+ else:
+ return func(*vargs)
+ else:
+ _func = func
+
+ if isinstance(outputs, tuple):
+ nout = len(outputs)
+ else:
+ nout = 1
+ outputs = (outputs,)
+
+ otypes = ''.join([asarray(outputs[_k]).dtype.char
+ for _k in range(nout)])
+
+ # Performance note: profiling indicates that creating the ufunc is
+ # not a significant cost compared with wrapping so it seems not
+ # worth trying to cache this.
+ ufunc = frompyfunc(_func, len(args), nout)
+
+ return ufunc, otypes
+
+ def _vectorize_call(self, func, args):
+ """Vectorized call to `func` over positional `args`."""
+ if self.signature is not None:
+ res = self._vectorize_call_with_signature(func, args)
+ elif not args:
+ res = func()
+ else:
+ ufunc, otypes = self._get_ufunc_and_otypes(func=func, args=args)
+ # gh-29196: `dtype=object` should eventually be removed
+ args = [asanyarray(a, dtype=object) for a in args]
+ outputs = ufunc(*args, out=...)
+
+ if ufunc.nout == 1:
+ res = asanyarray(outputs, dtype=otypes[0])
+ else:
+ res = tuple(asanyarray(x, dtype=t)
+ for x, t in zip(outputs, otypes))
+ return res
+
+ def _vectorize_call_with_signature(self, func, args):
+ """Vectorized call over positional arguments with a signature."""
+ input_core_dims, output_core_dims = self._in_and_out_core_dims
+
+ if len(args) != len(input_core_dims):
+ raise TypeError('wrong number of positional arguments: '
+ 'expected %r, got %r'
+ % (len(input_core_dims), len(args)))
+ args = tuple(asanyarray(arg) for arg in args)
+
+ broadcast_shape, dim_sizes = _parse_input_dimensions(
+ args, input_core_dims)
+ input_shapes = _calculate_shapes(broadcast_shape, dim_sizes,
+ input_core_dims)
+ args = [np.broadcast_to(arg, shape, subok=True)
+ for arg, shape in zip(args, input_shapes)]
+
+ outputs = None
+ otypes = self.otypes
+ nout = len(output_core_dims)
+
+ for index in np.ndindex(*broadcast_shape):
+ results = func(*(arg[index] for arg in args))
+
+ n_results = len(results) if isinstance(results, tuple) else 1
+
+ if nout != n_results:
+ raise ValueError(
+ 'wrong number of outputs from pyfunc: expected %r, got %r'
+ % (nout, n_results))
+
+ if nout == 1:
+ results = (results,)
+
+ if outputs is None:
+ for result, core_dims in zip(results, output_core_dims):
+ _update_dim_sizes(dim_sizes, result, core_dims)
+
+ outputs = _create_arrays(broadcast_shape, dim_sizes,
+ output_core_dims, otypes, results)
+
+ for output, result in zip(outputs, results):
+ output[index] = result
+
+ if outputs is None:
+ # did not call the function even once
+ if otypes is None:
+ raise ValueError('cannot call `vectorize` on size 0 inputs '
+ 'unless `otypes` is set')
+ if builtins.any(dim not in dim_sizes
+ for dims in output_core_dims
+ for dim in dims):
+ raise ValueError('cannot call `vectorize` with a signature '
+ 'including new output dimensions on size 0 '
+ 'inputs')
+ outputs = _create_arrays(broadcast_shape, dim_sizes,
+ output_core_dims, otypes)
+
+ return outputs[0] if nout == 1 else outputs
+
+
+def _cov_dispatcher(m, y=None, rowvar=None, bias=None, ddof=None,
+ fweights=None, aweights=None, *, dtype=None):
+ return (m, y, fweights, aweights)
+
+
+@array_function_dispatch(_cov_dispatcher)
+def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None,
+ aweights=None, *, dtype=None):
+ """
+ Estimate a covariance matrix, given data and weights.
+
+ Covariance indicates the level to which two variables vary together.
+ If we examine N-dimensional samples, :math:`X = [x_1, x_2, ... x_N]^T`,
+ then the covariance matrix element :math:`C_{ij}` is the covariance of
+ :math:`x_i` and :math:`x_j`. The element :math:`C_{ii}` is the variance
+ of :math:`x_i`.
+
+ See the notes for an outline of the algorithm.
+
+ Parameters
+ ----------
+ m : array_like
+ A 1-D or 2-D array containing multiple variables and observations.
+ Each row of `m` represents a variable, and each column a single
+ observation of all those variables. Also see `rowvar` below.
+ y : array_like, optional
+ An additional set of variables and observations. `y` has the same form
+ as that of `m`.
+ rowvar : bool, optional
+ If `rowvar` is True (default), then each row represents a
+ variable, with observations in the columns. Otherwise, the relationship
+ is transposed: each column represents a variable, while the rows
+ contain observations.
+ bias : bool, optional
+ Default normalization (False) is by ``(N - 1)``, where ``N`` is the
+ number of observations given (unbiased estimate). If `bias` is True,
+ then normalization is by ``N``. These values can be overridden by using
+ the keyword ``ddof`` in numpy versions >= 1.5.
+ ddof : int, optional
+ If not ``None`` the default value implied by `bias` is overridden.
+ Note that ``ddof=1`` will return the unbiased estimate, even if both
+ `fweights` and `aweights` are specified, and ``ddof=0`` will return
+ the simple average. See the notes for the details. The default value
+ is ``None``.
+ fweights : array_like, int, optional
+ 1-D array of integer frequency weights; the number of times each
+ observation vector should be repeated.
+ aweights : array_like, optional
+ 1-D array of observation vector weights. These relative weights are
+ typically large for observations considered "important" and smaller for
+ observations considered less "important". If ``ddof=0`` the array of
+ weights can be used to assign probabilities to observation vectors.
+ dtype : data-type, optional
+ Data-type of the result. By default, the return data-type will have
+ at least `numpy.float64` precision.
+
+ .. versionadded:: 1.20
+
+ Returns
+ -------
+ out : ndarray
+ The covariance matrix of the variables.
+
+ See Also
+ --------
+ corrcoef : Normalized covariance matrix
+
+ Notes
+ -----
+ Assume that the observations are in the columns of the observation
+ array `m` and let ``f = fweights`` and ``a = aweights`` for brevity. The
+ steps to compute the weighted covariance are as follows::
+
+ >>> m = np.arange(10, dtype=np.float64)
+ >>> f = np.arange(10) * 2
+ >>> a = np.arange(10) ** 2.
+ >>> ddof = 1
+ >>> w = f * a
+ >>> v1 = np.sum(w)
+ >>> v2 = np.sum(w * a)
+ >>> m -= np.sum(m * w, axis=None, keepdims=True) / v1
+ >>> cov = np.dot(m * w, m.T) * v1 / (v1**2 - ddof * v2)
+
+ Note that when ``a == 1``, the normalization factor
+ ``v1 / (v1**2 - ddof * v2)`` goes over to ``1 / (np.sum(f) - ddof)``
+ as it should.
+
+ Examples
+ --------
+ >>> import numpy as np
+
+ Consider two variables, :math:`x_0` and :math:`x_1`, which
+ correlate perfectly, but in opposite directions:
+
+ >>> x = np.array([[0, 2], [1, 1], [2, 0]]).T
+ >>> x
+ array([[0, 1, 2],
+ [2, 1, 0]])
+
+ Note how :math:`x_0` increases while :math:`x_1` decreases. The covariance
+ matrix shows this clearly:
+
+ >>> np.cov(x)
+ array([[ 1., -1.],
+ [-1., 1.]])
+
+ Note that element :math:`C_{0,1}`, which shows the correlation between
+ :math:`x_0` and :math:`x_1`, is negative.
+
+ Further, note how `x` and `y` are combined:
+
+ >>> x = [-2.1, -1, 4.3]
+ >>> y = [3, 1.1, 0.12]
+ >>> X = np.stack((x, y), axis=0)
+ >>> np.cov(X)
+ array([[11.71 , -4.286 ], # may vary
+ [-4.286 , 2.144133]])
+ >>> np.cov(x, y)
+ array([[11.71 , -4.286 ], # may vary
+ [-4.286 , 2.144133]])
+ >>> np.cov(x)
+ array(11.71)
+
+ """
+ # Check inputs
+ if ddof is not None and ddof != int(ddof):
+ raise ValueError(
+ "ddof must be integer")
+
+ # Handles complex arrays too
+ m = np.asarray(m)
+ if m.ndim > 2:
+ raise ValueError("m has more than 2 dimensions")
+
+ if y is not None:
+ y = np.asarray(y)
+ if y.ndim > 2:
+ raise ValueError("y has more than 2 dimensions")
+
+ if dtype is None:
+ if y is None:
+ dtype = np.result_type(m, np.float64)
+ else:
+ dtype = np.result_type(m, y, np.float64)
+
+ X = array(m, ndmin=2, dtype=dtype)
+ if not rowvar and m.ndim != 1:
+ X = X.T
+ if X.shape[0] == 0:
+ return np.array([]).reshape(0, 0)
+ if y is not None:
+ y = array(y, copy=None, ndmin=2, dtype=dtype)
+ if not rowvar and y.shape[0] != 1:
+ y = y.T
+ X = np.concatenate((X, y), axis=0)
+
+ if ddof is None:
+ if bias == 0:
+ ddof = 1
+ else:
+ ddof = 0
+
+ # Get the product of frequencies and weights
+ w = None
+ if fweights is not None:
+ fweights = np.asarray(fweights, dtype=float)
+ if not np.all(fweights == np.around(fweights)):
+ raise TypeError(
+ "fweights must be integer")
+ if fweights.ndim > 1:
+ raise RuntimeError(
+ "cannot handle multidimensional fweights")
+ if fweights.shape[0] != X.shape[1]:
+ raise RuntimeError(
+ "incompatible numbers of samples and fweights")
+ if any(fweights < 0):
+ raise ValueError(
+ "fweights cannot be negative")
+ w = fweights
+ if aweights is not None:
+ aweights = np.asarray(aweights, dtype=float)
+ if aweights.ndim > 1:
+ raise RuntimeError(
+ "cannot handle multidimensional aweights")
+ if aweights.shape[0] != X.shape[1]:
+ raise RuntimeError(
+ "incompatible numbers of samples and aweights")
+ if any(aweights < 0):
+ raise ValueError(
+ "aweights cannot be negative")
+ if w is None:
+ w = aweights
+ else:
+ w *= aweights
+
+ avg, w_sum = average(X, axis=1, weights=w, returned=True)
+ w_sum = w_sum[0]
+
+ # Determine the normalization
+ if w is None:
+ fact = X.shape[1] - ddof
+ elif ddof == 0:
+ fact = w_sum
+ elif aweights is None:
+ fact = w_sum - ddof
+ else:
+ fact = w_sum - ddof * sum(w * aweights) / w_sum
+
+ if fact <= 0:
+ warnings.warn("Degrees of freedom <= 0 for slice",
+ RuntimeWarning, stacklevel=2)
+ fact = 0.0
+
+ X -= avg[:, None]
+ if w is None:
+ X_T = X.T
+ else:
+ X_T = (X * w).T
+ c = dot(X, X_T.conj())
+ c *= np.true_divide(1, fact)
+ return c.squeeze()
+
+
+def _corrcoef_dispatcher(x, y=None, rowvar=None, bias=None, ddof=None, *,
+ dtype=None):
+ return (x, y)
+
+
+@array_function_dispatch(_corrcoef_dispatcher)
+def corrcoef(x, y=None, rowvar=True, bias=np._NoValue, ddof=np._NoValue, *,
+ dtype=None):
+ """
+ Return Pearson product-moment correlation coefficients.
+
+ Please refer to the documentation for `cov` for more detail. The
+ relationship between the correlation coefficient matrix, `R`, and the
+ covariance matrix, `C`, is
+
+ .. math:: R_{ij} = \\frac{ C_{ij} } { \\sqrt{ C_{ii} C_{jj} } }
+
+ The values of `R` are between -1 and 1, inclusive.
+
+ Parameters
+ ----------
+ x : array_like
+ A 1-D or 2-D array containing multiple variables and observations.
+ Each row of `x` represents a variable, and each column a single
+ observation of all those variables. Also see `rowvar` below.
+ y : array_like, optional
+ An additional set of variables and observations. `y` has the same
+ shape as `x`.
+ rowvar : bool, optional
+ If `rowvar` is True (default), then each row represents a
+ variable, with observations in the columns. Otherwise, the relationship
+ is transposed: each column represents a variable, while the rows
+ contain observations.
+ bias : _NoValue, optional
+ Has no effect, do not use.
+
+ .. deprecated:: 1.10.0
+ ddof : _NoValue, optional
+ Has no effect, do not use.
+
+ .. deprecated:: 1.10.0
+ dtype : data-type, optional
+ Data-type of the result. By default, the return data-type will have
+ at least `numpy.float64` precision.
+
+ .. versionadded:: 1.20
+
+ Returns
+ -------
+ R : ndarray
+ The correlation coefficient matrix of the variables.
+
+ See Also
+ --------
+ cov : Covariance matrix
+
+ Notes
+ -----
+ Due to floating point rounding the resulting array may not be Hermitian,
+ the diagonal elements may not be 1, and the elements may not satisfy the
+ inequality abs(a) <= 1. The real and imaginary parts are clipped to the
+ interval [-1, 1] in an attempt to improve on that situation, but this is
+ not much help in the complex case.
+
+ This function accepts but discards arguments `bias` and `ddof`. This is
+ for backwards compatibility with previous versions of this function. These
+ arguments had no effect on the return values of the function and can be
+ safely ignored in this and previous versions of numpy.
+
+ Examples
+ --------
+ >>> import numpy as np
+
+ In this example we generate two random arrays, ``xarr`` and ``yarr``, and
+ compute the row-wise and column-wise Pearson correlation coefficients,
+ ``R``. Since ``rowvar`` is True by default, we first find the row-wise
+ Pearson correlation coefficients between the variables of ``xarr``.
+
+ >>> rng = np.random.default_rng(seed=42)
+ >>> xarr = rng.random((3, 3))
+ >>> xarr
+ array([[0.77395605, 0.43887844, 0.85859792],
+ [0.69736803, 0.09417735, 0.97562235],
+ [0.7611397 , 0.78606431, 0.12811363]])
+ >>> R1 = np.corrcoef(xarr)
+ >>> R1
+ array([[ 1. , 0.99256089, -0.68080986],
+ [ 0.99256089, 1. , -0.76492172],
+ [-0.68080986, -0.76492172, 1. ]])
+
+ If we add another set of variables and observations ``yarr``, we can
+ compute the row-wise Pearson correlation coefficients between the
+ variables in ``xarr`` and ``yarr``.
+
+ >>> yarr = rng.random((3, 3))
+ >>> yarr
+ array([[0.45038594, 0.37079802, 0.92676499],
+ [0.64386512, 0.82276161, 0.4434142 ],
+ [0.22723872, 0.55458479, 0.06381726]])
+ >>> R2 = np.corrcoef(xarr, yarr)
+ >>> R2
+ array([[ 1. , 0.99256089, -0.68080986, 0.75008178, -0.934284 ,
+ -0.99004057],
+ [ 0.99256089, 1. , -0.76492172, 0.82502011, -0.97074098,
+ -0.99981569],
+ [-0.68080986, -0.76492172, 1. , -0.99507202, 0.89721355,
+ 0.77714685],
+ [ 0.75008178, 0.82502011, -0.99507202, 1. , -0.93657855,
+ -0.83571711],
+ [-0.934284 , -0.97074098, 0.89721355, -0.93657855, 1. ,
+ 0.97517215],
+ [-0.99004057, -0.99981569, 0.77714685, -0.83571711, 0.97517215,
+ 1. ]])
+
+ Finally if we use the option ``rowvar=False``, the columns are now
+ being treated as the variables and we will find the column-wise Pearson
+ correlation coefficients between variables in ``xarr`` and ``yarr``.
+
+ >>> R3 = np.corrcoef(xarr, yarr, rowvar=False)
+ >>> R3
+ array([[ 1. , 0.77598074, -0.47458546, -0.75078643, -0.9665554 ,
+ 0.22423734],
+ [ 0.77598074, 1. , -0.92346708, -0.99923895, -0.58826587,
+ -0.44069024],
+ [-0.47458546, -0.92346708, 1. , 0.93773029, 0.23297648,
+ 0.75137473],
+ [-0.75078643, -0.99923895, 0.93773029, 1. , 0.55627469,
+ 0.47536961],
+ [-0.9665554 , -0.58826587, 0.23297648, 0.55627469, 1. ,
+ -0.46666491],
+ [ 0.22423734, -0.44069024, 0.75137473, 0.47536961, -0.46666491,
+ 1. ]])
+
+ """
+ if bias is not np._NoValue or ddof is not np._NoValue:
+ # 2015-03-15, 1.10
+ warnings.warn('bias and ddof have no effect and are deprecated',
+ DeprecationWarning, stacklevel=2)
+ c = cov(x, y, rowvar, dtype=dtype)
+ try:
+ d = diag(c)
+ except ValueError:
+ # scalar covariance
+ # nan if incorrect value (nan, inf, 0), 1 otherwise
+ return c / c
+ stddev = sqrt(d.real)
+ c /= stddev[:, None]
+ c /= stddev[None, :]
+
+ # Clip real and imaginary parts to [-1, 1]. This does not guarantee
+ # abs(a[i,j]) <= 1 for complex arrays, but is the best we can do without
+ # excessive work.
+ np.clip(c.real, -1, 1, out=c.real)
+ if np.iscomplexobj(c):
+ np.clip(c.imag, -1, 1, out=c.imag)
+
+ return c
+
+
+@set_module('numpy')
+def blackman(M):
+ """
+ Return the Blackman window.
+
+ The Blackman window is a taper formed by using the first three
+ terms of a summation of cosines. It was designed to have close to the
+ minimal leakage possible. It is close to optimal, only slightly worse
+ than a Kaiser window.
+
+ Parameters
+ ----------
+ M : int
+ Number of points in the output window. If zero or less, an empty
+ array is returned.
+
+ Returns
+ -------
+ out : ndarray
+ The window, with the maximum value normalized to one (the value one
+ appears only if the number of samples is odd).
+
+ See Also
+ --------
+ bartlett, hamming, hanning, kaiser
+
+ Notes
+ -----
+ The Blackman window is defined as
+
+ .. math:: w(n) = 0.42 - 0.5 \\cos(2\\pi n/M) + 0.08 \\cos(4\\pi n/M)
+
+ Most references to the Blackman window come from the signal processing
+ literature, where it is used as one of many windowing functions for
+ smoothing values. It is also known as an apodization (which means
+ "removing the foot", i.e. smoothing discontinuities at the beginning
+ and end of the sampled signal) or tapering function. It is known as a
+ "near optimal" tapering function, almost as good (by some measures)
+ as the kaiser window.
+
+ References
+ ----------
+ Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra,
+ Dover Publications, New York.
+
+ Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing.
+ Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> import matplotlib.pyplot as plt
+ >>> np.blackman(12)
+ array([-1.38777878e-17, 3.26064346e-02, 1.59903635e-01, # may vary
+ 4.14397981e-01, 7.36045180e-01, 9.67046769e-01,
+ 9.67046769e-01, 7.36045180e-01, 4.14397981e-01,
+ 1.59903635e-01, 3.26064346e-02, -1.38777878e-17])
+
+ Plot the window and the frequency response.
+
+ .. plot::
+ :include-source:
+
+ import matplotlib.pyplot as plt
+ from numpy.fft import fft, fftshift
+ window = np.blackman(51)
+ plt.plot(window)
+ plt.title("Blackman window")
+ plt.ylabel("Amplitude")
+ plt.xlabel("Sample")
+ plt.show()
+
+ plt.figure()
+ A = fft(window, 2048) / 25.5
+ mag = np.abs(fftshift(A))
+ freq = np.linspace(-0.5, 0.5, len(A))
+ with np.errstate(divide='ignore', invalid='ignore'):
+ response = 20 * np.log10(mag)
+ response = np.clip(response, -100, 100)
+ plt.plot(freq, response)
+ plt.title("Frequency response of Blackman window")
+ plt.ylabel("Magnitude [dB]")
+ plt.xlabel("Normalized frequency [cycles per sample]")
+ plt.axis('tight')
+ plt.show()
+
+ """
+ # Ensures at least float64 via 0.0. M should be an integer, but conversion
+ # to double is safe for the range of window lengths.
+ values = np.array([0.0, M])
+ M = values[1]
+
+ if M < 1:
+ return array([], dtype=values.dtype)
+ if M == 1:
+ return ones(1, dtype=values.dtype)
+ n = arange(1 - M, M, 2)
+ return 0.42 + 0.5 * cos(pi * n / (M - 1)) + 0.08 * cos(2.0 * pi * n / (M - 1))
+
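+# Illustrative sketch (not part of the library): the grid n = arange(1-M, M, 2)
+# runs over the odd integers 2*k - (M - 1) for k = 0..M-1, so the centered form
+# above equals the usual 0.42 - 0.5*cos(2*pi*k/(M-1)) + 0.08*cos(4*pi*k/(M-1)):
+#
+#     M = 12
+#     n = np.arange(1 - M, M, 2)
+#     k = np.arange(M)
+#     w1 = 0.42 + 0.5*np.cos(np.pi*n/(M-1)) + 0.08*np.cos(2*np.pi*n/(M-1))
+#     w2 = 0.42 - 0.5*np.cos(2*np.pi*k/(M-1)) + 0.08*np.cos(4*np.pi*k/(M-1))
+#     assert np.allclose(w1, w2)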
+
+@set_module('numpy')
+def bartlett(M):
+ """
+ Return the Bartlett window.
+
+ The Bartlett window is very similar to a triangular window, except
+ that the end points are at zero. It is often used in signal
+ processing for tapering a signal, without generating too much
+ ripple in the frequency domain.
+
+ Parameters
+ ----------
+ M : int
+ Number of points in the output window. If zero or less, an
+ empty array is returned.
+
+ Returns
+ -------
+ out : array
+ The triangular window, with the maximum value normalized to one
+ (the value one appears only if the number of samples is odd), with
+ the first and last samples equal to zero.
+
+ See Also
+ --------
+ blackman, hamming, hanning, kaiser
+
+ Notes
+ -----
+ The Bartlett window is defined as
+
+ .. math:: w(n) = \\frac{2}{M-1} \\left(
+ \\frac{M-1}{2} - \\left|n - \\frac{M-1}{2}\\right|
+ \\right)
+
+ Most references to the Bartlett window come from the signal processing
+ literature, where it is used as one of many windowing functions for
+ smoothing values. Note that convolution with this window produces linear
+ interpolation. It is also known as an apodization (which means "removing
+ the foot", i.e. smoothing discontinuities at the beginning and end of the
+ sampled signal) or tapering function. The Fourier transform of the
+ Bartlett window is the product of two sinc functions. Note the excellent
+ discussion in Kanasewich [2]_.
+
+ References
+ ----------
+ .. [1] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
+ Biometrika 37, 1-16, 1950.
+ .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
+ The University of Alberta Press, 1975, pp. 109-110.
+ .. [3] A.V. Oppenheim and R.W. Schafer, "Discrete-Time Signal
+ Processing", Prentice-Hall, 1999, pp. 468-471.
+ .. [4] Wikipedia, "Window function",
+ https://en.wikipedia.org/wiki/Window_function
+ .. [5] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
+ "Numerical Recipes", Cambridge University Press, 1986, page 429.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> import matplotlib.pyplot as plt
+ >>> np.bartlett(12)
+ array([ 0. , 0.18181818, 0.36363636, 0.54545455, 0.72727273, # may vary
+ 0.90909091, 0.90909091, 0.72727273, 0.54545455, 0.36363636,
+ 0.18181818, 0. ])
+
+ Plot the window and its frequency response (requires matplotlib).
+
+ .. plot::
+ :include-source:
+
+ import matplotlib.pyplot as plt
+ from numpy.fft import fft, fftshift
+ window = np.bartlett(51)
+ plt.plot(window)
+ plt.title("Bartlett window")
+ plt.ylabel("Amplitude")
+ plt.xlabel("Sample")
+ plt.show()
+ plt.figure()
+ A = fft(window, 2048) / 25.5
+ mag = np.abs(fftshift(A))
+ freq = np.linspace(-0.5, 0.5, len(A))
+ with np.errstate(divide='ignore', invalid='ignore'):
+ response = 20 * np.log10(mag)
+ response = np.clip(response, -100, 100)
+ plt.plot(freq, response)
+ plt.title("Frequency response of Bartlett window")
+ plt.ylabel("Magnitude [dB]")
+ plt.xlabel("Normalized frequency [cycles per sample]")
+ plt.axis('tight')
+ plt.show()
+
+ """
+ # Ensures at least float64 via 0.0. M should be an integer, but conversion
+ # to double is safe for the range of window lengths.
+ values = np.array([0.0, M])
+ M = values[1]
+
+ if M < 1:
+ return array([], dtype=values.dtype)
+ if M == 1:
+ return ones(1, dtype=values.dtype)
+ n = arange(1 - M, M, 2)
+ return where(less_equal(n, 0), 1 + n / (M - 1), 1 - n / (M - 1))
+
+
+@set_module('numpy')
+def hanning(M):
+ """
+ Return the Hanning window.
+
+ The Hanning window is a taper formed by using a weighted cosine.
+
+ Parameters
+ ----------
+ M : int
+ Number of points in the output window. If zero or less, an
+ empty array is returned.
+
+ Returns
+ -------
+ out : ndarray, shape(M,)
+ The window, with the maximum value normalized to one (the value
+ one appears only if `M` is odd).
+
+ See Also
+ --------
+ bartlett, blackman, hamming, kaiser
+
+ Notes
+ -----
+ The Hanning window is defined as
+
+ .. math:: w(n) = 0.5 - 0.5\\cos\\left(\\frac{2\\pi{n}}{M-1}\\right)
+ \\qquad 0 \\leq n \\leq M-1
+
+ The Hanning was named for Julius von Hann, an Austrian meteorologist.
+ It is also known as the Cosine Bell. Some authors prefer that it be
+ called a Hann window, to help avoid confusion with the very similar
+ Hamming window.
+
+ Most references to the Hanning window come from the signal processing
+ literature, where it is used as one of many windowing functions for
+ smoothing values. It is also known as an apodization (which means
+ "removing the foot", i.e. smoothing discontinuities at the beginning
+ and end of the sampled signal) or tapering function.
+
+ References
+ ----------
+ .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
+ spectra, Dover Publications, New York.
+ .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
+ The University of Alberta Press, 1975, pp. 106-108.
+ .. [3] Wikipedia, "Window function",
+ https://en.wikipedia.org/wiki/Window_function
+ .. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
+ "Numerical Recipes", Cambridge University Press, 1986, page 425.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.hanning(12)
+ array([0. , 0.07937323, 0.29229249, 0.57115742, 0.82743037,
+ 0.97974649, 0.97974649, 0.82743037, 0.57115742, 0.29229249,
+ 0.07937323, 0. ])
+
+ Plot the window and its frequency response.
+
+ .. plot::
+ :include-source:
+
+ import matplotlib.pyplot as plt
+ from numpy.fft import fft, fftshift
+ window = np.hanning(51)
+ plt.plot(window)
+ plt.title("Hann window")
+ plt.ylabel("Amplitude")
+ plt.xlabel("Sample")
+ plt.show()
+
+ plt.figure()
+ A = fft(window, 2048) / 25.5
+ mag = np.abs(fftshift(A))
+ freq = np.linspace(-0.5, 0.5, len(A))
+ with np.errstate(divide='ignore', invalid='ignore'):
+ response = 20 * np.log10(mag)
+ response = np.clip(response, -100, 100)
+ plt.plot(freq, response)
+ plt.title("Frequency response of the Hann window")
+ plt.ylabel("Magnitude [dB]")
+ plt.xlabel("Normalized frequency [cycles per sample]")
+ plt.axis('tight')
+ plt.show()
+
+ """
+ # Ensures at least float64 via 0.0. M should be an integer, but conversion
+ # to double is safe for the range of window lengths.
+ values = np.array([0.0, M])
+ M = values[1]
+
+ if M < 1:
+ return array([], dtype=values.dtype)
+ if M == 1:
+ return ones(1, dtype=values.dtype)
+ n = arange(1 - M, M, 2)
+ return 0.5 + 0.5 * cos(pi * n / (M - 1))
+
+
+@set_module('numpy')
+def hamming(M):
+ """
+ Return the Hamming window.
+
+ The Hamming window is a taper formed by using a weighted cosine.
+
+ Parameters
+ ----------
+ M : int
+ Number of points in the output window. If zero or less, an
+ empty array is returned.
+
+ Returns
+ -------
+ out : ndarray
+ The window, with the maximum value normalized to one (the value
+ one appears only if the number of samples is odd).
+
+ See Also
+ --------
+ bartlett, blackman, hanning, kaiser
+
+ Notes
+ -----
+ The Hamming window is defined as
+
+ .. math:: w(n) = 0.54 - 0.46\\cos\\left(\\frac{2\\pi{n}}{M-1}\\right)
+ \\qquad 0 \\leq n \\leq M-1
+
+ The Hamming was named for R. W. Hamming, an associate of J. W. Tukey
+ and is described in Blackman and Tukey. It was recommended for
+ smoothing the truncated autocovariance function in the time domain.
+ Most references to the Hamming window come from the signal processing
+ literature, where it is used as one of many windowing functions for
+ smoothing values. It is also known as an apodization (which means
+ "removing the foot", i.e. smoothing discontinuities at the beginning
+ and end of the sampled signal) or tapering function.
+
+ References
+ ----------
+ .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
+ spectra, Dover Publications, New York.
+ .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
+ University of Alberta Press, 1975, pp. 109-110.
+ .. [3] Wikipedia, "Window function",
+ https://en.wikipedia.org/wiki/Window_function
+ .. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
+ "Numerical Recipes", Cambridge University Press, 1986, page 425.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.hamming(12)
+ array([ 0.08 , 0.15302337, 0.34890909, 0.60546483, 0.84123594, # may vary
+ 0.98136677, 0.98136677, 0.84123594, 0.60546483, 0.34890909,
+ 0.15302337, 0.08 ])
+
+ Plot the window and the frequency response.
+
+ .. plot::
+ :include-source:
+
+ import matplotlib.pyplot as plt
+ from numpy.fft import fft, fftshift
+ window = np.hamming(51)
+ plt.plot(window)
+ plt.title("Hamming window")
+ plt.ylabel("Amplitude")
+ plt.xlabel("Sample")
+ plt.show()
+
+ plt.figure()
+ A = fft(window, 2048) / 25.5
+ mag = np.abs(fftshift(A))
+ freq = np.linspace(-0.5, 0.5, len(A))
+ with np.errstate(divide='ignore', invalid='ignore'):
+ response = 20 * np.log10(mag)
+ response = np.clip(response, -100, 100)
+ plt.plot(freq, response)
+ plt.title("Frequency response of Hamming window")
+ plt.ylabel("Magnitude [dB]")
+ plt.xlabel("Normalized frequency [cycles per sample]")
+ plt.axis('tight')
+ plt.show()
+
+ """
+ # Ensures at least float64 via 0.0. M should be an integer, but conversion
+ # to double is safe for the range of window lengths.
+ values = np.array([0.0, M])
+ M = values[1]
+
+ if M < 1:
+ return array([], dtype=values.dtype)
+ if M == 1:
+ return ones(1, dtype=values.dtype)
+ n = arange(1 - M, M, 2)
+ return 0.54 + 0.46 * cos(pi * n / (M - 1))
+
+
+## Code from cephes for i0
+
+_i0A = [
+ -4.41534164647933937950E-18,
+ 3.33079451882223809783E-17,
+ -2.43127984654795469359E-16,
+ 1.71539128555513303061E-15,
+ -1.16853328779934516808E-14,
+ 7.67618549860493561688E-14,
+ -4.85644678311192946090E-13,
+ 2.95505266312963983461E-12,
+ -1.72682629144155570723E-11,
+ 9.67580903537323691224E-11,
+ -5.18979560163526290666E-10,
+ 2.65982372468238665035E-9,
+ -1.30002500998624804212E-8,
+ 6.04699502254191894932E-8,
+ -2.67079385394061173391E-7,
+ 1.11738753912010371815E-6,
+ -4.41673835845875056359E-6,
+ 1.64484480707288970893E-5,
+ -5.75419501008210370398E-5,
+ 1.88502885095841655729E-4,
+ -5.76375574538582365885E-4,
+ 1.63947561694133579842E-3,
+ -4.32430999505057594430E-3,
+ 1.05464603945949983183E-2,
+ -2.37374148058994688156E-2,
+ 4.93052842396707084878E-2,
+ -9.49010970480476444210E-2,
+ 1.71620901522208775349E-1,
+ -3.04682672343198398683E-1,
+ 6.76795274409476084995E-1
+ ]
+
+_i0B = [
+ -7.23318048787475395456E-18,
+ -4.83050448594418207126E-18,
+ 4.46562142029675999901E-17,
+ 3.46122286769746109310E-17,
+ -2.82762398051658348494E-16,
+ -3.42548561967721913462E-16,
+ 1.77256013305652638360E-15,
+ 3.81168066935262242075E-15,
+ -9.55484669882830764870E-15,
+ -4.15056934728722208663E-14,
+ 1.54008621752140982691E-14,
+ 3.85277838274214270114E-13,
+ 7.18012445138366623367E-13,
+ -1.79417853150680611778E-12,
+ -1.32158118404477131188E-11,
+ -3.14991652796324136454E-11,
+ 1.18891471078464383424E-11,
+ 4.94060238822496958910E-10,
+ 3.39623202570838634515E-9,
+ 2.26666899049817806459E-8,
+ 2.04891858946906374183E-7,
+ 2.89137052083475648297E-6,
+ 6.88975834691682398426E-5,
+ 3.36911647825569408990E-3,
+ 8.04490411014108831608E-1
+ ]
+
+
+def _chbevl(x, vals):
+ b0 = vals[0]
+ b1 = 0.0
+
+ for i in range(1, len(vals)):
+ b2 = b1
+ b1 = b0
+ b0 = x * b1 - b2 + vals[i]
+
+ return 0.5 * (b0 - b2)
+
+
+def _i0_1(x):
+ return exp(x) * _chbevl(x / 2.0 - 2, _i0A)
+
+
+def _i0_2(x):
+ return exp(x) * _chbevl(32.0 / x - 2.0, _i0B) / sqrt(x)
+
+
+def _i0_dispatcher(x):
+ return (x,)
+
+
+@array_function_dispatch(_i0_dispatcher)
+def i0(x):
+ """
+ Modified Bessel function of the first kind, order 0.
+
+ Usually denoted :math:`I_0`.
+
+ Parameters
+ ----------
+ x : array_like of float
+ Argument of the Bessel function.
+
+ Returns
+ -------
+ out : ndarray, shape = x.shape, dtype = float
+ The modified Bessel function evaluated at each of the elements of `x`.
+
+ See Also
+ --------
+ scipy.special.i0, scipy.special.iv, scipy.special.ive
+
+ Notes
+ -----
+ The scipy implementation is recommended over this function: it is a
+ proper ufunc written in C, and more than an order of magnitude faster.
+
+ We use the algorithm published by Clenshaw [1]_ and referenced by
+ Abramowitz and Stegun [2]_, for which the function domain is
+ partitioned into the two intervals [0,8] and (8,inf), and Chebyshev
+ polynomial expansions are employed in each interval. Relative error on
+ the domain [0,30] using IEEE arithmetic is documented [3]_ as having a
+ peak of 5.8e-16 with an rms of 1.4e-16 (n = 30000).
+
+ References
+ ----------
+ .. [1] C. W. Clenshaw, "Chebyshev series for mathematical functions", in
+ *National Physical Laboratory Mathematical Tables*, vol. 5, London:
+ Her Majesty's Stationery Office, 1962.
+ .. [2] M. Abramowitz and I. A. Stegun, *Handbook of Mathematical
+           Functions*, 10th printing, New York: Dover, 1964, p. 379.
+ https://personal.math.ubc.ca/~cbm/aands/page_379.htm
+ .. [3] https://metacpan.org/pod/distribution/Math-Cephes/lib/Math/Cephes.pod#i0:-Modified-Bessel-function-of-order-zero
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.i0(0.)
+ array(1.0)
+ >>> np.i0([0, 1, 2, 3])
+ array([1. , 1.26606588, 2.2795853 , 4.88079259])
+
+ """
+ x = np.asanyarray(x)
+ if x.dtype.kind == 'c':
+ raise TypeError("i0 not supported for complex values")
+ if x.dtype.kind != 'f':
+ x = x.astype(float)
+ x = np.abs(x)
+ return piecewise(x, [x <= 8.0], [_i0_1, _i0_2])
+
+## End of cephes code for i0
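+
+
+# Editor's note: a minimal, hypothetical sanity check (illustrative only,
+# not part of NumPy, never called). The series _i0A covers [0, 8] and _i0B
+# covers (8, inf); `i0` above switches between them at x = 8, so the two
+# expansions should agree at the seam, assuming IEEE double arithmetic.
+def _i0_seam_demo():
+    # Both branches approximate I0(8) (about 427.564); rtol=1e-12 leaves
+    # generous headroom over the ~1e-16 error quoted in the `i0` Notes.
+    assert np.isclose(_i0_1(8.0), _i0_2(8.0), rtol=1e-12)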
+
+
+@set_module('numpy')
+def kaiser(M, beta):
+ """
+ Return the Kaiser window.
+
+ The Kaiser window is a taper formed by using a Bessel function.
+
+ Parameters
+ ----------
+ M : int
+ Number of points in the output window. If zero or less, an
+ empty array is returned.
+ beta : float
+ Shape parameter for window.
+
+ Returns
+ -------
+ out : array
+ The window, with the maximum value normalized to one (the value
+ one appears only if the number of samples is odd).
+
+ See Also
+ --------
+ bartlett, blackman, hamming, hanning
+
+ Notes
+ -----
+ The Kaiser window is defined as
+
+ .. math:: w(n) = I_0\\left( \\beta \\sqrt{1-\\frac{4n^2}{(M-1)^2}}
+ \\right)/I_0(\\beta)
+
+ with
+
+ .. math:: \\quad -\\frac{M-1}{2} \\leq n \\leq \\frac{M-1}{2},
+
+ where :math:`I_0` is the modified zeroth-order Bessel function.
+
+    The Kaiser window was named for Jim Kaiser, who discovered a simple
+    approximation to the DPSS window based on Bessel functions. The Kaiser
+    window is a very good approximation to the Digital Prolate Spheroidal
+    Sequence, or Slepian window, which is the window that maximizes the
+    energy in the main lobe relative to the total energy.
+
+ The Kaiser can approximate many other windows by varying the beta
+ parameter.
+
+ ==== =======================
+ beta Window shape
+ ==== =======================
+ 0 Rectangular
+ 5 Similar to a Hamming
+ 6 Similar to a Hanning
+ 8.6 Similar to a Blackman
+ ==== =======================
+
+    A beta value of 14 is probably a good starting point. Note that as beta
+    gets large, the window narrows, so the number of samples needs to be
+    large enough to sample the increasingly narrow spike; otherwise NaNs
+    will be returned.
+
+ Most references to the Kaiser window come from the signal processing
+ literature, where it is used as one of many windowing functions for
+ smoothing values. It is also known as an apodization (which means
+ "removing the foot", i.e. smoothing discontinuities at the beginning
+ and end of the sampled signal) or tapering function.
+
+ References
+ ----------
+    .. [1] J. F. Kaiser, "Digital Filters", Ch. 7 in "Systems Analysis by
+           Digital Computer", F. F. Kuo and J. F. Kaiser, eds., pp. 218-285,
+           John Wiley and Sons, New York, 1966.
+ .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
+ University of Alberta Press, 1975, pp. 177-178.
+ .. [3] Wikipedia, "Window function",
+ https://en.wikipedia.org/wiki/Window_function
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> import matplotlib.pyplot as plt
+ >>> np.kaiser(12, 14)
+ array([7.72686684e-06, 3.46009194e-03, 4.65200189e-02, # may vary
+ 2.29737120e-01, 5.99885316e-01, 9.45674898e-01,
+ 9.45674898e-01, 5.99885316e-01, 2.29737120e-01,
+ 4.65200189e-02, 3.46009194e-03, 7.72686684e-06])
+
+
+ Plot the window and the frequency response.
+
+ .. plot::
+ :include-source:
+
+ import matplotlib.pyplot as plt
+ from numpy.fft import fft, fftshift
+ window = np.kaiser(51, 14)
+ plt.plot(window)
+ plt.title("Kaiser window")
+ plt.ylabel("Amplitude")
+ plt.xlabel("Sample")
+ plt.show()
+
+ plt.figure()
+ A = fft(window, 2048) / 25.5
+ mag = np.abs(fftshift(A))
+ freq = np.linspace(-0.5, 0.5, len(A))
+ response = 20 * np.log10(mag)
+ response = np.clip(response, -100, 100)
+ plt.plot(freq, response)
+ plt.title("Frequency response of Kaiser window")
+ plt.ylabel("Magnitude [dB]")
+ plt.xlabel("Normalized frequency [cycles per sample]")
+ plt.axis('tight')
+ plt.show()
+
+ """
+    # Ensures at least float64 via 0.0. M should be an integer, but
+    # conversion to double is safe for a range. (Including 0.0 acts as a
+    # simplified, strongly typed result_type; result_type is less order
+    # sensitive, but that mainly matters for integers anyway.)
+ values = np.array([0.0, M, beta])
+ M = values[1]
+ beta = values[2]
+
+ if M == 1:
+ return np.ones(1, dtype=values.dtype)
+ n = arange(0, M)
+ alpha = (M - 1) / 2.0
+ return i0(beta * sqrt(1 - ((n - alpha) / alpha)**2.0)) / i0(beta)
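+
+
+# Editor's note: a minimal, hypothetical demo (illustrative only, not part
+# of NumPy, never called). The beta table in the docstring lists beta = 0 as
+# rectangular; this follows directly from the formula, since the Bessel
+# ratio I0(0)/I0(0) is identically 1 for every sample.
+def _kaiser_beta_zero_demo():
+    assert np.allclose(np.kaiser(8, 0.0), np.ones(8))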
+
+
+def _sinc_dispatcher(x):
+ return (x,)
+
+
+@array_function_dispatch(_sinc_dispatcher)
+def sinc(x):
+ r"""
+ Return the normalized sinc function.
+
+ The sinc function is equal to :math:`\sin(\pi x)/(\pi x)` for any argument
+ :math:`x\ne 0`. ``sinc(0)`` takes the limit value 1, making ``sinc`` not
+ only everywhere continuous but also infinitely differentiable.
+
+ .. note::
+
+ Note the normalization factor of ``pi`` used in the definition.
+ This is the most commonly used definition in signal processing.
+ Use ``sinc(x / np.pi)`` to obtain the unnormalized sinc function
+ :math:`\sin(x)/x` that is more common in mathematics.
+
+ Parameters
+ ----------
+ x : ndarray
+ Array (possibly multi-dimensional) of values for which to calculate
+ ``sinc(x)``.
+
+ Returns
+ -------
+ out : ndarray
+ ``sinc(x)``, which has the same shape as the input.
+
+ Notes
+ -----
+ The name sinc is short for "sine cardinal" or "sinus cardinalis".
+
+ The sinc function is used in various signal processing applications,
+ including in anti-aliasing, in the construction of a Lanczos resampling
+ filter, and in interpolation.
+
+ For bandlimited interpolation of discrete-time signals, the ideal
+ interpolation kernel is proportional to the sinc function.
+
+ References
+ ----------
+ .. [1] Weisstein, Eric W. "Sinc Function." From MathWorld--A Wolfram Web
+ Resource. https://mathworld.wolfram.com/SincFunction.html
+ .. [2] Wikipedia, "Sinc function",
+ https://en.wikipedia.org/wiki/Sinc_function
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> import matplotlib.pyplot as plt
+ >>> x = np.linspace(-4, 4, 41)
+ >>> np.sinc(x)
+ array([-3.89804309e-17, -4.92362781e-02, -8.40918587e-02, # may vary
+ -8.90384387e-02, -5.84680802e-02, 3.89804309e-17,
+ 6.68206631e-02, 1.16434881e-01, 1.26137788e-01,
+ 8.50444803e-02, -3.89804309e-17, -1.03943254e-01,
+ -1.89206682e-01, -2.16236208e-01, -1.55914881e-01,
+ 3.89804309e-17, 2.33872321e-01, 5.04551152e-01,
+ 7.56826729e-01, 9.35489284e-01, 1.00000000e+00,
+ 9.35489284e-01, 7.56826729e-01, 5.04551152e-01,
+ 2.33872321e-01, 3.89804309e-17, -1.55914881e-01,
+ -2.16236208e-01, -1.89206682e-01, -1.03943254e-01,
+ -3.89804309e-17, 8.50444803e-02, 1.26137788e-01,
+ 1.16434881e-01, 6.68206631e-02, 3.89804309e-17,
+ -5.84680802e-02, -8.90384387e-02, -8.40918587e-02,
+ -4.92362781e-02, -3.89804309e-17])
+
+ >>> plt.plot(x, np.sinc(x))
+ [<matplotlib.lines.Line2D object at 0x...>]
+ >>> plt.title("Sinc Function")
+ Text(0.5, 1.0, 'Sinc Function')
+ >>> plt.ylabel("Amplitude")
+ Text(0, 0.5, 'Amplitude')
+ >>> plt.xlabel("X")
+ Text(0.5, 0, 'X')
+ >>> plt.show()
+
+ """
+ x = np.asanyarray(x)
+ x = pi * x
+ # Hope that 1e-20 is sufficient for objects...
+ eps = np.finfo(x.dtype).eps if x.dtype.kind == "f" else 1e-20
+ y = where(x, x, eps)
+ return sin(y) / y
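+
+
+# Editor's note: a minimal, hypothetical demo (illustrative only, not part
+# of NumPy, never called). As the docstring note says, dividing the argument
+# by pi recovers the unnormalized sin(x)/x convention.
+def _sinc_normalization_demo():
+    x = np.array([0.5, 1.0, 2.0])
+    assert np.allclose(np.sinc(x / np.pi), np.sin(x) / x)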
+
+
+def _ureduce(a, func, keepdims=False, **kwargs):
+ """
+    Internal function.
+    Call `func` with `a` as first argument, swapping the axes to support an
+    extended `axis` argument on functions that don't support it natively.
+
+    Returns the result of the reduction.
+
+ Parameters
+ ----------
+ a : array_like
+ Input array or object that can be converted to an array.
+ func : callable
+ Reduction function capable of receiving a single axis argument.
+ It is called with `a` as first argument followed by `kwargs`.
+    keepdims : bool, optional
+        If True, the reduced axes are left in the result as dimensions with
+        size one.
+    kwargs : keyword arguments
+        Additional keyword arguments to pass to `func`.
+
+ Returns
+ -------
+    result : ndarray
+        Result of ``func(a, **kwargs)``. If `keepdims` is True, the reduced
+        axes are kept as dimensions with size one, so the result broadcasts
+        against `a` the same way a ufunc with keepdims=True would.
+
+ """
+ a = np.asanyarray(a)
+ axis = kwargs.get('axis')
+ out = kwargs.get('out')
+
+ if keepdims is np._NoValue:
+ keepdims = False
+
+ nd = a.ndim
+ if axis is not None:
+ axis = _nx.normalize_axis_tuple(axis, nd)
+
+ if keepdims and out is not None:
+ index_out = tuple(
+ 0 if i in axis else slice(None) for i in range(nd))
+ kwargs['out'] = out[(Ellipsis, ) + index_out]
+
+ if len(axis) == 1:
+ kwargs['axis'] = axis[0]
+ else:
+ keep = set(range(nd)) - set(axis)
+ nkeep = len(keep)
+ # swap axis that should not be reduced to front
+ for i, s in enumerate(sorted(keep)):
+ a = a.swapaxes(i, s)
+ # merge reduced axis
+ a = a.reshape(a.shape[:nkeep] + (-1,))
+ kwargs['axis'] = -1
+ elif keepdims and out is not None:
+ index_out = (0, ) * nd
+ kwargs['out'] = out[(Ellipsis, ) + index_out]
+
+ r = func(a, **kwargs)
+
+ if out is not None:
+ return out
+
+ if keepdims:
+ if axis is None:
+ index_r = (np.newaxis, ) * nd
+ else:
+ index_r = tuple(
+ np.newaxis if i in axis else slice(None)
+ for i in range(nd))
+ r = r[(Ellipsis, ) + index_r]
+
+ return r
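+
+
+# Editor's note: a minimal, hypothetical demo (illustrative only, not part
+# of NumPy, never called). For a multi-axis reduction, _ureduce moves the
+# kept axes to the front and collapses the reduced axes into one trailing
+# axis; this is the manual equivalent for a median over axes (0, 2).
+def _ureduce_axis_merge_demo():
+    a = np.arange(24.0).reshape(2, 3, 4)
+    merged = np.moveaxis(a, 1, 0).reshape(3, -1)  # keep axis 1 in front
+    assert np.array_equal(np.median(a, axis=(0, 2)),
+                          np.median(merged, axis=-1))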
+
+
+def _median_dispatcher(
+ a, axis=None, out=None, overwrite_input=None, keepdims=None):
+ return (a, out)
+
+
+@array_function_dispatch(_median_dispatcher)
+def median(a, axis=None, out=None, overwrite_input=False, keepdims=False):
+ """
+ Compute the median along the specified axis.
+
+ Returns the median of the array elements.
+
+ Parameters
+ ----------
+ a : array_like
+ Input array or object that can be converted to an array.
+ axis : {int, sequence of int, None}, optional
+ Axis or axes along which the medians are computed. The default,
+ axis=None, will compute the median along a flattened version of
+ the array. If a sequence of axes, the array is first flattened
+ along the given axes, then the median is computed along the
+ resulting flattened axis.
+ out : ndarray, optional
+ Alternative output array in which to place the result. It must
+ have the same shape and buffer length as the expected output,
+ but the type (of the output) will be cast if necessary.
+ overwrite_input : bool, optional
+ If True, then allow use of memory of input array `a` for
+ calculations. The input array will be modified by the call to
+ `median`. This will save memory when you do not need to preserve
+ the contents of the input array. Treat the input as undefined,
+ but it will probably be fully or partially sorted. Default is
+ False. If `overwrite_input` is ``True`` and `a` is not already an
+ `ndarray`, an error will be raised.
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left
+ in the result as dimensions with size one. With this option,
+ the result will broadcast correctly against the original `arr`.
+
+ Returns
+ -------
+ median : ndarray
+ A new array holding the result. If the input contains integers
+ or floats smaller than ``float64``, then the output data-type is
+ ``np.float64``. Otherwise, the data-type of the output is the
+ same as that of the input. If `out` is specified, that array is
+ returned instead.
+
+ See Also
+ --------
+ mean, percentile
+
+ Notes
+ -----
+ Given a vector ``V`` of length ``N``, the median of ``V`` is the
+    middle value of a sorted copy of ``V``, ``V_sorted``, i.e.,
+    ``V_sorted[(N-1)/2]`` when ``N`` is odd, and the average of the
+    two middle values of ``V_sorted`` when ``N`` is even.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> a = np.array([[10, 7, 4], [3, 2, 1]])
+ >>> a
+ array([[10, 7, 4],
+ [ 3, 2, 1]])
+ >>> np.median(a)
+ np.float64(3.5)
+ >>> np.median(a, axis=0)
+ array([6.5, 4.5, 2.5])
+ >>> np.median(a, axis=1)
+ array([7., 2.])
+ >>> np.median(a, axis=(0, 1))
+ np.float64(3.5)
+ >>> m = np.median(a, axis=0)
+ >>> out = np.zeros_like(m)
+ >>> np.median(a, axis=0, out=m)
+ array([6.5, 4.5, 2.5])
+ >>> m
+ array([6.5, 4.5, 2.5])
+ >>> b = a.copy()
+ >>> np.median(b, axis=1, overwrite_input=True)
+ array([7., 2.])
+ >>> assert not np.all(a==b)
+ >>> b = a.copy()
+ >>> np.median(b, axis=None, overwrite_input=True)
+ np.float64(3.5)
+ >>> assert not np.all(a==b)
+
+ """
+ return _ureduce(a, func=_median, keepdims=keepdims, axis=axis, out=out,
+ overwrite_input=overwrite_input)
+
+
+def _median(a, axis=None, out=None, overwrite_input=False):
+    # can't reasonably be implemented in terms of percentile as we have to
+ # call mean to not break astropy
+ a = np.asanyarray(a)
+
+ # Set the partition indexes
+ if axis is None:
+ sz = a.size
+ else:
+ sz = a.shape[axis]
+ if sz % 2 == 0:
+ szh = sz // 2
+ kth = [szh - 1, szh]
+ else:
+ kth = [(sz - 1) // 2]
+
+ # We have to check for NaNs (as of writing 'M' doesn't actually work).
+ supports_nans = np.issubdtype(a.dtype, np.inexact) or a.dtype.kind in 'Mm'
+ if supports_nans:
+ kth.append(-1)
+
+ if overwrite_input:
+ if axis is None:
+ part = a.ravel()
+ part.partition(kth)
+ else:
+ a.partition(kth, axis=axis)
+ part = a
+ else:
+ part = partition(a, kth, axis=axis)
+
+ if part.shape == ():
+ # make 0-D arrays work
+ return part.item()
+ if axis is None:
+ axis = 0
+
+ indexer = [slice(None)] * part.ndim
+ index = part.shape[axis] // 2
+ if part.shape[axis] % 2 == 1:
+ # index with slice to allow mean (below) to work
+ indexer[axis] = slice(index, index + 1)
+ else:
+ indexer[axis] = slice(index - 1, index + 1)
+ indexer = tuple(indexer)
+
+ # Use mean in both odd and even case to coerce data type,
+ # using out array if needed.
+ rout = mean(part[indexer], axis=axis, out=out)
+ if supports_nans and sz > 0:
+ # If nans are possible, warn and replace by nans like mean would.
+ rout = np.lib._utils_impl._median_nancheck(part, rout, axis)
+
+ return rout
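+
+
+# Editor's note: a minimal, hypothetical demo (illustrative only, not part
+# of NumPy, never called). The partition in _median only orders the middle
+# element(s) into place, which is all the median needs; for an even-length
+# 1-d array the two middle order statistics are averaged.
+def _median_partition_demo():
+    v = np.array([3.0, 1.0, 4.0, 2.0])
+    p = np.partition(v, [1, 2])  # kth = [szh - 1, szh] for sz = 4
+    assert np.median(v) == 0.5 * (p[1] + p[2]) == 2.5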
+
+
+def _percentile_dispatcher(a, q, axis=None, out=None, overwrite_input=None,
+ method=None, keepdims=None, *, weights=None,
+ interpolation=None):
+ return (a, q, out, weights)
+
+
+@array_function_dispatch(_percentile_dispatcher)
+def percentile(a,
+ q,
+ axis=None,
+ out=None,
+ overwrite_input=False,
+ method="linear",
+ keepdims=False,
+ *,
+ weights=None,
+ interpolation=None):
+ """
+ Compute the q-th percentile of the data along the specified axis.
+
+ Returns the q-th percentile(s) of the array elements.
+
+ Parameters
+ ----------
+ a : array_like of real numbers
+ Input array or object that can be converted to an array.
+ q : array_like of float
+ Percentage or sequence of percentages for the percentiles to compute.
+ Values must be between 0 and 100 inclusive.
+ axis : {int, tuple of int, None}, optional
+ Axis or axes along which the percentiles are computed. The
+ default is to compute the percentile(s) along a flattened
+ version of the array.
+ out : ndarray, optional
+ Alternative output array in which to place the result. It must
+ have the same shape and buffer length as the expected output,
+ but the type (of the output) will be cast if necessary.
+ overwrite_input : bool, optional
+ If True, then allow the input array `a` to be modified by intermediate
+ calculations, to save memory. In this case, the contents of the input
+ `a` after this function completes is undefined.
+ method : str, optional
+ This parameter specifies the method to use for estimating the
+ percentile. There are many different methods, some unique to NumPy.
+ See the notes for explanation. The options sorted by their R type
+ as summarized in the H&F paper [1]_ are:
+
+ 1. 'inverted_cdf'
+ 2. 'averaged_inverted_cdf'
+ 3. 'closest_observation'
+ 4. 'interpolated_inverted_cdf'
+ 5. 'hazen'
+ 6. 'weibull'
+ 7. 'linear' (default)
+ 8. 'median_unbiased'
+ 9. 'normal_unbiased'
+
+ The first three methods are discontinuous. NumPy further defines the
+ following discontinuous variations of the default 'linear' (7.) option:
+
+ * 'lower'
+        * 'higher'
+ * 'midpoint'
+ * 'nearest'
+
+ .. versionchanged:: 1.22.0
+ This argument was previously called "interpolation" and only
+ offered the "linear" default and last four options.
+
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left in
+ the result as dimensions with size one. With this option, the
+ result will broadcast correctly against the original array `a`.
+
+ weights : array_like, optional
+ An array of weights associated with the values in `a`. Each value in
+ `a` contributes to the percentile according to its associated weight.
+ The weights array can either be 1-D (in which case its length must be
+ the size of `a` along the given axis) or of the same shape as `a`.
+ If `weights=None`, then all data in `a` are assumed to have a
+ weight equal to one.
+ Only `method="inverted_cdf"` supports weights.
+ See the notes for more details.
+
+ .. versionadded:: 2.0.0
+
+ interpolation : str, optional
+ Deprecated name for the method keyword argument.
+
+ .. deprecated:: 1.22.0
+
+ Returns
+ -------
+ percentile : scalar or ndarray
+ If `q` is a single percentile and `axis=None`, then the result
+ is a scalar. If multiple percentiles are given, first axis of
+ the result corresponds to the percentiles. The other axes are
+ the axes that remain after the reduction of `a`. If the input
+ contains integers or floats smaller than ``float64``, the output
+ data-type is ``float64``. Otherwise, the output data-type is the
+ same as that of the input. If `out` is specified, that array is
+ returned instead.
+
+ See Also
+ --------
+ mean
+ median : equivalent to ``percentile(..., 50)``
+ nanpercentile
+ quantile : equivalent to percentile, except q in the range [0, 1].
+
+ Notes
+ -----
+ The behavior of `numpy.percentile` with percentage `q` is
+ that of `numpy.quantile` with argument ``q/100``.
+ For more information, please see `numpy.quantile`.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> a = np.array([[10, 7, 4], [3, 2, 1]])
+ >>> a
+ array([[10, 7, 4],
+ [ 3, 2, 1]])
+ >>> np.percentile(a, 50)
+ 3.5
+ >>> np.percentile(a, 50, axis=0)
+ array([6.5, 4.5, 2.5])
+ >>> np.percentile(a, 50, axis=1)
+ array([7., 2.])
+ >>> np.percentile(a, 50, axis=1, keepdims=True)
+ array([[7.],
+ [2.]])
+
+ >>> m = np.percentile(a, 50, axis=0)
+ >>> out = np.zeros_like(m)
+ >>> np.percentile(a, 50, axis=0, out=out)
+ array([6.5, 4.5, 2.5])
+ >>> m
+ array([6.5, 4.5, 2.5])
+
+ >>> b = a.copy()
+ >>> np.percentile(b, 50, axis=1, overwrite_input=True)
+ array([7., 2.])
+ >>> assert not np.all(a == b)
+
+ The different methods can be visualized graphically:
+
+ .. plot::
+
+ import matplotlib.pyplot as plt
+
+ a = np.arange(4)
+ p = np.linspace(0, 100, 6001)
+ ax = plt.gca()
+ lines = [
+ ('linear', '-', 'C0'),
+ ('inverted_cdf', ':', 'C1'),
+ # Almost the same as `inverted_cdf`:
+ ('averaged_inverted_cdf', '-.', 'C1'),
+ ('closest_observation', ':', 'C2'),
+ ('interpolated_inverted_cdf', '--', 'C1'),
+ ('hazen', '--', 'C3'),
+ ('weibull', '-.', 'C4'),
+ ('median_unbiased', '--', 'C5'),
+ ('normal_unbiased', '-.', 'C6'),
+ ]
+ for method, style, color in lines:
+ ax.plot(
+ p, np.percentile(a, p, method=method),
+ label=method, linestyle=style, color=color)
+ ax.set(
+ title='Percentiles for different methods and data: ' + str(a),
+ xlabel='Percentile',
+ ylabel='Estimated percentile value',
+ yticks=a)
+ ax.legend(bbox_to_anchor=(1.03, 1))
+ plt.tight_layout()
+ plt.show()
+
+ References
+ ----------
+ .. [1] R. J. Hyndman and Y. Fan,
+ "Sample quantiles in statistical packages,"
+ The American Statistician, 50(4), pp. 361-365, 1996
+
+ """
+ if interpolation is not None:
+ method = _check_interpolation_as_method(
+ method, interpolation, "percentile")
+
+ a = np.asanyarray(a)
+ if a.dtype.kind == "c":
+ raise TypeError("a must be an array of real numbers")
+
+ # Use dtype of array if possible (e.g., if q is a python int or float)
+ # by making the divisor have the dtype of the data array.
+    q = np.true_divide(q, a.dtype.type(100) if a.dtype.kind == "f" else 100,
+                       out=...)  # out=... requests an array, never a scalar
+ if not _quantile_is_valid(q):
+ raise ValueError("Percentiles must be in the range [0, 100]")
+
+ if weights is not None:
+ if method != "inverted_cdf":
+ msg = ("Only method 'inverted_cdf' supports weights. "
+ f"Got: {method}.")
+ raise ValueError(msg)
+ if axis is not None:
+ axis = _nx.normalize_axis_tuple(axis, a.ndim, argname="axis")
+ weights = _weights_are_valid(weights=weights, a=a, axis=axis)
+ if np.any(weights < 0):
+ raise ValueError("Weights must be non-negative.")
+
+ return _quantile_unchecked(
+ a, q, axis, out, overwrite_input, method, keepdims, weights)
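+
+
+# Editor's note: a minimal, hypothetical demo (illustrative only, not part
+# of NumPy, never called). As the Notes state, percentile(q) is
+# quantile(q / 100); the division by 100 is exact for this q, so the two
+# results match bit for bit.
+def _percentile_quantile_equivalence_demo():
+    a = np.array([1.0, 2.0, 3.0, 10.0])
+    assert np.percentile(a, 25.0) == np.quantile(a, 0.25)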
+
+
+def _quantile_dispatcher(a, q, axis=None, out=None, overwrite_input=None,
+ method=None, keepdims=None, *, weights=None,
+ interpolation=None):
+ return (a, q, out, weights)
+
+
+@array_function_dispatch(_quantile_dispatcher)
+def quantile(a,
+ q,
+ axis=None,
+ out=None,
+ overwrite_input=False,
+ method="linear",
+ keepdims=False,
+ *,
+ weights=None,
+ interpolation=None):
+ """
+ Compute the q-th quantile of the data along the specified axis.
+
+ Parameters
+ ----------
+ a : array_like of real numbers
+ Input array or object that can be converted to an array.
+ q : array_like of float
+ Probability or sequence of probabilities of the quantiles to compute.
+ Values must be between 0 and 1 inclusive.
+ axis : {int, tuple of int, None}, optional
+ Axis or axes along which the quantiles are computed. The default is
+ to compute the quantile(s) along a flattened version of the array.
+ out : ndarray, optional
+ Alternative output array in which to place the result. It must have
+ the same shape and buffer length as the expected output, but the
+ type (of the output) will be cast if necessary.
+ overwrite_input : bool, optional
+ If True, then allow the input array `a` to be modified by
+ intermediate calculations, to save memory. In this case, the
+ contents of the input `a` after this function completes is
+ undefined.
+ method : str, optional
+ This parameter specifies the method to use for estimating the
+ quantile. There are many different methods, some unique to NumPy.
+ The recommended options, numbered as they appear in [1]_, are:
+
+ 1. 'inverted_cdf'
+ 2. 'averaged_inverted_cdf'
+ 3. 'closest_observation'
+ 4. 'interpolated_inverted_cdf'
+ 5. 'hazen'
+ 6. 'weibull'
+ 7. 'linear' (default)
+ 8. 'median_unbiased'
+ 9. 'normal_unbiased'
+
+ The first three methods are discontinuous. For backward compatibility
+ with previous versions of NumPy, the following discontinuous variations
+ of the default 'linear' (7.) option are available:
+
+ * 'lower'
+        * 'higher'
+ * 'midpoint'
+ * 'nearest'
+
+ See Notes for details.
+
+ .. versionchanged:: 1.22.0
+ This argument was previously called "interpolation" and only
+ offered the "linear" default and last four options.
+
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left in
+ the result as dimensions with size one. With this option, the
+ result will broadcast correctly against the original array `a`.
+
+ weights : array_like, optional
+ An array of weights associated with the values in `a`. Each value in
+ `a` contributes to the quantile according to its associated weight.
+ The weights array can either be 1-D (in which case its length must be
+ the size of `a` along the given axis) or of the same shape as `a`.
+ If `weights=None`, then all data in `a` are assumed to have a
+ weight equal to one.
+ Only `method="inverted_cdf"` supports weights.
+ See the notes for more details.
+
+ .. versionadded:: 2.0.0
+
+ interpolation : str, optional
+ Deprecated name for the method keyword argument.
+
+ .. deprecated:: 1.22.0
+
+ Returns
+ -------
+ quantile : scalar or ndarray
+ If `q` is a single probability and `axis=None`, then the result
+ is a scalar. If multiple probability levels are given, first axis
+ of the result corresponds to the quantiles. The other axes are
+ the axes that remain after the reduction of `a`. If the input
+ contains integers or floats smaller than ``float64``, the output
+ data-type is ``float64``. Otherwise, the output data-type is the
+ same as that of the input. If `out` is specified, that array is
+ returned instead.
+
+ See Also
+ --------
+ mean
+ percentile : equivalent to quantile, but with q in the range [0, 100].
+ median : equivalent to ``quantile(..., 0.5)``
+ nanquantile
+
+ Notes
+ -----
+ Given a sample `a` from an underlying distribution, `quantile` provides a
+ nonparametric estimate of the inverse cumulative distribution function.
+
+ By default, this is done by interpolating between adjacent elements in
+ ``y``, a sorted copy of `a`::
+
+ (1-g)*y[j] + g*y[j+1]
+
+ where the index ``j`` and coefficient ``g`` are the integral and
+ fractional components of ``q * (n-1)``, and ``n`` is the number of
+ elements in the sample.
+
+ This is a special case of Equation 1 of H&F [1]_. More generally,
+
+ - ``j = (q*n + m - 1) // 1``, and
+ - ``g = (q*n + m - 1) % 1``,
+
+ where ``m`` may be defined according to several different conventions.
+ The preferred convention may be selected using the ``method`` parameter:
+
+ =============================== =============== ===============
+ ``method`` number in H&F ``m``
+ =============================== =============== ===============
+ ``interpolated_inverted_cdf`` 4 ``0``
+ ``hazen`` 5 ``1/2``
+ ``weibull`` 6 ``q``
+ ``linear`` (default) 7 ``1 - q``
+ ``median_unbiased`` 8 ``q/3 + 1/3``
+ ``normal_unbiased`` 9 ``q/4 + 3/8``
+ =============================== =============== ===============
+
+ Note that indices ``j`` and ``j + 1`` are clipped to the range ``0`` to
+ ``n - 1`` when the results of the formula would be outside the allowed
+ range of non-negative indices. The ``- 1`` in the formulas for ``j`` and
+ ``g`` accounts for Python's 0-based indexing.
+
+ The table above includes only the estimators from H&F that are continuous
+ functions of probability `q` (estimators 4-9). NumPy also provides the
+ three discontinuous estimators from H&F (estimators 1-3), where ``j`` is
+ defined as above, ``m`` is defined as follows, and ``g`` is a function
+ of the real-valued ``index = q*n + m - 1`` and ``j``.
+
+ 1. ``inverted_cdf``: ``m = 0`` and ``g = int(index - j > 0)``
+ 2. ``averaged_inverted_cdf``: ``m = 0`` and
+ ``g = (1 + int(index - j > 0)) / 2``
+ 3. ``closest_observation``: ``m = -1/2`` and
+ ``g = 1 - int((index == j) & (j%2 == 1))``
+
+ For backward compatibility with previous versions of NumPy, `quantile`
+ provides four additional discontinuous estimators. Like
+ ``method='linear'``, all have ``m = 1 - q`` so that ``j = q*(n-1) // 1``,
+ but ``g`` is defined as follows.
+
+ - ``lower``: ``g = 0``
+ - ``midpoint``: ``g = 0.5``
+ - ``higher``: ``g = 1``
+ - ``nearest``: ``g = (q*(n-1) % 1) > 0.5``
+
+ **Weighted quantiles:**
+ More formally, the quantile at probability level :math:`q` of a cumulative
+ distribution function :math:`F(y)=P(Y \\leq y)` with probability measure
+ :math:`P` is defined as any number :math:`x` that fulfills the
+ *coverage conditions*
+
+ .. math:: P(Y < x) \\leq q \\quad\\text{and}\\quad P(Y \\leq x) \\geq q
+
+ with random variable :math:`Y\\sim P`.
+ Sample quantiles, the result of `quantile`, provide nonparametric
+ estimation of the underlying population counterparts, represented by the
+ unknown :math:`F`, given a data vector `a` of length ``n``.
+
+ Some of the estimators above arise when one considers :math:`F` as the
+ empirical distribution function of the data, i.e.
+ :math:`F(y) = \\frac{1}{n} \\sum_i 1_{a_i \\leq y}`.
+ Then, different methods correspond to different choices of :math:`x` that
+ fulfill the above coverage conditions. Methods that follow this approach
+ are ``inverted_cdf`` and ``averaged_inverted_cdf``.
+
+ For weighted quantiles, the coverage conditions still hold. The
+ empirical cumulative distribution is simply replaced by its weighted
+ version, i.e.
+ :math:`P(Y \\leq t) = \\frac{1}{\\sum_i w_i} \\sum_i w_i 1_{x_i \\leq t}`.
+ Only ``method="inverted_cdf"`` supports weights.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> a = np.array([[10, 7, 4], [3, 2, 1]])
+ >>> a
+ array([[10, 7, 4],
+ [ 3, 2, 1]])
+ >>> np.quantile(a, 0.5)
+ 3.5
+ >>> np.quantile(a, 0.5, axis=0)
+ array([6.5, 4.5, 2.5])
+ >>> np.quantile(a, 0.5, axis=1)
+ array([7., 2.])
+ >>> np.quantile(a, 0.5, axis=1, keepdims=True)
+ array([[7.],
+ [2.]])
+ >>> m = np.quantile(a, 0.5, axis=0)
+ >>> out = np.zeros_like(m)
+ >>> np.quantile(a, 0.5, axis=0, out=out)
+ array([6.5, 4.5, 2.5])
+ >>> m
+ array([6.5, 4.5, 2.5])
+ >>> b = a.copy()
+ >>> np.quantile(b, 0.5, axis=1, overwrite_input=True)
+ array([7., 2.])
+ >>> assert not np.all(a == b)
+
+ See also `numpy.percentile` for a visualization of most methods.
+
+ References
+ ----------
+ .. [1] R. J. Hyndman and Y. Fan,
+ "Sample quantiles in statistical packages,"
+ The American Statistician, 50(4), pp. 361-365, 1996
+
+ """
+ if interpolation is not None:
+ method = _check_interpolation_as_method(
+ method, interpolation, "quantile")
+
+ a = np.asanyarray(a)
+ if a.dtype.kind == "c":
+ raise TypeError("a must be an array of real numbers")
+
+ # Use dtype of array if possible (e.g., if q is a python int or float).
+ if isinstance(q, (int, float)) and a.dtype.kind == "f":
+ q = np.asanyarray(q, dtype=a.dtype)
+ else:
+ q = np.asanyarray(q)
+
+ if not _quantile_is_valid(q):
+ raise ValueError("Quantiles must be in the range [0, 1]")
+
+ if weights is not None:
+ if method != "inverted_cdf":
+ msg = ("Only method 'inverted_cdf' supports weights. "
+ f"Got: {method}.")
+ raise ValueError(msg)
+ if axis is not None:
+ axis = _nx.normalize_axis_tuple(axis, a.ndim, argname="axis")
+ weights = _weights_are_valid(weights=weights, a=a, axis=axis)
+ if np.any(weights < 0):
+ raise ValueError("Weights must be non-negative.")
+
+ return _quantile_unchecked(
+ a, q, axis, out, overwrite_input, method, keepdims, weights)
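+
+
+# Editor's note: a minimal, hypothetical demo (illustrative only, not part
+# of NumPy, never called). For method='linear' (H&F 7), m = 1 - q, so the
+# virtual index is q*(n - 1); q = 0.125 is exactly representable in binary,
+# which makes the hand computation reproduce np.quantile bit for bit.
+def _quantile_linear_by_hand_demo():
+    y = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
+    q = 0.125
+    idx = q * (len(y) - 1)             # 0.5
+    j, g = int(idx), idx - int(idx)    # j = 0, g = 0.5
+    assert np.quantile(y, q) == (1 - g) * y[j] + g * y[j + 1] == 1.5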
+
+
+def _quantile_unchecked(a,
+ q,
+ axis=None,
+ out=None,
+ overwrite_input=False,
+ method="linear",
+ keepdims=False,
+ weights=None):
+ """Assumes that q is in [0, 1], and is an ndarray"""
+ return _ureduce(a,
+ func=_quantile_ureduce_func,
+ q=q,
+ weights=weights,
+ keepdims=keepdims,
+ axis=axis,
+ out=out,
+ overwrite_input=overwrite_input,
+ method=method)
+
+
+def _quantile_is_valid(q):
+ # avoid expensive reductions, relevant for arrays with < O(1000) elements
+ if q.ndim == 1 and q.size < 10:
+ for i in range(q.size):
+ if not (0.0 <= q[i] <= 1.0):
+ return False
+ elif not (q.min() >= 0 and q.max() <= 1):
+ return False
+ return True
+
+
+def _check_interpolation_as_method(method, interpolation, fname):
+ # Deprecated NumPy 1.22, 2021-11-08
+ warnings.warn(
+ f"the `interpolation=` argument to {fname} was renamed to "
+ "`method=`, which has additional options.\n"
+ "Users of the modes 'nearest', 'lower', 'higher', or "
+ "'midpoint' are encouraged to review the method they used. "
+ "(Deprecated NumPy 1.22)",
+ DeprecationWarning, stacklevel=4)
+ if method != "linear":
+ # sanity check, we assume this basically never happens
+ raise TypeError(
+ "You shall not pass both `method` and `interpolation`!\n"
+ "(`interpolation` is Deprecated in favor of `method`)")
+ return interpolation
+
+
+def _compute_virtual_index(n, quantiles, alpha: float, beta: float):
+ """
+ Compute the floating point indexes of an array for the linear
+ interpolation of quantiles.
+ n : array_like
+ The sample sizes.
+ quantiles : array_like
+        The quantile values.
+ alpha : float
+ A constant used to correct the index computed.
+ beta : float
+ A constant used to correct the index computed.
+
+ alpha and beta values depend on the chosen method
+ (see quantile documentation)
+
+ Reference:
+ Hyndman&Fan paper "Sample Quantiles in Statistical Packages",
+ DOI: 10.1080/00031305.1996.10473566
+ """
+ return n * quantiles + (
+ alpha + quantiles * (1 - alpha - beta)
+ ) - 1
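+
+
+# Editor's note: a minimal, hypothetical demo (illustrative only, not part
+# of NumPy, never called). With alpha = beta = 1 (method='linear') the
+# formula collapses to n*q + (1 - q) - 1 = q*(n - 1); the values below keep
+# the arithmetic exact in binary floating point.
+def _virtual_index_linear_demo():
+    n, q = 9, 0.25
+    assert _compute_virtual_index(n, q, 1.0, 1.0) == q * (n - 1) == 2.0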
+
+
+def _get_gamma(virtual_indexes, previous_indexes, method):
+ """
+ Compute gamma (a.k.a 'm' or 'weight') for the linear interpolation
+ of quantiles.
+
+ virtual_indexes : array_like
+ The indexes where the percentile is supposed to be found in the sorted
+ sample.
+ previous_indexes : array_like
+ The floor values of virtual_indexes.
+    method : dict
+        The properties of the chosen interpolation method, which may
+        include a specific rule ("fix_gamma") modifying gamma.
+
+ gamma is usually the fractional part of virtual_indexes but can be modified
+ by the interpolation method.
+ """
+ gamma = np.asanyarray(virtual_indexes - previous_indexes)
+ gamma = method["fix_gamma"](gamma, virtual_indexes)
+ # Ensure both that we have an array, and that we keep the dtype
+ # (which may have been matched to the input array).
+ return np.asanyarray(gamma, dtype=virtual_indexes.dtype)
+
+
+def _lerp(a, b, t, out=None):
+ """
+    Compute the linear interpolation weighted by gamma on each point of
+    two arrays of the same shape.
+
+ a : array_like
+ Left bound.
+ b : array_like
+ Right bound.
+ t : array_like
+ The interpolation weight.
+ out : array_like
+ Output array.
+ """
+ diff_b_a = subtract(b, a)
+ # asanyarray is a stop-gap until gh-13105
+ lerp_interpolation = asanyarray(add(a, diff_b_a * t, out=out))
+ subtract(b, diff_b_a * (1 - t), out=lerp_interpolation, where=t >= 0.5,
+ casting='unsafe', dtype=type(lerp_interpolation.dtype))
+ if lerp_interpolation.ndim == 0 and out is None:
+ lerp_interpolation = lerp_interpolation[()] # unpack 0d arrays
+ return lerp_interpolation
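+
+
+# Editor's note: a minimal, hypothetical demo (illustrative only, not part
+# of NumPy, never called). The branch switch at t >= 0.5 makes _lerp hit b
+# exactly at t = 1 (it computes b - 0), whereas the naive one-sided form
+# a + (b - a)*t can lose b entirely to rounding when a and b differ greatly
+# in magnitude.
+def _lerp_endpoint_demo():
+    a, b = -1.0, 1e-16
+    assert _lerp(a, b, 1.0) == b       # exact at the right endpoint
+    assert a + (b - a) * 1.0 != b      # (b - a) rounds to 1.0, sum to 0.0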
+
+
+def _get_gamma_mask(shape, default_value, conditioned_value, where):
+ out = np.full(shape, default_value)
+ np.copyto(out, conditioned_value, where=where, casting="unsafe")
+ return out
+
+
+def _discrete_interpolation_to_boundaries(index, gamma_condition_fun):
+ previous = np.floor(index)
+ next = previous + 1
+ gamma = index - previous
+ res = _get_gamma_mask(shape=index.shape,
+ default_value=next,
+ conditioned_value=previous,
+ where=gamma_condition_fun(gamma, index)
+ ).astype(np.intp)
+ # Some methods can lead to out-of-bound integers, clip them:
+ res[res < 0] = 0
+ return res
+
+
+def _closest_observation(n, quantiles):
+ # "choose the nearest even order statistic at g=0" (H&F (1996) pp. 362).
+ # Order is 1-based so for zero-based indexing round to nearest odd index.
+ gamma_fun = lambda gamma, index: (gamma == 0) & (np.floor(index) % 2 == 1)
+ return _discrete_interpolation_to_boundaries((n * quantiles) - 1 - 0.5,
+ gamma_fun)
+
+
+def _inverted_cdf(n, quantiles):
+ gamma_fun = lambda gamma, _: (gamma == 0)
+ return _discrete_interpolation_to_boundaries((n * quantiles) - 1,
+ gamma_fun)
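+
+
+# Editor's note: a minimal, hypothetical demo (illustrative only, not part
+# of NumPy, never called). 'inverted_cdf' (H&F 1) performs no interpolation:
+# it returns the smallest sample value whose empirical CDF reaches q.
+def _inverted_cdf_demo():
+    a = np.array([1.0, 2.0, 3.0, 4.0])
+    assert np.quantile(a, 0.50, method="inverted_cdf") == 2.0  # ECDF(2)=0.5
+    assert np.quantile(a, 0.51, method="inverted_cdf") == 3.0  # next step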
+
+
+def _quantile_ureduce_func(
+ a: np.array,
+ q: np.array,
+ weights: np.array,
+ axis: int | None = None,
+ out=None,
+ overwrite_input: bool = False,
+ method="linear",
+) -> np.array:
+ if q.ndim > 2:
+ # The code below works fine for nd, but it might not have useful
+ # semantics. For now, keep the supported dimensions the same as it was
+ # before.
+ raise ValueError("q must be a scalar or 1d")
+ if overwrite_input:
+ if axis is None:
+ axis = 0
+ arr = a.ravel()
+ wgt = None if weights is None else weights.ravel()
+ else:
+ arr = a
+ wgt = weights
+ elif axis is None:
+ axis = 0
+ arr = a.flatten()
+ wgt = None if weights is None else weights.flatten()
+ else:
+ arr = a.copy()
+ wgt = weights
+ result = _quantile(arr,
+ quantiles=q,
+ axis=axis,
+ method=method,
+ out=out,
+ weights=wgt)
+ return result
+
+
+def _get_indexes(arr, virtual_indexes, valid_values_count):
+ """
+    Get the valid indexes of arr neighbouring virtual_indexes.
+
+    Note: this is a companion function to the linear interpolation of
+    quantiles.
+
+ Returns
+ -------
+ (previous_indexes, next_indexes): Tuple
+ A Tuple of virtual_indexes neighbouring indexes
+ """
+ previous_indexes = np.asanyarray(np.floor(virtual_indexes))
+ next_indexes = np.asanyarray(previous_indexes + 1)
+ indexes_above_bounds = virtual_indexes >= valid_values_count - 1
+ # When indexes is above max index, take the max value of the array
+ if indexes_above_bounds.any():
+ previous_indexes[indexes_above_bounds] = -1
+ next_indexes[indexes_above_bounds] = -1
+ # When indexes is below min index, take the min value of the array
+ indexes_below_bounds = virtual_indexes < 0
+ if indexes_below_bounds.any():
+ previous_indexes[indexes_below_bounds] = 0
+ next_indexes[indexes_below_bounds] = 0
+ if np.issubdtype(arr.dtype, np.inexact):
+        # After the sort, slices having NaNs will have a NaN as their
+        # last element.
+ virtual_indexes_nans = np.isnan(virtual_indexes)
+ if virtual_indexes_nans.any():
+ previous_indexes[virtual_indexes_nans] = -1
+ next_indexes[virtual_indexes_nans] = -1
+ previous_indexes = previous_indexes.astype(np.intp)
+ next_indexes = next_indexes.astype(np.intp)
+ return previous_indexes, next_indexes
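+
+
+# Editor's note: a minimal, hypothetical demo (illustrative only, not part
+# of NumPy, never called). Out-of-range virtual indexes are clamped so both
+# neighbours land on real elements; above the top, both point at index -1,
+# the maximum of the partitioned array.
+def _get_indexes_clamping_demo():
+    arr = np.array([1.0, 2.0, 3.0])
+    prev_, next_ = _get_indexes(arr, np.array([2.5]), 3)
+    assert prev_[0] == -1 and next_[0] == -1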
+
+
+def _quantile(
+ arr: np.array,
+ quantiles: np.array,
+ axis: int = -1,
+ method="linear",
+ out=None,
+ weights=None,
+):
+ """
+    Private function that doesn't support extended axis or keepdims.
+    These features are added to it via _ureduce.
+    See nanpercentile for parameter usage.
+    It computes the quantiles of the array for the given axis.
+    A linear interpolation is performed based on the chosen `method`.
+
+    By default, the method is "linear", where alpha == beta == 1, which
+    performs the 7th method of Hyndman & Fan.
+    With "median_unbiased" we get alpha == beta == 1/3,
+    thus the 8th method of Hyndman & Fan.
+ """
+ # --- Setup
+ arr = np.asanyarray(arr)
+ values_count = arr.shape[axis]
+ # The dimensions of `q` are prepended to the output shape, so we need the
+ # axis being sampled from `arr` to be last.
+ if axis != 0: # But moveaxis is slow, so only call it if necessary.
+ arr = np.moveaxis(arr, axis, destination=0)
+ supports_nans = (
+ np.issubdtype(arr.dtype, np.inexact) or arr.dtype.kind in 'Mm'
+ )
+
+ if weights is None:
+ # --- Computation of indexes
+        # Index at which to find the value in the sorted array.
+        # Virtual because it is a floating point value, not a valid index.
+        # The nearest neighbours are used for interpolation.
+ try:
+ method_props = _QuantileMethods[method]
+ except KeyError:
+ raise ValueError(
+ f"{method!r} is not a valid method. Use one of: "
+ f"{_QuantileMethods.keys()}") from None
+ virtual_indexes = method_props["get_virtual_index"](values_count,
+ quantiles)
+ virtual_indexes = np.asanyarray(virtual_indexes)
+
+ if method_props["fix_gamma"] is None:
+ supports_integers = True
+ else:
+ int_virtual_indices = np.issubdtype(virtual_indexes.dtype,
+ np.integer)
+ supports_integers = method == 'linear' and int_virtual_indices
+
+ if supports_integers:
+ # No interpolation needed, take the points along axis
+ if supports_nans:
+ # may contain nan, which would sort to the end
+ arr.partition(
+ concatenate((virtual_indexes.ravel(), [-1])), axis=0,
+ )
+ slices_having_nans = np.isnan(arr[-1, ...])
+ else:
+ # cannot contain nan
+ arr.partition(virtual_indexes.ravel(), axis=0)
+ slices_having_nans = np.array(False, dtype=bool)
+ result = take(arr, virtual_indexes, axis=0, out=out)
+ else:
+ previous_indexes, next_indexes = _get_indexes(arr,
+ virtual_indexes,
+ values_count)
+ # --- Sorting
+ arr.partition(
+ np.unique(np.concatenate(([0, -1],
+ previous_indexes.ravel(),
+ next_indexes.ravel(),
+ ))),
+ axis=0)
+ if supports_nans:
+ slices_having_nans = np.isnan(arr[-1, ...])
+ else:
+ slices_having_nans = None
+ # --- Get values from indexes
+ previous = arr[previous_indexes]
+ next = arr[next_indexes]
+ # --- Linear interpolation
+ gamma = _get_gamma(virtual_indexes, previous_indexes, method_props)
+ result_shape = virtual_indexes.shape + (1,) * (arr.ndim - 1)
+ gamma = gamma.reshape(result_shape)
+ result = _lerp(previous,
+ next,
+ gamma,
+ out=out)
+ else:
+ # Weighted case
+ # This implements method="inverted_cdf", the only supported weighted
+ # method, which needs to sort anyway.
+ weights = np.asanyarray(weights)
+ if axis != 0:
+ weights = np.moveaxis(weights, axis, destination=0)
+ index_array = np.argsort(arr, axis=0, kind="stable")
+
+        # arr = arr[index_array, ...] would work but adds trailing
+        # dimensions of 1; take_along_axis keeps the shape.
+ arr = np.take_along_axis(arr, index_array, axis=0)
+ if weights.shape == arr.shape:
+ weights = np.take_along_axis(weights, index_array, axis=0)
+ else:
+ # weights is 1d
+ weights = weights.reshape(-1)[index_array, ...]
+
+ if supports_nans:
+ # may contain nan, which would sort to the end
+ slices_having_nans = np.isnan(arr[-1, ...])
+ else:
+ # cannot contain nan
+ slices_having_nans = np.array(False, dtype=bool)
+
+ # We use the weights to calculate the empirical cumulative
+ # distribution function cdf
+ cdf = weights.cumsum(axis=0, dtype=np.float64)
+ cdf /= cdf[-1, ...] # normalization to 1
+ # Search index i such that
+ # sum(weights[j], j=0..i-1) < quantile <= sum(weights[j], j=0..i)
+ # is then equivalent to
+ # cdf[i-1] < quantile <= cdf[i]
+ # Unfortunately, searchsorted only accepts 1-d arrays as first
+ # argument, so we will need to iterate over dimensions.
+
+ # Without the following cast, searchsorted can return surprising
+ # results, e.g.
+ # np.searchsorted(np.array([0.2, 0.4, 0.6, 0.8, 1.]),
+ # np.array(0.4, dtype=np.float32), side="left")
+ # returns 2 instead of 1 because 0.4 is not binary representable.
+ if quantiles.dtype.kind == "f":
+ cdf = cdf.astype(quantiles.dtype)
+ # Weights must be non-negative, so we might have zero weights at the
+ # beginning leading to some leading zeros in cdf. The call to
+ # np.searchsorted for quantiles=0 will then pick the first element,
+ # but should pick the first one larger than zero. We
+ # therefore simply set 0 values in cdf to -1.
+ if np.any(cdf[0, ...] == 0):
+ cdf[cdf == 0] = -1
+
+ def find_cdf_1d(arr, cdf):
+ indices = np.searchsorted(cdf, quantiles, side="left")
+ # We might have reached the maximum with i = len(arr), e.g. for
+ # quantiles = 1, and need to cut it to len(arr) - 1.
+ indices = minimum(indices, values_count - 1)
+ result = take(arr, indices, axis=0)
+ return result
+
+ r_shape = arr.shape[1:]
+ if quantiles.ndim > 0:
+ r_shape = quantiles.shape + r_shape
+ if out is None:
+ result = np.empty_like(arr, shape=r_shape)
+ else:
+ if out.shape != r_shape:
+ msg = (f"Wrong shape of argument 'out', shape={r_shape} is "
+ f"required; got shape={out.shape}.")
+ raise ValueError(msg)
+ result = out
+
+        # This mirrors apply_along_axis for axis=0. Note that Ni = ()
+        # always here, so it is omitted.
+ Nk = arr.shape[1:]
+ for kk in np.ndindex(Nk):
+ result[(...,) + kk] = find_cdf_1d(
+ arr[np.s_[:, ] + kk], cdf[np.s_[:, ] + kk]
+ )
+
+ # Make result the same as in unweighted inverted_cdf.
+ if result.shape == () and result.dtype == np.dtype("O"):
+ result = result.item()
+
+ if np.any(slices_having_nans):
+ if result.ndim == 0 and out is None:
+ # can't write to a scalar, but indexing will be correct
+ result = arr[-1]
+ else:
+ np.copyto(result, arr[-1, ...], where=slices_having_nans)
+ return result
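+
+
+# Editor's note: a minimal, hypothetical demo (illustrative only, not part
+# of NumPy, never called). With integer weights, the weighted 'inverted_cdf'
+# path above matches the unweighted result on a sample with each value
+# repeated weight times, since both describe the same empirical CDF.
+def _weighted_quantile_demo():
+    a = np.array([1.0, 2.0, 3.0])
+    w = np.array([1, 2, 1])
+    assert (np.quantile(a, 0.5, weights=w, method="inverted_cdf")
+            == np.quantile(np.repeat(a, w), 0.5, method="inverted_cdf"))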
+
+
+def _trapezoid_dispatcher(y, x=None, dx=None, axis=None):
+ return (y, x)
+
+
+@array_function_dispatch(_trapezoid_dispatcher)
+def trapezoid(y, x=None, dx=1.0, axis=-1):
+ r"""
+ Integrate along the given axis using the composite trapezoidal rule.
+
+ If `x` is provided, the integration happens in sequence along its
+ elements - they are not sorted.
+
+ Integrate `y` (`x`) along each 1d slice on the given axis, compute
+ :math:`\int y(x) dx`.
+ When `x` is specified, this integrates along the parametric curve,
+ computing :math:`\int_t y(t) dt =
+ \int_t y(t) \left.\frac{dx}{dt}\right|_{x=x(t)} dt`.
+
+ .. versionadded:: 2.0.0
+
+ Parameters
+ ----------
+ y : array_like
+ Input array to integrate.
+ x : array_like, optional
+ The sample points corresponding to the `y` values. If `x` is None,
+ the sample points are assumed to be evenly spaced `dx` apart. The
+ default is None.
+ dx : scalar, optional
+ The spacing between sample points when `x` is None. The default is 1.
+ axis : int, optional
+ The axis along which to integrate.
+
+ Returns
+ -------
+ trapezoid : float or ndarray
+        Definite integral of the n-dimensional array `y`, approximated
+        along a single axis by the trapezoidal rule. If `y` is a
+        1-dimensional array, then the result is a float. If `n` is greater
+        than 1, then the result is an (`n`-1)-dimensional array.
+
+ See Also
+ --------
+ sum, cumsum
+
+ Notes
+ -----
+    Image [2]_ illustrates the trapezoidal rule: the y-axis locations of
+    the points are taken from the `y` array, and by default the x-axis
+    distances between points are 1.0, but they can also be provided via the
+    `x` array or the `dx` scalar. The return value equals the combined area
+    under the red lines.
+
+
+ References
+ ----------
+ .. [1] Wikipedia page: https://en.wikipedia.org/wiki/Trapezoidal_rule
+
+ .. [2] Illustration image:
+ https://en.wikipedia.org/wiki/File:Composite_trapezoidal_rule_illustration.png
+
+ Examples
+ --------
+ >>> import numpy as np
+
+ Use the trapezoidal rule on evenly spaced points:
+
+ >>> np.trapezoid([1, 2, 3])
+ 4.0
+
+ The spacing between sample points can be selected by either the
+ ``x`` or ``dx`` arguments:
+
+ >>> np.trapezoid([1, 2, 3], x=[4, 6, 8])
+ 8.0
+ >>> np.trapezoid([1, 2, 3], dx=2)
+ 8.0
+
+ Using a decreasing ``x`` corresponds to integrating in reverse:
+
+ >>> np.trapezoid([1, 2, 3], x=[8, 6, 4])
+ -8.0
+
+ More generally ``x`` is used to integrate along a parametric curve. We can
+ estimate the integral :math:`\int_0^1 x^2 = 1/3` using:
+
+ >>> x = np.linspace(0, 1, num=50)
+ >>> y = x**2
+ >>> np.trapezoid(y, x)
+ 0.33340274885464394
+
+ Or estimate the area of a circle, noting we repeat the sample which closes
+ the curve:
+
+ >>> theta = np.linspace(0, 2 * np.pi, num=1000, endpoint=True)
+ >>> np.trapezoid(np.cos(theta), x=np.sin(theta))
+ 3.141571941375841
+
+ ``np.trapezoid`` can be applied along a specified axis to do multiple
+ computations in one call:
+
+ >>> a = np.arange(6).reshape(2, 3)
+ >>> a
+ array([[0, 1, 2],
+ [3, 4, 5]])
+ >>> np.trapezoid(a, axis=0)
+ array([1.5, 2.5, 3.5])
+ >>> np.trapezoid(a, axis=1)
+ array([2., 8.])
+ """
+
+ y = asanyarray(y)
+ if x is None:
+ d = dx
+ else:
+ x = asanyarray(x)
+ if x.ndim == 1:
+ d = diff(x)
+ # reshape to correct shape
+ shape = [1] * y.ndim
+ shape[axis] = d.shape[0]
+ d = d.reshape(shape)
+ else:
+ d = diff(x, axis=axis)
+ nd = y.ndim
+ slice1 = [slice(None)] * nd
+ slice2 = [slice(None)] * nd
+ slice1[axis] = slice(1, None)
+ slice2[axis] = slice(None, -1)
+ try:
+ ret = (d * (y[tuple(slice1)] + y[tuple(slice2)]) / 2.0).sum(axis)
+ except ValueError:
+ # Operations didn't work, cast to ndarray
+ d = np.asarray(d)
+ y = np.asarray(y)
+ ret = add.reduce(d * (y[tuple(slice1)] + y[tuple(slice2)]) / 2.0, axis)
+ return ret
+
+
+@set_module('numpy')
+def trapz(y, x=None, dx=1.0, axis=-1):
+ """
+ `trapz` is deprecated in NumPy 2.0.
+
+ Please use `trapezoid` instead, or one of the numerical integration
+ functions in `scipy.integrate`.
+ """
+ # Deprecated in NumPy 2.0, 2023-08-18
+ warnings.warn(
+ "`trapz` is deprecated. Use `trapezoid` instead, or one of the "
+ "numerical integration functions in `scipy.integrate`.",
+ DeprecationWarning,
+ stacklevel=2
+ )
+ return trapezoid(y, x=x, dx=dx, axis=axis)
+
+
+def _meshgrid_dispatcher(*xi, copy=None, sparse=None, indexing=None):
+ return xi
+
+
+# Based on scitools meshgrid
+@array_function_dispatch(_meshgrid_dispatcher)
+def meshgrid(*xi, copy=True, sparse=False, indexing='xy'):
+ """
+ Return a tuple of coordinate matrices from coordinate vectors.
+
+ Make N-D coordinate arrays for vectorized evaluations of
+ N-D scalar/vector fields over N-D grids, given
+ one-dimensional coordinate arrays x1, x2,..., xn.
+
+ Parameters
+ ----------
+ x1, x2,..., xn : array_like
+ 1-D arrays representing the coordinates of a grid.
+ indexing : {'xy', 'ij'}, optional
+ Cartesian ('xy', default) or matrix ('ij') indexing of output.
+ See Notes for more details.
+ sparse : bool, optional
+ If True the shape of the returned coordinate array for dimension *i*
+ is reduced from ``(N1, ..., Ni, ... Nn)`` to
+ ``(1, ..., 1, Ni, 1, ..., 1)``. These sparse coordinate grids are
+ intended to be used with :ref:`basics.broadcasting`. When all
+ coordinates are used in an expression, broadcasting still leads to a
+        fully-dimensional result array.
+
+ Default is False.
+
+ copy : bool, optional
+        If False, views into the original arrays are returned in order to
+ conserve memory. Default is True. Please note that
+ ``sparse=False, copy=False`` will likely return non-contiguous
+ arrays. Furthermore, more than one element of a broadcast array
+ may refer to a single memory location. If you need to write to the
+ arrays, make copies first.
+
+ Returns
+ -------
+ X1, X2,..., XN : tuple of ndarrays
+ For vectors `x1`, `x2`,..., `xn` with lengths ``Ni=len(xi)``,
+ returns ``(N1, N2, N3,..., Nn)`` shaped arrays if indexing='ij'
+ or ``(N2, N1, N3,..., Nn)`` shaped arrays if indexing='xy'
+ with the elements of `xi` repeated to fill the matrix along
+ the first dimension for `x1`, the second for `x2` and so on.
+
+ Notes
+ -----
+ This function supports both indexing conventions through the indexing
+ keyword argument. Giving the string 'ij' returns a meshgrid with
+ matrix indexing, while 'xy' returns a meshgrid with Cartesian indexing.
+ In the 2-D case with inputs of length M and N, the outputs are of shape
+ (N, M) for 'xy' indexing and (M, N) for 'ij' indexing. In the 3-D case
+ with inputs of length M, N and P, outputs are of shape (N, M, P) for
+ 'xy' indexing and (M, N, P) for 'ij' indexing. The difference is
+ illustrated by the following code snippet::
+
+ xv, yv = np.meshgrid(x, y, indexing='ij')
+ for i in range(nx):
+ for j in range(ny):
+ # treat xv[i,j], yv[i,j]
+
+ xv, yv = np.meshgrid(x, y, indexing='xy')
+ for i in range(nx):
+ for j in range(ny):
+ # treat xv[j,i], yv[j,i]
+
+ In the 1-D and 0-D case, the indexing and sparse keywords have no effect.
+
+ See Also
+ --------
+ mgrid : Construct a multi-dimensional "meshgrid" using indexing notation.
+ ogrid : Construct an open multi-dimensional "meshgrid" using indexing
+ notation.
+ :ref:`how-to-index`
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> nx, ny = (3, 2)
+ >>> x = np.linspace(0, 1, nx)
+ >>> y = np.linspace(0, 1, ny)
+ >>> xv, yv = np.meshgrid(x, y)
+ >>> xv
+ array([[0. , 0.5, 1. ],
+ [0. , 0.5, 1. ]])
+ >>> yv
+ array([[0., 0., 0.],
+ [1., 1., 1.]])
+
+ The result of `meshgrid` is a coordinate grid:
+
+ >>> import matplotlib.pyplot as plt
+ >>> plt.plot(xv, yv, marker='o', color='k', linestyle='none')
+ >>> plt.show()
+
+ You can create sparse output arrays to save memory and computation time.
+
+ >>> xv, yv = np.meshgrid(x, y, sparse=True)
+ >>> xv
+ array([[0. , 0.5, 1. ]])
+ >>> yv
+ array([[0.],
+ [1.]])
+
+ `meshgrid` is very useful to evaluate functions on a grid. If the
+ function depends on all coordinates, both dense and sparse outputs can be
+ used.
+
+ >>> x = np.linspace(-5, 5, 101)
+ >>> y = np.linspace(-5, 5, 101)
+ >>> # full coordinate arrays
+ >>> xx, yy = np.meshgrid(x, y)
+ >>> zz = np.sqrt(xx**2 + yy**2)
+ >>> xx.shape, yy.shape, zz.shape
+ ((101, 101), (101, 101), (101, 101))
+ >>> # sparse coordinate arrays
+ >>> xs, ys = np.meshgrid(x, y, sparse=True)
+ >>> zs = np.sqrt(xs**2 + ys**2)
+ >>> xs.shape, ys.shape, zs.shape
+ ((1, 101), (101, 1), (101, 101))
+ >>> np.array_equal(zz, zs)
+ True
+
+ >>> h = plt.contourf(x, y, zs)
+ >>> plt.axis('scaled')
+ >>> plt.colorbar()
+ >>> plt.show()
+ """
+ ndim = len(xi)
+
+ if indexing not in ['xy', 'ij']:
+ raise ValueError(
+ "Valid values for `indexing` are 'xy' and 'ij'.")
+
+ s0 = (1,) * ndim
+ output = [np.asanyarray(x).reshape(s0[:i] + (-1,) + s0[i + 1:])
+ for i, x in enumerate(xi)]
+
+ if indexing == 'xy' and ndim > 1:
+ # switch first and second axis
+ output[0].shape = (1, -1) + s0[2:]
+ output[1].shape = (-1, 1) + s0[2:]
+
+ if not sparse:
+ # Return the full N-D matrix (not only the 1-D vector)
+ output = np.broadcast_arrays(*output, subok=True)
+
+ if copy:
+ output = tuple(x.copy() for x in output)
+
+ return output
+
+
+def _delete_dispatcher(arr, obj, axis=None):
+ return (arr, obj)
+
+
+@array_function_dispatch(_delete_dispatcher)
+def delete(arr, obj, axis=None):
+ """
+ Return a new array with sub-arrays along an axis deleted. For a one
+ dimensional array, this returns those entries not returned by
+ `arr[obj]`.
+
+ Parameters
+ ----------
+ arr : array_like
+ Input array.
+ obj : slice, int, array-like of ints or bools
+ Indicate indices of sub-arrays to remove along the specified axis.
+
+ .. versionchanged:: 1.19.0
+ Boolean indices are now treated as a mask of elements to remove,
+ rather than being cast to the integers 0 and 1.
+
+ axis : int, optional
+ The axis along which to delete the subarray defined by `obj`.
+ If `axis` is None, `obj` is applied to the flattened array.
+
+ Returns
+ -------
+ out : ndarray
+ A copy of `arr` with the elements specified by `obj` removed. Note
+ that `delete` does not occur in-place. If `axis` is None, `out` is
+ a flattened array.
+
+ See Also
+ --------
+ insert : Insert elements into an array.
+ append : Append elements at the end of an array.
+
+ Notes
+ -----
+ Often it is preferable to use a boolean mask. For example:
+
+ >>> arr = np.arange(12) + 1
+ >>> mask = np.ones(len(arr), dtype=bool)
+ >>> mask[[0,2,4]] = False
+ >>> result = arr[mask,...]
+
+ Is equivalent to ``np.delete(arr, [0,2,4], axis=0)``, but allows further
+ use of `mask`.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])
+ >>> arr
+ array([[ 1, 2, 3, 4],
+ [ 5, 6, 7, 8],
+ [ 9, 10, 11, 12]])
+ >>> np.delete(arr, 1, 0)
+ array([[ 1, 2, 3, 4],
+ [ 9, 10, 11, 12]])
+
+ >>> np.delete(arr, np.s_[::2], 1)
+ array([[ 2, 4],
+ [ 6, 8],
+ [10, 12]])
+ >>> np.delete(arr, [1,3,5], None)
+ array([ 1, 3, 5, 7, 8, 9, 10, 11, 12])
+
+ """
+ conv = _array_converter(arr)
+ arr, = conv.as_arrays(subok=False)
+
+ ndim = arr.ndim
+ arrorder = 'F' if arr.flags.fnc else 'C'
+ if axis is None:
+ if ndim != 1:
+ arr = arr.ravel()
+ # needed for np.matrix, which is still not 1d after being ravelled
+ ndim = arr.ndim
+ axis = ndim - 1
+ else:
+ axis = normalize_axis_index(axis, ndim)
+
+ slobj = [slice(None)] * ndim
+ N = arr.shape[axis]
+ newshape = list(arr.shape)
+
+ if isinstance(obj, slice):
+ start, stop, step = obj.indices(N)
+ xr = range(start, stop, step)
+ numtodel = len(xr)
+
+ if numtodel <= 0:
+ return conv.wrap(arr.copy(order=arrorder), to_scalar=False)
+
+ # Invert if step is negative:
+ if step < 0:
+ step = -step
+ start = xr[-1]
+ stop = xr[0] + 1
+
+ newshape[axis] -= numtodel
+ new = empty(newshape, arr.dtype, arrorder)
+ # copy initial chunk
+ if start == 0:
+ pass
+ else:
+ slobj[axis] = slice(None, start)
+ new[tuple(slobj)] = arr[tuple(slobj)]
+ # copy end chunk
+ if stop == N:
+ pass
+ else:
+ slobj[axis] = slice(stop - numtodel, None)
+ slobj2 = [slice(None)] * ndim
+ slobj2[axis] = slice(stop, None)
+ new[tuple(slobj)] = arr[tuple(slobj2)]
+ # copy middle pieces
+ if step == 1:
+ pass
+ else: # use array indexing.
+ keep = ones(stop - start, dtype=bool)
+ keep[:stop - start:step] = False
+ slobj[axis] = slice(start, stop - numtodel)
+ slobj2 = [slice(None)] * ndim
+ slobj2[axis] = slice(start, stop)
+ arr = arr[tuple(slobj2)]
+ slobj2[axis] = keep
+ new[tuple(slobj)] = arr[tuple(slobj2)]
+
+ return conv.wrap(new, to_scalar=False)
+
+ if isinstance(obj, (int, integer)) and not isinstance(obj, bool):
+ single_value = True
+ else:
+ single_value = False
+ _obj = obj
+ obj = np.asarray(obj)
+    # `size == 0` allows empty lists, mirroring indexing behaviour, but (as
+    # with indexing) the check is really too generic:
+ if obj.size == 0 and not isinstance(_obj, np.ndarray):
+ obj = obj.astype(intp)
+ elif obj.size == 1 and obj.dtype.kind in "ui":
+ # For a size 1 integer array we can use the single-value path
+ # (most dtypes, except boolean, should just fail later).
+ obj = obj.item()
+ single_value = True
+
+ if single_value:
+ # optimization for a single value
+ if (obj < -N or obj >= N):
+ raise IndexError(
+ f"index {obj} is out of bounds for axis {axis} with "
+ f"size {N}")
+ if (obj < 0):
+ obj += N
+ newshape[axis] -= 1
+ new = empty(newshape, arr.dtype, arrorder)
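+        # copy the part before the deleted index, then the part after it,
+        # shifted left by one along `axis`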
+ slobj[axis] = slice(None, obj)
+ new[tuple(slobj)] = arr[tuple(slobj)]
+ slobj[axis] = slice(obj, None)
+ slobj2 = [slice(None)] * ndim
+ slobj2[axis] = slice(obj + 1, None)
+ new[tuple(slobj)] = arr[tuple(slobj2)]
+ else:
+ if obj.dtype == bool:
+ if obj.shape != (N,):
+ raise ValueError('boolean array argument obj to delete '
+ 'must be one dimensional and match the axis '
+ f'length of {N}')
+
+ # optimization, the other branch is slower
+ keep = ~obj
+ else:
+ keep = ones(N, dtype=bool)
+ keep[obj,] = False
+
+ slobj[axis] = keep
+ new = arr[tuple(slobj)]
+
+ return conv.wrap(new, to_scalar=False)
+
+
+def _insert_dispatcher(arr, obj, values, axis=None):
+ return (arr, obj, values)
+
+
+@array_function_dispatch(_insert_dispatcher)
+def insert(arr, obj, values, axis=None):
+ """
+ Insert values along the given axis before the given indices.
+
+ Parameters
+ ----------
+ arr : array_like
+ Input array.
+ obj : slice, int, array-like of ints or bools
+ Object that defines the index or indices before which `values` is
+ inserted.
+
+ .. versionchanged:: 2.1.2
+ Boolean indices are now treated as a mask of elements to insert,
+ rather than being cast to the integers 0 and 1.
+
+        Multiple insertions are supported when `obj` is a single scalar or
+        a sequence with one element (similar to calling insert multiple
+        times).
+ values : array_like
+ Values to insert into `arr`. If the type of `values` is different
+ from that of `arr`, `values` is converted to the type of `arr`.
+ `values` should be shaped so that ``arr[...,obj,...] = values``
+ is legal.
+ axis : int, optional
+ Axis along which to insert `values`. If `axis` is None then `arr`
+ is flattened first.
+
+ Returns
+ -------
+ out : ndarray
+ A copy of `arr` with `values` inserted. Note that `insert`
+ does not occur in-place: a new array is returned. If
+ `axis` is None, `out` is a flattened array.
+
+ See Also
+ --------
+ append : Append elements at the end of an array.
+ concatenate : Join a sequence of arrays along an existing axis.
+ delete : Delete elements from an array.
+
+ Notes
+ -----
+    Note that for higher dimensional inserts ``obj=0`` behaves very
+    differently from ``obj=[0]``, just as ``arr[:,0,:] = values`` differs
+    from ``arr[:,[0],:] = values``. This is because of the difference
+    between basic and advanced :ref:`indexing <basics.indexing>`.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> a = np.arange(6).reshape(3, 2)
+ >>> a
+ array([[0, 1],
+ [2, 3],
+ [4, 5]])
+ >>> np.insert(a, 1, 6)
+ array([0, 6, 1, 2, 3, 4, 5])
+ >>> np.insert(a, 1, 6, axis=1)
+ array([[0, 6, 1],
+ [2, 6, 3],
+ [4, 6, 5]])
+
+    Difference between sequence and scalars,
+    showing how ``obj=[1]`` behaves differently from ``obj=1``:
+
+ >>> np.insert(a, [1], [[7],[8],[9]], axis=1)
+ array([[0, 7, 1],
+ [2, 8, 3],
+ [4, 9, 5]])
+ >>> np.insert(a, 1, [[7],[8],[9]], axis=1)
+ array([[0, 7, 8, 9, 1],
+ [2, 7, 8, 9, 3],
+ [4, 7, 8, 9, 5]])
+ >>> np.array_equal(np.insert(a, 1, [7, 8, 9], axis=1),
+ ... np.insert(a, [1], [[7],[8],[9]], axis=1))
+ True
+
+ >>> b = a.flatten()
+ >>> b
+ array([0, 1, 2, 3, 4, 5])
+ >>> np.insert(b, [2, 2], [6, 7])
+ array([0, 1, 6, 7, 2, 3, 4, 5])
+
+ >>> np.insert(b, slice(2, 4), [7, 8])
+ array([0, 1, 7, 2, 8, 3, 4, 5])
+
+ >>> np.insert(b, [2, 2], [7.13, False]) # type casting
+ array([0, 1, 7, 0, 2, 3, 4, 5])
+
+ >>> x = np.arange(8).reshape(2, 4)
+ >>> idx = (1, 3)
+ >>> np.insert(x, idx, 999, axis=1)
+ array([[ 0, 999, 1, 2, 999, 3],
+ [ 4, 999, 5, 6, 999, 7]])
+
+ """
+ conv = _array_converter(arr)
+ arr, = conv.as_arrays(subok=False)
+
+ ndim = arr.ndim
+ arrorder = 'F' if arr.flags.fnc else 'C'
+ if axis is None:
+ if ndim != 1:
+ arr = arr.ravel()
+ # needed for np.matrix, which is still not 1d after being ravelled
+ ndim = arr.ndim
+ axis = ndim - 1
+ else:
+ axis = normalize_axis_index(axis, ndim)
+ slobj = [slice(None)] * ndim
+ N = arr.shape[axis]
+ newshape = list(arr.shape)
+
+ if isinstance(obj, slice):
+ # turn it into a range object
+ indices = arange(*obj.indices(N), dtype=intp)
+ else:
+ # need to copy obj, because indices will be changed in-place
+ indices = np.array(obj)
+ if indices.dtype == bool:
+            if indices.ndim != 1:
+ raise ValueError('boolean array argument obj to insert '
+ 'must be one dimensional')
+ indices = np.flatnonzero(obj)
+ elif indices.ndim > 1:
+ raise ValueError(
+ "index array argument obj to insert must be one dimensional "
+ "or scalar")
+ if indices.size == 1:
+ index = indices.item()
+ if index < -N or index > N:
+ raise IndexError(f"index {obj} is out of bounds for axis {axis} "
+ f"with size {N}")
+ if (index < 0):
+ index += N
+
+ # There are some object array corner cases here, but we cannot avoid
+ # that:
+ values = array(values, copy=None, ndmin=arr.ndim, dtype=arr.dtype)
+ if indices.ndim == 0:
+        # broadcasting is very different here, since a[:,0,:] = ... behaves
+        # very differently from a[:,[0],:] = ...! This changes values so
+        # that it works like the second case. (here a[:,0:1,:])
+ values = np.moveaxis(values, 0, axis)
+ numnew = values.shape[axis]
+ newshape[axis] += numnew
+ new = empty(newshape, arr.dtype, arrorder)
+ slobj[axis] = slice(None, index)
+ new[tuple(slobj)] = arr[tuple(slobj)]
+ slobj[axis] = slice(index, index + numnew)
+ new[tuple(slobj)] = values
+ slobj[axis] = slice(index + numnew, None)
+ slobj2 = [slice(None)] * ndim
+ slobj2[axis] = slice(index, None)
+ new[tuple(slobj)] = arr[tuple(slobj2)]
+
+ return conv.wrap(new, to_scalar=False)
+
+ elif indices.size == 0 and not isinstance(obj, np.ndarray):
+ # Can safely cast the empty list to intp
+ indices = indices.astype(intp)
+
+ indices[indices < 0] += N
+
+ numnew = len(indices)
+ order = indices.argsort(kind='mergesort') # stable sort
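+    # offset each index (taken in sorted order) by the number of values
+    # inserted before it, so the indices address the enlarged array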
+ indices[order] += np.arange(numnew)
+
+ newshape[axis] += numnew
+ old_mask = ones(newshape[axis], dtype=bool)
+ old_mask[indices] = False
+
+ new = empty(newshape, arr.dtype, arrorder)
+ slobj2 = [slice(None)] * ndim
+ slobj[axis] = indices
+ slobj2[axis] = old_mask
+ new[tuple(slobj)] = values
+ new[tuple(slobj2)] = arr
+
+ return conv.wrap(new, to_scalar=False)
+
+
+def _append_dispatcher(arr, values, axis=None):
+ return (arr, values)
+
+
+@array_function_dispatch(_append_dispatcher)
+def append(arr, values, axis=None):
+ """
+ Append values to the end of an array.
+
+ Parameters
+ ----------
+ arr : array_like
+ Values are appended to a copy of this array.
+ values : array_like
+ These values are appended to a copy of `arr`. It must be of the
+ correct shape (the same shape as `arr`, excluding `axis`). If
+ `axis` is not specified, `values` can be any shape and will be
+ flattened before use.
+ axis : int, optional
+ The axis along which `values` are appended. If `axis` is not
+ given, both `arr` and `values` are flattened before use.
+
+ Returns
+ -------
+ append : ndarray
+ A copy of `arr` with `values` appended to `axis`. Note that
+ `append` does not occur in-place: a new array is allocated and
+ filled. If `axis` is None, `out` is a flattened array.
+
+ See Also
+ --------
+ insert : Insert elements into an array.
+ delete : Delete elements from an array.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.append([1, 2, 3], [[4, 5, 6], [7, 8, 9]])
+ array([1, 2, 3, ..., 7, 8, 9])
+
+ When `axis` is specified, `values` must have the correct shape.
+
+ >>> np.append([[1, 2, 3], [4, 5, 6]], [[7, 8, 9]], axis=0)
+ array([[1, 2, 3],
+ [4, 5, 6],
+ [7, 8, 9]])
+
+ >>> np.append([[1, 2, 3], [4, 5, 6]], [7, 8, 9], axis=0)
+ Traceback (most recent call last):
+ ...
+ ValueError: all the input arrays must have same number of dimensions, but
+ the array at index 0 has 2 dimension(s) and the array at index 1 has 1
+ dimension(s)
+
+ >>> a = np.array([1, 2], dtype=int)
+ >>> c = np.append(a, [])
+ >>> c
+ array([1., 2.])
+ >>> c.dtype
+ float64
+
+    The default dtype of an empty ndarray is `float64`, so appending an
+    empty array to an `int64` array produces a `float64` result.
+
+ """
+ arr = asanyarray(arr)
+ if axis is None:
+ if arr.ndim != 1:
+ arr = arr.ravel()
+ values = ravel(values)
+ axis = arr.ndim - 1
+ return concatenate((arr, values), axis=axis)
+
+
+def _digitize_dispatcher(x, bins, right=None):
+ return (x, bins)
+
+
+@array_function_dispatch(_digitize_dispatcher)
+def digitize(x, bins, right=False):
+ """
+ Return the indices of the bins to which each value in input array belongs.
+
+ ========= ============= ============================
+ `right` order of bins returned index `i` satisfies
+ ========= ============= ============================
+ ``False`` increasing ``bins[i-1] <= x < bins[i]``
+ ``True`` increasing ``bins[i-1] < x <= bins[i]``
+ ``False`` decreasing ``bins[i-1] > x >= bins[i]``
+ ``True`` decreasing ``bins[i-1] >= x > bins[i]``
+ ========= ============= ============================
+
+ If values in `x` are beyond the bounds of `bins`, 0 or ``len(bins)`` is
+ returned as appropriate.
+
+ Parameters
+ ----------
+ x : array_like
+ Input array to be binned. Prior to NumPy 1.10.0, this array had to
+ be 1-dimensional, but can now have any shape.
+ bins : array_like
+ Array of bins. It has to be 1-dimensional and monotonic.
+ right : bool, optional
+        Indicates whether the intervals include the right or the left bin
+        edge. The default (``right == False``) means the interval does not
+        include the right edge; the left edge is closed in this case, i.e.,
+        ``bins[i-1] <= x < bins[i]`` for monotonically increasing bins.
+
+ Returns
+ -------
+ indices : ndarray of ints
+ Output array of indices, of same shape as `x`.
+
+ Raises
+ ------
+ ValueError
+ If `bins` is not monotonic.
+ TypeError
+ If the type of the input is complex.
+
+ See Also
+ --------
+ bincount, histogram, unique, searchsorted
+
+ Notes
+ -----
+ If values in `x` are such that they fall outside the bin range,
+ attempting to index `bins` with the indices that `digitize` returns
+ will result in an IndexError.
+
+ .. versionadded:: 1.10.0
+
+ `numpy.digitize` is implemented in terms of `numpy.searchsorted`.
+ This means that a binary search is used to bin the values, which scales
+    much better for a large number of bins than the previous linear search.
+ It also removes the requirement for the input array to be 1-dimensional.
+
+ For monotonically *increasing* `bins`, the following are equivalent::
+
+ np.digitize(x, bins, right=True)
+ np.searchsorted(bins, x, side='left')
+
+    Note that as the order of the arguments is reversed, the side must be too.
+ The `searchsorted` call is marginally faster, as it does not do any
+ monotonicity checks. Perhaps more importantly, it supports all dtypes.
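+
+    As an illustrative check (assuming monotonically increasing `bins`)::
+
+        bins = np.array([0.0, 1.0, 2.5])
+        x = np.array([0.5, 1.0, 3.0])
+        np.digitize(x, bins, right=True)       # array([1, 1, 3])
+        np.searchsorted(bins, x, side='left')  # array([1, 1, 3])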
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> x = np.array([0.2, 6.4, 3.0, 1.6])
+ >>> bins = np.array([0.0, 1.0, 2.5, 4.0, 10.0])
+ >>> inds = np.digitize(x, bins)
+ >>> inds
+ array([1, 4, 3, 2])
+ >>> for n in range(x.size):
+ ... print(bins[inds[n]-1], "<=", x[n], "<", bins[inds[n]])
+ ...
+ 0.0 <= 0.2 < 1.0
+ 4.0 <= 6.4 < 10.0
+ 2.5 <= 3.0 < 4.0
+ 1.0 <= 1.6 < 2.5
+
+ >>> x = np.array([1.2, 10.0, 12.4, 15.5, 20.])
+ >>> bins = np.array([0, 5, 10, 15, 20])
+ >>> np.digitize(x,bins,right=True)
+ array([1, 2, 3, 4, 4])
+ >>> np.digitize(x,bins,right=False)
+ array([1, 3, 3, 4, 5])
+ """
+ x = _nx.asarray(x)
+ bins = _nx.asarray(bins)
+
+    # rejected for backwards compatibility; the searchsorted call below
+    # would happily accept complex input
+ if np.issubdtype(x.dtype, _nx.complexfloating):
+ raise TypeError("x may not be complex")
+
+ mono = _monotonicity(bins)
+ if mono == 0:
+ raise ValueError("bins must be monotonically increasing or decreasing")
+
+    # the sense of `side` is inverted because the argument order of
+    # digitize is the reverse of searchsorted's
+ side = 'left' if right else 'right'
+ if mono == -1:
+ # reverse the bins, and invert the results
+ return len(bins) - _nx.searchsorted(bins[::-1], x, side=side)
+ else:
+ return _nx.searchsorted(bins, x, side=side)
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/_function_base_impl.pyi b/.venv/lib/python3.12/site-packages/numpy/lib/_function_base_impl.pyi
new file mode 100644
index 0000000..090fb23
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/_function_base_impl.pyi
@@ -0,0 +1,985 @@
+# ruff: noqa: ANN401
+from collections.abc import Callable, Iterable, Sequence
+from typing import (
+ Any,
+ Concatenate,
+ ParamSpec,
+ Protocol,
+ SupportsIndex,
+ SupportsInt,
+ TypeAlias,
+ TypeVar,
+ overload,
+ type_check_only,
+)
+from typing import Literal as L
+
+from _typeshed import Incomplete
+from typing_extensions import TypeIs, deprecated
+
+import numpy as np
+from numpy import (
+ _OrderKACF,
+ bool_,
+ complex128,
+ complexfloating,
+ datetime64,
+ float64,
+ floating,
+ generic,
+ integer,
+ intp,
+ object_,
+ timedelta64,
+ vectorize,
+)
+from numpy._core.multiarray import bincount
+from numpy._globals import _NoValueType
+from numpy._typing import (
+ ArrayLike,
+ DTypeLike,
+ NDArray,
+ _ArrayLike,
+ _ArrayLikeBool_co,
+ _ArrayLikeComplex_co,
+ _ArrayLikeDT64_co,
+ _ArrayLikeFloat_co,
+ _ArrayLikeInt_co,
+ _ArrayLikeNumber_co,
+ _ArrayLikeObject_co,
+ _ArrayLikeTD64_co,
+ _ComplexLike_co,
+ _DTypeLike,
+ _FloatLike_co,
+ _NestedSequence,
+ _NumberLike_co,
+ _ScalarLike_co,
+ _ShapeLike,
+)
+
+__all__ = [
+ "select",
+ "piecewise",
+ "trim_zeros",
+ "copy",
+ "iterable",
+ "percentile",
+ "diff",
+ "gradient",
+ "angle",
+ "unwrap",
+ "sort_complex",
+ "flip",
+ "rot90",
+ "extract",
+ "place",
+ "vectorize",
+ "asarray_chkfinite",
+ "average",
+ "bincount",
+ "digitize",
+ "cov",
+ "corrcoef",
+ "median",
+ "sinc",
+ "hamming",
+ "hanning",
+ "bartlett",
+ "blackman",
+ "kaiser",
+ "trapezoid",
+ "trapz",
+ "i0",
+ "meshgrid",
+ "delete",
+ "insert",
+ "append",
+ "interp",
+ "quantile",
+]
+
+_T = TypeVar("_T")
+_T_co = TypeVar("_T_co", covariant=True)
+# The `{}ss` suffix refers to the Python 3.12 syntax: `**P`
+_Pss = ParamSpec("_Pss")
+_ScalarT = TypeVar("_ScalarT", bound=generic)
+_ScalarT1 = TypeVar("_ScalarT1", bound=generic)
+_ScalarT2 = TypeVar("_ScalarT2", bound=generic)
+_ArrayT = TypeVar("_ArrayT", bound=np.ndarray)
+
+_2Tuple: TypeAlias = tuple[_T, _T]
+_MeshgridIdx: TypeAlias = L['ij', 'xy']
+
+@type_check_only
+class _TrimZerosSequence(Protocol[_T_co]):
+ def __len__(self, /) -> int: ...
+ @overload
+ def __getitem__(self, key: int, /) -> object: ...
+ @overload
+ def __getitem__(self, key: slice, /) -> _T_co: ...
+
+###
+
+@overload
+def rot90(
+ m: _ArrayLike[_ScalarT],
+ k: int = ...,
+ axes: tuple[int, int] = ...,
+) -> NDArray[_ScalarT]: ...
+@overload
+def rot90(
+ m: ArrayLike,
+ k: int = ...,
+ axes: tuple[int, int] = ...,
+) -> NDArray[Any]: ...
+
+@overload
+def flip(m: _ScalarT, axis: None = ...) -> _ScalarT: ...
+@overload
+def flip(m: _ScalarLike_co, axis: None = ...) -> Any: ...
+@overload
+def flip(m: _ArrayLike[_ScalarT], axis: _ShapeLike | None = ...) -> NDArray[_ScalarT]: ...
+@overload
+def flip(m: ArrayLike, axis: _ShapeLike | None = ...) -> NDArray[Any]: ...
+
+def iterable(y: object) -> TypeIs[Iterable[Any]]: ...
+
+@overload
+def average(
+ a: _ArrayLikeFloat_co,
+ axis: None = None,
+ weights: _ArrayLikeFloat_co | None = None,
+ returned: L[False] = False,
+ *,
+ keepdims: L[False] | _NoValueType = ...,
+) -> floating: ...
+@overload
+def average(
+ a: _ArrayLikeFloat_co,
+ axis: None = None,
+ weights: _ArrayLikeFloat_co | None = None,
+ *,
+ returned: L[True],
+ keepdims: L[False] | _NoValueType = ...,
+) -> _2Tuple[floating]: ...
+@overload
+def average(
+ a: _ArrayLikeComplex_co,
+ axis: None = None,
+ weights: _ArrayLikeComplex_co | None = None,
+ returned: L[False] = False,
+ *,
+ keepdims: L[False] | _NoValueType = ...,
+) -> complexfloating: ...
+@overload
+def average(
+ a: _ArrayLikeComplex_co,
+ axis: None = None,
+ weights: _ArrayLikeComplex_co | None = None,
+ *,
+ returned: L[True],
+ keepdims: L[False] | _NoValueType = ...,
+) -> _2Tuple[complexfloating]: ...
+@overload
+def average(
+ a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+ axis: _ShapeLike | None = None,
+ weights: object | None = None,
+ *,
+ returned: L[True],
+ keepdims: bool | bool_ | _NoValueType = ...,
+) -> _2Tuple[Incomplete]: ...
+@overload
+def average(
+ a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+ axis: _ShapeLike | None = None,
+ weights: object | None = None,
+ returned: bool | bool_ = False,
+ *,
+ keepdims: bool | bool_ | _NoValueType = ...,
+) -> Incomplete: ...
+
+@overload
+def asarray_chkfinite(
+ a: _ArrayLike[_ScalarT],
+ dtype: None = ...,
+ order: _OrderKACF = ...,
+) -> NDArray[_ScalarT]: ...
+@overload
+def asarray_chkfinite(
+ a: object,
+ dtype: None = ...,
+ order: _OrderKACF = ...,
+) -> NDArray[Any]: ...
+@overload
+def asarray_chkfinite(
+ a: Any,
+ dtype: _DTypeLike[_ScalarT],
+ order: _OrderKACF = ...,
+) -> NDArray[_ScalarT]: ...
+@overload
+def asarray_chkfinite(
+ a: Any,
+ dtype: DTypeLike,
+ order: _OrderKACF = ...,
+) -> NDArray[Any]: ...
+
+@overload
+def piecewise(
+ x: _ArrayLike[_ScalarT],
+ condlist: _ArrayLike[bool_] | Sequence[_ArrayLikeBool_co],
+ funclist: Sequence[
+ Callable[Concatenate[NDArray[_ScalarT], _Pss], NDArray[_ScalarT | Any]]
+ | _ScalarT | object
+ ],
+ /,
+ *args: _Pss.args,
+ **kw: _Pss.kwargs,
+) -> NDArray[_ScalarT]: ...
+@overload
+def piecewise(
+ x: ArrayLike,
+ condlist: _ArrayLike[bool_] | Sequence[_ArrayLikeBool_co],
+ funclist: Sequence[
+ Callable[Concatenate[NDArray[Any], _Pss], NDArray[Any]]
+ | object
+ ],
+ /,
+ *args: _Pss.args,
+ **kw: _Pss.kwargs,
+) -> NDArray[Any]: ...
+
+def select(
+ condlist: Sequence[ArrayLike],
+ choicelist: Sequence[ArrayLike],
+ default: ArrayLike = ...,
+) -> NDArray[Any]: ...
+
+@overload
+def copy(
+ a: _ArrayT,
+ order: _OrderKACF,
+ subok: L[True],
+) -> _ArrayT: ...
+@overload
+def copy(
+ a: _ArrayT,
+ order: _OrderKACF = ...,
+ *,
+ subok: L[True],
+) -> _ArrayT: ...
+@overload
+def copy(
+ a: _ArrayLike[_ScalarT],
+ order: _OrderKACF = ...,
+ subok: L[False] = ...,
+) -> NDArray[_ScalarT]: ...
+@overload
+def copy(
+ a: ArrayLike,
+ order: _OrderKACF = ...,
+ subok: L[False] = ...,
+) -> NDArray[Any]: ...
+
+def gradient(
+ f: ArrayLike,
+ *varargs: ArrayLike,
+ axis: _ShapeLike | None = ...,
+ edge_order: L[1, 2] = ...,
+) -> Any: ...
+
+@overload
+def diff(
+ a: _T,
+ n: L[0],
+ axis: SupportsIndex = ...,
+ prepend: ArrayLike = ...,
+ append: ArrayLike = ...,
+) -> _T: ...
+@overload
+def diff(
+ a: ArrayLike,
+ n: int = ...,
+ axis: SupportsIndex = ...,
+ prepend: ArrayLike = ...,
+ append: ArrayLike = ...,
+) -> NDArray[Any]: ...
+
+@overload # float scalar
+def interp(
+ x: _FloatLike_co,
+ xp: _ArrayLikeFloat_co,
+ fp: _ArrayLikeFloat_co,
+ left: _FloatLike_co | None = None,
+ right: _FloatLike_co | None = None,
+ period: _FloatLike_co | None = None,
+) -> float64: ...
+@overload # float array
+def interp(
+ x: NDArray[floating | integer | np.bool] | _NestedSequence[_FloatLike_co],
+ xp: _ArrayLikeFloat_co,
+ fp: _ArrayLikeFloat_co,
+ left: _FloatLike_co | None = None,
+ right: _FloatLike_co | None = None,
+ period: _FloatLike_co | None = None,
+) -> NDArray[float64]: ...
+@overload # float scalar or array
+def interp(
+ x: _ArrayLikeFloat_co,
+ xp: _ArrayLikeFloat_co,
+ fp: _ArrayLikeFloat_co,
+ left: _FloatLike_co | None = None,
+ right: _FloatLike_co | None = None,
+ period: _FloatLike_co | None = None,
+) -> NDArray[float64] | float64: ...
+@overload # complex scalar
+def interp(
+ x: _FloatLike_co,
+ xp: _ArrayLikeFloat_co,
+ fp: _ArrayLike[complexfloating],
+ left: _NumberLike_co | None = None,
+ right: _NumberLike_co | None = None,
+ period: _FloatLike_co | None = None,
+) -> complex128: ...
+@overload # complex or float scalar
+def interp(
+ x: _FloatLike_co,
+ xp: _ArrayLikeFloat_co,
+ fp: Sequence[complex | complexfloating],
+ left: _NumberLike_co | None = None,
+ right: _NumberLike_co | None = None,
+ period: _FloatLike_co | None = None,
+) -> complex128 | float64: ...
+@overload # complex array
+def interp(
+ x: NDArray[floating | integer | np.bool] | _NestedSequence[_FloatLike_co],
+ xp: _ArrayLikeFloat_co,
+ fp: _ArrayLike[complexfloating],
+ left: _NumberLike_co | None = None,
+ right: _NumberLike_co | None = None,
+ period: _FloatLike_co | None = None,
+) -> NDArray[complex128]: ...
+@overload # complex or float array
+def interp(
+ x: NDArray[floating | integer | np.bool] | _NestedSequence[_FloatLike_co],
+ xp: _ArrayLikeFloat_co,
+ fp: Sequence[complex | complexfloating],
+ left: _NumberLike_co | None = None,
+ right: _NumberLike_co | None = None,
+ period: _FloatLike_co | None = None,
+) -> NDArray[complex128 | float64]: ...
+@overload # complex scalar or array
+def interp(
+ x: _ArrayLikeFloat_co,
+ xp: _ArrayLikeFloat_co,
+ fp: _ArrayLike[complexfloating],
+ left: _NumberLike_co | None = None,
+ right: _NumberLike_co | None = None,
+ period: _FloatLike_co | None = None,
+) -> NDArray[complex128] | complex128: ...
+@overload # complex or float scalar or array
+def interp(
+ x: _ArrayLikeFloat_co,
+ xp: _ArrayLikeFloat_co,
+ fp: _ArrayLikeNumber_co,
+ left: _NumberLike_co | None = None,
+ right: _NumberLike_co | None = None,
+ period: _FloatLike_co | None = None,
+) -> NDArray[complex128 | float64] | complex128 | float64: ...
+
+@overload
+def angle(z: _ComplexLike_co, deg: bool = ...) -> floating: ...
+@overload
+def angle(z: object_, deg: bool = ...) -> Any: ...
+@overload
+def angle(z: _ArrayLikeComplex_co, deg: bool = ...) -> NDArray[floating]: ...
+@overload
+def angle(z: _ArrayLikeObject_co, deg: bool = ...) -> NDArray[object_]: ...
+
+@overload
+def unwrap(
+ p: _ArrayLikeFloat_co,
+ discont: float | None = ...,
+ axis: int = ...,
+ *,
+ period: float = ...,
+) -> NDArray[floating]: ...
+@overload
+def unwrap(
+ p: _ArrayLikeObject_co,
+ discont: float | None = ...,
+ axis: int = ...,
+ *,
+ period: float = ...,
+) -> NDArray[object_]: ...
+
+def sort_complex(a: ArrayLike) -> NDArray[complexfloating]: ...
+
+def trim_zeros(
+ filt: _TrimZerosSequence[_T],
+ trim: L["f", "b", "fb", "bf"] = ...,
+) -> _T: ...
+
+@overload
+def extract(condition: ArrayLike, arr: _ArrayLike[_ScalarT]) -> NDArray[_ScalarT]: ...
+@overload
+def extract(condition: ArrayLike, arr: ArrayLike) -> NDArray[Any]: ...
+
+def place(arr: NDArray[Any], mask: ArrayLike, vals: Any) -> None: ...
+
+@overload
+def cov(
+ m: _ArrayLikeFloat_co,
+ y: _ArrayLikeFloat_co | None = ...,
+ rowvar: bool = ...,
+ bias: bool = ...,
+ ddof: SupportsIndex | SupportsInt | None = ...,
+ fweights: ArrayLike | None = ...,
+ aweights: ArrayLike | None = ...,
+ *,
+ dtype: None = ...,
+) -> NDArray[floating]: ...
+@overload
+def cov(
+ m: _ArrayLikeComplex_co,
+ y: _ArrayLikeComplex_co | None = ...,
+ rowvar: bool = ...,
+ bias: bool = ...,
+ ddof: SupportsIndex | SupportsInt | None = ...,
+ fweights: ArrayLike | None = ...,
+ aweights: ArrayLike | None = ...,
+ *,
+ dtype: None = ...,
+) -> NDArray[complexfloating]: ...
+@overload
+def cov(
+ m: _ArrayLikeComplex_co,
+ y: _ArrayLikeComplex_co | None = ...,
+ rowvar: bool = ...,
+ bias: bool = ...,
+ ddof: SupportsIndex | SupportsInt | None = ...,
+ fweights: ArrayLike | None = ...,
+ aweights: ArrayLike | None = ...,
+ *,
+ dtype: _DTypeLike[_ScalarT],
+) -> NDArray[_ScalarT]: ...
+@overload
+def cov(
+ m: _ArrayLikeComplex_co,
+ y: _ArrayLikeComplex_co | None = ...,
+ rowvar: bool = ...,
+ bias: bool = ...,
+ ddof: SupportsIndex | SupportsInt | None = ...,
+ fweights: ArrayLike | None = ...,
+ aweights: ArrayLike | None = ...,
+ *,
+ dtype: DTypeLike,
+) -> NDArray[Any]: ...
+
+# NOTE `bias` and `ddof` are deprecated and ignored
+@overload
+def corrcoef(
+ m: _ArrayLikeFloat_co,
+ y: _ArrayLikeFloat_co | None = None,
+ rowvar: bool = True,
+ bias: _NoValueType = ...,
+ ddof: _NoValueType = ...,
+ *,
+ dtype: None = None,
+) -> NDArray[floating]: ...
+@overload
+def corrcoef(
+ m: _ArrayLikeComplex_co,
+ y: _ArrayLikeComplex_co | None = None,
+ rowvar: bool = True,
+ bias: _NoValueType = ...,
+ ddof: _NoValueType = ...,
+ *,
+ dtype: None = None,
+) -> NDArray[complexfloating]: ...
+@overload
+def corrcoef(
+ m: _ArrayLikeComplex_co,
+ y: _ArrayLikeComplex_co | None = None,
+ rowvar: bool = True,
+ bias: _NoValueType = ...,
+ ddof: _NoValueType = ...,
+ *,
+ dtype: _DTypeLike[_ScalarT],
+) -> NDArray[_ScalarT]: ...
+@overload
+def corrcoef(
+ m: _ArrayLikeComplex_co,
+ y: _ArrayLikeComplex_co | None = None,
+ rowvar: bool = True,
+ bias: _NoValueType = ...,
+ ddof: _NoValueType = ...,
+ *,
+ dtype: DTypeLike | None = None,
+) -> NDArray[Any]: ...
+
+def blackman(M: _FloatLike_co) -> NDArray[floating]: ...
+
+def bartlett(M: _FloatLike_co) -> NDArray[floating]: ...
+
+def hanning(M: _FloatLike_co) -> NDArray[floating]: ...
+
+def hamming(M: _FloatLike_co) -> NDArray[floating]: ...
+
+def i0(x: _ArrayLikeFloat_co) -> NDArray[floating]: ...
+
+def kaiser(
+ M: _FloatLike_co,
+ beta: _FloatLike_co,
+) -> NDArray[floating]: ...
+
+@overload
+def sinc(x: _FloatLike_co) -> floating: ...
+@overload
+def sinc(x: _ComplexLike_co) -> complexfloating: ...
+@overload
+def sinc(x: _ArrayLikeFloat_co) -> NDArray[floating]: ...
+@overload
+def sinc(x: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ...
+
+@overload
+def median(
+ a: _ArrayLikeFloat_co,
+ axis: None = ...,
+ out: None = ...,
+ overwrite_input: bool = ...,
+ keepdims: L[False] = ...,
+) -> floating: ...
+@overload
+def median(
+ a: _ArrayLikeComplex_co,
+ axis: None = ...,
+ out: None = ...,
+ overwrite_input: bool = ...,
+ keepdims: L[False] = ...,
+) -> complexfloating: ...
+@overload
+def median(
+ a: _ArrayLikeTD64_co,
+ axis: None = ...,
+ out: None = ...,
+ overwrite_input: bool = ...,
+ keepdims: L[False] = ...,
+) -> timedelta64: ...
+@overload
+def median(
+ a: _ArrayLikeObject_co,
+ axis: None = ...,
+ out: None = ...,
+ overwrite_input: bool = ...,
+ keepdims: L[False] = ...,
+) -> Any: ...
+@overload
+def median(
+ a: _ArrayLikeFloat_co | _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co,
+ axis: _ShapeLike | None = ...,
+ out: None = ...,
+ overwrite_input: bool = ...,
+ keepdims: bool = ...,
+) -> Any: ...
+@overload
+def median(
+ a: _ArrayLikeFloat_co | _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co,
+ axis: _ShapeLike | None,
+ out: _ArrayT,
+ overwrite_input: bool = ...,
+ keepdims: bool = ...,
+) -> _ArrayT: ...
+@overload
+def median(
+ a: _ArrayLikeFloat_co | _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co,
+ axis: _ShapeLike | None = ...,
+ *,
+ out: _ArrayT,
+ overwrite_input: bool = ...,
+ keepdims: bool = ...,
+) -> _ArrayT: ...
+
+_MethodKind = L[
+ "inverted_cdf",
+ "averaged_inverted_cdf",
+ "closest_observation",
+ "interpolated_inverted_cdf",
+ "hazen",
+ "weibull",
+ "linear",
+ "median_unbiased",
+ "normal_unbiased",
+ "lower",
+ "higher",
+ "midpoint",
+ "nearest",
+]
+
+@overload
+def percentile(
+ a: _ArrayLikeFloat_co,
+ q: _FloatLike_co,
+ axis: None = ...,
+ out: None = ...,
+ overwrite_input: bool = ...,
+ method: _MethodKind = ...,
+ keepdims: L[False] = ...,
+ *,
+ weights: _ArrayLikeFloat_co | None = ...,
+) -> floating: ...
+@overload
+def percentile(
+ a: _ArrayLikeComplex_co,
+ q: _FloatLike_co,
+ axis: None = ...,
+ out: None = ...,
+ overwrite_input: bool = ...,
+ method: _MethodKind = ...,
+ keepdims: L[False] = ...,
+ *,
+ weights: _ArrayLikeFloat_co | None = ...,
+) -> complexfloating: ...
+@overload
+def percentile(
+ a: _ArrayLikeTD64_co,
+ q: _FloatLike_co,
+ axis: None = ...,
+ out: None = ...,
+ overwrite_input: bool = ...,
+ method: _MethodKind = ...,
+ keepdims: L[False] = ...,
+ *,
+ weights: _ArrayLikeFloat_co | None = ...,
+) -> timedelta64: ...
+@overload
+def percentile(
+ a: _ArrayLikeDT64_co,
+ q: _FloatLike_co,
+ axis: None = ...,
+ out: None = ...,
+ overwrite_input: bool = ...,
+ method: _MethodKind = ...,
+ keepdims: L[False] = ...,
+ *,
+ weights: _ArrayLikeFloat_co | None = ...,
+) -> datetime64: ...
+@overload
+def percentile(
+ a: _ArrayLikeObject_co,
+ q: _FloatLike_co,
+ axis: None = ...,
+ out: None = ...,
+ overwrite_input: bool = ...,
+ method: _MethodKind = ...,
+ keepdims: L[False] = ...,
+ *,
+ weights: _ArrayLikeFloat_co | None = ...,
+) -> Any: ...
+@overload
+def percentile(
+ a: _ArrayLikeFloat_co,
+ q: _ArrayLikeFloat_co,
+ axis: None = ...,
+ out: None = ...,
+ overwrite_input: bool = ...,
+ method: _MethodKind = ...,
+ keepdims: L[False] = ...,
+ *,
+ weights: _ArrayLikeFloat_co | None = ...,
+) -> NDArray[floating]: ...
+@overload
+def percentile(
+ a: _ArrayLikeComplex_co,
+ q: _ArrayLikeFloat_co,
+ axis: None = ...,
+ out: None = ...,
+ overwrite_input: bool = ...,
+ method: _MethodKind = ...,
+ keepdims: L[False] = ...,
+ *,
+ weights: _ArrayLikeFloat_co | None = ...,
+) -> NDArray[complexfloating]: ...
+@overload
+def percentile(
+ a: _ArrayLikeTD64_co,
+ q: _ArrayLikeFloat_co,
+ axis: None = ...,
+ out: None = ...,
+ overwrite_input: bool = ...,
+ method: _MethodKind = ...,
+ keepdims: L[False] = ...,
+ *,
+ weights: _ArrayLikeFloat_co | None = ...,
+) -> NDArray[timedelta64]: ...
+@overload
+def percentile(
+ a: _ArrayLikeDT64_co,
+ q: _ArrayLikeFloat_co,
+ axis: None = ...,
+ out: None = ...,
+ overwrite_input: bool = ...,
+ method: _MethodKind = ...,
+ keepdims: L[False] = ...,
+ *,
+ weights: _ArrayLikeFloat_co | None = ...,
+) -> NDArray[datetime64]: ...
+@overload
+def percentile(
+ a: _ArrayLikeObject_co,
+ q: _ArrayLikeFloat_co,
+ axis: None = ...,
+ out: None = ...,
+ overwrite_input: bool = ...,
+ method: _MethodKind = ...,
+ keepdims: L[False] = ...,
+ *,
+ weights: _ArrayLikeFloat_co | None = ...,
+) -> NDArray[object_]: ...
+@overload
+def percentile(
+ a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeDT64_co | _ArrayLikeObject_co,
+ q: _ArrayLikeFloat_co,
+ axis: _ShapeLike | None = ...,
+ out: None = ...,
+ overwrite_input: bool = ...,
+ method: _MethodKind = ...,
+ keepdims: bool = ...,
+ *,
+ weights: _ArrayLikeFloat_co | None = ...,
+) -> Any: ...
+@overload
+def percentile(
+ a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeDT64_co | _ArrayLikeObject_co,
+ q: _ArrayLikeFloat_co,
+ axis: _ShapeLike | None,
+ out: _ArrayT,
+ overwrite_input: bool = ...,
+ method: _MethodKind = ...,
+ keepdims: bool = ...,
+ *,
+ weights: _ArrayLikeFloat_co | None = ...,
+) -> _ArrayT: ...
+@overload
+def percentile(
+ a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeDT64_co | _ArrayLikeObject_co,
+ q: _ArrayLikeFloat_co,
+ axis: _ShapeLike | None = ...,
+ *,
+ out: _ArrayT,
+ overwrite_input: bool = ...,
+ method: _MethodKind = ...,
+ keepdims: bool = ...,
+ weights: _ArrayLikeFloat_co | None = ...,
+) -> _ArrayT: ...
+
+# NOTE: Not an alias, but they do have identical signatures
+# (that we can reuse)
+quantile = percentile
+
+_ScalarT_fm = TypeVar(
+ "_ScalarT_fm",
+ bound=floating | complexfloating | timedelta64,
+)
+
+class _SupportsRMulFloat(Protocol[_T_co]):
+ def __rmul__(self, other: float, /) -> _T_co: ...
+
+@overload
+def trapezoid( # type: ignore[overload-overlap]
+ y: Sequence[_FloatLike_co],
+ x: Sequence[_FloatLike_co] | None = ...,
+ dx: float = ...,
+ axis: SupportsIndex = ...,
+) -> float64: ...
+@overload
+def trapezoid(
+ y: Sequence[_ComplexLike_co],
+ x: Sequence[_ComplexLike_co] | None = ...,
+ dx: float = ...,
+ axis: SupportsIndex = ...,
+) -> complex128: ...
+@overload
+def trapezoid(
+ y: _ArrayLike[bool_ | integer],
+ x: _ArrayLike[bool_ | integer] | None = ...,
+ dx: float = ...,
+ axis: SupportsIndex = ...,
+) -> float64 | NDArray[float64]: ...
+@overload
+def trapezoid( # type: ignore[overload-overlap]
+ y: _ArrayLikeObject_co,
+ x: _ArrayLikeFloat_co | _ArrayLikeObject_co | None = ...,
+ dx: float = ...,
+ axis: SupportsIndex = ...,
+) -> float | NDArray[object_]: ...
+@overload
+def trapezoid(
+ y: _ArrayLike[_ScalarT_fm],
+ x: _ArrayLike[_ScalarT_fm] | _ArrayLikeInt_co | None = ...,
+ dx: float = ...,
+ axis: SupportsIndex = ...,
+) -> _ScalarT_fm | NDArray[_ScalarT_fm]: ...
+@overload
+def trapezoid(
+ y: Sequence[_SupportsRMulFloat[_T]],
+ x: Sequence[_SupportsRMulFloat[_T] | _T] | None = ...,
+ dx: float = ...,
+ axis: SupportsIndex = ...,
+) -> _T: ...
+@overload
+def trapezoid(
+ y: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co,
+ x: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co | None = ...,
+ dx: float = ...,
+ axis: SupportsIndex = ...,
+) -> (
+ floating | complexfloating | timedelta64
+ | NDArray[floating | complexfloating | timedelta64 | object_]
+): ...
+
+@deprecated("Use 'trapezoid' instead")
+def trapz(y: ArrayLike, x: ArrayLike | None = None, dx: float = 1.0, axis: int = -1) -> generic | NDArray[generic]: ...
+
+@overload
+def meshgrid(
+ *,
+ copy: bool = ...,
+ sparse: bool = ...,
+ indexing: _MeshgridIdx = ...,
+) -> tuple[()]: ...
+@overload
+def meshgrid(
+ x1: _ArrayLike[_ScalarT],
+ /,
+ *,
+ copy: bool = ...,
+ sparse: bool = ...,
+ indexing: _MeshgridIdx = ...,
+) -> tuple[NDArray[_ScalarT]]: ...
+@overload
+def meshgrid(
+ x1: ArrayLike,
+ /,
+ *,
+ copy: bool = ...,
+ sparse: bool = ...,
+ indexing: _MeshgridIdx = ...,
+) -> tuple[NDArray[Any]]: ...
+@overload
+def meshgrid(
+ x1: _ArrayLike[_ScalarT1],
+ x2: _ArrayLike[_ScalarT2],
+ /,
+ *,
+ copy: bool = ...,
+ sparse: bool = ...,
+ indexing: _MeshgridIdx = ...,
+) -> tuple[NDArray[_ScalarT1], NDArray[_ScalarT2]]: ...
+@overload
+def meshgrid(
+ x1: ArrayLike,
+ x2: _ArrayLike[_ScalarT],
+ /,
+ *,
+ copy: bool = ...,
+ sparse: bool = ...,
+ indexing: _MeshgridIdx = ...,
+) -> tuple[NDArray[Any], NDArray[_ScalarT]]: ...
+@overload
+def meshgrid(
+ x1: _ArrayLike[_ScalarT],
+ x2: ArrayLike,
+ /,
+ *,
+ copy: bool = ...,
+ sparse: bool = ...,
+ indexing: _MeshgridIdx = ...,
+) -> tuple[NDArray[_ScalarT], NDArray[Any]]: ...
+@overload
+def meshgrid(
+ x1: ArrayLike,
+ x2: ArrayLike,
+ /,
+ *,
+ copy: bool = ...,
+ sparse: bool = ...,
+ indexing: _MeshgridIdx = ...,
+) -> tuple[NDArray[Any], NDArray[Any]]: ...
+@overload
+def meshgrid(
+ x1: ArrayLike,
+ x2: ArrayLike,
+ x3: ArrayLike,
+ /,
+ *,
+ copy: bool = ...,
+ sparse: bool = ...,
+ indexing: _MeshgridIdx = ...,
+) -> tuple[NDArray[Any], NDArray[Any], NDArray[Any]]: ...
+@overload
+def meshgrid(
+ x1: ArrayLike,
+ x2: ArrayLike,
+ x3: ArrayLike,
+ x4: ArrayLike,
+ /,
+ *,
+ copy: bool = ...,
+ sparse: bool = ...,
+ indexing: _MeshgridIdx = ...,
+) -> tuple[NDArray[Any], NDArray[Any], NDArray[Any], NDArray[Any]]: ...
+@overload
+def meshgrid(
+ *xi: ArrayLike,
+ copy: bool = ...,
+ sparse: bool = ...,
+ indexing: _MeshgridIdx = ...,
+) -> tuple[NDArray[Any], ...]: ...
+
+@overload
+def delete(
+ arr: _ArrayLike[_ScalarT],
+ obj: slice | _ArrayLikeInt_co,
+ axis: SupportsIndex | None = ...,
+) -> NDArray[_ScalarT]: ...
+@overload
+def delete(
+ arr: ArrayLike,
+ obj: slice | _ArrayLikeInt_co,
+ axis: SupportsIndex | None = ...,
+) -> NDArray[Any]: ...
+
+@overload
+def insert(
+ arr: _ArrayLike[_ScalarT],
+ obj: slice | _ArrayLikeInt_co,
+ values: ArrayLike,
+ axis: SupportsIndex | None = ...,
+) -> NDArray[_ScalarT]: ...
+@overload
+def insert(
+ arr: ArrayLike,
+ obj: slice | _ArrayLikeInt_co,
+ values: ArrayLike,
+ axis: SupportsIndex | None = ...,
+) -> NDArray[Any]: ...
+
+def append(
+ arr: ArrayLike,
+ values: ArrayLike,
+ axis: SupportsIndex | None = ...,
+) -> NDArray[Any]: ...
+
+@overload
+def digitize(
+ x: _FloatLike_co,
+ bins: _ArrayLikeFloat_co,
+ right: bool = ...,
+) -> intp: ...
+@overload
+def digitize(
+ x: _ArrayLikeFloat_co,
+ bins: _ArrayLikeFloat_co,
+ right: bool = ...,
+) -> NDArray[intp]: ...
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/_histograms_impl.py b/.venv/lib/python3.12/site-packages/numpy/lib/_histograms_impl.py
new file mode 100644
index 0000000..b4aacd0
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/_histograms_impl.py
@@ -0,0 +1,1085 @@
+"""
+Histogram-related functions
+"""
+import contextlib
+import functools
+import operator
+import warnings
+
+import numpy as np
+from numpy._core import overrides
+
+__all__ = ['histogram', 'histogramdd', 'histogram_bin_edges']
+
+array_function_dispatch = functools.partial(
+ overrides.array_function_dispatch, module='numpy')
+
+# range is a keyword argument to many functions, so save the builtin so they can
+# use it.
+_range = range
+
+
+def _ptp(x):
+ """Peak-to-peak value of x.
+
+ This implementation avoids the problem of signed integer arrays having a
+ peak-to-peak value that cannot be represented with the array's data type.
+ This function returns an unsigned value for signed integer arrays.
+ """
+ return _unsigned_subtract(x.max(), x.min())
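+
+# Illustrative example (not part of the original source): for
+# a = np.array([-128, 127], dtype=np.int8), the naive a.max() - a.min()
+# wraps around to -1, while _ptp(a) returns 255 (as np.uint8).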
+
+
+def _hist_bin_sqrt(x, range):
+ """
+ Square root histogram bin estimator.
+
+    Bin width is inversely proportional to the square root of the data
+    size. Used by many programs for its simplicity.
+
+ Parameters
+ ----------
+ x : array_like
+ Input data that is to be histogrammed, trimmed to range. May not
+ be empty.
+
+ Returns
+ -------
+ h : An estimate of the optimal bin width for the given data.
+ """
+ del range # unused
+ return _ptp(x) / np.sqrt(x.size)
+
+
+def _hist_bin_sturges(x, range):
+ """
+ Sturges histogram bin estimator.
+
+ A very simplistic estimator based on the assumption of normality of
+ the data. This estimator has poor performance for non-normal data,
+ which becomes especially obvious for large data sets. The estimate
+    depends only on the size of the data.
+
+ Parameters
+ ----------
+ x : array_like
+ Input data that is to be histogrammed, trimmed to range. May not
+ be empty.
+
+ Returns
+ -------
+ h : An estimate of the optimal bin width for the given data.
+ """
+ del range # unused
+ return _ptp(x) / (np.log2(x.size) + 1.0)
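+
+# e.g. (illustrative): for x.size == 1024, np.log2(1024) + 1 == 11, so the
+# Sturges rule spreads about 11 bins across the data range.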
+
+
+def _hist_bin_rice(x, range):
+ """
+ Rice histogram bin estimator.
+
+ Another simple estimator with no normality assumption. It has better
+ performance for large data than Sturges, but tends to overestimate
+ the number of bins. The number of bins is proportional to the cube
+    root of the data size (asymptotically optimal). The estimate depends
+    only on the size of the data.
+
+ Parameters
+ ----------
+ x : array_like
+ Input data that is to be histogrammed, trimmed to range. May not
+ be empty.
+
+ Returns
+ -------
+ h : An estimate of the optimal bin width for the given data.
+ """
+ del range # unused
+ return _ptp(x) / (2.0 * x.size ** (1.0 / 3))
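+
+# e.g. (illustrative): for x.size == 1000 the denominator is 2 * 10 == 20,
+# i.e. the Rice rule asks for about 20 bins across the data range.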
+
+
+def _hist_bin_scott(x, range):
+ """
+ Scott histogram bin estimator.
+
+ The binwidth is proportional to the standard deviation of the data
+ and inversely proportional to the cube root of data size
+ (asymptotically optimal).
+
+ Parameters
+ ----------
+ x : array_like
+ Input data that is to be histogrammed, trimmed to range. May not
+ be empty.
+
+ Returns
+ -------
+ h : An estimate of the optimal bin width for the given data.
+ """
+ del range # unused
+ return (24.0 * np.pi**0.5 / x.size)**(1.0 / 3.0) * np.std(x)
+
+
+def _hist_bin_stone(x, range):
+ """
+ Histogram bin estimator based on minimizing the estimated integrated squared error (ISE).
+
+ The number of bins is chosen by minimizing the estimated ISE against the unknown
+ true distribution. The ISE is estimated using cross-validation and can be regarded
+ as a generalization of Scott's rule.
+ https://en.wikipedia.org/wiki/Histogram#Scott.27s_normal_reference_rule
+
+    This rule appears to originate in the following paper by Stone:
+ https://digitalassets.lib.berkeley.edu/sdtr/ucb/text/34.pdf
+
+ Parameters
+ ----------
+ x : array_like
+ Input data that is to be histogrammed, trimmed to range. May not
+ be empty.
+ range : (float, float)
+ The lower and upper range of the bins.
+
+ Returns
+ -------
+ h : An estimate of the optimal bin width for the given data.
+ """ # noqa: E501
+
+ n = x.size
+ ptp_x = _ptp(x)
+ if n <= 1 or ptp_x == 0:
+ return 0
+
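+    # Leave-one-out cross-validation objective: an estimate of the
+    # integrated squared error (up to terms constant in `nbins`) for a
+    # histogram with `nbins` equal-width bins over `range`.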
+ def jhat(nbins):
+ hh = ptp_x / nbins
+ p_k = np.histogram(x, bins=nbins, range=range)[0] / n
+ return (2 - (n + 1) * p_k.dot(p_k)) / hh
+
+ nbins_upper_bound = max(100, int(np.sqrt(n)))
+ nbins = min(_range(1, nbins_upper_bound + 1), key=jhat)
+ if nbins == nbins_upper_bound:
+ warnings.warn("The number of bins estimated may be suboptimal.",
+ RuntimeWarning, stacklevel=3)
+ return ptp_x / nbins
+
+
+def _hist_bin_doane(x, range):
+ """
+ Doane's histogram bin estimator.
+
+ Improved version of Sturges' formula which works better for
+ non-normal data. See
+ stats.stackexchange.com/questions/55134/doanes-formula-for-histogram-binning
+
+ Parameters
+ ----------
+ x : array_like
+ Input data that is to be histogrammed, trimmed to range. May not
+ be empty.
+
+ Returns
+ -------
+ h : An estimate of the optimal bin width for the given data.
+ """
+ del range # unused
+ if x.size > 2:
+ sg1 = np.sqrt(6.0 * (x.size - 2) / ((x.size + 1.0) * (x.size + 3)))
+ sigma = np.std(x)
+ if sigma > 0.0:
+ # These three operations add up to
+ # g1 = np.mean(((x - np.mean(x)) / sigma)**3)
+ # but use only one temp array instead of three
+ temp = x - np.mean(x)
+ np.true_divide(temp, sigma, temp)
+ np.power(temp, 3, temp)
+ g1 = np.mean(temp)
+ return _ptp(x) / (1.0 + np.log2(x.size) +
+ np.log2(1.0 + np.absolute(g1) / sg1))
+ return 0.0
+
+
+def _hist_bin_fd(x, range):
+ """
+ The Freedman-Diaconis histogram bin estimator.
+
+ The Freedman-Diaconis rule uses interquartile range (IQR) to
+ estimate binwidth. It is considered a variation of the Scott rule
+ with more robustness as the IQR is less affected by outliers than
+ the standard deviation. However, the IQR depends on fewer points
+ than the standard deviation, so it is less accurate, especially for
+ long tailed distributions.
+
+ If the IQR is 0, this function returns 0 for the bin width.
+ Binwidth is inversely proportional to the cube root of data size
+ (asymptotically optimal).
+
+ Parameters
+ ----------
+ x : array_like
+ Input data that is to be histogrammed, trimmed to range. May not
+ be empty.
+
+ Returns
+ -------
+ h : An estimate of the optimal bin width for the given data.
+ """
+ del range # unused
+ iqr = np.subtract(*np.percentile(x, [75, 25]))
+ return 2.0 * iqr * x.size ** (-1.0 / 3.0)
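+
+# e.g. (illustrative): for data with IQR == 4.0 and x.size == 1000, the
+# Freedman-Diaconis width is 2 * 4.0 / 10.0 == 0.8, the cube root of
+# 1000 being 10.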
+
+
+def _hist_bin_auto(x, range):
+ """
+    Histogram bin estimator that uses the minimum width of a relaxed
+    Freedman-Diaconis estimator and the Sturges estimator. The relaxed
+    Freedman-Diaconis estimator limits the bin width to at least half the
+    width given by the 'sqrt' estimator, to avoid overly small bins.
+
+ The FD estimator is usually the most robust method, but its width
+ estimate tends to be too large for small `x` and bad for data with limited
+ variance. The Sturges estimator is quite good for small (<1000) datasets
+ and is the default in the R language. This method gives good off-the-shelf
+ behaviour.
+
+
+ Parameters
+ ----------
+ x : array_like
+ Input data that is to be histogrammed, trimmed to range. May not
+ be empty.
+    range : (float, float)
+        The lower and upper range of the bins.
+
+ Returns
+ -------
+ h : An estimate of the optimal bin width for the given data.
+
+ See Also
+ --------
+ _hist_bin_fd, _hist_bin_sturges
+ """
+ fd_bw = _hist_bin_fd(x, range)
+ sturges_bw = _hist_bin_sturges(x, range)
+ sqrt_bw = _hist_bin_sqrt(x, range)
+ # heuristic to limit the maximal number of bins
+ fd_bw_corrected = max(fd_bw, sqrt_bw / 2)
+ return min(fd_bw_corrected, sturges_bw)
+
+
+# Private dict initialized at module load time
+_hist_bin_selectors = {'stone': _hist_bin_stone,
+ 'auto': _hist_bin_auto,
+ 'doane': _hist_bin_doane,
+ 'fd': _hist_bin_fd,
+ 'rice': _hist_bin_rice,
+ 'scott': _hist_bin_scott,
+ 'sqrt': _hist_bin_sqrt,
+ 'sturges': _hist_bin_sturges}
+
+
+def _ravel_and_check_weights(a, weights):
+ """ Check a and weights have matching shapes, and ravel both """
+ a = np.asarray(a)
+
+ # Ensure that the array is a "subtractable" dtype
+ if a.dtype == np.bool:
+ msg = f"Converting input from {a.dtype} to {np.uint8} for compatibility."
+ warnings.warn(msg, RuntimeWarning, stacklevel=3)
+ a = a.astype(np.uint8)
+
+ if weights is not None:
+ weights = np.asarray(weights)
+ if weights.shape != a.shape:
+ raise ValueError(
+ 'weights should have the same shape as a.')
+ weights = weights.ravel()
+ a = a.ravel()
+ return a, weights
+
+
+def _get_outer_edges(a, range):
+ """
+ Determine the outer bin edges to use, from either the data or the range
+ argument
+ """
+ if range is not None:
+ first_edge, last_edge = range
+ if first_edge > last_edge:
+ raise ValueError(
+ 'max must be larger than min in range parameter.')
+ if not (np.isfinite(first_edge) and np.isfinite(last_edge)):
+ raise ValueError(
+ f"supplied range of [{first_edge}, {last_edge}] is not finite")
+ elif a.size == 0:
+ # handle empty arrays. Can't determine range, so use 0-1.
+ first_edge, last_edge = 0, 1
+ else:
+ first_edge, last_edge = a.min(), a.max()
+ if not (np.isfinite(first_edge) and np.isfinite(last_edge)):
+ raise ValueError(
+ f"autodetected range of [{first_edge}, {last_edge}] is not finite")
+
+ # expand empty range to avoid divide by zero
+ if first_edge == last_edge:
+ first_edge = first_edge - 0.5
+ last_edge = last_edge + 0.5
+
+ return first_edge, last_edge
+
+
+def _unsigned_subtract(a, b):
+ """
+ Subtract two values where a >= b, and produce an unsigned result
+
+ This is needed when finding the difference between the upper and lower
+ bound of an int16 histogram
+ """
+ # coerce to a single type
+ signed_to_unsigned = {
+ np.byte: np.ubyte,
+ np.short: np.ushort,
+ np.intc: np.uintc,
+ np.int_: np.uint,
+ np.longlong: np.ulonglong
+ }
+ dt = np.result_type(a, b)
+ try:
+ unsigned_dt = signed_to_unsigned[dt.type]
+ except KeyError:
+ return np.subtract(a, b, dtype=dt)
+ else:
+ # we know the inputs are integers, and we are deliberately casting
+ # signed to unsigned. The input may be negative python integers so
+ # ensure we pass in arrays with the initial dtype (related to NEP 50).
+ return np.subtract(np.asarray(a, dtype=dt), np.asarray(b, dtype=dt),
+ casting='unsafe', dtype=unsigned_dt)
+
+
+def _get_bin_edges(a, bins, range, weights):
+ """
+ Computes the bins used internally by `histogram`.
+
+ Parameters
+ ==========
+ a : ndarray
+ Ravelled data array
+ bins, range
+ Forwarded arguments from `histogram`.
+ weights : ndarray, optional
+ Ravelled weights array, or None
+
+ Returns
+ =======
+ bin_edges : ndarray
+ Array of bin edges
+    uniform_bins : (Number, Number, int)
+        The lower bound, upper bound, and number of bins, used in the optimized
+ implementation of `histogram` that works on uniform bins.
+ """
+ # parse the overloaded bins argument
+ n_equal_bins = None
+ bin_edges = None
+
+ if isinstance(bins, str):
+ bin_name = bins
+ # if `bins` is a string for an automatic method,
+ # this will replace it with the number of bins calculated
+ if bin_name not in _hist_bin_selectors:
+ raise ValueError(
+ f"{bin_name!r} is not a valid estimator for `bins`")
+ if weights is not None:
+ raise TypeError("Automated estimation of the number of "
+ "bins is not supported for weighted data")
+
+ first_edge, last_edge = _get_outer_edges(a, range)
+
+ # truncate the range if needed
+ if range is not None:
+ keep = (a >= first_edge)
+ keep &= (a <= last_edge)
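+            # only take the (copying) fancy-indexing path when some data
+            # actually falls outside the requested range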
+ if not np.logical_and.reduce(keep):
+ a = a[keep]
+
+ if a.size == 0:
+ n_equal_bins = 1
+ else:
+ # Do not call selectors on empty arrays
+ width = _hist_bin_selectors[bin_name](a, (first_edge, last_edge))
+ if width:
+ if np.issubdtype(a.dtype, np.integer) and width < 1:
+ width = 1
+ delta = _unsigned_subtract(last_edge, first_edge)
+ n_equal_bins = int(np.ceil(delta / width))
+ else:
+ # Width can be zero for some estimators, e.g. FD when
+ # the IQR of the data is zero.
+ n_equal_bins = 1
+
+ elif np.ndim(bins) == 0:
+ try:
+ n_equal_bins = operator.index(bins)
+ except TypeError as e:
+ raise TypeError(
+ '`bins` must be an integer, a string, or an array') from e
+ if n_equal_bins < 1:
+ raise ValueError('`bins` must be positive, when an integer')
+
+ first_edge, last_edge = _get_outer_edges(a, range)
+
+ elif np.ndim(bins) == 1:
+ bin_edges = np.asarray(bins)
+ if np.any(bin_edges[:-1] > bin_edges[1:]):
+ raise ValueError(
+ '`bins` must increase monotonically, when an array')
+
+ else:
+ raise ValueError('`bins` must be 1d, when an array')
+
+ if n_equal_bins is not None:
+ # gh-10322 means that type resolution rules are dependent on array
+ # shapes. To avoid this causing problems, we pick a type now and stick
+ # with it throughout.
+ bin_type = np.result_type(first_edge, last_edge, a)
+ if np.issubdtype(bin_type, np.integer):
+ bin_type = np.result_type(bin_type, float)
+
+ # bin edges must be computed
+ bin_edges = np.linspace(
+ first_edge, last_edge, n_equal_bins + 1,
+ endpoint=True, dtype=bin_type)
+ if np.any(bin_edges[:-1] >= bin_edges[1:]):
+ raise ValueError(
+ f'Too many bins for data range. Cannot create {n_equal_bins} '
+ f'finite-sized bins.')
+ return bin_edges, (first_edge, last_edge, n_equal_bins)
+ else:
+ return bin_edges, None
+
+
+def _search_sorted_inclusive(a, v):
+ """
+ Like `searchsorted`, but where the last item in `v` is placed on the right.
+
+ In the context of a histogram, this makes the last bin edge inclusive
+ """
+ return np.concatenate((
+ a.searchsorted(v[:-1], 'left'),
+ a.searchsorted(v[-1:], 'right')
+ ))
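+
+# Illustrative example (not part of the original source): with sorted data
+# a = np.array([1., 2., 3.]) and edges v = np.array([1., 3.]),
+# _search_sorted_inclusive(a, v) gives array([0, 3]); the last edge is
+# searched from the right, so values equal to it fall into the final bin.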
+
+
+def _histogram_bin_edges_dispatcher(a, bins=None, range=None, weights=None):
+ return (a, bins, weights)
+
+
+@array_function_dispatch(_histogram_bin_edges_dispatcher)
+def histogram_bin_edges(a, bins=10, range=None, weights=None):
+ r"""
+ Function to calculate only the edges of the bins used by the `histogram`
+ function.
+
+ Parameters
+ ----------
+ a : array_like
+ Input data. The histogram is computed over the flattened array.
+ bins : int or sequence of scalars or str, optional
+ If `bins` is an int, it defines the number of equal-width
+ bins in the given range (10, by default). If `bins` is a
+ sequence, it defines the bin edges, including the rightmost
+ edge, allowing for non-uniform bin widths.
+
+ If `bins` is a string from the list below, `histogram_bin_edges` will
+ use the method chosen to calculate the optimal bin width and
+ consequently the number of bins (see the Notes section for more detail
+ on the estimators) from the data that falls within the requested range.
+ While the bin width will be optimal for the actual data
+ in the range, the number of bins will be computed to fill the
+ entire range, including the empty portions. For visualisation,
+ using the 'auto' option is suggested. Weighted data is not
+ supported for automated bin size selection.
+
+ 'auto'
+ Minimum bin width between the 'sturges' and 'fd' estimators.
+ Provides good all-around performance.
+
+ 'fd' (Freedman Diaconis Estimator)
+ Robust (resilient to outliers) estimator that takes into
+ account data variability and data size.
+
+ 'doane'
+ An improved version of Sturges' estimator that works better
+ with non-normal datasets.
+
+ 'scott'
+ Less robust estimator that takes into account data variability
+ and data size.
+
+ 'stone'
+ Estimator based on leave-one-out cross-validation estimate of
+ the integrated squared error. Can be regarded as a generalization
+ of Scott's rule.
+
+ 'rice'
+ Estimator does not take variability into account, only data
+ size. Commonly overestimates number of bins required.
+
+ 'sturges'
+ R's default method, only accounts for data size. Only
+ optimal for gaussian data and underestimates number of bins
+ for large non-gaussian datasets.
+
+ 'sqrt'
+ Square root (of data size) estimator, used by Excel and
+ other programs for its speed and simplicity.
+
+ range : (float, float), optional
+ The lower and upper range of the bins. If not provided, range
+ is simply ``(a.min(), a.max())``. Values outside the range are
+ ignored. The first element of the range must be less than or
+ equal to the second. `range` affects the automatic bin
+ computation as well. While bin width is computed to be optimal
+ based on the actual data within `range`, the bin count will fill
+ the entire range including portions containing no data.
+
+ weights : array_like, optional
+ An array of weights, of the same shape as `a`. Each value in
+ `a` only contributes its associated weight towards the bin count
+ (instead of 1). This is currently not used by any of the bin estimators,
+ but may be in the future.
+
+ Returns
+ -------
+ bin_edges : array of dtype float
+ The edges to pass into `histogram`
+
+ See Also
+ --------
+ histogram
+
+ Notes
+ -----
+ The methods to estimate the optimal number of bins are well founded
+ in literature, and are inspired by the choices R provides for
+ histogram visualisation. Note that having the number of bins
+ proportional to :math:`n^{1/3}` is asymptotically optimal, which is
+ why it appears in most estimators. These are simply plug-in methods
+ that give good starting points for number of bins. In the equations
+ below, :math:`h` is the binwidth and :math:`n_h` is the number of
+ bins. All estimators that compute bin counts are recast to bin width
+ using the `ptp` of the data. The final bin count is obtained from
+ ``np.round(np.ceil(range / h))``. The final bin width is often less
+ than what is returned by the estimators below.
+
+ 'auto' (minimum bin width of the 'sturges' and 'fd' estimators)
+ A compromise to get a good value. For small datasets the Sturges
+ value will usually be chosen, while larger datasets will usually
+ default to FD. Avoids the overly conservative behaviour of FD
+ and Sturges for small and large datasets respectively.
+ Switchover point is usually :math:`a.size \approx 1000`.
+
+ 'fd' (Freedman Diaconis Estimator)
+ .. math:: h = 2 \frac{IQR}{n^{1/3}}
+
+ The binwidth is proportional to the interquartile range (IQR)
+ and inversely proportional to cube root of a.size. Can be too
+ conservative for small datasets, but is quite good for large
+ datasets. The IQR is very robust to outliers.
+
+ 'scott'
+ .. math:: h = \sigma \sqrt[3]{\frac{24 \sqrt{\pi}}{n}}
+
+ The binwidth is proportional to the standard deviation of the
+ data and inversely proportional to cube root of ``x.size``. Can
+ be too conservative for small datasets, but is quite good for
+ large datasets. The standard deviation is not very robust to
+ outliers. Values are very similar to the Freedman-Diaconis
+ estimator in the absence of outliers.
+
+ 'rice'
+ .. math:: n_h = 2n^{1/3}
+
+ The number of bins is only proportional to cube root of
+ ``a.size``. It tends to overestimate the number of bins and it
+ does not take into account data variability.
+
+ 'sturges'
+ .. math:: n_h = \log _{2}(n) + 1
+
+ The number of bins is the base 2 log of ``a.size``. This
+ estimator assumes normality of data and is too conservative for
+ larger, non-normal datasets. This is the default method in R's
+ ``hist`` method.
+
+ 'doane'
+ .. math:: n_h = 1 + \log_{2}(n) +
+ \log_{2}\left(1 + \frac{|g_1|}{\sigma_{g_1}}\right)
+
+ g_1 = mean\left[\left(\frac{x - \mu}{\sigma}\right)^3\right]
+
+ \sigma_{g_1} = \sqrt{\frac{6(n - 2)}{(n + 1)(n + 3)}}
+
+ An improved version of Sturges' formula that produces better
+ estimates for non-normal datasets. This estimator attempts to
+ account for the skew of the data.
+
+ 'sqrt'
+ .. math:: n_h = \sqrt n
+
+ The simplest and fastest estimator. Only takes into account the
+ data size.
+
+ Additionally, if the data is of integer dtype, then the binwidth will never
+ be less than 1.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> arr = np.array([0, 0, 0, 1, 2, 3, 3, 4, 5])
+ >>> np.histogram_bin_edges(arr, bins='auto', range=(0, 1))
+ array([0. , 0.25, 0.5 , 0.75, 1. ])
+ >>> np.histogram_bin_edges(arr, bins=2)
+ array([0. , 2.5, 5. ])
+
+ For consistency with histogram, an array of pre-computed bins is
+ passed through unmodified:
+
+ >>> np.histogram_bin_edges(arr, [1, 2])
+ array([1, 2])
+
+ This function allows one set of bins to be computed, and reused across
+ multiple histograms:
+
+ >>> shared_bins = np.histogram_bin_edges(arr, bins='auto')
+ >>> shared_bins
+ array([0., 1., 2., 3., 4., 5.])
+
+ >>> group_id = np.array([0, 1, 1, 0, 1, 1, 0, 1, 1])
+ >>> hist_0, _ = np.histogram(arr[group_id == 0], bins=shared_bins)
+ >>> hist_1, _ = np.histogram(arr[group_id == 1], bins=shared_bins)
+
+ >>> hist_0; hist_1
+ array([1, 1, 0, 1, 0])
+ array([2, 0, 1, 1, 2])
+
+ Which gives more easily comparable results than using separate bins for
+ each histogram:
+
+ >>> hist_0, bins_0 = np.histogram(arr[group_id == 0], bins='auto')
+ >>> hist_1, bins_1 = np.histogram(arr[group_id == 1], bins='auto')
+ >>> hist_0; hist_1
+ array([1, 1, 1])
+ array([2, 1, 1, 2])
+ >>> bins_0; bins_1
+ array([0., 1., 2., 3.])
+ array([0. , 1.25, 2.5 , 3.75, 5. ])
+
+ """
+ a, weights = _ravel_and_check_weights(a, weights)
+ bin_edges, _ = _get_bin_edges(a, bins, range, weights)
+ return bin_edges
+
+
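+# --- Editor's illustration: a minimal sketch of the 'fd' rule described
+# in the Notes above (hypothetical helper, not part of NumPy). The bin
+# width h = 2*IQR / n**(1/3) is recast to a bin count via the data's
+# ptp, as the docstring describes for all width-based estimators.
+def _fd_bin_count_sketch(x):
+ x = np.asarray(x, dtype=float)
+ iqr = np.subtract(*np.percentile(x, [75, 25])) # interquartile range
+ h = 2 * iqr / x.size ** (1 / 3) # FD bin width (assumes nonzero IQR)
+ return int(np.ceil(np.ptp(x) / h)) # recast width -> count
+
+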
+def _histogram_dispatcher(
+ a, bins=None, range=None, density=None, weights=None):
+ return (a, bins, weights)
+
+
+@array_function_dispatch(_histogram_dispatcher)
+def histogram(a, bins=10, range=None, density=None, weights=None):
+ r"""
+ Compute the histogram of a dataset.
+
+ Parameters
+ ----------
+ a : array_like
+ Input data. The histogram is computed over the flattened array.
+ bins : int or sequence of scalars or str, optional
+ If `bins` is an int, it defines the number of equal-width
+ bins in the given range (10, by default). If `bins` is a
+ sequence, it defines a monotonically increasing array of bin edges,
+ including the rightmost edge, allowing for non-uniform bin widths.
+
+ If `bins` is a string, it defines the method used to calculate the
+ optimal bin width, as defined by `histogram_bin_edges`.
+
+ range : (float, float), optional
+ The lower and upper range of the bins. If not provided, range
+ is simply ``(a.min(), a.max())``. Values outside the range are
+ ignored. The first element of the range must be less than or
+ equal to the second. `range` affects the automatic bin
+ computation as well. While bin width is computed to be optimal
+ based on the actual data within `range`, the bin count will fill
+ the entire range including portions containing no data.
+ weights : array_like, optional
+ An array of weights, of the same shape as `a`. Each value in
+ `a` only contributes its associated weight towards the bin count
+ (instead of 1). If `density` is True, the weights are
+ normalized, so that the integral of the density over the range
+ remains 1.
+ Please note that the ``dtype`` of `weights` will also become the
+ ``dtype`` of the returned accumulator (`hist`), so it must be
+ large enough to hold accumulated values as well.
+ density : bool, optional
+ If ``False``, the result will contain the number of samples in
+ each bin. If ``True``, the result is the value of the
+ probability *density* function at the bin, normalized such that
+ the *integral* over the range is 1. Note that the sum of the
+ histogram values will not be equal to 1 unless bins of unity
+ width are chosen; it is not a probability *mass* function.
+
+ Returns
+ -------
+ hist : array
+ The values of the histogram. See `density` and `weights` for a
+ description of the possible semantics. If `weights` are given,
+ ``hist.dtype`` will be taken from `weights`.
+ bin_edges : array of dtype float
+ Return the bin edges ``(length(hist)+1)``.
+
+
+ See Also
+ --------
+ histogramdd, bincount, searchsorted, digitize, histogram_bin_edges
+
+ Notes
+ -----
+ All but the last (righthand-most) bin is half-open. In other words,
+ if `bins` is::
+
+ [1, 2, 3, 4]
+
+ then the first bin is ``[1, 2)`` (including 1, but excluding 2) and
+ the second ``[2, 3)``. The last bin, however, is ``[3, 4]``, which
+ *includes* 4.
+
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.histogram([1, 2, 1], bins=[0, 1, 2, 3])
+ (array([0, 2, 1]), array([0, 1, 2, 3]))
+ >>> np.histogram(np.arange(4), bins=np.arange(5), density=True)
+ (array([0.25, 0.25, 0.25, 0.25]), array([0, 1, 2, 3, 4]))
+ >>> np.histogram([[1, 2, 1], [1, 0, 1]], bins=[0,1,2,3])
+ (array([1, 4, 1]), array([0, 1, 2, 3]))
+
+ >>> a = np.arange(5)
+ >>> hist, bin_edges = np.histogram(a, density=True)
+ >>> hist
+ array([0.5, 0. , 0.5, 0. , 0. , 0.5, 0. , 0.5, 0. , 0.5])
+ >>> hist.sum()
+ 2.4999999999999996
+ >>> np.sum(hist * np.diff(bin_edges))
+ 1.0
+
+ Automated Bin Selection Methods example, using 2 peak random data
+ with 2000 points.
+
+ .. plot::
+ :include-source:
+
+ import matplotlib.pyplot as plt
+ import numpy as np
+
+ rng = np.random.RandomState(10) # deterministic random data
+ a = np.hstack((rng.normal(size=1000),
+ rng.normal(loc=5, scale=2, size=1000)))
+ plt.hist(a, bins='auto') # arguments are passed to np.histogram
+ plt.title("Histogram with 'auto' bins")
+ plt.show()
+
+ """
+ a, weights = _ravel_and_check_weights(a, weights)
+
+ bin_edges, uniform_bins = _get_bin_edges(a, bins, range, weights)
+
+ # Histogram is an integer or a float array depending on the weights.
+ if weights is None:
+ ntype = np.dtype(np.intp)
+ else:
+ ntype = weights.dtype
+
+ # We set a block size, as this allows us to iterate over chunks when
+ # computing histograms, to minimize memory usage.
+ BLOCK = 65536
+
+ # The fast path uses bincount, but that only works for certain types
+ # of weight
+ simple_weights = (
+ weights is None or
+ np.can_cast(weights.dtype, np.double) or
+ np.can_cast(weights.dtype, complex)
+ )
+
+ if uniform_bins is not None and simple_weights:
+ # Fast algorithm for equal bins
+ # We now convert values of a to bin indices, under the assumption of
+ # equal bin widths (which is valid here).
+ first_edge, last_edge, n_equal_bins = uniform_bins
+
+ # Initialize empty histogram
+ n = np.zeros(n_equal_bins, ntype)
+
+ # Pre-compute histogram scaling factor
+ norm_numerator = n_equal_bins
+ norm_denom = _unsigned_subtract(last_edge, first_edge)
+
+ # We iterate over blocks here for two reasons: the first is that for
+ # large arrays, it is actually faster (for example for a 10^8 array it
+ # is 2x as fast) and it results in a memory footprint 3x lower in the
+ # limit of large arrays.
+ for i in _range(0, len(a), BLOCK):
+ tmp_a = a[i:i + BLOCK]
+ if weights is None:
+ tmp_w = None
+ else:
+ tmp_w = weights[i:i + BLOCK]
+
+ # Only include values in the right range
+ keep = (tmp_a >= first_edge)
+ keep &= (tmp_a <= last_edge)
+ if not np.logical_and.reduce(keep):
+ tmp_a = tmp_a[keep]
+ if tmp_w is not None:
+ tmp_w = tmp_w[keep]
+
+ # This cast ensures no type promotions occur below, which gh-10322
+ # showed to be unpredictable. Getting it wrong leads to precision
+ # errors like gh-8123.
+ tmp_a = tmp_a.astype(bin_edges.dtype, copy=False)
+
+ # Compute the bin indices, and for values that lie exactly on
+ # last_edge we need to subtract one
+ f_indices = ((_unsigned_subtract(tmp_a, first_edge) / norm_denom)
+ * norm_numerator)
+ indices = f_indices.astype(np.intp)
+ indices[indices == n_equal_bins] -= 1
+
+ # The index computation is not guaranteed to give exactly
+ # consistent results within ~1 ULP of the bin edges.
+ decrement = tmp_a < bin_edges[indices]
+ indices[decrement] -= 1
+ # The last bin includes the right edge. The other bins do not.
+ increment = ((tmp_a >= bin_edges[indices + 1])
+ & (indices != n_equal_bins - 1))
+ indices[increment] += 1
+
+ # We now compute the histogram using bincount
+ if ntype.kind == 'c':
+ n.real += np.bincount(indices, weights=tmp_w.real,
+ minlength=n_equal_bins)
+ n.imag += np.bincount(indices, weights=tmp_w.imag,
+ minlength=n_equal_bins)
+ else:
+ n += np.bincount(indices, weights=tmp_w,
+ minlength=n_equal_bins).astype(ntype)
+ else:
+ # Compute via cumulative histogram
+ cum_n = np.zeros(bin_edges.shape, ntype)
+ if weights is None:
+ for i in _range(0, len(a), BLOCK):
+ sa = np.sort(a[i:i + BLOCK])
+ cum_n += _search_sorted_inclusive(sa, bin_edges)
+ else:
+ zero = np.zeros(1, dtype=ntype)
+ for i in _range(0, len(a), BLOCK):
+ tmp_a = a[i:i + BLOCK]
+ tmp_w = weights[i:i + BLOCK]
+ sorting_index = np.argsort(tmp_a)
+ sa = tmp_a[sorting_index]
+ sw = tmp_w[sorting_index]
+ cw = np.concatenate((zero, sw.cumsum()))
+ bin_index = _search_sorted_inclusive(sa, bin_edges)
+ cum_n += cw[bin_index]
+
+ n = np.diff(cum_n)
+
+ if density:
+ db = np.array(np.diff(bin_edges), float)
+ return n / db / n.sum(), bin_edges
+
+ return n, bin_edges
+
+
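+# --- Editor's illustration (hypothetical helper, not part of NumPy) ---
+# A compact sketch of the fast path above: for equal-width bins a value
+# maps linearly to an index, values equal to the right edge are pulled
+# into the last bin, and np.bincount accumulates the counts. It skips
+# the ULP-level edge corrections of the full implementation.
+def _equal_bin_histogram_sketch(x, nbins, first, last):
+ x = np.asarray(x, dtype=float)
+ x = x[(x >= first) & (x <= last)] # keep only in-range values
+ idx = ((x - first) / (last - first) * nbins).astype(np.intp)
+ idx[idx == nbins] -= 1 # right edge falls in the last bin
+ return np.bincount(idx, minlength=nbins)
+
+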
+def _histogramdd_dispatcher(sample, bins=None, range=None, density=None,
+ weights=None):
+ if hasattr(sample, 'shape'): # same condition as used in histogramdd
+ yield sample
+ else:
+ yield from sample
+ with contextlib.suppress(TypeError):
+ yield from bins
+ yield weights
+
+
+@array_function_dispatch(_histogramdd_dispatcher)
+def histogramdd(sample, bins=10, range=None, density=None, weights=None):
+ """
+ Compute the multidimensional histogram of some data.
+
+ Parameters
+ ----------
+ sample : (N, D) array, or (N, D) array_like
+ The data to be histogrammed.
+
+ Note the unusual interpretation of sample when an array_like:
+
+ * When an array, each row is a coordinate in a D-dimensional space -
+ such as ``histogramdd(np.array([p1, p2, p3]))``.
+ * When an array_like, each element is the list of values for a single
+ coordinate - such as ``histogramdd((X, Y, Z))``.
+
+ The first form should be preferred.
+
+ bins : sequence or int, optional
+ The bin specification:
+
+ * A sequence of arrays describing the monotonically increasing bin
+ edges along each dimension.
+ * The number of bins for each dimension (nx, ny, ... =bins)
+ * The number of bins for all dimensions (nx=ny=...=bins).
+
+ range : sequence, optional
+ A sequence of length D, each an optional (lower, upper) tuple giving
+ the outer bin edges to be used if the edges are not given explicitly in
+ `bins`.
+ An entry of None in the sequence results in the minimum and maximum
+ values being used for the corresponding dimension.
+ The default, None, is equivalent to passing a tuple of D None values.
+ density : bool, optional
+ If False, the default, returns the number of samples in each bin.
+ If True, returns the probability *density* function at the bin,
+ ``bin_count / sample_count / bin_volume``.
+ weights : (N,) array_like, optional
+ An array of values `w_i` weighing each sample `(x_i, y_i, z_i, ...)`.
+ Weights are normalized to 1 if density is True. If density is False,
+ the values of the returned histogram are equal to the sum of the
+ weights belonging to the samples falling into each bin.
+
+ Returns
+ -------
+ H : ndarray
+ The multidimensional histogram of sample x. See density and weights
+ for the different possible semantics.
+ edges : tuple of ndarrays
+ A tuple of D arrays describing the bin edges for each dimension.
+
+ See Also
+ --------
+ histogram: 1-D histogram
+ histogram2d: 2-D histogram
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> rng = np.random.default_rng()
+ >>> r = rng.normal(size=(100,3))
+ >>> H, edges = np.histogramdd(r, bins = (5, 8, 4))
+ >>> H.shape, edges[0].size, edges[1].size, edges[2].size
+ ((5, 8, 4), 6, 9, 5)
+
+ """
+
+ try:
+ # Sample is an ND-array.
+ N, D = sample.shape
+ except (AttributeError, ValueError):
+ # Sample is a sequence of 1D arrays.
+ sample = np.atleast_2d(sample).T
+ N, D = sample.shape
+
+ nbin = np.empty(D, np.intp)
+ edges = D * [None]
+ dedges = D * [None]
+ if weights is not None:
+ weights = np.asarray(weights)
+
+ try:
+ M = len(bins)
+ if M != D:
+ raise ValueError(
+ 'The dimension of bins must be equal to the dimension of the '
+ 'sample x.')
+ except TypeError:
+ # bins is an integer
+ bins = D * [bins]
+
+ # normalize the range argument
+ if range is None:
+ range = (None,) * D
+ elif len(range) != D:
+ raise ValueError('range argument must have one entry per dimension')
+
+ # Create edge arrays
+ for i in _range(D):
+ if np.ndim(bins[i]) == 0:
+ if bins[i] < 1:
+ raise ValueError(
+ f'`bins[{i}]` must be positive, when an integer')
+ smin, smax = _get_outer_edges(sample[:, i], range[i])
+ try:
+ n = operator.index(bins[i])
+
+ except TypeError as e:
+ raise TypeError(
+ f"`bins[{i}]` must be an integer, when a scalar"
+ ) from e
+
+ edges[i] = np.linspace(smin, smax, n + 1)
+ elif np.ndim(bins[i]) == 1:
+ edges[i] = np.asarray(bins[i])
+ if np.any(edges[i][:-1] > edges[i][1:]):
+ raise ValueError(
+ f'`bins[{i}]` must be monotonically increasing, when an array')
+ else:
+ raise ValueError(
+ f'`bins[{i}]` must be a scalar or 1d array')
+
+ nbin[i] = len(edges[i]) + 1 # includes an outlier on each end
+ dedges[i] = np.diff(edges[i])
+
+ # Compute the bin number each sample falls into.
+ Ncount = tuple(
+ # avoid np.digitize to work around gh-11022
+ np.searchsorted(edges[i], sample[:, i], side='right')
+ for i in _range(D)
+ )
+
+ # Using digitize, values that fall on an edge are put in the right bin.
+ # For the rightmost bin, we want values equal to the right edge to be
+ # counted in the last bin, and not as an outlier.
+ for i in _range(D):
+ # Find which points are on the rightmost edge.
+ on_edge = (sample[:, i] == edges[i][-1])
+ # Shift these points one bin to the left.
+ Ncount[i][on_edge] -= 1
+
+ # Compute the sample indices in the flattened histogram matrix.
+ # This raises an error if the array is too large.
+ xy = np.ravel_multi_index(Ncount, nbin)
+
+ # Compute the number of repetitions in xy and assign it to the
+ # flattened histmat.
+ hist = np.bincount(xy, weights, minlength=nbin.prod())
+
+ # Shape into a proper matrix
+ hist = hist.reshape(nbin)
+
+ # This preserves the (bad) behavior observed in gh-7845, for now.
+ hist = hist.astype(float, casting='safe')
+
+ # Remove outliers (indices 0 and -1 for each dimension).
+ core = D * (slice(1, -1),)
+ hist = hist[core]
+
+ if density:
+ # calculate the probability density function
+ s = hist.sum()
+ for i in _range(D):
+ shape = np.ones(D, int)
+ shape[i] = nbin[i] - 2
+ hist = hist / dedges[i].reshape(shape)
+ hist /= s
+
+ if (hist.shape != nbin - 2).any():
+ raise RuntimeError(
+ "Internal Shape Error")
+ return hist, edges
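+
+
+# --- Editor's illustration (hypothetical helper, not part of NumPy) ---
+# A reduced sketch of the counting core above: searchsorted yields a bin
+# number per dimension (with an outlier slot on each end), the D index
+# arrays are flattened with np.ravel_multi_index, and np.bincount counts
+# repetitions. The right-edge shift of the full code is omitted.
+def _histogramdd_core_sketch(sample, edges):
+ sample = np.asarray(sample)
+ D = sample.shape[1]
+ nbin = np.array([len(e) + 1 for e in edges]) # real bins + 2 outliers
+ Ncount = tuple(
+ np.searchsorted(edges[i], sample[:, i], side='right')
+ for i in range(D)
+ )
+ xy = np.ravel_multi_index(Ncount, nbin) # flatten the D indices
+ hist = np.bincount(xy, minlength=nbin.prod()).reshape(nbin)
+ return hist[D * (slice(1, -1),)] # strip the outlier slots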
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/_histograms_impl.pyi b/.venv/lib/python3.12/site-packages/numpy/lib/_histograms_impl.pyi
new file mode 100644
index 0000000..5e7afb5
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/_histograms_impl.pyi
@@ -0,0 +1,50 @@
+from collections.abc import Sequence
+from typing import (
+ Any,
+ SupportsIndex,
+ TypeAlias,
+)
+from typing import (
+ Literal as L,
+)
+
+from numpy._typing import (
+ ArrayLike,
+ NDArray,
+)
+
+__all__ = ["histogram", "histogramdd", "histogram_bin_edges"]
+
+_BinKind: TypeAlias = L[
+ "stone",
+ "auto",
+ "doane",
+ "fd",
+ "rice",
+ "scott",
+ "sqrt",
+ "sturges",
+]
+
+def histogram_bin_edges(
+ a: ArrayLike,
+ bins: _BinKind | SupportsIndex | ArrayLike = ...,
+ range: tuple[float, float] | None = ...,
+ weights: ArrayLike | None = ...,
+) -> NDArray[Any]: ...
+
+def histogram(
+ a: ArrayLike,
+ bins: _BinKind | SupportsIndex | ArrayLike = ...,
+ range: tuple[float, float] | None = ...,
+ density: bool = ...,
+ weights: ArrayLike | None = ...,
+) -> tuple[NDArray[Any], NDArray[Any]]: ...
+
+def histogramdd(
+ sample: ArrayLike,
+ bins: SupportsIndex | ArrayLike = ...,
+ range: Sequence[tuple[float, float]] = ...,
+ density: bool | None = ...,
+ weights: ArrayLike | None = ...,
+) -> tuple[NDArray[Any], tuple[NDArray[Any], ...]]: ...
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/_index_tricks_impl.py b/.venv/lib/python3.12/site-packages/numpy/lib/_index_tricks_impl.py
new file mode 100644
index 0000000..131bbae
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/_index_tricks_impl.py
@@ -0,0 +1,1067 @@
+import functools
+import math
+import sys
+import warnings
+
+import numpy as np
+import numpy._core.numeric as _nx
+import numpy.matrixlib as matrixlib
+from numpy._core import linspace, overrides
+from numpy._core.multiarray import ravel_multi_index, unravel_index
+from numpy._core.numeric import ScalarType, array
+from numpy._core.numerictypes import issubdtype
+from numpy._utils import set_module
+from numpy.lib._function_base_impl import diff
+from numpy.lib.stride_tricks import as_strided
+
+array_function_dispatch = functools.partial(
+ overrides.array_function_dispatch, module='numpy')
+
+
+__all__ = [
+ 'ravel_multi_index', 'unravel_index', 'mgrid', 'ogrid', 'r_', 'c_',
+ 's_', 'index_exp', 'ix_', 'ndenumerate', 'ndindex', 'fill_diagonal',
+ 'diag_indices', 'diag_indices_from'
+]
+
+
+def _ix__dispatcher(*args):
+ return args
+
+
+@array_function_dispatch(_ix__dispatcher)
+def ix_(*args):
+ """
+ Construct an open mesh from multiple sequences.
+
+ This function takes N 1-D sequences and returns N outputs with N
+ dimensions each, such that the shape is 1 in all but one dimension
+ and the dimension with the non-unit shape value cycles through all
+ N dimensions.
+
+ Using `ix_` one can quickly construct index arrays that will index
+ the cross product. ``a[np.ix_([1,3],[2,5])]`` returns the array
+ ``[[a[1,2] a[1,5]], [a[3,2] a[3,5]]]``.
+
+ Parameters
+ ----------
+ args : 1-D sequences
+ Each sequence should be of integer or boolean type.
+ Boolean sequences will be interpreted as boolean masks for the
+ corresponding dimension (equivalent to passing in
+ ``np.nonzero(boolean_sequence)``).
+
+ Returns
+ -------
+ out : tuple of ndarrays
+ N arrays with N dimensions each, with N the number of input
+ sequences. Together these arrays form an open mesh.
+
+ See Also
+ --------
+ ogrid, mgrid, meshgrid
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> a = np.arange(10).reshape(2, 5)
+ >>> a
+ array([[0, 1, 2, 3, 4],
+ [5, 6, 7, 8, 9]])
+ >>> ixgrid = np.ix_([0, 1], [2, 4])
+ >>> ixgrid
+ (array([[0],
+ [1]]), array([[2, 4]]))
+ >>> ixgrid[0].shape, ixgrid[1].shape
+ ((2, 1), (1, 2))
+ >>> a[ixgrid]
+ array([[2, 4],
+ [7, 9]])
+
+ >>> ixgrid = np.ix_([True, True], [2, 4])
+ >>> a[ixgrid]
+ array([[2, 4],
+ [7, 9]])
+ >>> ixgrid = np.ix_([True, True], [False, False, True, False, True])
+ >>> a[ixgrid]
+ array([[2, 4],
+ [7, 9]])
+
+ """
+ out = []
+ nd = len(args)
+ for k, new in enumerate(args):
+ if not isinstance(new, _nx.ndarray):
+ new = np.asarray(new)
+ if new.size == 0:
+ # Explicitly type empty arrays to avoid float default
+ new = new.astype(_nx.intp)
+ if new.ndim != 1:
+ raise ValueError("Cross index must be 1 dimensional")
+ if issubdtype(new.dtype, _nx.bool):
+ new, = new.nonzero()
+ new = new.reshape((1,) * k + (new.size,) + (1,) * (nd - k - 1))
+ out.append(new)
+ return tuple(out)
+
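+
+# --- Editor's illustration (hypothetical helper, not part of NumPy) ---
+# The open mesh from ix_ is what plain reshaping builds by hand: each
+# 1-D input becomes shape (1,)*k + (n,) + (1,)*(nd-k-1), so the index
+# arrays broadcast to the full cross product.
+def _ix_by_hand_sketch(rows, cols):
+ rows = np.asarray(rows).reshape(-1, 1) # shape (R, 1)
+ cols = np.asarray(cols).reshape(1, -1) # shape (1, C)
+ return rows, cols # a[rows, cols] matches a[np.ix_(rows, cols)]
+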
+
+class nd_grid:
+ """
+ Construct a multi-dimensional "meshgrid".
+
+ ``grid = nd_grid()`` creates an instance which will return a mesh-grid
+ when indexed. The dimension and number of the output arrays are equal
+ to the number of indexing dimensions. If the step length is not a
+ complex number, then the stop is not inclusive.
+
+ However, if the step length is a **complex number** (e.g. 5j), then the
+ integer part of its magnitude is interpreted as specifying the
+ number of points to create between the start and stop values, where
+ the stop value **is inclusive**.
+
+ If instantiated with an argument of ``sparse=True``, the mesh-grid is
+ open (or not fleshed out) so that only one-dimension of each returned
+ argument is greater than 1.
+
+ Parameters
+ ----------
+ sparse : bool, optional
+ Whether the grid is sparse or not. Default is False.
+
+ Notes
+ -----
+ Two instances of `nd_grid` are made available in the NumPy namespace,
+ `mgrid` and `ogrid`, approximately defined as::
+
+ mgrid = nd_grid(sparse=False)
+ ogrid = nd_grid(sparse=True)
+
+ Users should use these pre-defined instances instead of using `nd_grid`
+ directly.
+ """
+ __slots__ = ('sparse',)
+
+ def __init__(self, sparse=False):
+ self.sparse = sparse
+
+ def __getitem__(self, key):
+ try:
+ size = []
+ # Mimic the behavior of `np.arange` and use a data type
+ # which is at least as large as `np.int_`
+ num_list = [0]
+ for k in range(len(key)):
+ step = key[k].step
+ start = key[k].start
+ stop = key[k].stop
+ if start is None:
+ start = 0
+ if step is None:
+ step = 1
+ if isinstance(step, (_nx.complexfloating, complex)):
+ step = abs(step)
+ size.append(int(step))
+ else:
+ size.append(
+ math.ceil((stop - start) / step))
+ num_list += [start, stop, step]
+ typ = _nx.result_type(*num_list)
+ if self.sparse:
+ nn = [_nx.arange(_x, dtype=_t)
+ for _x, _t in zip(size, (typ,) * len(size))]
+ else:
+ nn = _nx.indices(size, typ)
+ for k, kk in enumerate(key):
+ step = kk.step
+ start = kk.start
+ if start is None:
+ start = 0
+ if step is None:
+ step = 1
+ if isinstance(step, (_nx.complexfloating, complex)):
+ step = int(abs(step))
+ if step != 1:
+ step = (kk.stop - start) / float(step - 1)
+ nn[k] = (nn[k] * step + start)
+ if self.sparse:
+ slobj = [_nx.newaxis] * len(size)
+ for k in range(len(size)):
+ slobj[k] = slice(None, None)
+ nn[k] = nn[k][tuple(slobj)]
+ slobj[k] = _nx.newaxis
+ return tuple(nn) # ogrid -> tuple of arrays
+ return nn # mgrid -> ndarray
+ except (IndexError, TypeError):
+ step = key.step
+ stop = key.stop
+ start = key.start
+ if start is None:
+ start = 0
+ if isinstance(step, (_nx.complexfloating, complex)):
+ # Prevent the (potential) creation of integer arrays
+ step_float = abs(step)
+ step = length = int(step_float)
+ if step != 1:
+ step = (key.stop - start) / float(step - 1)
+ typ = _nx.result_type(start, stop, step_float)
+ return _nx.arange(0, length, 1, dtype=typ) * step + start
+ else:
+ return _nx.arange(start, stop, step)
+
+
+class MGridClass(nd_grid):
+ """
+ An instance which returns a dense multi-dimensional "meshgrid".
+
+ An instance which returns a dense (or fleshed out) mesh-grid
+ when indexed, so that each returned argument has the same shape.
+ The dimensions and number of the output arrays are equal to the
+ number of indexing dimensions. If the step length is not a complex
+ number, then the stop is not inclusive.
+
+ However, if the step length is a **complex number** (e.g. 5j), then
+ the integer part of its magnitude is interpreted as specifying the
+ number of points to create between the start and stop values, where
+ the stop value **is inclusive**.
+
+ Returns
+ -------
+ mesh-grid : ndarray
+ A single array, containing a set of `ndarray`\\ s all of the same
+ dimensions, stacked along the first axis.
+
+ See Also
+ --------
+ ogrid : like `mgrid` but returns open (not fleshed out) mesh grids
+ meshgrid: return coordinate matrices from coordinate vectors
+ r_ : array concatenator
+ :ref:`how-to-partition`
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.mgrid[0:5, 0:5]
+ array([[[0, 0, 0, 0, 0],
+ [1, 1, 1, 1, 1],
+ [2, 2, 2, 2, 2],
+ [3, 3, 3, 3, 3],
+ [4, 4, 4, 4, 4]],
+ [[0, 1, 2, 3, 4],
+ [0, 1, 2, 3, 4],
+ [0, 1, 2, 3, 4],
+ [0, 1, 2, 3, 4],
+ [0, 1, 2, 3, 4]]])
+ >>> np.mgrid[-1:1:5j]
+ array([-1. , -0.5, 0. , 0.5, 1. ])
+
+ >>> np.mgrid[0:4].shape
+ (4,)
+ >>> np.mgrid[0:4, 0:5].shape
+ (2, 4, 5)
+ >>> np.mgrid[0:4, 0:5, 0:6].shape
+ (3, 4, 5, 6)
+
+ """
+ __slots__ = ()
+
+ def __init__(self):
+ super().__init__(sparse=False)
+
+
+mgrid = MGridClass()
+
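+
+# --- Editor's illustration (hypothetical helper, not part of NumPy) ---
+# With a complex step the grid is linspace-like (inclusive stop); with a
+# real step it follows arange semantics (exclusive stop).
+def _mgrid_complex_step_sketch():
+ dense = mgrid[-1:1:5j] # 5 points, stop included
+ return np.allclose(dense, np.linspace(-1.0, 1.0, 5))
+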
+
+class OGridClass(nd_grid):
+ """
+ An instance which returns an open multi-dimensional "meshgrid".
+
+ An instance which returns an open (i.e. not fleshed out) mesh-grid
+ when indexed, so that only one dimension of each returned array is
+ greater than 1. The dimension and number of the output arrays are
+ equal to the number of indexing dimensions. If the step length is
+ not a complex number, then the stop is not inclusive.
+
+ However, if the step length is a **complex number** (e.g. 5j), then
+ the integer part of its magnitude is interpreted as specifying the
+ number of points to create between the start and stop values, where
+ the stop value **is inclusive**.
+
+ Returns
+ -------
+ mesh-grid : ndarray or tuple of ndarrays
+ If the input is a single slice, returns an array.
+ If the input is multiple slices, returns a tuple of arrays, with
+ only one dimension not equal to 1.
+
+ See Also
+ --------
+ mgrid : like `ogrid` but returns dense (or fleshed out) mesh grids
+ meshgrid: return coordinate matrices from coordinate vectors
+ r_ : array concatenator
+ :ref:`how-to-partition`
+
+ Examples
+ --------
+ >>> from numpy import ogrid
+ >>> ogrid[-1:1:5j]
+ array([-1. , -0.5, 0. , 0.5, 1. ])
+ >>> ogrid[0:5, 0:5]
+ (array([[0],
+ [1],
+ [2],
+ [3],
+ [4]]),
+ array([[0, 1, 2, 3, 4]]))
+
+ """
+ __slots__ = ()
+
+ def __init__(self):
+ super().__init__(sparse=True)
+
+
+ogrid = OGridClass()
+
+
+class AxisConcatenator:
+ """
+ Translates slice objects to concatenation along an axis.
+
+ For detailed documentation on usage, see `r_`.
+ """
+ __slots__ = ('axis', 'matrix', 'ndmin', 'trans1d')
+
+ # allow ma.mr_ to override this
+ concatenate = staticmethod(_nx.concatenate)
+ makemat = staticmethod(matrixlib.matrix)
+
+ def __init__(self, axis=0, matrix=False, ndmin=1, trans1d=-1):
+ self.axis = axis
+ self.matrix = matrix
+ self.trans1d = trans1d
+ self.ndmin = ndmin
+
+ def __getitem__(self, key):
+ # handle matrix builder syntax
+ if isinstance(key, str):
+ frame = sys._getframe().f_back
+ mymat = matrixlib.bmat(key, frame.f_globals, frame.f_locals)
+ return mymat
+
+ if not isinstance(key, tuple):
+ key = (key,)
+
+ # copy attributes, since they can be overridden in the first argument
+ trans1d = self.trans1d
+ ndmin = self.ndmin
+ matrix = self.matrix
+ axis = self.axis
+
+ objs = []
+ # dtypes or scalars for weak scalar handling in result_type
+ result_type_objs = []
+
+ for k, item in enumerate(key):
+ scalar = False
+ if isinstance(item, slice):
+ step = item.step
+ start = item.start
+ stop = item.stop
+ if start is None:
+ start = 0
+ if step is None:
+ step = 1
+ if isinstance(step, (_nx.complexfloating, complex)):
+ size = int(abs(step))
+ newobj = linspace(start, stop, num=size)
+ else:
+ newobj = _nx.arange(start, stop, step)
+ if ndmin > 1:
+ newobj = array(newobj, copy=None, ndmin=ndmin)
+ if trans1d != -1:
+ newobj = newobj.swapaxes(-1, trans1d)
+ elif isinstance(item, str):
+ if k != 0:
+ raise ValueError("special directives must be the "
+ "first entry.")
+ if item in ('r', 'c'):
+ matrix = True
+ col = (item == 'c')
+ continue
+ if ',' in item:
+ vec = item.split(',')
+ try:
+ axis, ndmin = [int(x) for x in vec[:2]]
+ if len(vec) == 3:
+ trans1d = int(vec[2])
+ continue
+ except Exception as e:
+ raise ValueError(
+ f"unknown special directive {item!r}"
+ ) from e
+ try:
+ axis = int(item)
+ continue
+ except (ValueError, TypeError) as e:
+ raise ValueError("unknown special directive") from e
+ elif type(item) in ScalarType:
+ scalar = True
+ newobj = item
+ else:
+ item_ndim = np.ndim(item)
+ newobj = array(item, copy=None, subok=True, ndmin=ndmin)
+ if trans1d != -1 and item_ndim < ndmin:
+ k2 = ndmin - item_ndim
+ k1 = trans1d
+ if k1 < 0:
+ k1 += k2 + 1
+ defaxes = list(range(ndmin))
+ axes = defaxes[:k1] + defaxes[k2:] + defaxes[k1:k2]
+ newobj = newobj.transpose(axes)
+
+ objs.append(newobj)
+ if scalar:
+ result_type_objs.append(item)
+ else:
+ result_type_objs.append(newobj.dtype)
+
+ # Ensure that scalars won't up-cast unless warranted, for 0, drops
+ # through to error in concatenate.
+ if len(result_type_objs) != 0:
+ final_dtype = _nx.result_type(*result_type_objs)
+ # concatenate could do cast, but that can be overridden:
+ objs = [array(obj, copy=None, subok=True,
+ ndmin=ndmin, dtype=final_dtype) for obj in objs]
+
+ res = self.concatenate(tuple(objs), axis=axis)
+
+ if matrix:
+ oldndim = res.ndim
+ res = self.makemat(res)
+ if oldndim == 1 and col:
+ res = res.T
+ return res
+
+ def __len__(self):
+ return 0
+
+# separate classes are used here instead of just making r_ = concatenator(0),
+# etc. because otherwise we couldn't get the doc string to come out right
+# in help(r_)
+
+
+class RClass(AxisConcatenator):
+ """
+ Translates slice objects to concatenation along the first axis.
+
+ This is a simple way to build up arrays quickly. There are two use cases.
+
+ 1. If the index expression contains comma separated arrays, then stack
+ them along their first axis.
+ 2. If the index expression contains slice notation or scalars then create
+ a 1-D array with a range indicated by the slice notation.
+
+ If slice notation is used, the syntax ``start:stop:step`` is equivalent
+ to ``np.arange(start, stop, step)`` inside of the brackets. However, if
+ ``step`` is an imaginary number (e.g. 100j) then its integer portion is
+ interpreted as a number-of-points desired and the start and stop are
+ inclusive. In other words ``start:stop:stepj`` is interpreted as
+ ``np.linspace(start, stop, step, endpoint=True)`` inside of the brackets.
+ After expansion of slice notation, all comma separated sequences are
+ concatenated together.
+
+ Optional character strings placed as the first element of the index
+ expression can be used to change the output. The strings 'r' or 'c' result
+ in matrix output. If the result is 1-D and 'r' is specified a 1 x N (row)
+ matrix is produced. If the result is 1-D and 'c' is specified, then a N x 1
+ (column) matrix is produced. If the result is 2-D then both provide the
+ same matrix result.
+
+ A string integer specifies which axis to stack multiple comma separated
+ arrays along. A string of two comma-separated integers allows indication
+ of the minimum number of dimensions to force each entry into as the
+ second integer (the axis to concatenate along is still the first integer).
+
+ A string with three comma-separated integers allows specification of the
+ axis to concatenate along, the minimum number of dimensions to force the
+ entries to, and which axis should contain the start of the arrays which
+ are less than the specified number of dimensions. In other words the third
+ integer allows you to specify where the 1's should be placed in the shape
+ of the arrays that have their shapes upgraded. By default, they are placed
+ in the front of the shape tuple. The third argument allows you to specify
+ where the start of the array should be instead. Thus, a third argument of
+ '0' would place the 1's at the end of the array shape. Negative integers
+ specify where in the new shape tuple the last dimension of upgraded arrays
+ should be placed, so the default is '-1'.
+
+ Parameters
+ ----------
+ Not a function, so takes no parameters
+
+
+ Returns
+ -------
+ A concatenated ndarray or matrix.
+
+ See Also
+ --------
+ concatenate : Join a sequence of arrays along an existing axis.
+ c_ : Translates slice objects to concatenation along the second axis.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.r_[np.array([1,2,3]), 0, 0, np.array([4,5,6])]
+ array([1, 2, 3, ..., 4, 5, 6])
+ >>> np.r_[-1:1:6j, [0]*3, 5, 6]
+ array([-1. , -0.6, -0.2, 0.2, 0.6, 1. , 0. , 0. , 0. , 5. , 6. ])
+
+ String integers specify the axis to concatenate along or the minimum
+ number of dimensions to force entries into.
+
+ >>> a = np.array([[0, 1, 2], [3, 4, 5]])
+ >>> np.r_['-1', a, a] # concatenate along last axis
+ array([[0, 1, 2, 0, 1, 2],
+ [3, 4, 5, 3, 4, 5]])
+ >>> np.r_['0,2', [1,2,3], [4,5,6]] # concatenate along first axis, dim>=2
+ array([[1, 2, 3],
+ [4, 5, 6]])
+
+ >>> np.r_['0,2,0', [1,2,3], [4,5,6]]
+ array([[1],
+ [2],
+ [3],
+ [4],
+ [5],
+ [6]])
+ >>> np.r_['1,2,0', [1,2,3], [4,5,6]]
+ array([[1, 4],
+ [2, 5],
+ [3, 6]])
+
+ Using 'r' or 'c' as a first string argument creates a matrix.
+
+ >>> np.r_['r',[1,2,3], [4,5,6]]
+ matrix([[1, 2, 3, 4, 5, 6]])
+
+ """
+ __slots__ = ()
+
+ def __init__(self):
+ AxisConcatenator.__init__(self, 0)
+
+
+r_ = RClass()
+
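+
+# --- Editor's illustration (hypothetical helper, not part of NumPy) ---
+# The string directives documented above control the concatenation axis
+# and the minimum dimensionality; '0,2' concatenates along axis 0 after
+# forcing each entry to at least 2-D, which matches np.vstack here.
+def _r_directive_sketch():
+ stacked = r_['0,2', [1, 2, 3], [4, 5, 6]]
+ by_hand = np.vstack(([1, 2, 3], [4, 5, 6]))
+ return np.array_equal(stacked, by_hand)
+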
+
+class CClass(AxisConcatenator):
+ """
+ Translates slice objects to concatenation along the second axis.
+
+ This is short-hand for ``np.r_['-1,2,0', index expression]``, which is
+ useful because of its common occurrence. In particular, arrays will be
+ stacked along their last axis after being upgraded to at least 2-D with
+ 1's post-pended to the shape (column vectors made out of 1-D arrays).
+
+ See Also
+ --------
+ column_stack : Stack 1-D arrays as columns into a 2-D array.
+ r_ : For more detailed documentation.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.c_[np.array([1,2,3]), np.array([4,5,6])]
+ array([[1, 4],
+ [2, 5],
+ [3, 6]])
+ >>> np.c_[np.array([[1,2,3]]), 0, 0, np.array([[4,5,6]])]
+ array([[1, 2, 3, ..., 4, 5, 6]])
+
+ """
+ __slots__ = ()
+
+ def __init__(self):
+ AxisConcatenator.__init__(self, -1, ndmin=2, trans1d=0)
+
+
+c_ = CClass()
+
+
+@set_module('numpy')
+class ndenumerate:
+ """
+ Multidimensional index iterator.
+
+ Return an iterator yielding pairs of array coordinates and values.
+
+ Parameters
+ ----------
+ arr : ndarray
+ Input array.
+
+ See Also
+ --------
+ ndindex, flatiter
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> a = np.array([[1, 2], [3, 4]])
+ >>> for index, x in np.ndenumerate(a):
+ ... print(index, x)
+ (0, 0) 1
+ (0, 1) 2
+ (1, 0) 3
+ (1, 1) 4
+
+ """
+
+ def __init__(self, arr):
+ self.iter = np.asarray(arr).flat
+
+ def __next__(self):
+ """
+ Standard iterator method, returns the index tuple and array value.
+
+ Returns
+ -------
+ coords : tuple of ints
+ The indices of the current iteration.
+ val : scalar
+ The array element of the current iteration.
+
+ """
+ return self.iter.coords, next(self.iter)
+
+ def __iter__(self):
+ return self
+
+
+@set_module('numpy')
+class ndindex:
+ """
+ An N-dimensional iterator object to index arrays.
+
+ Given the shape of an array, an `ndindex` instance iterates over
+ the N-dimensional index of the array. At each iteration a tuple
+ of indices is returned, the last dimension is iterated over first.
+
+ Parameters
+ ----------
+ shape : ints, or a single tuple of ints
+ The size of each dimension of the array can be passed as
+ individual parameters or as the elements of a tuple.
+
+ See Also
+ --------
+ ndenumerate, flatiter
+
+ Examples
+ --------
+ >>> import numpy as np
+
+ Dimensions as individual arguments
+
+ >>> for index in np.ndindex(3, 2, 1):
+ ... print(index)
+ (0, 0, 0)
+ (0, 1, 0)
+ (1, 0, 0)
+ (1, 1, 0)
+ (2, 0, 0)
+ (2, 1, 0)
+
+ Same dimensions - but in a tuple ``(3, 2, 1)``
+
+ >>> for index in np.ndindex((3, 2, 1)):
+ ... print(index)
+ (0, 0, 0)
+ (0, 1, 0)
+ (1, 0, 0)
+ (1, 1, 0)
+ (2, 0, 0)
+ (2, 1, 0)
+
+ """
+
+ def __init__(self, *shape):
+ if len(shape) == 1 and isinstance(shape[0], tuple):
+ shape = shape[0]
+ x = as_strided(_nx.zeros(1), shape=shape,
+ strides=_nx.zeros_like(shape))
+ self._it = _nx.nditer(x, flags=['multi_index', 'zerosize_ok'],
+ order='C')
+
+ def __iter__(self):
+ return self
+
+ def ndincr(self):
+ """
+ Increment the multi-dimensional index by one.
+
+ This method is for backward compatibility only: do not use.
+
+ .. deprecated:: 1.20.0
+ This method has been advised against since numpy 1.8.0, but only
+ started emitting DeprecationWarning as of this version.
+ """
+ # NumPy 1.20.0, 2020-09-08
+ warnings.warn(
+ "`ndindex.ndincr()` is deprecated, use `next(ndindex)` instead",
+ DeprecationWarning, stacklevel=2)
+ next(self)
+
+ def __next__(self):
+ """
+ Standard iterator method, updates the index and returns the index
+ tuple.
+
+ Returns
+ -------
+ val : tuple of ints
+ Returns a tuple containing the indices of the current
+ iteration.
+
+ """
+ next(self._it)
+ return self._it.multi_index
+
+
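+# --- Editor's illustration (hypothetical helper, not part of NumPy) ---
+# ndindex walks indices in C order, last axis fastest, which is exactly
+# the order itertools.product gives over the per-axis ranges.
+def _ndindex_order_sketch(shape):
+ import itertools
+ return list(ndindex(*shape)) == list(
+ itertools.product(*(range(n) for n in shape)))
+
+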
+# You can do all this with slice() plus a few special objects,
+# but there's a lot to remember. This version is simpler because
+# it uses the standard array indexing syntax.
+#
+# Written by Konrad Hinsen <hinsen@cnrs-orleans.fr>
+# last revision: 1999-7-23
+#
+# Cosmetic changes by T. Oliphant 2001
+#
+#
+
+class IndexExpression:
+ """
+ A nicer way to build up index tuples for arrays.
+
+ .. note::
+ Use one of the two predefined instances ``index_exp`` or `s_`
+ rather than directly using `IndexExpression`.
+
+ For any index combination, including slicing and axis insertion,
+ ``a[indices]`` is the same as ``a[np.index_exp[indices]]`` for any
+ array `a`. However, ``np.index_exp[indices]`` can be used anywhere
+ in Python code and returns a tuple of slice objects that can be
+ used in the construction of complex index expressions.
+
+ Parameters
+ ----------
+ maketuple : bool
+ If True, always returns a tuple.
+
+ See Also
+ --------
+ s_ : Predefined instance without tuple conversion:
+ `s_ = IndexExpression(maketuple=False)`.
+ The ``index_exp`` is another predefined instance that
+ always returns a tuple:
+ `index_exp = IndexExpression(maketuple=True)`.
+
+ Notes
+ -----
+ You can do all this with :class:`slice` plus a few special objects,
+ but there's a lot to remember and this version is simpler because
+ it uses the standard array indexing syntax.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.s_[2::2]
+ slice(2, None, 2)
+ >>> np.index_exp[2::2]
+ (slice(2, None, 2),)
+
+ >>> np.array([0, 1, 2, 3, 4])[np.s_[2::2]]
+ array([2, 4])
+
+ """
+ __slots__ = ('maketuple',)
+
+ def __init__(self, maketuple):
+ self.maketuple = maketuple
+
+ def __getitem__(self, item):
+ if self.maketuple and not isinstance(item, tuple):
+ return (item,)
+ else:
+ return item
+
+
+index_exp = IndexExpression(maketuple=True)
+s_ = IndexExpression(maketuple=False)
+
+# End contribution from Konrad.
+
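+
+# --- Editor's illustration (hypothetical helper, not part of NumPy) ---
+# s_ captures slice syntax as a reusable object, so one index expression
+# can be applied to many arrays.
+def _s_reuse_sketch(arrays):
+ window = s_[1:-1] # slice(1, -1, None), reusable
+ return [a[window] for a in arrays]
+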
+
+# The following functions complement those in twodim_base, but are
+# applicable to N-dimensions.
+
+
+def _fill_diagonal_dispatcher(a, val, wrap=None):
+ return (a,)
+
+
+@array_function_dispatch(_fill_diagonal_dispatcher)
+def fill_diagonal(a, val, wrap=False):
+ """Fill the main diagonal of the given array of any dimensionality.
+
+ For an array `a` with ``a.ndim >= 2``, the diagonal is the list of
+ values ``a[i, ..., i]`` with indices ``i`` all identical. This function
+ modifies the input array in-place without returning a value.
+
+ Parameters
+ ----------
+ a : array, at least 2-D.
+ Array whose diagonal is to be filled in-place.
+ val : scalar or array_like
+ Value(s) to write on the diagonal. If `val` is scalar, the value is
+ written along the diagonal. If array-like, the flattened `val` is
+ written along the diagonal, repeating if necessary to fill all
+ diagonal entries.
+
+ wrap : bool
+ For tall matrices in NumPy versions up to 1.6.2, the
+ diagonal "wrapped" after N columns. You can have this behavior
+ with this option. This affects only tall matrices.
+
+ See also
+ --------
+ diag_indices, diag_indices_from
+
+ Notes
+ -----
+ This functionality can be obtained via `diag_indices`, but internally
+ this version uses a much faster implementation that never constructs the
+ indices and uses simple slicing.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> a = np.zeros((3, 3), int)
+ >>> np.fill_diagonal(a, 5)
+ >>> a
+ array([[5, 0, 0],
+ [0, 5, 0],
+ [0, 0, 5]])
+
+ The same function can operate on a 4-D array:
+
+ >>> a = np.zeros((3, 3, 3, 3), int)
+ >>> np.fill_diagonal(a, 4)
+
+ We only show a few blocks for clarity:
+
+ >>> a[0, 0]
+ array([[4, 0, 0],
+ [0, 0, 0],
+ [0, 0, 0]])
+ >>> a[1, 1]
+ array([[0, 0, 0],
+ [0, 4, 0],
+ [0, 0, 0]])
+ >>> a[2, 2]
+ array([[0, 0, 0],
+ [0, 0, 0],
+ [0, 0, 4]])
+
+ The wrap option affects only tall matrices:
+
+ >>> # tall matrices no wrap
+ >>> a = np.zeros((5, 3), int)
+ >>> np.fill_diagonal(a, 4)
+ >>> a
+ array([[4, 0, 0],
+ [0, 4, 0],
+ [0, 0, 4],
+ [0, 0, 0],
+ [0, 0, 0]])
+
+ >>> # tall matrices wrap
+ >>> a = np.zeros((5, 3), int)
+ >>> np.fill_diagonal(a, 4, wrap=True)
+ >>> a
+ array([[4, 0, 0],
+ [0, 4, 0],
+ [0, 0, 4],
+ [0, 0, 0],
+ [4, 0, 0]])
+
+ >>> # wide matrices
+ >>> a = np.zeros((3, 5), int)
+ >>> np.fill_diagonal(a, 4, wrap=True)
+ >>> a
+ array([[4, 0, 0, 0, 0],
+ [0, 4, 0, 0, 0],
+ [0, 0, 4, 0, 0]])
+
+ The anti-diagonal can be filled by reversing the order of elements
+ using either `numpy.flipud` or `numpy.fliplr`.
+
+ >>> a = np.zeros((3, 3), int);
+ >>> np.fill_diagonal(np.fliplr(a), [1,2,3]) # Horizontal flip
+ >>> a
+ array([[0, 0, 1],
+ [0, 2, 0],
+ [3, 0, 0]])
+ >>> np.fill_diagonal(np.flipud(a), [1,2,3]) # Vertical flip
+ >>> a
+ array([[0, 0, 3],
+ [0, 2, 0],
+ [1, 0, 0]])
+
+ Note that the order in which the diagonal is filled varies depending
+ on the flip function.
+ """
+ if a.ndim < 2:
+ raise ValueError("array must be at least 2-d")
+ end = None
+ if a.ndim == 2:
+ # Explicit, fast formula for the common case. For 2-d arrays, we
+ # accept rectangular ones.
+ step = a.shape[1] + 1
+ # This is needed so that tall matrices don't have the diagonal wrap.
+ if not wrap:
+ end = a.shape[1] * a.shape[1]
+ else:
+ # For more than d=2, the strided formula is only valid for arrays with
+ # all dimensions equal, so we check first.
+ if not np.all(diff(a.shape) == 0):
+ raise ValueError("All dimensions of input must be of equal length")
+ step = 1 + (np.cumprod(a.shape[:-1])).sum()
+
+ # Write the value out into the diagonal.
+ a.flat[:end:step] = val
+
+
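+# --- Editor's illustration (hypothetical helper, not part of NumPy) ---
+# The 2-D fast path above is plain strided assignment: on a flattened
+# C-contiguous square array the diagonal entries sit n + 1 apart.
+def _diag_stride_sketch(n, val):
+ a = np.zeros((n, n)) # C-contiguous, so ravel() is a view
+ a.ravel()[::n + 1] = val # same cells as fill_diagonal(a, val)
+ return a
+
+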
+@set_module('numpy')
+def diag_indices(n, ndim=2):
+ """
+ Return the indices to access the main diagonal of an array.
+
+ This returns a tuple of indices that can be used to access the main
+ diagonal of an array `a` with ``a.ndim >= 2`` dimensions and shape
+ (n, n, ..., n). For ``a.ndim = 2`` this is the usual diagonal, for
+ ``a.ndim > 2`` this is the set of indices to access ``a[i, i, ..., i]``
+ for ``i = [0..n-1]``.
+
+ Parameters
+ ----------
+ n : int
+ The size, along each dimension, of the arrays for which the returned
+ indices can be used.
+
+ ndim : int, optional
+ The number of dimensions.
+
+ See Also
+ --------
+ diag_indices_from
+
+ Examples
+ --------
+ >>> import numpy as np
+
+ Create a set of indices to access the diagonal of a (4, 4) array:
+
+ >>> di = np.diag_indices(4)
+ >>> di
+ (array([0, 1, 2, 3]), array([0, 1, 2, 3]))
+ >>> a = np.arange(16).reshape(4, 4)
+ >>> a
+ array([[ 0, 1, 2, 3],
+ [ 4, 5, 6, 7],
+ [ 8, 9, 10, 11],
+ [12, 13, 14, 15]])
+ >>> a[di] = 100
+ >>> a
+ array([[100, 1, 2, 3],
+ [ 4, 100, 6, 7],
+ [ 8, 9, 100, 11],
+ [ 12, 13, 14, 100]])
+
+ Now, we create indices to manipulate a 3-D array:
+
+ >>> d3 = np.diag_indices(2, 3)
+ >>> d3
+ (array([0, 1]), array([0, 1]), array([0, 1]))
+
+ And use it to set the diagonal of an array of zeros to 1:
+
+ >>> a = np.zeros((2, 2, 2), dtype=int)
+ >>> a[d3] = 1
+ >>> a
+ array([[[1, 0],
+ [0, 0]],
+ [[0, 0],
+ [0, 1]]])
+
+ """
+ idx = np.arange(n)
+ return (idx,) * ndim
+
+
+def _diag_indices_from(arr):
+ return (arr,)
+
+
+@array_function_dispatch(_diag_indices_from)
+def diag_indices_from(arr):
+ """
+ Return the indices to access the main diagonal of an n-dimensional array.
+
+ See `diag_indices` for full details.
+
+ Parameters
+ ----------
+ arr : array, at least 2-D
+
+ See Also
+ --------
+ diag_indices
+
+ Examples
+ --------
+ >>> import numpy as np
+
+ Create a 4 by 4 array.
+
+ >>> a = np.arange(16).reshape(4, 4)
+ >>> a
+ array([[ 0, 1, 2, 3],
+ [ 4, 5, 6, 7],
+ [ 8, 9, 10, 11],
+ [12, 13, 14, 15]])
+
+ Get the indices of the diagonal elements.
+
+ >>> di = np.diag_indices_from(a)
+ >>> di
+ (array([0, 1, 2, 3]), array([0, 1, 2, 3]))
+
+ >>> a[di]
+ array([ 0, 5, 10, 15])
+
+ This is simply syntactic sugar for diag_indices.
+
+ >>> np.diag_indices(a.shape[0])
+ (array([0, 1, 2, 3]), array([0, 1, 2, 3]))
+
+ """
+
+ if not arr.ndim >= 2:
+ raise ValueError("input array must be at least 2-d")
+ # For more than d=2, the strided formula is only valid for arrays with
+ # all dimensions equal, so we check first.
+ if not np.all(diff(arr.shape) == 0):
+ raise ValueError("All dimensions of input must be of equal length")
+
+ return diag_indices(arr.shape[0], arr.ndim)
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/_index_tricks_impl.pyi b/.venv/lib/python3.12/site-packages/numpy/lib/_index_tricks_impl.pyi
new file mode 100644
index 0000000..7ac2b3a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/_index_tricks_impl.pyi
@@ -0,0 +1,196 @@
+from collections.abc import Sequence
+from typing import Any, ClassVar, Final, Generic, Self, SupportsIndex, final, overload
+from typing import Literal as L
+
+from _typeshed import Incomplete
+from typing_extensions import TypeVar, deprecated
+
+import numpy as np
+from numpy._core.multiarray import ravel_multi_index, unravel_index
+from numpy._typing import (
+ ArrayLike,
+ NDArray,
+ _AnyShape,
+ _FiniteNestedSequence,
+ _NestedSequence,
+ _SupportsArray,
+ _SupportsDType,
+)
+
+__all__ = [ # noqa: RUF022
+ "ravel_multi_index",
+ "unravel_index",
+ "mgrid",
+ "ogrid",
+ "r_",
+ "c_",
+ "s_",
+ "index_exp",
+ "ix_",
+ "ndenumerate",
+ "ndindex",
+ "fill_diagonal",
+ "diag_indices",
+ "diag_indices_from",
+]
+
+###
+
+_T = TypeVar("_T")
+_TupleT = TypeVar("_TupleT", bound=tuple[Any, ...])
+_ArrayT = TypeVar("_ArrayT", bound=NDArray[Any])
+_DTypeT = TypeVar("_DTypeT", bound=np.dtype)
+_ScalarT = TypeVar("_ScalarT", bound=np.generic)
+_ScalarT_co = TypeVar("_ScalarT_co", bound=np.generic, covariant=True)
+_BoolT_co = TypeVar("_BoolT_co", bound=bool, default=bool, covariant=True)
+
+_AxisT_co = TypeVar("_AxisT_co", bound=int, default=L[0], covariant=True)
+_MatrixT_co = TypeVar("_MatrixT_co", bound=bool, default=L[False], covariant=True)
+_NDMinT_co = TypeVar("_NDMinT_co", bound=int, default=L[1], covariant=True)
+_Trans1DT_co = TypeVar("_Trans1DT_co", bound=int, default=L[-1], covariant=True)
+
+###
+
+class ndenumerate(Generic[_ScalarT_co]):
+ @overload
+ def __new__(cls, arr: _FiniteNestedSequence[_SupportsArray[np.dtype[_ScalarT]]]) -> ndenumerate[_ScalarT]: ...
+ @overload
+ def __new__(cls, arr: str | _NestedSequence[str]) -> ndenumerate[np.str_]: ...
+ @overload
+ def __new__(cls, arr: bytes | _NestedSequence[bytes]) -> ndenumerate[np.bytes_]: ...
+ @overload
+ def __new__(cls, arr: bool | _NestedSequence[bool]) -> ndenumerate[np.bool]: ...
+ @overload
+ def __new__(cls, arr: int | _NestedSequence[int]) -> ndenumerate[np.intp]: ...
+ @overload
+ def __new__(cls, arr: float | _NestedSequence[float]) -> ndenumerate[np.float64]: ...
+ @overload
+ def __new__(cls, arr: complex | _NestedSequence[complex]) -> ndenumerate[np.complex128]: ...
+ @overload
+ def __new__(cls, arr: object) -> ndenumerate[Any]: ...
+
+ # The first overload is a (semi-)workaround for a mypy bug (tested with v1.10 and v1.11)
+ @overload
+ def __next__(
+ self: ndenumerate[np.bool | np.number | np.flexible | np.datetime64 | np.timedelta64],
+ /,
+ ) -> tuple[_AnyShape, _ScalarT_co]: ...
+ @overload
+ def __next__(self: ndenumerate[np.object_], /) -> tuple[_AnyShape, Incomplete]: ...
+ @overload
+ def __next__(self, /) -> tuple[_AnyShape, _ScalarT_co]: ...
+
+ #
+ def __iter__(self) -> Self: ...
+
+class ndindex:
+ @overload
+ def __init__(self, shape: tuple[SupportsIndex, ...], /) -> None: ...
+ @overload
+ def __init__(self, /, *shape: SupportsIndex) -> None: ...
+
+ #
+ def __iter__(self) -> Self: ...
+ def __next__(self) -> _AnyShape: ...
+
+ #
+ @deprecated("Deprecated since 1.20.0.")
+ def ndincr(self, /) -> None: ...
+
+class nd_grid(Generic[_BoolT_co]):
+ sparse: _BoolT_co
+ def __init__(self, sparse: _BoolT_co = ...) -> None: ...
+ @overload
+ def __getitem__(self: nd_grid[L[False]], key: slice | Sequence[slice]) -> NDArray[Incomplete]: ...
+ @overload
+ def __getitem__(self: nd_grid[L[True]], key: slice | Sequence[slice]) -> tuple[NDArray[Incomplete], ...]: ...
+
+@final
+class MGridClass(nd_grid[L[False]]):
+ def __init__(self) -> None: ...
+
+@final
+class OGridClass(nd_grid[L[True]]):
+ def __init__(self) -> None: ...
+
+class AxisConcatenator(Generic[_AxisT_co, _MatrixT_co, _NDMinT_co, _Trans1DT_co]):
+ __slots__ = "axis", "matrix", "ndmin", "trans1d"
+
+ makemat: ClassVar[type[np.matrix[tuple[int, int], np.dtype]]]
+
+ axis: _AxisT_co
+ matrix: _MatrixT_co
+ ndmin: _NDMinT_co
+ trans1d: _Trans1DT_co
+
+ #
+ def __init__(
+ self,
+ /,
+ axis: _AxisT_co = ...,
+ matrix: _MatrixT_co = ...,
+ ndmin: _NDMinT_co = ...,
+ trans1d: _Trans1DT_co = ...,
+ ) -> None: ...
+
+ # TODO(jorenham): annotate this
+ def __getitem__(self, key: Incomplete, /) -> Incomplete: ...
+ def __len__(self, /) -> L[0]: ...
+
+ #
+ @staticmethod
+ @overload
+ def concatenate(*a: ArrayLike, axis: SupportsIndex | None = 0, out: _ArrayT) -> _ArrayT: ...
+ @staticmethod
+ @overload
+ def concatenate(*a: ArrayLike, axis: SupportsIndex | None = 0, out: None = None) -> NDArray[Incomplete]: ...
+
+@final
+class RClass(AxisConcatenator[L[0], L[False], L[1], L[-1]]):
+ def __init__(self, /) -> None: ...
+
+@final
+class CClass(AxisConcatenator[L[-1], L[False], L[2], L[0]]):
+ def __init__(self, /) -> None: ...
+
+class IndexExpression(Generic[_BoolT_co]):
+ maketuple: _BoolT_co
+ def __init__(self, maketuple: _BoolT_co) -> None: ...
+ @overload
+ def __getitem__(self, item: _TupleT) -> _TupleT: ...
+ @overload
+ def __getitem__(self: IndexExpression[L[True]], item: _T) -> tuple[_T]: ...
+ @overload
+ def __getitem__(self: IndexExpression[L[False]], item: _T) -> _T: ...
+
+@overload
+def ix_(*args: _FiniteNestedSequence[_SupportsDType[_DTypeT]]) -> tuple[np.ndarray[_AnyShape, _DTypeT], ...]: ...
+@overload
+def ix_(*args: str | _NestedSequence[str]) -> tuple[NDArray[np.str_], ...]: ...
+@overload
+def ix_(*args: bytes | _NestedSequence[bytes]) -> tuple[NDArray[np.bytes_], ...]: ...
+@overload
+def ix_(*args: bool | _NestedSequence[bool]) -> tuple[NDArray[np.bool], ...]: ...
+@overload
+def ix_(*args: int | _NestedSequence[int]) -> tuple[NDArray[np.intp], ...]: ...
+@overload
+def ix_(*args: float | _NestedSequence[float]) -> tuple[NDArray[np.float64], ...]: ...
+@overload
+def ix_(*args: complex | _NestedSequence[complex]) -> tuple[NDArray[np.complex128], ...]: ...
+
+#
+def fill_diagonal(a: NDArray[Any], val: object, wrap: bool = ...) -> None: ...
+
+#
+def diag_indices(n: int, ndim: int = ...) -> tuple[NDArray[np.intp], ...]: ...
+def diag_indices_from(arr: ArrayLike) -> tuple[NDArray[np.intp], ...]: ...
+
+#
+mgrid: Final[MGridClass] = ...
+ogrid: Final[OGridClass] = ...
+
+r_: Final[RClass] = ...
+c_: Final[CClass] = ...
+
+index_exp: Final[IndexExpression[L[True]]] = ...
+s_: Final[IndexExpression[L[False]]] = ...
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/_iotools.py b/.venv/lib/python3.12/site-packages/numpy/lib/_iotools.py
new file mode 100644
index 0000000..3586b41
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/_iotools.py
@@ -0,0 +1,900 @@
+"""A collection of functions designed to help I/O with ascii files.
+
+"""
+__docformat__ = "restructuredtext en"
+
+import itertools
+
+import numpy as np
+import numpy._core.numeric as nx
+from numpy._utils import asbytes, asunicode
+
+
+def _decode_line(line, encoding=None):
+ """Decode bytes from binary input streams.
+
+ Defaults to decoding from 'latin1'.
+
+ Parameters
+ ----------
+ line : str or bytes
+ Line to be decoded.
+ encoding : str
+ Encoding used to decode `line`.
+
+ Returns
+ -------
+ decoded_line : str
+
+ """
+ if type(line) is bytes:
+ if encoding is None:
+ encoding = "latin1"
+ line = line.decode(encoding)
+
+ return line
+
+
+def _is_string_like(obj):
+ """
+ Check whether obj behaves like a string.
+ """
+ try:
+ obj + ''
+ except (TypeError, ValueError):
+ return False
+ return True
+
+
+def _is_bytes_like(obj):
+ """
+ Check whether obj behaves like a bytes object.
+ """
+ try:
+ obj + b''
+ except (TypeError, ValueError):
+ return False
+ return True
+
+
+def has_nested_fields(ndtype):
+ """
+ Returns whether one or several fields of a dtype are nested.
+
+ Parameters
+ ----------
+ ndtype : dtype
+ Data-type of a structured array.
+
+ Raises
+ ------
+ AttributeError
+ If `ndtype` does not have a `names` attribute.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> dt = np.dtype([('name', 'S4'), ('x', float), ('y', float)])
+ >>> np.lib._iotools.has_nested_fields(dt)
+ False
+
+ """
+ return any(ndtype[name].names is not None for name in ndtype.names or ())
+
+
+def flatten_dtype(ndtype, flatten_base=False):
+ """
+ Unpack a structured data-type by collapsing nested fields and/or fields
+ with a shape.
+
+ Note that the field names are lost.
+
+ Parameters
+ ----------
+ ndtype : dtype
+ The datatype to collapse
+ flatten_base : bool, optional
+ If True, transform a field with a shape into several fields. Default is
+ False.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> dt = np.dtype([('name', 'S4'), ('x', float), ('y', float),
+ ... ('block', int, (2, 3))])
+ >>> np.lib._iotools.flatten_dtype(dt)
+ [dtype('S4'), dtype('float64'), dtype('float64'), dtype('int64')]
+ >>> np.lib._iotools.flatten_dtype(dt, flatten_base=True)
+ [dtype('S4'),
+ dtype('float64'),
+ dtype('float64'),
+ dtype('int64'),
+ dtype('int64'),
+ dtype('int64'),
+ dtype('int64'),
+ dtype('int64'),
+ dtype('int64')]
+
+ """
+ names = ndtype.names
+ if names is None:
+ if flatten_base:
+ return [ndtype.base] * int(np.prod(ndtype.shape))
+ return [ndtype.base]
+ else:
+ types = []
+ for field in names:
+ info = ndtype.fields[field]
+ flat_dt = flatten_dtype(info[0], flatten_base)
+ types.extend(flat_dt)
+ return types
+
+
+class LineSplitter:
+ """
+ Object to split a string at a given delimiter or at given places.
+
+ Parameters
+ ----------
+ delimiter : str, int, or sequence of ints, optional
+ If a string, character used to delimit consecutive fields.
+ If an integer or a sequence of integers, width(s) of each field.
+ comments : str, optional
+ Character used to mark the beginning of a comment. Default is '#'.
+ autostrip : bool, optional
+ Whether to strip each individual field. Default is True.
+
+ """
+
+ def autostrip(self, method):
+ """
+ Wrapper to strip each member of the output of `method`.
+
+ Parameters
+ ----------
+ method : function
+ Function that takes a single argument and returns a sequence of
+ strings.
+
+ Returns
+ -------
+ wrapped : function
+ The result of wrapping `method`. `wrapped` takes a single input
+ argument and returns a list of strings that are stripped of
+ white-space.
+
+ """
+ return lambda input: [_.strip() for _ in method(input)]
+
+ def __init__(self, delimiter=None, comments='#', autostrip=True,
+ encoding=None):
+ delimiter = _decode_line(delimiter)
+ comments = _decode_line(comments)
+
+ self.comments = comments
+
+ # Delimiter is a character
+ if (delimiter is None) or isinstance(delimiter, str):
+ delimiter = delimiter or None
+ _handyman = self._delimited_splitter
+ # Delimiter is a list of field widths
+ elif hasattr(delimiter, '__iter__'):
+ _handyman = self._variablewidth_splitter
+ idx = np.cumsum([0] + list(delimiter))
+ delimiter = [slice(i, j) for (i, j) in itertools.pairwise(idx)]
+ # Delimiter is a single integer
+ elif int(delimiter):
+ (_handyman, delimiter) = (
+ self._fixedwidth_splitter, int(delimiter))
+ else:
+ (_handyman, delimiter) = (self._delimited_splitter, None)
+ self.delimiter = delimiter
+ if autostrip:
+ self._handyman = self.autostrip(_handyman)
+ else:
+ self._handyman = _handyman
+ self.encoding = encoding
+
+ def _delimited_splitter(self, line):
+ """Chop off comments, strip, and split at delimiter. """
+ if self.comments is not None:
+ line = line.split(self.comments)[0]
+ line = line.strip(" \r\n")
+ if not line:
+ return []
+ return line.split(self.delimiter)
+
+ def _fixedwidth_splitter(self, line):
+ if self.comments is not None:
+ line = line.split(self.comments)[0]
+ line = line.strip("\r\n")
+ if not line:
+ return []
+ fixed = self.delimiter
+ slices = [slice(i, i + fixed) for i in range(0, len(line), fixed)]
+ return [line[s] for s in slices]
+
+ def _variablewidth_splitter(self, line):
+ if self.comments is not None:
+ line = line.split(self.comments)[0]
+ if not line:
+ return []
+ slices = self.delimiter
+ return [line[s] for s in slices]
+
+ def __call__(self, line):
+ return self._handyman(_decode_line(line, self.encoding))
+
+
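+# --- Editor's illustration (hypothetical helper, not part of NumPy) ---
+# LineSplitter handles delimited and fixed-width input alike; a small
+# round trip with the class defined above:
+def _line_splitter_sketch():
+ by_comma = LineSplitter(delimiter=',')("1, 2, 3 # trailing comment")
+ by_width = LineSplitter(delimiter=3)("123456789")
+ return by_comma, by_width # (['1', '2', '3'], ['123', '456', '789'])
+
+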
+class NameValidator:
+ """
+ Object to validate a list of strings to use as field names.
+
+ The strings are stripped of any non-alphanumeric character, and spaces
+ are replaced by '_'. During instantiation, the user can define a list
+ of names to exclude, as well as a list of invalid characters. Names in
+ the exclusion list have a '_' character appended.
+
+ Once an instance has been created, it can be called with a list of
+ names, and a list of valid names will be created. The `__call__`
+ method accepts an optional keyword "default" that sets the default name
+ in case of ambiguity. By default this is 'f', so that names will
+ default to `f0`, `f1`, etc.
+
+ Parameters
+ ----------
+ excludelist : sequence, optional
+ A list of names to exclude. This list is appended to the default
+ list ['return', 'file', 'print']. Excluded names have an
+ underscore appended: for example, `file` becomes `file_` if supplied.
+ deletechars : str, optional
+ A string combining invalid characters that must be deleted from the
+ names.
+ case_sensitive : {True, False, 'upper', 'lower'}, optional
+ * If True, field names are case-sensitive.
+ * If False or 'upper', field names are converted to upper case.
+ * If 'lower', field names are converted to lower case.
+
+ The default value is True.
+ replace_space : '_', optional
+ Character(s) used in replacement of white spaces.
+
+ Notes
+ -----
+ Calling an instance of `NameValidator` is the same as calling its
+ method `validate`.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> validator = np.lib._iotools.NameValidator()
+ >>> validator(['file', 'field2', 'with space', 'CaSe'])
+ ('file_', 'field2', 'with_space', 'CaSe')
+
+ >>> validator = np.lib._iotools.NameValidator(excludelist=['excl'],
+ ... deletechars='q',
+ ... case_sensitive=False)
+ >>> validator(['excl', 'field2', 'no_q', 'with space', 'CaSe'])
+ ('EXCL', 'FIELD2', 'NO_Q', 'WITH_SPACE', 'CASE')
+
+ """
+
+ defaultexcludelist = 'return', 'file', 'print'
+ defaultdeletechars = frozenset(r"""~!@#$%^&*()-=+~\|]}[{';: /?.>,<""")
+
+ def __init__(self, excludelist=None, deletechars=None,
+ case_sensitive=None, replace_space='_'):
+ # Process the exclusion list ..
+ if excludelist is None:
+ excludelist = []
+ excludelist.extend(self.defaultexcludelist)
+ self.excludelist = excludelist
+ # Process the list of characters to delete
+ if deletechars is None:
+ delete = set(self.defaultdeletechars)
+ else:
+ delete = set(deletechars)
+ delete.add('"')
+ self.deletechars = delete
+ # Process the case option .....
+ if (case_sensitive is None) or (case_sensitive is True):
+ self.case_converter = lambda x: x
+ elif (case_sensitive is False) or case_sensitive.startswith('u'):
+ self.case_converter = lambda x: x.upper()
+ elif case_sensitive.startswith('l'):
+ self.case_converter = lambda x: x.lower()
+ else:
+ msg = f'unrecognized case_sensitive value {case_sensitive}.'
+ raise ValueError(msg)
+
+ self.replace_space = replace_space
+
+ def validate(self, names, defaultfmt="f%i", nbfields=None):
+ """
+ Validate a list of strings as field names for a structured array.
+
+ Parameters
+ ----------
+ names : sequence of str
+ Strings to be validated.
+ defaultfmt : str, optional
+ Default format string, used if validating a given string
+ reduces its length to zero.
+ nbfields : integer, optional
+ Final number of validated names, used to expand or shrink the
+ initial list of names.
+
+ Returns
+ -------
+ validatednames : list of str
+ The list of validated field names.
+
+ Notes
+ -----
+ A `NameValidator` instance can be called directly, which is the
+ same as calling `validate`. For examples, see `NameValidator`.
+
+ """
+ # Initial checks ..............
+ if (names is None):
+ if (nbfields is None):
+ return None
+ names = []
+ if isinstance(names, str):
+ names = [names, ]
+ if nbfields is not None:
+ nbnames = len(names)
+ if (nbnames < nbfields):
+ names = list(names) + [''] * (nbfields - nbnames)
+ elif (nbnames > nbfields):
+ names = names[:nbfields]
+ # Set some shortcuts ...........
+ deletechars = self.deletechars
+ excludelist = self.excludelist
+ case_converter = self.case_converter
+ replace_space = self.replace_space
+ # Initializes some variables ...
+ validatednames = []
+ seen = {}
+ nbempty = 0
+
+ for item in names:
+ item = case_converter(item).strip()
+ if replace_space:
+ item = item.replace(' ', replace_space)
+ item = ''.join([c for c in item if c not in deletechars])
+ if item == '':
+ item = defaultfmt % nbempty
+ while item in names:
+ nbempty += 1
+ item = defaultfmt % nbempty
+ nbempty += 1
+ elif item in excludelist:
+ item += '_'
+ cnt = seen.get(item, 0)
+ if cnt > 0:
+ validatednames.append(item + '_%d' % cnt)
+ else:
+ validatednames.append(item)
+ seen[item] = cnt + 1
+ return tuple(validatednames)
+
+ def __call__(self, names, defaultfmt="f%i", nbfields=None):
+ return self.validate(names, defaultfmt=defaultfmt, nbfields=nbfields)
+
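+# Editor's note: an extra usage sketch (illustration only, not part of the
+# NumPy source). Duplicate names receive a numeric suffix via the `seen`
+# counter, a case the docstring examples above do not show.
+# >>> from numpy.lib._iotools import NameValidator
+# >>> NameValidator()(['a', 'a', 'b'])
+# ('a', 'a_1', 'b')
+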
+
+def str2bool(value):
+ """
+    Transform a string expected to represent a boolean into a boolean.
+
+ Parameters
+ ----------
+ value : str
+ The string that is transformed to a boolean.
+
+ Returns
+ -------
+ boolval : bool
+ The boolean representation of `value`.
+
+ Raises
+ ------
+ ValueError
+        If the string is not 'True' or 'False' (case-insensitive).
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.lib._iotools.str2bool('TRUE')
+ True
+ >>> np.lib._iotools.str2bool('false')
+ False
+
+ """
+ value = value.upper()
+ if value == 'TRUE':
+ return True
+ elif value == 'FALSE':
+ return False
+ else:
+ raise ValueError("Invalid boolean")
+
+
+class ConverterError(Exception):
+ """
+ Exception raised when an error occurs in a converter for string values.
+
+ """
+ pass
+
+
+class ConverterLockError(ConverterError):
+ """
+ Exception raised when an attempt is made to upgrade a locked converter.
+
+ """
+ pass
+
+
+class ConversionWarning(UserWarning):
+ """
+ Warning issued when a string converter has a problem.
+
+ Notes
+ -----
+ In `genfromtxt` a `ConversionWarning` is issued if raising exceptions
+ is explicitly suppressed with the "invalid_raise" keyword.
+
+ """
+ pass
+
+
+class StringConverter:
+ """
+    Factory class for functions transforming a string into another object
+    (int, float).
+
+ After initialization, an instance can be called to transform a string
+ into another object. If the string is recognized as representing a
+ missing value, a default value is returned.
+
+ Attributes
+ ----------
+ func : function
+ Function used for the conversion.
+ default : any
+ Default value to return when the input corresponds to a missing
+ value.
+ type : type
+ Type of the output.
+ _status : int
+ Integer representing the order of the conversion.
+ _mapper : sequence of tuples
+ Sequence of tuples (dtype, function, default value) to evaluate in
+ order.
+ _locked : bool
+ Holds `locked` parameter.
+
+ Parameters
+ ----------
+ dtype_or_func : {None, dtype, function}, optional
+ If a `dtype`, specifies the input data type, used to define a basic
+ function and a default value for missing data. For example, when
+ `dtype` is float, the `func` attribute is set to `float` and the
+ default value to `np.nan`. If a function, this function is used to
+ convert a string to another object. In this case, it is recommended
+ to give an associated default value as input.
+ default : any, optional
+ Value to return by default, that is, when the string to be
+ converted is flagged as missing. If not given, `StringConverter`
+ tries to supply a reasonable default value.
+ missing_values : {None, sequence of str}, optional
+ ``None`` or sequence of strings indicating a missing value. If ``None``
+ then missing values are indicated by empty entries. The default is
+ ``None``.
+ locked : bool, optional
+ Whether the StringConverter should be locked to prevent automatic
+ upgrade or not. Default is False.
+
+ """
+ _mapper = [(nx.bool, str2bool, False),
+ (nx.int_, int, -1),]
+
+ # On 32-bit systems, we need to make sure that we explicitly include
+    # nx.int64 since nx.int_ is nx.int32.
+ if nx.dtype(nx.int_).itemsize < nx.dtype(nx.int64).itemsize:
+ _mapper.append((nx.int64, int, -1))
+
+ _mapper.extend([(nx.float64, float, nx.nan),
+ (nx.complex128, complex, nx.nan + 0j),
+ (nx.longdouble, nx.longdouble, nx.nan),
+ # If a non-default dtype is passed, fall back to generic
+ # ones (should only be used for the converter)
+ (nx.integer, int, -1),
+ (nx.floating, float, nx.nan),
+ (nx.complexfloating, complex, nx.nan + 0j),
+ # Last, try with the string types (must be last, because
+ # `_mapper[-1]` is used as default in some cases)
+ (nx.str_, asunicode, '???'),
+ (nx.bytes_, asbytes, '???'),
+ ])
+
+ @classmethod
+ def _getdtype(cls, val):
+ """Returns the dtype of the input variable."""
+ return np.array(val).dtype
+
+ @classmethod
+ def _getsubdtype(cls, val):
+ """Returns the type of the dtype of the input variable."""
+ return np.array(val).dtype.type
+
+ @classmethod
+ def _dtypeortype(cls, dtype):
+ """Returns dtype for datetime64 and type of dtype otherwise."""
+
+ # This is a bit annoying. We want to return the "general" type in most
+        # cases (i.e. "string" rather than "S10"), but we want to return the
+        # specific type for datetime64 (i.e. "datetime64[us]" rather than
+ # "datetime64").
+ if dtype.type == np.datetime64:
+ return dtype
+ return dtype.type
+
+ @classmethod
+ def upgrade_mapper(cls, func, default=None):
+ """
+ Upgrade the mapper of a StringConverter by adding a new function and
+ its corresponding default.
+
+ The input function (or sequence of functions) and its associated
+ default value (if any) is inserted in penultimate position of the
+ mapper. The corresponding type is estimated from the dtype of the
+ default value.
+
+ Parameters
+ ----------
+ func : var
+ Function, or sequence of functions
+
+ Examples
+ --------
+ >>> import dateutil.parser
+ >>> import datetime
+ >>> dateparser = dateutil.parser.parse
+ >>> defaultdate = datetime.date(2000, 1, 1)
+ >>> StringConverter.upgrade_mapper(dateparser, default=defaultdate)
+ """
+        # Func is a single function
+ if callable(func):
+ cls._mapper.insert(-1, (cls._getsubdtype(default), func, default))
+ return
+ elif hasattr(func, '__iter__'):
+ if isinstance(func[0], (tuple, list)):
+ for _ in func:
+ cls._mapper.insert(-1, _)
+ return
+ if default is None:
+ default = [None] * len(func)
+ else:
+ default = list(default)
+ default.append([None] * (len(func) - len(default)))
+ for fct, dft in zip(func, default):
+ cls._mapper.insert(-1, (cls._getsubdtype(dft), fct, dft))
+
+ @classmethod
+ def _find_map_entry(cls, dtype):
+ # if a converter for the specific dtype is available use that
+ for i, (deftype, func, default_def) in enumerate(cls._mapper):
+ if dtype.type == deftype:
+ return i, (deftype, func, default_def)
+
+ # otherwise find an inexact match
+ for i, (deftype, func, default_def) in enumerate(cls._mapper):
+ if np.issubdtype(dtype.type, deftype):
+ return i, (deftype, func, default_def)
+
+ raise LookupError
+
+ def __init__(self, dtype_or_func=None, default=None, missing_values=None,
+ locked=False):
+ # Defines a lock for upgrade
+ self._locked = bool(locked)
+ # No input dtype: minimal initialization
+ if dtype_or_func is None:
+ self.func = str2bool
+ self._status = 0
+ self.default = default or False
+ dtype = np.dtype('bool')
+ else:
+            # Is the input a np.dtype?
+ try:
+ self.func = None
+ dtype = np.dtype(dtype_or_func)
+ except TypeError:
+ # dtype_or_func must be a function, then
+ if not callable(dtype_or_func):
+ errmsg = ("The input argument `dtype` is neither a"
+ " function nor a dtype (got '%s' instead)")
+ raise TypeError(errmsg % type(dtype_or_func))
+ # Set the function
+ self.func = dtype_or_func
+ # If we don't have a default, try to guess it or set it to
+ # None
+ if default is None:
+ try:
+ default = self.func('0')
+ except ValueError:
+ default = None
+ dtype = self._getdtype(default)
+
+ # find the best match in our mapper
+ try:
+ self._status, (_, func, default_def) = self._find_map_entry(dtype)
+ except LookupError:
+ # no match
+ self.default = default
+ _, func, _ = self._mapper[-1]
+ self._status = 0
+ else:
+ # use the found default only if we did not already have one
+ if default is None:
+ self.default = default_def
+ else:
+ self.default = default
+
+ # If the input was a dtype, set the function to the last we saw
+ if self.func is None:
+ self.func = func
+
+ # If the status is 1 (int), change the function to
+ # something more robust.
+ if self.func == self._mapper[1][1]:
+ if issubclass(dtype.type, np.uint64):
+ self.func = np.uint64
+ elif issubclass(dtype.type, np.int64):
+ self.func = np.int64
+ else:
+ self.func = lambda x: int(float(x))
+ # Store the list of strings corresponding to missing values.
+ if missing_values is None:
+ self.missing_values = {''}
+ else:
+ if isinstance(missing_values, str):
+ missing_values = missing_values.split(",")
+ self.missing_values = set(list(missing_values) + [''])
+
+ self._callingfunction = self._strict_call
+ self.type = self._dtypeortype(dtype)
+ self._checked = False
+ self._initial_default = default
+
+ def _loose_call(self, value):
+ try:
+ return self.func(value)
+ except ValueError:
+ return self.default
+
+ def _strict_call(self, value):
+ try:
+
+ # We check if we can convert the value using the current function
+ new_value = self.func(value)
+
+ # In addition to having to check whether func can convert the
+ # value, we also have to make sure that we don't get overflow
+ # errors for integers.
+ if self.func is int:
+ try:
+ np.array(value, dtype=self.type)
+ except OverflowError:
+ raise ValueError
+
+ # We're still here so we can now return the new value
+ return new_value
+
+ except ValueError:
+ if value.strip() in self.missing_values:
+ if not self._status:
+ self._checked = False
+ return self.default
+ raise ValueError(f"Cannot convert string '{value}'")
+
+ def __call__(self, value):
+ return self._callingfunction(value)
+
+ def _do_upgrade(self):
+ # Raise an exception if we locked the converter...
+ if self._locked:
+ errmsg = "Converter is locked and cannot be upgraded"
+ raise ConverterLockError(errmsg)
+ _statusmax = len(self._mapper)
+        # Complain if we try to upgrade past the maximum
+ _status = self._status
+ if _status == _statusmax:
+ errmsg = "Could not find a valid conversion function"
+ raise ConverterError(errmsg)
+ elif _status < _statusmax - 1:
+ _status += 1
+ self.type, self.func, default = self._mapper[_status]
+ self._status = _status
+ if self._initial_default is not None:
+ self.default = self._initial_default
+ else:
+ self.default = default
+
+ def upgrade(self, value):
+ """
+ Find the best converter for a given string, and return the result.
+
+ The supplied string `value` is converted by testing different
+ converters in order. First the `func` method of the
+ `StringConverter` instance is tried, if this fails other available
+ converters are tried. The order in which these other converters
+ are tried is determined by the `_status` attribute of the instance.
+
+ Parameters
+ ----------
+ value : str
+ The string to convert.
+
+ Returns
+ -------
+ out : any
+ The result of converting `value` with the appropriate converter.
+
+ """
+ self._checked = True
+ try:
+ return self._strict_call(value)
+ except ValueError:
+ self._do_upgrade()
+ return self.upgrade(value)
+
+ def iterupgrade(self, value):
+ self._checked = True
+ if not hasattr(value, '__iter__'):
+ value = (value,)
+ _strict_call = self._strict_call
+ try:
+ for _m in value:
+ _strict_call(_m)
+ except ValueError:
+ self._do_upgrade()
+ self.iterupgrade(value)
+
+ def update(self, func, default=None, testing_value=None,
+ missing_values='', locked=False):
+ """
+ Set StringConverter attributes directly.
+
+ Parameters
+ ----------
+ func : function
+ Conversion function.
+ default : any, optional
+ Value to return by default, that is, when the string to be
+ converted is flagged as missing. If not given,
+ `StringConverter` tries to supply a reasonable default value.
+ testing_value : str, optional
+ A string representing a standard input value of the converter.
+ This string is used to help defining a reasonable default
+ value.
+ missing_values : {sequence of str, None}, optional
+ Sequence of strings indicating a missing value. If ``None``, then
+ the existing `missing_values` are cleared. The default is ``''``.
+ locked : bool, optional
+ Whether the StringConverter should be locked to prevent
+ automatic upgrade or not. Default is False.
+
+ Notes
+ -----
+ `update` takes the same parameters as the constructor of
+ `StringConverter`, except that `func` does not accept a `dtype`
+ whereas `dtype_or_func` in the constructor does.
+
+ """
+ self.func = func
+ self._locked = locked
+
+ # Don't reset the default to None if we can avoid it
+ if default is not None:
+ self.default = default
+ self.type = self._dtypeortype(self._getdtype(default))
+ else:
+ try:
+ tester = func(testing_value or '1')
+ except (TypeError, ValueError):
+ tester = None
+ self.type = self._dtypeortype(self._getdtype(tester))
+
+ # Add the missing values to the existing set or clear it.
+ if missing_values is None:
+ # Clear all missing values even though the ctor initializes it to
+ # set(['']) when the argument is None.
+ self.missing_values = set()
+ else:
+ if not np.iterable(missing_values):
+ missing_values = [missing_values]
+ if not all(isinstance(v, str) for v in missing_values):
+ raise TypeError("missing_values must be strings or unicode")
+ self.missing_values.update(missing_values)
+
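+# Editor's note: a minimal usage sketch of StringConverter (illustration
+# only, not part of the NumPy source). A fresh instance starts at the most
+# restrictive `_mapper` entry (bool), and `upgrade` walks down the mapper
+# until a conversion succeeds.
+# >>> from numpy.lib._iotools import StringConverter
+# >>> conv = StringConverter()
+# >>> conv.upgrade('1')   # str2bool fails on '1': upgrade to the int entry
+# 1
+# >>> conv('')            # empty strings are missing -> current default
+# -1
+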
+
+def easy_dtype(ndtype, names=None, defaultfmt="f%i", **validationargs):
+ """
+ Convenience function to create a `np.dtype` object.
+
+ The function processes the input `dtype` and matches it with the given
+ names.
+
+ Parameters
+ ----------
+ ndtype : var
+ Definition of the dtype. Can be any string or dictionary recognized
+ by the `np.dtype` function, or a sequence of types.
+ names : str or sequence, optional
+        Sequence of strings to use as field names for a structured dtype.
+        For convenience, `names` can be a comma-separated string of field
+        names.
+ defaultfmt : str, optional
+ Format string used to define missing names, such as ``"f%i"``
+ (default) or ``"fields_%02i"``.
+ validationargs : optional
+ A series of optional arguments used to initialize a
+ `NameValidator`.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.lib._iotools.easy_dtype(float)
+ dtype('float64')
+ >>> np.lib._iotools.easy_dtype("i4, f8")
+ dtype([('f0', '<i4'), ('f1', '<f8')])
+ >>> np.lib._iotools.easy_dtype("i4, f8", defaultfmt="field_%03i")
+ dtype([('field_000', '<i4'), ('field_001', '<f8')])
+
+ >>> np.lib._iotools.easy_dtype((int, float, float), names="a,b,c")
+ dtype([('a', '<i8'), ('b', '<f8'), ('c', '<f8')])
+ >>> np.lib._iotools.easy_dtype(float, names="a,b,c")
+ dtype([('a', '<f8'), ('b', '<f8'), ('c', '<f8')])
+
+ """
+ try:
+ ndtype = np.dtype(ndtype)
+ except TypeError:
+ validate = NameValidator(**validationargs)
+ nbfields = len(ndtype)
+ if names is None:
+ names = [''] * len(ndtype)
+ elif isinstance(names, str):
+ names = names.split(",")
+ names = validate(names, nbfields=nbfields, defaultfmt=defaultfmt)
+ ndtype = np.dtype({"formats": ndtype, "names": names})
+ else:
+ # Explicit names
+ if names is not None:
+ validate = NameValidator(**validationargs)
+ if isinstance(names, str):
+ names = names.split(",")
+ # Simple dtype: repeat to match the nb of names
+ if ndtype.names is None:
+ formats = tuple([ndtype.type] * len(names))
+ names = validate(names, defaultfmt=defaultfmt)
+ ndtype = np.dtype(list(zip(names, formats)))
+ # Structured dtype: just validate the names as needed
+ else:
+ ndtype.names = validate(names, nbfields=len(ndtype.names),
+ defaultfmt=defaultfmt)
+ # No implicit names
+ elif ndtype.names is not None:
+ validate = NameValidator(**validationargs)
+            # Default initial names: should we change the format?
+ numbered_names = tuple(f"f{i}" for i in range(len(ndtype.names)))
+ if ((ndtype.names == numbered_names) and (defaultfmt != "f%i")):
+ ndtype.names = validate([''] * len(ndtype.names),
+ defaultfmt=defaultfmt)
+ # Explicit initial names : just validate
+ else:
+ ndtype.names = validate(ndtype.names, defaultfmt=defaultfmt)
+ return ndtype
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/_iotools.pyi b/.venv/lib/python3.12/site-packages/numpy/lib/_iotools.pyi
new file mode 100644
index 0000000..21cfc3b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/_iotools.pyi
@@ -0,0 +1,114 @@
+from collections.abc import Callable, Iterable, Sequence
+from typing import (
+ Any,
+ ClassVar,
+ Final,
+ Literal,
+ TypedDict,
+ TypeVar,
+ Unpack,
+ overload,
+ type_check_only,
+)
+
+import numpy as np
+import numpy.typing as npt
+
+_T = TypeVar("_T")
+
+@type_check_only
+class _ValidationKwargs(TypedDict, total=False):
+ excludelist: Iterable[str] | None
+ deletechars: Iterable[str] | None
+ case_sensitive: Literal["upper", "lower"] | bool | None
+ replace_space: str
+
+###
+
+__docformat__: Final[str] = "restructuredtext en"
+
+class ConverterError(Exception): ...
+class ConverterLockError(ConverterError): ...
+class ConversionWarning(UserWarning): ...
+
+class LineSplitter:
+ delimiter: str | int | Iterable[int] | None
+ comments: str
+ encoding: str | None
+
+ def __init__(
+ self,
+ /,
+ delimiter: str | bytes | int | Iterable[int] | None = None,
+ comments: str | bytes = "#",
+ autostrip: bool = True,
+ encoding: str | None = None,
+ ) -> None: ...
+ def __call__(self, /, line: str | bytes) -> list[str]: ...
+ def autostrip(self, /, method: Callable[[_T], Iterable[str]]) -> Callable[[_T], list[str]]: ...
+
+class NameValidator:
+ defaultexcludelist: ClassVar[Sequence[str]]
+ defaultdeletechars: ClassVar[Sequence[str]]
+ excludelist: list[str]
+ deletechars: set[str]
+ case_converter: Callable[[str], str]
+ replace_space: str
+
+ def __init__(
+ self,
+ /,
+ excludelist: Iterable[str] | None = None,
+ deletechars: Iterable[str] | None = None,
+ case_sensitive: Literal["upper", "lower"] | bool | None = None,
+ replace_space: str = "_",
+ ) -> None: ...
+ def __call__(self, /, names: Iterable[str], defaultfmt: str = "f%i", nbfields: int | None = None) -> tuple[str, ...]: ...
+ def validate(self, /, names: Iterable[str], defaultfmt: str = "f%i", nbfields: int | None = None) -> tuple[str, ...]: ...
+
+class StringConverter:
+ func: Callable[[str], Any] | None
+ default: Any
+ missing_values: set[str]
+    type: np.dtype[np.datetime64] | type[np.generic]
+
+ def __init__(
+ self,
+ /,
+ dtype_or_func: npt.DTypeLike | None = None,
+ default: None = None,
+ missing_values: Iterable[str] | None = None,
+ locked: bool = False,
+ ) -> None: ...
+ def update(
+ self,
+ /,
+ func: Callable[[str], Any],
+ default: object | None = None,
+ testing_value: str | None = None,
+ missing_values: str = "",
+ locked: bool = False,
+ ) -> None: ...
+ #
+ def __call__(self, /, value: str) -> Any: ...
+ def upgrade(self, /, value: str) -> Any: ...
+ def iterupgrade(self, /, value: Iterable[str] | str) -> None: ...
+
+ #
+ @classmethod
+ def upgrade_mapper(cls, func: Callable[[str], Any], default: object | None = None) -> None: ...
+
+@overload
+def str2bool(value: Literal["false", "False", "FALSE"]) -> Literal[False]: ...
+@overload
+def str2bool(value: Literal["true", "True", "TRUE"]) -> Literal[True]: ...
+
+#
+def has_nested_fields(ndtype: np.dtype[np.void]) -> bool: ...
+def flatten_dtype(ndtype: np.dtype[np.void], flatten_base: bool = False) -> type[np.dtype]: ...
+def easy_dtype(
+ ndtype: npt.DTypeLike,
+ names: Iterable[str] | None = None,
+ defaultfmt: str = "f%i",
+ **validationargs: Unpack[_ValidationKwargs],
+) -> np.dtype[np.void]: ...
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/_nanfunctions_impl.py b/.venv/lib/python3.12/site-packages/numpy/lib/_nanfunctions_impl.py
new file mode 100644
index 0000000..4a01490
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/_nanfunctions_impl.py
@@ -0,0 +1,2024 @@
+"""
+Functions that ignore NaN.
+
+Functions
+---------
+
+- `nanmin` -- minimum non-NaN value
+- `nanmax` -- maximum non-NaN value
+- `nanargmin` -- index of minimum non-NaN value
+- `nanargmax` -- index of maximum non-NaN value
+- `nansum` -- sum of non-NaN values
+- `nanprod` -- product of non-NaN values
+- `nancumsum` -- cumulative sum of non-NaN values
+- `nancumprod` -- cumulative product of non-NaN values
+- `nanmean` -- mean of non-NaN values
+- `nanvar` -- variance of non-NaN values
+- `nanstd` -- standard deviation of non-NaN values
+- `nanmedian` -- median of non-NaN values
+- `nanquantile` -- qth quantile of non-NaN values
+- `nanpercentile` -- qth percentile of non-NaN values
+
+"""
+import functools
+import warnings
+
+import numpy as np
+import numpy._core.numeric as _nx
+from numpy._core import overrides
+from numpy.lib import _function_base_impl as fnb
+from numpy.lib._function_base_impl import _weights_are_valid
+
+array_function_dispatch = functools.partial(
+ overrides.array_function_dispatch, module='numpy')
+
+
+__all__ = [
+ 'nansum', 'nanmax', 'nanmin', 'nanargmax', 'nanargmin', 'nanmean',
+ 'nanmedian', 'nanpercentile', 'nanvar', 'nanstd', 'nanprod',
+ 'nancumsum', 'nancumprod', 'nanquantile'
+ ]
+
+
+def _nan_mask(a, out=None):
+ """
+ Parameters
+ ----------
+ a : array-like
+ Input array with at least 1 dimension.
+ out : ndarray, optional
+ Alternate output array in which to place the result. The default
+ is ``None``; if provided, it must have the same shape as the
+ expected output and will prevent the allocation of a new array.
+
+ Returns
+ -------
+ y : bool ndarray or True
+ A bool array where ``np.nan`` positions are marked with ``False``
+ and other positions are marked with ``True``. If the type of ``a``
+ is such that it can't possibly contain ``np.nan``, returns ``True``.
+ """
+ # we assume that a is an array for this private function
+
+ if a.dtype.kind not in 'fc':
+ return True
+
+ y = np.isnan(a, out=out)
+ y = np.invert(y, out=y)
+ return y
+
+
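+# Editor's note: an illustration of _nan_mask (not part of the NumPy
+# source); the helper short-circuits to True for dtypes that cannot hold
+# NaN.
+# >>> import numpy as np
+# >>> _nan_mask(np.array([1.0, np.nan]))
+# array([ True, False])
+# >>> _nan_mask(np.array([1, 2]))
+# True
+
+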
+def _replace_nan(a, val):
+ """
+ If `a` is of inexact type, make a copy of `a`, replace NaNs with
+ the `val` value, and return the copy together with a boolean mask
+ marking the locations where NaNs were present. If `a` is not of
+ inexact type, do nothing and return `a` together with a mask of None.
+
+ Note that scalars will end up as array scalars, which is important
+ for using the result as the value of the out argument in some
+ operations.
+
+ Parameters
+ ----------
+ a : array-like
+ Input array.
+ val : float
+ NaN values are set to val before doing the operation.
+
+ Returns
+ -------
+ y : ndarray
+ If `a` is of inexact type, return a copy of `a` with the NaNs
+ replaced by the fill value, otherwise return `a`.
+    mask : {bool, None}
+ If `a` is of inexact type, return a boolean mask marking locations of
+ NaNs, otherwise return None.
+
+ """
+ a = np.asanyarray(a)
+
+ if a.dtype == np.object_:
+ # object arrays do not support `isnan` (gh-9009), so make a guess
+ mask = np.not_equal(a, a, dtype=bool)
+ elif issubclass(a.dtype.type, np.inexact):
+ mask = np.isnan(a)
+ else:
+ mask = None
+
+ if mask is not None:
+ a = np.array(a, subok=True, copy=True)
+ np.copyto(a, val, where=mask)
+
+ return a, mask
+
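+# Editor's note: an illustration of _replace_nan (not part of the NumPy
+# source); inexact input yields a filled copy plus a NaN mask, while other
+# dtypes pass through with a mask of None.
+# >>> import numpy as np
+# >>> a, mask = _replace_nan(np.array([1.0, np.nan, 3.0]), 0)
+# >>> a
+# array([1., 0., 3.])
+# >>> mask
+# array([False,  True, False])
+# >>> _replace_nan(np.array([1, 2]), 0)
+# (array([1, 2]), None)
+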
+
+def _copyto(a, val, mask):
+ """
+ Replace values in `a` with NaN where `mask` is True. This differs from
+ copyto in that it will deal with the case where `a` is a numpy scalar.
+
+ Parameters
+ ----------
+ a : ndarray or numpy scalar
+ Array or numpy scalar some of whose values are to be replaced
+ by val.
+ val : numpy scalar
+        Value used as a replacement.
+ mask : ndarray, scalar
+ Boolean array. Where True the corresponding element of `a` is
+ replaced by `val`. Broadcasts.
+
+ Returns
+ -------
+ res : ndarray, scalar
+ Array with elements replaced or scalar `val`.
+
+ """
+ if isinstance(a, np.ndarray):
+ np.copyto(a, val, where=mask, casting='unsafe')
+ else:
+ a = a.dtype.type(val)
+ return a
+
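+# Editor's note: an illustration of _copyto (not part of the NumPy source).
+# Unlike np.copyto, it also accepts a NumPy scalar, in which case `val` is
+# simply coerced to the scalar's dtype (the mask is ignored on that branch).
+# >>> import numpy as np
+# >>> float(_copyto(np.float64(5.0), np.nan, mask=False))
+# nan
+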
+
+def _remove_nan_1d(arr1d, second_arr1d=None, overwrite_input=False):
+ """
+    Equivalent to ``arr1d[~np.isnan(arr1d)]``, but in a different order.
+
+ Presumably faster as it incurs fewer copies
+
+ Parameters
+ ----------
+ arr1d : ndarray
+ Array to remove nans from
+ second_arr1d : ndarray or None
+ A second array which will have the same positions removed as arr1d.
+ overwrite_input : bool
+ True if `arr1d` can be modified in place
+
+ Returns
+ -------
+ res : ndarray
+ Array with nan elements removed
+ second_res : ndarray or None
+ Second array with nan element positions of first array removed.
+ overwrite_input : bool
+ True if `res` can be modified in place, given the constraint on the
+ input
+ """
+ if arr1d.dtype == object:
+ # object arrays do not support `isnan` (gh-9009), so make a guess
+ c = np.not_equal(arr1d, arr1d, dtype=bool)
+ else:
+ c = np.isnan(arr1d)
+
+ s = np.nonzero(c)[0]
+ if s.size == arr1d.size:
+ warnings.warn("All-NaN slice encountered", RuntimeWarning,
+ stacklevel=6)
+ if second_arr1d is None:
+ return arr1d[:0], None, True
+ else:
+ return arr1d[:0], second_arr1d[:0], True
+ elif s.size == 0:
+ return arr1d, second_arr1d, overwrite_input
+ else:
+ if not overwrite_input:
+ arr1d = arr1d.copy()
+ # select non-nans at end of array
+ enonan = arr1d[-s.size:][~c[-s.size:]]
+ # fill nans in beginning of array with non-nans of end
+ arr1d[s[:enonan.size]] = enonan
+
+ if second_arr1d is None:
+ return arr1d[:-s.size], None, True
+ else:
+ if not overwrite_input:
+ second_arr1d = second_arr1d.copy()
+ enonan = second_arr1d[-s.size:][~c[-s.size:]]
+ second_arr1d[s[:enonan.size]] = enonan
+
+ return arr1d[:-s.size], second_arr1d[:-s.size], True
+
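+# Editor's note: an illustration of _remove_nan_1d (not part of the NumPy
+# source). NaN slots are overwritten with non-NaN values taken from the end
+# of the array, so the result is compacted but not order-preserving.
+# >>> import numpy as np
+# >>> arr = np.array([np.nan, 1.0, 2.0, np.nan, 3.0])
+# >>> res, _, can_overwrite = _remove_nan_1d(arr)
+# >>> res
+# array([3., 1., 2.])
+# >>> can_overwrite
+# True
+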
+
+def _divide_by_count(a, b, out=None):
+ """
+ Compute a/b ignoring invalid results. If `a` is an array the division
+ is done in place. If `a` is a scalar, then its type is preserved in the
+ output. If out is None, then a is used instead so that the division
+ is in place. Note that this is only called with `a` an inexact type.
+
+ Parameters
+ ----------
+ a : {ndarray, numpy scalar}
+ Numerator. Expected to be of inexact type but not checked.
+ b : {ndarray, numpy scalar}
+ Denominator.
+ out : ndarray, optional
+ Alternate output array in which to place the result. The default
+ is ``None``; if provided, it must have the same shape as the
+ expected output, but the type will be cast if necessary.
+
+ Returns
+ -------
+ ret : {ndarray, numpy scalar}
+ The return value is a/b. If `a` was an ndarray the division is done
+ in place. If `a` is a numpy scalar, the division preserves its type.
+
+ """
+ with np.errstate(invalid='ignore', divide='ignore'):
+ if isinstance(a, np.ndarray):
+ if out is None:
+ return np.divide(a, b, out=a, casting='unsafe')
+ else:
+ return np.divide(a, b, out=out, casting='unsafe')
+ elif out is None:
+ # Precaution against reduced object arrays
+ try:
+ return a.dtype.type(a / b)
+ except AttributeError:
+ return a / b
+ else:
+ # This is questionable, but currently a numpy scalar can
+ # be output to a zero dimensional array.
+ return np.divide(a, b, out=out, casting='unsafe')
+
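+# Editor's note: an illustration of _divide_by_count (not part of the NumPy
+# source); a scalar numerator keeps its NumPy type rather than decaying to
+# a Python float.
+# >>> import numpy as np
+# >>> _divide_by_count(np.float64(6.0), 4)
+# np.float64(1.5)
+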
+
+def _nanmin_dispatcher(a, axis=None, out=None, keepdims=None,
+ initial=None, where=None):
+ return (a, out)
+
+
+@array_function_dispatch(_nanmin_dispatcher)
+def nanmin(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue,
+ where=np._NoValue):
+ """
+    Return the minimum of an array or minimum along an axis, ignoring any
+    NaNs. When all-NaN slices are encountered, a ``RuntimeWarning`` is
+    raised and NaN is returned for that slice.
+
+ Parameters
+ ----------
+ a : array_like
+ Array containing numbers whose minimum is desired. If `a` is not an
+ array, a conversion is attempted.
+ axis : {int, tuple of int, None}, optional
+ Axis or axes along which the minimum is computed. The default is to compute
+ the minimum of the flattened array.
+ out : ndarray, optional
+ Alternate output array in which to place the result. The default
+ is ``None``; if provided, it must have the same shape as the
+ expected output, but the type will be cast if necessary. See
+ :ref:`ufuncs-output-type` for more details.
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left
+ in the result as dimensions with size one. With this option,
+ the result will broadcast correctly against the original `a`.
+
+ If the value is anything but the default, then
+ `keepdims` will be passed through to the `min` method
+        of sub-classes of `ndarray`. If the sub-class's method
+        does not implement `keepdims`, an exception will be raised.
+ initial : scalar, optional
+ The maximum value of an output element. Must be present to allow
+ computation on empty slice. See `~numpy.ufunc.reduce` for details.
+
+ .. versionadded:: 1.22.0
+ where : array_like of bool, optional
+ Elements to compare for the minimum. See `~numpy.ufunc.reduce`
+ for details.
+
+ .. versionadded:: 1.22.0
+
+ Returns
+ -------
+ nanmin : ndarray
+ An array with the same shape as `a`, with the specified axis
+ removed. If `a` is a 0-d array, or if axis is None, an ndarray
+ scalar is returned. The same dtype as `a` is returned.
+
+ See Also
+ --------
+ nanmax :
+ The maximum value of an array along a given axis, ignoring any NaNs.
+ amin :
+ The minimum value of an array along a given axis, propagating any NaNs.
+ fmin :
+ Element-wise minimum of two arrays, ignoring any NaNs.
+ minimum :
+ Element-wise minimum of two arrays, propagating any NaNs.
+ isnan :
+ Shows which elements are Not a Number (NaN).
+ isfinite:
+ Shows which elements are neither NaN nor infinity.
+
+ amax, fmax, maximum
+
+ Notes
+ -----
+ NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
+ (IEEE 754). This means that Not a Number is not equivalent to infinity.
+ Positive infinity is treated as a very large number and negative
+ infinity is treated as a very small (i.e. negative) number.
+
+    If the input has an integer type, the function is equivalent to np.min.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> a = np.array([[1, 2], [3, np.nan]])
+ >>> np.nanmin(a)
+ 1.0
+ >>> np.nanmin(a, axis=0)
+ array([1., 2.])
+ >>> np.nanmin(a, axis=1)
+ array([1., 3.])
+
+ When positive infinity and negative infinity are present:
+
+ >>> np.nanmin([1, 2, np.nan, np.inf])
+ 1.0
+ >>> np.nanmin([1, 2, np.nan, -np.inf])
+ -inf
+
+ """
+ kwargs = {}
+ if keepdims is not np._NoValue:
+ kwargs['keepdims'] = keepdims
+ if initial is not np._NoValue:
+ kwargs['initial'] = initial
+ if where is not np._NoValue:
+ kwargs['where'] = where
+
+ if (type(a) is np.ndarray or type(a) is np.memmap) and a.dtype != np.object_:
+ # Fast, but not safe for subclasses of ndarray, or object arrays,
+ # which do not implement isnan (gh-9009), or fmin correctly (gh-8975)
+ res = np.fmin.reduce(a, axis=axis, out=out, **kwargs)
+ if np.isnan(res).any():
+ warnings.warn("All-NaN slice encountered", RuntimeWarning,
+ stacklevel=2)
+ else:
+ # Slow, but safe for subclasses of ndarray
+ a, mask = _replace_nan(a, +np.inf)
+ res = np.amin(a, axis=axis, out=out, **kwargs)
+ if mask is None:
+ return res
+
+ # Check for all-NaN axis
+ kwargs.pop("initial", None)
+ mask = np.all(mask, axis=axis, **kwargs)
+ if np.any(mask):
+ res = _copyto(res, np.nan, mask)
+ warnings.warn("All-NaN axis encountered", RuntimeWarning,
+ stacklevel=2)
+ return res
+
+
+def _nanmax_dispatcher(a, axis=None, out=None, keepdims=None,
+ initial=None, where=None):
+ return (a, out)
+
+
+@array_function_dispatch(_nanmax_dispatcher)
+def nanmax(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue,
+ where=np._NoValue):
+ """
+ Return the maximum of an array or maximum along an axis, ignoring any
+ NaNs. When all-NaN slices are encountered a ``RuntimeWarning`` is
+ raised and NaN is returned for that slice.
+
+ Parameters
+ ----------
+ a : array_like
+ Array containing numbers whose maximum is desired. If `a` is not an
+ array, a conversion is attempted.
+ axis : {int, tuple of int, None}, optional
+ Axis or axes along which the maximum is computed. The default is to compute
+ the maximum of the flattened array.
+ out : ndarray, optional
+ Alternate output array in which to place the result. The default
+ is ``None``; if provided, it must have the same shape as the
+ expected output, but the type will be cast if necessary. See
+ :ref:`ufuncs-output-type` for more details.
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left
+ in the result as dimensions with size one. With this option,
+ the result will broadcast correctly against the original `a`.
+ If the value is anything but the default, then
+ `keepdims` will be passed through to the `max` method
+        of sub-classes of `ndarray`. If the sub-class's method
+        does not implement `keepdims`, an exception will be raised.
+ initial : scalar, optional
+ The minimum value of an output element. Must be present to allow
+ computation on empty slice. See `~numpy.ufunc.reduce` for details.
+
+ .. versionadded:: 1.22.0
+ where : array_like of bool, optional
+ Elements to compare for the maximum. See `~numpy.ufunc.reduce`
+ for details.
+
+ .. versionadded:: 1.22.0
+
+ Returns
+ -------
+ nanmax : ndarray
+ An array with the same shape as `a`, with the specified axis removed.
+ If `a` is a 0-d array, or if axis is None, an ndarray scalar is
+ returned. The same dtype as `a` is returned.
+
+ See Also
+ --------
+ nanmin :
+ The minimum value of an array along a given axis, ignoring any NaNs.
+ amax :
+ The maximum value of an array along a given axis, propagating any NaNs.
+ fmax :
+ Element-wise maximum of two arrays, ignoring any NaNs.
+ maximum :
+ Element-wise maximum of two arrays, propagating any NaNs.
+ isnan :
+ Shows which elements are Not a Number (NaN).
+ isfinite:
+ Shows which elements are neither NaN nor infinity.
+
+ amin, fmin, minimum
+
+ Notes
+ -----
+ NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
+ (IEEE 754). This means that Not a Number is not equivalent to infinity.
+ Positive infinity is treated as a very large number and negative
+ infinity is treated as a very small (i.e. negative) number.
+
+    If the input has an integer type, the function is equivalent to np.max.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> a = np.array([[1, 2], [3, np.nan]])
+ >>> np.nanmax(a)
+ 3.0
+ >>> np.nanmax(a, axis=0)
+ array([3., 2.])
+ >>> np.nanmax(a, axis=1)
+ array([2., 3.])
+
+ When positive infinity and negative infinity are present:
+
+ >>> np.nanmax([1, 2, np.nan, -np.inf])
+ 2.0
+ >>> np.nanmax([1, 2, np.nan, np.inf])
+ inf
+
+ """
+ kwargs = {}
+ if keepdims is not np._NoValue:
+ kwargs['keepdims'] = keepdims
+ if initial is not np._NoValue:
+ kwargs['initial'] = initial
+ if where is not np._NoValue:
+ kwargs['where'] = where
+
+ if (type(a) is np.ndarray or type(a) is np.memmap) and a.dtype != np.object_:
+ # Fast, but not safe for subclasses of ndarray, or object arrays,
+ # which do not implement isnan (gh-9009), or fmax correctly (gh-8975)
+ res = np.fmax.reduce(a, axis=axis, out=out, **kwargs)
+ if np.isnan(res).any():
+ warnings.warn("All-NaN slice encountered", RuntimeWarning,
+ stacklevel=2)
+ else:
+ # Slow, but safe for subclasses of ndarray
+ a, mask = _replace_nan(a, -np.inf)
+ res = np.amax(a, axis=axis, out=out, **kwargs)
+ if mask is None:
+ return res
+
+ # Check for all-NaN axis
+ kwargs.pop("initial", None)
+ mask = np.all(mask, axis=axis, **kwargs)
+ if np.any(mask):
+ res = _copyto(res, np.nan, mask)
+ warnings.warn("All-NaN axis encountered", RuntimeWarning,
+ stacklevel=2)
+ return res
+
+
+def _nanargmin_dispatcher(a, axis=None, out=None, *, keepdims=None):
+ return (a,)
+
+
+@array_function_dispatch(_nanargmin_dispatcher)
+def nanargmin(a, axis=None, out=None, *, keepdims=np._NoValue):
+ """
+ Return the indices of the minimum values in the specified axis ignoring
+ NaNs. For all-NaN slices ``ValueError`` is raised. Warning: the results
+ cannot be trusted if a slice contains only NaNs and Infs.
+
+ Parameters
+ ----------
+ a : array_like
+ Input data.
+ axis : int, optional
+ Axis along which to operate. By default flattened input is used.
+ out : array, optional
+ If provided, the result will be inserted into this array. It should
+ be of the appropriate shape and dtype.
+
+ .. versionadded:: 1.22.0
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left
+ in the result as dimensions with size one. With this option,
+ the result will broadcast correctly against the array.
+
+ .. versionadded:: 1.22.0
+
+ Returns
+ -------
+ index_array : ndarray
+ An array of indices or a single index value.
+
+ See Also
+ --------
+ argmin, nanargmax
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> a = np.array([[np.nan, 4], [2, 3]])
+ >>> np.argmin(a)
+ 0
+ >>> np.nanargmin(a)
+ 2
+ >>> np.nanargmin(a, axis=0)
+ array([1, 1])
+ >>> np.nanargmin(a, axis=1)
+ array([1, 0])
+
+ """
+ a, mask = _replace_nan(a, np.inf)
+ if mask is not None and mask.size:
+ mask = np.all(mask, axis=axis)
+ if np.any(mask):
+ raise ValueError("All-NaN slice encountered")
+ res = np.argmin(a, axis=axis, out=out, keepdims=keepdims)
+ return res
+
+
+def _nanargmax_dispatcher(a, axis=None, out=None, *, keepdims=None):
+ return (a,)
+
+
+@array_function_dispatch(_nanargmax_dispatcher)
+def nanargmax(a, axis=None, out=None, *, keepdims=np._NoValue):
+ """
+ Return the indices of the maximum values in the specified axis ignoring
+ NaNs. For all-NaN slices ``ValueError`` is raised. Warning: the
+ results cannot be trusted if a slice contains only NaNs and -Infs.
+
+ Parameters
+ ----------
+ a : array_like
+ Input data.
+ axis : int, optional
+ Axis along which to operate. By default flattened input is used.
+ out : array, optional
+ If provided, the result will be inserted into this array. It should
+ be of the appropriate shape and dtype.
+
+ .. versionadded:: 1.22.0
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left
+ in the result as dimensions with size one. With this option,
+ the result will broadcast correctly against the array.
+
+ .. versionadded:: 1.22.0
+
+ Returns
+ -------
+ index_array : ndarray
+ An array of indices or a single index value.
+
+ See Also
+ --------
+ argmax, nanargmin
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> a = np.array([[np.nan, 4], [2, 3]])
+ >>> np.argmax(a)
+ 0
+ >>> np.nanargmax(a)
+ 1
+ >>> np.nanargmax(a, axis=0)
+ array([1, 0])
+ >>> np.nanargmax(a, axis=1)
+ array([1, 1])
+
+ """
+ a, mask = _replace_nan(a, -np.inf)
+ if mask is not None and mask.size:
+ mask = np.all(mask, axis=axis)
+ if np.any(mask):
+ raise ValueError("All-NaN slice encountered")
+ res = np.argmax(a, axis=axis, out=out, keepdims=keepdims)
+ return res
+
+
+def _nansum_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None,
+ initial=None, where=None):
+ return (a, out)
+
+
+@array_function_dispatch(_nansum_dispatcher)
+def nansum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue,
+ initial=np._NoValue, where=np._NoValue):
+ """
+    Return the sum of array elements over a given axis, treating Not a
+    Number (NaN) values as zero.
+
+    In NumPy versions <= 1.9.0 NaN is returned for slices that are all-NaN or
+    empty. In later versions zero is returned.
+
+ Parameters
+ ----------
+ a : array_like
+ Array containing numbers whose sum is desired. If `a` is not an
+ array, a conversion is attempted.
+ axis : {int, tuple of int, None}, optional
+ Axis or axes along which the sum is computed. The default is to compute the
+ sum of the flattened array.
+ dtype : data-type, optional
+ The type of the returned array and of the accumulator in which the
+ elements are summed. By default, the dtype of `a` is used. An
+ exception is when `a` has an integer type with less precision than
+ the platform (u)intp. In that case, the default will be either
+ (u)int32 or (u)int64 depending on whether the platform is 32 or 64
+ bits. For inexact inputs, dtype must be inexact.
+ out : ndarray, optional
+ Alternate output array in which to place the result. The default
+ is ``None``. If provided, it must have the same shape as the
+ expected output, but the type will be cast if necessary. See
+ :ref:`ufuncs-output-type` for more details. The casting of NaN to integer
+ can yield unexpected results.
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left
+ in the result as dimensions with size one. With this option,
+ the result will broadcast correctly against the original `a`.
+
+ If the value is anything but the default, then
+ `keepdims` will be passed through to the `mean` or `sum` methods
+        of sub-classes of `ndarray`. If the sub-class's method
+        does not implement `keepdims`, an exception will be raised.
+ initial : scalar, optional
+ Starting value for the sum. See `~numpy.ufunc.reduce` for details.
+
+ .. versionadded:: 1.22.0
+ where : array_like of bool, optional
+ Elements to include in the sum. See `~numpy.ufunc.reduce` for details.
+
+ .. versionadded:: 1.22.0
+
+ Returns
+ -------
+    nansum : ndarray
+        A new array holding the result is returned unless `out` is
+        specified, in which case it is returned. The result has the same
+ size as `a`, and the same shape as `a` if `axis` is not None
+ or `a` is a 1-d array.
+
+ See Also
+ --------
+ numpy.sum : Sum across array propagating NaNs.
+ isnan : Show which elements are NaN.
+ isfinite : Show which elements are not NaN or +/-inf.
+
+ Notes
+ -----
+ If both positive and negative infinity are present, the sum will be Not
+ A Number (NaN).
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.nansum(1)
+ 1
+ >>> np.nansum([1])
+ 1
+ >>> np.nansum([1, np.nan])
+ 1.0
+ >>> a = np.array([[1, 1], [1, np.nan]])
+ >>> np.nansum(a)
+ 3.0
+ >>> np.nansum(a, axis=0)
+ array([2., 1.])
+ >>> np.nansum([1, np.nan, np.inf])
+ inf
+ >>> np.nansum([1, np.nan, -np.inf])
+ -inf
+ >>> with np.errstate(invalid="ignore"):
+ ... np.nansum([1, np.nan, np.inf, -np.inf]) # both +/- infinity present
+ np.float64(nan)
+
+ """
+ a, mask = _replace_nan(a, 0)
+ return np.sum(a, axis=axis, dtype=dtype, out=out, keepdims=keepdims,
+ initial=initial, where=where)
+
+
+def _nanprod_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None,
+ initial=None, where=None):
+ return (a, out)
+
+
+@array_function_dispatch(_nanprod_dispatcher)
+def nanprod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue,
+ initial=np._NoValue, where=np._NoValue):
+ """
+    Return the product of array elements over a given axis, treating Not a
+    Number (NaN) values as ones.
+
+ One is returned for slices that are all-NaN or empty.
+
+ Parameters
+ ----------
+ a : array_like
+ Array containing numbers whose product is desired. If `a` is not an
+ array, a conversion is attempted.
+ axis : {int, tuple of int, None}, optional
+ Axis or axes along which the product is computed. The default is to compute
+ the product of the flattened array.
+ dtype : data-type, optional
+ The type of the returned array and of the accumulator in which the
+ elements are summed. By default, the dtype of `a` is used. An
+ exception is when `a` has an integer type with less precision than
+ the platform (u)intp. In that case, the default will be either
+ (u)int32 or (u)int64 depending on whether the platform is 32 or 64
+ bits. For inexact inputs, dtype must be inexact.
+ out : ndarray, optional
+ Alternate output array in which to place the result. The default
+ is ``None``. If provided, it must have the same shape as the
+ expected output, but the type will be cast if necessary. See
+ :ref:`ufuncs-output-type` for more details. The casting of NaN to integer
+ can yield unexpected results.
+ keepdims : bool, optional
+ If True, the axes which are reduced are left in the result as
+ dimensions with size one. With this option, the result will
+        broadcast correctly against the original `a`.
+ initial : scalar, optional
+ The starting value for this product. See `~numpy.ufunc.reduce`
+ for details.
+
+ .. versionadded:: 1.22.0
+ where : array_like of bool, optional
+ Elements to include in the product. See `~numpy.ufunc.reduce`
+ for details.
+
+ .. versionadded:: 1.22.0
+
+ Returns
+ -------
+ nanprod : ndarray
+ A new array holding the result is returned unless `out` is
+ specified, in which case it is returned.
+
+ See Also
+ --------
+ numpy.prod : Product across array propagating NaNs.
+ isnan : Show which elements are NaN.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.nanprod(1)
+ 1
+ >>> np.nanprod([1])
+ 1
+ >>> np.nanprod([1, np.nan])
+ 1.0
+ >>> a = np.array([[1, 2], [3, np.nan]])
+ >>> np.nanprod(a)
+ 6.0
+ >>> np.nanprod(a, axis=0)
+ array([3., 2.])
+
+ """
+ a, mask = _replace_nan(a, 1)
+ return np.prod(a, axis=axis, dtype=dtype, out=out, keepdims=keepdims,
+ initial=initial, where=where)
+
+
+def _nancumsum_dispatcher(a, axis=None, dtype=None, out=None):
+ return (a, out)
+
+
+@array_function_dispatch(_nancumsum_dispatcher)
+def nancumsum(a, axis=None, dtype=None, out=None):
+ """
+    Return the cumulative sum of array elements over a given axis, treating
+    Not a Number (NaN) values as zero. The cumulative sum does not change
+    when NaNs are encountered and leading NaNs are replaced by zeros.
+
+ Zeros are returned for slices that are all-NaN or empty.
+
+ Parameters
+ ----------
+ a : array_like
+ Input array.
+ axis : int, optional
+ Axis along which the cumulative sum is computed. The default
+ (None) is to compute the cumsum over the flattened array.
+ dtype : dtype, optional
+ Type of the returned array and of the accumulator in which the
+ elements are summed. If `dtype` is not specified, it defaults
+ to the dtype of `a`, unless `a` has an integer dtype with a
+ precision less than that of the default platform integer. In
+ that case, the default platform integer is used.
+ out : ndarray, optional
+ Alternative output array in which to place the result. It must
+ have the same shape and buffer length as the expected output
+ but the type will be cast if necessary. See :ref:`ufuncs-output-type` for
+ more details.
+
+ Returns
+ -------
+    nancumsum : ndarray
+        A new array holding the result is returned unless `out` is
+        specified, in which case it is returned. The result has the same
+ size as `a`, and the same shape as `a` if `axis` is not None
+ or `a` is a 1-d array.
+
+ See Also
+ --------
+ numpy.cumsum : Cumulative sum across array propagating NaNs.
+ isnan : Show which elements are NaN.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.nancumsum(1)
+ array([1])
+ >>> np.nancumsum([1])
+ array([1])
+ >>> np.nancumsum([1, np.nan])
+ array([1., 1.])
+ >>> a = np.array([[1, 2], [3, np.nan]])
+ >>> np.nancumsum(a)
+ array([1., 3., 6., 6.])
+ >>> np.nancumsum(a, axis=0)
+ array([[1., 2.],
+ [4., 2.]])
+ >>> np.nancumsum(a, axis=1)
+ array([[1., 3.],
+ [3., 3.]])
+
+ """
+ a, mask = _replace_nan(a, 0)
+ return np.cumsum(a, axis=axis, dtype=dtype, out=out)
+
+
+def _nancumprod_dispatcher(a, axis=None, dtype=None, out=None):
+ return (a, out)
+
+
+@array_function_dispatch(_nancumprod_dispatcher)
+def nancumprod(a, axis=None, dtype=None, out=None):
+ """
+    Return the cumulative product of array elements over a given axis,
+    treating Not a Number (NaN) values as one. The cumulative product does
+    not change when NaNs are encountered and leading NaNs are replaced by
+    ones.
+
+ Ones are returned for slices that are all-NaN or empty.
+
+ Parameters
+ ----------
+ a : array_like
+ Input array.
+ axis : int, optional
+ Axis along which the cumulative product is computed. By default
+ the input is flattened.
+ dtype : dtype, optional
+ Type of the returned array, as well as of the accumulator in which
+ the elements are multiplied. If *dtype* is not specified, it
+ defaults to the dtype of `a`, unless `a` has an integer dtype with
+ a precision less than that of the default platform integer. In
+ that case, the default platform integer is used instead.
+ out : ndarray, optional
+ Alternative output array in which to place the result. It must
+ have the same shape and buffer length as the expected output
+ but the type of the resulting values will be cast if necessary.
+
+ Returns
+ -------
+ nancumprod : ndarray
+ A new array holding the result is returned unless `out` is
+ specified, in which case it is returned.
+
+ See Also
+ --------
+ numpy.cumprod : Cumulative product across array propagating NaNs.
+ isnan : Show which elements are NaN.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.nancumprod(1)
+ array([1])
+ >>> np.nancumprod([1])
+ array([1])
+ >>> np.nancumprod([1, np.nan])
+ array([1., 1.])
+ >>> a = np.array([[1, 2], [3, np.nan]])
+ >>> np.nancumprod(a)
+ array([1., 2., 6., 6.])
+ >>> np.nancumprod(a, axis=0)
+ array([[1., 2.],
+ [3., 2.]])
+ >>> np.nancumprod(a, axis=1)
+ array([[1., 2.],
+ [3., 3.]])
+
+ """
+ a, mask = _replace_nan(a, 1)
+ return np.cumprod(a, axis=axis, dtype=dtype, out=out)
+
+
+def _nanmean_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None,
+ *, where=None):
+ return (a, out)
+
+
+@array_function_dispatch(_nanmean_dispatcher)
+def nanmean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue,
+ *, where=np._NoValue):
+ """
+ Compute the arithmetic mean along the specified axis, ignoring NaNs.
+
+ Returns the average of the array elements. The average is taken over
+ the flattened array by default, otherwise over the specified axis.
+ `float64` intermediate and return values are used for integer inputs.
+
+ For all-NaN slices, NaN is returned and a `RuntimeWarning` is raised.
+
+ Parameters
+ ----------
+ a : array_like
+ Array containing numbers whose mean is desired. If `a` is not an
+ array, a conversion is attempted.
+ axis : {int, tuple of int, None}, optional
+ Axis or axes along which the means are computed. The default is to compute
+ the mean of the flattened array.
+ dtype : data-type, optional
+ Type to use in computing the mean. For integer inputs, the default
+ is `float64`; for inexact inputs, it is the same as the input
+ dtype.
+ out : ndarray, optional
+ Alternate output array in which to place the result. The default
+ is ``None``; if provided, it must have the same shape as the
+ expected output, but the type will be cast if necessary.
+ See :ref:`ufuncs-output-type` for more details.
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left
+ in the result as dimensions with size one. With this option,
+ the result will broadcast correctly against the original `a`.
+
+ If the value is anything but the default, then
+ `keepdims` will be passed through to the `mean` or `sum` methods
+        of sub-classes of `ndarray`. If the sub-class's method
+        does not implement `keepdims`, an exception will be raised.
+ where : array_like of bool, optional
+ Elements to include in the mean. See `~numpy.ufunc.reduce` for details.
+
+ .. versionadded:: 1.22.0
+
+ Returns
+ -------
+ m : ndarray, see dtype parameter above
+ If `out=None`, returns a new array containing the mean values,
+        otherwise a reference to the output array is returned. NaN is
+        returned for slices that contain only NaNs.
+
+ See Also
+ --------
+ average : Weighted average
+ mean : Arithmetic mean taken while not ignoring NaNs
+ var, nanvar
+
+ Notes
+ -----
+ The arithmetic mean is the sum of the non-NaN elements along the axis
+ divided by the number of non-NaN elements.
+
+ Note that for floating-point input, the mean is computed using the same
+ precision the input has. Depending on the input data, this can cause
+ the results to be inaccurate, especially for `float32`. Specifying a
+ higher-precision accumulator using the `dtype` keyword can alleviate
+ this issue.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> a = np.array([[1, np.nan], [3, 4]])
+ >>> np.nanmean(a)
+ 2.6666666666666665
+ >>> np.nanmean(a, axis=0)
+ array([2., 4.])
+ >>> np.nanmean(a, axis=1)
+ array([1., 3.5]) # may vary
+
+ """
+ arr, mask = _replace_nan(a, 0)
+ if mask is None:
+ return np.mean(arr, axis=axis, dtype=dtype, out=out, keepdims=keepdims,
+ where=where)
+
+ if dtype is not None:
+ dtype = np.dtype(dtype)
+ if dtype is not None and not issubclass(dtype.type, np.inexact):
+ raise TypeError("If a is inexact, then dtype must be inexact")
+ if out is not None and not issubclass(out.dtype.type, np.inexact):
+ raise TypeError("If a is inexact, then out must be inexact")
+
+ cnt = np.sum(~mask, axis=axis, dtype=np.intp, keepdims=keepdims,
+ where=where)
+ tot = np.sum(arr, axis=axis, dtype=dtype, out=out, keepdims=keepdims,
+ where=where)
+ avg = _divide_by_count(tot, cnt, out=out)
+
+ isbad = (cnt == 0)
+ if isbad.any():
+ warnings.warn("Mean of empty slice", RuntimeWarning, stacklevel=2)
+ # NaN is the only possible bad value, so no further
+ # action is needed to handle bad results.
+ return avg
+
+
+def _nanmedian1d(arr1d, overwrite_input=False):
+ """
+ Private function for rank 1 arrays. Compute the median ignoring NaNs.
+ See nanmedian for parameter usage
+ """
+ arr1d_parsed, _, overwrite_input = _remove_nan_1d(
+ arr1d, overwrite_input=overwrite_input,
+ )
+
+ if arr1d_parsed.size == 0:
+ # Ensure that a nan-esque scalar of the appropriate type (and unit)
+ # is returned for `timedelta64` and `complexfloating`
+ return arr1d[-1]
+
+ return np.median(arr1d_parsed, overwrite_input=overwrite_input)
+
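+# Editor's note: an illustration of _nanmedian1d (not part of the NumPy
+# source); NaNs are stripped via _remove_nan_1d before the ordinary median
+# is taken.
+# >>> import numpy as np
+# >>> float(_nanmedian1d(np.array([1.0, np.nan, 3.0])))
+# 2.0
+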
+
+def _nanmedian(a, axis=None, out=None, overwrite_input=False):
+ """
+ Private function that doesn't support extended axis or keepdims.
+ These methods are extended to this function using _ureduce
+ See nanmedian for parameter usage
+
+ """
+ if axis is None or a.ndim == 1:
+ part = a.ravel()
+ if out is None:
+ return _nanmedian1d(part, overwrite_input)
+ else:
+ out[...] = _nanmedian1d(part, overwrite_input)
+ return out
+ else:
+ # for small medians use sort + indexing which is still faster than
+ # apply_along_axis
+ # benchmarked with shuffled (50, 50, x) containing a few NaN
+ if a.shape[axis] < 600:
+ return _nanmedian_small(a, axis, out, overwrite_input)
+ result = np.apply_along_axis(_nanmedian1d, axis, a, overwrite_input)
+ if out is not None:
+ out[...] = result
+ return result
+
+
+def _nanmedian_small(a, axis=None, out=None, overwrite_input=False):
+ """
+ sort + indexing median, faster for small medians along multiple
+ dimensions due to the high overhead of apply_along_axis
+
+ see nanmedian for parameter usage
+ """
+ a = np.ma.masked_array(a, np.isnan(a))
+ m = np.ma.median(a, axis=axis, overwrite_input=overwrite_input)
+ for i in range(np.count_nonzero(m.mask.ravel())):
+ warnings.warn("All-NaN slice encountered", RuntimeWarning,
+ stacklevel=5)
+
+ fill_value = np.timedelta64("NaT") if m.dtype.kind == "m" else np.nan
+ if out is not None:
+ out[...] = m.filled(fill_value)
+ return out
+ return m.filled(fill_value)
+
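+# Illustrative only (hypothetical snippet): the masked-array route taken by
+# _nanmedian_small, assuming a small 2-D input:
+#
+# >>> import numpy as np
+# >>> x = np.array([[1.0, np.nan], [3.0, 4.0]])
+# >>> masked = np.ma.masked_array(x, np.isnan(x))
+# >>> np.ma.median(masked, axis=1).filled(np.nan)
+# array([1. , 3.5])
+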
+
+def _nanmedian_dispatcher(
+ a, axis=None, out=None, overwrite_input=None, keepdims=None):
+ return (a, out)
+
+
+@array_function_dispatch(_nanmedian_dispatcher)
+def nanmedian(a, axis=None, out=None, overwrite_input=False, keepdims=np._NoValue):
+ """
+ Compute the median along the specified axis, while ignoring NaNs.
+
+ Returns the median of the array elements.
+
+ Parameters
+ ----------
+ a : array_like
+ Input array or object that can be converted to an array.
+ axis : {int, sequence of int, None}, optional
+ Axis or axes along which the medians are computed. The default
+ is to compute the median along a flattened version of the array.
+ A sequence of axes is supported since version 1.9.0.
+ out : ndarray, optional
+ Alternative output array in which to place the result. It must
+ have the same shape and buffer length as the expected output,
+ but the type (of the output) will be cast if necessary.
+ overwrite_input : bool, optional
+ If True, then allow use of memory of input array `a` for
+ calculations. The input array will be modified by the call to
+ `median`. This will save memory when you do not need to preserve
+ the contents of the input array. Treat the input as undefined,
+ but it will probably be fully or partially sorted. Default is
+ False. If `overwrite_input` is ``True`` and `a` is not already an
+ `ndarray`, an error will be raised.
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left
+ in the result as dimensions with size one. With this option,
+ the result will broadcast correctly against the original `a`.
+
+ If this is anything but the default value, it will be passed
+ through (in the special case of an empty array) to the
+ `mean` function of the underlying array. If the array is
+ a sub-class and `mean` does not have the kwarg `keepdims`, this
+ will raise a RuntimeError.
+
+ Returns
+ -------
+ median : ndarray
+ A new array holding the result. If the input contains integers
+ or floats smaller than ``float64``, then the output data-type is
+ ``np.float64``. Otherwise, the data-type of the output is the
+ same as that of the input. If `out` is specified, that array is
+ returned instead.
+
+ See Also
+ --------
+ mean, median, percentile
+
+ Notes
+ -----
+ Given a vector ``V`` of length ``N``, the median of ``V`` is the
+ middle value of a sorted copy of ``V``, ``V_sorted`` - i.e.,
+ ``V_sorted[(N-1)/2]``, when ``N`` is odd, and the average of the two
+ middle values of ``V_sorted`` when ``N`` is even.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> a = np.array([[10.0, 7, 4], [3, 2, 1]])
+ >>> a[0, 1] = np.nan
+ >>> a
+ array([[10., nan, 4.],
+ [ 3., 2., 1.]])
+ >>> np.median(a)
+ np.float64(nan)
+ >>> np.nanmedian(a)
+ 3.0
+ >>> np.nanmedian(a, axis=0)
+ array([6.5, 2. , 2.5])
+ >>> np.median(a, axis=1)
+ array([nan, 2.])
+ >>> b = a.copy()
+ >>> np.nanmedian(b, axis=1, overwrite_input=True)
+ array([7., 2.])
+ >>> assert not np.all(a==b)
+ >>> b = a.copy()
+ >>> np.nanmedian(b, axis=None, overwrite_input=True)
+ 3.0
+ >>> assert not np.all(a==b)
+
+ """
+ a = np.asanyarray(a)
+ # apply_along_axis in _nanmedian doesn't handle empty arrays well,
+ # so deal with them upfront
+ if a.size == 0:
+ return np.nanmean(a, axis, out=out, keepdims=keepdims)
+
+ return fnb._ureduce(a, func=_nanmedian, keepdims=keepdims,
+ axis=axis, out=out,
+ overwrite_input=overwrite_input)
+
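+# Illustrative only (hypothetical snippet): _ureduce is what gives nanmedian
+# its tuple-of-axes support, e.g. collapsing both axes of a 2-D array at once:
+#
+# >>> import numpy as np
+# >>> a = np.array([[10.0, np.nan, 4.0], [3.0, 2.0, 1.0]])
+# >>> float(np.nanmedian(a, axis=(0, 1)))
+# 3.0
+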
+
+def _nanpercentile_dispatcher(
+ a, q, axis=None, out=None, overwrite_input=None,
+ method=None, keepdims=None, *, weights=None, interpolation=None):
+ return (a, q, out, weights)
+
+
+@array_function_dispatch(_nanpercentile_dispatcher)
+def nanpercentile(
+ a,
+ q,
+ axis=None,
+ out=None,
+ overwrite_input=False,
+ method="linear",
+ keepdims=np._NoValue,
+ *,
+ weights=None,
+ interpolation=None,
+):
+ """
+ Compute the qth percentile of the data along the specified axis,
+ while ignoring nan values.
+
+ Returns the qth percentile(s) of the array elements.
+
+ Parameters
+ ----------
+ a : array_like
+ Input array or object that can be converted to an array, containing
+ nan values to be ignored.
+ q : array_like of float
+ Percentile or sequence of percentiles to compute, which must be
+ between 0 and 100 inclusive.
+ axis : {int, tuple of int, None}, optional
+ Axis or axes along which the percentiles are computed. The default
+ is to compute the percentile(s) along a flattened version of the
+ array.
+ out : ndarray, optional
+ Alternative output array in which to place the result. It must have
+ the same shape and buffer length as the expected output, but the
+ type (of the output) will be cast if necessary.
+ overwrite_input : bool, optional
+ If True, then allow the input array `a` to be modified by
+ intermediate calculations, to save memory. In this case, the
+ contents of the input `a` after this function completes is
+ undefined.
+ method : str, optional
+ This parameter specifies the method to use for estimating the
+ percentile. There are many different methods, some unique to NumPy.
+ See the notes for explanation. The options sorted by their R type
+ as summarized in the H&F paper [1]_ are:
+
+ 1. 'inverted_cdf'
+ 2. 'averaged_inverted_cdf'
+ 3. 'closest_observation'
+ 4. 'interpolated_inverted_cdf'
+ 5. 'hazen'
+ 6. 'weibull'
+ 7. 'linear' (default)
+ 8. 'median_unbiased'
+ 9. 'normal_unbiased'
+
+ The first three methods are discontinuous. NumPy further defines the
+ following discontinuous variations of the default 'linear' (7.) option:
+
+ * 'lower'
+ * 'higher'
+ * 'midpoint'
+ * 'nearest'
+
+ .. versionchanged:: 1.22.0
+ This argument was previously called "interpolation" and only
+ offered the "linear" default and last four options.
+
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left in
+ the result as dimensions with size one. With this option, the
+ result will broadcast correctly against the original array `a`.
+
+ If this is anything but the default value, it will be passed
+ through (in the special case of an empty array) to the
+ `mean` function of the underlying array. If the array is
+ a sub-class and `mean` does not have the kwarg `keepdims`, this
+ will raise a RuntimeError.
+
+ weights : array_like, optional
+ An array of weights associated with the values in `a`. Each value in
+ `a` contributes to the percentile according to its associated weight.
+ The weights array can either be 1-D (in which case its length must be
+ the size of `a` along the given axis) or of the same shape as `a`.
+ If `weights=None`, then all data in `a` are assumed to have a
+ weight equal to one.
+ Only `method="inverted_cdf"` supports weights.
+
+ .. versionadded:: 2.0.0
+
+ interpolation : str, optional
+ Deprecated name for the method keyword argument.
+
+ .. deprecated:: 1.22.0
+
+ Returns
+ -------
+ percentile : scalar or ndarray
+ If `q` is a single percentile and `axis=None`, then the result
+ is a scalar. If multiple percentiles are given, first axis of
+ the result corresponds to the percentiles. The other axes are
+ the axes that remain after the reduction of `a`. If the input
+ contains integers or floats smaller than ``float64``, the output
+ data-type is ``float64``. Otherwise, the output data-type is the
+ same as that of the input. If `out` is specified, that array is
+ returned instead.
+
+ See Also
+ --------
+ nanmean
+ nanmedian : equivalent to ``nanpercentile(..., 50)``
+ percentile, median, mean
+ nanquantile : equivalent to nanpercentile, except q in range [0, 1].
+
+ Notes
+ -----
+ The behavior of `numpy.nanpercentile` with percentage `q` is that of
+ `numpy.quantile` with argument ``q/100`` (ignoring nan values).
+ For more information, please see `numpy.quantile`.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> a = np.array([[10., 7., 4.], [3., 2., 1.]])
+ >>> a[0][1] = np.nan
+ >>> a
+ array([[10., nan, 4.],
+ [ 3., 2., 1.]])
+ >>> np.percentile(a, 50)
+ np.float64(nan)
+ >>> np.nanpercentile(a, 50)
+ 3.0
+ >>> np.nanpercentile(a, 50, axis=0)
+ array([6.5, 2. , 2.5])
+ >>> np.nanpercentile(a, 50, axis=1, keepdims=True)
+ array([[7.],
+ [2.]])
+ >>> m = np.nanpercentile(a, 50, axis=0)
+ >>> out = np.zeros_like(m)
+ >>> np.nanpercentile(a, 50, axis=0, out=out)
+ array([6.5, 2. , 2.5])
+ >>> m
+ array([6.5, 2. , 2.5])
+
+ >>> b = a.copy()
+ >>> np.nanpercentile(b, 50, axis=1, overwrite_input=True)
+ array([7., 2.])
+ >>> assert not np.all(a==b)
+
+ References
+ ----------
+ .. [1] R. J. Hyndman and Y. Fan,
+ "Sample quantiles in statistical packages,"
+ The American Statistician, 50(4), pp. 361-365, 1996
+
+ """
+ if interpolation is not None:
+ method = fnb._check_interpolation_as_method(
+ method, interpolation, "nanpercentile")
+
+ a = np.asanyarray(a)
+ if a.dtype.kind == "c":
+ raise TypeError("a must be an array of real numbers")
+
+ q = np.true_divide(q, a.dtype.type(100) if a.dtype.kind == "f" else 100, out=...)
+ if not fnb._quantile_is_valid(q):
+ raise ValueError("Percentiles must be in the range [0, 100]")
+
+ if weights is not None:
+ if method != "inverted_cdf":
+ msg = ("Only method 'inverted_cdf' supports weights. "
+ f"Got: {method}.")
+ raise ValueError(msg)
+ if axis is not None:
+ axis = _nx.normalize_axis_tuple(axis, a.ndim, argname="axis")
+ weights = _weights_are_valid(weights=weights, a=a, axis=axis)
+ if np.any(weights < 0):
+ raise ValueError("Weights must be non-negative.")
+
+ return _nanquantile_unchecked(
+ a, q, axis, out, overwrite_input, method, keepdims, weights)
+
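+# Illustrative only (hypothetical snippet): weights require
+# method="inverted_cdf", and an integer weight then acts like repeating the
+# corresponding value:
+#
+# >>> import numpy as np
+# >>> x = np.array([1.0, 2.0, 3.0])
+# >>> float(np.nanpercentile(x, 50, weights=[1, 2, 1],
+# ...                        method="inverted_cdf"))
+# 2.0
+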
+
+def _nanquantile_dispatcher(a, q, axis=None, out=None, overwrite_input=None,
+ method=None, keepdims=None, *, weights=None,
+ interpolation=None):
+ return (a, q, out, weights)
+
+
+@array_function_dispatch(_nanquantile_dispatcher)
+def nanquantile(
+ a,
+ q,
+ axis=None,
+ out=None,
+ overwrite_input=False,
+ method="linear",
+ keepdims=np._NoValue,
+ *,
+ weights=None,
+ interpolation=None,
+):
+ """
+ Compute the qth quantile of the data along the specified axis,
+ while ignoring nan values.
+
+ Returns the qth quantile(s) of the array elements.
+
+ Parameters
+ ----------
+ a : array_like
+ Input array or object that can be converted to an array, containing
+ nan values to be ignored
+ q : array_like of float
+ Probability or sequence of probabilities for the quantiles to compute.
+ Values must be between 0 and 1 inclusive.
+ axis : {int, tuple of int, None}, optional
+ Axis or axes along which the quantiles are computed. The
+ default is to compute the quantile(s) along a flattened
+ version of the array.
+ out : ndarray, optional
+ Alternative output array in which to place the result. It must
+ have the same shape and buffer length as the expected output,
+ but the type (of the output) will be cast if necessary.
+ overwrite_input : bool, optional
+ If True, then allow the input array `a` to be modified by intermediate
+ calculations, to save memory. In this case, the contents of the input
+ `a` after this function completes is undefined.
+ method : str, optional
+ This parameter specifies the method to use for estimating the
+ quantile. There are many different methods, some unique to NumPy.
+ See the notes for explanation. The options sorted by their R type
+ as summarized in the H&F paper [1]_ are:
+
+ 1. 'inverted_cdf'
+ 2. 'averaged_inverted_cdf'
+ 3. 'closest_observation'
+ 4. 'interpolated_inverted_cdf'
+ 5. 'hazen'
+ 6. 'weibull'
+ 7. 'linear' (default)
+ 8. 'median_unbiased'
+ 9. 'normal_unbiased'
+
+ The first three methods are discontinuous. NumPy further defines the
+ following discontinuous variations of the default 'linear' (7.) option:
+
+ * 'lower'
+ * 'higher'
+ * 'midpoint'
+ * 'nearest'
+
+ .. versionchanged:: 1.22.0
+ This argument was previously called "interpolation" and only
+ offered the "linear" default and last four options.
+
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left in
+ the result as dimensions with size one. With this option, the
+ result will broadcast correctly against the original array `a`.
+
+ If this is anything but the default value, it will be passed
+ through (in the special case of an empty array) to the
+ `mean` function of the underlying array. If the array is
+ a sub-class and `mean` does not have the kwarg `keepdims`, this
+ will raise a RuntimeError.
+
+ weights : array_like, optional
+ An array of weights associated with the values in `a`. Each value in
+ `a` contributes to the quantile according to its associated weight.
+ The weights array can either be 1-D (in which case its length must be
+ the size of `a` along the given axis) or of the same shape as `a`.
+ If `weights=None`, then all data in `a` are assumed to have a
+ weight equal to one.
+ Only `method="inverted_cdf"` supports weights.
+
+ .. versionadded:: 2.0.0
+
+ interpolation : str, optional
+ Deprecated name for the method keyword argument.
+
+ .. deprecated:: 1.22.0
+
+ Returns
+ -------
+ quantile : scalar or ndarray
+ If `q` is a single probability and `axis=None`, then the result
+ is a scalar. If multiple probability levels are given, first axis of
+ the result corresponds to the quantiles. The other axes are
+ the axes that remain after the reduction of `a`. If the input
+ contains integers or floats smaller than ``float64``, the output
+ data-type is ``float64``. Otherwise, the output data-type is the
+ same as that of the input. If `out` is specified, that array is
+ returned instead.
+
+ See Also
+ --------
+ quantile
+ nanmean
+ nanmedian : equivalent to ``nanquantile(..., 0.5)``
+ nanpercentile : same as nanquantile, but with q in the range [0, 100].
+
+ Notes
+ -----
+ The behavior of `numpy.nanquantile` is the same as that of
+ `numpy.quantile` (ignoring nan values).
+ For more information, please see `numpy.quantile`.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> a = np.array([[10., 7., 4.], [3., 2., 1.]])
+ >>> a[0][1] = np.nan
+ >>> a
+ array([[10., nan, 4.],
+ [ 3., 2., 1.]])
+ >>> np.quantile(a, 0.5)
+ np.float64(nan)
+ >>> np.nanquantile(a, 0.5)
+ 3.0
+ >>> np.nanquantile(a, 0.5, axis=0)
+ array([6.5, 2. , 2.5])
+ >>> np.nanquantile(a, 0.5, axis=1, keepdims=True)
+ array([[7.],
+ [2.]])
+ >>> m = np.nanquantile(a, 0.5, axis=0)
+ >>> out = np.zeros_like(m)
+ >>> np.nanquantile(a, 0.5, axis=0, out=out)
+ array([6.5, 2. , 2.5])
+ >>> m
+ array([6.5, 2. , 2.5])
+ >>> b = a.copy()
+ >>> np.nanquantile(b, 0.5, axis=1, overwrite_input=True)
+ array([7., 2.])
+ >>> assert not np.all(a==b)
+
+ References
+ ----------
+ .. [1] R. J. Hyndman and Y. Fan,
+ "Sample quantiles in statistical packages,"
+ The American Statistician, 50(4), pp. 361-365, 1996
+
+ """
+
+ if interpolation is not None:
+ method = fnb._check_interpolation_as_method(
+ method, interpolation, "nanquantile")
+
+ a = np.asanyarray(a)
+ if a.dtype.kind == "c":
+ raise TypeError("a must be an array of real numbers")
+
+ # Use dtype of array if possible (e.g., if q is a python int or float).
+ if isinstance(q, (int, float)) and a.dtype.kind == "f":
+ q = np.asanyarray(q, dtype=a.dtype)
+ else:
+ q = np.asanyarray(q)
+
+ if not fnb._quantile_is_valid(q):
+ raise ValueError("Quantiles must be in the range [0, 1]")
+
+ if weights is not None:
+ if method != "inverted_cdf":
+ msg = ("Only method 'inverted_cdf' supports weights. "
+ f"Got: {method}.")
+ raise ValueError(msg)
+ if axis is not None:
+ axis = _nx.normalize_axis_tuple(axis, a.ndim, argname="axis")
+ weights = _weights_are_valid(weights=weights, a=a, axis=axis)
+ if np.any(weights < 0):
+ raise ValueError("Weights must be non-negative.")
+
+ return _nanquantile_unchecked(
+ a, q, axis, out, overwrite_input, method, keepdims, weights)
+
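+# Illustrative only (hypothetical snippet): per the dtype handling above, a
+# Python-float q is cast to the array's float dtype, so float32 input keeps a
+# float32 result (assuming the default "linear" method):
+#
+# >>> import numpy as np
+# >>> np.nanquantile(np.array([1.0, 2.0], dtype=np.float32), 0.5).dtype
+# dtype('float32')
+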
+
+def _nanquantile_unchecked(
+ a,
+ q,
+ axis=None,
+ out=None,
+ overwrite_input=False,
+ method="linear",
+ keepdims=np._NoValue,
+ weights=None,
+):
+ """Assumes that q is in [0, 1], and is an ndarray"""
+ # apply_along_axis in _nanpercentile doesn't handle empty arrays well,
+ # so deal with them upfront
+ if a.size == 0:
+ return np.nanmean(a, axis, out=out, keepdims=keepdims)
+ return fnb._ureduce(a,
+ func=_nanquantile_ureduce_func,
+ q=q,
+ weights=weights,
+ keepdims=keepdims,
+ axis=axis,
+ out=out,
+ overwrite_input=overwrite_input,
+ method=method)
+
+
+def _nanquantile_ureduce_func(
+ a: np.ndarray,
+ q: np.ndarray,
+ weights: np.ndarray,
+ axis: int | None = None,
+ out=None,
+ overwrite_input: bool = False,
+ method="linear",
+):
+ """
+ Private function that doesn't support extended axis or keepdims.
+ Those features are added to this function using _ureduce.
+ See nanpercentile for parameter usage.
+ """
+ if axis is None or a.ndim == 1:
+ part = a.ravel()
+ wgt = None if weights is None else weights.ravel()
+ result = _nanquantile_1d(part, q, overwrite_input, method, weights=wgt)
+ # Note that this code could try to fill in `out` right away
+ elif weights is None:
+ result = np.apply_along_axis(_nanquantile_1d, axis, a, q,
+ overwrite_input, method, weights)
+ # apply_along_axis fills in collapsed axis with results.
+ # Move those axes to the beginning to match percentile's
+ # convention.
+ if q.ndim != 0:
+ from_ax = [axis + i for i in range(q.ndim)]
+ result = np.moveaxis(result, from_ax, list(range(q.ndim)))
+ else:
+ # We need to apply along axis over 2 arrays, a and weights.
+ # move operation axes to end for simplicity:
+ a = np.moveaxis(a, axis, -1)
+ if weights is not None:
+ weights = np.moveaxis(weights, axis, -1)
+ if out is not None:
+ result = out
+ else:
+ # weights are limited to `inverted_cdf` so the result dtype
+ # is known to be identical to that of `a` here:
+ result = np.empty_like(a, shape=q.shape + a.shape[:-1])
+
+ for ii in np.ndindex(a.shape[:-1]):
+ result[(...,) + ii] = _nanquantile_1d(
+ a[ii], q, weights=weights[ii],
+ overwrite_input=overwrite_input, method=method,
+ )
+ # This path dealt with `out` already...
+ return result
+
+ if out is not None:
+ out[...] = result
+ return result
+
+
+def _nanquantile_1d(
+ arr1d, q, overwrite_input=False, method="linear", weights=None,
+):
+ """
+ Private function for rank 1 arrays. Compute quantile ignoring NaNs.
+ See nanpercentile for parameter usage
+ """
+ # TODO: What to do when arr1d = [1, np.nan] and weights = [0, 1]?
+ arr1d, weights, overwrite_input = _remove_nan_1d(arr1d,
+ second_arr1d=weights, overwrite_input=overwrite_input)
+ if arr1d.size == 0:
+ # convert to scalar
+ return np.full(q.shape, np.nan, dtype=arr1d.dtype)[()]
+
+ return fnb._quantile_unchecked(
+ arr1d,
+ q,
+ overwrite_input=overwrite_input,
+ method=method,
+ weights=weights,
+ )
+
+
+def _nanvar_dispatcher(a, axis=None, dtype=None, out=None, ddof=None,
+ keepdims=None, *, where=None, mean=None,
+ correction=None):
+ return (a, out)
+
+
+@array_function_dispatch(_nanvar_dispatcher)
+def nanvar(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue,
+ *, where=np._NoValue, mean=np._NoValue, correction=np._NoValue):
+ """
+ Compute the variance along the specified axis, while ignoring NaNs.
+
+ Returns the variance of the array elements, a measure of the spread of
+ a distribution. The variance is computed for the flattened array by
+ default, otherwise over the specified axis.
+
+ For all-NaN slices or slices with zero degrees of freedom, NaN is
+ returned and a `RuntimeWarning` is raised.
+
+ Parameters
+ ----------
+ a : array_like
+ Array containing numbers whose variance is desired. If `a` is not an
+ array, a conversion is attempted.
+ axis : {int, tuple of int, None}, optional
+ Axis or axes along which the variance is computed. The default is to compute
+ the variance of the flattened array.
+ dtype : data-type, optional
+ Type to use in computing the variance. For arrays of integer type
+ the default is `float64`; for arrays of float types it is the same as
+ the array type.
+ out : ndarray, optional
+ Alternate output array in which to place the result. It must have
+ the same shape as the expected output, but the type is cast if
+ necessary.
+ ddof : {int, float}, optional
+ "Delta Degrees of Freedom": the divisor used in the calculation is
+ ``N - ddof``, where ``N`` represents the number of non-NaN
+ elements. By default `ddof` is zero.
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left
+ in the result as dimensions with size one. With this option,
+ the result will broadcast correctly against the original `a`.
+ where : array_like of bool, optional
+ Elements to include in the variance. See `~numpy.ufunc.reduce` for
+ details.
+
+ .. versionadded:: 1.22.0
+
+ mean : array_like, optional
+ Provide the mean to prevent its recalculation. The mean should have
+ a shape as if it was calculated with ``keepdims=True``.
+ The axis for the calculation of the mean should be the same as used in
+ the call to this var function.
+
+ .. versionadded:: 2.0.0
+
+ correction : {int, float}, optional
+ Array API compatible name for the ``ddof`` parameter. Only one of them
+ can be provided at the same time.
+
+ .. versionadded:: 2.0.0
+
+ Returns
+ -------
+ variance : ndarray, see dtype parameter above
+ If `out` is None, return a new array containing the variance,
+ otherwise return a reference to the output array. If ddof is >= the
+ number of non-NaN elements in a slice or the slice contains only
+ NaNs, then the result for that slice is NaN.
+
+ See Also
+ --------
+ std : Standard deviation
+ mean : Average
+ var : Variance while not ignoring NaNs
+ nanstd, nanmean
+ :ref:`ufuncs-output-type`
+
+ Notes
+ -----
+ The variance is the average of the squared deviations from the mean,
+ i.e., ``var = mean(abs(x - x.mean())**2)``.
+
+ The mean is normally calculated as ``x.sum() / N``, where ``N = len(x)``.
+ If, however, `ddof` is specified, the divisor ``N - ddof`` is used
+ instead. In standard statistical practice, ``ddof=1`` provides an
+ unbiased estimator of the variance of a hypothetical infinite
+ population. ``ddof=0`` provides a maximum likelihood estimate of the
+ variance for normally distributed variables.
+
+ Note that for complex numbers, the absolute value is taken before
+ squaring, so that the result is always real and nonnegative.
+
+ For floating-point input, the variance is computed using the same
+ precision the input has. Depending on the input data, this can cause
+ the results to be inaccurate, especially for `float32` (see example
+ below). Specifying a higher-accuracy accumulator using the ``dtype``
+ keyword can alleviate this issue.
+
+ For this function to work on sub-classes of ndarray, they must define
+ `sum` with the kwarg `keepdims`.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> a = np.array([[1, np.nan], [3, 4]])
+ >>> np.nanvar(a)
+ 1.5555555555555554
+ >>> np.nanvar(a, axis=0)
+ array([1., 0.])
+ >>> np.nanvar(a, axis=1)
+ array([0., 0.25]) # may vary
+
+ """
+ arr, mask = _replace_nan(a, 0)
+ if mask is None:
+ return np.var(arr, axis=axis, dtype=dtype, out=out, ddof=ddof,
+ keepdims=keepdims, where=where, mean=mean,
+ correction=correction)
+
+ if dtype is not None:
+ dtype = np.dtype(dtype)
+ if dtype is not None and not issubclass(dtype.type, np.inexact):
+ raise TypeError("If a is inexact, then dtype must be inexact")
+ if out is not None and not issubclass(out.dtype.type, np.inexact):
+ raise TypeError("If a is inexact, then out must be inexact")
+
+ if correction != np._NoValue:
+ if ddof != 0:
+ raise ValueError(
+ "ddof and correction can't be provided simultaneously."
+ )
+ else:
+ ddof = correction
+
+ # Compute mean
+ if type(arr) is np.matrix:
+ _keepdims = np._NoValue
+ else:
+ _keepdims = True
+
+ cnt = np.sum(~mask, axis=axis, dtype=np.intp, keepdims=_keepdims,
+ where=where)
+
+ if mean is not np._NoValue:
+ avg = mean
+ else:
+ # We need to special-case matrix for backward compatibility. For
+ # this to work, these sums need to be called with keepdims=True;
+ # however, matrix now raises an error in that case. Because matrix
+ # drops the keepdims kwarg in order to force keepdims=True
+ # behaviour, this used to work by serendipity.
+ avg = np.sum(arr, axis=axis, dtype=dtype,
+ keepdims=_keepdims, where=where)
+ avg = _divide_by_count(avg, cnt)
+
+ # Compute squared deviation from mean.
+ np.subtract(arr, avg, out=arr, casting='unsafe', where=where)
+ arr = _copyto(arr, 0, mask)
+ if issubclass(arr.dtype.type, np.complexfloating):
+ sqr = np.multiply(arr, arr.conj(), out=arr, where=where).real
+ else:
+ sqr = np.multiply(arr, arr, out=arr, where=where)
+
+ # Compute variance.
+ var = np.sum(sqr, axis=axis, dtype=dtype, out=out, keepdims=keepdims,
+ where=where)
+
+ # Precaution against reduced object arrays
+ try:
+ var_ndim = var.ndim
+ except AttributeError:
+ var_ndim = np.ndim(var)
+ if var_ndim < cnt.ndim:
+ # Subclasses of ndarray may ignore keepdims, so check here.
+ cnt = cnt.squeeze(axis)
+ dof = cnt - ddof
+ var = _divide_by_count(var, dof)
+
+ isbad = (dof <= 0)
+ if np.any(isbad):
+ warnings.warn("Degrees of freedom <= 0 for slice.", RuntimeWarning,
+ stacklevel=2)
+ # NaN, inf, or negative numbers are all possible bad
+ # values, so explicitly replace them with NaN.
+ var = _copyto(var, np.nan, isbad)
+ return var
+
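+# Illustrative only (hypothetical snippet): the mean kwarg lets callers reuse
+# a precomputed mean; it must be shaped as if computed with keepdims=True:
+#
+# >>> import numpy as np
+# >>> a = np.array([[1.0, np.nan], [3.0, 4.0]])
+# >>> m = np.nanmean(a, axis=1, keepdims=True)
+# >>> np.nanvar(a, axis=1, mean=m)
+# array([0.  , 0.25])
+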
+
+def _nanstd_dispatcher(a, axis=None, dtype=None, out=None, ddof=None,
+ keepdims=None, *, where=None, mean=None,
+ correction=None):
+ return (a, out)
+
+
+@array_function_dispatch(_nanstd_dispatcher)
+def nanstd(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue,
+ *, where=np._NoValue, mean=np._NoValue, correction=np._NoValue):
+ """
+ Compute the standard deviation along the specified axis, while
+ ignoring NaNs.
+
+ Returns the standard deviation, a measure of the spread of a
+ distribution, of the non-NaN array elements. The standard deviation is
+ computed for the flattened array by default, otherwise over the
+ specified axis.
+
+ For all-NaN slices or slices with zero degrees of freedom, NaN is
+ returned and a `RuntimeWarning` is raised.
+
+ Parameters
+ ----------
+ a : array_like
+ Calculate the standard deviation of the non-NaN values.
+ axis : {int, tuple of int, None}, optional
+ Axis or axes along which the standard deviation is computed. The default is
+ to compute the standard deviation of the flattened array.
+ dtype : dtype, optional
+ Type to use in computing the standard deviation. For arrays of
+ integer type the default is float64, for arrays of float types it
+ is the same as the array type.
+ out : ndarray, optional
+ Alternative output array in which to place the result. It must have
+ the same shape as the expected output but the type (of the
+ calculated values) will be cast if necessary.
+ ddof : {int, float}, optional
+ Means Delta Degrees of Freedom. The divisor used in calculations
+ is ``N - ddof``, where ``N`` represents the number of non-NaN
+ elements. By default `ddof` is zero.
+
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left
+ in the result as dimensions with size one. With this option,
+ the result will broadcast correctly against the original `a`.
+
+ If this value is anything but the default, it is passed through
+ as-is to the relevant functions of the sub-classes. If these
+ functions do not have a `keepdims` kwarg, a RuntimeError will
+ be raised.
+ where : array_like of bool, optional
+ Elements to include in the standard deviation.
+ See `~numpy.ufunc.reduce` for details.
+
+ .. versionadded:: 1.22.0
+
+ mean : array_like, optional
+ Provide the mean to prevent its recalculation. The mean should have
+ a shape as if it was calculated with ``keepdims=True``.
+ The axis for the calculation of the mean should be the same as used in
+ the call to this std function.
+
+ .. versionadded:: 2.0.0
+
+ correction : {int, float}, optional
+ Array API compatible name for the ``ddof`` parameter. Only one of them
+ can be provided at the same time.
+
+ .. versionadded:: 2.0.0
+
+ Returns
+ -------
+ standard_deviation : ndarray, see dtype parameter above.
+ If `out` is None, return a new array containing the standard
+ deviation, otherwise return a reference to the output array. If
+ ddof is >= the number of non-NaN elements in a slice or the slice
+ contains only NaNs, then the result for that slice is NaN.
+
+ See Also
+ --------
+ var, mean, std
+ nanvar, nanmean
+ :ref:`ufuncs-output-type`
+
+ Notes
+ -----
+ The standard deviation is the square root of the average of the squared
+ deviations from the mean: ``std = sqrt(mean(abs(x - x.mean())**2))``.
+
+ The average squared deviation is normally calculated as
+ ``x.sum() / N``, where ``N = len(x)``. If, however, `ddof` is
+ specified, the divisor ``N - ddof`` is used instead. In standard
+ statistical practice, ``ddof=1`` provides an unbiased estimator of the
+ variance of the infinite population. ``ddof=0`` provides a maximum
+ likelihood estimate of the variance for normally distributed variables.
+ The standard deviation computed in this function is the square root of
+ the estimated variance, so even with ``ddof=1``, it will not be an
+ unbiased estimate of the standard deviation per se.
+
+ Note that, for complex numbers, `std` takes the absolute value before
+ squaring, so that the result is always real and nonnegative.
+
+ For floating-point input, the *std* is computed using the same
+ precision the input has. Depending on the input data, this can cause
+ the results to be inaccurate, especially for float32 (see example
+ below). Specifying a higher-accuracy accumulator using the `dtype`
+ keyword can alleviate this issue.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> a = np.array([[1, np.nan], [3, 4]])
+ >>> np.nanstd(a)
+ 1.247219128924647
+ >>> np.nanstd(a, axis=0)
+ array([1., 0.])
+ >>> np.nanstd(a, axis=1)
+ array([0., 0.5]) # may vary
+
+ """
+ var = nanvar(a, axis=axis, dtype=dtype, out=out, ddof=ddof,
+ keepdims=keepdims, where=where, mean=mean,
+ correction=correction)
+ if isinstance(var, np.ndarray):
+ std = np.sqrt(var, out=var)
+ elif hasattr(var, 'dtype'):
+ std = var.dtype.type(np.sqrt(var))
+ else:
+ std = np.sqrt(var)
+ return std
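+
+# Illustrative only (hypothetical snippet): correction is the array-API
+# spelling of ddof; passing both raises, while passing either alone gives
+# the same result:
+#
+# >>> import numpy as np
+# >>> a = np.array([1.0, np.nan, 3.0])
+# >>> float(np.nanstd(a, correction=1)) == float(np.nanstd(a, ddof=1))
+# True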
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/_nanfunctions_impl.pyi b/.venv/lib/python3.12/site-packages/numpy/lib/_nanfunctions_impl.pyi
new file mode 100644
index 0000000..f39800d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/_nanfunctions_impl.pyi
@@ -0,0 +1,52 @@
+from numpy._core.fromnumeric import (
+ amax,
+ amin,
+ argmax,
+ argmin,
+ cumprod,
+ cumsum,
+ mean,
+ prod,
+ std,
+ sum,
+ var,
+)
+from numpy.lib._function_base_impl import (
+ median,
+ percentile,
+ quantile,
+)
+
+__all__ = [
+ "nansum",
+ "nanmax",
+ "nanmin",
+ "nanargmax",
+ "nanargmin",
+ "nanmean",
+ "nanmedian",
+ "nanpercentile",
+ "nanvar",
+ "nanstd",
+ "nanprod",
+ "nancumsum",
+ "nancumprod",
+ "nanquantile",
+]
+
+# NOTE: In reality these functions are not aliases but distinct functions
+# with identical signatures.
+nanmin = amin
+nanmax = amax
+nanargmin = argmin
+nanargmax = argmax
+nansum = sum
+nanprod = prod
+nancumsum = cumsum
+nancumprod = cumprod
+nanmean = mean
+nanvar = var
+nanstd = std
+nanmedian = median
+nanpercentile = percentile
+nanquantile = quantile
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/_npyio_impl.py b/.venv/lib/python3.12/site-packages/numpy/lib/_npyio_impl.py
new file mode 100644
index 0000000..6aea567
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/_npyio_impl.py
@@ -0,0 +1,2596 @@
+"""
+IO related functions.
+"""
+import contextlib
+import functools
+import itertools
+import operator
+import os
+import pickle
+import re
+import warnings
+import weakref
+from collections.abc import Mapping
+from operator import itemgetter
+
+import numpy as np
+from numpy._core import overrides
+from numpy._core._multiarray_umath import _load_from_filelike
+from numpy._core.multiarray import packbits, unpackbits
+from numpy._core.overrides import finalize_array_function_like, set_module
+from numpy._utils import asbytes, asunicode
+
+from . import format
+from ._datasource import DataSource # noqa: F401
+from ._format_impl import _MAX_HEADER_SIZE
+from ._iotools import (
+ ConversionWarning,
+ ConverterError,
+ ConverterLockError,
+ LineSplitter,
+ NameValidator,
+ StringConverter,
+ _decode_line,
+ _is_string_like,
+ easy_dtype,
+ flatten_dtype,
+ has_nested_fields,
+)
+
+__all__ = [
+ 'savetxt', 'loadtxt', 'genfromtxt', 'load', 'save', 'savez',
+ 'savez_compressed', 'packbits', 'unpackbits', 'fromregex'
+ ]
+
+
+array_function_dispatch = functools.partial(
+ overrides.array_function_dispatch, module='numpy')
+
+
+class BagObj:
+ """
+ BagObj(obj)
+
+ Convert attribute look-ups to getitems on the object passed in.
+
+ Parameters
+ ----------
+ obj : class instance
+ Object on which attribute look-up is performed.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> from numpy.lib._npyio_impl import BagObj as BO
+ >>> class BagDemo:
+ ... def __getitem__(self, key): # An instance of BagObj(BagDemo)
+ ... # will call this method when any
+ ... # attribute look-up is required
+ ... result = "Doesn't matter what you want, "
+ ... return result + "you're gonna get this"
+ ...
+ >>> demo_obj = BagDemo()
+ >>> bagobj = BO(demo_obj)
+ >>> bagobj.hello_there
+ "Doesn't matter what you want, you're gonna get this"
+ >>> bagobj.I_can_be_anything
+ "Doesn't matter what you want, you're gonna get this"
+
+ """
+
+ def __init__(self, obj):
+ # Use weakref to make NpzFile objects collectable by refcount
+ self._obj = weakref.proxy(obj)
+
+ def __getattribute__(self, key):
+ try:
+ return object.__getattribute__(self, '_obj')[key]
+ except KeyError:
+ raise AttributeError(key) from None
+
+ def __dir__(self):
+ """
+ Enables dir(bagobj) to list the files in an NpzFile.
+
+ This also enables tab-completion in an interpreter or IPython.
+ """
+ return list(object.__getattribute__(self, '_obj').keys())
+
+
+def zipfile_factory(file, *args, **kwargs):
+ """
+ Create a ZipFile.
+
+ Allows for Zip64, and the `file` argument can accept file, str, or
+ pathlib.Path objects. `args` and `kwargs` are passed to the zipfile.ZipFile
+ constructor.
+ """
+ if not hasattr(file, 'read'):
+ file = os.fspath(file)
+ import zipfile
+ kwargs['allowZip64'] = True
+ return zipfile.ZipFile(file, *args, **kwargs)
+
+
+@set_module('numpy.lib.npyio')
+class NpzFile(Mapping):
+ """
+ NpzFile(fid)
+
+ A dictionary-like object with lazy-loading of files in the zipped
+ archive provided on construction.
+
+ `NpzFile` is used to load files in the NumPy ``.npz`` data archive
+ format. It assumes that files in the archive have a ``.npy`` extension,
+ other files are ignored.
+
+ The arrays and file strings are lazily loaded on either
+ getitem access using ``obj['key']`` or attribute lookup using
+ ``obj.f.key``. A list of all files (without ``.npy`` extensions) can
+ be obtained with ``obj.files`` and the ZipFile object itself using
+ ``obj.zip``.
+
+ Attributes
+ ----------
+ files : list of str
+ List of all files in the archive with a ``.npy`` extension.
+ zip : ZipFile instance
+ The ZipFile object initialized with the zipped archive.
+ f : BagObj instance
+ An object on which attribute look-up can be performed as an
+ alternative to getitem access on the `NpzFile` instance itself.
+ allow_pickle : bool, optional
+ Allow loading pickled data. Default: False
+ pickle_kwargs : dict, optional
+ Additional keyword arguments to pass on to pickle.load.
+ These are only useful when loading object arrays saved on
+ Python 2.
+ max_header_size : int, optional
+ Maximum allowed size of the header. Large headers may not be safe
+ to load securely and thus require explicitly passing a larger value.
+ See :py:func:`ast.literal_eval()` for details.
+ This option is ignored when `allow_pickle` is passed. In that case
+ the file is by definition trusted and the limit is unnecessary.
+
+ Parameters
+ ----------
+ fid : file, str, or pathlib.Path
+ The zipped archive to open. This is either a file-like object
+ or a string containing the path to the archive.
+ own_fid : bool, optional
+ Whether NpzFile should close the file handle.
+ Requires that `fid` is a file-like object.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> from tempfile import TemporaryFile
+ >>> outfile = TemporaryFile()
+ >>> x = np.arange(10)
+ >>> y = np.sin(x)
+ >>> np.savez(outfile, x=x, y=y)
+ >>> _ = outfile.seek(0)
+
+ >>> npz = np.load(outfile)
+ >>> isinstance(npz, np.lib.npyio.NpzFile)
+ True
+ >>> npz
+ NpzFile 'object' with keys: x, y
+ >>> sorted(npz.files)
+ ['x', 'y']
+ >>> npz['x'] # getitem access
+ array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
+ >>> npz.f.x # attribute lookup
+ array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
+
+ """
+ # Make __exit__ safe if zipfile_factory raises an exception
+ zip = None
+ fid = None
+ _MAX_REPR_ARRAY_COUNT = 5
+
+ def __init__(self, fid, own_fid=False, allow_pickle=False,
+ pickle_kwargs=None, *,
+ max_header_size=_MAX_HEADER_SIZE):
+ # The zipfile import is postponed (inside zipfile_factory) since
+ # zipfile depends on gzip, an optional component of the so-called
+ # standard library.
+ _zip = zipfile_factory(fid)
+ _files = _zip.namelist()
+ self.files = [name.removesuffix(".npy") for name in _files]
+ self._files = dict(zip(self.files, _files))
+ self._files.update(zip(_files, _files))
+ self.allow_pickle = allow_pickle
+ self.max_header_size = max_header_size
+ self.pickle_kwargs = pickle_kwargs
+ self.zip = _zip
+ self.f = BagObj(self)
+ if own_fid:
+ self.fid = fid
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ self.close()
+
+ def close(self):
+ """
+ Close the file.
+
+ """
+ if self.zip is not None:
+ self.zip.close()
+ self.zip = None
+ if self.fid is not None:
+ self.fid.close()
+ self.fid = None
+ self.f = None # break reference cycle
+
+ def __del__(self):
+ self.close()
+
+ # Implement the Mapping ABC
+ def __iter__(self):
+ return iter(self.files)
+
+ def __len__(self):
+ return len(self.files)
+
+ def __getitem__(self, key):
+ try:
+ key = self._files[key]
+ except KeyError:
+ raise KeyError(f"{key} is not a file in the archive") from None
+ else:
+ with self.zip.open(key) as bytes:
+ magic = bytes.read(len(format.MAGIC_PREFIX))
+ bytes.seek(0)
+ if magic == format.MAGIC_PREFIX:
+ # FIXME: This seems like it will copy strings around
+ # more than is strictly necessary. The zipfile
+ # will read the string and then
+ # the format.read_array will copy the string
+ # to another place in memory.
+ # It would be better if the zipfile could read
+ # (or at least uncompress) the data
+ # directly into the array memory.
+ return format.read_array(
+ bytes,
+ allow_pickle=self.allow_pickle,
+ pickle_kwargs=self.pickle_kwargs,
+ max_header_size=self.max_header_size
+ )
+ else:
+ return bytes.read()
+
+ def __contains__(self, key):
+ return (key in self._files)
+
+ def __repr__(self):
+ # Get filename or default to `object`
+ if isinstance(self.fid, str):
+ filename = self.fid
+ else:
+ filename = getattr(self.fid, "name", "object")
+
+ # Get the name of arrays
+ array_names = ', '.join(self.files[:self._MAX_REPR_ARRAY_COUNT])
+ if len(self.files) > self._MAX_REPR_ARRAY_COUNT:
+ array_names += "..."
+ return f"NpzFile {filename!r} with keys: {array_names}"
+
+ # Work around problems with the docstrings in the Mapping methods
+ # They contain a `->`, which confuses the type annotation interpretations
+ # of sphinx-docs. See gh-25964
+
+ def get(self, key, default=None, /):
+ """
+ D.get(k[,d]) returns D[k] if k in D, else d. d defaults to None.
+ """
+ return Mapping.get(self, key, default)
+
+ def items(self):
+ """
+ D.items() returns a set-like object providing a view on the items
+ """
+ return Mapping.items(self)
+
+ def keys(self):
+ """
+ D.keys() returns a set-like object providing a view on the keys
+ """
+ return Mapping.keys(self)
+
+ def values(self):
+ """
+ D.values() returns a set-like object providing a view on the values
+ """
+ return Mapping.values(self)
+
+
+@set_module('numpy')
+def load(file, mmap_mode=None, allow_pickle=False, fix_imports=True,
+ encoding='ASCII', *, max_header_size=_MAX_HEADER_SIZE):
+ """
+ Load arrays or pickled objects from ``.npy``, ``.npz`` or pickled files.
+
+ .. warning:: Loading files that contain object arrays uses the ``pickle``
+ module, which is not secure against erroneous or maliciously
+ constructed data. Consider passing ``allow_pickle=False`` to
+ load data that is known not to contain object arrays for the
+ safer handling of untrusted sources.
+
+ Parameters
+ ----------
+ file : file-like object, string, or pathlib.Path
+ The file to read. File-like objects must support the
+ ``seek()`` and ``read()`` methods and must always
+ be opened in binary mode. Pickled files require that the
+ file-like object support the ``readline()`` method as well.
+ mmap_mode : {None, 'r+', 'r', 'w+', 'c'}, optional
+ If not None, then memory-map the file, using the given mode (see
+ `numpy.memmap` for a detailed description of the modes). A
+ memory-mapped array is kept on disk. However, it can be accessed
+ and sliced like any ndarray. Memory mapping is especially useful
+ for accessing small fragments of large files without reading the
+ entire file into memory.
+ allow_pickle : bool, optional
+ Allow loading pickled object arrays stored in npy files. Reasons for
+ disallowing pickles include security, as loading pickled data can
+ execute arbitrary code. If pickles are disallowed, loading object
+ arrays will fail. Default: False
+ fix_imports : bool, optional
+ Only useful when loading Python 2 generated pickled files,
+ which includes npy/npz files containing object arrays. If `fix_imports`
+ is True, pickle will try to map the old Python 2 names to the new names
+ used in Python 3.
+ encoding : str, optional
+ What encoding to use when reading Python 2 strings. Only useful when
+ loading Python 2 generated pickled files, which includes
+ npy/npz files containing object arrays. Values other than 'latin1',
+ 'ASCII', and 'bytes' are not allowed, as they can corrupt numerical
+ data. Default: 'ASCII'
+ max_header_size : int, optional
+ Maximum allowed size of the header. Large headers may not be safe
+ to load securely and thus require explicitly passing a larger value.
+ See :py:func:`ast.literal_eval()` for details.
+ This option is ignored when `allow_pickle` is passed. In that case
+ the file is by definition trusted and the limit is unnecessary.
+
+ Returns
+ -------
+ result : array, tuple, dict, etc.
+ Data stored in the file. For ``.npz`` files, the returned instance
+ of NpzFile class must be closed to avoid leaking file descriptors.
+
+ Raises
+ ------
+ OSError
+ If the input file does not exist or cannot be read.
+ UnpicklingError
+ If ``allow_pickle=True``, but the file cannot be loaded as a pickle.
+ ValueError
+ The file contains an object array, but ``allow_pickle=False`` was given.
+ EOFError
+ When calling ``np.load`` multiple times on the same file handle,
+ if all data has already been read
+
+ See Also
+ --------
+ save, savez, savez_compressed, loadtxt
+ memmap : Create a memory-map to an array stored in a file on disk.
+ lib.format.open_memmap : Create or load a memory-mapped ``.npy`` file.
+
+ Notes
+ -----
+ - If the file contains pickle data, then whatever object is stored
+ in the pickle is returned.
+ - If the file is a ``.npy`` file, then a single array is returned.
+ - If the file is a ``.npz`` file, then a dictionary-like object is
+ returned, containing ``{filename: array}`` key-value pairs, one for
+ each file in the archive.
+ - If the file is a ``.npz`` file, the returned value supports the
+ context manager protocol in a similar fashion to the open function::
+
+ with load('foo.npz') as data:
+ a = data['a']
+
+ The underlying file descriptor is closed when exiting the 'with'
+ block.
+
+ Examples
+ --------
+ >>> import numpy as np
+
+ Store data to disk, and load it again:
+
+ >>> np.save('/tmp/123', np.array([[1, 2, 3], [4, 5, 6]]))
+ >>> np.load('/tmp/123.npy')
+ array([[1, 2, 3],
+ [4, 5, 6]])
+
+ Store compressed data to disk, and load it again:
+
+ >>> a=np.array([[1, 2, 3], [4, 5, 6]])
+ >>> b=np.array([1, 2])
+ >>> np.savez('/tmp/123.npz', a=a, b=b)
+ >>> data = np.load('/tmp/123.npz')
+ >>> data['a']
+ array([[1, 2, 3],
+ [4, 5, 6]])
+ >>> data['b']
+ array([1, 2])
+ >>> data.close()
+
+ Mem-map the stored array, and then access the second row
+ directly from disk:
+
+ >>> X = np.load('/tmp/123.npy', mmap_mode='r')
+ >>> X[1, :]
+ memmap([4, 5, 6])
+
+ """
+ if encoding not in ('ASCII', 'latin1', 'bytes'):
+ # The 'encoding' value for pickle also affects what encoding
+ # the serialized binary data of NumPy arrays is loaded
+ # in. Pickle does not pass on the encoding information to
+ # NumPy. The unpickling code in numpy._core.multiarray is
+ # written to assume that unicode data appearing where binary
+ # should be is in 'latin1'. 'bytes' is also safe, as is 'ASCII'.
+ #
+ # Other encoding values can corrupt binary data, and we
+ # purposefully disallow them. For the same reason, the errors=
+ # argument is not exposed, as values other than 'strict' can
+ # similarly silently corrupt numerical data.
+ raise ValueError("encoding must be 'ASCII', 'latin1', or 'bytes'")
+
+ pickle_kwargs = {'encoding': encoding, 'fix_imports': fix_imports}
+
+ with contextlib.ExitStack() as stack:
+ if hasattr(file, 'read'):
+ fid = file
+ own_fid = False
+ else:
+ fid = stack.enter_context(open(os.fspath(file), "rb"))
+ own_fid = True
+
+ # Code to distinguish NumPy binary files from pickles.
+ _ZIP_PREFIX = b'PK\x03\x04'
+ _ZIP_SUFFIX = b'PK\x05\x06' # empty zip files start with this
+ N = len(format.MAGIC_PREFIX)
+ magic = fid.read(N)
+ if not magic:
+ raise EOFError("No data left in file")
+ # If the file size is less than N, we need to make sure not
+ # to seek past the beginning of the file
+ fid.seek(-min(N, len(magic)), 1) # back-up
+ if magic.startswith((_ZIP_PREFIX, _ZIP_SUFFIX)):
+ # zip-file (assume .npz)
+ # Potentially transfer file ownership to NpzFile
+ stack.pop_all()
+ ret = NpzFile(fid, own_fid=own_fid, allow_pickle=allow_pickle,
+ pickle_kwargs=pickle_kwargs,
+ max_header_size=max_header_size)
+ return ret
+ elif magic == format.MAGIC_PREFIX:
+ # .npy file
+ if mmap_mode:
+ if allow_pickle:
+ max_header_size = 2**64
+ return format.open_memmap(file, mode=mmap_mode,
+ max_header_size=max_header_size)
+ else:
+ return format.read_array(fid, allow_pickle=allow_pickle,
+ pickle_kwargs=pickle_kwargs,
+ max_header_size=max_header_size)
+ else:
+ # Try a pickle
+ if not allow_pickle:
+ raise ValueError(
+ "This file contains pickled (object) data. If you trust "
+ "the file you can load it unsafely using the "
+ "`allow_pickle=` keyword argument or `pickle.load()`.")
+ try:
+ return pickle.load(fid, **pickle_kwargs)
+ except Exception as e:
+ raise pickle.UnpicklingError(
+ f"Failed to interpret file {file!r} as a pickle") from e
+
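+# Illustrative only (hypothetical snippet): the format sniffing above keys
+# off leading magic bytes; a freshly saved .npy buffer starts with the
+# prefix checked against format.MAGIC_PREFIX:
+#
+# >>> import io
+# >>> import numpy as np
+# >>> buf = io.BytesIO()
+# >>> np.save(buf, np.arange(3))
+# >>> buf.getvalue()[:6]
+# b'\x93NUMPY'
+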
+
+def _save_dispatcher(file, arr, allow_pickle=None, fix_imports=None):
+ return (arr,)
+
+
+@array_function_dispatch(_save_dispatcher)
+def save(file, arr, allow_pickle=True, fix_imports=np._NoValue):
+ """
+ Save an array to a binary file in NumPy ``.npy`` format.
+
+ Parameters
+ ----------
+ file : file, str, or pathlib.Path
+ File or filename to which the data is saved. If file is a file-object,
+ then the filename is unchanged. If file is a string or Path,
+ a ``.npy`` extension will be appended to the filename if it does not
+ already have one.
+ arr : array_like
+ Array data to be saved.
+ allow_pickle : bool, optional
+ Allow saving object arrays using Python pickles. Reasons for
+ disallowing pickles include security (loading pickled data can execute
+ arbitrary code) and portability (pickled objects may not be loadable
+ on different Python installations, for example if the stored objects
+ require libraries that are not available, and not all pickled data is
+ compatible between different versions of Python).
+ Default: True
+ fix_imports : bool, optional
+ The `fix_imports` flag is deprecated and has no effect.
+
+ .. deprecated:: 2.1
+ This flag is ignored since NumPy 1.17 and was only needed to
+ support loading in Python 2 some files written in Python 3.
+
+ See Also
+ --------
+ savez : Save several arrays into a ``.npz`` archive
+ savetxt, load
+
+ Notes
+ -----
+ For a description of the ``.npy`` format, see :py:mod:`numpy.lib.format`.
+
+ Any data saved to the file is appended to the end of the file.
+
+ Examples
+ --------
+ >>> import numpy as np
+
+ >>> from tempfile import TemporaryFile
+ >>> outfile = TemporaryFile()
+
+ >>> x = np.arange(10)
+ >>> np.save(outfile, x)
+
+ >>> _ = outfile.seek(0) # Only needed to simulate closing & reopening file
+ >>> np.load(outfile)
+ array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
+
+
+ >>> with open('test.npy', 'wb') as f:
+ ... np.save(f, np.array([1, 2]))
+ ... np.save(f, np.array([1, 3]))
+ >>> with open('test.npy', 'rb') as f:
+ ... a = np.load(f)
+ ... b = np.load(f)
+ >>> print(a, b)
+ # [1 2] [1 3]
+ """
+ if fix_imports is not np._NoValue:
+ # Deprecated 2024-05-16, NumPy 2.1
+ warnings.warn(
+ "The 'fix_imports' flag is deprecated and has no effect. "
+ "(Deprecated in NumPy 2.1)",
+ DeprecationWarning, stacklevel=2)
+ if hasattr(file, 'write'):
+ file_ctx = contextlib.nullcontext(file)
+ else:
+ file = os.fspath(file)
+ if not file.endswith('.npy'):
+ file = file + '.npy'
+ file_ctx = open(file, "wb")
+
+ with file_ctx as fid:
+ arr = np.asanyarray(arr)
+ format.write_array(fid, arr, allow_pickle=allow_pickle,
+ pickle_kwargs={'fix_imports': fix_imports})
+
+
+def _savez_dispatcher(file, *args, allow_pickle=True, **kwds):
+ yield from args
+ yield from kwds.values()
+
+
+@array_function_dispatch(_savez_dispatcher)
+def savez(file, *args, allow_pickle=True, **kwds):
+ """Save several arrays into a single file in uncompressed ``.npz`` format.
+
+ Provide arrays as keyword arguments to store them under the
+ corresponding name in the output file: ``savez(fn, x=x, y=y)``.
+
+ If arrays are specified as positional arguments, i.e., ``savez(fn,
+ x, y)``, their names will be `arr_0`, `arr_1`, etc.
+
+ Parameters
+ ----------
+ file : file, str, or pathlib.Path
+ Either the filename (string) or an open file (file-like object)
+ where the data will be saved. If file is a string or a Path, the
+ ``.npz`` extension will be appended to the filename if it is not
+ already there.
+ args : Arguments, optional
+ Arrays to save to the file. Please use keyword arguments (see
+ `kwds` below) to assign names to arrays. Arrays specified as
+ args will be named "arr_0", "arr_1", and so on.
+ allow_pickle : bool, optional
+ Allow saving object arrays using Python pickles. Reasons for
+ disallowing pickles include security (loading pickled data can execute
+ arbitrary code) and portability (pickled objects may not be loadable
+ on different Python installations, for example if the stored objects
+ require libraries that are not available, and not all pickled data is
+ compatible between different versions of Python).
+ Default: True
+ kwds : Keyword arguments, optional
+ Arrays to save to the file. Each array will be saved to the
+ output file with its corresponding keyword name.
+
+ Returns
+ -------
+ None
+
+ See Also
+ --------
+ save : Save a single array to a binary file in NumPy format.
+ savetxt : Save an array to a file as plain text.
+ savez_compressed : Save several arrays into a compressed ``.npz`` archive
+
+ Notes
+ -----
+ The ``.npz`` file format is a zipped archive of files named after the
+ variables they contain. The archive is not compressed and each file
+ in the archive contains one variable in ``.npy`` format. For a
+ description of the ``.npy`` format, see :py:mod:`numpy.lib.format`.
+
+ When opening the saved ``.npz`` file with `load` a `~lib.npyio.NpzFile`
+ object is returned. This is a dictionary-like object which can be queried
+ for its list of arrays (with the ``.files`` attribute), and for the arrays
+ themselves.
+
+ Keys passed in `kwds` are used as filenames inside the ZIP archive.
+ Therefore, keys should be valid filenames; e.g., avoid keys that begin with
+ ``/`` or contain ``.``.
+
+ When naming variables with keyword arguments, it is not possible to name a
+ variable ``file``, as this would cause the ``file`` argument to be defined
+ twice in the call to ``savez``.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> from tempfile import TemporaryFile
+ >>> outfile = TemporaryFile()
+ >>> x = np.arange(10)
+ >>> y = np.sin(x)
+
+ Using `savez` with \\*args, the arrays are saved with default names.
+
+ >>> np.savez(outfile, x, y)
+ >>> _ = outfile.seek(0) # Only needed to simulate closing & reopening file
+ >>> npzfile = np.load(outfile)
+ >>> npzfile.files
+ ['arr_0', 'arr_1']
+ >>> npzfile['arr_0']
+ array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
+
+ Using `savez` with \\**kwds, the arrays are saved with the keyword names.
+
+ >>> outfile = TemporaryFile()
+ >>> np.savez(outfile, x=x, y=y)
+ >>> _ = outfile.seek(0)
+ >>> npzfile = np.load(outfile)
+ >>> sorted(npzfile.files)
+ ['x', 'y']
+ >>> npzfile['x']
+ array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
+
+ """
+ _savez(file, args, kwds, False, allow_pickle=allow_pickle)
+
+
+def _savez_compressed_dispatcher(file, *args, allow_pickle=True, **kwds):
+ yield from args
+ yield from kwds.values()
+
+
+@array_function_dispatch(_savez_compressed_dispatcher)
+def savez_compressed(file, *args, allow_pickle=True, **kwds):
+ """
+ Save several arrays into a single file in compressed ``.npz`` format.
+
+ Provide arrays as keyword arguments to store them under the
+ corresponding name in the output file: ``savez_compressed(fn, x=x, y=y)``.
+
+ If arrays are specified as positional arguments, i.e.,
+ ``savez_compressed(fn, x, y)``, their names will be `arr_0`, `arr_1`, etc.
+
+ Parameters
+ ----------
+ file : file, str, or pathlib.Path
+ Either the filename (string) or an open file (file-like object)
+ where the data will be saved. If file is a string or a Path, the
+ ``.npz`` extension will be appended to the filename if it is not
+ already there.
+ args : Arguments, optional
+ Arrays to save to the file. Please use keyword arguments (see
+ `kwds` below) to assign names to arrays. Arrays specified as
+ args will be named "arr_0", "arr_1", and so on.
+ allow_pickle : bool, optional
+ Allow saving object arrays using Python pickles. Reasons for
+ disallowing pickles include security (loading pickled data can execute
+ arbitrary code) and portability (pickled objects may not be loadable
+ on different Python installations, for example if the stored objects
+ require libraries that are not available, and not all pickled data is
+ compatible between different versions of Python).
+ Default: True
+ kwds : Keyword arguments, optional
+ Arrays to save to the file. Each array will be saved to the
+ output file with its corresponding keyword name.
+
+ Returns
+ -------
+ None
+
+ See Also
+ --------
+ numpy.save : Save a single array to a binary file in NumPy format.
+ numpy.savetxt : Save an array to a file as plain text.
+ numpy.savez : Save several arrays into an uncompressed ``.npz`` file format
+ numpy.load : Load the files created by savez_compressed.
+
+ Notes
+ -----
+ The ``.npz`` file format is a zipped archive of files named after the
+ variables they contain. The archive is compressed with
+ ``zipfile.ZIP_DEFLATED`` and each file in the archive contains one variable
+ in ``.npy`` format. For a description of the ``.npy`` format, see
+ :py:mod:`numpy.lib.format`.
+
+ When opening the saved ``.npz`` file with `load` a `~lib.npyio.NpzFile`
+ object is returned. This is a dictionary-like object which can be queried
+ for its list of arrays (with the ``.files`` attribute), and for the arrays
+ themselves.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> test_array = np.random.rand(3, 2)
+ >>> test_vector = np.random.rand(4)
+ >>> np.savez_compressed('/tmp/123', a=test_array, b=test_vector)
+ >>> loaded = np.load('/tmp/123.npz')
+ >>> print(np.array_equal(test_array, loaded['a']))
+ True
+ >>> print(np.array_equal(test_vector, loaded['b']))
+ True
+
+ """
+ _savez(file, args, kwds, True, allow_pickle=allow_pickle)
+
+
+def _savez(file, args, kwds, compress, allow_pickle=True, pickle_kwargs=None):
+ # Import is postponed to here since zipfile depends on gzip, an optional
+ # component of the so-called standard library.
+ import zipfile
+
+ if not hasattr(file, 'write'):
+ file = os.fspath(file)
+ if not file.endswith('.npz'):
+ file = file + '.npz'
+
+ namedict = kwds
+ for i, val in enumerate(args):
+ key = 'arr_%d' % i
+        if key in namedict:
+            raise ValueError(
+                f"Cannot use unnamed variables and keyword {key}")
+ namedict[key] = val
+
+ if compress:
+ compression = zipfile.ZIP_DEFLATED
+ else:
+ compression = zipfile.ZIP_STORED
+
+ zipf = zipfile_factory(file, mode="w", compression=compression)
+ try:
+ for key, val in namedict.items():
+ fname = key + '.npy'
+ val = np.asanyarray(val)
+ # always force zip64, gh-10776
+ with zipf.open(fname, 'w', force_zip64=True) as fid:
+ format.write_array(fid, val,
+ allow_pickle=allow_pickle,
+ pickle_kwargs=pickle_kwargs)
+ finally:
+ zipf.close()
+
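+# A minimal usage sketch for the helper above (paths are illustrative):
+# positional arrays are stored under the keys ``arr_0``, ``arr_1``, ..., and
+# the ``compress`` flag is the only difference between ``savez``
+# (``ZIP_STORED``) and ``savez_compressed`` (``ZIP_DEFLATED``):
+#
+#     >>> x, y = np.arange(3), np.ones(3)
+#     >>> np.savez('/tmp/plain.npz', x, y)              # stored uncompressed
+#     >>> np.savez_compressed('/tmp/packed.npz', x, y)  # deflate-compressed
+#     >>> sorted(np.load('/tmp/plain.npz').files)
+#     ['arr_0', 'arr_1']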
+
+def _ensure_ndmin_ndarray_check_param(ndmin):
+ """Just checks if the param ndmin is supported on
+ _ensure_ndmin_ndarray. It is intended to be used as
+ verification before running anything expensive.
+ e.g. loadtxt, genfromtxt
+ """
+ # Check correctness of the values of `ndmin`
+ if ndmin not in [0, 1, 2]:
+ raise ValueError(f"Illegal value of ndmin keyword: {ndmin}")
+
+def _ensure_ndmin_ndarray(a, *, ndmin: int):
+ """This is a helper function of loadtxt and genfromtxt to ensure
+    the proper minimum dimension as requested
+
+    ndmin : int. Supported values: 0, 1, 2.
+ ^^ whenever this changes, keep in sync with
+ _ensure_ndmin_ndarray_check_param
+ """
+ # Verify that the array has at least dimensions `ndmin`.
+ # Tweak the size and shape of the arrays - remove extraneous dimensions
+ if a.ndim > ndmin:
+ a = np.squeeze(a)
+ # and ensure we have the minimum number of dimensions asked for
+ # - has to be in this order for the odd case ndmin=1, a.squeeze().ndim=0
+ if a.ndim < ndmin:
+ if ndmin == 1:
+ a = np.atleast_1d(a)
+ elif ndmin == 2:
+ a = np.atleast_2d(a).T
+
+ return a
+
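+# A short illustration of the squeeze-then-pad behaviour implemented above
+# (shapes follow from the code; inputs are illustrative):
+#
+#     >>> _ensure_ndmin_ndarray(np.ones((1, 3)), ndmin=0).shape  # squeezed
+#     (3,)
+#     >>> _ensure_ndmin_ndarray(np.ones((1, 3)), ndmin=2).shape  # unchanged
+#     (1, 3)
+#     >>> _ensure_ndmin_ndarray(np.ones(3), ndmin=2).shape  # padded, note .T
+#     (3, 1)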
+
+# amount of lines loadtxt reads in one chunk, can be overridden for testing
+_loadtxt_chunksize = 50000
+
+
+def _check_nonneg_int(value, name="argument"):
+ try:
+ operator.index(value)
+ except TypeError:
+ raise TypeError(f"{name} must be an integer") from None
+ if value < 0:
+ raise ValueError(f"{name} must be nonnegative")
+
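+# Sketch of the validation contract above (values are illustrative):
+# anything accepted by ``operator.index`` and nonnegative passes silently;
+# everything else raises:
+#
+#     >>> _check_nonneg_int(3)           # returns None
+#     >>> _check_nonneg_int(3.0)         # raises TypeError
+#     >>> _check_nonneg_int(-1, "rows")  # raises ValueError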
+
+def _preprocess_comments(iterable, comments, encoding):
+ """
+    Generator that consumes an iterable of lines and strips out the
+    multiple (or multi-character) comments from those lines.
+    This is a pre-processing step to achieve feature parity with loadtxt
+    (we assume that this is a niche feature).
+ """
+ for line in iterable:
+ if isinstance(line, bytes):
+ # Need to handle conversion here, or the splitting would fail
+ line = line.decode(encoding)
+
+ for c in comments:
+ line = line.split(c, 1)[0]
+
+ yield line
+
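+# A quick sketch of the generator above (inputs are illustrative): each
+# marker in ``comments`` truncates a line at its first occurrence, so
+# multiple markers compose left to right:
+#
+#     >>> lines = ["1 2 # note", "3 4 // alt", "5 6"]
+#     >>> list(_preprocess_comments(lines, ("#", "//"), encoding="utf-8"))
+#     ['1 2 ', '3 4 ', '5 6']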
+
+def _read(fname, *, delimiter=',', comment='#', quote='"',
+ imaginary_unit='j', usecols=None, skiplines=0,
+ max_rows=None, converters=None, ndmin=None, unpack=False,
+ dtype=np.float64, encoding=None):
+ r"""
+ Read a NumPy array from a text file.
+ This is a helper function for loadtxt.
+
+ Parameters
+ ----------
+ fname : file, str, or pathlib.Path
+ The filename or the file to be read.
+ delimiter : str, optional
+        Delimiter separating the fields in a line of the file. Default is
+        a comma, ','. If None, any sequence of whitespace is considered
+        a delimiter.
+ comment : str or sequence of str or None, optional
+ Character that begins a comment. All text from the comment
+ character to the end of the line is ignored.
+ Multiple comments or multiple-character comment strings are supported,
+ but may be slower and `quote` must be empty if used.
+ Use None to disable all use of comments.
+ quote : str or None, optional
+ Character that is used to quote string fields. Default is '"'
+ (a double quote). Use None to disable quote support.
+ imaginary_unit : str, optional
+        Character that represents the imaginary unit `sqrt(-1)`.
+ Default is 'j'.
+ usecols : array_like, optional
+ A one-dimensional array of integer column numbers. These are the
+ columns from the file to be included in the array. If this value
+ is not given, all the columns are used.
+ skiplines : int, optional
+ Number of lines to skip before interpreting the data in the file.
+ max_rows : int, optional
+ Maximum number of rows of data to read. Default is to read the
+ entire file.
+ converters : dict or callable, optional
+        A function to parse all column strings into the desired value, or
+ a dictionary mapping column number to a parser function.
+ E.g. if column 0 is a date string: ``converters = {0: datestr2num}``.
+ Converters can also be used to provide a default value for missing
+ data, e.g. ``converters = lambda s: float(s.strip() or 0)`` will
+ convert empty fields to 0.
+ Default: None
+ ndmin : int, optional
+ Minimum dimension of the array returned.
+ Allowed values are 0, 1 or 2. Default is 0.
+ unpack : bool, optional
+ If True, the returned array is transposed, so that arguments may be
+ unpacked using ``x, y, z = read(...)``. When used with a structured
+ data-type, arrays are returned for each field. Default is False.
+ dtype : numpy data type
+ A NumPy dtype instance, can be a structured dtype to map to the
+ columns of the file.
+ encoding : str, optional
+        Encoding used to decode the inputfile. The special value 'bytes'
+        enables backwards-compatible behavior for `converters`,
+ ensuring that inputs to the converter functions are encoded
+ bytes objects. The special value 'bytes' has no additional effect if
+ ``converters=None``. If encoding is ``'bytes'`` or ``None``, the
+ default system encoding is used.
+
+ Returns
+ -------
+ ndarray
+ NumPy array.
+ """
+ # Handle special 'bytes' keyword for encoding
+ byte_converters = False
+ if encoding == 'bytes':
+ encoding = None
+ byte_converters = True
+
+ if dtype is None:
+ raise TypeError("a dtype must be provided.")
+ dtype = np.dtype(dtype)
+
+ read_dtype_via_object_chunks = None
+ if dtype.kind in 'SUM' and dtype in {
+ np.dtype("S0"), np.dtype("U0"), np.dtype("M8"), np.dtype("m8")}:
+ # This is a legacy "flexible" dtype. We do not truly support
+ # parametric dtypes currently (no dtype discovery step in the core),
+ # but have to support these for backward compatibility.
+ read_dtype_via_object_chunks = dtype
+ dtype = np.dtype(object)
+
+ if usecols is not None:
+ # Allow usecols to be a single int or a sequence of ints, the C-code
+ # handles the rest
+ try:
+ usecols = list(usecols)
+ except TypeError:
+ usecols = [usecols]
+
+ _ensure_ndmin_ndarray_check_param(ndmin)
+
+ if comment is None:
+ comments = None
+ else:
+ # assume comments are a sequence of strings
+ if "" in comment:
+ raise ValueError(
+ "comments cannot be an empty string. Use comments=None to "
+ "disable comments."
+ )
+ comments = tuple(comment)
+ comment = None
+ if len(comments) == 0:
+ comments = None # No comments at all
+ elif len(comments) == 1:
+ # If there is only one comment, and that comment has one character,
+ # the normal parsing can deal with it just fine.
+ if isinstance(comments[0], str) and len(comments[0]) == 1:
+ comment = comments[0]
+ comments = None
+ # Input validation if there are multiple comment characters
+ elif delimiter in comments:
+ raise TypeError(
+ f"Comment characters '{comments}' cannot include the "
+ f"delimiter '{delimiter}'"
+ )
+
+ # comment is now either a 1 or 0 character string or a tuple:
+ if comments is not None:
+        # Note: An earlier version supported two-character comments (and
+        # could have been extended to multiple characters); we assume this
+        # is rare enough not to optimize for.
+ if quote is not None:
+ raise ValueError(
+ "when multiple comments or a multi-character comment is "
+ "given, quotes are not supported. In this case quotechar "
+ "must be set to None.")
+
+ if len(imaginary_unit) != 1:
+ raise ValueError('len(imaginary_unit) must be 1.')
+
+ _check_nonneg_int(skiplines)
+ if max_rows is not None:
+ _check_nonneg_int(max_rows)
+ else:
+ # Passing -1 to the C code means "read the entire file".
+ max_rows = -1
+
+ fh_closing_ctx = contextlib.nullcontext()
+ filelike = False
+ try:
+ if isinstance(fname, os.PathLike):
+ fname = os.fspath(fname)
+ if isinstance(fname, str):
+ fh = np.lib._datasource.open(fname, 'rt', encoding=encoding)
+ if encoding is None:
+ encoding = getattr(fh, 'encoding', 'latin1')
+
+ fh_closing_ctx = contextlib.closing(fh)
+ data = fh
+ filelike = True
+ else:
+ if encoding is None:
+ encoding = getattr(fname, 'encoding', 'latin1')
+ data = iter(fname)
+ except TypeError as e:
+ raise ValueError(
+ f"fname must be a string, filehandle, list of strings,\n"
+ f"or generator. Got {type(fname)} instead.") from e
+
+ with fh_closing_ctx:
+ if comments is not None:
+ if filelike:
+ data = iter(data)
+ filelike = False
+ data = _preprocess_comments(data, comments, encoding)
+
+ if read_dtype_via_object_chunks is None:
+ arr = _load_from_filelike(
+ data, delimiter=delimiter, comment=comment, quote=quote,
+ imaginary_unit=imaginary_unit,
+ usecols=usecols, skiplines=skiplines, max_rows=max_rows,
+ converters=converters, dtype=dtype,
+ encoding=encoding, filelike=filelike,
+ byte_converters=byte_converters)
+
+ else:
+ # This branch reads the file into chunks of object arrays and then
+ # casts them to the desired actual dtype. This ensures correct
+ # string-length and datetime-unit discovery (like `arr.astype()`).
+ # Due to chunking, certain error reports are less clear, currently.
+ if filelike:
+ data = iter(data) # cannot chunk when reading from file
+ filelike = False
+
+ c_byte_converters = False
+ if read_dtype_via_object_chunks == "S":
+ c_byte_converters = True # Use latin1 rather than ascii
+
+ chunks = []
+ while max_rows != 0:
+ if max_rows < 0:
+ chunk_size = _loadtxt_chunksize
+ else:
+ chunk_size = min(_loadtxt_chunksize, max_rows)
+
+ next_arr = _load_from_filelike(
+ data, delimiter=delimiter, comment=comment, quote=quote,
+ imaginary_unit=imaginary_unit,
+ usecols=usecols, skiplines=skiplines, max_rows=chunk_size,
+ converters=converters, dtype=dtype,
+ encoding=encoding, filelike=filelike,
+ byte_converters=byte_converters,
+ c_byte_converters=c_byte_converters)
+ # Cast here already. We hope that this is better even for
+ # large files because the storage is more compact. It could
+ # be adapted (in principle the concatenate could cast).
+ chunks.append(next_arr.astype(read_dtype_via_object_chunks))
+
+ skiplines = 0 # Only have to skip for first chunk
+ if max_rows >= 0:
+ max_rows -= chunk_size
+ if len(next_arr) < chunk_size:
+ # There was less data than requested, so we are done.
+ break
+
+ # Need at least one chunk, but if empty, the last one may have
+ # the wrong shape.
+ if len(chunks) > 1 and len(chunks[-1]) == 0:
+ del chunks[-1]
+ if len(chunks) == 1:
+ arr = chunks[0]
+ else:
+ arr = np.concatenate(chunks, axis=0)
+
+ # NOTE: ndmin works as advertised for structured dtypes, but normally
+ # these would return a 1D result plus the structured dimension,
+ # so ndmin=2 adds a third dimension even when no squeezing occurs.
+ # A `squeeze=False` could be a better solution (pandas uses squeeze).
+ arr = _ensure_ndmin_ndarray(arr, ndmin=ndmin)
+
+ if arr.shape:
+ if arr.shape[0] == 0:
+ warnings.warn(
+ f'loadtxt: input contained no data: "{fname}"',
+ category=UserWarning,
+ stacklevel=3
+ )
+
+ if unpack:
+ # Unpack structured dtypes if requested:
+ dt = arr.dtype
+ if dt.names is not None:
+ # For structured arrays, return an array for each field.
+ return [arr[field] for field in dt.names]
+ else:
+ return arr.T
+ else:
+ return arr
+
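+# ``_read`` is the private engine behind ``loadtxt``; a minimal sketch
+# (arguments are illustrative) showing the keyword mapping, e.g. the public
+# ``skiprows``/``quotechar`` become ``skiplines``/``quote`` here:
+#
+#     >>> from io import StringIO
+#     >>> _read(StringIO("1;2\n3;4"), delimiter=";", ndmin=0)
+#     array([[1., 2.],
+#            [3., 4.]])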
+
+@finalize_array_function_like
+@set_module('numpy')
+def loadtxt(fname, dtype=float, comments='#', delimiter=None,
+ converters=None, skiprows=0, usecols=None, unpack=False,
+ ndmin=0, encoding=None, max_rows=None, *, quotechar=None,
+ like=None):
+ r"""
+ Load data from a text file.
+
+ Parameters
+ ----------
+ fname : file, str, pathlib.Path, list of str, generator
+ File, filename, list, or generator to read. If the filename
+ extension is ``.gz`` or ``.bz2``, the file is first decompressed. Note
+ that generators must return bytes or strings. The strings
+ in a list or produced by a generator are treated as lines.
+ dtype : data-type, optional
+ Data-type of the resulting array; default: float. If this is a
+ structured data-type, the resulting array will be 1-dimensional, and
+ each row will be interpreted as an element of the array. In this
+ case, the number of columns used must match the number of fields in
+ the data-type.
+ comments : str or sequence of str or None, optional
+ The characters or list of characters used to indicate the start of a
+ comment. None implies no comments. For backwards compatibility, byte
+ strings will be decoded as 'latin1'. The default is '#'.
+ delimiter : str, optional
+ The character used to separate the values. For backwards compatibility,
+ byte strings will be decoded as 'latin1'. The default is whitespace.
+
+ .. versionchanged:: 1.23.0
+ Only single character delimiters are supported. Newline characters
+ cannot be used as the delimiter.
+
+ converters : dict or callable, optional
+ Converter functions to customize value parsing. If `converters` is
+ callable, the function is applied to all columns, else it must be a
+ dict that maps column number to a parser function.
+ See examples for further details.
+ Default: None.
+
+ .. versionchanged:: 1.23.0
+ The ability to pass a single callable to be applied to all columns
+ was added.
+
+ skiprows : int, optional
+ Skip the first `skiprows` lines, including comments; default: 0.
+ usecols : int or sequence, optional
+ Which columns to read, with 0 being the first. For example,
+ ``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns.
+ The default, None, results in all columns being read.
+ unpack : bool, optional
+ If True, the returned array is transposed, so that arguments may be
+ unpacked using ``x, y, z = loadtxt(...)``. When used with a
+ structured data-type, arrays are returned for each field.
+ Default is False.
+ ndmin : int, optional
+ The returned array will have at least `ndmin` dimensions.
+ Otherwise mono-dimensional axes will be squeezed.
+ Legal values: 0 (default), 1 or 2.
+ encoding : str, optional
+ Encoding used to decode the inputfile. Does not apply to input streams.
+ The special value 'bytes' enables backward compatibility workarounds
+ that ensures you receive byte arrays as results if possible and passes
+ 'latin1' encoded strings to converters. Override this value to receive
+ unicode arrays and pass strings as input to converters. If set to None
+ the system default is used. The default value is None.
+
+ .. versionchanged:: 2.0
+ Before NumPy 2, the default was ``'bytes'`` for Python 2
+ compatibility. The default is now ``None``.
+
+ max_rows : int, optional
+ Read `max_rows` rows of content after `skiprows` lines. The default is
+ to read all the rows. Note that empty rows containing no data such as
+ empty lines and comment lines are not counted towards `max_rows`,
+ while such lines are counted in `skiprows`.
+
+ .. versionchanged:: 1.23.0
+ Lines containing no data, including comment lines (e.g., lines
+ starting with '#' or as specified via `comments`) are not counted
+ towards `max_rows`.
+ quotechar : unicode character or None, optional
+ The character used to denote the start and end of a quoted item.
+ Occurrences of the delimiter or comment characters are ignored within
+ a quoted item. The default value is ``quotechar=None``, which means
+ quoting support is disabled.
+
+ If two consecutive instances of `quotechar` are found within a quoted
+ field, the first is treated as an escape character. See examples.
+
+ .. versionadded:: 1.23.0
+ ${ARRAY_FUNCTION_LIKE}
+
+ .. versionadded:: 1.20.0
+
+ Returns
+ -------
+ out : ndarray
+ Data read from the text file.
+
+ See Also
+ --------
+ load, fromstring, fromregex
+ genfromtxt : Load data with missing values handled as specified.
+ scipy.io.loadmat : reads MATLAB data files
+
+ Notes
+ -----
+ This function aims to be a fast reader for simply formatted files. The
+ `genfromtxt` function provides more sophisticated handling of, e.g.,
+ lines with missing values.
+
+ Each row in the input text file must have the same number of values to be
+    able to read all values. If the rows do not all have the same number
+    of values, a subset of up to n columns (where n is the least number
+    of values present in all rows) can be read by specifying the columns
+    via `usecols`.
+
+ The strings produced by the Python float.hex method can be used as
+ input for floats.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> from io import StringIO # StringIO behaves like a file object
+ >>> c = StringIO("0 1\n2 3")
+ >>> np.loadtxt(c)
+ array([[0., 1.],
+ [2., 3.]])
+
+ >>> d = StringIO("M 21 72\nF 35 58")
+ >>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'),
+ ... 'formats': ('S1', 'i4', 'f4')})
+ array([(b'M', 21, 72.), (b'F', 35, 58.)],
+ dtype=[('gender', 'S1'), ('age', '<i4'), ('weight', '<f4')])
+
+ >>> c = StringIO("1,0,2\n3,0,4")
+ >>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True)
+ >>> x
+ array([1., 3.])
+ >>> y
+ array([2., 4.])
+
+ The `converters` argument is used to specify functions to preprocess the
+ text prior to parsing. `converters` can be a dictionary that maps
+ preprocessing functions to each column:
+
+ >>> s = StringIO("1.618, 2.296\n3.141, 4.669\n")
+ >>> conv = {
+ ... 0: lambda x: np.floor(float(x)), # conversion fn for column 0
+ ... 1: lambda x: np.ceil(float(x)), # conversion fn for column 1
+ ... }
+ >>> np.loadtxt(s, delimiter=",", converters=conv)
+ array([[1., 3.],
+ [3., 5.]])
+
+ `converters` can be a callable instead of a dictionary, in which case it
+ is applied to all columns:
+
+ >>> s = StringIO("0xDE 0xAD\n0xC0 0xDE")
+ >>> import functools
+ >>> conv = functools.partial(int, base=16)
+ >>> np.loadtxt(s, converters=conv)
+ array([[222., 173.],
+ [192., 222.]])
+
+ This example shows how `converters` can be used to convert a field
+ with a trailing minus sign into a negative number.
+
+ >>> s = StringIO("10.01 31.25-\n19.22 64.31\n17.57- 63.94")
+ >>> def conv(fld):
+ ... return -float(fld[:-1]) if fld.endswith("-") else float(fld)
+ ...
+ >>> np.loadtxt(s, converters=conv)
+ array([[ 10.01, -31.25],
+ [ 19.22, 64.31],
+ [-17.57, 63.94]])
+
+ Using a callable as the converter can be particularly useful for handling
+ values with different formatting, e.g. floats with underscores:
+
+ >>> s = StringIO("1 2.7 100_000")
+ >>> np.loadtxt(s, converters=float)
+ array([1.e+00, 2.7e+00, 1.e+05])
+
+ This idea can be extended to automatically handle values specified in
+ many different formats, such as hex values:
+
+ >>> def conv(val):
+ ... try:
+ ... return float(val)
+ ... except ValueError:
+ ... return float.fromhex(val)
+ >>> s = StringIO("1, 2.5, 3_000, 0b4, 0x1.4000000000000p+2")
+ >>> np.loadtxt(s, delimiter=",", converters=conv)
+ array([1.0e+00, 2.5e+00, 3.0e+03, 1.8e+02, 5.0e+00])
+
+ Or a format where the ``-`` sign comes after the number:
+
+ >>> s = StringIO("10.01 31.25-\n19.22 64.31\n17.57- 63.94")
+ >>> conv = lambda x: -float(x[:-1]) if x.endswith("-") else float(x)
+ >>> np.loadtxt(s, converters=conv)
+ array([[ 10.01, -31.25],
+ [ 19.22, 64.31],
+ [-17.57, 63.94]])
+
+ Support for quoted fields is enabled with the `quotechar` parameter.
+ Comment and delimiter characters are ignored when they appear within a
+ quoted item delineated by `quotechar`:
+
+ >>> s = StringIO('"alpha, #42", 10.0\n"beta, #64", 2.0\n')
+ >>> dtype = np.dtype([("label", "U12"), ("value", float)])
+ >>> np.loadtxt(s, dtype=dtype, delimiter=",", quotechar='"')
+ array([('alpha, #42', 10.), ('beta, #64', 2.)],
+ dtype=[('label', '<U12'), ('value', '<f8')])
+
+ Quoted fields can be separated by multiple whitespace characters:
+
+ >>> s = StringIO('"alpha, #42" 10.0\n"beta, #64" 2.0\n')
+ >>> dtype = np.dtype([("label", "U12"), ("value", float)])
+ >>> np.loadtxt(s, dtype=dtype, delimiter=None, quotechar='"')
+ array([('alpha, #42', 10.), ('beta, #64', 2.)],
+ dtype=[('label', '<U12'), ('value', '<f8')])
+
+ Two consecutive quote characters within a quoted field are treated as a
+ single escaped character:
+
+ >>> s = StringIO('"Hello, my name is ""Monty""!"')
+ >>> np.loadtxt(s, dtype="U", delimiter=",", quotechar='"')
+ array('Hello, my name is "Monty"!', dtype='<U26')
+
+    Read a subset of columns when the rows do not all contain the same
+    number of values:
+
+ >>> d = StringIO("1 2\n2 4\n3 9 12\n4 16 20")
+ >>> np.loadtxt(d, usecols=(0, 1))
+ array([[ 1., 2.],
+ [ 2., 4.],
+ [ 3., 9.],
+ [ 4., 16.]])
+
+ """
+
+ if like is not None:
+ return _loadtxt_with_like(
+ like, fname, dtype=dtype, comments=comments, delimiter=delimiter,
+ converters=converters, skiprows=skiprows, usecols=usecols,
+ unpack=unpack, ndmin=ndmin, encoding=encoding,
+ max_rows=max_rows
+ )
+
+ if dtype is None:
+ dtype = np.float64
+
+ comment = comments
+ # Control character type conversions for Py3 convenience
+ if comment is not None:
+ if isinstance(comment, (str, bytes)):
+ comment = [comment]
+ comment = [
+ x.decode('latin1') if isinstance(x, bytes) else x for x in comment]
+ if isinstance(delimiter, bytes):
+ delimiter = delimiter.decode('latin1')
+
+ arr = _read(fname, dtype=dtype, comment=comment, delimiter=delimiter,
+ converters=converters, skiplines=skiprows, usecols=usecols,
+ unpack=unpack, ndmin=ndmin, encoding=encoding,
+ max_rows=max_rows, quote=quotechar)
+
+ return arr
+
+
+_loadtxt_with_like = array_function_dispatch()(loadtxt)
+
+
+def _savetxt_dispatcher(fname, X, fmt=None, delimiter=None, newline=None,
+ header=None, footer=None, comments=None,
+ encoding=None):
+ return (X,)
+
+
+@array_function_dispatch(_savetxt_dispatcher)
+def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='',
+ footer='', comments='# ', encoding=None):
+ """
+ Save an array to a text file.
+
+ Parameters
+ ----------
+ fname : filename, file handle or pathlib.Path
+ If the filename ends in ``.gz``, the file is automatically saved in
+ compressed gzip format. `loadtxt` understands gzipped files
+ transparently.
+ X : 1D or 2D array_like
+ Data to be saved to a text file.
+ fmt : str or sequence of strs, optional
+ A single format (%10.5f), a sequence of formats, or a
+ multi-format string, e.g. 'Iteration %d -- %10.5f', in which
+ case `delimiter` is ignored. For complex `X`, the legal options
+ for `fmt` are:
+
+ * a single specifier, ``fmt='%.4e'``, resulting in numbers formatted
+ like ``' (%s+%sj)' % (fmt, fmt)``
+ * a full string specifying every real and imaginary part, e.g.
+ ``' %.4e %+.4ej %.4e %+.4ej %.4e %+.4ej'`` for 3 columns
+ * a list of specifiers, one per column - in this case, the real
+ and imaginary part must have separate specifiers,
+ e.g. ``['%.3e + %.3ej', '(%.15e%+.15ej)']`` for 2 columns
+ delimiter : str, optional
+ String or character separating columns.
+ newline : str, optional
+ String or character separating lines.
+ header : str, optional
+ String that will be written at the beginning of the file.
+ footer : str, optional
+ String that will be written at the end of the file.
+ comments : str, optional
+ String that will be prepended to the ``header`` and ``footer`` strings,
+ to mark them as comments. Default: '# ', as expected by e.g.
+ ``numpy.loadtxt``.
+ encoding : {None, str}, optional
+ Encoding used to encode the outputfile. Does not apply to output
+ streams. If the encoding is something other than 'bytes' or 'latin1'
+ you will not be able to load the file in NumPy versions < 1.14. Default
+ is 'latin1'.
+
+ See Also
+ --------
+ save : Save an array to a binary file in NumPy ``.npy`` format
+ savez : Save several arrays into an uncompressed ``.npz`` archive
+ savez_compressed : Save several arrays into a compressed ``.npz`` archive
+
+ Notes
+ -----
+ Further explanation of the `fmt` parameter
+ (``%[flag]width[.precision]specifier``):
+
+ flags:
+ ``-`` : left justify
+
+ ``+`` : Forces to precede result with + or -.
+
+ ``0`` : Left pad the number with zeros instead of space (see width).
+
+ width:
+ Minimum number of characters to be printed. The value is not truncated
+ if it has more characters.
+
+ precision:
+ - For integer specifiers (eg. ``d,i,o,x``), the minimum number of
+ digits.
+ - For ``e, E`` and ``f`` specifiers, the number of digits to print
+ after the decimal point.
+ - For ``g`` and ``G``, the maximum number of significant digits.
+ - For ``s``, the maximum number of characters.
+
+ specifiers:
+ ``c`` : character
+
+ ``d`` or ``i`` : signed decimal integer
+
+ ``e`` or ``E`` : scientific notation with ``e`` or ``E``.
+
+ ``f`` : decimal floating point
+
+ ``g,G`` : use the shorter of ``e,E`` or ``f``
+
+ ``o`` : signed octal
+
+ ``s`` : string of characters
+
+ ``u`` : unsigned decimal integer
+
+ ``x,X`` : unsigned hexadecimal integer
+
+ This explanation of ``fmt`` is not complete, for an exhaustive
+ specification see [1]_.
+
+ References
+ ----------
+ .. [1] `Format Specification Mini-Language
+ <https://docs.python.org/library/string.html#format-specification-mini-language>`_,
+ Python Documentation.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> x = y = z = np.arange(0.0,5.0,1.0)
+ >>> np.savetxt('test.out', x, delimiter=',') # X is an array
+ >>> np.savetxt('test.out', (x,y,z)) # x,y,z equal sized 1D arrays
+ >>> np.savetxt('test.out', x, fmt='%1.4e') # use exponential notation
+
+ """
+
+ class WriteWrap:
+ """Convert to bytes on bytestream inputs.
+
+ """
+ def __init__(self, fh, encoding):
+ self.fh = fh
+ self.encoding = encoding
+ self.do_write = self.first_write
+
+ def close(self):
+ self.fh.close()
+
+ def write(self, v):
+ self.do_write(v)
+
+ def write_bytes(self, v):
+ if isinstance(v, bytes):
+ self.fh.write(v)
+ else:
+ self.fh.write(v.encode(self.encoding))
+
+ def write_normal(self, v):
+ self.fh.write(asunicode(v))
+
+ def first_write(self, v):
+ try:
+ self.write_normal(v)
+ self.write = self.write_normal
+ except TypeError:
+ # input is probably a bytestream
+ self.write_bytes(v)
+ self.write = self.write_bytes
+
+ own_fh = False
+ if isinstance(fname, os.PathLike):
+ fname = os.fspath(fname)
+ if _is_string_like(fname):
+ # datasource doesn't support creating a new file ...
+ open(fname, 'wt').close()
+ fh = np.lib._datasource.open(fname, 'wt', encoding=encoding)
+ own_fh = True
+ elif hasattr(fname, 'write'):
+ # wrap to handle byte output streams
+ fh = WriteWrap(fname, encoding or 'latin1')
+ else:
+ raise ValueError('fname must be a string or file handle')
+
+ try:
+ X = np.asarray(X)
+
+ # Handle 1-dimensional arrays
+ if X.ndim == 0 or X.ndim > 2:
+ raise ValueError(
+ "Expected 1D or 2D array, got %dD array instead" % X.ndim)
+ elif X.ndim == 1:
+ # Common case -- 1d array of numbers
+ if X.dtype.names is None:
+ X = np.atleast_2d(X).T
+ ncol = 1
+
+ # Complex dtype -- each field indicates a separate column
+ else:
+ ncol = len(X.dtype.names)
+ else:
+ ncol = X.shape[1]
+
+ iscomplex_X = np.iscomplexobj(X)
+ # `fmt` can be a string with multiple insertion points or a
+    # list of formats. E.g. '%10.5f\t%10d' or ('%10.5f', '%10d')
+ if type(fmt) in (list, tuple):
+ if len(fmt) != ncol:
+ raise AttributeError(f'fmt has wrong shape. {str(fmt)}')
+ format = delimiter.join(fmt)
+ elif isinstance(fmt, str):
+ n_fmt_chars = fmt.count('%')
+ error = ValueError(f'fmt has wrong number of % formats: {fmt}')
+ if n_fmt_chars == 1:
+ if iscomplex_X:
+ fmt = [f' ({fmt}+{fmt}j)', ] * ncol
+ else:
+ fmt = [fmt, ] * ncol
+ format = delimiter.join(fmt)
+ elif iscomplex_X and n_fmt_chars != (2 * ncol):
+ raise error
+ elif ((not iscomplex_X) and n_fmt_chars != ncol):
+ raise error
+ else:
+ format = fmt
+ else:
+ raise ValueError(f'invalid fmt: {fmt!r}')
+
+ if len(header) > 0:
+ header = header.replace('\n', '\n' + comments)
+ fh.write(comments + header + newline)
+ if iscomplex_X:
+ for row in X:
+ row2 = []
+ for number in row:
+ row2.extend((number.real, number.imag))
+ s = format % tuple(row2) + newline
+ fh.write(s.replace('+-', '-'))
+ else:
+ for row in X:
+ try:
+ v = format % tuple(row) + newline
+ except TypeError as e:
+ raise TypeError("Mismatch between array dtype ('%s') and "
+ "format specifier ('%s')"
+ % (str(X.dtype), format)) from e
+ fh.write(v)
+
+ if len(footer) > 0:
+ footer = footer.replace('\n', '\n' + comments)
+ fh.write(comments + footer + newline)
+ finally:
+ if own_fh:
+ fh.close()
+
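+# A sketch of the complex-number formatting rule implemented above (the
+# file path is illustrative): with a single '%' specifier, each complex
+# column is written as ' (real+imagj)', and '+-' is collapsed to '-':
+#
+#     >>> z = np.array([[1 + 2j, 3 - 4j]])
+#     >>> np.savetxt('/tmp/z.txt', z, fmt='%.1f')
+#     >>> open('/tmp/z.txt').read()
+#     ' (1.0+2.0j)  (3.0-4.0j)\n'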
+
+@set_module('numpy')
+def fromregex(file, regexp, dtype, encoding=None):
+ r"""
+ Construct an array from a text file, using regular expression parsing.
+
+ The returned array is always a structured array, and is constructed from
+ all matches of the regular expression in the file. Groups in the regular
+ expression are converted to fields of the structured array.
+
+ Parameters
+ ----------
+ file : file, str, or pathlib.Path
+ Filename or file object to read.
+
+ .. versionchanged:: 1.22.0
+ Now accepts `os.PathLike` implementations.
+
+ regexp : str or regexp
+ Regular expression used to parse the file.
+ Groups in the regular expression correspond to fields in the dtype.
+ dtype : dtype or list of dtypes
+ Dtype for the structured array; must be a structured datatype.
+ encoding : str, optional
+ Encoding used to decode the inputfile. Does not apply to input streams.
+
+ Returns
+ -------
+ output : ndarray
+ The output array, containing the part of the content of `file` that
+ was matched by `regexp`. `output` is always a structured array.
+
+ Raises
+ ------
+ TypeError
+ When `dtype` is not a valid dtype for a structured array.
+
+ See Also
+ --------
+ fromstring, loadtxt
+
+ Notes
+ -----
+ Dtypes for structured arrays can be specified in several forms, but all
+ forms specify at least the data type and field name. For details see
+ `basics.rec`.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> from io import StringIO
+ >>> text = StringIO("1312 foo\n1534 bar\n444 qux")
+
+ >>> regexp = r"(\d+)\s+(...)" # match [digits, whitespace, anything]
+ >>> output = np.fromregex(text, regexp,
+ ... [('num', np.int64), ('key', 'S3')])
+ >>> output
+ array([(1312, b'foo'), (1534, b'bar'), ( 444, b'qux')],
+ dtype=[('num', '<i8'), ('key', 'S3')])
+ >>> output['num']
+ array([1312, 1534, 444])
+
+ """
+ own_fh = False
+ if not hasattr(file, "read"):
+ file = os.fspath(file)
+ file = np.lib._datasource.open(file, 'rt', encoding=encoding)
+ own_fh = True
+
+ try:
+ if not isinstance(dtype, np.dtype):
+ dtype = np.dtype(dtype)
+ if dtype.names is None:
+ raise TypeError('dtype must be a structured datatype.')
+
+ content = file.read()
+ if isinstance(content, bytes) and isinstance(regexp, str):
+ regexp = asbytes(regexp)
+
+ if not hasattr(regexp, 'match'):
+ regexp = re.compile(regexp)
+ seq = regexp.findall(content)
+ if seq and not isinstance(seq[0], tuple):
+ # Only one group is in the regexp.
+ # Create the new array as a single data-type and then
+ # re-interpret as a single-field structured array.
+ newdtype = np.dtype(dtype[dtype.names[0]])
+ output = np.array(seq, dtype=newdtype)
+ output.dtype = dtype
+ else:
+ output = np.array(seq, dtype=dtype)
+
+ return output
+ finally:
+ if own_fh:
+ file.close()
+
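+# The single-group branch above is easy to miss; a short sketch (data is
+# illustrative): a regexp with exactly one group makes ``findall`` return
+# scalars, which are then re-viewed as a one-field structured array:
+#
+#     >>> from io import StringIO
+#     >>> np.fromregex(StringIO("a 1\nb 2"), r"(\d+)", [('num', np.int64)])
+#     array([(1,), (2,)], dtype=[('num', '<i8')])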
+
+#####--------------------------------------------------------------------------
+#---- --- ASCII functions ---
+#####--------------------------------------------------------------------------
+
+
+@finalize_array_function_like
+@set_module('numpy')
+def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
+ skip_header=0, skip_footer=0, converters=None,
+ missing_values=None, filling_values=None, usecols=None,
+ names=None, excludelist=None,
+ deletechars=''.join(sorted(NameValidator.defaultdeletechars)), # noqa: B008
+ replace_space='_', autostrip=False, case_sensitive=True,
+ defaultfmt="f%i", unpack=None, usemask=False, loose=True,
+ invalid_raise=True, max_rows=None, encoding=None,
+ *, ndmin=0, like=None):
+ """
+ Load data from a text file, with missing values handled as specified.
+
+ Each line past the first `skip_header` lines is split at the `delimiter`
+ character, and characters following the `comments` character are discarded.
+
+ Parameters
+ ----------
+ fname : file, str, pathlib.Path, list of str, generator
+ File, filename, list, or generator to read. If the filename
+ extension is ``.gz`` or ``.bz2``, the file is first decompressed. Note
+ that generators must return bytes or strings. The strings
+ in a list or produced by a generator are treated as lines.
+ dtype : dtype, optional
+ Data type of the resulting array.
+ If None, the dtypes will be determined by the contents of each
+ column, individually.
+ comments : str, optional
+ The character used to indicate the start of a comment.
+ All the characters occurring on a line after a comment are discarded.
+ delimiter : str, int, or sequence, optional
+ The string used to separate values. By default, any consecutive
+ whitespaces act as delimiter. An integer or sequence of integers
+ can also be provided as width(s) of each field.
+ skiprows : int, optional
+ `skiprows` was removed in numpy 1.10. Please use `skip_header` instead.
+ skip_header : int, optional
+ The number of lines to skip at the beginning of the file.
+ skip_footer : int, optional
+ The number of lines to skip at the end of the file.
+ converters : variable, optional
+ The set of functions that convert the data of a column to a value.
+ The converters can also be used to provide a default value
+ for missing data: ``converters = {3: lambda s: float(s or 0)}``.
+ missing : variable, optional
+ `missing` was removed in numpy 1.10. Please use `missing_values`
+ instead.
+ missing_values : variable, optional
+ The set of strings corresponding to missing data.
+ filling_values : variable, optional
+ The set of values to be used as default when the data are missing.
+ usecols : sequence, optional
+ Which columns to read, with 0 being the first. For example,
+ ``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns.
+ names : {None, True, str, sequence}, optional
+ If `names` is True, the field names are read from the first line after
+ the first `skip_header` lines. This line can optionally be preceded
+ by a comment delimiter. Any content before the comment delimiter is
+        discarded. If `names` is a sequence or a single string of
+ comma-separated names, the names will be used to define the field
+ names in a structured dtype. If `names` is None, the names of the
+ dtype fields will be used, if any.
+ excludelist : sequence, optional
+ A list of names to exclude. This list is appended to the default list
+ ['return','file','print']. Excluded names are appended with an
+ underscore: for example, `file` would become `file_`.
+ deletechars : str, optional
+ A string combining invalid characters that must be deleted from the
+ names.
+ defaultfmt : str, optional
+ A format used to define default field names, such as "f%i" or "f_%02i".
+ autostrip : bool, optional
+ Whether to automatically strip white spaces from the variables.
+ replace_space : char, optional
+ Character(s) used in replacement of white spaces in the variable
+ names. By default, use a '_'.
+ case_sensitive : {True, False, 'upper', 'lower'}, optional
+ If True, field names are case sensitive.
+ If False or 'upper', field names are converted to upper case.
+ If 'lower', field names are converted to lower case.
+ unpack : bool, optional
+ If True, the returned array is transposed, so that arguments may be
+ unpacked using ``x, y, z = genfromtxt(...)``. When used with a
+ structured data-type, arrays are returned for each field.
+ Default is False.
+ usemask : bool, optional
+ If True, return a masked array.
+ If False, return a regular array.
+ loose : bool, optional
+ If True, do not raise errors for invalid values.
+ invalid_raise : bool, optional
+ If True, an exception is raised if an inconsistency is detected in the
+ number of columns.
+ If False, a warning is emitted and the offending lines are skipped.
+ max_rows : int, optional
+ The maximum number of rows to read. Must not be used with skip_footer
+ at the same time. If given, the value must be at least 1. Default is
+ to read the entire file.
+ encoding : str, optional
+ Encoding used to decode the inputfile. Does not apply when `fname`
+ is a file object. The special value 'bytes' enables backward
+ compatibility workarounds that ensure that you receive byte arrays
+ when possible and passes latin1 encoded strings to converters.
+ Override this value to receive unicode arrays and pass strings
+ as input to converters. If set to None the system default is used.
+        The default value is None.
+
+ .. versionchanged:: 2.0
+ Before NumPy 2, the default was ``'bytes'`` for Python 2
+ compatibility. The default is now ``None``.
+
+ ndmin : int, optional
+        Same parameter as `loadtxt`.
+
+ .. versionadded:: 1.23.0
+ ${ARRAY_FUNCTION_LIKE}
+
+ .. versionadded:: 1.20.0
+
+ Returns
+ -------
+ out : ndarray
+ Data read from the text file. If `usemask` is True, this is a
+ masked array.
+
+ See Also
+ --------
+ numpy.loadtxt : equivalent function when no data is missing.
+
+ Notes
+ -----
+ * When spaces are used as delimiters, or when no delimiter has been given
+ as input, there should not be any missing data between two fields.
+ * When variables are named (either by a flexible dtype or with a `names`
+ sequence), there must not be any header in the file (else a ValueError
+ exception is raised).
+ * Individual values are not stripped of spaces by default.
+      When using a custom converter, make sure the function removes spaces.
+ * Custom converters may receive unexpected values due to dtype
+ discovery.
+
+ References
+ ----------
+ .. [1] NumPy User Guide, section `I/O with NumPy
+ <https://docs.scipy.org/doc/numpy/user/basics.io.genfromtxt.html>`_.
+
+ Examples
+ --------
+ >>> from io import StringIO
+ >>> import numpy as np
+
+ Comma delimited file with mixed dtype
+
+ >>> s = StringIO("1,1.3,abcde")
+ >>> data = np.genfromtxt(s, dtype=[('myint','i8'),('myfloat','f8'),
+ ... ('mystring','S5')], delimiter=",")
+ >>> data
+ array((1, 1.3, b'abcde'),
+ dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', 'S5')])
+
+ Using dtype = None
+
+ >>> _ = s.seek(0) # needed for StringIO example only
+ >>> data = np.genfromtxt(s, dtype=None,
+ ... names = ['myint','myfloat','mystring'], delimiter=",")
+ >>> data
+ array((1, 1.3, 'abcde'),
+ dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '<U5')])
+
+ Specifying dtype and names
+
+ >>> _ = s.seek(0)
+ >>> data = np.genfromtxt(s, dtype="i8,f8,S5",
+ ... names=['myint','myfloat','mystring'], delimiter=",")
+ >>> data
+ array((1, 1.3, b'abcde'),
+ dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', 'S5')])
+
+ An example with fixed-width columns
+
+ >>> s = StringIO("11.3abcde")
+ >>> data = np.genfromtxt(s, dtype=None, names=['intvar','fltvar','strvar'],
+ ... delimiter=[1,3,5])
+ >>> data
+ array((1, 1.3, 'abcde'),
+ dtype=[('intvar', '<i8'), ('fltvar', '<f8'), ('strvar', '<U5')])
+
+ An example to show comments
+
+ >>> f = StringIO('''
+ ... text,# of chars
+ ... hello world,11
+ ... numpy,5''')
+ >>> np.genfromtxt(f, dtype='S12,S12', delimiter=',')
+ array([(b'text', b''), (b'hello world', b'11'), (b'numpy', b'5')],
+ dtype=[('f0', 'S12'), ('f1', 'S12')])
+
+ """
+
+ if like is not None:
+ return _genfromtxt_with_like(
+ like, fname, dtype=dtype, comments=comments, delimiter=delimiter,
+ skip_header=skip_header, skip_footer=skip_footer,
+ converters=converters, missing_values=missing_values,
+ filling_values=filling_values, usecols=usecols, names=names,
+ excludelist=excludelist, deletechars=deletechars,
+ replace_space=replace_space, autostrip=autostrip,
+ case_sensitive=case_sensitive, defaultfmt=defaultfmt,
+ unpack=unpack, usemask=usemask, loose=loose,
+ invalid_raise=invalid_raise, max_rows=max_rows, encoding=encoding,
+ ndmin=ndmin,
+ )
+
+ _ensure_ndmin_ndarray_check_param(ndmin)
+
+ if max_rows is not None:
+ if skip_footer:
+ raise ValueError(
+ "The keywords 'skip_footer' and 'max_rows' can not be "
+ "specified at the same time.")
+ if max_rows < 1:
+ raise ValueError("'max_rows' must be at least 1.")
+
+ if usemask:
+ from numpy.ma import MaskedArray, make_mask_descr
+ # Check the input dictionary of converters
+ user_converters = converters or {}
+ if not isinstance(user_converters, dict):
+ raise TypeError(
+ "The input argument 'converter' should be a valid dictionary "
+ "(got '%s' instead)" % type(user_converters))
+
+ if encoding == 'bytes':
+ encoding = None
+ byte_converters = True
+ else:
+ byte_converters = False
+
+ # Initialize the filehandle, the LineSplitter and the NameValidator
+ if isinstance(fname, os.PathLike):
+ fname = os.fspath(fname)
+ if isinstance(fname, str):
+ fid = np.lib._datasource.open(fname, 'rt', encoding=encoding)
+ fid_ctx = contextlib.closing(fid)
+ else:
+ fid = fname
+ fid_ctx = contextlib.nullcontext(fid)
+ try:
+ fhd = iter(fid)
+ except TypeError as e:
+ raise TypeError(
+ "fname must be a string, a filehandle, a sequence of strings,\n"
+ f"or an iterator of strings. Got {type(fname)} instead."
+ ) from e
+ with fid_ctx:
+ split_line = LineSplitter(delimiter=delimiter, comments=comments,
+ autostrip=autostrip, encoding=encoding)
+ validate_names = NameValidator(excludelist=excludelist,
+ deletechars=deletechars,
+ case_sensitive=case_sensitive,
+ replace_space=replace_space)
+
+ # Skip the first `skip_header` rows
+ try:
+ for i in range(skip_header):
+ next(fhd)
+
+ # Keep on until we find the first valid values
+ first_values = None
+
+ while not first_values:
+ first_line = _decode_line(next(fhd), encoding)
+ if (names is True) and (comments is not None):
+ if comments in first_line:
+ first_line = (
+ ''.join(first_line.split(comments)[1:]))
+ first_values = split_line(first_line)
+ except StopIteration:
+ # return an empty array if the datafile is empty
+ first_line = ''
+ first_values = []
+ warnings.warn(
+ f'genfromtxt: Empty input file: "{fname}"', stacklevel=2
+ )
+
+        # Should we take the first values as names?
+ if names is True:
+ fval = first_values[0].strip()
+ if comments is not None:
+ if fval in comments:
+ del first_values[0]
+
+ # Check the columns to use: make sure `usecols` is a list
+ if usecols is not None:
+ try:
+ usecols = [_.strip() for _ in usecols.split(",")]
+ except AttributeError:
+ try:
+ usecols = list(usecols)
+ except TypeError:
+ usecols = [usecols, ]
+ nbcols = len(usecols or first_values)
+
+ # Check the names and overwrite the dtype.names if needed
+ if names is True:
+ names = validate_names([str(_.strip()) for _ in first_values])
+ first_line = ''
+ elif _is_string_like(names):
+ names = validate_names([_.strip() for _ in names.split(',')])
+ elif names:
+ names = validate_names(names)
+ # Get the dtype
+ if dtype is not None:
+ dtype = easy_dtype(dtype, defaultfmt=defaultfmt, names=names,
+ excludelist=excludelist,
+ deletechars=deletechars,
+ case_sensitive=case_sensitive,
+ replace_space=replace_space)
+        # Make sure names is a list
+ if names is not None:
+ names = list(names)
+
+ if usecols:
+ for (i, current) in enumerate(usecols):
+ # if usecols is a list of names, convert to a list of indices
+ if _is_string_like(current):
+ usecols[i] = names.index(current)
+ elif current < 0:
+ usecols[i] = current + len(first_values)
+ # If the dtype is not None, make sure we update it
+ if (dtype is not None) and (len(dtype) > nbcols):
+ descr = dtype.descr
+ dtype = np.dtype([descr[_] for _ in usecols])
+ names = list(dtype.names)
+ # If `names` is not None, update the names
+ elif (names is not None) and (len(names) > nbcols):
+ names = [names[_] for _ in usecols]
+ elif (names is not None) and (dtype is not None):
+ names = list(dtype.names)
+
+ # Process the missing values ...............................
+ # Rename missing_values for convenience
+ user_missing_values = missing_values or ()
+ if isinstance(user_missing_values, bytes):
+ user_missing_values = user_missing_values.decode('latin1')
+
+ # Define the list of missing_values (one column: one list)
+ missing_values = [[''] for _ in range(nbcols)]
+
+ # We have a dictionary: process it field by field
+ if isinstance(user_missing_values, dict):
+ # Loop on the items
+ for (key, val) in user_missing_values.items():
+                # Is the key a string?
+ if _is_string_like(key):
+ try:
+ # Transform it into an integer
+ key = names.index(key)
+ except ValueError:
+ # We couldn't find it: the name must have been dropped
+ continue
+ # Redefine the key as needed if it's a column number
+ if usecols:
+ try:
+ key = usecols.index(key)
+ except ValueError:
+ pass
+                # Transform the value into a list of strings
+ if isinstance(val, (list, tuple)):
+ val = [str(_) for _ in val]
+ else:
+ val = [str(val), ]
+ # Add the value(s) to the current list of missing
+ if key is None:
+ # None acts as default
+ for miss in missing_values:
+ miss.extend(val)
+ else:
+ missing_values[key].extend(val)
+        # We have a sequence: each item matches a column
+ elif isinstance(user_missing_values, (list, tuple)):
+ for (value, entry) in zip(user_missing_values, missing_values):
+ value = str(value)
+ if value not in entry:
+ entry.append(value)
+        # We have a string: apply it to all entries
+ elif isinstance(user_missing_values, str):
+ user_value = user_missing_values.split(",")
+ for entry in missing_values:
+ entry.extend(user_value)
+ # We have something else: apply it to all entries
+ else:
+ for entry in missing_values:
+ entry.extend([str(user_missing_values)])
+
+ # Process the filling_values ...............................
+ # Rename the input for convenience
+ user_filling_values = filling_values
+ if user_filling_values is None:
+ user_filling_values = []
+ # Define the default
+ filling_values = [None] * nbcols
+        # We have a dictionary: update each entry individually
+ if isinstance(user_filling_values, dict):
+ for (key, val) in user_filling_values.items():
+ if _is_string_like(key):
+ try:
+ # Transform it into an integer
+ key = names.index(key)
+ except ValueError:
+ # We couldn't find it: the name must have been dropped
+ continue
+ # Redefine the key if it's a column number
+ # and usecols is defined
+ if usecols:
+ try:
+ key = usecols.index(key)
+ except ValueError:
+ pass
+ # Add the value to the list
+ filling_values[key] = val
+        # We have a sequence: update on a one-to-one basis
+ elif isinstance(user_filling_values, (list, tuple)):
+ n = len(user_filling_values)
+ if (n <= nbcols):
+ filling_values[:n] = user_filling_values
+ else:
+ filling_values = user_filling_values[:nbcols]
+        # We have something else: use it for all entries
+ else:
+ filling_values = [user_filling_values] * nbcols
+
+ # Initialize the converters ................................
+ if dtype is None:
+ # Note: we can't use a [...]*nbcols, as we would have 3 times
+ # the same converter, instead of 3 different converters.
+ converters = [
+ StringConverter(None, missing_values=miss, default=fill)
+ for (miss, fill) in zip(missing_values, filling_values)
+ ]
+ else:
+ dtype_flat = flatten_dtype(dtype, flatten_base=True)
+ # Initialize the converters
+ if len(dtype_flat) > 1:
+            # Flexible type: get a converter from each dtype
+ zipit = zip(dtype_flat, missing_values, filling_values)
+ converters = [StringConverter(dt,
+ locked=True,
+ missing_values=miss,
+ default=fill)
+ for (dt, miss, fill) in zipit]
+ else:
+ # Set to a default converter (but w/ different missing values)
+ zipit = zip(missing_values, filling_values)
+ converters = [StringConverter(dtype,
+ locked=True,
+ missing_values=miss,
+ default=fill)
+ for (miss, fill) in zipit]
+ # Update the converters to use the user-defined ones
+ uc_update = []
+ for (j, conv) in user_converters.items():
+ # If the converter is specified by column names,
+ # use the index instead
+ if _is_string_like(j):
+ try:
+ j = names.index(j)
+ i = j
+ except ValueError:
+ continue
+ elif usecols:
+ try:
+ i = usecols.index(j)
+ except ValueError:
+ # Unused converter specified
+ continue
+ else:
+ i = j
+ # Find the value to test - first_line is not filtered by usecols:
+ if len(first_line):
+ testing_value = first_values[j]
+ else:
+ testing_value = None
+ if conv is bytes:
+ user_conv = asbytes
+ elif byte_converters:
+                # Converters may use decode to work around numpy's old
+ # behavior, so encode the string again before passing
+ # to the user converter.
+ def tobytes_first(x, conv):
+ if type(x) is bytes:
+ return conv(x)
+ return conv(x.encode("latin1"))
+ user_conv = functools.partial(tobytes_first, conv=conv)
+ else:
+ user_conv = conv
+ converters[i].update(user_conv, locked=True,
+ testing_value=testing_value,
+ default=filling_values[i],
+ missing_values=missing_values[i],)
+ uc_update.append((i, user_conv))
+ # Make sure we have the corrected keys in user_converters...
+ user_converters.update(uc_update)
+
+        # Fixme: possibly an error, as the following variable is never used.
+ # miss_chars = [_.missing_values for _ in converters]
+
+ # Initialize the output lists ...
+ # ... rows
+ rows = []
+ append_to_rows = rows.append
+ # ... masks
+ if usemask:
+ masks = []
+ append_to_masks = masks.append
+ # ... invalid
+ invalid = []
+ append_to_invalid = invalid.append
+
+ # Parse each line
+ for (i, line) in enumerate(itertools.chain([first_line, ], fhd)):
+ values = split_line(line)
+ nbvalues = len(values)
+ # Skip an empty line
+ if nbvalues == 0:
+ continue
+ if usecols:
+ # Select only the columns we need
+ try:
+ values = [values[_] for _ in usecols]
+ except IndexError:
+ append_to_invalid((i + skip_header + 1, nbvalues))
+ continue
+ elif nbvalues != nbcols:
+ append_to_invalid((i + skip_header + 1, nbvalues))
+ continue
+ # Store the values
+ append_to_rows(tuple(values))
+ if usemask:
+ append_to_masks(tuple(v.strip() in m
+ for (v, m) in zip(values,
+ missing_values)))
+ if len(rows) == max_rows:
+ break
+
+ # Upgrade the converters (if needed)
+ if dtype is None:
+ for (i, converter) in enumerate(converters):
+ current_column = [itemgetter(i)(_m) for _m in rows]
+ try:
+ converter.iterupgrade(current_column)
+ except ConverterLockError:
+ errmsg = f"Converter #{i} is locked and cannot be upgraded: "
+ current_column = map(itemgetter(i), rows)
+ for (j, value) in enumerate(current_column):
+ try:
+ converter.upgrade(value)
+ except (ConverterError, ValueError):
+ line_number = j + 1 + skip_header
+ errmsg += f"(occurred line #{line_number} for value '{value}')"
+ raise ConverterError(errmsg)
+
+ # Check that we don't have invalid values
+ nbinvalid = len(invalid)
+ if nbinvalid > 0:
+ nbrows = len(rows) + nbinvalid - skip_footer
+ # Construct the error message
+ template = f" Line #%i (got %i columns instead of {nbcols})"
+ if skip_footer > 0:
+ nbinvalid_skipped = len([_ for _ in invalid
+ if _[0] > nbrows + skip_header])
+ invalid = invalid[:nbinvalid - nbinvalid_skipped]
+ skip_footer -= nbinvalid_skipped
+#
+# nbrows -= skip_footer
+# errmsg = [template % (i, nb)
+# for (i, nb) in invalid if i < nbrows]
+# else:
+ errmsg = [template % (i, nb)
+ for (i, nb) in invalid]
+ if len(errmsg):
+            errmsg.insert(0, "Some errors were detected!")
+            errmsg = "\n".join(errmsg)
+            # Raise an exception?
+            if invalid_raise:
+                raise ValueError(errmsg)
+            # Issue a warning?
+ else:
+ warnings.warn(errmsg, ConversionWarning, stacklevel=2)
+
+ # Strip the last skip_footer data
+ if skip_footer > 0:
+ rows = rows[:-skip_footer]
+ if usemask:
+ masks = masks[:-skip_footer]
+
+ # Convert each value according to the converter:
+ # We want to modify the list in place to avoid creating a new one...
+ if loose:
+ rows = list(
+ zip(*[[conv._loose_call(_r) for _r in map(itemgetter(i), rows)]
+ for (i, conv) in enumerate(converters)]))
+ else:
+ rows = list(
+ zip(*[[conv._strict_call(_r) for _r in map(itemgetter(i), rows)]
+ for (i, conv) in enumerate(converters)]))
+
+ # Reset the dtype
+ data = rows
+ if dtype is None:
+ # Get the dtypes from the types of the converters
+ column_types = [conv.type for conv in converters]
+ # Find the columns with strings...
+ strcolidx = [i for (i, v) in enumerate(column_types)
+ if v == np.str_]
+
+ if byte_converters and strcolidx:
+ # convert strings back to bytes for backward compatibility
+ warnings.warn(
+ "Reading unicode strings without specifying the encoding "
+ "argument is deprecated. Set the encoding, use None for the "
+ "system default.",
+ np.exceptions.VisibleDeprecationWarning, stacklevel=2)
+
+ def encode_unicode_cols(row_tup):
+ row = list(row_tup)
+ for i in strcolidx:
+ row[i] = row[i].encode('latin1')
+ return tuple(row)
+
+ try:
+ data = [encode_unicode_cols(r) for r in data]
+ except UnicodeEncodeError:
+ pass
+ else:
+ for i in strcolidx:
+ column_types[i] = np.bytes_
+
+ # Update string types to be the right length
+ sized_column_types = column_types.copy()
+ for i, col_type in enumerate(column_types):
+ if np.issubdtype(col_type, np.character):
+ n_chars = max(len(row[i]) for row in data)
+ sized_column_types[i] = (col_type, n_chars)
+
+ if names is None:
+ # If the dtype is uniform (before sizing strings)
+ base = {
+ c_type
+ for c, c_type in zip(converters, column_types)
+ if c._checked}
+ if len(base) == 1:
+ uniform_type, = base
+ (ddtype, mdtype) = (uniform_type, bool)
+ else:
+ ddtype = [(defaultfmt % i, dt)
+ for (i, dt) in enumerate(sized_column_types)]
+ if usemask:
+ mdtype = [(defaultfmt % i, bool)
+ for (i, dt) in enumerate(sized_column_types)]
+ else:
+ ddtype = list(zip(names, sized_column_types))
+ mdtype = list(zip(names, [bool] * len(sized_column_types)))
+ output = np.array(data, dtype=ddtype)
+ if usemask:
+ outputmask = np.array(masks, dtype=mdtype)
+ else:
+ # Overwrite the initial dtype names if needed
+ if names and dtype.names is not None:
+ dtype.names = names
+ # Case 1. We have a structured type
+ if len(dtype_flat) > 1:
+            # Nested dtype, e.g. [('a', int), ('b', [('b0', int), ('b1', 'f4')])]
+ # First, create the array using a flattened dtype:
+ # [('a', int), ('b1', int), ('b2', float)]
+ # Then, view the array using the specified dtype.
+ if 'O' in (_.char for _ in dtype_flat):
+ if has_nested_fields(dtype):
+ raise NotImplementedError(
+ "Nested fields involving objects are not supported...")
+ else:
+ output = np.array(data, dtype=dtype)
+ else:
+ rows = np.array(data, dtype=[('', _) for _ in dtype_flat])
+ output = rows.view(dtype)
+ # Now, process the rowmasks the same way
+ if usemask:
+ rowmasks = np.array(
+ masks, dtype=np.dtype([('', bool) for t in dtype_flat]))
+ # Construct the new dtype
+ mdtype = make_mask_descr(dtype)
+ outputmask = rowmasks.view(mdtype)
+ # Case #2. We have a basic dtype
+ else:
+ # We used some user-defined converters
+ if user_converters:
+ ishomogeneous = True
+ descr = []
+ for i, ttype in enumerate([conv.type for conv in converters]):
+ # Keep the dtype of the current converter
+ if i in user_converters:
+ ishomogeneous &= (ttype == dtype.type)
+ if np.issubdtype(ttype, np.character):
+ ttype = (ttype, max(len(row[i]) for row in data))
+ descr.append(('', ttype))
+ else:
+ descr.append(('', dtype))
+            # So we changed the dtype?
+ if not ishomogeneous:
+ # We have more than one field
+ if len(descr) > 1:
+ dtype = np.dtype(descr)
+ # We have only one field: drop the name if not needed.
+ else:
+ dtype = np.dtype(ttype)
+ #
+ output = np.array(data, dtype)
+ if usemask:
+ if dtype.names is not None:
+ mdtype = [(_, bool) for _ in dtype.names]
+ else:
+ mdtype = bool
+ outputmask = np.array(masks, dtype=mdtype)
+ # Try to take care of the missing data we missed
+ names = output.dtype.names
+ if usemask and names:
+ for (name, conv) in zip(names, converters):
+ missing_values = [conv(_) for _ in conv.missing_values
+ if _ != '']
+ for mval in missing_values:
+ outputmask[name] |= (output[name] == mval)
+ # Construct the final array
+ if usemask:
+ output = output.view(MaskedArray)
+ output._mask = outputmask
+
+ output = _ensure_ndmin_ndarray(output, ndmin=ndmin)
+
+ if unpack:
+ if names is None:
+ return output.T
+ elif len(names) == 1:
+ # squeeze single-name dtypes too
+ return output[names[0]]
+ else:
+ # For structured arrays with multiple fields,
+ # return an array for each field.
+ return [output[field] for field in names]
+ return output
+
+
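+# Dispatcher-wrapped alias that backs the ``like=`` argument of `genfromtxt`
+# (NEP 35 array-function protocol dispatch); the name is internal.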
+_genfromtxt_with_like = array_function_dispatch()(genfromtxt)
+
+
+def recfromtxt(fname, **kwargs):
+ """
+ Load ASCII data from a file and return it in a record array.
+
+    If ``usemask=False``, a standard `recarray` is returned;
+    if ``usemask=True``, a `MaskedRecords` array is returned.
+
+ .. deprecated:: 2.0
+ Use `numpy.genfromtxt` instead.
+
+ Parameters
+ ----------
+ fname, kwargs : For a description of input parameters, see `genfromtxt`.
+
+ See Also
+ --------
+ numpy.genfromtxt : generic function
+
+ Notes
+ -----
+ By default, `dtype` is None, which means that the data-type of the output
+ array will be determined from the data.
+
+ """
+
+ # Deprecated in NumPy 2.0, 2023-07-11
+ warnings.warn(
+ "`recfromtxt` is deprecated, "
+ "use `numpy.genfromtxt` instead."
+ "(deprecated in NumPy 2.0)",
+ DeprecationWarning,
+ stacklevel=2
+ )
+
+ kwargs.setdefault("dtype", None)
+ usemask = kwargs.get('usemask', False)
+ output = genfromtxt(fname, **kwargs)
+ if usemask:
+ from numpy.ma.mrecords import MaskedRecords
+ output = output.view(MaskedRecords)
+ else:
+ output = output.view(np.recarray)
+ return output
+
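+# A minimal migration sketch for the deprecated function above (the filename
+# 'data.txt' is illustrative): mirroring the body of `recfromtxt`,
+#     recfromtxt('data.txt')
+# behaves like
+#     np.genfromtxt('data.txt', dtype=None).view(np.recarray)
+# (or ``.view(MaskedRecords)`` when ``usemask=True`` is passed).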
+
+def recfromcsv(fname, **kwargs):
+ """
+ Load ASCII data stored in a comma-separated file.
+
+ The returned array is a record array (if ``usemask=False``, see
+ `recarray`) or a masked record array (if ``usemask=True``,
+ see `ma.mrecords.MaskedRecords`).
+
+ .. deprecated:: 2.0
+ Use `numpy.genfromtxt` with comma as `delimiter` instead.
+
+ Parameters
+ ----------
+ fname, kwargs : For a description of input parameters, see `genfromtxt`.
+
+ See Also
+ --------
+ numpy.genfromtxt : generic function to load ASCII data.
+
+ Notes
+ -----
+ By default, `dtype` is None, which means that the data-type of the output
+ array will be determined from the data.
+
+ """
+
+ # Deprecated in NumPy 2.0, 2023-07-11
+ warnings.warn(
+ "`recfromcsv` is deprecated, "
+ "use `numpy.genfromtxt` with comma as `delimiter` instead. "
+ "(deprecated in NumPy 2.0)",
+ DeprecationWarning,
+ stacklevel=2
+ )
+
+ # Set default kwargs for genfromtxt as relevant to csv import.
+ kwargs.setdefault("case_sensitive", "lower")
+ kwargs.setdefault("names", True)
+ kwargs.setdefault("delimiter", ",")
+ kwargs.setdefault("dtype", None)
+ output = genfromtxt(fname, **kwargs)
+
+ usemask = kwargs.get("usemask", False)
+ if usemask:
+ from numpy.ma.mrecords import MaskedRecords
+ output = output.view(MaskedRecords)
+ else:
+ output = output.view(np.recarray)
+ return output
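+# Likewise, a hedged sketch of the defaults applied above (the filename
+# 'data.csv' is illustrative):
+#     recfromcsv('data.csv')
+# behaves like
+#     np.genfromtxt('data.csv', delimiter=',', names=True, dtype=None,
+#                   case_sensitive='lower').view(np.recarray)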
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/_npyio_impl.pyi b/.venv/lib/python3.12/site-packages/numpy/lib/_npyio_impl.pyi
new file mode 100644
index 0000000..40369c5
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/_npyio_impl.pyi
@@ -0,0 +1,301 @@
+import types
+import zipfile
+from collections.abc import Callable, Collection, Iterable, Iterator, Mapping, Sequence
+from re import Pattern
+from typing import (
+ IO,
+ Any,
+ ClassVar,
+ Generic,
+ Protocol,
+ Self,
+ TypeAlias,
+ overload,
+ type_check_only,
+)
+from typing import Literal as L
+
+from _typeshed import (
+ StrOrBytesPath,
+ StrPath,
+ SupportsKeysAndGetItem,
+ SupportsRead,
+ SupportsWrite,
+)
+from typing_extensions import TypeVar, deprecated, override
+
+import numpy as np
+from numpy._core.multiarray import packbits, unpackbits
+from numpy._typing import ArrayLike, DTypeLike, NDArray, _DTypeLike, _SupportsArrayFunc
+from numpy.ma.mrecords import MaskedRecords
+
+from ._datasource import DataSource as DataSource
+
+__all__ = [
+ "fromregex",
+ "genfromtxt",
+ "load",
+ "loadtxt",
+ "packbits",
+ "save",
+ "savetxt",
+ "savez",
+ "savez_compressed",
+ "unpackbits",
+]
+
+_T_co = TypeVar("_T_co", covariant=True)
+_ScalarT = TypeVar("_ScalarT", bound=np.generic)
+_ScalarT_co = TypeVar("_ScalarT_co", bound=np.generic, default=Any, covariant=True)
+
+_FName: TypeAlias = StrPath | Iterable[str] | Iterable[bytes]
+_FNameRead: TypeAlias = StrPath | SupportsRead[str] | SupportsRead[bytes]
+_FNameWriteBytes: TypeAlias = StrPath | SupportsWrite[bytes]
+_FNameWrite: TypeAlias = _FNameWriteBytes | SupportsWrite[str]
+
+@type_check_only
+class _SupportsReadSeek(SupportsRead[_T_co], Protocol[_T_co]):
+ def seek(self, offset: int, whence: int, /) -> object: ...
+
+class BagObj(Generic[_T_co]):
+ def __init__(self, /, obj: SupportsKeysAndGetItem[str, _T_co]) -> None: ...
+ def __getattribute__(self, key: str, /) -> _T_co: ...
+ def __dir__(self) -> list[str]: ...
+
+class NpzFile(Mapping[str, NDArray[_ScalarT_co]]):
+ _MAX_REPR_ARRAY_COUNT: ClassVar[int] = 5
+
+ zip: zipfile.ZipFile
+ fid: IO[str] | None
+ files: list[str]
+ allow_pickle: bool
+ pickle_kwargs: Mapping[str, Any] | None
+ f: BagObj[NpzFile[_ScalarT_co]]
+
+ #
+ def __init__(
+ self,
+ /,
+ fid: IO[Any],
+ own_fid: bool = False,
+ allow_pickle: bool = False,
+ pickle_kwargs: Mapping[str, object] | None = None,
+ *,
+ max_header_size: int = 10_000,
+ ) -> None: ...
+ def __del__(self) -> None: ...
+ def __enter__(self) -> Self: ...
+ def __exit__(self, cls: type[BaseException] | None, e: BaseException | None, tb: types.TracebackType | None, /) -> None: ...
+ @override
+ def __len__(self) -> int: ...
+ @override
+ def __iter__(self) -> Iterator[str]: ...
+ @override
+ def __getitem__(self, key: str, /) -> NDArray[_ScalarT_co]: ...
+ def close(self) -> None: ...
+
+# NOTE: Returns a `NpzFile` if file is a zip file;
+# returns an `ndarray`/`memmap` otherwise
+def load(
+ file: StrOrBytesPath | _SupportsReadSeek[bytes],
+ mmap_mode: L["r+", "r", "w+", "c"] | None = None,
+ allow_pickle: bool = False,
+ fix_imports: bool = True,
+ encoding: L["ASCII", "latin1", "bytes"] = "ASCII",
+ *,
+ max_header_size: int = 10_000,
+) -> Any: ...
+
+@overload
+def save(file: _FNameWriteBytes, arr: ArrayLike, allow_pickle: bool = True) -> None: ...
+@overload
+@deprecated("The 'fix_imports' flag is deprecated in NumPy 2.1.")
+def save(file: _FNameWriteBytes, arr: ArrayLike, allow_pickle: bool, fix_imports: bool) -> None: ...
+@overload
+@deprecated("The 'fix_imports' flag is deprecated in NumPy 2.1.")
+def save(file: _FNameWriteBytes, arr: ArrayLike, allow_pickle: bool = True, *, fix_imports: bool) -> None: ...
+
+#
+def savez(file: _FNameWriteBytes, *args: ArrayLike, allow_pickle: bool = True, **kwds: ArrayLike) -> None: ...
+
+#
+def savez_compressed(file: _FNameWriteBytes, *args: ArrayLike, allow_pickle: bool = True, **kwds: ArrayLike) -> None: ...
+
+# File-like objects only have to implement `__iter__` and,
+# optionally, `encoding`
+@overload
+def loadtxt(
+ fname: _FName,
+ dtype: None = None,
+ comments: str | Sequence[str] | None = "#",
+ delimiter: str | None = None,
+ converters: Mapping[int | str, Callable[[str], Any]] | Callable[[str], Any] | None = None,
+ skiprows: int = 0,
+ usecols: int | Sequence[int] | None = None,
+ unpack: bool = False,
+ ndmin: L[0, 1, 2] = 0,
+ encoding: str | None = None,
+ max_rows: int | None = None,
+ *,
+ quotechar: str | None = None,
+ like: _SupportsArrayFunc | None = None,
+) -> NDArray[np.float64]: ...
+@overload
+def loadtxt(
+ fname: _FName,
+ dtype: _DTypeLike[_ScalarT],
+ comments: str | Sequence[str] | None = "#",
+ delimiter: str | None = None,
+ converters: Mapping[int | str, Callable[[str], Any]] | Callable[[str], Any] | None = None,
+ skiprows: int = 0,
+ usecols: int | Sequence[int] | None = None,
+ unpack: bool = False,
+ ndmin: L[0, 1, 2] = 0,
+ encoding: str | None = None,
+ max_rows: int | None = None,
+ *,
+ quotechar: str | None = None,
+ like: _SupportsArrayFunc | None = None,
+) -> NDArray[_ScalarT]: ...
+@overload
+def loadtxt(
+ fname: _FName,
+ dtype: DTypeLike,
+ comments: str | Sequence[str] | None = "#",
+ delimiter: str | None = None,
+ converters: Mapping[int | str, Callable[[str], Any]] | Callable[[str], Any] | None = None,
+ skiprows: int = 0,
+ usecols: int | Sequence[int] | None = None,
+ unpack: bool = False,
+ ndmin: L[0, 1, 2] = 0,
+ encoding: str | None = None,
+ max_rows: int | None = None,
+ *,
+ quotechar: str | None = None,
+ like: _SupportsArrayFunc | None = None,
+) -> NDArray[Any]: ...
+
+def savetxt(
+ fname: _FNameWrite,
+ X: ArrayLike,
+ fmt: str | Sequence[str] = "%.18e",
+ delimiter: str = " ",
+ newline: str = "\n",
+ header: str = "",
+ footer: str = "",
+ comments: str = "# ",
+ encoding: str | None = None,
+) -> None: ...
+
+@overload
+def fromregex(
+ file: _FNameRead,
+ regexp: str | bytes | Pattern[Any],
+ dtype: _DTypeLike[_ScalarT],
+ encoding: str | None = None,
+) -> NDArray[_ScalarT]: ...
+@overload
+def fromregex(
+ file: _FNameRead,
+ regexp: str | bytes | Pattern[Any],
+ dtype: DTypeLike,
+ encoding: str | None = None,
+) -> NDArray[Any]: ...
+
+@overload
+def genfromtxt(
+ fname: _FName,
+ dtype: None = None,
+ comments: str = ...,
+ delimiter: str | int | Iterable[int] | None = ...,
+ skip_header: int = ...,
+ skip_footer: int = ...,
+ converters: Mapping[int | str, Callable[[str], Any]] | None = ...,
+ missing_values: Any = ...,
+ filling_values: Any = ...,
+ usecols: Sequence[int] | None = ...,
+ names: L[True] | str | Collection[str] | None = ...,
+ excludelist: Sequence[str] | None = ...,
+ deletechars: str = ...,
+ replace_space: str = ...,
+ autostrip: bool = ...,
+ case_sensitive: bool | L["upper", "lower"] = ...,
+ defaultfmt: str = ...,
+ unpack: bool | None = ...,
+ usemask: bool = ...,
+ loose: bool = ...,
+ invalid_raise: bool = ...,
+ max_rows: int | None = ...,
+ encoding: str = ...,
+ *,
+ ndmin: L[0, 1, 2] = ...,
+ like: _SupportsArrayFunc | None = ...,
+) -> NDArray[Any]: ...
+@overload
+def genfromtxt(
+ fname: _FName,
+ dtype: _DTypeLike[_ScalarT],
+ comments: str = ...,
+ delimiter: str | int | Iterable[int] | None = ...,
+ skip_header: int = ...,
+ skip_footer: int = ...,
+ converters: Mapping[int | str, Callable[[str], Any]] | None = ...,
+ missing_values: Any = ...,
+ filling_values: Any = ...,
+ usecols: Sequence[int] | None = ...,
+ names: L[True] | str | Collection[str] | None = ...,
+ excludelist: Sequence[str] | None = ...,
+ deletechars: str = ...,
+ replace_space: str = ...,
+ autostrip: bool = ...,
+ case_sensitive: bool | L["upper", "lower"] = ...,
+ defaultfmt: str = ...,
+ unpack: bool | None = ...,
+ usemask: bool = ...,
+ loose: bool = ...,
+ invalid_raise: bool = ...,
+ max_rows: int | None = ...,
+ encoding: str = ...,
+ *,
+ ndmin: L[0, 1, 2] = ...,
+ like: _SupportsArrayFunc | None = ...,
+) -> NDArray[_ScalarT]: ...
+@overload
+def genfromtxt(
+ fname: _FName,
+ dtype: DTypeLike,
+ comments: str = ...,
+ delimiter: str | int | Iterable[int] | None = ...,
+ skip_header: int = ...,
+ skip_footer: int = ...,
+ converters: Mapping[int | str, Callable[[str], Any]] | None = ...,
+ missing_values: Any = ...,
+ filling_values: Any = ...,
+ usecols: Sequence[int] | None = ...,
+ names: L[True] | str | Collection[str] | None = ...,
+ excludelist: Sequence[str] | None = ...,
+ deletechars: str = ...,
+ replace_space: str = ...,
+ autostrip: bool = ...,
+ case_sensitive: bool | L["upper", "lower"] = ...,
+ defaultfmt: str = ...,
+ unpack: bool | None = ...,
+ usemask: bool = ...,
+ loose: bool = ...,
+ invalid_raise: bool = ...,
+ max_rows: int | None = ...,
+ encoding: str = ...,
+ *,
+ ndmin: L[0, 1, 2] = ...,
+ like: _SupportsArrayFunc | None = ...,
+) -> NDArray[Any]: ...
+
+@overload
+def recfromtxt(fname: _FName, *, usemask: L[False] = False, **kwargs: object) -> np.recarray[Any, np.dtype[np.record]]: ...
+@overload
+def recfromtxt(fname: _FName, *, usemask: L[True], **kwargs: object) -> MaskedRecords[Any, np.dtype[np.void]]: ...
+
+@overload
+def recfromcsv(fname: _FName, *, usemask: L[False] = False, **kwargs: object) -> np.recarray[Any, np.dtype[np.record]]: ...
+@overload
+def recfromcsv(fname: _FName, *, usemask: L[True], **kwargs: object) -> MaskedRecords[Any, np.dtype[np.void]]: ...
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/_polynomial_impl.py b/.venv/lib/python3.12/site-packages/numpy/lib/_polynomial_impl.py
new file mode 100644
index 0000000..a58ca76
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/_polynomial_impl.py
@@ -0,0 +1,1465 @@
+"""
+Functions to operate on polynomials.
+
+"""
+__all__ = ['poly', 'roots', 'polyint', 'polyder', 'polyadd',
+ 'polysub', 'polymul', 'polydiv', 'polyval', 'poly1d',
+ 'polyfit']
+
+import functools
+import re
+import warnings
+
+import numpy._core.numeric as NX
+from numpy._core import (
+ abs,
+ array,
+ atleast_1d,
+ dot,
+ finfo,
+ hstack,
+ isscalar,
+ ones,
+ overrides,
+)
+from numpy._utils import set_module
+from numpy.exceptions import RankWarning
+from numpy.lib._function_base_impl import trim_zeros
+from numpy.lib._twodim_base_impl import diag, vander
+from numpy.lib._type_check_impl import imag, iscomplex, mintypecode, real
+from numpy.linalg import eigvals, inv, lstsq
+
+array_function_dispatch = functools.partial(
+ overrides.array_function_dispatch, module='numpy')
+
+
+def _poly_dispatcher(seq_of_zeros):
+ return seq_of_zeros
+
+
+@array_function_dispatch(_poly_dispatcher)
+def poly(seq_of_zeros):
+ """
+ Find the coefficients of a polynomial with the given sequence of roots.
+
+ .. note::
+ This forms part of the old polynomial API. Since version 1.4, the
+ new polynomial API defined in `numpy.polynomial` is preferred.
+ A summary of the differences can be found in the
+ :doc:`transition guide </reference/routines.polynomials>`.
+
+ Returns the coefficients of the polynomial whose leading coefficient
+ is one for the given sequence of zeros (multiple roots must be included
+ in the sequence as many times as their multiplicity; see Examples).
+ A square matrix (or array, which will be treated as a matrix) can also
+ be given, in which case the coefficients of the characteristic polynomial
+ of the matrix are returned.
+
+ Parameters
+ ----------
+ seq_of_zeros : array_like, shape (N,) or (N, N)
+ A sequence of polynomial roots, or a square array or matrix object.
+
+ Returns
+ -------
+ c : ndarray
+ 1D array of polynomial coefficients from highest to lowest degree:
+
+ ``c[0] * x**(N) + c[1] * x**(N-1) + ... + c[N-1] * x + c[N]``
+ where c[0] always equals 1.
+
+ Raises
+ ------
+ ValueError
+ If input is the wrong shape (the input must be a 1-D or square
+ 2-D array).
+
+ See Also
+ --------
+ polyval : Compute polynomial values.
+ roots : Return the roots of a polynomial.
+ polyfit : Least squares polynomial fit.
+ poly1d : A one-dimensional polynomial class.
+
+ Notes
+ -----
+ Specifying the roots of a polynomial still leaves one degree of
+ freedom, typically represented by an undetermined leading
+ coefficient. [1]_ In the case of this function, that coefficient -
+ the first one in the returned array - is always taken as one. (If
+ for some reason you have one other point, the only automatic way
+ presently to leverage that information is to use ``polyfit``.)
+
+ The characteristic polynomial, :math:`p_a(t)`, of an `n`-by-`n`
+ matrix **A** is given by
+
+ :math:`p_a(t) = \\mathrm{det}(t\\, \\mathbf{I} - \\mathbf{A})`,
+
+ where **I** is the `n`-by-`n` identity matrix. [2]_
+
+ References
+ ----------
+ .. [1] M. Sullivan and M. Sullivan, III, "Algebra and Trigonometry,
+ Enhanced With Graphing Utilities," Prentice-Hall, pg. 318, 1996.
+
+ .. [2] G. Strang, "Linear Algebra and Its Applications, 2nd Edition,"
+ Academic Press, pg. 182, 1980.
+
+ Examples
+ --------
+
+ Given a sequence of a polynomial's zeros:
+
+ >>> import numpy as np
+
+ >>> np.poly((0, 0, 0)) # Multiple root example
+ array([1., 0., 0., 0.])
+
+ The line above represents z**3 + 0*z**2 + 0*z + 0.
+
+ >>> np.poly((-1./2, 0, 1./2))
+ array([ 1. , 0. , -0.25, 0. ])
+
+ The line above represents z**3 - z/4
+
+ >>> np.poly((np.random.random(1)[0], 0, np.random.random(1)[0]))
+ array([ 1. , -0.77086955, 0.08618131, 0. ]) # random
+
+ Given a square array object:
+
+ >>> P = np.array([[0, 1./3], [-1./2, 0]])
+ >>> np.poly(P)
+ array([1. , 0. , 0.16666667])
+
+ Note how in all cases the leading coefficient is always 1.
+
+ """
+ seq_of_zeros = atleast_1d(seq_of_zeros)
+ sh = seq_of_zeros.shape
+
+ if len(sh) == 2 and sh[0] == sh[1] and sh[0] != 0:
+ seq_of_zeros = eigvals(seq_of_zeros)
+ elif len(sh) == 1:
+ dt = seq_of_zeros.dtype
+ # Let object arrays slip through, e.g. for arbitrary precision
+ if dt != object:
+ seq_of_zeros = seq_of_zeros.astype(mintypecode(dt.char))
+ else:
+ raise ValueError("input must be 1d or non-empty square 2d array.")
+
+ if len(seq_of_zeros) == 0:
+ return 1.0
+ dt = seq_of_zeros.dtype
+ a = ones((1,), dtype=dt)
+ for zero in seq_of_zeros:
+ a = NX.convolve(a, array([1, -zero], dtype=dt), mode='full')
+
+ if issubclass(a.dtype.type, NX.complexfloating):
+ # if complex roots are all complex conjugates, the roots are real.
+ roots = NX.asarray(seq_of_zeros, complex)
+ if NX.all(NX.sort(roots) == NX.sort(roots.conjugate())):
+ a = a.real.copy()
+
+ return a
+
+
+def _roots_dispatcher(p):
+ return p
+
+
+@array_function_dispatch(_roots_dispatcher)
+def roots(p):
+ """
+ Return the roots of a polynomial with coefficients given in p.
+
+ .. note::
+ This forms part of the old polynomial API. Since version 1.4, the
+ new polynomial API defined in `numpy.polynomial` is preferred.
+ A summary of the differences can be found in the
+ :doc:`transition guide </reference/routines.polynomials>`.
+
+ The values in the rank-1 array `p` are coefficients of a polynomial.
+ If the length of `p` is n+1 then the polynomial is described by::
+
+ p[0] * x**n + p[1] * x**(n-1) + ... + p[n-1]*x + p[n]
+
+ Parameters
+ ----------
+ p : array_like
+ Rank-1 array of polynomial coefficients.
+
+ Returns
+ -------
+ out : ndarray
+ An array containing the roots of the polynomial.
+
+ Raises
+ ------
+ ValueError
+ When `p` cannot be converted to a rank-1 array.
+
+    See Also
+ --------
+ poly : Find the coefficients of a polynomial with a given sequence
+ of roots.
+ polyval : Compute polynomial values.
+ polyfit : Least squares polynomial fit.
+ poly1d : A one-dimensional polynomial class.
+
+ Notes
+ -----
+ The algorithm relies on computing the eigenvalues of the
+ companion matrix [1]_.
+
+ References
+ ----------
+ .. [1] R. A. Horn & C. R. Johnson, *Matrix Analysis*. Cambridge, UK:
+ Cambridge University Press, 1999, pp. 146-7.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> coeff = [3.2, 2, 1]
+ >>> np.roots(coeff)
+ array([-0.3125+0.46351241j, -0.3125-0.46351241j])
+
+ """
+ # If input is scalar, this makes it an array
+ p = atleast_1d(p)
+ if p.ndim != 1:
+ raise ValueError("Input must be a rank-1 array.")
+
+ # find non-zero array entries
+ non_zero = NX.nonzero(NX.ravel(p))[0]
+
+ # Return an empty array if polynomial is all zeros
+ if len(non_zero) == 0:
+ return NX.array([])
+
+ # find the number of trailing zeros -- this is the number of roots at 0.
+ trailing_zeros = len(p) - non_zero[-1] - 1
+
+ # strip leading and trailing zeros
+ p = p[int(non_zero[0]):int(non_zero[-1]) + 1]
+
+ # casting: if incoming array isn't floating point, make it floating point.
+ if not issubclass(p.dtype.type, (NX.floating, NX.complexfloating)):
+ p = p.astype(float)
+
+ N = len(p)
+ if N > 1:
+ # build companion matrix and find its eigenvalues (the roots)
+ A = diag(NX.ones((N - 2,), p.dtype), -1)
+ A[0, :] = -p[1:] / p[0]
+ roots = eigvals(A)
+ else:
+ roots = NX.array([])
+
+ # tack any zeros onto the back of the array
+ roots = hstack((roots, NX.zeros(trailing_zeros, roots.dtype)))
+ return roots
+
+
+def _polyint_dispatcher(p, m=None, k=None):
+ return (p,)
+
+
+@array_function_dispatch(_polyint_dispatcher)
+def polyint(p, m=1, k=None):
+ """
+ Return an antiderivative (indefinite integral) of a polynomial.
+
+ .. note::
+ This forms part of the old polynomial API. Since version 1.4, the
+ new polynomial API defined in `numpy.polynomial` is preferred.
+ A summary of the differences can be found in the
+ :doc:`transition guide </reference/routines.polynomials>`.
+
+ The returned order `m` antiderivative `P` of polynomial `p` satisfies
+ :math:`\\frac{d^m}{dx^m}P(x) = p(x)` and is defined up to `m - 1`
+ integration constants `k`. The constants determine the low-order
+ polynomial part
+
+ .. math:: \\frac{k_{m-1}}{0!} x^0 + \\ldots + \\frac{k_0}{(m-1)!}x^{m-1}
+
+ of `P` so that :math:`P^{(j)}(0) = k_{m-j-1}`.
+
+ Parameters
+ ----------
+ p : array_like or poly1d
+ Polynomial to integrate.
+ A sequence is interpreted as polynomial coefficients, see `poly1d`.
+ m : int, optional
+ Order of the antiderivative. (Default: 1)
+ k : list of `m` scalars or scalar, optional
+ Integration constants. They are given in the order of integration:
+ those corresponding to highest-order terms come first.
+
+ If ``None`` (default), all constants are assumed to be zero.
+ If `m = 1`, a single scalar can be given instead of a list.
+
+ See Also
+ --------
+ polyder : derivative of a polynomial
+ poly1d.integ : equivalent method
+
+ Examples
+ --------
+
+ The defining property of the antiderivative:
+
+ >>> import numpy as np
+
+ >>> p = np.poly1d([1,1,1])
+ >>> P = np.polyint(p)
+ >>> P
+ poly1d([ 0.33333333, 0.5 , 1. , 0. ]) # may vary
+ >>> np.polyder(P) == p
+ True
+
+ The integration constants default to zero, but can be specified:
+
+ >>> P = np.polyint(p, 3)
+ >>> P(0)
+ 0.0
+ >>> np.polyder(P)(0)
+ 0.0
+ >>> np.polyder(P, 2)(0)
+ 0.0
+ >>> P = np.polyint(p, 3, k=[6,5,3])
+ >>> P
+ poly1d([ 0.01666667, 0.04166667, 0.16666667, 3. , 5. , 3. ]) # may vary
+
+ Note that 3 = 6 / 2!, and that the constants are given in the order of
+    integration; the constant of the highest-order polynomial term comes first:
+
+ >>> np.polyder(P, 2)(0)
+ 6.0
+ >>> np.polyder(P, 1)(0)
+ 5.0
+ >>> P(0)
+ 3.0
+
+ """
+ m = int(m)
+ if m < 0:
+ raise ValueError("Order of integral must be positive (see polyder)")
+ if k is None:
+ k = NX.zeros(m, float)
+ k = atleast_1d(k)
+ if len(k) == 1 and m > 1:
+ k = k[0] * NX.ones(m, float)
+ if len(k) < m:
+ raise ValueError(
+ "k must be a scalar or a rank-1 array of length 1 or >m.")
+
+ truepoly = isinstance(p, poly1d)
+ p = NX.asarray(p)
+ if m == 0:
+ if truepoly:
+ return poly1d(p)
+ return p
+ else:
+ # Note: this must work also with object and integer arrays
+ y = NX.concatenate((p.__truediv__(NX.arange(len(p), 0, -1)), [k[0]]))
+ val = polyint(y, m - 1, k=k[1:])
+ if truepoly:
+ return poly1d(val)
+ return val
+
+
+def _polyder_dispatcher(p, m=None):
+ return (p,)
+
+
+@array_function_dispatch(_polyder_dispatcher)
+def polyder(p, m=1):
+ """
+ Return the derivative of the specified order of a polynomial.
+
+ .. note::
+ This forms part of the old polynomial API. Since version 1.4, the
+ new polynomial API defined in `numpy.polynomial` is preferred.
+ A summary of the differences can be found in the
+ :doc:`transition guide </reference/routines.polynomials>`.
+
+ Parameters
+ ----------
+ p : poly1d or sequence
+ Polynomial to differentiate.
+ A sequence is interpreted as polynomial coefficients, see `poly1d`.
+ m : int, optional
+ Order of differentiation (default: 1)
+
+ Returns
+ -------
+ der : poly1d
+ A new polynomial representing the derivative.
+
+ See Also
+ --------
+ polyint : Anti-derivative of a polynomial.
+ poly1d : Class for one-dimensional polynomials.
+
+ Examples
+ --------
+
+    The derivative of the polynomial :math:`x^3 + x^2 + x + 1` is:
+
+ >>> import numpy as np
+
+ >>> p = np.poly1d([1,1,1,1])
+ >>> p2 = np.polyder(p)
+ >>> p2
+ poly1d([3, 2, 1])
+
+ which evaluates to:
+
+ >>> p2(2.)
+ 17.0
+
+ We can verify this, approximating the derivative with
+ ``(f(x + h) - f(x))/h``:
+
+ >>> (p(2. + 0.001) - p(2.)) / 0.001
+ 17.007000999997857
+
+ The fourth-order derivative of a 3rd-order polynomial is zero:
+
+ >>> np.polyder(p, 2)
+ poly1d([6, 2])
+ >>> np.polyder(p, 3)
+ poly1d([6])
+ >>> np.polyder(p, 4)
+ poly1d([0])
+
+ """
+ m = int(m)
+ if m < 0:
+ raise ValueError("Order of derivative must be positive (see polyint)")
+
+ truepoly = isinstance(p, poly1d)
+ p = NX.asarray(p)
+ n = len(p) - 1
+ y = p[:-1] * NX.arange(n, 0, -1)
+ if m == 0:
+ val = p
+ else:
+ val = polyder(y, m - 1)
+ if truepoly:
+ val = poly1d(val)
+ return val
+
+
+def _polyfit_dispatcher(x, y, deg, rcond=None, full=None, w=None, cov=None):
+ return (x, y, w)
+
+
+@array_function_dispatch(_polyfit_dispatcher)
+def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
+ """
+ Least squares polynomial fit.
+
+ .. note::
+ This forms part of the old polynomial API. Since version 1.4, the
+ new polynomial API defined in `numpy.polynomial` is preferred.
+ A summary of the differences can be found in the
+ :doc:`transition guide </reference/routines.polynomials>`.
+
+ Fit a polynomial ``p(x) = p[0] * x**deg + ... + p[deg]`` of degree `deg`
+    to points `(x, y)`. Returns a vector of coefficients `p` that minimizes
+ the squared error in the order `deg`, `deg-1`, ... `0`.
+
+ The `Polynomial.fit <numpy.polynomial.polynomial.Polynomial.fit>` class
+ method is recommended for new code as it is more stable numerically. See
+ the documentation of the method for more information.
+
+ Parameters
+ ----------
+ x : array_like, shape (M,)
+ x-coordinates of the M sample points ``(x[i], y[i])``.
+ y : array_like, shape (M,) or (M, K)
+ y-coordinates of the sample points. Several data sets of sample
+ points sharing the same x-coordinates can be fitted at once by
+ passing in a 2D-array that contains one dataset per column.
+ deg : int
+ Degree of the fitting polynomial
+ rcond : float, optional
+ Relative condition number of the fit. Singular values smaller than
+ this relative to the largest singular value will be ignored. The
+ default value is len(x)*eps, where eps is the relative precision of
+ the float type, about 2e-16 in most cases.
+ full : bool, optional
+ Switch determining nature of return value. When it is False (the
+ default) just the coefficients are returned, when True diagnostic
+ information from the singular value decomposition is also returned.
+ w : array_like, shape (M,), optional
+ Weights. If not None, the weight ``w[i]`` applies to the unsquared
+ residual ``y[i] - y_hat[i]`` at ``x[i]``. Ideally the weights are
+ chosen so that the errors of the products ``w[i]*y[i]`` all have the
+ same variance. When using inverse-variance weighting, use
+ ``w[i] = 1/sigma(y[i])``. The default value is None.
+ cov : bool or str, optional
+ If given and not `False`, return not just the estimate but also its
+        covariance matrix. By default, the covariance is scaled by
+ chi2/dof, where dof = M - (deg + 1), i.e., the weights are presumed
+ to be unreliable except in a relative sense and everything is scaled
+ such that the reduced chi2 is unity. This scaling is omitted if
+ ``cov='unscaled'``, as is relevant for the case that the weights are
+ w = 1/sigma, with sigma known to be a reliable estimate of the
+ uncertainty.
+
+ Returns
+ -------
+ p : ndarray, shape (deg + 1,) or (deg + 1, K)
+ Polynomial coefficients, highest power first. If `y` was 2-D, the
+ coefficients for `k`-th data set are in ``p[:,k]``.
+
+ residuals, rank, singular_values, rcond
+ These values are only returned if ``full == True``
+
+ - residuals -- sum of squared residuals of the least squares fit
+ - rank -- the effective rank of the scaled Vandermonde
+ coefficient matrix
+ - singular_values -- singular values of the scaled Vandermonde
+ coefficient matrix
+ - rcond -- value of `rcond`.
+
+ For more details, see `numpy.linalg.lstsq`.
+
+ V : ndarray, shape (deg + 1, deg + 1) or (deg + 1, deg + 1, K)
+ Present only if ``full == False`` and ``cov == True``. The covariance
+ matrix of the polynomial coefficient estimates. The diagonal of
+ this matrix are the variance estimates for each coefficient. If y
+ is a 2-D array, then the covariance matrix for the `k`-th data set
+ are in ``V[:,:,k]``
+
+
+ Warns
+ -----
+ RankWarning
+ The rank of the coefficient matrix in the least-squares fit is
+ deficient. The warning is only raised if ``full == False``.
+
+ The warnings can be turned off by
+
+ >>> import warnings
+ >>> warnings.simplefilter('ignore', np.exceptions.RankWarning)
+
+ See Also
+ --------
+ polyval : Compute polynomial values.
+ linalg.lstsq : Computes a least-squares fit.
+ scipy.interpolate.UnivariateSpline : Computes spline fits.
+
+ Notes
+ -----
+ The solution minimizes the squared error
+
+ .. math::
+ E = \\sum_{j=0}^k |p(x_j) - y_j|^2
+
+ in the equations::
+
+ x[0]**n * p[0] + ... + x[0] * p[n-1] + p[n] = y[0]
+ x[1]**n * p[0] + ... + x[1] * p[n-1] + p[n] = y[1]
+ ...
+ x[k]**n * p[0] + ... + x[k] * p[n-1] + p[n] = y[k]
+
+ The coefficient matrix of the coefficients `p` is a Vandermonde matrix.
+
+ `polyfit` issues a `~exceptions.RankWarning` when the least-squares fit is
+ badly conditioned. This implies that the best fit is not well-defined due
+ to numerical error. The results may be improved by lowering the polynomial
+    degree or by replacing `x` with ``x - x.mean()``. The `rcond` parameter
+ can also be set to a value smaller than its default, but the resulting
+ fit may be spurious: including contributions from the small singular
+ values can add numerical noise to the result.
+
+ Note that fitting polynomial coefficients is inherently badly conditioned
+ when the degree of the polynomial is large or the interval of sample points
+ is badly centered. The quality of the fit should always be checked in these
+ cases. When polynomial fits are not satisfactory, splines may be a good
+ alternative.
+
+ References
+ ----------
+ .. [1] Wikipedia, "Curve fitting",
+ https://en.wikipedia.org/wiki/Curve_fitting
+ .. [2] Wikipedia, "Polynomial interpolation",
+ https://en.wikipedia.org/wiki/Polynomial_interpolation
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> import warnings
+ >>> x = np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0])
+ >>> y = np.array([0.0, 0.8, 0.9, 0.1, -0.8, -1.0])
+ >>> z = np.polyfit(x, y, 3)
+ >>> z
+ array([ 0.08703704, -0.81349206, 1.69312169, -0.03968254]) # may vary
+
+ It is convenient to use `poly1d` objects for dealing with polynomials:
+
+ >>> p = np.poly1d(z)
+ >>> p(0.5)
+ 0.6143849206349179 # may vary
+ >>> p(3.5)
+ -0.34732142857143039 # may vary
+ >>> p(10)
+ 22.579365079365115 # may vary
+
+ High-order polynomials may oscillate wildly:
+
+ >>> with warnings.catch_warnings():
+ ... warnings.simplefilter('ignore', np.exceptions.RankWarning)
+ ... p30 = np.poly1d(np.polyfit(x, y, 30))
+ ...
+ >>> p30(4)
+ -0.80000000000000204 # may vary
+ >>> p30(5)
+ -0.99999999999999445 # may vary
+ >>> p30(4.5)
+ -0.10547061179440398 # may vary
+
+ Illustration:
+
+ >>> import matplotlib.pyplot as plt
+ >>> xp = np.linspace(-2, 6, 100)
+ >>> _ = plt.plot(x, y, '.', xp, p(xp), '-', xp, p30(xp), '--')
+ >>> plt.ylim(-2,2)
+ (-2, 2)
+ >>> plt.show()
+
+ """
+ order = int(deg) + 1
+ x = NX.asarray(x) + 0.0
+ y = NX.asarray(y) + 0.0
+
+ # check arguments.
+ if deg < 0:
+ raise ValueError("expected deg >= 0")
+ if x.ndim != 1:
+ raise TypeError("expected 1D vector for x")
+ if x.size == 0:
+ raise TypeError("expected non-empty vector for x")
+ if y.ndim < 1 or y.ndim > 2:
+ raise TypeError("expected 1D or 2D array for y")
+ if x.shape[0] != y.shape[0]:
+ raise TypeError("expected x and y to have same length")
+
+ # set rcond
+ if rcond is None:
+ rcond = len(x) * finfo(x.dtype).eps
+
+ # set up least squares equation for powers of x
+ lhs = vander(x, order)
+ rhs = y
+
+ # apply weighting
+ if w is not None:
+ w = NX.asarray(w) + 0.0
+ if w.ndim != 1:
+ raise TypeError("expected a 1-d array for weights")
+ if w.shape[0] != y.shape[0]:
+ raise TypeError("expected w and y to have the same length")
+ lhs *= w[:, NX.newaxis]
+ if rhs.ndim == 2:
+ rhs *= w[:, NX.newaxis]
+ else:
+ rhs *= w
+
+ # scale lhs to improve condition number and solve
+ scale = NX.sqrt((lhs * lhs).sum(axis=0))
+ lhs /= scale
+ c, resids, rank, s = lstsq(lhs, rhs, rcond)
+ c = (c.T / scale).T # broadcast scale coefficients
+
+ # warn on rank reduction, which indicates an ill conditioned matrix
+ if rank != order and not full:
+ msg = "Polyfit may be poorly conditioned"
+ warnings.warn(msg, RankWarning, stacklevel=2)
+
+ if full:
+ return c, resids, rank, s, rcond
+ elif cov:
+ Vbase = inv(dot(lhs.T, lhs))
+ Vbase /= NX.outer(scale, scale)
+ if cov == "unscaled":
+ fac = 1
+ else:
+ if len(x) <= order:
+ raise ValueError("the number of data points must exceed order "
+ "to scale the covariance matrix")
+ # note, this used to be: fac = resids / (len(x) - order - 2.0)
+ # it was decided that the "- 2" (originally justified by "Bayesian
+ # uncertainty analysis") is not what the user expects
+ # (see gh-11196 and gh-11197)
+ fac = resids / (len(x) - order)
+ if y.ndim == 1:
+ return c, Vbase * fac
+ else:
+ return c, Vbase[:, :, NX.newaxis] * fac
+ else:
+ return c
+
+
+def _polyval_dispatcher(p, x):
+ return (p, x)
+
+
+@array_function_dispatch(_polyval_dispatcher)
+def polyval(p, x):
+ """
+ Evaluate a polynomial at specific values.
+
+ .. note::
+ This forms part of the old polynomial API. Since version 1.4, the
+ new polynomial API defined in `numpy.polynomial` is preferred.
+ A summary of the differences can be found in the
+ :doc:`transition guide </reference/routines.polynomials>`.
+
+ If `p` is of length N, this function returns the value::
+
+ p[0]*x**(N-1) + p[1]*x**(N-2) + ... + p[N-2]*x + p[N-1]
+
+ If `x` is a sequence, then ``p(x)`` is returned for each element of ``x``.
+ If `x` is another polynomial then the composite polynomial ``p(x(t))``
+ is returned.
+
+ Parameters
+ ----------
+ p : array_like or poly1d object
+ 1D array of polynomial coefficients (including coefficients equal
+ to zero) from highest degree to the constant term, or an
+ instance of poly1d.
+ x : array_like or poly1d object
+ A number, an array of numbers, or an instance of poly1d, at
+ which to evaluate `p`.
+
+ Returns
+ -------
+ values : ndarray or poly1d
+ If `x` is a poly1d instance, the result is the composition of the two
+ polynomials, i.e., `x` is "substituted" in `p` and the simplified
+ result is returned. In addition, the type of `x` - array_like or
+        poly1d - governs the type of the output: if `x` is array_like,
+        `values` is also array_like; if `x` is a poly1d object, so is `values`.
+
+ See Also
+ --------
+ poly1d: A polynomial class.
+
+ Notes
+ -----
+ Horner's scheme [1]_ is used to evaluate the polynomial. Even so,
+ for polynomials of high degree the values may be inaccurate due to
+ rounding errors. Use carefully.
+
+ If `x` is a subtype of `ndarray` the return value will be of the same type.
+
+ References
+ ----------
+ .. [1] I. N. Bronshtein, K. A. Semendyayev, and K. A. Hirsch (Eng.
+ trans. Ed.), *Handbook of Mathematics*, New York, Van Nostrand
+ Reinhold Co., 1985, pg. 720.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.polyval([3,0,1], 5) # 3 * 5**2 + 0 * 5**1 + 1
+ 76
+ >>> np.polyval([3,0,1], np.poly1d(5))
+ poly1d([76])
+ >>> np.polyval(np.poly1d([3,0,1]), 5)
+ 76
+ >>> np.polyval(np.poly1d([3,0,1]), np.poly1d(5))
+ poly1d([76])
+
+ """
+ p = NX.asarray(p)
+ if isinstance(x, poly1d):
+ y = 0
+ else:
+ x = NX.asanyarray(x)
+ y = NX.zeros_like(x)
+ for pv in p:
+ y = y * x + pv
+ return y
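+# A worked pass of the Horner loop above, matching the doctest
+# ``np.polyval([3, 0, 1], 5) == 76``:
+#   y = 0
+#   y = 0*5 + 3 = 3
+#   y = 3*5 + 0 = 15
+#   y = 15*5 + 1 = 76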
+
+
+def _binary_op_dispatcher(a1, a2):
+ return (a1, a2)
+
+
+@array_function_dispatch(_binary_op_dispatcher)
+def polyadd(a1, a2):
+ """
+ Find the sum of two polynomials.
+
+ .. note::
+ This forms part of the old polynomial API. Since version 1.4, the
+ new polynomial API defined in `numpy.polynomial` is preferred.
+ A summary of the differences can be found in the
+ :doc:`transition guide </reference/routines.polynomials>`.
+
+ Returns the polynomial resulting from the sum of two input polynomials.
+ Each input must be either a poly1d object or a 1D sequence of polynomial
+ coefficients, from highest to lowest degree.
+
+ Parameters
+ ----------
+ a1, a2 : array_like or poly1d object
+ Input polynomials.
+
+ Returns
+ -------
+ out : ndarray or poly1d object
+ The sum of the inputs. If either input is a poly1d object, then the
+ output is also a poly1d object. Otherwise, it is a 1D array of
+ polynomial coefficients from highest to lowest degree.
+
+ See Also
+ --------
+ poly1d : A one-dimensional polynomial class.
+ poly, polyadd, polyder, polydiv, polyfit, polyint, polysub, polyval
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.polyadd([1, 2], [9, 5, 4])
+ array([9, 6, 6])
+
+ Using poly1d objects:
+
+ >>> p1 = np.poly1d([1, 2])
+ >>> p2 = np.poly1d([9, 5, 4])
+ >>> print(p1)
+ 1 x + 2
+ >>> print(p2)
+ 2
+ 9 x + 5 x + 4
+ >>> print(np.polyadd(p1, p2))
+ 2
+ 9 x + 6 x + 6
+
+ """
+ truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
+ a1 = atleast_1d(a1)
+ a2 = atleast_1d(a2)
+ diff = len(a2) - len(a1)
+ if diff == 0:
+ val = a1 + a2
+ elif diff > 0:
+ zr = NX.zeros(diff, a1.dtype)
+ val = NX.concatenate((zr, a1)) + a2
+ else:
+ zr = NX.zeros(abs(diff), a2.dtype)
+ val = a1 + NX.concatenate((zr, a2))
+ if truepoly:
+ val = poly1d(val)
+ return val
+
+
+@array_function_dispatch(_binary_op_dispatcher)
+def polysub(a1, a2):
+ """
+ Difference (subtraction) of two polynomials.
+
+ .. note::
+ This forms part of the old polynomial API. Since version 1.4, the
+ new polynomial API defined in `numpy.polynomial` is preferred.
+ A summary of the differences can be found in the
+ :doc:`transition guide </reference/routines.polynomials>`.
+
+ Given two polynomials `a1` and `a2`, returns ``a1 - a2``.
+ `a1` and `a2` can be either array_like sequences of the polynomials'
+ coefficients (including coefficients equal to zero), or `poly1d` objects.
+
+ Parameters
+ ----------
+ a1, a2 : array_like or poly1d
+ Minuend and subtrahend polynomials, respectively.
+
+ Returns
+ -------
+ out : ndarray or poly1d
+ Array or `poly1d` object of the difference polynomial's coefficients.
+
+ See Also
+ --------
+ polyval, polydiv, polymul, polyadd
+
+ Examples
+ --------
+
+ .. math:: (2 x^2 + 10 x - 2) - (3 x^2 + 10 x -4) = (-x^2 + 2)
+
+ >>> import numpy as np
+
+ >>> np.polysub([2, 10, -2], [3, 10, -4])
+ array([-1, 0, 2])
+
+ """
+ truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
+ a1 = atleast_1d(a1)
+ a2 = atleast_1d(a2)
+ diff = len(a2) - len(a1)
+ if diff == 0:
+ val = a1 - a2
+ elif diff > 0:
+ zr = NX.zeros(diff, a1.dtype)
+ val = NX.concatenate((zr, a1)) - a2
+ else:
+ zr = NX.zeros(abs(diff), a2.dtype)
+ val = a1 - NX.concatenate((zr, a2))
+ if truepoly:
+ val = poly1d(val)
+ return val
+
+
+@array_function_dispatch(_binary_op_dispatcher)
+def polymul(a1, a2):
+ """
+ Find the product of two polynomials.
+
+ .. note::
+ This forms part of the old polynomial API. Since version 1.4, the
+ new polynomial API defined in `numpy.polynomial` is preferred.
+ A summary of the differences can be found in the
+ :doc:`transition guide </reference/routines.polynomials>`.
+
+ Finds the polynomial resulting from the multiplication of the two input
+ polynomials. Each input must be either a poly1d object or a 1D sequence
+ of polynomial coefficients, from highest to lowest degree.
+
+ Parameters
+ ----------
+ a1, a2 : array_like or poly1d object
+ Input polynomials.
+
+ Returns
+ -------
+ out : ndarray or poly1d object
+ The polynomial resulting from the multiplication of the inputs. If
+        either input is a poly1d object, then the output is also a poly1d
+ object. Otherwise, it is a 1D array of polynomial coefficients from
+ highest to lowest degree.
+
+ See Also
+ --------
+ poly1d : A one-dimensional polynomial class.
+ poly, polyadd, polyder, polydiv, polyfit, polyint, polysub, polyval
+ convolve : Array convolution. Same output as polymul, but has parameter
+ for overlap mode.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.polymul([1, 2, 3], [9, 5, 1])
+ array([ 9, 23, 38, 17, 3])
+
+ Using poly1d objects:
+
+ >>> p1 = np.poly1d([1, 2, 3])
+ >>> p2 = np.poly1d([9, 5, 1])
+ >>> print(p1)
+ 2
+ 1 x + 2 x + 3
+ >>> print(p2)
+ 2
+ 9 x + 5 x + 1
+ >>> print(np.polymul(p1, p2))
+ 4 3 2
+ 9 x + 23 x + 38 x + 17 x + 3
+
+ """
+ truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
+ a1, a2 = poly1d(a1), poly1d(a2)
+ val = NX.convolve(a1, a2)
+ if truepoly:
+ val = poly1d(val)
+ return val
+
+
+def _polydiv_dispatcher(u, v):
+ return (u, v)
+
+
+@array_function_dispatch(_polydiv_dispatcher)
+def polydiv(u, v):
+ """
+ Returns the quotient and remainder of polynomial division.
+
+ .. note::
+ This forms part of the old polynomial API. Since version 1.4, the
+ new polynomial API defined in `numpy.polynomial` is preferred.
+ A summary of the differences can be found in the
+ :doc:`transition guide </reference/routines.polynomials>`.
+
+ The input arrays are the coefficients (including any coefficients
+ equal to zero) of the "numerator" (dividend) and "denominator"
+ (divisor) polynomials, respectively.
+
+ Parameters
+ ----------
+ u : array_like or poly1d
+ Dividend polynomial's coefficients.
+
+ v : array_like or poly1d
+ Divisor polynomial's coefficients.
+
+ Returns
+ -------
+ q : ndarray
+ Coefficients, including those equal to zero, of the quotient.
+ r : ndarray
+ Coefficients, including those equal to zero, of the remainder.
+
+ See Also
+ --------
+ poly, polyadd, polyder, polydiv, polyfit, polyint, polymul, polysub
+ polyval
+
+ Notes
+ -----
+ Both `u` and `v` must be 0-d or 1-d (ndim = 0 or 1), but `u.ndim` need
+ not equal `v.ndim`. In other words, all four possible combinations -
+ ``u.ndim = v.ndim = 0``, ``u.ndim = v.ndim = 1``,
+ ``u.ndim = 1, v.ndim = 0``, and ``u.ndim = 0, v.ndim = 1`` - work.
+
+ Examples
+ --------
+
+ .. math:: \\frac{3x^2 + 5x + 2}{2x + 1} = 1.5x + 1.75, remainder 0.25
+
+ >>> import numpy as np
+
+ >>> x = np.array([3.0, 5.0, 2.0])
+ >>> y = np.array([2.0, 1.0])
+ >>> np.polydiv(x, y)
+ (array([1.5 , 1.75]), array([0.25]))
+
+ """
+ truepoly = (isinstance(u, poly1d) or isinstance(v, poly1d))
+ u = atleast_1d(u) + 0.0
+ v = atleast_1d(v) + 0.0
+ # w has the common type
+ w = u[0] + v[0]
+ m = len(u) - 1
+ n = len(v) - 1
+ scale = 1. / v[0]
+ q = NX.zeros((max(m - n + 1, 1),), w.dtype)
+ r = u.astype(w.dtype)
+ for k in range(m - n + 1):
+ d = scale * r[k]
+ q[k] = d
+ r[k:k + n + 1] -= d * v
+ while NX.allclose(r[0], 0, rtol=1e-14) and (r.shape[-1] > 1):
+ r = r[1:]
+ if truepoly:
+ return poly1d(q), poly1d(r)
+ return q, r
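+# A worked pass of the synthetic-division loop above, matching the doctest
+# (u = [3., 5., 2.], v = [2., 1.], so scale = 0.5):
+#   k=0: d = 1.5  -> q = [1.5, 0.],   r becomes [0., 3.5, 2.]
+#   k=1: d = 1.75 -> q = [1.5, 1.75], r becomes [0., 0., 0.25]
+# the leading (near-)zeros are then trimmed, giving r = [0.25]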
+
+
+_poly_mat = re.compile(r"\*\*([0-9]*)")
+def _raise_power(astr, wrap=70):
+ n = 0
+ line1 = ''
+ line2 = ''
+ output = ' '
+ while True:
+ mat = _poly_mat.search(astr, n)
+ if mat is None:
+ break
+ span = mat.span()
+ power = mat.groups()[0]
+ partstr = astr[n:span[0]]
+ n = span[1]
+ toadd2 = partstr + ' ' * (len(power) - 1)
+ toadd1 = ' ' * (len(partstr) - 1) + power
+ if ((len(line2) + len(toadd2) > wrap) or
+ (len(line1) + len(toadd1) > wrap)):
+ output += line1 + "\n" + line2 + "\n "
+ line1 = toadd1
+ line2 = toadd2
+ else:
+ line2 += partstr + ' ' * (len(power) - 1)
+ line1 += ' ' * (len(partstr) - 1) + power
+ output += line1 + "\n" + line2
+ return output + astr[n:]
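+# A small illustration of the transformation above (assuming the default
+# wrap width; the output matches the `poly1d` doctests): exponent digits
+# are lifted onto their own line above the variable, e.g.
+#     _raise_power('1 x**2 + 2 x + 3')
+# returns
+#     '   2\n1 x + 2 x + 3'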
+
+
+@set_module('numpy')
+class poly1d:
+ """
+ A one-dimensional polynomial class.
+
+ .. note::
+ This forms part of the old polynomial API. Since version 1.4, the
+ new polynomial API defined in `numpy.polynomial` is preferred.
+ A summary of the differences can be found in the
+ :doc:`transition guide </reference/routines.polynomials>`.
+
+ A convenience class, used to encapsulate "natural" operations on
+ polynomials so that said operations may take on their customary
+ form in code (see Examples).
+
+ Parameters
+ ----------
+ c_or_r : array_like
+ The polynomial's coefficients, in decreasing powers, or if
+ the value of the second parameter is True, the polynomial's
+ roots (values where the polynomial evaluates to 0). For example,
+ ``poly1d([1, 2, 3])`` returns an object that represents
+ :math:`x^2 + 2x + 3`, whereas ``poly1d([1, 2, 3], True)`` returns
+ one that represents :math:`(x-1)(x-2)(x-3) = x^3 - 6x^2 + 11x -6`.
+ r : bool, optional
+ If True, `c_or_r` specifies the polynomial's roots; the default
+ is False.
+ variable : str, optional
+ Changes the variable used when printing `p` from `x` to `variable`
+ (see Examples).
+
+ Examples
+ --------
+ >>> import numpy as np
+
+ Construct the polynomial :math:`x^2 + 2x + 3`:
+
+ >>> p = np.poly1d([1, 2, 3])
+ >>> print(np.poly1d(p))
+ 2
+ 1 x + 2 x + 3
+
+ Evaluate the polynomial at :math:`x = 0.5`:
+
+ >>> p(0.5)
+ 4.25
+
+ Find the roots:
+
+ >>> p.r
+ array([-1.+1.41421356j, -1.-1.41421356j])
+ >>> p(p.r)
+ array([ -4.44089210e-16+0.j, -4.44089210e-16+0.j]) # may vary
+
+    The numbers on the previous line represent (0, 0) to machine precision.
+
+ Show the coefficients:
+
+ >>> p.c
+ array([1, 2, 3])
+
+ Display the order (the leading zero-coefficients are removed):
+
+ >>> p.order
+ 2
+
+ Show the coefficient of the k-th power in the polynomial
+ (which is equivalent to ``p.c[-(i+1)]``):
+
+ >>> p[1]
+ 2
+
+ Polynomials can be added, subtracted, multiplied, and divided
+ (returns quotient and remainder):
+
+ >>> p * p
+ poly1d([ 1, 4, 10, 12, 9])
+
+ >>> (p**3 + 4) / p
+ (poly1d([ 1., 4., 10., 12., 9.]), poly1d([4.]))
+
+ ``asarray(p)`` gives the coefficient array, so polynomials can be
+ used in all functions that accept arrays:
+
+ >>> p**2 # square of polynomial
+ poly1d([ 1, 4, 10, 12, 9])
+
+ >>> np.square(p) # square of individual coefficients
+ array([1, 4, 9])
+
+ The variable used in the string representation of `p` can be modified,
+ using the `variable` parameter:
+
+ >>> p = np.poly1d([1,2,3], variable='z')
+ >>> print(p)
+ 2
+ 1 z + 2 z + 3
+
+ Construct a polynomial from its roots:
+
+ >>> np.poly1d([1, 2], True)
+ poly1d([ 1., -3., 2.])
+
+ This is the same polynomial as obtained by:
+
+ >>> np.poly1d([1, -1]) * np.poly1d([1, -2])
+ poly1d([ 1, -3, 2])
+
+ """
+ __hash__ = None
+
+ @property
+ def coeffs(self):
+ """ The polynomial coefficients """
+ return self._coeffs
+
+ @coeffs.setter
+ def coeffs(self, value):
+ # allowing this makes p.coeffs *= 2 legal
+ if value is not self._coeffs:
+ raise AttributeError("Cannot set attribute")
+
+ @property
+ def variable(self):
+ """ The name of the polynomial variable """
+ return self._variable
+
+ # calculated attributes
+ @property
+ def order(self):
+ """ The order or degree of the polynomial """
+ return len(self._coeffs) - 1
+
+ @property
+ def roots(self):
+ """ The roots of the polynomial, where self(x) == 0 """
+ return roots(self._coeffs)
+
+    # Our internal _coeffs property needs to be backed by __dict__['coeffs']
+    # for scipy to work correctly.
+ @property
+ def _coeffs(self):
+ return self.__dict__['coeffs']
+
+ @_coeffs.setter
+ def _coeffs(self, coeffs):
+ self.__dict__['coeffs'] = coeffs
+
+ # alias attributes
+ r = roots
+ c = coef = coefficients = coeffs
+ o = order
+
+ def __init__(self, c_or_r, r=False, variable=None):
+ if isinstance(c_or_r, poly1d):
+ self._variable = c_or_r._variable
+ self._coeffs = c_or_r._coeffs
+
+ if set(c_or_r.__dict__) - set(self.__dict__):
+ msg = ("In the future extra properties will not be copied "
+ "across when constructing one poly1d from another")
+ warnings.warn(msg, FutureWarning, stacklevel=2)
+ self.__dict__.update(c_or_r.__dict__)
+
+ if variable is not None:
+ self._variable = variable
+ return
+ if r:
+ c_or_r = poly(c_or_r)
+ c_or_r = atleast_1d(c_or_r)
+ if c_or_r.ndim > 1:
+ raise ValueError("Polynomial must be 1d only.")
+ c_or_r = trim_zeros(c_or_r, trim='f')
+ if len(c_or_r) == 0:
+ c_or_r = NX.array([0], dtype=c_or_r.dtype)
+ self._coeffs = c_or_r
+ if variable is None:
+ variable = 'x'
+ self._variable = variable
+
+ def __array__(self, t=None, copy=None):
+ if t:
+ return NX.asarray(self.coeffs, t, copy=copy)
+ else:
+ return NX.asarray(self.coeffs, copy=copy)
+
+ def __repr__(self):
+ vals = repr(self.coeffs)
+ vals = vals[6:-1]
+ return f"poly1d({vals})"
+
+ def __len__(self):
+ return self.order
+
+ def __str__(self):
+ thestr = "0"
+ var = self.variable
+
+ # Remove leading zeros
+ coeffs = self.coeffs[NX.logical_or.accumulate(self.coeffs != 0)]
+ N = len(coeffs) - 1
+
+ def fmt_float(q):
+ s = f'{q:.4g}'
+ s = s.removesuffix('.0000')
+ return s
+
+ for k, coeff in enumerate(coeffs):
+ if not iscomplex(coeff):
+ coefstr = fmt_float(real(coeff))
+ elif real(coeff) == 0:
+ coefstr = f'{fmt_float(imag(coeff))}j'
+ else:
+ coefstr = f'({fmt_float(real(coeff))} + {fmt_float(imag(coeff))}j)'
+
+ power = (N - k)
+ if power == 0:
+ if coefstr != '0':
+ newstr = f'{coefstr}'
+ elif k == 0:
+ newstr = '0'
+ else:
+ newstr = ''
+ elif power == 1:
+ if coefstr == '0':
+ newstr = ''
+ elif coefstr == 'b':
+ newstr = var
+ else:
+ newstr = f'{coefstr} {var}'
+ elif coefstr == '0':
+ newstr = ''
+ elif coefstr == 'b':
+ newstr = '%s**%d' % (var, power,)
+ else:
+ newstr = '%s %s**%d' % (coefstr, var, power)
+
+ if k > 0:
+ if newstr != '':
+ if newstr.startswith('-'):
+ thestr = f"{thestr} - {newstr[1:]}"
+ else:
+ thestr = f"{thestr} + {newstr}"
+ else:
+ thestr = newstr
+ return _raise_power(thestr)
+
+ def __call__(self, val):
+ return polyval(self.coeffs, val)
+
+ def __neg__(self):
+ return poly1d(-self.coeffs)
+
+ def __pos__(self):
+ return self
+
+ def __mul__(self, other):
+ if isscalar(other):
+ return poly1d(self.coeffs * other)
+ else:
+ other = poly1d(other)
+ return poly1d(polymul(self.coeffs, other.coeffs))
+
+ def __rmul__(self, other):
+ if isscalar(other):
+ return poly1d(other * self.coeffs)
+ else:
+ other = poly1d(other)
+ return poly1d(polymul(self.coeffs, other.coeffs))
+
+ def __add__(self, other):
+ other = poly1d(other)
+ return poly1d(polyadd(self.coeffs, other.coeffs))
+
+ def __radd__(self, other):
+ other = poly1d(other)
+ return poly1d(polyadd(self.coeffs, other.coeffs))
+
+ def __pow__(self, val):
+ if not isscalar(val) or int(val) != val or val < 0:
+ raise ValueError("Power to non-negative integers only.")
+ res = [1]
+ for _ in range(val):
+ res = polymul(self.coeffs, res)
+ return poly1d(res)
+
+ def __sub__(self, other):
+ other = poly1d(other)
+ return poly1d(polysub(self.coeffs, other.coeffs))
+
+ def __rsub__(self, other):
+ other = poly1d(other)
+ return poly1d(polysub(other.coeffs, self.coeffs))
+
+ def __truediv__(self, other):
+ if isscalar(other):
+ return poly1d(self.coeffs / other)
+ else:
+ other = poly1d(other)
+ return polydiv(self, other)
+
+ def __rtruediv__(self, other):
+ if isscalar(other):
+ return poly1d(other / self.coeffs)
+ else:
+ other = poly1d(other)
+ return polydiv(other, self)
+
+ def __eq__(self, other):
+ if not isinstance(other, poly1d):
+ return NotImplemented
+ if self.coeffs.shape != other.coeffs.shape:
+ return False
+ return (self.coeffs == other.coeffs).all()
+
+ def __ne__(self, other):
+ if not isinstance(other, poly1d):
+ return NotImplemented
+ return not self.__eq__(other)
+
+ def __getitem__(self, val):
+ ind = self.order - val
+ if val > self.order:
+ return self.coeffs.dtype.type(0)
+ if val < 0:
+ return self.coeffs.dtype.type(0)
+ return self.coeffs[ind]
+
+ def __setitem__(self, key, val):
+ ind = self.order - key
+ if key < 0:
+ raise ValueError("Does not support negative powers.")
+ if key > self.order:
+ zr = NX.zeros(key - self.order, self.coeffs.dtype)
+ self._coeffs = NX.concatenate((zr, self.coeffs))
+ ind = 0
+ self._coeffs[ind] = val
+
+ def __iter__(self):
+ return iter(self.coeffs)
+
+ def integ(self, m=1, k=0):
+ """
+ Return an antiderivative (indefinite integral) of this polynomial.
+
+ Refer to `polyint` for full documentation.
+
+ See Also
+ --------
+ polyint : equivalent function
+
+ """
+ return poly1d(polyint(self.coeffs, m=m, k=k))
+
+ def deriv(self, m=1):
+ """
+ Return a derivative of this polynomial.
+
+ Refer to `polyder` for full documentation.
+
+ See Also
+ --------
+ polyder : equivalent function
+
+ """
+ return poly1d(polyder(self.coeffs, m=m))
+
+# Stuff to do on module import
+
+
+warnings.simplefilter('always', RankWarning)
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/_polynomial_impl.pyi b/.venv/lib/python3.12/site-packages/numpy/lib/_polynomial_impl.pyi
new file mode 100644
index 0000000..faf2f01
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/_polynomial_impl.pyi
@@ -0,0 +1,316 @@
+from typing import (
+ Any,
+ NoReturn,
+ SupportsIndex,
+ SupportsInt,
+ TypeAlias,
+ TypeVar,
+ overload,
+)
+from typing import (
+ Literal as L,
+)
+
+import numpy as np
+from numpy import (
+ complex128,
+ complexfloating,
+ float64,
+ floating,
+ int32,
+ int64,
+ object_,
+ poly1d,
+ signedinteger,
+ unsignedinteger,
+)
+from numpy._typing import (
+ ArrayLike,
+ NDArray,
+ _ArrayLikeBool_co,
+ _ArrayLikeComplex_co,
+ _ArrayLikeFloat_co,
+ _ArrayLikeInt_co,
+ _ArrayLikeObject_co,
+ _ArrayLikeUInt_co,
+)
+
+_T = TypeVar("_T")
+
+_2Tup: TypeAlias = tuple[_T, _T]
+_5Tup: TypeAlias = tuple[
+ _T,
+ NDArray[float64],
+ NDArray[int32],
+ NDArray[float64],
+ NDArray[float64],
+]
+
+__all__ = [
+ "poly",
+ "roots",
+ "polyint",
+ "polyder",
+ "polyadd",
+ "polysub",
+ "polymul",
+ "polydiv",
+ "polyval",
+ "poly1d",
+ "polyfit",
+]
+
+def poly(seq_of_zeros: ArrayLike) -> NDArray[floating]: ...
+
+# Returns either a float or complex array depending on the input values.
+# See `np.linalg.eigvals`.
+def roots(p: ArrayLike) -> NDArray[complexfloating] | NDArray[floating]: ...
+
+@overload
+def polyint(
+ p: poly1d,
+ m: SupportsInt | SupportsIndex = ...,
+ k: _ArrayLikeComplex_co | _ArrayLikeObject_co | None = ...,
+) -> poly1d: ...
+@overload
+def polyint(
+ p: _ArrayLikeFloat_co,
+ m: SupportsInt | SupportsIndex = ...,
+ k: _ArrayLikeFloat_co | None = ...,
+) -> NDArray[floating]: ...
+@overload
+def polyint(
+ p: _ArrayLikeComplex_co,
+ m: SupportsInt | SupportsIndex = ...,
+ k: _ArrayLikeComplex_co | None = ...,
+) -> NDArray[complexfloating]: ...
+@overload
+def polyint(
+ p: _ArrayLikeObject_co,
+ m: SupportsInt | SupportsIndex = ...,
+ k: _ArrayLikeObject_co | None = ...,
+) -> NDArray[object_]: ...
+
+@overload
+def polyder(
+ p: poly1d,
+ m: SupportsInt | SupportsIndex = ...,
+) -> poly1d: ...
+@overload
+def polyder(
+ p: _ArrayLikeFloat_co,
+ m: SupportsInt | SupportsIndex = ...,
+) -> NDArray[floating]: ...
+@overload
+def polyder(
+ p: _ArrayLikeComplex_co,
+ m: SupportsInt | SupportsIndex = ...,
+) -> NDArray[complexfloating]: ...
+@overload
+def polyder(
+ p: _ArrayLikeObject_co,
+ m: SupportsInt | SupportsIndex = ...,
+) -> NDArray[object_]: ...
+
+@overload
+def polyfit(
+ x: _ArrayLikeFloat_co,
+ y: _ArrayLikeFloat_co,
+ deg: SupportsIndex | SupportsInt,
+ rcond: float | None = ...,
+ full: L[False] = ...,
+ w: _ArrayLikeFloat_co | None = ...,
+ cov: L[False] = ...,
+) -> NDArray[float64]: ...
+@overload
+def polyfit(
+ x: _ArrayLikeComplex_co,
+ y: _ArrayLikeComplex_co,
+ deg: SupportsIndex | SupportsInt,
+ rcond: float | None = ...,
+ full: L[False] = ...,
+ w: _ArrayLikeFloat_co | None = ...,
+ cov: L[False] = ...,
+) -> NDArray[complex128]: ...
+@overload
+def polyfit(
+ x: _ArrayLikeFloat_co,
+ y: _ArrayLikeFloat_co,
+ deg: SupportsIndex | SupportsInt,
+ rcond: float | None = ...,
+ full: L[False] = ...,
+ w: _ArrayLikeFloat_co | None = ...,
+ cov: L[True, "unscaled"] = ...,
+) -> _2Tup[NDArray[float64]]: ...
+@overload
+def polyfit(
+ x: _ArrayLikeComplex_co,
+ y: _ArrayLikeComplex_co,
+ deg: SupportsIndex | SupportsInt,
+ rcond: float | None = ...,
+ full: L[False] = ...,
+ w: _ArrayLikeFloat_co | None = ...,
+ cov: L[True, "unscaled"] = ...,
+) -> _2Tup[NDArray[complex128]]: ...
+@overload
+def polyfit(
+ x: _ArrayLikeFloat_co,
+ y: _ArrayLikeFloat_co,
+ deg: SupportsIndex | SupportsInt,
+ rcond: float | None = ...,
+ full: L[True] = ...,
+ w: _ArrayLikeFloat_co | None = ...,
+ cov: bool | L["unscaled"] = ...,
+) -> _5Tup[NDArray[float64]]: ...
+@overload
+def polyfit(
+ x: _ArrayLikeComplex_co,
+ y: _ArrayLikeComplex_co,
+ deg: SupportsIndex | SupportsInt,
+ rcond: float | None = ...,
+ full: L[True] = ...,
+ w: _ArrayLikeFloat_co | None = ...,
+ cov: bool | L["unscaled"] = ...,
+) -> _5Tup[NDArray[complex128]]: ...
+
+@overload
+def polyval(
+ p: _ArrayLikeBool_co,
+ x: _ArrayLikeBool_co,
+) -> NDArray[int64]: ...
+@overload
+def polyval(
+ p: _ArrayLikeUInt_co,
+ x: _ArrayLikeUInt_co,
+) -> NDArray[unsignedinteger]: ...
+@overload
+def polyval(
+ p: _ArrayLikeInt_co,
+ x: _ArrayLikeInt_co,
+) -> NDArray[signedinteger]: ...
+@overload
+def polyval(
+ p: _ArrayLikeFloat_co,
+ x: _ArrayLikeFloat_co,
+) -> NDArray[floating]: ...
+@overload
+def polyval(
+ p: _ArrayLikeComplex_co,
+ x: _ArrayLikeComplex_co,
+) -> NDArray[complexfloating]: ...
+@overload
+def polyval(
+ p: _ArrayLikeObject_co,
+ x: _ArrayLikeObject_co,
+) -> NDArray[object_]: ...
+
+@overload
+def polyadd(
+ a1: poly1d,
+ a2: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+) -> poly1d: ...
+@overload
+def polyadd(
+ a1: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+ a2: poly1d,
+) -> poly1d: ...
+@overload
+def polyadd(
+ a1: _ArrayLikeBool_co,
+ a2: _ArrayLikeBool_co,
+) -> NDArray[np.bool]: ...
+@overload
+def polyadd(
+ a1: _ArrayLikeUInt_co,
+ a2: _ArrayLikeUInt_co,
+) -> NDArray[unsignedinteger]: ...
+@overload
+def polyadd(
+ a1: _ArrayLikeInt_co,
+ a2: _ArrayLikeInt_co,
+) -> NDArray[signedinteger]: ...
+@overload
+def polyadd(
+ a1: _ArrayLikeFloat_co,
+ a2: _ArrayLikeFloat_co,
+) -> NDArray[floating]: ...
+@overload
+def polyadd(
+ a1: _ArrayLikeComplex_co,
+ a2: _ArrayLikeComplex_co,
+) -> NDArray[complexfloating]: ...
+@overload
+def polyadd(
+ a1: _ArrayLikeObject_co,
+ a2: _ArrayLikeObject_co,
+) -> NDArray[object_]: ...
+
+@overload
+def polysub(
+ a1: poly1d,
+ a2: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+) -> poly1d: ...
+@overload
+def polysub(
+ a1: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+ a2: poly1d,
+) -> poly1d: ...
+@overload
+def polysub(
+ a1: _ArrayLikeBool_co,
+ a2: _ArrayLikeBool_co,
+) -> NoReturn: ...
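+# (Subtracting boolean arrays raises TypeError at runtime, hence ``NoReturn``.)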
+@overload
+def polysub(
+ a1: _ArrayLikeUInt_co,
+ a2: _ArrayLikeUInt_co,
+) -> NDArray[unsignedinteger]: ...
+@overload
+def polysub(
+ a1: _ArrayLikeInt_co,
+ a2: _ArrayLikeInt_co,
+) -> NDArray[signedinteger]: ...
+@overload
+def polysub(
+ a1: _ArrayLikeFloat_co,
+ a2: _ArrayLikeFloat_co,
+) -> NDArray[floating]: ...
+@overload
+def polysub(
+ a1: _ArrayLikeComplex_co,
+ a2: _ArrayLikeComplex_co,
+) -> NDArray[complexfloating]: ...
+@overload
+def polysub(
+ a1: _ArrayLikeObject_co,
+ a2: _ArrayLikeObject_co,
+) -> NDArray[object_]: ...
+
+# NOTE: Not an alias, but they do have the same signature (that we can reuse)
+polymul = polyadd
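+# Illustrative (not part of the stub): np.polymul([1, 2], [1, 3]) computes
+# (x + 2) * (x + 3) and returns array([1, 5, 6]); its arguments and return
+# type follow the same pattern as np.polyadd, so the overloads are shared.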
+
+@overload
+def polydiv(
+ u: poly1d,
+ v: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+) -> _2Tup[poly1d]: ...
+@overload
+def polydiv(
+ u: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+ v: poly1d,
+) -> _2Tup[poly1d]: ...
+@overload
+def polydiv(
+ u: _ArrayLikeFloat_co,
+ v: _ArrayLikeFloat_co,
+) -> _2Tup[NDArray[floating]]: ...
+@overload
+def polydiv(
+ u: _ArrayLikeComplex_co,
+ v: _ArrayLikeComplex_co,
+) -> _2Tup[NDArray[complexfloating]]: ...
+@overload
+def polydiv(
+ u: _ArrayLikeObject_co,
+ v: _ArrayLikeObject_co,
+) -> _2Tup[NDArray[Any]]: ...
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/_scimath_impl.py b/.venv/lib/python3.12/site-packages/numpy/lib/_scimath_impl.py
new file mode 100644
index 0000000..8136a7d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/_scimath_impl.py
@@ -0,0 +1,642 @@
+"""
+Wrapper functions for more user-friendly calling of certain math functions
+whose output data-type differs from the input data-type in certain
+domains of the input.
+
+For example, for functions like `log` with branch cuts, the versions in this
+module provide the mathematically valid answers in the complex plane::
+
+ >>> import math
+ >>> np.emath.log(-math.exp(1)) == (1+1j*math.pi)
+ True
+
+Similarly, `sqrt`, other base logarithms, `power` and trig functions are
+correctly handled. See their respective docstrings for specific examples.
+
+"""
+import numpy._core.numeric as nx
+import numpy._core.numerictypes as nt
+from numpy._core.numeric import any, asarray
+from numpy._core.overrides import array_function_dispatch, set_module
+from numpy.lib._type_check_impl import isreal
+
+__all__ = [
+ 'sqrt', 'log', 'log2', 'logn', 'log10', 'power', 'arccos', 'arcsin',
+ 'arctanh'
+ ]
+
+
+_ln2 = nx.log(2.0)
+
+
+def _tocomplex(arr):
+ """Convert its input `arr` to a complex array.
+
+ The input is returned as a complex array of the smallest type that will fit
+ the original data: types like single, byte, short, etc. become csingle,
+ while others become cdouble.
+
+ A copy of the input is always made.
+
+ Parameters
+ ----------
+ arr : array
+
+ Returns
+ -------
+ array
+ An array with the same input data as the input but in complex form.
+
+ Examples
+ --------
+ >>> import numpy as np
+
+ First, consider an input of type short:
+
+ >>> a = np.array([1,2,3],np.short)
+
+ >>> ac = np.lib.scimath._tocomplex(a); ac
+ array([1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64)
+
+ >>> ac.dtype
+ dtype('complex64')
+
+ If the input is of type double, the output is correspondingly of the
+ complex double type as well:
+
+ >>> b = np.array([1,2,3],np.double)
+
+ >>> bc = np.lib.scimath._tocomplex(b); bc
+ array([1.+0.j, 2.+0.j, 3.+0.j])
+
+ >>> bc.dtype
+ dtype('complex128')
+
+ Note that even if the input was complex to begin with, a copy is still
+ made, since the astype() method always copies:
+
+ >>> c = np.array([1,2,3],np.csingle)
+
+ >>> cc = np.lib.scimath._tocomplex(c); cc
+ array([1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64)
+
+ >>> c *= 2; c
+ array([2.+0.j, 4.+0.j, 6.+0.j], dtype=complex64)
+
+ >>> cc
+ array([1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64)
+ """
+ if issubclass(arr.dtype.type, (nt.single, nt.byte, nt.short, nt.ubyte,
+ nt.ushort, nt.csingle)):
+ return arr.astype(nt.csingle)
+ else:
+ return arr.astype(nt.cdouble)
+
+
+def _fix_real_lt_zero(x):
+ """Convert `x` to complex if it has real, negative components.
+
+ Otherwise, output is just the array version of the input (via asarray).
+
+ Parameters
+ ----------
+ x : array_like
+
+ Returns
+ -------
+ array
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.lib.scimath._fix_real_lt_zero([1,2])
+ array([1, 2])
+
+ >>> np.lib.scimath._fix_real_lt_zero([-1,2])
+ array([-1.+0.j, 2.+0.j])
+
+ """
+ x = asarray(x)
+ if any(isreal(x) & (x < 0)):
+ x = _tocomplex(x)
+ return x
+
+
+def _fix_int_lt_zero(x):
+ """Convert `x` to double if it has real, negative components.
+
+ Otherwise, output is just the array version of the input (via asarray).
+
+ Parameters
+ ----------
+ x : array_like
+
+ Returns
+ -------
+ array
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.lib.scimath._fix_int_lt_zero([1,2])
+ array([1, 2])
+
+ >>> np.lib.scimath._fix_int_lt_zero([-1,2])
+ array([-1., 2.])
+ """
+ x = asarray(x)
+ if any(isreal(x) & (x < 0)):
+ x = x * 1.0
+ return x
+
+
+def _fix_real_abs_gt_1(x):
+ """Convert `x` to complex if it has real components x_i with abs(x_i)>1.
+
+ Otherwise, output is just the array version of the input (via asarray).
+
+ Parameters
+ ----------
+ x : array_like
+
+ Returns
+ -------
+ array
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.lib.scimath._fix_real_abs_gt_1([0,1])
+ array([0, 1])
+
+ >>> np.lib.scimath._fix_real_abs_gt_1([0,2])
+ array([0.+0.j, 2.+0.j])
+ """
+ x = asarray(x)
+ if any(isreal(x) & (abs(x) > 1)):
+ x = _tocomplex(x)
+ return x
+
+
+def _unary_dispatcher(x):
+ return (x,)
+
+
+@set_module('numpy.lib.scimath')
+@array_function_dispatch(_unary_dispatcher)
+def sqrt(x):
+ """
+ Compute the square root of x.
+
+ For negative input elements, a complex value is returned
+ (unlike `numpy.sqrt` which returns NaN).
+
+ Parameters
+ ----------
+ x : array_like
+ The input value(s).
+
+ Returns
+ -------
+ out : ndarray or scalar
+ The square root of `x`. If `x` was a scalar, so is `out`,
+ otherwise an array is returned.
+
+ See Also
+ --------
+ numpy.sqrt
+
+ Examples
+ --------
+ For real, non-negative inputs this works just like `numpy.sqrt`:
+
+ >>> import numpy as np
+
+ >>> np.emath.sqrt(1)
+ 1.0
+ >>> np.emath.sqrt([1, 4])
+ array([1., 2.])
+
+ But it automatically handles negative inputs:
+
+ >>> np.emath.sqrt(-1)
+ 1j
+ >>> np.emath.sqrt([-1,4])
+ array([0.+1.j, 2.+0.j])
+
+    Different results are expected because floating-point 0.0 and -0.0
+    are distinct.
+
+ For more control, explicitly use complex() as follows:
+
+ >>> np.emath.sqrt(complex(-4.0, 0.0))
+ 2j
+ >>> np.emath.sqrt(complex(-4.0, -0.0))
+ -2j
+ """
+ x = _fix_real_lt_zero(x)
+ return nx.sqrt(x)
+
+
+@set_module('numpy.lib.scimath')
+@array_function_dispatch(_unary_dispatcher)
+def log(x):
+ """
+ Compute the natural logarithm of `x`.
+
+ Return the "principal value" (for a description of this, see `numpy.log`)
+ of :math:`log_e(x)`. For real `x > 0`, this is a real number (``log(0)``
+ returns ``-inf`` and ``log(np.inf)`` returns ``inf``). Otherwise, the
+    complex principal value is returned.
+
+ Parameters
+ ----------
+ x : array_like
+ The value(s) whose log is (are) required.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ The log of the `x` value(s). If `x` was a scalar, so is `out`,
+ otherwise an array is returned.
+
+ See Also
+ --------
+ numpy.log
+
+ Notes
+ -----
+ For a log() that returns ``NAN`` when real `x < 0`, use `numpy.log`
+ (note, however, that otherwise `numpy.log` and this `log` are identical,
+ i.e., both return ``-inf`` for `x = 0`, ``inf`` for `x = inf`, and,
+    notably, the complex principal value if ``x.imag != 0``).
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.emath.log(np.exp(1))
+ 1.0
+
+ Negative arguments are handled "correctly" (recall that
+ ``exp(log(x)) == x`` does *not* hold for real ``x < 0``):
+
+ >>> np.emath.log(-np.exp(1)) == (1 + np.pi * 1j)
+ True
+
+ """
+ x = _fix_real_lt_zero(x)
+ return nx.log(x)
+
+
+@set_module('numpy.lib.scimath')
+@array_function_dispatch(_unary_dispatcher)
+def log10(x):
+ """
+ Compute the logarithm base 10 of `x`.
+
+ Return the "principal value" (for a description of this, see
+ `numpy.log10`) of :math:`log_{10}(x)`. For real `x > 0`, this
+ is a real number (``log10(0)`` returns ``-inf`` and ``log10(np.inf)``
+    returns ``inf``). Otherwise, the complex principal value is returned.
+
+ Parameters
+ ----------
+ x : array_like or scalar
+ The value(s) whose log base 10 is (are) required.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ The log base 10 of the `x` value(s). If `x` was a scalar, so is `out`,
+ otherwise an array object is returned.
+
+ See Also
+ --------
+ numpy.log10
+
+ Notes
+ -----
+ For a log10() that returns ``NAN`` when real `x < 0`, use `numpy.log10`
+ (note, however, that otherwise `numpy.log10` and this `log10` are
+ identical, i.e., both return ``-inf`` for `x = 0`, ``inf`` for `x = inf`,
+    and, notably, the complex principal value if ``x.imag != 0``).
+
+ Examples
+ --------
+ >>> import numpy as np
+
+ (We set the printing precision so the example can be auto-tested)
+
+ >>> np.set_printoptions(precision=4)
+
+ >>> np.emath.log10(10**1)
+ 1.0
+
+ >>> np.emath.log10([-10**1, -10**2, 10**2])
+ array([1.+1.3644j, 2.+1.3644j, 2.+0.j ])
+
+ """
+ x = _fix_real_lt_zero(x)
+ return nx.log10(x)
+
+
+def _logn_dispatcher(n, x):
+ return (n, x,)
+
+
+@set_module('numpy.lib.scimath')
+@array_function_dispatch(_logn_dispatcher)
+def logn(n, x):
+ """
+ Take log base n of x.
+
+ If `x` contains negative inputs, the answer is computed and returned in the
+ complex domain.
+
+ Parameters
+ ----------
+ n : array_like
+       The base(s) in which the log is taken (not necessarily integers).
+ x : array_like
+ The value(s) whose log base `n` is (are) required.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ The log base `n` of the `x` value(s). If `x` was a scalar, so is
+ `out`, otherwise an array is returned.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.set_printoptions(precision=4)
+
+ >>> np.emath.logn(2, [4, 8])
+ array([2., 3.])
+ >>> np.emath.logn(2, [-4, -8, 8])
+ array([2.+4.5324j, 3.+4.5324j, 3.+0.j ])
+
+ """
+ x = _fix_real_lt_zero(x)
+ n = _fix_real_lt_zero(n)
+ return nx.log(x) / nx.log(n)
+
+
+@set_module('numpy.lib.scimath')
+@array_function_dispatch(_unary_dispatcher)
+def log2(x):
+ """
+ Compute the logarithm base 2 of `x`.
+
+ Return the "principal value" (for a description of this, see
+ `numpy.log2`) of :math:`log_2(x)`. For real `x > 0`, this is
+ a real number (``log2(0)`` returns ``-inf`` and ``log2(np.inf)`` returns
+    ``inf``). Otherwise, the complex principal value is returned.
+
+ Parameters
+ ----------
+ x : array_like
+ The value(s) whose log base 2 is (are) required.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ The log base 2 of the `x` value(s). If `x` was a scalar, so is `out`,
+ otherwise an array is returned.
+
+ See Also
+ --------
+ numpy.log2
+
+ Notes
+ -----
+ For a log2() that returns ``NAN`` when real `x < 0`, use `numpy.log2`
+ (note, however, that otherwise `numpy.log2` and this `log2` are
+ identical, i.e., both return ``-inf`` for `x = 0`, ``inf`` for `x = inf`,
+    and, notably, the complex principal value if ``x.imag != 0``).
+
+ Examples
+ --------
+
+ We set the printing precision so the example can be auto-tested:
+
+ >>> np.set_printoptions(precision=4)
+
+ >>> np.emath.log2(8)
+ 3.0
+ >>> np.emath.log2([-4, -8, 8])
+ array([2.+4.5324j, 3.+4.5324j, 3.+0.j ])
+
+ """
+ x = _fix_real_lt_zero(x)
+ return nx.log2(x)
+
+
+def _power_dispatcher(x, p):
+ return (x, p)
+
+
+@set_module('numpy.lib.scimath')
+@array_function_dispatch(_power_dispatcher)
+def power(x, p):
+ """
+ Return x to the power p, (x**p).
+
+ If `x` contains negative values, the output is converted to the
+ complex domain.
+
+ Parameters
+ ----------
+ x : array_like
+ The input value(s).
+ p : array_like of ints
+ The power(s) to which `x` is raised. If `x` contains multiple values,
+        `p` must either be a scalar or contain the same number of values
+ as `x`. In the latter case, the result is
+ ``x[0]**p[0], x[1]**p[1], ...``.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ The result of ``x**p``. If `x` and `p` are scalars, so is `out`,
+ otherwise an array is returned.
+
+ See Also
+ --------
+ numpy.power
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.set_printoptions(precision=4)
+
+ >>> np.emath.power(2, 2)
+ 4
+
+ >>> np.emath.power([2, 4], 2)
+ array([ 4, 16])
+
+ >>> np.emath.power([2, 4], -2)
+ array([0.25 , 0.0625])
+
+ >>> np.emath.power([-2, 4], 2)
+ array([ 4.-0.j, 16.+0.j])
+
+ >>> np.emath.power([2, 4], [2, 4])
+ array([ 4, 256])
+
+ """
+ x = _fix_real_lt_zero(x)
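+    # Promoting negative integer exponents to float avoids numpy.power's
+    # "Integers to negative integer powers are not allowed" error.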
+ p = _fix_int_lt_zero(p)
+ return nx.power(x, p)
+
+
+@set_module('numpy.lib.scimath')
+@array_function_dispatch(_unary_dispatcher)
+def arccos(x):
+ """
+ Compute the inverse cosine of x.
+
+ Return the "principal value" (for a description of this, see
+ `numpy.arccos`) of the inverse cosine of `x`. For real `x` such that
+ `abs(x) <= 1`, this is a real number in the closed interval
+    :math:`[0, \\pi]`. Otherwise, the complex principal value is returned.
+
+ Parameters
+ ----------
+ x : array_like or scalar
+ The value(s) whose arccos is (are) required.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ The inverse cosine(s) of the `x` value(s). If `x` was a scalar, so
+ is `out`, otherwise an array object is returned.
+
+ See Also
+ --------
+ numpy.arccos
+
+ Notes
+ -----
+ For an arccos() that returns ``NAN`` when real `x` is not in the
+ interval ``[-1,1]``, use `numpy.arccos`.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.set_printoptions(precision=4)
+
+ >>> np.emath.arccos(1) # a scalar is returned
+ 0.0
+
+ >>> np.emath.arccos([1,2])
+ array([0.-0.j , 0.-1.317j])
+
+ """
+ x = _fix_real_abs_gt_1(x)
+ return nx.arccos(x)
+
+
+@set_module('numpy.lib.scimath')
+@array_function_dispatch(_unary_dispatcher)
+def arcsin(x):
+ """
+ Compute the inverse sine of x.
+
+ Return the "principal value" (for a description of this, see
+ `numpy.arcsin`) of the inverse sine of `x`. For real `x` such that
+ `abs(x) <= 1`, this is a real number in the closed interval
+    :math:`[-\\pi/2, \\pi/2]`. Otherwise, the complex principal value is
+ returned.
+
+ Parameters
+ ----------
+ x : array_like or scalar
+ The value(s) whose arcsin is (are) required.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ The inverse sine(s) of the `x` value(s). If `x` was a scalar, so
+ is `out`, otherwise an array object is returned.
+
+ See Also
+ --------
+ numpy.arcsin
+
+ Notes
+ -----
+ For an arcsin() that returns ``NAN`` when real `x` is not in the
+ interval ``[-1,1]``, use `numpy.arcsin`.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.set_printoptions(precision=4)
+
+ >>> np.emath.arcsin(0)
+ 0.0
+
+ >>> np.emath.arcsin([0,1])
+ array([0. , 1.5708])
+
+ """
+ x = _fix_real_abs_gt_1(x)
+ return nx.arcsin(x)
+
+
+@set_module('numpy.lib.scimath')
+@array_function_dispatch(_unary_dispatcher)
+def arctanh(x):
+ """
+ Compute the inverse hyperbolic tangent of `x`.
+
+ Return the "principal value" (for a description of this, see
+ `numpy.arctanh`) of ``arctanh(x)``. For real `x` such that
+ ``abs(x) < 1``, this is a real number. If `abs(x) > 1`, or if `x` is
+    complex, the result is complex. Finally, ``x = 1`` returns ``inf`` and
+    ``x = -1`` returns ``-inf``.
+
+ Parameters
+ ----------
+ x : array_like
+ The value(s) whose arctanh is (are) required.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ The inverse hyperbolic tangent(s) of the `x` value(s). If `x` was
+ a scalar so is `out`, otherwise an array is returned.
+
+
+ See Also
+ --------
+ numpy.arctanh
+
+ Notes
+ -----
+ For an arctanh() that returns ``NAN`` when real `x` is not in the
+ interval ``(-1,1)``, use `numpy.arctanh` (this latter, however, does
+ return +/-inf for ``x = +/-1``).
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.set_printoptions(precision=4)
+
+ >>> np.emath.arctanh(0.5)
+ 0.5493061443340549
+
+ >>> from numpy.testing import suppress_warnings
+ >>> with suppress_warnings() as sup:
+ ... sup.filter(RuntimeWarning)
+ ... np.emath.arctanh(np.eye(2))
+ array([[inf, 0.],
+ [ 0., inf]])
+ >>> np.emath.arctanh([1j])
+ array([0.+0.7854j])
+
+ """
+ x = _fix_real_abs_gt_1(x)
+ return nx.arctanh(x)
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/_scimath_impl.pyi b/.venv/lib/python3.12/site-packages/numpy/lib/_scimath_impl.pyi
new file mode 100644
index 0000000..e6390c2
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/_scimath_impl.pyi
@@ -0,0 +1,93 @@
+from typing import Any, overload
+
+from numpy import complexfloating
+from numpy._typing import (
+ NDArray,
+ _ArrayLikeComplex_co,
+ _ArrayLikeFloat_co,
+ _ComplexLike_co,
+ _FloatLike_co,
+)
+
+__all__ = ["sqrt", "log", "log2", "logn", "log10", "power", "arccos", "arcsin", "arctanh"]
+
+@overload
+def sqrt(x: _FloatLike_co) -> Any: ...
+@overload
+def sqrt(x: _ComplexLike_co) -> complexfloating: ...
+@overload
+def sqrt(x: _ArrayLikeFloat_co) -> NDArray[Any]: ...
+@overload
+def sqrt(x: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ...
+
+@overload
+def log(x: _FloatLike_co) -> Any: ...
+@overload
+def log(x: _ComplexLike_co) -> complexfloating: ...
+@overload
+def log(x: _ArrayLikeFloat_co) -> NDArray[Any]: ...
+@overload
+def log(x: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ...
+
+@overload
+def log10(x: _FloatLike_co) -> Any: ...
+@overload
+def log10(x: _ComplexLike_co) -> complexfloating: ...
+@overload
+def log10(x: _ArrayLikeFloat_co) -> NDArray[Any]: ...
+@overload
+def log10(x: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ...
+
+@overload
+def log2(x: _FloatLike_co) -> Any: ...
+@overload
+def log2(x: _ComplexLike_co) -> complexfloating: ...
+@overload
+def log2(x: _ArrayLikeFloat_co) -> NDArray[Any]: ...
+@overload
+def log2(x: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ...
+
+@overload
+def logn(n: _FloatLike_co, x: _FloatLike_co) -> Any: ...
+@overload
+def logn(n: _ComplexLike_co, x: _ComplexLike_co) -> complexfloating: ...
+@overload
+def logn(n: _ArrayLikeFloat_co, x: _ArrayLikeFloat_co) -> NDArray[Any]: ...
+@overload
+def logn(n: _ArrayLikeComplex_co, x: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ...
+
+@overload
+def power(x: _FloatLike_co, p: _FloatLike_co) -> Any: ...
+@overload
+def power(x: _ComplexLike_co, p: _ComplexLike_co) -> complexfloating: ...
+@overload
+def power(x: _ArrayLikeFloat_co, p: _ArrayLikeFloat_co) -> NDArray[Any]: ...
+@overload
+def power(x: _ArrayLikeComplex_co, p: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ...
+
+@overload
+def arccos(x: _FloatLike_co) -> Any: ...
+@overload
+def arccos(x: _ComplexLike_co) -> complexfloating: ...
+@overload
+def arccos(x: _ArrayLikeFloat_co) -> NDArray[Any]: ...
+@overload
+def arccos(x: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ...
+
+@overload
+def arcsin(x: _FloatLike_co) -> Any: ...
+@overload
+def arcsin(x: _ComplexLike_co) -> complexfloating: ...
+@overload
+def arcsin(x: _ArrayLikeFloat_co) -> NDArray[Any]: ...
+@overload
+def arcsin(x: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ...
+
+@overload
+def arctanh(x: _FloatLike_co) -> Any: ...
+@overload
+def arctanh(x: _ComplexLike_co) -> complexfloating: ...
+@overload
+def arctanh(x: _ArrayLikeFloat_co) -> NDArray[Any]: ...
+@overload
+def arctanh(x: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ...
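+
+# Illustrative (not part of the stub): how the overloads above resolve for a
+# static type checker:
+#     sqrt(2.0)          # Any: the runtime type depends on the input's sign
+#     sqrt(1 + 1j)       # complexfloating
+#     sqrt([1.0, 4.0])   # NDArray[Any]
+#     sqrt([1 + 1j])     # NDArray[complexfloating]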
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/_shape_base_impl.py b/.venv/lib/python3.12/site-packages/numpy/lib/_shape_base_impl.py
new file mode 100644
index 0000000..89b86c8
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/_shape_base_impl.py
@@ -0,0 +1,1301 @@
+import functools
+import warnings
+
+import numpy._core.numeric as _nx
+from numpy._core import atleast_3d, overrides, vstack
+from numpy._core._multiarray_umath import _array_converter
+from numpy._core.fromnumeric import reshape, transpose
+from numpy._core.multiarray import normalize_axis_index
+from numpy._core.numeric import (
+ array,
+ asanyarray,
+ asarray,
+ normalize_axis_tuple,
+ zeros,
+ zeros_like,
+)
+from numpy._core.overrides import set_module
+from numpy._core.shape_base import _arrays_for_stack_dispatcher
+from numpy.lib._index_tricks_impl import ndindex
+from numpy.matrixlib.defmatrix import matrix # this raises all the right alarm bells
+
+__all__ = [
+ 'column_stack', 'row_stack', 'dstack', 'array_split', 'split',
+ 'hsplit', 'vsplit', 'dsplit', 'apply_over_axes', 'expand_dims',
+ 'apply_along_axis', 'kron', 'tile', 'take_along_axis',
+ 'put_along_axis'
+ ]
+
+
+array_function_dispatch = functools.partial(
+ overrides.array_function_dispatch, module='numpy')
+
+
+def _make_along_axis_idx(arr_shape, indices, axis):
+ # compute dimensions to iterate over
+ if not _nx.issubdtype(indices.dtype, _nx.integer):
+ raise IndexError('`indices` must be an integer array')
+ if len(arr_shape) != indices.ndim:
+ raise ValueError(
+ "`indices` and `arr` must have the same number of dimensions")
+ shape_ones = (1,) * indices.ndim
+ dest_dims = list(range(axis)) + [None] + list(range(axis + 1, indices.ndim))
+
+ # build a fancy index, consisting of orthogonal aranges, with the
+ # requested index inserted at the right location
+ fancy_index = []
+ for dim, n in zip(dest_dims, arr_shape):
+ if dim is None:
+ fancy_index.append(indices)
+ else:
+ ind_shape = shape_ones[:dim] + (-1,) + shape_ones[dim + 1:]
+ fancy_index.append(_nx.arange(n).reshape(ind_shape))
+
+ return tuple(fancy_index)
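+
+# Illustrative sketch (not from the upstream source): for a 2-D ``arr`` and
+# ``axis=1``, the helper returns ``(np.arange(nrows).reshape(-1, 1), indices)``,
+# an orthogonal row index broadcast against the requested column indices, so
+# ``arr[_make_along_axis_idx(arr.shape, indices, 1)][i, j] == arr[i, indices[i, j]]``.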
+
+
+def _take_along_axis_dispatcher(arr, indices, axis=None):
+ return (arr, indices)
+
+
+@array_function_dispatch(_take_along_axis_dispatcher)
+def take_along_axis(arr, indices, axis=-1):
+ """
+ Take values from the input array by matching 1d index and data slices.
+
+ This iterates over matching 1d slices oriented along the specified axis in
+ the index and data arrays, and uses the former to look up values in the
+ latter. These slices can be different lengths.
+
+ Functions returning an index along an axis, like `argsort` and
+ `argpartition`, produce suitable indices for this function.
+
+ Parameters
+ ----------
+ arr : ndarray (Ni..., M, Nk...)
+ Source array
+ indices : ndarray (Ni..., J, Nk...)
+ Indices to take along each 1d slice of ``arr``. This must match the
+        dimension of ``arr``, but the dimensions ``Ni`` and ``Nk`` only need
+        to broadcast against ``arr``.
+ axis : int or None, optional
+ The axis to take 1d slices along. If axis is None, the input array is
+ treated as if it had first been flattened to 1d, for consistency with
+ `sort` and `argsort`.
+
+ .. versionchanged:: 2.3
+ The default value is now ``-1``.
+
+ Returns
+ -------
+ out: ndarray (Ni..., J, Nk...)
+ The indexed result.
+
+ Notes
+ -----
+ This is equivalent to (but faster than) the following use of `ndindex` and
+ `s_`, which sets each of ``ii`` and ``kk`` to a tuple of indices::
+
+ Ni, M, Nk = a.shape[:axis], a.shape[axis], a.shape[axis+1:]
+ J = indices.shape[axis] # Need not equal M
+ out = np.empty(Ni + (J,) + Nk)
+
+ for ii in ndindex(Ni):
+ for kk in ndindex(Nk):
+ a_1d = a [ii + s_[:,] + kk]
+ indices_1d = indices[ii + s_[:,] + kk]
+ out_1d = out [ii + s_[:,] + kk]
+ for j in range(J):
+ out_1d[j] = a_1d[indices_1d[j]]
+
+ Equivalently, eliminating the inner loop, the last two lines would be::
+
+ out_1d[:] = a_1d[indices_1d]
+
+ See Also
+ --------
+ take : Take along an axis, using the same indices for every 1d slice
+ put_along_axis :
+ Put values into the destination array by matching 1d index and data slices
+
+ Examples
+ --------
+ >>> import numpy as np
+
+ For this sample array
+
+ >>> a = np.array([[10, 30, 20], [60, 40, 50]])
+
+ We can sort either by using sort directly, or argsort and this function
+
+ >>> np.sort(a, axis=1)
+ array([[10, 20, 30],
+ [40, 50, 60]])
+ >>> ai = np.argsort(a, axis=1)
+ >>> ai
+ array([[0, 2, 1],
+ [1, 2, 0]])
+ >>> np.take_along_axis(a, ai, axis=1)
+ array([[10, 20, 30],
+ [40, 50, 60]])
+
+ The same works for max and min, if you maintain the trivial dimension
+ with ``keepdims``:
+
+ >>> np.max(a, axis=1, keepdims=True)
+ array([[30],
+ [60]])
+ >>> ai = np.argmax(a, axis=1, keepdims=True)
+ >>> ai
+ array([[1],
+ [0]])
+ >>> np.take_along_axis(a, ai, axis=1)
+ array([[30],
+ [60]])
+
+ If we want to get the max and min at the same time, we can stack the
+ indices first
+
+ >>> ai_min = np.argmin(a, axis=1, keepdims=True)
+ >>> ai_max = np.argmax(a, axis=1, keepdims=True)
+ >>> ai = np.concatenate([ai_min, ai_max], axis=1)
+ >>> ai
+ array([[0, 1],
+ [1, 0]])
+ >>> np.take_along_axis(a, ai, axis=1)
+ array([[10, 30],
+ [40, 60]])
+ """
+ # normalize inputs
+ if axis is None:
+ if indices.ndim != 1:
+ raise ValueError(
+ 'when axis=None, `indices` must have a single dimension.')
+ arr = arr.flat
+ arr_shape = (len(arr),) # flatiter has no .shape
+ axis = 0
+ else:
+ axis = normalize_axis_index(axis, arr.ndim)
+ arr_shape = arr.shape
+
+ # use the fancy index
+ return arr[_make_along_axis_idx(arr_shape, indices, axis)]
+
+
+def _put_along_axis_dispatcher(arr, indices, values, axis):
+ return (arr, indices, values)
+
+
+@array_function_dispatch(_put_along_axis_dispatcher)
+def put_along_axis(arr, indices, values, axis):
+ """
+ Put values into the destination array by matching 1d index and data slices.
+
+ This iterates over matching 1d slices oriented along the specified axis in
+ the index and data arrays, and uses the former to place values into the
+ latter. These slices can be different lengths.
+
+ Functions returning an index along an axis, like `argsort` and
+ `argpartition`, produce suitable indices for this function.
+
+ Parameters
+ ----------
+ arr : ndarray (Ni..., M, Nk...)
+ Destination array.
+ indices : ndarray (Ni..., J, Nk...)
+ Indices to change along each 1d slice of `arr`. This must match the
+        dimension of `arr`, but the dimensions in ``Ni`` and ``Nk`` may be 1
+        to broadcast against `arr`.
+ values : array_like (Ni..., J, Nk...)
+        Values to insert at those indices. Its shape and dimension are
+ broadcast to match that of `indices`.
+ axis : int
+ The axis to take 1d slices along. If axis is None, the destination
+        array is treated as if it had first been flattened to 1d.
+
+ Notes
+ -----
+ This is equivalent to (but faster than) the following use of `ndindex` and
+ `s_`, which sets each of ``ii`` and ``kk`` to a tuple of indices::
+
+ Ni, M, Nk = a.shape[:axis], a.shape[axis], a.shape[axis+1:]
+ J = indices.shape[axis] # Need not equal M
+
+ for ii in ndindex(Ni):
+ for kk in ndindex(Nk):
+ a_1d = a [ii + s_[:,] + kk]
+ indices_1d = indices[ii + s_[:,] + kk]
+ values_1d = values [ii + s_[:,] + kk]
+ for j in range(J):
+ a_1d[indices_1d[j]] = values_1d[j]
+
+ Equivalently, eliminating the inner loop, the last two lines would be::
+
+ a_1d[indices_1d] = values_1d
+
+ See Also
+ --------
+ take_along_axis :
+ Take values from the input array by matching 1d index and data slices
+
+ Examples
+ --------
+ >>> import numpy as np
+
+ For this sample array
+
+ >>> a = np.array([[10, 30, 20], [60, 40, 50]])
+
+ We can replace the maximum values with:
+
+ >>> ai = np.argmax(a, axis=1, keepdims=True)
+ >>> ai
+ array([[1],
+ [0]])
+ >>> np.put_along_axis(a, ai, 99, axis=1)
+ >>> a
+ array([[10, 99, 20],
+ [99, 40, 50]])
+
+ """
+ # normalize inputs
+ if axis is None:
+ if indices.ndim != 1:
+ raise ValueError(
+ 'when axis=None, `indices` must have a single dimension.')
+ arr = arr.flat
+ axis = 0
+ arr_shape = (len(arr),) # flatiter has no .shape
+ else:
+ axis = normalize_axis_index(axis, arr.ndim)
+ arr_shape = arr.shape
+
+ # use the fancy index
+ arr[_make_along_axis_idx(arr_shape, indices, axis)] = values
+
+
+def _apply_along_axis_dispatcher(func1d, axis, arr, *args, **kwargs):
+ return (arr,)
+
+
+@array_function_dispatch(_apply_along_axis_dispatcher)
+def apply_along_axis(func1d, axis, arr, *args, **kwargs):
+ """
+ Apply a function to 1-D slices along the given axis.
+
+ Execute `func1d(a, *args, **kwargs)` where `func1d` operates on 1-D arrays
+ and `a` is a 1-D slice of `arr` along `axis`.
+
+ This is equivalent to (but faster than) the following use of `ndindex` and
+ `s_`, which sets each of ``ii``, ``jj``, and ``kk`` to a tuple of indices::
+
+ Ni, Nk = a.shape[:axis], a.shape[axis+1:]
+ for ii in ndindex(Ni):
+ for kk in ndindex(Nk):
+ f = func1d(arr[ii + s_[:,] + kk])
+ Nj = f.shape
+ for jj in ndindex(Nj):
+ out[ii + jj + kk] = f[jj]
+
+ Equivalently, eliminating the inner loop, this can be expressed as::
+
+ Ni, Nk = a.shape[:axis], a.shape[axis+1:]
+ for ii in ndindex(Ni):
+ for kk in ndindex(Nk):
+ out[ii + s_[...,] + kk] = func1d(arr[ii + s_[:,] + kk])
+
+ Parameters
+ ----------
+ func1d : function (M,) -> (Nj...)
+ This function should accept 1-D arrays. It is applied to 1-D
+ slices of `arr` along the specified axis.
+ axis : integer
+ Axis along which `arr` is sliced.
+ arr : ndarray (Ni..., M, Nk...)
+ Input array.
+ args : any
+ Additional arguments to `func1d`.
+ kwargs : any
+ Additional named arguments to `func1d`.
+
+ Returns
+ -------
+ out : ndarray (Ni..., Nj..., Nk...)
+ The output array. The shape of `out` is identical to the shape of
+ `arr`, except along the `axis` dimension. This axis is removed, and
+ replaced with new dimensions equal to the shape of the return value
+ of `func1d`. So if `func1d` returns a scalar `out` will have one
+ fewer dimensions than `arr`.
+
+ See Also
+ --------
+ apply_over_axes : Apply a function repeatedly over multiple axes.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> def my_func(a):
+ ... \"\"\"Average first and last element of a 1-D array\"\"\"
+ ... return (a[0] + a[-1]) * 0.5
+ >>> b = np.array([[1,2,3], [4,5,6], [7,8,9]])
+ >>> np.apply_along_axis(my_func, 0, b)
+ array([4., 5., 6.])
+ >>> np.apply_along_axis(my_func, 1, b)
+ array([2., 5., 8.])
+
+ For a function that returns a 1D array, the number of dimensions in
+ `outarr` is the same as `arr`.
+
+ >>> b = np.array([[8,1,7], [4,3,9], [5,2,6]])
+ >>> np.apply_along_axis(sorted, 1, b)
+ array([[1, 7, 8],
+ [3, 4, 9],
+ [2, 5, 6]])
+
+ For a function that returns a higher dimensional array, those dimensions
+ are inserted in place of the `axis` dimension.
+
+ >>> b = np.array([[1,2,3], [4,5,6], [7,8,9]])
+ >>> np.apply_along_axis(np.diag, -1, b)
+ array([[[1, 0, 0],
+ [0, 2, 0],
+ [0, 0, 3]],
+ [[4, 0, 0],
+ [0, 5, 0],
+ [0, 0, 6]],
+ [[7, 0, 0],
+ [0, 8, 0],
+ [0, 0, 9]]])
+ """
+ # handle negative axes
+ conv = _array_converter(arr)
+ arr = conv[0]
+
+ nd = arr.ndim
+ axis = normalize_axis_index(axis, nd)
+
+ # arr, with the iteration axis at the end
+ in_dims = list(range(nd))
+ inarr_view = transpose(arr, in_dims[:axis] + in_dims[axis + 1:] + [axis])
+
+ # compute indices for the iteration axes, and append a trailing ellipsis to
+ # prevent 0d arrays decaying to scalars, which fixes gh-8642
+ inds = ndindex(inarr_view.shape[:-1])
+ inds = (ind + (Ellipsis,) for ind in inds)
+
+ # invoke the function on the first item
+ try:
+ ind0 = next(inds)
+ except StopIteration:
+ raise ValueError(
+ 'Cannot apply_along_axis when any iteration dimensions are 0'
+ ) from None
+ res = asanyarray(func1d(inarr_view[ind0], *args, **kwargs))
+
+ # build a buffer for storing evaluations of func1d.
+ # remove the requested axis, and add the new ones on the end.
+ # laid out so that each write is contiguous.
+ # for a tuple index inds, buff[inds] = func1d(inarr_view[inds])
+ if not isinstance(res, matrix):
+ buff = zeros_like(res, shape=inarr_view.shape[:-1] + res.shape)
+ else:
+ # Matrices are nasty with reshaping, so do not preserve them here.
+ buff = zeros(inarr_view.shape[:-1] + res.shape, dtype=res.dtype)
+
+ # permutation of axes such that out = buff.transpose(buff_permute)
+ buff_dims = list(range(buff.ndim))
+ buff_permute = (
+ buff_dims[0 : axis] +
+ buff_dims[buff.ndim - res.ndim : buff.ndim] +
+ buff_dims[axis : buff.ndim - res.ndim]
+ )
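+    # e.g. with ``arr.ndim == 3``, ``axis == 1`` and a 1-d result from
+    # ``func1d``, buff has shape (Ni, Nk, Nj) and ``buff_permute`` is
+    # [0, 2, 1], moving the result axes back to the position of the
+    # removed axis.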
+
+ # save the first result, then compute and save all remaining results
+ buff[ind0] = res
+ for ind in inds:
+ buff[ind] = asanyarray(func1d(inarr_view[ind], *args, **kwargs))
+
+ res = transpose(buff, buff_permute)
+ return conv.wrap(res)
+
+
+def _apply_over_axes_dispatcher(func, a, axes):
+ return (a,)
+
+
+@array_function_dispatch(_apply_over_axes_dispatcher)
+def apply_over_axes(func, a, axes):
+ """
+ Apply a function repeatedly over multiple axes.
+
+ `func` is called as `res = func(a, axis)`, where `axis` is the first
+ element of `axes`. The result `res` of the function call must have
+ either the same dimensions as `a` or one less dimension. If `res`
+ has one less dimension than `a`, a dimension is inserted before
+ `axis`. The call to `func` is then repeated for each axis in `axes`,
+ with `res` as the first argument.
+
+ Parameters
+ ----------
+ func : function
+ This function must take two arguments, `func(a, axis)`.
+ a : array_like
+ Input array.
+ axes : array_like
+ Axes over which `func` is applied; the elements must be integers.
+
+ Returns
+ -------
+ apply_over_axis : ndarray
+ The output array. The number of dimensions is the same as `a`,
+ but the shape can be different. This depends on whether `func`
+ changes the shape of its output with respect to its input.
+
+ See Also
+ --------
+ apply_along_axis :
+ Apply a function to 1-D slices of an array along the given axis.
+
+ Notes
+ -----
+ This function is equivalent to tuple axis arguments to reorderable ufuncs
+ with keepdims=True. Tuple axis arguments to ufuncs have been available since
+ version 1.7.0.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> a = np.arange(24).reshape(2,3,4)
+ >>> a
+ array([[[ 0, 1, 2, 3],
+ [ 4, 5, 6, 7],
+ [ 8, 9, 10, 11]],
+ [[12, 13, 14, 15],
+ [16, 17, 18, 19],
+ [20, 21, 22, 23]]])
+
+ Sum over axes 0 and 2. The result has same number of dimensions
+ as the original array:
+
+ >>> np.apply_over_axes(np.sum, a, [0,2])
+ array([[[ 60],
+ [ 92],
+ [124]]])
+
+ Tuple axis arguments to ufuncs are equivalent:
+
+ >>> np.sum(a, axis=(0,2), keepdims=True)
+ array([[[ 60],
+ [ 92],
+ [124]]])
+
+ """
+ val = asarray(a)
+    N = val.ndim
+ if array(axes).ndim == 0:
+ axes = (axes,)
+ for axis in axes:
+ if axis < 0:
+ axis = N + axis
+ args = (val, axis)
+ res = func(*args)
+ if res.ndim == val.ndim:
+ val = res
+ else:
+ res = expand_dims(res, axis)
+ if res.ndim == val.ndim:
+ val = res
+ else:
+ raise ValueError("function is not returning "
+ "an array of the correct shape")
+ return val
+
+
+def _expand_dims_dispatcher(a, axis):
+ return (a,)
+
+
+@array_function_dispatch(_expand_dims_dispatcher)
+def expand_dims(a, axis):
+ """
+ Expand the shape of an array.
+
+ Insert a new axis that will appear at the `axis` position in the expanded
+ array shape.
+
+ Parameters
+ ----------
+ a : array_like
+ Input array.
+ axis : int or tuple of ints
+ Position in the expanded axes where the new axis (or axes) is placed.
+
+ .. deprecated:: 1.13.0
+ Passing an axis where ``axis > a.ndim`` will be treated as
+ ``axis == a.ndim``, and passing ``axis < -a.ndim - 1`` will
+ be treated as ``axis == 0``. This behavior is deprecated.
+
+ Returns
+ -------
+ result : ndarray
+ View of `a` with the number of dimensions increased.
+
+ See Also
+ --------
+ squeeze : The inverse operation, removing singleton dimensions
+ reshape : Insert, remove, and combine dimensions, and resize existing ones
+ atleast_1d, atleast_2d, atleast_3d
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> x = np.array([1, 2])
+ >>> x.shape
+ (2,)
+
+ The following is equivalent to ``x[np.newaxis, :]`` or ``x[np.newaxis]``:
+
+ >>> y = np.expand_dims(x, axis=0)
+ >>> y
+ array([[1, 2]])
+ >>> y.shape
+ (1, 2)
+
+ The following is equivalent to ``x[:, np.newaxis]``:
+
+ >>> y = np.expand_dims(x, axis=1)
+ >>> y
+ array([[1],
+ [2]])
+ >>> y.shape
+ (2, 1)
+
+ ``axis`` may also be a tuple:
+
+ >>> y = np.expand_dims(x, axis=(0, 1))
+ >>> y
+ array([[[1, 2]]])
+
+ >>> y = np.expand_dims(x, axis=(2, 0))
+ >>> y
+ array([[[1],
+ [2]]])
+
+ Note that some examples may use ``None`` instead of ``np.newaxis``. These
+ are the same objects:
+
+ >>> np.newaxis is None
+ True
+
+ """
+ if isinstance(a, matrix):
+ a = asarray(a)
+ else:
+ a = asanyarray(a)
+
+ if not isinstance(axis, (tuple, list)):
+ axis = (axis,)
+
+ out_ndim = len(axis) + a.ndim
+ axis = normalize_axis_tuple(axis, out_ndim)
+
+ shape_it = iter(a.shape)
+ shape = [1 if ax in axis else next(shape_it) for ax in range(out_ndim)]
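+    # e.g. a.shape == (2, 3) with axis == (0, 3) yields shape == [1, 2, 3, 1]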
+
+ return a.reshape(shape)
+
+
+# NOTE: Remove once deprecation period passes
+@set_module("numpy")
+def row_stack(tup, *, dtype=None, casting="same_kind"):
+ # Deprecated in NumPy 2.0, 2023-08-18
+ warnings.warn(
+ "`row_stack` alias is deprecated. "
+ "Use `np.vstack` directly.",
+ DeprecationWarning,
+ stacklevel=2
+ )
+ return vstack(tup, dtype=dtype, casting=casting)
+
+
+row_stack.__doc__ = vstack.__doc__
+
+
+def _column_stack_dispatcher(tup):
+ return _arrays_for_stack_dispatcher(tup)
+
+
+@array_function_dispatch(_column_stack_dispatcher)
+def column_stack(tup):
+ """
+ Stack 1-D arrays as columns into a 2-D array.
+
+ Take a sequence of 1-D arrays and stack them as columns
+ to make a single 2-D array. 2-D arrays are stacked as-is,
+ just like with `hstack`. 1-D arrays are turned into 2-D columns
+ first.
+
+ Parameters
+ ----------
+ tup : sequence of 1-D or 2-D arrays.
+ Arrays to stack. All of them must have the same first dimension.
+
+ Returns
+ -------
+ stacked : 2-D array
+ The array formed by stacking the given arrays.
+
+ See Also
+ --------
+ stack, hstack, vstack, concatenate
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> a = np.array((1,2,3))
+ >>> b = np.array((2,3,4))
+ >>> np.column_stack((a,b))
+ array([[1, 2],
+ [2, 3],
+ [3, 4]])
+
+ """
+ arrays = []
+ for v in tup:
+ arr = asanyarray(v)
+ if arr.ndim < 2:
+ arr = array(arr, copy=None, subok=True, ndmin=2).T
+ arrays.append(arr)
+ return _nx.concatenate(arrays, 1)
+
+
+def _dstack_dispatcher(tup):
+ return _arrays_for_stack_dispatcher(tup)
+
+
+@array_function_dispatch(_dstack_dispatcher)
+def dstack(tup):
+ """
+ Stack arrays in sequence depth wise (along third axis).
+
+ This is equivalent to concatenation along the third axis after 2-D arrays
+ of shape `(M,N)` have been reshaped to `(M,N,1)` and 1-D arrays of shape
+ `(N,)` have been reshaped to `(1,N,1)`. Rebuilds arrays divided by
+ `dsplit`.
+
+ This function makes most sense for arrays with up to 3 dimensions. For
+ instance, for pixel-data with a height (first axis), width (second axis),
+ and r/g/b channels (third axis). The functions `concatenate`, `stack` and
+ `block` provide more general stacking and concatenation operations.
+
+ Parameters
+ ----------
+ tup : sequence of arrays
+ The arrays must have the same shape along all but the third axis.
+ 1-D or 2-D arrays must have the same shape.
+
+ Returns
+ -------
+ stacked : ndarray
+ The array formed by stacking the given arrays, will be at least 3-D.
+
+ See Also
+ --------
+ concatenate : Join a sequence of arrays along an existing axis.
+ stack : Join a sequence of arrays along a new axis.
+ block : Assemble an nd-array from nested lists of blocks.
+ vstack : Stack arrays in sequence vertically (row wise).
+ hstack : Stack arrays in sequence horizontally (column wise).
+ column_stack : Stack 1-D arrays as columns into a 2-D array.
+ dsplit : Split array along third axis.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> a = np.array((1,2,3))
+ >>> b = np.array((2,3,4))
+ >>> np.dstack((a,b))
+ array([[[1, 2],
+ [2, 3],
+ [3, 4]]])
+
+ >>> a = np.array([[1],[2],[3]])
+ >>> b = np.array([[2],[3],[4]])
+ >>> np.dstack((a,b))
+ array([[[1, 2]],
+ [[2, 3]],
+ [[3, 4]]])
+
+ """
+ arrs = atleast_3d(*tup)
+ if not isinstance(arrs, tuple):
+ arrs = (arrs,)
+ return _nx.concatenate(arrs, 2)
+
+
+def _replace_zero_by_x_arrays(sub_arys):
+ for i in range(len(sub_arys)):
+ if _nx.ndim(sub_arys[i]) == 0:
+ sub_arys[i] = _nx.empty(0, dtype=sub_arys[i].dtype)
+        elif _nx.any(_nx.equal(_nx.shape(sub_arys[i]), 0)):
+ sub_arys[i] = _nx.empty(0, dtype=sub_arys[i].dtype)
+ return sub_arys
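+
+# Illustrative behaviour (not a doctest from the upstream source): any 0-d or
+# zero-sized entry is replaced by an empty 1-d array of the same dtype, e.g.
+# _replace_zero_by_x_arrays([np.empty((0, 3))]) -> [array([], dtype=float64)]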
+
+
+def _array_split_dispatcher(ary, indices_or_sections, axis=None):
+ return (ary, indices_or_sections)
+
+
+@array_function_dispatch(_array_split_dispatcher)
+def array_split(ary, indices_or_sections, axis=0):
+ """
+ Split an array into multiple sub-arrays.
+
+ Please refer to the ``split`` documentation. The only difference
+ between these functions is that ``array_split`` allows
+ `indices_or_sections` to be an integer that does *not* equally
+ divide the axis. For an array of length l that should be split
+ into n sections, it returns l % n sub-arrays of size l//n + 1
+ and the rest of size l//n.
+
+ See Also
+ --------
+ split : Split array into multiple sub-arrays of equal size.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> x = np.arange(8.0)
+ >>> np.array_split(x, 3)
+ [array([0., 1., 2.]), array([3., 4., 5.]), array([6., 7.])]
+
+ >>> x = np.arange(9)
+ >>> np.array_split(x, 4)
+ [array([0, 1, 2]), array([3, 4]), array([5, 6]), array([7, 8])]
+
+ """
+ try:
+ Ntotal = ary.shape[axis]
+ except AttributeError:
+ Ntotal = len(ary)
+ try:
+ # handle array case.
+ Nsections = len(indices_or_sections) + 1
+ div_points = [0] + list(indices_or_sections) + [Ntotal]
+ except TypeError:
+ # indices_or_sections is a scalar, not an array.
+ Nsections = int(indices_or_sections)
+ if Nsections <= 0:
+ raise ValueError('number sections must be larger than 0.') from None
+ Neach_section, extras = divmod(Ntotal, Nsections)
+ section_sizes = ([0] +
+ extras * [Neach_section + 1] +
+ (Nsections - extras) * [Neach_section])
+ div_points = _nx.array(section_sizes, dtype=_nx.intp).cumsum()
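+        # e.g. Ntotal == 8, Nsections == 3: section_sizes == [0, 3, 3, 2] and
+        # div_points == [0, 3, 6, 8], matching the docstring example above.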
+
+ sub_arys = []
+ sary = _nx.swapaxes(ary, axis, 0)
+ for i in range(Nsections):
+ st = div_points[i]
+ end = div_points[i + 1]
+ sub_arys.append(_nx.swapaxes(sary[st:end], axis, 0))
+
+ return sub_arys
+
+
+def _split_dispatcher(ary, indices_or_sections, axis=None):
+ return (ary, indices_or_sections)
+
+
+@array_function_dispatch(_split_dispatcher)
+def split(ary, indices_or_sections, axis=0):
+ """
+ Split an array into multiple sub-arrays as views into `ary`.
+
+ Parameters
+ ----------
+ ary : ndarray
+ Array to be divided into sub-arrays.
+ indices_or_sections : int or 1-D array
+ If `indices_or_sections` is an integer, N, the array will be divided
+ into N equal arrays along `axis`. If such a split is not possible,
+ an error is raised.
+
+ If `indices_or_sections` is a 1-D array of sorted integers, the entries
+ indicate where along `axis` the array is split. For example,
+ ``[2, 3]`` would, for ``axis=0``, result in
+
+ - ary[:2]
+ - ary[2:3]
+ - ary[3:]
+
+ If an index exceeds the dimension of the array along `axis`,
+ an empty sub-array is returned correspondingly.
+ axis : int, optional
+ The axis along which to split, default is 0.
+
+ Returns
+ -------
+ sub-arrays : list of ndarrays
+ A list of sub-arrays as views into `ary`.
+
+ Raises
+ ------
+ ValueError
+ If `indices_or_sections` is given as an integer, but
+ a split does not result in equal division.
+
+ See Also
+ --------
+ array_split : Split an array into multiple sub-arrays of equal or
+ near-equal size. Does not raise an exception if
+ an equal division cannot be made.
+ hsplit : Split array into multiple sub-arrays horizontally (column-wise).
+ vsplit : Split array into multiple sub-arrays vertically (row wise).
+ dsplit : Split array into multiple sub-arrays along the 3rd axis (depth).
+ concatenate : Join a sequence of arrays along an existing axis.
+ stack : Join a sequence of arrays along a new axis.
+ hstack : Stack arrays in sequence horizontally (column wise).
+ vstack : Stack arrays in sequence vertically (row wise).
+ dstack : Stack arrays in sequence depth wise (along third dimension).
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> x = np.arange(9.0)
+ >>> np.split(x, 3)
+ [array([0., 1., 2.]), array([3., 4., 5.]), array([6., 7., 8.])]
+
+ >>> x = np.arange(8.0)
+ >>> np.split(x, [3, 5, 6, 10])
+ [array([0., 1., 2.]),
+ array([3., 4.]),
+ array([5.]),
+ array([6., 7.]),
+ array([], dtype=float64)]
+
+ """
+ try:
+ len(indices_or_sections)
+ except TypeError:
+ sections = indices_or_sections
+ N = ary.shape[axis]
+ if N % sections:
+ raise ValueError(
+ 'array split does not result in an equal division') from None
+ return array_split(ary, indices_or_sections, axis)
+
+
+def _hvdsplit_dispatcher(ary, indices_or_sections):
+ return (ary, indices_or_sections)
+
+
+@array_function_dispatch(_hvdsplit_dispatcher)
+def hsplit(ary, indices_or_sections):
+ """
+ Split an array into multiple sub-arrays horizontally (column-wise).
+
+ Please refer to the `split` documentation. `hsplit` is equivalent
+    to `split` with ``axis=1``; the array is always split along the second
+    axis, except for 1-D arrays, where it is split at ``axis=0``.
+
+ See Also
+ --------
+ split : Split an array into multiple sub-arrays of equal size.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> x = np.arange(16.0).reshape(4, 4)
+ >>> x
+ array([[ 0., 1., 2., 3.],
+ [ 4., 5., 6., 7.],
+ [ 8., 9., 10., 11.],
+ [12., 13., 14., 15.]])
+ >>> np.hsplit(x, 2)
+ [array([[ 0., 1.],
+ [ 4., 5.],
+ [ 8., 9.],
+ [12., 13.]]),
+ array([[ 2., 3.],
+ [ 6., 7.],
+ [10., 11.],
+ [14., 15.]])]
+ >>> np.hsplit(x, np.array([3, 6]))
+ [array([[ 0., 1., 2.],
+ [ 4., 5., 6.],
+ [ 8., 9., 10.],
+ [12., 13., 14.]]),
+ array([[ 3.],
+ [ 7.],
+ [11.],
+ [15.]]),
+ array([], shape=(4, 0), dtype=float64)]
+
+ With a higher dimensional array the split is still along the second axis.
+
+ >>> x = np.arange(8.0).reshape(2, 2, 2)
+ >>> x
+ array([[[0., 1.],
+ [2., 3.]],
+ [[4., 5.],
+ [6., 7.]]])
+ >>> np.hsplit(x, 2)
+ [array([[[0., 1.]],
+ [[4., 5.]]]),
+ array([[[2., 3.]],
+ [[6., 7.]]])]
+
+ With a 1-D array, the split is along axis 0.
+
+ >>> x = np.array([0, 1, 2, 3, 4, 5])
+ >>> np.hsplit(x, 2)
+ [array([0, 1, 2]), array([3, 4, 5])]
+
+ """
+ if _nx.ndim(ary) == 0:
+ raise ValueError('hsplit only works on arrays of 1 or more dimensions')
+ if ary.ndim > 1:
+ return split(ary, indices_or_sections, 1)
+ else:
+ return split(ary, indices_or_sections, 0)
+
+
+@array_function_dispatch(_hvdsplit_dispatcher)
+def vsplit(ary, indices_or_sections):
+ """
+ Split an array into multiple sub-arrays vertically (row-wise).
+
+ Please refer to the ``split`` documentation. ``vsplit`` is equivalent
+    to ``split`` with ``axis=0`` (default); the array is always split along the
+ first axis regardless of the array dimension.
+
+ See Also
+ --------
+ split : Split an array into multiple sub-arrays of equal size.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> x = np.arange(16.0).reshape(4, 4)
+ >>> x
+ array([[ 0., 1., 2., 3.],
+ [ 4., 5., 6., 7.],
+ [ 8., 9., 10., 11.],
+ [12., 13., 14., 15.]])
+ >>> np.vsplit(x, 2)
+ [array([[0., 1., 2., 3.],
+ [4., 5., 6., 7.]]),
+ array([[ 8., 9., 10., 11.],
+ [12., 13., 14., 15.]])]
+ >>> np.vsplit(x, np.array([3, 6]))
+ [array([[ 0., 1., 2., 3.],
+ [ 4., 5., 6., 7.],
+ [ 8., 9., 10., 11.]]),
+ array([[12., 13., 14., 15.]]),
+ array([], shape=(0, 4), dtype=float64)]
+
+ With a higher dimensional array the split is still along the first axis.
+
+ >>> x = np.arange(8.0).reshape(2, 2, 2)
+ >>> x
+ array([[[0., 1.],
+ [2., 3.]],
+ [[4., 5.],
+ [6., 7.]]])
+ >>> np.vsplit(x, 2)
+ [array([[[0., 1.],
+ [2., 3.]]]),
+ array([[[4., 5.],
+ [6., 7.]]])]
+
+ """
+ if _nx.ndim(ary) < 2:
+ raise ValueError('vsplit only works on arrays of 2 or more dimensions')
+ return split(ary, indices_or_sections, 0)
+
+
+@array_function_dispatch(_hvdsplit_dispatcher)
+def dsplit(ary, indices_or_sections):
+ """
+ Split array into multiple sub-arrays along the 3rd axis (depth).
+
+ Please refer to the `split` documentation. `dsplit` is equivalent
+    to `split` with ``axis=2``; the array is always split along the third
+ axis provided the array dimension is greater than or equal to 3.
+
+ See Also
+ --------
+ split : Split an array into multiple sub-arrays of equal size.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> x = np.arange(16.0).reshape(2, 2, 4)
+ >>> x
+ array([[[ 0., 1., 2., 3.],
+ [ 4., 5., 6., 7.]],
+ [[ 8., 9., 10., 11.],
+ [12., 13., 14., 15.]]])
+ >>> np.dsplit(x, 2)
+ [array([[[ 0., 1.],
+ [ 4., 5.]],
+ [[ 8., 9.],
+ [12., 13.]]]), array([[[ 2., 3.],
+ [ 6., 7.]],
+ [[10., 11.],
+ [14., 15.]]])]
+ >>> np.dsplit(x, np.array([3, 6]))
+ [array([[[ 0., 1., 2.],
+ [ 4., 5., 6.]],
+ [[ 8., 9., 10.],
+ [12., 13., 14.]]]),
+ array([[[ 3.],
+ [ 7.]],
+ [[11.],
+ [15.]]]),
+ array([], shape=(2, 2, 0), dtype=float64)]
+ """
+ if _nx.ndim(ary) < 3:
+ raise ValueError('dsplit only works on arrays of 3 or more dimensions')
+ return split(ary, indices_or_sections, 2)
+
+
+def get_array_wrap(*args):
+ """Find the wrapper for the array with the highest priority.
+
+ In case of ties, leftmost wins. If no wrapper is found, return None.
+
+ .. deprecated:: 2.0
+ """
+
+ # Deprecated in NumPy 2.0, 2023-07-11
+ warnings.warn(
+ "`get_array_wrap` is deprecated. "
+ "(deprecated in NumPy 2.0)",
+ DeprecationWarning,
+ stacklevel=2
+ )
+
+ wrappers = sorted((getattr(x, '__array_priority__', 0), -i,
+ x.__array_wrap__) for i, x in enumerate(args)
+ if hasattr(x, '__array_wrap__'))
+ if wrappers:
+ return wrappers[-1][-1]
+ return None
+
+
+def _kron_dispatcher(a, b):
+ return (a, b)
+
+
+@array_function_dispatch(_kron_dispatcher)
+def kron(a, b):
+ """
+ Kronecker product of two arrays.
+
+ Computes the Kronecker product, a composite array made of blocks of the
+ second array scaled by the first.
+
+ Parameters
+ ----------
+ a, b : array_like
+
+ Returns
+ -------
+ out : ndarray
+
+ See Also
+ --------
+ outer : The outer product
+
+ Notes
+ -----
+ The function assumes that the number of dimensions of `a` and `b`
+ are the same, if necessary prepending the smallest with ones.
+ If ``a.shape = (r0,r1,..,rN)`` and ``b.shape = (s0,s1,...,sN)``,
+    the Kronecker product has shape ``(r0*s0, r1*s1, ..., rN*sN)``.
+ The elements are products of elements from `a` and `b`, organized
+ explicitly by::
+
+ kron(a,b)[k0,k1,...,kN] = a[i0,i1,...,iN] * b[j0,j1,...,jN]
+
+ where::
+
+ kt = it * st + jt, t = 0,...,N
+
+ In the common 2-D case (N=1), the block structure can be visualized::
+
+ [[ a[0,0]*b, a[0,1]*b, ... , a[0,-1]*b ],
+ [ ... ... ],
+ [ a[-1,0]*b, a[-1,1]*b, ... , a[-1,-1]*b ]]
+
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.kron([1,10,100], [5,6,7])
+ array([ 5, 6, 7, ..., 500, 600, 700])
+ >>> np.kron([5,6,7], [1,10,100])
+ array([ 5, 50, 500, ..., 7, 70, 700])
+
+ >>> np.kron(np.eye(2), np.ones((2,2)))
+ array([[1., 1., 0., 0.],
+ [1., 1., 0., 0.],
+ [0., 0., 1., 1.],
+ [0., 0., 1., 1.]])
+
+ >>> a = np.arange(100).reshape((2,5,2,5))
+ >>> b = np.arange(24).reshape((2,3,4))
+ >>> c = np.kron(a,b)
+ >>> c.shape
+ (2, 10, 6, 20)
+ >>> I = (1,3,0,2)
+ >>> J = (0,2,1)
+ >>> J1 = (0,) + J # extend to ndim=4
+ >>> S1 = (1,) + b.shape
+ >>> K = tuple(np.array(I) * np.array(S1) + np.array(J1))
+ >>> c[K] == a[I]*b[J]
+ True
+
+ """
+ # Working:
+ # 1. Equalise the shapes by prepending smaller array with 1s
+ # 2. Expand shapes of both the arrays by adding new axes at
+ # odd positions for 1st array and even positions for 2nd
+ # 3. Compute the product of the modified array
+ # 4. The inner most array elements now contain the rows of
+ # the Kronecker product
+ # 5. Reshape the result to kron's shape, which is same as
+ # product of shapes of the two arrays.
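+    # Shape walk-through (illustrative): for a.shape == (2, 3) and
+    # b.shape == (4, 5), a_arr becomes (2, 1, 3, 1) and b_arr (1, 4, 1, 5);
+    # their product is (2, 4, 3, 5), reshaped to (8, 15) == (2*4, 3*5).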
+ b = asanyarray(b)
+ a = array(a, copy=None, subok=True, ndmin=b.ndim)
+ is_any_mat = isinstance(a, matrix) or isinstance(b, matrix)
+ ndb, nda = b.ndim, a.ndim
+ nd = max(ndb, nda)
+
+ if (nda == 0 or ndb == 0):
+ return _nx.multiply(a, b)
+
+ as_ = a.shape
+ bs = b.shape
+ if not a.flags.contiguous:
+ a = reshape(a, as_)
+ if not b.flags.contiguous:
+ b = reshape(b, bs)
+
+ # Equalise the shapes by prepending smaller one with 1s
+ as_ = (1,) * max(0, ndb - nda) + as_
+ bs = (1,) * max(0, nda - ndb) + bs
+
+ # Insert empty dimensions
+ a_arr = expand_dims(a, axis=tuple(range(ndb - nda)))
+ b_arr = expand_dims(b, axis=tuple(range(nda - ndb)))
+
+ # Compute the product
+ a_arr = expand_dims(a_arr, axis=tuple(range(1, nd * 2, 2)))
+ b_arr = expand_dims(b_arr, axis=tuple(range(0, nd * 2, 2)))
+ # In case of `mat`, convert result to `array`
+ result = _nx.multiply(a_arr, b_arr, subok=(not is_any_mat))
+
+ # Reshape back
+ result = result.reshape(_nx.multiply(as_, bs))
+
+ return result if not is_any_mat else matrix(result, copy=False)
+
+
+def _tile_dispatcher(A, reps):
+ return (A, reps)
+
+
+@array_function_dispatch(_tile_dispatcher)
+def tile(A, reps):
+ """
+ Construct an array by repeating A the number of times given by reps.
+
+ If `reps` has length ``d``, the result will have dimension of
+ ``max(d, A.ndim)``.
+
+ If ``A.ndim < d``, `A` is promoted to be d-dimensional by prepending new
+ axes. So a shape (3,) array is promoted to (1, 3) for 2-D replication,
+ or shape (1, 1, 3) for 3-D replication. If this is not the desired
+ behavior, promote `A` to d-dimensions manually before calling this
+ function.
+
+    If ``A.ndim > d``, `reps` is promoted to ``A.ndim`` by prepending 1's to it.
+ Thus for an `A` of shape (2, 3, 4, 5), a `reps` of (2, 2) is treated as
+ (1, 1, 2, 2).
+
+    Note: Although `tile` may be used for broadcasting, it is strongly
+ recommended to use numpy's broadcasting operations and functions.
+
+ Parameters
+ ----------
+ A : array_like
+ The input array.
+ reps : array_like
+ The number of repetitions of `A` along each axis.
+
+ Returns
+ -------
+ c : ndarray
+ The tiled output array.
+
+ See Also
+ --------
+ repeat : Repeat elements of an array.
+ broadcast_to : Broadcast an array to a new shape
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> a = np.array([0, 1, 2])
+ >>> np.tile(a, 2)
+ array([0, 1, 2, 0, 1, 2])
+ >>> np.tile(a, (2, 2))
+ array([[0, 1, 2, 0, 1, 2],
+ [0, 1, 2, 0, 1, 2]])
+ >>> np.tile(a, (2, 1, 2))
+ array([[[0, 1, 2, 0, 1, 2]],
+ [[0, 1, 2, 0, 1, 2]]])
+
+ >>> b = np.array([[1, 2], [3, 4]])
+ >>> np.tile(b, 2)
+ array([[1, 2, 1, 2],
+ [3, 4, 3, 4]])
+ >>> np.tile(b, (2, 1))
+ array([[1, 2],
+ [3, 4],
+ [1, 2],
+ [3, 4]])
+
+ >>> c = np.array([1,2,3,4])
+ >>> np.tile(c,(4,1))
+ array([[1, 2, 3, 4],
+ [1, 2, 3, 4],
+ [1, 2, 3, 4],
+ [1, 2, 3, 4]])
+ """
+ try:
+ tup = tuple(reps)
+ except TypeError:
+ tup = (reps,)
+ d = len(tup)
+ if all(x == 1 for x in tup) and isinstance(A, _nx.ndarray):
+ # Fixes the problem that the function does not make a copy if A is a
+ # numpy array and the repetitions are 1 in all dimensions
+ return _nx.array(A, copy=True, subok=True, ndmin=d)
+ else:
+ # Note that no copy of zero-sized arrays is made. However since they
+ # have no data there is no risk of an inadvertent overwrite.
+ c = _nx.array(A, copy=None, subok=True, ndmin=d)
+ if (d < c.ndim):
+ tup = (1,) * (c.ndim - d) + tup
+ shape_out = tuple(s * t for s, t in zip(c.shape, tup))
+ n = c.size
+ if n > 0:
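+            # Before each step, ``n`` is the size of the sub-array from the
+            # current axis down, so each row of the (-1, n) reshape is one
+            # such block and ``repeat`` lays ``nrep`` copies back to back.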
+ for dim_in, nrep in zip(c.shape, tup):
+ if nrep != 1:
+ c = c.reshape(-1, n).repeat(nrep, 0)
+ n //= dim_in
+ return c.reshape(shape_out)
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/_shape_base_impl.pyi b/.venv/lib/python3.12/site-packages/numpy/lib/_shape_base_impl.pyi
new file mode 100644
index 0000000..a50d372
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/_shape_base_impl.pyi
@@ -0,0 +1,235 @@
+from collections.abc import Callable, Sequence
+from typing import (
+ Any,
+ Concatenate,
+ ParamSpec,
+ Protocol,
+ SupportsIndex,
+ TypeVar,
+ overload,
+ type_check_only,
+)
+
+from typing_extensions import deprecated
+
+import numpy as np
+from numpy import (
+ _CastingKind,
+ complexfloating,
+ floating,
+ generic,
+ integer,
+ object_,
+ signedinteger,
+ ufunc,
+ unsignedinteger,
+)
+from numpy._typing import (
+ ArrayLike,
+ DTypeLike,
+ NDArray,
+ _ArrayLike,
+ _ArrayLikeBool_co,
+ _ArrayLikeComplex_co,
+ _ArrayLikeFloat_co,
+ _ArrayLikeInt_co,
+ _ArrayLikeObject_co,
+ _ArrayLikeUInt_co,
+ _ShapeLike,
+)
+
+__all__ = [
+ "column_stack",
+ "row_stack",
+ "dstack",
+ "array_split",
+ "split",
+ "hsplit",
+ "vsplit",
+ "dsplit",
+ "apply_over_axes",
+ "expand_dims",
+ "apply_along_axis",
+ "kron",
+ "tile",
+ "take_along_axis",
+ "put_along_axis",
+]
+
+_P = ParamSpec("_P")
+_ScalarT = TypeVar("_ScalarT", bound=generic)
+
+# Signature of `__array_wrap__`
+@type_check_only
+class _ArrayWrap(Protocol):
+ def __call__(
+ self,
+ array: NDArray[Any],
+ context: tuple[ufunc, tuple[Any, ...], int] | None = ...,
+ return_scalar: bool = ...,
+ /,
+ ) -> Any: ...
+
+@type_check_only
+class _SupportsArrayWrap(Protocol):
+ @property
+ def __array_wrap__(self) -> _ArrayWrap: ...
+
+###
+
+def take_along_axis(
+ arr: _ScalarT | NDArray[_ScalarT],
+ indices: NDArray[integer],
+ axis: int | None = ...,
+) -> NDArray[_ScalarT]: ...
+
+def put_along_axis(
+ arr: NDArray[_ScalarT],
+ indices: NDArray[integer],
+ values: ArrayLike,
+ axis: int | None,
+) -> None: ...
+
+@overload
+def apply_along_axis(
+ func1d: Callable[Concatenate[NDArray[Any], _P], _ArrayLike[_ScalarT]],
+ axis: SupportsIndex,
+ arr: ArrayLike,
+ *args: _P.args,
+ **kwargs: _P.kwargs,
+) -> NDArray[_ScalarT]: ...
+@overload
+def apply_along_axis(
+ func1d: Callable[Concatenate[NDArray[Any], _P], Any],
+ axis: SupportsIndex,
+ arr: ArrayLike,
+ *args: _P.args,
+ **kwargs: _P.kwargs,
+) -> NDArray[Any]: ...
+
+def apply_over_axes(
+ func: Callable[[NDArray[Any], int], NDArray[_ScalarT]],
+ a: ArrayLike,
+ axes: int | Sequence[int],
+) -> NDArray[_ScalarT]: ...
+
+@overload
+def expand_dims(
+ a: _ArrayLike[_ScalarT],
+ axis: _ShapeLike,
+) -> NDArray[_ScalarT]: ...
+@overload
+def expand_dims(
+ a: ArrayLike,
+ axis: _ShapeLike,
+) -> NDArray[Any]: ...
+
+# Deprecated in NumPy 2.0, 2023-08-18
+@deprecated("`row_stack` alias is deprecated. Use `np.vstack` directly.")
+def row_stack(
+ tup: Sequence[ArrayLike],
+ *,
+ dtype: DTypeLike | None = None,
+ casting: _CastingKind = "same_kind",
+) -> NDArray[Any]: ...
+
+#
+@overload
+def column_stack(tup: Sequence[_ArrayLike[_ScalarT]]) -> NDArray[_ScalarT]: ...
+@overload
+def column_stack(tup: Sequence[ArrayLike]) -> NDArray[Any]: ...
+
+@overload
+def dstack(tup: Sequence[_ArrayLike[_ScalarT]]) -> NDArray[_ScalarT]: ...
+@overload
+def dstack(tup: Sequence[ArrayLike]) -> NDArray[Any]: ...
+
+@overload
+def array_split(
+ ary: _ArrayLike[_ScalarT],
+ indices_or_sections: _ShapeLike,
+ axis: SupportsIndex = ...,
+) -> list[NDArray[_ScalarT]]: ...
+@overload
+def array_split(
+ ary: ArrayLike,
+ indices_or_sections: _ShapeLike,
+ axis: SupportsIndex = ...,
+) -> list[NDArray[Any]]: ...
+
+@overload
+def split(
+ ary: _ArrayLike[_ScalarT],
+ indices_or_sections: _ShapeLike,
+ axis: SupportsIndex = ...,
+) -> list[NDArray[_ScalarT]]: ...
+@overload
+def split(
+ ary: ArrayLike,
+ indices_or_sections: _ShapeLike,
+ axis: SupportsIndex = ...,
+) -> list[NDArray[Any]]: ...
+
+@overload
+def hsplit(
+ ary: _ArrayLike[_ScalarT],
+ indices_or_sections: _ShapeLike,
+) -> list[NDArray[_ScalarT]]: ...
+@overload
+def hsplit(
+ ary: ArrayLike,
+ indices_or_sections: _ShapeLike,
+) -> list[NDArray[Any]]: ...
+
+@overload
+def vsplit(
+ ary: _ArrayLike[_ScalarT],
+ indices_or_sections: _ShapeLike,
+) -> list[NDArray[_ScalarT]]: ...
+@overload
+def vsplit(
+ ary: ArrayLike,
+ indices_or_sections: _ShapeLike,
+) -> list[NDArray[Any]]: ...
+
+@overload
+def dsplit(
+ ary: _ArrayLike[_ScalarT],
+ indices_or_sections: _ShapeLike,
+) -> list[NDArray[_ScalarT]]: ...
+@overload
+def dsplit(
+ ary: ArrayLike,
+ indices_or_sections: _ShapeLike,
+) -> list[NDArray[Any]]: ...
+
+@overload
+def get_array_wrap(*args: _SupportsArrayWrap) -> _ArrayWrap: ...
+@overload
+def get_array_wrap(*args: object) -> _ArrayWrap | None: ...
+
+@overload
+def kron(a: _ArrayLikeBool_co, b: _ArrayLikeBool_co) -> NDArray[np.bool]: ... # type: ignore[misc]
+@overload
+def kron(a: _ArrayLikeUInt_co, b: _ArrayLikeUInt_co) -> NDArray[unsignedinteger]: ... # type: ignore[misc]
+@overload
+def kron(a: _ArrayLikeInt_co, b: _ArrayLikeInt_co) -> NDArray[signedinteger]: ... # type: ignore[misc]
+@overload
+def kron(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co) -> NDArray[floating]: ... # type: ignore[misc]
+@overload
+def kron(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ...
+@overload
+def kron(a: _ArrayLikeObject_co, b: Any) -> NDArray[object_]: ...
+@overload
+def kron(a: Any, b: _ArrayLikeObject_co) -> NDArray[object_]: ...
+
+@overload
+def tile(
+ A: _ArrayLike[_ScalarT],
+ reps: int | Sequence[int],
+) -> NDArray[_ScalarT]: ...
+@overload
+def tile(
+ A: ArrayLike,
+ reps: int | Sequence[int],
+) -> NDArray[Any]: ...
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/_stride_tricks_impl.py b/.venv/lib/python3.12/site-packages/numpy/lib/_stride_tricks_impl.py
new file mode 100644
index 0000000..d478078
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/_stride_tricks_impl.py
@@ -0,0 +1,549 @@
+"""
+Utilities that manipulate strides to achieve desirable effects.
+
+An explanation of strides can be found in the :ref:`arrays.ndarray` documentation.
+
+"""
+import numpy as np
+from numpy._core.numeric import normalize_axis_tuple
+from numpy._core.overrides import array_function_dispatch, set_module
+
+__all__ = ['broadcast_to', 'broadcast_arrays', 'broadcast_shapes']
+
+
+class DummyArray:
+ """Dummy object that just exists to hang __array_interface__ dictionaries
+ and possibly keep alive a reference to a base array.
+ """
+
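+ # np.asarray consumes the `__array_interface__` protocol, so this
+ # shim lets `as_strided` pass NumPy a modified shape/strides
+ # description while `base` keeps the original memory alive.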
+ def __init__(self, interface, base=None):
+ self.__array_interface__ = interface
+ self.base = base
+
+
+def _maybe_view_as_subclass(original_array, new_array):
+ if type(original_array) is not type(new_array):
+ # if input was an ndarray subclass and subclasses were OK,
+ # then view the result as that subclass.
+ new_array = new_array.view(type=type(original_array))
+ # Since we have done something akin to a view from original_array, we
+ # should let the subclass finalize (if it has it implemented, i.e., is
+ # not None).
+ if new_array.__array_finalize__:
+ new_array.__array_finalize__(original_array)
+ return new_array
+
+
+@set_module("numpy.lib.stride_tricks")
+def as_strided(x, shape=None, strides=None, subok=False, writeable=True):
+ """
+ Create a view into the array with the given shape and strides.
+
+ .. warning:: This function has to be used with extreme care, see notes.
+
+ Parameters
+ ----------
+ x : ndarray
+ Array to create the new view from.
+ shape : sequence of int, optional
+ The shape of the new array. Defaults to ``x.shape``.
+ strides : sequence of int, optional
+ The strides of the new array. Defaults to ``x.strides``.
+ subok : bool, optional
+ If True, subclasses are preserved.
+ writeable : bool, optional
+ If set to False, the returned array will always be readonly.
+ Otherwise it will be writable if the original array was. It
+ is advisable to set this to False if possible (see Notes).
+
+ Returns
+ -------
+ view : ndarray
+
+ See also
+ --------
+ broadcast_to : broadcast an array to a given shape.
+ reshape : reshape an array.
+ lib.stride_tricks.sliding_window_view :
+ user-friendly and safe function for creating sliding window views.
+
+ Notes
+ -----
+ ``as_strided`` creates a view into the array given the exact strides
+ and shape. This means it manipulates the internal data structure of
+ ndarray and, if done incorrectly, the array elements can point to
+ invalid memory and can corrupt results or crash your program.
+ It is advisable to always use the original ``x.strides`` when
+ calculating new strides to avoid reliance on a contiguous memory
+ layout.
+
+ Furthermore, arrays created with this function often contain
+ self-overlapping memory, so that two elements refer to the same
+ memory location.
+ Vectorized write operations on such arrays will typically be
+ unpredictable. They may even give different results for small, large,
+ or transposed arrays.
+
+ Since writing to these arrays has to be tested and done with great
+ care, you may want to use ``writeable=False`` to avoid accidental write
+ operations.
+
+ For these reasons it is advisable to avoid ``as_strided`` when
+ possible.
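+
+ Examples
+ --------
+ As a rough sketch (illustrative only), a ``(2, 3)`` view of a
+ contiguous 1-D array, built from the original ``x.strides`` as the
+ notes above recommend:
+
+ >>> import numpy as np
+ >>> from numpy.lib.stride_tricks import as_strided
+ >>> x = np.arange(6)
+ >>> as_strided(x, shape=(2, 3), strides=(3 * x.strides[0], x.strides[0]),
+ ... writeable=False)
+ array([[0, 1, 2],
+ [3, 4, 5]])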
+ """
+ # first convert input to array, possibly keeping subclass
+ x = np.array(x, copy=None, subok=subok)
+ interface = dict(x.__array_interface__)
+ if shape is not None:
+ interface['shape'] = tuple(shape)
+ if strides is not None:
+ interface['strides'] = tuple(strides)
+
+ array = np.asarray(DummyArray(interface, base=x))
+ # The route via `__interface__` does not preserve structured
+ # dtypes. Since dtype should remain unchanged, we set it explicitly.
+ array.dtype = x.dtype
+
+ view = _maybe_view_as_subclass(x, array)
+
+ if view.flags.writeable and not writeable:
+ view.flags.writeable = False
+
+ return view
+
+
+def _sliding_window_view_dispatcher(x, window_shape, axis=None, *,
+ subok=None, writeable=None):
+ return (x,)
+
+
+@array_function_dispatch(
+ _sliding_window_view_dispatcher, module="numpy.lib.stride_tricks"
+)
+def sliding_window_view(x, window_shape, axis=None, *,
+ subok=False, writeable=False):
+ """
+ Create a sliding window view into the array with the given window shape.
+
+ Also known as rolling or moving window, the window slides across all
+ dimensions of the array and extracts subsets of the array at all window
+ positions.
+
+ .. versionadded:: 1.20.0
+
+ Parameters
+ ----------
+ x : array_like
+ Array to create the sliding window view from.
+ window_shape : int or tuple of int
+ Size of window over each axis that takes part in the sliding window.
+ If `axis` is not present, must have same length as the number of input
+ array dimensions. Single integers `i` are treated as if they were the
+ tuple `(i,)`.
+ axis : int or tuple of int, optional
+ Axis or axes along which the sliding window is applied.
+ By default, the sliding window is applied to all axes and
+ `window_shape[i]` will refer to axis `i` of `x`.
+ If `axis` is given as a `tuple of int`, `window_shape[i]` will refer to
+ the axis `axis[i]` of `x`.
+ Single integers `i` are treated as if they were the tuple `(i,)`.
+ subok : bool, optional
+ If True, sub-classes will be passed-through, otherwise the returned
+ array will be forced to be a base-class array (default).
+ writeable : bool, optional
+ When true, allow writing to the returned view. The default is false,
+ as this should be used with caution: the returned view contains the
+ same memory location multiple times, so writing to one location will
+ cause others to change.
+
+ Returns
+ -------
+ view : ndarray
+ Sliding window view of the array. The sliding window dimensions are
+ inserted at the end, and the original dimensions are trimmed as
+ required by the size of the sliding window.
+ That is, ``view.shape = x_shape_trimmed + window_shape``, where
+ ``x_shape_trimmed`` is ``x.shape`` with every entry reduced by one less
+ than the corresponding window size.
+
+ See Also
+ --------
+ lib.stride_tricks.as_strided: A lower-level and less safe routine for
+ creating arbitrary views from custom shape and strides.
+ broadcast_to: broadcast an array to a given shape.
+
+ Notes
+ -----
+ For many applications using a sliding window view can be convenient, but
+ potentially very slow. Often specialized solutions exist, for example:
+
+ - `scipy.signal.fftconvolve`
+
+ - filtering functions in `scipy.ndimage`
+
+ - moving window functions provided by
+ `bottleneck <https://github.com/pydata/bottleneck>`_.
+
+ As a rough estimate, a sliding window approach with an input size of `N`
+ and a window size of `W` will scale as `O(N*W)`, whereas a specialized
+ algorithm can frequently achieve `O(N)`. That means that the sliding
+ window variant for a window size of 100 can be 100 times slower than a
+ more specialized version.
+
+ Nevertheless, for small window sizes, when no custom algorithm exists, or
+ as a prototyping and developing tool, this function can be a good solution.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> from numpy.lib.stride_tricks import sliding_window_view
+ >>> x = np.arange(6)
+ >>> x.shape
+ (6,)
+ >>> v = sliding_window_view(x, 3)
+ >>> v.shape
+ (4, 3)
+ >>> v
+ array([[0, 1, 2],
+ [1, 2, 3],
+ [2, 3, 4],
+ [3, 4, 5]])
+
+ This also works in more dimensions, e.g.
+
+ >>> i, j = np.ogrid[:3, :4]
+ >>> x = 10*i + j
+ >>> x.shape
+ (3, 4)
+ >>> x
+ array([[ 0, 1, 2, 3],
+ [10, 11, 12, 13],
+ [20, 21, 22, 23]])
+ >>> shape = (2,2)
+ >>> v = sliding_window_view(x, shape)
+ >>> v.shape
+ (2, 3, 2, 2)
+ >>> v
+ array([[[[ 0, 1],
+ [10, 11]],
+ [[ 1, 2],
+ [11, 12]],
+ [[ 2, 3],
+ [12, 13]]],
+ [[[10, 11],
+ [20, 21]],
+ [[11, 12],
+ [21, 22]],
+ [[12, 13],
+ [22, 23]]]])
+
+ The axis can be specified explicitly:
+
+ >>> v = sliding_window_view(x, 3, 0)
+ >>> v.shape
+ (1, 4, 3)
+ >>> v
+ array([[[ 0, 10, 20],
+ [ 1, 11, 21],
+ [ 2, 12, 22],
+ [ 3, 13, 23]]])
+
+ The same axis can be used several times. In that case, every use reduces
+ the corresponding original dimension:
+
+ >>> v = sliding_window_view(x, (2, 3), (1, 1))
+ >>> v.shape
+ (3, 1, 2, 3)
+ >>> v
+ array([[[[ 0, 1, 2],
+ [ 1, 2, 3]]],
+ [[[10, 11, 12],
+ [11, 12, 13]]],
+ [[[20, 21, 22],
+ [21, 22, 23]]]])
+
+ Combining with stepped slicing (`::step`), this can be used to take sliding
+ views which skip elements:
+
+ >>> x = np.arange(7)
+ >>> sliding_window_view(x, 5)[:, ::2]
+ array([[0, 2, 4],
+ [1, 3, 5],
+ [2, 4, 6]])
+
+ or views which move by multiple elements
+
+ >>> x = np.arange(7)
+ >>> sliding_window_view(x, 3)[::2, :]
+ array([[0, 1, 2],
+ [2, 3, 4],
+ [4, 5, 6]])
+
+ A common application of `sliding_window_view` is the calculation of running
+ statistics. The simplest example is the
+ `moving average <https://en.wikipedia.org/wiki/Moving_average>`_:
+
+ >>> x = np.arange(6)
+ >>> x.shape
+ (6,)
+ >>> v = sliding_window_view(x, 3)
+ >>> v.shape
+ (4, 3)
+ >>> v
+ array([[0, 1, 2],
+ [1, 2, 3],
+ [2, 3, 4],
+ [3, 4, 5]])
+ >>> moving_average = v.mean(axis=-1)
+ >>> moving_average
+ array([1., 2., 3., 4.])
+
+ Note that a sliding window approach is often **not** optimal (see Notes).
+ """
+ window_shape = (tuple(window_shape)
+ if np.iterable(window_shape)
+ else (window_shape,))
+ # first convert input to array, possibly keeping subclass
+ x = np.array(x, copy=None, subok=subok)
+
+ window_shape_array = np.array(window_shape)
+ if np.any(window_shape_array < 0):
+ raise ValueError('`window_shape` cannot contain negative values')
+
+ if axis is None:
+ axis = tuple(range(x.ndim))
+ if len(window_shape) != len(axis):
+ raise ValueError(f'Since axis is `None`, must provide '
+ f'window_shape for all dimensions of `x`; '
+ f'got {len(window_shape)} window_shape elements '
+ f'and `x.ndim` is {x.ndim}.')
+ else:
+ axis = normalize_axis_tuple(axis, x.ndim, allow_duplicate=True)
+ if len(window_shape) != len(axis):
+ raise ValueError(f'Must provide matching length window_shape and '
+ f'axis; got {len(window_shape)} window_shape '
+ f'elements and {len(axis)} axes elements.')
+
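+ # The trailing window dimensions reuse the strides of the axes they
+ # slide over, so the view shares memory with `x` and nothing is copied.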
+ out_strides = x.strides + tuple(x.strides[ax] for ax in axis)
+
+ # note: same axis can be windowed repeatedly
+ x_shape_trimmed = list(x.shape)
+ for ax, dim in zip(axis, window_shape):
+ if x_shape_trimmed[ax] < dim:
+ raise ValueError(
+ 'window shape cannot be larger than input array shape')
+ x_shape_trimmed[ax] -= dim - 1
+ out_shape = tuple(x_shape_trimmed) + window_shape
+ return as_strided(x, strides=out_strides, shape=out_shape,
+ subok=subok, writeable=writeable)
+
+
+def _broadcast_to(array, shape, subok, readonly):
+ shape = tuple(shape) if np.iterable(shape) else (shape,)
+ array = np.array(array, copy=None, subok=subok)
+ if not shape and array.shape:
+ raise ValueError('cannot broadcast a non-scalar to a scalar array')
+ if any(size < 0 for size in shape):
+ raise ValueError('all elements of broadcast shape must be non-'
+ 'negative')
+ extras = []
+ it = np.nditer(
+ (array,), flags=['multi_index', 'refs_ok', 'zerosize_ok'] + extras,
+ op_flags=['readonly'], itershape=shape, order='C')
+ with it:
+ # never really has writebackifcopy semantics
+ broadcast = it.itviews[0]
+ result = _maybe_view_as_subclass(array, broadcast)
+ # In a future version this will go away
+ if not readonly and array.flags._writeable_no_warn:
+ result.flags.writeable = True
+ result.flags._warn_on_write = True
+ return result
+
+
+def _broadcast_to_dispatcher(array, shape, subok=None):
+ return (array,)
+
+
+@array_function_dispatch(_broadcast_to_dispatcher, module='numpy')
+def broadcast_to(array, shape, subok=False):
+ """Broadcast an array to a new shape.
+
+ Parameters
+ ----------
+ array : array_like
+ The array to broadcast.
+ shape : tuple or int
+ The shape of the desired array. A single integer ``i`` is interpreted
+ as ``(i,)``.
+ subok : bool, optional
+ If True, then sub-classes will be passed-through, otherwise
+ the returned array will be forced to be a base-class array (default).
+
+ Returns
+ -------
+ broadcast : array
+ A readonly view on the original array with the given shape. It is
+ typically not contiguous. Furthermore, more than one element of a
+ broadcasted array may refer to a single memory location.
+
+ Raises
+ ------
+ ValueError
+ If the array is not compatible with the new shape according to NumPy's
+ broadcasting rules.
+
+ See Also
+ --------
+ broadcast
+ broadcast_arrays
+ broadcast_shapes
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> x = np.array([1, 2, 3])
+ >>> np.broadcast_to(x, (3, 3))
+ array([[1, 2, 3],
+ [1, 2, 3],
+ [1, 2, 3]])
+ """
+ return _broadcast_to(array, shape, subok=subok, readonly=True)
+
+
+def _broadcast_shape(*args):
+ """Returns the shape of the arrays that would result from broadcasting the
+ supplied arrays against each other.
+ """
+ # use the old-iterator because np.nditer does not handle size 0 arrays
+ # consistently
+ b = np.broadcast(*args[:32])
+ # unfortunately, it cannot handle 32 or more arguments directly
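+ # (e.g. with 100 arguments: args[:32] is handled here, and the loop
+ # below folds in args[32:63], args[63:94] and args[94:100])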
+ for pos in range(32, len(args), 31):
+ # ironically, np.broadcast does not properly handle np.broadcast
+ # objects (it treats them as scalars)
+ # use broadcasting to avoid allocating the full array
+ b = broadcast_to(0, b.shape)
+ b = np.broadcast(b, *args[pos:(pos + 31)])
+ return b.shape
+
+
+_size0_dtype = np.dtype([])
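+# Arrays of this zero-itemsize (empty structured) dtype carry full shape
+# information but allocate no element data, so `broadcast_shapes` can
+# create them cheaply even for very large shapes.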
+
+
+@set_module('numpy')
+def broadcast_shapes(*args):
+ """
+ Broadcast the input shapes into a single shape.
+
+ :ref:`Learn more about broadcasting here <basics.broadcasting>`.
+
+ .. versionadded:: 1.20.0
+
+ Parameters
+ ----------
+ *args : tuples of ints, or ints
+ The shapes to be broadcast against each other.
+
+ Returns
+ -------
+ tuple
+ Broadcasted shape.
+
+ Raises
+ ------
+ ValueError
+ If the shapes are not compatible and cannot be broadcast according
+ to NumPy's broadcasting rules.
+
+ See Also
+ --------
+ broadcast
+ broadcast_arrays
+ broadcast_to
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.broadcast_shapes((1, 2), (3, 1), (3, 2))
+ (3, 2)
+
+ >>> np.broadcast_shapes((6, 7), (5, 6, 1), (7,), (5, 1, 7))
+ (5, 6, 7)
+ """
+ arrays = [np.empty(x, dtype=_size0_dtype) for x in args]
+ return _broadcast_shape(*arrays)
+
+
+def _broadcast_arrays_dispatcher(*args, subok=None):
+ return args
+
+
+@array_function_dispatch(_broadcast_arrays_dispatcher, module='numpy')
+def broadcast_arrays(*args, subok=False):
+ """
+ Broadcast any number of arrays against each other.
+
+ Parameters
+ ----------
+ *args : array_likes
+ The arrays to broadcast.
+
+ subok : bool, optional
+ If True, then sub-classes will be passed-through, otherwise
+ the returned arrays will be forced to be a base-class array (default).
+
+ Returns
+ -------
+ broadcasted : tuple of arrays
+ These arrays are views on the original arrays. They are typically
+ not contiguous. Furthermore, more than one element of a
+ broadcasted array may refer to a single memory location. If you need
+ to write to the arrays, make copies first. While you can set the
+ ``writeable`` flag True, writing to a single output value may end up
+ changing more than one location in the output array.
+
+ .. deprecated:: 1.17
+ The output is currently marked so that if written to, a deprecation
+ warning will be emitted. A future version will set the
+ ``writeable`` flag False so writing to it will raise an error.
+
+ See Also
+ --------
+ broadcast
+ broadcast_to
+ broadcast_shapes
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> x = np.array([[1,2,3]])
+ >>> y = np.array([[4],[5]])
+ >>> np.broadcast_arrays(x, y)
+ (array([[1, 2, 3],
+ [1, 2, 3]]),
+ array([[4, 4, 4],
+ [5, 5, 5]]))
+
+ Here is a useful idiom for getting contiguous copies instead of
+ non-contiguous views.
+
+ >>> [np.array(a) for a in np.broadcast_arrays(x, y)]
+ [array([[1, 2, 3],
+ [1, 2, 3]]),
+ array([[4, 4, 4],
+ [5, 5, 5]])]
+
+ """
+ # nditer is not used here to avoid the limit of 32 arrays.
+ # Otherwise, something like the following one-liner would suffice:
+ # return np.nditer(args, flags=['multi_index', 'zerosize_ok'],
+ # order='C').itviews
+
+ args = [np.array(_m, copy=None, subok=subok) for _m in args]
+
+ shape = _broadcast_shape(*args)
+
+ result = [array if array.shape == shape
+ else _broadcast_to(array, shape, subok=subok, readonly=False)
+ for array in args]
+ return tuple(result)
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/_stride_tricks_impl.pyi b/.venv/lib/python3.12/site-packages/numpy/lib/_stride_tricks_impl.pyi
new file mode 100644
index 0000000..a7005d7
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/_stride_tricks_impl.pyi
@@ -0,0 +1,74 @@
+from collections.abc import Iterable
+from typing import Any, SupportsIndex, TypeVar, overload
+
+from numpy import generic
+from numpy._typing import ArrayLike, NDArray, _AnyShape, _ArrayLike, _ShapeLike
+
+__all__ = ["broadcast_to", "broadcast_arrays", "broadcast_shapes"]
+
+_ScalarT = TypeVar("_ScalarT", bound=generic)
+
+class DummyArray:
+ __array_interface__: dict[str, Any]
+ base: NDArray[Any] | None
+ def __init__(
+ self,
+ interface: dict[str, Any],
+ base: NDArray[Any] | None = ...,
+ ) -> None: ...
+
+@overload
+def as_strided(
+ x: _ArrayLike[_ScalarT],
+ shape: Iterable[int] | None = ...,
+ strides: Iterable[int] | None = ...,
+ subok: bool = ...,
+ writeable: bool = ...,
+) -> NDArray[_ScalarT]: ...
+@overload
+def as_strided(
+ x: ArrayLike,
+ shape: Iterable[int] | None = ...,
+ strides: Iterable[int] | None = ...,
+ subok: bool = ...,
+ writeable: bool = ...,
+) -> NDArray[Any]: ...
+
+@overload
+def sliding_window_view(
+ x: _ArrayLike[_ScalarT],
+ window_shape: int | Iterable[int],
+ axis: SupportsIndex | None = ...,
+ *,
+ subok: bool = ...,
+ writeable: bool = ...,
+) -> NDArray[_ScalarT]: ...
+@overload
+def sliding_window_view(
+ x: ArrayLike,
+ window_shape: int | Iterable[int],
+ axis: SupportsIndex | None = ...,
+ *,
+ subok: bool = ...,
+ writeable: bool = ...,
+) -> NDArray[Any]: ...
+
+@overload
+def broadcast_to(
+ array: _ArrayLike[_ScalarT],
+ shape: int | Iterable[int],
+ subok: bool = ...,
+) -> NDArray[_ScalarT]: ...
+@overload
+def broadcast_to(
+ array: ArrayLike,
+ shape: int | Iterable[int],
+ subok: bool = ...,
+) -> NDArray[Any]: ...
+
+def broadcast_shapes(*args: _ShapeLike) -> _AnyShape: ...
+
+def broadcast_arrays(
+ *args: ArrayLike,
+ subok: bool = ...,
+) -> tuple[NDArray[Any], ...]: ...
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/_twodim_base_impl.py b/.venv/lib/python3.12/site-packages/numpy/lib/_twodim_base_impl.py
new file mode 100644
index 0000000..dc6a558
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/_twodim_base_impl.py
@@ -0,0 +1,1201 @@
+""" Basic functions for manipulating 2d arrays
+
+"""
+import functools
+import operator
+
+from numpy._core import iinfo, overrides
+from numpy._core._multiarray_umath import _array_converter
+from numpy._core.numeric import (
+ arange,
+ asanyarray,
+ asarray,
+ diagonal,
+ empty,
+ greater_equal,
+ indices,
+ int8,
+ int16,
+ int32,
+ int64,
+ intp,
+ multiply,
+ nonzero,
+ ones,
+ promote_types,
+ where,
+ zeros,
+)
+from numpy._core.overrides import finalize_array_function_like, set_module
+from numpy.lib._stride_tricks_impl import broadcast_to
+
+__all__ = [
+ 'diag', 'diagflat', 'eye', 'fliplr', 'flipud', 'tri', 'triu',
+ 'tril', 'vander', 'histogram2d', 'mask_indices', 'tril_indices',
+ 'tril_indices_from', 'triu_indices', 'triu_indices_from', ]
+
+
+array_function_dispatch = functools.partial(
+ overrides.array_function_dispatch, module='numpy')
+
+
+i1 = iinfo(int8)
+i2 = iinfo(int16)
+i4 = iinfo(int32)
+
+
+def _min_int(low, high):
+ """ get the smallest signed integer type that fits the range """
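+ # e.g. _min_int(0, 100) -> int8, _min_int(-1, 40000) -> int32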
+ if high <= i1.max and low >= i1.min:
+ return int8
+ if high <= i2.max and low >= i2.min:
+ return int16
+ if high <= i4.max and low >= i4.min:
+ return int32
+ return int64
+
+
+def _flip_dispatcher(m):
+ return (m,)
+
+
+@array_function_dispatch(_flip_dispatcher)
+def fliplr(m):
+ """
+ Reverse the order of elements along axis 1 (left/right).
+
+ For a 2-D array, this flips the entries in each row in the left/right
+ direction. Columns are preserved, but appear in a different order than
+ before.
+
+ Parameters
+ ----------
+ m : array_like
+ Input array, must be at least 2-D.
+
+ Returns
+ -------
+ f : ndarray
+ A view of `m` with the columns reversed. Since a view
+ is returned, this operation is :math:`\\mathcal O(1)`.
+
+ See Also
+ --------
+ flipud : Flip array in the up/down direction.
+ flip : Flip array in one or more dimensions.
+ rot90 : Rotate array counterclockwise.
+
+ Notes
+ -----
+ Equivalent to ``m[:,::-1]`` or ``np.flip(m, axis=1)``.
+ Requires the array to be at least 2-D.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> A = np.diag([1.,2.,3.])
+ >>> A
+ array([[1., 0., 0.],
+ [0., 2., 0.],
+ [0., 0., 3.]])
+ >>> np.fliplr(A)
+ array([[0., 0., 1.],
+ [0., 2., 0.],
+ [3., 0., 0.]])
+
+ >>> rng = np.random.default_rng()
+ >>> A = rng.normal(size=(2,3,5))
+ >>> np.all(np.fliplr(A) == A[:,::-1,...])
+ True
+
+ """
+ m = asanyarray(m)
+ if m.ndim < 2:
+ raise ValueError("Input must be >= 2-d.")
+ return m[:, ::-1]
+
+
+@array_function_dispatch(_flip_dispatcher)
+def flipud(m):
+ """
+ Reverse the order of elements along axis 0 (up/down).
+
+ For a 2-D array, this flips the entries in each column in the up/down
+ direction. Rows are preserved, but appear in a different order than before.
+
+ Parameters
+ ----------
+ m : array_like
+ Input array.
+
+ Returns
+ -------
+ out : array_like
+ A view of `m` with the rows reversed. Since a view is
+ returned, this operation is :math:`\\mathcal O(1)`.
+
+ See Also
+ --------
+ fliplr : Flip array in the left/right direction.
+ flip : Flip array in one or more dimensions.
+ rot90 : Rotate array counterclockwise.
+
+ Notes
+ -----
+ Equivalent to ``m[::-1, ...]`` or ``np.flip(m, axis=0)``.
+ Requires the array to be at least 1-D.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> A = np.diag([1.0, 2, 3])
+ >>> A
+ array([[1., 0., 0.],
+ [0., 2., 0.],
+ [0., 0., 3.]])
+ >>> np.flipud(A)
+ array([[0., 0., 3.],
+ [0., 2., 0.],
+ [1., 0., 0.]])
+
+ >>> rng = np.random.default_rng()
+ >>> A = rng.normal(size=(2,3,5))
+ >>> np.all(np.flipud(A) == A[::-1,...])
+ True
+
+ >>> np.flipud([1,2])
+ array([2, 1])
+
+ """
+ m = asanyarray(m)
+ if m.ndim < 1:
+ raise ValueError("Input must be >= 1-d.")
+ return m[::-1, ...]
+
+
+@finalize_array_function_like
+@set_module('numpy')
+def eye(N, M=None, k=0, dtype=float, order='C', *, device=None, like=None):
+ """
+ Return a 2-D array with ones on the diagonal and zeros elsewhere.
+
+ Parameters
+ ----------
+ N : int
+ Number of rows in the output.
+ M : int, optional
+ Number of columns in the output. If None, defaults to `N`.
+ k : int, optional
+ Index of the diagonal: 0 (the default) refers to the main diagonal,
+ a positive value refers to an upper diagonal, and a negative value
+ to a lower diagonal.
+ dtype : data-type, optional
+ Data-type of the returned array.
+ order : {'C', 'F'}, optional
+ Whether the output should be stored in row-major (C-style) or
+ column-major (Fortran-style) order in memory.
+ device : str, optional
+ The device on which to place the created array. Default: None.
+ For Array-API interoperability only, so must be ``"cpu"`` if passed.
+
+ .. versionadded:: 2.0.0
+ ${ARRAY_FUNCTION_LIKE}
+
+ .. versionadded:: 1.20.0
+
+ Returns
+ -------
+ I : ndarray of shape (N,M)
+ An array where all elements are equal to zero, except for the `k`-th
+ diagonal, whose values are equal to one.
+
+ See Also
+ --------
+ identity : (almost) equivalent function
+ diag : diagonal 2-D array from a 1-D array specified by the user.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.eye(2, dtype=int)
+ array([[1, 0],
+ [0, 1]])
+ >>> np.eye(3, k=1)
+ array([[0., 1., 0.],
+ [0., 0., 1.],
+ [0., 0., 0.]])
+
+ """
+ if like is not None:
+ return _eye_with_like(
+ like, N, M=M, k=k, dtype=dtype, order=order, device=device
+ )
+ if M is None:
+ M = N
+ m = zeros((N, M), dtype=dtype, order=order, device=device)
+ if k >= M:
+ return m
+ # Ensure M and k are integers, so we don't get any surprise casting
+ # results in the expressions `M-k` and `M+1` used below. This avoids
+ # a problem with inputs with type (for example) np.uint64.
+ M = operator.index(M)
+ k = operator.index(k)
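+ # A step of M + 1 through the flattened array moves one row down and
+ # one column right, i.e. along a diagonal; the start offset is k for
+ # upper diagonals and (-k) * M for lower ones.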
+ if k >= 0:
+ i = k
+ else:
+ i = (-k) * M
+ m[:M - k].flat[i::M + 1] = 1
+ return m
+
+
+_eye_with_like = array_function_dispatch()(eye)
+
+
+def _diag_dispatcher(v, k=None):
+ return (v,)
+
+
+@array_function_dispatch(_diag_dispatcher)
+def diag(v, k=0):
+ """
+ Extract a diagonal or construct a diagonal array.
+
+ See the more detailed documentation for ``numpy.diagonal`` if you use this
+ function to extract a diagonal and wish to write to the resulting array;
+ whether it returns a copy or a view depends on what version of numpy you
+ are using.
+
+ Parameters
+ ----------
+ v : array_like
+ If `v` is a 2-D array, return a copy of its `k`-th diagonal.
+ If `v` is a 1-D array, return a 2-D array with `v` on the `k`-th
+ diagonal.
+ k : int, optional
+ Diagonal in question. The default is 0. Use `k>0` for diagonals
+ above the main diagonal, and `k<0` for diagonals below the main
+ diagonal.
+
+ Returns
+ -------
+ out : ndarray
+ The extracted diagonal or constructed diagonal array.
+
+ See Also
+ --------
+ diagonal : Return specified diagonals.
+ diagflat : Create a 2-D array with the flattened input as a diagonal.
+ trace : Sum along diagonals.
+ triu : Upper triangle of an array.
+ tril : Lower triangle of an array.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> x = np.arange(9).reshape((3,3))
+ >>> x
+ array([[0, 1, 2],
+ [3, 4, 5],
+ [6, 7, 8]])
+
+ >>> np.diag(x)
+ array([0, 4, 8])
+ >>> np.diag(x, k=1)
+ array([1, 5])
+ >>> np.diag(x, k=-1)
+ array([3, 7])
+
+ >>> np.diag(np.diag(x))
+ array([[0, 0, 0],
+ [0, 4, 0],
+ [0, 0, 8]])
+
+ """
+ v = asanyarray(v)
+ s = v.shape
+ if len(s) == 1:
+ n = s[0] + abs(k)
+ res = zeros((n, n), v.dtype)
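+ # same flat-indexing trick as in `eye`: a step of n + 1 through the
+ # flattened array follows the diagonal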
+ if k >= 0:
+ i = k
+ else:
+ i = (-k) * n
+ res[:n - k].flat[i::n + 1] = v
+ return res
+ elif len(s) == 2:
+ return diagonal(v, k)
+ else:
+ raise ValueError("Input must be 1- or 2-d.")
+
+
+@array_function_dispatch(_diag_dispatcher)
+def diagflat(v, k=0):
+ """
+ Create a two-dimensional array with the flattened input as a diagonal.
+
+ Parameters
+ ----------
+ v : array_like
+ Input data, which is flattened and set as the `k`-th
+ diagonal of the output.
+ k : int, optional
+ Diagonal to set; 0, the default, corresponds to the "main" diagonal,
+ a positive (negative) `k` giving the number of the diagonal above
+ (below) the main.
+
+ Returns
+ -------
+ out : ndarray
+ The 2-D output array.
+
+ See Also
+ --------
+ diag : MATLAB work-alike for 1-D and 2-D arrays.
+ diagonal : Return specified diagonals.
+ trace : Sum along diagonals.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.diagflat([[1,2], [3,4]])
+ array([[1, 0, 0, 0],
+ [0, 2, 0, 0],
+ [0, 0, 3, 0],
+ [0, 0, 0, 4]])
+
+ >>> np.diagflat([1,2], 1)
+ array([[0, 1, 0],
+ [0, 0, 2],
+ [0, 0, 0]])
+
+ """
+ conv = _array_converter(v)
+ v, = conv.as_arrays(subok=False)
+ v = v.ravel()
+ s = len(v)
+ n = s + abs(k)
+ res = zeros((n, n), v.dtype)
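+ # The flat index of element (i, j) is i * n + j, so the k-th diagonal
+ # lies at fi = i + k + i * n for k >= 0 and at fi = i + (i - k) * n
+ # for k < 0.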
+ if (k >= 0):
+ i = arange(0, n - k, dtype=intp)
+ fi = i + k + i * n
+ else:
+ i = arange(0, n + k, dtype=intp)
+ fi = i + (i - k) * n
+ res.flat[fi] = v
+
+ return conv.wrap(res)
+
+
+@finalize_array_function_like
+@set_module('numpy')
+def tri(N, M=None, k=0, dtype=float, *, like=None):
+ """
+ An array with ones at and below the given diagonal and zeros elsewhere.
+
+ Parameters
+ ----------
+ N : int
+ Number of rows in the array.
+ M : int, optional
+ Number of columns in the array.
+ By default, `M` is taken equal to `N`.
+ k : int, optional
+ The sub-diagonal at and below which the array is filled.
+ `k` = 0 is the main diagonal, while `k` < 0 is below it,
+ and `k` > 0 is above. The default is 0.
+ dtype : dtype, optional
+ Data type of the returned array. The default is float.
+ ${ARRAY_FUNCTION_LIKE}
+
+ .. versionadded:: 1.20.0
+
+ Returns
+ -------
+ tri : ndarray of shape (N, M)
+ Array with its lower triangle filled with ones and zero elsewhere;
+ in other words ``T[i,j] == 1`` for ``j <= i + k``, 0 otherwise.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.tri(3, 5, 2, dtype=int)
+ array([[1, 1, 1, 0, 0],
+ [1, 1, 1, 1, 0],
+ [1, 1, 1, 1, 1]])
+
+ >>> np.tri(3, 5, -1)
+ array([[0., 0., 0., 0., 0.],
+ [1., 0., 0., 0., 0.],
+ [1., 1., 0., 0., 0.]])
+
+ """
+ if like is not None:
+ return _tri_with_like(like, N, M=M, k=k, dtype=dtype)
+
+ if M is None:
+ M = N
+
+ m = greater_equal.outer(arange(N, dtype=_min_int(0, N)),
+ arange(-k, M - k, dtype=_min_int(-k, M - k)))
+
+ # Avoid making a copy if the requested type is already bool
+ m = m.astype(dtype, copy=False)
+
+ return m
+
+
+_tri_with_like = array_function_dispatch()(tri)
+
+
+def _trilu_dispatcher(m, k=None):
+ return (m,)
+
+
+@array_function_dispatch(_trilu_dispatcher)
+def tril(m, k=0):
+ """
+ Lower triangle of an array.
+
+ Return a copy of an array with elements above the `k`-th diagonal zeroed.
+ For arrays with ``ndim`` exceeding 2, `tril` will apply to the final two
+ axes.
+
+ Parameters
+ ----------
+ m : array_like, shape (..., M, N)
+ Input array.
+ k : int, optional
+ Diagonal above which to zero elements. `k = 0` (the default) is the
+ main diagonal, `k < 0` is below it and `k > 0` is above.
+
+ Returns
+ -------
+ tril : ndarray, shape (..., M, N)
+ Lower triangle of `m`, of same shape and data-type as `m`.
+
+ See Also
+ --------
+ triu : same thing, only for the upper triangle
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.tril([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
+ array([[ 0, 0, 0],
+ [ 4, 0, 0],
+ [ 7, 8, 0],
+ [10, 11, 12]])
+
+ >>> np.tril(np.arange(3*4*5).reshape(3, 4, 5))
+ array([[[ 0, 0, 0, 0, 0],
+ [ 5, 6, 0, 0, 0],
+ [10, 11, 12, 0, 0],
+ [15, 16, 17, 18, 0]],
+ [[20, 0, 0, 0, 0],
+ [25, 26, 0, 0, 0],
+ [30, 31, 32, 0, 0],
+ [35, 36, 37, 38, 0]],
+ [[40, 0, 0, 0, 0],
+ [45, 46, 0, 0, 0],
+ [50, 51, 52, 0, 0],
+ [55, 56, 57, 58, 0]]])
+
+ """
+ m = asanyarray(m)
+ mask = tri(*m.shape[-2:], k=k, dtype=bool)
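+ # `where` broadcasts the 2-D mask against any leading axes of `m`,
+ # which is what applies the triangle to the final two axes only.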
+
+ return where(mask, m, zeros(1, m.dtype))
+
+
+@array_function_dispatch(_trilu_dispatcher)
+def triu(m, k=0):
+ """
+ Upper triangle of an array.
+
+ Return a copy of an array with the elements below the `k`-th diagonal
+ zeroed. For arrays with ``ndim`` exceeding 2, `triu` will apply to the
+ final two axes.
+
+ Please refer to the documentation for `tril` for further details.
+
+ See Also
+ --------
+ tril : lower triangle of an array
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.triu([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
+ array([[ 1, 2, 3],
+ [ 4, 5, 6],
+ [ 0, 8, 9],
+ [ 0, 0, 12]])
+
+ >>> np.triu(np.arange(3*4*5).reshape(3, 4, 5))
+ array([[[ 0, 1, 2, 3, 4],
+ [ 0, 6, 7, 8, 9],
+ [ 0, 0, 12, 13, 14],
+ [ 0, 0, 0, 18, 19]],
+ [[20, 21, 22, 23, 24],
+ [ 0, 26, 27, 28, 29],
+ [ 0, 0, 32, 33, 34],
+ [ 0, 0, 0, 38, 39]],
+ [[40, 41, 42, 43, 44],
+ [ 0, 46, 47, 48, 49],
+ [ 0, 0, 52, 53, 54],
+ [ 0, 0, 0, 58, 59]]])
+
+ """
+ m = asanyarray(m)
+ mask = tri(*m.shape[-2:], k=k - 1, dtype=bool)
+
+ return where(mask, zeros(1, m.dtype), m)
+
+
+def _vander_dispatcher(x, N=None, increasing=None):
+ return (x,)
+
+
+# Originally borrowed from John Hunter and matplotlib
+@array_function_dispatch(_vander_dispatcher)
+def vander(x, N=None, increasing=False):
+ """
+ Generate a Vandermonde matrix.
+
+ The columns of the output matrix are powers of the input vector. The
+ order of the powers is determined by the `increasing` boolean argument.
+ Specifically, when `increasing` is False, the `i`-th output column is
+ the input vector raised element-wise to the power of ``N - i - 1``. Such
+ a matrix with a geometric progression in each row is named for
+ Alexandre-Theophile Vandermonde.
+
+ Parameters
+ ----------
+ x : array_like
+ 1-D input array.
+ N : int, optional
+ Number of columns in the output. If `N` is not specified, a square
+ array is returned (``N = len(x)``).
+ increasing : bool, optional
+ Order of the powers of the columns. If True, the powers increase
+ from left to right, if False (the default) they are reversed.
+
+ Returns
+ -------
+ out : ndarray
+ Vandermonde matrix. If `increasing` is False, the first column is
+ ``x^(N-1)``, the second ``x^(N-2)`` and so forth. If `increasing` is
+ True, the columns are ``x^0, x^1, ..., x^(N-1)``.
+
+ See Also
+ --------
+ polynomial.polynomial.polyvander
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> x = np.array([1, 2, 3, 5])
+ >>> N = 3
+ >>> np.vander(x, N)
+ array([[ 1, 1, 1],
+ [ 4, 2, 1],
+ [ 9, 3, 1],
+ [25, 5, 1]])
+
+ >>> np.column_stack([x**(N-1-i) for i in range(N)])
+ array([[ 1, 1, 1],
+ [ 4, 2, 1],
+ [ 9, 3, 1],
+ [25, 5, 1]])
+
+ >>> x = np.array([1, 2, 3, 5])
+ >>> np.vander(x)
+ array([[ 1, 1, 1, 1],
+ [ 8, 4, 2, 1],
+ [ 27, 9, 3, 1],
+ [125, 25, 5, 1]])
+ >>> np.vander(x, increasing=True)
+ array([[ 1, 1, 1, 1],
+ [ 1, 2, 4, 8],
+ [ 1, 3, 9, 27],
+ [ 1, 5, 25, 125]])
+
+ The determinant of a square Vandermonde matrix is the product
+ of the differences between the values of the input vector:
+
+ >>> np.linalg.det(np.vander(x))
+ 48.000000000000043 # may vary
+ >>> (5-3)*(5-2)*(5-1)*(3-2)*(3-1)*(2-1)
+ 48
+
+ """
+ x = asarray(x)
+ if x.ndim != 1:
+ raise ValueError("x must be a one-dimensional array or sequence.")
+ if N is None:
+ N = len(x)
+
+ v = empty((len(x), N), dtype=promote_types(x.dtype, int))
+ tmp = v[:, ::-1] if not increasing else v
+
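+ # Fill column 0 with ones, broadcast `x` into the remaining columns,
+ # then turn those columns into successive powers with a cumulative
+ # product along axis 1.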
+ if N > 0:
+ tmp[:, 0] = 1
+ if N > 1:
+ tmp[:, 1:] = x[:, None]
+ multiply.accumulate(tmp[:, 1:], out=tmp[:, 1:], axis=1)
+
+ return v
+
+
+def _histogram2d_dispatcher(x, y, bins=None, range=None, density=None,
+ weights=None):
+ yield x
+ yield y
+
+ # This terrible logic is adapted from the checks in histogram2d
+ try:
+ N = len(bins)
+ except TypeError:
+ N = 1
+ if N == 2:
+ yield from bins # bins=[x, y]
+ else:
+ yield bins
+
+ yield weights
+
+
+@array_function_dispatch(_histogram2d_dispatcher)
+def histogram2d(x, y, bins=10, range=None, density=None, weights=None):
+ """
+ Compute the bi-dimensional histogram of two data samples.
+
+ Parameters
+ ----------
+ x : array_like, shape (N,)
+ An array containing the x coordinates of the points to be
+ histogrammed.
+ y : array_like, shape (N,)
+ An array containing the y coordinates of the points to be
+ histogrammed.
+ bins : int or array_like or [int, int] or [array, array], optional
+ The bin specification:
+
+ * If int, the number of bins for the two dimensions (nx=ny=bins).
+ * If array_like, the bin edges for the two dimensions
+ (x_edges=y_edges=bins).
+ * If [int, int], the number of bins in each dimension
+ (nx, ny = bins).
+ * If [array, array], the bin edges in each dimension
+ (x_edges, y_edges = bins).
+ * A combination [int, array] or [array, int], where int
+ is the number of bins and array is the bin edges.
+
+ range : array_like, shape(2,2), optional
+ The leftmost and rightmost edges of the bins along each dimension
+ (if not specified explicitly in the `bins` parameters):
+ ``[[xmin, xmax], [ymin, ymax]]``. All values outside of this range
+ will be considered outliers and not tallied in the histogram.
+ density : bool, optional
+ If False, the default, returns the number of samples in each bin.
+ If True, returns the probability *density* function at the bin,
+ ``bin_count / sample_count / bin_area``.
+ weights : array_like, shape(N,), optional
+ An array of values ``w_i`` weighing each sample ``(x_i, y_i)``.
+ Weights are normalized to 1 if `density` is True. If `density` is
+ False, the values of the returned histogram are equal to the sum of
+ the weights belonging to the samples falling into each bin.
+
+ Returns
+ -------
+ H : ndarray, shape(nx, ny)
+ The bi-dimensional histogram of samples `x` and `y`. Values in `x`
+ are histogrammed along the first dimension and values in `y` are
+ histogrammed along the second dimension.
+ xedges : ndarray, shape(nx+1,)
+ The bin edges along the first dimension.
+ yedges : ndarray, shape(ny+1,)
+ The bin edges along the second dimension.
+
+ See Also
+ --------
+ histogram : 1D histogram
+ histogramdd : Multidimensional histogram
+
+ Notes
+ -----
+ When `density` is True, then the returned histogram is the sample
+ density, defined such that the sum over bins of the product
+ ``bin_value * bin_area`` is 1.
+
+ Please note that the histogram does not follow the Cartesian convention
+ where `x` values are on the abscissa and `y` values on the ordinate
+ axis. Rather, `x` is histogrammed along the first dimension of the
+ array (vertical), and `y` along the second dimension of the array
+ (horizontal). This ensures compatibility with `histogramdd`.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> from matplotlib.image import NonUniformImage
+ >>> import matplotlib.pyplot as plt
+
+ Construct a 2-D histogram with variable bin width. First define the bin
+ edges:
+
+ >>> xedges = [0, 1, 3, 5]
+ >>> yedges = [0, 2, 3, 4, 6]
+
+ Next we create a histogram H with random bin content:
+
+ >>> x = np.random.normal(2, 1, 100)
+ >>> y = np.random.normal(1, 1, 100)
+ >>> H, xedges, yedges = np.histogram2d(x, y, bins=(xedges, yedges))
+ >>> # Histogram does not follow Cartesian convention (see Notes),
+ >>> # therefore transpose H for visualization purposes.
+ >>> H = H.T
+
+ :func:`imshow <matplotlib.pyplot.imshow>` can only display square bins:
+
+ >>> fig = plt.figure(figsize=(7, 3))
+ >>> ax = fig.add_subplot(131, title='imshow: square bins')
+ >>> plt.imshow(H, interpolation='nearest', origin='lower',
+ ... extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]])
+ <matplotlib.image.AxesImage object at 0x...>
+
+ :func:`pcolormesh <matplotlib.pyplot.pcolormesh>` can display actual edges:
+
+ >>> ax = fig.add_subplot(132, title='pcolormesh: actual edges',
+ ... aspect='equal')
+ >>> X, Y = np.meshgrid(xedges, yedges)
+ >>> ax.pcolormesh(X, Y, H)
+ <matplotlib.collections.QuadMesh object at 0x...>
+
+ :class:`NonUniformImage <matplotlib.image.NonUniformImage>` can be used to
+ display actual bin edges with interpolation:
+
+ >>> ax = fig.add_subplot(133, title='NonUniformImage: interpolated',
+ ... aspect='equal', xlim=xedges[[0, -1]], ylim=yedges[[0, -1]])
+ >>> im = NonUniformImage(ax, interpolation='bilinear')
+ >>> xcenters = (xedges[:-1] + xedges[1:]) / 2
+ >>> ycenters = (yedges[:-1] + yedges[1:]) / 2
+ >>> im.set_data(xcenters, ycenters, H)
+ >>> ax.add_image(im)
+ >>> plt.show()
+
+ It is also possible to construct a 2-D histogram without specifying bin
+ edges:
+
+ >>> # Generate non-symmetric test data
+ >>> n = 10000
+ >>> x = np.linspace(1, 100, n)
+ >>> y = 2*np.log(x) + np.random.rand(n) - 0.5
+ >>> # Compute 2d histogram. Note the order of x/y and xedges/yedges
+ >>> H, yedges, xedges = np.histogram2d(y, x, bins=20)
+
+ Now we can plot the histogram using
+ :func:`pcolormesh <matplotlib.pyplot.pcolormesh>`, and a
+ :func:`hexbin <matplotlib.pyplot.hexbin>` for comparison.
+
+ >>> # Plot histogram using pcolormesh
+ >>> fig, (ax1, ax2) = plt.subplots(ncols=2, sharey=True)
+ >>> ax1.pcolormesh(xedges, yedges, H, cmap='rainbow')
+ >>> ax1.plot(x, 2*np.log(x), 'k-')
+ >>> ax1.set_xlim(x.min(), x.max())
+ >>> ax1.set_ylim(y.min(), y.max())
+ >>> ax1.set_xlabel('x')
+ >>> ax1.set_ylabel('y')
+ >>> ax1.set_title('histogram2d')
+ >>> ax1.grid()
+
+ >>> # Create hexbin plot for comparison
+ >>> ax2.hexbin(x, y, gridsize=20, cmap='rainbow')
+ >>> ax2.plot(x, 2*np.log(x), 'k-')
+ >>> ax2.set_title('hexbin')
+ >>> ax2.set_xlim(x.min(), x.max())
+ >>> ax2.set_xlabel('x')
+ >>> ax2.grid()
+
+ >>> plt.show()
+ """
+ from numpy import histogramdd
+
+ if len(x) != len(y):
+ raise ValueError('x and y must have the same length.')
+
+ try:
+ N = len(bins)
+ except TypeError:
+ N = 1
+
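+ # Anything that is not a pair (a single int, or one array of edges)
+ # is shared between both dimensions.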
+ if N not in {1, 2}:
+ xedges = yedges = asarray(bins)
+ bins = [xedges, yedges]
+ hist, edges = histogramdd([x, y], bins, range, density, weights)
+ return hist, edges[0], edges[1]
+
+
+@set_module('numpy')
+def mask_indices(n, mask_func, k=0):
+ """
+ Return the indices to access (n, n) arrays, given a masking function.
+
+ Assume `mask_func` is a function that, for a square array a of size
+ ``(n, n)`` with a possible offset argument `k`, when called as
+ ``mask_func(a, k)`` returns a new array with zeros in certain locations
+ (functions like `triu` or `tril` do precisely this). Then this function
+ returns the indices where the non-zero values would be located.
+
+ Parameters
+ ----------
+ n : int
+ The returned indices will be valid to access arrays of shape (n, n).
+ mask_func : callable
+ A function whose call signature is similar to that of `triu`, `tril`.
+ That is, ``mask_func(x, k)`` returns a boolean array, shaped like `x`.
+ `k` is an optional argument to the function.
+ k : scalar
+ An optional argument which is passed through to `mask_func`. Functions
+ like `triu`, `tril` take a second argument that is interpreted as an
+ offset.
+
+ Returns
+ -------
+ indices : tuple of arrays
+ The two arrays of indices corresponding to the locations where
+ ``mask_func(np.ones((n, n)), k)`` is True.
+
+ See Also
+ --------
+ triu, tril, triu_indices, tril_indices
+
+ Examples
+ --------
+ >>> import numpy as np
+
+ These are the indices that would allow you to access the upper triangular
+ part of any 3x3 array:
+
+ >>> iu = np.mask_indices(3, np.triu)
+
+ For example, if `a` is a 3x3 array:
+
+ >>> a = np.arange(9).reshape(3, 3)
+ >>> a
+ array([[0, 1, 2],
+ [3, 4, 5],
+ [6, 7, 8]])
+ >>> a[iu]
+ array([0, 1, 2, 4, 5, 8])
+
+ An offset can be passed also to the masking function. This gets us the
+ indices starting on the first diagonal right of the main one:
+
+ >>> iu1 = np.mask_indices(3, np.triu, 1)
+
+ with which we now extract only three elements:
+
+ >>> a[iu1]
+ array([1, 2, 5])
+
+ """
+ m = ones((n, n), int)
+ a = mask_func(m, k)
+ return nonzero(a != 0)
+
+
+@set_module('numpy')
+def tril_indices(n, k=0, m=None):
+ """
+ Return the indices for the lower-triangle of an (n, m) array.
+
+ Parameters
+ ----------
+ n : int
+ The row dimension of the arrays for which the returned
+ indices will be valid.
+ k : int, optional
+ Diagonal offset (see `tril` for details).
+ m : int, optional
+ The column dimension of the arrays for which the returned
+ arrays will be valid.
+ By default `m` is taken equal to `n`.
+
+
+ Returns
+ -------
+ inds : tuple of arrays
+ The row and column indices, respectively. The row indices are sorted
+ in non-decreasing order, and the corresponding column indices are
+ strictly increasing for each row.
+
+ See also
+ --------
+ triu_indices : similar function, for upper-triangular.
+ mask_indices : generic function accepting an arbitrary mask function.
+ tril, triu
+
+ Examples
+ --------
+ >>> import numpy as np
+
+ Compute two different sets of indices to access 4x4 arrays, one for the
+ lower triangular part starting at the main diagonal, and one starting two
+ diagonals further right:
+
+ >>> il1 = np.tril_indices(4)
+ >>> il1
+ (array([0, 1, 1, 2, 2, 2, 3, 3, 3, 3]), array([0, 0, 1, 0, 1, 2, 0, 1, 2, 3]))
+
+ Note that row indices (first array) are non-decreasing, and the corresponding
+ column indices (second array) are strictly increasing for each row.
+ Here is how they can be used with a sample array:
+
+ >>> a = np.arange(16).reshape(4, 4)
+ >>> a
+ array([[ 0, 1, 2, 3],
+ [ 4, 5, 6, 7],
+ [ 8, 9, 10, 11],
+ [12, 13, 14, 15]])
+
+ Both for indexing:
+
+ >>> a[il1]
+ array([ 0, 4, 5, ..., 13, 14, 15])
+
+ And for assigning values:
+
+ >>> a[il1] = -1
+ >>> a
+ array([[-1, 1, 2, 3],
+ [-1, -1, 6, 7],
+ [-1, -1, -1, 11],
+ [-1, -1, -1, -1]])
+
+ These cover almost the whole array (two diagonals right of the main one):
+
+ >>> il2 = np.tril_indices(4, 2)
+ >>> a[il2] = -10
+ >>> a
+ array([[-10, -10, -10, 3],
+ [-10, -10, -10, -10],
+ [-10, -10, -10, -10],
+ [-10, -10, -10, -10]])
+
+ """
+ tri_ = tri(n, m, k=k, dtype=bool)
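+ # `indices(..., sparse=True)` returns open row/column grids;
+ # broadcasting each grid to the mask's shape and indexing it with the
+ # boolean mask yields the coordinates of the True entries without
+ # materializing a dense index grid.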
+
+ return tuple(broadcast_to(inds, tri_.shape)[tri_]
+ for inds in indices(tri_.shape, sparse=True))
+
+
+def _trilu_indices_form_dispatcher(arr, k=None):
+ return (arr,)
+
+
+@array_function_dispatch(_trilu_indices_form_dispatcher)
+def tril_indices_from(arr, k=0):
+ """
+ Return the indices for the lower-triangle of arr.
+
+ See `tril_indices` for full details.
+
+ Parameters
+ ----------
+ arr : array_like
+ The indices will be valid for 2-D arrays with the same shape as
+ `arr`.
+ k : int, optional
+ Diagonal offset (see `tril` for details).
+
+ Examples
+ --------
+ >>> import numpy as np
+
+ Create a 4 by 4 array
+
+ >>> a = np.arange(16).reshape(4, 4)
+ >>> a
+ array([[ 0, 1, 2, 3],
+ [ 4, 5, 6, 7],
+ [ 8, 9, 10, 11],
+ [12, 13, 14, 15]])
+
+ Pass the array to get the indices of the lower triangular elements.
+
+ >>> trili = np.tril_indices_from(a)
+ >>> trili
+ (array([0, 1, 1, 2, 2, 2, 3, 3, 3, 3]), array([0, 0, 1, 0, 1, 2, 0, 1, 2, 3]))
+
+ >>> a[trili]
+ array([ 0, 4, 5, 8, 9, 10, 12, 13, 14, 15])
+
+ This is syntactic sugar for tril_indices().
+
+ >>> np.tril_indices(a.shape[0])
+ (array([0, 1, 1, 2, 2, 2, 3, 3, 3, 3]), array([0, 0, 1, 0, 1, 2, 0, 1, 2, 3]))
+
+ Use the `k` parameter to return the indices for the lower triangular array
+ up to the k-th diagonal.
+
+ >>> trili1 = np.tril_indices_from(a, k=1)
+ >>> a[trili1]
+ array([ 0, 1, 4, 5, 6, 8, 9, 10, 11, 12, 13, 14, 15])
+
+ See Also
+ --------
+ tril_indices, tril, triu_indices_from
+ """
+ if arr.ndim != 2:
+ raise ValueError("input array must be 2-d")
+ return tril_indices(arr.shape[-2], k=k, m=arr.shape[-1])
+
+
+@set_module('numpy')
+def triu_indices(n, k=0, m=None):
+ """
+ Return the indices for the upper-triangle of an (n, m) array.
+
+ Parameters
+ ----------
+ n : int
+ The size of the arrays for which the returned indices will
+ be valid.
+ k : int, optional
+ Diagonal offset (see `triu` for details).
+ m : int, optional
+ The column dimension of the arrays for which the returned
+ arrays will be valid.
+ By default `m` is taken equal to `n`.
+
+
+ Returns
+ -------
+ inds : tuple of arrays
+ The row and column indices, respectively. The row indices are sorted
+ in non-decreasing order, and the corresponding column indices are
+ strictly increasing for each row.
+
+ See also
+ --------
+ tril_indices : similar function, for lower-triangular.
+ mask_indices : generic function accepting an arbitrary mask function.
+ triu, tril
+
+ Examples
+ --------
+ >>> import numpy as np
+
+ Compute two different sets of indices to access 4x4 arrays, one for the
+ upper triangular part starting at the main diagonal, and one starting two
+ diagonals further right:
+
+ >>> iu1 = np.triu_indices(4)
+ >>> iu1
+ (array([0, 0, 0, 0, 1, 1, 1, 2, 2, 3]), array([0, 1, 2, 3, 1, 2, 3, 2, 3, 3]))
+
+ Note that row indices (first array) are non-decreasing, and the corresponding
+ column indices (second array) are strictly increasing for each row.
+
+ Here is how they can be used with a sample array:
+
+ >>> a = np.arange(16).reshape(4, 4)
+ >>> a
+ array([[ 0, 1, 2, 3],
+ [ 4, 5, 6, 7],
+ [ 8, 9, 10, 11],
+ [12, 13, 14, 15]])
+
+ Both for indexing:
+
+ >>> a[iu1]
+ array([ 0, 1, 2, ..., 10, 11, 15])
+
+ And for assigning values:
+
+ >>> a[iu1] = -1
+ >>> a
+ array([[-1, -1, -1, -1],
+ [ 4, -1, -1, -1],
+ [ 8, 9, -1, -1],
+ [12, 13, 14, -1]])
+
+ These cover only a small part of the whole array (two diagonals right
+ of the main one):
+
+ >>> iu2 = np.triu_indices(4, 2)
+ >>> a[iu2] = -10
+ >>> a
+ array([[ -1, -1, -10, -10],
+ [ 4, -1, -1, -10],
+ [ 8, 9, -1, -1],
+ [ 12, 13, 14, -1]])
+
+ """
+ tri_ = ~tri(n, m, k=k - 1, dtype=bool)
+
+ return tuple(broadcast_to(inds, tri_.shape)[tri_]
+ for inds in indices(tri_.shape, sparse=True))
+
+
+@array_function_dispatch(_trilu_indices_form_dispatcher)
+def triu_indices_from(arr, k=0):
+ """
+ Return the indices for the upper-triangle of arr.
+
+ See `triu_indices` for full details.
+
+ Parameters
+ ----------
+ arr : ndarray, shape(N, M)
+ The indices will be valid for 2-D arrays with the same shape as `arr`.
+ k : int, optional
+ Diagonal offset (see `triu` for details).
+
+ Returns
+ -------
+ inds : tuple of arrays
+ Indices for the upper-triangle of `arr`.
+
+ Examples
+ --------
+ >>> import numpy as np
+
+ Create a 4 by 4 array
+
+ >>> a = np.arange(16).reshape(4, 4)
+ >>> a
+ array([[ 0, 1, 2, 3],
+ [ 4, 5, 6, 7],
+ [ 8, 9, 10, 11],
+ [12, 13, 14, 15]])
+
+ Pass the array to get the indices of the upper triangular elements.
+
+ >>> triui = np.triu_indices_from(a)
+ >>> triui
+ (array([0, 0, 0, 0, 1, 1, 1, 2, 2, 3]), array([0, 1, 2, 3, 1, 2, 3, 2, 3, 3]))
+
+ >>> a[triui]
+ array([ 0, 1, 2, 3, 5, 6, 7, 10, 11, 15])
+
+ This is syntactic sugar for triu_indices().
+
+ >>> np.triu_indices(a.shape[0])
+ (array([0, 0, 0, 0, 1, 1, 1, 2, 2, 3]), array([0, 1, 2, 3, 1, 2, 3, 2, 3, 3]))
+
+ Use the `k` parameter to return the indices for the upper triangular array
+ from the k-th diagonal.
+
+ >>> triuim1 = np.triu_indices_from(a, k=1)
+ >>> a[triuim1]
+ array([ 1, 2, 3, 6, 7, 11])
+
+
+ See Also
+ --------
+ triu_indices, triu, tril_indices_from
+ """
+ if arr.ndim != 2:
+ raise ValueError("input array must be 2-d")
+ return triu_indices(arr.shape[-2], k=k, m=arr.shape[-1])
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/_twodim_base_impl.pyi b/.venv/lib/python3.12/site-packages/numpy/lib/_twodim_base_impl.pyi
new file mode 100644
index 0000000..43df38e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/_twodim_base_impl.pyi
@@ -0,0 +1,438 @@
+from collections.abc import Callable, Sequence
+from typing import (
+ Any,
+ TypeAlias,
+ TypeVar,
+ overload,
+)
+from typing import (
+ Literal as L,
+)
+
+import numpy as np
+from numpy import (
+ _OrderCF,
+ complex128,
+ complexfloating,
+ datetime64,
+ float64,
+ floating,
+ generic,
+ int_,
+ intp,
+ object_,
+ signedinteger,
+ timedelta64,
+)
+from numpy._typing import (
+ ArrayLike,
+ DTypeLike,
+ NDArray,
+ _ArrayLike,
+ _ArrayLikeComplex_co,
+ _ArrayLikeFloat_co,
+ _ArrayLikeInt_co,
+ _ArrayLikeObject_co,
+ _DTypeLike,
+ _SupportsArray,
+ _SupportsArrayFunc,
+)
+
+__all__ = [
+ "diag",
+ "diagflat",
+ "eye",
+ "fliplr",
+ "flipud",
+ "tri",
+ "triu",
+ "tril",
+ "vander",
+ "histogram2d",
+ "mask_indices",
+ "tril_indices",
+ "tril_indices_from",
+ "triu_indices",
+ "triu_indices_from",
+]
+
+###
+
+_T = TypeVar("_T")
+_ScalarT = TypeVar("_ScalarT", bound=generic)
+_ComplexFloatingT = TypeVar("_ComplexFloatingT", bound=np.complexfloating)
+_InexactT = TypeVar("_InexactT", bound=np.inexact)
+_NumberCoT = TypeVar("_NumberCoT", bound=_Number_co)
+
+# The returned arrays dtype must be compatible with `np.equal`
+_MaskFunc: TypeAlias = Callable[[NDArray[int_], _T], NDArray[_Number_co | timedelta64 | datetime64 | object_]]
+
+_Int_co: TypeAlias = np.integer | np.bool
+_Float_co: TypeAlias = np.floating | _Int_co
+_Number_co: TypeAlias = np.number | np.bool
+
+_ArrayLike1D: TypeAlias = _SupportsArray[np.dtype[_ScalarT]] | Sequence[_ScalarT]
+_ArrayLike1DInt_co: TypeAlias = _SupportsArray[np.dtype[_Int_co]] | Sequence[int | _Int_co]
+_ArrayLike1DFloat_co: TypeAlias = _SupportsArray[np.dtype[_Float_co]] | Sequence[float | _Float_co]
+_ArrayLike2DFloat_co: TypeAlias = _SupportsArray[np.dtype[_Float_co]] | Sequence[_ArrayLike1DFloat_co]
+_ArrayLike1DNumber_co: TypeAlias = _SupportsArray[np.dtype[_Number_co]] | Sequence[complex | _Number_co]
+
+###
+
+@overload
+def fliplr(m: _ArrayLike[_ScalarT]) -> NDArray[_ScalarT]: ...
+@overload
+def fliplr(m: ArrayLike) -> NDArray[Any]: ...
+
+@overload
+def flipud(m: _ArrayLike[_ScalarT]) -> NDArray[_ScalarT]: ...
+@overload
+def flipud(m: ArrayLike) -> NDArray[Any]: ...
+
+@overload
+def eye(
+ N: int,
+ M: int | None = ...,
+ k: int = ...,
+ dtype: None = ...,
+ order: _OrderCF = ...,
+ *,
+ device: L["cpu"] | None = ...,
+ like: _SupportsArrayFunc | None = ...,
+) -> NDArray[float64]: ...
+@overload
+def eye(
+ N: int,
+ M: int | None,
+ k: int,
+ dtype: _DTypeLike[_ScalarT],
+ order: _OrderCF = ...,
+ *,
+ device: L["cpu"] | None = ...,
+ like: _SupportsArrayFunc | None = ...,
+) -> NDArray[_ScalarT]: ...
+@overload
+def eye(
+ N: int,
+ M: int | None = ...,
+ k: int = ...,
+ *,
+ dtype: _DTypeLike[_ScalarT],
+ order: _OrderCF = ...,
+ device: L["cpu"] | None = ...,
+ like: _SupportsArrayFunc | None = ...,
+) -> NDArray[_ScalarT]: ...
+@overload
+def eye(
+ N: int,
+ M: int | None = ...,
+ k: int = ...,
+ dtype: DTypeLike = ...,
+ order: _OrderCF = ...,
+ *,
+ device: L["cpu"] | None = ...,
+ like: _SupportsArrayFunc | None = ...,
+) -> NDArray[Any]: ...
+
+@overload
+def diag(v: _ArrayLike[_ScalarT], k: int = ...) -> NDArray[_ScalarT]: ...
+@overload
+def diag(v: ArrayLike, k: int = ...) -> NDArray[Any]: ...
+
+@overload
+def diagflat(v: _ArrayLike[_ScalarT], k: int = ...) -> NDArray[_ScalarT]: ...
+@overload
+def diagflat(v: ArrayLike, k: int = ...) -> NDArray[Any]: ...
+
+@overload
+def tri(
+ N: int,
+ M: int | None = ...,
+ k: int = ...,
+ dtype: None = ...,
+ *,
+ like: _SupportsArrayFunc | None = ...
+) -> NDArray[float64]: ...
+@overload
+def tri(
+ N: int,
+ M: int | None,
+ k: int,
+ dtype: _DTypeLike[_ScalarT],
+ *,
+ like: _SupportsArrayFunc | None = ...
+) -> NDArray[_ScalarT]: ...
+@overload
+def tri(
+ N: int,
+ M: int | None = ...,
+ k: int = ...,
+ *,
+ dtype: _DTypeLike[_ScalarT],
+ like: _SupportsArrayFunc | None = ...
+) -> NDArray[_ScalarT]: ...
+@overload
+def tri(
+ N: int,
+ M: int | None = ...,
+ k: int = ...,
+ dtype: DTypeLike = ...,
+ *,
+ like: _SupportsArrayFunc | None = ...
+) -> NDArray[Any]: ...
+
+@overload
+def tril(m: _ArrayLike[_ScalarT], k: int = 0) -> NDArray[_ScalarT]: ...
+@overload
+def tril(m: ArrayLike, k: int = 0) -> NDArray[Any]: ...
+
+@overload
+def triu(m: _ArrayLike[_ScalarT], k: int = 0) -> NDArray[_ScalarT]: ...
+@overload
+def triu(m: ArrayLike, k: int = 0) -> NDArray[Any]: ...
+
+@overload
+def vander( # type: ignore[misc]
+ x: _ArrayLikeInt_co,
+ N: int | None = ...,
+ increasing: bool = ...,
+) -> NDArray[signedinteger]: ...
+@overload
+def vander( # type: ignore[misc]
+ x: _ArrayLikeFloat_co,
+ N: int | None = ...,
+ increasing: bool = ...,
+) -> NDArray[floating]: ...
+@overload
+def vander(
+ x: _ArrayLikeComplex_co,
+ N: int | None = ...,
+ increasing: bool = ...,
+) -> NDArray[complexfloating]: ...
+@overload
+def vander(
+ x: _ArrayLikeObject_co,
+ N: int | None = ...,
+ increasing: bool = ...,
+) -> NDArray[object_]: ...
+
+@overload
+def histogram2d(
+ x: _ArrayLike1D[_ComplexFloatingT],
+ y: _ArrayLike1D[_ComplexFloatingT | _Float_co],
+ bins: int | Sequence[int] = ...,
+ range: _ArrayLike2DFloat_co | None = ...,
+ density: bool | None = ...,
+ weights: _ArrayLike1DFloat_co | None = ...,
+) -> tuple[
+ NDArray[float64],
+ NDArray[_ComplexFloatingT],
+ NDArray[_ComplexFloatingT],
+]: ...
+@overload
+def histogram2d(
+ x: _ArrayLike1D[_ComplexFloatingT | _Float_co],
+ y: _ArrayLike1D[_ComplexFloatingT],
+ bins: int | Sequence[int] = ...,
+ range: _ArrayLike2DFloat_co | None = ...,
+ density: bool | None = ...,
+ weights: _ArrayLike1DFloat_co | None = ...,
+) -> tuple[
+ NDArray[float64],
+ NDArray[_ComplexFloatingT],
+ NDArray[_ComplexFloatingT],
+]: ...
+@overload
+def histogram2d(
+ x: _ArrayLike1D[_InexactT],
+ y: _ArrayLike1D[_InexactT | _Int_co],
+ bins: int | Sequence[int] = ...,
+ range: _ArrayLike2DFloat_co | None = ...,
+ density: bool | None = ...,
+ weights: _ArrayLike1DFloat_co | None = ...,
+) -> tuple[
+ NDArray[float64],
+ NDArray[_InexactT],
+ NDArray[_InexactT],
+]: ...
+@overload
+def histogram2d(
+ x: _ArrayLike1D[_InexactT | _Int_co],
+ y: _ArrayLike1D[_InexactT],
+ bins: int | Sequence[int] = ...,
+ range: _ArrayLike2DFloat_co | None = ...,
+ density: bool | None = ...,
+ weights: _ArrayLike1DFloat_co | None = ...,
+) -> tuple[
+ NDArray[float64],
+ NDArray[_InexactT],
+ NDArray[_InexactT],
+]: ...
+@overload
+def histogram2d(
+ x: _ArrayLike1DInt_co | Sequence[float],
+ y: _ArrayLike1DInt_co | Sequence[float],
+ bins: int | Sequence[int] = ...,
+ range: _ArrayLike2DFloat_co | None = ...,
+ density: bool | None = ...,
+ weights: _ArrayLike1DFloat_co | None = ...,
+) -> tuple[
+ NDArray[float64],
+ NDArray[float64],
+ NDArray[float64],
+]: ...
+@overload
+def histogram2d(
+ x: Sequence[complex],
+ y: Sequence[complex],
+ bins: int | Sequence[int] = ...,
+ range: _ArrayLike2DFloat_co | None = ...,
+ density: bool | None = ...,
+ weights: _ArrayLike1DFloat_co | None = ...,
+) -> tuple[
+ NDArray[float64],
+ NDArray[complex128 | float64],
+ NDArray[complex128 | float64],
+]: ...
+@overload
+def histogram2d(
+ x: _ArrayLike1DNumber_co,
+ y: _ArrayLike1DNumber_co,
+ bins: _ArrayLike1D[_NumberCoT] | Sequence[_ArrayLike1D[_NumberCoT]],
+ range: _ArrayLike2DFloat_co | None = ...,
+ density: bool | None = ...,
+ weights: _ArrayLike1DFloat_co | None = ...,
+) -> tuple[
+ NDArray[float64],
+ NDArray[_NumberCoT],
+ NDArray[_NumberCoT],
+]: ...
+@overload
+def histogram2d(
+ x: _ArrayLike1D[_InexactT],
+ y: _ArrayLike1D[_InexactT],
+ bins: Sequence[_ArrayLike1D[_NumberCoT] | int],
+ range: _ArrayLike2DFloat_co | None = ...,
+ density: bool | None = ...,
+ weights: _ArrayLike1DFloat_co | None = ...,
+) -> tuple[
+ NDArray[float64],
+ NDArray[_NumberCoT | _InexactT],
+ NDArray[_NumberCoT | _InexactT],
+]: ...
+@overload
+def histogram2d(
+ x: _ArrayLike1DInt_co | Sequence[float],
+ y: _ArrayLike1DInt_co | Sequence[float],
+ bins: Sequence[_ArrayLike1D[_NumberCoT] | int],
+ range: _ArrayLike2DFloat_co | None = ...,
+ density: bool | None = ...,
+ weights: _ArrayLike1DFloat_co | None = ...,
+) -> tuple[
+ NDArray[float64],
+ NDArray[_NumberCoT | float64],
+ NDArray[_NumberCoT | float64],
+]: ...
+@overload
+def histogram2d(
+ x: Sequence[complex],
+ y: Sequence[complex],
+ bins: Sequence[_ArrayLike1D[_NumberCoT] | int],
+ range: _ArrayLike2DFloat_co | None = ...,
+ density: bool | None = ...,
+ weights: _ArrayLike1DFloat_co | None = ...,
+) -> tuple[
+ NDArray[float64],
+ NDArray[_NumberCoT | complex128 | float64],
+ NDArray[_NumberCoT | complex128 | float64],
+]: ...
+@overload
+def histogram2d(
+ x: _ArrayLike1DNumber_co,
+ y: _ArrayLike1DNumber_co,
+ bins: Sequence[Sequence[bool]],
+ range: _ArrayLike2DFloat_co | None = ...,
+ density: bool | None = ...,
+ weights: _ArrayLike1DFloat_co | None = ...,
+) -> tuple[
+ NDArray[float64],
+ NDArray[np.bool],
+ NDArray[np.bool],
+]: ...
+@overload
+def histogram2d(
+ x: _ArrayLike1DNumber_co,
+ y: _ArrayLike1DNumber_co,
+ bins: Sequence[Sequence[int]],
+ range: _ArrayLike2DFloat_co | None = ...,
+ density: bool | None = ...,
+ weights: _ArrayLike1DFloat_co | None = ...,
+) -> tuple[
+ NDArray[float64],
+ NDArray[np.int_ | np.bool],
+ NDArray[np.int_ | np.bool],
+]: ...
+@overload
+def histogram2d(
+ x: _ArrayLike1DNumber_co,
+ y: _ArrayLike1DNumber_co,
+ bins: Sequence[Sequence[float]],
+ range: _ArrayLike2DFloat_co | None = ...,
+ density: bool | None = ...,
+ weights: _ArrayLike1DFloat_co | None = ...,
+) -> tuple[
+ NDArray[float64],
+ NDArray[np.float64 | np.int_ | np.bool],
+ NDArray[np.float64 | np.int_ | np.bool],
+]: ...
+@overload
+def histogram2d(
+ x: _ArrayLike1DNumber_co,
+ y: _ArrayLike1DNumber_co,
+ bins: Sequence[Sequence[complex]],
+ range: _ArrayLike2DFloat_co | None = ...,
+ density: bool | None = ...,
+ weights: _ArrayLike1DFloat_co | None = ...,
+) -> tuple[
+ NDArray[float64],
+ NDArray[np.complex128 | np.float64 | np.int_ | np.bool],
+ NDArray[np.complex128 | np.float64 | np.int_ | np.bool],
+]: ...
+
+# NOTE: we're assuming/demanding here the `mask_func` returns
+# an ndarray of shape `(n, n)`; otherwise there is the possibility
+# of the output tuple having more or fewer than 2 elements
+@overload
+def mask_indices(
+ n: int,
+ mask_func: _MaskFunc[int],
+ k: int = ...,
+) -> tuple[NDArray[intp], NDArray[intp]]: ...
+@overload
+def mask_indices(
+ n: int,
+ mask_func: _MaskFunc[_T],
+ k: _T,
+) -> tuple[NDArray[intp], NDArray[intp]]: ...
+
+def tril_indices(
+ n: int,
+ k: int = ...,
+ m: int | None = ...,
+) -> tuple[NDArray[int_], NDArray[int_]]: ...
+
+def tril_indices_from(
+ arr: NDArray[Any],
+ k: int = ...,
+) -> tuple[NDArray[int_], NDArray[int_]]: ...
+
+def triu_indices(
+ n: int,
+ k: int = ...,
+ m: int | None = ...,
+) -> tuple[NDArray[int_], NDArray[int_]]: ...
+
+def triu_indices_from(
+ arr: NDArray[Any],
+ k: int = ...,
+) -> tuple[NDArray[int_], NDArray[int_]]: ...
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/_type_check_impl.py b/.venv/lib/python3.12/site-packages/numpy/lib/_type_check_impl.py
new file mode 100644
index 0000000..977609c
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/_type_check_impl.py
@@ -0,0 +1,699 @@
+"""Automatically adapted for numpy Sep 19, 2005 by convertcode.py
+
+"""
+import functools
+
+__all__ = ['iscomplexobj', 'isrealobj', 'imag', 'iscomplex',
+ 'isreal', 'nan_to_num', 'real', 'real_if_close',
+ 'typename', 'mintypecode',
+ 'common_type']
+
+import numpy._core.numeric as _nx
+from numpy._core import getlimits, overrides
+from numpy._core.numeric import asanyarray, asarray, isnan, zeros
+from numpy._utils import set_module
+
+from ._ufunclike_impl import isneginf, isposinf
+
+array_function_dispatch = functools.partial(
+ overrides.array_function_dispatch, module='numpy')
+
+
+_typecodes_by_elsize = 'GDFgdfQqLlIiHhBb?'
+
+
+@set_module('numpy')
+def mintypecode(typechars, typeset='GDFgdf', default='d'):
+ """
+ Return the character for the minimum-size type to which given types can
+ be safely cast.
+
+ The returned type character must represent the smallest size dtype such
+ that an array of the returned type can handle the data from an array of
+ all types in `typechars` (or if `typechars` is an array, then its
+ dtype.char).
+
+ Parameters
+ ----------
+ typechars : list of str or array_like
+ If a list of strings, each string should represent a dtype.
+ If array_like, the character representation of the array dtype is used.
+ typeset : str or list of str, optional
+ The set of characters that the returned character is chosen from.
+ The default set is 'GDFgdf'.
+ default : str, optional
+ The default character, this is returned if none of the characters in
+ `typechars` matches a character in `typeset`.
+
+ Returns
+ -------
+ typechar : str
+ The character representing the minimum-size type that was found.
+
+ See Also
+ --------
+ dtype
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.mintypecode(['d', 'f', 'S'])
+ 'd'
+ >>> x = np.array([1.1, 2-3.j])
+ >>> np.mintypecode(x)
+ 'D'
+
+ >>> np.mintypecode('abceh', default='G')
+ 'G'
+
+ """
+ typecodes = ((isinstance(t, str) and t) or asarray(t).dtype.char
+ for t in typechars)
+ intersection = {t for t in typecodes if t in typeset}
+ if not intersection:
+ return default
+ if 'F' in intersection and 'd' in intersection:
+ return 'D'
+ return min(intersection, key=_typecodes_by_elsize.index)
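
The 'F' + 'd' special case above exists because neither code can represent the
other losslessly; only 'D' (complex double precision) can hold both. A minimal
doctest sketch (assuming a NumPy 2.x interpreter):

    >>> import numpy as np
    >>> np.mintypecode(['F', 'd'])
    'D'
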
+
+
+def _real_dispatcher(val):
+ return (val,)
+
+
+@array_function_dispatch(_real_dispatcher)
+def real(val):
+ """
+ Return the real part of the complex argument.
+
+ Parameters
+ ----------
+ val : array_like
+ Input array.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ The real component of the complex argument. If `val` is real, the type
+ of `val` is used for the output. If `val` has complex elements, the
+ returned type is float.
+
+ See Also
+ --------
+ real_if_close, imag, angle
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> a = np.array([1+2j, 3+4j, 5+6j])
+ >>> a.real
+ array([1., 3., 5.])
+ >>> a.real = 9
+ >>> a
+ array([9.+2.j, 9.+4.j, 9.+6.j])
+ >>> a.real = np.array([9, 8, 7])
+ >>> a
+ array([9.+2.j, 8.+4.j, 7.+6.j])
+ >>> np.real(1 + 1j)
+ 1.0
+
+ """
+ try:
+ return val.real
+ except AttributeError:
+ return asanyarray(val).real
+
+
+def _imag_dispatcher(val):
+ return (val,)
+
+
+@array_function_dispatch(_imag_dispatcher)
+def imag(val):
+ """
+ Return the imaginary part of the complex argument.
+
+ Parameters
+ ----------
+ val : array_like
+ Input array.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ The imaginary component of the complex argument. If `val` is real,
+ the type of `val` is used for the output. If `val` has complex
+ elements, the returned type is float.
+
+ See Also
+ --------
+ real, angle, real_if_close
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> a = np.array([1+2j, 3+4j, 5+6j])
+ >>> a.imag
+ array([2., 4., 6.])
+ >>> a.imag = np.array([8, 10, 12])
+ >>> a
+ array([1. +8.j, 3.+10.j, 5.+12.j])
+ >>> np.imag(1 + 1j)
+ 1.0
+
+ """
+ try:
+ return val.imag
+ except AttributeError:
+ return asanyarray(val).imag
+
+
+def _is_type_dispatcher(x):
+ return (x,)
+
+
+@array_function_dispatch(_is_type_dispatcher)
+def iscomplex(x):
+ """
+ Returns a bool array, True where the input element is complex.
+
+ What is tested is whether the input has a non-zero imaginary part, not if
+ the input type is complex.
+
+ Parameters
+ ----------
+ x : array_like
+ Input array.
+
+ Returns
+ -------
+ out : ndarray of bools
+ Output array.
+
+ See Also
+ --------
+ isreal
+ iscomplexobj : Return True if x is a complex type or an array of complex
+ numbers.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.iscomplex([1+1j, 1+0j, 4.5, 3, 2, 2j])
+ array([ True, False, False, False, False, True])
+
+ """
+ ax = asanyarray(x)
+ if issubclass(ax.dtype.type, _nx.complexfloating):
+ return ax.imag != 0
+ res = zeros(ax.shape, bool)
+ return res[()] # convert to scalar if needed
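
The trailing ``res[()]`` is what makes scalar input come back as a scalar
``np.bool`` rather than a 0-d array; for array input it is a no-op. A small
sketch (assuming a NumPy 2.x interpreter):

    >>> import numpy as np
    >>> np.iscomplex(3.0)
    False
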
+
+
+@array_function_dispatch(_is_type_dispatcher)
+def isreal(x):
+ """
+ Returns a bool array, True where the input element is real.
+
+ If element has complex type with zero imaginary part, the return value
+ for that element is True.
+
+ Parameters
+ ----------
+ x : array_like
+ Input array.
+
+ Returns
+ -------
+ out : ndarray, bool
+ Boolean array of same shape as `x`.
+
+ Notes
+ -----
+ `isreal` may behave unexpectedly for string or object arrays (see examples).
+
+ See Also
+ --------
+ iscomplex
+ isrealobj : Return True if x is not a complex type.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> a = np.array([1+1j, 1+0j, 4.5, 3, 2, 2j], dtype=complex)
+ >>> np.isreal(a)
+ array([False, True, True, True, True, False])
+
+ The function does not work on string arrays.
+
+ >>> a = np.array([2j, "a"], dtype="U")
+ >>> np.isreal(a) # Warns about non-elementwise comparison
+ False
+
+ Returns True for all elements in input array of ``dtype=object`` even if
+ any of the elements is complex.
+
+ >>> a = np.array([1, "2", 3+4j], dtype=object)
+ >>> np.isreal(a)
+ array([ True, True, True])
+
+ isreal should not be used with object arrays.
+
+ >>> a = np.array([1+2j, 2+1j], dtype=object)
+ >>> np.isreal(a)
+ array([ True, True])
+
+ """
+ return imag(x) == 0
+
+
+@array_function_dispatch(_is_type_dispatcher)
+def iscomplexobj(x):
+ """
+ Check for a complex type or an array of complex numbers.
+
+ The type of the input is checked, not the value. Even if the input
+ has an imaginary part equal to zero, `iscomplexobj` evaluates to True.
+
+ Parameters
+ ----------
+ x : any
+ The input can be of any type and shape.
+
+ Returns
+ -------
+ iscomplexobj : bool
+ The return value, True if `x` is of a complex type or has at least
+ one complex element.
+
+ See Also
+ --------
+ isrealobj, iscomplex
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.iscomplexobj(1)
+ False
+ >>> np.iscomplexobj(1+0j)
+ True
+ >>> np.iscomplexobj([3, 1+0j, True])
+ True
+
+ """
+ try:
+ dtype = x.dtype
+ type_ = dtype.type
+ except AttributeError:
+ type_ = asarray(x).dtype.type
+ return issubclass(type_, _nx.complexfloating)
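
Because only ``x.dtype`` is consulted, any dtype-carrying object works without
first being converted to an array. A sketch with a hypothetical ``HasDType``
class (assuming a NumPy 2.x interpreter):

    >>> import numpy as np
    >>> class HasDType:
    ...     dtype = np.dtype(np.complex64)
    >>> np.iscomplexobj(HasDType())
    True
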
+
+
+@array_function_dispatch(_is_type_dispatcher)
+def isrealobj(x):
+ """
+ Return True if x is neither a complex type nor an array of complex numbers.
+
+ The type of the input is checked, not the value. So even if the input
+ has an imaginary part equal to zero, `isrealobj` evaluates to False
+ if the data type is complex.
+
+ Parameters
+ ----------
+ x : any
+ The input can be of any type and shape.
+
+ Returns
+ -------
+ y : bool
+ The return value, False if `x` is of a complex type.
+
+ See Also
+ --------
+ iscomplexobj, isreal
+
+ Notes
+ -----
+ The function is meant only for arrays with numerical values, but it
+ accepts all other objects. Since it assumes array input, the return
+ value for other objects may be True.
+
+ >>> np.isrealobj('A string')
+ True
+ >>> np.isrealobj(False)
+ True
+ >>> np.isrealobj(None)
+ True
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.isrealobj(1)
+ True
+ >>> np.isrealobj(1+0j)
+ False
+ >>> np.isrealobj([3, 1+0j, True])
+ False
+
+ """
+ return not iscomplexobj(x)
+
+#-----------------------------------------------------------------------------
+
+def _getmaxmin(t):
+ f = getlimits.finfo(t)
+ return f.max, f.min
+
+
+def _nan_to_num_dispatcher(x, copy=None, nan=None, posinf=None, neginf=None):
+ return (x,)
+
+
+@array_function_dispatch(_nan_to_num_dispatcher)
+def nan_to_num(x, copy=True, nan=0.0, posinf=None, neginf=None):
+ """
+ Replace NaN with zero and infinity with large finite numbers (default
+ behaviour) or with the numbers defined by the user using the `nan`,
+ `posinf` and/or `neginf` keywords.
+
+ If `x` is inexact, NaN is replaced by zero or by the user defined value in
+ `nan` keyword, infinity is replaced by the largest finite floating point
+ values representable by ``x.dtype`` or by the user defined value in
+ `posinf` keyword and -infinity is replaced by the most negative finite
+ floating point values representable by ``x.dtype`` or by the user defined
+ value in `neginf` keyword.
+
+ For complex dtypes, the above is applied to each of the real and
+ imaginary components of `x` separately.
+
+ If `x` is not inexact, then no replacements are made.
+
+ Parameters
+ ----------
+ x : scalar or array_like
+ Input data.
+ copy : bool, optional
+ Whether to create a copy of `x` (True) or to replace values
+ in-place (False). The in-place operation only occurs if
+ casting to an array does not require a copy.
+ Default is True.
+ nan : int, float, optional
+ Value to be used to fill NaN values. If no value is passed
+ then NaN values will be replaced with 0.0.
+ posinf : int, float, optional
+ Value to be used to fill positive infinity values. If no value is
+ passed then positive infinity values will be replaced with a very
+ large number.
+ neginf : int, float, optional
+ Value to be used to fill negative infinity values. If no value is
+ passed then negative infinity values will be replaced with a very
+ small (or negative) number.
+
+ Returns
+ -------
+ out : ndarray
+ `x`, with the non-finite values replaced. If `copy` is False, this may
+ be `x` itself.
+
+ See Also
+ --------
+ isinf : Shows which elements are positive or negative infinity.
+ isneginf : Shows which elements are negative infinity.
+ isposinf : Shows which elements are positive infinity.
+ isnan : Shows which elements are Not a Number (NaN).
+ isfinite : Shows which elements are finite (not NaN, not infinity)
+
+ Notes
+ -----
+ NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
+ (IEEE 754). This means that Not a Number is not equivalent to infinity.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.nan_to_num(np.inf)
+ 1.7976931348623157e+308
+ >>> np.nan_to_num(-np.inf)
+ -1.7976931348623157e+308
+ >>> np.nan_to_num(np.nan)
+ 0.0
+ >>> x = np.array([np.inf, -np.inf, np.nan, -128, 128])
+ >>> np.nan_to_num(x)
+ array([ 1.79769313e+308, -1.79769313e+308, 0.00000000e+000, # may vary
+ -1.28000000e+002, 1.28000000e+002])
+ >>> np.nan_to_num(x, nan=-9999, posinf=33333333, neginf=33333333)
+ array([ 3.3333333e+07, 3.3333333e+07, -9.9990000e+03,
+ -1.2800000e+02, 1.2800000e+02])
+ >>> y = np.array([complex(np.inf, np.nan), np.nan, complex(np.nan, np.inf)])
+ >>> np.nan_to_num(y)
+ array([ 1.79769313e+308 +0.00000000e+000j, # may vary
+ 0.00000000e+000 +0.00000000e+000j,
+ 0.00000000e+000 +1.79769313e+308j])
+ >>> np.nan_to_num(y, nan=111111, posinf=222222)
+ array([222222.+111111.j, 111111. +0.j, 111111.+222222.j])
+ """
+ x = _nx.array(x, subok=True, copy=copy)
+ xtype = x.dtype.type
+
+ isscalar = (x.ndim == 0)
+
+ if not issubclass(xtype, _nx.inexact):
+ return x[()] if isscalar else x
+
+ iscomplex = issubclass(xtype, _nx.complexfloating)
+
+ dest = (x.real, x.imag) if iscomplex else (x,)
+ maxf, minf = _getmaxmin(x.real.dtype)
+ if posinf is not None:
+ maxf = posinf
+ if neginf is not None:
+ minf = neginf
+ for d in dest:
+ idx_nan = isnan(d)
+ idx_posinf = isposinf(d)
+ idx_neginf = isneginf(d)
+ _nx.copyto(d, nan, where=idx_nan)
+ _nx.copyto(d, maxf, where=idx_posinf)
+ _nx.copyto(d, minf, where=idx_neginf)
+ return x[()] if isscalar else x
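
With ``copy=False`` the replacement happens in the caller's buffer whenever no
cast is needed, as the ``copy`` parameter entry describes. A minimal sketch
(assuming a NumPy 2.x interpreter):

    >>> import numpy as np
    >>> x = np.array([np.nan, np.inf])
    >>> _ = np.nan_to_num(x, copy=False, posinf=1.0)
    >>> x
    array([0., 1.])
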
+
+#-----------------------------------------------------------------------------
+
+def _real_if_close_dispatcher(a, tol=None):
+ return (a,)
+
+
+@array_function_dispatch(_real_if_close_dispatcher)
+def real_if_close(a, tol=100):
+ """
+ If input is complex with all imaginary parts close to zero, return
+ real parts.
+
+ "Close to zero" is defined as `tol` * (machine epsilon of the type for
+ `a`).
+
+ Parameters
+ ----------
+ a : array_like
+ Input array.
+ tol : float
+ Tolerance in machine epsilons for the complex part of the elements
+ in the array. If the tolerance is <=1, then the absolute tolerance
+ is used.
+
+ Returns
+ -------
+ out : ndarray
+ If `a` is real, the type of `a` is used for the output. If `a`
+ has complex elements, the returned type is float.
+
+ See Also
+ --------
+ real, imag, angle
+
+ Notes
+ -----
+ Machine epsilon varies from machine to machine and between data types
+ but Python floats on most platforms have a machine epsilon equal to
+ 2.2204460492503131e-16. You can use ``np.finfo(float).eps`` to print
+ out the machine epsilon for floats.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.finfo(float).eps
+ 2.2204460492503131e-16 # may vary
+
+ >>> np.real_if_close([2.1 + 4e-14j, 5.2 + 3e-15j], tol=1000)
+ array([2.1, 5.2])
+ >>> np.real_if_close([2.1 + 4e-13j, 5.2 + 3e-15j], tol=1000)
+ array([2.1+4.e-13j, 5.2+3.e-15j])
+
+ """
+ a = asanyarray(a)
+ type_ = a.dtype.type
+ if not issubclass(type_, _nx.complexfloating):
+ return a
+ if tol > 1:
+ f = getlimits.finfo(type_)
+ tol = f.eps * tol
+ if _nx.all(_nx.absolute(a.imag) < tol):
+ a = a.real
+ return a
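
The ``tol > 1`` branch means tolerances at or below 1 are taken as absolute
rather than in units of machine epsilon. A sketch of that path (assuming a
NumPy 2.x interpreter):

    >>> import numpy as np
    >>> np.real_if_close([2 + 1e-10j], tol=1e-9)
    array([2.])
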
+
+
+#-----------------------------------------------------------------------------
+
+_namefromtype = {'S1': 'character',
+ '?': 'bool',
+ 'b': 'signed char',
+ 'B': 'unsigned char',
+ 'h': 'short',
+ 'H': 'unsigned short',
+ 'i': 'integer',
+ 'I': 'unsigned integer',
+ 'l': 'long integer',
+ 'L': 'unsigned long integer',
+ 'q': 'long long integer',
+ 'Q': 'unsigned long long integer',
+ 'f': 'single precision',
+ 'd': 'double precision',
+ 'g': 'long precision',
+ 'F': 'complex single precision',
+ 'D': 'complex double precision',
+ 'G': 'complex long double precision',
+ 'S': 'string',
+ 'U': 'unicode',
+ 'V': 'void',
+ 'O': 'object'
+ }
+
+@set_module('numpy')
+def typename(char):
+ """
+ Return a description for the given data type code.
+
+ Parameters
+ ----------
+ char : str
+ Data type code.
+
+ Returns
+ -------
+ out : str
+ Description of the input data type code.
+
+ See Also
+ --------
+ dtype
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> typechars = ['S1', '?', 'B', 'D', 'G', 'F', 'I', 'H', 'L', 'O', 'Q',
+ ... 'S', 'U', 'V', 'b', 'd', 'g', 'f', 'i', 'h', 'l', 'q']
+ >>> for typechar in typechars:
+ ... print(typechar, ' : ', np.typename(typechar))
+ ...
+ S1 : character
+ ? : bool
+ B : unsigned char
+ D : complex double precision
+ G : complex long double precision
+ F : complex single precision
+ I : unsigned integer
+ H : unsigned short
+ L : unsigned long integer
+ O : object
+ Q : unsigned long long integer
+ S : string
+ U : unicode
+ V : void
+ b : signed char
+ d : double precision
+ g : long precision
+ f : single precision
+ i : integer
+ h : short
+ l : long integer
+ q : long long integer
+
+ """
+ return _namefromtype[char]
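
Since this is a plain dictionary lookup, codes missing from ``_namefromtype``
above — notably 'e' (half precision) — raise ``KeyError``:

    >>> import numpy as np
    >>> np.typename('e')
    Traceback (most recent call last):
        ...
    KeyError: 'e'
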
+
+#-----------------------------------------------------------------------------
+
+
+# Determine the "minimum common type" for a group of arrays.
+array_type = [[_nx.float16, _nx.float32, _nx.float64, _nx.longdouble],
+ [None, _nx.complex64, _nx.complex128, _nx.clongdouble]]
+array_precision = {_nx.float16: 0,
+ _nx.float32: 1,
+ _nx.float64: 2,
+ _nx.longdouble: 3,
+ _nx.complex64: 1,
+ _nx.complex128: 2,
+ _nx.clongdouble: 3}
+
+
+def _common_type_dispatcher(*arrays):
+ return arrays
+
+
+@array_function_dispatch(_common_type_dispatcher)
+def common_type(*arrays):
+ """
+ Return a scalar type which is common to the input arrays.
+
+ The return type will always be an inexact (i.e. floating point) scalar
+ type, even if all the arrays are integer arrays. If one of the inputs is
+ an integer array, the minimum precision type that is returned is a
+ 64-bit floating point dtype.
+
+ All input arrays except int64 and uint64 can be safely cast to the
+ returned dtype without loss of information.
+
+ Parameters
+ ----------
+ array1, array2, ... : ndarrays
+ Input arrays.
+
+ Returns
+ -------
+ out : data type code
+ Data type code.
+
+ See Also
+ --------
+ dtype, mintypecode
+
+ Examples
+ --------
+ >>> np.common_type(np.arange(2, dtype=np.float32))
+ <class 'numpy.float32'>
+ >>> np.common_type(np.arange(2, dtype=np.float32), np.arange(2))
+ <class 'numpy.float64'>
+ >>> np.common_type(np.arange(4), np.array([45, 6.j]), np.array([45.0]))
+ <class 'numpy.complex128'>
+
+ """
+ is_complex = False
+ precision = 0
+ for a in arrays:
+ t = a.dtype.type
+ if iscomplexobj(a):
+ is_complex = True
+ if issubclass(t, _nx.integer):
+ p = 2 # array_precision[_nx.double]
+ else:
+ p = array_precision.get(t)
+ if p is None:
+ raise TypeError("can't get common type for non-numeric array")
+ precision = max(precision, p)
+ if is_complex:
+ return array_type[1][precision]
+ else:
+ return array_type[0][precision]
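
The ``array_precision.get(t)`` lookup is also what rejects non-numeric input:
anything that is neither an integer nor in the precision table falls through
to the ``TypeError``. A sketch (assuming a NumPy 2.x interpreter):

    >>> import numpy as np
    >>> np.common_type(np.array(['a']))
    Traceback (most recent call last):
        ...
    TypeError: can't get common type for non-numeric array
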
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/_type_check_impl.pyi b/.venv/lib/python3.12/site-packages/numpy/lib/_type_check_impl.pyi
new file mode 100644
index 0000000..944015e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/_type_check_impl.pyi
@@ -0,0 +1,350 @@
+from collections.abc import Container, Iterable
+from typing import Any, Protocol, TypeAlias, overload, type_check_only
+from typing import Literal as L
+
+from _typeshed import Incomplete
+from typing_extensions import TypeVar
+
+import numpy as np
+from numpy._typing import (
+ ArrayLike,
+ NDArray,
+ _16Bit,
+ _32Bit,
+ _64Bit,
+ _ArrayLike,
+ _NestedSequence,
+ _ScalarLike_co,
+ _SupportsArray,
+)
+
+__all__ = [
+ "common_type",
+ "imag",
+ "iscomplex",
+ "iscomplexobj",
+ "isreal",
+ "isrealobj",
+ "mintypecode",
+ "nan_to_num",
+ "real",
+ "real_if_close",
+ "typename",
+]
+
+_T = TypeVar("_T")
+_T_co = TypeVar("_T_co", covariant=True)
+_ScalarT = TypeVar("_ScalarT", bound=np.generic)
+_ScalarT_co = TypeVar("_ScalarT_co", bound=np.generic, covariant=True)
+_RealT = TypeVar("_RealT", bound=np.floating | np.integer | np.bool)
+
+_FloatMax32: TypeAlias = np.float32 | np.float16
+_ComplexMax128: TypeAlias = np.complex128 | np.complex64
+_RealMax64: TypeAlias = np.float64 | np.float32 | np.float16 | np.integer
+_Real: TypeAlias = np.floating | np.integer
+_InexactMax32: TypeAlias = np.inexact[_32Bit] | np.float16
+_NumberMax64: TypeAlias = np.number[_64Bit] | np.number[_32Bit] | np.number[_16Bit] | np.integer
+
+@type_check_only
+class _HasReal(Protocol[_T_co]):
+ @property
+ def real(self, /) -> _T_co: ...
+
+@type_check_only
+class _HasImag(Protocol[_T_co]):
+ @property
+ def imag(self, /) -> _T_co: ...
+
+@type_check_only
+class _HasDType(Protocol[_ScalarT_co]):
+ @property
+ def dtype(self, /) -> np.dtype[_ScalarT_co]: ...
+
+###
+
+def mintypecode(typechars: Iterable[str | ArrayLike], typeset: str | Container[str] = "GDFgdf", default: str = "d") -> str: ...
+
+#
+@overload
+def real(val: _HasReal[_T]) -> _T: ... # type: ignore[overload-overlap]
+@overload
+def real(val: _ArrayLike[_RealT]) -> NDArray[_RealT]: ...
+@overload
+def real(val: ArrayLike) -> NDArray[Any]: ...
+
+#
+@overload
+def imag(val: _HasImag[_T]) -> _T: ... # type: ignore[overload-overlap]
+@overload
+def imag(val: _ArrayLike[_RealT]) -> NDArray[_RealT]: ...
+@overload
+def imag(val: ArrayLike) -> NDArray[Any]: ...
+
+#
+@overload
+def iscomplex(x: _ScalarLike_co) -> np.bool: ...
+@overload
+def iscomplex(x: NDArray[Any] | _NestedSequence[ArrayLike]) -> NDArray[np.bool]: ...
+@overload
+def iscomplex(x: ArrayLike) -> np.bool | NDArray[np.bool]: ...
+
+#
+@overload
+def isreal(x: _ScalarLike_co) -> np.bool: ...
+@overload
+def isreal(x: NDArray[Any] | _NestedSequence[ArrayLike]) -> NDArray[np.bool]: ...
+@overload
+def isreal(x: ArrayLike) -> np.bool | NDArray[np.bool]: ...
+
+#
+def iscomplexobj(x: _HasDType[Any] | ArrayLike) -> bool: ...
+def isrealobj(x: _HasDType[Any] | ArrayLike) -> bool: ...
+
+#
+@overload
+def nan_to_num(
+ x: _ScalarT,
+ copy: bool = True,
+ nan: float = 0.0,
+ posinf: float | None = None,
+ neginf: float | None = None,
+) -> _ScalarT: ...
+@overload
+def nan_to_num(
+ x: NDArray[_ScalarT] | _NestedSequence[_ArrayLike[_ScalarT]],
+ copy: bool = True,
+ nan: float = 0.0,
+ posinf: float | None = None,
+ neginf: float | None = None,
+) -> NDArray[_ScalarT]: ...
+@overload
+def nan_to_num(
+ x: _SupportsArray[np.dtype[_ScalarT]],
+ copy: bool = True,
+ nan: float = 0.0,
+ posinf: float | None = None,
+ neginf: float | None = None,
+) -> _ScalarT | NDArray[_ScalarT]: ...
+@overload
+def nan_to_num(
+ x: _NestedSequence[ArrayLike],
+ copy: bool = True,
+ nan: float = 0.0,
+ posinf: float | None = None,
+ neginf: float | None = None,
+) -> NDArray[Incomplete]: ...
+@overload
+def nan_to_num(
+ x: ArrayLike,
+ copy: bool = True,
+ nan: float = 0.0,
+ posinf: float | None = None,
+ neginf: float | None = None,
+) -> Incomplete: ...
+
+# NOTE: The [overload-overlap] mypy error is a false positive
+@overload
+def real_if_close(a: _ArrayLike[np.complex64], tol: float = 100) -> NDArray[np.float32 | np.complex64]: ... # type: ignore[overload-overlap]
+@overload
+def real_if_close(a: _ArrayLike[np.complex128], tol: float = 100) -> NDArray[np.float64 | np.complex128]: ...
+@overload
+def real_if_close(a: _ArrayLike[np.clongdouble], tol: float = 100) -> NDArray[np.longdouble | np.clongdouble]: ...
+@overload
+def real_if_close(a: _ArrayLike[_RealT], tol: float = 100) -> NDArray[_RealT]: ...
+@overload
+def real_if_close(a: ArrayLike, tol: float = 100) -> NDArray[Any]: ...
+
+#
+@overload
+def typename(char: L['S1']) -> L['character']: ...
+@overload
+def typename(char: L['?']) -> L['bool']: ...
+@overload
+def typename(char: L['b']) -> L['signed char']: ...
+@overload
+def typename(char: L['B']) -> L['unsigned char']: ...
+@overload
+def typename(char: L['h']) -> L['short']: ...
+@overload
+def typename(char: L['H']) -> L['unsigned short']: ...
+@overload
+def typename(char: L['i']) -> L['integer']: ...
+@overload
+def typename(char: L['I']) -> L['unsigned integer']: ...
+@overload
+def typename(char: L['l']) -> L['long integer']: ...
+@overload
+def typename(char: L['L']) -> L['unsigned long integer']: ...
+@overload
+def typename(char: L['q']) -> L['long long integer']: ...
+@overload
+def typename(char: L['Q']) -> L['unsigned long long integer']: ...
+@overload
+def typename(char: L['f']) -> L['single precision']: ...
+@overload
+def typename(char: L['d']) -> L['double precision']: ...
+@overload
+def typename(char: L['g']) -> L['long precision']: ...
+@overload
+def typename(char: L['F']) -> L['complex single precision']: ...
+@overload
+def typename(char: L['D']) -> L['complex double precision']: ...
+@overload
+def typename(char: L['G']) -> L['complex long double precision']: ...
+@overload
+def typename(char: L['S']) -> L['string']: ...
+@overload
+def typename(char: L['U']) -> L['unicode']: ...
+@overload
+def typename(char: L['V']) -> L['void']: ...
+@overload
+def typename(char: L['O']) -> L['object']: ...
+
+# NOTE: The [overload-overlap] mypy errors are false positives
+@overload
+def common_type() -> type[np.float16]: ...
+@overload
+def common_type(a0: _HasDType[np.float16], /, *ai: _HasDType[np.float16]) -> type[np.float16]: ... # type: ignore[overload-overlap]
+@overload
+def common_type(a0: _HasDType[np.float32], /, *ai: _HasDType[_FloatMax32]) -> type[np.float32]: ... # type: ignore[overload-overlap]
+@overload
+def common_type( # type: ignore[overload-overlap]
+ a0: _HasDType[np.float64 | np.integer],
+ /,
+ *ai: _HasDType[_RealMax64],
+) -> type[np.float64]: ...
+@overload
+def common_type( # type: ignore[overload-overlap]
+ a0: _HasDType[np.longdouble],
+ /,
+ *ai: _HasDType[_Real],
+) -> type[np.longdouble]: ...
+@overload
+def common_type( # type: ignore[overload-overlap]
+ a0: _HasDType[np.complex64],
+ /,
+ *ai: _HasDType[_InexactMax32],
+) -> type[np.complex64]: ...
+@overload
+def common_type( # type: ignore[overload-overlap]
+ a0: _HasDType[np.complex128],
+ /,
+ *ai: _HasDType[_NumberMax64],
+) -> type[np.complex128]: ...
+@overload
+def common_type( # type: ignore[overload-overlap]
+ a0: _HasDType[np.clongdouble],
+ /,
+ *ai: _HasDType[np.number],
+) -> type[np.clongdouble]: ...
+@overload
+def common_type( # type: ignore[overload-overlap]
+ a0: _HasDType[_FloatMax32],
+ array1: _HasDType[np.float32],
+ /,
+ *ai: _HasDType[_FloatMax32],
+) -> type[np.float32]: ...
+@overload
+def common_type(
+ a0: _HasDType[_RealMax64],
+ array1: _HasDType[np.float64 | np.integer],
+ /,
+ *ai: _HasDType[_RealMax64],
+) -> type[np.float64]: ...
+@overload
+def common_type(
+ a0: _HasDType[_Real],
+ array1: _HasDType[np.longdouble],
+ /,
+ *ai: _HasDType[_Real],
+) -> type[np.longdouble]: ...
+@overload
+def common_type( # type: ignore[overload-overlap]
+ a0: _HasDType[_InexactMax32],
+ array1: _HasDType[np.complex64],
+ /,
+ *ai: _HasDType[_InexactMax32],
+) -> type[np.complex64]: ...
+@overload
+def common_type(
+ a0: _HasDType[np.float64],
+ array1: _HasDType[_ComplexMax128],
+ /,
+ *ai: _HasDType[_NumberMax64],
+) -> type[np.complex128]: ...
+@overload
+def common_type(
+ a0: _HasDType[_ComplexMax128],
+ array1: _HasDType[np.float64],
+ /,
+ *ai: _HasDType[_NumberMax64],
+) -> type[np.complex128]: ...
+@overload
+def common_type(
+ a0: _HasDType[_NumberMax64],
+ array1: _HasDType[np.complex128],
+ /,
+ *ai: _HasDType[_NumberMax64],
+) -> type[np.complex128]: ...
+@overload
+def common_type(
+ a0: _HasDType[_ComplexMax128],
+ array1: _HasDType[np.complex128 | np.integer],
+ /,
+ *ai: _HasDType[_NumberMax64],
+) -> type[np.complex128]: ...
+@overload
+def common_type(
+ a0: _HasDType[np.complex128 | np.integer],
+ array1: _HasDType[_ComplexMax128],
+ /,
+ *ai: _HasDType[_NumberMax64],
+) -> type[np.complex128]: ...
+@overload
+def common_type(
+ a0: _HasDType[_Real],
+ /,
+ *ai: _HasDType[_Real],
+) -> type[np.floating]: ...
+@overload
+def common_type(
+ a0: _HasDType[np.number],
+ array1: _HasDType[np.clongdouble],
+ /,
+ *ai: _HasDType[np.number],
+) -> type[np.clongdouble]: ...
+@overload
+def common_type(
+ a0: _HasDType[np.longdouble],
+ array1: _HasDType[np.complexfloating],
+ /,
+ *ai: _HasDType[np.number],
+) -> type[np.clongdouble]: ...
+@overload
+def common_type(
+ a0: _HasDType[np.complexfloating],
+ array1: _HasDType[np.longdouble],
+ /,
+ *ai: _HasDType[np.number],
+) -> type[np.clongdouble]: ...
+@overload
+def common_type(
+ a0: _HasDType[np.complexfloating],
+ array1: _HasDType[np.number],
+ /,
+ *ai: _HasDType[np.number],
+) -> type[np.complexfloating]: ...
+@overload
+def common_type(
+ a0: _HasDType[np.number],
+ array1: _HasDType[np.complexfloating],
+ /,
+ *ai: _HasDType[np.number],
+) -> type[np.complexfloating]: ...
+@overload
+def common_type(
+ a0: _HasDType[np.number],
+ array1: _HasDType[np.number],
+ /,
+ *ai: _HasDType[np.number],
+) -> type[Any]: ...
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/_ufunclike_impl.py b/.venv/lib/python3.12/site-packages/numpy/lib/_ufunclike_impl.py
new file mode 100644
index 0000000..695aab1
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/_ufunclike_impl.py
@@ -0,0 +1,207 @@
+"""
+Module of functions that are like ufuncs in acting on arrays and optionally
+storing results in an output array.
+
+"""
+__all__ = ['fix', 'isneginf', 'isposinf']
+
+import numpy._core.numeric as nx
+from numpy._core.overrides import array_function_dispatch
+
+
+def _dispatcher(x, out=None):
+ return (x, out)
+
+
+@array_function_dispatch(_dispatcher, verify=False, module='numpy')
+def fix(x, out=None):
+ """
+ Round to nearest integer towards zero.
+
+ Round an array of floats element-wise to nearest integer towards zero.
+ The rounded values have the same data-type as the input.
+
+ Parameters
+ ----------
+ x : array_like
+ An array to be rounded
+ out : ndarray, optional
+ A location into which the result is stored. If provided, it must have
+ a shape that the input broadcasts to. If not provided or None, a
+ freshly-allocated array is returned.
+
+ Returns
+ -------
+ out : ndarray of floats
+ An array with the same dimensions and data-type as the input.
+ If second argument is not supplied then a new array is returned
+ with the rounded values.
+
+ If a second argument is supplied the result is stored there.
+ The return value ``out`` is then a reference to that array.
+
+ See Also
+ --------
+ rint, trunc, floor, ceil
+ around : Round to given number of decimals
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.fix(3.14)
+ 3.0
+ >>> np.fix(3)
+ 3
+ >>> np.fix([2.1, 2.9, -2.1, -2.9])
+ array([ 2., 2., -2., -2.])
+
+ """
+ # promote back to an array if flattened
+ res = nx.asanyarray(nx.ceil(x, out=out))
+ res = nx.floor(x, out=res, where=nx.greater_equal(x, 0))
+
+ # when no out argument is passed and no subclasses are involved, flatten
+ # scalars
+ if out is None and type(res) is nx.ndarray:
+ res = res[()]
+ return res
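
The two-pass trick above writes ``ceil`` everywhere and then overwrites the
non-negative positions with ``floor``, so an ``out`` buffer is filled in place
and returned. A sketch with a hypothetical buffer ``buf`` (assuming a NumPy
2.x interpreter):

    >>> import numpy as np
    >>> buf = np.empty(2)
    >>> np.fix(np.array([1.5, -1.5]), out=buf)
    array([ 1., -1.])
    >>> buf
    array([ 1., -1.])
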
+
+
+@array_function_dispatch(_dispatcher, verify=False, module='numpy')
+def isposinf(x, out=None):
+ """
+ Test element-wise for positive infinity, return result as bool array.
+
+ Parameters
+ ----------
+ x : array_like
+ The input array.
+ out : array_like, optional
+ A location into which the result is stored. If provided, it must have a
+ shape that the input broadcasts to. If not provided or None, a
+ freshly-allocated boolean array is returned.
+
+ Returns
+ -------
+ out : ndarray
+ A boolean array with the same dimensions as the input.
+ If second argument is not supplied then a boolean array is returned
+ with values True where the corresponding element of the input is
+ positive infinity and values False where the element of the input is
+ not positive infinity.
+
+ If a second argument is supplied the result is stored there. If the
+ type of that array is a numeric type the result is represented as zeros
+ and ones, if the type is boolean then as False and True.
+ The return value `out` is then a reference to that array.
+
+ See Also
+ --------
+ isinf, isneginf, isfinite, isnan
+
+ Notes
+ -----
+ NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
+ (IEEE 754).
+
+ Errors result if the second argument is also supplied when x is a scalar
+ input, if first and second arguments have different shapes, or if the
+ first argument has complex values.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.isposinf(np.inf)
+ True
+ >>> np.isposinf(-np.inf)
+ False
+ >>> np.isposinf([-np.inf, 0., np.inf])
+ array([False, False, True])
+
+ >>> x = np.array([-np.inf, 0., np.inf])
+ >>> y = np.array([2, 2, 2])
+ >>> np.isposinf(x, y)
+ array([0, 0, 1])
+ >>> y
+ array([0, 0, 1])
+
+ """
+ is_inf = nx.isinf(x)
+ try:
+ signbit = ~nx.signbit(x)
+ except TypeError as e:
+ dtype = nx.asanyarray(x).dtype
+ raise TypeError(f'This operation is not supported for {dtype} values '
+ 'because it would be ambiguous.') from e
+ else:
+ return nx.logical_and(is_inf, signbit, out)
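
The ``try``/``except`` around ``signbit`` is what turns complex input into a
readable error instead of a bare ufunc failure (assuming a NumPy 2.x
interpreter):

    >>> import numpy as np
    >>> np.isposinf(np.array([1j]))
    Traceback (most recent call last):
        ...
    TypeError: This operation is not supported for complex128 values because it would be ambiguous.
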
+
+
+@array_function_dispatch(_dispatcher, verify=False, module='numpy')
+def isneginf(x, out=None):
+ """
+ Test element-wise for negative infinity, return result as bool array.
+
+ Parameters
+ ----------
+ x : array_like
+ The input array.
+ out : array_like, optional
+ A location into which the result is stored. If provided, it must have a
+ shape that the input broadcasts to. If not provided or None, a
+ freshly-allocated boolean array is returned.
+
+ Returns
+ -------
+ out : ndarray
+ A boolean array with the same dimensions as the input.
+ If second argument is not supplied then a numpy boolean array is
+ returned with values True where the corresponding element of the
+ input is negative infinity and values False where the element of
+ the input is not negative infinity.
+
+ If a second argument is supplied the result is stored there. If the
+ type of that array is a numeric type the result is represented as
+ zeros and ones, if the type is boolean then as False and True. The
+ return value `out` is then a reference to that array.
+
+ See Also
+ --------
+ isinf, isposinf, isnan, isfinite
+
+ Notes
+ -----
+ NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
+ (IEEE 754).
+
+ Errors result if the second argument is also supplied when x is a scalar
+ input, if first and second arguments have different shapes, or if the
+ first argument has complex values.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.isneginf(-np.inf)
+ True
+ >>> np.isneginf(np.inf)
+ False
+ >>> np.isneginf([-np.inf, 0., np.inf])
+ array([ True, False, False])
+
+ >>> x = np.array([-np.inf, 0., np.inf])
+ >>> y = np.array([2, 2, 2])
+ >>> np.isneginf(x, y)
+ array([1, 0, 0])
+ >>> y
+ array([1, 0, 0])
+
+ """
+ is_inf = nx.isinf(x)
+ try:
+ signbit = nx.signbit(x)
+ except TypeError as e:
+ dtype = nx.asanyarray(x).dtype
+ raise TypeError(f'This operation is not supported for {dtype} values '
+ 'because it would be ambiguous.') from e
+ else:
+ return nx.logical_and(is_inf, signbit, out)
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/_ufunclike_impl.pyi b/.venv/lib/python3.12/site-packages/numpy/lib/_ufunclike_impl.pyi
new file mode 100644
index 0000000..a673f05
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/_ufunclike_impl.pyi
@@ -0,0 +1,67 @@
+from typing import Any, TypeVar, overload
+
+import numpy as np
+from numpy import floating, object_
+from numpy._typing import (
+ NDArray,
+ _ArrayLikeFloat_co,
+ _ArrayLikeObject_co,
+ _FloatLike_co,
+)
+
+__all__ = ["fix", "isneginf", "isposinf"]
+
+_ArrayT = TypeVar("_ArrayT", bound=NDArray[Any])
+
+@overload
+def fix( # type: ignore[misc]
+ x: _FloatLike_co,
+ out: None = ...,
+) -> floating: ...
+@overload
+def fix(
+ x: _ArrayLikeFloat_co,
+ out: None = ...,
+) -> NDArray[floating]: ...
+@overload
+def fix(
+ x: _ArrayLikeObject_co,
+ out: None = ...,
+) -> NDArray[object_]: ...
+@overload
+def fix(
+ x: _ArrayLikeFloat_co | _ArrayLikeObject_co,
+ out: _ArrayT,
+) -> _ArrayT: ...
+
+@overload
+def isposinf( # type: ignore[misc]
+ x: _FloatLike_co,
+ out: None = ...,
+) -> np.bool: ...
+@overload
+def isposinf(
+ x: _ArrayLikeFloat_co,
+ out: None = ...,
+) -> NDArray[np.bool]: ...
+@overload
+def isposinf(
+ x: _ArrayLikeFloat_co,
+ out: _ArrayT,
+) -> _ArrayT: ...
+
+@overload
+def isneginf( # type: ignore[misc]
+ x: _FloatLike_co,
+ out: None = ...,
+) -> np.bool: ...
+@overload
+def isneginf(
+ x: _ArrayLikeFloat_co,
+ out: None = ...,
+) -> NDArray[np.bool]: ...
+@overload
+def isneginf(
+ x: _ArrayLikeFloat_co,
+ out: _ArrayT,
+) -> _ArrayT: ...
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/_user_array_impl.py b/.venv/lib/python3.12/site-packages/numpy/lib/_user_array_impl.py
new file mode 100644
index 0000000..f3a6c0f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/_user_array_impl.py
@@ -0,0 +1,299 @@
+"""
+Container class for backward compatibility with NumArray.
+
+The user_array.container class exists for backward compatibility with NumArray
+and is not meant to be used in new code. If you need to create an array
+container class, we recommend either creating a class that wraps an ndarray
+or subclasses ndarray.
+
+"""
+from numpy._core import (
+ absolute,
+ add,
+ arange,
+ array,
+ asarray,
+ bitwise_and,
+ bitwise_or,
+ bitwise_xor,
+ divide,
+ equal,
+ greater,
+ greater_equal,
+ invert,
+ left_shift,
+ less,
+ less_equal,
+ multiply,
+ not_equal,
+ power,
+ remainder,
+ reshape,
+ right_shift,
+ shape,
+ sin,
+ sqrt,
+ subtract,
+ transpose,
+)
+from numpy._core.overrides import set_module
+
+
+@set_module("numpy.lib.user_array")
+class container:
+ """
+ container(data, dtype=None, copy=True)
+
+ Standard container-class for easy multiple-inheritance.
+
+ Methods
+ -------
+ copy
+ byteswap
+ astype
+
+ """
+ def __init__(self, data, dtype=None, copy=True):
+ self.array = array(data, dtype, copy=copy)
+
+ def __repr__(self):
+ if self.ndim > 0:
+ return self.__class__.__name__ + repr(self.array)[len("array"):]
+ else:
+ return self.__class__.__name__ + "(" + repr(self.array) + ")"
+
+ def __array__(self, t=None):
+ if t:
+ return self.array.astype(t)
+ return self.array
+
+ # Array as sequence
+ def __len__(self):
+ return len(self.array)
+
+ def __getitem__(self, index):
+ return self._rc(self.array[index])
+
+ def __setitem__(self, index, value):
+ self.array[index] = asarray(value, self.dtype)
+
+ def __abs__(self):
+ return self._rc(absolute(self.array))
+
+ def __neg__(self):
+ return self._rc(-self.array)
+
+ def __add__(self, other):
+ return self._rc(self.array + asarray(other))
+
+ __radd__ = __add__
+
+ def __iadd__(self, other):
+ add(self.array, other, self.array)
+ return self
+
+ def __sub__(self, other):
+ return self._rc(self.array - asarray(other))
+
+ def __rsub__(self, other):
+ return self._rc(asarray(other) - self.array)
+
+ def __isub__(self, other):
+ subtract(self.array, other, self.array)
+ return self
+
+ def __mul__(self, other):
+ return self._rc(multiply(self.array, asarray(other)))
+
+ __rmul__ = __mul__
+
+ def __imul__(self, other):
+ multiply(self.array, other, self.array)
+ return self
+
+ def __mod__(self, other):
+ return self._rc(remainder(self.array, other))
+
+ def __rmod__(self, other):
+ return self._rc(remainder(other, self.array))
+
+ def __imod__(self, other):
+ remainder(self.array, other, self.array)
+ return self
+
+ def __divmod__(self, other):
+ return (self._rc(divide(self.array, other)),
+ self._rc(remainder(self.array, other)))
+
+ def __rdivmod__(self, other):
+ return (self._rc(divide(other, self.array)),
+ self._rc(remainder(other, self.array)))
+
+ def __pow__(self, other):
+ return self._rc(power(self.array, asarray(other)))
+
+ def __rpow__(self, other):
+ return self._rc(power(asarray(other), self.array))
+
+ def __ipow__(self, other):
+ power(self.array, other, self.array)
+ return self
+
+ def __lshift__(self, other):
+ return self._rc(left_shift(self.array, other))
+
+ def __rshift__(self, other):
+ return self._rc(right_shift(self.array, other))
+
+ def __rlshift__(self, other):
+ return self._rc(left_shift(other, self.array))
+
+ def __rrshift__(self, other):
+ return self._rc(right_shift(other, self.array))
+
+ def __ilshift__(self, other):
+ left_shift(self.array, other, self.array)
+ return self
+
+ def __irshift__(self, other):
+ right_shift(self.array, other, self.array)
+ return self
+
+ def __and__(self, other):
+ return self._rc(bitwise_and(self.array, other))
+
+ def __rand__(self, other):
+ return self._rc(bitwise_and(other, self.array))
+
+ def __iand__(self, other):
+ bitwise_and(self.array, other, self.array)
+ return self
+
+ def __xor__(self, other):
+ return self._rc(bitwise_xor(self.array, other))
+
+ def __rxor__(self, other):
+ return self._rc(bitwise_xor(other, self.array))
+
+ def __ixor__(self, other):
+ bitwise_xor(self.array, other, self.array)
+ return self
+
+ def __or__(self, other):
+ return self._rc(bitwise_or(self.array, other))
+
+ def __ror__(self, other):
+ return self._rc(bitwise_or(other, self.array))
+
+ def __ior__(self, other):
+ bitwise_or(self.array, other, self.array)
+ return self
+
+ def __pos__(self):
+ return self._rc(self.array)
+
+ def __invert__(self):
+ return self._rc(invert(self.array))
+
+ def _scalarfunc(self, func):
+ if self.ndim == 0:
+ return func(self[0])
+ else:
+ raise TypeError(
+ "only rank-0 arrays can be converted to Python scalars.")
+
+ def __complex__(self):
+ return self._scalarfunc(complex)
+
+ def __float__(self):
+ return self._scalarfunc(float)
+
+ def __int__(self):
+ return self._scalarfunc(int)
+
+ def __hex__(self):
+ return self._scalarfunc(hex)
+
+ def __oct__(self):
+ return self._scalarfunc(oct)
+
+ def __lt__(self, other):
+ return self._rc(less(self.array, other))
+
+ def __le__(self, other):
+ return self._rc(less_equal(self.array, other))
+
+ def __eq__(self, other):
+ return self._rc(equal(self.array, other))
+
+ def __ne__(self, other):
+ return self._rc(not_equal(self.array, other))
+
+ def __gt__(self, other):
+ return self._rc(greater(self.array, other))
+
+ def __ge__(self, other):
+ return self._rc(greater_equal(self.array, other))
+
+ def copy(self):
+ ""
+ return self._rc(self.array.copy())
+
+ def tobytes(self):
+ ""
+ return self.array.tobytes()
+
+ def byteswap(self):
+ ""
+ return self._rc(self.array.byteswap())
+
+ def astype(self, typecode):
+ ""
+ return self._rc(self.array.astype(typecode))
+
+ def _rc(self, a):
+ if len(shape(a)) == 0:
+ return a
+ else:
+ return self.__class__(a)
+
+ def __array_wrap__(self, *args):
+ return self.__class__(args[0])
+
+ def __setattr__(self, attr, value):
+ if attr == 'array':
+ object.__setattr__(self, attr, value)
+ return
+ try:
+ self.array.__setattr__(attr, value)
+ except AttributeError:
+ object.__setattr__(self, attr, value)
+
+ # Only called after other approaches fail.
+ def __getattr__(self, attr):
+ if (attr == 'array'):
+ return object.__getattribute__(self, attr)
+ return self.array.__getattribute__(attr)
+
+
+#############################################################
+# Test of class container
+#############################################################
+if __name__ == '__main__':
+ temp = reshape(arange(10000), (100, 100))
+
+ ua = container(temp)
+ # new object created begin test
+ print(dir(ua))
+ print(shape(ua), ua.shape) # I have changed Numeric.py
+
+ ua_small = ua[:3, :5]
+ print(ua_small)
+ # this did not change ua[0,0], which is not normal behavior
+ ua_small[0, 0] = 10
+ print(ua_small[0, 0], ua[0, 0])
+ print(sin(ua_small) / 3. * 6. + sqrt(ua_small ** 2))
+ print(less(ua_small, 103), type(less(ua_small, 103)))
+ print(type(ua_small * reshape(arange(15), shape(ua_small))))
+ print(reshape(ua_small, (5, 3)))
+ print(transpose(ua_small))
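
A subclass picks up the arithmetic and the ``_rc`` re-wrapping for free, which
is the intended use. A sketch importing from the private module shown in this
diff (per ``set_module``, the public home is ``numpy.lib.user_array``):

    >>> from numpy.lib._user_array_impl import container
    >>> class logarray(container):
    ...     pass
    >>> logarray([1, 2, 3]) + 10
    logarray([11, 12, 13])
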
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/_user_array_impl.pyi b/.venv/lib/python3.12/site-packages/numpy/lib/_user_array_impl.pyi
new file mode 100644
index 0000000..13c0a01
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/_user_array_impl.pyi
@@ -0,0 +1,225 @@
+from types import EllipsisType
+from typing import Any, Generic, Self, SupportsIndex, TypeAlias, overload
+
+from _typeshed import Incomplete
+from typing_extensions import TypeVar, override
+
+import numpy as np
+import numpy.typing as npt
+from numpy._typing import (
+ _AnyShape,
+ _ArrayLike,
+ _ArrayLikeBool_co,
+ _ArrayLikeInt_co,
+ _DTypeLike,
+)
+
+###
+
+_ScalarT = TypeVar("_ScalarT", bound=np.generic)
+_ShapeT = TypeVar("_ShapeT", bound=tuple[int, ...])
+_ShapeT_co = TypeVar("_ShapeT_co", bound=tuple[int, ...], default=_AnyShape, covariant=True)
+_DTypeT = TypeVar("_DTypeT", bound=np.dtype)
+_DTypeT_co = TypeVar("_DTypeT_co", bound=np.dtype, default=np.dtype, covariant=True)
+
+_BoolArrayT = TypeVar("_BoolArrayT", bound=container[Any, np.dtype[np.bool]])
+_IntegralArrayT = TypeVar("_IntegralArrayT", bound=container[Any, np.dtype[np.bool | np.integer | np.object_]])
+_RealContainerT = TypeVar(
+ "_RealContainerT",
+ bound=container[Any, np.dtype[np.bool | np.integer | np.floating | np.timedelta64 | np.object_]],
+)
+_NumericContainerT = TypeVar("_NumericContainerT", bound=container[Any, np.dtype[np.number | np.timedelta64 | np.object_]])
+
+_ArrayInt_co: TypeAlias = npt.NDArray[np.integer | np.bool]
+
+_ToIndexSlice: TypeAlias = slice | EllipsisType | _ArrayInt_co | None
+_ToIndexSlices: TypeAlias = _ToIndexSlice | tuple[_ToIndexSlice, ...]
+_ToIndex: TypeAlias = SupportsIndex | _ToIndexSlice
+_ToIndices: TypeAlias = _ToIndex | tuple[_ToIndex, ...]
+
+###
+
+class container(Generic[_ShapeT_co, _DTypeT_co]):
+ array: np.ndarray[_ShapeT_co, _DTypeT_co]
+
+ @overload
+ def __init__(
+ self,
+ /,
+ data: container[_ShapeT_co, _DTypeT_co] | np.ndarray[_ShapeT_co, _DTypeT_co],
+ dtype: None = None,
+ copy: bool = True,
+ ) -> None: ...
+ @overload
+ def __init__(
+ self: container[Any, np.dtype[_ScalarT]],
+ /,
+ data: _ArrayLike[_ScalarT],
+ dtype: None = None,
+ copy: bool = True,
+ ) -> None: ...
+ @overload
+ def __init__(
+ self: container[Any, np.dtype[_ScalarT]],
+ /,
+ data: npt.ArrayLike,
+ dtype: _DTypeLike[_ScalarT],
+ copy: bool = True,
+ ) -> None: ...
+ @overload
+ def __init__(self, /, data: npt.ArrayLike, dtype: npt.DTypeLike | None = None, copy: bool = True) -> None: ...
+
+ #
+ def __complex__(self, /) -> complex: ...
+ def __float__(self, /) -> float: ...
+ def __int__(self, /) -> int: ...
+ def __hex__(self, /) -> str: ...
+ def __oct__(self, /) -> str: ...
+
+ #
+ @override
+ def __eq__(self, other: object, /) -> container[_ShapeT_co, np.dtype[np.bool]]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride]
+ @override
+ def __ne__(self, other: object, /) -> container[_ShapeT_co, np.dtype[np.bool]]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride]
+
+ #
+ def __lt__(self, other: npt.ArrayLike, /) -> container[_ShapeT_co, np.dtype[np.bool]]: ...
+ def __le__(self, other: npt.ArrayLike, /) -> container[_ShapeT_co, np.dtype[np.bool]]: ...
+ def __gt__(self, other: npt.ArrayLike, /) -> container[_ShapeT_co, np.dtype[np.bool]]: ...
+ def __ge__(self, other: npt.ArrayLike, /) -> container[_ShapeT_co, np.dtype[np.bool]]: ...
+
+ #
+ def __len__(self, /) -> int: ...
+
+ # keep in sync with np.ndarray
+ @overload
+ def __getitem__(self, key: _ArrayInt_co | tuple[_ArrayInt_co, ...], /) -> container[_ShapeT_co, _DTypeT_co]: ...
+ @overload
+ def __getitem__(self, key: _ToIndexSlices, /) -> container[_AnyShape, _DTypeT_co]: ...
+ @overload
+ def __getitem__(self, key: _ToIndices, /) -> Any: ...
+ @overload
+ def __getitem__(self: container[Any, np.dtype[np.void]], key: list[str], /) -> container[_ShapeT_co, np.dtype[np.void]]: ...
+ @overload
+ def __getitem__(self: container[Any, np.dtype[np.void]], key: str, /) -> container[_ShapeT_co, np.dtype]: ...
+
+ # keep in sync with np.ndarray
+ @overload
+ def __setitem__(self, index: _ToIndices, value: object, /) -> None: ...
+ @overload
+ def __setitem__(self: container[Any, np.dtype[np.void]], key: str | list[str], value: object, /) -> None: ...
+
+ # keep in sync with np.ndarray
+ @overload
+ def __abs__(self: container[_ShapeT, np.dtype[np.complex64]], /) -> container[_ShapeT, np.dtype[np.float32]]: ... # type: ignore[overload-overlap]
+ @overload
+ def __abs__(self: container[_ShapeT, np.dtype[np.complex128]], /) -> container[_ShapeT, np.dtype[np.float64]]: ...
+ @overload
+ def __abs__(self: container[_ShapeT, np.dtype[np.complex192]], /) -> container[_ShapeT, np.dtype[np.float96]]: ...
+ @overload
+ def __abs__(self: container[_ShapeT, np.dtype[np.complex256]], /) -> container[_ShapeT, np.dtype[np.float128]]: ...
+ @overload
+ def __abs__(self: _RealContainerT, /) -> _RealContainerT: ...
+
+ #
+ def __neg__(self: _NumericContainerT, /) -> _NumericContainerT: ... # noqa: PYI019
+ def __pos__(self: _NumericContainerT, /) -> _NumericContainerT: ... # noqa: PYI019
+ def __invert__(self: _IntegralArrayT, /) -> _IntegralArrayT: ... # noqa: PYI019
+
+ # TODO(jorenham): complete these binary ops
+
+ #
+ def __add__(self, other: npt.ArrayLike, /) -> Incomplete: ...
+ def __radd__(self, other: npt.ArrayLike, /) -> Incomplete: ...
+ def __iadd__(self, other: npt.ArrayLike, /) -> Self: ...
+
+ #
+ def __sub__(self, other: npt.ArrayLike, /) -> Incomplete: ...
+ def __rsub__(self, other: npt.ArrayLike, /) -> Incomplete: ...
+ def __isub__(self, other: npt.ArrayLike, /) -> Self: ...
+
+ #
+ def __mul__(self, other: npt.ArrayLike, /) -> Incomplete: ...
+ def __rmul__(self, other: npt.ArrayLike, /) -> Incomplete: ...
+ def __imul__(self, other: npt.ArrayLike, /) -> Self: ...
+
+ #
+ def __mod__(self, other: npt.ArrayLike, /) -> Incomplete: ...
+ def __rmod__(self, other: npt.ArrayLike, /) -> Incomplete: ...
+ def __imod__(self, other: npt.ArrayLike, /) -> Self: ...
+
+ #
+ def __divmod__(self, other: npt.ArrayLike, /) -> tuple[Incomplete, Incomplete]: ...
+ def __rdivmod__(self, other: npt.ArrayLike, /) -> tuple[Incomplete, Incomplete]: ...
+
+ #
+ def __pow__(self, other: npt.ArrayLike, /) -> Incomplete: ...
+ def __rpow__(self, other: npt.ArrayLike, /) -> Incomplete: ...
+ def __ipow__(self, other: npt.ArrayLike, /) -> Self: ...
+
+ #
+ def __lshift__(self, other: _ArrayLikeInt_co, /) -> container[_AnyShape, np.dtype[np.integer]]: ...
+ def __rlshift__(self, other: _ArrayLikeInt_co, /) -> container[_AnyShape, np.dtype[np.integer]]: ...
+ def __ilshift__(self, other: _ArrayLikeInt_co, /) -> Self: ...
+
+ #
+ def __rshift__(self, other: _ArrayLikeInt_co, /) -> container[_AnyShape, np.dtype[np.integer]]: ...
+ def __rrshift__(self, other: _ArrayLikeInt_co, /) -> container[_AnyShape, np.dtype[np.integer]]: ...
+ def __irshift__(self, other: _ArrayLikeInt_co, /) -> Self: ...
+
+ #
+ @overload
+ def __and__(
+ self: container[Any, np.dtype[np.bool]], other: _ArrayLikeBool_co, /
+ ) -> container[_AnyShape, np.dtype[np.bool]]: ...
+ @overload
+ def __and__(self, other: _ArrayLikeInt_co, /) -> container[_AnyShape, np.dtype[np.bool | np.integer]]: ...
+ __rand__ = __and__
+ @overload
+ def __iand__(self: _BoolArrayT, other: _ArrayLikeBool_co, /) -> _BoolArrayT: ...
+ @overload
+ def __iand__(self, other: _ArrayLikeInt_co, /) -> Self: ...
+
+ #
+ @overload
+ def __xor__(
+ self: container[Any, np.dtype[np.bool]], other: _ArrayLikeBool_co, /
+ ) -> container[_AnyShape, np.dtype[np.bool]]: ...
+ @overload
+ def __xor__(self, other: _ArrayLikeInt_co, /) -> container[_AnyShape, np.dtype[np.bool | np.integer]]: ...
+ __rxor__ = __xor__
+ @overload
+ def __ixor__(self: _BoolArrayT, other: _ArrayLikeBool_co, /) -> _BoolArrayT: ...
+ @overload
+ def __ixor__(self, other: _ArrayLikeInt_co, /) -> Self: ...
+
+ #
+ @overload
+ def __or__(
+ self: container[Any, np.dtype[np.bool]], other: _ArrayLikeBool_co, /
+ ) -> container[_AnyShape, np.dtype[np.bool]]: ...
+ @overload
+ def __or__(self, other: _ArrayLikeInt_co, /) -> container[_AnyShape, np.dtype[np.bool | np.integer]]: ...
+ __ror__ = __or__
+ @overload
+ def __ior__(self: _BoolArrayT, other: _ArrayLikeBool_co, /) -> _BoolArrayT: ...
+ @overload
+ def __ior__(self, other: _ArrayLikeInt_co, /) -> Self: ...
+
+ #
+ @overload
+ def __array__(self, /, t: None = None) -> np.ndarray[_ShapeT_co, _DTypeT_co]: ...
+ @overload
+ def __array__(self, /, t: _DTypeT) -> np.ndarray[_ShapeT_co, _DTypeT]: ...
+
+ #
+ @overload
+ def __array_wrap__(self, arg0: npt.ArrayLike, /) -> container[_ShapeT_co, _DTypeT_co]: ...
+ @overload
+ def __array_wrap__(self, a: np.ndarray[_ShapeT, _DTypeT], c: Any = ..., s: Any = ..., /) -> container[_ShapeT, _DTypeT]: ...
+
+ #
+ def copy(self, /) -> Self: ...
+ def tobytes(self, /) -> bytes: ...
+ def byteswap(self, /) -> Self: ...
+ def astype(self, /, typecode: _DTypeLike[_ScalarT]) -> container[_ShapeT_co, np.dtype[_ScalarT]]: ...
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/_utils_impl.py b/.venv/lib/python3.12/site-packages/numpy/lib/_utils_impl.py
new file mode 100644
index 0000000..2e1ee23
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/_utils_impl.py
@@ -0,0 +1,779 @@
+import functools
+import os
+import platform
+import sys
+import textwrap
+import types
+import warnings
+
+import numpy as np
+from numpy._core import ndarray
+from numpy._utils import set_module
+
+__all__ = [
+ 'get_include', 'info', 'show_runtime'
+]
+
+
+@set_module('numpy')
+def show_runtime():
+ """
+    Print information about various resources in the system,
+    including available intrinsic support and the BLAS/LAPACK
+    libraries in use.
+
+ .. versionadded:: 1.24.0
+
+ See Also
+ --------
+ show_config : Show libraries in the system on which NumPy was built.
+
+ Notes
+ -----
+    1. Information is derived with the help of the
+       `threadpoolctl <https://pypi.org/project/threadpoolctl/>`_
+       library, if available.
+    2. SIMD related information is derived from ``__cpu_features__``,
+       ``__cpu_baseline__`` and ``__cpu_dispatch__``.
+
+ """
+ from pprint import pprint
+
+ from numpy._core._multiarray_umath import (
+ __cpu_baseline__,
+ __cpu_dispatch__,
+ __cpu_features__,
+ )
+ config_found = [{
+ "numpy_version": np.__version__,
+ "python": sys.version,
+ "uname": platform.uname(),
+ }]
+ features_found, features_not_found = [], []
+ for feature in __cpu_dispatch__:
+ if __cpu_features__[feature]:
+ features_found.append(feature)
+ else:
+ features_not_found.append(feature)
+ config_found.append({
+ "simd_extensions": {
+ "baseline": __cpu_baseline__,
+ "found": features_found,
+ "not_found": features_not_found
+ }
+ })
+ try:
+ from threadpoolctl import threadpool_info
+ config_found.extend(threadpool_info())
+ except ImportError:
+ print("WARNING: `threadpoolctl` not found in system!"
+ " Install it by `pip install threadpoolctl`."
+ " Once installed, try `np.show_runtime` again"
+ " for more detailed build information")
+ pprint(config_found)
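+
+# Rough shape of what gets printed (illustrative values only; the keys mirror
+# the dicts assembled above, plus one dict per library from threadpoolctl):
+#   [{'numpy_version': '2.0.0', 'python': '3.12....', 'uname': ...},
+#    {'simd_extensions': {'baseline': [...], 'found': [...], 'not_found': [...]}},
+#    {'user_api': 'blas', 'internal_api': 'openblas', ...}]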
+
+
+@set_module('numpy')
+def get_include():
+ """
+ Return the directory that contains the NumPy \\*.h header files.
+
+ Extension modules that need to compile against NumPy may need to use this
+ function to locate the appropriate include directory.
+
+ Notes
+ -----
+ When using ``setuptools``, for example in ``setup.py``::
+
+ import numpy as np
+ ...
+ Extension('extension_name', ...
+ include_dirs=[np.get_include()])
+ ...
+
+ Note that a CLI tool ``numpy-config`` was introduced in NumPy 2.0, using
+ that is likely preferred for build systems other than ``setuptools``::
+
+ $ numpy-config --cflags
+ -I/path/to/site-packages/numpy/_core/include
+
+ # Or rely on pkg-config:
+ $ export PKG_CONFIG_PATH=$(numpy-config --pkgconfigdir)
+ $ pkg-config --cflags
+ -I/path/to/site-packages/numpy/_core/include
+
+ Examples
+ --------
+ >>> np.get_include()
+ '.../site-packages/numpy/core/include' # may vary
+
+ """
+ import numpy
+ if numpy.show_config is None:
+ # running from numpy source directory
+ d = os.path.join(os.path.dirname(numpy.__file__), '_core', 'include')
+ else:
+ # using installed numpy core headers
+ import numpy._core as _core
+ d = os.path.join(os.path.dirname(_core.__file__), 'include')
+ return d
+
+
+class _Deprecate:
+ """
+ Decorator class to deprecate old functions.
+
+ Refer to `deprecate` for details.
+
+ See Also
+ --------
+ deprecate
+
+ """
+
+ def __init__(self, old_name=None, new_name=None, message=None):
+ self.old_name = old_name
+ self.new_name = new_name
+ self.message = message
+
+ def __call__(self, func, *args, **kwargs):
+ """
+        Decorator call. Refer to ``deprecate``.
+
+ """
+ old_name = self.old_name
+ new_name = self.new_name
+ message = self.message
+
+ if old_name is None:
+ old_name = func.__name__
+ if new_name is None:
+ depdoc = f"`{old_name}` is deprecated!"
+ else:
+ depdoc = f"`{old_name}` is deprecated, use `{new_name}` instead!"
+
+ if message is not None:
+ depdoc += "\n" + message
+
+ @functools.wraps(func)
+ def newfunc(*args, **kwds):
+ warnings.warn(depdoc, DeprecationWarning, stacklevel=2)
+ return func(*args, **kwds)
+
+ newfunc.__name__ = old_name
+ doc = func.__doc__
+ if doc is None:
+ doc = depdoc
+ else:
+ lines = doc.expandtabs().split('\n')
+ indent = _get_indent(lines[1:])
+ if lines[0].lstrip():
+ # Indent the original first line to let inspect.cleandoc()
+ # dedent the docstring despite the deprecation notice.
+ doc = indent * ' ' + doc
+ else:
+ # Remove the same leading blank lines as cleandoc() would.
+ skip = len(lines[0]) + 1
+ for line in lines[1:]:
+ if len(line) > indent:
+ break
+ skip += len(line) + 1
+ doc = doc[skip:]
+ depdoc = textwrap.indent(depdoc, ' ' * indent)
+ doc = f'{depdoc}\n\n{doc}'
+ newfunc.__doc__ = doc
+
+ return newfunc
+
+
+def _get_indent(lines):
+ """
+ Determines the leading whitespace that could be removed from all the lines.
+ """
+ indent = sys.maxsize
+ for line in lines:
+ content = len(line.lstrip())
+ if content:
+ indent = min(indent, len(line) - content)
+ if indent == sys.maxsize:
+ indent = 0
+ return indent
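+
+# For example, _get_indent(["    Returns", "    -------"]) is 4, and
+# _get_indent([]) is 0 (nothing to dedent).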
+
+
+def deprecate(*args, **kwargs):
+ """
+ Issues a DeprecationWarning, adds warning to `old_name`'s
+ docstring, rebinds ``old_name.__name__`` and returns the new
+ function object.
+
+ This function may also be used as a decorator.
+
+ .. deprecated:: 2.0
+ Use `~warnings.warn` with :exc:`DeprecationWarning` instead.
+
+ Parameters
+ ----------
+ func : function
+ The function to be deprecated.
+ old_name : str, optional
+ The name of the function to be deprecated. Default is None, in
+ which case the name of `func` is used.
+ new_name : str, optional
+ The new name for the function. Default is None, in which case the
+ deprecation message is that `old_name` is deprecated. If given, the
+ deprecation message is that `old_name` is deprecated and `new_name`
+ should be used instead.
+ message : str, optional
+ Additional explanation of the deprecation. Displayed in the
+ docstring after the warning.
+
+ Returns
+ -------
+ old_func : function
+ The deprecated function.
+
+ Examples
+ --------
+ Note that ``olduint`` returns a value after printing Deprecation
+ Warning:
+
+ >>> olduint = np.lib.utils.deprecate(np.uint)
+ DeprecationWarning: `uint64` is deprecated! # may vary
+ >>> olduint(6)
+ 6
+
+ """
+ # Deprecate may be run as a function or as a decorator
+ # If run as a function, we initialise the decorator class
+ # and execute its __call__ method.
+
+ # Deprecated in NumPy 2.0, 2023-07-11
+ warnings.warn(
+ "`deprecate` is deprecated, "
+ "use `warn` with `DeprecationWarning` instead. "
+ "(deprecated in NumPy 2.0)",
+ DeprecationWarning,
+ stacklevel=2
+ )
+
+ if args:
+ fn = args[0]
+ args = args[1:]
+
+ return _Deprecate(*args, **kwargs)(fn)
+ else:
+ return _Deprecate(*args, **kwargs)
+
+
+def deprecate_with_doc(msg):
+ """
+ Deprecates a function and includes the deprecation in its docstring.
+
+ .. deprecated:: 2.0
+ Use `~warnings.warn` with :exc:`DeprecationWarning` instead.
+
+ This function is used as a decorator. It returns an object that can be
+ used to issue a DeprecationWarning, by passing the to-be decorated
+ function as argument, this adds warning to the to-be decorated function's
+ docstring and returns the new function object.
+
+ See Also
+ --------
+ deprecate : Decorate a function such that it issues a
+ :exc:`DeprecationWarning`
+
+ Parameters
+ ----------
+ msg : str
+ Additional explanation of the deprecation. Displayed in the
+ docstring after the warning.
+
+ Returns
+ -------
+ obj : object
+
+ """
+
+ # Deprecated in NumPy 2.0, 2023-07-11
+ warnings.warn(
+ "`deprecate` is deprecated, "
+ "use `warn` with `DeprecationWarning` instead. "
+ "(deprecated in NumPy 2.0)",
+ DeprecationWarning,
+ stacklevel=2
+ )
+
+ return _Deprecate(message=msg)
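+
+# Usage sketch (the decorator itself is deprecated; shown for reference only):
+#   @deprecate_with_doc("Use `new_func` instead.")
+#   def old_func(x):
+#       ...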
+
+
+#-----------------------------------------------------------------------------
+
+
+# NOTE: pydoc defines a help function which works similarly to this
+# except it uses a pager to take over the screen.
+
+# combine name and arguments and split to multiple lines of width
+# characters. End lines on a comma and begin argument list indented with
+# the rest of the arguments.
+def _split_line(name, arguments, width):
+ firstwidth = len(name)
+ k = firstwidth
+ newstr = name
+ sepstr = ", "
+ arglist = arguments.split(sepstr)
+ for argument in arglist:
+ if k == firstwidth:
+ addstr = ""
+ else:
+ addstr = sepstr
+ k = k + len(argument) + len(addstr)
+ if k > width:
+ k = firstwidth + 1 + len(argument)
+ newstr = newstr + ",\n" + " " * (firstwidth + 2) + argument
+ else:
+ newstr = newstr + addstr + argument
+ return newstr
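+
+# e.g. _split_line("info", "(object=None, maxwidth=76, output=None)", 30)
+# keeps "info(object=None, maxwidth=76" on the first line and wraps the
+# remaining argument onto a new line indented just past the function name.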
+
+
+_namedict = None
+_dictlist = None
+
+# Traverse all module directories underneath globals
+# to see if something is defined
+def _makenamedict(module='numpy'):
+ module = __import__(module, globals(), locals(), [])
+ thedict = {module.__name__: module.__dict__}
+ dictlist = [module.__name__]
+ totraverse = [module.__dict__]
+    while totraverse:
+ thisdict = totraverse.pop(0)
+ for x in thisdict.keys():
+ if isinstance(thisdict[x], types.ModuleType):
+ modname = thisdict[x].__name__
+ if modname not in dictlist:
+ moddict = thisdict[x].__dict__
+ dictlist.append(modname)
+ totraverse.append(moddict)
+ thedict[modname] = moddict
+ return thedict, dictlist
+
+
+def _info(obj, output=None):
+ """Provide information about ndarray obj.
+
+ Parameters
+ ----------
+ obj : ndarray
+ Must be ndarray, not checked.
+ output
+ Where printed output goes.
+
+ Notes
+ -----
+ Copied over from the numarray module prior to its removal.
+ Adapted somewhat as only numpy is an option now.
+
+ Called by info.
+
+ """
+ extra = ""
+ tic = ""
+ bp = lambda x: x
+ cls = getattr(obj, '__class__', type(obj))
+ nm = getattr(cls, '__name__', cls)
+ strides = obj.strides
+ endian = obj.dtype.byteorder
+
+ if output is None:
+ output = sys.stdout
+
+ print("class: ", nm, file=output)
+ print("shape: ", obj.shape, file=output)
+ print("strides: ", strides, file=output)
+ print("itemsize: ", obj.itemsize, file=output)
+ print("aligned: ", bp(obj.flags.aligned), file=output)
+ print("contiguous: ", bp(obj.flags.contiguous), file=output)
+ print("fortran: ", obj.flags.fortran, file=output)
+ print(
+ f"data pointer: {hex(obj.ctypes._as_parameter_.value)}{extra}",
+ file=output
+ )
+ print("byteorder: ", end=' ', file=output)
+ if endian in ['|', '=']:
+ print(f"{tic}{sys.byteorder}{tic}", file=output)
+ byteswap = False
+ elif endian == '>':
+ print(f"{tic}big{tic}", file=output)
+ byteswap = sys.byteorder != "big"
+ else:
+ print(f"{tic}little{tic}", file=output)
+ byteswap = sys.byteorder != "little"
+ print("byteswap: ", bp(byteswap), file=output)
+ print(f"type: {obj.dtype}", file=output)
+
+
+@set_module('numpy')
+def info(object=None, maxwidth=76, output=None, toplevel='numpy'):
+ """
+ Get help information for an array, function, class, or module.
+
+ Parameters
+ ----------
+ object : object or str, optional
+ Input object or name to get information about. If `object` is
+ an `ndarray` instance, information about the array is printed.
+ If `object` is a numpy object, its docstring is given. If it is
+ a string, available modules are searched for matching objects.
+ If None, information about `info` itself is returned.
+ maxwidth : int, optional
+ Printing width.
+ output : file like object, optional
+ File like object that the output is written to, default is
+ ``None``, in which case ``sys.stdout`` will be used.
+ The object has to be opened in 'w' or 'a' mode.
+ toplevel : str, optional
+ Start search at this level.
+
+ Notes
+ -----
+ When used interactively with an object, ``np.info(obj)`` is equivalent
+ to ``help(obj)`` on the Python prompt or ``obj?`` on the IPython
+ prompt.
+
+ Examples
+ --------
+ >>> np.info(np.polyval) # doctest: +SKIP
+ polyval(p, x)
+ Evaluate the polynomial p at x.
+ ...
+
+ When using a string for `object` it is possible to get multiple results.
+
+ >>> np.info('fft') # doctest: +SKIP
+ *** Found in numpy ***
+ Core FFT routines
+ ...
+ *** Found in numpy.fft ***
+ fft(a, n=None, axis=-1)
+ ...
+ *** Repeat reference found in numpy.fft.fftpack ***
+ *** Total of 3 references found. ***
+
+ When the argument is an array, information about the array is printed.
+
+ >>> a = np.array([[1 + 2j, 3, -4], [-5j, 6, 0]], dtype=np.complex64)
+ >>> np.info(a)
+ class: ndarray
+ shape: (2, 3)
+ strides: (24, 8)
+ itemsize: 8
+ aligned: True
+ contiguous: True
+ fortran: False
+ data pointer: 0x562b6e0d2860 # may vary
+ byteorder: little
+ byteswap: False
+ type: complex64
+
+ """
+ global _namedict, _dictlist
+ # Local import to speed up numpy's import time.
+ import inspect
+ import pydoc
+
+ if (hasattr(object, '_ppimport_importer') or
+ hasattr(object, '_ppimport_module')):
+ object = object._ppimport_module
+ elif hasattr(object, '_ppimport_attr'):
+ object = object._ppimport_attr
+
+ if output is None:
+ output = sys.stdout
+
+ if object is None:
+ info(info)
+ elif isinstance(object, ndarray):
+ _info(object, output=output)
+ elif isinstance(object, str):
+ if _namedict is None:
+ _namedict, _dictlist = _makenamedict(toplevel)
+ numfound = 0
+ objlist = []
+ for namestr in _dictlist:
+ try:
+ obj = _namedict[namestr][object]
+ if id(obj) in objlist:
+ print(f"\n *** Repeat reference found in {namestr} *** ",
+ file=output
+ )
+ else:
+ objlist.append(id(obj))
+ print(f" *** Found in {namestr} ***", file=output)
+ info(obj)
+ print("-" * maxwidth, file=output)
+ numfound += 1
+ except KeyError:
+ pass
+ if numfound == 0:
+ print(f"Help for {object} not found.", file=output)
+ else:
+ print("\n "
+ "*** Total of %d references found. ***" % numfound,
+ file=output
+ )
+
+ elif inspect.isfunction(object) or inspect.ismethod(object):
+ name = object.__name__
+ try:
+ arguments = str(inspect.signature(object))
+ except Exception:
+ arguments = "()"
+
+ if len(name + arguments) > maxwidth:
+ argstr = _split_line(name, arguments, maxwidth)
+ else:
+ argstr = name + arguments
+
+ print(" " + argstr + "\n", file=output)
+ print(inspect.getdoc(object), file=output)
+
+ elif inspect.isclass(object):
+ name = object.__name__
+ try:
+ arguments = str(inspect.signature(object))
+ except Exception:
+ arguments = "()"
+
+ if len(name + arguments) > maxwidth:
+ argstr = _split_line(name, arguments, maxwidth)
+ else:
+ argstr = name + arguments
+
+ print(" " + argstr + "\n", file=output)
+ doc1 = inspect.getdoc(object)
+ if doc1 is None:
+ if hasattr(object, '__init__'):
+ print(inspect.getdoc(object.__init__), file=output)
+ else:
+ print(inspect.getdoc(object), file=output)
+
+ methods = pydoc.allmethods(object)
+
+ public_methods = [meth for meth in methods if meth[0] != '_']
+ if public_methods:
+ print("\n\nMethods:\n", file=output)
+ for meth in public_methods:
+ thisobj = getattr(object, meth, None)
+ if thisobj is not None:
+ methstr, other = pydoc.splitdoc(
+ inspect.getdoc(thisobj) or "None"
+ )
+ print(f" {meth} -- {methstr}", file=output)
+
+ elif hasattr(object, '__doc__'):
+ print(inspect.getdoc(object), file=output)
+
+
+def safe_eval(source):
+ """
+ Protected string evaluation.
+
+ .. deprecated:: 2.0
+ Use `ast.literal_eval` instead.
+
+ Evaluate a string containing a Python literal expression without
+ allowing the execution of arbitrary non-literal code.
+
+ .. warning::
+
+ This function is identical to :py:meth:`ast.literal_eval` and
+ has the same security implications. It may not always be safe
+ to evaluate large input strings.
+
+ Parameters
+ ----------
+ source : str
+ The string to evaluate.
+
+ Returns
+ -------
+ obj : object
+ The result of evaluating `source`.
+
+ Raises
+ ------
+ SyntaxError
+ If the code has invalid Python syntax, or if it contains
+ non-literal code.
+
+ Examples
+ --------
+ >>> np.safe_eval('1')
+ 1
+ >>> np.safe_eval('[1, 2, 3]')
+ [1, 2, 3]
+ >>> np.safe_eval('{"foo": ("bar", 10.0)}')
+ {'foo': ('bar', 10.0)}
+
+ >>> np.safe_eval('import os')
+ Traceback (most recent call last):
+ ...
+ SyntaxError: invalid syntax
+
+ >>> np.safe_eval('open("/home/user/.ssh/id_dsa").read()')
+ Traceback (most recent call last):
+ ...
+ ValueError: malformed node or string: <_ast.Call object at 0x...>
+
+ """
+
+ # Deprecated in NumPy 2.0, 2023-07-11
+ warnings.warn(
+ "`safe_eval` is deprecated. Use `ast.literal_eval` instead. "
+ "Be aware of security implications, such as memory exhaustion "
+ "based attacks (deprecated in NumPy 2.0)",
+ DeprecationWarning,
+ stacklevel=2
+ )
+
+ # Local import to speed up numpy's import time.
+ import ast
+ return ast.literal_eval(source)
+
+
+def _median_nancheck(data, result, axis):
+ """
+    Utility function to check the median result from `data` for NaN values at
+    the end, and return NaN in that case. The input `result` can also be a
+    MaskedArray.
+
+ Parameters
+ ----------
+ data : array
+ Sorted input data to median function
+ result : Array or MaskedArray
+ Result of median function.
+ axis : int
+ Axis along which the median was computed.
+
+ Returns
+ -------
+ result : scalar or ndarray
+ Median or NaN in axes which contained NaN in the input. If the input
+ was an array, NaN will be inserted in-place. If a scalar, either the
+ input itself or a scalar NaN.
+ """
+ if data.size == 0:
+ return result
+ potential_nans = data.take(-1, axis=axis)
+ n = np.isnan(potential_nans)
+    # masked NaN values are ok, although for masked arrays the copyto below
+    # may fail for unmasked NaN values when the result is a scalar (this was
+    # always broken).
+ if np.ma.isMaskedArray(n):
+ n = n.filled(False)
+
+ if not n.any():
+ return result
+
+ # Without given output, it is possible that the current result is a
+ # numpy scalar, which is not writeable. If so, just return nan.
+ if isinstance(result, np.generic):
+ return potential_nans
+
+ # Otherwise copy NaNs (if there are any)
+ np.copyto(result, potential_nans, where=n)
+ return result
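+
+# Illustration: with data = np.sort([[1., 2., np.nan], [1., 2., 3.]], axis=1)
+# the trailing column is [nan, 3.], so the first row's median is overwritten
+# with nan while the second row's result is returned unchanged.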
+
+def _opt_info():
+ """
+ Returns a string containing the CPU features supported
+ by the current build.
+
+ The format of the string can be explained as follows:
+ - Dispatched features supported by the running machine end with `*`.
+ - Dispatched features not supported by the running machine
+ end with `?`.
+ - Remaining features represent the baseline.
+
+ Returns:
+ str: A formatted string indicating the supported CPU features.
+ """
+ from numpy._core._multiarray_umath import (
+ __cpu_baseline__,
+ __cpu_dispatch__,
+ __cpu_features__,
+ )
+
+ if len(__cpu_baseline__) == 0 and len(__cpu_dispatch__) == 0:
+ return ''
+
+ enabled_features = ' '.join(__cpu_baseline__)
+ for feature in __cpu_dispatch__:
+ if __cpu_features__[feature]:
+ enabled_features += f" {feature}*"
+ else:
+ enabled_features += f" {feature}?"
+
+ return enabled_features
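+
+# A possible return value (machine dependent, purely illustrative):
+#   "SSE SSE2 SSE3 SSSE3* AVX2* AVX512F?"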
+
+def drop_metadata(dtype, /):
+ """
+ Returns the dtype unchanged if it contained no metadata or a copy of the
+ dtype if it (or any of its structure dtypes) contained metadata.
+
+ This utility is used by `np.save` and `np.savez` to drop metadata before
+ saving.
+
+ .. note::
+
+        Due to its limitations, this function may move to a more
+        appropriate home or change in the future, and is considered
+        semi-public API only.
+
+ .. warning::
+
+        This function does not preserve more exotic things like record
+        dtypes, and for user dtypes it may simply return the wrong thing.
+        If you need to be sure about the latter, check the result with:
+ ``np.can_cast(new_dtype, dtype, casting="no")``.
+
+ """
+ if dtype.fields is not None:
+ found_metadata = dtype.metadata is not None
+
+ names = []
+ formats = []
+ offsets = []
+ titles = []
+ for name, field in dtype.fields.items():
+ field_dt = drop_metadata(field[0])
+ if field_dt is not field[0]:
+ found_metadata = True
+
+ names.append(name)
+ formats.append(field_dt)
+ offsets.append(field[1])
+ titles.append(None if len(field) < 3 else field[2])
+
+ if not found_metadata:
+ return dtype
+
+ structure = {
+ 'names': names, 'formats': formats, 'offsets': offsets, 'titles': titles,
+ 'itemsize': dtype.itemsize}
+
+ # NOTE: Could pass (dtype.type, structure) to preserve record dtypes...
+ return np.dtype(structure, align=dtype.isalignedstruct)
+ elif dtype.subdtype is not None:
+ # subarray dtype
+ subdtype, shape = dtype.subdtype
+ new_subdtype = drop_metadata(subdtype)
+ if dtype.metadata is None and new_subdtype is subdtype:
+ return dtype
+
+ return np.dtype((new_subdtype, shape))
+ else:
+ # Normal unstructured dtype
+ if dtype.metadata is None:
+ return dtype
+ # Note that `dt.str` doesn't round-trip e.g. for user-dtypes.
+ return np.dtype(dtype.str)
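+
+
+# Usage sketch (dtype equality ignores metadata, so the stripped copy still
+# compares equal while carrying none):
+#   dt = np.dtype(np.float64, metadata={"unit": "m"})
+#   assert drop_metadata(dt) == dt
+#   assert drop_metadata(dt).metadata is None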
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/_utils_impl.pyi b/.venv/lib/python3.12/site-packages/numpy/lib/_utils_impl.pyi
new file mode 100644
index 0000000..00ed47c
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/_utils_impl.pyi
@@ -0,0 +1,10 @@
+from _typeshed import SupportsWrite
+
+from numpy._typing import DTypeLike
+
+__all__ = ["get_include", "info", "show_runtime"]
+
+def get_include() -> str: ...
+def show_runtime() -> None: ...
+def info(object: object = ..., maxwidth: int = ..., output: SupportsWrite[str] | None = ..., toplevel: str = ...) -> None: ...
+def drop_metadata(dtype: DTypeLike, /) -> DTypeLike: ...
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/_version.py b/.venv/lib/python3.12/site-packages/numpy/lib/_version.py
new file mode 100644
index 0000000..f7a3538
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/_version.py
@@ -0,0 +1,154 @@
+"""Utility to compare (NumPy) version strings.
+
+The NumpyVersion class allows properly comparing numpy version strings.
+The LooseVersion and StrictVersion classes that distutils provides don't
+work; they don't recognize anything like alpha/beta/rc/dev versions.
+
+"""
+import re
+
+__all__ = ['NumpyVersion']
+
+
+class NumpyVersion:
+ """Parse and compare numpy version strings.
+
+ NumPy has the following versioning scheme (numbers given are examples; they
+ can be > 9 in principle):
+
+ - Released version: '1.8.0', '1.8.1', etc.
+ - Alpha: '1.8.0a1', '1.8.0a2', etc.
+ - Beta: '1.8.0b1', '1.8.0b2', etc.
+ - Release candidates: '1.8.0rc1', '1.8.0rc2', etc.
+ - Development versions: '1.8.0.dev-f1234afa' (git commit hash appended)
+ - Development versions after a1: '1.8.0a1.dev-f1234afa',
+ '1.8.0b2.dev-f1234afa',
+ '1.8.1rc1.dev-f1234afa', etc.
+ - Development versions (no git hash available): '1.8.0.dev-Unknown'
+
+ Comparing needs to be done against a valid version string or other
+ `NumpyVersion` instance. Note that all development versions of the same
+ (pre-)release compare equal.
+
+ Parameters
+ ----------
+ vstring : str
+ NumPy version string (``np.__version__``).
+
+ Examples
+ --------
+ >>> from numpy.lib import NumpyVersion
+ >>> if NumpyVersion(np.__version__) < '1.7.0':
+ ... print('skip')
+ >>> # skip
+
+ >>> NumpyVersion('1.7') # raises ValueError, add ".0"
+ Traceback (most recent call last):
+ ...
+ ValueError: Not a valid numpy version string
+
+ """
+
+ __module__ = "numpy.lib"
+
+ def __init__(self, vstring):
+ self.vstring = vstring
+ ver_main = re.match(r'\d+\.\d+\.\d+', vstring)
+ if not ver_main:
+ raise ValueError("Not a valid numpy version string")
+
+ self.version = ver_main.group()
+ self.major, self.minor, self.bugfix = [int(x) for x in
+ self.version.split('.')]
+ if len(vstring) == ver_main.end():
+ self.pre_release = 'final'
+ else:
+ alpha = re.match(r'a\d', vstring[ver_main.end():])
+ beta = re.match(r'b\d', vstring[ver_main.end():])
+ rc = re.match(r'rc\d', vstring[ver_main.end():])
+ pre_rel = [m for m in [alpha, beta, rc] if m is not None]
+ if pre_rel:
+ self.pre_release = pre_rel[0].group()
+ else:
+ self.pre_release = ''
+
+        self.is_devversion = bool(re.search(r'\.dev', vstring))
+
+ def _compare_version(self, other):
+ """Compare major.minor.bugfix"""
+ if self.major == other.major:
+ if self.minor == other.minor:
+ if self.bugfix == other.bugfix:
+ vercmp = 0
+ elif self.bugfix > other.bugfix:
+ vercmp = 1
+ else:
+ vercmp = -1
+ elif self.minor > other.minor:
+ vercmp = 1
+ else:
+ vercmp = -1
+ elif self.major > other.major:
+ vercmp = 1
+ else:
+ vercmp = -1
+
+ return vercmp
+
+ def _compare_pre_release(self, other):
+ """Compare alpha/beta/rc/final."""
+ if self.pre_release == other.pre_release:
+ vercmp = 0
+ elif self.pre_release == 'final':
+ vercmp = 1
+ elif other.pre_release == 'final':
+ vercmp = -1
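+        # lexicographic string comparison happens to order the remaining
+        # pre-release tags correctly: 'a1' < 'a2' < 'b1' < 'rc1'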
+ elif self.pre_release > other.pre_release:
+ vercmp = 1
+ else:
+ vercmp = -1
+
+ return vercmp
+
+ def _compare(self, other):
+ if not isinstance(other, (str, NumpyVersion)):
+ raise ValueError("Invalid object to compare with NumpyVersion.")
+
+ if isinstance(other, str):
+ other = NumpyVersion(other)
+
+ vercmp = self._compare_version(other)
+ if vercmp == 0:
+ # Same x.y.z version, check for alpha/beta/rc
+ vercmp = self._compare_pre_release(other)
+ if vercmp == 0:
+ # Same version and same pre-release, check if dev version
+ if self.is_devversion is other.is_devversion:
+ vercmp = 0
+ elif self.is_devversion:
+ vercmp = -1
+ else:
+ vercmp = 1
+
+ return vercmp
+
+ def __lt__(self, other):
+ return self._compare(other) < 0
+
+ def __le__(self, other):
+ return self._compare(other) <= 0
+
+ def __eq__(self, other):
+ return self._compare(other) == 0
+
+ def __ne__(self, other):
+ return self._compare(other) != 0
+
+ def __gt__(self, other):
+ return self._compare(other) > 0
+
+ def __ge__(self, other):
+ return self._compare(other) >= 0
+
+ def __repr__(self):
+ return f"NumpyVersion({self.vstring})"
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/_version.pyi b/.venv/lib/python3.12/site-packages/numpy/lib/_version.pyi
new file mode 100644
index 0000000..c53ef79
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/_version.pyi
@@ -0,0 +1,17 @@
+__all__ = ["NumpyVersion"]
+
+class NumpyVersion:
+ vstring: str
+ version: str
+ major: int
+ minor: int
+ bugfix: int
+ pre_release: str
+ is_devversion: bool
+ def __init__(self, vstring: str) -> None: ...
+ def __lt__(self, other: str | NumpyVersion) -> bool: ...
+ def __le__(self, other: str | NumpyVersion) -> bool: ...
+ def __eq__(self, other: str | NumpyVersion) -> bool: ... # type: ignore[override]
+ def __ne__(self, other: str | NumpyVersion) -> bool: ... # type: ignore[override]
+ def __gt__(self, other: str | NumpyVersion) -> bool: ...
+ def __ge__(self, other: str | NumpyVersion) -> bool: ...
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/array_utils.py b/.venv/lib/python3.12/site-packages/numpy/lib/array_utils.py
new file mode 100644
index 0000000..c267eb0
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/array_utils.py
@@ -0,0 +1,7 @@
+from ._array_utils_impl import ( # noqa: F401
+ __all__,
+ __doc__,
+ byte_bounds,
+ normalize_axis_index,
+ normalize_axis_tuple,
+)
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/array_utils.pyi b/.venv/lib/python3.12/site-packages/numpy/lib/array_utils.pyi
new file mode 100644
index 0000000..8adc3c5
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/array_utils.pyi
@@ -0,0 +1,12 @@
+from ._array_utils_impl import (
+    __all__ as __all__,
+    byte_bounds as byte_bounds,
+    normalize_axis_index as normalize_axis_index,
+    normalize_axis_tuple as normalize_axis_tuple,
+)
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/format.py b/.venv/lib/python3.12/site-packages/numpy/lib/format.py
new file mode 100644
index 0000000..8e0c799
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/format.py
@@ -0,0 +1,24 @@
+from ._format_impl import ( # noqa: F401
+ ARRAY_ALIGN,
+ BUFFER_SIZE,
+ EXPECTED_KEYS,
+ GROWTH_AXIS_MAX_DIGITS,
+ MAGIC_LEN,
+ MAGIC_PREFIX,
+ __all__,
+ __doc__,
+ descr_to_dtype,
+ drop_metadata,
+ dtype_to_descr,
+ header_data_from_array_1_0,
+ isfileobj,
+ magic,
+ open_memmap,
+ read_array,
+ read_array_header_1_0,
+ read_array_header_2_0,
+ read_magic,
+ write_array,
+ write_array_header_1_0,
+ write_array_header_2_0,
+)
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/format.pyi b/.venv/lib/python3.12/site-packages/numpy/lib/format.pyi
new file mode 100644
index 0000000..dd9470e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/format.pyi
@@ -0,0 +1,66 @@
+from ._format_impl import (
+    ARRAY_ALIGN as ARRAY_ALIGN,
+    BUFFER_SIZE as BUFFER_SIZE,
+    EXPECTED_KEYS as EXPECTED_KEYS,
+    GROWTH_AXIS_MAX_DIGITS as GROWTH_AXIS_MAX_DIGITS,
+    MAGIC_LEN as MAGIC_LEN,
+    MAGIC_PREFIX as MAGIC_PREFIX,
+    __all__ as __all__,
+    __doc__ as __doc__,
+    descr_to_dtype as descr_to_dtype,
+    drop_metadata as drop_metadata,
+    dtype_to_descr as dtype_to_descr,
+    header_data_from_array_1_0 as header_data_from_array_1_0,
+    isfileobj as isfileobj,
+    magic as magic,
+    open_memmap as open_memmap,
+    read_array as read_array,
+    read_array_header_1_0 as read_array_header_1_0,
+    read_array_header_2_0 as read_array_header_2_0,
+    read_magic as read_magic,
+    write_array as write_array,
+    write_array_header_1_0 as write_array_header_1_0,
+    write_array_header_2_0 as write_array_header_2_0,
+)
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/introspect.py b/.venv/lib/python3.12/site-packages/numpy/lib/introspect.py
new file mode 100644
index 0000000..f4a0f32
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/introspect.py
@@ -0,0 +1,95 @@
+"""
+Introspection helper functions.
+"""
+
+__all__ = ['opt_func_info']
+
+
+def opt_func_info(func_name=None, signature=None):
+ """
+ Returns a dictionary containing the currently supported CPU dispatched
+ features for all optimized functions.
+
+ Parameters
+ ----------
+    func_name : str, optional
+        Regular expression to filter by function name.
+    signature : str, optional
+        Regular expression to filter by data type.
+
+ Returns
+ -------
+ dict
+ A dictionary where keys are optimized function names and values are
+ nested dictionaries indicating supported targets based on data types.
+
+ Examples
+ --------
+    Retrieve dispatch information for functions named 'add' or 'abs' and
+    data types 'float64' or 'complex64':
+
+ >>> import numpy as np
+    >>> res = np.lib.introspect.opt_func_info(
+ ... func_name="add|abs", signature="float64|complex64"
+ ... )
+ >>> import json
+    >>> print(json.dumps(res, indent=2))
+ {
+ "absolute": {
+ "dd": {
+ "current": "SSE41",
+ "available": "SSE41 baseline(SSE SSE2 SSE3)"
+ },
+ "Ff": {
+ "current": "FMA3__AVX2",
+ "available": "AVX512F FMA3__AVX2 baseline(SSE SSE2 SSE3)"
+ },
+ "Dd": {
+ "current": "FMA3__AVX2",
+ "available": "AVX512F FMA3__AVX2 baseline(SSE SSE2 SSE3)"
+ }
+ },
+ "add": {
+ "ddd": {
+ "current": "FMA3__AVX2",
+ "available": "FMA3__AVX2 baseline(SSE SSE2 SSE3)"
+ },
+ "FFF": {
+ "current": "FMA3__AVX2",
+ "available": "FMA3__AVX2 baseline(SSE SSE2 SSE3)"
+ }
+ }
+ }
+
+ """
+ import re
+
+ from numpy._core._multiarray_umath import __cpu_targets_info__ as targets
+ from numpy._core._multiarray_umath import dtype
+
+ if func_name is not None:
+ func_pattern = re.compile(func_name)
+ matching_funcs = {
+ k: v for k, v in targets.items()
+ if func_pattern.search(k)
+ }
+ else:
+ matching_funcs = targets
+
+ if signature is not None:
+ sig_pattern = re.compile(signature)
+ matching_sigs = {}
+ for k, v in matching_funcs.items():
+ matching_chars = {}
+            for chars, char_targets in v.items():
+ if any(
+ sig_pattern.search(c) or sig_pattern.search(dtype(c).name)
+ for c in chars
+ ):
+                    matching_chars[chars] = char_targets
+ if matching_chars:
+ matching_sigs[k] = matching_chars
+ else:
+ matching_sigs = matching_funcs
+ return matching_sigs
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/introspect.pyi b/.venv/lib/python3.12/site-packages/numpy/lib/introspect.pyi
new file mode 100644
index 0000000..7929981
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/introspect.pyi
@@ -0,0 +1,3 @@
+__all__ = ["opt_func_info"]
+
+def opt_func_info(func_name: str | None = None, signature: str | None = None) -> dict[str, dict[str, dict[str, str]]]: ...
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/mixins.py b/.venv/lib/python3.12/site-packages/numpy/lib/mixins.py
new file mode 100644
index 0000000..831bb34
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/mixins.py
@@ -0,0 +1,180 @@
+"""
+Mixin classes for custom array types that don't inherit from ndarray.
+"""
+
+__all__ = ['NDArrayOperatorsMixin']
+
+
+def _disables_array_ufunc(obj):
+ """True when __array_ufunc__ is set to None."""
+ try:
+ return obj.__array_ufunc__ is None
+ except AttributeError:
+ return False
+
+
+def _binary_method(ufunc, name):
+ """Implement a forward binary method with a ufunc, e.g., __add__."""
+ def func(self, other):
+ if _disables_array_ufunc(other):
+ return NotImplemented
+ return ufunc(self, other)
+ func.__name__ = f'__{name}__'
+ return func
+
+
+def _reflected_binary_method(ufunc, name):
+ """Implement a reflected binary method with a ufunc, e.g., __radd__."""
+ def func(self, other):
+ if _disables_array_ufunc(other):
+ return NotImplemented
+ return ufunc(other, self)
+ func.__name__ = f'__r{name}__'
+ return func
+
+
+def _inplace_binary_method(ufunc, name):
+ """Implement an in-place binary method with a ufunc, e.g., __iadd__."""
+ def func(self, other):
+ return ufunc(self, other, out=(self,))
+ func.__name__ = f'__i{name}__'
+ return func
+
+
+def _numeric_methods(ufunc, name):
+ """Implement forward, reflected and inplace binary methods with a ufunc."""
+ return (_binary_method(ufunc, name),
+ _reflected_binary_method(ufunc, name),
+ _inplace_binary_method(ufunc, name))
+
+
+def _unary_method(ufunc, name):
+ """Implement a unary special method with a ufunc."""
+ def func(self):
+ return ufunc(self)
+ func.__name__ = f'__{name}__'
+ return func
+
+
+class NDArrayOperatorsMixin:
+ """Mixin defining all operator special methods using __array_ufunc__.
+
+ This class implements the special methods for almost all of Python's
+ builtin operators defined in the `operator` module, including comparisons
+ (``==``, ``>``, etc.) and arithmetic (``+``, ``*``, ``-``, etc.), by
+ deferring to the ``__array_ufunc__`` method, which subclasses must
+ implement.
+
+ It is useful for writing classes that do not inherit from `numpy.ndarray`,
+ but that should support arithmetic and numpy universal functions like
+ arrays as described in :external+neps:doc:`nep-0013-ufunc-overrides`.
+
+    As a trivial example, consider this implementation of an ``ArrayLike``
+ class that simply wraps a NumPy array and ensures that the result of any
+ arithmetic operation is also an ``ArrayLike`` object:
+
+ >>> import numbers
+ >>> class ArrayLike(np.lib.mixins.NDArrayOperatorsMixin):
+ ... def __init__(self, value):
+ ... self.value = np.asarray(value)
+ ...
+ ... # One might also consider adding the built-in list type to this
+ ... # list, to support operations like np.add(array_like, list)
+ ... _HANDLED_TYPES = (np.ndarray, numbers.Number)
+ ...
+ ... def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
+ ... out = kwargs.get('out', ())
+ ... for x in inputs + out:
+ ... # Only support operations with instances of
+ ... # _HANDLED_TYPES. Use ArrayLike instead of type(self)
+ ... # for isinstance to allow subclasses that don't
+ ... # override __array_ufunc__ to handle ArrayLike objects.
+ ... if not isinstance(
+ ... x, self._HANDLED_TYPES + (ArrayLike,)
+ ... ):
+ ... return NotImplemented
+ ...
+ ... # Defer to the implementation of the ufunc
+ ... # on unwrapped values.
+ ... inputs = tuple(x.value if isinstance(x, ArrayLike) else x
+ ... for x in inputs)
+ ... if out:
+ ... kwargs['out'] = tuple(
+ ... x.value if isinstance(x, ArrayLike) else x
+ ... for x in out)
+ ... result = getattr(ufunc, method)(*inputs, **kwargs)
+ ...
+ ... if type(result) is tuple:
+ ... # multiple return values
+ ... return tuple(type(self)(x) for x in result)
+ ... elif method == 'at':
+ ... # no return value
+ ... return None
+ ... else:
+ ... # one return value
+ ... return type(self)(result)
+ ...
+ ... def __repr__(self):
+ ... return '%s(%r)' % (type(self).__name__, self.value)
+
+ In interactions between ``ArrayLike`` objects and numbers or numpy arrays,
+ the result is always another ``ArrayLike``:
+
+ >>> x = ArrayLike([1, 2, 3])
+ >>> x - 1
+ ArrayLike(array([0, 1, 2]))
+ >>> 1 - x
+ ArrayLike(array([ 0, -1, -2]))
+ >>> np.arange(3) - x
+ ArrayLike(array([-1, -1, -1]))
+ >>> x - np.arange(3)
+ ArrayLike(array([1, 1, 1]))
+
+ Note that unlike ``numpy.ndarray``, ``ArrayLike`` does not allow operations
+ with arbitrary, unrecognized types. This ensures that interactions with
+ ArrayLike preserve a well-defined casting hierarchy.
+
+ """
+ from numpy._core import umath as um
+
+ __slots__ = ()
+ # Like np.ndarray, this mixin class implements "Option 1" from the ufunc
+ # overrides NEP.
+
+ # comparisons don't have reflected and in-place versions
+ __lt__ = _binary_method(um.less, 'lt')
+ __le__ = _binary_method(um.less_equal, 'le')
+ __eq__ = _binary_method(um.equal, 'eq')
+ __ne__ = _binary_method(um.not_equal, 'ne')
+ __gt__ = _binary_method(um.greater, 'gt')
+ __ge__ = _binary_method(um.greater_equal, 'ge')
+
+ # numeric methods
+ __add__, __radd__, __iadd__ = _numeric_methods(um.add, 'add')
+ __sub__, __rsub__, __isub__ = _numeric_methods(um.subtract, 'sub')
+ __mul__, __rmul__, __imul__ = _numeric_methods(um.multiply, 'mul')
+ __matmul__, __rmatmul__, __imatmul__ = _numeric_methods(
+ um.matmul, 'matmul')
+ __truediv__, __rtruediv__, __itruediv__ = _numeric_methods(
+ um.true_divide, 'truediv')
+ __floordiv__, __rfloordiv__, __ifloordiv__ = _numeric_methods(
+ um.floor_divide, 'floordiv')
+ __mod__, __rmod__, __imod__ = _numeric_methods(um.remainder, 'mod')
+ __divmod__ = _binary_method(um.divmod, 'divmod')
+ __rdivmod__ = _reflected_binary_method(um.divmod, 'divmod')
+ # __idivmod__ does not exist
+ # TODO: handle the optional third argument for __pow__?
+ __pow__, __rpow__, __ipow__ = _numeric_methods(um.power, 'pow')
+ __lshift__, __rlshift__, __ilshift__ = _numeric_methods(
+ um.left_shift, 'lshift')
+ __rshift__, __rrshift__, __irshift__ = _numeric_methods(
+ um.right_shift, 'rshift')
+ __and__, __rand__, __iand__ = _numeric_methods(um.bitwise_and, 'and')
+ __xor__, __rxor__, __ixor__ = _numeric_methods(um.bitwise_xor, 'xor')
+ __or__, __ror__, __ior__ = _numeric_methods(um.bitwise_or, 'or')
+
+ # unary methods
+ __neg__ = _unary_method(um.negative, 'neg')
+ __pos__ = _unary_method(um.positive, 'pos')
+ __abs__ = _unary_method(um.absolute, 'abs')
+ __invert__ = _unary_method(um.invert, 'invert')
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/mixins.pyi b/.venv/lib/python3.12/site-packages/numpy/lib/mixins.pyi
new file mode 100644
index 0000000..4f4801f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/mixins.pyi
@@ -0,0 +1,75 @@
+from abc import ABC, abstractmethod
+from typing import Any
+from typing import Literal as L
+
+from numpy import ufunc
+
+__all__ = ["NDArrayOperatorsMixin"]
+
+# NOTE: `NDArrayOperatorsMixin` is not formally an abstract baseclass,
+# even though it's reliant on subclasses implementing `__array_ufunc__`
+
+# NOTE: The accepted input- and output-types of the various dunders are
+# completely dependent on how `__array_ufunc__` is implemented.
+# As such, only little type safety can be provided here.
+
+class NDArrayOperatorsMixin(ABC):
+ @abstractmethod
+ def __array_ufunc__(
+ self,
+ ufunc: ufunc,
+ method: L["__call__", "reduce", "reduceat", "accumulate", "outer", "at"],
+ *inputs: Any,
+ **kwargs: Any,
+ ) -> Any: ...
+ def __lt__(self, other: Any) -> Any: ...
+ def __le__(self, other: Any) -> Any: ...
+ def __eq__(self, other: Any) -> Any: ...
+ def __ne__(self, other: Any) -> Any: ...
+ def __gt__(self, other: Any) -> Any: ...
+ def __ge__(self, other: Any) -> Any: ...
+ def __add__(self, other: Any) -> Any: ...
+ def __radd__(self, other: Any) -> Any: ...
+ def __iadd__(self, other: Any) -> Any: ...
+ def __sub__(self, other: Any) -> Any: ...
+ def __rsub__(self, other: Any) -> Any: ...
+ def __isub__(self, other: Any) -> Any: ...
+ def __mul__(self, other: Any) -> Any: ...
+ def __rmul__(self, other: Any) -> Any: ...
+ def __imul__(self, other: Any) -> Any: ...
+ def __matmul__(self, other: Any) -> Any: ...
+ def __rmatmul__(self, other: Any) -> Any: ...
+ def __imatmul__(self, other: Any) -> Any: ...
+ def __truediv__(self, other: Any) -> Any: ...
+ def __rtruediv__(self, other: Any) -> Any: ...
+ def __itruediv__(self, other: Any) -> Any: ...
+ def __floordiv__(self, other: Any) -> Any: ...
+ def __rfloordiv__(self, other: Any) -> Any: ...
+ def __ifloordiv__(self, other: Any) -> Any: ...
+ def __mod__(self, other: Any) -> Any: ...
+ def __rmod__(self, other: Any) -> Any: ...
+ def __imod__(self, other: Any) -> Any: ...
+ def __divmod__(self, other: Any) -> Any: ...
+ def __rdivmod__(self, other: Any) -> Any: ...
+ def __pow__(self, other: Any) -> Any: ...
+ def __rpow__(self, other: Any) -> Any: ...
+ def __ipow__(self, other: Any) -> Any: ...
+ def __lshift__(self, other: Any) -> Any: ...
+ def __rlshift__(self, other: Any) -> Any: ...
+ def __ilshift__(self, other: Any) -> Any: ...
+ def __rshift__(self, other: Any) -> Any: ...
+ def __rrshift__(self, other: Any) -> Any: ...
+ def __irshift__(self, other: Any) -> Any: ...
+ def __and__(self, other: Any) -> Any: ...
+ def __rand__(self, other: Any) -> Any: ...
+ def __iand__(self, other: Any) -> Any: ...
+ def __xor__(self, other: Any) -> Any: ...
+ def __rxor__(self, other: Any) -> Any: ...
+ def __ixor__(self, other: Any) -> Any: ...
+ def __or__(self, other: Any) -> Any: ...
+ def __ror__(self, other: Any) -> Any: ...
+ def __ior__(self, other: Any) -> Any: ...
+ def __neg__(self) -> Any: ...
+ def __pos__(self) -> Any: ...
+ def __abs__(self) -> Any: ...
+ def __invert__(self) -> Any: ...
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/npyio.py b/.venv/lib/python3.12/site-packages/numpy/lib/npyio.py
new file mode 100644
index 0000000..84d8079
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/npyio.py
@@ -0,0 +1 @@
+from ._npyio_impl import DataSource, NpzFile, __doc__ # noqa: F401
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/npyio.pyi b/.venv/lib/python3.12/site-packages/numpy/lib/npyio.pyi
new file mode 100644
index 0000000..49fb4d1
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/npyio.pyi
@@ -0,0 +1,9 @@
+from numpy.lib._npyio_impl import (
+    DataSource as DataSource,
+    NpzFile as NpzFile,
+    __doc__ as __doc__,
+)
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/recfunctions.py b/.venv/lib/python3.12/site-packages/numpy/lib/recfunctions.py
new file mode 100644
index 0000000..c8a6dd8
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/recfunctions.py
@@ -0,0 +1,1681 @@
+"""
+Collection of utilities to manipulate structured arrays.
+
+Most of these functions were initially implemented by John Hunter for
+matplotlib. They have been rewritten and extended for convenience.
+
+"""
+import itertools
+
+import numpy as np
+import numpy.ma as ma
+import numpy.ma.mrecords as mrec
+from numpy._core.overrides import array_function_dispatch
+from numpy.lib._iotools import _is_string_like
+
+__all__ = [
+ 'append_fields', 'apply_along_fields', 'assign_fields_by_name',
+ 'drop_fields', 'find_duplicates', 'flatten_descr',
+ 'get_fieldstructure', 'get_names', 'get_names_flat',
+ 'join_by', 'merge_arrays', 'rec_append_fields',
+ 'rec_drop_fields', 'rec_join', 'recursive_fill_fields',
+ 'rename_fields', 'repack_fields', 'require_fields',
+ 'stack_arrays', 'structured_to_unstructured', 'unstructured_to_structured',
+ ]
+
+
+def _recursive_fill_fields_dispatcher(input, output):
+ return (input, output)
+
+
+@array_function_dispatch(_recursive_fill_fields_dispatcher)
+def recursive_fill_fields(input, output):
+ """
+    Fills the fields of `output` with the corresponding fields of `input`,
+    with support for nested structures.
+
+ Parameters
+ ----------
+ input : ndarray
+ Input array.
+ output : ndarray
+ Output array.
+
+ Notes
+ -----
+ * `output` should be at least the same size as `input`
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> from numpy.lib import recfunctions as rfn
+ >>> a = np.array([(1, 10.), (2, 20.)], dtype=[('A', np.int64), ('B', np.float64)])
+ >>> b = np.zeros((3,), dtype=a.dtype)
+ >>> rfn.recursive_fill_fields(a, b)
+ array([(1, 10.), (2, 20.), (0, 0.)], dtype=[('A', '<i8'), ('B', '<f8')])
+
+ """
+ newdtype = output.dtype
+ for field in newdtype.names:
+ try:
+ current = input[field]
+ except ValueError:
+ continue
+ if current.dtype.names is not None:
+ recursive_fill_fields(current, output[field])
+ else:
+ output[field][:len(current)] = current
+ return output
+
+
+def _get_fieldspec(dtype):
+ """
+ Produce a list of name/dtype pairs corresponding to the dtype fields
+
+ Similar to dtype.descr, but the second item of each tuple is a dtype, not a
+    string. As a result, this handles subarray dtypes.
+
+ Can be passed to the dtype constructor to reconstruct the dtype, noting that
+ this (deliberately) discards field offsets.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> dt = np.dtype([(('a', 'A'), np.int64), ('b', np.double, 3)])
+ >>> dt.descr
+ [(('a', 'A'), '<i8'), ('b', '<f8', (3,))]
+ >>> _get_fieldspec(dt)
+ [(('a', 'A'), dtype('int64')), ('b', dtype(('<f8', (3,))))]
+
+ """
+ if dtype.names is None:
+ # .descr returns a nameless field, so we should too
+ return [('', dtype)]
+ else:
+ fields = ((name, dtype.fields[name]) for name in dtype.names)
+ # keep any titles, if present
+ return [
+ (name if len(f) == 2 else (f[2], name), f[0])
+ for name, f in fields
+ ]
+
+
+def get_names(adtype):
+ """
+    Returns the field names of the input datatype as a tuple. The input
+    datatype must have fields, otherwise an error is raised.
+
+ Parameters
+ ----------
+ adtype : dtype
+ Input datatype
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> from numpy.lib import recfunctions as rfn
+ >>> rfn.get_names(np.empty((1,), dtype=[('A', int)]).dtype)
+ ('A',)
+ >>> rfn.get_names(np.empty((1,), dtype=[('A',int), ('B', float)]).dtype)
+ ('A', 'B')
+ >>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])])
+ >>> rfn.get_names(adtype)
+ ('a', ('b', ('ba', 'bb')))
+ """
+ listnames = []
+ names = adtype.names
+ for name in names:
+ current = adtype[name]
+ if current.names is not None:
+ listnames.append((name, tuple(get_names(current))))
+ else:
+ listnames.append(name)
+ return tuple(listnames)
+
+
+def get_names_flat(adtype):
+ """
+    Returns the field names of the input datatype as a tuple. The input
+    datatype must have fields, otherwise an error is raised.
+    Nested structures are flattened beforehand.
+
+ Parameters
+ ----------
+ adtype : dtype
+ Input datatype
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> from numpy.lib import recfunctions as rfn
+ >>> rfn.get_names_flat(np.empty((1,), dtype=[('A', int)]).dtype) is None
+ False
+ >>> rfn.get_names_flat(np.empty((1,), dtype=[('A',int), ('B', str)]).dtype)
+ ('A', 'B')
+ >>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])])
+ >>> rfn.get_names_flat(adtype)
+ ('a', 'b', 'ba', 'bb')
+ """
+ listnames = []
+ names = adtype.names
+ for name in names:
+ listnames.append(name)
+ current = adtype[name]
+ if current.names is not None:
+ listnames.extend(get_names_flat(current))
+ return tuple(listnames)
+
+
+def flatten_descr(ndtype):
+ """
+ Flatten a structured data-type description.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> from numpy.lib import recfunctions as rfn
+ >>> ndtype = np.dtype([('a', '<i4'), ('b', [('ba', '<f8'), ('bb', '<i4')])])
+ >>> rfn.flatten_descr(ndtype)
+ (('a', dtype('int32')), ('ba', dtype('float64')), ('bb', dtype('int32')))
+
+ """
+ names = ndtype.names
+ if names is None:
+ return (('', ndtype),)
+ else:
+ descr = []
+ for field in names:
+ (typ, _) = ndtype.fields[field]
+ if typ.names is not None:
+ descr.extend(flatten_descr(typ))
+ else:
+ descr.append((field, typ))
+ return tuple(descr)
+
+
+def _zip_dtype(seqarrays, flatten=False):
+ newdtype = []
+ if flatten:
+ for a in seqarrays:
+ newdtype.extend(flatten_descr(a.dtype))
+ else:
+ for a in seqarrays:
+ current = a.dtype
+ if current.names is not None and len(current.names) == 1:
+ # special case - dtypes of 1 field are flattened
+ newdtype.extend(_get_fieldspec(current))
+ else:
+ newdtype.append(('', current))
+ return np.dtype(newdtype)
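+
+# e.g. _zip_dtype([np.zeros(2, [('a', 'i8')]), np.zeros(2, 'f8')]) gives
+# dtype([('a', '<i8'), ('f1', '<f8')]): the lone named field is inlined and
+# the plain float64 becomes an anonymous field, auto-named 'f1' by position.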
+
+
+def _zip_descr(seqarrays, flatten=False):
+ """
+ Combine the dtype description of a series of arrays.
+
+ Parameters
+ ----------
+ seqarrays : sequence of arrays
+ Sequence of arrays
+ flatten : {boolean}, optional
+ Whether to collapse nested descriptions.
+ """
+ return _zip_dtype(seqarrays, flatten=flatten).descr
+
+
+def get_fieldstructure(adtype, lastname=None, parents=None,):
+ """
+ Returns a dictionary with fields indexing lists of their parent fields.
+
+ This function is used to simplify access to fields nested in other fields.
+
+ Parameters
+ ----------
+ adtype : np.dtype
+ Input datatype
+ lastname : optional
+ Last processed field name (used internally during recursion).
+ parents : dictionary
+ Dictionary of parent fields (used internally during recursion).
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> from numpy.lib import recfunctions as rfn
+ >>> ndtype = np.dtype([('A', int),
+ ... ('B', [('BA', int),
+ ... ('BB', [('BBA', int), ('BBB', int)])])])
+ >>> rfn.get_fieldstructure(ndtype)
+ ... # XXX: possible regression, order of BBA and BBB is swapped
+ {'A': [], 'B': [], 'BA': ['B'], 'BB': ['B'], 'BBA': ['B', 'BB'], 'BBB': ['B', 'BB']}
+
+ """
+ if parents is None:
+ parents = {}
+ names = adtype.names
+ for name in names:
+ current = adtype[name]
+ if current.names is not None:
+ if lastname:
+ parents[name] = [lastname, ]
+ else:
+ parents[name] = []
+ parents.update(get_fieldstructure(current, name, parents))
+ else:
+ lastparent = list(parents.get(lastname, []) or [])
+ if lastparent:
+ lastparent.append(lastname)
+ elif lastname:
+ lastparent = [lastname, ]
+ parents[name] = lastparent or []
+ return parents
+
+
+def _izip_fields_flat(iterable):
+ """
+ Returns an iterator of concatenated fields from a sequence of arrays,
+ collapsing any nested structure.
+
+ """
+ for element in iterable:
+ if isinstance(element, np.void):
+ yield from _izip_fields_flat(tuple(element))
+ else:
+ yield element
+
+
+def _izip_fields(iterable):
+ """
+ Returns an iterator of concatenated fields from a sequence of arrays.
+
+ """
+ for element in iterable:
+ if (hasattr(element, '__iter__') and
+ not isinstance(element, str)):
+ yield from _izip_fields(element)
+ elif isinstance(element, np.void) and len(tuple(element)) == 1:
+            # same action as the previous branch; np.void does not pass the
+            # hasattr('__iter__') check above, so one-field records land here
+ yield from _izip_fields(element)
+ else:
+ yield element
+
+
+def _izip_records(seqarrays, fill_value=None, flatten=True):
+ """
+ Returns an iterator of concatenated items from a sequence of arrays.
+
+ Parameters
+ ----------
+ seqarrays : sequence of arrays
+ Sequence of arrays.
+ fill_value : {None, integer}
+ Value used to pad shorter iterables.
+    flatten : {True, False}, optional
+        Whether to collapse nested fields while zipping.
+ """
+
+ # Should we flatten the items, or just use a nested approach
+ if flatten:
+ zipfunc = _izip_fields_flat
+ else:
+ zipfunc = _izip_fields
+
+ for tup in itertools.zip_longest(*seqarrays, fillvalue=fill_value):
+ yield tuple(zipfunc(tup))
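+
+# e.g. zipping a 2-record array with a 1-record array and fill_value=-1
+# yields flattened tuples such as (1, 10., 100) and (2, 20., -1); the
+# shorter input is padded with the fill value (illustrative values).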
+
+
+def _fix_output(output, usemask=True, asrecarray=False):
+ """
+ Private function: return a recarray, a ndarray, a MaskedArray
+ or a MaskedRecords depending on the input parameters
+ """
+ if not isinstance(output, ma.MaskedArray):
+ usemask = False
+ if usemask:
+ if asrecarray:
+ output = output.view(mrec.MaskedRecords)
+ else:
+ output = ma.filled(output)
+ if asrecarray:
+ output = output.view(np.recarray)
+ return output
+
+
+def _fix_defaults(output, defaults=None):
+ """
+ Update the fill_value and masked data of `output`
+ from the default given in a dictionary defaults.
+ """
+ names = output.dtype.names
+ (data, mask, fill_value) = (output.data, output.mask, output.fill_value)
+ for (k, v) in (defaults or {}).items():
+ if k in names:
+ fill_value[k] = v
+ data[k][mask[k]] = v
+ return output
+
+
+def _merge_arrays_dispatcher(seqarrays, fill_value=None, flatten=None,
+ usemask=None, asrecarray=None):
+ return seqarrays
+
+
+@array_function_dispatch(_merge_arrays_dispatcher)
+def merge_arrays(seqarrays, fill_value=-1, flatten=False,
+ usemask=False, asrecarray=False):
+ """
+ Merge arrays field by field.
+
+ Parameters
+ ----------
+ seqarrays : sequence of ndarrays
+ Sequence of arrays
+ fill_value : {float}, optional
+ Filling value used to pad missing data on the shorter arrays.
+ flatten : {False, True}, optional
+ Whether to collapse nested fields.
+ usemask : {False, True}, optional
+ Whether to return a masked array or not.
+ asrecarray : {False, True}, optional
+ Whether to return a recarray (MaskedRecords) or not.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> from numpy.lib import recfunctions as rfn
+ >>> rfn.merge_arrays((np.array([1, 2]), np.array([10., 20., 30.])))
+ array([( 1, 10.), ( 2, 20.), (-1, 30.)],
+ dtype=[('f0', '<i8'), ('f1', '<f8')])
+
+ >>> rfn.merge_arrays((np.array([1, 2], dtype=np.int64),
+ ... np.array([10., 20., 30.])), usemask=False)
+    array([( 1, 10.), ( 2, 20.), (-1, 30.)],
+ dtype=[('f0', '<i8'), ('f1', '<f8')])
+ >>> rfn.merge_arrays((np.array([1, 2]).view([('a', np.int64)]),
+ ... np.array([10., 20., 30.])),
+ ... usemask=False, asrecarray=True)
+ rec.array([( 1, 10.), ( 2, 20.), (-1, 30.)],
+ dtype=[('a', '<i8'), ('f1', '<f8')])
+
+ Notes
+ -----
+    * Without a mask, each missing value is filled with a default that
+      depends on the type of the corresponding field:
+
+      * ``-1`` for integers
+      * ``-1.0`` for floating point numbers
+      * ``'-'`` for characters
+      * ``'-1'`` for strings
+      * ``True`` for boolean values
+ """
+ # Only one item in the input sequence ?
+ if (len(seqarrays) == 1):
+ seqarrays = np.asanyarray(seqarrays[0])
+ # Do we have a single ndarray as input ?
+ if isinstance(seqarrays, (np.ndarray, np.void)):
+ seqdtype = seqarrays.dtype
+ # Make sure we have named fields
+ if seqdtype.names is None:
+ seqdtype = np.dtype([('', seqdtype)])
+ if not flatten or _zip_dtype((seqarrays,), flatten=True) == seqdtype:
+ # Minimal processing needed: just make sure everything's a-ok
+ seqarrays = seqarrays.ravel()
+ # Find what type of array we must return
+ if usemask:
+ if asrecarray:
+ seqtype = mrec.MaskedRecords
+ else:
+ seqtype = ma.MaskedArray
+ elif asrecarray:
+ seqtype = np.recarray
+ else:
+ seqtype = np.ndarray
+ return seqarrays.view(dtype=seqdtype, type=seqtype)
+ else:
+ seqarrays = (seqarrays,)
+ else:
+ # Make sure we have arrays in the input sequence
+ seqarrays = [np.asanyarray(_m) for _m in seqarrays]
+ # Find the sizes of the inputs and their maximum
+ sizes = tuple(a.size for a in seqarrays)
+ maxlength = max(sizes)
+ # Get the dtype of the output (flattening if needed)
+ newdtype = _zip_dtype(seqarrays, flatten=flatten)
+ # Initialize the sequences for data and mask
+ seqdata = []
+ seqmask = []
+ # If we expect some kind of MaskedArray, make a special loop.
+ if usemask:
+ for (a, n) in zip(seqarrays, sizes):
+ nbmissing = (maxlength - n)
+ # Get the data and mask
+ data = a.ravel().__array__()
+ mask = ma.getmaskarray(a).ravel()
+ # Get the filling value (if needed)
+ if nbmissing:
+ fval = mrec._check_fill_value(fill_value, a.dtype)
+ if isinstance(fval, (np.ndarray, np.void)):
+ if len(fval.dtype) == 1:
+ fval = fval.item()[0]
+ fmsk = True
+ else:
+ fval = np.array(fval, dtype=a.dtype, ndmin=1)
+ fmsk = np.ones((1,), dtype=mask.dtype)
+ else:
+ fval = None
+ fmsk = True
+ # Store an iterator padding the input to the expected length
+ seqdata.append(itertools.chain(data, [fval] * nbmissing))
+ seqmask.append(itertools.chain(mask, [fmsk] * nbmissing))
+ # Create an iterator for the data
+ data = tuple(_izip_records(seqdata, flatten=flatten))
+ output = ma.array(np.fromiter(data, dtype=newdtype, count=maxlength),
+ mask=list(_izip_records(seqmask, flatten=flatten)))
+ if asrecarray:
+ output = output.view(mrec.MaskedRecords)
+ else:
+ # Same as before, without the mask we don't need...
+ for (a, n) in zip(seqarrays, sizes):
+ nbmissing = (maxlength - n)
+ data = a.ravel().__array__()
+ if nbmissing:
+ fval = mrec._check_fill_value(fill_value, a.dtype)
+ if isinstance(fval, (np.ndarray, np.void)):
+ if len(fval.dtype) == 1:
+ fval = fval.item()[0]
+ else:
+ fval = np.array(fval, dtype=a.dtype, ndmin=1)
+ else:
+ fval = None
+ seqdata.append(itertools.chain(data, [fval] * nbmissing))
+ output = np.fromiter(tuple(_izip_records(seqdata, flatten=flatten)),
+ dtype=newdtype, count=maxlength)
+ if asrecarray:
+ output = output.view(np.recarray)
+ # And we're done...
+ return output
+
+
+def _drop_fields_dispatcher(base, drop_names, usemask=None, asrecarray=None):
+ return (base,)
+
+
+@array_function_dispatch(_drop_fields_dispatcher)
+def drop_fields(base, drop_names, usemask=True, asrecarray=False):
+ """
+ Return a new array with fields in `drop_names` dropped.
+
+ Nested fields are supported.
+
+ Parameters
+ ----------
+ base : array
+ Input array
+ drop_names : string or sequence
+ String or sequence of strings corresponding to the names of the
+ fields to drop.
+ usemask : {False, True}, optional
+ Whether to return a masked array or not.
+    asrecarray : {False, True}, optional
+        Whether to return a recarray or a mrecarray (`asrecarray=True`) or
+        a plain ndarray or masked array with flexible dtype. The default
+        is False.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> from numpy.lib import recfunctions as rfn
+ >>> a = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
+ ... dtype=[('a', np.int64), ('b', [('ba', np.double), ('bb', np.int64)])])
+ >>> rfn.drop_fields(a, 'a')
+ array([((2., 3),), ((5., 6),)],
+ dtype=[('b', [('ba', '<f8'), ('bb', '<i8')])])
+ >>> rfn.drop_fields(a, 'ba')
+ array([(1, (3,)), (4, (6,))], dtype=[('a', '<i8'), ('b', [('bb', '<i8')])])
+ >>> rfn.drop_fields(a, ['ba', 'bb'])
+ array([(1,), (4,)], dtype=[('a', '<i8')])
+ """
+ if _is_string_like(drop_names):
+ drop_names = [drop_names]
+ else:
+ drop_names = set(drop_names)
+
+ def _drop_descr(ndtype, drop_names):
+ names = ndtype.names
+ newdtype = []
+ for name in names:
+ current = ndtype[name]
+ if name in drop_names:
+ continue
+ if current.names is not None:
+ descr = _drop_descr(current, drop_names)
+ if descr:
+ newdtype.append((name, descr))
+ else:
+ newdtype.append((name, current))
+ return newdtype
+
+ newdtype = _drop_descr(base.dtype, drop_names)
+
+ output = np.empty(base.shape, dtype=newdtype)
+ output = recursive_fill_fields(base, output)
+ return _fix_output(output, usemask=usemask, asrecarray=asrecarray)
+
+
+def _keep_fields(base, keep_names, usemask=True, asrecarray=False):
+ """
+ Return a new array keeping only the fields in `keep_names`,
+ and preserving the order of those fields.
+
+ Parameters
+ ----------
+ base : array
+ Input array
+ keep_names : string or sequence
+ String or sequence of strings corresponding to the names of the
+ fields to keep. Order of the names will be preserved.
+ usemask : {False, True}, optional
+ Whether to return a masked array or not.
+    asrecarray : {False, True}, optional
+        Whether to return a recarray or a mrecarray (`asrecarray=True`) or
+        a plain ndarray or masked array with flexible dtype. The default
+        is False.
+ """
+ newdtype = [(n, base.dtype[n]) for n in keep_names]
+ output = np.empty(base.shape, dtype=newdtype)
+ output = recursive_fill_fields(base, output)
+ return _fix_output(output, usemask=usemask, asrecarray=asrecarray)
+
+
+def _rec_drop_fields_dispatcher(base, drop_names):
+ return (base,)
+
+
+@array_function_dispatch(_rec_drop_fields_dispatcher)
+def rec_drop_fields(base, drop_names):
+ """
+ Returns a new numpy.recarray with fields in `drop_names` dropped.
+ """
+ return drop_fields(base, drop_names, usemask=False, asrecarray=True)
+
+
+def _rename_fields_dispatcher(base, namemapper):
+ return (base,)
+
+
+@array_function_dispatch(_rename_fields_dispatcher)
+def rename_fields(base, namemapper):
+ """
+ Rename the fields from a flexible-datatype ndarray or recarray.
+
+ Nested fields are supported.
+
+ Parameters
+ ----------
+ base : ndarray
+ Input array whose fields must be modified.
+ namemapper : dictionary
+ Dictionary mapping old field names to their new version.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> from numpy.lib import recfunctions as rfn
+ >>> a = np.array([(1, (2, [3.0, 30.])), (4, (5, [6.0, 60.]))],
+ ... dtype=[('a', int),('b', [('ba', float), ('bb', (float, 2))])])
+ >>> rfn.rename_fields(a, {'a':'A', 'bb':'BB'})
+ array([(1, (2., [ 3., 30.])), (4, (5., [ 6., 60.]))],
+ dtype=[('A', '<i8'), ('b', [('ba', '<f8'), ('BB', '<f8', (2,))])])
+
+ """
+ def _recursive_rename_fields(ndtype, namemapper):
+ newdtype = []
+ for name in ndtype.names:
+ newname = namemapper.get(name, name)
+ current = ndtype[name]
+ if current.names is not None:
+ newdtype.append(
+ (newname, _recursive_rename_fields(current, namemapper))
+ )
+ else:
+ newdtype.append((newname, current))
+ return newdtype
+ newdtype = _recursive_rename_fields(base.dtype, namemapper)
+ return base.view(newdtype)
+
+
+def _append_fields_dispatcher(base, names, data, dtypes=None,
+ fill_value=None, usemask=None, asrecarray=None):
+ yield base
+ yield from data
+
+
+@array_function_dispatch(_append_fields_dispatcher)
+def append_fields(base, names, data, dtypes=None,
+ fill_value=-1, usemask=True, asrecarray=False):
+ """
+ Add new fields to an existing array.
+
+ The names of the fields are given with the `names` arguments,
+ the corresponding values with the `data` arguments.
+ If a single field is appended, `names`, `data` and `dtypes` do not have
+ to be lists but just values.
+
+ Parameters
+ ----------
+ base : array
+ Input array to extend.
+ names : string, sequence
+ String or sequence of strings corresponding to the names
+ of the new fields.
+ data : array or sequence of arrays
+ Array or sequence of arrays storing the fields to add to the base.
+ dtypes : sequence of datatypes, optional
+ Datatype or sequence of datatypes.
+ If None, the datatypes are estimated from the `data`.
+ fill_value : {float}, optional
+ Filling value used to pad missing data on the shorter arrays.
+ usemask : {False, True}, optional
+ Whether to return a masked array or not.
+ asrecarray : {False, True}, optional
+ Whether to return a recarray (MaskedRecords) or not.
+
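+    Examples
+    --------
+    A minimal sketch (field names and values chosen for illustration):
+
+    >>> import numpy as np
+    >>> from numpy.lib import recfunctions as rfn
+    >>> a = np.array([(1, 2.0), (3, 4.0)], dtype=[('x', '<i8'), ('y', '<f8')])
+    >>> rfn.append_fields(a, 'z', np.array([10, 20], dtype=np.int64),
+    ...                   usemask=False)
+    array([(1, 2., 10), (3, 4., 20)],
+          dtype=[('x', '<i8'), ('y', '<f8'), ('z', '<i8')])
+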
+ """
+ # Check the names
+ if isinstance(names, (tuple, list)):
+ if len(names) != len(data):
+ msg = "The number of arrays does not match the number of names"
+ raise ValueError(msg)
+ elif isinstance(names, str):
+ names = [names, ]
+ data = [data, ]
+ #
+ if dtypes is None:
+ data = [np.array(a, copy=None, subok=True) for a in data]
+ data = [a.view([(name, a.dtype)]) for (name, a) in zip(names, data)]
+ else:
+ if not isinstance(dtypes, (tuple, list)):
+ dtypes = [dtypes, ]
+ if len(data) != len(dtypes):
+ if len(dtypes) == 1:
+ dtypes = dtypes * len(data)
+ else:
+ msg = "The dtypes argument must be None, a dtype, or a list."
+ raise ValueError(msg)
+ data = [np.array(a, copy=None, subok=True, dtype=d).view([(n, d)])
+ for (a, n, d) in zip(data, names, dtypes)]
+ #
+ base = merge_arrays(base, usemask=usemask, fill_value=fill_value)
+ if len(data) > 1:
+ data = merge_arrays(data, flatten=True, usemask=usemask,
+ fill_value=fill_value)
+ else:
+ data = data.pop()
+ #
+ output = ma.masked_all(
+ max(len(base), len(data)),
+ dtype=_get_fieldspec(base.dtype) + _get_fieldspec(data.dtype))
+ output = recursive_fill_fields(base, output)
+ output = recursive_fill_fields(data, output)
+ #
+ return _fix_output(output, usemask=usemask, asrecarray=asrecarray)
+
+
+def _rec_append_fields_dispatcher(base, names, data, dtypes=None):
+ yield base
+ yield from data
+
+
+@array_function_dispatch(_rec_append_fields_dispatcher)
+def rec_append_fields(base, names, data, dtypes=None):
+ """
+ Add new fields to an existing array.
+
+ The names of the fields are given with the `names` arguments,
+ the corresponding values with the `data` arguments.
+ If a single field is appended, `names`, `data` and `dtypes` do not have
+ to be lists but just values.
+
+ Parameters
+ ----------
+ base : array
+ Input array to extend.
+ names : string, sequence
+ String or sequence of strings corresponding to the names
+ of the new fields.
+ data : array or sequence of arrays
+ Array or sequence of arrays storing the fields to add to the base.
+ dtypes : sequence of datatypes, optional
+ Datatype or sequence of datatypes.
+ If None, the datatypes are estimated from the `data`.
+
+ See Also
+ --------
+ append_fields
+
+ Returns
+ -------
+ appended_array : np.recarray
+ """
+ return append_fields(base, names, data=data, dtypes=dtypes,
+ asrecarray=True, usemask=False)
+
+
+def _repack_fields_dispatcher(a, align=None, recurse=None):
+ return (a,)
+
+
+@array_function_dispatch(_repack_fields_dispatcher)
+def repack_fields(a, align=False, recurse=False):
+ """
+ Re-pack the fields of a structured array or dtype in memory.
+
+ The memory layout of structured datatypes allows fields at arbitrary
+ byte offsets. This means the fields can be separated by padding bytes,
+ their offsets can be non-monotonically increasing, and they can overlap.
+
+ This method removes any overlaps and reorders the fields in memory so they
+ have increasing byte offsets, and adds or removes padding bytes depending
+ on the `align` option, which behaves like the `align` option to
+ `numpy.dtype`.
+
+ If `align=False`, this method produces a "packed" memory layout in which
+ each field starts at the byte the previous field ended, and any padding
+ bytes are removed.
+
+    If `align=True`, this method produces an "aligned" memory layout in which
+ each field's offset is a multiple of its alignment, and the total itemsize
+ is a multiple of the largest alignment, by adding padding bytes as needed.
+
+ Parameters
+ ----------
+ a : ndarray or dtype
+ array or dtype for which to repack the fields.
+ align : boolean
+ If true, use an "aligned" memory layout, otherwise use a "packed" layout.
+ recurse : boolean
+ If True, also repack nested structures.
+
+ Returns
+ -------
+ repacked : ndarray or dtype
+ Copy of `a` with fields repacked, or `a` itself if no repacking was
+ needed.
+
+ Examples
+ --------
+ >>> import numpy as np
+
+ >>> from numpy.lib import recfunctions as rfn
+ >>> def print_offsets(d):
+ ... print("offsets:", [d.fields[name][1] for name in d.names])
+ ... print("itemsize:", d.itemsize)
+ ...
+ >>> dt = np.dtype('u1, <i8, <f8', align=True)
+ >>> dt
+ dtype({'names': ['f0', 'f1', 'f2'], 'formats': ['u1', '<i8', '<f8'], \
+'offsets': [0, 8, 16], 'itemsize': 24}, align=True)
+ >>> print_offsets(dt)
+ offsets: [0, 8, 16]
+ itemsize: 24
+ >>> packed_dt = rfn.repack_fields(dt)
+ >>> packed_dt
+ dtype([('f0', 'u1'), ('f1', '<i8'), ('f2', '<f8')])
+ >>> print_offsets(packed_dt)
+ offsets: [0, 1, 9]
+ itemsize: 17
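+
+    Repacking the packed dtype with ``align=True`` restores an aligned
+    layout (shown via the itemsize only, for brevity):
+
+    >>> rfn.repack_fields(packed_dt, align=True).itemsize
+    24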
+
+ """
+ if not isinstance(a, np.dtype):
+ dt = repack_fields(a.dtype, align=align, recurse=recurse)
+ return a.astype(dt, copy=False)
+
+ if a.names is None:
+ return a
+
+ fieldinfo = []
+ for name in a.names:
+ tup = a.fields[name]
+ if recurse:
+ fmt = repack_fields(tup[0], align=align, recurse=True)
+ else:
+ fmt = tup[0]
+
+ if len(tup) == 3:
+ name = (tup[2], name)
+
+ fieldinfo.append((name, fmt))
+
+ dt = np.dtype(fieldinfo, align=align)
+ return np.dtype((a.type, dt))
+
+def _get_fields_and_offsets(dt, offset=0):
+ """
+ Returns a flat list of (dtype, count, offset) tuples of all the
+ scalar fields in the dtype "dt", including nested fields, in left
+ to right order.
+ """
+
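+    # Illustrative example: np.dtype([('a', 'i4'), ('b', 'f4', (2,))]) yields
+    # two entries: an i4 field with count 1 at offset 0, and an f4 subarray
+    # field with count 2 at offset 4.
+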
+ # counts up elements in subarrays, including nested subarrays, and returns
+ # base dtype and count
+ def count_elem(dt):
+ count = 1
+ while dt.shape != ():
+ for size in dt.shape:
+ count *= size
+ dt = dt.base
+ return dt, count
+
+ fields = []
+ for name in dt.names:
+ field = dt.fields[name]
+ f_dt, f_offset = field[0], field[1]
+ f_dt, n = count_elem(f_dt)
+
+ if f_dt.names is None:
+ fields.append((np.dtype((f_dt, (n,))), n, f_offset + offset))
+ else:
+ subfields = _get_fields_and_offsets(f_dt, f_offset + offset)
+ size = f_dt.itemsize
+
+ for i in range(n):
+ if i == 0:
+ # optimization: avoid list comprehension if no subarray
+ fields.extend(subfields)
+ else:
+ fields.extend([(d, c, o + i * size) for d, c, o in subfields])
+ return fields
+
+def _common_stride(offsets, counts, itemsize):
+ """
+ Returns the stride between the fields, or None if the stride is not
+ constant. The values in "counts" designate the lengths of
+    subarrays. Subarrays are treated as runs of contiguous fields with
+    an always-positive stride.
+ """
+ if len(offsets) <= 1:
+ return itemsize
+
+ negative = offsets[1] < offsets[0] # negative stride
+ if negative:
+ # reverse, so offsets will be ascending
+ it = zip(reversed(offsets), reversed(counts))
+ else:
+ it = zip(offsets, counts)
+
+ prev_offset = None
+ stride = None
+ for offset, count in it:
+ if count != 1: # subarray: always c-contiguous
+ if negative:
+ return None # subarrays can never have a negative stride
+ if stride is None:
+ stride = itemsize
+ if stride != itemsize:
+ return None
+ end_offset = offset + (count - 1) * itemsize
+ else:
+ end_offset = offset
+
+ if prev_offset is not None:
+ new_stride = offset - prev_offset
+ if stride is None:
+ stride = new_stride
+ if stride != new_stride:
+ return None
+
+ prev_offset = end_offset
+
+ if negative:
+ return -stride
+ return stride
+
+
+def _structured_to_unstructured_dispatcher(arr, dtype=None, copy=None,
+ casting=None):
+ return (arr,)
+
+@array_function_dispatch(_structured_to_unstructured_dispatcher)
+def structured_to_unstructured(arr, dtype=None, copy=False, casting='unsafe'):
+ """
+ Converts an n-D structured array into an (n+1)-D unstructured array.
+
+ The new array will have a new last dimension equal in size to the
+ number of field-elements of the input array. If not supplied, the output
+ datatype is determined from the numpy type promotion rules applied to all
+ the field datatypes.
+
+    Nested fields, as well as the elements of any subarray fields, each
+    count as a single field-element.
+
+ Parameters
+ ----------
+ arr : ndarray
+        Structured array to convert. Cannot contain object datatype.
+ dtype : dtype, optional
+ The dtype of the output unstructured array.
+ copy : bool, optional
+ If true, always return a copy. If false, a view is returned if
+ possible, such as when the `dtype` and strides of the fields are
+ suitable and the array subtype is one of `numpy.ndarray`,
+ `numpy.recarray` or `numpy.memmap`.
+
+ .. versionchanged:: 1.25.0
+ A view can now be returned if the fields are separated by a
+ uniform stride.
+
+ casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
+ See casting argument of `numpy.ndarray.astype`. Controls what kind of
+ data casting may occur.
+
+ Returns
+ -------
+ unstructured : ndarray
+ Unstructured array with one more dimension.
+
+ Examples
+ --------
+ >>> import numpy as np
+
+ >>> from numpy.lib import recfunctions as rfn
+ >>> a = np.zeros(4, dtype=[('a', 'i4'), ('b', 'f4,u2'), ('c', 'f4', 2)])
+ >>> a
+ array([(0, (0., 0), [0., 0.]), (0, (0., 0), [0., 0.]),
+ (0, (0., 0), [0., 0.]), (0, (0., 0), [0., 0.])],
+ dtype=[('a', '<i4'), ('b', [('f0', '<f4'), ('f1', '<u2')]), ('c', '<f4', (2,))])
+ >>> rfn.structured_to_unstructured(a)
+ array([[0., 0., 0., 0., 0.],
+ [0., 0., 0., 0., 0.],
+ [0., 0., 0., 0., 0.],
+ [0., 0., 0., 0., 0.]])
+
+ >>> b = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)],
+ ... dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')])
+ >>> np.mean(rfn.structured_to_unstructured(b[['x', 'z']]), axis=-1)
+ array([ 3. , 5.5, 9. , 11. ])
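+
+    A view rather than a copy is returned when the fields share a common
+    dtype and a uniform stride (a minimal illustration):
+
+    >>> c = np.zeros(3, dtype=[('x', 'f8'), ('y', 'f8')])
+    >>> np.shares_memory(rfn.structured_to_unstructured(c), c)
+    True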
+
+ """ # noqa: E501
+ if arr.dtype.names is None:
+ raise ValueError('arr must be a structured array')
+
+ fields = _get_fields_and_offsets(arr.dtype)
+ n_fields = len(fields)
+ if n_fields == 0 and dtype is None:
+ raise ValueError("arr has no fields. Unable to guess dtype")
+ elif n_fields == 0:
+ # too many bugs elsewhere for this to work now
+ raise NotImplementedError("arr with no fields is not supported")
+
+ dts, counts, offsets = zip(*fields)
+ names = [f'f{n}' for n in range(n_fields)]
+
+ if dtype is None:
+ out_dtype = np.result_type(*[dt.base for dt in dts])
+ else:
+ out_dtype = np.dtype(dtype)
+
+ # Use a series of views and casts to convert to an unstructured array:
+
+ # first view using flattened fields (doesn't work for object arrays)
+ # Note: dts may include a shape for subarrays
+ flattened_fields = np.dtype({'names': names,
+ 'formats': dts,
+ 'offsets': offsets,
+ 'itemsize': arr.dtype.itemsize})
+ arr = arr.view(flattened_fields)
+
+    # we only allow a few types to be unstructured by manipulating the
+    # strides, because we know it won't work with, for example, np.matrix
+    # or np.ma.MaskedArray.
+ can_view = type(arr) in (np.ndarray, np.recarray, np.memmap)
+ if (not copy) and can_view and all(dt.base == out_dtype for dt in dts):
+ # all elements have the right dtype already; if they have a common
+ # stride, we can just return a view
+ common_stride = _common_stride(offsets, counts, out_dtype.itemsize)
+ if common_stride is not None:
+ wrap = arr.__array_wrap__
+
+ new_shape = arr.shape + (sum(counts), out_dtype.itemsize)
+ new_strides = arr.strides + (abs(common_stride), 1)
+
+ arr = arr[..., np.newaxis].view(np.uint8) # view as bytes
+ arr = arr[..., min(offsets):] # remove the leading unused data
+ arr = np.lib.stride_tricks.as_strided(arr,
+ new_shape,
+ new_strides,
+ subok=True)
+
+ # cast and drop the last dimension again
+ arr = arr.view(out_dtype)[..., 0]
+
+ if common_stride < 0:
+ arr = arr[..., ::-1] # reverse, if the stride was negative
+ if type(arr) is not type(wrap.__self__):
+ # Some types (e.g. recarray) turn into an ndarray along the
+ # way, so we have to wrap it again in order to match the
+ # behavior with copy=True.
+ arr = wrap(arr)
+ return arr
+
+ # next cast to a packed format with all fields converted to new dtype
+ packed_fields = np.dtype({'names': names,
+ 'formats': [(out_dtype, dt.shape) for dt in dts]})
+ arr = arr.astype(packed_fields, copy=copy, casting=casting)
+
+    # finally it is safe to view the packed fields as the unstructured type
+ return arr.view((out_dtype, (sum(counts),)))
+
+
+def _unstructured_to_structured_dispatcher(arr, dtype=None, names=None,
+ align=None, copy=None, casting=None):
+ return (arr,)
+
+@array_function_dispatch(_unstructured_to_structured_dispatcher)
+def unstructured_to_structured(arr, dtype=None, names=None, align=False,
+ copy=False, casting='unsafe'):
+ """
+ Converts an n-D unstructured array into an (n-1)-D structured array.
+
+ The last dimension of the input array is converted into a structure, with
+ number of field-elements equal to the size of the last dimension of the
+ input array. By default all output fields have the input array's dtype, but
+    an output structured dtype with an equal number of field-elements can be
+ supplied instead.
+
+ Nested fields, as well as each element of any subarray fields, all count
+ towards the number of field-elements.
+
+ Parameters
+ ----------
+ arr : ndarray
+        Unstructured array to convert.
+ dtype : dtype, optional
+ The structured dtype of the output array
+ names : list of strings, optional
+ If dtype is not supplied, this specifies the field names for the output
+ dtype, in order. The field dtypes will be the same as the input array.
+ align : boolean, optional
+ Whether to create an aligned memory layout.
+ copy : bool, optional
+ See copy argument to `numpy.ndarray.astype`. If true, always return a
+ copy. If false, and `dtype` requirements are satisfied, a view is
+ returned.
+ casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
+ See casting argument of `numpy.ndarray.astype`. Controls what kind of
+ data casting may occur.
+
+ Returns
+ -------
+ structured : ndarray
+ Structured array with fewer dimensions.
+
+ Examples
+ --------
+ >>> import numpy as np
+
+ >>> from numpy.lib import recfunctions as rfn
+ >>> dt = np.dtype([('a', 'i4'), ('b', 'f4,u2'), ('c', 'f4', 2)])
+ >>> a = np.arange(20).reshape((4,5))
+ >>> a
+ array([[ 0, 1, 2, 3, 4],
+ [ 5, 6, 7, 8, 9],
+ [10, 11, 12, 13, 14],
+ [15, 16, 17, 18, 19]])
+ >>> rfn.unstructured_to_structured(a, dt)
+ array([( 0, ( 1., 2), [ 3., 4.]), ( 5, ( 6., 7), [ 8., 9.]),
+ (10, (11., 12), [13., 14.]), (15, (16., 17), [18., 19.])],
+ dtype=[('a', '<i4'), ('b', [('f0', '<f4'), ('f1', '<u2')]), ('c', '<f4', (2,))])
+
+ """ # noqa: E501
+ if arr.shape == ():
+ raise ValueError('arr must have at least one dimension')
+ n_elem = arr.shape[-1]
+ if n_elem == 0:
+ # too many bugs elsewhere for this to work now
+ raise NotImplementedError("last axis with size 0 is not supported")
+
+ if dtype is None:
+ if names is None:
+ names = [f'f{n}' for n in range(n_elem)]
+ out_dtype = np.dtype([(n, arr.dtype) for n in names], align=align)
+ fields = _get_fields_and_offsets(out_dtype)
+ dts, counts, offsets = zip(*fields)
+ else:
+ if names is not None:
+ raise ValueError("don't supply both dtype and names")
+ # if dtype is the args of np.dtype, construct it
+ dtype = np.dtype(dtype)
+ # sanity check of the input dtype
+ fields = _get_fields_and_offsets(dtype)
+ if len(fields) == 0:
+ dts, counts, offsets = [], [], []
+ else:
+ dts, counts, offsets = zip(*fields)
+
+ if n_elem != sum(counts):
+ raise ValueError('The length of the last dimension of arr must '
+ 'be equal to the number of fields in dtype')
+ out_dtype = dtype
+ if align and not out_dtype.isalignedstruct:
+ raise ValueError("align was True but dtype is not aligned")
+
+ names = [f'f{n}' for n in range(len(fields))]
+
+ # Use a series of views and casts to convert to a structured array:
+
+ # first view as a packed structured array of one dtype
+ packed_fields = np.dtype({'names': names,
+ 'formats': [(arr.dtype, dt.shape) for dt in dts]})
+ arr = np.ascontiguousarray(arr).view(packed_fields)
+
+ # next cast to an unpacked but flattened format with varied dtypes
+ flattened_fields = np.dtype({'names': names,
+ 'formats': dts,
+ 'offsets': offsets,
+ 'itemsize': out_dtype.itemsize})
+ arr = arr.astype(flattened_fields, copy=copy, casting=casting)
+
+ # finally view as the final nested dtype and remove the last axis
+ return arr.view(out_dtype)[..., 0]
+
+def _apply_along_fields_dispatcher(func, arr):
+ return (arr,)
+
+@array_function_dispatch(_apply_along_fields_dispatcher)
+def apply_along_fields(func, arr):
+ """
+ Apply function 'func' as a reduction across fields of a structured array.
+
+ This is similar to `numpy.apply_along_axis`, but treats the fields of a
+ structured array as an extra axis. The fields are all first cast to a
+ common type following the type-promotion rules from `numpy.result_type`
+ applied to the field's dtypes.
+
+ Parameters
+ ----------
+ func : function
+ Function to apply on the "field" dimension. This function must
+ support an `axis` argument, like `numpy.mean`, `numpy.sum`, etc.
+ arr : ndarray
+ Structured array for which to apply func.
+
+ Returns
+ -------
+ out : ndarray
+ Result of the reduction operation
+
+ Examples
+ --------
+ >>> import numpy as np
+
+ >>> from numpy.lib import recfunctions as rfn
+ >>> b = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)],
+ ... dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')])
+ >>> rfn.apply_along_fields(np.mean, b)
+ array([ 2.66666667, 5.33333333, 8.66666667, 11. ])
+ >>> rfn.apply_along_fields(np.mean, b[['x', 'z']])
+ array([ 3. , 5.5, 9. , 11. ])
+
+ """
+ if arr.dtype.names is None:
+ raise ValueError('arr must be a structured array')
+
+ uarr = structured_to_unstructured(arr)
+ return func(uarr, axis=-1)
+ # works and avoids axis requirement, but very, very slow:
+ #return np.apply_along_axis(func, -1, uarr)
+
+def _assign_fields_by_name_dispatcher(dst, src, zero_unassigned=None):
+ return dst, src
+
+@array_function_dispatch(_assign_fields_by_name_dispatcher)
+def assign_fields_by_name(dst, src, zero_unassigned=True):
+ """
+ Assigns values from one structured array to another by field name.
+
+ Normally in numpy >= 1.14, assignment of one structured array to another
+ copies fields "by position", meaning that the first field from the src is
+ copied to the first field of the dst, and so on, regardless of field name.
+
+ This function instead copies "by field name", such that fields in the dst
+ are assigned from the identically named field in the src. This applies
+    recursively for nested structures. This is how structure assignment
+    worked from numpy 1.6 through 1.13.
+
+ Parameters
+ ----------
+ dst : ndarray
+ src : ndarray
+ The source and destination arrays during assignment.
+ zero_unassigned : bool, optional
+ If True, fields in the dst for which there was no matching
+ field in the src are filled with the value 0 (zero). This
+ was the behavior of numpy <= 1.13. If False, those fields
+ are not modified.
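+
+    Examples
+    --------
+    A minimal sketch (dtypes and values chosen for illustration):
+
+    >>> import numpy as np
+    >>> from numpy.lib import recfunctions as rfn
+    >>> dst = np.zeros(2, dtype=[('a', 'i4'), ('b', 'f8')])
+    >>> src = np.ones(2, dtype=[('b', 'f8'), ('a', 'i4')])
+    >>> rfn.assign_fields_by_name(dst, src)
+    >>> dst
+    array([(1, 1.), (1, 1.)], dtype=[('a', '<i4'), ('b', '<f8')])
+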
+ """
+
+ if dst.dtype.names is None:
+ dst[...] = src
+ return
+
+ for name in dst.dtype.names:
+ if name not in src.dtype.names:
+ if zero_unassigned:
+ dst[name] = 0
+ else:
+ assign_fields_by_name(dst[name], src[name],
+ zero_unassigned)
+
+def _require_fields_dispatcher(array, required_dtype):
+ return (array,)
+
+@array_function_dispatch(_require_fields_dispatcher)
+def require_fields(array, required_dtype):
+ """
+ Casts a structured array to a new dtype using assignment by field-name.
+
+ This function assigns from the old to the new array by name, so the
+ value of a field in the output array is the value of the field with the
+ same name in the source array. This has the effect of creating a new
+ ndarray containing only the fields "required" by the required_dtype.
+
+ If a field name in the required_dtype does not exist in the
+ input array, that field is created and set to 0 in the output array.
+
+ Parameters
+ ----------
+    array : ndarray
+ array to cast
+ required_dtype : dtype
+ datatype for output array
+
+ Returns
+ -------
+ out : ndarray
+ array with the new dtype, with field values copied from the fields in
+ the input array with the same name
+
+ Examples
+ --------
+ >>> import numpy as np
+
+ >>> from numpy.lib import recfunctions as rfn
+ >>> a = np.ones(4, dtype=[('a', 'i4'), ('b', 'f8'), ('c', 'u1')])
+ >>> rfn.require_fields(a, [('b', 'f4'), ('c', 'u1')])
+ array([(1., 1), (1., 1), (1., 1), (1., 1)],
+ dtype=[('b', '<f4'), ('c', 'u1')])
+ >>> rfn.require_fields(a, [('b', 'f4'), ('newf', 'u1')])
+ array([(1., 0), (1., 0), (1., 0), (1., 0)],
+ dtype=[('b', '<f4'), ('newf', 'u1')])
+
+ """
+ out = np.empty(array.shape, dtype=required_dtype)
+ assign_fields_by_name(out, array)
+ return out
+
+
+def _stack_arrays_dispatcher(arrays, defaults=None, usemask=None,
+ asrecarray=None, autoconvert=None):
+ return arrays
+
+
+@array_function_dispatch(_stack_arrays_dispatcher)
+def stack_arrays(arrays, defaults=None, usemask=True, asrecarray=False,
+ autoconvert=False):
+ """
+    Superpose arrays field by field.
+
+ Parameters
+ ----------
+ arrays : array or sequence
+ Sequence of input arrays.
+ defaults : dictionary, optional
+ Dictionary mapping field names to the corresponding default values.
+    usemask : {True, False}, optional
+        Whether to return a MaskedArray (or MaskedRecords if
+        `asrecarray==True`) or a ndarray.
+    asrecarray : {False, True}, optional
+        Whether to return a recarray (or MaskedRecords if `usemask==True`)
+        or just a flexible-type ndarray.
+    autoconvert : {False, True}, optional
+        Whether to automatically cast each field's type to the largest of
+        the input types.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> from numpy.lib import recfunctions as rfn
+ >>> x = np.array([1, 2,])
+ >>> rfn.stack_arrays(x) is x
+ True
+ >>> z = np.array([('A', 1), ('B', 2)], dtype=[('A', '|S3'), ('B', float)])
+ >>> zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
+ ... dtype=[('A', '|S3'), ('B', np.double), ('C', np.double)])
+ >>> test = rfn.stack_arrays((z,zz))
+ >>> test
+ masked_array(data=[(b'A', 1.0, --), (b'B', 2.0, --), (b'a', 10.0, 100.0),
+ (b'b', 20.0, 200.0), (b'c', 30.0, 300.0)],
+ mask=[(False, False, True), (False, False, True),
+ (False, False, False), (False, False, False),
+ (False, False, False)],
+ fill_value=(b'N/A', 1e+20, 1e+20),
+ dtype=[('A', 'S3'), ('B', '<f8'), ('C', '<f8')])
+
+ """
+ if isinstance(arrays, np.ndarray):
+ return arrays
+ elif len(arrays) == 1:
+ return arrays[0]
+ seqarrays = [np.asanyarray(a).ravel() for a in arrays]
+ nrecords = [len(a) for a in seqarrays]
+ ndtype = [a.dtype for a in seqarrays]
+ fldnames = [d.names for d in ndtype]
+ #
+ dtype_l = ndtype[0]
+ newdescr = _get_fieldspec(dtype_l)
+ names = [n for n, d in newdescr]
+ for dtype_n in ndtype[1:]:
+ for fname, fdtype in _get_fieldspec(dtype_n):
+ if fname not in names:
+ newdescr.append((fname, fdtype))
+ names.append(fname)
+ else:
+ nameidx = names.index(fname)
+ _, cdtype = newdescr[nameidx]
+ if autoconvert:
+ newdescr[nameidx] = (fname, max(fdtype, cdtype))
+ elif fdtype != cdtype:
+ raise TypeError(f"Incompatible type '{cdtype}' <> '{fdtype}'")
+ # Only one field: use concatenate
+ if len(newdescr) == 1:
+ output = ma.concatenate(seqarrays)
+ else:
+ #
+ output = ma.masked_all((np.sum(nrecords),), newdescr)
+ offset = np.cumsum(np.r_[0, nrecords])
+ seen = []
+ for (a, n, i, j) in zip(seqarrays, fldnames, offset[:-1], offset[1:]):
+ names = a.dtype.names
+ if names is None:
+ output[f'f{len(seen)}'][i:j] = a
+ else:
+ for name in n:
+ output[name][i:j] = a[name]
+ if name not in seen:
+ seen.append(name)
+ #
+ return _fix_output(_fix_defaults(output, defaults),
+ usemask=usemask, asrecarray=asrecarray)
+
+
+def _find_duplicates_dispatcher(
+ a, key=None, ignoremask=None, return_index=None):
+ return (a,)
+
+
+@array_function_dispatch(_find_duplicates_dispatcher)
+def find_duplicates(a, key=None, ignoremask=True, return_index=False):
+ """
+ Find the duplicates in a structured array along a given key
+
+ Parameters
+ ----------
+ a : array-like
+ Input array
+ key : {string, None}, optional
+        Name of the field along which to check for duplicates.
+        If None, the search is performed by records.
+ ignoremask : {True, False}, optional
+ Whether masked data should be discarded or considered as duplicates.
+ return_index : {False, True}, optional
+ Whether to return the indices of the duplicated values.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> from numpy.lib import recfunctions as rfn
+ >>> ndtype = [('a', int)]
+ >>> a = np.ma.array([1, 1, 1, 2, 2, 3, 3],
+ ... mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype)
+ >>> rfn.find_duplicates(a, ignoremask=True, return_index=True)
+ (masked_array(data=[(1,), (1,), (2,), (2,)],
+ mask=[(False,), (False,), (False,), (False,)],
+ fill_value=(999999,),
+ dtype=[('a', '<i8')]), array([0, 1, 3, 4]))
+ """
+ a = np.asanyarray(a).ravel()
+ # Get a dictionary of fields
+ fields = get_fieldstructure(a.dtype)
+ # Get the sorting data (by selecting the corresponding field)
+ base = a
+ if key:
+ for f in fields[key]:
+ base = base[f]
+ base = base[key]
+ # Get the sorting indices and the sorted data
+ sortidx = base.argsort()
+ sortedbase = base[sortidx]
+ sorteddata = sortedbase.filled()
+ # Compare the sorting data
+ flag = (sorteddata[:-1] == sorteddata[1:])
+ # If masked data must be ignored, set the flag to false where needed
+ if ignoremask:
+ sortedmask = sortedbase.recordmask
+ flag[sortedmask[1:]] = False
+ flag = np.concatenate(([False], flag))
+ # We need to take the point on the left as well (else we're missing it)
+ flag[:-1] = flag[:-1] + flag[1:]
+ duplicates = a[sortidx][flag]
+ if return_index:
+ return (duplicates, sortidx[flag])
+ else:
+ return duplicates
+
+
+def _join_by_dispatcher(
+ key, r1, r2, jointype=None, r1postfix=None, r2postfix=None,
+ defaults=None, usemask=None, asrecarray=None):
+ return (r1, r2)
+
+
+@array_function_dispatch(_join_by_dispatcher)
+def join_by(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2',
+ defaults=None, usemask=True, asrecarray=False):
+ """
+ Join arrays `r1` and `r2` on key `key`.
+
+    The key should be either a string or a sequence of strings corresponding
+    to the fields used to join the arrays. An exception is raised if the
+ `key` field cannot be found in the two input arrays. Neither `r1` nor
+ `r2` should have any duplicates along `key`: the presence of duplicates
+ will make the output quite unreliable. Note that duplicates are not
+ looked for by the algorithm.
+
+ Parameters
+ ----------
+ key : {string, sequence}
+ A string or a sequence of strings corresponding to the fields used
+ for comparison.
+ r1, r2 : arrays
+ Structured arrays.
+ jointype : {'inner', 'outer', 'leftouter'}, optional
+ If 'inner', returns the elements common to both r1 and r2.
+        If 'outer', returns the common elements as well as the elements of
+        r1 not in r2 and the elements of r2 not in r1.
+ If 'leftouter', returns the common elements and the elements of r1
+ not in r2.
+    r1postfix : string, optional
+        String appended to the names of the fields of r1 that are present
+        in r2 but absent from the key.
+    r2postfix : string, optional
+        String appended to the names of the fields of r2 that are present
+        in r1 but absent from the key.
+ defaults : {dictionary}, optional
+ Dictionary mapping field names to the corresponding default values.
+ usemask : {True, False}, optional
+        Whether to return a MaskedArray (or MaskedRecords if
+        `asrecarray==True`) or a ndarray.
+ asrecarray : {False, True}, optional
+ Whether to return a recarray (or MaskedRecords if `usemask==True`)
+ or just a flexible-type ndarray.
+
+ Notes
+ -----
+ * The output is sorted along the key.
+ * A temporary array is formed by dropping the fields not in the key for
+ the two arrays and concatenating the result. This array is then
+ sorted, and the common entries selected. The output is constructed by
+ filling the fields with the selected entries. Matching is not
+ preserved if there are some duplicates...
+
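+    Examples
+    --------
+    A minimal sketch of an inner join (field names and values chosen for
+    illustration):
+
+    >>> import numpy as np
+    >>> from numpy.lib import recfunctions as rfn
+    >>> r1 = np.array([(1, 10.), (2, 20.)], dtype=[('k', '<i8'), ('a', '<f8')])
+    >>> r2 = np.array([(2, 200.), (3, 300.)], dtype=[('k', '<i8'), ('b', '<f8')])
+    >>> rfn.join_by('k', r1, r2, usemask=False)
+    array([(2, 20., 200.)], dtype=[('k', '<i8'), ('a', '<f8'), ('b', '<f8')])
+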
+ """
+ # Check jointype
+ if jointype not in ('inner', 'outer', 'leftouter'):
+ raise ValueError(
+ "The 'jointype' argument should be in 'inner', "
+ "'outer' or 'leftouter' (got '%s' instead)" % jointype
+ )
+ # If we have a single key, put it in a tuple
+ if isinstance(key, str):
+ key = (key,)
+
+ # Check the keys
+ if len(set(key)) != len(key):
+ dup = next(x for n, x in enumerate(key) if x in key[n + 1:])
+ raise ValueError(f"duplicate join key {dup!r}")
+ for name in key:
+ if name not in r1.dtype.names:
+ raise ValueError(f'r1 does not have key field {name!r}')
+ if name not in r2.dtype.names:
+ raise ValueError(f'r2 does not have key field {name!r}')
+
+ # Make sure we work with ravelled arrays
+ r1 = r1.ravel()
+ r2 = r2.ravel()
+ (nb1, nb2) = (len(r1), len(r2))
+ (r1names, r2names) = (r1.dtype.names, r2.dtype.names)
+
+ # Check the names for collision
+ collisions = (set(r1names) & set(r2names)) - set(key)
+ if collisions and not (r1postfix or r2postfix):
+ msg = "r1 and r2 contain common names, r1postfix and r2postfix "
+ msg += "can't both be empty"
+ raise ValueError(msg)
+
+ # Make temporary arrays of just the keys
+ # (use order of keys in `r1` for back-compatibility)
+ key1 = [n for n in r1names if n in key]
+ r1k = _keep_fields(r1, key1)
+ r2k = _keep_fields(r2, key1)
+
+ # Concatenate the two arrays for comparison
+ aux = ma.concatenate((r1k, r2k))
+ idx_sort = aux.argsort(order=key)
+ aux = aux[idx_sort]
+ #
+ # Get the common keys
+ flag_in = ma.concatenate(([False], aux[1:] == aux[:-1]))
+ flag_in[:-1] = flag_in[1:] + flag_in[:-1]
+ idx_in = idx_sort[flag_in]
+ idx_1 = idx_in[(idx_in < nb1)]
+ idx_2 = idx_in[(idx_in >= nb1)] - nb1
+ (r1cmn, r2cmn) = (len(idx_1), len(idx_2))
+ if jointype == 'inner':
+ (r1spc, r2spc) = (0, 0)
+ elif jointype == 'outer':
+ idx_out = idx_sort[~flag_in]
+ idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)]))
+ idx_2 = np.concatenate((idx_2, idx_out[(idx_out >= nb1)] - nb1))
+ (r1spc, r2spc) = (len(idx_1) - r1cmn, len(idx_2) - r2cmn)
+ elif jointype == 'leftouter':
+ idx_out = idx_sort[~flag_in]
+ idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)]))
+ (r1spc, r2spc) = (len(idx_1) - r1cmn, 0)
+ # Select the entries from each input
+ (s1, s2) = (r1[idx_1], r2[idx_2])
+ #
+ # Build the new description of the output array .......
+ # Start with the key fields
+ ndtype = _get_fieldspec(r1k.dtype)
+
+ # Add the fields from r1
+ for fname, fdtype in _get_fieldspec(r1.dtype):
+ if fname not in key:
+ ndtype.append((fname, fdtype))
+
+ # Add the fields from r2
+ for fname, fdtype in _get_fieldspec(r2.dtype):
+ # Have we seen the current name already ?
+ # we need to rebuild this list every time
+ names = [name for name, dtype in ndtype]
+ try:
+ nameidx = names.index(fname)
+ except ValueError:
+ #... we haven't: just add the description to the current list
+ ndtype.append((fname, fdtype))
+ else:
+ # collision
+ _, cdtype = ndtype[nameidx]
+ if fname in key:
+ # The current field is part of the key: take the largest dtype
+ ndtype[nameidx] = (fname, max(fdtype, cdtype))
+ else:
+ # The current field is not part of the key: add the suffixes,
+ # and place the new field adjacent to the old one
+ ndtype[nameidx:nameidx + 1] = [
+ (fname + r1postfix, cdtype),
+ (fname + r2postfix, fdtype)
+ ]
+ # Rebuild a dtype from the new fields
+ ndtype = np.dtype(ndtype)
+    # Find the largest number of common fields:
+    # r1cmn and r2cmn should be equal, but...
+ cmn = max(r1cmn, r2cmn)
+ # Construct an empty array
+ output = ma.masked_all((cmn + r1spc + r2spc,), dtype=ndtype)
+ names = output.dtype.names
+ for f in r1names:
+ selected = s1[f]
+ if f not in names or (f in r2names and not r2postfix and f not in key):
+ f += r1postfix
+ current = output[f]
+ current[:r1cmn] = selected[:r1cmn]
+ if jointype in ('outer', 'leftouter'):
+ current[cmn:cmn + r1spc] = selected[r1cmn:]
+ for f in r2names:
+ selected = s2[f]
+ if f not in names or (f in r1names and not r1postfix and f not in key):
+ f += r2postfix
+ current = output[f]
+ current[:r2cmn] = selected[:r2cmn]
+ if (jointype == 'outer') and r2spc:
+ current[-r2spc:] = selected[r2cmn:]
+ # Sort and finalize the output
+ output.sort(order=key)
+ kwargs = {'usemask': usemask, 'asrecarray': asrecarray}
+ return _fix_output(_fix_defaults(output, defaults), **kwargs)
+
+
+def _rec_join_dispatcher(
+ key, r1, r2, jointype=None, r1postfix=None, r2postfix=None,
+ defaults=None):
+ return (r1, r2)
+
+
+@array_function_dispatch(_rec_join_dispatcher)
+def rec_join(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2',
+ defaults=None):
+ """
+    Join arrays `r1` and `r2` on keys.
+
+    Alternative to `join_by` that always returns a np.recarray.
+
+ See Also
+ --------
+ join_by : equivalent function
+ """
+ kwargs = {'jointype': jointype, 'r1postfix': r1postfix, 'r2postfix': r2postfix,
+ 'defaults': defaults, 'usemask': False, 'asrecarray': True}
+ return join_by(key, r1, r2, **kwargs)
+
+
+del array_function_dispatch
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/recfunctions.pyi b/.venv/lib/python3.12/site-packages/numpy/lib/recfunctions.pyi
new file mode 100644
index 0000000..0736429
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/recfunctions.pyi
@@ -0,0 +1,435 @@
+from collections.abc import Callable, Iterable, Mapping, Sequence
+from typing import Any, Literal, TypeAlias, overload
+
+from _typeshed import Incomplete
+from typing_extensions import TypeVar
+
+import numpy as np
+import numpy.typing as npt
+from numpy._typing import _AnyShape, _DTypeLike, _DTypeLikeVoid
+from numpy.ma.mrecords import MaskedRecords
+
+__all__ = [
+ "append_fields",
+ "apply_along_fields",
+ "assign_fields_by_name",
+ "drop_fields",
+ "find_duplicates",
+ "flatten_descr",
+ "get_fieldstructure",
+ "get_names",
+ "get_names_flat",
+ "join_by",
+ "merge_arrays",
+ "rec_append_fields",
+ "rec_drop_fields",
+ "rec_join",
+ "recursive_fill_fields",
+ "rename_fields",
+ "repack_fields",
+ "require_fields",
+ "stack_arrays",
+ "structured_to_unstructured",
+ "unstructured_to_structured",
+]
+
+_T = TypeVar("_T")
+_ShapeT = TypeVar("_ShapeT", bound=tuple[int, ...])
+_ScalarT = TypeVar("_ScalarT", bound=np.generic)
+_DTypeT = TypeVar("_DTypeT", bound=np.dtype)
+_ArrayT = TypeVar("_ArrayT", bound=npt.NDArray[Any])
+_VoidArrayT = TypeVar("_VoidArrayT", bound=npt.NDArray[np.void])
+_NonVoidDTypeT = TypeVar("_NonVoidDTypeT", bound=_NonVoidDType)
+
+_OneOrMany: TypeAlias = _T | Iterable[_T]
+_BuiltinSequence: TypeAlias = tuple[_T, ...] | list[_T]
+
+_NestedNames: TypeAlias = tuple[str | _NestedNames, ...]
+_NonVoid: TypeAlias = np.bool | np.number | np.character | np.datetime64 | np.timedelta64 | np.object_
+_NonVoidDType: TypeAlias = np.dtype[_NonVoid] | np.dtypes.StringDType
+
+_JoinType: TypeAlias = Literal["inner", "outer", "leftouter"]
+
+###
+
+def recursive_fill_fields(input: npt.NDArray[np.void], output: _VoidArrayT) -> _VoidArrayT: ...
+
+#
+def get_names(adtype: np.dtype[np.void]) -> _NestedNames: ...
+def get_names_flat(adtype: np.dtype[np.void]) -> tuple[str, ...]: ...
+
+#
+@overload
+def flatten_descr(ndtype: _NonVoidDTypeT) -> tuple[tuple[Literal[""], _NonVoidDTypeT]]: ...
+@overload
+def flatten_descr(ndtype: np.dtype[np.void]) -> tuple[tuple[str, np.dtype], ...]: ...
+
+#
+def get_fieldstructure(
+ adtype: np.dtype[np.void],
+ lastname: str | None = None,
+ parents: dict[str, list[str]] | None = None,
+) -> dict[str, list[str]]: ...
+
+#
+@overload
+def merge_arrays(
+ seqarrays: Sequence[np.ndarray[_ShapeT, np.dtype]] | np.ndarray[_ShapeT, np.dtype],
+ fill_value: float = -1,
+ flatten: bool = False,
+ usemask: bool = False,
+ asrecarray: bool = False,
+) -> np.recarray[_ShapeT, np.dtype[np.void]]: ...
+@overload
+def merge_arrays(
+ seqarrays: Sequence[npt.ArrayLike] | np.void,
+ fill_value: float = -1,
+ flatten: bool = False,
+ usemask: bool = False,
+ asrecarray: bool = False,
+) -> np.recarray[_AnyShape, np.dtype[np.void]]: ...
+
+#
+@overload
+def drop_fields(
+ base: np.ndarray[_ShapeT, np.dtype[np.void]],
+ drop_names: str | Iterable[str],
+ usemask: bool = True,
+ asrecarray: Literal[False] = False,
+) -> np.ndarray[_ShapeT, np.dtype[np.void]]: ...
+@overload
+def drop_fields(
+ base: np.ndarray[_ShapeT, np.dtype[np.void]],
+ drop_names: str | Iterable[str],
+ usemask: bool,
+ asrecarray: Literal[True],
+) -> np.recarray[_ShapeT, np.dtype[np.void]]: ...
+@overload
+def drop_fields(
+ base: np.ndarray[_ShapeT, np.dtype[np.void]],
+ drop_names: str | Iterable[str],
+ usemask: bool = True,
+ *,
+ asrecarray: Literal[True],
+) -> np.recarray[_ShapeT, np.dtype[np.void]]: ...
+
+#
+@overload
+def rename_fields(
+ base: MaskedRecords[_ShapeT, np.dtype[np.void]],
+ namemapper: Mapping[str, str],
+) -> MaskedRecords[_ShapeT, np.dtype[np.void]]: ...
+@overload
+def rename_fields(
+ base: np.ma.MaskedArray[_ShapeT, np.dtype[np.void]],
+ namemapper: Mapping[str, str],
+) -> np.ma.MaskedArray[_ShapeT, np.dtype[np.void]]: ...
+@overload
+def rename_fields(
+ base: np.recarray[_ShapeT, np.dtype[np.void]],
+ namemapper: Mapping[str, str],
+) -> np.recarray[_ShapeT, np.dtype[np.void]]: ...
+@overload
+def rename_fields(
+ base: np.ndarray[_ShapeT, np.dtype[np.void]],
+ namemapper: Mapping[str, str],
+) -> np.ndarray[_ShapeT, np.dtype[np.void]]: ...
+
+#
+@overload
+def append_fields(
+ base: np.ndarray[_ShapeT, np.dtype[np.void]],
+ names: _OneOrMany[str],
+ data: _OneOrMany[npt.NDArray[Any]],
+ dtypes: _BuiltinSequence[np.dtype] | None,
+ fill_value: int,
+ usemask: Literal[False],
+ asrecarray: Literal[False] = False,
+) -> np.ndarray[_ShapeT, np.dtype[np.void]]: ...
+@overload
+def append_fields(
+ base: np.ndarray[_ShapeT, np.dtype[np.void]],
+ names: _OneOrMany[str],
+ data: _OneOrMany[npt.NDArray[Any]],
+ dtypes: _BuiltinSequence[np.dtype] | None = None,
+ fill_value: int = -1,
+ *,
+ usemask: Literal[False],
+ asrecarray: Literal[False] = False,
+) -> np.ndarray[_ShapeT, np.dtype[np.void]]: ...
+@overload
+def append_fields(
+ base: np.ndarray[_ShapeT, np.dtype[np.void]],
+ names: _OneOrMany[str],
+ data: _OneOrMany[npt.NDArray[Any]],
+ dtypes: _BuiltinSequence[np.dtype] | None,
+ fill_value: int,
+ usemask: Literal[False],
+ asrecarray: Literal[True],
+) -> np.recarray[_ShapeT, np.dtype[np.void]]: ...
+@overload
+def append_fields(
+ base: np.ndarray[_ShapeT, np.dtype[np.void]],
+ names: _OneOrMany[str],
+ data: _OneOrMany[npt.NDArray[Any]],
+ dtypes: _BuiltinSequence[np.dtype] | None = None,
+ fill_value: int = -1,
+ *,
+ usemask: Literal[False],
+ asrecarray: Literal[True],
+) -> np.recarray[_ShapeT, np.dtype[np.void]]: ...
+@overload
+def append_fields(
+ base: np.ndarray[_ShapeT, np.dtype[np.void]],
+ names: _OneOrMany[str],
+ data: _OneOrMany[npt.NDArray[Any]],
+ dtypes: _BuiltinSequence[np.dtype] | None = None,
+ fill_value: int = -1,
+ usemask: Literal[True] = True,
+ asrecarray: Literal[False] = False,
+) -> np.ma.MaskedArray[_ShapeT, np.dtype[np.void]]: ...
+@overload
+def append_fields(
+ base: np.ndarray[_ShapeT, np.dtype[np.void]],
+ names: _OneOrMany[str],
+ data: _OneOrMany[npt.NDArray[Any]],
+ dtypes: _BuiltinSequence[np.dtype] | None,
+ fill_value: int,
+ usemask: Literal[True],
+ asrecarray: Literal[True],
+) -> MaskedRecords[_ShapeT, np.dtype[np.void]]: ...
+@overload
+def append_fields(
+ base: np.ndarray[_ShapeT, np.dtype[np.void]],
+ names: _OneOrMany[str],
+ data: _OneOrMany[npt.NDArray[Any]],
+ dtypes: _BuiltinSequence[np.dtype] | None = None,
+ fill_value: int = -1,
+ usemask: Literal[True] = True,
+ *,
+ asrecarray: Literal[True],
+) -> MaskedRecords[_ShapeT, np.dtype[np.void]]: ...
+
+#
+def rec_drop_fields(
+ base: np.ndarray[_ShapeT, np.dtype[np.void]],
+ drop_names: str | Iterable[str],
+) -> np.recarray[_ShapeT, np.dtype[np.void]]: ...
+
+#
+def rec_append_fields(
+ base: np.ndarray[_ShapeT, np.dtype[np.void]],
+ names: _OneOrMany[str],
+ data: _OneOrMany[npt.NDArray[Any]],
+ dtypes: _BuiltinSequence[np.dtype] | None = None,
+) -> np.recarray[_ShapeT, np.dtype[np.void]]: ...
+
+# TODO(jorenham): Stop passing `void` directly once structured dtypes are implemented,
+# e.g. using a `TypeVar` with constraints.
+# https://github.com/numpy/numtype/issues/92
+@overload
+def repack_fields(a: _DTypeT, align: bool = False, recurse: bool = False) -> _DTypeT: ...
+@overload
+def repack_fields(a: _ScalarT, align: bool = False, recurse: bool = False) -> _ScalarT: ...
+@overload
+def repack_fields(a: _ArrayT, align: bool = False, recurse: bool = False) -> _ArrayT: ...
+
+# TODO(jorenham): Attempt shape-typing (return type has ndim == arr.ndim + 1)
+@overload
+def structured_to_unstructured(
+ arr: npt.NDArray[np.void],
+ dtype: _DTypeLike[_ScalarT],
+ copy: bool = False,
+ casting: np._CastingKind = "unsafe",
+) -> npt.NDArray[_ScalarT]: ...
+@overload
+def structured_to_unstructured(
+ arr: npt.NDArray[np.void],
+ dtype: npt.DTypeLike | None = None,
+ copy: bool = False,
+ casting: np._CastingKind = "unsafe",
+) -> npt.NDArray[Any]: ...
+
+#
+@overload
+def unstructured_to_structured(
+ arr: npt.NDArray[Any],
+ dtype: npt.DTypeLike,
+ names: None = None,
+ align: bool = False,
+ copy: bool = False,
+ casting: str = "unsafe",
+) -> npt.NDArray[np.void]: ...
+@overload
+def unstructured_to_structured(
+ arr: npt.NDArray[Any],
+ dtype: None,
+ names: _OneOrMany[str],
+ align: bool = False,
+ copy: bool = False,
+ casting: str = "unsafe",
+) -> npt.NDArray[np.void]: ...
+
+#
+def apply_along_fields(
+ func: Callable[[np.ndarray[_ShapeT, Any]], npt.NDArray[Any]],
+ arr: np.ndarray[_ShapeT, np.dtype[np.void]],
+) -> np.ndarray[_ShapeT, np.dtype[np.void]]: ...
+
+#
+def assign_fields_by_name(dst: npt.NDArray[np.void], src: npt.NDArray[np.void], zero_unassigned: bool = True) -> None: ...
+
+#
+def require_fields(
+ array: np.ndarray[_ShapeT, np.dtype[np.void]],
+ required_dtype: _DTypeLikeVoid,
+) -> np.ndarray[_ShapeT, np.dtype[np.void]]: ...
+
+# TODO(jorenham): Attempt shape-typing
+@overload
+def stack_arrays(
+ arrays: _ArrayT,
+ defaults: Mapping[str, object] | None = None,
+ usemask: bool = True,
+ asrecarray: bool = False,
+ autoconvert: bool = False,
+) -> _ArrayT: ...
+@overload
+def stack_arrays(
+ arrays: Sequence[npt.NDArray[Any]],
+ defaults: Mapping[str, Incomplete] | None,
+ usemask: Literal[False],
+ asrecarray: Literal[False] = False,
+ autoconvert: bool = False,
+) -> npt.NDArray[np.void]: ...
+@overload
+def stack_arrays(
+ arrays: Sequence[npt.NDArray[Any]],
+ defaults: Mapping[str, Incomplete] | None = None,
+ *,
+ usemask: Literal[False],
+ asrecarray: Literal[False] = False,
+ autoconvert: bool = False,
+) -> npt.NDArray[np.void]: ...
+@overload
+def stack_arrays(
+ arrays: Sequence[npt.NDArray[Any]],
+ defaults: Mapping[str, Incomplete] | None = None,
+ *,
+ usemask: Literal[False],
+ asrecarray: Literal[True],
+ autoconvert: bool = False,
+) -> np.recarray[_AnyShape, np.dtype[np.void]]: ...
+@overload
+def stack_arrays(
+ arrays: Sequence[npt.NDArray[Any]],
+ defaults: Mapping[str, Incomplete] | None = None,
+ usemask: Literal[True] = True,
+ asrecarray: Literal[False] = False,
+ autoconvert: bool = False,
+) -> np.ma.MaskedArray[_AnyShape, np.dtype[np.void]]: ...
+@overload
+def stack_arrays(
+ arrays: Sequence[npt.NDArray[Any]],
+ defaults: Mapping[str, Incomplete] | None,
+ usemask: Literal[True],
+ asrecarray: Literal[True],
+ autoconvert: bool = False,
+) -> MaskedRecords[_AnyShape, np.dtype[np.void]]: ...
+@overload
+def stack_arrays(
+ arrays: Sequence[npt.NDArray[Any]],
+ defaults: Mapping[str, Incomplete] | None = None,
+ usemask: Literal[True] = True,
+ *,
+ asrecarray: Literal[True],
+ autoconvert: bool = False,
+) -> MaskedRecords[_AnyShape, np.dtype[np.void]]: ...
+
+#
+@overload
+def find_duplicates(
+ a: np.ma.MaskedArray[_ShapeT, np.dtype[np.void]],
+ key: str | None = None,
+ ignoremask: bool = True,
+ return_index: Literal[False] = False,
+) -> np.ma.MaskedArray[_ShapeT, np.dtype[np.void]]: ...
+@overload
+def find_duplicates(
+ a: np.ma.MaskedArray[_ShapeT, np.dtype[np.void]],
+ key: str | None,
+ ignoremask: bool,
+ return_index: Literal[True],
+) -> tuple[np.ma.MaskedArray[_ShapeT, np.dtype[np.void]], np.ndarray[_ShapeT, np.dtype[np.int_]]]: ...
+@overload
+def find_duplicates(
+ a: np.ma.MaskedArray[_ShapeT, np.dtype[np.void]],
+ key: str | None = None,
+ ignoremask: bool = True,
+ *,
+ return_index: Literal[True],
+) -> tuple[np.ma.MaskedArray[_ShapeT, np.dtype[np.void]], np.ndarray[_ShapeT, np.dtype[np.int_]]]: ...
+
+#
+@overload
+def join_by(
+ key: str | Sequence[str],
+ r1: npt.NDArray[np.void],
+ r2: npt.NDArray[np.void],
+ jointype: _JoinType = "inner",
+ r1postfix: str = "1",
+ r2postfix: str = "2",
+ defaults: Mapping[str, object] | None = None,
+ *,
+ usemask: Literal[False],
+ asrecarray: Literal[False] = False,
+) -> np.ndarray[tuple[int], np.dtype[np.void]]: ...
+@overload
+def join_by(
+ key: str | Sequence[str],
+ r1: npt.NDArray[np.void],
+ r2: npt.NDArray[np.void],
+ jointype: _JoinType = "inner",
+ r1postfix: str = "1",
+ r2postfix: str = "2",
+ defaults: Mapping[str, object] | None = None,
+ *,
+ usemask: Literal[False],
+ asrecarray: Literal[True],
+) -> np.recarray[tuple[int], np.dtype[np.void]]: ...
+@overload
+def join_by(
+ key: str | Sequence[str],
+ r1: npt.NDArray[np.void],
+ r2: npt.NDArray[np.void],
+ jointype: _JoinType = "inner",
+ r1postfix: str = "1",
+ r2postfix: str = "2",
+ defaults: Mapping[str, object] | None = None,
+ usemask: Literal[True] = True,
+ asrecarray: Literal[False] = False,
+) -> np.ma.MaskedArray[tuple[int], np.dtype[np.void]]: ...
+@overload
+def join_by(
+ key: str | Sequence[str],
+ r1: npt.NDArray[np.void],
+ r2: npt.NDArray[np.void],
+ jointype: _JoinType = "inner",
+ r1postfix: str = "1",
+ r2postfix: str = "2",
+ defaults: Mapping[str, object] | None = None,
+ usemask: Literal[True] = True,
+ *,
+ asrecarray: Literal[True],
+) -> MaskedRecords[tuple[int], np.dtype[np.void]]: ...
+
+#
+def rec_join(
+ key: str | Sequence[str],
+ r1: npt.NDArray[np.void],
+ r2: npt.NDArray[np.void],
+ jointype: _JoinType = "inner",
+ r1postfix: str = "1",
+ r2postfix: str = "2",
+ defaults: Mapping[str, object] | None = None,
+) -> np.recarray[tuple[int], np.dtype[np.void]]: ...
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/scimath.py b/.venv/lib/python3.12/site-packages/numpy/lib/scimath.py
new file mode 100644
index 0000000..fb6824d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/scimath.py
@@ -0,0 +1,13 @@
+from ._scimath_impl import ( # noqa: F401
+ __all__,
+ __doc__,
+ arccos,
+ arcsin,
+ arctanh,
+ log,
+ log2,
+ log10,
+ logn,
+ power,
+ sqrt,
+)
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/scimath.pyi b/.venv/lib/python3.12/site-packages/numpy/lib/scimath.pyi
new file mode 100644
index 0000000..253235d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/scimath.pyi
@@ -0,0 +1,12 @@
+from ._scimath_impl import (
+    __all__ as __all__,
+    arccos as arccos,
+    arcsin as arcsin,
+    arctanh as arctanh,
+    log as log,
+    log2 as log2,
+    log10 as log10,
+    logn as logn,
+    power as power,
+    sqrt as sqrt,
+)
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/stride_tricks.py b/.venv/lib/python3.12/site-packages/numpy/lib/stride_tricks.py
new file mode 100644
index 0000000..721a548
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/stride_tricks.py
@@ -0,0 +1 @@
+from ._stride_tricks_impl import __doc__, as_strided, sliding_window_view # noqa: F401
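+
+# A minimal sketch of the two re-exported helpers (values illustrative):
+#
+#     import numpy as np
+#     from numpy.lib.stride_tricks import sliding_window_view
+#
+#     sliding_window_view(np.arange(6), 3)
+#     # array([[0, 1, 2],
+#     #        [1, 2, 3],
+#     #        [2, 3, 4],
+#     #        [3, 4, 5]])
+#
+# `as_strided` builds views from raw shape/strides with no safety checks;
+# sliding_window_view is the safer interface where it fits.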
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/stride_tricks.pyi b/.venv/lib/python3.12/site-packages/numpy/lib/stride_tricks.pyi
new file mode 100644
index 0000000..42d8fe9
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/stride_tricks.pyi
@@ -0,0 +1,4 @@
+from numpy.lib._stride_tricks_impl import (
+    as_strided as as_strided,
+    sliding_window_view as sliding_window_view,
+)
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/__init__.py b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__init__.py
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/__init__.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000..08ba67e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/__init__.cpython-312.pyc
Binary files differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test__datasource.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test__datasource.cpython-312.pyc
new file mode 100644
index 0000000..f6ce1a4
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test__datasource.cpython-312.pyc
Binary files differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test__iotools.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test__iotools.cpython-312.pyc
new file mode 100644
index 0000000..064d4eb
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test__iotools.cpython-312.pyc
Binary files differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test__version.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test__version.cpython-312.pyc
new file mode 100644
index 0000000..a212b0b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test__version.cpython-312.pyc
Binary files differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_array_utils.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_array_utils.cpython-312.pyc
new file mode 100644
index 0000000..72fd67e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_array_utils.cpython-312.pyc
Binary files differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_arraypad.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_arraypad.cpython-312.pyc
new file mode 100644
index 0000000..673f7c1
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_arraypad.cpython-312.pyc
Binary files differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_arraysetops.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_arraysetops.cpython-312.pyc
new file mode 100644
index 0000000..3edbed0
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_arraysetops.cpython-312.pyc
Binary files differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_arrayterator.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_arrayterator.cpython-312.pyc
new file mode 100644
index 0000000..a9a8259
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_arrayterator.cpython-312.pyc
Binary files differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_format.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_format.cpython-312.pyc
new file mode 100644
index 0000000..4b905ea
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_format.cpython-312.pyc
Binary files differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_function_base.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_function_base.cpython-312.pyc
new file mode 100644
index 0000000..beaa22d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_function_base.cpython-312.pyc
Binary files differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_histograms.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_histograms.cpython-312.pyc
new file mode 100644
index 0000000..2b1a824
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_histograms.cpython-312.pyc
Binary files differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_index_tricks.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_index_tricks.cpython-312.pyc
new file mode 100644
index 0000000..2610bc0
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_index_tricks.cpython-312.pyc
Binary files differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_io.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_io.cpython-312.pyc
new file mode 100644
index 0000000..20e6590
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_io.cpython-312.pyc
Binary files differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_loadtxt.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_loadtxt.cpython-312.pyc
new file mode 100644
index 0000000..9d3d392
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_loadtxt.cpython-312.pyc
Binary files differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_mixins.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_mixins.cpython-312.pyc
new file mode 100644
index 0000000..f390886
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_mixins.cpython-312.pyc
Binary files differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_nanfunctions.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_nanfunctions.cpython-312.pyc
new file mode 100644
index 0000000..c4c3dfe
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_nanfunctions.cpython-312.pyc
Binary files differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_packbits.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_packbits.cpython-312.pyc
new file mode 100644
index 0000000..c7d50a1
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_packbits.cpython-312.pyc
Binary files differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_polynomial.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_polynomial.cpython-312.pyc
new file mode 100644
index 0000000..17dd09f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_polynomial.cpython-312.pyc
Binary files differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_recfunctions.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_recfunctions.cpython-312.pyc
new file mode 100644
index 0000000..7850412
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_recfunctions.cpython-312.pyc
Binary files differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_regression.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_regression.cpython-312.pyc
new file mode 100644
index 0000000..bd89d0d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_regression.cpython-312.pyc
Binary files differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_shape_base.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_shape_base.cpython-312.pyc
new file mode 100644
index 0000000..69e2029
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_shape_base.cpython-312.pyc
Binary files differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_stride_tricks.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_stride_tricks.cpython-312.pyc
new file mode 100644
index 0000000..6af642b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_stride_tricks.cpython-312.pyc
Binary files differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_twodim_base.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_twodim_base.cpython-312.pyc
new file mode 100644
index 0000000..268e30e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_twodim_base.cpython-312.pyc
Binary files differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_type_check.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_type_check.cpython-312.pyc
new file mode 100644
index 0000000..f06fa5f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_type_check.cpython-312.pyc
Binary files differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_ufunclike.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_ufunclike.cpython-312.pyc
new file mode 100644
index 0000000..bf8e207
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_ufunclike.cpython-312.pyc
Binary files differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_utils.cpython-312.pyc b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_utils.cpython-312.pyc
new file mode 100644
index 0000000..2f4fba6
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/tests/__pycache__/test_utils.cpython-312.pyc
Binary files differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/data/py2-np0-objarr.npy b/.venv/lib/python3.12/site-packages/numpy/lib/tests/data/py2-np0-objarr.npy
new file mode 100644
index 0000000..a6e9e23
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/tests/data/py2-np0-objarr.npy
Binary files differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/data/py2-objarr.npy b/.venv/lib/python3.12/site-packages/numpy/lib/tests/data/py2-objarr.npy
new file mode 100644
index 0000000..12936c9
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/tests/data/py2-objarr.npy
Binary files differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/data/py2-objarr.npz b/.venv/lib/python3.12/site-packages/numpy/lib/tests/data/py2-objarr.npz
new file mode 100644
index 0000000..68a3b53
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/tests/data/py2-objarr.npz
Binary files differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/data/py3-objarr.npy b/.venv/lib/python3.12/site-packages/numpy/lib/tests/data/py3-objarr.npy
new file mode 100644
index 0000000..c9f33b0
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/tests/data/py3-objarr.npy
Binary files differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/data/py3-objarr.npz b/.venv/lib/python3.12/site-packages/numpy/lib/tests/data/py3-objarr.npz
new file mode 100644
index 0000000..fd7d9d3
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/tests/data/py3-objarr.npz
Binary files differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/data/python3.npy b/.venv/lib/python3.12/site-packages/numpy/lib/tests/data/python3.npy
new file mode 100644
index 0000000..7c6997d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/tests/data/python3.npy
Binary files differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/data/win64python2.npy b/.venv/lib/python3.12/site-packages/numpy/lib/tests/data/win64python2.npy
new file mode 100644
index 0000000..d9bc36a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/tests/data/win64python2.npy
Binary files differ
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/test__datasource.py b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test__datasource.py
new file mode 100644
index 0000000..6513732
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test__datasource.py
@@ -0,0 +1,352 @@
+import os
+import urllib.request as urllib_request
+from shutil import rmtree
+from tempfile import NamedTemporaryFile, mkdtemp, mkstemp
+from urllib.error import URLError
+from urllib.parse import urlparse
+
+import pytest
+
+import numpy.lib._datasource as datasource
+from numpy.testing import assert_, assert_equal, assert_raises
+
+
+def urlopen_stub(url, data=None):
+ '''Stub to replace urlopen for testing.'''
+ if url == valid_httpurl():
+ tmpfile = NamedTemporaryFile(prefix='urltmp_')
+ return tmpfile
+ else:
+ raise URLError('Name or service not known')
+
+
+# setup and teardown
+old_urlopen = None
+
+
+def setup_module():
+ global old_urlopen
+
+ old_urlopen = urllib_request.urlopen
+ urllib_request.urlopen = urlopen_stub
+
+
+def teardown_module():
+ urllib_request.urlopen = old_urlopen
+
+
+# A valid website for more robust testing
+http_path = 'http://www.google.com/'
+http_file = 'index.html'
+
+http_fakepath = 'http://fake.abc.web/site/'
+http_fakefile = 'fake.txt'
+
+malicious_files = ['/etc/shadow', '../../shadow',
+ '..\\system.dat', 'c:\\windows\\system.dat']
+
+magic_line = b'three is the magic number'
+
+
+# Utility functions used by many tests
+def valid_textfile(filedir):
+ # Generate and return a valid temporary file.
+ fd, path = mkstemp(suffix='.txt', prefix='dstmp_', dir=filedir, text=True)
+ os.close(fd)
+ return path
+
+
+def invalid_textfile(filedir):
+ # Generate and return an invalid filename.
+ fd, path = mkstemp(suffix='.txt', prefix='dstmp_', dir=filedir)
+ os.close(fd)
+ os.remove(path)
+ return path
+
+
+def valid_httpurl():
+ return http_path + http_file
+
+
+def invalid_httpurl():
+ return http_fakepath + http_fakefile
+
+
+def valid_baseurl():
+ return http_path
+
+
+def invalid_baseurl():
+ return http_fakepath
+
+
+def valid_httpfile():
+ return http_file
+
+
+def invalid_httpfile():
+ return http_fakefile
+
+
+class TestDataSourceOpen:
+ def setup_method(self):
+ self.tmpdir = mkdtemp()
+ self.ds = datasource.DataSource(self.tmpdir)
+
+ def teardown_method(self):
+ rmtree(self.tmpdir)
+ del self.ds
+
+ def test_ValidHTTP(self):
+ fh = self.ds.open(valid_httpurl())
+ assert_(fh)
+ fh.close()
+
+ def test_InvalidHTTP(self):
+ url = invalid_httpurl()
+ assert_raises(OSError, self.ds.open, url)
+ try:
+ self.ds.open(url)
+ except OSError as e:
+ # Regression test for bug fixed in r4342.
+ assert_(e.errno is None)
+
+ def test_InvalidHTTPCacheURLError(self):
+ assert_raises(URLError, self.ds._cache, invalid_httpurl())
+
+ def test_ValidFile(self):
+ local_file = valid_textfile(self.tmpdir)
+ fh = self.ds.open(local_file)
+ assert_(fh)
+ fh.close()
+
+ def test_InvalidFile(self):
+ invalid_file = invalid_textfile(self.tmpdir)
+ assert_raises(OSError, self.ds.open, invalid_file)
+
+ def test_ValidGzipFile(self):
+ try:
+ import gzip
+ except ImportError:
+ # We don't have the gzip capabilities to test.
+ pytest.skip()
+ # Test datasource's internal file_opener for Gzip files.
+ filepath = os.path.join(self.tmpdir, 'foobar.txt.gz')
+ fp = gzip.open(filepath, 'w')
+ fp.write(magic_line)
+ fp.close()
+ fp = self.ds.open(filepath)
+ result = fp.readline()
+ fp.close()
+ assert_equal(magic_line, result)
+
+ def test_ValidBz2File(self):
+ try:
+ import bz2
+ except ImportError:
+ # We don't have the bz2 capabilities to test.
+ pytest.skip()
+ # Test datasource's internal file_opener for BZip2 files.
+ filepath = os.path.join(self.tmpdir, 'foobar.txt.bz2')
+ fp = bz2.BZ2File(filepath, 'w')
+ fp.write(magic_line)
+ fp.close()
+ fp = self.ds.open(filepath)
+ result = fp.readline()
+ fp.close()
+ assert_equal(magic_line, result)
+
+
+class TestDataSourceExists:
+ def setup_method(self):
+ self.tmpdir = mkdtemp()
+ self.ds = datasource.DataSource(self.tmpdir)
+
+ def teardown_method(self):
+ rmtree(self.tmpdir)
+ del self.ds
+
+ def test_ValidHTTP(self):
+ assert_(self.ds.exists(valid_httpurl()))
+
+ def test_InvalidHTTP(self):
+ assert_equal(self.ds.exists(invalid_httpurl()), False)
+
+ def test_ValidFile(self):
+ # Test valid file in destpath
+ tmpfile = valid_textfile(self.tmpdir)
+ assert_(self.ds.exists(tmpfile))
+ # Test valid local file not in destpath
+ localdir = mkdtemp()
+ tmpfile = valid_textfile(localdir)
+ assert_(self.ds.exists(tmpfile))
+ rmtree(localdir)
+
+ def test_InvalidFile(self):
+ tmpfile = invalid_textfile(self.tmpdir)
+ assert_equal(self.ds.exists(tmpfile), False)
+
+
+class TestDataSourceAbspath:
+ def setup_method(self):
+ self.tmpdir = os.path.abspath(mkdtemp())
+ self.ds = datasource.DataSource(self.tmpdir)
+
+ def teardown_method(self):
+ rmtree(self.tmpdir)
+ del self.ds
+
+ def test_ValidHTTP(self):
+ scheme, netloc, upath, pms, qry, frg = urlparse(valid_httpurl())
+ local_path = os.path.join(self.tmpdir, netloc,
+ upath.strip(os.sep).strip('/'))
+ assert_equal(local_path, self.ds.abspath(valid_httpurl()))
+
+ def test_ValidFile(self):
+ tmpfile = valid_textfile(self.tmpdir)
+ tmpfilename = os.path.split(tmpfile)[-1]
+ # Test with filename only
+ assert_equal(tmpfile, self.ds.abspath(tmpfilename))
+ # Test filename with complete path
+ assert_equal(tmpfile, self.ds.abspath(tmpfile))
+
+ def test_InvalidHTTP(self):
+ scheme, netloc, upath, pms, qry, frg = urlparse(invalid_httpurl())
+ invalidhttp = os.path.join(self.tmpdir, netloc,
+ upath.strip(os.sep).strip('/'))
+ assert_(invalidhttp != self.ds.abspath(valid_httpurl()))
+
+ def test_InvalidFile(self):
+ invalidfile = valid_textfile(self.tmpdir)
+ tmpfile = valid_textfile(self.tmpdir)
+ tmpfilename = os.path.split(tmpfile)[-1]
+ # Test with filename only
+ assert_(invalidfile != self.ds.abspath(tmpfilename))
+ # Test filename with complete path
+ assert_(invalidfile != self.ds.abspath(tmpfile))
+
+ def test_sandboxing(self):
+ tmpfile = valid_textfile(self.tmpdir)
+ tmpfilename = os.path.split(tmpfile)[-1]
+
+ tmp_path = lambda x: os.path.abspath(self.ds.abspath(x))
+
+ assert_(tmp_path(valid_httpurl()).startswith(self.tmpdir))
+ assert_(tmp_path(invalid_httpurl()).startswith(self.tmpdir))
+ assert_(tmp_path(tmpfile).startswith(self.tmpdir))
+ assert_(tmp_path(tmpfilename).startswith(self.tmpdir))
+ for fn in malicious_files:
+ assert_(tmp_path(http_path + fn).startswith(self.tmpdir))
+ assert_(tmp_path(fn).startswith(self.tmpdir))
+
+ def test_windows_os_sep(self):
+ orig_os_sep = os.sep
+ try:
+ os.sep = '\\'
+ self.test_ValidHTTP()
+ self.test_ValidFile()
+ self.test_InvalidHTTP()
+ self.test_InvalidFile()
+ self.test_sandboxing()
+ finally:
+ os.sep = orig_os_sep
+
+
+class TestRepositoryAbspath:
+ def setup_method(self):
+ self.tmpdir = os.path.abspath(mkdtemp())
+ self.repos = datasource.Repository(valid_baseurl(), self.tmpdir)
+
+ def teardown_method(self):
+ rmtree(self.tmpdir)
+ del self.repos
+
+ def test_ValidHTTP(self):
+ scheme, netloc, upath, pms, qry, frg = urlparse(valid_httpurl())
+ local_path = os.path.join(self.repos._destpath, netloc,
+ upath.strip(os.sep).strip('/'))
+ filepath = self.repos.abspath(valid_httpfile())
+ assert_equal(local_path, filepath)
+
+ def test_sandboxing(self):
+ tmp_path = lambda x: os.path.abspath(self.repos.abspath(x))
+ assert_(tmp_path(valid_httpfile()).startswith(self.tmpdir))
+ for fn in malicious_files:
+ assert_(tmp_path(http_path + fn).startswith(self.tmpdir))
+ assert_(tmp_path(fn).startswith(self.tmpdir))
+
+ def test_windows_os_sep(self):
+ orig_os_sep = os.sep
+ try:
+ os.sep = '\\'
+ self.test_ValidHTTP()
+ self.test_sandboxing()
+ finally:
+ os.sep = orig_os_sep
+
+
+class TestRepositoryExists:
+ def setup_method(self):
+ self.tmpdir = mkdtemp()
+ self.repos = datasource.Repository(valid_baseurl(), self.tmpdir)
+
+ def teardown_method(self):
+ rmtree(self.tmpdir)
+ del self.repos
+
+ def test_ValidFile(self):
+ # Create local temp file
+ tmpfile = valid_textfile(self.tmpdir)
+ assert_(self.repos.exists(tmpfile))
+
+ def test_InvalidFile(self):
+ tmpfile = invalid_textfile(self.tmpdir)
+ assert_equal(self.repos.exists(tmpfile), False)
+
+ def test_RemoveHTTPFile(self):
+ assert_(self.repos.exists(valid_httpurl()))
+
+ def test_CachedHTTPFile(self):
+ localfile = valid_httpurl()
+        # Create a locally cached temp file with a URL-based directory
+        # structure, similar to what Repository.open would do.
+ scheme, netloc, upath, pms, qry, frg = urlparse(localfile)
+ local_path = os.path.join(self.repos._destpath, netloc)
+ os.mkdir(local_path, 0o0700)
+ tmpfile = valid_textfile(local_path)
+ assert_(self.repos.exists(tmpfile))
+
+
+class TestOpenFunc:
+ def setup_method(self):
+ self.tmpdir = mkdtemp()
+
+ def teardown_method(self):
+ rmtree(self.tmpdir)
+
+ def test_DataSourceOpen(self):
+ local_file = valid_textfile(self.tmpdir)
+ # Test case where destpath is passed in
+ fp = datasource.open(local_file, destpath=self.tmpdir)
+ assert_(fp)
+ fp.close()
+ # Test case where default destpath is used
+ fp = datasource.open(local_file)
+ assert_(fp)
+ fp.close()
+
+def test_del_attr_handling():
+    # DataSource.__del__ can be called even if __init__ fails, e.g. when
+    # the caller catches the exception raised by __init__ (as happens in
+    # the refguide_check is_deprecated() function).
+
+ ds = datasource.DataSource()
+ # simulate failed __init__ by removing key attribute
+ # produced within __init__ and expected by __del__
+ del ds._istmpdest
+ # should not raise an AttributeError if __del__
+ # gracefully handles failed __init__:
+ ds.__del__()
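+
+
+# A compact sketch of the behaviour pinned down above (the destpath is
+# hypothetical):
+#
+#     ds = datasource.DataSource('/tmp/sandbox')
+#     ds.abspath('../../etc/shadow')  # always resolves under /tmp/sandbox
+#     fh = ds.open('data.txt.gz')     # gzip/bz2 files open decompressed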
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/test__iotools.py b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test__iotools.py
new file mode 100644
index 0000000..1581ffb
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test__iotools.py
@@ -0,0 +1,360 @@
+import time
+from datetime import date
+
+import numpy as np
+from numpy.lib._iotools import (
+ LineSplitter,
+ NameValidator,
+ StringConverter,
+ easy_dtype,
+ flatten_dtype,
+ has_nested_fields,
+)
+from numpy.testing import (
+ assert_,
+ assert_allclose,
+ assert_equal,
+ assert_raises,
+)
+
+
+class TestLineSplitter:
+ "Tests the LineSplitter class."
+
+ def test_no_delimiter(self):
+ "Test LineSplitter w/o delimiter"
+        strg = " 1 2 3 4  5 # test"
+ test = LineSplitter()(strg)
+ assert_equal(test, ['1', '2', '3', '4', '5'])
+ test = LineSplitter('')(strg)
+ assert_equal(test, ['1', '2', '3', '4', '5'])
+
+    def test_space_delimiter(self):
+        "Test space delimiter"
+        strg = " 1 2 3 4  5 # test"
+        test = LineSplitter(' ')(strg)
+        assert_equal(test, ['1', '2', '3', '4', '', '5'])
+        test = LineSplitter('  ')(strg)
+        assert_equal(test, ['1 2 3 4', '5'])
+
+    def test_tab_delimiter(self):
+        "Test tab delimiter"
+        strg = " 1\t 2\t 3\t 4\t 5  6"
+        test = LineSplitter('\t')(strg)
+        assert_equal(test, ['1', '2', '3', '4', '5  6'])
+        strg = " 1  2\t 3  4\t 5  6"
+        test = LineSplitter('\t')(strg)
+        assert_equal(test, ['1  2', '3  4', '5  6'])
+
+ def test_other_delimiter(self):
+ "Test LineSplitter on delimiter"
+ strg = "1,2,3,4,,5"
+ test = LineSplitter(',')(strg)
+ assert_equal(test, ['1', '2', '3', '4', '', '5'])
+ #
+ strg = " 1,2,3,4,,5 # test"
+ test = LineSplitter(',')(strg)
+ assert_equal(test, ['1', '2', '3', '4', '', '5'])
+
+ # gh-11028 bytes comment/delimiters should get encoded
+ strg = b" 1,2,3,4,,5 % test"
+ test = LineSplitter(delimiter=b',', comments=b'%')(strg)
+ assert_equal(test, ['1', '2', '3', '4', '', '5'])
+
+    def test_constant_fixed_width(self):
+        "Test LineSplitter w/ fixed-width fields"
+        strg = "  1  2  3  4     5   # test"
+        test = LineSplitter(3)(strg)
+        assert_equal(test, ['1', '2', '3', '4', '', '5', ''])
+        #
+        strg = "  1     3  4  5  6# test"
+        test = LineSplitter(20)(strg)
+        assert_equal(test, ['1     3  4  5  6'])
+        #
+        strg = "  1     3  4  5  6# test"
+        test = LineSplitter(30)(strg)
+        assert_equal(test, ['1     3  4  5  6'])
+
+    def test_variable_fixed_width(self):
+        strg = "  1     3  4  5  6# test"
+        test = LineSplitter((3, 6, 6, 3))(strg)
+        assert_equal(test, ['1', '3', '4  5', '6'])
+        #
+        strg = "  1     3  4  5  6# test"
+        test = LineSplitter((6, 6, 9))(strg)
+        assert_equal(test, ['1', '3  4', '5  6'])
+
+# -----------------------------------------------------------------------------
+
+
+class TestNameValidator:
+
+ def test_case_sensitivity(self):
+ "Test case sensitivity"
+ names = ['A', 'a', 'b', 'c']
+ test = NameValidator().validate(names)
+ assert_equal(test, ['A', 'a', 'b', 'c'])
+ test = NameValidator(case_sensitive=False).validate(names)
+ assert_equal(test, ['A', 'A_1', 'B', 'C'])
+ test = NameValidator(case_sensitive='upper').validate(names)
+ assert_equal(test, ['A', 'A_1', 'B', 'C'])
+ test = NameValidator(case_sensitive='lower').validate(names)
+ assert_equal(test, ['a', 'a_1', 'b', 'c'])
+
+ # check exceptions
+ assert_raises(ValueError, NameValidator, case_sensitive='foobar')
+
+ def test_excludelist(self):
+ "Test excludelist"
+ names = ['dates', 'data', 'Other Data', 'mask']
+ validator = NameValidator(excludelist=['dates', 'data', 'mask'])
+ test = validator.validate(names)
+ assert_equal(test, ['dates_', 'data_', 'Other_Data', 'mask_'])
+
+ def test_missing_names(self):
+ "Test validate missing names"
+ namelist = ('a', 'b', 'c')
+ validator = NameValidator()
+ assert_equal(validator(namelist), ['a', 'b', 'c'])
+ namelist = ('', 'b', 'c')
+ assert_equal(validator(namelist), ['f0', 'b', 'c'])
+ namelist = ('a', 'b', '')
+ assert_equal(validator(namelist), ['a', 'b', 'f0'])
+ namelist = ('', 'f0', '')
+ assert_equal(validator(namelist), ['f1', 'f0', 'f2'])
+
+ def test_validate_nb_names(self):
+ "Test validate nb names"
+ namelist = ('a', 'b', 'c')
+ validator = NameValidator()
+ assert_equal(validator(namelist, nbfields=1), ('a',))
+ assert_equal(validator(namelist, nbfields=5, defaultfmt="g%i"),
+ ['a', 'b', 'c', 'g0', 'g1'])
+
+ def test_validate_wo_names(self):
+ "Test validate no names"
+ namelist = None
+ validator = NameValidator()
+ assert_(validator(namelist) is None)
+ assert_equal(validator(namelist, nbfields=3), ['f0', 'f1', 'f2'])
+
+# -----------------------------------------------------------------------------
+
+
+def _bytes_to_date(s):
+ return date(*time.strptime(s, "%Y-%m-%d")[:3])
+
+
+class TestStringConverter:
+ "Test StringConverter"
+
+ def test_creation(self):
+ "Test creation of a StringConverter"
+ converter = StringConverter(int, -99999)
+ assert_equal(converter._status, 1)
+ assert_equal(converter.default, -99999)
+
+ def test_upgrade(self):
+ "Tests the upgrade method."
+
+ converter = StringConverter()
+ assert_equal(converter._status, 0)
+
+ # test int
+ assert_equal(converter.upgrade('0'), 0)
+ assert_equal(converter._status, 1)
+
+ # On systems where long defaults to 32-bit, the statuses will be
+ # offset by one, so we check for this here.
+ import numpy._core.numeric as nx
+ status_offset = int(nx.dtype(nx.int_).itemsize < nx.dtype(nx.int64).itemsize)
+
+ # test int > 2**32
+ assert_equal(converter.upgrade('17179869184'), 17179869184)
+ assert_equal(converter._status, 1 + status_offset)
+
+ # test float
+ assert_allclose(converter.upgrade('0.'), 0.0)
+ assert_equal(converter._status, 2 + status_offset)
+
+ # test complex
+ assert_equal(converter.upgrade('0j'), complex('0j'))
+ assert_equal(converter._status, 3 + status_offset)
+
+ # test str
+ # note that the longdouble type has been skipped, so the
+ # _status increases by 2. Everything should succeed with
+ # unicode conversion (8).
+ for s in ['a', b'a']:
+ res = converter.upgrade(s)
+ assert_(type(res) is str)
+ assert_equal(res, 'a')
+ assert_equal(converter._status, 8 + status_offset)
+
+ def test_missing(self):
+ "Tests the use of missing values."
+ converter = StringConverter(missing_values=('missing',
+ 'missed'))
+ converter.upgrade('0')
+ assert_equal(converter('0'), 0)
+ assert_equal(converter(''), converter.default)
+ assert_equal(converter('missing'), converter.default)
+ assert_equal(converter('missed'), converter.default)
+        assert_raises(ValueError, converter, 'miss')
+
+ def test_upgrademapper(self):
+ "Tests updatemapper"
+ dateparser = _bytes_to_date
+ _original_mapper = StringConverter._mapper[:]
+ try:
+ StringConverter.upgrade_mapper(dateparser, date(2000, 1, 1))
+ convert = StringConverter(dateparser, date(2000, 1, 1))
+ test = convert('2001-01-01')
+ assert_equal(test, date(2001, 1, 1))
+ test = convert('2009-01-01')
+ assert_equal(test, date(2009, 1, 1))
+ test = convert('')
+ assert_equal(test, date(2000, 1, 1))
+ finally:
+ StringConverter._mapper = _original_mapper
+
+ def test_string_to_object(self):
+ "Make sure that string-to-object functions are properly recognized"
+ old_mapper = StringConverter._mapper[:] # copy of list
+ conv = StringConverter(_bytes_to_date)
+ assert_equal(conv._mapper, old_mapper)
+ assert_(hasattr(conv, 'default'))
+
+ def test_keep_default(self):
+ "Make sure we don't lose an explicit default"
+ converter = StringConverter(None, missing_values='',
+ default=-999)
+ converter.upgrade('3.14159265')
+ assert_equal(converter.default, -999)
+ assert_equal(converter.type, np.dtype(float))
+ #
+ converter = StringConverter(
+ None, missing_values='', default=0)
+ converter.upgrade('3.14159265')
+ assert_equal(converter.default, 0)
+ assert_equal(converter.type, np.dtype(float))
+
+ def test_keep_default_zero(self):
+ "Check that we don't lose a default of 0"
+ converter = StringConverter(int, default=0,
+ missing_values="N/A")
+ assert_equal(converter.default, 0)
+
+ def test_keep_missing_values(self):
+ "Check that we're not losing missing values"
+ converter = StringConverter(int, default=0,
+ missing_values="N/A")
+ assert_equal(
+ converter.missing_values, {'', 'N/A'})
+
+ def test_int64_dtype(self):
+ "Check that int64 integer types can be specified"
+ converter = StringConverter(np.int64, default=0)
+ val = "-9223372036854775807"
+ assert_(converter(val) == -9223372036854775807)
+ val = "9223372036854775807"
+ assert_(converter(val) == 9223372036854775807)
+
+ def test_uint64_dtype(self):
+ "Check that uint64 integer types can be specified"
+ converter = StringConverter(np.uint64, default=0)
+ val = "9223372043271415339"
+ assert_(converter(val) == 9223372043271415339)
+
+
+class TestMiscFunctions:
+
+ def test_has_nested_dtype(self):
+ "Test has_nested_dtype"
+ ndtype = np.dtype(float)
+ assert_equal(has_nested_fields(ndtype), False)
+ ndtype = np.dtype([('A', '|S3'), ('B', float)])
+ assert_equal(has_nested_fields(ndtype), False)
+ ndtype = np.dtype([('A', int), ('B', [('BA', float), ('BB', '|S1')])])
+ assert_equal(has_nested_fields(ndtype), True)
+
+ def test_easy_dtype(self):
+ "Test ndtype on dtypes"
+ # Simple case
+ ndtype = float
+ assert_equal(easy_dtype(ndtype), np.dtype(float))
+ # As string w/o names
+ ndtype = "i4, f8"
+ assert_equal(easy_dtype(ndtype),
+ np.dtype([('f0', "i4"), ('f1', "f8")]))
+ # As string w/o names but different default format
+ assert_equal(easy_dtype(ndtype, defaultfmt="field_%03i"),
+ np.dtype([('field_000', "i4"), ('field_001', "f8")]))
+ # As string w/ names
+ ndtype = "i4, f8"
+ assert_equal(easy_dtype(ndtype, names="a, b"),
+ np.dtype([('a', "i4"), ('b', "f8")]))
+ # As string w/ names (too many)
+ ndtype = "i4, f8"
+ assert_equal(easy_dtype(ndtype, names="a, b, c"),
+ np.dtype([('a', "i4"), ('b', "f8")]))
+ # As string w/ names (not enough)
+ ndtype = "i4, f8"
+ assert_equal(easy_dtype(ndtype, names=", b"),
+ np.dtype([('f0', "i4"), ('b', "f8")]))
+ # ... (with different default format)
+ assert_equal(easy_dtype(ndtype, names="a", defaultfmt="f%02i"),
+ np.dtype([('a', "i4"), ('f00', "f8")]))
+ # As list of tuples w/o names
+ ndtype = [('A', int), ('B', float)]
+ assert_equal(easy_dtype(ndtype), np.dtype([('A', int), ('B', float)]))
+ # As list of tuples w/ names
+ assert_equal(easy_dtype(ndtype, names="a,b"),
+ np.dtype([('a', int), ('b', float)]))
+ # As list of tuples w/ not enough names
+ assert_equal(easy_dtype(ndtype, names="a"),
+ np.dtype([('a', int), ('f0', float)]))
+ # As list of tuples w/ too many names
+ assert_equal(easy_dtype(ndtype, names="a,b,c"),
+ np.dtype([('a', int), ('b', float)]))
+ # As list of types w/o names
+ ndtype = (int, float, float)
+ assert_equal(easy_dtype(ndtype),
+ np.dtype([('f0', int), ('f1', float), ('f2', float)]))
+        # As list of types w/ names
+ ndtype = (int, float, float)
+ assert_equal(easy_dtype(ndtype, names="a, b, c"),
+ np.dtype([('a', int), ('b', float), ('c', float)]))
+ # As simple dtype w/ names
+ ndtype = np.dtype(float)
+ assert_equal(easy_dtype(ndtype, names="a, b, c"),
+ np.dtype([(_, float) for _ in ('a', 'b', 'c')]))
+ # As simple dtype w/o names (but multiple fields)
+ ndtype = np.dtype(float)
+ assert_equal(
+ easy_dtype(ndtype, names=['', '', ''], defaultfmt="f%02i"),
+ np.dtype([(_, float) for _ in ('f00', 'f01', 'f02')]))
+
+ def test_flatten_dtype(self):
+ "Testing flatten_dtype"
+ # Standard dtype
+ dt = np.dtype([("a", "f8"), ("b", "f8")])
+ dt_flat = flatten_dtype(dt)
+ assert_equal(dt_flat, [float, float])
+ # Recursive dtype
+ dt = np.dtype([("a", [("aa", '|S1'), ("ab", '|S2')]), ("b", int)])
+ dt_flat = flatten_dtype(dt)
+ assert_equal(dt_flat, [np.dtype('|S1'), np.dtype('|S2'), int])
+ # dtype with shaped fields
+ dt = np.dtype([("a", (float, 2)), ("b", (int, 3))])
+ dt_flat = flatten_dtype(dt)
+ assert_equal(dt_flat, [float, int])
+ dt_flat = flatten_dtype(dt, True)
+ assert_equal(dt_flat, [float] * 2 + [int] * 3)
+ # dtype w/ titles
+ dt = np.dtype([(("a", "A"), "f8"), (("b", "B"), "f8")])
+ dt_flat = flatten_dtype(dt)
+ assert_equal(dt_flat, [float, float])
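+
+
+# A condensed sketch of the StringConverter upgrade ladder exercised in
+# test_upgrade above (the _status numbers are platform dependent):
+#
+#     conv = StringConverter()  # starts at bool
+#     conv.upgrade('0')         # -> int
+#     conv.upgrade('0.')        # -> float
+#     conv.upgrade('0j')        # -> complex
+#     conv.upgrade('a')         # -> str, the final fallback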
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/test__version.py b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test__version.py
new file mode 100644
index 0000000..6e6a34a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test__version.py
@@ -0,0 +1,64 @@
+"""Tests for the NumpyVersion class.
+
+"""
+from numpy.lib import NumpyVersion
+from numpy.testing import assert_, assert_raises
+
+
+def test_main_versions():
+ assert_(NumpyVersion('1.8.0') == '1.8.0')
+ for ver in ['1.9.0', '2.0.0', '1.8.1', '10.0.1']:
+ assert_(NumpyVersion('1.8.0') < ver)
+
+ for ver in ['1.7.0', '1.7.1', '0.9.9']:
+ assert_(NumpyVersion('1.8.0') > ver)
+
+
+def test_version_1_point_10():
+ # regression test for gh-2998.
+ assert_(NumpyVersion('1.9.0') < '1.10.0')
+ assert_(NumpyVersion('1.11.0') < '1.11.1')
+ assert_(NumpyVersion('1.11.0') == '1.11.0')
+ assert_(NumpyVersion('1.99.11') < '1.99.12')
+
+
+def test_alpha_beta_rc():
+ assert_(NumpyVersion('1.8.0rc1') == '1.8.0rc1')
+ for ver in ['1.8.0', '1.8.0rc2']:
+ assert_(NumpyVersion('1.8.0rc1') < ver)
+
+ for ver in ['1.8.0a2', '1.8.0b3', '1.7.2rc4']:
+ assert_(NumpyVersion('1.8.0rc1') > ver)
+
+ assert_(NumpyVersion('1.8.0b1') > '1.8.0a2')
+
+
+def test_dev_version():
+ assert_(NumpyVersion('1.9.0.dev-Unknown') < '1.9.0')
+ for ver in ['1.9.0', '1.9.0a1', '1.9.0b2', '1.9.0b2.dev-ffffffff']:
+ assert_(NumpyVersion('1.9.0.dev-f16acvda') < ver)
+
+ assert_(NumpyVersion('1.9.0.dev-f16acvda') == '1.9.0.dev-11111111')
+
+
+def test_dev_a_b_rc_mixed():
+ assert_(NumpyVersion('1.9.0a2.dev-f16acvda') == '1.9.0a2.dev-11111111')
+ assert_(NumpyVersion('1.9.0a2.dev-6acvda54') < '1.9.0a2')
+
+
+def test_dev0_version():
+ assert_(NumpyVersion('1.9.0.dev0+Unknown') < '1.9.0')
+ for ver in ['1.9.0', '1.9.0a1', '1.9.0b2', '1.9.0b2.dev0+ffffffff']:
+ assert_(NumpyVersion('1.9.0.dev0+f16acvda') < ver)
+
+ assert_(NumpyVersion('1.9.0.dev0+f16acvda') == '1.9.0.dev0+11111111')
+
+
+def test_dev0_a_b_rc_mixed():
+ assert_(NumpyVersion('1.9.0a2.dev0+f16acvda') == '1.9.0a2.dev0+11111111')
+ assert_(NumpyVersion('1.9.0a2.dev0+6acvda54') < '1.9.0a2')
+
+
+def test_raises():
+ for ver in ['1.9', '1,9.0', '1.7.x']:
+ assert_raises(ValueError, NumpyVersion, ver)
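+
+
+# Ordering summary the tests above rely on (illustrative comparisons):
+#
+#     NumpyVersion('1.8.0a2') < NumpyVersion('1.8.0b1')  # a < b < rc < final
+#     NumpyVersion('1.9.0.dev0+f16acvda') < '1.9.0a1'    # dev sorts first
+#     # the commit hash after 'dev0+' is ignored between dev builds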
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_array_utils.py b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_array_utils.py
new file mode 100644
index 0000000..55b9d28
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_array_utils.py
@@ -0,0 +1,32 @@
+import numpy as np
+from numpy.lib import array_utils
+from numpy.testing import assert_equal
+
+
+class TestByteBounds:
+ def test_byte_bounds(self):
+ # pointer difference matches size * itemsize
+ # due to contiguity
+ a = np.arange(12).reshape(3, 4)
+ low, high = array_utils.byte_bounds(a)
+ assert_equal(high - low, a.size * a.itemsize)
+
+ def test_unusual_order_positive_stride(self):
+ a = np.arange(12).reshape(3, 4)
+ b = a.T
+ low, high = array_utils.byte_bounds(b)
+ assert_equal(high - low, b.size * b.itemsize)
+
+ def test_unusual_order_negative_stride(self):
+ a = np.arange(12).reshape(3, 4)
+ b = a.T[::-1]
+ low, high = array_utils.byte_bounds(b)
+ assert_equal(high - low, b.size * b.itemsize)
+
+ def test_strided(self):
+ a = np.arange(12)
+ b = a[::2]
+ low, high = array_utils.byte_bounds(b)
+        # b holds every other element of a, so its extent spans
+        # (b.size - 1) strides of 2 * itemsize plus one final item,
+        # i.e. b.size * 2 * b.itemsize - b.itemsize
+ assert_equal(high - low, b.size * 2 * b.itemsize - b.itemsize)
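+
+
+# Worked arithmetic for the strided case above (assuming 8-byte ints):
+# b = a[::2] has 6 items with a byte stride of 16, so the extent is
+# (6 - 1) * 16 + 8 == 88 == b.size * 2 * b.itemsize - b.itemsize.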
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_arraypad.py b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_arraypad.py
new file mode 100644
index 0000000..6efbe34
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_arraypad.py
@@ -0,0 +1,1415 @@
+"""Tests for the array padding functions.
+
+"""
+import pytest
+
+import numpy as np
+from numpy.lib._arraypad_impl import _as_pairs
+from numpy.testing import assert_allclose, assert_array_equal, assert_equal
+
+_numeric_dtypes = (
+ np._core.sctypes["uint"]
+ + np._core.sctypes["int"]
+ + np._core.sctypes["float"]
+ + np._core.sctypes["complex"]
+)
+_all_modes = {
+ 'constant': {'constant_values': 0},
+ 'edge': {},
+ 'linear_ramp': {'end_values': 0},
+ 'maximum': {'stat_length': None},
+ 'mean': {'stat_length': None},
+ 'median': {'stat_length': None},
+ 'minimum': {'stat_length': None},
+ 'reflect': {'reflect_type': 'even'},
+ 'symmetric': {'reflect_type': 'even'},
+ 'wrap': {},
+ 'empty': {}
+}
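+
+# For orientation, each mode fills the pad region differently
+# (illustrative one-liners):
+#
+#     np.pad([1, 2, 3], 2, mode='edge')     # [1, 1, 1, 2, 3, 3, 3]
+#     np.pad([1, 2, 3], 2, mode='wrap')     # [2, 3, 1, 2, 3, 1, 2]
+#     np.pad([1, 2, 3], 2, mode='reflect')  # [3, 2, 1, 2, 3, 2, 1]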
+
+
+class TestAsPairs:
+ def test_single_value(self):
+ """Test casting for a single value."""
+ expected = np.array([[3, 3]] * 10)
+ for x in (3, [3], [[3]]):
+ result = _as_pairs(x, 10)
+ assert_equal(result, expected)
+ # Test with dtype=object
+ obj = object()
+ assert_equal(
+ _as_pairs(obj, 10),
+ np.array([[obj, obj]] * 10)
+ )
+
+ def test_two_values(self):
+ """Test proper casting for two different values."""
+ # Broadcasting in the first dimension with numbers
+ expected = np.array([[3, 4]] * 10)
+ for x in ([3, 4], [[3, 4]]):
+ result = _as_pairs(x, 10)
+ assert_equal(result, expected)
+ # and with dtype=object
+ obj = object()
+ assert_equal(
+ _as_pairs(["a", obj], 10),
+ np.array([["a", obj]] * 10)
+ )
+
+ # Broadcasting in the second / last dimension with numbers
+ assert_equal(
+ _as_pairs([[3], [4]], 2),
+ np.array([[3, 3], [4, 4]])
+ )
+ # and with dtype=object
+ assert_equal(
+ _as_pairs([["a"], [obj]], 2),
+ np.array([["a", "a"], [obj, obj]])
+ )
+
+ def test_with_none(self):
+ expected = ((None, None), (None, None), (None, None))
+ assert_equal(
+ _as_pairs(None, 3, as_index=False),
+ expected
+ )
+ assert_equal(
+ _as_pairs(None, 3, as_index=True),
+ expected
+ )
+
+ def test_pass_through(self):
+ """Test if `x` already matching desired output are passed through."""
+ expected = np.arange(12).reshape((6, 2))
+ assert_equal(
+ _as_pairs(expected, 6),
+ expected
+ )
+
+ def test_as_index(self):
+ """Test results if `as_index=True`."""
+ assert_equal(
+ _as_pairs([2.6, 3.3], 10, as_index=True),
+ np.array([[3, 3]] * 10, dtype=np.intp)
+ )
+ assert_equal(
+ _as_pairs([2.6, 4.49], 10, as_index=True),
+ np.array([[3, 4]] * 10, dtype=np.intp)
+ )
+ for x in (-3, [-3], [[-3]], [-3, 4], [3, -4], [[-3, 4]], [[4, -3]],
+ [[1, 2]] * 9 + [[1, -2]]):
+ with pytest.raises(ValueError, match="negative values"):
+ _as_pairs(x, 10, as_index=True)
+
+ def test_exceptions(self):
+ """Ensure faulty usage is discovered."""
+ with pytest.raises(ValueError, match="more dimensions than allowed"):
+ _as_pairs([[[3]]], 10)
+ with pytest.raises(ValueError, match="could not be broadcast"):
+ _as_pairs([[1, 2], [3, 4]], 3)
+ with pytest.raises(ValueError, match="could not be broadcast"):
+ _as_pairs(np.ones((2, 3)), 3)
+
+
+class TestConditionalShortcuts:
+ @pytest.mark.parametrize("mode", _all_modes.keys())
+ def test_zero_padding_shortcuts(self, mode):
+ test = np.arange(120).reshape(4, 5, 6)
+ pad_amt = [(0, 0) for _ in test.shape]
+ assert_array_equal(test, np.pad(test, pad_amt, mode=mode))
+
+ @pytest.mark.parametrize("mode", ['maximum', 'mean', 'median', 'minimum',])
+ def test_shallow_statistic_range(self, mode):
+ test = np.arange(120).reshape(4, 5, 6)
+ pad_amt = [(1, 1) for _ in test.shape]
+ assert_array_equal(np.pad(test, pad_amt, mode='edge'),
+ np.pad(test, pad_amt, mode=mode, stat_length=1))
+
+ @pytest.mark.parametrize("mode", ['maximum', 'mean', 'median', 'minimum',])
+ def test_clip_statistic_range(self, mode):
+ test = np.arange(30).reshape(5, 6)
+ pad_amt = [(3, 3) for _ in test.shape]
+ assert_array_equal(np.pad(test, pad_amt, mode=mode),
+ np.pad(test, pad_amt, mode=mode, stat_length=30))
+
+
+class TestStatistic:
+ def test_check_mean_stat_length(self):
+ a = np.arange(100).astype('f')
+ a = np.pad(a, ((25, 20), ), 'mean', stat_length=((2, 3), ))
+ b = np.array(
+ [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5,
+ 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5,
+ 0.5, 0.5, 0.5, 0.5, 0.5,
+
+ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9.,
+ 10., 11., 12., 13., 14., 15., 16., 17., 18., 19.,
+ 20., 21., 22., 23., 24., 25., 26., 27., 28., 29.,
+ 30., 31., 32., 33., 34., 35., 36., 37., 38., 39.,
+ 40., 41., 42., 43., 44., 45., 46., 47., 48., 49.,
+ 50., 51., 52., 53., 54., 55., 56., 57., 58., 59.,
+ 60., 61., 62., 63., 64., 65., 66., 67., 68., 69.,
+ 70., 71., 72., 73., 74., 75., 76., 77., 78., 79.,
+ 80., 81., 82., 83., 84., 85., 86., 87., 88., 89.,
+ 90., 91., 92., 93., 94., 95., 96., 97., 98., 99.,
+
+ 98., 98., 98., 98., 98., 98., 98., 98., 98., 98.,
+ 98., 98., 98., 98., 98., 98., 98., 98., 98., 98.
+ ])
+ assert_array_equal(a, b)
+
+ def test_check_maximum_1(self):
+ a = np.arange(100)
+ a = np.pad(a, (25, 20), 'maximum')
+ b = np.array(
+ [99, 99, 99, 99, 99, 99, 99, 99, 99, 99,
+ 99, 99, 99, 99, 99, 99, 99, 99, 99, 99,
+ 99, 99, 99, 99, 99,
+
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
+ 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
+ 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
+ 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
+ 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
+ 80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
+ 90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
+
+ 99, 99, 99, 99, 99, 99, 99, 99, 99, 99,
+ 99, 99, 99, 99, 99, 99, 99, 99, 99, 99]
+ )
+ assert_array_equal(a, b)
+
+ def test_check_maximum_2(self):
+ a = np.arange(100) + 1
+ a = np.pad(a, (25, 20), 'maximum')
+ b = np.array(
+ [100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
+ 100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
+ 100, 100, 100, 100, 100,
+
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+ 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
+ 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
+ 41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
+ 51, 52, 53, 54, 55, 56, 57, 58, 59, 60,
+ 61, 62, 63, 64, 65, 66, 67, 68, 69, 70,
+ 71, 72, 73, 74, 75, 76, 77, 78, 79, 80,
+ 81, 82, 83, 84, 85, 86, 87, 88, 89, 90,
+ 91, 92, 93, 94, 95, 96, 97, 98, 99, 100,
+
+ 100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
+ 100, 100, 100, 100, 100, 100, 100, 100, 100, 100]
+ )
+ assert_array_equal(a, b)
+
+ def test_check_maximum_stat_length(self):
+ a = np.arange(100) + 1
+ a = np.pad(a, (25, 20), 'maximum', stat_length=10)
+ b = np.array(
+ [10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10,
+
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+ 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
+ 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
+ 41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
+ 51, 52, 53, 54, 55, 56, 57, 58, 59, 60,
+ 61, 62, 63, 64, 65, 66, 67, 68, 69, 70,
+ 71, 72, 73, 74, 75, 76, 77, 78, 79, 80,
+ 81, 82, 83, 84, 85, 86, 87, 88, 89, 90,
+ 91, 92, 93, 94, 95, 96, 97, 98, 99, 100,
+
+ 100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
+ 100, 100, 100, 100, 100, 100, 100, 100, 100, 100]
+ )
+ assert_array_equal(a, b)
+
+ def test_check_minimum_1(self):
+ a = np.arange(100)
+ a = np.pad(a, (25, 20), 'minimum')
+ b = np.array(
+ [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0,
+
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
+ 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
+ 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
+ 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
+ 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
+ 80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
+ 90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
+
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
+ )
+ assert_array_equal(a, b)
+
+ def test_check_minimum_2(self):
+ a = np.arange(100) + 2
+ a = np.pad(a, (25, 20), 'minimum')
+ b = np.array(
+ [ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2,
+
+ 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
+ 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+ 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41,
+ 42, 43, 44, 45, 46, 47, 48, 49, 50, 51,
+ 52, 53, 54, 55, 56, 57, 58, 59, 60, 61,
+ 62, 63, 64, 65, 66, 67, 68, 69, 70, 71,
+ 72, 73, 74, 75, 76, 77, 78, 79, 80, 81,
+ 82, 83, 84, 85, 86, 87, 88, 89, 90, 91,
+ 92, 93, 94, 95, 96, 97, 98, 99, 100, 101,
+
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]
+ )
+ assert_array_equal(a, b)
+
+ def test_check_minimum_stat_length(self):
+ a = np.arange(100) + 1
+ a = np.pad(a, (25, 20), 'minimum', stat_length=10)
+ b = np.array(
+ [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1,
+
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+ 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
+ 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
+ 41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
+ 51, 52, 53, 54, 55, 56, 57, 58, 59, 60,
+ 61, 62, 63, 64, 65, 66, 67, 68, 69, 70,
+ 71, 72, 73, 74, 75, 76, 77, 78, 79, 80,
+ 81, 82, 83, 84, 85, 86, 87, 88, 89, 90,
+ 91, 92, 93, 94, 95, 96, 97, 98, 99, 100,
+
+ 91, 91, 91, 91, 91, 91, 91, 91, 91, 91,
+ 91, 91, 91, 91, 91, 91, 91, 91, 91, 91]
+ )
+ assert_array_equal(a, b)
+
+ def test_check_median(self):
+ a = np.arange(100).astype('f')
+ a = np.pad(a, (25, 20), 'median')
+ b = np.array(
+ [49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5,
+ 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5,
+ 49.5, 49.5, 49.5, 49.5, 49.5,
+
+ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9.,
+ 10., 11., 12., 13., 14., 15., 16., 17., 18., 19.,
+ 20., 21., 22., 23., 24., 25., 26., 27., 28., 29.,
+ 30., 31., 32., 33., 34., 35., 36., 37., 38., 39.,
+ 40., 41., 42., 43., 44., 45., 46., 47., 48., 49.,
+ 50., 51., 52., 53., 54., 55., 56., 57., 58., 59.,
+ 60., 61., 62., 63., 64., 65., 66., 67., 68., 69.,
+ 70., 71., 72., 73., 74., 75., 76., 77., 78., 79.,
+ 80., 81., 82., 83., 84., 85., 86., 87., 88., 89.,
+ 90., 91., 92., 93., 94., 95., 96., 97., 98., 99.,
+
+ 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5,
+ 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5]
+ )
+ assert_array_equal(a, b)
+
+ def test_check_median_01(self):
+ a = np.array([[3, 1, 4], [4, 5, 9], [9, 8, 2]])
+ a = np.pad(a, 1, 'median')
+ b = np.array(
+ [[4, 4, 5, 4, 4],
+
+ [3, 3, 1, 4, 3],
+ [5, 4, 5, 9, 5],
+ [8, 9, 8, 2, 8],
+
+ [4, 4, 5, 4, 4]]
+ )
+ assert_array_equal(a, b)
+
+ def test_check_median_02(self):
+ a = np.array([[3, 1, 4], [4, 5, 9], [9, 8, 2]])
+ a = np.pad(a.T, 1, 'median').T
+ b = np.array(
+ [[5, 4, 5, 4, 5],
+
+ [3, 3, 1, 4, 3],
+ [5, 4, 5, 9, 5],
+ [8, 9, 8, 2, 8],
+
+ [5, 4, 5, 4, 5]]
+ )
+ assert_array_equal(a, b)
+
+ def test_check_median_stat_length(self):
+ a = np.arange(100).astype('f')
+ a[1] = 2.
+ a[97] = 96.
+ a = np.pad(a, (25, 20), 'median', stat_length=(3, 5))
+ b = np.array(
+ [ 2., 2., 2., 2., 2., 2., 2., 2., 2., 2.,
+ 2., 2., 2., 2., 2., 2., 2., 2., 2., 2.,
+ 2., 2., 2., 2., 2.,
+
+ 0., 2., 2., 3., 4., 5., 6., 7., 8., 9.,
+ 10., 11., 12., 13., 14., 15., 16., 17., 18., 19.,
+ 20., 21., 22., 23., 24., 25., 26., 27., 28., 29.,
+ 30., 31., 32., 33., 34., 35., 36., 37., 38., 39.,
+ 40., 41., 42., 43., 44., 45., 46., 47., 48., 49.,
+ 50., 51., 52., 53., 54., 55., 56., 57., 58., 59.,
+ 60., 61., 62., 63., 64., 65., 66., 67., 68., 69.,
+ 70., 71., 72., 73., 74., 75., 76., 77., 78., 79.,
+ 80., 81., 82., 83., 84., 85., 86., 87., 88., 89.,
+ 90., 91., 92., 93., 94., 95., 96., 96., 98., 99.,
+
+ 96., 96., 96., 96., 96., 96., 96., 96., 96., 96.,
+ 96., 96., 96., 96., 96., 96., 96., 96., 96., 96.]
+ )
+ assert_array_equal(a, b)
+
+ def test_check_mean_shape_one(self):
+ a = [[4, 5, 6]]
+ a = np.pad(a, (5, 7), 'mean', stat_length=2)
+ b = np.array(
+ [[4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
+ [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
+ [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
+ [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
+ [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
+
+ [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
+
+ [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
+ [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
+ [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
+ [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
+ [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
+ [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
+ [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6]]
+ )
+ assert_array_equal(a, b)
+
+ def test_check_mean_2(self):
+ a = np.arange(100).astype('f')
+ a = np.pad(a, (25, 20), 'mean')
+ b = np.array(
+ [49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5,
+ 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5,
+ 49.5, 49.5, 49.5, 49.5, 49.5,
+
+ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9.,
+ 10., 11., 12., 13., 14., 15., 16., 17., 18., 19.,
+ 20., 21., 22., 23., 24., 25., 26., 27., 28., 29.,
+ 30., 31., 32., 33., 34., 35., 36., 37., 38., 39.,
+ 40., 41., 42., 43., 44., 45., 46., 47., 48., 49.,
+ 50., 51., 52., 53., 54., 55., 56., 57., 58., 59.,
+ 60., 61., 62., 63., 64., 65., 66., 67., 68., 69.,
+ 70., 71., 72., 73., 74., 75., 76., 77., 78., 79.,
+ 80., 81., 82., 83., 84., 85., 86., 87., 88., 89.,
+ 90., 91., 92., 93., 94., 95., 96., 97., 98., 99.,
+
+ 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5,
+ 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5]
+ )
+ assert_array_equal(a, b)
+
+ @pytest.mark.parametrize("mode", [
+ "mean",
+ "median",
+ "minimum",
+ "maximum"
+ ])
+ def test_same_prepend_append(self, mode):
+ """ Test that appended and prepended values are equal """
+ # This test is constructed to trigger floating point rounding errors in
+ # a way that caused gh-11216 for mode=='mean'
+ a = np.array([-1, 2, -1]) + np.array([0, 1e-12, 0], dtype=np.float64)
+ a = np.pad(a, (1, 1), mode)
+ assert_equal(a[0], a[-1])
+
+ @pytest.mark.parametrize("mode", ["mean", "median", "minimum", "maximum"])
+ @pytest.mark.parametrize(
+ "stat_length", [-2, (-2,), (3, -1), ((5, 2), (-2, 3)), ((-4,), (2,))]
+ )
+ def test_check_negative_stat_length(self, mode, stat_length):
+ arr = np.arange(30).reshape((6, 5))
+ match = "index can't contain negative values"
+ with pytest.raises(ValueError, match=match):
+ np.pad(arr, 2, mode, stat_length=stat_length)
+
+ def test_simple_stat_length(self):
+ a = np.arange(30)
+ a = np.reshape(a, (6, 5))
+ a = np.pad(a, ((2, 3), (3, 2)), mode='mean', stat_length=(3,))
+ b = np.array(
+ [[6, 6, 6, 5, 6, 7, 8, 9, 8, 8],
+ [6, 6, 6, 5, 6, 7, 8, 9, 8, 8],
+
+ [1, 1, 1, 0, 1, 2, 3, 4, 3, 3],
+ [6, 6, 6, 5, 6, 7, 8, 9, 8, 8],
+ [11, 11, 11, 10, 11, 12, 13, 14, 13, 13],
+ [16, 16, 16, 15, 16, 17, 18, 19, 18, 18],
+ [21, 21, 21, 20, 21, 22, 23, 24, 23, 23],
+ [26, 26, 26, 25, 26, 27, 28, 29, 28, 28],
+
+ [21, 21, 21, 20, 21, 22, 23, 24, 23, 23],
+ [21, 21, 21, 20, 21, 22, 23, 24, 23, 23],
+ [21, 21, 21, 20, 21, 22, 23, 24, 23, 23]]
+ )
+ assert_array_equal(a, b)
+
+ @pytest.mark.filterwarnings("ignore:Mean of empty slice:RuntimeWarning")
+ @pytest.mark.filterwarnings(
+ "ignore:invalid value encountered in( scalar)? divide:RuntimeWarning"
+ )
+ @pytest.mark.parametrize("mode", ["mean", "median"])
+ def test_zero_stat_length_valid(self, mode):
+ arr = np.pad([1., 2.], (1, 2), mode, stat_length=0)
+ expected = np.array([np.nan, 1., 2., np.nan, np.nan])
+ assert_equal(arr, expected)
+
+ @pytest.mark.parametrize("mode", ["minimum", "maximum"])
+ def test_zero_stat_length_invalid(self, mode):
+ match = "stat_length of 0 yields no value for padding"
+ with pytest.raises(ValueError, match=match):
+ np.pad([1., 2.], 0, mode, stat_length=0)
+ with pytest.raises(ValueError, match=match):
+ np.pad([1., 2.], 0, mode, stat_length=(1, 0))
+ with pytest.raises(ValueError, match=match):
+ np.pad([1., 2.], 1, mode, stat_length=0)
+ with pytest.raises(ValueError, match=match):
+ np.pad([1., 2.], 1, mode, stat_length=(1, 0))
+
+
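+# Orientation for the stat_length keyword used above (illustrative):
+#
+#     np.pad([1., 2., 4.], 2, 'mean')                 # both sides use 7/3
+#     np.pad([1., 2., 4.], 2, 'mean', stat_length=2)  # left: mean(1, 2),
+#                                                     # right: mean(2, 4)
+
+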
+class TestConstant:
+ def test_check_constant(self):
+ a = np.arange(100)
+ a = np.pad(a, (25, 20), 'constant', constant_values=(10, 20))
+ b = np.array(
+ [10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10,
+
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
+ 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
+ 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
+ 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
+ 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
+ 80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
+ 90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
+
+ 20, 20, 20, 20, 20, 20, 20, 20, 20, 20,
+ 20, 20, 20, 20, 20, 20, 20, 20, 20, 20]
+ )
+ assert_array_equal(a, b)
+
+ def test_check_constant_zeros(self):
+ a = np.arange(100)
+ a = np.pad(a, (25, 20), 'constant')
+ b = np.array(
+ [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0,
+
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
+ 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
+ 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
+ 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
+ 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
+ 80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
+ 90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
+
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
+ )
+ assert_array_equal(a, b)
+
+ def test_check_constant_float(self):
+        # If the input array is int but constant_values is float, the
+        # integer dtype of the array to be padded is kept
+ arr = np.arange(30).reshape(5, 6)
+ test = np.pad(arr, (1, 2), mode='constant',
+ constant_values=1.1)
+ expected = np.array(
+ [[1, 1, 1, 1, 1, 1, 1, 1, 1],
+
+ [1, 0, 1, 2, 3, 4, 5, 1, 1],
+ [1, 6, 7, 8, 9, 10, 11, 1, 1],
+ [1, 12, 13, 14, 15, 16, 17, 1, 1],
+ [1, 18, 19, 20, 21, 22, 23, 1, 1],
+ [1, 24, 25, 26, 27, 28, 29, 1, 1],
+
+ [1, 1, 1, 1, 1, 1, 1, 1, 1],
+ [1, 1, 1, 1, 1, 1, 1, 1, 1]]
+ )
+ assert_allclose(test, expected)
+
+ def test_check_constant_float2(self):
+ # If input array is float, and constant_values are float, the dtype of
+ # the array to be padded is kept - here retaining the float constants
+ arr = np.arange(30).reshape(5, 6)
+ arr_float = arr.astype(np.float64)
+ test = np.pad(arr_float, ((1, 2), (1, 2)), mode='constant',
+ constant_values=1.1)
+ expected = np.array(
+ [[1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1],
+
+ [1.1, 0. , 1. , 2. , 3. , 4. , 5. , 1.1, 1.1], # noqa: E203
+ [1.1, 6. , 7. , 8. , 9. , 10. , 11. , 1.1, 1.1], # noqa: E203
+ [1.1, 12. , 13. , 14. , 15. , 16. , 17. , 1.1, 1.1], # noqa: E203
+ [1.1, 18. , 19. , 20. , 21. , 22. , 23. , 1.1, 1.1], # noqa: E203
+ [1.1, 24. , 25. , 26. , 27. , 28. , 29. , 1.1, 1.1], # noqa: E203
+
+ [1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1],
+ [1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1]]
+ )
+ assert_allclose(test, expected)
+
+ def test_check_constant_float3(self):
+ a = np.arange(100, dtype=float)
+ a = np.pad(a, (25, 20), 'constant', constant_values=(-1.1, -1.2))
+ b = np.array(
+ [-1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1,
+ -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1,
+ -1.1, -1.1, -1.1, -1.1, -1.1,
+
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
+ 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
+ 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
+ 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
+ 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
+ 80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
+ 90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
+
+ -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2,
+ -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2]
+ )
+ assert_allclose(a, b)
+
+ def test_check_constant_odd_pad_amount(self):
+ arr = np.arange(30).reshape(5, 6)
+ test = np.pad(arr, ((1,), (2,)), mode='constant',
+ constant_values=3)
+ expected = np.array(
+ [[3, 3, 3, 3, 3, 3, 3, 3, 3, 3],
+
+ [3, 3, 0, 1, 2, 3, 4, 5, 3, 3],
+ [3, 3, 6, 7, 8, 9, 10, 11, 3, 3],
+ [3, 3, 12, 13, 14, 15, 16, 17, 3, 3],
+ [3, 3, 18, 19, 20, 21, 22, 23, 3, 3],
+ [3, 3, 24, 25, 26, 27, 28, 29, 3, 3],
+
+ [3, 3, 3, 3, 3, 3, 3, 3, 3, 3]]
+ )
+ assert_allclose(test, expected)
+
+ def test_check_constant_pad_2d(self):
+ arr = np.arange(4).reshape(2, 2)
+ test = np.pad(arr, ((1, 2), (1, 3)), mode='constant',
+ constant_values=((1, 2), (3, 4)))
+ expected = np.array(
+ [[3, 1, 1, 4, 4, 4],
+ [3, 0, 1, 4, 4, 4],
+ [3, 2, 3, 4, 4, 4],
+ [3, 2, 2, 4, 4, 4],
+ [3, 2, 2, 4, 4, 4]]
+ )
+ assert_allclose(test, expected)
+
+ def test_check_large_integers(self):
+ uint64_max = 2 ** 64 - 1
+ arr = np.full(5, uint64_max, dtype=np.uint64)
+ test = np.pad(arr, 1, mode="constant", constant_values=arr.min())
+ expected = np.full(7, uint64_max, dtype=np.uint64)
+ assert_array_equal(test, expected)
+
+ int64_max = 2 ** 63 - 1
+ arr = np.full(5, int64_max, dtype=np.int64)
+ test = np.pad(arr, 1, mode="constant", constant_values=arr.min())
+ expected = np.full(7, int64_max, dtype=np.int64)
+ assert_array_equal(test, expected)
+
+ def test_check_object_array(self):
+ arr = np.empty(1, dtype=object)
+ obj_a = object()
+ arr[0] = obj_a
+ obj_b = object()
+ obj_c = object()
+ arr = np.pad(arr, pad_width=1, mode='constant',
+ constant_values=(obj_b, obj_c))
+
+ expected = np.empty((3,), dtype=object)
+ expected[0] = obj_b
+ expected[1] = obj_a
+ expected[2] = obj_c
+
+ assert_array_equal(arr, expected)
+
+ def test_pad_empty_dimension(self):
+ arr = np.zeros((3, 0, 2))
+ result = np.pad(arr, [(0,), (2,), (1,)], mode="constant")
+ assert result.shape == (3, 4, 4)
+
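+# For reference (illustration only, not part of the test suite):
+# ``constant_values`` takes a (before, after) pair per axis and defaults
+# to 0 when omitted.
+#
+#   >>> import numpy as np
+#   >>> np.pad([1, 2], (2, 1), mode="constant", constant_values=(8, 9))
+#   array([8, 8, 1, 2, 9])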
+
+class TestLinearRamp:
+ def test_check_simple(self):
+ a = np.arange(100).astype('f')
+ a = np.pad(a, (25, 20), 'linear_ramp', end_values=(4, 5))
+ b = np.array(
+ [4.00, 3.84, 3.68, 3.52, 3.36, 3.20, 3.04, 2.88, 2.72, 2.56,
+ 2.40, 2.24, 2.08, 1.92, 1.76, 1.60, 1.44, 1.28, 1.12, 0.96,
+ 0.80, 0.64, 0.48, 0.32, 0.16,
+
+ 0.00, 1.00, 2.00, 3.00, 4.00, 5.00, 6.00, 7.00, 8.00, 9.00,
+ 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0,
+ 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0,
+ 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0, 37.0, 38.0, 39.0,
+ 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0, 47.0, 48.0, 49.0,
+ 50.0, 51.0, 52.0, 53.0, 54.0, 55.0, 56.0, 57.0, 58.0, 59.0,
+ 60.0, 61.0, 62.0, 63.0, 64.0, 65.0, 66.0, 67.0, 68.0, 69.0,
+ 70.0, 71.0, 72.0, 73.0, 74.0, 75.0, 76.0, 77.0, 78.0, 79.0,
+ 80.0, 81.0, 82.0, 83.0, 84.0, 85.0, 86.0, 87.0, 88.0, 89.0,
+ 90.0, 91.0, 92.0, 93.0, 94.0, 95.0, 96.0, 97.0, 98.0, 99.0,
+
+ 94.3, 89.6, 84.9, 80.2, 75.5, 70.8, 66.1, 61.4, 56.7, 52.0,
+ 47.3, 42.6, 37.9, 33.2, 28.5, 23.8, 19.1, 14.4, 9.7, 5.]
+ )
+ assert_allclose(a, b, rtol=1e-5, atol=1e-5)
+
+ def test_check_2d(self):
+ arr = np.arange(20).reshape(4, 5).astype(np.float64)
+ test = np.pad(arr, (2, 2), mode='linear_ramp', end_values=(0, 0))
+ expected = np.array(
+ [[0., 0., 0., 0., 0., 0., 0., 0., 0.],
+ [0., 0., 0., 0.5, 1., 1.5, 2., 1., 0.],
+ [0., 0., 0., 1., 2., 3., 4., 2., 0.],
+ [0., 2.5, 5., 6., 7., 8., 9., 4.5, 0.],
+ [0., 5., 10., 11., 12., 13., 14., 7., 0.],
+ [0., 7.5, 15., 16., 17., 18., 19., 9.5, 0.],
+ [0., 3.75, 7.5, 8., 8.5, 9., 9.5, 4.75, 0.],
+ [0., 0., 0., 0., 0., 0., 0., 0., 0.]])
+ assert_allclose(test, expected)
+
+ @pytest.mark.xfail(exceptions=(AssertionError,))
+ def test_object_array(self):
+ from fractions import Fraction
+ arr = np.array([Fraction(1, 2), Fraction(-1, 2)])
+ actual = np.pad(arr, (2, 3), mode='linear_ramp', end_values=0)
+
+ # deliberately chosen to have a non-power-of-2 denominator such that
+ # rounding to floats causes a failure.
+ expected = np.array([
+ Fraction( 0, 12),
+ Fraction( 3, 12),
+ Fraction( 6, 12),
+ Fraction(-6, 12),
+ Fraction(-4, 12),
+ Fraction(-2, 12),
+ Fraction(-0, 12),
+ ])
+ assert_equal(actual, expected)
+
+ def test_end_values(self):
+ """Ensure that end values are exact."""
+ a = np.pad(np.ones(10).reshape(2, 5), (223, 123), mode="linear_ramp")
+ assert_equal(a[:, 0], 0.)
+ assert_equal(a[:, -1], 0.)
+ assert_equal(a[0, :], 0.)
+ assert_equal(a[-1, :], 0.)
+
+ @pytest.mark.parametrize("dtype", _numeric_dtypes)
+ def test_negative_difference(self, dtype):
+ """
+ Check correct behavior of unsigned dtypes when there is a negative
+ difference between the edge to pad and `end_values`. Check both
+ orderings so the result is independent of the implementation. Also run
+ all other dtypes in case dtype casting interferes with complex dtypes.
+ See gh-14191.
+ """
+ x = np.array([3], dtype=dtype)
+ result = np.pad(x, 3, mode="linear_ramp", end_values=0)
+ expected = np.array([0, 1, 2, 3, 2, 1, 0], dtype=dtype)
+ assert_equal(result, expected)
+
+ x = np.array([0], dtype=dtype)
+ result = np.pad(x, 3, mode="linear_ramp", end_values=3)
+ expected = np.array([3, 2, 1, 0, 1, 2, 3], dtype=dtype)
+ assert_equal(result, expected)
+
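+# A compact sketch of the 'linear_ramp' behavior tested above (illustration
+# only, not part of the test suite): each pad region ramps linearly from the
+# requested ``end_values`` to the nearest edge value.
+#
+#   >>> import numpy as np
+#   >>> np.pad([1, 2, 3, 4, 5], (2, 3), 'linear_ramp', end_values=(5, -4))
+#   array([ 5,  3,  1,  2,  3,  4,  5,  2, -1, -4])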
+
+class TestReflect:
+ def test_check_simple(self):
+ a = np.arange(100)
+ a = np.pad(a, (25, 20), 'reflect')
+ b = np.array(
+ [25, 24, 23, 22, 21, 20, 19, 18, 17, 16,
+ 15, 14, 13, 12, 11, 10, 9, 8, 7, 6,
+ 5, 4, 3, 2, 1,
+
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
+ 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
+ 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
+ 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
+ 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
+ 80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
+ 90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
+
+ 98, 97, 96, 95, 94, 93, 92, 91, 90, 89,
+ 88, 87, 86, 85, 84, 83, 82, 81, 80, 79]
+ )
+ assert_array_equal(a, b)
+
+ def test_check_odd_method(self):
+ a = np.arange(100)
+ a = np.pad(a, (25, 20), 'reflect', reflect_type='odd')
+ b = np.array(
+ [-25, -24, -23, -22, -21, -20, -19, -18, -17, -16,
+ -15, -14, -13, -12, -11, -10, -9, -8, -7, -6,
+ -5, -4, -3, -2, -1,
+
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
+ 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
+ 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
+ 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
+ 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
+ 80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
+ 90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
+
+ 100, 101, 102, 103, 104, 105, 106, 107, 108, 109,
+ 110, 111, 112, 113, 114, 115, 116, 117, 118, 119]
+ )
+ assert_array_equal(a, b)
+
+ def test_check_large_pad(self):
+ a = [[4, 5, 6], [6, 7, 8]]
+ a = np.pad(a, (5, 7), 'reflect')
+ b = np.array(
+ [[7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7],
+ [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
+ [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7],
+ [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
+ [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7],
+
+ [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
+ [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7],
+
+ [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
+ [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7],
+ [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
+ [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7],
+ [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
+ [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7],
+ [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5]]
+ )
+ assert_array_equal(a, b)
+
+ def test_check_shape(self):
+ a = [[4, 5, 6]]
+ a = np.pad(a, (5, 7), 'reflect')
+ b = np.array(
+ [[5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
+ [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
+ [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
+ [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
+ [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
+
+ [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
+
+ [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
+ [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
+ [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
+ [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
+ [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
+ [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
+ [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5]]
+ )
+ assert_array_equal(a, b)
+
+ def test_check_01(self):
+ a = np.pad([1, 2, 3], 2, 'reflect')
+ b = np.array([3, 2, 1, 2, 3, 2, 1])
+ assert_array_equal(a, b)
+
+ def test_check_02(self):
+ a = np.pad([1, 2, 3], 3, 'reflect')
+ b = np.array([2, 3, 2, 1, 2, 3, 2, 1, 2])
+ assert_array_equal(a, b)
+
+ def test_check_03(self):
+ a = np.pad([1, 2, 3], 4, 'reflect')
+ b = np.array([1, 2, 3, 2, 1, 2, 3, 2, 1, 2, 3])
+ assert_array_equal(a, b)
+
+ def test_check_04(self):
+ a = np.pad([1, 2, 3], [1, 10], 'reflect')
+ b = np.array([2, 1, 2, 3, 2, 1, 2, 3, 2, 1, 2, 3, 2, 1])
+ assert_array_equal(a, b)
+
+ def test_check_05(self):
+ a = np.pad([1, 2, 3, 4], [45, 10], 'reflect')
+ b = np.array(
+ [4, 3, 2, 1, 2, 3, 4, 3, 2, 1,
+ 2, 3, 4, 3, 2, 1, 2, 3, 4, 3,
+ 2, 1, 2, 3, 4, 3, 2, 1, 2, 3,
+ 4, 3, 2, 1, 2, 3, 4, 3, 2, 1,
+ 2, 3, 4, 3, 2, 1, 2, 3, 4, 3,
+ 2, 1, 2, 3, 4, 3, 2, 1, 2])
+ assert_array_equal(a, b)
+
+ def test_check_06(self):
+ a = np.pad([1, 2, 3, 4], [15, 2], 'symmetric')
+ b = np.array(
+ [2, 3, 4, 4, 3, 2, 1, 1, 2, 3,
+ 4, 4, 3, 2, 1, 1, 2, 3, 4, 4,
+ 3]
+ )
+ assert_array_equal(a, b)
+
+ def test_check_07(self):
+ a = np.pad([1, 2, 3, 4, 5, 6], [45, 3], 'symmetric')
+ b = np.array(
+ [4, 5, 6, 6, 5, 4, 3, 2, 1, 1,
+ 2, 3, 4, 5, 6, 6, 5, 4, 3, 2,
+ 1, 1, 2, 3, 4, 5, 6, 6, 5, 4,
+ 3, 2, 1, 1, 2, 3, 4, 5, 6, 6,
+ 5, 4, 3, 2, 1, 1, 2, 3, 4, 5,
+ 6, 6, 5, 4])
+ assert_array_equal(a, b)
+
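+# The difference between the two mirroring modes in one line each, taken
+# from the minimal cases above (illustration only): 'reflect' mirrors about
+# the edge value without repeating it, 'symmetric' repeats the edge value.
+#
+#   >>> import numpy as np
+#   >>> np.pad([1, 2, 3], 2, mode="reflect")
+#   array([3, 2, 1, 2, 3, 2, 1])
+#   >>> np.pad([1, 2, 3], 2, mode="symmetric")
+#   array([2, 1, 1, 2, 3, 3, 2])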
+
+class TestEmptyArray:
+ """Check how padding behaves on arrays with an empty dimension."""
+
+ @pytest.mark.parametrize(
+ # Keep parametrization ordered, otherwise pytest-xdist might believe
+ # that different tests were collected during parallelization
+ "mode", sorted(_all_modes.keys() - {"constant", "empty"})
+ )
+ def test_pad_empty_dimension(self, mode):
+ match = ("can't extend empty axis 0 using modes other than 'constant' "
+ "or 'empty'")
+ with pytest.raises(ValueError, match=match):
+ np.pad([], 4, mode=mode)
+ with pytest.raises(ValueError, match=match):
+ np.pad(np.ndarray(0), 4, mode=mode)
+ with pytest.raises(ValueError, match=match):
+ np.pad(np.zeros((0, 3)), ((1,), (0,)), mode=mode)
+
+ @pytest.mark.parametrize("mode", _all_modes.keys())
+ def test_pad_non_empty_dimension(self, mode):
+ result = np.pad(np.ones((2, 0, 2)), ((3,), (0,), (1,)), mode=mode)
+ assert result.shape == (8, 0, 4)
+
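+# In short (illustration only, not part of the test suite): only 'constant'
+# and 'empty' can create values along an axis of length zero; every other
+# mode needs at least one existing value to extend.
+#
+#   >>> import numpy as np
+#   >>> np.pad(np.zeros((0, 2)), 1, mode="constant").shape
+#   (2, 4)
+#   >>> np.pad(np.zeros((0, 2)), 1, mode="edge")
+#   Traceback (most recent call last):
+#     ...
+#   ValueError: can't extend empty axis 0 using modes other than 'constant' or 'empty'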
+
+class TestSymmetric:
+ def test_check_simple(self):
+ a = np.arange(100)
+ a = np.pad(a, (25, 20), 'symmetric')
+ b = np.array(
+ [24, 23, 22, 21, 20, 19, 18, 17, 16, 15,
+ 14, 13, 12, 11, 10, 9, 8, 7, 6, 5,
+ 4, 3, 2, 1, 0,
+
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
+ 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
+ 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
+ 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
+ 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
+ 80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
+ 90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
+
+ 99, 98, 97, 96, 95, 94, 93, 92, 91, 90,
+ 89, 88, 87, 86, 85, 84, 83, 82, 81, 80]
+ )
+ assert_array_equal(a, b)
+
+ def test_check_odd_method(self):
+ a = np.arange(100)
+ a = np.pad(a, (25, 20), 'symmetric', reflect_type='odd')
+ b = np.array(
+ [-24, -23, -22, -21, -20, -19, -18, -17, -16, -15,
+ -14, -13, -12, -11, -10, -9, -8, -7, -6, -5,
+ -4, -3, -2, -1, 0,
+
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
+ 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
+ 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
+ 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
+ 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
+ 80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
+ 90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
+
+ 99, 100, 101, 102, 103, 104, 105, 106, 107, 108,
+ 109, 110, 111, 112, 113, 114, 115, 116, 117, 118]
+ )
+ assert_array_equal(a, b)
+
+ def test_check_large_pad(self):
+ a = [[4, 5, 6], [6, 7, 8]]
+ a = np.pad(a, (5, 7), 'symmetric')
+ b = np.array(
+ [[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
+ [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
+ [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8],
+ [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8],
+ [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
+
+ [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
+ [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8],
+
+ [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8],
+ [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
+ [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
+ [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8],
+ [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8],
+ [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
+ [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6]]
+ )
+
+ assert_array_equal(a, b)
+
+ def test_check_large_pad_odd(self):
+ a = [[4, 5, 6], [6, 7, 8]]
+ a = np.pad(a, (5, 7), 'symmetric', reflect_type='odd')
+ b = np.array(
+ [[-3, -2, -2, -1, 0, 0, 1, 2, 2, 3, 4, 4, 5, 6, 6],
+ [-3, -2, -2, -1, 0, 0, 1, 2, 2, 3, 4, 4, 5, 6, 6],
+ [-1, 0, 0, 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8],
+ [-1, 0, 0, 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8],
+ [ 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10],
+
+ [ 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10],
+ [ 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10, 11, 12, 12],
+
+ [ 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10, 11, 12, 12],
+ [ 5, 6, 6, 7, 8, 8, 9, 10, 10, 11, 12, 12, 13, 14, 14],
+ [ 5, 6, 6, 7, 8, 8, 9, 10, 10, 11, 12, 12, 13, 14, 14],
+ [ 7, 8, 8, 9, 10, 10, 11, 12, 12, 13, 14, 14, 15, 16, 16],
+ [ 7, 8, 8, 9, 10, 10, 11, 12, 12, 13, 14, 14, 15, 16, 16],
+ [ 9, 10, 10, 11, 12, 12, 13, 14, 14, 15, 16, 16, 17, 18, 18],
+ [ 9, 10, 10, 11, 12, 12, 13, 14, 14, 15, 16, 16, 17, 18, 18]]
+ )
+ assert_array_equal(a, b)
+
+ def test_check_shape(self):
+ a = [[4, 5, 6]]
+ a = np.pad(a, (5, 7), 'symmetric')
+ b = np.array(
+ [[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
+ [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
+ [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
+ [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
+ [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
+
+ [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
+ [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
+
+ [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
+ [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
+ [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
+ [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
+ [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
+ [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6]]
+ )
+ assert_array_equal(a, b)
+
+ def test_check_01(self):
+ a = np.pad([1, 2, 3], 2, 'symmetric')
+ b = np.array([2, 1, 1, 2, 3, 3, 2])
+ assert_array_equal(a, b)
+
+ def test_check_02(self):
+ a = np.pad([1, 2, 3], 3, 'symmetric')
+ b = np.array([3, 2, 1, 1, 2, 3, 3, 2, 1])
+ assert_array_equal(a, b)
+
+ def test_check_03(self):
+ a = np.pad([1, 2, 3], 6, 'symmetric')
+ b = np.array([1, 2, 3, 3, 2, 1, 1, 2, 3, 3, 2, 1, 1, 2, 3])
+ assert_array_equal(a, b)
+
+
+class TestWrap:
+ def test_check_simple(self):
+ a = np.arange(100)
+ a = np.pad(a, (25, 20), 'wrap')
+ b = np.array(
+ [75, 76, 77, 78, 79, 80, 81, 82, 83, 84,
+ 85, 86, 87, 88, 89, 90, 91, 92, 93, 94,
+ 95, 96, 97, 98, 99,
+
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
+ 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
+ 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
+ 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
+ 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
+ 80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
+ 90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
+
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]
+ )
+ assert_array_equal(a, b)
+
+ def test_check_large_pad(self):
+ a = np.arange(12)
+ a = np.reshape(a, (3, 4))
+ a = np.pad(a, (10, 12), 'wrap')
+ b = np.array(
+ [[10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,
+ 11, 8, 9, 10, 11, 8, 9, 10, 11],
+ [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2,
+ 3, 0, 1, 2, 3, 0, 1, 2, 3],
+ [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6,
+ 7, 4, 5, 6, 7, 4, 5, 6, 7],
+ [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,
+ 11, 8, 9, 10, 11, 8, 9, 10, 11],
+ [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2,
+ 3, 0, 1, 2, 3, 0, 1, 2, 3],
+ [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6,
+ 7, 4, 5, 6, 7, 4, 5, 6, 7],
+ [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,
+ 11, 8, 9, 10, 11, 8, 9, 10, 11],
+ [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2,
+ 3, 0, 1, 2, 3, 0, 1, 2, 3],
+ [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6,
+ 7, 4, 5, 6, 7, 4, 5, 6, 7],
+ [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,
+ 11, 8, 9, 10, 11, 8, 9, 10, 11],
+
+ [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2,
+ 3, 0, 1, 2, 3, 0, 1, 2, 3],
+ [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6,
+ 7, 4, 5, 6, 7, 4, 5, 6, 7],
+ [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,
+ 11, 8, 9, 10, 11, 8, 9, 10, 11],
+
+ [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2,
+ 3, 0, 1, 2, 3, 0, 1, 2, 3],
+ [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6,
+ 7, 4, 5, 6, 7, 4, 5, 6, 7],
+ [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,
+ 11, 8, 9, 10, 11, 8, 9, 10, 11],
+ [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2,
+ 3, 0, 1, 2, 3, 0, 1, 2, 3],
+ [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6,
+ 7, 4, 5, 6, 7, 4, 5, 6, 7],
+ [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,
+ 11, 8, 9, 10, 11, 8, 9, 10, 11],
+ [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2,
+ 3, 0, 1, 2, 3, 0, 1, 2, 3],
+ [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6,
+ 7, 4, 5, 6, 7, 4, 5, 6, 7],
+ [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,
+ 11, 8, 9, 10, 11, 8, 9, 10, 11],
+ [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2,
+ 3, 0, 1, 2, 3, 0, 1, 2, 3],
+ [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6,
+ 7, 4, 5, 6, 7, 4, 5, 6, 7],
+ [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,
+ 11, 8, 9, 10, 11, 8, 9, 10, 11]]
+ )
+ assert_array_equal(a, b)
+
+ def test_check_01(self):
+ a = np.pad([1, 2, 3], 3, 'wrap')
+ b = np.array([1, 2, 3, 1, 2, 3, 1, 2, 3])
+ assert_array_equal(a, b)
+
+ def test_check_02(self):
+ a = np.pad([1, 2, 3], 4, 'wrap')
+ b = np.array([3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1])
+ assert_array_equal(a, b)
+
+ def test_pad_with_zero(self):
+ a = np.ones((3, 5))
+ b = np.pad(a, (0, 5), mode="wrap")
+ assert_array_equal(a, b[:-5, :-5])
+
+ def test_repeated_wrapping(self):
+ """
+ Check wrapping on each side individually when the padded area is
+ longer than the original array.
+ """
+ a = np.arange(5)
+ b = np.pad(a, (12, 0), mode="wrap")
+ assert_array_equal(np.r_[a, a, a, a][3:], b)
+
+ a = np.arange(5)
+ b = np.pad(a, (0, 12), mode="wrap")
+ assert_array_equal(np.r_[a, a, a, a][:-3], b)
+
+ def test_repeated_wrapping_multiple_origin(self):
+ """
+ Assert that 'wrap' pads only with whole copies of the original data
+ when the pad width is larger than the original array.
+ """
+ a = np.arange(4).reshape(2, 2)
+ a = np.pad(a, [(1, 3), (3, 1)], mode='wrap')
+ b = np.array(
+ [[3, 2, 3, 2, 3, 2],
+ [1, 0, 1, 0, 1, 0],
+ [3, 2, 3, 2, 3, 2],
+ [1, 0, 1, 0, 1, 0],
+ [3, 2, 3, 2, 3, 2],
+ [1, 0, 1, 0, 1, 0]]
+ )
+ assert_array_equal(a, b)
+
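+# 'wrap' tiles the original values periodically, so a pad wider than the
+# array simply repeats whole periods (illustration only, matching
+# test_check_02 above):
+#
+#   >>> import numpy as np
+#   >>> np.pad([1, 2, 3], 4, mode="wrap")
+#   array([3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1])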
+
+class TestEdge:
+ def test_check_simple(self):
+ a = np.arange(12)
+ a = np.reshape(a, (4, 3))
+ a = np.pad(a, ((2, 3), (3, 2)), 'edge')
+ b = np.array(
+ [[0, 0, 0, 0, 1, 2, 2, 2],
+ [0, 0, 0, 0, 1, 2, 2, 2],
+
+ [0, 0, 0, 0, 1, 2, 2, 2],
+ [3, 3, 3, 3, 4, 5, 5, 5],
+ [6, 6, 6, 6, 7, 8, 8, 8],
+ [9, 9, 9, 9, 10, 11, 11, 11],
+
+ [9, 9, 9, 9, 10, 11, 11, 11],
+ [9, 9, 9, 9, 10, 11, 11, 11],
+ [9, 9, 9, 9, 10, 11, 11, 11]]
+ )
+ assert_array_equal(a, b)
+
+ def test_check_width_shape_1_2(self):
+ # Check a pad_width of the form ((1, 2),).
+ # Regression test for issue gh-7808.
+ a = np.array([1, 2, 3])
+ padded = np.pad(a, ((1, 2),), 'edge')
+ expected = np.array([1, 1, 2, 3, 3, 3])
+ assert_array_equal(padded, expected)
+
+ a = np.array([[1, 2, 3], [4, 5, 6]])
+ padded = np.pad(a, ((1, 2),), 'edge')
+ expected = np.pad(a, ((1, 2), (1, 2)), 'edge')
+ assert_array_equal(padded, expected)
+
+ a = np.arange(24).reshape(2, 3, 4)
+ padded = np.pad(a, ((1, 2),), 'edge')
+ expected = np.pad(a, ((1, 2), (1, 2), (1, 2)), 'edge')
+ assert_array_equal(padded, expected)
+
+
+class TestEmpty:
+ def test_simple(self):
+ arr = np.arange(24).reshape(4, 6)
+ result = np.pad(arr, [(2, 3), (3, 1)], mode="empty")
+ assert result.shape == (9, 10)
+ assert_equal(arr, result[2:-3, 3:-1])
+
+ def test_pad_empty_dimension(self):
+ arr = np.zeros((3, 0, 2))
+ result = np.pad(arr, [(0,), (2,), (1,)], mode="empty")
+ assert result.shape == (3, 4, 4)
+
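+# 'empty' allocates the padded area without initializing it, so only the
+# output shape and the embedded original values are defined; the pad values
+# themselves are arbitrary (illustration only):
+#
+#   >>> import numpy as np
+#   >>> np.pad(np.ones((2, 3)), 1, mode="empty").shape
+#   (4, 5)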
+
+def test_legacy_vector_functionality():
+ def _padwithtens(vector, pad_width, iaxis, kwargs):
+ vector[:pad_width[0]] = 10
+ vector[-pad_width[1]:] = 10
+
+ a = np.arange(6).reshape(2, 3)
+ a = np.pad(a, 2, _padwithtens)
+ b = np.array(
+ [[10, 10, 10, 10, 10, 10, 10],
+ [10, 10, 10, 10, 10, 10, 10],
+
+ [10, 10, 0, 1, 2, 10, 10],
+ [10, 10, 3, 4, 5, 10, 10],
+
+ [10, 10, 10, 10, 10, 10, 10],
+ [10, 10, 10, 10, 10, 10, 10]]
+ )
+ assert_array_equal(a, b)
+
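+# For reference, a callable ``mode`` as used above receives each axis as a
+# 1-D view that already includes the pad regions; the function fills
+# ``vector[:pad_width[0]]`` and ``vector[-pad_width[1]:]`` in place. A
+# minimal sketch (illustration only; note the trailing slice only targets
+# the pad when ``pad_width[1]`` is non-zero):
+#
+#   >>> import numpy as np
+#   >>> def pad_with_tens(vector, pad_width, iaxis, kwargs):
+#   ...     vector[:pad_width[0]] = 10
+#   ...     vector[-pad_width[1]:] = 10
+#   >>> np.pad(np.array([1, 2]), 1, pad_with_tens)
+#   array([10,  1,  2, 10])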
+
+def test_unicode_mode():
+ a = np.pad([1], 2, mode='constant')
+ b = np.array([0, 0, 1, 0, 0])
+ assert_array_equal(a, b)
+
+
+@pytest.mark.parametrize("mode", ["edge", "symmetric", "reflect", "wrap"])
+def test_object_input(mode):
+ # Regression test for issue gh-11395.
+ a = np.full((4, 3), fill_value=None)
+ pad_amt = ((2, 3), (3, 2))
+ b = np.full((9, 8), fill_value=None)
+ assert_array_equal(np.pad(a, pad_amt, mode=mode), b)
+
+
+class TestPadWidth:
+ @pytest.mark.parametrize("pad_width", [
+ (4, 5, 6, 7),
+ ((1,), (2,), (3,)),
+ ((1, 2), (3, 4), (5, 6)),
+ ((3, 4, 5), (0, 1, 2)),
+ ])
+ @pytest.mark.parametrize("mode", _all_modes.keys())
+ def test_misshaped_pad_width(self, pad_width, mode):
+ arr = np.arange(30).reshape((6, 5))
+ match = "operands could not be broadcast together"
+ with pytest.raises(ValueError, match=match):
+ np.pad(arr, pad_width, mode)
+
+ @pytest.mark.parametrize("mode", _all_modes.keys())
+ def test_misshaped_pad_width_2(self, mode):
+ arr = np.arange(30).reshape((6, 5))
+ match = ("input operand has more dimensions than allowed by the axis "
+ "remapping")
+ with pytest.raises(ValueError, match=match):
+ np.pad(arr, (((3,), (4,), (5,)), ((0,), (1,), (2,))), mode)
+
+ @pytest.mark.parametrize(
+ "pad_width", [-2, (-2,), (3, -1), ((5, 2), (-2, 3)), ((-4,), (2,))])
+ @pytest.mark.parametrize("mode", _all_modes.keys())
+ def test_negative_pad_width(self, pad_width, mode):
+ arr = np.arange(30).reshape((6, 5))
+ match = "index can't contain negative values"
+ with pytest.raises(ValueError, match=match):
+ np.pad(arr, pad_width, mode)
+
+ @pytest.mark.parametrize("pad_width, dtype", [
+ ("3", None),
+ ("word", None),
+ (None, None),
+ (object(), None),
+ (3.4, None),
+ (((2, 3, 4), (3, 2)), object),
+ (complex(1, -1), None),
+ (((-2.1, 3), (3, 2)), None),
+ ])
+ @pytest.mark.parametrize("mode", _all_modes.keys())
+ def test_bad_type(self, pad_width, dtype, mode):
+ arr = np.arange(30).reshape((6, 5))
+ match = "`pad_width` must be of integral type."
+ if dtype is not None:
+ # avoid DeprecationWarning when not specifying dtype
+ with pytest.raises(TypeError, match=match):
+ np.pad(arr, np.array(pad_width, dtype=dtype), mode)
+ else:
+ with pytest.raises(TypeError, match=match):
+ np.pad(arr, pad_width, mode)
+ with pytest.raises(TypeError, match=match):
+ np.pad(arr, np.array(pad_width), mode)
+
+ def test_pad_width_as_ndarray(self):
+ a = np.arange(12)
+ a = np.reshape(a, (4, 3))
+ a = np.pad(a, np.array(((2, 3), (3, 2))), 'edge')
+ b = np.array(
+ [[0, 0, 0, 0, 1, 2, 2, 2],
+ [0, 0, 0, 0, 1, 2, 2, 2],
+
+ [0, 0, 0, 0, 1, 2, 2, 2],
+ [3, 3, 3, 3, 4, 5, 5, 5],
+ [6, 6, 6, 6, 7, 8, 8, 8],
+ [9, 9, 9, 9, 10, 11, 11, 11],
+
+ [9, 9, 9, 9, 10, 11, 11, 11],
+ [9, 9, 9, 9, 10, 11, 11, 11],
+ [9, 9, 9, 9, 10, 11, 11, 11]]
+ )
+ assert_array_equal(a, b)
+
+ @pytest.mark.parametrize("pad_width", [0, (0, 0), ((0, 0), (0, 0))])
+ @pytest.mark.parametrize("mode", _all_modes.keys())
+ def test_zero_pad_width(self, pad_width, mode):
+ arr = np.arange(30).reshape(6, 5)
+ assert_array_equal(arr, np.pad(arr, pad_width, mode=mode))
+
+
+@pytest.mark.parametrize("mode", _all_modes.keys())
+def test_kwargs(mode):
+ """Test behavior of pad's kwargs for the given mode."""
+ allowed = _all_modes[mode]
+ not_allowed = {}
+ for kwargs in _all_modes.values():
+ if kwargs != allowed:
+ not_allowed.update(kwargs)
+ # Test if allowed keyword arguments pass
+ np.pad([1, 2, 3], 1, mode, **allowed)
+ # Test if prohibited keyword arguments of other modes raise an error
+ for key, value in not_allowed.items():
+ match = f"unsupported keyword arguments for mode '{mode}'"
+ with pytest.raises(ValueError, match=match):
+ np.pad([1, 2, 3], 1, mode, **{key: value})
+
+
+def test_constant_zero_default():
+ arr = np.array([1, 1])
+ assert_array_equal(np.pad(arr, 2), [0, 0, 1, 1, 0, 0])
+
+
+@pytest.mark.parametrize("mode", [1, "const", object(), None, True, False])
+def test_unsupported_mode(mode):
+ match = f"mode '{mode}' is not supported"
+ with pytest.raises(ValueError, match=match):
+ np.pad([1, 2, 3], 4, mode=mode)
+
+
+@pytest.mark.parametrize("mode", _all_modes.keys())
+def test_non_contiguous_array(mode):
+ arr = np.arange(24).reshape(4, 6)[::2, ::2]
+ result = np.pad(arr, (2, 3), mode)
+ assert result.shape == (7, 8)
+ assert_equal(result[2:-3, 2:-3], arr)
+
+
+@pytest.mark.parametrize("mode", _all_modes.keys())
+def test_memory_layout_persistence(mode):
+ """Test if C and F order is preserved for all pad modes."""
+ x = np.ones((5, 10), order='C')
+ assert np.pad(x, 5, mode).flags["C_CONTIGUOUS"]
+ x = np.ones((5, 10), order='F')
+ assert np.pad(x, 5, mode).flags["F_CONTIGUOUS"]
+
+
+@pytest.mark.parametrize("dtype", _numeric_dtypes)
+@pytest.mark.parametrize("mode", _all_modes.keys())
+def test_dtype_persistence(dtype, mode):
+ arr = np.zeros((3, 2, 1), dtype=dtype)
+ result = np.pad(arr, 1, mode=mode)
+ assert result.dtype == dtype
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_arraysetops.py b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_arraysetops.py
new file mode 100644
index 0000000..7865e1b
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_arraysetops.py
@@ -0,0 +1,1074 @@
+"""Test functions for 1D array set operations.
+
+"""
+import pytest
+
+import numpy as np
+from numpy import ediff1d, intersect1d, isin, setdiff1d, setxor1d, union1d, unique
+from numpy.exceptions import AxisError
+from numpy.testing import (
+ assert_array_equal,
+ assert_equal,
+ assert_raises,
+ assert_raises_regex,
+)
+
+
+class TestSetOps:
+
+ def test_intersect1d(self):
+ # unique inputs
+ a = np.array([5, 7, 1, 2])
+ b = np.array([2, 4, 3, 1, 5])
+
+ ec = np.array([1, 2, 5])
+ c = intersect1d(a, b, assume_unique=True)
+ assert_array_equal(c, ec)
+
+ # non-unique inputs
+ a = np.array([5, 5, 7, 1, 2])
+ b = np.array([2, 1, 4, 3, 3, 1, 5])
+
+ ed = np.array([1, 2, 5])
+ c = intersect1d(a, b)
+ assert_array_equal(c, ed)
+ assert_array_equal([], intersect1d([], []))
+
+ def test_intersect1d_array_like(self):
+ # See gh-11772
+ class Test:
+ def __array__(self, dtype=None, copy=None):
+ return np.arange(3)
+
+ a = Test()
+ res = intersect1d(a, a)
+ assert_array_equal(res, a)
+ res = intersect1d([1, 2, 3], [1, 2, 3])
+ assert_array_equal(res, [1, 2, 3])
+
+ def test_intersect1d_indices(self):
+ # unique inputs
+ a = np.array([1, 2, 3, 4])
+ b = np.array([2, 1, 4, 6])
+ c, i1, i2 = intersect1d(a, b, assume_unique=True, return_indices=True)
+ ee = np.array([1, 2, 4])
+ assert_array_equal(c, ee)
+ assert_array_equal(a[i1], ee)
+ assert_array_equal(b[i2], ee)
+
+ # non-unique inputs
+ a = np.array([1, 2, 2, 3, 4, 3, 2])
+ b = np.array([1, 8, 4, 2, 2, 3, 2, 3])
+ c, i1, i2 = intersect1d(a, b, return_indices=True)
+ ef = np.array([1, 2, 3, 4])
+ assert_array_equal(c, ef)
+ assert_array_equal(a[i1], ef)
+ assert_array_equal(b[i2], ef)
+
+ # non-1D, unique inputs
+ a = np.array([[2, 4, 5, 6], [7, 8, 1, 15]])
+ b = np.array([[3, 2, 7, 6], [10, 12, 8, 9]])
+ c, i1, i2 = intersect1d(a, b, assume_unique=True, return_indices=True)
+ ui1 = np.unravel_index(i1, a.shape)
+ ui2 = np.unravel_index(i2, b.shape)
+ ea = np.array([2, 6, 7, 8])
+ assert_array_equal(ea, a[ui1])
+ assert_array_equal(ea, b[ui2])
+
+ # non-1D inputs, not assumed to be unique
+ a = np.array([[2, 4, 5, 6, 6], [4, 7, 8, 7, 2]])
+ b = np.array([[3, 2, 7, 7], [10, 12, 8, 7]])
+ c, i1, i2 = intersect1d(a, b, return_indices=True)
+ ui1 = np.unravel_index(i1, a.shape)
+ ui2 = np.unravel_index(i2, b.shape)
+ ea = np.array([2, 7, 8])
+ assert_array_equal(ea, a[ui1])
+ assert_array_equal(ea, b[ui2])
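+
+ # With ``return_indices=True`` the returned positions refer to the
+ # flattened inputs, which is why the cases above map them back through
+ # ``np.unravel_index``. A 1-D sketch (illustration only):
+ #
+ #   >>> import numpy as np
+ #   >>> np.intersect1d([1, 3, 4], [3, 1, 2], return_indices=True)
+ #   (array([1, 3]), array([0, 1]), array([1, 0]))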
+
+ def test_setxor1d(self):
+ a = np.array([5, 7, 1, 2])
+ b = np.array([2, 4, 3, 1, 5])
+
+ ec = np.array([3, 4, 7])
+ c = setxor1d(a, b)
+ assert_array_equal(c, ec)
+
+ a = np.array([1, 2, 3])
+ b = np.array([6, 5, 4])
+
+ ec = np.array([1, 2, 3, 4, 5, 6])
+ c = setxor1d(a, b)
+ assert_array_equal(c, ec)
+
+ a = np.array([1, 8, 2, 3])
+ b = np.array([6, 5, 4, 8])
+
+ ec = np.array([1, 2, 3, 4, 5, 6])
+ c = setxor1d(a, b)
+ assert_array_equal(c, ec)
+
+ assert_array_equal([], setxor1d([], []))
+
+ def test_setxor1d_unique(self):
+ a = np.array([1, 8, 2, 3])
+ b = np.array([6, 5, 4, 8])
+
+ ec = np.array([1, 2, 3, 4, 5, 6])
+ c = setxor1d(a, b, assume_unique=True)
+ assert_array_equal(c, ec)
+
+ a = np.array([[1], [8], [2], [3]])
+ b = np.array([[6, 5], [4, 8]])
+
+ ec = np.array([1, 2, 3, 4, 5, 6])
+ c = setxor1d(a, b, assume_unique=True)
+ assert_array_equal(c, ec)
+
+ def test_ediff1d(self):
+ zero_elem = np.array([])
+ one_elem = np.array([1])
+ two_elem = np.array([1, 2])
+
+ assert_array_equal([], ediff1d(zero_elem))
+ assert_array_equal([0], ediff1d(zero_elem, to_begin=0))
+ assert_array_equal([0], ediff1d(zero_elem, to_end=0))
+ assert_array_equal([-1, 0], ediff1d(zero_elem, to_begin=-1, to_end=0))
+ assert_array_equal([], ediff1d(one_elem))
+ assert_array_equal([1], ediff1d(two_elem))
+ assert_array_equal([7, 1, 9], ediff1d(two_elem, to_begin=7, to_end=9))
+ assert_array_equal([5, 6, 1, 7, 8],
+ ediff1d(two_elem, to_begin=[5, 6], to_end=[7, 8]))
+ assert_array_equal([1, 9], ediff1d(two_elem, to_end=9))
+ assert_array_equal([1, 7, 8], ediff1d(two_elem, to_end=[7, 8]))
+ assert_array_equal([7, 1], ediff1d(two_elem, to_begin=7))
+ assert_array_equal([5, 6, 1], ediff1d(two_elem, to_begin=[5, 6]))
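+
+ # ``ediff1d`` computes consecutive differences of the flattened input,
+ # like ``np.diff`` for 1-D arrays, with ``to_begin``/``to_end``
+ # concatenated around the result (illustration only):
+ #
+ #   >>> import numpy as np
+ #   >>> np.ediff1d([1, 2, 4], to_begin=0, to_end=99)
+ #   array([ 0,  1,  2, 99])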
+
+ @pytest.mark.parametrize("ary, prepend, append, expected", [
+ # should fail because trying to cast
+ # np.nan standard floating point value
+ # into an integer array:
+ (np.array([1, 2, 3], dtype=np.int64),
+ None,
+ np.nan,
+ 'to_end'),
+ # should fail because attempting
+ # to downcast to int type:
+ (np.array([1, 2, 3], dtype=np.int64),
+ np.array([5, 7, 2], dtype=np.float32),
+ None,
+ 'to_begin'),
+ # should fail because attempting to cast
+ # two special floating point values
+ # to integers (on both sides of ary),
+ # `to_begin` is in the error message as the impl checks this first:
+ (np.array([1., 3., 9.], dtype=np.int8),
+ np.nan,
+ np.nan,
+ 'to_begin'),
+ ])
+ def test_ediff1d_forbidden_type_casts(self, ary, prepend, append, expected):
+ # verify resolution of gh-11490
+
+ # specifically, raise an appropriate
+ # Exception when attempting to append or
+ # prepend with an incompatible type
+ msg = f'dtype of `{expected}` must be compatible'
+ with assert_raises_regex(TypeError, msg):
+ ediff1d(ary=ary,
+ to_end=append,
+ to_begin=prepend)
+
+ @pytest.mark.parametrize(
+ "ary,prepend,append,expected",
+ [
+ (np.array([1, 2, 3], dtype=np.int16),
+ 2**16, # will be cast to int16 under same kind rule.
+ 2**16 + 4,
+ np.array([0, 1, 1, 4], dtype=np.int16)),
+ (np.array([1, 2, 3], dtype=np.float32),
+ np.array([5], dtype=np.float64),
+ None,
+ np.array([5, 1, 1], dtype=np.float32)),
+ (np.array([1, 2, 3], dtype=np.int32),
+ 0,
+ 0,
+ np.array([0, 1, 1, 0], dtype=np.int32)),
+ (np.array([1, 2, 3], dtype=np.int64),
+ 3,
+ -9,
+ np.array([3, 1, 1, -9], dtype=np.int64)),
+ ]
+ )
+ def test_ediff1d_scalar_handling(self,
+ ary,
+ prepend,
+ append,
+ expected):
+ # maintain backwards-compatibility
+ # of scalar prepend / append behavior
+ # in ediff1d following fix for gh-11490
+ actual = np.ediff1d(ary=ary,
+ to_end=append,
+ to_begin=prepend)
+ assert_equal(actual, expected)
+ assert actual.dtype == expected.dtype
+
+ @pytest.mark.parametrize("kind", [None, "sort", "table"])
+ def test_isin(self, kind):
+ def _isin_slow(a, b):
+ b = np.asarray(b).flatten().tolist()
+ return a in b
+ isin_slow = np.vectorize(_isin_slow, otypes=[bool], excluded={1})
+
+ def assert_isin_equal(a, b):
+ x = isin(a, b, kind=kind)
+ y = isin_slow(a, b)
+ assert_array_equal(x, y)
+
+ # multidimensional arrays in both arguments
+ a = np.arange(24).reshape([2, 3, 4])
+ b = np.array([[10, 20, 30], [0, 1, 3], [11, 22, 33]])
+ assert_isin_equal(a, b)
+
+ # array-likes as both arguments
+ c = [(9, 8), (7, 6)]
+ d = (9, 7)
+ assert_isin_equal(c, d)
+
+ # zero-d array:
+ f = np.array(3)
+ assert_isin_equal(f, b)
+ assert_isin_equal(a, f)
+ assert_isin_equal(f, f)
+
+ # scalar:
+ assert_isin_equal(5, b)
+ assert_isin_equal(a, 6)
+ assert_isin_equal(5, 6)
+
+ # empty array-like:
+ if kind != "table":
+ # An empty list will become float64,
+ # which is invalid for kind="table"
+ x = []
+ assert_isin_equal(x, b)
+ assert_isin_equal(a, x)
+ assert_isin_equal(x, x)
+
+ # empty array with various types:
+ for dtype in [bool, np.int64, np.float64]:
+ if kind == "table" and dtype == np.float64:
+ continue
+
+ if dtype in {np.int64, np.float64}:
+ ar = np.array([10, 20, 30], dtype=dtype)
+ elif dtype in {bool}:
+ ar = np.array([True, False, False])
+
+ empty_array = np.array([], dtype=dtype)
+
+ assert_isin_equal(empty_array, ar)
+ assert_isin_equal(ar, empty_array)
+ assert_isin_equal(empty_array, empty_array)
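+
+ # The ``kind`` argument only selects the algorithm ('sort' uses a
+ # sorting-based approach, 'table' builds a lookup table over the integer
+ # range of the second argument); the result is the same either way
+ # (illustration only):
+ #
+ #   >>> import numpy as np
+ #   >>> np.isin([1, 2, 3, 4], [2, 4], kind="table")
+ #   array([False,  True, False,  True])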
+
+ @pytest.mark.parametrize("kind", [None, "sort", "table"])
+ def test_isin_additional(self, kind):
+ # we use two different sizes for the b array here to test the
+ # two different paths in isin().
+ for mult in (1, 10):
+ # One check without np.array to make sure lists are handled correctly
+ a = [5, 7, 1, 2]
+ b = [2, 4, 3, 1, 5] * mult
+ ec = np.array([True, False, True, True])
+ c = isin(a, b, assume_unique=True, kind=kind)
+ assert_array_equal(c, ec)
+
+ a[0] = 8
+ ec = np.array([False, False, True, True])
+ c = isin(a, b, assume_unique=True, kind=kind)
+ assert_array_equal(c, ec)
+
+ a[0], a[3] = 4, 8
+ ec = np.array([True, False, True, False])
+ c = isin(a, b, assume_unique=True, kind=kind)
+ assert_array_equal(c, ec)
+
+ a = np.array([5, 4, 5, 3, 4, 4, 3, 4, 3, 5, 2, 1, 5, 5])
+ b = [2, 3, 4] * mult
+ ec = [False, True, False, True, True, True, True, True, True,
+ False, True, False, False, False]
+ c = isin(a, b, kind=kind)
+ assert_array_equal(c, ec)
+
+ b = b + [5, 5, 4] * mult
+ ec = [True, True, True, True, True, True, True, True, True, True,
+ True, False, True, True]
+ c = isin(a, b, kind=kind)
+ assert_array_equal(c, ec)
+
+ a = np.array([5, 7, 1, 2])
+ b = np.array([2, 4, 3, 1, 5] * mult)
+ ec = np.array([True, False, True, True])
+ c = isin(a, b, kind=kind)
+ assert_array_equal(c, ec)
+
+ a = np.array([5, 7, 1, 1, 2])
+ b = np.array([2, 4, 3, 3, 1, 5] * mult)
+ ec = np.array([True, False, True, True, True])
+ c = isin(a, b, kind=kind)
+ assert_array_equal(c, ec)
+
+ a = np.array([5, 5])
+ b = np.array([2, 2] * mult)
+ ec = np.array([False, False])
+ c = isin(a, b, kind=kind)
+ assert_array_equal(c, ec)
+
+ a = np.array([5])
+ b = np.array([2])
+ ec = np.array([False])
+ c = isin(a, b, kind=kind)
+ assert_array_equal(c, ec)
+
+ if kind in {None, "sort"}:
+ assert_array_equal(isin([], [], kind=kind), [])
+
+ def test_isin_char_array(self):
+ a = np.array(['a', 'b', 'c', 'd', 'e', 'c', 'e', 'b'])
+ b = np.array(['a', 'c'])
+
+ ec = np.array([True, False, True, False, False, True, False, False])
+ c = isin(a, b)
+
+ assert_array_equal(c, ec)
+
+ @pytest.mark.parametrize("kind", [None, "sort", "table"])
+ def test_isin_invert(self, kind):
+ "Test isin's invert parameter"
+ # We use two different sizes for the b array here to test the
+ # two different paths in isin().
+ for mult in (1, 10):
+ a = np.array([5, 4, 5, 3, 4, 4, 3, 4, 3, 5, 2, 1, 5, 5])
+ b = [2, 3, 4] * mult
+ assert_array_equal(np.invert(isin(a, b, kind=kind)),
+ isin(a, b, invert=True, kind=kind))
+
+ # float:
+ if kind in {None, "sort"}:
+ for mult in (1, 10):
+ a = np.array([5, 4, 5, 3, 4, 4, 3, 4, 3, 5, 2, 1, 5, 5],
+ dtype=np.float32)
+ b = [2, 3, 4] * mult
+ b = np.array(b, dtype=np.float32)
+ assert_array_equal(np.invert(isin(a, b, kind=kind)),
+ isin(a, b, invert=True, kind=kind))
+
+ def test_isin_hit_alternate_algorithm(self):
+ """Hit the standard isin code with integers"""
+ # Need extreme range to hit standard code
+ # This hits it without the use of kind='table'
+ a = np.array([5, 4, 5, 3, 4, 4, 1e9], dtype=np.int64)
+ b = np.array([2, 3, 4, 1e9], dtype=np.int64)
+ expected = np.array([0, 1, 0, 1, 1, 1, 1], dtype=bool)
+ assert_array_equal(expected, isin(a, b))
+ assert_array_equal(np.invert(expected), isin(a, b, invert=True))
+
+ a = np.array([5, 7, 1, 2], dtype=np.int64)
+ b = np.array([2, 4, 3, 1, 5, 1e9], dtype=np.int64)
+ ec = np.array([True, False, True, True])
+ c = isin(a, b, assume_unique=True)
+ assert_array_equal(c, ec)
+
+ @pytest.mark.parametrize("kind", [None, "sort", "table"])
+ def test_isin_boolean(self, kind):
+ """Test that isin works for boolean input"""
+ a = np.array([True, False])
+ b = np.array([False, False, False])
+ expected = np.array([False, True])
+ assert_array_equal(expected,
+ isin(a, b, kind=kind))
+ assert_array_equal(np.invert(expected),
+ isin(a, b, invert=True, kind=kind))
+
+ @pytest.mark.parametrize("kind", [None, "sort"])
+ def test_isin_timedelta(self, kind):
+ """Test that isin works for timedelta input"""
+ rstate = np.random.RandomState(0)
+ a = rstate.randint(0, 100, size=10)
+ b = rstate.randint(0, 100, size=10)
+ truth = isin(a, b)
+ a_timedelta = a.astype("timedelta64[s]")
+ b_timedelta = b.astype("timedelta64[s]")
+ assert_array_equal(truth, isin(a_timedelta, b_timedelta, kind=kind))
+
+ def test_isin_table_timedelta_fails(self):
+ a = np.array([0, 1, 2], dtype="timedelta64[s]")
+ b = a
+ # Make sure it raises a value error:
+ with pytest.raises(ValueError):
+ isin(a, b, kind="table")
+
+ @pytest.mark.parametrize(
+ "dtype1,dtype2",
+ [
+ (np.int8, np.int16),
+ (np.int16, np.int8),
+ (np.uint8, np.uint16),
+ (np.uint16, np.uint8),
+ (np.uint8, np.int16),
+ (np.int16, np.uint8),
+ (np.uint64, np.int64),
+ ]
+ )
+ @pytest.mark.parametrize("kind", [None, "sort", "table"])
+ def test_isin_mixed_dtype(self, dtype1, dtype2, kind):
+ """Test that isin works as expected for mixed dtype input."""
+ is_dtype2_signed = np.issubdtype(dtype2, np.signedinteger)
+ ar1 = np.array([0, 0, 1, 1], dtype=dtype1)
+
+ if is_dtype2_signed:
+ ar2 = np.array([-128, 0, 127], dtype=dtype2)
+ else:
+ ar2 = np.array([127, 0, 255], dtype=dtype2)
+
+ expected = np.array([True, True, False, False])
+
+ expect_failure = kind == "table" and (
+ dtype1 == np.int16 and dtype2 == np.int8)
+
+ if expect_failure:
+ with pytest.raises(RuntimeError, match="exceed the maximum"):
+ isin(ar1, ar2, kind=kind)
+ else:
+ assert_array_equal(isin(ar1, ar2, kind=kind), expected)
+
+ @pytest.mark.parametrize("data", [
+ np.array([2**63, 2**63 + 1], dtype=np.uint64),
+ np.array([-2**62, -2**62 - 1], dtype=np.int64),
+ ])
+ @pytest.mark.parametrize("kind", [None, "sort", "table"])
+ def test_isin_mixed_huge_vals(self, kind, data):
+ """Test values outside intp range (negative ones if 32bit system)"""
+ query = data[1]
+ res = np.isin(data, query, kind=kind)
+ assert_array_equal(res, [False, True])
+ # Also check that nothing weird happens for values that can't
+ # possibly be in range.
+ data = data.astype(np.int32) # clearly different values
+ res = np.isin(data, query, kind=kind)
+ assert_array_equal(res, [False, False])
+
+ @pytest.mark.parametrize("kind", [None, "sort", "table"])
+ def test_isin_mixed_boolean(self, kind):
+ """Test that isin works as expected for bool/int input."""
+ for dtype in np.typecodes["AllInteger"]:
+ a = np.array([True, False, False], dtype=bool)
+ b = np.array([0, 0, 0, 0], dtype=dtype)
+ expected = np.array([False, True, True], dtype=bool)
+ assert_array_equal(isin(a, b, kind=kind), expected)
+
+ a, b = b, a
+ expected = np.array([True, True, True, True], dtype=bool)
+ assert_array_equal(isin(a, b, kind=kind), expected)
+
+ def test_isin_first_array_is_object(self):
+ ar1 = [None]
+ ar2 = np.array([1] * 10)
+ expected = np.array([False])
+ result = np.isin(ar1, ar2)
+ assert_array_equal(result, expected)
+
+ def test_isin_second_array_is_object(self):
+ ar1 = 1
+ ar2 = np.array([None] * 10)
+ expected = np.array([False])
+ result = np.isin(ar1, ar2)
+ assert_array_equal(result, expected)
+
+ def test_isin_both_arrays_are_object(self):
+ ar1 = [None]
+ ar2 = np.array([None] * 10)
+ expected = np.array([True])
+ result = np.isin(ar1, ar2)
+ assert_array_equal(result, expected)
+
+ def test_isin_both_arrays_have_structured_dtype(self):
+ # Test arrays of a structured data type containing an integer field
+ # and a field of dtype `object` allowing for arbitrary Python objects
+ dt = np.dtype([('field1', int), ('field2', object)])
+ ar1 = np.array([(1, None)], dtype=dt)
+ ar2 = np.array([(1, None)] * 10, dtype=dt)
+ expected = np.array([True])
+ result = np.isin(ar1, ar2)
+ assert_array_equal(result, expected)
+
+ def test_isin_with_arrays_containing_tuples(self):
+ ar1 = np.array([(1,), 2], dtype=object)
+ ar2 = np.array([(1,), 2], dtype=object)
+ expected = np.array([True, True])
+ result = np.isin(ar1, ar2)
+ assert_array_equal(result, expected)
+ result = np.isin(ar1, ar2, invert=True)
+ assert_array_equal(result, np.invert(expected))
+
+ # An integer is appended to the end of the array so that the
+ # array constructor builds the array with tuples as elements;
+ # the integer is sliced off once the array is created. This
+ # works around a bug in the array constructor that mishandles
+ # arrays of tuples.
+ ar1 = np.array([(1,), (2, 1), 1], dtype=object)
+ ar1 = ar1[:-1]
+ ar2 = np.array([(1,), (2, 1), 1], dtype=object)
+ ar2 = ar2[:-1]
+ expected = np.array([True, True])
+ result = np.isin(ar1, ar2)
+ assert_array_equal(result, expected)
+ result = np.isin(ar1, ar2, invert=True)
+ assert_array_equal(result, np.invert(expected))
+
+ ar1 = np.array([(1,), (2, 3), 1], dtype=object)
+ ar1 = ar1[:-1]
+ ar2 = np.array([(1,), 2], dtype=object)
+ expected = np.array([True, False])
+ result = np.isin(ar1, ar2)
+ assert_array_equal(result, expected)
+ result = np.isin(ar1, ar2, invert=True)
+ assert_array_equal(result, np.invert(expected))
+
+ def test_isin_errors(self):
+ """Test that isin raises expected errors."""
+
+ # Error 1: `kind` is not one of 'sort' 'table' or None.
+ ar1 = np.array([1, 2, 3, 4, 5])
+ ar2 = np.array([2, 4, 6, 8, 10])
+ assert_raises(ValueError, isin, ar1, ar2, kind='quicksort')
+
+ # Error 2: `kind="table"` does not work for non-integral arrays.
+ obj_ar1 = np.array([1, 'a', 3, 'b', 5], dtype=object)
+ obj_ar2 = np.array([1, 'a', 3, 'b', 5], dtype=object)
+ assert_raises(ValueError, isin, obj_ar1, obj_ar2, kind='table')
+
+ for dtype in [np.int32, np.int64]:
+ ar1 = np.array([-1, 2, 3, 4, 5], dtype=dtype)
+ # The range of this array will overflow:
+ overflow_ar2 = np.array([-1, np.iinfo(dtype).max], dtype=dtype)
+
+ # Error 3: `kind="table"` will trigger a runtime error
+ # if there is an integer overflow expected when computing the
+ # range of ar2
+ assert_raises(
+ RuntimeError,
+ isin, ar1, overflow_ar2, kind='table'
+ )
+
+ # Non-error: `kind=None` will *not* trigger a runtime error
+ # if there is an integer overflow, it will switch to
+ # the `sort` algorithm.
+ result = np.isin(ar1, overflow_ar2, kind=None)
+ assert_array_equal(result, [True] + [False] * 4)
+ result = np.isin(ar1, overflow_ar2, kind='sort')
+ assert_array_equal(result, [True] + [False] * 4)
+
+ def test_union1d(self):
+ a = np.array([5, 4, 7, 1, 2])
+ b = np.array([2, 4, 3, 3, 2, 1, 5])
+
+ ec = np.array([1, 2, 3, 4, 5, 7])
+ c = union1d(a, b)
+ assert_array_equal(c, ec)
+
+ # Tests gh-10340, arguments to union1d should be
+ # flattened if they are not already 1D
+ x = np.array([[0, 1, 2], [3, 4, 5]])
+ y = np.array([0, 1, 2, 3, 4])
+ ez = np.array([0, 1, 2, 3, 4, 5])
+ z = union1d(x, y)
+ assert_array_equal(z, ez)
+
+ assert_array_equal([], union1d([], []))
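+
+ # ``union1d`` flattens its inputs before taking the union, which is the
+ # gh-10340 behavior pinned down above (illustration only):
+ #
+ #   >>> import numpy as np
+ #   >>> np.union1d([[0, 1], [2, 3]], [1, 4])
+ #   array([0, 1, 2, 3, 4])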
+
+ def test_setdiff1d(self):
+ a = np.array([6, 5, 4, 7, 1, 2, 7, 4])
+ b = np.array([2, 4, 3, 3, 2, 1, 5])
+
+ ec = np.array([6, 7])
+ c = setdiff1d(a, b)
+ assert_array_equal(c, ec)
+
+ a = np.arange(21)
+ b = np.arange(19)
+ ec = np.array([19, 20])
+ c = setdiff1d(a, b)
+ assert_array_equal(c, ec)
+
+ assert_array_equal([], setdiff1d([], []))
+ a = np.array((), np.uint32)
+ assert_equal(setdiff1d(a, []).dtype, np.uint32)
+
+ def test_setdiff1d_unique(self):
+ a = np.array([3, 2, 1])
+ b = np.array([7, 5, 2])
+ expected = np.array([3, 1])
+ actual = setdiff1d(a, b, assume_unique=True)
+ assert_equal(actual, expected)
+
+ def test_setdiff1d_char_array(self):
+ a = np.array(['a', 'b', 'c'])
+ b = np.array(['a', 'b', 's'])
+ assert_array_equal(setdiff1d(a, b), np.array(['c']))
+
+ def test_manyways(self):
+ a = np.array([5, 7, 1, 2, 8])
+ b = np.array([9, 8, 2, 4, 3, 1, 5])
+
+ c1 = setxor1d(a, b)
+ aux1 = intersect1d(a, b)
+ aux2 = union1d(a, b)
+ c2 = setdiff1d(aux2, aux1)
+ assert_array_equal(c1, c2)
+
+
+class TestUnique:
+
+ def check_all(self, a, b, i1, i2, c, dt):
+ base_msg = 'check {0} failed for type {1}'
+
+ msg = base_msg.format('values', dt)
+ v = unique(a)
+ assert_array_equal(v, b, msg)
+ assert type(v) == type(b)
+
+ msg = base_msg.format('return_index', dt)
+ v, j = unique(a, True, False, False)
+ assert_array_equal(v, b, msg)
+ assert_array_equal(j, i1, msg)
+ assert type(v) == type(b)
+
+ msg = base_msg.format('return_inverse', dt)
+ v, j = unique(a, False, True, False)
+ assert_array_equal(v, b, msg)
+ assert_array_equal(j, i2, msg)
+ assert type(v) == type(b)
+
+ msg = base_msg.format('return_counts', dt)
+ v, j = unique(a, False, False, True)
+ assert_array_equal(v, b, msg)
+ assert_array_equal(j, c, msg)
+ assert type(v) == type(b)
+
+ msg = base_msg.format('return_index and return_inverse', dt)
+ v, j1, j2 = unique(a, True, True, False)
+ assert_array_equal(v, b, msg)
+ assert_array_equal(j1, i1, msg)
+ assert_array_equal(j2, i2, msg)
+ assert type(v) == type(b)
+
+ msg = base_msg.format('return_index and return_counts', dt)
+ v, j1, j2 = unique(a, True, False, True)
+ assert_array_equal(v, b, msg)
+ assert_array_equal(j1, i1, msg)
+ assert_array_equal(j2, c, msg)
+ assert type(v) == type(b)
+
+ msg = base_msg.format('return_inverse and return_counts', dt)
+ v, j1, j2 = unique(a, False, True, True)
+ assert_array_equal(v, b, msg)
+ assert_array_equal(j1, i2, msg)
+ assert_array_equal(j2, c, msg)
+ assert type(v) == type(b)
+
+ msg = base_msg.format(('return_index, return_inverse '
+ 'and return_counts'), dt)
+ v, j1, j2, j3 = unique(a, True, True, True)
+ assert_array_equal(v, b, msg)
+ assert_array_equal(j1, i1, msg)
+ assert_array_equal(j2, i2, msg)
+ assert_array_equal(j3, c, msg)
+ assert type(v) == type(b)
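+
+ # The four optional returns checked above compose like this (illustration
+ # only): ``values[inverse]`` reconstructs the input, and indexing the
+ # input with ``index`` recovers ``values``.
+ #
+ #   >>> import numpy as np
+ #   >>> np.unique([2, 1, 2], return_index=True, return_inverse=True,
+ #   ...           return_counts=True)
+ #   (array([1, 2]), array([1, 0]), array([1, 0, 1]), array([1, 2]))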
+
+ def get_types(self):
+ types = []
+ types.extend(np.typecodes['AllInteger'])
+ types.extend(np.typecodes['AllFloat'])
+ types.append('datetime64[D]')
+ types.append('timedelta64[D]')
+ return types
+
+ def test_unique_1d(self):
+
+ a = [5, 7, 1, 2, 1, 5, 7] * 10
+ b = [1, 2, 5, 7]
+ i1 = [2, 3, 0, 1]
+ i2 = [2, 3, 0, 1, 0, 2, 3] * 10
+ c = np.multiply([2, 1, 2, 2], 10)
+
+ # test for numeric arrays
+ types = self.get_types()
+ for dt in types:
+ aa = np.array(a, dt)
+ bb = np.array(b, dt)
+ self.check_all(aa, bb, i1, i2, c, dt)
+
+ # test for object arrays
+ dt = 'O'
+ aa = np.empty(len(a), dt)
+ aa[:] = a
+ bb = np.empty(len(b), dt)
+ bb[:] = b
+ self.check_all(aa, bb, i1, i2, c, dt)
+
+ # test for structured arrays
+ dt = [('', 'i'), ('', 'i')]
+ aa = np.array(list(zip(a, a)), dt)
+ bb = np.array(list(zip(b, b)), dt)
+ self.check_all(aa, bb, i1, i2, c, dt)
+
+ # test for ticket #2799
+ aa = [1. + 0.j, 1 - 1.j, 1]
+ assert_array_equal(np.unique(aa), [1. - 1.j, 1. + 0.j])
+
+ # test for ticket #4785
+ a = [(1, 2), (1, 2), (2, 3)]
+ unq = [1, 2, 3]
+ inv = [[0, 1], [0, 1], [1, 2]]
+ a1 = unique(a)
+ assert_array_equal(a1, unq)
+ a2, a2_inv = unique(a, return_inverse=True)
+ assert_array_equal(a2, unq)
+ assert_array_equal(a2_inv, inv)
+
+ # test for chararrays with return_inverse (gh-5099)
+ a = np.char.chararray(5)
+ a[...] = ''
+ a2, a2_inv = np.unique(a, return_inverse=True)
+ assert_array_equal(a2_inv, np.zeros(5))
+
+ # test for ticket #9137
+ a = []
+ a1_idx = np.unique(a, return_index=True)[1]
+ a2_inv = np.unique(a, return_inverse=True)[1]
+ a3_idx, a3_inv = np.unique(a, return_index=True,
+ return_inverse=True)[1:]
+ assert_equal(a1_idx.dtype, np.intp)
+ assert_equal(a2_inv.dtype, np.intp)
+ assert_equal(a3_idx.dtype, np.intp)
+ assert_equal(a3_inv.dtype, np.intp)
+
+ # test for ticket 2111 - float
+ a = [2.0, np.nan, 1.0, np.nan]
+ ua = [1.0, 2.0, np.nan]
+ ua_idx = [2, 0, 1]
+ ua_inv = [1, 2, 0, 2]
+ ua_cnt = [1, 1, 2]
+ assert_equal(np.unique(a), ua)
+ assert_equal(np.unique(a, return_index=True), (ua, ua_idx))
+ assert_equal(np.unique(a, return_inverse=True), (ua, ua_inv))
+ assert_equal(np.unique(a, return_counts=True), (ua, ua_cnt))
+
+ # test for ticket 2111 - complex
+ a = [2.0 - 1j, np.nan, 1.0 + 1j, complex(0.0, np.nan), complex(1.0, np.nan)]
+ ua = [1.0 + 1j, 2.0 - 1j, complex(0.0, np.nan)]
+ ua_idx = [2, 0, 3]
+ ua_inv = [1, 2, 0, 2, 2]
+ ua_cnt = [1, 1, 3]
+ assert_equal(np.unique(a), ua)
+ assert_equal(np.unique(a, return_index=True), (ua, ua_idx))
+ assert_equal(np.unique(a, return_inverse=True), (ua, ua_inv))
+ assert_equal(np.unique(a, return_counts=True), (ua, ua_cnt))
+
+ # test for ticket 2111 - datetime64
+ nat = np.datetime64('nat')
+ a = [np.datetime64('2020-12-26'), nat, np.datetime64('2020-12-24'), nat]
+ ua = [np.datetime64('2020-12-24'), np.datetime64('2020-12-26'), nat]
+ ua_idx = [2, 0, 1]
+ ua_inv = [1, 2, 0, 2]
+ ua_cnt = [1, 1, 2]
+ assert_equal(np.unique(a), ua)
+ assert_equal(np.unique(a, return_index=True), (ua, ua_idx))
+ assert_equal(np.unique(a, return_inverse=True), (ua, ua_inv))
+ assert_equal(np.unique(a, return_counts=True), (ua, ua_cnt))
+
+ # test for ticket 2111 - timedelta
+ nat = np.timedelta64('nat')
+ a = [np.timedelta64(1, 'D'), nat, np.timedelta64(1, 'h'), nat]
+ ua = [np.timedelta64(1, 'h'), np.timedelta64(1, 'D'), nat]
+ ua_idx = [2, 0, 1]
+ ua_inv = [1, 2, 0, 2]
+ ua_cnt = [1, 1, 2]
+ assert_equal(np.unique(a), ua)
+ assert_equal(np.unique(a, return_index=True), (ua, ua_idx))
+ assert_equal(np.unique(a, return_inverse=True), (ua, ua_inv))
+ assert_equal(np.unique(a, return_counts=True), (ua, ua_cnt))
+
+ # test for gh-19300
+ all_nans = [np.nan] * 4
+ ua = [np.nan]
+ ua_idx = [0]
+ ua_inv = [0, 0, 0, 0]
+ ua_cnt = [4]
+ assert_equal(np.unique(all_nans), ua)
+ assert_equal(np.unique(all_nans, return_index=True), (ua, ua_idx))
+ assert_equal(np.unique(all_nans, return_inverse=True), (ua, ua_inv))
+ assert_equal(np.unique(all_nans, return_counts=True), (ua, ua_cnt))
+
+ def test_unique_zero_sized(self):
+ # test for zero-sized arrays
+ for dt in self.get_types():
+ a = np.array([], dt)
+ b = np.array([], dt)
+ i1 = np.array([], np.int64)
+ i2 = np.array([], np.int64)
+ c = np.array([], np.int64)
+ self.check_all(a, b, i1, i2, c, dt)
+
+ def test_unique_subclass(self):
+ class Subclass(np.ndarray):
+ pass
+
+ i1 = [2, 3, 0, 1]
+ i2 = [2, 3, 0, 1, 0, 2, 3] * 10
+ c = np.multiply([2, 1, 2, 2], 10)
+
+ # test for numeric arrays
+ types = self.get_types()
+ for dt in types:
+ a = np.array([5, 7, 1, 2, 1, 5, 7] * 10, dtype=dt)
+ b = np.array([1, 2, 5, 7], dtype=dt)
+ aa = Subclass(a.shape, dtype=dt, buffer=a)
+ bb = Subclass(b.shape, dtype=dt, buffer=b)
+ self.check_all(aa, bb, i1, i2, c, dt)
+
+ @pytest.mark.parametrize("arg", ["return_index", "return_inverse", "return_counts"])
+ def test_unsupported_hash_based(self, arg):
+ """These currently never use the hash-based solution. However,
+ it seems easier to just allow it.
+
+ When the hash-based solution is added, this test should fail and be
+ replaced with something more comprehensive.
+ """
+ a = np.array([1, 5, 2, 3, 4, 8, 199, 1, 3, 5])
+
+ res_not_sorted = np.unique([1, 1], sorted=False, **{arg: True})
+ res_sorted = np.unique([1, 1], sorted=True, **{arg: True})
+ # The following should fail without first sorting `res_not_sorted`.
+ for arr, expected in zip(res_not_sorted, res_sorted):
+ assert_array_equal(arr, expected)
+
+ def test_unique_axis_errors(self):
+ assert_raises(TypeError, self._run_axis_tests, object)
+ assert_raises(TypeError, self._run_axis_tests,
+ [('a', int), ('b', object)])
+
+ assert_raises(AxisError, unique, np.arange(10), axis=2)
+ assert_raises(AxisError, unique, np.arange(10), axis=-2)
+
+ def test_unique_axis_list(self):
+ msg = "Unique failed on list of lists"
+ inp = [[0, 1, 0], [0, 1, 0]]
+ inp_arr = np.asarray(inp)
+ assert_array_equal(unique(inp, axis=0), unique(inp_arr, axis=0), msg)
+ assert_array_equal(unique(inp, axis=1), unique(inp_arr, axis=1), msg)
+
+ def test_unique_axis(self):
+ types = []
+ types.extend(np.typecodes['AllInteger'])
+ types.extend(np.typecodes['AllFloat'])
+ types.append('datetime64[D]')
+ types.append('timedelta64[D]')
+ types.append([('a', int), ('b', int)])
+ types.append([('a', int), ('b', float)])
+
+ for dtype in types:
+ self._run_axis_tests(dtype)
+
+ msg = 'Non-bitwise-equal booleans test failed'
+ data = np.arange(10, dtype=np.uint8).reshape(-1, 2).view(bool)
+ result = np.array([[False, True], [True, True]], dtype=bool)
+ assert_array_equal(unique(data, axis=0), result, msg)
+
+ msg = 'Negative zero equality test failed'
+ data = np.array([[-0.0, 0.0], [0.0, -0.0], [-0.0, 0.0], [0.0, -0.0]])
+ result = np.array([[-0.0, 0.0]])
+ assert_array_equal(unique(data, axis=0), result, msg)
+
+ @pytest.mark.parametrize("axis", [0, -1])
+ def test_unique_1d_with_axis(self, axis):
+ x = np.array([4, 3, 2, 3, 2, 1, 2, 2])
+ uniq = unique(x, axis=axis)
+ assert_array_equal(uniq, [1, 2, 3, 4])
+
+ @pytest.mark.parametrize("axis", [None, 0, -1])
+ def test_unique_inverse_with_axis(self, axis):
+ x = np.array([[4, 4, 3], [2, 2, 1], [2, 2, 1], [4, 4, 3]])
+ uniq, inv = unique(x, return_inverse=True, axis=axis)
+ assert_equal(inv.ndim, x.ndim if axis is None else 1)
+ assert_array_equal(x, np.take(uniq, inv, axis=axis))
+
+ def test_unique_axis_zeros(self):
+ # issue 15559
+ single_zero = np.empty(shape=(2, 0), dtype=np.int8)
+ uniq, idx, inv, cnt = unique(single_zero, axis=0, return_index=True,
+ return_inverse=True, return_counts=True)
+
+ # there's 1 element of shape (0,) along axis 0
+ assert_equal(uniq.dtype, single_zero.dtype)
+ assert_array_equal(uniq, np.empty(shape=(1, 0)))
+ assert_array_equal(idx, np.array([0]))
+ assert_array_equal(inv, np.array([0, 0]))
+ assert_array_equal(cnt, np.array([2]))
+
+        # there are 0 elements of shape (2,) along axis 1
+ uniq, idx, inv, cnt = unique(single_zero, axis=1, return_index=True,
+ return_inverse=True, return_counts=True)
+
+ assert_equal(uniq.dtype, single_zero.dtype)
+ assert_array_equal(uniq, np.empty(shape=(2, 0)))
+ assert_array_equal(idx, np.array([]))
+ assert_array_equal(inv, np.array([]))
+ assert_array_equal(cnt, np.array([]))
+
+ # test a "complicated" shape
+ shape = (0, 2, 0, 3, 0, 4, 0)
+ multiple_zeros = np.empty(shape=shape)
+ for axis in range(len(shape)):
+ expected_shape = list(shape)
+ if shape[axis] == 0:
+ expected_shape[axis] = 0
+ else:
+ expected_shape[axis] = 1
+
+ assert_array_equal(unique(multiple_zeros, axis=axis),
+ np.empty(shape=expected_shape))
+
+ def test_unique_masked(self):
+ # issue 8664
+ x = np.array([64, 0, 1, 2, 3, 63, 63, 0, 0, 0, 1, 2, 0, 63, 0],
+ dtype='uint8')
+ y = np.ma.masked_equal(x, 0)
+
+ v = np.unique(y)
+ v2, i, c = np.unique(y, return_index=True, return_counts=True)
+
+ msg = 'Unique returned different results when asked for index'
+ assert_array_equal(v.data, v2.data, msg)
+ assert_array_equal(v.mask, v2.mask, msg)
+
+ def test_unique_sort_order_with_axis(self):
+ # These tests fail if sorting along axis is done by treating subarrays
+ # as unsigned byte strings. See gh-10495.
+ fmt = "sort order incorrect for integer type '%s'"
+ for dt in 'bhilq':
+ a = np.array([[-1], [0]], dt)
+ b = np.unique(a, axis=0)
+ assert_array_equal(a, b, fmt % dt)
+
+ def _run_axis_tests(self, dtype):
+ data = np.array([[0, 1, 0, 0],
+ [1, 0, 0, 0],
+ [0, 1, 0, 0],
+ [1, 0, 0, 0]]).astype(dtype)
+
+ msg = 'Unique with 1d array and axis=0 failed'
+ result = np.array([0, 1])
+ assert_array_equal(unique(data), result.astype(dtype), msg)
+
+ msg = 'Unique with 2d array and axis=0 failed'
+ result = np.array([[0, 1, 0, 0], [1, 0, 0, 0]])
+ assert_array_equal(unique(data, axis=0), result.astype(dtype), msg)
+
+ msg = 'Unique with 2d array and axis=1 failed'
+ result = np.array([[0, 0, 1], [0, 1, 0], [0, 0, 1], [0, 1, 0]])
+ assert_array_equal(unique(data, axis=1), result.astype(dtype), msg)
+
+ msg = 'Unique with 3d array and axis=2 failed'
+ data3d = np.array([[[1, 1],
+ [1, 0]],
+ [[0, 1],
+ [0, 0]]]).astype(dtype)
+ result = np.take(data3d, [1, 0], axis=2)
+ assert_array_equal(unique(data3d, axis=2), result, msg)
+
+ uniq, idx, inv, cnt = unique(data, axis=0, return_index=True,
+ return_inverse=True, return_counts=True)
+ msg = "Unique's return_index=True failed with axis=0"
+ assert_array_equal(data[idx], uniq, msg)
+ msg = "Unique's return_inverse=True failed with axis=0"
+        assert_array_equal(np.take(uniq, inv, axis=0), data, msg)
+ msg = "Unique's return_counts=True failed with axis=0"
+ assert_array_equal(cnt, np.array([2, 2]), msg)
+
+ uniq, idx, inv, cnt = unique(data, axis=1, return_index=True,
+ return_inverse=True, return_counts=True)
+ msg = "Unique's return_index=True failed with axis=1"
+        assert_array_equal(data[:, idx], uniq, msg)
+        msg = "Unique's return_inverse=True failed with axis=1"
+        assert_array_equal(np.take(uniq, inv, axis=1), data, msg)
+ msg = "Unique's return_counts=True failed with axis=1"
+ assert_array_equal(cnt, np.array([2, 1, 1]), msg)
+
+ def test_unique_nanequals(self):
+ # issue 20326
+ a = np.array([1, 1, np.nan, np.nan, np.nan])
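+        # with the default equal_nan=True all NaNs collapse into a single
+        # entry; equal_nan=False keeps every NaN distinct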
+ unq = np.unique(a)
+ not_unq = np.unique(a, equal_nan=False)
+ assert_array_equal(unq, np.array([1, np.nan]))
+ assert_array_equal(not_unq, np.array([1, np.nan, np.nan, np.nan]))
+
+ def test_unique_array_api_functions(self):
+ arr = np.array([np.nan, 1, 4, 1, 3, 4, np.nan, 5, 1])
+
+ for res_unique_array_api, res_unique in [
+ (
+ np.unique_values(arr),
+ np.unique(arr, equal_nan=False)
+ ),
+ (
+ np.unique_counts(arr),
+ np.unique(arr, return_counts=True, equal_nan=False)
+ ),
+ (
+ np.unique_inverse(arr),
+ np.unique(arr, return_inverse=True, equal_nan=False)
+ ),
+ (
+ np.unique_all(arr),
+ np.unique(
+ arr,
+ return_index=True,
+ return_inverse=True,
+ return_counts=True,
+ equal_nan=False
+ )
+ )
+ ]:
+ assert len(res_unique_array_api) == len(res_unique)
+ for actual, expected in zip(res_unique_array_api, res_unique):
+ assert_array_equal(actual, expected)
+
+ def test_unique_inverse_shape(self):
+ # Regression test for https://github.com/numpy/numpy/issues/25552
+ arr = np.array([[1, 2, 3], [2, 3, 1]])
+ expected_values, expected_inverse = np.unique(arr, return_inverse=True)
+ expected_inverse = expected_inverse.reshape(arr.shape)
+ for func in np.unique_inverse, np.unique_all:
+ result = func(arr)
+ assert_array_equal(expected_values, result.values)
+ assert_array_equal(expected_inverse, result.inverse_indices)
+ assert_array_equal(arr, result.values[result.inverse_indices])
+
+ @pytest.mark.parametrize(
+ 'data',
+ [[[1, 1, 1],
+ [1, 1, 1]],
+ [1, 3, 2],
+ 1],
+ )
+ @pytest.mark.parametrize('transpose', [False, True])
+ @pytest.mark.parametrize('dtype', [np.int32, np.float64])
+ def test_unique_with_matrix(self, data, transpose, dtype):
+ mat = np.matrix(data).astype(dtype)
+ if transpose:
+ mat = mat.T
+ u = np.unique(mat)
+ expected = np.unique(np.asarray(mat))
+ assert_array_equal(u, expected, strict=True)
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_arrayterator.py b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_arrayterator.py
new file mode 100644
index 0000000..800c9a2
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_arrayterator.py
@@ -0,0 +1,46 @@
+from functools import reduce
+from operator import mul
+
+import numpy as np
+from numpy.lib import Arrayterator
+from numpy.random import randint
+from numpy.testing import assert_
+
+
+def test():
+ np.random.seed(np.arange(10))
+
+ # Create a random array
+ ndims = randint(5) + 1
+    shape = tuple(randint(10) + 1 for _ in range(ndims))
+ els = reduce(mul, shape)
+ a = np.arange(els)
+ a.shape = shape
+
+ buf_size = randint(2 * els)
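+    # buf_size may come out as 0; the checks below treat that like "no
+    # limit", hence the ``buf_size or els`` bound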
+ b = Arrayterator(a, buf_size)
+
+ # Check that each block has at most ``buf_size`` elements
+ for block in b:
+ assert_(len(block.flat) <= (buf_size or els))
+
+ # Check that all elements are iterated correctly
+ assert_(list(b.flat) == list(a.flat))
+
+ # Slice arrayterator
+ start = [randint(dim) for dim in shape]
+ stop = [randint(dim) + 1 for dim in shape]
+ step = [randint(dim) + 1 for dim in shape]
+ slice_ = tuple(slice(*t) for t in zip(start, stop, step))
+ c = b[slice_]
+ d = a[slice_]
+
+ # Check that each block has at most ``buf_size`` elements
+ for block in c:
+ assert_(len(block.flat) <= (buf_size or els))
+
+ # Check that the arrayterator is sliced correctly
+ assert_(np.all(c.__array__() == d))
+
+ # Check that all elements are iterated correctly
+ assert_(list(c.flat) == list(d.flat))
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_format.py b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_format.py
new file mode 100644
index 0000000..d805d34
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_format.py
@@ -0,0 +1,1054 @@
+# doctest
+r''' Test the .npy file format.
+
+Set up:
+
+    >>> import sys
+    >>> import numpy as np
+    >>> from io import BytesIO
+ >>> from numpy.lib import format
+ >>>
+ >>> scalars = [
+ ... np.uint8,
+ ... np.int8,
+ ... np.uint16,
+ ... np.int16,
+ ... np.uint32,
+ ... np.int32,
+ ... np.uint64,
+ ... np.int64,
+ ... np.float32,
+ ... np.float64,
+ ... np.complex64,
+ ... np.complex128,
+ ... object,
+ ... ]
+ >>>
+ >>> basic_arrays = []
+ >>>
+ >>> for scalar in scalars:
+ ... for endian in '<>':
+ ... dtype = np.dtype(scalar).newbyteorder(endian)
+ ... basic = np.arange(15).astype(dtype)
+ ... basic_arrays.extend([
+ ... np.array([], dtype=dtype),
+ ... np.array(10, dtype=dtype),
+ ... basic,
+ ... basic.reshape((3,5)),
+ ... basic.reshape((3,5)).T,
+ ... basic.reshape((3,5))[::-1,::2],
+ ... ])
+ ...
+ >>>
+ >>> Pdescr = [
+ ... ('x', 'i4', (2,)),
+ ... ('y', 'f8', (2, 2)),
+ ... ('z', 'u1')]
+ >>>
+ >>>
+ >>> PbufferT = [
+ ... ([3,2], [[6.,4.],[6.,4.]], 8),
+ ... ([4,3], [[7.,5.],[7.,5.]], 9),
+ ... ]
+ >>>
+ >>>
+ >>> Ndescr = [
+ ... ('x', 'i4', (2,)),
+ ... ('Info', [
+ ... ('value', 'c16'),
+ ... ('y2', 'f8'),
+ ... ('Info2', [
+ ... ('name', 'S2'),
+ ... ('value', 'c16', (2,)),
+ ... ('y3', 'f8', (2,)),
+ ... ('z3', 'u4', (2,))]),
+ ... ('name', 'S2'),
+ ... ('z2', 'b1')]),
+ ... ('color', 'S2'),
+ ... ('info', [
+ ... ('Name', 'U8'),
+ ... ('Value', 'c16')]),
+ ... ('y', 'f8', (2, 2)),
+ ... ('z', 'u1')]
+ >>>
+ >>>
+ >>> NbufferT = [
+ ... ([3,2], (6j, 6., ('nn', [6j,4j], [6.,4.], [1,2]), 'NN', True), 'cc', ('NN', 6j), [[6.,4.],[6.,4.]], 8),
+ ... ([4,3], (7j, 7., ('oo', [7j,5j], [7.,5.], [2,1]), 'OO', False), 'dd', ('OO', 7j), [[7.,5.],[7.,5.]], 9),
+ ... ]
+ >>>
+ >>>
+ >>> record_arrays = [
+ ... np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('<')),
+ ... np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('<')),
+ ... np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('>')),
+ ... np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('>')),
+ ... ]
+
+Test the magic string writing.
+
+ >>> format.magic(1, 0)
+ '\x93NUMPY\x01\x00'
+ >>> format.magic(0, 0)
+ '\x93NUMPY\x00\x00'
+ >>> format.magic(255, 255)
+ '\x93NUMPY\xff\xff'
+ >>> format.magic(2, 5)
+ '\x93NUMPY\x02\x05'
+
+Test the magic string reading.
+
+ >>> format.read_magic(BytesIO(format.magic(1, 0)))
+ (1, 0)
+ >>> format.read_magic(BytesIO(format.magic(0, 0)))
+ (0, 0)
+ >>> format.read_magic(BytesIO(format.magic(255, 255)))
+ (255, 255)
+ >>> format.read_magic(BytesIO(format.magic(2, 5)))
+ (2, 5)
+
+Test the header writing.
+
+ >>> for arr in basic_arrays + record_arrays:
+ ... f = BytesIO()
+    ... format.write_array_header_1_0(f, arr)  # XXX: arr is not a dict; .items() gets called on it
+ ... print(repr(f.getvalue()))
+ ...
+ "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (0,)} \n"
+ "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': ()} \n"
+ "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (15,)} \n"
+ "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (3, 5)} \n"
+ "F\x00{'descr': '|u1', 'fortran_order': True, 'shape': (5, 3)} \n"
+ "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (3, 3)} \n"
+ "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (0,)} \n"
+ "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': ()} \n"
+ "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (15,)} \n"
+ "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (3, 5)} \n"
+ "F\x00{'descr': '|u1', 'fortran_order': True, 'shape': (5, 3)} \n"
+ "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (3, 3)} \n"
+ "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (0,)} \n"
+ "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': ()} \n"
+ "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (15,)} \n"
+ "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (3, 5)} \n"
+ "F\x00{'descr': '|i1', 'fortran_order': True, 'shape': (5, 3)} \n"
+ "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (3, 3)} \n"
+ "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (0,)} \n"
+ "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': ()} \n"
+ "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (15,)} \n"
+ "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (3, 5)} \n"
+ "F\x00{'descr': '|i1', 'fortran_order': True, 'shape': (5, 3)} \n"
+ "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (3, 3)} \n"
+ "F\x00{'descr': '<u2', 'fortran_order': False, 'shape': (0,)} \n"
+ "F\x00{'descr': '<u2', 'fortran_order': False, 'shape': ()} \n"
+ "F\x00{'descr': '<u2', 'fortran_order': False, 'shape': (15,)} \n"
+ "F\x00{'descr': '<u2', 'fortran_order': False, 'shape': (3, 5)} \n"
+ "F\x00{'descr': '<u2', 'fortran_order': True, 'shape': (5, 3)} \n"
+ "F\x00{'descr': '<u2', 'fortran_order': False, 'shape': (3, 3)} \n"
+ "F\x00{'descr': '>u2', 'fortran_order': False, 'shape': (0,)} \n"
+ "F\x00{'descr': '>u2', 'fortran_order': False, 'shape': ()} \n"
+ "F\x00{'descr': '>u2', 'fortran_order': False, 'shape': (15,)} \n"
+ "F\x00{'descr': '>u2', 'fortran_order': False, 'shape': (3, 5)} \n"
+ "F\x00{'descr': '>u2', 'fortran_order': True, 'shape': (5, 3)} \n"
+ "F\x00{'descr': '>u2', 'fortran_order': False, 'shape': (3, 3)} \n"
+ "F\x00{'descr': '<i2', 'fortran_order': False, 'shape': (0,)} \n"
+ "F\x00{'descr': '<i2', 'fortran_order': False, 'shape': ()} \n"
+ "F\x00{'descr': '<i2', 'fortran_order': False, 'shape': (15,)} \n"
+ "F\x00{'descr': '<i2', 'fortran_order': False, 'shape': (3, 5)} \n"
+ "F\x00{'descr': '<i2', 'fortran_order': True, 'shape': (5, 3)} \n"
+ "F\x00{'descr': '<i2', 'fortran_order': False, 'shape': (3, 3)} \n"
+ "F\x00{'descr': '>i2', 'fortran_order': False, 'shape': (0,)} \n"
+ "F\x00{'descr': '>i2', 'fortran_order': False, 'shape': ()} \n"
+ "F\x00{'descr': '>i2', 'fortran_order': False, 'shape': (15,)} \n"
+ "F\x00{'descr': '>i2', 'fortran_order': False, 'shape': (3, 5)} \n"
+ "F\x00{'descr': '>i2', 'fortran_order': True, 'shape': (5, 3)} \n"
+ "F\x00{'descr': '>i2', 'fortran_order': False, 'shape': (3, 3)} \n"
+ "F\x00{'descr': '<u4', 'fortran_order': False, 'shape': (0,)} \n"
+ "F\x00{'descr': '<u4', 'fortran_order': False, 'shape': ()} \n"
+ "F\x00{'descr': '<u4', 'fortran_order': False, 'shape': (15,)} \n"
+ "F\x00{'descr': '<u4', 'fortran_order': False, 'shape': (3, 5)} \n"
+ "F\x00{'descr': '<u4', 'fortran_order': True, 'shape': (5, 3)} \n"
+ "F\x00{'descr': '<u4', 'fortran_order': False, 'shape': (3, 3)} \n"
+ "F\x00{'descr': '>u4', 'fortran_order': False, 'shape': (0,)} \n"
+ "F\x00{'descr': '>u4', 'fortran_order': False, 'shape': ()} \n"
+ "F\x00{'descr': '>u4', 'fortran_order': False, 'shape': (15,)} \n"
+ "F\x00{'descr': '>u4', 'fortran_order': False, 'shape': (3, 5)} \n"
+ "F\x00{'descr': '>u4', 'fortran_order': True, 'shape': (5, 3)} \n"
+ "F\x00{'descr': '>u4', 'fortran_order': False, 'shape': (3, 3)} \n"
+ "F\x00{'descr': '<i4', 'fortran_order': False, 'shape': (0,)} \n"
+ "F\x00{'descr': '<i4', 'fortran_order': False, 'shape': ()} \n"
+ "F\x00{'descr': '<i4', 'fortran_order': False, 'shape': (15,)} \n"
+ "F\x00{'descr': '<i4', 'fortran_order': False, 'shape': (3, 5)} \n"
+ "F\x00{'descr': '<i4', 'fortran_order': True, 'shape': (5, 3)} \n"
+ "F\x00{'descr': '<i4', 'fortran_order': False, 'shape': (3, 3)} \n"
+ "F\x00{'descr': '>i4', 'fortran_order': False, 'shape': (0,)} \n"
+ "F\x00{'descr': '>i4', 'fortran_order': False, 'shape': ()} \n"
+ "F\x00{'descr': '>i4', 'fortran_order': False, 'shape': (15,)} \n"
+ "F\x00{'descr': '>i4', 'fortran_order': False, 'shape': (3, 5)} \n"
+ "F\x00{'descr': '>i4', 'fortran_order': True, 'shape': (5, 3)} \n"
+ "F\x00{'descr': '>i4', 'fortran_order': False, 'shape': (3, 3)} \n"
+ "F\x00{'descr': '<u8', 'fortran_order': False, 'shape': (0,)} \n"
+ "F\x00{'descr': '<u8', 'fortran_order': False, 'shape': ()} \n"
+ "F\x00{'descr': '<u8', 'fortran_order': False, 'shape': (15,)} \n"
+ "F\x00{'descr': '<u8', 'fortran_order': False, 'shape': (3, 5)} \n"
+ "F\x00{'descr': '<u8', 'fortran_order': True, 'shape': (5, 3)} \n"
+ "F\x00{'descr': '<u8', 'fortran_order': False, 'shape': (3, 3)} \n"
+ "F\x00{'descr': '>u8', 'fortran_order': False, 'shape': (0,)} \n"
+ "F\x00{'descr': '>u8', 'fortran_order': False, 'shape': ()} \n"
+ "F\x00{'descr': '>u8', 'fortran_order': False, 'shape': (15,)} \n"
+ "F\x00{'descr': '>u8', 'fortran_order': False, 'shape': (3, 5)} \n"
+ "F\x00{'descr': '>u8', 'fortran_order': True, 'shape': (5, 3)} \n"
+ "F\x00{'descr': '>u8', 'fortran_order': False, 'shape': (3, 3)} \n"
+ "F\x00{'descr': '<i8', 'fortran_order': False, 'shape': (0,)} \n"
+ "F\x00{'descr': '<i8', 'fortran_order': False, 'shape': ()} \n"
+ "F\x00{'descr': '<i8', 'fortran_order': False, 'shape': (15,)} \n"
+ "F\x00{'descr': '<i8', 'fortran_order': False, 'shape': (3, 5)} \n"
+ "F\x00{'descr': '<i8', 'fortran_order': True, 'shape': (5, 3)} \n"
+ "F\x00{'descr': '<i8', 'fortran_order': False, 'shape': (3, 3)} \n"
+ "F\x00{'descr': '>i8', 'fortran_order': False, 'shape': (0,)} \n"
+ "F\x00{'descr': '>i8', 'fortran_order': False, 'shape': ()} \n"
+ "F\x00{'descr': '>i8', 'fortran_order': False, 'shape': (15,)} \n"
+ "F\x00{'descr': '>i8', 'fortran_order': False, 'shape': (3, 5)} \n"
+ "F\x00{'descr': '>i8', 'fortran_order': True, 'shape': (5, 3)} \n"
+ "F\x00{'descr': '>i8', 'fortran_order': False, 'shape': (3, 3)} \n"
+ "F\x00{'descr': '<f4', 'fortran_order': False, 'shape': (0,)} \n"
+ "F\x00{'descr': '<f4', 'fortran_order': False, 'shape': ()} \n"
+ "F\x00{'descr': '<f4', 'fortran_order': False, 'shape': (15,)} \n"
+ "F\x00{'descr': '<f4', 'fortran_order': False, 'shape': (3, 5)} \n"
+ "F\x00{'descr': '<f4', 'fortran_order': True, 'shape': (5, 3)} \n"
+ "F\x00{'descr': '<f4', 'fortran_order': False, 'shape': (3, 3)} \n"
+ "F\x00{'descr': '>f4', 'fortran_order': False, 'shape': (0,)} \n"
+ "F\x00{'descr': '>f4', 'fortran_order': False, 'shape': ()} \n"
+ "F\x00{'descr': '>f4', 'fortran_order': False, 'shape': (15,)} \n"
+ "F\x00{'descr': '>f4', 'fortran_order': False, 'shape': (3, 5)} \n"
+ "F\x00{'descr': '>f4', 'fortran_order': True, 'shape': (5, 3)} \n"
+ "F\x00{'descr': '>f4', 'fortran_order': False, 'shape': (3, 3)} \n"
+ "F\x00{'descr': '<f8', 'fortran_order': False, 'shape': (0,)} \n"
+ "F\x00{'descr': '<f8', 'fortran_order': False, 'shape': ()} \n"
+ "F\x00{'descr': '<f8', 'fortran_order': False, 'shape': (15,)} \n"
+ "F\x00{'descr': '<f8', 'fortran_order': False, 'shape': (3, 5)} \n"
+ "F\x00{'descr': '<f8', 'fortran_order': True, 'shape': (5, 3)} \n"
+ "F\x00{'descr': '<f8', 'fortran_order': False, 'shape': (3, 3)} \n"
+ "F\x00{'descr': '>f8', 'fortran_order': False, 'shape': (0,)} \n"
+ "F\x00{'descr': '>f8', 'fortran_order': False, 'shape': ()} \n"
+ "F\x00{'descr': '>f8', 'fortran_order': False, 'shape': (15,)} \n"
+ "F\x00{'descr': '>f8', 'fortran_order': False, 'shape': (3, 5)} \n"
+ "F\x00{'descr': '>f8', 'fortran_order': True, 'shape': (5, 3)} \n"
+ "F\x00{'descr': '>f8', 'fortran_order': False, 'shape': (3, 3)} \n"
+ "F\x00{'descr': '<c8', 'fortran_order': False, 'shape': (0,)} \n"
+ "F\x00{'descr': '<c8', 'fortran_order': False, 'shape': ()} \n"
+ "F\x00{'descr': '<c8', 'fortran_order': False, 'shape': (15,)} \n"
+ "F\x00{'descr': '<c8', 'fortran_order': False, 'shape': (3, 5)} \n"
+ "F\x00{'descr': '<c8', 'fortran_order': True, 'shape': (5, 3)} \n"
+ "F\x00{'descr': '<c8', 'fortran_order': False, 'shape': (3, 3)} \n"
+ "F\x00{'descr': '>c8', 'fortran_order': False, 'shape': (0,)} \n"
+ "F\x00{'descr': '>c8', 'fortran_order': False, 'shape': ()} \n"
+ "F\x00{'descr': '>c8', 'fortran_order': False, 'shape': (15,)} \n"
+ "F\x00{'descr': '>c8', 'fortran_order': False, 'shape': (3, 5)} \n"
+ "F\x00{'descr': '>c8', 'fortran_order': True, 'shape': (5, 3)} \n"
+ "F\x00{'descr': '>c8', 'fortran_order': False, 'shape': (3, 3)} \n"
+ "F\x00{'descr': '<c16', 'fortran_order': False, 'shape': (0,)} \n"
+ "F\x00{'descr': '<c16', 'fortran_order': False, 'shape': ()} \n"
+ "F\x00{'descr': '<c16', 'fortran_order': False, 'shape': (15,)} \n"
+ "F\x00{'descr': '<c16', 'fortran_order': False, 'shape': (3, 5)} \n"
+ "F\x00{'descr': '<c16', 'fortran_order': True, 'shape': (5, 3)} \n"
+ "F\x00{'descr': '<c16', 'fortran_order': False, 'shape': (3, 3)} \n"
+ "F\x00{'descr': '>c16', 'fortran_order': False, 'shape': (0,)} \n"
+ "F\x00{'descr': '>c16', 'fortran_order': False, 'shape': ()} \n"
+ "F\x00{'descr': '>c16', 'fortran_order': False, 'shape': (15,)} \n"
+ "F\x00{'descr': '>c16', 'fortran_order': False, 'shape': (3, 5)} \n"
+ "F\x00{'descr': '>c16', 'fortran_order': True, 'shape': (5, 3)} \n"
+ "F\x00{'descr': '>c16', 'fortran_order': False, 'shape': (3, 3)} \n"
+ "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (0,)} \n"
+ "F\x00{'descr': 'O', 'fortran_order': False, 'shape': ()} \n"
+ "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (15,)} \n"
+ "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (3, 5)} \n"
+ "F\x00{'descr': 'O', 'fortran_order': True, 'shape': (5, 3)} \n"
+ "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (3, 3)} \n"
+ "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (0,)} \n"
+ "F\x00{'descr': 'O', 'fortran_order': False, 'shape': ()} \n"
+ "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (15,)} \n"
+ "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (3, 5)} \n"
+ "F\x00{'descr': 'O', 'fortran_order': True, 'shape': (5, 3)} \n"
+ "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (3, 3)} \n"
+ "v\x00{'descr': [('x', '<i4', (2,)), ('y', '<f8', (2, 2)), ('z', '|u1')],\n 'fortran_order': False,\n 'shape': (2,)} \n"
+ "\x16\x02{'descr': [('x', '<i4', (2,)),\n ('Info',\n [('value', '<c16'),\n ('y2', '<f8'),\n ('Info2',\n [('name', '|S2'),\n ('value', '<c16', (2,)),\n ('y3', '<f8', (2,)),\n ('z3', '<u4', (2,))]),\n ('name', '|S2'),\n ('z2', '|b1')]),\n ('color', '|S2'),\n ('info', [('Name', '<U8'), ('Value', '<c16')]),\n ('y', '<f8', (2, 2)),\n ('z', '|u1')],\n 'fortran_order': False,\n 'shape': (2,)} \n"
+ "v\x00{'descr': [('x', '>i4', (2,)), ('y', '>f8', (2, 2)), ('z', '|u1')],\n 'fortran_order': False,\n 'shape': (2,)} \n"
+ "\x16\x02{'descr': [('x', '>i4', (2,)),\n ('Info',\n [('value', '>c16'),\n ('y2', '>f8'),\n ('Info2',\n [('name', '|S2'),\n ('value', '>c16', (2,)),\n ('y3', '>f8', (2,)),\n ('z3', '>u4', (2,))]),\n ('name', '|S2'),\n ('z2', '|b1')]),\n ('color', '|S2'),\n ('info', [('Name', '>U8'), ('Value', '>c16')]),\n ('y', '>f8', (2, 2)),\n ('z', '|u1')],\n 'fortran_order': False,\n 'shape': (2,)} \n"
+'''
+import os
+import sys
+import warnings
+from io import BytesIO
+
+import pytest
+
+import numpy as np
+from numpy.lib import format
+from numpy.testing import (
+ IS_64BIT,
+ IS_PYPY,
+ IS_WASM,
+ assert_,
+ assert_array_equal,
+ assert_raises,
+ assert_raises_regex,
+ assert_warns,
+)
+from numpy.testing._private.utils import requires_memory
+
+# Generate some basic arrays to test with.
+scalars = [
+ np.uint8,
+ np.int8,
+ np.uint16,
+ np.int16,
+ np.uint32,
+ np.int32,
+ np.uint64,
+ np.int64,
+ np.float32,
+ np.float64,
+ np.complex64,
+ np.complex128,
+ object,
+]
+basic_arrays = []
+for scalar in scalars:
+ for endian in '<>':
+ dtype = np.dtype(scalar).newbyteorder(endian)
+ basic = np.arange(1500).astype(dtype)
+ basic_arrays.extend([
+ # Empty
+ np.array([], dtype=dtype),
+ # Rank-0
+ np.array(10, dtype=dtype),
+ # 1-D
+ basic,
+ # 2-D C-contiguous
+ basic.reshape((30, 50)),
+ # 2-D F-contiguous
+ basic.reshape((30, 50)).T,
+ # 2-D non-contiguous
+ basic.reshape((30, 50))[::-1, ::2],
+ ])
+
+# More complicated record arrays.
+# This is the structure of the table used for plain objects:
+#
+# +-+-+-+
+# |x|y|z|
+# +-+-+-+
+
+# Structure of a plain array description:
+Pdescr = [
+ ('x', 'i4', (2,)),
+ ('y', 'f8', (2, 2)),
+ ('z', 'u1')]
+
+# A plain list of tuples with values for testing:
+PbufferT = [
+ # x y z
+ ([3, 2], [[6., 4.], [6., 4.]], 8),
+ ([4, 3], [[7., 5.], [7., 5.]], 9),
+ ]
+
+
+# This is the structure of the table used for nested objects (DON'T PANIC!):
+#
+# +-+---------------------------------+-----+----------+-+-+
+# |x|Info |color|info |y|z|
+# | +-----+--+----------------+----+--+ +----+-----+ | |
+# | |value|y2|Info2 |name|z2| |Name|Value| | |
+# | | | +----+-----+--+--+ | | | | | | |
+# | | | |name|value|y3|z3| | | | | | | |
+# +-+-----+--+----+-----+--+--+----+--+-----+----+-----+-+-+
+#
+
+# The corresponding nested array description:
+Ndescr = [
+ ('x', 'i4', (2,)),
+ ('Info', [
+ ('value', 'c16'),
+ ('y2', 'f8'),
+ ('Info2', [
+ ('name', 'S2'),
+ ('value', 'c16', (2,)),
+ ('y3', 'f8', (2,)),
+ ('z3', 'u4', (2,))]),
+ ('name', 'S2'),
+ ('z2', 'b1')]),
+ ('color', 'S2'),
+ ('info', [
+ ('Name', 'U8'),
+ ('Value', 'c16')]),
+ ('y', 'f8', (2, 2)),
+ ('z', 'u1')]
+
+NbufferT = [
+ # x Info color info y z
+ # value y2 Info2 name z2 Name Value
+ # name value y3 z3
+ ([3, 2], (6j, 6., ('nn', [6j, 4j], [6., 4.], [1, 2]), 'NN', True),
+ 'cc', ('NN', 6j), [[6., 4.], [6., 4.]], 8),
+ ([4, 3], (7j, 7., ('oo', [7j, 5j], [7., 5.], [2, 1]), 'OO', False),
+ 'dd', ('OO', 7j), [[7., 5.], [7., 5.]], 9),
+ ]
+
+record_arrays = [
+ np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('<')),
+ np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('<')),
+ np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('>')),
+ np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('>')),
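+    # a field whose type is itself a subarray dtype: a (2,)-shaped subarray
+    # of (5,)-element float64 vectors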
+ np.zeros(1, dtype=[('c', ('<f8', (5,)), (2,))])
+]
+
+
+# BytesIO that reads a random number of bytes at a time, emulating a stream
+# that returns short reads; this exercises read_array's handling of
+# partial reads
+class BytesIOSRandomSize(BytesIO):
+ def read(self, size=None):
+ import random
+ size = random.randint(1, size)
+ return super().read(size)
+
+
+def roundtrip(arr):
+ f = BytesIO()
+ format.write_array(f, arr)
+ f2 = BytesIO(f.getvalue())
+ arr2 = format.read_array(f2, allow_pickle=True)
+ return arr2
+
+
+def roundtrip_randsize(arr):
+ f = BytesIO()
+ format.write_array(f, arr)
+ f2 = BytesIOSRandomSize(f.getvalue())
+ arr2 = format.read_array(f2)
+ return arr2
+
+
+def roundtrip_truncated(arr):
+ f = BytesIO()
+ format.write_array(f, arr)
+ # BytesIO is one byte short
+ f2 = BytesIO(f.getvalue()[0:-1])
+ arr2 = format.read_array(f2)
+ return arr2
+
+def assert_equal_(o1, o2):
+ assert_(o1 == o2)
+
+
+def test_roundtrip():
+ for arr in basic_arrays + record_arrays:
+ arr2 = roundtrip(arr)
+ assert_array_equal(arr, arr2)
+
+
+def test_roundtrip_randsize():
+ for arr in basic_arrays + record_arrays:
+ if arr.dtype != object:
+ arr2 = roundtrip_randsize(arr)
+ assert_array_equal(arr, arr2)
+
+
+def test_roundtrip_truncated():
+ for arr in basic_arrays:
+ if arr.dtype != object:
+ assert_raises(ValueError, roundtrip_truncated, arr)
+
+def test_file_truncated(tmp_path):
+ path = tmp_path / "a.npy"
+ for arr in basic_arrays:
+ if arr.dtype != object:
+ with open(path, 'wb') as f:
+ format.write_array(f, arr)
+ # truncate the file by one byte
+ with open(path, 'rb+') as f:
+ f.seek(-1, os.SEEK_END)
+ f.truncate()
+ with open(path, 'rb') as f:
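+                # empty arrays lose a header byte to the truncation and fail
+                # while reading the header; otherwise the data payload
+                # reads short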
+ with pytest.raises(
+ ValueError,
+ match=(
+ r"EOF: reading array header, "
+ r"expected (\d+) bytes got (\d+)"
+ ) if arr.size == 0 else (
+ r"Failed to read all data for array\. "
+ r"Expected \(.*?\) = (\d+) elements, "
+ r"could only read (\d+) elements\. "
+ r"\(file seems not fully written\?\)"
+ )
+ ):
+ _ = format.read_array(f)
+
+def test_long_str():
+ # check items larger than internal buffer size, gh-4027
+ long_str_arr = np.ones(1, dtype=np.dtype((str, format.BUFFER_SIZE + 1)))
+ long_str_arr2 = roundtrip(long_str_arr)
+ assert_array_equal(long_str_arr, long_str_arr2)
+
+
+@pytest.mark.skipif(IS_WASM, reason="memmap doesn't work correctly")
+@pytest.mark.slow
+def test_memmap_roundtrip(tmpdir):
+ for i, arr in enumerate(basic_arrays + record_arrays):
+ if arr.dtype.hasobject:
+ # Skip these since they can't be mmap'ed.
+ continue
+ # Write it out normally and through mmap.
+ nfn = os.path.join(tmpdir, f'normal{i}.npy')
+ mfn = os.path.join(tmpdir, f'memmap{i}.npy')
+ with open(nfn, 'wb') as fp:
+ format.write_array(fp, arr)
+
+ fortran_order = (
+ arr.flags.f_contiguous and not arr.flags.c_contiguous)
+ ma = format.open_memmap(mfn, mode='w+', dtype=arr.dtype,
+ shape=arr.shape, fortran_order=fortran_order)
+ ma[...] = arr
+ ma.flush()
+
+ # Check that both of these files' contents are the same.
+ with open(nfn, 'rb') as fp:
+ normal_bytes = fp.read()
+ with open(mfn, 'rb') as fp:
+ memmap_bytes = fp.read()
+ assert_equal_(normal_bytes, memmap_bytes)
+
+ # Check that reading the file using memmap works.
+ ma = format.open_memmap(nfn, mode='r')
+ ma.flush()
+
+
+def test_compressed_roundtrip(tmpdir):
+ arr = np.random.rand(200, 200)
+ npz_file = os.path.join(tmpdir, 'compressed.npz')
+ np.savez_compressed(npz_file, arr=arr)
+ with np.load(npz_file) as npz:
+ arr1 = npz['arr']
+ assert_array_equal(arr, arr1)
+
+
+# aligned
+dt1 = np.dtype('i1, i4, i1', align=True)
+# non-aligned, explicit offsets
+dt2 = np.dtype({'names': ['a', 'b'], 'formats': ['i4', 'i4'],
+ 'offsets': [1, 6]})
+# nested struct-in-struct
+dt3 = np.dtype({'names': ['c', 'd'], 'formats': ['i4', dt2]})
+# field with '' name
+dt4 = np.dtype({'names': ['a', '', 'b'], 'formats': ['i4'] * 3})
+# titles
+dt5 = np.dtype({'names': ['a', 'b'], 'formats': ['i4', 'i4'],
+ 'offsets': [1, 6], 'titles': ['aa', 'bb']})
+# empty
+dt6 = np.dtype({'names': [], 'formats': [], 'itemsize': 8})
+
+@pytest.mark.parametrize("dt", [dt1, dt2, dt3, dt4, dt5, dt6])
+def test_load_padded_dtype(tmpdir, dt):
+ arr = np.zeros(3, dt)
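+    # scalar assignment to a structured element broadcasts the value across
+    # the element's fields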
+ for i in range(3):
+ arr[i] = i + 5
+ npz_file = os.path.join(tmpdir, 'aligned.npz')
+ np.savez(npz_file, arr=arr)
+ with np.load(npz_file) as npz:
+ arr1 = npz['arr']
+ assert_array_equal(arr, arr1)
+
+
+@pytest.mark.skipif(sys.version_info >= (3, 12), reason="see gh-23988")
+@pytest.mark.xfail(IS_WASM, reason="Emscripten NODEFS has a buggy dup")
+def test_python2_python3_interoperability():
+ fname = 'win64python2.npy'
+ path = os.path.join(os.path.dirname(__file__), 'data', fname)
+ with pytest.warns(UserWarning, match="Reading.*this warning\\."):
+ data = np.load(path)
+ assert_array_equal(data, np.ones(2))
+
+
+def test_pickle_python2_python3():
+ # Test that loading object arrays saved on Python 2 works both on
+ # Python 2 and Python 3 and vice versa
+ data_dir = os.path.join(os.path.dirname(__file__), 'data')
+
+ expected = np.array([None, range, '\u512a\u826f',
+ b'\xe4\xb8\x8d\xe8\x89\xaf'],
+ dtype=object)
+
+ for fname in ['py2-np0-objarr.npy', 'py2-objarr.npy', 'py2-objarr.npz',
+ 'py3-objarr.npy', 'py3-objarr.npz']:
+ path = os.path.join(data_dir, fname)
+
+ for encoding in ['bytes', 'latin1']:
+ data_f = np.load(path, allow_pickle=True, encoding=encoding)
+ if fname.endswith('.npz'):
+ data = data_f['x']
+ data_f.close()
+ else:
+ data = data_f
+
+ if encoding == 'latin1' and fname.startswith('py2'):
+ assert_(isinstance(data[3], str))
+ assert_array_equal(data[:-1], expected[:-1])
+ # mojibake occurs
+ assert_array_equal(data[-1].encode(encoding), expected[-1])
+ else:
+ assert_(isinstance(data[3], bytes))
+ assert_array_equal(data, expected)
+
+ if fname.startswith('py2'):
+ if fname.endswith('.npz'):
+ data = np.load(path, allow_pickle=True)
+ assert_raises(UnicodeError, data.__getitem__, 'x')
+ data.close()
+ data = np.load(path, allow_pickle=True, fix_imports=False,
+ encoding='latin1')
+ assert_raises(ImportError, data.__getitem__, 'x')
+ data.close()
+ else:
+ assert_raises(UnicodeError, np.load, path,
+ allow_pickle=True)
+ assert_raises(ImportError, np.load, path,
+ allow_pickle=True, fix_imports=False,
+ encoding='latin1')
+
+
+def test_pickle_disallow(tmpdir):
+ data_dir = os.path.join(os.path.dirname(__file__), 'data')
+
+ path = os.path.join(data_dir, 'py2-objarr.npy')
+ assert_raises(ValueError, np.load, path,
+ allow_pickle=False, encoding='latin1')
+
+ path = os.path.join(data_dir, 'py2-objarr.npz')
+ with np.load(path, allow_pickle=False, encoding='latin1') as f:
+ assert_raises(ValueError, f.__getitem__, 'x')
+
+ path = os.path.join(tmpdir, 'pickle-disabled.npy')
+ assert_raises(ValueError, np.save, path, np.array([None], dtype=object),
+ allow_pickle=False)
+
+@pytest.mark.parametrize('dt', [
+ np.dtype(np.dtype([('a', np.int8),
+ ('b', np.int16),
+ ('c', np.int32),
+ ], align=True),
+ (3,)),
+ np.dtype([('x', np.dtype({'names': ['a', 'b'],
+ 'formats': ['i1', 'i1'],
+ 'offsets': [0, 4],
+ 'itemsize': 8,
+ },
+ (3,)),
+ (4,),
+ )]),
+ np.dtype([('x',
+ ('<f8', (5,)),
+ (2,),
+ )]),
+ np.dtype([('x', np.dtype((
+ np.dtype((
+ np.dtype({'names': ['a', 'b'],
+ 'formats': ['i1', 'i1'],
+ 'offsets': [0, 4],
+ 'itemsize': 8}),
+ (3,)
+ )),
+ (4,)
+ )))
+ ]),
+ np.dtype([
+ ('a', np.dtype((
+ np.dtype((
+ np.dtype((
+ np.dtype([
+ ('a', int),
+ ('b', np.dtype({'names': ['a', 'b'],
+ 'formats': ['i1', 'i1'],
+ 'offsets': [0, 4],
+ 'itemsize': 8})),
+ ]),
+ (3,),
+ )),
+ (4,),
+ )),
+ (5,),
+ )))
+ ]),
+ ])
+def test_descr_to_dtype(dt):
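+    # round-trip: dtype -> descr -> dtype must give back an equal dtype,
+    # even for the nested subarray and padded-offset dtypes above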
+ dt1 = format.descr_to_dtype(dt.descr)
+ assert_equal_(dt1, dt)
+ arr1 = np.zeros(3, dt)
+ arr2 = roundtrip(arr1)
+ assert_array_equal(arr1, arr2)
+
+def test_version_2_0():
+ f = BytesIO()
+    # the header needs more than 65535 bytes, too large for the 2-byte
+    # header-length field of format 1.0
+ dt = [(("%d" % i) * 100, float) for i in range(500)]
+ d = np.ones(1000, dtype=dt)
+
+ format.write_array(f, d, version=(2, 0))
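+    # with version=None, write_array picks the lowest format that fits;
+    # the large header forces 2.0, which must emit a UserWarning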
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always', '', UserWarning)
+ format.write_array(f, d)
+ assert_(w[0].category is UserWarning)
+
+ # check alignment of data portion
+ f.seek(0)
+ header = f.readline()
+ assert_(len(header) % format.ARRAY_ALIGN == 0)
+
+ f.seek(0)
+ n = format.read_array(f, max_header_size=200000)
+ assert_array_equal(d, n)
+
+ # 1.0 requested but data cannot be saved this way
+ assert_raises(ValueError, format.write_array, f, d, (1, 0))
+
+
+@pytest.mark.skipif(IS_WASM, reason="memmap doesn't work correctly")
+def test_version_2_0_memmap(tmpdir):
+    # the header needs more than 65535 bytes, too large for the 2-byte
+    # header-length field of format 1.0
+ dt = [(("%d" % i) * 100, float) for i in range(500)]
+ d = np.ones(1000, dtype=dt)
+ tf1 = os.path.join(tmpdir, 'version2_01.npy')
+ tf2 = os.path.join(tmpdir, 'version2_02.npy')
+
+ # 1.0 requested but data cannot be saved this way
+ assert_raises(ValueError, format.open_memmap, tf1, mode='w+', dtype=d.dtype,
+ shape=d.shape, version=(1, 0))
+
+ ma = format.open_memmap(tf1, mode='w+', dtype=d.dtype,
+ shape=d.shape, version=(2, 0))
+ ma[...] = d
+ ma.flush()
+ ma = format.open_memmap(tf1, mode='r', max_header_size=200000)
+ assert_array_equal(ma, d)
+
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always', '', UserWarning)
+ ma = format.open_memmap(tf2, mode='w+', dtype=d.dtype,
+ shape=d.shape, version=None)
+ assert_(w[0].category is UserWarning)
+ ma[...] = d
+ ma.flush()
+
+ ma = format.open_memmap(tf2, mode='r', max_header_size=200000)
+
+ assert_array_equal(ma, d)
+
+@pytest.mark.parametrize("mmap_mode", ["r", None])
+def test_huge_header(tmpdir, mmap_mode):
+ f = os.path.join(tmpdir, 'large_header.npy')
+ arr = np.array(1, dtype="i," * 10000 + "i")
+
+ with pytest.warns(UserWarning, match=".*format 2.0"):
+ np.save(f, arr)
+
+ with pytest.raises(ValueError, match="Header.*large"):
+ np.load(f, mmap_mode=mmap_mode)
+
+ with pytest.raises(ValueError, match="Header.*large"):
+ np.load(f, mmap_mode=mmap_mode, max_header_size=20000)
+
+ res = np.load(f, mmap_mode=mmap_mode, allow_pickle=True)
+ assert_array_equal(res, arr)
+
+ res = np.load(f, mmap_mode=mmap_mode, max_header_size=180000)
+ assert_array_equal(res, arr)
+
+def test_huge_header_npz(tmpdir):
+ f = os.path.join(tmpdir, 'large_header.npz')
+ arr = np.array(1, dtype="i," * 10000 + "i")
+
+ with pytest.warns(UserWarning, match=".*format 2.0"):
+ np.savez(f, arr=arr)
+
+ # Only getting the array from the file actually reads it
+ with pytest.raises(ValueError, match="Header.*large"):
+ np.load(f)["arr"]
+
+ with pytest.raises(ValueError, match="Header.*large"):
+ np.load(f, max_header_size=20000)["arr"]
+
+ res = np.load(f, allow_pickle=True)["arr"]
+ assert_array_equal(res, arr)
+
+ res = np.load(f, max_header_size=180000)["arr"]
+ assert_array_equal(res, arr)
+
+def test_write_version():
+ f = BytesIO()
+ arr = np.arange(1)
+ # These should pass.
+ format.write_array(f, arr, version=(1, 0))
+ format.write_array(f, arr)
+
+ format.write_array(f, arr, version=None)
+ format.write_array(f, arr)
+
+ format.write_array(f, arr, version=(2, 0))
+ format.write_array(f, arr)
+
+ # These should all fail.
+ bad_versions = [
+ (1, 1),
+ (0, 0),
+ (0, 1),
+ (2, 2),
+ (255, 255),
+ ]
+ for version in bad_versions:
+ with assert_raises_regex(ValueError,
+ 'we only support format version.*'):
+ format.write_array(f, arr, version=version)
+
+
+bad_version_magic = [
+ b'\x93NUMPY\x01\x01',
+ b'\x93NUMPY\x00\x00',
+ b'\x93NUMPY\x00\x01',
+ b'\x93NUMPY\x02\x00',
+ b'\x93NUMPY\x02\x02',
+ b'\x93NUMPY\xff\xff',
+]
+malformed_magic = [
+ b'\x92NUMPY\x01\x00',
+ b'\x00NUMPY\x01\x00',
+ b'\x93numpy\x01\x00',
+ b'\x93MATLB\x01\x00',
+ b'\x93NUMPY\x01',
+ b'\x93NUMPY',
+ b'',
+]
+
+def test_read_magic():
+ s1 = BytesIO()
+ s2 = BytesIO()
+
+ arr = np.ones((3, 6), dtype=float)
+
+ format.write_array(s1, arr, version=(1, 0))
+ format.write_array(s2, arr, version=(2, 0))
+
+ s1.seek(0)
+ s2.seek(0)
+
+ version1 = format.read_magic(s1)
+ version2 = format.read_magic(s2)
+
+ assert_(version1 == (1, 0))
+ assert_(version2 == (2, 0))
+
+ assert_(s1.tell() == format.MAGIC_LEN)
+ assert_(s2.tell() == format.MAGIC_LEN)
+
+def test_read_magic_bad_magic():
+ for magic in malformed_magic:
+ f = BytesIO(magic)
+ assert_raises(ValueError, format.read_array, f)
+
+
+def test_read_version_1_0_bad_magic():
+ for magic in bad_version_magic + malformed_magic:
+ f = BytesIO(magic)
+ assert_raises(ValueError, format.read_array, f)
+
+
+def test_bad_magic_args():
+ assert_raises(ValueError, format.magic, -1, 1)
+ assert_raises(ValueError, format.magic, 256, 1)
+ assert_raises(ValueError, format.magic, 1, -1)
+ assert_raises(ValueError, format.magic, 1, 256)
+
+
+def test_large_header():
+ s = BytesIO()
+ d = {'shape': (), 'fortran_order': False, 'descr': '<i8'}
+ format.write_array_header_1_0(s, d)
+
+ s = BytesIO()
+ d['descr'] = [('x' * 256 * 256, '<i8')]
+ assert_raises(ValueError, format.write_array_header_1_0, s, d)
+
+
+def test_read_array_header_1_0():
+ s = BytesIO()
+
+ arr = np.ones((3, 6), dtype=float)
+ format.write_array(s, arr, version=(1, 0))
+
+ s.seek(format.MAGIC_LEN)
+ shape, fortran, dtype = format.read_array_header_1_0(s)
+
+ assert_(s.tell() % format.ARRAY_ALIGN == 0)
+ assert_((shape, fortran, dtype) == ((3, 6), False, float))
+
+
+def test_read_array_header_2_0():
+ s = BytesIO()
+
+ arr = np.ones((3, 6), dtype=float)
+ format.write_array(s, arr, version=(2, 0))
+
+ s.seek(format.MAGIC_LEN)
+ shape, fortran, dtype = format.read_array_header_2_0(s)
+
+ assert_(s.tell() % format.ARRAY_ALIGN == 0)
+ assert_((shape, fortran, dtype) == ((3, 6), False, float))
+
+
+def test_bad_header():
+ # header of length less than 2 should fail
+ s = BytesIO()
+ assert_raises(ValueError, format.read_array_header_1_0, s)
+ s = BytesIO(b'1')
+ assert_raises(ValueError, format.read_array_header_1_0, s)
+
+ # header shorter than indicated size should fail
+ s = BytesIO(b'\x01\x00')
+ assert_raises(ValueError, format.read_array_header_1_0, s)
+
+ # headers without the exact keys required should fail
+ # d = {"shape": (1, 2),
+ # "descr": "x"}
+ s = BytesIO(
+ b"\x93NUMPY\x01\x006\x00{'descr': 'x', 'shape': (1, 2), }"
+ b" \n"
+ )
+ assert_raises(ValueError, format.read_array_header_1_0, s)
+
+ d = {"shape": (1, 2),
+ "fortran_order": False,
+ "descr": "x",
+ "extrakey": -1}
+ s = BytesIO()
+ format.write_array_header_1_0(s, d)
+ assert_raises(ValueError, format.read_array_header_1_0, s)
+
+
+def test_large_file_support(tmpdir):
+    if sys.platform in ('win32', 'cygwin'):
+ pytest.skip("Unknown if Windows has sparse filesystems")
+ # try creating a large sparse file
+ tf_name = os.path.join(tmpdir, 'sparse_file')
+ try:
+ # seek past end would work too, but linux truncate somewhat
+ # increases the chances that we have a sparse filesystem and can
+ # avoid actually writing 5GB
+ import subprocess as sp
+ sp.check_call(["truncate", "-s", "5368709120", tf_name])
+ except Exception:
+ pytest.skip("Could not create 5GB large file")
+ # write a small array to the end
+ with open(tf_name, "wb") as f:
+ f.seek(5368709120)
+ d = np.arange(5)
+ np.save(f, d)
+ # read it back
+ with open(tf_name, "rb") as f:
+ f.seek(5368709120)
+ r = np.load(f)
+ assert_array_equal(r, d)
+
+
+@pytest.mark.skipif(IS_PYPY, reason="flaky on PyPy")
+@pytest.mark.skipif(not IS_64BIT, reason="test requires 64-bit system")
+@pytest.mark.slow
+@requires_memory(free_bytes=2 * 2**30)
+def test_large_archive(tmpdir):
+    # Regression test for saving arrays whose element count (the product of
+    # the dimensions) does not fit in an int32. See gh-7598 for details.
+ shape = (2**30, 2)
+ try:
+ a = np.empty(shape, dtype=np.uint8)
+ except MemoryError:
+ pytest.skip("Could not create large file")
+
+ fname = os.path.join(tmpdir, "large_archive")
+
+ with open(fname, "wb") as f:
+ np.savez(f, arr=a)
+
+ del a
+
+ with open(fname, "rb") as f:
+ new_a = np.load(f)["arr"]
+
+ assert new_a.shape == shape
+
+
+def test_empty_npz(tmpdir):
+ # Test for gh-9989
+ fname = os.path.join(tmpdir, "nothing.npz")
+ np.savez(fname)
+ with np.load(fname) as nps:
+ pass
+
+
+def test_unicode_field_names(tmpdir):
+ # gh-7391
+ arr = np.array([
+ (1, 3),
+ (1, 2),
+ (1, 3),
+ (1, 2)
+ ], dtype=[
+ ('int', int),
+ ('\N{CJK UNIFIED IDEOGRAPH-6574}\N{CJK UNIFIED IDEOGRAPH-5F62}', int)
+ ])
+ fname = os.path.join(tmpdir, "unicode.npy")
+ with open(fname, 'wb') as f:
+ format.write_array(f, arr, version=(3, 0))
+ with open(fname, 'rb') as f:
+ arr2 = format.read_array(f)
+ assert_array_equal(arr, arr2)
+
+ # notifies the user that 3.0 is selected
+ with open(fname, 'wb') as f:
+ with assert_warns(UserWarning):
+ format.write_array(f, arr, version=None)
+
+def test_header_growth_axis():
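+    # The header reserves room for GROWTH_AXIS_MAX_DIGITS digits in the size
+    # of the growth axis (the first axis in C order, the last in Fortran
+    # order), so the header length does not depend on that size; presumably
+    # this lets the array grow in place by rewriting the header.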
+ for is_fortran_array, dtype_space, expected_header_length in [
+ [False, 22, 128], [False, 23, 192], [True, 23, 128], [True, 24, 192]
+ ]:
+ for size in [10**i for i in range(format.GROWTH_AXIS_MAX_DIGITS)]:
+ fp = BytesIO()
+ format.write_array_header_1_0(fp, {
+ 'shape': (2, size) if is_fortran_array else (size, 2),
+ 'fortran_order': is_fortran_array,
+ 'descr': np.dtype([(' ' * dtype_space, int)])
+ })
+
+ assert len(fp.getvalue()) == expected_header_length
+
+@pytest.mark.parametrize('dt', [
+ np.dtype({'names': ['a', 'b'], 'formats': [float, np.dtype('S3',
+ metadata={'some': 'stuff'})]}),
+ np.dtype(int, metadata={'some': 'stuff'}),
+ np.dtype([('subarray', (int, (2,)))], metadata={'some': 'stuff'}),
+ # recursive: metadata on the field of a dtype
+ np.dtype({'names': ['a', 'b'], 'formats': [
+ float, np.dtype({'names': ['c'], 'formats': [np.dtype(int, metadata={})]})
+ ]}),
+ ])
+@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8),
+ reason="PyPy bug in error formatting")
+def test_metadata_dtype(dt):
+ # gh-14142
+ arr = np.ones(10, dtype=dt)
+ buf = BytesIO()
+ with assert_warns(UserWarning):
+ np.save(buf, arr)
+ buf.seek(0)
+
+ # Loading should work (metadata was stripped):
+ arr2 = np.load(buf)
+ # BUG: assert_array_equal does not check metadata
+ from numpy.lib._utils_impl import drop_metadata
+ assert_array_equal(arr, arr2)
+ assert drop_metadata(arr.dtype) is not arr.dtype
+ assert drop_metadata(arr2.dtype) is arr2.dtype
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_function_base.py b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_function_base.py
new file mode 100644
index 0000000..f2dba19
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_function_base.py
@@ -0,0 +1,4573 @@
+import decimal
+import math
+import operator
+import sys
+import warnings
+from fractions import Fraction
+from functools import partial
+
+import hypothesis
+import hypothesis.strategies as st
+import pytest
+from hypothesis.extra.numpy import arrays
+
+import numpy as np
+import numpy.lib._function_base_impl as nfb
+from numpy import (
+ angle,
+ average,
+ bartlett,
+ blackman,
+ corrcoef,
+ cov,
+ delete,
+ diff,
+ digitize,
+ extract,
+ flipud,
+ gradient,
+ hamming,
+ hanning,
+ i0,
+ insert,
+ interp,
+ kaiser,
+ ma,
+ meshgrid,
+ piecewise,
+ place,
+ rot90,
+ select,
+ setxor1d,
+ sinc,
+ trapezoid,
+ trim_zeros,
+ unique,
+ unwrap,
+ vectorize,
+)
+from numpy._core.numeric import normalize_axis_tuple
+from numpy.exceptions import AxisError
+from numpy.random import rand
+from numpy.testing import (
+ HAS_REFCOUNT,
+ IS_WASM,
+ NOGIL_BUILD,
+ assert_,
+ assert_allclose,
+ assert_almost_equal,
+ assert_array_almost_equal,
+ assert_array_equal,
+ assert_equal,
+ assert_raises,
+ assert_raises_regex,
+ assert_warns,
+ suppress_warnings,
+)
+
+
+def get_mat(n):
+ data = np.arange(n)
+ data = np.add.outer(data, data)
+ return data
+
+
+def _make_complex(real, imag):
+ """
+ Like real + 1j * imag, but behaves as expected when imag contains non-finite
+ values
+ """
+ ret = np.zeros(np.broadcast(real, imag).shape, np.complex128)
+ ret.real = real
+ ret.imag = imag
+ return ret
+
+
+class TestRot90:
+ def test_basic(self):
+ assert_raises(ValueError, rot90, np.ones(4))
+ assert_raises(ValueError, rot90, np.ones((2, 2, 2)), axes=(0, 1, 2))
+ assert_raises(ValueError, rot90, np.ones((2, 2)), axes=(0, 2))
+ assert_raises(ValueError, rot90, np.ones((2, 2)), axes=(1, 1))
+ assert_raises(ValueError, rot90, np.ones((2, 2, 2)), axes=(-2, 1))
+
+ a = [[0, 1, 2],
+ [3, 4, 5]]
+ b1 = [[2, 5],
+ [1, 4],
+ [0, 3]]
+ b2 = [[5, 4, 3],
+ [2, 1, 0]]
+ b3 = [[3, 0],
+ [4, 1],
+ [5, 2]]
+ b4 = [[0, 1, 2],
+ [3, 4, 5]]
+
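+        # b1, b2, b3 are `a` rotated by 1, 2, 3 quarter-turns and b4 == a;
+        # each range steps k by 4 to cover every k with the same k mod 4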
+ for k in range(-3, 13, 4):
+ assert_equal(rot90(a, k=k), b1)
+ for k in range(-2, 13, 4):
+ assert_equal(rot90(a, k=k), b2)
+ for k in range(-1, 13, 4):
+ assert_equal(rot90(a, k=k), b3)
+ for k in range(0, 13, 4):
+ assert_equal(rot90(a, k=k), b4)
+
+ assert_equal(rot90(rot90(a, axes=(0, 1)), axes=(1, 0)), a)
+ assert_equal(rot90(a, k=1, axes=(1, 0)), rot90(a, k=-1, axes=(0, 1)))
+
+ def test_axes(self):
+ a = np.ones((50, 40, 3))
+ assert_equal(rot90(a).shape, (40, 50, 3))
+ assert_equal(rot90(a, axes=(0, 2)), rot90(a, axes=(0, -1)))
+ assert_equal(rot90(a, axes=(1, 2)), rot90(a, axes=(-2, -1)))
+
+ def test_rotation_axes(self):
+ a = np.arange(8).reshape((2, 2, 2))
+
+ a_rot90_01 = [[[2, 3],
+ [6, 7]],
+ [[0, 1],
+ [4, 5]]]
+ a_rot90_12 = [[[1, 3],
+ [0, 2]],
+ [[5, 7],
+ [4, 6]]]
+ a_rot90_20 = [[[4, 0],
+ [6, 2]],
+ [[5, 1],
+ [7, 3]]]
+ a_rot90_10 = [[[4, 5],
+ [0, 1]],
+ [[6, 7],
+ [2, 3]]]
+
+ assert_equal(rot90(a, axes=(0, 1)), a_rot90_01)
+ assert_equal(rot90(a, axes=(1, 0)), a_rot90_10)
+ assert_equal(rot90(a, axes=(1, 2)), a_rot90_12)
+
+ for k in range(1, 5):
+ assert_equal(rot90(a, k=k, axes=(2, 0)),
+ rot90(a_rot90_20, k=k - 1, axes=(2, 0)))
+
+
+class TestFlip:
+
+ def test_axes(self):
+ assert_raises(AxisError, np.flip, np.ones(4), axis=1)
+ assert_raises(AxisError, np.flip, np.ones((4, 4)), axis=2)
+ assert_raises(AxisError, np.flip, np.ones((4, 4)), axis=-3)
+ assert_raises(AxisError, np.flip, np.ones((4, 4)), axis=(0, 3))
+
+ def test_basic_lr(self):
+ a = get_mat(4)
+ b = a[:, ::-1]
+ assert_equal(np.flip(a, 1), b)
+ a = [[0, 1, 2],
+ [3, 4, 5]]
+ b = [[2, 1, 0],
+ [5, 4, 3]]
+ assert_equal(np.flip(a, 1), b)
+
+ def test_basic_ud(self):
+ a = get_mat(4)
+ b = a[::-1, :]
+ assert_equal(np.flip(a, 0), b)
+ a = [[0, 1, 2],
+ [3, 4, 5]]
+ b = [[3, 4, 5],
+ [0, 1, 2]]
+ assert_equal(np.flip(a, 0), b)
+
+ def test_3d_swap_axis0(self):
+ a = np.array([[[0, 1],
+ [2, 3]],
+ [[4, 5],
+ [6, 7]]])
+
+ b = np.array([[[4, 5],
+ [6, 7]],
+ [[0, 1],
+ [2, 3]]])
+
+ assert_equal(np.flip(a, 0), b)
+
+ def test_3d_swap_axis1(self):
+ a = np.array([[[0, 1],
+ [2, 3]],
+ [[4, 5],
+ [6, 7]]])
+
+ b = np.array([[[2, 3],
+ [0, 1]],
+ [[6, 7],
+ [4, 5]]])
+
+ assert_equal(np.flip(a, 1), b)
+
+ def test_3d_swap_axis2(self):
+ a = np.array([[[0, 1],
+ [2, 3]],
+ [[4, 5],
+ [6, 7]]])
+
+ b = np.array([[[1, 0],
+ [3, 2]],
+ [[5, 4],
+ [7, 6]]])
+
+ assert_equal(np.flip(a, 2), b)
+
+ def test_4d(self):
+ a = np.arange(2 * 3 * 4 * 5).reshape(2, 3, 4, 5)
+ for i in range(a.ndim):
+ assert_equal(np.flip(a, i),
+ np.flipud(a.swapaxes(0, i)).swapaxes(i, 0))
+
+ def test_default_axis(self):
+ a = np.array([[1, 2, 3],
+ [4, 5, 6]])
+ b = np.array([[6, 5, 4],
+ [3, 2, 1]])
+ assert_equal(np.flip(a), b)
+
+ def test_multiple_axes(self):
+ a = np.array([[[0, 1],
+ [2, 3]],
+ [[4, 5],
+ [6, 7]]])
+
+ assert_equal(np.flip(a, axis=()), a)
+
+ b = np.array([[[5, 4],
+ [7, 6]],
+ [[1, 0],
+ [3, 2]]])
+
+ assert_equal(np.flip(a, axis=(0, 2)), b)
+
+ c = np.array([[[3, 2],
+ [1, 0]],
+ [[7, 6],
+ [5, 4]]])
+
+ assert_equal(np.flip(a, axis=(1, 2)), c)
+
+
+class TestAny:
+
+ def test_basic(self):
+ y1 = [0, 0, 1, 0]
+ y2 = [0, 0, 0, 0]
+ y3 = [1, 0, 1, 0]
+ assert_(np.any(y1))
+ assert_(np.any(y3))
+ assert_(not np.any(y2))
+
+ def test_nd(self):
+ y1 = [[0, 0, 0], [0, 1, 0], [1, 1, 0]]
+ assert_(np.any(y1))
+ assert_array_equal(np.any(y1, axis=0), [1, 1, 0])
+ assert_array_equal(np.any(y1, axis=1), [0, 1, 1])
+
+
+class TestAll:
+
+ def test_basic(self):
+ y1 = [0, 1, 1, 0]
+ y2 = [0, 0, 0, 0]
+ y3 = [1, 1, 1, 1]
+ assert_(not np.all(y1))
+ assert_(np.all(y3))
+ assert_(not np.all(y2))
+ assert_(np.all(~np.array(y2)))
+
+ def test_nd(self):
+ y1 = [[0, 0, 1], [0, 1, 1], [1, 1, 1]]
+ assert_(not np.all(y1))
+ assert_array_equal(np.all(y1, axis=0), [0, 0, 1])
+ assert_array_equal(np.all(y1, axis=1), [0, 0, 1])
+
+
+@pytest.mark.parametrize("dtype", ["i8", "U10", "object", "datetime64[ms]"])
+def test_any_and_all_result_dtype(dtype):
+ arr = np.ones(3, dtype=dtype)
+ assert np.any(arr).dtype == np.bool
+ assert np.all(arr).dtype == np.bool
+
+
+class TestCopy:
+
+ def test_basic(self):
+ a = np.array([[1, 2], [3, 4]])
+ a_copy = np.copy(a)
+ assert_array_equal(a, a_copy)
+ a_copy[0, 0] = 10
+ assert_equal(a[0, 0], 1)
+ assert_equal(a_copy[0, 0], 10)
+
+ def test_order(self):
+ # It turns out that people rely on np.copy() preserving order by
+ # default; changing this broke scikit-learn:
+ # github.com/scikit-learn/scikit-learn/commit/7842748cf777412c506a8c0ed28090711d3a3783
+ a = np.array([[1, 2], [3, 4]])
+ assert_(a.flags.c_contiguous)
+ assert_(not a.flags.f_contiguous)
+ a_fort = np.array([[1, 2], [3, 4]], order="F")
+ assert_(not a_fort.flags.c_contiguous)
+ assert_(a_fort.flags.f_contiguous)
+ a_copy = np.copy(a)
+ assert_(a_copy.flags.c_contiguous)
+ assert_(not a_copy.flags.f_contiguous)
+ a_fort_copy = np.copy(a_fort)
+ assert_(not a_fort_copy.flags.c_contiguous)
+ assert_(a_fort_copy.flags.f_contiguous)
+
+ def test_subok(self):
+ mx = ma.ones(5)
+ assert_(not ma.isMaskedArray(np.copy(mx, subok=False)))
+ assert_(ma.isMaskedArray(np.copy(mx, subok=True)))
+ # Default behavior
+ assert_(not ma.isMaskedArray(np.copy(mx)))
+
+
+class TestAverage:
+
+ def test_basic(self):
+ y1 = np.array([1, 2, 3])
+ assert_(average(y1, axis=0) == 2.)
+ y2 = np.array([1., 2., 3.])
+ assert_(average(y2, axis=0) == 2.)
+ y3 = [0., 0., 0.]
+ assert_(average(y3, axis=0) == 0.)
+
+ y4 = np.ones((4, 4))
+ y4[0, 1] = 0
+ y4[1, 0] = 2
+ assert_almost_equal(y4.mean(0), average(y4, 0))
+ assert_almost_equal(y4.mean(1), average(y4, 1))
+
+ y5 = rand(5, 5)
+ assert_almost_equal(y5.mean(0), average(y5, 0))
+ assert_almost_equal(y5.mean(1), average(y5, 1))
+
+ @pytest.mark.parametrize(
+ 'x, axis, expected_avg, weights, expected_wavg, expected_wsum',
+ [([1, 2, 3], None, [2.0], [3, 4, 1], [1.75], [8.0]),
+ ([[1, 2, 5], [1, 6, 11]], 0, [[1.0, 4.0, 8.0]],
+ [1, 3], [[1.0, 5.0, 9.5]], [[4, 4, 4]])],
+ )
+ def test_basic_keepdims(self, x, axis, expected_avg,
+ weights, expected_wavg, expected_wsum):
+ avg = np.average(x, axis=axis, keepdims=True)
+ assert avg.shape == np.shape(expected_avg)
+ assert_array_equal(avg, expected_avg)
+
+ wavg = np.average(x, axis=axis, weights=weights, keepdims=True)
+ assert wavg.shape == np.shape(expected_wavg)
+ assert_array_equal(wavg, expected_wavg)
+
+ wavg, wsum = np.average(x, axis=axis, weights=weights, returned=True,
+ keepdims=True)
+ assert wavg.shape == np.shape(expected_wavg)
+ assert_array_equal(wavg, expected_wavg)
+ assert wsum.shape == np.shape(expected_wsum)
+ assert_array_equal(wsum, expected_wsum)
+
+ def test_weights(self):
+ y = np.arange(10)
+ w = np.arange(10)
+ actual = average(y, weights=w)
+ desired = (np.arange(10) ** 2).sum() * 1. / np.arange(10).sum()
+ assert_almost_equal(actual, desired)
+
+ y1 = np.array([[1, 2, 3], [4, 5, 6]])
+ w0 = [1, 2]
+ actual = average(y1, weights=w0, axis=0)
+ desired = np.array([3., 4., 5.])
+ assert_almost_equal(actual, desired)
+
+ w1 = [0, 0, 1]
+ actual = average(y1, weights=w1, axis=1)
+ desired = np.array([3., 6.])
+ assert_almost_equal(actual, desired)
+
+ # weights and input have different shapes but no axis is specified
+ with pytest.raises(
+ TypeError,
+ match="Axis must be specified when shapes of a "
+ "and weights differ"):
+ average(y1, weights=w1)
+
+ # 2D Case
+ w2 = [[0, 0, 1], [0, 0, 2]]
+ desired = np.array([3., 6.])
+ assert_array_equal(average(y1, weights=w2, axis=1), desired)
+ assert_equal(average(y1, weights=w2), 5.)
+
+ y3 = rand(5).astype(np.float32)
+ w3 = rand(5).astype(np.float64)
+
+ assert_(np.average(y3, weights=w3).dtype == np.result_type(y3, w3))
+
+ # test weights with `keepdims=False` and `keepdims=True`
+ x = np.array([2, 3, 4]).reshape(3, 1)
+ w = np.array([4, 5, 6]).reshape(3, 1)
+
+ actual = np.average(x, weights=w, axis=1, keepdims=False)
+ desired = np.array([2., 3., 4.])
+ assert_array_equal(actual, desired)
+
+ actual = np.average(x, weights=w, axis=1, keepdims=True)
+ desired = np.array([[2.], [3.], [4.]])
+ assert_array_equal(actual, desired)
+
+ def test_weight_and_input_dims_different(self):
+ y = np.arange(12).reshape(2, 2, 3)
+ w = np.array([0., 0., 1., .5, .5, 0., 0., .5, .5, 1., 0., 0.])\
+ .reshape(2, 2, 3)
+
+ subw0 = w[:, :, 0]
+ actual = average(y, axis=(0, 1), weights=subw0)
+ desired = np.array([7., 8., 9.])
+ assert_almost_equal(actual, desired)
+
+ subw1 = w[1, :, :]
+ actual = average(y, axis=(1, 2), weights=subw1)
+ desired = np.array([2.25, 8.25])
+ assert_almost_equal(actual, desired)
+
+ subw2 = w[:, 0, :]
+ actual = average(y, axis=(0, 2), weights=subw2)
+ desired = np.array([4.75, 7.75])
+ assert_almost_equal(actual, desired)
+
+ # here the weights have the wrong shape for the specified axes
+ with pytest.raises(
+ ValueError,
+ match="Shape of weights must be consistent with "
+ "shape of a along specified axis"):
+ average(y, axis=(0, 1, 2), weights=subw0)
+
+ with pytest.raises(
+ ValueError,
+ match="Shape of weights must be consistent with "
+ "shape of a along specified axis"):
+ average(y, axis=(0, 1), weights=subw1)
+
+ # swapping the axes should be same as transposing weights
+ actual = average(y, axis=(1, 0), weights=subw0)
+ desired = average(y, axis=(0, 1), weights=subw0.T)
+ assert_almost_equal(actual, desired)
+
+ # if average over all axes, should have float output
+ actual = average(y, axis=(0, 1, 2), weights=w)
+ assert_(actual.ndim == 0)
+
+ def test_returned(self):
+ y = np.array([[1, 2, 3], [4, 5, 6]])
+
+ # No weights
+ avg, scl = average(y, returned=True)
+ assert_equal(scl, 6.)
+
+ avg, scl = average(y, 0, returned=True)
+ assert_array_equal(scl, np.array([2., 2., 2.]))
+
+ avg, scl = average(y, 1, returned=True)
+ assert_array_equal(scl, np.array([3., 3.]))
+
+ # With weights
+ w0 = [1, 2]
+ avg, scl = average(y, weights=w0, axis=0, returned=True)
+ assert_array_equal(scl, np.array([3., 3., 3.]))
+
+ w1 = [1, 2, 3]
+ avg, scl = average(y, weights=w1, axis=1, returned=True)
+ assert_array_equal(scl, np.array([6., 6.]))
+
+ w2 = [[0, 0, 1], [1, 2, 3]]
+ avg, scl = average(y, weights=w2, axis=1, returned=True)
+ assert_array_equal(scl, np.array([1., 6.]))
+
+ def test_subclasses(self):
+ class subclass(np.ndarray):
+ pass
+ a = np.array([[1, 2], [3, 4]]).view(subclass)
+ w = np.array([[1, 2], [3, 4]]).view(subclass)
+
+ assert_equal(type(np.average(a)), subclass)
+ assert_equal(type(np.average(a, weights=w)), subclass)
+
+ def test_upcasting(self):
+ typs = [('i4', 'i4', 'f8'), ('i4', 'f4', 'f8'), ('f4', 'i4', 'f8'),
+ ('f4', 'f4', 'f4'), ('f4', 'f8', 'f8')]
+ for at, wt, rt in typs:
+ a = np.array([[1, 2], [3, 4]], dtype=at)
+ w = np.array([[1, 2], [3, 4]], dtype=wt)
+ assert_equal(np.average(a, weights=w).dtype, np.dtype(rt))
+
+ def test_object_dtype(self):
+ a = np.array([decimal.Decimal(x) for x in range(10)])
+ w = np.array([decimal.Decimal(1) for _ in range(10)])
+ w /= w.sum()
+ assert_almost_equal(a.mean(0), average(a, weights=w))
+
+ def test_object_no_weights(self):
+ a = np.array([decimal.Decimal(x) for x in range(10)])
+ m = average(a)
+ assert m == decimal.Decimal('4.5')
+
+ def test_average_class_without_dtype(self):
+ # see gh-21988
+ a = np.array([Fraction(1, 5), Fraction(3, 5)])
+ assert_equal(np.average(a), Fraction(2, 5))
+
+
+class TestSelect:
+ choices = [np.array([1, 2, 3]),
+ np.array([4, 5, 6]),
+ np.array([7, 8, 9])]
+ conditions = [np.array([False, False, False]),
+ np.array([False, True, False]),
+ np.array([False, False, True])]
+
+ def _select(self, cond, values, default=0):
+ output = []
+ for m in range(len(cond)):
+ output += [V[m] for V, C in zip(values, cond) if C[m]] or [default]
+ return output
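+ # _select appends one value per true condition; this agrees with
+ # np.select (which takes the first match) only because the test
+ # conditions are mutually exclusive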
+
+ def test_basic(self):
+ choices = self.choices
+ conditions = self.conditions
+ assert_array_equal(select(conditions, choices, default=15),
+ self._select(conditions, choices, default=15))
+
+ assert_equal(len(choices), 3)
+ assert_equal(len(conditions), 3)
+
+ def test_broadcasting(self):
+ conditions = [np.array(True), np.array([False, True, False])]
+ choices = [1, np.arange(12).reshape(4, 3)]
+ assert_array_equal(select(conditions, choices), np.ones((4, 3)))
+ # default can broadcast too:
+ assert_equal(select([True], [0], default=[0]).shape, (1,))
+
+ def test_return_dtype(self):
+ assert_equal(select(self.conditions, self.choices, 1j).dtype,
+ np.complex128)
+ # But the choices need to be stronger than the scalar default
+ # if it is scalar.
+ choices = [choice.astype(np.int8) for choice in self.choices]
+ assert_equal(select(self.conditions, choices).dtype, np.int8)
+
+ d = np.array([1, 2, 3, np.nan, 5, 7])
+ m = np.isnan(d)
+ assert_equal(select([m], [d]), [0, 0, 0, np.nan, 0, 0])
+
+ def test_deprecated_empty(self):
+ assert_raises(ValueError, select, [], [], 3j)
+ assert_raises(ValueError, select, [], [])
+
+ def test_non_bool_deprecation(self):
+ choices = self.choices
+ conditions = self.conditions[:]
+ conditions[0] = conditions[0].astype(np.int_)
+ assert_raises(TypeError, select, conditions, choices)
+ conditions[0] = conditions[0].astype(np.uint8)
+ assert_raises(TypeError, select, conditions, choices)
+
+ def test_many_arguments(self):
+ # This used to be limited by NPY_MAXARGS == 32
+ conditions = [np.array([False])] * 100
+ choices = [np.array([1])] * 100
+ select(conditions, choices)
+
+
+class TestInsert:
+
+ def test_basic(self):
+ a = [1, 2, 3]
+ assert_equal(insert(a, 0, 1), [1, 1, 2, 3])
+ assert_equal(insert(a, 3, 1), [1, 2, 3, 1])
+ assert_equal(insert(a, [1, 1, 1], [1, 2, 3]), [1, 1, 2, 3, 2, 3])
+ assert_equal(insert(a, 1, [1, 2, 3]), [1, 1, 2, 3, 2, 3])
+ assert_equal(insert(a, [1, -1, 3], 9), [1, 9, 2, 9, 3, 9])
+ assert_equal(insert(a, slice(-1, None, -1), 9), [9, 1, 9, 2, 9, 3])
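+ # the slice expands to indices [2, 1, 0] against the original array,
+ # so a 9 is inserted before each element of [1, 2, 3]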
+ assert_equal(insert(a, [-1, 1, 3], [7, 8, 9]), [1, 8, 2, 7, 3, 9])
+ b = np.array([0, 1], dtype=np.float64)
+ assert_equal(insert(b, 0, b[0]), [0., 0., 1.])
+ assert_equal(insert(b, [], []), b)
+ assert_equal(insert(a, np.array([True] * 4), 9), [9, 1, 9, 2, 9, 3, 9])
+ assert_equal(insert(a, np.array([True, False, True, False]), 9),
+ [9, 1, 2, 9, 3])
+
+ def test_multidim(self):
+ a = [[1, 1, 1]]
+ r = [[2, 2, 2],
+ [1, 1, 1]]
+ assert_equal(insert(a, 0, [1]), [1, 1, 1, 1])
+ assert_equal(insert(a, 0, [2, 2, 2], axis=0), r)
+ assert_equal(insert(a, 0, 2, axis=0), r)
+ assert_equal(insert(a, 2, 2, axis=1), [[1, 1, 2, 1]])
+
+ a = np.array([[1, 1], [2, 2], [3, 3]])
+ b = np.arange(1, 4).repeat(3).reshape(3, 3)
+ c = np.concatenate(
+ (a[:, 0:1], np.arange(1, 4).repeat(3).reshape(3, 3).T,
+ a[:, 1:2]), axis=1)
+ assert_equal(insert(a, [1], [[1], [2], [3]], axis=1), b)
+ assert_equal(insert(a, [1], [1, 2, 3], axis=1), c)
+ # scalars behave differently, in this case exactly opposite:
+ assert_equal(insert(a, 1, [1, 2, 3], axis=1), b)
+ assert_equal(insert(a, 1, [[1], [2], [3]], axis=1), c)
+
+ a = np.arange(4).reshape(2, 2)
+ assert_equal(insert(a[:, :1], 1, a[:, 1], axis=1), a)
+ assert_equal(insert(a[:1, :], 1, a[1, :], axis=0), a)
+
+ # negative axis value
+ a = np.arange(24).reshape((2, 3, 4))
+ assert_equal(insert(a, 1, a[:, :, 3], axis=-1),
+ insert(a, 1, a[:, :, 3], axis=2))
+ assert_equal(insert(a, 1, a[:, 2, :], axis=-2),
+ insert(a, 1, a[:, 2, :], axis=1))
+
+ # invalid axis value
+ assert_raises(AxisError, insert, a, 1, a[:, 2, :], axis=3)
+ assert_raises(AxisError, insert, a, 1, a[:, 2, :], axis=-4)
+
+ def test_0d(self):
+ a = np.array(1)
+ with pytest.raises(AxisError):
+ insert(a, [], 2, axis=0)
+ with pytest.raises(TypeError):
+ insert(a, [], 2, axis="nonsense")
+
+ def test_subclass(self):
+ class SubClass(np.ndarray):
+ pass
+ a = np.arange(10).view(SubClass)
+ assert_(isinstance(np.insert(a, 0, [0]), SubClass))
+ assert_(isinstance(np.insert(a, [], []), SubClass))
+ assert_(isinstance(np.insert(a, [0, 1], [1, 2]), SubClass))
+ assert_(isinstance(np.insert(a, slice(1, 2), [1, 2]), SubClass))
+ assert_(isinstance(np.insert(a, slice(1, -2, -1), []), SubClass))
+ # Inserting into a 0-d array will be an error in the future:
+ a = np.array(1).view(SubClass)
+ assert_(isinstance(np.insert(a, 0, [0]), SubClass))
+
+ def test_index_array_copied(self):
+ x = np.array([1, 1, 1])
+ np.insert([0, 1, 2], x, [3, 4, 5])
+ assert_equal(x, np.array([1, 1, 1]))
+
+ def test_structured_array(self):
+ a = np.array([(1, 'a'), (2, 'b'), (3, 'c')],
+ dtype=[('foo', 'i'), ('bar', 'S1')])
+ val = (4, 'd')
+ b = np.insert(a, 0, val)
+ assert_array_equal(b[0], np.array(val, dtype=b.dtype))
+ val = [(4, 'd')] * 2
+ b = np.insert(a, [0, 2], val)
+ assert_array_equal(b[[0, 3]], np.array(val, dtype=b.dtype))
+
+ def test_index_floats(self):
+ with pytest.raises(IndexError):
+ np.insert([0, 1, 2], np.array([1.0, 2.0]), [10, 20])
+ with pytest.raises(IndexError):
+ np.insert([0, 1, 2], np.array([], dtype=float), [])
+
+ @pytest.mark.parametrize('idx', [4, -4])
+ def test_index_out_of_bounds(self, idx):
+ with pytest.raises(IndexError, match='out of bounds'):
+ np.insert([0, 1, 2], [idx], [3, 4])
+
+
+class TestAmax:
+
+ def test_basic(self):
+ a = [3, 4, 5, 10, -3, -5, 6.0]
+ assert_equal(np.amax(a), 10.0)
+ b = [[3, 6.0, 9.0],
+ [4, 10.0, 5.0],
+ [8, 3.0, 2.0]]
+ assert_equal(np.amax(b, axis=0), [8.0, 10.0, 9.0])
+ assert_equal(np.amax(b, axis=1), [9.0, 10.0, 8.0])
+
+
+class TestAmin:
+
+ def test_basic(self):
+ a = [3, 4, 5, 10, -3, -5, 6.0]
+ assert_equal(np.amin(a), -5.0)
+ b = [[3, 6.0, 9.0],
+ [4, 10.0, 5.0],
+ [8, 3.0, 2.0]]
+ assert_equal(np.amin(b, axis=0), [3.0, 3.0, 2.0])
+ assert_equal(np.amin(b, axis=1), [3.0, 4.0, 2.0])
+
+
+class TestPtp:
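+ # np.ptp ("peak to peak") returns max - min along the given axis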
+
+ def test_basic(self):
+ a = np.array([3, 4, 5, 10, -3, -5, 6.0])
+ assert_equal(np.ptp(a, axis=0), 15.0)
+ b = np.array([[3, 6.0, 9.0],
+ [4, 10.0, 5.0],
+ [8, 3.0, 2.0]])
+ assert_equal(np.ptp(b, axis=0), [5.0, 7.0, 7.0])
+ assert_equal(np.ptp(b, axis=-1), [6.0, 6.0, 6.0])
+
+ assert_equal(np.ptp(b, axis=0, keepdims=True), [[5.0, 7.0, 7.0]])
+ assert_equal(np.ptp(b, axis=(0, 1), keepdims=True), [[8.0]])
+
+
+class TestCumsum:
+
+ @pytest.mark.parametrize("cumsum", [np.cumsum, np.cumulative_sum])
+ def test_basic(self, cumsum):
+ ba = [1, 2, 10, 11, 6, 5, 4]
+ ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]]
+ for ctype in [np.int8, np.uint8, np.int16, np.uint16, np.int32,
+ np.uint32, np.float32, np.float64, np.complex64,
+ np.complex128]:
+ a = np.array(ba, ctype)
+ a2 = np.array(ba2, ctype)
+
+ tgt = np.array([1, 3, 13, 24, 30, 35, 39], ctype)
+ assert_array_equal(cumsum(a, axis=0), tgt)
+
+ tgt = np.array(
+ [[1, 2, 3, 4], [6, 8, 10, 13], [16, 11, 14, 18]], ctype)
+ assert_array_equal(cumsum(a2, axis=0), tgt)
+
+ tgt = np.array(
+ [[1, 3, 6, 10], [5, 11, 18, 27], [10, 13, 17, 22]], ctype)
+ assert_array_equal(cumsum(a2, axis=1), tgt)
+
+
+class TestProd:
+
+ def test_basic(self):
+ ba = [1, 2, 10, 11, 6, 5, 4]
+ ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]]
+ for ctype in [np.int16, np.uint16, np.int32, np.uint32,
+ np.float32, np.float64, np.complex64, np.complex128]:
+ a = np.array(ba, ctype)
+ a2 = np.array(ba2, ctype)
+ # 1*2*10*11*6*5*4 == 26400
+ assert_equal(a.prod(axis=0), 26400)
+ assert_array_equal(a2.prod(axis=0),
+ np.array([50, 36, 84, 180], ctype))
+ assert_array_equal(a2.prod(axis=-1),
+ np.array([24, 1890, 600], ctype))
+
+
+class TestCumprod:
+
+ @pytest.mark.parametrize("cumprod", [np.cumprod, np.cumulative_prod])
+ def test_basic(self, cumprod):
+ ba = [1, 2, 10, 11, 6, 5, 4]
+ ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]]
+ for ctype in [np.int16, np.uint16, np.int32, np.uint32,
+ np.float32, np.float64, np.complex64, np.complex128]:
+ a = np.array(ba, ctype)
+ a2 = np.array(ba2, ctype)
+ assert_array_equal(cumprod(a, axis=-1),
+ np.array([1, 2, 20, 220,
+ 1320, 6600, 26400], ctype))
+ assert_array_equal(cumprod(a2, axis=0),
+ np.array([[1, 2, 3, 4],
+ [5, 12, 21, 36],
+ [50, 36, 84, 180]], ctype))
+ assert_array_equal(cumprod(a2, axis=-1),
+ np.array([[1, 2, 6, 24],
+ [5, 30, 210, 1890],
+ [10, 30, 120, 600]], ctype))
+
+
+def test_cumulative_include_initial():
+ arr = np.arange(8).reshape((2, 2, 2))
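+ # include_initial=True prepends the reduction identity (0 for sums,
+ # 1 for products), so the scanned axis grows by one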
+
+ expected = np.array([
+ [[0, 0], [0, 1], [2, 4]], [[0, 0], [4, 5], [10, 12]]
+ ])
+ assert_array_equal(
+ np.cumulative_sum(arr, axis=1, include_initial=True), expected
+ )
+
+ expected = np.array([
+ [[1, 0, 0], [1, 2, 6]], [[1, 4, 20], [1, 6, 42]]
+ ])
+ assert_array_equal(
+ np.cumulative_prod(arr, axis=2, include_initial=True), expected
+ )
+
+ out = np.zeros((3, 2), dtype=np.float64)
+ expected = np.array([[0, 0], [1, 2], [4, 6]], dtype=np.float64)
+ arr = np.arange(1, 5).reshape((2, 2))
+ np.cumulative_sum(arr, axis=0, out=out, include_initial=True)
+ assert_array_equal(out, expected)
+
+ expected = np.array([1, 2, 4])
+ assert_array_equal(
+ np.cumulative_prod(np.array([2, 2]), include_initial=True), expected
+ )
+
+
+class TestDiff:
+
+ def test_basic(self):
+ x = [1, 4, 6, 7, 12]
+ out = np.array([3, 2, 1, 5])
+ out2 = np.array([-1, -1, 4])
+ out3 = np.array([0, 5])
+ assert_array_equal(diff(x), out)
+ assert_array_equal(diff(x, n=2), out2)
+ assert_array_equal(diff(x, n=3), out3)
+
+ x = [1.1, 2.2, 3.0, -0.2, -0.1]
+ out = np.array([1.1, 0.8, -3.2, 0.1])
+ assert_almost_equal(diff(x), out)
+
+ x = [True, True, False, False]
+ out = np.array([False, True, False])
+ out2 = np.array([True, True])
+ assert_array_equal(diff(x), out)
+ assert_array_equal(diff(x, n=2), out2)
+
+ def test_axis(self):
+ x = np.zeros((10, 20, 30))
+ x[:, 1::2, :] = 1
+ exp = np.ones((10, 19, 30))
+ exp[:, 1::2, :] = -1
+ assert_array_equal(diff(x), np.zeros((10, 20, 29)))
+ assert_array_equal(diff(x, axis=-1), np.zeros((10, 20, 29)))
+ assert_array_equal(diff(x, axis=0), np.zeros((9, 20, 30)))
+ assert_array_equal(diff(x, axis=1), exp)
+ assert_array_equal(diff(x, axis=-2), exp)
+ assert_raises(AxisError, diff, x, axis=3)
+ assert_raises(AxisError, diff, x, axis=-4)
+
+ x = np.array(1.11111111111, np.float64)
+ assert_raises(ValueError, diff, x)
+
+ def test_nd(self):
+ x = 20 * rand(10, 20, 30)
+ out1 = x[:, :, 1:] - x[:, :, :-1]
+ out2 = out1[:, :, 1:] - out1[:, :, :-1]
+ out3 = x[1:, :, :] - x[:-1, :, :]
+ out4 = out3[1:, :, :] - out3[:-1, :, :]
+ assert_array_equal(diff(x), out1)
+ assert_array_equal(diff(x, n=2), out2)
+ assert_array_equal(diff(x, axis=0), out3)
+ assert_array_equal(diff(x, n=2, axis=0), out4)
+
+ def test_n(self):
+ x = list(range(3))
+ assert_raises(ValueError, diff, x, n=-1)
+ output = [diff(x, n=n) for n in range(1, 5)]
+ expected = [[1, 1], [0], [], []]
+ assert_(diff(x, n=0) is x)
+ for n, (expected_n, output_n) in enumerate(zip(expected, output), start=1):
+ assert_(type(output_n) is np.ndarray)
+ assert_array_equal(output_n, expected_n)
+ assert_equal(output_n.dtype, np.int_)
+ assert_equal(len(output_n), max(0, len(x) - n))
+
+ def test_times(self):
+ x = np.arange('1066-10-13', '1066-10-16', dtype=np.datetime64)
+ expected = [
+ np.array([1, 1], dtype='timedelta64[D]'),
+ np.array([0], dtype='timedelta64[D]'),
+ ]
+ expected.extend([np.array([], dtype='timedelta64[D]')] * 3)
+ for n, exp in enumerate(expected, start=1):
+ out = diff(x, n=n)
+ assert_array_equal(out, exp)
+ assert_equal(out.dtype, exp.dtype)
+
+ def test_subclass(self):
+ x = ma.array([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]],
+ mask=[[False, False], [True, False],
+ [False, True], [True, True], [False, False]])
+ out = diff(x)
+ assert_array_equal(out.data, [[1], [1], [1], [1], [1]])
+ assert_array_equal(out.mask, [[False], [True],
+ [True], [True], [False]])
+ assert_(type(out) is type(x))
+
+ out3 = diff(x, n=3)
+ assert_array_equal(out3.data, [[], [], [], [], []])
+ assert_array_equal(out3.mask, [[], [], [], [], []])
+ assert_(type(out3) is type(x))
+
+ def test_prepend(self):
+ x = np.arange(5) + 1
+ assert_array_equal(diff(x, prepend=0), np.ones(5))
+ assert_array_equal(diff(x, prepend=[0]), np.ones(5))
+ assert_array_equal(np.cumsum(np.diff(x, prepend=0)), x)
+ assert_array_equal(diff(x, prepend=[-1, 0]), np.ones(6))
+
+ x = np.arange(4).reshape(2, 2)
+ result = np.diff(x, axis=1, prepend=0)
+ expected = [[0, 1], [2, 1]]
+ assert_array_equal(result, expected)
+ result = np.diff(x, axis=1, prepend=[[0], [0]])
+ assert_array_equal(result, expected)
+
+ result = np.diff(x, axis=0, prepend=0)
+ expected = [[0, 1], [2, 2]]
+ assert_array_equal(result, expected)
+ result = np.diff(x, axis=0, prepend=[[0, 0]])
+ assert_array_equal(result, expected)
+
+ assert_raises(ValueError, np.diff, x, prepend=np.zeros((3, 3)))
+
+ assert_raises(AxisError, diff, x, prepend=0, axis=3)
+
+ def test_append(self):
+ x = np.arange(5)
+ result = diff(x, append=0)
+ expected = [1, 1, 1, 1, -4]
+ assert_array_equal(result, expected)
+ result = diff(x, append=[0])
+ assert_array_equal(result, expected)
+ result = diff(x, append=[0, 2])
+ expected = expected + [2]
+ assert_array_equal(result, expected)
+
+ x = np.arange(4).reshape(2, 2)
+ result = np.diff(x, axis=1, append=0)
+ expected = [[1, -1], [1, -3]]
+ assert_array_equal(result, expected)
+ result = np.diff(x, axis=1, append=[[0], [0]])
+ assert_array_equal(result, expected)
+
+ result = np.diff(x, axis=0, append=0)
+ expected = [[2, 2], [-2, -3]]
+ assert_array_equal(result, expected)
+ result = np.diff(x, axis=0, append=[[0, 0]])
+ assert_array_equal(result, expected)
+
+ assert_raises(ValueError, np.diff, x, append=np.zeros((3, 3)))
+
+ assert_raises(AxisError, diff, x, append=0, axis=3)
+
+
+class TestDelete:
+
+ def setup_method(self):
+ self.a = np.arange(5)
+ self.nd_a = np.arange(5).repeat(2).reshape(1, 5, 2)
+
+ def _check_inverse_of_slicing(self, indices):
+ a_del = delete(self.a, indices)
+ nd_a_del = delete(self.nd_a, indices, axis=1)
+ msg = f'Delete failed for obj: {indices!r}'
+ assert_array_equal(setxor1d(a_del, self.a[indices, ]), self.a,
+ err_msg=msg)
+ xor = setxor1d(nd_a_del[0, :, 0], self.nd_a[0, indices, 0])
+ assert_array_equal(xor, self.nd_a[0, :, 0], err_msg=msg)
+
+ def test_slices(self):
+ lims = [-6, -2, 0, 1, 2, 4, 5]
+ steps = [-3, -1, 1, 3]
+ for start in lims:
+ for stop in lims:
+ for step in steps:
+ s = slice(start, stop, step)
+ self._check_inverse_of_slicing(s)
+
+ def test_fancy(self):
+ self._check_inverse_of_slicing(np.array([[0, 1], [2, 1]]))
+ with pytest.raises(IndexError):
+ delete(self.a, [100])
+ with pytest.raises(IndexError):
+ delete(self.a, [-100])
+
+ self._check_inverse_of_slicing([0, -1, 2, 2])
+
+ self._check_inverse_of_slicing([True, False, False, True, False])
+
+ # not legal, indexing with these would change the dimension
+ with pytest.raises(ValueError):
+ delete(self.a, True)
+ with pytest.raises(ValueError):
+ delete(self.a, False)
+
+ # not enough items
+ with pytest.raises(ValueError):
+ delete(self.a, [False] * 4)
+
+ def test_single(self):
+ self._check_inverse_of_slicing(0)
+ self._check_inverse_of_slicing(-4)
+
+ def test_0d(self):
+ a = np.array(1)
+ with pytest.raises(AxisError):
+ delete(a, [], axis=0)
+ with pytest.raises(TypeError):
+ delete(a, [], axis="nonsense")
+
+ def test_subclass(self):
+ class SubClass(np.ndarray):
+ pass
+ a = self.a.view(SubClass)
+ assert_(isinstance(delete(a, 0), SubClass))
+ assert_(isinstance(delete(a, []), SubClass))
+ assert_(isinstance(delete(a, [0, 1]), SubClass))
+ assert_(isinstance(delete(a, slice(1, 2)), SubClass))
+ assert_(isinstance(delete(a, slice(1, -2)), SubClass))
+
+ def test_array_order_preserve(self):
+ # See gh-7113
+ k = np.arange(10).reshape(2, 5, order='F')
+ m = delete(k, slice(60, None), axis=1)
+
+ # 'k' is Fortran ordered, and 'm' should have the
+ # same ordering as 'k' and NOT become C ordered
+ assert_equal(m.flags.c_contiguous, k.flags.c_contiguous)
+ assert_equal(m.flags.f_contiguous, k.flags.f_contiguous)
+
+ def test_index_floats(self):
+ with pytest.raises(IndexError):
+ np.delete([0, 1, 2], np.array([1.0, 2.0]))
+ with pytest.raises(IndexError):
+ np.delete([0, 1, 2], np.array([], dtype=float))
+
+ @pytest.mark.parametrize("indexer", [np.array([1]), [1]])
+ def test_single_item_array(self, indexer):
+ a_del_int = delete(self.a, 1)
+ a_del = delete(self.a, indexer)
+ assert_equal(a_del_int, a_del)
+
+ nd_a_del_int = delete(self.nd_a, 1, axis=1)
+ nd_a_del = delete(self.nd_a, np.array([1]), axis=1)
+ assert_equal(nd_a_del_int, nd_a_del)
+
+ def test_single_item_array_non_int(self):
+ # Special handling for integer arrays must not affect non-integer ones.
+ # If `False` was cast to `0` it would delete the element:
+ res = delete(np.ones(1), np.array([False]))
+ assert_array_equal(res, np.ones(1))
+
+ # Test the more complicated (with axis) case from gh-21840
+ x = np.ones((3, 1))
+ false_mask = np.array([False], dtype=bool)
+ true_mask = np.array([True], dtype=bool)
+
+ res = delete(x, false_mask, axis=-1)
+ assert_array_equal(res, x)
+ res = delete(x, true_mask, axis=-1)
+ assert_array_equal(res, x[:, :0])
+
+ # Object or e.g. timedeltas should *not* be allowed
+ with pytest.raises(IndexError):
+ delete(np.ones(2), np.array([0], dtype=object))
+
+ with pytest.raises(IndexError):
+ # timedeltas are sometimes "integral", but clearly not allowed:
+ delete(np.ones(2), np.array([0], dtype="m8[ns]"))
+
+
+class TestGradient:
+
+ def test_basic(self):
+ v = [[1, 1], [3, 4]]
+ x = np.array(v)
+ dx = [np.array([[2., 3.], [2., 3.]]),
+ np.array([[0., 0.], [1., 1.]])]
+ assert_array_equal(gradient(x), dx)
+ assert_array_equal(gradient(v), dx)
+
+ def test_args(self):
+ dx = np.cumsum(np.ones(5))
+ dx_uneven = [1., 2., 5., 9., 11.]
+ f_2d = np.arange(25).reshape(5, 5)
+
+ # distances must be scalars or match the array's size along the axis
+ gradient(np.arange(5), 3.)
+ gradient(np.arange(5), np.array(3.))
+ gradient(np.arange(5), dx)
+ # dy is set equal to dx because it is a scalar
+ gradient(f_2d, 1.5)
+ gradient(f_2d, np.array(1.5))
+
+ gradient(f_2d, dx_uneven, dx_uneven)
+ # mix between even and uneven spaces and
+ # mix between scalar and vector
+ gradient(f_2d, dx, 2)
+
+ # 2D but axis specified
+ gradient(f_2d, dx, axis=1)
+
+ # 2d coordinate arguments are not yet allowed
+ assert_raises_regex(ValueError, '.*scalars or 1d',
+ gradient, f_2d, np.stack([dx] * 2, axis=-1), 1)
+
+ def test_badargs(self):
+ f_2d = np.arange(25).reshape(5, 5)
+ x = np.cumsum(np.ones(5))
+
+ # wrong sizes
+ assert_raises(ValueError, gradient, f_2d, x, np.ones(2))
+ assert_raises(ValueError, gradient, f_2d, 1, np.ones(2))
+ assert_raises(ValueError, gradient, f_2d, np.ones(2), np.ones(2))
+ # wrong number of arguments
+ assert_raises(TypeError, gradient, f_2d, x)
+ assert_raises(TypeError, gradient, f_2d, x, axis=(0, 1))
+ assert_raises(TypeError, gradient, f_2d, x, x, x)
+ assert_raises(TypeError, gradient, f_2d, 1, 1, 1)
+ assert_raises(TypeError, gradient, f_2d, x, x, axis=1)
+ assert_raises(TypeError, gradient, f_2d, 1, 1, axis=1)
+
+ def test_datetime64(self):
+ # Make sure gradient() can handle special types like datetime64
+ x = np.array(
+ ['1910-08-16', '1910-08-11', '1910-08-10', '1910-08-12',
+ '1910-10-12', '1910-12-12', '1912-12-12'],
+ dtype='datetime64[D]')
+ dx = np.array(
+ [-5, -3, 0, 31, 61, 396, 731],
+ dtype='timedelta64[D]')
+ assert_array_equal(gradient(x), dx)
+ assert_(dx.dtype == np.dtype('timedelta64[D]'))
+
+ def test_masked(self):
+ # Make sure that gradient supports subclasses like masked arrays
+ x = np.ma.array([[1, 1], [3, 4]],
+ mask=[[False, False], [False, False]])
+ out = gradient(x)[0]
+ assert_equal(type(out), type(x))
+ # And make sure that the output and input don't have aliased mask
+ # arrays
+ assert_(x._mask is not out._mask)
+ # Also check that edge_order=2 doesn't alter the original mask
+ x2 = np.ma.arange(5)
+ x2[2] = np.ma.masked
+ np.gradient(x2, edge_order=2)
+ assert_array_equal(x2.mask, [False, False, True, False, False])
+
+ def test_second_order_accurate(self):
+ # Testing that the relative numerical error is less than 3% for
+ # this example problem. This corresponds to second order
+ # accurate finite differences for all interior and boundary
+ # points.
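+ # (edge_order=2 selects one-sided second-order stencils at the two
+ # boundary points, matching the interior central differences)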
+ x = np.linspace(0, 1, 10)
+ dx = x[1] - x[0]
+ y = 2 * x ** 3 + 4 * x ** 2 + 2 * x
+ analytical = 6 * x ** 2 + 8 * x + 2
+ num_error = np.abs((np.gradient(y, dx, edge_order=2) / analytical) - 1)
+ assert_(np.all(num_error < 0.03))
+
+ # test with unevenly spaced
+ np.random.seed(0)
+ x = np.sort(np.random.random(10))
+ y = 2 * x ** 3 + 4 * x ** 2 + 2 * x
+ analytical = 6 * x ** 2 + 8 * x + 2
+ num_error = np.abs((np.gradient(y, x, edge_order=2) / analytical) - 1)
+ assert_(np.all(num_error < 0.03))
+
+ def test_spacing(self):
+ f = np.array([0, 2., 3., 4., 5., 5.])
+ f = np.tile(f, (6, 1)) + f.reshape(-1, 1)
+ x_uneven = np.array([0., 0.5, 1., 3., 5., 7.])
+ x_even = np.arange(6.)
+
+ fdx_even_ord1 = np.tile([2., 1.5, 1., 1., 0.5, 0.], (6, 1))
+ fdx_even_ord2 = np.tile([2.5, 1.5, 1., 1., 0.5, -0.5], (6, 1))
+ fdx_uneven_ord1 = np.tile([4., 3., 1.7, 0.5, 0.25, 0.], (6, 1))
+ fdx_uneven_ord2 = np.tile([5., 3., 1.7, 0.5, 0.25, -0.25], (6, 1))
+
+ # evenly spaced
+ for edge_order, exp_res in [(1, fdx_even_ord1), (2, fdx_even_ord2)]:
+ res1 = gradient(f, 1., axis=(0, 1), edge_order=edge_order)
+ res2 = gradient(f, x_even, x_even,
+ axis=(0, 1), edge_order=edge_order)
+ res3 = gradient(f, x_even, x_even,
+ axis=None, edge_order=edge_order)
+ assert_array_equal(res1, res2)
+ assert_array_equal(res2, res3)
+ assert_almost_equal(res1[0], exp_res.T)
+ assert_almost_equal(res1[1], exp_res)
+
+ res1 = gradient(f, 1., axis=0, edge_order=edge_order)
+ res2 = gradient(f, x_even, axis=0, edge_order=edge_order)
+ assert_(res1.shape == res2.shape)
+ assert_almost_equal(res2, exp_res.T)
+
+ res1 = gradient(f, 1., axis=1, edge_order=edge_order)
+ res2 = gradient(f, x_even, axis=1, edge_order=edge_order)
+ assert_(res1.shape == res2.shape)
+ assert_array_equal(res2, exp_res)
+
+ # unevenly spaced
+ for edge_order, exp_res in [(1, fdx_uneven_ord1), (2, fdx_uneven_ord2)]:
+ res1 = gradient(f, x_uneven, x_uneven,
+ axis=(0, 1), edge_order=edge_order)
+ res2 = gradient(f, x_uneven, x_uneven,
+ axis=None, edge_order=edge_order)
+ assert_array_equal(res1, res2)
+ assert_almost_equal(res1[0], exp_res.T)
+ assert_almost_equal(res1[1], exp_res)
+
+ res1 = gradient(f, x_uneven, axis=0, edge_order=edge_order)
+ assert_almost_equal(res1, exp_res.T)
+
+ res1 = gradient(f, x_uneven, axis=1, edge_order=edge_order)
+ assert_almost_equal(res1, exp_res)
+
+ # mixed
+ res1 = gradient(f, x_even, x_uneven, axis=(0, 1), edge_order=1)
+ res2 = gradient(f, x_uneven, x_even, axis=(1, 0), edge_order=1)
+ assert_array_equal(res1[0], res2[1])
+ assert_array_equal(res1[1], res2[0])
+ assert_almost_equal(res1[0], fdx_even_ord1.T)
+ assert_almost_equal(res1[1], fdx_uneven_ord1)
+
+ res1 = gradient(f, x_even, x_uneven, axis=(0, 1), edge_order=2)
+ res2 = gradient(f, x_uneven, x_even, axis=(1, 0), edge_order=2)
+ assert_array_equal(res1[0], res2[1])
+ assert_array_equal(res1[1], res2[0])
+ assert_almost_equal(res1[0], fdx_even_ord2.T)
+ assert_almost_equal(res1[1], fdx_uneven_ord2)
+
+ def test_specific_axes(self):
+ # Testing that gradient can work on a given axis only
+ v = [[1, 1], [3, 4]]
+ x = np.array(v)
+ dx = [np.array([[2., 3.], [2., 3.]]),
+ np.array([[0., 0.], [1., 1.]])]
+ assert_array_equal(gradient(x, axis=0), dx[0])
+ assert_array_equal(gradient(x, axis=1), dx[1])
+ assert_array_equal(gradient(x, axis=-1), dx[1])
+ assert_array_equal(gradient(x, axis=(1, 0)), [dx[1], dx[0]])
+
+ # test axis=None which means all axes
+ assert_almost_equal(gradient(x, axis=None), [dx[0], dx[1]])
+ # and is the same as no axis keyword given
+ assert_almost_equal(gradient(x, axis=None), gradient(x))
+
+ # test vararg order
+ assert_array_equal(gradient(x, 2, 3, axis=(1, 0)),
+ [dx[1] / 2.0, dx[0] / 3.0])
+ # test maximal number of varargs
+ assert_raises(TypeError, gradient, x, 1, 2, axis=1)
+
+ assert_raises(AxisError, gradient, x, axis=3)
+ assert_raises(AxisError, gradient, x, axis=-3)
+ # assert_raises(TypeError, gradient, x, axis=[1,])
+
+ def test_timedelta64(self):
+ # Make sure gradient() can handle special types like timedelta64
+ x = np.array(
+ [-5, -3, 10, 12, 61, 321, 300],
+ dtype='timedelta64[D]')
+ dx = np.array(
+ [2, 7, 7, 25, 154, 119, -21],
+ dtype='timedelta64[D]')
+ assert_array_equal(gradient(x), dx)
+ assert_(dx.dtype == np.dtype('timedelta64[D]'))
+
+ def test_inexact_dtypes(self):
+ for dt in [np.float16, np.float32, np.float64]:
+ # dtypes should not be promoted differently from what diff does
+ x = np.array([1, 2, 3], dtype=dt)
+ assert_equal(gradient(x).dtype, np.diff(x).dtype)
+
+ def test_values(self):
+ # needs at least 2 points for edge_order == 1
+ gradient(np.arange(2), edge_order=1)
+ # needs at least 3 points for edge_order == 2
+ gradient(np.arange(3), edge_order=2)
+
+ assert_raises(ValueError, gradient, np.arange(0), edge_order=1)
+ assert_raises(ValueError, gradient, np.arange(0), edge_order=2)
+ assert_raises(ValueError, gradient, np.arange(1), edge_order=1)
+ assert_raises(ValueError, gradient, np.arange(1), edge_order=2)
+ assert_raises(ValueError, gradient, np.arange(2), edge_order=2)
+
+ @pytest.mark.parametrize('f_dtype', [np.uint8, np.uint16,
+ np.uint32, np.uint64])
+ def test_f_decreasing_unsigned_int(self, f_dtype):
+ f = np.array([5, 4, 3, 2, 1], dtype=f_dtype)
+ g = gradient(f)
+ assert_array_equal(g, [-1] * len(f))
+
+ @pytest.mark.parametrize('f_dtype', [np.int8, np.int16,
+ np.int32, np.int64])
+ def test_f_signed_int_big_jump(self, f_dtype):
+ maxint = np.iinfo(f_dtype).max
+ x = np.array([1, 3])
+ f = np.array([-1, maxint], dtype=f_dtype)
+ dfdx = gradient(f, x)
+ assert_array_equal(dfdx, [(maxint + 1) // 2] * 2)
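+ # (maxint - (-1)) / (3 - 1) == (maxint + 1) // 2; the difference
+ # would overflow f_dtype, so gradient must avoid computing it there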
+
+ @pytest.mark.parametrize('x_dtype', [np.uint8, np.uint16,
+ np.uint32, np.uint64])
+ def test_x_decreasing_unsigned(self, x_dtype):
+ x = np.array([3, 2, 1], dtype=x_dtype)
+ f = np.array([0, 2, 4])
+ dfdx = gradient(f, x)
+ assert_array_equal(dfdx, [-2] * len(x))
+
+ @pytest.mark.parametrize('x_dtype', [np.int8, np.int16,
+ np.int32, np.int64])
+ def test_x_signed_int_big_jump(self, x_dtype):
+ minint = np.iinfo(x_dtype).min
+ maxint = np.iinfo(x_dtype).max
+ x = np.array([-1, maxint], dtype=x_dtype)
+ f = np.array([minint // 2, 0])
+ dfdx = gradient(f, x)
+ assert_array_equal(dfdx, [0.5, 0.5])
+
+ def test_return_type(self):
+ res = np.gradient(([1, 2], [2, 3]))
+ assert type(res) is tuple
+
+
+class TestAngle:
+
+ def test_basic(self):
+ x = [1 + 3j, np.sqrt(2) / 2.0 + 1j * np.sqrt(2) / 2,
+ 1, 1j, -1, -1j, 1 - 3j, -1 + 3j]
+ y = angle(x)
+ yo = [
+ np.arctan(3.0 / 1.0),
+ np.arctan(1.0), 0, np.pi / 2, np.pi, -np.pi / 2.0,
+ -np.arctan(3.0 / 1.0), np.pi - np.arctan(3.0 / 1.0)]
+ z = angle(x, deg=True)
+ zo = np.array(yo) * 180 / np.pi
+ assert_array_almost_equal(y, yo, 11)
+ assert_array_almost_equal(z, zo, 11)
+
+ def test_subclass(self):
+ x = np.ma.array([1 + 3j, 1, np.sqrt(2) / 2 * (1 + 1j)])
+ x[1] = np.ma.masked
+ expected = np.ma.array([np.arctan(3.0 / 1.0), 0, np.arctan(1.0)])
+ expected[1] = np.ma.masked
+ actual = angle(x)
+ assert_equal(type(actual), type(expected))
+ assert_equal(actual.mask, expected.mask)
+ assert_equal(actual, expected)
+
+
+class TestTrimZeros:
+
+ a = np.array([0, 0, 1, 0, 2, 3, 4, 0])
+ b = a.astype(float)
+ c = a.astype(complex)
+ d = a.astype(object)
+
+ def values(self):
+ attr_names = ('a', 'b', 'c', 'd')
+ return (getattr(self, name) for name in attr_names)
+
+ def test_basic(self):
+ slc = np.s_[2:-1]
+ for arr in self.values():
+ res = trim_zeros(arr)
+ assert_array_equal(res, arr[slc])
+
+ def test_leading_skip(self):
+ slc = np.s_[:-1]
+ for arr in self.values():
+ res = trim_zeros(arr, trim='b')
+ assert_array_equal(res, arr[slc])
+
+ def test_trailing_skip(self):
+ slc = np.s_[2:]
+ for arr in self.values():
+ res = trim_zeros(arr, trim='F')
+ assert_array_equal(res, arr[slc])
+
+ def test_all_zero(self):
+ for _arr in self.values():
+ arr = np.zeros_like(_arr, dtype=_arr.dtype)
+
+ res1 = trim_zeros(arr, trim='B')
+ assert len(res1) == 0
+
+ res2 = trim_zeros(arr, trim='f')
+ assert len(res2) == 0
+
+ def test_size_zero(self):
+ arr = np.zeros(0)
+ res = trim_zeros(arr)
+ assert_array_equal(arr, res)
+
+ @pytest.mark.parametrize(
+ 'arr',
+ [np.array([0, 2**62, 0]),
+ np.array([0, 2**63, 0]),
+ np.array([0, 2**64, 0])]
+ )
+ def test_overflow(self, arr):
+ slc = np.s_[1:2]
+ res = trim_zeros(arr)
+ assert_array_equal(res, arr[slc])
+
+ def test_no_trim(self):
+ arr = np.array([None, 1, None])
+ res = trim_zeros(arr)
+ assert_array_equal(arr, res)
+
+ def test_list_to_list(self):
+ res = trim_zeros(self.a.tolist())
+ assert isinstance(res, list)
+
+ @pytest.mark.parametrize("ndim", (0, 1, 2, 3, 10))
+ def test_nd_basic(self, ndim):
+ a = np.ones((2,) * ndim)
+ b = np.pad(a, (2, 1), mode="constant", constant_values=0)
+ res = trim_zeros(b, axis=None)
+ assert_array_equal(a, res)
+
+ @pytest.mark.parametrize("ndim", (0, 1, 2, 3))
+ def test_allzero(self, ndim):
+ a = np.zeros((3,) * ndim)
+ res = trim_zeros(a, axis=None)
+ assert_array_equal(res, np.zeros((0,) * ndim))
+
+ def test_trim_arg(self):
+ a = np.array([0, 1, 2, 0])
+
+ res = trim_zeros(a, trim='f')
+ assert_array_equal(res, [1, 2, 0])
+
+ res = trim_zeros(a, trim='b')
+ assert_array_equal(res, [0, 1, 2])
+
+ @pytest.mark.parametrize("trim", ("front", ""))
+ def test_unexpected_trim_value(self, trim):
+ arr = self.a
+ with pytest.raises(ValueError, match=r"unexpected character\(s\) in `trim`"):
+ trim_zeros(arr, trim=trim)
+
+
+class TestExtins:
+
+ def test_basic(self):
+ a = np.array([1, 3, 2, 1, 2, 3, 3])
+ b = extract(a > 1, a)
+ assert_array_equal(b, [3, 2, 2, 3, 3])
+
+ def test_place(self):
+ # Make sure that non-np.ndarray objects
+ # raise an error instead of doing nothing
+ assert_raises(TypeError, place, [1, 2, 3], [True, False], [0, 1])
+
+ a = np.array([1, 4, 3, 2, 5, 8, 7])
+ place(a, [0, 1, 0, 1, 0, 1, 0], [2, 4, 6])
+ assert_array_equal(a, [1, 2, 3, 4, 5, 6, 7])
+
+ place(a, np.zeros(7), [])
+ assert_array_equal(a, np.arange(1, 8))
+
+ place(a, [1, 0, 1, 0, 1, 0, 1], [8, 9])
+ assert_array_equal(a, [8, 2, 9, 4, 8, 6, 9])
+ assert_raises_regex(ValueError, "Cannot insert from an empty array",
+ lambda: place(a, [0, 0, 0, 0, 0, 1, 0], []))
+
+ # See Issue #6974
+ a = np.array(['12', '34'])
+ place(a, [0, 1], '9')
+ assert_array_equal(a, ['12', '9'])
+
+ def test_both(self):
+ a = rand(10)
+ mask = a > 0.5
+ ac = a.copy()
+ c = extract(mask, a)
+ place(a, mask, 0)
+ place(a, mask, c)
+ assert_array_equal(a, ac)
+
+
+# _foo1 and _foo2 are used in some tests in TestVectorize.
+
+def _foo1(x, y=1.0):
+ return y * math.floor(x)
+
+
+def _foo2(x, y=1.0, z=0.0):
+ return y * math.floor(x) + z
+
+
+class TestVectorize:
+
+ def test_simple(self):
+ def addsubtract(a, b):
+ if a > b:
+ return a - b
+ else:
+ return a + b
+
+ f = vectorize(addsubtract)
+ r = f([0, 3, 6, 9], [1, 3, 5, 7])
+ assert_array_equal(r, [1, 6, 1, 2])
+
+ def test_scalar(self):
+ def addsubtract(a, b):
+ if a > b:
+ return a - b
+ else:
+ return a + b
+
+ f = vectorize(addsubtract)
+ r = f([0, 3, 6, 9], 5)
+ assert_array_equal(r, [5, 8, 1, 4])
+
+ def test_large(self):
+ x = np.linspace(-3, 2, 10000)
+ f = vectorize(lambda x: x)
+ y = f(x)
+ assert_array_equal(y, x)
+
+ def test_ufunc(self):
+ f = vectorize(math.cos)
+ args = np.array([0, 0.5 * np.pi, np.pi, 1.5 * np.pi, 2 * np.pi])
+ r1 = f(args)
+ r2 = np.cos(args)
+ assert_array_almost_equal(r1, r2)
+
+ def test_keywords(self):
+
+ def foo(a, b=1):
+ return a + b
+
+ f = vectorize(foo)
+ args = np.array([1, 2, 3])
+ r1 = f(args)
+ r2 = np.array([2, 3, 4])
+ assert_array_equal(r1, r2)
+ r1 = f(args, 2)
+ r2 = np.array([3, 4, 5])
+ assert_array_equal(r1, r2)
+
+ def test_keywords_with_otypes_order1(self):
+ # gh-1620: The second call of f would crash with
+ # `ValueError: invalid number of arguments`.
+ f = vectorize(_foo1, otypes=[float])
+ # We're testing the caching of ufuncs by vectorize, so the order
+ # of these function calls is an important part of the test.
+ r1 = f(np.arange(3.0), 1.0)
+ r2 = f(np.arange(3.0))
+ assert_array_equal(r1, r2)
+
+ def test_keywords_with_otypes_order2(self):
+ # gh-1620: The second call of f would crash with
+ # `ValueError: non-broadcastable output operand with shape ()
+ # doesn't match the broadcast shape (3,)`.
+ f = vectorize(_foo1, otypes=[float])
+ # We're testing the caching of ufuncs by vectorize, so the order
+ # of these function calls is an important part of the test.
+ r1 = f(np.arange(3.0))
+ r2 = f(np.arange(3.0), 1.0)
+ assert_array_equal(r1, r2)
+
+ def test_keywords_with_otypes_order3(self):
+ # gh-1620: The third call of f would crash with
+ # `ValueError: invalid number of arguments`.
+ f = vectorize(_foo1, otypes=[float])
+ # We're testing the caching of ufuncs by vectorize, so the order
+ # of these function calls is an important part of the test.
+ r1 = f(np.arange(3.0))
+ r2 = f(np.arange(3.0), y=1.0)
+ r3 = f(np.arange(3.0))
+ assert_array_equal(r1, r2)
+ assert_array_equal(r1, r3)
+
+ def test_keywords_with_otypes_several_kwd_args1(self):
+ # gh-1620 Make sure different uses of keyword arguments
+ # don't break the vectorized function.
+ f = vectorize(_foo2, otypes=[float])
+ # We're testing the caching of ufuncs by vectorize, so the order
+ # of these function calls is an important part of the test.
+ r1 = f(10.4, z=100)
+ r2 = f(10.4, y=-1)
+ r3 = f(10.4)
+ assert_equal(r1, _foo2(10.4, z=100))
+ assert_equal(r2, _foo2(10.4, y=-1))
+ assert_equal(r3, _foo2(10.4))
+
+ def test_keywords_with_otypes_several_kwd_args2(self):
+ # gh-1620 Make sure different uses of keyword arguments
+ # don't break the vectorized function.
+ f = vectorize(_foo2, otypes=[float])
+ # We're testing the caching of ufuncs by vectorize, so the order
+ # of these function calls is an important part of the test.
+ r1 = f(z=100, x=10.4, y=-1)
+ r2 = f(1, 2, 3)
+ assert_equal(r1, _foo2(z=100, x=10.4, y=-1))
+ assert_equal(r2, _foo2(1, 2, 3))
+
+ def test_keywords_no_func_code(self):
+ # This needs to test a function that has keywords but
+ # no func_code attribute, since otherwise vectorize will
+ # inspect the func_code.
+ import random
+ try:
+ vectorize(random.randrange) # Should succeed
+ except Exception:
+ raise AssertionError
+
+ def test_keywords2_ticket_2100(self):
+ # Test kwarg support: enhancement ticket 2100
+
+ def foo(a, b=1):
+ return a + b
+
+ f = vectorize(foo)
+ args = np.array([1, 2, 3])
+ r1 = f(a=args)
+ r2 = np.array([2, 3, 4])
+ assert_array_equal(r1, r2)
+ r1 = f(b=1, a=args)
+ assert_array_equal(r1, r2)
+ r1 = f(args, b=2)
+ r2 = np.array([3, 4, 5])
+ assert_array_equal(r1, r2)
+
+ def test_keywords3_ticket_2100(self):
+ # Test excluded with mixed positional and kwargs: ticket 2100
+ def mypolyval(x, p):
+ _p = list(p)
+ res = _p.pop(0)
+ while _p:
+ res = res * x + _p.pop(0)
+ return res
+
+ vpolyval = np.vectorize(mypolyval, excluded=['p', 1])
+ ans = [3, 6]
+ assert_array_equal(ans, vpolyval(x=[0, 1], p=[1, 2, 3]))
+ assert_array_equal(ans, vpolyval([0, 1], p=[1, 2, 3]))
+ assert_array_equal(ans, vpolyval([0, 1], [1, 2, 3]))
+
+ def test_keywords4_ticket_2100(self):
+ # Test vectorizing a function with no positional args.
+ @vectorize
+ def f(**kw):
+ res = 1.0
+ for _k in kw:
+ res *= kw[_k]
+ return res
+
+ assert_array_equal(f(a=[1, 2], b=[3, 4]), [3, 8])
+
+ def test_keywords5_ticket_2100(self):
+ # Test vectorizing a function that takes only positional varargs.
+ @vectorize
+ def f(*v):
+ return np.prod(v)
+
+ assert_array_equal(f([1, 2], [3, 4]), [3, 8])
+
+ def test_coverage1_ticket_2100(self):
+ def foo():
+ return 1
+
+ f = vectorize(foo)
+ assert_array_equal(f(), 1)
+
+ def test_assigning_docstring(self):
+ def foo(x):
+ """Original documentation"""
+ return x
+
+ f = vectorize(foo)
+ assert_equal(f.__doc__, foo.__doc__)
+
+ doc = "Provided documentation"
+ f = vectorize(foo, doc=doc)
+ assert_equal(f.__doc__, doc)
+
+ def test_UnboundMethod_ticket_1156(self):
+ # Regression test for issue 1156
+ class Foo:
+ b = 2
+
+ def bar(self, a):
+ return a ** self.b
+
+ assert_array_equal(vectorize(Foo().bar)(np.arange(9)),
+ np.arange(9) ** 2)
+ assert_array_equal(vectorize(Foo.bar)(Foo(), np.arange(9)),
+ np.arange(9) ** 2)
+
+ def test_execution_order_ticket_1487(self):
+ # Regression test for dependence on execution order: issue 1487
+ f1 = vectorize(lambda x: x)
+ res1a = f1(np.arange(3))
+ res1b = f1(np.arange(0.1, 3))
+ f2 = vectorize(lambda x: x)
+ res2b = f2(np.arange(0.1, 3))
+ res2a = f2(np.arange(3))
+ assert_equal(res1a, res2a)
+ assert_equal(res1b, res2b)
+
+ def test_string_ticket_1892(self):
+ # Test vectorization over strings: issue 1892.
+ f = np.vectorize(lambda x: x)
+ s = '0123456789' * 10
+ assert_equal(s, f(s))
+
+ def test_dtype_promotion_gh_29189(self):
+ # dtype should not be silently promoted (int32 -> int64)
+ dtypes = [np.int16, np.int32, np.int64, np.float16, np.float32, np.float64]
+
+ for dtype in dtypes:
+ x = np.asarray([1, 2, 3], dtype=dtype)
+ y = np.vectorize(lambda x: x + x)(x)
+ assert x.dtype == y.dtype
+
+ def test_cache(self):
+ # Ensure that the vectorized function is called exactly once per argument.
+ _calls = [0]
+
+ @vectorize
+ def f(x):
+ _calls[0] += 1
+ return x ** 2
+
+ f.cache = True
+ x = np.arange(5)
+ assert_array_equal(f(x), x * x)
+ assert_equal(_calls[0], len(x))
+
+ def test_otypes(self):
+ f = np.vectorize(lambda x: x)
+ f.otypes = 'i'
+ x = np.arange(5)
+ assert_array_equal(f(x), x)
+
+ def test_otypes_object_28624(self):
+ # with object otype, the vectorized function should return y
+ # wrapped into an object array
+ y = np.arange(3)
+ f = vectorize(lambda x: y, otypes=[object])
+
+ assert f(None).item() is y
+ assert f([None]).item() is y
+
+ y = [1, 2, 3]
+ f = vectorize(lambda x: y, otypes=[object])
+
+ assert f(None).item() is y
+ assert f([None]).item() is y
+
+ def test_parse_gufunc_signature(self):
+ assert_equal(nfb._parse_gufunc_signature('(x)->()'), ([('x',)], [()]))
+ assert_equal(nfb._parse_gufunc_signature('(x,y)->()'),
+ ([('x', 'y')], [()]))
+ assert_equal(nfb._parse_gufunc_signature('(x),(y)->()'),
+ ([('x',), ('y',)], [()]))
+ assert_equal(nfb._parse_gufunc_signature('(x)->(y)'),
+ ([('x',)], [('y',)]))
+ assert_equal(nfb._parse_gufunc_signature('(x)->(y),()'),
+ ([('x',)], [('y',), ()]))
+ assert_equal(nfb._parse_gufunc_signature('(),(a,b,c),(d)->(d,e)'),
+ ([(), ('a', 'b', 'c'), ('d',)], [('d', 'e')]))
+
+ # Tests to check that whitespace is ignored
+ assert_equal(nfb._parse_gufunc_signature('(x )->()'), ([('x',)], [()]))
+ assert_equal(nfb._parse_gufunc_signature('( x , y )->( )'),
+ ([('x', 'y')], [()]))
+ assert_equal(nfb._parse_gufunc_signature('(x),( y) ->()'),
+ ([('x',), ('y',)], [()]))
+ assert_equal(nfb._parse_gufunc_signature('( x)-> (y ) '),
+ ([('x',)], [('y',)]))
+ assert_equal(nfb._parse_gufunc_signature(' (x)->( y),( )'),
+ ([('x',)], [('y',), ()]))
+ assert_equal(nfb._parse_gufunc_signature(
+ '( ), ( a, b,c ) ,( d) -> (d , e)'),
+ ([(), ('a', 'b', 'c'), ('d',)], [('d', 'e')]))
+
+ with assert_raises(ValueError):
+ nfb._parse_gufunc_signature('(x)(y)->()')
+ with assert_raises(ValueError):
+ nfb._parse_gufunc_signature('(x),(y)->')
+ with assert_raises(ValueError):
+ nfb._parse_gufunc_signature('((x))->(x)')
+
+ def test_signature_simple(self):
+ def addsubtract(a, b):
+ if a > b:
+ return a - b
+ else:
+ return a + b
+
+ f = vectorize(addsubtract, signature='(),()->()')
+ r = f([0, 3, 6, 9], [1, 3, 5, 7])
+ assert_array_equal(r, [1, 6, 1, 2])
+
+ def test_signature_mean_last(self):
+ def mean(a):
+ return a.mean()
+
+ f = vectorize(mean, signature='(n)->()')
+ r = f([[1, 3], [2, 4]])
+ assert_array_equal(r, [2, 3])
+
+ def test_signature_center(self):
+ def center(a):
+ return a - a.mean()
+
+ f = vectorize(center, signature='(n)->(n)')
+ r = f([[1, 3], [2, 4]])
+ assert_array_equal(r, [[-1, 1], [-1, 1]])
+
+ def test_signature_two_outputs(self):
+ f = vectorize(lambda x: (x, x), signature='()->(),()')
+ r = f([1, 2, 3])
+ assert_(isinstance(r, tuple) and len(r) == 2)
+ assert_array_equal(r[0], [1, 2, 3])
+ assert_array_equal(r[1], [1, 2, 3])
+
+ def test_signature_outer(self):
+ f = vectorize(np.outer, signature='(a),(b)->(a,b)')
+ r = f([1, 2], [1, 2, 3])
+ assert_array_equal(r, [[1, 2, 3], [2, 4, 6]])
+
+ r = f([[[1, 2]]], [1, 2, 3])
+ assert_array_equal(r, [[[[1, 2, 3], [2, 4, 6]]]])
+
+ r = f([[1, 0], [2, 0]], [1, 2, 3])
+ assert_array_equal(r, [[[1, 2, 3], [0, 0, 0]],
+ [[2, 4, 6], [0, 0, 0]]])
+
+ r = f([1, 2], [[1, 2, 3], [0, 0, 0]])
+ assert_array_equal(r, [[[1, 2, 3], [2, 4, 6]],
+ [[0, 0, 0], [0, 0, 0]]])
+
+ def test_signature_computed_size(self):
+ f = vectorize(lambda x: x[:-1], signature='(n)->(m)')
+ r = f([1, 2, 3])
+ assert_array_equal(r, [1, 2])
+
+ r = f([[1, 2, 3], [2, 3, 4]])
+ assert_array_equal(r, [[1, 2], [2, 3]])
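+ # the output core dimension m is independent of n; its size is
+ # inferred from the result of the first call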
+
+ def test_signature_excluded(self):
+
+ def foo(a, b=1):
+ return a + b
+
+ f = vectorize(foo, signature='()->()', excluded={'b'})
+ assert_array_equal(f([1, 2, 3]), [2, 3, 4])
+ assert_array_equal(f([1, 2, 3], b=0), [1, 2, 3])
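+ # excluded={'b'} passes b through to foo unvectorized instead of
+ # broadcasting it element-wise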
+
+ def test_signature_otypes(self):
+ f = vectorize(lambda x: x, signature='(n)->(n)', otypes=['float64'])
+ r = f([1, 2, 3])
+ assert_equal(r.dtype, np.dtype('float64'))
+ assert_array_equal(r, [1, 2, 3])
+
+ def test_signature_invalid_inputs(self):
+ f = vectorize(operator.add, signature='(n),(n)->(n)')
+ with assert_raises_regex(TypeError, 'wrong number of positional'):
+ f([1, 2])
+ with assert_raises_regex(
+ ValueError, 'does not have enough dimensions'):
+ f(1, 2)
+ with assert_raises_regex(
+ ValueError, 'inconsistent size for core dimension'):
+ f([1, 2], [1, 2, 3])
+
+ f = vectorize(operator.add, signature='()->()')
+ with assert_raises_regex(TypeError, 'wrong number of positional'):
+ f(1, 2)
+
+ def test_signature_invalid_outputs(self):
+
+ f = vectorize(lambda x: x[:-1], signature='(n)->(n)')
+ with assert_raises_regex(
+ ValueError, 'inconsistent size for core dimension'):
+ f([1, 2, 3])
+
+ f = vectorize(lambda x: x, signature='()->(),()')
+ with assert_raises_regex(ValueError, 'wrong number of outputs'):
+ f(1)
+
+ f = vectorize(lambda x: (x, x), signature='()->()')
+ with assert_raises_regex(ValueError, 'wrong number of outputs'):
+ f([1, 2])
+
+ def test_size_zero_output(self):
+ # see issue 5868
+ f = np.vectorize(lambda x: x)
+ x = np.zeros([0, 5], dtype=int)
+ with assert_raises_regex(ValueError, 'otypes'):
+ f(x)
+
+ f.otypes = 'i'
+ assert_array_equal(f(x), x)
+
+ f = np.vectorize(lambda x: x, signature='()->()')
+ with assert_raises_regex(ValueError, 'otypes'):
+ f(x)
+
+ f = np.vectorize(lambda x: x, signature='()->()', otypes='i')
+ assert_array_equal(f(x), x)
+
+ f = np.vectorize(lambda x: x, signature='(n)->(n)', otypes='i')
+ assert_array_equal(f(x), x)
+
+ f = np.vectorize(lambda x: x, signature='(n)->(n)')
+ assert_array_equal(f(x.T), x.T)
+
+ f = np.vectorize(lambda x: [x], signature='()->(n)', otypes='i')
+ with assert_raises_regex(ValueError, 'new output dimensions'):
+ f(x)
+
+ def test_subclasses(self):
+ class subclass(np.ndarray):
+ pass
+
+ m = np.array([[1., 0., 0.],
+ [0., 0., 1.],
+ [0., 1., 0.]]).view(subclass)
+ v = np.array([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]]).view(subclass)
+ # generalized (gufunc)
+ matvec = np.vectorize(np.matmul, signature='(m,m),(m)->(m)')
+ r = matvec(m, v)
+ assert_equal(type(r), subclass)
+ assert_equal(r, [[1., 3., 2.], [4., 6., 5.], [7., 9., 8.]])
+
+ # element-wise (ufunc)
+ mult = np.vectorize(lambda x, y: x * y)
+ r = mult(m, v)
+ assert_equal(type(r), subclass)
+ assert_equal(r, m * v)
+
+ def test_name(self):
+ # gh-23021
+ @np.vectorize
+ def f2(a, b):
+ return a + b
+
+ assert f2.__name__ == 'f2'
+
+ def test_decorator(self):
+ @vectorize
+ def addsubtract(a, b):
+ if a > b:
+ return a - b
+ else:
+ return a + b
+
+ r = addsubtract([0, 3, 6, 9], [1, 3, 5, 7])
+ assert_array_equal(r, [1, 6, 1, 2])
+
+ def test_docstring(self):
+ @vectorize
+ def f(x):
+ """Docstring"""
+ return x
+
+ if sys.flags.optimize < 2:
+ assert f.__doc__ == "Docstring"
+
+ def test_partial(self):
+ def foo(x, y):
+ return x + y
+
+ bar = partial(foo, 3)
+ vbar = np.vectorize(bar)
+ assert vbar(1) == 4
+
+ def test_signature_otypes_decorator(self):
+ @vectorize(signature='(n)->(n)', otypes=['float64'])
+ def f(x):
+ return x
+
+ r = f([1, 2, 3])
+ assert_equal(r.dtype, np.dtype('float64'))
+ assert_array_equal(r, [1, 2, 3])
+ assert f.__name__ == 'f'
+
+ def test_bad_input(self):
+ with assert_raises(TypeError):
+ A = np.vectorize(pyfunc=3)
+
+ def test_no_keywords(self):
+ with assert_raises(TypeError):
+ @np.vectorize("string")
+ def foo():
+ return "bar"
+
+ def test_positional_regression_9477(self):
+ # This supplies the first keyword argument as a positional, to
+ # ensure that it is still properly forwarded after the enhancement
+ # for #9477
+ f = vectorize((lambda x: x), ['float64'])
+ r = f([2])
+ assert_equal(r.dtype, np.dtype('float64'))
+
+ def test_datetime_conversion(self):
+ otype = "datetime64[ns]"
+ arr = np.array(['2024-01-01', '2024-01-02', '2024-01-03'],
+ dtype='datetime64[ns]')
+ assert_array_equal(np.vectorize(lambda x: x, signature="(i)->(j)",
+ otypes=[otype])(arr), arr)
+
+
+class TestLeaks:
+ class A:
+ iters = 20
+
+ def bound(self, *args):
+ return 0
+
+ @staticmethod
+ def unbound(*args):
+ return 0
+
+ @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
+ @pytest.mark.skipif(NOGIL_BUILD,
+ reason=("Functions are immortalized if a thread is "
+ "launched, making this test flaky"))
+ @pytest.mark.parametrize('name, incr', [
+ ('bound', A.iters),
+ ('unbound', 0),
+ ])
+ def test_frompyfunc_leaks(self, name, incr):
+ # exposed in gh-11867 as np.vectorize, but the problem stems from
+ # frompyfunc.
+ # class.attribute = np.frompyfunc(<method>) creates a
+ # reference cycle if <method> is a bound class method.
+ # It requires a gc collection cycle to break the cycle.
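+ # (the cycle is a.f -> ufunc -> bound method -> a, so the instance
+ # can only be reclaimed by the cyclic garbage collector)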
+ import gc
+ A_func = getattr(self.A, name)
+ gc.disable()
+ try:
+ refcount = sys.getrefcount(A_func)
+ for i in range(self.A.iters):
+ a = self.A()
+ a.f = np.frompyfunc(getattr(a, name), 1, 1)
+ out = a.f(np.arange(10))
+ a = None
+ # A.func is part of a reference cycle if incr is non-zero
+ assert_equal(sys.getrefcount(A_func), refcount + incr)
+ for i in range(5):
+ gc.collect()
+ assert_equal(sys.getrefcount(A_func), refcount)
+ finally:
+ gc.enable()
+
+
+class TestDigitize:
+
+ def test_forward(self):
+ x = np.arange(-6, 5)
+ bins = np.arange(-5, 5)
+ assert_array_equal(digitize(x, bins), np.arange(11))
+
+ def test_reverse(self):
+ x = np.arange(5, -6, -1)
+ bins = np.arange(5, -5, -1)
+ assert_array_equal(digitize(x, bins), np.arange(11))
+
+ def test_random(self):
+ x = rand(10)
+ bins = np.linspace(x.min(), x.max(), 10)
+ assert_(np.all(digitize(x, bins) != 0))
+
+ def test_right_basic(self):
+ x = [1, 5, 4, 10, 8, 11, 0]
+ bins = [1, 5, 10]
+ default_answer = [1, 2, 1, 3, 2, 3, 0]
+ assert_array_equal(digitize(x, bins), default_answer)
+ right_answer = [0, 1, 1, 2, 2, 3, 0]
+ assert_array_equal(digitize(x, bins, True), right_answer)
+
+ def test_right_open(self):
+ x = np.arange(-6, 5)
+ bins = np.arange(-6, 4)
+ assert_array_equal(digitize(x, bins, True), np.arange(11))
+
+ def test_right_open_reverse(self):
+ x = np.arange(5, -6, -1)
+ bins = np.arange(4, -6, -1)
+ assert_array_equal(digitize(x, bins, True), np.arange(11))
+
+ def test_right_open_random(self):
+ x = rand(10)
+ bins = np.linspace(x.min(), x.max(), 10)
+ assert_(np.all(digitize(x, bins, True) != 10))
+
+ def test_monotonic(self):
+ x = [-1, 0, 1, 2]
+ bins = [0, 0, 1]
+ assert_array_equal(digitize(x, bins, False), [0, 2, 3, 3])
+ assert_array_equal(digitize(x, bins, True), [0, 0, 2, 3])
+ bins = [1, 1, 0]
+ assert_array_equal(digitize(x, bins, False), [3, 2, 0, 0])
+ assert_array_equal(digitize(x, bins, True), [3, 3, 2, 0])
+ bins = [1, 1, 1, 1]
+ assert_array_equal(digitize(x, bins, False), [0, 0, 4, 4])
+ assert_array_equal(digitize(x, bins, True), [0, 0, 0, 4])
+ bins = [0, 0, 1, 0]
+ assert_raises(ValueError, digitize, x, bins)
+ bins = [1, 1, 0, 1]
+ assert_raises(ValueError, digitize, x, bins)
+
+ def test_casting_error(self):
+ x = [1, 2, 3 + 1.j]
+ bins = [1, 2, 3]
+ assert_raises(TypeError, digitize, x, bins)
+ x, bins = bins, x
+ assert_raises(TypeError, digitize, x, bins)
+
+ def test_return_type(self):
+ # Functions returning indices should always return base ndarrays
+ class A(np.ndarray):
+ pass
+ a = np.arange(5).view(A)
+ b = np.arange(1, 3).view(A)
+ assert_(not isinstance(digitize(b, a, False), A))
+ assert_(not isinstance(digitize(b, a, True), A))
+
+ def test_large_integers_increasing(self):
+ # gh-11022
+ x = 2**54 # loses precision in a float
+ assert_equal(np.digitize(x, [x - 1, x + 1]), 1)
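+ # float64 has a 53-bit mantissa, so 2**54 +/- 1 is not exactly
+ # representable; the comparison must not round-trip through float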
+
+ @pytest.mark.xfail(
+ reason="gh-11022: np._core.multiarray._monoticity loses precision")
+ def test_large_integers_decreasing(self):
+ # gh-11022
+ x = 2**54 # loses precision in a float
+ assert_equal(np.digitize(x, [x + 1, x - 1]), 1)
+
+
+class TestUnwrap:
+
+ def test_simple(self):
+ # check that unwrap removes jumps greater than 2*pi
+ assert_array_equal(unwrap([1, 1 + 2 * np.pi]), [1, 1])
+ # check that unwrap maintains continuity
+ assert_(np.all(diff(unwrap(rand(10) * 100)) < np.pi))
+
+ def test_period(self):
+ # check that unwrap removes jumps greater than 255
+ assert_array_equal(unwrap([1, 1 + 256], period=255), [1, 2])
+ # check that unwrap maintains continuity
+ assert_(np.all(diff(unwrap(rand(10) * 1000, period=255)) < 255))
+ # check simple case
+ simple_seq = np.array([0, 75, 150, 225, 300])
+ wrap_seq = np.mod(simple_seq, 255)
+ assert_array_equal(unwrap(wrap_seq, period=255), simple_seq)
+ # check custom discont value
+ uneven_seq = np.array([0, 75, 150, 225, 300, 430])
+ wrap_uneven = np.mod(uneven_seq, 250)
+ no_discont = unwrap(wrap_uneven, period=250)
+ assert_array_equal(no_discont, [0, 75, 150, 225, 300, 180])
+ sm_discont = unwrap(wrap_uneven, period=250, discont=140)
+ assert_array_equal(sm_discont, [0, 75, 150, 225, 300, 430])
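+ # wrap_uneven is [0, 75, 150, 225, 50, 180]; with the default
+ # discont (period/2 == 125) the +130 step from 50 to 180 is folded
+ # back as well, while discont=140 leaves it alone, so the earlier
+ # +250 correction carries through and gives 430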
+ assert sm_discont.dtype == wrap_uneven.dtype
+
+
+@pytest.mark.parametrize(
+ "dtype", "O" + np.typecodes["AllInteger"] + np.typecodes["Float"]
+)
+@pytest.mark.parametrize("M", [0, 1, 10])
+class TestFilterwindows:
+
+ def test_hanning(self, dtype: str, M: int) -> None:
+ scalar = np.array(M, dtype=dtype)[()]
+
+ w = hanning(scalar)
+ if dtype == "O":
+ ref_dtype = np.float64
+ else:
+ ref_dtype = np.result_type(scalar.dtype, np.float64)
+ assert w.dtype == ref_dtype
+
+ # check symmetry
+ assert_equal(w, flipud(w))
+
+ # check known value
+ if scalar < 1:
+ assert_array_equal(w, np.array([]))
+ elif scalar == 1:
+ assert_array_equal(w, np.ones(1))
+ else:
+ assert_almost_equal(np.sum(w, axis=0), 4.500, 4)
+
+ def test_hamming(self, dtype: str, M: int) -> None:
+ scalar = np.array(M, dtype=dtype)[()]
+
+ w = hamming(scalar)
+ if dtype == "O":
+ ref_dtype = np.float64
+ else:
+ ref_dtype = np.result_type(scalar.dtype, np.float64)
+ assert w.dtype == ref_dtype
+
+ # check symmetry
+ assert_equal(w, flipud(w))
+
+ # check known value
+ if scalar < 1:
+ assert_array_equal(w, np.array([]))
+ elif scalar == 1:
+ assert_array_equal(w, np.ones(1))
+ else:
+ assert_almost_equal(np.sum(w, axis=0), 4.9400, 4)
+
+ def test_bartlett(self, dtype: str, M: int) -> None:
+ scalar = np.array(M, dtype=dtype)[()]
+
+ w = bartlett(scalar)
+ if dtype == "O":
+ ref_dtype = np.float64
+ else:
+ ref_dtype = np.result_type(scalar.dtype, np.float64)
+ assert w.dtype == ref_dtype
+
+ # check symmetry
+ assert_equal(w, flipud(w))
+
+ # check known value
+ if scalar < 1:
+ assert_array_equal(w, np.array([]))
+ elif scalar == 1:
+ assert_array_equal(w, np.ones(1))
+ else:
+ assert_almost_equal(np.sum(w, axis=0), 4.4444, 4)
+
+ def test_blackman(self, dtype: str, M: int) -> None:
+ scalar = np.array(M, dtype=dtype)[()]
+
+ w = blackman(scalar)
+ if dtype == "O":
+ ref_dtype = np.float64
+ else:
+ ref_dtype = np.result_type(scalar.dtype, np.float64)
+ assert w.dtype == ref_dtype
+
+ # check symmetry
+ assert_equal(w, flipud(w))
+
+ # check known value
+ if scalar < 1:
+ assert_array_equal(w, np.array([]))
+ elif scalar == 1:
+ assert_array_equal(w, np.ones(1))
+ else:
+ assert_almost_equal(np.sum(w, axis=0), 3.7800, 4)
+
+ def test_kaiser(self, dtype: str, M: int) -> None:
+ scalar = np.array(M, dtype=dtype)[()]
+
+ w = kaiser(scalar, 0)
+ if dtype == "O":
+ ref_dtype = np.float64
+ else:
+ ref_dtype = np.result_type(scalar.dtype, np.float64)
+ assert w.dtype == ref_dtype
+
+ # check symmetry
+ assert_equal(w, flipud(w))
+
+ # check known value
+ if scalar < 1:
+ assert_array_equal(w, np.array([]))
+ elif scalar == 1:
+ assert_array_equal(w, np.ones(1))
+ else:
+ assert_almost_equal(np.sum(w, axis=0), 10, 15)
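+ # (kaiser with beta=0 is a rectangular window, so the sum is M)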
+
+
+class TestTrapezoid:
+
+ def test_simple(self):
+ x = np.arange(-10, 10, .1)
+ r = trapezoid(np.exp(-.5 * x ** 2) / np.sqrt(2 * np.pi), dx=0.1)
+ # check integral of normal equals 1
+ assert_almost_equal(r, 1, 7)
+
+ def test_ndim(self):
+ x = np.linspace(0, 1, 3)
+ y = np.linspace(0, 2, 8)
+ z = np.linspace(0, 3, 13)
+
+ wx = np.ones_like(x) * (x[1] - x[0])
+ wx[0] /= 2
+ wx[-1] /= 2
+ wy = np.ones_like(y) * (y[1] - y[0])
+ wy[0] /= 2
+ wy[-1] /= 2
+ wz = np.ones_like(z) * (z[1] - z[0])
+ wz[0] /= 2
+ wz[-1] /= 2
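+ # trapezoidal weights: each interior sample is weighted by the grid
+ # spacing, the two endpoints by half of it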
+
+ q = x[:, None, None] + y[None, :, None] + z[None, None, :]
+
+ qx = (q * wx[:, None, None]).sum(axis=0)
+ qy = (q * wy[None, :, None]).sum(axis=1)
+ qz = (q * wz[None, None, :]).sum(axis=2)
+
+ # n-d `x`
+ r = trapezoid(q, x=x[:, None, None], axis=0)
+ assert_almost_equal(r, qx)
+ r = trapezoid(q, x=y[None, :, None], axis=1)
+ assert_almost_equal(r, qy)
+ r = trapezoid(q, x=z[None, None, :], axis=2)
+ assert_almost_equal(r, qz)
+
+ # 1-d `x`
+ r = trapezoid(q, x=x, axis=0)
+ assert_almost_equal(r, qx)
+ r = trapezoid(q, x=y, axis=1)
+ assert_almost_equal(r, qy)
+ r = trapezoid(q, x=z, axis=2)
+ assert_almost_equal(r, qz)
+
+ def test_masked(self):
+ # Testing that segments touching a masked point drop out of the
+ # trapezoidal sum, so the masked sample contributes nothing
+ x = np.arange(5)
+ y = x * x
+ mask = x == 2
+ ym = np.ma.array(y, mask=mask)
+ r = 13.0 # sum(0.5 * (0 + 1) * 1.0 + 0.5 * (9 + 16))
+ assert_almost_equal(trapezoid(ym, x), r)
+
+ xm = np.ma.array(x, mask=mask)
+ assert_almost_equal(trapezoid(ym, xm), r)
+
+ xm = np.ma.array(x, mask=mask)
+ assert_almost_equal(trapezoid(y, xm), r)
+
+
+class TestSinc:
+
+ def test_simple(self):
+ assert_(sinc(0) == 1)
+ w = sinc(np.linspace(-1, 1, 100))
+ # check symmetry
+ assert_array_almost_equal(w, flipud(w), 7)
+
+ def test_array_like(self):
+ x = [0, 0.5]
+ y1 = sinc(np.array(x))
+ y2 = sinc(list(x))
+ y3 = sinc(tuple(x))
+ assert_array_equal(y1, y2)
+ assert_array_equal(y1, y3)
+
+ def test_bool_dtype(self):
+ x = (np.arange(4, dtype=np.uint8) % 2 == 1)
+ actual = sinc(x)
+ expected = sinc(x.astype(np.float64))
+ assert_allclose(actual, expected)
+ assert actual.dtype == np.float64
+
+ @pytest.mark.parametrize('dtype', [np.uint8, np.int16, np.uint64])
+ def test_int_dtypes(self, dtype):
+ x = np.arange(4, dtype=dtype)
+ actual = sinc(x)
+ expected = sinc(x.astype(np.float64))
+ assert_allclose(actual, expected)
+ assert actual.dtype == np.float64
+
+ @pytest.mark.parametrize(
+ 'dtype',
+ [np.float16, np.float32, np.longdouble, np.complex64, np.complex128]
+ )
+ def test_float_dtypes(self, dtype):
+ x = np.arange(4, dtype=dtype)
+ assert sinc(x).dtype == x.dtype
+
+ def test_float16_underflow(self):
+ x = np.float16(0)
+ # before gh-27784, fill value for 0 in input would underflow float16,
+ # resulting in nan
+ assert_array_equal(sinc(x), np.asarray(1.0))
+
+
+class TestUnique:
+
+ def test_simple(self):
+ x = np.array([4, 3, 2, 1, 1, 2, 3, 4, 0])
+ assert_(np.all(unique(x) == [0, 1, 2, 3, 4]))
+ assert_(unique(np.array([1, 1, 1, 1, 1])) == np.array([1]))
+ x = ['widget', 'ham', 'foo', 'bar', 'foo', 'ham']
+ assert_(np.all(unique(x) == ['bar', 'foo', 'ham', 'widget']))
+ x = np.array([5 + 6j, 1 + 1j, 1 + 10j, 10, 5 + 6j])
+ assert_(np.all(unique(x) == [1 + 1j, 1 + 10j, 5 + 6j, 10]))
+
+
+class TestCheckFinite:
+
+ def test_simple(self):
+ a = [1, 2, 3]
+ b = [1, 2, np.inf]
+ c = [1, 2, np.nan]
+ np.asarray_chkfinite(a)
+ assert_raises(ValueError, np.asarray_chkfinite, b)
+ assert_raises(ValueError, np.asarray_chkfinite, c)
+
+ def test_dtype_order(self):
+ # Regression test for missing dtype and order arguments
+ a = [1, 2, 3]
+ a = np.asarray_chkfinite(a, order='F', dtype=np.float64)
+ assert_(a.dtype == np.float64)
+
+
+class TestCorrCoef:
+ A = np.array(
+ [[0.15391142, 0.18045767, 0.14197213],
+ [0.70461506, 0.96474128, 0.27906989],
+ [0.9297531, 0.32296769, 0.19267156]])
+ B = np.array(
+ [[0.10377691, 0.5417086, 0.49807457],
+ [0.82872117, 0.77801674, 0.39226705],
+ [0.9314666, 0.66800209, 0.03538394]])
+ res1 = np.array(
+ [[1., 0.9379533, -0.04931983],
+ [0.9379533, 1., 0.30007991],
+ [-0.04931983, 0.30007991, 1.]])
+ res2 = np.array(
+ [[1., 0.9379533, -0.04931983, 0.30151751, 0.66318558, 0.51532523],
+ [0.9379533, 1., 0.30007991, -0.04781421, 0.88157256, 0.78052386],
+ [-0.04931983, 0.30007991, 1., -0.96717111, 0.71483595, 0.83053601],
+ [0.30151751, -0.04781421, -0.96717111, 1., -0.51366032, -0.66173113],
+ [0.66318558, 0.88157256, 0.71483595, -0.51366032, 1., 0.98317823],
+ [0.51532523, 0.78052386, 0.83053601, -0.66173113, 0.98317823, 1.]])
+
+ def test_non_array(self):
+ assert_almost_equal(np.corrcoef([0, 1, 0], [1, 0, 1]),
+ [[1., -1.], [-1., 1.]])
+
+ def test_simple(self):
+ tgt1 = corrcoef(self.A)
+ assert_almost_equal(tgt1, self.res1)
+ assert_(np.all(np.abs(tgt1) <= 1.0))
+
+ tgt2 = corrcoef(self.A, self.B)
+ assert_almost_equal(tgt2, self.res2)
+ assert_(np.all(np.abs(tgt2) <= 1.0))
+
+ def test_ddof(self):
+ # ddof raises DeprecationWarning
+ with suppress_warnings() as sup:
+ warnings.simplefilter("always")
+ assert_warns(DeprecationWarning, corrcoef, self.A, ddof=-1)
+ sup.filter(DeprecationWarning)
+ # ddof has no or negligible effect on the function
+ assert_almost_equal(corrcoef(self.A, ddof=-1), self.res1)
+ assert_almost_equal(corrcoef(self.A, self.B, ddof=-1), self.res2)
+ assert_almost_equal(corrcoef(self.A, ddof=3), self.res1)
+ assert_almost_equal(corrcoef(self.A, self.B, ddof=3), self.res2)
+
+ def test_bias(self):
+ # bias raises DeprecationWarning
+ with suppress_warnings() as sup:
+ warnings.simplefilter("always")
+ assert_warns(DeprecationWarning, corrcoef, self.A, self.B, 1, 0)
+ assert_warns(DeprecationWarning, corrcoef, self.A, bias=0)
+ sup.filter(DeprecationWarning)
+ # bias has no or negligible effect on the function
+ assert_almost_equal(corrcoef(self.A, bias=1), self.res1)
+
+ def test_complex(self):
+ x = np.array([[1, 2, 3], [1j, 2j, 3j]])
+ res = corrcoef(x)
+ tgt = np.array([[1., -1.j], [1.j, 1.]])
+ assert_allclose(res, tgt)
+ assert_(np.all(np.abs(res) <= 1.0))
+
+ def test_xy(self):
+ x = np.array([[1, 2, 3]])
+ y = np.array([[1j, 2j, 3j]])
+ assert_allclose(np.corrcoef(x, y), np.array([[1., -1.j], [1.j, 1.]]))
+
+ def test_empty(self):
+ with warnings.catch_warnings(record=True):
+ warnings.simplefilter('always', RuntimeWarning)
+ assert_array_equal(corrcoef(np.array([])), np.nan)
+ assert_array_equal(corrcoef(np.array([]).reshape(0, 2)),
+ np.array([]).reshape(0, 0))
+ assert_array_equal(corrcoef(np.array([]).reshape(2, 0)),
+ np.array([[np.nan, np.nan], [np.nan, np.nan]]))
+
+ def test_extreme(self):
+ x = [[1e-100, 1e100], [1e100, 1e-100]]
+ with np.errstate(all='raise'):
+ c = corrcoef(x)
+ assert_array_almost_equal(c, np.array([[1., -1.], [-1., 1.]]))
+ assert_(np.all(np.abs(c) <= 1.0))
+
+ @pytest.mark.parametrize("test_type", [np.half, np.single, np.double, np.longdouble])
+ def test_corrcoef_dtype(self, test_type):
+ cast_A = self.A.astype(test_type)
+ res = corrcoef(cast_A, dtype=test_type)
+ assert test_type == res.dtype
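+
+    # Illustrative sketch (not upstream code): corrcoef is the covariance
+    # matrix normalised by the outer product of the standard deviations.
+    def _corrcoef_from_cov_sketch(self):
+        c = cov(self.A)
+        d = np.sqrt(np.diag(c))
+        return c / np.outer(d, d)  # ~ corrcoef(self.A)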
+
+
+class TestCov:
+ x1 = np.array([[0, 2], [1, 1], [2, 0]]).T
+ res1 = np.array([[1., -1.], [-1., 1.]])
+ x2 = np.array([0.0, 1.0, 2.0], ndmin=2)
+ frequencies = np.array([1, 4, 1])
+ x2_repeats = np.array([[0.0], [1.0], [1.0], [1.0], [1.0], [2.0]]).T
+ res2 = np.array([[0.4, -0.4], [-0.4, 0.4]])
+ unit_frequencies = np.ones(3, dtype=np.int_)
+ weights = np.array([1.0, 4.0, 1.0])
+ res3 = np.array([[2. / 3., -2. / 3.], [-2. / 3., 2. / 3.]])
+ unit_weights = np.ones(3)
+ x3 = np.array([0.3942, 0.5969, 0.7730, 0.9918, 0.7964])
+
+ def test_basic(self):
+ assert_allclose(cov(self.x1), self.res1)
+
+ def test_complex(self):
+ x = np.array([[1, 2, 3], [1j, 2j, 3j]])
+ res = np.array([[1., -1.j], [1.j, 1.]])
+ assert_allclose(cov(x), res)
+ assert_allclose(cov(x, aweights=np.ones(3)), res)
+
+ def test_xy(self):
+ x = np.array([[1, 2, 3]])
+ y = np.array([[1j, 2j, 3j]])
+ assert_allclose(cov(x, y), np.array([[1., -1.j], [1.j, 1.]]))
+
+ def test_empty(self):
+ with warnings.catch_warnings(record=True):
+ warnings.simplefilter('always', RuntimeWarning)
+ assert_array_equal(cov(np.array([])), np.nan)
+ assert_array_equal(cov(np.array([]).reshape(0, 2)),
+ np.array([]).reshape(0, 0))
+ assert_array_equal(cov(np.array([]).reshape(2, 0)),
+ np.array([[np.nan, np.nan], [np.nan, np.nan]]))
+
+ def test_wrong_ddof(self):
+ with warnings.catch_warnings(record=True):
+ warnings.simplefilter('always', RuntimeWarning)
+ assert_array_equal(cov(self.x1, ddof=5),
+ np.array([[np.inf, -np.inf],
+ [-np.inf, np.inf]]))
+
+ def test_1D_rowvar(self):
+ assert_allclose(cov(self.x3), cov(self.x3, rowvar=False))
+ y = np.array([0.0780, 0.3107, 0.2111, 0.0334, 0.8501])
+ assert_allclose(cov(self.x3, y), cov(self.x3, y, rowvar=False))
+
+ def test_1D_variance(self):
+ assert_allclose(cov(self.x3, ddof=1), np.var(self.x3, ddof=1))
+
+ def test_fweights(self):
+ assert_allclose(cov(self.x2, fweights=self.frequencies),
+ cov(self.x2_repeats))
+ assert_allclose(cov(self.x1, fweights=self.frequencies),
+ self.res2)
+ assert_allclose(cov(self.x1, fweights=self.unit_frequencies),
+ self.res1)
+ nonint = self.frequencies + 0.5
+ assert_raises(TypeError, cov, self.x1, fweights=nonint)
+ f = np.ones((2, 3), dtype=np.int_)
+ assert_raises(RuntimeError, cov, self.x1, fweights=f)
+ f = np.ones(2, dtype=np.int_)
+ assert_raises(RuntimeError, cov, self.x1, fweights=f)
+ f = -1 * np.ones(3, dtype=np.int_)
+ assert_raises(ValueError, cov, self.x1, fweights=f)
+
+ def test_aweights(self):
+ assert_allclose(cov(self.x1, aweights=self.weights), self.res3)
+ assert_allclose(cov(self.x1, aweights=3.0 * self.weights),
+ cov(self.x1, aweights=self.weights))
+ assert_allclose(cov(self.x1, aweights=self.unit_weights), self.res1)
+ w = np.ones((2, 3))
+ assert_raises(RuntimeError, cov, self.x1, aweights=w)
+ w = np.ones(2)
+ assert_raises(RuntimeError, cov, self.x1, aweights=w)
+ w = -1.0 * np.ones(3)
+ assert_raises(ValueError, cov, self.x1, aweights=w)
+
+ def test_unit_fweights_and_aweights(self):
+ assert_allclose(cov(self.x2, fweights=self.frequencies,
+ aweights=self.unit_weights),
+ cov(self.x2_repeats))
+ assert_allclose(cov(self.x1, fweights=self.frequencies,
+ aweights=self.unit_weights),
+ self.res2)
+ assert_allclose(cov(self.x1, fweights=self.unit_frequencies,
+ aweights=self.unit_weights),
+ self.res1)
+ assert_allclose(cov(self.x1, fweights=self.unit_frequencies,
+ aweights=self.weights),
+ self.res3)
+ assert_allclose(cov(self.x1, fweights=self.unit_frequencies,
+ aweights=3.0 * self.weights),
+ cov(self.x1, aweights=self.weights))
+ assert_allclose(cov(self.x1, fweights=self.unit_frequencies,
+ aweights=self.unit_weights),
+ self.res1)
+
+ @pytest.mark.parametrize("test_type", [np.half, np.single, np.double, np.longdouble])
+ def test_cov_dtype(self, test_type):
+ cast_x1 = self.x1.astype(test_type)
+ res = cov(cast_x1, dtype=test_type)
+ assert test_type == res.dtype
+
+ def test_gh_27658(self):
+ x = np.ones((3, 1))
+ expected = np.cov(x, ddof=0, rowvar=True)
+ actual = np.cov(x.T, ddof=0, rowvar=False)
+ assert_allclose(actual, expected, strict=True)
+
+
+class Test_I0:
+
+ def test_simple(self):
+ assert_almost_equal(
+ i0(0.5),
+ np.array(1.0634833707413234))
+
+ # need at least one test above 8, as the implementation is piecewise
+ A = np.array([0.49842636, 0.6969809, 0.22011976, 0.0155549, 10.0])
+ expected = np.array([1.06307822, 1.12518299, 1.01214991, 1.00006049, 2815.71662847])
+ assert_almost_equal(i0(A), expected)
+ assert_almost_equal(i0(-A), expected)
+
+ B = np.array([[0.827002, 0.99959078],
+ [0.89694769, 0.39298162],
+ [0.37954418, 0.05206293],
+ [0.36465447, 0.72446427],
+ [0.48164949, 0.50324519]])
+ assert_almost_equal(
+ i0(B),
+ np.array([[1.17843223, 1.26583466],
+ [1.21147086, 1.03898290],
+ [1.03633899, 1.00067775],
+ [1.03352052, 1.13557954],
+ [1.05884290, 1.06432317]]))
+ # Regression test for gh-11205
+ i0_0 = np.i0([0.])
+ assert_equal(i0_0.shape, (1,))
+ assert_array_equal(np.i0([0.]), np.array([1.]))
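+
+        # Illustrative note (not upstream code; the cutoff is an
+        # assumption from the published algorithm): np.i0 switches between
+        # two polynomial approximations at |x| == 8, which is why A above
+        # includes the value 10.0.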
+
+ def test_non_array(self):
+ a = np.arange(4)
+
+ class array_like:
+ __array_interface__ = a.__array_interface__
+
+ def __array_wrap__(self, arr, context, return_scalar):
+ return self
+
+ # E.g. pandas series survive ufunc calls through array-wrap:
+ assert isinstance(np.abs(array_like()), array_like)
+ exp = np.i0(a)
+ res = np.i0(array_like())
+
+ assert_array_equal(exp, res)
+
+ def test_complex(self):
+ a = np.array([0, 1 + 2j])
+ with pytest.raises(TypeError, match="i0 not supported for complex values"):
+            i0(a)
+
+
+class TestKaiser:
+
+ def test_simple(self):
+ assert_(np.isfinite(kaiser(1, 1.0)))
+ assert_almost_equal(kaiser(0, 1.0),
+ np.array([]))
+ assert_almost_equal(kaiser(2, 1.0),
+ np.array([0.78984831, 0.78984831]))
+ assert_almost_equal(kaiser(5, 1.0),
+ np.array([0.78984831, 0.94503323, 1.,
+ 0.94503323, 0.78984831]))
+ assert_almost_equal(kaiser(5, 1.56789),
+ np.array([0.58285404, 0.88409679, 1.,
+ 0.88409679, 0.58285404]))
+
+ def test_int_beta(self):
+ kaiser(3, 4)
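+
+    # Illustrative sketch (not upstream code): the window endpoints sit at
+    # normalised position r = +/-1, so their value is i0(0) / i0(beta);
+    # for beta = 1 that is 1 / i0(1) ~ 0.78984831, matching test_simple.
+    def _kaiser_endpoint_sketch(self):
+        return 1.0 / i0(1.0)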
+
+
+class TestMeshgrid:
+
+ def test_simple(self):
+ [X, Y] = meshgrid([1, 2, 3], [4, 5, 6, 7])
+ assert_array_equal(X, np.array([[1, 2, 3],
+ [1, 2, 3],
+ [1, 2, 3],
+ [1, 2, 3]]))
+ assert_array_equal(Y, np.array([[4, 4, 4],
+ [5, 5, 5],
+ [6, 6, 6],
+ [7, 7, 7]]))
+
+ def test_single_input(self):
+ [X] = meshgrid([1, 2, 3, 4])
+ assert_array_equal(X, np.array([1, 2, 3, 4]))
+
+ def test_no_input(self):
+ args = []
+ assert_array_equal([], meshgrid(*args))
+ assert_array_equal([], meshgrid(*args, copy=False))
+
+ def test_indexing(self):
+ x = [1, 2, 3]
+ y = [4, 5, 6, 7]
+ [X, Y] = meshgrid(x, y, indexing='ij')
+ assert_array_equal(X, np.array([[1, 1, 1, 1],
+ [2, 2, 2, 2],
+ [3, 3, 3, 3]]))
+ assert_array_equal(Y, np.array([[4, 5, 6, 7],
+ [4, 5, 6, 7],
+ [4, 5, 6, 7]]))
+
+ # Test expected shapes:
+ z = [8, 9]
+ assert_(meshgrid(x, y)[0].shape == (4, 3))
+ assert_(meshgrid(x, y, indexing='ij')[0].shape == (3, 4))
+ assert_(meshgrid(x, y, z)[0].shape == (4, 3, 2))
+ assert_(meshgrid(x, y, z, indexing='ij')[0].shape == (3, 4, 2))
+
+ assert_raises(ValueError, meshgrid, x, y, indexing='notvalid')
+
+ def test_sparse(self):
+ [X, Y] = meshgrid([1, 2, 3], [4, 5, 6, 7], sparse=True)
+ assert_array_equal(X, np.array([[1, 2, 3]]))
+ assert_array_equal(Y, np.array([[4], [5], [6], [7]]))
+
+ def test_invalid_arguments(self):
+ # Test that meshgrid complains about invalid arguments
+ # Regression test for issue #4755:
+ # https://github.com/numpy/numpy/issues/4755
+ assert_raises(TypeError, meshgrid,
+ [1, 2, 3], [4, 5, 6, 7], indices='ij')
+
+ def test_return_type(self):
+ # Test for appropriate dtype in returned arrays.
+ # Regression test for issue #5297
+ # https://github.com/numpy/numpy/issues/5297
+ x = np.arange(0, 10, dtype=np.float32)
+ y = np.arange(10, 20, dtype=np.float64)
+
+ X, Y = np.meshgrid(x, y)
+
+ assert_(X.dtype == x.dtype)
+ assert_(Y.dtype == y.dtype)
+
+ # copy
+ X, Y = np.meshgrid(x, y, copy=True)
+
+ assert_(X.dtype == x.dtype)
+ assert_(Y.dtype == y.dtype)
+
+ # sparse
+ X, Y = np.meshgrid(x, y, sparse=True)
+
+ assert_(X.dtype == x.dtype)
+ assert_(Y.dtype == y.dtype)
+
+ def test_writeback(self):
+ # Issue 8561
+ X = np.array([1.1, 2.2])
+ Y = np.array([3.3, 4.4])
+ x, y = np.meshgrid(X, Y, sparse=False, copy=True)
+
+ x[0, :] = 0
+ assert_equal(x[0, :], 0)
+ assert_equal(x[1, :], X)
+
+ def test_nd_shape(self):
+ a, b, c, d, e = np.meshgrid(*([0] * i for i in range(1, 6)))
+ expected_shape = (2, 1, 3, 4, 5)
+ assert_equal(a.shape, expected_shape)
+ assert_equal(b.shape, expected_shape)
+ assert_equal(c.shape, expected_shape)
+ assert_equal(d.shape, expected_shape)
+ assert_equal(e.shape, expected_shape)
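+
+        # Illustrative note (not upstream code): the default indexing='xy'
+        # swaps the first two axes relative to 'ij', which is why inputs
+        # of lengths 1..5 give shape (2, 1, 3, 4, 5) here.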
+
+ def test_nd_values(self):
+ a, b, c = np.meshgrid([0], [1, 2], [3, 4, 5])
+ assert_equal(a, [[[0, 0, 0]], [[0, 0, 0]]])
+ assert_equal(b, [[[1, 1, 1]], [[2, 2, 2]]])
+ assert_equal(c, [[[3, 4, 5]], [[3, 4, 5]]])
+
+ def test_nd_indexing(self):
+ a, b, c = np.meshgrid([0], [1, 2], [3, 4, 5], indexing='ij')
+ assert_equal(a, [[[0, 0, 0], [0, 0, 0]]])
+ assert_equal(b, [[[1, 1, 1], [2, 2, 2]]])
+ assert_equal(c, [[[3, 4, 5], [3, 4, 5]]])
+
+
+class TestPiecewise:
+
+ def test_simple(self):
+ # Condition is single bool list
+ x = piecewise([0, 0], [True, False], [1])
+ assert_array_equal(x, [1, 0])
+
+ # List of conditions: single bool list
+ x = piecewise([0, 0], [[True, False]], [1])
+ assert_array_equal(x, [1, 0])
+
+        # Condition is single bool array
+ x = piecewise([0, 0], np.array([True, False]), [1])
+ assert_array_equal(x, [1, 0])
+
+ # Condition is single int array
+ x = piecewise([0, 0], np.array([1, 0]), [1])
+ assert_array_equal(x, [1, 0])
+
+ # List of conditions: int array
+ x = piecewise([0, 0], [np.array([1, 0])], [1])
+ assert_array_equal(x, [1, 0])
+
+ x = piecewise([0, 0], [[False, True]], [lambda x:-1])
+ assert_array_equal(x, [0, -1])
+
+ assert_raises_regex(ValueError, '1 or 2 functions are expected',
+ piecewise, [0, 0], [[False, True]], [])
+ assert_raises_regex(ValueError, '1 or 2 functions are expected',
+ piecewise, [0, 0], [[False, True]], [1, 2, 3])
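+
+        # Illustrative note (not upstream code): with n condition lists,
+        # piecewise accepts n or n + 1 functions; the optional extra one
+        # provides the "otherwise" value where no condition matches.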
+
+ def test_two_conditions(self):
+ x = piecewise([1, 2], [[True, False], [False, True]], [3, 4])
+ assert_array_equal(x, [3, 4])
+
+ def test_scalar_domains_three_conditions(self):
+ x = piecewise(3, [True, False, False], [4, 2, 0])
+ assert_equal(x, 4)
+
+ def test_default(self):
+ # No value specified for x[1], should be 0
+ x = piecewise([1, 2], [True, False], [2])
+ assert_array_equal(x, [2, 0])
+
+ # Should set x[1] to 3
+ x = piecewise([1, 2], [True, False], [2, 3])
+ assert_array_equal(x, [2, 3])
+
+ def test_0d(self):
+ x = np.array(3)
+ y = piecewise(x, x > 3, [4, 0])
+ assert_(y.ndim == 0)
+ assert_(y == 0)
+
+ x = 5
+ y = piecewise(x, [True, False], [1, 0])
+ assert_(y.ndim == 0)
+ assert_(y == 1)
+
+        # With 3 ranges (this was failing before)
+ y = piecewise(x, [False, False, True], [1, 2, 3])
+ assert_array_equal(y, 3)
+
+ def test_0d_comparison(self):
+ x = 3
+ y = piecewise(x, [x <= 3, x > 3], [4, 0]) # Should succeed.
+ assert_equal(y, 4)
+
+        # With 3 ranges (this was failing before)
+ x = 4
+ y = piecewise(x, [x <= 3, (x > 3) * (x <= 5), x > 5], [1, 2, 3])
+ assert_array_equal(y, 2)
+
+ assert_raises_regex(ValueError, '2 or 3 functions are expected',
+ piecewise, x, [x <= 3, x > 3], [1])
+ assert_raises_regex(ValueError, '2 or 3 functions are expected',
+ piecewise, x, [x <= 3, x > 3], [1, 1, 1, 1])
+
+ def test_0d_0d_condition(self):
+ x = np.array(3)
+ c = np.array(x > 3)
+ y = piecewise(x, [c], [1, 2])
+ assert_equal(y, 2)
+
+ def test_multidimensional_extrafunc(self):
+ x = np.array([[-2.5, -1.5, -0.5],
+ [0.5, 1.5, 2.5]])
+ y = piecewise(x, [x < 0, x >= 2], [-1, 1, 3])
+ assert_array_equal(y, np.array([[-1., -1., -1.],
+ [3., 3., 1.]]))
+
+ def test_subclasses(self):
+ class subclass(np.ndarray):
+ pass
+ x = np.arange(5.).view(subclass)
+ r = piecewise(x, [x < 2., x >= 4], [-1., 1., 0.])
+ assert_equal(type(r), subclass)
+ assert_equal(r, [-1., -1., 0., 0., 1.])
+
+
+class TestBincount:
+
+ def test_simple(self):
+ y = np.bincount(np.arange(4))
+ assert_array_equal(y, np.ones(4))
+
+ def test_simple2(self):
+ y = np.bincount(np.array([1, 5, 2, 4, 1]))
+ assert_array_equal(y, np.array([0, 2, 1, 0, 1, 1]))
+
+ def test_simple_weight(self):
+ x = np.arange(4)
+ w = np.array([0.2, 0.3, 0.5, 0.1])
+ y = np.bincount(x, w)
+ assert_array_equal(y, w)
+
+ def test_simple_weight2(self):
+ x = np.array([1, 2, 4, 5, 2])
+ w = np.array([0.2, 0.3, 0.5, 0.1, 0.2])
+ y = np.bincount(x, w)
+ assert_array_equal(y, np.array([0, 0.2, 0.5, 0, 0.5, 0.1]))
+
+ def test_with_minlength(self):
+ x = np.array([0, 1, 0, 1, 1])
+ y = np.bincount(x, minlength=3)
+ assert_array_equal(y, np.array([2, 3, 0]))
+ x = []
+ y = np.bincount(x, minlength=0)
+ assert_array_equal(y, np.array([]))
+
+ def test_with_minlength_smaller_than_maxvalue(self):
+ x = np.array([0, 1, 1, 2, 2, 3, 3])
+ y = np.bincount(x, minlength=2)
+ assert_array_equal(y, np.array([1, 2, 2, 2]))
+ y = np.bincount(x, minlength=0)
+ assert_array_equal(y, np.array([1, 2, 2, 2]))
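+
+        # Illustrative note (not upstream code): minlength only pads the
+        # output; its length is max(x.max() + 1, minlength), so a value
+        # smaller than needed is effectively ignored.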
+
+ def test_with_minlength_and_weights(self):
+ x = np.array([1, 2, 4, 5, 2])
+ w = np.array([0.2, 0.3, 0.5, 0.1, 0.2])
+ y = np.bincount(x, w, 8)
+ assert_array_equal(y, np.array([0, 0.2, 0.5, 0, 0.5, 0.1, 0, 0]))
+
+ def test_empty(self):
+ x = np.array([], dtype=int)
+ y = np.bincount(x)
+ assert_array_equal(x, y)
+
+ def test_empty_with_minlength(self):
+ x = np.array([], dtype=int)
+ y = np.bincount(x, minlength=5)
+ assert_array_equal(y, np.zeros(5, dtype=int))
+
+ @pytest.mark.parametrize('minlength', [0, 3])
+ def test_empty_list(self, minlength):
+ assert_array_equal(np.bincount([], minlength=minlength),
+ np.zeros(minlength, dtype=int))
+
+ def test_with_incorrect_minlength(self):
+ x = np.array([], dtype=int)
+ assert_raises_regex(TypeError,
+ "'str' object cannot be interpreted",
+ lambda: np.bincount(x, minlength="foobar"))
+ assert_raises_regex(ValueError,
+ "must not be negative",
+ lambda: np.bincount(x, minlength=-1))
+
+ x = np.arange(5)
+ assert_raises_regex(TypeError,
+ "'str' object cannot be interpreted",
+ lambda: np.bincount(x, minlength="foobar"))
+ assert_raises_regex(ValueError,
+ "must not be negative",
+ lambda: np.bincount(x, minlength=-1))
+
+ @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
+ def test_dtype_reference_leaks(self):
+ # gh-6805
+ intp_refcount = sys.getrefcount(np.dtype(np.intp))
+ double_refcount = sys.getrefcount(np.dtype(np.double))
+
+ for j in range(10):
+ np.bincount([1, 2, 3])
+ assert_equal(sys.getrefcount(np.dtype(np.intp)), intp_refcount)
+ assert_equal(sys.getrefcount(np.dtype(np.double)), double_refcount)
+
+ for j in range(10):
+ np.bincount([1, 2, 3], [4, 5, 6])
+ assert_equal(sys.getrefcount(np.dtype(np.intp)), intp_refcount)
+ assert_equal(sys.getrefcount(np.dtype(np.double)), double_refcount)
+
+ @pytest.mark.parametrize("vals", [[[2, 2]], 2])
+ def test_error_not_1d(self, vals):
+        # Test that the input has to be 1-D (both as array and nested list)
+ vals_arr = np.asarray(vals)
+ with assert_raises(ValueError):
+ np.bincount(vals_arr)
+ with assert_raises(ValueError):
+ np.bincount(vals)
+
+ @pytest.mark.parametrize("dt", np.typecodes["AllInteger"])
+ def test_gh_28354(self, dt):
+ a = np.array([0, 1, 1, 3, 2, 1, 7], dtype=dt)
+ actual = np.bincount(a)
+ expected = [1, 3, 1, 1, 0, 0, 0, 1]
+ assert_array_equal(actual, expected)
+
+ def test_contiguous_handling(self):
+ # check for absence of hard crash
+ np.bincount(np.arange(10000)[::2])
+
+ def test_gh_28354_array_like(self):
+ class A:
+ def __array__(self):
+ return np.array([0, 1, 1, 3, 2, 1, 7], dtype=np.uint64)
+
+ a = A()
+ actual = np.bincount(a)
+ expected = [1, 3, 1, 1, 0, 0, 0, 1]
+ assert_array_equal(actual, expected)
+
+
+class TestInterp:
+
+ def test_exceptions(self):
+ assert_raises(ValueError, interp, 0, [], [])
+ assert_raises(ValueError, interp, 0, [0], [1, 2])
+ assert_raises(ValueError, interp, 0, [0, 1], [1, 2], period=0)
+ assert_raises(ValueError, interp, 0, [], [], period=360)
+ assert_raises(ValueError, interp, 0, [0], [1, 2], period=360)
+
+ def test_basic(self):
+ x = np.linspace(0, 1, 5)
+ y = np.linspace(0, 1, 5)
+ x0 = np.linspace(0, 1, 50)
+ assert_almost_equal(np.interp(x0, x, y), x0)
+
+ def test_right_left_behavior(self):
+        # Needs a range of sizes to test different code paths.
+        # size == 1 is special cased, 1 < size < 5 is linear search, and
+        # size >= 5 goes through local search and possibly binary search.
+ for size in range(1, 10):
+ xp = np.arange(size, dtype=np.double)
+ yp = np.ones(size, dtype=np.double)
+ incpts = np.array([-1, 0, size - 1, size], dtype=np.double)
+ decpts = incpts[::-1]
+
+ incres = interp(incpts, xp, yp)
+ decres = interp(decpts, xp, yp)
+ inctgt = np.array([1, 1, 1, 1], dtype=float)
+ dectgt = inctgt[::-1]
+ assert_equal(incres, inctgt)
+ assert_equal(decres, dectgt)
+
+ incres = interp(incpts, xp, yp, left=0)
+ decres = interp(decpts, xp, yp, left=0)
+ inctgt = np.array([0, 1, 1, 1], dtype=float)
+ dectgt = inctgt[::-1]
+ assert_equal(incres, inctgt)
+ assert_equal(decres, dectgt)
+
+ incres = interp(incpts, xp, yp, right=2)
+ decres = interp(decpts, xp, yp, right=2)
+ inctgt = np.array([1, 1, 1, 2], dtype=float)
+ dectgt = inctgt[::-1]
+ assert_equal(incres, inctgt)
+ assert_equal(decres, dectgt)
+
+ incres = interp(incpts, xp, yp, left=0, right=2)
+ decres = interp(decpts, xp, yp, left=0, right=2)
+ inctgt = np.array([0, 1, 1, 2], dtype=float)
+ dectgt = inctgt[::-1]
+ assert_equal(incres, inctgt)
+ assert_equal(decres, dectgt)
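+
+        # Illustrative note (not upstream code): left and right default to
+        # fp[0] and fp[-1], which is why the unqualified calls above also
+        # return 1 for the out-of-range points.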
+
+ def test_scalar_interpolation_point(self):
+ x = np.linspace(0, 1, 5)
+ y = np.linspace(0, 1, 5)
+ x0 = 0
+ assert_almost_equal(np.interp(x0, x, y), x0)
+ x0 = .3
+ assert_almost_equal(np.interp(x0, x, y), x0)
+ x0 = np.float32(.3)
+ assert_almost_equal(np.interp(x0, x, y), x0)
+ x0 = np.float64(.3)
+ assert_almost_equal(np.interp(x0, x, y), x0)
+ x0 = np.nan
+ assert_almost_equal(np.interp(x0, x, y), x0)
+
+ def test_non_finite_behavior_exact_x(self):
+ x = [1, 2, 2.5, 3, 4]
+ xp = [1, 2, 3, 4]
+ fp = [1, 2, np.inf, 4]
+ assert_almost_equal(np.interp(x, xp, fp), [1, 2, np.inf, np.inf, 4])
+ fp = [1, 2, np.nan, 4]
+ assert_almost_equal(np.interp(x, xp, fp), [1, 2, np.nan, np.nan, 4])
+
+ @pytest.fixture(params=[
+ np.float64,
+ lambda x: _make_complex(x, 0),
+ lambda x: _make_complex(0, x),
+ lambda x: _make_complex(x, np.multiply(x, -2))
+ ], ids=[
+ 'real',
+ 'complex-real',
+ 'complex-imag',
+ 'complex-both'
+ ])
+ def sc(self, request):
+ """ scale function used by the below tests """
+ return request.param
+
+ def test_non_finite_any_nan(self, sc):
+ """ test that nans are propagated """
+ assert_equal(np.interp(0.5, [np.nan, 1], sc([ 0, 10])), sc(np.nan))
+ assert_equal(np.interp(0.5, [ 0, np.nan], sc([ 0, 10])), sc(np.nan))
+ assert_equal(np.interp(0.5, [ 0, 1], sc([np.nan, 10])), sc(np.nan))
+ assert_equal(np.interp(0.5, [ 0, 1], sc([ 0, np.nan])), sc(np.nan))
+
+ def test_non_finite_inf(self, sc):
+ """ Test that interp between opposite infs gives nan """
+ assert_equal(np.interp(0.5, [-np.inf, +np.inf], sc([ 0, 10])), sc(np.nan))
+ assert_equal(np.interp(0.5, [ 0, 1], sc([-np.inf, +np.inf])), sc(np.nan))
+ assert_equal(np.interp(0.5, [ 0, 1], sc([+np.inf, -np.inf])), sc(np.nan))
+
+ # unless the y values are equal
+ assert_equal(np.interp(0.5, [-np.inf, +np.inf], sc([ 10, 10])), sc(10))
+
+ def test_non_finite_half_inf_xf(self, sc):
+ """ Test that interp where both axes have a bound at inf gives nan """
+ assert_equal(np.interp(0.5, [-np.inf, 1], sc([-np.inf, 10])), sc(np.nan))
+ assert_equal(np.interp(0.5, [-np.inf, 1], sc([+np.inf, 10])), sc(np.nan))
+ assert_equal(np.interp(0.5, [-np.inf, 1], sc([ 0, -np.inf])), sc(np.nan))
+ assert_equal(np.interp(0.5, [-np.inf, 1], sc([ 0, +np.inf])), sc(np.nan))
+ assert_equal(np.interp(0.5, [ 0, +np.inf], sc([-np.inf, 10])), sc(np.nan))
+ assert_equal(np.interp(0.5, [ 0, +np.inf], sc([+np.inf, 10])), sc(np.nan))
+ assert_equal(np.interp(0.5, [ 0, +np.inf], sc([ 0, -np.inf])), sc(np.nan))
+ assert_equal(np.interp(0.5, [ 0, +np.inf], sc([ 0, +np.inf])), sc(np.nan))
+
+ def test_non_finite_half_inf_x(self, sc):
+ """ Test interp where the x axis has a bound at inf """
+ assert_equal(np.interp(0.5, [-np.inf, -np.inf], sc([0, 10])), sc(10))
+ assert_equal(np.interp(0.5, [-np.inf, 1 ], sc([0, 10])), sc(10)) # noqa: E202
+ assert_equal(np.interp(0.5, [ 0, +np.inf], sc([0, 10])), sc(0))
+ assert_equal(np.interp(0.5, [+np.inf, +np.inf], sc([0, 10])), sc(0))
+
+ def test_non_finite_half_inf_f(self, sc):
+ """ Test interp where the f axis has a bound at inf """
+ assert_equal(np.interp(0.5, [0, 1], sc([ 0, -np.inf])), sc(-np.inf))
+ assert_equal(np.interp(0.5, [0, 1], sc([ 0, +np.inf])), sc(+np.inf))
+ assert_equal(np.interp(0.5, [0, 1], sc([-np.inf, 10])), sc(-np.inf))
+ assert_equal(np.interp(0.5, [0, 1], sc([+np.inf, 10])), sc(+np.inf))
+ assert_equal(np.interp(0.5, [0, 1], sc([-np.inf, -np.inf])), sc(-np.inf))
+ assert_equal(np.interp(0.5, [0, 1], sc([+np.inf, +np.inf])), sc(+np.inf))
+
+ def test_complex_interp(self):
+ # test complex interpolation
+ x = np.linspace(0, 1, 5)
+ y = np.linspace(0, 1, 5) + (1 + np.linspace(0, 1, 5)) * 1.0j
+ x0 = 0.3
+ y0 = x0 + (1 + x0) * 1.0j
+ assert_almost_equal(np.interp(x0, x, y), y0)
+ # test complex left and right
+ x0 = -1
+ left = 2 + 3.0j
+ assert_almost_equal(np.interp(x0, x, y, left=left), left)
+ x0 = 2.0
+ right = 2 + 3.0j
+ assert_almost_equal(np.interp(x0, x, y, right=right), right)
+ # test complex non finite
+ x = [1, 2, 2.5, 3, 4]
+ xp = [1, 2, 3, 4]
+ fp = [1, 2 + 1j, np.inf, 4]
+ y = [1, 2 + 1j, np.inf + 0.5j, np.inf, 4]
+ assert_almost_equal(np.interp(x, xp, fp), y)
+ # test complex periodic
+ x = [-180, -170, -185, 185, -10, -5, 0, 365]
+ xp = [190, -190, 350, -350]
+ fp = [5 + 1.0j, 10 + 2j, 3 + 3j, 4 + 4j]
+ y = [7.5 + 1.5j, 5. + 1.0j, 8.75 + 1.75j, 6.25 + 1.25j, 3. + 3j, 3.25 + 3.25j,
+ 3.5 + 3.5j, 3.75 + 3.75j]
+ assert_almost_equal(np.interp(x, xp, fp, period=360), y)
+
+ def test_zero_dimensional_interpolation_point(self):
+ x = np.linspace(0, 1, 5)
+ y = np.linspace(0, 1, 5)
+ x0 = np.array(.3)
+ assert_almost_equal(np.interp(x0, x, y), x0)
+
+ xp = np.array([0, 2, 4])
+ fp = np.array([1, -1, 1])
+
+ actual = np.interp(np.array(1), xp, fp)
+ assert_equal(actual, 0)
+ assert_(isinstance(actual, np.float64))
+
+ actual = np.interp(np.array(4.5), xp, fp, period=4)
+ assert_equal(actual, 0.5)
+ assert_(isinstance(actual, np.float64))
+
+ def test_if_len_x_is_small(self):
+ xp = np.arange(0, 10, 0.0001)
+ fp = np.sin(xp)
+ assert_almost_equal(np.interp(np.pi, xp, fp), 0.0)
+
+ def test_period(self):
+ x = [-180, -170, -185, 185, -10, -5, 0, 365]
+ xp = [190, -190, 350, -350]
+ fp = [5, 10, 3, 4]
+ y = [7.5, 5., 8.75, 6.25, 3., 3.25, 3.5, 3.75]
+ assert_almost_equal(np.interp(x, xp, fp, period=360), y)
+ x = np.array(x, order='F').reshape(2, -1)
+ y = np.array(y, order='C').reshape(2, -1)
+ assert_almost_equal(np.interp(x, xp, fp, period=360), y)
+
+
+class TestPercentile:
+
+ def test_basic(self):
+ x = np.arange(8) * 0.5
+ assert_equal(np.percentile(x, 0), 0.)
+ assert_equal(np.percentile(x, 100), 3.5)
+ assert_equal(np.percentile(x, 50), 1.75)
+ x[1] = np.nan
+ assert_equal(np.percentile(x, 0), np.nan)
+ assert_equal(np.percentile(x, 0, method='nearest'), np.nan)
+ assert_equal(np.percentile(x, 0, method='inverted_cdf'), np.nan)
+ assert_equal(
+ np.percentile(x, 0, method='inverted_cdf',
+ weights=np.ones_like(x)),
+ np.nan,
+ )
+
+ def test_fraction(self):
+ x = [Fraction(i, 2) for i in range(8)]
+
+ p = np.percentile(x, Fraction(0))
+ assert_equal(p, Fraction(0))
+ assert_equal(type(p), Fraction)
+
+ p = np.percentile(x, Fraction(100))
+ assert_equal(p, Fraction(7, 2))
+ assert_equal(type(p), Fraction)
+
+ p = np.percentile(x, Fraction(50))
+ assert_equal(p, Fraction(7, 4))
+ assert_equal(type(p), Fraction)
+
+ p = np.percentile(x, [Fraction(50)])
+ assert_equal(p, np.array([Fraction(7, 4)]))
+ assert_equal(type(p), np.ndarray)
+
+ def test_api(self):
+ d = np.ones(5)
+ np.percentile(d, 5, None, None, False)
+ np.percentile(d, 5, None, None, False, 'linear')
+ o = np.ones((1,))
+ np.percentile(d, 5, None, o, False, 'linear')
+
+ def test_complex(self):
+ arr_c = np.array([0.5 + 3.0j, 2.1 + 0.5j, 1.6 + 2.3j], dtype='G')
+ assert_raises(TypeError, np.percentile, arr_c, 0.5)
+ arr_c = np.array([0.5 + 3.0j, 2.1 + 0.5j, 1.6 + 2.3j], dtype='D')
+ assert_raises(TypeError, np.percentile, arr_c, 0.5)
+ arr_c = np.array([0.5 + 3.0j, 2.1 + 0.5j, 1.6 + 2.3j], dtype='F')
+ assert_raises(TypeError, np.percentile, arr_c, 0.5)
+
+ def test_2D(self):
+ x = np.array([[1, 1, 1],
+ [1, 1, 1],
+ [4, 4, 3],
+ [1, 1, 1],
+ [1, 1, 1]])
+ assert_array_equal(np.percentile(x, 50, axis=0), [1, 1, 1])
+
+ @pytest.mark.parametrize("dtype", np.typecodes["Float"])
+ def test_linear_nan_1D(self, dtype):
+ # METHOD 1 of H&F
+ arr = np.asarray([15.0, np.nan, 35.0, 40.0, 50.0], dtype=dtype)
+ res = np.percentile(
+ arr,
+ 40.0,
+ method="linear")
+ np.testing.assert_equal(res, np.nan)
+ np.testing.assert_equal(res.dtype, arr.dtype)
+
+ H_F_TYPE_CODES = [(int_type, np.float64)
+ for int_type in np.typecodes["AllInteger"]
+ ] + [(np.float16, np.float16),
+ (np.float32, np.float32),
+ (np.float64, np.float64),
+ (np.longdouble, np.longdouble),
+ (np.dtype("O"), np.float64)]
+
+ @pytest.mark.parametrize(["function", "quantile"],
+ [(np.quantile, 0.4),
+ (np.percentile, 40.0)])
+ @pytest.mark.parametrize(["input_dtype", "expected_dtype"], H_F_TYPE_CODES)
+ @pytest.mark.parametrize(["method", "weighted", "expected"],
+ [("inverted_cdf", False, 20),
+ ("inverted_cdf", True, 20),
+ ("averaged_inverted_cdf", False, 27.5),
+ ("closest_observation", False, 20),
+ ("interpolated_inverted_cdf", False, 20),
+ ("hazen", False, 27.5),
+ ("weibull", False, 26),
+ ("linear", False, 29),
+ ("median_unbiased", False, 27),
+ ("normal_unbiased", False, 27.125),
+ ])
+ def test_linear_interpolation(self,
+ function,
+ quantile,
+ method,
+ weighted,
+ expected,
+ input_dtype,
+ expected_dtype):
+ expected_dtype = np.dtype(expected_dtype)
+
+ arr = np.asarray([15.0, 20.0, 35.0, 40.0, 50.0], dtype=input_dtype)
+ weights = np.ones_like(arr) if weighted else None
+ if input_dtype is np.longdouble:
+ if function is np.quantile:
+ # 0.4 is not exactly representable and it matters
+ # for "averaged_inverted_cdf", so we need to cheat.
+ quantile = input_dtype("0.4")
+ # We want to use nulp, but that does not work for longdouble
+ test_function = np.testing.assert_almost_equal
+ else:
+ test_function = np.testing.assert_array_almost_equal_nulp
+
+ actual = function(arr, quantile, method=method, weights=weights)
+
+ test_function(actual, expected_dtype.type(expected))
+
+ if method in ["inverted_cdf", "closest_observation"]:
+ if input_dtype == "O":
+ np.testing.assert_equal(np.asarray(actual).dtype, np.float64)
+ else:
+ np.testing.assert_equal(np.asarray(actual).dtype,
+ np.dtype(input_dtype))
+ else:
+ np.testing.assert_equal(np.asarray(actual).dtype,
+ np.dtype(expected_dtype))
+
+ TYPE_CODES = np.typecodes["AllInteger"] + np.typecodes["Float"] + "O"
+
+ @pytest.mark.parametrize("dtype", TYPE_CODES)
+ def test_lower_higher(self, dtype):
+ assert_equal(np.percentile(np.arange(10, dtype=dtype), 50,
+ method='lower'), 4)
+ assert_equal(np.percentile(np.arange(10, dtype=dtype), 50,
+ method='higher'), 5)
+
+ @pytest.mark.parametrize("dtype", TYPE_CODES)
+ def test_midpoint(self, dtype):
+ assert_equal(np.percentile(np.arange(10, dtype=dtype), 51,
+ method='midpoint'), 4.5)
+ assert_equal(np.percentile(np.arange(9, dtype=dtype) + 1, 50,
+ method='midpoint'), 5)
+ assert_equal(np.percentile(np.arange(11, dtype=dtype), 51,
+ method='midpoint'), 5.5)
+ assert_equal(np.percentile(np.arange(11, dtype=dtype), 50,
+ method='midpoint'), 5)
+
+ @pytest.mark.parametrize("dtype", TYPE_CODES)
+ def test_nearest(self, dtype):
+ assert_equal(np.percentile(np.arange(10, dtype=dtype), 51,
+ method='nearest'), 5)
+ assert_equal(np.percentile(np.arange(10, dtype=dtype), 49,
+ method='nearest'), 4)
+
+ def test_linear_interpolation_extrapolation(self):
+ arr = np.random.rand(5)
+
+ actual = np.percentile(arr, 100)
+ np.testing.assert_equal(actual, arr.max())
+
+ actual = np.percentile(arr, 0)
+ np.testing.assert_equal(actual, arr.min())
+
+ def test_sequence(self):
+ x = np.arange(8) * 0.5
+ assert_equal(np.percentile(x, [0, 100, 50]), [0, 3.5, 1.75])
+
+ def test_axis(self):
+ x = np.arange(12).reshape(3, 4)
+
+ assert_equal(np.percentile(x, (25, 50, 100)), [2.75, 5.5, 11.0])
+
+ r0 = [[2, 3, 4, 5], [4, 5, 6, 7], [8, 9, 10, 11]]
+ assert_equal(np.percentile(x, (25, 50, 100), axis=0), r0)
+
+ r1 = [[0.75, 1.5, 3], [4.75, 5.5, 7], [8.75, 9.5, 11]]
+ assert_equal(np.percentile(x, (25, 50, 100), axis=1), np.array(r1).T)
+
+ # ensure qth axis is always first as with np.array(old_percentile(..))
+ x = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6)
+ assert_equal(np.percentile(x, (25, 50)).shape, (2,))
+ assert_equal(np.percentile(x, (25, 50, 75)).shape, (3,))
+ assert_equal(np.percentile(x, (25, 50), axis=0).shape, (2, 4, 5, 6))
+ assert_equal(np.percentile(x, (25, 50), axis=1).shape, (2, 3, 5, 6))
+ assert_equal(np.percentile(x, (25, 50), axis=2).shape, (2, 3, 4, 6))
+ assert_equal(np.percentile(x, (25, 50), axis=3).shape, (2, 3, 4, 5))
+ assert_equal(
+ np.percentile(x, (25, 50, 75), axis=1).shape, (3, 3, 5, 6))
+ assert_equal(np.percentile(x, (25, 50),
+ method="higher").shape, (2,))
+ assert_equal(np.percentile(x, (25, 50, 75),
+ method="higher").shape, (3,))
+ assert_equal(np.percentile(x, (25, 50), axis=0,
+ method="higher").shape, (2, 4, 5, 6))
+ assert_equal(np.percentile(x, (25, 50), axis=1,
+ method="higher").shape, (2, 3, 5, 6))
+ assert_equal(np.percentile(x, (25, 50), axis=2,
+ method="higher").shape, (2, 3, 4, 6))
+ assert_equal(np.percentile(x, (25, 50), axis=3,
+ method="higher").shape, (2, 3, 4, 5))
+ assert_equal(np.percentile(x, (25, 50, 75), axis=1,
+ method="higher").shape, (3, 3, 5, 6))
+
+ def test_scalar_q(self):
+ # test for no empty dimensions for compatibility with old percentile
+ x = np.arange(12).reshape(3, 4)
+ assert_equal(np.percentile(x, 50), 5.5)
+ assert_(np.isscalar(np.percentile(x, 50)))
+ r0 = np.array([4., 5., 6., 7.])
+ assert_equal(np.percentile(x, 50, axis=0), r0)
+ assert_equal(np.percentile(x, 50, axis=0).shape, r0.shape)
+ r1 = np.array([1.5, 5.5, 9.5])
+ assert_almost_equal(np.percentile(x, 50, axis=1), r1)
+ assert_equal(np.percentile(x, 50, axis=1).shape, r1.shape)
+
+ out = np.empty(1)
+ assert_equal(np.percentile(x, 50, out=out), 5.5)
+ assert_equal(out, 5.5)
+ out = np.empty(4)
+ assert_equal(np.percentile(x, 50, axis=0, out=out), r0)
+ assert_equal(out, r0)
+ out = np.empty(3)
+ assert_equal(np.percentile(x, 50, axis=1, out=out), r1)
+ assert_equal(out, r1)
+
+ # test for no empty dimensions for compatibility with old percentile
+ x = np.arange(12).reshape(3, 4)
+ assert_equal(np.percentile(x, 50, method='lower'), 5.)
+ assert_(np.isscalar(np.percentile(x, 50)))
+ r0 = np.array([4., 5., 6., 7.])
+ c0 = np.percentile(x, 50, method='lower', axis=0)
+ assert_equal(c0, r0)
+ assert_equal(c0.shape, r0.shape)
+ r1 = np.array([1., 5., 9.])
+ c1 = np.percentile(x, 50, method='lower', axis=1)
+ assert_almost_equal(c1, r1)
+ assert_equal(c1.shape, r1.shape)
+
+ out = np.empty((), dtype=x.dtype)
+ c = np.percentile(x, 50, method='lower', out=out)
+ assert_equal(c, 5)
+ assert_equal(out, 5)
+ out = np.empty(4, dtype=x.dtype)
+ c = np.percentile(x, 50, method='lower', axis=0, out=out)
+ assert_equal(c, r0)
+ assert_equal(out, r0)
+ out = np.empty(3, dtype=x.dtype)
+ c = np.percentile(x, 50, method='lower', axis=1, out=out)
+ assert_equal(c, r1)
+ assert_equal(out, r1)
+
+ def test_exception(self):
+ assert_raises(ValueError, np.percentile, [1, 2], 56,
+ method='foobar')
+ assert_raises(ValueError, np.percentile, [1], 101)
+ assert_raises(ValueError, np.percentile, [1], -1)
+ assert_raises(ValueError, np.percentile, [1], list(range(50)) + [101])
+ assert_raises(ValueError, np.percentile, [1], list(range(50)) + [-0.1])
+
+ def test_percentile_list(self):
+ assert_equal(np.percentile([1, 2, 3], 0), 1)
+
+ @pytest.mark.parametrize(
+ "percentile, with_weights",
+ [
+ (np.percentile, False),
+ (partial(np.percentile, method="inverted_cdf"), True),
+ ]
+ )
+ def test_percentile_out(self, percentile, with_weights):
+ out_dtype = int if with_weights else float
+ x = np.array([1, 2, 3])
+ y = np.zeros((3,), dtype=out_dtype)
+ p = (1, 2, 3)
+ weights = np.ones_like(x) if with_weights else None
+ r = percentile(x, p, out=y, weights=weights)
+ assert r is y
+ assert_equal(percentile(x, p, weights=weights), y)
+
+ x = np.array([[1, 2, 3],
+ [4, 5, 6]])
+ y = np.zeros((3, 3), dtype=out_dtype)
+ weights = np.ones_like(x) if with_weights else None
+ r = percentile(x, p, axis=0, out=y, weights=weights)
+ assert r is y
+ assert_equal(percentile(x, p, weights=weights, axis=0), y)
+
+ y = np.zeros((3, 2), dtype=out_dtype)
+ percentile(x, p, axis=1, out=y, weights=weights)
+ assert_equal(percentile(x, p, weights=weights, axis=1), y)
+
+ x = np.arange(12).reshape(3, 4)
+        # q.ndim > 1, float
+ if with_weights:
+ r0 = np.array([[0, 1, 2, 3], [4, 5, 6, 7]])
+ else:
+ r0 = np.array([[2., 3., 4., 5.], [4., 5., 6., 7.]])
+ out = np.empty((2, 4), dtype=out_dtype)
+ weights = np.ones_like(x) if with_weights else None
+ assert_equal(
+ percentile(x, (25, 50), axis=0, out=out, weights=weights), r0
+ )
+ assert_equal(out, r0)
+ r1 = np.array([[0.75, 4.75, 8.75], [1.5, 5.5, 9.5]])
+ out = np.empty((2, 3))
+ assert_equal(np.percentile(x, (25, 50), axis=1, out=out), r1)
+ assert_equal(out, r1)
+
+        # q.ndim > 1, int
+ r0 = np.array([[0, 1, 2, 3], [4, 5, 6, 7]])
+ out = np.empty((2, 4), dtype=x.dtype)
+ c = np.percentile(x, (25, 50), method='lower', axis=0, out=out)
+ assert_equal(c, r0)
+ assert_equal(out, r0)
+ r1 = np.array([[0, 4, 8], [1, 5, 9]])
+ out = np.empty((2, 3), dtype=x.dtype)
+ c = np.percentile(x, (25, 50), method='lower', axis=1, out=out)
+ assert_equal(c, r1)
+ assert_equal(out, r1)
+
+ def test_percentile_empty_dim(self):
+ # empty dims are preserved
+ d = np.arange(11 * 2).reshape(11, 1, 2, 1)
+ assert_array_equal(np.percentile(d, 50, axis=0).shape, (1, 2, 1))
+ assert_array_equal(np.percentile(d, 50, axis=1).shape, (11, 2, 1))
+ assert_array_equal(np.percentile(d, 50, axis=2).shape, (11, 1, 1))
+ assert_array_equal(np.percentile(d, 50, axis=3).shape, (11, 1, 2))
+ assert_array_equal(np.percentile(d, 50, axis=-1).shape, (11, 1, 2))
+ assert_array_equal(np.percentile(d, 50, axis=-2).shape, (11, 1, 1))
+ assert_array_equal(np.percentile(d, 50, axis=-3).shape, (11, 2, 1))
+ assert_array_equal(np.percentile(d, 50, axis=-4).shape, (1, 2, 1))
+
+ assert_array_equal(np.percentile(d, 50, axis=2,
+ method='midpoint').shape,
+ (11, 1, 1))
+ assert_array_equal(np.percentile(d, 50, axis=-2,
+ method='midpoint').shape,
+ (11, 1, 1))
+
+ assert_array_equal(np.array(np.percentile(d, [10, 50], axis=0)).shape,
+ (2, 1, 2, 1))
+ assert_array_equal(np.array(np.percentile(d, [10, 50], axis=1)).shape,
+ (2, 11, 2, 1))
+ assert_array_equal(np.array(np.percentile(d, [10, 50], axis=2)).shape,
+ (2, 11, 1, 1))
+ assert_array_equal(np.array(np.percentile(d, [10, 50], axis=3)).shape,
+ (2, 11, 1, 2))
+
+ def test_percentile_no_overwrite(self):
+ a = np.array([2, 3, 4, 1])
+ np.percentile(a, [50], overwrite_input=False)
+ assert_equal(a, np.array([2, 3, 4, 1]))
+
+ a = np.array([2, 3, 4, 1])
+ np.percentile(a, [50])
+ assert_equal(a, np.array([2, 3, 4, 1]))
+
+ def test_no_p_overwrite(self):
+ p = np.linspace(0., 100., num=5)
+ np.percentile(np.arange(100.), p, method="midpoint")
+ assert_array_equal(p, np.linspace(0., 100., num=5))
+ p = np.linspace(0., 100., num=5).tolist()
+ np.percentile(np.arange(100.), p, method="midpoint")
+ assert_array_equal(p, np.linspace(0., 100., num=5).tolist())
+
+ def test_percentile_overwrite(self):
+ a = np.array([2, 3, 4, 1])
+ b = np.percentile(a, [50], overwrite_input=True)
+ assert_equal(b, np.array([2.5]))
+
+ b = np.percentile([2, 3, 4, 1], [50], overwrite_input=True)
+ assert_equal(b, np.array([2.5]))
+
+ def test_extended_axis(self):
+ o = np.random.normal(size=(71, 23))
+ x = np.dstack([o] * 10)
+ assert_equal(np.percentile(x, 30, axis=(0, 1)), np.percentile(o, 30))
+ x = np.moveaxis(x, -1, 0)
+ assert_equal(np.percentile(x, 30, axis=(-2, -1)), np.percentile(o, 30))
+ x = x.swapaxes(0, 1).copy()
+ assert_equal(np.percentile(x, 30, axis=(0, -1)), np.percentile(o, 30))
+ x = x.swapaxes(0, 1).copy()
+
+ assert_equal(np.percentile(x, [25, 60], axis=(0, 1, 2)),
+ np.percentile(x, [25, 60], axis=None))
+ assert_equal(np.percentile(x, [25, 60], axis=(0,)),
+ np.percentile(x, [25, 60], axis=0))
+
+ d = np.arange(3 * 5 * 7 * 11).reshape((3, 5, 7, 11))
+ np.random.shuffle(d.ravel())
+ assert_equal(np.percentile(d, 25, axis=(0, 1, 2))[0],
+ np.percentile(d[:, :, :, 0].flatten(), 25))
+ assert_equal(np.percentile(d, [10, 90], axis=(0, 1, 3))[:, 1],
+ np.percentile(d[:, :, 1, :].flatten(), [10, 90]))
+ assert_equal(np.percentile(d, 25, axis=(3, 1, -4))[2],
+ np.percentile(d[:, :, 2, :].flatten(), 25))
+ assert_equal(np.percentile(d, 25, axis=(3, 1, 2))[2],
+ np.percentile(d[2, :, :, :].flatten(), 25))
+ assert_equal(np.percentile(d, 25, axis=(3, 2))[2, 1],
+ np.percentile(d[2, 1, :, :].flatten(), 25))
+ assert_equal(np.percentile(d, 25, axis=(1, -2))[2, 1],
+ np.percentile(d[2, :, :, 1].flatten(), 25))
+ assert_equal(np.percentile(d, 25, axis=(1, 3))[2, 2],
+ np.percentile(d[2, :, 2, :].flatten(), 25))
+
+ def test_extended_axis_invalid(self):
+ d = np.ones((3, 5, 7, 11))
+ assert_raises(AxisError, np.percentile, d, axis=-5, q=25)
+ assert_raises(AxisError, np.percentile, d, axis=(0, -5), q=25)
+ assert_raises(AxisError, np.percentile, d, axis=4, q=25)
+ assert_raises(AxisError, np.percentile, d, axis=(0, 4), q=25)
+ # each of these refers to the same axis twice
+ assert_raises(ValueError, np.percentile, d, axis=(1, 1), q=25)
+ assert_raises(ValueError, np.percentile, d, axis=(-1, -1), q=25)
+ assert_raises(ValueError, np.percentile, d, axis=(3, -1), q=25)
+
+ def test_keepdims(self):
+ d = np.ones((3, 5, 7, 11))
+ assert_equal(np.percentile(d, 7, axis=None, keepdims=True).shape,
+ (1, 1, 1, 1))
+ assert_equal(np.percentile(d, 7, axis=(0, 1), keepdims=True).shape,
+ (1, 1, 7, 11))
+ assert_equal(np.percentile(d, 7, axis=(0, 3), keepdims=True).shape,
+ (1, 5, 7, 1))
+ assert_equal(np.percentile(d, 7, axis=(1,), keepdims=True).shape,
+ (3, 1, 7, 11))
+ assert_equal(np.percentile(d, 7, (0, 1, 2, 3), keepdims=True).shape,
+ (1, 1, 1, 1))
+ assert_equal(np.percentile(d, 7, axis=(0, 1, 3), keepdims=True).shape,
+ (1, 1, 7, 1))
+
+ assert_equal(np.percentile(d, [1, 7], axis=(0, 1, 3),
+ keepdims=True).shape, (2, 1, 1, 7, 1))
+ assert_equal(np.percentile(d, [1, 7], axis=(0, 3),
+ keepdims=True).shape, (2, 1, 5, 7, 1))
+
+ @pytest.mark.parametrize('q', [7, [1, 7]])
+ @pytest.mark.parametrize(
+ argnames='axis',
+ argvalues=[
+ None,
+ 1,
+ (1,),
+ (0, 1),
+ (-3, -1),
+ ]
+ )
+ def test_keepdims_out(self, q, axis):
+ d = np.ones((3, 5, 7, 11))
+ if axis is None:
+ shape_out = (1,) * d.ndim
+ else:
+ axis_norm = normalize_axis_tuple(axis, d.ndim)
+ shape_out = tuple(
+ 1 if i in axis_norm else d.shape[i] for i in range(d.ndim))
+ shape_out = np.shape(q) + shape_out
+
+ out = np.empty(shape_out)
+ result = np.percentile(d, q, axis=axis, keepdims=True, out=out)
+ assert result is out
+ assert_equal(result.shape, shape_out)
+
+ def test_out(self):
+ o = np.zeros((4,))
+ d = np.ones((3, 4))
+ assert_equal(np.percentile(d, 0, 0, out=o), o)
+ assert_equal(np.percentile(d, 0, 0, method='nearest', out=o), o)
+ o = np.zeros((3,))
+ assert_equal(np.percentile(d, 1, 1, out=o), o)
+ assert_equal(np.percentile(d, 1, 1, method='nearest', out=o), o)
+
+ o = np.zeros(())
+ assert_equal(np.percentile(d, 2, out=o), o)
+ assert_equal(np.percentile(d, 2, method='nearest', out=o), o)
+
+ @pytest.mark.parametrize("method, weighted", [
+ ("linear", False),
+ ("nearest", False),
+ ("inverted_cdf", False),
+ ("inverted_cdf", True),
+ ])
+ def test_out_nan(self, method, weighted):
+ if weighted:
+ kwargs = {"weights": np.ones((3, 4)), "method": method}
+ else:
+ kwargs = {"method": method}
+ with warnings.catch_warnings(record=True):
+ warnings.filterwarnings('always', '', RuntimeWarning)
+ o = np.zeros((4,))
+ d = np.ones((3, 4))
+ d[2, 1] = np.nan
+ assert_equal(np.percentile(d, 0, 0, out=o, **kwargs), o)
+
+ o = np.zeros((3,))
+ assert_equal(np.percentile(d, 1, 1, out=o, **kwargs), o)
+
+ o = np.zeros(())
+ assert_equal(np.percentile(d, 1, out=o, **kwargs), o)
+
+ def test_nan_behavior(self):
+ a = np.arange(24, dtype=float)
+ a[2] = np.nan
+ assert_equal(np.percentile(a, 0.3), np.nan)
+ assert_equal(np.percentile(a, 0.3, axis=0), np.nan)
+ assert_equal(np.percentile(a, [0.3, 0.6], axis=0),
+ np.array([np.nan] * 2))
+
+ a = np.arange(24, dtype=float).reshape(2, 3, 4)
+ a[1, 2, 3] = np.nan
+ a[1, 1, 2] = np.nan
+
+ # no axis
+ assert_equal(np.percentile(a, 0.3), np.nan)
+ assert_equal(np.percentile(a, 0.3).ndim, 0)
+
+ # axis0 zerod
+ b = np.percentile(np.arange(24, dtype=float).reshape(2, 3, 4), 0.3, 0)
+ b[2, 3] = np.nan
+ b[1, 2] = np.nan
+ assert_equal(np.percentile(a, 0.3, 0), b)
+
+ # axis0 not zerod
+ b = np.percentile(np.arange(24, dtype=float).reshape(2, 3, 4),
+ [0.3, 0.6], 0)
+ b[:, 2, 3] = np.nan
+ b[:, 1, 2] = np.nan
+ assert_equal(np.percentile(a, [0.3, 0.6], 0), b)
+
+ # axis1 zerod
+ b = np.percentile(np.arange(24, dtype=float).reshape(2, 3, 4), 0.3, 1)
+ b[1, 3] = np.nan
+ b[1, 2] = np.nan
+ assert_equal(np.percentile(a, 0.3, 1), b)
+ # axis1 not zerod
+ b = np.percentile(
+ np.arange(24, dtype=float).reshape(2, 3, 4), [0.3, 0.6], 1)
+ b[:, 1, 3] = np.nan
+ b[:, 1, 2] = np.nan
+ assert_equal(np.percentile(a, [0.3, 0.6], 1), b)
+
+ # axis02 zerod
+ b = np.percentile(
+ np.arange(24, dtype=float).reshape(2, 3, 4), 0.3, (0, 2))
+ b[1] = np.nan
+ b[2] = np.nan
+ assert_equal(np.percentile(a, 0.3, (0, 2)), b)
+ # axis02 not zerod
+ b = np.percentile(np.arange(24, dtype=float).reshape(2, 3, 4),
+ [0.3, 0.6], (0, 2))
+ b[:, 1] = np.nan
+ b[:, 2] = np.nan
+ assert_equal(np.percentile(a, [0.3, 0.6], (0, 2)), b)
+ # axis02 not zerod with method='nearest'
+ b = np.percentile(np.arange(24, dtype=float).reshape(2, 3, 4),
+ [0.3, 0.6], (0, 2), method='nearest')
+ b[:, 1] = np.nan
+ b[:, 2] = np.nan
+ assert_equal(np.percentile(
+ a, [0.3, 0.6], (0, 2), method='nearest'), b)
+
+ def test_nan_q(self):
+ # GH18830
+ with pytest.raises(ValueError, match="Percentiles must be in"):
+ np.percentile([1, 2, 3, 4.0], np.nan)
+ with pytest.raises(ValueError, match="Percentiles must be in"):
+ np.percentile([1, 2, 3, 4.0], [np.nan])
+ q = np.linspace(1.0, 99.0, 16)
+ q[0] = np.nan
+ with pytest.raises(ValueError, match="Percentiles must be in"):
+ np.percentile([1, 2, 3, 4.0], q)
+
+ @pytest.mark.parametrize("dtype", ["m8[D]", "M8[s]"])
+ @pytest.mark.parametrize("pos", [0, 23, 10])
+ def test_nat_basic(self, dtype, pos):
+        # TODO: Note that datetime rounding is dubious since the NaT fixes!
+ # NaT and NaN should behave the same, do basic tests for NaT:
+ a = np.arange(0, 24, dtype=dtype)
+ a[pos] = "NaT"
+ res = np.percentile(a, 30)
+ assert res.dtype == dtype
+ assert np.isnat(res)
+ res = np.percentile(a, [30, 60])
+ assert res.dtype == dtype
+ assert np.isnat(res).all()
+
+ a = np.arange(0, 24 * 3, dtype=dtype).reshape(-1, 3)
+ a[pos, 1] = "NaT"
+ res = np.percentile(a, 30, axis=0)
+ assert_array_equal(np.isnat(res), [False, True, False])
+
+
+quantile_methods = [
+ 'inverted_cdf', 'averaged_inverted_cdf', 'closest_observation',
+ 'interpolated_inverted_cdf', 'hazen', 'weibull', 'linear',
+ 'median_unbiased', 'normal_unbiased', 'nearest', 'lower', 'higher',
+ 'midpoint']
+
+
+methods_supporting_weights = ["inverted_cdf"]
+
+
+class TestQuantile:
+ # most of this is already tested by TestPercentile
+
+ def V(self, x, y, alpha):
+ # Identification function used in several tests.
+ return (x >= y) - alpha
+
+ def test_max_ulp(self):
+ x = [0.0, 0.2, 0.4]
+ a = np.quantile(x, 0.45)
+        # The default linear method would result in 0 + 0.9 * (0.2 - 0) = 0.18.
+        # 0.18 is not exactly representable and the formula leads to a 1 ULP
+        # different result. Ensure the result is within 1 ULP, see gh-20331.
+ np.testing.assert_array_max_ulp(a, 0.18, maxulp=1)
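+
+        # Illustrative note (not upstream code): in float64 the lerp
+        # 0.0 + 0.9 * (0.2 - 0.0) evaluates to 0.18000000000000002, one
+        # ULP above the closest double to 0.18, hence maxulp=1.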
+
+ def test_basic(self):
+ x = np.arange(8) * 0.5
+ assert_equal(np.quantile(x, 0), 0.)
+ assert_equal(np.quantile(x, 1), 3.5)
+ assert_equal(np.quantile(x, 0.5), 1.75)
+
+ def test_correct_quantile_value(self):
+ a = np.array([True])
+ tf_quant = np.quantile(True, False)
+ assert_equal(tf_quant, a[0])
+ assert_equal(type(tf_quant), a.dtype)
+ a = np.array([False, True, True])
+ quant_res = np.quantile(a, a)
+ assert_array_equal(quant_res, a)
+ assert_equal(quant_res.dtype, a.dtype)
+
+ def test_fraction(self):
+ # fractional input, integral quantile
+ x = [Fraction(i, 2) for i in range(8)]
+ q = np.quantile(x, 0)
+ assert_equal(q, 0)
+ assert_equal(type(q), Fraction)
+
+ q = np.quantile(x, 1)
+ assert_equal(q, Fraction(7, 2))
+ assert_equal(type(q), Fraction)
+
+ q = np.quantile(x, .5)
+ assert_equal(q, 1.75)
+ assert_equal(type(q), np.float64)
+
+ q = np.quantile(x, Fraction(1, 2))
+ assert_equal(q, Fraction(7, 4))
+ assert_equal(type(q), Fraction)
+
+ q = np.quantile(x, [Fraction(1, 2)])
+ assert_equal(q, np.array([Fraction(7, 4)]))
+ assert_equal(type(q), np.ndarray)
+
+ q = np.quantile(x, [[Fraction(1, 2)]])
+ assert_equal(q, np.array([[Fraction(7, 4)]]))
+ assert_equal(type(q), np.ndarray)
+
+ # repeat with integral input but fractional quantile
+ x = np.arange(8)
+ assert_equal(np.quantile(x, Fraction(1, 2)), Fraction(7, 2))
+
+ def test_complex(self):
+ # gh-22652
+ arr_c = np.array([0.5 + 3.0j, 2.1 + 0.5j, 1.6 + 2.3j], dtype='G')
+ assert_raises(TypeError, np.quantile, arr_c, 0.5)
+ arr_c = np.array([0.5 + 3.0j, 2.1 + 0.5j, 1.6 + 2.3j], dtype='D')
+ assert_raises(TypeError, np.quantile, arr_c, 0.5)
+ arr_c = np.array([0.5 + 3.0j, 2.1 + 0.5j, 1.6 + 2.3j], dtype='F')
+ assert_raises(TypeError, np.quantile, arr_c, 0.5)
+
+ def test_no_p_overwrite(self):
+ # this is worth retesting, because quantile does not make a copy
+ p0 = np.array([0, 0.75, 0.25, 0.5, 1.0])
+ p = p0.copy()
+ np.quantile(np.arange(100.), p, method="midpoint")
+ assert_array_equal(p, p0)
+
+ p0 = p0.tolist()
+ p = p.tolist()
+ np.quantile(np.arange(100.), p, method="midpoint")
+ assert_array_equal(p, p0)
+
+ @pytest.mark.parametrize("dtype", np.typecodes["AllInteger"])
+ def test_quantile_preserve_int_type(self, dtype):
+ res = np.quantile(np.array([1, 2], dtype=dtype), [0.5],
+ method="nearest")
+ assert res.dtype == dtype
+
+ @pytest.mark.parametrize("method", quantile_methods)
+ def test_q_zero_one(self, method):
+ # gh-24710
+ arr = [10, 11, 12]
+ quantile = np.quantile(arr, q=[0, 1], method=method)
+ assert_equal(quantile, np.array([10, 12]))
+
+ @pytest.mark.parametrize("method", quantile_methods)
+ def test_quantile_monotonic(self, method):
+ # GH 14685
+ # test that the return value of quantile is monotonic if p0 is ordered
+ # Also tests that the boundary values are not mishandled.
+ p0 = np.linspace(0, 1, 101)
+ quantile = np.quantile(np.array([0, 1, 1, 2, 2, 3, 3, 4, 5, 5, 1, 1, 9, 9, 9,
+ 8, 8, 7]) * 0.1, p0, method=method)
+ assert_equal(np.sort(quantile), quantile)
+
+ # Also test one where the number of data points is clearly divisible:
+ quantile = np.quantile([0., 1., 2., 3.], p0, method=method)
+ assert_equal(np.sort(quantile), quantile)
+
+ @hypothesis.given(
+ arr=arrays(dtype=np.float64,
+ shape=st.integers(min_value=3, max_value=1000),
+ elements=st.floats(allow_infinity=False, allow_nan=False,
+ min_value=-1e300, max_value=1e300)))
+ def test_quantile_monotonic_hypo(self, arr):
+ p0 = np.arange(0, 1, 0.01)
+ quantile = np.quantile(arr, p0)
+ assert_equal(np.sort(quantile), quantile)
+
+ def test_quantile_scalar_nan(self):
+ a = np.array([[10., 7., 4.], [3., 2., 1.]])
+ a[0][1] = np.nan
+ actual = np.quantile(a, 0.5)
+ assert np.isscalar(actual)
+ assert_equal(np.quantile(a, 0.5), np.nan)
+
+ @pytest.mark.parametrize("weights", [False, True])
+ @pytest.mark.parametrize("method", quantile_methods)
+ @pytest.mark.parametrize("alpha", [0.2, 0.5, 0.9])
+ def test_quantile_identification_equation(self, weights, method, alpha):
+ # Test that the identification equation holds for the empirical
+ # CDF:
+ # E[V(x, Y)] = 0 <=> x is quantile
+ # with Y the random variable for which we have observed values and
+ # V(x, y) the canonical identification function for the quantile (at
+ # level alpha), see
+ # https://doi.org/10.48550/arXiv.0912.0902
+ if weights and method not in methods_supporting_weights:
+ pytest.skip("Weights not supported by method.")
+ rng = np.random.default_rng(4321)
+ # We choose n and alpha such that we cover 3 cases:
+ # - n * alpha is an integer
+ # - n * alpha is a float that gets rounded down
+        # - n * alpha is a float that gets rounded up
+ n = 102 # n * alpha = 20.4, 51. , 91.8
+ y = rng.random(n)
+ w = rng.integers(low=0, high=10, size=n) if weights else None
+ x = np.quantile(y, alpha, method=method, weights=w)
+
+ if method in ("higher",):
+ # These methods do not fulfill the identification equation.
+ assert np.abs(np.mean(self.V(x, y, alpha))) > 0.1 / n
+ elif int(n * alpha) == n * alpha and not weights:
+ # We can expect exact results, up to machine precision.
+ assert_allclose(
+ np.average(self.V(x, y, alpha), weights=w), 0, atol=1e-14,
+ )
+ else:
+            # V = (x >= y) - alpha cannot average to zero exactly, but it
+            # does to within "sample precision".
+ assert_allclose(np.average(self.V(x, y, alpha), weights=w), 0,
+ atol=1 / n / np.amin([alpha, 1 - alpha]))
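+
+    # Illustrative sketch (not upstream code; helper name is made up):
+    # np.mean(V(x, y, alpha)) is the empirical CDF at x minus alpha, so
+    # for an inverted-CDF quantile of an evenly spread sample it is 0.
+    def _identification_sketch(self):
+        y = np.arange(1, 101) / 100.0
+        x = np.quantile(y, 0.25, method="inverted_cdf")
+        return np.mean(self.V(x, y, 0.25))  # == 0.0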
+
+ @pytest.mark.parametrize("weights", [False, True])
+ @pytest.mark.parametrize("method", quantile_methods)
+ @pytest.mark.parametrize("alpha", [0.2, 0.5, 0.9])
+ def test_quantile_add_and_multiply_constant(self, weights, method, alpha):
+ # Test that
+ # 1. quantile(c + x) = c + quantile(x)
+ # 2. quantile(c * x) = c * quantile(x)
+ # 3. quantile(-x) = -quantile(x, 1 - alpha)
+        # On empirical quantiles, these identities do not hold exactly.
+ # Koenker (2005) "Quantile Regression" Chapter 2.2.3 calls these
+ # properties equivariance.
+ if weights and method not in methods_supporting_weights:
+ pytest.skip("Weights not supported by method.")
+ rng = np.random.default_rng(4321)
+ # We choose n and alpha such that we have cases for
+ # - n * alpha is an integer
+ # - n * alpha is a float that gets rounded down
+        # - n * alpha is a float that gets rounded up
+ n = 102 # n * alpha = 20.4, 51. , 91.8
+ y = rng.random(n)
+ w = rng.integers(low=0, high=10, size=n) if weights else None
+ q = np.quantile(y, alpha, method=method, weights=w)
+ c = 13.5
+
+ # 1
+ assert_allclose(np.quantile(c + y, alpha, method=method, weights=w),
+ c + q)
+ # 2
+ assert_allclose(np.quantile(c * y, alpha, method=method, weights=w),
+ c * q)
+ # 3
+ if weights:
+ # From here on, we would need more methods to support weights.
+ return
+ q = -np.quantile(-y, 1 - alpha, method=method)
+ if method == "inverted_cdf":
+ if (
+ n * alpha == int(n * alpha)
+ or np.round(n * alpha) == int(n * alpha) + 1
+ ):
+ assert_allclose(q, np.quantile(y, alpha, method="higher"))
+ else:
+ assert_allclose(q, np.quantile(y, alpha, method="lower"))
+ elif method == "closest_observation":
+ if n * alpha == int(n * alpha):
+ assert_allclose(q, np.quantile(y, alpha, method="higher"))
+ elif np.round(n * alpha) == int(n * alpha) + 1:
+ assert_allclose(
+ q, np.quantile(y, alpha + 1 / n, method="higher"))
+ else:
+ assert_allclose(q, np.quantile(y, alpha, method="lower"))
+ elif method == "interpolated_inverted_cdf":
+ assert_allclose(q, np.quantile(y, alpha + 1 / n, method=method))
+ elif method == "nearest":
+ if n * alpha == int(n * alpha):
+ assert_allclose(q, np.quantile(y, alpha + 1 / n, method=method))
+ else:
+ assert_allclose(q, np.quantile(y, alpha, method=method))
+ elif method == "lower":
+ assert_allclose(q, np.quantile(y, alpha, method="higher"))
+ elif method == "higher":
+ assert_allclose(q, np.quantile(y, alpha, method="lower"))
+ else:
+ # "averaged_inverted_cdf", "hazen", "weibull", "linear",
+ # "median_unbiased", "normal_unbiased", "midpoint"
+ assert_allclose(q, np.quantile(y, alpha, method=method))
+
+ @pytest.mark.parametrize("method", methods_supporting_weights)
+ @pytest.mark.parametrize("alpha", [0.2, 0.5, 0.9])
+ def test_quantile_constant_weights(self, method, alpha):
+ rng = np.random.default_rng(4321)
+ # We choose n and alpha such that we have cases for
+ # - n * alpha is an integer
+ # - n * alpha is a float that gets rounded down
+        # - n * alpha is a float that gets rounded up
+ n = 102 # n * alpha = 20.4, 51. , 91.8
+ y = rng.random(n)
+ q = np.quantile(y, alpha, method=method)
+
+ w = np.ones_like(y)
+ qw = np.quantile(y, alpha, method=method, weights=w)
+ assert_allclose(qw, q)
+
+ w = 8.125 * np.ones_like(y)
+ qw = np.quantile(y, alpha, method=method, weights=w)
+ assert_allclose(qw, q)
+
+ @pytest.mark.parametrize("method", methods_supporting_weights)
+ @pytest.mark.parametrize("alpha", [0, 0.2, 0.5, 0.9, 1])
+ def test_quantile_with_integer_weights(self, method, alpha):
+ # Integer weights can be interpreted as repeated observations.
+ rng = np.random.default_rng(4321)
+ # We choose n and alpha such that we cover the cases where
+ # - n * alpha is an integer
+ # - n * alpha is a float that gets rounded down
+ # - n * alpha is a float that gets rounded up
+ n = 102 # n * alpha = 20.4, 51. , 91.8
+ y = rng.random(n)
+ w = rng.integers(low=0, high=10, size=n, dtype=np.int32)
+
+ qw = np.quantile(y, alpha, method=method, weights=w)
+ q = np.quantile(np.repeat(y, w), alpha, method=method)
+ assert_allclose(qw, q)
+
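+ def test_quantile_integer_weights_tiny_sketch(self):
+ # Editor's sketch: a weight of k behaves exactly like repeating
+ # the observation k times, spelled out on a 3-element array.
+ y = np.array([1.0, 2.0, 3.0])
+ w = np.array([2, 1, 0])
+ assert_allclose(
+ np.quantile(y, 0.5, weights=w, method="inverted_cdf"),
+ np.quantile(np.repeat(y, w), 0.5, method="inverted_cdf"))
+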
+ @pytest.mark.parametrize("method", methods_supporting_weights)
+ def test_quantile_with_weights_and_axis(self, method):
+ rng = np.random.default_rng(4321)
+
+ # 1d weight and single alpha
+ y = rng.random((2, 10, 3))
+ w = np.abs(rng.random(10))
+ alpha = 0.5
+ q = np.quantile(y, alpha, weights=w, method=method, axis=1)
+ q_res = np.zeros(shape=(2, 3))
+ for i in range(2):
+ for j in range(3):
+ q_res[i, j] = np.quantile(
+ y[i, :, j], alpha, method=method, weights=w
+ )
+ assert_allclose(q, q_res)
+
+ # 1d weight and 1d alpha
+ alpha = [0, 0.2, 0.4, 0.6, 0.8, 1] # shape (6,)
+ q = np.quantile(y, alpha, weights=w, method=method, axis=1)
+ q_res = np.zeros(shape=(6, 2, 3))
+ for i in range(2):
+ for j in range(3):
+ q_res[:, i, j] = np.quantile(
+ y[i, :, j], alpha, method=method, weights=w
+ )
+ assert_allclose(q, q_res)
+
+ # 1d weight and 2d alpha
+ alpha = [[0, 0.2], [0.4, 0.6], [0.8, 1]] # shape (3, 2)
+ q = np.quantile(y, alpha, weights=w, method=method, axis=1)
+ q_res = q_res.reshape((3, 2, 2, 3))
+ assert_allclose(q, q_res)
+
+ # shape of weights equals shape of y
+ w = np.abs(rng.random((2, 10, 3)))
+ alpha = 0.5
+ q = np.quantile(y, alpha, weights=w, method=method, axis=1)
+ q_res = np.zeros(shape=(2, 3))
+ for i in range(2):
+ for j in range(3):
+ q_res[i, j] = np.quantile(
+ y[i, :, j], alpha, method=method, weights=w[i, :, j]
+ )
+ assert_allclose(q, q_res)
+
+ @pytest.mark.parametrize("method", methods_supporting_weights)
+ def test_quantile_weights_min_max(self, method):
+ # Test weighted quantile at 0 and 1 with leading and trailing zero
+ # weights.
+ w = [0, 0, 1, 2, 3, 0]
+ y = np.arange(6)
+ y_min = np.quantile(y, 0, weights=w, method=method)
+ y_max = np.quantile(y, 1, weights=w, method=method)
+ assert y_min == y[2] # == 2
+ assert y_max == y[4] # == 4
+
+ def test_quantile_weights_raises_negative_weights(self):
+ y = [1, 2]
+ w = [-0.5, 1]
+ with pytest.raises(ValueError, match="Weights must be non-negative"):
+ np.quantile(y, 0.5, weights=w, method="inverted_cdf")
+
+ @pytest.mark.parametrize(
+ "method",
+ sorted(set(quantile_methods) - set(methods_supporting_weights)),
+ )
+ def test_quantile_weights_raises_unsupported_methods(self, method):
+ y = [1, 2]
+ w = [0.5, 1]
+ msg = "Only method 'inverted_cdf' supports weights"
+ with pytest.raises(ValueError, match=msg):
+ np.quantile(y, 0.5, weights=w, method=method)
+
+ def test_weibull_fraction(self):
+ arr = [Fraction(0, 1), Fraction(1, 10)]
+ quantile = np.quantile(arr, [0, ], method='weibull')
+ assert_equal(quantile, np.array(Fraction(0, 1)))
+ quantile = np.quantile(arr, [Fraction(1, 2)], method='weibull')
+ assert_equal(quantile, np.array(Fraction(1, 20)))
+
+ def test_closest_observation(self):
+ # Round ties to nearest even order statistic (see #26656)
+ m = 'closest_observation'
+ q = 0.5
+ arr = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
+ assert_equal(2, np.quantile(arr[0:3], q, method=m))
+ assert_equal(2, np.quantile(arr[0:4], q, method=m))
+ assert_equal(2, np.quantile(arr[0:5], q, method=m))
+ assert_equal(3, np.quantile(arr[0:6], q, method=m))
+ assert_equal(4, np.quantile(arr[0:7], q, method=m))
+ assert_equal(4, np.quantile(arr[0:8], q, method=m))
+ assert_equal(4, np.quantile(arr[0:9], q, method=m))
+ assert_equal(5, np.quantile(arr, q, method=m))
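+
+ def test_closest_observation_tie_sketch(self):
+ # Editor's note on the rounding rule above: for [1, 2, 3, 4] and
+ # q = 0.5, the target n * q = 2 falls exactly between the 2nd and
+ # 3rd order statistics; the tie rounds to the even one (the 2nd),
+ # hence the result 2.
+ assert_equal(2, np.quantile([1, 2, 3, 4], 0.5,
+ method='closest_observation'))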
+
+
+class TestLerp:
+ @hypothesis.given(t0=st.floats(allow_nan=False, allow_infinity=False,
+ min_value=0, max_value=1),
+ t1=st.floats(allow_nan=False, allow_infinity=False,
+ min_value=0, max_value=1),
+ a=st.floats(allow_nan=False, allow_infinity=False,
+ min_value=-1e300, max_value=1e300),
+ b=st.floats(allow_nan=False, allow_infinity=False,
+ min_value=-1e300, max_value=1e300))
+ def test_linear_interpolation_formula_monotonic(self, t0, t1, a, b):
+ l0 = nfb._lerp(a, b, t0)
+ l1 = nfb._lerp(a, b, t1)
+ if t0 == t1 or a == b:
+ assert l0 == l1 # uninteresting
+ elif (t0 < t1) == (a < b):
+ assert l0 <= l1
+ else:
+ assert l0 >= l1
+
+ @hypothesis.given(t=st.floats(allow_nan=False, allow_infinity=False,
+ min_value=0, max_value=1),
+ a=st.floats(allow_nan=False, allow_infinity=False,
+ min_value=-1e300, max_value=1e300),
+ b=st.floats(allow_nan=False, allow_infinity=False,
+ min_value=-1e300, max_value=1e300))
+ def test_linear_interpolation_formula_bounded(self, t, a, b):
+ if a <= b:
+ assert a <= nfb._lerp(a, b, t) <= b
+ else:
+ assert b <= nfb._lerp(a, b, t) <= a
+
+ @hypothesis.given(t=st.floats(allow_nan=False, allow_infinity=False,
+ min_value=0, max_value=1),
+ a=st.floats(allow_nan=False, allow_infinity=False,
+ min_value=-1e300, max_value=1e300),
+ b=st.floats(allow_nan=False, allow_infinity=False,
+ min_value=-1e300, max_value=1e300))
+ def test_linear_interpolation_formula_symmetric(self, t, a, b):
+ # double subtraction is needed to remove the extra precision of t < 0.5
+ left = nfb._lerp(a, b, 1 - (1 - t))
+ right = nfb._lerp(b, a, 1 - t)
+ assert_allclose(left, right)
+
+ def test_linear_interpolation_formula_0d_inputs(self):
+ a = np.array(2)
+ b = np.array(5)
+ t = np.array(0.2)
+ assert nfb._lerp(a, b, t) == 2.6
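+
+ def test_lerp_reference_formula_sketch(self):
+ # Editor's sketch: away from cancellation-prone corners, _lerp
+ # should agree with the textbook formula a + t * (b - a).
+ a, b, t = 2.0, 5.0, 0.2
+ assert_allclose(nfb._lerp(a, b, t), a + t * (b - a))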
+
+
+class TestMedian:
+
+ def test_basic(self):
+ a0 = np.array(1)
+ a1 = np.arange(2)
+ a2 = np.arange(6).reshape(2, 3)
+ assert_equal(np.median(a0), 1)
+ assert_allclose(np.median(a1), 0.5)
+ assert_allclose(np.median(a2), 2.5)
+ assert_allclose(np.median(a2, axis=0), [1.5, 2.5, 3.5])
+ assert_equal(np.median(a2, axis=1), [1, 4])
+ assert_allclose(np.median(a2, axis=None), 2.5)
+
+ a = np.array([0.0444502, 0.0463301, 0.141249, 0.0606775])
+ assert_almost_equal((a[1] + a[3]) / 2., np.median(a))
+ a = np.array([0.0463301, 0.0444502, 0.141249])
+ assert_equal(a[0], np.median(a))
+ a = np.array([0.0444502, 0.141249, 0.0463301])
+ assert_equal(a[-1], np.median(a))
+ # check array scalar result
+ assert_equal(np.median(a).ndim, 0)
+ a[1] = np.nan
+ assert_equal(np.median(a).ndim, 0)
+
+ def test_axis_keyword(self):
+ a3 = np.array([[2, 3],
+ [0, 1],
+ [6, 7],
+ [4, 5]])
+ for a in [a3, np.random.randint(0, 100, size=(2, 3, 4))]:
+ orig = a.copy()
+ np.median(a, axis=None)
+ for ax in range(a.ndim):
+ np.median(a, axis=ax)
+ assert_array_equal(a, orig)
+
+ assert_allclose(np.median(a3, axis=0), [3, 4])
+ assert_allclose(np.median(a3.T, axis=1), [3, 4])
+ assert_allclose(np.median(a3), 3.5)
+ assert_allclose(np.median(a3, axis=None), 3.5)
+ assert_allclose(np.median(a3.T), 3.5)
+
+ def test_overwrite_keyword(self):
+ a3 = np.array([[2, 3],
+ [0, 1],
+ [6, 7],
+ [4, 5]])
+ a0 = np.array(1)
+ a1 = np.arange(2)
+ a2 = np.arange(6).reshape(2, 3)
+ assert_allclose(np.median(a0.copy(), overwrite_input=True), 1)
+ assert_allclose(np.median(a1.copy(), overwrite_input=True), 0.5)
+ assert_allclose(np.median(a2.copy(), overwrite_input=True), 2.5)
+ assert_allclose(
+ np.median(a2.copy(), overwrite_input=True, axis=0), [1.5, 2.5, 3.5])
+ assert_allclose(
+ np.median(a2.copy(), overwrite_input=True, axis=1), [1, 4])
+ assert_allclose(
+ np.median(a2.copy(), overwrite_input=True, axis=None), 2.5)
+ assert_allclose(
+ np.median(a3.copy(), overwrite_input=True, axis=0), [3, 4])
+ assert_allclose(
+ np.median(a3.T.copy(), overwrite_input=True, axis=1), [3, 4])
+
+ a4 = np.arange(3 * 4 * 5, dtype=np.float32).reshape((3, 4, 5))
+ np.random.shuffle(a4.ravel())
+ assert_allclose(np.median(a4, axis=None),
+ np.median(a4.copy(), axis=None, overwrite_input=True))
+ assert_allclose(np.median(a4, axis=0),
+ np.median(a4.copy(), axis=0, overwrite_input=True))
+ assert_allclose(np.median(a4, axis=1),
+ np.median(a4.copy(), axis=1, overwrite_input=True))
+ assert_allclose(np.median(a4, axis=2),
+ np.median(a4.copy(), axis=2, overwrite_input=True))
+
+ def test_array_like(self):
+ x = [1, 2, 3]
+ assert_almost_equal(np.median(x), 2)
+ x2 = [x]
+ assert_almost_equal(np.median(x2), 2)
+ assert_allclose(np.median(x2, axis=0), x)
+
+ def test_subclass(self):
+ # gh-3846
+ class MySubClass(np.ndarray):
+
+ def __new__(cls, input_array, info=None):
+ obj = np.asarray(input_array).view(cls)
+ obj.info = info
+ return obj
+
+ def mean(self, axis=None, dtype=None, out=None):
+ return -7
+
+ a = MySubClass([1, 2, 3])
+ assert_equal(np.median(a), -7)
+
+ @pytest.mark.parametrize('arr',
+ ([1., 2., 3.], [1., np.nan, 3.], np.nan, 0.))
+ def test_subclass2(self, arr):
+ """Check that we return subclasses, even if a NaN scalar."""
+ class MySubclass(np.ndarray):
+ pass
+
+ m = np.median(np.array(arr).view(MySubclass))
+ assert isinstance(m, MySubclass)
+
+ def test_out(self):
+ o = np.zeros((4,))
+ d = np.ones((3, 4))
+ assert_equal(np.median(d, 0, out=o), o)
+ o = np.zeros((3,))
+ assert_equal(np.median(d, 1, out=o), o)
+ o = np.zeros(())
+ assert_equal(np.median(d, out=o), o)
+
+ def test_out_nan(self):
+ with warnings.catch_warnings(record=True):
+ warnings.filterwarnings('always', '', RuntimeWarning)
+ o = np.zeros((4,))
+ d = np.ones((3, 4))
+ d[2, 1] = np.nan
+ assert_equal(np.median(d, 0, out=o), o)
+ o = np.zeros((3,))
+ assert_equal(np.median(d, 1, out=o), o)
+ o = np.zeros(())
+ assert_equal(np.median(d, out=o), o)
+
+ def test_nan_behavior(self):
+ a = np.arange(24, dtype=float)
+ a[2] = np.nan
+ assert_equal(np.median(a), np.nan)
+ assert_equal(np.median(a, axis=0), np.nan)
+
+ a = np.arange(24, dtype=float).reshape(2, 3, 4)
+ a[1, 2, 3] = np.nan
+ a[1, 1, 2] = np.nan
+
+ # no axis
+ assert_equal(np.median(a), np.nan)
+ assert_equal(np.median(a).ndim, 0)
+
+ # axis0
+ b = np.median(np.arange(24, dtype=float).reshape(2, 3, 4), 0)
+ b[2, 3] = np.nan
+ b[1, 2] = np.nan
+ assert_equal(np.median(a, 0), b)
+
+ # axis1
+ b = np.median(np.arange(24, dtype=float).reshape(2, 3, 4), 1)
+ b[1, 3] = np.nan
+ b[1, 2] = np.nan
+ assert_equal(np.median(a, 1), b)
+
+ # axis02
+ b = np.median(np.arange(24, dtype=float).reshape(2, 3, 4), (0, 2))
+ b[1] = np.nan
+ b[2] = np.nan
+ assert_equal(np.median(a, (0, 2)), b)
+
+ @pytest.mark.skipif(IS_WASM, reason="fp errors don't work correctly")
+ def test_empty(self):
+ # mean(empty array) emits two warnings: empty slice and divide by 0
+ a = np.array([], dtype=float)
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always', '', RuntimeWarning)
+ assert_equal(np.median(a), np.nan)
+ assert_(w[0].category is RuntimeWarning)
+ assert_equal(len(w), 2)
+
+ # multiple dimensions
+ a = np.array([], dtype=float, ndmin=3)
+ # no axis
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always', '', RuntimeWarning)
+ assert_equal(np.median(a), np.nan)
+ assert_(w[0].category is RuntimeWarning)
+
+ # axis 0 and 1
+ b = np.array([], dtype=float, ndmin=2)
+ assert_equal(np.median(a, axis=0), b)
+ assert_equal(np.median(a, axis=1), b)
+
+ # axis 2
+ b = np.array(np.nan, dtype=float, ndmin=2)
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always', '', RuntimeWarning)
+ assert_equal(np.median(a, axis=2), b)
+ assert_(w[0].category is RuntimeWarning)
+
+ def test_object(self):
+ o = np.arange(7.)
+ # `is float` is required; a second positional argument to assert_
+ # is only used as the error message.
+ assert_(type(np.median(o.astype(object))) is float)
+ o[2] = np.nan
+ assert_(type(np.median(o.astype(object))) is float)
+
+ def test_extended_axis(self):
+ o = np.random.normal(size=(71, 23))
+ x = np.dstack([o] * 10)
+ assert_equal(np.median(x, axis=(0, 1)), np.median(o))
+ x = np.moveaxis(x, -1, 0)
+ assert_equal(np.median(x, axis=(-2, -1)), np.median(o))
+ x = x.swapaxes(0, 1).copy()
+ assert_equal(np.median(x, axis=(0, -1)), np.median(o))
+
+ assert_equal(np.median(x, axis=(0, 1, 2)), np.median(x, axis=None))
+ assert_equal(np.median(x, axis=(0, )), np.median(x, axis=0))
+ assert_equal(np.median(x, axis=(-1, )), np.median(x, axis=-1))
+
+ d = np.arange(3 * 5 * 7 * 11).reshape((3, 5, 7, 11))
+ np.random.shuffle(d.ravel())
+ assert_equal(np.median(d, axis=(0, 1, 2))[0],
+ np.median(d[:, :, :, 0].flatten()))
+ assert_equal(np.median(d, axis=(0, 1, 3))[1],
+ np.median(d[:, :, 1, :].flatten()))
+ assert_equal(np.median(d, axis=(3, 1, -4))[2],
+ np.median(d[:, :, 2, :].flatten()))
+ assert_equal(np.median(d, axis=(3, 1, 2))[2],
+ np.median(d[2, :, :, :].flatten()))
+ assert_equal(np.median(d, axis=(3, 2))[2, 1],
+ np.median(d[2, 1, :, :].flatten()))
+ assert_equal(np.median(d, axis=(1, -2))[2, 1],
+ np.median(d[2, :, :, 1].flatten()))
+ assert_equal(np.median(d, axis=(1, 3))[2, 2],
+ np.median(d[2, :, 2, :].flatten()))
+
+ def test_extended_axis_invalid(self):
+ d = np.ones((3, 5, 7, 11))
+ assert_raises(AxisError, np.median, d, axis=-5)
+ assert_raises(AxisError, np.median, d, axis=(0, -5))
+ assert_raises(AxisError, np.median, d, axis=4)
+ assert_raises(AxisError, np.median, d, axis=(0, 4))
+ assert_raises(ValueError, np.median, d, axis=(1, 1))
+
+ def test_keepdims(self):
+ d = np.ones((3, 5, 7, 11))
+ assert_equal(np.median(d, axis=None, keepdims=True).shape,
+ (1, 1, 1, 1))
+ assert_equal(np.median(d, axis=(0, 1), keepdims=True).shape,
+ (1, 1, 7, 11))
+ assert_equal(np.median(d, axis=(0, 3), keepdims=True).shape,
+ (1, 5, 7, 1))
+ assert_equal(np.median(d, axis=(1,), keepdims=True).shape,
+ (3, 1, 7, 11))
+ assert_equal(np.median(d, axis=(0, 1, 2, 3), keepdims=True).shape,
+ (1, 1, 1, 1))
+ assert_equal(np.median(d, axis=(0, 1, 3), keepdims=True).shape,
+ (1, 1, 7, 1))
+
+ @pytest.mark.parametrize(
+ argnames='axis',
+ argvalues=[
+ None,
+ 1,
+ (1, ),
+ (0, 1),
+ (-3, -1),
+ ]
+ )
+ def test_keepdims_out(self, axis):
+ d = np.ones((3, 5, 7, 11))
+ if axis is None:
+ shape_out = (1,) * d.ndim
+ else:
+ axis_norm = normalize_axis_tuple(axis, d.ndim)
+ shape_out = tuple(
+ 1 if i in axis_norm else d.shape[i] for i in range(d.ndim))
+ out = np.empty(shape_out)
+ result = np.median(d, axis=axis, keepdims=True, out=out)
+ assert result is out
+ assert_equal(result.shape, shape_out)
+
+ @pytest.mark.parametrize("dtype", ["m8[s]"])
+ @pytest.mark.parametrize("pos", [0, 23, 10])
+ def test_nat_behavior(self, dtype, pos):
+ # TODO: Median does not support Datetime, due to `mean`.
+ # NaT and NaN should behave the same, do basic tests for NaT.
+ a = np.arange(0, 24, dtype=dtype)
+ a[pos] = "NaT"
+ res = np.median(a)
+ assert res.dtype == dtype
+ assert np.isnat(res)
+ res = np.percentile(a, [30, 60])
+ assert res.dtype == dtype
+ assert np.isnat(res).all()
+
+ a = np.arange(0, 24 * 3, dtype=dtype).reshape(-1, 3)
+ a[pos, 1] = "NaT"
+ res = np.median(a, axis=0)
+ assert_array_equal(np.isnat(res), [False, True, False])
+
+
+class TestSortComplex:
+
+ @pytest.mark.parametrize("type_in, type_out", [
+ ('l', 'D'),
+ ('h', 'F'),
+ ('H', 'F'),
+ ('b', 'F'),
+ ('B', 'F'),
+ ('g', 'G'),
+ ])
+ def test_sort_real(self, type_in, type_out):
+ # sort_complex() type casting for real input types
+ a = np.array([5, 3, 6, 2, 1], dtype=type_in)
+ actual = np.sort_complex(a)
+ expected = np.sort(a).astype(type_out)
+ assert_equal(actual, expected)
+ assert_equal(actual.dtype, expected.dtype)
+
+ def test_sort_complex(self):
+ # sort_complex() handling of complex input
+ a = np.array([2 + 3j, 1 - 2j, 1 - 3j, 2 + 1j], dtype='D')
+ expected = np.array([1 - 3j, 1 - 2j, 2 + 1j, 2 + 3j], dtype='D')
+ actual = np.sort_complex(a)
+ assert_equal(actual, expected)
+ assert_equal(actual.dtype, expected.dtype)
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_histograms.py b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_histograms.py
new file mode 100644
index 0000000..b7752d1
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_histograms.py
@@ -0,0 +1,855 @@
+import pytest
+
+import numpy as np
+from numpy import histogram, histogram_bin_edges, histogramdd
+from numpy.testing import (
+ assert_,
+ assert_allclose,
+ assert_almost_equal,
+ assert_array_almost_equal,
+ assert_array_equal,
+ assert_array_max_ulp,
+ assert_equal,
+ assert_raises,
+ assert_raises_regex,
+ suppress_warnings,
+)
+
+
+class TestHistogram:
+
+ def setup_method(self):
+ pass
+
+ def teardown_method(self):
+ pass
+
+ def test_simple(self):
+ n = 100
+ v = np.random.rand(n)
+ (a, b) = histogram(v)
+ # check if the sum of the bins equals the number of samples
+ assert_equal(np.sum(a, axis=0), n)
+ # check that the bin counts are all equal when the data is sampled
+ # from a linear function
+ (a, b) = histogram(np.linspace(0, 10, 100))
+ assert_array_equal(a, 10)
+
+ def test_one_bin(self):
+ # Ticket 632
+ hist, edges = histogram([1, 2, 3, 4], [1, 2])
+ assert_array_equal(hist, [2, ])
+ assert_array_equal(edges, [1, 2])
+ assert_raises(ValueError, histogram, [1, 2], bins=0)
+ h, e = histogram([1, 2], bins=1)
+ assert_equal(h, np.array([2]))
+ assert_allclose(e, np.array([1., 2.]))
+
+ def test_density(self):
+ # Check that the integral of the density equals 1.
+ n = 100
+ v = np.random.rand(n)
+ a, b = histogram(v, density=True)
+ area = np.sum(a * np.diff(b))
+ assert_almost_equal(area, 1)
+
+ # Check with non-constant bin widths
+ v = np.arange(10)
+ bins = [0, 1, 3, 6, 10]
+ a, b = histogram(v, bins, density=True)
+ assert_array_equal(a, .1)
+ assert_equal(np.sum(a * np.diff(b)), 1)
+
+ # Test that passing False works too
+ a, b = histogram(v, bins, density=False)
+ assert_array_equal(a, [1, 2, 3, 4])
+
+ # Variable bin widths are especially useful to deal with
+ # infinities.
+ v = np.arange(10)
+ bins = [0, 1, 3, 6, np.inf]
+ a, b = histogram(v, bins, density=True)
+ assert_array_equal(a, [.1, .1, .1, 0.])
+
+ # Taken from a bug report from N. Becker on the numpy-discussion
+ # mailing list Aug. 6, 2010.
+ counts, dmy = np.histogram(
+ [1, 2, 3, 4], [0.5, 1.5, np.inf], density=True)
+ assert_equal(counts, [.25, 0])
+
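+ def test_density_manual_normalisation_sketch(self):
+ # Editor's sketch: density=True is equivalent to dividing the raw
+ # counts by the total count and by the bin widths.
+ v = np.arange(10)
+ bins = [0, 1, 3, 6, 10]
+ counts, edges = histogram(v, bins)
+ dens, _ = histogram(v, bins, density=True)
+ assert_allclose(dens, counts / counts.sum() / np.diff(edges))
+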
+ def test_outliers(self):
+ # Check that outliers are not tallied
+ a = np.arange(10) + .5
+
+ # Lower outliers
+ h, b = histogram(a, range=[0, 9])
+ assert_equal(h.sum(), 9)
+
+ # Upper outliers
+ h, b = histogram(a, range=[1, 10])
+ assert_equal(h.sum(), 9)
+
+ # Normalization
+ h, b = histogram(a, range=[1, 9], density=True)
+ assert_almost_equal((h * np.diff(b)).sum(), 1, decimal=15)
+
+ # Weights
+ w = np.arange(10) + .5
+ h, b = histogram(a, range=[1, 9], weights=w, density=True)
+ assert_equal((h * np.diff(b)).sum(), 1)
+
+ h, b = histogram(a, bins=8, range=[1, 9], weights=w)
+ assert_equal(h, w[1:-1])
+
+ def test_arr_weights_mismatch(self):
+ a = np.arange(10) + .5
+ w = np.arange(11) + .5
+ with assert_raises_regex(ValueError, "same shape as"):
+ h, b = histogram(a, range=[1, 9], weights=w, density=True)
+
+ def test_type(self):
+ # Check the type of the returned histogram
+ a = np.arange(10) + .5
+ h, b = histogram(a)
+ assert_(np.issubdtype(h.dtype, np.integer))
+
+ h, b = histogram(a, density=True)
+ assert_(np.issubdtype(h.dtype, np.floating))
+
+ h, b = histogram(a, weights=np.ones(10, int))
+ assert_(np.issubdtype(h.dtype, np.integer))
+
+ h, b = histogram(a, weights=np.ones(10, float))
+ assert_(np.issubdtype(h.dtype, np.floating))
+
+ def test_f32_rounding(self):
+ # gh-4799, check that the rounding of the edges works with float32
+ x = np.array([276.318359, -69.593948, 21.329449], dtype=np.float32)
+ y = np.array([5005.689453, 4481.327637, 6010.369629], dtype=np.float32)
+ counts_hist, xedges, yedges = np.histogram2d(x, y, bins=100)
+ assert_equal(counts_hist.sum(), 3.)
+
+ def test_bool_conversion(self):
+ # gh-12107
+ # Reference integer histogram
+ a = np.array([1, 1, 0], dtype=np.uint8)
+ int_hist, int_edges = np.histogram(a)
+
+ # Should raise a warning on booleans.
+ # Ensure that the histograms are equivalent; the warnings need to be
+ # suppressed to get at the actual outputs.
+ with suppress_warnings() as sup:
+ rec = sup.record(RuntimeWarning, 'Converting input from .*')
+ hist, edges = np.histogram([True, True, False])
+ # A warning should be issued
+ assert_equal(len(rec), 1)
+ assert_array_equal(hist, int_hist)
+ assert_array_equal(edges, int_edges)
+
+ def test_weights(self):
+ v = np.random.rand(100)
+ w = np.ones(100) * 5
+ a, b = histogram(v)
+ na, nb = histogram(v, density=True)
+ wa, wb = histogram(v, weights=w)
+ nwa, nwb = histogram(v, weights=w, density=True)
+ assert_array_almost_equal(a * 5, wa)
+ assert_array_almost_equal(na, nwa)
+
+ # Check weights are properly applied.
+ v = np.linspace(0, 10, 10)
+ w = np.concatenate((np.zeros(5), np.ones(5)))
+ wa, wb = histogram(v, bins=np.arange(11), weights=w)
+ assert_array_almost_equal(wa, w)
+
+ # Check with integer weights
+ wa, wb = histogram([1, 2, 2, 4], bins=4, weights=[4, 3, 2, 1])
+ assert_array_equal(wa, [4, 5, 0, 1])
+ wa, wb = histogram(
+ [1, 2, 2, 4], bins=4, weights=[4, 3, 2, 1], density=True)
+ assert_array_almost_equal(wa, np.array([4, 5, 0, 1]) / 10. / 3. * 4)
+
+ # Check weights with non-uniform bin widths
+ a, b = histogram(
+ np.arange(9), [0, 1, 3, 6, 10],
+ weights=[2, 1, 1, 1, 1, 1, 1, 1, 1], density=True)
+ assert_almost_equal(a, [.2, .1, .1, .075])
+
+ def test_exotic_weights(self):
+
+ # Test the use of weights that are not integers or floats, but e.g.
+ # complex numbers or object types.
+
+ # Complex weights
+ values = np.array([1.3, 2.5, 2.3])
+ weights = np.array([1, -1, 2]) + 1j * np.array([2, 1, 2])
+
+ # Check with custom bins
+ wa, wb = histogram(values, bins=[0, 2, 3], weights=weights)
+ assert_array_almost_equal(wa, np.array([1, 1]) + 1j * np.array([2, 3]))
+
+ # Check with even bins
+ wa, wb = histogram(values, bins=2, range=[1, 3], weights=weights)
+ assert_array_almost_equal(wa, np.array([1, 1]) + 1j * np.array([2, 3]))
+
+ # Decimal weights
+ from decimal import Decimal
+ values = np.array([1.3, 2.5, 2.3])
+ weights = np.array([Decimal(1), Decimal(2), Decimal(3)])
+
+ # Check with custom bins
+ wa, wb = histogram(values, bins=[0, 2, 3], weights=weights)
+ assert_array_almost_equal(wa, [Decimal(1), Decimal(5)])
+
+ # Check with even bins
+ wa, wb = histogram(values, bins=2, range=[1, 3], weights=weights)
+ assert_array_almost_equal(wa, [Decimal(1), Decimal(5)])
+
+ def test_no_side_effects(self):
+ # This is a regression test that ensures that values passed to
+ # ``histogram`` are unchanged.
+ values = np.array([1.3, 2.5, 2.3])
+ np.histogram(values, range=[-10, 10], bins=100)
+ assert_array_almost_equal(values, [1.3, 2.5, 2.3])
+
+ def test_empty(self):
+ a, b = histogram([], bins=([0, 1]))
+ assert_array_equal(a, np.array([0]))
+ assert_array_equal(b, np.array([0, 1]))
+
+ def test_error_binnum_type(self):
+ # Tests that the right error is raised when the bins argument is a float
+ vals = np.linspace(0.0, 1.0, num=100)
+ histogram(vals, 5)
+ assert_raises(TypeError, histogram, vals, 2.4)
+
+ def test_finite_range(self):
+ # Normal ranges should be fine
+ vals = np.linspace(0.0, 1.0, num=100)
+ histogram(vals, range=[0.25, 0.75])
+ assert_raises(ValueError, histogram, vals, range=[np.nan, 0.75])
+ assert_raises(ValueError, histogram, vals, range=[0.25, np.inf])
+
+ def test_invalid_range(self):
+ # start of range must be < end of range
+ vals = np.linspace(0.0, 1.0, num=100)
+ with assert_raises_regex(ValueError, "max must be larger than"):
+ np.histogram(vals, range=[0.1, 0.01])
+
+ def test_bin_edge_cases(self):
+ # Ensure that floating-point computations correctly place edge cases.
+ arr = np.array([337, 404, 739, 806, 1007, 1811, 2012])
+ hist, edges = np.histogram(arr, bins=8296, range=(2, 2280))
+ mask = hist > 0
+ left_edges = edges[:-1][mask]
+ right_edges = edges[1:][mask]
+ for x, left, right in zip(arr, left_edges, right_edges):
+ assert_(x >= left)
+ assert_(x < right)
+
+ def test_last_bin_inclusive_range(self):
+ arr = np.array([0., 0., 0., 1., 2., 3., 3., 4., 5.])
+ hist, edges = np.histogram(arr, bins=30, range=(-0.5, 5))
+ assert_equal(hist[-1], 1)
+
+ def test_bin_array_dims(self):
+ # gracefully handle bins object > 1 dimension
+ vals = np.linspace(0.0, 1.0, num=100)
+ bins = np.array([[0, 0.5], [0.6, 1.0]])
+ with assert_raises_regex(ValueError, "must be 1d"):
+ np.histogram(vals, bins=bins)
+
+ def test_unsigned_monotonicity_check(self):
+ # Ensures ValueError is raised if bins not increasing monotonically
+ # when bins contain unsigned values (see #9222)
+ arr = np.array([2])
+ bins = np.array([1, 3, 1], dtype='uint64')
+ with assert_raises(ValueError):
+ hist, edges = np.histogram(arr, bins=bins)
+
+ def test_object_array_of_0d(self):
+ # gh-7864
+ assert_raises(ValueError,
+ histogram, [np.array(0.4) for i in range(10)] + [-np.inf])
+ assert_raises(ValueError,
+ histogram, [np.array(0.4) for i in range(10)] + [np.inf])
+
+ # these should not crash
+ np.histogram([np.array(0.5) for i in range(10)] + [.500000000000002])
+ np.histogram([np.array(0.5) for i in range(10)] + [.5])
+
+ def test_some_nan_values(self):
+ # gh-7503
+ one_nan = np.array([0, 1, np.nan])
+ all_nan = np.array([np.nan, np.nan])
+
+ # the internal comparisons with NaN give warnings
+ sup = suppress_warnings()
+ sup.filter(RuntimeWarning)
+ with sup:
+ # can't infer range with nan
+ assert_raises(ValueError, histogram, one_nan, bins='auto')
+ assert_raises(ValueError, histogram, all_nan, bins='auto')
+
+ # explicit range solves the problem
+ h, b = histogram(one_nan, bins='auto', range=(0, 1))
+ assert_equal(h.sum(), 2) # nan is not counted
+ h, b = histogram(all_nan, bins='auto', range=(0, 1))
+ assert_equal(h.sum(), 0) # nan is not counted
+
+ # as does an explicit set of bins
+ h, b = histogram(one_nan, bins=[0, 1])
+ assert_equal(h.sum(), 2) # nan is not counted
+ h, b = histogram(all_nan, bins=[0, 1])
+ assert_equal(h.sum(), 0) # nan is not counted
+
+ def test_datetime(self):
+ begin = np.datetime64('2000-01-01', 'D')
+ offsets = np.array([0, 0, 1, 1, 2, 3, 5, 10, 20])
+ bins = np.array([0, 2, 7, 20])
+ dates = begin + offsets
+ date_bins = begin + bins
+
+ td = np.dtype('timedelta64[D]')
+
+ # Results should be the same for integer offsets or datetime values.
+ # For now, only explicit bins are supported, since linspace does not
+ # work on datetimes or timedeltas
+ d_count, d_edge = histogram(dates, bins=date_bins)
+ t_count, t_edge = histogram(offsets.astype(td), bins=bins.astype(td))
+ i_count, i_edge = histogram(offsets, bins=bins)
+
+ assert_equal(d_count, i_count)
+ assert_equal(t_count, i_count)
+
+ assert_equal((d_edge - begin).astype(int), i_edge)
+ assert_equal(t_edge.astype(int), i_edge)
+
+ assert_equal(d_edge.dtype, dates.dtype)
+ assert_equal(t_edge.dtype, td)
+
+ def do_signed_overflow_bounds(self, dtype):
+ exponent = 8 * np.dtype(dtype).itemsize - 1
+ arr = np.array([-2**exponent + 4, 2**exponent - 4], dtype=dtype)
+ hist, e = histogram(arr, bins=2)
+ assert_equal(e, [-2**exponent + 4, 0, 2**exponent - 4])
+ assert_equal(hist, [1, 1])
+
+ def test_signed_overflow_bounds(self):
+ self.do_signed_overflow_bounds(np.byte)
+ self.do_signed_overflow_bounds(np.short)
+ self.do_signed_overflow_bounds(np.intc)
+ self.do_signed_overflow_bounds(np.int_)
+ self.do_signed_overflow_bounds(np.longlong)
+
+ def do_precision_lower_bound(self, float_small, float_large):
+ eps = np.finfo(float_large).eps
+
+ arr = np.array([1.0], float_small)
+ range = np.array([1.0 + eps, 2.0], float_large)
+
+ # test is looking for behavior when the bounds change between dtypes
+ if range.astype(float_small)[0] != 1:
+ return
+
+ # previously crashed
+ count, x_loc = np.histogram(arr, bins=1, range=range)
+ assert_equal(count, [0])
+ assert_equal(x_loc.dtype, float_large)
+
+ def do_precision_upper_bound(self, float_small, float_large):
+ eps = np.finfo(float_large).eps
+
+ arr = np.array([1.0], float_small)
+ range = np.array([0.0, 1.0 - eps], float_large)
+
+ # test is looking for behavior when the bounds change between dtypes
+ if range.astype(float_small)[-1] != 1:
+ return
+
+ # previously crashed
+ count, x_loc = np.histogram(arr, bins=1, range=range)
+ assert_equal(count, [0])
+
+ assert_equal(x_loc.dtype, float_large)
+
+ def do_precision(self, float_small, float_large):
+ self.do_precision_lower_bound(float_small, float_large)
+ self.do_precision_upper_bound(float_small, float_large)
+
+ def test_precision(self):
+ # not looping results in a useful stack trace upon failure
+ self.do_precision(np.half, np.single)
+ self.do_precision(np.half, np.double)
+ self.do_precision(np.half, np.longdouble)
+ self.do_precision(np.single, np.double)
+ self.do_precision(np.single, np.longdouble)
+ self.do_precision(np.double, np.longdouble)
+
+ def test_histogram_bin_edges(self):
+ hist, e = histogram([1, 2, 3, 4], [1, 2])
+ edges = histogram_bin_edges([1, 2, 3, 4], [1, 2])
+ assert_array_equal(edges, e)
+
+ arr = np.array([0., 0., 0., 1., 2., 3., 3., 4., 5.])
+ hist, e = histogram(arr, bins=30, range=(-0.5, 5))
+ edges = histogram_bin_edges(arr, bins=30, range=(-0.5, 5))
+ assert_array_equal(edges, e)
+
+ hist, e = histogram(arr, bins='auto', range=(0, 1))
+ edges = histogram_bin_edges(arr, bins='auto', range=(0, 1))
+ assert_array_equal(edges, e)
+
+ def test_small_value_range(self):
+ arr = np.array([1, 1 + 2e-16] * 10)
+ with pytest.raises(ValueError, match="Too many bins for data range"):
+ histogram(arr, bins=10)
+
+ # @requires_memory(free_bytes=1e10)
+ # @pytest.mark.slow
+ @pytest.mark.skip(reason="Bad memory reports lead to OOM in ci testing")
+ def test_big_arrays(self):
+ sample = np.zeros([100000000, 3])
+ xbins = 400
+ ybins = 400
+ zbins = np.arange(16000)
+ hist = np.histogramdd(sample=sample, bins=(xbins, ybins, zbins))
+ assert_equal(type(hist), type((1, 2)))
+
+ def test_gh_23110(self):
+ hist, e = np.histogram(np.array([-0.9e-308], dtype='>f8'),
+ bins=2,
+ range=(-1e-308, -2e-313))
+ expected_hist = np.array([1, 0])
+ assert_array_equal(hist, expected_hist)
+
+ def test_gh_28400(self):
+ e = 1 + 1e-12
+ Z = [0, 1, 1, 1, 1, 1, e, e, e, e, e, e, 2]
+ counts, edges = np.histogram(Z, bins="auto")
+ assert len(counts) < 10
+ assert edges[0] == Z[0]
+ assert edges[-1] == Z[-1]
+
+
+class TestHistogramOptimBinNums:
+ """
+ Provide test coverage for the estimators that select an optimal number
+ of bins.
+ """
+
+ def test_empty(self):
+ estimator_list = ['fd', 'scott', 'rice', 'sturges',
+ 'doane', 'sqrt', 'auto', 'stone']
+ # check it can deal with empty data
+ for estimator in estimator_list:
+ a, b = histogram([], bins=estimator)
+ assert_array_equal(a, np.array([0]))
+ assert_array_equal(b, np.array([0, 1]))
+
+ def test_simple(self):
+ """
+ Straightforward testing with a mixture of linspace data (for
+ consistency). All test values have been precomputed and the values
+ shouldn't change.
+ """
+ # Some basic sanity checking, with some fixed data.
+ # Checking for the correct number of bins
+ basic_test = {50: {'fd': 4, 'scott': 4, 'rice': 8, 'sturges': 7,
+ 'doane': 8, 'sqrt': 8, 'auto': 7, 'stone': 2},
+ 500: {'fd': 8, 'scott': 8, 'rice': 16, 'sturges': 10,
+ 'doane': 12, 'sqrt': 23, 'auto': 10, 'stone': 9},
+ 5000: {'fd': 17, 'scott': 17, 'rice': 35, 'sturges': 14,
+ 'doane': 17, 'sqrt': 71, 'auto': 17, 'stone': 20}}
+
+ for testlen, expectedResults in basic_test.items():
+ # Create some sort of non-uniform data to test with
+ # (2-peak uniform mixture)
+ x1 = np.linspace(-10, -1, testlen // 5 * 2)
+ x2 = np.linspace(1, 10, testlen // 5 * 3)
+ x = np.concatenate((x1, x2))
+ for estimator, numbins in expectedResults.items():
+ a, b = np.histogram(x, estimator)
+ assert_equal(len(a), numbins, err_msg=f"For the {estimator} estimator "
+ f"with datasize of {testlen}")
+
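+ def test_sturges_count_by_hand_sketch(self):
+ # Editor's sketch: Sturges' rule uses a bin width of
+ # ptp(x) / (log2(n) + 1), so the selected number of bins is
+ # ceil(log2(n) + 1); for n = 500 that is 10, matching the table
+ # above.
+ x = np.arange(500, dtype=float)
+ hist, _ = np.histogram(x, bins='sturges')
+ assert_equal(len(hist), int(np.ceil(np.log2(500) + 1)))
+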
+ def test_small(self):
+ """
+ Smaller datasets have the potential to cause issues with the
+ data-adaptive methods, especially the FD method. All bin numbers
+ have been precalculated.
+ """
+ small_dat = {1: {'fd': 1, 'scott': 1, 'rice': 1, 'sturges': 1,
+ 'doane': 1, 'sqrt': 1, 'stone': 1},
+ 2: {'fd': 2, 'scott': 1, 'rice': 3, 'sturges': 2,
+ 'doane': 1, 'sqrt': 2, 'stone': 1},
+ 3: {'fd': 2, 'scott': 2, 'rice': 3, 'sturges': 3,
+ 'doane': 3, 'sqrt': 2, 'stone': 1}}
+
+ for testlen, expectedResults in small_dat.items():
+ testdat = np.arange(testlen).astype(float)
+ for estimator, expbins in expectedResults.items():
+ a, b = np.histogram(testdat, estimator)
+ assert_equal(len(a), expbins, err_msg=f"For the {estimator} estimator "
+ f"with datasize of {testlen}")
+
+ def test_incorrect_methods(self):
+ """
+ Check that a ValueError is raised when an unknown string is passed in
+ """
+ check_list = ['mad', 'freeman', 'histograms', 'IQR']
+ for estimator in check_list:
+ assert_raises(ValueError, histogram, [1, 2, 3], estimator)
+
+ def test_novariance(self):
+ """
+ Check that methods handle data with no variance.
+ Primarily for Scott and FD, as the SD and IQR are both 0 in this case.
+ """
+ novar_dataset = np.ones(100)
+ novar_resultdict = {'fd': 1, 'scott': 1, 'rice': 1, 'sturges': 1,
+ 'doane': 1, 'sqrt': 1, 'auto': 1, 'stone': 1}
+
+ for estimator, numbins in novar_resultdict.items():
+ a, b = np.histogram(novar_dataset, estimator)
+ assert_equal(len(a), numbins,
+ err_msg=f"{estimator} estimator, No Variance test")
+
+ def test_limited_variance(self):
+ """
+ Check that when the IQR is 0 but variance exists, we return a
+ reasonable value.
+ """
+ lim_var_data = np.ones(1000)
+ lim_var_data[:3] = 0
+ lim_var_data[-4:] = 100
+
+ edges_auto = histogram_bin_edges(lim_var_data, 'auto')
+ assert_equal(edges_auto[0], 0)
+ assert_equal(edges_auto[-1], 100.)
+ assert len(edges_auto) < 100
+
+ edges_fd = histogram_bin_edges(lim_var_data, 'fd')
+ assert_equal(edges_fd, np.array([0, 100]))
+
+ edges_sturges = histogram_bin_edges(lim_var_data, 'sturges')
+ assert_equal(edges_sturges, np.linspace(0, 100, 12))
+
+ def test_outlier(self):
+ """
+ Check the FD, Scott and Doane with outliers.
+
+ The FD estimator computes a smaller binwidth since it's less affected by
+ outliers. Since the range is so (artificially) large, this means more
+ bins, most of which will be empty, but the data of interest usually is
+ unaffected. The Scott estimator is more affected and returns fewer bins,
+ despite most of the variance being in one area of the data. The Doane
+ estimator lies somewhere between the other two.
+ """
+ xcenter = np.linspace(-10, 10, 50)
+ outlier_dataset = np.hstack((np.linspace(-110, -100, 5), xcenter))
+
+ outlier_resultdict = {'fd': 21, 'scott': 5, 'doane': 11, 'stone': 6}
+
+ for estimator, numbins in outlier_resultdict.items():
+ a, b = np.histogram(outlier_dataset, estimator)
+ assert_equal(len(a), numbins)
+
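+ def test_fd_binwidth_by_hand_sketch(self):
+ # Editor's sketch: Freedman-Diaconis uses a bin width of
+ # 2 * IQR * n**(-1/3); recomputing it by hand reproduces the 21
+ # bins asserted above for the outlier dataset.
+ x = np.hstack((np.linspace(-110, -100, 5), np.linspace(-10, 10, 50)))
+ iqr = np.subtract(*np.percentile(x, [75, 25]))
+ width = 2 * iqr * x.size ** (-1 / 3)
+ hist, _ = np.histogram(x, bins='fd')
+ assert_equal(len(hist), int(np.ceil(np.ptp(x) / width)))
+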
+ def test_scott_vs_stone(self):
+ """Verify that Scott's rule and Stone's rule converges for normally distributed data"""
+
+ def nbins_ratio(seed, size):
+ rng = np.random.RandomState(seed)
+ x = rng.normal(loc=0, scale=2, size=size)
+ a, b = len(np.histogram(x, 'stone')[0]), len(np.histogram(x, 'scott')[0])
+ return a / (a + b)
+
+ ll = [[nbins_ratio(seed, size) for size in np.geomspace(start=10, stop=100, num=4).round().astype(int)]
+ for seed in range(10)]
+
+ # the average difference between the two methods decreases as the dataset size increases.
+ avg = abs(np.mean(ll, axis=0) - 0.5)
+ assert_almost_equal(avg, [0.15, 0.09, 0.08, 0.03], decimal=2)
+
+ def test_simple_range(self):
+ """
+ Straightforward testing with a mixture of linspace data (for
+ consistency). Adding in a 3rd mixture that will then be
+ completely ignored. All test values have been precomputed and
+ they shouldn't change.
+ """
+ # some basic sanity checking, with some fixed data.
+ # Checking for the correct number of bins
+ basic_test = {
+ 50: {'fd': 8, 'scott': 8, 'rice': 15,
+ 'sturges': 14, 'auto': 14, 'stone': 8},
+ 500: {'fd': 15, 'scott': 16, 'rice': 32,
+ 'sturges': 20, 'auto': 20, 'stone': 80},
+ 5000: {'fd': 33, 'scott': 33, 'rice': 69,
+ 'sturges': 27, 'auto': 33, 'stone': 80}
+ }
+
+ for testlen, expectedResults in basic_test.items():
+ # create some sort of non-uniform data to test with
+ # (3-peak uniform mixture)
+ x1 = np.linspace(-10, -1, testlen // 5 * 2)
+ x2 = np.linspace(1, 10, testlen // 5 * 3)
+ x3 = np.linspace(-100, -50, testlen)
+ x = np.hstack((x1, x2, x3))
+ for estimator, numbins in expectedResults.items():
+ a, b = np.histogram(x, estimator, range=(-20, 20))
+ msg = f"For the {estimator} estimator"
+ msg += f" with datasize of {testlen}"
+ assert_equal(len(a), numbins, err_msg=msg)
+
+ @pytest.mark.parametrize("bins", ['auto', 'fd', 'doane', 'scott',
+ 'stone', 'rice', 'sturges'])
+ def test_signed_integer_data(self, bins):
+ # Regression test for gh-14379.
+ a = np.array([-2, 0, 127], dtype=np.int8)
+ hist, edges = np.histogram(a, bins=bins)
+ hist32, edges32 = np.histogram(a.astype(np.int32), bins=bins)
+ assert_array_equal(hist, hist32)
+ assert_array_equal(edges, edges32)
+
+ @pytest.mark.parametrize("bins", ['auto', 'fd', 'doane', 'scott',
+ 'stone', 'rice', 'sturges'])
+ def test_integer(self, bins):
+ """
+ Test that bin width for integer data is at least 1.
+ """
+ with suppress_warnings() as sup:
+ if bins == 'stone':
+ sup.filter(RuntimeWarning)
+ assert_equal(
+ np.histogram_bin_edges(np.tile(np.arange(9), 1000), bins),
+ np.arange(9))
+
+ def test_integer_non_auto(self):
+ """
+ Test that the bin-width>=1 requirement *only* applies to auto binning.
+ """
+ assert_equal(
+ np.histogram_bin_edges(np.tile(np.arange(9), 1000), 16),
+ np.arange(17) / 2)
+ assert_equal(
+ np.histogram_bin_edges(np.tile(np.arange(9), 1000), [.1, .2]),
+ [.1, .2])
+
+ def test_simple_weighted(self):
+ """
+ Check that weighted data raises a TypeError
+ """
+ estimator_list = ['fd', 'scott', 'rice', 'sturges', 'auto']
+ for estimator in estimator_list:
+ assert_raises(TypeError, histogram, [1, 2, 3],
+ estimator, weights=[1, 2, 3])
+
+
+class TestHistogramdd:
+
+ def test_simple(self):
+ x = np.array([[-.5, .5, 1.5], [-.5, 1.5, 2.5], [-.5, 2.5, .5],
+ [.5, .5, 1.5], [.5, 1.5, 2.5], [.5, 2.5, 2.5]])
+ H, edges = histogramdd(x, (2, 3, 3),
+ range=[[-1, 1], [0, 3], [0, 3]])
+ answer = np.array([[[0, 1, 0], [0, 0, 1], [1, 0, 0]],
+ [[0, 1, 0], [0, 0, 1], [0, 0, 1]]])
+ assert_array_equal(H, answer)
+
+ # Check normalization
+ ed = [[-2, 0, 2], [0, 1, 2, 3], [0, 1, 2, 3]]
+ H, edges = histogramdd(x, bins=ed, density=True)
+ assert_(np.all(H == answer / 12.))
+
+ # Check that H has the correct shape.
+ H, edges = histogramdd(x, (2, 3, 4),
+ range=[[-1, 1], [0, 3], [0, 4]],
+ density=True)
+ answer = np.array([[[0, 1, 0, 0], [0, 0, 1, 0], [1, 0, 0, 0]],
+ [[0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 1, 0]]])
+ assert_array_almost_equal(H, answer / 6., 4)
+ # Check that a sequence of arrays is accepted and H has the correct
+ # shape.
+ z = [np.squeeze(y) for y in np.split(x, 3, axis=1)]
+ H, edges = histogramdd(
+ z, bins=(4, 3, 2), range=[[-2, 2], [0, 3], [0, 2]])
+ answer = np.array([[[0, 0], [0, 0], [0, 0]],
+ [[0, 1], [0, 0], [1, 0]],
+ [[0, 1], [0, 0], [0, 0]],
+ [[0, 0], [0, 0], [0, 0]]])
+ assert_array_equal(H, answer)
+
+ Z = np.zeros((5, 5, 5))
+ Z[list(range(5)), list(range(5)), list(range(5))] = 1.
+ H, edges = histogramdd([np.arange(5), np.arange(5), np.arange(5)], 5)
+ assert_array_equal(H, Z)
+
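+ def test_density_integrates_to_one_sketch(self):
+ # Editor's sketch: with density=True the dd histogram integrates
+ # to one, i.e. sum(hist * bin volume) == 1.
+ rng = np.random.default_rng(0)
+ v = rng.random((100, 2))
+ hist, edges = histogramdd(v, bins=(4, 5), density=True)
+ volume = np.outer(np.diff(edges[0]), np.diff(edges[1]))
+ assert_allclose((hist * volume).sum(), 1.0)
+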
+ def test_shape_3d(self):
+ # All possible permutations for bins of different lengths in 3D.
+ bins = ((5, 4, 6), (6, 4, 5), (5, 6, 4), (4, 6, 5), (6, 5, 4),
+ (4, 5, 6))
+ r = np.random.rand(10, 3)
+ for b in bins:
+ H, edges = histogramdd(r, b)
+ assert_(H.shape == b)
+
+ def test_shape_4d(self):
+ # All possible permutations for bins of different lengths in 4D.
+ bins = ((7, 4, 5, 6), (4, 5, 7, 6), (5, 6, 4, 7), (7, 6, 5, 4),
+ (5, 7, 6, 4), (4, 6, 7, 5), (6, 5, 7, 4), (7, 5, 4, 6),
+ (7, 4, 6, 5), (6, 4, 7, 5), (6, 7, 5, 4), (4, 6, 5, 7),
+ (4, 7, 5, 6), (5, 4, 6, 7), (5, 7, 4, 6), (6, 7, 4, 5),
+ (6, 5, 4, 7), (4, 7, 6, 5), (4, 5, 6, 7), (7, 6, 4, 5),
+ (5, 4, 7, 6), (5, 6, 7, 4), (6, 4, 5, 7), (7, 5, 6, 4))
+
+ r = np.random.rand(10, 4)
+ for b in bins:
+ H, edges = histogramdd(r, b)
+ assert_(H.shape == b)
+
+ def test_weights(self):
+ v = np.random.rand(100, 2)
+ hist, edges = histogramdd(v)
+ n_hist, edges = histogramdd(v, density=True)
+ w_hist, edges = histogramdd(v, weights=np.ones(100))
+ assert_array_equal(w_hist, hist)
+ w_hist, edges = histogramdd(v, weights=np.ones(100) * 2, density=True)
+ assert_array_equal(w_hist, n_hist)
+ w_hist, edges = histogramdd(v, weights=np.ones(100, int) * 2)
+ assert_array_equal(w_hist, 2 * hist)
+
+ def test_identical_samples(self):
+ x = np.zeros((10, 2), int)
+ hist, edges = histogramdd(x, bins=2)
+ assert_array_equal(edges[0], np.array([-0.5, 0., 0.5]))
+
+ def test_empty(self):
+ a, b = histogramdd([[], []], bins=([0, 1], [0, 1]))
+ assert_array_max_ulp(a, np.array([[0.]]))
+ a, b = np.histogramdd([[], [], []], bins=2)
+ assert_array_max_ulp(a, np.zeros((2, 2, 2)))
+
+ def test_bins_errors(self):
+ # There are two ways to specify bins. Check for the right errors
+ # when mixing those.
+ x = np.arange(8).reshape(2, 4)
+ assert_raises(ValueError, np.histogramdd, x, bins=[-1, 2, 4, 5])
+ assert_raises(ValueError, np.histogramdd, x, bins=[1, 0.99, 1, 1])
+ assert_raises(
+ ValueError, np.histogramdd, x, bins=[1, 1, 1, [1, 2, 3, -3]])
+ assert_(np.histogramdd(x, bins=[1, 1, 1, [1, 2, 3, 4]]))
+
+ def test_inf_edges(self):
+ # Test using +/-inf bin edges works. See #1788.
+ with np.errstate(invalid='ignore'):
+ x = np.arange(6).reshape(3, 2)
+ expected = np.array([[1, 0], [0, 1], [0, 1]])
+ h, e = np.histogramdd(x, bins=[3, [-np.inf, 2, 10]])
+ assert_allclose(h, expected)
+ h, e = np.histogramdd(x, bins=[3, np.array([-1, 2, np.inf])])
+ assert_allclose(h, expected)
+ h, e = np.histogramdd(x, bins=[3, [-np.inf, 3, np.inf]])
+ assert_allclose(h, expected)
+
+ def test_rightmost_binedge(self):
+ # Test an event very close to the rightmost bin edge. See GitHub issue #4266
+ x = [0.9999999995]
+ bins = [[0., 0.5, 1.0]]
+ hist, _ = histogramdd(x, bins=bins)
+ assert_(hist[0] == 0.0)
+ assert_(hist[1] == 1.)
+ x = [1.0]
+ bins = [[0., 0.5, 1.0]]
+ hist, _ = histogramdd(x, bins=bins)
+ assert_(hist[0] == 0.0)
+ assert_(hist[1] == 1.)
+ x = [1.0000000001]
+ bins = [[0., 0.5, 1.0]]
+ hist, _ = histogramdd(x, bins=bins)
+ assert_(hist[0] == 0.0)
+ assert_(hist[1] == 0.0)
+ x = [1.0001]
+ bins = [[0., 0.5, 1.0]]
+ hist, _ = histogramdd(x, bins=bins)
+ assert_(hist[0] == 0.0)
+ assert_(hist[1] == 0.0)
+
+ def test_finite_range(self):
+ vals = np.random.random((100, 3))
+ histogramdd(vals, range=[[0.0, 1.0], [0.25, 0.75], [0.25, 0.5]])
+ assert_raises(ValueError, histogramdd, vals,
+ range=[[0.0, 1.0], [0.25, 0.75], [0.25, np.inf]])
+ assert_raises(ValueError, histogramdd, vals,
+ range=[[0.0, 1.0], [np.nan, 0.75], [0.25, 0.5]])
+
+ def test_equal_edges(self):
+ """ Test that adjacent entries in an edge array can be equal """
+ x = np.array([0, 1, 2])
+ y = np.array([0, 1, 2])
+ x_edges = np.array([0, 2, 2])
+ y_edges = 1
+ hist, edges = histogramdd((x, y), bins=(x_edges, y_edges))
+
+ hist_expected = np.array([
+ [2.],
+ [1.], # x == 2 falls in the final bin
+ ])
+ assert_equal(hist, hist_expected)
+
+ def test_edge_dtype(self):
+ """ Test that if an edge array is input, its type is preserved """
+ x = np.array([0, 10, 20])
+ y = x / 10
+ x_edges = np.array([0, 5, 15, 20])
+ y_edges = x_edges / 10
+ hist, edges = histogramdd((x, y), bins=(x_edges, y_edges))
+
+ assert_equal(edges[0].dtype, x_edges.dtype)
+ assert_equal(edges[1].dtype, y_edges.dtype)
+
+ def test_large_integers(self):
+ big = 2**60 # Too large to represent with a full precision float
+
+ x = np.array([0], np.int64)
+ x_edges = np.array([-1, +1], np.int64)
+ y = big + x
+ y_edges = big + x_edges
+
+ hist, edges = histogramdd((x, y), bins=(x_edges, y_edges))
+
+ assert_equal(hist[0, 0], 1)
+
+ def test_density_non_uniform_2d(self):
+ # Defines the following grid:
+ #
+ #    0 2     8
+ #   0+-+-----+
+ #    + |     +
+ #    + |     +
+ #   6+-+-----+
+ #   8+-+-----+
+ x_edges = np.array([0, 2, 8])
+ y_edges = np.array([0, 6, 8])
+ relative_areas = np.array([
+ [3, 9],
+ [1, 3]])
+
+ # ensure the number of points in each region is proportional to its area
+ x = np.array([1] + [1] * 3 + [7] * 3 + [7] * 9)
+ y = np.array([7] + [1] * 3 + [7] * 3 + [1] * 9)
+
+ # sanity check that the above worked as intended
+ hist, edges = histogramdd((y, x), bins=(y_edges, x_edges))
+ assert_equal(hist, relative_areas)
+
+ # resulting histogram should be uniform, since counts and areas are proportional
+ hist, edges = histogramdd((y, x), bins=(y_edges, x_edges), density=True)
+ assert_equal(hist, 1 / (8 * 8))
+
+ def test_density_non_uniform_1d(self):
+ # compare to histogram to show the results are the same
+ v = np.arange(10)
+ bins = np.array([0, 1, 3, 6, 10])
+ hist, edges = histogram(v, bins, density=True)
+ hist_dd, edges_dd = histogramdd((v,), (bins,), density=True)
+ assert_equal(hist, hist_dd)
+ assert_equal(edges, edges_dd[0])
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_index_tricks.py b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_index_tricks.py
new file mode 100644
index 0000000..ed8709d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_index_tricks.py
@@ -0,0 +1,568 @@
+import pytest
+
+import numpy as np
+from numpy.lib._index_tricks_impl import (
+ c_,
+ diag_indices,
+ diag_indices_from,
+ fill_diagonal,
+ index_exp,
+ ix_,
+ mgrid,
+ ndenumerate,
+ ndindex,
+ ogrid,
+ r_,
+ s_,
+)
+from numpy.testing import (
+ assert_,
+ assert_almost_equal,
+ assert_array_almost_equal,
+ assert_array_equal,
+ assert_equal,
+ assert_raises,
+ assert_raises_regex,
+)
+
+
+class TestRavelUnravelIndex:
+ def test_basic(self):
+ assert_equal(np.unravel_index(2, (2, 2)), (1, 0))
+
+ # test that new shape argument works properly
+ assert_equal(np.unravel_index(indices=2,
+ shape=(2, 2)),
+ (1, 0))
+
+ # test that an invalid second keyword argument
+ # is properly handled, including the old name `dims`.
+ with assert_raises(TypeError):
+ np.unravel_index(indices=2, hape=(2, 2))
+
+ with assert_raises(TypeError):
+ np.unravel_index(2, hape=(2, 2))
+
+ with assert_raises(TypeError):
+ np.unravel_index(254, ims=(17, 94))
+
+ with assert_raises(TypeError):
+ np.unravel_index(254, dims=(17, 94))
+
+ assert_equal(np.ravel_multi_index((1, 0), (2, 2)), 2)
+ assert_equal(np.unravel_index(254, (17, 94)), (2, 66))
+ assert_equal(np.ravel_multi_index((2, 66), (17, 94)), 254)
+ assert_raises(ValueError, np.unravel_index, -1, (2, 2))
+ assert_raises(TypeError, np.unravel_index, 0.5, (2, 2))
+ assert_raises(ValueError, np.unravel_index, 4, (2, 2))
+ assert_raises(ValueError, np.ravel_multi_index, (-3, 1), (2, 2))
+ assert_raises(ValueError, np.ravel_multi_index, (2, 1), (2, 2))
+ assert_raises(ValueError, np.ravel_multi_index, (0, -3), (2, 2))
+ assert_raises(ValueError, np.ravel_multi_index, (0, 2), (2, 2))
+ assert_raises(TypeError, np.ravel_multi_index, (0.1, 0.), (2, 2))
+
+ assert_equal(np.unravel_index((2 * 3 + 1) * 6 + 4, (4, 3, 6)), [2, 1, 4])
+ assert_equal(
+ np.ravel_multi_index([2, 1, 4], (4, 3, 6)), (2 * 3 + 1) * 6 + 4)
+
+ arr = np.array([[3, 6, 6], [4, 5, 1]])
+ assert_equal(np.ravel_multi_index(arr, (7, 6)), [22, 41, 37])
+ assert_equal(
+ np.ravel_multi_index(arr, (7, 6), order='F'), [31, 41, 13])
+ assert_equal(
+ np.ravel_multi_index(arr, (4, 6), mode='clip'), [22, 23, 19])
+ assert_equal(np.ravel_multi_index(arr, (4, 4), mode=('clip', 'wrap')),
+ [12, 13, 13])
+ assert_equal(np.ravel_multi_index((3, 1, 4, 1), (6, 7, 8, 9)), 1621)
+
+ assert_equal(np.unravel_index(np.array([22, 41, 37]), (7, 6)),
+ [[3, 6, 6], [4, 5, 1]])
+ assert_equal(
+ np.unravel_index(np.array([31, 41, 13]), (7, 6), order='F'),
+ [[3, 6, 6], [4, 5, 1]])
+ assert_equal(np.unravel_index(1621, (6, 7, 8, 9)), [3, 1, 4, 1])
+
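+ def test_ravel_formula_by_hand_sketch(self):
+ # Editor's sketch: in C order, ravel_multi_index computes
+ # i * ncols + j for a 2-d shape, and unravel_index is the
+ # matching divmod.
+ shape = (7, 6)
+ flat = np.ravel_multi_index((3, 4), shape)
+ assert_equal(flat, 3 * shape[1] + 4)
+ assert_equal(np.unravel_index(flat, shape), divmod(flat, shape[1]))
+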
+ def test_empty_indices(self):
+ msg1 = 'indices must be integral: the provided empty sequence was'
+ msg2 = 'only int indices permitted'
+ assert_raises_regex(TypeError, msg1, np.unravel_index, [], (10, 3, 5))
+ assert_raises_regex(TypeError, msg1, np.unravel_index, (), (10, 3, 5))
+ assert_raises_regex(TypeError, msg2, np.unravel_index, np.array([]),
+ (10, 3, 5))
+ assert_equal(np.unravel_index(np.array([], dtype=int), (10, 3, 5)),
+ [[], [], []])
+ assert_raises_regex(TypeError, msg1, np.ravel_multi_index, ([], []),
+ (10, 3))
+ assert_raises_regex(TypeError, msg1, np.ravel_multi_index, ([], ['abc']),
+ (10, 3))
+ assert_raises_regex(TypeError, msg2, np.ravel_multi_index,
+ (np.array([]), np.array([])), (5, 3))
+ assert_equal(np.ravel_multi_index(
+ (np.array([], dtype=int), np.array([], dtype=int)), (5, 3)), [])
+ assert_equal(np.ravel_multi_index(np.array([[], []], dtype=int),
+ (5, 3)), [])
+
+ def test_big_indices(self):
+ # ravel_multi_index for big indices (issue #7546)
+ if np.intp == np.int64:
+ arr = ([1, 29], [3, 5], [3, 117], [19, 2],
+ [2379, 1284], [2, 2], [0, 1])
+ assert_equal(
+ np.ravel_multi_index(arr, (41, 7, 120, 36, 2706, 8, 6)),
+ [5627771580, 117259570957])
+
+ # test unravel_index for big indices (issue #9538)
+ assert_raises(ValueError, np.unravel_index, 1, (2**32 - 1, 2**31 + 1))
+
+ # test overflow checking for too big array (issue #7546)
+ dummy_arr = ([0], [0])
+ half_max = np.iinfo(np.intp).max // 2
+ assert_equal(
+ np.ravel_multi_index(dummy_arr, (half_max, 2)), [0])
+ assert_raises(ValueError,
+ np.ravel_multi_index, dummy_arr, (half_max + 1, 2))
+ assert_equal(
+ np.ravel_multi_index(dummy_arr, (half_max, 2), order='F'), [0])
+ assert_raises(ValueError,
+ np.ravel_multi_index, dummy_arr, (half_max + 1, 2), order='F')
+
+ def test_dtypes(self):
+ # Test with different data types
+ for dtype in [np.int16, np.uint16, np.int32,
+ np.uint32, np.int64, np.uint64]:
+ coords = np.array(
+ [[1, 0, 1, 2, 3, 4], [1, 6, 1, 3, 2, 0]], dtype=dtype)
+ shape = (5, 8)
+ uncoords = 8 * coords[0] + coords[1]
+ assert_equal(np.ravel_multi_index(coords, shape), uncoords)
+ assert_equal(coords, np.unravel_index(uncoords, shape))
+ uncoords = coords[0] + 5 * coords[1]
+ assert_equal(
+ np.ravel_multi_index(coords, shape, order='F'), uncoords)
+ assert_equal(coords, np.unravel_index(uncoords, shape, order='F'))
+
+ coords = np.array(
+ [[1, 0, 1, 2, 3, 4], [1, 6, 1, 3, 2, 0], [1, 3, 1, 0, 9, 5]],
+ dtype=dtype)
+ shape = (5, 8, 10)
+ uncoords = 10 * (8 * coords[0] + coords[1]) + coords[2]
+ assert_equal(np.ravel_multi_index(coords, shape), uncoords)
+ assert_equal(coords, np.unravel_index(uncoords, shape))
+ uncoords = coords[0] + 5 * (coords[1] + 8 * coords[2])
+ assert_equal(
+ np.ravel_multi_index(coords, shape, order='F'), uncoords)
+ assert_equal(coords, np.unravel_index(uncoords, shape, order='F'))
+
+ def test_clipmodes(self):
+ # Test clipmodes
+ assert_equal(
+ np.ravel_multi_index([5, 1, -1, 2], (4, 3, 7, 12), mode='wrap'),
+ np.ravel_multi_index([1, 1, 6, 2], (4, 3, 7, 12)))
+ assert_equal(np.ravel_multi_index([5, 1, -1, 2], (4, 3, 7, 12),
+ mode=(
+ 'wrap', 'raise', 'clip', 'raise')),
+ np.ravel_multi_index([1, 1, 0, 2], (4, 3, 7, 12)))
+ assert_raises(
+ ValueError, np.ravel_multi_index, [5, 1, -1, 2], (4, 3, 7, 12))
+
+ def test_writeability(self):
+ # gh-7269
+ x, y = np.unravel_index([1, 2, 3], (4, 5))
+ assert_(x.flags.writeable)
+ assert_(y.flags.writeable)
+
+ def test_0d(self):
+ # gh-580
+ x = np.unravel_index(0, ())
+ assert_equal(x, ())
+
+ assert_raises_regex(ValueError, "0d array", np.unravel_index, [0], ())
+ assert_raises_regex(
+ ValueError, "out of bounds", np.unravel_index, [1], ())
+
+ @pytest.mark.parametrize("mode", ["clip", "wrap", "raise"])
+ def test_empty_array_ravel(self, mode):
+ res = np.ravel_multi_index(
+ np.zeros((3, 0), dtype=np.intp), (2, 1, 0), mode=mode)
+ assert res.shape == (0,)
+
+ with assert_raises(ValueError):
+ np.ravel_multi_index(
+ np.zeros((3, 1), dtype=np.intp), (2, 1, 0), mode=mode)
+
+ def test_empty_array_unravel(self):
+ res = np.unravel_index(np.zeros(0, dtype=np.intp), (2, 1, 0))
+ # res is a tuple of three empty arrays
+ assert len(res) == 3
+ assert all(a.shape == (0,) for a in res)
+
+ with assert_raises(ValueError):
+ np.unravel_index([1], (2, 1, 0))
+
+
+class TestGrid:
+ def test_basic(self):
+ a = mgrid[-1:1:10j]
+ b = mgrid[-1:1:0.1]
+ assert_(a.shape == (10,))
+ assert_(b.shape == (20,))
+ assert_(a[0] == -1)
+ assert_almost_equal(a[-1], 1)
+ assert_(b[0] == -1)
+ assert_almost_equal(b[1] - b[0], 0.1, 11)
+ assert_almost_equal(b[-1], b[0] + 19 * 0.1, 11)
+ assert_almost_equal(a[1] - a[0], 2.0 / 9.0, 11)
+
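+ def test_step_conventions_sketch(self):
+ # Editor's sketch: a complex step like 5j means "5 points,
+ # endpoint included" (linspace semantics), while a real step is
+ # a spacing with the endpoint excluded (arange semantics).
+ assert_array_almost_equal(mgrid[0:1:5j], np.linspace(0, 1, 5))
+ assert_array_almost_equal(mgrid[0:1:0.25], np.arange(0, 1, 0.25))
+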
+ def test_linspace_equivalence(self):
+ y, st = np.linspace(2, 10, retstep=True)
+ assert_almost_equal(st, 8 / 49.0)
+ assert_array_almost_equal(y, mgrid[2:10:50j], 13)
+
+ def test_nd(self):
+ c = mgrid[-1:1:10j, -2:2:10j]
+ d = mgrid[-1:1:0.1, -2:2:0.2]
+ assert_(c.shape == (2, 10, 10))
+ assert_(d.shape == (2, 20, 20))
+ assert_array_equal(c[0][0, :], -np.ones(10, 'd'))
+ assert_array_equal(c[1][:, 0], -2 * np.ones(10, 'd'))
+ assert_array_almost_equal(c[0][-1, :], np.ones(10, 'd'), 11)
+ assert_array_almost_equal(c[1][:, -1], 2 * np.ones(10, 'd'), 11)
+ assert_array_almost_equal(d[0, 1, :] - d[0, 0, :],
+ 0.1 * np.ones(20, 'd'), 11)
+ assert_array_almost_equal(d[1, :, 1] - d[1, :, 0],
+ 0.2 * np.ones(20, 'd'), 11)
+
+ def test_sparse(self):
+ grid_full = mgrid[-1:1:10j, -2:2:10j]
+ grid_sparse = ogrid[-1:1:10j, -2:2:10j]
+
+ # sparse grids can be made dense by broadcasting
+ grid_broadcast = np.broadcast_arrays(*grid_sparse)
+ for f, b in zip(grid_full, grid_broadcast):
+ assert_equal(f, b)
+
+ @pytest.mark.parametrize("start, stop, step, expected", [
+ (None, 10, 10j, (200, 10)),
+ (-10, 20, None, (1800, 30)),
+ ])
+ def test_mgrid_size_none_handling(self, start, stop, step, expected):
+ # regression test for None value handling of
+ # start and step values used by mgrid;
+ # internally, this aims to cover previously
+ # unexplored code paths in nd_grid()
+ grid = mgrid[start:stop:step, start:stop:step]
+ # need a smaller grid to explore one of the
+ # untested code paths
+ grid_small = mgrid[start:stop:step]
+ assert_equal(grid.size, expected[0])
+ assert_equal(grid_small.size, expected[1])
+
+ def test_accepts_npfloating(self):
+ # regression test for #16466
+ grid64 = mgrid[0.1:0.33:0.1, ]
+ grid32 = mgrid[np.float32(0.1):np.float32(0.33):np.float32(0.1), ]
+ assert_array_almost_equal(grid64, grid32)
+ # At some point this was float64, but NEP 50 changed it:
+ assert grid32.dtype == np.float32
+ assert grid64.dtype == np.float64
+
+ # different code path for single slice
+ grid64 = mgrid[0.1:0.33:0.1]
+ grid32 = mgrid[np.float32(0.1):np.float32(0.33):np.float32(0.1)]
+ assert_(grid32.dtype == np.float64)
+ assert_array_almost_equal(grid64, grid32)
+
+ def test_accepts_longdouble(self):
+ # regression tests for #16945
+ grid64 = mgrid[0.1:0.33:0.1, ]
+ grid128 = mgrid[
+ np.longdouble(0.1):np.longdouble(0.33):np.longdouble(0.1),
+ ]
+ assert_(grid128.dtype == np.longdouble)
+ assert_array_almost_equal(grid64, grid128)
+
+ grid128c_a = mgrid[0:np.longdouble(1):3.4j]
+ grid128c_b = mgrid[0:np.longdouble(1):3.4j, ]
+ assert_(grid128c_a.dtype == grid128c_b.dtype == np.longdouble)
+ assert_array_equal(grid128c_a, grid128c_b[0])
+
+ # different code path for single slice
+ grid64 = mgrid[0.1:0.33:0.1]
+ grid128 = mgrid[
+ np.longdouble(0.1):np.longdouble(0.33):np.longdouble(0.1)
+ ]
+ assert_(grid128.dtype == np.longdouble)
+ assert_array_almost_equal(grid64, grid128)
+
+ def test_accepts_npcomplexfloating(self):
+ # Related to #16466
+ assert_array_almost_equal(
+ mgrid[0.1:0.3:3j, ], mgrid[0.1:0.3:np.complex64(3j), ]
+ )
+
+ # different code path for single slice
+ assert_array_almost_equal(
+ mgrid[0.1:0.3:3j], mgrid[0.1:0.3:np.complex64(3j)]
+ )
+
+ # Related to #16945
+ grid64_a = mgrid[0.1:0.3:3.3j]
+ grid64_b = mgrid[0.1:0.3:3.3j, ][0]
+ assert_(grid64_a.dtype == grid64_b.dtype == np.float64)
+ assert_array_equal(grid64_a, grid64_b)
+
+ grid128_a = mgrid[0.1:0.3:np.clongdouble(3.3j)]
+ grid128_b = mgrid[0.1:0.3:np.clongdouble(3.3j), ][0]
+ assert_(grid128_a.dtype == grid128_b.dtype == np.longdouble)
+ assert_array_equal(grid128_a, grid128_b)
+
+
+class TestConcatenator:
+ def test_1d(self):
+ assert_array_equal(r_[1, 2, 3, 4, 5, 6], np.array([1, 2, 3, 4, 5, 6]))
+ b = np.ones(5)
+ c = r_[b, 0, 0, b]
+ assert_array_equal(c, [1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1])
+
+ def test_mixed_type(self):
+ g = r_[10.1, 1:10]
+ assert_(g.dtype == 'f8')
+
+ def test_more_mixed_type(self):
+ g = r_[-10.1, np.array([1]), np.array([2, 3, 4]), 10.0]
+ assert_(g.dtype == 'f8')
+
+ def test_complex_step(self):
+ # Regression test for #12262
+ g = r_[0:36:100j]
+ assert_(g.shape == (100,))
+
+ # Related to #16466
+ g = r_[0:36:np.complex64(100j)]
+ assert_(g.shape == (100,))
+
+ def test_2d(self):
+ b = np.random.rand(5, 5)
+ c = np.random.rand(5, 5)
+ d = r_['1', b, c] # append columns
+ assert_(d.shape == (5, 10))
+ assert_array_equal(d[:, :5], b)
+ assert_array_equal(d[:, 5:], c)
+ d = r_[b, c]
+ assert_(d.shape == (10, 5))
+ assert_array_equal(d[:5, :], b)
+ assert_array_equal(d[5:, :], c)
+
+ def test_0d(self):
+ assert_equal(r_[0, np.array(1), 2], [0, 1, 2])
+ assert_equal(r_[[0, 1, 2], np.array(3)], [0, 1, 2, 3])
+ assert_equal(r_[np.array(0), [1, 2, 3]], [0, 1, 2, 3])
+
+
+class TestNdenumerate:
+ def test_basic(self):
+ a = np.array([[1, 2], [3, 4]])
+ assert_equal(list(ndenumerate(a)),
+ [((0, 0), 1), ((0, 1), 2), ((1, 0), 3), ((1, 1), 4)])
+
+
+class TestIndexExpression:
+ def test_regression_1(self):
+ # ticket #1196
+ a = np.arange(2)
+ assert_equal(a[:-1], a[s_[:-1]])
+ assert_equal(a[:-1], a[index_exp[:-1]])
+
+ def test_simple_1(self):
+ a = np.random.rand(4, 5, 6)
+
+ assert_equal(a[:, :3, [1, 2]], a[index_exp[:, :3, [1, 2]]])
+ assert_equal(a[:, :3, [1, 2]], a[s_[:, :3, [1, 2]]])
+
+
+class TestIx_:
+ def test_regression_1(self):
+ # Test empty untyped inputs create outputs of indexing type, gh-5804
+ a, = np.ix_(range(0))
+ assert_equal(a.dtype, np.intp)
+
+ a, = np.ix_([])
+ assert_equal(a.dtype, np.intp)
+
+ # but if the type is specified, don't change it
+ a, = np.ix_(np.array([], dtype=np.float32))
+ assert_equal(a.dtype, np.float32)
+
+ def test_shape_and_dtype(self):
+ sizes = (4, 5, 3, 2)
+ # Test both lists and arrays
+ for func in (range, np.arange):
+ arrays = np.ix_(*[func(sz) for sz in sizes])
+ for k, (a, sz) in enumerate(zip(arrays, sizes)):
+ assert_equal(a.shape[k], sz)
+ assert_(all(sh == 1 for j, sh in enumerate(a.shape) if j != k))
+ assert_(np.issubdtype(a.dtype, np.integer))
+
+ def test_bool(self):
+ bool_a = [True, False, True, True]
+ int_a, = np.nonzero(bool_a)
+ assert_equal(np.ix_(bool_a)[0], int_a)
+
+ def test_1d_only(self):
+ idx2d = [[1, 2, 3], [4, 5, 6]]
+ assert_raises(ValueError, np.ix_, idx2d)
+
+ def test_repeated_input(self):
+ length_of_vector = 5
+ x = np.arange(length_of_vector)
+ out = ix_(x, x)
+ assert_equal(out[0].shape, (length_of_vector, 1))
+ assert_equal(out[1].shape, (1, length_of_vector))
+ # check that input shape is not modified
+ assert_equal(x.shape, (length_of_vector,))
+
+
+def test_c_():
+ a = c_[np.array([[1, 2, 3]]), 0, 0, np.array([[4, 5, 6]])]
+ assert_equal(a, [[1, 2, 3, 0, 0, 4, 5, 6]])
+
+
+class TestFillDiagonal:
+ def test_basic(self):
+ a = np.zeros((3, 3), int)
+ fill_diagonal(a, 5)
+ assert_array_equal(
+ a, np.array([[5, 0, 0],
+ [0, 5, 0],
+ [0, 0, 5]])
+ )
+
+ def test_tall_matrix(self):
+ a = np.zeros((10, 3), int)
+ fill_diagonal(a, 5)
+ assert_array_equal(
+ a, np.array([[5, 0, 0],
+ [0, 5, 0],
+ [0, 0, 5],
+ [0, 0, 0],
+ [0, 0, 0],
+ [0, 0, 0],
+ [0, 0, 0],
+ [0, 0, 0],
+ [0, 0, 0],
+ [0, 0, 0]])
+ )
+
+ def test_tall_matrix_wrap(self):
+ a = np.zeros((10, 3), int)
+ fill_diagonal(a, 5, True)
+ assert_array_equal(
+ a, np.array([[5, 0, 0],
+ [0, 5, 0],
+ [0, 0, 5],
+ [0, 0, 0],
+ [5, 0, 0],
+ [0, 5, 0],
+ [0, 0, 5],
+ [0, 0, 0],
+ [5, 0, 0],
+ [0, 5, 0]])
+ )
+
+ def test_wide_matrix(self):
+ a = np.zeros((3, 10), int)
+ fill_diagonal(a, 5)
+ assert_array_equal(
+ a, np.array([[5, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 5, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 5, 0, 0, 0, 0, 0, 0, 0]])
+ )
+
+ def test_operate_4d_array(self):
+ a = np.zeros((3, 3, 3, 3), int)
+ fill_diagonal(a, 4)
+ i = np.array([0, 1, 2])
+ assert_equal(np.where(a != 0), (i, i, i, i))
+
+ def test_low_dim_handling(self):
+ # raise error with low dimensionality
+ a = np.zeros(3, int)
+ with assert_raises_regex(ValueError, "at least 2-d"):
+ fill_diagonal(a, 5)
+
+ def test_hetero_shape_handling(self):
+ # raise error with high dimensionality and
+ # shape mismatch
+ a = np.zeros((3, 3, 7, 3), int)
+ with assert_raises_regex(ValueError, "equal length"):
+ fill_diagonal(a, 2)
+
+
+def test_diag_indices():
+ di = diag_indices(4)
+ a = np.array([[1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12],
+ [13, 14, 15, 16]])
+ a[di] = 100
+ assert_array_equal(
+ a, np.array([[100, 2, 3, 4],
+ [5, 100, 7, 8],
+ [9, 10, 100, 12],
+ [13, 14, 15, 100]])
+ )
+
+ # Now, we create indices to manipulate a 3-d array:
+ d3 = diag_indices(2, 3)
+
+ # And use it to set the diagonal of a zeros array to 1:
+ a = np.zeros((2, 2, 2), int)
+ a[d3] = 1
+ assert_array_equal(
+ a, np.array([[[1, 0],
+ [0, 0]],
+ [[0, 0],
+ [0, 1]]])
+ )
+
+
+class TestDiagIndicesFrom:
+
+ def test_diag_indices_from(self):
+ x = np.random.random((4, 4))
+ r, c = diag_indices_from(x)
+ assert_array_equal(r, np.arange(4))
+ assert_array_equal(c, np.arange(4))
+
+ def test_error_small_input(self):
+ x = np.ones(7)
+ with assert_raises_regex(ValueError, "at least 2-d"):
+ diag_indices_from(x)
+
+ def test_error_shape_mismatch(self):
+ x = np.zeros((3, 3, 2, 3), int)
+ with assert_raises_regex(ValueError, "equal length"):
+ diag_indices_from(x)
+
+
+def test_ndindex():
+ x = list(ndindex(1, 2, 3))
+ expected = [ix for ix, e in ndenumerate(np.zeros((1, 2, 3)))]
+ assert_array_equal(x, expected)
+
+ x = list(ndindex((1, 2, 3)))
+ assert_array_equal(x, expected)
+
+ # Test use of scalars and tuples
+ x = list(ndindex((3,)))
+ assert_array_equal(x, list(ndindex(3)))
+
+ # Make sure size argument is optional
+ x = list(ndindex())
+ assert_equal(x, [()])
+
+ x = list(ndindex(()))
+ assert_equal(x, [()])
+
+ # Make sure 0-sized ndindex works correctly
+ x = list(ndindex(*[0]))
+ assert_equal(x, [])
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_io.py b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_io.py
new file mode 100644
index 0000000..303dcfe
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_io.py
@@ -0,0 +1,2848 @@
+import gc
+import gzip
+import locale
+import os
+import re
+import sys
+import threading
+import time
+import warnings
+import zipfile
+from ctypes import c_bool
+from datetime import datetime
+from io import BytesIO, StringIO
+from multiprocessing import Value, get_context
+from pathlib import Path
+from tempfile import NamedTemporaryFile
+
+import pytest
+
+import numpy as np
+import numpy.ma as ma
+from numpy._utils import asbytes
+from numpy.exceptions import VisibleDeprecationWarning
+from numpy.lib import _npyio_impl
+from numpy.lib._iotools import ConversionWarning, ConverterError
+from numpy.lib._npyio_impl import recfromcsv, recfromtxt
+from numpy.ma.testutils import assert_equal
+from numpy.testing import (
+ HAS_REFCOUNT,
+ IS_PYPY,
+ IS_WASM,
+ assert_,
+ assert_allclose,
+ assert_array_equal,
+ assert_no_gc_cycles,
+ assert_no_warnings,
+ assert_raises,
+ assert_raises_regex,
+ assert_warns,
+ break_cycles,
+ suppress_warnings,
+ tempdir,
+ temppath,
+)
+from numpy.testing._private.utils import requires_memory
+
+
+class TextIO(BytesIO):
+ """Helper IO class.
+
+    Writes encode strings to bytes if needed; reads return bytes.
+ This makes it easier to emulate files opened in binary mode
+ without needing to explicitly convert strings to bytes in
+ setting up the test data.
+
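+    For example::
+
+        >>> f = TextIO("1 2\n")
+        >>> f.read()
+        b'1 2\n'
+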
+ """
+ def __init__(self, s=""):
+ BytesIO.__init__(self, asbytes(s))
+
+ def write(self, s):
+ BytesIO.write(self, asbytes(s))
+
+ def writelines(self, lines):
+ BytesIO.writelines(self, [asbytes(s) for s in lines])
+
+
+IS_64BIT = sys.maxsize > 2**32
+try:
+ import bz2
+ HAS_BZ2 = True
+except ImportError:
+ HAS_BZ2 = False
+try:
+ import lzma
+ HAS_LZMA = True
+except ImportError:
+ HAS_LZMA = False
+
+
+def strptime(s, fmt=None):
+ """
+    Emulate datetime.strptime, which is available in the datetime
+    module only from Python >= 2.5.
+
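+    For example, strptime("2009-02-03", "%Y-%m-%d") returns
+    datetime(2009, 2, 3, 0, 0); time-of-day fields are discarded.
+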
+ """
+ if isinstance(s, bytes):
+ s = s.decode("latin1")
+ return datetime(*time.strptime(s, fmt)[:3])
+
+
+class RoundtripTest:
+ def roundtrip(self, save_func, *args, **kwargs):
+ """
+ save_func : callable
+ Function used to save arrays to file.
+ file_on_disk : bool
+ If true, store the file on disk, instead of in a
+ string buffer.
+ save_kwds : dict
+ Parameters passed to `save_func`.
+ load_kwds : dict
+ Parameters passed to `numpy.load`.
+ args : tuple of arrays
+ Arrays stored to file.
+
+ """
+ save_kwds = kwargs.get('save_kwds', {})
+ load_kwds = kwargs.get('load_kwds', {"allow_pickle": True})
+ file_on_disk = kwargs.get('file_on_disk', False)
+
+ if file_on_disk:
+ target_file = NamedTemporaryFile(delete=False)
+ load_file = target_file.name
+ else:
+ target_file = BytesIO()
+ load_file = target_file
+
+ try:
+ arr = args
+
+ save_func(target_file, *arr, **save_kwds)
+ target_file.flush()
+ target_file.seek(0)
+
+ if sys.platform == 'win32' and not isinstance(target_file, BytesIO):
+ target_file.close()
+
+ arr_reloaded = np.load(load_file, **load_kwds)
+
+ self.arr = arr
+ self.arr_reloaded = arr_reloaded
+ finally:
+ if not isinstance(target_file, BytesIO):
+ target_file.close()
+                # an NpzFile holds an open file descriptor, so the
+                # file can't be deleted on Windows until it is closed
+ if 'arr_reloaded' in locals():
+ if not isinstance(arr_reloaded, np.lib.npyio.NpzFile):
+ os.remove(target_file.name)
+
+ def check_roundtrips(self, a):
+ self.roundtrip(a)
+ self.roundtrip(a, file_on_disk=True)
+ self.roundtrip(np.asfortranarray(a))
+ self.roundtrip(np.asfortranarray(a), file_on_disk=True)
+ if a.shape[0] > 1:
+            # slicing makes the array neither C nor Fortran
+            # contiguous for 2D arrays or more
+ self.roundtrip(np.asfortranarray(a)[1:])
+ self.roundtrip(np.asfortranarray(a)[1:], file_on_disk=True)
+
+ def test_array(self):
+ a = np.array([], float)
+ self.check_roundtrips(a)
+
+ a = np.array([[1, 2], [3, 4]], float)
+ self.check_roundtrips(a)
+
+ a = np.array([[1, 2], [3, 4]], int)
+ self.check_roundtrips(a)
+
+ a = np.array([[1 + 5j, 2 + 6j], [3 + 7j, 4 + 8j]], dtype=np.csingle)
+ self.check_roundtrips(a)
+
+ a = np.array([[1 + 5j, 2 + 6j], [3 + 7j, 4 + 8j]], dtype=np.cdouble)
+ self.check_roundtrips(a)
+
+ def test_array_object(self):
+ a = np.array([], object)
+ self.check_roundtrips(a)
+
+ a = np.array([[1, 2], [3, 4]], object)
+ self.check_roundtrips(a)
+
+ def test_1D(self):
+ a = np.array([1, 2, 3, 4], int)
+ self.roundtrip(a)
+
+ @pytest.mark.skipif(sys.platform == 'win32', reason="Fails on Win32")
+ def test_mmap(self):
+ a = np.array([[1, 2.5], [4, 7.3]])
+ self.roundtrip(a, file_on_disk=True, load_kwds={'mmap_mode': 'r'})
+
+ a = np.asfortranarray([[1, 2.5], [4, 7.3]])
+ self.roundtrip(a, file_on_disk=True, load_kwds={'mmap_mode': 'r'})
+
+ def test_record(self):
+ a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
+ self.check_roundtrips(a)
+
+ @pytest.mark.slow
+ def test_format_2_0(self):
+ dt = [(("%d" % i) * 100, float) for i in range(500)]
+ a = np.ones(1000, dtype=dt)
+ with warnings.catch_warnings(record=True):
+ warnings.filterwarnings('always', '', UserWarning)
+ self.check_roundtrips(a)
+
+
+class TestSaveLoad(RoundtripTest):
+ def roundtrip(self, *args, **kwargs):
+ RoundtripTest.roundtrip(self, np.save, *args, **kwargs)
+ assert_equal(self.arr[0], self.arr_reloaded)
+ assert_equal(self.arr[0].dtype, self.arr_reloaded.dtype)
+ assert_equal(self.arr[0].flags.fnc, self.arr_reloaded.flags.fnc)
+
+
+class TestSavezLoad(RoundtripTest):
+ def roundtrip(self, *args, **kwargs):
+ RoundtripTest.roundtrip(self, np.savez, *args, **kwargs)
+ try:
+ for n, arr in enumerate(self.arr):
+ reloaded = self.arr_reloaded['arr_%d' % n]
+ assert_equal(arr, reloaded)
+ assert_equal(arr.dtype, reloaded.dtype)
+ assert_equal(arr.flags.fnc, reloaded.flags.fnc)
+ finally:
+ # delete tempfile, must be done here on windows
+ if self.arr_reloaded.fid:
+ self.arr_reloaded.fid.close()
+ os.remove(self.arr_reloaded.fid.name)
+
+ def test_load_non_npy(self):
+ """Test loading non-.npy files and name mapping in .npz."""
+ with temppath(prefix="numpy_test_npz_load_non_npy_", suffix=".npz") as tmp:
+ with zipfile.ZipFile(tmp, "w") as npz:
+ with npz.open("test1.npy", "w") as out_file:
+ np.save(out_file, np.arange(10))
+ with npz.open("test2", "w") as out_file:
+ np.save(out_file, np.arange(10))
+ with npz.open("metadata", "w") as out_file:
+ out_file.write(b"Name: Test")
+ with np.load(tmp) as npz:
+ assert len(npz["test1"]) == 10
+ assert len(npz["test1.npy"]) == 10
+ assert len(npz["test2"]) == 10
+ assert npz["metadata"] == b"Name: Test"
+
+ @pytest.mark.skipif(IS_PYPY, reason="Hangs on PyPy")
+ @pytest.mark.skipif(not IS_64BIT, reason="Needs 64bit platform")
+ @pytest.mark.slow
+ def test_big_arrays(self):
+ L = (1 << 31) + 100000
+ a = np.empty(L, dtype=np.uint8)
+ with temppath(prefix="numpy_test_big_arrays_", suffix=".npz") as tmp:
+ np.savez(tmp, a=a)
+ del a
+ npfile = np.load(tmp)
+ a = npfile['a'] # Should succeed
+ npfile.close()
+
+ def test_multiple_arrays(self):
+ a = np.array([[1, 2], [3, 4]], float)
+ b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex)
+ self.roundtrip(a, b)
+
+ def test_named_arrays(self):
+ a = np.array([[1, 2], [3, 4]], float)
+ b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex)
+ c = BytesIO()
+ np.savez(c, file_a=a, file_b=b)
+ c.seek(0)
+ l = np.load(c)
+ assert_equal(a, l['file_a'])
+ assert_equal(b, l['file_b'])
+
+ def test_tuple_getitem_raises(self):
+ # gh-23748
+ a = np.array([1, 2, 3])
+ f = BytesIO()
+ np.savez(f, a=a)
+ f.seek(0)
+ l = np.load(f)
+ with pytest.raises(KeyError, match="(1, 2)"):
+ l[1, 2]
+
+ def test_BagObj(self):
+ a = np.array([[1, 2], [3, 4]], float)
+ b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex)
+ c = BytesIO()
+ np.savez(c, file_a=a, file_b=b)
+ c.seek(0)
+ l = np.load(c)
+ assert_equal(sorted(dir(l.f)), ['file_a', 'file_b'])
+ assert_equal(a, l.f.file_a)
+ assert_equal(b, l.f.file_b)
+
+ @pytest.mark.skipif(IS_WASM, reason="Cannot start thread")
+ def test_savez_filename_clashes(self):
+ # Test that issue #852 is fixed
+ # and savez functions in multithreaded environment
+
+ def writer(error_list):
+ with temppath(suffix='.npz') as tmp:
+ arr = np.random.randn(500, 500)
+ try:
+ np.savez(tmp, arr=arr)
+ except OSError as err:
+ error_list.append(err)
+
+ errors = []
+ threads = [threading.Thread(target=writer, args=(errors,))
+ for j in range(3)]
+ for t in threads:
+ t.start()
+ for t in threads:
+ t.join()
+
+ if errors:
+ raise AssertionError(errors)
+
+ def test_not_closing_opened_fid(self):
+ # Test that issue #2178 is fixed:
+ # verify could seek on 'loaded' file
+ with temppath(suffix='.npz') as tmp:
+ with open(tmp, 'wb') as fp:
+ np.savez(fp, data='LOVELY LOAD')
+ with open(tmp, 'rb', 10000) as fp:
+ fp.seek(0)
+ assert_(not fp.closed)
+ np.load(fp)['data']
+ # fp must not get closed by .load
+ assert_(not fp.closed)
+ fp.seek(0)
+ assert_(not fp.closed)
+
+ @pytest.mark.slow_pypy
+ def test_closing_fid(self):
+        # Test that issue #1517 (too many open files) remains fixed.
+        # It might be a "weak" test, since it failed to trigger on
+        # e.g. Debian sid of 2012 Jul 05, but it was reported to
+        # trigger the failure on Ubuntu 10.04:
+ # http://projects.scipy.org/numpy/ticket/1517#comment:2
+ with temppath(suffix='.npz') as tmp:
+ np.savez(tmp, data='LOVELY LOAD')
+ # We need to check if the garbage collector can properly close
+ # numpy npz file returned by np.load when their reference count
+ # goes to zero. Python running in debug mode raises a
+ # ResourceWarning when file closing is left to the garbage
+ # collector, so we catch the warnings.
+ with suppress_warnings() as sup:
+ sup.filter(ResourceWarning) # TODO: specify exact message
+ for i in range(1, 1025):
+ try:
+ np.load(tmp)["data"]
+ except Exception as e:
+ msg = f"Failed to load data from a file: {e}"
+ raise AssertionError(msg)
+ finally:
+ if IS_PYPY:
+ gc.collect()
+
+ def test_closing_zipfile_after_load(self):
+        # Check that the zipfile owns the file and can close it. This
+        # needs a file name to be passed to np.load for the test. On
+        # Windows, failure will cause a second error to be raised when
+        # the attempt to remove the open file is made.
+ prefix = 'numpy_test_closing_zipfile_after_load_'
+ with temppath(suffix='.npz', prefix=prefix) as tmp:
+ np.savez(tmp, lab='place holder')
+ data = np.load(tmp)
+ fp = data.zip.fp
+ data.close()
+ assert_(fp.closed)
+
+ @pytest.mark.parametrize("count, expected_repr", [
+ (1, "NpzFile {fname!r} with keys: arr_0"),
+ (5, "NpzFile {fname!r} with keys: arr_0, arr_1, arr_2, arr_3, arr_4"),
+ # _MAX_REPR_ARRAY_COUNT is 5, so files with more than 5 keys are
+ # expected to end in '...'
+ (6, "NpzFile {fname!r} with keys: arr_0, arr_1, arr_2, arr_3, arr_4..."),
+ ])
+ def test_repr_lists_keys(self, count, expected_repr):
+ a = np.array([[1, 2], [3, 4]], float)
+ with temppath(suffix='.npz') as tmp:
+ np.savez(tmp, *[a] * count)
+ l = np.load(tmp)
+ assert repr(l) == expected_repr.format(fname=tmp)
+ l.close()
+
+
+class TestSaveTxt:
+ def test_array(self):
+ a = np.array([[1, 2], [3, 4]], float)
+ fmt = "%.18e"
+ c = BytesIO()
+ np.savetxt(c, a, fmt=fmt)
+ c.seek(0)
+ assert_equal(c.readlines(),
+ [asbytes((fmt + ' ' + fmt + '\n') % (1, 2)),
+ asbytes((fmt + ' ' + fmt + '\n') % (3, 4))])
+
+ a = np.array([[1, 2], [3, 4]], int)
+ c = BytesIO()
+ np.savetxt(c, a, fmt='%d')
+ c.seek(0)
+ assert_equal(c.readlines(), [b'1 2\n', b'3 4\n'])
+
+ def test_1D(self):
+ a = np.array([1, 2, 3, 4], int)
+ c = BytesIO()
+ np.savetxt(c, a, fmt='%d')
+ c.seek(0)
+ lines = c.readlines()
+ assert_equal(lines, [b'1\n', b'2\n', b'3\n', b'4\n'])
+
+ def test_0D_3D(self):
+ c = BytesIO()
+ assert_raises(ValueError, np.savetxt, c, np.array(1))
+ assert_raises(ValueError, np.savetxt, c, np.array([[[1], [2]]]))
+
+ def test_structured(self):
+ a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
+ c = BytesIO()
+ np.savetxt(c, a, fmt='%d')
+ c.seek(0)
+ assert_equal(c.readlines(), [b'1 2\n', b'3 4\n'])
+
+ def test_structured_padded(self):
+ # gh-13297
+ a = np.array([(1, 2, 3), (4, 5, 6)], dtype=[
+ ('foo', 'i4'), ('bar', 'i4'), ('baz', 'i4')
+ ])
+ c = BytesIO()
+ np.savetxt(c, a[['foo', 'baz']], fmt='%d')
+ c.seek(0)
+ assert_equal(c.readlines(), [b'1 3\n', b'4 6\n'])
+
+ def test_multifield_view(self):
+ a = np.ones(1, dtype=[('x', 'i4'), ('y', 'i4'), ('z', 'f4')])
+ v = a[['x', 'z']]
+ with temppath(suffix='.npy') as path:
+ path = Path(path)
+ np.save(path, v)
+ data = np.load(path)
+ assert_array_equal(data, v)
+
+ def test_delimiter(self):
+ a = np.array([[1., 2.], [3., 4.]])
+ c = BytesIO()
+ np.savetxt(c, a, delimiter=',', fmt='%d')
+ c.seek(0)
+ assert_equal(c.readlines(), [b'1,2\n', b'3,4\n'])
+
+ def test_format(self):
+ a = np.array([(1, 2), (3, 4)])
+ c = BytesIO()
+ # Sequence of formats
+ np.savetxt(c, a, fmt=['%02d', '%3.1f'])
+ c.seek(0)
+ assert_equal(c.readlines(), [b'01 2.0\n', b'03 4.0\n'])
+
+ # A single multiformat string
+ c = BytesIO()
+ np.savetxt(c, a, fmt='%02d : %3.1f')
+ c.seek(0)
+ lines = c.readlines()
+ assert_equal(lines, [b'01 : 2.0\n', b'03 : 4.0\n'])
+
+ # Specify delimiter, should be overridden
+ c = BytesIO()
+ np.savetxt(c, a, fmt='%02d : %3.1f', delimiter=',')
+ c.seek(0)
+ lines = c.readlines()
+ assert_equal(lines, [b'01 : 2.0\n', b'03 : 4.0\n'])
+
+ # Bad fmt, should raise a ValueError
+ c = BytesIO()
+ assert_raises(ValueError, np.savetxt, c, a, fmt=99)
+
+ def test_header_footer(self):
+ # Test the functionality of the header and footer keyword argument.
+
+ c = BytesIO()
+ a = np.array([(1, 2), (3, 4)], dtype=int)
+ test_header_footer = 'Test header / footer'
+ # Test the header keyword argument
+ np.savetxt(c, a, fmt='%1d', header=test_header_footer)
+ c.seek(0)
+ assert_equal(c.read(),
+ asbytes('# ' + test_header_footer + '\n1 2\n3 4\n'))
+ # Test the footer keyword argument
+ c = BytesIO()
+ np.savetxt(c, a, fmt='%1d', footer=test_header_footer)
+ c.seek(0)
+ assert_equal(c.read(),
+ asbytes('1 2\n3 4\n# ' + test_header_footer + '\n'))
+ # Test the commentstr keyword argument used on the header
+ c = BytesIO()
+ commentstr = '% '
+ np.savetxt(c, a, fmt='%1d',
+ header=test_header_footer, comments=commentstr)
+ c.seek(0)
+ assert_equal(c.read(),
+ asbytes(commentstr + test_header_footer + '\n' + '1 2\n3 4\n'))
+ # Test the commentstr keyword argument used on the footer
+ c = BytesIO()
+ commentstr = '% '
+ np.savetxt(c, a, fmt='%1d',
+ footer=test_header_footer, comments=commentstr)
+ c.seek(0)
+ assert_equal(c.read(),
+ asbytes('1 2\n3 4\n' + commentstr + test_header_footer + '\n'))
+
+ @pytest.mark.parametrize("filename_type", [Path, str])
+ def test_file_roundtrip(self, filename_type):
+ with temppath() as name:
+ a = np.array([(1, 2), (3, 4)])
+ np.savetxt(filename_type(name), a)
+ b = np.loadtxt(filename_type(name))
+ assert_array_equal(a, b)
+
+ def test_complex_arrays(self):
+ ncols = 2
+ nrows = 2
+ a = np.zeros((ncols, nrows), dtype=np.complex128)
+ re = np.pi
+ im = np.e
+ a[:] = re + 1.0j * im
+
+ # One format only
+ c = BytesIO()
+ np.savetxt(c, a, fmt=' %+.3e')
+ c.seek(0)
+ lines = c.readlines()
+ assert_equal(
+ lines,
+ [b' ( +3.142e+00+ +2.718e+00j) ( +3.142e+00+ +2.718e+00j)\n',
+ b' ( +3.142e+00+ +2.718e+00j) ( +3.142e+00+ +2.718e+00j)\n'])
+
+ # One format for each real and imaginary part
+ c = BytesIO()
+ np.savetxt(c, a, fmt=' %+.3e' * 2 * ncols)
+ c.seek(0)
+ lines = c.readlines()
+ assert_equal(
+ lines,
+ [b' +3.142e+00 +2.718e+00 +3.142e+00 +2.718e+00\n',
+ b' +3.142e+00 +2.718e+00 +3.142e+00 +2.718e+00\n'])
+
+ # One format for each complex number
+ c = BytesIO()
+ np.savetxt(c, a, fmt=['(%.3e%+.3ej)'] * ncols)
+ c.seek(0)
+ lines = c.readlines()
+ assert_equal(
+ lines,
+ [b'(3.142e+00+2.718e+00j) (3.142e+00+2.718e+00j)\n',
+ b'(3.142e+00+2.718e+00j) (3.142e+00+2.718e+00j)\n'])
+
+ def test_complex_negative_exponent(self):
+        # Prior to 1.15, some formats generated x+-yj, see gh-7895
+ ncols = 2
+ nrows = 2
+ a = np.zeros((ncols, nrows), dtype=np.complex128)
+ re = np.pi
+ im = np.e
+ a[:] = re - 1.0j * im
+ c = BytesIO()
+ np.savetxt(c, a, fmt='%.3e')
+ c.seek(0)
+ lines = c.readlines()
+ assert_equal(
+ lines,
+ [b' (3.142e+00-2.718e+00j) (3.142e+00-2.718e+00j)\n',
+ b' (3.142e+00-2.718e+00j) (3.142e+00-2.718e+00j)\n'])
+
+ def test_custom_writer(self):
+
+ class CustomWriter(list):
+ def write(self, text):
+ self.extend(text.split(b'\n'))
+
+ w = CustomWriter()
+ a = np.array([(1, 2), (3, 4)])
+ np.savetxt(w, a)
+ b = np.loadtxt(w)
+ assert_array_equal(a, b)
+
+ def test_unicode(self):
+ utf8 = b'\xcf\x96'.decode('UTF-8')
+ a = np.array([utf8], dtype=np.str_)
+ with tempdir() as tmpdir:
+            # set the encoding explicitly; on Windows the default
+            # may not be unicode even on py3
+ np.savetxt(os.path.join(tmpdir, 'test.csv'), a, fmt=['%s'],
+ encoding='UTF-8')
+
+ def test_unicode_roundtrip(self):
+ utf8 = b'\xcf\x96'.decode('UTF-8')
+ a = np.array([utf8], dtype=np.str_)
+        # our gz wrapper supports encoding
+ suffixes = ['', '.gz']
+ if HAS_BZ2:
+ suffixes.append('.bz2')
+ if HAS_LZMA:
+ suffixes.extend(['.xz', '.lzma'])
+ with tempdir() as tmpdir:
+ for suffix in suffixes:
+ np.savetxt(os.path.join(tmpdir, 'test.csv' + suffix), a,
+ fmt=['%s'], encoding='UTF-16-LE')
+ b = np.loadtxt(os.path.join(tmpdir, 'test.csv' + suffix),
+ encoding='UTF-16-LE', dtype=np.str_)
+ assert_array_equal(a, b)
+
+ def test_unicode_bytestream(self):
+ utf8 = b'\xcf\x96'.decode('UTF-8')
+ a = np.array([utf8], dtype=np.str_)
+ s = BytesIO()
+ np.savetxt(s, a, fmt=['%s'], encoding='UTF-8')
+ s.seek(0)
+ assert_equal(s.read().decode('UTF-8'), utf8 + '\n')
+
+ def test_unicode_stringstream(self):
+ utf8 = b'\xcf\x96'.decode('UTF-8')
+ a = np.array([utf8], dtype=np.str_)
+ s = StringIO()
+ np.savetxt(s, a, fmt=['%s'], encoding='UTF-8')
+ s.seek(0)
+ assert_equal(s.read(), utf8 + '\n')
+
+ @pytest.mark.parametrize("iotype", [StringIO, BytesIO])
+ def test_unicode_and_bytes_fmt(self, iotype):
+ # string type of fmt should not matter, see also gh-4053
+ a = np.array([1.])
+ s = iotype()
+ np.savetxt(s, a, fmt="%f")
+ s.seek(0)
+ if iotype is StringIO:
+ assert_equal(s.read(), "%f\n" % 1.)
+ else:
+ assert_equal(s.read(), b"%f\n" % 1.)
+
+ @pytest.mark.skipif(sys.platform == 'win32', reason="files>4GB may not work")
+ @pytest.mark.slow
+ @requires_memory(free_bytes=7e9)
+ def test_large_zip(self):
+ def check_large_zip(memoryerror_raised):
+ memoryerror_raised.value = False
+ try:
+ # The test takes at least 6GB of memory, writes a file larger
+ # than 4GB. This tests the ``allowZip64`` kwarg to ``zipfile``
+ test_data = np.asarray([np.random.rand(
+ np.random.randint(50, 100), 4)
+ for i in range(800000)], dtype=object)
+ with tempdir() as tmpdir:
+ np.savez(os.path.join(tmpdir, 'test.npz'),
+ test_data=test_data)
+ except MemoryError:
+ memoryerror_raised.value = True
+ raise
+ # run in a subprocess to ensure memory is released on PyPy, see gh-15775
+ # Use an object in shared memory to re-raise the MemoryError exception
+ # in our process if needed, see gh-16889
+ memoryerror_raised = Value(c_bool)
+
+ # Since Python 3.8, the default start method for multiprocessing has
+ # been changed from 'fork' to 'spawn' on macOS, causing inconsistency
+ # on memory sharing model, leading to failed test for check_large_zip
+ ctx = get_context('fork')
+ p = ctx.Process(target=check_large_zip, args=(memoryerror_raised,))
+ p.start()
+ p.join()
+ if memoryerror_raised.value:
+ raise MemoryError("Child process raised a MemoryError exception")
+ # -9 indicates a SIGKILL, probably an OOM.
+ if p.exitcode == -9:
+ pytest.xfail("subprocess got a SIGKILL, apparently free memory was not sufficient")
+ assert p.exitcode == 0
+
+
+class LoadTxtBase:
+ def check_compressed(self, fopen, suffixes):
+ # Test that we can load data from a compressed file
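+        # every line separator ('\n', '\r\n', '\r') is written through
+        # the compression wrapper to check universal-newline handling,
+        # loading both by file name and from an open file object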
+ wanted = np.arange(6).reshape((2, 3))
+ linesep = ('\n', '\r\n', '\r')
+ for sep in linesep:
+ data = '0 1 2' + sep + '3 4 5'
+ for suffix in suffixes:
+ with temppath(suffix=suffix) as name:
+ with fopen(name, mode='wt', encoding='UTF-32-LE') as f:
+ f.write(data)
+ res = self.loadfunc(name, encoding='UTF-32-LE')
+ assert_array_equal(res, wanted)
+ with fopen(name, "rt", encoding='UTF-32-LE') as f:
+ res = self.loadfunc(f)
+ assert_array_equal(res, wanted)
+
+ def test_compressed_gzip(self):
+ self.check_compressed(gzip.open, ('.gz',))
+
+ @pytest.mark.skipif(not HAS_BZ2, reason="Needs bz2")
+ def test_compressed_bz2(self):
+ self.check_compressed(bz2.open, ('.bz2',))
+
+ @pytest.mark.skipif(not HAS_LZMA, reason="Needs lzma")
+ def test_compressed_lzma(self):
+ self.check_compressed(lzma.open, ('.xz', '.lzma'))
+
+ def test_encoding(self):
+ with temppath() as path:
+ with open(path, "wb") as f:
+ f.write('0.\n1.\n2.'.encode("UTF-16"))
+ x = self.loadfunc(path, encoding="UTF-16")
+ assert_array_equal(x, [0., 1., 2.])
+
+ def test_stringload(self):
+        # umlauts
+ nonascii = b'\xc3\xb6\xc3\xbc\xc3\xb6'.decode("UTF-8")
+ with temppath() as path:
+ with open(path, "wb") as f:
+ f.write(nonascii.encode("UTF-16"))
+ x = self.loadfunc(path, encoding="UTF-16", dtype=np.str_)
+ assert_array_equal(x, nonascii)
+
+ def test_binary_decode(self):
+ utf16 = b'\xff\xfeh\x04 \x00i\x04 \x00j\x04'
+ v = self.loadfunc(BytesIO(utf16), dtype=np.str_, encoding='UTF-16')
+ assert_array_equal(v, np.array(utf16.decode('UTF-16').split()))
+
+ def test_converters_decode(self):
+ # test converters that decode strings
+ c = TextIO()
+ c.write(b'\xcf\x96')
+ c.seek(0)
+ x = self.loadfunc(c, dtype=np.str_, encoding="bytes",
+ converters={0: lambda x: x.decode('UTF-8')})
+ a = np.array([b'\xcf\x96'.decode('UTF-8')])
+ assert_array_equal(x, a)
+
+ def test_converters_nodecode(self):
+ # test native string converters enabled by setting an encoding
+ utf8 = b'\xcf\x96'.decode('UTF-8')
+ with temppath() as path:
+ with open(path, 'wt', encoding='UTF-8') as f:
+ f.write(utf8)
+ x = self.loadfunc(path, dtype=np.str_,
+ converters={0: lambda x: x + 't'},
+ encoding='UTF-8')
+ a = np.array([utf8 + 't'])
+ assert_array_equal(x, a)
+
+
+class TestLoadTxt(LoadTxtBase):
+ loadfunc = staticmethod(np.loadtxt)
+
+ def setup_method(self):
+ # lower chunksize for testing
+ self.orig_chunk = _npyio_impl._loadtxt_chunksize
+ _npyio_impl._loadtxt_chunksize = 1
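+        # a chunk size of 1 forces loadtxt through its chunked-read
+        # path on every multi-row input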
+
+ def teardown_method(self):
+ _npyio_impl._loadtxt_chunksize = self.orig_chunk
+
+ def test_record(self):
+ c = TextIO()
+ c.write('1 2\n3 4')
+ c.seek(0)
+ x = np.loadtxt(c, dtype=[('x', np.int32), ('y', np.int32)])
+ a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
+ assert_array_equal(x, a)
+
+ d = TextIO()
+ d.write('M 64 75.0\nF 25 60.0')
+ d.seek(0)
+ mydescriptor = {'names': ('gender', 'age', 'weight'),
+ 'formats': ('S1', 'i4', 'f4')}
+ b = np.array([('M', 64.0, 75.0),
+ ('F', 25.0, 60.0)], dtype=mydescriptor)
+ y = np.loadtxt(d, dtype=mydescriptor)
+ assert_array_equal(y, b)
+
+ def test_array(self):
+ c = TextIO()
+ c.write('1 2\n3 4')
+
+ c.seek(0)
+ x = np.loadtxt(c, dtype=int)
+ a = np.array([[1, 2], [3, 4]], int)
+ assert_array_equal(x, a)
+
+ c.seek(0)
+ x = np.loadtxt(c, dtype=float)
+ a = np.array([[1, 2], [3, 4]], float)
+ assert_array_equal(x, a)
+
+ def test_1D(self):
+ c = TextIO()
+ c.write('1\n2\n3\n4\n')
+ c.seek(0)
+ x = np.loadtxt(c, dtype=int)
+ a = np.array([1, 2, 3, 4], int)
+ assert_array_equal(x, a)
+
+ c = TextIO()
+ c.write('1,2,3,4\n')
+ c.seek(0)
+ x = np.loadtxt(c, dtype=int, delimiter=',')
+ a = np.array([1, 2, 3, 4], int)
+ assert_array_equal(x, a)
+
+ def test_missing(self):
+ c = TextIO()
+ c.write('1,2,3,,5\n')
+ c.seek(0)
+ x = np.loadtxt(c, dtype=int, delimiter=',',
+ converters={3: lambda s: int(s or - 999)})
+ a = np.array([1, 2, 3, -999, 5], int)
+ assert_array_equal(x, a)
+
+ def test_converters_with_usecols(self):
+ c = TextIO()
+ c.write('1,2,3,,5\n6,7,8,9,10\n')
+ c.seek(0)
+ x = np.loadtxt(c, dtype=int, delimiter=',',
+ converters={3: lambda s: int(s or - 999)},
+ usecols=(1, 3,))
+ a = np.array([[2, -999], [7, 9]], int)
+ assert_array_equal(x, a)
+
+ def test_comments_unicode(self):
+ c = TextIO()
+ c.write('# comment\n1,2,3,5\n')
+ c.seek(0)
+ x = np.loadtxt(c, dtype=int, delimiter=',',
+ comments='#')
+ a = np.array([1, 2, 3, 5], int)
+ assert_array_equal(x, a)
+
+ def test_comments_byte(self):
+ c = TextIO()
+ c.write('# comment\n1,2,3,5\n')
+ c.seek(0)
+ x = np.loadtxt(c, dtype=int, delimiter=',',
+ comments=b'#')
+ a = np.array([1, 2, 3, 5], int)
+ assert_array_equal(x, a)
+
+ def test_comments_multiple(self):
+ c = TextIO()
+ c.write('# comment\n1,2,3\n@ comment2\n4,5,6 // comment3')
+ c.seek(0)
+ x = np.loadtxt(c, dtype=int, delimiter=',',
+ comments=['#', '@', '//'])
+ a = np.array([[1, 2, 3], [4, 5, 6]], int)
+ assert_array_equal(x, a)
+
+ @pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8),
+ reason="PyPy bug in error formatting")
+ def test_comments_multi_chars(self):
+ c = TextIO()
+ c.write('/* comment\n1,2,3,5\n')
+ c.seek(0)
+ x = np.loadtxt(c, dtype=int, delimiter=',',
+ comments='/*')
+ a = np.array([1, 2, 3, 5], int)
+ assert_array_equal(x, a)
+
+ # Check that '/*' is not transformed to ['/', '*']
+ c = TextIO()
+ c.write('*/ comment\n1,2,3,5\n')
+ c.seek(0)
+ assert_raises(ValueError, np.loadtxt, c, dtype=int, delimiter=',',
+ comments='/*')
+
+ def test_skiprows(self):
+ c = TextIO()
+ c.write('comment\n1,2,3,5\n')
+ c.seek(0)
+ x = np.loadtxt(c, dtype=int, delimiter=',',
+ skiprows=1)
+ a = np.array([1, 2, 3, 5], int)
+ assert_array_equal(x, a)
+
+ c = TextIO()
+ c.write('# comment\n1,2,3,5\n')
+ c.seek(0)
+ x = np.loadtxt(c, dtype=int, delimiter=',',
+ skiprows=1)
+ a = np.array([1, 2, 3, 5], int)
+ assert_array_equal(x, a)
+
+ def test_usecols(self):
+ a = np.array([[1, 2], [3, 4]], float)
+ c = BytesIO()
+ np.savetxt(c, a)
+ c.seek(0)
+ x = np.loadtxt(c, dtype=float, usecols=(1,))
+ assert_array_equal(x, a[:, 1])
+
+ a = np.array([[1, 2, 3], [3, 4, 5]], float)
+ c = BytesIO()
+ np.savetxt(c, a)
+ c.seek(0)
+ x = np.loadtxt(c, dtype=float, usecols=(1, 2))
+ assert_array_equal(x, a[:, 1:])
+
+ # Testing with arrays instead of tuples.
+ c.seek(0)
+ x = np.loadtxt(c, dtype=float, usecols=np.array([1, 2]))
+ assert_array_equal(x, a[:, 1:])
+
+ # Testing with an integer instead of a sequence
+ for int_type in [int, np.int8, np.int16,
+ np.int32, np.int64, np.uint8, np.uint16,
+ np.uint32, np.uint64]:
+ to_read = int_type(1)
+ c.seek(0)
+ x = np.loadtxt(c, dtype=float, usecols=to_read)
+ assert_array_equal(x, a[:, 1])
+
+ # Testing with some crazy custom integer type
+ class CrazyInt:
+ def __index__(self):
+ return 1
+
+ crazy_int = CrazyInt()
+ c.seek(0)
+ x = np.loadtxt(c, dtype=float, usecols=crazy_int)
+ assert_array_equal(x, a[:, 1])
+
+ c.seek(0)
+ x = np.loadtxt(c, dtype=float, usecols=(crazy_int,))
+ assert_array_equal(x, a[:, 1])
+
+ # Checking with dtypes defined converters.
+ data = '''JOE 70.1 25.3
+ BOB 60.5 27.9
+ '''
+ c = TextIO(data)
+ names = ['stid', 'temp']
+ dtypes = ['S4', 'f8']
+ arr = np.loadtxt(c, usecols=(0, 2), dtype=list(zip(names, dtypes)))
+ assert_equal(arr['stid'], [b"JOE", b"BOB"])
+ assert_equal(arr['temp'], [25.3, 27.9])
+
+ # Testing non-ints in usecols
+ c.seek(0)
+ bogus_idx = 1.5
+ assert_raises_regex(
+ TypeError,
+ f'^usecols must be.*{type(bogus_idx).__name__}',
+ np.loadtxt, c, usecols=bogus_idx
+ )
+
+ assert_raises_regex(
+ TypeError,
+ f'^usecols must be.*{type(bogus_idx).__name__}',
+ np.loadtxt, c, usecols=[0, bogus_idx, 0]
+ )
+
+ def test_bad_usecols(self):
+ with pytest.raises(OverflowError):
+ np.loadtxt(["1\n"], usecols=[2**64], delimiter=",")
+ with pytest.raises((ValueError, OverflowError)):
+ # Overflow error on 32bit platforms
+ np.loadtxt(["1\n"], usecols=[2**62], delimiter=",")
+ with pytest.raises(TypeError,
+ match="If a structured dtype .*. But 1 usecols were given and "
+ "the number of fields is 3."):
+ np.loadtxt(["1,1\n"], dtype="i,2i", usecols=[0], delimiter=",")
+
+ def test_fancy_dtype(self):
+ c = TextIO()
+ c.write('1,2,3.0\n4,5,6.0\n')
+ c.seek(0)
+ dt = np.dtype([('x', int), ('y', [('t', int), ('s', float)])])
+ x = np.loadtxt(c, dtype=dt, delimiter=',')
+ a = np.array([(1, (2, 3.0)), (4, (5, 6.0))], dt)
+ assert_array_equal(x, a)
+
+ def test_shaped_dtype(self):
+ c = TextIO("aaaa 1.0 8.0 1 2 3 4 5 6")
+ dt = np.dtype([('name', 'S4'), ('x', float), ('y', float),
+ ('block', int, (2, 3))])
+ x = np.loadtxt(c, dtype=dt)
+ a = np.array([('aaaa', 1.0, 8.0, [[1, 2, 3], [4, 5, 6]])],
+ dtype=dt)
+ assert_array_equal(x, a)
+
+ def test_3d_shaped_dtype(self):
+ c = TextIO("aaaa 1.0 8.0 1 2 3 4 5 6 7 8 9 10 11 12")
+ dt = np.dtype([('name', 'S4'), ('x', float), ('y', float),
+ ('block', int, (2, 2, 3))])
+ x = np.loadtxt(c, dtype=dt)
+ a = np.array([('aaaa', 1.0, 8.0,
+ [[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]])],
+ dtype=dt)
+ assert_array_equal(x, a)
+
+ def test_str_dtype(self):
+ # see gh-8033
+ c = ["str1", "str2"]
+
+ for dt in (str, np.bytes_):
+ a = np.array(["str1", "str2"], dtype=dt)
+ x = np.loadtxt(c, dtype=dt)
+ assert_array_equal(x, a)
+
+ def test_empty_file(self):
+ with pytest.warns(UserWarning, match="input contained no data"):
+ c = TextIO()
+ x = np.loadtxt(c)
+ assert_equal(x.shape, (0,))
+ x = np.loadtxt(c, dtype=np.int64)
+ assert_equal(x.shape, (0,))
+ assert_(x.dtype == np.int64)
+
+ def test_unused_converter(self):
+ c = TextIO()
+ c.writelines(['1 21\n', '3 42\n'])
+ c.seek(0)
+ data = np.loadtxt(c, usecols=(1,),
+ converters={0: lambda s: int(s, 16)})
+ assert_array_equal(data, [21, 42])
+
+ c.seek(0)
+ data = np.loadtxt(c, usecols=(1,),
+ converters={1: lambda s: int(s, 16)})
+ assert_array_equal(data, [33, 66])
+
+ def test_dtype_with_object(self):
+ # Test using an explicit dtype with an object
+ data = """ 1; 2001-01-01
+ 2; 2002-01-31 """
+ ndtype = [('idx', int), ('code', object)]
+ func = lambda s: strptime(s.strip(), "%Y-%m-%d")
+ converters = {1: func}
+ test = np.loadtxt(TextIO(data), delimiter=";", dtype=ndtype,
+ converters=converters)
+ control = np.array(
+ [(1, datetime(2001, 1, 1)), (2, datetime(2002, 1, 31))],
+ dtype=ndtype)
+ assert_equal(test, control)
+
+ def test_uint64_type(self):
+ tgt = (9223372043271415339, 9223372043271415853)
+ c = TextIO()
+ c.write("%s %s" % tgt)
+ c.seek(0)
+ res = np.loadtxt(c, dtype=np.uint64)
+ assert_equal(res, tgt)
+
+ def test_int64_type(self):
+ tgt = (-9223372036854775807, 9223372036854775807)
+ c = TextIO()
+ c.write("%s %s" % tgt)
+ c.seek(0)
+ res = np.loadtxt(c, dtype=np.int64)
+ assert_equal(res, tgt)
+
+ def test_from_float_hex(self):
+ # IEEE doubles and floats only, otherwise the float32
+ # conversion may fail.
+ tgt = np.logspace(-10, 10, 5).astype(np.float32)
+ tgt = np.hstack((tgt, -tgt)).astype(float)
+ inp = '\n'.join(map(float.hex, tgt))
+ c = TextIO()
+ c.write(inp)
+ for dt in [float, np.float32]:
+ c.seek(0)
+ res = np.loadtxt(
+ c, dtype=dt, converters=float.fromhex, encoding="latin1")
+ assert_equal(res, tgt, err_msg=f"{dt}")
+
+ @pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8),
+ reason="PyPy bug in error formatting")
+ def test_default_float_converter_no_default_hex_conversion(self):
+ """
+ Ensure that fromhex is only used for values with the correct prefix and
+ is not called by default. Regression test related to gh-19598.
+ """
+ c = TextIO("a b c")
+ with pytest.raises(ValueError,
+ match=".*convert string 'a' to float64 at row 0, column 1"):
+ np.loadtxt(c)
+
+ @pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8),
+ reason="PyPy bug in error formatting")
+ def test_default_float_converter_exception(self):
+ """
+ Ensure that the exception message raised during failed floating point
+ conversion is correct. Regression test related to gh-19598.
+ """
+ c = TextIO("qrs tuv") # Invalid values for default float converter
+ with pytest.raises(ValueError,
+ match="could not convert string 'qrs' to float64"):
+ np.loadtxt(c)
+
+ def test_from_complex(self):
+ tgt = (complex(1, 1), complex(1, -1))
+ c = TextIO()
+ c.write("%s %s" % tgt)
+ c.seek(0)
+ res = np.loadtxt(c, dtype=complex)
+ assert_equal(res, tgt)
+
+ def test_complex_misformatted(self):
+ # test for backward compatibility
+ # some complex formats used to generate x+-yj
+ a = np.zeros((2, 2), dtype=np.complex128)
+ re = np.pi
+ im = np.e
+ a[:] = re - 1.0j * im
+ c = BytesIO()
+ np.savetxt(c, a, fmt='%.16e')
+ c.seek(0)
+ txt = c.read()
+ c.seek(0)
+ # misformat the sign on the imaginary part, gh 7895
+ txt_bad = txt.replace(b'e+00-', b'e00+-')
+ assert_(txt_bad != txt)
+ c.write(txt_bad)
+ c.seek(0)
+ res = np.loadtxt(c, dtype=complex)
+ assert_equal(res, a)
+
+ def test_universal_newline(self):
+ with temppath() as name:
+ with open(name, 'w') as f:
+ f.write('1 21\r3 42\r')
+ data = np.loadtxt(name)
+ assert_array_equal(data, [[1, 21], [3, 42]])
+
+ def test_empty_field_after_tab(self):
+ c = TextIO()
+ c.write('1 \t2 \t3\tstart \n4\t5\t6\t \n7\t8\t9.5\t')
+ c.seek(0)
+ dt = {'names': ('x', 'y', 'z', 'comment'),
+ 'formats': ('<i4', '<i4', '<f4', '|S8')}
+ x = np.loadtxt(c, dtype=dt, delimiter='\t')
+ a = np.array([b'start ', b' ', b''])
+ assert_array_equal(x['comment'], a)
+
+ def test_unpack_structured(self):
+ txt = TextIO("M 21 72\nF 35 58")
+ dt = {'names': ('a', 'b', 'c'), 'formats': ('|S1', '<i4', '<f4')}
+ a, b, c = np.loadtxt(txt, dtype=dt, unpack=True)
+ assert_(a.dtype.str == '|S1')
+ assert_(b.dtype.str == '<i4')
+ assert_(c.dtype.str == '<f4')
+ assert_array_equal(a, np.array([b'M', b'F']))
+ assert_array_equal(b, np.array([21, 35]))
+ assert_array_equal(c, np.array([72., 58.]))
+
+ def test_ndmin_keyword(self):
+ c = TextIO()
+ c.write('1,2,3\n4,5,6')
+ c.seek(0)
+ assert_raises(ValueError, np.loadtxt, c, ndmin=3)
+ c.seek(0)
+ assert_raises(ValueError, np.loadtxt, c, ndmin=1.5)
+ c.seek(0)
+ x = np.loadtxt(c, dtype=int, delimiter=',', ndmin=1)
+ a = np.array([[1, 2, 3], [4, 5, 6]])
+ assert_array_equal(x, a)
+
+ d = TextIO()
+ d.write('0,1,2')
+ d.seek(0)
+ x = np.loadtxt(d, dtype=int, delimiter=',', ndmin=2)
+ assert_(x.shape == (1, 3))
+ d.seek(0)
+ x = np.loadtxt(d, dtype=int, delimiter=',', ndmin=1)
+ assert_(x.shape == (3,))
+ d.seek(0)
+ x = np.loadtxt(d, dtype=int, delimiter=',', ndmin=0)
+ assert_(x.shape == (3,))
+
+ e = TextIO()
+ e.write('0\n1\n2')
+ e.seek(0)
+ x = np.loadtxt(e, dtype=int, delimiter=',', ndmin=2)
+ assert_(x.shape == (3, 1))
+ e.seek(0)
+ x = np.loadtxt(e, dtype=int, delimiter=',', ndmin=1)
+ assert_(x.shape == (3,))
+ e.seek(0)
+ x = np.loadtxt(e, dtype=int, delimiter=',', ndmin=0)
+ assert_(x.shape == (3,))
+
+ # Test ndmin kw with empty file.
+ with pytest.warns(UserWarning, match="input contained no data"):
+ f = TextIO()
+ assert_(np.loadtxt(f, ndmin=2).shape == (0, 1,))
+ assert_(np.loadtxt(f, ndmin=1).shape == (0,))
+
+ def test_generator_source(self):
+ def count():
+ for i in range(10):
+ yield "%d" % i
+
+ res = np.loadtxt(count())
+ assert_array_equal(res, np.arange(10))
+
+ def test_bad_line(self):
+ c = TextIO()
+ c.write('1 2 3\n4 5 6\n2 3')
+ c.seek(0)
+
+ # Check for exception and that exception contains line number
+ assert_raises_regex(ValueError, "3", np.loadtxt, c)
+
+ def test_none_as_string(self):
+ # gh-5155, None should work as string when format demands it
+ c = TextIO()
+ c.write('100,foo,200\n300,None,400')
+ c.seek(0)
+ dt = np.dtype([('x', int), ('a', 'S10'), ('y', int)])
+ np.loadtxt(c, delimiter=',', dtype=dt, comments=None) # Should succeed
+
+ @pytest.mark.skipif(locale.getpreferredencoding() == 'ANSI_X3.4-1968',
+ reason="Wrong preferred encoding")
+ def test_binary_load(self):
+ butf8 = b"5,6,7,\xc3\x95scarscar\r\n15,2,3,hello\r\n"\
+ b"20,2,3,\xc3\x95scar\r\n"
+ sutf8 = butf8.decode("UTF-8").replace("\r", "").splitlines()
+ with temppath() as path:
+ with open(path, "wb") as f:
+ f.write(butf8)
+ with open(path, "rb") as f:
+ x = np.loadtxt(f, encoding="UTF-8", dtype=np.str_)
+ assert_array_equal(x, sutf8)
+ # test broken latin1 conversion people now rely on
+ with open(path, "rb") as f:
+ x = np.loadtxt(f, encoding="UTF-8", dtype="S")
+ x = [b'5,6,7,\xc3\x95scarscar', b'15,2,3,hello', b'20,2,3,\xc3\x95scar']
+ assert_array_equal(x, np.array(x, dtype="S"))
+
+ def test_max_rows(self):
+ c = TextIO()
+ c.write('1,2,3,5\n4,5,7,8\n2,1,4,5')
+ c.seek(0)
+ x = np.loadtxt(c, dtype=int, delimiter=',',
+ max_rows=1)
+ a = np.array([1, 2, 3, 5], int)
+ assert_array_equal(x, a)
+
+ def test_max_rows_with_skiprows(self):
+ c = TextIO()
+ c.write('comments\n1,2,3,5\n4,5,7,8\n2,1,4,5')
+ c.seek(0)
+ x = np.loadtxt(c, dtype=int, delimiter=',',
+ skiprows=1, max_rows=1)
+ a = np.array([1, 2, 3, 5], int)
+ assert_array_equal(x, a)
+
+ c = TextIO()
+ c.write('comment\n1,2,3,5\n4,5,7,8\n2,1,4,5')
+ c.seek(0)
+ x = np.loadtxt(c, dtype=int, delimiter=',',
+ skiprows=1, max_rows=2)
+ a = np.array([[1, 2, 3, 5], [4, 5, 7, 8]], int)
+ assert_array_equal(x, a)
+
+ def test_max_rows_with_read_continuation(self):
+ c = TextIO()
+ c.write('1,2,3,5\n4,5,7,8\n2,1,4,5')
+ c.seek(0)
+ x = np.loadtxt(c, dtype=int, delimiter=',',
+ max_rows=2)
+ a = np.array([[1, 2, 3, 5], [4, 5, 7, 8]], int)
+ assert_array_equal(x, a)
+ # test continuation
+ x = np.loadtxt(c, dtype=int, delimiter=',')
+ a = np.array([2, 1, 4, 5], int)
+ assert_array_equal(x, a)
+
+ def test_max_rows_larger(self):
+        # test max_rows > num rows
+ c = TextIO()
+ c.write('comment\n1,2,3,5\n4,5,7,8\n2,1,4,5')
+ c.seek(0)
+ x = np.loadtxt(c, dtype=int, delimiter=',',
+ skiprows=1, max_rows=6)
+ a = np.array([[1, 2, 3, 5], [4, 5, 7, 8], [2, 1, 4, 5]], int)
+ assert_array_equal(x, a)
+
+ @pytest.mark.parametrize(["skip", "data"], [
+ (1, ["ignored\n", "1,2\n", "\n", "3,4\n"]),
+ # "Bad" lines that do not end in newlines:
+ (1, ["ignored", "1,2", "", "3,4"]),
+ (1, StringIO("ignored\n1,2\n\n3,4")),
+ # Same as above, but do not skip any lines:
+ (0, ["-1,0\n", "1,2\n", "\n", "3,4\n"]),
+ (0, ["-1,0", "1,2", "", "3,4"]),
+ (0, StringIO("-1,0\n1,2\n\n3,4"))])
+ def test_max_rows_empty_lines(self, skip, data):
+ with pytest.warns(UserWarning,
+ match=f"Input line 3.*max_rows={3 - skip}"):
+ res = np.loadtxt(data, dtype=int, skiprows=skip, delimiter=",",
+ max_rows=3 - skip)
+ assert_array_equal(res, [[-1, 0], [1, 2], [3, 4]][skip:])
+
+ if isinstance(data, StringIO):
+ data.seek(0)
+
+ with warnings.catch_warnings():
+ warnings.simplefilter("error", UserWarning)
+ with pytest.raises(UserWarning):
+ np.loadtxt(data, dtype=int, skiprows=skip, delimiter=",",
+ max_rows=3 - skip)
+
+
+class Testfromregex:
+ def test_record(self):
+ c = TextIO()
+ c.write('1.312 foo\n1.534 bar\n4.444 qux')
+ c.seek(0)
+
+ dt = [('num', np.float64), ('val', 'S3')]
+ x = np.fromregex(c, r"([0-9.]+)\s+(...)", dt)
+ a = np.array([(1.312, 'foo'), (1.534, 'bar'), (4.444, 'qux')],
+ dtype=dt)
+ assert_array_equal(x, a)
+
+ def test_record_2(self):
+ c = TextIO()
+ c.write('1312 foo\n1534 bar\n4444 qux')
+ c.seek(0)
+
+ dt = [('num', np.int32), ('val', 'S3')]
+ x = np.fromregex(c, r"(\d+)\s+(...)", dt)
+ a = np.array([(1312, 'foo'), (1534, 'bar'), (4444, 'qux')],
+ dtype=dt)
+ assert_array_equal(x, a)
+
+ def test_record_3(self):
+ c = TextIO()
+ c.write('1312 foo\n1534 bar\n4444 qux')
+ c.seek(0)
+
+ dt = [('num', np.float64)]
+ x = np.fromregex(c, r"(\d+)\s+...", dt)
+ a = np.array([(1312,), (1534,), (4444,)], dtype=dt)
+ assert_array_equal(x, a)
+
+ @pytest.mark.parametrize("path_type", [str, Path])
+ def test_record_unicode(self, path_type):
+ utf8 = b'\xcf\x96'
+ with temppath() as str_path:
+ path = path_type(str_path)
+ with open(path, 'wb') as f:
+ f.write(b'1.312 foo' + utf8 + b' \n1.534 bar\n4.444 qux')
+
+ dt = [('num', np.float64), ('val', 'U4')]
+ x = np.fromregex(path, r"(?u)([0-9.]+)\s+(\w+)", dt, encoding='UTF-8')
+ a = np.array([(1.312, 'foo' + utf8.decode('UTF-8')), (1.534, 'bar'),
+ (4.444, 'qux')], dtype=dt)
+ assert_array_equal(x, a)
+
+ regexp = re.compile(r"([0-9.]+)\s+(\w+)", re.UNICODE)
+ x = np.fromregex(path, regexp, dt, encoding='UTF-8')
+ assert_array_equal(x, a)
+
+ def test_compiled_bytes(self):
+ regexp = re.compile(br'(\d)')
+ c = BytesIO(b'123')
+ dt = [('num', np.float64)]
+ a = np.array([1, 2, 3], dtype=dt)
+ x = np.fromregex(c, regexp, dt)
+ assert_array_equal(x, a)
+
+ def test_bad_dtype_not_structured(self):
+ regexp = re.compile(br'(\d)')
+ c = BytesIO(b'123')
+ with pytest.raises(TypeError, match='structured datatype'):
+ np.fromregex(c, regexp, dtype=np.float64)
+
+
+#####--------------------------------------------------------------------------
+
+
+class TestFromTxt(LoadTxtBase):
+ loadfunc = staticmethod(np.genfromtxt)
+
+ def test_record(self):
+ # Test w/ explicit dtype
+ data = TextIO('1 2\n3 4')
+ test = np.genfromtxt(data, dtype=[('x', np.int32), ('y', np.int32)])
+ control = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
+ assert_equal(test, control)
+ #
+ data = TextIO('M 64.0 75.0\nF 25.0 60.0')
+ descriptor = {'names': ('gender', 'age', 'weight'),
+ 'formats': ('S1', 'i4', 'f4')}
+ control = np.array([('M', 64.0, 75.0), ('F', 25.0, 60.0)],
+ dtype=descriptor)
+ test = np.genfromtxt(data, dtype=descriptor)
+ assert_equal(test, control)
+
+ def test_array(self):
+ # Test outputting a standard ndarray
+ data = TextIO('1 2\n3 4')
+ control = np.array([[1, 2], [3, 4]], dtype=int)
+ test = np.genfromtxt(data, dtype=int)
+ assert_array_equal(test, control)
+ #
+ data.seek(0)
+ control = np.array([[1, 2], [3, 4]], dtype=float)
+ test = np.loadtxt(data, dtype=float)
+ assert_array_equal(test, control)
+
+ def test_1D(self):
+ # Test squeezing to 1D
+ control = np.array([1, 2, 3, 4], int)
+ #
+ data = TextIO('1\n2\n3\n4\n')
+ test = np.genfromtxt(data, dtype=int)
+ assert_array_equal(test, control)
+ #
+ data = TextIO('1,2,3,4\n')
+ test = np.genfromtxt(data, dtype=int, delimiter=',')
+ assert_array_equal(test, control)
+
+ def test_comments(self):
+ # Test the stripping of comments
+ control = np.array([1, 2, 3, 5], int)
+ # Comment on its own line
+ data = TextIO('# comment\n1,2,3,5\n')
+ test = np.genfromtxt(data, dtype=int, delimiter=',', comments='#')
+ assert_equal(test, control)
+ # Comment at the end of a line
+ data = TextIO('1,2,3,5# comment\n')
+ test = np.genfromtxt(data, dtype=int, delimiter=',', comments='#')
+ assert_equal(test, control)
+
+ def test_skiprows(self):
+ # Test row skipping
+ control = np.array([1, 2, 3, 5], int)
+ kwargs = {"dtype": int, "delimiter": ','}
+ #
+ data = TextIO('comment\n1,2,3,5\n')
+ test = np.genfromtxt(data, skip_header=1, **kwargs)
+ assert_equal(test, control)
+ #
+ data = TextIO('# comment\n1,2,3,5\n')
+ test = np.loadtxt(data, skiprows=1, **kwargs)
+ assert_equal(test, control)
+
+ def test_skip_footer(self):
+ data = [f"# {i}" for i in range(1, 6)]
+ data.append("A, B, C")
+ data.extend([f"{i},{i:3.1f},{i:03d}" for i in range(51)])
+ data[-1] = "99,99"
+ kwargs = {"delimiter": ",", "names": True, "skip_header": 5, "skip_footer": 10}
+ test = np.genfromtxt(TextIO("\n".join(data)), **kwargs)
+ ctrl = np.array([(f"{i:f}", f"{i:f}", f"{i:f}") for i in range(41)],
+ dtype=[(_, float) for _ in "ABC"])
+ assert_equal(test, ctrl)
+
+ def test_skip_footer_with_invalid(self):
+ with suppress_warnings() as sup:
+ sup.filter(ConversionWarning)
+ basestr = '1 1\n2 2\n3 3\n4 4\n5 \n6 \n7 \n'
+ # Footer too small to get rid of all invalid values
+ assert_raises(ValueError, np.genfromtxt,
+ TextIO(basestr), skip_footer=1)
+ a = np.genfromtxt(
+ TextIO(basestr), skip_footer=1, invalid_raise=False)
+ assert_equal(a, np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]]))
+ #
+ a = np.genfromtxt(TextIO(basestr), skip_footer=3)
+ assert_equal(a, np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]]))
+ #
+ basestr = '1 1\n2 \n3 3\n4 4\n5 \n6 6\n7 7\n'
+ a = np.genfromtxt(
+ TextIO(basestr), skip_footer=1, invalid_raise=False)
+ assert_equal(a, np.array([[1., 1.], [3., 3.], [4., 4.], [6., 6.]]))
+ a = np.genfromtxt(
+ TextIO(basestr), skip_footer=3, invalid_raise=False)
+ assert_equal(a, np.array([[1., 1.], [3., 3.], [4., 4.]]))
+
+ def test_header(self):
+ # Test retrieving a header
+ data = TextIO('gender age weight\nM 64.0 75.0\nF 25.0 60.0')
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always', '', VisibleDeprecationWarning)
+ test = np.genfromtxt(data, dtype=None, names=True,
+ encoding='bytes')
+ assert_(w[0].category is VisibleDeprecationWarning)
+ control = {'gender': np.array([b'M', b'F']),
+ 'age': np.array([64.0, 25.0]),
+ 'weight': np.array([75.0, 60.0])}
+ assert_equal(test['gender'], control['gender'])
+ assert_equal(test['age'], control['age'])
+ assert_equal(test['weight'], control['weight'])
+
+ def test_auto_dtype(self):
+ # Test the automatic definition of the output dtype
+ data = TextIO('A 64 75.0 3+4j True\nBCD 25 60.0 5+6j False')
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always', '', VisibleDeprecationWarning)
+ test = np.genfromtxt(data, dtype=None, encoding='bytes')
+ assert_(w[0].category is VisibleDeprecationWarning)
+ control = [np.array([b'A', b'BCD']),
+ np.array([64, 25]),
+ np.array([75.0, 60.0]),
+ np.array([3 + 4j, 5 + 6j]),
+ np.array([True, False]), ]
+ assert_equal(test.dtype.names, ['f0', 'f1', 'f2', 'f3', 'f4'])
+ for (i, ctrl) in enumerate(control):
+ assert_equal(test[f'f{i}'], ctrl)
+
+ def test_auto_dtype_uniform(self):
+ # Tests whether the output dtype can be uniformized
+ data = TextIO('1 2 3 4\n5 6 7 8\n')
+ test = np.genfromtxt(data, dtype=None)
+ control = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
+ assert_equal(test, control)
+
+ def test_fancy_dtype(self):
+ # Check that a nested dtype isn't MIA
+ data = TextIO('1,2,3.0\n4,5,6.0\n')
+ fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])])
+ test = np.genfromtxt(data, dtype=fancydtype, delimiter=',')
+ control = np.array([(1, (2, 3.0)), (4, (5, 6.0))], dtype=fancydtype)
+ assert_equal(test, control)
+
+ def test_names_overwrite(self):
+ # Test overwriting the names of the dtype
+ descriptor = {'names': ('g', 'a', 'w'),
+ 'formats': ('S1', 'i4', 'f4')}
+ data = TextIO(b'M 64.0 75.0\nF 25.0 60.0')
+ names = ('gender', 'age', 'weight')
+ test = np.genfromtxt(data, dtype=descriptor, names=names)
+ descriptor['names'] = names
+ control = np.array([('M', 64.0, 75.0),
+ ('F', 25.0, 60.0)], dtype=descriptor)
+ assert_equal(test, control)
+
+ def test_bad_fname(self):
+ with pytest.raises(TypeError, match='fname must be a string,'):
+ np.genfromtxt(123)
+
+ def test_commented_header(self):
+ # Check that names can be retrieved even if the line is commented out.
+ data = TextIO("""
+#gender age weight
+M 21 72.100000
+F 35 58.330000
+M 33 21.99
+ """)
+ # The # is part of the first name and should be deleted automatically.
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always', '', VisibleDeprecationWarning)
+ test = np.genfromtxt(data, names=True, dtype=None,
+ encoding="bytes")
+ assert_(w[0].category is VisibleDeprecationWarning)
+ ctrl = np.array([('M', 21, 72.1), ('F', 35, 58.33), ('M', 33, 21.99)],
+ dtype=[('gender', '|S1'), ('age', int), ('weight', float)])
+ assert_equal(test, ctrl)
+ # Ditto, but we should get rid of the first element
+ data = TextIO(b"""
+# gender age weight
+M 21 72.100000
+F 35 58.330000
+M 33 21.99
+ """)
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always', '', VisibleDeprecationWarning)
+ test = np.genfromtxt(data, names=True, dtype=None,
+ encoding="bytes")
+ assert_(w[0].category is VisibleDeprecationWarning)
+ assert_equal(test, ctrl)
+
+ def test_names_and_comments_none(self):
+ # Tests case when names is true but comments is None (gh-10780)
+ data = TextIO('col1 col2\n 1 2\n 3 4')
+ test = np.genfromtxt(data, dtype=(int, int), comments=None, names=True)
+ control = np.array([(1, 2), (3, 4)], dtype=[('col1', int), ('col2', int)])
+ assert_equal(test, control)
+
+ def test_file_is_closed_on_error(self):
+ # gh-13200
+ with tempdir() as tmpdir:
+ fpath = os.path.join(tmpdir, "test.csv")
+ with open(fpath, "wb") as f:
+ f.write('\N{GREEK PI SYMBOL}'.encode())
+
+ # ResourceWarnings are emitted from a destructor, so won't be
+ # detected by regular propagation to errors.
+ with assert_no_warnings():
+ with pytest.raises(UnicodeDecodeError):
+ np.genfromtxt(fpath, encoding="ascii")
+
+ def test_autonames_and_usecols(self):
+ # Tests names and usecols
+ data = TextIO('A B C D\n aaaa 121 45 9.1')
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always', '', VisibleDeprecationWarning)
+ test = np.genfromtxt(data, usecols=('A', 'C', 'D'),
+ names=True, dtype=None, encoding="bytes")
+ assert_(w[0].category is VisibleDeprecationWarning)
+ control = np.array(('aaaa', 45, 9.1),
+ dtype=[('A', '|S4'), ('C', int), ('D', float)])
+ assert_equal(test, control)
+
+ def test_converters_with_usecols(self):
+ # Test the combination user-defined converters and usecol
+ data = TextIO('1,2,3,,5\n6,7,8,9,10\n')
+ test = np.genfromtxt(data, dtype=int, delimiter=',',
+ converters={3: lambda s: int(s or - 999)},
+ usecols=(1, 3,))
+ control = np.array([[2, -999], [7, 9]], int)
+ assert_equal(test, control)
+
+ def test_converters_with_usecols_and_names(self):
+ # Tests names and usecols
+ data = TextIO('A B C D\n aaaa 121 45 9.1')
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always', '', VisibleDeprecationWarning)
+ test = np.genfromtxt(data, usecols=('A', 'C', 'D'), names=True,
+ dtype=None, encoding="bytes",
+ converters={'C': lambda s: 2 * int(s)})
+ assert_(w[0].category is VisibleDeprecationWarning)
+ control = np.array(('aaaa', 90, 9.1),
+ dtype=[('A', '|S4'), ('C', int), ('D', float)])
+ assert_equal(test, control)
+
+ def test_converters_cornercases(self):
+ # Test the conversion to datetime.
+ converter = {
+ 'date': lambda s: strptime(s, '%Y-%m-%d %H:%M:%SZ')}
+ data = TextIO('2009-02-03 12:00:00Z, 72214.0')
+ test = np.genfromtxt(data, delimiter=',', dtype=None,
+ names=['date', 'stid'], converters=converter)
+ control = np.array((datetime(2009, 2, 3), 72214.),
+ dtype=[('date', np.object_), ('stid', float)])
+ assert_equal(test, control)
+
+ def test_converters_cornercases2(self):
+ # Test the conversion to datetime64.
+ converter = {
+ 'date': lambda s: np.datetime64(strptime(s, '%Y-%m-%d %H:%M:%SZ'))}
+ data = TextIO('2009-02-03 12:00:00Z, 72214.0')
+ test = np.genfromtxt(data, delimiter=',', dtype=None,
+ names=['date', 'stid'], converters=converter)
+ control = np.array((datetime(2009, 2, 3), 72214.),
+ dtype=[('date', 'datetime64[us]'), ('stid', float)])
+ assert_equal(test, control)
+
+ def test_unused_converter(self):
+ # Test whether unused converters are forgotten
+ data = TextIO("1 21\n 3 42\n")
+ test = np.genfromtxt(data, usecols=(1,),
+ converters={0: lambda s: int(s, 16)})
+ assert_equal(test, [21, 42])
+ #
+ data.seek(0)
+ test = np.genfromtxt(data, usecols=(1,),
+ converters={1: lambda s: int(s, 16)})
+ assert_equal(test, [33, 66])
+
+ def test_invalid_converter(self):
+ strip_rand = lambda x: float((b'r' in x.lower() and x.split()[-1]) or
+ ((b'r' not in x.lower() and x.strip()) or 0.0))
+ strip_per = lambda x: float((b'%' in x.lower() and x.split()[0]) or
+ ((b'%' not in x.lower() and x.strip()) or 0.0))
+ s = TextIO("D01N01,10/1/2003 ,1 %,R 75,400,600\r\n"
+ "L24U05,12/5/2003, 2 %,1,300, 150.5\r\n"
+ "D02N03,10/10/2004,R 1,,7,145.55")
+ kwargs = {
+ "converters": {2: strip_per, 3: strip_rand}, "delimiter": ",",
+ "dtype": None, "encoding": "bytes"}
+ assert_raises(ConverterError, np.genfromtxt, s, **kwargs)
+
+ def test_tricky_converter_bug1666(self):
+ # Test some corner cases
+ s = TextIO('q1,2\nq3,4')
+ cnv = lambda s: float(s[1:])
+ test = np.genfromtxt(s, delimiter=',', converters={0: cnv})
+ control = np.array([[1., 2.], [3., 4.]])
+ assert_equal(test, control)
+
+ def test_dtype_with_converters(self):
+ dstr = "2009; 23; 46"
+ test = np.genfromtxt(TextIO(dstr,),
+ delimiter=";", dtype=float, converters={0: bytes})
+ control = np.array([('2009', 23., 46)],
+ dtype=[('f0', '|S4'), ('f1', float), ('f2', float)])
+ assert_equal(test, control)
+ test = np.genfromtxt(TextIO(dstr,),
+ delimiter=";", dtype=float, converters={0: float})
+ control = np.array([2009., 23., 46],)
+ assert_equal(test, control)
+
+ @pytest.mark.filterwarnings("ignore:.*recfromcsv.*:DeprecationWarning")
+ def test_dtype_with_converters_and_usecols(self):
+ dstr = "1,5,-1,1:1\n2,8,-1,1:n\n3,3,-2,m:n\n"
+ dmap = {'1:1': 0, '1:n': 1, 'm:1': 2, 'm:n': 3}
+ dtyp = [('e1', 'i4'), ('e2', 'i4'), ('e3', 'i2'), ('n', 'i1')]
+ conv = {0: int, 1: int, 2: int, 3: lambda r: dmap[r.decode()]}
+ test = recfromcsv(TextIO(dstr,), dtype=dtyp, delimiter=',',
+ names=None, converters=conv, encoding="bytes")
+ control = np.rec.array([(1, 5, -1, 0), (2, 8, -1, 1), (3, 3, -2, 3)], dtype=dtyp)
+ assert_equal(test, control)
+ dtyp = [('e1', 'i4'), ('e2', 'i4'), ('n', 'i1')]
+ test = recfromcsv(TextIO(dstr,), dtype=dtyp, delimiter=',',
+ usecols=(0, 1, 3), names=None, converters=conv,
+ encoding="bytes")
+ control = np.rec.array([(1, 5, 0), (2, 8, 1), (3, 3, 3)], dtype=dtyp)
+ assert_equal(test, control)
+
+ def test_dtype_with_object(self):
+ # Test using an explicit dtype with an object
+ data = """ 1; 2001-01-01
+ 2; 2002-01-31 """
+ ndtype = [('idx', int), ('code', object)]
+ func = lambda s: strptime(s.strip(), "%Y-%m-%d")
+ converters = {1: func}
+ test = np.genfromtxt(TextIO(data), delimiter=";", dtype=ndtype,
+ converters=converters)
+ control = np.array(
+ [(1, datetime(2001, 1, 1)), (2, datetime(2002, 1, 31))],
+ dtype=ndtype)
+ assert_equal(test, control)
+
+ ndtype = [('nest', [('idx', int), ('code', object)])]
+ with assert_raises_regex(NotImplementedError,
+ 'Nested fields.* not supported.*'):
+ test = np.genfromtxt(TextIO(data), delimiter=";",
+ dtype=ndtype, converters=converters)
+
+ # nested but empty fields also aren't supported
+ ndtype = [('idx', int), ('code', object), ('nest', [])]
+ with assert_raises_regex(NotImplementedError,
+ 'Nested fields.* not supported.*'):
+ test = np.genfromtxt(TextIO(data), delimiter=";",
+ dtype=ndtype, converters=converters)
+
+ def test_dtype_with_object_no_converter(self):
+ # Object without a converter uses bytes:
+ parsed = np.genfromtxt(TextIO("1"), dtype=object)
+ assert parsed[()] == b"1"
+ parsed = np.genfromtxt(TextIO("string"), dtype=object)
+ assert parsed[()] == b"string"
+
+ def test_userconverters_with_explicit_dtype(self):
+ # Test user_converters w/ explicit (standard) dtype
+ data = TextIO('skip,skip,2001-01-01,1.0,skip')
+ test = np.genfromtxt(data, delimiter=",", names=None, dtype=float,
+ usecols=(2, 3), converters={2: bytes})
+ control = np.array([('2001-01-01', 1.)],
+ dtype=[('', '|S10'), ('', float)])
+ assert_equal(test, control)
+
+ def test_utf8_userconverters_with_explicit_dtype(self):
+ utf8 = b'\xcf\x96'
+ with temppath() as path:
+ with open(path, 'wb') as f:
+ f.write(b'skip,skip,2001-01-01' + utf8 + b',1.0,skip')
+ test = np.genfromtxt(path, delimiter=",", names=None, dtype=float,
+ usecols=(2, 3), converters={2: str},
+ encoding='UTF-8')
+ control = np.array([('2001-01-01' + utf8.decode('UTF-8'), 1.)],
+ dtype=[('', '|U11'), ('', float)])
+ assert_equal(test, control)
+
+ def test_spacedelimiter(self):
+ # Test space delimiter
+ data = TextIO("1 2 3 4 5\n6 7 8 9 10")
+ test = np.genfromtxt(data)
+ control = np.array([[1., 2., 3., 4., 5.],
+ [6., 7., 8., 9., 10.]])
+ assert_equal(test, control)
+
+ def test_integer_delimiter(self):
+ # Test using an integer for delimiter
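+        # An integer delimiter means fixed-width fields: each field here is
+        # exactly 3 characters wide, so "890123" splits into 890 and 123.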
+ data = " 1 2 3\n 4 5 67\n890123 4"
+ test = np.genfromtxt(TextIO(data), delimiter=3)
+ control = np.array([[1, 2, 3], [4, 5, 67], [890, 123, 4]])
+ assert_equal(test, control)
+
+ def test_missing(self):
+ data = TextIO('1,2,3,,5\n')
+ test = np.genfromtxt(data, dtype=int, delimiter=',',
+                              converters={3: lambda s: int(s or -999)})
+ control = np.array([1, 2, 3, -999, 5], int)
+ assert_equal(test, control)
+
+ def test_missing_with_tabs(self):
+        # Test w/ a tab delimiter
+ txt = "1\t2\t3\n\t2\t\n1\t\t3"
+ test = np.genfromtxt(TextIO(txt), delimiter="\t",
+ usemask=True,)
+ ctrl_d = np.array([(1, 2, 3), (np.nan, 2, np.nan), (1, np.nan, 3)],)
+ ctrl_m = np.array([(0, 0, 0), (1, 0, 1), (0, 1, 0)], dtype=bool)
+ assert_equal(test.data, ctrl_d)
+ assert_equal(test.mask, ctrl_m)
+
+ def test_usecols(self):
+ # Test the selection of columns
+ # Select 1 column
+ control = np.array([[1, 2], [3, 4]], float)
+ data = TextIO()
+ np.savetxt(data, control)
+ data.seek(0)
+ test = np.genfromtxt(data, dtype=float, usecols=(1,))
+ assert_equal(test, control[:, 1])
+ #
+ control = np.array([[1, 2, 3], [3, 4, 5]], float)
+ data = TextIO()
+ np.savetxt(data, control)
+ data.seek(0)
+ test = np.genfromtxt(data, dtype=float, usecols=(1, 2))
+ assert_equal(test, control[:, 1:])
+ # Testing with arrays instead of tuples.
+ data.seek(0)
+ test = np.genfromtxt(data, dtype=float, usecols=np.array([1, 2]))
+ assert_equal(test, control[:, 1:])
+
+ def test_usecols_as_css(self):
+ # Test giving usecols with a comma-separated string
+ data = "1 2 3\n4 5 6"
+ test = np.genfromtxt(TextIO(data),
+ names="a, b, c", usecols="a, c")
+ ctrl = np.array([(1, 3), (4, 6)], dtype=[(_, float) for _ in "ac"])
+ assert_equal(test, ctrl)
+
+ def test_usecols_with_structured_dtype(self):
+ # Test usecols with an explicit structured dtype
+ data = TextIO("JOE 70.1 25.3\nBOB 60.5 27.9")
+ names = ['stid', 'temp']
+ dtypes = ['S4', 'f8']
+ test = np.genfromtxt(
+ data, usecols=(0, 2), dtype=list(zip(names, dtypes)))
+ assert_equal(test['stid'], [b"JOE", b"BOB"])
+ assert_equal(test['temp'], [25.3, 27.9])
+
+ def test_usecols_with_integer(self):
+ # Test usecols with an integer
+ test = np.genfromtxt(TextIO(b"1 2 3\n4 5 6"), usecols=0)
+ assert_equal(test, np.array([1., 4.]))
+
+ def test_usecols_with_named_columns(self):
+ # Test usecols with named columns
+ ctrl = np.array([(1, 3), (4, 6)], dtype=[('a', float), ('c', float)])
+ data = "1 2 3\n4 5 6"
+ kwargs = {"names": "a, b, c"}
+ test = np.genfromtxt(TextIO(data), usecols=(0, -1), **kwargs)
+ assert_equal(test, ctrl)
+ test = np.genfromtxt(TextIO(data),
+ usecols=('a', 'c'), **kwargs)
+ assert_equal(test, ctrl)
+
+ def test_empty_file(self):
+ # Test that an empty file raises the proper warning.
+ with suppress_warnings() as sup:
+ sup.filter(message="genfromtxt: Empty input file:")
+ data = TextIO()
+ test = np.genfromtxt(data)
+ assert_equal(test, np.array([]))
+
+ # when skip_header > 0
+ test = np.genfromtxt(data, skip_header=1)
+ assert_equal(test, np.array([]))
+
+ def test_fancy_dtype_alt(self):
+ # Check that a nested dtype isn't MIA
+ data = TextIO('1,2,3.0\n4,5,6.0\n')
+ fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])])
+ test = np.genfromtxt(data, dtype=fancydtype, delimiter=',', usemask=True)
+ control = ma.array([(1, (2, 3.0)), (4, (5, 6.0))], dtype=fancydtype)
+ assert_equal(test, control)
+
+ def test_shaped_dtype(self):
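+        # The 'block' field has subarray shape (2, 3), so it consumes six
+        # whitespace-separated tokens from each row.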
+ c = TextIO("aaaa 1.0 8.0 1 2 3 4 5 6")
+ dt = np.dtype([('name', 'S4'), ('x', float), ('y', float),
+ ('block', int, (2, 3))])
+ x = np.genfromtxt(c, dtype=dt)
+ a = np.array([('aaaa', 1.0, 8.0, [[1, 2, 3], [4, 5, 6]])],
+ dtype=dt)
+ assert_array_equal(x, a)
+
+ def test_withmissing(self):
+ data = TextIO('A,B\n0,1\n2,N/A')
+ kwargs = {"delimiter": ",", "missing_values": "N/A", "names": True}
+ test = np.genfromtxt(data, dtype=None, usemask=True, **kwargs)
+ control = ma.array([(0, 1), (2, -1)],
+ mask=[(False, False), (False, True)],
+ dtype=[('A', int), ('B', int)])
+ assert_equal(test, control)
+ assert_equal(test.mask, control.mask)
+ #
+ data.seek(0)
+ test = np.genfromtxt(data, usemask=True, **kwargs)
+ control = ma.array([(0, 1), (2, -1)],
+ mask=[(False, False), (False, True)],
+ dtype=[('A', float), ('B', float)])
+ assert_equal(test, control)
+ assert_equal(test.mask, control.mask)
+
+ def test_user_missing_values(self):
+ data = "A, B, C\n0, 0., 0j\n1, N/A, 1j\n-9, 2.2, N/A\n3, -99, 3j"
+ basekwargs = {"dtype": None, "delimiter": ",", "names": True}
+ mdtype = [('A', int), ('B', float), ('C', complex)]
+ #
+ test = np.genfromtxt(TextIO(data), missing_values="N/A",
+ **basekwargs)
+ control = ma.array([(0, 0.0, 0j), (1, -999, 1j),
+ (-9, 2.2, -999j), (3, -99, 3j)],
+ mask=[(0, 0, 0), (0, 1, 0), (0, 0, 1), (0, 0, 0)],
+ dtype=mdtype)
+ assert_equal(test, control)
+ #
+ basekwargs['dtype'] = mdtype
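+        # missing_values may also be a dict, keyed by column index (or, as
+        # below, by field name).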
+ test = np.genfromtxt(TextIO(data),
+ missing_values={0: -9, 1: -99, 2: -999j}, usemask=True, **basekwargs)
+ control = ma.array([(0, 0.0, 0j), (1, -999, 1j),
+ (-9, 2.2, -999j), (3, -99, 3j)],
+ mask=[(0, 0, 0), (0, 1, 0), (1, 0, 1), (0, 1, 0)],
+ dtype=mdtype)
+ assert_equal(test, control)
+ #
+ test = np.genfromtxt(TextIO(data),
+ missing_values={0: -9, 'B': -99, 'C': -999j},
+ usemask=True,
+ **basekwargs)
+ control = ma.array([(0, 0.0, 0j), (1, -999, 1j),
+ (-9, 2.2, -999j), (3, -99, 3j)],
+ mask=[(0, 0, 0), (0, 1, 0), (1, 0, 1), (0, 1, 0)],
+ dtype=mdtype)
+ assert_equal(test, control)
+
+ def test_user_filling_values(self):
+ # Test with missing and filling values
+ ctrl = np.array([(0, 3), (4, -999)], dtype=[('a', int), ('b', int)])
+ data = "N/A, 2, 3\n4, ,???"
+ kwargs = {"delimiter": ",",
+ "dtype": int,
+ "names": "a,b,c",
+ "missing_values": {0: "N/A", 'b': " ", 2: "???"},
+ "filling_values": {0: 0, 'b': 0, 2: -999}}
+ test = np.genfromtxt(TextIO(data), **kwargs)
+ ctrl = np.array([(0, 2, 3), (4, 0, -999)],
+ dtype=[(_, int) for _ in "abc"])
+ assert_equal(test, ctrl)
+ #
+ test = np.genfromtxt(TextIO(data), usecols=(0, -1), **kwargs)
+ ctrl = np.array([(0, 3), (4, -999)], dtype=[(_, int) for _ in "ac"])
+ assert_equal(test, ctrl)
+
+ data2 = "1,2,*,4\n5,*,7,8\n"
+ test = np.genfromtxt(TextIO(data2), delimiter=',', dtype=int,
+ missing_values="*", filling_values=0)
+ ctrl = np.array([[1, 2, 0, 4], [5, 0, 7, 8]])
+ assert_equal(test, ctrl)
+ test = np.genfromtxt(TextIO(data2), delimiter=',', dtype=int,
+ missing_values="*", filling_values=-1)
+ ctrl = np.array([[1, 2, -1, 4], [5, -1, 7, 8]])
+ assert_equal(test, ctrl)
+
+ def test_withmissing_float(self):
+ data = TextIO('A,B\n0,1.5\n2,-999.00')
+ test = np.genfromtxt(data, dtype=None, delimiter=',',
+ missing_values='-999.0', names=True, usemask=True)
+ control = ma.array([(0, 1.5), (2, -1.)],
+ mask=[(False, False), (False, True)],
+ dtype=[('A', int), ('B', float)])
+ assert_equal(test, control)
+ assert_equal(test.mask, control.mask)
+
+ def test_with_masked_column_uniform(self):
+ # Test masked column
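+        # A comma-separated string of missing values applies to all columns.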
+ data = TextIO('1 2 3\n4 5 6\n')
+ test = np.genfromtxt(data, dtype=None,
+ missing_values='2,5', usemask=True)
+ control = ma.array([[1, 2, 3], [4, 5, 6]], mask=[[0, 1, 0], [0, 1, 0]])
+ assert_equal(test, control)
+
+ def test_with_masked_column_various(self):
+ # Test masked column
+ data = TextIO('True 2 3\nFalse 5 6\n')
+ test = np.genfromtxt(data, dtype=None,
+ missing_values='2,5', usemask=True)
+ control = ma.array([(1, 2, 3), (0, 5, 6)],
+ mask=[(0, 1, 0), (0, 1, 0)],
+ dtype=[('f0', bool), ('f1', bool), ('f2', int)])
+ assert_equal(test, control)
+
+ def test_invalid_raise(self):
+        # Test invalid_raise
+ data = ["1, 1, 1, 1, 1"] * 50
+ for i in range(5):
+ data[10 * i] = "2, 2, 2, 2 2"
+ data.insert(0, "a, b, c, d, e")
+ mdata = TextIO("\n".join(data))
+
+ kwargs = {"delimiter": ",", "dtype": None, "names": True}
+
+ def f():
+ return np.genfromtxt(mdata, invalid_raise=False, **kwargs)
+ mtest = assert_warns(ConversionWarning, f)
+ assert_equal(len(mtest), 45)
+ assert_equal(mtest, np.ones(45, dtype=[(_, int) for _ in 'abcde']))
+ #
+ mdata.seek(0)
+ assert_raises(ValueError, np.genfromtxt, mdata,
+ delimiter=",", names=True)
+
+ def test_invalid_raise_with_usecols(self):
+ # Test invalid_raise with usecols
+ data = ["1, 1, 1, 1, 1"] * 50
+ for i in range(5):
+ data[10 * i] = "2, 2, 2, 2 2"
+ data.insert(0, "a, b, c, d, e")
+ mdata = TextIO("\n".join(data))
+
+ kwargs = {"delimiter": ",", "dtype": None, "names": True,
+ "invalid_raise": False}
+
+ def f():
+ return np.genfromtxt(mdata, usecols=(0, 4), **kwargs)
+ mtest = assert_warns(ConversionWarning, f)
+ assert_equal(len(mtest), 45)
+ assert_equal(mtest, np.ones(45, dtype=[(_, int) for _ in 'ae']))
+ #
+ mdata.seek(0)
+ mtest = np.genfromtxt(mdata, usecols=(0, 1), **kwargs)
+ assert_equal(len(mtest), 50)
+ control = np.ones(50, dtype=[(_, int) for _ in 'ab'])
+ control[[10 * _ for _ in range(5)]] = (2, 2)
+ assert_equal(mtest, control)
+
+ def test_inconsistent_dtype(self):
+ # Test inconsistent dtype
+ data = ["1, 1, 1, 1, -1.1"] * 50
+ mdata = TextIO("\n".join(data))
+
+ converters = {4: lambda x: f"({x.decode()})"}
+ kwargs = {"delimiter": ",", "converters": converters,
+ "dtype": [(_, int) for _ in 'abcde'], "encoding": "bytes"}
+ assert_raises(ValueError, np.genfromtxt, mdata, **kwargs)
+
+ def test_default_field_format(self):
+ # Test default format
+ data = "0, 1, 2.3\n4, 5, 6.7"
+ mtest = np.genfromtxt(TextIO(data),
+ delimiter=",", dtype=None, defaultfmt="f%02i")
+ ctrl = np.array([(0, 1, 2.3), (4, 5, 6.7)],
+ dtype=[("f00", int), ("f01", int), ("f02", float)])
+ assert_equal(mtest, ctrl)
+
+ def test_single_dtype_wo_names(self):
+ # Test single dtype w/o names
+ data = "0, 1, 2.3\n4, 5, 6.7"
+ mtest = np.genfromtxt(TextIO(data),
+ delimiter=",", dtype=float, defaultfmt="f%02i")
+ ctrl = np.array([[0., 1., 2.3], [4., 5., 6.7]], dtype=float)
+ assert_equal(mtest, ctrl)
+
+ def test_single_dtype_w_explicit_names(self):
+        # Test single dtype w/ explicit names
+ data = "0, 1, 2.3\n4, 5, 6.7"
+ mtest = np.genfromtxt(TextIO(data),
+ delimiter=",", dtype=float, names="a, b, c")
+ ctrl = np.array([(0., 1., 2.3), (4., 5., 6.7)],
+ dtype=[(_, float) for _ in "abc"])
+ assert_equal(mtest, ctrl)
+
+ def test_single_dtype_w_implicit_names(self):
+        # Test single dtype w/ implicit names
+ data = "a, b, c\n0, 1, 2.3\n4, 5, 6.7"
+ mtest = np.genfromtxt(TextIO(data),
+ delimiter=",", dtype=float, names=True)
+ ctrl = np.array([(0., 1., 2.3), (4., 5., 6.7)],
+ dtype=[(_, float) for _ in "abc"])
+ assert_equal(mtest, ctrl)
+
+ def test_easy_structured_dtype(self):
+ # Test easy structured dtype
+ data = "0, 1, 2.3\n4, 5, 6.7"
+ mtest = np.genfromtxt(TextIO(data), delimiter=",",
+ dtype=(int, float, float), defaultfmt="f_%02i")
+ ctrl = np.array([(0, 1., 2.3), (4, 5., 6.7)],
+ dtype=[("f_00", int), ("f_01", float), ("f_02", float)])
+ assert_equal(mtest, ctrl)
+
+ def test_autostrip(self):
+ # Test autostrip
+ data = "01/01/2003 , 1.3, abcde"
+ kwargs = {"delimiter": ",", "dtype": None, "encoding": "bytes"}
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always', '', VisibleDeprecationWarning)
+ mtest = np.genfromtxt(TextIO(data), **kwargs)
+ assert_(w[0].category is VisibleDeprecationWarning)
+ ctrl = np.array([('01/01/2003 ', 1.3, ' abcde')],
+ dtype=[('f0', '|S12'), ('f1', float), ('f2', '|S8')])
+ assert_equal(mtest, ctrl)
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always', '', VisibleDeprecationWarning)
+ mtest = np.genfromtxt(TextIO(data), autostrip=True, **kwargs)
+ assert_(w[0].category is VisibleDeprecationWarning)
+ ctrl = np.array([('01/01/2003', 1.3, 'abcde')],
+ dtype=[('f0', '|S10'), ('f1', float), ('f2', '|S5')])
+ assert_equal(mtest, ctrl)
+
+ def test_replace_space(self):
+ # Test the 'replace_space' option
+ txt = "A.A, B (B), C:C\n1, 2, 3.14"
+ # Test default: replace ' ' by '_' and delete non-alphanum chars
+ test = np.genfromtxt(TextIO(txt),
+ delimiter=",", names=True, dtype=None)
+ ctrl_dtype = [("AA", int), ("B_B", int), ("CC", float)]
+ ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype)
+ assert_equal(test, ctrl)
+ # Test: no replace, no delete
+ test = np.genfromtxt(TextIO(txt),
+ delimiter=",", names=True, dtype=None,
+ replace_space='', deletechars='')
+ ctrl_dtype = [("A.A", int), ("B (B)", int), ("C:C", float)]
+ ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype)
+ assert_equal(test, ctrl)
+ # Test: no delete (spaces are replaced by _)
+ test = np.genfromtxt(TextIO(txt),
+ delimiter=",", names=True, dtype=None,
+ deletechars='')
+ ctrl_dtype = [("A.A", int), ("B_(B)", int), ("C:C", float)]
+ ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype)
+ assert_equal(test, ctrl)
+
+ def test_replace_space_known_dtype(self):
+ # Test the 'replace_space' (and related) options when dtype != None
+ txt = "A.A, B (B), C:C\n1, 2, 3"
+ # Test default: replace ' ' by '_' and delete non-alphanum chars
+ test = np.genfromtxt(TextIO(txt),
+ delimiter=",", names=True, dtype=int)
+ ctrl_dtype = [("AA", int), ("B_B", int), ("CC", int)]
+ ctrl = np.array((1, 2, 3), dtype=ctrl_dtype)
+ assert_equal(test, ctrl)
+ # Test: no replace, no delete
+ test = np.genfromtxt(TextIO(txt),
+ delimiter=",", names=True, dtype=int,
+ replace_space='', deletechars='')
+ ctrl_dtype = [("A.A", int), ("B (B)", int), ("C:C", int)]
+ ctrl = np.array((1, 2, 3), dtype=ctrl_dtype)
+ assert_equal(test, ctrl)
+ # Test: no delete (spaces are replaced by _)
+ test = np.genfromtxt(TextIO(txt),
+ delimiter=",", names=True, dtype=int,
+ deletechars='')
+ ctrl_dtype = [("A.A", int), ("B_(B)", int), ("C:C", int)]
+ ctrl = np.array((1, 2, 3), dtype=ctrl_dtype)
+ assert_equal(test, ctrl)
+
+ def test_incomplete_names(self):
+ # Test w/ incomplete names
+ data = "A,,C\n0,1,2\n3,4,5"
+ kwargs = {"delimiter": ",", "names": True}
+ # w/ dtype=None
+ ctrl = np.array([(0, 1, 2), (3, 4, 5)],
+ dtype=[(_, int) for _ in ('A', 'f0', 'C')])
+ test = np.genfromtxt(TextIO(data), dtype=None, **kwargs)
+ assert_equal(test, ctrl)
+ # w/ default dtype
+ ctrl = np.array([(0, 1, 2), (3, 4, 5)],
+ dtype=[(_, float) for _ in ('A', 'f0', 'C')])
+        test = np.genfromtxt(TextIO(data), **kwargs)
+        assert_equal(test, ctrl)
+
+ def test_names_auto_completion(self):
+ # Make sure that names are properly completed
+ data = "1 2 3\n 4 5 6"
+ test = np.genfromtxt(TextIO(data),
+ dtype=(int, float, int), names="a")
+ ctrl = np.array([(1, 2, 3), (4, 5, 6)],
+ dtype=[('a', int), ('f0', float), ('f1', int)])
+ assert_equal(test, ctrl)
+
+ def test_names_with_usecols_bug1636(self):
+ # Make sure we pick up the right names w/ usecols
+ data = "A,B,C,D,E\n0,1,2,3,4\n0,1,2,3,4\n0,1,2,3,4"
+ ctrl_names = ("A", "C", "E")
+ test = np.genfromtxt(TextIO(data),
+ dtype=(int, int, int), delimiter=",",
+ usecols=(0, 2, 4), names=True)
+ assert_equal(test.dtype.names, ctrl_names)
+ #
+ test = np.genfromtxt(TextIO(data),
+ dtype=(int, int, int), delimiter=",",
+ usecols=("A", "C", "E"), names=True)
+ assert_equal(test.dtype.names, ctrl_names)
+ #
+ test = np.genfromtxt(TextIO(data),
+ dtype=int, delimiter=",",
+ usecols=("A", "C", "E"), names=True)
+ assert_equal(test.dtype.names, ctrl_names)
+
+ def test_fixed_width_names(self):
+        # Test fixed-width w/ names
+ data = " A B C\n 0 1 2.3\n 45 67 9."
+ kwargs = {"delimiter": (5, 5, 4), "names": True, "dtype": None}
+ ctrl = np.array([(0, 1, 2.3), (45, 67, 9.)],
+ dtype=[('A', int), ('B', int), ('C', float)])
+ test = np.genfromtxt(TextIO(data), **kwargs)
+ assert_equal(test, ctrl)
+ #
+ kwargs = {"delimiter": 5, "names": True, "dtype": None}
+ ctrl = np.array([(0, 1, 2.3), (45, 67, 9.)],
+ dtype=[('A', int), ('B', int), ('C', float)])
+ test = np.genfromtxt(TextIO(data), **kwargs)
+ assert_equal(test, ctrl)
+
+ def test_filling_values(self):
+ # Test missing values
+ data = b"1, 2, 3\n1, , 5\n0, 6, \n"
+ kwargs = {"delimiter": ",", "dtype": None, "filling_values": -999}
+ ctrl = np.array([[1, 2, 3], [1, -999, 5], [0, 6, -999]], dtype=int)
+ test = np.genfromtxt(TextIO(data), **kwargs)
+ assert_equal(test, ctrl)
+
+ def test_comments_is_none(self):
+ # Github issue 329 (None was previously being converted to 'None').
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always', '', VisibleDeprecationWarning)
+ test = np.genfromtxt(TextIO("test1,testNonetherestofthedata"),
+ dtype=None, comments=None, delimiter=',',
+ encoding="bytes")
+ assert_(w[0].category is VisibleDeprecationWarning)
+ assert_equal(test[1], b'testNonetherestofthedata')
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always', '', VisibleDeprecationWarning)
+ test = np.genfromtxt(TextIO("test1, testNonetherestofthedata"),
+ dtype=None, comments=None, delimiter=',',
+ encoding="bytes")
+ assert_(w[0].category is VisibleDeprecationWarning)
+ assert_equal(test[1], b' testNonetherestofthedata')
+
+ def test_latin1(self):
+ latin1 = b'\xf6\xfc\xf6'
+ norm = b"norm1,norm2,norm3\n"
+ enc = b"test1,testNonethe" + latin1 + b",test3\n"
+ s = norm + enc + norm
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always', '', VisibleDeprecationWarning)
+ test = np.genfromtxt(TextIO(s),
+ dtype=None, comments=None, delimiter=',',
+ encoding="bytes")
+ assert_(w[0].category is VisibleDeprecationWarning)
+ assert_equal(test[1, 0], b"test1")
+ assert_equal(test[1, 1], b"testNonethe" + latin1)
+ assert_equal(test[1, 2], b"test3")
+ test = np.genfromtxt(TextIO(s),
+ dtype=None, comments=None, delimiter=',',
+ encoding='latin1')
+ assert_equal(test[1, 0], "test1")
+ assert_equal(test[1, 1], "testNonethe" + latin1.decode('latin1'))
+ assert_equal(test[1, 2], "test3")
+
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always', '', VisibleDeprecationWarning)
+ test = np.genfromtxt(TextIO(b"0,testNonethe" + latin1),
+ dtype=None, comments=None, delimiter=',',
+ encoding="bytes")
+ assert_(w[0].category is VisibleDeprecationWarning)
+ assert_equal(test['f0'], 0)
+ assert_equal(test['f1'], b"testNonethe" + latin1)
+
+ def test_binary_decode_autodtype(self):
+ utf16 = b'\xff\xfeh\x04 \x00i\x04 \x00j\x04'
+ v = self.loadfunc(BytesIO(utf16), dtype=None, encoding='UTF-16')
+ assert_array_equal(v, np.array(utf16.decode('UTF-16').split()))
+
+ def test_utf8_byte_encoding(self):
+ utf8 = b"\xcf\x96"
+ norm = b"norm1,norm2,norm3\n"
+ enc = b"test1,testNonethe" + utf8 + b",test3\n"
+ s = norm + enc + norm
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always', '', VisibleDeprecationWarning)
+ test = np.genfromtxt(TextIO(s),
+ dtype=None, comments=None, delimiter=',',
+ encoding="bytes")
+ assert_(w[0].category is VisibleDeprecationWarning)
+ ctl = np.array([
+ [b'norm1', b'norm2', b'norm3'],
+ [b'test1', b'testNonethe' + utf8, b'test3'],
+ [b'norm1', b'norm2', b'norm3']])
+ assert_array_equal(test, ctl)
+
+ def test_utf8_file(self):
+ utf8 = b"\xcf\x96"
+ with temppath() as path:
+ with open(path, "wb") as f:
+ f.write((b"test1,testNonethe" + utf8 + b",test3\n") * 2)
+ test = np.genfromtxt(path, dtype=None, comments=None,
+ delimiter=',', encoding="UTF-8")
+ ctl = np.array([
+ ["test1", "testNonethe" + utf8.decode("UTF-8"), "test3"],
+ ["test1", "testNonethe" + utf8.decode("UTF-8"), "test3"]],
+ dtype=np.str_)
+ assert_array_equal(test, ctl)
+
+ # test a mixed dtype
+ with open(path, "wb") as f:
+ f.write(b"0,testNonethe" + utf8)
+ test = np.genfromtxt(path, dtype=None, comments=None,
+ delimiter=',', encoding="UTF-8")
+ assert_equal(test['f0'], 0)
+ assert_equal(test['f1'], "testNonethe" + utf8.decode("UTF-8"))
+
+ def test_utf8_file_nodtype_unicode(self):
+ # bytes encoding with non-latin1 -> unicode upcast
+ utf8 = '\u03d6'
+ latin1 = '\xf6\xfc\xf6'
+
+        # Skip the test if the utf8 test string cannot be encoded with the
+        # preferred encoding, which is assumed to be the default encoding
+        # of open. This may need to change for pytest, maybe by using
+        # pytest.mark.xfail(raises=***).
+ try:
+ encoding = locale.getpreferredencoding()
+ utf8.encode(encoding)
+ except (UnicodeError, ImportError):
+ pytest.skip('Skipping test_utf8_file_nodtype_unicode, '
+ 'unable to encode utf8 in preferred encoding')
+
+ with temppath() as path:
+ with open(path, "wt") as f:
+ f.write("norm1,norm2,norm3\n")
+ f.write("norm1," + latin1 + ",norm3\n")
+ f.write("test1,testNonethe" + utf8 + ",test3\n")
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always', '',
+ VisibleDeprecationWarning)
+ test = np.genfromtxt(path, dtype=None, comments=None,
+ delimiter=',', encoding="bytes")
+ # Check for warning when encoding not specified.
+ assert_(w[0].category is VisibleDeprecationWarning)
+ ctl = np.array([
+ ["norm1", "norm2", "norm3"],
+ ["norm1", latin1, "norm3"],
+ ["test1", "testNonethe" + utf8, "test3"]],
+ dtype=np.str_)
+ assert_array_equal(test, ctl)
+
+ @pytest.mark.filterwarnings("ignore:.*recfromtxt.*:DeprecationWarning")
+ def test_recfromtxt(self):
+ #
+ data = TextIO('A,B\n0,1\n2,3')
+ kwargs = {"delimiter": ",", "missing_values": "N/A", "names": True}
+ test = recfromtxt(data, **kwargs)
+ control = np.array([(0, 1), (2, 3)],
+ dtype=[('A', int), ('B', int)])
+ assert_(isinstance(test, np.recarray))
+ assert_equal(test, control)
+ #
+ data = TextIO('A,B\n0,1\n2,N/A')
+ test = recfromtxt(data, dtype=None, usemask=True, **kwargs)
+ control = ma.array([(0, 1), (2, -1)],
+ mask=[(False, False), (False, True)],
+ dtype=[('A', int), ('B', int)])
+ assert_equal(test, control)
+ assert_equal(test.mask, control.mask)
+ assert_equal(test.A, [0, 2])
+
+ @pytest.mark.filterwarnings("ignore:.*recfromcsv.*:DeprecationWarning")
+ def test_recfromcsv(self):
+ #
+ data = TextIO('A,B\n0,1\n2,3')
+ kwargs = {"missing_values": "N/A", "names": True, "case_sensitive": True,
+ "encoding": "bytes"}
+ test = recfromcsv(data, dtype=None, **kwargs)
+ control = np.array([(0, 1), (2, 3)],
+ dtype=[('A', int), ('B', int)])
+ assert_(isinstance(test, np.recarray))
+ assert_equal(test, control)
+ #
+ data = TextIO('A,B\n0,1\n2,N/A')
+ test = recfromcsv(data, dtype=None, usemask=True, **kwargs)
+ control = ma.array([(0, 1), (2, -1)],
+ mask=[(False, False), (False, True)],
+ dtype=[('A', int), ('B', int)])
+ assert_equal(test, control)
+ assert_equal(test.mask, control.mask)
+ assert_equal(test.A, [0, 2])
+ #
+ data = TextIO('A,B\n0,1\n2,3')
+ test = recfromcsv(data, missing_values='N/A',)
+ control = np.array([(0, 1), (2, 3)],
+ dtype=[('a', int), ('b', int)])
+ assert_(isinstance(test, np.recarray))
+ assert_equal(test, control)
+ #
+ data = TextIO('A,B\n0,1\n2,3')
+ dtype = [('a', int), ('b', float)]
+ test = recfromcsv(data, missing_values='N/A', dtype=dtype)
+ control = np.array([(0, 1), (2, 3)],
+ dtype=dtype)
+ assert_(isinstance(test, np.recarray))
+ assert_equal(test, control)
+
+ # gh-10394
+ data = TextIO('color\n"red"\n"blue"')
+ test = recfromcsv(data, converters={0: lambda x: x.strip('\"')})
+ control = np.array([('red',), ('blue',)], dtype=[('color', (str, 4))])
+ assert_equal(test.dtype, control.dtype)
+ assert_equal(test, control)
+
+ def test_max_rows(self):
+ # Test the `max_rows` keyword argument.
+ data = '1 2\n3 4\n5 6\n7 8\n9 10\n'
+ txt = TextIO(data)
+ a1 = np.genfromtxt(txt, max_rows=3)
+ a2 = np.genfromtxt(txt)
+ assert_equal(a1, [[1, 2], [3, 4], [5, 6]])
+ assert_equal(a2, [[7, 8], [9, 10]])
+
+ # max_rows must be at least 1.
+ assert_raises(ValueError, np.genfromtxt, TextIO(data), max_rows=0)
+
+ # An input with several invalid rows.
+ data = '1 1\n2 2\n0 \n3 3\n4 4\n5 \n6 \n7 \n'
+
+ test = np.genfromtxt(TextIO(data), max_rows=2)
+ control = np.array([[1., 1.], [2., 2.]])
+ assert_equal(test, control)
+
+ # Test keywords conflict
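+        # skip_footer and max_rows are mutually exclusive, presumably
+        # because skip_footer needs to read to the end of the input.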
+ assert_raises(ValueError, np.genfromtxt, TextIO(data), skip_footer=1,
+ max_rows=4)
+
+ # Test with invalid value
+ assert_raises(ValueError, np.genfromtxt, TextIO(data), max_rows=4)
+
+        # Test with invalid rows and invalid_raise=False
+ with suppress_warnings() as sup:
+ sup.filter(ConversionWarning)
+
+ test = np.genfromtxt(TextIO(data), max_rows=4, invalid_raise=False)
+ control = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]])
+ assert_equal(test, control)
+
+ test = np.genfromtxt(TextIO(data), max_rows=5, invalid_raise=False)
+ control = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]])
+ assert_equal(test, control)
+
+ # Structured array with field names.
+ data = 'a b\n#c d\n1 1\n2 2\n#0 \n3 3\n4 4\n5 5\n'
+
+ # Test with header, names and comments
+ txt = TextIO(data)
+ test = np.genfromtxt(txt, skip_header=1, max_rows=3, names=True)
+ control = np.array([(1.0, 1.0), (2.0, 2.0), (3.0, 3.0)],
+ dtype=[('c', '<f8'), ('d', '<f8')])
+ assert_equal(test, control)
+ # To continue reading the same "file", don't use skip_header or
+ # names, and use the previously determined dtype.
+ test = np.genfromtxt(txt, max_rows=None, dtype=test.dtype)
+ control = np.array([(4.0, 4.0), (5.0, 5.0)],
+ dtype=[('c', '<f8'), ('d', '<f8')])
+ assert_equal(test, control)
+
+ def test_gft_using_filename(self):
+ # Test that we can load data from a filename as well as a file
+ # object
+ tgt = np.arange(6).reshape((2, 3))
+ linesep = ('\n', '\r\n', '\r')
+
+ for sep in linesep:
+ data = '0 1 2' + sep + '3 4 5'
+ with temppath() as name:
+ with open(name, 'w') as f:
+ f.write(data)
+ res = np.genfromtxt(name)
+ assert_array_equal(res, tgt)
+
+ def test_gft_from_gzip(self):
+ # Test that we can load data from a gzipped file
+ wanted = np.arange(6).reshape((2, 3))
+ linesep = ('\n', '\r\n', '\r')
+
+        for sep in linesep:
+            data = '0 1 2' + sep + '3 4 5'
+            s = BytesIO()
+            with gzip.GzipFile(fileobj=s, mode='w') as g:
+                g.write(asbytes(data))
+
+            # Write the actual gzipped bytes so that genfromtxt exercises
+            # its transparent decompression of '.gz' files.
+            with temppath(suffix='.gz') as name:
+                with open(name, 'wb') as f:
+                    f.write(s.getvalue())
+                assert_array_equal(np.genfromtxt(name), wanted)
+
+ def test_gft_using_generator(self):
+        # genfromtxt ('gft') doesn't work with unicode here, so the
+        # generator yields bytes.
+ def count():
+ for i in range(10):
+ yield asbytes("%d" % i)
+
+ res = np.genfromtxt(count())
+ assert_array_equal(res, np.arange(10))
+
+ def test_auto_dtype_largeint(self):
+ # Regression test for numpy/numpy#5635 whereby large integers could
+ # cause OverflowErrors.
+
+ # Test the automatic definition of the output dtype
+ #
+ # 2**66 = 73786976294838206464 => should convert to float
+ # 2**34 = 17179869184 => should convert to int64
+ # 2**10 = 1024 => should convert to int (int32 on 32-bit systems,
+ # int64 on 64-bit systems)
+
+ data = TextIO('73786976294838206464 17179869184 1024')
+
+ test = np.genfromtxt(data, dtype=None)
+
+ assert_equal(test.dtype.names, ['f0', 'f1', 'f2'])
+
+ assert_(test.dtype['f0'] == float)
+ assert_(test.dtype['f1'] == np.int64)
+ assert_(test.dtype['f2'] == np.int_)
+
+ assert_allclose(test['f0'], 73786976294838206464.)
+ assert_equal(test['f1'], 17179869184)
+ assert_equal(test['f2'], 1024)
+
+ def test_unpack_float_data(self):
+ txt = TextIO("1,2,3\n4,5,6\n7,8,9\n0.0,1.0,2.0")
+ a, b, c = np.loadtxt(txt, delimiter=",", unpack=True)
+ assert_array_equal(a, np.array([1.0, 4.0, 7.0, 0.0]))
+ assert_array_equal(b, np.array([2.0, 5.0, 8.0, 1.0]))
+ assert_array_equal(c, np.array([3.0, 6.0, 9.0, 2.0]))
+
+ def test_unpack_structured(self):
+ # Regression test for gh-4341
+ # Unpacking should work on structured arrays
+ txt = TextIO("M 21 72\nF 35 58")
+ dt = {'names': ('a', 'b', 'c'), 'formats': ('S1', 'i4', 'f4')}
+ a, b, c = np.genfromtxt(txt, dtype=dt, unpack=True)
+ assert_equal(a.dtype, np.dtype('S1'))
+ assert_equal(b.dtype, np.dtype('i4'))
+ assert_equal(c.dtype, np.dtype('f4'))
+ assert_array_equal(a, np.array([b'M', b'F']))
+ assert_array_equal(b, np.array([21, 35]))
+ assert_array_equal(c, np.array([72., 58.]))
+
+ def test_unpack_auto_dtype(self):
+ # Regression test for gh-4341
+ # Unpacking should work when dtype=None
+ txt = TextIO("M 21 72.\nF 35 58.")
+ expected = (np.array(["M", "F"]), np.array([21, 35]), np.array([72., 58.]))
+ test = np.genfromtxt(txt, dtype=None, unpack=True, encoding="utf-8")
+ for arr, result in zip(expected, test):
+ assert_array_equal(arr, result)
+ assert_equal(arr.dtype, result.dtype)
+
+ def test_unpack_single_name(self):
+ # Regression test for gh-4341
+ # Unpacking should work when structured dtype has only one field
+ txt = TextIO("21\n35")
+ dt = {'names': ('a',), 'formats': ('i4',)}
+ expected = np.array([21, 35], dtype=np.int32)
+ test = np.genfromtxt(txt, dtype=dt, unpack=True)
+ assert_array_equal(expected, test)
+ assert_equal(expected.dtype, test.dtype)
+
+ def test_squeeze_scalar(self):
+ # Regression test for gh-4341
+ # Unpacking a scalar should give zero-dim output,
+ # even if dtype is structured
+ txt = TextIO("1")
+ dt = {'names': ('a',), 'formats': ('i4',)}
+ expected = np.array((1,), dtype=np.int32)
+ test = np.genfromtxt(txt, dtype=dt, unpack=True)
+ assert_array_equal(expected, test)
+ assert_equal((), test.shape)
+ assert_equal(expected.dtype, test.dtype)
+
+ @pytest.mark.parametrize("ndim", [0, 1, 2])
+ def test_ndmin_keyword(self, ndim: int):
+        # let's have the same behaviour for ndmin as loadtxt, as they
+        # should agree for non-missing values
+ txt = "42"
+
+ a = np.loadtxt(StringIO(txt), ndmin=ndim)
+ b = np.genfromtxt(StringIO(txt), ndmin=ndim)
+
+ assert_array_equal(a, b)
+
+
+class TestPathUsage:
+ # Test that pathlib.Path can be used
+ def test_loadtxt(self):
+ with temppath(suffix='.txt') as path:
+ path = Path(path)
+ a = np.array([[1.1, 2], [3, 4]])
+ np.savetxt(path, a)
+ x = np.loadtxt(path)
+ assert_array_equal(x, a)
+
+ def test_save_load(self):
+ # Test that pathlib.Path instances can be used with save.
+ with temppath(suffix='.npy') as path:
+ path = Path(path)
+ a = np.array([[1, 2], [3, 4]], int)
+ np.save(path, a)
+ data = np.load(path)
+ assert_array_equal(data, a)
+
+ def test_save_load_memmap(self):
+ # Test that pathlib.Path instances can be loaded mem-mapped.
+ with temppath(suffix='.npy') as path:
+ path = Path(path)
+ a = np.array([[1, 2], [3, 4]], int)
+ np.save(path, a)
+ data = np.load(path, mmap_mode='r')
+ assert_array_equal(data, a)
+ # close the mem-mapped file
+ del data
+ if IS_PYPY:
+ break_cycles()
+ break_cycles()
+
+ @pytest.mark.xfail(IS_WASM, reason="memmap doesn't work correctly")
+ @pytest.mark.parametrize("filename_type", [Path, str])
+ def test_save_load_memmap_readwrite(self, filename_type):
+ with temppath(suffix='.npy') as path:
+ path = filename_type(path)
+ a = np.array([[1, 2], [3, 4]], int)
+ np.save(path, a)
+ b = np.load(path, mmap_mode='r+')
+ a[0][0] = 5
+ b[0][0] = 5
+ del b # closes the file
+ if IS_PYPY:
+ break_cycles()
+ break_cycles()
+ data = np.load(path)
+ assert_array_equal(data, a)
+
+ @pytest.mark.parametrize("filename_type", [Path, str])
+ def test_savez_load(self, filename_type):
+ with temppath(suffix='.npz') as path:
+ path = filename_type(path)
+ np.savez(path, lab='place holder')
+ with np.load(path) as data:
+ assert_array_equal(data['lab'], 'place holder')
+
+ @pytest.mark.parametrize("filename_type", [Path, str])
+ def test_savez_compressed_load(self, filename_type):
+ with temppath(suffix='.npz') as path:
+ path = filename_type(path)
+ np.savez_compressed(path, lab='place holder')
+ data = np.load(path)
+ assert_array_equal(data['lab'], 'place holder')
+ data.close()
+
+ @pytest.mark.parametrize("filename_type", [Path, str])
+ def test_genfromtxt(self, filename_type):
+ with temppath(suffix='.txt') as path:
+ path = filename_type(path)
+ a = np.array([(1, 2), (3, 4)])
+ np.savetxt(path, a)
+ data = np.genfromtxt(path)
+ assert_array_equal(a, data)
+
+ @pytest.mark.parametrize("filename_type", [Path, str])
+ @pytest.mark.filterwarnings("ignore:.*recfromtxt.*:DeprecationWarning")
+ def test_recfromtxt(self, filename_type):
+ with temppath(suffix='.txt') as path:
+ path = filename_type(path)
+ with open(path, 'w') as f:
+ f.write('A,B\n0,1\n2,3')
+
+ kwargs = {"delimiter": ",", "missing_values": "N/A", "names": True}
+ test = recfromtxt(path, **kwargs)
+ control = np.array([(0, 1), (2, 3)],
+ dtype=[('A', int), ('B', int)])
+ assert_(isinstance(test, np.recarray))
+ assert_equal(test, control)
+
+ @pytest.mark.parametrize("filename_type", [Path, str])
+ @pytest.mark.filterwarnings("ignore:.*recfromcsv.*:DeprecationWarning")
+ def test_recfromcsv(self, filename_type):
+ with temppath(suffix='.txt') as path:
+ path = filename_type(path)
+ with open(path, 'w') as f:
+ f.write('A,B\n0,1\n2,3')
+
+ kwargs = {
+ "missing_values": "N/A", "names": True, "case_sensitive": True
+ }
+ test = recfromcsv(path, dtype=None, **kwargs)
+ control = np.array([(0, 1), (2, 3)],
+ dtype=[('A', int), ('B', int)])
+ assert_(isinstance(test, np.recarray))
+ assert_equal(test, control)
+
+
+def test_gzip_load():
+ a = np.random.random((5, 5))
+
+ s = BytesIO()
+ f = gzip.GzipFile(fileobj=s, mode="w")
+
+ np.save(f, a)
+ f.close()
+ s.seek(0)
+
+ f = gzip.GzipFile(fileobj=s, mode="r")
+ assert_array_equal(np.load(f), a)
+
+
+# These next two classes encode the minimal API needed to save()/load()
+# arrays; `test_ducktyping` below ensures that they work correctly.
+class JustWriter:
+ def __init__(self, base):
+ self.base = base
+
+ def write(self, s):
+ return self.base.write(s)
+
+ def flush(self):
+ return self.base.flush()
+
+class JustReader:
+ def __init__(self, base):
+ self.base = base
+
+ def read(self, n):
+ return self.base.read(n)
+
+ def seek(self, off, whence=0):
+ return self.base.seek(off, whence)
+
+
+def test_ducktyping():
+ a = np.random.random((5, 5))
+
+ s = BytesIO()
+ f = JustWriter(s)
+
+ np.save(f, a)
+ f.flush()
+ s.seek(0)
+
+ f = JustReader(s)
+ assert_array_equal(np.load(f), a)
+
+
+def test_gzip_loadtxt():
+    # Thanks to another Windows brokenness, we can't use
+    # NamedTemporaryFile: a file created by that function cannot be
+    # reopened by another open call. So we first build the gzipped bytes
+    # of the test reference array in memory, write them to a securely
+    # opened file, and then read that file back with loadtxt.
+ s = BytesIO()
+ g = gzip.GzipFile(fileobj=s, mode='w')
+ g.write(b'1 2 3\n')
+ g.close()
+
+ s.seek(0)
+ with temppath(suffix='.gz') as name:
+ with open(name, 'wb') as f:
+ f.write(s.read())
+ res = np.loadtxt(name)
+ s.close()
+
+ assert_array_equal(res, [1, 2, 3])
+
+
+def test_gzip_loadtxt_from_string():
+ s = BytesIO()
+ f = gzip.GzipFile(fileobj=s, mode="w")
+ f.write(b'1 2 3\n')
+ f.close()
+ s.seek(0)
+
+ f = gzip.GzipFile(fileobj=s, mode="r")
+ assert_array_equal(np.loadtxt(f), [1, 2, 3])
+
+
+def test_npzfile_dict():
+ s = BytesIO()
+ x = np.zeros((3, 3))
+ y = np.zeros((3, 3))
+
+ np.savez(s, x=x, y=y)
+ s.seek(0)
+
+ z = np.load(s)
+
+ assert_('x' in z)
+ assert_('y' in z)
+ assert_('x' in z.keys())
+ assert_('y' in z.keys())
+
+ for f, a in z.items():
+ assert_(f in ['x', 'y'])
+ assert_equal(a.shape, (3, 3))
+
+ for a in z.values():
+ assert_equal(a.shape, (3, 3))
+
+ assert_(len(z.items()) == 2)
+
+ for f in z:
+ assert_(f in ['x', 'y'])
+
+ assert_('x' in z.keys())
+ assert (z.get('x') == z['x']).all()
+
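+
+# Small illustrative sketch (not part of the upstream suite): the member
+# names of an NpzFile are also exposed via its `files` attribute.
+def test_npzfile_files_attribute():
+    s = BytesIO()
+    np.savez(s, x=np.zeros((3, 3)))
+    s.seek(0)
+    z = np.load(s)
+    assert z.files == ['x']
+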
+
+@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
+def test_load_refcount():
+ # Check that objects returned by np.load are directly freed based on
+ # their refcount, rather than needing the gc to collect them.
+
+ f = BytesIO()
+ np.savez(f, [1, 2, 3])
+ f.seek(0)
+
+ with assert_no_gc_cycles():
+ np.load(f)
+
+ f.seek(0)
+ dt = [("a", 'u1', 2), ("b", 'u1', 2)]
+ with assert_no_gc_cycles():
+ x = np.loadtxt(TextIO("0 1 2 3"), dtype=dt)
+ assert_equal(x, np.array([((0, 1), (2, 3))], dtype=dt))
+
+
+def test_load_multiple_arrays_until_eof():
+ f = BytesIO()
+ np.save(f, 1)
+ np.save(f, 2)
+ f.seek(0)
+ out1 = np.load(f)
+ assert out1 == 1
+ out2 = np.load(f)
+ assert out2 == 2
+ with pytest.raises(EOFError):
+ np.load(f)
+
+
+def test_savez_nopickle():
+ obj_array = np.array([1, 'hello'], dtype=object)
+ with temppath(suffix='.npz') as tmp:
+ np.savez(tmp, obj_array)
+
+ with temppath(suffix='.npz') as tmp:
+ with pytest.raises(ValueError, match="Object arrays cannot be saved when.*"):
+ np.savez(tmp, obj_array, allow_pickle=False)
+
+ with temppath(suffix='.npz') as tmp:
+ np.savez_compressed(tmp, obj_array)
+
+ with temppath(suffix='.npz') as tmp:
+ with pytest.raises(ValueError, match="Object arrays cannot be saved when.*"):
+ np.savez_compressed(tmp, obj_array, allow_pickle=False)
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_loadtxt.py b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_loadtxt.py
new file mode 100644
index 0000000..a2022a0
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_loadtxt.py
@@ -0,0 +1,1101 @@
+"""
+Tests specific to `np.loadtxt` added during the move of loadtxt to be backed
+by C code.
+These tests complement those found in `test_io.py`.
+"""
+
+import os
+import sys
+from io import StringIO
+from tempfile import NamedTemporaryFile, mkstemp
+
+import pytest
+
+import numpy as np
+from numpy.ma.testutils import assert_equal
+from numpy.testing import HAS_REFCOUNT, IS_PYPY, assert_array_equal
+
+
+def test_scientific_notation():
+ """Test that both 'e' and 'E' are parsed correctly."""
+    data = StringIO(
+        "1.0e-1,2.0E1,3.0\n"
+        "4.0e-2,5.0E-1,6.0\n"
+        "7.0e-3,8.0E1,9.0\n"
+        "0.0e-4,1.0E-1,2.0"
+    )
+ expected = np.array(
+ [[0.1, 20., 3.0], [0.04, 0.5, 6], [0.007, 80., 9], [0, 0.1, 2]]
+ )
+ assert_array_equal(np.loadtxt(data, delimiter=","), expected)
+
+
+@pytest.mark.parametrize("comment", ["..", "//", "@-", "this is a comment:"])
+def test_comment_multiple_chars(comment):
+ content = "# IGNORE\n1.5, 2.5# ABC\n3.0,4.0# XXX\n5.5,6.0\n"
+ txt = StringIO(content.replace("#", comment))
+ a = np.loadtxt(txt, delimiter=",", comments=comment)
+ assert_equal(a, [[1.5, 2.5], [3.0, 4.0], [5.5, 6.0]])
+
+
+@pytest.fixture
+def mixed_types_structured():
+ """
+ Fixture providing heterogeneous input data with a structured dtype, along
+ with the associated structured array.
+ """
+    data = StringIO(
+        "1000;2.4;alpha;-34\n"
+        "2000;3.1;beta;29\n"
+        "3500;9.9;gamma;120\n"
+        "4090;8.1;delta;0\n"
+        "5001;4.4;epsilon;-99\n"
+        "6543;7.8;omega;-1\n"
+    )
+ dtype = np.dtype(
+ [('f0', np.uint16), ('f1', np.float64), ('f2', 'S7'), ('f3', np.int8)]
+ )
+ expected = np.array(
+ [
+ (1000, 2.4, "alpha", -34),
+ (2000, 3.1, "beta", 29),
+ (3500, 9.9, "gamma", 120),
+ (4090, 8.1, "delta", 0),
+ (5001, 4.4, "epsilon", -99),
+ (6543, 7.8, "omega", -1)
+ ],
+ dtype=dtype
+ )
+ return data, dtype, expected
+
+
+@pytest.mark.parametrize('skiprows', [0, 1, 2, 3])
+def test_structured_dtype_and_skiprows_no_empty_lines(
+ skiprows, mixed_types_structured):
+ data, dtype, expected = mixed_types_structured
+ a = np.loadtxt(data, dtype=dtype, delimiter=";", skiprows=skiprows)
+ assert_array_equal(a, expected[skiprows:])
+
+
+def test_unpack_structured(mixed_types_structured):
+ data, dtype, expected = mixed_types_structured
+
+ a, b, c, d = np.loadtxt(data, dtype=dtype, delimiter=";", unpack=True)
+ assert_array_equal(a, expected["f0"])
+ assert_array_equal(b, expected["f1"])
+ assert_array_equal(c, expected["f2"])
+ assert_array_equal(d, expected["f3"])
+
+
+def test_structured_dtype_with_shape():
+ dtype = np.dtype([("a", "u1", 2), ("b", "u1", 2)])
+ data = StringIO("0,1,2,3\n6,7,8,9\n")
+ expected = np.array([((0, 1), (2, 3)), ((6, 7), (8, 9))], dtype=dtype)
+ assert_array_equal(np.loadtxt(data, delimiter=",", dtype=dtype), expected)
+
+
+def test_structured_dtype_with_multi_shape():
+ dtype = np.dtype([("a", "u1", (2, 2))])
+ data = StringIO("0 1 2 3\n")
+ expected = np.array([(((0, 1), (2, 3)),)], dtype=dtype)
+ assert_array_equal(np.loadtxt(data, dtype=dtype), expected)
+
+
+def test_nested_structured_subarray():
+ # Test from gh-16678
+ point = np.dtype([('x', float), ('y', float)])
+ dt = np.dtype([('code', int), ('points', point, (2,))])
+ data = StringIO("100,1,2,3,4\n200,5,6,7,8\n")
+ expected = np.array(
+ [
+ (100, [(1., 2.), (3., 4.)]),
+ (200, [(5., 6.), (7., 8.)]),
+ ],
+ dtype=dt
+ )
+ assert_array_equal(np.loadtxt(data, dtype=dt, delimiter=","), expected)
+
+
+def test_structured_dtype_offsets():
+ # An aligned structured dtype will have additional padding
+ dt = np.dtype("i1, i4, i1, i4, i1, i4", align=True)
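+    # With align=True each i4 field sits at a 4-byte boundary, so the dtype
+    # contains padding bytes; parsing must honor the field offsets rather
+    # than assume a packed layout.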
+ data = StringIO("1,2,3,4,5,6\n7,8,9,10,11,12\n")
+ expected = np.array([(1, 2, 3, 4, 5, 6), (7, 8, 9, 10, 11, 12)], dtype=dt)
+ assert_array_equal(np.loadtxt(data, delimiter=",", dtype=dt), expected)
+
+
+@pytest.mark.parametrize("param", ("skiprows", "max_rows"))
+def test_exception_negative_row_limits(param):
+ """skiprows and max_rows should raise for negative parameters."""
+ with pytest.raises(ValueError, match="argument must be nonnegative"):
+ np.loadtxt("foo.bar", **{param: -3})
+
+
+@pytest.mark.parametrize("param", ("skiprows", "max_rows"))
+def test_exception_noninteger_row_limits(param):
+ with pytest.raises(TypeError, match="argument must be an integer"):
+ np.loadtxt("foo.bar", **{param: 1.0})
+
+
+@pytest.mark.parametrize(
+ "data, shape",
+ [
+ ("1 2 3 4 5\n", (1, 5)), # Single row
+ ("1\n2\n3\n4\n5\n", (5, 1)), # Single column
+ ]
+)
+def test_ndmin_single_row_or_col(data, shape):
+ arr = np.array([1, 2, 3, 4, 5])
+ arr2d = arr.reshape(shape)
+
+ assert_array_equal(np.loadtxt(StringIO(data), dtype=int), arr)
+ assert_array_equal(np.loadtxt(StringIO(data), dtype=int, ndmin=0), arr)
+ assert_array_equal(np.loadtxt(StringIO(data), dtype=int, ndmin=1), arr)
+ assert_array_equal(np.loadtxt(StringIO(data), dtype=int, ndmin=2), arr2d)
+
+
+@pytest.mark.parametrize("badval", [-1, 3, None, "plate of shrimp"])
+def test_bad_ndmin(badval):
+ with pytest.raises(ValueError, match="Illegal value of ndmin keyword"):
+ np.loadtxt("foo.bar", ndmin=badval)
+
+
+@pytest.mark.parametrize(
+ "ws",
+ (
+ " ", # space
+ "\t", # tab
+ "\u2003", # em
+ "\u00A0", # non-break
+ "\u3000", # ideographic space
+ )
+)
+def test_blank_lines_spaces_delimit(ws):
+ txt = StringIO(
+ f"1 2{ws}30\n\n{ws}\n"
+ f"4 5 60{ws}\n {ws} \n"
+ f"7 8 {ws} 90\n # comment\n"
+ f"3 2 1"
+ )
+    # NOTE: It is unclear that the ` # comment` should succeed. Except
+    # for delimiter=None, which should use any whitespace (and maybe
+    # should just be implemented closer to Python's own str.split).
+ expected = np.array([[1, 2, 30], [4, 5, 60], [7, 8, 90], [3, 2, 1]])
+ assert_equal(
+ np.loadtxt(txt, dtype=int, delimiter=None, comments="#"), expected
+ )
+
+
+def test_blank_lines_normal_delimiter():
+ txt = StringIO('1,2,30\n\n4,5,60\n\n7,8,90\n# comment\n3,2,1')
+ expected = np.array([[1, 2, 30], [4, 5, 60], [7, 8, 90], [3, 2, 1]])
+ assert_equal(
+ np.loadtxt(txt, dtype=int, delimiter=',', comments="#"), expected
+ )
+
+
+@pytest.mark.parametrize("dtype", (float, object))
+def test_maxrows_no_blank_lines(dtype):
+ txt = StringIO("1.5,2.5\n3.0,4.0\n5.5,6.0")
+ res = np.loadtxt(txt, dtype=dtype, delimiter=",", max_rows=2)
+ assert_equal(res.dtype, dtype)
+ assert_equal(res, np.array([["1.5", "2.5"], ["3.0", "4.0"]], dtype=dtype))
+
+
+@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8),
+ reason="PyPy bug in error formatting")
+@pytest.mark.parametrize("dtype", (np.dtype("f8"), np.dtype("i2")))
+def test_exception_message_bad_values(dtype):
+ txt = StringIO("1,2\n3,XXX\n5,6")
+ msg = f"could not convert string 'XXX' to {dtype} at row 1, column 2"
+ with pytest.raises(ValueError, match=msg):
+ np.loadtxt(txt, dtype=dtype, delimiter=",")
+
+
+def test_converters_negative_indices():
+ txt = StringIO('1.5,2.5\n3.0,XXX\n5.5,6.0')
+ conv = {-1: lambda s: np.nan if s == 'XXX' else float(s)}
+ expected = np.array([[1.5, 2.5], [3.0, np.nan], [5.5, 6.0]])
+ res = np.loadtxt(txt, dtype=np.float64, delimiter=",", converters=conv)
+ assert_equal(res, expected)
+
+
+def test_converters_negative_indices_with_usecols():
+ txt = StringIO('1.5,2.5,3.5\n3.0,4.0,XXX\n5.5,6.0,7.5\n')
+ conv = {-1: lambda s: np.nan if s == 'XXX' else float(s)}
+ expected = np.array([[1.5, 3.5], [3.0, np.nan], [5.5, 7.5]])
+ res = np.loadtxt(
+ txt,
+ dtype=np.float64,
+ delimiter=",",
+ converters=conv,
+ usecols=[0, -1],
+ )
+ assert_equal(res, expected)
+
+    # Second test with a varying number of columns:
+ res = np.loadtxt(StringIO('''0,1,2\n0,1,2,3,4'''), delimiter=",",
+ usecols=[0, -1], converters={-1: (lambda x: -1)})
+ assert_array_equal(res, [[0, -1], [0, -1]])
+
+
+def test_ragged_error():
+ rows = ["1,2,3", "1,2,3", "4,3,2,1"]
+ with pytest.raises(ValueError,
+ match="the number of columns changed from 3 to 4 at row 3"):
+ np.loadtxt(rows, delimiter=",")
+
+
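+# Minimal illustrative sketch (not part of the upstream suite): as used in
+# test_ragged_error above, loadtxt accepts any iterable of row strings, not
+# just file objects.
+def test_rows_from_list_of_strings():
+    res = np.loadtxt(["1,2", "3,4"], delimiter=",")
+    assert_array_equal(res, [[1.0, 2.0], [3.0, 4.0]])
+
+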
+def test_ragged_usecols():
+    # usecols, including negative ones, work even with a varying number of
+    # columns.
+ txt = StringIO("0,0,XXX\n0,XXX,0,XXX\n0,XXX,XXX,0,XXX\n")
+ expected = np.array([[0, 0], [0, 0], [0, 0]])
+ res = np.loadtxt(txt, dtype=float, delimiter=",", usecols=[0, -2])
+ assert_equal(res, expected)
+
+ txt = StringIO("0,0,XXX\n0\n0,XXX,XXX,0,XXX\n")
+ with pytest.raises(ValueError,
+ match="invalid column index -2 at row 2 with 1 columns"):
+ # There is no -2 column in the second row:
+ np.loadtxt(txt, dtype=float, delimiter=",", usecols=[0, -2])
+
+
+def test_empty_usecols():
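+    # A structured dtype with no fields still yields one (empty) element
+    # per row.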
+ txt = StringIO("0,0,XXX\n0,XXX,0,XXX\n0,XXX,XXX,0,XXX\n")
+ res = np.loadtxt(txt, dtype=np.dtype([]), delimiter=",", usecols=[])
+ assert res.shape == (3,)
+ assert res.dtype == np.dtype([])
+
+
+@pytest.mark.parametrize("c1", ["a", "の", "🫕"])
+@pytest.mark.parametrize("c2", ["a", "の", "🫕"])
+def test_large_unicode_characters(c1, c2):
+ # c1 and c2 span ascii, 16bit and 32bit range.
+ txt = StringIO(f"a,{c1},c,1.0\ne,{c2},2.0,g")
+ res = np.loadtxt(txt, dtype=np.dtype('U12'), delimiter=",")
+ expected = np.array(
+ [f"a,{c1},c,1.0".split(","), f"e,{c2},2.0,g".split(",")],
+ dtype=np.dtype('U12')
+ )
+ assert_equal(res, expected)
+
+
+def test_unicode_with_converter():
+ txt = StringIO("cat,dog\nαβγ,δεζ\nabc,def\n")
+ conv = {0: lambda s: s.upper()}
+ res = np.loadtxt(
+ txt,
+ dtype=np.dtype("U12"),
+ converters=conv,
+ delimiter=",",
+ encoding=None
+ )
+ expected = np.array([['CAT', 'dog'], ['ΑΒΓ', 'δεζ'], ['ABC', 'def']])
+ assert_equal(res, expected)
+
+
+def test_converter_with_structured_dtype():
+ txt = StringIO('1.5,2.5,Abc\n3.0,4.0,dEf\n5.5,6.0,ghI\n')
+ dt = np.dtype([('m', np.int32), ('r', np.float32), ('code', 'U8')])
+ conv = {0: lambda s: int(10 * float(s)), -1: lambda s: s.upper()}
+ res = np.loadtxt(txt, dtype=dt, delimiter=",", converters=conv)
+ expected = np.array(
+ [(15, 2.5, 'ABC'), (30, 4.0, 'DEF'), (55, 6.0, 'GHI')], dtype=dt
+ )
+ assert_equal(res, expected)
+
+
+def test_converter_with_unicode_dtype():
+ """
+ With the 'bytes' encoding, tokens are encoded prior to being
+ passed to the converter. This means that the output of the converter may
+ be bytes instead of unicode as expected by `read_rows`.
+
+ This test checks that outputs from the above scenario are properly decoded
+ prior to parsing by `read_rows`.
+ """
+ txt = StringIO('abc,def\nrst,xyz')
+ conv = bytes.upper
+ res = np.loadtxt(
+ txt, dtype=np.dtype("U3"), converters=conv, delimiter=",",
+ encoding="bytes")
+ expected = np.array([['ABC', 'DEF'], ['RST', 'XYZ']])
+ assert_equal(res, expected)
+
+
+def test_read_huge_row():
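+    # Each row holds 100,000 comma-separated fields, which exercises the
+    # reader's buffer growth for very long lines.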
+ row = "1.5, 2.5," * 50000
+ row = row[:-1] + "\n"
+ txt = StringIO(row * 2)
+ res = np.loadtxt(txt, delimiter=",", dtype=float)
+ assert_equal(res, np.tile([1.5, 2.5], (2, 50000)))
+
+
+@pytest.mark.parametrize("dtype", "edfgFDG")
+def test_huge_float(dtype):
+ # Covers a non-optimized path that is rarely taken:
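+    # a ~1000-digit field should be too long for any fast float-parsing path.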
+ field = "0" * 1000 + ".123456789"
+ dtype = np.dtype(dtype)
+ value = np.loadtxt([field], dtype=dtype)[()]
+ assert value == dtype.type("0.123456789")
+
+
+@pytest.mark.parametrize(
+ ("given_dtype", "expected_dtype"),
+ [
+ ("S", np.dtype("S5")),
+ ("U", np.dtype("U5")),
+ ],
+)
+def test_string_no_length_given(given_dtype, expected_dtype):
+ """
+ The given dtype is just 'S' or 'U' with no length. In these cases, the
+ length of the resulting dtype is determined by the longest string found
+ in the file.
+ """
+ txt = StringIO("AAA,5-1\nBBBBB,0-3\nC,4-9\n")
+ res = np.loadtxt(txt, dtype=given_dtype, delimiter=",")
+ expected = np.array(
+ [['AAA', '5-1'], ['BBBBB', '0-3'], ['C', '4-9']], dtype=expected_dtype
+ )
+ assert_equal(res, expected)
+ assert_equal(res.dtype, expected_dtype)
+
+
+def test_float_conversion():
+ """
+ Some tests that the conversion to float64 works as accurately as the
+ Python built-in `float` function. In a naive version of the float parser,
+ these strings resulted in values that were off by an ULP or two.
+ """
+ strings = [
+ '0.9999999999999999',
+ '9876543210.123456',
+ '5.43215432154321e+300',
+ '0.901',
+ '0.333',
+ ]
+ txt = StringIO('\n'.join(strings))
+ res = np.loadtxt(txt)
+ expected = np.array([float(s) for s in strings])
+ assert_equal(res, expected)
+
+
+def test_bool():
+ # Simple test for bool via integer
+ txt = StringIO("1, 0\n10, -1")
+ res = np.loadtxt(txt, dtype=bool, delimiter=",")
+ assert res.dtype == bool
+ assert_array_equal(res, [[True, False], [True, True]])
+ # Make sure we use only 1 and 0 on the byte level:
+ assert_array_equal(res.view(np.uint8), [[1, 0], [1, 1]])
+
+
+@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8),
+ reason="PyPy bug in error formatting")
+@pytest.mark.parametrize("dtype", np.typecodes["AllInteger"])
+@pytest.mark.filterwarnings("error:.*integer via a float.*:DeprecationWarning")
+def test_integer_signs(dtype):
+ dtype = np.dtype(dtype)
+ assert np.loadtxt(["+2"], dtype=dtype) == 2
+ if dtype.kind == "u":
+ with pytest.raises(ValueError):
+ np.loadtxt(["-1\n"], dtype=dtype)
+ else:
+ assert np.loadtxt(["-2\n"], dtype=dtype) == -2
+
+ for sign in ["++", "+-", "--", "-+"]:
+ with pytest.raises(ValueError):
+ np.loadtxt([f"{sign}2\n"], dtype=dtype)
+
+
+@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8),
+ reason="PyPy bug in error formatting")
+@pytest.mark.parametrize("dtype", np.typecodes["AllInteger"])
+@pytest.mark.filterwarnings("error:.*integer via a float.*:DeprecationWarning")
+def test_implicit_cast_float_to_int_fails(dtype):
+ txt = StringIO("1.0, 2.1, 3.7\n4, 5, 6")
+ with pytest.raises(ValueError):
+ np.loadtxt(txt, dtype=dtype, delimiter=",")
+
+
+@pytest.mark.parametrize("dtype", (np.complex64, np.complex128))
+@pytest.mark.parametrize("with_parens", (False, True))
+def test_complex_parsing(dtype, with_parens):
+ s = "(1.0-2.5j),3.75,(7+-5.0j)\n(4),(-19e2j),(0)"
+ if not with_parens:
+ s = s.replace("(", "").replace(")", "")
+
+ res = np.loadtxt(StringIO(s), dtype=dtype, delimiter=",")
+ expected = np.array(
+ [[1.0 - 2.5j, 3.75, 7 - 5j], [4.0, -1900j, 0]], dtype=dtype
+ )
+ assert_equal(res, expected)
+
+
+def test_read_from_generator():
+ def gen():
+ for i in range(4):
+ yield f"{i},{2 * i},{i**2}"
+
+ res = np.loadtxt(gen(), dtype=int, delimiter=",")
+ expected = np.array([[0, 0, 0], [1, 2, 1], [2, 4, 4], [3, 6, 9]])
+ assert_equal(res, expected)
+
+
+def test_read_from_generator_multitype():
+ def gen():
+ for i in range(3):
+ yield f"{i} {i / 4}"
+
+ res = np.loadtxt(gen(), dtype="i, d", delimiter=" ")
+ expected = np.array([(0, 0.0), (1, 0.25), (2, 0.5)], dtype="i, d")
+ assert_equal(res, expected)
+
+
+def test_read_from_bad_generator():
+ def gen():
+ yield from ["1,2", b"3, 5", 12738]
+
+ with pytest.raises(
+ TypeError, match=r"non-string returned while reading data"):
+ np.loadtxt(gen(), dtype="i, i", delimiter=",")
+
+
+@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
+def test_object_cleanup_on_read_error():
+ sentinel = object()
+ already_read = 0
+
+ def conv(x):
+ nonlocal already_read
+ if already_read > 4999:
+ raise ValueError("failed half-way through!")
+ already_read += 1
+ return sentinel
+
+ txt = StringIO("x\n" * 10000)
+
+ with pytest.raises(ValueError, match="at row 5000, column 1"):
+ np.loadtxt(txt, dtype=object, converters={0: conv})
+
+ assert sys.getrefcount(sentinel) == 2
+
+
+@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8),
+ reason="PyPy bug in error formatting")
+def test_character_not_bytes_compatible():
+ """Test exception when a character cannot be encoded as 'S'."""
+ data = StringIO("–") # == \u2013
+ with pytest.raises(ValueError):
+ np.loadtxt(data, dtype="S5")
+
+
+@pytest.mark.parametrize("conv", (0, [float], ""))
+def test_invalid_converter(conv):
+ msg = (
+ "converters must be a dictionary mapping columns to converter "
+ "functions or a single callable."
+ )
+ with pytest.raises(TypeError, match=msg):
+ np.loadtxt(StringIO("1 2\n3 4"), converters=conv)
+
+
+@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8),
+ reason="PyPy bug in error formatting")
+def test_converters_dict_raises_non_integer_key():
+ with pytest.raises(TypeError, match="keys of the converters dict"):
+ np.loadtxt(StringIO("1 2\n3 4"), converters={"a": int})
+ with pytest.raises(TypeError, match="keys of the converters dict"):
+ np.loadtxt(StringIO("1 2\n3 4"), converters={"a": int}, usecols=0)
+
+
+@pytest.mark.parametrize("bad_col_ind", (3, -3))
+def test_converters_dict_raises_non_col_key(bad_col_ind):
+ data = StringIO("1 2\n3 4")
+ with pytest.raises(ValueError, match="converter specified for column"):
+ np.loadtxt(data, converters={bad_col_ind: int})
+
+
+def test_converters_dict_raises_val_not_callable():
+ with pytest.raises(TypeError,
+ match="values of the converters dictionary must be callable"):
+ np.loadtxt(StringIO("1 2\n3 4"), converters={0: 1})
+
+
+@pytest.mark.parametrize("q", ('"', "'", "`"))
+def test_quoted_field(q):
+ txt = StringIO(
+ f"{q}alpha, x{q}, 2.5\n{q}beta, y{q}, 4.5\n{q}gamma, z{q}, 5.0\n"
+ )
+ dtype = np.dtype([('f0', 'U8'), ('f1', np.float64)])
+ expected = np.array(
+ [("alpha, x", 2.5), ("beta, y", 4.5), ("gamma, z", 5.0)], dtype=dtype
+ )
+
+ res = np.loadtxt(txt, dtype=dtype, delimiter=",", quotechar=q)
+ assert_array_equal(res, expected)
+
+
+@pytest.mark.parametrize("q", ('"', "'", "`"))
+def test_quoted_field_with_whitespace_delimiter(q):
+ txt = StringIO(
+ f"{q}alpha, x{q} 2.5\n{q}beta, y{q} 4.5\n{q}gamma, z{q} 5.0\n"
+ )
+ dtype = np.dtype([('f0', 'U8'), ('f1', np.float64)])
+ expected = np.array(
+ [("alpha, x", 2.5), ("beta, y", 4.5), ("gamma, z", 5.0)], dtype=dtype
+ )
+
+ res = np.loadtxt(txt, dtype=dtype, delimiter=None, quotechar=q)
+ assert_array_equal(res, expected)
+
+
+def test_quote_support_default():
+ """Support for quoted fields is disabled by default."""
+ txt = StringIO('"lat,long", 45, 30\n')
+ dtype = np.dtype([('f0', 'U24'), ('f1', np.float64), ('f2', np.float64)])
+
+ with pytest.raises(ValueError,
+ match="the dtype passed requires 3 columns but 4 were"):
+ np.loadtxt(txt, dtype=dtype, delimiter=",")
+
+ # Enable quoting support with non-None value for quotechar param
+ txt.seek(0)
+ expected = np.array([("lat,long", 45., 30.)], dtype=dtype)
+
+ res = np.loadtxt(txt, dtype=dtype, delimiter=",", quotechar='"')
+ assert_array_equal(res, expected)
+
+
+@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8),
+ reason="PyPy bug in error formatting")
+def test_quotechar_multichar_error():
+ txt = StringIO("1,2\n3,4")
+ msg = r".*must be a single unicode character or None"
+ with pytest.raises(TypeError, match=msg):
+ np.loadtxt(txt, delimiter=",", quotechar="''")
+
+
+def test_comment_multichar_error_with_quote():
+ txt = StringIO("1,2\n3,4")
+ msg = (
+ "when multiple comments or a multi-character comment is given, "
+ "quotes are not supported."
+ )
+ with pytest.raises(ValueError, match=msg):
+ np.loadtxt(txt, delimiter=",", comments="123", quotechar='"')
+ with pytest.raises(ValueError, match=msg):
+ np.loadtxt(txt, delimiter=",", comments=["#", "%"], quotechar='"')
+
+ # A single character string in a tuple is unpacked though:
+ res = np.loadtxt(txt, delimiter=",", comments=("#",), quotechar="'")
+ assert_equal(res, [[1, 2], [3, 4]])
+
+
+def test_structured_dtype_with_quotes():
+    data = StringIO(
+        "1000;2.4;'alpha';-34\n"
+        "2000;3.1;'beta';29\n"
+        "3500;9.9;'gamma';120\n"
+        "4090;8.1;'delta';0\n"
+        "5001;4.4;'epsilon';-99\n"
+        "6543;7.8;'omega';-1\n"
+    )
+ dtype = np.dtype(
+ [('f0', np.uint16), ('f1', np.float64), ('f2', 'S7'), ('f3', np.int8)]
+ )
+ expected = np.array(
+ [
+ (1000, 2.4, "alpha", -34),
+ (2000, 3.1, "beta", 29),
+ (3500, 9.9, "gamma", 120),
+ (4090, 8.1, "delta", 0),
+ (5001, 4.4, "epsilon", -99),
+ (6543, 7.8, "omega", -1)
+ ],
+ dtype=dtype
+ )
+ res = np.loadtxt(data, dtype=dtype, delimiter=";", quotechar="'")
+ assert_array_equal(res, expected)
+
+
+def test_quoted_field_is_not_empty():
+ txt = StringIO('1\n\n"4"\n""')
+ expected = np.array(["1", "4", ""], dtype="U1")
+ res = np.loadtxt(txt, delimiter=",", dtype="U1", quotechar='"')
+ assert_equal(res, expected)
+
+
+def test_quoted_field_is_not_empty_nonstrict():
+ # Same as test_quoted_field_is_not_empty but check that we are not strict
+ # about missing closing quote (this is the `csv.reader` default also)
+ txt = StringIO('1\n\n"4"\n"')
+ expected = np.array(["1", "4", ""], dtype="U1")
+ res = np.loadtxt(txt, delimiter=",", dtype="U1", quotechar='"')
+ assert_equal(res, expected)
+
+
+def test_consecutive_quotechar_escaped():
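+    # A doubled quotechar inside a quoted field is the usual csv-style escape
+    # and yields a single literal quote character.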
+ txt = StringIO('"Hello, my name is ""Monty""!"')
+ expected = np.array('Hello, my name is "Monty"!', dtype="U40")
+ res = np.loadtxt(txt, dtype="U40", delimiter=",", quotechar='"')
+ assert_equal(res, expected)
+
+
+@pytest.mark.parametrize("data", ("", "\n\n\n", "# 1 2 3\n# 4 5 6\n"))
+@pytest.mark.parametrize("ndmin", (0, 1, 2))
+@pytest.mark.parametrize("usecols", [None, (1, 2, 3)])
+def test_warn_on_no_data(data, ndmin, usecols):
+ """Check that a UserWarning is emitted when no data is read from input."""
+ if usecols is not None:
+ expected_shape = (0, 3)
+ elif ndmin == 2:
+ expected_shape = (0, 1) # guess a single column?!
+ else:
+ expected_shape = (0,)
+
+ txt = StringIO(data)
+ with pytest.warns(UserWarning, match="input contained no data"):
+ res = np.loadtxt(txt, ndmin=ndmin, usecols=usecols)
+ assert res.shape == expected_shape
+
+    with NamedTemporaryFile(mode="w+") as fh:
+        fh.write(data)
+        fh.seek(0)
+        with pytest.warns(UserWarning, match="input contained no data"):
+            res = np.loadtxt(fh, ndmin=ndmin, usecols=usecols)
+ assert res.shape == expected_shape
+
+
+@pytest.mark.parametrize("skiprows", (2, 3))
+def test_warn_on_skipped_data(skiprows):
+ data = "1 2 3\n4 5 6"
+ txt = StringIO(data)
+ with pytest.warns(UserWarning, match="input contained no data"):
+ np.loadtxt(txt, skiprows=skiprows)
+
+
+@pytest.mark.parametrize(["dtype", "value"], [
+ ("i2", 0x0001), ("u2", 0x0001),
+ ("i4", 0x00010203), ("u4", 0x00010203),
+ ("i8", 0x0001020304050607), ("u8", 0x0001020304050607),
+ # The following values are constructed to lead to unique bytes:
+ ("float16", 3.07e-05),
+ ("float32", 9.2557e-41), ("complex64", 9.2557e-41 + 2.8622554e-29j),
+ ("float64", -1.758571353180402e-24),
+ # Here and below, the repr side-steps a small loss of precision in
+ # complex `str` in PyPy (which is probably fine, as repr works):
+ ("complex128", repr(5.406409232372729e-29 - 1.758571353180402e-24j)),
+ # Use integer values that fit into double. Everything else leads to
+ # problems due to longdoubles going via double and decimal strings
+ # causing rounding errors.
+ ("longdouble", 0x01020304050607),
+ ("clongdouble", repr(0x01020304050607 + (0x00121314151617 * 1j))),
+ ("U2", "\U00010203\U000a0b0c")])
+@pytest.mark.parametrize("swap", [True, False])
+def test_byteswapping_and_unaligned(dtype, value, swap):
+ # Try to create "interesting" values within the valid unicode range:
+ dtype = np.dtype(dtype)
+ data = [f"x,{value}\n"] # repr as PyPy `str` truncates some
+ if swap:
+ dtype = dtype.newbyteorder()
+ full_dt = np.dtype([("a", "S1"), ("b", dtype)], align=False)
+ # The above ensures that the interesting "b" field is unaligned:
+ assert full_dt.fields["b"][1] == 1
+ res = np.loadtxt(data, dtype=full_dt, delimiter=",",
+ max_rows=1) # max-rows prevents over-allocation
+ assert res["b"] == dtype.type(value)
+
+
+@pytest.mark.parametrize("dtype",
+ np.typecodes["AllInteger"] + "efdFD" + "?")
+def test_unicode_whitespace_stripping(dtype):
+ # Test that all numeric types (and bool) strip whitespace correctly
+    # \u202F is a narrow no-break space, `\n` is just whitespace if quoted.
+ # Currently, skip float128 as it did not always support this and has no
+ # "custom" parsing:
+ txt = StringIO(' 3 ,"\u202F2\n"')
+ res = np.loadtxt(txt, dtype=dtype, delimiter=",", quotechar='"')
+ assert_array_equal(res, np.array([3, 2]).astype(dtype))
+
+
+@pytest.mark.parametrize("dtype", "FD")
+def test_unicode_whitespace_stripping_complex(dtype):
+ # Complex has a few extra cases since it has two components and
+ # parentheses
+ line = " 1 , 2+3j , ( 4+5j ), ( 6+-7j ) , 8j , ( 9j ) \n"
+ data = [line, line.replace(" ", "\u202F")]
+ res = np.loadtxt(data, dtype=dtype, delimiter=',')
+ assert_array_equal(res, np.array([[1, 2 + 3j, 4 + 5j, 6 - 7j, 8j, 9j]] * 2))
+
+
+@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8),
+ reason="PyPy bug in error formatting")
+@pytest.mark.parametrize("dtype", "FD")
+@pytest.mark.parametrize("field",
+ ["1 +2j", "1+ 2j", "1+2 j", "1+-+3", "(1j", "(1", "(1+2j", "1+2j)"])
+def test_bad_complex(dtype, field):
+ with pytest.raises(ValueError):
+ np.loadtxt([field + "\n"], dtype=dtype, delimiter=",")
+
+
+@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8),
+ reason="PyPy bug in error formatting")
+@pytest.mark.parametrize("dtype",
+ np.typecodes["AllInteger"] + "efgdFDG" + "?")
+def test_nul_character_error(dtype):
+ # Test that a \0 character is correctly recognized as an error even if
+ # what comes before is valid (not everything gets parsed internally).
+ if dtype.lower() == "g":
+ pytest.xfail("longdouble/clongdouble assignment may misbehave.")
+ with pytest.raises(ValueError):
+ np.loadtxt(["1\000"], dtype=dtype, delimiter=",", quotechar='"')
+
+
+@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8),
+ reason="PyPy bug in error formatting")
+@pytest.mark.parametrize("dtype",
+ np.typecodes["AllInteger"] + "efgdFDG" + "?")
+def test_no_thousands_support(dtype):
+ # Mainly to document behaviour, Python supports thousands like 1_1.
+ # (e and G may end up using different conversion and support it, this is
+ # a bug but happens...)
+ if dtype == "e":
+ pytest.skip("half assignment currently uses Python float converter")
+ if dtype in "eG":
+ pytest.xfail("clongdouble assignment is buggy (uses `complex`?).")
+
+ assert int("1_1") == float("1_1") == complex("1_1") == 11
+ with pytest.raises(ValueError):
+ np.loadtxt(["1_1\n"], dtype=dtype)
+
+
+@pytest.mark.parametrize("data", [
+ ["1,2\n", "2\n,3\n"],
+ ["1,2\n", "2\r,3\n"]])
+def test_bad_newline_in_iterator(data):
+ # In NumPy <=1.22 this was accepted, because newlines were completely
+ # ignored when the input was an iterable. This could be changed, but right
+ # now, we raise an error.
+ msg = "Found an unquoted embedded newline within a single line"
+ with pytest.raises(ValueError, match=msg):
+ np.loadtxt(data, delimiter=",")
+
+
+@pytest.mark.parametrize("data", [
+ ["1,2\n", "2,3\r\n"], # a universal newline
+ ["1,2\n", "'2\n',3\n"], # a quoted newline
+ ["1,2\n", "'2\r',3\n"],
+ ["1,2\n", "'2\r\n',3\n"],
+])
+def test_good_newline_in_iterator(data):
+ # The quoted newlines will be untransformed here, but are just whitespace.
+ res = np.loadtxt(data, delimiter=",", quotechar="'")
+ assert_array_equal(res, [[1., 2.], [2., 3.]])
+
+
+@pytest.mark.parametrize("newline", ["\n", "\r", "\r\n"])
+def test_universal_newlines_quoted(newline):
+ # Check that universal newline support within the tokenizer is not applied
+ # to quoted fields. (note that lines must end in newline or quoted
+ # fields will not include a newline at all)
+ data = ['1,"2\n"\n', '3,"4\n', '1"\n']
+ data = [row.replace("\n", newline) for row in data]
+ res = np.loadtxt(data, dtype=object, delimiter=",", quotechar='"')
+ assert_array_equal(res, [['1', f'2{newline}'], ['3', f'4{newline}1']])
+
+
+def test_null_character():
+ # Basic tests to check that the NUL character is not special:
+ res = np.loadtxt(["1\0002\0003\n", "4\0005\0006"], delimiter="\000")
+ assert_array_equal(res, [[1, 2, 3], [4, 5, 6]])
+
+ # Also not as part of a field (avoid unicode/arrays as unicode strips \0)
+ res = np.loadtxt(["1\000,2\000,3\n", "4\000,5\000,6"],
+ delimiter=",", dtype=object)
+ assert res.tolist() == [["1\000", "2\000", "3"], ["4\000", "5\000", "6"]]
+
+
+def test_iterator_fails_getting_next_line():
+ class BadSequence:
+ def __len__(self):
+ return 100
+
+ def __getitem__(self, item):
+ if item == 50:
+ raise RuntimeError("Bad things happened!")
+ return f"{item}, {item + 1}"
+
+ with pytest.raises(RuntimeError, match="Bad things happened!"):
+ np.loadtxt(BadSequence(), dtype=int, delimiter=",")
+
+
+class TestCReaderUnitTests:
+    # These are internal tests for paths that should not be possible to hit
+    # unless things go very, very wrong somewhere.
+    def test_not_a_filelike(self):
+ with pytest.raises(AttributeError, match=".*read"):
+ np._core._multiarray_umath._load_from_filelike(
+ object(), dtype=np.dtype("i"), filelike=True)
+
+ def test_filelike_read_fails(self):
+ # Can only be reached if loadtxt opens the file, so it is hard to do
+ # via the public interface (although maybe not impossible considering
+ # the current "DataClass" backing).
+ class BadFileLike:
+ counter = 0
+
+ def read(self, size):
+ self.counter += 1
+ if self.counter > 20:
+ raise RuntimeError("Bad bad bad!")
+ return "1,2,3\n"
+
+ with pytest.raises(RuntimeError, match="Bad bad bad!"):
+ np._core._multiarray_umath._load_from_filelike(
+ BadFileLike(), dtype=np.dtype("i"), filelike=True)
+
+ def test_filelike_bad_read(self):
+ # Can only be reached if loadtxt opens the file, so it is hard to do
+ # via the public interface (although maybe not impossible considering
+ # the current "DataClass" backing).
+
+ class BadFileLike:
+ counter = 0
+
+ def read(self, size):
+ return 1234 # not a string!
+
+ with pytest.raises(TypeError,
+ match="non-string returned while reading data"):
+ np._core._multiarray_umath._load_from_filelike(
+ BadFileLike(), dtype=np.dtype("i"), filelike=True)
+
+ def test_not_an_iter(self):
+ with pytest.raises(TypeError,
+ match="error reading from object, expected an iterable"):
+ np._core._multiarray_umath._load_from_filelike(
+ object(), dtype=np.dtype("i"), filelike=False)
+
+ def test_bad_type(self):
+ with pytest.raises(TypeError, match="internal error: dtype must"):
+ np._core._multiarray_umath._load_from_filelike(
+ object(), dtype="i", filelike=False)
+
+ def test_bad_encoding(self):
+ with pytest.raises(TypeError, match="encoding must be a unicode"):
+ np._core._multiarray_umath._load_from_filelike(
+ object(), dtype=np.dtype("i"), filelike=False, encoding=123)
+
+ @pytest.mark.parametrize("newline", ["\r", "\n", "\r\n"])
+ def test_manual_universal_newlines(self, newline):
+        # This is currently not available to users, because we should always
+        # open files with universal newlines enabled (`newline=None`).
+        # (And reading from an iterator uses slightly different code paths.)
+        # We have no real support for `newline="\r"` or `newline="\n"` as the
+        # user cannot specify those options.
+ data = StringIO('0\n1\n"2\n"\n3\n4 #\n'.replace("\n", newline),
+ newline="")
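+        # skiplines=1 drops the leading "0" row; the quoted "2<newline>" keeps
+        # its raw newline, and "#" strips the trailing comment after "4 ".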
+
+ res = np._core._multiarray_umath._load_from_filelike(
+ data, dtype=np.dtype("U10"), filelike=True,
+ quote='"', comment="#", skiplines=1)
+ assert_array_equal(res[:, 0], ["1", f"2{newline}", "3", "4 "])
+
+
+def test_delimiter_comment_collision_raises():
+ with pytest.raises(TypeError, match=".*control characters.*incompatible"):
+ np.loadtxt(StringIO("1, 2, 3"), delimiter=",", comments=",")
+
+
+def test_delimiter_quotechar_collision_raises():
+ with pytest.raises(TypeError, match=".*control characters.*incompatible"):
+ np.loadtxt(StringIO("1, 2, 3"), delimiter=",", quotechar=",")
+
+
+def test_comment_quotechar_collision_raises():
+ with pytest.raises(TypeError, match=".*control characters.*incompatible"):
+ np.loadtxt(StringIO("1 2 3"), comments="#", quotechar="#")
+
+
+def test_delimiter_and_multiple_comments_collision_raises():
+ with pytest.raises(
+ TypeError, match="Comment characters.*cannot include the delimiter"
+ ):
+ np.loadtxt(StringIO("1, 2, 3"), delimiter=",", comments=["#", ","])
+
+
+@pytest.mark.parametrize(
+ "ws",
+ (
+ " ", # space
+ "\t", # tab
+ "\u2003", # em
+ "\u00A0", # non-break
+ "\u3000", # ideographic space
+ )
+)
+def test_collision_with_default_delimiter_raises(ws):
+ with pytest.raises(TypeError, match=".*control characters.*incompatible"):
+ np.loadtxt(StringIO(f"1{ws}2{ws}3\n4{ws}5{ws}6\n"), comments=ws)
+ with pytest.raises(TypeError, match=".*control characters.*incompatible"):
+ np.loadtxt(StringIO(f"1{ws}2{ws}3\n4{ws}5{ws}6\n"), quotechar=ws)
+
+
+@pytest.mark.parametrize("nl", ("\n", "\r"))
+def test_control_character_newline_raises(nl):
+ txt = StringIO(f"1{nl}2{nl}3{nl}{nl}4{nl}5{nl}6{nl}{nl}")
+ msg = "control character.*cannot be a newline"
+ with pytest.raises(TypeError, match=msg):
+ np.loadtxt(txt, delimiter=nl)
+ with pytest.raises(TypeError, match=msg):
+ np.loadtxt(txt, comments=nl)
+ with pytest.raises(TypeError, match=msg):
+ np.loadtxt(txt, quotechar=nl)
+
+
+@pytest.mark.parametrize(
+ ("generic_data", "long_datum", "unitless_dtype", "expected_dtype"),
+ [
+ ("2012-03", "2013-01-15", "M8", "M8[D]"), # Datetimes
+ ("spam-a-lot", "tis_but_a_scratch", "U", "U17"), # str
+ ],
+)
+@pytest.mark.parametrize("nrows", (10, 50000, 60000)) # lt, eq, gt chunksize
+def test_parametric_unit_discovery(
+ generic_data, long_datum, unitless_dtype, expected_dtype, nrows
+):
+ """Check that the correct unit (e.g. month, day, second) is discovered from
+ the data when a user specifies a unitless datetime."""
+ # Unit should be "D" (days) due to last entry
+ data = [generic_data] * nrows + [long_datum]
+ expected = np.array(data, dtype=expected_dtype)
+ assert len(data) == nrows + 1
+ assert len(data) == len(expected)
+
+ # file-like path
+ txt = StringIO("\n".join(data))
+ a = np.loadtxt(txt, dtype=unitless_dtype)
+ assert len(a) == len(expected)
+ assert a.dtype == expected.dtype
+ assert_equal(a, expected)
+
+ # file-obj path
+ fd, fname = mkstemp()
+ os.close(fd)
+ with open(fname, "w") as fh:
+ fh.write("\n".join(data) + "\n")
+ # loading the full file...
+ a = np.loadtxt(fname, dtype=unitless_dtype)
+ assert len(a) == len(expected)
+ assert a.dtype == expected.dtype
+ assert_equal(a, expected)
+ # loading half of the file...
+ a = np.loadtxt(fname, dtype=unitless_dtype, max_rows=int(nrows / 2))
+ os.remove(fname)
+ assert len(a) == int(nrows / 2)
+ assert_equal(a, expected[:int(nrows / 2)])
+
+
+def test_str_dtype_unit_discovery_with_converter():
+ data = ["spam-a-lot"] * 60000 + ["XXXtis_but_a_scratch"]
+ expected = np.array(
+ ["spam-a-lot"] * 60000 + ["tis_but_a_scratch"], dtype="U17"
+ )
+ conv = lambda s: s.removeprefix("XXX")
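+    # The width must be discovered from the *converted* value: stripping the
+    # "XXX" prefix leaves the 17-character "tis_but_a_scratch", hence "U17".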
+
+ # file-like path
+ txt = StringIO("\n".join(data))
+ a = np.loadtxt(txt, dtype="U", converters=conv)
+ assert a.dtype == expected.dtype
+ assert_equal(a, expected)
+
+ # file-obj path
+ fd, fname = mkstemp()
+ os.close(fd)
+ with open(fname, "w") as fh:
+ fh.write("\n".join(data))
+ a = np.loadtxt(fname, dtype="U", converters=conv)
+ os.remove(fname)
+ assert a.dtype == expected.dtype
+ assert_equal(a, expected)
+
+
+@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8),
+ reason="PyPy bug in error formatting")
+def test_control_character_empty():
+ with pytest.raises(TypeError, match="Text reading control character must"):
+ np.loadtxt(StringIO("1 2 3"), delimiter="")
+ with pytest.raises(TypeError, match="Text reading control character must"):
+ np.loadtxt(StringIO("1 2 3"), quotechar="")
+ with pytest.raises(ValueError, match="comments cannot be an empty string"):
+ np.loadtxt(StringIO("1 2 3"), comments="")
+ with pytest.raises(ValueError, match="comments cannot be an empty string"):
+ np.loadtxt(StringIO("1 2 3"), comments=["#", ""])
+
+
+def test_control_characters_as_bytes():
+ """Byte control characters (comments, delimiter) are supported."""
+ a = np.loadtxt(StringIO("#header\n1,2,3"), comments=b"#", delimiter=b",")
+ assert_equal(a, [1, 2, 3])
+
+
+@pytest.mark.filterwarnings('ignore::UserWarning')
+def test_field_growing_cases():
+ # Test empty field appending/growing (each field still takes 1 character)
+ # to see if the final field appending does not create issues.
+ res = np.loadtxt([""], delimiter=",", dtype=bytes)
+ assert len(res) == 0
+
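+    # A row of i delimiters tokenizes into i + 1 empty fields.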
+ for i in range(1, 1024):
+ res = np.loadtxt(["," * i], delimiter=",", dtype=bytes, max_rows=10)
+ assert len(res) == i + 1
+
+
+@pytest.mark.parametrize("nmax", (10000, 50000, 55000, 60000))
+def test_maxrows_exceeding_chunksize(nmax):
+ # tries to read all of the file,
+ # or less, equal, greater than _loadtxt_chunksize
+ file_length = 60000
+
+ # file-like path
+ data = ["a 0.5 1"] * file_length
+ txt = StringIO("\n".join(data))
+ res = np.loadtxt(txt, dtype=str, delimiter=" ", max_rows=nmax)
+ assert len(res) == nmax
+
+ # file-obj path
+ fd, fname = mkstemp()
+ os.close(fd)
+ with open(fname, "w") as fh:
+ fh.write("\n".join(data))
+ res = np.loadtxt(fname, dtype=str, delimiter=" ", max_rows=nmax)
+ os.remove(fname)
+ assert len(res) == nmax
+
+
+@pytest.mark.parametrize("nskip", (0, 10000, 12345, 50000, 67891, 100000))
+def test_skiprow_exceeding_maxrows_exceeding_chunksize(tmpdir, nskip):
+ # tries to read a file in chunks by skipping a variable amount of lines,
+ # less, equal, greater than max_rows
+ file_length = 110000
+ data = "\n".join(f"{i} a 0.5 1" for i in range(1, file_length + 1))
+ expected_length = min(60000, file_length - nskip)
+ expected = np.arange(nskip + 1, nskip + 1 + expected_length).astype(str)
+
+ # file-like path
+ txt = StringIO(data)
+    res = np.loadtxt(txt, dtype=str, delimiter=" ", skiprows=nskip,
+                     max_rows=60000)
+ assert len(res) == expected_length
+ # are the right lines read in res?
+ assert_array_equal(expected, res[:, 0])
+
+ # file-obj path
+ tmp_file = tmpdir / "test_data.txt"
+ tmp_file.write(data)
+ fname = str(tmp_file)
+    res = np.loadtxt(fname, dtype=str, delimiter=" ", skiprows=nskip,
+                     max_rows=60000)
+ assert len(res) == expected_length
+ # are the right lines read in res?
+ assert_array_equal(expected, res[:, 0])
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_mixins.py b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_mixins.py
new file mode 100644
index 0000000..f0aec15
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_mixins.py
@@ -0,0 +1,215 @@
+import numbers
+import operator
+
+import numpy as np
+from numpy.testing import assert_, assert_equal, assert_raises
+
+# NOTE: This class should be kept as an exact copy of the example from the
+# docstring for NDArrayOperatorsMixin.
+
+class ArrayLike(np.lib.mixins.NDArrayOperatorsMixin):
+ def __init__(self, value):
+ self.value = np.asarray(value)
+
+ # One might also consider adding the built-in list type to this
+ # list, to support operations like np.add(array_like, list)
+ _HANDLED_TYPES = (np.ndarray, numbers.Number)
+
+ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
+ out = kwargs.get('out', ())
+ for x in inputs + out:
+ # Only support operations with instances of _HANDLED_TYPES.
+ # Use ArrayLike instead of type(self) for isinstance to
+ # allow subclasses that don't override __array_ufunc__ to
+ # handle ArrayLike objects.
+ if not isinstance(x, self._HANDLED_TYPES + (ArrayLike,)):
+ return NotImplemented
+
+ # Defer to the implementation of the ufunc on unwrapped values.
+ inputs = tuple(x.value if isinstance(x, ArrayLike) else x
+ for x in inputs)
+ if out:
+ kwargs['out'] = tuple(
+ x.value if isinstance(x, ArrayLike) else x
+ for x in out)
+ result = getattr(ufunc, method)(*inputs, **kwargs)
+
+ if type(result) is tuple:
+ # multiple return values
+ return tuple(type(self)(x) for x in result)
+ elif method == 'at':
+ # no return value
+ return None
+ else:
+ # one return value
+ return type(self)(result)
+
+ def __repr__(self):
+ return f'{type(self).__name__}({self.value!r})'
+
+
+def wrap_array_like(result):
+ if type(result) is tuple:
+ return tuple(ArrayLike(r) for r in result)
+ else:
+ return ArrayLike(result)
+
+
+def _assert_equal_type_and_value(result, expected, err_msg=None):
+ assert_equal(type(result), type(expected), err_msg=err_msg)
+ if isinstance(result, tuple):
+ assert_equal(len(result), len(expected), err_msg=err_msg)
+ for result_item, expected_item in zip(result, expected):
+ _assert_equal_type_and_value(result_item, expected_item, err_msg)
+ else:
+ assert_equal(result.value, expected.value, err_msg=err_msg)
+ assert_equal(getattr(result.value, 'dtype', None),
+ getattr(expected.value, 'dtype', None), err_msg=err_msg)
+
+
+_ALL_BINARY_OPERATORS = [
+ operator.lt,
+ operator.le,
+ operator.eq,
+ operator.ne,
+ operator.gt,
+ operator.ge,
+ operator.add,
+ operator.sub,
+ operator.mul,
+ operator.truediv,
+ operator.floordiv,
+ operator.mod,
+ divmod,
+ pow,
+ operator.lshift,
+ operator.rshift,
+ operator.and_,
+ operator.xor,
+ operator.or_,
+]
+
+
+class TestNDArrayOperatorsMixin:
+
+ def test_array_like_add(self):
+
+ def check(result):
+ _assert_equal_type_and_value(result, ArrayLike(0))
+
+ check(ArrayLike(0) + 0)
+ check(0 + ArrayLike(0))
+
+ check(ArrayLike(0) + np.array(0))
+ check(np.array(0) + ArrayLike(0))
+
+ check(ArrayLike(np.array(0)) + 0)
+ check(0 + ArrayLike(np.array(0)))
+
+ check(ArrayLike(np.array(0)) + np.array(0))
+ check(np.array(0) + ArrayLike(np.array(0)))
+
+ def test_inplace(self):
+ array_like = ArrayLike(np.array([0]))
+ array_like += 1
+ _assert_equal_type_and_value(array_like, ArrayLike(np.array([1])))
+
+ array = np.array([0])
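+        # ndarray.__iadd__ dispatches np.add to ArrayLike.__array_ufunc__, so
+        # `array` is rebound to an ArrayLike wrapping the in-place result.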
+ array += ArrayLike(1)
+ _assert_equal_type_and_value(array, ArrayLike(np.array([1])))
+
+ def test_opt_out(self):
+
+ class OptOut:
+ """Object that opts out of __array_ufunc__."""
+ __array_ufunc__ = None
+
+ def __add__(self, other):
+ return self
+
+ def __radd__(self, other):
+ return self
+
+ array_like = ArrayLike(1)
+ opt_out = OptOut()
+
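+        # __array_ufunc__ = None makes the mixin return NotImplemented, so
+        # Python falls back to OptOut.__add__ / OptOut.__radd__ here: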
+ # supported operations
+ assert_(array_like + opt_out is opt_out)
+ assert_(opt_out + array_like is opt_out)
+
+ # not supported
+ with assert_raises(TypeError):
+            # the mixin raises rather than falling back to the Python
+            # default of array_like = array_like + opt_out
+ array_like += opt_out
+ with assert_raises(TypeError):
+ array_like - opt_out
+ with assert_raises(TypeError):
+ opt_out - array_like
+
+ def test_subclass(self):
+
+ class SubArrayLike(ArrayLike):
+ """Should take precedence over ArrayLike."""
+
+ x = ArrayLike(0)
+ y = SubArrayLike(1)
+ _assert_equal_type_and_value(x + y, y)
+ _assert_equal_type_and_value(y + x, y)
+
+ def test_object(self):
+ x = ArrayLike(0)
+ obj = object()
+ with assert_raises(TypeError):
+ x + obj
+ with assert_raises(TypeError):
+ obj + x
+ with assert_raises(TypeError):
+ x += obj
+
+ def test_unary_methods(self):
+ array = np.array([-1, 0, 1, 2])
+ array_like = ArrayLike(array)
+ for op in [operator.neg,
+ operator.pos,
+ abs,
+ operator.invert]:
+ _assert_equal_type_and_value(op(array_like), ArrayLike(op(array)))
+
+ def test_forward_binary_methods(self):
+ array = np.array([-1, 0, 1, 2])
+ array_like = ArrayLike(array)
+ for op in _ALL_BINARY_OPERATORS:
+ expected = wrap_array_like(op(array, 1))
+ actual = op(array_like, 1)
+ err_msg = f'failed for operator {op}'
+ _assert_equal_type_and_value(expected, actual, err_msg=err_msg)
+
+ def test_reflected_binary_methods(self):
+ for op in _ALL_BINARY_OPERATORS:
+ expected = wrap_array_like(op(2, 1))
+ actual = op(2, ArrayLike(1))
+ err_msg = f'failed for operator {op}'
+ _assert_equal_type_and_value(expected, actual, err_msg=err_msg)
+
+ def test_matmul(self):
+ array = np.array([1, 2], dtype=np.float64)
+ array_like = ArrayLike(array)
+ expected = ArrayLike(np.float64(5))
+ _assert_equal_type_and_value(expected, np.matmul(array_like, array))
+ _assert_equal_type_and_value(
+ expected, operator.matmul(array_like, array))
+ _assert_equal_type_and_value(
+ expected, operator.matmul(array, array_like))
+
+ def test_ufunc_at(self):
+ array = ArrayLike(np.array([1, 2, 3, 4]))
+ assert_(np.negative.at(array, np.array([0, 1])) is None)
+ _assert_equal_type_and_value(array, ArrayLike([-1, -2, 3, 4]))
+
+ def test_ufunc_two_outputs(self):
+ mantissa, exponent = np.frexp(2 ** -3)
+ expected = (ArrayLike(mantissa), ArrayLike(exponent))
+ _assert_equal_type_and_value(
+ np.frexp(ArrayLike(2 ** -3)), expected)
+ _assert_equal_type_and_value(
+ np.frexp(ArrayLike(np.array(2 ** -3))), expected)
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_nanfunctions.py b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_nanfunctions.py
new file mode 100644
index 0000000..89a6d1f
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_nanfunctions.py
@@ -0,0 +1,1438 @@
+import inspect
+import warnings
+from functools import partial
+
+import pytest
+
+import numpy as np
+from numpy._core.numeric import normalize_axis_tuple
+from numpy.exceptions import AxisError, ComplexWarning
+from numpy.lib._nanfunctions_impl import _nan_mask, _replace_nan
+from numpy.testing import (
+ assert_,
+ assert_almost_equal,
+ assert_array_equal,
+ assert_equal,
+ assert_raises,
+ assert_raises_regex,
+ suppress_warnings,
+)
+
+# Test data
+_ndat = np.array([[0.6244, np.nan, 0.2692, 0.0116, np.nan, 0.1170],
+ [0.5351, -0.9403, np.nan, 0.2100, 0.4759, 0.2833],
+ [np.nan, np.nan, np.nan, 0.1042, np.nan, -0.5954],
+ [0.1610, np.nan, np.nan, 0.1859, 0.3146, np.nan]])
+
+
+# Rows of _ndat with nans removed
+_rdat = [np.array([0.6244, 0.2692, 0.0116, 0.1170]),
+ np.array([0.5351, -0.9403, 0.2100, 0.4759, 0.2833]),
+ np.array([0.1042, -0.5954]),
+ np.array([0.1610, 0.1859, 0.3146])]
+
+# Rows of _ndat with nans converted to ones
+_ndat_ones = np.array([[0.6244, 1.0, 0.2692, 0.0116, 1.0, 0.1170],
+ [0.5351, -0.9403, 1.0, 0.2100, 0.4759, 0.2833],
+ [1.0, 1.0, 1.0, 0.1042, 1.0, -0.5954],
+ [0.1610, 1.0, 1.0, 0.1859, 0.3146, 1.0]])
+
+# Rows of _ndat with nans converted to zeros
+_ndat_zeros = np.array([[0.6244, 0.0, 0.2692, 0.0116, 0.0, 0.1170],
+ [0.5351, -0.9403, 0.0, 0.2100, 0.4759, 0.2833],
+ [0.0, 0.0, 0.0, 0.1042, 0.0, -0.5954],
+ [0.1610, 0.0, 0.0, 0.1859, 0.3146, 0.0]])
+
+
+class TestSignatureMatch:
+ NANFUNCS = {
+ np.nanmin: np.amin,
+ np.nanmax: np.amax,
+ np.nanargmin: np.argmin,
+ np.nanargmax: np.argmax,
+ np.nansum: np.sum,
+ np.nanprod: np.prod,
+ np.nancumsum: np.cumsum,
+ np.nancumprod: np.cumprod,
+ np.nanmean: np.mean,
+ np.nanmedian: np.median,
+ np.nanpercentile: np.percentile,
+ np.nanquantile: np.quantile,
+ np.nanvar: np.var,
+ np.nanstd: np.std,
+ }
+ IDS = [k.__name__ for k in NANFUNCS]
+
+ @staticmethod
+ def get_signature(func, default="..."):
+ """Construct a signature and replace all default parameter-values."""
+ prm_list = []
+ signature = inspect.signature(func)
+ for prm in signature.parameters.values():
+ if prm.default is inspect.Parameter.empty:
+ prm_list.append(prm)
+ else:
+ prm_list.append(prm.replace(default=default))
+ return inspect.Signature(prm_list)
+
+ @pytest.mark.parametrize("nan_func,func", NANFUNCS.items(), ids=IDS)
+ def test_signature_match(self, nan_func, func):
+ # Ignore the default parameter-values as they can sometimes differ
+ # between the two functions (*e.g.* one has `False` while the other
+ # has `np._NoValue`)
+ signature = self.get_signature(func)
+ nan_signature = self.get_signature(nan_func)
+ np.testing.assert_equal(signature, nan_signature)
+
+ def test_exhaustiveness(self):
+ """Validate that all nan functions are actually tested."""
+ np.testing.assert_equal(
+ set(self.IDS), set(np.lib._nanfunctions_impl.__all__)
+ )
+
+
+class TestNanFunctions_MinMax:
+
+ nanfuncs = [np.nanmin, np.nanmax]
+ stdfuncs = [np.min, np.max]
+
+ def test_mutation(self):
+ # Check that passed array is not modified.
+ ndat = _ndat.copy()
+ for f in self.nanfuncs:
+ f(ndat)
+ assert_equal(ndat, _ndat)
+
+ def test_keepdims(self):
+ mat = np.eye(3)
+ for nf, rf in zip(self.nanfuncs, self.stdfuncs):
+ for axis in [None, 0, 1]:
+ tgt = rf(mat, axis=axis, keepdims=True)
+ res = nf(mat, axis=axis, keepdims=True)
+ assert_(res.ndim == tgt.ndim)
+
+ def test_out(self):
+ mat = np.eye(3)
+ for nf, rf in zip(self.nanfuncs, self.stdfuncs):
+ resout = np.zeros(3)
+ tgt = rf(mat, axis=1)
+ res = nf(mat, axis=1, out=resout)
+ assert_almost_equal(res, resout)
+ assert_almost_equal(res, tgt)
+
+ def test_dtype_from_input(self):
+ codes = 'efdgFDG'
+ for nf, rf in zip(self.nanfuncs, self.stdfuncs):
+ for c in codes:
+ mat = np.eye(3, dtype=c)
+ tgt = rf(mat, axis=1).dtype.type
+ res = nf(mat, axis=1).dtype.type
+ assert_(res is tgt)
+ # scalar case
+ tgt = rf(mat, axis=None).dtype.type
+ res = nf(mat, axis=None).dtype.type
+ assert_(res is tgt)
+
+ def test_result_values(self):
+ for nf, rf in zip(self.nanfuncs, self.stdfuncs):
+ tgt = [rf(d) for d in _rdat]
+ res = nf(_ndat, axis=1)
+ assert_almost_equal(res, tgt)
+
+ @pytest.mark.parametrize("axis", [None, 0, 1])
+ @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"])
+ @pytest.mark.parametrize("array", [
+ np.array(np.nan),
+ np.full((3, 3), np.nan),
+ ], ids=["0d", "2d"])
+ def test_allnans(self, axis, dtype, array):
+ if axis is not None and array.ndim == 0:
+ pytest.skip("`axis != None` not supported for 0d arrays")
+
+ array = array.astype(dtype)
+ match = "All-NaN slice encountered"
+ for func in self.nanfuncs:
+ with pytest.warns(RuntimeWarning, match=match):
+ out = func(array, axis=axis)
+ assert np.isnan(out).all()
+ assert out.dtype == array.dtype
+
+ def test_masked(self):
+ mat = np.ma.fix_invalid(_ndat)
+ msk = mat._mask.copy()
+ for f in [np.nanmin]:
+ res = f(mat, axis=1)
+ tgt = f(_ndat, axis=1)
+ assert_equal(res, tgt)
+ assert_equal(mat._mask, msk)
+ assert_(not np.isinf(mat).any())
+
+ def test_scalar(self):
+ for f in self.nanfuncs:
+ assert_(f(0.) == 0.)
+
+ def test_subclass(self):
+ class MyNDArray(np.ndarray):
+ pass
+
+ # Check that it works and that type and
+ # shape are preserved
+ mine = np.eye(3).view(MyNDArray)
+ for f in self.nanfuncs:
+ res = f(mine, axis=0)
+ assert_(isinstance(res, MyNDArray))
+ assert_(res.shape == (3,))
+ res = f(mine, axis=1)
+ assert_(isinstance(res, MyNDArray))
+ assert_(res.shape == (3,))
+ res = f(mine)
+ assert_(res.shape == ())
+
+ # check that rows of nan are dealt with for subclasses (#4628)
+ mine[1] = np.nan
+ for f in self.nanfuncs:
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter('always')
+ res = f(mine, axis=0)
+ assert_(isinstance(res, MyNDArray))
+ assert_(not np.any(np.isnan(res)))
+ assert_(len(w) == 0)
+
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter('always')
+ res = f(mine, axis=1)
+ assert_(isinstance(res, MyNDArray))
+ assert_(np.isnan(res[1]) and not np.isnan(res[0])
+ and not np.isnan(res[2]))
+ assert_(len(w) == 1, 'no warning raised')
+ assert_(issubclass(w[0].category, RuntimeWarning))
+
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter('always')
+ res = f(mine)
+ assert_(res.shape == ())
+                assert_(not np.isnan(res))
+ assert_(len(w) == 0)
+
+ def test_object_array(self):
+ arr = np.array([[1.0, 2.0], [np.nan, 4.0], [np.nan, np.nan]], dtype=object)
+ assert_equal(np.nanmin(arr), 1.0)
+ assert_equal(np.nanmin(arr, axis=0), [1.0, 2.0])
+
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter('always')
+ # assert_equal does not work on object arrays of nan
+ assert_equal(list(np.nanmin(arr, axis=1)), [1.0, 4.0, np.nan])
+ assert_(len(w) == 1, 'no warning raised')
+ assert_(issubclass(w[0].category, RuntimeWarning))
+
+ @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"])
+ def test_initial(self, dtype):
+ class MyNDArray(np.ndarray):
+ pass
+
+ ar = np.arange(9).astype(dtype)
+ ar[:5] = np.nan
+
+ for f in self.nanfuncs:
+ initial = 100 if f is np.nanmax else 0
+
+ ret1 = f(ar, initial=initial)
+ assert ret1.dtype == dtype
+ assert ret1 == initial
+
+ ret2 = f(ar.view(MyNDArray), initial=initial)
+ assert ret2.dtype == dtype
+ assert ret2 == initial
+
+ @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"])
+ def test_where(self, dtype):
+ class MyNDArray(np.ndarray):
+ pass
+
+ ar = np.arange(9).reshape(3, 3).astype(dtype)
+ ar[0, :] = np.nan
+ where = np.ones_like(ar, dtype=np.bool)
+ where[:, 0] = False
+
+ for f in self.nanfuncs:
+ reference = 4 if f is np.nanmin else 8
+
+ ret1 = f(ar, where=where, initial=5)
+ assert ret1.dtype == dtype
+ assert ret1 == reference
+
+ ret2 = f(ar.view(MyNDArray), where=where, initial=5)
+ assert ret2.dtype == dtype
+ assert ret2 == reference
+
+
+class TestNanFunctions_ArgminArgmax:
+
+ nanfuncs = [np.nanargmin, np.nanargmax]
+
+ def test_mutation(self):
+ # Check that passed array is not modified.
+ ndat = _ndat.copy()
+ for f in self.nanfuncs:
+ f(ndat)
+ assert_equal(ndat, _ndat)
+
+ def test_result_values(self):
+ for f, fcmp in zip(self.nanfuncs, [np.greater, np.less]):
+ for row in _ndat:
+ with suppress_warnings() as sup:
+ sup.filter(RuntimeWarning, "invalid value encountered in")
+ ind = f(row)
+ val = row[ind]
+ # comparing with NaN is tricky as the result
+ # is always false except for NaN != NaN
+ assert_(not np.isnan(val))
+ assert_(not fcmp(val, row).any())
+ assert_(not np.equal(val, row[:ind]).any())
+
+ @pytest.mark.parametrize("axis", [None, 0, 1])
+ @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"])
+ @pytest.mark.parametrize("array", [
+ np.array(np.nan),
+ np.full((3, 3), np.nan),
+ ], ids=["0d", "2d"])
+ def test_allnans(self, axis, dtype, array):
+ if axis is not None and array.ndim == 0:
+ pytest.skip("`axis != None` not supported for 0d arrays")
+
+ array = array.astype(dtype)
+ for func in self.nanfuncs:
+ with pytest.raises(ValueError, match="All-NaN slice encountered"):
+ func(array, axis=axis)
+
+ def test_empty(self):
+ mat = np.zeros((0, 3))
+ for f in self.nanfuncs:
+ for axis in [0, None]:
+ assert_raises_regex(
+ ValueError,
+ "attempt to get argm.. of an empty sequence",
+ f, mat, axis=axis)
+ for axis in [1]:
+ res = f(mat, axis=axis)
+ assert_equal(res, np.zeros(0))
+
+ def test_scalar(self):
+ for f in self.nanfuncs:
+ assert_(f(0.) == 0.)
+
+ def test_subclass(self):
+ class MyNDArray(np.ndarray):
+ pass
+
+ # Check that it works and that type and
+ # shape are preserved
+ mine = np.eye(3).view(MyNDArray)
+ for f in self.nanfuncs:
+ res = f(mine, axis=0)
+ assert_(isinstance(res, MyNDArray))
+ assert_(res.shape == (3,))
+ res = f(mine, axis=1)
+ assert_(isinstance(res, MyNDArray))
+ assert_(res.shape == (3,))
+ res = f(mine)
+ assert_(res.shape == ())
+
+ @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"])
+ def test_keepdims(self, dtype):
+ ar = np.arange(9).astype(dtype)
+ ar[:5] = np.nan
+
+ for f in self.nanfuncs:
+ reference = 5 if f is np.nanargmin else 8
+ ret = f(ar, keepdims=True)
+ assert ret.ndim == ar.ndim
+ assert ret == reference
+
+ @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"])
+ def test_out(self, dtype):
+ ar = np.arange(9).astype(dtype)
+ ar[:5] = np.nan
+
+ for f in self.nanfuncs:
+ out = np.zeros((), dtype=np.intp)
+ reference = 5 if f is np.nanargmin else 8
+ ret = f(ar, out=out)
+ assert ret is out
+ assert ret == reference
+
+
+_TEST_ARRAYS = {
+ "0d": np.array(5),
+ "1d": np.array([127, 39, 93, 87, 46])
+}
+for _v in _TEST_ARRAYS.values():
+ _v.setflags(write=False)
+
+
+@pytest.mark.parametrize(
+ "dtype",
+ np.typecodes["AllInteger"] + np.typecodes["AllFloat"] + "O",
+)
+@pytest.mark.parametrize("mat", _TEST_ARRAYS.values(), ids=_TEST_ARRAYS.keys())
+class TestNanFunctions_NumberTypes:
+ nanfuncs = {
+ np.nanmin: np.min,
+ np.nanmax: np.max,
+ np.nanargmin: np.argmin,
+ np.nanargmax: np.argmax,
+ np.nansum: np.sum,
+ np.nanprod: np.prod,
+ np.nancumsum: np.cumsum,
+ np.nancumprod: np.cumprod,
+ np.nanmean: np.mean,
+ np.nanmedian: np.median,
+ np.nanvar: np.var,
+ np.nanstd: np.std,
+ }
+ nanfunc_ids = [i.__name__ for i in nanfuncs]
+
+ @pytest.mark.parametrize("nanfunc,func", nanfuncs.items(), ids=nanfunc_ids)
+ @np.errstate(over="ignore")
+ def test_nanfunc(self, mat, dtype, nanfunc, func):
+ mat = mat.astype(dtype)
+ tgt = func(mat)
+ out = nanfunc(mat)
+
+ assert_almost_equal(out, tgt)
+ if dtype == "O":
+ assert type(out) is type(tgt)
+ else:
+ assert out.dtype == tgt.dtype
+
+ @pytest.mark.parametrize(
+ "nanfunc,func",
+ [(np.nanquantile, np.quantile), (np.nanpercentile, np.percentile)],
+ ids=["nanquantile", "nanpercentile"],
+ )
+ def test_nanfunc_q(self, mat, dtype, nanfunc, func):
+ mat = mat.astype(dtype)
+ if mat.dtype.kind == "c":
+ assert_raises(TypeError, func, mat, q=1)
+ assert_raises(TypeError, nanfunc, mat, q=1)
+
+ else:
+ tgt = func(mat, q=1)
+ out = nanfunc(mat, q=1)
+
+ assert_almost_equal(out, tgt)
+
+ if dtype == "O":
+ assert type(out) is type(tgt)
+ else:
+ assert out.dtype == tgt.dtype
+
+ @pytest.mark.parametrize(
+ "nanfunc,func",
+ [(np.nanvar, np.var), (np.nanstd, np.std)],
+ ids=["nanvar", "nanstd"],
+ )
+ def test_nanfunc_ddof(self, mat, dtype, nanfunc, func):
+ mat = mat.astype(dtype)
+ tgt = func(mat, ddof=0.5)
+ out = nanfunc(mat, ddof=0.5)
+
+ assert_almost_equal(out, tgt)
+ if dtype == "O":
+ assert type(out) is type(tgt)
+ else:
+ assert out.dtype == tgt.dtype
+
+ @pytest.mark.parametrize(
+ "nanfunc", [np.nanvar, np.nanstd]
+ )
+ def test_nanfunc_correction(self, mat, dtype, nanfunc):
+ mat = mat.astype(dtype)
+ assert_almost_equal(
+ nanfunc(mat, correction=0.5), nanfunc(mat, ddof=0.5)
+ )
+
+ err_msg = "ddof and correction can't be provided simultaneously."
+ with assert_raises_regex(ValueError, err_msg):
+ nanfunc(mat, ddof=0.5, correction=0.5)
+
+ with assert_raises_regex(ValueError, err_msg):
+ nanfunc(mat, ddof=1, correction=0)
+
+
+class SharedNanFunctionsTestsMixin:
+ def test_mutation(self):
+ # Check that passed array is not modified.
+ ndat = _ndat.copy()
+ for f in self.nanfuncs:
+ f(ndat)
+ assert_equal(ndat, _ndat)
+
+ def test_keepdims(self):
+ mat = np.eye(3)
+ for nf, rf in zip(self.nanfuncs, self.stdfuncs):
+ for axis in [None, 0, 1]:
+ tgt = rf(mat, axis=axis, keepdims=True)
+ res = nf(mat, axis=axis, keepdims=True)
+ assert_(res.ndim == tgt.ndim)
+
+ def test_out(self):
+ mat = np.eye(3)
+ for nf, rf in zip(self.nanfuncs, self.stdfuncs):
+ resout = np.zeros(3)
+ tgt = rf(mat, axis=1)
+ res = nf(mat, axis=1, out=resout)
+ assert_almost_equal(res, resout)
+ assert_almost_equal(res, tgt)
+
+ def test_dtype_from_dtype(self):
+ mat = np.eye(3)
+ codes = 'efdgFDG'
+ for nf, rf in zip(self.nanfuncs, self.stdfuncs):
+ for c in codes:
+ with suppress_warnings() as sup:
+ if nf in {np.nanstd, np.nanvar} and c in 'FDG':
+ # Giving the warning is a small bug, see gh-8000
+ sup.filter(ComplexWarning)
+ tgt = rf(mat, dtype=np.dtype(c), axis=1).dtype.type
+ res = nf(mat, dtype=np.dtype(c), axis=1).dtype.type
+ assert_(res is tgt)
+ # scalar case
+ tgt = rf(mat, dtype=np.dtype(c), axis=None).dtype.type
+ res = nf(mat, dtype=np.dtype(c), axis=None).dtype.type
+ assert_(res is tgt)
+
+ def test_dtype_from_char(self):
+ mat = np.eye(3)
+ codes = 'efdgFDG'
+ for nf, rf in zip(self.nanfuncs, self.stdfuncs):
+ for c in codes:
+ with suppress_warnings() as sup:
+ if nf in {np.nanstd, np.nanvar} and c in 'FDG':
+ # Giving the warning is a small bug, see gh-8000
+ sup.filter(ComplexWarning)
+ tgt = rf(mat, dtype=c, axis=1).dtype.type
+ res = nf(mat, dtype=c, axis=1).dtype.type
+ assert_(res is tgt)
+ # scalar case
+ tgt = rf(mat, dtype=c, axis=None).dtype.type
+ res = nf(mat, dtype=c, axis=None).dtype.type
+ assert_(res is tgt)
+
+ def test_dtype_from_input(self):
+ codes = 'efdgFDG'
+ for nf, rf in zip(self.nanfuncs, self.stdfuncs):
+ for c in codes:
+ mat = np.eye(3, dtype=c)
+ tgt = rf(mat, axis=1).dtype.type
+ res = nf(mat, axis=1).dtype.type
+ assert_(res is tgt, f"res {res}, tgt {tgt}")
+ # scalar case
+ tgt = rf(mat, axis=None).dtype.type
+ res = nf(mat, axis=None).dtype.type
+ assert_(res is tgt)
+
+ def test_result_values(self):
+ for nf, rf in zip(self.nanfuncs, self.stdfuncs):
+ tgt = [rf(d) for d in _rdat]
+ res = nf(_ndat, axis=1)
+ assert_almost_equal(res, tgt)
+
+ def test_scalar(self):
+ for f in self.nanfuncs:
+ assert_(f(0.) == 0.)
+
+ def test_subclass(self):
+ class MyNDArray(np.ndarray):
+ pass
+
+ # Check that it works and that type and
+ # shape are preserved
+ array = np.eye(3)
+ mine = array.view(MyNDArray)
+ for f in self.nanfuncs:
+ expected_shape = f(array, axis=0).shape
+ res = f(mine, axis=0)
+ assert_(isinstance(res, MyNDArray))
+ assert_(res.shape == expected_shape)
+ expected_shape = f(array, axis=1).shape
+ res = f(mine, axis=1)
+ assert_(isinstance(res, MyNDArray))
+ assert_(res.shape == expected_shape)
+ expected_shape = f(array).shape
+ res = f(mine)
+ assert_(isinstance(res, MyNDArray))
+ assert_(res.shape == expected_shape)
+
+
+class TestNanFunctions_SumProd(SharedNanFunctionsTestsMixin):
+
+ nanfuncs = [np.nansum, np.nanprod]
+ stdfuncs = [np.sum, np.prod]
+
+ @pytest.mark.parametrize("axis", [None, 0, 1])
+ @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"])
+ @pytest.mark.parametrize("array", [
+ np.array(np.nan),
+ np.full((3, 3), np.nan),
+ ], ids=["0d", "2d"])
+ def test_allnans(self, axis, dtype, array):
+ if axis is not None and array.ndim == 0:
+ pytest.skip("`axis != None` not supported for 0d arrays")
+
+ array = array.astype(dtype)
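+        # An all-NaN slice reduces to the operation's identity element:
+        # 0 for nansum and 1 for nanprod.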
+ for func, identity in zip(self.nanfuncs, [0, 1]):
+ out = func(array, axis=axis)
+ assert np.all(out == identity)
+ assert out.dtype == array.dtype
+
+ def test_empty(self):
+ for f, tgt_value in zip([np.nansum, np.nanprod], [0, 1]):
+ mat = np.zeros((0, 3))
+ tgt = [tgt_value] * 3
+ res = f(mat, axis=0)
+ assert_equal(res, tgt)
+ tgt = []
+ res = f(mat, axis=1)
+ assert_equal(res, tgt)
+ tgt = tgt_value
+ res = f(mat, axis=None)
+ assert_equal(res, tgt)
+
+ @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"])
+ def test_initial(self, dtype):
+ ar = np.arange(9).astype(dtype)
+ ar[:5] = np.nan
+
+ for f in self.nanfuncs:
+ reference = 28 if f is np.nansum else 3360
+ ret = f(ar, initial=2)
+ assert ret.dtype == dtype
+ assert ret == reference
+
+ @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"])
+ def test_where(self, dtype):
+ ar = np.arange(9).reshape(3, 3).astype(dtype)
+ ar[0, :] = np.nan
+ where = np.ones_like(ar, dtype=np.bool)
+ where[:, 0] = False
+
+ for f in self.nanfuncs:
+ reference = 26 if f is np.nansum else 2240
+ ret = f(ar, where=where, initial=2)
+ assert ret.dtype == dtype
+ assert ret == reference
+
+
+class TestNanFunctions_CumSumProd(SharedNanFunctionsTestsMixin):
+
+ nanfuncs = [np.nancumsum, np.nancumprod]
+ stdfuncs = [np.cumsum, np.cumprod]
+
+ @pytest.mark.parametrize("axis", [None, 0, 1])
+ @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"])
+ @pytest.mark.parametrize("array", [
+ np.array(np.nan),
+ np.full((3, 3), np.nan)
+ ], ids=["0d", "2d"])
+ def test_allnans(self, axis, dtype, array):
+ if axis is not None and array.ndim == 0:
+ pytest.skip("`axis != None` not supported for 0d arrays")
+
+ array = array.astype(dtype)
+ for func, identity in zip(self.nanfuncs, [0, 1]):
+ out = func(array)
+ assert np.all(out == identity)
+ assert out.dtype == array.dtype
+
+ def test_empty(self):
+ for f, tgt_value in zip(self.nanfuncs, [0, 1]):
+ mat = np.zeros((0, 3))
+ tgt = tgt_value * np.ones((0, 3))
+ res = f(mat, axis=0)
+ assert_equal(res, tgt)
+ tgt = mat
+ res = f(mat, axis=1)
+ assert_equal(res, tgt)
+ tgt = np.zeros(0)
+ res = f(mat, axis=None)
+ assert_equal(res, tgt)
+
+ def test_keepdims(self):
+ for f, g in zip(self.nanfuncs, self.stdfuncs):
+ mat = np.eye(3)
+ for axis in [None, 0, 1]:
+ tgt = f(mat, axis=axis, out=None)
+ res = g(mat, axis=axis, out=None)
+ assert_(res.ndim == tgt.ndim)
+
+ for f in self.nanfuncs:
+ d = np.ones((3, 5, 7, 11))
+ # Randomly set some elements to NaN:
+ rs = np.random.RandomState(0)
+ d[rs.rand(*d.shape) < 0.5] = np.nan
+ res = f(d, axis=None)
+ assert_equal(res.shape, (1155,))
+ for axis in np.arange(4):
+ res = f(d, axis=axis)
+ assert_equal(res.shape, (3, 5, 7, 11))
+
+ def test_result_values(self):
+ for axis in (-2, -1, 0, 1, None):
+ tgt = np.cumprod(_ndat_ones, axis=axis)
+ res = np.nancumprod(_ndat, axis=axis)
+ assert_almost_equal(res, tgt)
+ tgt = np.cumsum(_ndat_zeros, axis=axis)
+ res = np.nancumsum(_ndat, axis=axis)
+ assert_almost_equal(res, tgt)
+
+ def test_out(self):
+ mat = np.eye(3)
+ for nf, rf in zip(self.nanfuncs, self.stdfuncs):
+ resout = np.eye(3)
+ for axis in (-2, -1, 0, 1):
+ tgt = rf(mat, axis=axis)
+ res = nf(mat, axis=axis, out=resout)
+ assert_almost_equal(res, resout)
+ assert_almost_equal(res, tgt)
+
+
+class TestNanFunctions_MeanVarStd(SharedNanFunctionsTestsMixin):
+
+ nanfuncs = [np.nanmean, np.nanvar, np.nanstd]
+ stdfuncs = [np.mean, np.var, np.std]
+
+ def test_dtype_error(self):
+ for f in self.nanfuncs:
+ for dtype in [np.bool, np.int_, np.object_]:
+ assert_raises(TypeError, f, _ndat, axis=1, dtype=dtype)
+
+ def test_out_dtype_error(self):
+ for f in self.nanfuncs:
+ for dtype in [np.bool, np.int_, np.object_]:
+ out = np.empty(_ndat.shape[0], dtype=dtype)
+ assert_raises(TypeError, f, _ndat, axis=1, out=out)
+
+ def test_ddof(self):
+ nanfuncs = [np.nanvar, np.nanstd]
+ stdfuncs = [np.var, np.std]
+ for nf, rf in zip(nanfuncs, stdfuncs):
+ for ddof in [0, 1]:
+ tgt = [rf(d, ddof=ddof) for d in _rdat]
+ res = nf(_ndat, axis=1, ddof=ddof)
+ assert_almost_equal(res, tgt)
+
+ def test_ddof_too_big(self):
+ nanfuncs = [np.nanvar, np.nanstd]
+ stdfuncs = [np.var, np.std]
+ dsize = [len(d) for d in _rdat]
+ for nf, rf in zip(nanfuncs, stdfuncs):
+ for ddof in range(5):
+ with suppress_warnings() as sup:
+ sup.record(RuntimeWarning)
+ sup.filter(ComplexWarning)
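+                    # dof <= 0 (ddof >= number of non-NaN values) makes a row
+                    # NaN and triggers a single RuntimeWarning.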
+ tgt = [ddof >= d for d in dsize]
+ res = nf(_ndat, axis=1, ddof=ddof)
+ assert_equal(np.isnan(res), tgt)
+ if any(tgt):
+ assert_(len(sup.log) == 1)
+ else:
+ assert_(len(sup.log) == 0)
+
+ @pytest.mark.parametrize("axis", [None, 0, 1])
+ @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"])
+ @pytest.mark.parametrize("array", [
+ np.array(np.nan),
+ np.full((3, 3), np.nan),
+ ], ids=["0d", "2d"])
+ def test_allnans(self, axis, dtype, array):
+ if axis is not None and array.ndim == 0:
+ pytest.skip("`axis != None` not supported for 0d arrays")
+
+ array = array.astype(dtype)
+ match = "(Degrees of freedom <= 0 for slice.)|(Mean of empty slice)"
+ for func in self.nanfuncs:
+ with pytest.warns(RuntimeWarning, match=match):
+ out = func(array, axis=axis)
+ assert np.isnan(out).all()
+
+ # `nanvar` and `nanstd` convert complex inputs to their
+ # corresponding floating dtype
+ if func is np.nanmean:
+ assert out.dtype == array.dtype
+ else:
+ assert out.dtype == np.abs(array).dtype
+
+ def test_empty(self):
+ mat = np.zeros((0, 3))
+ for f in self.nanfuncs:
+ for axis in [0, None]:
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter('always')
+ assert_(np.isnan(f(mat, axis=axis)).all())
+ assert_(len(w) == 1)
+ assert_(issubclass(w[0].category, RuntimeWarning))
+ for axis in [1]:
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter('always')
+ assert_equal(f(mat, axis=axis), np.zeros([]))
+ assert_(len(w) == 0)
+
+ @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"])
+ def test_where(self, dtype):
+ ar = np.arange(9).reshape(3, 3).astype(dtype)
+ ar[0, :] = np.nan
+ where = np.ones_like(ar, dtype=np.bool)
+ where[:, 0] = False
+
+ for f, f_std in zip(self.nanfuncs, self.stdfuncs):
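+            # ar[where] flattens the selected (col 1-2) entries; the first two
+            # are row 0's NaNs, so [2:] keeps only the valid values.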
+ reference = f_std(ar[where][2:])
+ dtype_reference = dtype if f is np.nanmean else ar.real.dtype
+
+ ret = f(ar, where=where)
+ assert ret.dtype == dtype_reference
+ np.testing.assert_allclose(ret, reference)
+
+ def test_nanstd_with_mean_keyword(self):
+ # Setting the seed to make the test reproducible
+ rng = np.random.RandomState(1234)
+ A = rng.randn(10, 20, 5) + 0.5
+ A[:, 5, :] = np.nan
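+        # `mean=` hands nanstd a precomputed per-slice mean so it can skip its
+        # own mean pass; the result must match the plain nanstd call below.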
+
+ mean_out = np.zeros((10, 1, 5))
+ std_out = np.zeros((10, 1, 5))
+
+ mean = np.nanmean(A,
+ out=mean_out,
+ axis=1,
+ keepdims=True)
+
+ # The returned object should be the object specified during calling
+ assert mean_out is mean
+
+ std = np.nanstd(A,
+ out=std_out,
+ axis=1,
+ keepdims=True,
+ mean=mean)
+
+ # The returned object should be the object specified during calling
+ assert std_out is std
+
+ # Shape of returned mean and std should be same
+ assert std.shape == mean.shape
+ assert std.shape == (10, 1, 5)
+
+ # Output should be the same as from the individual algorithms
+ std_old = np.nanstd(A, axis=1, keepdims=True)
+
+ assert std_old.shape == mean.shape
+ assert_almost_equal(std, std_old)
+
+
+_TIME_UNITS = (
+ "Y", "M", "W", "D", "h", "m", "s", "ms", "us", "ns", "ps", "fs", "as"
+)
+
+# All `inexact` + `timedelta64` type codes
+_TYPE_CODES = list(np.typecodes["AllFloat"])
+_TYPE_CODES += [f"m8[{unit}]" for unit in _TIME_UNITS]
+
+
+class TestNanFunctions_Median:
+
+ def test_mutation(self):
+ # Check that passed array is not modified.
+ ndat = _ndat.copy()
+ np.nanmedian(ndat)
+ assert_equal(ndat, _ndat)
+
+ def test_keepdims(self):
+ mat = np.eye(3)
+ for axis in [None, 0, 1]:
+ tgt = np.median(mat, axis=axis, out=None, overwrite_input=False)
+ res = np.nanmedian(mat, axis=axis, out=None, overwrite_input=False)
+ assert_(res.ndim == tgt.ndim)
+
+ d = np.ones((3, 5, 7, 11))
+ # Randomly set some elements to NaN:
+ w = np.random.random((4, 200)) * np.array(d.shape)[:, None]
+ w = w.astype(np.intp)
+ d[tuple(w)] = np.nan
+ with suppress_warnings() as sup:
+ sup.filter(RuntimeWarning)
+ res = np.nanmedian(d, axis=None, keepdims=True)
+ assert_equal(res.shape, (1, 1, 1, 1))
+ res = np.nanmedian(d, axis=(0, 1), keepdims=True)
+ assert_equal(res.shape, (1, 1, 7, 11))
+ res = np.nanmedian(d, axis=(0, 3), keepdims=True)
+ assert_equal(res.shape, (1, 5, 7, 1))
+ res = np.nanmedian(d, axis=(1,), keepdims=True)
+ assert_equal(res.shape, (3, 1, 7, 11))
+ res = np.nanmedian(d, axis=(0, 1, 2, 3), keepdims=True)
+ assert_equal(res.shape, (1, 1, 1, 1))
+ res = np.nanmedian(d, axis=(0, 1, 3), keepdims=True)
+ assert_equal(res.shape, (1, 1, 7, 1))
+
+ @pytest.mark.parametrize(
+ argnames='axis',
+ argvalues=[
+ None,
+ 1,
+ (1, ),
+ (0, 1),
+ (-3, -1),
+ ]
+ )
+ @pytest.mark.filterwarnings("ignore:All-NaN slice:RuntimeWarning")
+ def test_keepdims_out(self, axis):
+ d = np.ones((3, 5, 7, 11))
+ # Randomly set some elements to NaN:
+ w = np.random.random((4, 200)) * np.array(d.shape)[:, None]
+ w = w.astype(np.intp)
+ d[tuple(w)] = np.nan
+ if axis is None:
+ shape_out = (1,) * d.ndim
+ else:
+ axis_norm = normalize_axis_tuple(axis, d.ndim)
+ shape_out = tuple(
+ 1 if i in axis_norm else d.shape[i] for i in range(d.ndim))
+ out = np.empty(shape_out)
+ result = np.nanmedian(d, axis=axis, keepdims=True, out=out)
+ assert result is out
+ assert_equal(result.shape, shape_out)
+
+ def test_out(self):
+ mat = np.random.rand(3, 3)
+ nan_mat = np.insert(mat, [0, 2], np.nan, axis=1)
+ resout = np.zeros(3)
+ tgt = np.median(mat, axis=1)
+ res = np.nanmedian(nan_mat, axis=1, out=resout)
+ assert_almost_equal(res, resout)
+ assert_almost_equal(res, tgt)
+ # 0-d output:
+ resout = np.zeros(())
+ tgt = np.median(mat, axis=None)
+ res = np.nanmedian(nan_mat, axis=None, out=resout)
+ assert_almost_equal(res, resout)
+ assert_almost_equal(res, tgt)
+ res = np.nanmedian(nan_mat, axis=(0, 1), out=resout)
+ assert_almost_equal(res, resout)
+ assert_almost_equal(res, tgt)
+
+ def test_small_large(self):
+ # test the small and large code paths, current cutoff 400 elements
+ for s in [5, 20, 51, 200, 1000]:
+ d = np.random.randn(4, s)
+ # Randomly set some elements to NaN:
+ w = np.random.randint(0, d.size, size=d.size // 5)
+ d.ravel()[w] = np.nan
+ d[:, 0] = 1. # ensure at least one good value
+ # use normal median without nans to compare
+ tgt = []
+ for x in d:
+ nonan = np.compress(~np.isnan(x), x)
+ tgt.append(np.median(nonan, overwrite_input=True))
+
+ assert_array_equal(np.nanmedian(d, axis=-1), tgt)
+
+ def test_result_values(self):
+ tgt = [np.median(d) for d in _rdat]
+ res = np.nanmedian(_ndat, axis=1)
+ assert_almost_equal(res, tgt)
+
+ @pytest.mark.parametrize("axis", [None, 0, 1])
+ @pytest.mark.parametrize("dtype", _TYPE_CODES)
+ def test_allnans(self, dtype, axis):
+ mat = np.full((3, 3), np.nan).astype(dtype)
+ with suppress_warnings() as sup:
+ sup.record(RuntimeWarning)
+
+ output = np.nanmedian(mat, axis=axis)
+ assert output.dtype == mat.dtype
+ assert np.isnan(output).all()
+
+ if axis is None:
+ assert_(len(sup.log) == 1)
+ else:
+ assert_(len(sup.log) == 3)
+
+ # Check scalar
+ scalar = np.array(np.nan).astype(dtype)[()]
+ output_scalar = np.nanmedian(scalar)
+ assert output_scalar.dtype == scalar.dtype
+ assert np.isnan(output_scalar)
+
+ if axis is None:
+ assert_(len(sup.log) == 2)
+ else:
+ assert_(len(sup.log) == 4)
+
+ def test_empty(self):
+ mat = np.zeros((0, 3))
+ for axis in [0, None]:
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter('always')
+ assert_(np.isnan(np.nanmedian(mat, axis=axis)).all())
+ assert_(len(w) == 1)
+ assert_(issubclass(w[0].category, RuntimeWarning))
+ for axis in [1]:
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter('always')
+ assert_equal(np.nanmedian(mat, axis=axis), np.zeros([]))
+ assert_(len(w) == 0)
+
+ def test_scalar(self):
+ assert_(np.nanmedian(0.) == 0.)
+
+ def test_extended_axis_invalid(self):
+ d = np.ones((3, 5, 7, 11))
+ assert_raises(AxisError, np.nanmedian, d, axis=-5)
+ assert_raises(AxisError, np.nanmedian, d, axis=(0, -5))
+ assert_raises(AxisError, np.nanmedian, d, axis=4)
+ assert_raises(AxisError, np.nanmedian, d, axis=(0, 4))
+ assert_raises(ValueError, np.nanmedian, d, axis=(1, 1))
+
+ def test_float_special(self):
+ with suppress_warnings() as sup:
+ sup.filter(RuntimeWarning)
+ for inf in [np.inf, -np.inf]:
+ a = np.array([[inf, np.nan], [np.nan, np.nan]])
+ assert_equal(np.nanmedian(a, axis=0), [inf, np.nan])
+ assert_equal(np.nanmedian(a, axis=1), [inf, np.nan])
+ assert_equal(np.nanmedian(a), inf)
+
+ # minimum fill value check
+ a = np.array([[np.nan, np.nan, inf],
+ [np.nan, np.nan, inf]])
+ assert_equal(np.nanmedian(a), inf)
+ assert_equal(np.nanmedian(a, axis=0), [np.nan, np.nan, inf])
+ assert_equal(np.nanmedian(a, axis=1), inf)
+
+ # no mask path
+ a = np.array([[inf, inf], [inf, inf]])
+ assert_equal(np.nanmedian(a, axis=1), inf)
+
+ a = np.array([[inf, 7, -inf, -9],
+ [-10, np.nan, np.nan, 5],
+ [4, np.nan, np.nan, inf]],
+ dtype=np.float32)
+ if inf > 0:
+ assert_equal(np.nanmedian(a, axis=0), [4., 7., -inf, 5.])
+ assert_equal(np.nanmedian(a), 4.5)
+ else:
+ assert_equal(np.nanmedian(a, axis=0), [-10., 7., -inf, -9.])
+ assert_equal(np.nanmedian(a), -2.5)
+ assert_equal(np.nanmedian(a, axis=-1), [-1., -2.5, inf])
+
+ for i in range(10):
+ for j in range(1, 10):
+ a = np.array([([np.nan] * i) + ([inf] * j)] * 2)
+ assert_equal(np.nanmedian(a), inf)
+ assert_equal(np.nanmedian(a, axis=1), inf)
+ assert_equal(np.nanmedian(a, axis=0),
+ ([np.nan] * i) + [inf] * j)
+
+ a = np.array([([np.nan] * i) + ([-inf] * j)] * 2)
+ assert_equal(np.nanmedian(a), -inf)
+ assert_equal(np.nanmedian(a, axis=1), -inf)
+ assert_equal(np.nanmedian(a, axis=0),
+ ([np.nan] * i) + [-inf] * j)
+
+
+class TestNanFunctions_Percentile:
+
+ def test_mutation(self):
+ # Check that passed array is not modified.
+ ndat = _ndat.copy()
+ np.nanpercentile(ndat, 30)
+ assert_equal(ndat, _ndat)
+
+ def test_keepdims(self):
+ mat = np.eye(3)
+ for axis in [None, 0, 1]:
+ tgt = np.percentile(mat, 70, axis=axis, out=None,
+ overwrite_input=False)
+ res = np.nanpercentile(mat, 70, axis=axis, out=None,
+ overwrite_input=False)
+ assert_(res.ndim == tgt.ndim)
+
+ d = np.ones((3, 5, 7, 11))
+ # Randomly set some elements to NaN:
+ w = np.random.random((4, 200)) * np.array(d.shape)[:, None]
+ w = w.astype(np.intp)
+ d[tuple(w)] = np.nan
+ with suppress_warnings() as sup:
+ sup.filter(RuntimeWarning)
+ res = np.nanpercentile(d, 90, axis=None, keepdims=True)
+ assert_equal(res.shape, (1, 1, 1, 1))
+ res = np.nanpercentile(d, 90, axis=(0, 1), keepdims=True)
+ assert_equal(res.shape, (1, 1, 7, 11))
+ res = np.nanpercentile(d, 90, axis=(0, 3), keepdims=True)
+ assert_equal(res.shape, (1, 5, 7, 1))
+ res = np.nanpercentile(d, 90, axis=(1,), keepdims=True)
+ assert_equal(res.shape, (3, 1, 7, 11))
+ res = np.nanpercentile(d, 90, axis=(0, 1, 2, 3), keepdims=True)
+ assert_equal(res.shape, (1, 1, 1, 1))
+ res = np.nanpercentile(d, 90, axis=(0, 1, 3), keepdims=True)
+ assert_equal(res.shape, (1, 1, 7, 1))
+
+ @pytest.mark.parametrize('q', [7, [1, 7]])
+ @pytest.mark.parametrize(
+ argnames='axis',
+ argvalues=[
+ None,
+ 1,
+ (1,),
+ (0, 1),
+ (-3, -1),
+ ]
+ )
+ @pytest.mark.filterwarnings("ignore:All-NaN slice:RuntimeWarning")
+ def test_keepdims_out(self, q, axis):
+ d = np.ones((3, 5, 7, 11))
+ # Randomly set some elements to NaN:
+ w = np.random.random((4, 200)) * np.array(d.shape)[:, None]
+ w = w.astype(np.intp)
+ d[tuple(w)] = np.nan
+ if axis is None:
+ shape_out = (1,) * d.ndim
+ else:
+ axis_norm = normalize_axis_tuple(axis, d.ndim)
+ shape_out = tuple(
+ 1 if i in axis_norm else d.shape[i] for i in range(d.ndim))
+ shape_out = np.shape(q) + shape_out
+
+ out = np.empty(shape_out)
+ result = np.nanpercentile(d, q, axis=axis, keepdims=True, out=out)
+ assert result is out
+ assert_equal(result.shape, shape_out)
+
+ @pytest.mark.parametrize("weighted", [False, True])
+ def test_out(self, weighted):
+ mat = np.random.rand(3, 3)
+ nan_mat = np.insert(mat, [0, 2], np.nan, axis=1)
+ resout = np.zeros(3)
+ if weighted:
+ w_args = {"weights": np.ones_like(mat), "method": "inverted_cdf"}
+ nan_w_args = {
+ "weights": np.ones_like(nan_mat), "method": "inverted_cdf"
+ }
+ else:
+ w_args = {}
+ nan_w_args = {}
+ tgt = np.percentile(mat, 42, axis=1, **w_args)
+ res = np.nanpercentile(nan_mat, 42, axis=1, out=resout, **nan_w_args)
+ assert_almost_equal(res, resout)
+ assert_almost_equal(res, tgt)
+ # 0-d output:
+ resout = np.zeros(())
+ tgt = np.percentile(mat, 42, axis=None, **w_args)
+ res = np.nanpercentile(
+ nan_mat, 42, axis=None, out=resout, **nan_w_args
+ )
+ assert_almost_equal(res, resout)
+ assert_almost_equal(res, tgt)
+ res = np.nanpercentile(
+ nan_mat, 42, axis=(0, 1), out=resout, **nan_w_args
+ )
+ assert_almost_equal(res, resout)
+ assert_almost_equal(res, tgt)
+
+ def test_complex(self):
+ arr_c = np.array([0.5 + 3.0j, 2.1 + 0.5j, 1.6 + 2.3j], dtype='G')
+ assert_raises(TypeError, np.nanpercentile, arr_c, 0.5)
+ arr_c = np.array([0.5 + 3.0j, 2.1 + 0.5j, 1.6 + 2.3j], dtype='D')
+ assert_raises(TypeError, np.nanpercentile, arr_c, 0.5)
+ arr_c = np.array([0.5 + 3.0j, 2.1 + 0.5j, 1.6 + 2.3j], dtype='F')
+ assert_raises(TypeError, np.nanpercentile, arr_c, 0.5)
+
+ @pytest.mark.parametrize("weighted", [False, True])
+ @pytest.mark.parametrize("use_out", [False, True])
+ def test_result_values(self, weighted, use_out):
+ if weighted:
+ percentile = partial(np.percentile, method="inverted_cdf")
+ nanpercentile = partial(np.nanpercentile, method="inverted_cdf")
+
+ def gen_weights(d):
+ return np.ones_like(d)
+
+ else:
+ percentile = np.percentile
+ nanpercentile = np.nanpercentile
+
+ def gen_weights(d):
+ return None
+
+ tgt = [percentile(d, 28, weights=gen_weights(d)) for d in _rdat]
+ out = np.empty_like(tgt) if use_out else None
+ res = nanpercentile(_ndat, 28, axis=1,
+ weights=gen_weights(_ndat), out=out)
+ assert_almost_equal(res, tgt)
+ # Transpose the array to fit the output convention of numpy.percentile
+ tgt = np.transpose([percentile(d, (28, 98), weights=gen_weights(d))
+ for d in _rdat])
+ out = np.empty_like(tgt) if use_out else None
+ res = nanpercentile(_ndat, (28, 98), axis=1,
+ weights=gen_weights(_ndat), out=out)
+ assert_almost_equal(res, tgt)
+
+ @pytest.mark.parametrize("axis", [None, 0, 1])
+ @pytest.mark.parametrize("dtype", np.typecodes["Float"])
+ @pytest.mark.parametrize("array", [
+ np.array(np.nan),
+ np.full((3, 3), np.nan),
+ ], ids=["0d", "2d"])
+ def test_allnans(self, axis, dtype, array):
+ if axis is not None and array.ndim == 0:
+ pytest.skip("`axis != None` not supported for 0d arrays")
+
+ array = array.astype(dtype)
+ with pytest.warns(RuntimeWarning, match="All-NaN slice encountered"):
+ out = np.nanpercentile(array, 60, axis=axis)
+ assert np.isnan(out).all()
+ assert out.dtype == array.dtype
+
+ def test_empty(self):
+ mat = np.zeros((0, 3))
+ for axis in [0, None]:
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter('always')
+ assert_(np.isnan(np.nanpercentile(mat, 40, axis=axis)).all())
+ assert_(len(w) == 1)
+ assert_(issubclass(w[0].category, RuntimeWarning))
+ for axis in [1]:
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter('always')
+ assert_equal(np.nanpercentile(mat, 40, axis=axis), np.zeros([]))
+ assert_(len(w) == 0)
+
+ def test_scalar(self):
+ assert_equal(np.nanpercentile(0., 100), 0.)
+ a = np.arange(6)
+ r = np.nanpercentile(a, 50, axis=0)
+ assert_equal(r, 2.5)
+ assert_(np.isscalar(r))
+
+ def test_extended_axis_invalid(self):
+ d = np.ones((3, 5, 7, 11))
+ assert_raises(AxisError, np.nanpercentile, d, q=5, axis=-5)
+ assert_raises(AxisError, np.nanpercentile, d, q=5, axis=(0, -5))
+ assert_raises(AxisError, np.nanpercentile, d, q=5, axis=4)
+ assert_raises(AxisError, np.nanpercentile, d, q=5, axis=(0, 4))
+ assert_raises(ValueError, np.nanpercentile, d, q=5, axis=(1, 1))
+
+ def test_multiple_percentiles(self):
+ perc = [50, 100]
+ mat = np.ones((4, 3))
+ nan_mat = np.nan * mat
+        # For checking consistency in the higher-dimensional case
+ large_mat = np.ones((3, 4, 5))
+ large_mat[:, 0:2:4, :] = 0
+ large_mat[:, :, 3:] *= 2
+ for axis in [None, 0, 1]:
+ for keepdim in [False, True]:
+ with suppress_warnings() as sup:
+ sup.filter(RuntimeWarning, "All-NaN slice encountered")
+ val = np.percentile(mat, perc, axis=axis, keepdims=keepdim)
+ nan_val = np.nanpercentile(nan_mat, perc, axis=axis,
+ keepdims=keepdim)
+ assert_equal(nan_val.shape, val.shape)
+
+ val = np.percentile(large_mat, perc, axis=axis,
+ keepdims=keepdim)
+ nan_val = np.nanpercentile(large_mat, perc, axis=axis,
+ keepdims=keepdim)
+ assert_equal(nan_val, val)
+
+ megamat = np.ones((3, 4, 5, 6))
+ assert_equal(
+ np.nanpercentile(megamat, perc, axis=(1, 2)).shape, (2, 3, 6)
+ )
+
+ @pytest.mark.parametrize("nan_weight", [0, 1, 2, 3, 1e200])
+ def test_nan_value_with_weight(self, nan_weight):
+ x = [1, np.nan, 2, 3]
+ result = np.float64(2.0)
+ q_unweighted = np.nanpercentile(x, 50, method="inverted_cdf")
+ assert_equal(q_unweighted, result)
+
+ # The weight value at the nan position should not matter.
+ w = [1.0, nan_weight, 1.0, 1.0]
+ q_weighted = np.nanpercentile(x, 50, weights=w, method="inverted_cdf")
+ assert_equal(q_weighted, result)
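+
+    # Illustrative sketch (not part of the upstream suite): under
+    # "inverted_cdf", an integer weight behaves like sample repetition.
+    def test_weight_as_repetition_sketch(self):
+        q_rep = np.percentile([1., 2., 2., 3.], 50, method="inverted_cdf")
+        q_w = np.percentile(
+            [1., 2., 3.], 50, weights=[1, 2, 1], method="inverted_cdf")
+        assert_equal(q_w, q_rep)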
+
+ @pytest.mark.parametrize("axis", [0, 1, 2])
+ def test_nan_value_with_weight_ndim(self, axis):
+ # Create a multi-dimensional array to test
+ np.random.seed(1)
+ x_no_nan = np.random.random(size=(100, 99, 2))
+        # Set some positions to NaN (not particularly cleverly) so that
+        # every slice still contains at least one non-NaN value.
+ x = x_no_nan.copy()
+ x[np.arange(99), np.arange(99), 0] = np.nan
+
+ p = np.array([[20., 50., 30], [70, 33, 80]])
+
+ # We just use ones as weights, but replace it with 0 or 1e200 at the
+ # NaN positions below.
+ weights = np.ones_like(x)
+
+        # For comparison, use the weighted normal percentile with zero
+        # weights at the NaN positions (and no NaNs in the data). This may
+        # not be strictly identical, but it should be close enough; it can
+        # only differ if a percentile falls exactly on a zero-weighted value.
+ weights[np.isnan(x)] = 0
+ p_expected = np.percentile(
+ x_no_nan, p, axis=axis, weights=weights, method="inverted_cdf")
+
+ p_unweighted = np.nanpercentile(
+ x, p, axis=axis, method="inverted_cdf")
+ # The normal and unweighted versions should be identical:
+ assert_equal(p_unweighted, p_expected)
+
+ weights[np.isnan(x)] = 1e200 # huge value, shouldn't matter
+ p_weighted = np.nanpercentile(
+ x, p, axis=axis, weights=weights, method="inverted_cdf")
+ assert_equal(p_weighted, p_expected)
+ # Also check with out passed:
+ out = np.empty_like(p_weighted)
+ res = np.nanpercentile(
+ x, p, axis=axis, weights=weights, out=out, method="inverted_cdf")
+
+ assert res is out
+ assert_equal(out, p_expected)
+
+
+class TestNanFunctions_Quantile:
+ # most of this is already tested by TestPercentile
+
+ @pytest.mark.parametrize("weighted", [False, True])
+ def test_regression(self, weighted):
+ ar = np.arange(24).reshape(2, 3, 4).astype(float)
+ ar[0][1] = np.nan
+ if weighted:
+ w_args = {"weights": np.ones_like(ar), "method": "inverted_cdf"}
+ else:
+ w_args = {}
+
+ assert_equal(np.nanquantile(ar, q=0.5, **w_args),
+ np.nanpercentile(ar, q=50, **w_args))
+ assert_equal(np.nanquantile(ar, q=0.5, axis=0, **w_args),
+ np.nanpercentile(ar, q=50, axis=0, **w_args))
+ assert_equal(np.nanquantile(ar, q=0.5, axis=1, **w_args),
+ np.nanpercentile(ar, q=50, axis=1, **w_args))
+ assert_equal(np.nanquantile(ar, q=[0.5], axis=1, **w_args),
+ np.nanpercentile(ar, q=[50], axis=1, **w_args))
+ assert_equal(np.nanquantile(ar, q=[0.25, 0.5, 0.75], axis=1, **w_args),
+ np.nanpercentile(ar, q=[25, 50, 75], axis=1, **w_args))
+
+ def test_basic(self):
+ x = np.arange(8) * 0.5
+ assert_equal(np.nanquantile(x, 0), 0.)
+ assert_equal(np.nanquantile(x, 1), 3.5)
+ assert_equal(np.nanquantile(x, 0.5), 1.75)
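+
+    # Illustrative sketch (not part of the upstream suite): NaNs are simply
+    # dropped before the quantile is computed.
+    def test_nan_ignored_sketch(self):
+        assert_equal(np.nanquantile([1., np.nan, 3.], 0.5), 2.0)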
+
+ def test_complex(self):
+ arr_c = np.array([0.5 + 3.0j, 2.1 + 0.5j, 1.6 + 2.3j], dtype='G')
+ assert_raises(TypeError, np.nanquantile, arr_c, 0.5)
+ arr_c = np.array([0.5 + 3.0j, 2.1 + 0.5j, 1.6 + 2.3j], dtype='D')
+ assert_raises(TypeError, np.nanquantile, arr_c, 0.5)
+ arr_c = np.array([0.5 + 3.0j, 2.1 + 0.5j, 1.6 + 2.3j], dtype='F')
+ assert_raises(TypeError, np.nanquantile, arr_c, 0.5)
+
+ def test_no_p_overwrite(self):
+ # this is worth retesting, because quantile does not make a copy
+ p0 = np.array([0, 0.75, 0.25, 0.5, 1.0])
+ p = p0.copy()
+ np.nanquantile(np.arange(100.), p, method="midpoint")
+ assert_array_equal(p, p0)
+
+ p0 = p0.tolist()
+ p = p.tolist()
+ np.nanquantile(np.arange(100.), p, method="midpoint")
+ assert_array_equal(p, p0)
+
+ @pytest.mark.parametrize("axis", [None, 0, 1])
+ @pytest.mark.parametrize("dtype", np.typecodes["Float"])
+ @pytest.mark.parametrize("array", [
+ np.array(np.nan),
+ np.full((3, 3), np.nan),
+ ], ids=["0d", "2d"])
+ def test_allnans(self, axis, dtype, array):
+ if axis is not None and array.ndim == 0:
+ pytest.skip("`axis != None` not supported for 0d arrays")
+
+ array = array.astype(dtype)
+ with pytest.warns(RuntimeWarning, match="All-NaN slice encountered"):
+ out = np.nanquantile(array, 1, axis=axis)
+ assert np.isnan(out).all()
+ assert out.dtype == array.dtype
+
+@pytest.mark.parametrize("arr, expected", [
+ # array of floats with some nans
+ (np.array([np.nan, 5.0, np.nan, np.inf]),
+ np.array([False, True, False, True])),
+ # int64 array that can't possibly have nans
+ (np.array([1, 5, 7, 9], dtype=np.int64),
+ True),
+ # bool array that can't possibly have nans
+ (np.array([False, True, False, True]),
+ True),
+ # 2-D complex array with nans
+ (np.array([[np.nan, 5.0],
+ [np.nan, np.inf]], dtype=np.complex64),
+ np.array([[False, True],
+ [False, True]])),
+ ])
+def test__nan_mask(arr, expected):
+ for out in [None, np.empty(arr.shape, dtype=np.bool)]:
+ actual = _nan_mask(arr, out=out)
+ assert_equal(actual, expected)
+        # the comparison above doesn't distinguish the scalar True from an
+        # array of True values; we want the scalar True for types that
+        # can't possibly contain NaN
+ if type(expected) is not np.ndarray:
+ assert actual is True
+
+
+def test__replace_nan():
+ """ Test that _replace_nan returns the original array if there are no
+ NaNs, not a copy.
+ """
+ for dtype in [np.bool, np.int32, np.int64]:
+ arr = np.array([0, 1], dtype=dtype)
+ result, mask = _replace_nan(arr, 0)
+ assert mask is None
+ # do not make a copy if there are no nans
+ assert result is arr
+
+ for dtype in [np.float32, np.float64]:
+ arr = np.array([0, 1], dtype=dtype)
+ result, mask = _replace_nan(arr, 2)
+ assert (mask == False).all()
+ # mask is not None, so we make a copy
+ assert result is not arr
+ assert_equal(result, arr)
+
+ arr_nan = np.array([0, 1, np.nan], dtype=dtype)
+ result_nan, mask_nan = _replace_nan(arr_nan, 2)
+ assert_equal(mask_nan, np.array([False, False, True]))
+ assert result_nan is not arr_nan
+ assert_equal(result_nan, np.array([0, 1, 2]))
+ assert np.isnan(arr_nan[-1])
+
+
+def test_memmap_takes_fast_route(tmpdir):
+ # We want memory mapped arrays to take the fast route through nanmax,
+ # which avoids creating a mask by using fmax.reduce (see gh-28721). So we
+ # check that on bad input, the error is from fmax (rather than maximum).
+ a = np.arange(10., dtype=float)
+ with open(tmpdir.join("data.bin"), "w+b") as fh:
+ fh.write(a.tobytes())
+ mm = np.memmap(fh, dtype=a.dtype, shape=a.shape)
+ with pytest.raises(ValueError, match="reduction operation fmax"):
+ np.nanmax(mm, out=np.zeros(2))
+ # For completeness, same for nanmin.
+ with pytest.raises(ValueError, match="reduction operation fmin"):
+ np.nanmin(mm, out=np.zeros(2))
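+
+
+def test_fmax_ignores_nan_sketch():
+    # Illustrative sketch (not part of the upstream suite), using only
+    # public numpy APIs: np.fmax prefers the non-NaN operand while
+    # np.maximum propagates NaN, which is what makes the mask-free fast
+    # path checked above possible.
+    a = np.array([1.0, np.nan, 3.0])
+    b = np.array([2.0, 2.0, 2.0])
+    assert_array_equal(np.fmax(a, b), [2.0, 2.0, 3.0])
+    assert np.isnan(np.maximum(a, b)[1])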
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_packbits.py b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_packbits.py
new file mode 100644
index 0000000..0b0e9d1
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_packbits.py
@@ -0,0 +1,376 @@
+from itertools import chain
+
+import pytest
+
+import numpy as np
+from numpy.testing import assert_array_equal, assert_equal, assert_raises
+
+
+def test_packbits():
+ # Copied from the docstring.
+ a = [[[1, 0, 1], [0, 1, 0]],
+ [[1, 1, 0], [0, 0, 1]]]
+ for dt in '?bBhHiIlLqQ':
+ arr = np.array(a, dtype=dt)
+ b = np.packbits(arr, axis=-1)
+ assert_equal(b.dtype, np.uint8)
+ assert_array_equal(b, np.array([[[160], [64]], [[192], [32]]]))
+
+ assert_raises(TypeError, np.packbits, np.array(a, dtype=float))
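+
+
+# Illustrative sketch (not part of the upstream suite): with the default
+# 'big' bitorder, packbits fills bits MSB-first and zero-pads the final
+# byte, so [1, 0, 1] packs to 0b10100000 == 160.
+def test_packbits_msb_first_sketch():
+    assert_array_equal(np.packbits([1, 0, 1]), [0b10100000])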
+
+
+def test_packbits_empty():
+ shapes = [
+ (0,), (10, 20, 0), (10, 0, 20), (0, 10, 20), (20, 0, 0), (0, 20, 0),
+ (0, 0, 20), (0, 0, 0),
+ ]
+ for dt in '?bBhHiIlLqQ':
+ for shape in shapes:
+ a = np.empty(shape, dtype=dt)
+ b = np.packbits(a)
+ assert_equal(b.dtype, np.uint8)
+ assert_equal(b.shape, (0,))
+
+
+def test_packbits_empty_with_axis():
+ # Original shapes and lists of packed shapes for different axes.
+ shapes = [
+ ((0,), [(0,)]),
+ ((10, 20, 0), [(2, 20, 0), (10, 3, 0), (10, 20, 0)]),
+ ((10, 0, 20), [(2, 0, 20), (10, 0, 20), (10, 0, 3)]),
+ ((0, 10, 20), [(0, 10, 20), (0, 2, 20), (0, 10, 3)]),
+ ((20, 0, 0), [(3, 0, 0), (20, 0, 0), (20, 0, 0)]),
+ ((0, 20, 0), [(0, 20, 0), (0, 3, 0), (0, 20, 0)]),
+ ((0, 0, 20), [(0, 0, 20), (0, 0, 20), (0, 0, 3)]),
+ ((0, 0, 0), [(0, 0, 0), (0, 0, 0), (0, 0, 0)]),
+ ]
+ for dt in '?bBhHiIlLqQ':
+ for in_shape, out_shapes in shapes:
+ for ax, out_shape in enumerate(out_shapes):
+ a = np.empty(in_shape, dtype=dt)
+ b = np.packbits(a, axis=ax)
+ assert_equal(b.dtype, np.uint8)
+ assert_equal(b.shape, out_shape)
+
+@pytest.mark.parametrize('bitorder', ('little', 'big'))
+def test_packbits_large(bitorder):
+ # test data large enough for 16 byte vectorization
+ a = np.array([1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0,
+ 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1,
+ 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0,
+ 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1,
+ 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1,
+ 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1,
+ 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1,
+ 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1,
+ 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0,
+ 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1,
+ 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0,
+ 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1,
+ 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0,
+ 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0,
+ 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0])
+ a = a.repeat(3)
+ for dtype in '?bBhHiIlLqQ':
+ arr = np.array(a, dtype=dtype)
+ b = np.packbits(arr, axis=None, bitorder=bitorder)
+ assert_equal(b.dtype, np.uint8)
+ r = [252, 127, 192, 3, 254, 7, 252, 0, 7, 31, 240, 0, 28, 1, 255, 252,
+ 113, 248, 3, 255, 192, 28, 15, 192, 28, 126, 0, 224, 127, 255,
+ 227, 142, 7, 31, 142, 63, 28, 126, 56, 227, 240, 0, 227, 128, 63,
+ 224, 14, 56, 252, 112, 56, 255, 241, 248, 3, 240, 56, 224, 112,
+ 63, 255, 255, 199, 224, 14, 0, 31, 143, 192, 3, 255, 199, 0, 1,
+ 255, 224, 1, 255, 252, 126, 63, 0, 1, 192, 252, 14, 63, 0, 15,
+ 199, 252, 113, 255, 3, 128, 56, 252, 14, 7, 0, 113, 255, 255, 142, 56, 227,
+ 129, 248, 227, 129, 199, 31, 128]
+ if bitorder == 'big':
+ assert_array_equal(b, r)
+        # the roundtrip is exact after dropping the 4 bits of zero padding
+        # (the input length is not a multiple of 8)
+ assert_array_equal(np.unpackbits(b, bitorder=bitorder)[:-4], a)
+
+ # check last byte of different remainders (16 byte vectorization)
+ b = [np.packbits(arr[:-i], axis=None)[-1] for i in range(1, 16)]
+ assert_array_equal(b, [128, 128, 128, 31, 30, 28, 24, 16, 0, 0, 0, 199,
+ 198, 196, 192])
+
+ arr = arr.reshape(36, 25)
+ b = np.packbits(arr, axis=0)
+ assert_equal(b.dtype, np.uint8)
+ assert_array_equal(b, [[190, 186, 178, 178, 150, 215, 87, 83, 83, 195,
+ 199, 206, 204, 204, 140, 140, 136, 136, 8, 40, 105,
+ 107, 75, 74, 88],
+ [72, 216, 248, 241, 227, 195, 202, 90, 90, 83,
+ 83, 119, 127, 109, 73, 64, 208, 244, 189, 45,
+ 41, 104, 122, 90, 18],
+ [113, 120, 248, 216, 152, 24, 60, 52, 182, 150,
+ 150, 150, 146, 210, 210, 246, 255, 255, 223,
+ 151, 21, 17, 17, 131, 163],
+ [214, 210, 210, 64, 68, 5, 5, 1, 72, 88, 92,
+ 92, 78, 110, 39, 181, 149, 220, 222, 218, 218,
+ 202, 234, 170, 168],
+ [0, 128, 128, 192, 80, 112, 48, 160, 160, 224,
+ 240, 208, 144, 128, 160, 224, 240, 208, 144,
+ 144, 176, 240, 224, 192, 128]])
+
+ b = np.packbits(arr, axis=1)
+ assert_equal(b.dtype, np.uint8)
+ assert_array_equal(b, [[252, 127, 192, 0],
+ [ 7, 252, 15, 128],
+ [240, 0, 28, 0],
+ [255, 128, 0, 128],
+ [192, 31, 255, 128],
+ [142, 63, 0, 0],
+ [255, 240, 7, 0],
+ [ 7, 224, 14, 0],
+ [126, 0, 224, 0],
+ [255, 255, 199, 0],
+ [ 56, 28, 126, 0],
+ [113, 248, 227, 128],
+ [227, 142, 63, 0],
+ [ 0, 28, 112, 0],
+ [ 15, 248, 3, 128],
+ [ 28, 126, 56, 0],
+ [ 56, 255, 241, 128],
+ [240, 7, 224, 0],
+ [227, 129, 192, 128],
+ [255, 255, 254, 0],
+ [126, 0, 224, 0],
+ [ 3, 241, 248, 0],
+ [ 0, 255, 241, 128],
+ [128, 0, 255, 128],
+ [224, 1, 255, 128],
+ [248, 252, 126, 0],
+ [ 0, 7, 3, 128],
+ [224, 113, 248, 0],
+ [ 0, 252, 127, 128],
+ [142, 63, 224, 0],
+ [224, 14, 63, 0],
+ [ 7, 3, 128, 0],
+ [113, 255, 255, 128],
+ [ 28, 113, 199, 0],
+ [ 7, 227, 142, 0],
+ [ 14, 56, 252, 0]])
+
+ arr = arr.T.copy()
+ b = np.packbits(arr, axis=0)
+ assert_equal(b.dtype, np.uint8)
+ assert_array_equal(b, [[252, 7, 240, 255, 192, 142, 255, 7, 126, 255,
+ 56, 113, 227, 0, 15, 28, 56, 240, 227, 255,
+ 126, 3, 0, 128, 224, 248, 0, 224, 0, 142, 224,
+ 7, 113, 28, 7, 14],
+ [127, 252, 0, 128, 31, 63, 240, 224, 0, 255,
+ 28, 248, 142, 28, 248, 126, 255, 7, 129, 255,
+ 0, 241, 255, 0, 1, 252, 7, 113, 252, 63, 14,
+ 3, 255, 113, 227, 56],
+ [192, 15, 28, 0, 255, 0, 7, 14, 224, 199, 126,
+ 227, 63, 112, 3, 56, 241, 224, 192, 254, 224,
+ 248, 241, 255, 255, 126, 3, 248, 127, 224, 63,
+ 128, 255, 199, 142, 252],
+ [0, 128, 0, 128, 128, 0, 0, 0, 0, 0, 0, 128, 0,
+ 0, 128, 0, 128, 0, 128, 0, 0, 0, 128, 128,
+ 128, 0, 128, 0, 128, 0, 0, 0, 128, 0, 0, 0]])
+
+ b = np.packbits(arr, axis=1)
+ assert_equal(b.dtype, np.uint8)
+ assert_array_equal(b, [[190, 72, 113, 214, 0],
+ [186, 216, 120, 210, 128],
+ [178, 248, 248, 210, 128],
+ [178, 241, 216, 64, 192],
+ [150, 227, 152, 68, 80],
+ [215, 195, 24, 5, 112],
+ [ 87, 202, 60, 5, 48],
+ [ 83, 90, 52, 1, 160],
+ [ 83, 90, 182, 72, 160],
+ [195, 83, 150, 88, 224],
+ [199, 83, 150, 92, 240],
+ [206, 119, 150, 92, 208],
+ [204, 127, 146, 78, 144],
+ [204, 109, 210, 110, 128],
+ [140, 73, 210, 39, 160],
+ [140, 64, 246, 181, 224],
+ [136, 208, 255, 149, 240],
+ [136, 244, 255, 220, 208],
+ [ 8, 189, 223, 222, 144],
+ [ 40, 45, 151, 218, 144],
+ [105, 41, 21, 218, 176],
+ [107, 104, 17, 202, 240],
+ [ 75, 122, 17, 234, 224],
+ [ 74, 90, 131, 170, 192],
+ [ 88, 18, 163, 168, 128]])
+
+    # the result is unchanged if the input is multiplied by nonzero values
+ for dtype in 'bBhHiIlLqQ':
+ arr = np.array(a, dtype=dtype)
+ rnd = np.random.randint(low=np.iinfo(dtype).min,
+ high=np.iinfo(dtype).max, size=arr.size,
+ dtype=dtype)
+ rnd[rnd == 0] = 1
+ arr *= rnd.astype(dtype)
+ b = np.packbits(arr, axis=-1)
+ assert_array_equal(np.unpackbits(b)[:-4], a)
+
+ assert_raises(TypeError, np.packbits, np.array(a, dtype=float))
+
+
+def test_packbits_very_large():
+    # Test with some larger arrays (gh-8637). The code path is covered
+    # earlier, but a larger array makes a crash on the bug more likely.
+ for s in range(950, 1050):
+ for dt in '?bBhHiIlLqQ':
+ x = np.ones((200, s), dtype=bool)
+ np.packbits(x, axis=1)
+
+
+def test_unpackbits():
+ # Copied from the docstring.
+ a = np.array([[2], [7], [23]], dtype=np.uint8)
+ b = np.unpackbits(a, axis=1)
+ assert_equal(b.dtype, np.uint8)
+ assert_array_equal(b, np.array([[0, 0, 0, 0, 0, 0, 1, 0],
+ [0, 0, 0, 0, 0, 1, 1, 1],
+ [0, 0, 0, 1, 0, 1, 1, 1]]))
+
+def test_pack_unpack_order():
+ a = np.array([[2], [7], [23]], dtype=np.uint8)
+ b = np.unpackbits(a, axis=1)
+ assert_equal(b.dtype, np.uint8)
+ b_little = np.unpackbits(a, axis=1, bitorder='little')
+ b_big = np.unpackbits(a, axis=1, bitorder='big')
+ assert_array_equal(b, b_big)
+ assert_array_equal(a, np.packbits(b_little, axis=1, bitorder='little'))
+ assert_array_equal(b[:, ::-1], b_little)
+ assert_array_equal(a, np.packbits(b_big, axis=1, bitorder='big'))
+ assert_raises(ValueError, np.unpackbits, a, bitorder='r')
+ assert_raises(TypeError, np.unpackbits, a, bitorder=10)
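+
+
+# Illustrative sketch (not part of the upstream suite): 'little' emits the
+# least significant bit first, so 2 == 0b00000010 unpacks to [0, 1, 0, ...].
+def test_unpackbits_little_sketch():
+    b = np.unpackbits(np.array([2], dtype=np.uint8), bitorder='little')
+    assert_array_equal(b, [0, 1, 0, 0, 0, 0, 0, 0])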
+
+
+def test_unpackbits_empty():
+ a = np.empty((0,), dtype=np.uint8)
+ b = np.unpackbits(a)
+ assert_equal(b.dtype, np.uint8)
+ assert_array_equal(b, np.empty((0,)))
+
+
+def test_unpackbits_empty_with_axis():
+ # Lists of packed shapes for different axes and unpacked shapes.
+ shapes = [
+ ([(0,)], (0,)),
+ ([(2, 24, 0), (16, 3, 0), (16, 24, 0)], (16, 24, 0)),
+ ([(2, 0, 24), (16, 0, 24), (16, 0, 3)], (16, 0, 24)),
+ ([(0, 16, 24), (0, 2, 24), (0, 16, 3)], (0, 16, 24)),
+ ([(3, 0, 0), (24, 0, 0), (24, 0, 0)], (24, 0, 0)),
+ ([(0, 24, 0), (0, 3, 0), (0, 24, 0)], (0, 24, 0)),
+ ([(0, 0, 24), (0, 0, 24), (0, 0, 3)], (0, 0, 24)),
+ ([(0, 0, 0), (0, 0, 0), (0, 0, 0)], (0, 0, 0)),
+ ]
+ for in_shapes, out_shape in shapes:
+ for ax, in_shape in enumerate(in_shapes):
+ a = np.empty(in_shape, dtype=np.uint8)
+ b = np.unpackbits(a, axis=ax)
+ assert_equal(b.dtype, np.uint8)
+ assert_equal(b.shape, out_shape)
+
+
+def test_unpackbits_large():
+ # test all possible numbers via comparison to already tested packbits
+ d = np.arange(277, dtype=np.uint8)
+ assert_array_equal(np.packbits(np.unpackbits(d)), d)
+ assert_array_equal(np.packbits(np.unpackbits(d[::2])), d[::2])
+ d = np.tile(d, (3, 1))
+ assert_array_equal(np.packbits(np.unpackbits(d, axis=1), axis=1), d)
+ d = d.T.copy()
+ assert_array_equal(np.packbits(np.unpackbits(d, axis=0), axis=0), d)
+
+
+class TestCount:
+ x = np.array([
+ [1, 0, 1, 0, 0, 1, 0],
+ [0, 1, 1, 1, 0, 0, 0],
+ [0, 0, 1, 0, 0, 1, 1],
+ [1, 1, 0, 0, 0, 1, 1],
+ [1, 0, 1, 0, 1, 0, 1],
+ [0, 0, 1, 1, 1, 0, 0],
+ [0, 1, 0, 1, 0, 1, 0],
+ ], dtype=np.uint8)
+ padded1 = np.zeros(57, dtype=np.uint8)
+ padded1[:49] = x.ravel()
+ padded1b = np.zeros(57, dtype=np.uint8)
+ padded1b[:49] = x[::-1].copy().ravel()
+ padded2 = np.zeros((9, 9), dtype=np.uint8)
+ padded2[:7, :7] = x
+
+ @pytest.mark.parametrize('bitorder', ('little', 'big'))
+ @pytest.mark.parametrize('count', chain(range(58), range(-1, -57, -1)))
+ def test_roundtrip(self, bitorder, count):
+ if count < 0:
+ # one extra zero of padding
+ cutoff = count - 1
+ else:
+ cutoff = count
+ # test complete invertibility of packbits and unpackbits with count
+ packed = np.packbits(self.x, bitorder=bitorder)
+ unpacked = np.unpackbits(packed, count=count, bitorder=bitorder)
+ assert_equal(unpacked.dtype, np.uint8)
+ assert_array_equal(unpacked, self.padded1[:cutoff])
+
+ @pytest.mark.parametrize('kwargs', [
+ {}, {'count': None},
+ ])
+ def test_count(self, kwargs):
+ packed = np.packbits(self.x)
+ unpacked = np.unpackbits(packed, **kwargs)
+ assert_equal(unpacked.dtype, np.uint8)
+ assert_array_equal(unpacked, self.padded1[:-1])
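+
+    # Illustrative sketch (not part of the upstream suite): count trims the
+    # zero padding that packbits added, giving an exact roundtrip.
+    def test_count_exact_roundtrip_sketch(self):
+        bits = np.array([1, 0, 1], dtype=np.uint8)
+        packed = np.packbits(bits)
+        assert_array_equal(np.unpackbits(packed, count=3), bits)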
+
+ @pytest.mark.parametrize('bitorder', ('little', 'big'))
+ # delta==-1 when count<0 because one extra zero of padding
+ @pytest.mark.parametrize('count', chain(range(8), range(-1, -9, -1)))
+ def test_roundtrip_axis(self, bitorder, count):
+ if count < 0:
+ # one extra zero of padding
+ cutoff = count - 1
+ else:
+ cutoff = count
+ packed0 = np.packbits(self.x, axis=0, bitorder=bitorder)
+ unpacked0 = np.unpackbits(packed0, axis=0, count=count,
+ bitorder=bitorder)
+ assert_equal(unpacked0.dtype, np.uint8)
+ assert_array_equal(unpacked0, self.padded2[:cutoff, :self.x.shape[1]])
+
+ packed1 = np.packbits(self.x, axis=1, bitorder=bitorder)
+ unpacked1 = np.unpackbits(packed1, axis=1, count=count,
+ bitorder=bitorder)
+ assert_equal(unpacked1.dtype, np.uint8)
+ assert_array_equal(unpacked1, self.padded2[:self.x.shape[0], :cutoff])
+
+ @pytest.mark.parametrize('kwargs', [
+ {}, {'count': None},
+ {'bitorder': 'little'},
+ {'bitorder': 'little', 'count': None},
+ {'bitorder': 'big'},
+ {'bitorder': 'big', 'count': None},
+ ])
+ def test_axis_count(self, kwargs):
+ packed0 = np.packbits(self.x, axis=0)
+ unpacked0 = np.unpackbits(packed0, axis=0, **kwargs)
+ assert_equal(unpacked0.dtype, np.uint8)
+ if kwargs.get('bitorder', 'big') == 'big':
+ assert_array_equal(unpacked0, self.padded2[:-1, :self.x.shape[1]])
+ else:
+ assert_array_equal(unpacked0[::-1, :], self.padded2[:-1, :self.x.shape[1]])
+
+ packed1 = np.packbits(self.x, axis=1)
+ unpacked1 = np.unpackbits(packed1, axis=1, **kwargs)
+ assert_equal(unpacked1.dtype, np.uint8)
+ if kwargs.get('bitorder', 'big') == 'big':
+ assert_array_equal(unpacked1, self.padded2[:self.x.shape[0], :-1])
+ else:
+ assert_array_equal(unpacked1[:, ::-1], self.padded2[:self.x.shape[0], :-1])
+
+ def test_bad_count(self):
+ packed0 = np.packbits(self.x, axis=0)
+ assert_raises(ValueError, np.unpackbits, packed0, axis=0, count=-9)
+ packed1 = np.packbits(self.x, axis=1)
+ assert_raises(ValueError, np.unpackbits, packed1, axis=1, count=-9)
+ packed = np.packbits(self.x)
+ assert_raises(ValueError, np.unpackbits, packed, count=-57)
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_polynomial.py b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_polynomial.py
new file mode 100644
index 0000000..c173ac3
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_polynomial.py
@@ -0,0 +1,320 @@
+import pytest
+
+import numpy as np
+import numpy.polynomial.polynomial as poly
+from numpy.testing import (
+ assert_,
+ assert_allclose,
+ assert_almost_equal,
+ assert_array_almost_equal,
+ assert_array_equal,
+ assert_equal,
+ assert_raises,
+)
+
+# `poly1d` has some support for `np.bool` and `np.timedelta64`,
+# but it is limited and they are therefore excluded here
+TYPE_CODES = np.typecodes["AllInteger"] + np.typecodes["AllFloat"] + "O"
+
+
+class TestPolynomial:
+ def test_poly1d_str_and_repr(self):
+ p = np.poly1d([1., 2, 3])
+ assert_equal(repr(p), 'poly1d([1., 2., 3.])')
+ assert_equal(str(p),
+ ' 2\n'
+ '1 x + 2 x + 3')
+
+ q = np.poly1d([3., 2, 1])
+ assert_equal(repr(q), 'poly1d([3., 2., 1.])')
+ assert_equal(str(q),
+ ' 2\n'
+ '3 x + 2 x + 1')
+
+ r = np.poly1d([1.89999 + 2j, -3j, -5.12345678, 2 + 1j])
+ assert_equal(str(r),
+ ' 3 2\n'
+ '(1.9 + 2j) x - 3j x - 5.123 x + (2 + 1j)')
+
+ assert_equal(str(np.poly1d([-3, -2, -1])),
+ ' 2\n'
+ '-3 x - 2 x - 1')
+
+ def test_poly1d_resolution(self):
+ p = np.poly1d([1., 2, 3])
+ q = np.poly1d([3., 2, 1])
+ assert_equal(p(0), 3.0)
+ assert_equal(p(5), 38.0)
+ assert_equal(q(0), 1.0)
+ assert_equal(q(5), 86.0)
+
+ def test_poly1d_math(self):
+ # here we use some simple coeffs to make calculations easier
+ p = np.poly1d([1., 2, 4])
+ q = np.poly1d([4., 2, 1])
+ assert_equal(p / q, (np.poly1d([0.25]), np.poly1d([1.5, 3.75])))
+ assert_equal(p.integ(), np.poly1d([1 / 3, 1., 4., 0.]))
+ assert_equal(p.integ(1), np.poly1d([1 / 3, 1., 4., 0.]))
+
+ p = np.poly1d([1., 2, 3])
+ q = np.poly1d([3., 2, 1])
+ assert_equal(p * q, np.poly1d([3., 8., 14., 8., 3.]))
+ assert_equal(p + q, np.poly1d([4., 4., 4.]))
+ assert_equal(p - q, np.poly1d([-2., 0., 2.]))
+ assert_equal(p ** 4, np.poly1d([1., 8., 36., 104., 214., 312., 324., 216., 81.]))
+ assert_equal(p(q), np.poly1d([9., 12., 16., 8., 6.]))
+ assert_equal(q(p), np.poly1d([3., 12., 32., 40., 34.]))
+ assert_equal(p.deriv(), np.poly1d([2., 2.]))
+ assert_equal(p.deriv(2), np.poly1d([2.]))
+ assert_equal(np.polydiv(np.poly1d([1, 0, -1]), np.poly1d([1, 1])),
+ (np.poly1d([1., -1.]), np.poly1d([0.])))
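+
+    # Illustrative sketch (not part of the upstream suite): deriv undoes
+    # integ when the integration constant is zero.
+    def test_integ_deriv_inverse_sketch(self):
+        p = np.poly1d([1., 2, 3])
+        assert_equal(p.integ().deriv(), p)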
+
+ @pytest.mark.parametrize("type_code", TYPE_CODES)
+ def test_poly1d_misc(self, type_code: str) -> None:
+ dtype = np.dtype(type_code)
+ ar = np.array([1, 2, 3], dtype=dtype)
+ p = np.poly1d(ar)
+
+ # `__eq__`
+ assert_equal(np.asarray(p), ar)
+ assert_equal(np.asarray(p).dtype, dtype)
+ assert_equal(len(p), 2)
+
+ # `__getitem__`
+ comparison_dct = {-1: 0, 0: 3, 1: 2, 2: 1, 3: 0}
+ for index, ref in comparison_dct.items():
+ scalar = p[index]
+ assert_equal(scalar, ref)
+ if dtype == np.object_:
+ assert isinstance(scalar, int)
+ else:
+ assert_equal(scalar.dtype, dtype)
+
+ def test_poly1d_variable_arg(self):
+ q = np.poly1d([1., 2, 3], variable='y')
+ assert_equal(str(q),
+ ' 2\n'
+ '1 y + 2 y + 3')
+ q = np.poly1d([1., 2, 3], variable='lambda')
+ assert_equal(str(q),
+ ' 2\n'
+ '1 lambda + 2 lambda + 3')
+
+ def test_poly(self):
+ assert_array_almost_equal(np.poly([3, -np.sqrt(2), np.sqrt(2)]),
+ [1, -3, -2, 6])
+
+ # From matlab docs
+ A = [[1, 2, 3], [4, 5, 6], [7, 8, 0]]
+ assert_array_almost_equal(np.poly(A), [1, -6, -72, -27])
+
+ # Should produce real output for perfect conjugates
+ assert_(np.isrealobj(np.poly([+1.082j, +2.613j, -2.613j, -1.082j])))
+ assert_(np.isrealobj(np.poly([0 + 1j, -0 + -1j, 1 + 2j,
+ 1 - 2j, 1. + 3.5j, 1 - 3.5j])))
+ assert_(np.isrealobj(np.poly([1j, -1j, 1 + 2j, 1 - 2j, 1 + 3j, 1 - 3.j])))
+ assert_(np.isrealobj(np.poly([1j, -1j, 1 + 2j, 1 - 2j])))
+ assert_(np.isrealobj(np.poly([1j, -1j, 2j, -2j])))
+ assert_(np.isrealobj(np.poly([1j, -1j])))
+ assert_(np.isrealobj(np.poly([1, -1])))
+
+ assert_(np.iscomplexobj(np.poly([1j, -1.0000001j])))
+
+ np.random.seed(42)
+ a = np.random.randn(100) + 1j * np.random.randn(100)
+ assert_(np.isrealobj(np.poly(np.concatenate((a, np.conjugate(a))))))
+
+ def test_roots(self):
+ assert_array_equal(np.roots([1, 0, 0]), [0, 0])
+
+        # Testing for larger root values
+        for i in np.logspace(10, 25, num=1000, base=10):
+            tgt = np.array([-1, 1, i])
+            res = np.sort(np.roots(poly.polyfromroots(tgt)[::-1]))
+            # Adapt the expected precision to the magnitude of the root to
+            # allow for numerical error.
+            assert_almost_equal(res, tgt, 14 - int(np.log10(i)))
+
+        for i in np.logspace(10, 25, num=1000, base=10):
+            tgt = np.array([-1, 1.01, i])
+            res = np.sort(np.roots(poly.polyfromroots(tgt)[::-1]))
+            assert_almost_equal(res, tgt, 14 - int(np.log10(i)))
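+
+    # Illustrative sketch (not part of the upstream suite): np.poly and
+    # np.roots are approximate inverses, up to ordering and floating-point
+    # error.
+    def test_poly_roots_roundtrip_sketch(self):
+        tgt = np.array([-2.0, 1.0, 3.0])
+        assert_almost_equal(np.sort(np.roots(np.poly(tgt))), tgt)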
+
+ def test_str_leading_zeros(self):
+ p = np.poly1d([4, 3, 2, 1])
+ p[3] = 0
+ assert_equal(str(p),
+ " 2\n"
+ "3 x + 2 x + 1")
+
+ p = np.poly1d([1, 2])
+ p[0] = 0
+ p[1] = 0
+ assert_equal(str(p), " \n0")
+
+ def test_polyfit(self):
+ c = np.array([3., 2., 1.])
+ x = np.linspace(0, 2, 7)
+ y = np.polyval(c, x)
+ err = [1, -1, 1, -1, 1, -1, 1]
+ weights = np.arange(8, 1, -1)**2 / 7.0
+
+ # Check exception when too few points for variance estimate. Note that
+ # the estimate requires the number of data points to exceed
+ # degree + 1
+ assert_raises(ValueError, np.polyfit,
+ [1], [1], deg=0, cov=True)
+
+ # check 1D case
+ m, cov = np.polyfit(x, y + err, 2, cov=True)
+ est = [3.8571, 0.2857, 1.619]
+ assert_almost_equal(est, m, decimal=4)
+ val0 = [[ 1.4694, -2.9388, 0.8163],
+ [-2.9388, 6.3673, -2.1224],
+ [ 0.8163, -2.1224, 1.161 ]] # noqa: E202
+ assert_almost_equal(val0, cov, decimal=4)
+
+ m2, cov2 = np.polyfit(x, y + err, 2, w=weights, cov=True)
+ assert_almost_equal([4.8927, -1.0177, 1.7768], m2, decimal=4)
+ val = [[ 4.3964, -5.0052, 0.4878],
+ [-5.0052, 6.8067, -0.9089],
+ [ 0.4878, -0.9089, 0.3337]]
+ assert_almost_equal(val, cov2, decimal=4)
+
+ m3, cov3 = np.polyfit(x, y + err, 2, w=weights, cov="unscaled")
+ assert_almost_equal([4.8927, -1.0177, 1.7768], m3, decimal=4)
+ val = [[ 0.1473, -0.1677, 0.0163],
+ [-0.1677, 0.228 , -0.0304], # noqa: E203
+ [ 0.0163, -0.0304, 0.0112]]
+ assert_almost_equal(val, cov3, decimal=4)
+
+ # check 2D (n,1) case
+ y = y[:, np.newaxis]
+ c = c[:, np.newaxis]
+ assert_almost_equal(c, np.polyfit(x, y, 2))
+ # check 2D (n,2) case
+ yy = np.concatenate((y, y), axis=1)
+ cc = np.concatenate((c, c), axis=1)
+ assert_almost_equal(cc, np.polyfit(x, yy, 2))
+
+ m, cov = np.polyfit(x, yy + np.array(err)[:, np.newaxis], 2, cov=True)
+ assert_almost_equal(est, m[:, 0], decimal=4)
+ assert_almost_equal(est, m[:, 1], decimal=4)
+ assert_almost_equal(val0, cov[:, :, 0], decimal=4)
+ assert_almost_equal(val0, cov[:, :, 1], decimal=4)
+
+        # check order 1 (deg=0) case, where the analytic results are simple
+ np.random.seed(123)
+ y = np.random.normal(size=(4, 10000))
+ mean, cov = np.polyfit(np.zeros(y.shape[0]), y, deg=0, cov=True)
+ # Should get sigma_mean = sigma/sqrt(N) = 1./sqrt(4) = 0.5.
+ assert_allclose(mean.std(), 0.5, atol=0.01)
+ assert_allclose(np.sqrt(cov.mean()), 0.5, atol=0.01)
+ # Without scaling, since reduced chi2 is 1, the result should be the same.
+ mean, cov = np.polyfit(np.zeros(y.shape[0]), y, w=np.ones(y.shape[0]),
+ deg=0, cov="unscaled")
+ assert_allclose(mean.std(), 0.5, atol=0.01)
+ assert_almost_equal(np.sqrt(cov.mean()), 0.5)
+ # If we estimate our errors wrong, no change with scaling:
+ w = np.full(y.shape[0], 1. / 0.5)
+ mean, cov = np.polyfit(np.zeros(y.shape[0]), y, w=w, deg=0, cov=True)
+ assert_allclose(mean.std(), 0.5, atol=0.01)
+ assert_allclose(np.sqrt(cov.mean()), 0.5, atol=0.01)
+ # But if we do not scale, our estimate for the error in the mean will
+ # differ.
+ mean, cov = np.polyfit(np.zeros(y.shape[0]), y, w=w, deg=0, cov="unscaled")
+ assert_allclose(mean.std(), 0.5, atol=0.01)
+ assert_almost_equal(np.sqrt(cov.mean()), 0.25)
+
+ def test_objects(self):
+ from decimal import Decimal
+ p = np.poly1d([Decimal('4.0'), Decimal('3.0'), Decimal('2.0')])
+ p2 = p * Decimal('1.333333333333333')
+ assert_(p2[1] == Decimal("3.9999999999999990"))
+ p2 = p.deriv()
+ assert_(p2[1] == Decimal('8.0'))
+ p2 = p.integ()
+ assert_(p2[3] == Decimal("1.333333333333333333333333333"))
+ assert_(p2[2] == Decimal('1.5'))
+ assert_(np.issubdtype(p2.coeffs.dtype, np.object_))
+ p = np.poly([Decimal(1), Decimal(2)])
+ assert_equal(np.poly([Decimal(1), Decimal(2)]),
+ [1, Decimal(-3), Decimal(2)])
+
+ def test_complex(self):
+ p = np.poly1d([3j, 2j, 1j])
+ p2 = p.integ()
+ assert_((p2.coeffs == [1j, 1j, 1j, 0]).all())
+ p2 = p.deriv()
+ assert_((p2.coeffs == [6j, 2j]).all())
+
+ def test_integ_coeffs(self):
+ p = np.poly1d([3, 2, 1])
+ p2 = p.integ(3, k=[9, 7, 6])
+ assert_(
+ (p2.coeffs == [1 / 4. / 5., 1 / 3. / 4., 1 / 2. / 3., 9 / 1. / 2., 7, 6]).all())
+
+ def test_zero_dims(self):
+ try:
+ np.poly(np.zeros((0, 0)))
+ except ValueError:
+ pass
+
+ def test_poly_int_overflow(self):
+ """
+ Regression test for gh-5096.
+ """
+ v = np.arange(1, 21)
+ assert_almost_equal(np.poly(v), np.poly(np.diag(v)))
+
+ def test_zero_poly_dtype(self):
+ """
+ Regression test for gh-16354.
+ """
+ z = np.array([0, 0, 0])
+ p = np.poly1d(z.astype(np.int64))
+ assert_equal(p.coeffs.dtype, np.int64)
+
+ p = np.poly1d(z.astype(np.float32))
+ assert_equal(p.coeffs.dtype, np.float32)
+
+ p = np.poly1d(z.astype(np.complex64))
+ assert_equal(p.coeffs.dtype, np.complex64)
+
+ def test_poly_eq(self):
+ p = np.poly1d([1, 2, 3])
+ p2 = np.poly1d([1, 2, 4])
+ assert_equal(p == None, False) # noqa: E711
+ assert_equal(p != None, True) # noqa: E711
+ assert_equal(p == p, True)
+ assert_equal(p == p2, False)
+ assert_equal(p != p2, True)
+
+ def test_polydiv(self):
+ b = np.poly1d([2, 6, 6, 1])
+ a = np.poly1d([-1j, (1 + 2j), -(2 + 1j), 1])
+ q, r = np.polydiv(b, a)
+ assert_equal(q.coeffs.dtype, np.complex128)
+ assert_equal(r.coeffs.dtype, np.complex128)
+ assert_equal(q * a + r, b)
+
+ c = [1, 2, 3]
+ d = np.poly1d([1, 2, 3])
+ s, t = np.polydiv(c, d)
+ assert isinstance(s, np.poly1d)
+ assert isinstance(t, np.poly1d)
+ u, v = np.polydiv(d, c)
+ assert isinstance(u, np.poly1d)
+ assert isinstance(v, np.poly1d)
+
+ def test_poly_coeffs_mutable(self):
+ """ Coefficients should be modifiable """
+ p = np.poly1d([1, 2, 3])
+
+ p.coeffs += 1
+ assert_equal(p.coeffs, [2, 3, 4])
+
+ p.coeffs[2] += 10
+ assert_equal(p.coeffs, [2, 3, 14])
+
+ # this never used to be allowed - let's not add features to deprecated
+ # APIs
+ assert_raises(AttributeError, setattr, p, 'coeffs', np.array(1))
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_recfunctions.py b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_recfunctions.py
new file mode 100644
index 0000000..eee1f47
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_recfunctions.py
@@ -0,0 +1,1052 @@
+
+import numpy as np
+import numpy.ma as ma
+from numpy.lib.recfunctions import (
+ append_fields,
+ apply_along_fields,
+ assign_fields_by_name,
+ drop_fields,
+ find_duplicates,
+ get_fieldstructure,
+ join_by,
+ merge_arrays,
+ recursive_fill_fields,
+ rename_fields,
+ repack_fields,
+ require_fields,
+ stack_arrays,
+ structured_to_unstructured,
+ unstructured_to_structured,
+)
+from numpy.ma.mrecords import MaskedRecords
+from numpy.ma.testutils import assert_equal
+from numpy.testing import assert_, assert_raises
+
+get_fieldspec = np.lib.recfunctions._get_fieldspec
+get_names = np.lib.recfunctions.get_names
+get_names_flat = np.lib.recfunctions.get_names_flat
+zip_descr = np.lib.recfunctions._zip_descr
+zip_dtype = np.lib.recfunctions._zip_dtype
+
+
+class TestRecFunctions:
+ # Misc tests
+
+ def setup_method(self):
+ x = np.array([1, 2, ])
+ y = np.array([10, 20, 30])
+ z = np.array([('A', 1.), ('B', 2.)],
+ dtype=[('A', '|S3'), ('B', float)])
+ w = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
+ dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
+ self.data = (w, x, y, z)
+
+ def test_zip_descr(self):
+ # Test zip_descr
+ (w, x, y, z) = self.data
+
+ # Std array
+ test = zip_descr((x, x), flatten=True)
+ assert_equal(test,
+ np.dtype([('', int), ('', int)]))
+ test = zip_descr((x, x), flatten=False)
+ assert_equal(test,
+ np.dtype([('', int), ('', int)]))
+
+ # Std & flexible-dtype
+ test = zip_descr((x, z), flatten=True)
+ assert_equal(test,
+ np.dtype([('', int), ('A', '|S3'), ('B', float)]))
+ test = zip_descr((x, z), flatten=False)
+ assert_equal(test,
+ np.dtype([('', int),
+ ('', [('A', '|S3'), ('B', float)])]))
+
+ # Standard & nested dtype
+ test = zip_descr((x, w), flatten=True)
+ assert_equal(test,
+ np.dtype([('', int),
+ ('a', int),
+ ('ba', float), ('bb', int)]))
+ test = zip_descr((x, w), flatten=False)
+ assert_equal(test,
+ np.dtype([('', int),
+ ('', [('a', int),
+ ('b', [('ba', float), ('bb', int)])])]))
+
+ def test_drop_fields(self):
+ # Test drop_fields
+ a = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
+ dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
+
+ # A basic field
+ test = drop_fields(a, 'a')
+ control = np.array([((2, 3.0),), ((5, 6.0),)],
+ dtype=[('b', [('ba', float), ('bb', int)])])
+ assert_equal(test, control)
+
+ # Another basic field (but nesting two fields)
+ test = drop_fields(a, 'b')
+ control = np.array([(1,), (4,)], dtype=[('a', int)])
+ assert_equal(test, control)
+
+ # A nested sub-field
+ test = drop_fields(a, ['ba', ])
+ control = np.array([(1, (3.0,)), (4, (6.0,))],
+ dtype=[('a', int), ('b', [('bb', int)])])
+ assert_equal(test, control)
+
+ # All the nested sub-field from a field: zap that field
+ test = drop_fields(a, ['ba', 'bb'])
+ control = np.array([(1,), (4,)], dtype=[('a', int)])
+ assert_equal(test, control)
+
+ # dropping all fields results in an array with no fields
+ test = drop_fields(a, ['a', 'b'])
+ control = np.array([(), ()], dtype=[])
+ assert_equal(test, control)
+
+ def test_rename_fields(self):
+ # Test rename fields
+ a = np.array([(1, (2, [3.0, 30.])), (4, (5, [6.0, 60.]))],
+ dtype=[('a', int),
+ ('b', [('ba', float), ('bb', (float, 2))])])
+ test = rename_fields(a, {'a': 'A', 'bb': 'BB'})
+ newdtype = [('A', int), ('b', [('ba', float), ('BB', (float, 2))])]
+ control = a.view(newdtype)
+ assert_equal(test.dtype, newdtype)
+ assert_equal(test, control)
+
+ def test_get_names(self):
+ # Test get_names
+ ndtype = np.dtype([('A', '|S3'), ('B', float)])
+ test = get_names(ndtype)
+ assert_equal(test, ('A', 'B'))
+
+ ndtype = np.dtype([('a', int), ('b', [('ba', float), ('bb', int)])])
+ test = get_names(ndtype)
+ assert_equal(test, ('a', ('b', ('ba', 'bb'))))
+
+ ndtype = np.dtype([('a', int), ('b', [])])
+ test = get_names(ndtype)
+ assert_equal(test, ('a', ('b', ())))
+
+ ndtype = np.dtype([])
+ test = get_names(ndtype)
+ assert_equal(test, ())
+
+ def test_get_names_flat(self):
+ # Test get_names_flat
+ ndtype = np.dtype([('A', '|S3'), ('B', float)])
+ test = get_names_flat(ndtype)
+ assert_equal(test, ('A', 'B'))
+
+ ndtype = np.dtype([('a', int), ('b', [('ba', float), ('bb', int)])])
+ test = get_names_flat(ndtype)
+ assert_equal(test, ('a', 'b', 'ba', 'bb'))
+
+ ndtype = np.dtype([('a', int), ('b', [])])
+ test = get_names_flat(ndtype)
+ assert_equal(test, ('a', 'b'))
+
+ ndtype = np.dtype([])
+ test = get_names_flat(ndtype)
+ assert_equal(test, ())
+
+ def test_get_fieldstructure(self):
+ # Test get_fieldstructure
+
+ # No nested fields
+ ndtype = np.dtype([('A', '|S3'), ('B', float)])
+ test = get_fieldstructure(ndtype)
+ assert_equal(test, {'A': [], 'B': []})
+
+ # One 1-nested field
+ ndtype = np.dtype([('A', int), ('B', [('BA', float), ('BB', '|S1')])])
+ test = get_fieldstructure(ndtype)
+ assert_equal(test, {'A': [], 'B': [], 'BA': ['B', ], 'BB': ['B']})
+
+ # One 2-nested fields
+ ndtype = np.dtype([('A', int),
+ ('B', [('BA', int),
+ ('BB', [('BBA', int), ('BBB', int)])])])
+ test = get_fieldstructure(ndtype)
+ control = {'A': [], 'B': [], 'BA': ['B'], 'BB': ['B'],
+ 'BBA': ['B', 'BB'], 'BBB': ['B', 'BB']}
+ assert_equal(test, control)
+
+ # 0 fields
+ ndtype = np.dtype([])
+ test = get_fieldstructure(ndtype)
+ assert_equal(test, {})
+
+ def test_find_duplicates(self):
+ # Test find_duplicates
+ a = ma.array([(2, (2., 'B')), (1, (2., 'B')), (2, (2., 'B')),
+ (1, (1., 'B')), (2, (2., 'B')), (2, (2., 'C'))],
+ mask=[(0, (0, 0)), (0, (0, 0)), (0, (0, 0)),
+ (0, (0, 0)), (1, (0, 0)), (0, (1, 0))],
+ dtype=[('A', int), ('B', [('BA', float), ('BB', '|S1')])])
+
+ test = find_duplicates(a, ignoremask=False, return_index=True)
+ control = [0, 2]
+ assert_equal(sorted(test[-1]), control)
+ assert_equal(test[0], a[test[-1]])
+
+ test = find_duplicates(a, key='A', return_index=True)
+ control = [0, 1, 2, 3, 5]
+ assert_equal(sorted(test[-1]), control)
+ assert_equal(test[0], a[test[-1]])
+
+ test = find_duplicates(a, key='B', return_index=True)
+ control = [0, 1, 2, 4]
+ assert_equal(sorted(test[-1]), control)
+ assert_equal(test[0], a[test[-1]])
+
+ test = find_duplicates(a, key='BA', return_index=True)
+ control = [0, 1, 2, 4]
+ assert_equal(sorted(test[-1]), control)
+ assert_equal(test[0], a[test[-1]])
+
+ test = find_duplicates(a, key='BB', return_index=True)
+ control = [0, 1, 2, 3, 4]
+ assert_equal(sorted(test[-1]), control)
+ assert_equal(test[0], a[test[-1]])
+
+ def test_find_duplicates_ignoremask(self):
+ # Test the ignoremask option of find_duplicates
+ ndtype = [('a', int)]
+ a = ma.array([1, 1, 1, 2, 2, 3, 3],
+ mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype)
+ test = find_duplicates(a, ignoremask=True, return_index=True)
+ control = [0, 1, 3, 4]
+ assert_equal(sorted(test[-1]), control)
+ assert_equal(test[0], a[test[-1]])
+
+ test = find_duplicates(a, ignoremask=False, return_index=True)
+ control = [0, 1, 2, 3, 4, 6]
+ assert_equal(sorted(test[-1]), control)
+ assert_equal(test[0], a[test[-1]])
+
+ def test_repack_fields(self):
+ dt = np.dtype('u1,f4,i8', align=True)
+ a = np.zeros(2, dtype=dt)
+
+ assert_equal(repack_fields(dt), np.dtype('u1,f4,i8'))
+ assert_equal(repack_fields(a).itemsize, 13)
+ assert_equal(repack_fields(repack_fields(dt), align=True), dt)
+
+ # make sure type is preserved
+ dt = np.dtype((np.record, dt))
+ assert_(repack_fields(dt).type is np.record)
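+
+    # Illustrative sketch (not part of the upstream suite): align=True pads
+    # like a C struct (u1 at offset 0, f4 at 4, i8 at 8 -> itemsize 16),
+    # while the repacked dtype is contiguous (1 + 4 + 8 == 13 bytes).
+    def test_repack_fields_offsets_sketch(self):
+        dt = np.dtype('u1,f4,i8', align=True)
+        assert_equal(dt.itemsize, 16)
+        assert_equal(repack_fields(dt).itemsize, 13)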
+
+ def test_structured_to_unstructured(self, tmp_path):
+ a = np.zeros(4, dtype=[('a', 'i4'), ('b', 'f4,u2'), ('c', 'f4', 2)])
+ out = structured_to_unstructured(a)
+ assert_equal(out, np.zeros((4, 5), dtype='f8'))
+
+ b = np.array([(1, 2, 5), (4, 5, 7), (7, 8, 11), (10, 11, 12)],
+ dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')])
+ out = np.mean(structured_to_unstructured(b[['x', 'z']]), axis=-1)
+ assert_equal(out, np.array([3., 5.5, 9., 11.]))
+ out = np.mean(structured_to_unstructured(b[['x']]), axis=-1)
+ assert_equal(out, np.array([1., 4. , 7., 10.])) # noqa: E203
+
+ c = np.arange(20).reshape((4, 5))
+ out = unstructured_to_structured(c, a.dtype)
+ want = np.array([( 0, ( 1., 2), [ 3., 4.]),
+ ( 5, ( 6., 7), [ 8., 9.]),
+ (10, (11., 12), [13., 14.]),
+ (15, (16., 17), [18., 19.])],
+ dtype=[('a', 'i4'),
+ ('b', [('f0', 'f4'), ('f1', 'u2')]),
+ ('c', 'f4', (2,))])
+ assert_equal(out, want)
+
+ d = np.array([(1, 2, 5), (4, 5, 7), (7, 8, 11), (10, 11, 12)],
+ dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')])
+ assert_equal(apply_along_fields(np.mean, d),
+ np.array([ 8.0 / 3, 16.0 / 3, 26.0 / 3, 11.]))
+ assert_equal(apply_along_fields(np.mean, d[['x', 'z']]),
+ np.array([ 3., 5.5, 9., 11.]))
+
+ # check that for uniform field dtypes we get a view, not a copy:
+ d = np.array([(1, 2, 5), (4, 5, 7), (7, 8, 11), (10, 11, 12)],
+ dtype=[('x', 'i4'), ('y', 'i4'), ('z', 'i4')])
+ dd = structured_to_unstructured(d)
+ ddd = unstructured_to_structured(dd, d.dtype)
+ assert_(np.shares_memory(dd, d))
+ assert_(np.shares_memory(ddd, d))
+
+ # check that reversing the order of attributes works
+ dd_attrib_rev = structured_to_unstructured(d[['z', 'x']])
+ assert_equal(dd_attrib_rev, [[5, 1], [7, 4], [11, 7], [12, 10]])
+ assert_(np.shares_memory(dd_attrib_rev, d))
+
+ # including uniform fields with subarrays unpacked
+ d = np.array([(1, [2, 3], [[ 4, 5], [ 6, 7]]),
+ (8, [9, 10], [[11, 12], [13, 14]])],
+ dtype=[('x0', 'i4'), ('x1', ('i4', 2)),
+ ('x2', ('i4', (2, 2)))])
+ dd = structured_to_unstructured(d)
+ ddd = unstructured_to_structured(dd, d.dtype)
+ assert_(np.shares_memory(dd, d))
+ assert_(np.shares_memory(ddd, d))
+
+ # check that reversing with sub-arrays works as expected
+ d_rev = d[::-1]
+ dd_rev = structured_to_unstructured(d_rev)
+ assert_equal(dd_rev, [[8, 9, 10, 11, 12, 13, 14],
+ [1, 2, 3, 4, 5, 6, 7]])
+
+ # check that sub-arrays keep the order of their values
+ d_attrib_rev = d[['x2', 'x1', 'x0']]
+ dd_attrib_rev = structured_to_unstructured(d_attrib_rev)
+ assert_equal(dd_attrib_rev, [[4, 5, 6, 7, 2, 3, 1],
+ [11, 12, 13, 14, 9, 10, 8]])
+
+ # with ignored field at the end
+ d = np.array([(1, [2, 3], [[4, 5], [6, 7]], 32),
+ (8, [9, 10], [[11, 12], [13, 14]], 64)],
+ dtype=[('x0', 'i4'), ('x1', ('i4', 2)),
+ ('x2', ('i4', (2, 2))), ('ignored', 'u1')])
+ dd = structured_to_unstructured(d[['x0', 'x1', 'x2']])
+ assert_(np.shares_memory(dd, d))
+ assert_equal(dd, [[1, 2, 3, 4, 5, 6, 7],
+ [8, 9, 10, 11, 12, 13, 14]])
+
+ # test that nested fields with identical names don't break anything
+ point = np.dtype([('x', int), ('y', int)])
+ triangle = np.dtype([('a', point), ('b', point), ('c', point)])
+ arr = np.zeros(10, triangle)
+ res = structured_to_unstructured(arr, dtype=int)
+ assert_equal(res, np.zeros((10, 6), dtype=int))
+
+ # test nested combinations of subarrays and structured arrays, gh-13333
+ def subarray(dt, shape):
+ return np.dtype((dt, shape))
+
+ def structured(*dts):
+ return np.dtype([(f'x{i}', dt) for i, dt in enumerate(dts)])
+
+ def inspect(dt, dtype=None):
+ arr = np.zeros((), dt)
+ ret = structured_to_unstructured(arr, dtype=dtype)
+ backarr = unstructured_to_structured(ret, dt)
+ return ret.shape, ret.dtype, backarr.dtype
+
+ dt = structured(subarray(structured(np.int32, np.int32), 3))
+ assert_equal(inspect(dt), ((6,), np.int32, dt))
+
+ dt = structured(subarray(subarray(np.int32, 2), 2))
+ assert_equal(inspect(dt), ((4,), np.int32, dt))
+
+ dt = structured(np.int32)
+ assert_equal(inspect(dt), ((1,), np.int32, dt))
+
+ dt = structured(np.int32, subarray(subarray(np.int32, 2), 2))
+ assert_equal(inspect(dt), ((5,), np.int32, dt))
+
+ dt = structured()
+ assert_raises(ValueError, structured_to_unstructured, np.zeros(3, dt))
+
+        # these currently don't work, but we may make them work in the future
+ assert_raises(NotImplementedError, structured_to_unstructured,
+ np.zeros(3, dt), dtype=np.int32)
+ assert_raises(NotImplementedError, unstructured_to_structured,
+ np.zeros((3, 0), dtype=np.int32))
+
+ # test supported ndarray subclasses
+ d_plain = np.array([(1, 2), (3, 4)], dtype=[('a', 'i4'), ('b', 'i4')])
+ dd_expected = structured_to_unstructured(d_plain, copy=True)
+
+ # recarray
+ d = d_plain.view(np.recarray)
+
+ dd = structured_to_unstructured(d, copy=False)
+ ddd = structured_to_unstructured(d, copy=True)
+ assert_(np.shares_memory(d, dd))
+ assert_(type(dd) is np.recarray)
+ assert_(type(ddd) is np.recarray)
+ assert_equal(dd, dd_expected)
+ assert_equal(ddd, dd_expected)
+
+ # memmap
+ d = np.memmap(tmp_path / 'memmap',
+ mode='w+',
+ dtype=d_plain.dtype,
+ shape=d_plain.shape)
+ d[:] = d_plain
+ dd = structured_to_unstructured(d, copy=False)
+ ddd = structured_to_unstructured(d, copy=True)
+ assert_(np.shares_memory(d, dd))
+ assert_(type(dd) is np.memmap)
+ assert_(type(ddd) is np.memmap)
+ assert_equal(dd, dd_expected)
+ assert_equal(ddd, dd_expected)
+
+ def test_unstructured_to_structured(self):
+ # test if dtype is the args of np.dtype
+ a = np.zeros((20, 2))
+ test_dtype_args = [('x', float), ('y', float)]
+ test_dtype = np.dtype(test_dtype_args)
+ field1 = unstructured_to_structured(a, dtype=test_dtype_args) # now
+ field2 = unstructured_to_structured(a, dtype=test_dtype) # before
+ assert_equal(field1, field2)
+
+ def test_field_assignment_by_name(self):
+ a = np.ones(2, dtype=[('a', 'i4'), ('b', 'f8'), ('c', 'u1')])
+ newdt = [('b', 'f4'), ('c', 'u1')]
+
+ assert_equal(require_fields(a, newdt), np.ones(2, newdt))
+
+ b = np.array([(1, 2), (3, 4)], dtype=newdt)
+ assign_fields_by_name(a, b, zero_unassigned=False)
+ assert_equal(a, np.array([(1, 1, 2), (1, 3, 4)], dtype=a.dtype))
+ assign_fields_by_name(a, b)
+ assert_equal(a, np.array([(0, 1, 2), (0, 3, 4)], dtype=a.dtype))
+
+ # test nested fields
+ a = np.ones(2, dtype=[('a', [('b', 'f8'), ('c', 'u1')])])
+ newdt = [('a', [('c', 'u1')])]
+ assert_equal(require_fields(a, newdt), np.ones(2, newdt))
+ b = np.array([((2,),), ((3,),)], dtype=newdt)
+ assign_fields_by_name(a, b, zero_unassigned=False)
+ assert_equal(a, np.array([((1, 2),), ((1, 3),)], dtype=a.dtype))
+ assign_fields_by_name(a, b)
+ assert_equal(a, np.array([((0, 2),), ((0, 3),)], dtype=a.dtype))
+
+ # test unstructured code path for 0d arrays
+ a, b = np.array(3), np.array(0)
+ assign_fields_by_name(b, a)
+ assert_equal(b[()], 3)
+
+
+class TestRecursiveFillFields:
+ # Test recursive_fill_fields.
+ def test_simple_flexible(self):
+ # Test recursive_fill_fields on flexible-array
+ a = np.array([(1, 10.), (2, 20.)], dtype=[('A', int), ('B', float)])
+ b = np.zeros((3,), dtype=a.dtype)
+ test = recursive_fill_fields(a, b)
+ control = np.array([(1, 10.), (2, 20.), (0, 0.)],
+ dtype=[('A', int), ('B', float)])
+ assert_equal(test, control)
+
+ def test_masked_flexible(self):
+ # Test recursive_fill_fields on masked flexible-array
+ a = ma.array([(1, 10.), (2, 20.)], mask=[(0, 1), (1, 0)],
+ dtype=[('A', int), ('B', float)])
+ b = ma.zeros((3,), dtype=a.dtype)
+ test = recursive_fill_fields(a, b)
+ control = ma.array([(1, 10.), (2, 20.), (0, 0.)],
+ mask=[(0, 1), (1, 0), (0, 0)],
+ dtype=[('A', int), ('B', float)])
+ assert_equal(test, control)
+
+
+class TestMergeArrays:
+ # Test merge_arrays
+
+ def setup_method(self):
+ x = np.array([1, 2, ])
+ y = np.array([10, 20, 30])
+ z = np.array(
+ [('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)])
+ w = np.array(
+ [(1, (2, 3.0, ())), (4, (5, 6.0, ()))],
+ dtype=[('a', int), ('b', [('ba', float), ('bb', int), ('bc', [])])])
+ self.data = (w, x, y, z)
+
+ def test_solo(self):
+ # Test merge_arrays on a single array.
+ (_, x, _, z) = self.data
+
+ test = merge_arrays(x)
+ control = np.array([(1,), (2,)], dtype=[('f0', int)])
+ assert_equal(test, control)
+ test = merge_arrays((x,))
+ assert_equal(test, control)
+
+ test = merge_arrays(z, flatten=False)
+ assert_equal(test, z)
+ test = merge_arrays(z, flatten=True)
+ assert_equal(test, z)
+
+ def test_solo_w_flatten(self):
+ # Test merge_arrays on a single array w & w/o flattening
+ w = self.data[0]
+ test = merge_arrays(w, flatten=False)
+ assert_equal(test, w)
+
+ test = merge_arrays(w, flatten=True)
+ control = np.array([(1, 2, 3.0), (4, 5, 6.0)],
+ dtype=[('a', int), ('ba', float), ('bb', int)])
+ assert_equal(test, control)
+
+ def test_standard(self):
+ # Test standard & standard
+ # Test merge arrays
+ (_, x, y, _) = self.data
+ test = merge_arrays((x, y), usemask=False)
+ control = np.array([(1, 10), (2, 20), (-1, 30)],
+ dtype=[('f0', int), ('f1', int)])
+ assert_equal(test, control)
+
+ test = merge_arrays((x, y), usemask=True)
+ control = ma.array([(1, 10), (2, 20), (-1, 30)],
+ mask=[(0, 0), (0, 0), (1, 0)],
+ dtype=[('f0', int), ('f1', int)])
+ assert_equal(test, control)
+ assert_equal(test.mask, control.mask)
+
+ def test_flatten(self):
+ # Test standard & flexible
+ (_, x, _, z) = self.data
+ test = merge_arrays((x, z), flatten=True)
+ control = np.array([(1, 'A', 1.), (2, 'B', 2.)],
+ dtype=[('f0', int), ('A', '|S3'), ('B', float)])
+ assert_equal(test, control)
+
+ test = merge_arrays((x, z), flatten=False)
+ control = np.array([(1, ('A', 1.)), (2, ('B', 2.))],
+ dtype=[('f0', int),
+ ('f1', [('A', '|S3'), ('B', float)])])
+ assert_equal(test, control)
+
+ def test_flatten_wflexible(self):
+ # Test flatten standard & nested
+ (w, x, _, _) = self.data
+ test = merge_arrays((x, w), flatten=True)
+ control = np.array([(1, 1, 2, 3.0), (2, 4, 5, 6.0)],
+ dtype=[('f0', int),
+ ('a', int), ('ba', float), ('bb', int)])
+ assert_equal(test, control)
+
+ test = merge_arrays((x, w), flatten=False)
+ controldtype = [('f0', int),
+ ('f1', [('a', int),
+ ('b', [('ba', float), ('bb', int), ('bc', [])])])]
+ control = np.array([(1., (1, (2, 3.0, ()))), (2, (4, (5, 6.0, ())))],
+ dtype=controldtype)
+ assert_equal(test, control)
+
+ def test_wmasked_arrays(self):
+ # Test merge_arrays masked arrays
+ (_, x, _, _) = self.data
+ mx = ma.array([1, 2, 3], mask=[1, 0, 0])
+ test = merge_arrays((x, mx), usemask=True)
+ control = ma.array([(1, 1), (2, 2), (-1, 3)],
+ mask=[(0, 1), (0, 0), (1, 0)],
+ dtype=[('f0', int), ('f1', int)])
+ assert_equal(test, control)
+ test = merge_arrays((x, mx), usemask=True, asrecarray=True)
+ assert_equal(test, control)
+ assert_(isinstance(test, MaskedRecords))
+
+ def test_w_singlefield(self):
+ # Test single field
+ test = merge_arrays((np.array([1, 2]).view([('a', int)]),
+ np.array([10., 20., 30.])),)
+ control = ma.array([(1, 10.), (2, 20.), (-1, 30.)],
+ mask=[(0, 0), (0, 0), (1, 0)],
+ dtype=[('a', int), ('f1', float)])
+ assert_equal(test, control)
+
+ def test_w_shorter_flex(self):
+        # Test merge_arrays w/ a shorter flexible ndarray.
+ z = self.data[-1]
+
+ # Fixme, this test looks incomplete and broken
+ #test = merge_arrays((z, np.array([10, 20, 30]).view([('C', int)])))
+ #control = np.array([('A', 1., 10), ('B', 2., 20), ('-1', -1, 20)],
+ # dtype=[('A', '|S3'), ('B', float), ('C', int)])
+ #assert_equal(test, control)
+
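+        # Until the test is repaired, just exercise the call so that a
+        # regression still surfaces as an exception.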
+ merge_arrays((z, np.array([10, 20, 30]).view([('C', int)])))
+ np.array([('A', 1., 10), ('B', 2., 20), ('-1', -1, 20)],
+ dtype=[('A', '|S3'), ('B', float), ('C', int)])
+
+ def test_singlerecord(self):
+ (_, x, y, z) = self.data
+ test = merge_arrays((x[0], y[0], z[0]), usemask=False)
+ control = np.array([(1, 10, ('A', 1))],
+ dtype=[('f0', int),
+ ('f1', int),
+ ('f2', [('A', '|S3'), ('B', float)])])
+ assert_equal(test, control)
+
+
+class TestAppendFields:
+ # Test append_fields
+
+ def setup_method(self):
+ x = np.array([1, 2, ])
+ y = np.array([10, 20, 30])
+ z = np.array(
+ [('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)])
+ w = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
+ dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
+ self.data = (w, x, y, z)
+
+ def test_append_single(self):
+ # Test simple case
+ (_, x, _, _) = self.data
+ test = append_fields(x, 'A', data=[10, 20, 30])
+ control = ma.array([(1, 10), (2, 20), (-1, 30)],
+ mask=[(0, 0), (0, 0), (1, 0)],
+ dtype=[('f0', int), ('A', int)],)
+ assert_equal(test, control)
+
+ def test_append_double(self):
+ # Test simple case
+ (_, x, _, _) = self.data
+ test = append_fields(x, ('A', 'B'), data=[[10, 20, 30], [100, 200]])
+ control = ma.array([(1, 10, 100), (2, 20, 200), (-1, 30, -1)],
+ mask=[(0, 0, 0), (0, 0, 0), (1, 0, 1)],
+ dtype=[('f0', int), ('A', int), ('B', int)],)
+ assert_equal(test, control)
+
+ def test_append_on_flex(self):
+ # Test append_fields on flexible type arrays
+ z = self.data[-1]
+ test = append_fields(z, 'C', data=[10, 20, 30])
+ control = ma.array([('A', 1., 10), ('B', 2., 20), (-1, -1., 30)],
+ mask=[(0, 0, 0), (0, 0, 0), (1, 1, 0)],
+ dtype=[('A', '|S3'), ('B', float), ('C', int)],)
+ assert_equal(test, control)
+
+ def test_append_on_nested(self):
+ # Test append_fields on nested fields
+ w = self.data[0]
+ test = append_fields(w, 'C', data=[10, 20, 30])
+ control = ma.array([(1, (2, 3.0), 10),
+ (4, (5, 6.0), 20),
+ (-1, (-1, -1.), 30)],
+                           mask=[(0, (0, 0), 0), (0, (0, 0), 0),
+                                 (1, (1, 1), 0)],
+ dtype=[('a', int),
+ ('b', [('ba', float), ('bb', int)]),
+ ('C', int)],)
+ assert_equal(test, control)
+
+
+class TestStackArrays:
+ # Test stack_arrays
+ def setup_method(self):
+ x = np.array([1, 2, ])
+ y = np.array([10, 20, 30])
+ z = np.array(
+ [('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)])
+ w = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
+ dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
+ self.data = (w, x, y, z)
+
+ def test_solo(self):
+ # Test stack_arrays on single arrays
+ (_, x, _, _) = self.data
+ test = stack_arrays((x,))
+ assert_equal(test, x)
+ assert_(test is x)
+
+ test = stack_arrays(x)
+ assert_equal(test, x)
+ assert_(test is x)
+
+ def test_unnamed_fields(self):
+ # Tests combinations of arrays w/o named fields
+ (_, x, y, _) = self.data
+
+ test = stack_arrays((x, x), usemask=False)
+ control = np.array([1, 2, 1, 2])
+ assert_equal(test, control)
+
+ test = stack_arrays((x, y), usemask=False)
+ control = np.array([1, 2, 10, 20, 30])
+ assert_equal(test, control)
+
+ test = stack_arrays((y, x), usemask=False)
+ control = np.array([10, 20, 30, 1, 2])
+ assert_equal(test, control)
+
+ def test_unnamed_and_named_fields(self):
+ # Test combination of arrays w/ & w/o named fields
+ (_, x, _, z) = self.data
+
+ test = stack_arrays((x, z))
+ control = ma.array([(1, -1, -1), (2, -1, -1),
+ (-1, 'A', 1), (-1, 'B', 2)],
+ mask=[(0, 1, 1), (0, 1, 1),
+ (1, 0, 0), (1, 0, 0)],
+ dtype=[('f0', int), ('A', '|S3'), ('B', float)])
+ assert_equal(test, control)
+ assert_equal(test.mask, control.mask)
+
+ test = stack_arrays((z, x))
+ control = ma.array([('A', 1, -1), ('B', 2, -1),
+ (-1, -1, 1), (-1, -1, 2), ],
+ mask=[(0, 0, 1), (0, 0, 1),
+ (1, 1, 0), (1, 1, 0)],
+ dtype=[('A', '|S3'), ('B', float), ('f2', int)])
+ assert_equal(test, control)
+ assert_equal(test.mask, control.mask)
+
+ test = stack_arrays((z, z, x))
+ control = ma.array([('A', 1, -1), ('B', 2, -1),
+ ('A', 1, -1), ('B', 2, -1),
+ (-1, -1, 1), (-1, -1, 2), ],
+ mask=[(0, 0, 1), (0, 0, 1),
+ (0, 0, 1), (0, 0, 1),
+ (1, 1, 0), (1, 1, 0)],
+ dtype=[('A', '|S3'), ('B', float), ('f2', int)])
+ assert_equal(test, control)
+
+ def test_matching_named_fields(self):
+ # Test combination of arrays w/ matching field names
+ (_, x, _, z) = self.data
+ zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
+ dtype=[('A', '|S3'), ('B', float), ('C', float)])
+ test = stack_arrays((z, zz))
+        control = ma.array([('A', 1, -1), ('B', 2, -1),
+                            ('a', 10., 100.), ('b', 20., 200.),
+                            ('c', 30., 300.)],
+ dtype=[('A', '|S3'), ('B', float), ('C', float)],
+ mask=[(0, 0, 1), (0, 0, 1),
+ (0, 0, 0), (0, 0, 0), (0, 0, 0)])
+ assert_equal(test, control)
+ assert_equal(test.mask, control.mask)
+
+ test = stack_arrays((z, zz, x))
+ ndtype = [('A', '|S3'), ('B', float), ('C', float), ('f3', int)]
+ control = ma.array([('A', 1, -1, -1), ('B', 2, -1, -1),
+ ('a', 10., 100., -1), ('b', 20., 200., -1),
+ ('c', 30., 300., -1),
+ (-1, -1, -1, 1), (-1, -1, -1, 2)],
+ dtype=ndtype,
+ mask=[(0, 0, 1, 1), (0, 0, 1, 1),
+ (0, 0, 0, 1), (0, 0, 0, 1), (0, 0, 0, 1),
+ (1, 1, 1, 0), (1, 1, 1, 0)])
+ assert_equal(test, control)
+ assert_equal(test.mask, control.mask)
+
+ def test_defaults(self):
+ # Test defaults: no exception raised if keys of defaults are not fields.
+ (_, _, _, z) = self.data
+ zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
+ dtype=[('A', '|S3'), ('B', float), ('C', float)])
+ defaults = {'A': '???', 'B': -999., 'C': -9999., 'D': -99999.}
+ test = stack_arrays((z, zz), defaults=defaults)
+        control = ma.array([('A', 1, -9999.), ('B', 2, -9999.),
+                            ('a', 10., 100.), ('b', 20., 200.),
+                            ('c', 30., 300.)],
+ dtype=[('A', '|S3'), ('B', float), ('C', float)],
+ mask=[(0, 0, 1), (0, 0, 1),
+ (0, 0, 0), (0, 0, 0), (0, 0, 0)])
+ assert_equal(test, control)
+ assert_equal(test.data, control.data)
+ assert_equal(test.mask, control.mask)
+
+ def test_autoconversion(self):
+ # Tests autoconversion
+ adtype = [('A', int), ('B', bool), ('C', float)]
+ a = ma.array([(1, 2, 3)], mask=[(0, 1, 0)], dtype=adtype)
+ bdtype = [('A', int), ('B', float), ('C', float)]
+ b = ma.array([(4, 5, 6)], dtype=bdtype)
+ control = ma.array([(1, 2, 3), (4, 5, 6)], mask=[(0, 1, 0), (0, 0, 0)],
+ dtype=bdtype)
+ test = stack_arrays((a, b), autoconvert=True)
+ assert_equal(test, control)
+ assert_equal(test.mask, control.mask)
+ with assert_raises(TypeError):
+ stack_arrays((a, b), autoconvert=False)
+
+ def test_checktitles(self):
+ # Test using titles in the field names
+ adtype = [(('a', 'A'), int), (('b', 'B'), bool), (('c', 'C'), float)]
+ a = ma.array([(1, 2, 3)], mask=[(0, 1, 0)], dtype=adtype)
+ bdtype = [(('a', 'A'), int), (('b', 'B'), bool), (('c', 'C'), float)]
+ b = ma.array([(4, 5, 6)], dtype=bdtype)
+ test = stack_arrays((a, b))
+ control = ma.array([(1, 2, 3), (4, 5, 6)], mask=[(0, 1, 0), (0, 0, 0)],
+ dtype=bdtype)
+ assert_equal(test, control)
+ assert_equal(test.mask, control.mask)
+
+ def test_subdtype(self):
+ z = np.array([
+ ('A', 1), ('B', 2)
+ ], dtype=[('A', '|S3'), ('B', float, (1,))])
+ zz = np.array([
+ ('a', [10.], 100.), ('b', [20.], 200.), ('c', [30.], 300.)
+ ], dtype=[('A', '|S3'), ('B', float, (1,)), ('C', float)])
+
+ res = stack_arrays((z, zz))
+ expected = ma.array(
+ data=[
+ (b'A', [1.0], 0),
+ (b'B', [2.0], 0),
+ (b'a', [10.0], 100.0),
+ (b'b', [20.0], 200.0),
+ (b'c', [30.0], 300.0)],
+ mask=[
+ (False, [False], True),
+ (False, [False], True),
+ (False, [False], False),
+ (False, [False], False),
+ (False, [False], False)
+ ],
+ dtype=zz.dtype
+ )
+ assert_equal(res.dtype, expected.dtype)
+ assert_equal(res, expected)
+ assert_equal(res.mask, expected.mask)
+
+
+class TestJoinBy:
+ def setup_method(self):
+ self.a = np.array(list(zip(np.arange(10), np.arange(50, 60),
+ np.arange(100, 110))),
+ dtype=[('a', int), ('b', int), ('c', int)])
+ self.b = np.array(list(zip(np.arange(5, 15), np.arange(65, 75),
+ np.arange(100, 110))),
+ dtype=[('a', int), ('b', int), ('d', int)])
+
+ def test_inner_join(self):
+ # Basic test of join_by
+ a, b = self.a, self.b
+
+ test = join_by('a', a, b, jointype='inner')
+ control = np.array([(5, 55, 65, 105, 100), (6, 56, 66, 106, 101),
+ (7, 57, 67, 107, 102), (8, 58, 68, 108, 103),
+ (9, 59, 69, 109, 104)],
+ dtype=[('a', int), ('b1', int), ('b2', int),
+ ('c', int), ('d', int)])
+ assert_equal(test, control)
+
+ def test_join(self):
+ a, b = self.a, self.b
+
+ # Fixme, this test is broken
+ #test = join_by(('a', 'b'), a, b)
+ #control = np.array([(5, 55, 105, 100), (6, 56, 106, 101),
+ # (7, 57, 107, 102), (8, 58, 108, 103),
+ # (9, 59, 109, 104)],
+ # dtype=[('a', int), ('b', int),
+ # ('c', int), ('d', int)])
+ #assert_equal(test, control)
+
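+        # Only exercise the call here; a regression would still surface
+        # as an exception.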
+ join_by(('a', 'b'), a, b)
+ np.array([(5, 55, 105, 100), (6, 56, 106, 101),
+ (7, 57, 107, 102), (8, 58, 108, 103),
+ (9, 59, 109, 104)],
+ dtype=[('a', int), ('b', int),
+ ('c', int), ('d', int)])
+
+ def test_join_subdtype(self):
+ # tests the bug in https://stackoverflow.com/q/44769632/102441
+ foo = np.array([(1,)],
+ dtype=[('key', int)])
+ bar = np.array([(1, np.array([1, 2, 3]))],
+ dtype=[('key', int), ('value', 'uint16', 3)])
+ res = join_by('key', foo, bar)
+ assert_equal(res, bar.view(ma.MaskedArray))
+
+ def test_outer_join(self):
+ a, b = self.a, self.b
+
+ test = join_by(('a', 'b'), a, b, 'outer')
+ control = ma.array([(0, 50, 100, -1), (1, 51, 101, -1),
+ (2, 52, 102, -1), (3, 53, 103, -1),
+ (4, 54, 104, -1), (5, 55, 105, -1),
+ (5, 65, -1, 100), (6, 56, 106, -1),
+ (6, 66, -1, 101), (7, 57, 107, -1),
+ (7, 67, -1, 102), (8, 58, 108, -1),
+ (8, 68, -1, 103), (9, 59, 109, -1),
+ (9, 69, -1, 104), (10, 70, -1, 105),
+ (11, 71, -1, 106), (12, 72, -1, 107),
+ (13, 73, -1, 108), (14, 74, -1, 109)],
+ mask=[(0, 0, 0, 1), (0, 0, 0, 1),
+ (0, 0, 0, 1), (0, 0, 0, 1),
+ (0, 0, 0, 1), (0, 0, 0, 1),
+ (0, 0, 1, 0), (0, 0, 0, 1),
+ (0, 0, 1, 0), (0, 0, 0, 1),
+ (0, 0, 1, 0), (0, 0, 0, 1),
+ (0, 0, 1, 0), (0, 0, 0, 1),
+ (0, 0, 1, 0), (0, 0, 1, 0),
+ (0, 0, 1, 0), (0, 0, 1, 0),
+ (0, 0, 1, 0), (0, 0, 1, 0)],
+ dtype=[('a', int), ('b', int),
+ ('c', int), ('d', int)])
+ assert_equal(test, control)
+
+ def test_leftouter_join(self):
+ a, b = self.a, self.b
+
+ test = join_by(('a', 'b'), a, b, 'leftouter')
+ control = ma.array([(0, 50, 100, -1), (1, 51, 101, -1),
+ (2, 52, 102, -1), (3, 53, 103, -1),
+ (4, 54, 104, -1), (5, 55, 105, -1),
+ (6, 56, 106, -1), (7, 57, 107, -1),
+ (8, 58, 108, -1), (9, 59, 109, -1)],
+ mask=[(0, 0, 0, 1), (0, 0, 0, 1),
+ (0, 0, 0, 1), (0, 0, 0, 1),
+ (0, 0, 0, 1), (0, 0, 0, 1),
+ (0, 0, 0, 1), (0, 0, 0, 1),
+ (0, 0, 0, 1), (0, 0, 0, 1)],
+ dtype=[('a', int), ('b', int), ('c', int), ('d', int)])
+ assert_equal(test, control)
+
+ def test_different_field_order(self):
+ # gh-8940
+ a = np.zeros(3, dtype=[('a', 'i4'), ('b', 'f4'), ('c', 'u1')])
+ b = np.ones(3, dtype=[('c', 'u1'), ('b', 'f4'), ('a', 'i4')])
+ # this should not give a FutureWarning:
+ j = join_by(['c', 'b'], a, b, jointype='inner', usemask=False)
+ assert_equal(j.dtype.names, ['b', 'c', 'a1', 'a2'])
+
+ def test_duplicate_keys(self):
+ a = np.zeros(3, dtype=[('a', 'i4'), ('b', 'f4'), ('c', 'u1')])
+ b = np.ones(3, dtype=[('c', 'u1'), ('b', 'f4'), ('a', 'i4')])
+ assert_raises(ValueError, join_by, ['a', 'b', 'b'], a, b)
+
+ def test_same_name_different_dtypes_key(self):
+ a_dtype = np.dtype([('key', 'S5'), ('value', '<f4')])
+ b_dtype = np.dtype([('key', 'S10'), ('value', '<f4')])
+ expected_dtype = np.dtype([
+ ('key', 'S10'), ('value1', '<f4'), ('value2', '<f4')])
+
+ a = np.array([('Sarah', 8.0), ('John', 6.0)], dtype=a_dtype)
+ b = np.array([('Sarah', 10.0), ('John', 7.0)], dtype=b_dtype)
+ res = join_by('key', a, b)
+
+ assert_equal(res.dtype, expected_dtype)
+
+ def test_same_name_different_dtypes(self):
+ # gh-9338
+ a_dtype = np.dtype([('key', 'S10'), ('value', '<f4')])
+ b_dtype = np.dtype([('key', 'S10'), ('value', '<f8')])
+ expected_dtype = np.dtype([
+ ('key', '|S10'), ('value1', '<f4'), ('value2', '<f8')])
+
+ a = np.array([('Sarah', 8.0), ('John', 6.0)], dtype=a_dtype)
+ b = np.array([('Sarah', 10.0), ('John', 7.0)], dtype=b_dtype)
+ res = join_by('key', a, b)
+
+ assert_equal(res.dtype, expected_dtype)
+
+ def test_subarray_key(self):
+ a_dtype = np.dtype([('pos', int, 3), ('f', '<f4')])
+ a = np.array([([1, 1, 1], np.pi), ([1, 2, 3], 0.0)], dtype=a_dtype)
+
+ b_dtype = np.dtype([('pos', int, 3), ('g', '<f4')])
+ b = np.array([([1, 1, 1], 3), ([3, 2, 1], 0.0)], dtype=b_dtype)
+
+ expected_dtype = np.dtype([('pos', int, 3), ('f', '<f4'), ('g', '<f4')])
+ expected = np.array([([1, 1, 1], np.pi, 3)], dtype=expected_dtype)
+
+ res = join_by('pos', a, b)
+ assert_equal(res.dtype, expected_dtype)
+ assert_equal(res, expected)
+
+ def test_padded_dtype(self):
+ dt = np.dtype('i1,f4', align=True)
+ dt.names = ('k', 'v')
+        assert_equal(len(dt.descr), 3)  # padding field is inserted
+
+ a = np.array([(1, 3), (3, 2)], dt)
+ b = np.array([(1, 1), (2, 2)], dt)
+ res = join_by('k', a, b)
+
+ # no padding fields remain
+ expected_dtype = np.dtype([
+ ('k', 'i1'), ('v1', 'f4'), ('v2', 'f4')
+ ])
+
+ assert_equal(res.dtype, expected_dtype)
+
+
+class TestJoinBy2:
+ @classmethod
+ def setup_method(cls):
+ cls.a = np.array(list(zip(np.arange(10), np.arange(50, 60),
+ np.arange(100, 110))),
+ dtype=[('a', int), ('b', int), ('c', int)])
+ cls.b = np.array(list(zip(np.arange(10), np.arange(65, 75),
+ np.arange(100, 110))),
+ dtype=[('a', int), ('b', int), ('d', int)])
+
+ def test_no_r1postfix(self):
+ # Basic test of join_by no_r1postfix
+ a, b = self.a, self.b
+
+ test = join_by(
+ 'a', a, b, r1postfix='', r2postfix='2', jointype='inner')
+ control = np.array([(0, 50, 65, 100, 100), (1, 51, 66, 101, 101),
+ (2, 52, 67, 102, 102), (3, 53, 68, 103, 103),
+ (4, 54, 69, 104, 104), (5, 55, 70, 105, 105),
+ (6, 56, 71, 106, 106), (7, 57, 72, 107, 107),
+ (8, 58, 73, 108, 108), (9, 59, 74, 109, 109)],
+ dtype=[('a', int), ('b', int), ('b2', int),
+ ('c', int), ('d', int)])
+ assert_equal(test, control)
+
+ def test_no_postfix(self):
+ assert_raises(ValueError, join_by, 'a', self.a, self.b,
+ r1postfix='', r2postfix='')
+
+ def test_no_r2postfix(self):
+ # Basic test of join_by no_r2postfix
+ a, b = self.a, self.b
+
+ test = join_by(
+ 'a', a, b, r1postfix='1', r2postfix='', jointype='inner')
+ control = np.array([(0, 50, 65, 100, 100), (1, 51, 66, 101, 101),
+ (2, 52, 67, 102, 102), (3, 53, 68, 103, 103),
+ (4, 54, 69, 104, 104), (5, 55, 70, 105, 105),
+ (6, 56, 71, 106, 106), (7, 57, 72, 107, 107),
+ (8, 58, 73, 108, 108), (9, 59, 74, 109, 109)],
+ dtype=[('a', int), ('b1', int), ('b', int),
+ ('c', int), ('d', int)])
+ assert_equal(test, control)
+
+ def test_two_keys_two_vars(self):
+ a = np.array(list(zip(np.tile([10, 11], 5), np.repeat(np.arange(5), 2),
+ np.arange(50, 60), np.arange(10, 20))),
+ dtype=[('k', int), ('a', int), ('b', int), ('c', int)])
+
+ b = np.array(list(zip(np.tile([10, 11], 5), np.repeat(np.arange(5), 2),
+ np.arange(65, 75), np.arange(0, 10))),
+ dtype=[('k', int), ('a', int), ('b', int), ('c', int)])
+
+ control = np.array([(10, 0, 50, 65, 10, 0), (11, 0, 51, 66, 11, 1),
+ (10, 1, 52, 67, 12, 2), (11, 1, 53, 68, 13, 3),
+ (10, 2, 54, 69, 14, 4), (11, 2, 55, 70, 15, 5),
+ (10, 3, 56, 71, 16, 6), (11, 3, 57, 72, 17, 7),
+ (10, 4, 58, 73, 18, 8), (11, 4, 59, 74, 19, 9)],
+ dtype=[('k', int), ('a', int), ('b1', int),
+ ('b2', int), ('c1', int), ('c2', int)])
+ test = join_by(
+ ['a', 'k'], a, b, r1postfix='1', r2postfix='2', jointype='inner')
+ assert_equal(test.dtype, control.dtype)
+ assert_equal(test, control)
+
+
+class TestAppendFieldsObj:
+ """
+ Test append_fields with arrays containing objects
+ """
+ # https://github.com/numpy/numpy/issues/2346
+
+ def setup_method(self):
+ from datetime import date
+ self.data = {'obj': date(2000, 1, 1)}
+
+ def test_append_to_objects(self):
+ "Test append_fields when the base array contains objects"
+ obj = self.data['obj']
+ x = np.array([(obj, 1.), (obj, 2.)],
+ dtype=[('A', object), ('B', float)])
+ y = np.array([10, 20], dtype=int)
+ test = append_fields(x, 'C', data=y, usemask=False)
+ control = np.array([(obj, 1.0, 10), (obj, 2.0, 20)],
+ dtype=[('A', object), ('B', float), ('C', int)])
+ assert_equal(test, control)
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_regression.py b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_regression.py
new file mode 100644
index 0000000..8839ed5
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_regression.py
@@ -0,0 +1,231 @@
+import os
+
+import numpy as np
+from numpy.testing import (
+ _assert_valid_refcount,
+ assert_,
+ assert_array_almost_equal,
+ assert_array_equal,
+ assert_equal,
+ assert_raises,
+)
+
+
+class TestRegression:
+ def test_poly1d(self):
+ # Ticket #28
+ assert_equal(np.poly1d([1]) - np.poly1d([1, 0]),
+ np.poly1d([-1, 1]))
+
+ def test_cov_parameters(self):
+ # Ticket #91
+ x = np.random.random((3, 3))
+ y = x.copy()
+ np.cov(x, rowvar=True)
+ np.cov(y, rowvar=False)
+ assert_array_equal(x, y)
+
+ def test_mem_digitize(self):
+ # Ticket #95
+ for i in range(100):
+ np.digitize([1, 2, 3, 4], [1, 3])
+ np.digitize([0, 1, 2, 3, 4], [1, 3])
+
+ def test_unique_zero_sized(self):
+ # Ticket #205
+ assert_array_equal([], np.unique(np.array([])))
+
+ def test_mem_vectorise(self):
+ # Ticket #325
+ vt = np.vectorize(lambda *args: args)
+ vt(np.zeros((1, 2, 1)), np.zeros((2, 1, 1)), np.zeros((1, 1, 2)))
+        vt(np.zeros((1, 2, 1)), np.zeros((2, 1, 1)),
+           np.zeros((1, 1, 2)), np.zeros((2, 2)))
+
+ def test_mgrid_single_element(self):
+ # Ticket #339
+ assert_array_equal(np.mgrid[0:0:1j], [0])
+ assert_array_equal(np.mgrid[0:0], [])
+
+ def test_refcount_vectorize(self):
+ # Ticket #378
+ def p(x, y):
+ return 123
+ v = np.vectorize(p)
+ _assert_valid_refcount(v)
+
+ def test_poly1d_nan_roots(self):
+ # Ticket #396
+ p = np.poly1d([np.nan, np.nan, 1], r=False)
+ assert_raises(np.linalg.LinAlgError, getattr, p, "r")
+
+ def test_mem_polymul(self):
+ # Ticket #448
+ np.polymul([], [1.])
+
+ def test_mem_string_concat(self):
+ # Ticket #469
+ x = np.array([])
+ np.append(x, 'asdasd\tasdasd')
+
+ def test_poly_div(self):
+ # Ticket #553
+ u = np.poly1d([1, 2, 3])
+ v = np.poly1d([1, 2, 3, 4, 5])
+ q, r = np.polydiv(u, v)
+ assert_equal(q * v + r, u)
+
+ def test_poly_eq(self):
+ # Ticket #554
+ x = np.poly1d([1, 2, 3])
+ y = np.poly1d([3, 4])
+ assert_(x != y)
+ assert_(x == x)
+
+ def test_polyfit_build(self):
+ # Ticket #628
+ ref = [-1.06123820e-06, 5.70886914e-04, -1.13822012e-01,
+ 9.95368241e+00, -3.14526520e+02]
+ x = [90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103,
+ 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115,
+ 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 129,
+ 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141,
+ 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157,
+ 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169,
+ 170, 171, 172, 173, 174, 175, 176]
+ y = [9.0, 3.0, 7.0, 4.0, 4.0, 8.0, 6.0, 11.0, 9.0, 8.0, 11.0, 5.0,
+ 6.0, 5.0, 9.0, 8.0, 6.0, 10.0, 6.0, 10.0, 7.0, 6.0, 6.0, 6.0,
+ 13.0, 4.0, 9.0, 11.0, 4.0, 5.0, 8.0, 5.0, 7.0, 7.0, 6.0, 12.0,
+ 7.0, 7.0, 9.0, 4.0, 12.0, 6.0, 6.0, 4.0, 3.0, 9.0, 8.0, 8.0,
+ 6.0, 7.0, 9.0, 10.0, 6.0, 8.0, 4.0, 7.0, 7.0, 10.0, 8.0, 8.0,
+ 6.0, 3.0, 8.0, 4.0, 5.0, 7.0, 8.0, 6.0, 6.0, 4.0, 12.0, 9.0,
+ 8.0, 8.0, 8.0, 6.0, 7.0, 4.0, 4.0, 5.0, 7.0]
+ tested = np.polyfit(x, y, 4)
+ assert_array_almost_equal(ref, tested)
+
+ def test_polydiv_type(self):
+ # Make polydiv work for complex types
+ msg = "Wrong type, should be complex"
+ x = np.ones(3, dtype=complex)
+ q, r = np.polydiv(x, x)
+ assert_(q.dtype == complex, msg)
+ msg = "Wrong type, should be float"
+ x = np.ones(3, dtype=int)
+ q, r = np.polydiv(x, x)
+ assert_(q.dtype == float, msg)
+
+ def test_histogramdd_too_many_bins(self):
+ # Ticket 928.
+ assert_raises(ValueError, np.histogramdd, np.ones((1, 10)), bins=2**10)
+
+ def test_polyint_type(self):
+ # Ticket #944
+ msg = "Wrong type, should be complex"
+ x = np.ones(3, dtype=complex)
+ assert_(np.polyint(x).dtype == complex, msg)
+ msg = "Wrong type, should be float"
+ x = np.ones(3, dtype=int)
+ assert_(np.polyint(x).dtype == float, msg)
+
+ def test_ndenumerate_crash(self):
+ # Ticket 1140
+ # Shouldn't crash:
+ list(np.ndenumerate(np.array([[]])))
+
+ def test_large_fancy_indexing(self):
+ # Large enough to fail on 64-bit.
+ nbits = np.dtype(np.intp).itemsize * 8
+ thesize = int((2**nbits)**(1.0 / 5.0) + 1)
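+        # thesize**5 exceeds the intp range, so building the broadcast
+        # index grid of shape (thesize,) * 5 below must raise ValueError.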
+
+ def dp():
+ n = 3
+ a = np.ones((n,) * 5)
+ i = np.random.randint(0, n, size=thesize)
+ a[np.ix_(i, i, i, i, i)] = 0
+
+ def dp2():
+ n = 3
+ a = np.ones((n,) * 5)
+ i = np.random.randint(0, n, size=thesize)
+ a[np.ix_(i, i, i, i, i)]
+
+ assert_raises(ValueError, dp)
+ assert_raises(ValueError, dp2)
+
+ def test_void_coercion(self):
+ dt = np.dtype([('a', 'f4'), ('b', 'i4')])
+ x = np.zeros((1,), dt)
+ assert_(np.r_[x, x].dtype == dt)
+
+ def test_include_dirs(self):
+ # As a sanity check, just test that get_include
+ # includes something reasonable. Somewhat
+ # related to ticket #1405.
+ include_dirs = [np.get_include()]
+ for path in include_dirs:
+ assert_(isinstance(path, str))
+ assert_(path != '')
+
+ def test_polyder_return_type(self):
+ # Ticket #1249
+ assert_(isinstance(np.polyder(np.poly1d([1]), 0), np.poly1d))
+ assert_(isinstance(np.polyder([1], 0), np.ndarray))
+ assert_(isinstance(np.polyder(np.poly1d([1]), 1), np.poly1d))
+ assert_(isinstance(np.polyder([1], 1), np.ndarray))
+
+ def test_append_fields_dtype_list(self):
+ # Ticket #1676
+ from numpy.lib.recfunctions import append_fields
+
+ base = np.array([1, 2, 3], dtype=np.int32)
+ names = ['a', 'b', 'c']
+ data = np.eye(3).astype(np.int32)
+ dlist = [np.float64, np.int32, np.int32]
+        # should not raise
+        append_fields(base, names, data, dlist)
+
+ def test_loadtxt_fields_subarrays(self):
+ # For ticket #1936
+ from io import StringIO
+
+ dt = [("a", 'u1', 2), ("b", 'u1', 2)]
+ x = np.loadtxt(StringIO("0 1 2 3"), dtype=dt)
+ assert_equal(x, np.array([((0, 1), (2, 3))], dtype=dt))
+
+ dt = [("a", [("a", 'u1', (1, 3)), ("b", 'u1')])]
+ x = np.loadtxt(StringIO("0 1 2 3"), dtype=dt)
+ assert_equal(x, np.array([(((0, 1, 2), 3),)], dtype=dt))
+
+ dt = [("a", 'u1', (2, 2))]
+ x = np.loadtxt(StringIO("0 1 2 3"), dtype=dt)
+ assert_equal(x, np.array([(((0, 1), (2, 3)),)], dtype=dt))
+
+ dt = [("a", 'u1', (2, 3, 2))]
+ x = np.loadtxt(StringIO("0 1 2 3 4 5 6 7 8 9 10 11"), dtype=dt)
+ data = [((((0, 1), (2, 3), (4, 5)), ((6, 7), (8, 9), (10, 11))),)]
+ assert_equal(x, np.array(data, dtype=dt))
+
+ def test_nansum_with_boolean(self):
+ # gh-2978
+ a = np.zeros(2, dtype=bool)
+        # should not raise
+        np.nansum(a)
+
+ def test_py3_compat(self):
+ # gh-2561
+        # Test that the old-style class check is bypassed in Python 3
+ class C:
+ """Old-style class in python2, normal class in python3"""
+ pass
+
+        with open(os.devnull, 'w') as out:
+            # np.info should not raise AttributeError on the class check
+            np.info(C(), output=out)
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_shape_base.py b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_shape_base.py
new file mode 100644
index 0000000..b0b68dd
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_shape_base.py
@@ -0,0 +1,813 @@
+import functools
+import sys
+
+import pytest
+
+import numpy as np
+from numpy import (
+ apply_along_axis,
+ apply_over_axes,
+ array_split,
+ column_stack,
+ dsplit,
+ dstack,
+ expand_dims,
+ hsplit,
+ kron,
+ put_along_axis,
+ split,
+ take_along_axis,
+ tile,
+ vsplit,
+)
+from numpy.exceptions import AxisError
+from numpy.testing import assert_, assert_array_equal, assert_equal, assert_raises
+
+IS_64BIT = sys.maxsize > 2**32
+
+
+def _add_keepdims(func):
+ """ hack in keepdims behavior into a function taking an axis """
+ @functools.wraps(func)
+ def wrapped(a, axis, **kwargs):
+ res = func(a, axis=axis, **kwargs)
+ if axis is None:
+ axis = 0 # res is now a scalar, so we can insert this anywhere
+ return np.expand_dims(res, axis=axis)
+ return wrapped
+
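+# Illustrative note: for a = np.eye(3), _add_keepdims(np.argmax)(a, axis=1)
+# returns shape (3, 1) instead of the (3,) from np.argmax, so the result can
+# be fed directly to take_along_axis / put_along_axis.
+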
+
+class TestTakeAlongAxis:
+ def test_argequivalent(self):
+ """ Test it translates from arg<func> to <func> """
+ from numpy.random import rand
+ a = rand(3, 4, 5)
+
+ funcs = [
+ (np.sort, np.argsort, {}),
+ (_add_keepdims(np.min), _add_keepdims(np.argmin), {}),
+ (_add_keepdims(np.max), _add_keepdims(np.argmax), {}),
+ #(np.partition, np.argpartition, dict(kth=2)),
+ ]
+
+ for func, argfunc, kwargs in funcs:
+ for axis in list(range(a.ndim)) + [None]:
+ a_func = func(a, axis=axis, **kwargs)
+ ai_func = argfunc(a, axis=axis, **kwargs)
+ assert_equal(a_func, take_along_axis(a, ai_func, axis=axis))
+
+ def test_invalid(self):
+ """ Test it errors when indices has too few dimensions """
+ a = np.ones((10, 10))
+ ai = np.ones((10, 2), dtype=np.intp)
+
+ # sanity check
+ take_along_axis(a, ai, axis=1)
+
+ # not enough indices
+ assert_raises(ValueError, take_along_axis, a, np.array(1), axis=1)
+ # bool arrays not allowed
+ assert_raises(IndexError, take_along_axis, a, ai.astype(bool), axis=1)
+ # float arrays not allowed
+ assert_raises(IndexError, take_along_axis, a, ai.astype(float), axis=1)
+ # invalid axis
+ assert_raises(AxisError, take_along_axis, a, ai, axis=10)
+ # invalid indices
+ assert_raises(ValueError, take_along_axis, a, ai, axis=None)
+
+ def test_empty(self):
+ """ Test everything is ok with empty results, even with inserted dims """
+ a = np.ones((3, 4, 5))
+ ai = np.ones((3, 0, 5), dtype=np.intp)
+
+ actual = take_along_axis(a, ai, axis=1)
+ assert_equal(actual.shape, ai.shape)
+
+ def test_broadcast(self):
+ """ Test that non-indexing dimensions are broadcast in both directions """
+ a = np.ones((3, 4, 1))
+ ai = np.ones((1, 2, 5), dtype=np.intp)
+ actual = take_along_axis(a, ai, axis=1)
+ assert_equal(actual.shape, (3, 2, 5))
+
+
+class TestPutAlongAxis:
+ def test_replace_max(self):
+ a_base = np.array([[10, 30, 20], [60, 40, 50]])
+
+ for axis in list(range(a_base.ndim)) + [None]:
+ # we mutate this in the loop
+ a = a_base.copy()
+
+ # replace the max with a small value
+ i_max = _add_keepdims(np.argmax)(a, axis=axis)
+ put_along_axis(a, i_max, -99, axis=axis)
+
+            # find the new minimum, which should sit where the max was
+ i_min = _add_keepdims(np.argmin)(a, axis=axis)
+
+ assert_equal(i_min, i_max)
+
+ def test_broadcast(self):
+ """ Test that non-indexing dimensions are broadcast in both directions """
+ a = np.ones((3, 4, 1))
+ ai = np.arange(10, dtype=np.intp).reshape((1, 2, 5)) % 4
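+        # ai broadcasts against a over the non-index dimensions, so 20 is
+        # written at every broadcast position and read back identically.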
+ put_along_axis(a, ai, 20, axis=1)
+ assert_equal(take_along_axis(a, ai, axis=1), 20)
+
+ def test_invalid(self):
+ """ Test invalid inputs """
+ a_base = np.array([[10, 30, 20], [60, 40, 50]])
+ indices = np.array([[0], [1]])
+ values = np.array([[2], [1]])
+
+ # sanity check
+ a = a_base.copy()
+ put_along_axis(a, indices, values, axis=0)
+ assert np.all(a == [[2, 2, 2], [1, 1, 1]])
+
+ # invalid indices
+ a = a_base.copy()
+ with assert_raises(ValueError) as exc:
+ put_along_axis(a, indices, values, axis=None)
+ assert "single dimension" in str(exc.exception)
+
+
+class TestApplyAlongAxis:
+ def test_simple(self):
+ a = np.ones((20, 10), 'd')
+ assert_array_equal(
+ apply_along_axis(len, 0, a), len(a) * np.ones(a.shape[1]))
+
+ def test_simple101(self):
+ a = np.ones((10, 101), 'd')
+ assert_array_equal(
+ apply_along_axis(len, 0, a), len(a) * np.ones(a.shape[1]))
+
+ def test_3d(self):
+ a = np.arange(27).reshape((3, 3, 3))
+ assert_array_equal(apply_along_axis(np.sum, 0, a),
+ [[27, 30, 33], [36, 39, 42], [45, 48, 51]])
+
+ def test_preserve_subclass(self):
+ def double(row):
+ return row * 2
+
+ class MyNDArray(np.ndarray):
+ pass
+
+ m = np.array([[0, 1], [2, 3]]).view(MyNDArray)
+ expected = np.array([[0, 2], [4, 6]]).view(MyNDArray)
+
+ result = apply_along_axis(double, 0, m)
+ assert_(isinstance(result, MyNDArray))
+ assert_array_equal(result, expected)
+
+ result = apply_along_axis(double, 1, m)
+ assert_(isinstance(result, MyNDArray))
+ assert_array_equal(result, expected)
+
+ def test_subclass(self):
+ class MinimalSubclass(np.ndarray):
+ data = 1
+
+ def minimal_function(array):
+ return array.data
+
+ a = np.zeros((6, 3)).view(MinimalSubclass)
+
+ assert_array_equal(
+ apply_along_axis(minimal_function, 0, a), np.array([1, 1, 1])
+ )
+
+ def test_scalar_array(self, cls=np.ndarray):
+ a = np.ones((6, 3)).view(cls)
+ res = apply_along_axis(np.sum, 0, a)
+ assert_(isinstance(res, cls))
+ assert_array_equal(res, np.array([6, 6, 6]).view(cls))
+
+ def test_0d_array(self, cls=np.ndarray):
+ def sum_to_0d(x):
+ """ Sum x, returning a 0d array of the same class """
+ assert_equal(x.ndim, 1)
+ return np.squeeze(np.sum(x, keepdims=True))
+ a = np.ones((6, 3)).view(cls)
+ res = apply_along_axis(sum_to_0d, 0, a)
+ assert_(isinstance(res, cls))
+ assert_array_equal(res, np.array([6, 6, 6]).view(cls))
+
+ res = apply_along_axis(sum_to_0d, 1, a)
+ assert_(isinstance(res, cls))
+ assert_array_equal(res, np.array([3, 3, 3, 3, 3, 3]).view(cls))
+
+ def test_axis_insertion(self, cls=np.ndarray):
+ def f1to2(x):
+ """produces an asymmetric non-square matrix from x"""
+ assert_equal(x.ndim, 1)
+ return (x[::-1] * x[1:, None]).view(cls)
+
+ a2d = np.arange(6 * 3).reshape((6, 3))
+
+ # 2d insertion along first axis
+ actual = apply_along_axis(f1to2, 0, a2d)
+ expected = np.stack([
+ f1to2(a2d[:, i]) for i in range(a2d.shape[1])
+ ], axis=-1).view(cls)
+ assert_equal(type(actual), type(expected))
+ assert_equal(actual, expected)
+
+ # 2d insertion along last axis
+ actual = apply_along_axis(f1to2, 1, a2d)
+ expected = np.stack([
+ f1to2(a2d[i, :]) for i in range(a2d.shape[0])
+ ], axis=0).view(cls)
+ assert_equal(type(actual), type(expected))
+ assert_equal(actual, expected)
+
+ # 3d insertion along middle axis
+ a3d = np.arange(6 * 5 * 3).reshape((6, 5, 3))
+
+ actual = apply_along_axis(f1to2, 1, a3d)
+ expected = np.stack([
+ np.stack([
+ f1to2(a3d[i, :, j]) for i in range(a3d.shape[0])
+ ], axis=0)
+ for j in range(a3d.shape[2])
+ ], axis=-1).view(cls)
+ assert_equal(type(actual), type(expected))
+ assert_equal(actual, expected)
+
+ def test_subclass_preservation(self):
+ class MinimalSubclass(np.ndarray):
+ pass
+ self.test_scalar_array(MinimalSubclass)
+ self.test_0d_array(MinimalSubclass)
+ self.test_axis_insertion(MinimalSubclass)
+
+ def test_axis_insertion_ma(self):
+ def f1to2(x):
+ """produces an asymmetric non-square matrix from x"""
+ assert_equal(x.ndim, 1)
+ res = x[::-1] * x[1:, None]
+ return np.ma.masked_where(res % 5 == 0, res)
+ a = np.arange(6 * 3).reshape((6, 3))
+ res = apply_along_axis(f1to2, 0, a)
+ assert_(isinstance(res, np.ma.masked_array))
+ assert_equal(res.ndim, 3)
+ assert_array_equal(res[:, :, 0].mask, f1to2(a[:, 0]).mask)
+ assert_array_equal(res[:, :, 1].mask, f1to2(a[:, 1]).mask)
+ assert_array_equal(res[:, :, 2].mask, f1to2(a[:, 2]).mask)
+
+ def test_tuple_func1d(self):
+ def sample_1d(x):
+ return x[1], x[0]
+ res = np.apply_along_axis(sample_1d, 1, np.array([[1, 2], [3, 4]]))
+ assert_array_equal(res, np.array([[2, 1], [4, 3]]))
+
+ def test_empty(self):
+ # can't apply_along_axis when there's no chance to call the function
+ def never_call(x):
+ assert_(False) # should never be reached
+
+ a = np.empty((0, 0))
+ assert_raises(ValueError, np.apply_along_axis, never_call, 0, a)
+ assert_raises(ValueError, np.apply_along_axis, never_call, 1, a)
+
+ # but it's sometimes ok with some non-zero dimensions
+ def empty_to_1(x):
+ assert_(len(x) == 0)
+ return 1
+
+ a = np.empty((10, 0))
+ actual = np.apply_along_axis(empty_to_1, 1, a)
+ assert_equal(actual, np.ones(10))
+ assert_raises(ValueError, np.apply_along_axis, empty_to_1, 0, a)
+
+ def test_with_iterable_object(self):
+ # from issue 5248
+ d = np.array([
+ [{1, 11}, {2, 22}, {3, 33}],
+ [{4, 44}, {5, 55}, {6, 66}]
+ ])
+ actual = np.apply_along_axis(lambda a: set.union(*a), 0, d)
+ expected = np.array([{1, 11, 4, 44}, {2, 22, 5, 55}, {3, 33, 6, 66}])
+
+ assert_equal(actual, expected)
+
+ # issue 8642 - assert_equal doesn't detect this!
+ for i in np.ndindex(actual.shape):
+ assert_equal(type(actual[i]), type(expected[i]))
+
+
+class TestApplyOverAxes:
+ def test_simple(self):
+ a = np.arange(24).reshape(2, 3, 4)
+ aoa_a = apply_over_axes(np.sum, a, [0, 2])
+ assert_array_equal(aoa_a, np.array([[[60], [92], [124]]]))
+
+
+class TestExpandDims:
+ def test_functionality(self):
+ s = (2, 3, 4, 5)
+ a = np.empty(s)
+ for axis in range(-5, 4):
+ b = expand_dims(a, axis)
+ assert_(b.shape[axis] == 1)
+ assert_(np.squeeze(b).shape == s)
+
+ def test_axis_tuple(self):
+ a = np.empty((3, 3, 3))
+ assert np.expand_dims(a, axis=(0, 1, 2)).shape == (1, 1, 1, 3, 3, 3)
+ assert np.expand_dims(a, axis=(0, -1, -2)).shape == (1, 3, 3, 3, 1, 1)
+ assert np.expand_dims(a, axis=(0, 3, 5)).shape == (1, 3, 3, 1, 3, 1)
+ assert np.expand_dims(a, axis=(0, -3, -5)).shape == (1, 1, 3, 1, 3, 3)
+
+ def test_axis_out_of_range(self):
+ s = (2, 3, 4, 5)
+ a = np.empty(s)
+ assert_raises(AxisError, expand_dims, a, -6)
+ assert_raises(AxisError, expand_dims, a, 5)
+
+ a = np.empty((3, 3, 3))
+ assert_raises(AxisError, expand_dims, a, (0, -6))
+ assert_raises(AxisError, expand_dims, a, (0, 5))
+
+ def test_repeated_axis(self):
+ a = np.empty((3, 3, 3))
+ assert_raises(ValueError, expand_dims, a, axis=(1, 1))
+
+ def test_subclasses(self):
+ a = np.arange(10).reshape((2, 5))
+ a = np.ma.array(a, mask=a % 3 == 0)
+
+ expanded = np.expand_dims(a, axis=1)
+ assert_(isinstance(expanded, np.ma.MaskedArray))
+ assert_equal(expanded.shape, (2, 1, 5))
+ assert_equal(expanded.mask.shape, (2, 1, 5))
+
+
+class TestArraySplit:
+ def test_integer_0_split(self):
+ a = np.arange(10)
+ assert_raises(ValueError, array_split, a, 0)
+
+ def test_integer_split(self):
+ a = np.arange(10)
+ res = array_split(a, 1)
+ desired = [np.arange(10)]
+ compare_results(res, desired)
+
+ res = array_split(a, 2)
+ desired = [np.arange(5), np.arange(5, 10)]
+ compare_results(res, desired)
+
+ res = array_split(a, 3)
+ desired = [np.arange(4), np.arange(4, 7), np.arange(7, 10)]
+ compare_results(res, desired)
+
+ res = array_split(a, 4)
+ desired = [np.arange(3), np.arange(3, 6), np.arange(6, 8),
+ np.arange(8, 10)]
+ compare_results(res, desired)
+
+ res = array_split(a, 5)
+ desired = [np.arange(2), np.arange(2, 4), np.arange(4, 6),
+ np.arange(6, 8), np.arange(8, 10)]
+ compare_results(res, desired)
+
+ res = array_split(a, 6)
+ desired = [np.arange(2), np.arange(2, 4), np.arange(4, 6),
+ np.arange(6, 8), np.arange(8, 9), np.arange(9, 10)]
+ compare_results(res, desired)
+
+ res = array_split(a, 7)
+ desired = [np.arange(2), np.arange(2, 4), np.arange(4, 6),
+ np.arange(6, 7), np.arange(7, 8), np.arange(8, 9),
+ np.arange(9, 10)]
+ compare_results(res, desired)
+
+ res = array_split(a, 8)
+ desired = [np.arange(2), np.arange(2, 4), np.arange(4, 5),
+ np.arange(5, 6), np.arange(6, 7), np.arange(7, 8),
+ np.arange(8, 9), np.arange(9, 10)]
+ compare_results(res, desired)
+
+ res = array_split(a, 9)
+ desired = [np.arange(2), np.arange(2, 3), np.arange(3, 4),
+ np.arange(4, 5), np.arange(5, 6), np.arange(6, 7),
+ np.arange(7, 8), np.arange(8, 9), np.arange(9, 10)]
+ compare_results(res, desired)
+
+ res = array_split(a, 10)
+ desired = [np.arange(1), np.arange(1, 2), np.arange(2, 3),
+ np.arange(3, 4), np.arange(4, 5), np.arange(5, 6),
+ np.arange(6, 7), np.arange(7, 8), np.arange(8, 9),
+ np.arange(9, 10)]
+ compare_results(res, desired)
+
+ res = array_split(a, 11)
+ desired = [np.arange(1), np.arange(1, 2), np.arange(2, 3),
+ np.arange(3, 4), np.arange(4, 5), np.arange(5, 6),
+ np.arange(6, 7), np.arange(7, 8), np.arange(8, 9),
+ np.arange(9, 10), np.array([])]
+ compare_results(res, desired)
+
+ def test_integer_split_2D_rows(self):
+ a = np.array([np.arange(10), np.arange(10)])
+ res = array_split(a, 3, axis=0)
+ tgt = [np.array([np.arange(10)]), np.array([np.arange(10)]),
+ np.zeros((0, 10))]
+ compare_results(res, tgt)
+ assert_(a.dtype.type is res[-1].dtype.type)
+
+ # Same thing for manual splits:
+ res = array_split(a, [0, 1], axis=0)
+ tgt = [np.zeros((0, 10)), np.array([np.arange(10)]),
+ np.array([np.arange(10)])]
+ compare_results(res, tgt)
+ assert_(a.dtype.type is res[-1].dtype.type)
+
+ def test_integer_split_2D_cols(self):
+ a = np.array([np.arange(10), np.arange(10)])
+ res = array_split(a, 3, axis=-1)
+ desired = [np.array([np.arange(4), np.arange(4)]),
+ np.array([np.arange(4, 7), np.arange(4, 7)]),
+ np.array([np.arange(7, 10), np.arange(7, 10)])]
+ compare_results(res, desired)
+
+ def test_integer_split_2D_default(self):
+ """ This will fail if we change default axis
+ """
+ a = np.array([np.arange(10), np.arange(10)])
+ res = array_split(a, 3)
+ tgt = [np.array([np.arange(10)]), np.array([np.arange(10)]),
+ np.zeros((0, 10))]
+ compare_results(res, tgt)
+ assert_(a.dtype.type is res[-1].dtype.type)
+ # perhaps should check higher dimensions
+
+ @pytest.mark.skipif(not IS_64BIT, reason="Needs 64bit platform")
+ def test_integer_split_2D_rows_greater_max_int32(self):
+ a = np.broadcast_to([0], (1 << 32, 2))
+ res = array_split(a, 4)
+ chunk = np.broadcast_to([0], (1 << 30, 2))
+ tgt = [chunk] * 4
+ for i in range(len(tgt)):
+ assert_equal(res[i].shape, tgt[i].shape)
+
+ def test_index_split_simple(self):
+ a = np.arange(10)
+ indices = [1, 5, 7]
+ res = array_split(a, indices, axis=-1)
+ desired = [np.arange(0, 1), np.arange(1, 5), np.arange(5, 7),
+ np.arange(7, 10)]
+ compare_results(res, desired)
+
+ def test_index_split_low_bound(self):
+ a = np.arange(10)
+ indices = [0, 5, 7]
+ res = array_split(a, indices, axis=-1)
+ desired = [np.array([]), np.arange(0, 5), np.arange(5, 7),
+ np.arange(7, 10)]
+ compare_results(res, desired)
+
+ def test_index_split_high_bound(self):
+ a = np.arange(10)
+ indices = [0, 5, 7, 10, 12]
+ res = array_split(a, indices, axis=-1)
+ desired = [np.array([]), np.arange(0, 5), np.arange(5, 7),
+ np.arange(7, 10), np.array([]), np.array([])]
+ compare_results(res, desired)
+
+
+class TestSplit:
+    # The split function is essentially the same as array_split,
+    # except that it tests whether splitting will result in an
+    # equal split.  Only test for this case.
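+    # For contrast (illustrative example):
+    #   np.array_split(np.arange(10), 3) -> pieces of length 4, 3, 3
+    #   np.split(np.arange(10), 3)       -> raises ValueError (10 % 3 != 0)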
+
+ def test_equal_split(self):
+ a = np.arange(10)
+ res = split(a, 2)
+ desired = [np.arange(5), np.arange(5, 10)]
+ compare_results(res, desired)
+
+ def test_unequal_split(self):
+ a = np.arange(10)
+ assert_raises(ValueError, split, a, 3)
+
+
+class TestColumnStack:
+ def test_non_iterable(self):
+ assert_raises(TypeError, column_stack, 1)
+
+ def test_1D_arrays(self):
+ # example from docstring
+ a = np.array((1, 2, 3))
+ b = np.array((2, 3, 4))
+ expected = np.array([[1, 2],
+ [2, 3],
+ [3, 4]])
+ actual = np.column_stack((a, b))
+ assert_equal(actual, expected)
+
+ def test_2D_arrays(self):
+ # same as hstack 2D docstring example
+ a = np.array([[1], [2], [3]])
+ b = np.array([[2], [3], [4]])
+ expected = np.array([[1, 2],
+ [2, 3],
+ [3, 4]])
+ actual = np.column_stack((a, b))
+ assert_equal(actual, expected)
+
+ def test_generator(self):
+ with pytest.raises(TypeError, match="arrays to stack must be"):
+ column_stack(np.arange(3) for _ in range(2))
+
+
+class TestDstack:
+ def test_non_iterable(self):
+ assert_raises(TypeError, dstack, 1)
+
+ def test_0D_array(self):
+ a = np.array(1)
+ b = np.array(2)
+ res = dstack([a, b])
+ desired = np.array([[[1, 2]]])
+ assert_array_equal(res, desired)
+
+ def test_1D_array(self):
+ a = np.array([1])
+ b = np.array([2])
+ res = dstack([a, b])
+ desired = np.array([[[1, 2]]])
+ assert_array_equal(res, desired)
+
+ def test_2D_array(self):
+ a = np.array([[1], [2]])
+ b = np.array([[1], [2]])
+ res = dstack([a, b])
+        desired = np.array([[[1, 1]], [[2, 2]]])
+ assert_array_equal(res, desired)
+
+ def test_2D_array2(self):
+ a = np.array([1, 2])
+ b = np.array([1, 2])
+ res = dstack([a, b])
+ desired = np.array([[[1, 1], [2, 2]]])
+ assert_array_equal(res, desired)
+
+ def test_generator(self):
+ with pytest.raises(TypeError, match="arrays to stack must be"):
+ dstack(np.arange(3) for _ in range(2))
+
+
+# array_split has more comprehensive test of splitting.
+# only do simple test on hsplit, vsplit, and dsplit
+class TestHsplit:
+ """Only testing for integer splits.
+
+ """
+ def test_non_iterable(self):
+ assert_raises(ValueError, hsplit, 1, 1)
+
+ def test_0D_array(self):
+ a = np.array(1)
+        assert_raises(ValueError, hsplit, a, 2)
+
+ def test_1D_array(self):
+ a = np.array([1, 2, 3, 4])
+ res = hsplit(a, 2)
+ desired = [np.array([1, 2]), np.array([3, 4])]
+ compare_results(res, desired)
+
+ def test_2D_array(self):
+ a = np.array([[1, 2, 3, 4],
+ [1, 2, 3, 4]])
+ res = hsplit(a, 2)
+ desired = [np.array([[1, 2], [1, 2]]), np.array([[3, 4], [3, 4]])]
+ compare_results(res, desired)
+
+
+class TestVsplit:
+ """Only testing for integer splits.
+
+ """
+ def test_non_iterable(self):
+ assert_raises(ValueError, vsplit, 1, 1)
+
+ def test_0D_array(self):
+ a = np.array(1)
+ assert_raises(ValueError, vsplit, a, 2)
+
+ def test_1D_array(self):
+ a = np.array([1, 2, 3, 4])
+        assert_raises(ValueError, vsplit, a, 2)
+
+ def test_2D_array(self):
+ a = np.array([[1, 2, 3, 4],
+ [1, 2, 3, 4]])
+ res = vsplit(a, 2)
+ desired = [np.array([[1, 2, 3, 4]]), np.array([[1, 2, 3, 4]])]
+ compare_results(res, desired)
+
+
+class TestDsplit:
+ # Only testing for integer splits.
+ def test_non_iterable(self):
+ assert_raises(ValueError, dsplit, 1, 1)
+
+ def test_0D_array(self):
+ a = np.array(1)
+ assert_raises(ValueError, dsplit, a, 2)
+
+ def test_1D_array(self):
+ a = np.array([1, 2, 3, 4])
+ assert_raises(ValueError, dsplit, a, 2)
+
+ def test_2D_array(self):
+ a = np.array([[1, 2, 3, 4],
+ [1, 2, 3, 4]])
+        assert_raises(ValueError, dsplit, a, 2)
+
+ def test_3D_array(self):
+ a = np.array([[[1, 2, 3, 4],
+ [1, 2, 3, 4]],
+ [[1, 2, 3, 4],
+ [1, 2, 3, 4]]])
+ res = dsplit(a, 2)
+ desired = [np.array([[[1, 2], [1, 2]], [[1, 2], [1, 2]]]),
+ np.array([[[3, 4], [3, 4]], [[3, 4], [3, 4]]])]
+ compare_results(res, desired)
+
+
+class TestSqueeze:
+ def test_basic(self):
+ from numpy.random import rand
+
+ a = rand(20, 10, 10, 1, 1)
+ b = rand(20, 1, 10, 1, 20)
+ c = rand(1, 1, 20, 10)
+ assert_array_equal(np.squeeze(a), np.reshape(a, (20, 10, 10)))
+ assert_array_equal(np.squeeze(b), np.reshape(b, (20, 10, 20)))
+ assert_array_equal(np.squeeze(c), np.reshape(c, (20, 10)))
+
+ # Squeezing to 0-dim should still give an ndarray
+ a = [[[1.5]]]
+ res = np.squeeze(a)
+ assert_equal(res, 1.5)
+ assert_equal(res.ndim, 0)
+ assert_equal(type(res), np.ndarray)
+
+
+class TestKron:
+ def test_basic(self):
+ # Using 0-dimensional ndarray
+ a = np.array(1)
+ b = np.array([[1, 2], [3, 4]])
+ k = np.array([[1, 2], [3, 4]])
+ assert_array_equal(np.kron(a, b), k)
+ a = np.array([[1, 2], [3, 4]])
+ b = np.array(1)
+ assert_array_equal(np.kron(a, b), k)
+
+ # Using 1-dimensional ndarray
+ a = np.array([3])
+ b = np.array([[1, 2], [3, 4]])
+ k = np.array([[3, 6], [9, 12]])
+ assert_array_equal(np.kron(a, b), k)
+ a = np.array([[1, 2], [3, 4]])
+ b = np.array([3])
+ assert_array_equal(np.kron(a, b), k)
+
+ # Using 3-dimensional ndarray
+ a = np.array([[[1]], [[2]]])
+ b = np.array([[1, 2], [3, 4]])
+ k = np.array([[[1, 2], [3, 4]], [[2, 4], [6, 8]]])
+ assert_array_equal(np.kron(a, b), k)
+ a = np.array([[1, 2], [3, 4]])
+ b = np.array([[[1]], [[2]]])
+ k = np.array([[[1, 2], [3, 4]], [[2, 4], [6, 8]]])
+ assert_array_equal(np.kron(a, b), k)
+
+ def test_return_type(self):
+ class myarray(np.ndarray):
+ __array_priority__ = 1.0
+
+ a = np.ones([2, 2])
+ ma = myarray(a.shape, a.dtype, a.data)
+ assert_equal(type(kron(a, a)), np.ndarray)
+ assert_equal(type(kron(ma, ma)), myarray)
+ assert_equal(type(kron(a, ma)), myarray)
+ assert_equal(type(kron(ma, a)), myarray)
+
+ @pytest.mark.parametrize(
+ "array_class", [np.asarray, np.asmatrix]
+ )
+ def test_kron_smoke(self, array_class):
+ a = array_class(np.ones([3, 3]))
+ b = array_class(np.ones([3, 3]))
+ k = array_class(np.ones([9, 9]))
+
+ assert_array_equal(np.kron(a, b), k)
+
+ def test_kron_ma(self):
+ x = np.ma.array([[1, 2], [3, 4]], mask=[[0, 1], [1, 0]])
+ k = np.ma.array(np.diag([1, 4, 4, 16]),
+ mask=~np.array(np.identity(4), dtype=bool))
+
+ assert_array_equal(k, np.kron(x, x))
+
+ @pytest.mark.parametrize(
+ "shape_a,shape_b", [
+ ((1, 1), (1, 1)),
+ ((1, 2, 3), (4, 5, 6)),
+ ((2, 2), (2, 2, 2)),
+ ((1, 0), (1, 1)),
+ ((2, 0, 2), (2, 2)),
+ ((2, 0, 0, 2), (2, 0, 2)),
+ ])
+ def test_kron_shape(self, shape_a, shape_b):
+ a = np.ones(shape_a)
+ b = np.ones(shape_b)
+ normalised_shape_a = (1,) * max(0, len(shape_b) - len(shape_a)) + shape_a
+ normalised_shape_b = (1,) * max(0, len(shape_a) - len(shape_b)) + shape_b
+ expected_shape = np.multiply(normalised_shape_a, normalised_shape_b)
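+        # e.g. (2, 2) and (2, 2, 2) pad to (1, 2, 2) and (2, 2, 2),
+        # giving an expected kron shape of (2, 4, 4)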
+
+ k = np.kron(a, b)
+ assert np.array_equal(
+ k.shape, expected_shape), "Unexpected shape from kron"
+
+
+class TestTile:
+ def test_basic(self):
+ a = np.array([0, 1, 2])
+ b = [[1, 2], [3, 4]]
+ assert_equal(tile(a, 2), [0, 1, 2, 0, 1, 2])
+ assert_equal(tile(a, (2, 2)), [[0, 1, 2, 0, 1, 2], [0, 1, 2, 0, 1, 2]])
+ assert_equal(tile(a, (1, 2)), [[0, 1, 2, 0, 1, 2]])
+ assert_equal(tile(b, 2), [[1, 2, 1, 2], [3, 4, 3, 4]])
+ assert_equal(tile(b, (2, 1)), [[1, 2], [3, 4], [1, 2], [3, 4]])
+ assert_equal(tile(b, (2, 2)), [[1, 2, 1, 2], [3, 4, 3, 4],
+ [1, 2, 1, 2], [3, 4, 3, 4]])
+
+ def test_tile_one_repetition_on_array_gh4679(self):
+ a = np.arange(5)
+ b = tile(a, 1)
+ b += 2
+ assert_equal(a, np.arange(5))
+
+ def test_empty(self):
+ a = np.array([[[]]])
+ b = np.array([[], []])
+ c = tile(b, 2).shape
+ d = tile(a, (3, 2, 5)).shape
+ assert_equal(c, (2, 0))
+ assert_equal(d, (3, 2, 0))
+
+ def test_kroncompare(self):
+ from numpy.random import randint
+
+ reps = [(2,), (1, 2), (2, 1), (2, 2), (2, 3, 2), (3, 2)]
+ shape = [(3,), (2, 3), (3, 4, 3), (3, 2, 3), (4, 3, 2, 4), (2, 2)]
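+        # tiling b by reps r should match kron(ones(r), b) elementwise,
+        # since both replicate b across each axis r[i] times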
+ for s in shape:
+ b = randint(0, 10, size=s)
+ for r in reps:
+ a = np.ones(r, b.dtype)
+ large = tile(b, r)
+ klarge = kron(a, b)
+ assert_equal(large, klarge)
+
+
+class TestMayShareMemory:
+ def test_basic(self):
+ d = np.ones((50, 60))
+ d2 = np.ones((30, 60, 6))
+ assert_(np.may_share_memory(d, d))
+ assert_(np.may_share_memory(d, d[::-1]))
+ assert_(np.may_share_memory(d, d[::2]))
+ assert_(np.may_share_memory(d, d[1:, ::-1]))
+
+ assert_(not np.may_share_memory(d[::-1], d2))
+ assert_(not np.may_share_memory(d[::2], d2))
+ assert_(not np.may_share_memory(d[1:, ::-1], d2))
+ assert_(np.may_share_memory(d2[1:, ::-1], d2))
+
+
+# Utility
+def compare_results(res, desired):
+ """Compare lists of arrays."""
+ for x, y in zip(res, desired, strict=False):
+ assert_array_equal(x, y)
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_stride_tricks.py b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_stride_tricks.py
new file mode 100644
index 0000000..fe40c95
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_stride_tricks.py
@@ -0,0 +1,656 @@
+import pytest
+
+import numpy as np
+from numpy._core._rational_tests import rational
+from numpy.lib._stride_tricks_impl import (
+ _broadcast_shape,
+ as_strided,
+ broadcast_arrays,
+ broadcast_shapes,
+ broadcast_to,
+ sliding_window_view,
+)
+from numpy.testing import (
+ assert_,
+ assert_array_equal,
+ assert_equal,
+ assert_raises,
+ assert_raises_regex,
+ assert_warns,
+)
+
+
+def assert_shapes_correct(input_shapes, expected_shape):
+ # Broadcast a list of arrays with the given input shapes and check the
+ # common output shape.
+
+ inarrays = [np.zeros(s) for s in input_shapes]
+ outarrays = broadcast_arrays(*inarrays)
+ outshapes = [a.shape for a in outarrays]
+ expected = [expected_shape] * len(inarrays)
+ assert_equal(outshapes, expected)
+
+
+def assert_incompatible_shapes_raise(input_shapes):
+ # Broadcast a list of arrays with the given (incompatible) input shapes
+ # and check that they raise a ValueError.
+
+ inarrays = [np.zeros(s) for s in input_shapes]
+ assert_raises(ValueError, broadcast_arrays, *inarrays)
+
+
+def assert_same_as_ufunc(shape0, shape1, transposed=False, flipped=False):
+ # Broadcast two shapes against each other and check that the data layout
+ # is the same as if a ufunc did the broadcasting.
+
+ x0 = np.zeros(shape0, dtype=int)
+ # Note that multiply.reduce's identity element is 1.0, so when shape1==(),
+ # this gives the desired n==1.
+ n = int(np.multiply.reduce(shape1))
+ x1 = np.arange(n).reshape(shape1)
+ if transposed:
+ x0 = x0.T
+ x1 = x1.T
+ if flipped:
+ x0 = x0[::-1]
+ x1 = x1[::-1]
+ # Use the add ufunc to do the broadcasting. Since we're adding 0s to x1, the
+ # result should be exactly the same as the broadcasted view of x1.
+ y = x0 + x1
+ b0, b1 = broadcast_arrays(x0, x1)
+ assert_array_equal(y, b1)
+
+
+def test_same():
+ x = np.arange(10)
+ y = np.arange(10)
+ bx, by = broadcast_arrays(x, y)
+ assert_array_equal(x, bx)
+ assert_array_equal(y, by)
+
+
+def test_broadcast_kwargs():
+ # ensure that a TypeError is appropriately raised when
+ # np.broadcast_arrays() is called with any keyword
+ # argument other than 'subok'
+ x = np.arange(10)
+ y = np.arange(10)
+
+ with assert_raises_regex(TypeError, 'got an unexpected keyword'):
+ broadcast_arrays(x, y, dtype='float64')
+
+
+def test_one_off():
+ x = np.array([[1, 2, 3]])
+ y = np.array([[1], [2], [3]])
+ bx, by = broadcast_arrays(x, y)
+ bx0 = np.array([[1, 2, 3], [1, 2, 3], [1, 2, 3]])
+ by0 = bx0.T
+ assert_array_equal(bx0, bx)
+ assert_array_equal(by0, by)
+
+
+def test_same_input_shapes():
+ # Check that the final shape is just the input shape.
+
+ data = [
+ (),
+ (1,),
+ (3,),
+ (0, 1),
+ (0, 3),
+ (1, 0),
+ (3, 0),
+ (1, 3),
+ (3, 1),
+ (3, 3),
+ ]
+ for shape in data:
+ input_shapes = [shape]
+ # Single input.
+ assert_shapes_correct(input_shapes, shape)
+ # Double input.
+ input_shapes2 = [shape, shape]
+ assert_shapes_correct(input_shapes2, shape)
+ # Triple input.
+ input_shapes3 = [shape, shape, shape]
+ assert_shapes_correct(input_shapes3, shape)
+
+
+def test_two_compatible_by_ones_input_shapes():
+ # Check that two different input shapes of the same length, but some have
+ # ones, broadcast to the correct shape.
+
+ data = [
+ [[(1,), (3,)], (3,)],
+ [[(1, 3), (3, 3)], (3, 3)],
+ [[(3, 1), (3, 3)], (3, 3)],
+ [[(1, 3), (3, 1)], (3, 3)],
+ [[(1, 1), (3, 3)], (3, 3)],
+ [[(1, 1), (1, 3)], (1, 3)],
+ [[(1, 1), (3, 1)], (3, 1)],
+ [[(1, 0), (0, 0)], (0, 0)],
+ [[(0, 1), (0, 0)], (0, 0)],
+ [[(1, 0), (0, 1)], (0, 0)],
+ [[(1, 1), (0, 0)], (0, 0)],
+ [[(1, 1), (1, 0)], (1, 0)],
+ [[(1, 1), (0, 1)], (0, 1)],
+ ]
+ for input_shapes, expected_shape in data:
+ assert_shapes_correct(input_shapes, expected_shape)
+ # Reverse the input shapes since broadcasting should be symmetric.
+ assert_shapes_correct(input_shapes[::-1], expected_shape)
+
+
+def test_two_compatible_by_prepending_ones_input_shapes():
+ # Check that two different input shapes (of different lengths) broadcast
+ # to the correct shape.
+
+ data = [
+ [[(), (3,)], (3,)],
+ [[(3,), (3, 3)], (3, 3)],
+ [[(3,), (3, 1)], (3, 3)],
+ [[(1,), (3, 3)], (3, 3)],
+ [[(), (3, 3)], (3, 3)],
+ [[(1, 1), (3,)], (1, 3)],
+ [[(1,), (3, 1)], (3, 1)],
+ [[(1,), (1, 3)], (1, 3)],
+ [[(), (1, 3)], (1, 3)],
+ [[(), (3, 1)], (3, 1)],
+ [[(), (0,)], (0,)],
+ [[(0,), (0, 0)], (0, 0)],
+ [[(0,), (0, 1)], (0, 0)],
+ [[(1,), (0, 0)], (0, 0)],
+ [[(), (0, 0)], (0, 0)],
+ [[(1, 1), (0,)], (1, 0)],
+ [[(1,), (0, 1)], (0, 1)],
+ [[(1,), (1, 0)], (1, 0)],
+ [[(), (1, 0)], (1, 0)],
+ [[(), (0, 1)], (0, 1)],
+ ]
+ for input_shapes, expected_shape in data:
+ assert_shapes_correct(input_shapes, expected_shape)
+ # Reverse the input shapes since broadcasting should be symmetric.
+ assert_shapes_correct(input_shapes[::-1], expected_shape)
+
+
+def test_incompatible_shapes_raise_valueerror():
+ # Check that a ValueError is raised for incompatible shapes.
+
+ data = [
+ [(3,), (4,)],
+ [(2, 3), (2,)],
+ [(3,), (3,), (4,)],
+ [(1, 3, 4), (2, 3, 3)],
+ ]
+ for input_shapes in data:
+ assert_incompatible_shapes_raise(input_shapes)
+ # Reverse the input shapes since broadcasting should be symmetric.
+ assert_incompatible_shapes_raise(input_shapes[::-1])
+
+
+def test_same_as_ufunc():
+ # Check that the data layout is the same as if a ufunc did the operation.
+
+ data = [
+ [[(1,), (3,)], (3,)],
+ [[(1, 3), (3, 3)], (3, 3)],
+ [[(3, 1), (3, 3)], (3, 3)],
+ [[(1, 3), (3, 1)], (3, 3)],
+ [[(1, 1), (3, 3)], (3, 3)],
+ [[(1, 1), (1, 3)], (1, 3)],
+ [[(1, 1), (3, 1)], (3, 1)],
+ [[(1, 0), (0, 0)], (0, 0)],
+ [[(0, 1), (0, 0)], (0, 0)],
+ [[(1, 0), (0, 1)], (0, 0)],
+ [[(1, 1), (0, 0)], (0, 0)],
+ [[(1, 1), (1, 0)], (1, 0)],
+ [[(1, 1), (0, 1)], (0, 1)],
+ [[(), (3,)], (3,)],
+ [[(3,), (3, 3)], (3, 3)],
+ [[(3,), (3, 1)], (3, 3)],
+ [[(1,), (3, 3)], (3, 3)],
+ [[(), (3, 3)], (3, 3)],
+ [[(1, 1), (3,)], (1, 3)],
+ [[(1,), (3, 1)], (3, 1)],
+ [[(1,), (1, 3)], (1, 3)],
+ [[(), (1, 3)], (1, 3)],
+ [[(), (3, 1)], (3, 1)],
+ [[(), (0,)], (0,)],
+ [[(0,), (0, 0)], (0, 0)],
+ [[(0,), (0, 1)], (0, 0)],
+ [[(1,), (0, 0)], (0, 0)],
+ [[(), (0, 0)], (0, 0)],
+ [[(1, 1), (0,)], (1, 0)],
+ [[(1,), (0, 1)], (0, 1)],
+ [[(1,), (1, 0)], (1, 0)],
+ [[(), (1, 0)], (1, 0)],
+ [[(), (0, 1)], (0, 1)],
+ ]
+ for input_shapes, expected_shape in data:
+        assert_same_as_ufunc(input_shapes[0], input_shapes[1])
+ # Reverse the input shapes since broadcasting should be symmetric.
+ assert_same_as_ufunc(input_shapes[1], input_shapes[0])
+ # Try them transposed, too.
+ assert_same_as_ufunc(input_shapes[0], input_shapes[1], True)
+ # ... and flipped for non-rank-0 inputs in order to test negative
+ # strides.
+ if () not in input_shapes:
+ assert_same_as_ufunc(input_shapes[0], input_shapes[1], False, True)
+ assert_same_as_ufunc(input_shapes[0], input_shapes[1], True, True)
+
+
+def test_broadcast_to_succeeds():
+ data = [
+ [np.array(0), (0,), np.array(0)],
+ [np.array(0), (1,), np.zeros(1)],
+ [np.array(0), (3,), np.zeros(3)],
+ [np.ones(1), (1,), np.ones(1)],
+ [np.ones(1), (2,), np.ones(2)],
+ [np.ones(1), (1, 2, 3), np.ones((1, 2, 3))],
+ [np.arange(3), (3,), np.arange(3)],
+ [np.arange(3), (1, 3), np.arange(3).reshape(1, -1)],
+ [np.arange(3), (2, 3), np.array([[0, 1, 2], [0, 1, 2]])],
+ # test if shape is not a tuple
+ [np.ones(0), 0, np.ones(0)],
+ [np.ones(1), 1, np.ones(1)],
+ [np.ones(1), 2, np.ones(2)],
+ # these cases with size 0 are strange, but they reproduce the behavior
+ # of broadcasting with ufuncs (see test_same_as_ufunc above)
+ [np.ones(1), (0,), np.ones(0)],
+ [np.ones((1, 2)), (0, 2), np.ones((0, 2))],
+ [np.ones((2, 1)), (2, 0), np.ones((2, 0))],
+ ]
+ for input_array, shape, expected in data:
+ actual = broadcast_to(input_array, shape)
+ assert_array_equal(expected, actual)
+
+
+def test_broadcast_to_raises():
+ data = [
+ [(0,), ()],
+ [(1,), ()],
+ [(3,), ()],
+ [(3,), (1,)],
+ [(3,), (2,)],
+ [(3,), (4,)],
+ [(1, 2), (2, 1)],
+ [(1, 1), (1,)],
+ [(1,), -1],
+ [(1,), (-1,)],
+ [(1, 2), (-1, 2)],
+ ]
+ for orig_shape, target_shape in data:
+ arr = np.zeros(orig_shape)
+ assert_raises(ValueError, lambda: broadcast_to(arr, target_shape))
+
+
+def test_broadcast_shape():
+ # tests internal _broadcast_shape
+ # _broadcast_shape is already exercised indirectly by broadcast_arrays
+ # _broadcast_shape is also exercised by the public broadcast_shapes function
+ assert_equal(_broadcast_shape(), ())
+ assert_equal(_broadcast_shape([1, 2]), (2,))
+ assert_equal(_broadcast_shape(np.ones((1, 1))), (1, 1))
+ assert_equal(_broadcast_shape(np.ones((1, 1)), np.ones((3, 4))), (3, 4))
+ assert_equal(_broadcast_shape(*([np.ones((1, 2))] * 32)), (1, 2))
+ assert_equal(_broadcast_shape(*([np.ones((1, 2))] * 100)), (1, 2))
+
+ # regression tests for gh-5862
+ assert_equal(_broadcast_shape(*([np.ones(2)] * 32 + [1])), (2,))
+ bad_args = [np.ones(2)] * 32 + [np.ones(3)] * 32
+ assert_raises(ValueError, lambda: _broadcast_shape(*bad_args))
+
+
+def test_broadcast_shapes_succeeds():
+ # tests public broadcast_shapes
+ data = [
+ [[], ()],
+ [[()], ()],
+ [[(7,)], (7,)],
+ [[(1, 2), (2,)], (1, 2)],
+ [[(1, 1)], (1, 1)],
+ [[(1, 1), (3, 4)], (3, 4)],
+ [[(6, 7), (5, 6, 1), (7,), (5, 1, 7)], (5, 6, 7)],
+ [[(5, 6, 1)], (5, 6, 1)],
+ [[(1, 3), (3, 1)], (3, 3)],
+ [[(1, 0), (0, 0)], (0, 0)],
+ [[(0, 1), (0, 0)], (0, 0)],
+ [[(1, 0), (0, 1)], (0, 0)],
+ [[(1, 1), (0, 0)], (0, 0)],
+ [[(1, 1), (1, 0)], (1, 0)],
+ [[(1, 1), (0, 1)], (0, 1)],
+ [[(), (0,)], (0,)],
+ [[(0,), (0, 0)], (0, 0)],
+ [[(0,), (0, 1)], (0, 0)],
+ [[(1,), (0, 0)], (0, 0)],
+ [[(), (0, 0)], (0, 0)],
+ [[(1, 1), (0,)], (1, 0)],
+ [[(1,), (0, 1)], (0, 1)],
+ [[(1,), (1, 0)], (1, 0)],
+ [[(), (1, 0)], (1, 0)],
+ [[(), (0, 1)], (0, 1)],
+ [[(1,), (3,)], (3,)],
+ [[2, (3, 2)], (3, 2)],
+ ]
+ for input_shapes, target_shape in data:
+ assert_equal(broadcast_shapes(*input_shapes), target_shape)
+
+ assert_equal(broadcast_shapes(*([(1, 2)] * 32)), (1, 2))
+ assert_equal(broadcast_shapes(*([(1, 2)] * 100)), (1, 2))
+
+ # regression tests for gh-5862
+ assert_equal(broadcast_shapes(*([(2,)] * 32)), (2,))
+
+
+def test_broadcast_shapes_raises():
+ # tests public broadcast_shapes
+ data = [
+ [(3,), (4,)],
+ [(2, 3), (2,)],
+ [(3,), (3,), (4,)],
+ [(1, 3, 4), (2, 3, 3)],
+ [(1, 2), (3, 1), (3, 2), (10, 5)],
+ [2, (2, 3)],
+ ]
+ for input_shapes in data:
+ assert_raises(ValueError, lambda: broadcast_shapes(*input_shapes))
+
+ bad_args = [(2,)] * 32 + [(3,)] * 32
+ assert_raises(ValueError, lambda: broadcast_shapes(*bad_args))
+
+
+def test_as_strided():
+ a = np.array([None])
+ a_view = as_strided(a)
+ expected = np.array([None])
+    assert_array_equal(a_view, expected)
+
+ a = np.array([1, 2, 3, 4])
+ a_view = as_strided(a, shape=(2,), strides=(2 * a.itemsize,))
+ expected = np.array([1, 3])
+ assert_array_equal(a_view, expected)
+
+ a = np.array([1, 2, 3, 4])
+ a_view = as_strided(a, shape=(3, 4), strides=(0, 1 * a.itemsize))
+ expected = np.array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]])
+ assert_array_equal(a_view, expected)
+
+ # Regression test for gh-5081
+ dt = np.dtype([('num', 'i4'), ('obj', 'O')])
+ a = np.empty((4,), dtype=dt)
+ a['num'] = np.arange(1, 5)
+ a_view = as_strided(a, shape=(3, 4), strides=(0, a.itemsize))
+ expected_num = [[1, 2, 3, 4]] * 3
+ expected_obj = [[None] * 4] * 3
+ assert_equal(a_view.dtype, dt)
+ assert_array_equal(expected_num, a_view['num'])
+ assert_array_equal(expected_obj, a_view['obj'])
+
+ # Make sure that void types without fields are kept unchanged
+ a = np.empty((4,), dtype='V4')
+ a_view = as_strided(a, shape=(3, 4), strides=(0, a.itemsize))
+ assert_equal(a.dtype, a_view.dtype)
+
+ # Make sure that the only type that could fail is properly handled
+ dt = np.dtype({'names': [''], 'formats': ['V4']})
+ a = np.empty((4,), dtype=dt)
+ a_view = as_strided(a, shape=(3, 4), strides=(0, a.itemsize))
+ assert_equal(a.dtype, a_view.dtype)
+
+ # Custom dtypes should not be lost (gh-9161)
+ r = [rational(i) for i in range(4)]
+ a = np.array(r, dtype=rational)
+ a_view = as_strided(a, shape=(3, 4), strides=(0, a.itemsize))
+ assert_equal(a.dtype, a_view.dtype)
+ assert_array_equal([r] * 3, a_view)
+
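+
+# --- Editorial sketch (not part of the upstream NumPy file): the typical
+# use of as_strided is building overlapping windows over one buffer; the
+# caller must keep shape and strides in bounds, as nothing is validated.
+def _sketch_as_strided_windows():
+    a = np.arange(6)
+    step = a.itemsize
+    windows = as_strided(a, shape=(4, 3), strides=(step, step))
+    assert_array_equal(windows[0], [0, 1, 2])  # windows share memory with a
+    assert_array_equal(windows[3], [3, 4, 5])
+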
+
+class TestSlidingWindowView:
+ def test_1d(self):
+ arr = np.arange(5)
+ arr_view = sliding_window_view(arr, 2)
+ expected = np.array([[0, 1],
+ [1, 2],
+ [2, 3],
+ [3, 4]])
+ assert_array_equal(arr_view, expected)
+
+ def test_2d(self):
+ i, j = np.ogrid[:3, :4]
+ arr = 10 * i + j
+ shape = (2, 2)
+ arr_view = sliding_window_view(arr, shape)
+ expected = np.array([[[[0, 1], [10, 11]],
+ [[1, 2], [11, 12]],
+ [[2, 3], [12, 13]]],
+ [[[10, 11], [20, 21]],
+ [[11, 12], [21, 22]],
+ [[12, 13], [22, 23]]]])
+ assert_array_equal(arr_view, expected)
+
+ def test_2d_with_axis(self):
+ i, j = np.ogrid[:3, :4]
+ arr = 10 * i + j
+ arr_view = sliding_window_view(arr, 3, 0)
+ expected = np.array([[[0, 10, 20],
+ [1, 11, 21],
+ [2, 12, 22],
+ [3, 13, 23]]])
+ assert_array_equal(arr_view, expected)
+
+ def test_2d_repeated_axis(self):
+ i, j = np.ogrid[:3, :4]
+ arr = 10 * i + j
+ arr_view = sliding_window_view(arr, (2, 3), (1, 1))
+ expected = np.array([[[[0, 1, 2],
+ [1, 2, 3]]],
+ [[[10, 11, 12],
+ [11, 12, 13]]],
+ [[[20, 21, 22],
+ [21, 22, 23]]]])
+ assert_array_equal(arr_view, expected)
+
+ def test_2d_without_axis(self):
+ i, j = np.ogrid[:4, :4]
+ arr = 10 * i + j
+ shape = (2, 3)
+ arr_view = sliding_window_view(arr, shape)
+ expected = np.array([[[[0, 1, 2], [10, 11, 12]],
+ [[1, 2, 3], [11, 12, 13]]],
+ [[[10, 11, 12], [20, 21, 22]],
+ [[11, 12, 13], [21, 22, 23]]],
+ [[[20, 21, 22], [30, 31, 32]],
+ [[21, 22, 23], [31, 32, 33]]]])
+ assert_array_equal(arr_view, expected)
+
+ def test_errors(self):
+ i, j = np.ogrid[:4, :4]
+ arr = 10 * i + j
+ with pytest.raises(ValueError, match='cannot contain negative values'):
+ sliding_window_view(arr, (-1, 3))
+ with pytest.raises(
+ ValueError,
+ match='must provide window_shape for all dimensions of `x`'):
+ sliding_window_view(arr, (1,))
+ with pytest.raises(
+ ValueError,
+ match='Must provide matching length window_shape and axis'):
+ sliding_window_view(arr, (1, 3, 4), axis=(0, 1))
+ with pytest.raises(
+ ValueError,
+ match='window shape cannot be larger than input array'):
+ sliding_window_view(arr, (5, 5))
+
+ def test_writeable(self):
+ arr = np.arange(5)
+ view = sliding_window_view(arr, 2, writeable=False)
+ assert_(not view.flags.writeable)
+ with pytest.raises(
+ ValueError,
+ match='assignment destination is read-only'):
+ view[0, 0] = 3
+ view = sliding_window_view(arr, 2, writeable=True)
+ assert_(view.flags.writeable)
+ view[0, 1] = 3
+ assert_array_equal(arr, np.array([0, 3, 2, 3, 4]))
+
+ def test_subok(self):
+ class MyArray(np.ndarray):
+ pass
+
+ arr = np.arange(5).view(MyArray)
+ assert_(not isinstance(sliding_window_view(arr, 2,
+ subok=False),
+ MyArray))
+ assert_(isinstance(sliding_window_view(arr, 2, subok=True), MyArray))
+ # Default behavior
+ assert_(not isinstance(sliding_window_view(arr, 2), MyArray))
+
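+
+# --- Editorial sketch (not part of the upstream NumPy file): a common use
+# of sliding_window_view is a vectorized rolling reduction, e.g. a moving
+# mean without an explicit Python loop.
+def _sketch_moving_mean():
+    x = np.arange(6, dtype=float)
+    means = sliding_window_view(x, 3).mean(axis=-1)
+    assert_array_equal(means, [1.0, 2.0, 3.0, 4.0])
+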
+
+def test_as_strided_writeable():
+ arr = np.ones(10)
+ view = as_strided(arr, writeable=False)
+ assert_(not view.flags.writeable)
+
+ # Check that writeable also is fine:
+ view = as_strided(arr, writeable=True)
+ assert_(view.flags.writeable)
+ view[...] = 3
+ assert_array_equal(arr, np.full_like(arr, 3))
+
+ # Test that things do not break down for readonly:
+ arr.flags.writeable = False
+ view = as_strided(arr, writeable=False)
+ view = as_strided(arr, writeable=True)
+ assert_(not view.flags.writeable)
+
+
+class VerySimpleSubClass(np.ndarray):
+ def __new__(cls, *args, **kwargs):
+ return np.array(*args, subok=True, **kwargs).view(cls)
+
+
+class SimpleSubClass(VerySimpleSubClass):
+ def __new__(cls, *args, **kwargs):
+ self = np.array(*args, subok=True, **kwargs).view(cls)
+ self.info = 'simple'
+ return self
+
+ def __array_finalize__(self, obj):
+ self.info = getattr(obj, 'info', '') + ' finalized'
+
+
+def test_subclasses():
+ # test that subclass is preserved only if subok=True
+ a = VerySimpleSubClass([1, 2, 3, 4])
+ assert_(type(a) is VerySimpleSubClass)
+ a_view = as_strided(a, shape=(2,), strides=(2 * a.itemsize,))
+ assert_(type(a_view) is np.ndarray)
+ a_view = as_strided(a, shape=(2,), strides=(2 * a.itemsize,), subok=True)
+ assert_(type(a_view) is VerySimpleSubClass)
+ # test that if a subclass has __array_finalize__, it is used
+ a = SimpleSubClass([1, 2, 3, 4])
+ a_view = as_strided(a, shape=(2,), strides=(2 * a.itemsize,), subok=True)
+ assert_(type(a_view) is SimpleSubClass)
+ assert_(a_view.info == 'simple finalized')
+
+ # similar tests for broadcast_arrays
+ b = np.arange(len(a)).reshape(-1, 1)
+ a_view, b_view = broadcast_arrays(a, b)
+ assert_(type(a_view) is np.ndarray)
+ assert_(type(b_view) is np.ndarray)
+ assert_(a_view.shape == b_view.shape)
+ a_view, b_view = broadcast_arrays(a, b, subok=True)
+ assert_(type(a_view) is SimpleSubClass)
+ assert_(a_view.info == 'simple finalized')
+ assert_(type(b_view) is np.ndarray)
+ assert_(a_view.shape == b_view.shape)
+
+ # and for broadcast_to
+ shape = (2, 4)
+ a_view = broadcast_to(a, shape)
+ assert_(type(a_view) is np.ndarray)
+ assert_(a_view.shape == shape)
+ a_view = broadcast_to(a, shape, subok=True)
+ assert_(type(a_view) is SimpleSubClass)
+ assert_(a_view.info == 'simple finalized')
+ assert_(a_view.shape == shape)
+
+
+def test_writeable():
+ # broadcast_to should return a readonly array
+ original = np.array([1, 2, 3])
+ result = broadcast_to(original, (2, 3))
+ assert_equal(result.flags.writeable, False)
+ assert_raises(ValueError, result.__setitem__, slice(None), 0)
+
+ # but the result of broadcast_arrays needs to be writeable, to
+ # preserve backwards compatibility
+ test_cases = [((False,), broadcast_arrays(original,)),
+ ((True, False), broadcast_arrays(0, original))]
+ for is_broadcast, results in test_cases:
+ for array_is_broadcast, result in zip(is_broadcast, results):
+ # This will change to False in a future version
+ if array_is_broadcast:
+ with assert_warns(FutureWarning):
+ assert_equal(result.flags.writeable, True)
+ with assert_warns(DeprecationWarning):
+ result[:] = 0
+ # Warning not emitted, writing to the array resets it
+ assert_equal(result.flags.writeable, True)
+ else:
+ # No warning:
+ assert_equal(result.flags.writeable, True)
+
+ for results in [broadcast_arrays(original),
+ broadcast_arrays(0, original)]:
+ for result in results:
+ # resets the warn_on_write DeprecationWarning
+ result.flags.writeable = True
+ # check: no warning emitted
+ assert_equal(result.flags.writeable, True)
+ result[:] = 0
+
+ # keep readonly input readonly
+ original.flags.writeable = False
+ _, result = broadcast_arrays(0, original)
+ assert_equal(result.flags.writeable, False)
+
+ # regression test for GH6491
+ shape = (2,)
+ strides = [0]
+ tricky_array = as_strided(np.array(0), shape, strides)
+ other = np.zeros((1,))
+ first, second = broadcast_arrays(tricky_array, other)
+ assert_(first.shape == second.shape)
+
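+
+# --- Editorial sketch (not part of the upstream NumPy file): code that
+# needs to mutate a broadcast result without the deprecated write-to-view
+# behaviour tested above can copy first, materializing the repeated rows.
+def _sketch_write_safely_after_broadcast():
+    a, _ = broadcast_arrays(np.zeros(1), np.zeros((2, 3)))
+    a = a.copy()  # independent buffer instead of a zero-stride view
+    a[0, 0] = 1.0
+    assert a[0, 1] == 0.0  # elements no longer alias each other
+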
+
+def test_writeable_memoryview():
+ # The result of broadcast_arrays exports as a non-writeable memoryview
+ # because otherwise there is no good way to opt in to the new behaviour
+ # (i.e. you would need to set writeable to False explicitly).
+ # See gh-13929.
+ original = np.array([1, 2, 3])
+
+ test_cases = [((False, ), broadcast_arrays(original,)),
+ ((True, False), broadcast_arrays(0, original))]
+ for is_broadcast, results in test_cases:
+ for array_is_broadcast, result in zip(is_broadcast, results):
+ # This will change to False in a future version
+ if array_is_broadcast:
+                # Requesting a writable memoryview would warn, but that
+                # cannot be triggered through the pure-Python memoryview()
+                # API.
+ assert memoryview(result).readonly
+ else:
+ assert not memoryview(result).readonly
+
+
+def test_reference_types():
+ input_array = np.array('a', dtype=object)
+ expected = np.array(['a'] * 3, dtype=object)
+ actual = broadcast_to(input_array, (3,))
+ assert_array_equal(expected, actual)
+
+ actual, _ = broadcast_arrays(input_array, np.ones(3))
+ assert_array_equal(expected, actual)
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_twodim_base.py b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_twodim_base.py
new file mode 100644
index 0000000..eb6aa69
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_twodim_base.py
@@ -0,0 +1,559 @@
+"""Test functions for matrix module
+
+"""
+import pytest
+
+import numpy as np
+from numpy import (
+ add,
+ arange,
+ array,
+ diag,
+ eye,
+ fliplr,
+ flipud,
+ histogram2d,
+ mask_indices,
+ ones,
+ tri,
+ tril_indices,
+ tril_indices_from,
+ triu_indices,
+ triu_indices_from,
+ vander,
+ zeros,
+)
+from numpy.testing import (
+ assert_,
+ assert_array_almost_equal,
+ assert_array_equal,
+ assert_array_max_ulp,
+ assert_equal,
+ assert_raises,
+)
+
+
+def get_mat(n):
+ data = arange(n)
+ data = add.outer(data, data)
+ return data
+
+
+class TestEye:
+ def test_basic(self):
+ assert_equal(eye(4),
+ array([[1, 0, 0, 0],
+ [0, 1, 0, 0],
+ [0, 0, 1, 0],
+ [0, 0, 0, 1]]))
+
+ assert_equal(eye(4, dtype='f'),
+ array([[1, 0, 0, 0],
+ [0, 1, 0, 0],
+ [0, 0, 1, 0],
+ [0, 0, 0, 1]], 'f'))
+
+ assert_equal(eye(3) == 1,
+ eye(3, dtype=bool))
+
+ def test_uint64(self):
+ # Regression test for gh-9982
+ assert_equal(eye(np.uint64(2), dtype=int), array([[1, 0], [0, 1]]))
+ assert_equal(eye(np.uint64(2), M=np.uint64(4), k=np.uint64(1)),
+ array([[0, 1, 0, 0], [0, 0, 1, 0]]))
+
+ def test_diag(self):
+ assert_equal(eye(4, k=1),
+ array([[0, 1, 0, 0],
+ [0, 0, 1, 0],
+ [0, 0, 0, 1],
+ [0, 0, 0, 0]]))
+
+ assert_equal(eye(4, k=-1),
+ array([[0, 0, 0, 0],
+ [1, 0, 0, 0],
+ [0, 1, 0, 0],
+ [0, 0, 1, 0]]))
+
+ def test_2d(self):
+ assert_equal(eye(4, 3),
+ array([[1, 0, 0],
+ [0, 1, 0],
+ [0, 0, 1],
+ [0, 0, 0]]))
+
+ assert_equal(eye(3, 4),
+ array([[1, 0, 0, 0],
+ [0, 1, 0, 0],
+ [0, 0, 1, 0]]))
+
+ def test_diag2d(self):
+ assert_equal(eye(3, 4, k=2),
+ array([[0, 0, 1, 0],
+ [0, 0, 0, 1],
+ [0, 0, 0, 0]]))
+
+ assert_equal(eye(4, 3, k=-2),
+ array([[0, 0, 0],
+ [0, 0, 0],
+ [1, 0, 0],
+ [0, 1, 0]]))
+
+ def test_eye_bounds(self):
+ assert_equal(eye(2, 2, 1), [[0, 1], [0, 0]])
+ assert_equal(eye(2, 2, -1), [[0, 0], [1, 0]])
+ assert_equal(eye(2, 2, 2), [[0, 0], [0, 0]])
+ assert_equal(eye(2, 2, -2), [[0, 0], [0, 0]])
+ assert_equal(eye(3, 2, 2), [[0, 0], [0, 0], [0, 0]])
+ assert_equal(eye(3, 2, 1), [[0, 1], [0, 0], [0, 0]])
+ assert_equal(eye(3, 2, -1), [[0, 0], [1, 0], [0, 1]])
+ assert_equal(eye(3, 2, -2), [[0, 0], [0, 0], [1, 0]])
+ assert_equal(eye(3, 2, -3), [[0, 0], [0, 0], [0, 0]])
+
+ def test_strings(self):
+ assert_equal(eye(2, 2, dtype='S3'),
+ [[b'1', b''], [b'', b'1']])
+
+ def test_bool(self):
+ assert_equal(eye(2, 2, dtype=bool), [[True, False], [False, True]])
+
+ def test_order(self):
+ mat_c = eye(4, 3, k=-1)
+ mat_f = eye(4, 3, k=-1, order='F')
+ assert_equal(mat_c, mat_f)
+ assert mat_c.flags.c_contiguous
+ assert not mat_c.flags.f_contiguous
+ assert not mat_f.flags.c_contiguous
+ assert mat_f.flags.f_contiguous
+
+
+class TestDiag:
+ def test_vector(self):
+ vals = (100 * arange(5)).astype('l')
+ b = zeros((5, 5))
+ for k in range(5):
+ b[k, k] = vals[k]
+ assert_equal(diag(vals), b)
+ b = zeros((7, 7))
+ c = b.copy()
+ for k in range(5):
+ b[k, k + 2] = vals[k]
+ c[k + 2, k] = vals[k]
+ assert_equal(diag(vals, k=2), b)
+ assert_equal(diag(vals, k=-2), c)
+
+ def test_matrix(self, vals=None):
+ if vals is None:
+ vals = (100 * get_mat(5) + 1).astype('l')
+ b = zeros((5,))
+ for k in range(5):
+ b[k] = vals[k, k]
+ assert_equal(diag(vals), b)
+ b = b * 0
+ for k in range(3):
+ b[k] = vals[k, k + 2]
+ assert_equal(diag(vals, 2), b[:3])
+ for k in range(3):
+ b[k] = vals[k + 2, k]
+ assert_equal(diag(vals, -2), b[:3])
+
+ def test_fortran_order(self):
+ vals = array((100 * get_mat(5) + 1), order='F', dtype='l')
+ self.test_matrix(vals)
+
+ def test_diag_bounds(self):
+ A = [[1, 2], [3, 4], [5, 6]]
+ assert_equal(diag(A, k=2), [])
+ assert_equal(diag(A, k=1), [2])
+ assert_equal(diag(A, k=0), [1, 4])
+ assert_equal(diag(A, k=-1), [3, 6])
+ assert_equal(diag(A, k=-2), [5])
+ assert_equal(diag(A, k=-3), [])
+
+ def test_failure(self):
+ assert_raises(ValueError, diag, [[[1]]])
+
+
+class TestFliplr:
+ def test_basic(self):
+ assert_raises(ValueError, fliplr, ones(4))
+ a = get_mat(4)
+ b = a[:, ::-1]
+ assert_equal(fliplr(a), b)
+ a = [[0, 1, 2],
+ [3, 4, 5]]
+ b = [[2, 1, 0],
+ [5, 4, 3]]
+ assert_equal(fliplr(a), b)
+
+
+class TestFlipud:
+ def test_basic(self):
+ a = get_mat(4)
+ b = a[::-1, :]
+ assert_equal(flipud(a), b)
+ a = [[0, 1, 2],
+ [3, 4, 5]]
+ b = [[3, 4, 5],
+ [0, 1, 2]]
+ assert_equal(flipud(a), b)
+
+
+class TestHistogram2d:
+ def test_simple(self):
+ x = array(
+ [0.41702200, 0.72032449, 1.1437481e-4, 0.302332573, 0.146755891])
+ y = array(
+ [0.09233859, 0.18626021, 0.34556073, 0.39676747, 0.53881673])
+ xedges = np.linspace(0, 1, 10)
+ yedges = np.linspace(0, 1, 10)
+ H = histogram2d(x, y, (xedges, yedges))[0]
+ answer = array(
+ [[0, 0, 0, 1, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 1, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [1, 0, 1, 0, 0, 0, 0, 0, 0],
+ [0, 1, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0]])
+ assert_array_equal(H.T, answer)
+ H = histogram2d(x, y, xedges)[0]
+ assert_array_equal(H.T, answer)
+ H, xedges, yedges = histogram2d(list(range(10)), list(range(10)))
+ assert_array_equal(H, eye(10, 10))
+ assert_array_equal(xedges, np.linspace(0, 9, 11))
+ assert_array_equal(yedges, np.linspace(0, 9, 11))
+
+ def test_asym(self):
+ x = array([1, 1, 2, 3, 4, 4, 4, 5])
+ y = array([1, 3, 2, 0, 1, 2, 3, 4])
+ H, xed, yed = histogram2d(
+ x, y, (6, 5), range=[[0, 6], [0, 5]], density=True)
+ answer = array(
+ [[0., 0, 0, 0, 0],
+ [0, 1, 0, 1, 0],
+ [0, 0, 1, 0, 0],
+ [1, 0, 0, 0, 0],
+ [0, 1, 1, 1, 0],
+ [0, 0, 0, 0, 1]])
+ assert_array_almost_equal(H, answer / 8., 3)
+ assert_array_equal(xed, np.linspace(0, 6, 7))
+ assert_array_equal(yed, np.linspace(0, 5, 6))
+
+ def test_density(self):
+ x = array([1, 2, 3, 1, 2, 3, 1, 2, 3])
+ y = array([1, 1, 1, 2, 2, 2, 3, 3, 3])
+ H, xed, yed = histogram2d(
+ x, y, [[1, 2, 3, 5], [1, 2, 3, 5]], density=True)
+ answer = array([[1, 1, .5],
+ [1, 1, .5],
+ [.5, .5, .25]]) / 9.
+ assert_array_almost_equal(H, answer, 3)
+
+ def test_all_outliers(self):
+ r = np.random.rand(100) + 1. + 1e6 # histogramdd rounds by decimal=6
+ H, xed, yed = histogram2d(r, r, (4, 5), range=([0, 1], [0, 1]))
+ assert_array_equal(H, 0)
+
+ def test_empty(self):
+ a, edge1, edge2 = histogram2d([], [], bins=([0, 1], [0, 1]))
+ assert_array_max_ulp(a, array([[0.]]))
+
+ a, edge1, edge2 = histogram2d([], [], bins=4)
+ assert_array_max_ulp(a, np.zeros((4, 4)))
+
+ def test_binparameter_combination(self):
+ x = array(
+ [0, 0.09207008, 0.64575234, 0.12875982, 0.47390599,
+ 0.59944483, 1])
+ y = array(
+ [0, 0.14344267, 0.48988575, 0.30558665, 0.44700682,
+ 0.15886423, 1])
+ edges = (0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1)
+ H, xe, ye = histogram2d(x, y, (edges, 4))
+ answer = array(
+ [[2., 0., 0., 0.],
+ [0., 1., 0., 0.],
+ [0., 0., 0., 0.],
+ [0., 0., 0., 0.],
+ [0., 1., 0., 0.],
+ [1., 0., 0., 0.],
+ [0., 1., 0., 0.],
+ [0., 0., 0., 0.],
+ [0., 0., 0., 0.],
+ [0., 0., 0., 1.]])
+ assert_array_equal(H, answer)
+ assert_array_equal(ye, array([0., 0.25, 0.5, 0.75, 1]))
+ H, xe, ye = histogram2d(x, y, (4, edges))
+ answer = array(
+ [[1., 1., 0., 1., 0., 0., 0., 0., 0., 0.],
+ [0., 0., 0., 0., 1., 0., 0., 0., 0., 0.],
+ [0., 1., 0., 0., 1., 0., 0., 0., 0., 0.],
+ [0., 0., 0., 0., 0., 0., 0., 0., 0., 1.]])
+ assert_array_equal(H, answer)
+ assert_array_equal(xe, array([0., 0.25, 0.5, 0.75, 1]))
+
+ def test_dispatch(self):
+ class ShouldDispatch:
+ def __array_function__(self, function, types, args, kwargs):
+ return types, args, kwargs
+
+ xy = [1, 2]
+ s_d = ShouldDispatch()
+ r = histogram2d(s_d, xy)
+ # Cannot use assert_equal since that dispatches...
+ assert_(r == ((ShouldDispatch,), (s_d, xy), {}))
+ r = histogram2d(xy, s_d)
+ assert_(r == ((ShouldDispatch,), (xy, s_d), {}))
+        r = histogram2d(xy, xy, bins=s_d)
+        assert_(r == ((ShouldDispatch,), (xy, xy), {'bins': s_d}))
+        r = histogram2d(xy, xy, bins=[s_d, 5])
+        assert_(r == ((ShouldDispatch,), (xy, xy), {'bins': [s_d, 5]}))
+        assert_raises(Exception, histogram2d, xy, xy, bins=[s_d])
+        r = histogram2d(xy, xy, weights=s_d)
+        assert_(r == ((ShouldDispatch,), (xy, xy), {'weights': s_d}))
+
+ @pytest.mark.parametrize(("x_len", "y_len"), [(10, 11), (20, 19)])
+ def test_bad_length(self, x_len, y_len):
+ x, y = np.ones(x_len), np.ones(y_len)
+ with pytest.raises(ValueError,
+ match='x and y must have the same length.'):
+ histogram2d(x, y)
+
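+
+# --- Editorial sketch (not part of the upstream NumPy file): histogram2d's
+# first axis follows x and its second follows y, which is why the tests
+# above compare against H.T when the expectation is written in image order.
+def _sketch_histogram2d_orientation():
+    x = array([0.5, 0.5, 2.5])
+    y = array([0.5, 1.5, 0.5])
+    H, xe, ye = histogram2d(x, y, bins=(3, 2), range=[[0, 3], [0, 2]])
+    assert H.shape == (3, 2)  # (x bins, y bins)
+    assert H[0, 0] == 1 and H[0, 1] == 1 and H[2, 0] == 1
+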
+
+class TestTri:
+ def test_dtype(self):
+ out = array([[1, 0, 0],
+ [1, 1, 0],
+ [1, 1, 1]])
+ assert_array_equal(tri(3), out)
+ assert_array_equal(tri(3, dtype=bool), out.astype(bool))
+
+
+def test_tril_triu_ndim2():
+ for dtype in np.typecodes['AllFloat'] + np.typecodes['AllInteger']:
+ a = np.ones((2, 2), dtype=dtype)
+ b = np.tril(a)
+ c = np.triu(a)
+ assert_array_equal(b, [[1, 0], [1, 1]])
+ assert_array_equal(c, b.T)
+ # should return the same dtype as the original array
+ assert_equal(b.dtype, a.dtype)
+ assert_equal(c.dtype, a.dtype)
+
+
+def test_tril_triu_ndim3():
+ for dtype in np.typecodes['AllFloat'] + np.typecodes['AllInteger']:
+ a = np.array([
+ [[1, 1], [1, 1]],
+ [[1, 1], [1, 0]],
+ [[1, 1], [0, 0]],
+ ], dtype=dtype)
+ a_tril_desired = np.array([
+ [[1, 0], [1, 1]],
+ [[1, 0], [1, 0]],
+ [[1, 0], [0, 0]],
+ ], dtype=dtype)
+ a_triu_desired = np.array([
+ [[1, 1], [0, 1]],
+ [[1, 1], [0, 0]],
+ [[1, 1], [0, 0]],
+ ], dtype=dtype)
+ a_triu_observed = np.triu(a)
+ a_tril_observed = np.tril(a)
+ assert_array_equal(a_triu_observed, a_triu_desired)
+ assert_array_equal(a_tril_observed, a_tril_desired)
+ assert_equal(a_triu_observed.dtype, a.dtype)
+ assert_equal(a_tril_observed.dtype, a.dtype)
+
+
+def test_tril_triu_with_inf():
+ # Issue 4859
+ arr = np.array([[1, 1, np.inf],
+ [1, 1, 1],
+ [np.inf, 1, 1]])
+ out_tril = np.array([[1, 0, 0],
+ [1, 1, 0],
+ [np.inf, 1, 1]])
+ out_triu = out_tril.T
+ assert_array_equal(np.triu(arr), out_triu)
+ assert_array_equal(np.tril(arr), out_tril)
+
+
+def test_tril_triu_dtype():
+ # Issue 4916
+ # tril and triu should return the same dtype as input
+ for c in np.typecodes['All']:
+ if c == 'V':
+ continue
+ arr = np.zeros((3, 3), dtype=c)
+ assert_equal(np.triu(arr).dtype, arr.dtype)
+ assert_equal(np.tril(arr).dtype, arr.dtype)
+
+ # check special cases
+ arr = np.array([['2001-01-01T12:00', '2002-02-03T13:56'],
+ ['2004-01-01T12:00', '2003-01-03T13:45']],
+ dtype='datetime64')
+ assert_equal(np.triu(arr).dtype, arr.dtype)
+ assert_equal(np.tril(arr).dtype, arr.dtype)
+
+ arr = np.zeros((3, 3), dtype='f4,f4')
+ assert_equal(np.triu(arr).dtype, arr.dtype)
+ assert_equal(np.tril(arr).dtype, arr.dtype)
+
+
+def test_mask_indices():
+ # simple test without offset
+ iu = mask_indices(3, np.triu)
+ a = np.arange(9).reshape(3, 3)
+ assert_array_equal(a[iu], array([0, 1, 2, 4, 5, 8]))
+ # Now with an offset
+ iu1 = mask_indices(3, np.triu, 1)
+ assert_array_equal(a[iu1], array([1, 2, 5]))
+
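+
+# --- Editorial sketch (not part of the upstream NumPy file): mask_indices
+# accepts any callable with np.triu's (matrix, k) signature, so custom
+# selectors work too; anti_diag below is a hypothetical example.
+def _sketch_mask_indices_custom_func():
+    def anti_diag(m, k=0):
+        return fliplr(np.eye(m.shape[0], dtype=bool))
+
+    idx = mask_indices(3, anti_diag)
+    assert_array_equal(np.arange(9).reshape(3, 3)[idx], array([2, 4, 6]))
+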
+
+def test_tril_indices():
+ # indices without and with offset
+ il1 = tril_indices(4)
+ il2 = tril_indices(4, k=2)
+ il3 = tril_indices(4, m=5)
+ il4 = tril_indices(4, k=2, m=5)
+
+ a = np.array([[1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12],
+ [13, 14, 15, 16]])
+ b = np.arange(1, 21).reshape(4, 5)
+
+ # indexing:
+ assert_array_equal(a[il1],
+ array([1, 5, 6, 9, 10, 11, 13, 14, 15, 16]))
+ assert_array_equal(b[il3],
+ array([1, 6, 7, 11, 12, 13, 16, 17, 18, 19]))
+
+ # And for assigning values:
+ a[il1] = -1
+ assert_array_equal(a,
+ array([[-1, 2, 3, 4],
+ [-1, -1, 7, 8],
+ [-1, -1, -1, 12],
+ [-1, -1, -1, -1]]))
+ b[il3] = -1
+ assert_array_equal(b,
+ array([[-1, 2, 3, 4, 5],
+ [-1, -1, 8, 9, 10],
+ [-1, -1, -1, 14, 15],
+ [-1, -1, -1, -1, 20]]))
+ # These cover almost the whole array (two diagonals right of the main one):
+ a[il2] = -10
+ assert_array_equal(a,
+ array([[-10, -10, -10, 4],
+ [-10, -10, -10, -10],
+ [-10, -10, -10, -10],
+ [-10, -10, -10, -10]]))
+ b[il4] = -10
+ assert_array_equal(b,
+ array([[-10, -10, -10, 4, 5],
+ [-10, -10, -10, -10, 10],
+ [-10, -10, -10, -10, -10],
+ [-10, -10, -10, -10, -10]]))
+
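+
+# --- Editorial sketch (not part of the upstream NumPy file): tril_indices
+# is simply the nonzero positions of the boolean mask built by tri, so the
+# index tuple and the mask are interchangeable.
+def _sketch_tril_indices_vs_tri():
+    rows, cols = tril_indices(4, k=1, m=5)
+    mask = tri(4, 5, k=1, dtype=bool)
+    assert len(rows) == mask.sum()
+    assert mask[rows, cols].all()
+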
+
+class TestTriuIndices:
+ def test_triu_indices(self):
+ iu1 = triu_indices(4)
+ iu2 = triu_indices(4, k=2)
+ iu3 = triu_indices(4, m=5)
+ iu4 = triu_indices(4, k=2, m=5)
+
+ a = np.array([[1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12],
+ [13, 14, 15, 16]])
+ b = np.arange(1, 21).reshape(4, 5)
+
+ # Both for indexing:
+ assert_array_equal(a[iu1],
+ array([1, 2, 3, 4, 6, 7, 8, 11, 12, 16]))
+ assert_array_equal(b[iu3],
+ array([1, 2, 3, 4, 5, 7, 8, 9,
+ 10, 13, 14, 15, 19, 20]))
+
+ # And for assigning values:
+ a[iu1] = -1
+ assert_array_equal(a,
+ array([[-1, -1, -1, -1],
+ [5, -1, -1, -1],
+ [9, 10, -1, -1],
+ [13, 14, 15, -1]]))
+ b[iu3] = -1
+ assert_array_equal(b,
+ array([[-1, -1, -1, -1, -1],
+ [6, -1, -1, -1, -1],
+ [11, 12, -1, -1, -1],
+ [16, 17, 18, -1, -1]]))
+
+ # These cover almost the whole array (two diagonals right of the
+ # main one):
+ a[iu2] = -10
+ assert_array_equal(a,
+ array([[-1, -1, -10, -10],
+ [5, -1, -1, -10],
+ [9, 10, -1, -1],
+ [13, 14, 15, -1]]))
+ b[iu4] = -10
+ assert_array_equal(b,
+ array([[-1, -1, -10, -10, -10],
+ [6, -1, -1, -10, -10],
+ [11, 12, -1, -1, -10],
+ [16, 17, 18, -1, -1]]))
+
+
+class TestTrilIndicesFrom:
+ def test_exceptions(self):
+ assert_raises(ValueError, tril_indices_from, np.ones((2,)))
+ assert_raises(ValueError, tril_indices_from, np.ones((2, 2, 2)))
+ # assert_raises(ValueError, tril_indices_from, np.ones((2, 3)))
+
+
+class TestTriuIndicesFrom:
+ def test_exceptions(self):
+ assert_raises(ValueError, triu_indices_from, np.ones((2,)))
+ assert_raises(ValueError, triu_indices_from, np.ones((2, 2, 2)))
+ # assert_raises(ValueError, triu_indices_from, np.ones((2, 3)))
+
+
+class TestVander:
+ def test_basic(self):
+ c = np.array([0, 1, -2, 3])
+ v = vander(c)
+ powers = np.array([[0, 0, 0, 0, 1],
+ [1, 1, 1, 1, 1],
+ [16, -8, 4, -2, 1],
+ [81, 27, 9, 3, 1]])
+ # Check default value of N:
+ assert_array_equal(v, powers[:, 1:])
+ # Check a range of N values, including 0 and 5 (greater than default)
+ m = powers.shape[1]
+ for n in range(6):
+ v = vander(c, N=n)
+ assert_array_equal(v, powers[:, m - n:m])
+
+ def test_dtypes(self):
+ c = array([11, -12, 13], dtype=np.int8)
+ v = vander(c)
+ expected = np.array([[121, 11, 1],
+ [144, -12, 1],
+ [169, 13, 1]])
+ assert_array_equal(v, expected)
+
+ c = array([1.0 + 1j, 1.0 - 1j])
+ v = vander(c, N=3)
+ expected = np.array([[2j, 1 + 1j, 1],
+ [-2j, 1 - 1j, 1]])
+ # The data is floating point, but the values are small integers,
+ # so assert_array_equal *should* be safe here (rather than, say,
+ # assert_array_almost_equal).
+ assert_array_equal(v, expected)
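+
+
+# --- Editorial sketch (not part of the upstream NumPy file): vander's
+# default column order is decreasing powers, matching the coefficient
+# convention of np.polyval.
+def _sketch_vander_matches_polyval():
+    x = np.array([0.0, 1.0, 2.0])
+    coeffs = np.array([2.0, -1.0, 3.0])  # 2*x**2 - x + 3
+    assert_array_equal(vander(x, 3) @ coeffs, np.polyval(coeffs, x))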
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_type_check.py b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_type_check.py
new file mode 100644
index 0000000..447c2c3
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_type_check.py
@@ -0,0 +1,473 @@
+import numpy as np
+from numpy import (
+ common_type,
+ iscomplex,
+ iscomplexobj,
+ isneginf,
+ isposinf,
+ isreal,
+ isrealobj,
+ mintypecode,
+ nan_to_num,
+ real_if_close,
+)
+from numpy.testing import assert_, assert_array_equal, assert_equal
+
+
+def assert_all(x):
+ assert_(np.all(x), x)
+
+
+class TestCommonType:
+ def test_basic(self):
+ ai32 = np.array([[1, 2], [3, 4]], dtype=np.int32)
+ af16 = np.array([[1, 2], [3, 4]], dtype=np.float16)
+ af32 = np.array([[1, 2], [3, 4]], dtype=np.float32)
+ af64 = np.array([[1, 2], [3, 4]], dtype=np.float64)
+ acs = np.array([[1 + 5j, 2 + 6j], [3 + 7j, 4 + 8j]], dtype=np.complex64)
+ acd = np.array([[1 + 5j, 2 + 6j], [3 + 7j, 4 + 8j]], dtype=np.complex128)
+ assert_(common_type(ai32) == np.float64)
+ assert_(common_type(af16) == np.float16)
+ assert_(common_type(af32) == np.float32)
+ assert_(common_type(af64) == np.float64)
+ assert_(common_type(acs) == np.complex64)
+ assert_(common_type(acd) == np.complex128)
+
+
+class TestMintypecode:
+
+ def test_default_1(self):
+ for itype in '1bcsuwil':
+ assert_equal(mintypecode(itype), 'd')
+ assert_equal(mintypecode('f'), 'f')
+ assert_equal(mintypecode('d'), 'd')
+ assert_equal(mintypecode('F'), 'F')
+ assert_equal(mintypecode('D'), 'D')
+
+ def test_default_2(self):
+ for itype in '1bcsuwil':
+ assert_equal(mintypecode(itype + 'f'), 'f')
+ assert_equal(mintypecode(itype + 'd'), 'd')
+ assert_equal(mintypecode(itype + 'F'), 'F')
+ assert_equal(mintypecode(itype + 'D'), 'D')
+ assert_equal(mintypecode('ff'), 'f')
+ assert_equal(mintypecode('fd'), 'd')
+ assert_equal(mintypecode('fF'), 'F')
+ assert_equal(mintypecode('fD'), 'D')
+ assert_equal(mintypecode('df'), 'd')
+ assert_equal(mintypecode('dd'), 'd')
+ #assert_equal(mintypecode('dF',savespace=1),'F')
+ assert_equal(mintypecode('dF'), 'D')
+ assert_equal(mintypecode('dD'), 'D')
+ assert_equal(mintypecode('Ff'), 'F')
+ #assert_equal(mintypecode('Fd',savespace=1),'F')
+ assert_equal(mintypecode('Fd'), 'D')
+ assert_equal(mintypecode('FF'), 'F')
+ assert_equal(mintypecode('FD'), 'D')
+ assert_equal(mintypecode('Df'), 'D')
+ assert_equal(mintypecode('Dd'), 'D')
+ assert_equal(mintypecode('DF'), 'D')
+ assert_equal(mintypecode('DD'), 'D')
+
+ def test_default_3(self):
+ assert_equal(mintypecode('fdF'), 'D')
+ #assert_equal(mintypecode('fdF',savespace=1),'F')
+ assert_equal(mintypecode('fdD'), 'D')
+ assert_equal(mintypecode('fFD'), 'D')
+ assert_equal(mintypecode('dFD'), 'D')
+
+ assert_equal(mintypecode('ifd'), 'd')
+ assert_equal(mintypecode('ifF'), 'F')
+ assert_equal(mintypecode('ifD'), 'D')
+ assert_equal(mintypecode('idF'), 'D')
+ #assert_equal(mintypecode('idF',savespace=1),'F')
+ assert_equal(mintypecode('idD'), 'D')
+
+
+class TestIsscalar:
+
+ def test_basic(self):
+ assert_(np.isscalar(3))
+ assert_(not np.isscalar([3]))
+ assert_(not np.isscalar((3,)))
+ assert_(np.isscalar(3j))
+ assert_(np.isscalar(4.0))
+
+
+class TestReal:
+
+ def test_real(self):
+ y = np.random.rand(10,)
+ assert_array_equal(y, np.real(y))
+
+ y = np.array(1)
+ out = np.real(y)
+ assert_array_equal(y, out)
+ assert_(isinstance(out, np.ndarray))
+
+ y = 1
+ out = np.real(y)
+ assert_equal(y, out)
+ assert_(not isinstance(out, np.ndarray))
+
+ def test_cmplx(self):
+ y = np.random.rand(10,) + 1j * np.random.rand(10,)
+ assert_array_equal(y.real, np.real(y))
+
+ y = np.array(1 + 1j)
+ out = np.real(y)
+ assert_array_equal(y.real, out)
+ assert_(isinstance(out, np.ndarray))
+
+ y = 1 + 1j
+ out = np.real(y)
+ assert_equal(1.0, out)
+ assert_(not isinstance(out, np.ndarray))
+
+
+class TestImag:
+
+ def test_real(self):
+ y = np.random.rand(10,)
+ assert_array_equal(0, np.imag(y))
+
+ y = np.array(1)
+ out = np.imag(y)
+ assert_array_equal(0, out)
+ assert_(isinstance(out, np.ndarray))
+
+ y = 1
+ out = np.imag(y)
+ assert_equal(0, out)
+ assert_(not isinstance(out, np.ndarray))
+
+ def test_cmplx(self):
+ y = np.random.rand(10,) + 1j * np.random.rand(10,)
+ assert_array_equal(y.imag, np.imag(y))
+
+ y = np.array(1 + 1j)
+ out = np.imag(y)
+ assert_array_equal(y.imag, out)
+ assert_(isinstance(out, np.ndarray))
+
+ y = 1 + 1j
+ out = np.imag(y)
+ assert_equal(1.0, out)
+ assert_(not isinstance(out, np.ndarray))
+
+
+class TestIscomplex:
+
+ def test_fail(self):
+ z = np.array([-1, 0, 1])
+ res = iscomplex(z)
+ assert_(not np.any(res, axis=0))
+
+ def test_pass(self):
+ z = np.array([-1j, 1, 0])
+ res = iscomplex(z)
+ assert_array_equal(res, [1, 0, 0])
+
+
+class TestIsreal:
+
+ def test_pass(self):
+ z = np.array([-1, 0, 1j])
+ res = isreal(z)
+ assert_array_equal(res, [1, 1, 0])
+
+ def test_fail(self):
+ z = np.array([-1j, 1, 0])
+ res = isreal(z)
+ assert_array_equal(res, [0, 1, 1])
+
+
+class TestIscomplexobj:
+
+ def test_basic(self):
+ z = np.array([-1, 0, 1])
+ assert_(not iscomplexobj(z))
+ z = np.array([-1j, 0, -1])
+ assert_(iscomplexobj(z))
+
+ def test_scalar(self):
+ assert_(not iscomplexobj(1.0))
+ assert_(iscomplexobj(1 + 0j))
+
+ def test_list(self):
+ assert_(iscomplexobj([3, 1 + 0j, True]))
+ assert_(not iscomplexobj([3, 1, True]))
+
+ def test_duck(self):
+ class DummyComplexArray:
+ @property
+ def dtype(self):
+ return np.dtype(complex)
+ dummy = DummyComplexArray()
+ assert_(iscomplexobj(dummy))
+
+ def test_pandas_duck(self):
+ # This tests a custom np.dtype duck-typed class, such as used by pandas
+ # (pandas.core.dtypes)
+ class PdComplex(np.complex128):
+ pass
+
+ class PdDtype:
+ name = 'category'
+ names = None
+ type = PdComplex
+ kind = 'c'
+ str = '<c16'
+ base = np.dtype('complex128')
+
+ class DummyPd:
+ @property
+ def dtype(self):
+ return PdDtype
+ dummy = DummyPd()
+ assert_(iscomplexobj(dummy))
+
+ def test_custom_dtype_duck(self):
+ class MyArray(list):
+ @property
+ def dtype(self):
+ return complex
+
+ a = MyArray([1 + 0j, 2 + 0j, 3 + 0j])
+ assert_(iscomplexobj(a))
+
+
+class TestIsrealobj:
+ def test_basic(self):
+ z = np.array([-1, 0, 1])
+ assert_(isrealobj(z))
+ z = np.array([-1j, 0, -1])
+ assert_(not isrealobj(z))
+
+
+class TestIsnan:
+
+ def test_goodvalues(self):
+ z = np.array((-1., 0., 1.))
+ res = np.isnan(z) == 0
+ assert_all(np.all(res, axis=0))
+
+ def test_posinf(self):
+ with np.errstate(divide='ignore'):
+ assert_all(np.isnan(np.array((1.,)) / 0.) == 0)
+
+ def test_neginf(self):
+ with np.errstate(divide='ignore'):
+ assert_all(np.isnan(np.array((-1.,)) / 0.) == 0)
+
+ def test_ind(self):
+ with np.errstate(divide='ignore', invalid='ignore'):
+ assert_all(np.isnan(np.array((0.,)) / 0.) == 1)
+
+ def test_integer(self):
+ assert_all(np.isnan(1) == 0)
+
+ def test_complex(self):
+ assert_all(np.isnan(1 + 1j) == 0)
+
+ def test_complex1(self):
+ with np.errstate(divide='ignore', invalid='ignore'):
+ assert_all(np.isnan(np.array(0 + 0j) / 0.) == 1)
+
+
+class TestIsfinite:
+    # FIXME: this is the wrong place; isfinite is now a ufunc.
+
+ def test_goodvalues(self):
+ z = np.array((-1., 0., 1.))
+ res = np.isfinite(z) == 1
+ assert_all(np.all(res, axis=0))
+
+ def test_posinf(self):
+ with np.errstate(divide='ignore', invalid='ignore'):
+ assert_all(np.isfinite(np.array((1.,)) / 0.) == 0)
+
+ def test_neginf(self):
+ with np.errstate(divide='ignore', invalid='ignore'):
+ assert_all(np.isfinite(np.array((-1.,)) / 0.) == 0)
+
+ def test_ind(self):
+ with np.errstate(divide='ignore', invalid='ignore'):
+ assert_all(np.isfinite(np.array((0.,)) / 0.) == 0)
+
+ def test_integer(self):
+ assert_all(np.isfinite(1) == 1)
+
+ def test_complex(self):
+ assert_all(np.isfinite(1 + 1j) == 1)
+
+ def test_complex1(self):
+ with np.errstate(divide='ignore', invalid='ignore'):
+ assert_all(np.isfinite(np.array(1 + 1j) / 0.) == 0)
+
+
+class TestIsinf:
+    # FIXME: this is the wrong place; isinf is now a ufunc.
+
+ def test_goodvalues(self):
+ z = np.array((-1., 0., 1.))
+ res = np.isinf(z) == 0
+ assert_all(np.all(res, axis=0))
+
+ def test_posinf(self):
+ with np.errstate(divide='ignore', invalid='ignore'):
+ assert_all(np.isinf(np.array((1.,)) / 0.) == 1)
+
+ def test_posinf_scalar(self):
+ with np.errstate(divide='ignore', invalid='ignore'):
+            assert_all(np.isinf(np.array(1.) / 0.) == 1)
+
+ def test_neginf(self):
+ with np.errstate(divide='ignore', invalid='ignore'):
+ assert_all(np.isinf(np.array((-1.,)) / 0.) == 1)
+
+ def test_neginf_scalar(self):
+ with np.errstate(divide='ignore', invalid='ignore'):
+ assert_all(np.isinf(np.array(-1.) / 0.) == 1)
+
+ def test_ind(self):
+ with np.errstate(divide='ignore', invalid='ignore'):
+ assert_all(np.isinf(np.array((0.,)) / 0.) == 0)
+
+
+class TestIsposinf:
+
+ def test_generic(self):
+ with np.errstate(divide='ignore', invalid='ignore'):
+ vals = isposinf(np.array((-1., 0, 1)) / 0.)
+ assert_(vals[0] == 0)
+ assert_(vals[1] == 0)
+ assert_(vals[2] == 1)
+
+
+class TestIsneginf:
+
+ def test_generic(self):
+ with np.errstate(divide='ignore', invalid='ignore'):
+ vals = isneginf(np.array((-1., 0, 1)) / 0.)
+ assert_(vals[0] == 1)
+ assert_(vals[1] == 0)
+ assert_(vals[2] == 0)
+
+
+class TestNanToNum:
+
+ def test_generic(self):
+ with np.errstate(divide='ignore', invalid='ignore'):
+ vals = nan_to_num(np.array((-1., 0, 1)) / 0.)
+        assert_all(vals[0] < -1e10)
+        assert_all(np.isfinite(vals[0]))
+        assert_(vals[1] == 0)
+        assert_all(vals[2] > 1e10)
+        assert_all(np.isfinite(vals[2]))
+ assert_equal(type(vals), np.ndarray)
+
+ # perform the same tests but with nan, posinf and neginf keywords
+ with np.errstate(divide='ignore', invalid='ignore'):
+ vals = nan_to_num(np.array((-1., 0, 1)) / 0.,
+ nan=10, posinf=20, neginf=30)
+ assert_equal(vals, [30, 10, 20])
+ assert_all(np.isfinite(vals[[0, 2]]))
+ assert_equal(type(vals), np.ndarray)
+
+ # perform the same test but in-place
+ with np.errstate(divide='ignore', invalid='ignore'):
+ vals = np.array((-1., 0, 1)) / 0.
+ result = nan_to_num(vals, copy=False)
+
+ assert_(result is vals)
+        assert_all(vals[0] < -1e10)
+        assert_all(np.isfinite(vals[0]))
+        assert_(vals[1] == 0)
+        assert_all(vals[2] > 1e10)
+        assert_all(np.isfinite(vals[2]))
+ assert_equal(type(vals), np.ndarray)
+
+ # perform the same test but in-place
+ with np.errstate(divide='ignore', invalid='ignore'):
+ vals = np.array((-1., 0, 1)) / 0.
+ result = nan_to_num(vals, copy=False, nan=10, posinf=20, neginf=30)
+
+ assert_(result is vals)
+ assert_equal(vals, [30, 10, 20])
+ assert_all(np.isfinite(vals[[0, 2]]))
+ assert_equal(type(vals), np.ndarray)
+
+ def test_array(self):
+ vals = nan_to_num([1])
+ assert_array_equal(vals, np.array([1], int))
+ assert_equal(type(vals), np.ndarray)
+ vals = nan_to_num([1], nan=10, posinf=20, neginf=30)
+ assert_array_equal(vals, np.array([1], int))
+ assert_equal(type(vals), np.ndarray)
+
+ def test_integer(self):
+ vals = nan_to_num(1)
+ assert_all(vals == 1)
+ assert_equal(type(vals), np.int_)
+ vals = nan_to_num(1, nan=10, posinf=20, neginf=30)
+ assert_all(vals == 1)
+ assert_equal(type(vals), np.int_)
+
+ def test_float(self):
+ vals = nan_to_num(1.0)
+ assert_all(vals == 1.0)
+ assert_equal(type(vals), np.float64)
+ vals = nan_to_num(1.1, nan=10, posinf=20, neginf=30)
+ assert_all(vals == 1.1)
+ assert_equal(type(vals), np.float64)
+
+ def test_complex_good(self):
+ vals = nan_to_num(1 + 1j)
+ assert_all(vals == 1 + 1j)
+ assert_equal(type(vals), np.complex128)
+ vals = nan_to_num(1 + 1j, nan=10, posinf=20, neginf=30)
+ assert_all(vals == 1 + 1j)
+ assert_equal(type(vals), np.complex128)
+
+ def test_complex_bad(self):
+ with np.errstate(divide='ignore', invalid='ignore'):
+ v = 1 + 1j
+ v += np.array(0 + 1.j) / 0.
+ vals = nan_to_num(v)
+ # !! This is actually (unexpectedly) zero
+ assert_all(np.isfinite(vals))
+ assert_equal(type(vals), np.complex128)
+
+ def test_complex_bad2(self):
+ with np.errstate(divide='ignore', invalid='ignore'):
+ v = 1 + 1j
+ v += np.array(-1 + 1.j) / 0.
+ vals = nan_to_num(v)
+ assert_all(np.isfinite(vals))
+ assert_equal(type(vals), np.complex128)
+ # Fixme
+ #assert_all(vals.imag > 1e10) and assert_all(np.isfinite(vals))
+ # !! This is actually (unexpectedly) positive
+ # !! inf. Comment out for now, and see if it
+ # !! changes
+ #assert_all(vals.real < -1e10) and assert_all(np.isfinite(vals))
+
+ def test_do_not_rewrite_previous_keyword(self):
+ # This is done to test that when, for instance, nan=np.inf then these
+ # values are not rewritten by posinf keyword to the posinf value.
+ with np.errstate(divide='ignore', invalid='ignore'):
+ vals = nan_to_num(np.array((-1., 0, 1)) / 0., nan=np.inf, posinf=999)
+ assert_all(np.isfinite(vals[[0, 2]]))
+ assert_all(vals[0] < -1e10)
+ assert_equal(vals[[1, 2]], [np.inf, 999])
+ assert_equal(type(vals), np.ndarray)
+
+
+class TestRealIfClose:
+
+ def test_basic(self):
+ a = np.random.rand(10)
+ b = real_if_close(a + 1e-15j)
+ assert_all(isrealobj(b))
+ assert_array_equal(a, b)
+ b = real_if_close(a + 1e-7j)
+ assert_all(iscomplexobj(b))
+ b = real_if_close(a + 1e-7j, tol=1e-6)
+ assert_all(isrealobj(b))
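+
+
+# --- Editorial sketch (not part of the upstream NumPy file): real_if_close
+# reads tol > 1 as a multiple of the dtype's machine epsilon and tol <= 1
+# as an absolute bound on the imaginary part, which is why tol=1e-6 above
+# accepts an imaginary component of 1e-7.
+def _sketch_real_if_close_tol():
+    a = np.ones(3) + 1e-7j
+    assert iscomplexobj(real_if_close(a))         # default tol = 100 * eps
+    assert isrealobj(real_if_close(a, tol=1e-6))  # absolute tolerance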
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_ufunclike.py b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_ufunclike.py
new file mode 100644
index 0000000..b4257eb
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_ufunclike.py
@@ -0,0 +1,97 @@
+import numpy as np
+from numpy import fix, isneginf, isposinf
+from numpy.testing import assert_, assert_array_equal, assert_equal, assert_raises
+
+
+class TestUfunclike:
+
+ def test_isposinf(self):
+ a = np.array([np.inf, -np.inf, np.nan, 0.0, 3.0, -3.0])
+ out = np.zeros(a.shape, bool)
+ tgt = np.array([True, False, False, False, False, False])
+
+ res = isposinf(a)
+ assert_equal(res, tgt)
+ res = isposinf(a, out)
+ assert_equal(res, tgt)
+ assert_equal(out, tgt)
+
+ a = a.astype(np.complex128)
+ with assert_raises(TypeError):
+ isposinf(a)
+
+ def test_isneginf(self):
+ a = np.array([np.inf, -np.inf, np.nan, 0.0, 3.0, -3.0])
+ out = np.zeros(a.shape, bool)
+ tgt = np.array([False, True, False, False, False, False])
+
+ res = isneginf(a)
+ assert_equal(res, tgt)
+ res = isneginf(a, out)
+ assert_equal(res, tgt)
+ assert_equal(out, tgt)
+
+ a = a.astype(np.complex128)
+ with assert_raises(TypeError):
+ isneginf(a)
+
+ def test_fix(self):
+ a = np.array([[1.0, 1.1, 1.5, 1.8], [-1.0, -1.1, -1.5, -1.8]])
+ out = np.zeros(a.shape, float)
+ tgt = np.array([[1., 1., 1., 1.], [-1., -1., -1., -1.]])
+
+ res = fix(a)
+ assert_equal(res, tgt)
+ res = fix(a, out)
+ assert_equal(res, tgt)
+ assert_equal(out, tgt)
+ assert_equal(fix(3.14), 3)
+
+ def test_fix_with_subclass(self):
+ class MyArray(np.ndarray):
+ def __new__(cls, data, metadata=None):
+ res = np.array(data, copy=True).view(cls)
+ res.metadata = metadata
+ return res
+
+ def __array_wrap__(self, obj, context=None, return_scalar=False):
+ if not isinstance(obj, MyArray):
+ obj = obj.view(MyArray)
+ if obj.metadata is None:
+ obj.metadata = self.metadata
+ return obj
+
+            def __array_finalize__(self, obj):
+                # NumPy ignores the return value of __array_finalize__.
+                self.metadata = getattr(obj, 'metadata', None)
+
+ a = np.array([1.1, -1.1])
+ m = MyArray(a, metadata='foo')
+ f = fix(m)
+ assert_array_equal(f, np.array([1, -1]))
+ assert_(isinstance(f, MyArray))
+ assert_equal(f.metadata, 'foo')
+
+ # check 0d arrays don't decay to scalars
+ m0d = m[0, ...]
+ m0d.metadata = 'bar'
+ f0d = fix(m0d)
+ assert_(isinstance(f0d, MyArray))
+ assert_equal(f0d.metadata, 'bar')
+
+ def test_scalar(self):
+ x = np.inf
+ actual = np.isposinf(x)
+ expected = np.True_
+ assert_equal(actual, expected)
+ assert_equal(type(actual), type(expected))
+
+ x = -3.4
+ actual = np.fix(x)
+ expected = np.float64(-3.0)
+ assert_equal(actual, expected)
+ assert_equal(type(actual), type(expected))
+
+ out = np.array(0.0)
+ actual = np.fix(x, out=out)
+ assert_(actual is out)
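+
+
+# --- Editorial sketch (not part of the upstream NumPy file): fix() rounds
+# toward zero, i.e. it floors positive values and ceils negative ones.
+def _sketch_fix_rounds_toward_zero():
+    x = np.array([1.9, -1.9, 0.2, -0.2])
+    assert_array_equal(fix(x), np.array([1.0, -1.0, 0.0, -0.0]))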
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_utils.py b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_utils.py
new file mode 100644
index 0000000..0106ee0
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/tests/test_utils.py
@@ -0,0 +1,80 @@
+from io import StringIO
+
+import pytest
+
+import numpy as np
+import numpy.lib._utils_impl as _utils_impl
+from numpy.testing import assert_raises_regex
+
+
+def test_assert_raises_regex_context_manager():
+ with assert_raises_regex(ValueError, 'no deprecation warning'):
+ raise ValueError('no deprecation warning')
+
+
+def test_info_method_heading():
+ # info(class) should only print "Methods:" heading if methods exist
+
+ class NoPublicMethods:
+ pass
+
+ class WithPublicMethods:
+        def first_method(self):
+            pass
+
+ def _has_method_heading(cls):
+ out = StringIO()
+ np.info(cls, output=out)
+ return 'Methods:' in out.getvalue()
+
+ assert _has_method_heading(WithPublicMethods)
+ assert not _has_method_heading(NoPublicMethods)
+
+
+def test_drop_metadata():
+ def _compare_dtypes(dt1, dt2):
+ return np.can_cast(dt1, dt2, casting='no')
+
+ # structured dtype
+ dt = np.dtype([('l1', [('l2', np.dtype('S8', metadata={'msg': 'toto'}))])],
+ metadata={'msg': 'titi'})
+ dt_m = _utils_impl.drop_metadata(dt)
+ assert _compare_dtypes(dt, dt_m) is True
+ assert dt_m.metadata is None
+ assert dt_m['l1'].metadata is None
+ assert dt_m['l1']['l2'].metadata is None
+
+ # alignment
+ dt = np.dtype([('x', '<f8'), ('y', '<i4')],
+ align=True,
+ metadata={'msg': 'toto'})
+ dt_m = _utils_impl.drop_metadata(dt)
+ assert _compare_dtypes(dt, dt_m) is True
+ assert dt_m.metadata is None
+
+ # subdtype
+ dt = np.dtype('8f',
+ metadata={'msg': 'toto'})
+ dt_m = _utils_impl.drop_metadata(dt)
+ assert _compare_dtypes(dt, dt_m) is True
+ assert dt_m.metadata is None
+
+ # scalar
+ dt = np.dtype('uint32',
+ metadata={'msg': 'toto'})
+ dt_m = _utils_impl.drop_metadata(dt)
+ assert _compare_dtypes(dt, dt_m) is True
+ assert dt_m.metadata is None
+
+
+@pytest.mark.parametrize("dtype",
+ [np.dtype("i,i,i,i")[["f1", "f3"]],
+ np.dtype("f8"),
+ np.dtype("10i")])
+def test_drop_metadata_identity_and_copy(dtype):
+ # If there is no metadata, the identity is preserved:
+ assert _utils_impl.drop_metadata(dtype) is dtype
+
+ # If there is any, it is dropped (subforms are checked above)
+ dtype = np.dtype(dtype, metadata={1: 2})
+ assert _utils_impl.drop_metadata(dtype).metadata is None
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/user_array.py b/.venv/lib/python3.12/site-packages/numpy/lib/user_array.py
new file mode 100644
index 0000000..2e96d03
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/user_array.py
@@ -0,0 +1 @@
+from ._user_array_impl import __doc__, container # noqa: F401
diff --git a/.venv/lib/python3.12/site-packages/numpy/lib/user_array.pyi b/.venv/lib/python3.12/site-packages/numpy/lib/user_array.pyi
new file mode 100644
index 0000000..9b90d89
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/numpy/lib/user_array.pyi
@@ -0,0 +1 @@
+from ._user_array_impl import container as container